2 * Huawei SSD device driver
3 * Copyright (c) 2016, Huawei Technologies Co., Ltd.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 #ifndef LINUX_VERSION_CODE
16 #include <linux/version.h>
18 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16))
19 #include <linux/config.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/module.h>
24 #include <linux/bio.h>
25 #include <linux/timer.h>
26 #include <linux/init.h>
27 #include <linux/pci.h>
28 #include <linux/slab.h>
29 #include <linux/spinlock.h>
30 #include <linux/blkdev.h>
31 #include <linux/sched.h>
32 #include <linux/fcntl.h>
33 #include <linux/interrupt.h>
34 #include <linux/compiler.h>
35 #include <linux/bitops.h>
36 #include <linux/delay.h>
37 #include <linux/time.h>
38 #include <linux/stat.h>
40 #include <linux/dma-mapping.h>
41 #include <linux/completion.h>
42 #include <linux/workqueue.h>
44 #include <linux/ioctl.h>
45 #include <linux/hdreg.h> /* HDIO_GETGEO */
46 #include <linux/list.h>
47 #include <linux/reboot.h>
48 #include <linux/kthread.h>
49 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
50 #include <linux/seq_file.h>
52 #include <asm/uaccess.h>
53 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0))
54 #include <linux/scatterlist.h>
55 #include <linux/vmalloc.h>
57 #include <asm/scatterlist.h>
60 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17))
61 #include <linux/devfs_fs_kernel.h>
63 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0))
64 #define bio_endio(bio, errors) bio_endio(bio)
68 #define MODULE_NAME "hio"
69 #define DRIVER_VERSION "2.1.0.23"
70 #define DRIVER_VERSION_LEN 16
72 #define SSD_FW_MIN 0x1
74 #define SSD_DEV_NAME MODULE_NAME
75 #define SSD_DEV_NAME_LEN 16
76 #define SSD_CDEV_NAME "c"SSD_DEV_NAME
77 #define SSD_SDEV_NAME "s"SSD_DEV_NAME
82 #define SSD_MAJOR_SL 0
85 #define SSD_MAX_DEV 702
86 #define SSD_ALPHABET_NUM 26
88 #define hio_info(f, arg...) printk(KERN_INFO MODULE_NAME"info: " f , ## arg)
89 #define hio_note(f, arg...) printk(KERN_NOTICE MODULE_NAME"note: " f , ## arg)
90 #define hio_warn(f, arg...) printk(KERN_WARNING MODULE_NAME"warn: " f , ## arg)
91 #define hio_err(f, arg...) printk(KERN_ERR MODULE_NAME"err: " f , ## arg)
94 #define SSD_SLAVE_PORT_DEVID 0x000a
98 /* 2.6.9 msi affinity bug, should turn msi & msi-x off */
100 #define SSD_ESCAPE_IRQ
106 #define SSD_MSIX_VEC 8
109 //#undef SSD_ESCAPE_IRQ
110 #define SSD_MSIX_AFFINITY_FORCE
115 /* Over temperature protect */
116 #define SSD_OT_PROTECT
118 #ifdef SSD_QUEUE_PBIO
119 #define BIO_SSD_PBIO 20
123 //#define SSD_DEBUG_ERR
126 #define SSD_CMD_TIMEOUT (60*HZ)
129 #define SSD_SPI_TIMEOUT (5*HZ)
130 #define SSD_I2C_TIMEOUT (5*HZ)
132 #define SSD_I2C_MAX_DATA (127)
133 #define SSD_SMBUS_BLOCK_MAX (32)
134 #define SSD_SMBUS_DATA_MAX (SSD_SMBUS_BLOCK_MAX + 2)
137 #define SSD_INIT_WAIT (1000) //1s
138 #define SSD_CONTROLLER_WAIT (20*1000/SSD_INIT_WAIT) //20s
139 #define SSD_INIT_MAX_WAIT (500*1000/SSD_INIT_WAIT) //500s
140 #define SSD_INIT_MAX_WAIT_V3_2 (1400*1000/SSD_INIT_WAIT) //1400s
141 #define SSD_RAM_INIT_MAX_WAIT (10*1000/SSD_INIT_WAIT) //10s
142 #define SSD_CH_INFO_MAX_WAIT (10*1000/SSD_INIT_WAIT) //10s
144 /* blkdev busy wait */
145 #define SSD_DEV_BUSY_WAIT 1000 //ms
146 #define SSD_DEV_BUSY_MAX_WAIT (8*1000/SSD_DEV_BUSY_WAIT) //8s
149 #define SSD_SMBUS_RETRY_INTERVAL (5) //ms
150 #define SSD_SMBUS_RETRY_MAX (1000/SSD_SMBUS_RETRY_INTERVAL)
152 #define SSD_BM_RETRY_MAX 7
154 /* bm routine interval */
155 #define SSD_BM_CAP_LEARNING_DELAY (10*60*1000)
157 /* routine interval */
158 #define SSD_ROUTINE_INTERVAL (10*1000) //10s
159 #define SSD_HWMON_ROUTINE_TICK (60*1000/SSD_ROUTINE_INTERVAL)
160 #define SSD_CAPMON_ROUTINE_TICK ((3600*1000/SSD_ROUTINE_INTERVAL)*24*30)
161 #define SSD_CAPMON2_ROUTINE_TICK (10*60*1000/SSD_ROUTINE_INTERVAL) //fault recover
164 #define SSD_DMA_ALIGN (16)
166 /* some hw defaults */
167 #define SSD_LOG_MAX_SZ 4096
169 #define SSD_NAND_OOB_SZ 1024
170 #define SSD_NAND_ID_SZ 8
171 #define SSD_NAND_ID_BUFF_SZ 1024
172 #define SSD_NAND_MAX_CE 2
174 #define SSD_BBT_RESERVED 8
176 #define SSD_ECC_MAX_FLIP (64+1)
178 #define SSD_RAM_ALIGN 16
181 #define SSD_RELOAD_FLAG 0x3333CCCC
182 #define SSD_RELOAD_FW 0xAA5555AA
183 #define SSD_RESET_NOINIT 0xAA5555AA
184 #define SSD_RESET 0x55AAAA55
185 #define SSD_RESET_FULL 0x5A
186 //#define SSD_RESET_WAIT 1000 //1s
187 //#define SSD_RESET_MAX_WAIT (200*1000/SSD_RESET_WAIT) //200s
191 #define SSD_PROTOCOL_V1 0x0
193 #define SSD_ROM_SIZE (16*1024*1024)
194 #define SSD_ROM_BLK_SIZE (256*1024)
195 #define SSD_ROM_PAGE_SIZE (256)
196 #define SSD_ROM_NR_BRIDGE_FW 2
197 #define SSD_ROM_NR_CTRL_FW 2
198 #define SSD_ROM_BRIDGE_FW_BASE 0
199 #define SSD_ROM_BRIDGE_FW_SIZE (2*1024*1024)
200 #define SSD_ROM_CTRL_FW_BASE (SSD_ROM_NR_BRIDGE_FW*SSD_ROM_BRIDGE_FW_SIZE)
201 #define SSD_ROM_CTRL_FW_SIZE (5*1024*1024)
202 #define SSD_ROM_LABEL_BASE (SSD_ROM_CTRL_FW_BASE+SSD_ROM_CTRL_FW_SIZE*SSD_ROM_NR_CTRL_FW)
203 #define SSD_ROM_VP_BASE (SSD_ROM_LABEL_BASE+SSD_ROM_BLK_SIZE)
206 #define SSD_PROTOCOL_V3 0x3000000
207 #define SSD_PROTOCOL_V3_1_1 0x3010001
208 #define SSD_PROTOCOL_V3_1_3 0x3010003
209 #define SSD_PROTOCOL_V3_2 0x3020000
210 #define SSD_PROTOCOL_V3_2_1 0x3020001 /* <4KB improved */
211 #define SSD_PROTOCOL_V3_2_2 0x3020002 /* ot protect */
212 #define SSD_PROTOCOL_V3_2_4 0x3020004
215 #define SSD_PV3_ROM_NR_BM_FW 1
216 #define SSD_PV3_ROM_BM_FW_SZ (64*1024*8)
218 #define SSD_ROM_LOG_SZ (64*1024*4)
220 #define SSD_ROM_NR_SMART_MAX 2
221 #define SSD_PV3_ROM_NR_SMART SSD_ROM_NR_SMART_MAX
222 #define SSD_PV3_ROM_SMART_SZ (64*1024)
225 #define SSD_PV3_2_ROM_LOG_SZ (64*1024*80) /* 5MB */
226 #define SSD_PV3_2_ROM_SEC_SZ (256*1024) /* 256KB */
230 #define SSD_REQ_FIFO_REG 0x0000
231 #define SSD_RESP_FIFO_REG 0x0008 //0x0010
232 #define SSD_RESP_PTR_REG 0x0010 //0x0018
233 #define SSD_INTR_INTERVAL_REG 0x0018
234 #define SSD_READY_REG 0x001C
235 #define SSD_BRIDGE_TEST_REG 0x0020
236 #define SSD_STRIPE_SIZE_REG 0x0028
237 #define SSD_CTRL_VER_REG 0x0030 //controller
238 #define SSD_BRIDGE_VER_REG 0x0034 //bridge
239 #define SSD_PCB_VER_REG 0x0038
240 #define SSD_BURN_FLAG_REG 0x0040
241 #define SSD_BRIDGE_INFO_REG 0x0044
243 #define SSD_WL_VAL_REG 0x0048 //32-bit
245 #define SSD_BB_INFO_REG 0x004C
247 #define SSD_ECC_TEST_REG 0x0050 //test only
248 #define SSD_ERASE_TEST_REG 0x0058 //test only
249 #define SSD_WRITE_TEST_REG 0x0060 //test only
251 #define SSD_RESET_REG 0x0068
252 #define SSD_RELOAD_FW_REG 0x0070
254 #define SSD_RESERVED_BLKS_REG 0x0074
255 #define SSD_VALID_PAGES_REG 0x0078
256 #define SSD_CH_INFO_REG 0x007C
258 #define SSD_CTRL_TEST_REG_SZ 0x8
259 #define SSD_CTRL_TEST_REG0 0x0080
260 #define SSD_CTRL_TEST_REG1 0x0088
261 #define SSD_CTRL_TEST_REG2 0x0090
262 #define SSD_CTRL_TEST_REG3 0x0098
263 #define SSD_CTRL_TEST_REG4 0x00A0
264 #define SSD_CTRL_TEST_REG5 0x00A8
265 #define SSD_CTRL_TEST_REG6 0x00B0
266 #define SSD_CTRL_TEST_REG7 0x00B8
268 #define SSD_FLASH_INFO_REG0 0x00C0
269 #define SSD_FLASH_INFO_REG1 0x00C8
270 #define SSD_FLASH_INFO_REG2 0x00D0
271 #define SSD_FLASH_INFO_REG3 0x00D8
272 #define SSD_FLASH_INFO_REG4 0x00E0
273 #define SSD_FLASH_INFO_REG5 0x00E8
274 #define SSD_FLASH_INFO_REG6 0x00F0
275 #define SSD_FLASH_INFO_REG7 0x00F8
277 #define SSD_RESP_INFO_REG 0x01B8
278 #define SSD_NAND_BUFF_BASE 0x01BC //for nand write
280 #define SSD_CHIP_INFO_REG_SZ 0x10
281 #define SSD_CHIP_INFO_REG0 0x0100 //128 bit
282 #define SSD_CHIP_INFO_REG1 0x0110
283 #define SSD_CHIP_INFO_REG2 0x0120
284 #define SSD_CHIP_INFO_REG3 0x0130
285 #define SSD_CHIP_INFO_REG4 0x0140
286 #define SSD_CHIP_INFO_REG5 0x0150
287 #define SSD_CHIP_INFO_REG6 0x0160
288 #define SSD_CHIP_INFO_REG7 0x0170
290 #define SSD_RAM_INFO_REG 0x01C4
292 #define SSD_BBT_BASE_REG 0x01C8
293 #define SSD_ECT_BASE_REG 0x01CC
295 #define SSD_CLEAR_INTR_REG 0x01F0
297 #define SSD_INIT_STATE_REG_SZ 0x8
298 #define SSD_INIT_STATE_REG0 0x0200
299 #define SSD_INIT_STATE_REG1 0x0208
300 #define SSD_INIT_STATE_REG2 0x0210
301 #define SSD_INIT_STATE_REG3 0x0218
302 #define SSD_INIT_STATE_REG4 0x0220
303 #define SSD_INIT_STATE_REG5 0x0228
304 #define SSD_INIT_STATE_REG6 0x0230
305 #define SSD_INIT_STATE_REG7 0x0238
307 #define SSD_ROM_INFO_REG 0x0600
308 #define SSD_ROM_BRIDGE_FW_INFO_REG 0x0604
309 #define SSD_ROM_CTRL_FW_INFO_REG 0x0608
310 #define SSD_ROM_VP_INFO_REG 0x060C
312 #define SSD_LOG_INFO_REG 0x0610
313 #define SSD_LED_REG 0x0614
314 #define SSD_MSG_BASE_REG 0x06F8
317 #define SSD_SPI_REG_CMD 0x0180
318 #define SSD_SPI_REG_CMD_HI 0x0184
319 #define SSD_SPI_REG_WDATA 0x0188
320 #define SSD_SPI_REG_ID 0x0190
321 #define SSD_SPI_REG_STATUS 0x0198
322 #define SSD_SPI_REG_RDATA 0x01A0
323 #define SSD_SPI_REG_READY 0x01A8
326 #define SSD_I2C_CTRL_REG 0x06F0
327 #define SSD_I2C_RDATA_REG 0x06F4
329 /* temperature reg */
330 #define SSD_BRIGE_TEMP_REG 0x0618
332 #define SSD_CTRL_TEMP_REG0 0x0700
333 #define SSD_CTRL_TEMP_REG1 0x0708
334 #define SSD_CTRL_TEMP_REG2 0x0710
335 #define SSD_CTRL_TEMP_REG3 0x0718
336 #define SSD_CTRL_TEMP_REG4 0x0720
337 #define SSD_CTRL_TEMP_REG5 0x0728
338 #define SSD_CTRL_TEMP_REG6 0x0730
339 #define SSD_CTRL_TEMP_REG7 0x0738
341 /* revision 3 reg */
342 #define SSD_PROTOCOL_VER_REG 0x01B4
344 #define SSD_FLUSH_TIMEOUT_REG 0x02A4
345 #define SSD_BM_FAULT_REG 0x0660
347 #define SSD_PV3_RAM_STATUS_REG_SZ 0x4
348 #define SSD_PV3_RAM_STATUS_REG0 0x0260
349 #define SSD_PV3_RAM_STATUS_REG1 0x0264
350 #define SSD_PV3_RAM_STATUS_REG2 0x0268
351 #define SSD_PV3_RAM_STATUS_REG3 0x026C
352 #define SSD_PV3_RAM_STATUS_REG4 0x0270
353 #define SSD_PV3_RAM_STATUS_REG5 0x0274
354 #define SSD_PV3_RAM_STATUS_REG6 0x0278
355 #define SSD_PV3_RAM_STATUS_REG7 0x027C
357 #define SSD_PV3_CHIP_INFO_REG_SZ 0x40
358 #define SSD_PV3_CHIP_INFO_REG0 0x0300
359 #define SSD_PV3_CHIP_INFO_REG1 0x0340
360 #define SSD_PV3_CHIP_INFO_REG2 0x0380
361 #define SSD_PV3_CHIP_INFO_REG3 0x03B0
362 #define SSD_PV3_CHIP_INFO_REG4 0x0400
363 #define SSD_PV3_CHIP_INFO_REG5 0x0440
364 #define SSD_PV3_CHIP_INFO_REG6 0x0480
365 #define SSD_PV3_CHIP_INFO_REG7 0x04B0
367 #define SSD_PV3_INIT_STATE_REG_SZ 0x20
368 #define SSD_PV3_INIT_STATE_REG0 0x0500
369 #define SSD_PV3_INIT_STATE_REG1 0x0520
370 #define SSD_PV3_INIT_STATE_REG2 0x0540
371 #define SSD_PV3_INIT_STATE_REG3 0x0560
372 #define SSD_PV3_INIT_STATE_REG4 0x0580
373 #define SSD_PV3_INIT_STATE_REG5 0x05A0
374 #define SSD_PV3_INIT_STATE_REG6 0x05C0
375 #define SSD_PV3_INIT_STATE_REG7 0x05E0
377 /* revision 3.1.1 reg */
378 #define SSD_FULL_RESET_REG 0x01B0
380 #define SSD_CTRL_REG_ZONE_SZ 0x800
382 #define SSD_BB_THRESHOLD_L1_REG 0x2C0
383 #define SSD_BB_THRESHOLD_L2_REG 0x2C4
385 #define SSD_BB_ACC_REG_SZ 0x4
386 #define SSD_BB_ACC_REG0 0x21C0
387 #define SSD_BB_ACC_REG1 0x29C0
388 #define SSD_BB_ACC_REG2 0x31C0
390 #define SSD_EC_THRESHOLD_L1_REG 0x2C8
391 #define SSD_EC_THRESHOLD_L2_REG 0x2CC
393 #define SSD_EC_ACC_REG_SZ 0x4
394 #define SSD_EC_ACC_REG0 0x21E0
395 #define SSD_EC_ACC_REG1 0x29E0
396 #define SSD_EC_ACC_REG2 0x31E0
398 /* revision 3.1.2 & 3.1.3 reg */
399 #define SSD_HW_STATUS_REG 0x02AC
401 #define SSD_PLP_INFO_REG 0x0664
403 /* revision 3.2 reg */
404 #define SSD_POWER_ON_REG 0x01EC
405 #define SSD_PCIE_LINKSTATUS_REG 0x01F8
406 #define SSD_PL_CAP_LEARN_REG 0x01FC
408 #define SSD_FPGA_1V0_REG0 0x2070
409 #define SSD_FPGA_1V8_REG0 0x2078
410 #define SSD_FPGA_1V0_REG1 0x2870
411 #define SSD_FPGA_1V8_REG1 0x2878
413 /* revision 3.2 reg */
414 #define SSD_READ_OT_REG0 0x2260
415 #define SSD_WRITE_OT_REG0 0x2264
416 #define SSD_READ_OT_REG1 0x2A60
417 #define SSD_WRITE_OT_REG1 0x2A64
421 #define SSD_FUNC_READ 0x01
422 #define SSD_FUNC_WRITE 0x02
423 #define SSD_FUNC_NAND_READ_WOOB 0x03
424 #define SSD_FUNC_NAND_READ 0x04
425 #define SSD_FUNC_NAND_WRITE 0x05
426 #define SSD_FUNC_NAND_ERASE 0x06
427 #define SSD_FUNC_NAND_READ_ID 0x07
428 #define SSD_FUNC_READ_LOG 0x08
429 #define SSD_FUNC_TRIM 0x09
430 #define SSD_FUNC_RAM_READ 0x10
431 #define SSD_FUNC_RAM_WRITE 0x11
432 #define SSD_FUNC_FLUSH 0x12 //cache / bbt
435 #define SSD_SPI_CMD_PROGRAM 0x02
436 #define SSD_SPI_CMD_READ 0x03
437 #define SSD_SPI_CMD_W_DISABLE 0x04
438 #define SSD_SPI_CMD_READ_STATUS 0x05
439 #define SSD_SPI_CMD_W_ENABLE 0x06
440 #define SSD_SPI_CMD_ERASE 0xd8
441 #define SSD_SPI_CMD_CLSR 0x30
442 #define SSD_SPI_CMD_READ_ID 0x9f
445 #define SSD_I2C_CTRL_READ 0x00
446 #define SSD_I2C_CTRL_WRITE 0x01
448 /* i2c internal register */
449 #define SSD_I2C_CFG_REG 0x00
450 #define SSD_I2C_DATA_REG 0x01
451 #define SSD_I2C_CMD_REG 0x02
452 #define SSD_I2C_STATUS_REG 0x03
453 #define SSD_I2C_SADDR_REG 0x04
454 #define SSD_I2C_LEN_REG 0x05
455 #define SSD_I2C_RLEN_REG 0x06
456 #define SSD_I2C_WLEN_REG 0x07
457 #define SSD_I2C_RESET_REG 0x08 //write for reset
458 #define SSD_I2C_PRER_REG 0x09
462 /* FPGA volt = ADC_value / 4096 * 3v */
463 #define SSD_FPGA_1V0_ADC_MIN 1228 // 0.9v
464 #define SSD_FPGA_1V0_ADC_MAX 1502 // 1.1v
465 #define SSD_FPGA_1V8_ADC_MIN 2211 // 1.62v
466 #define SSD_FPGA_1V8_ADC_MAX 2703 // 1.98
/* Decode the packed 64-bit FPGA voltage register: 16-bit fields at bits
 * 0-15 (max), 16-31 (min) and 32-47 (current); each field carries a 12-bit
 * ADC sample in its upper bits, hence the trailing ">> 4".
 * All macro arguments are parenthesized so expression operands cannot be
 * re-associated by operator precedence (the originals expanded `val` bare
 * in MIN/CUR/VOLT). */
#define SSD_FPGA_VOLT_MAX(val)	(((val) & 0xffff) >> 4)
#define SSD_FPGA_VOLT_MIN(val)	((((val) >> 16) & 0xffff) >> 4)
#define SSD_FPGA_VOLT_CUR(val)	((((val) >> 32) & 0xffff) >> 4)
/* ADC sample -> millivolts: volt = adc / 4096 * 3000 mV. */
#define SSD_FPGA_VOLT(val)	(((val) * 3000) >> 12)
/* Pack a voltage-log record: idx in the top byte, ctrl above the voltage
 * word, volt in the low bits. Fields are not masked — callers pass in-range
 * values. Arguments are parenthesized before the cast/shift so expression
 * operands (e.g. `a | b`) cannot change precedence. */
#define SSD_VOLT_LOG_DATA(idx, ctrl, volt) \
	((((uint32_t)(idx)) << 24) | (((uint32_t)(ctrl)) << 16) | ((uint32_t)(volt)))
485 SSD_CLOCK_166M_LOST
= 0,
493 #define SSD_SENSOR_LM75_SADDRESS (0x49 << 1)
494 #define SSD_SENSOR_LM80_SADDRESS (0x28 << 1)
/* Sensor raw 16-bit word -> whole degrees: drop the low (fractional) byte.
 * Argument parenthesized — the original expanded `val` bare, so an
 * expression operand containing `&` or `|` would bind against `>> 8`. */
#define SSD_SENSOR_CONVERT_TEMP(val)	((int)((val) >> 8))
498 #define SSD_INLET_OT_TEMP (55) //55 DegC
499 #define SSD_INLET_OT_HYST (50) //50 DegC
500 #define SSD_FLASH_OT_TEMP (70) //70 DegC
501 #define SSD_FLASH_OT_HYST (65) //65 DegC
514 SSD_LM75_REG_TEMP
= 0,
521 #define SSD_LM80_REG_IN_MAX(nr) (0x2a + (nr) * 2)
522 #define SSD_LM80_REG_IN_MIN(nr) (0x2b + (nr) * 2)
523 #define SSD_LM80_REG_IN(nr) (0x20 + (nr))
525 #define SSD_LM80_REG_FAN1 0x28
526 #define SSD_LM80_REG_FAN2 0x29
527 #define SSD_LM80_REG_FAN_MIN(nr) (0x3b + (nr))
529 #define SSD_LM80_REG_TEMP 0x27
530 #define SSD_LM80_REG_TEMP_HOT_MAX 0x38
531 #define SSD_LM80_REG_TEMP_HOT_HYST 0x39
532 #define SSD_LM80_REG_TEMP_OS_MAX 0x3a
533 #define SSD_LM80_REG_TEMP_OS_HYST 0x3b
535 #define SSD_LM80_REG_CONFIG 0x00
536 #define SSD_LM80_REG_ALARM1 0x01
537 #define SSD_LM80_REG_ALARM2 0x02
538 #define SSD_LM80_REG_MASK1 0x03
539 #define SSD_LM80_REG_MASK2 0x04
540 #define SSD_LM80_REG_FANDIV 0x05
541 #define SSD_LM80_REG_RES 0x06
/* LM80 raw reading -> voltage: scale by 10 then drop the 8 fractional bits.
 * Argument parenthesized (original expanded `val` bare — `a + b` operands
 * would have bound as `a + (b * 10)`). */
#define SSD_LM80_CONVERT_VOLT(val)	(((val) * 10) >> 8)
/* 3.3V rail is read through a divider; 33/19 restores the rail value. */
#define SSD_LM80_3V3_VOLT(val)	((val)*33/19)
/* LM80 conversion interval, in ms. */
#define SSD_LM80_CONV_INTERVAL	(1000)
556 SSD_LM80_IN_FPGA_3V3
,
561 struct ssd_lm80_limit
567 /* +/- 5% except cap in*/
568 static struct ssd_lm80_limit ssd_lm80_limit
[SSD_LM80_IN_NR
] = {
569 {171, 217}, /* CAP in: 1710 ~ 2170 */
578 /* temperature sensors */
588 #ifdef SSD_OT_PROTECT
589 #define SSD_OT_DELAY (60) //ms
591 #define SSD_OT_TEMP (90) //90 DegC
593 #define SSD_OT_TEMP_HYST (85) //85 DegC
596 /* fpga temperature */
597 //#define CONVERT_TEMP(val) ((float)(val)*503.975f/4096.0f-273.15f)
/* FPGA temperature conversion — integer form of raw*503.975/4096 - 273.15
 * (see the commented float version in the original source). */
#define CONVERT_TEMP(val)	((val)*504/4096-273)
/* The 64-bit temperature register packs three 16-bit fields (bits 0-15 max,
 * 16-31 min, 32-47 current); the raw sample sits in the upper 12 bits of
 * each field. `val` is parenthesized — the originals expanded it bare, so
 * expression operands would mis-bind against `&`/`>>`. */
#define MAX_TEMP(val)	CONVERT_TEMP((((val) & 0xffff) >> 4))
#define MIN_TEMP(val)	CONVERT_TEMP(((((val) >> 16) & 0xffff) >> 4))
#define CUR_TEMP(val)	CONVERT_TEMP(((((val) >> 32) & 0xffff) >> 4))
606 #define SSD_PL_CAP_U1 SSD_LM80_REG_IN(SSD_LM80_IN_CAP)
607 #define SSD_PL_CAP_U2 SSD_LM80_REG_IN(SSD_LM80_IN_1V8)
/* Power-loss capacitor learning: estimate capacitance from voltages u1/u2
 * and time t. NOTE(review): the 2*162 constant is carried over unexplained
 * from the original — presumed a hardware load value; confirm against spec.
 * Divides by (u1 - u2): callers must guarantee u1 != u2.
 * Arguments parenthesized (originals expanded bare). */
#define SSD_PL_CAP_LEARN(u1, u2, t)	(((t) * ((u1) + (u2))) / (2 * 162 * ((u1) - (u2))))
609 #define SSD_PL_CAP_LEARN_WAIT (20) //20ms
610 #define SSD_PL_CAP_LEARN_MAX_WAIT (1000/SSD_PL_CAP_LEARN_WAIT) //1s
612 #define SSD_PL_CAP_CHARGE_WAIT (1000)
613 #define SSD_PL_CAP_CHARGE_MAX_WAIT ((120*1000)/SSD_PL_CAP_CHARGE_WAIT) //120s
/* Capacitor voltage from raw reading — x7 scale (presumably an input
 * divider ratio; taken from original, confirm against hardware).
 * Argument parenthesized (original expanded `val` bare). */
#define SSD_PL_CAP_VOLT(val)	((val) * 7)
617 #define SSD_PL_CAP_VOLT_FULL (13700)
618 #define SSD_PL_CAP_VOLT_READY (12880)
620 #define SSD_PL_CAP_THRESHOLD (8900)
621 #define SSD_PL_CAP_CP_THRESHOLD (5800)
622 #define SSD_PL_CAP_THRESHOLD_HYST (100)
624 enum ssd_pl_cap_status
632 SSD_PL_CAP_DEFAULT
= 0, /* 4 cap */
633 SSD_PL_CAP_CP
/* 3 cap */
638 #define SSD_HWMON_OFFS_TEMP (0)
639 #define SSD_HWMON_OFFS_SENSOR (SSD_HWMON_OFFS_TEMP + SSD_TEMP_NR)
640 #define SSD_HWMON_OFFS_PL_CAP (SSD_HWMON_OFFS_SENSOR + SSD_SENSOR_NR)
641 #define SSD_HWMON_OFFS_LM80 (SSD_HWMON_OFFS_PL_CAP + SSD_PL_CAP_NR)
642 #define SSD_HWMON_OFFS_CLOCK (SSD_HWMON_OFFS_LM80 + SSD_LM80_IN_NR)
643 #define SSD_HWMON_OFFS_FPGA (SSD_HWMON_OFFS_CLOCK + SSD_CLOCK_NR)
/* Flat hwmon slot numbering: map a per-class index onto the offsets laid
 * out by the SSD_HWMON_OFFS_* defines above. Index/controller arguments
 * are parenthesized (originals expanded them bare, so `a + b` style
 * operands would mis-bind in the FPGA multiply). */
#define SSD_HWMON_TEMP(idx)	(SSD_HWMON_OFFS_TEMP + (idx))
#define SSD_HWMON_SENSOR(idx)	(SSD_HWMON_OFFS_SENSOR + (idx))
#define SSD_HWMON_PL_CAP(idx)	(SSD_HWMON_OFFS_PL_CAP + (idx))
#define SSD_HWMON_LM80(idx)	(SSD_HWMON_OFFS_LM80 + (idx))
#define SSD_HWMON_CLOCK(idx)	(SSD_HWMON_OFFS_CLOCK + (idx))
#define SSD_HWMON_FPGA(ctrl, idx)	(SSD_HWMON_OFFS_FPGA + ((ctrl) * SSD_FPGA_VOLT_NR) + (idx))
666 static int sfifo_alloc(struct sfifo
*fifo
, uint32_t size
, uint32_t esize
)
670 if (!fifo
|| size
> INT_MAX
|| esize
== 0) {
674 while (__size
< size
) __size
<<= 1;
680 fifo
->data
= vmalloc(esize
* __size
);
687 fifo
->mask
= __size
- 1;
690 spin_lock_init(&fifo
->lock
);
695 static void sfifo_free(struct sfifo
*fifo
)
710 static int __sfifo_put(struct sfifo
*fifo
, void *val
)
712 if (((fifo
->in
+ 1) & fifo
->mask
) == fifo
->out
) {
716 memcpy((fifo
->data
+ (fifo
->in
* fifo
->esize
)), val
, fifo
->esize
);
717 fifo
->in
= (fifo
->in
+ 1) & fifo
->mask
;
722 static int sfifo_put(struct sfifo
*fifo
, void *val
)
730 if (!in_interrupt()) {
731 spin_lock_irq(&fifo
->lock
);
732 ret
= __sfifo_put(fifo
, val
);
733 spin_unlock_irq(&fifo
->lock
);
735 spin_lock(&fifo
->lock
);
736 ret
= __sfifo_put(fifo
, val
);
737 spin_unlock(&fifo
->lock
);
743 static int __sfifo_get(struct sfifo
*fifo
, void *val
)
745 if (fifo
->out
== fifo
->in
) {
749 memcpy(val
, (fifo
->data
+ (fifo
->out
* fifo
->esize
)), fifo
->esize
);
750 fifo
->out
= (fifo
->out
+ 1) & fifo
->mask
;
755 static int sfifo_get(struct sfifo
*fifo
, void *val
)
763 if (!in_interrupt()) {
764 spin_lock_irq(&fifo
->lock
);
765 ret
= __sfifo_get(fifo
, val
);
766 spin_unlock_irq(&fifo
->lock
);
768 spin_lock(&fifo
->lock
);
769 ret
= __sfifo_get(fifo
, val
);
770 spin_unlock(&fifo
->lock
);
777 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30))
783 static inline void ssd_blist_init(struct ssd_blist
*ssd_bl
)
789 static inline struct bio
*ssd_blist_get(struct ssd_blist
*ssd_bl
)
791 struct bio
*bio
= ssd_bl
->prev
;
799 static inline void ssd_blist_add(struct ssd_blist
*ssd_bl
, struct bio
*bio
)
804 ssd_bl
->next
->bi_next
= bio
;
813 #define ssd_blist bio_list
814 #define ssd_blist_init bio_list_init
815 #define ssd_blist_get bio_list_get
816 #define ssd_blist_add bio_list_add
819 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0))
820 #define bio_start(bio) (bio->bi_sector)
822 #define bio_start(bio) (bio->bi_iter.bi_sector)
826 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16))
827 #define mutex_lock down
828 #define mutex_unlock up
829 #define mutex semaphore
830 #define mutex_init init_MUTEX
834 typedef union ssd_i2c_ctrl
{
842 }__attribute__((packed
)) ssd_i2c_ctrl_t
;
844 typedef union ssd_i2c_data
{
851 }__attribute__((packed
)) ssd_i2c_data_t
;
856 SSD_WMODE_BUFFER
= 0,
873 typedef struct ssd_sg_entry
878 }__attribute__((packed
))ssd_sg_entry_t
;
880 typedef struct ssd_rw_msg
886 uint32_t reserved
; //for 64-bit align
887 struct ssd_sg_entry sge
[1]; //base
888 }__attribute__((packed
))ssd_rw_msg_t
;
890 typedef struct ssd_resp_msg
898 }__attribute__((packed
))ssd_resp_msg_t
;
900 typedef struct ssd_flush_msg
903 uint8_t flag
:2; //flash cache 0 or bbt 1
907 uint32_t reserved
; //align
908 }__attribute__((packed
))ssd_flush_msg_t
;
910 typedef struct ssd_nand_op_msg
916 uint32_t reserved
; //align
922 }__attribute__((packed
))ssd_nand_op_msg_t
;
924 typedef struct ssd_ram_op_msg
930 uint32_t reserved
; //align
934 }__attribute__((packed
))ssd_ram_op_msg_t
;
938 typedef struct ssd_log_msg
944 uint32_t reserved
; //align
946 }__attribute__((packed
))ssd_log_msg_t
;
948 typedef struct ssd_log_op_msg
954 uint32_t reserved
; //align
955 uint64_t reserved1
; //align
957 }__attribute__((packed
))ssd_log_op_msg_t
;
959 typedef struct ssd_log_resp_msg
963 uint16_t reserved1
:2; //align with the normal resp msg
967 }__attribute__((packed
))ssd_log_resp_msg_t
;
971 typedef union ssd_response_msq
973 ssd_resp_msg_t resp_msg
;
974 ssd_log_resp_msg_t log_resp_msg
;
977 } ssd_response_msq_t
;
981 typedef struct ssd_protocol_info
984 uint32_t init_state_reg
;
985 uint32_t init_state_reg_sz
;
986 uint32_t chip_info_reg
;
987 uint32_t chip_info_reg_sz
;
988 } ssd_protocol_info_t
;
990 typedef struct ssd_hw_info
995 uint32_t cmd_fifo_sz
;
996 uint32_t cmd_fifo_sz_mask
;
999 uint32_t resp_ptr_sz
;
1000 uint32_t resp_msg_sz
;
1004 uint16_t nr_data_ch
;
1010 uint8_t upper_pcb_ver
;
1012 uint8_t nand_vendor_id
;
1013 uint8_t nand_dev_id
;
1020 uint16_t bbf_seek
; //
1022 uint16_t page_count
; //per block
1024 uint32_t block_count
; //per flash
1028 uint32_t ram_max_len
;
1032 uint64_t md_base
; //metadata
1034 uint32_t md_entry_sz
;
1038 uint64_t nand_wbuff_base
;
1040 uint32_t md_reserved_blks
;
1041 uint32_t reserved_blks
;
1042 uint32_t valid_pages
;
1043 uint32_t max_valid_pages
;
1047 typedef struct ssd_hw_info_extend
1053 uint8_t form_factor
;
1056 }ssd_hw_info_extend_t
;
1058 typedef struct ssd_rom_info
1061 uint32_t block_size
;
1063 uint8_t nr_bridge_fw
;
1067 uint32_t bridge_fw_base
;
1068 uint32_t bridge_fw_sz
;
1069 uint32_t ctrl_fw_base
;
1070 uint32_t ctrl_fw_sz
;
1071 uint32_t bm_fw_base
;
1075 uint32_t smart_base
;
1078 uint32_t label_base
;
1086 SSD_DEBUG_WRITE_ERR
,
1096 typedef struct ssd_debug_info
1112 #define SSD_LABEL_FIELD_SZ 32
1113 #define SSD_SN_SZ 16
1115 typedef struct ssd_label
1117 char date
[SSD_LABEL_FIELD_SZ
];
1118 char sn
[SSD_LABEL_FIELD_SZ
];
1119 char part
[SSD_LABEL_FIELD_SZ
];
1120 char desc
[SSD_LABEL_FIELD_SZ
];
1121 char other
[SSD_LABEL_FIELD_SZ
];
1122 char maf
[SSD_LABEL_FIELD_SZ
];
1125 #define SSD_LABEL_DESC_SZ 256
1127 typedef struct ssd_labelv3
1129 char boardtype
[SSD_LABEL_FIELD_SZ
];
1130 char barcode
[SSD_LABEL_FIELD_SZ
];
1131 char item
[SSD_LABEL_FIELD_SZ
];
1132 char description
[SSD_LABEL_DESC_SZ
];
1133 char manufactured
[SSD_LABEL_FIELD_SZ
];
1134 char vendorname
[SSD_LABEL_FIELD_SZ
];
1135 char issuenumber
[SSD_LABEL_FIELD_SZ
];
1136 char cleicode
[SSD_LABEL_FIELD_SZ
];
1137 char bom
[SSD_LABEL_FIELD_SZ
];
1141 typedef struct ssd_battery_info
1144 } ssd_battery_info_t
;
1146 /* ssd power stat */
1147 typedef struct ssd_power_stat
1149 uint64_t nr_poweron
;
1150 uint64_t nr_powerloss
;
1151 uint64_t init_failed
;
1155 typedef struct ssd_io_stat
1168 typedef struct ssd_ecc_info
1170 uint64_t bitflip
[SSD_ECC_MAX_FLIP
];
1176 SSD_LOG_LEVEL_INFO
= 0,
1177 SSD_LOG_LEVEL_NOTICE
,
1178 SSD_LOG_LEVEL_WARNING
,
1183 typedef struct ssd_log_info
1186 uint64_t stat
[SSD_LOG_NR_LEVEL
];
1190 #define SSD_SMART_MAGIC (0x5452414D53445353ull)
1192 typedef struct ssd_smart
1194 struct ssd_power_stat pstat
;
1195 struct ssd_io_stat io_stat
;
1196 struct ssd_ecc_info ecc_info
;
1197 struct ssd_log_info log_info
;
1203 typedef struct ssd_internal_log
1207 } ssd_internal_log_t
;
1210 typedef struct ssd_cmd
1213 struct scatterlist
*sgl
;
1214 struct list_head list
;
1217 int flag
; /*pbio(1) or bio(0)*/
1223 unsigned long start_time
;
1226 unsigned int nr_log
;
1228 struct timer_list cmd_timer
;
1229 struct completion
*waiting
;
1232 typedef void (*send_cmd_func
)(struct ssd_cmd
*);
1233 typedef int (*ssd_event_call
)(struct gendisk
*, int, int); /* gendisk, event id, event level */
1236 #define SSD_DCMD_MAX_SZ 32
1238 typedef struct ssd_dcmd
1240 struct list_head list
;
1242 uint8_t msg
[SSD_DCMD_MAX_SZ
];
1258 #define SSD_QUEUE_NAME_LEN 16
1259 typedef struct ssd_queue
{
1260 char name
[SSD_QUEUE_NAME_LEN
];
1266 uint32_t resp_idx_mask
;
1267 uint32_t resp_msg_sz
;
1272 struct ssd_cmd
*cmd
;
1274 struct ssd_io_stat io_stat
;
1275 struct ssd_ecc_info ecc_info
;
1278 typedef struct ssd_device
{
1279 char name
[SSD_DEV_NAME_LEN
];
1286 #ifdef SSD_ESCAPE_IRQ
1292 int ot_delay
; //in ms
1296 atomic_t in_flight
[2]; //r&w
1300 struct list_head list
;
1301 struct pci_dev
*pdev
;
1303 unsigned long mmio_base
;
1304 unsigned long mmio_len
;
1305 void __iomem
*ctrlp
;
1307 struct mutex spi_mutex
;
1308 struct mutex i2c_mutex
;
1310 struct ssd_protocol_info protocol_info
;
1311 struct ssd_hw_info hw_info
;
1312 struct ssd_rom_info rom_info
;
1313 struct ssd_label label
;
1315 struct ssd_smart smart
;
1318 spinlock_t sendq_lock
;
1319 struct ssd_blist sendq
;
1320 struct task_struct
*send_thread
;
1321 wait_queue_head_t send_waitq
;
1324 spinlock_t doneq_lock
;
1325 struct ssd_blist doneq
;
1326 struct task_struct
*done_thread
;
1327 wait_queue_head_t done_waitq
;
1329 struct ssd_dcmd
*dcmd
;
1330 spinlock_t dcmd_lock
;
1331 struct list_head dcmd_list
; /* direct cmd list */
1332 wait_queue_head_t dcmd_wq
;
1334 unsigned long *tag_map
;
1335 wait_queue_head_t tag_wq
;
1337 spinlock_t cmd_lock
;
1338 struct ssd_cmd
*cmd
;
1341 ssd_event_call event_call
;
1343 dma_addr_t msg_base_dma
;
1346 void *resp_msg_base
;
1347 void *resp_ptr_base
;
1348 dma_addr_t resp_msg_base_dma
;
1349 dma_addr_t resp_ptr_base_dma
;
1352 struct msix_entry entry
[SSD_MSIX_VEC
];
1353 struct ssd_queue queue
[SSD_MSIX_VEC
];
1355 struct request_queue
*rq
; /* The device request queue */
1356 struct gendisk
*gd
; /* The gendisk structure */
1358 struct mutex internal_log_mutex
;
1359 struct ssd_internal_log internal_log
;
1360 struct workqueue_struct
*workq
;
1361 struct work_struct log_work
; /* get log */
1364 unsigned long state
; /* device state, for example, block device inited */
1366 struct module
*owner
;
1377 struct mutex gd_mutex
;
1378 struct ssd_log_info log_info
; /* volatile */
1380 atomic_t queue_depth
;
1381 struct mutex barrier_mutex
;
1382 struct mutex fw_mutex
;
1384 struct ssd_hw_info_extend hw_info_ext
;
1385 struct ssd_labelv3 labelv3
;
1389 struct mutex bm_mutex
;
1390 struct work_struct bm_work
; /* check bm */
1391 struct timer_list bm_timer
;
1392 struct sfifo log_fifo
;
1394 struct timer_list routine_timer
;
1395 unsigned long routine_tick
;
1396 unsigned long hwmon
;
1398 struct work_struct hwmon_work
; /* check hw */
1399 struct work_struct capmon_work
; /* check battery */
1400 struct work_struct tempmon_work
; /* check temp */
1403 struct ssd_debug_info db_info
;
1408 typedef struct ssd_acc_info
{
1409 uint32_t threshold_l1
;
1410 uint32_t threshold_l2
;
1414 typedef struct ssd_reg_op_info
1418 } ssd_reg_op_info_t
;
1420 typedef struct ssd_spi_op_info
1425 } ssd_spi_op_info_t
;
1427 typedef struct ssd_i2c_op_info
1434 } ssd_i2c_op_info_t
;
1436 typedef struct ssd_smbus_op_info
1442 } ssd_smbus_op_info_t
;
1444 typedef struct ssd_ram_op_info
{
1448 uint8_t __user
*buf
;
1449 } ssd_ram_op_info_t
;
1451 typedef struct ssd_flash_op_info
{
1456 uint8_t __user
*buf
;
1457 } ssd_flash_op_info_t
;
1459 typedef struct ssd_sw_log_info
{
1463 } ssd_sw_log_info_t
;
1465 typedef struct ssd_version_info
1467 uint32_t bridge_ver
; /* bridge fw version */
1468 uint32_t ctrl_ver
; /* controller fw version */
1469 uint32_t bm_ver
; /* battery manager fw version */
1470 uint8_t pcb_ver
; /* main pcb version */
1471 uint8_t upper_pcb_ver
;
1474 } ssd_version_info_t
;
1476 typedef struct pci_addr
1484 typedef struct ssd_drv_param_info
{
1494 } ssd_drv_param_info_t
;
1498 enum ssd_form_factor
1500 SSD_FORM_FACTOR_HHHL
= 0,
1501 SSD_FORM_FACTOR_FHHL
1505 /* ssd power loss protect */
1514 #define SSD_BM_SLAVE_ADDRESS 0x16
1515 #define SSD_BM_CAP 5
1518 #define SSD_BM_SAFETYSTATUS 0x51
1519 #define SSD_BM_OPERATIONSTATUS 0x54
1521 /* ManufacturerAccess */
1522 #define SSD_BM_MANUFACTURERACCESS 0x00
1523 #define SSD_BM_ENTER_CAP_LEARNING 0x0023 /* cap learning */
1525 /* Data flash access */
1526 #define SSD_BM_DATA_FLASH_SUBCLASS_ID 0x77
1527 #define SSD_BM_DATA_FLASH_SUBCLASS_ID_PAGE1 0x78
1528 #define SSD_BM_SYSTEM_DATA_SUBCLASS_ID 56
1529 #define SSD_BM_CONFIGURATION_REGISTERS_ID 64
1531 /* min cap voltage */
1532 #define SSD_BM_CAP_VOLT_MIN 500
1537 SSD_BM_CAP_VINA = 1,
1543 SSD_BMSTATUS_OK
= 0,
1544 SSD_BMSTATUS_CHARGING
, /* not fully charged */
1545 SSD_BMSTATUS_WARNING
1550 SBS_UNIT_TEMPERATURE
,
1555 SBS_UNIT_CAPACITANCE
1583 uint16_t cap_volt
[SSD_BM_CAP
];
1590 struct ssd_bm_manufacturer_data
1592 uint16_t pack_lot_code
;
1593 uint16_t pcb_lot_code
;
1594 uint16_t firmware_ver
;
1595 uint16_t hardware_ver
;
1598 struct ssd_bm_configuration_registers
1611 uint16_t fet_action
;
1616 #define SBS_VALUE_MASK 0xffff
1618 #define bm_var_offset(var) ((size_t) &((struct ssd_bm *)0)->var)
1619 #define bm_var(start, offset) ((void *) start + (offset))
1621 static struct sbs_cmd ssd_bm_sbs
[] = {
1622 {0x08, SBS_SIZE_WORD
, SBS_UNIT_TEMPERATURE
, bm_var_offset(temp
), SBS_VALUE_MASK
, "Temperature"},
1623 {0x09, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, bm_var_offset(volt
), SBS_VALUE_MASK
, "Voltage"},
1624 {0x0a, SBS_SIZE_WORD
, SBS_UNIT_CURRENT
, bm_var_offset(curr
), SBS_VALUE_MASK
, "Current"},
1625 {0x0b, SBS_SIZE_WORD
, SBS_UNIT_ESR
, bm_var_offset(esr
), SBS_VALUE_MASK
, "ESR"},
1626 {0x0d, SBS_SIZE_BYTE
, SBS_UNIT_PERCENT
, bm_var_offset(rsoc
), SBS_VALUE_MASK
, "RelativeStateOfCharge"},
1627 {0x0e, SBS_SIZE_BYTE
, SBS_UNIT_PERCENT
, bm_var_offset(health
), SBS_VALUE_MASK
, "Health"},
1628 {0x10, SBS_SIZE_WORD
, SBS_UNIT_CAPACITANCE
, bm_var_offset(cap
), SBS_VALUE_MASK
, "Capacitance"},
1629 {0x14, SBS_SIZE_WORD
, SBS_UNIT_CURRENT
, bm_var_offset(chg_curr
), SBS_VALUE_MASK
, "ChargingCurrent"},
1630 {0x15, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, bm_var_offset(chg_volt
), SBS_VALUE_MASK
, "ChargingVoltage"},
1631 {0x3b, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, (uint8_t)bm_var_offset(cap_volt
[4]), SBS_VALUE_MASK
, "CapacitorVoltage5"},
1632 {0x3c, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, (uint8_t)bm_var_offset(cap_volt
[3]), SBS_VALUE_MASK
, "CapacitorVoltage4"},
1633 {0x3d, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, (uint8_t)bm_var_offset(cap_volt
[2]), SBS_VALUE_MASK
, "CapacitorVoltage3"},
1634 {0x3e, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, (uint8_t)bm_var_offset(cap_volt
[1]), SBS_VALUE_MASK
, "CapacitorVoltage2"},
1635 {0x3f, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, (uint8_t)bm_var_offset(cap_volt
[0]), SBS_VALUE_MASK
, "CapacitorVoltage1"},
1636 {0x50, SBS_SIZE_WORD
, SBS_UNIT_VALUE
, bm_var_offset(sf_alert
), 0x870F, "SafetyAlert"},
1637 {0x51, SBS_SIZE_WORD
, SBS_UNIT_VALUE
, bm_var_offset(sf_status
), 0xE7BF, "SafetyStatus"},
1638 {0x54, SBS_SIZE_WORD
, SBS_UNIT_VALUE
, bm_var_offset(op_status
), 0x79F4, "OperationStatus"},
1639 {0x5a, SBS_SIZE_WORD
, SBS_UNIT_VOLTAGE
, bm_var_offset(sys_volt
), SBS_VALUE_MASK
, "SystemVoltage"},
1640 {0, 0, 0, 0, 0, NULL
},
/*
 * Character-device ioctl command set (magic 'H').
 * Direction macros (_IOR/_IOW/_IOWR) describe the data flow of the third
 * argument as seen from userspace.
 */
/* device / firmware information queries */
#define SSD_CMD_GET_PROTOCOL_INFO _IOR('H', 100, struct ssd_protocol_info)
#define SSD_CMD_GET_HW_INFO _IOR('H', 101, struct ssd_hw_info)
#define SSD_CMD_GET_ROM_INFO _IOR('H', 102, struct ssd_rom_info)
#define SSD_CMD_GET_SMART _IOR('H', 103, struct ssd_smart)
#define SSD_CMD_GET_IDX _IOR('H', 105, int)
#define SSD_CMD_GET_AMOUNT _IOR('H', 106, int)
#define SSD_CMD_GET_TO_INFO _IOR('H', 107, int)
#define SSD_CMD_GET_DRV_VER _IOR('H', 108, char[DRIVER_VERSION_LEN])
#define SSD_CMD_GET_BBACC_INFO _IOR('H', 109, struct ssd_acc_info)
#define SSD_CMD_GET_ECACC_INFO _IOR('H', 110, struct ssd_acc_info)
#define SSD_CMD_GET_HW_INFO_EXT _IOR('H', 111, struct ssd_hw_info_extend)
/* raw register access */
#define SSD_CMD_REG_READ _IOWR('H', 120, struct ssd_reg_op_info)
#define SSD_CMD_REG_WRITE _IOWR('H', 121, struct ssd_reg_op_info)
/* SPI flash (firmware ROM) access */
#define SSD_CMD_SPI_READ _IOWR('H', 125, struct ssd_spi_op_info)
#define SSD_CMD_SPI_WRITE _IOWR('H', 126, struct ssd_spi_op_info)
#define SSD_CMD_SPI_ERASE _IOWR('H', 127, struct ssd_spi_op_info)
/* I2C / SMBus pass-through */
#define SSD_CMD_I2C_READ _IOWR('H', 128, struct ssd_i2c_op_info)
#define SSD_CMD_I2C_WRITE _IOWR('H', 129, struct ssd_i2c_op_info)
#define SSD_CMD_I2C_WRITE_READ _IOWR('H', 130, struct ssd_i2c_op_info)
#define SSD_CMD_SMBUS_SEND_BYTE _IOWR('H', 131, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_RECEIVE_BYTE _IOWR('H', 132, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_WRITE_BYTE _IOWR('H', 133, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_READ_BYTE _IOWR('H', 135, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_WRITE_WORD _IOWR('H', 136, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_READ_WORD _IOWR('H', 137, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_WRITE_BLOCK _IOWR('H', 138, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_READ_BLOCK _IOWR('H', 139, struct ssd_smbus_op_info)
/* battery/capacitor module (BM) */
#define SSD_CMD_BM_GET_VER _IOR('H', 140, uint16_t)
#define SSD_CMD_BM_GET_NR_CAP _IOR('H', 141, int)
#define SSD_CMD_BM_CAP_LEARNING _IOW('H', 142, int)
#define SSD_CMD_CAP_LEARN _IOR('H', 143, uint32_t)
#define SSD_CMD_GET_CAP_STATUS _IOR('H', 144, int)
/* on-board RAM access */
#define SSD_CMD_RAM_READ _IOWR('H', 150, struct ssd_ram_op_info)
#define SSD_CMD_RAM_WRITE _IOWR('H', 151, struct ssd_ram_op_info)
/* NAND flash maintenance */
#define SSD_CMD_NAND_READ_ID _IOR('H', 160, struct ssd_flash_op_info)
#define SSD_CMD_NAND_READ _IOWR('H', 161, struct ssd_flash_op_info) //with oob
#define SSD_CMD_NAND_WRITE _IOWR('H', 162, struct ssd_flash_op_info)
#define SSD_CMD_NAND_ERASE _IOWR('H', 163, struct ssd_flash_op_info)
#define SSD_CMD_NAND_READ_EXT _IOWR('H', 164, struct ssd_flash_op_info) //ignore EIO
#define SSD_CMD_UPDATE_BBT _IOW('H', 180, struct ssd_flash_op_info)
/* alarm LED control */
#define SSD_CMD_CLEAR_ALARM _IOW('H', 190, int)
#define SSD_CMD_SET_ALARM _IOW('H', 191, int)
/* device lifecycle */
#define SSD_CMD_RESET _IOW('H', 200, int)
#define SSD_CMD_RELOAD_FW _IOW('H', 201, int)
#define SSD_CMD_UNLOAD_DEV _IOW('H', 202, int)
#define SSD_CMD_LOAD_DEV _IOW('H', 203, int)
#define SSD_CMD_UPDATE_VP _IOWR('H', 205, uint32_t)
#define SSD_CMD_FULL_RESET _IOW('H', 206, int)
/* event log access */
#define SSD_CMD_GET_NR_LOG _IOR('H', 220, uint32_t)
#define SSD_CMD_GET_LOG _IOR('H', 221, void *)
#define SSD_CMD_LOG_LEVEL _IOW('H', 222, int)
#define SSD_CMD_OT_PROTECT _IOW('H', 223, int)
#define SSD_CMD_GET_OT_STATUS _IOR('H', 224, int)
#define SSD_CMD_CLEAR_LOG _IOW('H', 230, int)
#define SSD_CMD_CLEAR_SMART _IOW('H', 231, int)
#define SSD_CMD_SW_LOG _IOW('H', 232, struct ssd_sw_log_info)
/* label / version / status queries */
#define SSD_CMD_GET_LABEL _IOR('H', 235, struct ssd_label)
#define SSD_CMD_GET_VERSION _IOR('H', 236, struct ssd_version_info)
#define SSD_CMD_GET_TEMPERATURE _IOR('H', 237, int)
#define SSD_CMD_GET_BMSTATUS _IOR('H', 238, int)
#define SSD_CMD_GET_LABEL2 _IOR('H', 239, void *)
/* cache / metadata control */
#define SSD_CMD_FLUSH _IOW('H', 240, int)
#define SSD_CMD_SAVE_MD _IOW('H', 241, int)
#define SSD_CMD_SET_WMODE _IOW('H', 242, int)
#define SSD_CMD_GET_WMODE _IOR('H', 243, int)
#define SSD_CMD_GET_USER_WMODE _IOR('H', 244, int)
/* debug helpers */
#define SSD_CMD_DEBUG _IOW('H', 250, struct ssd_debug_info)
#define SSD_CMD_DRV_PARAM_INFO _IOR('H', 251, struct ssd_drv_param_info)
/* max payload of one log read; default display level */
#define SSD_LOG_MAX_SZ 4096
#define SSD_LOG_LEVEL SSD_LOG_LEVEL_NOTICE
/* log payload interpretation: no data attached (other enumerators not visible here) */
SSD_LOG_DATA_NONE = 0,
/* one raw log record as stored by the device; packed — layout is wire format.
 * NOTE(review): member list elided in this view — confirm against full source. */
typedef struct ssd_log_entry
}__attribute__((packed))ssd_log_entry_t;
/* a log record tagged with its originating controller index (8-bit field) */
typedef struct ssd_log
uint64_t ctrl_idx:8;
} __attribute__((packed)) ssd_log_t;
/* static description of one event code: level, payload type, display flags, text */
typedef struct ssd_log_desc
} __attribute__((packed)) ssd_log_desc_t;
/* controller index reserved for software-generated (driver-side) log entries */
#define SSD_LOG_SW_IDX 0xF
/* sentinel event code used to terminate lookups / describe unrecognized events */
#define SSD_UNKNOWN_EVENT ((uint16_t)-1)
/*
 * Event-code description table: maps a firmware event id to its display
 * level, payload interpretation (LOC = flash location, HEX = raw data,
 * NONE = no payload), whether to print block/page coordinates, and text.
 * Terminated by the SSD_UNKNOWN_EVENT entry.
 */
static struct ssd_log_desc ssd_log_desc[] = {
	/* event, level, show flash, show block, show page, desc */
	/* bad-block table / wear-leveling events */
	{0x0, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_LOC, 0, 0, "Create BBT failure"}, //g3
	{0x1, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_LOC, 0, 0, "Read BBT failure"}, //g3
	{0x2, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "Mark bad block"},
	{0x3, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 0, 0, "Flush BBT failure"},
	{0x4, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Program failure"},
	{0x7, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "No available blocks"},
	{0x8, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "Bad EC header"},
	{0x9, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_LOC, 1, 0, "Bad VID header"}, //g3
	{0xa, SSD_LOG_LEVEL_INFO, SSD_LOG_DATA_LOC, 1, 0, "Wear leveling"},
	{0xb, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "WL read back failure"},
	{0x11, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Data recovery failure"}, // err
	/* mapping-table / merge / init events */
	{0x20, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Init: scan mapping table failure"}, // err g3
	{0x21, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Program failure"},
	{0x22, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Program failure"},
	{0x23, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Program failure"},
	{0x24, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "Merge: read mapping page failure"},
	{0x25, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Merge: read back failure"},
	{0x26, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Program failure"},
	{0x27, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_LOC, 1, 1, "Data corrupted for abnormal power down"}, //g3
	{0x28, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Merge: mapping page corrupted"},
	{0x29, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "Init: no mapping page"},
	{0x2a, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: mapping pages incomplete"},
	{0x2b, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Read back failure after programming failure"}, // err
	/* read / RAID recovery events */
	{0xf1, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Read failure without recovery"}, // err
	{0xf2, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 0, 0, "No available blocks"}, // maybe err g3
	{0xf3, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 0, "Init: RAID incomplete"}, // err g3
	{0xf4, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Program failure"},
	{0xf5, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Read failure in moving data"},
	{0xf6, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Program failure"},
	{0xf7, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_LOC, 1, 1, "Init: RAID not complete"},
	{0xf8, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "Init: data moving interrupted"},
	{0xfe, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 0, 0, "Data inspection failure"},
	{0xff, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "IO: ECC failed"},
	/* PMT/PBT (mapping/block table) init events */
	{0x2e, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 0, 0, "No available reserved blocks" }, // err
	{0x30, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 0, 0, "Init: PMT membership not found"},
	{0x31, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Init: PMT corrupted"},
	{0x32, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 0, 0, "Init: PBT membership not found"},
	{0x33, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 0, 0, "Init: PBT not found"},
	{0x34, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 0, 0, "Init: PBT corrupted"},
	{0x35, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: PMT page read failure"},
	{0x36, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: PBT page read failure"},
	{0x37, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: PBT backup page read failure"},
	{0x38, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: PBMT read failure"},
	{0x39, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Init: PBMT scan failure"}, // err
	{0x3a, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: first page read failure"},
	{0x3b, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Init: first page scan failure"}, // err
	{0x3c, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Init: scan unclosed block failure"}, // err
	{0x3d, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: write pointer mismatch"},
	{0x3e, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: PMT recovery: PBMT read failure"},
	{0x3f, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "Init: PMT recovery: PBMT scan failure"},
	{0x40, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Init: PMT recovery: data page read failure"}, //err
	{0x41, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: PBT write pointer mismatch"},
	{0x42, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: PBT latest version corrupted"},
	{0x43, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 0, "Init: too many unclosed blocks"},
	{0x44, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Init: PDW block found"},
	{0x45, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_HEX, 0, 0, "Init: more than one PDW block found"}, //err
	{0x46, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: first page is blank or read failure"},
	{0x47, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 0, 0, "Init: PDW block not found"},
	/* cache / GC events */
	{0x50, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 0, "Cache: hit error data"}, // err
	{0x51, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 0, "Cache: read back failure"}, // err
	{0x52, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Cache: unknown command"}, //?
	{0x53, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "GC/WL read back failure"}, // err
	{0x60, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "Erase failure"},
	/* read-path diagnosis events */
	{0x70, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "LPA not matched"},
	{0x71, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "PBN not matched"},
	{0x72, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Read retry failure"},
	{0x73, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Need raid recovery"},
	{0x74, SSD_LOG_LEVEL_INFO, SSD_LOG_DATA_LOC, 1, 1, "Need read retry"},
	{0x75, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Read invalid data page"},
	{0x76, SSD_LOG_LEVEL_INFO, SSD_LOG_DATA_LOC, 1, 1, "ECC error, data in cache, PBN matched"},
	{0x77, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "ECC error, data in cache, PBN not matched"},
	{0x78, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "ECC error, data in flash, PBN not matched"},
	{0x79, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "ECC ok, data in cache, LPA not matched"},
	{0x7a, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "ECC ok, data in flash, LPA not matched"},
	{0x7b, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID data in cache, LPA not matched"},
	{0x7c, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID data in flash, LPA not matched"},
	{0x7d, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Read data page status error"},
	{0x7e, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Read blank page"},
	{0x7f, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Access flash timeout"},
	/* endurance / scrubbing / GC health events */
	{0x80, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "EC overflow"},
	{0x81, SSD_LOG_LEVEL_INFO, SSD_LOG_DATA_NONE, 0, 0, "Scrubbing completed"},
	{0x82, SSD_LOG_LEVEL_INFO, SSD_LOG_DATA_LOC, 1, 0, "Unstable block(too much bit flip)"},
	{0x83, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "GC: ram error"}, //?
	{0x84, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "GC: one PBMT read failure"},
	{0x88, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "GC: mark bad block"},
	{0x89, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "GC: invalid page count error"}, // maybe err
	{0x8a, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_NONE, 0, 0, "Warning: Bad Block close to limit"},
	{0x8b, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_NONE, 0, 0, "Error: Bad Block over limit"},
	{0x8c, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_NONE, 0, 0, "Warning: P/E cycles close to limit"},
	{0x8d, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_NONE, 0, 0, "Error: P/E cycles over limit"},
	/* environmental / hardware health events */
	{0x90, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Over temperature"}, //xx
	{0x91, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Temperature is OK"}, //xx
	{0x92, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_NONE, 0, 0, "Battery fault"},
	{0x93, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_NONE, 0, 0, "SEU fault"}, //err
	{0x94, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_NONE, 0, 0, "DDR error"}, //err
	{0x95, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_NONE, 0, 0, "Controller serdes error"}, //err
	{0x96, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_NONE, 0, 0, "Bridge serdes 1 error"}, //err
	{0x97, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_NONE, 0, 0, "Bridge serdes 2 error"}, //err
	{0x98, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "SEU fault (corrected)"}, //err
	{0x99, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Battery is OK"},
	{0x9a, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Temperature close to limit"}, //xx
	{0x9b, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "SEU fault address (low)"},
	{0x9c, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "SEU fault address (high)"},
	{0x9d, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "I2C fault" },
	{0x9e, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "DDR single bit error" },
	{0x9f, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Board voltage fault" },
	/* extended read/RAID recovery events */
	{0xa0, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "LPA not matched"},
	{0xa1, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Re-read data in cache"},
	{0xa2, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Read blank page"},
	{0xa3, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID recovery: Read blank page"},
	{0xa4, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID recovery: new data in cache"},
	{0xa5, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID recovery: PBN not matched"},
	{0xa6, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Read data with error flag"},
	/* NOTE(review): "recoverd" is a typo for "recovered" in the firmware-facing
	 * description string; left unchanged here since tools may match on it. */
	{0xa7, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID recovery: recoverd data with error flag"},
	{0xa8, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Blank page in cache, PBN matched"},
	{0xa9, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID recovery: Blank page in cache, PBN matched"},
	{0xaa, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 0, 0, "Flash init failure"},
	{0xab, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Mapping table recovery failure"},
	{0xac, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID recovery: ECC failed"},
	{0xb0, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Temperature is up to degree 95"},
	{0xb1, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Temperature is up to degree 100"},
	/* 0x3xx: software (driver-side) events */
	{0x300, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_HEX, 0, 0, "CMD timeout"},
	{0x301, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Power on"},
	{0x302, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Power off"},
	{0x303, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Clear log"},
	{0x304, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Set capacity"},
	{0x305, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Clear data"},
	{0x306, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "BM safety status"},
	{0x307, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_HEX, 0, 0, "I/O error"},
	{0x308, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "CMD error"},
	{0x309, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Set wmode"},
	{0x30a, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_HEX, 0, 0, "DDR init failed" },
	{0x30b, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "PCIe link status" },
	{0x30c, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_HEX, 0, 0, "Controller reset sync error" },
	{0x30d, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_HEX, 0, 0, "Clock fault" },
	{0x30e, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "FPGA voltage fault status" },
	{0x30f, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Set capacity finished"},
	{0x310, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Clear data finished"},
	{0x311, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Reset"},
	{0x312, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_HEX, 0, 0, "CAP: voltage fault"},
	{0x313, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_NONE, 0, 0, "CAP: learn fault"},
	{0x314, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "CAP status"},
	{0x315, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Board voltage fault status"},
	{0x316, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Inlet over temperature"},
	{0x317, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Inlet temperature is OK"},
	{0x318, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Flash over temperature"},
	{0x319, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Flash temperature is OK"},
	{0x31a, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_NONE, 0, 0, "CAP: short circuit"},
	{0x31b, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_HEX, 0, 0, "Sensor fault"},
	{0x31c, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Erase all data"},
	{0x31d, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Erase all data finished"},
	/* terminator: catch-all for event ids not listed above */
	{SSD_UNKNOWN_EVENT, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "unknown event"},
/* hardware event ids the driver reacts to specially (must match the table above) */
#define SSD_LOG_OVER_TEMP 0x90
#define SSD_LOG_NORMAL_TEMP 0x91
#define SSD_LOG_WARN_TEMP 0x9a
#define SSD_LOG_SEU_FAULT 0x93
#define SSD_LOG_SEU_FAULT1 0x98
#define SSD_LOG_BATTERY_FAULT 0x92
#define SSD_LOG_BATTERY_OK 0x99
#define SSD_LOG_BOARD_VOLT_FAULT 0x9f
/* software (driver-generated) event ids, 0x300-0x31d */
#define SSD_LOG_TIMEOUT 0x300
#define SSD_LOG_POWER_ON 0x301
#define SSD_LOG_POWER_OFF 0x302
#define SSD_LOG_CLEAR_LOG 0x303
#define SSD_LOG_SET_CAPACITY 0x304
#define SSD_LOG_CLEAR_DATA 0x305
#define SSD_LOG_BM_SFSTATUS 0x306
#define SSD_LOG_EIO 0x307
#define SSD_LOG_ECMD 0x308
#define SSD_LOG_SET_WMODE 0x309
#define SSD_LOG_DDR_INIT_ERR 0x30a
#define SSD_LOG_PCIE_LINK_STATUS 0x30b
#define SSD_LOG_CTRL_RST_SYNC 0x30c
#define SSD_LOG_CLK_FAULT 0x30d
#define SSD_LOG_VOLT_FAULT 0x30e
#define SSD_LOG_SET_CAPACITY_END 0x30F
#define SSD_LOG_CLEAR_DATA_END 0x310
#define SSD_LOG_RESET 0x311
#define SSD_LOG_CAP_VOLT_FAULT 0x312
#define SSD_LOG_CAP_LEARN_FAULT 0x313
#define SSD_LOG_CAP_STATUS 0x314
#define SSD_LOG_VOLT_STATUS 0x315
#define SSD_LOG_INLET_OVER_TEMP 0x316
#define SSD_LOG_INLET_NORMAL_TEMP 0x317
#define SSD_LOG_FLASH_OVER_TEMP 0x318
#define SSD_LOG_FLASH_NORMAL_TEMP 0x319
#define SSD_LOG_CAP_SHORT_CIRCUIT 0x31a
#define SSD_LOG_SENSOR_FAULT 0x31b
#define SSD_LOG_ERASE_ALL 0x31c
#define SSD_LOG_ERASE_ALL_END 0x31d
/* sw log fifo depth */
#define SSD_LOG_FIFO_SZ 1024
/* per-CPU completion queue and its servicing tasklet */
static DEFINE_PER_CPU(struct list_head, ssd_doneq);
static DEFINE_PER_CPU(struct tasklet_struct, ssd_tasklet);
/* unloading driver */
/* NOTE(review): volatile is not a synchronization primitive; readers of this
 * flag rely on it being a simple best-effort signal — confirm intent. */
static volatile int ssd_exiting = 0;
/* device class: class_simple on pre-2.6.13 kernels, struct class otherwise
 * (the #else/#endif selecting between these is not visible in this view) */
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
static struct class_simple *ssd_class;
static struct class *ssd_class;
/* character-device major number */
static int ssd_cmajor = SSD_CMAJOR;
/* ssd block device major, minors */
static int ssd_major = SSD_MAJOR;
static int ssd_major_sl = SSD_MAJOR_SL;
static int ssd_minors = SSD_MINORS;
/* ssd device list */
static struct list_head ssd_list;
/* bitmaps allocating device indexes (master and slave namespaces) */
static unsigned long ssd_index_bits[SSD_MAX_DEV / BITS_PER_LONG + 1];
static unsigned long ssd_index_bits_sl[SSD_MAX_DEV / BITS_PER_LONG + 1];
/* count of currently registered devices */
static atomic_t ssd_nr;
/* driver operating modes (enum header not visible in this view) */
SSD_DRV_MODE_STANDARD = 0, /* full */
SSD_DRV_MODE_DEBUG = 2, /* debug */
SSD_DRV_MODE_BASE /* base only */
/* compile-time override of the preferred interrupt mode */
#if (defined SSD_MSIX)
#define SSD_INT_MODE_DEFAULT SSD_INT_MSIX
#elif (defined SSD_MSI)
#define SSD_INT_MODE_DEFAULT SSD_INT_MSI
/* auto select the default int mode according to the kernel version*/
/* suse 11 sp1 irqbalance bug: use msi instead*/
#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR >= 6) || (defined RHEL_MAJOR && RHEL_MAJOR == 5 && RHEL_MINOR >= 5))
#define SSD_INT_MODE_DEFAULT SSD_INT_MSIX
#define SSD_INT_MODE_DEFAULT SSD_INT_MSI
/* module parameters — each has a matching boot-line __setup() parser below */
static int mode = SSD_DRV_MODE_STANDARD;
static int status_mask = 0xFF;
static int int_mode = SSD_INT_MODE_DEFAULT;
static int threaded_irq = 0;
static int log_level = SSD_LOG_LEVEL_WARNING;
static int ot_protect = 1;          /* over-temperature protection on by default */
static int wmode = SSD_WMODE_DEFAULT;
static int finject = 0;             /* fault injection for debugging only */
/* perms 0: parameters are not exposed in sysfs, set at load time only */
module_param(mode, int, 0);
module_param(status_mask, int, 0);
module_param(int_mode, int, 0);
module_param(threaded_irq, int, 0);
module_param(log_level, int, 0);
module_param(ot_protect, int, 0);
module_param(wmode, int, 0);
module_param(finject, int, 0);
MODULE_PARM_DESC(mode, "driver mode, 0 - standard, 1 - debug, 2 - debug without IO, 3 - basic debug mode");
MODULE_PARM_DESC(status_mask, "command status mask, 0 - without command error, 0xff - with command error");
MODULE_PARM_DESC(int_mode, "preferred interrupt mode, 0 - legacy, 1 - msi, 2 - msix");
MODULE_PARM_DESC(threaded_irq, "threaded irq, 0 - normal irq, 1 - threaded irq");
MODULE_PARM_DESC(log_level, "log level to display, 0 - info and above, 1 - notice and above, 2 - warning and above, 3 - error only");
MODULE_PARM_DESC(ot_protect, "over temperature protect, 0 - disable, 1 - enable");
MODULE_PARM_DESC(wmode, "write mode, 0 - write buffer (with risk for the 6xx firmware), 1 - write buffer ex, 2 - write through, 3 - auto, 4 - default");
MODULE_PARM_DESC(finject, "enable fault simulation, 0 - off, 1 - on, for debug purpose only");
/*
 * Kernel boot-line parsers for built-in use; each parses its argument with
 * simple_strtoul and stores it in the module parameter of the same name.
 * (Function braces and return statements are not visible in this view.)
 */
static int __init ssd_drv_mode(char *str)
mode = (int)simple_strtoul(str, NULL, 0);
static int __init ssd_status_mask(char *str)
/* parsed as hexadecimal (base 16), unlike the other parameters */
status_mask = (int)simple_strtoul(str, NULL, 16);
static int __init ssd_int_mode(char *str)
int_mode = (int)simple_strtoul(str, NULL, 0);
static int __init ssd_threaded_irq(char *str)
threaded_irq = (int)simple_strtoul(str, NULL, 0);
static int __init ssd_log_level(char *str)
log_level = (int)simple_strtoul(str, NULL, 0);
static int __init ssd_ot_protect(char *str)
ot_protect = (int)simple_strtoul(str, NULL, 0);
static int __init ssd_wmode(char *str)
wmode = (int)simple_strtoul(str, NULL, 0);
static int __init ssd_finject(char *str)
finject = (int)simple_strtoul(str, NULL, 0);
/* register boot-line options "hio_<param>=" for the built-in case */
__setup(MODULE_NAME "_mode=", ssd_drv_mode);
__setup(MODULE_NAME "_status_mask=", ssd_status_mask);
__setup(MODULE_NAME "_int_mode=", ssd_int_mode);
__setup(MODULE_NAME "_threaded_irq=", ssd_threaded_irq);
__setup(MODULE_NAME "_log_level=", ssd_log_level);
__setup(MODULE_NAME "_ot_protect=", ssd_ot_protect);
__setup(MODULE_NAME "_wmode=", ssd_wmode);
__setup(MODULE_NAME "_finject=", ssd_finject);
/* /proc/hio/info support (compiled out when CONFIG_PROC_FS is unset) */
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <asm/uaccess.h>
#define SSD_PROC_DIR MODULE_NAME
#define SSD_PROC_INFO "info"
/* directory and file entries created by ssd_init_proc() */
static struct proc_dir_entry *ssd_proc_dir = NULL;
static struct proc_dir_entry *ssd_proc_info = NULL;
/*
 * Legacy (pre-3.2) procfs read callback: renders driver version and a
 * per-device summary (size, firmware/PCB versions, node name) into `page`.
 * (Local declarations, braces and the return are not visible in this view.)
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0))
static int ssd_proc_read(char *page, char **start,
	off_t off, int count, int *eof, void *data)
struct ssd_device *dev = NULL;
struct ssd_device *n = NULL;
len += snprintf((page + len), (count - len), "Driver Version:\t%s\n", DRIVER_VERSION);
/* _safe variant: a device may be removed while we walk the list */
list_for_each_entry_safe(dev, n, &ssd_list, list) {
/* convert bytes to decimal GB (divide by 1e9) */
size = dev->hw_info.size;
do_div(size, 1000000000);
len += snprintf((page + len), (count - len), "\n");
len += snprintf((page + len), (count - len), "HIO %d Size:\t%uGB\n", idx, (uint32_t)size);
len += snprintf((page + len), (count - len), "HIO %d Bridge FW VER:\t%03X\n", idx, dev->hw_info.bridge_ver);
/* controller firmware version is optional (0 means absent) */
if (dev->hw_info.ctrl_ver != 0) {
len += snprintf((page + len), (count - len), "HIO %d Controller FW VER:\t%03X\n", idx, dev->hw_info.ctrl_ver);
len += snprintf((page + len), (count - len), "HIO %d PCB VER:\t.%c\n", idx, dev->hw_info.pcb_ver);
/* upper PCB version is only printed when it is a valid letter revision */
if (dev->hw_info.upper_pcb_ver >= 'A') {
len += snprintf((page + len), (count - len), "HIO %d Upper PCB VER:\t.%c\n", idx, dev->hw_info.upper_pcb_ver);
len += snprintf((page + len), (count - len), "HIO %d Device:\t%s\n", idx, dev->name);
/*
 * seq_file show callback (3.2+ kernels): same report as ssd_proc_read but
 * emitted through seq_printf. (Braces/locals not visible in this view.)
 */
static int ssd_proc_show(struct seq_file *m, void *v)
struct ssd_device *dev = NULL;
struct ssd_device *n = NULL;
seq_printf(m, "Driver Version:\t%s\n", DRIVER_VERSION);
/* _safe variant: a device may be removed while we walk the list */
list_for_each_entry_safe(dev, n, &ssd_list, list) {
/* convert bytes to decimal GB (divide by 1e9) */
size = dev->hw_info.size;
do_div(size, 1000000000);
seq_printf(m, "\n");
seq_printf(m, "HIO %d Size:\t%uGB\n", idx, (uint32_t)size);
seq_printf(m, "HIO %d Bridge FW VER:\t%03X\n", idx, dev->hw_info.bridge_ver);
/* controller firmware version is optional (0 means absent) */
if (dev->hw_info.ctrl_ver != 0) {
seq_printf(m, "HIO %d Controller FW VER:\t%03X\n", idx, dev->hw_info.ctrl_ver);
seq_printf(m, "HIO %d PCB VER:\t.%c\n", idx, dev->hw_info.pcb_ver);
/* upper PCB version is only printed when it is a valid letter revision */
if (dev->hw_info.upper_pcb_ver >= 'A') {
seq_printf(m, "HIO %d Upper PCB VER:\t.%c\n", idx, dev->hw_info.upper_pcb_ver);
seq_printf(m, "HIO %d Device:\t%s\n", idx, dev->name);
/* open callback: binds the seq_file to ssd_proc_show; PDE()->data vs
 * PDE_DATA() selected by kernel version (3.10 dropped PDE()->data access) */
static int ssd_proc_open(struct inode *inode, struct file *file)
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3,9,0))
return single_open(file, ssd_proc_show, PDE(inode)->data);
return single_open(file, ssd_proc_show, PDE_DATA(inode));
/* file_operations for /proc/hio/info backed by seq_file helpers
 * (the .read = seq_read line is not visible in this view) */
static const struct file_operations ssd_proc_fops = {
	.open = ssd_proc_open,
	.llseek = seq_lseek,
	.release = single_release,
/* remove /proc/hio/info then /proc/hio; NULLs the cached entries so a
 * repeated call is harmless */
static void ssd_cleanup_proc(void)
if (ssd_proc_info) {
remove_proc_entry(SSD_PROC_INFO, ssd_proc_dir);
ssd_proc_info = NULL;
remove_proc_entry(SSD_PROC_DIR, NULL);
ssd_proc_dir = NULL;
/*
 * Create /proc/hio and /proc/hio/info. Pre-3.2 kernels use
 * create_proc_entry + read_proc; newer kernels use proc_create with
 * ssd_proc_fops. Error paths unwind via gotos. (Braces/returns and the
 * failure checks between statements are not visible in this view.)
 */
static int ssd_init_proc(void)
ssd_proc_dir = proc_mkdir(SSD_PROC_DIR, NULL);
goto out_proc_mkdir;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0))
ssd_proc_info = create_proc_entry(SSD_PROC_INFO, S_IFREG | S_IRUGO | S_IWUSR, ssd_proc_dir);
goto out_create_proc_entry;
ssd_proc_info->read_proc = ssd_proc_read;
/* ->owner field was removed from proc_dir_entry in 2.6.30 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30))
ssd_proc_info->owner = THIS_MODULE;
ssd_proc_info = proc_create(SSD_PROC_INFO, 0600, ssd_proc_dir, &ssd_proc_fops);
goto out_create_proc_entry;
out_create_proc_entry:
remove_proc_entry(SSD_PROC_DIR, NULL);
/* no-op stubs when CONFIG_PROC_FS is disabled (bodies not visible here) */
static void ssd_cleanup_proc(void)
static int ssd_init_proc(void)
#endif /* CONFIG_PROC_FS */
/* sysfs registration hooks — currently empty placeholders */
static void ssd_unregister_sysfs(struct ssd_device *dev)
static int ssd_register_sysfs(struct ssd_device *dev)
static void ssd_cleanup_sysfs(void)
static int ssd_init_sysfs(void)
/* release a device index back to the master (or, when slave, the slave)
 * bitmap and drop the device count if it was actually set */
static inline void ssd_put_index(int slave, int index)
unsigned long *index_bits = ssd_index_bits;
index_bits = ssd_index_bits_sl;
/* only decrement when the bit was really allocated (guards double-put) */
if (test_and_clear_bit(index, index_bits)) {
atomic_dec(&ssd_nr);
/* allocate the lowest free device index from the master (or slave) bitmap;
 * test_and_set_bit makes the claim atomic against concurrent allocators */
static inline int ssd_get_index(int slave)
unsigned long *index_bits = ssd_index_bits;
index_bits = ssd_index_bits_sl;
/* no free index available */
if ((index = find_first_zero_bit(index_bits, SSD_MAX_DEV)) >= SSD_MAX_DEV) {
/* lost the race for this bit — caller/loop handles retry */
if (test_and_set_bit(index, index_bits)) {
atomic_inc(&ssd_nr);
static void ssd_cleanup_index(void)
/* initialize the device list, counter and both index bitmaps */
static int ssd_init_index(void)
INIT_LIST_HEAD(&ssd_list);
atomic_set(&ssd_nr, 0);
/* NOTE(review): memset length is the bitmap's element count, not its byte
 * size — should be sizeof(ssd_index_bits); only 1/sizeof(long) of the array
 * is zeroed here. Harmless today because static storage is zero-initialized,
 * but worth fixing. */
memset(ssd_index_bits, 0, (SSD_MAX_DEV / BITS_PER_LONG + 1));
memset(ssd_index_bits_sl, 0, (SSD_MAX_DEV / BITS_PER_LONG + 1));
/* build a disk-name suffix like sd-style naming: "a".."z" for the first 26
 * indexes, then two-letter "aa".."zz" for the rest */
static void ssd_set_dev_name(char *name, size_t size, int idx)
if(idx < SSD_ALPHABET_NUM) {
snprintf(name, size, "%c", 'a'+idx);
idx -= SSD_ALPHABET_NUM;
snprintf(name, size, "%c%c", 'a'+(idx/SSD_ALPHABET_NUM), 'a'+(idx%SSD_ALPHABET_NUM));
/* pci register r&w */
/* write a 64-bit value as two 32-bit MMIO writes, low word first */
static inline void ssd_reg_write(void *addr, uint64_t val)
iowrite32((uint32_t)val, addr);
iowrite32((uint32_t)(val >> 32), addr + 4);
/* read a 64-bit register as two 32-bit MMIO reads (low word first) and
 * recombine; not atomic with respect to the device updating the register */
static inline uint64_t ssd_reg_read(void *addr)
uint32_t val_lo, val_hi;
val_lo = ioread32(addr);
val_hi = ioread32(addr + 4);
val = val_lo | ((uint64_t)val_hi << 32);
/* 32-bit MMIO accessors; thin wrappers over writel/readl */
#define ssd_reg32_write(addr, val) writel(val, addr)
#define ssd_reg32_read(addr) readl(addr)
/* turn the alarm LED back to firmware control via a read-modify-write of
 * the LED register; protocol <= V3 path not visible in this view */
static void ssd_clear_alarm(struct ssd_device *dev)
if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) {
val = ssd_reg32_read(dev->ctrlp + SSD_LED_REG);
/* firmware control */
ssd_reg32_write(dev->ctrlp + SSD_LED_REG, val);
/* force the alarm LED on under software control via a read-modify-write of
 * the LED register; protocol <= V3 path not visible in this view */
static void ssd_set_alarm(struct ssd_device *dev)
if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) {
val = ssd_reg32_read(dev->ctrlp + SSD_LED_REG);
/* software control */
ssd_reg32_write(dev->ctrlp + SSD_LED_REG, val);
/* byte-order swap helpers used for SPI flash data (opening parenthesis
 * fragments of both expansions are not visible in this view) */
#define u32_swap(x) \
(((uint32_t)(x) & (uint32_t)0x000000ffUL) << 24) | \
(((uint32_t)(x) & (uint32_t)0x0000ff00UL) << 8) | \
(((uint32_t)(x) & (uint32_t)0x00ff0000UL) >> 8) | \
(((uint32_t)(x) & (uint32_t)0xff000000UL) >> 24)))
#define u16_swap(x) \
(((uint16_t)(x) & (uint16_t)0x00ff) << 8) | \
(((uint16_t)(x) & (uint16_t)0xff00) >> 8) ))
/* No lock, for init only*/
/*
 * Read the SPI flash JEDEC id: issue READ_ID, drain the READY register
 * (four throwaway reads), then poll READY until 0x1000000 or the
 * SSD_SPI_TIMEOUT deadline, and finally read the ID register.
 * (Braces/returns and the polling loop structure are not visible here.)
 */
static int ssd_spi_read_id(struct ssd_device *dev, uint32_t *id)
ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_READ_ID);
/* flush stale READY values before polling */
val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);
val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);
val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);
val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);
val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);
/* 0x1000000 in READY means the command has completed */
if (val == 0x1000000) {
if (time_after(jiffies, (st + SSD_SPI_TIMEOUT))) {
val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_ID);
/*
 * Bring the SPI flash to a known idle state under spi_mutex: poll
 * READ_STATUS until READY reports 0x1000000 (bounded by SSD_SPI_TIMEOUT),
 * clear the status register on protocol >= V3.2, then disable writes.
 * (Braces/returns and some loop structure are not visible here.)
 */
static int ssd_init_spi(struct ssd_device *dev)
mutex_lock(&dev->spi_mutex);
ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_READ_STATUS);
val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);
if (time_after(jiffies, (st + SSD_SPI_TIMEOUT))) {
} while (val != 0x1000000);
val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_STATUS);
if (time_after(jiffies, (st + SSD_SPI_TIMEOUT))) {
/* V3.2+: clear any latched status/error bits before use */
if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) {
ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_CLSR);
/* leave the flash write-protected by default */
ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_W_DISABLE);
mutex_unlock(&dev->spi_mutex);
/*
 * Read `size` bytes from SPI flash offset `off` into `buf`, one 32-bit word
 * per READ command, under spi_mutex. Rejects unaligned offsets/sizes, zero
 * size, reads past the ROM end, and reads larger than one flash page.
 * Each word is byte-swapped (u32_swap) from flash order to host order.
 * (Braces/returns and some loop structure are not visible here.)
 */
static int ssd_spi_page_read(struct ssd_device *dev, void *buf, uint32_t off, uint32_t size)
/* validate alignment and bounds before touching the hardware */
if ((off % sizeof(uint32_t)) != 0 || (size % sizeof(uint32_t)) != 0 || size == 0 ||
	((uint64_t)off + (uint64_t)size) > dev->rom_info.size || size > dev->rom_info.page_size) {
mutex_lock(&dev->spi_mutex);
while (rlen < size) {
/* 32-bit flash address split: high byte in CMD_HI, low 24 bits in CMD */
ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD_HI, ((off + rlen) >> 24));
ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, (((off + rlen) << 8) | SSD_SPI_CMD_READ));
/* flush stale READY values before polling */
(void)ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);
(void)ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);
(void)ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);
(void)ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);
val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);
/* 0x1000000 in READY means the word is available */
if (val == 0x1000000) {
if (time_after(jiffies, (st + SSD_SPI_TIMEOUT))) {
val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_RDATA);
*(uint32_t *)(buf + rlen)= u32_swap(val);
rlen += sizeof(uint32_t);
mutex_unlock(&dev->spi_mutex);
2614 static int ssd_spi_page_write(struct ssd_device
*dev
, void *buf
, uint32_t off
, uint32_t size
)
2626 if ((off
% sizeof(uint32_t)) != 0 || (size
% sizeof(uint32_t)) != 0 || size
== 0 ||
2627 ((uint64_t)off
+ (uint64_t)size
) > dev
->rom_info
.size
|| size
> dev
->rom_info
.page_size
||
2628 (off
/ dev
->rom_info
.page_size
) != ((off
+ size
- 1) / dev
->rom_info
.page_size
)) {
2632 mutex_lock(&dev
->spi_mutex
);
2634 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_ENABLE
);
2636 wlen
= size
/ sizeof(uint32_t);
2637 for (i
=0; i
<(int)wlen
; i
++) {
2638 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_WDATA
, u32_swap(*((uint32_t *)buf
+ i
)));
2642 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD_HI
, (off
>> 24));
2644 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, ((off
<< 8) | SSD_SPI_CMD_PROGRAM
));
2650 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_READ_STATUS
);
2652 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2654 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2659 } while (val
!= 0x1000000);
2661 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_STATUS
);
2666 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2673 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
2674 if ((val
>> 6) & 0x1) {
2681 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
2683 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_CLSR
);
2686 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_DISABLE
);
2688 mutex_unlock(&dev
->spi_mutex
);
2693 static int ssd_spi_block_erase(struct ssd_device
*dev
, uint32_t off
)
2703 if ((off
% dev
->rom_info
.block_size
) != 0 || off
>= dev
->rom_info
.size
) {
2707 mutex_lock(&dev
->spi_mutex
);
2709 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_ENABLE
);
2710 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_ENABLE
);
2713 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD_HI
, (off
>> 24));
2715 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, ((off
<< 8) | SSD_SPI_CMD_ERASE
));
2719 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_READ_STATUS
);
2722 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_READY
);
2724 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2729 } while (val
!= 0x1000000);
2731 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_STATUS
);
2736 if (time_after(jiffies
, (st
+ SSD_SPI_TIMEOUT
))) {
2743 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
2744 if ((val
>> 5) & 0x1) {
2751 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
2753 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_CLSR
);
2756 ssd_reg32_write(dev
->ctrlp
+ SSD_SPI_REG_CMD
, SSD_SPI_CMD_W_DISABLE
);
2758 mutex_unlock(&dev
->spi_mutex
);
2763 static int ssd_spi_read(struct ssd_device
*dev
, void *buf
, uint32_t off
, uint32_t size
)
2774 if ((off
% sizeof(uint32_t)) != 0 || (size
% sizeof(uint32_t)) != 0 || size
== 0 ||
2775 ((uint64_t)off
+ (uint64_t)size
) > dev
->rom_info
.size
) {
2779 while (len
< size
) {
2780 roff
= (off
+ len
) % dev
->rom_info
.page_size
;
2781 rsize
= dev
->rom_info
.page_size
- roff
;
2782 if ((size
- len
) < rsize
) {
2783 rsize
= (size
- len
);
2787 ret
= ssd_spi_page_read(dev
, (buf
+ len
), roff
, rsize
);
2801 static int ssd_spi_write(struct ssd_device
*dev
, void *buf
, uint32_t off
, uint32_t size
)
2812 if ((off
% sizeof(uint32_t)) != 0 || (size
% sizeof(uint32_t)) != 0 || size
== 0 ||
2813 ((uint64_t)off
+ (uint64_t)size
) > dev
->rom_info
.size
) {
2817 while (len
< size
) {
2818 woff
= (off
+ len
) % dev
->rom_info
.page_size
;
2819 wsize
= dev
->rom_info
.page_size
- woff
;
2820 if ((size
- len
) < wsize
) {
2821 wsize
= (size
- len
);
2825 ret
= ssd_spi_page_write(dev
, (buf
+ len
), woff
, wsize
);
2839 static int ssd_spi_erase(struct ssd_device
*dev
, uint32_t off
, uint32_t size
)
2849 if (size
== 0 || ((uint64_t)off
+ (uint64_t)size
) > dev
->rom_info
.size
||
2850 (off
% dev
->rom_info
.block_size
) != 0 || (size
% dev
->rom_info
.block_size
) != 0) {
2854 while (len
< size
) {
2857 ret
= ssd_spi_block_erase(dev
, eoff
);
2862 len
+= dev
->rom_info
.block_size
;
2872 static uint32_t __ssd_i2c_reg32_read(void *addr
)
2874 return ssd_reg32_read(addr
);
2877 static void __ssd_i2c_reg32_write(void *addr
, uint32_t val
)
2879 ssd_reg32_write(addr
, val
);
2880 ssd_reg32_read(addr
);
2883 static int __ssd_i2c_clear(struct ssd_device
*dev
, uint8_t saddr
)
2885 ssd_i2c_ctrl_t ctrl
;
2886 ssd_i2c_data_t data
;
2893 ctrl
.bits
.wdata
= 0;
2894 ctrl
.bits
.addr
= SSD_I2C_STATUS_REG
;
2895 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
2896 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
2900 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
2901 if (data
.bits
.valid
== 0) {
2906 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
2912 status
= data
.bits
.rdata
;
2914 if (!(status
& 0x4)) {
2915 /* clear read fifo data */
2916 ctrl
.bits
.wdata
= 0;
2917 ctrl
.bits
.addr
= SSD_I2C_DATA_REG
;
2918 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
2919 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
2923 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
2924 if (data
.bits
.valid
== 0) {
2929 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
2937 if (nr_data
<= SSD_I2C_MAX_DATA
) {
2946 ctrl
.bits
.wdata
= 0x04;
2947 ctrl
.bits
.addr
= SSD_I2C_CMD_REG
;
2948 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
2949 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
2952 if (!(status
& 0x8)) {
2954 /* reset i2c controller */
2955 ctrl
.bits
.wdata
= 0x0;
2956 ctrl
.bits
.addr
= SSD_I2C_RESET_REG
;
2957 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
2958 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
2965 static int ssd_i2c_write(struct ssd_device
*dev
, uint8_t saddr
, uint8_t size
, uint8_t *buf
)
2967 ssd_i2c_ctrl_t ctrl
;
2968 ssd_i2c_data_t data
;
2974 mutex_lock(&dev
->i2c_mutex
);
2979 ctrl
.bits
.wdata
= saddr
;
2980 ctrl
.bits
.addr
= SSD_I2C_SADDR_REG
;
2981 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
2982 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
2985 while (off
< size
) {
2986 ctrl
.bits
.wdata
= buf
[off
];
2987 ctrl
.bits
.addr
= SSD_I2C_DATA_REG
;
2988 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
2989 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
2995 ctrl
.bits
.wdata
= 0x01;
2996 ctrl
.bits
.addr
= SSD_I2C_CMD_REG
;
2997 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
2998 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3003 ctrl
.bits
.wdata
= 0;
3004 ctrl
.bits
.addr
= SSD_I2C_STATUS_REG
;
3005 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
3006 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3009 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
3010 if (data
.bits
.valid
== 0) {
3015 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3022 status
= data
.bits
.rdata
;
3027 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3034 if (!(status
& 0x1)) {
3040 if (status
& 0x20) {
3046 if (status
& 0x10) {
3053 if (__ssd_i2c_clear(dev
, saddr
)) {
3057 mutex_unlock(&dev
->i2c_mutex
);
3062 static int ssd_i2c_read(struct ssd_device
*dev
, uint8_t saddr
, uint8_t size
, uint8_t *buf
)
3064 ssd_i2c_ctrl_t ctrl
;
3065 ssd_i2c_data_t data
;
3071 mutex_lock(&dev
->i2c_mutex
);
3076 ctrl
.bits
.wdata
= saddr
;
3077 ctrl
.bits
.addr
= SSD_I2C_SADDR_REG
;
3078 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3079 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3082 ctrl
.bits
.wdata
= size
;
3083 ctrl
.bits
.addr
= SSD_I2C_LEN_REG
;
3084 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3085 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3088 ctrl
.bits
.wdata
= 0x02;
3089 ctrl
.bits
.addr
= SSD_I2C_CMD_REG
;
3090 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3091 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3096 ctrl
.bits
.wdata
= 0;
3097 ctrl
.bits
.addr
= SSD_I2C_STATUS_REG
;
3098 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
3099 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3102 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
3103 if (data
.bits
.valid
== 0) {
3108 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3115 status
= data
.bits
.rdata
;
3120 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3127 if (!(status
& 0x2)) {
3133 if (status
& 0x20) {
3139 if (status
& 0x10) {
3145 while (off
< size
) {
3146 ctrl
.bits
.wdata
= 0;
3147 ctrl
.bits
.addr
= SSD_I2C_DATA_REG
;
3148 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
3149 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3153 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
3154 if (data
.bits
.valid
== 0) {
3159 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3166 buf
[off
] = data
.bits
.rdata
;
3173 if (__ssd_i2c_clear(dev
, saddr
)) {
3177 mutex_unlock(&dev
->i2c_mutex
);
3182 static int ssd_i2c_write_read(struct ssd_device
*dev
, uint8_t saddr
, uint8_t wsize
, uint8_t *wbuf
, uint8_t rsize
, uint8_t *rbuf
)
3184 ssd_i2c_ctrl_t ctrl
;
3185 ssd_i2c_data_t data
;
3191 mutex_lock(&dev
->i2c_mutex
);
3196 ctrl
.bits
.wdata
= saddr
;
3197 ctrl
.bits
.addr
= SSD_I2C_SADDR_REG
;
3198 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3199 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3203 while (off
< wsize
) {
3204 ctrl
.bits
.wdata
= wbuf
[off
];
3205 ctrl
.bits
.addr
= SSD_I2C_DATA_REG
;
3206 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3207 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3213 ctrl
.bits
.wdata
= rsize
;
3214 ctrl
.bits
.addr
= SSD_I2C_LEN_REG
;
3215 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3216 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3219 ctrl
.bits
.wdata
= 0x03;
3220 ctrl
.bits
.addr
= SSD_I2C_CMD_REG
;
3221 ctrl
.bits
.rw
= SSD_I2C_CTRL_WRITE
;
3222 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3227 ctrl
.bits
.wdata
= 0;
3228 ctrl
.bits
.addr
= SSD_I2C_STATUS_REG
;
3229 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
3230 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3233 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
3234 if (data
.bits
.valid
== 0) {
3239 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3246 status
= data
.bits
.rdata
;
3251 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3258 if (!(status
& 0x2)) {
3264 if (status
& 0x20) {
3270 if (status
& 0x10) {
3277 while (off
< rsize
) {
3278 ctrl
.bits
.wdata
= 0;
3279 ctrl
.bits
.addr
= SSD_I2C_DATA_REG
;
3280 ctrl
.bits
.rw
= SSD_I2C_CTRL_READ
;
3281 __ssd_i2c_reg32_write(dev
->ctrlp
+ SSD_I2C_CTRL_REG
, ctrl
.val
);
3285 data
.val
= __ssd_i2c_reg32_read(dev
->ctrlp
+ SSD_I2C_RDATA_REG
);
3286 if (data
.bits
.valid
== 0) {
3291 if (time_after(jiffies
, (st
+ SSD_I2C_TIMEOUT
))) {
3298 rbuf
[off
] = data
.bits
.rdata
;
3305 if (__ssd_i2c_clear(dev
, saddr
)) {
3308 mutex_unlock(&dev
->i2c_mutex
);
3313 static int ssd_smbus_send_byte(struct ssd_device
*dev
, uint8_t saddr
, uint8_t *buf
)
3319 ret
= ssd_i2c_write(dev
, saddr
, 1, buf
);
3320 if (!ret
|| -ETIMEDOUT
== ret
) {
3325 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3328 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3334 static int ssd_smbus_receive_byte(struct ssd_device
*dev
, uint8_t saddr
, uint8_t *buf
)
3340 ret
= ssd_i2c_read(dev
, saddr
, 1, buf
);
3341 if (!ret
|| -ETIMEDOUT
== ret
) {
3346 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3349 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3355 static int ssd_smbus_write_byte(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t *buf
)
3357 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3362 memcpy((smb_data
+ 1), buf
, 1);
3365 ret
= ssd_i2c_write(dev
, saddr
, 2, smb_data
);
3366 if (!ret
|| -ETIMEDOUT
== ret
) {
3371 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3374 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3380 static int ssd_smbus_read_byte(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t *buf
)
3382 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3389 ret
= ssd_i2c_write_read(dev
, saddr
, 1, smb_data
, 1, buf
);
3390 if (!ret
|| -ETIMEDOUT
== ret
) {
3395 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3398 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3404 static int ssd_smbus_write_word(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t *buf
)
3406 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3411 memcpy((smb_data
+ 1), buf
, 2);
3414 ret
= ssd_i2c_write(dev
, saddr
, 3, smb_data
);
3415 if (!ret
|| -ETIMEDOUT
== ret
) {
3420 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3423 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3429 static int ssd_smbus_read_word(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t *buf
)
3431 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3438 ret
= ssd_i2c_write_read(dev
, saddr
, 1, smb_data
, 2, buf
);
3439 if (!ret
|| -ETIMEDOUT
== ret
) {
3444 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3447 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3453 static int ssd_smbus_write_block(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t size
, uint8_t *buf
)
3455 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3461 memcpy((smb_data
+ 2), buf
, size
);
3464 ret
= ssd_i2c_write(dev
, saddr
, (2 + size
), smb_data
);
3465 if (!ret
|| -ETIMEDOUT
== ret
) {
3470 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3473 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3479 static int ssd_smbus_read_block(struct ssd_device
*dev
, uint8_t saddr
, uint8_t cmd
, uint8_t size
, uint8_t *buf
)
3481 uint8_t smb_data
[SSD_SMBUS_DATA_MAX
] = {0};
3489 ret
= ssd_i2c_write_read(dev
, saddr
, 1, smb_data
, (SSD_SMBUS_BLOCK_MAX
+ 1), (smb_data
+ 1));
3490 if (!ret
|| -ETIMEDOUT
== ret
) {
3495 if (i
>= SSD_SMBUS_RETRY_MAX
) {
3498 msleep(SSD_SMBUS_RETRY_INTERVAL
);
3504 rsize
= smb_data
[1];
3506 if (rsize
> size
) {
3510 memcpy(buf
, (smb_data
+ 2), rsize
);
3516 static int ssd_gen_swlog(struct ssd_device
*dev
, uint16_t event
, uint32_t data
);
3519 static int ssd_init_lm75(struct ssd_device
*dev
, uint8_t saddr
)
3524 ret
= ssd_smbus_read_byte(dev
, saddr
, SSD_LM75_REG_CONF
, &conf
);
3529 conf
&= (uint8_t)(~1u);
3531 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM75_REG_CONF
, &conf
);
3540 static int ssd_lm75_read(struct ssd_device
*dev
, uint8_t saddr
, uint16_t *data
)
3545 ret
= ssd_smbus_read_word(dev
, saddr
, SSD_LM75_REG_TEMP
, (uint8_t *)&val
);
3550 *data
= u16_swap(val
);
3555 static int ssd_init_lm80(struct ssd_device
*dev
, uint8_t saddr
)
3564 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_CONFIG
, &val
);
3571 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_RES
, &val
);
3576 /* set volt limit */
3577 for (i
=0; i
<SSD_LM80_IN_NR
; i
++) {
3578 high
= ssd_lm80_limit
[i
].high
;
3579 low
= ssd_lm80_limit
[i
].low
;
3581 if (SSD_LM80_IN_CAP
== i
) {
3585 if (dev
->hw_info
.nr_ctrl
<= 1 && SSD_LM80_IN_1V2
== i
) {
3591 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_IN_MAX(i
), &high
);
3597 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_IN_MIN(i
), &low
);
3603 /* set interrupt mask: allow volt in interrupt except cap in*/
3605 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_MASK1
, &val
);
3610 /* set interrupt mask: disable others */
3612 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_MASK2
, &val
);
3619 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_CONFIG
, &val
);
3628 static int ssd_lm80_enable_in(struct ssd_device
*dev
, uint8_t saddr
, int idx
)
3633 if (idx
>= SSD_LM80_IN_NR
|| idx
< 0) {
3637 ret
= ssd_smbus_read_byte(dev
, saddr
, SSD_LM80_REG_MASK1
, &val
);
3642 val
&= ~(1UL << (uint32_t)idx
);
3644 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_MASK1
, &val
);
3653 static int ssd_lm80_disable_in(struct ssd_device
*dev
, uint8_t saddr
, int idx
)
3658 if (idx
>= SSD_LM80_IN_NR
|| idx
< 0) {
3662 ret
= ssd_smbus_read_byte(dev
, saddr
, SSD_LM80_REG_MASK1
, &val
);
3667 val
|= (1UL << (uint32_t)idx
);
3669 ret
= ssd_smbus_write_byte(dev
, saddr
, SSD_LM80_REG_MASK1
, &val
);
3678 static int ssd_lm80_read_temp(struct ssd_device
*dev
, uint8_t saddr
, uint16_t *data
)
3683 ret
= ssd_smbus_read_word(dev
, saddr
, SSD_LM80_REG_TEMP
, (uint8_t *)&val
);
3688 *data
= u16_swap(val
);
3693 static int ssd_lm80_check_event(struct ssd_device
*dev
, uint8_t saddr
)
3696 uint16_t val
= 0, status
;
3697 uint8_t alarm1
= 0, alarm2
= 0;
3701 /* read interrupt status to clear interrupt */
3702 ret
= ssd_smbus_read_byte(dev
, saddr
, SSD_LM80_REG_ALARM1
, &alarm1
);
3707 ret
= ssd_smbus_read_byte(dev
, saddr
, SSD_LM80_REG_ALARM2
, &alarm2
);
3712 status
= (uint16_t)alarm1
| ((uint16_t)alarm2
<< 8);
3714 /* parse inetrrupt status */
3715 for (i
=0; i
<SSD_LM80_IN_NR
; i
++) {
3716 if (!((status
>> (uint32_t)i
) & 0x1)) {
3717 if (test_and_clear_bit(SSD_HWMON_LM80(i
), &dev
->hwmon
)) {
3718 /* enable INx irq */
3719 ret
= ssd_lm80_enable_in(dev
, saddr
, i
);
3728 /* disable INx irq */
3729 ret
= ssd_lm80_disable_in(dev
, saddr
, i
);
3734 if (test_and_set_bit(SSD_HWMON_LM80(i
), &dev
->hwmon
)) {
3738 ret
= ssd_smbus_read_word(dev
, saddr
, SSD_LM80_REG_IN(i
), (uint8_t *)&val
);
3743 volt
= SSD_LM80_CONVERT_VOLT(u16_swap(val
));
3746 case SSD_LM80_IN_CAP
: {
3748 ssd_gen_swlog(dev
, SSD_LOG_CAP_SHORT_CIRCUIT
, 0);
3750 ssd_gen_swlog(dev
, SSD_LOG_CAP_VOLT_FAULT
, SSD_PL_CAP_VOLT(volt
));
3755 case SSD_LM80_IN_1V2
:
3756 case SSD_LM80_IN_1V2a
:
3757 case SSD_LM80_IN_1V5
:
3758 case SSD_LM80_IN_1V8
: {
3759 ssd_gen_swlog(dev
, SSD_LOG_VOLT_STATUS
, SSD_VOLT_LOG_DATA(i
, 0, volt
));
3762 case SSD_LM80_IN_FPGA_3V3
:
3763 case SSD_LM80_IN_3V3
: {
3764 ssd_gen_swlog(dev
, SSD_LOG_VOLT_STATUS
, SSD_VOLT_LOG_DATA(i
, 0, SSD_LM80_3V3_VOLT(volt
)));
3774 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
3775 ssd_gen_swlog(dev
, SSD_LOG_SENSOR_FAULT
, (uint32_t)saddr
);
3778 test_and_clear_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
);
3783 static int ssd_init_sensor(struct ssd_device
*dev
)
3787 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
3791 ret
= ssd_init_lm75(dev
, SSD_SENSOR_LM75_SADDRESS
);
3793 hio_warn("%s: init lm75 failed\n", dev
->name
);
3794 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM75
), &dev
->hwmon
)) {
3795 ssd_gen_swlog(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM75_SADDRESS
);
3800 if (dev
->hw_info
.pcb_ver
>= 'B' || dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_HHHL
) {
3801 ret
= ssd_init_lm80(dev
, SSD_SENSOR_LM80_SADDRESS
);
3803 hio_warn("%s: init lm80 failed\n", dev
->name
);
3804 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
3805 ssd_gen_swlog(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
);
3812 /* skip error if not in standard mode */
3813 if (mode
!= SSD_DRV_MODE_STANDARD
) {
3820 static int ssd_mon_boardvolt(struct ssd_device
*dev
)
3822 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
3826 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
3830 return ssd_lm80_check_event(dev
, SSD_SENSOR_LM80_SADDRESS
);
3834 static int ssd_mon_temp(struct ssd_device
*dev
)
3840 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
3844 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
3849 ret
= ssd_lm80_read_temp(dev
, SSD_SENSOR_LM80_SADDRESS
, &val
);
3851 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
3852 ssd_gen_swlog(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
);
3856 test_and_clear_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
);
3858 cur
= SSD_SENSOR_CONVERT_TEMP(val
);
3859 if (cur
>= SSD_INLET_OT_TEMP
) {
3860 if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_INLET
), &dev
->hwmon
)) {
3861 ssd_gen_swlog(dev
, SSD_LOG_INLET_OVER_TEMP
, (uint32_t)cur
);
3863 } else if(cur
< SSD_INLET_OT_HYST
) {
3864 if (test_and_clear_bit(SSD_HWMON_TEMP(SSD_TEMP_INLET
), &dev
->hwmon
)) {
3865 ssd_gen_swlog(dev
, SSD_LOG_INLET_NORMAL_TEMP
, (uint32_t)cur
);
3870 ret
= ssd_lm75_read(dev
, SSD_SENSOR_LM75_SADDRESS
, &val
);
3872 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM75
), &dev
->hwmon
)) {
3873 ssd_gen_swlog(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM75_SADDRESS
);
3877 test_and_clear_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM75
), &dev
->hwmon
);
3879 cur
= SSD_SENSOR_CONVERT_TEMP(val
);
3880 if (cur
>= SSD_FLASH_OT_TEMP
) {
3881 if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_FLASH
), &dev
->hwmon
)) {
3882 ssd_gen_swlog(dev
, SSD_LOG_FLASH_OVER_TEMP
, (uint32_t)cur
);
3884 } else if(cur
< SSD_FLASH_OT_HYST
) {
3885 if (test_and_clear_bit(SSD_HWMON_TEMP(SSD_TEMP_FLASH
), &dev
->hwmon
)) {
3886 ssd_gen_swlog(dev
, SSD_LOG_FLASH_NORMAL_TEMP
, (uint32_t)cur
);
3895 static inline void ssd_put_tag(struct ssd_device
*dev
, int tag
)
3897 test_and_clear_bit(tag
, dev
->tag_map
);
3898 wake_up(&dev
->tag_wq
);
3901 static inline int ssd_get_tag(struct ssd_device
*dev
, int wait
)
3906 while ((tag
= find_first_zero_bit(dev
->tag_map
, dev
->hw_info
.cmd_fifo_sz
)) >= atomic_read(&dev
->queue_depth
)) {
3907 DEFINE_WAIT(__wait
);
3913 prepare_to_wait_exclusive(&dev
->tag_wq
, &__wait
, TASK_UNINTERRUPTIBLE
);
3916 finish_wait(&dev
->tag_wq
, &__wait
);
3919 if (test_and_set_bit(tag
, dev
->tag_map
)) {
3926 static void ssd_barrier_put_tag(struct ssd_device
*dev
, int tag
)
3928 test_and_clear_bit(tag
, dev
->tag_map
);
3931 static int ssd_barrier_get_tag(struct ssd_device
*dev
)
3935 if (test_and_set_bit(tag
, dev
->tag_map
)) {
3942 static void ssd_barrier_end(struct ssd_device
*dev
)
3944 atomic_set(&dev
->queue_depth
, dev
->hw_info
.cmd_fifo_sz
);
3945 wake_up_all(&dev
->tag_wq
);
3947 mutex_unlock(&dev
->barrier_mutex
);
3950 static int ssd_barrier_start(struct ssd_device
*dev
)
3954 mutex_lock(&dev
->barrier_mutex
);
3956 atomic_set(&dev
->queue_depth
, 0);
3958 for (i
=0; i
<SSD_CMD_TIMEOUT
; i
++) {
3959 if (find_first_bit(dev
->tag_map
, dev
->hw_info
.cmd_fifo_sz
) >= dev
->hw_info
.cmd_fifo_sz
) {
3963 __set_current_state(TASK_INTERRUPTIBLE
);
3964 schedule_timeout(1);
3967 atomic_set(&dev
->queue_depth
, dev
->hw_info
.cmd_fifo_sz
);
3968 wake_up_all(&dev
->tag_wq
);
3970 mutex_unlock(&dev
->barrier_mutex
);
3975 static int ssd_busy(struct ssd_device
*dev
)
3977 if (find_first_bit(dev
->tag_map
, dev
->hw_info
.cmd_fifo_sz
) >= dev
->hw_info
.cmd_fifo_sz
) {
3984 static int ssd_wait_io(struct ssd_device
*dev
)
3988 for (i
=0; i
<SSD_CMD_TIMEOUT
; i
++) {
3989 if (find_first_bit(dev
->tag_map
, dev
->hw_info
.cmd_fifo_sz
) >= dev
->hw_info
.cmd_fifo_sz
) {
3993 __set_current_state(TASK_INTERRUPTIBLE
);
3994 schedule_timeout(1);
4001 static int ssd_in_barrier(struct ssd_device
*dev
)
4003 return (0 == atomic_read(&dev
->queue_depth
));
4007 static void ssd_cleanup_tag(struct ssd_device
*dev
)
4009 kfree(dev
->tag_map
);
4012 static int ssd_init_tag(struct ssd_device
*dev
)
4014 int nr_ulongs
= ALIGN(dev
->hw_info
.cmd_fifo_sz
, BITS_PER_LONG
) / BITS_PER_LONG
;
4016 mutex_init(&dev
->barrier_mutex
);
4018 atomic_set(&dev
->queue_depth
, dev
->hw_info
.cmd_fifo_sz
);
4020 dev
->tag_map
= kmalloc(nr_ulongs
* sizeof(unsigned long), GFP_ATOMIC
);
4021 if (!dev
->tag_map
) {
4025 memset(dev
->tag_map
, 0, nr_ulongs
* sizeof(unsigned long));
4027 init_waitqueue_head(&dev
->tag_wq
);
4033 static void ssd_end_io_acct(struct ssd_cmd
*cmd
)
4035 struct ssd_device
*dev
= cmd
->dev
;
4036 struct bio
*bio
= cmd
->bio
;
4037 unsigned long dur
= jiffies
- cmd
->start_time
;
4038 int rw
= bio_data_dir(bio
);
4040 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6 && RHEL_MINOR >= 7))
4041 int cpu
= part_stat_lock();
4042 struct hd_struct
*part
= disk_map_sector_rcu(dev
->gd
, bio_start(bio
));
4043 part_round_stats(cpu
, part
);
4044 part_stat_add(cpu
, part
, ticks
[rw
], dur
);
4045 part_dec_in_flight(part
, rw
);
4047 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
4048 int cpu
= part_stat_lock();
4049 struct hd_struct
*part
= &dev
->gd
->part0
;
4050 part_round_stats(cpu
, part
);
4051 part_stat_add(cpu
, part
, ticks
[rw
], dur
);
4053 part
->in_flight
[rw
] = atomic_dec_return(&dev
->in_flight
[rw
]);
4054 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14))
4056 disk_round_stats(dev
->gd
);
4058 disk_stat_add(dev
->gd
, ticks
[rw
], dur
);
4059 dev
->gd
->in_flight
= atomic_dec_return(&dev
->in_flight
[0]);
4062 disk_round_stats(dev
->gd
);
4065 disk_stat_add(dev
->gd
, write_ticks
, dur
);
4067 disk_stat_add(dev
->gd
, read_ticks
, dur
);
4069 dev
->gd
->in_flight
= atomic_dec_return(&dev
->in_flight
[0]);
4073 static void ssd_start_io_acct(struct ssd_cmd
*cmd
)
4075 struct ssd_device
*dev
= cmd
->dev
;
4076 struct bio
*bio
= cmd
->bio
;
4077 int rw
= bio_data_dir(bio
);
4079 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6 && RHEL_MINOR >= 7))
4080 int cpu
= part_stat_lock();
4081 struct hd_struct
*part
= disk_map_sector_rcu(dev
->gd
, bio_start(bio
));
4082 part_round_stats(cpu
, part
);
4083 part_stat_inc(cpu
, part
, ios
[rw
]);
4084 part_stat_add(cpu
, part
, sectors
[rw
], bio_sectors(bio
));
4085 part_inc_in_flight(part
, rw
);
4087 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
4088 int cpu
= part_stat_lock();
4089 struct hd_struct
*part
= &dev
->gd
->part0
;
4090 part_round_stats(cpu
, part
);
4091 part_stat_inc(cpu
, part
, ios
[rw
]);
4092 part_stat_add(cpu
, part
, sectors
[rw
], bio_sectors(bio
));
4094 part
->in_flight
[rw
] = atomic_inc_return(&dev
->in_flight
[rw
]);
4095 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14))
4097 disk_round_stats(dev
->gd
);
4099 disk_stat_inc(dev
->gd
, ios
[rw
]);
4100 disk_stat_add(dev
->gd
, sectors
[rw
], bio_sectors(bio
));
4101 dev
->gd
->in_flight
= atomic_inc_return(&dev
->in_flight
[0]);
4104 disk_round_stats(dev
->gd
);
4107 disk_stat_inc(dev
->gd
, writes
);
4108 disk_stat_add(dev
->gd
, write_sectors
, bio_sectors(bio
));
4110 disk_stat_inc(dev
->gd
, reads
);
4111 disk_stat_add(dev
->gd
, read_sectors
, bio_sectors(bio
));
4113 dev
->gd
->in_flight
= atomic_inc_return(&dev
->in_flight
[0]);
4116 cmd
->start_time
= jiffies
;
4120 static void ssd_queue_bio(struct ssd_device
*dev
, struct bio
*bio
)
4122 spin_lock(&dev
->sendq_lock
);
4123 ssd_blist_add(&dev
->sendq
, bio
);
4124 spin_unlock(&dev
->sendq_lock
);
4126 atomic_inc(&dev
->in_sendq
);
4127 wake_up(&dev
->send_waitq
);
4130 static inline void ssd_end_request(struct ssd_cmd
*cmd
)
4132 struct ssd_device
*dev
= cmd
->dev
;
4133 struct bio
*bio
= cmd
->bio
;
4134 int errors
= cmd
->errors
;
4138 #if (defined SSD_TRIM && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)))
4139 if (!(bio
->bi_rw
& REQ_DISCARD
)) {
4140 ssd_end_io_acct(cmd
);
4142 pci_unmap_sg(dev
->pdev
, cmd
->sgl
, cmd
->nsegs
,
4143 bio_data_dir(bio
) == READ
? PCI_DMA_FROMDEVICE
: PCI_DMA_TODEVICE
);
4146 #elif (defined SSD_TRIM && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)))
4147 if (!bio_rw_flagged(bio
, BIO_RW_DISCARD
)) {
4148 ssd_end_io_acct(cmd
);
4150 pci_unmap_sg(dev
->pdev
, cmd
->sgl
, cmd
->nsegs
,
4151 bio_data_dir(bio
) == READ
? PCI_DMA_FROMDEVICE
: PCI_DMA_TODEVICE
);
4155 ssd_end_io_acct(cmd
);
4158 pci_unmap_sg(dev
->pdev
, cmd
->sgl
, cmd
->nsegs
,
4159 bio_data_dir(bio
) == READ
? PCI_DMA_FROMDEVICE
: PCI_DMA_TODEVICE
);
4164 ssd_put_tag(dev
, tag
);
4166 if (SSD_INT_MSIX
== dev
->int_mode
|| tag
< 16 || errors
) {
4167 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
4168 bio_endio(bio
, errors
);
4170 bio_endio(bio
, bio
->bi_size
, errors
);
4172 } else /* if (bio->bi_idx >= bio->bi_vcnt)*/ {
4173 spin_lock(&dev
->doneq_lock
);
4174 ssd_blist_add(&dev
->doneq
, bio
);
4175 spin_unlock(&dev
->doneq_lock
);
4177 atomic_inc(&dev
->in_doneq
);
4178 wake_up(&dev
->done_waitq
);
4182 complete(cmd
->waiting
);
4187 static void ssd_end_timeout_request(struct ssd_cmd
*cmd
)
4189 struct ssd_device
*dev
= cmd
->dev
;
4190 struct ssd_rw_msg
*msg
= (struct ssd_rw_msg
*)cmd
->msg
;
4193 for (i
=0; i
<dev
->nr_queue
; i
++) {
4194 disable_irq(dev
->entry
[i
].vector
);
4197 atomic_inc(&dev
->tocnt
);
4199 hio_err("%s: cmd timeout: tag %d fun %#x\n", dev
->name
, msg
->tag
, msg
->fun
);
4200 cmd
->errors
= -ETIMEDOUT
;
4201 ssd_end_request(cmd
);
4204 for (i
=0; i
<dev
->nr_queue
; i
++) {
4205 enable_irq(dev
->entry
[i
].vector
);
4213 static void ssd_cmd_add_timer(struct ssd_cmd
*cmd
, int timeout
, void (*complt
)(struct ssd_cmd
*))
4215 init_timer(&cmd
->cmd_timer
);
4217 cmd
->cmd_timer
.data
= (unsigned long)cmd
;
4218 cmd
->cmd_timer
.expires
= jiffies
+ timeout
;
4219 cmd
->cmd_timer
.function
= (void (*)(unsigned long)) complt
;
4221 add_timer(&cmd
->cmd_timer
);
4224 static int ssd_cmd_del_timer(struct ssd_cmd
*cmd
)
4226 return del_timer(&cmd
->cmd_timer
);
4229 static void ssd_add_timer(struct timer_list
*timer
, int timeout
, void (*complt
)(void *), void *data
)
4233 timer
->data
= (unsigned long)data
;
4234 timer
->expires
= jiffies
+ timeout
;
4235 timer
->function
= (void (*)(unsigned long)) complt
;
4240 static int ssd_del_timer(struct timer_list
*timer
)
4242 return del_timer(timer
);
4245 static void ssd_cmd_timeout(struct ssd_cmd
*cmd
)
4247 struct ssd_device
*dev
= cmd
->dev
;
4248 uint32_t msg
= *(uint32_t *)cmd
->msg
;
4250 ssd_end_timeout_request(cmd
);
4252 ssd_gen_swlog(dev
, SSD_LOG_TIMEOUT
, msg
);
4256 static void __ssd_done(unsigned long data
)
4258 struct ssd_cmd
*cmd
;
4261 local_irq_disable();
4262 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0))
4263 list_splice_init(&__get_cpu_var(ssd_doneq
), &localq
);
4265 list_splice_init(this_cpu_ptr(&ssd_doneq
), &localq
);
4269 while (!list_empty(&localq
)) {
4270 cmd
= list_entry(localq
.next
, struct ssd_cmd
, list
);
4271 list_del_init(&cmd
->list
);
4273 ssd_end_request(cmd
);
4277 static void __ssd_done_db(unsigned long data
)
4279 struct ssd_cmd
*cmd
;
4280 struct ssd_device
*dev
;
4284 local_irq_disable();
4285 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0))
4286 list_splice_init(&__get_cpu_var(ssd_doneq
), &localq
);
4288 list_splice_init(this_cpu_ptr(&ssd_doneq
), &localq
);
4292 while (!list_empty(&localq
)) {
4293 cmd
= list_entry(localq
.next
, struct ssd_cmd
, list
);
4294 list_del_init(&cmd
->list
);
4296 dev
= (struct ssd_device
*)cmd
->dev
;
4300 sector_t off
= dev
->db_info
.data
.loc
.off
;
4301 uint32_t len
= dev
->db_info
.data
.loc
.len
;
4303 switch (dev
->db_info
.type
) {
4304 case SSD_DEBUG_READ_ERR
:
4305 if (bio_data_dir(bio
) == READ
&&
4306 !((off
+ len
) <= bio_start(bio
) || off
>= (bio_start(bio
) + bio_sectors(bio
)))) {
4310 case SSD_DEBUG_WRITE_ERR
:
4311 if (bio_data_dir(bio
) == WRITE
&&
4312 !((off
+ len
) <= bio_start(bio
) || off
>= (bio_start(bio
) + bio_sectors(bio
)))) {
4313 cmd
->errors
= -EROFS
;
4316 case SSD_DEBUG_RW_ERR
:
4317 if (!((off
+ len
) <= bio_start(bio
) || off
>= (bio_start(bio
) + bio_sectors(bio
)))) {
4318 if (bio_data_dir(bio
) == READ
) {
4321 cmd
->errors
= -EROFS
;
4330 ssd_end_request(cmd
);
4334 static inline void ssd_done_bh(struct ssd_cmd
*cmd
)
4336 unsigned long flags
= 0;
4338 if (unlikely(!ssd_cmd_del_timer(cmd
))) {
4339 struct ssd_device
*dev
= cmd
->dev
;
4340 struct ssd_rw_msg
*msg
= (struct ssd_rw_msg
*)cmd
->msg
;
4341 hio_err("%s: unknown cmd: tag %d fun %#x\n", dev
->name
, msg
->tag
, msg
->fun
);
4348 local_irq_save(flags
);
4349 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0))
4350 list_add_tail(&cmd
->list
, &__get_cpu_var(ssd_doneq
));
4351 tasklet_hi_schedule(&__get_cpu_var(ssd_tasklet
));
4353 list_add_tail(&cmd
->list
, this_cpu_ptr(&ssd_doneq
));
4354 tasklet_hi_schedule(this_cpu_ptr(&ssd_tasklet
));
4356 local_irq_restore(flags
);
4361 static inline void ssd_done(struct ssd_cmd
*cmd
)
4363 if (unlikely(!ssd_cmd_del_timer(cmd
))) {
4364 struct ssd_device
*dev
= cmd
->dev
;
4365 struct ssd_rw_msg
*msg
= (struct ssd_rw_msg
*)cmd
->msg
;
4366 hio_err("%s: unknown cmd: tag %d fun %#x\n", dev
->name
, msg
->tag
, msg
->fun
);
4373 ssd_end_request(cmd
);
4378 static inline void ssd_dispatch_cmd(struct ssd_cmd
*cmd
)
4380 struct ssd_device
*dev
= (struct ssd_device
*)cmd
->dev
;
4382 ssd_cmd_add_timer(cmd
, SSD_CMD_TIMEOUT
, ssd_cmd_timeout
);
4384 spin_lock(&dev
->cmd_lock
);
4385 ssd_reg_write(dev
->ctrlp
+ SSD_REQ_FIFO_REG
, cmd
->msg_dma
);
4386 spin_unlock(&dev
->cmd_lock
);
4389 static inline void ssd_send_cmd(struct ssd_cmd
*cmd
)
4391 struct ssd_device
*dev
= (struct ssd_device
*)cmd
->dev
;
4393 ssd_cmd_add_timer(cmd
, SSD_CMD_TIMEOUT
, ssd_cmd_timeout
);
4395 ssd_reg32_write(dev
->ctrlp
+ SSD_REQ_FIFO_REG
, ((uint32_t)cmd
->tag
| ((uint32_t)cmd
->nsegs
<< 16)));
4398 static inline void ssd_send_cmd_db(struct ssd_cmd
*cmd
)
4400 struct ssd_device
*dev
= (struct ssd_device
*)cmd
->dev
;
4401 struct bio
*bio
= cmd
->bio
;
4403 ssd_cmd_add_timer(cmd
, SSD_CMD_TIMEOUT
, ssd_cmd_timeout
);
4406 switch (dev
->db_info
.type
) {
4407 case SSD_DEBUG_READ_TO
:
4408 if (bio_data_dir(bio
) == READ
) {
4412 case SSD_DEBUG_WRITE_TO
:
4413 if (bio_data_dir(bio
) == WRITE
) {
4417 case SSD_DEBUG_RW_TO
:
4425 ssd_reg32_write(dev
->ctrlp
+ SSD_REQ_FIFO_REG
, ((uint32_t)cmd
->tag
| ((uint32_t)cmd
->nsegs
<< 16)));
4429 /* fixed for BIOVEC_PHYS_MERGEABLE */
4430 #ifdef SSD_BIOVEC_PHYS_MERGEABLE_FIXED
4431 #include <linux/bio.h>
4432 #include <linux/io.h>
4433 #include <xen/page.h>
4435 static bool xen_biovec_phys_mergeable_fixed(const struct bio_vec
*vec1
,
4436 const struct bio_vec
*vec2
)
4438 unsigned long mfn1
= pfn_to_mfn(page_to_pfn(vec1
->bv_page
));
4439 unsigned long mfn2
= pfn_to_mfn(page_to_pfn(vec2
->bv_page
));
4441 return __BIOVEC_PHYS_MERGEABLE(vec1
, vec2
) &&
4442 ((mfn1
== mfn2
) || ((mfn1
+1) == mfn2
));
4445 #ifdef BIOVEC_PHYS_MERGEABLE
4446 #undef BIOVEC_PHYS_MERGEABLE
4448 #define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
4449 (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \
4450 (!xen_domain() || xen_biovec_phys_mergeable_fixed(vec1, vec2)))
4454 static inline int ssd_bio_map_sg(struct ssd_device
*dev
, struct bio
*bio
, struct scatterlist
*sgl
)
4456 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0))
4457 struct bio_vec
*bvec
, *bvprv
= NULL
;
4458 struct scatterlist
*sg
= NULL
;
4459 int i
= 0, nsegs
= 0;
4461 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23))
4462 sg_init_table(sgl
, dev
->hw_info
.cmd_max_sg
);
4466 * for each segment in bio
4468 bio_for_each_segment(bvec
, bio
, i
) {
4469 if (bvprv
&& BIOVEC_PHYS_MERGEABLE(bvprv
, bvec
)) {
4470 sg
->length
+= bvec
->bv_len
;
4472 if (unlikely(nsegs
>= (int)dev
->hw_info
.cmd_max_sg
)) {
4476 sg
= sg
? (sg
+ 1) : sgl
;
4477 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
4478 sg_set_page(sg
, bvec
->bv_page
, bvec
->bv_len
, bvec
->bv_offset
);
4480 sg
->page
= bvec
->bv_page
;
4481 sg
->length
= bvec
->bv_len
;
4482 sg
->offset
= bvec
->bv_offset
;
4489 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
4499 struct bio_vec bvec
, bvprv
;
4500 struct bvec_iter iter
;
4501 struct scatterlist
*sg
= NULL
;
4505 sg_init_table(sgl
, dev
->hw_info
.cmd_max_sg
);
4508 * for each segment in bio
4510 bio_for_each_segment(bvec
, bio
, iter
) {
4511 if (!first
&& BIOVEC_PHYS_MERGEABLE(&bvprv
, &bvec
)) {
4512 sg
->length
+= bvec
.bv_len
;
4514 if (unlikely(nsegs
>= (int)dev
->hw_info
.cmd_max_sg
)) {
4518 sg
= sg
? (sg
+ 1) : sgl
;
4520 sg_set_page(sg
, bvec
.bv_page
, bvec
.bv_len
, bvec
.bv_offset
);
4537 static int __ssd_submit_pbio(struct ssd_device
*dev
, struct bio
*bio
, int wait
)
4539 struct ssd_cmd
*cmd
;
4540 struct ssd_rw_msg
*msg
;
4541 struct ssd_sg_entry
*sge
;
4542 sector_t block
= bio_start(bio
);
4546 tag
= ssd_get_tag(dev
, wait
);
4551 cmd
= &dev
->cmd
[tag
];
4555 msg
= (struct ssd_rw_msg
*)cmd
->msg
;
4557 #if (defined SSD_TRIM && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)))
4558 if (bio
->bi_rw
& REQ_DISCARD
) {
4559 unsigned int length
= bio_sectors(bio
);
4561 //printk(KERN_WARNING "%s: discard len %u, block %llu\n", dev->name, bio_sectors(bio), block);
4563 msg
->fun
= SSD_FUNC_TRIM
;
4566 for (i
=0; i
<(dev
->hw_info
.cmd_max_sg
); i
++) {
4568 sge
->length
= (length
>= dev
->hw_info
.sg_max_sec
) ? dev
->hw_info
.sg_max_sec
: length
;
4571 block
+= sge
->length
;
4572 length
-= sge
->length
;
4579 msg
->nsegs
= cmd
->nsegs
= (i
+ 1);
4584 #elif (defined SSD_TRIM && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)))
4585 if (bio_rw_flagged(bio
, BIO_RW_DISCARD
)) {
4586 unsigned int length
= bio_sectors(bio
);
4588 //printk(KERN_WARNING "%s: discard len %u, block %llu\n", dev->name, bio_sectors(bio), block);
4590 msg
->fun
= SSD_FUNC_TRIM
;
4593 for (i
=0; i
<(dev
->hw_info
.cmd_max_sg
); i
++) {
4595 sge
->length
= (length
>= dev
->hw_info
.sg_max_sec
) ? dev
->hw_info
.sg_max_sec
: length
;
4598 block
+= sge
->length
;
4599 length
-= sge
->length
;
4606 msg
->nsegs
= cmd
->nsegs
= (i
+ 1);
4613 //msg->nsegs = cmd->nsegs = ssd_bio_map_sg(dev, bio, sgl);
4614 msg
->nsegs
= cmd
->nsegs
= bio
->bi_vcnt
;
4617 if (bio_data_dir(bio
) == READ
) {
4618 msg
->fun
= SSD_FUNC_READ
;
4621 msg
->fun
= SSD_FUNC_WRITE
;
4622 msg
->flag
= dev
->wmode
;
4626 for (i
=0; i
<bio
->bi_vcnt
; i
++) {
4628 sge
->length
= bio
->bi_io_vec
[i
].bv_len
>> 9;
4629 sge
->buf
= (uint64_t)((void *)bio
->bi_io_vec
[i
].bv_page
+ bio
->bi_io_vec
[i
].bv_offset
);
4631 block
+= sge
->length
;
4637 #ifdef SSD_OT_PROTECT
4638 if (unlikely(dev
->ot_delay
> 0 && dev
->ot_protect
!= 0)) {
4639 msleep_interruptible(dev
->ot_delay
);
4643 ssd_start_io_acct(cmd
);
4649 static inline int ssd_submit_bio(struct ssd_device
*dev
, struct bio
*bio
, int wait
)
4651 struct ssd_cmd
*cmd
;
4652 struct ssd_rw_msg
*msg
;
4653 struct ssd_sg_entry
*sge
;
4654 struct scatterlist
*sgl
;
4655 sector_t block
= bio_start(bio
);
4659 tag
= ssd_get_tag(dev
, wait
);
4664 cmd
= &dev
->cmd
[tag
];
4668 msg
= (struct ssd_rw_msg
*)cmd
->msg
;
4672 #if (defined SSD_TRIM && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)))
4673 if (bio
->bi_rw
& REQ_DISCARD
) {
4674 unsigned int length
= bio_sectors(bio
);
4676 //printk(KERN_WARNING "%s: discard len %u, block %llu\n", dev->name, bio_sectors(bio), block);
4678 msg
->fun
= SSD_FUNC_TRIM
;
4681 for (i
=0; i
<(dev
->hw_info
.cmd_max_sg
); i
++) {
4683 sge
->length
= (length
>= dev
->hw_info
.sg_max_sec
) ? dev
->hw_info
.sg_max_sec
: length
;
4686 block
+= sge
->length
;
4687 length
-= sge
->length
;
4694 msg
->nsegs
= cmd
->nsegs
= (i
+ 1);
4699 #elif (defined SSD_TRIM && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)))
4700 if (bio_rw_flagged(bio
, BIO_RW_DISCARD
)) {
4701 unsigned int length
= bio_sectors(bio
);
4703 //printk(KERN_WARNING "%s: discard len %u, block %llu\n", dev->name, bio_sectors(bio), block);
4705 msg
->fun
= SSD_FUNC_TRIM
;
4708 for (i
=0; i
<(dev
->hw_info
.cmd_max_sg
); i
++) {
4710 sge
->length
= (length
>= dev
->hw_info
.sg_max_sec
) ? dev
->hw_info
.sg_max_sec
: length
;
4713 block
+= sge
->length
;
4714 length
-= sge
->length
;
4721 msg
->nsegs
= cmd
->nsegs
= (i
+ 1);
4728 msg
->nsegs
= cmd
->nsegs
= ssd_bio_map_sg(dev
, bio
, sgl
);
4731 if (bio_data_dir(bio
) == READ
) {
4732 msg
->fun
= SSD_FUNC_READ
;
4734 pci_map_sg(dev
->pdev
, sgl
, cmd
->nsegs
, PCI_DMA_FROMDEVICE
);
4736 msg
->fun
= SSD_FUNC_WRITE
;
4737 msg
->flag
= dev
->wmode
;
4738 pci_map_sg(dev
->pdev
, sgl
, cmd
->nsegs
, PCI_DMA_TODEVICE
);
4742 for (i
=0; i
<cmd
->nsegs
; i
++) {
4744 sge
->length
= sg_dma_len(sgl
) >> 9;
4745 sge
->buf
= sg_dma_address(sgl
);
4747 block
+= sge
->length
;
4754 #ifdef SSD_OT_PROTECT
4755 if (unlikely(dev
->ot_delay
> 0 && dev
->ot_protect
!= 0)) {
4756 msleep_interruptible(dev
->ot_delay
);
4760 ssd_start_io_acct(cmd
);
4767 static int ssd_done_thread(void *data
)
4769 struct ssd_device
*dev
;
4778 //set_user_nice(current, -5);
4780 while (!kthread_should_stop()) {
4781 wait_event_interruptible(dev
->done_waitq
, (atomic_read(&dev
->in_doneq
) || kthread_should_stop()));
4783 while (atomic_read(&dev
->in_doneq
)) {
4785 spin_lock(&dev
->doneq_lock
);
4786 bio
= ssd_blist_get(&dev
->doneq
);
4787 spin_unlock(&dev
->doneq_lock
);
4789 spin_lock_irq(&dev
->doneq_lock
);
4790 bio
= ssd_blist_get(&dev
->doneq
);
4791 spin_unlock_irq(&dev
->doneq_lock
);
4795 next
= bio
->bi_next
;
4796 bio
->bi_next
= NULL
;
4797 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
4800 bio_endio(bio
, bio
->bi_size
, 0);
4802 atomic_dec(&dev
->in_doneq
);
4808 #ifdef SSD_ESCAPE_IRQ
4809 if (unlikely(smp_processor_id() == dev
->irq_cpu
)) {
4810 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
4811 cpumask_var_t new_mask
;
4812 alloc_cpumask_var(&new_mask
, GFP_ATOMIC
);
4813 cpumask_setall(new_mask
);
4814 cpumask_clear_cpu(dev
->irq_cpu
, new_mask
);
4815 set_cpus_allowed_ptr(current
, new_mask
);
4816 free_cpumask_var(new_mask
);
4819 cpus_setall(new_mask
);
4820 cpu_clear(dev
->irq_cpu
, new_mask
);
4821 set_cpus_allowed(current
, new_mask
);
4830 static int ssd_send_thread(void *data
)
4832 struct ssd_device
*dev
;
4841 //set_user_nice(current, -5);
4843 while (!kthread_should_stop()) {
4844 wait_event_interruptible(dev
->send_waitq
, (atomic_read(&dev
->in_sendq
) || kthread_should_stop()));
4846 while (atomic_read(&dev
->in_sendq
)) {
4847 spin_lock(&dev
->sendq_lock
);
4848 bio
= ssd_blist_get(&dev
->sendq
);
4849 spin_unlock(&dev
->sendq_lock
);
4852 next
= bio
->bi_next
;
4853 bio
->bi_next
= NULL
;
4854 #ifdef SSD_QUEUE_PBIO
4855 if (test_and_clear_bit(BIO_SSD_PBIO
, &bio
->bi_flags
)) {
4856 __ssd_submit_pbio(dev
, bio
, 1);
4858 ssd_submit_bio(dev
, bio
, 1);
4861 ssd_submit_bio(dev
, bio
, 1);
4863 atomic_dec(&dev
->in_sendq
);
4869 #ifdef SSD_ESCAPE_IRQ
4870 if (unlikely(smp_processor_id() == dev
->irq_cpu
)) {
4871 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
4872 cpumask_var_t new_mask
;
4873 alloc_cpumask_var(&new_mask
, GFP_ATOMIC
);
4874 cpumask_setall(new_mask
);
4875 cpumask_clear_cpu(dev
->irq_cpu
, new_mask
);
4876 set_cpus_allowed_ptr(current
, new_mask
);
4877 free_cpumask_var(new_mask
);
4880 cpus_setall(new_mask
);
4881 cpu_clear(dev
->irq_cpu
, new_mask
);
4882 set_cpus_allowed(current
, new_mask
);
4892 static void ssd_cleanup_thread(struct ssd_device
*dev
)
4894 kthread_stop(dev
->send_thread
);
4895 kthread_stop(dev
->done_thread
);
4898 static int ssd_init_thread(struct ssd_device
*dev
)
4902 atomic_set(&dev
->in_doneq
, 0);
4903 atomic_set(&dev
->in_sendq
, 0);
4905 spin_lock_init(&dev
->doneq_lock
);
4906 spin_lock_init(&dev
->sendq_lock
);
4908 ssd_blist_init(&dev
->doneq
);
4909 ssd_blist_init(&dev
->sendq
);
4911 init_waitqueue_head(&dev
->done_waitq
);
4912 init_waitqueue_head(&dev
->send_waitq
);
4914 dev
->done_thread
= kthread_run(ssd_done_thread
, dev
, "%s/d", dev
->name
);
4915 if (IS_ERR(dev
->done_thread
)) {
4916 ret
= PTR_ERR(dev
->done_thread
);
4917 goto out_done_thread
;
4920 dev
->send_thread
= kthread_run(ssd_send_thread
, dev
, "%s/s", dev
->name
);
4921 if (IS_ERR(dev
->send_thread
)) {
4922 ret
= PTR_ERR(dev
->send_thread
);
4923 goto out_send_thread
;
4929 kthread_stop(dev
->done_thread
);
4935 static void ssd_put_dcmd(struct ssd_dcmd
*dcmd
)
4937 struct ssd_device
*dev
= (struct ssd_device
*)dcmd
->dev
;
4939 spin_lock(&dev
->dcmd_lock
);
4940 list_add_tail(&dcmd
->list
, &dev
->dcmd_list
);
4941 spin_unlock(&dev
->dcmd_lock
);
4944 static struct ssd_dcmd
*ssd_get_dcmd(struct ssd_device
*dev
)
4946 struct ssd_dcmd
*dcmd
= NULL
;
4948 spin_lock(&dev
->dcmd_lock
);
4949 if (!list_empty(&dev
->dcmd_list
)) {
4950 dcmd
= list_entry(dev
->dcmd_list
.next
,
4951 struct ssd_dcmd
, list
);
4952 list_del_init(&dcmd
->list
);
4954 spin_unlock(&dev
->dcmd_lock
);
4959 static void ssd_cleanup_dcmd(struct ssd_device
*dev
)
4964 static int ssd_init_dcmd(struct ssd_device
*dev
)
4966 struct ssd_dcmd
*dcmd
;
4967 int dcmd_sz
= sizeof(struct ssd_dcmd
)*dev
->hw_info
.cmd_fifo_sz
;
4970 spin_lock_init(&dev
->dcmd_lock
);
4971 INIT_LIST_HEAD(&dev
->dcmd_list
);
4972 init_waitqueue_head(&dev
->dcmd_wq
);
4974 dev
->dcmd
= kmalloc(dcmd_sz
, GFP_KERNEL
);
4976 hio_warn("%s: can not alloc dcmd\n", dev
->name
);
4977 goto out_alloc_dcmd
;
4979 memset(dev
->dcmd
, 0, dcmd_sz
);
4981 for (i
=0, dcmd
=dev
->dcmd
; i
<(int)dev
->hw_info
.cmd_fifo_sz
; i
++, dcmd
++) {
4983 INIT_LIST_HEAD(&dcmd
->list
);
4984 list_add_tail(&dcmd
->list
, &dev
->dcmd_list
);
4993 static void ssd_put_dmsg(void *msg
)
4995 struct ssd_dcmd
*dcmd
= container_of(msg
, struct ssd_dcmd
, msg
);
4996 struct ssd_device
*dev
= (struct ssd_device
*)dcmd
->dev
;
4998 memset(dcmd
->msg
, 0, SSD_DCMD_MAX_SZ
);
5000 wake_up(&dev
->dcmd_wq
);
5003 static void *ssd_get_dmsg(struct ssd_device
*dev
)
5005 struct ssd_dcmd
*dcmd
= ssd_get_dcmd(dev
);
5009 prepare_to_wait_exclusive(&dev
->dcmd_wq
, &wait
, TASK_UNINTERRUPTIBLE
);
5012 dcmd
= ssd_get_dcmd(dev
);
5014 finish_wait(&dev
->dcmd_wq
, &wait
);
5020 static int ssd_do_request(struct ssd_device
*dev
, int rw
, void *msg
, int *done
)
5022 DECLARE_COMPLETION(wait
);
5023 struct ssd_cmd
*cmd
;
5027 tag
= ssd_get_tag(dev
, 1);
5032 cmd
= &dev
->cmd
[tag
];
5034 memcpy(cmd
->msg
, msg
, SSD_DCMD_MAX_SZ
);
5035 ((struct ssd_rw_msg
*)cmd
->msg
)->tag
= tag
;
5037 cmd
->waiting
= &wait
;
5041 wait_for_completion(cmd
->waiting
);
5042 cmd
->waiting
= NULL
;
5044 if (cmd
->errors
== -ETIMEDOUT
) {
5046 } else if (cmd
->errors
) {
5051 *done
= cmd
->nr_log
;
5053 ssd_put_tag(dev
, cmd
->tag
);
5058 static int ssd_do_barrier_request(struct ssd_device
*dev
, int rw
, void *msg
, int *done
)
5060 DECLARE_COMPLETION(wait
);
5061 struct ssd_cmd
*cmd
;
5065 tag
= ssd_barrier_get_tag(dev
);
5070 cmd
= &dev
->cmd
[tag
];
5072 memcpy(cmd
->msg
, msg
, SSD_DCMD_MAX_SZ
);
5073 ((struct ssd_rw_msg
*)cmd
->msg
)->tag
= tag
;
5075 cmd
->waiting
= &wait
;
5079 wait_for_completion(cmd
->waiting
);
5080 cmd
->waiting
= NULL
;
5082 if (cmd
->errors
== -ETIMEDOUT
) {
5084 } else if (cmd
->errors
) {
5089 *done
= cmd
->nr_log
;
5091 ssd_barrier_put_tag(dev
, cmd
->tag
);
5096 #ifdef SSD_OT_PROTECT
5097 static void ssd_check_temperature(struct ssd_device
*dev
, int temp
)
5104 if (mode
!= SSD_DRV_MODE_STANDARD
) {
5108 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
5111 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
5112 off
= SSD_CTRL_TEMP_REG0
+ i
* sizeof(uint64_t);
5114 val
= ssd_reg_read(dev
->ctrlp
+ off
);
5115 if (val
== 0xffffffffffffffffull
) {
5119 cur
= (int)CUR_TEMP(val
);
5121 if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_CTRL
), &dev
->hwmon
)) {
5122 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
&& dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2_2
) {
5123 hio_warn("%s: Over temperature, please check the fans.\n", dev
->name
);
5124 dev
->ot_delay
= SSD_OT_DELAY
;
5131 if (test_and_clear_bit(SSD_HWMON_TEMP(SSD_TEMP_CTRL
), &dev
->hwmon
)) {
5132 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
&& dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2_2
) {
5133 hio_warn("%s: Temperature is OK.\n", dev
->name
);
5140 static int ssd_get_ot_status(struct ssd_device
*dev
, int *status
)
5146 if (!dev
|| !status
) {
5150 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2_2
) {
5151 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
5152 off
= SSD_READ_OT_REG0
+ (i
* SSD_CTRL_REG_ZONE_SZ
);
5153 val
= ssd_reg32_read(dev
->ctrlp
+ off
);
5154 if ((val
>> 22) & 0x1) {
5160 off
= SSD_WRITE_OT_REG0
+ (i
* SSD_CTRL_REG_ZONE_SZ
);
5161 val
= ssd_reg32_read(dev
->ctrlp
+ off
);
5162 if ((val
>> 22) & 0x1) {
5168 *status
= !!dev
->ot_delay
;
5175 static void ssd_set_ot_protect(struct ssd_device
*dev
, int protect
)
5181 mutex_lock(&dev
->fw_mutex
);
5183 dev
->ot_protect
= !!protect
;
5185 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2_2
) {
5186 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
5187 off
= SSD_READ_OT_REG0
+ (i
* SSD_CTRL_REG_ZONE_SZ
);
5188 val
= ssd_reg32_read(dev
->ctrlp
+ off
);
5189 if (dev
->ot_protect
) {
5194 ssd_reg32_write(dev
->ctrlp
+ off
, val
);
5197 off
= SSD_WRITE_OT_REG0
+ (i
* SSD_CTRL_REG_ZONE_SZ
);
5198 val
= ssd_reg32_read(dev
->ctrlp
+ off
);
5199 if (dev
->ot_protect
) {
5204 ssd_reg32_write(dev
->ctrlp
+ off
, val
);
5208 mutex_unlock(&dev
->fw_mutex
);
5211 static int ssd_init_ot_protect(struct ssd_device
*dev
)
5213 ssd_set_ot_protect(dev
, ot_protect
);
5215 #ifdef SSD_OT_PROTECT
5216 ssd_check_temperature(dev
, SSD_OT_TEMP
);
5223 static int ssd_read_log(struct ssd_device
*dev
, int ctrl_idx
, void *buf
, int *nr_log
)
5225 struct ssd_log_op_msg
*msg
;
5226 struct ssd_log_msg
*lmsg
;
5228 size_t length
= dev
->hw_info
.log_sz
;
5231 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
) {
5235 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_FROMDEVICE
);
5236 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
5237 ret
= dma_mapping_error(buf_dma
);
5239 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
5242 hio_warn("%s: unable to map read DMA buffer\n", dev
->name
);
5243 goto out_dma_mapping
;
5246 msg
= (struct ssd_log_op_msg
*)ssd_get_dmsg(dev
);
5248 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
5249 lmsg
= (struct ssd_log_msg
*)msg
;
5250 lmsg
->fun
= SSD_FUNC_READ_LOG
;
5251 lmsg
->ctrl_idx
= ctrl_idx
;
5252 lmsg
->buf
= buf_dma
;
5254 msg
->fun
= SSD_FUNC_READ_LOG
;
5255 msg
->ctrl_idx
= ctrl_idx
;
5259 ret
= ssd_do_request(dev
, READ
, msg
, nr_log
);
5262 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_FROMDEVICE
);
5268 #define SSD_LOG_PRINT_BUF_SZ 256
5269 static int ssd_parse_log(struct ssd_device
*dev
, struct ssd_log
*log
, int print
)
5271 struct ssd_log_desc
*log_desc
= ssd_log_desc
;
5272 struct ssd_log_entry
*le
;
5274 char print_buf
[SSD_LOG_PRINT_BUF_SZ
];
5280 while (log_desc
->event
!= SSD_UNKNOWN_EVENT
) {
5281 if (log_desc
->event
== le
->event
) {
5291 if (log_desc
->level
< log_level
) {
5296 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
5299 sn
= dev
->labelv3
.barcode
;
5302 print_len
= snprintf(print_buf
, SSD_LOG_PRINT_BUF_SZ
, "%s (%s): <%#x>", dev
->name
, sn
, le
->event
);
5304 if (log
->ctrl_idx
!= SSD_LOG_SW_IDX
) {
5305 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " controller %d", log
->ctrl_idx
);
5308 switch (log_desc
->data
) {
5309 case SSD_LOG_DATA_NONE
:
5311 case SSD_LOG_DATA_LOC
:
5312 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
5313 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " flash %d", le
->data
.loc
.flash
);
5314 if (log_desc
->sblock
) {
5315 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " block %d", le
->data
.loc
.block
);
5317 if (log_desc
->spage
) {
5318 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " page %d", le
->data
.loc
.page
);
5321 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " flash %d", le
->data
.loc1
.flash
);
5322 if (log_desc
->sblock
) {
5323 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " block %d", le
->data
.loc1
.block
);
5325 if (log_desc
->spage
) {
5326 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " page %d", le
->data
.loc1
.page
);
5330 case SSD_LOG_DATA_HEX
:
5331 print_len
+= snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), " info %#x", le
->data
.val
);
5336 /*print_len += */snprintf((print_buf
+ print_len
), (SSD_LOG_PRINT_BUF_SZ
- print_len
), ": %s", log_desc
->desc
);
5338 switch (log_desc
->level
) {
5339 case SSD_LOG_LEVEL_INFO
:
5340 hio_info("%s\n", print_buf
);
5342 case SSD_LOG_LEVEL_NOTICE
:
5343 hio_note("%s\n", print_buf
);
5345 case SSD_LOG_LEVEL_WARNING
:
5346 hio_warn("%s\n", print_buf
);
5348 case SSD_LOG_LEVEL_ERR
:
5349 hio_err("%s\n", print_buf
);
5350 //printk(KERN_ERR MODULE_NAME": some exception occurred, please check the data or refer to FAQ.");
5353 hio_warn("%s\n", print_buf
);
5358 return log_desc
->level
;
5361 static int ssd_bm_get_sfstatus(struct ssd_device
*dev
, uint16_t *status
);
5362 static int ssd_switch_wmode(struct ssd_device
*dev
, int wmode
);
5365 static int ssd_handle_event(struct ssd_device
*dev
, uint16_t event
, int level
)
5370 case SSD_LOG_OVER_TEMP
: {
5371 #ifdef SSD_OT_PROTECT
5372 if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_CTRL
), &dev
->hwmon
)) {
5373 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
&& dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2_2
) {
5374 hio_warn("%s: Over temperature, please check the fans.\n", dev
->name
);
5375 dev
->ot_delay
= SSD_OT_DELAY
;
5382 case SSD_LOG_NORMAL_TEMP
: {
5383 #ifdef SSD_OT_PROTECT
5384 /* need to check all controller's temperature */
5385 ssd_check_temperature(dev
, SSD_OT_TEMP_HYST
);
5390 case SSD_LOG_BATTERY_FAULT
: {
5393 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
5394 if (!ssd_bm_get_sfstatus(dev
, &sfstatus
)) {
5395 ssd_gen_swlog(dev
, SSD_LOG_BM_SFSTATUS
, sfstatus
);
5399 if (!test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
5400 ssd_switch_wmode(dev
, dev
->user_wmode
);
5405 case SSD_LOG_BATTERY_OK
: {
5406 if (test_and_clear_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
5407 ssd_switch_wmode(dev
, dev
->user_wmode
);
5412 case SSD_LOG_BOARD_VOLT_FAULT
: {
5413 ssd_mon_boardvolt(dev
);
5417 case SSD_LOG_CLEAR_LOG
: {
5419 memset(&dev
->smart
.log_info
, 0, sizeof(struct ssd_log_info
));
5423 case SSD_LOG_CAP_VOLT_FAULT
:
5424 case SSD_LOG_CAP_LEARN_FAULT
:
5425 case SSD_LOG_CAP_SHORT_CIRCUIT
: {
5426 if (!test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
5427 ssd_switch_wmode(dev
, dev
->user_wmode
);
5436 /* ssd event call */
5437 if (dev
->event_call
) {
5438 dev
->event_call(dev
->gd
, event
, level
);
5441 if (SSD_LOG_CAP_VOLT_FAULT
== event
|| SSD_LOG_CAP_LEARN_FAULT
== event
|| SSD_LOG_CAP_SHORT_CIRCUIT
== event
) {
5442 dev
->event_call(dev
->gd
, SSD_LOG_BATTERY_FAULT
, level
);
5449 static int ssd_save_log(struct ssd_device
*dev
, struct ssd_log
*log
)
5455 mutex_lock(&dev
->internal_log_mutex
);
5457 size
= sizeof(struct ssd_log
);
5458 off
= dev
->internal_log
.nr_log
* size
;
5460 if (off
== dev
->rom_info
.log_sz
) {
5461 if (dev
->internal_log
.nr_log
== dev
->smart
.log_info
.nr_log
) {
5462 hio_warn("%s: internal log is full\n", dev
->name
);
5467 internal_log
= dev
->internal_log
.log
+ off
;
5468 memcpy(internal_log
, log
, size
);
5470 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
) {
5471 off
+= dev
->rom_info
.log_base
;
5473 ret
= ssd_spi_write(dev
, log
, off
, size
);
5479 dev
->internal_log
.nr_log
++;
5482 mutex_unlock(&dev
->internal_log_mutex
);
5486 static int ssd_save_swlog(struct ssd_device
*dev
, uint16_t event
, uint32_t data
)
5493 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
5496 memset(&log
, 0, sizeof(struct ssd_log
));
5498 do_gettimeofday(&tv
);
5499 log
.ctrl_idx
= SSD_LOG_SW_IDX
;
5500 log
.time
= tv
.tv_sec
;
5501 log
.le
.event
= event
;
5502 log
.le
.data
.val
= data
;
5504 level
= ssd_parse_log(dev
, &log
, 0);
5505 if (level
>= SSD_LOG_LEVEL
) {
5506 ret
= ssd_save_log(dev
, &log
);
5510 if (SSD_LOG_LEVEL_ERR
== level
) {
5515 dev
->smart
.log_info
.nr_log
++;
5516 dev
->smart
.log_info
.stat
[level
]++;
5519 ssd_handle_event(dev
, event
, level
);
5524 static int ssd_gen_swlog(struct ssd_device
*dev
, uint16_t event
, uint32_t data
)
5526 struct ssd_log_entry le
;
5529 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
5537 memset(&le
, 0, sizeof(struct ssd_log_entry
));
5541 ret
= sfifo_put(&dev
->log_fifo
, &le
);
5546 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
5547 queue_work(dev
->workq
, &dev
->log_work
);
5553 static int ssd_do_swlog(struct ssd_device
*dev
)
5555 struct ssd_log_entry le
;
5558 memset(&le
, 0, sizeof(struct ssd_log_entry
));
5559 while (!sfifo_get(&dev
->log_fifo
, &le
)) {
5560 ret
= ssd_save_swlog(dev
, le
.event
, le
.data
.val
);
5569 static int __ssd_clear_log(struct ssd_device
*dev
)
5571 uint32_t off
, length
;
5574 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
5578 if (dev
->internal_log
.nr_log
== 0) {
5582 mutex_lock(&dev
->internal_log_mutex
);
5584 off
= dev
->rom_info
.log_base
;
5585 length
= dev
->rom_info
.log_sz
;
5587 ret
= ssd_spi_erase(dev
, off
, length
);
5589 hio_warn("%s: log erase: failed\n", dev
->name
);
5593 dev
->internal_log
.nr_log
= 0;
5596 mutex_unlock(&dev
->internal_log_mutex
);
5600 static int ssd_clear_log(struct ssd_device
*dev
)
5604 ret
= __ssd_clear_log(dev
);
5606 ssd_gen_swlog(dev
, SSD_LOG_CLEAR_LOG
, 0);
5612 static int ssd_do_log(struct ssd_device
*dev
, int ctrl_idx
, void *buf
)
5614 struct ssd_log_entry
*le
;
5621 ret
= ssd_read_log(dev
, ctrl_idx
, buf
, &nr_log
);
5626 do_gettimeofday(&tv
);
5628 log
.time
= tv
.tv_sec
;
5629 log
.ctrl_idx
= ctrl_idx
;
5631 le
= (ssd_log_entry_t
*)buf
;
5632 while (nr_log
> 0) {
5633 memcpy(&log
.le
, le
, sizeof(struct ssd_log_entry
));
5635 level
= ssd_parse_log(dev
, &log
, 1);
5636 if (level
>= SSD_LOG_LEVEL
) {
5637 ssd_save_log(dev
, &log
);
5641 if (SSD_LOG_LEVEL_ERR
== level
) {
5645 dev
->smart
.log_info
.nr_log
++;
5646 if (SSD_LOG_SEU_FAULT
!= le
->event
&& SSD_LOG_SEU_FAULT1
!= le
->event
) {
5647 dev
->smart
.log_info
.stat
[level
]++;
5651 /* log to the volatile log info */
5652 dev
->log_info
.nr_log
++;
5653 dev
->log_info
.stat
[level
]++;
5657 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FLAG
);
5659 /*dev->readonly = 1;
5660 set_disk_ro(dev->gd, 1);
5661 hio_warn("%s: switched to read-only mode.\n", dev->name);*/
5665 ssd_handle_event(dev
, le
->event
, level
);
5674 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
5675 static void ssd_log_worker(void *data
)
5677 struct ssd_device
*dev
= (struct ssd_device
*)data
;
5679 static void ssd_log_worker(struct work_struct
*work
)
5681 struct ssd_device
*dev
= container_of(work
, struct ssd_device
, log_work
);
5686 if (!test_bit(SSD_LOG_ERR
, &dev
->state
) && test_bit(SSD_ONLINE
, &dev
->state
)) {
5688 if (!dev
->log_buf
) {
5689 dev
->log_buf
= kmalloc(dev
->hw_info
.log_sz
, GFP_KERNEL
);
5690 if (!dev
->log_buf
) {
5691 hio_warn("%s: ssd_log_worker: no mem\n", dev
->name
);
5697 if (test_and_clear_bit(SSD_LOG_HW
, &dev
->state
)) {
5698 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
5699 ret
= ssd_do_log(dev
, i
, dev
->log_buf
);
5701 (void)test_and_set_bit(SSD_LOG_ERR
, &dev
->state
);
5702 hio_warn("%s: do log fail\n", dev
->name
);
5708 ret
= ssd_do_swlog(dev
);
5710 hio_warn("%s: do swlog fail\n", dev
->name
);
5714 static void ssd_cleanup_log(struct ssd_device
*dev
)
5717 kfree(dev
->log_buf
);
5718 dev
->log_buf
= NULL
;
5721 sfifo_free(&dev
->log_fifo
);
5723 if (dev
->internal_log
.log
) {
5724 vfree(dev
->internal_log
.log
);
5725 dev
->internal_log
.log
= NULL
;
5729 static int ssd_init_log(struct ssd_device
*dev
)
5731 struct ssd_log
*log
;
5736 mutex_init(&dev
->internal_log_mutex
);
5738 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
5739 INIT_WORK(&dev
->log_work
, ssd_log_worker
, dev
);
5741 INIT_WORK(&dev
->log_work
, ssd_log_worker
);
5744 off
= dev
->rom_info
.log_base
;
5745 size
= dev
->rom_info
.log_sz
;
5747 dev
->internal_log
.log
= vmalloc(size
);
5748 if (!dev
->internal_log
.log
) {
5753 ret
= sfifo_alloc(&dev
->log_fifo
, SSD_LOG_FIFO_SZ
, sizeof(struct ssd_log_entry
));
5755 goto out_alloc_log_fifo
;
5758 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
5762 log
= (struct ssd_log
*)dev
->internal_log
.log
;
5763 while (len
< size
) {
5764 ret
= ssd_spi_read(dev
, log
, off
, sizeof(struct ssd_log
));
5769 if (log
->ctrl_idx
== 0xff) {
5773 dev
->internal_log
.nr_log
++;
5775 len
+= sizeof(struct ssd_log
);
5776 off
+= sizeof(struct ssd_log
);
5782 sfifo_free(&dev
->log_fifo
);
5784 vfree(dev
->internal_log
.log
);
5785 dev
->internal_log
.log
= NULL
;
5786 dev
->internal_log
.nr_log
= 0;
5788 /* skip error if not in standard mode */
5789 if (mode
!= SSD_DRV_MODE_STANDARD
) {
5796 static void ssd_stop_workq(struct ssd_device
*dev
)
5798 test_and_clear_bit(SSD_INIT_WORKQ
, &dev
->state
);
5799 flush_workqueue(dev
->workq
);
5802 static void ssd_start_workq(struct ssd_device
*dev
)
5804 (void)test_and_set_bit(SSD_INIT_WORKQ
, &dev
->state
);
5807 queue_work(dev
->workq
, &dev
->log_work
);
5810 static void ssd_cleanup_workq(struct ssd_device
*dev
)
5812 flush_workqueue(dev
->workq
);
5813 destroy_workqueue(dev
->workq
);
5817 static int ssd_init_workq(struct ssd_device
*dev
)
5821 dev
->workq
= create_singlethread_workqueue(dev
->name
);
5832 static int ssd_init_rom_info(struct ssd_device
*dev
)
5836 mutex_init(&dev
->spi_mutex
);
5837 mutex_init(&dev
->i2c_mutex
);
5839 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
5840 /* fix bug: read data to clear status */
5841 (void)ssd_reg32_read(dev
->ctrlp
+ SSD_SPI_REG_RDATA
);
5843 dev
->rom_info
.size
= SSD_ROM_SIZE
;
5844 dev
->rom_info
.block_size
= SSD_ROM_BLK_SIZE
;
5845 dev
->rom_info
.page_size
= SSD_ROM_PAGE_SIZE
;
5847 dev
->rom_info
.bridge_fw_base
= SSD_ROM_BRIDGE_FW_BASE
;
5848 dev
->rom_info
.bridge_fw_sz
= SSD_ROM_BRIDGE_FW_SIZE
;
5849 dev
->rom_info
.nr_bridge_fw
= SSD_ROM_NR_BRIDGE_FW
;
5851 dev
->rom_info
.ctrl_fw_base
= SSD_ROM_CTRL_FW_BASE
;
5852 dev
->rom_info
.ctrl_fw_sz
= SSD_ROM_CTRL_FW_SIZE
;
5853 dev
->rom_info
.nr_ctrl_fw
= SSD_ROM_NR_CTRL_FW
;
5855 dev
->rom_info
.log_sz
= SSD_ROM_LOG_SZ
;
5857 dev
->rom_info
.vp_base
= SSD_ROM_VP_BASE
;
5858 dev
->rom_info
.label_base
= SSD_ROM_LABEL_BASE
;
5859 } else if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
5860 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_INFO_REG
);
5861 dev
->rom_info
.size
= 0x100000 * (1U << (val
& 0xFF));
5862 dev
->rom_info
.block_size
= 0x10000 * (1U << ((val
>>8) & 0xFF));
5863 dev
->rom_info
.page_size
= (val
>>16) & 0xFFFF;
5865 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_BRIDGE_FW_INFO_REG
);
5866 dev
->rom_info
.bridge_fw_base
= dev
->rom_info
.block_size
* (val
& 0xFFFF);
5867 dev
->rom_info
.bridge_fw_sz
= dev
->rom_info
.block_size
* ((val
>>16) & 0x3FFF);
5868 dev
->rom_info
.nr_bridge_fw
= ((val
>> 30) & 0x3) + 1;
5870 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_CTRL_FW_INFO_REG
);
5871 dev
->rom_info
.ctrl_fw_base
= dev
->rom_info
.block_size
* (val
& 0xFFFF);
5872 dev
->rom_info
.ctrl_fw_sz
= dev
->rom_info
.block_size
* ((val
>>16) & 0x3FFF);
5873 dev
->rom_info
.nr_ctrl_fw
= ((val
>> 30) & 0x3) + 1;
5875 dev
->rom_info
.bm_fw_base
= dev
->rom_info
.ctrl_fw_base
+ (dev
->rom_info
.nr_ctrl_fw
* dev
->rom_info
.ctrl_fw_sz
);
5876 dev
->rom_info
.bm_fw_sz
= SSD_PV3_ROM_BM_FW_SZ
;
5877 dev
->rom_info
.nr_bm_fw
= SSD_PV3_ROM_NR_BM_FW
;
5879 dev
->rom_info
.log_base
= dev
->rom_info
.bm_fw_base
+ (dev
->rom_info
.nr_bm_fw
* dev
->rom_info
.bm_fw_sz
);
5880 dev
->rom_info
.log_sz
= SSD_ROM_LOG_SZ
;
5882 dev
->rom_info
.smart_base
= dev
->rom_info
.log_base
+ dev
->rom_info
.log_sz
;
5883 dev
->rom_info
.smart_sz
= SSD_PV3_ROM_SMART_SZ
;
5884 dev
->rom_info
.nr_smart
= SSD_PV3_ROM_NR_SMART
;
5886 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_VP_INFO_REG
);
5887 dev
->rom_info
.vp_base
= dev
->rom_info
.block_size
* val
;
5888 dev
->rom_info
.label_base
= dev
->rom_info
.vp_base
+ dev
->rom_info
.block_size
;
5889 if (dev
->rom_info
.label_base
>= dev
->rom_info
.size
) {
5890 dev
->rom_info
.label_base
= dev
->rom_info
.vp_base
- dev
->rom_info
.block_size
;
5893 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_INFO_REG
);
5894 dev
->rom_info
.size
= 0x100000 * (1U << (val
& 0xFF));
5895 dev
->rom_info
.block_size
= 0x10000 * (1U << ((val
>>8) & 0xFF));
5896 dev
->rom_info
.page_size
= (val
>>16) & 0xFFFF;
5898 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_BRIDGE_FW_INFO_REG
);
5899 dev
->rom_info
.bridge_fw_base
= dev
->rom_info
.block_size
* (val
& 0xFFFF);
5900 dev
->rom_info
.bridge_fw_sz
= dev
->rom_info
.block_size
* ((val
>>16) & 0x3FFF);
5901 dev
->rom_info
.nr_bridge_fw
= ((val
>> 30) & 0x3) + 1;
5903 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_CTRL_FW_INFO_REG
);
5904 dev
->rom_info
.ctrl_fw_base
= dev
->rom_info
.block_size
* (val
& 0xFFFF);
5905 dev
->rom_info
.ctrl_fw_sz
= dev
->rom_info
.block_size
* ((val
>>16) & 0x3FFF);
5906 dev
->rom_info
.nr_ctrl_fw
= ((val
>> 30) & 0x3) + 1;
5908 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ROM_VP_INFO_REG
);
5909 dev
->rom_info
.vp_base
= dev
->rom_info
.block_size
* val
;
5910 dev
->rom_info
.label_base
= dev
->rom_info
.vp_base
- SSD_PV3_2_ROM_SEC_SZ
;
5912 dev
->rom_info
.nr_smart
= SSD_PV3_ROM_NR_SMART
;
5913 dev
->rom_info
.smart_sz
= SSD_PV3_2_ROM_SEC_SZ
;
5914 dev
->rom_info
.smart_base
= dev
->rom_info
.label_base
- (dev
->rom_info
.smart_sz
* dev
->rom_info
.nr_smart
);
5915 if (dev
->rom_info
.smart_sz
> dev
->rom_info
.block_size
) {
5916 dev
->rom_info
.smart_sz
= dev
->rom_info
.block_size
;
5919 dev
->rom_info
.log_sz
= SSD_PV3_2_ROM_LOG_SZ
;
5920 dev
->rom_info
.log_base
= dev
->rom_info
.smart_base
- dev
->rom_info
.log_sz
;
5923 return ssd_init_spi(dev
);
5927 static int ssd_update_smart(struct ssd_device
*dev
, struct ssd_smart
*smart
)
5931 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
5932 struct hd_struct
*part
;
5938 if (!test_bit(SSD_INIT_BD
, &dev
->state
)) {
5942 do_gettimeofday(&tv
);
5943 if ((uint64_t)tv
.tv_sec
< dev
->uptime
) {
5946 run_time
= tv
.tv_sec
- dev
->uptime
;
5949 /* avoid frequently update */
5950 if (run_time
>= 60) {
5955 smart
->io_stat
.run_time
+= run_time
;
5957 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
5958 cpu
= part_stat_lock();
5959 part
= &dev
->gd
->part0
;
5960 part_round_stats(cpu
, part
);
5963 smart
->io_stat
.nr_read
+= part_stat_read(part
, ios
[READ
]);
5964 smart
->io_stat
.nr_write
+= part_stat_read(part
, ios
[WRITE
]);
5965 smart
->io_stat
.rsectors
+= part_stat_read(part
, sectors
[READ
]);
5966 smart
->io_stat
.wsectors
+= part_stat_read(part
, sectors
[WRITE
]);
5967 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14))
5969 disk_round_stats(dev
->gd
);
5972 smart
->io_stat
.nr_read
+= disk_stat_read(dev
->gd
, ios
[READ
]);
5973 smart
->io_stat
.nr_write
+= disk_stat_read(dev
->gd
, ios
[WRITE
]);
5974 smart
->io_stat
.rsectors
+= disk_stat_read(dev
->gd
, sectors
[READ
]);
5975 smart
->io_stat
.wsectors
+= disk_stat_read(dev
->gd
, sectors
[WRITE
]);
5978 disk_round_stats(dev
->gd
);
5981 smart
->io_stat
.nr_read
+= disk_stat_read(dev
->gd
, reads
);
5982 smart
->io_stat
.nr_write
+= disk_stat_read(dev
->gd
, writes
);
5983 smart
->io_stat
.rsectors
+= disk_stat_read(dev
->gd
, read_sectors
);
5984 smart
->io_stat
.wsectors
+= disk_stat_read(dev
->gd
, write_sectors
);
5987 smart
->io_stat
.nr_to
+= atomic_read(&dev
->tocnt
);
5989 for (i
=0; i
<dev
->nr_queue
; i
++) {
5990 smart
->io_stat
.nr_rwerr
+= dev
->queue
[i
].io_stat
.nr_rwerr
;
5991 smart
->io_stat
.nr_ioerr
+= dev
->queue
[i
].io_stat
.nr_ioerr
;
5994 for (i
=0; i
<dev
->nr_queue
; i
++) {
5995 for (j
=0; j
<SSD_ECC_MAX_FLIP
; j
++) {
5996 smart
->ecc_info
.bitflip
[j
] += dev
->queue
[i
].ecc_info
.bitflip
[j
];
6000 //dev->uptime = tv.tv_sec;
6005 static int ssd_clear_smart(struct ssd_device
*dev
)
6009 uint32_t off
, length
;
6013 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
6018 off
= dev
->rom_info
.smart_base
;
6019 length
= dev
->rom_info
.smart_sz
* dev
->rom_info
.nr_smart
;
6021 ret
= ssd_spi_erase(dev
, off
, length
);
6023 hio_warn("%s: info erase: failed\n", dev
->name
);
6027 sversion
= dev
->smart
.version
;
6029 memset(&dev
->smart
, 0, sizeof(struct ssd_smart
));
6030 dev
->smart
.version
= sversion
+ 1;
6031 dev
->smart
.magic
= SSD_SMART_MAGIC
;
6033 /* clear all tmp acc */
6034 for (i
=0; i
<dev
->nr_queue
; i
++) {
6035 memset(&(dev
->queue
[i
].io_stat
), 0, sizeof(struct ssd_io_stat
));
6036 memset(&(dev
->queue
[i
].ecc_info
), 0, sizeof(struct ssd_ecc_info
));
6039 atomic_set(&dev
->tocnt
, 0);
6041 /* clear tmp log info */
6042 memset(&dev
->log_info
, 0, sizeof(struct ssd_log_info
));
6044 do_gettimeofday(&tv
);
6045 dev
->uptime
= tv
.tv_sec
;
6048 //ssd_clear_alarm(dev);
6053 static int ssd_save_smart(struct ssd_device
*dev
)
6059 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
6062 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
6066 if (!ssd_update_smart(dev
, &dev
->smart
)) {
6070 dev
->smart
.version
++;
6072 for (i
=0; i
<dev
->rom_info
.nr_smart
; i
++) {
6073 off
= dev
->rom_info
.smart_base
+ (dev
->rom_info
.smart_sz
* i
);
6074 size
= dev
->rom_info
.smart_sz
;
6076 ret
= ssd_spi_erase(dev
, off
, size
);
6078 hio_warn("%s: info erase failed\n", dev
->name
);
6082 size
= sizeof(struct ssd_smart
);
6084 ret
= ssd_spi_write(dev
, &dev
->smart
, off
, size
);
6086 hio_warn("%s: info write failed\n", dev
->name
);
6097 static int ssd_init_smart(struct ssd_device
*dev
)
6099 struct ssd_smart
*smart
;
6105 do_gettimeofday(&tv
);
6106 dev
->uptime
= tv
.tv_sec
;
6108 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
6112 smart
= kmalloc(sizeof(struct ssd_smart
) * SSD_ROM_NR_SMART_MAX
, GFP_KERNEL
);
6118 memset(&dev
->smart
, 0, sizeof(struct ssd_smart
));
6121 for (i
=0; i
<dev
->rom_info
.nr_smart
; i
++) {
6122 memset(&smart
[i
], 0, sizeof(struct ssd_smart
));
6124 off
= dev
->rom_info
.smart_base
+ (dev
->rom_info
.smart_sz
* i
);
6125 size
= sizeof(struct ssd_smart
);
6127 ret
= ssd_spi_read(dev
, &smart
[i
], off
, size
);
6129 hio_warn("%s: info read failed\n", dev
->name
);
6133 if (smart
[i
].magic
!= SSD_SMART_MAGIC
) {
6135 smart
[i
].version
= 0;
6139 if (smart
[i
].version
> dev
->smart
.version
) {
6140 memcpy(&dev
->smart
, &smart
[i
], sizeof(struct ssd_smart
));
6144 if (dev
->smart
.magic
!= SSD_SMART_MAGIC
) {
6145 /* first time power up */
6146 dev
->smart
.magic
= SSD_SMART_MAGIC
;
6147 dev
->smart
.version
= 1;
6150 /* check log info */
6152 struct ssd_log_info log_info
;
6153 struct ssd_log
*log
= (struct ssd_log
*)dev
->internal_log
.log
;
6155 memset(&log_info
, 0, sizeof(struct ssd_log_info
));
6157 while (log_info
.nr_log
< dev
->internal_log
.nr_log
) {
6158 /* skip the volatile log info */
6159 if (SSD_LOG_SEU_FAULT
!= log
->le
.event
&& SSD_LOG_SEU_FAULT1
!= log
->le
.event
) {
6160 log_info
.stat
[ssd_parse_log(dev
, log
, 0)]++;
6168 for (i
=(SSD_LOG_NR_LEVEL
-1); i
>=0; i
--) {
6169 if (log_info
.stat
[i
] > dev
->smart
.log_info
.stat
[i
]) {
6171 memcpy(&dev
->smart
.log_info
, &log_info
, sizeof(struct ssd_log_info
));
6172 dev
->smart
.version
++;
6178 for (i
=0; i
<dev
->rom_info
.nr_smart
; i
++) {
6179 if (smart
[i
].magic
== SSD_SMART_MAGIC
&& smart
[i
].version
== dev
->smart
.version
) {
6183 off
= dev
->rom_info
.smart_base
+ (dev
->rom_info
.smart_sz
* i
);
6184 size
= dev
->rom_info
.smart_sz
;
6186 ret
= ssd_spi_erase(dev
, off
, size
);
6188 hio_warn("%s: info erase failed\n", dev
->name
);
6192 size
= sizeof(struct ssd_smart
);
6193 ret
= ssd_spi_write(dev
, &dev
->smart
, off
, size
);
6195 hio_warn("%s: info write failed\n", dev
->name
);
6202 /* sync smart with alarm led */
6203 if (dev
->smart
.io_stat
.nr_to
|| dev
->smart
.io_stat
.nr_rwerr
|| dev
->smart
.log_info
.stat
[SSD_LOG_LEVEL_ERR
]) {
6204 hio_warn("%s: some fault found in the history info\n", dev
->name
);
6211 /* skip error if not in standard mode */
6212 if (mode
!= SSD_DRV_MODE_STANDARD
) {
6219 static int __ssd_bm_get_version(struct ssd_device
*dev
, uint16_t *ver
)
6221 struct ssd_bm_manufacturer_data bm_md
= {0};
6222 uint16_t sc_id
= SSD_BM_SYSTEM_DATA_SUBCLASS_ID
;
6230 mutex_lock(&dev
->bm_mutex
);
6232 cmd
= SSD_BM_DATA_FLASH_SUBCLASS_ID
;
6233 ret
= ssd_smbus_write_word(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, (uint8_t *)&sc_id
);
6238 cmd
= SSD_BM_DATA_FLASH_SUBCLASS_ID_PAGE1
;
6239 ret
= ssd_smbus_read_block(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, sizeof(struct ssd_bm_manufacturer_data
), (uint8_t *)&bm_md
);
6244 if (bm_md
.firmware_ver
& 0xF000) {
6249 *ver
= bm_md
.firmware_ver
;
6252 mutex_unlock(&dev
->bm_mutex
);
6256 static int ssd_bm_get_version(struct ssd_device
*dev
, uint16_t *ver
)
6259 int i
= SSD_BM_RETRY_MAX
;
6263 ret
= __ssd_bm_get_version(dev
, &tmp
);
6277 static int __ssd_bm_nr_cap(struct ssd_device
*dev
, int *nr_cap
)
6279 struct ssd_bm_configuration_registers bm_cr
;
6280 uint16_t sc_id
= SSD_BM_CONFIGURATION_REGISTERS_ID
;
6284 mutex_lock(&dev
->bm_mutex
);
6286 cmd
= SSD_BM_DATA_FLASH_SUBCLASS_ID
;
6287 ret
= ssd_smbus_write_word(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, (uint8_t *)&sc_id
);
6292 cmd
= SSD_BM_DATA_FLASH_SUBCLASS_ID_PAGE1
;
6293 ret
= ssd_smbus_read_block(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, sizeof(struct ssd_bm_configuration_registers
), (uint8_t *)&bm_cr
);
6298 if (bm_cr
.operation_cfg
.cc
== 0 || bm_cr
.operation_cfg
.cc
> 4) {
6303 *nr_cap
= bm_cr
.operation_cfg
.cc
+ 1;
6306 mutex_unlock(&dev
->bm_mutex
);
6310 static int ssd_bm_nr_cap(struct ssd_device
*dev
, int *nr_cap
)
6313 int i
= SSD_BM_RETRY_MAX
;
6317 ret
= __ssd_bm_nr_cap(dev
, &tmp
);
6331 static int ssd_bm_enter_cap_learning(struct ssd_device
*dev
)
6333 uint16_t buf
= SSD_BM_ENTER_CAP_LEARNING
;
6334 uint8_t cmd
= SSD_BM_MANUFACTURERACCESS
;
6337 ret
= ssd_smbus_write_word(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, (uint8_t *)&buf
);
6346 static int ssd_bm_get_sfstatus(struct ssd_device
*dev
, uint16_t *status
)
6349 uint8_t cmd
= SSD_BM_SAFETYSTATUS
;
6352 ret
= ssd_smbus_read_word(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, (uint8_t *)&val
);
6362 static int ssd_bm_get_opstatus(struct ssd_device
*dev
, uint16_t *status
)
6365 uint8_t cmd
= SSD_BM_OPERATIONSTATUS
;
6368 ret
= ssd_smbus_read_word(dev
, SSD_BM_SLAVE_ADDRESS
, cmd
, (uint8_t *)&val
);
6378 static int ssd_get_bmstruct(struct ssd_device
*dev
, struct ssd_bm
*bm_status_out
)
6380 struct sbs_cmd
*bm_sbs
= ssd_bm_sbs
;
6381 struct ssd_bm bm_status
;
6382 uint8_t buf
[2] = {0, };
6387 memset(&bm_status
, 0, sizeof(struct ssd_bm
));
6389 while (bm_sbs
->desc
!= NULL
) {
6390 switch (bm_sbs
->size
) {
6392 ret
= ssd_smbus_read_byte(dev
, SSD_BM_SLAVE_ADDRESS
, bm_sbs
->cmd
, buf
);
6394 //printf("Error: smbus read byte %#x\n", bm_sbs->cmd);
6400 ret
= ssd_smbus_read_word(dev
, SSD_BM_SLAVE_ADDRESS
, bm_sbs
->cmd
, (uint8_t *)&val
);
6402 //printf("Error: smbus read word %#x\n", bm_sbs->cmd);
6405 //val = *(uint16_t *)buf;
6413 switch (bm_sbs
->unit
) {
6414 case SBS_UNIT_VALUE
:
6415 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
& bm_sbs
->mask
;
6417 case SBS_UNIT_TEMPERATURE
:
6418 cval
= (uint16_t)(val
- 2731) / 10;
6419 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = cval
;
6421 case SBS_UNIT_VOLTAGE
:
6422 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
;
6424 case SBS_UNIT_CURRENT
:
6425 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
;
6428 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
;
6430 case SBS_UNIT_PERCENT
:
6431 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
;
6433 case SBS_UNIT_CAPACITANCE
:
6434 *(uint16_t *)bm_var(&bm_status
, bm_sbs
->off
) = val
;
6445 memcpy(bm_status_out
, &bm_status
, sizeof(struct ssd_bm
));
6451 static int __ssd_bm_status(struct ssd_device
*dev
, int *status
)
6453 struct ssd_bm bm_status
= {0};
6458 ret
= ssd_get_bmstruct(dev
, &bm_status
);
6463 /* capacitor voltage */
6464 ret
= ssd_bm_nr_cap(dev
, &nr_cap
);
6469 for (i
=0; i
<nr_cap
; i
++) {
6470 if (bm_status
.cap_volt
[i
] < SSD_BM_CAP_VOLT_MIN
) {
6471 *status
= SSD_BMSTATUS_WARNING
;
6477 if (bm_status
.sf_status
) {
6478 *status
= SSD_BMSTATUS_WARNING
;
6483 if (!((bm_status
.op_status
>> 12) & 0x1)) {
6484 *status
= SSD_BMSTATUS_CHARGING
;
6486 *status
= SSD_BMSTATUS_OK
;
6493 static void ssd_set_flush_timeout(struct ssd_device
*dev
, int mode
);
6495 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
6496 static void ssd_bm_worker(void *data
)
6498 struct ssd_device
*dev
= (struct ssd_device
*)data
;
6500 static void ssd_bm_worker(struct work_struct
*work
)
6502 struct ssd_device
*dev
= container_of(work
, struct ssd_device
, bm_work
);
6508 if (mode
!= SSD_DRV_MODE_STANDARD
) {
6512 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_1
) {
6516 if (dev
->hw_info_ext
.plp_type
!= SSD_PLP_SCAP
) {
6520 ret
= ssd_bm_get_opstatus(dev
, &opstatus
);
6522 hio_warn("%s: get bm operationstatus failed\n", dev
->name
);
6526 /* need cap learning ? */
6527 if (!(opstatus
& 0xF0)) {
6528 ret
= ssd_bm_enter_cap_learning(dev
);
6530 hio_warn("%s: enter capacitance learning failed\n", dev
->name
);
6536 static void ssd_bm_routine_start(void *data
)
6538 struct ssd_device
*dev
;
6545 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
6546 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
6547 queue_work(dev
->workq
, &dev
->bm_work
);
6549 queue_work(dev
->workq
, &dev
->capmon_work
);
6555 static int ssd_do_cap_learn(struct ssd_device
*dev
, uint32_t *cap
)
6562 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
6567 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
6572 /* make sure the lm80 voltage value is updated */
6573 msleep(SSD_LM80_CONV_INTERVAL
);
6575 /* check if full charged */
6578 ret
= ssd_smbus_read_word(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_PL_CAP_U1
, (uint8_t *)&val
);
6580 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
6581 ssd_gen_swlog(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
);
6585 u1
= SSD_LM80_CONVERT_VOLT(u16_swap(val
));
6586 if (SSD_PL_CAP_VOLT(u1
) >= SSD_PL_CAP_VOLT_FULL
) {
6591 if (wait
> SSD_PL_CAP_CHARGE_MAX_WAIT
) {
6595 msleep(SSD_PL_CAP_CHARGE_WAIT
);
6598 ret
= ssd_smbus_read_word(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_PL_CAP_U2
, (uint8_t *)&val
);
6600 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
6601 ssd_gen_swlog(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
);
6605 u2
= SSD_LM80_CONVERT_VOLT(u16_swap(val
));
6612 /* enter cap learn */
6613 ssd_reg32_write(dev
->ctrlp
+ SSD_PL_CAP_LEARN_REG
, 0x1);
6617 msleep(SSD_PL_CAP_LEARN_WAIT
);
6619 t
= ssd_reg32_read(dev
->ctrlp
+ SSD_PL_CAP_LEARN_REG
);
6620 if (!((t
>> 1) & 0x1)) {
6625 if (wait
> SSD_PL_CAP_LEARN_MAX_WAIT
) {
6631 if ((t
>> 4) & 0x1) {
6642 *cap
= SSD_PL_CAP_LEARN(u1
, u2
, t
);
6648 static int ssd_cap_learn(struct ssd_device
*dev
, uint32_t *cap
)
6656 mutex_lock(&dev
->bm_mutex
);
6658 ssd_stop_workq(dev
);
6660 ret
= ssd_do_cap_learn(dev
, cap
);
6662 ssd_gen_swlog(dev
, SSD_LOG_CAP_LEARN_FAULT
, 0);
6666 ssd_gen_swlog(dev
, SSD_LOG_CAP_STATUS
, *cap
);
6669 ssd_start_workq(dev
);
6670 mutex_unlock(&dev
->bm_mutex
);
6675 static int ssd_check_pl_cap(struct ssd_device
*dev
)
6683 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
6687 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
6694 ret
= ssd_smbus_read_word(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_PL_CAP_U1
, (uint8_t *)&val
);
6696 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
6697 ssd_gen_swlog(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
);
6701 u1
= SSD_LM80_CONVERT_VOLT(u16_swap(val
));
6702 if (SSD_PL_CAP_VOLT(u1
) >= SSD_PL_CAP_VOLT_READY
) {
6707 if (wait
> SSD_PL_CAP_CHARGE_MAX_WAIT
) {
6709 ssd_gen_swlog(dev
, SSD_LOG_CAP_VOLT_FAULT
, SSD_PL_CAP_VOLT(u1
));
6712 msleep(SSD_PL_CAP_CHARGE_WAIT
);
6715 low
= ssd_lm80_limit
[SSD_LM80_IN_CAP
].low
;
6716 ret
= ssd_smbus_write_byte(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_LM80_REG_IN_MIN(SSD_LM80_IN_CAP
), &low
);
6721 /* enable cap INx */
6722 ret
= ssd_lm80_enable_in(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_LM80_IN_CAP
);
6724 if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80
), &dev
->hwmon
)) {
6725 ssd_gen_swlog(dev
, SSD_LOG_SENSOR_FAULT
, SSD_SENSOR_LM80_SADDRESS
);
6731 /* skip error if not in standard mode */
6732 if (mode
!= SSD_DRV_MODE_STANDARD
) {
6738 static int ssd_check_pl_cap_fast(struct ssd_device
*dev
)
6744 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
6748 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
6753 ret
= ssd_smbus_read_word(dev
, SSD_SENSOR_LM80_SADDRESS
, SSD_PL_CAP_U1
, (uint8_t *)&val
);
6757 u1
= SSD_LM80_CONVERT_VOLT(u16_swap(val
));
6758 if (SSD_PL_CAP_VOLT(u1
) < SSD_PL_CAP_VOLT_READY
) {
6766 static int ssd_init_pl_cap(struct ssd_device
*dev
)
6770 /* set here: user write mode */
6771 dev
->user_wmode
= wmode
;
6773 mutex_init(&dev
->bm_mutex
);
6775 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
6777 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BM_FAULT_REG
);
6778 if ((val
>> 1) & 0x1) {
6779 (void)test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
);
6782 ret
= ssd_check_pl_cap(dev
);
6784 (void)test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
);
/*
 * __end_str() - force a fixed-size label field to be a valid C string.
 * @str: buffer of exactly @len bytes (label fields read raw from SPI flash).
 * @len: size of the buffer in bytes.
 *
 * If a NUL terminator already exists within the first @len bytes the
 * buffer is left untouched; otherwise the last byte is overwritten with
 * '\0' so later printing/strcmp of the field cannot run off the end.
 * NOTE(review): reconstructed from a mangled extraction; tail statements
 * were not visible — confirm against the shipped hio driver source.
 */
static void __end_str(char *str, int len)
{
	int i;

	for (i = 0; i < len; i++) {
		if (*(str + i) == '\0') {
			return;
		}
	}

	/* no terminator found: sacrifice the last byte */
	*(str + len - 1) = '\0';
}
6803 static int ssd_init_label(struct ssd_device
*dev
)
6809 /* label location */
6810 off
= dev
->rom_info
.label_base
;
6812 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
6813 size
= sizeof(struct ssd_label
);
6816 ret
= ssd_spi_read(dev
, &dev
->label
, off
, size
);
6818 memset(&dev
->label
, 0, size
);
6822 __end_str(dev
->label
.date
, SSD_LABEL_FIELD_SZ
);
6823 __end_str(dev
->label
.sn
, SSD_LABEL_FIELD_SZ
);
6824 __end_str(dev
->label
.part
, SSD_LABEL_FIELD_SZ
);
6825 __end_str(dev
->label
.desc
, SSD_LABEL_FIELD_SZ
);
6826 __end_str(dev
->label
.other
, SSD_LABEL_FIELD_SZ
);
6827 __end_str(dev
->label
.maf
, SSD_LABEL_FIELD_SZ
);
6829 size
= sizeof(struct ssd_labelv3
);
6832 ret
= ssd_spi_read(dev
, &dev
->labelv3
, off
, size
);
6834 memset(&dev
->labelv3
, 0, size
);
6838 __end_str(dev
->labelv3
.boardtype
, SSD_LABEL_FIELD_SZ
);
6839 __end_str(dev
->labelv3
.barcode
, SSD_LABEL_FIELD_SZ
);
6840 __end_str(dev
->labelv3
.item
, SSD_LABEL_FIELD_SZ
);
6841 __end_str(dev
->labelv3
.description
, SSD_LABEL_DESC_SZ
);
6842 __end_str(dev
->labelv3
.manufactured
, SSD_LABEL_FIELD_SZ
);
6843 __end_str(dev
->labelv3
.vendorname
, SSD_LABEL_FIELD_SZ
);
6844 __end_str(dev
->labelv3
.issuenumber
, SSD_LABEL_FIELD_SZ
);
6845 __end_str(dev
->labelv3
.cleicode
, SSD_LABEL_FIELD_SZ
);
6846 __end_str(dev
->labelv3
.bom
, SSD_LABEL_FIELD_SZ
);
6850 /* skip error if not in standard mode */
6851 if (mode
!= SSD_DRV_MODE_STANDARD
) {
6857 int ssd_get_label(struct block_device
*bdev
, struct ssd_label
*label
)
6859 struct ssd_device
*dev
;
6861 if (!bdev
|| !label
|| !(bdev
->bd_disk
)) {
6865 dev
= bdev
->bd_disk
->private_data
;
6867 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
6868 memset(label
, 0, sizeof(struct ssd_label
));
6869 memcpy(label
->date
, dev
->labelv3
.manufactured
, SSD_LABEL_FIELD_SZ
);
6870 memcpy(label
->sn
, dev
->labelv3
.barcode
, SSD_LABEL_FIELD_SZ
);
6871 memcpy(label
->desc
, dev
->labelv3
.boardtype
, SSD_LABEL_FIELD_SZ
);
6872 memcpy(label
->maf
, dev
->labelv3
.vendorname
, SSD_LABEL_FIELD_SZ
);
6874 memcpy(label
, &dev
->label
, sizeof(struct ssd_label
));
6880 static int __ssd_get_version(struct ssd_device
*dev
, struct ssd_version_info
*ver
)
6882 uint16_t bm_ver
= 0;
6885 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
&& dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
6886 ret
= ssd_bm_get_version(dev
, &bm_ver
);
6892 ver
->bridge_ver
= dev
->hw_info
.bridge_ver
;
6893 ver
->ctrl_ver
= dev
->hw_info
.ctrl_ver
;
6894 ver
->bm_ver
= bm_ver
;
6895 ver
->pcb_ver
= dev
->hw_info
.pcb_ver
;
6896 ver
->upper_pcb_ver
= dev
->hw_info
.upper_pcb_ver
;
6903 int ssd_get_version(struct block_device
*bdev
, struct ssd_version_info
*ver
)
6905 struct ssd_device
*dev
;
6908 if (!bdev
|| !ver
|| !(bdev
->bd_disk
)) {
6912 dev
= bdev
->bd_disk
->private_data
;
6914 mutex_lock(&dev
->fw_mutex
);
6915 ret
= __ssd_get_version(dev
, ver
);
6916 mutex_unlock(&dev
->fw_mutex
);
6921 static int __ssd_get_temperature(struct ssd_device
*dev
, int *temp
)
6929 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
6935 if (dev
->db_info
.type
== SSD_DEBUG_LOG
&&
6936 (dev
->db_info
.data
.log
.event
== SSD_LOG_OVER_TEMP
||
6937 dev
->db_info
.data
.log
.event
== SSD_LOG_NORMAL_TEMP
||
6938 dev
->db_info
.data
.log
.event
== SSD_LOG_WARN_TEMP
)) {
6939 *temp
= (int)dev
->db_info
.data
.log
.extra
;
6944 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
6945 off
= SSD_CTRL_TEMP_REG0
+ i
* sizeof(uint64_t);
6947 val
= ssd_reg_read(dev
->ctrlp
+ off
);
6948 if (val
== 0xffffffffffffffffull
) {
6952 cur
= (int)CUR_TEMP(val
);
6963 int ssd_get_temperature(struct block_device
*bdev
, int *temp
)
6965 struct ssd_device
*dev
;
6968 if (!bdev
|| !temp
|| !(bdev
->bd_disk
)) {
6972 dev
= bdev
->bd_disk
->private_data
;
6975 mutex_lock(&dev
->fw_mutex
);
6976 ret
= __ssd_get_temperature(dev
, temp
);
6977 mutex_unlock(&dev
->fw_mutex
);
6982 int ssd_set_otprotect(struct block_device
*bdev
, int otprotect
)
6984 struct ssd_device
*dev
;
6986 if (!bdev
|| !(bdev
->bd_disk
)) {
6990 dev
= bdev
->bd_disk
->private_data
;
6991 ssd_set_ot_protect(dev
, !!otprotect
);
6996 int ssd_bm_status(struct block_device
*bdev
, int *status
)
6998 struct ssd_device
*dev
;
7001 if (!bdev
|| !status
|| !(bdev
->bd_disk
)) {
7005 dev
= bdev
->bd_disk
->private_data
;
7007 mutex_lock(&dev
->fw_mutex
);
7008 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
7009 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
7010 *status
= SSD_BMSTATUS_WARNING
;
7012 *status
= SSD_BMSTATUS_OK
;
7014 } else if(dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
) {
7015 ret
= __ssd_bm_status(dev
, status
);
7017 *status
= SSD_BMSTATUS_OK
;
7019 mutex_unlock(&dev
->fw_mutex
);
7024 int ssd_get_pciaddr(struct block_device
*bdev
, struct pci_addr
*paddr
)
7026 struct ssd_device
*dev
;
7028 if (!bdev
|| !paddr
|| !bdev
->bd_disk
) {
7032 dev
= bdev
->bd_disk
->private_data
;
7034 paddr
->domain
= pci_domain_nr(dev
->pdev
->bus
);
7035 paddr
->bus
= dev
->pdev
->bus
->number
;
7036 paddr
->slot
= PCI_SLOT(dev
->pdev
->devfn
);
7037 paddr
->func
= PCI_FUNC(dev
->pdev
->devfn
);
7043 static int ssd_bb_acc(struct ssd_device
*dev
, struct ssd_acc_info
*acc
)
7048 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_1
) {
7052 acc
->threshold_l1
= ssd_reg32_read(dev
->ctrlp
+ SSD_BB_THRESHOLD_L1_REG
);
7053 if (0xffffffffull
== acc
->threshold_l1
) {
7056 acc
->threshold_l2
= ssd_reg32_read(dev
->ctrlp
+ SSD_BB_THRESHOLD_L2_REG
);
7057 if (0xffffffffull
== acc
->threshold_l2
) {
7062 for (ctrl
=0; ctrl
<dev
->hw_info
.nr_ctrl
; ctrl
++) {
7063 for (chip
=0; chip
<dev
->hw_info
.nr_chip
; chip
++) {
7064 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BB_ACC_REG0
+ (SSD_CTRL_REG_ZONE_SZ
* ctrl
) + (SSD_BB_ACC_REG_SZ
* chip
));
7065 if (0xffffffffull
== acc
->val
) {
7068 if (val
> acc
->val
) {
7077 static int ssd_ec_acc(struct ssd_device
*dev
, struct ssd_acc_info
*acc
)
7082 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_1
) {
7086 acc
->threshold_l1
= ssd_reg32_read(dev
->ctrlp
+ SSD_EC_THRESHOLD_L1_REG
);
7087 if (0xffffffffull
== acc
->threshold_l1
) {
7090 acc
->threshold_l2
= ssd_reg32_read(dev
->ctrlp
+ SSD_EC_THRESHOLD_L2_REG
);
7091 if (0xffffffffull
== acc
->threshold_l2
) {
7096 for (ctrl
=0; ctrl
<dev
->hw_info
.nr_ctrl
; ctrl
++) {
7097 for (chip
=0; chip
<dev
->hw_info
.nr_chip
; chip
++) {
7098 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_EC_ACC_REG0
+ (SSD_CTRL_REG_ZONE_SZ
* ctrl
) + (SSD_EC_ACC_REG_SZ
* chip
));
7099 if (0xffffffffull
== acc
->val
) {
7103 if (val
> acc
->val
) {
7114 static int ssd_ram_read_4k(struct ssd_device
*dev
, void *buf
, size_t length
, loff_t ofs
, int ctrl_idx
)
7116 struct ssd_ram_op_msg
*msg
;
7118 size_t len
= length
;
7122 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
|| (uint64_t)(ofs
+ length
) > dev
->hw_info
.ram_size
7123 || !length
|| length
> dev
->hw_info
.ram_max_len
7124 || (length
& (dev
->hw_info
.ram_align
- 1)) != 0 || ((uint64_t)ofs
& (dev
->hw_info
.ram_align
- 1)) != 0) {
7128 len
/= dev
->hw_info
.ram_align
;
7129 do_div(ofs_w
, dev
->hw_info
.ram_align
);
7131 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_FROMDEVICE
);
7132 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7133 ret
= dma_mapping_error(buf_dma
);
7135 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7138 hio_warn("%s: unable to map read DMA buffer\n", dev
->name
);
7139 goto out_dma_mapping
;
7142 msg
= (struct ssd_ram_op_msg
*)ssd_get_dmsg(dev
);
7144 msg
->fun
= SSD_FUNC_RAM_READ
;
7145 msg
->ctrl_idx
= ctrl_idx
;
7146 msg
->start
= (uint32_t)ofs_w
;
7150 ret
= ssd_do_request(dev
, READ
, msg
, NULL
);
7153 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_FROMDEVICE
);
7159 static int ssd_ram_write_4k(struct ssd_device
*dev
, void *buf
, size_t length
, loff_t ofs
, int ctrl_idx
)
7161 struct ssd_ram_op_msg
*msg
;
7163 size_t len
= length
;
7167 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
|| (uint64_t)(ofs
+ length
) > dev
->hw_info
.ram_size
7168 || !length
|| length
> dev
->hw_info
.ram_max_len
7169 || (length
& (dev
->hw_info
.ram_align
- 1)) != 0 || ((uint64_t)ofs
& (dev
->hw_info
.ram_align
- 1)) != 0) {
7173 len
/= dev
->hw_info
.ram_align
;
7174 do_div(ofs_w
, dev
->hw_info
.ram_align
);
7176 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_TODEVICE
);
7177 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7178 ret
= dma_mapping_error(buf_dma
);
7180 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7183 hio_warn("%s: unable to map write DMA buffer\n", dev
->name
);
7184 goto out_dma_mapping
;
7187 msg
= (struct ssd_ram_op_msg
*)ssd_get_dmsg(dev
);
7189 msg
->fun
= SSD_FUNC_RAM_WRITE
;
7190 msg
->ctrl_idx
= ctrl_idx
;
7191 msg
->start
= (uint32_t)ofs_w
;
7195 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
7198 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_TODEVICE
);
7205 static int ssd_ram_read(struct ssd_device
*dev
, void *buf
, size_t length
, loff_t ofs
, int ctrl_idx
)
7212 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
|| (uint64_t)(ofs
+ length
) > dev
->hw_info
.ram_size
|| !length
7213 || (length
& (dev
->hw_info
.ram_align
- 1)) != 0 || ((uint64_t)ofs
& (dev
->hw_info
.ram_align
- 1)) != 0) {
7218 len
= dev
->hw_info
.ram_max_len
;
7219 if (left
< (int)dev
->hw_info
.ram_max_len
) {
7223 ret
= ssd_ram_read_4k(dev
, buf
, len
, off
, ctrl_idx
);
7236 static int ssd_ram_write(struct ssd_device
*dev
, void *buf
, size_t length
, loff_t ofs
, int ctrl_idx
)
7243 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
|| (uint64_t)(ofs
+ length
) > dev
->hw_info
.ram_size
|| !length
7244 || (length
& (dev
->hw_info
.ram_align
- 1)) != 0 || ((uint64_t)ofs
& (dev
->hw_info
.ram_align
- 1)) != 0) {
7249 len
= dev
->hw_info
.ram_max_len
;
7250 if (left
< (int)dev
->hw_info
.ram_max_len
) {
7254 ret
= ssd_ram_write_4k(dev
, buf
, len
, off
, ctrl_idx
);
7269 static int ssd_check_flash(struct ssd_device
*dev
, int flash
, int page
, int ctrl_idx
)
7271 int cur_ch
= flash
% dev
->hw_info
.max_ch
;
7272 int cur_chip
= flash
/dev
->hw_info
.max_ch
;
7274 if (ctrl_idx
>= dev
->hw_info
.nr_ctrl
) {
7278 if (cur_ch
>= dev
->hw_info
.nr_ch
|| cur_chip
>= dev
->hw_info
.nr_chip
) {
7282 if (page
>= (int)(dev
->hw_info
.block_count
* dev
->hw_info
.page_count
)) {
7288 static int ssd_nand_read_id(struct ssd_device
*dev
, void *id
, int flash
, int chip
, int ctrl_idx
)
7290 struct ssd_nand_op_msg
*msg
;
7297 buf_dma
= pci_map_single(dev
->pdev
, id
, SSD_NAND_ID_BUFF_SZ
, PCI_DMA_FROMDEVICE
);
7298 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7299 ret
= dma_mapping_error(buf_dma
);
7301 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7304 hio_warn("%s: unable to map read DMA buffer\n", dev
->name
);
7305 goto out_dma_mapping
;
7308 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7309 flash
= ((uint32_t)flash
<< 1) | (uint32_t)chip
;
7313 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7315 msg
->fun
= SSD_FUNC_NAND_READ_ID
;
7316 msg
->chip_no
= flash
;
7317 msg
->chip_ce
= chip
;
7318 msg
->ctrl_idx
= ctrl_idx
;
7321 ret
= ssd_do_request(dev
, READ
, msg
, NULL
);
7324 pci_unmap_single(dev
->pdev
, buf_dma
, SSD_NAND_ID_BUFF_SZ
, PCI_DMA_FROMDEVICE
);
7331 static int ssd_nand_read(struct ssd_device
*dev
, void *buf
,
7332 int flash
, int chip
, int page
, int page_count
, int ctrl_idx
)
7334 struct ssd_nand_op_msg
*msg
;
7343 if ((page
+ page_count
) > dev
->hw_info
.block_count
*dev
->hw_info
.page_count
) {
7347 ret
= ssd_check_flash(dev
, flash
, page
, ctrl_idx
);
7352 length
= page_count
* dev
->hw_info
.page_size
;
7354 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_FROMDEVICE
);
7355 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7356 ret
= dma_mapping_error(buf_dma
);
7358 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7361 hio_warn("%s: unable to map read DMA buffer\n", dev
->name
);
7362 goto out_dma_mapping
;
7365 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7366 flash
= (flash
<< 1) | chip
;
7370 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7372 msg
->fun
= SSD_FUNC_NAND_READ
;
7373 msg
->ctrl_idx
= ctrl_idx
;
7374 msg
->chip_no
= flash
;
7375 msg
->chip_ce
= chip
;
7376 msg
->page_no
= page
;
7377 msg
->page_count
= page_count
;
7380 ret
= ssd_do_request(dev
, READ
, msg
, NULL
);
7383 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_FROMDEVICE
);
7390 static int ssd_nand_read_w_oob(struct ssd_device
*dev
, void *buf
,
7391 int flash
, int chip
, int page
, int count
, int ctrl_idx
)
7393 struct ssd_nand_op_msg
*msg
;
7402 if ((page
+ count
) > (int)(dev
->hw_info
.block_count
* dev
->hw_info
.page_count
)) {
7406 ret
= ssd_check_flash(dev
, flash
, page
, ctrl_idx
);
7411 length
= count
* (dev
->hw_info
.page_size
+ dev
->hw_info
.oob_size
);
7413 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_FROMDEVICE
);
7414 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7415 ret
= dma_mapping_error(buf_dma
);
7417 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7420 hio_warn("%s: unable to map read DMA buffer\n", dev
->name
);
7421 goto out_dma_mapping
;
7424 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7425 flash
= ((uint32_t)flash
<< 1) | (uint32_t)chip
;
7429 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7431 msg
->fun
= SSD_FUNC_NAND_READ_WOOB
;
7432 msg
->ctrl_idx
= ctrl_idx
;
7433 msg
->chip_no
= flash
;
7434 msg
->chip_ce
= chip
;
7435 msg
->page_no
= page
;
7436 msg
->page_count
= count
;
7439 ret
= ssd_do_request(dev
, READ
, msg
, NULL
);
7442 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_FROMDEVICE
);
7449 static int ssd_nand_write(struct ssd_device
*dev
, void *buf
,
7450 int flash
, int chip
, int page
, int count
, int ctrl_idx
)
7452 struct ssd_nand_op_msg
*msg
;
7457 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7469 ret
= ssd_check_flash(dev
, flash
, page
, ctrl_idx
);
7474 length
= count
* (dev
->hw_info
.page_size
+ dev
->hw_info
.oob_size
);
7476 /* write data to ram */
7477 /*ret = ssd_ram_write(dev, buf, length, dev->hw_info.nand_wbuff_base, ctrl_idx);
7482 buf_dma
= pci_map_single(dev
->pdev
, buf
, length
, PCI_DMA_TODEVICE
);
7483 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
7484 ret
= dma_mapping_error(buf_dma
);
7486 ret
= dma_mapping_error(&(dev
->pdev
->dev
), buf_dma
);
7489 hio_warn("%s: unable to map write DMA buffer\n", dev
->name
);
7490 goto out_dma_mapping
;
7493 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7494 flash
= ((uint32_t)flash
<< 1) | (uint32_t)chip
;
7498 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7500 msg
->fun
= SSD_FUNC_NAND_WRITE
;
7501 msg
->ctrl_idx
= ctrl_idx
;
7502 msg
->chip_no
= flash
;
7503 msg
->chip_ce
= chip
;
7505 msg
->page_no
= page
;
7506 msg
->page_count
= count
;
7509 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
7512 pci_unmap_single(dev
->pdev
, buf_dma
, length
, PCI_DMA_TODEVICE
);
/*
 * ssd_nand_erase() - issue a NAND block-erase request.
 * NOTE(review): mangled extraction — locals, error checks and return are
 * missing from this view; code kept byte-identical, comments only.
 * Visible flow mirrors ssd_nand_write() minus the data buffer: validate the
 * target, fold (flash, chip) into chip_no for pre-V3 protocol, and send a
 * SSD_FUNC_NAND_ERASE message through ssd_do_request().
 */
7518 static int ssd_nand_erase(struct ssd_device
*dev
, int flash
, int chip
, int page
, int ctrl_idx
)
7520 struct ssd_nand_op_msg
*msg
;
7523 ret
= ssd_check_flash(dev
, flash
, page
, ctrl_idx
);
7528 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7529 flash
= ((uint32_t)flash
<< 1) | (uint32_t)chip
;
7533 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7535 msg
->fun
= SSD_FUNC_NAND_ERASE
;
7536 msg
->ctrl_idx
= ctrl_idx
;
7537 msg
->chip_no
= flash
;
7538 msg
->chip_ce
= chip
;
7539 msg
->page_no
= page
;
7541 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
/*
 * ssd_update_bbt() - ask the firmware to flush/update the bad-block table
 * for one flash, via a SSD_FUNC_FLUSH message.
 * NOTE(review): mangled extraction — locals, error checks, return are
 * missing from this view; code kept byte-identical, comments only.
 * Pre-V3 protocol uses the smaller ssd_flush_msg layout (aliased onto the
 * same dmsg buffer); V3+ uses the nand-op message fields directly.
 */
7547 static int ssd_update_bbt(struct ssd_device
*dev
, int flash
, int ctrl_idx
)
7549 struct ssd_nand_op_msg
*msg
;
7550 struct ssd_flush_msg
*fmsg
;
7553 ret
= ssd_check_flash(dev
, flash
, 0, ctrl_idx
);
7558 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7560 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7561 fmsg
= (struct ssd_flush_msg
*)msg
;
7563 fmsg
->fun
= SSD_FUNC_FLUSH
;
7565 fmsg
->flash
= flash
;
7566 fmsg
->ctrl_idx
= ctrl_idx
;
7568 msg
->fun
= SSD_FUNC_FLUSH
;
7570 msg
->chip_no
= flash
;
7571 msg
->ctrl_idx
= ctrl_idx
;
7574 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
7580 /* flash controller init state */
/*
 * __ssd_check_init_state() - probe which flash controllers answered the
 * test-register handshake, then poll per-chip/per-channel init-state bits
 * until every chip reports ready or max_wait expires.
 * NOTE(review): mangled extraction — loop counters (i, j, k), test_data,
 * read_data, init_wait, kfree/return paths etc. are missing from this view;
 * code kept byte-identical, comments only.
 */
7581 static int __ssd_check_init_state(struct ssd_device
*dev
)
7583 uint32_t *init_state
= NULL
;
7584 int reg_base
, reg_sz
;
7585 int max_wait
= SSD_INIT_MAX_WAIT
;
/* handshake: write test_data, expect the controller to return ~test_data */
7591 for (i=0; i<dev->hw_info.nr_ctrl; i++) {
7592 ssd_reg32_write(dev->ctrlp + SSD_CTRL_TEST_REG0 + i * 8, test_data);
7593 read_data = ssd_reg32_read(dev->ctrlp + SSD_CTRL_TEST_REG0 + i * 8);
7594 if (read_data == ~test_data) {
7595 //dev->hw_info.nr_ctrl++;
7596 dev->hw_info.nr_ctrl_map |= 1<<i;
7602 read_data = ssd_reg32_read(dev->ctrlp + SSD_READY_REG);
7604 for (i=0; i<dev->hw_info.nr_ctrl; i++) {
7605 if (((read_data>>i) & 0x1) == 0) {
7610 if (dev->hw_info.nr_ctrl != j) {
7611 printk(KERN_WARNING "%s: nr_ctrl mismatch: %d %d\n", dev->name, dev->hw_info.nr_ctrl, j);
/* all controllers must report identical flash info */
7617 init_state = ssd_reg_read(dev->ctrlp + SSD_FLASH_INFO_REG0);
7618 for (j=1; j<dev->hw_info.nr_ctrl;j++) {
7619 if (init_state != ssd_reg_read(dev->ctrlp + SSD_FLASH_INFO_REG0 + j*8)) {
7620 printk(KERN_WARNING "SSD_FLASH_INFO_REG[%d], not match\n", j);
7626 /* init_state = ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0);
7627 for (j=1; j<dev->hw_info.nr_ctrl; j++) {
7628 if (init_state != ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0 + j*16)) {
7629 printk(KERN_WARNING "SSD_CHIP_INFO_REG Lo [%d], not match\n", j);
7634 init_state = ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0 + 8);
7635 for (j=1; j<dev->hw_info.nr_ctrl; j++) {
7636 if (init_state != ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0 + 8 + j*16)) {
7637 printk(KERN_WARNING "SSD_CHIP_INFO_REG Hi [%d], not match\n", j);
/* newer protocol allows a longer init timeout */
7643 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
7644 max_wait
= SSD_INIT_MAX_WAIT_V3_2
;
7647 reg_base
= dev
->protocol_info
.init_state_reg
;
7648 reg_sz
= dev
->protocol_info
.init_state_reg_sz
;
7650 init_state
= (uint32_t *)kmalloc(reg_sz
, GFP_KERNEL
);
/* snapshot the init-state bitmap of each controller, one u32 at a time */
7655 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
7657 for (j
=0, k
=0; j
<reg_sz
; j
+=sizeof(uint32_t), k
++) {
7658 init_state
[k
] = ssd_reg32_read(dev
->ctrlp
+ reg_base
+ j
);
7661 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
) {
7662 /* just check the last bit, no need to check all channel */
7663 ch_start
= dev
->hw_info
.max_ch
- 1;
7668 for (j
=0; j
<dev
->hw_info
.nr_chip
; j
++) {
7669 for (k
=ch_start
; k
<dev
->hw_info
.max_ch
; k
++) {
/* bit (chip * max_ch + ch) set means that chip/channel finished init */
7670 if (test_bit((j
*dev
->hw_info
.max_ch
+ k
), (void *)init_state
)) {
7675 if (init_wait
<= max_wait
) {
7676 msleep(SSD_INIT_WAIT
);
7679 if (k
< dev
->hw_info
.nr_ch
) {
7680 hio_warn("%s: controller %d chip %d ch %d init failed\n",
7681 dev
->name
, i
, j
, k
);
7683 hio_warn("%s: controller %d chip %d init failed\n",
7694 //printk(KERN_WARNING "%s: init wait %d\n", dev->name, init_wait);
/*
 * ssd_check_init_state() - wrapper: skip the hardware init check when the
 * driver is not loaded in standard mode (the non-standard branch's body is
 * missing from this mangled view; presumably returns early — TODO confirm).
 */
7700 static int ssd_check_init_state(struct ssd_device
*dev
)
7702 if (mode
!= SSD_DRV_MODE_STANDARD
) {
7706 return __ssd_check_init_state(dev
);
7709 static void ssd_reset_resp_ptr(struct ssd_device
*dev
);
7711 /* reset flash controller etc */
/*
 * __ssd_reset() - perform a controller reset of the requested type
 * (SSD_RST_NOINIT / SSD_RST_NORMAL / SSD_RST_FULL) under fw_mutex, log it,
 * then re-run the init-state check.
 * NOTE(review): mangled extraction — waits/returns between visible lines are
 * missing from this view; code kept byte-identical, comments only.
 * Full reset requires protocol >= V3_2; older protocol bails out (visible
 * early mutex_unlock).
 */
7712 static int __ssd_reset(struct ssd_device
*dev
, int type
)
7714 if (type
< SSD_RST_NOINIT
|| type
> SSD_RST_FULL
) {
7718 mutex_lock(&dev
->fw_mutex
);
7720 if (type
== SSD_RST_NOINIT
) { //no init
7721 ssd_reg32_write(dev
->ctrlp
+ SSD_RESET_REG
, SSD_RESET_NOINIT
);
7722 } else if (type
== SSD_RST_NORMAL
) { //reset & init
7723 ssd_reg32_write(dev
->ctrlp
+ SSD_RESET_REG
, SSD_RESET
);
7724 } else { // full reset
7725 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
7726 mutex_unlock(&dev
->fw_mutex
);
7730 ssd_reg32_write(dev
->ctrlp
+ SSD_FULL_RESET_REG
, SSD_RESET_FULL
);
/* full reset invalidates the response ring pointers */
7733 ssd_reset_resp_ptr(dev
);
7736 #ifdef SSD_OT_PROTECT
/* restore the flush timeout for the current write mode after reset */
7743 ssd_set_flush_timeout(dev
, dev
->wmode
);
7745 mutex_unlock(&dev
->fw_mutex
);
7746 ssd_gen_swlog(dev
, SSD_LOG_RESET
, (uint32_t)type
);
7748 return __ssd_check_init_state(dev
);
/*
 * ssd_save_md() - ask the firmware to save metadata (SSD_FUNC_FLUSH).
 * Skipped unless in standard driver mode, on protocol > V3, and when the
 * device advertises save_md support.
 * NOTE(review): mangled extraction — early-return bodies and the final
 * return are missing from this view; code kept byte-identical.
 */
7751 static int ssd_save_md(struct ssd_device
*dev
)
7753 struct ssd_nand_op_msg
*msg
;
7756 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
7759 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
7763 if (!dev
->save_md
) {
7767 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7769 msg
->fun
= SSD_FUNC_FLUSH
;
7774 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
/*
 * ssd_barrier_save_md() - same as ssd_save_md() but issued through
 * ssd_do_barrier_request() (caller presumably holds the barrier — TODO
 * confirm against ssd_barrier_start()/end() callers).
 * NOTE(review): mangled extraction; code kept byte-identical.
 */
7780 static int ssd_barrier_save_md(struct ssd_device
*dev
)
7782 struct ssd_nand_op_msg
*msg
;
7785 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
7788 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
7792 if (!dev
->save_md
) {
7796 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7798 msg
->fun
= SSD_FUNC_FLUSH
;
7803 ret
= ssd_do_barrier_request(dev
, WRITE
, msg
, NULL
);
/*
 * ssd_flush() - send a firmware flush command. Pre-V3 protocol reuses the
 * dmsg buffer as a smaller ssd_flush_msg; V3+ fills the nand-op message.
 * NOTE(review): mangled extraction — early-return bodies and the final
 * return are missing from this view; code kept byte-identical.
 */
7809 static int ssd_flush(struct ssd_device
*dev
)
7811 struct ssd_nand_op_msg
*msg
;
7812 struct ssd_flush_msg
*fmsg
;
7815 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
7818 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7820 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7821 fmsg
= (struct ssd_flush_msg
*)msg
;
7823 fmsg
->fun
= SSD_FUNC_FLUSH
;
7828 msg
->fun
= SSD_FUNC_FLUSH
;
7834 ret
= ssd_do_request(dev
, WRITE
, msg
, NULL
);
/*
 * ssd_barrier_flush() - identical shape to ssd_flush(), but dispatched via
 * ssd_do_barrier_request() for use inside a barrier section.
 * NOTE(review): mangled extraction; code kept byte-identical.
 */
7840 static int ssd_barrier_flush(struct ssd_device
*dev
)
7842 struct ssd_nand_op_msg
*msg
;
7843 struct ssd_flush_msg
*fmsg
;
7846 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
7849 msg
= (struct ssd_nand_op_msg
*)ssd_get_dmsg(dev
);
7851 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
7852 fmsg
= (struct ssd_flush_msg
*)msg
;
7854 fmsg
->fun
= SSD_FUNC_FLUSH
;
7859 msg
->fun
= SSD_FUNC_FLUSH
;
7865 ret
= ssd_do_barrier_request(dev
, WRITE
, msg
, NULL
);
7871 #define SSD_WMODE_BUFFER_TIMEOUT 0x00c82710
7872 #define SSD_WMODE_BUFFER_EX_TIMEOUT 0x000500c8
7873 #define SSD_WMODE_FUA_TIMEOUT 0x000503E8
/*
 * ssd_set_flush_timeout() - program the controller flush-timeout register
 * with a value derived from write mode `m` (mode in the top 2 bits of the
 * upper nibble region, timeout in the low bits). No-op/early-out on
 * protocol < V3_1_1 (body of that branch is missing from this mangled view).
 * NOTE(review): the surrounding switch(...) statement and locals `to`/`val`
 * were dropped by the extraction; code kept byte-identical, comments only.
 */
7874 static void ssd_set_flush_timeout(struct ssd_device
*dev
, int m
)
7879 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_1
) {
7884 case SSD_WMODE_BUFFER
:
7885 to
= SSD_WMODE_BUFFER_TIMEOUT
;
7887 case SSD_WMODE_BUFFER_EX
:
/* older firmware uses the shorter BUFFER_EX timeout */
7888 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2_1
) {
7889 to
= SSD_WMODE_BUFFER_EX_TIMEOUT
;
7891 to
= SSD_WMODE_BUFFER_TIMEOUT
;
7895 to
= SSD_WMODE_FUA_TIMEOUT
;
/* pack: write mode (2 bits) into bits 29:28, timeout in the low bits */
7901 val
= (((uint32_t)((uint32_t)m
& 0x3) << 28) | to
);
7903 ssd_reg32_write(dev
->ctrlp
+ SSD_FLUSH_TIMEOUT_REG
, val
);
/*
 * ssd_do_switch_wmode() - switch the active write mode under an IO barrier:
 * start barrier, flush, reprogram the flush timeout, end barrier.
 * NOTE(review): mangled extraction — `ret` checks between calls are missing
 * from this view; code kept byte-identical, comments only.
 */
7906 static int ssd_do_switch_wmode(struct ssd_device
*dev
, int m
)
7910 ret
= ssd_barrier_start(dev
);
7915 ret
= ssd_barrier_flush(dev
);
7917 goto out_barrier_end
;
7920 /* set controller flush timeout */
7921 ssd_set_flush_timeout(dev
, m
);
7927 ssd_barrier_end(dev
);
/*
 * ssd_switch_wmode() - resolve the requested write mode `m`
 * (AUTO/DEFAULT/explicit) to a concrete mode and apply it via
 * ssd_do_switch_wmode() if it differs from the current dev->wmode.
 * AUTO falls back to FUA when the power-loss capacitor is flagged faulty.
 * NOTE(review): mangled extraction — locals (default_wmode, next_wmode,
 * ret), else-branches and return are missing from this view; code kept
 * byte-identical, comments only.
 */
7932 static int ssd_switch_wmode(struct ssd_device
*dev
, int m
)
7938 if (!test_bit(SSD_ONLINE
, &dev
->state
)) {
7942 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
7943 default_wmode
= SSD_WMODE_BUFFER
;
7945 default_wmode
= SSD_WMODE_BUFFER_EX
;
7948 if (SSD_WMODE_AUTO
== m
) {
7949 /* battery fault ? */
7950 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
7951 next_wmode
= SSD_WMODE_FUA
;
7953 next_wmode
= default_wmode
;
7955 } else if (SSD_WMODE_DEFAULT
== m
) {
7956 next_wmode
= default_wmode
;
7961 if (next_wmode
!= dev
->wmode
) {
7962 hio_warn("%s: switch write mode (%d -> %d)\n", dev
->name
, dev
->wmode
, next_wmode
);
7963 ret
= ssd_do_switch_wmode(dev
, next_wmode
);
7965 hio_err("%s: can not switch write mode (%d -> %d)\n", dev
->name
, dev
->wmode
, next_wmode
);
/*
 * ssd_init_wmode() - set dev->wmode at init time from dev->user_wmode with
 * the same AUTO/DEFAULT resolution as ssd_switch_wmode() (FUA on capacitor
 * fault), then program the matching flush timeout.
 * NOTE(review): mangled extraction — `default_wmode` declaration and the
 * return are missing from this view; code kept byte-identical.
 */
7972 static int ssd_init_wmode(struct ssd_device
*dev
)
7977 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
7978 default_wmode
= SSD_WMODE_BUFFER
;
7980 default_wmode
= SSD_WMODE_BUFFER_EX
;
7984 if (SSD_WMODE_AUTO
== dev
->user_wmode
) {
7985 /* battery fault ? */
7986 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
7987 dev
->wmode
= SSD_WMODE_FUA
;
7989 dev
->wmode
= default_wmode
;
7991 } else if (SSD_WMODE_DEFAULT
== dev
->user_wmode
) {
7992 dev
->wmode
= default_wmode
;
7994 dev
->wmode
= dev
->user_wmode
;
7996 ssd_set_flush_timeout(dev
, dev
->wmode
);
/*
 * __ssd_set_wmode() - validate and record a user-requested write mode,
 * log it (SSD_LOG_SET_WMODE) and apply it via ssd_switch_wmode().
 * Rejected on firmware older than protocol V3_1_1 and on out-of-range m.
 * NOTE(review): mangled extraction — the reject-branch bodies and final
 * return are missing from this view; code kept byte-identical.
 */
8001 static int __ssd_set_wmode(struct ssd_device
*dev
, int m
)
8005 /* not support old fw*/
8006 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_1
) {
8011 if (m
< SSD_WMODE_BUFFER
|| m
> SSD_WMODE_DEFAULT
) {
8016 ssd_gen_swlog(dev
, SSD_LOG_SET_WMODE
, m
);
8018 dev
->user_wmode
= m
;
8020 ret
= ssd_switch_wmode(dev
, dev
->user_wmode
);
/*
 * ssd_set_wmode() - exported entry point: map a block_device to its
 * ssd_device (gendisk private_data) and delegate to __ssd_set_wmode().
 * Guards against a NULL bdev/bd_disk (error-branch body missing from this
 * mangled view).
 */
8029 int ssd_set_wmode(struct block_device
*bdev
, int m
)
8031 struct ssd_device
*dev
;
8033 if (!bdev
|| !(bdev
->bd_disk
)) {
8037 dev
= bdev
->bd_disk
->private_data
;
8039 return __ssd_set_wmode(dev
, m
);
/*
 * ssd_do_reset() - normal reset path: take the SSD_RESETING state bit as a
 * re-entry guard, stop the workqueue, barrier IO, run __ssd_reset(NORMAL),
 * then unwind (barrier end, restart workqueue, clear RESETING).
 * NOTE(review): mangled extraction — ret checks and return are missing from
 * this view; code kept byte-identical, comments only.
 */
8042 static int ssd_do_reset(struct ssd_device
*dev
)
8046 if (test_and_set_bit(SSD_RESETING
, &dev
->state
)) {
8050 ssd_stop_workq(dev
);
8052 ret
= ssd_barrier_start(dev
);
8057 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
8059 ret
= __ssd_reset(dev
, SSD_RST_NORMAL
);
8062 //ret = __ssd_reset(dev, SSD_RST_FULL);
8063 ret
= __ssd_reset(dev
, SSD_RST_NORMAL
);
8066 goto out_barrier_end
;
8070 ssd_barrier_end(dev
);
8072 ssd_start_workq(dev
);
8073 test_and_clear_bit(SSD_RESETING
, &dev
->state
);
/*
 * ssd_full_reset() - like ssd_do_reset() but flushes and saves metadata
 * (barrier flush + barrier save-md) before resetting. Note both protocol
 * branches currently issue SSD_RST_NORMAL; the SSD_RST_FULL call is
 * commented out upstream.
 * NOTE(review): mangled extraction — ret checks and return are missing from
 * this view; code kept byte-identical, comments only.
 */
8077 static int ssd_full_reset(struct ssd_device
*dev
)
8081 if (test_and_set_bit(SSD_RESETING
, &dev
->state
)) {
8085 ssd_stop_workq(dev
);
8087 ret
= ssd_barrier_start(dev
);
8092 ret
= ssd_barrier_flush(dev
);
8094 goto out_barrier_end
;
8097 ret
= ssd_barrier_save_md(dev
);
8099 goto out_barrier_end
;
8102 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
8104 ret
= __ssd_reset(dev
, SSD_RST_NORMAL
);
8107 //ret = __ssd_reset(dev, SSD_RST_FULL);
8108 ret
= __ssd_reset(dev
, SSD_RST_NORMAL
);
8111 goto out_barrier_end
;
8115 ssd_barrier_end(dev
);
8117 ssd_start_workq(dev
);
8118 test_and_clear_bit(SSD_RESETING
, &dev
->state
);
/*
 * ssd_reset() - exported entry point: map a block_device to its ssd_device
 * and run a full reset. NULL-guard branch body missing from this mangled
 * view.
 */
8122 int ssd_reset(struct block_device
*bdev
)
8124 struct ssd_device
*dev
;
8126 if (!bdev
|| !(bdev
->bd_disk
)) {
8130 dev
= bdev
->bd_disk
->private_data
;
8132 return ssd_full_reset(dev
);
8135 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
/*
 * ssd_issue_flush_fn() - pre-2.6.20 block-layer issue_flush hook; simply
 * forwards to ssd_flush(). (Matching #endif not visible in this mangled
 * chunk.)
 */
8136 static int ssd_issue_flush_fn(struct request_queue
*q
, struct gendisk
*disk
,
8137 sector_t
*error_sector
)
8139 struct ssd_device
*dev
= q
->queuedata
;
8141 return ssd_flush(dev
);
/*
 * ssd_submit_pbio() - submit a "private" bio to the device, rejecting it
 * up-front when the device is offline, has timed out (SSD_DEBUG_ERR), the
 * bio carries unsupported barrier/FUA semantics for this kernel, or the
 * device is read-only and the bio is a write. Otherwise submit directly, or
 * queue it when the send queue is busy (SSD_QUEUE_PBIO).
 * The #if ladders select the right bio_endio() signature / barrier test for
 * the running kernel version.
 * NOTE(review): mangled extraction — returns after each bio_endio(), #else/
 * #endif lines and some braces are missing from this view; code kept
 * byte-identical, comments only.
 */
8145 void ssd_submit_pbio(struct request_queue
*q
, struct bio
*bio
)
8147 struct ssd_device
*dev
= q
->queuedata
;
8148 #ifdef SSD_QUEUE_PBIO
8152 if (!test_bit(SSD_ONLINE
, &dev
->state
)) {
8153 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
8154 bio_endio(bio
, -ENODEV
);
8156 bio_endio(bio
, bio
->bi_size
, -ENODEV
);
8161 #ifdef SSD_DEBUG_ERR
8162 if (atomic_read(&dev
->tocnt
)) {
8163 hio_warn("%s: IO rejected because of IO timeout!\n", dev
->name
);
8164 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
8165 bio_endio(bio
, -EIO
);
8167 bio_endio(bio
, bio
->bi_size
, -EIO
);
/* barrier detection changed API three times across kernel versions */
8173 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32))
8174 if (unlikely(bio_barrier(bio
))) {
8175 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
8176 bio_endio(bio
, -EOPNOTSUPP
);
8178 bio_endio(bio
, bio
->bi_size
, -EOPNOTSUPP
);
8182 #elif (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36))
8183 if (unlikely(bio_rw_flagged(bio
, BIO_RW_BARRIER
))) {
8184 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
8185 bio_endio(bio
, -EOPNOTSUPP
);
8187 bio_endio(bio
, bio
->bi_size
, -EOPNOTSUPP
);
8191 #elif (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37))
8192 if (unlikely(bio
->bi_rw
& REQ_HARDBARRIER
)) {
8193 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
8194 bio_endio(bio
, -EOPNOTSUPP
);
8196 bio_endio(bio
, bio
->bi_size
, -EOPNOTSUPP
);
/* FUA is not supported on the pbio path */
8202 if (unlikely(bio
->bi_rw
& REQ_FUA
)) {
8203 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
8204 bio_endio(bio
, -EOPNOTSUPP
);
8206 bio_endio(bio
, bio
->bi_size
, -EOPNOTSUPP
);
8212 if (unlikely(dev
->readonly
&& bio_data_dir(bio
) == WRITE
)) {
8213 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
8214 bio_endio(bio
, -EROFS
);
8216 bio_endio(bio
, bio
->bi_size
, -EROFS
);
8221 #ifdef SSD_QUEUE_PBIO
8222 if (0 == atomic_read(&dev
->in_sendq
)) {
8223 ret
= __ssd_submit_pbio(dev
, bio
, 0);
/* mark as pbio so the queue worker can distinguish it later */
8227 (void)test_and_set_bit(BIO_SSD_PBIO
, &bio
->bi_flags
);
8228 ssd_queue_bio(dev
, bio
);
8231 __ssd_submit_pbio(dev
, bio
, 1);
/*
 * ssd_make_request() - the block-layer make_request hook; return type
 * (blk_qc_t / void / int) is selected per kernel version. Same rejection
 * ladder as ssd_submit_pbio() (offline, timeout, barrier, FUA), plus the
 * REQ_FLUSH-without-data fast path, then submit or queue the bio.
 * NOTE(review): mangled extraction — returns after each bio_endio(), #else/
 * #endif lines, read-only check and tail returns are missing from this
 * view; code kept byte-identical, comments only.
 */
8238 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
8239 static blk_qc_t
ssd_make_request(struct request_queue
*q
, struct bio
*bio
)
8240 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
8241 static void ssd_make_request(struct request_queue
*q
, struct bio
*bio
)
8243 static int ssd_make_request(struct request_queue
*q
, struct bio
*bio
)
8246 struct ssd_device
*dev
= q
->queuedata
;
8249 if (!test_bit(SSD_ONLINE
, &dev
->state
)) {
8250 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
8251 bio_endio(bio
, -ENODEV
);
8253 bio_endio(bio
, bio
->bi_size
, -ENODEV
);
8258 #ifdef SSD_DEBUG_ERR
8259 if (atomic_read(&dev
->tocnt
)) {
8260 hio_warn("%s: IO rejected because of IO timeout!\n", dev
->name
);
8261 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
8262 bio_endio(bio
, -EIO
);
8264 bio_endio(bio
, bio
->bi_size
, -EIO
);
/* barrier detection changed API three times across kernel versions */
8270 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32))
8271 if (unlikely(bio_barrier(bio
))) {
8272 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
8273 bio_endio(bio
, -EOPNOTSUPP
);
8275 bio_endio(bio
, bio
->bi_size
, -EOPNOTSUPP
);
8279 #elif (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36))
8280 if (unlikely(bio_rw_flagged(bio
, BIO_RW_BARRIER
))) {
8281 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
8282 bio_endio(bio
, -EOPNOTSUPP
);
8284 bio_endio(bio
, bio
->bi_size
, -EOPNOTSUPP
);
8288 #elif (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37))
8289 if (unlikely(bio
->bi_rw
& REQ_HARDBARRIER
)) {
8290 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
8291 bio_endio(bio
, -EOPNOTSUPP
);
8293 bio_endio(bio
, bio
->bi_size
, -EOPNOTSUPP
);
8299 if (unlikely(bio
->bi_rw
& REQ_FUA
)) {
8300 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
8301 bio_endio(bio
, -EOPNOTSUPP
);
8303 bio_endio(bio
, bio
->bi_size
, -EOPNOTSUPP
);
8308 /* writeback_cache_control.txt: REQ_FLUSH requests without data can be completed successfully without doing any work */
8309 if (unlikely((bio
->bi_rw
& REQ_FLUSH
) && !bio_sectors(bio
))) {
8316 if (0 == atomic_read(&dev
->in_sendq
)) {
8317 ret
= ssd_submit_bio(dev
, bio
, 0);
8321 ssd_queue_bio(dev
, bio
);
8325 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
8326 return BLK_QC_T_NONE
;
8327 #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
8334 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16))
/*
 * ssd_block_getgeo() - HDIO_GETGEO ioctl backend: fabricate a CHS geometry
 * from device capacity. Only the cylinders computation is visible here
 * (heads/sectors assignments were dropped by the extraction); cylinders =
 * (size with low 6 bits masked) >> 6, i.e. capacity / 64.
 */
8335 static int ssd_block_getgeo(struct block_device
*bdev
, struct hd_geometry
*geo
)
8337 struct ssd_device
*dev
;
8343 dev
= bdev
->bd_disk
->private_data
;
8350 geo
->cylinders
= (dev
->hw_info
.size
& ~0x3f) >> 6;
8355 static void ssd_cleanup_blkdev(struct ssd_device
*dev
);
8356 static int ssd_init_blkdev(struct ssd_device
*dev
);
8357 static int ssd_ioctl_common(struct ssd_device
*dev
, unsigned int cmd
, unsigned long arg
)
8359 void __user
*argp
= (void __user
*)arg
;
8360 void __user
*buf
= NULL
;
8365 case SSD_CMD_GET_PROTOCOL_INFO
:
8366 if (copy_to_user(argp
, &dev
->protocol_info
, sizeof(struct ssd_protocol_info
))) {
8367 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8373 case SSD_CMD_GET_HW_INFO
:
8374 if (copy_to_user(argp
, &dev
->hw_info
, sizeof(struct ssd_hw_info
))) {
8375 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8381 case SSD_CMD_GET_ROM_INFO
:
8382 if (copy_to_user(argp
, &dev
->rom_info
, sizeof(struct ssd_rom_info
))) {
8383 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8389 case SSD_CMD_GET_SMART
: {
8390 struct ssd_smart smart
;
8393 memcpy(&smart
, &dev
->smart
, sizeof(struct ssd_smart
));
8395 mutex_lock(&dev
->gd_mutex
);
8396 ssd_update_smart(dev
, &smart
);
8397 mutex_unlock(&dev
->gd_mutex
);
8399 /* combine the volatile log info */
8400 if (dev
->log_info
.nr_log
) {
8401 for (i
=0; i
<SSD_LOG_NR_LEVEL
; i
++) {
8402 smart
.log_info
.stat
[i
] += dev
->log_info
.stat
[i
];
8406 if (copy_to_user(argp
, &smart
, sizeof(struct ssd_smart
))) {
8407 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8415 case SSD_CMD_GET_IDX
:
8416 if (copy_to_user(argp
, &dev
->idx
, sizeof(int))) {
8417 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8423 case SSD_CMD_GET_AMOUNT
: {
8424 int nr_ssd
= atomic_read(&ssd_nr
);
8425 if (copy_to_user(argp
, &nr_ssd
, sizeof(int))) {
8426 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8433 case SSD_CMD_GET_TO_INFO
: {
8434 int tocnt
= atomic_read(&dev
->tocnt
);
8436 if (copy_to_user(argp
, &tocnt
, sizeof(int))) {
8437 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8444 case SSD_CMD_GET_DRV_VER
: {
8445 char ver
[] = DRIVER_VERSION
;
8446 int len
= sizeof(ver
);
8448 if (len
> (DRIVER_VERSION_LEN
- 1)) {
8449 len
= (DRIVER_VERSION_LEN
- 1);
8451 if (copy_to_user(argp
, ver
, len
)) {
8452 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8459 case SSD_CMD_GET_BBACC_INFO
: {
8460 struct ssd_acc_info acc
;
8462 mutex_lock(&dev
->fw_mutex
);
8463 ret
= ssd_bb_acc(dev
, &acc
);
8464 mutex_unlock(&dev
->fw_mutex
);
8469 if (copy_to_user(argp
, &acc
, sizeof(struct ssd_acc_info
))) {
8470 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8477 case SSD_CMD_GET_ECACC_INFO
: {
8478 struct ssd_acc_info acc
;
8480 mutex_lock(&dev
->fw_mutex
);
8481 ret
= ssd_ec_acc(dev
, &acc
);
8482 mutex_unlock(&dev
->fw_mutex
);
8487 if (copy_to_user(argp
, &acc
, sizeof(struct ssd_acc_info
))) {
8488 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8495 case SSD_CMD_GET_HW_INFO_EXT
:
8496 if (copy_to_user(argp
, &dev
->hw_info_ext
, sizeof(struct ssd_hw_info_extend
))) {
8497 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8503 case SSD_CMD_REG_READ
: {
8504 struct ssd_reg_op_info reg_info
;
8506 if (copy_from_user(®_info
, argp
, sizeof(struct ssd_reg_op_info
))) {
8507 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8512 if (reg_info
.offset
> dev
->mmio_len
-sizeof(uint32_t)) {
8517 reg_info
.value
= ssd_reg32_read(dev
->ctrlp
+ reg_info
.offset
);
8518 if (copy_to_user(argp
, ®_info
, sizeof(struct ssd_reg_op_info
))) {
8519 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8527 case SSD_CMD_REG_WRITE
: {
8528 struct ssd_reg_op_info reg_info
;
8530 if (copy_from_user(®_info
, argp
, sizeof(struct ssd_reg_op_info
))) {
8531 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8536 if (reg_info
.offset
> dev
->mmio_len
-sizeof(uint32_t)) {
8541 ssd_reg32_write(dev
->ctrlp
+ reg_info
.offset
, reg_info
.value
);
8546 case SSD_CMD_SPI_READ
: {
8547 struct ssd_spi_op_info spi_info
;
8550 if (copy_from_user(&spi_info
, argp
, sizeof(struct ssd_spi_op_info
))) {
8551 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8557 size
= spi_info
.len
;
8560 if (size
> dev
->rom_info
.size
|| 0 == size
|| (off
+ size
) > dev
->rom_info
.size
) {
8565 kbuf
= kmalloc(size
, GFP_KERNEL
);
8571 ret
= ssd_spi_page_read(dev
, kbuf
, off
, size
);
8577 if (copy_to_user(buf
, kbuf
, size
)) {
8578 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8589 case SSD_CMD_SPI_WRITE
: {
8590 struct ssd_spi_op_info spi_info
;
8593 if (copy_from_user(&spi_info
, argp
, sizeof(struct ssd_spi_op_info
))) {
8594 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8600 size
= spi_info
.len
;
8603 if (size
> dev
->rom_info
.size
|| 0 == size
|| (off
+ size
) > dev
->rom_info
.size
) {
8608 kbuf
= kmalloc(size
, GFP_KERNEL
);
8614 if (copy_from_user(kbuf
, buf
, size
)) {
8615 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8621 ret
= ssd_spi_page_write(dev
, kbuf
, off
, size
);
8632 case SSD_CMD_SPI_ERASE
: {
8633 struct ssd_spi_op_info spi_info
;
8636 if (copy_from_user(&spi_info
, argp
, sizeof(struct ssd_spi_op_info
))) {
8637 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8644 if ((off
+ dev
->rom_info
.block_size
) > dev
->rom_info
.size
) {
8649 ret
= ssd_spi_block_erase(dev
, off
);
8657 case SSD_CMD_I2C_READ
: {
8658 struct ssd_i2c_op_info i2c_info
;
8662 if (copy_from_user(&i2c_info
, argp
, sizeof(struct ssd_i2c_op_info
))) {
8663 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8668 saddr
= i2c_info
.saddr
;
8669 rsize
= i2c_info
.rsize
;
8670 buf
= i2c_info
.rbuf
;
8672 if (rsize
<= 0 || rsize
> SSD_I2C_MAX_DATA
) {
8677 kbuf
= kmalloc(rsize
, GFP_KERNEL
);
8683 ret
= ssd_i2c_read(dev
, saddr
, rsize
, kbuf
);
8689 if (copy_to_user(buf
, kbuf
, rsize
)) {
8690 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8701 case SSD_CMD_I2C_WRITE
: {
8702 struct ssd_i2c_op_info i2c_info
;
8706 if (copy_from_user(&i2c_info
, argp
, sizeof(struct ssd_i2c_op_info
))) {
8707 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8712 saddr
= i2c_info
.saddr
;
8713 wsize
= i2c_info
.wsize
;
8714 buf
= i2c_info
.wbuf
;
8716 if (wsize
<= 0 || wsize
> SSD_I2C_MAX_DATA
) {
8721 kbuf
= kmalloc(wsize
, GFP_KERNEL
);
8727 if (copy_from_user(kbuf
, buf
, wsize
)) {
8728 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8734 ret
= ssd_i2c_write(dev
, saddr
, wsize
, kbuf
);
8745 case SSD_CMD_I2C_WRITE_READ
: {
8746 struct ssd_i2c_op_info i2c_info
;
8752 if (copy_from_user(&i2c_info
, argp
, sizeof(struct ssd_i2c_op_info
))) {
8753 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8758 saddr
= i2c_info
.saddr
;
8759 wsize
= i2c_info
.wsize
;
8760 rsize
= i2c_info
.rsize
;
8761 buf
= i2c_info
.wbuf
;
8763 if (wsize
<= 0 || wsize
> SSD_I2C_MAX_DATA
) {
8768 if (rsize
<= 0 || rsize
> SSD_I2C_MAX_DATA
) {
8773 size
= wsize
+ rsize
;
8775 kbuf
= kmalloc(size
, GFP_KERNEL
);
8781 if (copy_from_user((kbuf
+ rsize
), buf
, wsize
)) {
8782 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8788 buf
= i2c_info
.rbuf
;
8790 ret
= ssd_i2c_write_read(dev
, saddr
, wsize
, (kbuf
+ rsize
), rsize
, kbuf
);
8796 if (copy_to_user(buf
, kbuf
, rsize
)) {
8797 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8808 case SSD_CMD_SMBUS_SEND_BYTE
: {
8809 struct ssd_smbus_op_info smbus_info
;
8810 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
8814 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
8815 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8820 saddr
= smbus_info
.saddr
;
8821 buf
= smbus_info
.buf
;
8824 if (copy_from_user(smb_data
, buf
, size
)) {
8825 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8830 ret
= ssd_smbus_send_byte(dev
, saddr
, smb_data
);
8838 case SSD_CMD_SMBUS_RECEIVE_BYTE
: {
8839 struct ssd_smbus_op_info smbus_info
;
8840 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
8844 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
8845 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8850 saddr
= smbus_info
.saddr
;
8851 buf
= smbus_info
.buf
;
8854 ret
= ssd_smbus_receive_byte(dev
, saddr
, smb_data
);
8859 if (copy_to_user(buf
, smb_data
, size
)) {
8860 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8868 case SSD_CMD_SMBUS_WRITE_BYTE
: {
8869 struct ssd_smbus_op_info smbus_info
;
8870 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
8875 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
8876 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8881 saddr
= smbus_info
.saddr
;
8882 command
= smbus_info
.cmd
;
8883 buf
= smbus_info
.buf
;
8886 if (copy_from_user(smb_data
, buf
, size
)) {
8887 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8892 ret
= ssd_smbus_write_byte(dev
, saddr
, command
, smb_data
);
8900 case SSD_CMD_SMBUS_READ_BYTE
: {
8901 struct ssd_smbus_op_info smbus_info
;
8902 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
8907 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
8908 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8913 saddr
= smbus_info
.saddr
;
8914 command
= smbus_info
.cmd
;
8915 buf
= smbus_info
.buf
;
8918 ret
= ssd_smbus_read_byte(dev
, saddr
, command
, smb_data
);
8923 if (copy_to_user(buf
, smb_data
, size
)) {
8924 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8932 case SSD_CMD_SMBUS_WRITE_WORD
: {
8933 struct ssd_smbus_op_info smbus_info
;
8934 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
8939 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
8940 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8945 saddr
= smbus_info
.saddr
;
8946 command
= smbus_info
.cmd
;
8947 buf
= smbus_info
.buf
;
8950 if (copy_from_user(smb_data
, buf
, size
)) {
8951 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8956 ret
= ssd_smbus_write_word(dev
, saddr
, command
, smb_data
);
8964 case SSD_CMD_SMBUS_READ_WORD
: {
8965 struct ssd_smbus_op_info smbus_info
;
8966 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
8971 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
8972 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
8977 saddr
= smbus_info
.saddr
;
8978 command
= smbus_info
.cmd
;
8979 buf
= smbus_info
.buf
;
8982 ret
= ssd_smbus_read_word(dev
, saddr
, command
, smb_data
);
8987 if (copy_to_user(buf
, smb_data
, size
)) {
8988 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
8996 case SSD_CMD_SMBUS_WRITE_BLOCK
: {
8997 struct ssd_smbus_op_info smbus_info
;
8998 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
9003 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
9004 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9009 saddr
= smbus_info
.saddr
;
9010 command
= smbus_info
.cmd
;
9011 buf
= smbus_info
.buf
;
9012 size
= smbus_info
.size
;
9014 if (size
> SSD_SMBUS_BLOCK_MAX
) {
9019 if (copy_from_user(smb_data
, buf
, size
)) {
9020 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9025 ret
= ssd_smbus_write_block(dev
, saddr
, command
, size
, smb_data
);
9033 case SSD_CMD_SMBUS_READ_BLOCK
: {
9034 struct ssd_smbus_op_info smbus_info
;
9035 uint8_t smb_data
[SSD_SMBUS_BLOCK_MAX
];
9040 if (copy_from_user(&smbus_info
, argp
, sizeof(struct ssd_smbus_op_info
))) {
9041 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9046 saddr
= smbus_info
.saddr
;
9047 command
= smbus_info
.cmd
;
9048 buf
= smbus_info
.buf
;
9049 size
= smbus_info
.size
;
9051 if (size
> SSD_SMBUS_BLOCK_MAX
) {
9056 ret
= ssd_smbus_read_block(dev
, saddr
, command
, size
, smb_data
);
9061 if (copy_to_user(buf
, smb_data
, size
)) {
9062 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9070 case SSD_CMD_BM_GET_VER
: {
9073 ret
= ssd_bm_get_version(dev
, &ver
);
9078 if (copy_to_user(argp
, &ver
, sizeof(uint16_t))) {
9079 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9087 case SSD_CMD_BM_GET_NR_CAP
: {
9090 ret
= ssd_bm_nr_cap(dev
, &nr_cap
);
9095 if (copy_to_user(argp
, &nr_cap
, sizeof(int))) {
9096 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9104 case SSD_CMD_BM_CAP_LEARNING
: {
9105 ret
= ssd_bm_enter_cap_learning(dev
);
9114 case SSD_CMD_CAP_LEARN
: {
9117 ret
= ssd_cap_learn(dev
, &cap
);
9122 if (copy_to_user(argp
, &cap
, sizeof(uint32_t))) {
9123 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9131 case SSD_CMD_GET_CAP_STATUS
: {
9134 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
9138 if (copy_to_user(argp
, &cap_status
, sizeof(int))) {
9139 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9147 case SSD_CMD_RAM_READ
: {
9148 struct ssd_ram_op_info ram_info
;
9151 size_t rlen
, len
= dev
->hw_info
.ram_max_len
;
9154 if (copy_from_user(&ram_info
, argp
, sizeof(struct ssd_ram_op_info
))) {
9155 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9160 ofs
= ram_info
.start
;
9161 length
= ram_info
.length
;
9163 ctrl_idx
= ram_info
.ctrl_idx
;
9165 if (ofs
>= dev
->hw_info
.ram_size
|| length
> dev
->hw_info
.ram_size
|| 0 == length
|| (ofs
+ length
) > dev
->hw_info
.ram_size
) {
9170 kbuf
= kmalloc(len
, GFP_KERNEL
);
9176 for (rlen
=0; rlen
<length
; rlen
+=len
, buf
+=len
, ofs
+=len
) {
9177 if ((length
- rlen
) < len
) {
9178 len
= length
- rlen
;
9181 ret
= ssd_ram_read(dev
, kbuf
, len
, ofs
, ctrl_idx
);
9186 if (copy_to_user(buf
, kbuf
, len
)) {
9197 case SSD_CMD_RAM_WRITE
: {
9198 struct ssd_ram_op_info ram_info
;
9201 size_t wlen
, len
= dev
->hw_info
.ram_max_len
;
9204 if (copy_from_user(&ram_info
, argp
, sizeof(struct ssd_ram_op_info
))) {
9205 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9209 ofs
= ram_info
.start
;
9210 length
= ram_info
.length
;
9212 ctrl_idx
= ram_info
.ctrl_idx
;
9214 if (ofs
>= dev
->hw_info
.ram_size
|| length
> dev
->hw_info
.ram_size
|| 0 == length
|| (ofs
+ length
) > dev
->hw_info
.ram_size
) {
9219 kbuf
= kmalloc(len
, GFP_KERNEL
);
9225 for (wlen
=0; wlen
<length
; wlen
+=len
, buf
+=len
, ofs
+=len
) {
9226 if ((length
- wlen
) < len
) {
9227 len
= length
- wlen
;
9230 if (copy_from_user(kbuf
, buf
, len
)) {
9235 ret
= ssd_ram_write(dev
, kbuf
, len
, ofs
, ctrl_idx
);
9246 case SSD_CMD_NAND_READ_ID
: {
9247 struct ssd_flash_op_info flash_info
;
9248 int chip_no
, chip_ce
, length
, ctrl_idx
;
9250 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9251 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9256 chip_no
= flash_info
.flash
;
9257 chip_ce
= flash_info
.chip
;
9258 ctrl_idx
= flash_info
.ctrl_idx
;
9259 buf
= flash_info
.buf
;
9260 length
= dev
->hw_info
.id_size
;
9262 //kbuf = kmalloc(length, GFP_KERNEL);
9263 kbuf
= kmalloc(SSD_NAND_ID_BUFF_SZ
, GFP_KERNEL
); //xx
9268 memset(kbuf
, 0, length
);
9270 ret
= ssd_nand_read_id(dev
, kbuf
, chip_no
, chip_ce
, ctrl_idx
);
9276 if (copy_to_user(buf
, kbuf
, length
)) {
9287 case SSD_CMD_NAND_READ
: { //with oob
9288 struct ssd_flash_op_info flash_info
;
9290 int flash
, chip
, page
, ctrl_idx
;
9293 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9294 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9299 flash
= flash_info
.flash
;
9300 chip
= flash_info
.chip
;
9301 page
= flash_info
.page
;
9302 buf
= flash_info
.buf
;
9303 ctrl_idx
= flash_info
.ctrl_idx
;
9305 length
= dev
->hw_info
.page_size
+ dev
->hw_info
.oob_size
;
9307 kbuf
= kmalloc(length
, GFP_KERNEL
);
9313 err
= ret
= ssd_nand_read_w_oob(dev
, kbuf
, flash
, chip
, page
, 1, ctrl_idx
);
9314 if (ret
&& (-EIO
!= ret
)) {
9319 if (copy_to_user(buf
, kbuf
, length
)) {
9331 case SSD_CMD_NAND_WRITE
: {
9332 struct ssd_flash_op_info flash_info
;
9333 int flash
, chip
, page
, ctrl_idx
;
9336 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9337 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9342 flash
= flash_info
.flash
;
9343 chip
= flash_info
.chip
;
9344 page
= flash_info
.page
;
9345 buf
= flash_info
.buf
;
9346 ctrl_idx
= flash_info
.ctrl_idx
;
9348 length
= dev
->hw_info
.page_size
+ dev
->hw_info
.oob_size
;
9350 kbuf
= kmalloc(length
, GFP_KERNEL
);
9356 if (copy_from_user(kbuf
, buf
, length
)) {
9362 ret
= ssd_nand_write(dev
, kbuf
, flash
, chip
, page
, 1, ctrl_idx
);
9372 case SSD_CMD_NAND_ERASE
: {
9373 struct ssd_flash_op_info flash_info
;
9374 int flash
, chip
, page
, ctrl_idx
;
9376 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9377 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9382 flash
= flash_info
.flash
;
9383 chip
= flash_info
.chip
;
9384 page
= flash_info
.page
;
9385 ctrl_idx
= flash_info
.ctrl_idx
;
9387 if ((page
% dev
->hw_info
.page_count
) != 0) {
9392 //hio_warn("erase fs = %llx\n", ofs);
9393 ret
= ssd_nand_erase(dev
, flash
, chip
, page
, ctrl_idx
);
9401 case SSD_CMD_NAND_READ_EXT
: { //ingore EIO
9402 struct ssd_flash_op_info flash_info
;
9404 int flash
, chip
, page
, ctrl_idx
;
9406 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9407 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9412 flash
= flash_info
.flash
;
9413 chip
= flash_info
.chip
;
9414 page
= flash_info
.page
;
9415 buf
= flash_info
.buf
;
9416 ctrl_idx
= flash_info
.ctrl_idx
;
9418 length
= dev
->hw_info
.page_size
+ dev
->hw_info
.oob_size
;
9420 kbuf
= kmalloc(length
, GFP_KERNEL
);
9426 ret
= ssd_nand_read_w_oob(dev
, kbuf
, flash
, chip
, page
, 1, ctrl_idx
);
9427 if (-EIO
== ret
) { //ingore EIO
9435 if (copy_to_user(buf
, kbuf
, length
)) {
9445 case SSD_CMD_UPDATE_BBT
: {
9446 struct ssd_flash_op_info flash_info
;
9447 int ctrl_idx
, flash
;
9449 if (copy_from_user(&flash_info
, argp
, sizeof(struct ssd_flash_op_info
))) {
9450 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9455 ctrl_idx
= flash_info
.ctrl_idx
;
9456 flash
= flash_info
.flash
;
9457 ret
= ssd_update_bbt(dev
, flash
, ctrl_idx
);
9465 case SSD_CMD_CLEAR_ALARM
:
9466 ssd_clear_alarm(dev
);
9469 case SSD_CMD_SET_ALARM
:
9474 ret
= ssd_do_reset(dev
);
9477 case SSD_CMD_RELOAD_FW
:
9479 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
9480 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FLAG
);
9481 } else if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_1_1
) {
9482 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FW
);
9487 case SSD_CMD_UNLOAD_DEV
: {
9488 if (atomic_read(&dev
->refcnt
)) {
9494 ssd_save_smart(dev
);
9496 ret
= ssd_flush(dev
);
9501 /* cleanup the block device */
9502 if (test_and_clear_bit(SSD_INIT_BD
, &dev
->state
)) {
9503 mutex_lock(&dev
->gd_mutex
);
9504 ssd_cleanup_blkdev(dev
);
9505 mutex_unlock(&dev
->gd_mutex
);
9511 case SSD_CMD_LOAD_DEV
: {
9513 if (test_bit(SSD_INIT_BD
, &dev
->state
)) {
9518 ret
= ssd_init_smart(dev
);
9520 hio_warn("%s: init info: failed\n", dev
->name
);
9524 ret
= ssd_init_blkdev(dev
);
9526 hio_warn("%s: register block device: failed\n", dev
->name
);
9529 (void)test_and_set_bit(SSD_INIT_BD
, &dev
->state
);
9534 case SSD_CMD_UPDATE_VP
: {
9536 uint32_t new_vp
, new_vp1
= 0;
9538 if (test_bit(SSD_INIT_BD
, &dev
->state
)) {
9543 if (copy_from_user(&new_vp
, argp
, sizeof(uint32_t))) {
9544 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9549 if (new_vp
> dev
->hw_info
.max_valid_pages
|| new_vp
<= 0) {
9554 while (new_vp
<= dev
->hw_info
.max_valid_pages
) {
9555 ssd_reg32_write(dev
->ctrlp
+ SSD_VALID_PAGES_REG
, new_vp
);
9557 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_VALID_PAGES_REG
);
9558 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
9559 new_vp1
= val
& 0x3FF;
9561 new_vp1
= val
& 0x7FFF;
9564 if (new_vp1
== new_vp
) {
9569 /*if (new_vp == dev->hw_info.valid_pages) {
9574 if (new_vp1
!= new_vp
|| new_vp
> dev
->hw_info
.max_valid_pages
) {
9576 ssd_reg32_write(dev
->ctrlp
+ SSD_VALID_PAGES_REG
, dev
->hw_info
.valid_pages
);
9581 if (copy_to_user(argp
, &new_vp
, sizeof(uint32_t))) {
9582 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9583 ssd_reg32_write(dev
->ctrlp
+ SSD_VALID_PAGES_REG
, dev
->hw_info
.valid_pages
);
9589 dev
->hw_info
.valid_pages
= new_vp
;
9590 dev
->hw_info
.size
= (uint64_t)dev
->hw_info
.valid_pages
* dev
->hw_info
.page_size
;
9591 dev
->hw_info
.size
*= (dev
->hw_info
.block_count
- dev
->hw_info
.reserved_blks
);
9592 dev
->hw_info
.size
*= ((uint64_t)dev
->hw_info
.nr_data_ch
* (uint64_t)dev
->hw_info
.nr_chip
* (uint64_t)dev
->hw_info
.nr_ctrl
);
9597 case SSD_CMD_FULL_RESET
: {
9598 ret
= ssd_full_reset(dev
);
9602 case SSD_CMD_GET_NR_LOG
: {
9603 if (copy_to_user(argp
, &dev
->internal_log
.nr_log
, sizeof(dev
->internal_log
.nr_log
))) {
9610 case SSD_CMD_GET_LOG
: {
9611 uint32_t length
= dev
->rom_info
.log_sz
;
9615 if (copy_to_user(buf
, dev
->internal_log
.log
, length
)) {
9623 case SSD_CMD_LOG_LEVEL
: {
9625 if (copy_from_user(&level
, argp
, sizeof(int))) {
9626 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9631 if (level
>= SSD_LOG_NR_LEVEL
|| level
< SSD_LOG_LEVEL_INFO
) {
9632 level
= SSD_LOG_LEVEL_ERR
;
9635 //just for showing log, no need to protect
9640 case SSD_CMD_OT_PROTECT
: {
9643 if (copy_from_user(&protect
, argp
, sizeof(int))) {
9644 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9649 ssd_set_ot_protect(dev
, !!protect
);
9653 case SSD_CMD_GET_OT_STATUS
: {
9654 int status
= ssd_get_ot_status(dev
, &status
);
9656 if (copy_to_user(argp
, &status
, sizeof(int))) {
9657 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9664 case SSD_CMD_CLEAR_LOG
: {
9665 ret
= ssd_clear_log(dev
);
9669 case SSD_CMD_CLEAR_SMART
: {
9670 ret
= ssd_clear_smart(dev
);
9674 case SSD_CMD_SW_LOG
: {
9675 struct ssd_sw_log_info sw_log
;
9677 if (copy_from_user(&sw_log
, argp
, sizeof(struct ssd_sw_log_info
))) {
9678 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9683 ret
= ssd_gen_swlog(dev
, sw_log
.event
, sw_log
.data
);
9687 case SSD_CMD_GET_LABEL
: {
9689 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
9694 if (copy_to_user(argp
, &dev
->label
, sizeof(struct ssd_label
))) {
9695 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9702 case SSD_CMD_GET_VERSION
: {
9703 struct ssd_version_info ver
;
9705 mutex_lock(&dev
->fw_mutex
);
9706 ret
= __ssd_get_version(dev
, &ver
);
9707 mutex_unlock(&dev
->fw_mutex
);
9712 if (copy_to_user(argp
, &ver
, sizeof(struct ssd_version_info
))) {
9713 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9720 case SSD_CMD_GET_TEMPERATURE
: {
9723 mutex_lock(&dev
->fw_mutex
);
9724 ret
= __ssd_get_temperature(dev
, &temp
);
9725 mutex_unlock(&dev
->fw_mutex
);
9730 if (copy_to_user(argp
, &temp
, sizeof(int))) {
9731 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9738 case SSD_CMD_GET_BMSTATUS
: {
9741 mutex_lock(&dev
->fw_mutex
);
9742 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
9743 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
9744 status
= SSD_BMSTATUS_WARNING
;
9746 status
= SSD_BMSTATUS_OK
;
9748 } else if(dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
) {
9749 ret
= __ssd_bm_status(dev
, &status
);
9751 status
= SSD_BMSTATUS_OK
;
9753 mutex_unlock(&dev
->fw_mutex
);
9758 if (copy_to_user(argp
, &status
, sizeof(int))) {
9759 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9766 case SSD_CMD_GET_LABEL2
: {
9770 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
9771 label
= &dev
->label
;
9772 length
= sizeof(struct ssd_label
);
9774 label
= &dev
->labelv3
;
9775 length
= sizeof(struct ssd_labelv3
);
9778 if (copy_to_user(argp
, label
, length
)) {
9786 ret
= ssd_flush(dev
);
9788 hio_warn("%s: ssd_flush: failed\n", dev
->name
);
9794 case SSD_CMD_SAVE_MD
: {
9797 if (copy_from_user(&save_md
, argp
, sizeof(int))) {
9798 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9803 dev
->save_md
= !!save_md
;
9807 case SSD_CMD_SET_WMODE
: {
9810 if (copy_from_user(&new_wmode
, argp
, sizeof(int))) {
9811 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9816 ret
= __ssd_set_wmode(dev
, new_wmode
);
9824 case SSD_CMD_GET_WMODE
: {
9825 if (copy_to_user(argp
, &dev
->wmode
, sizeof(int))) {
9826 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9834 case SSD_CMD_GET_USER_WMODE
: {
9835 if (copy_to_user(argp
, &dev
->user_wmode
, sizeof(int))) {
9836 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9844 case SSD_CMD_DEBUG
: {
9845 struct ssd_debug_info db_info
;
9852 if (copy_from_user(&db_info
, argp
, sizeof(struct ssd_debug_info
))) {
9853 hio_warn("%s: copy_from_user: failed\n", dev
->name
);
9858 if (db_info
.type
< SSD_DEBUG_NONE
|| db_info
.type
>= SSD_DEBUG_NR
) {
9864 if (db_info
.type
>= SSD_DEBUG_READ_ERR
&& db_info
.type
<= SSD_DEBUG_RW_ERR
&&
9865 (db_info
.data
.loc
.off
+ db_info
.data
.loc
.len
) > (dev
->hw_info
.size
>> 9)) {
9870 memcpy(&dev
->db_info
, &db_info
, sizeof(struct ssd_debug_info
));
9872 #ifdef SSD_OT_PROTECT
9874 if (db_info
.type
== SSD_DEBUG_NONE
) {
9875 ssd_check_temperature(dev
, SSD_OT_TEMP
);
9876 } else if (db_info
.type
== SSD_DEBUG_LOG
) {
9877 if (db_info
.data
.log
.event
== SSD_LOG_OVER_TEMP
) {
9878 dev
->ot_delay
= SSD_OT_DELAY
;
9879 } else if (db_info
.data
.log
.event
== SSD_LOG_NORMAL_TEMP
) {
9886 if (db_info
.type
== SSD_DEBUG_OFFLINE
) {
9887 test_and_clear_bit(SSD_ONLINE
, &dev
->state
);
9888 } else if (db_info
.type
== SSD_DEBUG_NONE
) {
9889 (void)test_and_set_bit(SSD_ONLINE
, &dev
->state
);
9893 if (db_info
.type
== SSD_DEBUG_LOG
&& dev
->event_call
&& dev
->gd
) {
9894 dev
->event_call(dev
->gd
, db_info
.data
.log
.event
, 0);
9900 case SSD_CMD_DRV_PARAM_INFO
: {
9901 struct ssd_drv_param_info drv_param
;
9903 memset(&drv_param
, 0, sizeof(struct ssd_drv_param_info
));
9905 drv_param
.mode
= mode
;
9906 drv_param
.status_mask
= status_mask
;
9907 drv_param
.int_mode
= int_mode
;
9908 drv_param
.threaded_irq
= threaded_irq
;
9909 drv_param
.log_level
= log_level
;
9910 drv_param
.wmode
= wmode
;
9911 drv_param
.ot_protect
= ot_protect
;
9912 drv_param
.finject
= finject
;
9914 if (copy_to_user(argp
, &drv_param
, sizeof(struct ssd_drv_param_info
))) {
9915 hio_warn("%s: copy_to_user: failed\n", dev
->name
);
9931 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
9932 static int ssd_block_ioctl(struct inode
*inode
, struct file
*file
,
9933 unsigned int cmd
, unsigned long arg
)
9935 struct ssd_device
*dev
;
9936 void __user
*argp
= (void __user
*)arg
;
9942 dev
= inode
->i_bdev
->bd_disk
->private_data
;
9947 static int ssd_block_ioctl(struct block_device
*bdev
, fmode_t mode
,
9948 unsigned int cmd
, unsigned long arg
)
9950 struct ssd_device
*dev
;
9951 void __user
*argp
= (void __user
*)arg
;
9958 dev
= bdev
->bd_disk
->private_data
;
9966 struct hd_geometry geo
;
9967 geo
.cylinders
= (dev
->hw_info
.size
& ~0x3f) >> 6;
9970 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
9971 geo
.start
= get_start_sect(inode
->i_bdev
);
9973 geo
.start
= get_start_sect(bdev
);
9975 if (copy_to_user(argp
, &geo
, sizeof(geo
))) {
9984 ret
= ssd_flush(dev
);
9986 hio_warn("%s: ssd_flush: failed\n", dev
->name
);
9994 ret
= ssd_ioctl_common(dev
, cmd
, arg
);
10005 static void ssd_free_dev(struct kref
*kref
)
10007 struct ssd_device
*dev
;
10013 dev
= container_of(kref
, struct ssd_device
, kref
);
10017 ssd_put_index(dev
->slave
, dev
->idx
);
10022 static void ssd_put(struct ssd_device
*dev
)
10024 kref_put(&dev
->kref
, ssd_free_dev
);
10027 static int ssd_get(struct ssd_device
*dev
)
10029 kref_get(&dev
->kref
);
10034 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
10035 static int ssd_block_open(struct inode
*inode
, struct file
*filp
)
10037 struct ssd_device
*dev
;
10043 dev
= inode
->i_bdev
->bd_disk
->private_data
;
10048 static int ssd_block_open(struct block_device
*bdev
, fmode_t mode
)
10050 struct ssd_device
*dev
;
10056 dev
= bdev
->bd_disk
->private_data
;
10062 /*if (!try_module_get(dev->owner))
10068 atomic_inc(&dev
->refcnt
);
10073 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
10074 static int ssd_block_release(struct inode
*inode
, struct file
*filp
)
10076 struct ssd_device
*dev
;
10082 dev
= inode
->i_bdev
->bd_disk
->private_data
;
10086 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(3,9,0))
10087 static int ssd_block_release(struct gendisk
*disk
, fmode_t mode
)
10089 struct ssd_device
*dev
;
10095 dev
= disk
->private_data
;
10100 static void ssd_block_release(struct gendisk
*disk
, fmode_t mode
)
10102 struct ssd_device
*dev
;
10108 dev
= disk
->private_data
;
10114 atomic_dec(&dev
->refcnt
);
10118 //module_put(dev->owner);
10119 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(3,9,0))
10124 static struct block_device_operations ssd_fops
= {
10125 .owner
= THIS_MODULE
,
10126 .open
= ssd_block_open
,
10127 .release
= ssd_block_release
,
10128 .ioctl
= ssd_block_ioctl
,
10129 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16))
10130 .getgeo
= ssd_block_getgeo
,
10134 static void ssd_init_trim(ssd_device_t
*dev
)
10136 #if (defined SSD_TRIM && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)))
10137 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
10140 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD
, dev
->rq
);
10142 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33)) || (defined RHEL_MAJOR && RHEL_MAJOR >= 6))
10143 dev
->rq
->limits
.discard_zeroes_data
= 1;
10144 dev
->rq
->limits
.discard_alignment
= 4096;
10145 dev
->rq
->limits
.discard_granularity
= 4096;
10147 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2_4
) {
10148 dev
->rq
->limits
.max_discard_sectors
= dev
->hw_info
.sg_max_sec
;
10150 dev
->rq
->limits
.max_discard_sectors
= (dev
->hw_info
.sg_max_sec
) * (dev
->hw_info
.cmd_max_sg
);
10155 static void ssd_cleanup_queue(struct ssd_device
*dev
)
10159 blk_cleanup_queue(dev
->rq
);
10163 static int ssd_init_queue(struct ssd_device
*dev
)
10165 dev
->rq
= blk_alloc_queue(GFP_KERNEL
);
10166 if (dev
->rq
== NULL
) {
10167 hio_warn("%s: alloc queue: failed\n ", dev
->name
);
10168 goto out_init_queue
;
10171 /* must be first */
10172 blk_queue_make_request(dev
->rq
, ssd_make_request
);
10174 #if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34)) && !(defined RHEL_MAJOR && RHEL_MAJOR == 6))
10175 blk_queue_max_hw_segments(dev
->rq
, dev
->hw_info
.cmd_max_sg
);
10176 blk_queue_max_phys_segments(dev
->rq
, dev
->hw_info
.cmd_max_sg
);
10177 blk_queue_max_sectors(dev
->rq
, dev
->hw_info
.sg_max_sec
);
10179 blk_queue_max_segments(dev
->rq
, dev
->hw_info
.cmd_max_sg
);
10180 blk_queue_max_hw_sectors(dev
->rq
, dev
->hw_info
.sg_max_sec
);
10183 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
10184 blk_queue_hardsect_size(dev
->rq
, 512);
10186 blk_queue_logical_block_size(dev
->rq
, 512);
10188 /* not work for make_request based drivers(bio) */
10189 blk_queue_max_segment_size(dev
->rq
, dev
->hw_info
.sg_max_sec
<< 9);
10191 blk_queue_bounce_limit(dev
->rq
, BLK_BOUNCE_HIGH
);
10193 dev
->rq
->queuedata
= dev
;
10195 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
10196 blk_queue_issue_flush_fn(dev
->rq
, ssd_issue_flush_fn
);
10199 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
10200 queue_flag_set_unlocked(QUEUE_FLAG_NONROT
, dev
->rq
);
10203 ssd_init_trim(dev
);
10211 static void ssd_cleanup_blkdev(struct ssd_device
*dev
)
10213 del_gendisk(dev
->gd
);
10216 static int ssd_init_blkdev(struct ssd_device
*dev
)
10222 dev
->gd
= alloc_disk(ssd_minors
);
10224 hio_warn("%s: alloc_disk fail\n", dev
->name
);
10227 dev
->gd
->major
= dev
->major
;
10228 dev
->gd
->first_minor
= dev
->idx
* ssd_minors
;
10229 dev
->gd
->fops
= &ssd_fops
;
10230 dev
->gd
->queue
= dev
->rq
;
10231 dev
->gd
->private_data
= dev
;
10232 dev
->gd
->driverfs_dev
= &dev
->pdev
->dev
;
10233 snprintf (dev
->gd
->disk_name
, sizeof(dev
->gd
->disk_name
), "%s", dev
->name
);
10235 set_capacity(dev
->gd
, dev
->hw_info
.size
>> 9);
10245 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,10))
10246 static int ssd_ioctl(struct inode
*inode
, struct file
*file
,
10247 unsigned int cmd
, unsigned long arg
)
10249 static long ssd_ioctl(struct file
*file
,
10250 unsigned int cmd
, unsigned long arg
)
10253 struct ssd_device
*dev
;
10259 dev
= file
->private_data
;
10264 return (long)ssd_ioctl_common(dev
, cmd
, arg
);
10267 static int ssd_open(struct inode
*inode
, struct file
*file
)
10269 struct ssd_device
*dev
= NULL
;
10270 struct ssd_device
*n
= NULL
;
10274 if (!inode
|| !file
) {
10278 idx
= iminor(inode
);
10280 list_for_each_entry_safe(dev
, n
, &ssd_list
, list
) {
10281 if (dev
->idx
== idx
) {
10291 file
->private_data
= dev
;
10298 static int ssd_release(struct inode
*inode
, struct file
*file
)
10300 struct ssd_device
*dev
;
10306 dev
= file
->private_data
;
10313 file
->private_data
= NULL
;
10318 static struct file_operations ssd_cfops
= {
10319 .owner
= THIS_MODULE
,
10321 .release
= ssd_release
,
10322 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,10))
10323 .ioctl
= ssd_ioctl
,
10325 .unlocked_ioctl
= ssd_ioctl
,
10329 static void ssd_cleanup_chardev(struct ssd_device
*dev
)
10335 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
10336 class_simple_device_remove(MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
));
10337 devfs_remove("c%s", dev
->name
);
10338 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,14))
10339 class_device_destroy(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
));
10340 devfs_remove("c%s", dev
->name
);
10341 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17))
10342 class_device_destroy(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
));
10343 devfs_remove("c%s", dev
->name
);
10344 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,24))
10345 class_device_destroy(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
));
10347 device_destroy(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
));
10351 static int ssd_init_chardev(struct ssd_device
*dev
)
10359 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
10360 ret
= devfs_mk_cdev(MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), S_IFCHR
|S_IRUSR
|S_IWUSR
, "c%s", dev
->name
);
10364 class_simple_device_add(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
10366 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,14))
10367 ret
= devfs_mk_cdev(MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), S_IFCHR
|S_IRUSR
|S_IWUSR
, "c%s", dev
->name
);
10371 class_device_create(ssd_class
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
10373 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17))
10374 ret
= devfs_mk_cdev(MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), S_IFCHR
|S_IRUSR
|S_IWUSR
, "c%s", dev
->name
);
10378 class_device_create(ssd_class
, NULL
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
10380 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,24))
10381 class_device_create(ssd_class
, NULL
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
10382 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
10383 device_create(ssd_class
, NULL
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), "c%s", dev
->name
);
10384 #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
10385 device_create_drvdata(ssd_class
, NULL
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
10387 device_create(ssd_class
, NULL
, MKDEV((dev_t
)dev
->cmajor
, (dev_t
)dev
->idx
), NULL
, "c%s", dev
->name
);
10393 static int ssd_check_hw(struct ssd_device
*dev
)
10395 uint32_t test_data
= 0x55AA5AA5;
10396 uint32_t read_data
;
10398 ssd_reg32_write(dev
->ctrlp
+ SSD_BRIDGE_TEST_REG
, test_data
);
10399 read_data
= ssd_reg32_read(dev
->ctrlp
+ SSD_BRIDGE_TEST_REG
);
10400 if (read_data
!= ~(test_data
)) {
10401 //hio_warn("%s: check bridge error: %#x\n", dev->name, read_data);
10408 static int ssd_check_fw(struct ssd_device
*dev
)
10413 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_3
) {
10417 for (i
=0; i
<SSD_CONTROLLER_WAIT
; i
++) {
10418 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_HW_STATUS_REG
);
10419 if ((val
& 0x1) && ((val
>> 8) & 0x1)) {
10423 msleep(SSD_INIT_WAIT
);
10426 if (!(val
& 0x1)) {
10427 /* controller fw status */
10428 hio_warn("%s: controller firmware load failed: %#x\n", dev
->name
, val
);
10430 } else if (!((val
>> 8) & 0x1)) {
10431 /* controller state */
10432 hio_warn("%s: controller state error: %#x\n", dev
->name
, val
);
10436 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_RELOAD_FW_REG
);
10438 dev
->reload_fw
= 1;
10444 static int ssd_init_fw_info(struct ssd_device
*dev
)
10449 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BRIDGE_VER_REG
);
10450 dev
->hw_info
.bridge_ver
= val
& 0xFFF;
10451 if (dev
->hw_info
.bridge_ver
< SSD_FW_MIN
) {
10452 hio_warn("%s: bridge firmware version %03X is not supported\n", dev
->name
, dev
->hw_info
.bridge_ver
);
10455 hio_info("%s: bridge firmware version: %03X\n", dev
->name
, dev
->hw_info
.bridge_ver
);
10457 ret
= ssd_check_fw(dev
);
10463 /* skip error if not in standard mode */
10464 if (mode
!= SSD_DRV_MODE_STANDARD
) {
10470 static int ssd_check_clock(struct ssd_device
*dev
)
10475 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_3
) {
10479 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_HW_STATUS_REG
);
10482 if (!((val
>> 4 ) & 0x1)) {
10483 if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_166M_LOST
), &dev
->hwmon
)) {
10484 hio_warn("%s: 166MHz clock losed: %#x\n", dev
->name
, val
);
10485 ssd_gen_swlog(dev
, SSD_LOG_CLK_FAULT
, val
);
10490 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
10491 if (!((val
>> 5 ) & 0x1)) {
10492 if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_166M_SKEW
), &dev
->hwmon
)) {
10493 hio_warn("%s: 166MHz clock is skew: %#x\n", dev
->name
, val
);
10494 ssd_gen_swlog(dev
, SSD_LOG_CLK_FAULT
, val
);
10498 if (!((val
>> 6 ) & 0x1)) {
10499 if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_156M_LOST
), &dev
->hwmon
)) {
10500 hio_warn("%s: 156.25MHz clock lost: %#x\n", dev
->name
, val
);
10501 ssd_gen_swlog(dev
, SSD_LOG_CLK_FAULT
, val
);
10505 if (!((val
>> 7 ) & 0x1)) {
10506 if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_156M_SKEW
), &dev
->hwmon
)) {
10507 hio_warn("%s: 156.25MHz clock is skew: %#x\n", dev
->name
, val
);
10508 ssd_gen_swlog(dev
, SSD_LOG_CLK_FAULT
, val
);
10517 static int ssd_check_volt(struct ssd_device
*dev
)
10524 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
10528 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
10530 if (!test_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V0
), &dev
->hwmon
)) {
10531 val
= ssd_reg_read(dev
->ctrlp
+ SSD_FPGA_1V0_REG0
+ i
* SSD_CTRL_REG_ZONE_SZ
);
10532 adc_val
= SSD_FPGA_VOLT_MAX(val
);
10533 if (adc_val
< SSD_FPGA_1V0_ADC_MIN
|| adc_val
> SSD_FPGA_1V0_ADC_MAX
) {
10534 (void)test_and_set_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V0
), &dev
->hwmon
);
10535 hio_warn("%s: controller %d 1.0V fault: %d mV.\n", dev
->name
, i
, SSD_FPGA_VOLT(adc_val
));
10536 ssd_gen_swlog(dev
, SSD_LOG_VOLT_FAULT
, SSD_VOLT_LOG_DATA(SSD_FPGA_1V0
, i
, adc_val
));
10540 adc_val
= SSD_FPGA_VOLT_MIN(val
);
10541 if (adc_val
< SSD_FPGA_1V0_ADC_MIN
|| adc_val
> SSD_FPGA_1V0_ADC_MAX
) {
10542 (void)test_and_set_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V0
), &dev
->hwmon
);
10543 hio_warn("%s: controller %d 1.0V fault: %d mV.\n", dev
->name
, i
, SSD_FPGA_VOLT(adc_val
));
10544 ssd_gen_swlog(dev
, SSD_LOG_VOLT_FAULT
, SSD_VOLT_LOG_DATA(SSD_FPGA_1V0
, i
, adc_val
));
10550 if (!test_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V8
), &dev
->hwmon
)) {
10551 val
= ssd_reg_read(dev
->ctrlp
+ SSD_FPGA_1V8_REG0
+ i
* SSD_CTRL_REG_ZONE_SZ
);
10552 adc_val
= SSD_FPGA_VOLT_MAX(val
);
10553 if (adc_val
< SSD_FPGA_1V8_ADC_MIN
|| adc_val
> SSD_FPGA_1V8_ADC_MAX
) {
10554 (void)test_and_set_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V8
), &dev
->hwmon
);
10555 hio_warn("%s: controller %d 1.8V fault: %d mV.\n", dev
->name
, i
, SSD_FPGA_VOLT(adc_val
));
10556 ssd_gen_swlog(dev
, SSD_LOG_VOLT_FAULT
, SSD_VOLT_LOG_DATA(SSD_FPGA_1V8
, i
, adc_val
));
10560 adc_val
= SSD_FPGA_VOLT_MIN(val
);
10561 if (adc_val
< SSD_FPGA_1V8_ADC_MIN
|| adc_val
> SSD_FPGA_1V8_ADC_MAX
) {
10562 (void)test_and_set_bit(SSD_HWMON_FPGA(i
, SSD_FPGA_1V8
), &dev
->hwmon
);
10563 hio_warn("%s: controller %d 1.8V fault: %d mV.\n", dev
->name
, i
, SSD_FPGA_VOLT(adc_val
));
10564 ssd_gen_swlog(dev
, SSD_LOG_VOLT_FAULT
, SSD_VOLT_LOG_DATA(SSD_FPGA_1V8
, i
, adc_val
));
10573 static int ssd_check_reset_sync(struct ssd_device
*dev
)
10577 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_3
) {
10581 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_HW_STATUS_REG
);
10582 if (!((val
>> 8) & 0x1)) {
10583 /* controller state */
10584 hio_warn("%s: controller state error: %#x\n", dev
->name
, val
);
10588 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
10592 if (((val
>> 9 ) & 0x1)) {
10593 hio_warn("%s: controller reset asynchronously: %#x\n", dev
->name
, val
);
10594 ssd_gen_swlog(dev
, SSD_LOG_CTRL_RST_SYNC
, val
);
10601 static int ssd_check_hw_bh(struct ssd_device
*dev
)
10605 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_3
) {
10610 ret
= ssd_check_clock(dev
);
10616 /* skip error if not in standard mode */
10617 if (mode
!= SSD_DRV_MODE_STANDARD
) {
10623 static int ssd_check_controller(struct ssd_device
*dev
)
10627 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_1_3
) {
10632 ret
= ssd_check_reset_sync(dev
);
10638 /* skip error if not in standard mode */
10639 if (mode
!= SSD_DRV_MODE_STANDARD
) {
10645 static int ssd_check_controller_bh(struct ssd_device
*dev
)
10647 uint32_t test_data
= 0x55AA5AA5;
10649 int reg_base
, reg_sz
;
10654 if (mode
!= SSD_DRV_MODE_STANDARD
) {
10659 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_READY_REG
);
10661 hio_warn("%s: controller 0 not ready\n", dev
->name
);
10665 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
10666 reg_base
= SSD_CTRL_TEST_REG0
+ i
* SSD_CTRL_TEST_REG_SZ
;
10667 ssd_reg32_write(dev
->ctrlp
+ reg_base
, test_data
);
10668 val
= ssd_reg32_read(dev
->ctrlp
+ reg_base
);
10669 if (val
!= ~(test_data
)) {
10670 hio_warn("%s: check controller %d error: %#x\n", dev
->name
, i
, val
);
10676 ret
= ssd_check_volt(dev
);
10682 if (dev
->protocol_info
.ver
> SSD_PROTOCOL_V3
) {
10683 reg_base
= SSD_PV3_RAM_STATUS_REG0
;
10684 reg_sz
= SSD_PV3_RAM_STATUS_REG_SZ
;
10686 for (i
=0; i
<dev
->hw_info
.nr_ctrl
; i
++) {
10688 val
= ssd_reg32_read(dev
->ctrlp
+ reg_base
);
10690 if (!((val
>> 1) & 0x1)) {
10692 if (init_wait
<= SSD_RAM_INIT_MAX_WAIT
) {
10693 msleep(SSD_INIT_WAIT
);
10694 goto check_ram_status
;
10696 hio_warn("%s: controller %d ram init failed: %#x\n", dev
->name
, i
, val
);
10697 ssd_gen_swlog(dev
, SSD_LOG_DDR_INIT_ERR
, i
);
10702 reg_base
+= reg_sz
;
10707 for (i
=0; i
<SSD_CH_INFO_MAX_WAIT
; i
++) {
10708 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_CH_INFO_REG
);
10709 if (!((val
>> 31) & 0x1)) {
10713 msleep(SSD_INIT_WAIT
);
10715 if ((val
>> 31) & 0x1) {
10716 hio_warn("%s: channel info init failed: %#x\n", dev
->name
, val
);
10723 static int ssd_init_protocol_info(struct ssd_device
*dev
)
10727 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_PROTOCOL_VER_REG
);
10728 if (val
== (uint32_t)-1) {
10729 hio_warn("%s: protocol version error: %#x\n", dev
->name
, val
);
10732 dev
->protocol_info
.ver
= val
;
10734 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
10735 dev
->protocol_info
.init_state_reg
= SSD_INIT_STATE_REG0
;
10736 dev
->protocol_info
.init_state_reg_sz
= SSD_INIT_STATE_REG_SZ
;
10738 dev
->protocol_info
.chip_info_reg
= SSD_CHIP_INFO_REG0
;
10739 dev
->protocol_info
.chip_info_reg_sz
= SSD_CHIP_INFO_REG_SZ
;
10741 dev
->protocol_info
.init_state_reg
= SSD_PV3_INIT_STATE_REG0
;
10742 dev
->protocol_info
.init_state_reg_sz
= SSD_PV3_INIT_STATE_REG_SZ
;
10744 dev
->protocol_info
.chip_info_reg
= SSD_PV3_CHIP_INFO_REG0
;
10745 dev
->protocol_info
.chip_info_reg_sz
= SSD_PV3_CHIP_INFO_REG_SZ
;
10751 static int ssd_init_hw_info(struct ssd_device
*dev
)
10759 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_RESP_INFO_REG
);
10760 dev
->hw_info
.resp_ptr_sz
= 16 * (1U << (val
& 0xFF));
10761 dev
->hw_info
.resp_msg_sz
= 16 * (1U << ((val
>> 8) & 0xFF));
10763 if (0 == dev
->hw_info
.resp_ptr_sz
|| 0 == dev
->hw_info
.resp_msg_sz
) {
10764 hio_warn("%s: response info error\n", dev
->name
);
10769 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BRIDGE_INFO_REG
);
10770 dev
->hw_info
.cmd_fifo_sz
= 1U << ((val
>> 4) & 0xF);
10771 dev
->hw_info
.cmd_max_sg
= 1U << ((val
>> 8) & 0xF);
10772 dev
->hw_info
.sg_max_sec
= 1U << ((val
>> 12) & 0xF);
10773 dev
->hw_info
.cmd_fifo_sz_mask
= dev
->hw_info
.cmd_fifo_sz
- 1;
10775 if (0 == dev
->hw_info
.cmd_fifo_sz
|| 0 == dev
->hw_info
.cmd_max_sg
|| 0 == dev
->hw_info
.sg_max_sec
) {
10776 hio_warn("%s: cmd info error\n", dev
->name
);
10782 if (ssd_check_hw_bh(dev
)) {
10783 hio_warn("%s: check hardware status failed\n", dev
->name
);
10788 if (ssd_check_controller(dev
)) {
10789 hio_warn("%s: check controller state failed\n", dev
->name
);
10794 /* nr controller : read again*/
10795 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BRIDGE_INFO_REG
);
10796 dev
->hw_info
.nr_ctrl
= (val
>> 16) & 0xF;
10798 /* nr ctrl configured */
10799 nr_ctrl
= (val
>> 20) & 0xF;
10800 if (0 == dev
->hw_info
.nr_ctrl
) {
10801 hio_warn("%s: nr controller error: %u\n", dev
->name
, dev
->hw_info
.nr_ctrl
);
10804 } else if (0 != nr_ctrl
&& nr_ctrl
!= dev
->hw_info
.nr_ctrl
) {
10805 hio_warn("%s: nr controller error: configured %u but found %u\n", dev
->name
, nr_ctrl
, dev
->hw_info
.nr_ctrl
);
10806 if (mode
<= SSD_DRV_MODE_STANDARD
) {
10812 if (ssd_check_controller_bh(dev
)) {
10813 hio_warn("%s: check controller failed\n", dev
->name
);
10818 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_PCB_VER_REG
);
10819 dev
->hw_info
.pcb_ver
= (uint8_t) ((val
>> 4) & 0xF) + 'A' -1;
10820 if ((val
& 0xF) != 0xF) {
10821 dev
->hw_info
.upper_pcb_ver
= (uint8_t) (val
& 0xF) + 'A' -1;
10824 if (dev
->hw_info
.pcb_ver
< 'A' || (0 != dev
->hw_info
.upper_pcb_ver
&& dev
->hw_info
.upper_pcb_ver
< 'A')) {
10825 hio_warn("%s: PCB version error: %#x %#x\n", dev
->name
, dev
->hw_info
.pcb_ver
, dev
->hw_info
.upper_pcb_ver
);
10831 if (mode
<= SSD_DRV_MODE_DEBUG
) {
10832 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_CH_INFO_REG
);
10833 dev
->hw_info
.nr_data_ch
= val
& 0xFF;
10834 dev
->hw_info
.nr_ch
= dev
->hw_info
.nr_data_ch
+ ((val
>> 8) & 0xFF);
10835 dev
->hw_info
.nr_chip
= (val
>> 16) & 0xFF;
10837 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
10838 dev
->hw_info
.max_ch
= 1;
10839 while (dev
->hw_info
.max_ch
< dev
->hw_info
.nr_ch
) dev
->hw_info
.max_ch
<<= 1;
10841 /* set max channel 32 */
10842 dev
->hw_info
.max_ch
= 32;
10845 if (0 == dev
->hw_info
.nr_chip
) {
10847 dev
->hw_info
.nr_chip
= 1;
10851 dev
->hw_info
.id_size
= SSD_NAND_ID_SZ
;
10852 dev
->hw_info
.max_ce
= SSD_NAND_MAX_CE
;
10854 if (0 == dev
->hw_info
.nr_data_ch
|| 0 == dev
->hw_info
.nr_ch
|| 0 == dev
->hw_info
.nr_chip
) {
10855 hio_warn("%s: channel info error: data_ch %u ch %u chip %u\n", dev
->name
, dev
->hw_info
.nr_data_ch
, dev
->hw_info
.nr_ch
, dev
->hw_info
.nr_chip
);
10862 if (mode
<= SSD_DRV_MODE_DEBUG
) {
10863 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_RAM_INFO_REG
);
10864 dev
->hw_info
.ram_size
= 0x4000000ull
* (1ULL << (val
& 0xF));
10865 dev
->hw_info
.ram_align
= 1U << ((val
>> 12) & 0xF);
10866 if (dev
->hw_info
.ram_align
< SSD_RAM_ALIGN
) {
10867 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
10868 dev
->hw_info
.ram_align
= SSD_RAM_ALIGN
;
10870 hio_warn("%s: ram align error: %u\n", dev
->name
, dev
->hw_info
.ram_align
);
10875 dev
->hw_info
.ram_max_len
= 0x1000 * (1U << ((val
>> 16) & 0xF));
10877 if (0 == dev
->hw_info
.ram_size
|| 0 == dev
->hw_info
.ram_align
|| 0 == dev
->hw_info
.ram_max_len
|| dev
->hw_info
.ram_align
> dev
->hw_info
.ram_max_len
) {
10878 hio_warn("%s: ram info error\n", dev
->name
);
10883 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
10884 dev
->hw_info
.log_sz
= SSD_LOG_MAX_SZ
;
10886 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_LOG_INFO_REG
);
10887 dev
->hw_info
.log_sz
= 0x1000 * (1U << (val
& 0xFF));
10889 if (0 == dev
->hw_info
.log_sz
) {
10890 hio_warn("%s: log size error\n", dev
->name
);
10895 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BBT_BASE_REG
);
10896 dev
->hw_info
.bbt_base
= 0x40000ull
* (val
& 0xFFFF);
10897 dev
->hw_info
.bbt_size
= 0x40000 * (((val
>> 16) & 0xFFFF) + 1) / (dev
->hw_info
.max_ch
* dev
->hw_info
.nr_chip
);
10898 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
10899 if (dev
->hw_info
.bbt_base
> dev
->hw_info
.ram_size
|| 0 == dev
->hw_info
.bbt_size
) {
10900 hio_warn("%s: bbt info error\n", dev
->name
);
10906 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_ECT_BASE_REG
);
10907 dev
->hw_info
.md_base
= 0x40000ull
* (val
& 0xFFFF);
10908 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
10909 dev
->hw_info
.md_size
= 0x40000 * (((val
>> 16) & 0xFFF) + 1) / (dev
->hw_info
.max_ch
* dev
->hw_info
.nr_chip
);
10911 dev
->hw_info
.md_size
= 0x40000 * (((val
>> 16) & 0xFFF) + 1) / (dev
->hw_info
.nr_chip
);
10913 dev
->hw_info
.md_entry_sz
= 8 * (1U << ((val
>> 28) & 0xF));
10914 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3
) {
10915 if (dev
->hw_info
.md_base
> dev
->hw_info
.ram_size
|| 0 == dev
->hw_info
.md_size
||
10916 0 == dev
->hw_info
.md_entry_sz
|| dev
->hw_info
.md_entry_sz
> dev
->hw_info
.md_size
) {
10917 hio_warn("%s: md info error\n", dev
->name
);
10923 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
10924 dev
->hw_info
.nand_wbuff_base
= dev
->hw_info
.ram_size
+ 1;
10926 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_NAND_BUFF_BASE
);
10927 dev
->hw_info
.nand_wbuff_base
= 0x8000ull
* val
;
10932 if (mode
<= SSD_DRV_MODE_DEBUG
) {
10933 if (dev
->hw_info
.nr_ctrl
> 1) {
10934 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_CTRL_VER_REG
);
10935 dev
->hw_info
.ctrl_ver
= val
& 0xFFF;
10936 hio_info("%s: controller firmware version: %03X\n", dev
->name
, dev
->hw_info
.ctrl_ver
);
10939 val64
= ssd_reg_read(dev
->ctrlp
+ SSD_FLASH_INFO_REG0
);
10940 dev
->hw_info
.nand_vendor_id
= ((val64
>> 56) & 0xFF);
10941 dev
->hw_info
.nand_dev_id
= ((val64
>> 48) & 0xFF);
10943 dev
->hw_info
.block_count
= (((val64
>> 32) & 0xFFFF) + 1);
10944 dev
->hw_info
.page_count
= ((val64
>>16) & 0xFFFF);
10945 dev
->hw_info
.page_size
= (val64
& 0xFFFF);
10947 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_BB_INFO_REG
);
10948 dev
->hw_info
.bbf_pages
= val
& 0xFF;
10949 dev
->hw_info
.bbf_seek
= (val
>> 8) & 0x1;
10951 if (0 == dev
->hw_info
.block_count
|| 0 == dev
->hw_info
.page_count
|| 0 == dev
->hw_info
.page_size
|| dev
->hw_info
.block_count
> INT_MAX
) {
10952 hio_warn("%s: flash info error\n", dev
->name
);
10958 dev
->hw_info
.oob_size
= SSD_NAND_OOB_SZ
; //(dev->hw_info.page_size) >> 5;
10960 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_VALID_PAGES_REG
);
10961 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
10962 dev
->hw_info
.valid_pages
= val
& 0x3FF;
10963 dev
->hw_info
.max_valid_pages
= (val
>>20) & 0x3FF;
10965 dev
->hw_info
.valid_pages
= val
& 0x7FFF;
10966 dev
->hw_info
.max_valid_pages
= (val
>>15) & 0x7FFF;
10968 if (0 == dev
->hw_info
.valid_pages
|| 0 == dev
->hw_info
.max_valid_pages
||
10969 dev
->hw_info
.valid_pages
> dev
->hw_info
.max_valid_pages
|| dev
->hw_info
.max_valid_pages
> dev
->hw_info
.page_count
) {
10970 hio_warn("%s: valid page info error: valid_pages %d, max_valid_pages %d\n", dev
->name
, dev
->hw_info
.valid_pages
, dev
->hw_info
.max_valid_pages
);
10975 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_RESERVED_BLKS_REG
);
10976 dev
->hw_info
.reserved_blks
= val
& 0xFFFF;
10977 dev
->hw_info
.md_reserved_blks
= (val
>> 16) & 0xFF;
10978 if (dev
->protocol_info
.ver
<= SSD_PROTOCOL_V3
) {
10979 dev
->hw_info
.md_reserved_blks
= SSD_BBT_RESERVED
;
10981 if (dev
->hw_info
.reserved_blks
> dev
->hw_info
.block_count
|| dev
->hw_info
.md_reserved_blks
> dev
->hw_info
.block_count
) {
10982 hio_warn("%s: reserved blocks info error: reserved_blks %d, md_reserved_blks %d\n", dev
->name
, dev
->hw_info
.reserved_blks
, dev
->hw_info
.md_reserved_blks
);
10989 if (mode
< SSD_DRV_MODE_DEBUG
) {
10990 dev
->hw_info
.size
= (uint64_t)dev
->hw_info
.valid_pages
* dev
->hw_info
.page_size
;
10991 dev
->hw_info
.size
*= (dev
->hw_info
.block_count
- dev
->hw_info
.reserved_blks
);
10992 dev
->hw_info
.size
*= ((uint64_t)dev
->hw_info
.nr_data_ch
* (uint64_t)dev
->hw_info
.nr_chip
* (uint64_t)dev
->hw_info
.nr_ctrl
);
10995 /* extend hardware info */
10996 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_PCB_VER_REG
);
10997 dev
->hw_info_ext
.board_type
= (val
>> 24) & 0xF;
10999 dev
->hw_info_ext
.form_factor
= SSD_FORM_FACTOR_FHHL
;
11000 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2_1
) {
11001 dev
->hw_info_ext
.form_factor
= (val
>> 31) & 0x1;
11004 dev->hw_info_ext.cap_type = (val >> 28) & 0x3;
11005 if (SSD_BM_CAP_VINA != dev->hw_info_ext.cap_type && SSD_BM_CAP_JH != dev->hw_info_ext.cap_type) {
11006 dev->hw_info_ext.cap_type = SSD_BM_CAP_VINA;
11009 /* power loss protect */
11010 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_PLP_INFO_REG
);
11011 dev
->hw_info_ext
.plp_type
= (val
& 0x3);
11012 if (dev
->protocol_info
.ver
>= SSD_PROTOCOL_V3_2
) {
11014 dev
->hw_info_ext
.cap_type
= ((val
>> 2)& 0x1);
11018 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_CH_INFO_REG
);
11019 dev
->hw_info_ext
.work_mode
= (val
>> 25) & 0x1;
11022 /* skip error if not in standard mode */
11023 if (mode
!= SSD_DRV_MODE_STANDARD
) {
11029 static void ssd_cleanup_response(struct ssd_device
*dev
)
11031 int resp_msg_sz
= dev
->hw_info
.resp_msg_sz
* dev
->hw_info
.cmd_fifo_sz
* SSD_MSIX_VEC
;
11032 int resp_ptr_sz
= dev
->hw_info
.resp_ptr_sz
* SSD_MSIX_VEC
;
11034 pci_free_consistent(dev
->pdev
, resp_ptr_sz
, dev
->resp_ptr_base
, dev
->resp_ptr_base_dma
);
11035 pci_free_consistent(dev
->pdev
, resp_msg_sz
, dev
->resp_msg_base
, dev
->resp_msg_base_dma
);
11038 static int ssd_init_response(struct ssd_device
*dev
)
11040 int resp_msg_sz
= dev
->hw_info
.resp_msg_sz
* dev
->hw_info
.cmd_fifo_sz
* SSD_MSIX_VEC
;
11041 int resp_ptr_sz
= dev
->hw_info
.resp_ptr_sz
* SSD_MSIX_VEC
;
11043 dev
->resp_msg_base
= pci_alloc_consistent(dev
->pdev
, resp_msg_sz
, &(dev
->resp_msg_base_dma
));
11044 if (!dev
->resp_msg_base
) {
11045 hio_warn("%s: unable to allocate resp msg DMA buffer\n", dev
->name
);
11046 goto out_alloc_resp_msg
;
11048 memset(dev
->resp_msg_base
, 0xFF, resp_msg_sz
);
11050 dev
->resp_ptr_base
= pci_alloc_consistent(dev
->pdev
, resp_ptr_sz
, &(dev
->resp_ptr_base_dma
));
11051 if (!dev
->resp_ptr_base
){
11052 hio_warn("%s: unable to allocate resp ptr DMA buffer\n", dev
->name
);
11053 goto out_alloc_resp_ptr
;
11055 memset(dev
->resp_ptr_base
, 0, resp_ptr_sz
);
11056 dev
->resp_idx
= *(uint32_t *)(dev
->resp_ptr_base
) = dev
->hw_info
.cmd_fifo_sz
* 2 - 1;
11058 ssd_reg_write(dev
->ctrlp
+ SSD_RESP_FIFO_REG
, dev
->resp_msg_base_dma
);
11059 ssd_reg_write(dev
->ctrlp
+ SSD_RESP_PTR_REG
, dev
->resp_ptr_base_dma
);
11063 out_alloc_resp_ptr
:
11064 pci_free_consistent(dev
->pdev
, resp_msg_sz
, dev
->resp_msg_base
, dev
->resp_msg_base_dma
);
11065 out_alloc_resp_msg
:
11069 static int ssd_cleanup_cmd(struct ssd_device
*dev
)
11071 int msg_sz
= ALIGN(sizeof(struct ssd_rw_msg
) + (dev
->hw_info
.cmd_max_sg
- 1) * sizeof(struct ssd_sg_entry
), SSD_DMA_ALIGN
);
11074 for (i
=0; i
<(int)dev
->hw_info
.cmd_fifo_sz
; i
++) {
11075 kfree(dev
->cmd
[i
].sgl
);
11078 pci_free_consistent(dev
->pdev
, (msg_sz
* dev
->hw_info
.cmd_fifo_sz
), dev
->msg_base
, dev
->msg_base_dma
);
11082 static int ssd_init_cmd(struct ssd_device
*dev
)
11084 int sgl_sz
= sizeof(struct scatterlist
) * dev
->hw_info
.cmd_max_sg
;
11085 int cmd_sz
= sizeof(struct ssd_cmd
) * dev
->hw_info
.cmd_fifo_sz
;
11086 int msg_sz
= ALIGN(sizeof(struct ssd_rw_msg
) + (dev
->hw_info
.cmd_max_sg
- 1) * sizeof(struct ssd_sg_entry
), SSD_DMA_ALIGN
);
11089 spin_lock_init(&dev
->cmd_lock
);
11091 dev
->msg_base
= pci_alloc_consistent(dev
->pdev
, (msg_sz
* dev
->hw_info
.cmd_fifo_sz
), &dev
->msg_base_dma
);
11092 if (!dev
->msg_base
) {
11093 hio_warn("%s: can not alloc cmd msg\n", dev
->name
);
11094 goto out_alloc_msg
;
11097 dev
->cmd
= kmalloc(cmd_sz
, GFP_KERNEL
);
11099 hio_warn("%s: can not alloc cmd\n", dev
->name
);
11100 goto out_alloc_cmd
;
11102 memset(dev
->cmd
, 0, cmd_sz
);
11104 for (i
=0; i
<(int)dev
->hw_info
.cmd_fifo_sz
; i
++) {
11105 dev
->cmd
[i
].sgl
= kmalloc(sgl_sz
, GFP_KERNEL
);
11106 if (!dev
->cmd
[i
].sgl
) {
11107 hio_warn("%s: can not alloc cmd sgl %d\n", dev
->name
, i
);
11108 goto out_alloc_sgl
;
11111 dev
->cmd
[i
].msg
= dev
->msg_base
+ (msg_sz
* i
);
11112 dev
->cmd
[i
].msg_dma
= dev
->msg_base_dma
+ ((dma_addr_t
)msg_sz
* i
);
11114 dev
->cmd
[i
].dev
= dev
;
11115 dev
->cmd
[i
].tag
= i
;
11116 dev
->cmd
[i
].flag
= 0;
11118 INIT_LIST_HEAD(&dev
->cmd
[i
].list
);
11121 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3
) {
11122 dev
->scmd
= ssd_dispatch_cmd
;
11124 ssd_reg_write(dev
->ctrlp
+ SSD_MSG_BASE_REG
, dev
->msg_base_dma
);
11126 dev
->scmd
= ssd_send_cmd_db
;
11128 dev
->scmd
= ssd_send_cmd
;
11135 for (i
--; i
>=0; i
--) {
11136 kfree(dev
->cmd
[i
].sgl
);
11140 pci_free_consistent(dev
->pdev
, (msg_sz
* dev
->hw_info
.cmd_fifo_sz
), dev
->msg_base
, dev
->msg_base_dma
);
11145 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30))
11146 static irqreturn_t
ssd_interrupt_check(int irq
, void *dev_id
)
11148 struct ssd_queue
*queue
= (struct ssd_queue
*)dev_id
;
11150 if (*(uint32_t *)queue
->resp_ptr
== queue
->resp_idx
) {
11154 return IRQ_WAKE_THREAD
;
11157 static irqreturn_t
ssd_interrupt_threaded(int irq
, void *dev_id
)
11159 struct ssd_queue
*queue
= (struct ssd_queue
*)dev_id
;
11160 struct ssd_device
*dev
= (struct ssd_device
*)queue
->dev
;
11161 struct ssd_cmd
*cmd
;
11162 union ssd_response_msq __msg
;
11163 union ssd_response_msq
*msg
= &__msg
;
11165 uint32_t resp_idx
= queue
->resp_idx
;
11166 uint32_t new_resp_idx
= *(uint32_t *)queue
->resp_ptr
;
11167 uint32_t end_resp_idx
;
11169 if (unlikely(resp_idx
== new_resp_idx
)) {
11173 end_resp_idx
= new_resp_idx
& queue
->resp_idx_mask
;
11176 resp_idx
= (resp_idx
+ 1) & queue
->resp_idx_mask
;
11179 u64_msg
= (uint64_t *)(queue
->resp_msg
+ queue
->resp_msg_sz
* resp_idx
);
11180 msg
->u64_msg
= *u64_msg
;
11182 if (unlikely(msg
->u64_msg
== (uint64_t)(-1))) {
11183 hio_err("%s: empty resp msg: queue %d idx %u\n", dev
->name
, queue
->idx
, resp_idx
);
11186 /* clear the resp msg */
11187 *u64_msg
= (uint64_t)(-1);
11189 cmd
= &queue
->cmd
[msg
->resp_msg
.tag
];
11190 /*if (unlikely(!cmd->bio)) {
11191 printk(KERN_WARNING "%s: unknown tag %d fun %#x\n",
11192 dev->name, msg->resp_msg.tag, msg->resp_msg.fun);
11196 if(unlikely(msg
->resp_msg
.status
& (uint32_t)status_mask
)) {
11197 cmd
->errors
= -EIO
;
11201 cmd
->nr_log
= msg
->log_resp_msg
.nr_log
;
11205 if (unlikely(msg
->resp_msg
.fun
!= SSD_FUNC_READ_LOG
&& msg
->resp_msg
.log
> 0)) {
11206 (void)test_and_set_bit(SSD_LOG_HW
, &dev
->state
);
11207 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11208 queue_work(dev
->workq
, &dev
->log_work
);
11212 if (unlikely(msg
->resp_msg
.status
)) {
11213 if (msg
->resp_msg
.fun
== SSD_FUNC_READ
|| msg
->resp_msg
.fun
== SSD_FUNC_WRITE
) {
11214 hio_err("%s: I/O error %d: tag %d fun %#x\n",
11215 dev
->name
, msg
->resp_msg
.status
, msg
->resp_msg
.tag
, msg
->resp_msg
.fun
);
11218 ssd_set_alarm(dev
);
11219 queue
->io_stat
.nr_rwerr
++;
11220 ssd_gen_swlog(dev
, SSD_LOG_EIO
, msg
->u32_msg
[0]);
11222 hio_info("%s: CMD error %d: tag %d fun %#x\n",
11223 dev
->name
, msg
->resp_msg
.status
, msg
->resp_msg
.tag
, msg
->resp_msg
.fun
);
11225 ssd_gen_swlog(dev
, SSD_LOG_ECMD
, msg
->u32_msg
[0]);
11227 queue
->io_stat
.nr_ioerr
++;
11230 if (msg
->resp_msg
.fun
== SSD_FUNC_READ
||
11231 msg
->resp_msg
.fun
== SSD_FUNC_NAND_READ_WOOB
||
11232 msg
->resp_msg
.fun
== SSD_FUNC_NAND_READ
) {
11234 queue
->ecc_info
.bitflip
[msg
->resp_msg
.bitflip
]++;
11236 }while (resp_idx
!= end_resp_idx
);
11238 queue
->resp_idx
= new_resp_idx
;
11240 return IRQ_HANDLED
;
11244 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
11245 static irqreturn_t
ssd_interrupt(int irq
, void *dev_id
, struct pt_regs
*regs
)
11247 static irqreturn_t
ssd_interrupt(int irq
, void *dev_id
)
11250 struct ssd_queue
*queue
= (struct ssd_queue
*)dev_id
;
11251 struct ssd_device
*dev
= (struct ssd_device
*)queue
->dev
;
11252 struct ssd_cmd
*cmd
;
11253 union ssd_response_msq __msg
;
11254 union ssd_response_msq
*msg
= &__msg
;
11256 uint32_t resp_idx
= queue
->resp_idx
;
11257 uint32_t new_resp_idx
= *(uint32_t *)queue
->resp_ptr
;
11258 uint32_t end_resp_idx
;
11260 if (unlikely(resp_idx
== new_resp_idx
)) {
11264 #if (defined SSD_ESCAPE_IRQ)
11265 if (SSD_INT_MSIX
!= dev
->int_mode
) {
11266 dev
->irq_cpu
= smp_processor_id();
11270 end_resp_idx
= new_resp_idx
& queue
->resp_idx_mask
;
11273 resp_idx
= (resp_idx
+ 1) & queue
->resp_idx_mask
;
11276 u64_msg
= (uint64_t *)(queue
->resp_msg
+ queue
->resp_msg_sz
* resp_idx
);
11277 msg
->u64_msg
= *u64_msg
;
11279 if (unlikely(msg
->u64_msg
== (uint64_t)(-1))) {
11280 hio_err("%s: empty resp msg: queue %d idx %u\n", dev
->name
, queue
->idx
, resp_idx
);
11283 /* clear the resp msg */
11284 *u64_msg
= (uint64_t)(-1);
11286 cmd
= &queue
->cmd
[msg
->resp_msg
.tag
];
11287 /*if (unlikely(!cmd->bio)) {
11288 printk(KERN_WARNING "%s: unknown tag %d fun %#x\n",
11289 dev->name, msg->resp_msg.tag, msg->resp_msg.fun);
11293 if(unlikely(msg
->resp_msg
.status
& (uint32_t)status_mask
)) {
11294 cmd
->errors
= -EIO
;
11298 cmd
->nr_log
= msg
->log_resp_msg
.nr_log
;
11302 if (unlikely(msg
->resp_msg
.fun
!= SSD_FUNC_READ_LOG
&& msg
->resp_msg
.log
> 0)) {
11303 (void)test_and_set_bit(SSD_LOG_HW
, &dev
->state
);
11304 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11305 queue_work(dev
->workq
, &dev
->log_work
);
11309 if (unlikely(msg
->resp_msg
.status
)) {
11310 if (msg
->resp_msg
.fun
== SSD_FUNC_READ
|| msg
->resp_msg
.fun
== SSD_FUNC_WRITE
) {
11311 hio_err("%s: I/O error %d: tag %d fun %#x\n",
11312 dev
->name
, msg
->resp_msg
.status
, msg
->resp_msg
.tag
, msg
->resp_msg
.fun
);
11315 ssd_set_alarm(dev
);
11316 queue
->io_stat
.nr_rwerr
++;
11317 ssd_gen_swlog(dev
, SSD_LOG_EIO
, msg
->u32_msg
[0]);
11319 hio_info("%s: CMD error %d: tag %d fun %#x\n",
11320 dev
->name
, msg
->resp_msg
.status
, msg
->resp_msg
.tag
, msg
->resp_msg
.fun
);
11322 ssd_gen_swlog(dev
, SSD_LOG_ECMD
, msg
->u32_msg
[0]);
11324 queue
->io_stat
.nr_ioerr
++;
11327 if (msg
->resp_msg
.fun
== SSD_FUNC_READ
||
11328 msg
->resp_msg
.fun
== SSD_FUNC_NAND_READ_WOOB
||
11329 msg
->resp_msg
.fun
== SSD_FUNC_NAND_READ
) {
11331 queue
->ecc_info
.bitflip
[msg
->resp_msg
.bitflip
]++;
11333 }while (resp_idx
!= end_resp_idx
);
11335 queue
->resp_idx
= new_resp_idx
;
11337 return IRQ_HANDLED
;
11340 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
11341 static irqreturn_t
ssd_interrupt_legacy(int irq
, void *dev_id
, struct pt_regs
*regs
)
11343 static irqreturn_t
ssd_interrupt_legacy(int irq
, void *dev_id
)
11347 struct ssd_queue
*queue
= (struct ssd_queue
*)dev_id
;
11348 struct ssd_device
*dev
= (struct ssd_device
*)queue
->dev
;
11350 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
11351 ret
= ssd_interrupt(irq
, dev_id
, regs
);
11353 ret
= ssd_interrupt(irq
, dev_id
);
11357 if (IRQ_HANDLED
== ret
) {
11358 ssd_reg32_write(dev
->ctrlp
+ SSD_CLEAR_INTR_REG
, 1);
11364 static void ssd_reset_resp_ptr(struct ssd_device
*dev
)
11368 for (i
=0; i
<dev
->nr_queue
; i
++) {
11369 *(uint32_t *)dev
->queue
[i
].resp_ptr
= dev
->queue
[i
].resp_idx
= (dev
->hw_info
.cmd_fifo_sz
* 2) - 1;
11373 static void ssd_free_irq(struct ssd_device
*dev
)
11377 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6))
11378 if (SSD_INT_MSIX
== dev
->int_mode
) {
11379 for (i
=0; i
<dev
->nr_queue
; i
++) {
11380 irq_set_affinity_hint(dev
->entry
[i
].vector
, NULL
);
11385 for (i
=0; i
<dev
->nr_queue
; i
++) {
11386 free_irq(dev
->entry
[i
].vector
, &dev
->queue
[i
]);
11389 if (SSD_INT_MSIX
== dev
->int_mode
) {
11390 pci_disable_msix(dev
->pdev
);
11391 } else if (SSD_INT_MSI
== dev
->int_mode
) {
11392 pci_disable_msi(dev
->pdev
);
11397 static int ssd_init_irq(struct ssd_device
*dev
)
11399 #if (!defined MODULE) && (defined SSD_MSIX_AFFINITY_FORCE)
11400 const struct cpumask
*cpu_mask
;
11401 static int cpu_affinity
= 0;
11403 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6))
11404 const struct cpumask
*mask
;
11405 static int cpu
= 0;
11409 unsigned long flags
= 0;
11412 ssd_reg32_write(dev
->ctrlp
+ SSD_INTR_INTERVAL_REG
, 0x800);
11414 #ifdef SSD_ESCAPE_IRQ
11418 if (int_mode
>= SSD_INT_MSIX
&& pci_find_capability(dev
->pdev
, PCI_CAP_ID_MSIX
)) {
11419 dev
->nr_queue
= SSD_MSIX_VEC
;
11420 for (i
=0; i
<dev
->nr_queue
; i
++) {
11421 dev
->entry
[i
].entry
= i
;
11424 ret
= pci_enable_msix(dev
->pdev
, dev
->entry
, dev
->nr_queue
);
11427 } else if (ret
> 0) {
11428 dev
->nr_queue
= ret
;
11430 hio_warn("%s: can not enable msix\n", dev
->name
);
11432 ssd_set_alarm(dev
);
11437 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6))
11438 mask
= (dev_to_node(&dev
->pdev
->dev
) == -1) ? cpu_online_mask
: cpumask_of_node(dev_to_node(&dev
->pdev
->dev
));
11439 if ((0 == cpu
) || (!cpumask_intersects(mask
, cpumask_of(cpu
)))) {
11440 cpu
= cpumask_first(mask
);
11442 for (i
=0; i
<dev
->nr_queue
; i
++) {
11443 irq_set_affinity_hint(dev
->entry
[i
].vector
, cpumask_of(cpu
));
11444 cpu
= cpumask_next(cpu
, mask
);
11445 if (cpu
>= nr_cpu_ids
) {
11446 cpu
= cpumask_first(mask
);
11451 dev
->int_mode
= SSD_INT_MSIX
;
11452 } else if (int_mode
>= SSD_INT_MSI
&& pci_find_capability(dev
->pdev
, PCI_CAP_ID_MSI
)) {
11453 ret
= pci_enable_msi(dev
->pdev
);
11455 hio_warn("%s: can not enable msi\n", dev
->name
);
11457 ssd_set_alarm(dev
);
11462 dev
->entry
[0].vector
= dev
->pdev
->irq
;
11464 dev
->int_mode
= SSD_INT_MSI
;
11467 dev
->entry
[0].vector
= dev
->pdev
->irq
;
11469 dev
->int_mode
= SSD_INT_LEGACY
;
11472 for (i
=0; i
<dev
->nr_queue
; i
++) {
11473 if (dev
->nr_queue
> 1) {
11474 snprintf(dev
->queue
[i
].name
, SSD_QUEUE_NAME_LEN
, "%s_e100-%d", dev
->name
, i
);
11476 snprintf(dev
->queue
[i
].name
, SSD_QUEUE_NAME_LEN
, "%s_e100", dev
->name
);
11479 dev
->queue
[i
].dev
= dev
;
11480 dev
->queue
[i
].idx
= i
;
11482 dev
->queue
[i
].resp_idx
= (dev
->hw_info
.cmd_fifo_sz
* 2) - 1;
11483 dev
->queue
[i
].resp_idx_mask
= dev
->hw_info
.cmd_fifo_sz
- 1;
11485 dev
->queue
[i
].resp_msg_sz
= dev
->hw_info
.resp_msg_sz
;
11486 dev
->queue
[i
].resp_msg
= dev
->resp_msg_base
+ dev
->hw_info
.resp_msg_sz
* dev
->hw_info
.cmd_fifo_sz
* i
;
11487 dev
->queue
[i
].resp_ptr
= dev
->resp_ptr_base
+ dev
->hw_info
.resp_ptr_sz
* i
;
11488 *(uint32_t *)dev
->queue
[i
].resp_ptr
= dev
->queue
[i
].resp_idx
;
11490 dev
->queue
[i
].cmd
= dev
->cmd
;
11493 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
11494 flags
= IRQF_SHARED
;
11499 for (i
=0; i
<dev
->nr_queue
; i
++) {
11500 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30))
11501 if (threaded_irq
) {
11502 ret
= request_threaded_irq(dev
->entry
[i
].vector
, ssd_interrupt_check
, ssd_interrupt_threaded
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11503 } else if (dev
->int_mode
== SSD_INT_LEGACY
) {
11504 ret
= request_irq(dev
->entry
[i
].vector
, &ssd_interrupt_legacy
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11506 ret
= request_irq(dev
->entry
[i
].vector
, &ssd_interrupt
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11509 if (dev
->int_mode
== SSD_INT_LEGACY
) {
11510 ret
= request_irq(dev
->entry
[i
].vector
, &ssd_interrupt_legacy
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11512 ret
= request_irq(dev
->entry
[i
].vector
, &ssd_interrupt
, flags
, dev
->queue
[i
].name
, &dev
->queue
[i
]);
11516 hio_warn("%s: request irq failed\n", dev
->name
);
11518 ssd_set_alarm(dev
);
11519 goto out_request_irq
;
11522 #if (!defined MODULE) && (defined SSD_MSIX_AFFINITY_FORCE)
11523 cpu_mask
= (dev_to_node(&dev
->pdev
->dev
) == -1) ? cpu_online_mask
: cpumask_of_node(dev_to_node(&dev
->pdev
->dev
));
11524 if (SSD_INT_MSIX
== dev
->int_mode
) {
11525 if ((0 == cpu_affinity
) || (!cpumask_intersects(mask
, cpumask_of(cpu_affinity
)))) {
11526 cpu_affinity
= cpumask_first(cpu_mask
);
11529 irq_set_affinity(dev
->entry
[i
].vector
, cpumask_of(cpu_affinity
));
11530 cpu_affinity
= cpumask_next(cpu_affinity
, cpu_mask
);
11531 if (cpu_affinity
>= nr_cpu_ids
) {
11532 cpu_affinity
= cpumask_first(cpu_mask
);
11541 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6))
11542 if (SSD_INT_MSIX
== dev
->int_mode
) {
11543 for (j
=0; j
<dev
->nr_queue
; j
++) {
11544 irq_set_affinity_hint(dev
->entry
[j
].vector
, NULL
);
11549 for (i
--; i
>=0; i
--) {
11550 free_irq(dev
->entry
[i
].vector
, &dev
->queue
[i
]);
11553 if (SSD_INT_MSIX
== dev
->int_mode
) {
11554 pci_disable_msix(dev
->pdev
);
11555 } else if (SSD_INT_MSI
== dev
->int_mode
) {
11556 pci_disable_msi(dev
->pdev
);
11563 static void ssd_initial_log(struct ssd_device
*dev
)
11566 uint32_t speed
, width
;
11568 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
11572 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_POWER_ON_REG
);
11574 ssd_gen_swlog(dev
, SSD_LOG_POWER_ON
, dev
->hw_info
.bridge_ver
);
11577 val
= ssd_reg32_read(dev
->ctrlp
+ SSD_PCIE_LINKSTATUS_REG
);
11579 width
= (val
>> 4)& 0x3F;
11580 if (0x1 == speed
) {
11581 hio_info("%s: PCIe: 2.5GT/s, x%u\n", dev
->name
, width
);
11582 } else if (0x2 == speed
) {
11583 hio_info("%s: PCIe: 5GT/s, x%u\n", dev
->name
, width
);
11585 hio_info("%s: PCIe: unknown GT/s, x%u\n", dev
->name
, width
);
11587 ssd_gen_swlog(dev
, SSD_LOG_PCIE_LINK_STATUS
, val
);
11592 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
11593 static void ssd_hwmon_worker(void *data
)
11595 struct ssd_device
*dev
= (struct ssd_device
*)data
;
11597 static void ssd_hwmon_worker(struct work_struct
*work
)
11599 struct ssd_device
*dev
= container_of(work
, struct ssd_device
, hwmon_work
);
11602 if (ssd_check_hw(dev
)) {
11603 //hio_err("%s: check hardware failed\n", dev->name);
11607 ssd_check_clock(dev
);
11608 ssd_check_volt(dev
);
11610 ssd_mon_boardvolt(dev
);
11613 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
11614 static void ssd_tempmon_worker(void *data
)
11616 struct ssd_device
*dev
= (struct ssd_device
*)data
;
11618 static void ssd_tempmon_worker(struct work_struct
*work
)
11620 struct ssd_device
*dev
= container_of(work
, struct ssd_device
, tempmon_work
);
11623 if (ssd_check_hw(dev
)) {
11624 //hio_err("%s: check hardware failed\n", dev->name);
11632 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
11633 static void ssd_capmon_worker(void *data
)
11635 struct ssd_device
*dev
= (struct ssd_device
*)data
;
11637 static void ssd_capmon_worker(struct work_struct
*work
)
11639 struct ssd_device
*dev
= container_of(work
, struct ssd_device
, capmon_work
);
11642 uint32_t cap_threshold
= SSD_PL_CAP_THRESHOLD
;
11645 if (dev
->protocol_info
.ver
< SSD_PROTOCOL_V3_2
) {
11649 if (dev
->hw_info_ext
.form_factor
== SSD_FORM_FACTOR_FHHL
&& dev
->hw_info
.pcb_ver
< 'B') {
11653 /* fault before? */
11654 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
11655 ret
= ssd_check_pl_cap_fast(dev
);
11662 ret
= ssd_do_cap_learn(dev
, &cap
);
11664 hio_err("%s: cap learn failed\n", dev
->name
);
11665 ssd_gen_swlog(dev
, SSD_LOG_CAP_LEARN_FAULT
, 0);
11669 ssd_gen_swlog(dev
, SSD_LOG_CAP_STATUS
, cap
);
11671 if (SSD_PL_CAP_CP
== dev
->hw_info_ext
.cap_type
) {
11672 cap_threshold
= SSD_PL_CAP_CP_THRESHOLD
;
11675 //use the fw event id?
11676 if (cap
< cap_threshold
) {
11677 if (!test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
11678 ssd_gen_swlog(dev
, SSD_LOG_BATTERY_FAULT
, 0);
11680 } else if (cap
>= (cap_threshold
+ SSD_PL_CAP_THRESHOLD_HYST
)) {
11681 if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
)) {
11682 ssd_gen_swlog(dev
, SSD_LOG_BATTERY_OK
, 0);
11687 static void ssd_routine_start(void *data
)
11689 struct ssd_device
*dev
;
11696 dev
->routine_tick
++;
11698 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
) && !ssd_busy(dev
)) {
11699 (void)test_and_set_bit(SSD_LOG_HW
, &dev
->state
);
11700 queue_work(dev
->workq
, &dev
->log_work
);
11703 if ((dev
->routine_tick
% SSD_HWMON_ROUTINE_TICK
) == 0 && test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11704 queue_work(dev
->workq
, &dev
->hwmon_work
);
11707 if ((dev
->routine_tick
% SSD_CAPMON_ROUTINE_TICK
) == 0 && test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11708 queue_work(dev
->workq
, &dev
->capmon_work
);
11711 if ((dev
->routine_tick
% SSD_CAPMON2_ROUTINE_TICK
) == 0 && test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP
), &dev
->hwmon
) && test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11712 /* CAP fault? check again */
11713 queue_work(dev
->workq
, &dev
->capmon_work
);
11716 if (test_bit(SSD_INIT_WORKQ
, &dev
->state
)) {
11717 queue_work(dev
->workq
, &dev
->tempmon_work
);
11720 /* schedule routine */
11721 mod_timer(&dev
->routine_timer
, jiffies
+ msecs_to_jiffies(SSD_ROUTINE_INTERVAL
));
11724 static void ssd_cleanup_routine(struct ssd_device
*dev
)
11726 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
11729 (void)ssd_del_timer(&dev
->routine_timer
);
11731 (void)ssd_del_timer(&dev
->bm_timer
);
11734 static int ssd_init_routine(struct ssd_device
*dev
)
11736 if (unlikely(mode
!= SSD_DRV_MODE_STANDARD
))
11739 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
11740 INIT_WORK(&dev
->bm_work
, ssd_bm_worker
, dev
);
11741 INIT_WORK(&dev
->hwmon_work
, ssd_hwmon_worker
, dev
);
11742 INIT_WORK(&dev
->capmon_work
, ssd_capmon_worker
, dev
);
11743 INIT_WORK(&dev
->tempmon_work
, ssd_tempmon_worker
, dev
);
11745 INIT_WORK(&dev
->bm_work
, ssd_bm_worker
);
11746 INIT_WORK(&dev
->hwmon_work
, ssd_hwmon_worker
);
11747 INIT_WORK(&dev
->capmon_work
, ssd_capmon_worker
);
11748 INIT_WORK(&dev
->tempmon_work
, ssd_tempmon_worker
);
11752 ssd_initial_log(dev
);
11754 /* schedule bm routine */
11755 ssd_add_timer(&dev
->bm_timer
, msecs_to_jiffies(SSD_BM_CAP_LEARNING_DELAY
), ssd_bm_routine_start
, dev
);
11757 /* schedule routine */
11758 ssd_add_timer(&dev
->routine_timer
, msecs_to_jiffies(SSD_ROUTINE_INTERVAL
), ssd_routine_start
, dev
);
11764 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
11767 ssd_remove_one (struct pci_dev
*pdev
)
11769 struct ssd_device
*dev
;
11775 dev
= pci_get_drvdata(pdev
);
11780 list_del_init(&dev
->list
);
11782 ssd_unregister_sysfs(dev
);
11784 /* offline firstly */
11785 test_and_clear_bit(SSD_ONLINE
, &dev
->state
);
11787 /* clean work queue first */
11789 test_and_clear_bit(SSD_INIT_WORKQ
, &dev
->state
);
11790 ssd_cleanup_workq(dev
);
11794 (void)ssd_flush(dev
);
11795 (void)ssd_save_md(dev
);
11799 ssd_save_smart(dev
);
11802 if (test_and_clear_bit(SSD_INIT_BD
, &dev
->state
)) {
11803 ssd_cleanup_blkdev(dev
);
11807 ssd_cleanup_chardev(dev
);
11810 /* clean routine */
11812 ssd_cleanup_routine(dev
);
11815 ssd_cleanup_queue(dev
);
11817 ssd_cleanup_tag(dev
);
11818 ssd_cleanup_thread(dev
);
11822 ssd_cleanup_dcmd(dev
);
11823 ssd_cleanup_cmd(dev
);
11824 ssd_cleanup_response(dev
);
11827 ssd_cleanup_log(dev
);
11830 if (dev
->reload_fw
) { //reload fw
11831 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FW
);
11834 /* unmap physical adress */
11835 #ifdef LINUX_SUSE_OS
11836 iounmap(dev
->ctrlp
);
11838 pci_iounmap(pdev
, dev
->ctrlp
);
11841 release_mem_region(dev
->mmio_base
, dev
->mmio_len
);
11843 pci_disable_device(pdev
);
11845 pci_set_drvdata(pdev
, NULL
);
11851 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
11854 ssd_init_one(struct pci_dev
*pdev
,
11855 const struct pci_device_id
*ent
)
11857 struct ssd_device
*dev
;
11860 if (!pdev
|| !ent
) {
11865 dev
= kmalloc(sizeof(struct ssd_device
), GFP_KERNEL
);
11868 goto out_alloc_dev
;
11870 memset(dev
, 0, sizeof(struct ssd_device
));
11872 dev
->owner
= THIS_MODULE
;
11874 if (SSD_SLAVE_PORT_DEVID
== ent
->device
) {
11878 dev
->idx
= ssd_get_index(dev
->slave
);
11879 if (dev
->idx
< 0) {
11881 goto out_get_index
;
11885 snprintf(dev
->name
, SSD_DEV_NAME_LEN
, SSD_DEV_NAME
);
11886 ssd_set_dev_name(&dev
->name
[strlen(SSD_DEV_NAME
)], SSD_DEV_NAME_LEN
-strlen(SSD_DEV_NAME
), dev
->idx
);
11888 dev
->major
= ssd_major
;
11889 dev
->cmajor
= ssd_cmajor
;
11891 snprintf(dev
->name
, SSD_DEV_NAME_LEN
, SSD_SDEV_NAME
);
11892 ssd_set_dev_name(&dev
->name
[strlen(SSD_SDEV_NAME
)], SSD_DEV_NAME_LEN
-strlen(SSD_SDEV_NAME
), dev
->idx
);
11893 dev
->major
= ssd_major_sl
;
11897 atomic_set(&(dev
->refcnt
), 0);
11898 atomic_set(&(dev
->tocnt
), 0);
11900 mutex_init(&dev
->fw_mutex
);
11903 mutex_init(&dev
->gd_mutex
);
11906 pci_set_drvdata(pdev
, dev
);
11908 kref_init(&dev
->kref
);
11910 ret
= pci_enable_device(pdev
);
11912 hio_warn("%s: can not enable device\n", dev
->name
);
11913 goto out_enable_device
;
11916 pci_set_master(pdev
);
11918 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
11919 ret
= pci_set_dma_mask(pdev
, DMA_64BIT_MASK
);
11921 ret
= pci_set_dma_mask(pdev
, DMA_BIT_MASK(64));
11924 hio_warn("%s: set dma mask: failed\n", dev
->name
);
11925 goto out_set_dma_mask
;
11928 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
11929 ret
= pci_set_consistent_dma_mask(pdev
, DMA_64BIT_MASK
);
11931 ret
= pci_set_consistent_dma_mask(pdev
, DMA_BIT_MASK(64));
11934 hio_warn("%s: set consistent dma mask: failed\n", dev
->name
);
11935 goto out_set_dma_mask
;
11938 dev
->mmio_base
= pci_resource_start(pdev
, 0);
11939 dev
->mmio_len
= pci_resource_len(pdev
, 0);
11941 if (!request_mem_region(dev
->mmio_base
, dev
->mmio_len
, SSD_DEV_NAME
)) {
11942 hio_warn("%s: can not reserve MMIO region 0\n", dev
->name
);
11944 goto out_request_mem_region
;
11947 /* 2.6.9 kernel bug */
11948 dev
->ctrlp
= pci_iomap(pdev
, 0, 0);
11950 hio_warn("%s: can not remap IO region 0\n", dev
->name
);
11952 goto out_pci_iomap
;
11955 ret
= ssd_check_hw(dev
);
11957 hio_err("%s: check hardware failed\n", dev
->name
);
11961 ret
= ssd_init_protocol_info(dev
);
11963 hio_err("%s: init protocol info failed\n", dev
->name
);
11964 goto out_init_protocol_info
;
11968 ssd_clear_alarm(dev
);
11970 ret
= ssd_init_fw_info(dev
);
11972 hio_err("%s: init firmware info failed\n", dev
->name
);
11974 ssd_set_alarm(dev
);
11975 goto out_init_fw_info
;
11983 ret
= ssd_init_rom_info(dev
);
11985 hio_err("%s: init rom info failed\n", dev
->name
);
11987 ssd_set_alarm(dev
);
11988 goto out_init_rom_info
;
11991 ret
= ssd_init_label(dev
);
11993 hio_err("%s: init label failed\n", dev
->name
);
11995 ssd_set_alarm(dev
);
11996 goto out_init_label
;
11999 ret
= ssd_init_workq(dev
);
12001 hio_warn("%s: init workq failed\n", dev
->name
);
12002 goto out_init_workq
;
12004 (void)test_and_set_bit(SSD_INIT_WORKQ
, &dev
->state
);
12006 ret
= ssd_init_log(dev
);
12008 hio_err("%s: init log failed\n", dev
->name
);
12010 ssd_set_alarm(dev
);
12014 ret
= ssd_init_smart(dev
);
12016 hio_err("%s: init info failed\n", dev
->name
);
12018 ssd_set_alarm(dev
);
12019 goto out_init_smart
;
12023 ret
= ssd_init_hw_info(dev
);
12025 hio_err("%s: init hardware info failed\n", dev
->name
);
12027 ssd_set_alarm(dev
);
12028 goto out_init_hw_info
;
12036 ret
= ssd_init_sensor(dev
);
12038 hio_err("%s: init sensor failed\n", dev
->name
);
12040 ssd_set_alarm(dev
);
12041 goto out_init_sensor
;
12044 ret
= ssd_init_pl_cap(dev
);
12046 hio_err("%s: int pl_cap failed\n", dev
->name
);
12048 ssd_set_alarm(dev
);
12049 goto out_init_pl_cap
;
12053 ret
= ssd_check_init_state(dev
);
12055 hio_err("%s: check init state failed\n", dev
->name
);
12057 ssd_set_alarm(dev
);
12058 goto out_check_init_state
;
12061 ret
= ssd_init_response(dev
);
12063 hio_warn("%s: init resp_msg failed\n", dev
->name
);
12064 goto out_init_response
;
12067 ret
= ssd_init_cmd(dev
);
12069 hio_warn("%s: init msg failed\n", dev
->name
);
12073 ret
= ssd_init_dcmd(dev
);
12075 hio_warn("%s: init cmd failed\n", dev
->name
);
12076 goto out_init_dcmd
;
12079 ret
= ssd_init_irq(dev
);
12081 hio_warn("%s: init irq failed\n", dev
->name
);
12085 ret
= ssd_init_thread(dev
);
12087 hio_warn("%s: init thread failed\n", dev
->name
);
12088 goto out_init_thread
;
12091 ret
= ssd_init_tag(dev
);
12093 hio_warn("%s: init tags failed\n", dev
->name
);
12094 goto out_init_tags
;
12098 (void)test_and_set_bit(SSD_ONLINE
, &dev
->state
);
12100 ret
= ssd_init_queue(dev
);
12102 hio_warn("%s: init queue failed\n", dev
->name
);
12103 goto out_init_queue
;
12111 ret
= ssd_init_ot_protect(dev
);
12113 hio_err("%s: int ot_protect failed\n", dev
->name
);
12115 ssd_set_alarm(dev
);
12116 goto out_int_ot_protect
;
12119 ret
= ssd_init_wmode(dev
);
12121 hio_warn("%s: init write mode\n", dev
->name
);
12122 goto out_init_wmode
;
12125 /* init routine after hw is ready */
12126 ret
= ssd_init_routine(dev
);
12128 hio_warn("%s: init routine\n", dev
->name
);
12129 goto out_init_routine
;
12132 ret
= ssd_init_chardev(dev
);
12134 hio_warn("%s: register char device failed\n", dev
->name
);
12135 goto out_init_chardev
;
12139 ret
= ssd_init_blkdev(dev
);
12141 hio_warn("%s: register block device failed\n", dev
->name
);
12142 goto out_init_blkdev
;
12144 (void)test_and_set_bit(SSD_INIT_BD
, &dev
->state
);
12146 ret
= ssd_register_sysfs(dev
);
12148 hio_warn("%s: register sysfs failed\n", dev
->name
);
12149 goto out_register_sysfs
;
12154 list_add_tail(&dev
->list
, &ssd_list
);
12158 out_register_sysfs
:
12159 test_and_clear_bit(SSD_INIT_BD
, &dev
->state
);
12160 ssd_cleanup_blkdev(dev
);
12164 ssd_cleanup_chardev(dev
);
12169 ssd_cleanup_routine(dev
);
12173 out_int_ot_protect
:
12174 ssd_cleanup_queue(dev
);
12176 test_and_clear_bit(SSD_ONLINE
, &dev
->state
);
12177 ssd_cleanup_tag(dev
);
12179 ssd_cleanup_thread(dev
);
12183 ssd_cleanup_dcmd(dev
);
12185 ssd_cleanup_cmd(dev
);
12187 ssd_cleanup_response(dev
);
12189 out_check_init_state
:
12196 ssd_cleanup_log(dev
);
12201 test_and_clear_bit(SSD_INIT_WORKQ
, &dev
->state
);
12202 ssd_cleanup_workq(dev
);
12208 out_init_protocol_info
:
12210 #ifdef LINUX_SUSE_OS
12211 iounmap(dev
->ctrlp
);
12213 pci_iounmap(pdev
, dev
->ctrlp
);
12216 release_mem_region(dev
->mmio_base
, dev
->mmio_len
);
12217 out_request_mem_region
:
12219 pci_disable_device(pdev
);
12221 pci_set_drvdata(pdev
, NULL
);
/*
 * ssd_cleanup_tasklet - tear down the per-CPU completion tasklets.
 *
 * Kills the ssd_tasklet instance on every online CPU.  Counterpart of
 * ssd_init_tasklet(); invoked on the module-init failure path and from
 * ssd_cleanup_module().
 *
 * NOTE(review): this extract has gaps (embedded line numbers jump
 * 12229 -> 12232), so the loop-variable declaration and closing braces
 * are not visible here.
 */
12229 static void ssd_cleanup_tasklet(void)
12232 for_each_online_cpu(i
) {
12233 tasklet_kill(&per_cpu(ssd_tasklet
, i
));
/*
 * ssd_init_tasklet - set up per-CPU completion machinery.
 *
 * For each online CPU: initializes the per-CPU done-queue list head
 * (ssd_doneq) and binds the per-CPU tasklet (ssd_tasklet) to a
 * completion handler.  Two alternative handlers appear below
 * (__ssd_done_db vs __ssd_done); the condition that selects between
 * them is missing from this extract (line numbers jump 12242 -> 12245
 * -> 12247), so which handler is used under which configuration cannot
 * be confirmed from here -- TODO verify against the full source.
 */
12237 static int ssd_init_tasklet(void)
12241 for_each_online_cpu(i
) {
12242 INIT_LIST_HEAD(&per_cpu(ssd_doneq
, i
));
/* one of the two tasklet_init() calls below runs per CPU (branch not visible) */
12245 tasklet_init(&per_cpu(ssd_tasklet
, i
), __ssd_done_db
, 0);
12247 tasklet_init(&per_cpu(ssd_tasklet
, i
), __ssd_done
, 0);
/*
 * PCI IDs handled by this driver: one Xilinx-vendor (0x10ee) G3 device
 * and three Huawei-vendor (0x19e5) devices (v1, v2, v2 dual-port slave).
 * The 0x0008 "v1 sp" entry is intentionally commented out.
 * MODULE_DEVICE_TABLE exports the table so udev/modprobe can autoload
 * the module when a matching device appears.
 *
 * NOTE(review): the terminating { 0, } sentinel entry is not visible in
 * this extract (line numbers jump 12259 -> 12262) -- confirm it exists.
 */
12254 static struct pci_device_id ssd_pci_tbl
[] = {
12255 { 0x10ee, 0x0007, PCI_ANY_ID
, PCI_ANY_ID
, }, /* g3 */
12256 { 0x19e5, 0x0007, PCI_ANY_ID
, PCI_ANY_ID
, }, /* v1 */
12257 //{ 0x19e5, 0x0008, PCI_ANY_ID, PCI_ANY_ID, }, /* v1 sp*/
12258 { 0x19e5, 0x0009, PCI_ANY_ID
, PCI_ANY_ID
, }, /* v2 */
12259 { 0x19e5, 0x000a, PCI_ANY_ID
, PCI_ANY_ID
, }, /* v2 dp slave*/
12262 MODULE_DEVICE_TABLE(pci
, ssd_pci_tbl
);
/*
 * PCI driver descriptor.  probe = ssd_init_one, remove = ssd_remove_one.
 * On kernels older than 2.6.38 the remove callback is wrapped in
 * __devexit_p() (the macro was removed in later kernels, hence the
 * version split).
 */
12264 static struct pci_driver ssd_driver
= {
12265 .name
= MODULE_NAME
,
12266 .id_table
= ssd_pci_tbl
,
12267 .probe
= ssd_init_one
,
12268 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
12269 .remove
= __devexit_p(ssd_remove_one
),
12271 .remove
= ssd_remove_one
,
12275 /* notifier block to get a notify on system shutdown/halt/reboot */
/*
 * ssd_notify_reboot - reboot-notifier callback.
 *
 * Walks every registered device on ssd_list and quiesces it before the
 * system goes down: records a power-off event in the software log,
 * flushes outstanding data, saves metadata and SMART state, stops the
 * background workqueue, and -- when dev->reload_fw is set -- writes
 * SSD_RELOAD_FW to the reload-firmware register so the card picks up
 * new firmware across the power cycle.
 *
 * NOTE(review): the return statement (presumably NOTIFY_OK) and several
 * intermediate lines are missing from this extract; @event and @buf are
 * not referenced in the visible portion.
 */
12276 static int ssd_notify_reboot(struct notifier_block
*nb
, unsigned long event
, void *buf
)
12278 struct ssd_device
*dev
= NULL
;
12279 struct ssd_device
*n
= NULL
;
/* _safe variant: iteration continues even if an entry is unlinked */
12281 list_for_each_entry_safe(dev
, n
, &ssd_list
, list
) {
12282 ssd_gen_swlog(dev
, SSD_LOG_POWER_OFF
, 0);
/* best-effort: flush/save failures are deliberately ignored at shutdown */
12284 (void)ssd_flush(dev
);
12285 (void)ssd_save_md(dev
);
12289 ssd_save_smart(dev
);
12291 ssd_stop_workq(dev
);
12293 if (dev
->reload_fw
) {
12294 ssd_reg32_write(dev
->ctrlp
+ SSD_RELOAD_FW_REG
, SSD_RELOAD_FW
);
/*
 * Reboot notifier block, positionally initialized:
 * .notifier_call = ssd_notify_reboot, .next = NULL, .priority = 0.
 */
12302 static struct notifier_block ssd_notifier
= {
12303 ssd_notify_reboot
, NULL
, 0
/*
 * ssd_init_module - module entry point.
 *
 * Initialization order: device-index allocator, /proc entries, sysfs,
 * per-CPU tasklets, device class, char-device major, block-device
 * majors (primary + slave), module-parameter validation, PCI driver
 * registration, reboot notifier.  Failures unwind in reverse order via
 * the goto ladder at the bottom (standard kernel cleanup pattern).
 *
 * NOTE(review): this extract has gaps -- the `if (ret)` checks between
 * each init call and its hio_warn/goto pair, the success `return 0`,
 * and the final `return ret` are not visible here.
 */
12306 static int __init
ssd_init_module(void)
12310 hio_info("driver version: %s\n", DRIVER_VERSION
);
/* ---- core bookkeeping: index allocator, /proc, sysfs, tasklets ---- */
12312 ret
= ssd_init_index();
12314 hio_warn("init index failed\n");
12315 goto out_init_index
;
12318 ret
= ssd_init_proc();
12320 hio_warn("init proc failed\n");
12321 goto out_init_proc
;
12324 ret
= ssd_init_sysfs();
12326 hio_warn("init sysfs failed\n");
12327 goto out_init_sysfs
;
12330 ret
= ssd_init_tasklet();
12332 hio_warn("init tasklet failed\n");
12333 goto out_init_tasklet
;
/* device class: very old kernels (<= 2.6.12) use the class_simple API */
12336 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
12337 ssd_class
= class_simple_create(THIS_MODULE
, SSD_DEV_NAME
);
12339 ssd_class
= class_create(THIS_MODULE
, SSD_DEV_NAME
);
12341 if (IS_ERR(ssd_class
)) {
12342 ret
= PTR_ERR(ssd_class
);
12343 goto out_class_create
;
/* char device: honor a user-supplied major, else let the kernel pick
 * one (register_chrdev(0, ...) returns the allocated major) */
12346 if (ssd_cmajor
> 0) {
12347 ret
= register_chrdev(ssd_cmajor
, SSD_CDEV_NAME
, &ssd_cfops
);
12349 ret
= ssd_cmajor
= register_chrdev(ssd_cmajor
, SSD_CDEV_NAME
, &ssd_cfops
);
12352 hio_warn("unable to register chardev major number\n");
12353 goto out_register_chardev
;
/* block device majors: primary, then slave -- same fixed-or-dynamic idiom */
12356 if (ssd_major
> 0) {
12357 ret
= register_blkdev(ssd_major
, SSD_DEV_NAME
);
12359 ret
= ssd_major
= register_blkdev(ssd_major
, SSD_DEV_NAME
);
12362 hio_warn("unable to register major number\n");
12363 goto out_register_blkdev
;
12366 if (ssd_major_sl
> 0) {
12367 ret
= register_blkdev(ssd_major_sl
, SSD_SDEV_NAME
);
12369 ret
= ssd_major_sl
= register_blkdev(ssd_major_sl
, SSD_SDEV_NAME
);
12372 hio_warn("unable to register slave major number\n");
12373 goto out_register_blkdev_sl
;
/* ---- clamp out-of-range module parameters to safe defaults ---- */
12376 if (mode
< SSD_DRV_MODE_STANDARD
|| mode
> SSD_DRV_MODE_BASE
) {
12377 mode
= SSD_DRV_MODE_STANDARD
;
12381 if (mode
!= SSD_DRV_MODE_STANDARD
) {
12385 if (int_mode
< SSD_INT_LEGACY
|| int_mode
> SSD_INT_MSIX
) {
12386 int_mode
= SSD_INT_MODE_DEFAULT
;
/* threaded_irq forces MSI interrupt mode */
12389 if (threaded_irq
) {
12390 int_mode
= SSD_INT_MSI
;
12393 if (log_level
>= SSD_LOG_NR_LEVEL
|| log_level
< SSD_LOG_LEVEL_INFO
) {
12394 log_level
= SSD_LOG_LEVEL_ERR
;
12397 if (wmode
< SSD_WMODE_BUFFER
|| wmode
> SSD_WMODE_DEFAULT
) {
12398 wmode
= SSD_WMODE_DEFAULT
;
/* pci_module_init() was replaced by pci_register_driver() in 2.6.20 */
12401 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
12402 ret
= pci_module_init(&ssd_driver
);
12404 ret
= pci_register_driver(&ssd_driver
);
12407 hio_warn("pci init failed\n");
12411 ret
= register_reboot_notifier(&ssd_notifier
);
12413 hio_warn("register reboot notifier failed\n");
12414 goto out_register_reboot_notifier
;
/* ---- error unwind: reverse order of the init sequence above ---- */
12419 out_register_reboot_notifier
:
12421 pci_unregister_driver(&ssd_driver
);
12422 unregister_blkdev(ssd_major_sl
, SSD_SDEV_NAME
);
12423 out_register_blkdev_sl
:
12424 unregister_blkdev(ssd_major
, SSD_DEV_NAME
);
12425 out_register_blkdev
:
12426 unregister_chrdev(ssd_cmajor
, SSD_CDEV_NAME
);
12427 out_register_chardev
:
12428 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
12429 class_simple_destroy(ssd_class
);
12431 class_destroy(ssd_class
);
12434 ssd_cleanup_tasklet();
12436 ssd_cleanup_sysfs();
12438 ssd_cleanup_proc();
12440 ssd_cleanup_index();
/*
 * ssd_cleanup_module - module exit point.
 *
 * Tears everything down in exact reverse order of ssd_init_module():
 * reboot notifier, PCI driver, slave/primary block majors, char major,
 * device class (class_simple API on kernels <= 2.6.12), tasklets,
 * sysfs, /proc, index allocator.
 */
12446 static void __exit
ssd_cleanup_module(void)
12449 hio_info("unload driver: %s\n", DRIVER_VERSION
);
12453 unregister_reboot_notifier(&ssd_notifier
);
12455 pci_unregister_driver(&ssd_driver
);
12457 unregister_blkdev(ssd_major_sl
, SSD_SDEV_NAME
);
12458 unregister_blkdev(ssd_major
, SSD_DEV_NAME
);
12459 unregister_chrdev(ssd_cmajor
, SSD_CDEV_NAME
);
12460 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
12461 class_simple_destroy(ssd_class
);
12463 class_destroy(ssd_class
);
12466 ssd_cleanup_tasklet();
12467 ssd_cleanup_sysfs();
12468 ssd_cleanup_proc();
12469 ssd_cleanup_index();
/*
 * ssd_register_event_notifier - exported API: attach an event callback
 * to the device behind @bdev.
 *
 * Validates @bdev/@event_call, resolves the ssd_device from the gendisk
 * private_data, stores the callback, then replays entries from the
 * device's internal log: each entry whose timestamp lies in
 * [dev->uptime, now] is delivered to the new callback via
 * ssd_parse_log().  The replay lets a late-registering listener catch
 * up on events that occurred since the device came up.
 *
 * NOTE(review): gaps in this extract hide the early-return for invalid
 * arguments, the declarations of tv/cur/log_nr, the loop over log_nr
 * entries, and the final return value -- confirm against full source.
 */
12472 int ssd_register_event_notifier(struct block_device
*bdev
, ssd_event_call event_call
)
12474 struct ssd_device
*dev
;
12476 struct ssd_log
*le
;
12480 if (!bdev
|| !event_call
|| !(bdev
->bd_disk
)) {
12484 dev
= bdev
->bd_disk
->private_data
;
12485 dev
->event_call
= event_call
;
/* capture "now" for the timestamp window check below */
12487 do_gettimeofday(&tv
);
12490 le
= (struct ssd_log
*)(dev
->internal_log
.log
);
12491 log_nr
= dev
->internal_log
.nr_log
;
/* deliver only events between device uptime and the current time */
12494 if (le
->time
<= cur
&& le
->time
>= dev
->uptime
) {
12495 (void)dev
->event_call(dev
->gd
, le
->le
.event
, ssd_parse_log(dev
, le
, 0));
/*
 * ssd_unregister_event_notifier - exported API: detach the event
 * callback from the device behind @bdev.
 *
 * Clears dev->event_call after validating @bdev and its gendisk.
 * NOTE(review): the early-return body for invalid arguments and the
 * final return value are missing from this extract.
 */
12503 int ssd_unregister_event_notifier(struct block_device
*bdev
)
12505 struct ssd_device
*dev
;
12507 if (!bdev
|| !(bdev
->bd_disk
)) {
12511 dev
= bdev
->bd_disk
->private_data
;
12512 dev
->event_call
= NULL
;
/*
 * Public in-kernel API exported to other modules: device label/version
 * queries, over-temperature protection, battery-module status, private
 * bio submission, PCI address and temperature queries, event notifier
 * registration, reset, and write-mode control.
 */
12517 EXPORT_SYMBOL(ssd_get_label
);
12518 EXPORT_SYMBOL(ssd_get_version
);
12519 EXPORT_SYMBOL(ssd_set_otprotect
);
12520 EXPORT_SYMBOL(ssd_bm_status
);
12521 EXPORT_SYMBOL(ssd_submit_pbio
);
12522 EXPORT_SYMBOL(ssd_get_pciaddr
);
12523 EXPORT_SYMBOL(ssd_get_temperature
);
12524 EXPORT_SYMBOL(ssd_register_event_notifier
);
12525 EXPORT_SYMBOL(ssd_unregister_event_notifier
);
12526 EXPORT_SYMBOL(ssd_reset
);
12527 EXPORT_SYMBOL(ssd_set_wmode
);
/* standard module entry/exit hookup and metadata */
12531 module_init(ssd_init_module
);
12532 module_exit(ssd_cleanup_module
);
12533 MODULE_VERSION(DRIVER_VERSION
);
12534 MODULE_LICENSE("GPL");
12535 MODULE_AUTHOR("Huawei SSD DEV Team");
12536 MODULE_DESCRIPTION("Huawei SSD driver");