]>
Commit | Line | Data |
---|---|---|
1 | /* | |
2 | * Huawei SSD device driver | |
3 | * Copyright (c) 2016, Huawei Technologies Co., Ltd. | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify it | |
6 | * under the terms and conditions of the GNU General Public License, | |
7 | * version 2, as published by the Free Software Foundation. | |
8 | * | |
9 | * This program is distributed in the hope it will be useful, but WITHOUT | |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
12 | * more details. | |
13 | */ | |
14 | ||
15 | #ifndef LINUX_VERSION_CODE | |
16 | #include <linux/version.h> | |
17 | #endif | |
18 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)) | |
19 | #include <linux/config.h> | |
20 | #endif | |
21 | #include <linux/types.h> | |
22 | #include <linux/kernel.h> | |
23 | #include <linux/module.h> | |
24 | #include <linux/bio.h> | |
25 | #include <linux/timer.h> | |
26 | #include <linux/init.h> | |
27 | #include <linux/pci.h> | |
28 | #include <linux/slab.h> | |
29 | #include <linux/spinlock.h> | |
30 | #include <linux/blkdev.h> | |
31 | #include <linux/sched.h> | |
32 | #include <linux/fcntl.h> | |
33 | #include <linux/interrupt.h> | |
34 | #include <linux/compiler.h> | |
35 | #include <linux/bitops.h> | |
36 | #include <linux/delay.h> | |
37 | #include <linux/time.h> | |
38 | #include <linux/stat.h> | |
39 | #include <linux/fs.h> | |
40 | #include <linux/dma-mapping.h> | |
41 | #include <linux/completion.h> | |
42 | #include <linux/workqueue.h> | |
43 | #include <linux/mm.h> | |
44 | #include <linux/ioctl.h> | |
45 | #include <linux/hdreg.h> /* HDIO_GETGEO */ | |
46 | #include <linux/list.h> | |
47 | #include <linux/reboot.h> | |
48 | #include <linux/kthread.h> | |
49 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0)) | |
50 | #include <linux/seq_file.h> | |
51 | #endif | |
52 | #include <asm/uaccess.h> | |
53 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0)) | |
54 | #include <linux/scatterlist.h> | |
55 | #include <linux/vmalloc.h> | |
56 | #else | |
57 | #include <asm/scatterlist.h> | |
58 | #endif | |
59 | #include <asm/io.h> | |
60 | #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17)) | |
61 | #include <linux/devfs_fs_kernel.h> | |
62 | #endif | |
63 | ||
64 | /* driver */ | |
65 | #define MODULE_NAME "hio" | |
66 | #define DRIVER_VERSION "2.1.0.40" | |
67 | #define DRIVER_VERSION_LEN 16 | |
68 | ||
69 | #define SSD_FW_MIN 0x1 | |
70 | ||
71 | #define SSD_DEV_NAME MODULE_NAME | |
72 | #define SSD_DEV_NAME_LEN 16 | |
73 | #define SSD_CDEV_NAME "c"SSD_DEV_NAME | |
74 | #define SSD_SDEV_NAME "s"SSD_DEV_NAME | |
75 | ||
76 | ||
77 | #define SSD_CMAJOR 0 | |
78 | #define SSD_MAJOR 0 | |
79 | #define SSD_MAJOR_SL 0 | |
80 | #define SSD_MINORS 16 | |
81 | ||
82 | #define SSD_MAX_DEV 702 | |
83 | #define SSD_ALPHABET_NUM 26 | |
84 | ||
85 | #define hio_info(f, arg...) printk(KERN_INFO MODULE_NAME"info: " f , ## arg) | |
86 | #define hio_note(f, arg...) printk(KERN_NOTICE MODULE_NAME"note: " f , ## arg) | |
87 | #define hio_warn(f, arg...) printk(KERN_WARNING MODULE_NAME"warn: " f , ## arg) | |
88 | #define hio_err(f, arg...) printk(KERN_ERR MODULE_NAME"err: " f , ## arg) | |
89 | ||
90 | /* slave port */ | |
91 | #define SSD_SLAVE_PORT_DEVID 0x000a | |
92 | ||
93 | /* int mode */ | |
94 | ||
95 | /* 2.6.9 msi affinity bug, should turn msi & msi-x off */ | |
96 | //#define SSD_MSI | |
97 | #define SSD_ESCAPE_IRQ | |
98 | ||
99 | //#define SSD_MSIX | |
100 | #ifndef MODULE | |
101 | #define SSD_MSIX | |
102 | #endif | |
103 | #define SSD_MSIX_VEC 8 | |
104 | #ifdef SSD_MSIX | |
105 | #undef SSD_MSI | |
106 | #undef SSD_ESCAPE_IRQ | |
107 | #define SSD_MSIX_AFFINITY_FORCE | |
108 | #endif | |
109 | ||
110 | #define SSD_TRIM | |
111 | ||
112 | /* Over temperature protect */ | |
113 | #define SSD_OT_PROTECT | |
114 | ||
115 | #ifdef SSD_QUEUE_PBIO | |
116 | #define BIO_SSD_PBIO 20 | |
117 | #endif | |
118 | ||
119 | /* debug */ | |
120 | //#define SSD_DEBUG_ERR | |
121 | ||
122 | /* cmd timer */ | |
123 | #define SSD_CMD_TIMEOUT (60*HZ) | |
124 | ||
125 | /* i2c & smbus */ | |
126 | #define SSD_SPI_TIMEOUT (5*HZ) | |
127 | #define SSD_I2C_TIMEOUT (5*HZ) | |
128 | ||
129 | #define SSD_I2C_MAX_DATA (127) | |
130 | #define SSD_SMBUS_BLOCK_MAX (32) | |
131 | #define SSD_SMBUS_DATA_MAX (SSD_SMBUS_BLOCK_MAX + 2) | |
132 | ||
133 | /* wait for init */ | |
134 | #define SSD_INIT_WAIT (1000) //1s | |
135 | #define SSD_CONTROLLER_WAIT (20*1000/SSD_INIT_WAIT) //20s | |
136 | #define SSD_INIT_MAX_WAIT (500*1000/SSD_INIT_WAIT) //500s | |
137 | #define SSD_INIT_MAX_WAIT_V3_2 (1400*1000/SSD_INIT_WAIT) //1400s | |
138 | #define SSD_RAM_INIT_MAX_WAIT (10*1000/SSD_INIT_WAIT) //10s | |
139 | #define SSD_CH_INFO_MAX_WAIT (10*1000/SSD_INIT_WAIT) //10s | |
140 | ||
141 | /* blkdev busy wait */ | |
142 | #define SSD_DEV_BUSY_WAIT 1000 //ms | |
143 | #define SSD_DEV_BUSY_MAX_WAIT (8*1000/SSD_DEV_BUSY_WAIT) //8s | |
144 | ||
145 | /* smbus retry */ | |
146 | #define SSD_SMBUS_RETRY_INTERVAL (5) //ms | |
147 | #define SSD_SMBUS_RETRY_MAX (1000/SSD_SMBUS_RETRY_INTERVAL) | |
148 | ||
149 | #define SSD_BM_RETRY_MAX 7 | |
150 | ||
151 | /* bm routine interval */ | |
152 | #define SSD_BM_CAP_LEARNING_DELAY (10*60*1000) | |
153 | ||
154 | /* routine interval */ | |
155 | #define SSD_ROUTINE_INTERVAL (10*1000) //10s | |
156 | #define SSD_HWMON_ROUTINE_TICK (60*1000/SSD_ROUTINE_INTERVAL) | |
157 | #define SSD_CAPMON_ROUTINE_TICK ((3600*1000/SSD_ROUTINE_INTERVAL)*24*30) | |
158 | #define SSD_CAPMON2_ROUTINE_TICK (10*60*1000/SSD_ROUTINE_INTERVAL) //fault recover | |
159 | ||
160 | /* dma align */ | |
161 | #define SSD_DMA_ALIGN (16) | |
162 | ||
163 | /* some hw defalut */ | |
164 | #define SSD_LOG_MAX_SZ 4096 | |
165 | ||
166 | #define SSD_NAND_OOB_SZ 1024 | |
167 | #define SSD_NAND_ID_SZ 8 | |
168 | #define SSD_NAND_ID_BUFF_SZ 1024 | |
169 | #define SSD_NAND_MAX_CE 2 | |
170 | ||
171 | #define SSD_BBT_RESERVED 8 | |
172 | ||
173 | #define SSD_ECC_MAX_FLIP (64+1) | |
174 | ||
175 | #define SSD_RAM_ALIGN 16 | |
176 | ||
177 | ||
178 | #define SSD_RELOAD_FLAG 0x3333CCCC | |
179 | #define SSD_RELOAD_FW 0xAA5555AA | |
180 | #define SSD_RESET_NOINIT 0xAA5555AA | |
181 | #define SSD_RESET 0x55AAAA55 | |
182 | #define SSD_RESET_FULL 0x5A | |
183 | //#define SSD_RESET_WAIT 1000 //1s | |
184 | //#define SSD_RESET_MAX_WAIT (200*1000/SSD_RESET_WAIT) //200s | |
185 | ||
186 | ||
187 | /* reverion 1 */ | |
188 | #define SSD_PROTOCOL_V1 0x0 | |
189 | ||
190 | #define SSD_ROM_SIZE (16*1024*1024) | |
191 | #define SSD_ROM_BLK_SIZE (256*1024) | |
192 | #define SSD_ROM_PAGE_SIZE (256) | |
193 | #define SSD_ROM_NR_BRIDGE_FW 2 | |
194 | #define SSD_ROM_NR_CTRL_FW 2 | |
195 | #define SSD_ROM_BRIDGE_FW_BASE 0 | |
196 | #define SSD_ROM_BRIDGE_FW_SIZE (2*1024*1024) | |
197 | #define SSD_ROM_CTRL_FW_BASE (SSD_ROM_NR_BRIDGE_FW*SSD_ROM_BRIDGE_FW_SIZE) | |
198 | #define SSD_ROM_CTRL_FW_SIZE (5*1024*1024) | |
199 | #define SSD_ROM_LABEL_BASE (SSD_ROM_CTRL_FW_BASE+SSD_ROM_CTRL_FW_SIZE*SSD_ROM_NR_CTRL_FW) | |
200 | #define SSD_ROM_VP_BASE (SSD_ROM_LABEL_BASE+SSD_ROM_BLK_SIZE) | |
201 | ||
202 | /* reverion 3 */ | |
203 | #define SSD_PROTOCOL_V3 0x3000000 | |
204 | #define SSD_PROTOCOL_V3_1_1 0x3010001 | |
205 | #define SSD_PROTOCOL_V3_1_3 0x3010003 | |
206 | #define SSD_PROTOCOL_V3_2 0x3020000 | |
207 | #define SSD_PROTOCOL_V3_2_1 0x3020001 /* <4KB improved */ | |
208 | #define SSD_PROTOCOL_V3_2_2 0x3020002 /* ot protect */ | |
209 | #define SSD_PROTOCOL_V3_2_4 0x3020004 | |
210 | ||
211 | ||
212 | #define SSD_PV3_ROM_NR_BM_FW 1 | |
213 | #define SSD_PV3_ROM_BM_FW_SZ (64*1024*8) | |
214 | ||
215 | #define SSD_ROM_LOG_SZ (64*1024*4) | |
216 | ||
217 | #define SSD_ROM_NR_SMART_MAX 2 | |
218 | #define SSD_PV3_ROM_NR_SMART SSD_ROM_NR_SMART_MAX | |
219 | #define SSD_PV3_ROM_SMART_SZ (64*1024) | |
220 | ||
221 | /* reverion 3.2 */ | |
222 | #define SSD_PV3_2_ROM_LOG_SZ (64*1024*80) /* 5MB */ | |
223 | #define SSD_PV3_2_ROM_SEC_SZ (256*1024) /* 256KB */ | |
224 | ||
225 | ||
226 | /* register */ | |
227 | #define SSD_REQ_FIFO_REG 0x0000 | |
228 | #define SSD_RESP_FIFO_REG 0x0008 //0x0010 | |
229 | #define SSD_RESP_PTR_REG 0x0010 //0x0018 | |
230 | #define SSD_INTR_INTERVAL_REG 0x0018 | |
231 | #define SSD_READY_REG 0x001C | |
232 | #define SSD_BRIDGE_TEST_REG 0x0020 | |
233 | #define SSD_STRIPE_SIZE_REG 0x0028 | |
234 | #define SSD_CTRL_VER_REG 0x0030 //controller | |
235 | #define SSD_BRIDGE_VER_REG 0x0034 //bridge | |
236 | #define SSD_PCB_VER_REG 0x0038 | |
237 | #define SSD_BURN_FLAG_REG 0x0040 | |
238 | #define SSD_BRIDGE_INFO_REG 0x0044 | |
239 | ||
240 | #define SSD_WL_VAL_REG 0x0048 //32-bit | |
241 | ||
242 | #define SSD_BB_INFO_REG 0x004C | |
243 | ||
244 | #define SSD_ECC_TEST_REG 0x0050 //test only | |
245 | #define SSD_ERASE_TEST_REG 0x0058 //test only | |
246 | #define SSD_WRITE_TEST_REG 0x0060 //test only | |
247 | ||
248 | #define SSD_RESET_REG 0x0068 | |
249 | #define SSD_RELOAD_FW_REG 0x0070 | |
250 | ||
251 | #define SSD_RESERVED_BLKS_REG 0x0074 | |
252 | #define SSD_VALID_PAGES_REG 0x0078 | |
253 | #define SSD_CH_INFO_REG 0x007C | |
254 | ||
255 | #define SSD_CTRL_TEST_REG_SZ 0x8 | |
256 | #define SSD_CTRL_TEST_REG0 0x0080 | |
257 | #define SSD_CTRL_TEST_REG1 0x0088 | |
258 | #define SSD_CTRL_TEST_REG2 0x0090 | |
259 | #define SSD_CTRL_TEST_REG3 0x0098 | |
260 | #define SSD_CTRL_TEST_REG4 0x00A0 | |
261 | #define SSD_CTRL_TEST_REG5 0x00A8 | |
262 | #define SSD_CTRL_TEST_REG6 0x00B0 | |
263 | #define SSD_CTRL_TEST_REG7 0x00B8 | |
264 | ||
265 | #define SSD_FLASH_INFO_REG0 0x00C0 | |
266 | #define SSD_FLASH_INFO_REG1 0x00C8 | |
267 | #define SSD_FLASH_INFO_REG2 0x00D0 | |
268 | #define SSD_FLASH_INFO_REG3 0x00D8 | |
269 | #define SSD_FLASH_INFO_REG4 0x00E0 | |
270 | #define SSD_FLASH_INFO_REG5 0x00E8 | |
271 | #define SSD_FLASH_INFO_REG6 0x00F0 | |
272 | #define SSD_FLASH_INFO_REG7 0x00F8 | |
273 | ||
274 | #define SSD_RESP_INFO_REG 0x01B8 | |
275 | #define SSD_NAND_BUFF_BASE 0x01BC //for nand write | |
276 | ||
277 | #define SSD_CHIP_INFO_REG_SZ 0x10 | |
278 | #define SSD_CHIP_INFO_REG0 0x0100 //128 bit | |
279 | #define SSD_CHIP_INFO_REG1 0x0110 | |
280 | #define SSD_CHIP_INFO_REG2 0x0120 | |
281 | #define SSD_CHIP_INFO_REG3 0x0130 | |
282 | #define SSD_CHIP_INFO_REG4 0x0140 | |
283 | #define SSD_CHIP_INFO_REG5 0x0150 | |
284 | #define SSD_CHIP_INFO_REG6 0x0160 | |
285 | #define SSD_CHIP_INFO_REG7 0x0170 | |
286 | ||
287 | #define SSD_RAM_INFO_REG 0x01C4 | |
288 | ||
289 | #define SSD_BBT_BASE_REG 0x01C8 | |
290 | #define SSD_ECT_BASE_REG 0x01CC | |
291 | ||
292 | #define SSD_CLEAR_INTR_REG 0x01F0 | |
293 | ||
294 | #define SSD_INIT_STATE_REG_SZ 0x8 | |
295 | #define SSD_INIT_STATE_REG0 0x0200 | |
296 | #define SSD_INIT_STATE_REG1 0x0208 | |
297 | #define SSD_INIT_STATE_REG2 0x0210 | |
298 | #define SSD_INIT_STATE_REG3 0x0218 | |
299 | #define SSD_INIT_STATE_REG4 0x0220 | |
300 | #define SSD_INIT_STATE_REG5 0x0228 | |
301 | #define SSD_INIT_STATE_REG6 0x0230 | |
302 | #define SSD_INIT_STATE_REG7 0x0238 | |
303 | ||
304 | #define SSD_ROM_INFO_REG 0x0600 | |
305 | #define SSD_ROM_BRIDGE_FW_INFO_REG 0x0604 | |
306 | #define SSD_ROM_CTRL_FW_INFO_REG 0x0608 | |
307 | #define SSD_ROM_VP_INFO_REG 0x060C | |
308 | ||
309 | #define SSD_LOG_INFO_REG 0x0610 | |
310 | #define SSD_LED_REG 0x0614 | |
311 | #define SSD_MSG_BASE_REG 0x06F8 | |
312 | ||
313 | /*spi reg */ | |
314 | #define SSD_SPI_REG_CMD 0x0180 | |
315 | #define SSD_SPI_REG_CMD_HI 0x0184 | |
316 | #define SSD_SPI_REG_WDATA 0x0188 | |
317 | #define SSD_SPI_REG_ID 0x0190 | |
318 | #define SSD_SPI_REG_STATUS 0x0198 | |
319 | #define SSD_SPI_REG_RDATA 0x01A0 | |
320 | #define SSD_SPI_REG_READY 0x01A8 | |
321 | ||
322 | /* i2c register */ | |
323 | #define SSD_I2C_CTRL_REG 0x06F0 | |
324 | #define SSD_I2C_RDATA_REG 0x06F4 | |
325 | ||
326 | /* temperature reg */ | |
327 | #define SSD_BRIGE_TEMP_REG 0x0618 | |
328 | ||
329 | #define SSD_CTRL_TEMP_REG0 0x0700 | |
330 | #define SSD_CTRL_TEMP_REG1 0x0708 | |
331 | #define SSD_CTRL_TEMP_REG2 0x0710 | |
332 | #define SSD_CTRL_TEMP_REG3 0x0718 | |
333 | #define SSD_CTRL_TEMP_REG4 0x0720 | |
334 | #define SSD_CTRL_TEMP_REG5 0x0728 | |
335 | #define SSD_CTRL_TEMP_REG6 0x0730 | |
336 | #define SSD_CTRL_TEMP_REG7 0x0738 | |
337 | ||
338 | /* reversion 3 reg */ | |
339 | #define SSD_PROTOCOL_VER_REG 0x01B4 | |
340 | ||
341 | #define SSD_FLUSH_TIMEOUT_REG 0x02A4 | |
342 | #define SSD_BM_FAULT_REG 0x0660 | |
343 | ||
344 | #define SSD_PV3_RAM_STATUS_REG_SZ 0x4 | |
345 | #define SSD_PV3_RAM_STATUS_REG0 0x0260 | |
346 | #define SSD_PV3_RAM_STATUS_REG1 0x0264 | |
347 | #define SSD_PV3_RAM_STATUS_REG2 0x0268 | |
348 | #define SSD_PV3_RAM_STATUS_REG3 0x026C | |
349 | #define SSD_PV3_RAM_STATUS_REG4 0x0270 | |
350 | #define SSD_PV3_RAM_STATUS_REG5 0x0274 | |
351 | #define SSD_PV3_RAM_STATUS_REG6 0x0278 | |
352 | #define SSD_PV3_RAM_STATUS_REG7 0x027C | |
353 | ||
354 | #define SSD_PV3_CHIP_INFO_REG_SZ 0x40 | |
355 | #define SSD_PV3_CHIP_INFO_REG0 0x0300 | |
356 | #define SSD_PV3_CHIP_INFO_REG1 0x0340 | |
357 | #define SSD_PV3_CHIP_INFO_REG2 0x0380 | |
358 | #define SSD_PV3_CHIP_INFO_REG3 0x03B0 | |
359 | #define SSD_PV3_CHIP_INFO_REG4 0x0400 | |
360 | #define SSD_PV3_CHIP_INFO_REG5 0x0440 | |
361 | #define SSD_PV3_CHIP_INFO_REG6 0x0480 | |
362 | #define SSD_PV3_CHIP_INFO_REG7 0x04B0 | |
363 | ||
364 | #define SSD_PV3_INIT_STATE_REG_SZ 0x20 | |
365 | #define SSD_PV3_INIT_STATE_REG0 0x0500 | |
366 | #define SSD_PV3_INIT_STATE_REG1 0x0520 | |
367 | #define SSD_PV3_INIT_STATE_REG2 0x0540 | |
368 | #define SSD_PV3_INIT_STATE_REG3 0x0560 | |
369 | #define SSD_PV3_INIT_STATE_REG4 0x0580 | |
370 | #define SSD_PV3_INIT_STATE_REG5 0x05A0 | |
371 | #define SSD_PV3_INIT_STATE_REG6 0x05C0 | |
372 | #define SSD_PV3_INIT_STATE_REG7 0x05E0 | |
373 | ||
374 | /* reversion 3.1.1 reg */ | |
375 | #define SSD_FULL_RESET_REG 0x01B0 | |
376 | ||
377 | #define SSD_CTRL_REG_ZONE_SZ 0x800 | |
378 | ||
379 | #define SSD_BB_THRESHOLD_L1_REG 0x2C0 | |
380 | #define SSD_BB_THRESHOLD_L2_REG 0x2C4 | |
381 | ||
382 | #define SSD_BB_ACC_REG_SZ 0x4 | |
383 | #define SSD_BB_ACC_REG0 0x21C0 | |
384 | #define SSD_BB_ACC_REG1 0x29C0 | |
385 | #define SSD_BB_ACC_REG2 0x31C0 | |
386 | ||
387 | #define SSD_EC_THRESHOLD_L1_REG 0x2C8 | |
388 | #define SSD_EC_THRESHOLD_L2_REG 0x2CC | |
389 | ||
390 | #define SSD_EC_ACC_REG_SZ 0x4 | |
391 | #define SSD_EC_ACC_REG0 0x21E0 | |
392 | #define SSD_EC_ACC_REG1 0x29E0 | |
393 | #define SSD_EC_ACC_REG2 0x31E0 | |
394 | ||
395 | /* reversion 3.1.2 & 3.1.3 reg */ | |
396 | #define SSD_HW_STATUS_REG 0x02AC | |
397 | ||
398 | #define SSD_PLP_INFO_REG 0x0664 | |
399 | ||
400 | /*reversion 3.2 reg*/ | |
401 | #define SSD_POWER_ON_REG 0x01EC | |
402 | #define SSD_PCIE_LINKSTATUS_REG 0x01F8 | |
403 | #define SSD_PL_CAP_LEARN_REG 0x01FC | |
404 | ||
405 | #define SSD_FPGA_1V0_REG0 0x2070 | |
406 | #define SSD_FPGA_1V8_REG0 0x2078 | |
407 | #define SSD_FPGA_1V0_REG1 0x2870 | |
408 | #define SSD_FPGA_1V8_REG1 0x2878 | |
409 | ||
410 | /*reversion 3.2 reg*/ | |
411 | #define SSD_READ_OT_REG0 0x2260 | |
412 | #define SSD_WRITE_OT_REG0 0x2264 | |
413 | #define SSD_READ_OT_REG1 0x2A60 | |
414 | #define SSD_WRITE_OT_REG1 0x2A64 | |
415 | ||
416 | ||
417 | /* function */ | |
418 | #define SSD_FUNC_READ 0x01 | |
419 | #define SSD_FUNC_WRITE 0x02 | |
420 | #define SSD_FUNC_NAND_READ_WOOB 0x03 | |
421 | #define SSD_FUNC_NAND_READ 0x04 | |
422 | #define SSD_FUNC_NAND_WRITE 0x05 | |
423 | #define SSD_FUNC_NAND_ERASE 0x06 | |
424 | #define SSD_FUNC_NAND_READ_ID 0x07 | |
425 | #define SSD_FUNC_READ_LOG 0x08 | |
426 | #define SSD_FUNC_TRIM 0x09 | |
427 | #define SSD_FUNC_RAM_READ 0x10 | |
428 | #define SSD_FUNC_RAM_WRITE 0x11 | |
429 | #define SSD_FUNC_FLUSH 0x12 //cache / bbt | |
430 | ||
431 | /* spi function */ | |
432 | #define SSD_SPI_CMD_PROGRAM 0x02 | |
433 | #define SSD_SPI_CMD_READ 0x03 | |
434 | #define SSD_SPI_CMD_W_DISABLE 0x04 | |
435 | #define SSD_SPI_CMD_READ_STATUS 0x05 | |
436 | #define SSD_SPI_CMD_W_ENABLE 0x06 | |
437 | #define SSD_SPI_CMD_ERASE 0xd8 | |
438 | #define SSD_SPI_CMD_CLSR 0x30 | |
439 | #define SSD_SPI_CMD_READ_ID 0x9f | |
440 | ||
441 | /* i2c */ | |
442 | #define SSD_I2C_CTRL_READ 0x00 | |
443 | #define SSD_I2C_CTRL_WRITE 0x01 | |
444 | ||
445 | /* i2c internal register */ | |
446 | #define SSD_I2C_CFG_REG 0x00 | |
447 | #define SSD_I2C_DATA_REG 0x01 | |
448 | #define SSD_I2C_CMD_REG 0x02 | |
449 | #define SSD_I2C_STATUS_REG 0x03 | |
450 | #define SSD_I2C_SADDR_REG 0x04 | |
451 | #define SSD_I2C_LEN_REG 0x05 | |
452 | #define SSD_I2C_RLEN_REG 0x06 | |
453 | #define SSD_I2C_WLEN_REG 0x07 | |
454 | #define SSD_I2C_RESET_REG 0x08 //write for reset | |
455 | #define SSD_I2C_PRER_REG 0x09 | |
456 | ||
457 | ||
458 | /* hw mon */ | |
459 | /* FPGA volt = ADC_value / 4096 * 3v */ | |
460 | #define SSD_FPGA_1V0_ADC_MIN 1228 // 0.9v | |
461 | #define SSD_FPGA_1V0_ADC_MAX 1502 // 1.1v | |
462 | #define SSD_FPGA_1V8_ADC_MIN 2211 // 1.62v | |
463 | #define SSD_FPGA_1V8_ADC_MAX 2703 // 1.98 | |
464 | ||
465 | /* ADC value */ | |
466 | #define SSD_FPGA_VOLT_MAX(val) (((val) & 0xffff) >> 4) | |
467 | #define SSD_FPGA_VOLT_MIN(val) (((val >> 16) & 0xffff) >> 4) | |
468 | #define SSD_FPGA_VOLT_CUR(val) (((val >> 32) & 0xffff) >> 4) | |
469 | #define SSD_FPGA_VOLT(val) ((val * 3000) >> 12) | |
470 | ||
471 | #define SSD_VOLT_LOG_DATA(idx, ctrl, volt) (((uint32_t)idx << 24) | ((uint32_t)ctrl << 16) | ((uint32_t)volt)) | |
472 | ||
/* FPGA supply rails monitored via the ADC (see SSD_FPGA_1V0/1V8 ADC limits above). */
enum ssd_fpga_volt
{
	SSD_FPGA_1V0 = 0,	/* 1.0V rail */
	SSD_FPGA_1V8,		/* 1.8V rail */
	SSD_FPGA_VOLT_NR	/* number of monitored FPGA rails */
};
479 | ||
/* Clock fault conditions tracked by hardware monitoring (see SSD_HWMON_CLOCK). */
enum ssd_clock
{
	SSD_CLOCK_166M_LOST = 0,	/* 166 MHz clock lost */
	SSD_CLOCK_166M_SKEW,		/* 166 MHz clock skew */
	SSD_CLOCK_156M_LOST,		/* 156 MHz clock lost */
	SSD_CLOCK_156M_SKEW,		/* 156 MHz clock skew */
	SSD_CLOCK_NR			/* number of clock fault types */
};
488 | ||
489 | /* sensor */ | |
490 | #define SSD_SENSOR_LM75_SADDRESS (0x49 << 1) | |
491 | #define SSD_SENSOR_LM80_SADDRESS (0x28 << 1) | |
492 | ||
493 | #define SSD_SENSOR_CONVERT_TEMP(val) ((int)(val >> 8)) | |
494 | ||
495 | #define SSD_INLET_OT_TEMP (55) //55 DegC | |
496 | #define SSD_INLET_OT_HYST (50) //50 DegC | |
497 | #define SSD_FLASH_OT_TEMP (70) //70 DegC | |
498 | #define SSD_FLASH_OT_HYST (65) //65 DegC | |
499 | ||
/* On-board sensor chips (I2C slave addresses defined above). */
enum ssd_sensor
{
	SSD_SENSOR_LM80 = 0,	/* LM96080 hardware monitor */
	SSD_SENSOR_LM75,	/* LM75 temperature sensor */
	SSD_SENSOR_NR		/* number of sensor types */
};
506 | ||
507 | ||
508 | /* lm75 */ | |
/* LM75 internal register indices. */
enum ssd_lm75_reg
{
	SSD_LM75_REG_TEMP = 0,	/* temperature reading */
	SSD_LM75_REG_CONF,	/* configuration */
	SSD_LM75_REG_THYST,	/* hysteresis threshold */
	SSD_LM75_REG_TOS	/* over-temperature set point */
};
516 | ||
517 | /* lm96080 */ | |
518 | #define SSD_LM80_REG_IN_MAX(nr) (0x2a + (nr) * 2) | |
519 | #define SSD_LM80_REG_IN_MIN(nr) (0x2b + (nr) * 2) | |
520 | #define SSD_LM80_REG_IN(nr) (0x20 + (nr)) | |
521 | ||
522 | #define SSD_LM80_REG_FAN1 0x28 | |
523 | #define SSD_LM80_REG_FAN2 0x29 | |
524 | #define SSD_LM80_REG_FAN_MIN(nr) (0x3b + (nr)) | |
525 | ||
526 | #define SSD_LM80_REG_TEMP 0x27 | |
527 | #define SSD_LM80_REG_TEMP_HOT_MAX 0x38 | |
528 | #define SSD_LM80_REG_TEMP_HOT_HYST 0x39 | |
529 | #define SSD_LM80_REG_TEMP_OS_MAX 0x3a | |
530 | #define SSD_LM80_REG_TEMP_OS_HYST 0x3b | |
531 | ||
532 | #define SSD_LM80_REG_CONFIG 0x00 | |
533 | #define SSD_LM80_REG_ALARM1 0x01 | |
534 | #define SSD_LM80_REG_ALARM2 0x02 | |
535 | #define SSD_LM80_REG_MASK1 0x03 | |
536 | #define SSD_LM80_REG_MASK2 0x04 | |
537 | #define SSD_LM80_REG_FANDIV 0x05 | |
538 | #define SSD_LM80_REG_RES 0x06 | |
539 | ||
540 | #define SSD_LM80_CONVERT_VOLT(val) ((val * 10) >> 8) | |
541 | ||
542 | #define SSD_LM80_3V3_VOLT(val) ((val)*33/19) | |
543 | ||
544 | #define SSD_LM80_CONV_INTERVAL (1000) | |
545 | ||
/* LM80 voltage input channels; order matches the ssd_lm80_limit table. */
enum ssd_lm80_in
{
	SSD_LM80_IN_CAP = 0,	/* power-loss-protection capacitor voltage */
	SSD_LM80_IN_1V2,	/* 1.2V rail */
	SSD_LM80_IN_1V2a,	/* secondary 1.2V rail */
	SSD_LM80_IN_1V5,	/* 1.5V rail */
	SSD_LM80_IN_1V8,	/* 1.8V rail */
	SSD_LM80_IN_FPGA_3V3,	/* FPGA 3.3V rail */
	SSD_LM80_IN_3V3,	/* board 3.3V rail */
	SSD_LM80_IN_NR		/* number of monitored inputs */
};
557 | ||
/* Low/high alarm limits for one LM80 voltage input (scaled units; see table below). */
struct ssd_lm80_limit
{
	uint8_t low;	/* lower limit */
	uint8_t high;	/* upper limit */
};
563 | ||
/* Per-input voltage limits, +/- 5% of nominal except the CAP input.
 * Entry order follows enum ssd_lm80_in. */
static struct ssd_lm80_limit ssd_lm80_limit[SSD_LM80_IN_NR] = {
	{171, 217}, /* CAP in: 1710 ~ 2170 */
	{114, 126}, /* 1V2 */
	{114, 126}, /* 1V2a */
	{142, 158}, /* 1V5 */
	{171, 189}, /* 1V8 */
	{180, 200}, /* FPGA 3V3 */
	{180, 200}, /* 3V3 */
};
574 | ||
575 | /* temperature sensors */ | |
/* Temperature measurement points (see SSD_INLET_OT_* / SSD_FLASH_OT_* thresholds). */
enum ssd_temp_sensor
{
	SSD_TEMP_INLET = 0,	/* board inlet temperature */
	SSD_TEMP_FLASH,		/* flash temperature */
	SSD_TEMP_CTRL,		/* controller temperature */
	SSD_TEMP_NR		/* number of measurement points */
};
583 | ||
584 | ||
585 | #ifdef SSD_OT_PROTECT | |
586 | #define SSD_OT_DELAY (60) //ms | |
587 | ||
588 | #define SSD_OT_TEMP (90) //90 DegC | |
589 | ||
590 | #define SSD_OT_TEMP_HYST (85) //85 DegC | |
591 | #endif | |
592 | ||
593 | /* fpga temperature */ | |
594 | //#define CONVERT_TEMP(val) ((float)(val)*503.975f/4096.0f-273.15f) | |
595 | #define CONVERT_TEMP(val) ((val)*504/4096-273) | |
596 | ||
597 | #define MAX_TEMP(val) CONVERT_TEMP(((val & 0xffff) >> 4)) | |
598 | #define MIN_TEMP(val) CONVERT_TEMP((((val>>16) & 0xffff) >> 4)) | |
599 | #define CUR_TEMP(val) CONVERT_TEMP((((val>>32) & 0xffff) >> 4)) | |
600 | ||
601 | ||
602 | /* CAP monitor */ | |
603 | #define SSD_PL_CAP_U1 SSD_LM80_REG_IN(SSD_LM80_IN_CAP) | |
604 | #define SSD_PL_CAP_U2 SSD_LM80_REG_IN(SSD_LM80_IN_1V8) | |
605 | #define SSD_PL_CAP_LEARN(u1, u2, t) ((t*(u1+u2))/(2*162*(u1-u2))) | |
606 | #define SSD_PL_CAP_LEARN_WAIT (20) //20ms | |
607 | #define SSD_PL_CAP_LEARN_MAX_WAIT (1000/SSD_PL_CAP_LEARN_WAIT) //1s | |
608 | ||
609 | #define SSD_PL_CAP_CHARGE_WAIT (1000) | |
610 | #define SSD_PL_CAP_CHARGE_MAX_WAIT ((120*1000)/SSD_PL_CAP_CHARGE_WAIT) //120s | |
611 | ||
612 | #define SSD_PL_CAP_VOLT(val) (val*7) | |
613 | ||
614 | #define SSD_PL_CAP_VOLT_FULL (13700) | |
615 | #define SSD_PL_CAP_VOLT_READY (12880) | |
616 | ||
617 | #define SSD_PL_CAP_THRESHOLD (8900) | |
618 | #define SSD_PL_CAP_CP_THRESHOLD (5800) | |
619 | #define SSD_PL_CAP_THRESHOLD_HYST (100) | |
620 | ||
/* Power-loss capacitor monitor slots (a single capacitor bank today). */
enum ssd_pl_cap_status
{
	SSD_PL_CAP = 0,
	SSD_PL_CAP_NR	/* number of capacitor monitor slots */
};
626 | ||
/* Power-loss capacitor hardware variants. */
enum ssd_pl_cap_type
{
	SSD_PL_CAP_DEFAULT = 0,	/* 4 cap */
	SSD_PL_CAP_CP		/* 3 cap */
};
632 | ||
633 | ||
634 | /* hwmon offset */ | |
635 | #define SSD_HWMON_OFFS_TEMP (0) | |
636 | #define SSD_HWMON_OFFS_SENSOR (SSD_HWMON_OFFS_TEMP + SSD_TEMP_NR) | |
637 | #define SSD_HWMON_OFFS_PL_CAP (SSD_HWMON_OFFS_SENSOR + SSD_SENSOR_NR) | |
638 | #define SSD_HWMON_OFFS_LM80 (SSD_HWMON_OFFS_PL_CAP + SSD_PL_CAP_NR) | |
639 | #define SSD_HWMON_OFFS_CLOCK (SSD_HWMON_OFFS_LM80 + SSD_LM80_IN_NR) | |
640 | #define SSD_HWMON_OFFS_FPGA (SSD_HWMON_OFFS_CLOCK + SSD_CLOCK_NR) | |
641 | ||
642 | #define SSD_HWMON_TEMP(idx) (SSD_HWMON_OFFS_TEMP + idx) | |
643 | #define SSD_HWMON_SENSOR(idx) (SSD_HWMON_OFFS_SENSOR + idx) | |
644 | #define SSD_HWMON_PL_CAP(idx) (SSD_HWMON_OFFS_PL_CAP + idx) | |
645 | #define SSD_HWMON_LM80(idx) (SSD_HWMON_OFFS_LM80 + idx) | |
646 | #define SSD_HWMON_CLOCK(idx) (SSD_HWMON_OFFS_CLOCK + idx) | |
647 | #define SSD_HWMON_FPGA(ctrl, idx) (SSD_HWMON_OFFS_FPGA + (ctrl * SSD_FPGA_VOLT_NR) + idx) | |
648 | ||
649 | ||
650 | ||
/* fifo: simple power-of-two ring buffer of fixed-size elements,
 * protected by its own spinlock. One slot is always kept empty so
 * in == out unambiguously means "empty" (see __sfifo_put). */
typedef struct sfifo
{
	uint32_t in;		/* write index */
	uint32_t out;		/* read index */
	uint32_t size;		/* slot count, a power of two */
	uint32_t esize;		/* element size in bytes */
	uint32_t mask;		/* size - 1, used to wrap indices */
	spinlock_t lock;	/* protects in/out/data */
	void *data;		/* backing storage (vmalloc'ed) */
} sfifo_t;
662 | ||
663 | static int sfifo_alloc(struct sfifo *fifo, uint32_t size, uint32_t esize) | |
664 | { | |
665 | uint32_t __size = 1; | |
666 | ||
667 | if (!fifo || size > INT_MAX || esize == 0) { | |
668 | return -EINVAL; | |
669 | } | |
670 | ||
671 | while (__size < size) __size <<= 1; | |
672 | ||
673 | if (__size < 2) { | |
674 | return -EINVAL; | |
675 | } | |
676 | ||
677 | fifo->data = vmalloc(esize * __size); | |
678 | if (!fifo->data) { | |
679 | return -ENOMEM; | |
680 | } | |
681 | ||
682 | fifo->in = 0; | |
683 | fifo->out = 0; | |
684 | fifo->mask = __size - 1; | |
685 | fifo->size = __size; | |
686 | fifo->esize = esize; | |
687 | spin_lock_init(&fifo->lock); | |
688 | ||
689 | return 0; | |
690 | } | |
691 | ||
692 | static void sfifo_free(struct sfifo *fifo) | |
693 | { | |
694 | if (!fifo) { | |
695 | return; | |
696 | } | |
697 | ||
698 | vfree(fifo->data); | |
699 | fifo->data = NULL; | |
700 | fifo->in = 0; | |
701 | fifo->out = 0; | |
702 | fifo->mask = 0; | |
703 | fifo->size = 0; | |
704 | fifo->esize = 0; | |
705 | } | |
706 | ||
707 | static int __sfifo_put(struct sfifo *fifo, void *val) | |
708 | { | |
709 | if (((fifo->in + 1) & fifo->mask) == fifo->out) { | |
710 | return -1; | |
711 | } | |
712 | ||
713 | memcpy((fifo->data + (fifo->in * fifo->esize)), val, fifo->esize); | |
714 | fifo->in = (fifo->in + 1) & fifo->mask; | |
715 | ||
716 | return 0; | |
717 | } | |
718 | ||
719 | static int sfifo_put(struct sfifo *fifo, void *val) | |
720 | { | |
721 | int ret = 0; | |
722 | ||
723 | if (!fifo || !val) { | |
724 | return -EINVAL; | |
725 | } | |
726 | ||
727 | if (!in_interrupt()) { | |
728 | spin_lock_irq(&fifo->lock); | |
729 | ret = __sfifo_put(fifo, val); | |
730 | spin_unlock_irq(&fifo->lock); | |
731 | } else { | |
732 | spin_lock(&fifo->lock); | |
733 | ret = __sfifo_put(fifo, val); | |
734 | spin_unlock(&fifo->lock); | |
735 | } | |
736 | ||
737 | return ret; | |
738 | } | |
739 | ||
740 | static int __sfifo_get(struct sfifo *fifo, void *val) | |
741 | { | |
742 | if (fifo->out == fifo->in) { | |
743 | return -1; | |
744 | } | |
745 | ||
746 | memcpy(val, (fifo->data + (fifo->out * fifo->esize)), fifo->esize); | |
747 | fifo->out = (fifo->out + 1) & fifo->mask; | |
748 | ||
749 | return 0; | |
750 | } | |
751 | ||
752 | static int sfifo_get(struct sfifo *fifo, void *val) | |
753 | { | |
754 | int ret = 0; | |
755 | ||
756 | if (!fifo || !val) { | |
757 | return -EINVAL; | |
758 | } | |
759 | ||
760 | if (!in_interrupt()) { | |
761 | spin_lock_irq(&fifo->lock); | |
762 | ret = __sfifo_get(fifo, val); | |
763 | spin_unlock_irq(&fifo->lock); | |
764 | } else { | |
765 | spin_lock(&fifo->lock); | |
766 | ret = __sfifo_get(fifo, val); | |
767 | spin_unlock(&fifo->lock); | |
768 | } | |
769 | ||
770 | return ret; | |
771 | } | |
772 | ||
773 | /* bio list */ | |
774 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)) | |
/* Minimal singly linked bio list for kernels lacking struct bio_list.
 * Despite the names, prev is the head of the chain and next the tail. */
struct ssd_blist {
	struct bio *prev;	/* head bio */
	struct bio *next;	/* tail bio */
};
779 | ||
780 | static inline void ssd_blist_init(struct ssd_blist *ssd_bl) | |
781 | { | |
782 | ssd_bl->prev = NULL; | |
783 | ssd_bl->next = NULL; | |
784 | } | |
785 | ||
786 | static inline struct bio *ssd_blist_get(struct ssd_blist *ssd_bl) | |
787 | { | |
788 | struct bio *bio = ssd_bl->prev; | |
789 | ||
790 | ssd_bl->prev = NULL; | |
791 | ssd_bl->next = NULL; | |
792 | ||
793 | return bio; | |
794 | } | |
795 | ||
796 | static inline void ssd_blist_add(struct ssd_blist *ssd_bl, struct bio *bio) | |
797 | { | |
798 | bio->bi_next = NULL; | |
799 | ||
800 | if (ssd_bl->next) { | |
801 | ssd_bl->next->bi_next = bio; | |
802 | } else { | |
803 | ssd_bl->prev = bio; | |
804 | } | |
805 | ||
806 | ssd_bl->next = bio; | |
807 | } | |
808 | ||
809 | #else | |
810 | #define ssd_blist bio_list | |
811 | #define ssd_blist_init bio_list_init | |
812 | #define ssd_blist_get bio_list_get | |
813 | #define ssd_blist_add bio_list_add | |
814 | #endif | |
815 | ||
816 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)) | |
817 | #define bio_start(bio) (bio->bi_sector) | |
818 | #else | |
819 | #define bio_start(bio) (bio->bi_iter.bi_sector) | |
820 | #endif | |
821 | ||
822 | /* mutex */ | |
823 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)) | |
824 | #define mutex_lock down | |
825 | #define mutex_unlock up | |
826 | #define mutex semaphore | |
827 | #define mutex_init init_MUTEX | |
828 | #endif | |
829 | ||
830 | /* i2c */ | |
/* Layout of the I2C control register (SSD_I2C_CTRL_REG). */
typedef union ssd_i2c_ctrl {
	uint32_t val;		/* raw register value */
	struct {
		uint8_t wdata;	/* byte to write */
		uint8_t addr;	/* internal register address (SSD_I2C_*_REG) */
		uint16_t rw:1;	/* SSD_I2C_CTRL_READ or SSD_I2C_CTRL_WRITE */
		uint16_t pad:15;
	} bits;
}__attribute__((packed)) ssd_i2c_ctrl_t;
840 | ||
/* Layout of the I2C read-data register (SSD_I2C_RDATA_REG). */
typedef union ssd_i2c_data {
	uint32_t val;			/* raw register value */
	struct {
		uint32_t rdata:8;	/* byte read back */
		uint32_t valid:1;	/* set when rdata holds valid data */
		uint32_t pad:23;
	} bits;
}__attribute__((packed)) ssd_i2c_data_t;
849 | ||
850 | /* write mode */ | |
/* Write cache modes. */
enum ssd_write_mode
{
	SSD_WMODE_BUFFER = 0,	/* buffered writes */
	SSD_WMODE_BUFFER_EX,	/* buffered, extended variant -- TODO confirm semantics */
	SSD_WMODE_FUA,		/* force unit access (write through) */
	/* dummy: not real hardware modes, resolved by the driver */
	SSD_WMODE_AUTO,
	SSD_WMODE_DEFAULT
};
860 | ||
861 | /* reset type */ | |
/* Reset levels (cf. SSD_RESET_NOINIT / SSD_RESET / SSD_RESET_FULL codes). */
enum ssd_reset_type
{
	SSD_RST_NOINIT = 0,	/* reset without re-initialization */
	SSD_RST_NORMAL,		/* normal reset */
	SSD_RST_FULL		/* full reset */
};
868 | ||
869 | /* ssd msg */ | |
/* ssd msg: one scatter-gather segment of an I/O request. */
typedef struct ssd_sg_entry
{
	uint64_t block:48;	/* starting block number */
	uint64_t length:16;	/* segment length -- presumably in sectors; confirm with callers */
	uint64_t buf;		/* buffer bus address -- presumably a DMA address; confirm */
}__attribute__((packed))ssd_sg_entry_t;
876 | ||
/* Read/write request message. */
typedef struct ssd_rw_msg
{
	uint8_t tag;		/* command tag, echoed in the response */
	uint8_t flag;
	uint8_t nsegs;		/* number of scatter-gather entries */
	uint8_t fun;		/* function code (SSD_FUNC_READ / SSD_FUNC_WRITE) */
	uint32_t reserved;	//for 64-bit align
	struct ssd_sg_entry sge[1];	//base: first of nsegs entries (variable length)
}__attribute__((packed))ssd_rw_msg_t;
886 | ||
/* Completion message for a normal command. */
typedef struct ssd_resp_msg
{
	uint8_t tag;		/* tag of the completed command */
	uint8_t status:2;	/* completion status */
	uint8_t bitflip:6;	/* bit-flip count -- presumably ECC corrections; confirm */
	uint8_t log;		/* log indicator -- TODO confirm semantics */
	uint8_t fun;		/* echoed function code */
	uint32_t reserved;
}__attribute__((packed))ssd_resp_msg_t;
896 | ||
/* Flush request: flash cache or bad-block table (SSD_FUNC_FLUSH). */
typedef struct ssd_flush_msg
{
	uint8_t tag;		/* command tag */
	uint8_t flag:2;		//flash cache 0 or bbt 1
	uint8_t flash:6;	/* target flash index */
	uint8_t ctrl_idx;	/* target controller index */
	uint8_t fun;		/* function code */
	uint32_t reserved;	//align
}__attribute__((packed))ssd_flush_msg_t;
906 | ||
/* Raw NAND operation request (read/write/erase/read-id, SSD_FUNC_NAND_*). */
typedef struct ssd_nand_op_msg
{
	uint8_t tag;		/* command tag */
	uint8_t flag;
	uint8_t ctrl_idx;	/* target controller index */
	uint8_t fun;		/* function code */
	uint32_t reserved;	//align
	uint16_t page_count;	/* number of pages to operate on */
	uint8_t chip_ce;	/* chip-enable line */
	uint8_t chip_no;	/* chip number */
	uint32_t page_no;	/* starting page number */
	uint64_t buf;		/* data buffer address -- presumably DMA; confirm */
}__attribute__((packed))ssd_nand_op_msg_t;
920 | ||
/* On-board RAM access request (SSD_FUNC_RAM_READ / SSD_FUNC_RAM_WRITE). */
typedef struct ssd_ram_op_msg
{
	uint8_t tag;		/* command tag */
	uint8_t flag;
	uint8_t ctrl_idx;	/* target controller index */
	uint8_t fun;		/* function code */
	uint32_t reserved;	//align
	uint32_t start;		/* starting RAM offset */
	uint32_t length;	/* transfer length */
	uint64_t buf;		/* data buffer address -- presumably DMA; confirm */
}__attribute__((packed))ssd_ram_op_msg_t;
932 | ||
933 | ||
934 | /* log msg */ | |
/* log msg: log read request (SSD_FUNC_READ_LOG). */
typedef struct ssd_log_msg
{
	uint8_t tag;		/* command tag */
	uint8_t flag;
	uint8_t ctrl_idx;	/* target controller index */
	uint8_t fun;		/* function code */
	uint32_t reserved;	//align
	uint64_t buf;		/* log buffer address */
}__attribute__((packed))ssd_log_msg_t;
944 | ||
/* Log operation request; layout mirrors ssd_log_msg with extra padding. */
typedef struct ssd_log_op_msg
{
	uint8_t tag;		/* command tag */
	uint8_t flag;
	uint8_t ctrl_idx;	/* target controller index */
	uint8_t fun;		/* function code */
	uint32_t reserved;	//align
	uint64_t reserved1;	//align
	uint64_t buf;		/* log buffer address */
}__attribute__((packed))ssd_log_op_msg_t;
955 | ||
/* Completion message for a log read. */
typedef struct ssd_log_resp_msg
{
	uint8_t tag;			/* tag of the completed command */
	uint16_t status :2;		/* completion status */
	uint16_t reserved1 :2;		//align with the normal resp msg
	uint16_t nr_log :12;		/* number of log entries returned */
	uint8_t fun;			/* echoed function code */
	uint32_t reserved;
}__attribute__((packed))ssd_log_resp_msg_t;
965 | ||
966 | ||
967 | /* resp msg */ | |
/* resp msg: one response word, viewable raw or decoded. */
typedef union ssd_response_msq
{
	ssd_resp_msg_t resp_msg;		/* normal command completion */
	ssd_log_resp_msg_t log_resp_msg;	/* log read completion */
	uint64_t u64_msg;			/* raw 64-bit view */
	uint32_t u32_msg[2];			/* raw 32-bit halves */
} ssd_response_msq_t;
975 | ||
976 | ||
/* custom struct */

/* Protocol layout info: version plus the locations/sizes of the
 * init-state and chip-info register areas (presumably MMIO offsets --
 * TODO confirm against register accessors). */
typedef struct ssd_protocol_info
{
	uint32_t ver;			/* protocol version */
	uint32_t init_state_reg;
	uint32_t init_state_reg_sz;
	uint32_t chip_info_reg;
	uint32_t chip_info_reg_sz;
} ssd_protocol_info_t;
986 | ||
/* Hardware description gathered from the device at init time. */
typedef struct ssd_hw_info
{
	uint32_t bridge_ver;	/* bridge fw version */
	uint32_t ctrl_ver;	/* controller fw version */

	uint32_t cmd_fifo_sz;
	uint32_t cmd_fifo_sz_mask;
	uint32_t cmd_max_sg;	/* max scatter-gather entries per command */
	uint32_t sg_max_sec;	/* max sectors per sg entry */
	uint32_t resp_ptr_sz;
	uint32_t resp_msg_sz;

	uint16_t nr_ctrl;	/* number of flash controllers */

	uint16_t nr_data_ch;
	uint16_t nr_ch;
	uint16_t max_ch;
	uint16_t nr_chip;

	uint8_t pcb_ver;
	uint8_t upper_pcb_ver;

	uint8_t nand_vendor_id;
	uint8_t nand_dev_id;

	uint8_t max_ce;
	uint8_t id_size;
	uint16_t oob_size;

	uint16_t bbf_pages;
	uint16_t bbf_seek;	//

	uint16_t page_count;	//per block
	uint32_t page_size;
	uint32_t block_count;	//per flash

	uint64_t ram_size;
	uint32_t ram_align;
	uint32_t ram_max_len;

	uint64_t bbt_base;	/* bad block table base */
	uint32_t bbt_size;
	uint64_t md_base;	//metadata
	uint32_t md_size;
	uint32_t md_entry_sz;

	uint32_t log_sz;

	uint64_t nand_wbuff_base;

	uint32_t md_reserved_blks;
	uint32_t reserved_blks;
	uint32_t valid_pages;
	uint32_t max_valid_pages;
	uint64_t size;		/* capacity -- unit (sectors vs bytes) TODO confirm */
} ssd_hw_info_t;
1043 | ||
/* Extended hardware info; pad[59] keeps the struct a fixed 64 bytes. */
typedef struct ssd_hw_info_extend
{
	uint8_t board_type;
	uint8_t cap_type;
	uint8_t plp_type;	/* power-loss protection, see enum ssd_plp_type */
	uint8_t work_mode;
	uint8_t form_factor;	/* see enum ssd_form_factor */

	uint8_t pad[59];
}ssd_hw_info_extend_t;
1054 | ||
/* On-board ROM (presumably the SPI flash, cf. SSD_CMD_SPI_*) layout:
 * firmware images, log, smart and label areas. */
typedef struct ssd_rom_info
{
	uint32_t size;		/* total rom size */
	uint32_t block_size;
	uint16_t page_size;
	uint8_t nr_bridge_fw;	/* bridge fw image slots */
	uint8_t nr_ctrl_fw;	/* controller fw image slots */
	uint8_t nr_bm_fw;	/* battery manager fw image slots */
	uint8_t nr_smart;	/* smart record slots */
	uint32_t bridge_fw_base;
	uint32_t bridge_fw_sz;
	uint32_t ctrl_fw_base;
	uint32_t ctrl_fw_sz;
	uint32_t bm_fw_base;
	uint32_t bm_fw_sz;
	uint32_t log_base;
	uint32_t log_sz;
	uint32_t smart_base;
	uint32_t smart_sz;
	uint32_t vp_base;	/* see SSD_CMD_UPDATE_VP -- meaning of "vp" TODO confirm */
	uint32_t label_base;
} ssd_rom_info_t;
1077 | ||
/* debug info */

/* Debug modes set via SSD_CMD_DEBUG.
 * NOTE(review): ERR/TO names presumably select error/timeout fault
 * injection (cf. drv param "finject") -- confirm against handler. */
enum ssd_debug_type
{
	SSD_DEBUG_NONE = 0,
	SSD_DEBUG_READ_ERR,
	SSD_DEBUG_WRITE_ERR,
	SSD_DEBUG_RW_ERR,
	SSD_DEBUG_READ_TO,
	SSD_DEBUG_WRITE_TO,
	SSD_DEBUG_RW_TO,
	SSD_DEBUG_LOG,
	SSD_DEBUG_OFFLINE,
	SSD_DEBUG_NR		/* number of debug types */
};

/* Debug request payload: type selects which union arm is valid. */
typedef struct ssd_debug_info
{
	int type;		/* enum ssd_debug_type */
	union {
		struct {
			uint64_t off;	/* target offset */
			uint32_t len;
		} loc;
		struct {
			int event;
			uint32_t extra;
		} log;
	} data;
}ssd_debug_info_t;
1107 | ||
/* label */
#define SSD_LABEL_FIELD_SZ	32
#define SSD_SN_SZ	16

/* Factory label: fixed-width text fields. */
typedef struct ssd_label
{
	char date[SSD_LABEL_FIELD_SZ];
	char sn[SSD_LABEL_FIELD_SZ];	/* serial number */
	char part[SSD_LABEL_FIELD_SZ];
	char desc[SSD_LABEL_FIELD_SZ];
	char other[SSD_LABEL_FIELD_SZ];
	char maf[SSD_LABEL_FIELD_SZ];	/* presumably manufacturer -- confirm */
} ssd_label_t;

#define SSD_LABEL_DESC_SZ	256

/* Extended (v3) factory label. */
typedef struct ssd_labelv3
{
	char boardtype[SSD_LABEL_FIELD_SZ];
	char barcode[SSD_LABEL_FIELD_SZ];
	char item[SSD_LABEL_FIELD_SZ];
	char description[SSD_LABEL_DESC_SZ];
	char manufactured[SSD_LABEL_FIELD_SZ];
	char vendorname[SSD_LABEL_FIELD_SZ];
	char issuenumber[SSD_LABEL_FIELD_SZ];
	char cleicode[SSD_LABEL_FIELD_SZ];
	char bom[SSD_LABEL_FIELD_SZ];
} ssd_labelv3_t;
1136 | ||
/* battery */
typedef struct ssd_battery_info
{
	uint32_t fw_ver;	/* battery manager firmware version */
} ssd_battery_info_t;

/* ssd power stat */

/* Power-cycle counters; persisted as part of struct ssd_smart. */
typedef struct ssd_power_stat
{
	uint64_t nr_poweron;	/* power-on count */
	uint64_t nr_powerloss;	/* power-loss count */
	uint64_t init_failed;	/* failed initialization count */
} ssd_power_stat_t;
1150 | ||
/* io stat */

/* Cumulative IO counters (kept per device and per queue). */
typedef struct ssd_io_stat
{
	uint64_t run_time;
	uint64_t nr_to;		/* timeouts */
	uint64_t nr_ioerr;	/* io errors */
	uint64_t nr_rwerr;	/* read/write errors */
	uint64_t nr_read;	/* read ops */
	uint64_t nr_write;	/* write ops */
	uint64_t rsectors;	/* sectors read */
	uint64_t wsectors;	/* sectors written */
} ssd_io_stat_t;

/* ecc */

/* Histogram of ECC bit-flip counts, indexed by flips-per-event. */
typedef struct ssd_ecc_info
{
	uint64_t bitflip[SSD_ECC_MAX_FLIP];
} ssd_ecc_info_t;
1169 | ||
/* log */

/* Severity levels for device log entries. */
enum ssd_log_level
{
	SSD_LOG_LEVEL_INFO = 0,
	SSD_LOG_LEVEL_NOTICE,
	SSD_LOG_LEVEL_WARNING,
	SSD_LOG_LEVEL_ERR,
	SSD_LOG_NR_LEVEL	/* number of levels */
};

/* Log counters: total plus a per-severity breakdown. */
typedef struct ssd_log_info
{
	uint64_t nr_log;
	uint64_t stat[SSD_LOG_NR_LEVEL];
} ssd_log_info_t;
1185 | ||
/* S.M.A.R.T. */
/* Spells "SSDSMART" when the u64 is stored little-endian. */
#define SSD_SMART_MAGIC	(0x5452414D53445353ull)

/* Persistent SMART record: power, IO, ECC and log statistics,
 * validated by version + magic. */
typedef struct ssd_smart
{
	struct ssd_power_stat pstat;
	struct ssd_io_stat io_stat;
	struct ssd_ecc_info ecc_info;
	struct ssd_log_info log_info;
	uint64_t version;
	uint64_t magic;		/* SSD_SMART_MAGIC */
} ssd_smart_t;
1198 | ||
/* internal log */

/* Driver-side buffered copy of device log entries. */
typedef struct ssd_internal_log
{
	uint32_t nr_log;	/* entries currently buffered */
	void *log;		/* log buffer */
} ssd_internal_log_t;
1205 | ||
/* ssd cmd */

/* One in-flight tagged command: the originating bio, mapped sg list,
 * DMA'd request message, timeout timer and completion state. */
typedef struct ssd_cmd
{
	struct bio *bio;
	struct scatterlist *sgl;
	struct list_head list;
	void *dev;		/* presumably the owning ssd_device -- confirm */
	int nsegs;		/* mapped sg segment count */
	int flag;	/*pbio(1) or bio(0)*/

	int tag;		/* hardware command tag */
	void *msg;		/* request message buffer */
	dma_addr_t msg_dma;	/* bus address of msg */

	unsigned long start_time;

	int errors;
	unsigned int nr_log;

	struct timer_list cmd_timer;	/* per-command timeout timer */
	struct completion *waiting;	/* non-NULL for synchronous waiters */
} ssd_cmd_t;

/* Submission hook: sends one prepared command to the hardware. */
typedef void (*send_cmd_func)(struct ssd_cmd *);
typedef int (*ssd_event_call)(struct gendisk *, int, int); /* gendisk, event id, event level */
1231 | ||
/* dcmd sz */
#define SSD_DCMD_MAX_SZ	32

/* Direct command: a raw request message (up to SSD_DCMD_MAX_SZ bytes)
 * issued outside the bio path (cf. ssd_device.dcmd_list). */
typedef struct ssd_dcmd
{
	struct list_head list;	/* linkage on ssd_device.dcmd_list */
	void *dev;		/* presumably the owning ssd_device -- confirm */
	uint8_t msg[SSD_DCMD_MAX_SZ];	/* raw request message */
} ssd_dcmd_t;
1241 | ||
1242 | ||
/* Device lifecycle flags kept in ssd_device.state.
 * NOTE(review): small consecutive values + "unsigned long state"
 * suggest use as bit numbers (test_bit/set_bit) -- confirm. */
enum ssd_state {
	SSD_INIT_WORKQ,		/* workqueue initialized */
	SSD_INIT_BD,		/* block device initialized */
	SSD_ONLINE,
	/* full reset */
	SSD_RESETING,
	/* hw log */
	SSD_LOG_HW,
	/* log err */
	SSD_LOG_ERR,
};
1254 | ||
#define SSD_QUEUE_NAME_LEN	16

/* Per-vector completion queue state (one per MSI-X entry,
 * cf. ssd_device.queue[SSD_MSIX_VEC]) plus its statistics. */
typedef struct ssd_queue {
	char name[SSD_QUEUE_NAME_LEN];
	void *dev;		/* presumably the owning ssd_device -- confirm */

	int idx;		/* queue index */

	uint32_t resp_idx;	/* current position in the response ring */
	uint32_t resp_idx_mask;
	uint32_t resp_msg_sz;

	void *resp_msg;
	void *resp_ptr;

	struct ssd_cmd *cmd;	/* command table, indexed by tag -- confirm */

	struct ssd_io_stat io_stat;
	struct ssd_ecc_info ecc_info;
} ssd_queue_t;
1274 | ||
/* Per-device state: PCI/MMIO resources, command tag pool, send/done
 * kthreads, completion queues, block device glue, stats and monitors. */
typedef struct ssd_device {
	char name[SSD_DEV_NAME_LEN];

	int idx;
	int major;
	int readonly;

	int int_mode;		/* interrupt mode */
#ifdef SSD_ESCAPE_IRQ
	int irq_cpu;
#endif

	int reload_fw;

	int ot_delay;		//in ms

	atomic_t refcnt;
	atomic_t tocnt;		/* timeout count */
	atomic_t in_flight[2];	//r&w

	uint64_t uptime;

	struct list_head list;
	struct pci_dev *pdev;

	unsigned long mmio_base;
	unsigned long mmio_len;
	void __iomem *ctrlp;	/* mapped control registers */

	struct mutex spi_mutex;
	struct mutex i2c_mutex;

	struct ssd_protocol_info protocol_info;
	struct ssd_hw_info hw_info;
	struct ssd_rom_info rom_info;
	struct ssd_label label;

	struct ssd_smart smart;

	/* send path: queue drained by send_thread */
	atomic_t in_sendq;
	spinlock_t sendq_lock;
	struct ssd_blist sendq;
	struct task_struct *send_thread;
	wait_queue_head_t send_waitq;

	/* completion path: queue drained by done_thread */
	atomic_t in_doneq;
	spinlock_t doneq_lock;
	struct ssd_blist doneq;
	struct task_struct *done_thread;
	wait_queue_head_t done_waitq;

	struct ssd_dcmd *dcmd;
	spinlock_t dcmd_lock;
	struct list_head dcmd_list;	/* direct cmd list */
	wait_queue_head_t dcmd_wq;

	unsigned long *tag_map;		/* presumably a tag allocation bitmap -- confirm */
	wait_queue_head_t tag_wq;	/* waiters for a free tag */

	spinlock_t cmd_lock;
	struct ssd_cmd *cmd;		/* command table */
	send_cmd_func scmd;		/* command submission hook */

	ssd_event_call event_call;
	void *msg_base;			/* request message area */
	dma_addr_t msg_base_dma;

	uint32_t resp_idx;
	void *resp_msg_base;
	void *resp_ptr_base;
	dma_addr_t resp_msg_base_dma;
	dma_addr_t resp_ptr_base_dma;

	int nr_queue;
	struct msix_entry entry[SSD_MSIX_VEC];
	struct ssd_queue queue[SSD_MSIX_VEC];

	struct request_queue *rq;	/* The device request queue */
	struct gendisk *gd;		/* The gendisk structure */

	struct mutex internal_log_mutex;
	struct ssd_internal_log internal_log;
	struct workqueue_struct *workq;
	struct work_struct log_work;	/* get log */
	void *log_buf;

	unsigned long state;		/* device state, for example, block device inited */

	struct module *owner;

	/* extend */

	int slave;
	int cmajor;
	int save_md;		/* save metadata flag */
	int ot_protect;		/* over-temperature protect (cf. SSD_CMD_OT_PROTECT) */

	struct kref kref;

	struct mutex gd_mutex;
	struct ssd_log_info log_info;	/* volatile */

	atomic_t queue_depth;
	struct mutex barrier_mutex;
	struct mutex fw_mutex;

	struct ssd_hw_info_extend hw_info_ext;
	struct ssd_labelv3 labelv3;

	int wmode;		/* effective write mode */
	int user_wmode;		/* user-requested write mode */
	struct mutex bm_mutex;
	struct work_struct bm_work;	/* check bm */
	struct timer_list bm_timer;
	struct sfifo log_fifo;

	struct timer_list routine_timer;
	unsigned long routine_tick;
	unsigned long hwmon;

	struct work_struct hwmon_work;	/* check hw */
	struct work_struct capmon_work;	/* check battery */
	struct work_struct tempmon_work;	/* check temp */

	/* debug info */
	struct ssd_debug_info db_info;
	uint64_t reset_time;
	int has_non_0x98_reg_access;
	spinlock_t in_flight_lock;

	uint64_t last_poweron_id;

} ssd_device_t;
1408 | ||
1409 | ||
/* Ioctl struct */

/* Threshold/value triple for SSD_CMD_GET_BBACC_INFO / GET_ECACC_INFO.
 * NOTE(review): semantics inferred from those ioctl names -- confirm. */
typedef struct ssd_acc_info {
	uint32_t threshold_l1;
	uint32_t threshold_l2;
	uint32_t val;
} ssd_acc_info_t;

/* Argument for SSD_CMD_REG_READ / SSD_CMD_REG_WRITE. */
typedef struct ssd_reg_op_info
{
	uint32_t offset;	/* register offset */
	uint32_t value;
} ssd_reg_op_info_t;

/* Argument for SSD_CMD_SPI_READ / WRITE / ERASE. */
typedef struct ssd_spi_op_info
{
	void __user *buf;
	uint32_t off;
	uint32_t len;
} ssd_spi_op_info_t;

/* Argument for the SSD_CMD_I2C_* ioctls. */
typedef struct ssd_i2c_op_info
{
	uint8_t saddr;		/* slave address */
	uint8_t wsize;		/* bytes to write */
	uint8_t rsize;		/* bytes to read */
	void __user *wbuf;
	void __user *rbuf;
} ssd_i2c_op_info_t;

/* Argument for the SSD_CMD_SMBUS_* ioctls. */
typedef struct ssd_smbus_op_info
{
	uint8_t saddr;		/* slave address */
	uint8_t cmd;		/* SMBus command code */
	uint8_t size;
	void __user *buf;
} ssd_smbus_op_info_t;

/* Argument for SSD_CMD_RAM_READ / SSD_CMD_RAM_WRITE. */
typedef struct ssd_ram_op_info {
	uint8_t ctrl_idx;	/* target controller */
	uint32_t length;
	uint64_t start;
	uint8_t __user *buf;
} ssd_ram_op_info_t;

/* Argument for the SSD_CMD_NAND_* ioctls and SSD_CMD_UPDATE_BBT. */
typedef struct ssd_flash_op_info {
	uint32_t page;
	uint16_t flash;
	uint8_t chip;
	uint8_t ctrl_idx;
	uint8_t __user *buf;
} ssd_flash_op_info_t;

/* Argument for SSD_CMD_SW_LOG: a software-generated log event. */
typedef struct ssd_sw_log_info {
	uint16_t event;
	uint16_t pad;
	uint32_t data;
} ssd_sw_log_info_t;

/* Argument for SSD_CMD_GET_VERSION. */
typedef struct ssd_version_info
{
	uint32_t bridge_ver;	/* bridge fw version */
	uint32_t ctrl_ver;	/* controller fw version */
	uint32_t bm_ver;	/* battery manager fw version */
	uint8_t pcb_ver;	/* main pcb version */
	uint8_t upper_pcb_ver;
	uint8_t pad0;
	uint8_t pad1;
} ssd_version_info_t;

/* PCI location (domain/bus/slot/function) of a device. */
typedef struct pci_addr
{
	uint16_t domain;
	uint8_t bus;
	uint8_t slot;
	uint8_t func;
} pci_addr_t;

/* Argument for SSD_CMD_DRV_PARAM_INFO: module parameter snapshot. */
typedef struct ssd_drv_param_info {
	int mode;
	int status_mask;
	int int_mode;		/* interrupt mode */
	int threaded_irq;
	int log_level;
	int wmode;		/* write mode */
	int ot_protect;		/* over-temperature protect */
	int finject;		/* fault injection enable */
	int pad[8];
} ssd_drv_param_info_t;
1498 | ||
1499 | ||
/* form factor */
enum ssd_form_factor
{
	SSD_FORM_FACTOR_HHHL = 0,	/* half-height, half-length PCIe card */
	SSD_FORM_FACTOR_FHHL		/* full-height, half-length PCIe card */
};


/* ssd power loss protect */
enum ssd_plp_type
{
	SSD_PLP_SCAP = 0,	/* supercapacitor */
	SSD_PLP_CAP,		/* capacitor */
	SSD_PLP_NONE		/* no power-loss protection */
};
1515 | ||
/* ssd bm (battery manager, addressed over SMBus/I2C) */
#define SSD_BM_SLAVE_ADDRESS 0x16	/* bm slave address */
#define SSD_BM_CAP 5	/* capacitors monitored (cf. ssd_bm.cap_volt[]) */

/* SBS cmd */
#define SSD_BM_SAFETYSTATUS 0x51
#define SSD_BM_OPERATIONSTATUS 0x54

/* ManufacturerAccess */
#define SSD_BM_MANUFACTURERACCESS 0x00
#define SSD_BM_ENTER_CAP_LEARNING 0x0023 /* cap learning */

/* Data flash access */
#define SSD_BM_DATA_FLASH_SUBCLASS_ID 0x77
#define SSD_BM_DATA_FLASH_SUBCLASS_ID_PAGE1 0x78
#define SSD_BM_SYSTEM_DATA_SUBCLASS_ID 56
#define SSD_BM_CONFIGURATION_REGISTERS_ID 64

/* min cap voltage */
#define SSD_BM_CAP_VOLT_MIN 500

/* Retained for reference: legacy capacitor type ids.
enum ssd_bm_cap
{
	SSD_BM_CAP_VINA = 1,
	SSD_BM_CAP_JH = 3
};*/

/* Battery-manager health summary. */
enum ssd_bmstatus
{
	SSD_BMSTATUS_OK = 0,
	SSD_BMSTATUS_CHARGING, /* not fully charged */
	SSD_BMSTATUS_WARNING
};

/* Unit of an SBS register value (see struct sbs_cmd.unit). */
enum sbs_unit {
	SBS_UNIT_VALUE = 0,	/* raw value, no unit */
	SBS_UNIT_TEMPERATURE,
	SBS_UNIT_VOLTAGE,
	SBS_UNIT_CURRENT,
	SBS_UNIT_ESR,
	SBS_UNIT_PERCENT,
	SBS_UNIT_CAPACITANCE
};

/* Access width of an SBS register (see struct sbs_cmd.size). */
enum sbs_size {
	SBS_SIZE_BYTE = 1,
	SBS_SIZE_WORD,
	SBS_SIZE_BLK,
};
1566 | ||
/* One row of the SBS polling table (ssd_bm_sbs[]): which register to
 * read and where its value lands inside struct ssd_bm. */
struct sbs_cmd {
	uint8_t cmd;	/* SBS command/register code */
	uint8_t size;	/* enum sbs_size */
	uint8_t unit;	/* enum sbs_unit */
	uint8_t off;	/* byte offset of target field in struct ssd_bm */
	uint16_t mask;	/* presumably the valid-bit mask applied to the raw value -- confirm */
	char *desc;	/* human-readable name */
};

/* Snapshot of battery-manager telemetry, filled via ssd_bm_sbs[]. */
struct ssd_bm {
	uint16_t temp;
	uint16_t volt;
	uint16_t curr;
	uint16_t esr;
	uint16_t rsoc;		/* RelativeStateOfCharge */
	uint16_t health;
	uint16_t cap;		/* capacitance */
	uint16_t chg_curr;	/* charging current */
	uint16_t chg_volt;	/* charging voltage */
	uint16_t cap_volt[SSD_BM_CAP];	/* per-capacitor voltage */
	uint16_t sf_alert;	/* SafetyAlert */
	uint16_t sf_status;	/* SafetyStatus */
	uint16_t op_status;	/* OperationStatus */
	uint16_t sys_volt;	/* system voltage */
};
1592 | ||
/* Battery-manager manufacturer data block layout. */
struct ssd_bm_manufacturer_data
{
	uint16_t pack_lot_code;
	uint16_t pcb_lot_code;
	uint16_t firmware_ver;
	uint16_t hardware_ver;
};

/* Battery-manager configuration registers layout (presumably data-flash
 * subclass SSD_BM_CONFIGURATION_REGISTERS_ID -- confirm). */
struct ssd_bm_configuration_registers
{
	struct {
		uint16_t cc:3;
		uint16_t rsvd:5;
		uint16_t stack:1;
		uint16_t rsvd1:2;
		uint16_t temp:2;
		uint16_t rsvd2:1;
		uint16_t lt_en:1;
		uint16_t rsvd3:1;
	} operation_cfg;
	uint16_t pad;
	uint16_t fet_action;
	uint16_t pad1;
	uint16_t fault;
};
1618 | ||
#define SBS_VALUE_MASK	0xffff

/* Byte offset of member @var within struct ssd_bm.
 * Uses the standard offsetof (via linux/stddef.h) instead of the
 * hand-rolled null-pointer-dereference idiom. */
#define bm_var_offset(var)	offsetof(struct ssd_bm, var)
/* Pointer to the byte @offset bytes past @start.
 * Fix: @start is now parenthesized -- the old expansion
 * ((void *) start + (offset)) mis-bound the cast for compound
 * arguments such as bm_var(cond ? a : b, off). */
#define bm_var(start, offset)	((void *)(start) + (offset))
1623 | ||
1624 | static struct sbs_cmd ssd_bm_sbs[] = { | |
1625 | {0x08, SBS_SIZE_WORD, SBS_UNIT_TEMPERATURE, bm_var_offset(temp), SBS_VALUE_MASK, "Temperature"}, | |
1626 | {0x09, SBS_SIZE_WORD, SBS_UNIT_VOLTAGE, bm_var_offset(volt), SBS_VALUE_MASK, "Voltage"}, | |
1627 | {0x0a, SBS_SIZE_WORD, SBS_UNIT_CURRENT, bm_var_offset(curr), SBS_VALUE_MASK, "Current"}, | |
1628 | {0x0b, SBS_SIZE_WORD, SBS_UNIT_ESR, bm_var_offset(esr), SBS_VALUE_MASK, "ESR"}, | |
1629 | {0x0d, SBS_SIZE_BYTE, SBS_UNIT_PERCENT, bm_var_offset(rsoc), SBS_VALUE_MASK, "RelativeStateOfCharge"}, | |
1630 | {0x0e, SBS_SIZE_BYTE, SBS_UNIT_PERCENT, bm_var_offset(health), SBS_VALUE_MASK, "Health"}, | |
1631 | {0x10, SBS_SIZE_WORD, SBS_UNIT_CAPACITANCE, bm_var_offset(cap), SBS_VALUE_MASK, "Capacitance"}, | |
1632 | {0x14, SBS_SIZE_WORD, SBS_UNIT_CURRENT, bm_var_offset(chg_curr), SBS_VALUE_MASK, "ChargingCurrent"}, | |
1633 | {0x15, SBS_SIZE_WORD, SBS_UNIT_VOLTAGE, bm_var_offset(chg_volt), SBS_VALUE_MASK, "ChargingVoltage"}, | |
1634 | {0x3b, SBS_SIZE_WORD, SBS_UNIT_VOLTAGE, (uint8_t)bm_var_offset(cap_volt[4]), SBS_VALUE_MASK, "CapacitorVoltage5"}, | |
1635 | {0x3c, SBS_SIZE_WORD, SBS_UNIT_VOLTAGE, (uint8_t)bm_var_offset(cap_volt[3]), SBS_VALUE_MASK, "CapacitorVoltage4"}, | |
1636 | {0x3d, SBS_SIZE_WORD, SBS_UNIT_VOLTAGE, (uint8_t)bm_var_offset(cap_volt[2]), SBS_VALUE_MASK, "CapacitorVoltage3"}, | |
1637 | {0x3e, SBS_SIZE_WORD, SBS_UNIT_VOLTAGE, (uint8_t)bm_var_offset(cap_volt[1]), SBS_VALUE_MASK, "CapacitorVoltage2"}, | |
1638 | {0x3f, SBS_SIZE_WORD, SBS_UNIT_VOLTAGE, (uint8_t)bm_var_offset(cap_volt[0]), SBS_VALUE_MASK, "CapacitorVoltage1"}, | |
1639 | {0x50, SBS_SIZE_WORD, SBS_UNIT_VALUE, bm_var_offset(sf_alert), 0x870F, "SafetyAlert"}, | |
1640 | {0x51, SBS_SIZE_WORD, SBS_UNIT_VALUE, bm_var_offset(sf_status), 0xE7BF, "SafetyStatus"}, | |
1641 | {0x54, SBS_SIZE_WORD, SBS_UNIT_VALUE, bm_var_offset(op_status), 0x79F4, "OperationStatus"}, | |
1642 | {0x5a, SBS_SIZE_WORD, SBS_UNIT_VOLTAGE, bm_var_offset(sys_volt), SBS_VALUE_MASK, "SystemVoltage"}, | |
1643 | {0, 0, 0, 0, 0, NULL}, | |
1644 | }; | |
1645 | ||
/* ssd ioctl */
/* All commands use magic 'H'; numbers are grouped by subsystem
 * (info 100+, reg 120+, spi/i2c/smbus 125+, bm 140+, ram 150+,
 * nand 160+, control 190+, log 220+, misc 230+). */
#define SSD_CMD_GET_PROTOCOL_INFO _IOR('H', 100, struct ssd_protocol_info)
#define SSD_CMD_GET_HW_INFO _IOR('H', 101, struct ssd_hw_info)
#define SSD_CMD_GET_ROM_INFO _IOR('H', 102, struct ssd_rom_info)
#define SSD_CMD_GET_SMART _IOR('H', 103, struct ssd_smart)
#define SSD_CMD_GET_IDX _IOR('H', 105, int)
#define SSD_CMD_GET_AMOUNT _IOR('H', 106, int)
#define SSD_CMD_GET_TO_INFO _IOR('H', 107, int)
#define SSD_CMD_GET_DRV_VER _IOR('H', 108, char[DRIVER_VERSION_LEN])

#define SSD_CMD_GET_BBACC_INFO _IOR('H', 109, struct ssd_acc_info)
#define SSD_CMD_GET_ECACC_INFO _IOR('H', 110, struct ssd_acc_info)

#define SSD_CMD_GET_HW_INFO_EXT _IOR('H', 111, struct ssd_hw_info_extend)

#define SSD_CMD_REG_READ _IOWR('H', 120, struct ssd_reg_op_info)
#define SSD_CMD_REG_WRITE _IOWR('H', 121, struct ssd_reg_op_info)

#define SSD_CMD_SPI_READ _IOWR('H', 125, struct ssd_spi_op_info)
#define SSD_CMD_SPI_WRITE _IOWR('H', 126, struct ssd_spi_op_info)
#define SSD_CMD_SPI_ERASE _IOWR('H', 127, struct ssd_spi_op_info)

#define SSD_CMD_I2C_READ _IOWR('H', 128, struct ssd_i2c_op_info)
#define SSD_CMD_I2C_WRITE _IOWR('H', 129, struct ssd_i2c_op_info)
#define SSD_CMD_I2C_WRITE_READ _IOWR('H', 130, struct ssd_i2c_op_info)

#define SSD_CMD_SMBUS_SEND_BYTE _IOWR('H', 131, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_RECEIVE_BYTE _IOWR('H', 132, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_WRITE_BYTE _IOWR('H', 133, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_READ_BYTE _IOWR('H', 135, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_WRITE_WORD _IOWR('H', 136, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_READ_WORD _IOWR('H', 137, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_WRITE_BLOCK _IOWR('H', 138, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_READ_BLOCK _IOWR('H', 139, struct ssd_smbus_op_info)

#define SSD_CMD_BM_GET_VER _IOR('H', 140, uint16_t)
#define SSD_CMD_BM_GET_NR_CAP _IOR('H', 141, int)
#define SSD_CMD_BM_CAP_LEARNING _IOW('H', 142, int)
#define SSD_CMD_CAP_LEARN _IOR('H', 143, uint32_t)
#define SSD_CMD_GET_CAP_STATUS _IOR('H', 144, int)

#define SSD_CMD_RAM_READ _IOWR('H', 150, struct ssd_ram_op_info)
#define SSD_CMD_RAM_WRITE _IOWR('H', 151, struct ssd_ram_op_info)

#define SSD_CMD_NAND_READ_ID _IOR('H', 160, struct ssd_flash_op_info)
#define SSD_CMD_NAND_READ _IOWR('H', 161, struct ssd_flash_op_info) //with oob
#define SSD_CMD_NAND_WRITE _IOWR('H', 162, struct ssd_flash_op_info)
#define SSD_CMD_NAND_ERASE _IOWR('H', 163, struct ssd_flash_op_info)
#define SSD_CMD_NAND_READ_EXT _IOWR('H', 164, struct ssd_flash_op_info) //ignore EIO

#define SSD_CMD_UPDATE_BBT _IOW('H', 180, struct ssd_flash_op_info)

#define SSD_CMD_CLEAR_ALARM _IOW('H', 190, int)
#define SSD_CMD_SET_ALARM _IOW('H', 191, int)

#define SSD_CMD_RESET _IOW('H', 200, int)
#define SSD_CMD_RELOAD_FW _IOW('H', 201, int)
#define SSD_CMD_UNLOAD_DEV _IOW('H', 202, int)
#define SSD_CMD_LOAD_DEV _IOW('H', 203, int)
#define SSD_CMD_UPDATE_VP _IOWR('H', 205, uint32_t)
#define SSD_CMD_FULL_RESET _IOW('H', 206, int)

#define SSD_CMD_GET_NR_LOG _IOR('H', 220, uint32_t)
#define SSD_CMD_GET_LOG _IOR('H', 221, void *)
#define SSD_CMD_LOG_LEVEL _IOW('H', 222, int)

#define SSD_CMD_OT_PROTECT _IOW('H', 223, int)
#define SSD_CMD_GET_OT_STATUS _IOR('H', 224, int)

#define SSD_CMD_CLEAR_LOG _IOW('H', 230, int)
#define SSD_CMD_CLEAR_SMART _IOW('H', 231, int)

#define SSD_CMD_SW_LOG _IOW('H', 232, struct ssd_sw_log_info)

#define SSD_CMD_GET_LABEL _IOR('H', 235, struct ssd_label)
#define SSD_CMD_GET_VERSION _IOR('H', 236, struct ssd_version_info)
#define SSD_CMD_GET_TEMPERATURE _IOR('H', 237, int)
#define SSD_CMD_GET_BMSTATUS _IOR('H', 238, int)
#define SSD_CMD_GET_LABEL2 _IOR('H', 239, void *)


#define SSD_CMD_FLUSH _IOW('H', 240, int)
#define SSD_CMD_SAVE_MD _IOW('H', 241, int)

#define SSD_CMD_SET_WMODE _IOW('H', 242, int)
#define SSD_CMD_GET_WMODE _IOR('H', 243, int)
#define SSD_CMD_GET_USER_WMODE _IOR('H', 244, int)

#define SSD_CMD_DEBUG _IOW('H', 250, struct ssd_debug_info)
#define SSD_CMD_DRV_PARAM_INFO _IOR('H', 251, struct ssd_drv_param_info)

#define SSD_CMD_CLEAR_WARNING _IOW('H', 260, int)
1739 | ||
/* log */
#define SSD_LOG_MAX_SZ		4096
#define SSD_LOG_LEVEL		SSD_LOG_LEVEL_NOTICE
#define SSD_DIF_WITH_OLD_LOG	0x3f

/* How a log entry's 32-bit data payload is to be interpreted
 * (see struct ssd_log_desc.data). */
enum ssd_log_data
{
	SSD_LOG_DATA_NONE = 0,	/* no payload */
	SSD_LOG_DATA_LOC,	/* payload is a flash location */
	SSD_LOG_DATA_HEX	/* payload shown as raw hex */
};
1751 | ||
/* One device log entry (packed wire format). Two alternative location
 * encodings (loc/loc1) overlay the same 32 bits -- NOTE(review): which
 * applies presumably depends on firmware generation; confirm. */
typedef struct ssd_log_entry
{
	union {
		struct {
			uint32_t page:10;
			uint32_t block:14;
			uint32_t flash:8;
		} loc;
		struct {
			uint32_t page:12;
			uint32_t block:12;
			uint32_t flash:8;
		} loc1;
		uint32_t val;	/* raw payload */
	} data;
	uint16_t event:10;	/* event code (matched against ssd_log_desc[]) */
	uint16_t mod:6;
	uint16_t idx;
}__attribute__((packed))ssd_log_entry_t;

/* Timestamped log record: 56-bit time, controller index, entry. */
typedef struct ssd_log
{
	uint64_t time:56;
	uint64_t ctrl_idx:8;
	ssd_log_entry_t le;
} __attribute__((packed)) ssd_log_t;

/* Static description of an event code: severity, payload kind, and
 * whether the block/page fields are meaningful for display. */
typedef struct ssd_log_desc
{
	uint16_t event;
	uint8_t level;	/* enum ssd_log_level */
	uint8_t data;	/* enum ssd_log_data */
	uint8_t sblock;	/* show block */
	uint8_t spage;	/* show page */
	char *desc;
} __attribute__((packed)) ssd_log_desc_t;

#define SSD_LOG_SW_IDX	0xF
#define SSD_UNKNOWN_EVENT	((uint16_t)-1)
1791 | static struct ssd_log_desc ssd_log_desc[] = { | |
1792 | /* event, level, show flash, show block, show page, desc */ | |
1793 | {0x0, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_LOC, 0, 0, "Create BBT failure"}, //g3 | |
1794 | {0x1, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_LOC, 0, 0, "Read BBT failure"}, //g3 | |
1795 | {0x2, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "Mark bad block"}, | |
1796 | {0x3, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 0, 0, "Flush BBT failure"}, | |
1797 | {0x4, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Program failure"}, | |
1798 | {0x7, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "No available blocks"}, | |
1799 | {0x8, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "Bad EC header"}, | |
1800 | {0x9, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_LOC, 1, 0, "Bad VID header"}, //g3 | |
1801 | {0xa, SSD_LOG_LEVEL_INFO, SSD_LOG_DATA_LOC, 1, 0, "Wear leveling"}, | |
1802 | {0xb, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "WL read back failure"}, | |
1803 | {0x11, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Data recovery failure"}, // err | |
1804 | {0x20, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Init: scan mapping table failure"}, // err g3 | |
1805 | {0x21, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Program failure"}, | |
1806 | {0x22, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Program failure"}, | |
1807 | {0x23, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Program failure"}, | |
1808 | {0x24, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "Merge: read mapping page failure"}, | |
1809 | {0x25, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Merge: read back failure"}, | |
1810 | {0x26, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Program failure"}, | |
1811 | {0x27, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_LOC, 1, 1, "Data corrupted for abnormal power down"}, //g3 | |
1812 | {0x28, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Merge: mapping page corrupted"}, | |
1813 | {0x29, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "Init: no mapping page"}, | |
1814 | {0x2a, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: mapping pages incomplete"}, | |
1815 | {0x2b, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Read back failure after programming failure"}, // err | |
1816 | {0xf1, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Read failure without recovery"}, // err | |
1817 | {0xf2, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 0, 0, "No available blocks"}, // maybe err g3 | |
1818 | {0xf3, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 0, "Init: RAID incomplete"}, // err g3 | |
1819 | {0xf4, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Program failure"}, | |
1820 | {0xf5, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Read failure in moving data"}, | |
1821 | {0xf6, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Program failure"}, | |
1822 | {0xf7, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_LOC, 1, 1, "Init: RAID not complete"}, | |
1823 | {0xf8, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "Init: data moving interrupted"}, | |
1824 | {0xfe, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Data inspection failure"}, | |
1825 | {0xff, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "IO: ECC failed"}, | |
1826 | ||
1827 | /* new */ | |
1828 | {0x2e, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 0, 0, "No available reserved blocks" }, // err | |
1829 | {0x30, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 0, 0, "Init: PMT membership not found"}, | |
1830 | {0x31, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Init: PMT corrupted"}, | |
1831 | {0x32, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 0, 0, "Init: PBT membership not found"}, | |
1832 | {0x33, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 0, 0, "Init: PBT not found"}, | |
1833 | {0x34, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 0, 0, "Init: PBT corrupted"}, | |
1834 | {0x35, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: PMT page read failure"}, | |
1835 | {0x36, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: PBT page read failure"}, | |
1836 | {0x37, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: PBT backup page read failure"}, | |
1837 | {0x38, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: PBMT read failure"}, | |
1838 | {0x39, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Init: PBMT scan failure"}, // err | |
1839 | {0x3a, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: first page read failure"}, | |
1840 | {0x3b, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Init: first page scan failure"}, // err | |
1841 | {0x3c, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Init: scan unclosed block failure"}, // err | |
1842 | {0x3d, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: write pointer mismatch"}, | |
1843 | {0x3e, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: PMT recovery: PBMT read failure"}, | |
1844 | {0x3f, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "Init: PMT recovery: PBMT scan failure"}, | |
1845 | {0x40, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Init: PMT recovery: data page read failure"}, //err | |
1846 | {0x41, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: PBT write pointer mismatch"}, | |
1847 | {0x42, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: PBT latest version corrupted"}, | |
1848 | {0x43, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 0, "Init: too many unclosed blocks"}, | |
1849 | {0x44, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Init: PDW block found"}, | |
1850 | {0x45, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_HEX, 0, 0, "Init: more than one PDW block found"}, //err | |
1851 | {0x46, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: first page is blank or read failure"}, | |
1852 | {0x47, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 0, 0, "Init: PDW block not found"}, | |
1853 | ||
1854 | {0x50, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 0, "Cache: hit error data"}, // err | |
1855 | {0x51, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 0, "Cache: read back failure"}, // err | |
1856 | {0x52, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Cache: unknown command"}, //? | |
1857 | {0x53, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "GC/WL read back failure"}, // err | |
1858 | ||
1859 | {0x60, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "Erase failure"}, | |
1860 | ||
1861 | {0x70, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "LPA not matched"}, | |
1862 | {0x71, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "PBN not matched"}, | |
1863 | {0x72, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Read retry failure"}, | |
1864 | {0x73, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Need raid recovery"}, | |
1865 | {0x74, SSD_LOG_LEVEL_INFO, SSD_LOG_DATA_LOC, 1, 1, "Need read retry"}, | |
1866 | {0x75, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Read invalid data page"}, | |
1867 | {0x76, SSD_LOG_LEVEL_INFO, SSD_LOG_DATA_LOC, 1, 1, "ECC error, data in cache, PBN matched"}, | |
1868 | {0x77, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "ECC error, data in cache, PBN not matched"}, | |
1869 | {0x78, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "ECC error, data in flash, PBN not matched"}, | |
1870 | {0x79, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "ECC ok, data in cache, LPA not matched"}, | |
1871 | {0x7a, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "ECC ok, data in flash, LPA not matched"}, | |
1872 | {0x7b, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID data in cache, LPA not matched"}, | |
1873 | {0x7c, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID data in flash, LPA not matched"}, | |
1874 | {0x7d, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Read data page status error"}, | |
1875 | {0x7e, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Read blank page"}, | |
1876 | {0x7f, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Access flash timeout"}, | |
1877 | ||
1878 | {0x80, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "EC overflow"}, | |
1879 | {0x81, SSD_LOG_LEVEL_INFO, SSD_LOG_DATA_NONE, 0, 0, "Scrubbing completed"}, | |
1880 | {0x82, SSD_LOG_LEVEL_INFO, SSD_LOG_DATA_LOC, 1, 0, "Unstable block(too much bit flip)"}, | |
1881 | {0x83, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "GC: ram error"}, //? | |
1882 | {0x84, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "GC: one PBMT read failure"}, | |
1883 | ||
1884 | {0x88, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "GC: mark bad block"}, | |
1885 | {0x89, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "GC: invalid page count error"}, // maybe err | |
1886 | {0x8a, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_NONE, 0, 0, "Warning: Bad Block close to limit"}, | |
1887 | {0x8b, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_NONE, 0, 0, "Error: Bad Block over limit"}, | |
1888 | {0x8c, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_NONE, 0, 0, "Warning: P/E cycles close to limit"}, | |
1889 | {0x8d, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_NONE, 0, 0, "Error: P/E cycles over limit"}, | |
1890 | ||
1891 | {0x90, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Warning: Over temperature"}, //90 | |
1892 | {0x91, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Info: Temperature is OK"}, //80 | |
1893 | {0x92, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_NONE, 0, 0, "Battery fault"}, | |
1894 | {0x93, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_NONE, 0, 0, "SEU fault"}, //err | |
1895 | {0x94, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_NONE, 0, 0, "DDR error"}, //err | |
1896 | {0x95, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_NONE, 0, 0, "Controller serdes error"}, //err | |
1897 | {0x96, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_NONE, 0, 0, "Bridge serdes 1 error"}, //err | |
1898 | {0x97, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_NONE, 0, 0, "Bridge serdes 2 error"}, //err | |
1899 | {0x98, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "SEU fault (corrected)"}, //err | |
1900 | {0x99, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Battery is OK"}, | |
1901 | {0x9a, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Info: Temperature close to limit"}, //85 | |
1902 | ||
1903 | {0x9b, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "SEU fault address (low)"}, | |
1904 | {0x9c, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "SEU fault address (high)"}, | |
1905 | {0x9d, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "I2C fault" }, | |
1906 | {0x9e, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "DDR single bit error" }, | |
1907 | {0x9f, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Board voltage fault" }, | |
1908 | ||
1909 | {0xa0, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "LPA not matched"}, | |
1910 | {0xa1, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Re-read data in cache"}, | |
1911 | {0xa2, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Read blank page"}, | |
1912 | {0xa3, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID recovery: Read blank page"}, | |
1913 | {0xa4, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID recovery: new data in cache"}, | |
1914 | {0xa5, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID recovery: PBN not matched"}, | |
1915 | {0xa6, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Read data with error flag"}, | |
1916 | {0xa7, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID recovery: recoverd data with error flag"}, | |
1917 | {0xa8, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Blank page in cache, PBN matched"}, | |
1918 | {0xa9, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID recovery: Blank page in cache, PBN matched"}, | |
1919 | {0xaa, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 0, 0, "Flash init failure"}, | |
1920 | {0xab, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Mapping table recovery failure"}, | |
1921 | {0xac, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID recovery: ECC failed"}, | |
1922 | {0xb0, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Warning: Temperature is 95 degrees C"}, | |
1923 | {0xb1, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Warning: Temperature is 100 degrees C"}, | |
1924 | ||
1925 | {0x300, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_HEX, 0, 0, "CMD timeout"}, | |
1926 | {0x301, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Power on"}, | |
1927 | {0x302, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Power off"}, | |
1928 | {0x303, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Clear log"}, | |
1929 | {0x304, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Set capacity"}, | |
1930 | {0x305, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Clear data"}, | |
1931 | {0x306, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "BM safety status"}, | |
1932 | {0x307, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_HEX, 0, 0, "I/O error"}, | |
1933 | {0x308, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "CMD error"}, | |
1934 | {0x309, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Set wmode"}, | |
1935 | {0x30a, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_HEX, 0, 0, "DDR init failed" }, | |
1936 | {0x30b, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "PCIe link status" }, | |
1937 | {0x30c, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_HEX, 0, 0, "Controller reset sync error" }, | |
1938 | {0x30d, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_HEX, 0, 0, "Clock fault" }, | |
1939 | {0x30e, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "FPGA voltage fault status" }, | |
1940 | {0x30f, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Set capacity finished"}, | |
1941 | {0x310, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Clear data finished"}, | |
1942 | {0x311, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Reset"}, | |
1943 | {0x312, SSD_LOG_LEVEL_WARNING,SSD_LOG_DATA_HEX, 0, 0, "CAP: voltage fault"}, | |
1944 | {0x313, SSD_LOG_LEVEL_WARNING,SSD_LOG_DATA_NONE, 0, 0, "CAP: learn fault"}, | |
1945 | {0x314, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "CAP status"}, | |
1946 | {0x315, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Board voltage fault status"}, | |
1947 | {0x316, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Info: Inlet temperature is 55 degrees C"}, //55 | |
1948 | {0x317, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Info: Inlet temperature is 50 degrees C"}, //50 | |
1949 | {0x318, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Info: Flash over temperature"}, //70 | |
1950 | {0x319, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Info: Flash temperature is OK"}, //65 | |
1951 | {0x31a, SSD_LOG_LEVEL_WARNING,SSD_LOG_DATA_NONE, 0, 0, "CAP: short circuit"}, | |
1952 | {0x31b, SSD_LOG_LEVEL_WARNING,SSD_LOG_DATA_HEX, 0, 0, "Sensor fault"}, | |
1953 | {0x31c, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Erase all data"}, | |
1954 | {0x31d, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Erase all data finished"}, | |
1955 | {0x320, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Temperature sensor event"}, | |
1956 | ||
1957 | {0x350, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Clear smart"}, | |
1958 | {0x351, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Clear warning"}, | |
1959 | ||
1960 | {SSD_UNKNOWN_EVENT, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "unknown event"}, | |
1961 | }; | |
1962 | /* */ | |
1963 | #define SSD_LOG_OVER_TEMP 0x90 | |
1964 | #define SSD_LOG_NORMAL_TEMP 0x91 | |
1965 | #define SSD_LOG_WARN_TEMP 0x9a | |
1966 | #define SSD_LOG_SEU_FAULT 0x93 | |
1967 | #define SSD_LOG_SEU_FAULT1 0x98 | |
1968 | #define SSD_LOG_BATTERY_FAULT 0x92 | |
1969 | #define SSD_LOG_BATTERY_OK 0x99 | |
1970 | #define SSD_LOG_BOARD_VOLT_FAULT 0x9f | |
1971 | ||
1972 | /* software log */ | |
1973 | #define SSD_LOG_TIMEOUT 0x300 | |
1974 | #define SSD_LOG_POWER_ON 0x301 | |
1975 | #define SSD_LOG_POWER_OFF 0x302 | |
1976 | #define SSD_LOG_CLEAR_LOG 0x303 | |
1977 | #define SSD_LOG_SET_CAPACITY 0x304 | |
1978 | #define SSD_LOG_CLEAR_DATA 0x305 | |
1979 | #define SSD_LOG_BM_SFSTATUS 0x306 | |
1980 | #define SSD_LOG_EIO 0x307 | |
1981 | #define SSD_LOG_ECMD 0x308 | |
1982 | #define SSD_LOG_SET_WMODE 0x309 | |
1983 | #define SSD_LOG_DDR_INIT_ERR 0x30a | |
1984 | #define SSD_LOG_PCIE_LINK_STATUS 0x30b | |
1985 | #define SSD_LOG_CTRL_RST_SYNC 0x30c | |
1986 | #define SSD_LOG_CLK_FAULT 0x30d | |
1987 | #define SSD_LOG_VOLT_FAULT 0x30e | |
1988 | #define SSD_LOG_SET_CAPACITY_END 0x30F | |
1989 | #define SSD_LOG_CLEAR_DATA_END 0x310 | |
1990 | #define SSD_LOG_RESET 0x311 | |
1991 | #define SSD_LOG_CAP_VOLT_FAULT 0x312 | |
1992 | #define SSD_LOG_CAP_LEARN_FAULT 0x313 | |
1993 | #define SSD_LOG_CAP_STATUS 0x314 | |
1994 | #define SSD_LOG_VOLT_STATUS 0x315 | |
1995 | #define SSD_LOG_INLET_OVER_TEMP 0x316 | |
1996 | #define SSD_LOG_INLET_NORMAL_TEMP 0x317 | |
1997 | #define SSD_LOG_FLASH_OVER_TEMP 0x318 | |
1998 | #define SSD_LOG_FLASH_NORMAL_TEMP 0x319 | |
1999 | #define SSD_LOG_CAP_SHORT_CIRCUIT 0x31a | |
2000 | #define SSD_LOG_SENSOR_FAULT 0x31b | |
2001 | #define SSD_LOG_ERASE_ALL 0x31c | |
2002 | #define SSD_LOG_ERASE_ALL_END 0x31d | |
2003 | #define SSD_LOG_TEMP_SENSOR_EVENT 0x320 | |
2004 | #define SSD_LOG_CLEAR_SMART 0x350 | |
2005 | #define SSD_LOG_CLEAR_WARNING 0x351 | |
2006 | ||
2007 | ||
2008 | /* sw log fifo depth */ | |
2009 | #define SSD_LOG_FIFO_SZ 1024 | |
2010 | ||
2011 | ||
2012 | /* done queue */ | |
2013 | static DEFINE_PER_CPU(struct list_head, ssd_doneq); | |
2014 | static DEFINE_PER_CPU(struct tasklet_struct, ssd_tasklet); | |
2015 | ||
2016 | ||
2017 | /* unloading driver */ | |
2018 | static volatile int ssd_exiting = 0; | |
2019 | ||
2020 | #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12)) | |
2021 | static struct class_simple *ssd_class; | |
2022 | #else | |
2023 | static struct class *ssd_class; | |
2024 | #endif | |
2025 | ||
2026 | static int ssd_cmajor = SSD_CMAJOR; | |
2027 | ||
2028 | /* ssd block device major, minors */ | |
2029 | static int ssd_major = SSD_MAJOR; | |
2030 | static int ssd_major_sl = SSD_MAJOR_SL; | |
2031 | static int ssd_minors = SSD_MINORS; | |
2032 | ||
2033 | /* ssd device list */ | |
2034 | static struct list_head ssd_list; | |
2035 | static unsigned long ssd_index_bits[SSD_MAX_DEV / BITS_PER_LONG + 1]; | |
2036 | static unsigned long ssd_index_bits_sl[SSD_MAX_DEV / BITS_PER_LONG + 1]; | |
2037 | static atomic_t ssd_nr; | |
2038 | ||
2039 | /* module param */ | |
2040 | enum ssd_drv_mode | |
2041 | { | |
2042 | SSD_DRV_MODE_STANDARD = 0, /* full */ | |
2043 | SSD_DRV_MODE_DEBUG = 2, /* debug */ | |
2044 | SSD_DRV_MODE_BASE /* base only */ | |
2045 | }; | |
2046 | ||
2047 | enum ssd_int_mode | |
2048 | { | |
2049 | SSD_INT_LEGACY = 0, | |
2050 | SSD_INT_MSI, | |
2051 | SSD_INT_MSIX | |
2052 | }; | |
2053 | ||
2054 | #if (defined SSD_MSIX) | |
2055 | #define SSD_INT_MODE_DEFAULT SSD_INT_MSIX | |
2056 | #elif (defined SSD_MSI) | |
2057 | #define SSD_INT_MODE_DEFAULT SSD_INT_MSI | |
2058 | #else | |
2059 | /* auto select the defaut int mode according to the kernel version*/ | |
2060 | /* suse 11 sp1 irqbalance bug: use msi instead*/ | |
2061 | #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR >= 6) || (defined RHEL_MAJOR && RHEL_MAJOR == 5 && RHEL_MINOR >= 5)) | |
2062 | #define SSD_INT_MODE_DEFAULT SSD_INT_MSIX | |
2063 | #else | |
2064 | #define SSD_INT_MODE_DEFAULT SSD_INT_MSI | |
2065 | #endif | |
2066 | #endif | |
2067 | ||
2068 | static int mode = SSD_DRV_MODE_STANDARD; | |
2069 | static int status_mask = 0xFF; | |
2070 | static int int_mode = SSD_INT_MODE_DEFAULT; | |
2071 | static int threaded_irq = 0; | |
2072 | static int log_level = SSD_LOG_LEVEL_WARNING; | |
2073 | static int ot_protect = 1; | |
2074 | static int wmode = SSD_WMODE_DEFAULT; | |
2075 | static int finject = 0; | |
2076 | ||
2077 | module_param(mode, int, 0); | |
2078 | module_param(status_mask, int, 0); | |
2079 | module_param(int_mode, int, 0); | |
2080 | module_param(threaded_irq, int, 0); | |
2081 | module_param(log_level, int, 0); | |
2082 | module_param(ot_protect, int, 0); | |
2083 | module_param(wmode, int, 0); | |
2084 | module_param(finject, int, 0); | |
2085 | ||
2086 | ||
2087 | MODULE_PARM_DESC(mode, "driver mode, 0 - standard, 1 - debug, 2 - debug without IO, 3 - basic debug mode"); | |
2088 | MODULE_PARM_DESC(status_mask, "command status mask, 0 - without command error, 0xff - with command error"); | |
2089 | MODULE_PARM_DESC(int_mode, "preferred interrupt mode, 0 - legacy, 1 - msi, 2 - msix"); | |
2090 | MODULE_PARM_DESC(threaded_irq, "threaded irq, 0 - normal irq, 1 - threaded irq"); | |
2091 | MODULE_PARM_DESC(log_level, "log level to display, 0 - info and above, 1 - notice and above, 2 - warning and above, 3 - error only"); | |
2092 | MODULE_PARM_DESC(ot_protect, "over temperature protect, 0 - disable, 1 - enable"); | |
2093 | MODULE_PARM_DESC(wmode, "write mode, 0 - write buffer (with risk for the 6xx firmware), 1 - write buffer ex, 2 - write through, 3 - auto, 4 - default"); | |
2094 | MODULE_PARM_DESC(finject, "enable fault simulation, 0 - off, 1 - on, for debug purpose only"); | |
2095 | ||
2096 | // API adaption layer | |
2097 | static inline void ssd_bio_endio(struct bio *bio, int error) | |
2098 | { | |
2099 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0)) | |
2100 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,13,0)) | |
2101 | bio->bi_error = error; | |
2102 | #else | |
2103 | bio->bi_status = errno_to_blk_status(error); | |
2104 | #endif | |
2105 | bio_endio(bio); | |
2106 | #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) | |
2107 | bio_endio(bio, error); | |
2108 | #else | |
2109 | bio_endio(bio, bio->bi_size, error); | |
2110 | #endif | |
2111 | } | |
2112 | ||
2113 | static inline int ssd_bio_has_discard(struct bio *bio) | |
2114 | { | |
2115 | #ifndef SSD_TRIM | |
2116 | return 0; | |
2117 | #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0)) | |
2118 | return bio_op(bio) == REQ_OP_DISCARD; | |
2119 | #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)) | |
2120 | return bio->bi_rw & REQ_DISCARD; | |
2121 | #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)) | |
2122 | return bio_rw_flagged(bio, BIO_RW_DISCARD); | |
2123 | #else | |
2124 | return 0; | |
2125 | #endif | |
2126 | } | |
2127 | ||
2128 | static inline int ssd_bio_has_flush(struct bio *bio) | |
2129 | { | |
2130 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0)) | |
2131 | return bio_op(bio) == REQ_OP_FLUSH; | |
2132 | #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) | |
2133 | return bio->bi_rw & REQ_FLUSH; | |
2134 | #else | |
2135 | return 0; | |
2136 | #endif | |
2137 | } | |
2138 | ||
2139 | static inline int ssd_bio_has_barrier_or_fua(struct bio * bio) | |
2140 | { | |
2141 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0)) | |
2142 | return bio->bi_opf & REQ_FUA; | |
2143 | #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) | |
2144 | return bio->bi_rw & REQ_FUA; | |
2145 | #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)) | |
2146 | return bio->bi_rw & REQ_HARDBARRIER; | |
2147 | #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)) | |
2148 | return bio_rw_flagged(bio, BIO_RW_BARRIER); | |
2149 | #else | |
2150 | return bio_barrier(bio); | |
2151 | #endif | |
2152 | } | |
2153 | ||
2154 | #ifndef MODULE | |
2155 | static int __init ssd_drv_mode(char *str) | |
2156 | { | |
2157 | mode = (int)simple_strtoul(str, NULL, 0); | |
2158 | ||
2159 | return 1; | |
2160 | } | |
2161 | ||
2162 | static int __init ssd_status_mask(char *str) | |
2163 | { | |
2164 | status_mask = (int)simple_strtoul(str, NULL, 16); | |
2165 | ||
2166 | return 1; | |
2167 | } | |
2168 | ||
2169 | static int __init ssd_int_mode(char *str) | |
2170 | { | |
2171 | int_mode = (int)simple_strtoul(str, NULL, 0); | |
2172 | ||
2173 | return 1; | |
2174 | } | |
2175 | ||
2176 | static int __init ssd_threaded_irq(char *str) | |
2177 | { | |
2178 | threaded_irq = (int)simple_strtoul(str, NULL, 0); | |
2179 | ||
2180 | return 1; | |
2181 | } | |
2182 | ||
2183 | static int __init ssd_log_level(char *str) | |
2184 | { | |
2185 | log_level = (int)simple_strtoul(str, NULL, 0); | |
2186 | ||
2187 | return 1; | |
2188 | } | |
2189 | ||
2190 | static int __init ssd_ot_protect(char *str) | |
2191 | { | |
2192 | ot_protect = (int)simple_strtoul(str, NULL, 0); | |
2193 | ||
2194 | return 1; | |
2195 | } | |
2196 | ||
2197 | static int __init ssd_wmode(char *str) | |
2198 | { | |
2199 | wmode = (int)simple_strtoul(str, NULL, 0); | |
2200 | ||
2201 | return 1; | |
2202 | } | |
2203 | ||
2204 | static int __init ssd_finject(char *str) | |
2205 | { | |
2206 | finject = (int)simple_strtoul(str, NULL, 0); | |
2207 | ||
2208 | return 1; | |
2209 | } | |
2210 | ||
2211 | __setup(MODULE_NAME"_mode=", ssd_drv_mode); | |
2212 | __setup(MODULE_NAME"_status_mask=", ssd_status_mask); | |
2213 | __setup(MODULE_NAME"_int_mode=", ssd_int_mode); | |
2214 | __setup(MODULE_NAME"_threaded_irq=", ssd_threaded_irq); | |
2215 | __setup(MODULE_NAME"_log_level=", ssd_log_level); | |
2216 | __setup(MODULE_NAME"_ot_protect=", ssd_ot_protect); | |
2217 | __setup(MODULE_NAME"_wmode=", ssd_wmode); | |
2218 | __setup(MODULE_NAME"_finject=", ssd_finject); | |
2219 | #endif | |
2220 | ||
2221 | ||
2222 | #ifdef CONFIG_PROC_FS | |
2223 | #include <linux/proc_fs.h> | |
2224 | #include <asm/uaccess.h> | |
2225 | ||
2226 | #define SSD_PROC_DIR MODULE_NAME | |
2227 | #define SSD_PROC_INFO "info" | |
2228 | ||
2229 | static struct proc_dir_entry *ssd_proc_dir = NULL; | |
2230 | static struct proc_dir_entry *ssd_proc_info = NULL; | |
2231 | ||
2232 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0)) | |
2233 | static int ssd_proc_read(char *page, char **start, | |
2234 | off_t off, int count, int *eof, void *data) | |
2235 | { | |
2236 | struct ssd_device *dev = NULL; | |
2237 | struct ssd_device *n = NULL; | |
2238 | uint64_t size; | |
2239 | int idx; | |
2240 | int len = 0; | |
2241 | //char type; //xx | |
2242 | ||
2243 | if (ssd_exiting || off != 0) { | |
2244 | return 0; | |
2245 | } | |
2246 | ||
2247 | len += snprintf((page + len), (count - len), "Driver Version:\t%s\n", DRIVER_VERSION); | |
2248 | ||
2249 | list_for_each_entry_safe(dev, n, &ssd_list, list) { | |
2250 | idx = dev->idx + 1; | |
2251 | size = dev->hw_info.size ; | |
2252 | do_div(size, 1000000000); | |
2253 | ||
2254 | len += snprintf((page + len), (count - len), "\n"); | |
2255 | ||
2256 | len += snprintf((page + len), (count - len), "HIO %d Size:\t%uGB\n", idx, (uint32_t)size); | |
2257 | ||
2258 | len += snprintf((page + len), (count - len), "HIO %d Bridge FW VER:\t%03X\n", idx, dev->hw_info.bridge_ver); | |
2259 | if (dev->hw_info.ctrl_ver != 0) { | |
2260 | len += snprintf((page + len), (count - len), "HIO %d Controller FW VER:\t%03X\n", idx, dev->hw_info.ctrl_ver); | |
2261 | } | |
2262 | ||
2263 | len += snprintf((page + len), (count - len), "HIO %d PCB VER:\t.%c\n", idx, dev->hw_info.pcb_ver); | |
2264 | ||
2265 | if (dev->hw_info.upper_pcb_ver >= 'A') { | |
2266 | len += snprintf((page + len), (count - len), "HIO %d Upper PCB VER:\t.%c\n", idx, dev->hw_info.upper_pcb_ver); | |
2267 | } | |
2268 | ||
2269 | len += snprintf((page + len), (count - len), "HIO %d Device:\t%s\n", idx, dev->name); | |
2270 | } | |
2271 | ||
2272 | *eof = 1; | |
2273 | return len; | |
2274 | } | |
2275 | ||
2276 | #else | |
2277 | ||
2278 | static int ssd_proc_show(struct seq_file *m, void *v) | |
2279 | { | |
2280 | struct ssd_device *dev = NULL; | |
2281 | struct ssd_device *n = NULL; | |
2282 | uint64_t size; | |
2283 | int idx; | |
2284 | ||
2285 | if (ssd_exiting) { | |
2286 | return 0; | |
2287 | } | |
2288 | ||
2289 | seq_printf(m, "Driver Version:\t%s\n", DRIVER_VERSION); | |
2290 | ||
2291 | list_for_each_entry_safe(dev, n, &ssd_list, list) { | |
2292 | idx = dev->idx + 1; | |
2293 | size = dev->hw_info.size ; | |
2294 | do_div(size, 1000000000); | |
2295 | ||
2296 | seq_printf(m, "\n"); | |
2297 | ||
2298 | seq_printf(m, "HIO %d Size:\t%uGB\n", idx, (uint32_t)size); | |
2299 | ||
2300 | seq_printf(m, "HIO %d Bridge FW VER:\t%03X\n", idx, dev->hw_info.bridge_ver); | |
2301 | if (dev->hw_info.ctrl_ver != 0) { | |
2302 | seq_printf(m, "HIO %d Controller FW VER:\t%03X\n", idx, dev->hw_info.ctrl_ver); | |
2303 | } | |
2304 | ||
2305 | seq_printf(m, "HIO %d PCB VER:\t.%c\n", idx, dev->hw_info.pcb_ver); | |
2306 | ||
2307 | if (dev->hw_info.upper_pcb_ver >= 'A') { | |
2308 | seq_printf(m, "HIO %d Upper PCB VER:\t.%c\n", idx, dev->hw_info.upper_pcb_ver); | |
2309 | } | |
2310 | ||
2311 | seq_printf(m, "HIO %d Device:\t%s\n", idx, dev->name); | |
2312 | } | |
2313 | ||
2314 | return 0; | |
2315 | } | |
2316 | ||
2317 | static int ssd_proc_open(struct inode *inode, struct file *file) | |
2318 | { | |
2319 | #if (LINUX_VERSION_CODE <= KERNEL_VERSION(3,9,0)) | |
2320 | return single_open(file, ssd_proc_show, PDE(inode)->data); | |
2321 | #else | |
2322 | return single_open(file, ssd_proc_show, PDE_DATA(inode)); | |
2323 | #endif | |
2324 | } | |
2325 | ||
2326 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(5,6,0)) | |
2327 | static const struct file_operations ssd_proc_fops = { | |
2328 | .open = ssd_proc_open, | |
2329 | .read = seq_read, | |
2330 | .llseek = seq_lseek, | |
2331 | .release = single_release, | |
2332 | }; | |
2333 | #else | |
2334 | static const struct proc_ops ssd_proc_fops = { | |
2335 | .proc_open = ssd_proc_open, | |
2336 | .proc_read = seq_read, | |
2337 | .proc_lseek = seq_lseek, | |
2338 | .proc_release = single_release, | |
2339 | }; | |
2340 | #endif | |
2341 | #endif | |
2342 | ||
2343 | ||
2344 | static void ssd_cleanup_proc(void) | |
2345 | { | |
2346 | if (ssd_proc_info) { | |
2347 | remove_proc_entry(SSD_PROC_INFO, ssd_proc_dir); | |
2348 | ssd_proc_info = NULL; | |
2349 | } | |
2350 | if (ssd_proc_dir) { | |
2351 | remove_proc_entry(SSD_PROC_DIR, NULL); | |
2352 | ssd_proc_dir = NULL; | |
2353 | } | |
2354 | } | |
2355 | static int ssd_init_proc(void) | |
2356 | { | |
2357 | ssd_proc_dir = proc_mkdir(SSD_PROC_DIR, NULL); | |
2358 | if (!ssd_proc_dir) | |
2359 | goto out_proc_mkdir; | |
2360 | ||
2361 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0)) | |
2362 | ssd_proc_info = create_proc_entry(SSD_PROC_INFO, S_IFREG | S_IRUGO | S_IWUSR, ssd_proc_dir); | |
2363 | if (!ssd_proc_info) | |
2364 | goto out_create_proc_entry; | |
2365 | ||
2366 | ssd_proc_info->read_proc = ssd_proc_read; | |
2367 | ||
2368 | /* kernel bug */ | |
2369 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)) | |
2370 | ssd_proc_info->owner = THIS_MODULE; | |
2371 | #endif | |
2372 | #else | |
2373 | ssd_proc_info = proc_create(SSD_PROC_INFO, 0600, ssd_proc_dir, &ssd_proc_fops); | |
2374 | if (!ssd_proc_info) | |
2375 | goto out_create_proc_entry; | |
2376 | #endif | |
2377 | ||
2378 | return 0; | |
2379 | ||
2380 | out_create_proc_entry: | |
2381 | remove_proc_entry(SSD_PROC_DIR, NULL); | |
2382 | out_proc_mkdir: | |
2383 | return -ENOMEM; | |
2384 | } | |
2385 | ||
2386 | #else | |
2387 | static void ssd_cleanup_proc(void) | |
2388 | { | |
2389 | return; | |
2390 | } | |
2391 | static int ssd_init_proc(void) | |
2392 | { | |
2393 | return 0; | |
2394 | } | |
2395 | #endif /* CONFIG_PROC_FS */ | |
2396 | ||
2397 | /* sysfs */ | |
2398 | static void ssd_unregister_sysfs(struct ssd_device *dev) | |
2399 | { | |
2400 | return; | |
2401 | } | |
2402 | ||
2403 | static int ssd_register_sysfs(struct ssd_device *dev) | |
2404 | { | |
2405 | return 0; | |
2406 | } | |
2407 | ||
2408 | static void ssd_cleanup_sysfs(void) | |
2409 | { | |
2410 | return; | |
2411 | } | |
2412 | ||
2413 | static int ssd_init_sysfs(void) | |
2414 | { | |
2415 | return 0; | |
2416 | } | |
2417 | ||
2418 | static inline void ssd_put_index(int slave, int index) | |
2419 | { | |
2420 | unsigned long *index_bits = ssd_index_bits; | |
2421 | ||
2422 | if (slave) { | |
2423 | index_bits = ssd_index_bits_sl; | |
2424 | } | |
2425 | ||
2426 | if (test_and_clear_bit(index, index_bits)) { | |
2427 | atomic_dec(&ssd_nr); | |
2428 | } | |
2429 | } | |
2430 | ||
2431 | static inline int ssd_get_index(int slave) | |
2432 | { | |
2433 | unsigned long *index_bits = ssd_index_bits; | |
2434 | int index; | |
2435 | ||
2436 | if (slave) { | |
2437 | index_bits = ssd_index_bits_sl; | |
2438 | } | |
2439 | ||
2440 | find_index: | |
2441 | if ((index = find_first_zero_bit(index_bits, SSD_MAX_DEV)) >= SSD_MAX_DEV) { | |
2442 | return -1; | |
2443 | } | |
2444 | ||
2445 | if (test_and_set_bit(index, index_bits)) { | |
2446 | goto find_index; | |
2447 | } | |
2448 | ||
2449 | atomic_inc(&ssd_nr); | |
2450 | ||
2451 | return index; | |
2452 | } | |
2453 | ||
2454 | static void ssd_cleanup_index(void) | |
2455 | { | |
2456 | return; | |
2457 | } | |
2458 | ||
2459 | static int ssd_init_index(void) | |
2460 | { | |
2461 | INIT_LIST_HEAD(&ssd_list); | |
2462 | atomic_set(&ssd_nr, 0); | |
2463 | memset(ssd_index_bits, 0, sizeof(ssd_index_bits)); | |
2464 | memset(ssd_index_bits_sl, 0, sizeof(ssd_index_bits_sl)); | |
2465 | ||
2466 | return 0; | |
2467 | } | |
2468 | ||
2469 | static void ssd_set_dev_name(char *name, size_t size, int idx) | |
2470 | { | |
2471 | if(idx < SSD_ALPHABET_NUM) { | |
2472 | snprintf(name, size, "%c", 'a'+idx); | |
2473 | } else { | |
2474 | idx -= SSD_ALPHABET_NUM; | |
2475 | snprintf(name, size, "%c%c", 'a'+(idx/SSD_ALPHABET_NUM), 'a'+(idx%SSD_ALPHABET_NUM)); | |
2476 | } | |
2477 | } | |
2478 | ||
2479 | /* pci register r&w */ | |
2480 | static inline void ssd_reg_write(void *addr, uint64_t val) | |
2481 | { | |
2482 | iowrite32((uint32_t)val, addr); | |
2483 | iowrite32((uint32_t)(val >> 32), addr + 4); | |
2484 | wmb(); | |
2485 | } | |
2486 | ||
2487 | static inline uint64_t ssd_reg_read(void *addr) | |
2488 | { | |
2489 | uint64_t val; | |
2490 | uint32_t val_lo, val_hi; | |
2491 | ||
2492 | val_lo = ioread32(addr); | |
2493 | val_hi = ioread32(addr + 4); | |
2494 | ||
2495 | rmb(); | |
2496 | val = val_lo | ((uint64_t)val_hi << 32); | |
2497 | ||
2498 | return val; | |
2499 | } | |
2500 | ||
2501 | ||
2502 | #define ssd_reg32_write(addr, val) writel(val, addr) | |
2503 | #define ssd_reg32_read(addr) readl(addr) | |
2504 | ||
2505 | /* alarm led */ | |
2506 | static void ssd_clear_alarm(struct ssd_device *dev) | |
2507 | { | |
2508 | uint32_t val; | |
2509 | ||
2510 | if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) { | |
2511 | return; | |
2512 | } | |
2513 | ||
2514 | val = ssd_reg32_read(dev->ctrlp + SSD_LED_REG); | |
2515 | ||
2516 | /* firmware control */ | |
2517 | val &= ~0x2; | |
2518 | ||
2519 | ssd_reg32_write(dev->ctrlp + SSD_LED_REG, val); | |
2520 | } | |
2521 | ||
2522 | static void ssd_set_alarm(struct ssd_device *dev) | |
2523 | { | |
2524 | uint32_t val; | |
2525 | ||
2526 | if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) { | |
2527 | return; | |
2528 | } | |
2529 | ||
2530 | val = ssd_reg32_read(dev->ctrlp + SSD_LED_REG); | |
2531 | ||
2532 | /* light up */ | |
2533 | val &= ~0x1; | |
2534 | /* software control */ | |
2535 | val |= 0x2; | |
2536 | ||
2537 | ssd_reg32_write(dev->ctrlp + SSD_LED_REG, val); | |
2538 | } | |
2539 | ||
2540 | #define u32_swap(x) \ | |
2541 | ((uint32_t)( \ | |
2542 | (((uint32_t)(x) & (uint32_t)0x000000ffUL) << 24) | \ | |
2543 | (((uint32_t)(x) & (uint32_t)0x0000ff00UL) << 8) | \ | |
2544 | (((uint32_t)(x) & (uint32_t)0x00ff0000UL) >> 8) | \ | |
2545 | (((uint32_t)(x) & (uint32_t)0xff000000UL) >> 24))) | |
2546 | ||
2547 | #define u16_swap(x) \ | |
2548 | ((uint16_t)( \ | |
2549 | (((uint16_t)(x) & (uint16_t)0x00ff) << 8) | \ | |
2550 | (((uint16_t)(x) & (uint16_t)0xff00) >> 8) )) | |
2551 | ||
2552 | ||
2553 | #if 0 | |
2554 | /* No lock, for init only*/ | |
2555 | static int ssd_spi_read_id(struct ssd_device *dev, uint32_t *id) | |
2556 | { | |
2557 | uint32_t val; | |
2558 | unsigned long st; | |
2559 | int ret = 0; | |
2560 | ||
2561 | if (!dev || !id) { | |
2562 | return -EINVAL; | |
2563 | } | |
2564 | ||
2565 | ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_READ_ID); | |
2566 | ||
2567 | val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY); | |
2568 | val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY); | |
2569 | val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY); | |
2570 | val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY); | |
2571 | ||
2572 | st = jiffies; | |
2573 | for (;;) { | |
2574 | val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY); | |
2575 | if (val == 0x1000000) { | |
2576 | break; | |
2577 | } | |
2578 | ||
2579 | if (time_after(jiffies, (st + SSD_SPI_TIMEOUT))) { | |
2580 | ret = -ETIMEDOUT; | |
2581 | goto out; | |
2582 | } | |
2583 | cond_resched(); | |
2584 | } | |
2585 | ||
2586 | val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_ID); | |
2587 | *id = val; | |
2588 | ||
2589 | out: | |
2590 | return ret; | |
2591 | } | |
2592 | #endif | |
2593 | ||
2594 | /* spi access */ | |
2595 | static int ssd_init_spi(struct ssd_device *dev) | |
2596 | { | |
2597 | uint32_t val; | |
2598 | unsigned long st; | |
2599 | int ret = 0; | |
2600 | ||
2601 | mutex_lock(&dev->spi_mutex); | |
2602 | st = jiffies; | |
2603 | for(;;) { | |
2604 | ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_READ_STATUS); | |
2605 | ||
2606 | do { | |
2607 | val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY); | |
2608 | ||
2609 | if (time_after(jiffies, (st + SSD_SPI_TIMEOUT))) { | |
2610 | ret = -ETIMEDOUT; | |
2611 | goto out; | |
2612 | } | |
2613 | cond_resched(); | |
2614 | } while (val != 0x1000000); | |
2615 | ||
2616 | val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_STATUS); | |
2617 | if (!(val & 0x1)) { | |
2618 | break; | |
2619 | } | |
2620 | ||
2621 | if (time_after(jiffies, (st + SSD_SPI_TIMEOUT))) { | |
2622 | ret = -ETIMEDOUT; | |
2623 | goto out; | |
2624 | } | |
2625 | cond_resched(); | |
2626 | } | |
2627 | ||
2628 | out: | |
2629 | if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) { | |
2630 | if (val & 0x1) { | |
2631 | ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_CLSR); | |
2632 | } | |
2633 | } | |
2634 | ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_W_DISABLE); | |
2635 | mutex_unlock(&dev->spi_mutex); | |
2636 | ||
2637 | ret = 0; | |
2638 | ||
2639 | return ret; | |
2640 | } | |
2641 | ||
/*
 * Read up to one flash page through the SPI controller, one 32-bit word
 * per READ command.
 *
 * @dev:  owning device (register window + rom_info geometry)
 * @buf:  destination buffer, filled sizeof(uint32_t) bytes at a time
 * @off:  byte offset into the SPI flash; must be 32-bit aligned
 * @size: byte count; 32-bit aligned, non-zero, within the ROM and at
 *        most one page
 *
 * Returns 0 on success, -EINVAL on bad arguments, -ETIMEDOUT if the
 * controller never signals ready.  Serialized by dev->spi_mutex.
 */
static int ssd_spi_page_read(struct ssd_device *dev, void *buf, uint32_t off, uint32_t size)
{
	uint32_t val;
	uint32_t rlen = 0;
	unsigned long st;
	int ret = 0;

	if (!dev || !buf) {
		return -EINVAL;
	}

	if ((off % sizeof(uint32_t)) != 0 || (size % sizeof(uint32_t)) != 0 || size == 0 ||
		((uint64_t)off + (uint64_t)size) > dev->rom_info.size || size > dev->rom_info.page_size) {
		return -EINVAL;
	}

	mutex_lock(&dev->spi_mutex);
	while (rlen < size) {
		/* issue READ for the next word: high address byte first,
		 * wmb() keeps the two command-register writes ordered */
		ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD_HI, ((off + rlen) >> 24));
		wmb();
		ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, (((off + rlen) << 8) | SSD_SPI_CMD_READ));

		/* dummy READY reads — NOTE(review): presumably flushes the
		 * posted writes / gives the controller settle time; confirm
		 * against the hardware spec */
		(void)ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);
		(void)ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);
		(void)ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);
		(void)ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);

		/* wait until the word is available */
		st = jiffies;
		for (;;) {
			val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);
			if (val == 0x1000000) {
				break;
			}

			if (time_after(jiffies, (st + SSD_SPI_TIMEOUT))) {
				ret = -ETIMEDOUT;
				goto out;
			}
			cond_resched();
		}

		/* data comes back byte-swapped relative to host order */
		val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_RDATA);
		*(uint32_t *)(buf + rlen)= u32_swap(val);

		rlen += sizeof(uint32_t);
	}

out:
	mutex_unlock(&dev->spi_mutex);
	return ret;
}
2693 | ||
/*
 * Program up to one flash page through the SPI controller.
 *
 * @dev:  owning device
 * @buf:  source data, streamed into the controller write FIFO
 * @off:  byte offset into the SPI flash; 32-bit aligned
 * @size: byte count; 32-bit aligned, non-zero, at most one page, and the
 *        range must not cross a flash page boundary
 *
 * Returns 0 on success, -EINVAL on bad arguments, -ETIMEDOUT if the
 * flash never reports idle, -EIO if the program-error flag is set
 * (protocol >= V3.2).  Serialized by dev->spi_mutex.
 */
static int ssd_spi_page_write(struct ssd_device *dev, void *buf, uint32_t off, uint32_t size)
{
	uint32_t val;
	uint32_t wlen;
	unsigned long st;
	int i;
	int ret = 0;

	if (!dev || !buf) {
		return -EINVAL;
	}

	if ((off % sizeof(uint32_t)) != 0 || (size % sizeof(uint32_t)) != 0 || size == 0 ||
		((uint64_t)off + (uint64_t)size) > dev->rom_info.size || size > dev->rom_info.page_size ||
		(off / dev->rom_info.page_size) != ((off + size - 1) / dev->rom_info.page_size)) {
		return -EINVAL;
	}

	mutex_lock(&dev->spi_mutex);

	/* unlock the flash for writing */
	ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_W_ENABLE);

	/* stage the payload, byte-swapped, into the write FIFO */
	wlen = size / sizeof(uint32_t);
	for (i=0; i<(int)wlen; i++) {
		ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_WDATA, u32_swap(*((uint32_t *)buf + i)));
	}

	/* issue PROGRAM; wmb() orders the address/command writes after the
	 * data writes */
	wmb();
	ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD_HI, (off >> 24));
	wmb();
	ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, ((off << 8) | SSD_SPI_CMD_PROGRAM));

	udelay(1);

	/* poll flash status until write-in-progress (bit 0) clears */
	st = jiffies;
	for (;;) {
		ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_READ_STATUS);
		do {
			val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);

			if (time_after(jiffies, (st + SSD_SPI_TIMEOUT))) {
				ret = -ETIMEDOUT;
				goto out;
			}
			cond_resched();
		} while (val != 0x1000000);

		val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_STATUS);
		if (!(val & 0x1)) {
			break;
		}

		if (time_after(jiffies, (st + SSD_SPI_TIMEOUT))) {
			ret = -ETIMEDOUT;
			goto out;
		}
		cond_resched();
	}

	/* NOTE(review): bit 6 appears to be the program-error flag on
	 * protocol >= V3.2 — confirm against the flash datasheet */
	if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) {
		if ((val >> 6) & 0x1) {
			ret = -EIO;
			goto out;
		}
	}

out:
	/* clear a latched error status, then relock the flash */
	if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) {
		if (val & 0x1) {
			ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_CLSR);
		}
	}
	ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_W_DISABLE);

	mutex_unlock(&dev->spi_mutex);

	return ret;
}
2772 | ||
/*
 * Erase one flash block through the SPI controller.
 *
 * @dev: owning device
 * @off: byte offset of the block; must be block-aligned and in range
 *
 * Returns 0 on success, -EINVAL on bad arguments, -ETIMEDOUT if the
 * flash never reports idle, -EIO if the erase-error flag is set
 * (protocol >= V3.2).  Serialized by dev->spi_mutex.
 */
static int ssd_spi_block_erase(struct ssd_device *dev, uint32_t off)
{
	uint32_t val;
	unsigned long st;
	int ret = 0;

	if (!dev) {
		return -EINVAL;
	}

	if ((off % dev->rom_info.block_size) != 0 || off >= dev->rom_info.size) {
		return -EINVAL;
	}

	mutex_lock(&dev->spi_mutex);

	/* write-enable is issued twice — NOTE(review): presumably a
	 * deliberate hardware quirk workaround; confirm before changing */
	ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_W_ENABLE);
	ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_W_ENABLE);

	/* issue ERASE; wmb() orders the address/command register writes */
	wmb();
	ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD_HI, (off >> 24));
	wmb();
	ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, ((off << 8) | SSD_SPI_CMD_ERASE));

	/* poll flash status until write-in-progress (bit 0) clears */
	st = jiffies;
	for (;;) {
		ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_READ_STATUS);

		do {
			val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);

			if (time_after(jiffies, (st + SSD_SPI_TIMEOUT))) {
				ret = -ETIMEDOUT;
				goto out;
			}
			cond_resched();
		} while (val != 0x1000000);

		val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_STATUS);
		if (!(val & 0x1)) {
			break;
		}

		if (time_after(jiffies, (st + SSD_SPI_TIMEOUT))) {
			ret = -ETIMEDOUT;
			goto out;
		}
		cond_resched();
	}

	/* NOTE(review): bit 5 appears to be the erase-error flag on
	 * protocol >= V3.2 — confirm against the flash datasheet */
	if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) {
		if ((val >> 5) & 0x1) {
			ret = -EIO;
			goto out;
		}
	}

out:
	/* clear a latched error status, then relock the flash */
	if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) {
		if (val & 0x1) {
			ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_CLSR);
		}
	}
	ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_W_DISABLE);

	mutex_unlock(&dev->spi_mutex);

	return ret;
}
2842 | ||
2843 | static int ssd_spi_read(struct ssd_device *dev, void *buf, uint32_t off, uint32_t size) | |
2844 | { | |
2845 | uint32_t len = 0; | |
2846 | uint32_t roff; | |
2847 | uint32_t rsize; | |
2848 | int ret = 0; | |
2849 | ||
2850 | if (!dev || !buf) { | |
2851 | return -EINVAL; | |
2852 | } | |
2853 | ||
2854 | if ((off % sizeof(uint32_t)) != 0 || (size % sizeof(uint32_t)) != 0 || size == 0 || | |
2855 | ((uint64_t)off + (uint64_t)size) > dev->rom_info.size) { | |
2856 | return -EINVAL; | |
2857 | } | |
2858 | ||
2859 | while (len < size) { | |
2860 | roff = (off + len) % dev->rom_info.page_size; | |
2861 | rsize = dev->rom_info.page_size - roff; | |
2862 | if ((size - len) < rsize) { | |
2863 | rsize = (size - len); | |
2864 | } | |
2865 | roff = off + len; | |
2866 | ||
2867 | ret = ssd_spi_page_read(dev, (buf + len), roff, rsize); | |
2868 | if (ret) { | |
2869 | goto out; | |
2870 | } | |
2871 | ||
2872 | len += rsize; | |
2873 | ||
2874 | cond_resched(); | |
2875 | } | |
2876 | ||
2877 | out: | |
2878 | return ret; | |
2879 | } | |
2880 | ||
2881 | static int ssd_spi_write(struct ssd_device *dev, void *buf, uint32_t off, uint32_t size) | |
2882 | { | |
2883 | uint32_t len = 0; | |
2884 | uint32_t woff; | |
2885 | uint32_t wsize; | |
2886 | int ret = 0; | |
2887 | ||
2888 | if (!dev || !buf) { | |
2889 | return -EINVAL; | |
2890 | } | |
2891 | ||
2892 | if ((off % sizeof(uint32_t)) != 0 || (size % sizeof(uint32_t)) != 0 || size == 0 || | |
2893 | ((uint64_t)off + (uint64_t)size) > dev->rom_info.size) { | |
2894 | return -EINVAL; | |
2895 | } | |
2896 | ||
2897 | while (len < size) { | |
2898 | woff = (off + len) % dev->rom_info.page_size; | |
2899 | wsize = dev->rom_info.page_size - woff; | |
2900 | if ((size - len) < wsize) { | |
2901 | wsize = (size - len); | |
2902 | } | |
2903 | woff = off + len; | |
2904 | ||
2905 | ret = ssd_spi_page_write(dev, (buf + len), woff, wsize); | |
2906 | if (ret) { | |
2907 | goto out; | |
2908 | } | |
2909 | ||
2910 | len += wsize; | |
2911 | ||
2912 | cond_resched(); | |
2913 | } | |
2914 | ||
2915 | out: | |
2916 | return ret; | |
2917 | } | |
2918 | ||
2919 | static int ssd_spi_erase(struct ssd_device *dev, uint32_t off, uint32_t size) | |
2920 | { | |
2921 | uint32_t len = 0; | |
2922 | uint32_t eoff; | |
2923 | int ret = 0; | |
2924 | ||
2925 | if (!dev) { | |
2926 | return -EINVAL; | |
2927 | } | |
2928 | ||
2929 | if (size == 0 || ((uint64_t)off + (uint64_t)size) > dev->rom_info.size || | |
2930 | (off % dev->rom_info.block_size) != 0 || (size % dev->rom_info.block_size) != 0) { | |
2931 | return -EINVAL; | |
2932 | } | |
2933 | ||
2934 | while (len < size) { | |
2935 | eoff = (off + len); | |
2936 | ||
2937 | ret = ssd_spi_block_erase(dev, eoff); | |
2938 | if (ret) { | |
2939 | goto out; | |
2940 | } | |
2941 | ||
2942 | len += dev->rom_info.block_size; | |
2943 | ||
2944 | cond_resched(); | |
2945 | } | |
2946 | ||
2947 | out: | |
2948 | return ret; | |
2949 | } | |
2950 | ||
2951 | /* i2c access */ | |
2952 | static uint32_t __ssd_i2c_reg32_read(void *addr) | |
2953 | { | |
2954 | return ssd_reg32_read(addr); | |
2955 | } | |
2956 | ||
/*
 * Write an i2c controller register, then read it back.
 * NOTE(review): the read-back presumably flushes the posted PCI write so
 * the update reaches hardware before the caller proceeds — confirm
 * against the controller documentation.
 */
static void __ssd_i2c_reg32_write(void *addr, uint32_t val)
{
	ssd_reg32_write(addr, val);
	ssd_reg32_read(addr);
}
2962 | ||
/*
 * Best-effort cleanup of the i2c controller after a transfer.
 *
 * Reads the status register and then:
 *   - while the read FIFO still holds data (status bit 0x4 clear), pops
 *     one byte and re-checks, up to SSD_I2C_MAX_DATA bytes;
 *   - acks any pending completion interrupt (status bits 0x1/0x2);
 *   - resets the controller if it is not idle (status bit 0x8 clear) or
 *     if the FIFO failed to drain.
 * NOTE(review): bit meanings are inferred from the masks used here —
 * confirm against the controller documentation.
 *
 * Returns 0 on success or -ETIMEDOUT if a register read never turned
 * valid.  Caller holds dev->i2c_mutex.
 */
static int __ssd_i2c_clear(struct ssd_device *dev, uint8_t saddr)
{
	ssd_i2c_ctrl_t ctrl;
	ssd_i2c_data_t data;
	uint8_t status = 0;
	int nr_data = 0;
	unsigned long st;
	int ret = 0;

check_status:
	/* request the controller status byte */
	ctrl.bits.wdata = 0;
	ctrl.bits.addr = SSD_I2C_STATUS_REG;
	ctrl.bits.rw = SSD_I2C_CTRL_READ;
	__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

	st = jiffies;
	for (;;) {
		data.val = __ssd_i2c_reg32_read(dev->ctrlp + SSD_I2C_RDATA_REG);
		if (data.bits.valid == 0) {
			break;
		}

		/* retry */
		if (time_after(jiffies, (st + SSD_I2C_TIMEOUT))) {
			ret = -ETIMEDOUT;
			goto out;
		}
		cond_resched();
	}
	status = data.bits.rdata;

	if (!(status & 0x4)) {
		/* clear read fifo data */
		ctrl.bits.wdata = 0;
		ctrl.bits.addr = SSD_I2C_DATA_REG;
		ctrl.bits.rw = SSD_I2C_CTRL_READ;
		__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

		st = jiffies;
		for (;;) {
			data.val = __ssd_i2c_reg32_read(dev->ctrlp + SSD_I2C_RDATA_REG);
			if (data.bits.valid == 0) {
				break;
			}

			/* retry */
			if (time_after(jiffies, (st + SSD_I2C_TIMEOUT))) {
				ret = -ETIMEDOUT;
				goto out;
			}
			cond_resched();
		}

		/* bounded drain: give up and reset after SSD_I2C_MAX_DATA pops */
		nr_data++;
		if (nr_data <= SSD_I2C_MAX_DATA) {
			goto check_status;
		} else {
			goto out_reset;
		}
	}

	if (status & 0x3) {
		/* clear int */
		ctrl.bits.wdata = 0x04;
		ctrl.bits.addr = SSD_I2C_CMD_REG;
		ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
		__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);
	}

	if (!(status & 0x8)) {
out_reset:
		/* reset i2c controller */
		ctrl.bits.wdata = 0x0;
		ctrl.bits.addr = SSD_I2C_RESET_REG;
		ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
		__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);
	}

out:
	return ret;
}
3044 | ||
/*
 * Master-write 'size' bytes to i2c slave 'saddr'.
 *
 * Sequence: program the slave address, push the payload into the data
 * FIFO, issue the write command (0x01), then poll the status register
 * until the write-done bit (0x1) is set or SSD_I2C_TIMEOUT elapses.
 *
 * Returns 0 on success, -ETIMEDOUT on a poll timeout, -2 if the bus is
 * busy (status 0x20), -3 on missing slave ack (status 0x10), -4 if the
 * post-transfer cleanup failed.  Serialized by dev->i2c_mutex.
 */
static int ssd_i2c_write(struct ssd_device *dev, uint8_t saddr, uint8_t size, uint8_t *buf)
{
	ssd_i2c_ctrl_t ctrl;
	ssd_i2c_data_t data;
	uint8_t off = 0;
	uint8_t status = 0;
	unsigned long st;
	int ret = 0;

	mutex_lock(&dev->i2c_mutex);

	ctrl.val = 0;

	/* slave addr */
	ctrl.bits.wdata = saddr;
	ctrl.bits.addr = SSD_I2C_SADDR_REG;
	ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
	__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

	/* data */
	while (off < size) {
		ctrl.bits.wdata = buf[off];
		ctrl.bits.addr = SSD_I2C_DATA_REG;
		ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
		__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

		off++;
	}

	/* write */
	ctrl.bits.wdata = 0x01;
	ctrl.bits.addr = SSD_I2C_CMD_REG;
	ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
	__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

	/* wait: outer loop re-reads status, inner loop waits for the
	 * read-data register to turn valid */
	st = jiffies;
	for (;;) {
		ctrl.bits.wdata = 0;
		ctrl.bits.addr = SSD_I2C_STATUS_REG;
		ctrl.bits.rw = SSD_I2C_CTRL_READ;
		__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

		for (;;) {
			data.val = __ssd_i2c_reg32_read(dev->ctrlp + SSD_I2C_RDATA_REG);
			if (data.bits.valid == 0) {
				break;
			}

			/* retry */
			if (time_after(jiffies, (st + SSD_I2C_TIMEOUT))) {
				ret = -ETIMEDOUT;
				goto out_clear;
			}
			cond_resched();
		}

		status = data.bits.rdata;
		if (status & 0x1) {
			break;
		}

		if (time_after(jiffies, (st + SSD_I2C_TIMEOUT))) {
			ret = -ETIMEDOUT;
			goto out_clear;
		}
		cond_resched();
	}

	/* defensive re-check; the loop above only exits on this bit */
	if (!(status & 0x1)) {
		ret = -1;
		goto out_clear;
	}

	/* busy ? */
	if (status & 0x20) {
		ret = -2;
		goto out_clear;
	}

	/* ack ? */
	if (status & 0x10) {
		ret = -3;
		goto out_clear;
	}

	/* clear */
out_clear:
	if (__ssd_i2c_clear(dev, saddr)) {
		if (!ret) ret = -4;
	}

	mutex_unlock(&dev->i2c_mutex);

	return ret;
}
3141 | ||
/*
 * Master-read 'size' bytes from i2c slave 'saddr'.
 *
 * Sequence: program the slave address and read length, issue the read
 * command (0x02), poll status for the read-done bit (0x2), then pop
 * 'size' bytes from the data FIFO.
 *
 * Returns 0 on success, -ETIMEDOUT on a poll timeout, -2 if the bus is
 * busy (status 0x20), -3 on missing slave ack (status 0x10), -4 if the
 * post-transfer cleanup failed.  Serialized by dev->i2c_mutex.
 */
static int ssd_i2c_read(struct ssd_device *dev, uint8_t saddr, uint8_t size, uint8_t *buf)
{
	ssd_i2c_ctrl_t ctrl;
	ssd_i2c_data_t data;
	uint8_t off = 0;
	uint8_t status = 0;
	unsigned long st;
	int ret = 0;

	mutex_lock(&dev->i2c_mutex);

	ctrl.val = 0;

	/* slave addr */
	ctrl.bits.wdata = saddr;
	ctrl.bits.addr = SSD_I2C_SADDR_REG;
	ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
	__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

	/* read len */
	ctrl.bits.wdata = size;
	ctrl.bits.addr = SSD_I2C_LEN_REG;
	ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
	__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

	/* read */
	ctrl.bits.wdata = 0x02;
	ctrl.bits.addr = SSD_I2C_CMD_REG;
	ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
	__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

	/* wait: outer loop re-reads status, inner loop waits for the
	 * read-data register to turn valid */
	st = jiffies;
	for (;;) {
		ctrl.bits.wdata = 0;
		ctrl.bits.addr = SSD_I2C_STATUS_REG;
		ctrl.bits.rw = SSD_I2C_CTRL_READ;
		__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

		for (;;) {
			data.val = __ssd_i2c_reg32_read(dev->ctrlp + SSD_I2C_RDATA_REG);
			if (data.bits.valid == 0) {
				break;
			}

			/* retry */
			if (time_after(jiffies, (st + SSD_I2C_TIMEOUT))) {
				ret = -ETIMEDOUT;
				goto out_clear;
			}
			cond_resched();
		}

		status = data.bits.rdata;
		if (status & 0x2) {
			break;
		}

		if (time_after(jiffies, (st + SSD_I2C_TIMEOUT))) {
			ret = -ETIMEDOUT;
			goto out_clear;
		}
		cond_resched();
	}

	/* defensive re-check; the loop above only exits on this bit */
	if (!(status & 0x2)) {
		ret = -1;
		goto out_clear;
	}

	/* busy ? */
	if (status & 0x20) {
		ret = -2;
		goto out_clear;
	}

	/* ack ? */
	if (status & 0x10) {
		ret = -3;
		goto out_clear;
	}

	/* data: pop one byte per DATA_REG read request */
	while (off < size) {
		ctrl.bits.wdata = 0;
		ctrl.bits.addr = SSD_I2C_DATA_REG;
		ctrl.bits.rw = SSD_I2C_CTRL_READ;
		__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

		st = jiffies;
		for (;;) {
			data.val = __ssd_i2c_reg32_read(dev->ctrlp + SSD_I2C_RDATA_REG);
			if (data.bits.valid == 0) {
				break;
			}

			/* retry */
			if (time_after(jiffies, (st + SSD_I2C_TIMEOUT))) {
				ret = -ETIMEDOUT;
				goto out_clear;
			}
			cond_resched();
		}

		buf[off] = data.bits.rdata;

		off++;
	}

	/* clear */
out_clear:
	if (__ssd_i2c_clear(dev, saddr)) {
		if (!ret) ret = -4;
	}

	mutex_unlock(&dev->i2c_mutex);

	return ret;
}
3261 | ||
/*
 * Combined master write-then-read transaction (SMBus-style command +
 * response): write 'wsize' bytes to slave 'saddr', then read 'rsize'
 * bytes back without releasing the bus.
 *
 * Sequence: program the slave address, stage the write payload, program
 * the read length, issue the write-read command (0x03), poll status for
 * the read-done bit (0x2), then pop 'rsize' bytes from the data FIFO.
 *
 * Returns 0 on success, -ETIMEDOUT on a poll timeout, -2 if the bus is
 * busy (status 0x20), -3 on missing slave ack (status 0x10), -4 if the
 * post-transfer cleanup failed.  Serialized by dev->i2c_mutex.
 */
static int ssd_i2c_write_read(struct ssd_device *dev, uint8_t saddr, uint8_t wsize, uint8_t *wbuf, uint8_t rsize, uint8_t *rbuf)
{
	ssd_i2c_ctrl_t ctrl;
	ssd_i2c_data_t data;
	uint8_t off = 0;
	uint8_t status = 0;
	unsigned long st;
	int ret = 0;

	mutex_lock(&dev->i2c_mutex);

	ctrl.val = 0;

	/* slave addr */
	ctrl.bits.wdata = saddr;
	ctrl.bits.addr = SSD_I2C_SADDR_REG;
	ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
	__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

	/* data */
	off = 0;
	while (off < wsize) {
		ctrl.bits.wdata = wbuf[off];
		ctrl.bits.addr = SSD_I2C_DATA_REG;
		ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
		__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

		off++;
	}

	/* read len */
	ctrl.bits.wdata = rsize;
	ctrl.bits.addr = SSD_I2C_LEN_REG;
	ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
	__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

	/* write -> read */
	ctrl.bits.wdata = 0x03;
	ctrl.bits.addr = SSD_I2C_CMD_REG;
	ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
	__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

	/* wait: outer loop re-reads status, inner loop waits for the
	 * read-data register to turn valid */
	st = jiffies;
	for (;;) {
		ctrl.bits.wdata = 0;
		ctrl.bits.addr = SSD_I2C_STATUS_REG;
		ctrl.bits.rw = SSD_I2C_CTRL_READ;
		__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

		for (;;) {
			data.val = __ssd_i2c_reg32_read(dev->ctrlp + SSD_I2C_RDATA_REG);
			if (data.bits.valid == 0) {
				break;
			}

			/* retry */
			if (time_after(jiffies, (st + SSD_I2C_TIMEOUT))) {
				ret = -ETIMEDOUT;
				goto out_clear;
			}
			cond_resched();
		}

		status = data.bits.rdata;
		if (status & 0x2) {
			break;
		}

		if (time_after(jiffies, (st + SSD_I2C_TIMEOUT))) {
			ret = -ETIMEDOUT;
			goto out_clear;
		}
		cond_resched();
	}

	/* defensive re-check; the loop above only exits on this bit */
	if (!(status & 0x2)) {
		ret = -1;
		goto out_clear;
	}

	/* busy ? */
	if (status & 0x20) {
		ret = -2;
		goto out_clear;
	}

	/* ack ? */
	if (status & 0x10) {
		ret = -3;
		goto out_clear;
	}

	/* data: pop one byte per DATA_REG read request */
	off = 0;
	while (off < rsize) {
		ctrl.bits.wdata = 0;
		ctrl.bits.addr = SSD_I2C_DATA_REG;
		ctrl.bits.rw = SSD_I2C_CTRL_READ;
		__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

		st = jiffies;
		for (;;) {
			data.val = __ssd_i2c_reg32_read(dev->ctrlp + SSD_I2C_RDATA_REG);
			if (data.bits.valid == 0) {
				break;
			}

			/* retry */
			if (time_after(jiffies, (st + SSD_I2C_TIMEOUT))) {
				ret = -ETIMEDOUT;
				goto out_clear;
			}
			cond_resched();
		}

		rbuf[off] = data.bits.rdata;

		off++;
	}

	/* clear */
out_clear:
	if (__ssd_i2c_clear(dev, saddr)) {
		if (!ret) ret = -4;
	}
	mutex_unlock(&dev->i2c_mutex);

	return ret;
}
3392 | ||
3393 | static int ssd_smbus_send_byte(struct ssd_device *dev, uint8_t saddr, uint8_t *buf) | |
3394 | { | |
3395 | int i = 0; | |
3396 | int ret = 0; | |
3397 | ||
3398 | for (;;) { | |
3399 | ret = ssd_i2c_write(dev, saddr, 1, buf); | |
3400 | if (!ret || -ETIMEDOUT == ret) { | |
3401 | break; | |
3402 | } | |
3403 | ||
3404 | i++; | |
3405 | if (i >= SSD_SMBUS_RETRY_MAX) { | |
3406 | break; | |
3407 | } | |
3408 | msleep(SSD_SMBUS_RETRY_INTERVAL); | |
3409 | } | |
3410 | ||
3411 | return ret; | |
3412 | } | |
3413 | ||
3414 | static int ssd_smbus_receive_byte(struct ssd_device *dev, uint8_t saddr, uint8_t *buf) | |
3415 | { | |
3416 | int i = 0; | |
3417 | int ret = 0; | |
3418 | ||
3419 | for (;;) { | |
3420 | ret = ssd_i2c_read(dev, saddr, 1, buf); | |
3421 | if (!ret || -ETIMEDOUT == ret) { | |
3422 | break; | |
3423 | } | |
3424 | ||
3425 | i++; | |
3426 | if (i >= SSD_SMBUS_RETRY_MAX) { | |
3427 | break; | |
3428 | } | |
3429 | msleep(SSD_SMBUS_RETRY_INTERVAL); | |
3430 | } | |
3431 | ||
3432 | return ret; | |
3433 | } | |
3434 | ||
3435 | static int ssd_smbus_write_byte(struct ssd_device *dev, uint8_t saddr, uint8_t cmd, uint8_t *buf) | |
3436 | { | |
3437 | uint8_t smb_data[SSD_SMBUS_DATA_MAX] = {0}; | |
3438 | int i = 0; | |
3439 | int ret = 0; | |
3440 | ||
3441 | smb_data[0] = cmd; | |
3442 | memcpy((smb_data + 1), buf, 1); | |
3443 | ||
3444 | for (;;) { | |
3445 | ret = ssd_i2c_write(dev, saddr, 2, smb_data); | |
3446 | if (!ret || -ETIMEDOUT == ret) { | |
3447 | break; | |
3448 | } | |
3449 | ||
3450 | i++; | |
3451 | if (i >= SSD_SMBUS_RETRY_MAX) { | |
3452 | break; | |
3453 | } | |
3454 | msleep(SSD_SMBUS_RETRY_INTERVAL); | |
3455 | } | |
3456 | ||
3457 | return ret; | |
3458 | } | |
3459 | ||
3460 | static int ssd_smbus_read_byte(struct ssd_device *dev, uint8_t saddr, uint8_t cmd, uint8_t *buf) | |
3461 | { | |
3462 | uint8_t smb_data[SSD_SMBUS_DATA_MAX] = {0}; | |
3463 | int i = 0; | |
3464 | int ret = 0; | |
3465 | ||
3466 | smb_data[0] = cmd; | |
3467 | ||
3468 | for (;;) { | |
3469 | ret = ssd_i2c_write_read(dev, saddr, 1, smb_data, 1, buf); | |
3470 | if (!ret || -ETIMEDOUT == ret) { | |
3471 | break; | |
3472 | } | |
3473 | ||
3474 | i++; | |
3475 | if (i >= SSD_SMBUS_RETRY_MAX) { | |
3476 | break; | |
3477 | } | |
3478 | msleep(SSD_SMBUS_RETRY_INTERVAL); | |
3479 | } | |
3480 | ||
3481 | return ret; | |
3482 | } | |
3483 | ||
3484 | static int ssd_smbus_write_word(struct ssd_device *dev, uint8_t saddr, uint8_t cmd, uint8_t *buf) | |
3485 | { | |
3486 | uint8_t smb_data[SSD_SMBUS_DATA_MAX] = {0}; | |
3487 | int i = 0; | |
3488 | int ret = 0; | |
3489 | ||
3490 | smb_data[0] = cmd; | |
3491 | memcpy((smb_data + 1), buf, 2); | |
3492 | ||
3493 | for (;;) { | |
3494 | ret = ssd_i2c_write(dev, saddr, 3, smb_data); | |
3495 | if (!ret || -ETIMEDOUT == ret) { | |
3496 | break; | |
3497 | } | |
3498 | ||
3499 | i++; | |
3500 | if (i >= SSD_SMBUS_RETRY_MAX) { | |
3501 | break; | |
3502 | } | |
3503 | msleep(SSD_SMBUS_RETRY_INTERVAL); | |
3504 | } | |
3505 | ||
3506 | return ret; | |
3507 | } | |
3508 | ||
3509 | static int ssd_smbus_read_word(struct ssd_device *dev, uint8_t saddr, uint8_t cmd, uint8_t *buf) | |
3510 | { | |
3511 | uint8_t smb_data[SSD_SMBUS_DATA_MAX] = {0}; | |
3512 | int i = 0; | |
3513 | int ret = 0; | |
3514 | ||
3515 | smb_data[0] = cmd; | |
3516 | ||
3517 | for (;;) { | |
3518 | ret = ssd_i2c_write_read(dev, saddr, 1, smb_data, 2, buf); | |
3519 | if (!ret || -ETIMEDOUT == ret) { | |
3520 | break; | |
3521 | } | |
3522 | ||
3523 | i++; | |
3524 | if (i >= SSD_SMBUS_RETRY_MAX) { | |
3525 | break; | |
3526 | } | |
3527 | msleep(SSD_SMBUS_RETRY_INTERVAL); | |
3528 | } | |
3529 | ||
3530 | return ret; | |
3531 | } | |
3532 | ||
3533 | static int ssd_smbus_write_block(struct ssd_device *dev, uint8_t saddr, uint8_t cmd, uint8_t size, uint8_t *buf) | |
3534 | { | |
3535 | uint8_t smb_data[SSD_SMBUS_DATA_MAX] = {0}; | |
3536 | int i = 0; | |
3537 | int ret = 0; | |
3538 | ||
3539 | smb_data[0] = cmd; | |
3540 | smb_data[1] = size; | |
3541 | memcpy((smb_data + 2), buf, size); | |
3542 | ||
3543 | for (;;) { | |
3544 | ret = ssd_i2c_write(dev, saddr, (2 + size), smb_data); | |
3545 | if (!ret || -ETIMEDOUT == ret) { | |
3546 | break; | |
3547 | } | |
3548 | ||
3549 | i++; | |
3550 | if (i >= SSD_SMBUS_RETRY_MAX) { | |
3551 | break; | |
3552 | } | |
3553 | msleep(SSD_SMBUS_RETRY_INTERVAL); | |
3554 | } | |
3555 | ||
3556 | return ret; | |
3557 | } | |
3558 | ||
3559 | static int ssd_smbus_read_block(struct ssd_device *dev, uint8_t saddr, uint8_t cmd, uint8_t size, uint8_t *buf) | |
3560 | { | |
3561 | uint8_t smb_data[SSD_SMBUS_DATA_MAX] = {0}; | |
3562 | uint8_t rsize; | |
3563 | int i = 0; | |
3564 | int ret = 0; | |
3565 | ||
3566 | smb_data[0] = cmd; | |
3567 | ||
3568 | for (;;) { | |
3569 | ret = ssd_i2c_write_read(dev, saddr, 1, smb_data, (SSD_SMBUS_BLOCK_MAX + 1), (smb_data + 1)); | |
3570 | if (!ret || -ETIMEDOUT == ret) { | |
3571 | break; | |
3572 | } | |
3573 | ||
3574 | i++; | |
3575 | if (i >= SSD_SMBUS_RETRY_MAX) { | |
3576 | break; | |
3577 | } | |
3578 | msleep(SSD_SMBUS_RETRY_INTERVAL); | |
3579 | } | |
3580 | if (ret) { | |
3581 | return ret; | |
3582 | } | |
3583 | ||
3584 | rsize = smb_data[1]; | |
3585 | ||
3586 | if (rsize > size ) { | |
3587 | rsize = size; | |
3588 | } | |
3589 | ||
3590 | memcpy(buf, (smb_data + 2), rsize); | |
3591 | ||
3592 | return 0; | |
3593 | } | |
3594 | ||
3595 | ||
3596 | static int ssd_gen_swlog(struct ssd_device *dev, uint16_t event, uint32_t data); | |
3597 | ||
3598 | /* sensor */ | |
3599 | static int ssd_init_lm75(struct ssd_device *dev, uint8_t saddr) | |
3600 | { | |
3601 | uint8_t conf = 0; | |
3602 | int ret = 0; | |
3603 | ||
3604 | ret = ssd_smbus_read_byte(dev, saddr, SSD_LM75_REG_CONF, &conf); | |
3605 | if (ret) { | |
3606 | goto out; | |
3607 | } | |
3608 | ||
3609 | conf &= (uint8_t)(~1u); | |
3610 | ||
3611 | ret = ssd_smbus_write_byte(dev, saddr, SSD_LM75_REG_CONF, &conf); | |
3612 | if (ret) { | |
3613 | goto out; | |
3614 | } | |
3615 | ||
3616 | out: | |
3617 | return ret; | |
3618 | } | |
3619 | ||
3620 | static int ssd_lm75_read(struct ssd_device *dev, uint8_t saddr, uint16_t *data) | |
3621 | { | |
3622 | uint16_t val = 0; | |
3623 | int ret; | |
3624 | ||
3625 | ret = ssd_smbus_read_word(dev, saddr, SSD_LM75_REG_TEMP, (uint8_t *)&val); | |
3626 | if (ret) { | |
3627 | return ret; | |
3628 | } | |
3629 | ||
3630 | *data = u16_swap(val); | |
3631 | ||
3632 | return 0; | |
3633 | } | |
3634 | ||
/*
 * Initialize the LM80 hardware monitor: reset it, select 11-bit
 * temperature resolution, program per-channel voltage limits, mask
 * unwanted interrupts, and start monitoring.
 *
 * Returns 0 on success or the first SMBus error encountered.
 */
static int ssd_init_lm80(struct ssd_device *dev, uint8_t saddr)
{
	uint8_t val;
	uint8_t low, high;
	int i;
	int ret = 0;

	/* init: INITIALIZATION bit resets limits and mask registers */
	val = 0x80;
	ret = ssd_smbus_write_byte(dev, saddr, SSD_LM80_REG_CONFIG, &val);
	if (ret) {
		goto out;
	}

	/* 11-bit temp */
	val = 0x08;
	ret = ssd_smbus_write_byte(dev, saddr, SSD_LM80_REG_RES, &val);
	if (ret) {
		goto out;
	}

	/* set volt limit */
	for (i=0; i<SSD_LM80_IN_NR; i++) {
		high = ssd_lm80_limit[i].high;
		low = ssd_lm80_limit[i].low;

		/* capacitor rail: no lower bound */
		if (SSD_LM80_IN_CAP == i) {
			low = 0;
		}

		/* single-controller boards: 1.2V rail unused, disable limits */
		if (dev->hw_info.nr_ctrl <= 1 && SSD_LM80_IN_1V2 == i) {
			high = 0xFF;
			low = 0;
		}

		/* high limit */
		ret = ssd_smbus_write_byte(dev, saddr, SSD_LM80_REG_IN_MAX(i), &high);
		if (ret) {
			goto out;
		}

		/* low limit*/
		ret = ssd_smbus_write_byte(dev, saddr, SSD_LM80_REG_IN_MIN(i), &low);
		if (ret) {
			goto out;
		}
	}

	/* set interrupt mask: allow volt in interrupt except cap in*/
	val = 0x81;
	ret = ssd_smbus_write_byte(dev, saddr, SSD_LM80_REG_MASK1, &val);
	if (ret) {
		goto out;
	}

	/* set interrupt mask: disable others */
	val = 0xFF;
	ret = ssd_smbus_write_byte(dev, saddr, SSD_LM80_REG_MASK2, &val);
	if (ret) {
		goto out;
	}

	/* start: enable monitoring + INT_Enable */
	val = 0x03;
	ret = ssd_smbus_write_byte(dev, saddr, SSD_LM80_REG_CONFIG, &val);
	if (ret) {
		goto out;
	}

out:
	return ret;
}
3707 | ||
3708 | static int ssd_lm80_enable_in(struct ssd_device *dev, uint8_t saddr, int idx) | |
3709 | { | |
3710 | uint8_t val = 0; | |
3711 | int ret = 0; | |
3712 | ||
3713 | if (idx >= SSD_LM80_IN_NR || idx < 0) { | |
3714 | return -EINVAL; | |
3715 | } | |
3716 | ||
3717 | ret = ssd_smbus_read_byte(dev, saddr, SSD_LM80_REG_MASK1, &val); | |
3718 | if (ret) { | |
3719 | goto out; | |
3720 | } | |
3721 | ||
3722 | val &= ~(1UL << (uint32_t)idx); | |
3723 | ||
3724 | ret = ssd_smbus_write_byte(dev, saddr, SSD_LM80_REG_MASK1, &val); | |
3725 | if (ret) { | |
3726 | goto out; | |
3727 | } | |
3728 | ||
3729 | out: | |
3730 | return ret; | |
3731 | } | |
3732 | ||
3733 | static int ssd_lm80_disable_in(struct ssd_device *dev, uint8_t saddr, int idx) | |
3734 | { | |
3735 | uint8_t val = 0; | |
3736 | int ret = 0; | |
3737 | ||
3738 | if (idx >= SSD_LM80_IN_NR || idx < 0) { | |
3739 | return -EINVAL; | |
3740 | } | |
3741 | ||
3742 | ret = ssd_smbus_read_byte(dev, saddr, SSD_LM80_REG_MASK1, &val); | |
3743 | if (ret) { | |
3744 | goto out; | |
3745 | } | |
3746 | ||
3747 | val |= (1UL << (uint32_t)idx); | |
3748 | ||
3749 | ret = ssd_smbus_write_byte(dev, saddr, SSD_LM80_REG_MASK1, &val); | |
3750 | if (ret) { | |
3751 | goto out; | |
3752 | } | |
3753 | ||
3754 | out: | |
3755 | return ret; | |
3756 | } | |
3757 | ||
3758 | static int ssd_lm80_read_temp(struct ssd_device *dev, uint8_t saddr, uint16_t *data) | |
3759 | { | |
3760 | uint16_t val = 0; | |
3761 | int ret; | |
3762 | ||
3763 | ret = ssd_smbus_read_word(dev, saddr, SSD_LM80_REG_TEMP, (uint8_t *)&val); | |
3764 | if (ret) { | |
3765 | return ret; | |
3766 | } | |
3767 | ||
3768 | *data = u16_swap(val); | |
3769 | ||
3770 | return 0; | |
3771 | } | |
/*
 * Pack a sensor fault record into one 32-bit word and emit it to the
 * software event log: the low 16 bits of @ret go to bits 31..16, and
 * the sensor's SMBus address @addr is placed in both bits 15..8 and
 * bits 7..0.
 * NOTE(review): @addr appearing twice in the packed word looks
 * deliberate (address echoed in both low bytes), but confirm — one of
 * the two slots may have been intended for another field.
 */
static int ssd_generate_sensor_fault_log(struct ssd_device *dev, uint16_t event, uint8_t addr,uint32_t ret)
{
	uint32_t data;
	data = ((ret & 0xffff) << 16) | (addr << 8) | addr;
	ssd_gen_swlog(dev,event,data);
	return 0;
}
/*
 * Service an LM80 voltage-alarm event: read (and thereby clear) both
 * alarm registers, and for each voltage input that is alarming,
 * re-sample the rail up to 3 times before logging a fault. Per-input
 * alarm state is latched in dev->hwmon so each fault/recovery is
 * logged only once, and the input's interrupt is masked while it is
 * out of range (re-enabled once it returns to normal).
 * Returns 0 on success or an SMBus error code; SMBus failures are
 * themselves latched and logged as a sensor fault.
 */
static int ssd_lm80_check_event(struct ssd_device *dev, uint8_t saddr)
{
	uint32_t volt;
	uint16_t val = 0, status;
	uint8_t alarm1 = 0, alarm2 = 0;
	uint32_t low, high;
	int i,j=0;
	int ret = 0;

	/* read interrupt status to clear interrupt */
	ret = ssd_smbus_read_byte(dev, saddr, SSD_LM80_REG_ALARM1, &alarm1);
	if (ret) {
		goto out;
	}

	ret = ssd_smbus_read_byte(dev, saddr, SSD_LM80_REG_ALARM2, &alarm2);
	if (ret) {
		goto out;
	}

	status = (uint16_t)alarm1 | ((uint16_t)alarm2 << 8);

	/* parse interrupt status, one bit per voltage input */
	for (i=0; i<SSD_LM80_IN_NR; i++) {
		if (!((status >> (uint32_t)i) & 0x1)) {
			/* input is back to normal: clear the latch and re-enable its irq */
			if (test_and_clear_bit(SSD_HWMON_LM80(i), &dev->hwmon)) {
				/* enable INx irq */
				ret = ssd_lm80_enable_in(dev, saddr, i);
				if (ret) {
					goto out;
				}
			}

			continue;
		}

		/* disable INx irq while the input is alarming */
		ret = ssd_lm80_disable_in(dev, saddr, i);
		if (ret) {
			goto out;
		}

		/* already latched: fault was logged before, don't log again */
		if (test_and_set_bit(SSD_HWMON_LM80(i), &dev->hwmon)) {
			continue;
		}

		/* limits are stored in 10-mV-style units; scale to match CONVERT_VOLT */
		high = (uint32_t)ssd_lm80_limit[i].high * (uint32_t)10;
		low = (uint32_t)ssd_lm80_limit[i].low * (uint32_t)10;

		/* debounce: re-sample up to 3 times before trusting the alarm */
		for (j=0; j<3; j++) {
			ret = ssd_smbus_read_word(dev, saddr, SSD_LM80_REG_IN(i), (uint8_t *)&val);
			if (ret) {
				goto out;
			}
			volt = SSD_LM80_CONVERT_VOLT(u16_swap(val));
			if ((volt>high) || (volt<=low)) {
				if(j<2) {
					msleep(SSD_LM80_CONV_INTERVAL);
				}
			} else {
				break;
			}
		}

		/* any in-range sample means transient glitch: skip logging */
		if (j<3) {
			continue;
		}

		switch (i) {
			case SSD_LM80_IN_CAP: {
				/* capacitor rail: 0 V means short circuit, else volt fault */
				if (0 == volt) {
					ssd_gen_swlog(dev, SSD_LOG_CAP_SHORT_CIRCUIT, 0);
				} else {
					ssd_gen_swlog(dev, SSD_LOG_CAP_VOLT_FAULT, SSD_PL_CAP_VOLT(volt));
				}
				break;
			}

			case SSD_LM80_IN_1V2:
			case SSD_LM80_IN_1V2a:
			case SSD_LM80_IN_1V5:
			case SSD_LM80_IN_1V8: {
				ssd_gen_swlog(dev, SSD_LOG_VOLT_STATUS, SSD_VOLT_LOG_DATA(i, 0, volt));
				break;
			}
			case SSD_LM80_IN_FPGA_3V3:
			case SSD_LM80_IN_3V3: {
				/* 3.3V rails are divided down on the board; rescale before logging */
				ssd_gen_swlog(dev, SSD_LOG_VOLT_STATUS, SSD_VOLT_LOG_DATA(i, 0, SSD_LM80_3V3_VOLT(volt)));
				break;
			}
			default:
				break;
		}
	}

out:
	if (ret) {
		/* SMBus access failed: latch and log the sensor itself as faulty */
		if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80), &dev->hwmon)) {
			ssd_generate_sensor_fault_log(dev, SSD_LOG_SENSOR_FAULT, (uint32_t)saddr,ret);
		}
	} else {
		test_and_clear_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80), &dev->hwmon);
	}
	return ret;
}
3884 | ||
3885 | ||
3886 | static int ssd_init_sensor(struct ssd_device *dev) | |
3887 | { | |
3888 | int ret = 0; | |
3889 | ||
3890 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
3891 | goto out; | |
3892 | } | |
3893 | ||
3894 | ret = ssd_init_lm75(dev, SSD_SENSOR_LM75_SADDRESS); | |
3895 | if (ret) { | |
3896 | hio_warn("%s: init lm75 failed\n", dev->name); | |
3897 | if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM75), &dev->hwmon)) { | |
3898 | ssd_generate_sensor_fault_log(dev, SSD_LOG_SENSOR_FAULT, SSD_SENSOR_LM75_SADDRESS,ret); | |
3899 | } | |
3900 | goto out; | |
3901 | } | |
3902 | ||
3903 | if (dev->hw_info.pcb_ver >= 'B' || dev->hw_info_ext.form_factor == SSD_FORM_FACTOR_HHHL) { | |
3904 | ret = ssd_init_lm80(dev, SSD_SENSOR_LM80_SADDRESS); | |
3905 | if (ret) { | |
3906 | hio_warn("%s: init lm80 failed\n", dev->name); | |
3907 | if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80), &dev->hwmon)) { | |
3908 | ssd_generate_sensor_fault_log(dev, SSD_LOG_SENSOR_FAULT, SSD_SENSOR_LM80_SADDRESS,ret); | |
3909 | } | |
3910 | goto out; | |
3911 | } | |
3912 | } | |
3913 | ||
3914 | out: | |
3915 | /* skip error if not in standard mode */ | |
3916 | if (mode != SSD_DRV_MODE_STANDARD) { | |
3917 | ret = 0; | |
3918 | } | |
3919 | return ret; | |
3920 | } | |
3921 | ||
3922 | /* board volt */ | |
3923 | static int ssd_mon_boardvolt(struct ssd_device *dev) | |
3924 | { | |
3925 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
3926 | return 0; | |
3927 | } | |
3928 | ||
3929 | if (dev->hw_info_ext.form_factor == SSD_FORM_FACTOR_FHHL && dev->hw_info.pcb_ver < 'B') { | |
3930 | return 0; | |
3931 | } | |
3932 | ||
3933 | return ssd_lm80_check_event(dev, SSD_SENSOR_LM80_SADDRESS); | |
3934 | } | |
3935 | ||
3936 | /* temperature */ | |
/*
 * Poll the inlet (LM80) and flash (LM75) temperature sensors and emit
 * over-temperature / back-to-normal events with hysteresis: the
 * over-temp bit in dev->hwmon is set at the OT threshold and only
 * cleared (with a "normal" log) once the reading drops below the
 * separate HYST threshold, so each excursion is logged exactly once.
 * Returns 0 on success or the SMBus error of the failing read.
 */
static int ssd_mon_temp(struct ssd_device *dev)
{
	int cur;
	uint16_t val = 0;
	int ret = 0;

	/* no on-board sensors before protocol v3.2 */
	if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
		return 0;
	}

	/* FHHL boards earlier than PCB rev 'B' carry no LM80 */
	if (dev->hw_info_ext.form_factor == SSD_FORM_FACTOR_FHHL && dev->hw_info.pcb_ver < 'B') {
		return 0;
	}

	/* inlet */
	ret = ssd_lm80_read_temp(dev, SSD_SENSOR_LM80_SADDRESS, &val);
	if (ret) {
		/* log the sensor fault only on the first failure (0 -> 1 latch) */
		if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80), &dev->hwmon)) {
			ssd_generate_sensor_fault_log(dev, SSD_LOG_TEMP_SENSOR_EVENT, SSD_SENSOR_LM80_SADDRESS,ret);
		}
		goto out;
	}
	test_and_clear_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80), &dev->hwmon);

	cur = SSD_SENSOR_CONVERT_TEMP(val);
	if (cur >= SSD_INLET_OT_TEMP) {
		if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_INLET), &dev->hwmon)) {
			ssd_gen_swlog(dev, SSD_LOG_INLET_OVER_TEMP, (uint32_t)cur);
		}
	} else if(cur < SSD_INLET_OT_HYST) {
		if (test_and_clear_bit(SSD_HWMON_TEMP(SSD_TEMP_INLET), &dev->hwmon)) {
			ssd_gen_swlog(dev, SSD_LOG_INLET_NORMAL_TEMP, (uint32_t)cur);
		}
	}

	/* flash */
	ret = ssd_lm75_read(dev, SSD_SENSOR_LM75_SADDRESS, &val);
	if (ret) {
		if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM75), &dev->hwmon)) {
			ssd_generate_sensor_fault_log(dev, SSD_LOG_TEMP_SENSOR_EVENT, SSD_SENSOR_LM75_SADDRESS,ret);
		}
		goto out;
	}
	test_and_clear_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM75), &dev->hwmon);

	cur = SSD_SENSOR_CONVERT_TEMP(val);
	if (cur >= SSD_FLASH_OT_TEMP) {
		if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_FLASH), &dev->hwmon)) {
			ssd_gen_swlog(dev, SSD_LOG_FLASH_OVER_TEMP, (uint32_t)cur);
		}
	} else if(cur < SSD_FLASH_OT_HYST) {
		if (test_and_clear_bit(SSD_HWMON_TEMP(SSD_TEMP_FLASH), &dev->hwmon)) {
			ssd_gen_swlog(dev, SSD_LOG_FLASH_NORMAL_TEMP, (uint32_t)cur);
		}
	}

out:
	return ret;
}
3996 | ||
3997 | /* cmd tag */ | |
/* Release a command tag and wake one waiter blocked in ssd_get_tag(). */
static inline void ssd_put_tag(struct ssd_device *dev, int tag)
{
	test_and_clear_bit(tag, dev->tag_map);
	wake_up(&dev->tag_wq);
}
4003 | ||
/*
 * Allocate a free command tag from dev->tag_map.
 * If no tag is available and @wait is zero, returns -1 immediately;
 * otherwise blocks (uninterruptible) on dev->tag_wq until one frees
 * up. Tags at or above atomic queue_depth count as unavailable, which
 * lets ssd_barrier_start() drain the device by dropping queue_depth
 * to 0.
 */
static inline int ssd_get_tag(struct ssd_device *dev, int wait)
{
	int tag;

find_tag:
	while ((tag = find_first_zero_bit(dev->tag_map, dev->hw_info.cmd_fifo_sz)) >= atomic_read(&dev->queue_depth)) {
		DEFINE_WAIT(__wait);

		if (!wait) {
			return -1;
		}

		prepare_to_wait_exclusive(&dev->tag_wq, &__wait, TASK_UNINTERRUPTIBLE);
		schedule();

		finish_wait(&dev->tag_wq, &__wait);
	}

	/* a racing allocator may have taken the bit between the scan
	 * and here; the atomic test_and_set detects that — retry */
	if (test_and_set_bit(tag, dev->tag_map)) {
		goto find_tag;
	}

	return tag;
}
4028 | ||
/* Release the barrier tag (tag 0); no wakeup — the queue is closed. */
static void ssd_barrier_put_tag(struct ssd_device *dev, int tag)
{
	test_and_clear_bit(tag, dev->tag_map);
}
4033 | ||
4034 | static int ssd_barrier_get_tag(struct ssd_device *dev) | |
4035 | { | |
4036 | int tag = 0; | |
4037 | ||
4038 | if (test_and_set_bit(tag, dev->tag_map)) { | |
4039 | return -1; | |
4040 | } | |
4041 | ||
4042 | return tag; | |
4043 | } | |
4044 | ||
/*
 * Reopen the queue after a barrier: restore full queue depth so
 * ssd_get_tag() can allocate again, wake every tag waiter, and drop
 * the barrier mutex taken by ssd_barrier_start().
 */
static void ssd_barrier_end(struct ssd_device *dev)
{
	atomic_set(&dev->queue_depth, dev->hw_info.cmd_fifo_sz);
	wake_up_all(&dev->tag_wq);

	mutex_unlock(&dev->barrier_mutex);
}
4052 | ||
/*
 * Quiesce the device for a barrier operation: take the barrier mutex,
 * set queue_depth to 0 so new ssd_get_tag() callers block, then poll
 * (up to SSD_CMD_TIMEOUT scheduler ticks) for every in-flight tag to
 * complete. On success the caller must end with ssd_barrier_end().
 * On timeout the barrier is fully undone and -EBUSY is returned.
 */
static int ssd_barrier_start(struct ssd_device *dev)
{
	int i;

	mutex_lock(&dev->barrier_mutex);

	/* close the queue: makes all tags unavailable to ssd_get_tag() */
	atomic_set(&dev->queue_depth, 0);

	for (i=0; i<SSD_CMD_TIMEOUT; i++) {
		/* empty tag map == no commands in flight */
		if (find_first_bit(dev->tag_map, dev->hw_info.cmd_fifo_sz) >= dev->hw_info.cmd_fifo_sz) {
			return 0;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(1);
	}

	/* drain timed out: reopen the queue and release the mutex */
	atomic_set(&dev->queue_depth, dev->hw_info.cmd_fifo_sz);
	wake_up_all(&dev->tag_wq);

	mutex_unlock(&dev->barrier_mutex);

	return -EBUSY;
}
4077 | ||
4078 | static int ssd_busy(struct ssd_device *dev) | |
4079 | { | |
4080 | if (find_first_bit(dev->tag_map, dev->hw_info.cmd_fifo_sz) >= dev->hw_info.cmd_fifo_sz) { | |
4081 | return 0; | |
4082 | } | |
4083 | ||
4084 | return 1; | |
4085 | } | |
4086 | ||
/*
 * Wait for all in-flight commands to complete, polling the tag map
 * once per scheduler tick for up to SSD_CMD_TIMEOUT ticks.
 * Returns 0 when the device drains, -EBUSY on timeout.
 */
static int ssd_wait_io(struct ssd_device *dev)
{
	int i;

	for (i=0; i<SSD_CMD_TIMEOUT; i++) {
		/* empty tag map == no commands in flight */
		if (find_first_bit(dev->tag_map, dev->hw_info.cmd_fifo_sz) >= dev->hw_info.cmd_fifo_sz) {
			return 0;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(1);
	}

	return -EBUSY;
}
4102 | ||
4103 | #if 0 | |
4104 | static int ssd_in_barrier(struct ssd_device *dev) | |
4105 | { | |
4106 | return (0 == atomic_read(&dev->queue_depth)); | |
4107 | } | |
4108 | #endif | |
4109 | ||
/* Free the tag bitmap allocated by ssd_init_tag(). */
static void ssd_cleanup_tag(struct ssd_device *dev)
{
	kfree(dev->tag_map);
}
4114 | ||
/*
 * Allocate and zero the command-tag bitmap (one bit per command FIFO
 * entry, rounded up to whole unsigned longs) and initialise the
 * barrier mutex, queue depth and tag waitqueue.
 * Returns 0 on success, -ENOMEM if the bitmap allocation fails.
 * NOTE(review): GFP_ATOMIC looks unnecessary if this only runs from
 * probe/init context — confirm before relaxing to GFP_KERNEL.
 */
static int ssd_init_tag(struct ssd_device *dev)
{
	int nr_ulongs = ALIGN(dev->hw_info.cmd_fifo_sz, BITS_PER_LONG) / BITS_PER_LONG;

	mutex_init(&dev->barrier_mutex);

	atomic_set(&dev->queue_depth, dev->hw_info.cmd_fifo_sz);

	dev->tag_map = kmalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
	if (!dev->tag_map) {
		return -ENOMEM;
	}

	/* all tags start free */
	memset(dev->tag_map, 0, nr_ulongs * sizeof(unsigned long));

	init_waitqueue_head(&dev->tag_wq);

	return 0;
}
4134 | ||
4135 | /* io stat */ | |
/*
 * Account the completion of a bio against the gendisk statistics
 * (I/O ticks and in-flight count). The implementation is selected by
 * kernel version: 4.14+ uses generic_end_io_acct(), 3.0+/RHEL6.7+ the
 * per-partition part_stat_* API, older kernels the disk_stat_* API
 * with a driver-private in_flight_lock protecting the counter.
 */
static void ssd_end_io_acct(struct ssd_cmd *cmd)
{
	struct ssd_device *dev = cmd->dev;
	struct bio *bio = cmd->bio;
	unsigned long dur = jiffies - cmd->start_time;
	int rw = bio_data_dir(bio);
#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6 && RHEL_MINOR >= 7))
#else
	unsigned long flag;
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0))
	struct hd_struct *part = disk_map_sector_rcu(dev->gd, bio_start(bio));
	generic_end_io_acct(dev->rq, rw, part, cmd->start_time);
#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6 && RHEL_MINOR >= 7))
	int cpu = part_stat_lock();
	struct hd_struct *part = disk_map_sector_rcu(dev->gd, bio_start(bio));
	part_round_stats(cpu, part);
	part_stat_add(cpu, part, ticks[rw], dur);
	part_dec_in_flight(part, rw);
	part_stat_unlock();
#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
	int cpu = part_stat_lock();
	struct hd_struct *part = &dev->gd->part0;
	part_round_stats(cpu, part);
	part_stat_add(cpu, part, ticks[rw], dur);

	/* in_flight is not atomic on these kernels; guard it ourselves */
	spin_lock_irqsave(&dev->in_flight_lock,flag);
	part->in_flight[rw]--;
	spin_unlock_irqrestore(&dev->in_flight_lock,flag);

	part_stat_unlock();

#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14))
	preempt_disable();
	disk_round_stats(dev->gd);
	disk_stat_add(dev->gd, ticks[rw], dur);

	spin_lock_irqsave(&dev->in_flight_lock,flag);
	dev->gd->in_flight--;
	spin_unlock_irqrestore(&dev->in_flight_lock,flag);

	preempt_enable();

#else
	preempt_disable();
	disk_round_stats(dev->gd);
	if (rw == WRITE) {
		disk_stat_add(dev->gd, write_ticks, dur);
	} else {
		disk_stat_add(dev->gd, read_ticks, dur);
	}
	spin_lock_irqsave(&dev->in_flight_lock,flag);
	dev->gd->in_flight--;
	spin_unlock_irqrestore(&dev->in_flight_lock,flag);

	preempt_enable();

#endif
}
4196 | ||
/*
 * Account the start of a bio against the gendisk statistics (I/O
 * count, sectors, in-flight) and stamp cmd->start_time for the
 * matching ssd_end_io_acct(). Implementation selected by kernel
 * version, mirroring ssd_end_io_acct().
 */
static void ssd_start_io_acct(struct ssd_cmd *cmd)
{
	struct ssd_device *dev = cmd->dev;
	struct bio *bio = cmd->bio;
	int rw = bio_data_dir(bio);
#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6 && RHEL_MINOR >= 7))
#else
	unsigned long flag;
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0))
	struct hd_struct *part = disk_map_sector_rcu(dev->gd, bio_start(bio));
	generic_start_io_acct(dev->rq, rw, bio_sectors(bio), part);
#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6 && RHEL_MINOR >= 7))
	int cpu = part_stat_lock();
	struct hd_struct *part = disk_map_sector_rcu(dev->gd, bio_start(bio));
	part_round_stats(cpu, part);
	part_stat_inc(cpu, part, ios[rw]);
	part_stat_add(cpu, part, sectors[rw], bio_sectors(bio));
	part_inc_in_flight(part, rw);
	part_stat_unlock();
#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
	int cpu = part_stat_lock();
	struct hd_struct *part = &dev->gd->part0;
	part_round_stats(cpu, part);
	part_stat_inc(cpu, part, ios[rw]);
	part_stat_add(cpu, part, sectors[rw], bio_sectors(bio));

	/* in_flight is not atomic on these kernels; guard it ourselves */
	spin_lock_irqsave(&dev->in_flight_lock,flag);
	part->in_flight[rw]++;
	spin_unlock_irqrestore(&dev->in_flight_lock,flag);

	part_stat_unlock();

#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14))
	preempt_disable();
	disk_round_stats(dev->gd);
	disk_stat_inc(dev->gd, ios[rw]);
	disk_stat_add(dev->gd, sectors[rw], bio_sectors(bio));

	spin_lock_irqsave(&dev->in_flight_lock,flag);
	dev->gd->in_flight++;
	spin_unlock_irqrestore(&dev->in_flight_lock,flag);

	preempt_enable();
#else
	preempt_disable();
	disk_round_stats(dev->gd);
	if (rw == WRITE) {
		disk_stat_inc(dev->gd, writes);
		disk_stat_add(dev->gd, write_sectors, bio_sectors(bio));
	} else {
		disk_stat_inc(dev->gd, reads);
		disk_stat_add(dev->gd, read_sectors, bio_sectors(bio));
	}

	spin_lock_irqsave(&dev->in_flight_lock,flag);
	dev->gd->in_flight++;
	spin_unlock_irqrestore(&dev->in_flight_lock,flag);

	preempt_enable();

#endif

	/* stamp for duration accounting in ssd_end_io_acct() */
	cmd->start_time = jiffies;
}
4263 | ||
4264 | /* io */ | |
/*
 * Defer a bio onto the send queue and wake the send thread that will
 * submit it. sendq_lock protects the list; in_sendq counts pending
 * bios for the waiter.
 */
static void ssd_queue_bio(struct ssd_device *dev, struct bio *bio)
{
	spin_lock(&dev->sendq_lock);
	ssd_blist_add(&dev->sendq, bio);
	spin_unlock(&dev->sendq_lock);

	atomic_inc(&dev->in_sendq);
	wake_up(&dev->send_waitq);
}
4274 | ||
/*
 * Complete a command. For a bio-backed command: finish I/O accounting,
 * undo the DMA mapping (unless cmd->flag marks a direct/pbio command
 * that was never pci_map_sg'd), free the tag, and either end the bio
 * immediately or defer it to the done thread. For a bio-less
 * (synchronous) command: signal the waiting completion.
 */
static inline void ssd_end_request(struct ssd_cmd *cmd)
{
	struct ssd_device *dev = cmd->dev;
	struct bio *bio = cmd->bio;
	int errors = cmd->errors;
	int tag = cmd->tag;

	if (bio) {
		/* discards carry no data and were never DMA-mapped or accounted */
		if (!ssd_bio_has_discard(bio)) {
			ssd_end_io_acct(cmd);
			if (!cmd->flag) {
				pci_unmap_sg(dev->pdev, cmd->sgl, cmd->nsegs,
					bio_data_dir(bio) == READ ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
			}
		}

		cmd->bio = NULL;
		ssd_put_tag(dev, tag);

		/* end inline for MSI-X, low tags or errors; otherwise hand the
		 * bio to the done thread to finish outside this context */
		if (SSD_INT_MSIX == dev->int_mode || tag < 16 || errors) {
			ssd_bio_endio(bio, errors);
		} else /* if (bio->bi_idx >= bio->bi_vcnt)*/ {
			spin_lock(&dev->doneq_lock);
			ssd_blist_add(&dev->doneq, bio);
			spin_unlock(&dev->doneq_lock);

			atomic_inc(&dev->in_doneq);
			wake_up(&dev->done_waitq);
		}
	} else {
		if (cmd->waiting) {
			complete(cmd->waiting);
		}
	}
}
4310 | ||
/*
 * Fail a command whose timer expired: with all the device's IRQ
 * vectors disabled (so the completion path cannot race us), bump the
 * timeout counter, complete the command with -ETIMEDOUT, then
 * re-enable the vectors and light the alarm LED.
 */
static void ssd_end_timeout_request(struct ssd_cmd *cmd)
{
	struct ssd_device *dev = cmd->dev;
	struct ssd_rw_msg *msg = (struct ssd_rw_msg *)cmd->msg;
	int i;

	for (i=0; i<dev->nr_queue; i++) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
		disable_irq(dev->entry[i].vector);
#else
		disable_irq(pci_irq_vector(dev->pdev, i));
#endif
	}

	atomic_inc(&dev->tocnt);
	//if (cmd->bio) {
	hio_err("%s: cmd timeout: tag %d fun %#x\n", dev->name, msg->tag, msg->fun);
	cmd->errors = -ETIMEDOUT;
	ssd_end_request(cmd);
	//}

	for (i=0; i<dev->nr_queue; i++) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
		enable_irq(dev->entry[i].vector);
#else
		enable_irq(pci_irq_vector(dev->pdev, i));
#endif
	}

	/* alarm led */
	ssd_set_alarm(dev);
}
4343 | ||
4344 | /* cmd timer */ | |
/*
 * Arm the per-command timeout timer. Pre-4.15 kernels use the legacy
 * init_timer()/data-pointer API; 4.15+ use timer_setup(), where the
 * callback recovers the command with from_timer().
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0))
static void ssd_cmd_add_timer(struct ssd_cmd *cmd, int timeout, void (*complt)(struct ssd_cmd *))
#else
static void ssd_cmd_add_timer(struct ssd_cmd *cmd, int timeout, void (*complt)(struct timer_list *))
#endif
{
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0))
	init_timer(&cmd->cmd_timer);

	cmd->cmd_timer.data = (unsigned long)cmd;
	cmd->cmd_timer.function = (void (*)(unsigned long)) complt;
#else
	timer_setup(&cmd->cmd_timer, complt, 0);
#endif

	cmd->cmd_timer.expires = jiffies + timeout;
	add_timer(&cmd->cmd_timer);
}
4363 | ||
/* Deactivate the per-command timer; returns nonzero if it was still
 * pending (i.e. the command has NOT already been timed out). */
static int ssd_cmd_del_timer(struct ssd_cmd *cmd)
{
	return del_timer(&cmd->cmd_timer);
}
4368 | ||
/*
 * Arm a generic driver timer. Same version split as
 * ssd_cmd_add_timer(); note that on 4.15+ the @data argument is
 * unused — the callback must use from_timer() instead.
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0))
static void ssd_add_timer(struct timer_list *timer, int timeout, void (*complt)(void *), void *data)
#else
static void ssd_add_timer(struct timer_list *timer, int timeout, void (*complt)(struct timer_list *), void *data)
#endif
{
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0))
	init_timer(timer);

	timer->data = (unsigned long)data;
	timer->function = (void (*)(unsigned long)) complt;
#else
	timer_setup(timer, complt, 0);
#endif

	timer->expires = jiffies + timeout;
	add_timer(timer);
}
4387 | ||
/* Deactivate a driver timer; returns nonzero if it was still pending. */
static int ssd_del_timer(struct timer_list *timer)
{
	return del_timer(timer);
}
4392 | ||
/*
 * Per-command timer callback: fail the command with -ETIMEDOUT and
 * log the first 32 bits of its message to the software event log.
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0))
static void ssd_cmd_timeout(struct ssd_cmd *cmd)
#else
static void ssd_cmd_timeout(struct timer_list *t)
#endif
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,15,0))
	struct ssd_cmd *cmd = from_timer(cmd, t, cmd_timer);
#endif
	struct ssd_device *dev = cmd->dev;
	/* snapshot the message header before ssd_end_timeout_request()
	 * releases the tag (and with it the msg buffer) */
	uint32_t msg = *(uint32_t *)cmd->msg;

	ssd_end_timeout_request(cmd);

	ssd_gen_swlog(dev, SSD_LOG_TIMEOUT, msg);
}
4409 | ||
4410 | ||
/*
 * Tasklet body: drain this CPU's completion list (filled by
 * ssd_done_bh() in hard-irq context) and finish each command.
 * The splice happens with local IRQs off since the list is shared
 * with the interrupt path on this CPU.
 */
static void __ssd_done(unsigned long data)
{
	struct ssd_cmd *cmd;
	LIST_HEAD(localq);

	local_irq_disable();
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0))
	list_splice_init(&__get_cpu_var(ssd_doneq), &localq);
#else
	list_splice_init(this_cpu_ptr(&ssd_doneq), &localq);
#endif
	local_irq_enable();

	while (!list_empty(&localq)) {
		cmd = list_entry(localq.next, struct ssd_cmd, list);
		list_del_init(&cmd->list);

		ssd_end_request(cmd);
	}
}
4431 | ||
/*
 * Debug variant of the completion tasklet: like __ssd_done(), but
 * before finishing each bio-backed command it consults dev->db_info
 * and injects -EIO / -EROFS when the bio's sector range overlaps the
 * configured fault window [off, off+len).
 */
static void __ssd_done_db(unsigned long data)
{
	struct ssd_cmd *cmd;
	struct ssd_device *dev;
	struct bio *bio;
	LIST_HEAD(localq);

	local_irq_disable();
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0))
	list_splice_init(&__get_cpu_var(ssd_doneq), &localq);
#else
	list_splice_init(this_cpu_ptr(&ssd_doneq), &localq);
#endif
	local_irq_enable();

	while (!list_empty(&localq)) {
		cmd = list_entry(localq.next, struct ssd_cmd, list);
		list_del_init(&cmd->list);

		dev = (struct ssd_device *)cmd->dev;
		bio = cmd->bio;

		if (bio) {
			sector_t off = dev->db_info.data.loc.off;
			uint32_t len = dev->db_info.data.loc.len;

			switch (dev->db_info.type) {
			case SSD_DEBUG_READ_ERR:
				/* inject only when direction matches and ranges overlap */
				if (bio_data_dir(bio) == READ &&
					!((off + len) <= bio_start(bio) || off >= (bio_start(bio) + bio_sectors(bio)))) {
					cmd->errors = -EIO;
				}
				break;
			case SSD_DEBUG_WRITE_ERR:
				if (bio_data_dir(bio) == WRITE &&
					!((off + len) <= bio_start(bio) || off >= (bio_start(bio) + bio_sectors(bio)))) {
					cmd->errors = -EROFS;
				}
				break;
			case SSD_DEBUG_RW_ERR:
				if (!((off + len) <= bio_start(bio) || off >= (bio_start(bio) + bio_sectors(bio)))) {
					if (bio_data_dir(bio) == READ) {
						cmd->errors = -EIO;
					} else {
						cmd->errors = -EROFS;
					}
				}
				break;
			default:
				break;
			}
		}

		ssd_end_request(cmd);
	}
}
4488 | ||
/*
 * Hard-irq completion handler: cancel the command's timeout timer and
 * queue it on this CPU's done list for the tasklet. If the timer has
 * already fired, the timeout path owns the command — log it, light
 * the alarm LED and drop the completion.
 */
static inline void ssd_done_bh(struct ssd_cmd *cmd)
{
	unsigned long flags = 0;

	if (unlikely(!ssd_cmd_del_timer(cmd))) {
		struct ssd_device *dev = cmd->dev;
		struct ssd_rw_msg *msg = (struct ssd_rw_msg *)cmd->msg;
		hio_err("%s: unknown cmd: tag %d fun %#x\n", dev->name, msg->tag, msg->fun);

		/* alarm led */
		ssd_set_alarm(dev);
		return;
	}

	local_irq_save(flags);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0))
	list_add_tail(&cmd->list, &__get_cpu_var(ssd_doneq));
	tasklet_hi_schedule(&__get_cpu_var(ssd_tasklet));
#else
	list_add_tail(&cmd->list, this_cpu_ptr(&ssd_doneq));
	tasklet_hi_schedule(this_cpu_ptr(&ssd_tasklet));
#endif
	local_irq_restore(flags);

	return;
}
4515 | ||
/*
 * Inline completion handler (no tasklet deferral): cancel the timeout
 * timer and finish the command directly. As in ssd_done_bh(), a
 * timer that already fired means the timeout path owns the command.
 */
static inline void ssd_done(struct ssd_cmd *cmd)
{
	if (unlikely(!ssd_cmd_del_timer(cmd))) {
		struct ssd_device *dev = cmd->dev;
		struct ssd_rw_msg *msg = (struct ssd_rw_msg *)cmd->msg;
		hio_err("%s: unknown cmd: tag %d fun %#x\n", dev->name, msg->tag, msg->fun);

		/* alarm led */
		ssd_set_alarm(dev);
		return;
	}

	ssd_end_request(cmd);

	return;
}
4532 | ||
/*
 * Submit a command by writing its message DMA address to the request
 * FIFO register (legacy dispatch path). The timeout timer is armed
 * first; cmd_lock serialises the register write.
 */
static inline void ssd_dispatch_cmd(struct ssd_cmd *cmd)
{
	struct ssd_device *dev = (struct ssd_device *)cmd->dev;

	ssd_cmd_add_timer(cmd, SSD_CMD_TIMEOUT, ssd_cmd_timeout);

	spin_lock(&dev->cmd_lock);
	ssd_reg_write(dev->ctrlp + SSD_REQ_FIFO_REG, cmd->msg_dma);
	spin_unlock(&dev->cmd_lock);
}
4543 | ||
/*
 * Submit a command by ringing the doorbell with its tag and segment
 * count packed into one 32-bit word (tag in the low 16 bits, nsegs in
 * the high 16). The timeout timer is armed first.
 */
static inline void ssd_send_cmd(struct ssd_cmd *cmd)
{
	struct ssd_device *dev = (struct ssd_device *)cmd->dev;

	ssd_cmd_add_timer(cmd, SSD_CMD_TIMEOUT, ssd_cmd_timeout);

	ssd_reg32_write(dev->ctrlp + SSD_REQ_FIFO_REG, ((uint32_t)cmd->tag | ((uint32_t)cmd->nsegs << 16)));
}
4552 | ||
4553 | static inline void ssd_send_cmd_db(struct ssd_cmd *cmd) | |
4554 | { | |
4555 | struct ssd_device *dev = (struct ssd_device *)cmd->dev; | |
4556 | struct bio *bio = cmd->bio; | |
4557 | ||
4558 | ssd_cmd_add_timer(cmd, SSD_CMD_TIMEOUT, ssd_cmd_timeout); | |
4559 | ||
4560 | if (bio) { | |
4561 | switch (dev->db_info.type) { | |
4562 | case SSD_DEBUG_READ_TO: | |
4563 | if (bio_data_dir(bio) == READ) { | |
4564 | return; | |
4565 | } | |
4566 | break; | |
4567 | case SSD_DEBUG_WRITE_TO: | |
4568 | if (bio_data_dir(bio) == WRITE) { | |
4569 | return; | |
4570 | } | |
4571 | break; | |
4572 | case SSD_DEBUG_RW_TO: | |
4573 | return; | |
4574 | break; | |
4575 | default: | |
4576 | break; | |
4577 | } | |
4578 | } | |
4579 | ||
4580 | ssd_reg32_write(dev->ctrlp + SSD_REQ_FIFO_REG, ((uint32_t)cmd->tag | ((uint32_t)cmd->nsegs << 16))); | |
4581 | } | |
4582 | ||
4583 | ||
4584 | /* fixed for BIOVEC_PHYS_MERGEABLE */ | |
4585 | #ifdef SSD_BIOVEC_PHYS_MERGEABLE_FIXED | |
4586 | #include <linux/bio.h> | |
4587 | #include <linux/io.h> | |
4588 | #include <xen/page.h> | |
4589 | ||
/*
 * Xen-aware replacement for BIOVEC_PHYS_MERGEABLE: two biovecs are
 * only mergeable if, in addition to the generic check, their pages'
 * machine frame numbers are identical or consecutive (pseudo-physical
 * contiguity does not imply machine contiguity under Xen).
 */
static bool xen_biovec_phys_mergeable_fixed(const struct bio_vec *vec1,
			const struct bio_vec *vec2)
{
	unsigned long mfn1 = pfn_to_mfn(page_to_pfn(vec1->bv_page));
	unsigned long mfn2 = pfn_to_mfn(page_to_pfn(vec2->bv_page));

	return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&
		((mfn1 == mfn2) || ((mfn1+1) == mfn2));
}
4599 | ||
4600 | #ifdef BIOVEC_PHYS_MERGEABLE | |
4601 | #undef BIOVEC_PHYS_MERGEABLE | |
4602 | #endif | |
4603 | #define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \ | |
4604 | (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \ | |
4605 | (!xen_domain() || xen_biovec_phys_mergeable_fixed(vec1, vec2))) | |
4606 | ||
4607 | #endif | |
4608 | ||
4609 | /* | |
4610 | * BIOVEC_PHYS_MERGEABLE not available from 4.20 onward, and it seems likely | |
4611 | * that all the merging that can be done has been done by the block core | |
4612 | * already. Just stub it out. | |
4613 | */ | |
4614 | #if (LINUX_VERSION_CODE > KERNEL_VERSION(4,20,0)) | |
4615 | # ifdef BIOVEC_PHYS_MERGEABLE | |
4616 | # undef BIOVEC_PHYS_MERGEABLE | |
4617 | # endif | |
4618 | # define BIOVEC_PHYS_MERGEABLE(vec1, vec2) (0) | |
4619 | #endif | |
4620 | ||
/*
 * Build a scatterlist from a bio's segments, merging physically
 * contiguous biovecs (per BIOVEC_PHYS_MERGEABLE) and capping the list
 * at dev->hw_info.cmd_max_sg entries. Returns the number of sg
 * entries produced. Two implementations: the pre-3.14 bio_vec-pointer
 * iterator and the modern bvec_iter one.
 */
static inline int ssd_bio_map_sg(struct ssd_device *dev, struct bio *bio, struct scatterlist *sgl)
{
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0))
	struct bio_vec *bvec, *bvprv = NULL;
	struct scatterlist *sg = NULL;
	int i = 0, nsegs = 0;

#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23))
	sg_init_table(sgl, dev->hw_info.cmd_max_sg);
#endif

	/*
	 * for each segment in bio
	 */
	bio_for_each_segment(bvec, bio, i) {
		/* merge with the previous sg entry when physically contiguous */
		if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
			sg->length += bvec->bv_len;
		} else {
			/* hardware segment limit reached: stop mapping here */
			if (unlikely(nsegs >= (int)dev->hw_info.cmd_max_sg)) {
				break;
			}

			sg = sg ? (sg + 1) : sgl;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
			sg_set_page(sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset);
#else
			sg->page = bvec->bv_page;
			sg->length = bvec->bv_len;
			sg->offset = bvec->bv_offset;
#endif
			nsegs++;
		}
		bvprv = bvec;
	}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
	if (sg) {
		sg_mark_end(sg);
	}
#endif

	/* record how far we got in case the bio was truncated at cmd_max_sg */
	bio->bi_idx = i;

	return nsegs;
#else
	struct bio_vec bvec, bvprv;
	struct bvec_iter iter;
	struct scatterlist *sg = NULL;
	int nsegs = 0;
	int first = 1;

	sg_init_table(sgl, dev->hw_info.cmd_max_sg);

	/*
	 * for each segment in bio
	 */
	bio_for_each_segment(bvec, bio, iter) {
		/* merge with the previous sg entry when physically contiguous */
		if (!first && BIOVEC_PHYS_MERGEABLE(&bvprv, &bvec)) {
			sg->length += bvec.bv_len;
		} else {
			/* hardware segment limit reached: stop mapping here */
			if (unlikely(nsegs >= (int)dev->hw_info.cmd_max_sg)) {
				break;
			}

			sg = sg ? (sg + 1) : sgl;

			sg_set_page(sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);

			nsegs++;
			first = 0;
		}
		bvprv = bvec;
	}

	if (sg) {
		sg_mark_end(sg);
	}

	return nsegs;
#endif
}
4702 | ||
4703 | ||
/*
 * Submit a "physical" bio: build the ssd_rw_msg straight from
 * bio->bi_io_vec without going through scatter/gather DMA mapping
 * (contrast with ssd_submit_bio(), which maps via ssd_bio_map_sg()).
 *
 * @dev:  target device
 * @bio:  bio to submit
 * @wait: passed to ssd_get_tag(); non-zero allows sleeping for a tag
 *
 * Returns 0 on success, -EBUSY when no command tag could be reserved.
 */
static int __ssd_submit_pbio(struct ssd_device *dev, struct bio *bio, int wait)
{
	struct ssd_cmd *cmd;
	struct ssd_rw_msg *msg;
	struct ssd_sg_entry *sge;
	sector_t block = bio_start(bio);
	int tag;
	int i;

	/* reserve a hardware command slot; may sleep when wait != 0 */
	tag = ssd_get_tag(dev, wait);
	if (tag < 0) {
		return -EBUSY;
	}

	cmd = &dev->cmd[tag];
	cmd->bio = bio;
	cmd->flag = 1;	/* mark this command as a pbio submission */

	msg = (struct ssd_rw_msg *)cmd->msg;

	if (ssd_bio_has_discard(bio)) {
		unsigned int length = bio_sectors(bio);

		msg->tag = tag;
		msg->fun = SSD_FUNC_TRIM;

		/* split the discard range into chunks of at most sg_max_sec
		 * sectors, bounded by cmd_max_sg entries */
		sge = msg->sge;
		for (i=0; i<(dev->hw_info.cmd_max_sg); i++) {
			sge->block = block;
			sge->length = (length >= dev->hw_info.sg_max_sec) ? dev->hw_info.sg_max_sec : length;
			sge->buf = 0;

			block += sge->length;
			length -= sge->length;
			sge++;

			if (length <= 0) {	/* length is unsigned: effectively length == 0 */
				++i;	/* count the entry just filled */
				break;
			}
		}
		msg->nsegs = cmd->nsegs = i;

		dev->scmd(cmd);
		return 0;
	}

	/* pbio path uses the bio's own vector count, not ssd_bio_map_sg() */
	msg->nsegs = cmd->nsegs = bio->bi_vcnt;

	if (bio_data_dir(bio) == READ) {
		msg->fun = SSD_FUNC_READ;
		msg->flag = 0;
	} else {
		msg->fun = SSD_FUNC_WRITE;
		msg->flag = dev->wmode;	/* write mode policy (e.g. buffered/sync) */
	}

	sge = msg->sge;
	for (i=0; i<bio->bi_vcnt; i++) {
		sge->block = block;
		sge->length = bio->bi_io_vec[i].bv_len >> 9;	/* bytes -> 512B sectors */
		/* NOTE(review): buf is the page pointer plus offset, not a DMA
		 * mapping — assumes pbio callers provide addresses the hardware
		 * can consume directly; confirm against the pbio contract */
		sge->buf = (uint64_t)((void *)bio->bi_io_vec[i].bv_page + bio->bi_io_vec[i].bv_offset);

		block += sge->length;
		sge++;
	}

	msg->tag = tag;

#ifdef SSD_OT_PROTECT
	/* throttle submissions while the device is over temperature */
	if (unlikely(dev->ot_delay > 0 && dev->ot_protect != 0)) {
		msleep_interruptible(dev->ot_delay);
	}
#endif

	ssd_start_io_acct(cmd);
	dev->scmd(cmd);

	return 0;
}
4787 | ||
/*
 * Submit a regular block-layer bio: map its segments for DMA, fill an
 * ssd_rw_msg and hand it to the hardware via dev->scmd.
 *
 * @dev:  target device
 * @bio:  bio to submit
 * @wait: passed to ssd_get_tag(); non-zero allows sleeping for a tag
 *
 * Returns 0 on success, -EBUSY when no command tag could be reserved.
 */
static inline int ssd_submit_bio(struct ssd_device *dev, struct bio *bio, int wait)
{
	struct ssd_cmd *cmd;
	struct ssd_rw_msg *msg;
	struct ssd_sg_entry *sge;
	struct scatterlist *sgl;
	sector_t block = bio_start(bio);
	int tag;
	int i;

	/* reserve a hardware command slot; may sleep when wait != 0 */
	tag = ssd_get_tag(dev, wait);
	if (tag < 0) {
		return -EBUSY;
	}

	cmd = &dev->cmd[tag];
	cmd->bio = bio;
	cmd->flag = 0;	/* regular (non-pbio) submission */

	msg = (struct ssd_rw_msg *)cmd->msg;

	sgl = cmd->sgl;

	if (ssd_bio_has_discard(bio)) {
		unsigned int length = bio_sectors(bio);

		msg->tag = tag;
		msg->fun = SSD_FUNC_TRIM;

		/* split the discard range into chunks of at most sg_max_sec
		 * sectors, bounded by cmd_max_sg entries */
		sge = msg->sge;
		for (i=0; i<(dev->hw_info.cmd_max_sg); i++) {
			sge->block = block;
			sge->length = (length >= dev->hw_info.sg_max_sec) ? dev->hw_info.sg_max_sec : length;
			sge->buf = 0;

			block += sge->length;
			length -= sge->length;
			sge++;

			if (length <= 0) {	/* length is unsigned: effectively length == 0 */
				++i;	/* count the entry just filled */
				break;
			}
		}
		msg->nsegs = cmd->nsegs = i;

		dev->scmd(cmd);
		return 0;
	}

	msg->nsegs = cmd->nsegs = ssd_bio_map_sg(dev, bio, sgl);

	if (bio_data_dir(bio) == READ) {
		msg->fun = SSD_FUNC_READ;
		msg->flag = 0;
		/* NOTE(review): pci_map_sg() return value is ignored — assumes it
		 * always maps exactly cmd->nsegs entries; confirm */
		pci_map_sg(dev->pdev, sgl, cmd->nsegs, PCI_DMA_FROMDEVICE);
	} else {
		msg->fun = SSD_FUNC_WRITE;
		msg->flag = dev->wmode;	/* write mode policy (e.g. buffered/sync) */
		pci_map_sg(dev->pdev, sgl, cmd->nsegs, PCI_DMA_TODEVICE);
	}

	/* translate the DMA-mapped scatterlist into the device sg format */
	sge = msg->sge;
	for (i=0; i<cmd->nsegs; i++) {
		sge->block = block;
		sge->length = sg_dma_len(sgl) >> 9;	/* bytes -> 512B sectors */
		sge->buf = sg_dma_address(sgl);

		block += sge->length;
		sgl++;
		sge++;
	}

	msg->tag = tag;

#ifdef SSD_OT_PROTECT
	/* throttle submissions while the device is over temperature */
	if (unlikely(dev->ot_delay > 0 && dev->ot_protect != 0)) {
		msleep_interruptible(dev->ot_delay);
	}
#endif

	ssd_start_io_acct(cmd);
	dev->scmd(cmd);

	return 0;
}
4876 | ||
4877 | /* threads */ | |
/*
 * Completion kthread: drains bios queued on dev->doneq by the completion
 * path and ends them.  Runs until kthread_stop().
 *
 * @data: the ssd_device (cast from void *)
 *
 * Returns 0 when stopped, -EINVAL if @data is NULL.
 */
static int ssd_done_thread(void *data)
{
	struct ssd_device *dev;
	struct bio *bio;
	struct bio *next;

	if (!data) {
		return -EINVAL;
	}
	dev = data;

	current->flags |= PF_NOFREEZE;	/* keep servicing completions across freeze */

	while (!kthread_should_stop()) {
		wait_event_interruptible(dev->done_waitq, (atomic_read(&dev->in_doneq) || kthread_should_stop()));

		while (atomic_read(&dev->in_doneq)) {
			/* with threaded irqs the queue is never touched from
			 * hard-irq context, so a plain spin_lock suffices */
			if (threaded_irq) {
				spin_lock(&dev->doneq_lock);
				bio = ssd_blist_get(&dev->doneq);
				spin_unlock(&dev->doneq_lock);
			} else {
				spin_lock_irq(&dev->doneq_lock);
				bio = ssd_blist_get(&dev->doneq);
				spin_unlock_irq(&dev->doneq_lock);
			}

			/* complete the whole chain grabbed above */
			while (bio) {
				next = bio->bi_next;
				bio->bi_next = NULL;	/* detach before ending */
				ssd_bio_endio(bio, 0);
				atomic_dec(&dev->in_doneq);
				bio = next;
			}

			cond_resched();

#ifdef SSD_ESCAPE_IRQ
			/* migrate off the IRQ CPU to avoid competing with the handler */
			if (unlikely(smp_processor_id() == dev->irq_cpu)) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
				cpumask_var_t new_mask;
				if (alloc_cpumask_var(&new_mask, GFP_ATOMIC)) {
					cpumask_setall(new_mask);
					cpumask_clear_cpu(dev->irq_cpu, new_mask);
					set_cpus_allowed_ptr(current, new_mask);
					free_cpumask_var(new_mask);
				}
#else
				cpumask_t new_mask;
				cpus_setall(new_mask);
				cpu_clear(dev->irq_cpu, new_mask);
				set_cpus_allowed(current, new_mask);
#endif
			}
#endif
		}
	}
	return 0;
}
4938 | ||
/*
 * Submission kthread: drains bios queued on dev->sendq (deferred because no
 * tag was available at submit time) and re-submits them with wait=1.
 *
 * @data: the ssd_device (cast from void *)
 *
 * Returns 0 when stopped, -EINVAL if @data is NULL.
 */
static int ssd_send_thread(void *data)
{
	struct ssd_device *dev;
	struct bio *bio;
	struct bio *next;

	if (!data) {
		return -EINVAL;
	}
	dev = data;

	current->flags |= PF_NOFREEZE;	/* keep submitting across freeze */

	while (!kthread_should_stop()) {
		wait_event_interruptible(dev->send_waitq, (atomic_read(&dev->in_sendq) || kthread_should_stop()));

		while (atomic_read(&dev->in_sendq)) {
			spin_lock(&dev->sendq_lock);
			bio = ssd_blist_get(&dev->sendq);
			spin_unlock(&dev->sendq_lock);

			/* re-submit the whole chain grabbed above */
			while (bio) {
				next = bio->bi_next;
				bio->bi_next = NULL;	/* detach before submitting */
#ifdef SSD_QUEUE_PBIO
				/* queued pbios are flagged so they take the pbio path */
				if (test_and_clear_bit(BIO_SSD_PBIO, &bio->bi_flags)) {
					__ssd_submit_pbio(dev, bio, 1);
				} else {
					ssd_submit_bio(dev, bio, 1);
				}
#else
				ssd_submit_bio(dev, bio, 1);
#endif
				atomic_dec(&dev->in_sendq);
				bio = next;
			}

			cond_resched();

#ifdef SSD_ESCAPE_IRQ
			/* migrate off the IRQ CPU to avoid competing with the handler */
			if (unlikely(smp_processor_id() == dev->irq_cpu)) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
				cpumask_var_t new_mask;
				if (alloc_cpumask_var(&new_mask, GFP_ATOMIC)) {
					cpumask_setall(new_mask);
					cpumask_clear_cpu(dev->irq_cpu, new_mask);
					set_cpus_allowed_ptr(current, new_mask);
					free_cpumask_var(new_mask);
				}
#else
				cpumask_t new_mask;
				cpus_setall(new_mask);
				cpu_clear(dev->irq_cpu, new_mask);
				set_cpus_allowed(current, new_mask);
#endif
			}
#endif
		}
	}

	return 0;
}
5002 | ||
/* Stop both worker kthreads; reverse order of ssd_init_thread(). */
static void ssd_cleanup_thread(struct ssd_device *dev)
{
	kthread_stop(dev->send_thread);
	kthread_stop(dev->done_thread);
}
5008 | ||
5009 | static int ssd_init_thread(struct ssd_device *dev) | |
5010 | { | |
5011 | int ret; | |
5012 | ||
5013 | atomic_set(&dev->in_doneq, 0); | |
5014 | atomic_set(&dev->in_sendq, 0); | |
5015 | ||
5016 | spin_lock_init(&dev->doneq_lock); | |
5017 | spin_lock_init(&dev->sendq_lock); | |
5018 | ||
5019 | ssd_blist_init(&dev->doneq); | |
5020 | ssd_blist_init(&dev->sendq); | |
5021 | ||
5022 | init_waitqueue_head(&dev->done_waitq); | |
5023 | init_waitqueue_head(&dev->send_waitq); | |
5024 | ||
5025 | dev->done_thread = kthread_run(ssd_done_thread, dev, "%s/d", dev->name); | |
5026 | if (IS_ERR(dev->done_thread)) { | |
5027 | ret = PTR_ERR(dev->done_thread); | |
5028 | goto out_done_thread; | |
5029 | } | |
5030 | ||
5031 | dev->send_thread = kthread_run(ssd_send_thread, dev, "%s/s", dev->name); | |
5032 | if (IS_ERR(dev->send_thread)) { | |
5033 | ret = PTR_ERR(dev->send_thread); | |
5034 | goto out_send_thread; | |
5035 | } | |
5036 | ||
5037 | return 0; | |
5038 | ||
5039 | out_send_thread: | |
5040 | kthread_stop(dev->done_thread); | |
5041 | out_done_thread: | |
5042 | return ret; | |
5043 | } | |
5044 | ||
5045 | /* dcmd pool */ | |
/* Return a direct-command descriptor to the device's free pool. */
static void ssd_put_dcmd(struct ssd_dcmd *dcmd)
{
	struct ssd_device *dev = (struct ssd_device *)dcmd->dev;

	spin_lock(&dev->dcmd_lock);
	list_add_tail(&dcmd->list, &dev->dcmd_list);
	spin_unlock(&dev->dcmd_lock);
}
5054 | ||
/*
 * Take a direct-command descriptor from the free pool.
 * Returns NULL if the pool is currently empty (non-blocking).
 */
static struct ssd_dcmd *ssd_get_dcmd(struct ssd_device *dev)
{
	struct ssd_dcmd *dcmd = NULL;

	spin_lock(&dev->dcmd_lock);
	if (!list_empty(&dev->dcmd_list)) {
		dcmd = list_entry(dev->dcmd_list.next,
			struct ssd_dcmd, list);
		list_del_init(&dcmd->list);
	}
	spin_unlock(&dev->dcmd_lock);

	return dcmd;
}
5069 | ||
/* Free the dcmd pool allocated by ssd_init_dcmd(). */
static void ssd_cleanup_dcmd(struct ssd_device *dev)
{
	kfree(dev->dcmd);
}
5074 | ||
5075 | static int ssd_init_dcmd(struct ssd_device *dev) | |
5076 | { | |
5077 | struct ssd_dcmd *dcmd; | |
5078 | int dcmd_sz = sizeof(struct ssd_dcmd)*dev->hw_info.cmd_fifo_sz; | |
5079 | int i; | |
5080 | ||
5081 | spin_lock_init(&dev->dcmd_lock); | |
5082 | INIT_LIST_HEAD(&dev->dcmd_list); | |
5083 | init_waitqueue_head(&dev->dcmd_wq); | |
5084 | ||
5085 | dev->dcmd = kmalloc(dcmd_sz, GFP_KERNEL); | |
5086 | if (!dev->dcmd) { | |
5087 | hio_warn("%s: can not alloc dcmd\n", dev->name); | |
5088 | goto out_alloc_dcmd; | |
5089 | } | |
5090 | memset(dev->dcmd, 0, dcmd_sz); | |
5091 | ||
5092 | for (i=0, dcmd=dev->dcmd; i<(int)dev->hw_info.cmd_fifo_sz; i++, dcmd++) { | |
5093 | dcmd->dev = dev; | |
5094 | INIT_LIST_HEAD(&dcmd->list); | |
5095 | list_add_tail(&dcmd->list, &dev->dcmd_list); | |
5096 | } | |
5097 | ||
5098 | return 0; | |
5099 | ||
5100 | out_alloc_dcmd: | |
5101 | return -ENOMEM; | |
5102 | } | |
5103 | ||
/*
 * Release a direct-command message buffer: scrub it, return its descriptor
 * to the pool and wake one waiter sleeping in ssd_get_dmsg().
 */
static void ssd_put_dmsg(void *msg)
{
	struct ssd_dcmd *dcmd = container_of(msg, struct ssd_dcmd, msg);
	struct ssd_device *dev = (struct ssd_device *)dcmd->dev;

	memset(dcmd->msg, 0, SSD_DCMD_MAX_SZ);	/* don't leak a stale command */
	ssd_put_dcmd(dcmd);
	wake_up(&dev->dcmd_wq);
}
5113 | ||
/*
 * Get a direct-command message buffer, sleeping uninterruptibly until one
 * becomes available.  Paired with ssd_put_dmsg().
 */
static void *ssd_get_dmsg(struct ssd_device *dev)
{
	struct ssd_dcmd *dcmd = ssd_get_dcmd(dev);

	/* pool exhausted: wait (exclusively) for a descriptor to be returned */
	while (!dcmd) {
		DEFINE_WAIT(wait);
		prepare_to_wait_exclusive(&dev->dcmd_wq, &wait, TASK_UNINTERRUPTIBLE);
		schedule();

		dcmd = ssd_get_dcmd(dev);

		finish_wait(&dev->dcmd_wq, &wait);
	}
	return dcmd->msg;
}
5129 | ||
5130 | /* do direct cmd */ | |
/*
 * Execute a direct command synchronously: copy @msg into a free tagged
 * command slot, submit it and sleep until the completion fires.
 *
 * @rw:   unused in this implementation
 * @done: if non-NULL, receives cmd->nr_log on return
 *
 * Returns 0 on success, -EBUSY if no tag could be reserved, -ETIMEDOUT or
 * -EIO on command failure.
 */
static int ssd_do_request(struct ssd_device *dev, int rw, void *msg, int *done)
{
	DECLARE_COMPLETION(wait);	/* on-stack completion; only used synchronously here */
	struct ssd_cmd *cmd;
	int tag;
	int ret = 0;

	tag = ssd_get_tag(dev, 1);	/* wait=1: may sleep for a free tag */
	if (tag < 0) {
		return -EBUSY;
	}

	cmd = &dev->cmd[tag];
	cmd->nsegs = 1;
	memcpy(cmd->msg, msg, SSD_DCMD_MAX_SZ);
	((struct ssd_rw_msg *)cmd->msg)->tag = tag;	/* stamp the slot's tag into the copy */

	cmd->waiting = &wait;

	dev->scmd(cmd);

	wait_for_completion(cmd->waiting);
	cmd->waiting = NULL;

	/* -ETIMEDOUT is passed through verbatim; any other error maps to -EIO */
	if (cmd->errors == -ETIMEDOUT) {
		ret = cmd->errors;
	} else if (cmd->errors) {
		ret = -EIO;
	}

	if (done != NULL) {
		*done = cmd->nr_log;
	}
	ssd_put_tag(dev, cmd->tag);

	return ret;
}
5168 | ||
/*
 * Like ssd_do_request(), but reserves a tag via the barrier tag allocator
 * (ssd_barrier_get_tag/ssd_barrier_put_tag) instead of the regular one.
 *
 * @rw:   unused in this implementation
 * @done: if non-NULL, receives cmd->nr_log on return
 *
 * Returns 0 on success, -EBUSY if no barrier tag is available, -ETIMEDOUT
 * or -EIO on command failure.
 */
static int ssd_do_barrier_request(struct ssd_device *dev, int rw, void *msg, int *done)
{
	DECLARE_COMPLETION(wait);	/* on-stack completion; only used synchronously here */
	struct ssd_cmd *cmd;
	int tag;
	int ret = 0;

	tag = ssd_barrier_get_tag(dev);
	if (tag < 0) {
		return -EBUSY;
	}

	cmd = &dev->cmd[tag];
	cmd->nsegs = 1;
	memcpy(cmd->msg, msg, SSD_DCMD_MAX_SZ);
	((struct ssd_rw_msg *)cmd->msg)->tag = tag;	/* stamp the slot's tag into the copy */

	cmd->waiting = &wait;

	dev->scmd(cmd);

	wait_for_completion(cmd->waiting);
	cmd->waiting = NULL;

	/* -ETIMEDOUT is passed through verbatim; any other error maps to -EIO */
	if (cmd->errors == -ETIMEDOUT) {
		ret = cmd->errors;
	} else if (cmd->errors) {
		ret = -EIO;
	}

	if (done != NULL) {
		*done = cmd->nr_log;
	}
	ssd_barrier_put_tag(dev, cmd->tag);

	return ret;
}
5206 | ||
#ifdef SSD_OT_PROTECT
/*
 * Compare every controller's temperature register against @temp and update
 * the over-temperature state: set the hwmon OT bit and enable the ot_delay
 * throttle when any controller is at/above @temp, clear them when all
 * controllers are below it.
 */
static void ssd_check_temperature(struct ssd_device *dev, int temp)
{
	uint64_t val;
	uint32_t off;
	int cur;
	int i;

	if (mode != SSD_DRV_MODE_STANDARD) {
		return;
	}

	/* NOTE(review): empty branch — presumably old protocols were meant to
	 * skip the check (early return); confirm the intended behavior */
	if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) {
	}

	for (i=0; i<dev->hw_info.nr_ctrl; i++) {
		off = SSD_CTRL_TEMP_REG0 + i * sizeof(uint64_t);

		val = ssd_reg_read(dev->ctrlp + off);
		if (val == 0xffffffffffffffffull) {	/* all-ones: register unreadable, skip */
			continue;
		}

		cur = (int)CUR_TEMP(val);
		if (cur >= temp) {
			/* only act on the 0 -> 1 transition of the OT bit */
			if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_CTRL), &dev->hwmon)) {
				if (dev->protocol_info.ver > SSD_PROTOCOL_V3 && dev->protocol_info.ver < SSD_PROTOCOL_V3_2_2) {
					hio_warn("%s: Over temperature, please check the fans.\n", dev->name);
					dev->ot_delay = SSD_OT_DELAY;
				}
			}
			return;
		}
	}

	/* every controller is below @temp: clear OT state if it was set */
	if (test_and_clear_bit(SSD_HWMON_TEMP(SSD_TEMP_CTRL), &dev->hwmon)) {
		if (dev->protocol_info.ver > SSD_PROTOCOL_V3 && dev->protocol_info.ver < SSD_PROTOCOL_V3_2_2) {
			hio_warn("%s: Temperature is OK.\n", dev->name);
			dev->ot_delay = 0;
		}
	}
}
#endif
5250 | ||
5251 | static int ssd_get_ot_status(struct ssd_device *dev, int *status) | |
5252 | { | |
5253 | uint32_t off; | |
5254 | uint32_t val; | |
5255 | int i; | |
5256 | ||
5257 | if (!dev || !status) { | |
5258 | return -EINVAL; | |
5259 | } | |
5260 | ||
5261 | if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2_2) { | |
5262 | for (i=0; i<dev->hw_info.nr_ctrl; i++) { | |
5263 | off = SSD_READ_OT_REG0 + (i * SSD_CTRL_REG_ZONE_SZ); | |
5264 | val = ssd_reg32_read(dev->ctrlp + off); | |
5265 | if ((val >> 22) & 0x1) { | |
5266 | *status = 1; | |
5267 | goto out; | |
5268 | } | |
5269 | ||
5270 | ||
5271 | off = SSD_WRITE_OT_REG0 + (i * SSD_CTRL_REG_ZONE_SZ); | |
5272 | val = ssd_reg32_read(dev->ctrlp + off); | |
5273 | if ((val >> 22) & 0x1) { | |
5274 | *status = 1; | |
5275 | goto out; | |
5276 | } | |
5277 | } | |
5278 | } else { | |
5279 | *status = !!dev->ot_delay; | |
5280 | } | |
5281 | ||
5282 | out: | |
5283 | return 0; | |
5284 | } | |
5285 | ||
5286 | static void ssd_set_ot_protect(struct ssd_device *dev, int protect) | |
5287 | { | |
5288 | uint32_t off; | |
5289 | uint32_t val; | |
5290 | int i; | |
5291 | ||
5292 | mutex_lock(&dev->fw_mutex); | |
5293 | ||
5294 | dev->ot_protect = !!protect; | |
5295 | ||
5296 | if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2_2) { | |
5297 | for (i=0; i<dev->hw_info.nr_ctrl; i++) { | |
5298 | off = SSD_READ_OT_REG0 + (i * SSD_CTRL_REG_ZONE_SZ); | |
5299 | val = ssd_reg32_read(dev->ctrlp + off); | |
5300 | if (dev->ot_protect) { | |
5301 | val |= (1U << 21); | |
5302 | } else { | |
5303 | val &= ~(1U << 21); | |
5304 | } | |
5305 | ssd_reg32_write(dev->ctrlp + off, val); | |
5306 | ||
5307 | ||
5308 | off = SSD_WRITE_OT_REG0 + (i * SSD_CTRL_REG_ZONE_SZ); | |
5309 | val = ssd_reg32_read(dev->ctrlp + off); | |
5310 | if (dev->ot_protect) { | |
5311 | val |= (1U << 21); | |
5312 | } else { | |
5313 | val &= ~(1U << 21); | |
5314 | } | |
5315 | ssd_reg32_write(dev->ctrlp + off, val); | |
5316 | } | |
5317 | } | |
5318 | ||
5319 | mutex_unlock(&dev->fw_mutex); | |
5320 | } | |
5321 | ||
/*
 * Apply the module-level ot_protect setting and (when compiled in) take an
 * initial temperature reading.  Always returns 0.
 */
static int ssd_init_ot_protect(struct ssd_device *dev)
{
	ssd_set_ot_protect(dev, ot_protect);

#ifdef SSD_OT_PROTECT
	ssd_check_temperature(dev, SSD_OT_TEMP);
#endif

	return 0;
}
5332 | ||
5333 | /* log */ | |
/*
 * Read one controller's hardware log into @buf (dev->hw_info.log_sz bytes)
 * via a direct command.  On success *nr_log receives the number of records.
 *
 * Returns 0 on success, -EINVAL for an out-of-range @ctrl_idx, a DMA
 * mapping error, or the ssd_do_request() result.
 */
static int ssd_read_log(struct ssd_device *dev, int ctrl_idx, void *buf, int *nr_log)
{
	struct ssd_log_op_msg *msg;
	struct ssd_log_msg *lmsg;
	dma_addr_t buf_dma;
	size_t length = dev->hw_info.log_sz;
	int ret = 0;

	if (ctrl_idx >= dev->hw_info.nr_ctrl) {
		return -EINVAL;
	}

	buf_dma = pci_map_single(dev->pdev, buf, length, PCI_DMA_FROMDEVICE);
/* dma_mapping_error() lost its one-argument form after 2.6.26 */
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
	ret = dma_mapping_error(buf_dma);
#else
	ret = dma_mapping_error(&(dev->pdev->dev), buf_dma);
#endif
	if (ret) {
		hio_warn("%s: unable to map read DMA buffer\n", dev->name);
		goto out_dma_mapping;
	}

	msg = (struct ssd_log_op_msg *)ssd_get_dmsg(dev);

	/* pre-V3 protocols use the older, smaller ssd_log_msg layout */
	if (dev->protocol_info.ver < SSD_PROTOCOL_V3) {
		lmsg = (struct ssd_log_msg *)msg;
		lmsg->fun = SSD_FUNC_READ_LOG;
		lmsg->ctrl_idx = ctrl_idx;
		lmsg->buf = buf_dma;
	} else {
		msg->fun = SSD_FUNC_READ_LOG;
		msg->ctrl_idx = ctrl_idx;
		msg->buf = buf_dma;
	}

	ret = ssd_do_request(dev, READ, msg, nr_log);
	ssd_put_dmsg(msg);

	pci_unmap_single(dev->pdev, buf_dma, length, PCI_DMA_FROMDEVICE);

out_dma_mapping:
	return ret;
}
5378 | ||
#define SSD_LOG_PRINT_BUF_SZ 256
/*
 * Look up the descriptor for @log's event and, when @print is set and the
 * event's severity passes the module log_level, format and emit one line
 * through the hio_* log helpers.
 *
 * Returns the event's severity level (log_desc->level); unknown events fall
 * through to the SSD_UNKNOWN_EVENT sentinel descriptor.
 */
static int ssd_parse_log(struct ssd_device *dev, struct ssd_log *log, int print)
{
	struct ssd_log_desc *log_desc = ssd_log_desc;
	struct ssd_log_entry *le;
	char *sn = NULL;
	char print_buf[SSD_LOG_PRINT_BUF_SZ];
	int print_len;

	le = &log->le;

	/* find desc: linear scan terminated by the SSD_UNKNOWN_EVENT sentinel */
	while (log_desc->event != SSD_UNKNOWN_EVENT) {
		if (log_desc->event == le->event) {
			break;
		}
		log_desc++;
	}

	if (!print) {
		goto out;
	}

	if (log_desc->level < log_level) {
		goto out;
	}

	/* serial number location depends on the label format version */
	if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
		sn = dev->label.sn;
	} else {
		sn = dev->labelv3.barcode;
	}

	/* NOTE(review): snprintf returns the would-be length, so print_len can
	 * exceed the buffer on truncation; later calls then get a negative
	 * size argument — assumed not to happen with 256 bytes; confirm */
	print_len = snprintf(print_buf, SSD_LOG_PRINT_BUF_SZ, "%s (%s): <%#x>", dev->name, sn, le->event);

	/* hardware events carry the originating controller index */
	if (log->ctrl_idx != SSD_LOG_SW_IDX) {
		print_len += snprintf((print_buf + print_len), (SSD_LOG_PRINT_BUF_SZ - print_len), " controller %d", log->ctrl_idx);
	}

	switch (log_desc->data) {
	case SSD_LOG_DATA_NONE:
		break;
	case SSD_LOG_DATA_LOC:
		/* location payload layout also differs across protocol versions */
		if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
			print_len += snprintf((print_buf + print_len), (SSD_LOG_PRINT_BUF_SZ - print_len), " flash %d", le->data.loc.flash);
			if (log_desc->sblock) {
				print_len += snprintf((print_buf + print_len), (SSD_LOG_PRINT_BUF_SZ - print_len), " block %d", le->data.loc.block);
			}
			if (log_desc->spage) {
				print_len += snprintf((print_buf + print_len), (SSD_LOG_PRINT_BUF_SZ - print_len), " page %d", le->data.loc.page);
			}
		} else {
			print_len += snprintf((print_buf + print_len), (SSD_LOG_PRINT_BUF_SZ - print_len), " flash %d", le->data.loc1.flash);
			if (log_desc->sblock) {
				print_len += snprintf((print_buf + print_len), (SSD_LOG_PRINT_BUF_SZ - print_len), " block %d", le->data.loc1.block);
			}
			if (log_desc->spage) {
				print_len += snprintf((print_buf + print_len), (SSD_LOG_PRINT_BUF_SZ - print_len), " page %d", le->data.loc1.page);
			}
		}
		break;
	case SSD_LOG_DATA_HEX:
		print_len += snprintf((print_buf + print_len), (SSD_LOG_PRINT_BUF_SZ - print_len), " info %#x", le->data.val);
		break;
	default:
		break;
	}
	/* append the human-readable description */
	snprintf((print_buf + print_len), (SSD_LOG_PRINT_BUF_SZ - print_len), ": %s", log_desc->desc);

	/* emit at the severity the descriptor specifies */
	switch (log_desc->level) {
	case SSD_LOG_LEVEL_INFO:
		hio_info("%s\n", print_buf);
		break;
	case SSD_LOG_LEVEL_NOTICE:
		hio_note("%s\n", print_buf);
		break;
	case SSD_LOG_LEVEL_WARNING:
		hio_warn("%s\n", print_buf);
		break;
	case SSD_LOG_LEVEL_ERR:
		hio_err("%s\n", print_buf);
		break;
	default:
		hio_warn("%s\n", print_buf);
		break;
	}

out:
	return log_desc->level;
}
5471 | ||
5472 | static int ssd_bm_get_sfstatus(struct ssd_device *dev, uint16_t *status); | |
5473 | static int ssd_switch_wmode(struct ssd_device *dev, int wmode); | |
5474 | ||
5475 | ||
/*
 * React to a logged device event: update over-temperature state, battery /
 * capacitor health bits and write-mode, then forward the event to the
 * registered event callback (if any).  Always returns 0.
 */
static int ssd_handle_event(struct ssd_device *dev, uint16_t event, int level)
{
	int ret = 0;

	switch (event) {
	case SSD_LOG_OVER_TEMP: {
#ifdef SSD_OT_PROTECT
		/* only act on the 0 -> 1 transition of the OT bit */
		if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_CTRL), &dev->hwmon)) {
			if (dev->protocol_info.ver > SSD_PROTOCOL_V3 && dev->protocol_info.ver < SSD_PROTOCOL_V3_2_2) {
				hio_warn("%s: Over temperature, please check the fans.\n", dev->name);
				dev->ot_delay = SSD_OT_DELAY;
			}
		}
#endif
		break;
	}

	case SSD_LOG_NORMAL_TEMP: {
#ifdef SSD_OT_PROTECT
		/* need to check all controller's temperature */
		ssd_check_temperature(dev, SSD_OT_TEMP_HYST);
#endif
		break;
	}

	case SSD_LOG_BATTERY_FAULT: {
		uint16_t sfstatus;

		if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
			if (!ssd_bm_get_sfstatus(dev, &sfstatus)) {
				ssd_gen_swlog(dev, SSD_LOG_BM_SFSTATUS, sfstatus);
			}
		}

		/* power-loss capacitor unusable: re-evaluate the write mode */
		if (!test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) {
			ssd_switch_wmode(dev, dev->user_wmode);
		}
		break;
	}

	case SSD_LOG_BATTERY_OK: {
		if (test_and_clear_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) {
			ssd_switch_wmode(dev, dev->user_wmode);
		}
		break;
	}

	case SSD_LOG_BOARD_VOLT_FAULT: {
		ssd_mon_boardvolt(dev);
		break;
	}

	case SSD_LOG_CLEAR_LOG: {
		/* update smart */
		memset(&dev->smart.log_info, 0, sizeof(struct ssd_log_info));
		break;
	}

	/* all capacitor faults are treated like a battery fault */
	case SSD_LOG_CAP_VOLT_FAULT:
	case SSD_LOG_CAP_LEARN_FAULT:
	case SSD_LOG_CAP_SHORT_CIRCUIT: {
		if (!test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) {
			ssd_switch_wmode(dev, dev->user_wmode);
		}
		break;
	}

	default:
		break;
	}

	/* ssd event call */
	if (dev->event_call) {
		dev->event_call(dev->gd, event, level);

		/* capacitor faults are additionally reported as battery faults */
		if (SSD_LOG_CAP_VOLT_FAULT == event || SSD_LOG_CAP_LEARN_FAULT == event || SSD_LOG_CAP_SHORT_CIRCUIT == event) {
			dev->event_call(dev->gd, SSD_LOG_BATTERY_FAULT, level);
		}
	}

	return ret;
}
5559 | ||
/*
 * Append @log to the in-memory internal log and, for protocol > V3, persist
 * it to the flash log area via SPI.  Silently stops (returns 0) when the
 * log area is full.
 *
 * Returns 0 on success or the ssd_spi_write() error.
 */
static int ssd_save_log(struct ssd_device *dev, struct ssd_log *log)
{
	uint32_t off, size;
	void *internal_log;
	int ret = 0;

	mutex_lock(&dev->internal_log_mutex);

	size = sizeof(struct ssd_log);
	off = dev->internal_log.nr_log * size;

	/* log area exhausted: warn once per overflow point, keep the data we have */
	if (off == dev->rom_info.log_sz) {
		if (dev->internal_log.nr_log == dev->smart.log_info.nr_log) {
			hio_warn("%s: internal log is full\n", dev->name);
		}
		goto out;
	}

	internal_log = dev->internal_log.log + off;
	memcpy(internal_log, log, size);

	/* pre-V3 protocols keep the log only in memory */
	if (dev->protocol_info.ver > SSD_PROTOCOL_V3) {
		off += dev->rom_info.log_base;

		ret = ssd_spi_write(dev, log, off, size);
		if (ret) {
			/* nr_log is not advanced on a failed flash write */
			goto out;
		}
	}

	dev->internal_log.nr_log++;

out:
	mutex_unlock(&dev->internal_log_mutex);
	return ret;
}
5596 | ||
/** CRC table for the CRC-16 (reflected poly 0x8005, a.k.a. CRC-16/ARC). */
static unsigned short const crc16_table[256] = {
	0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241,
	0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440,
	0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40,
	0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880, 0xC841,
	0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40,
	0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0, 0x1C80, 0xDC41,
	0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641,
	0xD201, 0x12C0, 0x1380, 0xD341, 0x1100, 0xD1C1, 0xD081, 0x1040,
	0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240,
	0x3600, 0xF6C1, 0xF781, 0x3740, 0xF501, 0x35C0, 0x3480, 0xF441,
	0x3C00, 0xFCC1, 0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41,
	0xFA01, 0x3AC0, 0x3B80, 0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840,
	0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41,
	0xEE01, 0x2EC0, 0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40,
	0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640,
	0x2200, 0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0, 0x2080, 0xE041,
	0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240,
	0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441,
	0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80, 0xAE41,
	0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840,
	0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0, 0x7A80, 0xBA41,
	0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40,
	0xB401, 0x74C0, 0x7580, 0xB541, 0x7700, 0xB7C1, 0xB681, 0x7640,
	0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041,
	0x5000, 0x90C1, 0x9181, 0x5140, 0x9301, 0x53C0, 0x5280, 0x9241,
	0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440,
	0x9C01, 0x5CC0, 0x5D80, 0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40,
	0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841,
	0x8801, 0x48C0, 0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40,
	0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41,
	0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641,
	0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040
};

/* Fold one byte into the running CRC via the lookup table. */
static unsigned short crc16_byte(unsigned short crc, const unsigned char data)
{
	const unsigned char idx = (unsigned char)(crc ^ data);

	return (unsigned short)((crc >> 8) ^ crc16_table[idx]);
}

/**
 * crc16 - compute the CRC-16 for the data buffer
 * @crc: previous CRC value (use 0 for a fresh computation)
 * @buffer: data pointer
 * @len: number of bytes in the buffer
 *
 * Returns the updated CRC value.
 */
static unsigned short crc16(unsigned short crc, unsigned char const *buffer, int len)
{
	int i;

	for (i = 0; i < len; i++) {
		crc = crc16_byte(crc, buffer[i]);
	}
	return crc;
}
5651 | ||
/*
 * Record a software-generated log entry: stamp and checksum it, persist it
 * when its severity reaches SSD_LOG_LEVEL, raise the alarm on errors,
 * update SMART counters and run the event handler.
 *
 * Returns the ssd_save_log() result (0 when the entry was not persisted).
 */
static int ssd_save_swlog(struct ssd_device *dev, uint16_t event, uint32_t data)
{
	struct ssd_log log;
	int level;
	int ret = 0;

	if (unlikely(mode != SSD_DRV_MODE_STANDARD))
		return 0;

	memset(&log, 0, sizeof(struct ssd_log));

	log.ctrl_idx = SSD_LOG_SW_IDX;	/* marks the record as driver-generated */
	log.time = ktime_get_real_seconds();
	log.le.event = event;
	log.le.data.val = data;

	log.le.mod = SSD_DIF_WITH_OLD_LOG;
	/* checksum over the first 14 bytes of the record — presumably the
	 * fields preceding idx itself; confirm against struct ssd_log layout */
	log.le.idx = crc16(0,(const unsigned char *)&log,14);
	/* print=0: classify only, printing happens elsewhere */
	level = ssd_parse_log(dev, &log, 0);
	if (level >= SSD_LOG_LEVEL) {
		ret = ssd_save_log(dev, &log);
	}

	/* set alarm */
	if (SSD_LOG_LEVEL_ERR == level) {
		ssd_set_alarm(dev);
	}

	/* update smart */
	dev->smart.log_info.nr_log++;
	dev->smart.log_info.stat[level]++;

	/* handle event */
	ssd_handle_event(dev, event, level);

	return ret;
}
5689 | ||
5690 | static int ssd_gen_swlog(struct ssd_device *dev, uint16_t event, uint32_t data) | |
5691 | { | |
5692 | struct ssd_log_entry le; | |
5693 | int ret; | |
5694 | ||
5695 | if (unlikely(mode != SSD_DRV_MODE_STANDARD)) | |
5696 | return 0; | |
5697 | ||
5698 | /* slave port ? */ | |
5699 | if (dev->slave) { | |
5700 | return 0; | |
5701 | } | |
5702 | ||
5703 | memset(&le, 0, sizeof(struct ssd_log_entry)); | |
5704 | le.event = event; | |
5705 | le.data.val = data; | |
5706 | ||
5707 | ret = sfifo_put(&dev->log_fifo, &le); | |
5708 | if (ret) { | |
5709 | return ret; | |
5710 | } | |
5711 | ||
5712 | if (test_bit(SSD_INIT_WORKQ, &dev->state)) { | |
5713 | queue_work(dev->workq, &dev->log_work); | |
5714 | } | |
5715 | ||
5716 | return 0; | |
5717 | } | |
5718 | ||
5719 | static int ssd_do_swlog(struct ssd_device *dev) | |
5720 | { | |
5721 | struct ssd_log_entry le; | |
5722 | int ret = 0; | |
5723 | ||
5724 | memset(&le, 0, sizeof(struct ssd_log_entry)); | |
5725 | while (!sfifo_get(&dev->log_fifo, &le)) { | |
5726 | ret = ssd_save_swlog(dev, le.event, le.data.val); | |
5727 | if (ret) { | |
5728 | break; | |
5729 | } | |
5730 | } | |
5731 | ||
5732 | return ret; | |
5733 | } | |
5734 | ||
5735 | static int __ssd_clear_log(struct ssd_device *dev) | |
5736 | { | |
5737 | uint32_t off, length; | |
5738 | int ret; | |
5739 | ||
5740 | if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) { | |
5741 | return 0; | |
5742 | } | |
5743 | ||
5744 | if (dev->internal_log.nr_log == 0) { | |
5745 | return 0; | |
5746 | } | |
5747 | ||
5748 | mutex_lock(&dev->internal_log_mutex); | |
5749 | ||
5750 | off = dev->rom_info.log_base; | |
5751 | length = dev->rom_info.log_sz; | |
5752 | ||
5753 | ret = ssd_spi_erase(dev, off, length); | |
5754 | if (ret) { | |
5755 | hio_warn("%s: log erase: failed\n", dev->name); | |
5756 | goto out; | |
5757 | } | |
5758 | ||
5759 | dev->internal_log.nr_log = 0; | |
5760 | ||
5761 | out: | |
5762 | mutex_unlock(&dev->internal_log_mutex); | |
5763 | return ret; | |
5764 | } | |
5765 | ||
5766 | static int ssd_clear_log(struct ssd_device *dev) | |
5767 | { | |
5768 | int ret; | |
5769 | ||
5770 | ret = __ssd_clear_log(dev); | |
5771 | if(!ret) { | |
5772 | ssd_gen_swlog(dev, SSD_LOG_CLEAR_LOG, 0); | |
5773 | } | |
5774 | ||
5775 | return ret; | |
5776 | } | |
5777 | ||
/*
 * Fetch pending hardware log entries from controller @ctrl_idx into @buf
 * and process each one: persist it when its level reaches SSD_LOG_LEVEL,
 * raise the alarm on error-level records, update the SMART counters, and
 * schedule a firmware reload on SEU (single-event-upset) faults.
 *
 * Returns 0 on success, or the error from ssd_read_log().
 */
static int ssd_do_log(struct ssd_device *dev, int ctrl_idx, void *buf)
{
	struct ssd_log_entry *le;
	struct ssd_log log;
	int nr_log = 0;
	int level;
	int ret = 0;

	/* pull the raw entries from the controller into @buf */
	ret = ssd_read_log(dev, ctrl_idx, buf, &nr_log);
	if (ret) {
		return ret;
	}

	/* common header shared by every record built from this batch */
	log.time = ktime_get_real_seconds();
	log.ctrl_idx = ctrl_idx;

	le = (ssd_log_entry_t *)buf;
	while (nr_log > 0) {
		memcpy(&log.le, le, sizeof(struct ssd_log_entry));

		log.le.mod = SSD_DIF_WITH_OLD_LOG;
		/* checksum covers the first 14 bytes of the record */
		log.le.idx = crc16(0,(const unsigned char *)&log,14);
		level = ssd_parse_log(dev, &log, 1);
		if (level >= SSD_LOG_LEVEL) {
			ssd_save_log(dev, &log);
		}

		/* set alarm on error-level records */
		if (SSD_LOG_LEVEL_ERR == level) {
			ssd_set_alarm(dev);
		}

		dev->smart.log_info.nr_log++;
		if (SSD_LOG_SEU_FAULT != le->event && SSD_LOG_SEU_FAULT1 != le->event) {
			/* ordinary record: persistent SMART statistics */
			dev->smart.log_info.stat[level]++;
		} else {
			/* SEU fault */

			/* log to the volatile (in-memory only) log info */
			dev->log_info.nr_log++;
			dev->log_info.stat[level]++;

			/* request a firmware reload to recover from the upset */
			dev->reload_fw = 1;
			ssd_reg32_write(dev->ctrlp + SSD_RELOAD_FW_REG, SSD_RELOAD_FLAG);
			if (le->event != SSD_LOG_SEU_FAULT1) {
				dev->has_non_0x98_reg_access = 1;
			}

			/* switching to read-only on SEU is intentionally disabled:
			dev->readonly = 1;
			set_disk_ro(dev->gd, 1);
			hio_warn("%s: switched to read-only mode.\n", dev->name);*/
		}

		/* give event-specific handlers a chance to react */
		ssd_handle_event(dev, le->event, level);

		le++;
		nr_log--;
	}

	return 0;
}
5841 | ||
/*
 * Workqueue handler: read the hardware logs of every controller when the
 * SSD_LOG_HW flag is set, then drain the software log FIFO.
 * Pre-2.6.20 kernels pass the work data pointer directly; later kernels
 * pass the embedded work_struct.
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
static void ssd_log_worker(void *data)
{
	struct ssd_device *dev = (struct ssd_device *)data;
#else
static void ssd_log_worker(struct work_struct *work)
{
	struct ssd_device *dev = container_of(work, struct ssd_device, log_work);
#endif
	int i;
	int ret;

	/* hardware logs are only read while the device is online and no
	 * previous log read has failed */
	if (!test_bit(SSD_LOG_ERR, &dev->state) && test_bit(SSD_ONLINE, &dev->state)) {
		/* lazily allocate the hardware log read buffer */
		if (!dev->log_buf) {
			dev->log_buf = kmalloc(dev->hw_info.log_sz, GFP_KERNEL);
			if (!dev->log_buf) {
				hio_warn("%s: ssd_log_worker: no mem\n", dev->name);
				return;
			}
		}

		/* get log from each controller when flagged */
		if (test_and_clear_bit(SSD_LOG_HW, &dev->state)) {
			for (i=0; i<dev->hw_info.nr_ctrl; i++) {
				ret = ssd_do_log(dev, i, dev->log_buf);
				if (ret) {
					/* remember the failure so further HW reads are skipped */
					(void)test_and_set_bit(SSD_LOG_ERR, &dev->state);
					hio_warn("%s: do log fail\n", dev->name);
				}
			}
		}
	}

	/* always drain the software log FIFO */
	ret = ssd_do_swlog(dev);
	if (ret) {
		hio_warn("%s: do swlog fail\n", dev->name);
	}
}
5881 | ||
5882 | static void ssd_cleanup_log(struct ssd_device *dev) | |
5883 | { | |
5884 | if (dev->log_buf) { | |
5885 | kfree(dev->log_buf); | |
5886 | dev->log_buf = NULL; | |
5887 | } | |
5888 | ||
5889 | sfifo_free(&dev->log_fifo); | |
5890 | ||
5891 | if (dev->internal_log.log) { | |
5892 | vfree(dev->internal_log.log); | |
5893 | dev->internal_log.nr_log = 0; | |
5894 | dev->internal_log.log = NULL; | |
5895 | } | |
5896 | } | |
5897 | ||
/*
 * Initialize logging for @dev: set up the log work item and FIFO, load
 * the existing NOR-flash log into memory (protocol > V3 only), and record
 * the index of the most recent power-on event in dev->last_poweron_id.
 *
 * Returns 0 on success or a negative errno; errors are suppressed (return
 * forced to 0) when the driver is not in standard mode.
 */
static int ssd_init_log(struct ssd_device *dev)
{
	struct ssd_log *log;
	uint32_t off, size;
	uint32_t len = 0;
	int ret = 0;

	mutex_init(&dev->internal_log_mutex);

	/* old INIT_WORK took an explicit data pointer */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
	INIT_WORK(&dev->log_work, ssd_log_worker, dev);
#else
	INIT_WORK(&dev->log_work, ssd_log_worker);
#endif

	off = dev->rom_info.log_base;
	size = dev->rom_info.log_sz;

	/* buffer large enough to hold the whole on-flash log region */
	dev->internal_log.nr_log = 0;
	dev->internal_log.log = vmalloc(size);
	if (!dev->internal_log.log) {
		ret = -ENOMEM;
		goto out_alloc_log;
	}

	ret = sfifo_alloc(&dev->log_fifo, SSD_LOG_FIFO_SZ, sizeof(struct ssd_log_entry));
	if (ret < 0) {
		goto out_alloc_log_fifo;
	}

	/* protocols <= V3 keep no log in this flash region: nothing to load */
	if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) {
		return 0;
	}

	/* scan the flash log region record by record until the first
	 * unwritten slot (ctrl_idx == 0xff on erased flash) */
	log = (struct ssd_log *)dev->internal_log.log;
	while (len < size) {
		ret = ssd_spi_read(dev, log, off, sizeof(struct ssd_log));
		if (ret) {
			goto out_read_log;
		}

		if (log->ctrl_idx == 0xff) {
			break;
		}

		/* remember the index of the latest power-on record */
		if (log->le.event == SSD_LOG_POWER_ON) {
			if (dev->internal_log.nr_log > dev->last_poweron_id) {
				dev->last_poweron_id = dev->internal_log.nr_log;
			}
		}

		dev->internal_log.nr_log++;
		log++;
		len += sizeof(struct ssd_log);
		off += sizeof(struct ssd_log);
	}

	return 0;

out_read_log:
	sfifo_free(&dev->log_fifo);
out_alloc_log_fifo:
	vfree(dev->internal_log.log);
	dev->internal_log.log = NULL;
	dev->internal_log.nr_log = 0;
out_alloc_log:
	/* skip error if not in standard mode */
	if (mode != SSD_DRV_MODE_STANDARD) {
		ret = 0;
	}
	return ret;
}
5970 | ||
5971 | /* work queue */ | |
5972 | static void ssd_stop_workq(struct ssd_device *dev) | |
5973 | { | |
5974 | test_and_clear_bit(SSD_INIT_WORKQ, &dev->state); | |
5975 | flush_workqueue(dev->workq); | |
5976 | } | |
5977 | ||
5978 | static void ssd_start_workq(struct ssd_device *dev) | |
5979 | { | |
5980 | (void)test_and_set_bit(SSD_INIT_WORKQ, &dev->state); | |
5981 | ||
5982 | /* log ? */ | |
5983 | queue_work(dev->workq, &dev->log_work); | |
5984 | } | |
5985 | ||
/* Flush outstanding work, then destroy the per-device workqueue. */
static void ssd_cleanup_workq(struct ssd_device *dev)
{
	flush_workqueue(dev->workq);
	destroy_workqueue(dev->workq);
	dev->workq = NULL;
}
5992 | ||
5993 | static int ssd_init_workq(struct ssd_device *dev) | |
5994 | { | |
5995 | int ret = 0; | |
5996 | ||
5997 | dev->workq = create_singlethread_workqueue(dev->name); | |
5998 | if (!dev->workq) { | |
5999 | ret = -ESRCH; | |
6000 | goto out; | |
6001 | } | |
6002 | ||
6003 | out: | |
6004 | return ret; | |
6005 | } | |
6006 | ||
6007 | /* rom */ | |
/*
 * Discover the NOR-flash (ROM) layout of this device and fill in
 * dev->rom_info with the base offsets and sizes of each region (bridge
 * firmware, controller firmware, BM firmware, log, SMART, VPD, label).
 * The layout source depends on the protocol version:
 *   - < V3:    fixed compile-time layout constants
 *   - < V3.2:  layout registers; log/SMART regions follow the fw regions
 *   - >= V3.2: layout registers; label/SMART/log allocated downward from
 *              the VPD region
 * Finishes by initializing the SPI flash interface.
 */
static int ssd_init_rom_info(struct ssd_device *dev)
{
	uint32_t val;

	mutex_init(&dev->spi_mutex);
	mutex_init(&dev->i2c_mutex);

	if (dev->protocol_info.ver < SSD_PROTOCOL_V3) {
		/* fix bug: read data to clear status */
		(void)ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_RDATA);

		/* fixed legacy layout */
		dev->rom_info.size = SSD_ROM_SIZE;
		dev->rom_info.block_size = SSD_ROM_BLK_SIZE;
		dev->rom_info.page_size = SSD_ROM_PAGE_SIZE;

		dev->rom_info.bridge_fw_base = SSD_ROM_BRIDGE_FW_BASE;
		dev->rom_info.bridge_fw_sz = SSD_ROM_BRIDGE_FW_SIZE;
		dev->rom_info.nr_bridge_fw = SSD_ROM_NR_BRIDGE_FW;

		dev->rom_info.ctrl_fw_base = SSD_ROM_CTRL_FW_BASE;
		dev->rom_info.ctrl_fw_sz = SSD_ROM_CTRL_FW_SIZE;
		dev->rom_info.nr_ctrl_fw = SSD_ROM_NR_CTRL_FW;

		dev->rom_info.log_sz = SSD_ROM_LOG_SZ;

		dev->rom_info.vp_base = SSD_ROM_VP_BASE;
		dev->rom_info.label_base = SSD_ROM_LABEL_BASE;
	} else if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
		/* geometry register: size exponent (MB), block-size exponent
		 * (64KB units), page size in bytes */
		val = ssd_reg32_read(dev->ctrlp + SSD_ROM_INFO_REG);
		dev->rom_info.size = 0x100000 * (1U << (val & 0xFF));
		dev->rom_info.block_size = 0x10000 * (1U << ((val>>8) & 0xFF));
		dev->rom_info.page_size = (val>>16) & 0xFFFF;

		/* firmware regions: base/size in blocks, count in top 2 bits */
		val = ssd_reg32_read(dev->ctrlp + SSD_ROM_BRIDGE_FW_INFO_REG);
		dev->rom_info.bridge_fw_base = dev->rom_info.block_size * (val & 0xFFFF);
		dev->rom_info.bridge_fw_sz = dev->rom_info.block_size * ((val>>16) & 0x3FFF);
		dev->rom_info.nr_bridge_fw = ((val >> 30) & 0x3) + 1;

		val = ssd_reg32_read(dev->ctrlp + SSD_ROM_CTRL_FW_INFO_REG);
		dev->rom_info.ctrl_fw_base = dev->rom_info.block_size * (val & 0xFFFF);
		dev->rom_info.ctrl_fw_sz = dev->rom_info.block_size * ((val>>16) & 0x3FFF);
		dev->rom_info.nr_ctrl_fw = ((val >> 30) & 0x3) + 1;

		/* remaining regions are packed right after the fw copies */
		dev->rom_info.bm_fw_base = dev->rom_info.ctrl_fw_base + (dev->rom_info.nr_ctrl_fw * dev->rom_info.ctrl_fw_sz);
		dev->rom_info.bm_fw_sz = SSD_PV3_ROM_BM_FW_SZ;
		dev->rom_info.nr_bm_fw = SSD_PV3_ROM_NR_BM_FW;

		dev->rom_info.log_base = dev->rom_info.bm_fw_base + (dev->rom_info.nr_bm_fw * dev->rom_info.bm_fw_sz);
		dev->rom_info.log_sz = SSD_ROM_LOG_SZ;

		dev->rom_info.smart_base = dev->rom_info.log_base + dev->rom_info.log_sz;
		dev->rom_info.smart_sz = SSD_PV3_ROM_SMART_SZ;
		dev->rom_info.nr_smart = SSD_PV3_ROM_NR_SMART;

		val = ssd_reg32_read(dev->ctrlp + SSD_ROM_VP_INFO_REG);
		dev->rom_info.vp_base = dev->rom_info.block_size * val;
		dev->rom_info.label_base = dev->rom_info.vp_base + dev->rom_info.block_size;
		/* keep the label inside the flash: fall back below vp if needed */
		if (dev->rom_info.label_base >= dev->rom_info.size) {
			dev->rom_info.label_base = dev->rom_info.vp_base - dev->rom_info.block_size;
		}
	} else {
		val = ssd_reg32_read(dev->ctrlp + SSD_ROM_INFO_REG);
		dev->rom_info.size = 0x100000 * (1U << (val & 0xFF));
		dev->rom_info.block_size = 0x10000 * (1U << ((val>>8) & 0xFF));
		dev->rom_info.page_size = (val>>16) & 0xFFFF;

		val = ssd_reg32_read(dev->ctrlp + SSD_ROM_BRIDGE_FW_INFO_REG);
		dev->rom_info.bridge_fw_base = dev->rom_info.block_size * (val & 0xFFFF);
		dev->rom_info.bridge_fw_sz = dev->rom_info.block_size * ((val>>16) & 0x3FFF);
		dev->rom_info.nr_bridge_fw = ((val >> 30) & 0x3) + 1;

		val = ssd_reg32_read(dev->ctrlp + SSD_ROM_CTRL_FW_INFO_REG);
		dev->rom_info.ctrl_fw_base = dev->rom_info.block_size * (val & 0xFFFF);
		dev->rom_info.ctrl_fw_sz = dev->rom_info.block_size * ((val>>16) & 0x3FFF);
		dev->rom_info.nr_ctrl_fw = ((val >> 30) & 0x3) + 1;

		/* V3.2: label, SMART and log regions are laid out downward
		 * from the VPD base */
		val = ssd_reg32_read(dev->ctrlp + SSD_ROM_VP_INFO_REG);
		dev->rom_info.vp_base = dev->rom_info.block_size * val;
		dev->rom_info.label_base = dev->rom_info.vp_base - SSD_PV3_2_ROM_SEC_SZ;

		dev->rom_info.nr_smart = SSD_PV3_ROM_NR_SMART;
		dev->rom_info.smart_sz = SSD_PV3_2_ROM_SEC_SZ;
		dev->rom_info.smart_base = dev->rom_info.label_base - (dev->rom_info.smart_sz * dev->rom_info.nr_smart);
		/* a SMART section never spans more than one erase block */
		if (dev->rom_info.smart_sz > dev->rom_info.block_size) {
			dev->rom_info.smart_sz = dev->rom_info.block_size;
		}

		dev->rom_info.log_sz = SSD_PV3_2_ROM_LOG_SZ;
		dev->rom_info.log_base = dev->rom_info.smart_base - dev->rom_info.log_sz;
	}

	return ssd_init_spi(dev);
}
6101 | ||
6102 | /* smart */ | |
6103 | static int ssd_update_smart(struct ssd_device *dev, struct ssd_smart *smart) | |
6104 | { | |
6105 | uint64_t cur_time, run_time; | |
6106 | #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)) | |
6107 | struct hd_struct *part; | |
6108 | int cpu; | |
6109 | #endif | |
6110 | int i, j; | |
6111 | int ret = 0; | |
6112 | ||
6113 | if (!test_bit(SSD_INIT_BD, &dev->state)) { | |
6114 | return 0; | |
6115 | } | |
6116 | ||
6117 | cur_time = (uint64_t)ktime_get_real_seconds(); | |
6118 | if (cur_time < dev->uptime) { | |
6119 | run_time = 0; | |
6120 | } else { | |
6121 | run_time = cur_time - dev->uptime; | |
6122 | } | |
6123 | ||
6124 | /* avoid frequently update */ | |
6125 | if (run_time >= 60) { | |
6126 | ret = 1; | |
6127 | } | |
6128 | ||
6129 | /* io stat */ | |
6130 | smart->io_stat.run_time += run_time; | |
6131 | ||
6132 | #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)) | |
6133 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(5,0,0)) | |
6134 | cpu = part_stat_lock(); | |
6135 | part = &dev->gd->part0; | |
6136 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0)) | |
6137 | part_round_stats(dev->rq, cpu, part); | |
6138 | #else | |
6139 | part_round_stats(cpu, part); | |
6140 | #endif | |
6141 | part_stat_unlock(); | |
6142 | #endif | |
6143 | ||
6144 | smart->io_stat.nr_read += part_stat_read(part, ios[READ]); | |
6145 | smart->io_stat.nr_write += part_stat_read(part, ios[WRITE]); | |
6146 | smart->io_stat.rsectors += part_stat_read(part, sectors[READ]); | |
6147 | smart->io_stat.wsectors += part_stat_read(part, sectors[WRITE]); | |
6148 | #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14)) | |
6149 | preempt_disable(); | |
6150 | disk_round_stats(dev->gd); | |
6151 | preempt_enable(); | |
6152 | ||
6153 | smart->io_stat.nr_read += disk_stat_read(dev->gd, ios[READ]); | |
6154 | smart->io_stat.nr_write += disk_stat_read(dev->gd, ios[WRITE]); | |
6155 | smart->io_stat.rsectors += disk_stat_read(dev->gd, sectors[READ]); | |
6156 | smart->io_stat.wsectors += disk_stat_read(dev->gd, sectors[WRITE]); | |
6157 | #else | |
6158 | preempt_disable(); | |
6159 | disk_round_stats(dev->gd); | |
6160 | preempt_enable(); | |
6161 | ||
6162 | smart->io_stat.nr_read += disk_stat_read(dev->gd, reads); | |
6163 | smart->io_stat.nr_write += disk_stat_read(dev->gd, writes); | |
6164 | smart->io_stat.rsectors += disk_stat_read(dev->gd, read_sectors); | |
6165 | smart->io_stat.wsectors += disk_stat_read(dev->gd, write_sectors); | |
6166 | #endif | |
6167 | ||
6168 | smart->io_stat.nr_to += atomic_read(&dev->tocnt); | |
6169 | ||
6170 | for (i=0; i<dev->nr_queue; i++) { | |
6171 | smart->io_stat.nr_rwerr += dev->queue[i].io_stat.nr_rwerr; | |
6172 | smart->io_stat.nr_ioerr += dev->queue[i].io_stat.nr_ioerr; | |
6173 | } | |
6174 | ||
6175 | for (i=0; i<dev->nr_queue; i++) { | |
6176 | for (j=0; j<SSD_ECC_MAX_FLIP; j++) { | |
6177 | smart->ecc_info.bitflip[j] += dev->queue[i].ecc_info.bitflip[j]; | |
6178 | } | |
6179 | } | |
6180 | ||
6181 | //dev->uptime = tv.tv_sec; | |
6182 | ||
6183 | return ret; | |
6184 | } | |
6185 | ||
/*
 * Erase the persistent SMART regions in NOR flash and reset all in-memory
 * SMART data: totals, per-queue temporary counters, timeout count and the
 * volatile log info. The SMART version number is carried over (+1) so the
 * freshly cleared record supersedes the stored copies.
 * No-op for protocol versions <= V3.
 */
static int __ssd_clear_smart(struct ssd_device *dev)
{
	uint64_t sversion;
	uint32_t off, length;
	int i;
	int ret;

	if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) {
		return 0;
	}

	/* clear smart: erase every stored copy */
	off = dev->rom_info.smart_base;
	length = dev->rom_info.smart_sz * dev->rom_info.nr_smart;

	ret = ssd_spi_erase(dev, off, length);
	if (ret) {
		hio_warn("%s: info erase: failed\n", dev->name);
		goto out;
	}

	/* preserve the version counter across the wipe */
	sversion = dev->smart.version;

	memset(&dev->smart, 0, sizeof(struct ssd_smart));
	dev->smart.version = sversion + 1;
	dev->smart.magic = SSD_SMART_MAGIC;

	/* clear all tmp acc (per-queue accumulators) */
	for (i=0; i<dev->nr_queue; i++) {
		memset(&(dev->queue[i].io_stat), 0, sizeof(struct ssd_io_stat));
		memset(&(dev->queue[i].ecc_info), 0, sizeof(struct ssd_ecc_info));
	}

	atomic_set(&dev->tocnt, 0);

	/* clear tmp log info */
	memset(&dev->log_info, 0, sizeof(struct ssd_log_info));

	/* runtime accounting restarts from now */
	dev->uptime = (uint64_t)ktime_get_real_seconds();

	/* clear alarm ? */
	//ssd_clear_alarm(dev);
out:
	return ret;
}
6231 | ||
/*
 * Reset every warning-related counter (log stats, timeout/rw/io error
 * counts, ECC info) in both the device totals and the per-queue
 * accumulators, then persist the cleaned SMART data to every copy in
 * NOR flash. No-op for protocol versions <= V3.
 *
 * NOTE(review): dev->smart.version is incremented only after the write
 * loop, so the stored copies carry the pre-increment version — presumably
 * intentional so the next save supersedes them; confirm against
 * ssd_init_smart()'s version reconciliation.
 */
static int __ssd_clear_warning(struct ssd_device *dev)
{
	uint32_t off, size;
	int i, ret = 0;

	if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) {
		return 0;
	}

	/* clear log_info warning */
	memset(&dev->smart.log_info, 0, sizeof(dev->smart.log_info));

	/* clear io_stat warning */
	dev->smart.io_stat.nr_to = 0;
	dev->smart.io_stat.nr_rwerr = 0;
	dev->smart.io_stat.nr_ioerr = 0;

	/* clear ecc_info warning */
	memset(&dev->smart.ecc_info, 0, sizeof(dev->smart.ecc_info));

	/* clear queued warnings */
	for (i=0; i<dev->nr_queue; i++) {
		/* queued io_stat warning */
		dev->queue[i].io_stat.nr_to = 0;
		dev->queue[i].io_stat.nr_rwerr = 0;
		dev->queue[i].io_stat.nr_ioerr = 0;

		/* queued ecc_info warning */
		memset(&(dev->queue[i].ecc_info), 0, sizeof(dev->queue[i].ecc_info));
	}

	/* write smart back to nor: erase + rewrite every stored copy */
	for (i = 0; i < dev->rom_info.nr_smart; i++) {
		off = dev->rom_info.smart_base + (dev->rom_info.smart_sz * i);
		size = dev->rom_info.smart_sz;

		ret = ssd_spi_erase(dev, off, size);
		if (ret) {
			hio_warn("%s: warning erase: failed with code 1\n", dev->name);
			goto out;
		}

		size = sizeof(struct ssd_smart);

		ret = ssd_spi_write(dev, &dev->smart, off, size);
		if (ret) {
			hio_warn("%s: warning erase: failed with code 2\n", dev->name);
			goto out;
		}
	}

	dev->smart.version++;

	/* clear cmd timeout warning */
	atomic_set(&dev->tocnt, 0);

	/* clear tmp log info */
	memset(&dev->log_info, 0, sizeof(dev->log_info));

out:
	return ret;
}
6294 | ||
6295 | static int ssd_clear_smart(struct ssd_device *dev) | |
6296 | { | |
6297 | int ret; | |
6298 | ||
6299 | ret = __ssd_clear_smart(dev); | |
6300 | if(!ret) { | |
6301 | ssd_gen_swlog(dev, SSD_LOG_CLEAR_SMART, 0); | |
6302 | } | |
6303 | ||
6304 | return ret; | |
6305 | } | |
6306 | ||
6307 | static int ssd_clear_warning(struct ssd_device *dev) | |
6308 | { | |
6309 | int ret; | |
6310 | ||
6311 | ret = __ssd_clear_warning(dev); | |
6312 | if(!ret) { | |
6313 | ssd_gen_swlog(dev, SSD_LOG_CLEAR_WARNING, 0); | |
6314 | } | |
6315 | ||
6316 | return ret; | |
6317 | } | |
6318 | ||
/*
 * Refresh the SMART counters and, if ssd_update_smart() reports they are
 * due for persistence, bump the version and rewrite every SMART copy in
 * NOR flash. No-op outside standard mode or for protocols <= V3.
 *
 * Returns 0 on success (or when skipped) or the SPI erase/write error.
 * NOTE(review): a failure mid-loop leaves earlier copies at the new
 * version and later ones stale — presumably reconciled at next init by
 * ssd_init_smart()'s highest-version selection; confirm.
 */
static int ssd_save_smart(struct ssd_device *dev)
{
	uint32_t off, size;
	int i;
	int ret = 0;

	if (unlikely(mode != SSD_DRV_MODE_STANDARD))
		return 0;

	if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) {
		return 0;
	}

	/* nothing changed enough to warrant a flash write */
	if (!ssd_update_smart(dev, &dev->smart)) {
		return 0;
	}

	dev->smart.version++;

	/* rewrite every redundant SMART copy */
	for (i=0; i<dev->rom_info.nr_smart; i++) {
		off = dev->rom_info.smart_base + (dev->rom_info.smart_sz * i);
		size = dev->rom_info.smart_sz;

		ret = ssd_spi_erase(dev, off, size);
		if (ret) {
			hio_warn("%s: info erase failed\n", dev->name);
			goto out;
		}

		size = sizeof(struct ssd_smart);

		ret = ssd_spi_write(dev, &dev->smart, off, size);
		if (ret) {
			hio_warn("%s: info write failed\n", dev->name);
			goto out;
		}

		//xx
	}

out:
	return ret;
}
6362 | ||
/*
 * Load the SMART data at device init: pick the highest-version valid copy
 * from the redundant NOR-flash slots, detect a fresh power-up, reconcile
 * the SMART log statistics against the on-flash log, and rewrite any
 * flash copy that is missing or out of date. Raises the alarm if the
 * loaded history already contains faults.
 *
 * Returns 0 on success or a negative errno; errors are suppressed
 * (return forced to 0) when the driver is not in standard mode.
 * No-op for protocol versions <= V3.
 */
static int ssd_init_smart(struct ssd_device *dev)
{
	struct ssd_smart *smart;
	uint32_t off, size, val;
	int i;
	int ret = 0;
	int update_smart = 0;

	dev->uptime = (uint64_t)ktime_get_real_seconds();

	if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) {
		return 0;
	}

	/* scratch space for all stored copies */
	smart = kmalloc(sizeof(struct ssd_smart) * SSD_ROM_NR_SMART_MAX, GFP_KERNEL);
	if (!smart) {
		ret = -ENOMEM;
		goto out_nomem;
	}

	memset(&dev->smart, 0, sizeof(struct ssd_smart));

	/* read smart: keep the copy with the highest version */
	for (i=0; i<dev->rom_info.nr_smart; i++) {
		memset(&smart[i], 0, sizeof(struct ssd_smart));

		off = dev->rom_info.smart_base + (dev->rom_info.smart_sz * i);
		size = sizeof(struct ssd_smart);

		ret = ssd_spi_read(dev, &smart[i], off, size);
		if (ret) {
			hio_warn("%s: info read failed\n", dev->name);
			goto out;
		}

		/* invalid copy: mark it stale so it is rewritten below */
		if (smart[i].magic != SSD_SMART_MAGIC) {
			smart[i].magic = 0;
			smart[i].version = 0;
			continue;
		}

		if (smart[i].version > dev->smart.version) {
			memcpy(&dev->smart, &smart[i], sizeof(struct ssd_smart));
		}
	}

	if (dev->smart.magic != SSD_SMART_MAGIC) {
		/* first time power up */
		dev->smart.magic = SSD_SMART_MAGIC;
		dev->smart.version = 1;
	}

	/* interval register reads 0 on a fresh power-up */
	val = ssd_reg32_read(dev->ctrlp + SSD_INTR_INTERVAL_REG);
	if (!val) {
		dev->last_poweron_id = ~0;
		ssd_gen_swlog(dev, SSD_LOG_POWER_ON, dev->hw_info.bridge_ver);
		/* timeout counts don't survive a power cycle */
		if (dev->smart.io_stat.nr_to) {
			dev->smart.io_stat.nr_to = 0;
			update_smart = 1;
		}
	}

	/* check log info: recount the on-flash log and compare with the
	 * stored SMART statistics to detect an unclean shutdown */
	{
		struct ssd_log_info log_info;
		struct ssd_log *log = (struct ssd_log *)dev->internal_log.log;

		memset(&log_info, 0, sizeof(struct ssd_log_info));

		while (log_info.nr_log < dev->internal_log.nr_log) {
			int skip = 0;

			switch (log->le.event) {
			/* skip the volatile log info */
			case SSD_LOG_SEU_FAULT:
			case SSD_LOG_SEU_FAULT1:
				skip = 1;
				break;
			case SSD_LOG_TIMEOUT:
				/* timeouts before the last power-on don't count */
				skip = (dev->last_poweron_id >= log_info.nr_log);
				break;
			}

			if (!skip) {
				log_info.stat[ssd_parse_log(dev, log, 0)]++;
			}

			log_info.nr_log++;
			log++;
		}

		/* check: any mismatch means the stored stats are stale */
		for (i=(SSD_LOG_NR_LEVEL-1); i>=0; i--) {
			if (log_info.stat[i] != dev->smart.log_info.stat[i]) {
				/* unclean */
				memcpy(&dev->smart.log_info, &log_info, sizeof(struct ssd_log_info));
				update_smart = 1;
				break;
			}
		}

		if (update_smart) {
			++dev->smart.version;
		}
	}

	/* rewrite every copy that is invalid or not at the current version */
	for (i=0; i<dev->rom_info.nr_smart; i++) {
		if (smart[i].magic == SSD_SMART_MAGIC && smart[i].version == dev->smart.version) {
			continue;
		}

		off = dev->rom_info.smart_base + (dev->rom_info.smart_sz * i);
		size = dev->rom_info.smart_sz;

		ret = ssd_spi_erase(dev, off, size);
		if (ret) {
			hio_warn("%s: info erase failed\n", dev->name);
			goto out;
		}

		size = sizeof(struct ssd_smart);
		ret = ssd_spi_write(dev, &dev->smart, off, size);
		if (ret) {
			hio_warn("%s: info write failed\n", dev->name);
			goto out;
		}

		//xx
	}

	/* sync smart with alarm led */
	if (dev->smart.io_stat.nr_to || dev->smart.io_stat.nr_rwerr || dev->smart.log_info.stat[SSD_LOG_LEVEL_ERR]) {
		hio_warn("%s: some fault found in the history info\n", dev->name);
		ssd_set_alarm(dev);
	}

out:
	kfree(smart);
out_nomem:
	/* skip error if not in standard mode */
	if (mode != SSD_DRV_MODE_STANDARD) {
		ret = 0;
	}
	return ret;
}
6508 | ||
6509 | /* bm */ | |
/*
 * Read the battery-module firmware version over SMBus: select the
 * system-data subclass, read the manufacturer data block from page 1,
 * and extract the firmware version field.
 *
 * Returns 0 and stores the version in *@ver on success; -EINVAL on bad
 * arguments, -EIO if the version field has its high nibble set
 * (treated as invalid), or the SMBus transfer error.
 */
static int __ssd_bm_get_version(struct ssd_device *dev, uint16_t *ver)
{
	struct ssd_bm_manufacturer_data bm_md = {0};
	uint16_t sc_id = SSD_BM_SYSTEM_DATA_SUBCLASS_ID;
	uint8_t cmd;
	int ret = 0;

	if (!dev || !ver) {
		return -EINVAL;
	}

	/* serialize the two-step select/read sequence */
	mutex_lock(&dev->bm_mutex);

	/* select the data-flash subclass to read from */
	cmd = SSD_BM_DATA_FLASH_SUBCLASS_ID;
	ret = ssd_smbus_write_word(dev, SSD_BM_SLAVE_ADDRESS, cmd, (uint8_t *)&sc_id);
	if (ret) {
		goto out;
	}

	cmd = SSD_BM_DATA_FLASH_SUBCLASS_ID_PAGE1;
	ret = ssd_smbus_read_block(dev, SSD_BM_SLAVE_ADDRESS, cmd, sizeof(struct ssd_bm_manufacturer_data), (uint8_t *)&bm_md);
	if (ret) {
		goto out;
	}

	/* high nibble set means the value is not a valid version */
	if (bm_md.firmware_ver & 0xF000) {
		ret = -EIO;
		goto out;
	}

	*ver = bm_md.firmware_ver;

out:
	mutex_unlock(&dev->bm_mutex);
	return ret;
}
6546 | ||
6547 | static int ssd_bm_get_version(struct ssd_device *dev, uint16_t *ver) | |
6548 | { | |
6549 | uint16_t tmp = 0; | |
6550 | int i = SSD_BM_RETRY_MAX; | |
6551 | int ret = 0; | |
6552 | ||
6553 | while (i-- > 0) { | |
6554 | ret = __ssd_bm_get_version(dev, &tmp); | |
6555 | if (!ret) { | |
6556 | break; | |
6557 | } | |
6558 | } | |
6559 | if (ret) { | |
6560 | return ret; | |
6561 | } | |
6562 | ||
6563 | *ver = tmp; | |
6564 | ||
6565 | return 0; | |
6566 | } | |
6567 | ||
/*
 * Read the number of capacitors from the battery module's configuration
 * registers over SMBus: select the configuration-registers subclass,
 * read page 1, and derive the count from the cc field (cc + 1).
 *
 * Returns 0 and stores the count in *@nr_cap on success; -EIO if the cc
 * field is out of its valid range (1..4), or the SMBus transfer error.
 */
static int __ssd_bm_nr_cap(struct ssd_device *dev, int *nr_cap)
{
	struct ssd_bm_configuration_registers bm_cr;
	uint16_t sc_id = SSD_BM_CONFIGURATION_REGISTERS_ID;
	uint8_t cmd;
	int ret;

	/* serialize the two-step select/read sequence */
	mutex_lock(&dev->bm_mutex);

	cmd = SSD_BM_DATA_FLASH_SUBCLASS_ID;
	ret = ssd_smbus_write_word(dev, SSD_BM_SLAVE_ADDRESS, cmd, (uint8_t *)&sc_id);
	if (ret) {
		goto out;
	}

	cmd = SSD_BM_DATA_FLASH_SUBCLASS_ID_PAGE1;
	ret = ssd_smbus_read_block(dev, SSD_BM_SLAVE_ADDRESS, cmd, sizeof(struct ssd_bm_configuration_registers), (uint8_t *)&bm_cr);
	if (ret) {
		goto out;
	}

	/* valid capacitor-count field is 1..4 */
	if (bm_cr.operation_cfg.cc == 0 || bm_cr.operation_cfg.cc > 4) {
		ret = -EIO;
		goto out;
	}

	*nr_cap = bm_cr.operation_cfg.cc + 1;

out:
	mutex_unlock(&dev->bm_mutex);
	return ret;
}
6600 | ||
6601 | static int ssd_bm_nr_cap(struct ssd_device *dev, int *nr_cap) | |
6602 | { | |
6603 | int tmp = 0; | |
6604 | int i = SSD_BM_RETRY_MAX; | |
6605 | int ret = 0; | |
6606 | ||
6607 | while (i-- > 0) { | |
6608 | ret = __ssd_bm_nr_cap(dev, &tmp); | |
6609 | if (!ret) { | |
6610 | break; | |
6611 | } | |
6612 | } | |
6613 | if (ret) { | |
6614 | return ret; | |
6615 | } | |
6616 | ||
6617 | *nr_cap = tmp; | |
6618 | ||
6619 | return 0; | |
6620 | } | |
6621 | ||
6622 | static int ssd_bm_enter_cap_learning(struct ssd_device *dev) | |
6623 | { | |
6624 | uint16_t buf = SSD_BM_ENTER_CAP_LEARNING; | |
6625 | uint8_t cmd = SSD_BM_MANUFACTURERACCESS; | |
6626 | int ret; | |
6627 | ||
6628 | ret = ssd_smbus_write_word(dev, SSD_BM_SLAVE_ADDRESS, cmd, (uint8_t *)&buf); | |
6629 | if (ret) { | |
6630 | goto out; | |
6631 | } | |
6632 | ||
6633 | out: | |
6634 | return ret; | |
6635 | } | |
6636 | ||
6637 | static int ssd_bm_get_sfstatus(struct ssd_device *dev, uint16_t *status) | |
6638 | { | |
6639 | uint16_t val = 0; | |
6640 | uint8_t cmd = SSD_BM_SAFETYSTATUS; | |
6641 | int ret; | |
6642 | ||
6643 | ret = ssd_smbus_read_word(dev, SSD_BM_SLAVE_ADDRESS, cmd, (uint8_t *)&val); | |
6644 | if (ret) { | |
6645 | goto out; | |
6646 | } | |
6647 | ||
6648 | *status = val; | |
6649 | out: | |
6650 | return ret; | |
6651 | } | |
6652 | ||
6653 | static int ssd_bm_get_opstatus(struct ssd_device *dev, uint16_t *status) | |
6654 | { | |
6655 | uint16_t val = 0; | |
6656 | uint8_t cmd = SSD_BM_OPERATIONSTATUS; | |
6657 | int ret; | |
6658 | ||
6659 | ret = ssd_smbus_read_word(dev, SSD_BM_SLAVE_ADDRESS, cmd, (uint8_t *)&val); | |
6660 | if (ret) { | |
6661 | goto out; | |
6662 | } | |
6663 | ||
6664 | *status = val; | |
6665 | out: | |
6666 | return ret; | |
6667 | } | |
6668 | ||
/*
 * Populate a struct ssd_bm snapshot by walking the ssd_bm_sbs command
 * table: each entry names an SBS register (byte- or word-sized), the
 * destination offset inside struct ssd_bm, and a unit conversion.
 * The result is copied to @bm_status_out only if every read succeeds.
 * Returns 0 on success, the SMBus error, or -1 on a malformed table
 * entry (unknown size/unit).
 */
static int ssd_get_bmstruct(struct ssd_device *dev, struct ssd_bm *bm_status_out)
{
	struct sbs_cmd *bm_sbs = ssd_bm_sbs;
	struct ssd_bm bm_status;
	uint8_t buf[2] = {0, };
	uint16_t val = 0;
	uint16_t cval;
	int ret = 0;

	memset(&bm_status, 0, sizeof(struct ssd_bm));

	/* table is terminated by an entry with a NULL description */
	while (bm_sbs->desc != NULL) {
		switch (bm_sbs->size) {
			case SBS_SIZE_BYTE:
				ret = ssd_smbus_read_byte(dev, SSD_BM_SLAVE_ADDRESS, bm_sbs->cmd, buf);
				if (ret) {
					//printf("Error: smbus read byte %#x\n", bm_sbs->cmd);
					goto out;
				}
				val = buf[0];
				break;
			case SBS_SIZE_WORD:
				ret = ssd_smbus_read_word(dev, SSD_BM_SLAVE_ADDRESS, bm_sbs->cmd, (uint8_t *)&val);
				if (ret) {
					//printf("Error: smbus read word %#x\n", bm_sbs->cmd);
					goto out;
				}
				//val = *(uint16_t *)buf;
				break;
			default:
				ret = -1;
				goto out;
				break;
		}

		/* store the raw or converted value at the table-specified offset */
		switch (bm_sbs->unit) {
			case SBS_UNIT_VALUE:
				*(uint16_t *)bm_var(&bm_status, bm_sbs->off) = val & bm_sbs->mask;
				break;
			case SBS_UNIT_TEMPERATURE:
				/* SBS temperature is in 0.1 K; convert to whole degrees C */
				cval = (uint16_t)(val - 2731) / 10;
				*(uint16_t *)bm_var(&bm_status, bm_sbs->off) = cval;
				break;
			case SBS_UNIT_VOLTAGE:
				*(uint16_t *)bm_var(&bm_status, bm_sbs->off) = val;
				break;
			case SBS_UNIT_CURRENT:
				*(uint16_t *)bm_var(&bm_status, bm_sbs->off) = val;
				break;
			case SBS_UNIT_ESR:
				*(uint16_t *)bm_var(&bm_status, bm_sbs->off) = val;
				break;
			case SBS_UNIT_PERCENT:
				*(uint16_t *)bm_var(&bm_status, bm_sbs->off) = val;
				break;
			case SBS_UNIT_CAPACITANCE:
				*(uint16_t *)bm_var(&bm_status, bm_sbs->off) = val;
				break;
			default:
				ret = -1;
				goto out;
				break;
		}

		bm_sbs++;
	}

	/* publish the snapshot only after a fully successful walk */
	memcpy(bm_status_out, &bm_status, sizeof(struct ssd_bm));

out:
	return ret;
}
6741 | ||
/*
 * Derive a coarse battery-module health state for @status.
 * Priority: any capacitor below SSD_BM_CAP_VOLT_MIN or a non-zero
 * SafetyStatus yields WARNING; otherwise bit 12 of OperationStatus
 * distinguishes CHARGING (clear) from OK (set).
 * Returns non-zero (with *status untouched) if any query fails.
 */
static int __ssd_bm_status(struct ssd_device *dev, int *status)
{
	struct ssd_bm bm_status = {0};
	int nr_cap = 0;
	int i;
	int ret = 0;

	ret = ssd_get_bmstruct(dev, &bm_status);
	if (ret) {
		goto out;
	}

	/* capacitor voltage */
	ret = ssd_bm_nr_cap(dev, &nr_cap);
	if (ret) {
		goto out;
	}

	for (i=0; i<nr_cap; i++) {
		if (bm_status.cap_volt[i] < SSD_BM_CAP_VOLT_MIN) {
			*status = SSD_BMSTATUS_WARNING;
			goto out;
		}
	}

	/* Safety Status */
	if (bm_status.sf_status) {
		*status = SSD_BMSTATUS_WARNING;
		goto out;
	}

	/* charge status: bit 12 of OperationStatus — clear means still charging */
	if (!((bm_status.op_status >> 12) & 0x1)) {
		*status = SSD_BMSTATUS_CHARGING;
	}else{
		*status = SSD_BMSTATUS_OK;
	}

out:
	return ret;
}
6783 | ||
6784 | static void ssd_set_flush_timeout(struct ssd_device *dev, int mode); | |
6785 | ||
/*
 * Periodic battery-module worker (queued by ssd_bm_routine_start).
 * On supercap-backed boards it polls OperationStatus and, when the
 * learning-related bits (0xF0) are all clear, kicks off a capacitance
 * learning cycle.  No-op unless the driver runs in standard mode on
 * protocol >= V3.1.1 hardware with SCAP power-loss protection.
 *
 * The two prototypes reflect the pre-2.6.20 workqueue API, which
 * passed an opaque data pointer instead of the work_struct itself.
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
static void ssd_bm_worker(void *data)
{
	struct ssd_device *dev = (struct ssd_device *)data;
#else
static void ssd_bm_worker(struct work_struct *work)
{
	struct ssd_device *dev = container_of(work, struct ssd_device, bm_work);
#endif

	uint16_t opstatus;
	int ret = 0;

	if (mode != SSD_DRV_MODE_STANDARD) {
		return;
	}

	if (dev->protocol_info.ver < SSD_PROTOCOL_V3_1_1) {
		return;
	}

	if (dev->hw_info_ext.plp_type != SSD_PLP_SCAP) {
		return;
	}

	ret = ssd_bm_get_opstatus(dev, &opstatus);
	if (ret) {
		hio_warn("%s: get bm operationstatus failed\n", dev->name);
		return;
	}

	/* need cap learning ? */
	/* NOTE(review): 0xF0 looks like the cap-learning status bits — confirm
	 * against the battery-module register spec. */
	if (!(opstatus & 0xF0)) {
		ret = ssd_bm_enter_cap_learning(dev);
		if (ret) {
			hio_warn("%s: enter capacitance learning failed\n", dev->name);
			return;
		}
	}
}
6826 | ||
/*
 * Timer callback that dispatches the periodic battery/capacitor work
 * onto the device workqueue: bm_work for pre-V3.2 protocols,
 * capmon_work for V3.2+.  Work is only queued while the workqueue is
 * marked initialized (SSD_INIT_WORKQ).
 *
 * The 4.15 kernel changed timer callbacks from an unsigned-long/void*
 * data argument to a struct timer_list pointer, hence the two forms.
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0))
static void ssd_bm_routine_start(void *data)
#else
static void ssd_bm_routine_start(struct timer_list *t)
#endif
{
	struct ssd_device *dev;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0))
	if (!data) {
		return;
	}
	dev = data;
#else
	dev = from_timer(dev, t, bm_timer);
#endif

	if (test_bit(SSD_INIT_WORKQ, &dev->state)) {
		if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
			queue_work(dev->workq, &dev->bm_work);
		} else {
			queue_work(dev->workq, &dev->capmon_work);
		}
	}
}
6852 | ||
6853 | /* CAP */ | |
/*
 * Run one hardware capacitance-learning cycle and return the measured
 * capacitance in *cap.
 *
 * Sequence: wait for the LM80 monitor to refresh, poll U1 until the
 * capacitor bank reads as fully charged, sample U2, trigger the
 * learning engine via SSD_PL_CAP_LEARN_REG and poll it to completion,
 * then combine U1/U2 and the discharge time into a capacitance via
 * SSD_PL_CAP_LEARN().
 *
 * Returns 0 on success (also trivially, with *cap = 0, on hardware
 * that has no supercap learning support), -ETIMEDOUT if charging or
 * learning never completes, -EINVAL on implausible measurements, or
 * an SMBus error code.
 */
static int ssd_do_cap_learn(struct ssd_device *dev, uint32_t *cap)
{
	uint32_t u1, u2, t;
	uint16_t val = 0;
	int wait = 0;
	int ret = 0;

	/* pre-V3.2 protocols have no learning engine */
	if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
		*cap = 0;
		return 0;
	}

	/* early FHHL PCB revisions (< 'B') lack the measuring circuit */
	if (dev->hw_info_ext.form_factor == SSD_FORM_FACTOR_FHHL && dev->hw_info.pcb_ver < 'B') {
		*cap = 0;
		return 0;
	}

	/* make sure the lm80 voltage value is updated */
	msleep(SSD_LM80_CONV_INTERVAL);

	/* check if full charged */
	wait = 0;
	for (;;) {
		ret = ssd_smbus_read_word(dev, SSD_SENSOR_LM80_SADDRESS, SSD_PL_CAP_U1, (uint8_t *)&val);
		if (ret) {
			/* log the sensor fault only on the first occurrence */
			if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80), &dev->hwmon)) {
				ssd_generate_sensor_fault_log(dev, SSD_LOG_SENSOR_FAULT, SSD_SENSOR_LM80_SADDRESS,ret);
			}
			goto out;
		}
		/* LM80 returns the word byte-swapped */
		u1 = SSD_LM80_CONVERT_VOLT(u16_swap(val));
		if (SSD_PL_CAP_VOLT(u1) >= SSD_PL_CAP_VOLT_FULL) {
			break;
		}

		wait++;
		if (wait > SSD_PL_CAP_CHARGE_MAX_WAIT) {
			ret = -ETIMEDOUT;
			goto out;
		}
		msleep(SSD_PL_CAP_CHARGE_WAIT);
	}

	ret = ssd_smbus_read_word(dev, SSD_SENSOR_LM80_SADDRESS, SSD_PL_CAP_U2, (uint8_t *)&val);
	if (ret) {
		if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80), &dev->hwmon)) {
			ssd_generate_sensor_fault_log(dev, SSD_LOG_SENSOR_FAULT, SSD_SENSOR_LM80_SADDRESS,ret);
		}
		goto out;
	}
	u2 = SSD_LM80_CONVERT_VOLT(u16_swap(val));

	/* equal voltages would make the capacitance formula degenerate */
	if (u1 == u2) {
		ret = -EINVAL;
		goto out;
	}

	/* enter cap learn */
	ssd_reg32_write(dev->ctrlp + SSD_PL_CAP_LEARN_REG, 0x1);

	wait = 0;
	for (;;) {
		msleep(SSD_PL_CAP_LEARN_WAIT);

		/* bit 1 = busy; poll until the engine finishes */
		t = ssd_reg32_read(dev->ctrlp + SSD_PL_CAP_LEARN_REG);
		if (!((t >> 1) & 0x1)) {
			break;
		}

		wait++;
		if (wait > SSD_PL_CAP_LEARN_MAX_WAIT) {
			ret = -ETIMEDOUT;
			goto out;
		}
	}

	/* bit 4 = hardware-reported learning timeout */
	if ((t >> 4) & 0x1) {
		ret = -ETIMEDOUT;
		goto out;
	}

	/* bits 8+ carry the measured discharge time; zero is invalid */
	t = (t >> 8);
	if (0 == t) {
		ret = -EINVAL;
		goto out;
	}

	*cap = SSD_PL_CAP_LEARN(u1, u2, t);

out:
	return ret;
}
6946 | ||
6947 | static int ssd_cap_learn(struct ssd_device *dev, uint32_t *cap) | |
6948 | { | |
6949 | int ret = 0; | |
6950 | ||
6951 | if (!dev || !cap) { | |
6952 | return -EINVAL; | |
6953 | } | |
6954 | ||
6955 | mutex_lock(&dev->bm_mutex); | |
6956 | ||
6957 | ssd_stop_workq(dev); | |
6958 | ||
6959 | ret = ssd_do_cap_learn(dev, cap); | |
6960 | if (ret) { | |
6961 | ssd_gen_swlog(dev, SSD_LOG_CAP_LEARN_FAULT, 0); | |
6962 | goto out; | |
6963 | } | |
6964 | ||
6965 | ssd_gen_swlog(dev, SSD_LOG_CAP_STATUS, *cap); | |
6966 | ||
6967 | out: | |
6968 | ssd_start_workq(dev); | |
6969 | mutex_unlock(&dev->bm_mutex); | |
6970 | ||
6971 | return ret; | |
6972 | } | |
6973 | ||
/*
 * Verify at init time that the power-loss-protection capacitor is
 * charged and arm the LM80 monitoring for it: poll the U1 voltage
 * until it reaches the ready threshold, program the low-voltage alarm
 * limit, and enable the corresponding LM80 input channel.
 * Returns 0 on success/on unsupported hardware; errors are suppressed
 * (forced to 0) when the driver is not in standard mode.
 */
static int ssd_check_pl_cap(struct ssd_device *dev)
{
	uint32_t u1;
	uint16_t val = 0;
	uint8_t low = 0;
	int wait = 0;
	int ret = 0;

	/* no supercap support before protocol V3.2 */
	if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
		return 0;
	}

	/* early FHHL PCB revisions (< 'B') lack the measuring circuit */
	if (dev->hw_info_ext.form_factor == SSD_FORM_FACTOR_FHHL && dev->hw_info.pcb_ver < 'B') {
		return 0;
	}

	/* cap ready ? */
	wait = 0;
	for (;;) {
		ret = ssd_smbus_read_word(dev, SSD_SENSOR_LM80_SADDRESS, SSD_PL_CAP_U1, (uint8_t *)&val);
		if (ret) {
			/* log the sensor fault only on the first occurrence */
			if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80), &dev->hwmon)) {
				ssd_generate_sensor_fault_log(dev, SSD_LOG_SENSOR_FAULT, SSD_SENSOR_LM80_SADDRESS,ret);
			}
			goto out;
		}
		/* LM80 returns the word byte-swapped */
		u1 = SSD_LM80_CONVERT_VOLT(u16_swap(val));
		if (SSD_PL_CAP_VOLT(u1) >= SSD_PL_CAP_VOLT_READY) {
			break;
		}

		wait++;
		if (wait > SSD_PL_CAP_CHARGE_MAX_WAIT) {
			ret = -ETIMEDOUT;
			ssd_gen_swlog(dev, SSD_LOG_CAP_VOLT_FAULT, SSD_PL_CAP_VOLT(u1));
			goto out;
		}
		msleep(SSD_PL_CAP_CHARGE_WAIT);
	}

	/* program the low-voltage alarm threshold for the cap input */
	low = ssd_lm80_limit[SSD_LM80_IN_CAP].low;
	ret = ssd_smbus_write_byte(dev, SSD_SENSOR_LM80_SADDRESS, SSD_LM80_REG_IN_MIN(SSD_LM80_IN_CAP), &low);
	if (ret) {
		goto out;
	}

	/* enable cap INx */
	ret = ssd_lm80_enable_in(dev, SSD_SENSOR_LM80_SADDRESS, SSD_LM80_IN_CAP);
	if (ret) {
		if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80), &dev->hwmon)) {
			ssd_generate_sensor_fault_log(dev, SSD_LOG_SENSOR_FAULT, SSD_SENSOR_LM80_SADDRESS,ret);
		}
		goto out;
	}

out:
	/* skip error if not in standard mode */
	if (mode != SSD_DRV_MODE_STANDARD) {
		ret = 0;
	}
	return ret;
}
7036 | ||
7037 | static int ssd_check_pl_cap_fast(struct ssd_device *dev) | |
7038 | { | |
7039 | uint32_t u1; | |
7040 | uint16_t val = 0; | |
7041 | int ret = 0; | |
7042 | ||
7043 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
7044 | return 0; | |
7045 | } | |
7046 | ||
7047 | if (dev->hw_info_ext.form_factor == SSD_FORM_FACTOR_FHHL && dev->hw_info.pcb_ver < 'B') { | |
7048 | return 0; | |
7049 | } | |
7050 | ||
7051 | /* cap ready ? */ | |
7052 | ret = ssd_smbus_read_word(dev, SSD_SENSOR_LM80_SADDRESS, SSD_PL_CAP_U1, (uint8_t *)&val); | |
7053 | if (ret) { | |
7054 | goto out; | |
7055 | } | |
7056 | u1 = SSD_LM80_CONVERT_VOLT(u16_swap(val)); | |
7057 | if (SSD_PL_CAP_VOLT(u1) < SSD_PL_CAP_VOLT_READY) { | |
7058 | ret = 1; | |
7059 | } | |
7060 | ||
7061 | out: | |
7062 | return ret; | |
7063 | } | |
7064 | ||
7065 | static int ssd_init_pl_cap(struct ssd_device *dev) | |
7066 | { | |
7067 | int ret = 0; | |
7068 | ||
7069 | /* set here: user write mode */ | |
7070 | dev->user_wmode = wmode; | |
7071 | ||
7072 | mutex_init(&dev->bm_mutex); | |
7073 | ||
7074 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
7075 | uint32_t val; | |
7076 | val = ssd_reg32_read(dev->ctrlp + SSD_BM_FAULT_REG); | |
7077 | if ((val >> 1) & 0x1) { | |
7078 | (void)test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon); | |
7079 | } | |
7080 | } else { | |
7081 | ret = ssd_check_pl_cap(dev); | |
7082 | if (ret) { | |
7083 | (void)test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon); | |
7084 | } | |
7085 | } | |
7086 | ||
7087 | return 0; | |
7088 | } | |
7089 | ||
7090 | /* label */ | |
/*
 * Ensure a fixed-size label field is NUL-terminated.
 *
 * Scans the first @len bytes of @str; if a terminator is already
 * present the buffer is left untouched.  Otherwise the field is
 * treated as invalid and cleared by writing '\0' to its first byte.
 */
static void __end_str(char *str, int len)
{
	char *p = str;
	char *const end = str + len;

	while (p < end) {
		if ('\0' == *p)
			return;
		p++;
	}
	*str = '\0';
}
7101 | ||
/*
 * Load the board label from SPI flash into dev->label (pre-V3.2
 * layout) or dev->labelv3 (V3.2+ layout) and force-terminate every
 * text field.  On a read failure the in-memory label is zeroed.
 * Errors are suppressed (forced to 0) outside standard driver mode.
 */
static int ssd_init_label(struct ssd_device *dev)
{
	uint32_t off;
	uint32_t size;
	int ret;

	/* label location */
	off = dev->rom_info.label_base;

	if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
		size = sizeof(struct ssd_label);

		/* read label */
		ret = ssd_spi_read(dev, &dev->label, off, size);
		if (ret) {
			memset(&dev->label, 0, size);
			goto out;
		}

		/* flash data is untrusted: terminate each fixed-size field */
		__end_str(dev->label.date, SSD_LABEL_FIELD_SZ);
		__end_str(dev->label.sn, SSD_LABEL_FIELD_SZ);
		__end_str(dev->label.part, SSD_LABEL_FIELD_SZ);
		__end_str(dev->label.desc, SSD_LABEL_FIELD_SZ);
		__end_str(dev->label.other, SSD_LABEL_FIELD_SZ);
		__end_str(dev->label.maf, SSD_LABEL_FIELD_SZ);
	} else {
		size = sizeof(struct ssd_labelv3);

		/* read label */
		ret = ssd_spi_read(dev, &dev->labelv3, off, size);
		if (ret) {
			memset(&dev->labelv3, 0, size);
			goto out;
		}

		__end_str(dev->labelv3.boardtype, SSD_LABEL_FIELD_SZ);
		__end_str(dev->labelv3.barcode, SSD_LABEL_FIELD_SZ);
		__end_str(dev->labelv3.item, SSD_LABEL_FIELD_SZ);
		/* description is the one wider field in the v3 layout */
		__end_str(dev->labelv3.description, SSD_LABEL_DESC_SZ);
		__end_str(dev->labelv3.manufactured, SSD_LABEL_FIELD_SZ);
		__end_str(dev->labelv3.vendorname, SSD_LABEL_FIELD_SZ);
		__end_str(dev->labelv3.issuenumber, SSD_LABEL_FIELD_SZ);
		__end_str(dev->labelv3.cleicode, SSD_LABEL_FIELD_SZ);
		__end_str(dev->labelv3.bom, SSD_LABEL_FIELD_SZ);
	}

out:
	/* skip error if not in standard mode */
	if (mode != SSD_DRV_MODE_STANDARD) {
		ret = 0;
	}
	return ret;
}
7155 | ||
7156 | int ssd_get_label(struct block_device *bdev, struct ssd_label *label) | |
7157 | { | |
7158 | struct ssd_device *dev; | |
7159 | ||
7160 | if (!bdev || !label || !(bdev->bd_disk)) { | |
7161 | return -EINVAL; | |
7162 | } | |
7163 | ||
7164 | dev = bdev->bd_disk->private_data; | |
7165 | ||
7166 | if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) { | |
7167 | memset(label, 0, sizeof(struct ssd_label)); | |
7168 | memcpy(label->date, dev->labelv3.manufactured, SSD_LABEL_FIELD_SZ); | |
7169 | memcpy(label->sn, dev->labelv3.barcode, SSD_LABEL_FIELD_SZ); | |
7170 | memcpy(label->desc, dev->labelv3.boardtype, SSD_LABEL_FIELD_SZ); | |
7171 | memcpy(label->maf, dev->labelv3.vendorname, SSD_LABEL_FIELD_SZ); | |
7172 | } else { | |
7173 | memcpy(label, &dev->label, sizeof(struct ssd_label)); | |
7174 | } | |
7175 | ||
7176 | return 0; | |
7177 | } | |
7178 | ||
7179 | static int __ssd_get_version(struct ssd_device *dev, struct ssd_version_info *ver) | |
7180 | { | |
7181 | uint16_t bm_ver = 0; | |
7182 | int ret = 0; | |
7183 | ||
7184 | if (dev->protocol_info.ver > SSD_PROTOCOL_V3 && dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
7185 | ret = ssd_bm_get_version(dev, &bm_ver); | |
7186 | if(ret){ | |
7187 | goto out; | |
7188 | } | |
7189 | } | |
7190 | ||
7191 | ver->bridge_ver = dev->hw_info.bridge_ver; | |
7192 | ver->ctrl_ver = dev->hw_info.ctrl_ver; | |
7193 | ver->bm_ver = bm_ver; | |
7194 | ver->pcb_ver = dev->hw_info.pcb_ver; | |
7195 | ver->upper_pcb_ver = dev->hw_info.upper_pcb_ver; | |
7196 | ||
7197 | out: | |
7198 | return ret; | |
7199 | ||
7200 | } | |
7201 | ||
7202 | int ssd_get_version(struct block_device *bdev, struct ssd_version_info *ver) | |
7203 | { | |
7204 | struct ssd_device *dev; | |
7205 | int ret; | |
7206 | ||
7207 | if (!bdev || !ver || !(bdev->bd_disk)) { | |
7208 | return -EINVAL; | |
7209 | } | |
7210 | ||
7211 | dev = bdev->bd_disk->private_data; | |
7212 | ||
7213 | mutex_lock(&dev->fw_mutex); | |
7214 | ret = __ssd_get_version(dev, ver); | |
7215 | mutex_unlock(&dev->fw_mutex); | |
7216 | ||
7217 | return ret; | |
7218 | } | |
7219 | ||
/*
 * Report the hottest controller temperature in *temp (degrees,
 * per CUR_TEMP()).  Protocols <= V3 have no sensor and report 0.
 * Controllers whose register reads back as all-ones are skipped;
 * if none respond, *temp is left at the -300 sentinel floor.
 * Always returns 0.
 */
static int __ssd_get_temperature(struct ssd_device *dev, int *temp)
{
	uint64_t val;
	uint32_t off;
	int max = -300;
	int cur;
	int i;

	if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) {
		*temp = 0;
		return 0;
	}

	/* fault injection hook: debug log events can override the reading */
	if (finject) {
		if (dev->db_info.type == SSD_DEBUG_LOG &&
			(dev->db_info.data.log.event == SSD_LOG_OVER_TEMP ||
			dev->db_info.data.log.event == SSD_LOG_NORMAL_TEMP ||
			dev->db_info.data.log.event == SSD_LOG_WARN_TEMP)) {
			*temp = (int)dev->db_info.data.log.extra;
			return 0;
		}
	}

	for (i=0; i<dev->hw_info.nr_ctrl; i++) {
		off = SSD_CTRL_TEMP_REG0 + i * sizeof(uint64_t);

		val = ssd_reg_read(dev->ctrlp + off);
		/* all-ones means the controller did not respond — skip it */
		if (val == 0xffffffffffffffffull) {
			continue;
		}

		cur = (int)CUR_TEMP(val);
		if (cur >= max) {
			max = cur;
		}
	}

	*temp = max;

	return 0;
}
7261 | ||
7262 | int ssd_get_temperature(struct block_device *bdev, int *temp) | |
7263 | { | |
7264 | struct ssd_device *dev; | |
7265 | int ret; | |
7266 | ||
7267 | if (!bdev || !temp || !(bdev->bd_disk)) { | |
7268 | return -EINVAL; | |
7269 | } | |
7270 | ||
7271 | dev = bdev->bd_disk->private_data; | |
7272 | ||
7273 | ||
7274 | mutex_lock(&dev->fw_mutex); | |
7275 | ret = __ssd_get_temperature(dev, temp); | |
7276 | mutex_unlock(&dev->fw_mutex); | |
7277 | ||
7278 | return ret; | |
7279 | } | |
7280 | ||
7281 | int ssd_set_otprotect(struct block_device *bdev, int otprotect) | |
7282 | { | |
7283 | struct ssd_device *dev; | |
7284 | ||
7285 | if (!bdev || !(bdev->bd_disk)) { | |
7286 | return -EINVAL; | |
7287 | } | |
7288 | ||
7289 | dev = bdev->bd_disk->private_data; | |
7290 | ssd_set_ot_protect(dev, !!otprotect); | |
7291 | ||
7292 | return 0; | |
7293 | } | |
7294 | ||
7295 | int ssd_bm_status(struct block_device *bdev, int *status) | |
7296 | { | |
7297 | struct ssd_device *dev; | |
7298 | int ret = 0; | |
7299 | ||
7300 | if (!bdev || !status || !(bdev->bd_disk)) { | |
7301 | return -EINVAL; | |
7302 | } | |
7303 | ||
7304 | dev = bdev->bd_disk->private_data; | |
7305 | ||
7306 | mutex_lock(&dev->fw_mutex); | |
7307 | if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) { | |
7308 | if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) { | |
7309 | *status = SSD_BMSTATUS_WARNING; | |
7310 | } else { | |
7311 | *status = SSD_BMSTATUS_OK; | |
7312 | } | |
7313 | } else if(dev->protocol_info.ver > SSD_PROTOCOL_V3) { | |
7314 | ret = __ssd_bm_status(dev, status); | |
7315 | } else { | |
7316 | *status = SSD_BMSTATUS_OK; | |
7317 | } | |
7318 | mutex_unlock(&dev->fw_mutex); | |
7319 | ||
7320 | return ret; | |
7321 | } | |
7322 | ||
7323 | int ssd_get_pciaddr(struct block_device *bdev, struct pci_addr *paddr) | |
7324 | { | |
7325 | struct ssd_device *dev; | |
7326 | ||
7327 | if (!bdev || !paddr || !bdev->bd_disk) { | |
7328 | return -EINVAL; | |
7329 | } | |
7330 | ||
7331 | dev = bdev->bd_disk->private_data; | |
7332 | ||
7333 | paddr->domain = pci_domain_nr(dev->pdev->bus); | |
7334 | paddr->bus = dev->pdev->bus->number; | |
7335 | paddr->slot = PCI_SLOT(dev->pdev->devfn); | |
7336 | paddr->func= PCI_FUNC(dev->pdev->devfn); | |
7337 | ||
7338 | return 0; | |
7339 | } | |
7340 | ||
7341 | /* acc */ | |
7342 | static int ssd_bb_acc(struct ssd_device *dev, struct ssd_acc_info *acc) | |
7343 | { | |
7344 | uint32_t val; | |
7345 | int ctrl, chip; | |
7346 | ||
7347 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_1_1) { | |
7348 | return -EOPNOTSUPP; | |
7349 | } | |
7350 | ||
7351 | acc->threshold_l1 = ssd_reg32_read(dev->ctrlp + SSD_BB_THRESHOLD_L1_REG); | |
7352 | if (0xffffffffull == acc->threshold_l1) { | |
7353 | return -EIO; | |
7354 | } | |
7355 | acc->threshold_l2 = ssd_reg32_read(dev->ctrlp + SSD_BB_THRESHOLD_L2_REG); | |
7356 | if (0xffffffffull == acc->threshold_l2) { | |
7357 | return -EIO; | |
7358 | } | |
7359 | acc->val = 0; | |
7360 | ||
7361 | for (ctrl=0; ctrl<dev->hw_info.nr_ctrl; ctrl++) { | |
7362 | for (chip=0; chip<dev->hw_info.nr_chip; chip++) { | |
7363 | val = ssd_reg32_read(dev->ctrlp + SSD_BB_ACC_REG0 + (SSD_CTRL_REG_ZONE_SZ * ctrl) + (SSD_BB_ACC_REG_SZ * chip)); | |
7364 | if (0xffffffffull == acc->val) { | |
7365 | return -EIO; | |
7366 | } | |
7367 | if (val > acc->val) { | |
7368 | acc->val = val; | |
7369 | } | |
7370 | } | |
7371 | } | |
7372 | ||
7373 | return 0; | |
7374 | } | |
7375 | ||
7376 | static int ssd_ec_acc(struct ssd_device *dev, struct ssd_acc_info *acc) | |
7377 | { | |
7378 | uint32_t val; | |
7379 | int ctrl, chip; | |
7380 | ||
7381 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_1_1) { | |
7382 | return -EOPNOTSUPP; | |
7383 | } | |
7384 | ||
7385 | acc->threshold_l1 = ssd_reg32_read(dev->ctrlp + SSD_EC_THRESHOLD_L1_REG); | |
7386 | if (0xffffffffull == acc->threshold_l1) { | |
7387 | return -EIO; | |
7388 | } | |
7389 | acc->threshold_l2 = ssd_reg32_read(dev->ctrlp + SSD_EC_THRESHOLD_L2_REG); | |
7390 | if (0xffffffffull == acc->threshold_l2) { | |
7391 | return -EIO; | |
7392 | } | |
7393 | acc->val = 0; | |
7394 | ||
7395 | for (ctrl=0; ctrl<dev->hw_info.nr_ctrl; ctrl++) { | |
7396 | for (chip=0; chip<dev->hw_info.nr_chip; chip++) { | |
7397 | val = ssd_reg32_read(dev->ctrlp + SSD_EC_ACC_REG0 + (SSD_CTRL_REG_ZONE_SZ * ctrl) + (SSD_EC_ACC_REG_SZ * chip)); | |
7398 | if (0xffffffffull == acc->val) { | |
7399 | return -EIO; | |
7400 | } | |
7401 | ||
7402 | if (val > acc->val) { | |
7403 | acc->val = val; | |
7404 | } | |
7405 | } | |
7406 | } | |
7407 | ||
7408 | return 0; | |
7409 | } | |
7410 | ||
7411 | ||
7412 | /* ram r&w */ | |
/*
 * Read one DMA chunk (at most hw_info.ram_max_len bytes) from a
 * controller's on-board RAM into @buf.
 * @ofs and @length must be multiples of hw_info.ram_align, non-zero,
 * and within ram_size; otherwise -EINVAL.  The buffer is streaming-
 * DMA-mapped for the duration of the request.
 */
static int ssd_ram_read_4k(struct ssd_device *dev, void *buf, size_t length, loff_t ofs, int ctrl_idx)
{
	struct ssd_ram_op_msg *msg;
	dma_addr_t buf_dma;
	size_t len = length;
	loff_t ofs_w = ofs;
	int ret = 0;

	if (ctrl_idx >= dev->hw_info.nr_ctrl || (uint64_t)(ofs + length) > dev->hw_info.ram_size
		|| !length || length > dev->hw_info.ram_max_len
		|| (length & (dev->hw_info.ram_align - 1)) != 0 || ((uint64_t)ofs & (dev->hw_info.ram_align - 1)) != 0) {
		return -EINVAL;
	}

	/* the device expects length and offset in ram_align units */
	len /= dev->hw_info.ram_align;
	do_div(ofs_w, dev->hw_info.ram_align);

	buf_dma = pci_map_single(dev->pdev, buf, length, PCI_DMA_FROMDEVICE);
/* dma_mapping_error() gained a device argument after 2.6.26 */
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
	ret = dma_mapping_error(buf_dma);
#else
	ret = dma_mapping_error(&(dev->pdev->dev), buf_dma);
#endif
	if (ret) {
		hio_warn("%s: unable to map read DMA buffer\n", dev->name);
		goto out_dma_mapping;
	}

	msg = (struct ssd_ram_op_msg *)ssd_get_dmsg(dev);

	msg->fun = SSD_FUNC_RAM_READ;
	msg->ctrl_idx = ctrl_idx;
	msg->start = (uint32_t)ofs_w;
	msg->length = len;
	msg->buf = buf_dma;

	ret = ssd_do_request(dev, READ, msg, NULL);
	ssd_put_dmsg(msg);

	pci_unmap_single(dev->pdev, buf_dma, length, PCI_DMA_FROMDEVICE);

out_dma_mapping:
	return ret;
}
7457 | ||
/*
 * Write one DMA chunk (at most hw_info.ram_max_len bytes) from @buf
 * into a controller's on-board RAM.
 * Same argument contract as ssd_ram_read_4k(): aligned, non-zero,
 * in-bounds, single chunk; otherwise -EINVAL.
 */
static int ssd_ram_write_4k(struct ssd_device *dev, void *buf, size_t length, loff_t ofs, int ctrl_idx)
{
	struct ssd_ram_op_msg *msg;
	dma_addr_t buf_dma;
	size_t len = length;
	loff_t ofs_w = ofs;
	int ret = 0;

	if (ctrl_idx >= dev->hw_info.nr_ctrl || (uint64_t)(ofs + length) > dev->hw_info.ram_size
		|| !length || length > dev->hw_info.ram_max_len
		|| (length & (dev->hw_info.ram_align - 1)) != 0 || ((uint64_t)ofs & (dev->hw_info.ram_align - 1)) != 0) {
		return -EINVAL;
	}

	/* the device expects length and offset in ram_align units */
	len /= dev->hw_info.ram_align;
	do_div(ofs_w, dev->hw_info.ram_align);

	buf_dma = pci_map_single(dev->pdev, buf, length, PCI_DMA_TODEVICE);
/* dma_mapping_error() gained a device argument after 2.6.26 */
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
	ret = dma_mapping_error(buf_dma);
#else
	ret = dma_mapping_error(&(dev->pdev->dev), buf_dma);
#endif
	if (ret) {
		hio_warn("%s: unable to map write DMA buffer\n", dev->name);
		goto out_dma_mapping;
	}

	msg = (struct ssd_ram_op_msg *)ssd_get_dmsg(dev);

	msg->fun = SSD_FUNC_RAM_WRITE;
	msg->ctrl_idx = ctrl_idx;
	msg->start = (uint32_t)ofs_w;
	msg->length = len;
	msg->buf = buf_dma;

	ret = ssd_do_request(dev, WRITE, msg, NULL);
	ssd_put_dmsg(msg);

	pci_unmap_single(dev->pdev, buf_dma, length, PCI_DMA_TODEVICE);

out_dma_mapping:
	return ret;

}
7503 | ||
/*
 * Read an arbitrary-length, aligned region of controller RAM by
 * splitting it into ram_max_len-sized chunks and delegating to
 * ssd_ram_read_4k().  Stops and returns the first chunk error.
 */
static int ssd_ram_read(struct ssd_device *dev, void *buf, size_t length, loff_t ofs, int ctrl_idx)
{
	int left = length;
	size_t len;
	loff_t off = ofs;
	int ret = 0;

	if (ctrl_idx >= dev->hw_info.nr_ctrl || (uint64_t)(ofs + length) > dev->hw_info.ram_size || !length
		|| (length & (dev->hw_info.ram_align - 1)) != 0 || ((uint64_t)ofs & (dev->hw_info.ram_align - 1)) != 0) {
		return -EINVAL;
	}

	while (left > 0) {
		/* full chunk, or the remainder on the last iteration */
		len = dev->hw_info.ram_max_len;
		if (left < (int)dev->hw_info.ram_max_len) {
			len = left;
		}

		ret = ssd_ram_read_4k(dev, buf, len, off, ctrl_idx);
		if (ret) {
			break;
		}

		left -= len;
		off += len;
		buf += len;	/* void* arithmetic: GCC extension, treated as char* */
	}

	return ret;
}
7534 | ||
/*
 * Write an arbitrary-length, aligned region of controller RAM by
 * splitting it into ram_max_len-sized chunks and delegating to
 * ssd_ram_write_4k().  Stops and returns the first chunk error.
 */
static int ssd_ram_write(struct ssd_device *dev, void *buf, size_t length, loff_t ofs, int ctrl_idx)
{
	int left = length;
	size_t len;
	loff_t off = ofs;
	int ret = 0;

	if (ctrl_idx >= dev->hw_info.nr_ctrl || (uint64_t)(ofs + length) > dev->hw_info.ram_size || !length
		|| (length & (dev->hw_info.ram_align - 1)) != 0 || ((uint64_t)ofs & (dev->hw_info.ram_align - 1)) != 0) {
		return -EINVAL;
	}

	while (left > 0) {
		/* full chunk, or the remainder on the last iteration */
		len = dev->hw_info.ram_max_len;
		if (left < (int)dev->hw_info.ram_max_len) {
			len = left;
		}

		ret = ssd_ram_write_4k(dev, buf, len, off, ctrl_idx);
		if (ret) {
			break;
		}

		left -= len;
		off += len;
		buf += len;	/* void* arithmetic: GCC extension, treated as char* */
	}

	return ret;
}
7565 | ||
7566 | ||
7567 | /* flash op */ | |
7568 | static int ssd_check_flash(struct ssd_device *dev, int flash, int page, int ctrl_idx) | |
7569 | { | |
7570 | int cur_ch = flash % dev->hw_info.max_ch; | |
7571 | int cur_chip = flash /dev->hw_info.max_ch; | |
7572 | ||
7573 | if (ctrl_idx >= dev->hw_info.nr_ctrl) { | |
7574 | return -EINVAL; | |
7575 | } | |
7576 | ||
7577 | if (cur_ch >= dev->hw_info.nr_ch || cur_chip >= dev->hw_info.nr_chip) { | |
7578 | return -EINVAL; | |
7579 | } | |
7580 | ||
7581 | if (page >= (int)(dev->hw_info.block_count * dev->hw_info.page_count)) { | |
7582 | return -EINVAL; | |
7583 | } | |
7584 | return 0; | |
7585 | } | |
7586 | ||
/*
 * Read a NAND chip's ID bytes into @id (SSD_NAND_ID_BUFF_SZ bytes).
 * Pre-V3 protocols have no separate CE field, so chip select is
 * folded into the low bit of the flash number.  The buffer is
 * streaming-DMA-mapped for the duration of the request.
 */
static int ssd_nand_read_id(struct ssd_device *dev, void *id, int flash, int chip, int ctrl_idx)
{
	struct ssd_nand_op_msg *msg;
	dma_addr_t buf_dma;
	int ret = 0;

	if (unlikely(!id))
		return -EINVAL;

	buf_dma = pci_map_single(dev->pdev, id, SSD_NAND_ID_BUFF_SZ, PCI_DMA_FROMDEVICE);
/* dma_mapping_error() gained a device argument after 2.6.26 */
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
	ret = dma_mapping_error(buf_dma);
#else
	ret = dma_mapping_error(&(dev->pdev->dev), buf_dma);
#endif
	if (ret) {
		hio_warn("%s: unable to map read DMA buffer\n", dev->name);
		goto out_dma_mapping;
	}

	/* old protocol: pack chip-enable into bit 0 of the flash number */
	if (dev->protocol_info.ver < SSD_PROTOCOL_V3) {
		flash = ((uint32_t)flash << 1) | (uint32_t)chip;
		chip = 0;
	}

	msg = (struct ssd_nand_op_msg *)ssd_get_dmsg(dev);

	msg->fun = SSD_FUNC_NAND_READ_ID;
	msg->chip_no = flash;
	msg->chip_ce = chip;
	msg->ctrl_idx = ctrl_idx;
	msg->buf = buf_dma;

	ret = ssd_do_request(dev, READ, msg, NULL);
	ssd_put_dmsg(msg);

	pci_unmap_single(dev->pdev, buf_dma, SSD_NAND_ID_BUFF_SZ, PCI_DMA_FROMDEVICE);

out_dma_mapping:
	return ret;
}
7628 | ||
#if 0
/*
 * ssd_nand_read() - raw NAND page read without OOB bytes.
 *
 * Disabled (dead) code kept for reference; ssd_nand_read_w_oob() below is
 * the variant actually in use.  Reads @page_count pages starting at @page
 * from chip @chip of flash @flash into @buf through a single streaming DMA
 * mapping, then issues an SSD_FUNC_NAND_READ message to the controller.
 *
 * Returns 0 on success or a negative errno.
 */
static int ssd_nand_read(struct ssd_device *dev, void *buf,
	int flash, int chip, int page, int page_count, int ctrl_idx)
{
	struct ssd_nand_op_msg *msg;
	dma_addr_t buf_dma;
	int length;
	int ret = 0;

	if (!buf) {
		return -EINVAL;
	}

	/* request must stay within the chip's page address space */
	if ((page + page_count) > dev->hw_info.block_count*dev->hw_info.page_count) {
		return -EINVAL;
	}

	ret = ssd_check_flash(dev, flash, page, ctrl_idx);
	if (ret) {
		return ret;
	}

	length = page_count * dev->hw_info.page_size;

	buf_dma = pci_map_single(dev->pdev, buf, length, PCI_DMA_FROMDEVICE);
	/* dma_mapping_error() lost its dev argument before 2.6.27 */
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
	ret = dma_mapping_error(buf_dma);
#else
	ret = dma_mapping_error(&(dev->pdev->dev), buf_dma);
#endif
	if (ret) {
		hio_warn("%s: unable to map read DMA buffer\n", dev->name);
		goto out_dma_mapping;
	}

	/* pre-V3 firmware encodes the CE line in the low bit of the chip no */
	if (dev->protocol_info.ver < SSD_PROTOCOL_V3) {
		flash = (flash << 1) | chip;
		chip = 0;
	}

	msg = (struct ssd_nand_op_msg *)ssd_get_dmsg(dev);

	msg->fun = SSD_FUNC_NAND_READ;
	msg->ctrl_idx = ctrl_idx;
	msg->chip_no = flash;
	msg->chip_ce = chip;
	msg->page_no = page;
	msg->page_count = page_count;
	msg->buf = buf_dma;

	ret = ssd_do_request(dev, READ, msg, NULL);
	ssd_put_dmsg(msg);

	pci_unmap_single(dev->pdev, buf_dma, length, PCI_DMA_FROMDEVICE);

out_dma_mapping:
	return ret;
}
#endif
7688 | ||
7689 | static int ssd_nand_read_w_oob(struct ssd_device *dev, void *buf, | |
7690 | int flash, int chip, int page, int count, int ctrl_idx) | |
7691 | { | |
7692 | struct ssd_nand_op_msg *msg; | |
7693 | dma_addr_t buf_dma; | |
7694 | int length; | |
7695 | int ret = 0; | |
7696 | ||
7697 | if (!buf) { | |
7698 | return -EINVAL; | |
7699 | } | |
7700 | ||
7701 | if ((page + count) > (int)(dev->hw_info.block_count * dev->hw_info.page_count)) { | |
7702 | return -EINVAL; | |
7703 | } | |
7704 | ||
7705 | ret = ssd_check_flash(dev, flash, page, ctrl_idx); | |
7706 | if (ret) { | |
7707 | return ret; | |
7708 | } | |
7709 | ||
7710 | length = count * (dev->hw_info.page_size + dev->hw_info.oob_size); | |
7711 | ||
7712 | buf_dma = pci_map_single(dev->pdev, buf, length, PCI_DMA_FROMDEVICE); | |
7713 | #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26)) | |
7714 | ret = dma_mapping_error(buf_dma); | |
7715 | #else | |
7716 | ret = dma_mapping_error(&(dev->pdev->dev), buf_dma); | |
7717 | #endif | |
7718 | if (ret) { | |
7719 | hio_warn("%s: unable to map read DMA buffer\n", dev->name); | |
7720 | goto out_dma_mapping; | |
7721 | } | |
7722 | ||
7723 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3) { | |
7724 | flash = ((uint32_t)flash << 1) | (uint32_t)chip; | |
7725 | chip = 0; | |
7726 | } | |
7727 | ||
7728 | msg = (struct ssd_nand_op_msg *)ssd_get_dmsg(dev); | |
7729 | ||
7730 | msg->fun = SSD_FUNC_NAND_READ_WOOB; | |
7731 | msg->ctrl_idx = ctrl_idx; | |
7732 | msg->chip_no = flash; | |
7733 | msg->chip_ce = chip; | |
7734 | msg->page_no = page; | |
7735 | msg->page_count = count; | |
7736 | msg->buf = buf_dma; | |
7737 | ||
7738 | ret = ssd_do_request(dev, READ, msg, NULL); | |
7739 | ssd_put_dmsg(msg); | |
7740 | ||
7741 | pci_unmap_single(dev->pdev, buf_dma, length, PCI_DMA_FROMDEVICE); | |
7742 | ||
7743 | out_dma_mapping: | |
7744 | return ret; | |
7745 | } | |
7746 | ||
7747 | /* write 1 page */ | |
7748 | static int ssd_nand_write(struct ssd_device *dev, void *buf, | |
7749 | int flash, int chip, int page, int count, int ctrl_idx) | |
7750 | { | |
7751 | struct ssd_nand_op_msg *msg; | |
7752 | dma_addr_t buf_dma; | |
7753 | int length; | |
7754 | int ret = 0; | |
7755 | ||
7756 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3) { | |
7757 | return -EINVAL; | |
7758 | } | |
7759 | ||
7760 | if (!buf) { | |
7761 | return -EINVAL; | |
7762 | } | |
7763 | ||
7764 | if (count != 1) { | |
7765 | return -EINVAL; | |
7766 | } | |
7767 | ||
7768 | ret = ssd_check_flash(dev, flash, page, ctrl_idx); | |
7769 | if (ret) { | |
7770 | return ret; | |
7771 | } | |
7772 | ||
7773 | length = count * (dev->hw_info.page_size + dev->hw_info.oob_size); | |
7774 | ||
7775 | /* write data to ram */ | |
7776 | /*ret = ssd_ram_write(dev, buf, length, dev->hw_info.nand_wbuff_base, ctrl_idx); | |
7777 | if (ret) { | |
7778 | return ret; | |
7779 | }*/ | |
7780 | ||
7781 | buf_dma = pci_map_single(dev->pdev, buf, length, PCI_DMA_TODEVICE); | |
7782 | #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26)) | |
7783 | ret = dma_mapping_error(buf_dma); | |
7784 | #else | |
7785 | ret = dma_mapping_error(&(dev->pdev->dev), buf_dma); | |
7786 | #endif | |
7787 | if (ret) { | |
7788 | hio_warn("%s: unable to map write DMA buffer\n", dev->name); | |
7789 | goto out_dma_mapping; | |
7790 | } | |
7791 | ||
7792 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3) { | |
7793 | flash = ((uint32_t)flash << 1) | (uint32_t)chip; | |
7794 | chip = 0; | |
7795 | } | |
7796 | ||
7797 | msg = (struct ssd_nand_op_msg *)ssd_get_dmsg(dev); | |
7798 | ||
7799 | msg->fun = SSD_FUNC_NAND_WRITE; | |
7800 | msg->ctrl_idx = ctrl_idx; | |
7801 | msg->chip_no = flash; | |
7802 | msg->chip_ce = chip; | |
7803 | ||
7804 | msg->page_no = page; | |
7805 | msg->page_count = count; | |
7806 | msg->buf = buf_dma; | |
7807 | ||
7808 | ret = ssd_do_request(dev, WRITE, msg, NULL); | |
7809 | ssd_put_dmsg(msg); | |
7810 | ||
7811 | pci_unmap_single(dev->pdev, buf_dma, length, PCI_DMA_TODEVICE); | |
7812 | ||
7813 | out_dma_mapping: | |
7814 | return ret; | |
7815 | } | |
7816 | ||
7817 | static int ssd_nand_erase(struct ssd_device *dev, int flash, int chip, int page, int ctrl_idx) | |
7818 | { | |
7819 | struct ssd_nand_op_msg *msg; | |
7820 | int ret = 0; | |
7821 | ||
7822 | ret = ssd_check_flash(dev, flash, page, ctrl_idx); | |
7823 | if (ret) { | |
7824 | return ret; | |
7825 | } | |
7826 | ||
7827 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3) { | |
7828 | flash = ((uint32_t)flash << 1) | (uint32_t)chip; | |
7829 | chip = 0; | |
7830 | } | |
7831 | ||
7832 | msg = (struct ssd_nand_op_msg *)ssd_get_dmsg(dev); | |
7833 | ||
7834 | msg->fun = SSD_FUNC_NAND_ERASE; | |
7835 | msg->ctrl_idx = ctrl_idx; | |
7836 | msg->chip_no = flash; | |
7837 | msg->chip_ce = chip; | |
7838 | msg->page_no = page; | |
7839 | ||
7840 | ret = ssd_do_request(dev, WRITE, msg, NULL); | |
7841 | ssd_put_dmsg(msg); | |
7842 | ||
7843 | return ret; | |
7844 | } | |
7845 | ||
7846 | static int ssd_update_bbt(struct ssd_device *dev, int flash, int ctrl_idx) | |
7847 | { | |
7848 | struct ssd_nand_op_msg *msg; | |
7849 | struct ssd_flush_msg *fmsg; | |
7850 | int ret = 0; | |
7851 | ||
7852 | ret = ssd_check_flash(dev, flash, 0, ctrl_idx); | |
7853 | if (ret) { | |
7854 | return ret; | |
7855 | } | |
7856 | ||
7857 | msg = (struct ssd_nand_op_msg *)ssd_get_dmsg(dev); | |
7858 | ||
7859 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3) { | |
7860 | fmsg = (struct ssd_flush_msg *)msg; | |
7861 | ||
7862 | fmsg->fun = SSD_FUNC_FLUSH; | |
7863 | fmsg->flag = 0x1; | |
7864 | fmsg->flash = flash; | |
7865 | fmsg->ctrl_idx = ctrl_idx; | |
7866 | } else { | |
7867 | msg->fun = SSD_FUNC_FLUSH; | |
7868 | msg->flag = 0x1; | |
7869 | msg->chip_no = flash; | |
7870 | msg->ctrl_idx = ctrl_idx; | |
7871 | } | |
7872 | ||
7873 | ret = ssd_do_request(dev, WRITE, msg, NULL); | |
7874 | ssd_put_dmsg(msg); | |
7875 | ||
7876 | return ret; | |
7877 | } | |
7878 | ||
/* flash controller init state */
/*
 * __ssd_check_init_state() - wait until every flash controller reports
 * its chips (and channels) initialized.
 *
 * Reads each controller's init-state register block and tests one ready
 * bit per chip/channel.  On protocol > V3 only the last channel bit per
 * chip is checked.  Not-ready bits trigger an msleep(SSD_INIT_WAIT) and a
 * re-read, with a total retry budget of max_wait shared across all
 * controllers.
 *
 * Returns 0 when everything came up, -ENOMEM if the scratch buffer cannot
 * be allocated, or -1 when a chip/channel failed to initialize in time.
 */
static int __ssd_check_init_state(struct ssd_device *dev)
{
	uint32_t *init_state = NULL;
	int reg_base, reg_sz;
	int max_wait = SSD_INIT_MAX_WAIT;
	int init_wait = 0;
	int i, j, k;
	int ch_start = 0;

	/*
	for (i=0; i<dev->hw_info.nr_ctrl; i++) {
		ssd_reg32_write(dev->ctrlp + SSD_CTRL_TEST_REG0 + i * 8, test_data);
		read_data = ssd_reg32_read(dev->ctrlp + SSD_CTRL_TEST_REG0 + i * 8);
		if (read_data == ~test_data) {
			//dev->hw_info.nr_ctrl++;
			dev->hw_info.nr_ctrl_map |= 1<<i;
		}
	}
	*/

	/*
	read_data = ssd_reg32_read(dev->ctrlp + SSD_READY_REG);
	j=0;
	for (i=0; i<dev->hw_info.nr_ctrl; i++) {
		if (((read_data>>i) & 0x1) == 0) {
			j++;
		}
	}

	if (dev->hw_info.nr_ctrl != j) {
		printk(KERN_WARNING "%s: nr_ctrl mismatch: %d %d\n", dev->name, dev->hw_info.nr_ctrl, j);
		return -1;
	}
	*/

	/*
	init_state = ssd_reg_read(dev->ctrlp + SSD_FLASH_INFO_REG0);
	for (j=1; j<dev->hw_info.nr_ctrl;j++) {
		if (init_state != ssd_reg_read(dev->ctrlp + SSD_FLASH_INFO_REG0 + j*8)) {
			printk(KERN_WARNING "SSD_FLASH_INFO_REG[%d], not match\n", j);
			return -1;
		}
	}
	*/

	/* init_state = ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0);
	for (j=1; j<dev->hw_info.nr_ctrl; j++) {
		if (init_state != ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0 + j*16)) {
			printk(KERN_WARNING "SSD_CHIP_INFO_REG Lo [%d], not match\n", j);
			return -1;
		}
	}

	init_state = ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0 + 8);
	for (j=1; j<dev->hw_info.nr_ctrl; j++) {
		if (init_state != ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0 + 8 + j*16)) {
			printk(KERN_WARNING "SSD_CHIP_INFO_REG Hi [%d], not match\n", j);
			return -1;
		}
	}
	*/

	/* newer firmware gets a longer init budget */
	if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) {
		max_wait = SSD_INIT_MAX_WAIT_V3_2;
	}

	reg_base = dev->protocol_info.init_state_reg;
	reg_sz = dev->protocol_info.init_state_reg_sz;

	/* scratch buffer holding one controller's init-state bitmap */
	init_state = (uint32_t *)kmalloc(reg_sz, GFP_KERNEL);
	if (!init_state) {
		return -ENOMEM;
	}

	for (i=0; i<dev->hw_info.nr_ctrl; i++) {
check_init:
		/* snapshot this controller's init-state registers */
		for (j=0, k=0; j<reg_sz; j+=sizeof(uint32_t), k++) {
			init_state[k] = ssd_reg32_read(dev->ctrlp + reg_base + j);
		}

		if (dev->protocol_info.ver > SSD_PROTOCOL_V3) {
			/* just check the last bit, no need to check all channel */
			ch_start = dev->hw_info.max_ch - 1;
		} else {
			ch_start = 0;
		}

		for (j=0; j<dev->hw_info.nr_chip; j++) {
			for (k=ch_start; k<dev->hw_info.max_ch; k++) {
				if (test_bit((j*dev->hw_info.max_ch + k), (void *)init_state)) {
					continue;
				}

				/* not ready yet: sleep and re-read until the budget runs out */
				init_wait++;
				if (init_wait <= max_wait) {
					msleep(SSD_INIT_WAIT);
					goto check_init;
				} else {
					if (k < dev->hw_info.nr_ch) {
						hio_warn("%s: controller %d chip %d ch %d init failed\n",
							dev->name, i, j, k);
					} else {
						hio_warn("%s: controller %d chip %d init failed\n",
							dev->name, i, j);
					}

					kfree(init_state);
					return -1;
				}
			}
		}
		reg_base += reg_sz;
	}
	//printk(KERN_WARNING "%s: init wait %d\n", dev->name, init_wait);

	kfree(init_state);
	return 0;
}
7998 | ||
7999 | static int ssd_check_init_state(struct ssd_device *dev) | |
8000 | { | |
8001 | if (mode != SSD_DRV_MODE_STANDARD) { | |
8002 | return 0; | |
8003 | } | |
8004 | ||
8005 | return __ssd_check_init_state(dev); | |
8006 | } | |
8007 | ||
8008 | static void ssd_reset_resp_ptr(struct ssd_device *dev); | |
8009 | ||
/* reset flash controller etc */
/*
 * __ssd_reset() - reset the flash controller.
 *
 * @type selects the flavor: SSD_RST_NOINIT resets without re-init,
 * SSD_RST_NORMAL resets and re-initializes, SSD_RST_FULL performs a full
 * reset (protocol >= V3.2 only).  Serialized against other firmware
 * operations by dev->fw_mutex.  After the reset the controller flush
 * timeout is re-programmed for the current write mode, the event is
 * logged, the reset time recorded, and the init state polled.
 *
 * Returns 0 on success, -EINVAL for a bad @type, or the result of
 * __ssd_check_init_state().
 */
static int __ssd_reset(struct ssd_device *dev, int type)
{
	if (type < SSD_RST_NOINIT || type > SSD_RST_FULL) {
		return -EINVAL;
	}

	mutex_lock(&dev->fw_mutex);

	if (type == SSD_RST_NOINIT) { //no init
		ssd_reg32_write(dev->ctrlp + SSD_RESET_REG, SSD_RESET_NOINIT);
	} else if (type == SSD_RST_NORMAL) { //reset & init
		ssd_reg32_write(dev->ctrlp + SSD_RESET_REG, SSD_RESET);
	} else { // full reset
		/* full reset requires V3.2+ firmware */
		if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
			mutex_unlock(&dev->fw_mutex);
			return -EINVAL;
		}

		ssd_reg32_write(dev->ctrlp + SSD_FULL_RESET_REG, SSD_RESET_FULL);

		/* ?? */
		ssd_reset_resp_ptr(dev);
	}

#ifdef SSD_OT_PROTECT
	/* over-temperature throttling state is void after a reset */
	dev->ot_delay = 0;
#endif

	/* give the controller time to come back before touching it */
	msleep(1000);

	/* re-program the flush timeout for the current write mode */
	ssd_set_flush_timeout(dev, dev->wmode);

	mutex_unlock(&dev->fw_mutex);
	ssd_gen_swlog(dev, SSD_LOG_RESET, (uint32_t)type);
	dev->reset_time = (uint64_t)ktime_get_real_seconds();

	return __ssd_check_init_state(dev);
}
8050 | ||
8051 | static int ssd_save_md(struct ssd_device *dev) | |
8052 | { | |
8053 | struct ssd_nand_op_msg *msg; | |
8054 | int ret = 0; | |
8055 | ||
8056 | if (unlikely(mode != SSD_DRV_MODE_STANDARD)) | |
8057 | return 0; | |
8058 | ||
8059 | if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) { | |
8060 | return 0; | |
8061 | } | |
8062 | ||
8063 | if (!dev->save_md) { | |
8064 | return 0; | |
8065 | } | |
8066 | ||
8067 | msg = (struct ssd_nand_op_msg *)ssd_get_dmsg(dev); | |
8068 | ||
8069 | msg->fun = SSD_FUNC_FLUSH; | |
8070 | msg->flag = 0x2; | |
8071 | msg->ctrl_idx = 0; | |
8072 | msg->chip_no = 0; | |
8073 | ||
8074 | ret = ssd_do_request(dev, WRITE, msg, NULL); | |
8075 | ssd_put_dmsg(msg); | |
8076 | ||
8077 | return ret; | |
8078 | } | |
8079 | ||
8080 | static int ssd_barrier_save_md(struct ssd_device *dev) | |
8081 | { | |
8082 | struct ssd_nand_op_msg *msg; | |
8083 | int ret = 0; | |
8084 | ||
8085 | if (unlikely(mode != SSD_DRV_MODE_STANDARD)) | |
8086 | return 0; | |
8087 | ||
8088 | if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) { | |
8089 | return 0; | |
8090 | } | |
8091 | ||
8092 | if (!dev->save_md) { | |
8093 | return 0; | |
8094 | } | |
8095 | ||
8096 | msg = (struct ssd_nand_op_msg *)ssd_get_dmsg(dev); | |
8097 | ||
8098 | msg->fun = SSD_FUNC_FLUSH; | |
8099 | msg->flag = 0x2; | |
8100 | msg->ctrl_idx = 0; | |
8101 | msg->chip_no = 0; | |
8102 | ||
8103 | ret = ssd_do_barrier_request(dev, WRITE, msg, NULL); | |
8104 | ssd_put_dmsg(msg); | |
8105 | ||
8106 | return ret; | |
8107 | } | |
8108 | ||
8109 | static int ssd_flush(struct ssd_device *dev) | |
8110 | { | |
8111 | struct ssd_nand_op_msg *msg; | |
8112 | struct ssd_flush_msg *fmsg; | |
8113 | int ret = 0; | |
8114 | ||
8115 | if (unlikely(mode != SSD_DRV_MODE_STANDARD)) | |
8116 | return 0; | |
8117 | ||
8118 | msg = (struct ssd_nand_op_msg *)ssd_get_dmsg(dev); | |
8119 | ||
8120 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3) { | |
8121 | fmsg = (struct ssd_flush_msg *)msg; | |
8122 | ||
8123 | fmsg->fun = SSD_FUNC_FLUSH; | |
8124 | fmsg->flag = 0; | |
8125 | fmsg->ctrl_idx = 0; | |
8126 | fmsg->flash = 0; | |
8127 | } else { | |
8128 | msg->fun = SSD_FUNC_FLUSH; | |
8129 | msg->flag = 0; | |
8130 | msg->ctrl_idx = 0; | |
8131 | msg->chip_no = 0; | |
8132 | } | |
8133 | ||
8134 | ret = ssd_do_request(dev, WRITE, msg, NULL); | |
8135 | ssd_put_dmsg(msg); | |
8136 | ||
8137 | return ret; | |
8138 | } | |
8139 | ||
8140 | static int ssd_barrier_flush(struct ssd_device *dev) | |
8141 | { | |
8142 | struct ssd_nand_op_msg *msg; | |
8143 | struct ssd_flush_msg *fmsg; | |
8144 | int ret = 0; | |
8145 | ||
8146 | if (unlikely(mode != SSD_DRV_MODE_STANDARD)) | |
8147 | return 0; | |
8148 | ||
8149 | msg = (struct ssd_nand_op_msg *)ssd_get_dmsg(dev); | |
8150 | ||
8151 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3) { | |
8152 | fmsg = (struct ssd_flush_msg *)msg; | |
8153 | ||
8154 | fmsg->fun = SSD_FUNC_FLUSH; | |
8155 | fmsg->flag = 0; | |
8156 | fmsg->ctrl_idx = 0; | |
8157 | fmsg->flash = 0; | |
8158 | } else { | |
8159 | msg->fun = SSD_FUNC_FLUSH; | |
8160 | msg->flag = 0; | |
8161 | msg->ctrl_idx = 0; | |
8162 | msg->chip_no = 0; | |
8163 | } | |
8164 | ||
8165 | ret = ssd_do_barrier_request(dev, WRITE, msg, NULL); | |
8166 | ssd_put_dmsg(msg); | |
8167 | ||
8168 | return ret; | |
8169 | } | |
8170 | ||
8171 | #define SSD_WMODE_BUFFER_TIMEOUT 0x00c82710 | |
8172 | #define SSD_WMODE_BUFFER_EX_TIMEOUT 0x000500c8 | |
8173 | #define SSD_WMODE_FUA_TIMEOUT 0x000503E8 | |
8174 | static void ssd_set_flush_timeout(struct ssd_device *dev, int m) | |
8175 | { | |
8176 | uint32_t to; | |
8177 | uint32_t val = 0; | |
8178 | ||
8179 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_1_1) { | |
8180 | return; | |
8181 | } | |
8182 | ||
8183 | switch(m) { | |
8184 | case SSD_WMODE_BUFFER: | |
8185 | to = SSD_WMODE_BUFFER_TIMEOUT; | |
8186 | break; | |
8187 | case SSD_WMODE_BUFFER_EX: | |
8188 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2_1) { | |
8189 | to = SSD_WMODE_BUFFER_EX_TIMEOUT; | |
8190 | } else { | |
8191 | to = SSD_WMODE_BUFFER_TIMEOUT; | |
8192 | } | |
8193 | break; | |
8194 | case SSD_WMODE_FUA: | |
8195 | to = SSD_WMODE_FUA_TIMEOUT; | |
8196 | break; | |
8197 | default: | |
8198 | return; | |
8199 | } | |
8200 | ||
8201 | val = (((uint32_t)((uint32_t)m & 0x3) << 28) | to); | |
8202 | ||
8203 | ssd_reg32_write(dev->ctrlp + SSD_FLUSH_TIMEOUT_REG, val); | |
8204 | } | |
8205 | ||
8206 | static int ssd_do_switch_wmode(struct ssd_device *dev, int m) | |
8207 | { | |
8208 | int ret = 0; | |
8209 | ||
8210 | ret = ssd_barrier_start(dev); | |
8211 | if (ret) { | |
8212 | goto out; | |
8213 | } | |
8214 | ||
8215 | ret = ssd_barrier_flush(dev); | |
8216 | if (ret) { | |
8217 | goto out_barrier_end; | |
8218 | } | |
8219 | ||
8220 | /* set contoller flush timeout */ | |
8221 | ssd_set_flush_timeout(dev, m); | |
8222 | ||
8223 | dev->wmode = m; | |
8224 | mb(); | |
8225 | ||
8226 | out_barrier_end: | |
8227 | ssd_barrier_end(dev); | |
8228 | out: | |
8229 | return ret; | |
8230 | } | |
8231 | ||
8232 | static int ssd_switch_wmode(struct ssd_device *dev, int m) | |
8233 | { | |
8234 | int default_wmode; | |
8235 | int next_wmode; | |
8236 | int ret = 0; | |
8237 | ||
8238 | if (!test_bit(SSD_ONLINE, &dev->state)) { | |
8239 | return -ENODEV; | |
8240 | } | |
8241 | ||
8242 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
8243 | default_wmode = SSD_WMODE_BUFFER; | |
8244 | } else { | |
8245 | default_wmode = SSD_WMODE_BUFFER_EX; | |
8246 | } | |
8247 | ||
8248 | if (SSD_WMODE_AUTO == m) { | |
8249 | /* battery fault ? */ | |
8250 | if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) { | |
8251 | next_wmode = SSD_WMODE_FUA; | |
8252 | } else { | |
8253 | next_wmode = default_wmode; | |
8254 | } | |
8255 | } else if (SSD_WMODE_DEFAULT == m) { | |
8256 | next_wmode = default_wmode; | |
8257 | } else { | |
8258 | next_wmode = m; | |
8259 | } | |
8260 | ||
8261 | if (next_wmode != dev->wmode) { | |
8262 | hio_warn("%s: switch write mode (%d -> %d)\n", dev->name, dev->wmode, next_wmode); | |
8263 | ret = ssd_do_switch_wmode(dev, next_wmode); | |
8264 | if (ret) { | |
8265 | hio_err("%s: can not switch write mode (%d -> %d)\n", dev->name, dev->wmode, next_wmode); | |
8266 | } | |
8267 | } | |
8268 | ||
8269 | return ret; | |
8270 | } | |
8271 | ||
8272 | static int ssd_init_wmode(struct ssd_device *dev) | |
8273 | { | |
8274 | int default_wmode; | |
8275 | int ret = 0; | |
8276 | ||
8277 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
8278 | default_wmode = SSD_WMODE_BUFFER; | |
8279 | } else { | |
8280 | default_wmode = SSD_WMODE_BUFFER_EX; | |
8281 | } | |
8282 | ||
8283 | /* dummy mode */ | |
8284 | if (SSD_WMODE_AUTO == dev->user_wmode) { | |
8285 | /* battery fault ? */ | |
8286 | if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) { | |
8287 | dev->wmode = SSD_WMODE_FUA; | |
8288 | } else { | |
8289 | dev->wmode = default_wmode; | |
8290 | } | |
8291 | } else if (SSD_WMODE_DEFAULT == dev->user_wmode) { | |
8292 | dev->wmode = default_wmode; | |
8293 | } else { | |
8294 | dev->wmode = dev->user_wmode; | |
8295 | } | |
8296 | ssd_set_flush_timeout(dev, dev->wmode); | |
8297 | ||
8298 | return ret; | |
8299 | } | |
8300 | ||
8301 | static int __ssd_set_wmode(struct ssd_device *dev, int m) | |
8302 | { | |
8303 | int ret = 0; | |
8304 | ||
8305 | /* not support old fw*/ | |
8306 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_1_1) { | |
8307 | ret = -EOPNOTSUPP; | |
8308 | goto out; | |
8309 | } | |
8310 | ||
8311 | if (m < SSD_WMODE_BUFFER || m > SSD_WMODE_DEFAULT) { | |
8312 | ret = -EINVAL; | |
8313 | goto out; | |
8314 | } | |
8315 | ||
8316 | ssd_gen_swlog(dev, SSD_LOG_SET_WMODE, m); | |
8317 | ||
8318 | dev->user_wmode = m; | |
8319 | ||
8320 | ret = ssd_switch_wmode(dev, dev->user_wmode); | |
8321 | if (ret) { | |
8322 | goto out; | |
8323 | } | |
8324 | ||
8325 | out: | |
8326 | return ret; | |
8327 | } | |
8328 | ||
8329 | int ssd_set_wmode(struct block_device *bdev, int m) | |
8330 | { | |
8331 | struct ssd_device *dev; | |
8332 | ||
8333 | if (!bdev || !(bdev->bd_disk)) { | |
8334 | return -EINVAL; | |
8335 | } | |
8336 | ||
8337 | dev = bdev->bd_disk->private_data; | |
8338 | ||
8339 | return __ssd_set_wmode(dev, m); | |
8340 | } | |
8341 | ||
8342 | static int ssd_do_reset(struct ssd_device *dev) | |
8343 | { | |
8344 | int ret = 0; | |
8345 | ||
8346 | if (test_and_set_bit(SSD_RESETING, &dev->state)) { | |
8347 | return 0; | |
8348 | } | |
8349 | ||
8350 | ssd_stop_workq(dev); | |
8351 | ||
8352 | ret = ssd_barrier_start(dev); | |
8353 | if (ret) { | |
8354 | goto out; | |
8355 | } | |
8356 | ||
8357 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
8358 | /* old reset */ | |
8359 | ret = __ssd_reset(dev, SSD_RST_NORMAL); | |
8360 | } else { | |
8361 | /* full reset */ | |
8362 | //ret = __ssd_reset(dev, SSD_RST_FULL); | |
8363 | ret = __ssd_reset(dev, SSD_RST_NORMAL); | |
8364 | } | |
8365 | if (ret) { | |
8366 | goto out_barrier_end; | |
8367 | } | |
8368 | ||
8369 | out_barrier_end: | |
8370 | ssd_barrier_end(dev); | |
8371 | out: | |
8372 | ssd_start_workq(dev); | |
8373 | test_and_clear_bit(SSD_RESETING, &dev->state); | |
8374 | return ret; | |
8375 | } | |
8376 | ||
8377 | static int ssd_full_reset(struct ssd_device *dev) | |
8378 | { | |
8379 | int ret = 0; | |
8380 | ||
8381 | if (test_and_set_bit(SSD_RESETING, &dev->state)) { | |
8382 | return 0; | |
8383 | } | |
8384 | ||
8385 | ssd_stop_workq(dev); | |
8386 | ||
8387 | ret = ssd_barrier_start(dev); | |
8388 | if (ret) { | |
8389 | goto out; | |
8390 | } | |
8391 | ||
8392 | ret = ssd_barrier_flush(dev); | |
8393 | if (ret) { | |
8394 | goto out_barrier_end; | |
8395 | } | |
8396 | ||
8397 | ret = ssd_barrier_save_md(dev); | |
8398 | if (ret) { | |
8399 | goto out_barrier_end; | |
8400 | } | |
8401 | ||
8402 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
8403 | /* old reset */ | |
8404 | ret = __ssd_reset(dev, SSD_RST_NORMAL); | |
8405 | } else { | |
8406 | /* full reset */ | |
8407 | //ret = __ssd_reset(dev, SSD_RST_FULL); | |
8408 | ret = __ssd_reset(dev, SSD_RST_NORMAL); | |
8409 | } | |
8410 | if (ret) { | |
8411 | goto out_barrier_end; | |
8412 | } | |
8413 | ||
8414 | out_barrier_end: | |
8415 | ssd_barrier_end(dev); | |
8416 | out: | |
8417 | ssd_start_workq(dev); | |
8418 | test_and_clear_bit(SSD_RESETING, &dev->state); | |
8419 | return ret; | |
8420 | } | |
8421 | ||
8422 | int ssd_reset(struct block_device *bdev) | |
8423 | { | |
8424 | int ret; | |
8425 | struct ssd_device *dev; | |
8426 | ||
8427 | if (!bdev || !(bdev->bd_disk)) { | |
8428 | return -EINVAL; | |
8429 | } | |
8430 | ||
8431 | dev = bdev->bd_disk->private_data; | |
8432 | ||
8433 | ret = ssd_full_reset(dev); | |
8434 | if (!ret) { | |
8435 | if (!dev->has_non_0x98_reg_access) { | |
8436 | ssd_reg32_write(dev->ctrlp + SSD_RELOAD_FW_REG, 0); | |
8437 | } | |
8438 | } | |
8439 | ||
8440 | return ret ; | |
8441 | } | |
8442 | ||
8443 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)) | |
8444 | static int ssd_issue_flush_fn(struct request_queue *q, struct gendisk *disk, | |
8445 | sector_t *error_sector) | |
8446 | { | |
8447 | struct ssd_device *dev = q->queuedata; | |
8448 | ||
8449 | return ssd_flush(dev); | |
8450 | } | |
8451 | #endif | |
8452 | ||
/*
 * ssd_submit_pbio() - entry point for "physical" bios (raw access path).
 *
 * Completes the bio with an error when the device is offline, when
 * barrier/FUA semantics are requested (unsupported), or on a write to a
 * read-only device.  Otherwise the bio is submitted directly; with
 * SSD_QUEUE_PBIO it is instead queued for the send thread when the
 * direct path is busy.
 */
void ssd_submit_pbio(struct request_queue *q, struct bio *bio)
{
	struct ssd_device *dev = q->queuedata;
#ifdef SSD_QUEUE_PBIO
	int ret = -EBUSY;
#endif

	if (!test_bit(SSD_ONLINE, &dev->state)) {
		ssd_bio_endio(bio, -ENODEV);
		goto out;
	}

#ifdef SSD_DEBUG_ERR
	/* debug build: fail fast once a timeout has been observed */
	if (atomic_read(&dev->tocnt)) {
		hio_warn("%s: IO rejected because of IO timeout!\n", dev->name);
		ssd_bio_endio(bio, -EIO);
		goto out;
	}
#endif

	/* barrier/FUA bios are not supported on this path */
	if (unlikely(ssd_bio_has_barrier_or_fua(bio))) {
		ssd_bio_endio(bio, -EOPNOTSUPP);
		goto out;
	}

	if (unlikely(dev->readonly && bio_data_dir(bio) == WRITE)) {
		ssd_bio_endio(bio, -EROFS);
		goto out;
	}

#ifdef SSD_QUEUE_PBIO
	/* try the direct path only while the send queue is idle */
	if (0 == atomic_read(&dev->in_sendq)) {
		ret = __ssd_submit_pbio(dev, bio, 0);
	}

	if (ret) {
		/* mark as pbio so the queue worker takes the raw path */
		(void)test_and_set_bit(BIO_SSD_PBIO, &bio->bi_flags);
		ssd_queue_bio(dev, bio);
	}
#else
	__ssd_submit_pbio(dev, bio, 1);
#endif

out:
	return;
}
8499 | ||
/*
 * ssd_make_request() - block-layer make_request entry point.  The return
 * type varies with the kernel version (blk_qc_t / void / int).
 *
 * Rejects bios when the device is offline or when barrier/FUA semantics
 * are requested; data-less flush bios complete immediately.  Otherwise
 * the bio is submitted directly, or queued for the send thread when the
 * direct path is busy.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
static blk_qc_t ssd_make_request(struct request_queue *q, struct bio *bio)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
static void ssd_make_request(struct request_queue *q, struct bio *bio)
#else
static int ssd_make_request(struct request_queue *q, struct bio *bio)
#endif
{
	struct ssd_device *dev = q->queuedata;
	int ret = -EBUSY;

	if (!test_bit(SSD_ONLINE, &dev->state)) {
		ssd_bio_endio(bio, -ENODEV);
		goto out;
	}

	/* split bios that exceed the queue limits (API changed in 4.13) */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,13,0))
	blk_queue_split(q, &bio);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0))
	blk_queue_split(q, &bio, q->bio_split);
#endif

#ifdef SSD_DEBUG_ERR
	/* debug build: fail fast once a timeout has been observed */
	if (atomic_read(&dev->tocnt)) {
		hio_warn("%s: IO rejected because of IO timeout!\n", dev->name);
		ssd_bio_endio(bio, -EIO);
		goto out;
	}
#endif

	if (unlikely(ssd_bio_has_barrier_or_fua(bio))) {
		ssd_bio_endio(bio, -EOPNOTSUPP);
		goto out;
	}

	/* writeback_cache_control.txt: REQ_FLUSH requests without data can be completed successfully without doing any work */
	if (unlikely(ssd_bio_has_flush(bio) && !bio_sectors(bio))) {
		ssd_bio_endio(bio, 0);
		goto out;
	}

	/* try the direct path only while the send queue is idle */
	if (0 == atomic_read(&dev->in_sendq)) {
		ret = ssd_submit_bio(dev, bio, 0);
	}

	if (ret) {
		ssd_queue_bio(dev, bio);
	}

out:
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
	return BLK_QC_T_NONE;
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
	return;
#else
	return 0;
#endif
}
8558 | ||
8559 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)) | |
8560 | static int ssd_block_getgeo(struct block_device *bdev, struct hd_geometry *geo) | |
8561 | { | |
8562 | struct ssd_device *dev; | |
8563 | ||
8564 | if (!bdev) { | |
8565 | return -EINVAL; | |
8566 | } | |
8567 | ||
8568 | dev = bdev->bd_disk->private_data; | |
8569 | if (!dev) { | |
8570 | return -EINVAL; | |
8571 | } | |
8572 | ||
8573 | geo->heads = 4; | |
8574 | geo->sectors = 16; | |
8575 | geo->cylinders = (dev->hw_info.size & ~0x3f) >> 6; | |
8576 | return 0; | |
8577 | } | |
8578 | #endif | |
8579 | ||
8580 | static int ssd_init_queue(struct ssd_device *dev); | |
8581 | static void ssd_cleanup_queue(struct ssd_device *dev); | |
8582 | static void ssd_cleanup_blkdev(struct ssd_device *dev); | |
8583 | static int ssd_init_blkdev(struct ssd_device *dev); | |
8584 | static int ssd_ioctl_common(struct ssd_device *dev, unsigned int cmd, unsigned long arg) | |
8585 | { | |
8586 | void __user *argp = (void __user *)arg; | |
8587 | void __user *buf = NULL; | |
8588 | void *kbuf = NULL; | |
8589 | int ret = 0; | |
8590 | ||
8591 | switch (cmd) { | |
8592 | case SSD_CMD_GET_PROTOCOL_INFO: | |
8593 | if (copy_to_user(argp, &dev->protocol_info, sizeof(struct ssd_protocol_info))) { | |
8594 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8595 | ret = -EFAULT; | |
8596 | break; | |
8597 | } | |
8598 | break; | |
8599 | ||
8600 | case SSD_CMD_GET_HW_INFO: | |
8601 | if (copy_to_user(argp, &dev->hw_info, sizeof(struct ssd_hw_info))) { | |
8602 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8603 | ret = -EFAULT; | |
8604 | break; | |
8605 | } | |
8606 | break; | |
8607 | ||
8608 | case SSD_CMD_GET_ROM_INFO: | |
8609 | if (copy_to_user(argp, &dev->rom_info, sizeof(struct ssd_rom_info))) { | |
8610 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8611 | ret = -EFAULT; | |
8612 | break; | |
8613 | } | |
8614 | break; | |
8615 | ||
8616 | case SSD_CMD_GET_SMART: { | |
8617 | struct ssd_smart smart; | |
8618 | int i; | |
8619 | ||
8620 | memcpy(&smart, &dev->smart, sizeof(struct ssd_smart)); | |
8621 | ||
8622 | mutex_lock(&dev->gd_mutex); | |
8623 | ssd_update_smart(dev, &smart); | |
8624 | mutex_unlock(&dev->gd_mutex); | |
8625 | ||
8626 | /* combine the volatile log info */ | |
8627 | if (dev->log_info.nr_log) { | |
8628 | for (i=0; i<SSD_LOG_NR_LEVEL; i++) { | |
8629 | smart.log_info.stat[i] += dev->log_info.stat[i]; | |
8630 | } | |
8631 | } | |
8632 | ||
8633 | if (copy_to_user(argp, &smart, sizeof(struct ssd_smart))) { | |
8634 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8635 | ret = -EFAULT; | |
8636 | break; | |
8637 | } | |
8638 | ||
8639 | break; | |
8640 | } | |
8641 | ||
8642 | case SSD_CMD_GET_IDX: | |
8643 | if (copy_to_user(argp, &dev->idx, sizeof(int))) { | |
8644 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8645 | ret = -EFAULT; | |
8646 | break; | |
8647 | } | |
8648 | break; | |
8649 | ||
8650 | case SSD_CMD_GET_AMOUNT: { | |
8651 | int nr_ssd = atomic_read(&ssd_nr); | |
8652 | if (copy_to_user(argp, &nr_ssd, sizeof(int))) { | |
8653 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8654 | ret = -EFAULT; | |
8655 | break; | |
8656 | } | |
8657 | break; | |
8658 | } | |
8659 | ||
8660 | case SSD_CMD_GET_TO_INFO: { | |
8661 | int tocnt = atomic_read(&dev->tocnt); | |
8662 | ||
8663 | if (copy_to_user(argp, &tocnt, sizeof(int))) { | |
8664 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8665 | ret = -EFAULT; | |
8666 | break; | |
8667 | } | |
8668 | break; | |
8669 | } | |
8670 | ||
8671 | case SSD_CMD_GET_DRV_VER: { | |
8672 | char ver[] = DRIVER_VERSION; | |
8673 | int len = sizeof(ver); | |
8674 | ||
8675 | if (len > (DRIVER_VERSION_LEN - 1)) { | |
8676 | len = (DRIVER_VERSION_LEN - 1); | |
8677 | } | |
8678 | if (copy_to_user(argp, ver, len)) { | |
8679 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8680 | ret = -EFAULT; | |
8681 | break; | |
8682 | } | |
8683 | break; | |
8684 | } | |
8685 | ||
8686 | case SSD_CMD_GET_BBACC_INFO: { | |
8687 | struct ssd_acc_info acc; | |
8688 | ||
8689 | mutex_lock(&dev->fw_mutex); | |
8690 | ret = ssd_bb_acc(dev, &acc); | |
8691 | mutex_unlock(&dev->fw_mutex); | |
8692 | if (ret) { | |
8693 | break; | |
8694 | } | |
8695 | ||
8696 | if (copy_to_user(argp, &acc, sizeof(struct ssd_acc_info))) { | |
8697 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8698 | ret = -EFAULT; | |
8699 | break; | |
8700 | } | |
8701 | break; | |
8702 | } | |
8703 | ||
8704 | case SSD_CMD_GET_ECACC_INFO: { | |
8705 | struct ssd_acc_info acc; | |
8706 | ||
8707 | mutex_lock(&dev->fw_mutex); | |
8708 | ret = ssd_ec_acc(dev, &acc); | |
8709 | mutex_unlock(&dev->fw_mutex); | |
8710 | if (ret) { | |
8711 | break; | |
8712 | } | |
8713 | ||
8714 | if (copy_to_user(argp, &acc, sizeof(struct ssd_acc_info))) { | |
8715 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8716 | ret = -EFAULT; | |
8717 | break; | |
8718 | } | |
8719 | break; | |
8720 | } | |
8721 | ||
8722 | case SSD_CMD_GET_HW_INFO_EXT: | |
8723 | if (copy_to_user(argp, &dev->hw_info_ext, sizeof(struct ssd_hw_info_extend))) { | |
8724 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8725 | ret = -EFAULT; | |
8726 | break; | |
8727 | } | |
8728 | break; | |
8729 | ||
/* Read a single 32-bit register at a user-supplied BAR offset and return
 * the value inside the same ssd_reg_op_info structure. */
case SSD_CMD_REG_READ: {
	struct ssd_reg_op_info reg_info;

	if (copy_from_user(&reg_info, argp, sizeof(struct ssd_reg_op_info))) {
		hio_warn("%s: copy_from_user: failed\n", dev->name);
		ret = -EFAULT;
		break;
	}

	/* Offset must leave room for a full 32-bit access inside the BAR.
	 * NOTE(review): offsets are not checked for 4-byte alignment —
	 * confirm the hardware tolerates unaligned MMIO reads. */
	if (reg_info.offset > dev->mmio_len-sizeof(uint32_t)) {
		ret = -EINVAL;
		break;
	}

	reg_info.value = ssd_reg32_read(dev->ctrlp + reg_info.offset);
	if (copy_to_user(argp, &reg_info, sizeof(struct ssd_reg_op_info))) {
		hio_warn("%s: copy_to_user: failed\n", dev->name);
		ret = -EFAULT;
		break;
	}

	break;
}

/* Write a single 32-bit register at a user-supplied BAR offset. */
case SSD_CMD_REG_WRITE: {
	struct ssd_reg_op_info reg_info;

	if (copy_from_user(&reg_info, argp, sizeof(struct ssd_reg_op_info))) {
		hio_warn("%s: copy_from_user: failed\n", dev->name);
		ret = -EFAULT;
		break;
	}

	/* Same bounds rule as SSD_CMD_REG_READ. */
	if (reg_info.offset > dev->mmio_len-sizeof(uint32_t)) {
		ret = -EINVAL;
		break;
	}

	ssd_reg32_write(dev->ctrlp + reg_info.offset, reg_info.value);

	break;
}
8772 | ||
8773 | case SSD_CMD_SPI_READ: { | |
8774 | struct ssd_spi_op_info spi_info; | |
8775 | uint32_t off, size; | |
8776 | ||
8777 | if (copy_from_user(&spi_info, argp, sizeof(struct ssd_spi_op_info))) { | |
8778 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
8779 | ret = -EFAULT; | |
8780 | break; | |
8781 | } | |
8782 | ||
8783 | off = spi_info.off; | |
8784 | size = spi_info.len; | |
8785 | buf = spi_info.buf; | |
8786 | ||
8787 | if (size > dev->rom_info.size || 0 == size || (off + size) > dev->rom_info.size) { | |
8788 | ret = -EINVAL; | |
8789 | break; | |
8790 | } | |
8791 | ||
8792 | kbuf = kmalloc(size, GFP_KERNEL); | |
8793 | if (!kbuf) { | |
8794 | ret = -ENOMEM; | |
8795 | break; | |
8796 | } | |
8797 | ||
8798 | ret = ssd_spi_page_read(dev, kbuf, off, size); | |
8799 | if (ret) { | |
8800 | kfree(kbuf); | |
8801 | break; | |
8802 | } | |
8803 | ||
8804 | if (copy_to_user(buf, kbuf, size)) { | |
8805 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8806 | kfree(kbuf); | |
8807 | ret = -EFAULT; | |
8808 | break; | |
8809 | } | |
8810 | ||
8811 | kfree(kbuf); | |
8812 | ||
8813 | break; | |
8814 | } | |
8815 | ||
8816 | case SSD_CMD_SPI_WRITE: { | |
8817 | struct ssd_spi_op_info spi_info; | |
8818 | uint32_t off, size; | |
8819 | ||
8820 | if (copy_from_user(&spi_info, argp, sizeof(struct ssd_spi_op_info))) { | |
8821 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
8822 | ret = -EFAULT; | |
8823 | break; | |
8824 | } | |
8825 | ||
8826 | off = spi_info.off; | |
8827 | size = spi_info.len; | |
8828 | buf = spi_info.buf; | |
8829 | ||
8830 | if (size > dev->rom_info.size || 0 == size || (off + size) > dev->rom_info.size) { | |
8831 | ret = -EINVAL; | |
8832 | break; | |
8833 | } | |
8834 | ||
8835 | kbuf = kmalloc(size, GFP_KERNEL); | |
8836 | if (!kbuf) { | |
8837 | ret = -ENOMEM; | |
8838 | break; | |
8839 | } | |
8840 | ||
8841 | if (copy_from_user(kbuf, buf, size)) { | |
8842 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
8843 | kfree(kbuf); | |
8844 | ret = -EFAULT; | |
8845 | break; | |
8846 | } | |
8847 | ||
8848 | ret = ssd_spi_page_write(dev, kbuf, off, size); | |
8849 | if (ret) { | |
8850 | kfree(kbuf); | |
8851 | break; | |
8852 | } | |
8853 | ||
8854 | kfree(kbuf); | |
8855 | ||
8856 | break; | |
8857 | } | |
8858 | ||
/* Erase one flash block starting at user-supplied offset `off`.
 * NOTE(review): "(off + block_size)" is a 32-bit addition on a
 * user-controlled value; a huge `off` could wrap it past the bounds
 * check — confirm whether off is validated elsewhere or tighten this. */
case SSD_CMD_SPI_ERASE: {
	struct ssd_spi_op_info spi_info;
	uint32_t off;

	if (copy_from_user(&spi_info, argp, sizeof(struct ssd_spi_op_info))) {
		hio_warn("%s: copy_from_user: failed\n", dev->name);
		ret = -EFAULT;
		break;
	}

	off = spi_info.off;

	/* The erased block must fit entirely inside the flash. */
	if ((off + dev->rom_info.block_size) > dev->rom_info.size) {
		ret = -EINVAL;
		break;
	}

	ret = ssd_spi_block_erase(dev, off);
	if (ret) {
		break;
	}

	break;
}
8883 | ||
/* Read `rsize` bytes from I2C slave `saddr` into a bounce buffer, then
 * copy the data to the user's rbuf. */
case SSD_CMD_I2C_READ: {
	struct ssd_i2c_op_info i2c_info;
	uint8_t saddr;
	uint8_t rsize;

	if (copy_from_user(&i2c_info, argp, sizeof(struct ssd_i2c_op_info))) {
		hio_warn("%s: copy_from_user: failed\n", dev->name);
		ret = -EFAULT;
		break;
	}

	saddr = i2c_info.saddr;
	rsize = i2c_info.rsize;
	buf = i2c_info.rbuf;

	/* rsize is unsigned, so "<= 0" only rejects 0; upper bound caps
	 * the transfer at the controller's maximum. */
	if (rsize <= 0 || rsize > SSD_I2C_MAX_DATA) {
		ret = -EINVAL;
		break;
	}

	kbuf = kmalloc(rsize, GFP_KERNEL);
	if (!kbuf) {
		ret = -ENOMEM;
		break;
	}

	ret = ssd_i2c_read(dev, saddr, rsize, kbuf);
	if (ret) {
		kfree(kbuf);
		break;
	}

	if (copy_to_user(buf, kbuf, rsize)) {
		hio_warn("%s: copy_to_user: failed\n", dev->name);
		kfree(kbuf);
		ret = -EFAULT;
		break;
	}

	kfree(kbuf);

	break;
}
8927 | ||
/* Write `wsize` bytes from the user's wbuf to I2C slave `saddr`,
 * staged through a kernel bounce buffer. */
case SSD_CMD_I2C_WRITE: {
	struct ssd_i2c_op_info i2c_info;
	uint8_t saddr;
	uint8_t wsize;

	if (copy_from_user(&i2c_info, argp, sizeof(struct ssd_i2c_op_info))) {
		hio_warn("%s: copy_from_user: failed\n", dev->name);
		ret = -EFAULT;
		break;
	}

	saddr = i2c_info.saddr;
	wsize = i2c_info.wsize;
	buf = i2c_info.wbuf;

	/* wsize is unsigned, so "<= 0" only rejects 0. */
	if (wsize <= 0 || wsize > SSD_I2C_MAX_DATA) {
		ret = -EINVAL;
		break;
	}

	kbuf = kmalloc(wsize, GFP_KERNEL);
	if (!kbuf) {
		ret = -ENOMEM;
		break;
	}

	if (copy_from_user(kbuf, buf, wsize)) {
		hio_warn("%s: copy_from_user: failed\n", dev->name);
		kfree(kbuf);
		ret = -EFAULT;
		break;
	}

	ret = ssd_i2c_write(dev, saddr, wsize, kbuf);
	if (ret) {
		kfree(kbuf);
		break;
	}

	kfree(kbuf);

	break;
}
8971 | ||
/* Combined I2C write-then-read transaction: write `wsize` bytes from the
 * user's wbuf, then read `rsize` bytes back into the user's rbuf.
 * One bounce buffer holds both halves: read data at kbuf[0..rsize),
 * write data at kbuf[rsize..rsize+wsize).
 * NOTE(review): `size = wsize + rsize` is computed in uint8_t; if
 * SSD_I2C_MAX_DATA exceeds 127 the sum can wrap and under-allocate —
 * confirm the constant's value. */
case SSD_CMD_I2C_WRITE_READ: {
	struct ssd_i2c_op_info i2c_info;
	uint8_t saddr;
	uint8_t wsize;
	uint8_t rsize;
	uint8_t size;

	if (copy_from_user(&i2c_info, argp, sizeof(struct ssd_i2c_op_info))) {
		hio_warn("%s: copy_from_user: failed\n", dev->name);
		ret = -EFAULT;
		break;
	}

	saddr = i2c_info.saddr;
	wsize = i2c_info.wsize;
	rsize = i2c_info.rsize;
	buf = i2c_info.wbuf;

	if (wsize <= 0 || wsize > SSD_I2C_MAX_DATA) {
		ret = -EINVAL;
		break;
	}

	if (rsize <= 0 || rsize > SSD_I2C_MAX_DATA) {
		ret = -EINVAL;
		break;
	}

	size = wsize + rsize;

	kbuf = kmalloc(size, GFP_KERNEL);
	if (!kbuf) {
		ret = -ENOMEM;
		break;
	}

	/* Write payload is staged AFTER the read area in the buffer. */
	if (copy_from_user((kbuf + rsize), buf, wsize)) {
		hio_warn("%s: copy_from_user: failed\n", dev->name);
		kfree(kbuf);
		ret = -EFAULT;
		break;
	}

	/* Switch `buf` to the user's read buffer for the copy-out below. */
	buf = i2c_info.rbuf;

	ret = ssd_i2c_write_read(dev, saddr, wsize, (kbuf + rsize), rsize, kbuf);
	if (ret) {
		kfree(kbuf);
		break;
	}

	if (copy_to_user(buf, kbuf, rsize)) {
		hio_warn("%s: copy_to_user: failed\n", dev->name);
		kfree(kbuf);
		ret = -EFAULT;
		break;
	}

	kfree(kbuf);

	break;
}
9034 | ||
/* SMBus "send byte": copy one byte from the user buffer and send it to
 * slave `saddr`. */
case SSD_CMD_SMBUS_SEND_BYTE: {
	struct ssd_smbus_op_info smbus_info;
	uint8_t smb_data[SSD_SMBUS_BLOCK_MAX];
	uint8_t saddr;
	uint8_t size;

	if (copy_from_user(&smbus_info, argp, sizeof(struct ssd_smbus_op_info))) {
		hio_warn("%s: copy_from_user: failed\n", dev->name);
		ret = -EFAULT;
		break;
	}

	saddr = smbus_info.saddr;
	buf = smbus_info.buf;
	size = 1;	/* send-byte is a fixed 1-byte transfer */

	if (copy_from_user(smb_data, buf, size)) {
		hio_warn("%s: copy_from_user: failed\n", dev->name);
		ret = -EFAULT;
		break;
	}

	ret = ssd_smbus_send_byte(dev, saddr, smb_data);
	if (ret) {
		break;
	}

	break;
}

/* SMBus "receive byte": read one byte from slave `saddr` and copy it to
 * the user buffer. */
case SSD_CMD_SMBUS_RECEIVE_BYTE: {
	struct ssd_smbus_op_info smbus_info;
	uint8_t smb_data[SSD_SMBUS_BLOCK_MAX];
	uint8_t saddr;
	uint8_t size;

	if (copy_from_user(&smbus_info, argp, sizeof(struct ssd_smbus_op_info))) {
		hio_warn("%s: copy_from_user: failed\n", dev->name);
		ret = -EFAULT;
		break;
	}

	saddr = smbus_info.saddr;
	buf = smbus_info.buf;
	size = 1;	/* receive-byte is a fixed 1-byte transfer */

	ret = ssd_smbus_receive_byte(dev, saddr, smb_data);
	if (ret) {
		break;
	}

	if (copy_to_user(buf, smb_data, size)) {
		hio_warn("%s: copy_to_user: failed\n", dev->name);
		ret = -EFAULT;
		break;
	}

	break;
}
9094 | ||
/* SMBus "write byte": send command code `command` plus one data byte
 * taken from the user buffer. */
case SSD_CMD_SMBUS_WRITE_BYTE: {
	struct ssd_smbus_op_info smbus_info;
	uint8_t smb_data[SSD_SMBUS_BLOCK_MAX];
	uint8_t saddr;
	uint8_t command;
	uint8_t size;

	if (copy_from_user(&smbus_info, argp, sizeof(struct ssd_smbus_op_info))) {
		hio_warn("%s: copy_from_user: failed\n", dev->name);
		ret = -EFAULT;
		break;
	}

	saddr = smbus_info.saddr;
	command = smbus_info.cmd;
	buf = smbus_info.buf;
	size = 1;	/* write-byte carries exactly one data byte */

	if (copy_from_user(smb_data, buf, size)) {
		hio_warn("%s: copy_from_user: failed\n", dev->name);
		ret = -EFAULT;
		break;
	}

	ret = ssd_smbus_write_byte(dev, saddr, command, smb_data);
	if (ret) {
		break;
	}

	break;
}

/* SMBus "read byte": send command code `command`, read one data byte,
 * copy it to the user buffer. */
case SSD_CMD_SMBUS_READ_BYTE: {
	struct ssd_smbus_op_info smbus_info;
	uint8_t smb_data[SSD_SMBUS_BLOCK_MAX];
	uint8_t saddr;
	uint8_t command;
	uint8_t size;

	if (copy_from_user(&smbus_info, argp, sizeof(struct ssd_smbus_op_info))) {
		hio_warn("%s: copy_from_user: failed\n", dev->name);
		ret = -EFAULT;
		break;
	}

	saddr = smbus_info.saddr;
	command = smbus_info.cmd;
	buf = smbus_info.buf;
	size = 1;	/* read-byte returns exactly one data byte */

	ret = ssd_smbus_read_byte(dev, saddr, command, smb_data);
	if (ret) {
		break;
	}

	if (copy_to_user(buf, smb_data, size)) {
		hio_warn("%s: copy_to_user: failed\n", dev->name);
		ret = -EFAULT;
		break;
	}

	break;
}
9158 | ||
/* SMBus "write word": send command code `command` plus two data bytes
 * taken from the user buffer. */
case SSD_CMD_SMBUS_WRITE_WORD: {
	struct ssd_smbus_op_info smbus_info;
	uint8_t smb_data[SSD_SMBUS_BLOCK_MAX];
	uint8_t saddr;
	uint8_t command;
	uint8_t size;

	if (copy_from_user(&smbus_info, argp, sizeof(struct ssd_smbus_op_info))) {
		hio_warn("%s: copy_from_user: failed\n", dev->name);
		ret = -EFAULT;
		break;
	}

	saddr = smbus_info.saddr;
	command = smbus_info.cmd;
	buf = smbus_info.buf;
	size = 2;	/* write-word carries exactly two data bytes */

	if (copy_from_user(smb_data, buf, size)) {
		hio_warn("%s: copy_from_user: failed\n", dev->name);
		ret = -EFAULT;
		break;
	}

	ret = ssd_smbus_write_word(dev, saddr, command, smb_data);
	if (ret) {
		break;
	}

	break;
}

/* SMBus "read word": send command code `command`, read two data bytes,
 * copy them to the user buffer. */
case SSD_CMD_SMBUS_READ_WORD: {
	struct ssd_smbus_op_info smbus_info;
	uint8_t smb_data[SSD_SMBUS_BLOCK_MAX];
	uint8_t saddr;
	uint8_t command;
	uint8_t size;

	if (copy_from_user(&smbus_info, argp, sizeof(struct ssd_smbus_op_info))) {
		hio_warn("%s: copy_from_user: failed\n", dev->name);
		ret = -EFAULT;
		break;
	}

	saddr = smbus_info.saddr;
	command = smbus_info.cmd;
	buf = smbus_info.buf;
	size = 2;	/* read-word returns exactly two data bytes */

	ret = ssd_smbus_read_word(dev, saddr, command, smb_data);
	if (ret) {
		break;
	}

	if (copy_to_user(buf, smb_data, size)) {
		hio_warn("%s: copy_to_user: failed\n", dev->name);
		ret = -EFAULT;
		break;
	}

	break;
}
9222 | ||
/* SMBus "block write": send command code `command` plus a user-supplied
 * block of up to SSD_SMBUS_BLOCK_MAX data bytes. */
case SSD_CMD_SMBUS_WRITE_BLOCK: {
	struct ssd_smbus_op_info smbus_info;
	uint8_t smb_data[SSD_SMBUS_BLOCK_MAX];
	uint8_t saddr;
	uint8_t command;
	uint8_t size;

	if (copy_from_user(&smbus_info, argp, sizeof(struct ssd_smbus_op_info))) {
		hio_warn("%s: copy_from_user: failed\n", dev->name);
		ret = -EFAULT;
		break;
	}

	saddr = smbus_info.saddr;
	command = smbus_info.cmd;
	buf = smbus_info.buf;
	size = smbus_info.size;

	/* Bound the user-supplied size by the on-stack buffer capacity. */
	if (size > SSD_SMBUS_BLOCK_MAX) {
		ret = -EINVAL;
		break;
	}

	if (copy_from_user(smb_data, buf, size)) {
		hio_warn("%s: copy_from_user: failed\n", dev->name);
		ret = -EFAULT;
		break;
	}

	ret = ssd_smbus_write_block(dev, saddr, command, size, smb_data);
	if (ret) {
		break;
	}

	break;
}

/* SMBus "block read": send command code `command`, read up to
 * SSD_SMBUS_BLOCK_MAX bytes, copy them to the user buffer. */
case SSD_CMD_SMBUS_READ_BLOCK: {
	struct ssd_smbus_op_info smbus_info;
	uint8_t smb_data[SSD_SMBUS_BLOCK_MAX];
	uint8_t saddr;
	uint8_t command;
	uint8_t size;

	if (copy_from_user(&smbus_info, argp, sizeof(struct ssd_smbus_op_info))) {
		hio_warn("%s: copy_from_user: failed\n", dev->name);
		ret = -EFAULT;
		break;
	}

	saddr = smbus_info.saddr;
	command = smbus_info.cmd;
	buf = smbus_info.buf;
	size = smbus_info.size;

	/* Bound the user-supplied size by the on-stack buffer capacity. */
	if (size > SSD_SMBUS_BLOCK_MAX) {
		ret = -EINVAL;
		break;
	}

	ret = ssd_smbus_read_block(dev, saddr, command, size, smb_data);
	if (ret) {
		break;
	}

	if (copy_to_user(buf, smb_data, size)) {
		hio_warn("%s: copy_to_user: failed\n", dev->name);
		ret = -EFAULT;
		break;
	}

	break;
}
9296 | ||
/* Query the battery-module firmware version and copy it out. */
case SSD_CMD_BM_GET_VER: {
	uint16_t ver;

	ret = ssd_bm_get_version(dev, &ver);
	if (ret) {
		break;
	}

	if (copy_to_user(argp, &ver, sizeof(uint16_t))) {
		hio_warn("%s: copy_to_user: failed\n", dev->name);
		ret = -EFAULT;
		break;
	}

	break;
}

/* Query the number of battery-module capacitors and copy it out. */
case SSD_CMD_BM_GET_NR_CAP: {
	int nr_cap;

	ret = ssd_bm_nr_cap(dev, &nr_cap);
	if (ret) {
		break;
	}

	if (copy_to_user(argp, &nr_cap, sizeof(int))) {
		hio_warn("%s: copy_to_user: failed\n", dev->name);
		ret = -EFAULT;
		break;
	}

	break;
}
9330 | ||
/* Kick off a battery-module capacitance-learning cycle. */
case SSD_CMD_BM_CAP_LEARNING: {
	ret = ssd_bm_enter_cap_learning(dev);

	if (ret) {
		break;
	}

	break;
}

/* Run capacitance learning and return the measured value. */
case SSD_CMD_CAP_LEARN: {
	uint32_t cap = 0;

	ret = ssd_cap_learn(dev, &cap);
	if (ret) {
		break;
	}

	if (copy_to_user(argp, &cap, sizeof(uint32_t))) {
		hio_warn("%s: copy_to_user: failed\n", dev->name);
		ret = -EFAULT;
		break;
	}

	break;
}

/* Report whether the power-loss capacitor is flagged healthy (1) or
 * not (0), based on the hwmon state bit. */
case SSD_CMD_GET_CAP_STATUS: {
	int cap_status = 0;

	if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) {
		cap_status = 1;
	}

	if (copy_to_user(argp, &cap_status, sizeof(int))) {
		hio_warn("%s: copy_to_user: failed\n", dev->name);
		ret = -EFAULT;
		break;
	}

	break;
}
9373 | ||
/* Read `length` bytes of controller RAM starting at `start`, in chunks
 * of at most hw_info.ram_max_len, copying each chunk to user space. */
case SSD_CMD_RAM_READ: {
	struct ssd_ram_op_info ram_info;
	uint64_t ofs;
	uint32_t length;
	size_t rlen, len = dev->hw_info.ram_max_len;
	int ctrl_idx;

	if (copy_from_user(&ram_info, argp, sizeof(struct ssd_ram_op_info))) {
		hio_warn("%s: copy_from_user: failed\n", dev->name);
		ret = -EFAULT;
		break;
	}

	ofs = ram_info.start;
	length = ram_info.length;
	buf = ram_info.buf;
	ctrl_idx = ram_info.ctrl_idx;

	/* Window [ofs, ofs+length) must lie inside RAM and be non-empty;
	 * ofs is 64-bit here so the addition does not wrap in practice. */
	if (ofs >= dev->hw_info.ram_size || length > dev->hw_info.ram_size || 0 == length || (ofs + length) > dev->hw_info.ram_size) {
		ret = -EINVAL;
		break;
	}

	kbuf = kmalloc(len, GFP_KERNEL);
	if (!kbuf) {
		ret = -ENOMEM;
		break;
	}

	/* Chunked copy loop; the final chunk is shortened to the remainder.
	 * On any failure `ret` is set and the loop exits early. */
	for (rlen=0; rlen<length; rlen+=len, buf+=len, ofs+=len) {
		if ((length - rlen) < len) {
			len = length - rlen;
		}

		ret = ssd_ram_read(dev, kbuf, len, ofs, ctrl_idx);
		if (ret) {
			break;
		}

		if (copy_to_user(buf, kbuf, len)) {
			ret = -EFAULT;
			break;
		}
	}

	kfree(kbuf);

	break;
}
9423 | ||
/* Write `length` bytes of controller RAM starting at `start`, in chunks
 * of at most hw_info.ram_max_len, staging each chunk from user space. */
case SSD_CMD_RAM_WRITE: {
	struct ssd_ram_op_info ram_info;
	uint64_t ofs;
	uint32_t length;
	size_t wlen, len = dev->hw_info.ram_max_len;
	int ctrl_idx;

	if (copy_from_user(&ram_info, argp, sizeof(struct ssd_ram_op_info))) {
		hio_warn("%s: copy_from_user: failed\n", dev->name);
		ret = -EFAULT;
		break;
	}
	ofs = ram_info.start;
	length = ram_info.length;
	buf = ram_info.buf;
	ctrl_idx = ram_info.ctrl_idx;

	/* Same window validation as SSD_CMD_RAM_READ. */
	if (ofs >= dev->hw_info.ram_size || length > dev->hw_info.ram_size || 0 == length || (ofs + length) > dev->hw_info.ram_size) {
		ret = -EINVAL;
		break;
	}

	kbuf = kmalloc(len, GFP_KERNEL);
	if (!kbuf) {
		ret = -ENOMEM;
		break;
	}

	/* Chunked write loop; final chunk shortened to the remainder. */
	for (wlen=0; wlen<length; wlen+=len, buf+=len, ofs+=len) {
		if ((length - wlen) < len) {
			len = length - wlen;
		}

		if (copy_from_user(kbuf, buf, len)) {
			ret = -EFAULT;
			break;
		}

		ret = ssd_ram_write(dev, kbuf, len, ofs, ctrl_idx);
		if (ret) {
			break;
		}
	}

	kfree(kbuf);

	break;
}
9472 | ||
/* Read the NAND chip ID for (flash, chip) on controller `ctrl_idx` and
 * copy hw_info.id_size bytes to user space.
 * NOTE(review): the buffer is allocated with SSD_NAND_ID_BUFF_SZ but
 * memset/copy use `length` (= hw_info.id_size) — confirm
 * id_size <= SSD_NAND_ID_BUFF_SZ always holds. */
case SSD_CMD_NAND_READ_ID: {
	struct ssd_flash_op_info flash_info;
	int chip_no, chip_ce, length, ctrl_idx;

	if (copy_from_user(&flash_info, argp, sizeof(struct ssd_flash_op_info))) {
		hio_warn("%s: copy_from_user: failed\n", dev->name);
		ret = -EFAULT;
		break;
	}

	chip_no = flash_info.flash;
	chip_ce = flash_info.chip;
	ctrl_idx = flash_info.ctrl_idx;
	buf = flash_info.buf;
	length = dev->hw_info.id_size;

	//kbuf = kmalloc(length, GFP_KERNEL);
	kbuf = kmalloc(SSD_NAND_ID_BUFF_SZ, GFP_KERNEL); //xx
	if (!kbuf) {
		ret = -ENOMEM;
		break;
	}
	memset(kbuf, 0, length);

	ret = ssd_nand_read_id(dev, kbuf, chip_no, chip_ce, ctrl_idx);
	if (ret) {
		kfree(kbuf);
		break;
	}

	if (copy_to_user(buf, kbuf, length)) {
		kfree(kbuf);
		ret = -EFAULT;
		break;
	}

	kfree(kbuf);

	break;
}
9513 | ||
/* Read one NAND page plus its OOB area.  An -EIO from the flash (e.g.
 * uncorrectable ECC) still copies the data out and is then re-reported
 * to the caller via `err`. */
case SSD_CMD_NAND_READ: { //with oob
	struct ssd_flash_op_info flash_info;
	uint32_t length;
	int flash, chip, page, ctrl_idx;
	int err = 0;

	if (copy_from_user(&flash_info, argp, sizeof(struct ssd_flash_op_info))) {
		hio_warn("%s: copy_from_user: failed\n", dev->name);
		ret = -EFAULT;
		break;
	}

	flash = flash_info.flash;
	chip = flash_info.chip;
	page = flash_info.page;
	buf = flash_info.buf;
	ctrl_idx = flash_info.ctrl_idx;

	/* page payload + out-of-band (spare) area */
	length = dev->hw_info.page_size + dev->hw_info.oob_size;

	kbuf = kmalloc(length, GFP_KERNEL);
	if (!kbuf) {
		ret = -ENOMEM;
		break;
	}

	/* Keep the original status in `err`; only non-EIO errors abort. */
	err = ret = ssd_nand_read_w_oob(dev, kbuf, flash, chip, page, 1, ctrl_idx);
	if (ret && (-EIO != ret)) {
		kfree(kbuf);
		break;
	}

	if (copy_to_user(buf, kbuf, length)) {
		kfree(kbuf);
		ret = -EFAULT;
		break;
	}

	/* Restore the flash status (possibly -EIO) as the final result. */
	ret = err;

	kfree(kbuf);
	break;
}
9557 | ||
/* Write one NAND page plus its OOB area from a user-supplied buffer. */
case SSD_CMD_NAND_WRITE: {
	struct ssd_flash_op_info flash_info;
	int flash, chip, page, ctrl_idx;
	uint32_t length;

	if (copy_from_user(&flash_info, argp, sizeof(struct ssd_flash_op_info))) {
		hio_warn("%s: copy_from_user: failed\n", dev->name);
		ret = -EFAULT;
		break;
	}

	flash = flash_info.flash;
	chip = flash_info.chip;
	page = flash_info.page;
	buf = flash_info.buf;
	ctrl_idx = flash_info.ctrl_idx;

	/* page payload + out-of-band (spare) area */
	length = dev->hw_info.page_size + dev->hw_info.oob_size;

	kbuf = kmalloc(length, GFP_KERNEL);
	if (!kbuf) {
		ret = -ENOMEM;
		break;
	}

	if (copy_from_user(kbuf, buf, length)) {
		kfree(kbuf);
		ret = -EFAULT;
		break;
	}

	ret = ssd_nand_write(dev, kbuf, flash, chip, page, 1, ctrl_idx);
	if (ret) {
		kfree(kbuf);
		break;
	}

	kfree(kbuf);
	break;
}
9598 | ||
/* Erase one NAND block.  `page` must be the first page of a block,
 * i.e. a multiple of hw_info.page_count (pages per block). */
case SSD_CMD_NAND_ERASE: {
	struct ssd_flash_op_info flash_info;
	int flash, chip, page, ctrl_idx;

	if (copy_from_user(&flash_info, argp, sizeof(struct ssd_flash_op_info))) {
		hio_warn("%s: copy_from_user: failed\n", dev->name);
		ret = -EFAULT;
		break;
	}

	flash = flash_info.flash;
	chip = flash_info.chip;
	page = flash_info.page;
	ctrl_idx = flash_info.ctrl_idx;

	/* Reject addresses that are not block-aligned. */
	if ((page % dev->hw_info.page_count) != 0) {
		ret = -EINVAL;
		break;
	}

	//hio_warn("erase fs = %llx\n", ofs);
	ret = ssd_nand_erase(dev, flash, chip, page, ctrl_idx);
	if (ret) {
		break;
	}

	break;
}
9627 | ||
/* Read one NAND page plus OOB, but treat -EIO as success so user space
 * can inspect pages with uncorrectable ECC (unlike SSD_CMD_NAND_READ,
 * which re-reports the -EIO after copying the data). */
case SSD_CMD_NAND_READ_EXT: { //ingore EIO
	struct ssd_flash_op_info flash_info;
	uint32_t length;
	int flash, chip, page, ctrl_idx;

	if (copy_from_user(&flash_info, argp, sizeof(struct ssd_flash_op_info))) {
		hio_warn("%s: copy_from_user: failed\n", dev->name);
		ret = -EFAULT;
		break;
	}

	flash = flash_info.flash;
	chip = flash_info.chip;
	page = flash_info.page;
	buf = flash_info.buf;
	ctrl_idx = flash_info.ctrl_idx;

	/* page payload + out-of-band (spare) area */
	length = dev->hw_info.page_size + dev->hw_info.oob_size;

	kbuf = kmalloc(length, GFP_KERNEL);
	if (!kbuf) {
		ret = -ENOMEM;
		break;
	}

	ret = ssd_nand_read_w_oob(dev, kbuf, flash, chip, page, 1, ctrl_idx);
	if (-EIO == ret) { //ingore EIO
		ret = 0;
	}
	if (ret) {
		kfree(kbuf);
		break;
	}

	if (copy_to_user(buf, kbuf, length)) {
		kfree(kbuf);
		ret = -EFAULT;
		break;
	}

	kfree(kbuf);
	break;
}
9671 | ||
/* Rebuild the bad-block table for one flash on one controller. */
case SSD_CMD_UPDATE_BBT: {
	struct ssd_flash_op_info flash_info;
	int ctrl_idx, flash;

	if (copy_from_user(&flash_info, argp, sizeof(struct ssd_flash_op_info))) {
		hio_warn("%s: copy_from_user: failed\n", dev->name);
		ret = -EFAULT;
		break;
	}

	ctrl_idx = flash_info.ctrl_idx;
	flash = flash_info.flash;
	ret = ssd_update_bbt(dev, flash, ctrl_idx);
	if (ret) {
		break;
	}

	break;
}
9691 | ||
/* Clear the device alarm indicator. */
case SSD_CMD_CLEAR_ALARM:
	ssd_clear_alarm(dev);
	break;

/* Raise the device alarm indicator. */
case SSD_CMD_SET_ALARM:
	ssd_set_alarm(dev);
	break;

/* Soft-reset the device. */
case SSD_CMD_RESET:
	ret = ssd_do_reset(dev);
	break;

/* Arm a firmware reload: set the flag and poke the reload register.
 * The register value written depends on the protocol version; protocols
 * older than V3_1_1 get no register write at all. */
case SSD_CMD_RELOAD_FW:
	dev->reload_fw = 1;
	dev->has_non_0x98_reg_access = 1;
	if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) {
		ssd_reg32_write(dev->ctrlp + SSD_RELOAD_FW_REG, SSD_RELOAD_FLAG);
	} else if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_1_1) {
		ssd_reg32_write(dev->ctrlp + SSD_RELOAD_FW_REG, SSD_RELOAD_FW);

	}
	break;
9714 | ||
/* Detach the block device: refuse while open handles exist, persist
 * SMART data, flush outstanding I/O, then tear down gendisk + queue
 * under gd_mutex. */
case SSD_CMD_UNLOAD_DEV: {
	/* Refuse while anyone still holds the device open. */
	if (atomic_read(&dev->refcnt)) {
		ret = -EBUSY;
		break;
	}

	/* save smart */
	ssd_save_smart(dev);

	ret = ssd_flush(dev);
	if (ret) {
		break;
	}

	/* cleanup the block device */
	if (test_and_clear_bit(SSD_INIT_BD, &dev->state)) {
		mutex_lock(&dev->gd_mutex);
		ssd_cleanup_blkdev(dev);
		ssd_cleanup_queue(dev);
		mutex_unlock(&dev->gd_mutex);
	}

	break;
}
9739 | ||
/* Re-attach the block device after SSD_CMD_UNLOAD_DEV: re-init SMART,
 * request queue and gendisk, then set the SSD_INIT_BD state bit. */
case SSD_CMD_LOAD_DEV: {

	/* Already attached — nothing to do. */
	if (test_bit(SSD_INIT_BD, &dev->state)) {
		ret = -EINVAL;
		break;
	}

	ret = ssd_init_smart(dev);
	if (ret) {
		hio_warn("%s: init info: failed\n", dev->name);
		break;
	}

	ret = ssd_init_queue(dev);
	if (ret) {
		hio_warn("%s: init queue failed\n", dev->name);
		break;
	}
	ret = ssd_init_blkdev(dev);
	if (ret) {
		hio_warn("%s: register block device: failed\n", dev->name);
		break;
	}
	(void)test_and_set_bit(SSD_INIT_BD, &dev->state);

	break;
}
9767 | ||
/* Change the "valid pages" setting.  Only allowed while the block device
 * is unloaded.  The hardware may refuse some values, so the loop probes
 * upward from the requested value until the register read-back matches;
 * the accepted value is reported back to user space and the cached
 * capacity is recomputed from it. */
case SSD_CMD_UPDATE_VP: {
	uint32_t val;
	uint32_t new_vp, new_vp1 = 0;

	/* Must be unloaded first (capacity would change under the disk). */
	if (test_bit(SSD_INIT_BD, &dev->state)) {
		ret = -EINVAL;
		break;
	}

	if (copy_from_user(&new_vp, argp, sizeof(uint32_t))) {
		hio_warn("%s: copy_from_user: failed\n", dev->name);
		ret = -EFAULT;
		break;
	}

	if (new_vp > dev->hw_info.max_valid_pages || new_vp <= 0) {
		ret = -EINVAL;
		break;
	}

	/* Probe upward: write the candidate, wait, read back.  The mask
	 * width of the read-back field depends on the protocol version. */
	while (new_vp <= dev->hw_info.max_valid_pages) {
		ssd_reg32_write(dev->ctrlp + SSD_VALID_PAGES_REG, new_vp);
		msleep(10);
		val = ssd_reg32_read(dev->ctrlp + SSD_VALID_PAGES_REG);
		if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
			new_vp1 = val & 0x3FF;
		} else {
			new_vp1 = val & 0x7FFF;
		}

		if (new_vp1 == new_vp) {
			break;
		}

		new_vp++;
		/*if (new_vp == dev->hw_info.valid_pages) {
			new_vp++;
		}*/
	}

	/* No candidate accepted — restore the previous setting. */
	if (new_vp1 != new_vp || new_vp > dev->hw_info.max_valid_pages) {
		/* restore */
		ssd_reg32_write(dev->ctrlp + SSD_VALID_PAGES_REG, dev->hw_info.valid_pages);
		ret = -EINVAL;
		break;
	}

	/* Report the value actually accepted (may exceed the request). */
	if (copy_to_user(argp, &new_vp, sizeof(uint32_t))) {
		hio_warn("%s: copy_to_user: failed\n", dev->name);
		ssd_reg32_write(dev->ctrlp + SSD_VALID_PAGES_REG, dev->hw_info.valid_pages);
		ret = -EFAULT;
		break;
	}

	/* new */
	dev->hw_info.valid_pages = new_vp;
	dev->hw_info.size = (uint64_t)dev->hw_info.valid_pages * dev->hw_info.page_size;
	dev->hw_info.size *= (dev->hw_info.block_count - dev->hw_info.reserved_blks);
	dev->hw_info.size *= ((uint64_t)dev->hw_info.nr_data_ch * (uint64_t)dev->hw_info.nr_chip * (uint64_t)dev->hw_info.nr_ctrl);

	break;
}
9830 | ||
/* Perform a full device reset. */
case SSD_CMD_FULL_RESET: {
	ret = ssd_full_reset(dev);
	break;
}

/* Return the number of internal log entries. */
case SSD_CMD_GET_NR_LOG: {
	if (copy_to_user(argp, &dev->internal_log.nr_log, sizeof(dev->internal_log.nr_log))) {
		ret = -EFAULT;
		break;
	}
	break;
}
9843 | ||
/* Copy the whole internal log buffer (rom_info.log_sz bytes) to user
 * space.  NOTE(review): the user buffer size is not communicated by
 * this ioctl — user space is assumed to provide at least log_sz bytes;
 * confirm against the tool that issues this command. */
case SSD_CMD_GET_LOG: {
	uint32_t length = dev->rom_info.log_sz;

	buf = argp;

	if (copy_to_user(buf, dev->internal_log.log, length)) {
		ret = -EFAULT;
		break;
	}

	break;
}

/* Set the driver's log verbosity; out-of-range values fall back to
 * SSD_LOG_LEVEL_ERR. */
case SSD_CMD_LOG_LEVEL: {
	int level = 0;
	if (copy_from_user(&level, argp, sizeof(int))) {
		hio_warn("%s: copy_from_user: failed\n", dev->name);
		ret = -EFAULT;
		break;
	}

	if (level >= SSD_LOG_NR_LEVEL || level < SSD_LOG_LEVEL_INFO) {
		level = SSD_LOG_LEVEL_ERR;
	}

	//just for showing log, no need to protect
	log_level = level;
	break;
}
9873 | ||
9874 | case SSD_CMD_OT_PROTECT: { | |
9875 | int protect = 0; | |
9876 | ||
9877 | if (copy_from_user(&protect, argp, sizeof(int))) { | |
9878 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9879 | ret = -EFAULT; | |
9880 | break; | |
9881 | } | |
9882 | ||
9883 | ssd_set_ot_protect(dev, !!protect); | |
9884 | break; | |
9885 | } | |
9886 | ||
9887 | case SSD_CMD_GET_OT_STATUS: { | |
9888 | int status = ssd_get_ot_status(dev, &status); | |
9889 | ||
9890 | if (copy_to_user(argp, &status, sizeof(int))) { | |
9891 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9892 | ret = -EFAULT; | |
9893 | break; | |
9894 | } | |
9895 | break; | |
9896 | } | |
9897 | ||
9898 | case SSD_CMD_CLEAR_LOG: { | |
9899 | ret = ssd_clear_log(dev); | |
9900 | break; | |
9901 | } | |
9902 | ||
9903 | case SSD_CMD_CLEAR_SMART: { | |
9904 | ret = ssd_clear_smart(dev); | |
9905 | break; | |
9906 | } | |
9907 | ||
9908 | case SSD_CMD_CLEAR_WARNING: { | |
9909 | ret = ssd_clear_warning(dev); | |
9910 | break; | |
9911 | } | |
9912 | ||
9913 | case SSD_CMD_SW_LOG: { | |
9914 | struct ssd_sw_log_info sw_log; | |
9915 | ||
9916 | if (copy_from_user(&sw_log, argp, sizeof(struct ssd_sw_log_info))) { | |
9917 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9918 | ret = -EFAULT; | |
9919 | break; | |
9920 | } | |
9921 | ||
9922 | ret = ssd_gen_swlog(dev, sw_log.event, sw_log.data); | |
9923 | break; | |
9924 | } | |
9925 | ||
9926 | case SSD_CMD_GET_LABEL: { | |
9927 | ||
9928 | if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) { | |
9929 | ret = -EINVAL; | |
9930 | break; | |
9931 | } | |
9932 | ||
9933 | if (copy_to_user(argp, &dev->label, sizeof(struct ssd_label))) { | |
9934 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9935 | ret = -EFAULT; | |
9936 | break; | |
9937 | } | |
9938 | break; | |
9939 | } | |
9940 | ||
9941 | case SSD_CMD_GET_VERSION: { | |
9942 | struct ssd_version_info ver; | |
9943 | ||
9944 | mutex_lock(&dev->fw_mutex); | |
9945 | ret = __ssd_get_version(dev, &ver); | |
9946 | mutex_unlock(&dev->fw_mutex); | |
9947 | if (ret) { | |
9948 | break; | |
9949 | } | |
9950 | ||
9951 | if (copy_to_user(argp, &ver, sizeof(struct ssd_version_info))) { | |
9952 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9953 | ret = -EFAULT; | |
9954 | break; | |
9955 | } | |
9956 | break; | |
9957 | } | |
9958 | ||
9959 | case SSD_CMD_GET_TEMPERATURE: { | |
9960 | int temp; | |
9961 | ||
9962 | mutex_lock(&dev->fw_mutex); | |
9963 | ret = __ssd_get_temperature(dev, &temp); | |
9964 | mutex_unlock(&dev->fw_mutex); | |
9965 | if (ret) { | |
9966 | break; | |
9967 | } | |
9968 | ||
9969 | if (copy_to_user(argp, &temp, sizeof(int))) { | |
9970 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9971 | ret = -EFAULT; | |
9972 | break; | |
9973 | } | |
9974 | break; | |
9975 | } | |
9976 | ||
9977 | case SSD_CMD_GET_BMSTATUS: { | |
9978 | int status; | |
9979 | ||
9980 | mutex_lock(&dev->fw_mutex); | |
9981 | if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) { | |
9982 | if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) { | |
9983 | status = SSD_BMSTATUS_WARNING; | |
9984 | } else { | |
9985 | status = SSD_BMSTATUS_OK; | |
9986 | } | |
9987 | } else if(dev->protocol_info.ver > SSD_PROTOCOL_V3) { | |
9988 | ret = __ssd_bm_status(dev, &status); | |
9989 | } else { | |
9990 | status = SSD_BMSTATUS_OK; | |
9991 | } | |
9992 | mutex_unlock(&dev->fw_mutex); | |
9993 | if (ret) { | |
9994 | break; | |
9995 | } | |
9996 | ||
9997 | if (copy_to_user(argp, &status, sizeof(int))) { | |
9998 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9999 | ret = -EFAULT; | |
10000 | break; | |
10001 | } | |
10002 | break; | |
10003 | } | |
10004 | ||
10005 | case SSD_CMD_GET_LABEL2: { | |
10006 | void *label; | |
10007 | int length; | |
10008 | ||
10009 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
10010 | label = &dev->label; | |
10011 | length = sizeof(struct ssd_label); | |
10012 | } else { | |
10013 | label = &dev->labelv3; | |
10014 | length = sizeof(struct ssd_labelv3); | |
10015 | } | |
10016 | ||
10017 | if (copy_to_user(argp, label, length)) { | |
10018 | ret = -EFAULT; | |
10019 | break; | |
10020 | } | |
10021 | break; | |
10022 | } | |
10023 | ||
10024 | case SSD_CMD_FLUSH: | |
10025 | ret = ssd_flush(dev); | |
10026 | if (ret) { | |
10027 | hio_warn("%s: ssd_flush: failed\n", dev->name); | |
10028 | ret = -EFAULT; | |
10029 | break; | |
10030 | } | |
10031 | break; | |
10032 | ||
10033 | case SSD_CMD_SAVE_MD: { | |
10034 | int save_md = 0; | |
10035 | ||
10036 | if (copy_from_user(&save_md, argp, sizeof(int))) { | |
10037 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
10038 | ret = -EFAULT; | |
10039 | break; | |
10040 | } | |
10041 | ||
10042 | dev->save_md = !!save_md; | |
10043 | break; | |
10044 | } | |
10045 | ||
10046 | case SSD_CMD_SET_WMODE: { | |
10047 | int new_wmode = 0; | |
10048 | ||
10049 | if (copy_from_user(&new_wmode, argp, sizeof(int))) { | |
10050 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
10051 | ret = -EFAULT; | |
10052 | break; | |
10053 | } | |
10054 | ||
10055 | ret = __ssd_set_wmode(dev, new_wmode); | |
10056 | if (ret) { | |
10057 | break; | |
10058 | } | |
10059 | ||
10060 | break; | |
10061 | } | |
10062 | ||
10063 | case SSD_CMD_GET_WMODE: { | |
10064 | if (copy_to_user(argp, &dev->wmode, sizeof(int))) { | |
10065 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
10066 | ret = -EFAULT; | |
10067 | break; | |
10068 | } | |
10069 | ||
10070 | break; | |
10071 | } | |
10072 | ||
10073 | case SSD_CMD_GET_USER_WMODE: { | |
10074 | if (copy_to_user(argp, &dev->user_wmode, sizeof(int))) { | |
10075 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
10076 | ret = -EFAULT; | |
10077 | break; | |
10078 | } | |
10079 | ||
10080 | break; | |
10081 | } | |
10082 | ||
10083 | case SSD_CMD_DEBUG: { | |
10084 | struct ssd_debug_info db_info; | |
10085 | ||
10086 | if (!finject) { | |
10087 | ret = -EOPNOTSUPP; | |
10088 | break; | |
10089 | } | |
10090 | ||
10091 | if (copy_from_user(&db_info, argp, sizeof(struct ssd_debug_info))) { | |
10092 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
10093 | ret = -EFAULT; | |
10094 | break; | |
10095 | } | |
10096 | ||
10097 | if (db_info.type < SSD_DEBUG_NONE || db_info.type >= SSD_DEBUG_NR) { | |
10098 | ret = -EINVAL; | |
10099 | break; | |
10100 | } | |
10101 | ||
10102 | /* IO */ | |
10103 | if (db_info.type >= SSD_DEBUG_READ_ERR && db_info.type <= SSD_DEBUG_RW_ERR && | |
10104 | (db_info.data.loc.off + db_info.data.loc.len) > (dev->hw_info.size >> 9)) { | |
10105 | ret = -EINVAL; | |
10106 | break; | |
10107 | } | |
10108 | ||
10109 | memcpy(&dev->db_info, &db_info, sizeof(struct ssd_debug_info)); | |
10110 | ||
10111 | #ifdef SSD_OT_PROTECT | |
10112 | /* temperature */ | |
10113 | if (db_info.type == SSD_DEBUG_NONE) { | |
10114 | ssd_check_temperature(dev, SSD_OT_TEMP); | |
10115 | } else if (db_info.type == SSD_DEBUG_LOG) { | |
10116 | if (db_info.data.log.event == SSD_LOG_OVER_TEMP) { | |
10117 | dev->ot_delay = SSD_OT_DELAY; | |
10118 | } else if (db_info.data.log.event == SSD_LOG_NORMAL_TEMP) { | |
10119 | dev->ot_delay = 0; | |
10120 | } | |
10121 | } | |
10122 | #endif | |
10123 | ||
10124 | /* offline */ | |
10125 | if (db_info.type == SSD_DEBUG_OFFLINE) { | |
10126 | test_and_clear_bit(SSD_ONLINE, &dev->state); | |
10127 | } else if (db_info.type == SSD_DEBUG_NONE) { | |
10128 | (void)test_and_set_bit(SSD_ONLINE, &dev->state); | |
10129 | } | |
10130 | ||
10131 | /* log */ | |
10132 | if (db_info.type == SSD_DEBUG_LOG && dev->event_call && dev->gd) { | |
10133 | dev->event_call(dev->gd, db_info.data.log.event, 0); | |
10134 | } | |
10135 | ||
10136 | break; | |
10137 | } | |
10138 | ||
10139 | case SSD_CMD_DRV_PARAM_INFO: { | |
10140 | struct ssd_drv_param_info drv_param; | |
10141 | ||
10142 | memset(&drv_param, 0, sizeof(struct ssd_drv_param_info)); | |
10143 | ||
10144 | drv_param.mode = mode; | |
10145 | drv_param.status_mask = status_mask; | |
10146 | drv_param.int_mode = int_mode; | |
10147 | drv_param.threaded_irq = threaded_irq; | |
10148 | drv_param.log_level = log_level; | |
10149 | drv_param.wmode = wmode; | |
10150 | drv_param.ot_protect = ot_protect; | |
10151 | drv_param.finject = finject; | |
10152 | ||
10153 | if (copy_to_user(argp, &drv_param, sizeof(struct ssd_drv_param_info))) { | |
10154 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
10155 | ret = -EFAULT; | |
10156 | break; | |
10157 | } | |
10158 | break; | |
10159 | } | |
10160 | ||
10161 | default: | |
10162 | ret = -EINVAL; | |
10163 | break; | |
10164 | } | |
10165 | ||
10166 | return ret; | |
10167 | } | |
10168 | ||
10169 | ||
10170 | #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27)) | |
10171 | static int ssd_block_ioctl(struct inode *inode, struct file *file, | |
10172 | unsigned int cmd, unsigned long arg) | |
10173 | { | |
10174 | struct ssd_device *dev; | |
10175 | void __user *argp = (void __user *)arg; | |
10176 | int ret = 0; | |
10177 | ||
10178 | if (!inode) { | |
10179 | return -EINVAL; | |
10180 | } | |
10181 | dev = inode->i_bdev->bd_disk->private_data; | |
10182 | if (!dev) { | |
10183 | return -EINVAL; | |
10184 | } | |
10185 | #else | |
10186 | static int ssd_block_ioctl(struct block_device *bdev, fmode_t mode, | |
10187 | unsigned int cmd, unsigned long arg) | |
10188 | { | |
10189 | struct ssd_device *dev; | |
10190 | void __user *argp = (void __user *)arg; | |
10191 | int ret = 0; | |
10192 | ||
10193 | if (!bdev) { | |
10194 | return -EINVAL; | |
10195 | } | |
10196 | ||
10197 | dev = bdev->bd_disk->private_data; | |
10198 | if (!dev) { | |
10199 | return -EINVAL; | |
10200 | } | |
10201 | #endif | |
10202 | ||
10203 | switch (cmd) { | |
10204 | case HDIO_GETGEO: { | |
10205 | struct hd_geometry geo; | |
10206 | geo.cylinders = (dev->hw_info.size & ~0x3f) >> 6; | |
10207 | geo.heads = 4; | |
10208 | geo.sectors = 16; | |
10209 | #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27)) | |
10210 | geo.start = get_start_sect(inode->i_bdev); | |
10211 | #else | |
10212 | geo.start = get_start_sect(bdev); | |
10213 | #endif | |
10214 | if (copy_to_user(argp, &geo, sizeof(geo))) { | |
10215 | ret = -EFAULT; | |
10216 | break; | |
10217 | } | |
10218 | ||
10219 | break; | |
10220 | } | |
10221 | ||
10222 | case BLKFLSBUF: | |
10223 | ret = ssd_flush(dev); | |
10224 | if (ret) { | |
10225 | hio_warn("%s: ssd_flush: failed\n", dev->name); | |
10226 | ret = -EFAULT; | |
10227 | break; | |
10228 | } | |
10229 | break; | |
10230 | ||
10231 | default: | |
10232 | if (!dev->slave) { | |
10233 | ret = ssd_ioctl_common(dev, cmd, arg); | |
10234 | } else { | |
10235 | ret = -EFAULT; | |
10236 | } | |
10237 | break; | |
10238 | } | |
10239 | ||
10240 | return ret; | |
10241 | } | |
10242 | ||
10243 | ||
10244 | static void ssd_free_dev(struct kref *kref) | |
10245 | { | |
10246 | struct ssd_device *dev; | |
10247 | ||
10248 | if (!kref) { | |
10249 | return; | |
10250 | } | |
10251 | ||
10252 | dev = container_of(kref, struct ssd_device, kref); | |
10253 | ||
10254 | put_disk(dev->gd); | |
10255 | ||
10256 | ssd_put_index(dev->slave, dev->idx); | |
10257 | ||
10258 | kfree(dev); | |
10259 | } | |
10260 | ||
/* Drop one reference on @dev; ssd_free_dev() runs on the last put. */
static void ssd_put(struct ssd_device *dev)
{
	kref_put(&dev->kref, ssd_free_dev);
}
10265 | ||
/* Take one extra reference on @dev. Always succeeds and returns 0. */
static int ssd_get(struct ssd_device *dev)
{
	kref_get(&dev->kref);
	return 0;
}
10271 | ||
10272 | /* block device */ | |
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
/*
 * Block-device open (legacy pre-2.6.28 prototype): resolve the
 * ssd_device behind the gendisk, take a kref and bump the open count.
 */
static int ssd_block_open(struct inode *inode, struct file *filp)
{
	struct ssd_device *dev;

	if (!inode) {
		return -EINVAL;
	}

	dev = inode->i_bdev->bd_disk->private_data;
	if (!dev) {
		return -EINVAL;
	}
#else
/*
 * Block-device open (2.6.28+ prototype): resolve the ssd_device behind
 * the gendisk, take a kref and bump the open count.
 */
static int ssd_block_open(struct block_device *bdev, fmode_t mode)
{
	struct ssd_device *dev;

	if (!bdev) {
		return -EINVAL;
	}

	dev = bdev->bd_disk->private_data;
	if (!dev) {
		return -EINVAL;
	}
#endif

	/*if (!try_module_get(dev->owner))
		return -ENODEV;
	*/

	/* lifetime reference plus a separate open counter (see ssd_block_release) */
	ssd_get(dev);

	atomic_inc(&dev->refcnt);

	return 0;
}
10311 | ||
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
/*
 * Block-device release (legacy pre-2.6.28 prototype): undo the open
 * count and drop the kref taken in ssd_block_open().
 */
static int ssd_block_release(struct inode *inode, struct file *filp)
{
	struct ssd_device *dev;

	if (!inode) {
		return -EINVAL;
	}

	dev = inode->i_bdev->bd_disk->private_data;
	if (!dev) {
		return -EINVAL;
	}
#elif (LINUX_VERSION_CODE <= KERNEL_VERSION(3,9,0))
/*
 * Block-device release (2.6.28 .. 3.9 prototype, int return): undo the
 * open count and drop the kref taken in ssd_block_open().
 */
static int ssd_block_release(struct gendisk *disk, fmode_t mode)
{
	struct ssd_device *dev;

	if (!disk) {
		return -EINVAL;
	}

	dev = disk->private_data;
	if (!dev) {
		return -EINVAL;
	}
#else
/*
 * Block-device release (post-3.9 prototype, void return): undo the
 * open count and drop the kref taken in ssd_block_open().
 */
static void ssd_block_release(struct gendisk *disk, fmode_t mode)
{
	struct ssd_device *dev;

	if (!disk) {
		return;
	}

	dev = disk->private_data;
	if (!dev) {
		return;
	}
#endif

	atomic_dec(&dev->refcnt);

	ssd_put(dev);

	//module_put(dev->owner);
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3,9,0))
	return 0;
#endif
}
10362 | ||
/* Block-device operations table registered via the gendisk (ssd_init_blkdev). */
static struct block_device_operations ssd_fops = {
	.owner = THIS_MODULE,
	.open = ssd_block_open,
	.release = ssd_block_release,
	.ioctl = ssd_block_ioctl,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16))
	.getgeo = ssd_block_getgeo,
#endif
};
10372 | ||
/*
 * Enable and size DISCARD (TRIM) support on the request queue.
 * No-op unless the driver was built with SSD_TRIM on kernel >= 2.6.32,
 * and only active for protocol versions newer than V3.
 */
static void ssd_init_trim(ssd_device_t *dev)
{
#if (defined SSD_TRIM && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)))
	if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) {
		return;
	}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,17,0))
	blk_queue_flag_set(QUEUE_FLAG_DISCARD, dev->rq);
#else
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, dev->rq);
#endif

#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33)) || (defined RHEL_MAJOR && RHEL_MAJOR >= 6))
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0))
	dev->rq->limits.discard_zeroes_data = 1;
#endif
	/* device discards in 4 KiB units */
	dev->rq->limits.discard_alignment = 4096;
	dev->rq->limits.discard_granularity = 4096;
#endif
	/* newer protocols can discard a full multi-SG command at once */
	if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2_4) {
		dev->rq->limits.max_discard_sectors = dev->hw_info.sg_max_sec;
	} else {
		dev->rq->limits.max_discard_sectors = (dev->hw_info.sg_max_sec) * (dev->hw_info.cmd_max_sg);
	}
#endif
}
10399 | ||
/*
 * Tear down the request queue: wait for in-flight I/O to drain first,
 * then release the queue and clear the pointer.
 */
static void ssd_cleanup_queue(struct ssd_device *dev)
{
	ssd_wait_io(dev);

	blk_cleanup_queue(dev->rq);
	dev->rq = NULL;
}
10407 | ||
/*
 * Allocate and configure the bio-based request queue: install
 * ssd_make_request, apply the hardware SG/sector limits, set a 512-byte
 * logical block size, mark the queue non-rotational and set up TRIM.
 * Returns 0 on success, -ENOMEM if the queue cannot be allocated.
 */
static int ssd_init_queue(struct ssd_device *dev)
{
	dev->rq = blk_alloc_queue(GFP_KERNEL);
	if (dev->rq == NULL) {
		hio_warn("%s: alloc queue: failed\n ", dev->name);
		goto out_init_queue;
	}

	/* must be first */
	blk_queue_make_request(dev->rq, ssd_make_request);

#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34)) && !(defined RHEL_MAJOR && RHEL_MAJOR == 6))
	blk_queue_max_hw_segments(dev->rq, dev->hw_info.cmd_max_sg);
	blk_queue_max_phys_segments(dev->rq, dev->hw_info.cmd_max_sg);
	blk_queue_max_sectors(dev->rq, dev->hw_info.sg_max_sec);
#else
	blk_queue_max_segments(dev->rq, dev->hw_info.cmd_max_sg);
	blk_queue_max_hw_sectors(dev->rq, dev->hw_info.sg_max_sec);
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
	blk_queue_hardsect_size(dev->rq, 512);
#else
	blk_queue_logical_block_size(dev->rq, 512);
#endif
	/* not work for make_request based drivers(bio) */
	blk_queue_max_segment_size(dev->rq, dev->hw_info.sg_max_sec << 9);

	blk_queue_bounce_limit(dev->rq, BLK_BOUNCE_HIGH);

	dev->rq->queuedata = dev;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
	blk_queue_issue_flush_fn(dev->rq, ssd_issue_flush_fn);
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,17,0))
	blk_queue_flag_set(QUEUE_FLAG_NONROT, dev->rq);
#else
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, dev->rq);
#endif
#endif

	ssd_init_trim(dev);

	return 0;

out_init_queue:
	return -ENOMEM;
}
10459 | ||
/* Unregister the gendisk; the disk itself is put in ssd_free_dev(). */
static void ssd_cleanup_blkdev(struct ssd_device *dev)
{
	del_gendisk(dev->gd);
}
10464 | ||
/*
 * Allocate and register the gendisk for this device: set major/minor,
 * fops, queue and capacity, then add the disk to the system.
 * Any previously held gendisk is released first (re-init path).
 * Returns 0 on success, -ENOMEM if alloc_disk() fails.
 */
static int ssd_init_blkdev(struct ssd_device *dev)
{
	if (dev->gd) {
		put_disk(dev->gd);
	}

	dev->gd = alloc_disk(ssd_minors);
	if (!dev->gd) {
		hio_warn("%s: alloc_disk fail\n", dev->name);
		goto out_alloc_gd;
	}
	dev->gd->major = dev->major;
	dev->gd->first_minor = dev->idx * ssd_minors;
	dev->gd->fops = &ssd_fops;
	dev->gd->queue = dev->rq;
	dev->gd->private_data = dev;

	snprintf (dev->gd->disk_name, sizeof(dev->gd->disk_name), "%s", dev->name);

	/* hw_info.size is in bytes; capacity is in 512-byte sectors */
	set_capacity(dev->gd, dev->hw_info.size >> 9);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,20,0))
	device_add_disk(&dev->pdev->dev, dev->gd, NULL);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0))
	device_add_disk(&dev->pdev->dev, dev->gd);
#else
	dev->gd->driverfs_dev = &dev->pdev->dev;
	add_disk(dev->gd);
#endif

	return 0;

out_alloc_gd:
	return -ENOMEM;
}
10500 | ||
/*
 * Char-device ioctl: validates the file's private ssd_device and hands
 * the command to ssd_ioctl_common(). Old kernels (<= 2.6.10) use the
 * BKL-style ioctl prototype, newer ones the unlocked_ioctl prototype.
 */
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,10))
static int ssd_ioctl(struct inode *inode, struct file *file, 
	unsigned int cmd, unsigned long arg)
#else
static long ssd_ioctl(struct file *file, 
	unsigned int cmd, unsigned long arg)
#endif
{
	struct ssd_device *dev;

	if (!file) {
		return -EINVAL;
	}

	dev = file->private_data;
	if (!dev) {
		return -EINVAL;
	}

	return (long)ssd_ioctl_common(dev, cmd, arg);
}
10522 | ||
10523 | static int ssd_open(struct inode *inode, struct file *file) | |
10524 | { | |
10525 | struct ssd_device *dev = NULL; | |
10526 | struct ssd_device *n = NULL; | |
10527 | int idx; | |
10528 | int ret = -ENODEV; | |
10529 | ||
10530 | if (!inode || !file) { | |
10531 | return -EINVAL; | |
10532 | } | |
10533 | ||
10534 | idx = iminor(inode); | |
10535 | ||
10536 | list_for_each_entry_safe(dev, n, &ssd_list, list) { | |
10537 | if (dev->idx == idx) { | |
10538 | ret = 0; | |
10539 | break; | |
10540 | } | |
10541 | } | |
10542 | ||
10543 | if (ret) { | |
10544 | return ret; | |
10545 | } | |
10546 | ||
10547 | file->private_data = dev; | |
10548 | ||
10549 | ssd_get(dev); | |
10550 | ||
10551 | return 0; | |
10552 | } | |
10553 | ||
10554 | static int ssd_release(struct inode *inode, struct file *file) | |
10555 | { | |
10556 | struct ssd_device *dev; | |
10557 | ||
10558 | if (!file) { | |
10559 | return -EINVAL; | |
10560 | } | |
10561 | ||
10562 | dev = file->private_data; | |
10563 | if (!dev) { | |
10564 | return -EINVAL; | |
10565 | } | |
10566 | ||
10567 | ssd_put(dev); | |
10568 | ||
10569 | file->private_data = NULL; | |
10570 | ||
10571 | return 0; | |
10572 | } | |
10573 | ||
/*
 * Re-program the DMA base registers after a reset: response pointers
 * are reset first, then the message base (protocol >= V3 only) and the
 * response FIFO/pointer bases are written back to the controller.
 */
static int ssd_reload_ssd_ptr(struct ssd_device *dev)
{
	ssd_reset_resp_ptr(dev);

	//update base reg address
	if (dev->protocol_info.ver >= SSD_PROTOCOL_V3) {

		ssd_reg_write(dev->ctrlp + SSD_MSG_BASE_REG, dev->msg_base_dma);
	}

	//update response base reg address
	ssd_reg_write(dev->ctrlp + SSD_RESP_FIFO_REG, dev->resp_msg_base_dma);
	ssd_reg_write(dev->ctrlp + SSD_RESP_PTR_REG, dev->resp_ptr_base_dma);

	return 0;
}
10590 | ||
/* Char-device (control node) operations; ioctl style depends on kernel age. */
static struct file_operations ssd_cfops = {
	.owner = THIS_MODULE,
	.open = ssd_open,
	.release = ssd_release,
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,10))
	.ioctl = ssd_ioctl,
#else
	.unlocked_ioctl = ssd_ioctl,
#endif
};
10601 | ||
/*
 * Remove the control char-device node ("c<name>") for a master device.
 * Slave devices never get a control node, so nothing to do for them.
 * The API used (class_simple/class_device/device + devfs) depends on
 * kernel version.
 */
static void ssd_cleanup_chardev(struct ssd_device *dev)
{
	if (dev->slave) {
		return;
	}

#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
	class_simple_device_remove(MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx));
	devfs_remove("c%s", dev->name);
#elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,14))
	class_device_destroy(ssd_class, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx));
	devfs_remove("c%s", dev->name);
#elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17))
	class_device_destroy(ssd_class, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx));
	devfs_remove("c%s", dev->name);
#elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,24))
	class_device_destroy(ssd_class, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx));
#else
	device_destroy(ssd_class, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx));
#endif
}
10623 | ||
/*
 * Create the control char-device node ("c<name>") for a master device;
 * slaves get none. The creation API (devfs + class_simple /
 * class_device / device_create*) is selected by kernel version.
 * Returns 0 on success or the devfs_mk_cdev() error on old kernels.
 */
static int ssd_init_chardev(struct ssd_device *dev)
{
	int ret = 0;

	if (dev->slave) {
		return 0;
	}

#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
	ret = devfs_mk_cdev(MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx), S_IFCHR|S_IRUSR|S_IWUSR, "c%s", dev->name);
	if (ret) {
		goto out;
	}
	class_simple_device_add(ssd_class, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx), NULL, "c%s", dev->name);
out:
#elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,14))
	ret = devfs_mk_cdev(MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx), S_IFCHR|S_IRUSR|S_IWUSR, "c%s", dev->name);
	if (ret) {
		goto out;
	}
	class_device_create(ssd_class, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx), NULL, "c%s", dev->name);
out:
#elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17))
	ret = devfs_mk_cdev(MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx), S_IFCHR|S_IRUSR|S_IWUSR, "c%s", dev->name);
	if (ret) {
		goto out;
	}
	class_device_create(ssd_class, NULL, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx), NULL, "c%s", dev->name);
out:
#elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,24))
	class_device_create(ssd_class, NULL, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx), NULL, "c%s", dev->name);
#elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
	device_create(ssd_class, NULL, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx), "c%s", dev->name);
#elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
	device_create_drvdata(ssd_class, NULL, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx), NULL, "c%s", dev->name);
#else
	device_create(ssd_class, NULL, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx), NULL, "c%s", dev->name);
#endif

	return ret;
}
10665 | ||
10666 | static int ssd_check_hw(struct ssd_device *dev) | |
10667 | { | |
10668 | uint32_t test_data = 0x55AA5AA5; | |
10669 | uint32_t read_data; | |
10670 | ||
10671 | ssd_reg32_write(dev->ctrlp + SSD_BRIDGE_TEST_REG, test_data); | |
10672 | read_data = ssd_reg32_read(dev->ctrlp + SSD_BRIDGE_TEST_REG); | |
10673 | if (read_data != ~(test_data)) { | |
10674 | //hio_warn("%s: check bridge error: %#x\n", dev->name, read_data); | |
10675 | return -1; | |
10676 | } | |
10677 | ||
10678 | return 0; | |
10679 | } | |
10680 | ||
10681 | static int ssd_check_fw(struct ssd_device *dev) | |
10682 | { | |
10683 | uint32_t val = 0; | |
10684 | int i; | |
10685 | ||
10686 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_1_3) { | |
10687 | return 0; | |
10688 | } | |
10689 | ||
10690 | for (i=0; i<SSD_CONTROLLER_WAIT; i++) { | |
10691 | val = ssd_reg32_read(dev->ctrlp + SSD_HW_STATUS_REG); | |
10692 | if ((val & 0x1) && ((val >> 8) & 0x1)) { | |
10693 | break; | |
10694 | } | |
10695 | ||
10696 | msleep(SSD_INIT_WAIT); | |
10697 | } | |
10698 | ||
10699 | if (!(val & 0x1)) { | |
10700 | /* controller fw status */ | |
10701 | hio_warn("%s: controller firmware load failed: %#x\n", dev->name, val); | |
10702 | return -1; | |
10703 | } else if (!((val >> 8) & 0x1)) { | |
10704 | /* controller state */ | |
10705 | hio_warn("%s: controller state error: %#x\n", dev->name, val); | |
10706 | return -1; | |
10707 | } | |
10708 | ||
10709 | val = ssd_reg32_read(dev->ctrlp + SSD_RELOAD_FW_REG); | |
10710 | if (val) { | |
10711 | dev->reload_fw = 1; | |
10712 | } | |
10713 | ||
10714 | return 0; | |
10715 | } | |
10716 | ||
10717 | static int ssd_init_fw_info(struct ssd_device *dev) | |
10718 | { | |
10719 | uint32_t val; | |
10720 | int ret = 0; | |
10721 | ||
10722 | val = ssd_reg32_read(dev->ctrlp + SSD_BRIDGE_VER_REG); | |
10723 | dev->hw_info.bridge_ver = val & 0xFFF; | |
10724 | if (dev->hw_info.bridge_ver < SSD_FW_MIN) { | |
10725 | hio_warn("%s: bridge firmware version %03X is not supported\n", dev->name, dev->hw_info.bridge_ver); | |
10726 | return -EINVAL; | |
10727 | } | |
10728 | hio_info("%s: bridge firmware version: %03X\n", dev->name, dev->hw_info.bridge_ver); | |
10729 | ||
10730 | ret = ssd_check_fw(dev); | |
10731 | if (ret) { | |
10732 | goto out; | |
10733 | } | |
10734 | ||
10735 | out: | |
10736 | /* skip error if not in standard mode */ | |
10737 | if (mode != SSD_DRV_MODE_STANDARD) { | |
10738 | ret = 0; | |
10739 | } | |
10740 | return ret; | |
10741 | } | |
10742 | ||
/*
 * Check the board clock status bits in the HW status register
 * (protocol >= V3.1.3; bits 5-7 only exist on >= V3.2):
 *   bit 4: 166 MHz clock present, bit 5: 166 MHz clock skew OK,
 *   bit 6: 156.25 MHz clock present, bit 7: 156.25 MHz clock skew OK.
 * Each fault is warned/logged only once (latched in dev->hwmon via
 * test_and_set_bit). Returns 0 when all clocks are good, -1 otherwise.
 */
static int ssd_check_clock(struct ssd_device *dev)
{
	uint32_t val;
	int ret = 0;

	if (dev->protocol_info.ver < SSD_PROTOCOL_V3_1_3) {
		return 0;
	}

	val = ssd_reg32_read(dev->ctrlp + SSD_HW_STATUS_REG);

	/* clock status */
	if (!((val >> 4 ) & 0x1)) {
		if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_166M_LOST), &dev->hwmon)) {
			hio_warn("%s: 166MHz clock losed: %#x\n", dev->name, val);
			ssd_gen_swlog(dev, SSD_LOG_CLK_FAULT, val);
		}
		ret = -1;
	}

	if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) {
		if (!((val >> 5 ) & 0x1)) {
			if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_166M_SKEW), &dev->hwmon)) {
				hio_warn("%s: 166MHz clock is skew: %#x\n", dev->name, val);
				ssd_gen_swlog(dev, SSD_LOG_CLK_FAULT, val);
			}
			ret = -1;
		}
		if (!((val >> 6 ) & 0x1)) {
			if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_156M_LOST), &dev->hwmon)) {
				hio_warn("%s: 156.25MHz clock lost: %#x\n", dev->name, val);
				ssd_gen_swlog(dev, SSD_LOG_CLK_FAULT, val);
			}
			ret = -1;
		}
		if (!((val >> 7 ) & 0x1)) {
			if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_156M_SKEW), &dev->hwmon)) {
				hio_warn("%s: 156.25MHz clock is skew: %#x\n", dev->name, val);
				ssd_gen_swlog(dev, SSD_LOG_CLK_FAULT, val);
			}
			ret = -1;
		}
	}

	return ret;
}
10789 | ||
/*
 * Check the per-controller FPGA supply rails (protocol >= V3.2 only).
 * For each controller, the min and max ADC samples of the 1.0V and 1.8V
 * rails are read and compared against their allowed ADC windows. A rail
 * is checked only until its first fault (latched in dev->hwmon), so each
 * fault is warned/logged once. Returns 0 when all rails are in range,
 * or a negative code identifying the last failing check (-1/-2 for 1.0V
 * max/min, -3/-4 for 1.8V max/min).
 */
static int ssd_check_volt(struct ssd_device *dev)
{
	int i = 0;
	uint64_t val;
	uint32_t adc_val;
	int ret =0;

	if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
		return 0;
	}

	for (i=0; i<dev->hw_info.nr_ctrl; i++) {
		/* 1.0v */
		if (!test_bit(SSD_HWMON_FPGA(i, SSD_FPGA_1V0), &dev->hwmon)) {
			val = ssd_reg_read(dev->ctrlp + SSD_FPGA_1V0_REG0 + i * SSD_CTRL_REG_ZONE_SZ);
			adc_val = SSD_FPGA_VOLT_MAX(val);
			if (adc_val < SSD_FPGA_1V0_ADC_MIN || adc_val > SSD_FPGA_1V0_ADC_MAX) {
				(void)test_and_set_bit(SSD_HWMON_FPGA(i, SSD_FPGA_1V0), &dev->hwmon);
				hio_warn("%s: controller %d 1.0V fault: %d mV.\n", dev->name, i, SSD_FPGA_VOLT(adc_val));
				ssd_gen_swlog(dev, SSD_LOG_VOLT_FAULT, SSD_VOLT_LOG_DATA(SSD_FPGA_1V0, i, adc_val));
				ret = -1;
			}

			adc_val = SSD_FPGA_VOLT_MIN(val);
			if (adc_val < SSD_FPGA_1V0_ADC_MIN || adc_val > SSD_FPGA_1V0_ADC_MAX) {
				(void)test_and_set_bit(SSD_HWMON_FPGA(i, SSD_FPGA_1V0), &dev->hwmon);
				hio_warn("%s: controller %d 1.0V fault: %d mV.\n", dev->name, i, SSD_FPGA_VOLT(adc_val));
				ssd_gen_swlog(dev, SSD_LOG_VOLT_FAULT, SSD_VOLT_LOG_DATA(SSD_FPGA_1V0, i, adc_val));
				ret = -2;
			}
		}

		/* 1.8v */
		if (!test_bit(SSD_HWMON_FPGA(i, SSD_FPGA_1V8), &dev->hwmon)) {
			val = ssd_reg_read(dev->ctrlp + SSD_FPGA_1V8_REG0 + i * SSD_CTRL_REG_ZONE_SZ);
			adc_val = SSD_FPGA_VOLT_MAX(val);
			if (adc_val < SSD_FPGA_1V8_ADC_MIN || adc_val > SSD_FPGA_1V8_ADC_MAX) {
				(void)test_and_set_bit(SSD_HWMON_FPGA(i, SSD_FPGA_1V8), &dev->hwmon);
				hio_warn("%s: controller %d 1.8V fault: %d mV.\n", dev->name, i, SSD_FPGA_VOLT(adc_val));
				ssd_gen_swlog(dev, SSD_LOG_VOLT_FAULT, SSD_VOLT_LOG_DATA(SSD_FPGA_1V8, i, adc_val));
				ret = -3;
			}

			adc_val = SSD_FPGA_VOLT_MIN(val);
			if (adc_val < SSD_FPGA_1V8_ADC_MIN || adc_val > SSD_FPGA_1V8_ADC_MAX) {
				(void)test_and_set_bit(SSD_HWMON_FPGA(i, SSD_FPGA_1V8), &dev->hwmon);
				hio_warn("%s: controller %d 1.8V fault: %d mV.\n", dev->name, i, SSD_FPGA_VOLT(adc_val));
				ssd_gen_swlog(dev, SSD_LOG_VOLT_FAULT, SSD_VOLT_LOG_DATA(SSD_FPGA_1V8, i, adc_val));
				ret = -4;
			}
		}
	}

	return ret;
}
10845 | ||
10846 | static int ssd_check_reset_sync(struct ssd_device *dev) | |
10847 | { | |
10848 | uint32_t val; | |
10849 | ||
10850 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_1_3) { | |
10851 | return 0; | |
10852 | } | |
10853 | ||
10854 | val = ssd_reg32_read(dev->ctrlp + SSD_HW_STATUS_REG); | |
10855 | if (!((val >> 8) & 0x1)) { | |
10856 | /* controller state */ | |
10857 | hio_warn("%s: controller state error: %#x\n", dev->name, val); | |
10858 | return -1; | |
10859 | } | |
10860 | ||
10861 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
10862 | return 0; | |
10863 | } | |
10864 | ||
10865 | if (((val >> 9 ) & 0x1)) { | |
10866 | hio_warn("%s: controller reset asynchronously: %#x\n", dev->name, val); | |
10867 | ssd_gen_swlog(dev, SSD_LOG_CTRL_RST_SYNC, val); | |
10868 | return -1; | |
10869 | } | |
10870 | ||
10871 | return 0; | |
10872 | } | |
10873 | ||
10874 | static int ssd_check_hw_bh(struct ssd_device *dev) | |
10875 | { | |
10876 | int ret; | |
10877 | ||
10878 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_1_3) { | |
10879 | return 0; | |
10880 | } | |
10881 | ||
10882 | /* clock status */ | |
10883 | ret = ssd_check_clock(dev); | |
10884 | if (ret) { | |
10885 | goto out; | |
10886 | } | |
10887 | ||
10888 | out: | |
10889 | /* skip error if not in standard mode */ | |
10890 | if (mode != SSD_DRV_MODE_STANDARD) { | |
10891 | ret = 0; | |
10892 | } | |
10893 | return ret; | |
10894 | } | |
10895 | ||
10896 | static int ssd_check_controller(struct ssd_device *dev) | |
10897 | { | |
10898 | int ret; | |
10899 | ||
10900 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_1_3) { | |
10901 | return 0; | |
10902 | } | |
10903 | ||
10904 | /* sync reset */ | |
10905 | ret = ssd_check_reset_sync(dev); | |
10906 | if (ret) { | |
10907 | goto out; | |
10908 | } | |
10909 | ||
10910 | out: | |
10911 | /* skip error if not in standard mode */ | |
10912 | if (mode != SSD_DRV_MODE_STANDARD) { | |
10913 | ret = 0; | |
10914 | } | |
10915 | return ret; | |
10916 | } | |
10917 | ||
/*
 * Bottom-half controller probe, standard mode only:
 *   - controller 0 readiness flag,
 *   - per-controller register bus test (written pattern must read back inverted),
 *   - supply voltage check,
 *   - DDR/RAM init completion (polled, protocol > v3),
 *   - channel info readiness (polled).
 * Returns 0 on success, non-zero on any failure.
 */
static int ssd_check_controller_bh(struct ssd_device *dev)
{
	uint32_t test_data = 0x55AA5AA5;
	uint32_t val;
	int reg_base, reg_sz;
	int init_wait = 0;
	int i;
	int ret = 0;

	if (mode != SSD_DRV_MODE_STANDARD) {
		return 0;
	}

	/* controller: bit 0 set means controller 0 is not ready */
	val = ssd_reg32_read(dev->ctrlp + SSD_READY_REG);
	if (val & 0x1) {
		hio_warn("%s: controller 0 not ready\n", dev->name);
		return -1;
	}

	/* bus test: each controller's test register returns the complement
	   of the value written to it */
	for (i=0; i<dev->hw_info.nr_ctrl; i++) {
		reg_base = SSD_CTRL_TEST_REG0 + i * SSD_CTRL_TEST_REG_SZ;
		ssd_reg32_write(dev->ctrlp + reg_base, test_data);
		val = ssd_reg32_read(dev->ctrlp + reg_base);
		if (val != ~(test_data)) {
			hio_warn("%s: check controller %d error: %#x\n", dev->name, i, val);
			return -1;
		}
	}

	/* voltage */
	ret = ssd_check_volt(dev);
	if (ret) {
		return ret;
	}

	/* ddr: poll each controller's RAM status until init-done (bit 1) is
	   set, waiting up to SSD_RAM_INIT_MAX_WAIT intervals in total across
	   all controllers (init_wait is shared between iterations) */
	if (dev->protocol_info.ver > SSD_PROTOCOL_V3) {
		reg_base = SSD_PV3_RAM_STATUS_REG0;
		reg_sz = SSD_PV3_RAM_STATUS_REG_SZ;

		for (i=0; i<dev->hw_info.nr_ctrl; i++) {
check_ram_status:
			val = ssd_reg32_read(dev->ctrlp + reg_base);

			if (!((val >> 1) & 0x1)) {
				init_wait++;
				if (init_wait <= SSD_RAM_INIT_MAX_WAIT) {
					msleep(SSD_INIT_WAIT);
					goto check_ram_status;
				} else {
					hio_warn("%s: controller %d ram init failed: %#x\n", dev->name, i, val);
					ssd_gen_swlog(dev, SSD_LOG_DDR_INIT_ERR, i);
					return -1;
				}
			}

			reg_base += reg_sz;
		}
	}

	/* ch info: poll until the busy flag (bit 31) clears */
	for (i=0; i<SSD_CH_INFO_MAX_WAIT; i++) {
		val = ssd_reg32_read(dev->ctrlp + SSD_CH_INFO_REG);
		if (!((val >> 31) & 0x1)) {
			break;
		}

		msleep(SSD_INIT_WAIT);
	}
	/* still busy after the full wait budget */
	if ((val >> 31) & 0x1) {
		hio_warn("%s: channel info init failed: %#x\n", dev->name, val);
		return -1;
	}

	return 0;
}
10995 | ||
10996 | static int ssd_init_protocol_info(struct ssd_device *dev) | |
10997 | { | |
10998 | uint32_t val; | |
10999 | ||
11000 | val = ssd_reg32_read(dev->ctrlp + SSD_PROTOCOL_VER_REG); | |
11001 | if (val == (uint32_t)-1) { | |
11002 | hio_warn("%s: protocol version error: %#x\n", dev->name, val); | |
11003 | return -EINVAL; | |
11004 | } | |
11005 | dev->protocol_info.ver = val; | |
11006 | ||
11007 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3) { | |
11008 | dev->protocol_info.init_state_reg = SSD_INIT_STATE_REG0; | |
11009 | dev->protocol_info.init_state_reg_sz = SSD_INIT_STATE_REG_SZ; | |
11010 | ||
11011 | dev->protocol_info.chip_info_reg = SSD_CHIP_INFO_REG0; | |
11012 | dev->protocol_info.chip_info_reg_sz = SSD_CHIP_INFO_REG_SZ; | |
11013 | } else { | |
11014 | dev->protocol_info.init_state_reg = SSD_PV3_INIT_STATE_REG0; | |
11015 | dev->protocol_info.init_state_reg_sz = SSD_PV3_INIT_STATE_REG_SZ; | |
11016 | ||
11017 | dev->protocol_info.chip_info_reg = SSD_PV3_CHIP_INFO_REG0; | |
11018 | dev->protocol_info.chip_info_reg_sz = SSD_PV3_CHIP_INFO_REG_SZ; | |
11019 | } | |
11020 | ||
11021 | return 0; | |
11022 | } | |
11023 | ||
/*
 * Read and validate the hardware description registers into dev->hw_info
 * and dev->hw_info_ext: response/command geometry, controller count, PCB
 * version, channel/RAM/flash layout, device capacity and extended info.
 * Returns 0 on success or -EINVAL on a malformed value; errors are ignored
 * (ret forced to 0) when the driver is not in standard mode.
 */
static int ssd_init_hw_info(struct ssd_device *dev)
{
	uint64_t val64;
	uint32_t val;
	uint32_t nr_ctrl;
	int ret = 0;

	/* base info: response pointer/message sizes encoded as 16 * 2^n */
	val = ssd_reg32_read(dev->ctrlp + SSD_RESP_INFO_REG);
	dev->hw_info.resp_ptr_sz = 16 * (1U << (val & 0xFF));
	dev->hw_info.resp_msg_sz = 16 * (1U << ((val >> 8) & 0xFF));

	if (0 == dev->hw_info.resp_ptr_sz || 0 == dev->hw_info.resp_msg_sz) {
		hio_warn("%s: response info error\n", dev->name);
		ret = -EINVAL;
		goto out;
	}

	/* command FIFO depth, max SG entries per command, max sectors per SG;
	   all encoded as power-of-two exponents */
	val = ssd_reg32_read(dev->ctrlp + SSD_BRIDGE_INFO_REG);
	dev->hw_info.cmd_fifo_sz = 1U << ((val >> 4) & 0xF);
	dev->hw_info.cmd_max_sg = 1U << ((val >> 8) & 0xF);
	dev->hw_info.sg_max_sec = 1U << ((val >> 12) & 0xF);
	dev->hw_info.cmd_fifo_sz_mask = dev->hw_info.cmd_fifo_sz - 1;

	if (0 == dev->hw_info.cmd_fifo_sz || 0 == dev->hw_info.cmd_max_sg || 0 == dev->hw_info.sg_max_sec) {
		hio_warn("%s: cmd info error\n", dev->name);
		ret = -EINVAL;
		goto out;
	}

	/* check hw */
	if (ssd_check_hw_bh(dev)) {
		hio_warn("%s: check hardware status failed\n", dev->name);
		ret = -EINVAL;
		goto out;
	}

	if (ssd_check_controller(dev)) {
		hio_warn("%s: check controller state failed\n", dev->name);
		ret = -EINVAL;
		goto out;
	}

	/* nr controller : read again*/
	val = ssd_reg32_read(dev->ctrlp + SSD_BRIDGE_INFO_REG);
	dev->hw_info.nr_ctrl = (val >> 16) & 0xF;

	/* nr ctrl configured; 0 means "not configured", otherwise it must
	   match the detected count (fatal only in standard mode or below) */
	nr_ctrl = (val >> 20) & 0xF;
	if (0 == dev->hw_info.nr_ctrl) {
		hio_warn("%s: nr controller error: %u\n", dev->name, dev->hw_info.nr_ctrl);
		ret = -EINVAL;
		goto out;
	} else if (0 != nr_ctrl && nr_ctrl != dev->hw_info.nr_ctrl) {
		hio_warn("%s: nr controller error: configured %u but found %u\n", dev->name, nr_ctrl, dev->hw_info.nr_ctrl);
		if (mode <= SSD_DRV_MODE_STANDARD) {
			ret = -EINVAL;
			goto out;
		}
	}

	if (ssd_check_controller_bh(dev)) {
		hio_warn("%s: check controller failed\n", dev->name);
		ret = -EINVAL;
		goto out;
	}

	/* PCB version: low nibbles encode letter revisions ('A' = 1);
	   an upper-board nibble of 0xF means no upper PCB present */
	val = ssd_reg32_read(dev->ctrlp + SSD_PCB_VER_REG);
	dev->hw_info.pcb_ver = (uint8_t) ((val >> 4) & 0xF) + 'A' -1;
	if ((val & 0xF) != 0xF) {
		dev->hw_info.upper_pcb_ver = (uint8_t) (val & 0xF) + 'A' -1;
	}

	if (dev->hw_info.pcb_ver < 'A' || (0 != dev->hw_info.upper_pcb_ver && dev->hw_info.upper_pcb_ver < 'A')) {
		hio_warn("%s: PCB version error: %#x %#x\n", dev->name, dev->hw_info.pcb_ver, dev->hw_info.upper_pcb_ver);
		ret = -EINVAL;
		goto out;
	}

	/* channel info */
	if (mode <= SSD_DRV_MODE_DEBUG) {
		val = ssd_reg32_read(dev->ctrlp + SSD_CH_INFO_REG);
		dev->hw_info.nr_data_ch = val & 0xFF;
		dev->hw_info.nr_ch = dev->hw_info.nr_data_ch + ((val >> 8) & 0xFF);
		dev->hw_info.nr_chip = (val >> 16) & 0xFF;

		if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
			/* round up to the next power of two */
			dev->hw_info.max_ch = 1;
			while (dev->hw_info.max_ch < dev->hw_info.nr_ch) dev->hw_info.max_ch <<= 1;
		} else {
			/* set max channel 32 */
			dev->hw_info.max_ch = 32;
		}

		if (0 == dev->hw_info.nr_chip) {
			//for debug mode
			dev->hw_info.nr_chip = 1;
		}

		//xx
		dev->hw_info.id_size = SSD_NAND_ID_SZ;
		dev->hw_info.max_ce = SSD_NAND_MAX_CE;

		if (0 == dev->hw_info.nr_data_ch || 0 == dev->hw_info.nr_ch || 0 == dev->hw_info.nr_chip) {
			hio_warn("%s: channel info error: data_ch %u ch %u chip %u\n", dev->name, dev->hw_info.nr_data_ch, dev->hw_info.nr_ch, dev->hw_info.nr_chip);
			ret = -EINVAL;
			goto out;
		}
	}

	/* ram info */
	if (mode <= SSD_DRV_MODE_DEBUG) {
		/* size = 64MiB * 2^n; alignment and max transfer length are
		   also power-of-two encoded */
		val = ssd_reg32_read(dev->ctrlp + SSD_RAM_INFO_REG);
		dev->hw_info.ram_size = 0x4000000ull * (1ULL << (val & 0xF));
		dev->hw_info.ram_align = 1U << ((val >> 12) & 0xF);
		if (dev->hw_info.ram_align < SSD_RAM_ALIGN) {
			if (dev->protocol_info.ver < SSD_PROTOCOL_V3) {
				/* pre-v3 hardware may report a smaller value; clamp up */
				dev->hw_info.ram_align = SSD_RAM_ALIGN;
			} else {
				hio_warn("%s: ram align error: %u\n", dev->name, dev->hw_info.ram_align);
				ret = -EINVAL;
				goto out;
			}
		}
		dev->hw_info.ram_max_len = 0x1000 * (1U << ((val >> 16) & 0xF));

		if (0 == dev->hw_info.ram_size || 0 == dev->hw_info.ram_align || 0 == dev->hw_info.ram_max_len || dev->hw_info.ram_align > dev->hw_info.ram_max_len) {
			hio_warn("%s: ram info error\n", dev->name);
			ret = -EINVAL;
			goto out;
		}

		/* log buffer size: fixed before protocol v3, register-encoded after */
		if (dev->protocol_info.ver < SSD_PROTOCOL_V3) {
			dev->hw_info.log_sz = SSD_LOG_MAX_SZ;
		} else {
			val = ssd_reg32_read(dev->ctrlp + SSD_LOG_INFO_REG);
			dev->hw_info.log_sz = 0x1000 * (1U << (val & 0xFF));
		}
		if (0 == dev->hw_info.log_sz) {
			hio_warn("%s: log size error\n", dev->name);
			ret = -EINVAL;
			goto out;
		}

		/* bad block table: base in 256KiB units, size split per ch/chip */
		val = ssd_reg32_read(dev->ctrlp + SSD_BBT_BASE_REG);
		dev->hw_info.bbt_base = 0x40000ull * (val & 0xFFFF);
		dev->hw_info.bbt_size = 0x40000 * (((val >> 16) & 0xFFFF) + 1) / (dev->hw_info.max_ch * dev->hw_info.nr_chip);
		if (dev->protocol_info.ver < SSD_PROTOCOL_V3) {
			if (dev->hw_info.bbt_base > dev->hw_info.ram_size || 0 == dev->hw_info.bbt_size) {
				hio_warn("%s: bbt info error\n", dev->name);
				ret = -EINVAL;
				goto out;
			}
		}

		/* metadata (erase-count table) region; per-chip only after v3 */
		val = ssd_reg32_read(dev->ctrlp + SSD_ECT_BASE_REG);
		dev->hw_info.md_base = 0x40000ull * (val & 0xFFFF);
		if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) {
			dev->hw_info.md_size = 0x40000 * (((val >> 16) & 0xFFF) + 1) / (dev->hw_info.max_ch * dev->hw_info.nr_chip);
		} else {
			dev->hw_info.md_size = 0x40000 * (((val >> 16) & 0xFFF) + 1) / (dev->hw_info.nr_chip);
		}
		dev->hw_info.md_entry_sz = 8 * (1U << ((val >> 28) & 0xF));
		if (dev->protocol_info.ver >= SSD_PROTOCOL_V3) {
			if (dev->hw_info.md_base > dev->hw_info.ram_size || 0 == dev->hw_info.md_size ||
				0 == dev->hw_info.md_entry_sz || dev->hw_info.md_entry_sz > dev->hw_info.md_size) {
				hio_warn("%s: md info error\n", dev->name);
				ret = -EINVAL;
				goto out;
			}
		}

		/* NAND write buffer base; pre-v3 hardware has none, so park it
		   just past the end of RAM */
		if (dev->protocol_info.ver < SSD_PROTOCOL_V3) {
			dev->hw_info.nand_wbuff_base = dev->hw_info.ram_size + 1;
		} else {
			val = ssd_reg32_read(dev->ctrlp + SSD_NAND_BUFF_BASE);
			dev->hw_info.nand_wbuff_base = 0x8000ull * val;
		}
	}

	/* flash info */
	if (mode <= SSD_DRV_MODE_DEBUG) {
		if (dev->hw_info.nr_ctrl > 1) {
			val = ssd_reg32_read(dev->ctrlp + SSD_CTRL_VER_REG);
			dev->hw_info.ctrl_ver = val & 0xFFF;
			hio_info("%s: controller firmware version: %03X\n", dev->name, dev->hw_info.ctrl_ver);
		}

		/* NAND geometry packed into one 64-bit register */
		val64 = ssd_reg_read(dev->ctrlp + SSD_FLASH_INFO_REG0);
		dev->hw_info.nand_vendor_id = ((val64 >> 56) & 0xFF);
		dev->hw_info.nand_dev_id = ((val64 >> 48) & 0xFF);

		dev->hw_info.block_count = (((val64 >> 32) & 0xFFFF) + 1);
		dev->hw_info.page_count = ((val64>>16) & 0xFFFF);
		dev->hw_info.page_size = (val64 & 0xFFFF);

		/* bad-block-flag location info */
		val = ssd_reg32_read(dev->ctrlp + SSD_BB_INFO_REG);
		dev->hw_info.bbf_pages = val & 0xFF;
		dev->hw_info.bbf_seek = (val >> 8) & 0x1;

		if (0 == dev->hw_info.block_count || 0 == dev->hw_info.page_count || 0 == dev->hw_info.page_size || dev->hw_info.block_count > INT_MAX) {
			hio_warn("%s: flash info error\n", dev->name);
			ret = -EINVAL;
			goto out;
		}

		//xx
		dev->hw_info.oob_size = SSD_NAND_OOB_SZ; //(dev->hw_info.page_size) >> 5;

		/* valid pages per block; field widths grew in protocol v3.2 */
		val = ssd_reg32_read(dev->ctrlp + SSD_VALID_PAGES_REG);
		if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
			dev->hw_info.valid_pages = val & 0x3FF;
			dev->hw_info.max_valid_pages = (val>>20) & 0x3FF;
		} else {
			dev->hw_info.valid_pages = val & 0x7FFF;
			dev->hw_info.max_valid_pages = (val>>15) & 0x7FFF;
		}
		if (0 == dev->hw_info.valid_pages || 0 == dev->hw_info.max_valid_pages ||
			dev->hw_info.valid_pages > dev->hw_info.max_valid_pages || dev->hw_info.max_valid_pages > dev->hw_info.page_count) {
			hio_warn("%s: valid page info error: valid_pages %d, max_valid_pages %d\n", dev->name, dev->hw_info.valid_pages, dev->hw_info.max_valid_pages);
			ret = -EINVAL;
			goto out;
		}

		val = ssd_reg32_read(dev->ctrlp + SSD_RESERVED_BLKS_REG);
		dev->hw_info.reserved_blks = val & 0xFFFF;
		dev->hw_info.md_reserved_blks = (val >> 16) & 0xFF;
		if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) {
			/* older protocols don't report this; use the fixed default */
			dev->hw_info.md_reserved_blks = SSD_BBT_RESERVED;
		}
		if (dev->hw_info.reserved_blks > dev->hw_info.block_count || dev->hw_info.md_reserved_blks > dev->hw_info.block_count) {
			hio_warn("%s: reserved blocks info error: reserved_blks %d, md_reserved_blks %d\n", dev->name, dev->hw_info.reserved_blks, dev->hw_info.md_reserved_blks);
			ret = -EINVAL;
			goto out;
		}
	}

	/* size: usable capacity = pages/block * page size * usable blocks
	   * data channels * chips * controllers */
	if (mode < SSD_DRV_MODE_DEBUG) {
		dev->hw_info.size = (uint64_t)dev->hw_info.valid_pages * dev->hw_info.page_size;
		dev->hw_info.size *= (dev->hw_info.block_count - dev->hw_info.reserved_blks);
		dev->hw_info.size *= ((uint64_t)dev->hw_info.nr_data_ch * (uint64_t)dev->hw_info.nr_chip * (uint64_t)dev->hw_info.nr_ctrl);
	}

	/* extend hardware info */
	val = ssd_reg32_read(dev->ctrlp + SSD_PCB_VER_REG);
	dev->hw_info_ext.board_type = (val >> 24) & 0xF;

	dev->hw_info_ext.form_factor = SSD_FORM_FACTOR_FHHL;
	if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2_1) {
		dev->hw_info_ext.form_factor = (val >> 31) & 0x1;
	}
	/*
	dev->hw_info_ext.cap_type = (val >> 28) & 0x3;
	if (SSD_BM_CAP_VINA != dev->hw_info_ext.cap_type && SSD_BM_CAP_JH != dev->hw_info_ext.cap_type) {
		dev->hw_info_ext.cap_type = SSD_BM_CAP_VINA;
	}*/

	/* power loss protect */
	val = ssd_reg32_read(dev->ctrlp + SSD_PLP_INFO_REG);
	dev->hw_info_ext.plp_type = (val & 0x3);
	if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) {
		/* 3 or 4 cap */
		dev->hw_info_ext.cap_type = ((val >> 2)& 0x1);
	}

	/* work mode */
	val = ssd_reg32_read(dev->ctrlp + SSD_CH_INFO_REG);
	dev->hw_info_ext.work_mode = (val >> 25) & 0x1;

out:
	/* skip error if not in standard mode */
	if (mode != SSD_DRV_MODE_STANDARD) {
		ret = 0;
	}
	return ret;
}
11301 | ||
11302 | static void ssd_cleanup_response(struct ssd_device *dev) | |
11303 | { | |
11304 | int resp_msg_sz = dev->hw_info.resp_msg_sz * dev->hw_info.cmd_fifo_sz * SSD_MSIX_VEC; | |
11305 | int resp_ptr_sz = dev->hw_info.resp_ptr_sz * SSD_MSIX_VEC; | |
11306 | ||
11307 | pci_free_consistent(dev->pdev, resp_ptr_sz, dev->resp_ptr_base, dev->resp_ptr_base_dma); | |
11308 | pci_free_consistent(dev->pdev, resp_msg_sz, dev->resp_msg_base, dev->resp_msg_base_dma); | |
11309 | } | |
11310 | ||
11311 | static int ssd_init_response(struct ssd_device *dev) | |
11312 | { | |
11313 | int resp_msg_sz = dev->hw_info.resp_msg_sz * dev->hw_info.cmd_fifo_sz * SSD_MSIX_VEC; | |
11314 | int resp_ptr_sz = dev->hw_info.resp_ptr_sz * SSD_MSIX_VEC; | |
11315 | ||
11316 | dev->resp_msg_base = pci_alloc_consistent(dev->pdev, resp_msg_sz, &(dev->resp_msg_base_dma)); | |
11317 | if (!dev->resp_msg_base) { | |
11318 | hio_warn("%s: unable to allocate resp msg DMA buffer\n", dev->name); | |
11319 | goto out_alloc_resp_msg; | |
11320 | } | |
11321 | memset(dev->resp_msg_base, 0xFF, resp_msg_sz); | |
11322 | ||
11323 | dev->resp_ptr_base = pci_alloc_consistent(dev->pdev, resp_ptr_sz, &(dev->resp_ptr_base_dma)); | |
11324 | if (!dev->resp_ptr_base){ | |
11325 | hio_warn("%s: unable to allocate resp ptr DMA buffer\n", dev->name); | |
11326 | goto out_alloc_resp_ptr; | |
11327 | } | |
11328 | memset(dev->resp_ptr_base, 0, resp_ptr_sz); | |
11329 | dev->resp_idx = *(uint32_t *)(dev->resp_ptr_base) = dev->hw_info.cmd_fifo_sz * 2 - 1; | |
11330 | ||
11331 | ssd_reg_write(dev->ctrlp + SSD_RESP_FIFO_REG, dev->resp_msg_base_dma); | |
11332 | ssd_reg_write(dev->ctrlp + SSD_RESP_PTR_REG, dev->resp_ptr_base_dma); | |
11333 | ||
11334 | return 0; | |
11335 | ||
11336 | out_alloc_resp_ptr: | |
11337 | pci_free_consistent(dev->pdev, resp_msg_sz, dev->resp_msg_base, dev->resp_msg_base_dma); | |
11338 | out_alloc_resp_msg: | |
11339 | return -ENOMEM; | |
11340 | } | |
11341 | ||
11342 | static int ssd_cleanup_cmd(struct ssd_device *dev) | |
11343 | { | |
11344 | int msg_sz = ALIGN(sizeof(struct ssd_rw_msg) + (dev->hw_info.cmd_max_sg - 1) * sizeof(struct ssd_sg_entry), SSD_DMA_ALIGN); | |
11345 | int i; | |
11346 | ||
11347 | for (i=0; i<(int)dev->hw_info.cmd_fifo_sz; i++) { | |
11348 | kfree(dev->cmd[i].sgl); | |
11349 | } | |
11350 | kfree(dev->cmd); | |
11351 | pci_free_consistent(dev->pdev, (msg_sz * dev->hw_info.cmd_fifo_sz), dev->msg_base, dev->msg_base_dma); | |
11352 | return 0; | |
11353 | } | |
11354 | ||
11355 | static int ssd_init_cmd(struct ssd_device *dev) | |
11356 | { | |
11357 | int sgl_sz = sizeof(struct scatterlist) * dev->hw_info.cmd_max_sg; | |
11358 | int cmd_sz = sizeof(struct ssd_cmd) * dev->hw_info.cmd_fifo_sz; | |
11359 | int msg_sz = ALIGN(sizeof(struct ssd_rw_msg) + (dev->hw_info.cmd_max_sg - 1) * sizeof(struct ssd_sg_entry), SSD_DMA_ALIGN); | |
11360 | int i; | |
11361 | ||
11362 | spin_lock_init(&dev->cmd_lock); | |
11363 | ||
11364 | dev->msg_base = pci_alloc_consistent(dev->pdev, (msg_sz * dev->hw_info.cmd_fifo_sz), &dev->msg_base_dma); | |
11365 | if (!dev->msg_base) { | |
11366 | hio_warn("%s: can not alloc cmd msg\n", dev->name); | |
11367 | goto out_alloc_msg; | |
11368 | } | |
11369 | ||
11370 | dev->cmd = kmalloc(cmd_sz, GFP_KERNEL); | |
11371 | if (!dev->cmd) { | |
11372 | hio_warn("%s: can not alloc cmd\n", dev->name); | |
11373 | goto out_alloc_cmd; | |
11374 | } | |
11375 | memset(dev->cmd, 0, cmd_sz); | |
11376 | ||
11377 | for (i=0; i<(int)dev->hw_info.cmd_fifo_sz; i++) { | |
11378 | dev->cmd[i].sgl = kmalloc(sgl_sz, GFP_KERNEL); | |
11379 | if (!dev->cmd[i].sgl) { | |
11380 | hio_warn("%s: can not alloc cmd sgl %d\n", dev->name, i); | |
11381 | goto out_alloc_sgl; | |
11382 | } | |
11383 | ||
11384 | dev->cmd[i].msg = dev->msg_base + (msg_sz * i); | |
11385 | dev->cmd[i].msg_dma = dev->msg_base_dma + ((dma_addr_t)msg_sz * i); | |
11386 | ||
11387 | dev->cmd[i].dev = dev; | |
11388 | dev->cmd[i].tag = i; | |
11389 | dev->cmd[i].flag = 0; | |
11390 | ||
11391 | INIT_LIST_HEAD(&dev->cmd[i].list); | |
11392 | } | |
11393 | ||
11394 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3) { | |
11395 | dev->scmd = ssd_dispatch_cmd; | |
11396 | } else { | |
11397 | ssd_reg_write(dev->ctrlp + SSD_MSG_BASE_REG, dev->msg_base_dma); | |
11398 | if (finject) { | |
11399 | dev->scmd = ssd_send_cmd_db; | |
11400 | } else { | |
11401 | dev->scmd = ssd_send_cmd; | |
11402 | } | |
11403 | } | |
11404 | ||
11405 | return 0; | |
11406 | ||
11407 | out_alloc_sgl: | |
11408 | for (i--; i>=0; i--) { | |
11409 | kfree(dev->cmd[i].sgl); | |
11410 | } | |
11411 | kfree(dev->cmd); | |
11412 | out_alloc_cmd: | |
11413 | pci_free_consistent(dev->pdev, (msg_sz * dev->hw_info.cmd_fifo_sz), dev->msg_base, dev->msg_base_dma); | |
11414 | out_alloc_msg: | |
11415 | return -ENOMEM; | |
11416 | } | |
11417 | ||
11418 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30)) | |
11419 | static irqreturn_t ssd_interrupt_check(int irq, void *dev_id) | |
11420 | { | |
11421 | struct ssd_queue *queue = (struct ssd_queue *)dev_id; | |
11422 | ||
11423 | if (*(uint32_t *)queue->resp_ptr == queue->resp_idx) { | |
11424 | return IRQ_NONE; | |
11425 | } | |
11426 | ||
11427 | return IRQ_WAKE_THREAD; | |
11428 | } | |
11429 | ||
/*
 * Threaded IRQ handler: consume response messages from the queue's ring
 * between the last-seen index and the index the hardware published in
 * *resp_ptr, complete each command via ssd_done(), and account errors,
 * hardware-log requests and ECC bitflip statistics.
 */
static irqreturn_t ssd_interrupt_threaded(int irq, void *dev_id)
{
	struct ssd_queue *queue = (struct ssd_queue *)dev_id;
	struct ssd_device *dev = (struct ssd_device *)queue->dev;
	struct ssd_cmd *cmd;
	union ssd_response_msq __msg;
	union ssd_response_msq *msg = &__msg;
	uint64_t *u64_msg;
	uint32_t resp_idx = queue->resp_idx;             /* last index we consumed */
	uint32_t new_resp_idx = *(uint32_t *)queue->resp_ptr; /* index written by HW via DMA */
	uint32_t end_resp_idx;

	/* nothing new published -> spurious wakeup */
	if (unlikely(resp_idx == new_resp_idx)) {
		return IRQ_NONE;
	}

	end_resp_idx = new_resp_idx & queue->resp_idx_mask;

	do {
		resp_idx = (resp_idx + 1) & queue->resp_idx_mask;

		/* the resp msg: copy the 64-bit message out of the ring slot */
		u64_msg = (uint64_t *)(queue->resp_msg + queue->resp_msg_sz * resp_idx);
		msg->u64_msg = *u64_msg;

		/* all-ones means the slot was never filled by hardware */
		if (unlikely(msg->u64_msg == (uint64_t)(-1))) {
			hio_err("%s: empty resp msg: queue %d idx %u\n", dev->name, queue->idx, resp_idx);
			continue;
		}
		/* clear the resp msg (mark the slot empty for the next lap) */
		*u64_msg = (uint64_t)(-1);

		cmd = &queue->cmd[msg->resp_msg.tag];
		/*if (unlikely(!cmd->bio)) {
			printk(KERN_WARNING "%s: unknown tag %d fun %#x\n",
				dev->name, msg->resp_msg.tag, msg->resp_msg.fun);
			continue;
		}*/

		/* only status bits selected by the status_mask module param count as I/O errors */
		if(unlikely(msg->resp_msg.status & (uint32_t)status_mask)) {
			cmd->errors = -EIO;
		} else {
			cmd->errors = 0;
		}
		cmd->nr_log = msg->log_resp_msg.nr_log;

		ssd_done(cmd);

		/* hardware flagged pending log entries: schedule the log worker */
		if (unlikely(msg->resp_msg.fun != SSD_FUNC_READ_LOG && msg->resp_msg.log > 0)) {
			(void)test_and_set_bit(SSD_LOG_HW, &dev->state);
			if (test_bit(SSD_INIT_WORKQ, &dev->state)) {
				queue_work(dev->workq, &dev->log_work);
			}
		}

		if (unlikely(msg->resp_msg.status)) {
			if (msg->resp_msg.fun == SSD_FUNC_READ || msg->resp_msg.fun == SSD_FUNC_WRITE) {
				hio_err("%s: I/O error %d: tag %d fun %#x\n",
					dev->name, msg->resp_msg.status, msg->resp_msg.tag, msg->resp_msg.fun);

				/* alarm led */
				ssd_set_alarm(dev);
				queue->io_stat.nr_rwerr++;
				ssd_gen_swlog(dev, SSD_LOG_EIO, msg->u32_msg[0]);
			} else {
				hio_info("%s: CMD error %d: tag %d fun %#x\n",
					dev->name, msg->resp_msg.status, msg->resp_msg.tag, msg->resp_msg.fun);

				ssd_gen_swlog(dev, SSD_LOG_ECMD, msg->u32_msg[0]);
			}
			queue->io_stat.nr_ioerr++;
		}

		/* ECC bitflip histogram, read paths only */
		if (msg->resp_msg.fun == SSD_FUNC_READ ||
			msg->resp_msg.fun == SSD_FUNC_NAND_READ_WOOB ||
			msg->resp_msg.fun == SSD_FUNC_NAND_READ) {

			queue->ecc_info.bitflip[msg->resp_msg.bitflip]++;
		}
	}while (resp_idx != end_resp_idx);

	/* remember the raw published index (not masked) for the next interrupt */
	queue->resp_idx = new_resp_idx;

	return IRQ_HANDLED;
}
11515 | #endif | |
11516 | ||
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
static irqreturn_t ssd_interrupt(int irq, void *dev_id, struct pt_regs *regs)
#else
static irqreturn_t ssd_interrupt(int irq, void *dev_id)
#endif
{
	/*
	 * Non-threaded IRQ handler. Same response-ring consumption as
	 * ssd_interrupt_threaded(), but completes commands with the
	 * bottom-half variant ssd_done_bh() and records the servicing CPU
	 * when SSD_ESCAPE_IRQ is enabled.
	 */
	struct ssd_queue *queue = (struct ssd_queue *)dev_id;
	struct ssd_device *dev = (struct ssd_device *)queue->dev;
	struct ssd_cmd *cmd;
	union ssd_response_msq __msg;
	union ssd_response_msq *msg = &__msg;
	uint64_t *u64_msg;
	uint32_t resp_idx = queue->resp_idx;             /* last index we consumed */
	uint32_t new_resp_idx = *(uint32_t *)queue->resp_ptr; /* index written by HW via DMA */
	uint32_t end_resp_idx;

	/* nothing new published -> not our interrupt */
	if (unlikely(resp_idx == new_resp_idx)) {
		return IRQ_NONE;
	}

#if (defined SSD_ESCAPE_IRQ)
	/* remember which CPU serviced this (non-MSI-X) interrupt */
	if (SSD_INT_MSIX != dev->int_mode) {
		dev->irq_cpu = smp_processor_id();
	}
#endif

	end_resp_idx = new_resp_idx & queue->resp_idx_mask;

	do {
		resp_idx = (resp_idx + 1) & queue->resp_idx_mask;

		/* the resp msg: copy the 64-bit message out of the ring slot */
		u64_msg = (uint64_t *)(queue->resp_msg + queue->resp_msg_sz * resp_idx);
		msg->u64_msg = *u64_msg;

		/* all-ones means the slot was never filled by hardware */
		if (unlikely(msg->u64_msg == (uint64_t)(-1))) {
			hio_err("%s: empty resp msg: queue %d idx %u\n", dev->name, queue->idx, resp_idx);
			continue;
		}
		/* clear the resp msg (mark the slot empty for the next lap) */
		*u64_msg = (uint64_t)(-1);

		cmd = &queue->cmd[msg->resp_msg.tag];
		/*if (unlikely(!cmd->bio)) {
			printk(KERN_WARNING "%s: unknown tag %d fun %#x\n",
				dev->name, msg->resp_msg.tag, msg->resp_msg.fun);
			continue;
		}*/

		/* only status bits selected by the status_mask module param count as I/O errors */
		if(unlikely(msg->resp_msg.status & (uint32_t)status_mask)) {
			cmd->errors = -EIO;
		} else {
			cmd->errors = 0;
		}
		cmd->nr_log = msg->log_resp_msg.nr_log;

		ssd_done_bh(cmd);

		/* hardware flagged pending log entries: schedule the log worker */
		if (unlikely(msg->resp_msg.fun != SSD_FUNC_READ_LOG && msg->resp_msg.log > 0)) {
			(void)test_and_set_bit(SSD_LOG_HW, &dev->state);
			if (test_bit(SSD_INIT_WORKQ, &dev->state)) {
				queue_work(dev->workq, &dev->log_work);
			}
		}

		if (unlikely(msg->resp_msg.status)) {
			if (msg->resp_msg.fun == SSD_FUNC_READ || msg->resp_msg.fun == SSD_FUNC_WRITE) {
				hio_err("%s: I/O error %d: tag %d fun %#x\n",
					dev->name, msg->resp_msg.status, msg->resp_msg.tag, msg->resp_msg.fun);

				/* alarm led */
				ssd_set_alarm(dev);
				queue->io_stat.nr_rwerr++;
				ssd_gen_swlog(dev, SSD_LOG_EIO, msg->u32_msg[0]);
			} else {
				hio_info("%s: CMD error %d: tag %d fun %#x\n",
					dev->name, msg->resp_msg.status, msg->resp_msg.tag, msg->resp_msg.fun);

				ssd_gen_swlog(dev, SSD_LOG_ECMD, msg->u32_msg[0]);
			}
			queue->io_stat.nr_ioerr++;
		}

		/* ECC bitflip histogram, read paths only */
		if (msg->resp_msg.fun == SSD_FUNC_READ ||
			msg->resp_msg.fun == SSD_FUNC_NAND_READ_WOOB ||
			msg->resp_msg.fun == SSD_FUNC_NAND_READ) {

			queue->ecc_info.bitflip[msg->resp_msg.bitflip]++;
		}
	}while (resp_idx != end_resp_idx);

	/* remember the raw published index (not masked) for the next interrupt */
	queue->resp_idx = new_resp_idx;

	return IRQ_HANDLED;
}
11612 | ||
11613 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) | |
11614 | static irqreturn_t ssd_interrupt_legacy(int irq, void *dev_id, struct pt_regs *regs) | |
11615 | #else | |
11616 | static irqreturn_t ssd_interrupt_legacy(int irq, void *dev_id) | |
11617 | #endif | |
11618 | { | |
11619 | irqreturn_t ret; | |
11620 | struct ssd_queue *queue = (struct ssd_queue *)dev_id; | |
11621 | struct ssd_device *dev = (struct ssd_device *)queue->dev; | |
11622 | ||
11623 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) | |
11624 | ret = ssd_interrupt(irq, dev_id, regs); | |
11625 | #else | |
11626 | ret = ssd_interrupt(irq, dev_id); | |
11627 | #endif | |
11628 | ||
11629 | /* clear intr */ | |
11630 | if (IRQ_HANDLED == ret) { | |
11631 | ssd_reg32_write(dev->ctrlp + SSD_CLEAR_INTR_REG, 1); | |
11632 | } | |
11633 | ||
11634 | return ret; | |
11635 | } | |
11636 | ||
11637 | static void ssd_reset_resp_ptr(struct ssd_device *dev) | |
11638 | { | |
11639 | int i; | |
11640 | ||
11641 | for (i=0; i<dev->nr_queue; i++) { | |
11642 | *(uint32_t *)dev->queue[i].resp_ptr = dev->queue[i].resp_idx = (dev->hw_info.cmd_fifo_sz * 2) - 1; | |
11643 | } | |
11644 | } | |
11645 | ||
/*
 * Release all interrupt resources for the device: drop MSI-X affinity
 * hints (on kernels that support them), free every per-queue IRQ, and
 * disable MSI-X/MSI as appropriate for the active interrupt mode.
 */
static void ssd_free_irq(struct ssd_device *dev)
{
	int i;

	/* affinity hints exist from 2.6.35 (or RHEL6); gone from our use at 4.10 */
#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
	if (SSD_INT_MSIX == dev->int_mode) {
		for (i=0; i<dev->nr_queue; i++) {
			irq_set_affinity_hint(dev->entry[i].vector, NULL);
		}
	}
#endif

	/* free each queue's IRQ; 4.10+ looks vectors up via pci_irq_vector() */
	for (i=0; i<dev->nr_queue; i++) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
		free_irq(dev->entry[i].vector, &dev->queue[i]);
#else
		free_irq(pci_irq_vector(dev->pdev, i), &dev->queue[i]);
#endif
	}

	if (SSD_INT_MSIX == dev->int_mode) {
		pci_disable_msix(dev->pdev);
	} else if (SSD_INT_MSI == dev->int_mode) {
		pci_disable_msi(dev->pdev);
	}

}
11673 | ||
11674 | static int ssd_init_irq(struct ssd_device *dev) | |
11675 | { | |
11676 | #if (!defined MODULE) && (defined SSD_MSIX_AFFINITY_FORCE) && (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) | |
11677 | const struct cpumask *cpu_mask = NULL; | |
11678 | static int cpu_affinity = 0; | |
11679 | #endif | |
11680 | #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) | |
11681 | const struct cpumask *mask = NULL; | |
11682 | static int cpu = 0; | |
11683 | int j; | |
11684 | #endif | |
11685 | int i; | |
11686 | unsigned long flags = 0; | |
11687 | int ret = 0; | |
11688 | ||
11689 | ssd_reg32_write(dev->ctrlp + SSD_INTR_INTERVAL_REG, 0x800); | |
11690 | ||
11691 | #ifdef SSD_ESCAPE_IRQ | |
11692 | dev->irq_cpu = -1; | |
11693 | #endif | |
11694 | ||
11695 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) | |
11696 | if (int_mode >= SSD_INT_MSIX && pci_find_capability(dev->pdev, PCI_CAP_ID_MSIX)) { | |
11697 | dev->nr_queue = SSD_MSIX_VEC; | |
11698 | ||
11699 | for (i=0; i<dev->nr_queue; i++) { | |
11700 | dev->entry[i].entry = i; | |
11701 | } | |
11702 | for (;;) { | |
11703 | ret = pci_enable_msix(dev->pdev, dev->entry, dev->nr_queue); | |
11704 | if (ret == 0) { | |
11705 | break; | |
11706 | } else if (ret > 0) { | |
11707 | dev->nr_queue = ret; | |
11708 | } else { | |
11709 | hio_warn("%s: can not enable msix\n", dev->name); | |
11710 | /* alarm led */ | |
11711 | ssd_set_alarm(dev); | |
11712 | goto out; | |
11713 | } | |
11714 | } | |
11715 | ||
11716 | #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6)) | |
11717 | mask = (dev_to_node(&dev->pdev->dev) == -1) ? cpu_online_mask : cpumask_of_node(dev_to_node(&dev->pdev->dev)); | |
11718 | if ((0 == cpu) || (!cpumask_intersects(mask, cpumask_of(cpu)))) { | |
11719 | cpu = cpumask_first(mask); | |
11720 | } | |
11721 | for (i=0; i<dev->nr_queue; i++) { | |
11722 | irq_set_affinity_hint(dev->entry[i].vector, cpumask_of(cpu)); | |
11723 | cpu = cpumask_next(cpu, mask); | |
11724 | if (cpu >= nr_cpu_ids) { | |
11725 | cpu = cpumask_first(mask); | |
11726 | } | |
11727 | } | |
11728 | #endif | |
11729 | ||
11730 | dev->int_mode = SSD_INT_MSIX; | |
11731 | } else if (int_mode >= SSD_INT_MSI && pci_find_capability(dev->pdev, PCI_CAP_ID_MSI)) { | |
11732 | ret = pci_enable_msi(dev->pdev); | |
11733 | if (ret) { | |
11734 | hio_warn("%s: can not enable msi\n", dev->name); | |
11735 | /* alarm led */ | |
11736 | ssd_set_alarm(dev); | |
11737 | goto out; | |
11738 | } | |
11739 | ||
11740 | dev->nr_queue = 1; | |
11741 | dev->entry[0].vector = dev->pdev->irq; | |
11742 | ||
11743 | dev->int_mode = SSD_INT_MSI; | |
11744 | } else { | |
11745 | dev->nr_queue = 1; | |
11746 | dev->entry[0].vector = dev->pdev->irq; | |
11747 | ||
11748 | dev->int_mode = SSD_INT_LEGACY; | |
11749 | } | |
11750 | #else | |
11751 | if (int_mode >= SSD_INT_MSIX && pci_find_capability(dev->pdev, PCI_CAP_ID_MSIX)) { | |
11752 | dev->nr_queue = SSD_MSIX_VEC; | |
11753 | ||
11754 | dev->nr_queue = pci_alloc_irq_vectors(dev->pdev, 1, dev->nr_queue, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); | |
11755 | if (dev->nr_queue <= 0) { | |
11756 | ret = -EIO; | |
11757 | hio_warn("%s: can not enable msix\n", dev->name); | |
11758 | ssd_set_alarm(dev); | |
11759 | goto out; | |
11760 | } | |
11761 | ||
11762 | dev->int_mode = SSD_INT_MSIX; | |
11763 | } else if (int_mode >= SSD_INT_MSI && pci_find_capability(dev->pdev, PCI_CAP_ID_MSI)) { | |
11764 | ||
11765 | ret = pci_alloc_irq_vectors(dev->pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_AFFINITY); | |
11766 | if (ret <= 0) { | |
11767 | ret = -EIO; | |
11768 | hio_warn("%s: can not enable msi\n", dev->name); | |
11769 | /* alarm led */ | |
11770 | ssd_set_alarm(dev); | |
11771 | goto out; | |
11772 | } | |
11773 | dev->nr_queue = 1; | |
11774 | ||
11775 | dev->int_mode = SSD_INT_MSI; | |
11776 | } else { | |
11777 | ret = pci_alloc_irq_vectors(dev->pdev, 1, 1, PCI_IRQ_LEGACY); | |
11778 | ||
11779 | if (ret <= 0) { | |
11780 | ret = -EIO; | |
11781 | hio_warn("%s: can not enable msi\n", dev->name); | |
11782 | /* alarm led */ | |
11783 | ssd_set_alarm(dev); | |
11784 | goto out; | |
11785 | } | |
11786 | dev->nr_queue = 1; | |
11787 | ||
11788 | dev->int_mode = SSD_INT_LEGACY; | |
11789 | } | |
11790 | #endif | |
11791 | ||
11792 | for (i=0; i<dev->nr_queue; i++) { | |
11793 | if (dev->nr_queue > 1) { | |
11794 | snprintf(dev->queue[i].name, SSD_QUEUE_NAME_LEN, "%s_e100-%d", dev->name, i); | |
11795 | } else { | |
11796 | snprintf(dev->queue[i].name, SSD_QUEUE_NAME_LEN, "%s_e100", dev->name); | |
11797 | } | |
11798 | ||
11799 | dev->queue[i].dev = dev; | |
11800 | dev->queue[i].idx = i; | |
11801 | ||
11802 | dev->queue[i].resp_idx = (dev->hw_info.cmd_fifo_sz * 2) - 1; | |
11803 | dev->queue[i].resp_idx_mask = dev->hw_info.cmd_fifo_sz - 1; | |
11804 | ||
11805 | dev->queue[i].resp_msg_sz = dev->hw_info.resp_msg_sz; | |
11806 | dev->queue[i].resp_msg = dev->resp_msg_base + dev->hw_info.resp_msg_sz * dev->hw_info.cmd_fifo_sz * i; | |
11807 | dev->queue[i].resp_ptr = dev->resp_ptr_base + dev->hw_info.resp_ptr_sz * i; | |
11808 | *(uint32_t *)dev->queue[i].resp_ptr = dev->queue[i].resp_idx; | |
11809 | ||
11810 | dev->queue[i].cmd = dev->cmd; | |
11811 | } | |
11812 | ||
11813 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)) | |
11814 | flags = IRQF_SHARED; | |
11815 | #else | |
11816 | flags = SA_SHIRQ; | |
11817 | #endif | |
11818 | ||
11819 | for (i=0; i<dev->nr_queue; i++) { | |
11820 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)) | |
11821 | if (dev->int_mode == SSD_INT_LEGACY) { | |
11822 | ret = request_irq(dev->entry[i].vector, &ssd_interrupt_legacy, flags, dev->queue[i].name, &dev->queue[i]); | |
11823 | } else { | |
11824 | ret = request_irq(dev->entry[i].vector, &ssd_interrupt, flags, dev->queue[i].name, &dev->queue[i]); | |
11825 | } | |
11826 | #elif (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) | |
11827 | if (threaded_irq) { | |
11828 | ret = request_threaded_irq(dev->entry[i].vector, ssd_interrupt_check, ssd_interrupt_threaded, flags, dev->queue[i].name, &dev->queue[i]); | |
11829 | } else if (dev->int_mode == SSD_INT_LEGACY) { | |
11830 | ret = request_irq(dev->entry[i].vector, &ssd_interrupt_legacy, flags, dev->queue[i].name, &dev->queue[i]); | |
11831 | } else { | |
11832 | ret = request_irq(dev->entry[i].vector, &ssd_interrupt, flags, dev->queue[i].name, &dev->queue[i]); | |
11833 | } | |
11834 | #else | |
11835 | if (threaded_irq) { | |
11836 | ret = request_threaded_irq(pci_irq_vector(dev->pdev, i), ssd_interrupt_check, ssd_interrupt_threaded, flags, dev->queue[i].name, &dev->queue[i]); | |
11837 | } else if (dev->int_mode == SSD_INT_LEGACY) { | |
11838 | ret = request_irq(pci_irq_vector(dev->pdev, i), &ssd_interrupt_legacy, flags, dev->queue[i].name, &dev->queue[i]); | |
11839 | } else { | |
11840 | ret = request_irq(pci_irq_vector(dev->pdev, i), &ssd_interrupt, flags, dev->queue[i].name, &dev->queue[i]); | |
11841 | } | |
11842 | #endif | |
11843 | if (ret) { | |
11844 | hio_warn("%s: request irq failed\n", dev->name); | |
11845 | /* alarm led */ | |
11846 | ssd_set_alarm(dev); | |
11847 | goto out_request_irq; | |
11848 | } | |
11849 | ||
11850 | #if (!defined MODULE) && (defined SSD_MSIX_AFFINITY_FORCE) && (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) | |
11851 | cpu_mask = (dev_to_node(&dev->pdev->dev) == -1) ? cpu_online_mask : cpumask_of_node(dev_to_node(&dev->pdev->dev)); | |
11852 | if (SSD_INT_MSIX == dev->int_mode) { | |
11853 | if ((0 == cpu_affinity) || (!cpumask_intersects(mask, cpumask_of(cpu_affinity)))) { | |
11854 | cpu_affinity = cpumask_first(cpu_mask); | |
11855 | } | |
11856 | ||
11857 | irq_set_affinity(dev->entry[i].vector, cpumask_of(cpu_affinity)); | |
11858 | cpu_affinity = cpumask_next(cpu_affinity, cpu_mask); | |
11859 | if (cpu_affinity >= nr_cpu_ids) { | |
11860 | cpu_affinity = cpumask_first(cpu_mask); | |
11861 | } | |
11862 | } | |
11863 | #endif | |
11864 | } | |
11865 | ||
11866 | return ret; | |
11867 | ||
11868 | out_request_irq: | |
11869 | #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) | |
11870 | if (SSD_INT_MSIX == dev->int_mode) { | |
11871 | for (j=0; j<dev->nr_queue; j++) { | |
11872 | irq_set_affinity_hint(dev->entry[j].vector, NULL); | |
11873 | } | |
11874 | } | |
11875 | #endif | |
11876 | ||
11877 | for (i--; i>=0; i--) { | |
11878 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) | |
11879 | free_irq(dev->entry[i].vector, &dev->queue[i]); | |
11880 | #else | |
11881 | free_irq(pci_irq_vector(dev->pdev, i), &dev->queue[i]); | |
11882 | #endif | |
11883 | } | |
11884 | ||
11885 | if (SSD_INT_MSIX == dev->int_mode) { | |
11886 | pci_disable_msix(dev->pdev); | |
11887 | } else if (SSD_INT_MSI == dev->int_mode) { | |
11888 | pci_disable_msi(dev->pdev); | |
11889 | } | |
11890 | ||
11891 | out: | |
11892 | return ret; | |
11893 | } | |
11894 | ||
11895 | static void ssd_initial_log(struct ssd_device *dev) | |
11896 | { | |
11897 | uint32_t val; | |
11898 | uint32_t speed, width; | |
11899 | ||
11900 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
11901 | return; | |
11902 | } | |
11903 | ||
11904 | val = ssd_reg32_read(dev->ctrlp + SSD_POWER_ON_REG); | |
11905 | if (val) { | |
11906 | // Poweron detection switched to SSD_INTR_INTERVAL_REG in 'ssd_init_smart' | |
11907 | //ssd_gen_swlog(dev, SSD_LOG_POWER_ON, dev->hw_info.bridge_ver); | |
11908 | } | |
11909 | ||
11910 | val = ssd_reg32_read(dev->ctrlp + SSD_PCIE_LINKSTATUS_REG); | |
11911 | speed = val & 0xF; | |
11912 | width = (val >> 4)& 0x3F; | |
11913 | if (0x1 == speed) { | |
11914 | hio_info("%s: PCIe: 2.5GT/s, x%u\n", dev->name, width); | |
11915 | } else if (0x2 == speed) { | |
11916 | hio_info("%s: PCIe: 5GT/s, x%u\n", dev->name, width); | |
11917 | } else { | |
11918 | hio_info("%s: PCIe: unknown GT/s, x%u\n", dev->name, width); | |
11919 | } | |
11920 | ssd_gen_swlog(dev, SSD_LOG_PCIE_LINK_STATUS, val); | |
11921 | ||
11922 | return; | |
11923 | } | |
11924 | ||
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
/*
 * ssd_hwmon_worker() - periodic hardware-monitor work item.
 *
 * Skips all sampling if the hardware check fails; otherwise samples the
 * clock, voltage and board-voltage monitors.  Pre-2.6.20 workqueues pass
 * the context pointer directly, hence the dual signature.
 */
static void ssd_hwmon_worker(void *data)
{
	struct ssd_device *dev = (struct ssd_device *)data;
#else
static void ssd_hwmon_worker(struct work_struct *work)
{
	struct ssd_device *dev = container_of(work, struct ssd_device, hwmon_work);
#endif

	if (ssd_check_hw(dev)) {
		//hio_err("%s: check hardware failed\n", dev->name);
		return;
	}

	ssd_check_clock(dev);
	ssd_check_volt(dev);

	ssd_mon_boardvolt(dev);
}
11945 | ||
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
/*
 * ssd_tempmon_worker() - periodic temperature-monitor work item.
 *
 * Samples the temperature monitor unless the hardware check fails.
 * Dual signature for pre-2.6.20 workqueue API.
 */
static void ssd_tempmon_worker(void *data)
{
	struct ssd_device *dev = (struct ssd_device *)data;
#else
static void ssd_tempmon_worker(struct work_struct *work)
{
	struct ssd_device *dev = container_of(work, struct ssd_device, tempmon_work);
#endif

	if (ssd_check_hw(dev)) {
		//hio_err("%s: check hardware failed\n", dev->name);
		return;
	}

	ssd_mon_temp(dev);
}
11963 | ||
11964 | ||
11965 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)) | |
11966 | static void ssd_capmon_worker(void *data) | |
11967 | { | |
11968 | struct ssd_device *dev = (struct ssd_device *)data; | |
11969 | #else | |
11970 | static void ssd_capmon_worker(struct work_struct *work) | |
11971 | { | |
11972 | struct ssd_device *dev = container_of(work, struct ssd_device, capmon_work); | |
11973 | #endif | |
11974 | uint32_t cap = 0; | |
11975 | uint32_t cap_threshold = SSD_PL_CAP_THRESHOLD; | |
11976 | int ret = 0; | |
11977 | ||
11978 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
11979 | return; | |
11980 | } | |
11981 | ||
11982 | if (dev->hw_info_ext.form_factor == SSD_FORM_FACTOR_FHHL && dev->hw_info.pcb_ver < 'B') { | |
11983 | return; | |
11984 | } | |
11985 | ||
11986 | /* fault before? */ | |
11987 | if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) { | |
11988 | ret = ssd_check_pl_cap_fast(dev); | |
11989 | if (ret) { | |
11990 | return; | |
11991 | } | |
11992 | } | |
11993 | ||
11994 | /* learn */ | |
11995 | ret = ssd_do_cap_learn(dev, &cap); | |
11996 | if (ret) { | |
11997 | hio_err("%s: cap learn failed\n", dev->name); | |
11998 | ssd_gen_swlog(dev, SSD_LOG_CAP_LEARN_FAULT, 0); | |
11999 | return; | |
12000 | } | |
12001 | ||
12002 | ssd_gen_swlog(dev, SSD_LOG_CAP_STATUS, cap); | |
12003 | ||
12004 | if (SSD_PL_CAP_CP == dev->hw_info_ext.cap_type) { | |
12005 | cap_threshold = SSD_PL_CAP_CP_THRESHOLD; | |
12006 | } | |
12007 | ||
12008 | //use the fw event id? | |
12009 | if (cap < cap_threshold) { | |
12010 | if (!test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) { | |
12011 | ssd_gen_swlog(dev, SSD_LOG_BATTERY_FAULT, 0); | |
12012 | } | |
12013 | } else if (cap >= (cap_threshold + SSD_PL_CAP_THRESHOLD_HYST)) { | |
12014 | if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) { | |
12015 | ssd_gen_swlog(dev, SSD_LOG_BATTERY_OK, 0); | |
12016 | } | |
12017 | } | |
12018 | } | |
12019 | ||
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0))
/*
 * ssd_routine_start() - periodic maintenance timer callback.
 *
 * Queues the log/hwmon/capmon/tempmon work items on their respective tick
 * intervals (guarded by the SSD_INIT_WORKQ state bit so nothing is queued
 * after workqueue teardown), then re-arms itself.  4.15+ kernels use the
 * timer_list callback signature with from_timer(), hence the dual form.
 */
static void ssd_routine_start(void *data)
#else
static void ssd_routine_start(struct timer_list *t)
#endif
{
	struct ssd_device *dev;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0))
	if (!data) {
		return;
	}
	dev = data;
#else
	dev = from_timer(dev, t, routine_timer);
#endif

	dev->routine_tick++;

	/* hardware log collection: only when the device is not busy */
	if (test_bit(SSD_INIT_WORKQ, &dev->state) && !ssd_busy(dev)) {
		(void)test_and_set_bit(SSD_LOG_HW, &dev->state);
		queue_work(dev->workq, &dev->log_work);
	}

	if ((dev->routine_tick % SSD_HWMON_ROUTINE_TICK) == 0 && test_bit(SSD_INIT_WORKQ, &dev->state)) {
		queue_work(dev->workq, &dev->hwmon_work);
	}

	if ((dev->routine_tick % SSD_CAPMON_ROUTINE_TICK) == 0 && test_bit(SSD_INIT_WORKQ, &dev->state)) {
		queue_work(dev->workq, &dev->capmon_work);
	}

	if ((dev->routine_tick % SSD_CAPMON2_ROUTINE_TICK) == 0 && test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon) && test_bit(SSD_INIT_WORKQ, &dev->state)) {
		/* CAP fault? check again */
		queue_work(dev->workq, &dev->capmon_work);
	}

	/* temperature is sampled every tick */
	if (test_bit(SSD_INIT_WORKQ, &dev->state)) {
		queue_work(dev->workq, &dev->tempmon_work);
	}

	/* schedule routine */
	mod_timer(&dev->routine_timer, jiffies + msecs_to_jiffies(SSD_ROUTINE_INTERVAL));
}
12064 | ||
12065 | static void ssd_cleanup_routine(struct ssd_device *dev) | |
12066 | { | |
12067 | if (unlikely(mode != SSD_DRV_MODE_STANDARD)) | |
12068 | return; | |
12069 | ||
12070 | (void)ssd_del_timer(&dev->routine_timer); | |
12071 | ||
12072 | (void)ssd_del_timer(&dev->bm_timer); | |
12073 | } | |
12074 | ||
/*
 * ssd_init_routine() - set up periodic maintenance for standard mode.
 *
 * Initializes the battery-manager/hwmon/capmon/tempmon work items, emits
 * the initial PCIe-link log entry, and arms the battery-manager and main
 * routine timers.  Returns 0 (non-standard modes skip everything).
 */
static int ssd_init_routine(struct ssd_device *dev)
{
	if (unlikely(mode != SSD_DRV_MODE_STANDARD))
		return 0;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
	/* old workqueue API takes an explicit context argument */
	INIT_WORK(&dev->bm_work, ssd_bm_worker, dev);
	INIT_WORK(&dev->hwmon_work, ssd_hwmon_worker, dev);
	INIT_WORK(&dev->capmon_work, ssd_capmon_worker, dev);
	INIT_WORK(&dev->tempmon_work, ssd_tempmon_worker, dev);
#else
	INIT_WORK(&dev->bm_work, ssd_bm_worker);
	INIT_WORK(&dev->hwmon_work, ssd_hwmon_worker);
	INIT_WORK(&dev->capmon_work, ssd_capmon_worker);
	INIT_WORK(&dev->tempmon_work, ssd_tempmon_worker);
#endif

	/* initial log */
	ssd_initial_log(dev);

	/* schedule bm routine */
	ssd_add_timer(&dev->bm_timer, msecs_to_jiffies(SSD_BM_CAP_LEARNING_DELAY), ssd_bm_routine_start, dev);

	/* schedule routine */
	ssd_add_timer(&dev->routine_timer, msecs_to_jiffies(SSD_ROUTINE_INTERVAL), ssd_routine_start, dev);

	return 0;
}
12103 | ||
/*
 * ssd_remove_one() - PCI remove callback.
 *
 * Tears down a device in the reverse order of ssd_init_one(): sysfs,
 * workqueue, cache flush + metadata/smart save, block/char devices,
 * routine timers, queues, tags, threads, IRQs, command/response buffers
 * and logs, then optionally asks the hardware to reload its firmware
 * before unmapping BAR0 and disabling the PCI device.  Steps guarded by
 * dev->slave were only set up for master-port devices.
 */
static void
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
__devexit
#endif
ssd_remove_one (struct pci_dev *pdev)
{
	struct ssd_device *dev;

	if (!pdev) {
		return;
	}

	dev = pci_get_drvdata(pdev);
	if (!dev) {
		return;
	}

	list_del_init(&dev->list);

	ssd_unregister_sysfs(dev);

	/* offline firstly */
	test_and_clear_bit(SSD_ONLINE, &dev->state);

	/* clean work queue first */
	if (!dev->slave) {
		test_and_clear_bit(SSD_INIT_WORKQ, &dev->state);
		ssd_cleanup_workq(dev);
	}

	/* flush cache */
	(void)ssd_flush(dev);
	(void)ssd_save_md(dev);

	/* save smart */
	if (!dev->slave) {
		ssd_save_smart(dev);
	}

	if (test_and_clear_bit(SSD_INIT_BD, &dev->state)) {
		ssd_cleanup_blkdev(dev);
	}

	if (!dev->slave) {
		ssd_cleanup_chardev(dev);
	}

	/* clean routine */
	if (!dev->slave) {
		ssd_cleanup_routine(dev);
	}

	ssd_cleanup_queue(dev);

	ssd_cleanup_tag(dev);
	ssd_cleanup_thread(dev);

	ssd_free_irq(dev);

	ssd_cleanup_dcmd(dev);
	ssd_cleanup_cmd(dev);
	ssd_cleanup_response(dev);

	if (!dev->slave) {
		ssd_cleanup_log(dev);
	}

	if (dev->reload_fw) { //reload fw
		dev->has_non_0x98_reg_access = 1;
		ssd_reg32_write(dev->ctrlp + SSD_RELOAD_FW_REG, SSD_RELOAD_FW);
	}

	/* unmap physical adress */
#ifdef LINUX_SUSE_OS
	iounmap(dev->ctrlp);
#else
	pci_iounmap(pdev, dev->ctrlp);
#endif

	release_mem_region(dev->mmio_base, dev->mmio_len);

	pci_disable_device(pdev);

	pci_set_drvdata(pdev, NULL);

	/* drop the reference taken at init; frees dev on last put */
	ssd_put(dev);
}
12191 | ||
/*
 * ssd_init_one() - PCI probe callback.
 *
 * Allocates and initializes an ssd_device for a newly discovered PCI
 * function: maps BAR0, validates the hardware, reads protocol/firmware/
 * hardware info, sets up response/command buffers, IRQs, threads, tags
 * and queues, then registers the char device, block device and sysfs.
 * Devices matching the slave-port device ID skip the master-only steps
 * (rom/label/workq/log/smart, sensors, power-loss cap, char device,
 * routine timers).  On failure the goto chain unwinds everything done so
 * far, in reverse order.  Returns 0 on success, negative errno otherwise.
 */
static int
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
__devinit
#endif
ssd_init_one(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	struct ssd_device *dev;
	int ret = 0;

	if (!pdev || !ent) {
		ret = -EINVAL;
		goto out;
	}

	dev = kmalloc(sizeof(struct ssd_device), GFP_KERNEL);
	if (!dev) {
		ret = -ENOMEM;
		goto out_alloc_dev;
	}
	memset(dev, 0, sizeof(struct ssd_device));

	dev->owner = THIS_MODULE;

	if (SSD_SLAVE_PORT_DEVID == ent->device) {
		dev->slave = 1;
	}

	dev->idx = ssd_get_index(dev->slave);
	if (dev->idx < 0) {
		ret = -ENOMEM;
		goto out_get_index;
	}

	/* device name: base name plus index; slave ports get their own
	 * base name and major and no char-device major */
	if (!dev->slave) {
		snprintf(dev->name, SSD_DEV_NAME_LEN, SSD_DEV_NAME);
		ssd_set_dev_name(&dev->name[strlen(SSD_DEV_NAME)], SSD_DEV_NAME_LEN-strlen(SSD_DEV_NAME), dev->idx);

		dev->major = ssd_major;
		dev->cmajor = ssd_cmajor;
	} else {
		snprintf(dev->name, SSD_DEV_NAME_LEN, SSD_SDEV_NAME);
		ssd_set_dev_name(&dev->name[strlen(SSD_SDEV_NAME)], SSD_DEV_NAME_LEN-strlen(SSD_SDEV_NAME), dev->idx);
		dev->major = ssd_major_sl;
		dev->cmajor = 0;
	}

	dev->reset_time = (uint64_t)ktime_get_real_seconds();

	atomic_set(&(dev->refcnt), 0);
	atomic_set(&(dev->tocnt), 0);

	mutex_init(&dev->fw_mutex);

	//xx
	mutex_init(&dev->gd_mutex);
	dev->has_non_0x98_reg_access = 0;

	//init in_flight lock
	spin_lock_init(&dev->in_flight_lock);

	dev->pdev = pdev;
	pci_set_drvdata(pdev, dev);

	kref_init(&dev->kref);

	ret = pci_enable_device(pdev);
	if (ret) {
		hio_warn("%s: can not enable device\n", dev->name);
		goto out_enable_device;
	}

	pci_set_master(pdev);

	/* device does 64-bit DMA for both streaming and coherent mappings */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
	ret = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
#else
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
#endif
	if (ret) {
		hio_warn("%s: set dma mask: failed\n", dev->name);
		goto out_set_dma_mask;
	}

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
	ret = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
#else
	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
#endif
	if (ret) {
		hio_warn("%s: set consistent dma mask: failed\n", dev->name);
		goto out_set_dma_mask;
	}

	dev->mmio_base = pci_resource_start(pdev, 0);
	dev->mmio_len = pci_resource_len(pdev, 0);

	if (!request_mem_region(dev->mmio_base, dev->mmio_len, SSD_DEV_NAME)) {
		hio_warn("%s: can not reserve MMIO region 0\n", dev->name);
		ret = -EBUSY;
		goto out_request_mem_region;
	}

	/* 2.6.9 kernel bug */
	dev->ctrlp = pci_iomap(pdev, 0, 0);
	if (!dev->ctrlp) {
		hio_warn("%s: can not remap IO region 0\n", dev->name);
		ret = -ENOMEM;
		goto out_pci_iomap;
	}

	ret = ssd_check_hw(dev);
	if (ret) {
		hio_err("%s: check hardware failed\n", dev->name);
		goto out_check_hw;
	}

	ret = ssd_init_protocol_info(dev);
	if (ret) {
		hio_err("%s: init protocol info failed\n", dev->name);
		goto out_init_protocol_info;
	}

	/* alarm led ? */
	ssd_clear_alarm(dev);

	ret = ssd_init_fw_info(dev);
	if (ret) {
		hio_err("%s: init firmware info failed\n", dev->name);
		/* alarm led */
		ssd_set_alarm(dev);
		goto out_init_fw_info;
	}

	/* slave port ? */
	if (dev->slave) {
		goto init_next1;
	}

	ret = ssd_init_rom_info(dev);
	if (ret) {
		hio_err("%s: init rom info failed\n", dev->name);
		/* alarm led */
		ssd_set_alarm(dev);
		goto out_init_rom_info;
	}

	ret = ssd_init_label(dev);
	if (ret) {
		hio_err("%s: init label failed\n", dev->name);
		/* alarm led */
		ssd_set_alarm(dev);
		goto out_init_label;
	}

	ret = ssd_init_workq(dev);
	if (ret) {
		hio_warn("%s: init workq failed\n", dev->name);
		goto out_init_workq;
	}
	(void)test_and_set_bit(SSD_INIT_WORKQ, &dev->state);

	ret = ssd_init_log(dev);
	if (ret) {
		hio_err("%s: init log failed\n", dev->name);
		/* alarm led */
		ssd_set_alarm(dev);
		goto out_init_log;
	}

	ret = ssd_init_smart(dev);
	if (ret) {
		hio_err("%s: init info failed\n", dev->name);
		/* alarm led */
		ssd_set_alarm(dev);
		goto out_init_smart;
	}

init_next1:
	ret = ssd_init_hw_info(dev);
	if (ret) {
		hio_err("%s: init hardware info failed\n", dev->name);
		/* alarm led */
		ssd_set_alarm(dev);
		goto out_init_hw_info;
	}

	/* slave port ? */
	if (dev->slave) {
		goto init_next2;
	}

	ret = ssd_init_sensor(dev);
	if (ret) {
		hio_err("%s: init sensor failed\n", dev->name);
		/* alarm led */
		ssd_set_alarm(dev);
		goto out_init_sensor;
	}

	ret = ssd_init_pl_cap(dev);
	if (ret) {
		hio_err("%s: int pl_cap failed\n", dev->name);
		/* alarm led */
		ssd_set_alarm(dev);
		goto out_init_pl_cap;
	}

init_next2:
	ret = ssd_check_init_state(dev);
	if (ret) {
		hio_err("%s: check init state failed\n", dev->name);
		/* alarm led */
		ssd_set_alarm(dev);
		goto out_check_init_state;
	}

	ret = ssd_init_response(dev);
	if (ret) {
		hio_warn("%s: init resp_msg failed\n", dev->name);
		goto out_init_response;
	}

	ret = ssd_init_cmd(dev);
	if (ret) {
		hio_warn("%s: init msg failed\n", dev->name);
		goto out_init_cmd;
	}

	ret = ssd_init_dcmd(dev);
	if (ret) {
		hio_warn("%s: init cmd failed\n", dev->name);
		goto out_init_dcmd;
	}

	ret = ssd_init_irq(dev);
	if (ret) {
		hio_warn("%s: init irq failed\n", dev->name);
		goto out_init_irq;
	}

	ret = ssd_init_thread(dev);
	if (ret) {
		hio_warn("%s: init thread failed\n", dev->name);
		goto out_init_thread;
	}

	ret = ssd_init_tag(dev);
	if(ret) {
		hio_warn("%s: init tags failed\n", dev->name);
		goto out_init_tags;
	}

	/* */
	(void)test_and_set_bit(SSD_ONLINE, &dev->state);

	ret = ssd_init_queue(dev);
	if (ret) {
		hio_warn("%s: init queue failed\n", dev->name);
		goto out_init_queue;
	}

	/* slave port ? */
	if (dev->slave) {
		goto init_next3;
	}

	ret = ssd_init_ot_protect(dev);
	if (ret) {
		hio_err("%s: int ot_protect failed\n", dev->name);
		/* alarm led */
		ssd_set_alarm(dev);
		goto out_int_ot_protect;
	}

	ret = ssd_init_wmode(dev);
	if (ret) {
		hio_warn("%s: init write mode\n", dev->name);
		goto out_init_wmode;
	}

	/* init routine after hw is ready */
	ret = ssd_init_routine(dev);
	if (ret) {
		hio_warn("%s: init routine\n", dev->name);
		goto out_init_routine;
	}

	ret = ssd_init_chardev(dev);
	if (ret) {
		hio_warn("%s: register char device failed\n", dev->name);
		goto out_init_chardev;
	}

init_next3:
	ret = ssd_init_blkdev(dev);
	if (ret) {
		hio_warn("%s: register block device failed\n", dev->name);
		goto out_init_blkdev;
	}
	(void)test_and_set_bit(SSD_INIT_BD, &dev->state);

	ret = ssd_register_sysfs(dev);
	if (ret) {
		hio_warn("%s: register sysfs failed\n", dev->name);
		goto out_register_sysfs;
	}

	dev->save_md = 1;

	list_add_tail(&dev->list, &ssd_list);

	return 0;

	/* error unwind: reverse order of the init sequence above */
out_register_sysfs:
	test_and_clear_bit(SSD_INIT_BD, &dev->state);
	ssd_cleanup_blkdev(dev);
out_init_blkdev:
	/* slave port ? */
	if (!dev->slave) {
		ssd_cleanup_chardev(dev);
	}
out_init_chardev:
	/* slave port ? */
	if (!dev->slave) {
		ssd_cleanup_routine(dev);
	}
out_init_routine:
out_init_wmode:
out_int_ot_protect:
	ssd_cleanup_queue(dev);
out_init_queue:
	test_and_clear_bit(SSD_ONLINE, &dev->state);
	ssd_cleanup_tag(dev);
out_init_tags:
	ssd_cleanup_thread(dev);
out_init_thread:
	ssd_free_irq(dev);
out_init_irq:
	ssd_cleanup_dcmd(dev);
out_init_dcmd:
	ssd_cleanup_cmd(dev);
out_init_cmd:
	ssd_cleanup_response(dev);
out_init_response:
out_check_init_state:
out_init_pl_cap:
out_init_sensor:
out_init_hw_info:
out_init_smart:
	/* slave port ? */
	if (!dev->slave) {
		ssd_cleanup_log(dev);
	}
out_init_log:
	/* slave port ? */
	if (!dev->slave) {
		test_and_clear_bit(SSD_INIT_WORKQ, &dev->state);
		ssd_cleanup_workq(dev);
	}
out_init_workq:
out_init_label:
out_init_rom_info:
out_init_fw_info:
out_init_protocol_info:
out_check_hw:
#ifdef LINUX_SUSE_OS
	iounmap(dev->ctrlp);
#else
	pci_iounmap(pdev, dev->ctrlp);
#endif
out_pci_iomap:
	release_mem_region(dev->mmio_base, dev->mmio_len);
out_request_mem_region:
out_set_dma_mask:
	pci_disable_device(pdev);
out_enable_device:
	pci_set_drvdata(pdev, NULL);
out_get_index:
	kfree(dev);
out_alloc_dev:
out:
	return ret;
}
12576 | ||
12577 | static void ssd_cleanup_tasklet(void) | |
12578 | { | |
12579 | int i; | |
12580 | for_each_online_cpu(i) { | |
12581 | tasklet_kill(&per_cpu(ssd_tasklet, i)); | |
12582 | } | |
12583 | } | |
12584 | ||
12585 | static int ssd_init_tasklet(void) | |
12586 | { | |
12587 | int i; | |
12588 | ||
12589 | for_each_online_cpu(i) { | |
12590 | INIT_LIST_HEAD(&per_cpu(ssd_doneq, i)); | |
12591 | ||
12592 | if (finject) { | |
12593 | tasklet_init(&per_cpu(ssd_tasklet, i), __ssd_done_db, 0); | |
12594 | } else { | |
12595 | tasklet_init(&per_cpu(ssd_tasklet, i), __ssd_done, 0); | |
12596 | } | |
12597 | } | |
12598 | ||
12599 | return 0; | |
12600 | } | |
12601 | ||
/*
 * PCI vendor/device IDs claimed by this driver.
 * 0x10ee is the Xilinx vendor ID (g3 boards); 0x19e5 is Huawei.
 */
static struct pci_device_id ssd_pci_tbl[] = {
	{ 0x10ee, 0x0007, PCI_ANY_ID, PCI_ANY_ID, }, /* g3 */
	{ 0x19e5, 0x0007, PCI_ANY_ID, PCI_ANY_ID, }, /* v1 */
	//{ 0x19e5, 0x0008, PCI_ANY_ID, PCI_ANY_ID, }, /* v1 sp*/
	{ 0x19e5, 0x0009, PCI_ANY_ID, PCI_ANY_ID, }, /* v2 */
	{ 0x19e5, 0x000a, PCI_ANY_ID, PCI_ANY_ID, }, /* v2 dp slave*/
	{ 0, } /* terminator */
};
12610 | ||
/*
 * Driver power-management suspend handler (pm_ops on >= 2.6.32, legacy
 * pci suspend callback before that).
 *
 * Quiesces the device in strict order: take it offline, stop the work
 * queue, flush the cache and metadata, persist SMART data, stop the
 * routine/thread/IRQ machinery, then release MMIO resources and disable
 * the PCI device. The ordering mirrors the remove path; do not reorder.
 *
 * Returns 0 on success, -EINVAL if pdev or its drvdata is missing.
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32))
static int ssd_hio_suspend(struct pci_dev *pdev, pm_message_t state)
{
#else
static int ssd_hio_suspend(struct device *ddev)
{
	struct pci_dev *pdev = to_pci_dev(ddev);
#endif
	struct ssd_device *dev;

	if (!pdev) {
		return -EINVAL;
	}

	dev = pci_get_drvdata(pdev);
	if (!dev) {
		return -EINVAL;
	}

	hio_warn("%s: suspend disk start.\n", dev->name);
	ssd_unregister_sysfs(dev);

	/* offline firstly, so no new I/O is accepted */
	test_and_clear_bit(SSD_ONLINE, &dev->state);

	/* clean work queue first (master port only) */
	if (!dev->slave) {
		test_and_clear_bit(SSD_INIT_WORKQ, &dev->state);
		ssd_cleanup_workq(dev);
	}

	/* flush cache and persist metadata */
	(void)ssd_flush(dev);
	(void)ssd_save_md(dev);

	/* save smart (master port only) */
	if (!dev->slave) {
		ssd_save_smart(dev);
	}

	/* clean routine (master port only) */
	if (!dev->slave) {
		ssd_cleanup_routine(dev);
	}

	ssd_cleanup_thread(dev);

	ssd_free_irq(dev);

	if (!dev->slave) {
		ssd_cleanup_log(dev);
	}

	if (dev->reload_fw) { /* ask the controller to reload its firmware */
		dev->has_non_0x98_reg_access = 1;
		ssd_reg32_write(dev->ctrlp + SSD_RELOAD_FW_REG, SSD_RELOAD_FW);
	}

	/* unmap the control BAR; pointer is cleared so resume can re-map */
	if (dev->ctrlp) {
#ifdef LINUX_SUSE_OS
		iounmap(dev->ctrlp);
#else
		pci_iounmap(pdev, dev->ctrlp);
#endif
		dev->ctrlp = NULL;
	}

	if (dev->mmio_base) {
		release_mem_region(dev->mmio_base, dev->mmio_len);
		dev->mmio_base = 0;
	}

	pci_disable_device(pdev);

	hio_warn("%s: suspend disk finish.\n", dev->name);

	return 0;
}
12692 | ||
12693 | ||
12694 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)) | |
12695 | static int ssd_hio_resume(struct pci_dev *pdev) | |
12696 | { | |
12697 | #else | |
12698 | static int ssd_hio_resume(struct device *ddev) | |
12699 | { | |
12700 | struct pci_dev *pdev = to_pci_dev(ddev); | |
12701 | #endif | |
12702 | struct ssd_device *dev = NULL; | |
12703 | int ret = 0; | |
12704 | ||
12705 | if (!pdev ) { | |
12706 | ret = -EINVAL; | |
12707 | goto out; | |
12708 | } | |
12709 | ||
12710 | dev = pci_get_drvdata(pdev); | |
12711 | if (!dev) { | |
12712 | ret = -ENOMEM; | |
12713 | goto out_alloc_dev; | |
12714 | } | |
12715 | ||
12716 | hio_warn("%s: resume disk start.\n", dev->name); | |
12717 | ret = pci_enable_device(pdev); | |
12718 | if (ret) { | |
12719 | hio_warn("%s: can not enable device\n", dev->name); | |
12720 | goto out_enable_device; | |
12721 | } | |
12722 | ||
12723 | pci_set_master(pdev); | |
12724 | ||
12725 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)) | |
12726 | ret = pci_set_dma_mask(pdev, DMA_64BIT_MASK); | |
12727 | #else | |
12728 | ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); | |
12729 | #endif | |
12730 | if (ret) { | |
12731 | hio_warn("%s: set dma mask: failed\n", dev->name); | |
12732 | goto out_set_dma_mask; | |
12733 | } | |
12734 | ||
12735 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)) | |
12736 | ret = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); | |
12737 | #else | |
12738 | ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | |
12739 | #endif | |
12740 | if (ret) { | |
12741 | hio_warn("%s: set consistent dma mask: failed\n", dev->name); | |
12742 | goto out_set_dma_mask; | |
12743 | } | |
12744 | ||
12745 | dev->mmio_base = pci_resource_start(pdev, 0); | |
12746 | dev->mmio_len = pci_resource_len(pdev, 0); | |
12747 | ||
12748 | if (!request_mem_region(dev->mmio_base, dev->mmio_len, SSD_DEV_NAME)) { | |
12749 | hio_warn("%s: can not reserve MMIO region 0\n", dev->name); | |
12750 | ret = -EBUSY; | |
12751 | goto out_request_mem_region; | |
12752 | } | |
12753 | ||
12754 | /* 2.6.9 kernel bug */ | |
12755 | dev->ctrlp = pci_iomap(pdev, 0, 0); | |
12756 | if (!dev->ctrlp) { | |
12757 | hio_warn("%s: can not remap IO region 0\n", dev->name); | |
12758 | ret = -ENOMEM; | |
12759 | goto out_pci_iomap; | |
12760 | } | |
12761 | ||
12762 | ret = ssd_check_hw(dev); | |
12763 | if (ret) { | |
12764 | hio_err("%s: check hardware failed\n", dev->name); | |
12765 | goto out_check_hw; | |
12766 | } | |
12767 | ||
12768 | /* alarm led ? */ | |
12769 | ssd_clear_alarm(dev); | |
12770 | ||
12771 | ret = ssd_init_fw_info(dev); | |
12772 | if (ret) { | |
12773 | hio_err("%s: init firmware info failed\n", dev->name); | |
12774 | /* alarm led */ | |
12775 | ssd_set_alarm(dev); | |
12776 | goto out_init_fw_info; | |
12777 | } | |
12778 | ||
12779 | /* slave port ? */ | |
12780 | if (dev->slave) { | |
12781 | goto init_next1; | |
12782 | } | |
12783 | ||
12784 | ret = ssd_init_rom_info(dev); | |
12785 | if (ret) { | |
12786 | hio_err("%s: init rom info failed\n", dev->name); | |
12787 | /* alarm led */ | |
12788 | ssd_set_alarm(dev); | |
12789 | goto out_init_rom_info; | |
12790 | } | |
12791 | ||
12792 | ret = ssd_init_label(dev); | |
12793 | if (ret) { | |
12794 | hio_err("%s: init label failed\n", dev->name); | |
12795 | /* alarm led */ | |
12796 | ssd_set_alarm(dev); | |
12797 | goto out_init_label; | |
12798 | } | |
12799 | ||
12800 | ret = ssd_init_workq(dev); | |
12801 | if (ret) { | |
12802 | hio_warn("%s: init workq failed\n", dev->name); | |
12803 | goto out_init_workq; | |
12804 | } | |
12805 | (void)test_and_set_bit(SSD_INIT_WORKQ, &dev->state); | |
12806 | ||
12807 | ret = ssd_init_log(dev); | |
12808 | if (ret) { | |
12809 | hio_err("%s: init log failed\n", dev->name); | |
12810 | /* alarm led */ | |
12811 | ssd_set_alarm(dev); | |
12812 | goto out_init_log; | |
12813 | } | |
12814 | ||
12815 | ret = ssd_init_smart(dev); | |
12816 | if (ret) { | |
12817 | hio_err("%s: init info failed\n", dev->name); | |
12818 | /* alarm led */ | |
12819 | ssd_set_alarm(dev); | |
12820 | goto out_init_smart; | |
12821 | } | |
12822 | ||
12823 | init_next1: | |
12824 | ret = ssd_init_hw_info(dev); | |
12825 | if (ret) { | |
12826 | hio_err("%s: init hardware info failed\n", dev->name); | |
12827 | /* alarm led */ | |
12828 | ssd_set_alarm(dev); | |
12829 | goto out_init_hw_info; | |
12830 | } | |
12831 | ||
12832 | /* slave port ? */ | |
12833 | if (dev->slave) { | |
12834 | goto init_next2; | |
12835 | } | |
12836 | ||
12837 | ret = ssd_init_sensor(dev); | |
12838 | if (ret) { | |
12839 | hio_err("%s: init sensor failed\n", dev->name); | |
12840 | /* alarm led */ | |
12841 | ssd_set_alarm(dev); | |
12842 | goto out_init_sensor; | |
12843 | } | |
12844 | ||
12845 | ret = ssd_init_pl_cap(dev); | |
12846 | if (ret) { | |
12847 | hio_err("%s: int pl_cap failed\n", dev->name); | |
12848 | /* alarm led */ | |
12849 | ssd_set_alarm(dev); | |
12850 | goto out_init_pl_cap; | |
12851 | } | |
12852 | ||
12853 | init_next2: | |
12854 | ret = ssd_check_init_state(dev); | |
12855 | if (ret) { | |
12856 | hio_err("%s: check init state failed\n", dev->name); | |
12857 | /* alarm led */ | |
12858 | ssd_set_alarm(dev); | |
12859 | goto out_check_init_state; | |
12860 | } | |
12861 | ||
12862 | //flush all base pointer to ssd | |
12863 | (void)ssd_reload_ssd_ptr(dev); | |
12864 | ||
12865 | ret = ssd_init_irq(dev); | |
12866 | if (ret) { | |
12867 | hio_warn("%s: init irq failed\n", dev->name); | |
12868 | goto out_init_irq; | |
12869 | } | |
12870 | ||
12871 | ret = ssd_init_thread(dev); | |
12872 | if (ret) { | |
12873 | hio_warn("%s: init thread failed\n", dev->name); | |
12874 | goto out_init_thread; | |
12875 | } | |
12876 | ||
12877 | /* */ | |
12878 | (void)test_and_set_bit(SSD_ONLINE, &dev->state); | |
12879 | ||
12880 | /* slave port ? */ | |
12881 | if (dev->slave) { | |
12882 | goto init_next3; | |
12883 | } | |
12884 | ||
12885 | ret = ssd_init_ot_protect(dev); | |
12886 | if (ret) { | |
12887 | hio_err("%s: int ot_protect failed\n", dev->name); | |
12888 | /* alarm led */ | |
12889 | ssd_set_alarm(dev); | |
12890 | goto out_int_ot_protect; | |
12891 | } | |
12892 | ||
12893 | ret = ssd_init_wmode(dev); | |
12894 | if (ret) { | |
12895 | hio_warn("%s: init write mode\n", dev->name); | |
12896 | goto out_init_wmode; | |
12897 | } | |
12898 | ||
12899 | /* init routine after hw is ready */ | |
12900 | ret = ssd_init_routine(dev); | |
12901 | if (ret) { | |
12902 | hio_warn("%s: init routine\n", dev->name); | |
12903 | goto out_init_routine; | |
12904 | } | |
12905 | ||
12906 | init_next3: | |
12907 | (void)test_and_set_bit(SSD_INIT_BD, &dev->state); | |
12908 | ||
12909 | dev->save_md = 1; | |
12910 | ||
12911 | hio_warn("%s: resume disk finish.\n", dev->name); | |
12912 | ||
12913 | return 0; | |
12914 | ||
12915 | out_init_routine: | |
12916 | out_init_wmode: | |
12917 | out_int_ot_protect: | |
12918 | ssd_cleanup_thread(dev); | |
12919 | out_init_thread: | |
12920 | ssd_free_irq(dev); | |
12921 | out_init_irq: | |
12922 | out_check_init_state: | |
12923 | out_init_pl_cap: | |
12924 | out_init_sensor: | |
12925 | out_init_hw_info: | |
12926 | out_init_smart: | |
12927 | /* slave port ? */ | |
12928 | if (!dev->slave) { | |
12929 | ssd_cleanup_log(dev); | |
12930 | } | |
12931 | out_init_log: | |
12932 | /* slave port ? */ | |
12933 | if (!dev->slave) { | |
12934 | test_and_clear_bit(SSD_INIT_WORKQ, &dev->state); | |
12935 | ssd_cleanup_workq(dev); | |
12936 | } | |
12937 | out_init_workq: | |
12938 | out_init_label: | |
12939 | out_init_rom_info: | |
12940 | out_init_fw_info: | |
12941 | out_check_hw: | |
12942 | #ifdef LINUX_SUSE_OS | |
12943 | iounmap(dev->ctrlp); | |
12944 | #else | |
12945 | pci_iounmap(pdev, dev->ctrlp); | |
12946 | #endif | |
12947 | out_pci_iomap: | |
12948 | release_mem_region(dev->mmio_base, dev->mmio_len); | |
12949 | out_request_mem_region: | |
12950 | out_set_dma_mask: | |
12951 | pci_disable_device(pdev); | |
12952 | out_enable_device: | |
12953 | out_alloc_dev: | |
12954 | out: | |
12955 | ||
12956 | hio_warn("%s: resume disk fail.\n", dev->name); | |
12957 | ||
12958 | return ret; | |
12959 | } | |
12960 | ||
/* export the PCI ID table so udev/modpost can autoload the module */
MODULE_DEVICE_TABLE(pci, ssd_pci_tbl);

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32))
/* pre-2.6.32 kernels use the legacy .suspend/.resume pci_driver hooks */
#else
/* dev_pm_ops wrapper binding suspend/resume for the modern PM core */
SIMPLE_DEV_PM_OPS(hio_pm_ops, ssd_hio_suspend, ssd_hio_resume);
#endif
12967 | ||
/*
 * PCI driver registration block. Probe/remove plus power management,
 * with the PM hookup selected by kernel version: legacy .suspend/.resume
 * before 2.6.32, dev_pm_ops afterwards.
 */
struct pci_driver ssd_driver = {
	.name = MODULE_NAME,
	.id_table = ssd_pci_tbl,
	.probe = ssd_init_one,
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
	/* __devexit_p() was removed in 2.6.38+ */
	.remove = __devexit_p(ssd_remove_one),
#else
	.remove = ssd_remove_one,
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32))
	.suspend = ssd_hio_suspend,
	.resume = ssd_hio_resume,
#else
	.driver = {
		.pm = &hio_pm_ops,
	},
#endif
};
12987 | ||
12988 | /* notifier block to get a notify on system shutdown/halt/reboot */ | |
12989 | static int ssd_notify_reboot(struct notifier_block *nb, unsigned long event, void *buf) | |
12990 | { | |
12991 | struct ssd_device *dev = NULL; | |
12992 | struct ssd_device *n = NULL; | |
12993 | ||
12994 | list_for_each_entry_safe(dev, n, &ssd_list, list) { | |
12995 | ssd_gen_swlog(dev, SSD_LOG_POWER_OFF, 0); | |
12996 | ||
12997 | (void)ssd_flush(dev); | |
12998 | (void)ssd_save_md(dev); | |
12999 | ||
13000 | /* slave port ? */ | |
13001 | if (!dev->slave) { | |
13002 | ssd_save_smart(dev); | |
13003 | ||
13004 | ssd_stop_workq(dev); | |
13005 | ||
13006 | if (dev->reload_fw) { | |
13007 | dev->has_non_0x98_reg_access = 1; | |
13008 | ssd_reg32_write(dev->ctrlp + SSD_RELOAD_FW_REG, SSD_RELOAD_FW); | |
13009 | } | |
13010 | } | |
13011 | } | |
13012 | ||
13013 | return NOTIFY_OK; | |
13014 | } | |
13015 | ||
13016 | static struct notifier_block ssd_notifier = { | |
13017 | ssd_notify_reboot, NULL, 0 | |
13018 | }; | |
13019 | ||
13020 | static int __init ssd_init_module(void) | |
13021 | { | |
13022 | int ret = 0; | |
13023 | ||
13024 | hio_info("driver version: %s\n", DRIVER_VERSION); | |
13025 | ||
13026 | ret = ssd_init_index(); | |
13027 | if (ret) { | |
13028 | hio_warn("init index failed\n"); | |
13029 | goto out_init_index; | |
13030 | } | |
13031 | ||
13032 | ret = ssd_init_proc(); | |
13033 | if (ret) { | |
13034 | hio_warn("init proc failed\n"); | |
13035 | goto out_init_proc; | |
13036 | } | |
13037 | ||
13038 | ret = ssd_init_sysfs(); | |
13039 | if (ret) { | |
13040 | hio_warn("init sysfs failed\n"); | |
13041 | goto out_init_sysfs; | |
13042 | } | |
13043 | ||
13044 | ret = ssd_init_tasklet(); | |
13045 | if (ret) { | |
13046 | hio_warn("init tasklet failed\n"); | |
13047 | goto out_init_tasklet; | |
13048 | } | |
13049 | ||
13050 | #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12)) | |
13051 | ssd_class = class_simple_create(THIS_MODULE, SSD_DEV_NAME); | |
13052 | #else | |
13053 | ssd_class = class_create(THIS_MODULE, SSD_DEV_NAME); | |
13054 | #endif | |
13055 | if (IS_ERR(ssd_class)) { | |
13056 | ret = PTR_ERR(ssd_class); | |
13057 | goto out_class_create; | |
13058 | } | |
13059 | ||
13060 | if (ssd_cmajor > 0) { | |
13061 | ret = register_chrdev(ssd_cmajor, SSD_CDEV_NAME, &ssd_cfops); | |
13062 | } else { | |
13063 | ret = ssd_cmajor = register_chrdev(ssd_cmajor, SSD_CDEV_NAME, &ssd_cfops); | |
13064 | } | |
13065 | if (ret < 0) { | |
13066 | hio_warn("unable to register chardev major number\n"); | |
13067 | goto out_register_chardev; | |
13068 | } | |
13069 | ||
13070 | if (ssd_major > 0) { | |
13071 | ret = register_blkdev(ssd_major, SSD_DEV_NAME); | |
13072 | } else { | |
13073 | ret = ssd_major = register_blkdev(ssd_major, SSD_DEV_NAME); | |
13074 | } | |
13075 | if (ret < 0) { | |
13076 | hio_warn("unable to register major number\n"); | |
13077 | goto out_register_blkdev; | |
13078 | } | |
13079 | ||
13080 | if (ssd_major_sl > 0) { | |
13081 | ret = register_blkdev(ssd_major_sl, SSD_SDEV_NAME); | |
13082 | } else { | |
13083 | ret = ssd_major_sl = register_blkdev(ssd_major_sl, SSD_SDEV_NAME); | |
13084 | } | |
13085 | if (ret < 0) { | |
13086 | hio_warn("unable to register slave major number\n"); | |
13087 | goto out_register_blkdev_sl; | |
13088 | } | |
13089 | ||
13090 | if (mode < SSD_DRV_MODE_STANDARD || mode > SSD_DRV_MODE_BASE) { | |
13091 | mode = SSD_DRV_MODE_STANDARD; | |
13092 | } | |
13093 | ||
13094 | /* for debug */ | |
13095 | if (mode != SSD_DRV_MODE_STANDARD) { | |
13096 | ssd_minors = 1; | |
13097 | } | |
13098 | ||
13099 | if (int_mode < SSD_INT_LEGACY || int_mode > SSD_INT_MSIX) { | |
13100 | int_mode = SSD_INT_MODE_DEFAULT; | |
13101 | } | |
13102 | ||
13103 | if (threaded_irq) { | |
13104 | int_mode = SSD_INT_MSI; | |
13105 | } | |
13106 | ||
13107 | if (log_level >= SSD_LOG_NR_LEVEL || log_level < SSD_LOG_LEVEL_INFO) { | |
13108 | log_level = SSD_LOG_LEVEL_ERR; | |
13109 | } | |
13110 | ||
13111 | if (wmode < SSD_WMODE_BUFFER || wmode > SSD_WMODE_DEFAULT) { | |
13112 | wmode = SSD_WMODE_DEFAULT; | |
13113 | } | |
13114 | ||
13115 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)) | |
13116 | ret = pci_module_init(&ssd_driver); | |
13117 | #else | |
13118 | ret = pci_register_driver(&ssd_driver); | |
13119 | #endif | |
13120 | if (ret) { | |
13121 | hio_warn("pci init failed\n"); | |
13122 | goto out_pci_init; | |
13123 | } | |
13124 | ||
13125 | ret = register_reboot_notifier(&ssd_notifier); | |
13126 | if (ret) { | |
13127 | hio_warn("register reboot notifier failed\n"); | |
13128 | goto out_register_reboot_notifier; | |
13129 | } | |
13130 | ||
13131 | return 0; | |
13132 | ||
13133 | out_register_reboot_notifier: | |
13134 | out_pci_init: | |
13135 | pci_unregister_driver(&ssd_driver); | |
13136 | unregister_blkdev(ssd_major_sl, SSD_SDEV_NAME); | |
13137 | out_register_blkdev_sl: | |
13138 | unregister_blkdev(ssd_major, SSD_DEV_NAME); | |
13139 | out_register_blkdev: | |
13140 | unregister_chrdev(ssd_cmajor, SSD_CDEV_NAME); | |
13141 | out_register_chardev: | |
13142 | #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12)) | |
13143 | class_simple_destroy(ssd_class); | |
13144 | #else | |
13145 | class_destroy(ssd_class); | |
13146 | #endif | |
13147 | out_class_create: | |
13148 | ssd_cleanup_tasklet(); | |
13149 | out_init_tasklet: | |
13150 | ssd_cleanup_sysfs(); | |
13151 | out_init_sysfs: | |
13152 | ssd_cleanup_proc(); | |
13153 | out_init_proc: | |
13154 | ssd_cleanup_index(); | |
13155 | out_init_index: | |
13156 | return ret; | |
13157 | ||
13158 | } | |
13159 | ||
/*
 * Module exit: tear down everything ssd_init_module() set up, in reverse
 * order. ssd_exiting is raised first so in-flight paths can bail out;
 * the PCI driver unregistration then triggers per-device remove.
 */
static void __exit ssd_cleanup_module(void)
{

	hio_info("unload driver: %s\n", DRIVER_VERSION);
	/* signal other code paths that the module is going away */
	ssd_exiting = 1;

	unregister_reboot_notifier(&ssd_notifier);

	pci_unregister_driver(&ssd_driver);

	unregister_blkdev(ssd_major_sl, SSD_SDEV_NAME);
	unregister_blkdev(ssd_major, SSD_DEV_NAME);
	unregister_chrdev(ssd_cmajor, SSD_CDEV_NAME);
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
	class_simple_destroy(ssd_class);
#else
	class_destroy(ssd_class);
#endif

	ssd_cleanup_tasklet();
	ssd_cleanup_sysfs();
	ssd_cleanup_proc();
	ssd_cleanup_index();
}
13185 | ||
/*
 * Register an event callback for a device and replay relevant internal
 * log entries to it.
 *
 * Replays every stored log entry whose timestamp falls inside
 * [dev->uptime, now], with two exceptions: SEU_FAULT1 entries older than
 * the last reset are skipped, and temperature events (over/warn/normal)
 * are not replayed individually — only the most recent one is kept, and
 * it is delivered at the end only if the current temperature is still at
 * or above SSD_OT_TEMP_HYST (i.e. the condition may still be active).
 *
 * Returns 0 on success, -EINVAL on a NULL bdev/callback/disk.
 * NOTE(review): assumes dev->internal_log.log holds nr_log contiguous
 * struct ssd_log entries — confirm against the log writer.
 */
int ssd_register_event_notifier(struct block_device *bdev, ssd_event_call event_call)
{
	struct ssd_device *dev;
	struct ssd_log *le, *temp_le = NULL;
	uint64_t cur;
	int temp = 0;
	int log_nr;

	if (!bdev || !event_call || !(bdev->bd_disk)) {
		return -EINVAL;
	}

	dev = bdev->bd_disk->private_data;
	dev->event_call = event_call;

	/* current wall-clock time in seconds; log entries newer than this
	 * (clock skew) are ignored */
	cur = (uint64_t)ktime_get_real_seconds();

	le = (struct ssd_log *)(dev->internal_log.log);
	log_nr = dev->internal_log.nr_log;

	while (log_nr--) {
		if (le->time <= cur && le->time >= dev->uptime) {
			/* SEU fault from before the last reset: stale, skip */
			if ((le->le.event == SSD_LOG_SEU_FAULT1) && (le->time < dev->reset_time)) {
				le++;
				continue;
			}
			/* temperature events: remember only the latest one */
			if (le->le.event == SSD_LOG_OVER_TEMP || le->le.event == SSD_LOG_NORMAL_TEMP || le->le.event == SSD_LOG_WARN_TEMP) {
				if (!temp_le || le->time >= temp_le->time) {
					temp_le = le;
				}
				le++;
				continue;
			}
			(void)dev->event_call(dev->gd, le->le.event, ssd_parse_log(dev, le, 0));
		}
		le++;
	}

	/* deliver the latest temperature event only if still relevant */
	ssd_get_temperature(bdev, &temp);
	if (temp_le && (temp >= SSD_OT_TEMP_HYST)) {
		(void)dev->event_call(dev->gd, temp_le->le.event, ssd_parse_log(dev, temp_le, 0));
	}

	return 0;
}
13231 | ||
13232 | int ssd_unregister_event_notifier(struct block_device *bdev) | |
13233 | { | |
13234 | struct ssd_device *dev; | |
13235 | ||
13236 | if (!bdev || !(bdev->bd_disk)) { | |
13237 | return -EINVAL; | |
13238 | } | |
13239 | ||
13240 | dev = bdev->bd_disk->private_data; | |
13241 | dev->event_call = NULL; | |
13242 | ||
13243 | return 0; | |
13244 | } | |
13245 | ||
/* Public in-kernel API exported for other modules (management tools,
 * stacking drivers) that talk to hio devices directly. */
EXPORT_SYMBOL(ssd_get_label);
EXPORT_SYMBOL(ssd_get_version);
EXPORT_SYMBOL(ssd_set_otprotect);
EXPORT_SYMBOL(ssd_bm_status);
EXPORT_SYMBOL(ssd_submit_pbio);
EXPORT_SYMBOL(ssd_get_pciaddr);
EXPORT_SYMBOL(ssd_get_temperature);
EXPORT_SYMBOL(ssd_register_event_notifier);
EXPORT_SYMBOL(ssd_unregister_event_notifier);
EXPORT_SYMBOL(ssd_reset);
EXPORT_SYMBOL(ssd_set_wmode);



/* module entry/exit points and metadata */
module_init(ssd_init_module);
module_exit(ssd_cleanup_module);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei SSD DEV Team");
MODULE_DESCRIPTION("Huawei SSD driver");