/*
 * NOTE(review): the lines that followed here were a stray git-blame /
 * export header ("Commit | Line | Data", commit 361ebed5 HSDT) left over
 * from extraction; converted to a comment so the file parses as C.
 */
1 | /* |
2 | * Huawei SSD device driver | |
3 | * Copyright (c) 2016, Huawei Technologies Co., Ltd. | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify it | |
6 | * under the terms and conditions of the GNU General Public License, | |
7 | * version 2, as published by the Free Software Foundation. | |
8 | * | |
9 | * This program is distributed in the hope it will be useful, but WITHOUT | |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
12 | * more details. | |
13 | */ | |
b44043bd | 14 | |
361ebed5 HSDT |
15 | #ifndef LINUX_VERSION_CODE |
16 | #include <linux/version.h> | |
17 | #endif | |
18 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)) | |
19 | #include <linux/config.h> | |
20 | #endif | |
21 | #include <linux/types.h> | |
22 | #include <linux/kernel.h> | |
23 | #include <linux/module.h> | |
24 | #include <linux/bio.h> | |
25 | #include <linux/timer.h> | |
26 | #include <linux/init.h> | |
27 | #include <linux/pci.h> | |
28 | #include <linux/slab.h> | |
29 | #include <linux/spinlock.h> | |
30 | #include <linux/blkdev.h> | |
31 | #include <linux/sched.h> | |
32 | #include <linux/fcntl.h> | |
33 | #include <linux/interrupt.h> | |
34 | #include <linux/compiler.h> | |
35 | #include <linux/bitops.h> | |
36 | #include <linux/delay.h> | |
37 | #include <linux/time.h> | |
38 | #include <linux/stat.h> | |
39 | #include <linux/fs.h> | |
40 | #include <linux/dma-mapping.h> | |
41 | #include <linux/completion.h> | |
42 | #include <linux/workqueue.h> | |
43 | #include <linux/mm.h> | |
44 | #include <linux/ioctl.h> | |
45 | #include <linux/hdreg.h> /* HDIO_GETGEO */ | |
46 | #include <linux/list.h> | |
47 | #include <linux/reboot.h> | |
48 | #include <linux/kthread.h> | |
49 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0)) | |
50 | #include <linux/seq_file.h> | |
51 | #endif | |
52 | #include <asm/uaccess.h> | |
53 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0)) | |
54 | #include <linux/scatterlist.h> | |
55 | #include <linux/vmalloc.h> | |
56 | #else | |
57 | #include <asm/scatterlist.h> | |
58 | #endif | |
59 | #include <asm/io.h> | |
60 | #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17)) | |
61 | #include <linux/devfs_fs_kernel.h> | |
62 | #endif | |
63 | ||
64 | /* driver */ | |
65 | #define MODULE_NAME "hio" | |
da3355df | 66 | #define DRIVER_VERSION "2.1.0.40" |
361ebed5 HSDT |
67 | #define DRIVER_VERSION_LEN 16 |
68 | ||
69 | #define SSD_FW_MIN 0x1 | |
70 | ||
71 | #define SSD_DEV_NAME MODULE_NAME | |
72 | #define SSD_DEV_NAME_LEN 16 | |
73 | #define SSD_CDEV_NAME "c"SSD_DEV_NAME | |
74 | #define SSD_SDEV_NAME "s"SSD_DEV_NAME | |
75 | ||
76 | ||
77 | #define SSD_CMAJOR 0 | |
78 | #define SSD_MAJOR 0 | |
79 | #define SSD_MAJOR_SL 0 | |
80 | #define SSD_MINORS 16 | |
81 | ||
82 | #define SSD_MAX_DEV 702 | |
83 | #define SSD_ALPHABET_NUM 26 | |
84 | ||
85 | #define hio_info(f, arg...) printk(KERN_INFO MODULE_NAME"info: " f , ## arg) | |
86 | #define hio_note(f, arg...) printk(KERN_NOTICE MODULE_NAME"note: " f , ## arg) | |
87 | #define hio_warn(f, arg...) printk(KERN_WARNING MODULE_NAME"warn: " f , ## arg) | |
88 | #define hio_err(f, arg...) printk(KERN_ERR MODULE_NAME"err: " f , ## arg) | |
89 | ||
90 | /* slave port */ | |
91 | #define SSD_SLAVE_PORT_DEVID 0x000a | |
92 | ||
93 | /* int mode */ | |
94 | ||
95 | /* 2.6.9 msi affinity bug, should turn msi & msi-x off */ | |
96 | //#define SSD_MSI | |
97 | #define SSD_ESCAPE_IRQ | |
98 | ||
99 | //#define SSD_MSIX | |
100 | #ifndef MODULE | |
101 | #define SSD_MSIX | |
102 | #endif | |
103 | #define SSD_MSIX_VEC 8 | |
104 | #ifdef SSD_MSIX | |
105 | #undef SSD_MSI | |
da3355df | 106 | #undef SSD_ESCAPE_IRQ |
361ebed5 HSDT |
107 | #define SSD_MSIX_AFFINITY_FORCE |
108 | #endif | |
109 | ||
110 | #define SSD_TRIM | |
111 | ||
112 | /* Over temperature protect */ | |
113 | #define SSD_OT_PROTECT | |
114 | ||
115 | #ifdef SSD_QUEUE_PBIO | |
116 | #define BIO_SSD_PBIO 20 | |
117 | #endif | |
118 | ||
119 | /* debug */ | |
120 | //#define SSD_DEBUG_ERR | |
121 | ||
122 | /* cmd timer */ | |
123 | #define SSD_CMD_TIMEOUT (60*HZ) | |
124 | ||
125 | /* i2c & smbus */ | |
126 | #define SSD_SPI_TIMEOUT (5*HZ) | |
127 | #define SSD_I2C_TIMEOUT (5*HZ) | |
128 | ||
129 | #define SSD_I2C_MAX_DATA (127) | |
130 | #define SSD_SMBUS_BLOCK_MAX (32) | |
131 | #define SSD_SMBUS_DATA_MAX (SSD_SMBUS_BLOCK_MAX + 2) | |
132 | ||
133 | /* wait for init */ | |
134 | #define SSD_INIT_WAIT (1000) //1s | |
135 | #define SSD_CONTROLLER_WAIT (20*1000/SSD_INIT_WAIT) //20s | |
136 | #define SSD_INIT_MAX_WAIT (500*1000/SSD_INIT_WAIT) //500s | |
137 | #define SSD_INIT_MAX_WAIT_V3_2 (1400*1000/SSD_INIT_WAIT) //1400s | |
138 | #define SSD_RAM_INIT_MAX_WAIT (10*1000/SSD_INIT_WAIT) //10s | |
139 | #define SSD_CH_INFO_MAX_WAIT (10*1000/SSD_INIT_WAIT) //10s | |
140 | ||
141 | /* blkdev busy wait */ | |
142 | #define SSD_DEV_BUSY_WAIT 1000 //ms | |
143 | #define SSD_DEV_BUSY_MAX_WAIT (8*1000/SSD_DEV_BUSY_WAIT) //8s | |
144 | ||
145 | /* smbus retry */ | |
146 | #define SSD_SMBUS_RETRY_INTERVAL (5) //ms | |
147 | #define SSD_SMBUS_RETRY_MAX (1000/SSD_SMBUS_RETRY_INTERVAL) | |
148 | ||
149 | #define SSD_BM_RETRY_MAX 7 | |
150 | ||
151 | /* bm routine interval */ | |
152 | #define SSD_BM_CAP_LEARNING_DELAY (10*60*1000) | |
153 | ||
154 | /* routine interval */ | |
155 | #define SSD_ROUTINE_INTERVAL (10*1000) //10s | |
156 | #define SSD_HWMON_ROUTINE_TICK (60*1000/SSD_ROUTINE_INTERVAL) | |
157 | #define SSD_CAPMON_ROUTINE_TICK ((3600*1000/SSD_ROUTINE_INTERVAL)*24*30) | |
158 | #define SSD_CAPMON2_ROUTINE_TICK (10*60*1000/SSD_ROUTINE_INTERVAL) //fault recover | |
159 | ||
160 | /* dma align */ | |
161 | #define SSD_DMA_ALIGN (16) | |
162 | ||
163 | /* some hw defalut */ | |
164 | #define SSD_LOG_MAX_SZ 4096 | |
165 | ||
166 | #define SSD_NAND_OOB_SZ 1024 | |
167 | #define SSD_NAND_ID_SZ 8 | |
168 | #define SSD_NAND_ID_BUFF_SZ 1024 | |
169 | #define SSD_NAND_MAX_CE 2 | |
170 | ||
171 | #define SSD_BBT_RESERVED 8 | |
172 | ||
173 | #define SSD_ECC_MAX_FLIP (64+1) | |
174 | ||
175 | #define SSD_RAM_ALIGN 16 | |
176 | ||
177 | ||
178 | #define SSD_RELOAD_FLAG 0x3333CCCC | |
179 | #define SSD_RELOAD_FW 0xAA5555AA | |
180 | #define SSD_RESET_NOINIT 0xAA5555AA | |
181 | #define SSD_RESET 0x55AAAA55 | |
182 | #define SSD_RESET_FULL 0x5A | |
183 | //#define SSD_RESET_WAIT 1000 //1s | |
184 | //#define SSD_RESET_MAX_WAIT (200*1000/SSD_RESET_WAIT) //200s | |
185 | ||
186 | ||
187 | /* reverion 1 */ | |
188 | #define SSD_PROTOCOL_V1 0x0 | |
189 | ||
190 | #define SSD_ROM_SIZE (16*1024*1024) | |
191 | #define SSD_ROM_BLK_SIZE (256*1024) | |
192 | #define SSD_ROM_PAGE_SIZE (256) | |
193 | #define SSD_ROM_NR_BRIDGE_FW 2 | |
194 | #define SSD_ROM_NR_CTRL_FW 2 | |
195 | #define SSD_ROM_BRIDGE_FW_BASE 0 | |
196 | #define SSD_ROM_BRIDGE_FW_SIZE (2*1024*1024) | |
197 | #define SSD_ROM_CTRL_FW_BASE (SSD_ROM_NR_BRIDGE_FW*SSD_ROM_BRIDGE_FW_SIZE) | |
198 | #define SSD_ROM_CTRL_FW_SIZE (5*1024*1024) | |
199 | #define SSD_ROM_LABEL_BASE (SSD_ROM_CTRL_FW_BASE+SSD_ROM_CTRL_FW_SIZE*SSD_ROM_NR_CTRL_FW) | |
200 | #define SSD_ROM_VP_BASE (SSD_ROM_LABEL_BASE+SSD_ROM_BLK_SIZE) | |
201 | ||
202 | /* reverion 3 */ | |
203 | #define SSD_PROTOCOL_V3 0x3000000 | |
204 | #define SSD_PROTOCOL_V3_1_1 0x3010001 | |
205 | #define SSD_PROTOCOL_V3_1_3 0x3010003 | |
206 | #define SSD_PROTOCOL_V3_2 0x3020000 | |
207 | #define SSD_PROTOCOL_V3_2_1 0x3020001 /* <4KB improved */ | |
208 | #define SSD_PROTOCOL_V3_2_2 0x3020002 /* ot protect */ | |
209 | #define SSD_PROTOCOL_V3_2_4 0x3020004 | |
210 | ||
211 | ||
212 | #define SSD_PV3_ROM_NR_BM_FW 1 | |
213 | #define SSD_PV3_ROM_BM_FW_SZ (64*1024*8) | |
214 | ||
215 | #define SSD_ROM_LOG_SZ (64*1024*4) | |
216 | ||
217 | #define SSD_ROM_NR_SMART_MAX 2 | |
218 | #define SSD_PV3_ROM_NR_SMART SSD_ROM_NR_SMART_MAX | |
219 | #define SSD_PV3_ROM_SMART_SZ (64*1024) | |
220 | ||
221 | /* reverion 3.2 */ | |
222 | #define SSD_PV3_2_ROM_LOG_SZ (64*1024*80) /* 5MB */ | |
223 | #define SSD_PV3_2_ROM_SEC_SZ (256*1024) /* 256KB */ | |
224 | ||
225 | ||
226 | /* register */ | |
227 | #define SSD_REQ_FIFO_REG 0x0000 | |
228 | #define SSD_RESP_FIFO_REG 0x0008 //0x0010 | |
229 | #define SSD_RESP_PTR_REG 0x0010 //0x0018 | |
230 | #define SSD_INTR_INTERVAL_REG 0x0018 | |
231 | #define SSD_READY_REG 0x001C | |
232 | #define SSD_BRIDGE_TEST_REG 0x0020 | |
233 | #define SSD_STRIPE_SIZE_REG 0x0028 | |
234 | #define SSD_CTRL_VER_REG 0x0030 //controller | |
235 | #define SSD_BRIDGE_VER_REG 0x0034 //bridge | |
236 | #define SSD_PCB_VER_REG 0x0038 | |
237 | #define SSD_BURN_FLAG_REG 0x0040 | |
238 | #define SSD_BRIDGE_INFO_REG 0x0044 | |
239 | ||
240 | #define SSD_WL_VAL_REG 0x0048 //32-bit | |
241 | ||
242 | #define SSD_BB_INFO_REG 0x004C | |
243 | ||
244 | #define SSD_ECC_TEST_REG 0x0050 //test only | |
245 | #define SSD_ERASE_TEST_REG 0x0058 //test only | |
246 | #define SSD_WRITE_TEST_REG 0x0060 //test only | |
247 | ||
248 | #define SSD_RESET_REG 0x0068 | |
249 | #define SSD_RELOAD_FW_REG 0x0070 | |
250 | ||
251 | #define SSD_RESERVED_BLKS_REG 0x0074 | |
252 | #define SSD_VALID_PAGES_REG 0x0078 | |
253 | #define SSD_CH_INFO_REG 0x007C | |
254 | ||
255 | #define SSD_CTRL_TEST_REG_SZ 0x8 | |
256 | #define SSD_CTRL_TEST_REG0 0x0080 | |
257 | #define SSD_CTRL_TEST_REG1 0x0088 | |
258 | #define SSD_CTRL_TEST_REG2 0x0090 | |
259 | #define SSD_CTRL_TEST_REG3 0x0098 | |
260 | #define SSD_CTRL_TEST_REG4 0x00A0 | |
261 | #define SSD_CTRL_TEST_REG5 0x00A8 | |
262 | #define SSD_CTRL_TEST_REG6 0x00B0 | |
263 | #define SSD_CTRL_TEST_REG7 0x00B8 | |
264 | ||
265 | #define SSD_FLASH_INFO_REG0 0x00C0 | |
266 | #define SSD_FLASH_INFO_REG1 0x00C8 | |
267 | #define SSD_FLASH_INFO_REG2 0x00D0 | |
268 | #define SSD_FLASH_INFO_REG3 0x00D8 | |
269 | #define SSD_FLASH_INFO_REG4 0x00E0 | |
270 | #define SSD_FLASH_INFO_REG5 0x00E8 | |
271 | #define SSD_FLASH_INFO_REG6 0x00F0 | |
272 | #define SSD_FLASH_INFO_REG7 0x00F8 | |
273 | ||
274 | #define SSD_RESP_INFO_REG 0x01B8 | |
275 | #define SSD_NAND_BUFF_BASE 0x01BC //for nand write | |
276 | ||
277 | #define SSD_CHIP_INFO_REG_SZ 0x10 | |
278 | #define SSD_CHIP_INFO_REG0 0x0100 //128 bit | |
279 | #define SSD_CHIP_INFO_REG1 0x0110 | |
280 | #define SSD_CHIP_INFO_REG2 0x0120 | |
281 | #define SSD_CHIP_INFO_REG3 0x0130 | |
282 | #define SSD_CHIP_INFO_REG4 0x0140 | |
283 | #define SSD_CHIP_INFO_REG5 0x0150 | |
284 | #define SSD_CHIP_INFO_REG6 0x0160 | |
285 | #define SSD_CHIP_INFO_REG7 0x0170 | |
286 | ||
287 | #define SSD_RAM_INFO_REG 0x01C4 | |
288 | ||
289 | #define SSD_BBT_BASE_REG 0x01C8 | |
290 | #define SSD_ECT_BASE_REG 0x01CC | |
291 | ||
292 | #define SSD_CLEAR_INTR_REG 0x01F0 | |
293 | ||
294 | #define SSD_INIT_STATE_REG_SZ 0x8 | |
295 | #define SSD_INIT_STATE_REG0 0x0200 | |
296 | #define SSD_INIT_STATE_REG1 0x0208 | |
297 | #define SSD_INIT_STATE_REG2 0x0210 | |
298 | #define SSD_INIT_STATE_REG3 0x0218 | |
299 | #define SSD_INIT_STATE_REG4 0x0220 | |
300 | #define SSD_INIT_STATE_REG5 0x0228 | |
301 | #define SSD_INIT_STATE_REG6 0x0230 | |
302 | #define SSD_INIT_STATE_REG7 0x0238 | |
303 | ||
304 | #define SSD_ROM_INFO_REG 0x0600 | |
305 | #define SSD_ROM_BRIDGE_FW_INFO_REG 0x0604 | |
306 | #define SSD_ROM_CTRL_FW_INFO_REG 0x0608 | |
307 | #define SSD_ROM_VP_INFO_REG 0x060C | |
308 | ||
309 | #define SSD_LOG_INFO_REG 0x0610 | |
310 | #define SSD_LED_REG 0x0614 | |
311 | #define SSD_MSG_BASE_REG 0x06F8 | |
312 | ||
313 | /*spi reg */ | |
314 | #define SSD_SPI_REG_CMD 0x0180 | |
315 | #define SSD_SPI_REG_CMD_HI 0x0184 | |
316 | #define SSD_SPI_REG_WDATA 0x0188 | |
317 | #define SSD_SPI_REG_ID 0x0190 | |
318 | #define SSD_SPI_REG_STATUS 0x0198 | |
319 | #define SSD_SPI_REG_RDATA 0x01A0 | |
320 | #define SSD_SPI_REG_READY 0x01A8 | |
321 | ||
322 | /* i2c register */ | |
323 | #define SSD_I2C_CTRL_REG 0x06F0 | |
324 | #define SSD_I2C_RDATA_REG 0x06F4 | |
325 | ||
326 | /* temperature reg */ | |
327 | #define SSD_BRIGE_TEMP_REG 0x0618 | |
328 | ||
329 | #define SSD_CTRL_TEMP_REG0 0x0700 | |
330 | #define SSD_CTRL_TEMP_REG1 0x0708 | |
331 | #define SSD_CTRL_TEMP_REG2 0x0710 | |
332 | #define SSD_CTRL_TEMP_REG3 0x0718 | |
333 | #define SSD_CTRL_TEMP_REG4 0x0720 | |
334 | #define SSD_CTRL_TEMP_REG5 0x0728 | |
335 | #define SSD_CTRL_TEMP_REG6 0x0730 | |
336 | #define SSD_CTRL_TEMP_REG7 0x0738 | |
337 | ||
338 | /* reversion 3 reg */ | |
339 | #define SSD_PROTOCOL_VER_REG 0x01B4 | |
340 | ||
341 | #define SSD_FLUSH_TIMEOUT_REG 0x02A4 | |
342 | #define SSD_BM_FAULT_REG 0x0660 | |
343 | ||
344 | #define SSD_PV3_RAM_STATUS_REG_SZ 0x4 | |
345 | #define SSD_PV3_RAM_STATUS_REG0 0x0260 | |
346 | #define SSD_PV3_RAM_STATUS_REG1 0x0264 | |
347 | #define SSD_PV3_RAM_STATUS_REG2 0x0268 | |
348 | #define SSD_PV3_RAM_STATUS_REG3 0x026C | |
349 | #define SSD_PV3_RAM_STATUS_REG4 0x0270 | |
350 | #define SSD_PV3_RAM_STATUS_REG5 0x0274 | |
351 | #define SSD_PV3_RAM_STATUS_REG6 0x0278 | |
352 | #define SSD_PV3_RAM_STATUS_REG7 0x027C | |
353 | ||
354 | #define SSD_PV3_CHIP_INFO_REG_SZ 0x40 | |
355 | #define SSD_PV3_CHIP_INFO_REG0 0x0300 | |
356 | #define SSD_PV3_CHIP_INFO_REG1 0x0340 | |
357 | #define SSD_PV3_CHIP_INFO_REG2 0x0380 | |
358 | #define SSD_PV3_CHIP_INFO_REG3 0x03B0 | |
359 | #define SSD_PV3_CHIP_INFO_REG4 0x0400 | |
360 | #define SSD_PV3_CHIP_INFO_REG5 0x0440 | |
361 | #define SSD_PV3_CHIP_INFO_REG6 0x0480 | |
362 | #define SSD_PV3_CHIP_INFO_REG7 0x04B0 | |
363 | ||
364 | #define SSD_PV3_INIT_STATE_REG_SZ 0x20 | |
365 | #define SSD_PV3_INIT_STATE_REG0 0x0500 | |
366 | #define SSD_PV3_INIT_STATE_REG1 0x0520 | |
367 | #define SSD_PV3_INIT_STATE_REG2 0x0540 | |
368 | #define SSD_PV3_INIT_STATE_REG3 0x0560 | |
369 | #define SSD_PV3_INIT_STATE_REG4 0x0580 | |
370 | #define SSD_PV3_INIT_STATE_REG5 0x05A0 | |
371 | #define SSD_PV3_INIT_STATE_REG6 0x05C0 | |
372 | #define SSD_PV3_INIT_STATE_REG7 0x05E0 | |
373 | ||
374 | /* reversion 3.1.1 reg */ | |
375 | #define SSD_FULL_RESET_REG 0x01B0 | |
376 | ||
377 | #define SSD_CTRL_REG_ZONE_SZ 0x800 | |
378 | ||
379 | #define SSD_BB_THRESHOLD_L1_REG 0x2C0 | |
380 | #define SSD_BB_THRESHOLD_L2_REG 0x2C4 | |
381 | ||
382 | #define SSD_BB_ACC_REG_SZ 0x4 | |
383 | #define SSD_BB_ACC_REG0 0x21C0 | |
384 | #define SSD_BB_ACC_REG1 0x29C0 | |
385 | #define SSD_BB_ACC_REG2 0x31C0 | |
386 | ||
387 | #define SSD_EC_THRESHOLD_L1_REG 0x2C8 | |
388 | #define SSD_EC_THRESHOLD_L2_REG 0x2CC | |
389 | ||
390 | #define SSD_EC_ACC_REG_SZ 0x4 | |
391 | #define SSD_EC_ACC_REG0 0x21E0 | |
392 | #define SSD_EC_ACC_REG1 0x29E0 | |
393 | #define SSD_EC_ACC_REG2 0x31E0 | |
394 | ||
395 | /* reversion 3.1.2 & 3.1.3 reg */ | |
396 | #define SSD_HW_STATUS_REG 0x02AC | |
397 | ||
398 | #define SSD_PLP_INFO_REG 0x0664 | |
399 | ||
400 | /*reversion 3.2 reg*/ | |
401 | #define SSD_POWER_ON_REG 0x01EC | |
402 | #define SSD_PCIE_LINKSTATUS_REG 0x01F8 | |
403 | #define SSD_PL_CAP_LEARN_REG 0x01FC | |
404 | ||
405 | #define SSD_FPGA_1V0_REG0 0x2070 | |
406 | #define SSD_FPGA_1V8_REG0 0x2078 | |
407 | #define SSD_FPGA_1V0_REG1 0x2870 | |
408 | #define SSD_FPGA_1V8_REG1 0x2878 | |
409 | ||
410 | /*reversion 3.2 reg*/ | |
411 | #define SSD_READ_OT_REG0 0x2260 | |
412 | #define SSD_WRITE_OT_REG0 0x2264 | |
413 | #define SSD_READ_OT_REG1 0x2A60 | |
414 | #define SSD_WRITE_OT_REG1 0x2A64 | |
415 | ||
416 | ||
417 | /* function */ | |
418 | #define SSD_FUNC_READ 0x01 | |
419 | #define SSD_FUNC_WRITE 0x02 | |
420 | #define SSD_FUNC_NAND_READ_WOOB 0x03 | |
421 | #define SSD_FUNC_NAND_READ 0x04 | |
422 | #define SSD_FUNC_NAND_WRITE 0x05 | |
423 | #define SSD_FUNC_NAND_ERASE 0x06 | |
424 | #define SSD_FUNC_NAND_READ_ID 0x07 | |
425 | #define SSD_FUNC_READ_LOG 0x08 | |
426 | #define SSD_FUNC_TRIM 0x09 | |
427 | #define SSD_FUNC_RAM_READ 0x10 | |
428 | #define SSD_FUNC_RAM_WRITE 0x11 | |
429 | #define SSD_FUNC_FLUSH 0x12 //cache / bbt | |
430 | ||
431 | /* spi function */ | |
432 | #define SSD_SPI_CMD_PROGRAM 0x02 | |
433 | #define SSD_SPI_CMD_READ 0x03 | |
434 | #define SSD_SPI_CMD_W_DISABLE 0x04 | |
435 | #define SSD_SPI_CMD_READ_STATUS 0x05 | |
436 | #define SSD_SPI_CMD_W_ENABLE 0x06 | |
437 | #define SSD_SPI_CMD_ERASE 0xd8 | |
438 | #define SSD_SPI_CMD_CLSR 0x30 | |
439 | #define SSD_SPI_CMD_READ_ID 0x9f | |
440 | ||
441 | /* i2c */ | |
442 | #define SSD_I2C_CTRL_READ 0x00 | |
443 | #define SSD_I2C_CTRL_WRITE 0x01 | |
444 | ||
445 | /* i2c internal register */ | |
446 | #define SSD_I2C_CFG_REG 0x00 | |
447 | #define SSD_I2C_DATA_REG 0x01 | |
448 | #define SSD_I2C_CMD_REG 0x02 | |
449 | #define SSD_I2C_STATUS_REG 0x03 | |
450 | #define SSD_I2C_SADDR_REG 0x04 | |
451 | #define SSD_I2C_LEN_REG 0x05 | |
452 | #define SSD_I2C_RLEN_REG 0x06 | |
453 | #define SSD_I2C_WLEN_REG 0x07 | |
454 | #define SSD_I2C_RESET_REG 0x08 //write for reset | |
455 | #define SSD_I2C_PRER_REG 0x09 | |
456 | ||
457 | ||
458 | /* hw mon */ | |
459 | /* FPGA volt = ADC_value / 4096 * 3v */ | |
460 | #define SSD_FPGA_1V0_ADC_MIN 1228 // 0.9v | |
461 | #define SSD_FPGA_1V0_ADC_MAX 1502 // 1.1v | |
462 | #define SSD_FPGA_1V8_ADC_MIN 2211 // 1.62v | |
463 | #define SSD_FPGA_1V8_ADC_MAX 2703 // 1.98 | |
464 | ||
465 | /* ADC value */ | |
466 | #define SSD_FPGA_VOLT_MAX(val) (((val) & 0xffff) >> 4) | |
467 | #define SSD_FPGA_VOLT_MIN(val) (((val >> 16) & 0xffff) >> 4) | |
468 | #define SSD_FPGA_VOLT_CUR(val) (((val >> 32) & 0xffff) >> 4) | |
469 | #define SSD_FPGA_VOLT(val) ((val * 3000) >> 12) | |
470 | ||
471 | #define SSD_VOLT_LOG_DATA(idx, ctrl, volt) (((uint32_t)idx << 24) | ((uint32_t)ctrl << 16) | ((uint32_t)volt)) | |
472 | ||
/* Monitored FPGA supply rails (indices into the voltage registers). */
enum ssd_fpga_volt {
	SSD_FPGA_1V0 = 0,	/* core rail, nominal 1.0 V per ADC limit comments */
	SSD_FPGA_1V8 = 1,	/* rail, nominal 1.8 V */
	SSD_FPGA_VOLT_NR = 2	/* number of monitored rails */
};
479 | ||
/* Clock fault conditions reported by the hardware monitor. */
enum ssd_clock {
	SSD_CLOCK_166M_LOST = 0,
	SSD_CLOCK_166M_SKEW = 1,
	SSD_CLOCK_156M_LOST = 2,
	SSD_CLOCK_156M_SKEW = 3,
	SSD_CLOCK_NR = 4	/* number of tracked clock conditions */
};
488 | ||
489 | /* sensor */ | |
490 | #define SSD_SENSOR_LM75_SADDRESS (0x49 << 1) | |
491 | #define SSD_SENSOR_LM80_SADDRESS (0x28 << 1) | |
492 | ||
493 | #define SSD_SENSOR_CONVERT_TEMP(val) ((int)(val >> 8)) | |
494 | ||
495 | #define SSD_INLET_OT_TEMP (55) //55 DegC | |
496 | #define SSD_INLET_OT_HYST (50) //50 DegC | |
497 | #define SSD_FLASH_OT_TEMP (70) //70 DegC | |
498 | #define SSD_FLASH_OT_HYST (65) //65 DegC | |
499 | ||
/* On-board temperature sensor chips. */
enum ssd_sensor {
	SSD_SENSOR_LM80 = 0,	/* LM96080 hardware monitor */
	SSD_SENSOR_LM75 = 1,	/* LM75 temperature sensor */
	SSD_SENSOR_NR = 2
};
506 | ||
507 | ||
508 | /* lm75 */ | |
/* LM75 internal register indices. */
enum ssd_lm75_reg {
	SSD_LM75_REG_TEMP = 0,	/* temperature readout */
	SSD_LM75_REG_CONF = 1,	/* configuration */
	SSD_LM75_REG_THYST = 2,	/* hysteresis threshold */
	SSD_LM75_REG_TOS = 3	/* overtemperature shutdown threshold */
};
516 | ||
517 | /* lm96080 */ | |
518 | #define SSD_LM80_REG_IN_MAX(nr) (0x2a + (nr) * 2) | |
519 | #define SSD_LM80_REG_IN_MIN(nr) (0x2b + (nr) * 2) | |
520 | #define SSD_LM80_REG_IN(nr) (0x20 + (nr)) | |
521 | ||
522 | #define SSD_LM80_REG_FAN1 0x28 | |
523 | #define SSD_LM80_REG_FAN2 0x29 | |
524 | #define SSD_LM80_REG_FAN_MIN(nr) (0x3b + (nr)) | |
525 | ||
526 | #define SSD_LM80_REG_TEMP 0x27 | |
527 | #define SSD_LM80_REG_TEMP_HOT_MAX 0x38 | |
528 | #define SSD_LM80_REG_TEMP_HOT_HYST 0x39 | |
529 | #define SSD_LM80_REG_TEMP_OS_MAX 0x3a | |
530 | #define SSD_LM80_REG_TEMP_OS_HYST 0x3b | |
531 | ||
532 | #define SSD_LM80_REG_CONFIG 0x00 | |
533 | #define SSD_LM80_REG_ALARM1 0x01 | |
534 | #define SSD_LM80_REG_ALARM2 0x02 | |
535 | #define SSD_LM80_REG_MASK1 0x03 | |
536 | #define SSD_LM80_REG_MASK2 0x04 | |
537 | #define SSD_LM80_REG_FANDIV 0x05 | |
538 | #define SSD_LM80_REG_RES 0x06 | |
539 | ||
540 | #define SSD_LM80_CONVERT_VOLT(val) ((val * 10) >> 8) | |
541 | ||
542 | #define SSD_LM80_3V3_VOLT(val) ((val)*33/19) | |
543 | ||
544 | #define SSD_LM80_CONV_INTERVAL (1000) | |
545 | ||
/* LM80 voltage input channels, in register order. */
enum ssd_lm80_in {
	SSD_LM80_IN_CAP = 0,	/* power-loss capacitor voltage */
	SSD_LM80_IN_1V2 = 1,
	SSD_LM80_IN_1V2a = 2,
	SSD_LM80_IN_1V5 = 3,
	SSD_LM80_IN_1V8 = 4,
	SSD_LM80_IN_FPGA_3V3 = 5,
	SSD_LM80_IN_3V3 = 6,
	SSD_LM80_IN_NR = 7
};
557 | ||
/* Acceptable ADC reading window for one LM80 input channel. */
struct ssd_lm80_limit {
	uint8_t low;	/* lower bound */
	uint8_t high;	/* upper bound */
};
563 | ||
564 | /* +/- 5% except cap in*/ | |
565 | static struct ssd_lm80_limit ssd_lm80_limit[SSD_LM80_IN_NR] = { | |
566 | {171, 217}, /* CAP in: 1710 ~ 2170 */ | |
567 | {114, 126}, | |
568 | {114, 126}, | |
569 | {142, 158}, | |
570 | {171, 189}, | |
571 | {180, 200}, | |
572 | {180, 200}, | |
573 | }; | |
574 | ||
575 | /* temperature sensors */ | |
/* Temperature measurement points. */
enum ssd_temp_sensor {
	SSD_TEMP_INLET = 0,	/* board inlet */
	SSD_TEMP_FLASH = 1,	/* flash area */
	SSD_TEMP_CTRL = 2,	/* controller */
	SSD_TEMP_NR = 3
};
583 | ||
584 | ||
585 | #ifdef SSD_OT_PROTECT | |
586 | #define SSD_OT_DELAY (60) //ms | |
587 | ||
588 | #define SSD_OT_TEMP (90) //90 DegC | |
589 | ||
590 | #define SSD_OT_TEMP_HYST (85) //85 DegC | |
591 | #endif | |
592 | ||
593 | /* fpga temperature */ | |
594 | //#define CONVERT_TEMP(val) ((float)(val)*503.975f/4096.0f-273.15f) | |
595 | #define CONVERT_TEMP(val) ((val)*504/4096-273) | |
596 | ||
597 | #define MAX_TEMP(val) CONVERT_TEMP(((val & 0xffff) >> 4)) | |
598 | #define MIN_TEMP(val) CONVERT_TEMP((((val>>16) & 0xffff) >> 4)) | |
599 | #define CUR_TEMP(val) CONVERT_TEMP((((val>>32) & 0xffff) >> 4)) | |
600 | ||
601 | ||
602 | /* CAP monitor */ | |
603 | #define SSD_PL_CAP_U1 SSD_LM80_REG_IN(SSD_LM80_IN_CAP) | |
604 | #define SSD_PL_CAP_U2 SSD_LM80_REG_IN(SSD_LM80_IN_1V8) | |
605 | #define SSD_PL_CAP_LEARN(u1, u2, t) ((t*(u1+u2))/(2*162*(u1-u2))) | |
606 | #define SSD_PL_CAP_LEARN_WAIT (20) //20ms | |
607 | #define SSD_PL_CAP_LEARN_MAX_WAIT (1000/SSD_PL_CAP_LEARN_WAIT) //1s | |
608 | ||
609 | #define SSD_PL_CAP_CHARGE_WAIT (1000) | |
610 | #define SSD_PL_CAP_CHARGE_MAX_WAIT ((120*1000)/SSD_PL_CAP_CHARGE_WAIT) //120s | |
611 | ||
612 | #define SSD_PL_CAP_VOLT(val) (val*7) | |
613 | ||
614 | #define SSD_PL_CAP_VOLT_FULL (13700) | |
615 | #define SSD_PL_CAP_VOLT_READY (12880) | |
616 | ||
617 | #define SSD_PL_CAP_THRESHOLD (8900) | |
618 | #define SSD_PL_CAP_CP_THRESHOLD (5800) | |
619 | #define SSD_PL_CAP_THRESHOLD_HYST (100) | |
620 | ||
/* Power-loss capacitor status slots (currently a single capacitor bank). */
enum ssd_pl_cap_status {
	SSD_PL_CAP = 0,
	SSD_PL_CAP_NR = 1
};
626 | ||
/* Power-loss capacitor bank variants. */
enum ssd_pl_cap_type {
	SSD_PL_CAP_DEFAULT = 0,	/* 4-capacitor bank */
	SSD_PL_CAP_CP = 1	/* 3-capacitor bank */
};
632 | ||
633 | ||
634 | /* hwmon offset */ | |
635 | #define SSD_HWMON_OFFS_TEMP (0) | |
636 | #define SSD_HWMON_OFFS_SENSOR (SSD_HWMON_OFFS_TEMP + SSD_TEMP_NR) | |
637 | #define SSD_HWMON_OFFS_PL_CAP (SSD_HWMON_OFFS_SENSOR + SSD_SENSOR_NR) | |
638 | #define SSD_HWMON_OFFS_LM80 (SSD_HWMON_OFFS_PL_CAP + SSD_PL_CAP_NR) | |
639 | #define SSD_HWMON_OFFS_CLOCK (SSD_HWMON_OFFS_LM80 + SSD_LM80_IN_NR) | |
640 | #define SSD_HWMON_OFFS_FPGA (SSD_HWMON_OFFS_CLOCK + SSD_CLOCK_NR) | |
641 | ||
642 | #define SSD_HWMON_TEMP(idx) (SSD_HWMON_OFFS_TEMP + idx) | |
643 | #define SSD_HWMON_SENSOR(idx) (SSD_HWMON_OFFS_SENSOR + idx) | |
644 | #define SSD_HWMON_PL_CAP(idx) (SSD_HWMON_OFFS_PL_CAP + idx) | |
645 | #define SSD_HWMON_LM80(idx) (SSD_HWMON_OFFS_LM80 + idx) | |
646 | #define SSD_HWMON_CLOCK(idx) (SSD_HWMON_OFFS_CLOCK + idx) | |
647 | #define SSD_HWMON_FPGA(ctrl, idx) (SSD_HWMON_OFFS_FPGA + (ctrl * SSD_FPGA_VOLT_NR) + idx) | |
648 | ||
649 | ||
650 | ||
651 | /* fifo */ | |
/*
 * Simple spinlock-protected ring buffer of fixed-size elements.
 * The slot count is always a power of two so index wrap-around is a
 * single AND with `mask` (see sfifo_alloc / __sfifo_put / __sfifo_get).
 */
typedef struct sfifo
{
	uint32_t in;		/* producer index */
	uint32_t out;		/* consumer index */
	uint32_t size;		/* number of slots, power of two */
	uint32_t esize;		/* bytes per element */
	uint32_t mask;		/* size - 1, wraps indices */
	spinlock_t lock;	/* serializes put/get */
	void *data;		/* backing storage (vmalloc'ed in sfifo_alloc) */
} sfifo_t;
662 | ||
663 | static int sfifo_alloc(struct sfifo *fifo, uint32_t size, uint32_t esize) | |
664 | { | |
665 | uint32_t __size = 1; | |
666 | ||
667 | if (!fifo || size > INT_MAX || esize == 0) { | |
668 | return -EINVAL; | |
669 | } | |
670 | ||
671 | while (__size < size) __size <<= 1; | |
672 | ||
673 | if (__size < 2) { | |
674 | return -EINVAL; | |
675 | } | |
676 | ||
677 | fifo->data = vmalloc(esize * __size); | |
678 | if (!fifo->data) { | |
679 | return -ENOMEM; | |
680 | } | |
681 | ||
682 | fifo->in = 0; | |
683 | fifo->out = 0; | |
684 | fifo->mask = __size - 1; | |
685 | fifo->size = __size; | |
686 | fifo->esize = esize; | |
687 | spin_lock_init(&fifo->lock); | |
688 | ||
689 | return 0; | |
690 | } | |
691 | ||
692 | static void sfifo_free(struct sfifo *fifo) | |
693 | { | |
694 | if (!fifo) { | |
695 | return; | |
696 | } | |
697 | ||
698 | vfree(fifo->data); | |
699 | fifo->data = NULL; | |
700 | fifo->in = 0; | |
701 | fifo->out = 0; | |
702 | fifo->mask = 0; | |
703 | fifo->size = 0; | |
704 | fifo->esize = 0; | |
705 | } | |
706 | ||
707 | static int __sfifo_put(struct sfifo *fifo, void *val) | |
708 | { | |
709 | if (((fifo->in + 1) & fifo->mask) == fifo->out) { | |
710 | return -1; | |
711 | } | |
712 | ||
713 | memcpy((fifo->data + (fifo->in * fifo->esize)), val, fifo->esize); | |
714 | fifo->in = (fifo->in + 1) & fifo->mask; | |
715 | ||
716 | return 0; | |
717 | } | |
718 | ||
719 | static int sfifo_put(struct sfifo *fifo, void *val) | |
720 | { | |
721 | int ret = 0; | |
722 | ||
723 | if (!fifo || !val) { | |
724 | return -EINVAL; | |
725 | } | |
726 | ||
727 | if (!in_interrupt()) { | |
728 | spin_lock_irq(&fifo->lock); | |
729 | ret = __sfifo_put(fifo, val); | |
730 | spin_unlock_irq(&fifo->lock); | |
731 | } else { | |
732 | spin_lock(&fifo->lock); | |
733 | ret = __sfifo_put(fifo, val); | |
734 | spin_unlock(&fifo->lock); | |
735 | } | |
736 | ||
737 | return ret; | |
738 | } | |
739 | ||
740 | static int __sfifo_get(struct sfifo *fifo, void *val) | |
741 | { | |
742 | if (fifo->out == fifo->in) { | |
743 | return -1; | |
744 | } | |
745 | ||
746 | memcpy(val, (fifo->data + (fifo->out * fifo->esize)), fifo->esize); | |
747 | fifo->out = (fifo->out + 1) & fifo->mask; | |
748 | ||
749 | return 0; | |
750 | } | |
751 | ||
752 | static int sfifo_get(struct sfifo *fifo, void *val) | |
753 | { | |
754 | int ret = 0; | |
755 | ||
756 | if (!fifo || !val) { | |
757 | return -EINVAL; | |
758 | } | |
759 | ||
760 | if (!in_interrupt()) { | |
761 | spin_lock_irq(&fifo->lock); | |
762 | ret = __sfifo_get(fifo, val); | |
763 | spin_unlock_irq(&fifo->lock); | |
764 | } else { | |
765 | spin_lock(&fifo->lock); | |
766 | ret = __sfifo_get(fifo, val); | |
767 | spin_unlock(&fifo->lock); | |
768 | } | |
769 | ||
770 | return ret; | |
771 | } | |
772 | ||
773 | /* bio list */ | |
774 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)) | |
/*
 * Minimal singly-linked bio list for kernels that predate bio_list
 * (< 2.6.30). `prev` is the head, `next` is the tail; chaining uses
 * each bio's own bi_next pointer.
 */
struct ssd_blist {
	struct bio *prev;	/* list head */
	struct bio *next;	/* list tail */
};
779 | ||
780 | static inline void ssd_blist_init(struct ssd_blist *ssd_bl) | |
781 | { | |
782 | ssd_bl->prev = NULL; | |
783 | ssd_bl->next = NULL; | |
784 | } | |
785 | ||
786 | static inline struct bio *ssd_blist_get(struct ssd_blist *ssd_bl) | |
787 | { | |
788 | struct bio *bio = ssd_bl->prev; | |
789 | ||
790 | ssd_bl->prev = NULL; | |
791 | ssd_bl->next = NULL; | |
792 | ||
793 | return bio; | |
794 | } | |
795 | ||
796 | static inline void ssd_blist_add(struct ssd_blist *ssd_bl, struct bio *bio) | |
797 | { | |
798 | bio->bi_next = NULL; | |
799 | ||
800 | if (ssd_bl->next) { | |
801 | ssd_bl->next->bi_next = bio; | |
802 | } else { | |
803 | ssd_bl->prev = bio; | |
804 | } | |
805 | ||
806 | ssd_bl->next = bio; | |
807 | } | |
808 | ||
809 | #else | |
810 | #define ssd_blist bio_list | |
811 | #define ssd_blist_init bio_list_init | |
812 | #define ssd_blist_get bio_list_get | |
813 | #define ssd_blist_add bio_list_add | |
814 | #endif | |
815 | ||
816 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)) | |
817 | #define bio_start(bio) (bio->bi_sector) | |
818 | #else | |
819 | #define bio_start(bio) (bio->bi_iter.bi_sector) | |
820 | #endif | |
821 | ||
822 | /* mutex */ | |
823 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)) | |
824 | #define mutex_lock down | |
825 | #define mutex_unlock up | |
826 | #define mutex semaphore | |
827 | #define mutex_init init_MUTEX | |
828 | #endif | |
829 | ||
830 | /* i2c */ | |
/*
 * 32-bit command word written to SSD_I2C_CTRL_REG, viewed either as a
 * raw value or as its bit fields.
 */
typedef union ssd_i2c_ctrl {
	uint32_t val;
	struct {
		uint8_t wdata;	/* data byte for a write access */
		uint8_t addr;	/* i2c internal register address (SSD_I2C_*_REG) */
		uint16_t rw:1;	/* SSD_I2C_CTRL_READ (0) / SSD_I2C_CTRL_WRITE (1) */
		uint16_t pad:15;
	} bits;
}__attribute__((packed)) ssd_i2c_ctrl_t;
840 | ||
/*
 * 32-bit word read back from SSD_I2C_RDATA_REG, viewed either as a raw
 * value or as its bit fields.
 */
typedef union ssd_i2c_data {
	uint32_t val;
	struct {
		uint32_t rdata:8;	/* returned data byte */
		uint32_t valid:1;	/* rdata holds a valid byte when set */
		uint32_t pad:23;
	} bits;
}__attribute__((packed)) ssd_i2c_data_t;
849 | ||
850 | /* write mode */ | |
/* Write-cache behavior modes. */
enum ssd_write_mode {
	SSD_WMODE_BUFFER = 0,
	SSD_WMODE_BUFFER_EX = 1,
	SSD_WMODE_FUA = 2,	/* force unit access */
	/* dummy entries, resolved to one of the real modes elsewhere */
	SSD_WMODE_AUTO = 3,
	SSD_WMODE_DEFAULT = 4
};
860 | ||
861 | /* reset type */ | |
/* Device reset variants. */
enum ssd_reset_type {
	SSD_RST_NOINIT = 0,	/* reset without re-initialization */
	SSD_RST_NORMAL = 1,
	SSD_RST_FULL = 2
};
868 | ||
869 | /* ssd msg */ | |
/*
 * One scatter-gather element of a read/write request:
 * 48-bit block address, 16-bit length, 64-bit buffer address
 * (presumably a DMA address — confirm against the request setup code).
 * Packed: 16 bytes on the wire.
 */
typedef struct ssd_sg_entry
{
	uint64_t block:48;
	uint64_t length:16;
	uint64_t buf;
}__attribute__((packed))ssd_sg_entry_t;
876 | ||
/*
 * Read/write request message with an inline scatter-gather list.
 * `sge[1]` is the base of a variable-length array (pre-C99 idiom);
 * the actual entry count is given by `nsegs`.
 */
typedef struct ssd_rw_msg
{
	uint8_t tag;
	uint8_t flag;
	uint8_t nsegs;		/* number of valid entries in sge[] */
	uint8_t fun;		/* opcode, e.g. SSD_FUNC_READ / SSD_FUNC_WRITE */
	uint32_t reserved;	//for 64-bit align
	struct ssd_sg_entry sge[1];	//base
}__attribute__((packed))ssd_rw_msg_t;
886 | ||
/*
 * Normal completion message returned by the device.
 * Packed: 8 bytes, the size of one response FIFO entry.
 */
typedef struct ssd_resp_msg
{
	uint8_t tag;		/* echoes the request tag */
	uint8_t status:2;
	uint8_t bitflip:6;	/* presumably ECC corrected-bit count — confirm */
	uint8_t log;
	uint8_t fun;		/* opcode of the completed request */
	uint32_t reserved;
}__attribute__((packed))ssd_resp_msg_t;
896 | ||
/* Flush request (SSD_FUNC_FLUSH): flash cache or bad-block table. */
typedef struct ssd_flush_msg
{
	uint8_t tag;
	uint8_t flag:2;		//flash cache 0 or bbt 1
	uint8_t flash:6;
	uint8_t ctrl_idx;	/* target controller index */
	uint8_t fun;
	uint32_t reserved;	//align
}__attribute__((packed))ssd_flush_msg_t;
906 | ||
/* Raw NAND operation request (read/write/erase/read-id functions). */
typedef struct ssd_nand_op_msg
{
	uint8_t tag;
	uint8_t flag;
	uint8_t ctrl_idx;	/* target controller index */
	uint8_t fun;		/* SSD_FUNC_NAND_* opcode */
	uint32_t reserved;	//align
	uint16_t page_count;
	uint8_t chip_ce;	/* chip-enable line */
	uint8_t chip_no;
	uint32_t page_no;
	uint64_t buf;		/* presumably DMA address of the data buffer — confirm */
}__attribute__((packed))ssd_nand_op_msg_t;
920 | ||
/* On-device RAM read/write request (SSD_FUNC_RAM_READ / RAM_WRITE). */
typedef struct ssd_ram_op_msg
{
	uint8_t tag;
	uint8_t flag;
	uint8_t ctrl_idx;	/* target controller index */
	uint8_t fun;
	uint32_t reserved;	//align
	uint32_t start;		/* RAM start offset */
	uint32_t length;	/* transfer length in bytes */
	uint64_t buf;		/* presumably DMA address of the host buffer — confirm */
}__attribute__((packed))ssd_ram_op_msg_t;
932 | ||
933 | ||
934 | /* log msg */ | |
/* Log fetch request (SSD_FUNC_READ_LOG). */
typedef struct ssd_log_msg
{
	uint8_t tag;
	uint8_t flag;
	uint8_t ctrl_idx;	/* target controller index */
	uint8_t fun;
	uint32_t reserved;	//align
	uint64_t buf;		/* destination buffer address */
}__attribute__((packed))ssd_log_msg_t;
944 | ||
/*
 * Extended log operation request; identical to ssd_log_msg plus one
 * extra 64-bit pad so that `buf` lands at a different offset.
 */
typedef struct ssd_log_op_msg
{
	uint8_t tag;
	uint8_t flag;
	uint8_t ctrl_idx;	/* target controller index */
	uint8_t fun;
	uint32_t reserved;	//align
	uint64_t reserved1;	//align
	uint64_t buf;		/* destination buffer address */
}__attribute__((packed))ssd_log_op_msg_t;
955 | ||
/*
 * Completion message for a log request. The first bit fields line up
 * with ssd_resp_msg so both can share one response FIFO entry.
 */
typedef struct ssd_log_resp_msg
{
	uint8_t tag;
	uint16_t status :2;
	uint16_t reserved1 :2;	//align with the normal resp msg
	uint16_t nr_log :12;	/* number of log entries returned */
	uint8_t fun;
	uint32_t reserved;
}__attribute__((packed))ssd_log_resp_msg_t;
965 | ||
966 | ||
967 | /* resp msg */ | |
/*
 * One 8-byte response FIFO entry, viewable as either response flavor
 * or as raw 64/32-bit words.
 */
typedef union ssd_response_msq
{
	ssd_resp_msg_t resp_msg;	/* normal completion */
	ssd_log_resp_msg_t log_resp_msg;	/* log-request completion */
	uint64_t u64_msg;
	uint32_t u32_msg[2];
} ssd_response_msq_t;
975 | ||
976 | ||
/* custom struct */

/*
 * Where to find the device's init-state and chip-info register windows
 * for a given protocol version, discovered at probe time.
 */
typedef struct ssd_protocol_info
{
	uint32_t ver;                /* protocol version */
	uint32_t init_state_reg;     /* offset of init-state registers */
	uint32_t init_state_reg_sz;  /* size of that window, in bytes */
	uint32_t chip_info_reg;      /* offset of chip-info registers */
	uint32_t chip_info_reg_sz;   /* size of that window, in bytes */
} ssd_protocol_info_t;
986 | ||
/*
 * Hardware description read from the device at probe time: firmware
 * versions, command/response ring geometry, flash topology and the
 * on-flash layout of driver metadata.
 */
typedef struct ssd_hw_info
{
	uint32_t bridge_ver;       /* bridge firmware version */
	uint32_t ctrl_ver;         /* flash controller firmware version */

	uint32_t cmd_fifo_sz;      /* command FIFO depth */
	uint32_t cmd_fifo_sz_mask; /* depth - 1, used as a ring index mask */
	uint32_t cmd_max_sg;       /* max scatter/gather entries per command */
	uint32_t sg_max_sec;       /* max sectors per scatter/gather entry */
	uint32_t resp_ptr_sz;      /* response pointer area size */
	uint32_t resp_msg_sz;      /* size of one response message */

	uint16_t nr_ctrl;          /* number of flash controllers */

	uint16_t nr_data_ch;       /* data channels per controller */
	uint16_t nr_ch;            /* channels in use */
	uint16_t max_ch;           /* channels physically present */
	uint16_t nr_chip;          /* chips per channel */

	uint8_t pcb_ver;           /* main PCB version */
	uint8_t upper_pcb_ver;     /* daughter-board PCB version */

	uint8_t nand_vendor_id;    /* NAND JEDEC vendor id */
	uint8_t nand_dev_id;       /* NAND device id */

	uint8_t max_ce;            /* chip-enables per chip */
	uint8_t id_size;           /* NAND id length, bytes */
	uint16_t oob_size;         /* out-of-band (spare) bytes per page */

	uint16_t bbf_pages;        /* bad-block-file size, pages */
	uint16_t bbf_seek;         //

	uint16_t page_count;       //per block
	uint32_t page_size;        /* bytes per flash page */
	uint32_t block_count;      //per flash

	uint64_t ram_size;         /* controller RAM size, bytes */
	uint32_t ram_align;        /* required alignment for RAM access */
	uint32_t ram_max_len;      /* max single RAM transfer length */

	uint64_t bbt_base;         /* bad-block table base (device addr) */
	uint32_t bbt_size;         /* bad-block table size, bytes */
	uint64_t md_base;          //metadata
	uint32_t md_size;          /* metadata area size */
	uint32_t md_entry_sz;      /* size of one metadata entry */

	uint32_t log_sz;           /* hardware log area size */

	uint64_t nand_wbuff_base;  /* NAND write-buffer base address */

	uint32_t md_reserved_blks; /* blocks reserved for metadata */
	uint32_t reserved_blks;    /* spare blocks reserved per flash */
	uint32_t valid_pages;      /* pages exposed to the host */
	uint32_t max_valid_pages;  /* upper bound of valid_pages */
	uint64_t size;             /* usable capacity, bytes */
} ssd_hw_info_t;
1043 | ||
/*
 * Extended hardware description (newer firmware).  Padded to 64 bytes
 * (5 + 59) so the on-device layout stays stable -- do not resize.
 */
typedef struct ssd_hw_info_extend
{
	uint8_t board_type;   /* board model discriminator */
	uint8_t cap_type;     /* backup capacitor type */
	uint8_t plp_type;     /* power-loss protection type, enum ssd_plp_type */
	uint8_t work_mode;    /* factory-configured work mode */
	uint8_t form_factor;  /* enum ssd_form_factor */

	uint8_t pad[59];      /* reserved; keeps struct at 64 bytes */
}ssd_hw_info_extend_t;
1054 | ||
/*
 * Layout of the on-board SPI ROM: geometry plus the base offset and
 * size of each region (firmware images, logs, S.M.A.R.T., label).
 * All *_base fields are byte offsets into the ROM.
 */
typedef struct ssd_rom_info
{
	uint32_t size;           /* total ROM size, bytes */
	uint32_t block_size;     /* erase block size */
	uint16_t page_size;      /* program page size */
	uint8_t nr_bridge_fw;    /* number of bridge firmware slots */
	uint8_t nr_ctrl_fw;      /* number of controller firmware slots */
	uint8_t nr_bm_fw;        /* number of battery-manager firmware slots */
	uint8_t nr_smart;        /* number of S.M.A.R.T. save slots */
	uint32_t bridge_fw_base; /* bridge firmware region */
	uint32_t bridge_fw_sz;
	uint32_t ctrl_fw_base;   /* controller firmware region */
	uint32_t ctrl_fw_sz;
	uint32_t bm_fw_base;     /* battery-manager firmware region */
	uint32_t bm_fw_sz;
	uint32_t log_base;       /* persistent log region */
	uint32_t log_sz;
	uint32_t smart_base;     /* S.M.A.R.T. save region */
	uint32_t smart_sz;
	uint32_t vp_base;        /* valid-pages record */
	uint32_t label_base;     /* product label record */
} ssd_rom_info_t;
1077 | ||
/* debug info */

/*
 * Fault-injection modes settable via SSD_CMD_DEBUG: force read/write
 * errors or timeouts, inject a log event, or take the device offline.
 */
enum ssd_debug_type
{
	SSD_DEBUG_NONE = 0,     /* injection disabled */
	SSD_DEBUG_READ_ERR,     /* fail reads in the given range */
	SSD_DEBUG_WRITE_ERR,    /* fail writes in the given range */
	SSD_DEBUG_RW_ERR,       /* fail both */
	SSD_DEBUG_READ_TO,      /* time out reads */
	SSD_DEBUG_WRITE_TO,     /* time out writes */
	SSD_DEBUG_RW_TO,        /* time out both */
	SSD_DEBUG_LOG,          /* inject a log event */
	SSD_DEBUG_OFFLINE,      /* simulate device offline */
	SSD_DEBUG_NR            /* number of modes; not a valid mode */
};

/*
 * Parameters for the selected injection mode: a byte range for the
 * error/timeout modes, or an event id plus payload for log injection.
 */
typedef struct ssd_debug_info
{
	int type;               /* enum ssd_debug_type */
	union {
		struct {
			uint64_t off;   /* start offset of the affected range */
			uint32_t len;   /* length of the affected range */
		} loc;
		struct {
			int event;      /* log event id to inject */
			uint32_t extra; /* event payload */
		} log;
	} data;
}ssd_debug_info_t;
1107 | ||
/* label */
#define SSD_LABEL_FIELD_SZ	32   /* bytes per label text field */
#define SSD_SN_SZ	16           /* serial-number length within sn[] */

/*
 * Product label (v1/v2), stored in ROM and returned by
 * SSD_CMD_GET_LABEL.  Fixed-size char fields; on-ROM format, do not
 * reorder or resize.
 */
typedef struct ssd_label
{
	char date[SSD_LABEL_FIELD_SZ];   /* manufacturing date */
	char sn[SSD_LABEL_FIELD_SZ];     /* serial number */
	char part[SSD_LABEL_FIELD_SZ];   /* part number */
	char desc[SSD_LABEL_FIELD_SZ];   /* description */
	char other[SSD_LABEL_FIELD_SZ];  /* free-form field */
	char maf[SSD_LABEL_FIELD_SZ];    /* manufacturer */
} ssd_label_t;

#define SSD_LABEL_DESC_SZ	256  /* v3 description field length */

/*
 * Product label, version 3 (newer boards); returned via
 * SSD_CMD_GET_LABEL2.  On-ROM format, do not reorder or resize.
 */
typedef struct ssd_labelv3
{
	char boardtype[SSD_LABEL_FIELD_SZ];
	char barcode[SSD_LABEL_FIELD_SZ];
	char item[SSD_LABEL_FIELD_SZ];
	char description[SSD_LABEL_DESC_SZ];
	char manufactured[SSD_LABEL_FIELD_SZ];
	char vendorname[SSD_LABEL_FIELD_SZ];
	char issuenumber[SSD_LABEL_FIELD_SZ];
	char cleicode[SSD_LABEL_FIELD_SZ];
	char bom[SSD_LABEL_FIELD_SZ];
} ssd_labelv3_t;
1136 | ||
/* battery */
typedef struct ssd_battery_info
{
	uint32_t fw_ver;         /* battery-manager firmware version */
} ssd_battery_info_t;

/* ssd power stat */
/* Lifetime power statistics; persisted as part of struct ssd_smart. */
typedef struct ssd_power_stat
{
	uint64_t nr_poweron;     /* clean power-on count */
	uint64_t nr_powerloss;   /* unexpected power-loss count */
	uint64_t init_failed;    /* failed initialization count */
} ssd_power_stat_t;

/* io stat */
/* Cumulative I/O statistics; persisted as part of struct ssd_smart. */
typedef struct ssd_io_stat
{
	uint64_t run_time;       /* accumulated run time */
	uint64_t nr_to;          /* commands that timed out */
	uint64_t nr_ioerr;       /* I/O errors reported to the block layer */
	uint64_t nr_rwerr;       /* read/write errors (incl. recovered) */
	uint64_t nr_read;        /* read commands completed */
	uint64_t nr_write;       /* write commands completed */
	uint64_t rsectors;       /* sectors read */
	uint64_t wsectors;       /* sectors written */
} ssd_io_stat_t;

/* ecc */
/* Histogram of corrected bit-flips, indexed by flip count. */
typedef struct ssd_ecc_info
{
	uint64_t bitflip[SSD_ECC_MAX_FLIP];
} ssd_ecc_info_t;
1169 | ||
/* log */
/* Severity of a device log event, in ascending order. */
enum ssd_log_level
{
	SSD_LOG_LEVEL_INFO = 0,
	SSD_LOG_LEVEL_NOTICE,
	SSD_LOG_LEVEL_WARNING,
	SSD_LOG_LEVEL_ERR,
	SSD_LOG_NR_LEVEL         /* number of levels; not a valid level */
};

/* Per-severity log counters; persisted as part of struct ssd_smart. */
typedef struct ssd_log_info
{
	uint64_t nr_log;                    /* total events seen */
	uint64_t stat[SSD_LOG_NR_LEVEL];    /* events per severity level */
} ssd_log_info_t;
1185 | ||
/* S.M.A.R.T. */
/* Magic is ASCII "SSDSMART" read little-endian; marks a valid record. */
#define SSD_SMART_MAGIC		(0x5452414D53445353ull)

/*
 * Aggregated S.M.A.R.T. record saved to ROM and returned by
 * SSD_CMD_GET_SMART.  Persistent format -- do not reorder fields.
 */
typedef struct ssd_smart
{
	struct ssd_power_stat pstat;    /* power on/loss counters */
	struct ssd_io_stat io_stat;     /* cumulative I/O counters */
	struct ssd_ecc_info ecc_info;   /* corrected bit-flip histogram */
	struct ssd_log_info log_info;   /* per-severity log counters */
	uint64_t version;               /* record format version */
	uint64_t magic;                 /* SSD_SMART_MAGIC when valid */
} ssd_smart_t;
1198 | ||
/* internal log */
/* In-memory copy of the device's internal log buffer. */
typedef struct ssd_internal_log
{
	uint32_t nr_log;   /* number of entries currently in `log` */
	void *log;         /* driver-allocated buffer holding the entries */
} ssd_internal_log_t;
1205 | ||
/* ssd cmd */
/*
 * Per-tag in-flight command context: the originating bio, its DMA
 * scatterlist, the hardware message, and timeout bookkeeping.
 */
typedef struct ssd_cmd
{
	struct bio *bio;              /* originating block-layer request */
	struct scatterlist *sgl;      /* DMA scatter/gather list */
	struct list_head list;        /* queueing linkage */
	void *dev;                    /* owning struct ssd_device */
	int nsegs;                    /* mapped scatterlist entries */
	int flag;	/*pbio(1) or bio(0)*/

	int tag;                      /* hardware command tag (slot index) */
	void *msg;                    /* command message for the device */
	dma_addr_t msg_dma;           /* bus address of `msg` */

	unsigned long start_time;     /* when issued, for latency/timeout */

	int errors;                   /* completion error status */
	unsigned int nr_log;          /* log entries reported by completion */

	struct timer_list cmd_timer;  /* per-command timeout timer */
	struct completion *waiting;   /* set for synchronous submitters */
} ssd_cmd_t;

/* Issues a prepared command to the hardware. */
typedef void (*send_cmd_func)(struct ssd_cmd *);
/* Notifies the upper layer of a device event. */
typedef int (*ssd_event_call)(struct gendisk *, int, int); /* gendisk, event id, event level */
1231 | ||
/* dcmd sz */
#define SSD_DCMD_MAX_SZ		32  /* max direct-command message size, bytes */

/*
 * "Direct" command: a pre-allocated message slot for management
 * requests that bypass the normal bio path (ioctl helpers).
 */
typedef struct ssd_dcmd
{
	struct list_head list;          /* free-list linkage */
	void *dev;                      /* owning struct ssd_device */
	uint8_t msg[SSD_DCMD_MAX_SZ];   /* inline message buffer */
} ssd_dcmd_t;
1241 | ||
1242 | ||
/*
 * Bit numbers for the device's `state` word (see struct ssd_device);
 * NOTE(review): appear to be used with set_bit/test_bit -- confirm at
 * the usage sites.
 */
enum ssd_state {
	SSD_INIT_WORKQ,      /* workqueue initialized */
	SSD_INIT_BD,         /* block device registered */
	SSD_ONLINE,          /* device usable for I/O */
	/* full reset */
	SSD_RESETING,        /* full reset in progress (sic) */
	/* hw log */
	SSD_LOG_HW,          /* hardware log fetch in progress */
	/* log err */
	SSD_LOG_ERR,
};
1254 | ||
#define SSD_QUEUE_NAME_LEN	16
/*
 * Per-MSI-X-vector completion queue: its response ring cursor and
 * per-queue statistics.  One instance per entry in ssd_device.queue[].
 */
typedef struct ssd_queue {
	char name[SSD_QUEUE_NAME_LEN];  /* irq/queue name */
	void *dev;                      /* owning struct ssd_device */

	int idx;                        /* queue (vector) index */

	uint32_t resp_idx;              /* current response ring position */
	uint32_t resp_idx_mask;         /* ring size - 1, index mask */
	uint32_t resp_msg_sz;           /* size of one response message */

	void *resp_msg;                 /* response message ring */
	void *resp_ptr;                 /* response pointer area */

	struct ssd_cmd *cmd;            /* command table (shared, by tag) */

	struct ssd_io_stat io_stat;     /* per-queue I/O counters */
	struct ssd_ecc_info ecc_info;   /* per-queue bit-flip histogram */
} ssd_queue_t;
1274 | ||
/*
 * Per-device driver state: PCI/MMIO resources, command/response rings,
 * worker threads, statistics, and the gendisk glue.  One instance per
 * probed SSD.  Fields grouped by role; see inline comments.
 */
typedef struct ssd_device {
	char name[SSD_DEV_NAME_LEN];    /* block device name */

	int idx;                        /* device index (minor ordering) */
	int major;                      /* block major number */
	int readonly;                   /* nonzero => export read-only */

	int int_mode;                   /* interrupt mode (legacy/MSI/MSI-X) */
#ifdef SSD_ESCAPE_IRQ
	int irq_cpu;                    /* CPU to steer the irq away from */
#endif

	int reload_fw;                  /* firmware reload requested */

	int ot_delay; //in ms           /* over-temperature throttle delay */

	atomic_t refcnt;                /* open/reference count */
	atomic_t tocnt;                 /* timeout count */
	atomic_t in_flight[2]; //r&w    /* outstanding reads / writes */

	uint64_t uptime;                /* device start timestamp */

	struct list_head list;          /* global device list linkage */
	struct pci_dev *pdev;           /* underlying PCI function */

	unsigned long mmio_base;        /* BAR base address */
	unsigned long mmio_len;         /* BAR length */
	void __iomem *ctrlp;            /* mapped control registers */

	struct mutex spi_mutex;         /* serializes SPI ROM access */
	struct mutex i2c_mutex;         /* serializes I2C/SMBus access */

	struct ssd_protocol_info protocol_info; /* probe-time reg layout */
	struct ssd_hw_info hw_info;             /* hardware description */
	struct ssd_rom_info rom_info;           /* ROM layout */
	struct ssd_label label;                 /* product label (v1/v2) */

	struct ssd_smart smart;         /* persisted S.M.A.R.T. record */

	/* submission side: bios queued to the send thread */
	atomic_t in_sendq;
	spinlock_t sendq_lock;
	struct ssd_blist sendq;
	struct task_struct *send_thread;
	wait_queue_head_t send_waitq;

	/* completion side: bios queued to the done thread */
	atomic_t in_doneq;
	spinlock_t doneq_lock;
	struct ssd_blist doneq;
	struct task_struct *done_thread;
	wait_queue_head_t done_waitq;

	/* direct (management) command pool */
	struct ssd_dcmd *dcmd;
	spinlock_t dcmd_lock;
	struct list_head dcmd_list; /* direct cmd list */
	wait_queue_head_t dcmd_wq;

	unsigned long *tag_map;         /* bitmap of in-use command tags */
	wait_queue_head_t tag_wq;       /* waiters for a free tag */

	spinlock_t cmd_lock;            /* protects command submission */
	struct ssd_cmd *cmd;            /* command table, indexed by tag */
	send_cmd_func scmd;             /* hardware submit hook */

	ssd_event_call event_call;      /* upper-layer event callback */
	void *msg_base;                 /* command message area */
	dma_addr_t msg_base_dma;        /* its bus address */

	/* response rings (per-queue views live in `queue[]`) */
	uint32_t resp_idx;
	void *resp_msg_base;
	void *resp_ptr_base;
	dma_addr_t resp_msg_base_dma;
	dma_addr_t resp_ptr_base_dma;

	int nr_queue;                   /* completion queues in use */
	struct msix_entry entry[SSD_MSIX_VEC];
	struct ssd_queue queue[SSD_MSIX_VEC];

	struct request_queue *rq;	/* The device request queue */
	struct gendisk *gd;		/* The gendisk structure */

	/* internal (device) log collection */
	struct mutex internal_log_mutex;
	struct ssd_internal_log internal_log;
	struct workqueue_struct *workq;
	struct work_struct log_work;	/* get log */
	void *log_buf;

	unsigned long state;	/* device state, for example, block device inited */

	struct module *owner;           /* module holding this device */

	/* extend */

	int slave;                      /* secondary function of a dual dev */
	int cmajor;                     /* char device major */
	int save_md;                    /* save metadata on shutdown */
	int ot_protect;                 /* over-temperature protection on */

	struct kref kref;               /* device lifetime refcount */

	struct mutex gd_mutex;          /* protects gendisk add/remove */
	struct ssd_log_info log_info;	/* volatile */

	atomic_t queue_depth;           /* current queue depth limit */
	struct mutex barrier_mutex;     /* serializes barrier/flush */
	struct mutex fw_mutex;          /* serializes firmware operations */

	struct ssd_hw_info_extend hw_info_ext;  /* extended hw description */
	struct ssd_labelv3 labelv3;             /* v3 product label */

	int wmode;                      /* effective write mode */
	int user_wmode;                 /* user-requested write mode */
	struct mutex bm_mutex;          /* serializes battery-manager access */
	struct work_struct bm_work;	/* check bm */
	struct timer_list bm_timer;     /* periodic battery check */
	struct sfifo log_fifo;          /* buffered log events */

	struct timer_list routine_timer;  /* periodic housekeeping */
	unsigned long routine_tick;       /* last housekeeping tick */
	unsigned long hwmon;              /* hw monitor flags */

	struct work_struct hwmon_work;	/* check hw */
	struct work_struct capmon_work;	/* check battery */
	struct work_struct tempmon_work;	/* check temp */

	/* debug info */
	struct ssd_debug_info db_info;  /* fault-injection settings */
	uint64_t reset_time;            /* timestamp of last reset */

	int has_non_0x98_reg_access;
	spinlock_t in_flight_lock;      /* protects in_flight[] accounting */

	uint64_t last_poweron_id;

} ssd_device_t;
1408 | ||
1409 | ||
/* Ioctl struct */
/* Accumulator thresholds/value (bad-block and ECC accumulators). */
typedef struct ssd_acc_info {
	uint32_t threshold_l1;   /* first warning threshold */
	uint32_t threshold_l2;   /* second (critical) threshold */
	uint32_t val;            /* current accumulator value */
} ssd_acc_info_t;

/* SSD_CMD_REG_READ / SSD_CMD_REG_WRITE argument. */
typedef struct ssd_reg_op_info
{
	uint32_t offset;         /* register offset within the BAR */
	uint32_t value;          /* value read back / value to write */
} ssd_reg_op_info_t;

/* SSD_CMD_SPI_* argument: a window of the SPI ROM. */
typedef struct ssd_spi_op_info
{
	void __user *buf;        /* user buffer for data in/out */
	uint32_t off;            /* byte offset within the ROM */
	uint32_t len;            /* transfer length, bytes */
} ssd_spi_op_info_t;

/* SSD_CMD_I2C_* argument: raw I2C write and/or read. */
typedef struct ssd_i2c_op_info
{
	uint8_t saddr;           /* 7-bit slave address */
	uint8_t wsize;           /* bytes to write from wbuf */
	uint8_t rsize;           /* bytes to read into rbuf */
	void __user *wbuf;       /* user write buffer */
	void __user *rbuf;       /* user read buffer */
} ssd_i2c_op_info_t;

/* SSD_CMD_SMBUS_* argument. */
typedef struct ssd_smbus_op_info
{
	uint8_t saddr;           /* 7-bit slave address */
	uint8_t cmd;             /* SMBus command code */
	uint8_t size;            /* payload size (byte/word/block) */
	void __user *buf;        /* user data buffer */
} ssd_smbus_op_info_t;

/* SSD_CMD_RAM_READ / SSD_CMD_RAM_WRITE argument. */
typedef struct ssd_ram_op_info {
	uint8_t ctrl_idx;        /* target controller */
	uint32_t length;         /* transfer length, bytes */
	uint64_t start;          /* RAM start offset */
	uint8_t __user *buf;     /* user data buffer */
} ssd_ram_op_info_t;

/* SSD_CMD_NAND_* / SSD_CMD_UPDATE_BBT argument: one flash location. */
typedef struct ssd_flash_op_info {
	uint32_t page;           /* physical page number */
	uint16_t flash;          /* flash (channel) number */
	uint8_t chip;            /* chip-enable */
	uint8_t ctrl_idx;        /* target controller */
	uint8_t __user *buf;     /* user data buffer */
} ssd_flash_op_info_t;

/* SSD_CMD_SW_LOG argument: a software-generated log event. */
typedef struct ssd_sw_log_info {
	uint16_t event;          /* event id */
	uint16_t pad;            /* alignment, unused */
	uint32_t data;           /* event payload */
} ssd_sw_log_info_t;

/* SSD_CMD_GET_VERSION result. */
typedef struct ssd_version_info
{
	uint32_t bridge_ver;	/* bridge fw version */
	uint32_t ctrl_ver;		/* controller fw version */
	uint32_t bm_ver;		/* battery manager fw version */
	uint8_t pcb_ver;		/* main pcb version */
	uint8_t upper_pcb_ver;  /* daughter-board pcb version */
	uint8_t pad0;           /* alignment, unused */
	uint8_t pad1;           /* alignment, unused */
} ssd_version_info_t;

/* A PCI location (domain:bus:slot.func). */
typedef struct pci_addr
{
	uint16_t domain;
	uint8_t bus;
	uint8_t slot;
	uint8_t func;
} pci_addr_t;

/* SSD_CMD_DRV_PARAM_INFO result: module parameters in effect. */
typedef struct ssd_drv_param_info {
	int mode;                /* driver mode */
	int status_mask;         /* status bits to ignore */
	int int_mode;            /* interrupt mode */
	int threaded_irq;        /* nonzero => threaded irq handling */
	int log_level;           /* minimum log level reported */
	int wmode;               /* write mode */
	int ot_protect;          /* over-temperature protection */
	int finject;             /* fault injection enabled */
	int pad[8];              /* reserved for future parameters */
} ssd_drv_param_info_t;
1498 | ||
1499 | ||
/* form factor */
/* Card form factor, reported in ssd_hw_info_extend.form_factor. */
enum ssd_form_factor
{
	SSD_FORM_FACTOR_HHHL = 0,   /* half-height, half-length */
	SSD_FORM_FACTOR_FHHL        /* full-height, half-length */
};


/* ssd power loss protect */
/* Backup power source type, reported in ssd_hw_info_extend.plp_type. */
enum ssd_plp_type
{
	SSD_PLP_SCAP = 0,   /* supercapacitor */
	SSD_PLP_CAP,        /* ordinary capacitor bank */
	SSD_PLP_NONE        /* no power-loss protection */
};
1515 | ||
/* ssd bm (battery manager, an SBS-compatible gas gauge on SMBus) */
#define SSD_BM_SLAVE_ADDRESS	0x16   /* SMBus slave address */
#define SSD_BM_CAP	5                  /* capacitors monitored per pack */

/* SBS cmd */
#define SSD_BM_SAFETYSTATUS	0x51
#define SSD_BM_OPERATIONSTATUS	0x54

/* ManufacturerAccess */
#define SSD_BM_MANUFACTURERACCESS	0x00
#define SSD_BM_ENTER_CAP_LEARNING	0x0023	/* cap learning */

/* Data flash access */
#define SSD_BM_DATA_FLASH_SUBCLASS_ID	0x77
#define SSD_BM_DATA_FLASH_SUBCLASS_ID_PAGE1	0x78
#define SSD_BM_SYSTEM_DATA_SUBCLASS_ID	56
#define SSD_BM_CONFIGURATION_REGISTERS_ID	64

/* min cap voltage */
#define SSD_BM_CAP_VOLT_MIN	500

/*
enum ssd_bm_cap
{
	SSD_BM_CAP_VINA = 1,
	SSD_BM_CAP_JH = 3
};*/
1543 | ||
/* Battery/capacitor health as reported by SSD_CMD_GET_BMSTATUS. */
enum ssd_bmstatus
{
	SSD_BMSTATUS_OK = 0,
	SSD_BMSTATUS_CHARGING, /* not fully charged */
	SSD_BMSTATUS_WARNING
};

/* Unit of an SBS register value; drives display/conversion. */
enum sbs_unit {
	SBS_UNIT_VALUE = 0,     /* raw value, no unit */
	SBS_UNIT_TEMPERATURE,
	SBS_UNIT_VOLTAGE,
	SBS_UNIT_CURRENT,
	SBS_UNIT_ESR,           /* equivalent series resistance */
	SBS_UNIT_PERCENT,
	SBS_UNIT_CAPACITANCE
};

/* Transfer width of an SBS register. */
enum sbs_size {
	SBS_SIZE_BYTE = 1,
	SBS_SIZE_WORD,
	SBS_SIZE_BLK,
};
1566 | ||
/*
 * One row of the SBS register table (ssd_bm_sbs[]): which SMBus command
 * to issue, how wide it is, its unit, and where the result lands inside
 * struct ssd_bm (`off` is a byte offset produced by bm_var_offset()).
 */
struct sbs_cmd {
	uint8_t cmd;     /* SBS/SMBus command code */
	uint8_t size;    /* enum sbs_size */
	uint8_t unit;    /* enum sbs_unit */
	uint8_t off;     /* byte offset of the field in struct ssd_bm */
	uint16_t mask;   /* valid-bits mask applied to the raw value */
	char *desc;      /* human-readable name */
};

/* Snapshot of battery-manager readings, filled via ssd_bm_sbs[]. */
struct ssd_bm {
	uint16_t temp;                  /* pack temperature */
	uint16_t volt;                  /* pack voltage */
	uint16_t curr;                  /* charge/discharge current */
	uint16_t esr;                   /* equivalent series resistance */
	uint16_t rsoc;                  /* relative state of charge, % */
	uint16_t health;                /* health, % */
	uint16_t cap;                   /* capacitance */
	uint16_t chg_curr;              /* charging current */
	uint16_t chg_volt;              /* charging voltage */
	uint16_t cap_volt[SSD_BM_CAP];  /* per-capacitor voltages */
	uint16_t sf_alert;              /* SafetyAlert bits */
	uintint16_t sf_status;          /* SafetyStatus bits */
	uint16_t op_status;             /* OperationStatus bits */
	uint16_t sys_volt;              /* system voltage */
};
1592 | ||
/* ManufacturerData block read from the gas gauge's data flash. */
struct ssd_bm_manufacturer_data
{
	uint16_t pack_lot_code;
	uint16_t pcb_lot_code;
	uint16_t firmware_ver;
	uint16_t hardware_ver;
};

/*
 * Gas-gauge configuration registers (data-flash subclass
 * SSD_BM_CONFIGURATION_REGISTERS_ID).  Bitfield layout mirrors the
 * device's register map -- do not reorder.
 */
struct ssd_bm_configuration_registers
{
	struct {
		uint16_t cc:3;       /* cell count */
		uint16_t rsvd:5;
		uint16_t stack:1;
		uint16_t rsvd1:2;
		uint16_t temp:2;     /* temperature sensor config */
		uint16_t rsvd2:1;
		uint16_t lt_en:1;    /* lifetime data logging enable */
		uint16_t rsvd3:1;
	} operation_cfg;
	uint16_t pad;
	uint16_t fet_action;
	uint16_t pad1;
	uint16_t fault;
};
1618 | ||
/* Mask selecting all 16 value bits of an SBS register. */
#define SBS_VALUE_MASK		0xffff

/*
 * Byte offset of member @var inside struct ssd_bm, for the `off` field
 * of ssd_bm_sbs[].  Use the standard offsetof (provided via
 * <linux/kernel.h>/<linux/stddef.h>) instead of the hand-rolled
 * null-pointer-dereference idiom, which is formally undefined behavior.
 */
#define bm_var_offset(var)	offsetof(struct ssd_bm, var)
/*
 * Address of the field located @offset bytes into the object at @start.
 * Arguments are parenthesized and the arithmetic is done on char * so
 * the macro neither mis-binds complex arguments nor relies on the GNU
 * void-pointer-arithmetic extension.
 */
#define bm_var(start, offset)	((void *)((char *)(start) + (offset)))
1623 | ||
1624 | static struct sbs_cmd ssd_bm_sbs[] = { | |
1625 | {0x08, SBS_SIZE_WORD, SBS_UNIT_TEMPERATURE, bm_var_offset(temp), SBS_VALUE_MASK, "Temperature"}, | |
1626 | {0x09, SBS_SIZE_WORD, SBS_UNIT_VOLTAGE, bm_var_offset(volt), SBS_VALUE_MASK, "Voltage"}, | |
1627 | {0x0a, SBS_SIZE_WORD, SBS_UNIT_CURRENT, bm_var_offset(curr), SBS_VALUE_MASK, "Current"}, | |
1628 | {0x0b, SBS_SIZE_WORD, SBS_UNIT_ESR, bm_var_offset(esr), SBS_VALUE_MASK, "ESR"}, | |
1629 | {0x0d, SBS_SIZE_BYTE, SBS_UNIT_PERCENT, bm_var_offset(rsoc), SBS_VALUE_MASK, "RelativeStateOfCharge"}, | |
1630 | {0x0e, SBS_SIZE_BYTE, SBS_UNIT_PERCENT, bm_var_offset(health), SBS_VALUE_MASK, "Health"}, | |
1631 | {0x10, SBS_SIZE_WORD, SBS_UNIT_CAPACITANCE, bm_var_offset(cap), SBS_VALUE_MASK, "Capacitance"}, | |
1632 | {0x14, SBS_SIZE_WORD, SBS_UNIT_CURRENT, bm_var_offset(chg_curr), SBS_VALUE_MASK, "ChargingCurrent"}, | |
1633 | {0x15, SBS_SIZE_WORD, SBS_UNIT_VOLTAGE, bm_var_offset(chg_volt), SBS_VALUE_MASK, "ChargingVoltage"}, | |
1634 | {0x3b, SBS_SIZE_WORD, SBS_UNIT_VOLTAGE, (uint8_t)bm_var_offset(cap_volt[4]), SBS_VALUE_MASK, "CapacitorVoltage5"}, | |
1635 | {0x3c, SBS_SIZE_WORD, SBS_UNIT_VOLTAGE, (uint8_t)bm_var_offset(cap_volt[3]), SBS_VALUE_MASK, "CapacitorVoltage4"}, | |
1636 | {0x3d, SBS_SIZE_WORD, SBS_UNIT_VOLTAGE, (uint8_t)bm_var_offset(cap_volt[2]), SBS_VALUE_MASK, "CapacitorVoltage3"}, | |
1637 | {0x3e, SBS_SIZE_WORD, SBS_UNIT_VOLTAGE, (uint8_t)bm_var_offset(cap_volt[1]), SBS_VALUE_MASK, "CapacitorVoltage2"}, | |
1638 | {0x3f, SBS_SIZE_WORD, SBS_UNIT_VOLTAGE, (uint8_t)bm_var_offset(cap_volt[0]), SBS_VALUE_MASK, "CapacitorVoltage1"}, | |
1639 | {0x50, SBS_SIZE_WORD, SBS_UNIT_VALUE, bm_var_offset(sf_alert), 0x870F, "SafetyAlert"}, | |
1640 | {0x51, SBS_SIZE_WORD, SBS_UNIT_VALUE, bm_var_offset(sf_status), 0xE7BF, "SafetyStatus"}, | |
1641 | {0x54, SBS_SIZE_WORD, SBS_UNIT_VALUE, bm_var_offset(op_status), 0x79F4, "OperationStatus"}, | |
1642 | {0x5a, SBS_SIZE_WORD, SBS_UNIT_VOLTAGE, bm_var_offset(sys_volt), SBS_VALUE_MASK, "SystemVoltage"}, | |
1643 | {0, 0, 0, 0, 0, NULL}, | |
1644 | }; | |
1645 | ||
/* ssd ioctl (magic 'H'; argument types are the structs defined above) */

/* query: device description */
#define SSD_CMD_GET_PROTOCOL_INFO	_IOR('H', 100, struct ssd_protocol_info)
#define SSD_CMD_GET_HW_INFO	_IOR('H', 101, struct ssd_hw_info)
#define SSD_CMD_GET_ROM_INFO	_IOR('H', 102, struct ssd_rom_info)
#define SSD_CMD_GET_SMART	_IOR('H', 103, struct ssd_smart)
#define SSD_CMD_GET_IDX	_IOR('H', 105, int)
#define SSD_CMD_GET_AMOUNT	_IOR('H', 106, int)
#define SSD_CMD_GET_TO_INFO	_IOR('H', 107, int)
#define SSD_CMD_GET_DRV_VER	_IOR('H', 108, char[DRIVER_VERSION_LEN])

#define SSD_CMD_GET_BBACC_INFO	_IOR('H', 109, struct ssd_acc_info)
#define SSD_CMD_GET_ECACC_INFO	_IOR('H', 110, struct ssd_acc_info)

#define SSD_CMD_GET_HW_INFO_EXT	_IOR('H', 111, struct ssd_hw_info_extend)

/* raw hardware access: registers, SPI ROM, I2C, SMBus */
#define SSD_CMD_REG_READ	_IOWR('H', 120, struct ssd_reg_op_info)
#define SSD_CMD_REG_WRITE	_IOWR('H', 121, struct ssd_reg_op_info)

#define SSD_CMD_SPI_READ	_IOWR('H', 125, struct ssd_spi_op_info)
#define SSD_CMD_SPI_WRITE	_IOWR('H', 126, struct ssd_spi_op_info)
#define SSD_CMD_SPI_ERASE	_IOWR('H', 127, struct ssd_spi_op_info)

#define SSD_CMD_I2C_READ	_IOWR('H', 128, struct ssd_i2c_op_info)
#define SSD_CMD_I2C_WRITE	_IOWR('H', 129, struct ssd_i2c_op_info)
#define SSD_CMD_I2C_WRITE_READ	_IOWR('H', 130, struct ssd_i2c_op_info)

#define SSD_CMD_SMBUS_SEND_BYTE	_IOWR('H', 131, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_RECEIVE_BYTE	_IOWR('H', 132, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_WRITE_BYTE	_IOWR('H', 133, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_READ_BYTE	_IOWR('H', 135, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_WRITE_WORD	_IOWR('H', 136, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_READ_WORD	_IOWR('H', 137, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_WRITE_BLOCK	_IOWR('H', 138, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_READ_BLOCK	_IOWR('H', 139, struct ssd_smbus_op_info)

/* battery manager */
#define SSD_CMD_BM_GET_VER	_IOR('H', 140, uint16_t)
#define SSD_CMD_BM_GET_NR_CAP	_IOR('H', 141, int)
#define SSD_CMD_BM_CAP_LEARNING	_IOW('H', 142, int)
#define SSD_CMD_CAP_LEARN	_IOR('H', 143, uint32_t)
#define SSD_CMD_GET_CAP_STATUS	_IOR('H', 144, int)

/* controller RAM and raw NAND access */
#define SSD_CMD_RAM_READ	_IOWR('H', 150, struct ssd_ram_op_info)
#define SSD_CMD_RAM_WRITE	_IOWR('H', 151, struct ssd_ram_op_info)

#define SSD_CMD_NAND_READ_ID	_IOR('H', 160, struct ssd_flash_op_info)
#define SSD_CMD_NAND_READ	_IOWR('H', 161, struct ssd_flash_op_info) //with oob
#define SSD_CMD_NAND_WRITE	_IOWR('H', 162, struct ssd_flash_op_info)
#define SSD_CMD_NAND_ERASE	_IOWR('H', 163, struct ssd_flash_op_info)
#define SSD_CMD_NAND_READ_EXT	_IOWR('H', 164, struct ssd_flash_op_info) // ignore EIO

#define SSD_CMD_UPDATE_BBT	_IOW('H', 180, struct ssd_flash_op_info)

/* alarm / device control */
#define SSD_CMD_CLEAR_ALARM	_IOW('H', 190, int)
#define SSD_CMD_SET_ALARM	_IOW('H', 191, int)

#define SSD_CMD_RESET	_IOW('H', 200, int)
#define SSD_CMD_RELOAD_FW	_IOW('H', 201, int)
#define SSD_CMD_UNLOAD_DEV	_IOW('H', 202, int)
#define SSD_CMD_LOAD_DEV	_IOW('H', 203, int)
#define SSD_CMD_UPDATE_VP	_IOWR('H', 205, uint32_t)
#define SSD_CMD_FULL_RESET	_IOW('H', 206, int)

/* log retrieval and control */
#define SSD_CMD_GET_NR_LOG	_IOR('H', 220, uint32_t)
#define SSD_CMD_GET_LOG	_IOR('H', 221, void *)
#define SSD_CMD_LOG_LEVEL	_IOW('H', 222, int)

#define SSD_CMD_OT_PROTECT	_IOW('H', 223, int)
#define SSD_CMD_GET_OT_STATUS	_IOR('H', 224, int)

#define SSD_CMD_CLEAR_LOG	_IOW('H', 230, int)
#define SSD_CMD_CLEAR_SMART	_IOW('H', 231, int)

#define SSD_CMD_SW_LOG	_IOW('H', 232, struct ssd_sw_log_info)

/* identity and status */
#define SSD_CMD_GET_LABEL	_IOR('H', 235, struct ssd_label)
#define SSD_CMD_GET_VERSION	_IOR('H', 236, struct ssd_version_info)
#define SSD_CMD_GET_TEMPERATURE	_IOR('H', 237, int)
#define SSD_CMD_GET_BMSTATUS	_IOR('H', 238, int)
#define SSD_CMD_GET_LABEL2	_IOR('H', 239, void *)


/* cache flush / metadata save / write mode */
#define SSD_CMD_FLUSH	_IOW('H', 240, int)
#define SSD_CMD_SAVE_MD	_IOW('H', 241, int)

#define SSD_CMD_SET_WMODE	_IOW('H', 242, int)
#define SSD_CMD_GET_WMODE	_IOR('H', 243, int)
#define SSD_CMD_GET_USER_WMODE	_IOR('H', 244, int)

/* debug / driver parameters */
#define SSD_CMD_DEBUG	_IOW('H', 250, struct ssd_debug_info)
#define SSD_CMD_DRV_PARAM_INFO	_IOR('H', 251, struct ssd_drv_param_info)

#define SSD_CMD_CLEAR_WARNING	_IOW('H', 260, int)
1738 | ||
361ebed5 HSDT |
1739 | |
/* log */
#define SSD_LOG_MAX_SZ		4096                 /* log transfer buffer size */
#define SSD_LOG_LEVEL		SSD_LOG_LEVEL_NOTICE /* default reporting level */
#define SSD_DIF_WITH_OLD_LOG	0x3f

/* How a log entry's data word should be interpreted when printed. */
enum ssd_log_data
{
	SSD_LOG_DATA_NONE = 0,   /* no payload */
	SSD_LOG_DATA_LOC,        /* payload is a flash/block/page location */
	SSD_LOG_DATA_HEX         /* payload is an opaque hex value */
};
1751 | ||
/*
 * One log entry as produced by the device.  The 32-bit data word is
 * either a flash location (two bitfield layouts, old and new firmware)
 * or a raw value, selected by the event's ssd_log_data type.  Packed
 * wire format -- do not reorder.
 */
typedef struct ssd_log_entry
{
	union {
		struct {
			uint32_t page:10;    /* page within block (old layout) */
			uint32_t block:14;   /* block within flash */
			uint32_t flash:8;    /* flash (channel) number */
		} loc;
		struct {
			uint32_t page:12;    /* page within block (new layout) */
			uint32_t block:12;   /* block within flash */
			uint32_t flash:8;    /* flash (channel) number */
		} loc1;
		uint32_t val;            /* raw payload */
	} data;
	uint16_t event:10;           /* event id */
	uint16_t mod:6;              /* originating firmware module */
	uint16_t idx;                /* sequence index */
}__attribute__((packed))ssd_log_entry_t;

/* A log entry plus its timestamp and source controller.  Packed. */
typedef struct ssd_log
{
	uint64_t time:56;            /* event timestamp */
	uint64_t ctrl_idx:8;         /* controller that raised the event */
	ssd_log_entry_t le;          /* the entry itself */
} __attribute__((packed)) ssd_log_t;

/*
 * Static description of an event id: severity, payload interpretation,
 * and whether the block/page parts of a location are meaningful.
 * Drives the ssd_log_desc[] table below.
 */
typedef struct ssd_log_desc
{
	uint16_t event;              /* event id this row describes */
	uint8_t level;               /* enum ssd_log_level */
	uint8_t data;                /* enum ssd_log_data */
	uint8_t sblock;              /* nonzero => print the block field */
	uint8_t spage;               /* nonzero => print the page field */
	char *desc;                  /* human-readable message */
} __attribute__((packed)) ssd_log_desc_t;

#define SSD_LOG_SW_IDX	0xF                    /* mod value of software events */
#define SSD_UNKNOWN_EVENT	((uint16_t)-1)     /* sentinel: event not in table */
1791 | static struct ssd_log_desc ssd_log_desc[] = { | |
1792 | /* event, level, show flash, show block, show page, desc */ | |
1793 | {0x0, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_LOC, 0, 0, "Create BBT failure"}, //g3 | |
1794 | {0x1, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_LOC, 0, 0, "Read BBT failure"}, //g3 | |
1795 | {0x2, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "Mark bad block"}, | |
1796 | {0x3, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 0, 0, "Flush BBT failure"}, | |
1797 | {0x4, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Program failure"}, | |
1798 | {0x7, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "No available blocks"}, | |
1799 | {0x8, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "Bad EC header"}, | |
1800 | {0x9, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_LOC, 1, 0, "Bad VID header"}, //g3 | |
1801 | {0xa, SSD_LOG_LEVEL_INFO, SSD_LOG_DATA_LOC, 1, 0, "Wear leveling"}, | |
1802 | {0xb, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "WL read back failure"}, | |
1803 | {0x11, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Data recovery failure"}, // err | |
1804 | {0x20, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Init: scan mapping table failure"}, // err g3 | |
1805 | {0x21, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Program failure"}, | |
1806 | {0x22, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Program failure"}, | |
1807 | {0x23, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Program failure"}, | |
1808 | {0x24, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "Merge: read mapping page failure"}, | |
1809 | {0x25, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Merge: read back failure"}, | |
1810 | {0x26, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Program failure"}, | |
1811 | {0x27, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_LOC, 1, 1, "Data corrupted for abnormal power down"}, //g3 | |
1812 | {0x28, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Merge: mapping page corrupted"}, | |
1813 | {0x29, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "Init: no mapping page"}, | |
1814 | {0x2a, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: mapping pages incomplete"}, | |
1815 | {0x2b, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Read back failure after programming failure"}, // err | |
1816 | {0xf1, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Read failure without recovery"}, // err | |
1817 | {0xf2, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 0, 0, "No available blocks"}, // maybe err g3 | |
1818 | {0xf3, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 0, "Init: RAID incomplete"}, // err g3 | |
1819 | {0xf4, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Program failure"}, | |
1820 | {0xf5, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Read failure in moving data"}, | |
1821 | {0xf6, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Program failure"}, | |
1822 | {0xf7, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_LOC, 1, 1, "Init: RAID not complete"}, | |
1823 | {0xf8, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "Init: data moving interrupted"}, | |
da3355df | 1824 | {0xfe, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Data inspection failure"}, |
361ebed5 HSDT |
1825 | {0xff, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "IO: ECC failed"}, |
1826 | ||
1827 | /* new */ | |
1828 | {0x2e, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 0, 0, "No available reserved blocks" }, // err | |
1829 | {0x30, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 0, 0, "Init: PMT membership not found"}, | |
1830 | {0x31, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Init: PMT corrupted"}, | |
1831 | {0x32, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 0, 0, "Init: PBT membership not found"}, | |
1832 | {0x33, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 0, 0, "Init: PBT not found"}, | |
1833 | {0x34, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 0, 0, "Init: PBT corrupted"}, | |
1834 | {0x35, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: PMT page read failure"}, | |
1835 | {0x36, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: PBT page read failure"}, | |
1836 | {0x37, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: PBT backup page read failure"}, | |
1837 | {0x38, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: PBMT read failure"}, | |
1838 | {0x39, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Init: PBMT scan failure"}, // err | |
1839 | {0x3a, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: first page read failure"}, | |
1840 | {0x3b, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Init: first page scan failure"}, // err | |
1841 | {0x3c, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Init: scan unclosed block failure"}, // err | |
1842 | {0x3d, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: write pointer mismatch"}, | |
1843 | {0x3e, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: PMT recovery: PBMT read failure"}, | |
1844 | {0x3f, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "Init: PMT recovery: PBMT scan failure"}, | |
1845 | {0x40, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Init: PMT recovery: data page read failure"}, //err | |
1846 | {0x41, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: PBT write pointer mismatch"}, | |
1847 | {0x42, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: PBT latest version corrupted"}, | |
1848 | {0x43, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 0, "Init: too many unclosed blocks"}, | |
1849 | {0x44, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Init: PDW block found"}, | |
1850 | {0x45, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_HEX, 0, 0, "Init: more than one PDW block found"}, //err | |
1851 | {0x46, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: first page is blank or read failure"}, | |
1852 | {0x47, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 0, 0, "Init: PDW block not found"}, | |
1853 | ||
1854 | {0x50, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 0, "Cache: hit error data"}, // err | |
1855 | {0x51, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 0, "Cache: read back failure"}, // err | |
1856 | {0x52, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Cache: unknown command"}, //? | |
1857 | {0x53, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "GC/WL read back failure"}, // err | |
1858 | ||
1859 | {0x60, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "Erase failure"}, | |
1860 | ||
1861 | {0x70, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "LPA not matched"}, | |
1862 | {0x71, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "PBN not matched"}, | |
1863 | {0x72, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Read retry failure"}, | |
1864 | {0x73, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Need raid recovery"}, | |
1865 | {0x74, SSD_LOG_LEVEL_INFO, SSD_LOG_DATA_LOC, 1, 1, "Need read retry"}, | |
1866 | {0x75, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Read invalid data page"}, | |
1867 | {0x76, SSD_LOG_LEVEL_INFO, SSD_LOG_DATA_LOC, 1, 1, "ECC error, data in cache, PBN matched"}, | |
1868 | {0x77, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "ECC error, data in cache, PBN not matched"}, | |
1869 | {0x78, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "ECC error, data in flash, PBN not matched"}, | |
1870 | {0x79, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "ECC ok, data in cache, LPA not matched"}, | |
1871 | {0x7a, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "ECC ok, data in flash, LPA not matched"}, | |
1872 | {0x7b, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID data in cache, LPA not matched"}, | |
1873 | {0x7c, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID data in flash, LPA not matched"}, | |
1874 | {0x7d, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Read data page status error"}, | |
1875 | {0x7e, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Read blank page"}, | |
1876 | {0x7f, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Access flash timeout"}, | |
1877 | ||
1878 | {0x80, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "EC overflow"}, | |
1879 | {0x81, SSD_LOG_LEVEL_INFO, SSD_LOG_DATA_NONE, 0, 0, "Scrubbing completed"}, | |
1880 | {0x82, SSD_LOG_LEVEL_INFO, SSD_LOG_DATA_LOC, 1, 0, "Unstable block(too much bit flip)"}, | |
1881 | {0x83, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "GC: ram error"}, //? | |
1882 | {0x84, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "GC: one PBMT read failure"}, | |
1883 | ||
1884 | {0x88, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "GC: mark bad block"}, | |
1885 | {0x89, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "GC: invalid page count error"}, // maybe err | |
1886 | {0x8a, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_NONE, 0, 0, "Warning: Bad Block close to limit"}, | |
1887 | {0x8b, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_NONE, 0, 0, "Error: Bad Block over limit"}, | |
1888 | {0x8c, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_NONE, 0, 0, "Warning: P/E cycles close to limit"}, | |
1889 | {0x8d, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_NONE, 0, 0, "Error: P/E cycles over limit"}, | |
1890 | ||
1197134c KM |
1891 | {0x90, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Warning: Over temperature"}, //90 |
1892 | {0x91, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Info: Temperature is OK"}, //80 | |
361ebed5 HSDT |
1893 | {0x92, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_NONE, 0, 0, "Battery fault"}, |
1894 | {0x93, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_NONE, 0, 0, "SEU fault"}, //err | |
1895 | {0x94, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_NONE, 0, 0, "DDR error"}, //err | |
1896 | {0x95, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_NONE, 0, 0, "Controller serdes error"}, //err | |
1897 | {0x96, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_NONE, 0, 0, "Bridge serdes 1 error"}, //err | |
1898 | {0x97, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_NONE, 0, 0, "Bridge serdes 2 error"}, //err | |
1899 | {0x98, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "SEU fault (corrected)"}, //err | |
1900 | {0x99, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Battery is OK"}, | |
1197134c | 1901 | {0x9a, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Info: Temperature close to limit"}, //85 |
361ebed5 HSDT |
1902 | |
1903 | {0x9b, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "SEU fault address (low)"}, | |
1904 | {0x9c, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "SEU fault address (high)"}, | |
1905 | {0x9d, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "I2C fault" }, | |
1906 | {0x9e, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "DDR single bit error" }, | |
1907 | {0x9f, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Board voltage fault" }, | |
1908 | ||
1909 | {0xa0, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "LPA not matched"}, | |
1910 | {0xa1, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Re-read data in cache"}, | |
1911 | {0xa2, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Read blank page"}, | |
1912 | {0xa3, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID recovery: Read blank page"}, | |
1913 | {0xa4, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID recovery: new data in cache"}, | |
1914 | {0xa5, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID recovery: PBN not matched"}, | |
1915 | {0xa6, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Read data with error flag"}, | |
1916 | {0xa7, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID recovery: recoverd data with error flag"}, | |
1917 | {0xa8, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Blank page in cache, PBN matched"}, | |
1918 | {0xa9, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID recovery: Blank page in cache, PBN matched"}, | |
1919 | {0xaa, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 0, 0, "Flash init failure"}, | |
1920 | {0xab, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Mapping table recovery failure"}, | |
1921 | {0xac, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID recovery: ECC failed"}, | |
da3355df SF |
1922 | {0xb0, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Warning: Temperature is 95 degrees C"}, |
1923 | {0xb1, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Warning: Temperature is 100 degrees C"}, | |
361ebed5 HSDT |
1924 | |
1925 | {0x300, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_HEX, 0, 0, "CMD timeout"}, | |
1926 | {0x301, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Power on"}, | |
1927 | {0x302, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Power off"}, | |
1928 | {0x303, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Clear log"}, | |
1929 | {0x304, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Set capacity"}, | |
1930 | {0x305, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Clear data"}, | |
1931 | {0x306, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "BM safety status"}, | |
1932 | {0x307, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_HEX, 0, 0, "I/O error"}, | |
1933 | {0x308, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "CMD error"}, | |
1934 | {0x309, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Set wmode"}, | |
1935 | {0x30a, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_HEX, 0, 0, "DDR init failed" }, | |
1936 | {0x30b, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "PCIe link status" }, | |
1937 | {0x30c, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_HEX, 0, 0, "Controller reset sync error" }, | |
1938 | {0x30d, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_HEX, 0, 0, "Clock fault" }, | |
1939 | {0x30e, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "FPGA voltage fault status" }, | |
1940 | {0x30f, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Set capacity finished"}, | |
1941 | {0x310, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Clear data finished"}, | |
1942 | {0x311, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Reset"}, | |
1943 | {0x312, SSD_LOG_LEVEL_WARNING,SSD_LOG_DATA_HEX, 0, 0, "CAP: voltage fault"}, | |
1944 | {0x313, SSD_LOG_LEVEL_WARNING,SSD_LOG_DATA_NONE, 0, 0, "CAP: learn fault"}, | |
1945 | {0x314, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "CAP status"}, | |
1946 | {0x315, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Board voltage fault status"}, | |
da3355df SF |
1947 | {0x316, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Info: Inlet temperature is 55 degrees C"}, //55 |
1948 | {0x317, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Info: Inlet temperature is 50 degrees C"}, //50 | |
1197134c KM |
1949 | {0x318, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Info: Flash over temperature"}, //70 |
1950 | {0x319, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Info: Flash temperature is OK"}, //65 | |
361ebed5 HSDT |
1951 | {0x31a, SSD_LOG_LEVEL_WARNING,SSD_LOG_DATA_NONE, 0, 0, "CAP: short circuit"}, |
1952 | {0x31b, SSD_LOG_LEVEL_WARNING,SSD_LOG_DATA_HEX, 0, 0, "Sensor fault"}, | |
1953 | {0x31c, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Erase all data"}, | |
1954 | {0x31d, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Erase all data finished"}, | |
da3355df SF |
1955 | {0x320, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Temperature sensor event"}, |
1956 | ||
1957 | {0x350, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Clear smart"}, | |
1958 | {0x351, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Clear warning"}, | |
361ebed5 HSDT |
1959 | |
1960 | {SSD_UNKNOWN_EVENT, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "unknown event"}, | |
1961 | }; | |
1962 | /* */ | |
1963 | #define SSD_LOG_OVER_TEMP 0x90 | |
1964 | #define SSD_LOG_NORMAL_TEMP 0x91 | |
1965 | #define SSD_LOG_WARN_TEMP 0x9a | |
1966 | #define SSD_LOG_SEU_FAULT 0x93 | |
1967 | #define SSD_LOG_SEU_FAULT1 0x98 | |
1968 | #define SSD_LOG_BATTERY_FAULT 0x92 | |
1969 | #define SSD_LOG_BATTERY_OK 0x99 | |
1970 | #define SSD_LOG_BOARD_VOLT_FAULT 0x9f | |
1971 | ||
1972 | /* software log */ | |
1973 | #define SSD_LOG_TIMEOUT 0x300 | |
1974 | #define SSD_LOG_POWER_ON 0x301 | |
1975 | #define SSD_LOG_POWER_OFF 0x302 | |
1976 | #define SSD_LOG_CLEAR_LOG 0x303 | |
1977 | #define SSD_LOG_SET_CAPACITY 0x304 | |
1978 | #define SSD_LOG_CLEAR_DATA 0x305 | |
1979 | #define SSD_LOG_BM_SFSTATUS 0x306 | |
1980 | #define SSD_LOG_EIO 0x307 | |
1981 | #define SSD_LOG_ECMD 0x308 | |
1982 | #define SSD_LOG_SET_WMODE 0x309 | |
1983 | #define SSD_LOG_DDR_INIT_ERR 0x30a | |
1984 | #define SSD_LOG_PCIE_LINK_STATUS 0x30b | |
1985 | #define SSD_LOG_CTRL_RST_SYNC 0x30c | |
1986 | #define SSD_LOG_CLK_FAULT 0x30d | |
1987 | #define SSD_LOG_VOLT_FAULT 0x30e | |
1988 | #define SSD_LOG_SET_CAPACITY_END 0x30F | |
1989 | #define SSD_LOG_CLEAR_DATA_END 0x310 | |
1990 | #define SSD_LOG_RESET 0x311 | |
1991 | #define SSD_LOG_CAP_VOLT_FAULT 0x312 | |
1992 | #define SSD_LOG_CAP_LEARN_FAULT 0x313 | |
1993 | #define SSD_LOG_CAP_STATUS 0x314 | |
1994 | #define SSD_LOG_VOLT_STATUS 0x315 | |
1995 | #define SSD_LOG_INLET_OVER_TEMP 0x316 | |
1996 | #define SSD_LOG_INLET_NORMAL_TEMP 0x317 | |
1997 | #define SSD_LOG_FLASH_OVER_TEMP 0x318 | |
1998 | #define SSD_LOG_FLASH_NORMAL_TEMP 0x319 | |
1999 | #define SSD_LOG_CAP_SHORT_CIRCUIT 0x31a | |
2000 | #define SSD_LOG_SENSOR_FAULT 0x31b | |
2001 | #define SSD_LOG_ERASE_ALL 0x31c | |
2002 | #define SSD_LOG_ERASE_ALL_END 0x31d | |
da3355df SF |
2003 | #define SSD_LOG_TEMP_SENSOR_EVENT 0x320 |
2004 | #define SSD_LOG_CLEAR_SMART 0x350 | |
2005 | #define SSD_LOG_CLEAR_WARNING 0x351 | |
361ebed5 HSDT |
2006 | |
2007 | ||
2008 | /* sw log fifo depth */ | |
2009 | #define SSD_LOG_FIFO_SZ 1024 | |
2010 | ||
2011 | ||
/* done queue: per-CPU completion list drained by a per-CPU tasklet */
static DEFINE_PER_CPU(struct list_head, ssd_doneq);
static DEFINE_PER_CPU(struct tasklet_struct, ssd_tasklet);


/* unloading driver: set while the module exits so readers bail out early */
static volatile int ssd_exiting = 0;

/* device class; pre-2.6.13 kernels only have the class_simple API */
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
static struct class_simple *ssd_class;
#else
static struct class *ssd_class;
#endif

/* char device major for the control node */
static int ssd_cmajor = SSD_CMAJOR;

/* ssd block device major, minors */
static int ssd_major = SSD_MAJOR;
static int ssd_major_sl = SSD_MAJOR_SL;	/* major for slave devices */
static int ssd_minors = SSD_MINORS;

/* ssd device list, plus bitmaps allocating device indexes (master/slave) */
static struct list_head ssd_list;
static unsigned long ssd_index_bits[SSD_MAX_DEV / BITS_PER_LONG + 1];
static unsigned long ssd_index_bits_sl[SSD_MAX_DEV / BITS_PER_LONG + 1];
static atomic_t ssd_nr;		/* number of indexes currently in use */
2038 | ||
/* module param */
enum ssd_drv_mode
{
	SSD_DRV_MODE_STANDARD = 0,	/* full */
	SSD_DRV_MODE_DEBUG = 2,		/* debug */
	SSD_DRV_MODE_BASE		/* base only */
	/* NOTE(review): MODULE_PARM_DESC(mode) below also lists "1 - debug",
	 * which has no enum value here — confirm against the firmware docs */
};

enum ssd_int_mode
{
	SSD_INT_LEGACY = 0,
	SSD_INT_MSI,
	SSD_INT_MSIX
};

#if (defined SSD_MSIX)
#define SSD_INT_MODE_DEFAULT	SSD_INT_MSIX
#elif (defined SSD_MSI)
#define SSD_INT_MODE_DEFAULT	SSD_INT_MSI
#else
/* auto select the default int mode according to the kernel version */
/* suse 11 sp1 irqbalance bug: use msi instead */
#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR >= 6) || (defined RHEL_MAJOR && RHEL_MAJOR == 5 && RHEL_MINOR >= 5))
#define SSD_INT_MODE_DEFAULT	SSD_INT_MSIX
#else
#define SSD_INT_MODE_DEFAULT	SSD_INT_MSI
#endif
#endif

static int mode = SSD_DRV_MODE_STANDARD;
static int status_mask = 0xFF;		/* command status bits reported to callers */
static int int_mode = SSD_INT_MODE_DEFAULT;
static int threaded_irq = 0;
static int log_level = SSD_LOG_LEVEL_WARNING;
static int ot_protect = 1;		/* over-temperature protection on by default */
static int wmode = SSD_WMODE_DEFAULT;
static int finject = 0;			/* fault injection, debug only */

module_param(mode, int, 0);
module_param(status_mask, int, 0);
module_param(int_mode, int, 0);
module_param(threaded_irq, int, 0);
module_param(log_level, int, 0);
module_param(ot_protect, int, 0);
module_param(wmode, int, 0);
module_param(finject, int, 0);


MODULE_PARM_DESC(mode, "driver mode, 0 - standard, 1 - debug, 2 - debug without IO, 3 - basic debug mode");
MODULE_PARM_DESC(status_mask, "command status mask, 0 - without command error, 0xff - with command error");
MODULE_PARM_DESC(int_mode, "preferred interrupt mode, 0 - legacy, 1 - msi, 2 - msix");
MODULE_PARM_DESC(threaded_irq, "threaded irq, 0 - normal irq, 1 - threaded irq");
MODULE_PARM_DESC(log_level, "log level to display, 0 - info and above, 1 - notice and above, 2 - warning and above, 3 - error only");
MODULE_PARM_DESC(ot_protect, "over temperature protect, 0 - disable, 1 - enable");
MODULE_PARM_DESC(wmode, "write mode, 0 - write buffer (with risk for the 6xx firmware), 1 - write buffer ex, 2 - write through, 3 - auto, 4 - default");
MODULE_PARM_DESC(finject, "enable fault simulation, 0 - off, 1 - on, for debug purpose only");
2095 | ||
1197134c KM |
// API adaption layer

/*
 * Complete a bio with an errno-style error code, hiding the bio_endio()
 * signature changes across kernel versions:
 *   >= 4.13        : error carried as blk_status_t in bio->bi_status
 *   4.4  .. 4.12   : error carried in bio->bi_error
 *   2.6.24 .. 4.3  : error passed directly to bio_endio()
 *   older          : bio_endio() also takes the completed byte count
 */
static inline void ssd_bio_endio(struct bio *bio, int error)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,13,0))
	bio->bi_error = error;
#else
	bio->bi_status = errno_to_blk_status(error);
#endif
	bio_endio(bio);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
	bio_endio(bio, error);
#else
	bio_endio(bio, bio->bi_size, error);
#endif
}
2112 | ||
/*
 * Return non-zero if the bio is a DISCARD (TRIM) request.
 * Always 0 when the driver is built without SSD_TRIM or on kernels
 * too old to carry a discard flag.
 */
static inline int ssd_bio_has_discard(struct bio *bio)
{
#ifndef SSD_TRIM
	return 0;
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0))
	return bio_op(bio) == REQ_OP_DISCARD;
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
	return bio->bi_rw & REQ_DISCARD;
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
	return bio_rw_flagged(bio, BIO_RW_DISCARD);
#else
	return 0;
#endif
}
2127 | ||
/*
 * Return non-zero if the bio requests a cache flush
 * (REQ_OP_FLUSH on >= 4.8, REQ_FLUSH on >= 2.6.37, otherwise unsupported).
 */
static inline int ssd_bio_has_flush(struct bio *bio)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0))
	return bio_op(bio) == REQ_OP_FLUSH;
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
	return bio->bi_rw & REQ_FLUSH;
#else
	return 0;
#endif
}
2138 | ||
da3355df | 2139 | static inline int ssd_bio_has_barrier_or_fua(struct bio * bio) |
1197134c KM |
2140 | { |
2141 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0)) | |
2142 | return bio->bi_opf & REQ_FUA; | |
da3355df | 2143 | #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) |
1197134c | 2144 | return bio->bi_rw & REQ_FUA; |
da3355df SF |
2145 | #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)) |
2146 | return bio->bi_rw & REQ_HARDBARRIER; | |
2147 | #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)) | |
2148 | return bio_rw_flagged(bio, BIO_RW_BARRIER); | |
2149 | #else | |
2150 | return bio_barrier(bio); | |
1197134c KM |
2151 | #endif |
2152 | } | |
361ebed5 HSDT |
2153 | |
#ifndef MODULE
/*
 * Built-in (non-module) kernel command line parsers.  Each handles one
 * "<MODULE_NAME>_<param>=" option, stores the parsed value into the
 * corresponding module parameter above, and returns 1 (option consumed).
 */
static int __init ssd_drv_mode(char *str)
{
	mode = (int)simple_strtoul(str, NULL, 0);

	return 1;
}

static int __init ssd_status_mask(char *str)
{
	/* parsed as hex, matching the 0xFF default */
	status_mask = (int)simple_strtoul(str, NULL, 16);

	return 1;
}

static int __init ssd_int_mode(char *str)
{
	int_mode = (int)simple_strtoul(str, NULL, 0);

	return 1;
}

static int __init ssd_threaded_irq(char *str)
{
	threaded_irq = (int)simple_strtoul(str, NULL, 0);

	return 1;
}

static int __init ssd_log_level(char *str)
{
	log_level = (int)simple_strtoul(str, NULL, 0);

	return 1;
}

static int __init ssd_ot_protect(char *str)
{
	ot_protect = (int)simple_strtoul(str, NULL, 0);

	return 1;
}

static int __init ssd_wmode(char *str)
{
	wmode = (int)simple_strtoul(str, NULL, 0);

	return 1;
}

static int __init ssd_finject(char *str)
{
	finject = (int)simple_strtoul(str, NULL, 0);

	return 1;
}

__setup(MODULE_NAME"_mode=", ssd_drv_mode);
__setup(MODULE_NAME"_status_mask=", ssd_status_mask);
__setup(MODULE_NAME"_int_mode=", ssd_int_mode);
__setup(MODULE_NAME"_threaded_irq=", ssd_threaded_irq);
__setup(MODULE_NAME"_log_level=", ssd_log_level);
__setup(MODULE_NAME"_ot_protect=", ssd_ot_protect);
__setup(MODULE_NAME"_wmode=", ssd_wmode);
__setup(MODULE_NAME"_finject=", ssd_finject);
#endif
2220 | ||
2221 | ||
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <asm/uaccess.h>

/* /proc/<MODULE_NAME>/info */
#define SSD_PROC_DIR	MODULE_NAME
#define SSD_PROC_INFO	"info"

static struct proc_dir_entry *ssd_proc_dir = NULL;
static struct proc_dir_entry *ssd_proc_info = NULL;
2231 | ||
2232 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0)) | |
/*
 * Legacy (pre-3.2) /proc read_proc handler: emits the driver version and
 * one summary block per registered device into "page".  The whole report
 * is produced in one shot at offset 0 and *eof is set, so no paging.
 *
 * NOTE(review): snprintf() returns the length that *would* have been
 * written, so on truncation "len" can exceed "count" and the following
 * (count - len) goes negative (huge as size_t) — presumably count is
 * PAGE_SIZE and always large enough for the device list; confirm.
 */
static int ssd_proc_read(char *page, char **start,
	off_t off, int count, int *eof, void *data)
{
	struct ssd_device *dev = NULL;
	struct ssd_device *n = NULL;
	uint64_t size;
	int idx;
	int len = 0;
	//char type; //xx

	/* bail out while unloading, and serve only a single read at off 0 */
	if (ssd_exiting || off != 0) {
		return 0;
	}

	len += snprintf((page + len), (count - len), "Driver Version:\t%s\n", DRIVER_VERSION);

	list_for_each_entry_safe(dev, n, &ssd_list, list) {
		idx = dev->idx + 1;
		/* bytes -> decimal GB */
		size = dev->hw_info.size ;
		do_div(size, 1000000000);

		len += snprintf((page + len), (count - len), "\n");

		len += snprintf((page + len), (count - len), "HIO %d Size:\t%uGB\n", idx, (uint32_t)size);

		len += snprintf((page + len), (count - len), "HIO %d Bridge FW VER:\t%03X\n", idx, dev->hw_info.bridge_ver);
		if (dev->hw_info.ctrl_ver != 0) {
			len += snprintf((page + len), (count - len), "HIO %d Controller FW VER:\t%03X\n", idx, dev->hw_info.ctrl_ver);
		}

		len += snprintf((page + len), (count - len), "HIO %d PCB VER:\t.%c\n", idx, dev->hw_info.pcb_ver);

		if (dev->hw_info.upper_pcb_ver >= 'A') {
			len += snprintf((page + len), (count - len), "HIO %d Upper PCB VER:\t.%c\n", idx, dev->hw_info.upper_pcb_ver);
		}

		len += snprintf((page + len), (count - len), "HIO %d Device:\t%s\n", idx, dev->name);
	}

	*eof = 1;
	return len;
}
2275 | ||
2276 | #else | |
2277 | ||
2278 | static int ssd_proc_show(struct seq_file *m, void *v) | |
2279 | { | |
2280 | struct ssd_device *dev = NULL; | |
2281 | struct ssd_device *n = NULL; | |
2282 | uint64_t size; | |
2283 | int idx; | |
2284 | ||
2285 | if (ssd_exiting) { | |
2286 | return 0; | |
2287 | } | |
2288 | ||
2289 | seq_printf(m, "Driver Version:\t%s\n", DRIVER_VERSION); | |
2290 | ||
2291 | list_for_each_entry_safe(dev, n, &ssd_list, list) { | |
2292 | idx = dev->idx + 1; | |
2293 | size = dev->hw_info.size ; | |
2294 | do_div(size, 1000000000); | |
2295 | ||
2296 | seq_printf(m, "\n"); | |
2297 | ||
2298 | seq_printf(m, "HIO %d Size:\t%uGB\n", idx, (uint32_t)size); | |
2299 | ||
2300 | seq_printf(m, "HIO %d Bridge FW VER:\t%03X\n", idx, dev->hw_info.bridge_ver); | |
2301 | if (dev->hw_info.ctrl_ver != 0) { | |
2302 | seq_printf(m, "HIO %d Controller FW VER:\t%03X\n", idx, dev->hw_info.ctrl_ver); | |
2303 | } | |
2304 | ||
2305 | seq_printf(m, "HIO %d PCB VER:\t.%c\n", idx, dev->hw_info.pcb_ver); | |
2306 | ||
2307 | if (dev->hw_info.upper_pcb_ver >= 'A') { | |
2308 | seq_printf(m, "HIO %d Upper PCB VER:\t.%c\n", idx, dev->hw_info.upper_pcb_ver); | |
2309 | } | |
2310 | ||
2311 | seq_printf(m, "HIO %d Device:\t%s\n", idx, dev->name); | |
2312 | } | |
2313 | ||
2314 | return 0; | |
2315 | } | |
2316 | ||
/*
 * seq_file open: hand the proc entry's private data to single_open();
 * PDE()->data vs PDE_DATA() depends on the kernel version.
 */
static int ssd_proc_open(struct inode *inode, struct file *file)
{
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3,9,0))
	return single_open(file, ssd_proc_show, PDE(inode)->data);
#else
	return single_open(file, ssd_proc_show, PDE_DATA(inode));
#endif
}
2325 | ||
/* seq_file-backed file operations for /proc/<module>/info */
static const struct file_operations ssd_proc_fops = {
	.open = ssd_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
#endif
2333 | ||
2334 | ||
2335 | static void ssd_cleanup_proc(void) | |
2336 | { | |
2337 | if (ssd_proc_info) { | |
2338 | remove_proc_entry(SSD_PROC_INFO, ssd_proc_dir); | |
2339 | ssd_proc_info = NULL; | |
2340 | } | |
2341 | if (ssd_proc_dir) { | |
2342 | remove_proc_entry(SSD_PROC_DIR, NULL); | |
2343 | ssd_proc_dir = NULL; | |
2344 | } | |
2345 | } | |
/*
 * Create /proc/<module>/info.  Pre-3.2 kernels use create_proc_entry()
 * with a read_proc callback; newer kernels use proc_create() with the
 * seq_file fops above.  Returns 0 on success, -ENOMEM on failure.
 */
static int ssd_init_proc(void)
{
	ssd_proc_dir = proc_mkdir(SSD_PROC_DIR, NULL);
	if (!ssd_proc_dir)
		goto out_proc_mkdir;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0))
	ssd_proc_info = create_proc_entry(SSD_PROC_INFO, S_IFREG | S_IRUGO | S_IWUSR, ssd_proc_dir);
	if (!ssd_proc_info)
		goto out_create_proc_entry;

	ssd_proc_info->read_proc = ssd_proc_read;

	/* kernel bug: owner must be set explicitly on pre-2.6.30 kernels */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30))
	ssd_proc_info->owner = THIS_MODULE;
#endif
#else
	ssd_proc_info = proc_create(SSD_PROC_INFO, 0600, ssd_proc_dir, &ssd_proc_fops);
	if (!ssd_proc_info)
		goto out_create_proc_entry;
#endif

	return 0;

out_create_proc_entry:
	remove_proc_entry(SSD_PROC_DIR, NULL);
out_proc_mkdir:
	return -ENOMEM;
}
2376 | ||
#else
/* CONFIG_PROC_FS disabled: keep the interface with no-op stubs */
static void ssd_cleanup_proc(void)
{
	return;
}
static int ssd_init_proc(void)
{
	return 0;
}
#endif /* CONFIG_PROC_FS */
2387 | ||
/* sysfs: not implemented — placeholder hooks so call sites stay stable */
static void ssd_unregister_sysfs(struct ssd_device *dev)
{
	return;
}

static int ssd_register_sysfs(struct ssd_device *dev)
{
	return 0;
}

static void ssd_cleanup_sysfs(void)
{
	return;
}

static int ssd_init_sysfs(void)
{
	return 0;
}
2408 | ||
2409 | static inline void ssd_put_index(int slave, int index) | |
2410 | { | |
2411 | unsigned long *index_bits = ssd_index_bits; | |
2412 | ||
2413 | if (slave) { | |
2414 | index_bits = ssd_index_bits_sl; | |
2415 | } | |
2416 | ||
2417 | if (test_and_clear_bit(index, index_bits)) { | |
2418 | atomic_dec(&ssd_nr); | |
2419 | } | |
2420 | } | |
2421 | ||
2422 | static inline int ssd_get_index(int slave) | |
2423 | { | |
2424 | unsigned long *index_bits = ssd_index_bits; | |
2425 | int index; | |
2426 | ||
2427 | if (slave) { | |
2428 | index_bits = ssd_index_bits_sl; | |
2429 | } | |
2430 | ||
2431 | find_index: | |
2432 | if ((index = find_first_zero_bit(index_bits, SSD_MAX_DEV)) >= SSD_MAX_DEV) { | |
2433 | return -1; | |
2434 | } | |
2435 | ||
2436 | if (test_and_set_bit(index, index_bits)) { | |
2437 | goto find_index; | |
2438 | } | |
2439 | ||
2440 | atomic_inc(&ssd_nr); | |
2441 | ||
2442 | return index; | |
2443 | } | |
2444 | ||
/* nothing to tear down: the index bitmaps live in static storage */
static void ssd_cleanup_index(void)
{
	return;
}
2449 | ||
2450 | static int ssd_init_index(void) | |
2451 | { | |
2452 | INIT_LIST_HEAD(&ssd_list); | |
2453 | atomic_set(&ssd_nr, 0); | |
2454 | memset(ssd_index_bits, 0, (SSD_MAX_DEV / BITS_PER_LONG + 1)); | |
2455 | memset(ssd_index_bits_sl, 0, (SSD_MAX_DEV / BITS_PER_LONG + 1)); | |
2456 | ||
2457 | return 0; | |
2458 | } | |
2459 | ||
2460 | static void ssd_set_dev_name(char *name, size_t size, int idx) | |
2461 | { | |
2462 | if(idx < SSD_ALPHABET_NUM) { | |
2463 | snprintf(name, size, "%c", 'a'+idx); | |
2464 | } else { | |
2465 | idx -= SSD_ALPHABET_NUM; | |
2466 | snprintf(name, size, "%c%c", 'a'+(idx/SSD_ALPHABET_NUM), 'a'+(idx%SSD_ALPHABET_NUM)); | |
2467 | } | |
2468 | } | |
2469 | ||
/* pci register r&w */

/*
 * 64-bit BAR register write, issued as two 32-bit MMIO writes (low word
 * first) followed by a write barrier.  NOTE(review): the two halves are
 * not atomic with respect to the device — presumably the hardware
 * latches on the high-word write; confirm against the register spec.
 */
static inline void ssd_reg_write(void *addr, uint64_t val)
{
	iowrite32((uint32_t)val, addr);
	iowrite32((uint32_t)(val >> 32), addr + 4);
	wmb();
}

/*
 * 64-bit BAR register read: two 32-bit MMIO reads (low word first), a
 * read barrier, then recombination into one 64-bit value.
 */
static inline uint64_t ssd_reg_read(void *addr)
{
	uint64_t val;
	uint32_t val_lo, val_hi;

	val_lo = ioread32(addr);
	val_hi = ioread32(addr + 4);

	rmb();
	val = val_lo | ((uint64_t)val_hi << 32);

	return val;
}


/* 32-bit register accessors */
#define ssd_reg32_write(addr, val)	writel(val, addr)
#define ssd_reg32_read(addr)		readl(addr)
2495 | ||
2496 | /* alarm led */ | |
2497 | static void ssd_clear_alarm(struct ssd_device *dev) | |
2498 | { | |
2499 | uint32_t val; | |
2500 | ||
2501 | if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) { | |
2502 | return; | |
2503 | } | |
2504 | ||
2505 | val = ssd_reg32_read(dev->ctrlp + SSD_LED_REG); | |
2506 | ||
2507 | /* firmware control */ | |
2508 | val &= ~0x2; | |
2509 | ||
2510 | ssd_reg32_write(dev->ctrlp + SSD_LED_REG, val); | |
2511 | } | |
2512 | ||
2513 | static void ssd_set_alarm(struct ssd_device *dev) | |
2514 | { | |
2515 | uint32_t val; | |
2516 | ||
2517 | if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) { | |
2518 | return; | |
2519 | } | |
2520 | ||
2521 | val = ssd_reg32_read(dev->ctrlp + SSD_LED_REG); | |
2522 | ||
2523 | /* light up */ | |
2524 | val &= ~0x1; | |
2525 | /* software control */ | |
2526 | val |= 0x2; | |
2527 | ||
2528 | ssd_reg32_write(dev->ctrlp + SSD_LED_REG, val); | |
2529 | } | |
2530 | ||
/* byte-swap a 32-bit value (endianness conversion); the argument is
 * expanded four times, so do not pass expressions with side effects */
#define u32_swap(x) \
((uint32_t)( \
	(((uint32_t)(x) & (uint32_t)0x000000ffUL) << 24) | \
	(((uint32_t)(x) & (uint32_t)0x0000ff00UL) << 8) | \
	(((uint32_t)(x) & (uint32_t)0x00ff0000UL) >> 8) | \
	(((uint32_t)(x) & (uint32_t)0xff000000UL) >> 24)))

/* byte-swap a 16-bit value; same multiple-expansion caveat */
#define u16_swap(x) \
((uint16_t)( \
	(((uint16_t)(x) & (uint16_t)0x00ff) << 8) | \
	(((uint16_t)(x) & (uint16_t)0xff00) >> 8) ))
2542 | ||
2543 | ||
#if 0	/* dead code: compiled out, kept for reference only */
/* No lock, for init only: read the SPI flash JEDEC ID into *id.
 * Polls the READY register (0x1000000 = command complete) with a
 * SSD_SPI_TIMEOUT bound; the four initial dummy READY reads presumably
 * flush stale values — confirm against the bridge spec if re-enabled. */
static int ssd_spi_read_id(struct ssd_device *dev, uint32_t *id)
{
	uint32_t val;
	unsigned long st;
	int ret = 0;

	if (!dev || !id) {
		return -EINVAL;
	}

	ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_READ_ID);

	val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);
	val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);
	val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);
	val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);

	st = jiffies;
	for (;;) {
		val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);
		if (val == 0x1000000) {
			break;
		}

		if (time_after(jiffies, (st + SSD_SPI_TIMEOUT))) {
			ret = -ETIMEDOUT;
			goto out;
		}
		cond_resched();
	}

	val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_ID);
	*id = val;

out:
	return ret;
}
#endif
2584 | ||
2585 | /* spi access */ | |
2586 | static int ssd_init_spi(struct ssd_device *dev) | |
2587 | { | |
2588 | uint32_t val; | |
2589 | unsigned long st; | |
2590 | int ret = 0; | |
2591 | ||
2592 | mutex_lock(&dev->spi_mutex); | |
2593 | st = jiffies; | |
2594 | for(;;) { | |
2595 | ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_READ_STATUS); | |
2596 | ||
2597 | do { | |
2598 | val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY); | |
2599 | ||
2600 | if (time_after(jiffies, (st + SSD_SPI_TIMEOUT))) { | |
2601 | ret = -ETIMEDOUT; | |
2602 | goto out; | |
2603 | } | |
2604 | cond_resched(); | |
2605 | } while (val != 0x1000000); | |
2606 | ||
2607 | val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_STATUS); | |
2608 | if (!(val & 0x1)) { | |
2609 | break; | |
2610 | } | |
2611 | ||
2612 | if (time_after(jiffies, (st + SSD_SPI_TIMEOUT))) { | |
2613 | ret = -ETIMEDOUT; | |
2614 | goto out; | |
2615 | } | |
2616 | cond_resched(); | |
2617 | } | |
2618 | ||
2619 | out: | |
2620 | if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) { | |
2621 | if (val & 0x1) { | |
2622 | ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_CLSR); | |
2623 | } | |
2624 | } | |
2625 | ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_W_DISABLE); | |
2626 | mutex_unlock(&dev->spi_mutex); | |
2627 | ||
2628 | ret = 0; | |
2629 | ||
2630 | return ret; | |
2631 | } | |
2632 | ||
/*
 * Read @size bytes (32-bit aligned, at most one flash page) from SPI
 * flash offset @off into @buf, one 32-bit word per READ command.
 *
 * Returns 0 on success, -EINVAL on bad arguments, -ETIMEDOUT if the
 * controller READY flag never asserts.
 */
static int ssd_spi_page_read(struct ssd_device *dev, void *buf, uint32_t off, uint32_t size)
{
	uint32_t val;
	uint32_t rlen = 0;
	unsigned long st;
	int ret = 0;

	if (!dev || !buf) {
		return -EINVAL;
	}

	/* word-aligned, non-empty, inside the ROM, and no larger than a page */
	if ((off % sizeof(uint32_t)) != 0 || (size % sizeof(uint32_t)) != 0 || size == 0 ||
		((uint64_t)off + (uint64_t)size) > dev->rom_info.size || size > dev->rom_info.page_size) {
		return -EINVAL;
	}

	mutex_lock(&dev->spi_mutex);
	while (rlen < size) {
		/* issue READ for the next word: high address byte first, then cmd|addr */
		ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD_HI, ((off + rlen) >> 24));
		wmb();
		ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, (((off + rlen) << 8) | SSD_SPI_CMD_READ));

		/* dummy READY reads; presumably required flushing/settling delay — verify against hw spec */
		(void)ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);
		(void)ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);
		(void)ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);
		(void)ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);

		/* poll READY (0x1000000 = data available) with timeout */
		st = jiffies;
		for (;;) {
			val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);
			if (val == 0x1000000) {
				break;
			}

			if (time_after(jiffies, (st + SSD_SPI_TIMEOUT))) {
				ret = -ETIMEDOUT;
				goto out;
			}
			cond_resched();
		}

		/* data register returns the word byte-swapped */
		val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_RDATA);
		*(uint32_t *)(buf + rlen)= u32_swap(val);

		rlen += sizeof(uint32_t);
	}

out:
	mutex_unlock(&dev->spi_mutex);
	return ret;
}
2684 | ||
/*
 * Program @size bytes (32-bit aligned) into SPI flash at @off.
 * The range must lie entirely within a single flash page.
 *
 * Sequence: write-enable, fill the controller write FIFO, issue
 * PROGRAM, then poll status until the busy bit (bit 0) clears.
 * On protocol >= V3_2, status bit 6 reports a program failure and a
 * still-set busy bit is cleared with CLSR on the way out.
 *
 * Returns 0 on success, -EINVAL / -ETIMEDOUT / -EIO on failure.
 */
static int ssd_spi_page_write(struct ssd_device *dev, void *buf, uint32_t off, uint32_t size)
{
	uint32_t val;
	uint32_t wlen;
	unsigned long st;
	int i;
	int ret = 0;

	if (!dev || !buf) {
		return -EINVAL;
	}

	/* word-aligned, non-empty, in range, <= one page, and not crossing a page boundary */
	if ((off % sizeof(uint32_t)) != 0 || (size % sizeof(uint32_t)) != 0 || size == 0 ||
		((uint64_t)off + (uint64_t)size) > dev->rom_info.size || size > dev->rom_info.page_size ||
		(off / dev->rom_info.page_size) != ((off + size - 1) / dev->rom_info.page_size)) {
		return -EINVAL;
	}

	mutex_lock(&dev->spi_mutex);

	ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_W_ENABLE);

	/* load the payload into the write FIFO, byte-swapped per word */
	wlen = size / sizeof(uint32_t);
	for (i=0; i<(int)wlen; i++) {
		ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_WDATA, u32_swap(*((uint32_t *)buf + i)));
	}

	/* order FIFO fill before the command; high address byte before cmd|addr */
	wmb();
	ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD_HI, (off >> 24));
	wmb();
	ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, ((off << 8) | SSD_SPI_CMD_PROGRAM));

	udelay(1);

	/* poll flash status until the busy bit clears or we time out */
	st = jiffies;
	for (;;) {
		ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_READ_STATUS);
		do {
			val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);

			if (time_after(jiffies, (st + SSD_SPI_TIMEOUT))) {
				ret = -ETIMEDOUT;
				goto out;
			}
			cond_resched();
		} while (val != 0x1000000);

		val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_STATUS);
		if (!(val & 0x1)) {
			break;
		}

		if (time_after(jiffies, (st + SSD_SPI_TIMEOUT))) {
			ret = -ETIMEDOUT;
			goto out;
		}
		cond_resched();
	}

	/* bit 6: program error reported by the flash (V3_2+ only) */
	if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) {
		if ((val >> 6) & 0x1) {
			ret = -EIO;
			goto out;
		}
	}

out:
	if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) {
		if (val & 0x1) {
			/* clear latched error/busy status */
			ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_CLSR);
		}
	}
	ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_W_DISABLE);

	mutex_unlock(&dev->spi_mutex);

	return ret;
}
2763 | ||
/*
 * Erase one flash block at block-aligned offset @off.
 *
 * Mirrors ssd_spi_page_write: write-enable, issue ERASE, poll status
 * until the busy bit clears. On protocol >= V3_2, status bit 5 reports
 * an erase failure.
 *
 * Returns 0 on success, -EINVAL / -ETIMEDOUT / -EIO on failure.
 */
static int ssd_spi_block_erase(struct ssd_device *dev, uint32_t off)
{
	uint32_t val;
	unsigned long st;
	int ret = 0;

	if (!dev) {
		return -EINVAL;
	}

	if ((off % dev->rom_info.block_size) != 0 || off >= dev->rom_info.size) {
		return -EINVAL;
	}

	mutex_lock(&dev->spi_mutex);

	/* NOTE(review): W_ENABLE is issued twice; presumably deliberate for this controller — verify */
	ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_W_ENABLE);
	ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_W_ENABLE);

	/* high address byte first, then cmd|addr */
	wmb();
	ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD_HI, (off >> 24));
	wmb();
	ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, ((off << 8) | SSD_SPI_CMD_ERASE));

	/* poll flash status until the busy bit clears or we time out */
	st = jiffies;
	for (;;) {
		ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_READ_STATUS);

		do {
			val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);

			if (time_after(jiffies, (st + SSD_SPI_TIMEOUT))) {
				ret = -ETIMEDOUT;
				goto out;
			}
			cond_resched();
		} while (val != 0x1000000);

		val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_STATUS);
		if (!(val & 0x1)) {
			break;
		}

		if (time_after(jiffies, (st + SSD_SPI_TIMEOUT))) {
			ret = -ETIMEDOUT;
			goto out;
		}
		cond_resched();
	}

	/* bit 5: erase error reported by the flash (V3_2+ only) */
	if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) {
		if ((val >> 5) & 0x1) {
			ret = -EIO;
			goto out;
		}
	}

out:
	if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) {
		if (val & 0x1) {
			/* clear latched error/busy status */
			ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_CLSR);
		}
	}
	ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_W_DISABLE);

	mutex_unlock(&dev->spi_mutex);

	return ret;
}
2833 | ||
2834 | static int ssd_spi_read(struct ssd_device *dev, void *buf, uint32_t off, uint32_t size) | |
2835 | { | |
2836 | uint32_t len = 0; | |
2837 | uint32_t roff; | |
2838 | uint32_t rsize; | |
2839 | int ret = 0; | |
2840 | ||
2841 | if (!dev || !buf) { | |
2842 | return -EINVAL; | |
2843 | } | |
2844 | ||
2845 | if ((off % sizeof(uint32_t)) != 0 || (size % sizeof(uint32_t)) != 0 || size == 0 || | |
2846 | ((uint64_t)off + (uint64_t)size) > dev->rom_info.size) { | |
2847 | return -EINVAL; | |
2848 | } | |
2849 | ||
2850 | while (len < size) { | |
2851 | roff = (off + len) % dev->rom_info.page_size; | |
2852 | rsize = dev->rom_info.page_size - roff; | |
2853 | if ((size - len) < rsize) { | |
2854 | rsize = (size - len); | |
2855 | } | |
2856 | roff = off + len; | |
2857 | ||
2858 | ret = ssd_spi_page_read(dev, (buf + len), roff, rsize); | |
2859 | if (ret) { | |
2860 | goto out; | |
2861 | } | |
2862 | ||
2863 | len += rsize; | |
2864 | ||
2865 | cond_resched(); | |
2866 | } | |
2867 | ||
2868 | out: | |
2869 | return ret; | |
2870 | } | |
2871 | ||
2872 | static int ssd_spi_write(struct ssd_device *dev, void *buf, uint32_t off, uint32_t size) | |
2873 | { | |
2874 | uint32_t len = 0; | |
2875 | uint32_t woff; | |
2876 | uint32_t wsize; | |
2877 | int ret = 0; | |
2878 | ||
2879 | if (!dev || !buf) { | |
2880 | return -EINVAL; | |
2881 | } | |
2882 | ||
2883 | if ((off % sizeof(uint32_t)) != 0 || (size % sizeof(uint32_t)) != 0 || size == 0 || | |
2884 | ((uint64_t)off + (uint64_t)size) > dev->rom_info.size) { | |
2885 | return -EINVAL; | |
2886 | } | |
2887 | ||
2888 | while (len < size) { | |
2889 | woff = (off + len) % dev->rom_info.page_size; | |
2890 | wsize = dev->rom_info.page_size - woff; | |
2891 | if ((size - len) < wsize) { | |
2892 | wsize = (size - len); | |
2893 | } | |
2894 | woff = off + len; | |
2895 | ||
2896 | ret = ssd_spi_page_write(dev, (buf + len), woff, wsize); | |
2897 | if (ret) { | |
2898 | goto out; | |
2899 | } | |
2900 | ||
2901 | len += wsize; | |
2902 | ||
2903 | cond_resched(); | |
2904 | } | |
2905 | ||
2906 | out: | |
2907 | return ret; | |
2908 | } | |
2909 | ||
2910 | static int ssd_spi_erase(struct ssd_device *dev, uint32_t off, uint32_t size) | |
2911 | { | |
2912 | uint32_t len = 0; | |
2913 | uint32_t eoff; | |
2914 | int ret = 0; | |
2915 | ||
2916 | if (!dev) { | |
2917 | return -EINVAL; | |
2918 | } | |
2919 | ||
2920 | if (size == 0 || ((uint64_t)off + (uint64_t)size) > dev->rom_info.size || | |
2921 | (off % dev->rom_info.block_size) != 0 || (size % dev->rom_info.block_size) != 0) { | |
2922 | return -EINVAL; | |
2923 | } | |
2924 | ||
2925 | while (len < size) { | |
2926 | eoff = (off + len); | |
2927 | ||
2928 | ret = ssd_spi_block_erase(dev, eoff); | |
2929 | if (ret) { | |
2930 | goto out; | |
2931 | } | |
2932 | ||
2933 | len += dev->rom_info.block_size; | |
2934 | ||
2935 | cond_resched(); | |
2936 | } | |
2937 | ||
2938 | out: | |
2939 | return ret; | |
2940 | } | |
2941 | ||
/* i2c access */
/* Thin wrapper over the generic 32-bit register read, for I2C registers. */
static uint32_t __ssd_i2c_reg32_read(void *addr)
{
	return ssd_reg32_read(addr);
}
2947 | ||
/*
 * Write an I2C controller register, then read it back.
 * The read-back forces the posted write to reach the device before the
 * caller proceeds; the returned value is deliberately discarded.
 */
static void __ssd_i2c_reg32_write(void *addr, uint32_t val)
{
	ssd_reg32_write(addr, val);
	ssd_reg32_read(addr);
}
2953 | ||
/*
 * Put the I2C controller back into a clean state after a transaction.
 *
 * Reads the status register; while status bit 2 is clear, drains one
 * entry from the read FIFO and re-checks (at most SSD_I2C_MAX_DATA
 * times, after which the controller is reset). Status bits 0-1 are
 * acknowledged by writing 0x04 to the command register; if bit 3 is
 * clear the controller is reset as well.
 *
 * Note the control flow: the loop is built from gotos, and the
 * out_reset label sits inside the final if-body so the FIFO-overrun
 * path jumps directly into the reset write.
 *
 * Returns 0 on success, -ETIMEDOUT if a register read never became
 * valid. Caller holds dev->i2c_mutex.
 */
static int __ssd_i2c_clear(struct ssd_device *dev, uint8_t saddr)
{
	ssd_i2c_ctrl_t ctrl;
	ssd_i2c_data_t data;
	uint8_t status = 0;
	int nr_data = 0;
	unsigned long st;
	int ret = 0;

check_status:
	/* request the status register */
	ctrl.bits.wdata = 0;
	ctrl.bits.addr = SSD_I2C_STATUS_REG;
	ctrl.bits.rw = SSD_I2C_CTRL_READ;
	__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

	/* wait for the read data to become valid */
	st = jiffies;
	for (;;) {
		data.val = __ssd_i2c_reg32_read(dev->ctrlp + SSD_I2C_RDATA_REG);
		if (data.bits.valid == 0) {
			break;
		}

		/* retry */
		if (time_after(jiffies, (st + SSD_I2C_TIMEOUT))) {
			ret = -ETIMEDOUT;
			goto out;
		}
		cond_resched();
	}
	status = data.bits.rdata;

	if (!(status & 0x4)) {
		/* clear read fifo data */
		ctrl.bits.wdata = 0;
		ctrl.bits.addr = SSD_I2C_DATA_REG;
		ctrl.bits.rw = SSD_I2C_CTRL_READ;
		__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

		st = jiffies;
		for (;;) {
			data.val = __ssd_i2c_reg32_read(dev->ctrlp + SSD_I2C_RDATA_REG);
			if (data.bits.valid == 0) {
				break;
			}

			/* retry */
			if (time_after(jiffies, (st + SSD_I2C_TIMEOUT))) {
				ret = -ETIMEDOUT;
				goto out;
			}
			cond_resched();
		}

		/* bounded drain: give up and reset after SSD_I2C_MAX_DATA entries */
		nr_data++;
		if (nr_data <= SSD_I2C_MAX_DATA) {
			goto check_status;
		} else {
			goto out_reset;
		}
	}

	if (status & 0x3) {
		/* clear int */
		ctrl.bits.wdata = 0x04;
		ctrl.bits.addr = SSD_I2C_CMD_REG;
		ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
		__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);
	}

	if (!(status & 0x8)) {
out_reset:
		/* reset i2c controller */
		ctrl.bits.wdata = 0x0;
		ctrl.bits.addr = SSD_I2C_RESET_REG;
		ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
		__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);
	}

out:
	return ret;
}
3035 | ||
/*
 * Perform one I2C master write of @size bytes from @buf to slave @saddr.
 *
 * Loads the slave address and payload into the controller, issues the
 * write command (0x01), then polls the status register until the
 * write-done bit (bit 0) is set or SSD_I2C_TIMEOUT elapses. Status
 * bit 5 = bus busy, bit 4 = missing ACK.
 *
 * Returns 0 on success; -ETIMEDOUT on register/status timeout;
 * -1 write not completed, -2 bus busy, -3 no ACK, -4 cleanup failed.
 * (Negative small ints are this driver's local convention, not errnos.)
 */
static int ssd_i2c_write(struct ssd_device *dev, uint8_t saddr, uint8_t size, uint8_t *buf)
{
	ssd_i2c_ctrl_t ctrl;
	ssd_i2c_data_t data;
	uint8_t off = 0;
	uint8_t status = 0;
	unsigned long st;
	int ret = 0;

	mutex_lock(&dev->i2c_mutex);

	ctrl.val = 0;

	/* slave addr */
	ctrl.bits.wdata = saddr;
	ctrl.bits.addr = SSD_I2C_SADDR_REG;
	ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
	__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

	/* data: push the payload into the controller FIFO one byte at a time */
	while (off < size) {
		ctrl.bits.wdata = buf[off];
		ctrl.bits.addr = SSD_I2C_DATA_REG;
		ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
		__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

		off++;
	}

	/* write: start the bus transaction */
	ctrl.bits.wdata = 0x01;
	ctrl.bits.addr = SSD_I2C_CMD_REG;
	ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
	__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

	/* wait: outer loop re-reads status, inner loop waits for a valid read */
	st = jiffies;
	for (;;) {
		ctrl.bits.wdata = 0;
		ctrl.bits.addr = SSD_I2C_STATUS_REG;
		ctrl.bits.rw = SSD_I2C_CTRL_READ;
		__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

		for (;;) {
			data.val = __ssd_i2c_reg32_read(dev->ctrlp + SSD_I2C_RDATA_REG);
			if (data.bits.valid == 0) {
				break;
			}

			/* retry */
			if (time_after(jiffies, (st + SSD_I2C_TIMEOUT))) {
				ret = -ETIMEDOUT;
				goto out_clear;
			}
			cond_resched();
		}

		status = data.bits.rdata;
		if (status & 0x1) {
			break;
		}

		if (time_after(jiffies, (st + SSD_I2C_TIMEOUT))) {
			ret = -ETIMEDOUT;
			goto out_clear;
		}
		cond_resched();
	}

	if (!(status & 0x1)) {
		ret = -1;
		goto out_clear;
	}

	/* busy ? */
	if (status & 0x20) {
		ret = -2;
		goto out_clear;
	}

	/* ack ? */
	if (status & 0x10) {
		ret = -3;
		goto out_clear;
	}

	/* clear */
out_clear:
	if (__ssd_i2c_clear(dev, saddr)) {
		if (!ret) ret = -4;
	}

	mutex_unlock(&dev->i2c_mutex);

	return ret;
}
3132 | ||
/*
 * Perform one I2C master read of @size bytes from slave @saddr into @buf.
 *
 * Programs the slave address and expected length, issues the read
 * command (0x02), polls status until the read-done bit (bit 1) is set,
 * then drains @size bytes from the controller data register. Status
 * bit 5 = bus busy, bit 4 = missing ACK.
 *
 * Returns 0 on success; -ETIMEDOUT on register/status timeout;
 * -1 read not completed, -2 bus busy, -3 no ACK, -4 cleanup failed.
 * (Negative small ints are this driver's local convention, not errnos.)
 */
static int ssd_i2c_read(struct ssd_device *dev, uint8_t saddr, uint8_t size, uint8_t *buf)
{
	ssd_i2c_ctrl_t ctrl;
	ssd_i2c_data_t data;
	uint8_t off = 0;
	uint8_t status = 0;
	unsigned long st;
	int ret = 0;

	mutex_lock(&dev->i2c_mutex);

	ctrl.val = 0;

	/* slave addr */
	ctrl.bits.wdata = saddr;
	ctrl.bits.addr = SSD_I2C_SADDR_REG;
	ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
	__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

	/* read len */
	ctrl.bits.wdata = size;
	ctrl.bits.addr = SSD_I2C_LEN_REG;
	ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
	__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

	/* read: start the bus transaction */
	ctrl.bits.wdata = 0x02;
	ctrl.bits.addr = SSD_I2C_CMD_REG;
	ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
	__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

	/* wait: outer loop re-reads status, inner loop waits for a valid read */
	st = jiffies;
	for (;;) {
		ctrl.bits.wdata = 0;
		ctrl.bits.addr = SSD_I2C_STATUS_REG;
		ctrl.bits.rw = SSD_I2C_CTRL_READ;
		__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

		for (;;) {
			data.val = __ssd_i2c_reg32_read(dev->ctrlp + SSD_I2C_RDATA_REG);
			if (data.bits.valid == 0) {
				break;
			}

			/* retry */
			if (time_after(jiffies, (st + SSD_I2C_TIMEOUT))) {
				ret = -ETIMEDOUT;
				goto out_clear;
			}
			cond_resched();
		}

		status = data.bits.rdata;
		if (status & 0x2) {
			break;
		}

		if (time_after(jiffies, (st + SSD_I2C_TIMEOUT))) {
			ret = -ETIMEDOUT;
			goto out_clear;
		}
		cond_resched();
	}

	if (!(status & 0x2)) {
		ret = -1;
		goto out_clear;
	}

	/* busy ? */
	if (status & 0x20) {
		ret = -2;
		goto out_clear;
	}

	/* ack ? */
	if (status & 0x10) {
		ret = -3;
		goto out_clear;
	}

	/* data: drain the received bytes, one register read each */
	while (off < size) {
		ctrl.bits.wdata = 0;
		ctrl.bits.addr = SSD_I2C_DATA_REG;
		ctrl.bits.rw = SSD_I2C_CTRL_READ;
		__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

		st = jiffies;
		for (;;) {
			data.val = __ssd_i2c_reg32_read(dev->ctrlp + SSD_I2C_RDATA_REG);
			if (data.bits.valid == 0) {
				break;
			}

			/* retry */
			if (time_after(jiffies, (st + SSD_I2C_TIMEOUT))) {
				ret = -ETIMEDOUT;
				goto out_clear;
			}
			cond_resched();
		}

		buf[off] = data.bits.rdata;

		off++;
	}

	/* clear */
out_clear:
	if (__ssd_i2c_clear(dev, saddr)) {
		if (!ret) ret = -4;
	}

	mutex_unlock(&dev->i2c_mutex);

	return ret;
}
3252 | ||
/*
 * Combined I2C write-then-read transaction (typical SMBus style):
 * send @wsize bytes from @wbuf to slave @saddr, then read @rsize bytes
 * back into @rbuf without releasing the bus in between (command 0x03).
 *
 * Completion is signalled by status bit 1 (read done), as in
 * ssd_i2c_read. Status bit 5 = bus busy, bit 4 = missing ACK.
 *
 * Returns 0 on success; -ETIMEDOUT on register/status timeout;
 * -1 not completed, -2 bus busy, -3 no ACK, -4 cleanup failed.
 * (Negative small ints are this driver's local convention, not errnos.)
 */
static int ssd_i2c_write_read(struct ssd_device *dev, uint8_t saddr, uint8_t wsize, uint8_t *wbuf, uint8_t rsize, uint8_t *rbuf)
{
	ssd_i2c_ctrl_t ctrl;
	ssd_i2c_data_t data;
	uint8_t off = 0;
	uint8_t status = 0;
	unsigned long st;
	int ret = 0;

	mutex_lock(&dev->i2c_mutex);

	ctrl.val = 0;

	/* slave addr */
	ctrl.bits.wdata = saddr;
	ctrl.bits.addr = SSD_I2C_SADDR_REG;
	ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
	__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

	/* data: push the write payload into the controller FIFO */
	off = 0;
	while (off < wsize) {
		ctrl.bits.wdata = wbuf[off];
		ctrl.bits.addr = SSD_I2C_DATA_REG;
		ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
		__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

		off++;
	}

	/* read len */
	ctrl.bits.wdata = rsize;
	ctrl.bits.addr = SSD_I2C_LEN_REG;
	ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
	__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

	/* write -> read: start the combined transaction */
	ctrl.bits.wdata = 0x03;
	ctrl.bits.addr = SSD_I2C_CMD_REG;
	ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
	__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

	/* wait: outer loop re-reads status, inner loop waits for a valid read */
	st = jiffies;
	for (;;) {
		ctrl.bits.wdata = 0;
		ctrl.bits.addr = SSD_I2C_STATUS_REG;
		ctrl.bits.rw = SSD_I2C_CTRL_READ;
		__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

		for (;;) {
			data.val = __ssd_i2c_reg32_read(dev->ctrlp + SSD_I2C_RDATA_REG);
			if (data.bits.valid == 0) {
				break;
			}

			/* retry */
			if (time_after(jiffies, (st + SSD_I2C_TIMEOUT))) {
				ret = -ETIMEDOUT;
				goto out_clear;
			}
			cond_resched();
		}

		status = data.bits.rdata;
		if (status & 0x2) {
			break;
		}

		if (time_after(jiffies, (st + SSD_I2C_TIMEOUT))) {
			ret = -ETIMEDOUT;
			goto out_clear;
		}
		cond_resched();
	}

	if (!(status & 0x2)) {
		ret = -1;
		goto out_clear;
	}

	/* busy ? */
	if (status & 0x20) {
		ret = -2;
		goto out_clear;
	}

	/* ack ? */
	if (status & 0x10) {
		ret = -3;
		goto out_clear;
	}

	/* data: drain the received bytes, one register read each */
	off = 0;
	while (off < rsize) {
		ctrl.bits.wdata = 0;
		ctrl.bits.addr = SSD_I2C_DATA_REG;
		ctrl.bits.rw = SSD_I2C_CTRL_READ;
		__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

		st = jiffies;
		for (;;) {
			data.val = __ssd_i2c_reg32_read(dev->ctrlp + SSD_I2C_RDATA_REG);
			if (data.bits.valid == 0) {
				break;
			}

			/* retry */
			if (time_after(jiffies, (st + SSD_I2C_TIMEOUT))) {
				ret = -ETIMEDOUT;
				goto out_clear;
			}
			cond_resched();
		}

		rbuf[off] = data.bits.rdata;

		off++;
	}

	/* clear */
out_clear:
	if (__ssd_i2c_clear(dev, saddr)) {
		if (!ret) ret = -4;
	}
	mutex_unlock(&dev->i2c_mutex);

	return ret;
}
3383 | ||
3384 | static int ssd_smbus_send_byte(struct ssd_device *dev, uint8_t saddr, uint8_t *buf) | |
3385 | { | |
3386 | int i = 0; | |
3387 | int ret = 0; | |
3388 | ||
3389 | for (;;) { | |
3390 | ret = ssd_i2c_write(dev, saddr, 1, buf); | |
3391 | if (!ret || -ETIMEDOUT == ret) { | |
3392 | break; | |
3393 | } | |
3394 | ||
3395 | i++; | |
3396 | if (i >= SSD_SMBUS_RETRY_MAX) { | |
3397 | break; | |
3398 | } | |
3399 | msleep(SSD_SMBUS_RETRY_INTERVAL); | |
3400 | } | |
3401 | ||
3402 | return ret; | |
3403 | } | |
3404 | ||
3405 | static int ssd_smbus_receive_byte(struct ssd_device *dev, uint8_t saddr, uint8_t *buf) | |
3406 | { | |
3407 | int i = 0; | |
3408 | int ret = 0; | |
3409 | ||
3410 | for (;;) { | |
3411 | ret = ssd_i2c_read(dev, saddr, 1, buf); | |
3412 | if (!ret || -ETIMEDOUT == ret) { | |
3413 | break; | |
3414 | } | |
3415 | ||
3416 | i++; | |
3417 | if (i >= SSD_SMBUS_RETRY_MAX) { | |
3418 | break; | |
3419 | } | |
3420 | msleep(SSD_SMBUS_RETRY_INTERVAL); | |
3421 | } | |
3422 | ||
3423 | return ret; | |
3424 | } | |
3425 | ||
3426 | static int ssd_smbus_write_byte(struct ssd_device *dev, uint8_t saddr, uint8_t cmd, uint8_t *buf) | |
3427 | { | |
3428 | uint8_t smb_data[SSD_SMBUS_DATA_MAX] = {0}; | |
3429 | int i = 0; | |
3430 | int ret = 0; | |
3431 | ||
3432 | smb_data[0] = cmd; | |
3433 | memcpy((smb_data + 1), buf, 1); | |
3434 | ||
3435 | for (;;) { | |
3436 | ret = ssd_i2c_write(dev, saddr, 2, smb_data); | |
3437 | if (!ret || -ETIMEDOUT == ret) { | |
3438 | break; | |
3439 | } | |
3440 | ||
3441 | i++; | |
3442 | if (i >= SSD_SMBUS_RETRY_MAX) { | |
3443 | break; | |
3444 | } | |
3445 | msleep(SSD_SMBUS_RETRY_INTERVAL); | |
3446 | } | |
3447 | ||
3448 | return ret; | |
3449 | } | |
3450 | ||
3451 | static int ssd_smbus_read_byte(struct ssd_device *dev, uint8_t saddr, uint8_t cmd, uint8_t *buf) | |
3452 | { | |
3453 | uint8_t smb_data[SSD_SMBUS_DATA_MAX] = {0}; | |
3454 | int i = 0; | |
3455 | int ret = 0; | |
3456 | ||
3457 | smb_data[0] = cmd; | |
3458 | ||
3459 | for (;;) { | |
3460 | ret = ssd_i2c_write_read(dev, saddr, 1, smb_data, 1, buf); | |
3461 | if (!ret || -ETIMEDOUT == ret) { | |
3462 | break; | |
3463 | } | |
3464 | ||
3465 | i++; | |
3466 | if (i >= SSD_SMBUS_RETRY_MAX) { | |
3467 | break; | |
3468 | } | |
3469 | msleep(SSD_SMBUS_RETRY_INTERVAL); | |
3470 | } | |
3471 | ||
3472 | return ret; | |
3473 | } | |
3474 | ||
3475 | static int ssd_smbus_write_word(struct ssd_device *dev, uint8_t saddr, uint8_t cmd, uint8_t *buf) | |
3476 | { | |
3477 | uint8_t smb_data[SSD_SMBUS_DATA_MAX] = {0}; | |
3478 | int i = 0; | |
3479 | int ret = 0; | |
3480 | ||
3481 | smb_data[0] = cmd; | |
3482 | memcpy((smb_data + 1), buf, 2); | |
3483 | ||
3484 | for (;;) { | |
3485 | ret = ssd_i2c_write(dev, saddr, 3, smb_data); | |
3486 | if (!ret || -ETIMEDOUT == ret) { | |
3487 | break; | |
3488 | } | |
3489 | ||
3490 | i++; | |
3491 | if (i >= SSD_SMBUS_RETRY_MAX) { | |
3492 | break; | |
3493 | } | |
3494 | msleep(SSD_SMBUS_RETRY_INTERVAL); | |
3495 | } | |
3496 | ||
3497 | return ret; | |
3498 | } | |
3499 | ||
3500 | static int ssd_smbus_read_word(struct ssd_device *dev, uint8_t saddr, uint8_t cmd, uint8_t *buf) | |
3501 | { | |
3502 | uint8_t smb_data[SSD_SMBUS_DATA_MAX] = {0}; | |
3503 | int i = 0; | |
3504 | int ret = 0; | |
3505 | ||
3506 | smb_data[0] = cmd; | |
3507 | ||
3508 | for (;;) { | |
3509 | ret = ssd_i2c_write_read(dev, saddr, 1, smb_data, 2, buf); | |
3510 | if (!ret || -ETIMEDOUT == ret) { | |
3511 | break; | |
3512 | } | |
3513 | ||
3514 | i++; | |
3515 | if (i >= SSD_SMBUS_RETRY_MAX) { | |
3516 | break; | |
3517 | } | |
3518 | msleep(SSD_SMBUS_RETRY_INTERVAL); | |
3519 | } | |
3520 | ||
3521 | return ret; | |
3522 | } | |
3523 | ||
3524 | static int ssd_smbus_write_block(struct ssd_device *dev, uint8_t saddr, uint8_t cmd, uint8_t size, uint8_t *buf) | |
3525 | { | |
3526 | uint8_t smb_data[SSD_SMBUS_DATA_MAX] = {0}; | |
3527 | int i = 0; | |
3528 | int ret = 0; | |
3529 | ||
3530 | smb_data[0] = cmd; | |
3531 | smb_data[1] = size; | |
3532 | memcpy((smb_data + 2), buf, size); | |
3533 | ||
3534 | for (;;) { | |
3535 | ret = ssd_i2c_write(dev, saddr, (2 + size), smb_data); | |
3536 | if (!ret || -ETIMEDOUT == ret) { | |
3537 | break; | |
3538 | } | |
3539 | ||
3540 | i++; | |
3541 | if (i >= SSD_SMBUS_RETRY_MAX) { | |
3542 | break; | |
3543 | } | |
3544 | msleep(SSD_SMBUS_RETRY_INTERVAL); | |
3545 | } | |
3546 | ||
3547 | return ret; | |
3548 | } | |
3549 | ||
3550 | static int ssd_smbus_read_block(struct ssd_device *dev, uint8_t saddr, uint8_t cmd, uint8_t size, uint8_t *buf) | |
3551 | { | |
3552 | uint8_t smb_data[SSD_SMBUS_DATA_MAX] = {0}; | |
3553 | uint8_t rsize; | |
3554 | int i = 0; | |
3555 | int ret = 0; | |
3556 | ||
3557 | smb_data[0] = cmd; | |
3558 | ||
3559 | for (;;) { | |
3560 | ret = ssd_i2c_write_read(dev, saddr, 1, smb_data, (SSD_SMBUS_BLOCK_MAX + 1), (smb_data + 1)); | |
3561 | if (!ret || -ETIMEDOUT == ret) { | |
3562 | break; | |
3563 | } | |
3564 | ||
3565 | i++; | |
3566 | if (i >= SSD_SMBUS_RETRY_MAX) { | |
3567 | break; | |
3568 | } | |
3569 | msleep(SSD_SMBUS_RETRY_INTERVAL); | |
3570 | } | |
3571 | if (ret) { | |
3572 | return ret; | |
3573 | } | |
3574 | ||
3575 | rsize = smb_data[1]; | |
3576 | ||
3577 | if (rsize > size ) { | |
3578 | rsize = size; | |
3579 | } | |
3580 | ||
3581 | memcpy(buf, (smb_data + 2), rsize); | |
3582 | ||
3583 | return 0; | |
3584 | } | |
3585 | ||
3586 | ||
3587 | static int ssd_gen_swlog(struct ssd_device *dev, uint16_t event, uint32_t data); | |
3588 | ||
3589 | /* sensor */ | |
3590 | static int ssd_init_lm75(struct ssd_device *dev, uint8_t saddr) | |
3591 | { | |
3592 | uint8_t conf = 0; | |
3593 | int ret = 0; | |
3594 | ||
3595 | ret = ssd_smbus_read_byte(dev, saddr, SSD_LM75_REG_CONF, &conf); | |
3596 | if (ret) { | |
3597 | goto out; | |
3598 | } | |
3599 | ||
3600 | conf &= (uint8_t)(~1u); | |
3601 | ||
3602 | ret = ssd_smbus_write_byte(dev, saddr, SSD_LM75_REG_CONF, &conf); | |
3603 | if (ret) { | |
3604 | goto out; | |
3605 | } | |
3606 | ||
3607 | out: | |
3608 | return ret; | |
3609 | } | |
3610 | ||
3611 | static int ssd_lm75_read(struct ssd_device *dev, uint8_t saddr, uint16_t *data) | |
3612 | { | |
3613 | uint16_t val = 0; | |
3614 | int ret; | |
3615 | ||
3616 | ret = ssd_smbus_read_word(dev, saddr, SSD_LM75_REG_TEMP, (uint8_t *)&val); | |
3617 | if (ret) { | |
3618 | return ret; | |
3619 | } | |
3620 | ||
3621 | *data = u16_swap(val); | |
3622 | ||
3623 | return 0; | |
3624 | } | |
3625 | ||
/*
 * Initialize an LM80 hardware monitor at SMBus address @saddr:
 * soft-start it, select 11-bit temperature resolution, program per-rail
 * voltage limits from ssd_lm80_limit[], configure the interrupt masks,
 * and finally start monitoring.
 *
 * Returns 0 on success or the first SMBus error encountered.
 */
static int ssd_init_lm80(struct ssd_device *dev, uint8_t saddr)
{
	uint8_t val;
	uint8_t low, high;
	int i;
	int ret = 0;

	/* init */
	val = 0x80;
	ret = ssd_smbus_write_byte(dev, saddr, SSD_LM80_REG_CONFIG, &val);
	if (ret) {
		goto out;
	}

	/* 11-bit temp */
	val = 0x08;
	ret = ssd_smbus_write_byte(dev, saddr, SSD_LM80_REG_RES, &val);
	if (ret) {
		goto out;
	}

	/* set volt limit */
	for (i=0; i<SSD_LM80_IN_NR; i++) {
		high = ssd_lm80_limit[i].high;
		low = ssd_lm80_limit[i].low;

		/* capacitor rail: no lower bound */
		if (SSD_LM80_IN_CAP == i) {
			low = 0;
		}

		/* single-controller boards: the 1V2 rail is absent, disable its limits */
		if (dev->hw_info.nr_ctrl <= 1 && SSD_LM80_IN_1V2 == i) {
			high = 0xFF;
			low = 0;
		}

		/* high limit */
		ret = ssd_smbus_write_byte(dev, saddr, SSD_LM80_REG_IN_MAX(i), &high);
		if (ret) {
			goto out;
		}

		/* low limit*/
		ret = ssd_smbus_write_byte(dev, saddr, SSD_LM80_REG_IN_MIN(i), &low);
		if (ret) {
			goto out;
		}
	}

	/* set interrupt mask: allow volt in interrupt except cap in*/
	val = 0x81;
	ret = ssd_smbus_write_byte(dev, saddr, SSD_LM80_REG_MASK1, &val);
	if (ret) {
		goto out;
	}

	/* set interrupt mask: disable others */
	val = 0xFF;
	ret = ssd_smbus_write_byte(dev, saddr, SSD_LM80_REG_MASK2, &val);
	if (ret) {
		goto out;
	}

	/* start */
	val = 0x03;
	ret = ssd_smbus_write_byte(dev, saddr, SSD_LM80_REG_CONFIG, &val);
	if (ret) {
		goto out;
	}

out:
	return ret;
}
3698 | ||
3699 | static int ssd_lm80_enable_in(struct ssd_device *dev, uint8_t saddr, int idx) | |
3700 | { | |
3701 | uint8_t val = 0; | |
3702 | int ret = 0; | |
3703 | ||
3704 | if (idx >= SSD_LM80_IN_NR || idx < 0) { | |
3705 | return -EINVAL; | |
3706 | } | |
3707 | ||
3708 | ret = ssd_smbus_read_byte(dev, saddr, SSD_LM80_REG_MASK1, &val); | |
3709 | if (ret) { | |
3710 | goto out; | |
3711 | } | |
3712 | ||
3713 | val &= ~(1UL << (uint32_t)idx); | |
3714 | ||
3715 | ret = ssd_smbus_write_byte(dev, saddr, SSD_LM80_REG_MASK1, &val); | |
3716 | if (ret) { | |
3717 | goto out; | |
3718 | } | |
3719 | ||
3720 | out: | |
3721 | return ret; | |
3722 | } | |
3723 | ||
/*
 * Disable the LM80 interrupt for voltage input channel @idx by setting
 * its bit in interrupt mask register 1 (read-modify-write over SMBus).
 * Mirror of ssd_lm80_enable_in().
 * Returns 0 on success, -EINVAL for an out-of-range channel, or the
 * negative error from the SMBus transfer.
 */
static int ssd_lm80_disable_in(struct ssd_device *dev, uint8_t saddr, int idx)
{
	uint8_t val = 0;
	int ret = 0;

	if (idx >= SSD_LM80_IN_NR || idx < 0) {
		return -EINVAL;
	}

	ret = ssd_smbus_read_byte(dev, saddr, SSD_LM80_REG_MASK1, &val);
	if (ret) {
		goto out;
	}

	/* mask bit set = interrupt disabled for this channel */
	val |= (1UL << (uint32_t)idx);

	ret = ssd_smbus_write_byte(dev, saddr, SSD_LM80_REG_MASK1, &val);
	if (ret) {
		goto out;
	}

out:
	return ret;
}
3748 | ||
/*
 * Read the LM80 temperature register into @data.
 * The 16-bit register arrives byte-swapped over SMBus, so the raw value
 * is passed through u16_swap() before being stored.
 * Returns 0 on success or the negative SMBus error.
 */
static int ssd_lm80_read_temp(struct ssd_device *dev, uint8_t saddr, uint16_t *data)
{
	uint16_t val = 0;
	int ret;

	ret = ssd_smbus_read_word(dev, saddr, SSD_LM80_REG_TEMP, (uint8_t *)&val);
	if (ret) {
		return ret;
	}

	/* fix byte order of the word read */
	*data = u16_swap(val);

	return 0;
}
da3355df SF |
/*
 * Record a sensor-fault software log entry.
 * Packs the low 16 bits of the error code @ret into the upper half of the
 * log payload and the sensor SMBus address into the lower half.
 * NOTE(review): @addr is OR-ed into both byte 1 and byte 0 of the payload;
 * one of the two was presumably meant to carry something else (e.g. a
 * register or channel id) — TODO confirm against the log parser.
 * Always returns 0.
 */
static int ssd_generate_sensor_fault_log(struct ssd_device *dev, uint16_t event, uint8_t addr,uint32_t ret)
{
	uint32_t data;
	data = ((ret & 0xffff) << 16) | (addr << 8) | addr;
	ssd_gen_swlog(dev,event,data);
	return 0;
}
361ebed5 HSDT |
/*
 * Service an LM80 voltage-alarm interrupt.
 *
 * Reads both alarm status registers (which clears the interrupt), then for
 * each voltage input channel:
 *  - alarm bit clear: re-enable the channel interrupt if we had previously
 *    masked it, and clear the per-channel hwmon flag;
 *  - alarm bit set: mask the channel interrupt, and if this is a new event
 *    (hwmon flag not yet set) debounce it by re-reading the voltage up to
 *    three times before logging an out-of-range condition.
 *
 * On any SMBus failure the whole sensor is flagged faulty (once) via
 * ssd_generate_sensor_fault_log(); on success the sensor-fault flag is
 * cleared.  Returns 0 or the negative SMBus error.
 */
static int ssd_lm80_check_event(struct ssd_device *dev, uint8_t saddr)
{
	uint32_t volt;
	uint16_t val = 0, status;
	uint8_t alarm1 = 0, alarm2 = 0;
	uint32_t low, high;
	int i,j=0;
	int ret = 0;

	/* read interrupt status to clear interrupt */
	ret = ssd_smbus_read_byte(dev, saddr, SSD_LM80_REG_ALARM1, &alarm1);
	if (ret) {
		goto out;
	}

	ret = ssd_smbus_read_byte(dev, saddr, SSD_LM80_REG_ALARM2, &alarm2);
	if (ret) {
		goto out;
	}

	/* combine the two 8-bit alarm registers into one 16-bit status word */
	status = (uint16_t)alarm1 | ((uint16_t)alarm2 << 8);

	/* parse inetrrupt status */
	for (i=0; i<SSD_LM80_IN_NR; i++) {
		if (!((status >> (uint32_t)i) & 0x1)) {
			/* channel back to normal: re-arm its interrupt if we had masked it */
			if (test_and_clear_bit(SSD_HWMON_LM80(i), &dev->hwmon)) {
				/* enable INx irq */
				ret = ssd_lm80_enable_in(dev, saddr, i);
				if (ret) {
					goto out;
				}
			}

			continue;
		}

		/* disable INx irq */
		ret = ssd_lm80_disable_in(dev, saddr, i);
		if (ret) {
			goto out;
		}

		/* already reported: don't log the same event again */
		if (test_and_set_bit(SSD_HWMON_LM80(i), &dev->hwmon)) {
			continue;
		}

		/* limits table is in 10mV-style units: scale by 10 to match the reading */
		high = (uint32_t)ssd_lm80_limit[i].high * (uint32_t)10;
		low = (uint32_t)ssd_lm80_limit[i].low * (uint32_t)10;

		/* debounce: require three consecutive out-of-range readings */
		for (j=0; j<3; j++) {
			ret = ssd_smbus_read_word(dev, saddr, SSD_LM80_REG_IN(i), (uint8_t *)&val);
			if (ret) {
				goto out;
			}
			volt = SSD_LM80_CONVERT_VOLT(u16_swap(val));
			if ((volt>high) || (volt<=low)) {
				if(j<2) {
					msleep(SSD_LM80_CONV_INTERVAL);
				}
			} else {
				break;
			}
		}

		/* broke out early => reading recovered, treat as transient */
		if (j<3) {
			continue;
		}

		/* log the fault with a channel-specific payload */
		switch (i) {
		case SSD_LM80_IN_CAP: {
			if (0 == volt) {
				ssd_gen_swlog(dev, SSD_LOG_CAP_SHORT_CIRCUIT, 0);
			} else {
				ssd_gen_swlog(dev, SSD_LOG_CAP_VOLT_FAULT, SSD_PL_CAP_VOLT(volt));
			}
			break;
		}

		case SSD_LM80_IN_1V2:
		case SSD_LM80_IN_1V2a:
		case SSD_LM80_IN_1V5:
		case SSD_LM80_IN_1V8: {
			ssd_gen_swlog(dev, SSD_LOG_VOLT_STATUS, SSD_VOLT_LOG_DATA(i, 0, volt));
			break;
		}
		case SSD_LM80_IN_FPGA_3V3:
		case SSD_LM80_IN_3V3: {
			ssd_gen_swlog(dev, SSD_LOG_VOLT_STATUS, SSD_VOLT_LOG_DATA(i, 0, SSD_LM80_3V3_VOLT(volt)));
			break;
		}
		default:
			break;
		}
	}

out:
	if (ret) {
		/* flag the sensor faulty only once until it recovers */
		if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80), &dev->hwmon)) {
			ssd_generate_sensor_fault_log(dev, SSD_LOG_SENSOR_FAULT, (uint32_t)saddr,ret);
		}
	} else {
		test_and_clear_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80), &dev->hwmon);
	}
	return ret;
}
3875 | ||
da3355df | 3876 | |
361ebed5 HSDT |
/*
 * Initialize the on-board temperature/voltage sensors.
 * LM75 is initialized on all protocol >= v3.2 boards; LM80 only on PCB
 * revision 'B' or later, or on HHHL form-factor cards.  A failed init is
 * logged (once) as a sensor fault.  In non-standard driver mode sensor
 * failures are tolerated and 0 is returned regardless.
 */
static int ssd_init_sensor(struct ssd_device *dev)
{
	int ret = 0;

	/* older protocol versions have no sensors to set up */
	if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
		goto out;
	}

	ret = ssd_init_lm75(dev, SSD_SENSOR_LM75_SADDRESS);
	if (ret) {
		hio_warn("%s: init lm75 failed\n", dev->name);
		if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM75), &dev->hwmon)) {
			ssd_generate_sensor_fault_log(dev, SSD_LOG_SENSOR_FAULT, SSD_SENSOR_LM75_SADDRESS,ret);
		}
		goto out;
	}

	if (dev->hw_info.pcb_ver >= 'B' || dev->hw_info_ext.form_factor == SSD_FORM_FACTOR_HHHL) {
		ret = ssd_init_lm80(dev, SSD_SENSOR_LM80_SADDRESS);
		if (ret) {
			hio_warn("%s: init lm80 failed\n", dev->name);
			if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80), &dev->hwmon)) {
				ssd_generate_sensor_fault_log(dev, SSD_LOG_SENSOR_FAULT, SSD_SENSOR_LM80_SADDRESS,ret);
			}
			goto out;
		}
	}

out:
	/* skip error if not in standard mode */
	if (mode != SSD_DRV_MODE_STANDARD) {
		ret = 0;
	}
	return ret;
}
3912 | ||
3913 | /* board volt */ | |
3914 | static int ssd_mon_boardvolt(struct ssd_device *dev) | |
3915 | { | |
3916 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
3917 | return 0; | |
3918 | } | |
3919 | ||
3920 | if (dev->hw_info_ext.form_factor == SSD_FORM_FACTOR_FHHL && dev->hw_info.pcb_ver < 'B') { | |
3921 | return 0; | |
3922 | } | |
3923 | ||
3924 | return ssd_lm80_check_event(dev, SSD_SENSOR_LM80_SADDRESS); | |
3925 | } | |
3926 | ||
3927 | /* temperature */ | |
/*
 * Poll the inlet (LM80) and flash (LM75) temperatures and log
 * over-temperature / back-to-normal transitions with hysteresis.
 * Each condition is logged only on the state change, tracked via
 * per-sensor bits in dev->hwmon.  A read failure marks the sensor
 * faulty (once) and aborts the poll.
 * No-op on hardware without sensors (protocol < v3.2, or FHHL PCB < 'B').
 */
static int ssd_mon_temp(struct ssd_device *dev)
{
	int cur;
	uint16_t val = 0;
	int ret = 0;

	if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
		return 0;
	}

	if (dev->hw_info_ext.form_factor == SSD_FORM_FACTOR_FHHL && dev->hw_info.pcb_ver < 'B') {
		return 0;
	}

	/* inlet */
	ret = ssd_lm80_read_temp(dev, SSD_SENSOR_LM80_SADDRESS, &val);
	if (ret) {
		if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80), &dev->hwmon)) {
			ssd_generate_sensor_fault_log(dev, SSD_LOG_TEMP_SENSOR_EVENT, SSD_SENSOR_LM80_SADDRESS,ret);
		}
		goto out;
	}
	test_and_clear_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80), &dev->hwmon);

	cur = SSD_SENSOR_CONVERT_TEMP(val);
	/* hysteresis: alarm above OT_TEMP, clear only below OT_HYST */
	if (cur >= SSD_INLET_OT_TEMP) {
		if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_INLET), &dev->hwmon)) {
			ssd_gen_swlog(dev, SSD_LOG_INLET_OVER_TEMP, (uint32_t)cur);
		}
	} else if(cur < SSD_INLET_OT_HYST) {
		if (test_and_clear_bit(SSD_HWMON_TEMP(SSD_TEMP_INLET), &dev->hwmon)) {
			ssd_gen_swlog(dev, SSD_LOG_INLET_NORMAL_TEMP, (uint32_t)cur);
		}
	}

	/* flash */
	ret = ssd_lm75_read(dev, SSD_SENSOR_LM75_SADDRESS, &val);
	if (ret) {
		if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM75), &dev->hwmon)) {
			ssd_generate_sensor_fault_log(dev, SSD_LOG_TEMP_SENSOR_EVENT, SSD_SENSOR_LM75_SADDRESS,ret);
		}
		goto out;
	}
	test_and_clear_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM75), &dev->hwmon);

	cur = SSD_SENSOR_CONVERT_TEMP(val);
	if (cur >= SSD_FLASH_OT_TEMP) {
		if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_FLASH), &dev->hwmon)) {
			ssd_gen_swlog(dev, SSD_LOG_FLASH_OVER_TEMP, (uint32_t)cur);
		}
	} else if(cur < SSD_FLASH_OT_HYST) {
		if (test_and_clear_bit(SSD_HWMON_TEMP(SSD_TEMP_FLASH), &dev->hwmon)) {
			ssd_gen_swlog(dev, SSD_LOG_FLASH_NORMAL_TEMP, (uint32_t)cur);
		}
	}

out:
	return ret;
}
3987 | ||
3988 | /* cmd tag */ | |
/* Release command tag @tag back to the bitmap and wake one tag waiter. */
static inline void ssd_put_tag(struct ssd_device *dev, int tag)
{
	test_and_clear_bit(tag, dev->tag_map);
	wake_up(&dev->tag_wq);
}
3994 | ||
/*
 * Allocate a free command tag.
 * Tags at or above the current queue_depth are unavailable (queue_depth
 * drops to 0 while a barrier drains the queue).  When @wait is nonzero the
 * caller sleeps uninterruptibly on the tag waitqueue until a tag frees up;
 * otherwise -1 is returned immediately.
 */
static inline int ssd_get_tag(struct ssd_device *dev, int wait)
{
	int tag;

find_tag:
	while ((tag = find_first_zero_bit(dev->tag_map, dev->hw_info.cmd_fifo_sz)) >= atomic_read(&dev->queue_depth)) {
		DEFINE_WAIT(__wait);

		if (!wait) {
			return -1;
		}

		prepare_to_wait_exclusive(&dev->tag_wq, &__wait, TASK_UNINTERRUPTIBLE);
		schedule();

		finish_wait(&dev->tag_wq, &__wait);
	}

	/* the lookup is not atomic with the claim: retry if someone beat us to it */
	if (test_and_set_bit(tag, dev->tag_map)) {
		goto find_tag;
	}

	return tag;
}
4019 | ||
/* Release a barrier tag without waking tag waiters (queue is drained). */
static void ssd_barrier_put_tag(struct ssd_device *dev, int tag)
{
	test_and_clear_bit(tag, dev->tag_map);
}
4024 | ||
4025 | static int ssd_barrier_get_tag(struct ssd_device *dev) | |
4026 | { | |
4027 | int tag = 0; | |
4028 | ||
4029 | if (test_and_set_bit(tag, dev->tag_map)) { | |
4030 | return -1; | |
4031 | } | |
4032 | ||
4033 | return tag; | |
4034 | } | |
4035 | ||
/*
 * End a barrier started by ssd_barrier_start(): restore the full queue
 * depth, wake everyone blocked in ssd_get_tag(), and drop the barrier lock.
 */
static void ssd_barrier_end(struct ssd_device *dev)
{
	atomic_set(&dev->queue_depth, dev->hw_info.cmd_fifo_sz);
	wake_up_all(&dev->tag_wq);

	mutex_unlock(&dev->barrier_mutex);
}
4043 | ||
/*
 * Begin a barrier: block new tag allocation (queue_depth = 0) and wait up
 * to SSD_CMD_TIMEOUT ticks for all in-flight commands to finish.
 * On success returns 0 with the barrier mutex held (released later by
 * ssd_barrier_end()).  On timeout the queue is restored, waiters are woken,
 * the mutex is dropped, and -EBUSY is returned.
 */
static int ssd_barrier_start(struct ssd_device *dev)
{
	int i;

	mutex_lock(&dev->barrier_mutex);

	/* stop ssd_get_tag() from handing out new tags */
	atomic_set(&dev->queue_depth, 0);

	for (i=0; i<SSD_CMD_TIMEOUT; i++) {
		/* no bit set => no command in flight */
		if (find_first_bit(dev->tag_map, dev->hw_info.cmd_fifo_sz) >= dev->hw_info.cmd_fifo_sz) {
			return 0;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(1);
	}

	/* timed out: undo the barrier before reporting failure */
	atomic_set(&dev->queue_depth, dev->hw_info.cmd_fifo_sz);
	wake_up_all(&dev->tag_wq);

	mutex_unlock(&dev->barrier_mutex);

	return -EBUSY;
}
4068 | ||
4069 | static int ssd_busy(struct ssd_device *dev) | |
4070 | { | |
4071 | if (find_first_bit(dev->tag_map, dev->hw_info.cmd_fifo_sz) >= dev->hw_info.cmd_fifo_sz) { | |
4072 | return 0; | |
4073 | } | |
4074 | ||
4075 | return 1; | |
4076 | } | |
4077 | ||
/*
 * Wait up to SSD_CMD_TIMEOUT ticks for all in-flight commands to drain.
 * Unlike ssd_barrier_start() this does not block new allocations.
 * Returns 0 when idle, -EBUSY on timeout.
 */
static int ssd_wait_io(struct ssd_device *dev)
{
	int i;

	for (i=0; i<SSD_CMD_TIMEOUT; i++) {
		if (find_first_bit(dev->tag_map, dev->hw_info.cmd_fifo_sz) >= dev->hw_info.cmd_fifo_sz) {
			return 0;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(1);
	}

	return -EBUSY;
}
4093 | ||
#if 0
/* Unused helper: nonzero while a barrier (queue_depth == 0) is active. */
static int ssd_in_barrier(struct ssd_device *dev)
{
	return (0 == atomic_read(&dev->queue_depth));
}
#endif
4100 | ||
/* Free the tag bitmap allocated by ssd_init_tag(). */
static void ssd_cleanup_tag(struct ssd_device *dev)
{
	kfree(dev->tag_map);
}
4105 | ||
/*
 * Set up the command tagging machinery: the barrier mutex, the queue depth
 * counter, the zeroed tag bitmap (one bit per FIFO slot, rounded up to
 * whole unsigned longs) and the tag waitqueue.
 * Returns 0 or -ENOMEM.
 */
static int ssd_init_tag(struct ssd_device *dev)
{
	int nr_ulongs = ALIGN(dev->hw_info.cmd_fifo_sz, BITS_PER_LONG) / BITS_PER_LONG;

	mutex_init(&dev->barrier_mutex);

	atomic_set(&dev->queue_depth, dev->hw_info.cmd_fifo_sz);

	/* GFP_ATOMIC here — presumably to tolerate atomic callers; TODO confirm,
	 * GFP_KERNEL (or kzalloc) would be preferable if init sleeps anyway */
	dev->tag_map = kmalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC);
	if (!dev->tag_map) {
		return -ENOMEM;
	}

	memset(dev->tag_map, 0, nr_ulongs * sizeof(unsigned long));

	init_waitqueue_head(&dev->tag_wq);

	return 0;
}
4125 | ||
4126 | /* io stat */ | |
/*
 * Account the completion of a bio: add the elapsed ticks to the per-disk
 * statistics and decrement the in-flight counter.
 * Four variants selected at compile time by kernel/RHEL version; only the
 * pre-3.0 paths need the driver's own in_flight_lock because the old
 * in_flight counters are plain integers with no internal locking.
 */
static void ssd_end_io_acct(struct ssd_cmd *cmd)
{
	struct ssd_device *dev = cmd->dev;
	struct bio *bio = cmd->bio;
	unsigned long dur = jiffies - cmd->start_time;
	int rw = bio_data_dir(bio);
#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6 && RHEL_MINOR >= 7))
#else
	unsigned long flag;
#endif

#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6 && RHEL_MINOR >= 7))
	/* modern kernels: per-partition stats with built-in locking */
	int cpu = part_stat_lock();
	struct hd_struct *part = disk_map_sector_rcu(dev->gd, bio_start(bio));
	part_round_stats(cpu, part);
	part_stat_add(cpu, part, ticks[rw], dur);
	part_dec_in_flight(part, rw);
	part_stat_unlock();
#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
	int cpu = part_stat_lock();
	struct hd_struct *part = &dev->gd->part0;
	part_round_stats(cpu, part);
	part_stat_add(cpu, part, ticks[rw], dur);

	/* in_flight is a bare counter here: guard with the driver lock */
	spin_lock_irqsave(&dev->in_flight_lock,flag);
	part->in_flight[rw]--;
	spin_unlock_irqrestore(&dev->in_flight_lock,flag);

	part_stat_unlock();

#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14))
	preempt_disable();
	disk_round_stats(dev->gd);
	disk_stat_add(dev->gd, ticks[rw], dur);

	spin_lock_irqsave(&dev->in_flight_lock,flag);
	dev->gd->in_flight--;
	spin_unlock_irqrestore(&dev->in_flight_lock,flag);

	preempt_enable();

#else
	/* oldest kernels: read/write tick counters are separate fields */
	preempt_disable();
	disk_round_stats(dev->gd);
	if (rw == WRITE) {
		disk_stat_add(dev->gd, write_ticks, dur);
	} else {
		disk_stat_add(dev->gd, read_ticks, dur);
	}
	spin_lock_irqsave(&dev->in_flight_lock,flag);
	dev->gd->in_flight--;
	spin_unlock_irqrestore(&dev->in_flight_lock,flag);

	preempt_enable();

#endif
}
4184 | ||
/*
 * Account the start of a bio: bump the per-disk I/O and sector counters,
 * increment in-flight, and stamp cmd->start_time for the completion side
 * (ssd_end_io_acct).  Same four version-dependent variants as the
 * completion path; pre-3.0 paths guard the raw in_flight counter with
 * the driver's in_flight_lock.
 */
static void ssd_start_io_acct(struct ssd_cmd *cmd)
{
	struct ssd_device *dev = cmd->dev;
	struct bio *bio = cmd->bio;
	int rw = bio_data_dir(bio);
#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6 && RHEL_MINOR >= 7))
#else
	unsigned long flag;
#endif

#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6 && RHEL_MINOR >= 7))
	int cpu = part_stat_lock();
	struct hd_struct *part = disk_map_sector_rcu(dev->gd, bio_start(bio));
	part_round_stats(cpu, part);
	part_stat_inc(cpu, part, ios[rw]);
	part_stat_add(cpu, part, sectors[rw], bio_sectors(bio));
	part_inc_in_flight(part, rw);
	part_stat_unlock();
#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
	int cpu = part_stat_lock();
	struct hd_struct *part = &dev->gd->part0;
	part_round_stats(cpu, part);
	part_stat_inc(cpu, part, ios[rw]);
	part_stat_add(cpu, part, sectors[rw], bio_sectors(bio));

	spin_lock_irqsave(&dev->in_flight_lock,flag);
	part->in_flight[rw]++;
	spin_unlock_irqrestore(&dev->in_flight_lock,flag);

	part_stat_unlock();

#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14))
	preempt_disable();
	disk_round_stats(dev->gd);
	disk_stat_inc(dev->gd, ios[rw]);
	disk_stat_add(dev->gd, sectors[rw], bio_sectors(bio));

	spin_lock_irqsave(&dev->in_flight_lock,flag);
	dev->gd->in_flight++;
	spin_unlock_irqrestore(&dev->in_flight_lock,flag);

	preempt_enable();
#else
	/* oldest kernels: separate read/write counter fields */
	preempt_disable();
	disk_round_stats(dev->gd);
	if (rw == WRITE) {
		disk_stat_inc(dev->gd, writes);
		disk_stat_add(dev->gd, write_sectors, bio_sectors(bio));
	} else {
		disk_stat_inc(dev->gd, reads);
		disk_stat_add(dev->gd, read_sectors, bio_sectors(bio));
	}

	spin_lock_irqsave(&dev->in_flight_lock,flag);
	dev->gd->in_flight++;
	spin_unlock_irqrestore(&dev->in_flight_lock,flag);

	preempt_enable();

#endif

	/* completion side computes latency from this stamp */
	cmd->start_time = jiffies;
}
4248 | ||
4249 | /* io */ | |
/* Queue @bio on the send list and wake the send kthread. */
static void ssd_queue_bio(struct ssd_device *dev, struct bio *bio)
{
	spin_lock(&dev->sendq_lock);
	ssd_blist_add(&dev->sendq, bio);
	spin_unlock(&dev->sendq_lock);

	atomic_inc(&dev->in_sendq);
	wake_up(&dev->send_waitq);
}
4259 | ||
/*
 * Complete a command.
 * bio-backed commands: finish I/O accounting and DMA unmapping (skipped
 * for discards and for pre-mapped "pbio" commands where cmd->flag is set),
 * release the tag, then either end the bio directly (MSI-X mode, low tags,
 * or errors) or defer it to the done kthread via the done queue.
 * Non-bio commands (synchronous internal requests): signal the waiter.
 */
static inline void ssd_end_request(struct ssd_cmd *cmd)
{
	struct ssd_device *dev = cmd->dev;
	struct bio *bio = cmd->bio;
	int errors = cmd->errors;
	int tag = cmd->tag;

	if (bio) {
		if (!ssd_bio_has_discard(bio)) {
			ssd_end_io_acct(cmd);
			/* cmd->flag set => buffers were not mapped via pci_map_sg */
			if (!cmd->flag) {
				pci_unmap_sg(dev->pdev, cmd->sgl, cmd->nsegs,
					bio_data_dir(bio) == READ ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
			}
		}

		cmd->bio = NULL;
		ssd_put_tag(dev, tag);

		if (SSD_INT_MSIX == dev->int_mode || tag < 16 || errors) {
			ssd_bio_endio(bio, errors);
		} else /* if (bio->bi_idx >= bio->bi_vcnt)*/ {
			/* hand off to the done kthread to spread completion work */
			spin_lock(&dev->doneq_lock);
			ssd_blist_add(&dev->doneq, bio);
			spin_unlock(&dev->doneq_lock);

			atomic_inc(&dev->in_doneq);
			wake_up(&dev->done_waitq);
		}
	} else {
		if (cmd->waiting) {
			complete(cmd->waiting);
		}
	}
}
4295 | ||
/*
 * Fail a command whose timer expired.
 * All device IRQs are disabled around the completion so the normal
 * completion path cannot race with this forced -ETIMEDOUT finish.
 * Bumps the timeout counter and lights the alarm LED.
 */
static void ssd_end_timeout_request(struct ssd_cmd *cmd)
{
	struct ssd_device *dev = cmd->dev;
	struct ssd_rw_msg *msg = (struct ssd_rw_msg *)cmd->msg;
	int i;

	for (i=0; i<dev->nr_queue; i++) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
		disable_irq(dev->entry[i].vector);
#else
		disable_irq(pci_irq_vector(dev->pdev, i));
#endif
	}

	atomic_inc(&dev->tocnt);
	//if (cmd->bio) {
	hio_err("%s: cmd timeout: tag %d fun %#x\n", dev->name, msg->tag, msg->fun);
	cmd->errors = -ETIMEDOUT;
	ssd_end_request(cmd);
	//}

	for (i=0; i<dev->nr_queue; i++) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
		enable_irq(dev->entry[i].vector);
#else
		enable_irq(pci_irq_vector(dev->pdev, i));
#endif
	}

	/* alarm led */
	ssd_set_alarm(dev);
}
4328 | ||
4329 | /* cmd timer */ | |
/*
 * Arm the per-command timeout timer: @complt fires after @timeout jiffies
 * with the command itself as the callback argument.
 */
static void ssd_cmd_add_timer(struct ssd_cmd *cmd, int timeout, void (*complt)(struct ssd_cmd *))
{
	init_timer(&cmd->cmd_timer);

	cmd->cmd_timer.data = (unsigned long)cmd;
	cmd->cmd_timer.expires = jiffies + timeout;
	cmd->cmd_timer.function = (void (*)(unsigned long)) complt;

	add_timer(&cmd->cmd_timer);
}
4340 | ||
/*
 * Cancel a command's timeout timer.  Returns nonzero if the timer was
 * still pending — i.e. the completion won the race against the timeout.
 */
static int ssd_cmd_del_timer(struct ssd_cmd *cmd)
{
	return del_timer(&cmd->cmd_timer);
}
4345 | ||
/* Generic variant of ssd_cmd_add_timer for arbitrary @data payloads. */
static void ssd_add_timer(struct timer_list *timer, int timeout, void (*complt)(void *), void *data)
{
	init_timer(timer);

	timer->data = (unsigned long)data;
	timer->expires = jiffies + timeout;
	timer->function = (void (*)(unsigned long)) complt;

	add_timer(timer);
}
4356 | ||
/* Cancel @timer; nonzero return means it had not fired yet. */
static int ssd_del_timer(struct timer_list *timer)
{
	return del_timer(timer);
}
4361 | ||
/*
 * Command timer callback: force-fail the command and log the first
 * 32 bits of its message (captured before the command is recycled).
 */
static void ssd_cmd_timeout(struct ssd_cmd *cmd)
{
	struct ssd_device *dev = cmd->dev;
	uint32_t msg = *(uint32_t *)cmd->msg;

	ssd_end_timeout_request(cmd);

	ssd_gen_swlog(dev, SSD_LOG_TIMEOUT, msg);
}
4371 | ||
4372 | ||
/*
 * Completion tasklet: drain this CPU's done list (spliced to a local list
 * under disabled IRQs so the interrupt producer can keep appending) and
 * finish each command.
 */
static void __ssd_done(unsigned long data)
{
	struct ssd_cmd *cmd;
	LIST_HEAD(localq);

	local_irq_disable();
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0))
	list_splice_init(&__get_cpu_var(ssd_doneq), &localq);
#else
	list_splice_init(this_cpu_ptr(&ssd_doneq), &localq);
#endif
	local_irq_enable();

	while (!list_empty(&localq)) {
		cmd = list_entry(localq.next, struct ssd_cmd, list);
		list_del_init(&cmd->list);

		ssd_end_request(cmd);
	}
}
4393 | ||
/*
 * Debug variant of the completion tasklet used for fault injection:
 * before finishing each command, inject -EIO / -EROFS errors into bios
 * whose sector range overlaps the configured debug region (db_info),
 * depending on direction and the configured debug type.
 */
static void __ssd_done_db(unsigned long data)
{
	struct ssd_cmd *cmd;
	struct ssd_device *dev;
	struct bio *bio;
	LIST_HEAD(localq);

	local_irq_disable();
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0))
	list_splice_init(&__get_cpu_var(ssd_doneq), &localq);
#else
	list_splice_init(this_cpu_ptr(&ssd_doneq), &localq);
#endif
	local_irq_enable();

	while (!list_empty(&localq)) {
		cmd = list_entry(localq.next, struct ssd_cmd, list);
		list_del_init(&cmd->list);

		dev = (struct ssd_device *)cmd->dev;
		bio = cmd->bio;

		if (bio) {
			sector_t off = dev->db_info.data.loc.off;
			uint32_t len = dev->db_info.data.loc.len;

			/* overlap test: bio range intersects [off, off+len) */
			switch (dev->db_info.type) {
			case SSD_DEBUG_READ_ERR:
				if (bio_data_dir(bio) == READ &&
					!((off + len) <= bio_start(bio) || off >= (bio_start(bio) + bio_sectors(bio)))) {
					cmd->errors = -EIO;
				}
				break;
			case SSD_DEBUG_WRITE_ERR:
				if (bio_data_dir(bio) == WRITE &&
					!((off + len) <= bio_start(bio) || off >= (bio_start(bio) + bio_sectors(bio)))) {
					cmd->errors = -EROFS;
				}
				break;
			case SSD_DEBUG_RW_ERR:
				if (!((off + len) <= bio_start(bio) || off >= (bio_start(bio) + bio_sectors(bio)))) {
					if (bio_data_dir(bio) == READ) {
						cmd->errors = -EIO;
					} else {
						cmd->errors = -EROFS;
					}
				}
				break;
			default:
				break;
			}
		}

		ssd_end_request(cmd);
	}
}
4450 | ||
/*
 * IRQ-side completion: cancel the command timer and queue the command on
 * this CPU's done list, then schedule the completion tasklet.
 * If the timer was already gone the timeout path owns the command — log
 * the spurious completion, light the alarm LED and drop it.
 */
static inline void ssd_done_bh(struct ssd_cmd *cmd)
{
	unsigned long flags = 0;

	if (unlikely(!ssd_cmd_del_timer(cmd))) {
		struct ssd_device *dev = cmd->dev;
		struct ssd_rw_msg *msg = (struct ssd_rw_msg *)cmd->msg;
		hio_err("%s: unknown cmd: tag %d fun %#x\n", dev->name, msg->tag, msg->fun);

		/* alarm led */
		ssd_set_alarm(dev);
		return;
	}

	local_irq_save(flags);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0))
	list_add_tail(&cmd->list, &__get_cpu_var(ssd_doneq));
	tasklet_hi_schedule(&__get_cpu_var(ssd_tasklet));
#else
	list_add_tail(&cmd->list, this_cpu_ptr(&ssd_doneq));
	tasklet_hi_schedule(this_cpu_ptr(&ssd_tasklet));
#endif
	local_irq_restore(flags);

	return;
}
4477 | ||
/*
 * Synchronous completion (no tasklet deferral): cancel the timer and
 * finish the command inline.  Same lost-race handling as ssd_done_bh().
 */
static inline void ssd_done(struct ssd_cmd *cmd)
{
	if (unlikely(!ssd_cmd_del_timer(cmd))) {
		struct ssd_device *dev = cmd->dev;
		struct ssd_rw_msg *msg = (struct ssd_rw_msg *)cmd->msg;
		hio_err("%s: unknown cmd: tag %d fun %#x\n", dev->name, msg->tag, msg->fun);

		/* alarm led */
		ssd_set_alarm(dev);
		return;
	}

	ssd_end_request(cmd);

	return;
}
4494 | ||
/*
 * Submit a command by writing its DMA message address to the request FIFO
 * register (legacy dispatch mode); arms the command timeout first.
 * cmd_lock serializes the register write.
 */
static inline void ssd_dispatch_cmd(struct ssd_cmd *cmd)
{
	struct ssd_device *dev = (struct ssd_device *)cmd->dev;

	ssd_cmd_add_timer(cmd, SSD_CMD_TIMEOUT, ssd_cmd_timeout);

	spin_lock(&dev->cmd_lock);
	ssd_reg_write(dev->ctrlp + SSD_REQ_FIFO_REG, cmd->msg_dma);
	spin_unlock(&dev->cmd_lock);
}
4505 | ||
/*
 * Submit a command by ringing the doorbell with tag (low 16 bits) and
 * segment count (high 16 bits); arms the command timeout first.
 */
static inline void ssd_send_cmd(struct ssd_cmd *cmd)
{
	struct ssd_device *dev = (struct ssd_device *)cmd->dev;

	ssd_cmd_add_timer(cmd, SSD_CMD_TIMEOUT, ssd_cmd_timeout);

	ssd_reg32_write(dev->ctrlp + SSD_REQ_FIFO_REG, ((uint32_t)cmd->tag | ((uint32_t)cmd->nsegs << 16)));
}
4514 | ||
4515 | static inline void ssd_send_cmd_db(struct ssd_cmd *cmd) | |
4516 | { | |
4517 | struct ssd_device *dev = (struct ssd_device *)cmd->dev; | |
4518 | struct bio *bio = cmd->bio; | |
4519 | ||
4520 | ssd_cmd_add_timer(cmd, SSD_CMD_TIMEOUT, ssd_cmd_timeout); | |
4521 | ||
4522 | if (bio) { | |
4523 | switch (dev->db_info.type) { | |
4524 | case SSD_DEBUG_READ_TO: | |
4525 | if (bio_data_dir(bio) == READ) { | |
4526 | return; | |
4527 | } | |
4528 | break; | |
4529 | case SSD_DEBUG_WRITE_TO: | |
4530 | if (bio_data_dir(bio) == WRITE) { | |
4531 | return; | |
4532 | } | |
4533 | break; | |
4534 | case SSD_DEBUG_RW_TO: | |
4535 | return; | |
4536 | break; | |
4537 | default: | |
4538 | break; | |
4539 | } | |
4540 | } | |
4541 | ||
4542 | ssd_reg32_write(dev->ctrlp + SSD_REQ_FIFO_REG, ((uint32_t)cmd->tag | ((uint32_t)cmd->nsegs << 16))); | |
4543 | } | |
4544 | ||
4545 | ||
4546 | /* fixed for BIOVEC_PHYS_MERGEABLE */ | |
#ifdef SSD_BIOVEC_PHYS_MERGEABLE_FIXED
#include <linux/bio.h>
#include <linux/io.h>
#include <xen/page.h>

/*
 * Under Xen, pseudo-physically contiguous pages need not be machine-
 * contiguous.  Two biovecs may only merge when their machine frame
 * numbers are identical or directly adjacent.
 */
static bool xen_biovec_phys_mergeable_fixed(const struct bio_vec *vec1,
					    const struct bio_vec *vec2)
{
	unsigned long mfn1 = pfn_to_mfn(page_to_pfn(vec1->bv_page));
	unsigned long mfn2 = pfn_to_mfn(page_to_pfn(vec2->bv_page));

	return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&
		((mfn1 == mfn2) || ((mfn1+1) == mfn2));
}

/* Replace the stock macro with the Xen-aware check on Xen domains. */
#ifdef BIOVEC_PHYS_MERGEABLE
#undef BIOVEC_PHYS_MERGEABLE
#endif
#define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
	(__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \
	(!xen_domain() || xen_biovec_phys_mergeable_fixed(vec1, vec2)))

#endif
4570 | ||
/*
 * Build a scatterlist from @bio, merging physically contiguous biovecs.
 * Stops at the hardware's cmd_max_sg limit and returns the number of
 * scatterlist entries produced.  Two implementations: the pre-3.14 one
 * iterates with an index (and records how far it got in bio->bi_idx);
 * the bvec_iter one is used on 3.14+.
 */
static inline int ssd_bio_map_sg(struct ssd_device *dev, struct bio *bio, struct scatterlist *sgl)
{
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0))
	struct bio_vec *bvec, *bvprv = NULL;
	struct scatterlist *sg = NULL;
	int i = 0, nsegs = 0;

#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23))
	sg_init_table(sgl, dev->hw_info.cmd_max_sg);
#endif

	/*
	 * for each segment in bio
	 */
	bio_for_each_segment(bvec, bio, i) {
		/* merge with the previous entry when physically contiguous */
		if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
			sg->length += bvec->bv_len;
		} else {
			if (unlikely(nsegs >= (int)dev->hw_info.cmd_max_sg)) {
				break;
			}

			sg = sg ? (sg + 1) : sgl;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
			sg_set_page(sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset);
#else
			sg->page = bvec->bv_page;
			sg->length = bvec->bv_len;
			sg->offset = bvec->bv_offset;
#endif
			nsegs++;
		}
		bvprv = bvec;
	}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
	if (sg) {
		sg_mark_end(sg);
	}
#endif

	/* remember how many biovecs were consumed (may be < bi_vcnt on sg overflow) */
	bio->bi_idx = i;

	return nsegs;
#else
	struct bio_vec bvec, bvprv;
	struct bvec_iter iter;
	struct scatterlist *sg = NULL;
	int nsegs = 0;
	int first = 1;

	sg_init_table(sgl, dev->hw_info.cmd_max_sg);

	/*
	 * for each segment in bio
	 */
	bio_for_each_segment(bvec, bio, iter) {
		if (!first && BIOVEC_PHYS_MERGEABLE(&bvprv, &bvec)) {
			sg->length += bvec.bv_len;
		} else {
			if (unlikely(nsegs >= (int)dev->hw_info.cmd_max_sg)) {
				break;
			}

			sg = sg ? (sg + 1) : sgl;

			sg_set_page(sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);

			nsegs++;
			first = 0;
		}
		bvprv = bvec;
	}

	if (sg) {
		sg_mark_end(sg);
	}

	return nsegs;
#endif
}
4652 | ||
4653 | ||
/*
 * Submit a "pbio" (pre-mapped bio — cmd->flag=1 means no pci_map_sg, and
 * the completion path skips pci_unmap_sg).
 * Discards are translated into a TRIM message whose sg entries cover the
 * sector range in sg_max_sec-sized chunks; reads/writes copy the biovecs
 * straight into the message's sg entries.
 * Returns 0 on success or -EBUSY when no tag is available and @wait is 0.
 */
static int __ssd_submit_pbio(struct ssd_device *dev, struct bio *bio, int wait)
{
	struct ssd_cmd *cmd;
	struct ssd_rw_msg *msg;
	struct ssd_sg_entry *sge;
	sector_t block = bio_start(bio);
	int tag;
	int i;

	tag = ssd_get_tag(dev, wait);
	if (tag < 0) {
		return -EBUSY;
	}

	cmd = &dev->cmd[tag];
	cmd->bio = bio;
	cmd->flag = 1;	/* mark as pre-mapped: completion must not pci_unmap_sg */

	msg = (struct ssd_rw_msg *)cmd->msg;

	if (ssd_bio_has_discard(bio)) {
		unsigned int length = bio_sectors(bio);

		//printk(KERN_WARNING "%s: discard len %u, block %llu\n", dev->name, bio_sectors(bio), block);
		msg->tag = tag;
		msg->fun = SSD_FUNC_TRIM;

		/* split the range into sg entries of at most sg_max_sec sectors */
		sge = msg->sge;
		for (i=0; i<(dev->hw_info.cmd_max_sg); i++) {
			sge->block = block;
			sge->length = (length >= dev->hw_info.sg_max_sec) ? dev->hw_info.sg_max_sec : length;
			sge->buf = 0;

			block += sge->length;
			length -= sge->length;
			sge++;

			/* NOTE(review): length is unsigned, so "<= 0" is just "== 0" */
			if (length <= 0) {
				++i;
				break;
			}
		}
		msg->nsegs = cmd->nsegs = i;

		dev->scmd(cmd);
		return 0;
	}

	//msg->nsegs = cmd->nsegs = ssd_bio_map_sg(dev, bio, sgl);
	msg->nsegs = cmd->nsegs = bio->bi_vcnt;

	//xx
	if (bio_data_dir(bio) == READ) {
		msg->fun = SSD_FUNC_READ;
		msg->flag = 0;
	} else {
		msg->fun = SSD_FUNC_WRITE;
		msg->flag = dev->wmode;	/* write mode (e.g. write-back/through) */
	}

	/* NOTE(review): sge->buf stores page pointer + offset, not a DMA address —
	 * presumably the "pbio" path uses addresses prepared by the caller; confirm */
	sge = msg->sge;
	for (i=0; i<bio->bi_vcnt; i++) {
		sge->block = block;
		sge->length = bio->bi_io_vec[i].bv_len >> 9;
		sge->buf = (uint64_t)((void *)bio->bi_io_vec[i].bv_page + bio->bi_io_vec[i].bv_offset);

		block += sge->length;
		sge++;
	}

	msg->tag = tag;

#ifdef SSD_OT_PROTECT
	/* throttle writes/reads while over-temperature protection is active */
	if (unlikely(dev->ot_delay > 0 && dev->ot_protect != 0)) {
		msleep_interruptible(dev->ot_delay);
	}
#endif

	ssd_start_io_acct(cmd);
	dev->scmd(cmd);

	return 0;
}
4737 | ||
/*
 * Submit a regular bio: build a scatterlist, DMA-map it, and hand the
 * command to the device.  cmd->flag = 0 distinguishes this from the
 * pbio path (__ssd_submit_pbio).
 *
 * @wait: passed through to ssd_get_tag(); nonzero allows blocking for a tag.
 *
 * Returns 0 on submit, -EBUSY if no command tag could be obtained.
 */
static inline int ssd_submit_bio(struct ssd_device *dev, struct bio *bio, int wait)
{
	struct ssd_cmd *cmd;
	struct ssd_rw_msg *msg;
	struct ssd_sg_entry *sge;
	struct scatterlist *sgl;
	sector_t block = bio_start(bio);
	int tag;
	int i;

	tag = ssd_get_tag(dev, wait);
	if (tag < 0) {
		return -EBUSY;
	}

	cmd = &dev->cmd[tag];
	cmd->bio = bio;
	cmd->flag = 0;	/* mapped (scatterlist) command */

	msg = (struct ssd_rw_msg *)cmd->msg;

	sgl = cmd->sgl;

	if (ssd_bio_has_discard(bio)) {
		unsigned int length = bio_sectors(bio);

		//printk(KERN_WARNING "%s: discard len %u, block %llu\n", dev->name, bio_sectors(bio), block);
		msg->tag = tag;
		msg->fun = SSD_FUNC_TRIM;

		/* split the discard range into sg entries of at most sg_max_sec
		 * sectors each, bounded by the hardware sg-list capacity */
		sge = msg->sge;
		for (i=0; i<(dev->hw_info.cmd_max_sg); i++) {
			sge->block = block;
			sge->length = (length >= dev->hw_info.sg_max_sec) ? dev->hw_info.sg_max_sec : length;
			sge->buf = 0;	/* trim carries no data buffer */

			block += sge->length;
			length -= sge->length;
			sge++;

			/* length is unsigned, so this is effectively "== 0" */
			if (length <= 0) {
				++i;	/* count the entry just filled */
				break;
			}
		}
		msg->nsegs = cmd->nsegs = i;

		dev->scmd(cmd);
		return 0;
	}

	msg->nsegs = cmd->nsegs = ssd_bio_map_sg(dev, bio, sgl);

	/* direction selects function code, write mode and DMA direction.
	 * NOTE(review): pci_map_sg()'s return value (the mapped segment
	 * count) is ignored; nsegs keeps the pre-mapping count -- confirm
	 * the IOMMU never coalesces here. */
	if (bio_data_dir(bio) == READ) {
		msg->fun = SSD_FUNC_READ;
		msg->flag = 0;
		pci_map_sg(dev->pdev, sgl, cmd->nsegs, PCI_DMA_FROMDEVICE);
	} else {
		msg->fun = SSD_FUNC_WRITE;
		msg->flag = dev->wmode;
		pci_map_sg(dev->pdev, sgl, cmd->nsegs, PCI_DMA_TODEVICE);
	}

	/* copy the mapped addresses/lengths into the device message */
	sge = msg->sge;
	for (i=0; i<cmd->nsegs; i++) {
		sge->block = block;
		sge->length = sg_dma_len(sgl) >> 9;	/* sectors */
		sge->buf = sg_dma_address(sgl);

		block += sge->length;
		sgl++;
		sge++;
	}

	msg->tag = tag;

#ifdef SSD_OT_PROTECT
	/* over-temperature throttling: delay submission while hot */
	if (unlikely(dev->ot_delay > 0 && dev->ot_protect != 0)) {
		msleep_interruptible(dev->ot_delay);
	}
#endif

	ssd_start_io_acct(cmd);
	dev->scmd(cmd);

	return 0;
}
4826 | ||
4827 | /* threads */ | |
/* threads */
/*
 * Per-device completion thread: drains dev->doneq and ends each bio.
 *
 * Woken through dev->done_waitq whenever in_doneq becomes nonzero.
 * Lock flavor depends on who produces into doneq: with threaded_irq
 * the producer runs in thread context (plain spin_lock); otherwise it
 * presumably runs in hard-irq context, so spin_lock_irq is required.
 *
 * Returns 0 on kthread_stop(), -EINVAL if started without a device.
 */
static int ssd_done_thread(void *data)
{
	struct ssd_device *dev;
	struct bio *bio;
	struct bio *next;

	if (!data) {
		return -EINVAL;
	}
	dev = data;

	current->flags |= PF_NOFREEZE;	/* keep running across suspend/freeze */
	//set_user_nice(current, -5);

	while (!kthread_should_stop()) {
		wait_event_interruptible(dev->done_waitq, (atomic_read(&dev->in_doneq) || kthread_should_stop()));

		while (atomic_read(&dev->in_doneq)) {
			if (threaded_irq) {
				spin_lock(&dev->doneq_lock);
				bio = ssd_blist_get(&dev->doneq);
				spin_unlock(&dev->doneq_lock);
			} else {
				spin_lock_irq(&dev->doneq_lock);
				bio = ssd_blist_get(&dev->doneq);
				spin_unlock_irq(&dev->doneq_lock);
			}

			/* ssd_blist_get returns a bi_next-chained batch; end each one */
			while (bio) {
				next = bio->bi_next;
				bio->bi_next = NULL;
				ssd_bio_endio(bio, 0);
				atomic_dec(&dev->in_doneq);
				bio = next;
			}

			cond_resched();

#ifdef SSD_ESCAPE_IRQ
			/* migrate off the CPU that services our irq to avoid
			 * competing with the interrupt handler */
			if (unlikely(smp_processor_id() == dev->irq_cpu)) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
				cpumask_var_t new_mask;
				if (alloc_cpumask_var(&new_mask, GFP_ATOMIC)) {
					cpumask_setall(new_mask);
					cpumask_clear_cpu(dev->irq_cpu, new_mask);
					set_cpus_allowed_ptr(current, new_mask);
					free_cpumask_var(new_mask);
				}
#else
				cpumask_t new_mask;
				cpus_setall(new_mask);
				cpu_clear(dev->irq_cpu, new_mask);
				set_cpus_allowed(current, new_mask);
#endif
			}
#endif
		}
	}
	return 0;
}
4888 | ||
/*
 * Per-device submission thread: drains dev->sendq and (re)submits each
 * bio with wait=1, so deferred bios block here for a tag instead of in
 * the original submitter's context.
 *
 * Woken through dev->send_waitq whenever in_sendq becomes nonzero.
 * Returns 0 on kthread_stop(), -EINVAL if started without a device.
 */
static int ssd_send_thread(void *data)
{
	struct ssd_device *dev;
	struct bio *bio;
	struct bio *next;

	if (!data) {
		return -EINVAL;
	}
	dev = data;

	current->flags |= PF_NOFREEZE;	/* keep running across suspend/freeze */
	//set_user_nice(current, -5);

	while (!kthread_should_stop()) {
		wait_event_interruptible(dev->send_waitq, (atomic_read(&dev->in_sendq) || kthread_should_stop()));

		while (atomic_read(&dev->in_sendq)) {
			spin_lock(&dev->sendq_lock);
			bio = ssd_blist_get(&dev->sendq);
			spin_unlock(&dev->sendq_lock);

			/* ssd_blist_get returns a bi_next-chained batch */
			while (bio) {
				next = bio->bi_next;
				bio->bi_next = NULL;
#ifdef SSD_QUEUE_PBIO
				/* BIO_SSD_PBIO marks bios queued by the pbio path */
				if (test_and_clear_bit(BIO_SSD_PBIO, &bio->bi_flags)) {
					__ssd_submit_pbio(dev, bio, 1);
				} else {
					ssd_submit_bio(dev, bio, 1);
				}
#else
				ssd_submit_bio(dev, bio, 1);
#endif
				atomic_dec(&dev->in_sendq);
				bio = next;
			}

			cond_resched();

#ifdef SSD_ESCAPE_IRQ
			/* migrate off the CPU that services our irq to avoid
			 * competing with the interrupt handler */
			if (unlikely(smp_processor_id() == dev->irq_cpu)) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
				cpumask_var_t new_mask;
				if (alloc_cpumask_var(&new_mask, GFP_ATOMIC)) {
					cpumask_setall(new_mask);
					cpumask_clear_cpu(dev->irq_cpu, new_mask);
					set_cpus_allowed_ptr(current, new_mask);
					free_cpumask_var(new_mask);
				}
#else
				cpumask_t new_mask;
				cpus_setall(new_mask);
				cpu_clear(dev->irq_cpu, new_mask);
				set_cpus_allowed(current, new_mask);
#endif
			}
#endif
		}
	}

	return 0;
}
4952 | ||
/*
 * Stop the per-device worker threads: the send thread first, then the
 * done (completion) thread.  kthread_stop() blocks until each exits.
 */
static void ssd_cleanup_thread(struct ssd_device *dev)
{
	kthread_stop(dev->send_thread);
	kthread_stop(dev->done_thread);
}
4958 | ||
4959 | static int ssd_init_thread(struct ssd_device *dev) | |
4960 | { | |
4961 | int ret; | |
4962 | ||
4963 | atomic_set(&dev->in_doneq, 0); | |
4964 | atomic_set(&dev->in_sendq, 0); | |
4965 | ||
4966 | spin_lock_init(&dev->doneq_lock); | |
4967 | spin_lock_init(&dev->sendq_lock); | |
4968 | ||
4969 | ssd_blist_init(&dev->doneq); | |
4970 | ssd_blist_init(&dev->sendq); | |
4971 | ||
4972 | init_waitqueue_head(&dev->done_waitq); | |
4973 | init_waitqueue_head(&dev->send_waitq); | |
4974 | ||
4975 | dev->done_thread = kthread_run(ssd_done_thread, dev, "%s/d", dev->name); | |
4976 | if (IS_ERR(dev->done_thread)) { | |
4977 | ret = PTR_ERR(dev->done_thread); | |
4978 | goto out_done_thread; | |
4979 | } | |
4980 | ||
4981 | dev->send_thread = kthread_run(ssd_send_thread, dev, "%s/s", dev->name); | |
4982 | if (IS_ERR(dev->send_thread)) { | |
4983 | ret = PTR_ERR(dev->send_thread); | |
4984 | goto out_send_thread; | |
4985 | } | |
4986 | ||
4987 | return 0; | |
4988 | ||
4989 | out_send_thread: | |
4990 | kthread_stop(dev->done_thread); | |
4991 | out_done_thread: | |
4992 | return ret; | |
4993 | } | |
4994 | ||
4995 | /* dcmd pool */ | |
4996 | static void ssd_put_dcmd(struct ssd_dcmd *dcmd) | |
4997 | { | |
4998 | struct ssd_device *dev = (struct ssd_device *)dcmd->dev; | |
4999 | ||
5000 | spin_lock(&dev->dcmd_lock); | |
5001 | list_add_tail(&dcmd->list, &dev->dcmd_list); | |
5002 | spin_unlock(&dev->dcmd_lock); | |
5003 | } | |
5004 | ||
5005 | static struct ssd_dcmd *ssd_get_dcmd(struct ssd_device *dev) | |
5006 | { | |
5007 | struct ssd_dcmd *dcmd = NULL; | |
5008 | ||
5009 | spin_lock(&dev->dcmd_lock); | |
5010 | if (!list_empty(&dev->dcmd_list)) { | |
5011 | dcmd = list_entry(dev->dcmd_list.next, | |
5012 | struct ssd_dcmd, list); | |
5013 | list_del_init(&dcmd->list); | |
5014 | } | |
5015 | spin_unlock(&dev->dcmd_lock); | |
5016 | ||
5017 | return dcmd; | |
5018 | } | |
5019 | ||
/* Free the direct-command descriptor array allocated by ssd_init_dcmd(). */
static void ssd_cleanup_dcmd(struct ssd_device *dev)
{
	kfree(dev->dcmd);
}
5024 | ||
5025 | static int ssd_init_dcmd(struct ssd_device *dev) | |
5026 | { | |
5027 | struct ssd_dcmd *dcmd; | |
5028 | int dcmd_sz = sizeof(struct ssd_dcmd)*dev->hw_info.cmd_fifo_sz; | |
5029 | int i; | |
5030 | ||
5031 | spin_lock_init(&dev->dcmd_lock); | |
5032 | INIT_LIST_HEAD(&dev->dcmd_list); | |
5033 | init_waitqueue_head(&dev->dcmd_wq); | |
5034 | ||
5035 | dev->dcmd = kmalloc(dcmd_sz, GFP_KERNEL); | |
5036 | if (!dev->dcmd) { | |
5037 | hio_warn("%s: can not alloc dcmd\n", dev->name); | |
5038 | goto out_alloc_dcmd; | |
5039 | } | |
5040 | memset(dev->dcmd, 0, dcmd_sz); | |
5041 | ||
5042 | for (i=0, dcmd=dev->dcmd; i<(int)dev->hw_info.cmd_fifo_sz; i++, dcmd++) { | |
5043 | dcmd->dev = dev; | |
5044 | INIT_LIST_HEAD(&dcmd->list); | |
5045 | list_add_tail(&dcmd->list, &dev->dcmd_list); | |
5046 | } | |
5047 | ||
5048 | return 0; | |
5049 | ||
5050 | out_alloc_dcmd: | |
5051 | return -ENOMEM; | |
5052 | } | |
5053 | ||
5054 | static void ssd_put_dmsg(void *msg) | |
5055 | { | |
5056 | struct ssd_dcmd *dcmd = container_of(msg, struct ssd_dcmd, msg); | |
5057 | struct ssd_device *dev = (struct ssd_device *)dcmd->dev; | |
5058 | ||
5059 | memset(dcmd->msg, 0, SSD_DCMD_MAX_SZ); | |
5060 | ssd_put_dcmd(dcmd); | |
5061 | wake_up(&dev->dcmd_wq); | |
5062 | } | |
5063 | ||
/*
 * Get a free direct-command message buffer, blocking until one is
 * available.  Sleeps TASK_UNINTERRUPTIBLE on dev->dcmd_wq between
 * attempts; ssd_put_dmsg() wakes the queue.  Never returns NULL.
 */
static void *ssd_get_dmsg(struct ssd_device *dev)
{
	struct ssd_dcmd *dcmd = ssd_get_dcmd(dev);

	while (!dcmd) {
		DEFINE_WAIT(wait);
		/* exclusive wait: one wake_up releases exactly one waiter */
		prepare_to_wait_exclusive(&dev->dcmd_wq, &wait, TASK_UNINTERRUPTIBLE);
		schedule();

		dcmd = ssd_get_dcmd(dev);

		finish_wait(&dev->dcmd_wq, &wait);
	}
	return dcmd->msg;
}
5079 | ||
5080 | /* do direct cmd */ | |
/* do direct cmd */
/*
 * Issue a direct command synchronously and wait for its completion.
 *
 * @rw:   direction hint -- unused in this path.
 * @msg:  prebuilt message (SSD_DCMD_MAX_SZ bytes); the tag is patched in.
 * @done: optional out-parameter, receives cmd->nr_log after completion.
 *
 * Blocks for a tag (ssd_get_tag with wait=1) and then on an on-stack
 * completion signalled by the completion path.  Returns 0 on success,
 * -ETIMEDOUT as reported by the command, -EIO for any other command
 * error, or -EBUSY if no tag was available.
 */
static int ssd_do_request(struct ssd_device *dev, int rw, void *msg, int *done)
{
	DECLARE_COMPLETION(wait);
	struct ssd_cmd *cmd;
	int tag;
	int ret = 0;

	tag = ssd_get_tag(dev, 1);
	if (tag < 0) {
		return -EBUSY;
	}

	cmd = &dev->cmd[tag];
	cmd->nsegs = 1;
	memcpy(cmd->msg, msg, SSD_DCMD_MAX_SZ);
	((struct ssd_rw_msg *)cmd->msg)->tag = tag;

	cmd->waiting = &wait;	/* completion path signals this */

	dev->scmd(cmd);

	wait_for_completion(cmd->waiting);
	cmd->waiting = NULL;

	if (cmd->errors == -ETIMEDOUT) {
		ret = cmd->errors;
	} else if (cmd->errors) {
		ret = -EIO;
	}

	if (done != NULL) {
		*done = cmd->nr_log;
	}
	ssd_put_tag(dev, cmd->tag);

	return ret;
}
5118 | ||
/*
 * Same as ssd_do_request() but uses the barrier tag allocator
 * (ssd_barrier_get_tag / ssd_barrier_put_tag) instead of the normal
 * one, for commands that must be ordered against regular I/O.
 *
 * @rw:   direction hint -- unused in this path.
 * @done: optional out-parameter, receives cmd->nr_log after completion.
 *
 * Returns 0, -ETIMEDOUT, -EIO, or -EBUSY if no barrier tag is free.
 */
static int ssd_do_barrier_request(struct ssd_device *dev, int rw, void *msg, int *done)
{
	DECLARE_COMPLETION(wait);
	struct ssd_cmd *cmd;
	int tag;
	int ret = 0;

	tag = ssd_barrier_get_tag(dev);
	if (tag < 0) {
		return -EBUSY;
	}

	cmd = &dev->cmd[tag];
	cmd->nsegs = 1;
	memcpy(cmd->msg, msg, SSD_DCMD_MAX_SZ);
	((struct ssd_rw_msg *)cmd->msg)->tag = tag;

	cmd->waiting = &wait;	/* completion path signals this */

	dev->scmd(cmd);

	wait_for_completion(cmd->waiting);
	cmd->waiting = NULL;

	if (cmd->errors == -ETIMEDOUT) {
		ret = cmd->errors;
	} else if (cmd->errors) {
		ret = -EIO;
	}

	if (done != NULL) {
		*done = cmd->nr_log;
	}
	ssd_barrier_put_tag(dev, cmd->tag);

	return ret;
}
5156 | ||
5157 | #ifdef SSD_OT_PROTECT | |
/*
 * Poll every controller's temperature register and enter/leave the
 * over-temperature state accordingly.
 *
 * @temp: threshold in the same unit as CUR_TEMP() -- entering uses
 *        SSD_OT_TEMP, leaving uses SSD_OT_TEMP_HYST (hysteresis).
 *
 * On the affected protocol range (> V3, < V3_2_2) the OT state sets
 * dev->ot_delay, which throttles submissions in the submit paths.
 */
static void ssd_check_temperature(struct ssd_device *dev, int temp)
{
	uint64_t val;
	uint32_t off;
	int cur;
	int i;

	if (mode != SSD_DRV_MODE_STANDARD) {
		return;
	}

	if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) {
		/* NOTE(review): empty branch -- presumably pre-V3 hardware has
		 * no per-controller temperature registers and a "return;" is
		 * missing here; confirm before changing behavior. */
	}

	for (i=0; i<dev->hw_info.nr_ctrl; i++) {
		off = SSD_CTRL_TEMP_REG0 + i * sizeof(uint64_t);

		val = ssd_reg_read(dev->ctrlp + off);
		/* all-ones reads back from an absent/unreadable controller */
		if (val == 0xffffffffffffffffull) {
			continue;
		}

		cur = (int)CUR_TEMP(val);
		if (cur >= temp) {
			/* first controller over threshold trips the OT state */
			if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_CTRL), &dev->hwmon)) {
				if (dev->protocol_info.ver > SSD_PROTOCOL_V3 && dev->protocol_info.ver < SSD_PROTOCOL_V3_2_2) {
					hio_warn("%s: Over temperature, please check the fans.\n", dev->name);
					dev->ot_delay = SSD_OT_DELAY;
				}
			}
			return;
		}
	}

	/* no controller over threshold: clear the OT state if it was set */
	if (test_and_clear_bit(SSD_HWMON_TEMP(SSD_TEMP_CTRL), &dev->hwmon)) {
		if (dev->protocol_info.ver > SSD_PROTOCOL_V3 && dev->protocol_info.ver < SSD_PROTOCOL_V3_2_2) {
			hio_warn("%s: Temperature is OK.\n", dev->name);
			dev->ot_delay = 0;
		}
	}
}
5200 | ||
5201 | static int ssd_get_ot_status(struct ssd_device *dev, int *status) | |
5202 | { | |
5203 | uint32_t off; | |
5204 | uint32_t val; | |
5205 | int i; | |
5206 | ||
5207 | if (!dev || !status) { | |
5208 | return -EINVAL; | |
5209 | } | |
5210 | ||
5211 | if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2_2) { | |
5212 | for (i=0; i<dev->hw_info.nr_ctrl; i++) { | |
5213 | off = SSD_READ_OT_REG0 + (i * SSD_CTRL_REG_ZONE_SZ); | |
5214 | val = ssd_reg32_read(dev->ctrlp + off); | |
5215 | if ((val >> 22) & 0x1) { | |
5216 | *status = 1; | |
5217 | goto out; | |
5218 | } | |
5219 | ||
5220 | ||
5221 | off = SSD_WRITE_OT_REG0 + (i * SSD_CTRL_REG_ZONE_SZ); | |
5222 | val = ssd_reg32_read(dev->ctrlp + off); | |
5223 | if ((val >> 22) & 0x1) { | |
5224 | *status = 1; | |
5225 | goto out; | |
5226 | } | |
5227 | } | |
5228 | } else { | |
5229 | *status = !!dev->ot_delay; | |
5230 | } | |
5231 | ||
5232 | out: | |
5233 | return 0; | |
5234 | } | |
5235 | ||
5236 | static void ssd_set_ot_protect(struct ssd_device *dev, int protect) | |
5237 | { | |
5238 | uint32_t off; | |
5239 | uint32_t val; | |
5240 | int i; | |
5241 | ||
5242 | mutex_lock(&dev->fw_mutex); | |
5243 | ||
5244 | dev->ot_protect = !!protect; | |
5245 | ||
5246 | if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2_2) { | |
5247 | for (i=0; i<dev->hw_info.nr_ctrl; i++) { | |
5248 | off = SSD_READ_OT_REG0 + (i * SSD_CTRL_REG_ZONE_SZ); | |
5249 | val = ssd_reg32_read(dev->ctrlp + off); | |
5250 | if (dev->ot_protect) { | |
5251 | val |= (1U << 21); | |
5252 | } else { | |
5253 | val &= ~(1U << 21); | |
5254 | } | |
5255 | ssd_reg32_write(dev->ctrlp + off, val); | |
5256 | ||
5257 | ||
5258 | off = SSD_WRITE_OT_REG0 + (i * SSD_CTRL_REG_ZONE_SZ); | |
5259 | val = ssd_reg32_read(dev->ctrlp + off); | |
5260 | if (dev->ot_protect) { | |
5261 | val |= (1U << 21); | |
5262 | } else { | |
5263 | val &= ~(1U << 21); | |
5264 | } | |
5265 | ssd_reg32_write(dev->ctrlp + off, val); | |
5266 | } | |
5267 | } | |
5268 | ||
5269 | mutex_unlock(&dev->fw_mutex); | |
5270 | } | |
5271 | ||
/*
 * Apply the module-parameter OT-protection setting at init time and,
 * when compiled in, take an initial temperature reading.
 * Always returns 0.
 */
static int ssd_init_ot_protect(struct ssd_device *dev)
{
	ssd_set_ot_protect(dev, ot_protect);

#ifdef SSD_OT_PROTECT
	ssd_check_temperature(dev, SSD_OT_TEMP);
#endif

	return 0;
}
5282 | ||
5283 | /* log */ | |
/* log */
/*
 * Read one controller's log area into @buf via a direct command.
 *
 * @ctrl_idx: controller index, must be < hw_info.nr_ctrl.
 * @buf:      kernel buffer of at least hw_info.log_sz bytes.
 * @nr_log:   out-parameter, number of log records returned by the device.
 *
 * Maps @buf for DMA, sends SSD_FUNC_READ_LOG (the message layout differs
 * before protocol V3), waits for completion, then unmaps.
 * Returns 0 on success or a negative errno.
 */
static int ssd_read_log(struct ssd_device *dev, int ctrl_idx, void *buf, int *nr_log)
{
	struct ssd_log_op_msg *msg;
	struct ssd_log_msg *lmsg;
	dma_addr_t buf_dma;
	size_t length = dev->hw_info.log_sz;
	int ret = 0;

	if (ctrl_idx >= dev->hw_info.nr_ctrl) {
		return -EINVAL;
	}

	buf_dma = pci_map_single(dev->pdev, buf, length, PCI_DMA_FROMDEVICE);
	/* dma_mapping_error() gained a device argument after 2.6.26 */
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
	ret = dma_mapping_error(buf_dma);
#else
	ret = dma_mapping_error(&(dev->pdev->dev), buf_dma);
#endif
	if (ret) {
		hio_warn("%s: unable to map read DMA buffer\n", dev->name);
		goto out_dma_mapping;
	}

	msg = (struct ssd_log_op_msg *)ssd_get_dmsg(dev);

	/* pre-V3 protocols use the older, smaller ssd_log_msg layout */
	if (dev->protocol_info.ver < SSD_PROTOCOL_V3) {
		lmsg = (struct ssd_log_msg *)msg;
		lmsg->fun = SSD_FUNC_READ_LOG;
		lmsg->ctrl_idx = ctrl_idx;
		lmsg->buf = buf_dma;
	} else {
		msg->fun = SSD_FUNC_READ_LOG;
		msg->ctrl_idx = ctrl_idx;
		msg->buf = buf_dma;
	}

	ret = ssd_do_request(dev, READ, msg, nr_log);
	ssd_put_dmsg(msg);

	pci_unmap_single(dev->pdev, buf_dma, length, PCI_DMA_FROMDEVICE);

out_dma_mapping:
	return ret;
}
5328 | ||
5329 | #define SSD_LOG_PRINT_BUF_SZ 256 | |
/*
 * Classify a log record and optionally print it to the console.
 *
 * @log:   the record; log->le is the on-flash entry payload.
 * @print: 0 to classify only (no output).
 *
 * Looks the event up in the ssd_log_desc table (terminated by
 * SSD_UNKNOWN_EVENT) and prints at the descriptor's level unless it is
 * below the module's log_level.  Returns the descriptor's level.
 */
static int ssd_parse_log(struct ssd_device *dev, struct ssd_log *log, int print)
{
	struct ssd_log_desc *log_desc = ssd_log_desc;
	struct ssd_log_entry *le;
	char *sn = NULL;
	char print_buf[SSD_LOG_PRINT_BUF_SZ];
	int print_len;

	le = &log->le;

	/* find desc: unknown events stop at the table's sentinel entry */
	while (log_desc->event != SSD_UNKNOWN_EVENT) {
		if (log_desc->event == le->event) {
			break;
		}
		log_desc++;
	}

	if (!print) {
		goto out;
	}

	/* below the configured verbosity: classify only */
	if (log_desc->level < log_level) {
		goto out;
	}

	/* serial-number location depends on the label layout version */
	if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
		sn = dev->label.sn;
	} else {
		sn = dev->labelv3.barcode;
	}

	/* NOTE(review): the running print_len is never clamped to the buffer
	 * size; snprintf returns the would-be length, so a (theoretical)
	 * overflow would make the size argument wrap -- confirm 256 bytes is
	 * always enough for the longest descriptor. */
	print_len = snprintf(print_buf, SSD_LOG_PRINT_BUF_SZ, "%s (%s): <%#x>", dev->name, sn, le->event);

	/* SSD_LOG_SW_IDX marks driver-generated (software) records */
	if (log->ctrl_idx != SSD_LOG_SW_IDX) {
		print_len += snprintf((print_buf + print_len), (SSD_LOG_PRINT_BUF_SZ - print_len), " controller %d", log->ctrl_idx);
	}

	switch (log_desc->data) {
	case SSD_LOG_DATA_NONE:
		break;
	case SSD_LOG_DATA_LOC:
		/* flash/block/page location; field layout differs per protocol */
		if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
			print_len += snprintf((print_buf + print_len), (SSD_LOG_PRINT_BUF_SZ - print_len), " flash %d", le->data.loc.flash);
			if (log_desc->sblock) {
				print_len += snprintf((print_buf + print_len), (SSD_LOG_PRINT_BUF_SZ - print_len), " block %d", le->data.loc.block);
			}
			if (log_desc->spage) {
				print_len += snprintf((print_buf + print_len), (SSD_LOG_PRINT_BUF_SZ - print_len), " page %d", le->data.loc.page);
			}
		} else {
			print_len += snprintf((print_buf + print_len), (SSD_LOG_PRINT_BUF_SZ - print_len), " flash %d", le->data.loc1.flash);
			if (log_desc->sblock) {
				print_len += snprintf((print_buf + print_len), (SSD_LOG_PRINT_BUF_SZ - print_len), " block %d", le->data.loc1.block);
			}
			if (log_desc->spage) {
				print_len += snprintf((print_buf + print_len), (SSD_LOG_PRINT_BUF_SZ - print_len), " page %d", le->data.loc1.page);
			}
		}
		break;
	case SSD_LOG_DATA_HEX:
		print_len += snprintf((print_buf + print_len), (SSD_LOG_PRINT_BUF_SZ - print_len), " info %#x", le->data.val);
		break;
	default:
		break;
	}
	/*print_len += */snprintf((print_buf + print_len), (SSD_LOG_PRINT_BUF_SZ - print_len), ": %s", log_desc->desc);

	/* emit at the descriptor's severity */
	switch (log_desc->level) {
	case SSD_LOG_LEVEL_INFO:
		hio_info("%s\n", print_buf);
		break;
	case SSD_LOG_LEVEL_NOTICE:
		hio_note("%s\n", print_buf);
		break;
	case SSD_LOG_LEVEL_WARNING:
		hio_warn("%s\n", print_buf);
		break;
	case SSD_LOG_LEVEL_ERR:
		hio_err("%s\n", print_buf);
		//printk(KERN_ERR MODULE_NAME": some exception occurred, please check the data or refer to FAQ.");
		break;
	default:
		hio_warn("%s\n", print_buf);
		break;
	}

out:
	return log_desc->level;
}
5421 | ||
5422 | static int ssd_bm_get_sfstatus(struct ssd_device *dev, uint16_t *status); | |
5423 | static int ssd_switch_wmode(struct ssd_device *dev, int wmode); | |
5424 | ||
5425 | ||
/*
 * React to a logged event: adjust driver state (over-temperature
 * throttling, write-mode switching on battery/capacitor trouble,
 * board-voltage monitoring, smart counters) and forward the event to
 * the registered event callback, if any.
 *
 * @event: SSD_LOG_* event code.
 * @level: severity as returned by ssd_parse_log().
 *
 * Always returns 0.
 */
static int ssd_handle_event(struct ssd_device *dev, uint16_t event, int level)
{
	int ret = 0;

	switch (event) {
	case SSD_LOG_OVER_TEMP: {
#ifdef SSD_OT_PROTECT
		/* enter OT state once; only the affected protocol range throttles */
		if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_CTRL), &dev->hwmon)) {
			if (dev->protocol_info.ver > SSD_PROTOCOL_V3 && dev->protocol_info.ver < SSD_PROTOCOL_V3_2_2) {
				hio_warn("%s: Over temperature, please check the fans.\n", dev->name);
				dev->ot_delay = SSD_OT_DELAY;
			}
		}
#endif
		break;
	}

	case SSD_LOG_NORMAL_TEMP: {
#ifdef SSD_OT_PROTECT
		/* need to check all controller's temperature */
		ssd_check_temperature(dev, SSD_OT_TEMP_HYST);
#endif
		break;
	}

	case SSD_LOG_BATTERY_FAULT: {
		uint16_t sfstatus;

		/* old protocols expose a battery status word worth logging */
		if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
			if (!ssd_bm_get_sfstatus(dev, &sfstatus)) {
				ssd_gen_swlog(dev, SSD_LOG_BM_SFSTATUS, sfstatus);
			}
		}

		/* losing power-loss protection forces a write-mode re-evaluation */
		if (!test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) {
			ssd_switch_wmode(dev, dev->user_wmode);
		}
		break;
	}

	case SSD_LOG_BATTERY_OK: {
		if (test_and_clear_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) {
			ssd_switch_wmode(dev, dev->user_wmode);
		}
		break;
	}

	case SSD_LOG_BOARD_VOLT_FAULT: {
		ssd_mon_boardvolt(dev);
		break;
	}

	case SSD_LOG_CLEAR_LOG: {
		/* update smart */
		memset(&dev->smart.log_info, 0, sizeof(struct ssd_log_info));
		break;
	}

	/* capacitor trouble is treated like a battery fault */
	case SSD_LOG_CAP_VOLT_FAULT:
	case SSD_LOG_CAP_LEARN_FAULT:
	case SSD_LOG_CAP_SHORT_CIRCUIT: {
		if (!test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) {
			ssd_switch_wmode(dev, dev->user_wmode);
		}
		break;
	}

	default:
		break;
	}

	/* ssd event call */
	if (dev->event_call) {
		dev->event_call(dev->gd, event, level);

		/* FIXME: capacitor faults are also reported as battery faults */
		if (SSD_LOG_CAP_VOLT_FAULT == event || SSD_LOG_CAP_LEARN_FAULT == event || SSD_LOG_CAP_SHORT_CIRCUIT == event) {
			dev->event_call(dev->gd, SSD_LOG_BATTERY_FAULT, level);
		}
	}

	return ret;
}
5509 | ||
/*
 * Append one record to the in-memory internal log and, on protocols
 * newer than V3, persist it to the SPI-flash log area as well.
 *
 * The in-memory copy is only committed (nr_log incremented) after the
 * flash write succeeds, so memory and flash stay in step.
 * Returns 0 on success or the ssd_spi_write() error.
 */
static int ssd_save_log(struct ssd_device *dev, struct ssd_log *log)
{
	uint32_t off, size;
	void *internal_log;
	int ret = 0;

	mutex_lock(&dev->internal_log_mutex);

	size = sizeof(struct ssd_log);
	off = dev->internal_log.nr_log * size;

	/* log area full: warn once (when counters first coincide) and drop */
	if (off == dev->rom_info.log_sz) {
		if (dev->internal_log.nr_log == dev->smart.log_info.nr_log) {
			hio_warn("%s: internal log is full\n", dev->name);
		}
		goto out;
	}

	internal_log = dev->internal_log.log + off;
	memcpy(internal_log, log, size);

	if (dev->protocol_info.ver > SSD_PROTOCOL_V3) {
		off += dev->rom_info.log_base;

		ret = ssd_spi_write(dev, log, off, size);
		if (ret) {
			goto out;
		}
	}

	dev->internal_log.nr_log++;

out:
	mutex_unlock(&dev->internal_log_mutex);
	return ret;
}
5546 | ||
da3355df SF |
5547 | /** CRC table for the CRC-16. The poly is 0x8005 (x^16 + x^15 + x^2 + 1) */ |
5548 | static unsigned short const crc16_table[256] = { | |
5549 | 0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241, | |
5550 | 0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440, | |
5551 | 0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40, | |
5552 | 0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880, 0xC841, | |
5553 | 0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40, | |
5554 | 0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0, 0x1C80, 0xDC41, | |
5555 | 0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641, | |
5556 | 0xD201, 0x12C0, 0x1380, 0xD341, 0x1100, 0xD1C1, 0xD081, 0x1040, | |
5557 | 0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240, | |
5558 | 0x3600, 0xF6C1, 0xF781, 0x3740, 0xF501, 0x35C0, 0x3480, 0xF441, | |
5559 | 0x3C00, 0xFCC1, 0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41, | |
5560 | 0xFA01, 0x3AC0, 0x3B80, 0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840, | |
5561 | 0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41, | |
5562 | 0xEE01, 0x2EC0, 0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40, | |
5563 | 0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640, | |
5564 | 0x2200, 0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0, 0x2080, 0xE041, | |
5565 | 0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240, | |
5566 | 0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441, | |
5567 | 0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80, 0xAE41, | |
5568 | 0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840, | |
5569 | 0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0, 0x7A80, 0xBA41, | |
5570 | 0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40, | |
5571 | 0xB401, 0x74C0, 0x7580, 0xB541, 0x7700, 0xB7C1, 0xB681, 0x7640, | |
5572 | 0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041, | |
5573 | 0x5000, 0x90C1, 0x9181, 0x5140, 0x9301, 0x53C0, 0x5280, 0x9241, | |
5574 | 0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440, | |
5575 | 0x9C01, 0x5CC0, 0x5D80, 0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40, | |
5576 | 0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841, | |
5577 | 0x8801, 0x48C0, 0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40, | |
5578 | 0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41, | |
5579 | 0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641, | |
5580 | 0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040 | |
5581 | }; | |
5582 | ||
5583 | static unsigned short crc16_byte(unsigned short crc, const unsigned char data) | |
5584 | { | |
5585 | return (crc >> 8) ^ crc16_table[(crc ^ data) & 0xff]; | |
5586 | } | |
5587 | /** | |
5588 | * crc16 - compute the CRC-16 for the data buffer | |
5589 | * @crc: previous CRC value | |
5590 | * @buffer: data pointer | |
5591 | * @len: number of bytes in the buffer | |
5592 | * | |
5593 | * Returns the updated CRC value. | |
5594 | */ | |
5595 | static unsigned short crc16(unsigned short crc, unsigned char const *buffer, int len) | |
5596 | { | |
5597 | while (len--) | |
5598 | crc = crc16_byte(crc, *buffer++); | |
5599 | return crc; | |
5600 | } | |
5601 | ||
361ebed5 HSDT |
/*
 * Build a software (driver-generated) log record for @event/@data,
 * stamp it, and save/propagate it.
 *
 * The record is marked with SSD_LOG_SW_IDX so ssd_parse_log() knows it
 * is not controller-originated.  Persisted only when its level reaches
 * SSD_LOG_LEVEL; errors additionally raise the alarm.  Smart counters
 * are updated and ssd_handle_event() is invoked regardless.
 */
static int ssd_save_swlog(struct ssd_device *dev, uint16_t event, uint32_t data)
{
	struct ssd_log log;
	struct timeval tv;
	int level;
	int ret = 0;

	if (unlikely(mode != SSD_DRV_MODE_STANDARD))
		return 0;

	memset(&log, 0, sizeof(struct ssd_log));

	do_gettimeofday(&tv);
	log.ctrl_idx = SSD_LOG_SW_IDX;	/* software-generated record */
	log.time = tv.tv_sec;
	log.le.event = event;
	log.le.data.val = data;

	/* tag the entry and checksum its first 14 bytes (the fields above)
	 * so the new-format record can be distinguished/validated */
	log.le.mod = SSD_DIF_WITH_OLD_LOG;
	log.le.idx = crc16(0,(const unsigned char *)&log,14);

	level = ssd_parse_log(dev, &log, 0);	/* classify only, no print */
	if (level >= SSD_LOG_LEVEL) {
		ret = ssd_save_log(dev, &log);
	}

	/* set alarm */
	if (SSD_LOG_LEVEL_ERR == level) {
		ssd_set_alarm(dev);
	}

	/* update smart */
	dev->smart.log_info.nr_log++;
	dev->smart.log_info.stat[level]++;

	/* handle event */
	ssd_handle_event(dev, event, level);

	return ret;
}
5641 | ||
5642 | static int ssd_gen_swlog(struct ssd_device *dev, uint16_t event, uint32_t data) | |
5643 | { | |
5644 | struct ssd_log_entry le; | |
5645 | int ret; | |
5646 | ||
5647 | if (unlikely(mode != SSD_DRV_MODE_STANDARD)) | |
5648 | return 0; | |
5649 | ||
5650 | /* slave port ? */ | |
5651 | if (dev->slave) { | |
5652 | return 0; | |
5653 | } | |
5654 | ||
5655 | memset(&le, 0, sizeof(struct ssd_log_entry)); | |
5656 | le.event = event; | |
5657 | le.data.val = data; | |
5658 | ||
5659 | ret = sfifo_put(&dev->log_fifo, &le); | |
5660 | if (ret) { | |
5661 | return ret; | |
5662 | } | |
5663 | ||
5664 | if (test_bit(SSD_INIT_WORKQ, &dev->state)) { | |
5665 | queue_work(dev->workq, &dev->log_work); | |
5666 | } | |
5667 | ||
5668 | return 0; | |
5669 | } | |
5670 | ||
5671 | static int ssd_do_swlog(struct ssd_device *dev) | |
5672 | { | |
5673 | struct ssd_log_entry le; | |
5674 | int ret = 0; | |
5675 | ||
5676 | memset(&le, 0, sizeof(struct ssd_log_entry)); | |
5677 | while (!sfifo_get(&dev->log_fifo, &le)) { | |
5678 | ret = ssd_save_swlog(dev, le.event, le.data.val); | |
5679 | if (ret) { | |
5680 | break; | |
5681 | } | |
5682 | } | |
5683 | ||
5684 | return ret; | |
5685 | } | |
5686 | ||
/* Erase the on-flash log area and reset the in-memory log count.
 * No-op for protocol V3 and older, or when no logs are stored.
 * Returns 0 on success or the error from ssd_spi_erase().
 */
static int __ssd_clear_log(struct ssd_device *dev)
{
	uint32_t off, length;
	int ret;

	if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) {
		return 0;
	}

	if (dev->internal_log.nr_log == 0) {
		return 0;
	}

	/* serialize against other internal-log users */
	mutex_lock(&dev->internal_log_mutex);

	off = dev->rom_info.log_base;
	length = dev->rom_info.log_sz;

	ret = ssd_spi_erase(dev, off, length);
	if (ret) {
		hio_warn("%s: log erase: failed\n", dev->name);
		goto out;
	}

	dev->internal_log.nr_log = 0;

out:
	mutex_unlock(&dev->internal_log_mutex);
	return ret;
}
5717 | ||
5718 | static int ssd_clear_log(struct ssd_device *dev) | |
5719 | { | |
5720 | int ret; | |
5721 | ||
5722 | ret = __ssd_clear_log(dev); | |
5723 | if(!ret) { | |
5724 | ssd_gen_swlog(dev, SSD_LOG_CLEAR_LOG, 0); | |
5725 | } | |
5726 | ||
5727 | return ret; | |
5728 | } | |
5729 | ||
/* Fetch the pending hardware log entries of one controller and process
 * them: persist entries at or above SSD_LOG_LEVEL, raise the alarm on
 * error level, update smart counters, and apply special handling for
 * SEU (single-event-upset) faults.
 * @dev: device instance
 * @ctrl_idx: controller index to read from
 * @buf: scratch buffer (at least dev->hw_info.log_sz bytes)
 * Returns 0 on success or the error from ssd_read_log().
 */
static int ssd_do_log(struct ssd_device *dev, int ctrl_idx, void *buf)
{
	struct ssd_log_entry *le;
	struct ssd_log log;
	struct timeval tv;
	int nr_log = 0;
	int level;
	int ret = 0;

	ret = ssd_read_log(dev, ctrl_idx, buf, &nr_log);
	if (ret) {
		return ret;
	}

	do_gettimeofday(&tv);

	/* all entries of this batch share one timestamp and source index */
	log.time = tv.tv_sec;
	log.ctrl_idx = ctrl_idx;

	le = (ssd_log_entry_t *)buf;
	while (nr_log > 0) {
		memcpy(&log.le, le, sizeof(struct ssd_log_entry));

		log.le.mod = SSD_DIF_WITH_OLD_LOG;
		/* checksum over the first 14 bytes of the record */
		log.le.idx = crc16(0,(const unsigned char *)&log,14);

		level = ssd_parse_log(dev, &log, 1);
		if (level >= SSD_LOG_LEVEL) {
			ssd_save_log(dev, &log);
		}

		/* set alarm */
		if (SSD_LOG_LEVEL_ERR == level) {
			ssd_set_alarm(dev);
		}

		dev->smart.log_info.nr_log++;
		if (SSD_LOG_SEU_FAULT != le->event && SSD_LOG_SEU_FAULT1 != le->event) {
			dev->smart.log_info.stat[level]++;
		} else {
			/* SEU fault */

			/* log to the volatile log info */
			dev->log_info.nr_log++;
			dev->log_info.stat[level]++;

			/* request a firmware reload via the reload register */
			dev->reload_fw = 1;
			ssd_reg32_write(dev->ctrlp + SSD_RELOAD_FW_REG, SSD_RELOAD_FLAG);

			if (le->event != SSD_LOG_SEU_FAULT1) {
				dev->has_non_0x98_reg_access = 1;
			}

			/*dev->readonly = 1;
			set_disk_ro(dev->gd, 1);
			hio_warn("%s: switched to read-only mode.\n", dev->name);*/
		}

		/* handle event */
		ssd_handle_event(dev, le->event, level);

		le++;
		nr_log--;
	}

	return 0;
}
5796 | ||
/* Deferred work bound to dev->log_work (queued by ssd_gen_swlog() and
 * ssd_start_workq()): drains hardware logs when SSD_LOG_HW is set, then
 * drains the software log fifo.
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
static void ssd_log_worker(void *data)
{
	struct ssd_device *dev = (struct ssd_device *)data;
#else
static void ssd_log_worker(struct work_struct *work)
{
	struct ssd_device *dev = container_of(work, struct ssd_device, log_work);
#endif
	int i;
	int ret;

	if (!test_bit(SSD_LOG_ERR, &dev->state) && test_bit(SSD_ONLINE, &dev->state)) {
		/* alloc log buf lazily; kept for reuse across invocations */
		if (!dev->log_buf) {
			dev->log_buf = kmalloc(dev->hw_info.log_sz, GFP_KERNEL);
			if (!dev->log_buf) {
				hio_warn("%s: ssd_log_worker: no mem\n", dev->name);
				return;
			}
		}

		/* get log from every controller when the HW flag is pending */
		if (test_and_clear_bit(SSD_LOG_HW, &dev->state)) {
			for (i=0; i<dev->hw_info.nr_ctrl; i++) {
				ret = ssd_do_log(dev, i, dev->log_buf);
				if (ret) {
					/* latch the error; later runs skip HW logs */
					(void)test_and_set_bit(SSD_LOG_ERR, &dev->state);
					hio_warn("%s: do log fail\n", dev->name);
				}
			}
		}
	}

	ret = ssd_do_swlog(dev);
	if (ret) {
		hio_warn("%s: do swlog fail\n", dev->name);
	}
}
5836 | ||
5837 | static void ssd_cleanup_log(struct ssd_device *dev) | |
5838 | { | |
5839 | if (dev->log_buf) { | |
5840 | kfree(dev->log_buf); | |
5841 | dev->log_buf = NULL; | |
5842 | } | |
5843 | ||
5844 | sfifo_free(&dev->log_fifo); | |
5845 | ||
5846 | if (dev->internal_log.log) { | |
5847 | vfree(dev->internal_log.log); | |
1197134c | 5848 | dev->internal_log.nr_log = 0; |
361ebed5 HSDT |
5849 | dev->internal_log.log = NULL; |
5850 | } | |
5851 | } | |
5852 | ||
/* Set up logging: init the internal-log mutex and the deferred worker,
 * allocate the in-memory copy of the flash log area plus the software
 * log fifo, then (protocol > V3) scan the flash log area into memory,
 * remembering the index of the most recent power-on record.
 * Returns 0 on success or a negative errno; errors are suppressed when
 * the driver is not in standard mode.
 */
static int ssd_init_log(struct ssd_device *dev)
{
	struct ssd_log *log;
	uint32_t off, size;
	uint32_t len = 0;
	int ret = 0;

	mutex_init(&dev->internal_log_mutex);

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
	INIT_WORK(&dev->log_work, ssd_log_worker, dev);
#else
	INIT_WORK(&dev->log_work, ssd_log_worker);
#endif

	off = dev->rom_info.log_base;
	size = dev->rom_info.log_sz;

	dev->internal_log.nr_log = 0;
	dev->internal_log.log = vmalloc(size);
	if (!dev->internal_log.log) {
		ret = -ENOMEM;
		goto out_alloc_log;
	}

	ret = sfifo_alloc(&dev->log_fifo, SSD_LOG_FIFO_SZ, sizeof(struct ssd_log_entry));
	if (ret < 0) {
		goto out_alloc_log_fifo;
	}

	/* protocol V3 and older has no readable log area in flash */
	if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) {
		return 0;
	}

	/* scan record by record until free space is hit
	 * (ctrl_idx == 0xff — presumably erased flash) */
	log = (struct ssd_log *)dev->internal_log.log;
	while (len < size) {
		ret = ssd_spi_read(dev, log, off, sizeof(struct ssd_log));
		if (ret) {
			goto out_read_log;
		}

		if (log->ctrl_idx == 0xff) {
			break;
		}

		/* track the position of the newest power-on record */
		if (log->le.event == SSD_LOG_POWER_ON) {
			if (dev->internal_log.nr_log > dev->last_poweron_id) {
				dev->last_poweron_id = dev->internal_log.nr_log;
			}
		}

		dev->internal_log.nr_log++;
		log++;
		len += sizeof(struct ssd_log);
		off += sizeof(struct ssd_log);
	}

	return 0;

out_read_log:
	sfifo_free(&dev->log_fifo);
out_alloc_log_fifo:
	vfree(dev->internal_log.log);
	dev->internal_log.log = NULL;
	dev->internal_log.nr_log = 0;
out_alloc_log:
	/* skip error if not in standard mode */
	if (mode != SSD_DRV_MODE_STANDARD) {
		ret = 0;
	}
	return ret;
}
5925 | ||
5926 | /* work queue */ | |
5927 | static void ssd_stop_workq(struct ssd_device *dev) | |
5928 | { | |
5929 | test_and_clear_bit(SSD_INIT_WORKQ, &dev->state); | |
5930 | flush_workqueue(dev->workq); | |
5931 | } | |
5932 | ||
5933 | static void ssd_start_workq(struct ssd_device *dev) | |
5934 | { | |
5935 | (void)test_and_set_bit(SSD_INIT_WORKQ, &dev->state); | |
5936 | ||
5937 | /* log ? */ | |
5938 | queue_work(dev->workq, &dev->log_work); | |
5939 | } | |
5940 | ||
5941 | static void ssd_cleanup_workq(struct ssd_device *dev) | |
5942 | { | |
5943 | flush_workqueue(dev->workq); | |
5944 | destroy_workqueue(dev->workq); | |
5945 | dev->workq = NULL; | |
5946 | } | |
5947 | ||
5948 | static int ssd_init_workq(struct ssd_device *dev) | |
5949 | { | |
5950 | int ret = 0; | |
5951 | ||
5952 | dev->workq = create_singlethread_workqueue(dev->name); | |
5953 | if (!dev->workq) { | |
5954 | ret = -ESRCH; | |
5955 | goto out; | |
5956 | } | |
5957 | ||
5958 | out: | |
5959 | return ret; | |
5960 | } | |
5961 | ||
5962 | /* rom */ | |
/* Derive the NOR flash layout (firmware images, log, smart, VPD and
 * label areas) for the detected protocol version, then bring up the
 * SPI flash interface.
 * Returns the result of ssd_init_spi().
 */
static int ssd_init_rom_info(struct ssd_device *dev)
{
	uint32_t val;

	mutex_init(&dev->spi_mutex);
	mutex_init(&dev->i2c_mutex);

	if (dev->protocol_info.ver < SSD_PROTOCOL_V3) {
		/* fix bug: read data to clear status */
		(void)ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_RDATA);

		/* legacy layout: fixed compiled-in offsets and sizes */
		dev->rom_info.size = SSD_ROM_SIZE;
		dev->rom_info.block_size = SSD_ROM_BLK_SIZE;
		dev->rom_info.page_size = SSD_ROM_PAGE_SIZE;

		dev->rom_info.bridge_fw_base = SSD_ROM_BRIDGE_FW_BASE;
		dev->rom_info.bridge_fw_sz = SSD_ROM_BRIDGE_FW_SIZE;
		dev->rom_info.nr_bridge_fw = SSD_ROM_NR_BRIDGE_FW;

		dev->rom_info.ctrl_fw_base = SSD_ROM_CTRL_FW_BASE;
		dev->rom_info.ctrl_fw_sz = SSD_ROM_CTRL_FW_SIZE;
		dev->rom_info.nr_ctrl_fw = SSD_ROM_NR_CTRL_FW;

		dev->rom_info.log_sz = SSD_ROM_LOG_SZ;

		dev->rom_info.vp_base = SSD_ROM_VP_BASE;
		dev->rom_info.label_base = SSD_ROM_LABEL_BASE;
	} else if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
		/* V3.x: geometry and image bases come from hardware registers;
		 * bm-fw/log/smart areas are stacked after the controller fw */
		val = ssd_reg32_read(dev->ctrlp + SSD_ROM_INFO_REG);
		dev->rom_info.size = 0x100000 * (1U << (val & 0xFF));
		dev->rom_info.block_size = 0x10000 * (1U << ((val>>8) & 0xFF));
		dev->rom_info.page_size = (val>>16) & 0xFFFF;

		val = ssd_reg32_read(dev->ctrlp + SSD_ROM_BRIDGE_FW_INFO_REG);
		dev->rom_info.bridge_fw_base = dev->rom_info.block_size * (val & 0xFFFF);
		dev->rom_info.bridge_fw_sz = dev->rom_info.block_size * ((val>>16) & 0x3FFF);
		dev->rom_info.nr_bridge_fw = ((val >> 30) & 0x3) + 1;

		val = ssd_reg32_read(dev->ctrlp + SSD_ROM_CTRL_FW_INFO_REG);
		dev->rom_info.ctrl_fw_base = dev->rom_info.block_size * (val & 0xFFFF);
		dev->rom_info.ctrl_fw_sz = dev->rom_info.block_size * ((val>>16) & 0x3FFF);
		dev->rom_info.nr_ctrl_fw = ((val >> 30) & 0x3) + 1;

		dev->rom_info.bm_fw_base = dev->rom_info.ctrl_fw_base + (dev->rom_info.nr_ctrl_fw * dev->rom_info.ctrl_fw_sz);
		dev->rom_info.bm_fw_sz = SSD_PV3_ROM_BM_FW_SZ;
		dev->rom_info.nr_bm_fw = SSD_PV3_ROM_NR_BM_FW;

		dev->rom_info.log_base = dev->rom_info.bm_fw_base + (dev->rom_info.nr_bm_fw * dev->rom_info.bm_fw_sz);
		dev->rom_info.log_sz = SSD_ROM_LOG_SZ;

		dev->rom_info.smart_base = dev->rom_info.log_base + dev->rom_info.log_sz;
		dev->rom_info.smart_sz = SSD_PV3_ROM_SMART_SZ;
		dev->rom_info.nr_smart = SSD_PV3_ROM_NR_SMART;

		val = ssd_reg32_read(dev->ctrlp + SSD_ROM_VP_INFO_REG);
		dev->rom_info.vp_base = dev->rom_info.block_size * val;
		dev->rom_info.label_base = dev->rom_info.vp_base + dev->rom_info.block_size;
		/* keep the label area inside the flash */
		if (dev->rom_info.label_base >= dev->rom_info.size) {
			dev->rom_info.label_base = dev->rom_info.vp_base - dev->rom_info.block_size;
		}
	} else {
		/* V3.2+: label/smart/log areas grow downwards from the VPD base */
		val = ssd_reg32_read(dev->ctrlp + SSD_ROM_INFO_REG);
		dev->rom_info.size = 0x100000 * (1U << (val & 0xFF));
		dev->rom_info.block_size = 0x10000 * (1U << ((val>>8) & 0xFF));
		dev->rom_info.page_size = (val>>16) & 0xFFFF;

		val = ssd_reg32_read(dev->ctrlp + SSD_ROM_BRIDGE_FW_INFO_REG);
		dev->rom_info.bridge_fw_base = dev->rom_info.block_size * (val & 0xFFFF);
		dev->rom_info.bridge_fw_sz = dev->rom_info.block_size * ((val>>16) & 0x3FFF);
		dev->rom_info.nr_bridge_fw = ((val >> 30) & 0x3) + 1;

		val = ssd_reg32_read(dev->ctrlp + SSD_ROM_CTRL_FW_INFO_REG);
		dev->rom_info.ctrl_fw_base = dev->rom_info.block_size * (val & 0xFFFF);
		dev->rom_info.ctrl_fw_sz = dev->rom_info.block_size * ((val>>16) & 0x3FFF);
		dev->rom_info.nr_ctrl_fw = ((val >> 30) & 0x3) + 1;

		val = ssd_reg32_read(dev->ctrlp + SSD_ROM_VP_INFO_REG);
		dev->rom_info.vp_base = dev->rom_info.block_size * val;
		dev->rom_info.label_base = dev->rom_info.vp_base - SSD_PV3_2_ROM_SEC_SZ;

		dev->rom_info.nr_smart = SSD_PV3_ROM_NR_SMART;
		dev->rom_info.smart_sz = SSD_PV3_2_ROM_SEC_SZ;
		dev->rom_info.smart_base = dev->rom_info.label_base - (dev->rom_info.smart_sz * dev->rom_info.nr_smart);
		if (dev->rom_info.smart_sz > dev->rom_info.block_size) {
			dev->rom_info.smart_sz = dev->rom_info.block_size;
		}

		dev->rom_info.log_sz = SSD_PV3_2_ROM_LOG_SZ;
		dev->rom_info.log_base = dev->rom_info.smart_base - dev->rom_info.log_sz;
	}

	return ssd_init_spi(dev);
}
6056 | ||
6057 | /* smart */ | |
/* Fold the current block-layer I/O statistics and the per-queue
 * error/ECC counters into @smart.
 * Returns 1 when the smart data should be flushed to flash (at least 60
 * seconds of run time accumulated since dev->uptime), 0 otherwise; also
 * returns 0 untouched when the block device is not initialized.
 */
static int ssd_update_smart(struct ssd_device *dev, struct ssd_smart *smart)
{
	struct timeval tv;
	uint64_t run_time;
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
	struct hd_struct *part;
	int cpu;
#endif
	int i, j;
	int ret = 0;

	if (!test_bit(SSD_INIT_BD, &dev->state)) {
		return 0;
	}

	do_gettimeofday(&tv);
	/* guard against the wall clock having been set backwards */
	if ((uint64_t)tv.tv_sec < dev->uptime) {
		run_time = 0;
	} else {
		run_time = tv.tv_sec - dev->uptime;
	}

	/* avoid frequently update */
	if (run_time >= 60) {
		ret = 1;
	}

	/* io stat */
	smart->io_stat.run_time += run_time;

#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
	/* modern kernels: per-partition statistics on part0 */
	cpu = part_stat_lock();
	part = &dev->gd->part0;
	part_round_stats(cpu, part);
	part_stat_unlock();

	smart->io_stat.nr_read += part_stat_read(part, ios[READ]);
	smart->io_stat.nr_write += part_stat_read(part, ios[WRITE]);
	smart->io_stat.rsectors += part_stat_read(part, sectors[READ]);
	smart->io_stat.wsectors += part_stat_read(part, sectors[WRITE]);
#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14))
	/* older kernels: whole-disk statistics, indexed accessors */
	preempt_disable();
	disk_round_stats(dev->gd);
	preempt_enable();

	smart->io_stat.nr_read += disk_stat_read(dev->gd, ios[READ]);
	smart->io_stat.nr_write += disk_stat_read(dev->gd, ios[WRITE]);
	smart->io_stat.rsectors += disk_stat_read(dev->gd, sectors[READ]);
	smart->io_stat.wsectors += disk_stat_read(dev->gd, sectors[WRITE]);
#else
	/* oldest kernels: named whole-disk counters */
	preempt_disable();
	disk_round_stats(dev->gd);
	preempt_enable();

	smart->io_stat.nr_read += disk_stat_read(dev->gd, reads);
	smart->io_stat.nr_write += disk_stat_read(dev->gd, writes);
	smart->io_stat.rsectors += disk_stat_read(dev->gd, read_sectors);
	smart->io_stat.wsectors += disk_stat_read(dev->gd, write_sectors);
#endif

	smart->io_stat.nr_to += atomic_read(&dev->tocnt);

	/* accumulate per-queue error counters */
	for (i=0; i<dev->nr_queue; i++) {
		smart->io_stat.nr_rwerr += dev->queue[i].io_stat.nr_rwerr;
		smart->io_stat.nr_ioerr += dev->queue[i].io_stat.nr_ioerr;
	}

	/* accumulate per-queue ECC bit-flip histograms */
	for (i=0; i<dev->nr_queue; i++) {
		for (j=0; j<SSD_ECC_MAX_FLIP; j++) {
			smart->ecc_info.bitflip[j] += dev->queue[i].ecc_info.bitflip[j];
		}
	}

	//dev->uptime = tv.tv_sec;

	return ret;
}
6135 | ||
/* Erase every smart copy in flash and reset the in-memory smart data
 * and temporary statistics, keeping (and bumping) the version number.
 * No-op for protocol V3 and older.
 * Returns 0 on success or the error from ssd_spi_erase().
 */
static int __ssd_clear_smart(struct ssd_device *dev)
{
	struct timeval tv;
	uint64_t sversion;
	uint32_t off, length;
	int i;
	int ret;

	if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) {
		return 0;
	}

	/* clear smart: erase all stored copies in one range */
	off = dev->rom_info.smart_base;
	length = dev->rom_info.smart_sz * dev->rom_info.nr_smart;

	ret = ssd_spi_erase(dev, off, length);
	if (ret) {
		hio_warn("%s: info erase: failed\n", dev->name);
		goto out;
	}

	/* reset the in-memory copy but keep the version monotonic */
	sversion = dev->smart.version;

	memset(&dev->smart, 0, sizeof(struct ssd_smart));
	dev->smart.version = sversion + 1;
	dev->smart.magic = SSD_SMART_MAGIC;

	/* clear all tmp acc */
	for (i=0; i<dev->nr_queue; i++) {
		memset(&(dev->queue[i].io_stat), 0, sizeof(struct ssd_io_stat));
		memset(&(dev->queue[i].ecc_info), 0, sizeof(struct ssd_ecc_info));
	}

	atomic_set(&dev->tocnt, 0);

	/* clear tmp log info */
	memset(&dev->log_info, 0, sizeof(struct ssd_log_info));

	/* restart the run-time accounting window */
	do_gettimeofday(&tv);
	dev->uptime = tv.tv_sec;

	/* clear alarm ? */
	//ssd_clear_alarm(dev);
out:
	return ret;
}
6183 | ||
/* Clear all warning-class counters (log statistics, timeout and error
 * counters, ECC counters) in memory and rewrite the cleaned smart data
 * to every flash copy. No-op for protocol V3 and older.
 * NOTE(review): dev->smart.version is incremented only after the flash
 * copies are written, so the stored copies keep the pre-increment
 * version — confirm this ordering is intended (cf. __ssd_clear_smart
 * and ssd_save_smart, which bump the version before writing).
 * Returns 0 on success or a SPI erase/write error.
 */
static int __ssd_clear_warning(struct ssd_device *dev)
{
	uint32_t off, size;
	int i, ret = 0;

	if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) {
		return 0;
	}

	/* clear log_info warning */
	memset(&dev->smart.log_info, 0, sizeof(dev->smart.log_info));

	/* clear io_stat warning */
	dev->smart.io_stat.nr_to = 0;
	dev->smart.io_stat.nr_rwerr = 0;
	dev->smart.io_stat.nr_ioerr = 0;

	/* clear ecc_info warning */
	memset(&dev->smart.ecc_info, 0, sizeof(dev->smart.ecc_info));

	/* clear queued warnings */
	for (i=0; i<dev->nr_queue; i++) {
		/* queued io_stat warning */
		dev->queue[i].io_stat.nr_to = 0;
		dev->queue[i].io_stat.nr_rwerr = 0;
		dev->queue[i].io_stat.nr_ioerr = 0;

		/* queued ecc_info warning */
		memset(&(dev->queue[i].ecc_info), 0, sizeof(dev->queue[i].ecc_info));
	}

	/* write smart back to nor */
	for (i = 0; i < dev->rom_info.nr_smart; i++) {
		off = dev->rom_info.smart_base + (dev->rom_info.smart_sz * i);
		size = dev->rom_info.smart_sz;

		/* erase the whole slot, then write the (smaller) struct */
		ret = ssd_spi_erase(dev, off, size);
		if (ret) {
			hio_warn("%s: warning erase: failed with code 1\n", dev->name);
			goto out;
		}

		size = sizeof(struct ssd_smart);

		ret = ssd_spi_write(dev, &dev->smart, off, size);
		if (ret) {
			hio_warn("%s: warning erase: failed with code 2\n", dev->name);
			goto out;
		}
	}

	dev->smart.version++;

	/* clear cmd timeout warning */
	atomic_set(&dev->tocnt, 0);

	/* clear tmp log info */
	memset(&dev->log_info, 0, sizeof(dev->log_info));

out:
	return ret;
}
6246 | ||
da3355df SF |
6247 | static int ssd_clear_smart(struct ssd_device *dev) |
6248 | { | |
6249 | int ret; | |
6250 | ||
6251 | ret = __ssd_clear_smart(dev); | |
6252 | if(!ret) { | |
6253 | ssd_gen_swlog(dev, SSD_LOG_CLEAR_SMART, 0); | |
6254 | } | |
6255 | ||
6256 | return ret; | |
6257 | } | |
6258 | ||
6259 | static int ssd_clear_warning(struct ssd_device *dev) | |
6260 | { | |
6261 | int ret; | |
6262 | ||
6263 | ret = __ssd_clear_warning(dev); | |
6264 | if(!ret) { | |
6265 | ssd_gen_swlog(dev, SSD_LOG_CLEAR_WARNING, 0); | |
6266 | } | |
6267 | ||
6268 | return ret; | |
6269 | } | |
6270 | ||
361ebed5 HSDT |
/* Persist the smart data to every redundant flash copy, but only when
 * ssd_update_smart() reports enough accumulated run time.
 * No-op outside standard mode or for protocol V3 and older.
 * Returns 0 on success (or when skipped) or a SPI erase/write error.
 */
static int ssd_save_smart(struct ssd_device *dev)
{
	uint32_t off, size;
	int i;
	int ret = 0;

	if (unlikely(mode != SSD_DRV_MODE_STANDARD))
		return 0;

	if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) {
		return 0;
	}

	/* skip the flash write when too little run time has accumulated */
	if (!ssd_update_smart(dev, &dev->smart)) {
		return 0;
	}

	dev->smart.version++;

	for (i=0; i<dev->rom_info.nr_smart; i++) {
		off = dev->rom_info.smart_base + (dev->rom_info.smart_sz * i);
		size = dev->rom_info.smart_sz;

		/* erase the whole slot, then write the (smaller) struct */
		ret = ssd_spi_erase(dev, off, size);
		if (ret) {
			hio_warn("%s: info erase failed\n", dev->name);
			goto out;
		}

		size = sizeof(struct ssd_smart);

		ret = ssd_spi_write(dev, &dev->smart, off, size);
		if (ret) {
			hio_warn("%s: info write failed\n", dev->name);
			goto out;
		}

		//xx
	}

out:
	return ret;
}
6314 | ||
/* Load the newest smart copy from flash, reconcile its log statistics
 * with a recount of the internal (flash) log, and rewrite any stale or
 * corrupt copies. Also detects a fresh power-on (interrupt-interval
 * register reads zero — presumably cleared by a power cycle) and emits
 * an SSD_LOG_POWER_ON software log entry for it.
 * Returns 0 on success or a negative errno; errors are suppressed when
 * the driver is not in standard mode.
 */
static int ssd_init_smart(struct ssd_device *dev)
{
	struct ssd_smart *smart;
	struct timeval tv;
	uint32_t off, size, val;
	int i;
	int ret = 0;
	int update_smart = 0;

	/* start the run-time accounting window */
	do_gettimeofday(&tv);
	dev->uptime = tv.tv_sec;

	if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) {
		return 0;
	}

	smart = kmalloc(sizeof(struct ssd_smart) * SSD_ROM_NR_SMART_MAX, GFP_KERNEL);
	if (!smart) {
		ret = -ENOMEM;
		goto out_nomem;
	}

	memset(&dev->smart, 0, sizeof(struct ssd_smart));

	/* read smart: adopt the copy with the highest version number */
	for (i=0; i<dev->rom_info.nr_smart; i++) {
		memset(&smart[i], 0, sizeof(struct ssd_smart));

		off = dev->rom_info.smart_base + (dev->rom_info.smart_sz * i);
		size = sizeof(struct ssd_smart);

		ret = ssd_spi_read(dev, &smart[i], off, size);
		if (ret) {
			hio_warn("%s: info read failed\n", dev->name);
			goto out;
		}

		if (smart[i].magic != SSD_SMART_MAGIC) {
			/* invalid copy; zero it so it is rewritten below */
			smart[i].magic = 0;
			smart[i].version = 0;
			continue;
		}

		if (smart[i].version > dev->smart.version) {
			memcpy(&dev->smart, &smart[i], sizeof(struct ssd_smart));
		}
	}

	if (dev->smart.magic != SSD_SMART_MAGIC) {
		/* first time power up */
		dev->smart.magic = SSD_SMART_MAGIC;
		dev->smart.version = 1;
	}

	val = ssd_reg32_read(dev->ctrlp + SSD_INTR_INTERVAL_REG);
	if (!val) {
		/* fresh power-on: no stored power-on record applies */
		dev->last_poweron_id = ~0;
		ssd_gen_swlog(dev, SSD_LOG_POWER_ON, dev->hw_info.bridge_ver);
		if (dev->smart.io_stat.nr_to) {
			dev->smart.io_stat.nr_to = 0;
			update_smart = 1;
		}
	}

	/* check log info */
	{
		struct ssd_log_info log_info;
		struct ssd_log *log = (struct ssd_log *)dev->internal_log.log;

		memset(&log_info, 0, sizeof(struct ssd_log_info));

		/* recount level statistics from the raw internal log */
		while (log_info.nr_log < dev->internal_log.nr_log) {
			int skip = 0;

			switch (log->le.event) {
			/* skip the volatile log info */
			case SSD_LOG_SEU_FAULT:
			case SSD_LOG_SEU_FAULT1:
				skip = 1;
				break;
			case SSD_LOG_TIMEOUT:
				/* timeouts recorded before the last power-on do not count */
				skip = (dev->last_poweron_id >= log_info.nr_log);
				break;
			}

			if (!skip) {
				log_info.stat[ssd_parse_log(dev, log, 0)]++;
			}

			log_info.nr_log++;
			log++;
		}

		/* check: adopt the recount if it disagrees with the stored stats */
		for (i=(SSD_LOG_NR_LEVEL-1); i>=0; i--) {
			if (log_info.stat[i] != dev->smart.log_info.stat[i]) {
				/* unclean */
				memcpy(&dev->smart.log_info, &log_info, sizeof(struct ssd_log_info));
				update_smart = 1;
				break;
			}
		}

		if (update_smart) {
			++dev->smart.version;
		}
	}

	/* rewrite every copy that is stale or was corrupt */
	for (i=0; i<dev->rom_info.nr_smart; i++) {
		if (smart[i].magic == SSD_SMART_MAGIC && smart[i].version == dev->smart.version) {
			continue;
		}

		off = dev->rom_info.smart_base + (dev->rom_info.smart_sz * i);
		size = dev->rom_info.smart_sz;

		ret = ssd_spi_erase(dev, off, size);
		if (ret) {
			hio_warn("%s: info erase failed\n", dev->name);
			goto out;
		}

		size = sizeof(struct ssd_smart);
		ret = ssd_spi_write(dev, &dev->smart, off, size);
		if (ret) {
			hio_warn("%s: info write failed\n", dev->name);
			goto out;
		}

		//xx
	}

	/* sync smart with alarm led */
	if (dev->smart.io_stat.nr_to || dev->smart.io_stat.nr_rwerr || dev->smart.log_info.stat[SSD_LOG_LEVEL_ERR]) {
		hio_warn("%s: some fault found in the history info\n", dev->name);
		ssd_set_alarm(dev);
	}

out:
	kfree(smart);
out_nomem:
	/* skip error if not in standard mode */
	if (mode != SSD_DRV_MODE_STANDARD) {
		ret = 0;
	}
	return ret;
}
6462 | ||
6463 | /* bm */ | |
/* Read the battery module firmware version from the manufacturer data
 * flash page over SMBus. The subclass-id write and the page read form a
 * pair, serialized against other data-flash accesses by bm_mutex.
 * Returns 0 on success, -EINVAL on NULL arguments, -EIO when the read
 * version looks implausible (top nibble set), or an SMBus error code.
 */
static int __ssd_bm_get_version(struct ssd_device *dev, uint16_t *ver)
{
	struct ssd_bm_manufacturer_data bm_md = {0};
	uint16_t sc_id = SSD_BM_SYSTEM_DATA_SUBCLASS_ID;
	uint8_t cmd;
	int ret = 0;

	if (!dev || !ver) {
		return -EINVAL;
	}

	mutex_lock(&dev->bm_mutex);

	/* select the data-flash subclass to read */
	cmd = SSD_BM_DATA_FLASH_SUBCLASS_ID;
	ret = ssd_smbus_write_word(dev, SSD_BM_SLAVE_ADDRESS, cmd, (uint8_t *)&sc_id);
	if (ret) {
		goto out;
	}

	/* read page 1 of the selected subclass */
	cmd = SSD_BM_DATA_FLASH_SUBCLASS_ID_PAGE1;
	ret = ssd_smbus_read_block(dev, SSD_BM_SLAVE_ADDRESS, cmd, sizeof(struct ssd_bm_manufacturer_data), (uint8_t *)&bm_md);
	if (ret) {
		goto out;
	}

	/* sanity check: reject versions with the high nibble set */
	if (bm_md.firmware_ver & 0xF000) {
		ret = -EIO;
		goto out;
	}

	*ver = bm_md.firmware_ver;

out:
	mutex_unlock(&dev->bm_mutex);
	return ret;
}
6500 | ||
6501 | static int ssd_bm_get_version(struct ssd_device *dev, uint16_t *ver) | |
6502 | { | |
6503 | uint16_t tmp = 0; | |
6504 | int i = SSD_BM_RETRY_MAX; | |
6505 | int ret = 0; | |
6506 | ||
6507 | while (i-- > 0) { | |
6508 | ret = __ssd_bm_get_version(dev, &tmp); | |
6509 | if (!ret) { | |
6510 | break; | |
6511 | } | |
6512 | } | |
6513 | if (ret) { | |
6514 | return ret; | |
6515 | } | |
6516 | ||
6517 | *ver = tmp; | |
6518 | ||
6519 | return 0; | |
6520 | } | |
6521 | ||
/* Read the number of backup capacitors from the battery module's
 * configuration-registers page over SMBus; the paired subclass write
 * and page read are serialized by bm_mutex.
 * The cc field stores the count minus one; cc values outside 1..4 are
 * rejected with -EIO, so *nr_cap ends up in 2..5.
 * Returns 0 on success, -EIO on bad data, or an SMBus error code.
 */
static int __ssd_bm_nr_cap(struct ssd_device *dev, int *nr_cap)
{
	struct ssd_bm_configuration_registers bm_cr;
	uint16_t sc_id = SSD_BM_CONFIGURATION_REGISTERS_ID;
	uint8_t cmd;
	int ret;

	mutex_lock(&dev->bm_mutex);

	/* select the configuration-registers subclass */
	cmd = SSD_BM_DATA_FLASH_SUBCLASS_ID;
	ret = ssd_smbus_write_word(dev, SSD_BM_SLAVE_ADDRESS, cmd, (uint8_t *)&sc_id);
	if (ret) {
		goto out;
	}

	/* read page 1 of the selected subclass */
	cmd = SSD_BM_DATA_FLASH_SUBCLASS_ID_PAGE1;
	ret = ssd_smbus_read_block(dev, SSD_BM_SLAVE_ADDRESS, cmd, sizeof(struct ssd_bm_configuration_registers), (uint8_t *)&bm_cr);
	if (ret) {
		goto out;
	}

	if (bm_cr.operation_cfg.cc == 0 || bm_cr.operation_cfg.cc > 4) {
		ret = -EIO;
		goto out;
	}

	*nr_cap = bm_cr.operation_cfg.cc + 1;

out:
	mutex_unlock(&dev->bm_mutex);
	return ret;
}
6554 | ||
6555 | static int ssd_bm_nr_cap(struct ssd_device *dev, int *nr_cap) | |
6556 | { | |
6557 | int tmp = 0; | |
6558 | int i = SSD_BM_RETRY_MAX; | |
6559 | int ret = 0; | |
6560 | ||
6561 | while (i-- > 0) { | |
6562 | ret = __ssd_bm_nr_cap(dev, &tmp); | |
6563 | if (!ret) { | |
6564 | break; | |
6565 | } | |
6566 | } | |
6567 | if (ret) { | |
6568 | return ret; | |
6569 | } | |
6570 | ||
6571 | *nr_cap = tmp; | |
6572 | ||
6573 | return 0; | |
6574 | } | |
6575 | ||
6576 | static int ssd_bm_enter_cap_learning(struct ssd_device *dev) | |
6577 | { | |
6578 | uint16_t buf = SSD_BM_ENTER_CAP_LEARNING; | |
6579 | uint8_t cmd = SSD_BM_MANUFACTURERACCESS; | |
6580 | int ret; | |
6581 | ||
6582 | ret = ssd_smbus_write_word(dev, SSD_BM_SLAVE_ADDRESS, cmd, (uint8_t *)&buf); | |
6583 | if (ret) { | |
6584 | goto out; | |
6585 | } | |
6586 | ||
6587 | out: | |
6588 | return ret; | |
6589 | } | |
6590 | ||
6591 | static int ssd_bm_get_sfstatus(struct ssd_device *dev, uint16_t *status) | |
6592 | { | |
6593 | uint16_t val = 0; | |
6594 | uint8_t cmd = SSD_BM_SAFETYSTATUS; | |
6595 | int ret; | |
6596 | ||
6597 | ret = ssd_smbus_read_word(dev, SSD_BM_SLAVE_ADDRESS, cmd, (uint8_t *)&val); | |
6598 | if (ret) { | |
6599 | goto out; | |
6600 | } | |
6601 | ||
6602 | *status = val; | |
6603 | out: | |
6604 | return ret; | |
6605 | } | |
6606 | ||
6607 | static int ssd_bm_get_opstatus(struct ssd_device *dev, uint16_t *status) | |
6608 | { | |
6609 | uint16_t val = 0; | |
6610 | uint8_t cmd = SSD_BM_OPERATIONSTATUS; | |
6611 | int ret; | |
6612 | ||
6613 | ret = ssd_smbus_read_word(dev, SSD_BM_SLAVE_ADDRESS, cmd, (uint8_t *)&val); | |
6614 | if (ret) { | |
6615 | goto out; | |
6616 | } | |
6617 | ||
6618 | *status = val; | |
6619 | out: | |
6620 | return ret; | |
6621 | } | |
6622 | ||
/*
 * Read the complete battery-module status block over SMBus.
 *
 * Walks the ssd_bm_sbs command table (terminated by a NULL desc entry).
 * Each entry names one SBS register: its SMBus command code, access
 * width (byte/word), unit, and the offset of the destination field in
 * struct ssd_bm (resolved by bm_var()).  Results are staged in a local
 * struct and copied to the caller only if every read succeeded.
 *
 * Returns 0 on success, the SMBus error of a failed read, or -1 for an
 * unrecognized size/unit in the table.
 */
static int ssd_get_bmstruct(struct ssd_device *dev, struct ssd_bm *bm_status_out)
{
	struct sbs_cmd *bm_sbs = ssd_bm_sbs;
	struct ssd_bm bm_status;
	uint8_t buf[2] = {0, };
	uint16_t val = 0;
	uint16_t cval;
	int ret = 0;

	memset(&bm_status, 0, sizeof(struct ssd_bm));

	while (bm_sbs->desc != NULL) {
		/* fetch the raw register value at the entry's access width */
		switch (bm_sbs->size) {
		case SBS_SIZE_BYTE:
			ret = ssd_smbus_read_byte(dev, SSD_BM_SLAVE_ADDRESS, bm_sbs->cmd, buf);
			if (ret) {
				//printf("Error: smbus read byte %#x\n", bm_sbs->cmd);
				goto out;
			}
			val = buf[0];
			break;
		case SBS_SIZE_WORD:
			ret = ssd_smbus_read_word(dev, SSD_BM_SLAVE_ADDRESS, bm_sbs->cmd, (uint8_t *)&val);
			if (ret) {
				//printf("Error: smbus read word %#x\n", bm_sbs->cmd);
				goto out;
			}
			//val = *(uint16_t *)buf;
			break;
		default:
			/* malformed table entry */
			ret = -1;
			goto out;
			break;
		}

		/* convert and store into the staging struct according to unit */
		switch (bm_sbs->unit) {
		case SBS_UNIT_VALUE:
			*(uint16_t *)bm_var(&bm_status, bm_sbs->off) = val & bm_sbs->mask;
			break;
		case SBS_UNIT_TEMPERATURE:
			/* SBS temperature is in 0.1 K; 2731 = 273.1 K, so this
			 * yields whole degrees Celsius */
			cval = (uint16_t)(val - 2731) / 10;
			*(uint16_t *)bm_var(&bm_status, bm_sbs->off) = cval;
			break;
		case SBS_UNIT_VOLTAGE:
			*(uint16_t *)bm_var(&bm_status, bm_sbs->off) = val;
			break;
		case SBS_UNIT_CURRENT:
			*(uint16_t *)bm_var(&bm_status, bm_sbs->off) = val;
			break;
		case SBS_UNIT_ESR:
			*(uint16_t *)bm_var(&bm_status, bm_sbs->off) = val;
			break;
		case SBS_UNIT_PERCENT:
			*(uint16_t *)bm_var(&bm_status, bm_sbs->off) = val;
			break;
		case SBS_UNIT_CAPACITANCE:
			*(uint16_t *)bm_var(&bm_status, bm_sbs->off) = val;
			break;
		default:
			/* malformed table entry */
			ret = -1;
			goto out;
			break;
		}

		bm_sbs++;
	}

	/* all reads succeeded: publish the consistent snapshot */
	memcpy(bm_status_out, &bm_status, sizeof(struct ssd_bm));

out:
	return ret;
}
6695 | ||
6696 | static int __ssd_bm_status(struct ssd_device *dev, int *status) | |
6697 | { | |
6698 | struct ssd_bm bm_status = {0}; | |
6699 | int nr_cap = 0; | |
6700 | int i; | |
6701 | int ret = 0; | |
6702 | ||
6703 | ret = ssd_get_bmstruct(dev, &bm_status); | |
6704 | if (ret) { | |
6705 | goto out; | |
6706 | } | |
6707 | ||
6708 | /* capacitor voltage */ | |
6709 | ret = ssd_bm_nr_cap(dev, &nr_cap); | |
6710 | if (ret) { | |
6711 | goto out; | |
6712 | } | |
6713 | ||
6714 | for (i=0; i<nr_cap; i++) { | |
6715 | if (bm_status.cap_volt[i] < SSD_BM_CAP_VOLT_MIN) { | |
6716 | *status = SSD_BMSTATUS_WARNING; | |
6717 | goto out; | |
6718 | } | |
6719 | } | |
6720 | ||
6721 | /* Safety Status */ | |
6722 | if (bm_status.sf_status) { | |
6723 | *status = SSD_BMSTATUS_WARNING; | |
6724 | goto out; | |
6725 | } | |
6726 | ||
6727 | /* charge status */ | |
6728 | if (!((bm_status.op_status >> 12) & 0x1)) { | |
6729 | *status = SSD_BMSTATUS_CHARGING; | |
6730 | }else{ | |
6731 | *status = SSD_BMSTATUS_OK; | |
6732 | } | |
6733 | ||
6734 | out: | |
6735 | return ret; | |
6736 | } | |
6737 | ||
6738 | static void ssd_set_flush_timeout(struct ssd_device *dev, int mode); | |
6739 | ||
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
/*
 * Periodic battery-module maintenance work.
 * Pre-2.6.20 workqueues pass the raw data pointer to the handler.
 */
static void ssd_bm_worker(void *data)
{
	struct ssd_device *dev = (struct ssd_device *)data;
#else
/*
 * Periodic battery-module maintenance work (2.6.20+ signature).
 * Triggers a capacitance-learning cycle when the module's operation
 * status indicates none has run yet.
 */
static void ssd_bm_worker(struct work_struct *work)
{
	struct ssd_device *dev = container_of(work, struct ssd_device, bm_work);
#endif

	uint16_t opstatus;
	int ret = 0;

	/* only relevant when the driver runs in standard mode */
	if (mode != SSD_DRV_MODE_STANDARD) {
		return;
	}

	/* operation status is only available from protocol V3.1.1 on */
	if (dev->protocol_info.ver < SSD_PROTOCOL_V3_1_1) {
		return;
	}

	/* only boards with a supercap power-loss-protection module */
	if (dev->hw_info_ext.plp_type != SSD_PLP_SCAP) {
		return;
	}

	ret = ssd_bm_get_opstatus(dev, &opstatus);
	if (ret) {
		hio_warn("%s: get bm operationstatus failed\n", dev->name);
		return;
	}

	/* need cap learning ? */
	if (!(opstatus & 0xF0)) {
		ret = ssd_bm_enter_cap_learning(dev);
		if (ret) {
			hio_warn("%s: enter capacitance learning failed\n", dev->name);
			return;
		}
	}
}
6780 | ||
6781 | static void ssd_bm_routine_start(void *data) | |
6782 | { | |
6783 | struct ssd_device *dev; | |
6784 | ||
6785 | if (!data) { | |
6786 | return; | |
6787 | } | |
6788 | dev = data; | |
6789 | ||
6790 | if (test_bit(SSD_INIT_WORKQ, &dev->state)) { | |
6791 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
6792 | queue_work(dev->workq, &dev->bm_work); | |
6793 | } else { | |
6794 | queue_work(dev->workq, &dev->capmon_work); | |
6795 | } | |
6796 | } | |
6797 | } | |
6798 | ||
6799 | /* CAP */ | |
/*
 * Measure the power-loss-protection capacitor's capacitance in hardware.
 *
 * Sequence: wait one LM80 conversion interval, poll voltage U1 until the
 * cap is fully charged, sample U2, then trigger the hardware learn cycle
 * via SSD_PL_CAP_LEARN_REG and poll it for completion.  The measurement
 * in the register's upper bits plus the two voltages feed
 * SSD_PL_CAP_LEARN() to produce *cap.
 *
 * Returns 0 on success; -ETIMEDOUT on charge/learn timeout or a
 * hardware-flagged failure, -EINVAL on implausible readings, or the
 * SMBus error.  Pre-V3.2 devices and early FHHL boards report *cap = 0.
 */
static int ssd_do_cap_learn(struct ssd_device *dev, uint32_t *cap)
{
	uint32_t u1, u2, t;
	uint16_t val = 0;
	int wait = 0;
	int ret = 0;

	/* no learnable PL capacitor before protocol V3.2 */
	if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
		*cap = 0;
		return 0;
	}

	/* FHHL boards before PCB rev 'B' lack the measuring circuit */
	if (dev->hw_info_ext.form_factor == SSD_FORM_FACTOR_FHHL && dev->hw_info.pcb_ver < 'B') {
		*cap = 0;
		return 0;
	}

	/* make sure the lm80 voltage value is updated */
	msleep(SSD_LM80_CONV_INTERVAL);

	/* check if full charged */
	wait = 0;
	for (;;) {
		ret = ssd_smbus_read_word(dev, SSD_SENSOR_LM80_SADDRESS, SSD_PL_CAP_U1, (uint8_t *)&val);
		if (ret) {
			/* log each sensor fault only once */
			if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80), &dev->hwmon)) {
				ssd_generate_sensor_fault_log(dev, SSD_LOG_SENSOR_FAULT, SSD_SENSOR_LM80_SADDRESS,ret);
			}
			goto out;
		}
		u1 = SSD_LM80_CONVERT_VOLT(u16_swap(val));
		if (SSD_PL_CAP_VOLT(u1) >= SSD_PL_CAP_VOLT_FULL) {
			break;
		}

		wait++;
		if (wait > SSD_PL_CAP_CHARGE_MAX_WAIT) {
			ret = -ETIMEDOUT;
			goto out;
		}
		msleep(SSD_PL_CAP_CHARGE_WAIT);
	}

	ret = ssd_smbus_read_word(dev, SSD_SENSOR_LM80_SADDRESS, SSD_PL_CAP_U2, (uint8_t *)&val);
	if (ret) {
		if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80), &dev->hwmon)) {
			ssd_generate_sensor_fault_log(dev, SSD_LOG_SENSOR_FAULT, SSD_SENSOR_LM80_SADDRESS,ret);
		}
		goto out;
	}
	u2 = SSD_LM80_CONVERT_VOLT(u16_swap(val));

	/* equal voltages would make the learn formula degenerate */
	if (u1 == u2) {
		ret = -EINVAL;
		goto out;
	}

	/* enter cap learn */
	ssd_reg32_write(dev->ctrlp + SSD_PL_CAP_LEARN_REG, 0x1);

	wait = 0;
	for (;;) {
		msleep(SSD_PL_CAP_LEARN_WAIT);

		t = ssd_reg32_read(dev->ctrlp + SSD_PL_CAP_LEARN_REG);
		/* bit 1 clear: learn cycle finished */
		if (!((t >> 1) & 0x1)) {
			break;
		}

		wait++;
		if (wait > SSD_PL_CAP_LEARN_MAX_WAIT) {
			ret = -ETIMEDOUT;
			goto out;
		}
	}

	/* bit 4: hardware reports the learn cycle failed */
	if ((t >> 4) & 0x1) {
		ret = -ETIMEDOUT;
		goto out;
	}

	/* upper bits carry the raw measurement used by SSD_PL_CAP_LEARN */
	t = (t >> 8);
	if (0 == t) {
		ret = -EINVAL;
		goto out;
	}

	*cap = SSD_PL_CAP_LEARN(u1, u2, t);

out:
	return ret;
}
6892 | ||
6893 | static int ssd_cap_learn(struct ssd_device *dev, uint32_t *cap) | |
6894 | { | |
6895 | int ret = 0; | |
6896 | ||
6897 | if (!dev || !cap) { | |
6898 | return -EINVAL; | |
6899 | } | |
6900 | ||
6901 | mutex_lock(&dev->bm_mutex); | |
6902 | ||
6903 | ssd_stop_workq(dev); | |
6904 | ||
6905 | ret = ssd_do_cap_learn(dev, cap); | |
6906 | if (ret) { | |
6907 | ssd_gen_swlog(dev, SSD_LOG_CAP_LEARN_FAULT, 0); | |
6908 | goto out; | |
6909 | } | |
6910 | ||
6911 | ssd_gen_swlog(dev, SSD_LOG_CAP_STATUS, *cap); | |
6912 | ||
6913 | out: | |
6914 | ssd_start_workq(dev); | |
6915 | mutex_unlock(&dev->bm_mutex); | |
6916 | ||
6917 | return ret; | |
6918 | } | |
6919 | ||
/*
 * Wait for the power-loss-protection capacitor to reach its ready
 * voltage, then arm LM80 under-voltage monitoring for it.
 *
 * Polls the LM80 cap-voltage input until SSD_PL_CAP_VOLT_READY or until
 * SSD_PL_CAP_CHARGE_MAX_WAIT polls elapse (-ETIMEDOUT plus a
 * CAP_VOLT_FAULT log entry).  On success, programs the low alarm limit
 * and enables the capacitor input channel.
 *
 * Returns 0 or a negative error; errors are suppressed when the driver
 * is not in standard mode.
 */
static int ssd_check_pl_cap(struct ssd_device *dev)
{
	uint32_t u1;
	uint16_t val = 0;
	uint8_t low = 0;
	int wait = 0;
	int ret = 0;

	/* no PL capacitor before protocol V3.2 */
	if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
		return 0;
	}

	/* FHHL boards before PCB rev 'B' lack the cap circuitry */
	if (dev->hw_info_ext.form_factor == SSD_FORM_FACTOR_FHHL && dev->hw_info.pcb_ver < 'B') {
		return 0;
	}

	/* cap ready ? */
	wait = 0;
	for (;;) {
		ret = ssd_smbus_read_word(dev, SSD_SENSOR_LM80_SADDRESS, SSD_PL_CAP_U1, (uint8_t *)&val);
		if (ret) {
			/* log each sensor fault only once */
			if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80), &dev->hwmon)) {
				ssd_generate_sensor_fault_log(dev, SSD_LOG_SENSOR_FAULT, SSD_SENSOR_LM80_SADDRESS,ret);
			}
			goto out;
		}
		u1 = SSD_LM80_CONVERT_VOLT(u16_swap(val));
		if (SSD_PL_CAP_VOLT(u1) >= SSD_PL_CAP_VOLT_READY) {
			break;
		}

		wait++;
		if (wait > SSD_PL_CAP_CHARGE_MAX_WAIT) {
			ret = -ETIMEDOUT;
			ssd_gen_swlog(dev, SSD_LOG_CAP_VOLT_FAULT, SSD_PL_CAP_VOLT(u1));
			goto out;
		}
		msleep(SSD_PL_CAP_CHARGE_WAIT);
	}

	/* program the LM80 low-voltage alarm limit for the cap input */
	low = ssd_lm80_limit[SSD_LM80_IN_CAP].low;
	ret = ssd_smbus_write_byte(dev, SSD_SENSOR_LM80_SADDRESS, SSD_LM80_REG_IN_MIN(SSD_LM80_IN_CAP), &low);
	if (ret) {
		goto out;
	}

	/* enable cap INx */
	ret = ssd_lm80_enable_in(dev, SSD_SENSOR_LM80_SADDRESS, SSD_LM80_IN_CAP);
	if (ret) {
		if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80), &dev->hwmon)) {
			ssd_generate_sensor_fault_log(dev, SSD_LOG_SENSOR_FAULT, SSD_SENSOR_LM80_SADDRESS,ret);
		}
		goto out;
	}

out:
	/* skip error if not in standard mode */
	if (mode != SSD_DRV_MODE_STANDARD) {
		ret = 0;
	}
	return ret;
}
6982 | ||
6983 | static int ssd_check_pl_cap_fast(struct ssd_device *dev) | |
6984 | { | |
6985 | uint32_t u1; | |
6986 | uint16_t val = 0; | |
6987 | int ret = 0; | |
6988 | ||
6989 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
6990 | return 0; | |
6991 | } | |
6992 | ||
6993 | if (dev->hw_info_ext.form_factor == SSD_FORM_FACTOR_FHHL && dev->hw_info.pcb_ver < 'B') { | |
6994 | return 0; | |
6995 | } | |
6996 | ||
6997 | /* cap ready ? */ | |
6998 | ret = ssd_smbus_read_word(dev, SSD_SENSOR_LM80_SADDRESS, SSD_PL_CAP_U1, (uint8_t *)&val); | |
6999 | if (ret) { | |
7000 | goto out; | |
7001 | } | |
7002 | u1 = SSD_LM80_CONVERT_VOLT(u16_swap(val)); | |
7003 | if (SSD_PL_CAP_VOLT(u1) < SSD_PL_CAP_VOLT_READY) { | |
7004 | ret = 1; | |
7005 | } | |
7006 | ||
7007 | out: | |
7008 | return ret; | |
7009 | } | |
7010 | ||
7011 | static int ssd_init_pl_cap(struct ssd_device *dev) | |
7012 | { | |
7013 | int ret = 0; | |
7014 | ||
7015 | /* set here: user write mode */ | |
7016 | dev->user_wmode = wmode; | |
7017 | ||
7018 | mutex_init(&dev->bm_mutex); | |
7019 | ||
7020 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
7021 | uint32_t val; | |
7022 | val = ssd_reg32_read(dev->ctrlp + SSD_BM_FAULT_REG); | |
7023 | if ((val >> 1) & 0x1) { | |
7024 | (void)test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon); | |
7025 | } | |
7026 | } else { | |
7027 | ret = ssd_check_pl_cap(dev); | |
7028 | if (ret) { | |
7029 | (void)test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon); | |
7030 | } | |
7031 | } | |
7032 | ||
7033 | return 0; | |
7034 | } | |
7035 | ||
7036 | /* label */ | |
/*
 * Ensure a fixed-size label field is NUL-terminated.
 *
 * If a terminator already exists within the first @len bytes the field
 * is left untouched.  Otherwise the LAST byte is overwritten with '\0',
 * truncating one character.  (The original code cleared str[0] instead,
 * which silently wiped the entire field's content.)
 */
static void __end_str(char *str, int len)
{
	int i;

	if (len <= 0)
		return;

	for (i = 0; i < len; i++) {
		if (*(str + i) == '\0')
			return;
	}

	/* no terminator found: truncate at the last byte */
	str[len - 1] = '\0';
}
7047 | ||
/*
 * Load the product label from SPI flash into the device structure.
 *
 * Pre-V3.2 devices use struct ssd_label, newer ones struct ssd_labelv3;
 * in both cases every text field is forced to be NUL-terminated after
 * the read.  On a read failure the cached label is zeroed so callers see
 * empty fields.
 *
 * Returns the SPI read status; errors are suppressed when the driver is
 * not in standard mode.
 */
static int ssd_init_label(struct ssd_device *dev)
{
	uint32_t off;
	uint32_t size;
	int ret;

	/* label location */
	off = dev->rom_info.label_base;

	if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
		size = sizeof(struct ssd_label);

		/* read label */
		ret = ssd_spi_read(dev, &dev->label, off, size);
		if (ret) {
			/* unreadable label: present empty fields */
			memset(&dev->label, 0, size);
			goto out;
		}

		/* sanitize: guarantee每 field is a valid C string */
		__end_str(dev->label.date, SSD_LABEL_FIELD_SZ);
		__end_str(dev->label.sn, SSD_LABEL_FIELD_SZ);
		__end_str(dev->label.part, SSD_LABEL_FIELD_SZ);
		__end_str(dev->label.desc, SSD_LABEL_FIELD_SZ);
		__end_str(dev->label.other, SSD_LABEL_FIELD_SZ);
		__end_str(dev->label.maf, SSD_LABEL_FIELD_SZ);
	} else {
		size = sizeof(struct ssd_labelv3);

		/* read label */
		ret = ssd_spi_read(dev, &dev->labelv3, off, size);
		if (ret) {
			/* unreadable label: present empty fields */
			memset(&dev->labelv3, 0, size);
			goto out;
		}

		/* sanitize: guarantee every field is a valid C string */
		__end_str(dev->labelv3.boardtype, SSD_LABEL_FIELD_SZ);
		__end_str(dev->labelv3.barcode, SSD_LABEL_FIELD_SZ);
		__end_str(dev->labelv3.item, SSD_LABEL_FIELD_SZ);
		__end_str(dev->labelv3.description, SSD_LABEL_DESC_SZ);
		__end_str(dev->labelv3.manufactured, SSD_LABEL_FIELD_SZ);
		__end_str(dev->labelv3.vendorname, SSD_LABEL_FIELD_SZ);
		__end_str(dev->labelv3.issuenumber, SSD_LABEL_FIELD_SZ);
		__end_str(dev->labelv3.cleicode, SSD_LABEL_FIELD_SZ);
		__end_str(dev->labelv3.bom, SSD_LABEL_FIELD_SZ);
	}

out:
	/* skip error if not in standard mode */
	if (mode != SSD_DRV_MODE_STANDARD) {
		ret = 0;
	}
	return ret;
}
7101 | ||
7102 | int ssd_get_label(struct block_device *bdev, struct ssd_label *label) | |
7103 | { | |
7104 | struct ssd_device *dev; | |
7105 | ||
7106 | if (!bdev || !label || !(bdev->bd_disk)) { | |
7107 | return -EINVAL; | |
7108 | } | |
7109 | ||
7110 | dev = bdev->bd_disk->private_data; | |
7111 | ||
7112 | if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) { | |
7113 | memset(label, 0, sizeof(struct ssd_label)); | |
7114 | memcpy(label->date, dev->labelv3.manufactured, SSD_LABEL_FIELD_SZ); | |
7115 | memcpy(label->sn, dev->labelv3.barcode, SSD_LABEL_FIELD_SZ); | |
7116 | memcpy(label->desc, dev->labelv3.boardtype, SSD_LABEL_FIELD_SZ); | |
7117 | memcpy(label->maf, dev->labelv3.vendorname, SSD_LABEL_FIELD_SZ); | |
7118 | } else { | |
7119 | memcpy(label, &dev->label, sizeof(struct ssd_label)); | |
7120 | } | |
7121 | ||
7122 | return 0; | |
7123 | } | |
7124 | ||
7125 | static int __ssd_get_version(struct ssd_device *dev, struct ssd_version_info *ver) | |
7126 | { | |
7127 | uint16_t bm_ver = 0; | |
7128 | int ret = 0; | |
7129 | ||
7130 | if (dev->protocol_info.ver > SSD_PROTOCOL_V3 && dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
7131 | ret = ssd_bm_get_version(dev, &bm_ver); | |
7132 | if(ret){ | |
7133 | goto out; | |
7134 | } | |
7135 | } | |
7136 | ||
7137 | ver->bridge_ver = dev->hw_info.bridge_ver; | |
7138 | ver->ctrl_ver = dev->hw_info.ctrl_ver; | |
7139 | ver->bm_ver = bm_ver; | |
7140 | ver->pcb_ver = dev->hw_info.pcb_ver; | |
7141 | ver->upper_pcb_ver = dev->hw_info.upper_pcb_ver; | |
7142 | ||
7143 | out: | |
7144 | return ret; | |
7145 | ||
7146 | } | |
7147 | ||
7148 | int ssd_get_version(struct block_device *bdev, struct ssd_version_info *ver) | |
7149 | { | |
7150 | struct ssd_device *dev; | |
7151 | int ret; | |
7152 | ||
7153 | if (!bdev || !ver || !(bdev->bd_disk)) { | |
7154 | return -EINVAL; | |
7155 | } | |
7156 | ||
7157 | dev = bdev->bd_disk->private_data; | |
7158 | ||
7159 | mutex_lock(&dev->fw_mutex); | |
7160 | ret = __ssd_get_version(dev, ver); | |
7161 | mutex_unlock(&dev->fw_mutex); | |
7162 | ||
7163 | return ret; | |
7164 | } | |
7165 | ||
7166 | static int __ssd_get_temperature(struct ssd_device *dev, int *temp) | |
7167 | { | |
7168 | uint64_t val; | |
7169 | uint32_t off; | |
7170 | int max = -300; | |
7171 | int cur; | |
7172 | int i; | |
7173 | ||
7174 | if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) { | |
7175 | *temp = 0; | |
7176 | return 0; | |
7177 | } | |
7178 | ||
7179 | if (finject) { | |
7180 | if (dev->db_info.type == SSD_DEBUG_LOG && | |
7181 | (dev->db_info.data.log.event == SSD_LOG_OVER_TEMP || | |
7182 | dev->db_info.data.log.event == SSD_LOG_NORMAL_TEMP || | |
7183 | dev->db_info.data.log.event == SSD_LOG_WARN_TEMP)) { | |
7184 | *temp = (int)dev->db_info.data.log.extra; | |
7185 | return 0; | |
7186 | } | |
7187 | } | |
7188 | ||
7189 | for (i=0; i<dev->hw_info.nr_ctrl; i++) { | |
7190 | off = SSD_CTRL_TEMP_REG0 + i * sizeof(uint64_t); | |
7191 | ||
7192 | val = ssd_reg_read(dev->ctrlp + off); | |
7193 | if (val == 0xffffffffffffffffull) { | |
7194 | continue; | |
7195 | } | |
7196 | ||
7197 | cur = (int)CUR_TEMP(val); | |
7198 | if (cur >= max) { | |
7199 | max = cur; | |
7200 | } | |
7201 | } | |
7202 | ||
7203 | *temp = max; | |
7204 | ||
7205 | return 0; | |
7206 | } | |
7207 | ||
7208 | int ssd_get_temperature(struct block_device *bdev, int *temp) | |
7209 | { | |
7210 | struct ssd_device *dev; | |
7211 | int ret; | |
7212 | ||
7213 | if (!bdev || !temp || !(bdev->bd_disk)) { | |
7214 | return -EINVAL; | |
7215 | } | |
7216 | ||
7217 | dev = bdev->bd_disk->private_data; | |
7218 | ||
7219 | ||
7220 | mutex_lock(&dev->fw_mutex); | |
7221 | ret = __ssd_get_temperature(dev, temp); | |
7222 | mutex_unlock(&dev->fw_mutex); | |
7223 | ||
7224 | return ret; | |
7225 | } | |
7226 | ||
7227 | int ssd_set_otprotect(struct block_device *bdev, int otprotect) | |
7228 | { | |
7229 | struct ssd_device *dev; | |
7230 | ||
7231 | if (!bdev || !(bdev->bd_disk)) { | |
7232 | return -EINVAL; | |
7233 | } | |
7234 | ||
7235 | dev = bdev->bd_disk->private_data; | |
7236 | ssd_set_ot_protect(dev, !!otprotect); | |
7237 | ||
7238 | return 0; | |
7239 | } | |
7240 | ||
7241 | int ssd_bm_status(struct block_device *bdev, int *status) | |
7242 | { | |
7243 | struct ssd_device *dev; | |
7244 | int ret = 0; | |
7245 | ||
7246 | if (!bdev || !status || !(bdev->bd_disk)) { | |
7247 | return -EINVAL; | |
7248 | } | |
7249 | ||
7250 | dev = bdev->bd_disk->private_data; | |
7251 | ||
7252 | mutex_lock(&dev->fw_mutex); | |
7253 | if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) { | |
7254 | if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) { | |
7255 | *status = SSD_BMSTATUS_WARNING; | |
7256 | } else { | |
7257 | *status = SSD_BMSTATUS_OK; | |
7258 | } | |
7259 | } else if(dev->protocol_info.ver > SSD_PROTOCOL_V3) { | |
7260 | ret = __ssd_bm_status(dev, status); | |
7261 | } else { | |
7262 | *status = SSD_BMSTATUS_OK; | |
7263 | } | |
7264 | mutex_unlock(&dev->fw_mutex); | |
7265 | ||
7266 | return ret; | |
7267 | } | |
7268 | ||
7269 | int ssd_get_pciaddr(struct block_device *bdev, struct pci_addr *paddr) | |
7270 | { | |
7271 | struct ssd_device *dev; | |
7272 | ||
7273 | if (!bdev || !paddr || !bdev->bd_disk) { | |
7274 | return -EINVAL; | |
7275 | } | |
7276 | ||
7277 | dev = bdev->bd_disk->private_data; | |
7278 | ||
7279 | paddr->domain = pci_domain_nr(dev->pdev->bus); | |
7280 | paddr->bus = dev->pdev->bus->number; | |
7281 | paddr->slot = PCI_SLOT(dev->pdev->devfn); | |
7282 | paddr->func= PCI_FUNC(dev->pdev->devfn); | |
7283 | ||
7284 | return 0; | |
7285 | } | |
7286 | ||
7287 | /* acc */ | |
7288 | static int ssd_bb_acc(struct ssd_device *dev, struct ssd_acc_info *acc) | |
7289 | { | |
7290 | uint32_t val; | |
7291 | int ctrl, chip; | |
7292 | ||
7293 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_1_1) { | |
7294 | return -EOPNOTSUPP; | |
7295 | } | |
7296 | ||
7297 | acc->threshold_l1 = ssd_reg32_read(dev->ctrlp + SSD_BB_THRESHOLD_L1_REG); | |
7298 | if (0xffffffffull == acc->threshold_l1) { | |
7299 | return -EIO; | |
7300 | } | |
7301 | acc->threshold_l2 = ssd_reg32_read(dev->ctrlp + SSD_BB_THRESHOLD_L2_REG); | |
7302 | if (0xffffffffull == acc->threshold_l2) { | |
7303 | return -EIO; | |
7304 | } | |
7305 | acc->val = 0; | |
7306 | ||
7307 | for (ctrl=0; ctrl<dev->hw_info.nr_ctrl; ctrl++) { | |
7308 | for (chip=0; chip<dev->hw_info.nr_chip; chip++) { | |
7309 | val = ssd_reg32_read(dev->ctrlp + SSD_BB_ACC_REG0 + (SSD_CTRL_REG_ZONE_SZ * ctrl) + (SSD_BB_ACC_REG_SZ * chip)); | |
7310 | if (0xffffffffull == acc->val) { | |
7311 | return -EIO; | |
7312 | } | |
7313 | if (val > acc->val) { | |
7314 | acc->val = val; | |
7315 | } | |
7316 | } | |
7317 | } | |
7318 | ||
7319 | return 0; | |
7320 | } | |
7321 | ||
7322 | static int ssd_ec_acc(struct ssd_device *dev, struct ssd_acc_info *acc) | |
7323 | { | |
7324 | uint32_t val; | |
7325 | int ctrl, chip; | |
7326 | ||
7327 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_1_1) { | |
7328 | return -EOPNOTSUPP; | |
7329 | } | |
7330 | ||
7331 | acc->threshold_l1 = ssd_reg32_read(dev->ctrlp + SSD_EC_THRESHOLD_L1_REG); | |
7332 | if (0xffffffffull == acc->threshold_l1) { | |
7333 | return -EIO; | |
7334 | } | |
7335 | acc->threshold_l2 = ssd_reg32_read(dev->ctrlp + SSD_EC_THRESHOLD_L2_REG); | |
7336 | if (0xffffffffull == acc->threshold_l2) { | |
7337 | return -EIO; | |
7338 | } | |
7339 | acc->val = 0; | |
7340 | ||
7341 | for (ctrl=0; ctrl<dev->hw_info.nr_ctrl; ctrl++) { | |
7342 | for (chip=0; chip<dev->hw_info.nr_chip; chip++) { | |
7343 | val = ssd_reg32_read(dev->ctrlp + SSD_EC_ACC_REG0 + (SSD_CTRL_REG_ZONE_SZ * ctrl) + (SSD_EC_ACC_REG_SZ * chip)); | |
7344 | if (0xffffffffull == acc->val) { | |
7345 | return -EIO; | |
7346 | } | |
7347 | ||
7348 | if (val > acc->val) { | |
7349 | acc->val = val; | |
7350 | } | |
7351 | } | |
7352 | } | |
7353 | ||
7354 | return 0; | |
7355 | } | |
7356 | ||
7357 | ||
7358 | /* ram r&w */ | |
/*
 * Read one chunk (at most ram_max_len bytes) of controller RAM into a
 * kernel buffer via a single DMA request.
 *
 * @buf must be DMA-able; length/offset must be ram_align-aligned, within
 * ram_size, and length within (0, ram_max_len].  The device addresses
 * its RAM in ram_align units, hence the divisions before building the
 * message.  Returns 0 or a negative error (-EINVAL on bad parameters,
 * DMA-mapping or request errors otherwise).
 */
static int ssd_ram_read_4k(struct ssd_device *dev, void *buf, size_t length, loff_t ofs, int ctrl_idx)
{
	struct ssd_ram_op_msg *msg;
	dma_addr_t buf_dma;
	size_t len = length;
	loff_t ofs_w = ofs;
	int ret = 0;

	/* range, size and alignment validation */
	if (ctrl_idx >= dev->hw_info.nr_ctrl || (uint64_t)(ofs + length) > dev->hw_info.ram_size
		|| !length || length > dev->hw_info.ram_max_len
		|| (length & (dev->hw_info.ram_align - 1)) != 0 || ((uint64_t)ofs & (dev->hw_info.ram_align - 1)) != 0) {
		return -EINVAL;
	}

	/* convert byte length/offset to device ram_align units */
	len /= dev->hw_info.ram_align;
	do_div(ofs_w, dev->hw_info.ram_align);

	buf_dma = pci_map_single(dev->pdev, buf, length, PCI_DMA_FROMDEVICE);
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
	/* old kernels: dma_mapping_error() took only the dma address */
	ret = dma_mapping_error(buf_dma);
#else
	ret = dma_mapping_error(&(dev->pdev->dev), buf_dma);
#endif
	if (ret) {
		hio_warn("%s: unable to map read DMA buffer\n", dev->name);
		goto out_dma_mapping;
	}

	msg = (struct ssd_ram_op_msg *)ssd_get_dmsg(dev);

	msg->fun = SSD_FUNC_RAM_READ;
	msg->ctrl_idx = ctrl_idx;
	msg->start = (uint32_t)ofs_w;
	msg->length = len;
	msg->buf = buf_dma;

	ret = ssd_do_request(dev, READ, msg, NULL);
	ssd_put_dmsg(msg);

	pci_unmap_single(dev->pdev, buf_dma, length, PCI_DMA_FROMDEVICE);

out_dma_mapping:
	return ret;
}
7403 | ||
/*
 * Write one chunk (at most ram_max_len bytes) of a kernel buffer into
 * controller RAM via a single DMA request.
 *
 * Mirror image of ssd_ram_read_4k(): same validation and unit
 * conversion, but maps the buffer to-device and issues a WRITE request.
 * Returns 0 or a negative error.
 */
static int ssd_ram_write_4k(struct ssd_device *dev, void *buf, size_t length, loff_t ofs, int ctrl_idx)
{
	struct ssd_ram_op_msg *msg;
	dma_addr_t buf_dma;
	size_t len = length;
	loff_t ofs_w = ofs;
	int ret = 0;

	/* range, size and alignment validation */
	if (ctrl_idx >= dev->hw_info.nr_ctrl || (uint64_t)(ofs + length) > dev->hw_info.ram_size
		|| !length || length > dev->hw_info.ram_max_len
		|| (length & (dev->hw_info.ram_align - 1)) != 0 || ((uint64_t)ofs & (dev->hw_info.ram_align - 1)) != 0) {
		return -EINVAL;
	}

	/* convert byte length/offset to device ram_align units */
	len /= dev->hw_info.ram_align;
	do_div(ofs_w, dev->hw_info.ram_align);

	buf_dma = pci_map_single(dev->pdev, buf, length, PCI_DMA_TODEVICE);
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
	/* old kernels: dma_mapping_error() took only the dma address */
	ret = dma_mapping_error(buf_dma);
#else
	ret = dma_mapping_error(&(dev->pdev->dev), buf_dma);
#endif
	if (ret) {
		hio_warn("%s: unable to map write DMA buffer\n", dev->name);
		goto out_dma_mapping;
	}

	msg = (struct ssd_ram_op_msg *)ssd_get_dmsg(dev);

	msg->fun = SSD_FUNC_RAM_WRITE;
	msg->ctrl_idx = ctrl_idx;
	msg->start = (uint32_t)ofs_w;
	msg->length = len;
	msg->buf = buf_dma;

	ret = ssd_do_request(dev, WRITE, msg, NULL);
	ssd_put_dmsg(msg);

	pci_unmap_single(dev->pdev, buf_dma, length, PCI_DMA_TODEVICE);

out_dma_mapping:
	return ret;

}
7449 | ||
7450 | static int ssd_ram_read(struct ssd_device *dev, void *buf, size_t length, loff_t ofs, int ctrl_idx) | |
7451 | { | |
7452 | int left = length; | |
7453 | size_t len; | |
7454 | loff_t off = ofs; | |
7455 | int ret = 0; | |
7456 | ||
7457 | if (ctrl_idx >= dev->hw_info.nr_ctrl || (uint64_t)(ofs + length) > dev->hw_info.ram_size || !length | |
7458 | || (length & (dev->hw_info.ram_align - 1)) != 0 || ((uint64_t)ofs & (dev->hw_info.ram_align - 1)) != 0) { | |
7459 | return -EINVAL; | |
7460 | } | |
7461 | ||
7462 | while (left > 0) { | |
7463 | len = dev->hw_info.ram_max_len; | |
7464 | if (left < (int)dev->hw_info.ram_max_len) { | |
7465 | len = left; | |
7466 | } | |
7467 | ||
7468 | ret = ssd_ram_read_4k(dev, buf, len, off, ctrl_idx); | |
7469 | if (ret) { | |
7470 | break; | |
7471 | } | |
7472 | ||
7473 | left -= len; | |
7474 | off += len; | |
7475 | buf += len; | |
7476 | } | |
7477 | ||
7478 | return ret; | |
7479 | } | |
7480 | ||
7481 | static int ssd_ram_write(struct ssd_device *dev, void *buf, size_t length, loff_t ofs, int ctrl_idx) | |
7482 | { | |
7483 | int left = length; | |
7484 | size_t len; | |
7485 | loff_t off = ofs; | |
7486 | int ret = 0; | |
7487 | ||
7488 | if (ctrl_idx >= dev->hw_info.nr_ctrl || (uint64_t)(ofs + length) > dev->hw_info.ram_size || !length | |
7489 | || (length & (dev->hw_info.ram_align - 1)) != 0 || ((uint64_t)ofs & (dev->hw_info.ram_align - 1)) != 0) { | |
7490 | return -EINVAL; | |
7491 | } | |
7492 | ||
7493 | while (left > 0) { | |
7494 | len = dev->hw_info.ram_max_len; | |
7495 | if (left < (int)dev->hw_info.ram_max_len) { | |
7496 | len = left; | |
7497 | } | |
7498 | ||
7499 | ret = ssd_ram_write_4k(dev, buf, len, off, ctrl_idx); | |
7500 | if (ret) { | |
7501 | break; | |
7502 | } | |
7503 | ||
7504 | left -= len; | |
7505 | off += len; | |
7506 | buf += len; | |
7507 | } | |
7508 | ||
7509 | return ret; | |
7510 | } | |
7511 | ||
7512 | ||
7513 | /* flash op */ | |
7514 | static int ssd_check_flash(struct ssd_device *dev, int flash, int page, int ctrl_idx) | |
7515 | { | |
7516 | int cur_ch = flash % dev->hw_info.max_ch; | |
7517 | int cur_chip = flash /dev->hw_info.max_ch; | |
7518 | ||
7519 | if (ctrl_idx >= dev->hw_info.nr_ctrl) { | |
7520 | return -EINVAL; | |
7521 | } | |
7522 | ||
7523 | if (cur_ch >= dev->hw_info.nr_ch || cur_chip >= dev->hw_info.nr_chip) { | |
7524 | return -EINVAL; | |
7525 | } | |
7526 | ||
7527 | if (page >= (int)(dev->hw_info.block_count * dev->hw_info.page_count)) { | |
7528 | return -EINVAL; | |
7529 | } | |
7530 | return 0; | |
7531 | } | |
7532 | ||
/*
 * Read a NAND chip's ID bytes into @id via DMA.
 *
 * @id must point to a DMA-able buffer of at least SSD_NAND_ID_BUFF_SZ
 * bytes.  On pre-V3 protocols the chip-enable is folded into the flash
 * index (flash*2 + ce) and chip_ce is always 0.
 *
 * Returns 0 on success or a negative error (bad pointer, DMA mapping,
 * or request failure).
 */
static int ssd_nand_read_id(struct ssd_device *dev, void *id, int flash, int chip, int ctrl_idx)
{
	struct ssd_nand_op_msg *msg;
	dma_addr_t buf_dma;
	int ret = 0;

	if (unlikely(!id))
		return -EINVAL;

	buf_dma = pci_map_single(dev->pdev, id, SSD_NAND_ID_BUFF_SZ, PCI_DMA_FROMDEVICE);
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
	/* old kernels: dma_mapping_error() took only the dma address */
	ret = dma_mapping_error(buf_dma);
#else
	ret = dma_mapping_error(&(dev->pdev->dev), buf_dma);
#endif
	if (ret) {
		hio_warn("%s: unable to map read DMA buffer\n", dev->name);
		goto out_dma_mapping;
	}

	/* pre-V3 addressing: fold the chip-enable into the flash index */
	if (dev->protocol_info.ver < SSD_PROTOCOL_V3) {
		flash = ((uint32_t)flash << 1) | (uint32_t)chip;
		chip = 0;
	}

	msg = (struct ssd_nand_op_msg *)ssd_get_dmsg(dev);

	msg->fun = SSD_FUNC_NAND_READ_ID;
	msg->chip_no = flash;
	msg->chip_ce = chip;
	msg->ctrl_idx = ctrl_idx;
	msg->buf = buf_dma;

	ret = ssd_do_request(dev, READ, msg, NULL);
	ssd_put_dmsg(msg);

	pci_unmap_single(dev->pdev, buf_dma, SSD_NAND_ID_BUFF_SZ, PCI_DMA_FROMDEVICE);

out_dma_mapping:
	return ret;
}
7574 | ||
/* NOTE(review): compiled-out legacy NAND read path (no OOB data); the
 * live variant is ssd_nand_read_w_oob() below.  Kept for reference. */
#if 0
static int ssd_nand_read(struct ssd_device *dev, void *buf,
	int flash, int chip, int page, int page_count, int ctrl_idx)
{
	struct ssd_nand_op_msg *msg;
	dma_addr_t buf_dma;
	int length;
	int ret = 0;

	if (!buf) {
		return -EINVAL;
	}

	if ((page + page_count) > dev->hw_info.block_count*dev->hw_info.page_count) {
		return -EINVAL;
	}

	ret = ssd_check_flash(dev, flash, page, ctrl_idx);
	if (ret) {
		return ret;
	}

	length = page_count * dev->hw_info.page_size;

	buf_dma = pci_map_single(dev->pdev, buf, length, PCI_DMA_FROMDEVICE);
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
	ret = dma_mapping_error(buf_dma);
#else
	ret = dma_mapping_error(&(dev->pdev->dev), buf_dma);
#endif
	if (ret) {
		hio_warn("%s: unable to map read DMA buffer\n", dev->name);
		goto out_dma_mapping;
	}

	/* pre-V3 addressing: fold the chip-enable into the flash index */
	if (dev->protocol_info.ver < SSD_PROTOCOL_V3) {
		flash = (flash << 1) | chip;
		chip = 0;
	}

	msg = (struct ssd_nand_op_msg *)ssd_get_dmsg(dev);

	msg->fun = SSD_FUNC_NAND_READ;
	msg->ctrl_idx = ctrl_idx;
	msg->chip_no = flash;
	msg->chip_ce = chip;
	msg->page_no = page;
	msg->page_count = page_count;
	msg->buf = buf_dma;

	ret = ssd_do_request(dev, READ, msg, NULL);
	ssd_put_dmsg(msg);

	pci_unmap_single(dev->pdev, buf_dma, length, PCI_DMA_FROMDEVICE);

out_dma_mapping:
	return ret;
}
#endif
7634 | ||
/*
 * Read @count NAND pages including their out-of-band (spare) areas.
 *
 * @buf must be DMA-able and hold count * (page_size + oob_size) bytes.
 * The (flash, page, ctrl_idx) address is validated by ssd_check_flash();
 * on pre-V3 protocols the chip-enable is folded into the flash index.
 *
 * Returns 0 on success or a negative error (bad arguments, DMA mapping,
 * or request failure).
 */
static int ssd_nand_read_w_oob(struct ssd_device *dev, void *buf,
	int flash, int chip, int page, int count, int ctrl_idx)
{
	struct ssd_nand_op_msg *msg;
	dma_addr_t buf_dma;
	int length;
	int ret = 0;

	if (!buf) {
		return -EINVAL;
	}

	/* the whole span must stay inside the chip's page space */
	if ((page + count) > (int)(dev->hw_info.block_count * dev->hw_info.page_count)) {
		return -EINVAL;
	}

	ret = ssd_check_flash(dev, flash, page, ctrl_idx);
	if (ret) {
		return ret;
	}

	/* each page transfers its data plus its OOB bytes */
	length = count * (dev->hw_info.page_size + dev->hw_info.oob_size);

	buf_dma = pci_map_single(dev->pdev, buf, length, PCI_DMA_FROMDEVICE);
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
	/* old kernels: dma_mapping_error() took only the dma address */
	ret = dma_mapping_error(buf_dma);
#else
	ret = dma_mapping_error(&(dev->pdev->dev), buf_dma);
#endif
	if (ret) {
		hio_warn("%s: unable to map read DMA buffer\n", dev->name);
		goto out_dma_mapping;
	}

	/* pre-V3 addressing: fold the chip-enable into the flash index */
	if (dev->protocol_info.ver < SSD_PROTOCOL_V3) {
		flash = ((uint32_t)flash << 1) | (uint32_t)chip;
		chip = 0;
	}

	msg = (struct ssd_nand_op_msg *)ssd_get_dmsg(dev);

	msg->fun = SSD_FUNC_NAND_READ_WOOB;
	msg->ctrl_idx = ctrl_idx;
	msg->chip_no = flash;
	msg->chip_ce = chip;
	msg->page_no = page;
	msg->page_count = count;
	msg->buf = buf_dma;

	ret = ssd_do_request(dev, READ, msg, NULL);
	ssd_put_dmsg(msg);

	pci_unmap_single(dev->pdev, buf_dma, length, PCI_DMA_FROMDEVICE);

out_dma_mapping:
	return ret;
}
7692 | ||
7693 | /* write 1 page */ | |
7694 | static int ssd_nand_write(struct ssd_device *dev, void *buf, | |
7695 | int flash, int chip, int page, int count, int ctrl_idx) | |
7696 | { | |
7697 | struct ssd_nand_op_msg *msg; | |
7698 | dma_addr_t buf_dma; | |
7699 | int length; | |
7700 | int ret = 0; | |
7701 | ||
7702 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3) { | |
7703 | return -EINVAL; | |
7704 | } | |
7705 | ||
7706 | if (!buf) { | |
7707 | return -EINVAL; | |
7708 | } | |
7709 | ||
7710 | if (count != 1) { | |
7711 | return -EINVAL; | |
7712 | } | |
7713 | ||
7714 | ret = ssd_check_flash(dev, flash, page, ctrl_idx); | |
7715 | if (ret) { | |
7716 | return ret; | |
7717 | } | |
7718 | ||
7719 | length = count * (dev->hw_info.page_size + dev->hw_info.oob_size); | |
7720 | ||
7721 | /* write data to ram */ | |
7722 | /*ret = ssd_ram_write(dev, buf, length, dev->hw_info.nand_wbuff_base, ctrl_idx); | |
7723 | if (ret) { | |
7724 | return ret; | |
7725 | }*/ | |
7726 | ||
7727 | buf_dma = pci_map_single(dev->pdev, buf, length, PCI_DMA_TODEVICE); | |
7728 | #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26)) | |
7729 | ret = dma_mapping_error(buf_dma); | |
7730 | #else | |
7731 | ret = dma_mapping_error(&(dev->pdev->dev), buf_dma); | |
7732 | #endif | |
7733 | if (ret) { | |
7734 | hio_warn("%s: unable to map write DMA buffer\n", dev->name); | |
7735 | goto out_dma_mapping; | |
7736 | } | |
7737 | ||
7738 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3) { | |
7739 | flash = ((uint32_t)flash << 1) | (uint32_t)chip; | |
7740 | chip = 0; | |
7741 | } | |
7742 | ||
7743 | msg = (struct ssd_nand_op_msg *)ssd_get_dmsg(dev); | |
7744 | ||
7745 | msg->fun = SSD_FUNC_NAND_WRITE; | |
7746 | msg->ctrl_idx = ctrl_idx; | |
7747 | msg->chip_no = flash; | |
7748 | msg->chip_ce = chip; | |
7749 | ||
7750 | msg->page_no = page; | |
7751 | msg->page_count = count; | |
7752 | msg->buf = buf_dma; | |
7753 | ||
7754 | ret = ssd_do_request(dev, WRITE, msg, NULL); | |
7755 | ssd_put_dmsg(msg); | |
7756 | ||
7757 | pci_unmap_single(dev->pdev, buf_dma, length, PCI_DMA_TODEVICE); | |
7758 | ||
7759 | out_dma_mapping: | |
7760 | return ret; | |
7761 | } | |
7762 | ||
7763 | static int ssd_nand_erase(struct ssd_device *dev, int flash, int chip, int page, int ctrl_idx) | |
7764 | { | |
7765 | struct ssd_nand_op_msg *msg; | |
7766 | int ret = 0; | |
7767 | ||
7768 | ret = ssd_check_flash(dev, flash, page, ctrl_idx); | |
7769 | if (ret) { | |
7770 | return ret; | |
7771 | } | |
7772 | ||
7773 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3) { | |
7774 | flash = ((uint32_t)flash << 1) | (uint32_t)chip; | |
7775 | chip = 0; | |
7776 | } | |
7777 | ||
7778 | msg = (struct ssd_nand_op_msg *)ssd_get_dmsg(dev); | |
7779 | ||
7780 | msg->fun = SSD_FUNC_NAND_ERASE; | |
7781 | msg->ctrl_idx = ctrl_idx; | |
7782 | msg->chip_no = flash; | |
7783 | msg->chip_ce = chip; | |
7784 | msg->page_no = page; | |
7785 | ||
7786 | ret = ssd_do_request(dev, WRITE, msg, NULL); | |
7787 | ssd_put_dmsg(msg); | |
7788 | ||
7789 | return ret; | |
7790 | } | |
7791 | ||
7792 | static int ssd_update_bbt(struct ssd_device *dev, int flash, int ctrl_idx) | |
7793 | { | |
7794 | struct ssd_nand_op_msg *msg; | |
7795 | struct ssd_flush_msg *fmsg; | |
7796 | int ret = 0; | |
7797 | ||
7798 | ret = ssd_check_flash(dev, flash, 0, ctrl_idx); | |
7799 | if (ret) { | |
7800 | return ret; | |
7801 | } | |
7802 | ||
7803 | msg = (struct ssd_nand_op_msg *)ssd_get_dmsg(dev); | |
7804 | ||
7805 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3) { | |
7806 | fmsg = (struct ssd_flush_msg *)msg; | |
7807 | ||
7808 | fmsg->fun = SSD_FUNC_FLUSH; | |
7809 | fmsg->flag = 0x1; | |
7810 | fmsg->flash = flash; | |
7811 | fmsg->ctrl_idx = ctrl_idx; | |
7812 | } else { | |
7813 | msg->fun = SSD_FUNC_FLUSH; | |
7814 | msg->flag = 0x1; | |
7815 | msg->chip_no = flash; | |
7816 | msg->ctrl_idx = ctrl_idx; | |
7817 | } | |
7818 | ||
7819 | ret = ssd_do_request(dev, WRITE, msg, NULL); | |
7820 | ssd_put_dmsg(msg); | |
7821 | ||
7822 | return ret; | |
7823 | } | |
7824 | ||
/* flash controller init state */
/*
 * Poll each flash controller's init-state bitmap until every chip/channel
 * reports ready, or until the poll budget (max_wait) is exhausted.
 * Returns 0 on success, -ENOMEM on allocation failure, -1 when a
 * chip/channel never came up.
 * (Large blocks of historical commented-out controller-probe code that
 * used to precede the polling loop were removed for readability.)
 */
static int __ssd_check_init_state(struct ssd_device *dev)
{
	uint32_t *init_state = NULL;
	int reg_base, reg_sz;
	int max_wait = SSD_INIT_MAX_WAIT;
	int init_wait = 0;
	int i, j, k;
	int ch_start = 0;

	/* newer protocol is allowed a longer init time */
	if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) {
		max_wait = SSD_INIT_MAX_WAIT_V3_2;
	}

	reg_base = dev->protocol_info.init_state_reg;
	reg_sz = dev->protocol_info.init_state_reg_sz;

	init_state = (uint32_t *)kmalloc(reg_sz, GFP_KERNEL);
	if (!init_state) {
		return -ENOMEM;
	}

	for (i=0; i<dev->hw_info.nr_ctrl; i++) {
check_init:
		/* snapshot this controller's init-state bits */
		for (j=0, k=0; j<reg_sz; j+=sizeof(uint32_t), k++) {
			init_state[k] = ssd_reg32_read(dev->ctrlp + reg_base + j);
		}

		if (dev->protocol_info.ver > SSD_PROTOCOL_V3) {
			/* just check the last bit, no need to check all channel */
			ch_start = dev->hw_info.max_ch - 1;
		} else {
			ch_start = 0;
		}

		for (j=0; j<dev->hw_info.nr_chip; j++) {
			for (k=ch_start; k<dev->hw_info.max_ch; k++) {
				if (test_bit((j*dev->hw_info.max_ch + k), (void *)init_state)) {
					continue;
				}

				/* bit not set yet: sleep and re-read this controller's
				 * registers, until the global wait budget runs out */
				init_wait++;
				if (init_wait <= max_wait) {
					msleep(SSD_INIT_WAIT);
					goto check_init;
				} else {
					if (k < dev->hw_info.nr_ch) {
						hio_warn("%s: controller %d chip %d ch %d init failed\n",
							dev->name, i, j, k);
					} else {
						hio_warn("%s: controller %d chip %d init failed\n",
							dev->name, i, j);
					}

					kfree(init_state);
					return -1;
				}
			}
		}
		reg_base += reg_sz;
	}

	kfree(init_state);
	return 0;
}
7944 | ||
7945 | static int ssd_check_init_state(struct ssd_device *dev) | |
7946 | { | |
7947 | if (mode != SSD_DRV_MODE_STANDARD) { | |
7948 | return 0; | |
7949 | } | |
7950 | ||
7951 | return __ssd_check_init_state(dev); | |
7952 | } | |
7953 | ||
7954 | static void ssd_reset_resp_ptr(struct ssd_device *dev); | |
7955 | ||
/* reset flash controller etc */
/*
 * Issue a device reset of the requested type (SSD_RST_NOINIT, SSD_RST_NORMAL
 * or SSD_RST_FULL), then wait for the controller and re-check init state.
 * Full reset requires protocol V3.2+.  Serialized via fw_mutex.
 */
static int __ssd_reset(struct ssd_device *dev, int type)
{
	struct timeval tv;

	if (type < SSD_RST_NOINIT || type > SSD_RST_FULL) {
		return -EINVAL;
	}

	mutex_lock(&dev->fw_mutex);

	if (type == SSD_RST_NOINIT) { //no init
		ssd_reg32_write(dev->ctrlp + SSD_RESET_REG, SSD_RESET_NOINIT);
	} else if (type == SSD_RST_NORMAL) { //reset & init
		ssd_reg32_write(dev->ctrlp + SSD_RESET_REG, SSD_RESET);
	} else { // full reset
		/* full reset register only exists on protocol V3.2+ */
		if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
			mutex_unlock(&dev->fw_mutex);
			return -EINVAL;
		}

		ssd_reg32_write(dev->ctrlp + SSD_FULL_RESET_REG, SSD_RESET_FULL);

		/* ?? */
		ssd_reset_resp_ptr(dev);
	}

#ifdef SSD_OT_PROTECT
	/* clear any over-temperature throttling delay */
	dev->ot_delay = 0;
#endif

	/* give the controller time to come back after the reset */
	msleep(1000);

	/* re-program the flush timeout for the current write mode */
	ssd_set_flush_timeout(dev, dev->wmode);

	mutex_unlock(&dev->fw_mutex);
	ssd_gen_swlog(dev, SSD_LOG_RESET, (uint32_t)type);

	/* record the reset timestamp */
	do_gettimeofday(&tv);
	dev->reset_time = tv.tv_sec;

	return __ssd_check_init_state(dev);
}
7998 | ||
7999 | static int ssd_save_md(struct ssd_device *dev) | |
8000 | { | |
8001 | struct ssd_nand_op_msg *msg; | |
8002 | int ret = 0; | |
8003 | ||
8004 | if (unlikely(mode != SSD_DRV_MODE_STANDARD)) | |
8005 | return 0; | |
8006 | ||
8007 | if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) { | |
8008 | return 0; | |
8009 | } | |
8010 | ||
8011 | if (!dev->save_md) { | |
8012 | return 0; | |
8013 | } | |
8014 | ||
8015 | msg = (struct ssd_nand_op_msg *)ssd_get_dmsg(dev); | |
8016 | ||
8017 | msg->fun = SSD_FUNC_FLUSH; | |
8018 | msg->flag = 0x2; | |
8019 | msg->ctrl_idx = 0; | |
8020 | msg->chip_no = 0; | |
8021 | ||
8022 | ret = ssd_do_request(dev, WRITE, msg, NULL); | |
8023 | ssd_put_dmsg(msg); | |
8024 | ||
8025 | return ret; | |
8026 | } | |
8027 | ||
8028 | static int ssd_barrier_save_md(struct ssd_device *dev) | |
8029 | { | |
8030 | struct ssd_nand_op_msg *msg; | |
8031 | int ret = 0; | |
8032 | ||
8033 | if (unlikely(mode != SSD_DRV_MODE_STANDARD)) | |
8034 | return 0; | |
8035 | ||
8036 | if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) { | |
8037 | return 0; | |
8038 | } | |
8039 | ||
8040 | if (!dev->save_md) { | |
8041 | return 0; | |
8042 | } | |
8043 | ||
8044 | msg = (struct ssd_nand_op_msg *)ssd_get_dmsg(dev); | |
8045 | ||
8046 | msg->fun = SSD_FUNC_FLUSH; | |
8047 | msg->flag = 0x2; | |
8048 | msg->ctrl_idx = 0; | |
8049 | msg->chip_no = 0; | |
8050 | ||
8051 | ret = ssd_do_barrier_request(dev, WRITE, msg, NULL); | |
8052 | ssd_put_dmsg(msg); | |
8053 | ||
8054 | return ret; | |
8055 | } | |
8056 | ||
8057 | static int ssd_flush(struct ssd_device *dev) | |
8058 | { | |
8059 | struct ssd_nand_op_msg *msg; | |
8060 | struct ssd_flush_msg *fmsg; | |
8061 | int ret = 0; | |
8062 | ||
8063 | if (unlikely(mode != SSD_DRV_MODE_STANDARD)) | |
8064 | return 0; | |
8065 | ||
8066 | msg = (struct ssd_nand_op_msg *)ssd_get_dmsg(dev); | |
8067 | ||
8068 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3) { | |
8069 | fmsg = (struct ssd_flush_msg *)msg; | |
8070 | ||
8071 | fmsg->fun = SSD_FUNC_FLUSH; | |
8072 | fmsg->flag = 0; | |
8073 | fmsg->ctrl_idx = 0; | |
8074 | fmsg->flash = 0; | |
8075 | } else { | |
8076 | msg->fun = SSD_FUNC_FLUSH; | |
8077 | msg->flag = 0; | |
8078 | msg->ctrl_idx = 0; | |
8079 | msg->chip_no = 0; | |
8080 | } | |
8081 | ||
8082 | ret = ssd_do_request(dev, WRITE, msg, NULL); | |
8083 | ssd_put_dmsg(msg); | |
8084 | ||
8085 | return ret; | |
8086 | } | |
8087 | ||
8088 | static int ssd_barrier_flush(struct ssd_device *dev) | |
8089 | { | |
8090 | struct ssd_nand_op_msg *msg; | |
8091 | struct ssd_flush_msg *fmsg; | |
8092 | int ret = 0; | |
8093 | ||
8094 | if (unlikely(mode != SSD_DRV_MODE_STANDARD)) | |
8095 | return 0; | |
8096 | ||
8097 | msg = (struct ssd_nand_op_msg *)ssd_get_dmsg(dev); | |
8098 | ||
8099 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3) { | |
8100 | fmsg = (struct ssd_flush_msg *)msg; | |
8101 | ||
8102 | fmsg->fun = SSD_FUNC_FLUSH; | |
8103 | fmsg->flag = 0; | |
8104 | fmsg->ctrl_idx = 0; | |
8105 | fmsg->flash = 0; | |
8106 | } else { | |
8107 | msg->fun = SSD_FUNC_FLUSH; | |
8108 | msg->flag = 0; | |
8109 | msg->ctrl_idx = 0; | |
8110 | msg->chip_no = 0; | |
8111 | } | |
8112 | ||
8113 | ret = ssd_do_barrier_request(dev, WRITE, msg, NULL); | |
8114 | ssd_put_dmsg(msg); | |
8115 | ||
8116 | return ret; | |
8117 | } | |
8118 | ||
8119 | #define SSD_WMODE_BUFFER_TIMEOUT 0x00c82710 | |
8120 | #define SSD_WMODE_BUFFER_EX_TIMEOUT 0x000500c8 | |
8121 | #define SSD_WMODE_FUA_TIMEOUT 0x000503E8 | |
8122 | static void ssd_set_flush_timeout(struct ssd_device *dev, int m) | |
8123 | { | |
8124 | uint32_t to; | |
8125 | uint32_t val = 0; | |
8126 | ||
8127 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_1_1) { | |
8128 | return; | |
8129 | } | |
8130 | ||
8131 | switch(m) { | |
8132 | case SSD_WMODE_BUFFER: | |
8133 | to = SSD_WMODE_BUFFER_TIMEOUT; | |
8134 | break; | |
8135 | case SSD_WMODE_BUFFER_EX: | |
8136 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2_1) { | |
8137 | to = SSD_WMODE_BUFFER_EX_TIMEOUT; | |
8138 | } else { | |
8139 | to = SSD_WMODE_BUFFER_TIMEOUT; | |
8140 | } | |
8141 | break; | |
8142 | case SSD_WMODE_FUA: | |
8143 | to = SSD_WMODE_FUA_TIMEOUT; | |
8144 | break; | |
8145 | default: | |
8146 | return; | |
8147 | } | |
8148 | ||
8149 | val = (((uint32_t)((uint32_t)m & 0x3) << 28) | to); | |
8150 | ||
8151 | ssd_reg32_write(dev->ctrlp + SSD_FLUSH_TIMEOUT_REG, val); | |
8152 | } | |
8153 | ||
8154 | static int ssd_do_switch_wmode(struct ssd_device *dev, int m) | |
8155 | { | |
8156 | int ret = 0; | |
8157 | ||
8158 | ret = ssd_barrier_start(dev); | |
8159 | if (ret) { | |
8160 | goto out; | |
8161 | } | |
8162 | ||
8163 | ret = ssd_barrier_flush(dev); | |
8164 | if (ret) { | |
8165 | goto out_barrier_end; | |
8166 | } | |
8167 | ||
8168 | /* set contoller flush timeout */ | |
8169 | ssd_set_flush_timeout(dev, m); | |
8170 | ||
8171 | dev->wmode = m; | |
8172 | mb(); | |
8173 | ||
8174 | out_barrier_end: | |
8175 | ssd_barrier_end(dev); | |
8176 | out: | |
8177 | return ret; | |
8178 | } | |
8179 | ||
8180 | static int ssd_switch_wmode(struct ssd_device *dev, int m) | |
8181 | { | |
8182 | int default_wmode; | |
8183 | int next_wmode; | |
8184 | int ret = 0; | |
8185 | ||
8186 | if (!test_bit(SSD_ONLINE, &dev->state)) { | |
8187 | return -ENODEV; | |
8188 | } | |
8189 | ||
8190 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
8191 | default_wmode = SSD_WMODE_BUFFER; | |
8192 | } else { | |
8193 | default_wmode = SSD_WMODE_BUFFER_EX; | |
8194 | } | |
8195 | ||
8196 | if (SSD_WMODE_AUTO == m) { | |
8197 | /* battery fault ? */ | |
8198 | if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) { | |
8199 | next_wmode = SSD_WMODE_FUA; | |
8200 | } else { | |
8201 | next_wmode = default_wmode; | |
8202 | } | |
8203 | } else if (SSD_WMODE_DEFAULT == m) { | |
8204 | next_wmode = default_wmode; | |
8205 | } else { | |
8206 | next_wmode = m; | |
8207 | } | |
8208 | ||
8209 | if (next_wmode != dev->wmode) { | |
8210 | hio_warn("%s: switch write mode (%d -> %d)\n", dev->name, dev->wmode, next_wmode); | |
8211 | ret = ssd_do_switch_wmode(dev, next_wmode); | |
8212 | if (ret) { | |
8213 | hio_err("%s: can not switch write mode (%d -> %d)\n", dev->name, dev->wmode, next_wmode); | |
8214 | } | |
8215 | } | |
8216 | ||
8217 | return ret; | |
8218 | } | |
8219 | ||
8220 | static int ssd_init_wmode(struct ssd_device *dev) | |
8221 | { | |
8222 | int default_wmode; | |
8223 | int ret = 0; | |
8224 | ||
8225 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
8226 | default_wmode = SSD_WMODE_BUFFER; | |
8227 | } else { | |
8228 | default_wmode = SSD_WMODE_BUFFER_EX; | |
8229 | } | |
8230 | ||
8231 | /* dummy mode */ | |
8232 | if (SSD_WMODE_AUTO == dev->user_wmode) { | |
8233 | /* battery fault ? */ | |
8234 | if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) { | |
8235 | dev->wmode = SSD_WMODE_FUA; | |
8236 | } else { | |
8237 | dev->wmode = default_wmode; | |
8238 | } | |
8239 | } else if (SSD_WMODE_DEFAULT == dev->user_wmode) { | |
8240 | dev->wmode = default_wmode; | |
8241 | } else { | |
8242 | dev->wmode = dev->user_wmode; | |
8243 | } | |
8244 | ssd_set_flush_timeout(dev, dev->wmode); | |
8245 | ||
8246 | return ret; | |
8247 | } | |
8248 | ||
8249 | static int __ssd_set_wmode(struct ssd_device *dev, int m) | |
8250 | { | |
8251 | int ret = 0; | |
8252 | ||
8253 | /* not support old fw*/ | |
8254 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_1_1) { | |
8255 | ret = -EOPNOTSUPP; | |
8256 | goto out; | |
8257 | } | |
8258 | ||
8259 | if (m < SSD_WMODE_BUFFER || m > SSD_WMODE_DEFAULT) { | |
8260 | ret = -EINVAL; | |
8261 | goto out; | |
8262 | } | |
8263 | ||
8264 | ssd_gen_swlog(dev, SSD_LOG_SET_WMODE, m); | |
8265 | ||
8266 | dev->user_wmode = m; | |
8267 | ||
8268 | ret = ssd_switch_wmode(dev, dev->user_wmode); | |
8269 | if (ret) { | |
8270 | goto out; | |
8271 | } | |
8272 | ||
8273 | out: | |
8274 | return ret; | |
8275 | } | |
8276 | ||
8277 | int ssd_set_wmode(struct block_device *bdev, int m) | |
8278 | { | |
8279 | struct ssd_device *dev; | |
8280 | ||
8281 | if (!bdev || !(bdev->bd_disk)) { | |
8282 | return -EINVAL; | |
8283 | } | |
8284 | ||
8285 | dev = bdev->bd_disk->private_data; | |
8286 | ||
8287 | return __ssd_set_wmode(dev, m); | |
8288 | } | |
8289 | ||
8290 | static int ssd_do_reset(struct ssd_device *dev) | |
8291 | { | |
8292 | int ret = 0; | |
8293 | ||
8294 | if (test_and_set_bit(SSD_RESETING, &dev->state)) { | |
8295 | return 0; | |
8296 | } | |
8297 | ||
8298 | ssd_stop_workq(dev); | |
8299 | ||
8300 | ret = ssd_barrier_start(dev); | |
8301 | if (ret) { | |
8302 | goto out; | |
8303 | } | |
8304 | ||
8305 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
8306 | /* old reset */ | |
8307 | ret = __ssd_reset(dev, SSD_RST_NORMAL); | |
8308 | } else { | |
8309 | /* full reset */ | |
8310 | //ret = __ssd_reset(dev, SSD_RST_FULL); | |
8311 | ret = __ssd_reset(dev, SSD_RST_NORMAL); | |
8312 | } | |
8313 | if (ret) { | |
8314 | goto out_barrier_end; | |
8315 | } | |
8316 | ||
8317 | out_barrier_end: | |
8318 | ssd_barrier_end(dev); | |
8319 | out: | |
8320 | ssd_start_workq(dev); | |
8321 | test_and_clear_bit(SSD_RESETING, &dev->state); | |
8322 | return ret; | |
8323 | } | |
8324 | ||
8325 | static int ssd_full_reset(struct ssd_device *dev) | |
8326 | { | |
8327 | int ret = 0; | |
8328 | ||
8329 | if (test_and_set_bit(SSD_RESETING, &dev->state)) { | |
8330 | return 0; | |
8331 | } | |
8332 | ||
8333 | ssd_stop_workq(dev); | |
8334 | ||
8335 | ret = ssd_barrier_start(dev); | |
8336 | if (ret) { | |
8337 | goto out; | |
8338 | } | |
8339 | ||
8340 | ret = ssd_barrier_flush(dev); | |
8341 | if (ret) { | |
8342 | goto out_barrier_end; | |
8343 | } | |
8344 | ||
8345 | ret = ssd_barrier_save_md(dev); | |
8346 | if (ret) { | |
8347 | goto out_barrier_end; | |
8348 | } | |
8349 | ||
8350 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
8351 | /* old reset */ | |
8352 | ret = __ssd_reset(dev, SSD_RST_NORMAL); | |
8353 | } else { | |
8354 | /* full reset */ | |
8355 | //ret = __ssd_reset(dev, SSD_RST_FULL); | |
8356 | ret = __ssd_reset(dev, SSD_RST_NORMAL); | |
8357 | } | |
8358 | if (ret) { | |
8359 | goto out_barrier_end; | |
8360 | } | |
8361 | ||
8362 | out_barrier_end: | |
8363 | ssd_barrier_end(dev); | |
8364 | out: | |
8365 | ssd_start_workq(dev); | |
8366 | test_and_clear_bit(SSD_RESETING, &dev->state); | |
8367 | return ret; | |
8368 | } | |
8369 | ||
8370 | int ssd_reset(struct block_device *bdev) | |
8371 | { | |
da3355df | 8372 | int ret; |
361ebed5 HSDT |
8373 | struct ssd_device *dev; |
8374 | ||
8375 | if (!bdev || !(bdev->bd_disk)) { | |
8376 | return -EINVAL; | |
8377 | } | |
8378 | ||
8379 | dev = bdev->bd_disk->private_data; | |
8380 | ||
da3355df SF |
8381 | ret = ssd_full_reset(dev); |
8382 | if (!ret) { | |
8383 | if (!dev->has_non_0x98_reg_access) { | |
8384 | ssd_reg32_write(dev->ctrlp + SSD_RELOAD_FW_REG, 0); | |
8385 | } | |
8386 | } | |
8387 | ||
8388 | return ret ; | |
361ebed5 HSDT |
8389 | } |
8390 | ||
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
/* Pre-2.6.20 block-layer flush hook: forward to the device flush command. */
static int ssd_issue_flush_fn(struct request_queue *q, struct gendisk *disk,
	sector_t *error_sector)
{
	struct ssd_device *dev = q->queuedata;

	return ssd_flush(dev);
}
#endif
8400 | ||
/*
 * Entry point for "physical" bios.  Rejects the bio when the device is
 * offline, when barrier/FUA semantics are requested, or when writing to a
 * read-only device; otherwise submits it, with an optional queued fallback
 * when built with SSD_QUEUE_PBIO.
 */
void ssd_submit_pbio(struct request_queue *q, struct bio *bio)
{
	struct ssd_device *dev = q->queuedata;
#ifdef SSD_QUEUE_PBIO
	int ret = -EBUSY;
#endif

	if (!test_bit(SSD_ONLINE, &dev->state)) {
		ssd_bio_endio(bio, -ENODEV);
		goto out;
	}

#ifdef SSD_DEBUG_ERR
	/* debug builds reject all IO after a timeout has been observed */
	if (atomic_read(&dev->tocnt)) {
		hio_warn("%s: IO rejected because of IO timeout!\n", dev->name);
		ssd_bio_endio(bio, -EIO);
		goto out;
	}
#endif

	/* barrier/FUA is not supported on this path */
	if (unlikely(ssd_bio_has_barrier_or_fua(bio))) {
		ssd_bio_endio(bio, -EOPNOTSUPP);
		goto out;
	}

	if (unlikely(dev->readonly && bio_data_dir(bio) == WRITE)) {
		ssd_bio_endio(bio, -EROFS);
		goto out;
	}

#ifdef SSD_QUEUE_PBIO
	/* try the direct path first; queue the bio when the device is busy */
	if (0 == atomic_read(&dev->in_sendq)) {
		ret = __ssd_submit_pbio(dev, bio, 0);
	}

	if (ret) {
		(void)test_and_set_bit(BIO_SSD_PBIO, &bio->bi_flags);
		ssd_queue_bio(dev, bio);
	}
#else
	__ssd_submit_pbio(dev, bio, 1);
#endif

out:
	return;
}
8447 | ||
bf9a5140 KM |
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
static blk_qc_t ssd_make_request(struct request_queue *q, struct bio *bio)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
static void ssd_make_request(struct request_queue *q, struct bio *bio)
#else
static int ssd_make_request(struct request_queue *q, struct bio *bio)
#endif
/*
 * Main block-layer submission path.  The return type varies with the
 * kernel's make_request_fn signature (blk_qc_t / void / int).
 */
{
	struct ssd_device *dev = q->queuedata;
	int ret = -EBUSY;

	if (!test_bit(SSD_ONLINE, &dev->state)) {
		ssd_bio_endio(bio, -ENODEV);
		goto out;
	}

	/* split bios that exceed the queue limits (4.3+ kernels) */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,13,0))
	blk_queue_split(q, &bio);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0))
	blk_queue_split(q, &bio, q->bio_split);
#endif

#ifdef SSD_DEBUG_ERR
	/* debug builds reject all IO after a timeout has been observed */
	if (atomic_read(&dev->tocnt)) {
		hio_warn("%s: IO rejected because of IO timeout!\n", dev->name);
		ssd_bio_endio(bio, -EIO);
		goto out;
	}
#endif

	/* barrier/FUA is not supported */
	if (unlikely(ssd_bio_has_barrier_or_fua(bio))) {
		ssd_bio_endio(bio, -EOPNOTSUPP);
		goto out;
	}

	/* writeback_cache_control.txt: REQ_FLUSH requests without data can be
	 * completed successfully without doing any work */
	if (unlikely(ssd_bio_has_flush(bio) && !bio_sectors(bio))) {
		ssd_bio_endio(bio, 0);
		goto out;
	}

	/* try the direct path first; queue the bio when the device is busy */
	if (0 == atomic_read(&dev->in_sendq)) {
		ret = ssd_submit_bio(dev, bio, 0);
	}

	if (ret) {
		ssd_queue_bio(dev, bio);
	}

out:
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
	return BLK_QC_T_NONE;
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
	return;
#else
	return 0;
#endif
}
8506 | ||
8507 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)) | |
8508 | static int ssd_block_getgeo(struct block_device *bdev, struct hd_geometry *geo) | |
8509 | { | |
8510 | struct ssd_device *dev; | |
8511 | ||
8512 | if (!bdev) { | |
8513 | return -EINVAL; | |
8514 | } | |
8515 | ||
8516 | dev = bdev->bd_disk->private_data; | |
8517 | if (!dev) { | |
8518 | return -EINVAL; | |
8519 | } | |
8520 | ||
8521 | geo->heads = 4; | |
8522 | geo->sectors = 16; | |
8523 | geo->cylinders = (dev->hw_info.size & ~0x3f) >> 6; | |
8524 | return 0; | |
8525 | } | |
8526 | #endif | |
8527 | ||
1197134c KM |
8528 | static int ssd_init_queue(struct ssd_device *dev); |
8529 | static void ssd_cleanup_queue(struct ssd_device *dev); | |
361ebed5 HSDT |
8530 | static void ssd_cleanup_blkdev(struct ssd_device *dev); |
8531 | static int ssd_init_blkdev(struct ssd_device *dev); | |
8532 | static int ssd_ioctl_common(struct ssd_device *dev, unsigned int cmd, unsigned long arg) | |
8533 | { | |
8534 | void __user *argp = (void __user *)arg; | |
8535 | void __user *buf = NULL; | |
8536 | void *kbuf = NULL; | |
8537 | int ret = 0; | |
8538 | ||
8539 | switch (cmd) { | |
8540 | case SSD_CMD_GET_PROTOCOL_INFO: | |
8541 | if (copy_to_user(argp, &dev->protocol_info, sizeof(struct ssd_protocol_info))) { | |
8542 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8543 | ret = -EFAULT; | |
8544 | break; | |
8545 | } | |
8546 | break; | |
8547 | ||
8548 | case SSD_CMD_GET_HW_INFO: | |
8549 | if (copy_to_user(argp, &dev->hw_info, sizeof(struct ssd_hw_info))) { | |
8550 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8551 | ret = -EFAULT; | |
8552 | break; | |
8553 | } | |
8554 | break; | |
8555 | ||
8556 | case SSD_CMD_GET_ROM_INFO: | |
8557 | if (copy_to_user(argp, &dev->rom_info, sizeof(struct ssd_rom_info))) { | |
8558 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8559 | ret = -EFAULT; | |
8560 | break; | |
8561 | } | |
8562 | break; | |
8563 | ||
8564 | case SSD_CMD_GET_SMART: { | |
8565 | struct ssd_smart smart; | |
8566 | int i; | |
8567 | ||
8568 | memcpy(&smart, &dev->smart, sizeof(struct ssd_smart)); | |
8569 | ||
8570 | mutex_lock(&dev->gd_mutex); | |
8571 | ssd_update_smart(dev, &smart); | |
8572 | mutex_unlock(&dev->gd_mutex); | |
8573 | ||
8574 | /* combine the volatile log info */ | |
8575 | if (dev->log_info.nr_log) { | |
8576 | for (i=0; i<SSD_LOG_NR_LEVEL; i++) { | |
8577 | smart.log_info.stat[i] += dev->log_info.stat[i]; | |
8578 | } | |
8579 | } | |
8580 | ||
8581 | if (copy_to_user(argp, &smart, sizeof(struct ssd_smart))) { | |
8582 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8583 | ret = -EFAULT; | |
8584 | break; | |
8585 | } | |
8586 | ||
8587 | break; | |
8588 | } | |
8589 | ||
8590 | case SSD_CMD_GET_IDX: | |
8591 | if (copy_to_user(argp, &dev->idx, sizeof(int))) { | |
8592 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8593 | ret = -EFAULT; | |
8594 | break; | |
8595 | } | |
8596 | break; | |
8597 | ||
8598 | case SSD_CMD_GET_AMOUNT: { | |
8599 | int nr_ssd = atomic_read(&ssd_nr); | |
8600 | if (copy_to_user(argp, &nr_ssd, sizeof(int))) { | |
8601 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8602 | ret = -EFAULT; | |
8603 | break; | |
8604 | } | |
8605 | break; | |
8606 | } | |
8607 | ||
8608 | case SSD_CMD_GET_TO_INFO: { | |
8609 | int tocnt = atomic_read(&dev->tocnt); | |
8610 | ||
8611 | if (copy_to_user(argp, &tocnt, sizeof(int))) { | |
8612 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8613 | ret = -EFAULT; | |
8614 | break; | |
8615 | } | |
8616 | break; | |
8617 | } | |
8618 | ||
8619 | case SSD_CMD_GET_DRV_VER: { | |
8620 | char ver[] = DRIVER_VERSION; | |
8621 | int len = sizeof(ver); | |
8622 | ||
8623 | if (len > (DRIVER_VERSION_LEN - 1)) { | |
8624 | len = (DRIVER_VERSION_LEN - 1); | |
8625 | } | |
8626 | if (copy_to_user(argp, ver, len)) { | |
8627 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8628 | ret = -EFAULT; | |
8629 | break; | |
8630 | } | |
8631 | break; | |
8632 | } | |
8633 | ||
8634 | case SSD_CMD_GET_BBACC_INFO: { | |
8635 | struct ssd_acc_info acc; | |
8636 | ||
8637 | mutex_lock(&dev->fw_mutex); | |
8638 | ret = ssd_bb_acc(dev, &acc); | |
8639 | mutex_unlock(&dev->fw_mutex); | |
8640 | if (ret) { | |
8641 | break; | |
8642 | } | |
8643 | ||
8644 | if (copy_to_user(argp, &acc, sizeof(struct ssd_acc_info))) { | |
8645 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8646 | ret = -EFAULT; | |
8647 | break; | |
8648 | } | |
8649 | break; | |
8650 | } | |
8651 | ||
8652 | case SSD_CMD_GET_ECACC_INFO: { | |
8653 | struct ssd_acc_info acc; | |
8654 | ||
8655 | mutex_lock(&dev->fw_mutex); | |
8656 | ret = ssd_ec_acc(dev, &acc); | |
8657 | mutex_unlock(&dev->fw_mutex); | |
8658 | if (ret) { | |
8659 | break; | |
8660 | } | |
8661 | ||
8662 | if (copy_to_user(argp, &acc, sizeof(struct ssd_acc_info))) { | |
8663 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8664 | ret = -EFAULT; | |
8665 | break; | |
8666 | } | |
8667 | break; | |
8668 | } | |
8669 | ||
8670 | case SSD_CMD_GET_HW_INFO_EXT: | |
8671 | if (copy_to_user(argp, &dev->hw_info_ext, sizeof(struct ssd_hw_info_extend))) { | |
8672 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8673 | ret = -EFAULT; | |
8674 | break; | |
8675 | } | |
8676 | break; | |
8677 | ||
8678 | case SSD_CMD_REG_READ: { | |
8679 | struct ssd_reg_op_info reg_info; | |
8680 | ||
8681 | if (copy_from_user(®_info, argp, sizeof(struct ssd_reg_op_info))) { | |
8682 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
8683 | ret = -EFAULT; | |
8684 | break; | |
8685 | } | |
8686 | ||
8687 | if (reg_info.offset > dev->mmio_len-sizeof(uint32_t)) { | |
8688 | ret = -EINVAL; | |
8689 | break; | |
8690 | } | |
8691 | ||
8692 | reg_info.value = ssd_reg32_read(dev->ctrlp + reg_info.offset); | |
8693 | if (copy_to_user(argp, ®_info, sizeof(struct ssd_reg_op_info))) { | |
8694 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8695 | ret = -EFAULT; | |
8696 | break; | |
8697 | } | |
8698 | ||
8699 | break; | |
8700 | } | |
8701 | ||
8702 | case SSD_CMD_REG_WRITE: { | |
8703 | struct ssd_reg_op_info reg_info; | |
8704 | ||
8705 | if (copy_from_user(®_info, argp, sizeof(struct ssd_reg_op_info))) { | |
8706 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
8707 | ret = -EFAULT; | |
8708 | break; | |
8709 | } | |
8710 | ||
8711 | if (reg_info.offset > dev->mmio_len-sizeof(uint32_t)) { | |
8712 | ret = -EINVAL; | |
8713 | break; | |
8714 | } | |
8715 | ||
8716 | ssd_reg32_write(dev->ctrlp + reg_info.offset, reg_info.value); | |
8717 | ||
8718 | break; | |
8719 | } | |
8720 | ||
8721 | case SSD_CMD_SPI_READ: { | |
8722 | struct ssd_spi_op_info spi_info; | |
8723 | uint32_t off, size; | |
8724 | ||
8725 | if (copy_from_user(&spi_info, argp, sizeof(struct ssd_spi_op_info))) { | |
8726 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
8727 | ret = -EFAULT; | |
8728 | break; | |
8729 | } | |
8730 | ||
8731 | off = spi_info.off; | |
8732 | size = spi_info.len; | |
8733 | buf = spi_info.buf; | |
8734 | ||
8735 | if (size > dev->rom_info.size || 0 == size || (off + size) > dev->rom_info.size) { | |
8736 | ret = -EINVAL; | |
8737 | break; | |
8738 | } | |
8739 | ||
8740 | kbuf = kmalloc(size, GFP_KERNEL); | |
8741 | if (!kbuf) { | |
8742 | ret = -ENOMEM; | |
8743 | break; | |
8744 | } | |
8745 | ||
8746 | ret = ssd_spi_page_read(dev, kbuf, off, size); | |
8747 | if (ret) { | |
8748 | kfree(kbuf); | |
8749 | break; | |
8750 | } | |
8751 | ||
8752 | if (copy_to_user(buf, kbuf, size)) { | |
8753 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8754 | kfree(kbuf); | |
8755 | ret = -EFAULT; | |
8756 | break; | |
8757 | } | |
8758 | ||
8759 | kfree(kbuf); | |
8760 | ||
8761 | break; | |
8762 | } | |
8763 | ||
8764 | case SSD_CMD_SPI_WRITE: { | |
8765 | struct ssd_spi_op_info spi_info; | |
8766 | uint32_t off, size; | |
8767 | ||
8768 | if (copy_from_user(&spi_info, argp, sizeof(struct ssd_spi_op_info))) { | |
8769 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
8770 | ret = -EFAULT; | |
8771 | break; | |
8772 | } | |
8773 | ||
8774 | off = spi_info.off; | |
8775 | size = spi_info.len; | |
8776 | buf = spi_info.buf; | |
8777 | ||
8778 | if (size > dev->rom_info.size || 0 == size || (off + size) > dev->rom_info.size) { | |
8779 | ret = -EINVAL; | |
8780 | break; | |
8781 | } | |
8782 | ||
8783 | kbuf = kmalloc(size, GFP_KERNEL); | |
8784 | if (!kbuf) { | |
8785 | ret = -ENOMEM; | |
8786 | break; | |
8787 | } | |
8788 | ||
8789 | if (copy_from_user(kbuf, buf, size)) { | |
8790 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
8791 | kfree(kbuf); | |
8792 | ret = -EFAULT; | |
8793 | break; | |
8794 | } | |
8795 | ||
8796 | ret = ssd_spi_page_write(dev, kbuf, off, size); | |
8797 | if (ret) { | |
8798 | kfree(kbuf); | |
8799 | break; | |
8800 | } | |
8801 | ||
8802 | kfree(kbuf); | |
8803 | ||
8804 | break; | |
8805 | } | |
8806 | ||
8807 | case SSD_CMD_SPI_ERASE: { | |
8808 | struct ssd_spi_op_info spi_info; | |
8809 | uint32_t off; | |
8810 | ||
8811 | if (copy_from_user(&spi_info, argp, sizeof(struct ssd_spi_op_info))) { | |
8812 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
8813 | ret = -EFAULT; | |
8814 | break; | |
8815 | } | |
8816 | ||
8817 | off = spi_info.off; | |
8818 | ||
8819 | if ((off + dev->rom_info.block_size) > dev->rom_info.size) { | |
8820 | ret = -EINVAL; | |
8821 | break; | |
8822 | } | |
8823 | ||
8824 | ret = ssd_spi_block_erase(dev, off); | |
8825 | if (ret) { | |
8826 | break; | |
8827 | } | |
8828 | ||
8829 | break; | |
8830 | } | |
8831 | ||
8832 | case SSD_CMD_I2C_READ: { | |
8833 | struct ssd_i2c_op_info i2c_info; | |
8834 | uint8_t saddr; | |
8835 | uint8_t rsize; | |
8836 | ||
8837 | if (copy_from_user(&i2c_info, argp, sizeof(struct ssd_i2c_op_info))) { | |
8838 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
8839 | ret = -EFAULT; | |
8840 | break; | |
8841 | } | |
8842 | ||
8843 | saddr = i2c_info.saddr; | |
8844 | rsize = i2c_info.rsize; | |
8845 | buf = i2c_info.rbuf; | |
8846 | ||
8847 | if (rsize <= 0 || rsize > SSD_I2C_MAX_DATA) { | |
8848 | ret = -EINVAL; | |
8849 | break; | |
8850 | } | |
8851 | ||
8852 | kbuf = kmalloc(rsize, GFP_KERNEL); | |
8853 | if (!kbuf) { | |
8854 | ret = -ENOMEM; | |
8855 | break; | |
8856 | } | |
8857 | ||
8858 | ret = ssd_i2c_read(dev, saddr, rsize, kbuf); | |
8859 | if (ret) { | |
8860 | kfree(kbuf); | |
8861 | break; | |
8862 | } | |
8863 | ||
8864 | if (copy_to_user(buf, kbuf, rsize)) { | |
8865 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8866 | kfree(kbuf); | |
8867 | ret = -EFAULT; | |
8868 | break; | |
8869 | } | |
8870 | ||
8871 | kfree(kbuf); | |
8872 | ||
8873 | break; | |
8874 | } | |
8875 | ||
8876 | case SSD_CMD_I2C_WRITE: { | |
8877 | struct ssd_i2c_op_info i2c_info; | |
8878 | uint8_t saddr; | |
8879 | uint8_t wsize; | |
8880 | ||
8881 | if (copy_from_user(&i2c_info, argp, sizeof(struct ssd_i2c_op_info))) { | |
8882 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
8883 | ret = -EFAULT; | |
8884 | break; | |
8885 | } | |
8886 | ||
8887 | saddr = i2c_info.saddr; | |
8888 | wsize = i2c_info.wsize; | |
8889 | buf = i2c_info.wbuf; | |
8890 | ||
8891 | if (wsize <= 0 || wsize > SSD_I2C_MAX_DATA) { | |
8892 | ret = -EINVAL; | |
8893 | break; | |
8894 | } | |
8895 | ||
8896 | kbuf = kmalloc(wsize, GFP_KERNEL); | |
8897 | if (!kbuf) { | |
8898 | ret = -ENOMEM; | |
8899 | break; | |
8900 | } | |
8901 | ||
8902 | if (copy_from_user(kbuf, buf, wsize)) { | |
8903 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
8904 | kfree(kbuf); | |
8905 | ret = -EFAULT; | |
8906 | break; | |
8907 | } | |
8908 | ||
8909 | ret = ssd_i2c_write(dev, saddr, wsize, kbuf); | |
8910 | if (ret) { | |
8911 | kfree(kbuf); | |
8912 | break; | |
8913 | } | |
8914 | ||
8915 | kfree(kbuf); | |
8916 | ||
8917 | break; | |
8918 | } | |
8919 | ||
8920 | case SSD_CMD_I2C_WRITE_READ: { | |
8921 | struct ssd_i2c_op_info i2c_info; | |
8922 | uint8_t saddr; | |
8923 | uint8_t wsize; | |
8924 | uint8_t rsize; | |
8925 | uint8_t size; | |
8926 | ||
8927 | if (copy_from_user(&i2c_info, argp, sizeof(struct ssd_i2c_op_info))) { | |
8928 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
8929 | ret = -EFAULT; | |
8930 | break; | |
8931 | } | |
8932 | ||
8933 | saddr = i2c_info.saddr; | |
8934 | wsize = i2c_info.wsize; | |
8935 | rsize = i2c_info.rsize; | |
8936 | buf = i2c_info.wbuf; | |
8937 | ||
8938 | if (wsize <= 0 || wsize > SSD_I2C_MAX_DATA) { | |
8939 | ret = -EINVAL; | |
8940 | break; | |
8941 | } | |
8942 | ||
8943 | if (rsize <= 0 || rsize > SSD_I2C_MAX_DATA) { | |
8944 | ret = -EINVAL; | |
8945 | break; | |
8946 | } | |
8947 | ||
8948 | size = wsize + rsize; | |
8949 | ||
8950 | kbuf = kmalloc(size, GFP_KERNEL); | |
8951 | if (!kbuf) { | |
8952 | ret = -ENOMEM; | |
8953 | break; | |
8954 | } | |
8955 | ||
8956 | if (copy_from_user((kbuf + rsize), buf, wsize)) { | |
8957 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
8958 | kfree(kbuf); | |
8959 | ret = -EFAULT; | |
8960 | break; | |
8961 | } | |
8962 | ||
8963 | buf = i2c_info.rbuf; | |
8964 | ||
8965 | ret = ssd_i2c_write_read(dev, saddr, wsize, (kbuf + rsize), rsize, kbuf); | |
8966 | if (ret) { | |
8967 | kfree(kbuf); | |
8968 | break; | |
8969 | } | |
8970 | ||
8971 | if (copy_to_user(buf, kbuf, rsize)) { | |
8972 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8973 | kfree(kbuf); | |
8974 | ret = -EFAULT; | |
8975 | break; | |
8976 | } | |
8977 | ||
8978 | kfree(kbuf); | |
8979 | ||
8980 | break; | |
8981 | } | |
8982 | ||
8983 | case SSD_CMD_SMBUS_SEND_BYTE: { | |
8984 | struct ssd_smbus_op_info smbus_info; | |
8985 | uint8_t smb_data[SSD_SMBUS_BLOCK_MAX]; | |
8986 | uint8_t saddr; | |
8987 | uint8_t size; | |
8988 | ||
8989 | if (copy_from_user(&smbus_info, argp, sizeof(struct ssd_smbus_op_info))) { | |
8990 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
8991 | ret = -EFAULT; | |
8992 | break; | |
8993 | } | |
8994 | ||
8995 | saddr = smbus_info.saddr; | |
8996 | buf = smbus_info.buf; | |
8997 | size = 1; | |
8998 | ||
8999 | if (copy_from_user(smb_data, buf, size)) { | |
9000 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9001 | ret = -EFAULT; | |
9002 | break; | |
9003 | } | |
9004 | ||
9005 | ret = ssd_smbus_send_byte(dev, saddr, smb_data); | |
9006 | if (ret) { | |
9007 | break; | |
9008 | } | |
9009 | ||
9010 | break; | |
9011 | } | |
9012 | ||
9013 | case SSD_CMD_SMBUS_RECEIVE_BYTE: { | |
9014 | struct ssd_smbus_op_info smbus_info; | |
9015 | uint8_t smb_data[SSD_SMBUS_BLOCK_MAX]; | |
9016 | uint8_t saddr; | |
9017 | uint8_t size; | |
9018 | ||
9019 | if (copy_from_user(&smbus_info, argp, sizeof(struct ssd_smbus_op_info))) { | |
9020 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9021 | ret = -EFAULT; | |
9022 | break; | |
9023 | } | |
9024 | ||
9025 | saddr = smbus_info.saddr; | |
9026 | buf = smbus_info.buf; | |
9027 | size = 1; | |
9028 | ||
9029 | ret = ssd_smbus_receive_byte(dev, saddr, smb_data); | |
9030 | if (ret) { | |
9031 | break; | |
9032 | } | |
9033 | ||
9034 | if (copy_to_user(buf, smb_data, size)) { | |
9035 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9036 | ret = -EFAULT; | |
9037 | break; | |
9038 | } | |
9039 | ||
9040 | break; | |
9041 | } | |
9042 | ||
9043 | case SSD_CMD_SMBUS_WRITE_BYTE: { | |
9044 | struct ssd_smbus_op_info smbus_info; | |
9045 | uint8_t smb_data[SSD_SMBUS_BLOCK_MAX]; | |
9046 | uint8_t saddr; | |
9047 | uint8_t command; | |
9048 | uint8_t size; | |
9049 | ||
9050 | if (copy_from_user(&smbus_info, argp, sizeof(struct ssd_smbus_op_info))) { | |
9051 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9052 | ret = -EFAULT; | |
9053 | break; | |
9054 | } | |
9055 | ||
9056 | saddr = smbus_info.saddr; | |
9057 | command = smbus_info.cmd; | |
9058 | buf = smbus_info.buf; | |
9059 | size = 1; | |
9060 | ||
9061 | if (copy_from_user(smb_data, buf, size)) { | |
9062 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9063 | ret = -EFAULT; | |
9064 | break; | |
9065 | } | |
9066 | ||
9067 | ret = ssd_smbus_write_byte(dev, saddr, command, smb_data); | |
9068 | if (ret) { | |
9069 | break; | |
9070 | } | |
9071 | ||
9072 | break; | |
9073 | } | |
9074 | ||
9075 | case SSD_CMD_SMBUS_READ_BYTE: { | |
9076 | struct ssd_smbus_op_info smbus_info; | |
9077 | uint8_t smb_data[SSD_SMBUS_BLOCK_MAX]; | |
9078 | uint8_t saddr; | |
9079 | uint8_t command; | |
9080 | uint8_t size; | |
9081 | ||
9082 | if (copy_from_user(&smbus_info, argp, sizeof(struct ssd_smbus_op_info))) { | |
9083 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9084 | ret = -EFAULT; | |
9085 | break; | |
9086 | } | |
9087 | ||
9088 | saddr = smbus_info.saddr; | |
9089 | command = smbus_info.cmd; | |
9090 | buf = smbus_info.buf; | |
9091 | size = 1; | |
9092 | ||
9093 | ret = ssd_smbus_read_byte(dev, saddr, command, smb_data); | |
9094 | if (ret) { | |
9095 | break; | |
9096 | } | |
9097 | ||
9098 | if (copy_to_user(buf, smb_data, size)) { | |
9099 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9100 | ret = -EFAULT; | |
9101 | break; | |
9102 | } | |
9103 | ||
9104 | break; | |
9105 | } | |
9106 | ||
9107 | case SSD_CMD_SMBUS_WRITE_WORD: { | |
9108 | struct ssd_smbus_op_info smbus_info; | |
9109 | uint8_t smb_data[SSD_SMBUS_BLOCK_MAX]; | |
9110 | uint8_t saddr; | |
9111 | uint8_t command; | |
9112 | uint8_t size; | |
9113 | ||
9114 | if (copy_from_user(&smbus_info, argp, sizeof(struct ssd_smbus_op_info))) { | |
9115 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9116 | ret = -EFAULT; | |
9117 | break; | |
9118 | } | |
9119 | ||
9120 | saddr = smbus_info.saddr; | |
9121 | command = smbus_info.cmd; | |
9122 | buf = smbus_info.buf; | |
9123 | size = 2; | |
9124 | ||
9125 | if (copy_from_user(smb_data, buf, size)) { | |
9126 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9127 | ret = -EFAULT; | |
9128 | break; | |
9129 | } | |
9130 | ||
9131 | ret = ssd_smbus_write_word(dev, saddr, command, smb_data); | |
9132 | if (ret) { | |
9133 | break; | |
9134 | } | |
9135 | ||
9136 | break; | |
9137 | } | |
9138 | ||
9139 | case SSD_CMD_SMBUS_READ_WORD: { | |
9140 | struct ssd_smbus_op_info smbus_info; | |
9141 | uint8_t smb_data[SSD_SMBUS_BLOCK_MAX]; | |
9142 | uint8_t saddr; | |
9143 | uint8_t command; | |
9144 | uint8_t size; | |
9145 | ||
9146 | if (copy_from_user(&smbus_info, argp, sizeof(struct ssd_smbus_op_info))) { | |
9147 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9148 | ret = -EFAULT; | |
9149 | break; | |
9150 | } | |
9151 | ||
9152 | saddr = smbus_info.saddr; | |
9153 | command = smbus_info.cmd; | |
9154 | buf = smbus_info.buf; | |
9155 | size = 2; | |
9156 | ||
9157 | ret = ssd_smbus_read_word(dev, saddr, command, smb_data); | |
9158 | if (ret) { | |
9159 | break; | |
9160 | } | |
9161 | ||
9162 | if (copy_to_user(buf, smb_data, size)) { | |
9163 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9164 | ret = -EFAULT; | |
9165 | break; | |
9166 | } | |
9167 | ||
9168 | break; | |
9169 | } | |
9170 | ||
9171 | case SSD_CMD_SMBUS_WRITE_BLOCK: { | |
9172 | struct ssd_smbus_op_info smbus_info; | |
9173 | uint8_t smb_data[SSD_SMBUS_BLOCK_MAX]; | |
9174 | uint8_t saddr; | |
9175 | uint8_t command; | |
9176 | uint8_t size; | |
9177 | ||
9178 | if (copy_from_user(&smbus_info, argp, sizeof(struct ssd_smbus_op_info))) { | |
9179 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9180 | ret = -EFAULT; | |
9181 | break; | |
9182 | } | |
9183 | ||
9184 | saddr = smbus_info.saddr; | |
9185 | command = smbus_info.cmd; | |
9186 | buf = smbus_info.buf; | |
9187 | size = smbus_info.size; | |
9188 | ||
9189 | if (size > SSD_SMBUS_BLOCK_MAX) { | |
9190 | ret = -EINVAL; | |
9191 | break; | |
9192 | } | |
9193 | ||
9194 | if (copy_from_user(smb_data, buf, size)) { | |
9195 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9196 | ret = -EFAULT; | |
9197 | break; | |
9198 | } | |
9199 | ||
9200 | ret = ssd_smbus_write_block(dev, saddr, command, size, smb_data); | |
9201 | if (ret) { | |
9202 | break; | |
9203 | } | |
9204 | ||
9205 | break; | |
9206 | } | |
9207 | ||
9208 | case SSD_CMD_SMBUS_READ_BLOCK: { | |
9209 | struct ssd_smbus_op_info smbus_info; | |
9210 | uint8_t smb_data[SSD_SMBUS_BLOCK_MAX]; | |
9211 | uint8_t saddr; | |
9212 | uint8_t command; | |
9213 | uint8_t size; | |
9214 | ||
9215 | if (copy_from_user(&smbus_info, argp, sizeof(struct ssd_smbus_op_info))) { | |
9216 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9217 | ret = -EFAULT; | |
9218 | break; | |
9219 | } | |
9220 | ||
9221 | saddr = smbus_info.saddr; | |
9222 | command = smbus_info.cmd; | |
9223 | buf = smbus_info.buf; | |
9224 | size = smbus_info.size; | |
9225 | ||
9226 | if (size > SSD_SMBUS_BLOCK_MAX) { | |
9227 | ret = -EINVAL; | |
9228 | break; | |
9229 | } | |
9230 | ||
9231 | ret = ssd_smbus_read_block(dev, saddr, command, size, smb_data); | |
9232 | if (ret) { | |
9233 | break; | |
9234 | } | |
9235 | ||
9236 | if (copy_to_user(buf, smb_data, size)) { | |
9237 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9238 | ret = -EFAULT; | |
9239 | break; | |
9240 | } | |
9241 | ||
9242 | break; | |
9243 | } | |
9244 | ||
9245 | case SSD_CMD_BM_GET_VER: { | |
9246 | uint16_t ver; | |
9247 | ||
9248 | ret = ssd_bm_get_version(dev, &ver); | |
9249 | if (ret) { | |
9250 | break; | |
9251 | } | |
9252 | ||
9253 | if (copy_to_user(argp, &ver, sizeof(uint16_t))) { | |
9254 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9255 | ret = -EFAULT; | |
9256 | break; | |
9257 | } | |
9258 | ||
9259 | break; | |
9260 | } | |
9261 | ||
9262 | case SSD_CMD_BM_GET_NR_CAP: { | |
9263 | int nr_cap; | |
9264 | ||
9265 | ret = ssd_bm_nr_cap(dev, &nr_cap); | |
9266 | if (ret) { | |
9267 | break; | |
9268 | } | |
9269 | ||
9270 | if (copy_to_user(argp, &nr_cap, sizeof(int))) { | |
9271 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9272 | ret = -EFAULT; | |
9273 | break; | |
9274 | } | |
9275 | ||
9276 | break; | |
9277 | } | |
9278 | ||
9279 | case SSD_CMD_BM_CAP_LEARNING: { | |
9280 | ret = ssd_bm_enter_cap_learning(dev); | |
9281 | ||
9282 | if (ret) { | |
9283 | break; | |
9284 | } | |
9285 | ||
9286 | break; | |
9287 | } | |
9288 | ||
9289 | case SSD_CMD_CAP_LEARN: { | |
9290 | uint32_t cap = 0; | |
9291 | ||
9292 | ret = ssd_cap_learn(dev, &cap); | |
9293 | if (ret) { | |
9294 | break; | |
9295 | } | |
9296 | ||
9297 | if (copy_to_user(argp, &cap, sizeof(uint32_t))) { | |
9298 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9299 | ret = -EFAULT; | |
9300 | break; | |
9301 | } | |
9302 | ||
9303 | break; | |
9304 | } | |
9305 | ||
9306 | case SSD_CMD_GET_CAP_STATUS: { | |
9307 | int cap_status = 0; | |
9308 | ||
9309 | if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) { | |
9310 | cap_status = 1; | |
9311 | } | |
9312 | ||
9313 | if (copy_to_user(argp, &cap_status, sizeof(int))) { | |
9314 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9315 | ret = -EFAULT; | |
9316 | break; | |
9317 | } | |
9318 | ||
9319 | break; | |
9320 | } | |
9321 | ||
9322 | case SSD_CMD_RAM_READ: { | |
9323 | struct ssd_ram_op_info ram_info; | |
9324 | uint64_t ofs; | |
9325 | uint32_t length; | |
9326 | size_t rlen, len = dev->hw_info.ram_max_len; | |
9327 | int ctrl_idx; | |
9328 | ||
9329 | if (copy_from_user(&ram_info, argp, sizeof(struct ssd_ram_op_info))) { | |
9330 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9331 | ret = -EFAULT; | |
9332 | break; | |
9333 | } | |
9334 | ||
9335 | ofs = ram_info.start; | |
9336 | length = ram_info.length; | |
9337 | buf = ram_info.buf; | |
9338 | ctrl_idx = ram_info.ctrl_idx; | |
9339 | ||
9340 | if (ofs >= dev->hw_info.ram_size || length > dev->hw_info.ram_size || 0 == length || (ofs + length) > dev->hw_info.ram_size) { | |
9341 | ret = -EINVAL; | |
9342 | break; | |
9343 | } | |
9344 | ||
9345 | kbuf = kmalloc(len, GFP_KERNEL); | |
9346 | if (!kbuf) { | |
9347 | ret = -ENOMEM; | |
9348 | break; | |
9349 | } | |
9350 | ||
9351 | for (rlen=0; rlen<length; rlen+=len, buf+=len, ofs+=len) { | |
9352 | if ((length - rlen) < len) { | |
9353 | len = length - rlen; | |
9354 | } | |
9355 | ||
9356 | ret = ssd_ram_read(dev, kbuf, len, ofs, ctrl_idx); | |
9357 | if (ret) { | |
9358 | break; | |
9359 | } | |
9360 | ||
9361 | if (copy_to_user(buf, kbuf, len)) { | |
9362 | ret = -EFAULT; | |
9363 | break; | |
9364 | } | |
9365 | } | |
9366 | ||
9367 | kfree(kbuf); | |
9368 | ||
9369 | break; | |
9370 | } | |
9371 | ||
9372 | case SSD_CMD_RAM_WRITE: { | |
9373 | struct ssd_ram_op_info ram_info; | |
9374 | uint64_t ofs; | |
9375 | uint32_t length; | |
9376 | size_t wlen, len = dev->hw_info.ram_max_len; | |
9377 | int ctrl_idx; | |
9378 | ||
9379 | if (copy_from_user(&ram_info, argp, sizeof(struct ssd_ram_op_info))) { | |
9380 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9381 | ret = -EFAULT; | |
9382 | break; | |
9383 | } | |
9384 | ofs = ram_info.start; | |
9385 | length = ram_info.length; | |
9386 | buf = ram_info.buf; | |
9387 | ctrl_idx = ram_info.ctrl_idx; | |
9388 | ||
9389 | if (ofs >= dev->hw_info.ram_size || length > dev->hw_info.ram_size || 0 == length || (ofs + length) > dev->hw_info.ram_size) { | |
9390 | ret = -EINVAL; | |
9391 | break; | |
9392 | } | |
9393 | ||
9394 | kbuf = kmalloc(len, GFP_KERNEL); | |
9395 | if (!kbuf) { | |
9396 | ret = -ENOMEM; | |
9397 | break; | |
9398 | } | |
9399 | ||
9400 | for (wlen=0; wlen<length; wlen+=len, buf+=len, ofs+=len) { | |
9401 | if ((length - wlen) < len) { | |
9402 | len = length - wlen; | |
9403 | } | |
9404 | ||
9405 | if (copy_from_user(kbuf, buf, len)) { | |
9406 | ret = -EFAULT; | |
9407 | break; | |
9408 | } | |
9409 | ||
9410 | ret = ssd_ram_write(dev, kbuf, len, ofs, ctrl_idx); | |
9411 | if (ret) { | |
9412 | break; | |
9413 | } | |
9414 | } | |
9415 | ||
9416 | kfree(kbuf); | |
9417 | ||
9418 | break; | |
9419 | } | |
9420 | ||
9421 | case SSD_CMD_NAND_READ_ID: { | |
9422 | struct ssd_flash_op_info flash_info; | |
9423 | int chip_no, chip_ce, length, ctrl_idx; | |
9424 | ||
9425 | if (copy_from_user(&flash_info, argp, sizeof(struct ssd_flash_op_info))) { | |
9426 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9427 | ret = -EFAULT; | |
9428 | break; | |
9429 | } | |
9430 | ||
9431 | chip_no = flash_info.flash; | |
9432 | chip_ce = flash_info.chip; | |
9433 | ctrl_idx = flash_info.ctrl_idx; | |
9434 | buf = flash_info.buf; | |
9435 | length = dev->hw_info.id_size; | |
9436 | ||
9437 | //kbuf = kmalloc(length, GFP_KERNEL); | |
9438 | kbuf = kmalloc(SSD_NAND_ID_BUFF_SZ, GFP_KERNEL); //xx | |
9439 | if (!kbuf) { | |
9440 | ret = -ENOMEM; | |
9441 | break; | |
9442 | } | |
9443 | memset(kbuf, 0, length); | |
9444 | ||
9445 | ret = ssd_nand_read_id(dev, kbuf, chip_no, chip_ce, ctrl_idx); | |
9446 | if (ret) { | |
9447 | kfree(kbuf); | |
9448 | break; | |
9449 | } | |
9450 | ||
9451 | if (copy_to_user(buf, kbuf, length)) { | |
9452 | kfree(kbuf); | |
9453 | ret = -EFAULT; | |
9454 | break; | |
9455 | } | |
9456 | ||
9457 | kfree(kbuf); | |
9458 | ||
9459 | break; | |
9460 | } | |
9461 | ||
9462 | case SSD_CMD_NAND_READ: { //with oob | |
9463 | struct ssd_flash_op_info flash_info; | |
9464 | uint32_t length; | |
9465 | int flash, chip, page, ctrl_idx; | |
9466 | int err = 0; | |
9467 | ||
9468 | if (copy_from_user(&flash_info, argp, sizeof(struct ssd_flash_op_info))) { | |
9469 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9470 | ret = -EFAULT; | |
9471 | break; | |
9472 | } | |
9473 | ||
9474 | flash = flash_info.flash; | |
9475 | chip = flash_info.chip; | |
9476 | page = flash_info.page; | |
9477 | buf = flash_info.buf; | |
9478 | ctrl_idx = flash_info.ctrl_idx; | |
9479 | ||
9480 | length = dev->hw_info.page_size + dev->hw_info.oob_size; | |
9481 | ||
9482 | kbuf = kmalloc(length, GFP_KERNEL); | |
9483 | if (!kbuf) { | |
9484 | ret = -ENOMEM; | |
9485 | break; | |
9486 | } | |
9487 | ||
9488 | err = ret = ssd_nand_read_w_oob(dev, kbuf, flash, chip, page, 1, ctrl_idx); | |
9489 | if (ret && (-EIO != ret)) { | |
9490 | kfree(kbuf); | |
9491 | break; | |
9492 | } | |
9493 | ||
9494 | if (copy_to_user(buf, kbuf, length)) { | |
9495 | kfree(kbuf); | |
9496 | ret = -EFAULT; | |
9497 | break; | |
9498 | } | |
9499 | ||
9500 | ret = err; | |
9501 | ||
9502 | kfree(kbuf); | |
9503 | break; | |
9504 | } | |
9505 | ||
9506 | case SSD_CMD_NAND_WRITE: { | |
9507 | struct ssd_flash_op_info flash_info; | |
9508 | int flash, chip, page, ctrl_idx; | |
9509 | uint32_t length; | |
9510 | ||
9511 | if (copy_from_user(&flash_info, argp, sizeof(struct ssd_flash_op_info))) { | |
9512 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9513 | ret = -EFAULT; | |
9514 | break; | |
9515 | } | |
9516 | ||
9517 | flash = flash_info.flash; | |
9518 | chip = flash_info.chip; | |
9519 | page = flash_info.page; | |
9520 | buf = flash_info.buf; | |
9521 | ctrl_idx = flash_info.ctrl_idx; | |
9522 | ||
9523 | length = dev->hw_info.page_size + dev->hw_info.oob_size; | |
9524 | ||
9525 | kbuf = kmalloc(length, GFP_KERNEL); | |
9526 | if (!kbuf) { | |
9527 | ret = -ENOMEM; | |
9528 | break; | |
9529 | } | |
9530 | ||
9531 | if (copy_from_user(kbuf, buf, length)) { | |
9532 | kfree(kbuf); | |
9533 | ret = -EFAULT; | |
9534 | break; | |
9535 | } | |
9536 | ||
9537 | ret = ssd_nand_write(dev, kbuf, flash, chip, page, 1, ctrl_idx); | |
9538 | if (ret) { | |
9539 | kfree(kbuf); | |
9540 | break; | |
9541 | } | |
9542 | ||
9543 | kfree(kbuf); | |
9544 | break; | |
9545 | } | |
9546 | ||
9547 | case SSD_CMD_NAND_ERASE: { | |
9548 | struct ssd_flash_op_info flash_info; | |
9549 | int flash, chip, page, ctrl_idx; | |
9550 | ||
9551 | if (copy_from_user(&flash_info, argp, sizeof(struct ssd_flash_op_info))) { | |
9552 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9553 | ret = -EFAULT; | |
9554 | break; | |
9555 | } | |
9556 | ||
9557 | flash = flash_info.flash; | |
9558 | chip = flash_info.chip; | |
9559 | page = flash_info.page; | |
9560 | ctrl_idx = flash_info.ctrl_idx; | |
9561 | ||
9562 | if ((page % dev->hw_info.page_count) != 0) { | |
9563 | ret = -EINVAL; | |
9564 | break; | |
9565 | } | |
9566 | ||
9567 | //hio_warn("erase fs = %llx\n", ofs); | |
9568 | ret = ssd_nand_erase(dev, flash, chip, page, ctrl_idx); | |
9569 | if (ret) { | |
9570 | break; | |
9571 | } | |
9572 | ||
9573 | break; | |
9574 | } | |
9575 | ||
9576 | case SSD_CMD_NAND_READ_EXT: { //ignore EIO | |
9577 | struct ssd_flash_op_info flash_info; | |
9578 | uint32_t length; | |
9579 | int flash, chip, page, ctrl_idx; | |
9580 | ||
9581 | if (copy_from_user(&flash_info, argp, sizeof(struct ssd_flash_op_info))) { | |
9582 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9583 | ret = -EFAULT; | |
9584 | break; | |
9585 | } | |
9586 | ||
9587 | flash = flash_info.flash; | |
9588 | chip = flash_info.chip; | |
9589 | page = flash_info.page; | |
9590 | buf = flash_info.buf; | |
9591 | ctrl_idx = flash_info.ctrl_idx; | |
9592 | ||
9593 | length = dev->hw_info.page_size + dev->hw_info.oob_size; | |
9594 | ||
9595 | kbuf = kmalloc(length, GFP_KERNEL); | |
9596 | if (!kbuf) { | |
9597 | ret = -ENOMEM; | |
9598 | break; | |
9599 | } | |
9600 | ||
9601 | ret = ssd_nand_read_w_oob(dev, kbuf, flash, chip, page, 1, ctrl_idx); | |
9602 | if (-EIO == ret) { //ignore EIO | |
9603 | ret = 0; | |
9604 | } | |
9605 | if (ret) { | |
9606 | kfree(kbuf); | |
9607 | break; | |
9608 | } | |
9609 | ||
9610 | if (copy_to_user(buf, kbuf, length)) { | |
9611 | kfree(kbuf); | |
9612 | ret = -EFAULT; | |
9613 | break; | |
9614 | } | |
9615 | ||
9616 | kfree(kbuf); | |
9617 | break; | |
9618 | } | |
9619 | ||
9620 | case SSD_CMD_UPDATE_BBT: { | |
9621 | struct ssd_flash_op_info flash_info; | |
9622 | int ctrl_idx, flash; | |
9623 | ||
9624 | if (copy_from_user(&flash_info, argp, sizeof(struct ssd_flash_op_info))) { | |
9625 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9626 | ret = -EFAULT; | |
9627 | break; | |
9628 | } | |
9629 | ||
9630 | ctrl_idx = flash_info.ctrl_idx; | |
9631 | flash = flash_info.flash; | |
9632 | ret = ssd_update_bbt(dev, flash, ctrl_idx); | |
9633 | if (ret) { | |
9634 | break; | |
9635 | } | |
9636 | ||
9637 | break; | |
9638 | } | |
9639 | ||
9640 | case SSD_CMD_CLEAR_ALARM: | |
9641 | ssd_clear_alarm(dev); | |
9642 | break; | |
9643 | ||
9644 | case SSD_CMD_SET_ALARM: | |
9645 | ssd_set_alarm(dev); | |
9646 | break; | |
9647 | ||
9648 | case SSD_CMD_RESET: | |
9649 | ret = ssd_do_reset(dev); | |
9650 | break; | |
9651 | ||
9652 | case SSD_CMD_RELOAD_FW: | |
9653 | dev->reload_fw = 1; | |
da3355df | 9654 | dev->has_non_0x98_reg_access = 1; |
361ebed5 HSDT |
9655 | if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) { |
9656 | ssd_reg32_write(dev->ctrlp + SSD_RELOAD_FW_REG, SSD_RELOAD_FLAG); | |
9657 | } else if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_1_1) { | |
9658 | ssd_reg32_write(dev->ctrlp + SSD_RELOAD_FW_REG, SSD_RELOAD_FW); | |
9659 | ||
9660 | } | |
9661 | break; | |
9662 | ||
9663 | case SSD_CMD_UNLOAD_DEV: { | |
9664 | if (atomic_read(&dev->refcnt)) { | |
9665 | ret = -EBUSY; | |
9666 | break; | |
9667 | } | |
9668 | ||
9669 | /* save smart */ | |
9670 | ssd_save_smart(dev); | |
9671 | ||
9672 | ret = ssd_flush(dev); | |
9673 | if (ret) { | |
9674 | break; | |
9675 | } | |
9676 | ||
9677 | /* cleanup the block device */ | |
9678 | if (test_and_clear_bit(SSD_INIT_BD, &dev->state)) { | |
9679 | mutex_lock(&dev->gd_mutex); | |
9680 | ssd_cleanup_blkdev(dev); | |
1197134c | 9681 | ssd_cleanup_queue(dev); |
361ebed5 HSDT |
9682 | mutex_unlock(&dev->gd_mutex); |
9683 | } | |
9684 | ||
9685 | break; | |
9686 | } | |
9687 | ||
9688 | case SSD_CMD_LOAD_DEV: { | |
9689 | ||
9690 | if (test_bit(SSD_INIT_BD, &dev->state)) { | |
9691 | ret = -EINVAL; | |
9692 | break; | |
9693 | } | |
9694 | ||
9695 | ret = ssd_init_smart(dev); | |
9696 | if (ret) { | |
9697 | hio_warn("%s: init info: failed\n", dev->name); | |
9698 | break; | |
9699 | } | |
9700 | ||
1197134c KM |
9701 | ret = ssd_init_queue(dev); |
9702 | if (ret) { | |
9703 | hio_warn("%s: init queue failed\n", dev->name); | |
9704 | break; | |
9705 | } | |
361ebed5 HSDT |
9706 | ret = ssd_init_blkdev(dev); |
9707 | if (ret) { | |
9708 | hio_warn("%s: register block device: failed\n", dev->name); | |
9709 | break; | |
9710 | } | |
9711 | (void)test_and_set_bit(SSD_INIT_BD, &dev->state); | |
9712 | ||
9713 | break; | |
9714 | } | |
9715 | ||
9716 | case SSD_CMD_UPDATE_VP: { | |
9717 | uint32_t val; | |
9718 | uint32_t new_vp, new_vp1 = 0; | |
9719 | ||
9720 | if (test_bit(SSD_INIT_BD, &dev->state)) { | |
9721 | ret = -EINVAL; | |
9722 | break; | |
9723 | } | |
9724 | ||
9725 | if (copy_from_user(&new_vp, argp, sizeof(uint32_t))) { | |
9726 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9727 | ret = -EFAULT; | |
9728 | break; | |
9729 | } | |
9730 | ||
9731 | if (new_vp > dev->hw_info.max_valid_pages || new_vp <= 0) { | |
9732 | ret = -EINVAL; | |
9733 | break; | |
9734 | } | |
9735 | ||
9736 | while (new_vp <= dev->hw_info.max_valid_pages) { | |
9737 | ssd_reg32_write(dev->ctrlp + SSD_VALID_PAGES_REG, new_vp); | |
9738 | msleep(10); | |
9739 | val = ssd_reg32_read(dev->ctrlp + SSD_VALID_PAGES_REG); | |
9740 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
9741 | new_vp1 = val & 0x3FF; | |
9742 | } else { | |
9743 | new_vp1 = val & 0x7FFF; | |
9744 | } | |
9745 | ||
9746 | if (new_vp1 == new_vp) { | |
9747 | break; | |
9748 | } | |
9749 | ||
9750 | new_vp++; | |
9751 | /*if (new_vp == dev->hw_info.valid_pages) { | |
9752 | new_vp++; | |
9753 | }*/ | |
9754 | } | |
9755 | ||
9756 | if (new_vp1 != new_vp || new_vp > dev->hw_info.max_valid_pages) { | |
9757 | /* restore */ | |
9758 | ssd_reg32_write(dev->ctrlp + SSD_VALID_PAGES_REG, dev->hw_info.valid_pages); | |
9759 | ret = -EINVAL; | |
9760 | break; | |
9761 | } | |
9762 | ||
9763 | if (copy_to_user(argp, &new_vp, sizeof(uint32_t))) { | |
9764 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9765 | ssd_reg32_write(dev->ctrlp + SSD_VALID_PAGES_REG, dev->hw_info.valid_pages); | |
9766 | ret = -EFAULT; | |
9767 | break; | |
9768 | } | |
9769 | ||
9770 | /* new */ | |
9771 | dev->hw_info.valid_pages = new_vp; | |
9772 | dev->hw_info.size = (uint64_t)dev->hw_info.valid_pages * dev->hw_info.page_size; | |
9773 | dev->hw_info.size *= (dev->hw_info.block_count - dev->hw_info.reserved_blks); | |
9774 | dev->hw_info.size *= ((uint64_t)dev->hw_info.nr_data_ch * (uint64_t)dev->hw_info.nr_chip * (uint64_t)dev->hw_info.nr_ctrl); | |
9775 | ||
9776 | break; | |
9777 | } | |
9778 | ||
9779 | case SSD_CMD_FULL_RESET: { | |
9780 | ret = ssd_full_reset(dev); | |
9781 | break; | |
9782 | } | |
9783 | ||
9784 | case SSD_CMD_GET_NR_LOG: { | |
9785 | if (copy_to_user(argp, &dev->internal_log.nr_log, sizeof(dev->internal_log.nr_log))) { | |
9786 | ret = -EFAULT; | |
9787 | break; | |
9788 | } | |
9789 | break; | |
9790 | } | |
9791 | ||
9792 | case SSD_CMD_GET_LOG: { | |
9793 | uint32_t length = dev->rom_info.log_sz; | |
9794 | ||
9795 | buf = argp; | |
9796 | ||
9797 | if (copy_to_user(buf, dev->internal_log.log, length)) { | |
9798 | ret = -EFAULT; | |
9799 | break; | |
9800 | } | |
9801 | ||
9802 | break; | |
9803 | } | |
9804 | ||
9805 | case SSD_CMD_LOG_LEVEL: { | |
9806 | int level = 0; | |
9807 | if (copy_from_user(&level, argp, sizeof(int))) { | |
9808 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9809 | ret = -EFAULT; | |
9810 | break; | |
9811 | } | |
9812 | ||
9813 | if (level >= SSD_LOG_NR_LEVEL || level < SSD_LOG_LEVEL_INFO) { | |
9814 | level = SSD_LOG_LEVEL_ERR; | |
9815 | } | |
9816 | ||
9817 | //just for showing log, no need to protect | |
9818 | log_level = level; | |
9819 | break; | |
9820 | } | |
9821 | ||
9822 | case SSD_CMD_OT_PROTECT: { | |
9823 | int protect = 0; | |
9824 | ||
9825 | if (copy_from_user(&protect, argp, sizeof(int))) { | |
9826 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9827 | ret = -EFAULT; | |
9828 | break; | |
9829 | } | |
9830 | ||
9831 | ssd_set_ot_protect(dev, !!protect); | |
9832 | break; | |
9833 | } | |
9834 | ||
9835 | case SSD_CMD_GET_OT_STATUS: { | |
9836 | int status = ssd_get_ot_status(dev, &status); | |
9837 | ||
9838 | if (copy_to_user(argp, &status, sizeof(int))) { | |
9839 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9840 | ret = -EFAULT; | |
9841 | break; | |
9842 | } | |
9843 | break; | |
9844 | } | |
9845 | ||
9846 | case SSD_CMD_CLEAR_LOG: { | |
9847 | ret = ssd_clear_log(dev); | |
9848 | break; | |
9849 | } | |
9850 | ||
9851 | case SSD_CMD_CLEAR_SMART: { | |
9852 | ret = ssd_clear_smart(dev); | |
9853 | break; | |
9854 | } | |
9855 | ||
1197134c KM |
9856 | case SSD_CMD_CLEAR_WARNING: { |
9857 | ret = ssd_clear_warning(dev); | |
9858 | break; | |
9859 | } | |
9860 | ||
361ebed5 HSDT |
9861 | case SSD_CMD_SW_LOG: { |
9862 | struct ssd_sw_log_info sw_log; | |
9863 | ||
9864 | if (copy_from_user(&sw_log, argp, sizeof(struct ssd_sw_log_info))) { | |
9865 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9866 | ret = -EFAULT; | |
9867 | break; | |
9868 | } | |
9869 | ||
9870 | ret = ssd_gen_swlog(dev, sw_log.event, sw_log.data); | |
9871 | break; | |
9872 | } | |
9873 | ||
9874 | case SSD_CMD_GET_LABEL: { | |
9875 | ||
9876 | if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) { | |
9877 | ret = -EINVAL; | |
9878 | break; | |
9879 | } | |
9880 | ||
9881 | if (copy_to_user(argp, &dev->label, sizeof(struct ssd_label))) { | |
9882 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9883 | ret = -EFAULT; | |
9884 | break; | |
9885 | } | |
9886 | break; | |
9887 | } | |
9888 | ||
9889 | case SSD_CMD_GET_VERSION: { | |
9890 | struct ssd_version_info ver; | |
9891 | ||
9892 | mutex_lock(&dev->fw_mutex); | |
9893 | ret = __ssd_get_version(dev, &ver); | |
9894 | mutex_unlock(&dev->fw_mutex); | |
9895 | if (ret) { | |
9896 | break; | |
9897 | } | |
9898 | ||
9899 | if (copy_to_user(argp, &ver, sizeof(struct ssd_version_info))) { | |
9900 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9901 | ret = -EFAULT; | |
9902 | break; | |
9903 | } | |
9904 | break; | |
9905 | } | |
9906 | ||
9907 | case SSD_CMD_GET_TEMPERATURE: { | |
9908 | int temp; | |
9909 | ||
9910 | mutex_lock(&dev->fw_mutex); | |
9911 | ret = __ssd_get_temperature(dev, &temp); | |
9912 | mutex_unlock(&dev->fw_mutex); | |
9913 | if (ret) { | |
9914 | break; | |
9915 | } | |
9916 | ||
9917 | if (copy_to_user(argp, &temp, sizeof(int))) { | |
9918 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9919 | ret = -EFAULT; | |
9920 | break; | |
9921 | } | |
9922 | break; | |
9923 | } | |
9924 | ||
9925 | case SSD_CMD_GET_BMSTATUS: { | |
9926 | int status; | |
9927 | ||
9928 | mutex_lock(&dev->fw_mutex); | |
9929 | if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) { | |
9930 | if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) { | |
9931 | status = SSD_BMSTATUS_WARNING; | |
9932 | } else { | |
9933 | status = SSD_BMSTATUS_OK; | |
9934 | } | |
9935 | } else if(dev->protocol_info.ver > SSD_PROTOCOL_V3) { | |
9936 | ret = __ssd_bm_status(dev, &status); | |
9937 | } else { | |
9938 | status = SSD_BMSTATUS_OK; | |
9939 | } | |
9940 | mutex_unlock(&dev->fw_mutex); | |
9941 | if (ret) { | |
9942 | break; | |
9943 | } | |
9944 | ||
9945 | if (copy_to_user(argp, &status, sizeof(int))) { | |
9946 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9947 | ret = -EFAULT; | |
9948 | break; | |
9949 | } | |
9950 | break; | |
9951 | } | |
9952 | ||
9953 | case SSD_CMD_GET_LABEL2: { | |
9954 | void *label; | |
9955 | int length; | |
9956 | ||
9957 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
9958 | label = &dev->label; | |
9959 | length = sizeof(struct ssd_label); | |
9960 | } else { | |
9961 | label = &dev->labelv3; | |
9962 | length = sizeof(struct ssd_labelv3); | |
9963 | } | |
9964 | ||
9965 | if (copy_to_user(argp, label, length)) { | |
9966 | ret = -EFAULT; | |
9967 | break; | |
9968 | } | |
9969 | break; | |
9970 | } | |
9971 | ||
9972 | case SSD_CMD_FLUSH: | |
9973 | ret = ssd_flush(dev); | |
9974 | if (ret) { | |
9975 | hio_warn("%s: ssd_flush: failed\n", dev->name); | |
9976 | ret = -EFAULT; | |
9977 | break; | |
9978 | } | |
9979 | break; | |
9980 | ||
9981 | case SSD_CMD_SAVE_MD: { | |
9982 | int save_md = 0; | |
9983 | ||
9984 | if (copy_from_user(&save_md, argp, sizeof(int))) { | |
9985 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9986 | ret = -EFAULT; | |
9987 | break; | |
9988 | } | |
9989 | ||
9990 | dev->save_md = !!save_md; | |
9991 | break; | |
9992 | } | |
9993 | ||
9994 | case SSD_CMD_SET_WMODE: { | |
9995 | int new_wmode = 0; | |
9996 | ||
9997 | if (copy_from_user(&new_wmode, argp, sizeof(int))) { | |
9998 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9999 | ret = -EFAULT; | |
10000 | break; | |
10001 | } | |
10002 | ||
10003 | ret = __ssd_set_wmode(dev, new_wmode); | |
10004 | if (ret) { | |
10005 | break; | |
10006 | } | |
10007 | ||
10008 | break; | |
10009 | } | |
10010 | ||
10011 | case SSD_CMD_GET_WMODE: { | |
10012 | if (copy_to_user(argp, &dev->wmode, sizeof(int))) { | |
10013 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
10014 | ret = -EFAULT; | |
10015 | break; | |
10016 | } | |
10017 | ||
10018 | break; | |
10019 | } | |
10020 | ||
10021 | case SSD_CMD_GET_USER_WMODE: { | |
10022 | if (copy_to_user(argp, &dev->user_wmode, sizeof(int))) { | |
10023 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
10024 | ret = -EFAULT; | |
10025 | break; | |
10026 | } | |
10027 | ||
10028 | break; | |
10029 | } | |
10030 | ||
10031 | case SSD_CMD_DEBUG: { | |
10032 | struct ssd_debug_info db_info; | |
10033 | ||
10034 | if (!finject) { | |
10035 | ret = -EOPNOTSUPP; | |
10036 | break; | |
10037 | } | |
10038 | ||
10039 | if (copy_from_user(&db_info, argp, sizeof(struct ssd_debug_info))) { | |
10040 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
10041 | ret = -EFAULT; | |
10042 | break; | |
10043 | } | |
10044 | ||
10045 | if (db_info.type < SSD_DEBUG_NONE || db_info.type >= SSD_DEBUG_NR) { | |
10046 | ret = -EINVAL; | |
10047 | break; | |
10048 | } | |
10049 | ||
10050 | /* IO */ | |
10051 | if (db_info.type >= SSD_DEBUG_READ_ERR && db_info.type <= SSD_DEBUG_RW_ERR && | |
10052 | (db_info.data.loc.off + db_info.data.loc.len) > (dev->hw_info.size >> 9)) { | |
10053 | ret = -EINVAL; | |
10054 | break; | |
10055 | } | |
10056 | ||
10057 | memcpy(&dev->db_info, &db_info, sizeof(struct ssd_debug_info)); | |
10058 | ||
10059 | #ifdef SSD_OT_PROTECT | |
10060 | /* temperature */ | |
10061 | if (db_info.type == SSD_DEBUG_NONE) { | |
10062 | ssd_check_temperature(dev, SSD_OT_TEMP); | |
10063 | } else if (db_info.type == SSD_DEBUG_LOG) { | |
10064 | if (db_info.data.log.event == SSD_LOG_OVER_TEMP) { | |
10065 | dev->ot_delay = SSD_OT_DELAY; | |
10066 | } else if (db_info.data.log.event == SSD_LOG_NORMAL_TEMP) { | |
10067 | dev->ot_delay = 0; | |
10068 | } | |
10069 | } | |
10070 | #endif | |
10071 | ||
10072 | /* offline */ | |
10073 | if (db_info.type == SSD_DEBUG_OFFLINE) { | |
10074 | test_and_clear_bit(SSD_ONLINE, &dev->state); | |
10075 | } else if (db_info.type == SSD_DEBUG_NONE) { | |
10076 | (void)test_and_set_bit(SSD_ONLINE, &dev->state); | |
10077 | } | |
10078 | ||
10079 | /* log */ | |
10080 | if (db_info.type == SSD_DEBUG_LOG && dev->event_call && dev->gd) { | |
10081 | dev->event_call(dev->gd, db_info.data.log.event, 0); | |
10082 | } | |
10083 | ||
10084 | break; | |
10085 | } | |
10086 | ||
10087 | case SSD_CMD_DRV_PARAM_INFO: { | |
10088 | struct ssd_drv_param_info drv_param; | |
10089 | ||
10090 | memset(&drv_param, 0, sizeof(struct ssd_drv_param_info)); | |
10091 | ||
10092 | drv_param.mode = mode; | |
10093 | drv_param.status_mask = status_mask; | |
10094 | drv_param.int_mode = int_mode; | |
10095 | drv_param.threaded_irq = threaded_irq; | |
10096 | drv_param.log_level = log_level; | |
10097 | drv_param.wmode = wmode; | |
10098 | drv_param.ot_protect = ot_protect; | |
10099 | drv_param.finject = finject; | |
10100 | ||
10101 | if (copy_to_user(argp, &drv_param, sizeof(struct ssd_drv_param_info))) { | |
10102 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
10103 | ret = -EFAULT; | |
10104 | break; | |
10105 | } | |
10106 | break; | |
10107 | } | |
10108 | ||
10109 | default: | |
10110 | ret = -EINVAL; | |
10111 | break; | |
10112 | } | |
10113 | ||
10114 | return ret; | |
10115 | } | |
10116 | ||
10117 | ||
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
/*
 * Block-device ioctl entry point (pre-2.6.28 prototype: inode/file based).
 * Handles HDIO_GETGEO and BLKFLSBUF locally; every other command is
 * forwarded to ssd_ioctl_common() unless this is a slave device.
 */
static int ssd_block_ioctl(struct inode *inode, struct file *file,
	unsigned int cmd, unsigned long arg)
{
	struct ssd_device *dev;
	void __user *argp = (void __user *)arg;
	int ret = 0;

	if (!inode) {
		return -EINVAL;
	}
	dev = inode->i_bdev->bd_disk->private_data;
	if (!dev) {
		return -EINVAL;
	}
#else
/*
 * Block-device ioctl entry point (2.6.28+ prototype: block_device/fmode_t).
 * Same dispatch logic as the legacy variant above.
 */
static int ssd_block_ioctl(struct block_device *bdev, fmode_t mode,
	unsigned int cmd, unsigned long arg)
{
	struct ssd_device *dev;
	void __user *argp = (void __user *)arg;
	int ret = 0;

	if (!bdev) {
		return -EINVAL;
	}

	dev = bdev->bd_disk->private_data;
	if (!dev) {
		return -EINVAL;
	}
#endif

	switch (cmd) {
	case HDIO_GETGEO: {
		/* Report a fabricated CHS geometry for partitioning tools. */
		struct hd_geometry geo;
		/* NOTE(review): hw_info.size is in bytes elsewhere
		 * (set_capacity() uses size >> 9); with heads*sectors = 64,
		 * cylinders from the 512B-sector count would be size >> 15,
		 * not size >> 6 — confirm this shift is intentional. */
		geo.cylinders = (dev->hw_info.size & ~0x3f) >> 6;
		geo.heads = 4;
		geo.sectors = 16;
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
		geo.start = get_start_sect(inode->i_bdev);
#else
		geo.start = get_start_sect(bdev);
#endif
		if (copy_to_user(argp, &geo, sizeof(geo))) {
			ret = -EFAULT;
			break;
		}

		break;
	}

	case BLKFLSBUF:
		/* Flush device-side caches before the kernel drops buffers. */
		ret = ssd_flush(dev);
		if (ret) {
			hio_warn("%s: ssd_flush: failed\n", dev->name);
			ret = -EFAULT;
			break;
		}
		break;

	default:
		/* Vendor commands are only honoured on the master device. */
		if (!dev->slave) {
			ret = ssd_ioctl_common(dev, cmd, arg);
		} else {
			ret = -EFAULT;
		}
		break;
	}

	return ret;
}
10190 | ||
10191 | ||
10192 | static void ssd_free_dev(struct kref *kref) | |
10193 | { | |
10194 | struct ssd_device *dev; | |
10195 | ||
10196 | if (!kref) { | |
10197 | return; | |
10198 | } | |
10199 | ||
10200 | dev = container_of(kref, struct ssd_device, kref); | |
10201 | ||
10202 | put_disk(dev->gd); | |
10203 | ||
10204 | ssd_put_index(dev->slave, dev->idx); | |
10205 | ||
10206 | kfree(dev); | |
10207 | } | |
10208 | ||
10209 | static void ssd_put(struct ssd_device *dev) | |
10210 | { | |
10211 | kref_put(&dev->kref, ssd_free_dev); | |
10212 | } | |
10213 | ||
10214 | static int ssd_get(struct ssd_device *dev) | |
10215 | { | |
10216 | kref_get(&dev->kref); | |
10217 | return 0; | |
10218 | } | |
10219 | ||
10220 | /* block device */ | |
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
/*
 * Block-device open (pre-2.6.28 prototype). Resolves the driver private
 * data from the inode, pins the device with a kref and bumps the open
 * counter.
 */
static int ssd_block_open(struct inode *inode, struct file *filp)
{
	struct ssd_device *dev;

	if (!inode) {
		return -EINVAL;
	}

	dev = inode->i_bdev->bd_disk->private_data;
	if (!dev) {
		return -EINVAL;
	}
#else
/* Block-device open (2.6.28+ prototype, block_device based). */
static int ssd_block_open(struct block_device *bdev, fmode_t mode)
{
	struct ssd_device *dev;

	if (!bdev) {
		return -EINVAL;
	}

	dev = bdev->bd_disk->private_data;
	if (!dev) {
		return -EINVAL;
	}
#endif

	/*if (!try_module_get(dev->owner))
		return -ENODEV;
	*/

	/* Lifetime reference, paired with ssd_put() in ssd_block_release(). */
	ssd_get(dev);

	/* refcnt counts current openers (distinct from the kref). */
	atomic_inc(&dev->refcnt);

	return 0;
}
10259 | ||
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
/*
 * Block-device release (pre-2.6.28 prototype). Undoes ssd_block_open():
 * drops the open counter and the kref taken there.
 */
static int ssd_block_release(struct inode *inode, struct file *filp)
{
	struct ssd_device *dev;

	if (!inode) {
		return -EINVAL;
	}

	dev = inode->i_bdev->bd_disk->private_data;
	if (!dev) {
		return -EINVAL;
	}
#elif (LINUX_VERSION_CODE <= KERNEL_VERSION(3,9,0))
/* 2.6.28 .. 3.9: gendisk-based prototype, still returns int. */
static int ssd_block_release(struct gendisk *disk, fmode_t mode)
{
	struct ssd_device *dev;

	if (!disk) {
		return -EINVAL;
	}

	dev = disk->private_data;
	if (!dev) {
		return -EINVAL;
	}
#else
/* 3.10+: release returns void. */
static void ssd_block_release(struct gendisk *disk, fmode_t mode)
{
	struct ssd_device *dev;

	if (!disk) {
		return;
	}

	dev = disk->private_data;
	if (!dev) {
		return;
	}
#endif

	atomic_dec(&dev->refcnt);

	ssd_put(dev);

	//module_put(dev->owner);
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3,9,0))
	return 0;
#endif
}
10310 | ||
/*
 * Block-device operations table. .getgeo is only wired up on kernels
 * that have the callback (2.6.16+); older ones go through .ioctl with
 * HDIO_GETGEO instead.
 */
static struct block_device_operations ssd_fops = {
	.owner = THIS_MODULE,
	.open = ssd_block_open,
	.release = ssd_block_release,
	.ioctl = ssd_block_ioctl,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16))
	.getgeo = ssd_block_getgeo,
#endif
};
10320 | ||
/*
 * Advertise TRIM/discard support on the request queue. Only enabled for
 * protocol versions newer than V3, and only on kernels compiled with
 * discard support.
 */
static void ssd_init_trim(ssd_device_t *dev)
{
#if (defined SSD_TRIM && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)))
	if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) {
		return;
	}
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, dev->rq);

#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33)) || (defined RHEL_MAJOR && RHEL_MAJOR >= 6))
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0))
	/* discard_zeroes_data was removed from the kernel in 4.12 */
	dev->rq->limits.discard_zeroes_data = 1;
#endif
	dev->rq->limits.discard_alignment = 4096;
	dev->rq->limits.discard_granularity = 4096;
#endif
	/* NOTE(review): presumably V3.2.4+ firmware accepts a single discard
	 * spanning a full multi-sg command (sg_max_sec * cmd_max_sg) — confirm. */
	if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2_4) {
		dev->rq->limits.max_discard_sectors = dev->hw_info.sg_max_sec;
	} else {
		dev->rq->limits.max_discard_sectors = (dev->hw_info.sg_max_sec) * (dev->hw_info.cmd_max_sg);
	}
#endif
}
10343 | ||
10344 | static void ssd_cleanup_queue(struct ssd_device *dev) | |
10345 | { | |
10346 | ssd_wait_io(dev); | |
10347 | ||
10348 | blk_cleanup_queue(dev->rq); | |
10349 | dev->rq = NULL; | |
10350 | } | |
10351 | ||
/*
 * Allocate and configure the bio-based (make_request) queue.
 * Returns 0 on success, -ENOMEM when queue allocation fails.
 */
static int ssd_init_queue(struct ssd_device *dev)
{
	dev->rq = blk_alloc_queue(GFP_KERNEL);
	if (dev->rq == NULL) {
		hio_warn("%s: alloc queue: failed\n ", dev->name);
		goto out_init_queue;
	}

	/* must be first */
	blk_queue_make_request(dev->rq, ssd_make_request);

	/* segment/sector limits; API names changed in 2.6.34 (and RHEL6 backport) */
#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34)) && !(defined RHEL_MAJOR && RHEL_MAJOR == 6))
	blk_queue_max_hw_segments(dev->rq, dev->hw_info.cmd_max_sg);
	blk_queue_max_phys_segments(dev->rq, dev->hw_info.cmd_max_sg);
	blk_queue_max_sectors(dev->rq, dev->hw_info.sg_max_sec);
#else
	blk_queue_max_segments(dev->rq, dev->hw_info.cmd_max_sg);
	blk_queue_max_hw_sectors(dev->rq, dev->hw_info.sg_max_sec);
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
	blk_queue_hardsect_size(dev->rq, 512);
#else
	blk_queue_logical_block_size(dev->rq, 512);
#endif
	/* not work for make_request based drivers(bio) */
	blk_queue_max_segment_size(dev->rq, dev->hw_info.sg_max_sec << 9);

	blk_queue_bounce_limit(dev->rq, BLK_BOUNCE_HIGH);

	dev->rq->queuedata = dev;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
	blk_queue_issue_flush_fn(dev->rq, ssd_issue_flush_fn);
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
	/* SSD: tell the I/O schedulers the device is non-rotational */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, dev->rq);
#endif

	ssd_init_trim(dev);

	return 0;

out_init_queue:
	return -ENOMEM;
}
10399 | ||
10400 | static void ssd_cleanup_blkdev(struct ssd_device *dev) | |
10401 | { | |
10402 | del_gendisk(dev->gd); | |
10403 | } | |
10404 | ||
/*
 * Allocate and register the gendisk for this device. Any previously
 * allocated (but never added) disk is released first so the function
 * is safe to call again after a partial init.
 * Returns 0 on success, -ENOMEM when alloc_disk() fails.
 */
static int ssd_init_blkdev(struct ssd_device *dev)
{
	if (dev->gd) {
		put_disk(dev->gd);
	}

	dev->gd = alloc_disk(ssd_minors);
	if (!dev->gd) {
		hio_warn("%s: alloc_disk fail\n", dev->name);
		goto out_alloc_gd;
	}
	dev->gd->major = dev->major;
	dev->gd->first_minor = dev->idx * ssd_minors;
	dev->gd->fops = &ssd_fops;
	dev->gd->queue = dev->rq;
	dev->gd->private_data = dev;

	snprintf (dev->gd->disk_name, sizeof(dev->gd->disk_name), "%s", dev->name);

	/* hw_info.size is bytes; the block layer wants 512-byte sectors */
	set_capacity(dev->gd, dev->hw_info.size >> 9);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0))
	/* 4.8+ combines sysfs parenting and add_disk in one call */
	device_add_disk(&dev->pdev->dev, dev->gd);
#else
	dev->gd->driverfs_dev = &dev->pdev->dev;
	add_disk(dev->gd);
#endif

	return 0;

out_alloc_gd:
	return -ENOMEM;
}
10438 | ||
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,10))
/* Character-device ioctl (pre-2.6.11 BKL-style prototype). */
static int ssd_ioctl(struct inode *inode, struct file *file,
	unsigned int cmd, unsigned long arg)
#else
/* Character-device ioctl (unlocked_ioctl prototype on 2.6.11+). */
static long ssd_ioctl(struct file *file,
	unsigned int cmd, unsigned long arg)
#endif
{
	struct ssd_device *dev;

	if (!file) {
		return -EINVAL;
	}

	dev = file->private_data;
	if (!dev) {
		return -EINVAL;
	}

	/* all vendor commands share the common handler */
	return (long)ssd_ioctl_common(dev, cmd, arg);
}
10460 | ||
10461 | static int ssd_open(struct inode *inode, struct file *file) | |
10462 | { | |
10463 | struct ssd_device *dev = NULL; | |
10464 | struct ssd_device *n = NULL; | |
10465 | int idx; | |
10466 | int ret = -ENODEV; | |
10467 | ||
10468 | if (!inode || !file) { | |
10469 | return -EINVAL; | |
10470 | } | |
10471 | ||
10472 | idx = iminor(inode); | |
10473 | ||
10474 | list_for_each_entry_safe(dev, n, &ssd_list, list) { | |
10475 | if (dev->idx == idx) { | |
10476 | ret = 0; | |
10477 | break; | |
10478 | } | |
10479 | } | |
10480 | ||
10481 | if (ret) { | |
10482 | return ret; | |
10483 | } | |
10484 | ||
10485 | file->private_data = dev; | |
10486 | ||
10487 | ssd_get(dev); | |
10488 | ||
10489 | return 0; | |
10490 | } | |
10491 | ||
10492 | static int ssd_release(struct inode *inode, struct file *file) | |
10493 | { | |
10494 | struct ssd_device *dev; | |
10495 | ||
10496 | if (!file) { | |
10497 | return -EINVAL; | |
10498 | } | |
10499 | ||
10500 | dev = file->private_data; | |
10501 | if (!dev) { | |
10502 | return -EINVAL; | |
10503 | } | |
10504 | ||
10505 | ssd_put(dev); | |
10506 | ||
10507 | file->private_data = NULL; | |
10508 | ||
10509 | return 0; | |
10510 | } | |
10511 | ||
1197134c KM |
10512 | static int ssd_reload_ssd_ptr(struct ssd_device *dev) |
10513 | { | |
10514 | ssd_reset_resp_ptr(dev); | |
10515 | ||
10516 | //update base reg address | |
10517 | if (dev->protocol_info.ver >= SSD_PROTOCOL_V3) { | |
10518 | ||
10519 | ssd_reg_write(dev->ctrlp + SSD_MSG_BASE_REG, dev->msg_base_dma); | |
10520 | } | |
10521 | ||
10522 | //update response base reg address | |
10523 | ssd_reg_write(dev->ctrlp + SSD_RESP_FIFO_REG, dev->resp_msg_base_dma); | |
10524 | ssd_reg_write(dev->ctrlp + SSD_RESP_PTR_REG, dev->resp_ptr_base_dma); | |
10525 | ||
10526 | return 0; | |
10527 | } | |
10528 | ||
361ebed5 HSDT |
/*
 * Character-device file operations (the management/ioctl interface);
 * pre-2.6.11 kernels use the BKL .ioctl slot, newer ones .unlocked_ioctl.
 */
static struct file_operations ssd_cfops = {
	.owner = THIS_MODULE,
	.open = ssd_open,
	.release = ssd_release,
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,10))
	.ioctl = ssd_ioctl,
#else
	.unlocked_ioctl = ssd_ioctl,
#endif
};
10539 | ||
/*
 * Remove the management character-device node; the kernel-version
 * ladder mirrors ssd_init_chardev(). Slave devices expose no char dev.
 */
static void ssd_cleanup_chardev(struct ssd_device *dev)
{
	if (dev->slave) {
		return;
	}

#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
	class_simple_device_remove(MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx));
	devfs_remove("c%s", dev->name);
#elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,14))
	class_device_destroy(ssd_class, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx));
	devfs_remove("c%s", dev->name);
#elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17))
	class_device_destroy(ssd_class, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx));
	devfs_remove("c%s", dev->name);
#elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,24))
	class_device_destroy(ssd_class, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx));
#else
	device_destroy(ssd_class, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx));
#endif
}
10561 | ||
/*
 * Create the management character-device node "c<name>" under the ssd
 * class; the ladder tracks the devfs/class API churn across kernel
 * versions. Slave devices get no char dev.
 * Returns 0 on success or the devfs_mk_cdev() error on old kernels.
 */
static int ssd_init_chardev(struct ssd_device *dev)
{
	int ret = 0;

	if (dev->slave) {
		return 0;
	}

#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
	ret = devfs_mk_cdev(MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx), S_IFCHR|S_IRUSR|S_IWUSR, "c%s", dev->name);
	if (ret) {
		goto out;
	}
	class_simple_device_add(ssd_class, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx), NULL, "c%s", dev->name);
out:
#elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,14))
	ret = devfs_mk_cdev(MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx), S_IFCHR|S_IRUSR|S_IWUSR, "c%s", dev->name);
	if (ret) {
		goto out;
	}
	class_device_create(ssd_class, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx), NULL, "c%s", dev->name);
out:
#elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17))
	ret = devfs_mk_cdev(MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx), S_IFCHR|S_IRUSR|S_IWUSR, "c%s", dev->name);
	if (ret) {
		goto out;
	}
	class_device_create(ssd_class, NULL, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx), NULL, "c%s", dev->name);
out:
#elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,24))
	class_device_create(ssd_class, NULL, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx), NULL, "c%s", dev->name);
#elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
	device_create(ssd_class, NULL, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx), "c%s", dev->name);
#elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
	device_create_drvdata(ssd_class, NULL, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx), NULL, "c%s", dev->name);
#else
	device_create(ssd_class, NULL, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx), NULL, "c%s", dev->name);
#endif

	return ret;
}
10603 | ||
10604 | static int ssd_check_hw(struct ssd_device *dev) | |
10605 | { | |
10606 | uint32_t test_data = 0x55AA5AA5; | |
10607 | uint32_t read_data; | |
10608 | ||
10609 | ssd_reg32_write(dev->ctrlp + SSD_BRIDGE_TEST_REG, test_data); | |
10610 | read_data = ssd_reg32_read(dev->ctrlp + SSD_BRIDGE_TEST_REG); | |
10611 | if (read_data != ~(test_data)) { | |
10612 | //hio_warn("%s: check bridge error: %#x\n", dev->name, read_data); | |
10613 | return -1; | |
10614 | } | |
10615 | ||
10616 | return 0; | |
10617 | } | |
10618 | ||
/*
 * Wait for the controller firmware to come up (protocol >= V3.1.3 only).
 * Polls SSD_HW_STATUS_REG until bit 0 (firmware loaded) and bit 8
 * (controller state OK) are both set, or the retry budget runs out.
 * Returns 0 on success, -1 on firmware-load or controller-state failure.
 */
static int ssd_check_fw(struct ssd_device *dev)
{
	uint32_t val = 0;
	int i;

	/* older protocols expose no firmware status register to poll */
	if (dev->protocol_info.ver < SSD_PROTOCOL_V3_1_3) {
		return 0;
	}

	for (i=0; i<SSD_CONTROLLER_WAIT; i++) {
		val = ssd_reg32_read(dev->ctrlp + SSD_HW_STATUS_REG);
		if ((val & 0x1) && ((val >> 8) & 0x1)) {
			break;
		}

		msleep(SSD_INIT_WAIT);
	}

	if (!(val & 0x1)) {
		/* controller fw status */
		hio_warn("%s: controller firmware load failed: %#x\n", dev->name, val);
		return -1;
	} else if (!((val >> 8) & 0x1)) {
		/* controller state */
		hio_warn("%s: controller state error: %#x\n", dev->name, val);
		return -1;
	}

	/* NOTE(review): a non-zero reload register presumably flags a
	 * pending firmware reload — confirm against firmware spec. */
	val = ssd_reg32_read(dev->ctrlp + SSD_RELOAD_FW_REG);
	if (val) {
		dev->reload_fw = 1;
	}

	return 0;
}
10654 | ||
10655 | static int ssd_init_fw_info(struct ssd_device *dev) | |
10656 | { | |
10657 | uint32_t val; | |
10658 | int ret = 0; | |
10659 | ||
10660 | val = ssd_reg32_read(dev->ctrlp + SSD_BRIDGE_VER_REG); | |
10661 | dev->hw_info.bridge_ver = val & 0xFFF; | |
10662 | if (dev->hw_info.bridge_ver < SSD_FW_MIN) { | |
10663 | hio_warn("%s: bridge firmware version %03X is not supported\n", dev->name, dev->hw_info.bridge_ver); | |
10664 | return -EINVAL; | |
10665 | } | |
10666 | hio_info("%s: bridge firmware version: %03X\n", dev->name, dev->hw_info.bridge_ver); | |
10667 | ||
10668 | ret = ssd_check_fw(dev); | |
10669 | if (ret) { | |
10670 | goto out; | |
10671 | } | |
10672 | ||
10673 | out: | |
10674 | /* skip error if not in standard mode */ | |
10675 | if (mode != SSD_DRV_MODE_STANDARD) { | |
10676 | ret = 0; | |
10677 | } | |
10678 | return ret; | |
10679 | } | |
10680 | ||
/*
 * Check the clock-health bits in SSD_HW_STATUS_REG (protocol >= V3.1.3).
 * Each distinct fault is warned about and logged only once, latched via
 * a hwmon bit. Returns 0 when all monitored clocks are good, -1 if any
 * fault is present (new or previously latched).
 */
static int ssd_check_clock(struct ssd_device *dev)
{
	uint32_t val;
	int ret = 0;

	if (dev->protocol_info.ver < SSD_PROTOCOL_V3_1_3) {
		return 0;
	}

	val = ssd_reg32_read(dev->ctrlp + SSD_HW_STATUS_REG);

	/* clock status */
	/* bit 4: 166MHz clock present */
	if (!((val >> 4 ) & 0x1)) {
		if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_166M_LOST), &dev->hwmon)) {
			hio_warn("%s: 166MHz clock losed: %#x\n", dev->name, val);
			ssd_gen_swlog(dev, SSD_LOG_CLK_FAULT, val);
		}
		ret = -1;
	}

	/* bits 5-7 (166MHz skew, 156.25MHz presence/skew) exist from V3.2 on */
	if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) {
		if (!((val >> 5 ) & 0x1)) {
			if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_166M_SKEW), &dev->hwmon)) {
				hio_warn("%s: 166MHz clock is skew: %#x\n", dev->name, val);
				ssd_gen_swlog(dev, SSD_LOG_CLK_FAULT, val);
			}
			ret = -1;
		}
		if (!((val >> 6 ) & 0x1)) {
			if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_156M_LOST), &dev->hwmon)) {
				hio_warn("%s: 156.25MHz clock lost: %#x\n", dev->name, val);
				ssd_gen_swlog(dev, SSD_LOG_CLK_FAULT, val);
			}
			ret = -1;
		}
		if (!((val >> 7 ) & 0x1)) {
			if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_156M_SKEW), &dev->hwmon)) {
				hio_warn("%s: 156.25MHz clock is skew: %#x\n", dev->name, val);
				ssd_gen_swlog(dev, SSD_LOG_CLK_FAULT, val);
			}
			ret = -1;
		}
	}

	return ret;
}
10727 | ||
/*
 * Check per-controller FPGA supply voltages (protocol >= V3.2 only).
 * For each controller, the max and min ADC samples of the 1.0V and 1.8V
 * rails are compared against their allowed windows. Each rail faults at
 * most once per controller (latched via a hwmon bit); a fault is warned
 * about and written to the software log.
 * Returns 0 when all rails are in range; otherwise a negative code
 * identifying the last failed check (-1/-2: 1.0V max/min, -3/-4: 1.8V).
 */
static int ssd_check_volt(struct ssd_device *dev)
{
	int i = 0;
	uint64_t val;
	uint32_t adc_val;
	int ret =0;

	if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
		return 0;
	}

	for (i=0; i<dev->hw_info.nr_ctrl; i++) {
		/* 1.0v */
		if (!test_bit(SSD_HWMON_FPGA(i, SSD_FPGA_1V0), &dev->hwmon)) {
			val = ssd_reg_read(dev->ctrlp + SSD_FPGA_1V0_REG0 + i * SSD_CTRL_REG_ZONE_SZ);
			adc_val = SSD_FPGA_VOLT_MAX(val);
			if (adc_val < SSD_FPGA_1V0_ADC_MIN || adc_val > SSD_FPGA_1V0_ADC_MAX) {
				(void)test_and_set_bit(SSD_HWMON_FPGA(i, SSD_FPGA_1V0), &dev->hwmon);
				hio_warn("%s: controller %d 1.0V fault: %d mV.\n", dev->name, i, SSD_FPGA_VOLT(adc_val));
				ssd_gen_swlog(dev, SSD_LOG_VOLT_FAULT, SSD_VOLT_LOG_DATA(SSD_FPGA_1V0, i, adc_val));
				ret = -1;
			}

			adc_val = SSD_FPGA_VOLT_MIN(val);
			if (adc_val < SSD_FPGA_1V0_ADC_MIN || adc_val > SSD_FPGA_1V0_ADC_MAX) {
				(void)test_and_set_bit(SSD_HWMON_FPGA(i, SSD_FPGA_1V0), &dev->hwmon);
				hio_warn("%s: controller %d 1.0V fault: %d mV.\n", dev->name, i, SSD_FPGA_VOLT(adc_val));
				ssd_gen_swlog(dev, SSD_LOG_VOLT_FAULT, SSD_VOLT_LOG_DATA(SSD_FPGA_1V0, i, adc_val));
				ret = -2;
			}
		}

		/* 1.8v */
		if (!test_bit(SSD_HWMON_FPGA(i, SSD_FPGA_1V8), &dev->hwmon)) {
			val = ssd_reg_read(dev->ctrlp + SSD_FPGA_1V8_REG0 + i * SSD_CTRL_REG_ZONE_SZ);
			adc_val = SSD_FPGA_VOLT_MAX(val);
			if (adc_val < SSD_FPGA_1V8_ADC_MIN || adc_val > SSD_FPGA_1V8_ADC_MAX) {
				(void)test_and_set_bit(SSD_HWMON_FPGA(i, SSD_FPGA_1V8), &dev->hwmon);
				hio_warn("%s: controller %d 1.8V fault: %d mV.\n", dev->name, i, SSD_FPGA_VOLT(adc_val));
				ssd_gen_swlog(dev, SSD_LOG_VOLT_FAULT, SSD_VOLT_LOG_DATA(SSD_FPGA_1V8, i, adc_val));
				ret = -3;
			}

			adc_val = SSD_FPGA_VOLT_MIN(val);
			if (adc_val < SSD_FPGA_1V8_ADC_MIN || adc_val > SSD_FPGA_1V8_ADC_MAX) {
				(void)test_and_set_bit(SSD_HWMON_FPGA(i, SSD_FPGA_1V8), &dev->hwmon);
				hio_warn("%s: controller %d 1.8V fault: %d mV.\n", dev->name, i, SSD_FPGA_VOLT(adc_val));
				ssd_gen_swlog(dev, SSD_LOG_VOLT_FAULT, SSD_VOLT_LOG_DATA(SSD_FPGA_1V8, i, adc_val));
				ret = -4;
			}
		}
	}

	return ret;
}
10783 | ||
10784 | static int ssd_check_reset_sync(struct ssd_device *dev) | |
10785 | { | |
10786 | uint32_t val; | |
10787 | ||
10788 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_1_3) { | |
10789 | return 0; | |
10790 | } | |
10791 | ||
10792 | val = ssd_reg32_read(dev->ctrlp + SSD_HW_STATUS_REG); | |
10793 | if (!((val >> 8) & 0x1)) { | |
10794 | /* controller state */ | |
10795 | hio_warn("%s: controller state error: %#x\n", dev->name, val); | |
10796 | return -1; | |
10797 | } | |
10798 | ||
10799 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
10800 | return 0; | |
10801 | } | |
10802 | ||
10803 | if (((val >> 9 ) & 0x1)) { | |
10804 | hio_warn("%s: controller reset asynchronously: %#x\n", dev->name, val); | |
10805 | ssd_gen_swlog(dev, SSD_LOG_CTRL_RST_SYNC, val); | |
10806 | return -1; | |
10807 | } | |
10808 | ||
10809 | return 0; | |
10810 | } | |
10811 | ||
10812 | static int ssd_check_hw_bh(struct ssd_device *dev) | |
10813 | { | |
10814 | int ret; | |
10815 | ||
10816 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_1_3) { | |
10817 | return 0; | |
10818 | } | |
10819 | ||
10820 | /* clock status */ | |
10821 | ret = ssd_check_clock(dev); | |
10822 | if (ret) { | |
10823 | goto out; | |
10824 | } | |
10825 | ||
10826 | out: | |
10827 | /* skip error if not in standard mode */ | |
10828 | if (mode != SSD_DRV_MODE_STANDARD) { | |
10829 | ret = 0; | |
10830 | } | |
10831 | return ret; | |
10832 | } | |
10833 | ||
10834 | static int ssd_check_controller(struct ssd_device *dev) | |
10835 | { | |
10836 | int ret; | |
10837 | ||
10838 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_1_3) { | |
10839 | return 0; | |
10840 | } | |
10841 | ||
10842 | /* sync reset */ | |
10843 | ret = ssd_check_reset_sync(dev); | |
10844 | if (ret) { | |
10845 | goto out; | |
10846 | } | |
10847 | ||
10848 | out: | |
10849 | /* skip error if not in standard mode */ | |
10850 | if (mode != SSD_DRV_MODE_STANDARD) { | |
10851 | ret = 0; | |
10852 | } | |
10853 | return ret; | |
10854 | } | |
10855 | ||
/*
 * Bottom-half controller check, run only in standard driver mode:
 *  - controller 0 ready bit,
 *  - per-controller test-register echo,
 *  - supply voltages,
 *  - DDR init completion (protocol > v3 only),
 *  - channel-info ready bit.
 * Returns 0 on success, negative on the first failure.
 */
static int ssd_check_controller_bh(struct ssd_device *dev)
{
	uint32_t test_data = 0x55AA5AA5;
	uint32_t val;
	int reg_base, reg_sz;
	int init_wait = 0;	/* DDR-init poll budget, shared across all controllers */
	int i;
	int ret = 0;

	if (mode != SSD_DRV_MODE_STANDARD) {
		return 0;
	}

	/* controller: bit 0 set means controller 0 is not ready */
	val = ssd_reg32_read(dev->ctrlp + SSD_READY_REG);
	if (val & 0x1) {
		hio_warn("%s: controller 0 not ready\n", dev->name);
		return -1;
	}

	/* each controller must echo the test pattern back bit-inverted */
	for (i=0; i<dev->hw_info.nr_ctrl; i++) {
		reg_base = SSD_CTRL_TEST_REG0 + i * SSD_CTRL_TEST_REG_SZ;
		ssd_reg32_write(dev->ctrlp + reg_base, test_data);
		val = ssd_reg32_read(dev->ctrlp + reg_base);
		if (val != ~(test_data)) {
			hio_warn("%s: check controller %d error: %#x\n", dev->name, i, val);
			return -1;
		}
	}

	/* voltage (the old "clock" label here was wrong: this checks volt) */
	ret = ssd_check_volt(dev);
	if (ret) {
		return ret;
	}

	/* ddr: poll each controller's RAM status until init-done (bit 1) */
	if (dev->protocol_info.ver > SSD_PROTOCOL_V3) {
		reg_base = SSD_PV3_RAM_STATUS_REG0;
		reg_sz = SSD_PV3_RAM_STATUS_REG_SZ;

		for (i=0; i<dev->hw_info.nr_ctrl; i++) {
check_ram_status:
			val = ssd_reg32_read(dev->ctrlp + reg_base);

			if (!((val >> 1) & 0x1)) {
				init_wait++;
				if (init_wait <= SSD_RAM_INIT_MAX_WAIT) {
					msleep(SSD_INIT_WAIT);
					goto check_ram_status;
				} else {
					hio_warn("%s: controller %d ram init failed: %#x\n", dev->name, i, val);
					ssd_gen_swlog(dev, SSD_LOG_DDR_INIT_ERR, i);
					return -1;
				}
			}

			reg_base += reg_sz;
		}
	}

	/* ch info: wait for the busy bit (bit 31) to clear */
	for (i=0; i<SSD_CH_INFO_MAX_WAIT; i++) {
		val = ssd_reg32_read(dev->ctrlp + SSD_CH_INFO_REG);
		if (!((val >> 31) & 0x1)) {
			break;
		}

		msleep(SSD_INIT_WAIT);
	}
	if ((val >> 31) & 0x1) {
		hio_warn("%s: channel info init failed: %#x\n", dev->name, val);
		return -1;
	}

	return 0;
}
10933 | ||
10934 | static int ssd_init_protocol_info(struct ssd_device *dev) | |
10935 | { | |
10936 | uint32_t val; | |
10937 | ||
10938 | val = ssd_reg32_read(dev->ctrlp + SSD_PROTOCOL_VER_REG); | |
10939 | if (val == (uint32_t)-1) { | |
10940 | hio_warn("%s: protocol version error: %#x\n", dev->name, val); | |
10941 | return -EINVAL; | |
10942 | } | |
10943 | dev->protocol_info.ver = val; | |
10944 | ||
10945 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3) { | |
10946 | dev->protocol_info.init_state_reg = SSD_INIT_STATE_REG0; | |
10947 | dev->protocol_info.init_state_reg_sz = SSD_INIT_STATE_REG_SZ; | |
10948 | ||
10949 | dev->protocol_info.chip_info_reg = SSD_CHIP_INFO_REG0; | |
10950 | dev->protocol_info.chip_info_reg_sz = SSD_CHIP_INFO_REG_SZ; | |
10951 | } else { | |
10952 | dev->protocol_info.init_state_reg = SSD_PV3_INIT_STATE_REG0; | |
10953 | dev->protocol_info.init_state_reg_sz = SSD_PV3_INIT_STATE_REG_SZ; | |
10954 | ||
10955 | dev->protocol_info.chip_info_reg = SSD_PV3_CHIP_INFO_REG0; | |
10956 | dev->protocol_info.chip_info_reg_sz = SSD_PV3_CHIP_INFO_REG_SZ; | |
10957 | } | |
10958 | ||
10959 | return 0; | |
10960 | } | |
10961 | ||
/*
 * Read and validate the device's hardware description registers:
 * response geometry, command FIFO limits, controller count, PCB
 * version, channel/RAM/flash layout and the derived usable capacity,
 * plus extended info (board type, form factor, power-loss protection,
 * work mode). Returns 0 or -EINVAL; in any non-standard driver mode
 * all errors are suppressed and 0 is returned.
 */
static int ssd_init_hw_info(struct ssd_device *dev)
{
	uint64_t val64;
	uint32_t val;
	uint32_t nr_ctrl;
	int ret = 0;

	/* base info: response pointer/message sizes are 16 bytes << field */
	val = ssd_reg32_read(dev->ctrlp + SSD_RESP_INFO_REG);
	dev->hw_info.resp_ptr_sz = 16 * (1U << (val & 0xFF));
	dev->hw_info.resp_msg_sz = 16 * (1U << ((val >> 8) & 0xFF));

	if (0 == dev->hw_info.resp_ptr_sz || 0 == dev->hw_info.resp_msg_sz) {
		/* a shift count >= 32 wraps the product to 0: bogus register */
		hio_warn("%s: response info error\n", dev->name);
		ret = -EINVAL;
		goto out;
	}

	/* command FIFO depth, max SG entries per cmd, max sectors per SG */
	val = ssd_reg32_read(dev->ctrlp + SSD_BRIDGE_INFO_REG);
	dev->hw_info.cmd_fifo_sz = 1U << ((val >> 4) & 0xF);
	dev->hw_info.cmd_max_sg = 1U << ((val >> 8) & 0xF);
	dev->hw_info.sg_max_sec = 1U << ((val >> 12) & 0xF);
	/* FIFO size is a power of two, so size-1 is a valid index mask */
	dev->hw_info.cmd_fifo_sz_mask = dev->hw_info.cmd_fifo_sz - 1;

	if (0 == dev->hw_info.cmd_fifo_sz || 0 == dev->hw_info.cmd_max_sg || 0 == dev->hw_info.sg_max_sec) {
		hio_warn("%s: cmd info error\n", dev->name);
		ret = -EINVAL;
		goto out;
	}

	/* check hw */
	if (ssd_check_hw_bh(dev)) {
		hio_warn("%s: check hardware status failed\n", dev->name);
		ret = -EINVAL;
		goto out;
	}

	if (ssd_check_controller(dev)) {
		hio_warn("%s: check controller state failed\n", dev->name);
		ret = -EINVAL;
		goto out;
	}

	/* nr controller : read again*/
	val = ssd_reg32_read(dev->ctrlp + SSD_BRIDGE_INFO_REG);
	dev->hw_info.nr_ctrl = (val >> 16) & 0xF;

	/* nr ctrl configured: 0 means unconfigured, otherwise must match */
	nr_ctrl = (val >> 20) & 0xF;
	if (0 == dev->hw_info.nr_ctrl) {
		hio_warn("%s: nr controller error: %u\n", dev->name, dev->hw_info.nr_ctrl);
		ret = -EINVAL;
		goto out;
	} else if (0 != nr_ctrl && nr_ctrl != dev->hw_info.nr_ctrl) {
		hio_warn("%s: nr controller error: configured %u but found %u\n", dev->name, nr_ctrl, dev->hw_info.nr_ctrl);
		if (mode <= SSD_DRV_MODE_STANDARD) {
			ret = -EINVAL;
			goto out;
		}
	}

	if (ssd_check_controller_bh(dev)) {
		hio_warn("%s: check controller failed\n", dev->name);
		ret = -EINVAL;
		goto out;
	}

	/* PCB versions encode letters: field value 1 -> 'A' */
	val = ssd_reg32_read(dev->ctrlp + SSD_PCB_VER_REG);
	dev->hw_info.pcb_ver = (uint8_t) ((val >> 4) & 0xF) + 'A' -1;
	if ((val & 0xF) != 0xF) {
		/* 0xF in the low nibble means no upper board is present */
		dev->hw_info.upper_pcb_ver = (uint8_t) (val & 0xF) + 'A' -1;
	}

	if (dev->hw_info.pcb_ver < 'A' || (0 != dev->hw_info.upper_pcb_ver && dev->hw_info.upper_pcb_ver < 'A')) {
		hio_warn("%s: PCB version error: %#x %#x\n", dev->name, dev->hw_info.pcb_ver, dev->hw_info.upper_pcb_ver);
		ret = -EINVAL;
		goto out;
	}

	/* channel info */
	if (mode <= SSD_DRV_MODE_DEBUG) {
		val = ssd_reg32_read(dev->ctrlp + SSD_CH_INFO_REG);
		dev->hw_info.nr_data_ch = val & 0xFF;
		dev->hw_info.nr_ch = dev->hw_info.nr_data_ch + ((val >> 8) & 0xFF);
		dev->hw_info.nr_chip = (val >> 16) & 0xFF;

		if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
			/* round channel count up to the next power of two */
			dev->hw_info.max_ch = 1;
			while (dev->hw_info.max_ch < dev->hw_info.nr_ch) dev->hw_info.max_ch <<= 1;
		} else {
			/* set max channel 32 */
			dev->hw_info.max_ch = 32;
		}

		if (0 == dev->hw_info.nr_chip) {
			//for debug mode
			dev->hw_info.nr_chip = 1;
		}

		/* fixed NAND parameters (not read from hardware) */
		dev->hw_info.id_size = SSD_NAND_ID_SZ;
		dev->hw_info.max_ce = SSD_NAND_MAX_CE;

		if (0 == dev->hw_info.nr_data_ch || 0 == dev->hw_info.nr_ch || 0 == dev->hw_info.nr_chip) {
			hio_warn("%s: channel info error: data_ch %u ch %u chip %u\n", dev->name, dev->hw_info.nr_data_ch, dev->hw_info.nr_ch, dev->hw_info.nr_chip);
			ret = -EINVAL;
			goto out;
		}
	}

	/* ram info */
	if (mode <= SSD_DRV_MODE_DEBUG) {
		val = ssd_reg32_read(dev->ctrlp + SSD_RAM_INFO_REG);
		/* size in units of 64MB << field */
		dev->hw_info.ram_size = 0x4000000ull * (1ULL << (val & 0xF));
		dev->hw_info.ram_align = 1U << ((val >> 12) & 0xF);
		if (dev->hw_info.ram_align < SSD_RAM_ALIGN) {
			if (dev->protocol_info.ver < SSD_PROTOCOL_V3) {
				/* pre-v3 hardware may under-report; clamp up */
				dev->hw_info.ram_align = SSD_RAM_ALIGN;
			} else {
				hio_warn("%s: ram align error: %u\n", dev->name, dev->hw_info.ram_align);
				ret = -EINVAL;
				goto out;
			}
		}
		dev->hw_info.ram_max_len = 0x1000 * (1U << ((val >> 16) & 0xF));

		if (0 == dev->hw_info.ram_size || 0 == dev->hw_info.ram_align || 0 == dev->hw_info.ram_max_len || dev->hw_info.ram_align > dev->hw_info.ram_max_len) {
			hio_warn("%s: ram info error\n", dev->name);
			ret = -EINVAL;
			goto out;
		}

		if (dev->protocol_info.ver < SSD_PROTOCOL_V3) {
			dev->hw_info.log_sz = SSD_LOG_MAX_SZ;
		} else {
			val = ssd_reg32_read(dev->ctrlp + SSD_LOG_INFO_REG);
			dev->hw_info.log_sz = 0x1000 * (1U << (val & 0xFF));
		}
		if (0 == dev->hw_info.log_sz) {
			hio_warn("%s: log size error\n", dev->name);
			ret = -EINVAL;
			goto out;
		}

		/* bad-block table: base in 256KB units, size divided per ch/chip */
		val = ssd_reg32_read(dev->ctrlp + SSD_BBT_BASE_REG);
		dev->hw_info.bbt_base = 0x40000ull * (val & 0xFFFF);
		dev->hw_info.bbt_size = 0x40000 * (((val >> 16) & 0xFFFF) + 1) / (dev->hw_info.max_ch * dev->hw_info.nr_chip);
		if (dev->protocol_info.ver < SSD_PROTOCOL_V3) {
			if (dev->hw_info.bbt_base > dev->hw_info.ram_size || 0 == dev->hw_info.bbt_size) {
				hio_warn("%s: bbt info error\n", dev->name);
				ret = -EINVAL;
				goto out;
			}
		}

		/* metadata area: per ch+chip up to v3, per chip afterwards */
		val = ssd_reg32_read(dev->ctrlp + SSD_ECT_BASE_REG);
		dev->hw_info.md_base = 0x40000ull * (val & 0xFFFF);
		if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) {
			dev->hw_info.md_size = 0x40000 * (((val >> 16) & 0xFFF) + 1) / (dev->hw_info.max_ch * dev->hw_info.nr_chip);
		} else {
			dev->hw_info.md_size = 0x40000 * (((val >> 16) & 0xFFF) + 1) / (dev->hw_info.nr_chip);
		}
		dev->hw_info.md_entry_sz = 8 * (1U << ((val >> 28) & 0xF));
		if (dev->protocol_info.ver >= SSD_PROTOCOL_V3) {
			if (dev->hw_info.md_base > dev->hw_info.ram_size || 0 == dev->hw_info.md_size ||
				0 == dev->hw_info.md_entry_sz || dev->hw_info.md_entry_sz > dev->hw_info.md_size) {
				hio_warn("%s: md info error\n", dev->name);
				ret = -EINVAL;
				goto out;
			}
		}

		if (dev->protocol_info.ver < SSD_PROTOCOL_V3) {
			/* deliberately out of range: feature absent before v3 */
			dev->hw_info.nand_wbuff_base = dev->hw_info.ram_size + 1;
		} else {
			val = ssd_reg32_read(dev->ctrlp + SSD_NAND_BUFF_BASE);
			dev->hw_info.nand_wbuff_base = 0x8000ull * val;
		}
	}

	/* flash info */
	if (mode <= SSD_DRV_MODE_DEBUG) {
		if (dev->hw_info.nr_ctrl > 1) {
			val = ssd_reg32_read(dev->ctrlp + SSD_CTRL_VER_REG);
			dev->hw_info.ctrl_ver = val & 0xFFF;
			hio_info("%s: controller firmware version: %03X\n", dev->name, dev->hw_info.ctrl_ver);
		}

		/* NAND geometry packed into one 64-bit register */
		val64 = ssd_reg_read(dev->ctrlp + SSD_FLASH_INFO_REG0);
		dev->hw_info.nand_vendor_id = ((val64 >> 56) & 0xFF);
		dev->hw_info.nand_dev_id = ((val64 >> 48) & 0xFF);

		dev->hw_info.block_count = (((val64 >> 32) & 0xFFFF) + 1);
		dev->hw_info.page_count = ((val64>>16) & 0xFFFF);
		dev->hw_info.page_size = (val64 & 0xFFFF);

		val = ssd_reg32_read(dev->ctrlp + SSD_BB_INFO_REG);
		dev->hw_info.bbf_pages = val & 0xFF;
		dev->hw_info.bbf_seek = (val >> 8) & 0x1;

		if (0 == dev->hw_info.block_count || 0 == dev->hw_info.page_count || 0 == dev->hw_info.page_size || dev->hw_info.block_count > INT_MAX) {
			hio_warn("%s: flash info error\n", dev->name);
			ret = -EINVAL;
			goto out;
		}

		/* fixed OOB size (not derived from page size) */
		dev->hw_info.oob_size = SSD_NAND_OOB_SZ; //(dev->hw_info.page_size) >> 5;

		/* field widths grew with protocol v3.2 */
		val = ssd_reg32_read(dev->ctrlp + SSD_VALID_PAGES_REG);
		if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
			dev->hw_info.valid_pages = val & 0x3FF;
			dev->hw_info.max_valid_pages = (val>>20) & 0x3FF;
		} else {
			dev->hw_info.valid_pages = val & 0x7FFF;
			dev->hw_info.max_valid_pages = (val>>15) & 0x7FFF;
		}
		if (0 == dev->hw_info.valid_pages || 0 == dev->hw_info.max_valid_pages ||
			dev->hw_info.valid_pages > dev->hw_info.max_valid_pages || dev->hw_info.max_valid_pages > dev->hw_info.page_count) {
			hio_warn("%s: valid page info error: valid_pages %d, max_valid_pages %d\n", dev->name, dev->hw_info.valid_pages, dev->hw_info.max_valid_pages);
			ret = -EINVAL;
			goto out;
		}

		val = ssd_reg32_read(dev->ctrlp + SSD_RESERVED_BLKS_REG);
		dev->hw_info.reserved_blks = val & 0xFFFF;
		dev->hw_info.md_reserved_blks = (val >> 16) & 0xFF;
		if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) {
			dev->hw_info.md_reserved_blks = SSD_BBT_RESERVED;
		}
		if (dev->hw_info.reserved_blks > dev->hw_info.block_count || dev->hw_info.md_reserved_blks > dev->hw_info.block_count) {
			hio_warn("%s: reserved blocks info error: reserved_blks %d, md_reserved_blks %d\n", dev->name, dev->hw_info.reserved_blks, dev->hw_info.md_reserved_blks);
			ret = -EINVAL;
			goto out;
		}
	}

	/* size: usable capacity = pages * page size * usable blocks * ch * chip * ctrl */
	if (mode < SSD_DRV_MODE_DEBUG) {
		dev->hw_info.size = (uint64_t)dev->hw_info.valid_pages * dev->hw_info.page_size;
		dev->hw_info.size *= (dev->hw_info.block_count - dev->hw_info.reserved_blks);
		dev->hw_info.size *= ((uint64_t)dev->hw_info.nr_data_ch * (uint64_t)dev->hw_info.nr_chip * (uint64_t)dev->hw_info.nr_ctrl);
	}

	/* extend hardware info */
	val = ssd_reg32_read(dev->ctrlp + SSD_PCB_VER_REG);
	dev->hw_info_ext.board_type = (val >> 24) & 0xF;

	dev->hw_info_ext.form_factor = SSD_FORM_FACTOR_FHHL;
	if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2_1) {
		dev->hw_info_ext.form_factor = (val >> 31) & 0x1;
	}
	/*
	dev->hw_info_ext.cap_type = (val >> 28) & 0x3;
	if (SSD_BM_CAP_VINA != dev->hw_info_ext.cap_type && SSD_BM_CAP_JH != dev->hw_info_ext.cap_type) {
		dev->hw_info_ext.cap_type = SSD_BM_CAP_VINA;
	}*/

	/* power loss protect */
	val = ssd_reg32_read(dev->ctrlp + SSD_PLP_INFO_REG);
	dev->hw_info_ext.plp_type = (val & 0x3);
	if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) {
		/* 3 or 4 cap */
		dev->hw_info_ext.cap_type = ((val >> 2)& 0x1);
	}

	/* work mode */
	val = ssd_reg32_read(dev->ctrlp + SSD_CH_INFO_REG);
	dev->hw_info_ext.work_mode = (val >> 25) & 0x1;

out:
	/* skip error if not in standard mode */
	if (mode != SSD_DRV_MODE_STANDARD) {
		ret = 0;
	}
	return ret;
}
11239 | ||
11240 | static void ssd_cleanup_response(struct ssd_device *dev) | |
11241 | { | |
11242 | int resp_msg_sz = dev->hw_info.resp_msg_sz * dev->hw_info.cmd_fifo_sz * SSD_MSIX_VEC; | |
11243 | int resp_ptr_sz = dev->hw_info.resp_ptr_sz * SSD_MSIX_VEC; | |
11244 | ||
11245 | pci_free_consistent(dev->pdev, resp_ptr_sz, dev->resp_ptr_base, dev->resp_ptr_base_dma); | |
11246 | pci_free_consistent(dev->pdev, resp_msg_sz, dev->resp_msg_base, dev->resp_msg_base_dma); | |
11247 | } | |
11248 | ||
11249 | static int ssd_init_response(struct ssd_device *dev) | |
11250 | { | |
11251 | int resp_msg_sz = dev->hw_info.resp_msg_sz * dev->hw_info.cmd_fifo_sz * SSD_MSIX_VEC; | |
11252 | int resp_ptr_sz = dev->hw_info.resp_ptr_sz * SSD_MSIX_VEC; | |
11253 | ||
11254 | dev->resp_msg_base = pci_alloc_consistent(dev->pdev, resp_msg_sz, &(dev->resp_msg_base_dma)); | |
11255 | if (!dev->resp_msg_base) { | |
11256 | hio_warn("%s: unable to allocate resp msg DMA buffer\n", dev->name); | |
11257 | goto out_alloc_resp_msg; | |
11258 | } | |
11259 | memset(dev->resp_msg_base, 0xFF, resp_msg_sz); | |
11260 | ||
11261 | dev->resp_ptr_base = pci_alloc_consistent(dev->pdev, resp_ptr_sz, &(dev->resp_ptr_base_dma)); | |
11262 | if (!dev->resp_ptr_base){ | |
11263 | hio_warn("%s: unable to allocate resp ptr DMA buffer\n", dev->name); | |
11264 | goto out_alloc_resp_ptr; | |
11265 | } | |
11266 | memset(dev->resp_ptr_base, 0, resp_ptr_sz); | |
11267 | dev->resp_idx = *(uint32_t *)(dev->resp_ptr_base) = dev->hw_info.cmd_fifo_sz * 2 - 1; | |
11268 | ||
11269 | ssd_reg_write(dev->ctrlp + SSD_RESP_FIFO_REG, dev->resp_msg_base_dma); | |
11270 | ssd_reg_write(dev->ctrlp + SSD_RESP_PTR_REG, dev->resp_ptr_base_dma); | |
11271 | ||
11272 | return 0; | |
11273 | ||
11274 | out_alloc_resp_ptr: | |
11275 | pci_free_consistent(dev->pdev, resp_msg_sz, dev->resp_msg_base, dev->resp_msg_base_dma); | |
11276 | out_alloc_resp_msg: | |
11277 | return -ENOMEM; | |
11278 | } | |
11279 | ||
11280 | static int ssd_cleanup_cmd(struct ssd_device *dev) | |
11281 | { | |
11282 | int msg_sz = ALIGN(sizeof(struct ssd_rw_msg) + (dev->hw_info.cmd_max_sg - 1) * sizeof(struct ssd_sg_entry), SSD_DMA_ALIGN); | |
11283 | int i; | |
11284 | ||
11285 | for (i=0; i<(int)dev->hw_info.cmd_fifo_sz; i++) { | |
11286 | kfree(dev->cmd[i].sgl); | |
11287 | } | |
11288 | kfree(dev->cmd); | |
11289 | pci_free_consistent(dev->pdev, (msg_sz * dev->hw_info.cmd_fifo_sz), dev->msg_base, dev->msg_base_dma); | |
11290 | return 0; | |
11291 | } | |
11292 | ||
11293 | static int ssd_init_cmd(struct ssd_device *dev) | |
11294 | { | |
11295 | int sgl_sz = sizeof(struct scatterlist) * dev->hw_info.cmd_max_sg; | |
11296 | int cmd_sz = sizeof(struct ssd_cmd) * dev->hw_info.cmd_fifo_sz; | |
11297 | int msg_sz = ALIGN(sizeof(struct ssd_rw_msg) + (dev->hw_info.cmd_max_sg - 1) * sizeof(struct ssd_sg_entry), SSD_DMA_ALIGN); | |
11298 | int i; | |
11299 | ||
11300 | spin_lock_init(&dev->cmd_lock); | |
11301 | ||
11302 | dev->msg_base = pci_alloc_consistent(dev->pdev, (msg_sz * dev->hw_info.cmd_fifo_sz), &dev->msg_base_dma); | |
11303 | if (!dev->msg_base) { | |
11304 | hio_warn("%s: can not alloc cmd msg\n", dev->name); | |
11305 | goto out_alloc_msg; | |
11306 | } | |
11307 | ||
11308 | dev->cmd = kmalloc(cmd_sz, GFP_KERNEL); | |
11309 | if (!dev->cmd) { | |
11310 | hio_warn("%s: can not alloc cmd\n", dev->name); | |
11311 | goto out_alloc_cmd; | |
11312 | } | |
11313 | memset(dev->cmd, 0, cmd_sz); | |
11314 | ||
11315 | for (i=0; i<(int)dev->hw_info.cmd_fifo_sz; i++) { | |
11316 | dev->cmd[i].sgl = kmalloc(sgl_sz, GFP_KERNEL); | |
11317 | if (!dev->cmd[i].sgl) { | |
11318 | hio_warn("%s: can not alloc cmd sgl %d\n", dev->name, i); | |
11319 | goto out_alloc_sgl; | |
11320 | } | |
11321 | ||
11322 | dev->cmd[i].msg = dev->msg_base + (msg_sz * i); | |
11323 | dev->cmd[i].msg_dma = dev->msg_base_dma + ((dma_addr_t)msg_sz * i); | |
11324 | ||
11325 | dev->cmd[i].dev = dev; | |
11326 | dev->cmd[i].tag = i; | |
11327 | dev->cmd[i].flag = 0; | |
11328 | ||
11329 | INIT_LIST_HEAD(&dev->cmd[i].list); | |
11330 | } | |
11331 | ||
11332 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3) { | |
11333 | dev->scmd = ssd_dispatch_cmd; | |
11334 | } else { | |
11335 | ssd_reg_write(dev->ctrlp + SSD_MSG_BASE_REG, dev->msg_base_dma); | |
11336 | if (finject) { | |
11337 | dev->scmd = ssd_send_cmd_db; | |
11338 | } else { | |
11339 | dev->scmd = ssd_send_cmd; | |
11340 | } | |
11341 | } | |
11342 | ||
11343 | return 0; | |
11344 | ||
11345 | out_alloc_sgl: | |
11346 | for (i--; i>=0; i--) { | |
11347 | kfree(dev->cmd[i].sgl); | |
11348 | } | |
11349 | kfree(dev->cmd); | |
11350 | out_alloc_cmd: | |
11351 | pci_free_consistent(dev->pdev, (msg_sz * dev->hw_info.cmd_fifo_sz), dev->msg_base, dev->msg_base_dma); | |
11352 | out_alloc_msg: | |
11353 | return -ENOMEM; | |
11354 | } | |
11355 | ||
11356 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30)) | |
11357 | static irqreturn_t ssd_interrupt_check(int irq, void *dev_id) | |
11358 | { | |
11359 | struct ssd_queue *queue = (struct ssd_queue *)dev_id; | |
11360 | ||
11361 | if (*(uint32_t *)queue->resp_ptr == queue->resp_idx) { | |
11362 | return IRQ_NONE; | |
11363 | } | |
11364 | ||
11365 | return IRQ_WAKE_THREAD; | |
11366 | } | |
11367 | ||
/*
 * Threaded half of the interrupt: drain the response ring from our
 * cached index up to the index the hardware published via DMA, complete
 * each finished command and record errors/ECC statistics.
 */
static irqreturn_t ssd_interrupt_threaded(int irq, void *dev_id)
{
	struct ssd_queue *queue = (struct ssd_queue *)dev_id;
	struct ssd_device *dev = (struct ssd_device *)queue->dev;
	struct ssd_cmd *cmd;
	union ssd_response_msq __msg;
	union ssd_response_msq *msg = &__msg;
	uint64_t *u64_msg;
	uint32_t resp_idx = queue->resp_idx;
	uint32_t new_resp_idx = *(uint32_t *)queue->resp_ptr;	/* written by hardware */
	uint32_t end_resp_idx;

	if (unlikely(resp_idx == new_resp_idx)) {
		/* nothing new: spurious wakeup */
		return IRQ_NONE;
	}

	end_resp_idx = new_resp_idx & queue->resp_idx_mask;

	do {
		resp_idx = (resp_idx + 1) & queue->resp_idx_mask;

		/* the resp msg */
		u64_msg = (uint64_t *)(queue->resp_msg + queue->resp_msg_sz * resp_idx);
		msg->u64_msg = *u64_msg;

		/* all-ones means the hardware never filled this slot */
		if (unlikely(msg->u64_msg == (uint64_t)(-1))) {
			hio_err("%s: empty resp msg: queue %d idx %u\n", dev->name, queue->idx, resp_idx);
			continue;
		}
		/* clear the resp msg */
		*u64_msg = (uint64_t)(-1);

		cmd = &queue->cmd[msg->resp_msg.tag];
		/*if (unlikely(!cmd->bio)) {
			printk(KERN_WARNING "%s: unknown tag %d fun %#x\n",
				dev->name, msg->resp_msg.tag, msg->resp_msg.fun);
			continue;
		}*/

		/* only status bits selected by status_mask count as I/O errors */
		if(unlikely(msg->resp_msg.status & (uint32_t)status_mask)) {
			cmd->errors = -EIO;
		} else {
			cmd->errors = 0;
		}
		cmd->nr_log = msg->log_resp_msg.nr_log;

		ssd_done(cmd);

		/* hardware flagged new log entries: schedule the log worker */
		if (unlikely(msg->resp_msg.fun != SSD_FUNC_READ_LOG && msg->resp_msg.log > 0)) {
			(void)test_and_set_bit(SSD_LOG_HW, &dev->state);
			if (test_bit(SSD_INIT_WORKQ, &dev->state)) {
				queue_work(dev->workq, &dev->log_work);
			}
		}

		if (unlikely(msg->resp_msg.status)) {
			if (msg->resp_msg.fun == SSD_FUNC_READ || msg->resp_msg.fun == SSD_FUNC_WRITE) {
				hio_err("%s: I/O error %d: tag %d fun %#x\n",
					dev->name, msg->resp_msg.status, msg->resp_msg.tag, msg->resp_msg.fun);

				/* alarm led */
				ssd_set_alarm(dev);
				queue->io_stat.nr_rwerr++;
				ssd_gen_swlog(dev, SSD_LOG_EIO, msg->u32_msg[0]);
			} else {
				hio_info("%s: CMD error %d: tag %d fun %#x\n",
					dev->name, msg->resp_msg.status, msg->resp_msg.tag, msg->resp_msg.fun);

				ssd_gen_swlog(dev, SSD_LOG_ECMD, msg->u32_msg[0]);
			}
			queue->io_stat.nr_ioerr++;
		}

		/* per-read ECC bitflip histogram */
		if (msg->resp_msg.fun == SSD_FUNC_READ ||
			msg->resp_msg.fun == SSD_FUNC_NAND_READ_WOOB ||
			msg->resp_msg.fun == SSD_FUNC_NAND_READ) {

			queue->ecc_info.bitflip[msg->resp_msg.bitflip]++;
		}
	}while (resp_idx != end_resp_idx);

	queue->resp_idx = new_resp_idx;

	return IRQ_HANDLED;
}
11453 | #endif | |
11454 | ||
/*
 * Non-threaded interrupt handler: same ring-drain as the threaded
 * variant but completes commands via ssd_done_bh() and, when not using
 * MSI-X, records the CPU that serviced the irq (SSD_ESCAPE_IRQ builds).
 * Pre-2.6.19 kernels pass a pt_regs argument, hence the two prototypes.
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
static irqreturn_t ssd_interrupt(int irq, void *dev_id, struct pt_regs *regs)
#else
static irqreturn_t ssd_interrupt(int irq, void *dev_id)
#endif
{
	struct ssd_queue *queue = (struct ssd_queue *)dev_id;
	struct ssd_device *dev = (struct ssd_device *)queue->dev;
	struct ssd_cmd *cmd;
	union ssd_response_msq __msg;
	union ssd_response_msq *msg = &__msg;
	uint64_t *u64_msg;
	uint32_t resp_idx = queue->resp_idx;
	uint32_t new_resp_idx = *(uint32_t *)queue->resp_ptr;	/* written by hardware */
	uint32_t end_resp_idx;

	if (unlikely(resp_idx == new_resp_idx)) {
		/* nothing new: not our interrupt */
		return IRQ_NONE;
	}

#if (defined SSD_ESCAPE_IRQ)
	/* remember which CPU handles the (non-MSI-X) irq */
	if (SSD_INT_MSIX != dev->int_mode) {
		dev->irq_cpu = smp_processor_id();
	}
#endif

	end_resp_idx = new_resp_idx & queue->resp_idx_mask;

	do {
		resp_idx = (resp_idx + 1) & queue->resp_idx_mask;

		/* the resp msg */
		u64_msg = (uint64_t *)(queue->resp_msg + queue->resp_msg_sz * resp_idx);
		msg->u64_msg = *u64_msg;

		/* all-ones means the hardware never filled this slot */
		if (unlikely(msg->u64_msg == (uint64_t)(-1))) {
			hio_err("%s: empty resp msg: queue %d idx %u\n", dev->name, queue->idx, resp_idx);
			continue;
		}
		/* clear the resp msg */
		*u64_msg = (uint64_t)(-1);

		cmd = &queue->cmd[msg->resp_msg.tag];
		/*if (unlikely(!cmd->bio)) {
			printk(KERN_WARNING "%s: unknown tag %d fun %#x\n",
				dev->name, msg->resp_msg.tag, msg->resp_msg.fun);
			continue;
		}*/

		/* only status bits selected by status_mask count as I/O errors */
		if(unlikely(msg->resp_msg.status & (uint32_t)status_mask)) {
			cmd->errors = -EIO;
		} else {
			cmd->errors = 0;
		}
		cmd->nr_log = msg->log_resp_msg.nr_log;

		ssd_done_bh(cmd);

		/* hardware flagged new log entries: schedule the log worker */
		if (unlikely(msg->resp_msg.fun != SSD_FUNC_READ_LOG && msg->resp_msg.log > 0)) {
			(void)test_and_set_bit(SSD_LOG_HW, &dev->state);
			if (test_bit(SSD_INIT_WORKQ, &dev->state)) {
				queue_work(dev->workq, &dev->log_work);
			}
		}

		if (unlikely(msg->resp_msg.status)) {
			if (msg->resp_msg.fun == SSD_FUNC_READ || msg->resp_msg.fun == SSD_FUNC_WRITE) {
				hio_err("%s: I/O error %d: tag %d fun %#x\n",
					dev->name, msg->resp_msg.status, msg->resp_msg.tag, msg->resp_msg.fun);

				/* alarm led */
				ssd_set_alarm(dev);
				queue->io_stat.nr_rwerr++;
				ssd_gen_swlog(dev, SSD_LOG_EIO, msg->u32_msg[0]);
			} else {
				hio_info("%s: CMD error %d: tag %d fun %#x\n",
					dev->name, msg->resp_msg.status, msg->resp_msg.tag, msg->resp_msg.fun);

				ssd_gen_swlog(dev, SSD_LOG_ECMD, msg->u32_msg[0]);
			}
			queue->io_stat.nr_ioerr++;
		}

		/* per-read ECC bitflip histogram */
		if (msg->resp_msg.fun == SSD_FUNC_READ ||
			msg->resp_msg.fun == SSD_FUNC_NAND_READ_WOOB ||
			msg->resp_msg.fun == SSD_FUNC_NAND_READ) {

			queue->ecc_info.bitflip[msg->resp_msg.bitflip]++;
		}
	}while (resp_idx != end_resp_idx);

	queue->resp_idx = new_resp_idx;

	return IRQ_HANDLED;
}
11550 | ||
/*
 * Legacy (INTx/MSI) handler: delegates to ssd_interrupt() and then
 * explicitly acknowledges the interrupt in hardware, which MSI-X does
 * not require. Pre-2.6.19 kernels pass a pt_regs argument.
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
static irqreturn_t ssd_interrupt_legacy(int irq, void *dev_id, struct pt_regs *regs)
#else
static irqreturn_t ssd_interrupt_legacy(int irq, void *dev_id)
#endif
{
	irqreturn_t ret;
	struct ssd_queue *queue = (struct ssd_queue *)dev_id;
	struct ssd_device *dev = (struct ssd_device *)queue->dev;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
	ret = ssd_interrupt(irq, dev_id, regs);
#else
	ret = ssd_interrupt(irq, dev_id);
#endif

	/* clear intr */
	if (IRQ_HANDLED == ret) {
		ssd_reg32_write(dev->ctrlp + SSD_CLEAR_INTR_REG, 1);
	}

	return ret;
}
11574 | ||
11575 | static void ssd_reset_resp_ptr(struct ssd_device *dev) | |
11576 | { | |
11577 | int i; | |
11578 | ||
11579 | for (i=0; i<dev->nr_queue; i++) { | |
11580 | *(uint32_t *)dev->queue[i].resp_ptr = dev->queue[i].resp_idx = (dev->hw_info.cmd_fifo_sz * 2) - 1; | |
11581 | } | |
11582 | } | |
11583 | ||
/*
 * Release all interrupt resources: drop MSI-X affinity hints (on
 * kernels where the driver set them), free each queue's irq, then
 * disable MSI-X or MSI as appropriate.
 */
static void ssd_free_irq(struct ssd_device *dev)
{
	int i;

#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
	/* undo the affinity hints set at init time */
	if (SSD_INT_MSIX == dev->int_mode) {
		for (i=0; i<dev->nr_queue; i++) {
			irq_set_affinity_hint(dev->entry[i].vector, NULL);
		}
	}
#endif

	for (i=0; i<dev->nr_queue; i++) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
		free_irq(dev->entry[i].vector, &dev->queue[i]);
#else
		/* 4.10+: vectors are looked up via pci_irq_vector() */
		free_irq(pci_irq_vector(dev->pdev, i), &dev->queue[i]);
#endif
	}

	if (SSD_INT_MSIX == dev->int_mode) {
		pci_disable_msix(dev->pdev);
	} else if (SSD_INT_MSI == dev->int_mode) {
		pci_disable_msi(dev->pdev);
	}

}
11611 | ||
11612 | static int ssd_init_irq(struct ssd_device *dev) | |
11613 | { | |
b44043bd | 11614 | #if (!defined MODULE) && (defined SSD_MSIX_AFFINITY_FORCE) && (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) |
1197134c | 11615 | const struct cpumask *cpu_mask = NULL; |
361ebed5 HSDT |
11616 | static int cpu_affinity = 0; |
11617 | #endif | |
b44043bd | 11618 | #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) |
13cfa002 | 11619 | const struct cpumask *mask = NULL; |
361ebed5 HSDT |
11620 | static int cpu = 0; |
11621 | int j; | |
11622 | #endif | |
11623 | int i; | |
11624 | unsigned long flags = 0; | |
11625 | int ret = 0; | |
11626 | ||
11627 | ssd_reg32_write(dev->ctrlp + SSD_INTR_INTERVAL_REG, 0x800); | |
11628 | ||
11629 | #ifdef SSD_ESCAPE_IRQ | |
11630 | dev->irq_cpu = -1; | |
11631 | #endif | |
11632 | ||
b44043bd | 11633 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) |
361ebed5 HSDT |
11634 | if (int_mode >= SSD_INT_MSIX && pci_find_capability(dev->pdev, PCI_CAP_ID_MSIX)) { |
11635 | dev->nr_queue = SSD_MSIX_VEC; | |
b44043bd | 11636 | |
361ebed5 HSDT |
11637 | for (i=0; i<dev->nr_queue; i++) { |
11638 | dev->entry[i].entry = i; | |
11639 | } | |
11640 | for (;;) { | |
11641 | ret = pci_enable_msix(dev->pdev, dev->entry, dev->nr_queue); | |
11642 | if (ret == 0) { | |
11643 | break; | |
11644 | } else if (ret > 0) { | |
11645 | dev->nr_queue = ret; | |
11646 | } else { | |
11647 | hio_warn("%s: can not enable msix\n", dev->name); | |
11648 | /* alarm led */ | |
11649 | ssd_set_alarm(dev); | |
11650 | goto out; | |
11651 | } | |
11652 | } | |
11653 | ||
11654 | #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6)) | |
11655 | mask = (dev_to_node(&dev->pdev->dev) == -1) ? cpu_online_mask : cpumask_of_node(dev_to_node(&dev->pdev->dev)); | |
11656 | if ((0 == cpu) || (!cpumask_intersects(mask, cpumask_of(cpu)))) { | |
11657 | cpu = cpumask_first(mask); | |
11658 | } | |
11659 | for (i=0; i<dev->nr_queue; i++) { | |
11660 | irq_set_affinity_hint(dev->entry[i].vector, cpumask_of(cpu)); | |
11661 | cpu = cpumask_next(cpu, mask); | |
11662 | if (cpu >= nr_cpu_ids) { | |
11663 | cpu = cpumask_first(mask); | |
11664 | } | |
11665 | } | |
11666 | #endif | |
11667 | ||
11668 | dev->int_mode = SSD_INT_MSIX; | |
11669 | } else if (int_mode >= SSD_INT_MSI && pci_find_capability(dev->pdev, PCI_CAP_ID_MSI)) { | |
11670 | ret = pci_enable_msi(dev->pdev); | |
11671 | if (ret) { | |
11672 | hio_warn("%s: can not enable msi\n", dev->name); | |
11673 | /* alarm led */ | |
11674 | ssd_set_alarm(dev); | |
11675 | goto out; | |
11676 | } | |
11677 | ||
11678 | dev->nr_queue = 1; | |
11679 | dev->entry[0].vector = dev->pdev->irq; | |
11680 | ||
11681 | dev->int_mode = SSD_INT_MSI; | |
11682 | } else { | |
11683 | dev->nr_queue = 1; | |
11684 | dev->entry[0].vector = dev->pdev->irq; | |
11685 | ||
11686 | dev->int_mode = SSD_INT_LEGACY; | |
11687 | } | |
b44043bd SF |
11688 | #else |
11689 | if (int_mode >= SSD_INT_MSIX && pci_find_capability(dev->pdev, PCI_CAP_ID_MSIX)) { | |
11690 | dev->nr_queue = SSD_MSIX_VEC; | |
11691 | ||
11692 | dev->nr_queue = pci_alloc_irq_vectors(dev->pdev, 1, dev->nr_queue, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); | |
11693 | if (dev->nr_queue <= 0) { | |
11694 | ret = -EIO; | |
11695 | hio_warn("%s: can not enable msix\n", dev->name); | |
11696 | ssd_set_alarm(dev); | |
11697 | goto out; | |
11698 | } | |
11699 | ||
11700 | dev->int_mode = SSD_INT_MSIX; | |
11701 | } else if (int_mode >= SSD_INT_MSI && pci_find_capability(dev->pdev, PCI_CAP_ID_MSI)) { | |
11702 | ||
11703 | ret = pci_alloc_irq_vectors(dev->pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_AFFINITY); | |
11704 | if (ret <= 0) { | |
11705 | ret = -EIO; | |
11706 | hio_warn("%s: can not enable msi\n", dev->name); | |
11707 | /* alarm led */ | |
11708 | ssd_set_alarm(dev); | |
11709 | goto out; | |
11710 | } | |
11711 | dev->nr_queue = 1; | |
11712 | ||
11713 | dev->int_mode = SSD_INT_MSI; | |
11714 | } else { | |
11715 | ret = pci_alloc_irq_vectors(dev->pdev, 1, 1, PCI_IRQ_LEGACY); | |
11716 | ||
11717 | if (ret <= 0) { | |
11718 | ret = -EIO; | |
11719 | hio_warn("%s: can not enable msi\n", dev->name); | |
11720 | /* alarm led */ | |
11721 | ssd_set_alarm(dev); | |
11722 | goto out; | |
11723 | } | |
11724 | dev->nr_queue = 1; | |
11725 | ||
11726 | dev->int_mode = SSD_INT_LEGACY; | |
11727 | } | |
11728 | #endif | |
361ebed5 HSDT |
11729 | |
11730 | for (i=0; i<dev->nr_queue; i++) { | |
11731 | if (dev->nr_queue > 1) { | |
11732 | snprintf(dev->queue[i].name, SSD_QUEUE_NAME_LEN, "%s_e100-%d", dev->name, i); | |
11733 | } else { | |
11734 | snprintf(dev->queue[i].name, SSD_QUEUE_NAME_LEN, "%s_e100", dev->name); | |
11735 | } | |
11736 | ||
11737 | dev->queue[i].dev = dev; | |
11738 | dev->queue[i].idx = i; | |
11739 | ||
11740 | dev->queue[i].resp_idx = (dev->hw_info.cmd_fifo_sz * 2) - 1; | |
11741 | dev->queue[i].resp_idx_mask = dev->hw_info.cmd_fifo_sz - 1; | |
11742 | ||
11743 | dev->queue[i].resp_msg_sz = dev->hw_info.resp_msg_sz; | |
11744 | dev->queue[i].resp_msg = dev->resp_msg_base + dev->hw_info.resp_msg_sz * dev->hw_info.cmd_fifo_sz * i; | |
11745 | dev->queue[i].resp_ptr = dev->resp_ptr_base + dev->hw_info.resp_ptr_sz * i; | |
11746 | *(uint32_t *)dev->queue[i].resp_ptr = dev->queue[i].resp_idx; | |
11747 | ||
11748 | dev->queue[i].cmd = dev->cmd; | |
11749 | } | |
11750 | ||
11751 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)) | |
11752 | flags = IRQF_SHARED; | |
11753 | #else | |
11754 | flags = SA_SHIRQ; | |
11755 | #endif | |
11756 | ||
11757 | for (i=0; i<dev->nr_queue; i++) { | |
b44043bd SF |
11758 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)) |
11759 | if (dev->int_mode == SSD_INT_LEGACY) { | |
11760 | ret = request_irq(dev->entry[i].vector, &ssd_interrupt_legacy, flags, dev->queue[i].name, &dev->queue[i]); | |
11761 | } else { | |
11762 | ret = request_irq(dev->entry[i].vector, &ssd_interrupt, flags, dev->queue[i].name, &dev->queue[i]); | |
11763 | } | |
11764 | #elif (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) | |
361ebed5 HSDT |
11765 | if (threaded_irq) { |
11766 | ret = request_threaded_irq(dev->entry[i].vector, ssd_interrupt_check, ssd_interrupt_threaded, flags, dev->queue[i].name, &dev->queue[i]); | |
11767 | } else if (dev->int_mode == SSD_INT_LEGACY) { | |
11768 | ret = request_irq(dev->entry[i].vector, &ssd_interrupt_legacy, flags, dev->queue[i].name, &dev->queue[i]); | |
11769 | } else { | |
11770 | ret = request_irq(dev->entry[i].vector, &ssd_interrupt, flags, dev->queue[i].name, &dev->queue[i]); | |
11771 | } | |
11772 | #else | |
b44043bd SF |
11773 | if (threaded_irq) { |
11774 | ret = request_threaded_irq(pci_irq_vector(dev->pdev, i), ssd_interrupt_check, ssd_interrupt_threaded, flags, dev->queue[i].name, &dev->queue[i]); | |
11775 | } else if (dev->int_mode == SSD_INT_LEGACY) { | |
11776 | ret = request_irq(pci_irq_vector(dev->pdev, i), &ssd_interrupt_legacy, flags, dev->queue[i].name, &dev->queue[i]); | |
361ebed5 | 11777 | } else { |
b44043bd | 11778 | ret = request_irq(pci_irq_vector(dev->pdev, i), &ssd_interrupt, flags, dev->queue[i].name, &dev->queue[i]); |
361ebed5 HSDT |
11779 | } |
11780 | #endif | |
11781 | if (ret) { | |
11782 | hio_warn("%s: request irq failed\n", dev->name); | |
11783 | /* alarm led */ | |
11784 | ssd_set_alarm(dev); | |
11785 | goto out_request_irq; | |
11786 | } | |
11787 | ||
b44043bd | 11788 | #if (!defined MODULE) && (defined SSD_MSIX_AFFINITY_FORCE) && (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) |
361ebed5 HSDT |
11789 | cpu_mask = (dev_to_node(&dev->pdev->dev) == -1) ? cpu_online_mask : cpumask_of_node(dev_to_node(&dev->pdev->dev)); |
11790 | if (SSD_INT_MSIX == dev->int_mode) { | |
11791 | if ((0 == cpu_affinity) || (!cpumask_intersects(mask, cpumask_of(cpu_affinity)))) { | |
11792 | cpu_affinity = cpumask_first(cpu_mask); | |
11793 | } | |
11794 | ||
11795 | irq_set_affinity(dev->entry[i].vector, cpumask_of(cpu_affinity)); | |
11796 | cpu_affinity = cpumask_next(cpu_affinity, cpu_mask); | |
11797 | if (cpu_affinity >= nr_cpu_ids) { | |
11798 | cpu_affinity = cpumask_first(cpu_mask); | |
11799 | } | |
11800 | } | |
11801 | #endif | |
11802 | } | |
11803 | ||
11804 | return ret; | |
11805 | ||
11806 | out_request_irq: | |
b44043bd | 11807 | #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) |
361ebed5 HSDT |
11808 | if (SSD_INT_MSIX == dev->int_mode) { |
11809 | for (j=0; j<dev->nr_queue; j++) { | |
11810 | irq_set_affinity_hint(dev->entry[j].vector, NULL); | |
11811 | } | |
11812 | } | |
11813 | #endif | |
11814 | ||
11815 | for (i--; i>=0; i--) { | |
b44043bd | 11816 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) |
361ebed5 | 11817 | free_irq(dev->entry[i].vector, &dev->queue[i]); |
b44043bd SF |
11818 | #else |
11819 | free_irq(pci_irq_vector(dev->pdev, i), &dev->queue[i]); | |
11820 | #endif | |
361ebed5 HSDT |
11821 | } |
11822 | ||
11823 | if (SSD_INT_MSIX == dev->int_mode) { | |
11824 | pci_disable_msix(dev->pdev); | |
11825 | } else if (SSD_INT_MSI == dev->int_mode) { | |
11826 | pci_disable_msi(dev->pdev); | |
11827 | } | |
11828 | ||
11829 | out: | |
11830 | return ret; | |
11831 | } | |
11832 | ||
11833 | static void ssd_initial_log(struct ssd_device *dev) | |
11834 | { | |
11835 | uint32_t val; | |
11836 | uint32_t speed, width; | |
11837 | ||
11838 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
11839 | return; | |
11840 | } | |
11841 | ||
11842 | val = ssd_reg32_read(dev->ctrlp + SSD_POWER_ON_REG); | |
11843 | if (val) { | |
da3355df SF |
11844 | // Poweron detection switched to SSD_INTR_INTERVAL_REG in 'ssd_init_smart' |
11845 | //ssd_gen_swlog(dev, SSD_LOG_POWER_ON, dev->hw_info.bridge_ver); | |
361ebed5 HSDT |
11846 | } |
11847 | ||
11848 | val = ssd_reg32_read(dev->ctrlp + SSD_PCIE_LINKSTATUS_REG); | |
11849 | speed = val & 0xF; | |
11850 | width = (val >> 4)& 0x3F; | |
11851 | if (0x1 == speed) { | |
11852 | hio_info("%s: PCIe: 2.5GT/s, x%u\n", dev->name, width); | |
11853 | } else if (0x2 == speed) { | |
11854 | hio_info("%s: PCIe: 5GT/s, x%u\n", dev->name, width); | |
11855 | } else { | |
11856 | hio_info("%s: PCIe: unknown GT/s, x%u\n", dev->name, width); | |
11857 | } | |
11858 | ssd_gen_swlog(dev, SSD_LOG_PCIE_LINK_STATUS, val); | |
11859 | ||
11860 | return; | |
11861 | } | |
11862 | ||
11863 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)) | |
11864 | static void ssd_hwmon_worker(void *data) | |
11865 | { | |
11866 | struct ssd_device *dev = (struct ssd_device *)data; | |
11867 | #else | |
11868 | static void ssd_hwmon_worker(struct work_struct *work) | |
11869 | { | |
11870 | struct ssd_device *dev = container_of(work, struct ssd_device, hwmon_work); | |
11871 | #endif | |
11872 | ||
11873 | if (ssd_check_hw(dev)) { | |
11874 | //hio_err("%s: check hardware failed\n", dev->name); | |
11875 | return; | |
11876 | } | |
11877 | ||
11878 | ssd_check_clock(dev); | |
11879 | ssd_check_volt(dev); | |
11880 | ||
11881 | ssd_mon_boardvolt(dev); | |
11882 | } | |
11883 | ||
11884 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)) | |
11885 | static void ssd_tempmon_worker(void *data) | |
11886 | { | |
11887 | struct ssd_device *dev = (struct ssd_device *)data; | |
11888 | #else | |
11889 | static void ssd_tempmon_worker(struct work_struct *work) | |
11890 | { | |
11891 | struct ssd_device *dev = container_of(work, struct ssd_device, tempmon_work); | |
11892 | #endif | |
11893 | ||
11894 | if (ssd_check_hw(dev)) { | |
11895 | //hio_err("%s: check hardware failed\n", dev->name); | |
11896 | return; | |
11897 | } | |
11898 | ||
11899 | ssd_mon_temp(dev); | |
11900 | } | |
11901 | ||
11902 | ||
11903 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)) | |
11904 | static void ssd_capmon_worker(void *data) | |
11905 | { | |
11906 | struct ssd_device *dev = (struct ssd_device *)data; | |
11907 | #else | |
11908 | static void ssd_capmon_worker(struct work_struct *work) | |
11909 | { | |
11910 | struct ssd_device *dev = container_of(work, struct ssd_device, capmon_work); | |
11911 | #endif | |
11912 | uint32_t cap = 0; | |
11913 | uint32_t cap_threshold = SSD_PL_CAP_THRESHOLD; | |
11914 | int ret = 0; | |
11915 | ||
11916 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
11917 | return; | |
11918 | } | |
11919 | ||
11920 | if (dev->hw_info_ext.form_factor == SSD_FORM_FACTOR_FHHL && dev->hw_info.pcb_ver < 'B') { | |
11921 | return; | |
11922 | } | |
11923 | ||
11924 | /* fault before? */ | |
11925 | if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) { | |
11926 | ret = ssd_check_pl_cap_fast(dev); | |
11927 | if (ret) { | |
11928 | return; | |
11929 | } | |
11930 | } | |
11931 | ||
11932 | /* learn */ | |
11933 | ret = ssd_do_cap_learn(dev, &cap); | |
11934 | if (ret) { | |
11935 | hio_err("%s: cap learn failed\n", dev->name); | |
11936 | ssd_gen_swlog(dev, SSD_LOG_CAP_LEARN_FAULT, 0); | |
11937 | return; | |
11938 | } | |
11939 | ||
11940 | ssd_gen_swlog(dev, SSD_LOG_CAP_STATUS, cap); | |
11941 | ||
11942 | if (SSD_PL_CAP_CP == dev->hw_info_ext.cap_type) { | |
11943 | cap_threshold = SSD_PL_CAP_CP_THRESHOLD; | |
11944 | } | |
11945 | ||
11946 | //use the fw event id? | |
11947 | if (cap < cap_threshold) { | |
11948 | if (!test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) { | |
11949 | ssd_gen_swlog(dev, SSD_LOG_BATTERY_FAULT, 0); | |
11950 | } | |
11951 | } else if (cap >= (cap_threshold + SSD_PL_CAP_THRESHOLD_HYST)) { | |
11952 | if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) { | |
11953 | ssd_gen_swlog(dev, SSD_LOG_BATTERY_OK, 0); | |
11954 | } | |
11955 | } | |
11956 | } | |
11957 | ||
11958 | static void ssd_routine_start(void *data) | |
11959 | { | |
11960 | struct ssd_device *dev; | |
11961 | ||
11962 | if (!data) { | |
11963 | return; | |
11964 | } | |
11965 | dev = data; | |
11966 | ||
11967 | dev->routine_tick++; | |
11968 | ||
11969 | if (test_bit(SSD_INIT_WORKQ, &dev->state) && !ssd_busy(dev)) { | |
11970 | (void)test_and_set_bit(SSD_LOG_HW, &dev->state); | |
11971 | queue_work(dev->workq, &dev->log_work); | |
11972 | } | |
11973 | ||
11974 | if ((dev->routine_tick % SSD_HWMON_ROUTINE_TICK) == 0 && test_bit(SSD_INIT_WORKQ, &dev->state)) { | |
11975 | queue_work(dev->workq, &dev->hwmon_work); | |
11976 | } | |
11977 | ||
11978 | if ((dev->routine_tick % SSD_CAPMON_ROUTINE_TICK) == 0 && test_bit(SSD_INIT_WORKQ, &dev->state)) { | |
11979 | queue_work(dev->workq, &dev->capmon_work); | |
11980 | } | |
11981 | ||
11982 | if ((dev->routine_tick % SSD_CAPMON2_ROUTINE_TICK) == 0 && test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon) && test_bit(SSD_INIT_WORKQ, &dev->state)) { | |
11983 | /* CAP fault? check again */ | |
11984 | queue_work(dev->workq, &dev->capmon_work); | |
11985 | } | |
11986 | ||
11987 | if (test_bit(SSD_INIT_WORKQ, &dev->state)) { | |
11988 | queue_work(dev->workq, &dev->tempmon_work); | |
11989 | } | |
11990 | ||
11991 | /* schedule routine */ | |
11992 | mod_timer(&dev->routine_timer, jiffies + msecs_to_jiffies(SSD_ROUTINE_INTERVAL)); | |
11993 | } | |
11994 | ||
11995 | static void ssd_cleanup_routine(struct ssd_device *dev) | |
11996 | { | |
11997 | if (unlikely(mode != SSD_DRV_MODE_STANDARD)) | |
11998 | return; | |
11999 | ||
12000 | (void)ssd_del_timer(&dev->routine_timer); | |
12001 | ||
12002 | (void)ssd_del_timer(&dev->bm_timer); | |
12003 | } | |
12004 | ||
12005 | static int ssd_init_routine(struct ssd_device *dev) | |
12006 | { | |
12007 | if (unlikely(mode != SSD_DRV_MODE_STANDARD)) | |
12008 | return 0; | |
12009 | ||
12010 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)) | |
12011 | INIT_WORK(&dev->bm_work, ssd_bm_worker, dev); | |
12012 | INIT_WORK(&dev->hwmon_work, ssd_hwmon_worker, dev); | |
12013 | INIT_WORK(&dev->capmon_work, ssd_capmon_worker, dev); | |
12014 | INIT_WORK(&dev->tempmon_work, ssd_tempmon_worker, dev); | |
12015 | #else | |
12016 | INIT_WORK(&dev->bm_work, ssd_bm_worker); | |
12017 | INIT_WORK(&dev->hwmon_work, ssd_hwmon_worker); | |
12018 | INIT_WORK(&dev->capmon_work, ssd_capmon_worker); | |
12019 | INIT_WORK(&dev->tempmon_work, ssd_tempmon_worker); | |
12020 | #endif | |
12021 | ||
12022 | /* initial log */ | |
12023 | ssd_initial_log(dev); | |
12024 | ||
12025 | /* schedule bm routine */ | |
12026 | ssd_add_timer(&dev->bm_timer, msecs_to_jiffies(SSD_BM_CAP_LEARNING_DELAY), ssd_bm_routine_start, dev); | |
12027 | ||
12028 | /* schedule routine */ | |
12029 | ssd_add_timer(&dev->routine_timer, msecs_to_jiffies(SSD_ROUTINE_INTERVAL), ssd_routine_start, dev); | |
12030 | ||
12031 | return 0; | |
12032 | } | |
12033 | ||
/*
 * PCI remove callback: take the device offline and tear everything down
 * in the reverse order of ssd_init_one().  The teardown sequence is
 * order-critical: work items are drained before the caches they touch
 * are flushed, and IRQs are freed before command/response memory.
 */
static void
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
__devexit
#endif
ssd_remove_one (struct pci_dev *pdev)
{
	struct ssd_device *dev;

	if (!pdev) {
		return;
	}

	dev = pci_get_drvdata(pdev);
	if (!dev) {
		return;
	}

	/* detach from the global device list */
	list_del_init(&dev->list);

	ssd_unregister_sysfs(dev);

	/* offline firstly */
	test_and_clear_bit(SSD_ONLINE, &dev->state);

	/* clean work queue first (master port only owns the workq) */
	if (!dev->slave) {
		test_and_clear_bit(SSD_INIT_WORKQ, &dev->state);
		ssd_cleanup_workq(dev);
	}

	/* flush cache and persist metadata before the device goes away */
	(void)ssd_flush(dev);
	(void)ssd_save_md(dev);

	/* save smart */
	if (!dev->slave) {
		ssd_save_smart(dev);
	}

	if (test_and_clear_bit(SSD_INIT_BD, &dev->state)) {
		ssd_cleanup_blkdev(dev);
	}

	if (!dev->slave) {
		ssd_cleanup_chardev(dev);
	}

	/* stop the routine/bm timers */
	if (!dev->slave) {
		ssd_cleanup_routine(dev);
	}

	ssd_cleanup_queue(dev);

	ssd_cleanup_tag(dev);
	ssd_cleanup_thread(dev);

	ssd_free_irq(dev);

	ssd_cleanup_dcmd(dev);
	ssd_cleanup_cmd(dev);
	ssd_cleanup_response(dev);

	if (!dev->slave) {
		ssd_cleanup_log(dev);
	}

	/* ask the controller to reload its firmware on the way out */
	if (dev->reload_fw) { //reload fw
		dev->has_non_0x98_reg_access = 1;
		ssd_reg32_write(dev->ctrlp + SSD_RELOAD_FW_REG, SSD_RELOAD_FW);
	}

	/* unmap physical address */
#ifdef LINUX_SUSE_OS
	iounmap(dev->ctrlp);
#else
	pci_iounmap(pdev, dev->ctrlp);
#endif

	release_mem_region(dev->mmio_base, dev->mmio_len);

	pci_disable_device(pdev);

	pci_set_drvdata(pdev, NULL);

	/* presumably drops the kref taken at probe time — verify ssd_put() */
	ssd_put(dev);
}
12121 | ||
12122 | static int | |
12123 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)) | |
12124 | __devinit | |
12125 | #endif | |
12126 | ssd_init_one(struct pci_dev *pdev, | |
12127 | const struct pci_device_id *ent) | |
12128 | { | |
12129 | struct ssd_device *dev; | |
1197134c | 12130 | struct timeval tv; |
361ebed5 HSDT |
12131 | int ret = 0; |
12132 | ||
12133 | if (!pdev || !ent) { | |
12134 | ret = -EINVAL; | |
12135 | goto out; | |
12136 | } | |
12137 | ||
12138 | dev = kmalloc(sizeof(struct ssd_device), GFP_KERNEL); | |
12139 | if (!dev) { | |
12140 | ret = -ENOMEM; | |
12141 | goto out_alloc_dev; | |
12142 | } | |
12143 | memset(dev, 0, sizeof(struct ssd_device)); | |
12144 | ||
12145 | dev->owner = THIS_MODULE; | |
12146 | ||
12147 | if (SSD_SLAVE_PORT_DEVID == ent->device) { | |
12148 | dev->slave = 1; | |
12149 | } | |
12150 | ||
12151 | dev->idx = ssd_get_index(dev->slave); | |
12152 | if (dev->idx < 0) { | |
12153 | ret = -ENOMEM; | |
12154 | goto out_get_index; | |
12155 | } | |
12156 | ||
12157 | if (!dev->slave) { | |
12158 | snprintf(dev->name, SSD_DEV_NAME_LEN, SSD_DEV_NAME); | |
12159 | ssd_set_dev_name(&dev->name[strlen(SSD_DEV_NAME)], SSD_DEV_NAME_LEN-strlen(SSD_DEV_NAME), dev->idx); | |
12160 | ||
12161 | dev->major = ssd_major; | |
12162 | dev->cmajor = ssd_cmajor; | |
12163 | } else { | |
12164 | snprintf(dev->name, SSD_DEV_NAME_LEN, SSD_SDEV_NAME); | |
12165 | ssd_set_dev_name(&dev->name[strlen(SSD_SDEV_NAME)], SSD_DEV_NAME_LEN-strlen(SSD_SDEV_NAME), dev->idx); | |
12166 | dev->major = ssd_major_sl; | |
12167 | dev->cmajor = 0; | |
12168 | } | |
12169 | ||
1197134c KM |
12170 | do_gettimeofday(&tv); |
12171 | dev->reset_time = tv.tv_sec; | |
12172 | ||
361ebed5 HSDT |
12173 | atomic_set(&(dev->refcnt), 0); |
12174 | atomic_set(&(dev->tocnt), 0); | |
12175 | ||
12176 | mutex_init(&dev->fw_mutex); | |
12177 | ||
12178 | //xx | |
12179 | mutex_init(&dev->gd_mutex); | |
da3355df SF |
12180 | dev->has_non_0x98_reg_access = 0; |
12181 | ||
12182 | //init in_flight lock | |
12183 | spin_lock_init(&dev->in_flight_lock); | |
361ebed5 HSDT |
12184 | |
12185 | dev->pdev = pdev; | |
12186 | pci_set_drvdata(pdev, dev); | |
12187 | ||
12188 | kref_init(&dev->kref); | |
12189 | ||
12190 | ret = pci_enable_device(pdev); | |
12191 | if (ret) { | |
12192 | hio_warn("%s: can not enable device\n", dev->name); | |
12193 | goto out_enable_device; | |
12194 | } | |
12195 | ||
12196 | pci_set_master(pdev); | |
12197 | ||
12198 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)) | |
12199 | ret = pci_set_dma_mask(pdev, DMA_64BIT_MASK); | |
12200 | #else | |
12201 | ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); | |
12202 | #endif | |
12203 | if (ret) { | |
12204 | hio_warn("%s: set dma mask: failed\n", dev->name); | |
12205 | goto out_set_dma_mask; | |
12206 | } | |
12207 | ||
12208 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)) | |
12209 | ret = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); | |
12210 | #else | |
12211 | ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | |
12212 | #endif | |
12213 | if (ret) { | |
12214 | hio_warn("%s: set consistent dma mask: failed\n", dev->name); | |
12215 | goto out_set_dma_mask; | |
12216 | } | |
12217 | ||
12218 | dev->mmio_base = pci_resource_start(pdev, 0); | |
12219 | dev->mmio_len = pci_resource_len(pdev, 0); | |
12220 | ||
12221 | if (!request_mem_region(dev->mmio_base, dev->mmio_len, SSD_DEV_NAME)) { | |
12222 | hio_warn("%s: can not reserve MMIO region 0\n", dev->name); | |
12223 | ret = -EBUSY; | |
12224 | goto out_request_mem_region; | |
12225 | } | |
12226 | ||
12227 | /* 2.6.9 kernel bug */ | |
12228 | dev->ctrlp = pci_iomap(pdev, 0, 0); | |
12229 | if (!dev->ctrlp) { | |
12230 | hio_warn("%s: can not remap IO region 0\n", dev->name); | |
12231 | ret = -ENOMEM; | |
12232 | goto out_pci_iomap; | |
12233 | } | |
12234 | ||
12235 | ret = ssd_check_hw(dev); | |
12236 | if (ret) { | |
12237 | hio_err("%s: check hardware failed\n", dev->name); | |
12238 | goto out_check_hw; | |
12239 | } | |
12240 | ||
12241 | ret = ssd_init_protocol_info(dev); | |
12242 | if (ret) { | |
12243 | hio_err("%s: init protocol info failed\n", dev->name); | |
12244 | goto out_init_protocol_info; | |
12245 | } | |
12246 | ||
12247 | /* alarm led ? */ | |
12248 | ssd_clear_alarm(dev); | |
12249 | ||
12250 | ret = ssd_init_fw_info(dev); | |
12251 | if (ret) { | |
12252 | hio_err("%s: init firmware info failed\n", dev->name); | |
12253 | /* alarm led */ | |
12254 | ssd_set_alarm(dev); | |
12255 | goto out_init_fw_info; | |
12256 | } | |
12257 | ||
12258 | /* slave port ? */ | |
12259 | if (dev->slave) { | |
12260 | goto init_next1; | |
12261 | } | |
12262 | ||
12263 | ret = ssd_init_rom_info(dev); | |
12264 | if (ret) { | |
12265 | hio_err("%s: init rom info failed\n", dev->name); | |
12266 | /* alarm led */ | |
12267 | ssd_set_alarm(dev); | |
12268 | goto out_init_rom_info; | |
12269 | } | |
12270 | ||
12271 | ret = ssd_init_label(dev); | |
12272 | if (ret) { | |
12273 | hio_err("%s: init label failed\n", dev->name); | |
12274 | /* alarm led */ | |
12275 | ssd_set_alarm(dev); | |
12276 | goto out_init_label; | |
12277 | } | |
12278 | ||
12279 | ret = ssd_init_workq(dev); | |
12280 | if (ret) { | |
12281 | hio_warn("%s: init workq failed\n", dev->name); | |
12282 | goto out_init_workq; | |
12283 | } | |
12284 | (void)test_and_set_bit(SSD_INIT_WORKQ, &dev->state); | |
12285 | ||
12286 | ret = ssd_init_log(dev); | |
12287 | if (ret) { | |
12288 | hio_err("%s: init log failed\n", dev->name); | |
12289 | /* alarm led */ | |
12290 | ssd_set_alarm(dev); | |
12291 | goto out_init_log; | |
12292 | } | |
12293 | ||
12294 | ret = ssd_init_smart(dev); | |
12295 | if (ret) { | |
12296 | hio_err("%s: init info failed\n", dev->name); | |
12297 | /* alarm led */ | |
12298 | ssd_set_alarm(dev); | |
12299 | goto out_init_smart; | |
12300 | } | |
12301 | ||
12302 | init_next1: | |
12303 | ret = ssd_init_hw_info(dev); | |
12304 | if (ret) { | |
12305 | hio_err("%s: init hardware info failed\n", dev->name); | |
12306 | /* alarm led */ | |
12307 | ssd_set_alarm(dev); | |
12308 | goto out_init_hw_info; | |
12309 | } | |
12310 | ||
12311 | /* slave port ? */ | |
12312 | if (dev->slave) { | |
12313 | goto init_next2; | |
12314 | } | |
12315 | ||
12316 | ret = ssd_init_sensor(dev); | |
12317 | if (ret) { | |
12318 | hio_err("%s: init sensor failed\n", dev->name); | |
12319 | /* alarm led */ | |
12320 | ssd_set_alarm(dev); | |
12321 | goto out_init_sensor; | |
12322 | } | |
12323 | ||
12324 | ret = ssd_init_pl_cap(dev); | |
12325 | if (ret) { | |
12326 | hio_err("%s: int pl_cap failed\n", dev->name); | |
12327 | /* alarm led */ | |
12328 | ssd_set_alarm(dev); | |
12329 | goto out_init_pl_cap; | |
12330 | } | |
12331 | ||
12332 | init_next2: | |
12333 | ret = ssd_check_init_state(dev); | |
12334 | if (ret) { | |
12335 | hio_err("%s: check init state failed\n", dev->name); | |
12336 | /* alarm led */ | |
12337 | ssd_set_alarm(dev); | |
12338 | goto out_check_init_state; | |
12339 | } | |
12340 | ||
12341 | ret = ssd_init_response(dev); | |
12342 | if (ret) { | |
12343 | hio_warn("%s: init resp_msg failed\n", dev->name); | |
12344 | goto out_init_response; | |
12345 | } | |
12346 | ||
12347 | ret = ssd_init_cmd(dev); | |
12348 | if (ret) { | |
12349 | hio_warn("%s: init msg failed\n", dev->name); | |
12350 | goto out_init_cmd; | |
12351 | } | |
12352 | ||
12353 | ret = ssd_init_dcmd(dev); | |
12354 | if (ret) { | |
12355 | hio_warn("%s: init cmd failed\n", dev->name); | |
12356 | goto out_init_dcmd; | |
12357 | } | |
12358 | ||
12359 | ret = ssd_init_irq(dev); | |
12360 | if (ret) { | |
12361 | hio_warn("%s: init irq failed\n", dev->name); | |
12362 | goto out_init_irq; | |
12363 | } | |
12364 | ||
12365 | ret = ssd_init_thread(dev); | |
12366 | if (ret) { | |
12367 | hio_warn("%s: init thread failed\n", dev->name); | |
12368 | goto out_init_thread; | |
12369 | } | |
12370 | ||
12371 | ret = ssd_init_tag(dev); | |
12372 | if(ret) { | |
12373 | hio_warn("%s: init tags failed\n", dev->name); | |
12374 | goto out_init_tags; | |
12375 | } | |
12376 | ||
12377 | /* */ | |
12378 | (void)test_and_set_bit(SSD_ONLINE, &dev->state); | |
12379 | ||
12380 | ret = ssd_init_queue(dev); | |
12381 | if (ret) { | |
12382 | hio_warn("%s: init queue failed\n", dev->name); | |
12383 | goto out_init_queue; | |
12384 | } | |
12385 | ||
12386 | /* slave port ? */ | |
12387 | if (dev->slave) { | |
12388 | goto init_next3; | |
12389 | } | |
12390 | ||
12391 | ret = ssd_init_ot_protect(dev); | |
12392 | if (ret) { | |
12393 | hio_err("%s: int ot_protect failed\n", dev->name); | |
12394 | /* alarm led */ | |
12395 | ssd_set_alarm(dev); | |
12396 | goto out_int_ot_protect; | |
12397 | } | |
12398 | ||
12399 | ret = ssd_init_wmode(dev); | |
12400 | if (ret) { | |
12401 | hio_warn("%s: init write mode\n", dev->name); | |
12402 | goto out_init_wmode; | |
12403 | } | |
12404 | ||
12405 | /* init routine after hw is ready */ | |
12406 | ret = ssd_init_routine(dev); | |
12407 | if (ret) { | |
12408 | hio_warn("%s: init routine\n", dev->name); | |
12409 | goto out_init_routine; | |
12410 | } | |
12411 | ||
12412 | ret = ssd_init_chardev(dev); | |
12413 | if (ret) { | |
12414 | hio_warn("%s: register char device failed\n", dev->name); | |
12415 | goto out_init_chardev; | |
12416 | } | |
12417 | ||
12418 | init_next3: | |
12419 | ret = ssd_init_blkdev(dev); | |
12420 | if (ret) { | |
12421 | hio_warn("%s: register block device failed\n", dev->name); | |
12422 | goto out_init_blkdev; | |
12423 | } | |
12424 | (void)test_and_set_bit(SSD_INIT_BD, &dev->state); | |
12425 | ||
12426 | ret = ssd_register_sysfs(dev); | |
12427 | if (ret) { | |
12428 | hio_warn("%s: register sysfs failed\n", dev->name); | |
12429 | goto out_register_sysfs; | |
12430 | } | |
12431 | ||
12432 | dev->save_md = 1; | |
12433 | ||
12434 | list_add_tail(&dev->list, &ssd_list); | |
12435 | ||
12436 | return 0; | |
12437 | ||
12438 | out_register_sysfs: | |
12439 | test_and_clear_bit(SSD_INIT_BD, &dev->state); | |
12440 | ssd_cleanup_blkdev(dev); | |
12441 | out_init_blkdev: | |
12442 | /* slave port ? */ | |
12443 | if (!dev->slave) { | |
12444 | ssd_cleanup_chardev(dev); | |
12445 | } | |
12446 | out_init_chardev: | |
12447 | /* slave port ? */ | |
12448 | if (!dev->slave) { | |
12449 | ssd_cleanup_routine(dev); | |
12450 | } | |
12451 | out_init_routine: | |
12452 | out_init_wmode: | |
12453 | out_int_ot_protect: | |
12454 | ssd_cleanup_queue(dev); | |
12455 | out_init_queue: | |
12456 | test_and_clear_bit(SSD_ONLINE, &dev->state); | |
12457 | ssd_cleanup_tag(dev); | |
12458 | out_init_tags: | |
12459 | ssd_cleanup_thread(dev); | |
12460 | out_init_thread: | |
12461 | ssd_free_irq(dev); | |
12462 | out_init_irq: | |
12463 | ssd_cleanup_dcmd(dev); | |
12464 | out_init_dcmd: | |
12465 | ssd_cleanup_cmd(dev); | |
12466 | out_init_cmd: | |
12467 | ssd_cleanup_response(dev); | |
12468 | out_init_response: | |
12469 | out_check_init_state: | |
12470 | out_init_pl_cap: | |
12471 | out_init_sensor: | |
12472 | out_init_hw_info: | |
12473 | out_init_smart: | |
12474 | /* slave port ? */ | |
12475 | if (!dev->slave) { | |
12476 | ssd_cleanup_log(dev); | |
12477 | } | |
12478 | out_init_log: | |
12479 | /* slave port ? */ | |
12480 | if (!dev->slave) { | |
12481 | test_and_clear_bit(SSD_INIT_WORKQ, &dev->state); | |
12482 | ssd_cleanup_workq(dev); | |
12483 | } | |
12484 | out_init_workq: | |
12485 | out_init_label: | |
12486 | out_init_rom_info: | |
12487 | out_init_fw_info: | |
12488 | out_init_protocol_info: | |
12489 | out_check_hw: | |
12490 | #ifdef LINUX_SUSE_OS | |
12491 | iounmap(dev->ctrlp); | |
12492 | #else | |
12493 | pci_iounmap(pdev, dev->ctrlp); | |
12494 | #endif | |
12495 | out_pci_iomap: | |
12496 | release_mem_region(dev->mmio_base, dev->mmio_len); | |
12497 | out_request_mem_region: | |
12498 | out_set_dma_mask: | |
12499 | pci_disable_device(pdev); | |
12500 | out_enable_device: | |
12501 | pci_set_drvdata(pdev, NULL); | |
12502 | out_get_index: | |
12503 | kfree(dev); | |
12504 | out_alloc_dev: | |
12505 | out: | |
12506 | return ret; | |
12507 | } | |
12508 | ||
12509 | static void ssd_cleanup_tasklet(void) | |
12510 | { | |
12511 | int i; | |
12512 | for_each_online_cpu(i) { | |
12513 | tasklet_kill(&per_cpu(ssd_tasklet, i)); | |
12514 | } | |
12515 | } | |
12516 | ||
12517 | static int ssd_init_tasklet(void) | |
12518 | { | |
12519 | int i; | |
12520 | ||
12521 | for_each_online_cpu(i) { | |
12522 | INIT_LIST_HEAD(&per_cpu(ssd_doneq, i)); | |
12523 | ||
12524 | if (finject) { | |
12525 | tasklet_init(&per_cpu(ssd_tasklet, i), __ssd_done_db, 0); | |
12526 | } else { | |
12527 | tasklet_init(&per_cpu(ssd_tasklet, i), __ssd_done, 0); | |
12528 | } | |
12529 | } | |
12530 | ||
12531 | return 0; | |
12532 | } | |
12533 | ||
12534 | static struct pci_device_id ssd_pci_tbl[] = { | |
12535 | { 0x10ee, 0x0007, PCI_ANY_ID, PCI_ANY_ID, }, /* g3 */ | |
12536 | { 0x19e5, 0x0007, PCI_ANY_ID, PCI_ANY_ID, }, /* v1 */ | |
12537 | //{ 0x19e5, 0x0008, PCI_ANY_ID, PCI_ANY_ID, }, /* v1 sp*/ | |
12538 | { 0x19e5, 0x0009, PCI_ANY_ID, PCI_ANY_ID, }, /* v2 */ | |
12539 | { 0x19e5, 0x000a, PCI_ANY_ID, PCI_ANY_ID, }, /* v2 dp slave*/ | |
12540 | { 0, } | |
12541 | }; | |
361ebed5 | 12542 | |
1197134c KM |
/*
 * PM suspend handler (driver power management handler for pm_ops).
 *
 * Quiesces the device before system sleep: takes it offline, stops the
 * master-port workqueue, flushes cache and metadata, saves SMART data,
 * tears down the routine work/threads/IRQs/log, optionally asks the
 * controller to reload its firmware, then unmaps MMIO and disables the
 * PCI device.  On kernels >= 2.6.32 this is a dev_pm_ops callback and
 * the pci_dev is recovered from the generic struct device.
 *
 * Returns 0 on success, -EINVAL if no pdev or no driver data.
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32))
static int ssd_hio_suspend(struct pci_dev *pdev, pm_message_t state)
{
#else
static int ssd_hio_suspend(struct device *ddev)
{
	struct pci_dev *pdev = to_pci_dev(ddev);
#endif
	struct ssd_device *dev;


	if (!pdev) {
		return -EINVAL;
	}

	dev = pci_get_drvdata(pdev);
	if (!dev) {
		return -EINVAL;
	}

	hio_warn("%s: suspend disk start.\n", dev->name);
	ssd_unregister_sysfs(dev);

	/* offline firstly */
	test_and_clear_bit(SSD_ONLINE, &dev->state);

	/* clean work queue first (master port only) */
	if (!dev->slave) {
		test_and_clear_bit(SSD_INIT_WORKQ, &dev->state);
		ssd_cleanup_workq(dev);
	}

	/* flush cache and persist metadata */
	(void)ssd_flush(dev);
	(void)ssd_save_md(dev);

	/* save smart (master port only) */
	if (!dev->slave) {
		ssd_save_smart(dev);
	}

	/* clean routine (master port only) */
	if (!dev->slave) {
		ssd_cleanup_routine(dev);
	}

	ssd_cleanup_thread(dev);

	ssd_free_irq(dev);

	if (!dev->slave) {
		ssd_cleanup_log(dev);
	}

	if (dev->reload_fw) { /* ask the controller to reload its firmware */
		dev->has_non_0x98_reg_access = 1;
		ssd_reg32_write(dev->ctrlp + SSD_RELOAD_FW_REG, SSD_RELOAD_FW);
	}

	/* unmap the MMIO control window */
	if (dev->ctrlp) {
#ifdef LINUX_SUSE_OS
		iounmap(dev->ctrlp);
#else
		pci_iounmap(pdev, dev->ctrlp);
#endif
		dev->ctrlp = NULL;
	}

	if (dev->mmio_base) {
		release_mem_region(dev->mmio_base, dev->mmio_len);
		dev->mmio_base = 0;
	}

	pci_disable_device(pdev);

	hio_warn("%s: suspend disk finish.\n", dev->name);

	return 0;
}
12624 | ||
12625 | ||
12626 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)) | |
12627 | static int ssd_hio_resume(struct pci_dev *pdev) | |
12628 | { | |
12629 | #else | |
12630 | static int ssd_hio_resume(struct device *ddev) | |
12631 | { | |
12632 | struct pci_dev *pdev = to_pci_dev(ddev); | |
12633 | #endif | |
12634 | struct ssd_device *dev = NULL; | |
12635 | int ret = 0; | |
12636 | ||
12637 | if (!pdev ) { | |
12638 | ret = -EINVAL; | |
12639 | goto out; | |
12640 | } | |
12641 | ||
12642 | dev = pci_get_drvdata(pdev); | |
12643 | if (!dev) { | |
12644 | ret = -ENOMEM; | |
12645 | goto out_alloc_dev; | |
12646 | } | |
12647 | ||
12648 | hio_warn("%s: resume disk start.\n", dev->name); | |
12649 | ret = pci_enable_device(pdev); | |
12650 | if (ret) { | |
12651 | hio_warn("%s: can not enable device\n", dev->name); | |
12652 | goto out_enable_device; | |
12653 | } | |
12654 | ||
12655 | pci_set_master(pdev); | |
12656 | ||
12657 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)) | |
12658 | ret = pci_set_dma_mask(pdev, DMA_64BIT_MASK); | |
12659 | #else | |
12660 | ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); | |
12661 | #endif | |
12662 | if (ret) { | |
12663 | hio_warn("%s: set dma mask: failed\n", dev->name); | |
12664 | goto out_set_dma_mask; | |
12665 | } | |
12666 | ||
12667 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)) | |
12668 | ret = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); | |
12669 | #else | |
12670 | ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | |
12671 | #endif | |
12672 | if (ret) { | |
12673 | hio_warn("%s: set consistent dma mask: failed\n", dev->name); | |
12674 | goto out_set_dma_mask; | |
12675 | } | |
12676 | ||
12677 | dev->mmio_base = pci_resource_start(pdev, 0); | |
12678 | dev->mmio_len = pci_resource_len(pdev, 0); | |
12679 | ||
12680 | if (!request_mem_region(dev->mmio_base, dev->mmio_len, SSD_DEV_NAME)) { | |
12681 | hio_warn("%s: can not reserve MMIO region 0\n", dev->name); | |
12682 | ret = -EBUSY; | |
12683 | goto out_request_mem_region; | |
12684 | } | |
12685 | ||
12686 | /* 2.6.9 kernel bug */ | |
12687 | dev->ctrlp = pci_iomap(pdev, 0, 0); | |
12688 | if (!dev->ctrlp) { | |
12689 | hio_warn("%s: can not remap IO region 0\n", dev->name); | |
12690 | ret = -ENOMEM; | |
12691 | goto out_pci_iomap; | |
12692 | } | |
12693 | ||
12694 | ret = ssd_check_hw(dev); | |
12695 | if (ret) { | |
12696 | hio_err("%s: check hardware failed\n", dev->name); | |
12697 | goto out_check_hw; | |
12698 | } | |
12699 | ||
12700 | /* alarm led ? */ | |
12701 | ssd_clear_alarm(dev); | |
12702 | ||
12703 | ret = ssd_init_fw_info(dev); | |
12704 | if (ret) { | |
12705 | hio_err("%s: init firmware info failed\n", dev->name); | |
12706 | /* alarm led */ | |
12707 | ssd_set_alarm(dev); | |
12708 | goto out_init_fw_info; | |
12709 | } | |
12710 | ||
12711 | /* slave port ? */ | |
12712 | if (dev->slave) { | |
12713 | goto init_next1; | |
12714 | } | |
12715 | ||
12716 | ret = ssd_init_rom_info(dev); | |
12717 | if (ret) { | |
12718 | hio_err("%s: init rom info failed\n", dev->name); | |
12719 | /* alarm led */ | |
12720 | ssd_set_alarm(dev); | |
12721 | goto out_init_rom_info; | |
12722 | } | |
12723 | ||
12724 | ret = ssd_init_label(dev); | |
12725 | if (ret) { | |
12726 | hio_err("%s: init label failed\n", dev->name); | |
12727 | /* alarm led */ | |
12728 | ssd_set_alarm(dev); | |
12729 | goto out_init_label; | |
12730 | } | |
12731 | ||
12732 | ret = ssd_init_workq(dev); | |
12733 | if (ret) { | |
12734 | hio_warn("%s: init workq failed\n", dev->name); | |
12735 | goto out_init_workq; | |
12736 | } | |
12737 | (void)test_and_set_bit(SSD_INIT_WORKQ, &dev->state); | |
12738 | ||
12739 | ret = ssd_init_log(dev); | |
12740 | if (ret) { | |
12741 | hio_err("%s: init log failed\n", dev->name); | |
12742 | /* alarm led */ | |
12743 | ssd_set_alarm(dev); | |
12744 | goto out_init_log; | |
12745 | } | |
12746 | ||
12747 | ret = ssd_init_smart(dev); | |
12748 | if (ret) { | |
12749 | hio_err("%s: init info failed\n", dev->name); | |
12750 | /* alarm led */ | |
12751 | ssd_set_alarm(dev); | |
12752 | goto out_init_smart; | |
12753 | } | |
12754 | ||
12755 | init_next1: | |
12756 | ret = ssd_init_hw_info(dev); | |
12757 | if (ret) { | |
12758 | hio_err("%s: init hardware info failed\n", dev->name); | |
12759 | /* alarm led */ | |
12760 | ssd_set_alarm(dev); | |
12761 | goto out_init_hw_info; | |
12762 | } | |
12763 | ||
12764 | /* slave port ? */ | |
12765 | if (dev->slave) { | |
12766 | goto init_next2; | |
12767 | } | |
12768 | ||
12769 | ret = ssd_init_sensor(dev); | |
12770 | if (ret) { | |
12771 | hio_err("%s: init sensor failed\n", dev->name); | |
12772 | /* alarm led */ | |
12773 | ssd_set_alarm(dev); | |
12774 | goto out_init_sensor; | |
12775 | } | |
12776 | ||
12777 | ret = ssd_init_pl_cap(dev); | |
12778 | if (ret) { | |
12779 | hio_err("%s: int pl_cap failed\n", dev->name); | |
12780 | /* alarm led */ | |
12781 | ssd_set_alarm(dev); | |
12782 | goto out_init_pl_cap; | |
12783 | } | |
12784 | ||
12785 | init_next2: | |
12786 | ret = ssd_check_init_state(dev); | |
12787 | if (ret) { | |
12788 | hio_err("%s: check init state failed\n", dev->name); | |
12789 | /* alarm led */ | |
12790 | ssd_set_alarm(dev); | |
12791 | goto out_check_init_state; | |
12792 | } | |
12793 | ||
12794 | //flush all base pointer to ssd | |
12795 | (void)ssd_reload_ssd_ptr(dev); | |
12796 | ||
12797 | ret = ssd_init_irq(dev); | |
12798 | if (ret) { | |
12799 | hio_warn("%s: init irq failed\n", dev->name); | |
12800 | goto out_init_irq; | |
12801 | } | |
12802 | ||
12803 | ret = ssd_init_thread(dev); | |
12804 | if (ret) { | |
12805 | hio_warn("%s: init thread failed\n", dev->name); | |
12806 | goto out_init_thread; | |
12807 | } | |
12808 | ||
12809 | /* */ | |
12810 | (void)test_and_set_bit(SSD_ONLINE, &dev->state); | |
12811 | ||
12812 | /* slave port ? */ | |
12813 | if (dev->slave) { | |
12814 | goto init_next3; | |
12815 | } | |
12816 | ||
12817 | ret = ssd_init_ot_protect(dev); | |
12818 | if (ret) { | |
12819 | hio_err("%s: int ot_protect failed\n", dev->name); | |
12820 | /* alarm led */ | |
12821 | ssd_set_alarm(dev); | |
12822 | goto out_int_ot_protect; | |
12823 | } | |
12824 | ||
12825 | ret = ssd_init_wmode(dev); | |
12826 | if (ret) { | |
12827 | hio_warn("%s: init write mode\n", dev->name); | |
12828 | goto out_init_wmode; | |
12829 | } | |
12830 | ||
12831 | /* init routine after hw is ready */ | |
12832 | ret = ssd_init_routine(dev); | |
12833 | if (ret) { | |
12834 | hio_warn("%s: init routine\n", dev->name); | |
12835 | goto out_init_routine; | |
12836 | } | |
12837 | ||
12838 | init_next3: | |
12839 | (void)test_and_set_bit(SSD_INIT_BD, &dev->state); | |
12840 | ||
12841 | dev->save_md = 1; | |
12842 | ||
12843 | hio_warn("%s: resume disk finish.\n", dev->name); | |
12844 | ||
12845 | return 0; | |
12846 | ||
12847 | out_init_routine: | |
12848 | out_init_wmode: | |
12849 | out_int_ot_protect: | |
12850 | ssd_cleanup_thread(dev); | |
12851 | out_init_thread: | |
12852 | ssd_free_irq(dev); | |
12853 | out_init_irq: | |
12854 | out_check_init_state: | |
12855 | out_init_pl_cap: | |
12856 | out_init_sensor: | |
12857 | out_init_hw_info: | |
12858 | out_init_smart: | |
12859 | /* slave port ? */ | |
12860 | if (!dev->slave) { | |
12861 | ssd_cleanup_log(dev); | |
12862 | } | |
12863 | out_init_log: | |
12864 | /* slave port ? */ | |
12865 | if (!dev->slave) { | |
12866 | test_and_clear_bit(SSD_INIT_WORKQ, &dev->state); | |
12867 | ssd_cleanup_workq(dev); | |
12868 | } | |
12869 | out_init_workq: | |
12870 | out_init_label: | |
12871 | out_init_rom_info: | |
12872 | out_init_fw_info: | |
12873 | out_check_hw: | |
12874 | #ifdef LINUX_SUSE_OS | |
12875 | iounmap(dev->ctrlp); | |
12876 | #else | |
12877 | pci_iounmap(pdev, dev->ctrlp); | |
12878 | #endif | |
12879 | out_pci_iomap: | |
12880 | release_mem_region(dev->mmio_base, dev->mmio_len); | |
12881 | out_request_mem_region: | |
12882 | out_set_dma_mask: | |
12883 | pci_disable_device(pdev); | |
12884 | out_enable_device: | |
12885 | out_alloc_dev: | |
12886 | out: | |
12887 | ||
12888 | hio_warn("%s: resume disk fail.\n", dev->name); | |
12889 | ||
12890 | return ret; | |
12891 | } | |
12892 | ||
/* Expose the PCI ID table for module autoloading (modprobe aliases). */
MODULE_DEVICE_TABLE(pci, ssd_pci_tbl);

/* On kernels >= 2.6.32, suspend/resume are wired through dev_pm_ops
 * instead of the legacy pci_driver .suspend/.resume hooks. */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32))
#else
SIMPLE_DEV_PM_OPS(hio_pm_ops, ssd_hio_suspend, ssd_hio_resume);
#endif
12899 | ||
12900 | MODULE_DEVICE_TABLE(pci, ssd_pci_tbl); | |
12901 | struct pci_driver ssd_driver = { | |
12902 | .name = MODULE_NAME, | |
12903 | .id_table = ssd_pci_tbl, | |
12904 | .probe = ssd_init_one, | |
12905 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)) | |
12906 | .remove = __devexit_p(ssd_remove_one), | |
361ebed5 HSDT |
12907 | #else |
12908 | .remove = ssd_remove_one, | |
12909 | #endif | |
1197134c KM |
12910 | |
12911 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)) | |
12912 | .suspend = ssd_hio_suspend, | |
12913 | .resume = ssd_hio_resume, | |
12914 | #else | |
12915 | .driver = { | |
12916 | .pm = &hio_pm_ops, | |
12917 | }, | |
12918 | #endif | |
361ebed5 HSDT |
12919 | }; |
12920 | ||
/*
 * Reboot-notifier callback, invoked on system shutdown/halt/reboot.
 * Flushes every registered device so no data or metadata is lost at
 * power-off; master ports additionally save SMART data, stop their
 * workqueue and optionally trigger a firmware reload.
 */
static int ssd_notify_reboot(struct notifier_block *nb, unsigned long event, void *buf)
{
	struct ssd_device *dev = NULL;
	struct ssd_device *n = NULL;

	/* _safe variant: entries may drop off the list while we walk it */
	list_for_each_entry_safe(dev, n, &ssd_list, list) {
		ssd_gen_swlog(dev, SSD_LOG_POWER_OFF, 0);

		/* flush data cache and persist metadata */
		(void)ssd_flush(dev);
		(void)ssd_save_md(dev);

		/* master port only: smart, workqueue, firmware reload */
		if (!dev->slave) {
			ssd_save_smart(dev);

			ssd_stop_workq(dev);

			if (dev->reload_fw) {
				dev->has_non_0x98_reg_access = 1;
				ssd_reg32_write(dev->ctrlp + SSD_RELOAD_FW_REG, SSD_RELOAD_FW);
			}
		}
	}

	return NOTIFY_OK;
}
12948 | ||
12949 | static struct notifier_block ssd_notifier = { | |
12950 | ssd_notify_reboot, NULL, 0 | |
12951 | }; | |
12952 | ||
12953 | static int __init ssd_init_module(void) | |
12954 | { | |
12955 | int ret = 0; | |
12956 | ||
12957 | hio_info("driver version: %s\n", DRIVER_VERSION); | |
12958 | ||
12959 | ret = ssd_init_index(); | |
12960 | if (ret) { | |
12961 | hio_warn("init index failed\n"); | |
12962 | goto out_init_index; | |
12963 | } | |
12964 | ||
12965 | ret = ssd_init_proc(); | |
12966 | if (ret) { | |
12967 | hio_warn("init proc failed\n"); | |
12968 | goto out_init_proc; | |
12969 | } | |
12970 | ||
12971 | ret = ssd_init_sysfs(); | |
12972 | if (ret) { | |
12973 | hio_warn("init sysfs failed\n"); | |
12974 | goto out_init_sysfs; | |
12975 | } | |
12976 | ||
12977 | ret = ssd_init_tasklet(); | |
12978 | if (ret) { | |
12979 | hio_warn("init tasklet failed\n"); | |
12980 | goto out_init_tasklet; | |
12981 | } | |
12982 | ||
12983 | #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12)) | |
12984 | ssd_class = class_simple_create(THIS_MODULE, SSD_DEV_NAME); | |
12985 | #else | |
12986 | ssd_class = class_create(THIS_MODULE, SSD_DEV_NAME); | |
12987 | #endif | |
12988 | if (IS_ERR(ssd_class)) { | |
12989 | ret = PTR_ERR(ssd_class); | |
12990 | goto out_class_create; | |
12991 | } | |
12992 | ||
12993 | if (ssd_cmajor > 0) { | |
12994 | ret = register_chrdev(ssd_cmajor, SSD_CDEV_NAME, &ssd_cfops); | |
12995 | } else { | |
12996 | ret = ssd_cmajor = register_chrdev(ssd_cmajor, SSD_CDEV_NAME, &ssd_cfops); | |
12997 | } | |
12998 | if (ret < 0) { | |
12999 | hio_warn("unable to register chardev major number\n"); | |
13000 | goto out_register_chardev; | |
13001 | } | |
13002 | ||
13003 | if (ssd_major > 0) { | |
13004 | ret = register_blkdev(ssd_major, SSD_DEV_NAME); | |
13005 | } else { | |
13006 | ret = ssd_major = register_blkdev(ssd_major, SSD_DEV_NAME); | |
13007 | } | |
13008 | if (ret < 0) { | |
13009 | hio_warn("unable to register major number\n"); | |
13010 | goto out_register_blkdev; | |
13011 | } | |
13012 | ||
13013 | if (ssd_major_sl > 0) { | |
13014 | ret = register_blkdev(ssd_major_sl, SSD_SDEV_NAME); | |
13015 | } else { | |
13016 | ret = ssd_major_sl = register_blkdev(ssd_major_sl, SSD_SDEV_NAME); | |
13017 | } | |
13018 | if (ret < 0) { | |
13019 | hio_warn("unable to register slave major number\n"); | |
13020 | goto out_register_blkdev_sl; | |
13021 | } | |
13022 | ||
13023 | if (mode < SSD_DRV_MODE_STANDARD || mode > SSD_DRV_MODE_BASE) { | |
13024 | mode = SSD_DRV_MODE_STANDARD; | |
13025 | } | |
13026 | ||
13027 | /* for debug */ | |
13028 | if (mode != SSD_DRV_MODE_STANDARD) { | |
13029 | ssd_minors = 1; | |
13030 | } | |
13031 | ||
13032 | if (int_mode < SSD_INT_LEGACY || int_mode > SSD_INT_MSIX) { | |
13033 | int_mode = SSD_INT_MODE_DEFAULT; | |
13034 | } | |
13035 | ||
13036 | if (threaded_irq) { | |
13037 | int_mode = SSD_INT_MSI; | |
13038 | } | |
13039 | ||
13040 | if (log_level >= SSD_LOG_NR_LEVEL || log_level < SSD_LOG_LEVEL_INFO) { | |
13041 | log_level = SSD_LOG_LEVEL_ERR; | |
13042 | } | |
13043 | ||
13044 | if (wmode < SSD_WMODE_BUFFER || wmode > SSD_WMODE_DEFAULT) { | |
13045 | wmode = SSD_WMODE_DEFAULT; | |
13046 | } | |
13047 | ||
13048 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)) | |
13049 | ret = pci_module_init(&ssd_driver); | |
13050 | #else | |
13051 | ret = pci_register_driver(&ssd_driver); | |
13052 | #endif | |
13053 | if (ret) { | |
13054 | hio_warn("pci init failed\n"); | |
13055 | goto out_pci_init; | |
13056 | } | |
13057 | ||
13058 | ret = register_reboot_notifier(&ssd_notifier); | |
13059 | if (ret) { | |
13060 | hio_warn("register reboot notifier failed\n"); | |
13061 | goto out_register_reboot_notifier; | |
13062 | } | |
13063 | ||
13064 | return 0; | |
13065 | ||
13066 | out_register_reboot_notifier: | |
13067 | out_pci_init: | |
13068 | pci_unregister_driver(&ssd_driver); | |
13069 | unregister_blkdev(ssd_major_sl, SSD_SDEV_NAME); | |
13070 | out_register_blkdev_sl: | |
13071 | unregister_blkdev(ssd_major, SSD_DEV_NAME); | |
13072 | out_register_blkdev: | |
13073 | unregister_chrdev(ssd_cmajor, SSD_CDEV_NAME); | |
13074 | out_register_chardev: | |
13075 | #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12)) | |
13076 | class_simple_destroy(ssd_class); | |
13077 | #else | |
13078 | class_destroy(ssd_class); | |
13079 | #endif | |
13080 | out_class_create: | |
13081 | ssd_cleanup_tasklet(); | |
13082 | out_init_tasklet: | |
13083 | ssd_cleanup_sysfs(); | |
13084 | out_init_sysfs: | |
13085 | ssd_cleanup_proc(); | |
13086 | out_init_proc: | |
13087 | ssd_cleanup_index(); | |
13088 | out_init_index: | |
13089 | return ret; | |
13090 | ||
13091 | } | |
13092 | ||
/*
 * Module exit: tear down everything ssd_init_module() set up, in
 * reverse order.  The ssd_exiting flag is raised first so other parts
 * of the driver can stop in-flight work before the PCI driver goes.
 */
static void __exit ssd_cleanup_module(void)
{

	hio_info("unload driver: %s\n", DRIVER_VERSION);
	/* signal exit before unregistering anything */
	ssd_exiting = 1;

	unregister_reboot_notifier(&ssd_notifier);

	pci_unregister_driver(&ssd_driver);

	unregister_blkdev(ssd_major_sl, SSD_SDEV_NAME);
	unregister_blkdev(ssd_major, SSD_DEV_NAME);
	unregister_chrdev(ssd_cmajor, SSD_CDEV_NAME);
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
	class_simple_destroy(ssd_class);
#else
	class_destroy(ssd_class);
#endif

	ssd_cleanup_tasklet();
	ssd_cleanup_sysfs();
	ssd_cleanup_proc();
	ssd_cleanup_index();
}
13118 | ||
/*
 * Attach @event_call as the event callback for the disk behind @bdev,
 * then replay relevant entries from the device's internal log.
 *
 * Entries timestamped between the device's uptime and "now" are fed to
 * the callback.  Two exceptions: SEU_FAULT1 entries older than the
 * last reset are skipped, and temperature events are coalesced — only
 * the most recent is kept, delivered at the end, and only if the
 * current temperature is still at or above SSD_OT_TEMP_HYST.
 *
 * Returns 0 on success, -EINVAL for a NULL bdev/callback/disk.
 */
int ssd_register_event_notifier(struct block_device *bdev, ssd_event_call event_call)
{
	struct ssd_device *dev;
	struct timeval tv;
	struct ssd_log *le, *temp_le = NULL;
	uint64_t cur;
	int temp = 0;

	int log_nr;

	if (!bdev || !event_call || !(bdev->bd_disk)) {
		return -EINVAL;
	}

	dev = bdev->bd_disk->private_data;
	dev->event_call = event_call;

	do_gettimeofday(&tv);
	cur = tv.tv_sec;	/* wall-clock seconds used as the replay cutoff */

	le = (struct ssd_log *)(dev->internal_log.log);
	log_nr = dev->internal_log.nr_log;

	/* walk the whole internal log, oldest entry first */
	while (log_nr--) {
		if (le->time <= cur && le->time >= dev->uptime) {
			/* skip SEU faults from before the last reset */
			if ((le->le.event == SSD_LOG_SEU_FAULT1) && (le->time < dev->reset_time)) {
				le++;
				continue;
			}
			/* coalesce temperature events: remember the newest only */
			if (le->le.event == SSD_LOG_OVER_TEMP || le->le.event == SSD_LOG_NORMAL_TEMP || le->le.event == SSD_LOG_WARN_TEMP) {
				if (!temp_le || le->time >= temp_le->time) {
					temp_le = le;
				}
				le++;
				continue;
			}
			(void)dev->event_call(dev->gd, le->le.event, ssd_parse_log(dev, le, 0));
		}
		le++;
	}

	/* deliver the latest temperature event only if still relevant */
	ssd_get_temperature(bdev, &temp);
	if (temp_le && (temp >= SSD_OT_TEMP_HYST)) {
		(void)dev->event_call(dev->gd, temp_le->le.event, ssd_parse_log(dev, temp_le, 0));
	}

	return 0;
}
13166 | ||
13167 | int ssd_unregister_event_notifier(struct block_device *bdev) | |
13168 | { | |
13169 | struct ssd_device *dev; | |
13170 | ||
13171 | if (!bdev || !(bdev->bd_disk)) { | |
13172 | return -EINVAL; | |
13173 | } | |
13174 | ||
13175 | dev = bdev->bd_disk->private_data; | |
13176 | dev->event_call = NULL; | |
13177 | ||
13178 | return 0; | |
13179 | } | |
13180 | ||
/* Symbols exported for in-kernel consumers of the hio device API. */
EXPORT_SYMBOL(ssd_get_label);
EXPORT_SYMBOL(ssd_get_version);
EXPORT_SYMBOL(ssd_set_otprotect);
EXPORT_SYMBOL(ssd_bm_status);
EXPORT_SYMBOL(ssd_submit_pbio);
EXPORT_SYMBOL(ssd_get_pciaddr);
EXPORT_SYMBOL(ssd_get_temperature);
EXPORT_SYMBOL(ssd_register_event_notifier);
EXPORT_SYMBOL(ssd_unregister_event_notifier);
EXPORT_SYMBOL(ssd_reset);
EXPORT_SYMBOL(ssd_set_wmode);



/* Module entry/exit points and metadata. */
module_init(ssd_init_module);
module_exit(ssd_cleanup_module);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei SSD DEV Team");
MODULE_DESCRIPTION("Huawei SSD driver");