]>
Commit | Line | Data |
---|---|---|
361ebed5 HSDT |
1 | /* |
2 | * Huawei SSD device driver | |
3 | * Copyright (c) 2016, Huawei Technologies Co., Ltd. | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify it | |
6 | * under the terms and conditions of the GNU General Public License, | |
7 | * version 2, as published by the Free Software Foundation. | |
8 | * | |
9 | * This program is distributed in the hope it will be useful, but WITHOUT | |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
12 | * more details. | |
13 | */ | |
b44043bd | 14 | |
361ebed5 HSDT |
15 | #ifndef LINUX_VERSION_CODE |
16 | #include <linux/version.h> | |
17 | #endif | |
18 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)) | |
19 | #include <linux/config.h> | |
20 | #endif | |
21 | #include <linux/types.h> | |
22 | #include <linux/kernel.h> | |
23 | #include <linux/module.h> | |
24 | #include <linux/bio.h> | |
25 | #include <linux/timer.h> | |
26 | #include <linux/init.h> | |
27 | #include <linux/pci.h> | |
28 | #include <linux/slab.h> | |
29 | #include <linux/spinlock.h> | |
30 | #include <linux/blkdev.h> | |
31 | #include <linux/sched.h> | |
32 | #include <linux/fcntl.h> | |
33 | #include <linux/interrupt.h> | |
34 | #include <linux/compiler.h> | |
35 | #include <linux/bitops.h> | |
36 | #include <linux/delay.h> | |
37 | #include <linux/time.h> | |
38 | #include <linux/stat.h> | |
39 | #include <linux/fs.h> | |
40 | #include <linux/dma-mapping.h> | |
41 | #include <linux/completion.h> | |
42 | #include <linux/workqueue.h> | |
43 | #include <linux/mm.h> | |
44 | #include <linux/ioctl.h> | |
45 | #include <linux/hdreg.h> /* HDIO_GETGEO */ | |
46 | #include <linux/list.h> | |
47 | #include <linux/reboot.h> | |
48 | #include <linux/kthread.h> | |
49 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0)) | |
50 | #include <linux/seq_file.h> | |
51 | #endif | |
52 | #include <asm/uaccess.h> | |
53 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0)) | |
54 | #include <linux/scatterlist.h> | |
55 | #include <linux/vmalloc.h> | |
56 | #else | |
57 | #include <asm/scatterlist.h> | |
58 | #endif | |
59 | #include <asm/io.h> | |
60 | #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17)) | |
61 | #include <linux/devfs_fs_kernel.h> | |
62 | #endif | |
63 | ||
64 | /* driver */ | |
65 | #define MODULE_NAME "hio" | |
da3355df | 66 | #define DRIVER_VERSION "2.1.0.40" |
361ebed5 HSDT |
67 | #define DRIVER_VERSION_LEN 16 |
68 | ||
69 | #define SSD_FW_MIN 0x1 | |
70 | ||
71 | #define SSD_DEV_NAME MODULE_NAME | |
72 | #define SSD_DEV_NAME_LEN 16 | |
73 | #define SSD_CDEV_NAME "c"SSD_DEV_NAME | |
74 | #define SSD_SDEV_NAME "s"SSD_DEV_NAME | |
75 | ||
76 | ||
77 | #define SSD_CMAJOR 0 | |
78 | #define SSD_MAJOR 0 | |
79 | #define SSD_MAJOR_SL 0 | |
80 | #define SSD_MINORS 16 | |
81 | ||
82 | #define SSD_MAX_DEV 702 | |
83 | #define SSD_ALPHABET_NUM 26 | |
84 | ||
85 | #define hio_info(f, arg...) printk(KERN_INFO MODULE_NAME"info: " f , ## arg) | |
86 | #define hio_note(f, arg...) printk(KERN_NOTICE MODULE_NAME"note: " f , ## arg) | |
87 | #define hio_warn(f, arg...) printk(KERN_WARNING MODULE_NAME"warn: " f , ## arg) | |
88 | #define hio_err(f, arg...) printk(KERN_ERR MODULE_NAME"err: " f , ## arg) | |
89 | ||
90 | /* slave port */ | |
91 | #define SSD_SLAVE_PORT_DEVID 0x000a | |
92 | ||
93 | /* int mode */ | |
94 | ||
95 | /* 2.6.9 msi affinity bug, should turn msi & msi-x off */ | |
96 | //#define SSD_MSI | |
97 | #define SSD_ESCAPE_IRQ | |
98 | ||
99 | //#define SSD_MSIX | |
100 | #ifndef MODULE | |
101 | #define SSD_MSIX | |
102 | #endif | |
103 | #define SSD_MSIX_VEC 8 | |
104 | #ifdef SSD_MSIX | |
105 | #undef SSD_MSI | |
da3355df | 106 | #undef SSD_ESCAPE_IRQ |
361ebed5 HSDT |
107 | #define SSD_MSIX_AFFINITY_FORCE |
108 | #endif | |
109 | ||
110 | #define SSD_TRIM | |
111 | ||
112 | /* Over temperature protect */ | |
113 | #define SSD_OT_PROTECT | |
114 | ||
115 | #ifdef SSD_QUEUE_PBIO | |
116 | #define BIO_SSD_PBIO 20 | |
117 | #endif | |
118 | ||
119 | /* debug */ | |
120 | //#define SSD_DEBUG_ERR | |
121 | ||
122 | /* cmd timer */ | |
123 | #define SSD_CMD_TIMEOUT (60*HZ) | |
124 | ||
125 | /* i2c & smbus */ | |
126 | #define SSD_SPI_TIMEOUT (5*HZ) | |
127 | #define SSD_I2C_TIMEOUT (5*HZ) | |
128 | ||
129 | #define SSD_I2C_MAX_DATA (127) | |
130 | #define SSD_SMBUS_BLOCK_MAX (32) | |
131 | #define SSD_SMBUS_DATA_MAX (SSD_SMBUS_BLOCK_MAX + 2) | |
132 | ||
133 | /* wait for init */ | |
134 | #define SSD_INIT_WAIT (1000) //1s | |
135 | #define SSD_CONTROLLER_WAIT (20*1000/SSD_INIT_WAIT) //20s | |
136 | #define SSD_INIT_MAX_WAIT (500*1000/SSD_INIT_WAIT) //500s | |
137 | #define SSD_INIT_MAX_WAIT_V3_2 (1400*1000/SSD_INIT_WAIT) //1400s | |
138 | #define SSD_RAM_INIT_MAX_WAIT (10*1000/SSD_INIT_WAIT) //10s | |
139 | #define SSD_CH_INFO_MAX_WAIT (10*1000/SSD_INIT_WAIT) //10s | |
140 | ||
141 | /* blkdev busy wait */ | |
142 | #define SSD_DEV_BUSY_WAIT 1000 //ms | |
143 | #define SSD_DEV_BUSY_MAX_WAIT (8*1000/SSD_DEV_BUSY_WAIT) //8s | |
144 | ||
145 | /* smbus retry */ | |
146 | #define SSD_SMBUS_RETRY_INTERVAL (5) //ms | |
147 | #define SSD_SMBUS_RETRY_MAX (1000/SSD_SMBUS_RETRY_INTERVAL) | |
148 | ||
149 | #define SSD_BM_RETRY_MAX 7 | |
150 | ||
151 | /* bm routine interval */ | |
152 | #define SSD_BM_CAP_LEARNING_DELAY (10*60*1000) | |
153 | ||
154 | /* routine interval */ | |
155 | #define SSD_ROUTINE_INTERVAL (10*1000) //10s | |
156 | #define SSD_HWMON_ROUTINE_TICK (60*1000/SSD_ROUTINE_INTERVAL) | |
157 | #define SSD_CAPMON_ROUTINE_TICK ((3600*1000/SSD_ROUTINE_INTERVAL)*24*30) | |
158 | #define SSD_CAPMON2_ROUTINE_TICK (10*60*1000/SSD_ROUTINE_INTERVAL) //fault recover | |
159 | ||
160 | /* dma align */ | |
161 | #define SSD_DMA_ALIGN (16) | |
162 | ||
163 | /* some hw defalut */ | |
164 | #define SSD_LOG_MAX_SZ 4096 | |
165 | ||
166 | #define SSD_NAND_OOB_SZ 1024 | |
167 | #define SSD_NAND_ID_SZ 8 | |
168 | #define SSD_NAND_ID_BUFF_SZ 1024 | |
169 | #define SSD_NAND_MAX_CE 2 | |
170 | ||
171 | #define SSD_BBT_RESERVED 8 | |
172 | ||
173 | #define SSD_ECC_MAX_FLIP (64+1) | |
174 | ||
175 | #define SSD_RAM_ALIGN 16 | |
176 | ||
177 | ||
178 | #define SSD_RELOAD_FLAG 0x3333CCCC | |
179 | #define SSD_RELOAD_FW 0xAA5555AA | |
180 | #define SSD_RESET_NOINIT 0xAA5555AA | |
181 | #define SSD_RESET 0x55AAAA55 | |
182 | #define SSD_RESET_FULL 0x5A | |
183 | //#define SSD_RESET_WAIT 1000 //1s | |
184 | //#define SSD_RESET_MAX_WAIT (200*1000/SSD_RESET_WAIT) //200s | |
185 | ||
186 | ||
187 | /* reverion 1 */ | |
188 | #define SSD_PROTOCOL_V1 0x0 | |
189 | ||
190 | #define SSD_ROM_SIZE (16*1024*1024) | |
191 | #define SSD_ROM_BLK_SIZE (256*1024) | |
192 | #define SSD_ROM_PAGE_SIZE (256) | |
193 | #define SSD_ROM_NR_BRIDGE_FW 2 | |
194 | #define SSD_ROM_NR_CTRL_FW 2 | |
195 | #define SSD_ROM_BRIDGE_FW_BASE 0 | |
196 | #define SSD_ROM_BRIDGE_FW_SIZE (2*1024*1024) | |
197 | #define SSD_ROM_CTRL_FW_BASE (SSD_ROM_NR_BRIDGE_FW*SSD_ROM_BRIDGE_FW_SIZE) | |
198 | #define SSD_ROM_CTRL_FW_SIZE (5*1024*1024) | |
199 | #define SSD_ROM_LABEL_BASE (SSD_ROM_CTRL_FW_BASE+SSD_ROM_CTRL_FW_SIZE*SSD_ROM_NR_CTRL_FW) | |
200 | #define SSD_ROM_VP_BASE (SSD_ROM_LABEL_BASE+SSD_ROM_BLK_SIZE) | |
201 | ||
202 | /* reverion 3 */ | |
203 | #define SSD_PROTOCOL_V3 0x3000000 | |
204 | #define SSD_PROTOCOL_V3_1_1 0x3010001 | |
205 | #define SSD_PROTOCOL_V3_1_3 0x3010003 | |
206 | #define SSD_PROTOCOL_V3_2 0x3020000 | |
207 | #define SSD_PROTOCOL_V3_2_1 0x3020001 /* <4KB improved */ | |
208 | #define SSD_PROTOCOL_V3_2_2 0x3020002 /* ot protect */ | |
209 | #define SSD_PROTOCOL_V3_2_4 0x3020004 | |
210 | ||
211 | ||
212 | #define SSD_PV3_ROM_NR_BM_FW 1 | |
213 | #define SSD_PV3_ROM_BM_FW_SZ (64*1024*8) | |
214 | ||
215 | #define SSD_ROM_LOG_SZ (64*1024*4) | |
216 | ||
217 | #define SSD_ROM_NR_SMART_MAX 2 | |
218 | #define SSD_PV3_ROM_NR_SMART SSD_ROM_NR_SMART_MAX | |
219 | #define SSD_PV3_ROM_SMART_SZ (64*1024) | |
220 | ||
221 | /* reverion 3.2 */ | |
222 | #define SSD_PV3_2_ROM_LOG_SZ (64*1024*80) /* 5MB */ | |
223 | #define SSD_PV3_2_ROM_SEC_SZ (256*1024) /* 256KB */ | |
224 | ||
225 | ||
226 | /* register */ | |
227 | #define SSD_REQ_FIFO_REG 0x0000 | |
228 | #define SSD_RESP_FIFO_REG 0x0008 //0x0010 | |
229 | #define SSD_RESP_PTR_REG 0x0010 //0x0018 | |
230 | #define SSD_INTR_INTERVAL_REG 0x0018 | |
231 | #define SSD_READY_REG 0x001C | |
232 | #define SSD_BRIDGE_TEST_REG 0x0020 | |
233 | #define SSD_STRIPE_SIZE_REG 0x0028 | |
234 | #define SSD_CTRL_VER_REG 0x0030 //controller | |
235 | #define SSD_BRIDGE_VER_REG 0x0034 //bridge | |
236 | #define SSD_PCB_VER_REG 0x0038 | |
237 | #define SSD_BURN_FLAG_REG 0x0040 | |
238 | #define SSD_BRIDGE_INFO_REG 0x0044 | |
239 | ||
240 | #define SSD_WL_VAL_REG 0x0048 //32-bit | |
241 | ||
242 | #define SSD_BB_INFO_REG 0x004C | |
243 | ||
244 | #define SSD_ECC_TEST_REG 0x0050 //test only | |
245 | #define SSD_ERASE_TEST_REG 0x0058 //test only | |
246 | #define SSD_WRITE_TEST_REG 0x0060 //test only | |
247 | ||
248 | #define SSD_RESET_REG 0x0068 | |
249 | #define SSD_RELOAD_FW_REG 0x0070 | |
250 | ||
251 | #define SSD_RESERVED_BLKS_REG 0x0074 | |
252 | #define SSD_VALID_PAGES_REG 0x0078 | |
253 | #define SSD_CH_INFO_REG 0x007C | |
254 | ||
255 | #define SSD_CTRL_TEST_REG_SZ 0x8 | |
256 | #define SSD_CTRL_TEST_REG0 0x0080 | |
257 | #define SSD_CTRL_TEST_REG1 0x0088 | |
258 | #define SSD_CTRL_TEST_REG2 0x0090 | |
259 | #define SSD_CTRL_TEST_REG3 0x0098 | |
260 | #define SSD_CTRL_TEST_REG4 0x00A0 | |
261 | #define SSD_CTRL_TEST_REG5 0x00A8 | |
262 | #define SSD_CTRL_TEST_REG6 0x00B0 | |
263 | #define SSD_CTRL_TEST_REG7 0x00B8 | |
264 | ||
265 | #define SSD_FLASH_INFO_REG0 0x00C0 | |
266 | #define SSD_FLASH_INFO_REG1 0x00C8 | |
267 | #define SSD_FLASH_INFO_REG2 0x00D0 | |
268 | #define SSD_FLASH_INFO_REG3 0x00D8 | |
269 | #define SSD_FLASH_INFO_REG4 0x00E0 | |
270 | #define SSD_FLASH_INFO_REG5 0x00E8 | |
271 | #define SSD_FLASH_INFO_REG6 0x00F0 | |
272 | #define SSD_FLASH_INFO_REG7 0x00F8 | |
273 | ||
274 | #define SSD_RESP_INFO_REG 0x01B8 | |
275 | #define SSD_NAND_BUFF_BASE 0x01BC //for nand write | |
276 | ||
277 | #define SSD_CHIP_INFO_REG_SZ 0x10 | |
278 | #define SSD_CHIP_INFO_REG0 0x0100 //128 bit | |
279 | #define SSD_CHIP_INFO_REG1 0x0110 | |
280 | #define SSD_CHIP_INFO_REG2 0x0120 | |
281 | #define SSD_CHIP_INFO_REG3 0x0130 | |
282 | #define SSD_CHIP_INFO_REG4 0x0140 | |
283 | #define SSD_CHIP_INFO_REG5 0x0150 | |
284 | #define SSD_CHIP_INFO_REG6 0x0160 | |
285 | #define SSD_CHIP_INFO_REG7 0x0170 | |
286 | ||
287 | #define SSD_RAM_INFO_REG 0x01C4 | |
288 | ||
289 | #define SSD_BBT_BASE_REG 0x01C8 | |
290 | #define SSD_ECT_BASE_REG 0x01CC | |
291 | ||
292 | #define SSD_CLEAR_INTR_REG 0x01F0 | |
293 | ||
294 | #define SSD_INIT_STATE_REG_SZ 0x8 | |
295 | #define SSD_INIT_STATE_REG0 0x0200 | |
296 | #define SSD_INIT_STATE_REG1 0x0208 | |
297 | #define SSD_INIT_STATE_REG2 0x0210 | |
298 | #define SSD_INIT_STATE_REG3 0x0218 | |
299 | #define SSD_INIT_STATE_REG4 0x0220 | |
300 | #define SSD_INIT_STATE_REG5 0x0228 | |
301 | #define SSD_INIT_STATE_REG6 0x0230 | |
302 | #define SSD_INIT_STATE_REG7 0x0238 | |
303 | ||
304 | #define SSD_ROM_INFO_REG 0x0600 | |
305 | #define SSD_ROM_BRIDGE_FW_INFO_REG 0x0604 | |
306 | #define SSD_ROM_CTRL_FW_INFO_REG 0x0608 | |
307 | #define SSD_ROM_VP_INFO_REG 0x060C | |
308 | ||
309 | #define SSD_LOG_INFO_REG 0x0610 | |
310 | #define SSD_LED_REG 0x0614 | |
311 | #define SSD_MSG_BASE_REG 0x06F8 | |
312 | ||
313 | /*spi reg */ | |
314 | #define SSD_SPI_REG_CMD 0x0180 | |
315 | #define SSD_SPI_REG_CMD_HI 0x0184 | |
316 | #define SSD_SPI_REG_WDATA 0x0188 | |
317 | #define SSD_SPI_REG_ID 0x0190 | |
318 | #define SSD_SPI_REG_STATUS 0x0198 | |
319 | #define SSD_SPI_REG_RDATA 0x01A0 | |
320 | #define SSD_SPI_REG_READY 0x01A8 | |
321 | ||
322 | /* i2c register */ | |
323 | #define SSD_I2C_CTRL_REG 0x06F0 | |
324 | #define SSD_I2C_RDATA_REG 0x06F4 | |
325 | ||
326 | /* temperature reg */ | |
327 | #define SSD_BRIGE_TEMP_REG 0x0618 | |
328 | ||
329 | #define SSD_CTRL_TEMP_REG0 0x0700 | |
330 | #define SSD_CTRL_TEMP_REG1 0x0708 | |
331 | #define SSD_CTRL_TEMP_REG2 0x0710 | |
332 | #define SSD_CTRL_TEMP_REG3 0x0718 | |
333 | #define SSD_CTRL_TEMP_REG4 0x0720 | |
334 | #define SSD_CTRL_TEMP_REG5 0x0728 | |
335 | #define SSD_CTRL_TEMP_REG6 0x0730 | |
336 | #define SSD_CTRL_TEMP_REG7 0x0738 | |
337 | ||
338 | /* reversion 3 reg */ | |
339 | #define SSD_PROTOCOL_VER_REG 0x01B4 | |
340 | ||
341 | #define SSD_FLUSH_TIMEOUT_REG 0x02A4 | |
342 | #define SSD_BM_FAULT_REG 0x0660 | |
343 | ||
344 | #define SSD_PV3_RAM_STATUS_REG_SZ 0x4 | |
345 | #define SSD_PV3_RAM_STATUS_REG0 0x0260 | |
346 | #define SSD_PV3_RAM_STATUS_REG1 0x0264 | |
347 | #define SSD_PV3_RAM_STATUS_REG2 0x0268 | |
348 | #define SSD_PV3_RAM_STATUS_REG3 0x026C | |
349 | #define SSD_PV3_RAM_STATUS_REG4 0x0270 | |
350 | #define SSD_PV3_RAM_STATUS_REG5 0x0274 | |
351 | #define SSD_PV3_RAM_STATUS_REG6 0x0278 | |
352 | #define SSD_PV3_RAM_STATUS_REG7 0x027C | |
353 | ||
354 | #define SSD_PV3_CHIP_INFO_REG_SZ 0x40 | |
355 | #define SSD_PV3_CHIP_INFO_REG0 0x0300 | |
356 | #define SSD_PV3_CHIP_INFO_REG1 0x0340 | |
357 | #define SSD_PV3_CHIP_INFO_REG2 0x0380 | |
358 | #define SSD_PV3_CHIP_INFO_REG3 0x03B0 | |
359 | #define SSD_PV3_CHIP_INFO_REG4 0x0400 | |
360 | #define SSD_PV3_CHIP_INFO_REG5 0x0440 | |
361 | #define SSD_PV3_CHIP_INFO_REG6 0x0480 | |
362 | #define SSD_PV3_CHIP_INFO_REG7 0x04B0 | |
363 | ||
364 | #define SSD_PV3_INIT_STATE_REG_SZ 0x20 | |
365 | #define SSD_PV3_INIT_STATE_REG0 0x0500 | |
366 | #define SSD_PV3_INIT_STATE_REG1 0x0520 | |
367 | #define SSD_PV3_INIT_STATE_REG2 0x0540 | |
368 | #define SSD_PV3_INIT_STATE_REG3 0x0560 | |
369 | #define SSD_PV3_INIT_STATE_REG4 0x0580 | |
370 | #define SSD_PV3_INIT_STATE_REG5 0x05A0 | |
371 | #define SSD_PV3_INIT_STATE_REG6 0x05C0 | |
372 | #define SSD_PV3_INIT_STATE_REG7 0x05E0 | |
373 | ||
374 | /* reversion 3.1.1 reg */ | |
375 | #define SSD_FULL_RESET_REG 0x01B0 | |
376 | ||
377 | #define SSD_CTRL_REG_ZONE_SZ 0x800 | |
378 | ||
379 | #define SSD_BB_THRESHOLD_L1_REG 0x2C0 | |
380 | #define SSD_BB_THRESHOLD_L2_REG 0x2C4 | |
381 | ||
382 | #define SSD_BB_ACC_REG_SZ 0x4 | |
383 | #define SSD_BB_ACC_REG0 0x21C0 | |
384 | #define SSD_BB_ACC_REG1 0x29C0 | |
385 | #define SSD_BB_ACC_REG2 0x31C0 | |
386 | ||
387 | #define SSD_EC_THRESHOLD_L1_REG 0x2C8 | |
388 | #define SSD_EC_THRESHOLD_L2_REG 0x2CC | |
389 | ||
390 | #define SSD_EC_ACC_REG_SZ 0x4 | |
391 | #define SSD_EC_ACC_REG0 0x21E0 | |
392 | #define SSD_EC_ACC_REG1 0x29E0 | |
393 | #define SSD_EC_ACC_REG2 0x31E0 | |
394 | ||
395 | /* reversion 3.1.2 & 3.1.3 reg */ | |
396 | #define SSD_HW_STATUS_REG 0x02AC | |
397 | ||
398 | #define SSD_PLP_INFO_REG 0x0664 | |
399 | ||
400 | /*reversion 3.2 reg*/ | |
401 | #define SSD_POWER_ON_REG 0x01EC | |
402 | #define SSD_PCIE_LINKSTATUS_REG 0x01F8 | |
403 | #define SSD_PL_CAP_LEARN_REG 0x01FC | |
404 | ||
405 | #define SSD_FPGA_1V0_REG0 0x2070 | |
406 | #define SSD_FPGA_1V8_REG0 0x2078 | |
407 | #define SSD_FPGA_1V0_REG1 0x2870 | |
408 | #define SSD_FPGA_1V8_REG1 0x2878 | |
409 | ||
410 | /*reversion 3.2 reg*/ | |
411 | #define SSD_READ_OT_REG0 0x2260 | |
412 | #define SSD_WRITE_OT_REG0 0x2264 | |
413 | #define SSD_READ_OT_REG1 0x2A60 | |
414 | #define SSD_WRITE_OT_REG1 0x2A64 | |
415 | ||
416 | ||
417 | /* function */ | |
418 | #define SSD_FUNC_READ 0x01 | |
419 | #define SSD_FUNC_WRITE 0x02 | |
420 | #define SSD_FUNC_NAND_READ_WOOB 0x03 | |
421 | #define SSD_FUNC_NAND_READ 0x04 | |
422 | #define SSD_FUNC_NAND_WRITE 0x05 | |
423 | #define SSD_FUNC_NAND_ERASE 0x06 | |
424 | #define SSD_FUNC_NAND_READ_ID 0x07 | |
425 | #define SSD_FUNC_READ_LOG 0x08 | |
426 | #define SSD_FUNC_TRIM 0x09 | |
427 | #define SSD_FUNC_RAM_READ 0x10 | |
428 | #define SSD_FUNC_RAM_WRITE 0x11 | |
429 | #define SSD_FUNC_FLUSH 0x12 //cache / bbt | |
430 | ||
431 | /* spi function */ | |
432 | #define SSD_SPI_CMD_PROGRAM 0x02 | |
433 | #define SSD_SPI_CMD_READ 0x03 | |
434 | #define SSD_SPI_CMD_W_DISABLE 0x04 | |
435 | #define SSD_SPI_CMD_READ_STATUS 0x05 | |
436 | #define SSD_SPI_CMD_W_ENABLE 0x06 | |
437 | #define SSD_SPI_CMD_ERASE 0xd8 | |
438 | #define SSD_SPI_CMD_CLSR 0x30 | |
439 | #define SSD_SPI_CMD_READ_ID 0x9f | |
440 | ||
441 | /* i2c */ | |
442 | #define SSD_I2C_CTRL_READ 0x00 | |
443 | #define SSD_I2C_CTRL_WRITE 0x01 | |
444 | ||
445 | /* i2c internal register */ | |
446 | #define SSD_I2C_CFG_REG 0x00 | |
447 | #define SSD_I2C_DATA_REG 0x01 | |
448 | #define SSD_I2C_CMD_REG 0x02 | |
449 | #define SSD_I2C_STATUS_REG 0x03 | |
450 | #define SSD_I2C_SADDR_REG 0x04 | |
451 | #define SSD_I2C_LEN_REG 0x05 | |
452 | #define SSD_I2C_RLEN_REG 0x06 | |
453 | #define SSD_I2C_WLEN_REG 0x07 | |
454 | #define SSD_I2C_RESET_REG 0x08 //write for reset | |
455 | #define SSD_I2C_PRER_REG 0x09 | |
456 | ||
457 | ||
458 | /* hw mon */ | |
459 | /* FPGA volt = ADC_value / 4096 * 3v */ | |
460 | #define SSD_FPGA_1V0_ADC_MIN 1228 // 0.9v | |
461 | #define SSD_FPGA_1V0_ADC_MAX 1502 // 1.1v | |
462 | #define SSD_FPGA_1V8_ADC_MIN 2211 // 1.62v | |
463 | #define SSD_FPGA_1V8_ADC_MAX 2703 // 1.98 | |
464 | ||
465 | /* ADC value */ | |
466 | #define SSD_FPGA_VOLT_MAX(val) (((val) & 0xffff) >> 4) | |
467 | #define SSD_FPGA_VOLT_MIN(val) (((val >> 16) & 0xffff) >> 4) | |
468 | #define SSD_FPGA_VOLT_CUR(val) (((val >> 32) & 0xffff) >> 4) | |
469 | #define SSD_FPGA_VOLT(val) ((val * 3000) >> 12) | |
470 | ||
471 | #define SSD_VOLT_LOG_DATA(idx, ctrl, volt) (((uint32_t)idx << 24) | ((uint32_t)ctrl << 16) | ((uint32_t)volt)) | |
472 | ||
/* monitored FPGA supply rails */
enum ssd_fpga_volt
{
	SSD_FPGA_1V0 = 0,	/* 1.0V core rail */
	SSD_FPGA_1V8,		/* 1.8V rail */
	SSD_FPGA_VOLT_NR	/* number of monitored rails */
};

/* monitored clock fault conditions */
enum ssd_clock
{
	SSD_CLOCK_166M_LOST = 0,
	SSD_CLOCK_166M_SKEW,
	SSD_CLOCK_156M_LOST,
	SSD_CLOCK_156M_SKEW,
	SSD_CLOCK_NR		/* number of clock fault types */
};
488 | ||
489 | /* sensor */ | |
490 | #define SSD_SENSOR_LM75_SADDRESS (0x49 << 1) | |
491 | #define SSD_SENSOR_LM80_SADDRESS (0x28 << 1) | |
492 | ||
493 | #define SSD_SENSOR_CONVERT_TEMP(val) ((int)(val >> 8)) | |
494 | ||
495 | #define SSD_INLET_OT_TEMP (55) //55 DegC | |
496 | #define SSD_INLET_OT_HYST (50) //50 DegC | |
497 | #define SSD_FLASH_OT_TEMP (70) //70 DegC | |
498 | #define SSD_FLASH_OT_HYST (65) //65 DegC | |
499 | ||
/* on-board temperature sensor chips */
enum ssd_sensor
{
	SSD_SENSOR_LM80 = 0,
	SSD_SENSOR_LM75,
	SSD_SENSOR_NR		/* number of sensor chips */
};


/* lm75 internal register index */
enum ssd_lm75_reg
{
	SSD_LM75_REG_TEMP = 0,	/* temperature reading */
	SSD_LM75_REG_CONF,	/* configuration */
	SSD_LM75_REG_THYST,	/* hysteresis threshold */
	SSD_LM75_REG_TOS	/* overtemperature shutdown threshold */
};
516 | ||
517 | /* lm96080 */ | |
518 | #define SSD_LM80_REG_IN_MAX(nr) (0x2a + (nr) * 2) | |
519 | #define SSD_LM80_REG_IN_MIN(nr) (0x2b + (nr) * 2) | |
520 | #define SSD_LM80_REG_IN(nr) (0x20 + (nr)) | |
521 | ||
522 | #define SSD_LM80_REG_FAN1 0x28 | |
523 | #define SSD_LM80_REG_FAN2 0x29 | |
524 | #define SSD_LM80_REG_FAN_MIN(nr) (0x3b + (nr)) | |
525 | ||
526 | #define SSD_LM80_REG_TEMP 0x27 | |
527 | #define SSD_LM80_REG_TEMP_HOT_MAX 0x38 | |
528 | #define SSD_LM80_REG_TEMP_HOT_HYST 0x39 | |
529 | #define SSD_LM80_REG_TEMP_OS_MAX 0x3a | |
530 | #define SSD_LM80_REG_TEMP_OS_HYST 0x3b | |
531 | ||
532 | #define SSD_LM80_REG_CONFIG 0x00 | |
533 | #define SSD_LM80_REG_ALARM1 0x01 | |
534 | #define SSD_LM80_REG_ALARM2 0x02 | |
535 | #define SSD_LM80_REG_MASK1 0x03 | |
536 | #define SSD_LM80_REG_MASK2 0x04 | |
537 | #define SSD_LM80_REG_FANDIV 0x05 | |
538 | #define SSD_LM80_REG_RES 0x06 | |
539 | ||
540 | #define SSD_LM80_CONVERT_VOLT(val) ((val * 10) >> 8) | |
541 | ||
542 | #define SSD_LM80_3V3_VOLT(val) ((val)*33/19) | |
543 | ||
544 | #define SSD_LM80_CONV_INTERVAL (1000) | |
545 | ||
/* lm80 voltage-monitor input channels, in wiring order */
enum ssd_lm80_in
{
	SSD_LM80_IN_CAP = 0,	/* power-loss capacitor voltage */
	SSD_LM80_IN_1V2,
	SSD_LM80_IN_1V2a,
	SSD_LM80_IN_1V5,
	SSD_LM80_IN_1V8,
	SSD_LM80_IN_FPGA_3V3,
	SSD_LM80_IN_3V3,
	SSD_LM80_IN_NR		/* number of monitored inputs */
};

/* acceptable [low, high] window for one lm80 input */
struct ssd_lm80_limit
{
	uint8_t low;
	uint8_t high;
};

/* +/- 5% tolerance except cap in */
static struct ssd_lm80_limit ssd_lm80_limit[SSD_LM80_IN_NR] = {
	{171, 217},	/* CAP in: 1710 ~ 2170 */
	{114, 126},	/* 1V2 */
	{114, 126},	/* 1V2a */
	{142, 158},	/* 1V5 */
	{171, 189},	/* 1V8 */
	{180, 200},	/* FPGA 3V3 */
	{180, 200},	/* 3V3 */
};
574 | ||
/* temperature measurement points on the board */
enum ssd_temp_sensor
{
	SSD_TEMP_INLET = 0,	/* air inlet */
	SSD_TEMP_FLASH,		/* flash array */
	SSD_TEMP_CTRL,		/* controller */
	SSD_TEMP_NR		/* number of measurement points */
};
583 | ||
584 | ||
585 | #ifdef SSD_OT_PROTECT | |
586 | #define SSD_OT_DELAY (60) //ms | |
587 | ||
588 | #define SSD_OT_TEMP (90) //90 DegC | |
589 | ||
590 | #define SSD_OT_TEMP_HYST (85) //85 DegC | |
591 | #endif | |
592 | ||
593 | /* fpga temperature */ | |
594 | //#define CONVERT_TEMP(val) ((float)(val)*503.975f/4096.0f-273.15f) | |
595 | #define CONVERT_TEMP(val) ((val)*504/4096-273) | |
596 | ||
597 | #define MAX_TEMP(val) CONVERT_TEMP(((val & 0xffff) >> 4)) | |
598 | #define MIN_TEMP(val) CONVERT_TEMP((((val>>16) & 0xffff) >> 4)) | |
599 | #define CUR_TEMP(val) CONVERT_TEMP((((val>>32) & 0xffff) >> 4)) | |
600 | ||
601 | ||
602 | /* CAP monitor */ | |
603 | #define SSD_PL_CAP_U1 SSD_LM80_REG_IN(SSD_LM80_IN_CAP) | |
604 | #define SSD_PL_CAP_U2 SSD_LM80_REG_IN(SSD_LM80_IN_1V8) | |
605 | #define SSD_PL_CAP_LEARN(u1, u2, t) ((t*(u1+u2))/(2*162*(u1-u2))) | |
606 | #define SSD_PL_CAP_LEARN_WAIT (20) //20ms | |
607 | #define SSD_PL_CAP_LEARN_MAX_WAIT (1000/SSD_PL_CAP_LEARN_WAIT) //1s | |
608 | ||
609 | #define SSD_PL_CAP_CHARGE_WAIT (1000) | |
610 | #define SSD_PL_CAP_CHARGE_MAX_WAIT ((120*1000)/SSD_PL_CAP_CHARGE_WAIT) //120s | |
611 | ||
612 | #define SSD_PL_CAP_VOLT(val) (val*7) | |
613 | ||
614 | #define SSD_PL_CAP_VOLT_FULL (13700) | |
615 | #define SSD_PL_CAP_VOLT_READY (12880) | |
616 | ||
617 | #define SSD_PL_CAP_THRESHOLD (8900) | |
618 | #define SSD_PL_CAP_CP_THRESHOLD (5800) | |
619 | #define SSD_PL_CAP_THRESHOLD_HYST (100) | |
620 | ||
/* power-loss capacitor monitor slots */
enum ssd_pl_cap_status
{
	SSD_PL_CAP = 0,
	SSD_PL_CAP_NR		/* number of monitored cap banks */
};

/* power-loss capacitor bank population */
enum ssd_pl_cap_type
{
	SSD_PL_CAP_DEFAULT = 0,	/* 4 cap */
	SSD_PL_CAP_CP		/* 3 cap */
};
632 | ||
633 | ||
634 | /* hwmon offset */ | |
635 | #define SSD_HWMON_OFFS_TEMP (0) | |
636 | #define SSD_HWMON_OFFS_SENSOR (SSD_HWMON_OFFS_TEMP + SSD_TEMP_NR) | |
637 | #define SSD_HWMON_OFFS_PL_CAP (SSD_HWMON_OFFS_SENSOR + SSD_SENSOR_NR) | |
638 | #define SSD_HWMON_OFFS_LM80 (SSD_HWMON_OFFS_PL_CAP + SSD_PL_CAP_NR) | |
639 | #define SSD_HWMON_OFFS_CLOCK (SSD_HWMON_OFFS_LM80 + SSD_LM80_IN_NR) | |
640 | #define SSD_HWMON_OFFS_FPGA (SSD_HWMON_OFFS_CLOCK + SSD_CLOCK_NR) | |
641 | ||
642 | #define SSD_HWMON_TEMP(idx) (SSD_HWMON_OFFS_TEMP + idx) | |
643 | #define SSD_HWMON_SENSOR(idx) (SSD_HWMON_OFFS_SENSOR + idx) | |
644 | #define SSD_HWMON_PL_CAP(idx) (SSD_HWMON_OFFS_PL_CAP + idx) | |
645 | #define SSD_HWMON_LM80(idx) (SSD_HWMON_OFFS_LM80 + idx) | |
646 | #define SSD_HWMON_CLOCK(idx) (SSD_HWMON_OFFS_CLOCK + idx) | |
647 | #define SSD_HWMON_FPGA(ctrl, idx) (SSD_HWMON_OFFS_FPGA + (ctrl * SSD_FPGA_VOLT_NR) + idx) | |
648 | ||
649 | ||
650 | ||
651 | /* fifo */ | |
652 | typedef struct sfifo | |
653 | { | |
654 | uint32_t in; | |
655 | uint32_t out; | |
656 | uint32_t size; | |
657 | uint32_t esize; | |
658 | uint32_t mask; | |
659 | spinlock_t lock; | |
660 | void *data; | |
661 | } sfifo_t; | |
662 | ||
663 | static int sfifo_alloc(struct sfifo *fifo, uint32_t size, uint32_t esize) | |
664 | { | |
665 | uint32_t __size = 1; | |
666 | ||
667 | if (!fifo || size > INT_MAX || esize == 0) { | |
668 | return -EINVAL; | |
669 | } | |
670 | ||
671 | while (__size < size) __size <<= 1; | |
672 | ||
673 | if (__size < 2) { | |
674 | return -EINVAL; | |
675 | } | |
676 | ||
677 | fifo->data = vmalloc(esize * __size); | |
678 | if (!fifo->data) { | |
679 | return -ENOMEM; | |
680 | } | |
681 | ||
682 | fifo->in = 0; | |
683 | fifo->out = 0; | |
684 | fifo->mask = __size - 1; | |
685 | fifo->size = __size; | |
686 | fifo->esize = esize; | |
687 | spin_lock_init(&fifo->lock); | |
688 | ||
689 | return 0; | |
690 | } | |
691 | ||
692 | static void sfifo_free(struct sfifo *fifo) | |
693 | { | |
694 | if (!fifo) { | |
695 | return; | |
696 | } | |
697 | ||
698 | vfree(fifo->data); | |
699 | fifo->data = NULL; | |
700 | fifo->in = 0; | |
701 | fifo->out = 0; | |
702 | fifo->mask = 0; | |
703 | fifo->size = 0; | |
704 | fifo->esize = 0; | |
705 | } | |
706 | ||
707 | static int __sfifo_put(struct sfifo *fifo, void *val) | |
708 | { | |
709 | if (((fifo->in + 1) & fifo->mask) == fifo->out) { | |
710 | return -1; | |
711 | } | |
712 | ||
713 | memcpy((fifo->data + (fifo->in * fifo->esize)), val, fifo->esize); | |
714 | fifo->in = (fifo->in + 1) & fifo->mask; | |
715 | ||
716 | return 0; | |
717 | } | |
718 | ||
719 | static int sfifo_put(struct sfifo *fifo, void *val) | |
720 | { | |
721 | int ret = 0; | |
722 | ||
723 | if (!fifo || !val) { | |
724 | return -EINVAL; | |
725 | } | |
726 | ||
727 | if (!in_interrupt()) { | |
728 | spin_lock_irq(&fifo->lock); | |
729 | ret = __sfifo_put(fifo, val); | |
730 | spin_unlock_irq(&fifo->lock); | |
731 | } else { | |
732 | spin_lock(&fifo->lock); | |
733 | ret = __sfifo_put(fifo, val); | |
734 | spin_unlock(&fifo->lock); | |
735 | } | |
736 | ||
737 | return ret; | |
738 | } | |
739 | ||
740 | static int __sfifo_get(struct sfifo *fifo, void *val) | |
741 | { | |
742 | if (fifo->out == fifo->in) { | |
743 | return -1; | |
744 | } | |
745 | ||
746 | memcpy(val, (fifo->data + (fifo->out * fifo->esize)), fifo->esize); | |
747 | fifo->out = (fifo->out + 1) & fifo->mask; | |
748 | ||
749 | return 0; | |
750 | } | |
751 | ||
752 | static int sfifo_get(struct sfifo *fifo, void *val) | |
753 | { | |
754 | int ret = 0; | |
755 | ||
756 | if (!fifo || !val) { | |
757 | return -EINVAL; | |
758 | } | |
759 | ||
760 | if (!in_interrupt()) { | |
761 | spin_lock_irq(&fifo->lock); | |
762 | ret = __sfifo_get(fifo, val); | |
763 | spin_unlock_irq(&fifo->lock); | |
764 | } else { | |
765 | spin_lock(&fifo->lock); | |
766 | ret = __sfifo_get(fifo, val); | |
767 | spin_unlock(&fifo->lock); | |
768 | } | |
769 | ||
770 | return ret; | |
771 | } | |
772 | ||
773 | /* bio list */ | |
774 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)) | |
775 | struct ssd_blist { | |
776 | struct bio *prev; | |
777 | struct bio *next; | |
778 | }; | |
779 | ||
780 | static inline void ssd_blist_init(struct ssd_blist *ssd_bl) | |
781 | { | |
782 | ssd_bl->prev = NULL; | |
783 | ssd_bl->next = NULL; | |
784 | } | |
785 | ||
786 | static inline struct bio *ssd_blist_get(struct ssd_blist *ssd_bl) | |
787 | { | |
788 | struct bio *bio = ssd_bl->prev; | |
789 | ||
790 | ssd_bl->prev = NULL; | |
791 | ssd_bl->next = NULL; | |
792 | ||
793 | return bio; | |
794 | } | |
795 | ||
796 | static inline void ssd_blist_add(struct ssd_blist *ssd_bl, struct bio *bio) | |
797 | { | |
798 | bio->bi_next = NULL; | |
799 | ||
800 | if (ssd_bl->next) { | |
801 | ssd_bl->next->bi_next = bio; | |
802 | } else { | |
803 | ssd_bl->prev = bio; | |
804 | } | |
805 | ||
806 | ssd_bl->next = bio; | |
807 | } | |
808 | ||
809 | #else | |
810 | #define ssd_blist bio_list | |
811 | #define ssd_blist_init bio_list_init | |
812 | #define ssd_blist_get bio_list_get | |
813 | #define ssd_blist_add bio_list_add | |
814 | #endif | |
815 | ||
816 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)) | |
817 | #define bio_start(bio) (bio->bi_sector) | |
818 | #else | |
819 | #define bio_start(bio) (bio->bi_iter.bi_sector) | |
820 | #endif | |
821 | ||
822 | /* mutex */ | |
823 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)) | |
824 | #define mutex_lock down | |
825 | #define mutex_unlock up | |
826 | #define mutex semaphore | |
827 | #define mutex_init init_MUTEX | |
828 | #endif | |
829 | ||
830 | /* i2c */ | |
/* i2c control register image: raw 32-bit value or decoded fields */
typedef union ssd_i2c_ctrl {
	uint32_t val;
	struct {
		uint8_t wdata;		/* byte to write */
		uint8_t addr;		/* target register address */
		uint16_t rw:1;		/* transfer direction flag */
		uint16_t pad:15;	/* unused */
	} bits;
}__attribute__((packed)) ssd_i2c_ctrl_t;
840 | ||
/* i2c read-data register image: raw 32-bit value or decoded fields */
typedef union ssd_i2c_data {
	uint32_t val;
	struct {
		uint32_t rdata:8;	/* byte read back */
		uint32_t valid:1;	/* rdata holds fresh data */
		uint32_t pad:23;	/* unused */
	} bits;
}__attribute__((packed)) ssd_i2c_data_t;
849 | ||
/* write mode */
enum ssd_write_mode
{
	SSD_WMODE_BUFFER = 0,
	SSD_WMODE_BUFFER_EX,
	SSD_WMODE_FUA,
	/* dummy */
	SSD_WMODE_AUTO,
	SSD_WMODE_DEFAULT
};

/* reset type */
enum ssd_reset_type
{
	SSD_RST_NOINIT = 0,	/* reset without re-init */
	SSD_RST_NORMAL,
	SSD_RST_FULL
};
868 | ||
/* ssd hardware message formats; layouts are fixed, hence packed */
typedef struct ssd_sg_entry
{
	uint64_t block:48;	/* starting block */
	uint64_t length:16;	/* length in blocks */
	uint64_t buf;		/* DMA buffer address */
}__attribute__((packed))ssd_sg_entry_t;

typedef struct ssd_rw_msg
{
	uint8_t tag;
	uint8_t flag;
	uint8_t nsegs;		/* number of sg entries */
	uint8_t fun;		/* command opcode */
	uint32_t reserved;	/* for 64-bit align */
	struct ssd_sg_entry sge[1];	/* base of the sg array */
}__attribute__((packed))ssd_rw_msg_t;

typedef struct ssd_resp_msg
{
	uint8_t tag;
	uint8_t status:2;
	uint8_t bitflip:6;
	uint8_t log;
	uint8_t fun;
	uint32_t reserved;
}__attribute__((packed))ssd_resp_msg_t;

typedef struct ssd_flush_msg
{
	uint8_t tag;
	uint8_t flag:2;		/* flash cache 0 or bbt 1 */
	uint8_t flash:6;
	uint8_t ctrl_idx;
	uint8_t fun;
	uint32_t reserved;	/* align */
}__attribute__((packed))ssd_flush_msg_t;

typedef struct ssd_nand_op_msg
{
	uint8_t tag;
	uint8_t flag;
	uint8_t ctrl_idx;
	uint8_t fun;
	uint32_t reserved;	/* align */
	uint16_t page_count;
	uint8_t chip_ce;
	uint8_t chip_no;
	uint32_t page_no;
	uint64_t buf;
}__attribute__((packed))ssd_nand_op_msg_t;

typedef struct ssd_ram_op_msg
{
	uint8_t tag;
	uint8_t flag;
	uint8_t ctrl_idx;
	uint8_t fun;
	uint32_t reserved;	/* align */
	uint32_t start;
	uint32_t length;
	uint64_t buf;
}__attribute__((packed))ssd_ram_op_msg_t;
932 | ||
933 | ||
/* log message formats; layouts are fixed, hence packed */
typedef struct ssd_log_msg
{
	uint8_t tag;
	uint8_t flag;
	uint8_t ctrl_idx;
	uint8_t fun;
	uint32_t reserved;	/* align */
	uint64_t buf;
}__attribute__((packed))ssd_log_msg_t;

typedef struct ssd_log_op_msg
{
	uint8_t tag;
	uint8_t flag;
	uint8_t ctrl_idx;
	uint8_t fun;
	uint32_t reserved;	/* align */
	uint64_t reserved1;	/* align */
	uint64_t buf;
}__attribute__((packed))ssd_log_op_msg_t;

typedef struct ssd_log_resp_msg
{
	uint8_t tag;
	uint16_t status :2;
	uint16_t reserved1 :2;	/* align with the normal resp msg */
	uint16_t nr_log :12;
	uint8_t fun;
	uint32_t reserved;
}__attribute__((packed))ssd_log_resp_msg_t;
965 | ||
966 | ||
/* resp msg */
/*
 * 64-bit response slot: viewable as a normal response, a log response, or
 * raw words for copying. (Tag name "msq" is historical — kept for ABI.)
 */
typedef union ssd_response_msq
{
	ssd_resp_msg_t resp_msg;		/* normal completion */
	ssd_log_resp_msg_t log_resp_msg;	/* log-fetch completion */
	uint64_t u64_msg;			/* raw 64-bit view */
	uint32_t u32_msg[2];			/* raw 32-bit view */
} ssd_response_msq_t;
975 | ||
976 | ||
/* custom struct */
/*
 * Firmware protocol layout: version plus register-file locations/sizes used
 * during device bring-up.
 */
typedef struct ssd_protocol_info
{
	uint32_t ver;			/* protocol version */
	uint32_t init_state_reg;	/* init-state register offset */
	uint32_t init_state_reg_sz;	/* init-state register size */
	uint32_t chip_info_reg;		/* chip-info register offset */
	uint32_t chip_info_reg_sz;	/* chip-info register size */
} ssd_protocol_info_t;
986 | ||
/*
 * Hardware description gathered at probe time: firmware versions, command
 * FIFO geometry, channel/chip topology, NAND geometry, on-card RAM layout
 * and metadata areas. Exported verbatim via SSD_CMD_GET_HW_INFO.
 */
typedef struct ssd_hw_info
{
	uint32_t bridge_ver;		/* bridge firmware version */
	uint32_t ctrl_ver;		/* controller firmware version */

	uint32_t cmd_fifo_sz;		/* command FIFO depth */
	uint32_t cmd_fifo_sz_mask;	/* depth - 1, for index wrapping */
	uint32_t cmd_max_sg;		/* max scatter/gather entries per command */
	uint32_t sg_max_sec;		/* max sectors per SG entry */
	uint32_t resp_ptr_sz;		/* response pointer entry size */
	uint32_t resp_msg_sz;		/* response message entry size */

	uint16_t nr_ctrl;		/* number of flash controllers */

	uint16_t nr_data_ch;		/* data channels per controller */
	uint16_t nr_ch;			/* channels in use */
	uint16_t max_ch;		/* channels supported */
	uint16_t nr_chip;		/* chips per channel */

	uint8_t pcb_ver;		/* main PCB version */
	uint8_t upper_pcb_ver;

	uint8_t nand_vendor_id;		/* NAND manufacturer id */
	uint8_t nand_dev_id;		/* NAND device id */

	uint8_t max_ce;			/* chip enables per chip */
	uint8_t id_size;		/* NAND id length */
	uint16_t oob_size;		/* out-of-band (spare) bytes per page */

	uint16_t bbf_pages;		/* bad-block-file pages */
	uint16_t bbf_seek;		//

	uint16_t page_count;		//per block
	uint32_t page_size;		/* bytes per page */
	uint32_t block_count;		//per flash

	uint64_t ram_size;		/* on-card RAM size */
	uint32_t ram_align;		/* required RAM access alignment */
	uint32_t ram_max_len;		/* max RAM transfer length */

	uint64_t bbt_base;		/* bad-block-table base */
	uint32_t bbt_size;
	uint64_t md_base;		//metadata
	uint32_t md_size;
	uint32_t md_entry_sz;		/* metadata entry size */

	uint32_t log_sz;		/* device log area size */

	uint64_t nand_wbuff_base;	/* NAND write-buffer base */

	uint32_t md_reserved_blks;	/* blocks reserved for metadata */
	uint32_t reserved_blks;		/* general reserved blocks */
	uint32_t valid_pages;
	uint32_t max_valid_pages;
	uint64_t size;			/* exported capacity (sectors or bytes — confirm at use site) */
} ssd_hw_info_t;
1043 | ||
/*
 * Extended hardware info (SSD_CMD_GET_HW_INFO_EXT).
 * The pad keeps the struct at a fixed 64-byte size (5 + 59).
 */
typedef struct ssd_hw_info_extend
{
	uint8_t board_type;
	uint8_t cap_type;	/* capacitor type (see enum ssd_plp_type — confirm) */
	uint8_t plp_type;	/* power-loss-protection type */
	uint8_t work_mode;
	uint8_t form_factor;	/* see enum ssd_form_factor */

	uint8_t pad[59];	/* reserved; pads struct to 64 bytes */
}ssd_hw_info_extend_t;
1054 | ||
/*
 * Layout of the on-board ROM/flash: where each firmware image, log area,
 * SMART snapshot and label lives. Exported via SSD_CMD_GET_ROM_INFO.
 */
typedef struct ssd_rom_info
{
	uint32_t size;			/* total ROM size */
	uint32_t block_size;		/* erase block size */
	uint16_t page_size;		/* program page size */
	uint8_t nr_bridge_fw;		/* number of bridge firmware slots */
	uint8_t nr_ctrl_fw;		/* number of controller firmware slots */
	uint8_t nr_bm_fw;		/* number of battery-manager firmware slots */
	uint8_t nr_smart;		/* number of SMART snapshot slots */
	uint32_t bridge_fw_base;	/* bridge firmware base offset */
	uint32_t bridge_fw_sz;
	uint32_t ctrl_fw_base;		/* controller firmware base offset */
	uint32_t ctrl_fw_sz;
	uint32_t bm_fw_base;		/* battery-manager firmware base offset */
	uint32_t bm_fw_sz;
	uint32_t log_base;		/* log area base offset */
	uint32_t log_sz;
	uint32_t smart_base;		/* SMART area base offset */
	uint32_t smart_sz;
	uint32_t vp_base;		/* valid-pages area base offset */
	uint32_t label_base;		/* label area base offset */
} ssd_rom_info_t;
1077 | ||
/* debug info */
/*
 * Fault-injection modes selectable via SSD_CMD_DEBUG.
 */
enum ssd_debug_type
{
	SSD_DEBUG_NONE = 0,	/* injection disabled */
	SSD_DEBUG_READ_ERR,	/* inject read errors */
	SSD_DEBUG_WRITE_ERR,	/* inject write errors */
	SSD_DEBUG_RW_ERR,	/* inject both read and write errors */
	SSD_DEBUG_READ_TO,	/* inject read timeouts */
	SSD_DEBUG_WRITE_TO,	/* inject write timeouts */
	SSD_DEBUG_RW_TO,	/* inject read and write timeouts */
	SSD_DEBUG_LOG,		/* inject a log event */
	SSD_DEBUG_OFFLINE,	/* simulate device offline */
	SSD_DEBUG_NR		/* number of modes (bound check) */
};
1092 | ||
/*
 * Fault-injection descriptor: `type` selects an enum ssd_debug_type;
 * `data` carries either a byte range (for I/O injection) or a log event.
 */
typedef struct ssd_debug_info
{
	int type;			/* enum ssd_debug_type */
	union {
		struct {
			uint64_t off;	/* affected range start */
			uint32_t len;	/* affected range length */
		} loc;
		struct {
			int event;	/* log event id to inject */
			uint32_t extra;	/* event payload */
		} log;
	} data;
}ssd_debug_info_t;
1107 | ||
/* label */
#define SSD_LABEL_FIELD_SZ	32	/* fixed width of each label field */
#define SSD_SN_SZ	16		/* serial-number length actually used */

/*
 * Manufacturing label, v1/v2 layout (SSD_CMD_GET_LABEL).
 * All fields are fixed-width character arrays read from ROM.
 */
typedef struct ssd_label
{
	char date[SSD_LABEL_FIELD_SZ];	/* manufacturing date */
	char sn[SSD_LABEL_FIELD_SZ];	/* serial number */
	char part[SSD_LABEL_FIELD_SZ];	/* part number */
	char desc[SSD_LABEL_FIELD_SZ];	/* description */
	char other[SSD_LABEL_FIELD_SZ];
	char maf[SSD_LABEL_FIELD_SZ];	/* manufacturer */
} ssd_label_t;
1121 | ||
#define SSD_LABEL_DESC_SZ	256	/* v3 description field width */

/*
 * Manufacturing label, v3 layout (SSD_CMD_GET_LABEL2).
 */
typedef struct ssd_labelv3
{
	char boardtype[SSD_LABEL_FIELD_SZ];
	char barcode[SSD_LABEL_FIELD_SZ];	/* board barcode / serial */
	char item[SSD_LABEL_FIELD_SZ];
	char description[SSD_LABEL_DESC_SZ];
	char manufactured[SSD_LABEL_FIELD_SZ];	/* manufacturing date */
	char vendorname[SSD_LABEL_FIELD_SZ];
	char issuenumber[SSD_LABEL_FIELD_SZ];
	char cleicode[SSD_LABEL_FIELD_SZ];	/* CLEI code */
	char bom[SSD_LABEL_FIELD_SZ];
} ssd_labelv3_t;
1136 | ||
/* battery */
/* Battery-manager firmware version, reported by SSD_CMD_BM_GET_VER path. */
typedef struct ssd_battery_info
{
	uint32_t fw_ver;	/* battery manager firmware version */
} ssd_battery_info_t;
1142 | ||
/* ssd power stat */
/* Lifetime power statistics; persisted as part of struct ssd_smart. */
typedef struct ssd_power_stat
{
	uint64_t nr_poweron;	/* power-on count */
	uint64_t nr_powerloss;	/* unexpected power-loss count */
	uint64_t init_failed;	/* initialization failure count */
} ssd_power_stat_t;
1150 | ||
/* io stat */
/* Cumulative I/O statistics; persisted as part of struct ssd_smart and
 * also kept per-queue in struct ssd_queue. */
typedef struct ssd_io_stat
{
	uint64_t run_time;	/* accumulated run time */
	uint64_t nr_to;		/* command timeouts */
	uint64_t nr_ioerr;	/* I/O errors */
	uint64_t nr_rwerr;	/* read/write errors */
	uint64_t nr_read;	/* read commands completed */
	uint64_t nr_write;	/* write commands completed */
	uint64_t rsectors;	/* sectors read */
	uint64_t wsectors;	/* sectors written */
} ssd_io_stat_t;
1163 | ||
/* ecc */
/* Histogram of corrected bit-flips, indexed by flip count. */
typedef struct ssd_ecc_info
{
	uint64_t bitflip[SSD_ECC_MAX_FLIP];	/* bitflip[i]: events with i corrected bits — confirm indexing */
} ssd_ecc_info_t;
1169 | ||
/* log */
/* Severity levels for device log events (ascending severity). */
enum ssd_log_level
{
	SSD_LOG_LEVEL_INFO = 0,
	SSD_LOG_LEVEL_NOTICE,
	SSD_LOG_LEVEL_WARNING,
	SSD_LOG_LEVEL_ERR,
	SSD_LOG_NR_LEVEL	/* number of levels (array bound) */
};

/* Log counters: total plus a per-severity breakdown. */
typedef struct ssd_log_info
{
	uint64_t nr_log;			/* total events seen */
	uint64_t stat[SSD_LOG_NR_LEVEL];	/* events per severity level */
} ssd_log_info_t;
1185 | ||
/* S.M.A.R.T. */
/* Magic value marking a valid SMART record ("SSDSMART" little-endian). */
#define SSD_SMART_MAGIC	(0x5452414D53445353ull)

/*
 * Persistent health record (SSD_CMD_GET_SMART): power, I/O, ECC and log
 * statistics, validated by version + magic trailer.
 */
typedef struct ssd_smart
{
	struct ssd_power_stat pstat;
	struct ssd_io_stat io_stat;
	struct ssd_ecc_info ecc_info;
	struct ssd_log_info log_info;
	uint64_t version;	/* record format version */
	uint64_t magic;		/* must equal SSD_SMART_MAGIC */
} ssd_smart_t;
1198 | ||
/* internal log */
/* Driver-side buffer of fetched device log entries. */
typedef struct ssd_internal_log
{
	uint32_t nr_log;	/* entries currently stored in `log` */
	void *log;		/* entry buffer (allocation owned by the driver) */
} ssd_internal_log_t;
1205 | ||
/* ssd cmd */
/*
 * Per-command context: links the originating bio, its scatterlist, the
 * hardware message and the timeout/completion machinery.
 */
typedef struct ssd_cmd
{
	struct bio *bio;		/* originating block-layer request */
	struct scatterlist *sgl;	/* scatter/gather list for the transfer */
	struct list_head list;
	void *dev;			/* owning ssd_device (void* to avoid fwd ref) */
	int nsegs;			/* entries used in sgl */
	int flag; /*pbio(1) or bio(0)*/

	int tag;			/* command tag; index into the device tag map */
	void *msg;			/* hardware message buffer */
	dma_addr_t msg_dma;		/* DMA address of `msg` */

	unsigned long start_time;	/* submission time (for accounting/timeout) */

	int errors;			/* completion error status */
	unsigned int nr_log;		/* log entries reported by this command */

	struct timer_list cmd_timer;	/* per-command timeout timer */
	struct completion *waiting;	/* set for synchronous waiters */
} ssd_cmd_t;
1228 | ||
/* Hook that pushes a prepared command to the hardware queue. */
typedef void (*send_cmd_func)(struct ssd_cmd *);
/* Event notification callback. */
typedef int (*ssd_event_call)(struct gendisk *, int, int); /* gendisk, event id, event level */
1231 | ||
/* dcmd sz */
#define SSD_DCMD_MAX_SZ	32	/* max direct-command message size, bytes */

/*
 * Direct (pass-through) command: a raw message buffer dispatched outside
 * the normal bio path, pooled on the device dcmd_list.
 */
typedef struct ssd_dcmd
{
	struct list_head list;		/* linkage in the device's dcmd pool */
	void *dev;			/* owning ssd_device */
	uint8_t msg[SSD_DCMD_MAX_SZ];	/* raw message bytes */
} ssd_dcmd_t;
1241 | ||
1242 | ||
/*
 * Device lifecycle flags — apparently used as bit numbers in
 * ssd_device.state (test_bit/set_bit style); confirm at use sites.
 */
enum ssd_state {
	SSD_INIT_WORKQ,		/* workqueue initialized */
	SSD_INIT_BD,		/* block device initialized */
	SSD_ONLINE,		/* device usable */
	/* full reset */
	SSD_RESETING,		/* full reset in progress (sic) */
	/* hw log */
	SSD_LOG_HW,		/* hardware log fetch active */
	/* log err */
	SSD_LOG_ERR,		/* log subsystem error */
};
1254 | ||
#define SSD_QUEUE_NAME_LEN	16	/* per-queue name buffer size */

/*
 * Per-interrupt-vector completion queue: response ring state plus
 * per-queue statistics. One instance per MSI-X vector.
 */
typedef struct ssd_queue {
	char name[SSD_QUEUE_NAME_LEN];	/* queue name (e.g. for IRQ naming) */
	void *dev;			/* owning ssd_device */

	int idx;			/* queue index */

	uint32_t resp_idx;		/* current response ring position */
	uint32_t resp_idx_mask;		/* ring size - 1, for wrapping */
	uint32_t resp_msg_sz;		/* response message stride */

	void *resp_msg;			/* response message ring */
	void *resp_ptr;			/* response pointer area */

	struct ssd_cmd *cmd;		/* command table shared with the device */

	struct ssd_io_stat io_stat;	/* per-queue I/O statistics */
	struct ssd_ecc_info ecc_info;	/* per-queue ECC statistics */
} ssd_queue_t;
1274 | ||
/*
 * Per-card device state: PCI/MMIO resources, command and response rings,
 * send/done worker threads, statistics, monitoring work items and the
 * exported block device. One instance per probed SSD.
 */
typedef struct ssd_device {
	char name[SSD_DEV_NAME_LEN];	/* device name */

	int idx;		/* device index */
	int major;		/* block device major */
	int readonly;		/* nonzero: export read-only */

	int int_mode;		/* interrupt mode in use */
#ifdef SSD_ESCAPE_IRQ
	int irq_cpu;		/* CPU to steer the IRQ away from/to */
#endif

	int reload_fw;		/* firmware reload requested */

	int ot_delay; //in ms	/* over-temperature throttle delay */

	atomic_t refcnt;	/* open/reference count */
	atomic_t tocnt;		/* timeout count */
	atomic_t in_flight[2]; //r&w	/* outstanding reads [0] / writes [1] */

	uint64_t uptime;

	struct list_head list;	/* linkage in the global device list */
	struct pci_dev *pdev;	/* underlying PCI device */

	unsigned long mmio_base;	/* BAR physical base */
	unsigned long mmio_len;		/* BAR length */
	void __iomem *ctrlp;		/* mapped register base */

	struct mutex spi_mutex;	/* serializes SPI (ROM) access */
	struct mutex i2c_mutex;	/* serializes I2C/SMBus access */

	struct ssd_protocol_info protocol_info;
	struct ssd_hw_info hw_info;
	struct ssd_rom_info rom_info;
	struct ssd_label label;

	struct ssd_smart smart;		/* persistent health counters */

	/* submission side: bios queued to the send thread */
	atomic_t in_sendq;
	spinlock_t sendq_lock;
	struct ssd_blist sendq;
	struct task_struct *send_thread;
	wait_queue_head_t send_waitq;

	/* completion side: bios queued to the done thread */
	atomic_t in_doneq;
	spinlock_t doneq_lock;
	struct ssd_blist doneq;
	struct task_struct *done_thread;
	wait_queue_head_t done_waitq;

	struct ssd_dcmd *dcmd;		/* direct-command pool storage */
	spinlock_t dcmd_lock;
	struct list_head dcmd_list; /* direct cmd list */
	wait_queue_head_t dcmd_wq;	/* waiters for a free dcmd */

	unsigned long *tag_map;		/* bitmap of in-use command tags */
	wait_queue_head_t tag_wq;	/* waiters for a free tag */

	spinlock_t cmd_lock;
	struct ssd_cmd *cmd;		/* command table, indexed by tag */
	send_cmd_func scmd;		/* hardware submit hook */

	ssd_event_call event_call;	/* event notification callback */
	void *msg_base;			/* command message area */
	dma_addr_t msg_base_dma;

	uint32_t resp_idx;		/* global response ring position */
	void *resp_msg_base;		/* response message ring */
	void *resp_ptr_base;		/* response pointer area */
	dma_addr_t resp_msg_base_dma;
	dma_addr_t resp_ptr_base_dma;

	int nr_queue;			/* completion queues in use */
	struct msix_entry entry[SSD_MSIX_VEC];	/* MSI-X vector table */
	struct ssd_queue queue[SSD_MSIX_VEC];	/* per-vector queues */

	struct request_queue *rq; /* The device request queue */
	struct gendisk *gd; /* The gendisk structure */

	struct mutex internal_log_mutex;	/* protects internal_log */
	struct ssd_internal_log internal_log;	/* fetched device log */
	struct workqueue_struct *workq;		/* per-device workqueue */
	struct work_struct log_work; /* get log */
	void *log_buf;				/* staging buffer for log fetch */

	unsigned long state; /* device state, for example, block device inited */

	struct module *owner;

	/* extend */

	int slave;		/* nonzero: secondary/slave device */
	int cmajor;		/* char device major */
	int save_md;		/* save metadata on shutdown */
	int ot_protect;		/* over-temperature protection enabled */

	struct kref kref;	/* lifetime refcount */

	struct mutex gd_mutex;	/* protects gendisk add/remove */
	struct ssd_log_info log_info; /* volatile */

	atomic_t queue_depth;
	struct mutex barrier_mutex;	/* serializes barrier/flush paths */
	struct mutex fw_mutex;		/* serializes firmware operations */

	struct ssd_hw_info_extend hw_info_ext;
	struct ssd_labelv3 labelv3;

	int wmode;		/* effective write mode */
	int user_wmode;		/* user-requested write mode */
	struct mutex bm_mutex;	/* serializes battery-manager access */
	struct work_struct bm_work; /* check bm */
	struct timer_list bm_timer;	/* schedules bm_work */
	struct sfifo log_fifo;		/* queued log events */

	struct timer_list routine_timer;	/* periodic maintenance timer */
	unsigned long routine_tick;
	unsigned long hwmon;			/* hardware-monitor state flags */

	struct work_struct hwmon_work; /* check hw */
	struct work_struct capmon_work; /* check battery */
	struct work_struct tempmon_work; /* check temp */

	/* debug info */
	struct ssd_debug_info db_info;	/* active fault injection */
	uint64_t reset_time;		/* time of last reset */

	int has_non_0x98_reg_access;
	spinlock_t in_flight_lock;	/* protects in_flight counters */

	uint64_t last_poweron_id;

} ssd_device_t;
1408 | ||
1409 | ||
/* Ioctl struct */
/* Access-count thresholds and current value (bad-block / erase-count). */
typedef struct ssd_acc_info {
	uint32_t threshold_l1;	/* first warning threshold */
	uint32_t threshold_l2;	/* second warning threshold */
	uint32_t val;		/* current value */
} ssd_acc_info_t;

/* Register read/write argument (SSD_CMD_REG_READ/WRITE). */
typedef struct ssd_reg_op_info
{
	uint32_t offset;	/* register offset */
	uint32_t value;		/* value read or to write */
} ssd_reg_op_info_t;

/* SPI flash read/write/erase argument. */
typedef struct ssd_spi_op_info
{
	void __user *buf;	/* user buffer */
	uint32_t off;		/* flash offset */
	uint32_t len;		/* transfer length */
} ssd_spi_op_info_t;

/* I2C transfer argument: optional write followed by optional read. */
typedef struct ssd_i2c_op_info
{
	uint8_t saddr;		/* slave address */
	uint8_t wsize;		/* bytes to write */
	uint8_t rsize;		/* bytes to read */
	void __user *wbuf;	/* user write buffer */
	void __user *rbuf;	/* user read buffer */
} ssd_i2c_op_info_t;

/* SMBus transaction argument. */
typedef struct ssd_smbus_op_info
{
	uint8_t saddr;		/* slave address */
	uint8_t cmd;		/* SMBus command code */
	uint8_t size;		/* payload size */
	void __user *buf;	/* user data buffer */
} ssd_smbus_op_info_t;

/* On-card RAM read/write argument (SSD_CMD_RAM_READ/WRITE). */
typedef struct ssd_ram_op_info {
	uint8_t ctrl_idx;	/* target controller */
	uint32_t length;	/* transfer length */
	uint64_t start;		/* RAM start offset */
	uint8_t __user *buf;	/* user data buffer */
} ssd_ram_op_info_t;

/* Raw NAND page operation argument (SSD_CMD_NAND_*). */
typedef struct ssd_flash_op_info {
	uint32_t page;		/* physical page number */
	uint16_t flash;		/* flash (channel) number */
	uint8_t chip;		/* chip within the channel */
	uint8_t ctrl_idx;	/* target controller */
	uint8_t __user *buf;	/* user data buffer */
} ssd_flash_op_info_t;

/* Software log injection argument (SSD_CMD_SW_LOG). */
typedef struct ssd_sw_log_info {
	uint16_t event;		/* event id */
	uint16_t pad;		/* alignment */
	uint32_t data;		/* event payload */
} ssd_sw_log_info_t;

/* Aggregate version report (SSD_CMD_GET_VERSION). */
typedef struct ssd_version_info
{
	uint32_t bridge_ver;	/* bridge fw version */
	uint32_t ctrl_ver;	/* controller fw version */
	uint32_t bm_ver;	/* battery manager fw version */
	uint8_t pcb_ver;	/* main pcb version */
	uint8_t upper_pcb_ver;
	uint8_t pad0;		/* alignment */
	uint8_t pad1;		/* alignment */
} ssd_version_info_t;

/* PCI location of the device. */
typedef struct pci_addr
{
	uint16_t domain;
	uint8_t bus;
	uint8_t slot;
	uint8_t func;
} pci_addr_t;

/* Snapshot of module parameters (SSD_CMD_DRV_PARAM_INFO). */
typedef struct ssd_drv_param_info {
	int mode;
	int status_mask;
	int int_mode;		/* interrupt mode */
	int threaded_irq;
	int log_level;
	int wmode;		/* write mode */
	int ot_protect;		/* over-temperature protection */
	int finject;		/* fault injection enabled */
	int pad[8];		/* reserved for future parameters */
} ssd_drv_param_info_t;
1498 | ||
1499 | ||
/* form factor */
/* Card form factor, reported in ssd_hw_info_extend.form_factor. */
enum ssd_form_factor
{
	SSD_FORM_FACTOR_HHHL = 0,	/* half-height, half-length */
	SSD_FORM_FACTOR_FHHL		/* full-height, half-length */
};


/* ssd power loss protect */
/* Power-loss-protection capacitor type. */
enum ssd_plp_type
{
	SSD_PLP_SCAP = 0,	/* supercapacitor */
	SSD_PLP_CAP,		/* capacitor */
	SSD_PLP_NONE		/* no power-loss protection */
};
1515 | ||
/* ssd bm */
#define SSD_BM_SLAVE_ADDRESS	0x16	/* battery manager I2C slave address */
#define SSD_BM_CAP	5		/* number of capacitors monitored */

/* SBS cmd */
#define SSD_BM_SAFETYSTATUS	0x51	/* SBS SafetyStatus register */
#define SSD_BM_OPERATIONSTATUS	0x54	/* SBS OperationStatus register */

/* ManufacturerAccess */
#define SSD_BM_MANUFACTURERACCESS	0x00
#define SSD_BM_ENTER_CAP_LEARNING	0x0023 /* cap learning */

/* Data flash access */
#define SSD_BM_DATA_FLASH_SUBCLASS_ID		0x77
#define SSD_BM_DATA_FLASH_SUBCLASS_ID_PAGE1	0x78
#define SSD_BM_SYSTEM_DATA_SUBCLASS_ID		56
#define SSD_BM_CONFIGURATION_REGISTERS_ID	64

/* min cap voltage */
#define SSD_BM_CAP_VOLT_MIN	500	/* minimum acceptable capacitor voltage */

/*
enum ssd_bm_cap
{
	SSD_BM_CAP_VINA = 1,
	SSD_BM_CAP_JH = 3
};*/

/* Battery/capacitor health summary reported via SSD_CMD_GET_BMSTATUS. */
enum ssd_bmstatus
{
	SSD_BMSTATUS_OK = 0,
	SSD_BMSTATUS_CHARGING, /* not fully charged */
	SSD_BMSTATUS_WARNING
};
1550 | ||
/* Unit attached to an SBS register value (drives display/conversion). */
enum sbs_unit {
	SBS_UNIT_VALUE = 0,	/* raw value */
	SBS_UNIT_TEMPERATURE,
	SBS_UNIT_VOLTAGE,
	SBS_UNIT_CURRENT,
	SBS_UNIT_ESR,
	SBS_UNIT_PERCENT,
	SBS_UNIT_CAPACITANCE
};

/* Transfer width of an SBS register. */
enum sbs_size {
	SBS_SIZE_BYTE = 1,
	SBS_SIZE_WORD,
	SBS_SIZE_BLK,
};

/*
 * One entry of the SBS register table (see ssd_bm_sbs[]): which SMBus
 * command to issue, how wide it is, its unit, the destination offset
 * within struct ssd_bm, a validity mask and a human-readable name.
 */
struct sbs_cmd {
	uint8_t cmd;	/* SBS/SMBus command code */
	uint8_t size;	/* enum sbs_size */
	uint8_t unit;	/* enum sbs_unit */
	uint8_t off;	/* byte offset into struct ssd_bm */
	uint16_t mask;	/* bits considered meaningful */
	char *desc;	/* display name */
};
1575 | ||
/*
 * Battery-manager telemetry snapshot; fields are filled from the SBS
 * registers listed in ssd_bm_sbs[] (offsets must stay in sync with it).
 */
struct ssd_bm {
	uint16_t temp;			/* temperature */
	uint16_t volt;			/* pack voltage */
	uint16_t curr;			/* current */
	uint16_t esr;			/* equivalent series resistance */
	uint16_t rsoc;			/* relative state of charge, percent */
	uint16_t health;		/* health, percent */
	uint16_t cap;			/* capacitance */
	uint16_t chg_curr;		/* charging current */
	uint16_t chg_volt;		/* charging voltage */
	uint16_t cap_volt[SSD_BM_CAP];	/* per-capacitor voltages */
	uint16_t sf_alert;		/* SafetyAlert bits */
	uint16_t sf_status;		/* SafetyStatus bits */
	uint16_t op_status;		/* OperationStatus bits */
	uint16_t sys_volt;		/* system voltage */
};
1592 | ||
/* Battery-manager manufacturer data block (lot codes and versions). */
struct ssd_bm_manufacturer_data
{
	uint16_t pack_lot_code;
	uint16_t pcb_lot_code;
	uint16_t firmware_ver;
	uint16_t hardware_ver;
};

/*
 * Battery-manager configuration registers as stored in its data flash
 * (SSD_BM_CONFIGURATION_REGISTERS_ID subclass).
 */
struct ssd_bm_configuration_registers
{
	struct {
		uint16_t cc:3;		/* cell count configuration */
		uint16_t rsvd:5;
		uint16_t stack:1;
		uint16_t rsvd1:2;
		uint16_t temp:2;	/* temperature sensor configuration */
		uint16_t rsvd2:1;
		uint16_t lt_en:1;	/* lifetime logging enable — confirm */
		uint16_t rsvd3:1;
	} operation_cfg;
	uint16_t pad;
	uint16_t fet_action;
	uint16_t pad1;
	uint16_t fault;
};
1618 | ||
#define SBS_VALUE_MASK	0xffff	/* keep every bit of an SBS word */

/*
 * Byte offset of member @var within struct ssd_bm, used to fill
 * sbs_cmd.off. Uses the standard offsetof (provided by <linux/stddef.h>,
 * already pulled in via linux/kernel.h) instead of the hand-rolled
 * null-pointer-dereference form, which is undefined behavior per the
 * C standard even though it happens to work on this compiler.
 */
#define bm_var_offset(var)	offsetof(struct ssd_bm, var)
/* Pointer to the member at byte @offset inside the snapshot at @start. */
#define bm_var(start, offset)	((void *) start + (offset))
1623 | ||
/*
 * SBS registers polled from the battery manager and where each lands in
 * struct ssd_bm. Terminated by an all-zero sentinel entry. The (uint8_t)
 * casts only silence narrowing warnings; all offsets fit in a byte.
 */
static struct sbs_cmd ssd_bm_sbs[] = {
	{0x08, SBS_SIZE_WORD, SBS_UNIT_TEMPERATURE, bm_var_offset(temp), SBS_VALUE_MASK, "Temperature"},
	{0x09, SBS_SIZE_WORD, SBS_UNIT_VOLTAGE, bm_var_offset(volt), SBS_VALUE_MASK, "Voltage"},
	{0x0a, SBS_SIZE_WORD, SBS_UNIT_CURRENT, bm_var_offset(curr), SBS_VALUE_MASK, "Current"},
	{0x0b, SBS_SIZE_WORD, SBS_UNIT_ESR, bm_var_offset(esr), SBS_VALUE_MASK, "ESR"},
	{0x0d, SBS_SIZE_BYTE, SBS_UNIT_PERCENT, bm_var_offset(rsoc), SBS_VALUE_MASK, "RelativeStateOfCharge"},
	{0x0e, SBS_SIZE_BYTE, SBS_UNIT_PERCENT, bm_var_offset(health), SBS_VALUE_MASK, "Health"},
	{0x10, SBS_SIZE_WORD, SBS_UNIT_CAPACITANCE, bm_var_offset(cap), SBS_VALUE_MASK, "Capacitance"},
	{0x14, SBS_SIZE_WORD, SBS_UNIT_CURRENT, bm_var_offset(chg_curr), SBS_VALUE_MASK, "ChargingCurrent"},
	{0x15, SBS_SIZE_WORD, SBS_UNIT_VOLTAGE, bm_var_offset(chg_volt), SBS_VALUE_MASK, "ChargingVoltage"},
	/* capacitor voltages are reported highest-index-first by the BM */
	{0x3b, SBS_SIZE_WORD, SBS_UNIT_VOLTAGE, (uint8_t)bm_var_offset(cap_volt[4]), SBS_VALUE_MASK, "CapacitorVoltage5"},
	{0x3c, SBS_SIZE_WORD, SBS_UNIT_VOLTAGE, (uint8_t)bm_var_offset(cap_volt[3]), SBS_VALUE_MASK, "CapacitorVoltage4"},
	{0x3d, SBS_SIZE_WORD, SBS_UNIT_VOLTAGE, (uint8_t)bm_var_offset(cap_volt[2]), SBS_VALUE_MASK, "CapacitorVoltage3"},
	{0x3e, SBS_SIZE_WORD, SBS_UNIT_VOLTAGE, (uint8_t)bm_var_offset(cap_volt[1]), SBS_VALUE_MASK, "CapacitorVoltage2"},
	{0x3f, SBS_SIZE_WORD, SBS_UNIT_VOLTAGE, (uint8_t)bm_var_offset(cap_volt[0]), SBS_VALUE_MASK, "CapacitorVoltage1"},
	{0x50, SBS_SIZE_WORD, SBS_UNIT_VALUE, bm_var_offset(sf_alert), 0x870F, "SafetyAlert"},
	{0x51, SBS_SIZE_WORD, SBS_UNIT_VALUE, bm_var_offset(sf_status), 0xE7BF, "SafetyStatus"},
	{0x54, SBS_SIZE_WORD, SBS_UNIT_VALUE, bm_var_offset(op_status), 0x79F4, "OperationStatus"},
	{0x5a, SBS_SIZE_WORD, SBS_UNIT_VOLTAGE, bm_var_offset(sys_volt), SBS_VALUE_MASK, "SystemVoltage"},
	{0, 0, 0, 0, 0, NULL},	/* table terminator */
};
1645 | ||
/* ssd ioctl */
/* All ioctls use magic 'H'; argument types are the structs defined above. */

/* info queries */
#define SSD_CMD_GET_PROTOCOL_INFO	_IOR('H', 100, struct ssd_protocol_info)
#define SSD_CMD_GET_HW_INFO	_IOR('H', 101, struct ssd_hw_info)
#define SSD_CMD_GET_ROM_INFO	_IOR('H', 102, struct ssd_rom_info)
#define SSD_CMD_GET_SMART	_IOR('H', 103, struct ssd_smart)
#define SSD_CMD_GET_IDX	_IOR('H', 105, int)
#define SSD_CMD_GET_AMOUNT	_IOR('H', 106, int)
#define SSD_CMD_GET_TO_INFO	_IOR('H', 107, int)
#define SSD_CMD_GET_DRV_VER	_IOR('H', 108, char[DRIVER_VERSION_LEN])

#define SSD_CMD_GET_BBACC_INFO	_IOR('H', 109, struct ssd_acc_info)	/* bad-block access counters */
#define SSD_CMD_GET_ECACC_INFO	_IOR('H', 110, struct ssd_acc_info)	/* erase-count access counters */

#define SSD_CMD_GET_HW_INFO_EXT	_IOR('H', 111, struct ssd_hw_info_extend)

/* register access */
#define SSD_CMD_REG_READ	_IOWR('H', 120, struct ssd_reg_op_info)
#define SSD_CMD_REG_WRITE	_IOWR('H', 121, struct ssd_reg_op_info)

/* SPI flash (ROM) access */
#define SSD_CMD_SPI_READ	_IOWR('H', 125, struct ssd_spi_op_info)
#define SSD_CMD_SPI_WRITE	_IOWR('H', 126, struct ssd_spi_op_info)
#define SSD_CMD_SPI_ERASE	_IOWR('H', 127, struct ssd_spi_op_info)

/* raw I2C access */
#define SSD_CMD_I2C_READ	_IOWR('H', 128, struct ssd_i2c_op_info)
#define SSD_CMD_I2C_WRITE	_IOWR('H', 129, struct ssd_i2c_op_info)
#define SSD_CMD_I2C_WRITE_READ	_IOWR('H', 130, struct ssd_i2c_op_info)

/* SMBus access */
#define SSD_CMD_SMBUS_SEND_BYTE	_IOWR('H', 131, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_RECEIVE_BYTE	_IOWR('H', 132, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_WRITE_BYTE	_IOWR('H', 133, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_READ_BYTE	_IOWR('H', 135, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_WRITE_WORD	_IOWR('H', 136, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_READ_WORD	_IOWR('H', 137, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_WRITE_BLOCK	_IOWR('H', 138, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_READ_BLOCK	_IOWR('H', 139, struct ssd_smbus_op_info)

/* battery manager */
#define SSD_CMD_BM_GET_VER	_IOR('H', 140, uint16_t)
#define SSD_CMD_BM_GET_NR_CAP	_IOR('H', 141, int)
#define SSD_CMD_BM_CAP_LEARNING	_IOW('H', 142, int)
#define SSD_CMD_CAP_LEARN	_IOR('H', 143, uint32_t)
#define SSD_CMD_GET_CAP_STATUS	_IOR('H', 144, int)

/* on-card RAM access */
#define SSD_CMD_RAM_READ	_IOWR('H', 150, struct ssd_ram_op_info)
#define SSD_CMD_RAM_WRITE	_IOWR('H', 151, struct ssd_ram_op_info)

/* raw NAND access */
#define SSD_CMD_NAND_READ_ID	_IOR('H', 160, struct ssd_flash_op_info)
#define SSD_CMD_NAND_READ	_IOWR('H', 161, struct ssd_flash_op_info) //with oob
#define SSD_CMD_NAND_WRITE	_IOWR('H', 162, struct ssd_flash_op_info)
#define SSD_CMD_NAND_ERASE	_IOWR('H', 163, struct ssd_flash_op_info)
#define SSD_CMD_NAND_READ_EXT	_IOWR('H', 164, struct ssd_flash_op_info) //ignore EIO

#define SSD_CMD_UPDATE_BBT	_IOW('H', 180, struct ssd_flash_op_info)

/* alarm control */
#define SSD_CMD_CLEAR_ALARM	_IOW('H', 190, int)
#define SSD_CMD_SET_ALARM	_IOW('H', 191, int)

/* device lifecycle */
#define SSD_CMD_RESET	_IOW('H', 200, int)
#define SSD_CMD_RELOAD_FW	_IOW('H', 201, int)
#define SSD_CMD_UNLOAD_DEV	_IOW('H', 202, int)
#define SSD_CMD_LOAD_DEV	_IOW('H', 203, int)
#define SSD_CMD_UPDATE_VP	_IOWR('H', 205, uint32_t)
#define SSD_CMD_FULL_RESET	_IOW('H', 206, int)

/* log access */
#define SSD_CMD_GET_NR_LOG	_IOR('H', 220, uint32_t)
#define SSD_CMD_GET_LOG	_IOR('H', 221, void *)
#define SSD_CMD_LOG_LEVEL	_IOW('H', 222, int)

/* over-temperature protection */
#define SSD_CMD_OT_PROTECT	_IOW('H', 223, int)
#define SSD_CMD_GET_OT_STATUS	_IOR('H', 224, int)

/* clear persisted state */
#define SSD_CMD_CLEAR_LOG	_IOW('H', 230, int)
#define SSD_CMD_CLEAR_SMART	_IOW('H', 231, int)

#define SSD_CMD_SW_LOG	_IOW('H', 232, struct ssd_sw_log_info)

/* labels / versions / status */
#define SSD_CMD_GET_LABEL	_IOR('H', 235, struct ssd_label)
#define SSD_CMD_GET_VERSION	_IOR('H', 236, struct ssd_version_info)
#define SSD_CMD_GET_TEMPERATURE	_IOR('H', 237, int)
#define SSD_CMD_GET_BMSTATUS	_IOR('H', 238, int)
#define SSD_CMD_GET_LABEL2	_IOR('H', 239, void *)


/* cache / metadata */
#define SSD_CMD_FLUSH	_IOW('H', 240, int)
#define SSD_CMD_SAVE_MD	_IOW('H', 241, int)

/* write mode */
#define SSD_CMD_SET_WMODE	_IOW('H', 242, int)
#define SSD_CMD_GET_WMODE	_IOR('H', 243, int)
#define SSD_CMD_GET_USER_WMODE	_IOR('H', 244, int)

/* debug / module parameters */
#define SSD_CMD_DEBUG	_IOW('H', 250, struct ssd_debug_info)
#define SSD_CMD_DRV_PARAM_INFO	_IOR('H', 251, struct ssd_drv_param_info)

#define SSD_CMD_CLEAR_WARNING	_IOW('H', 260, int)
361ebed5 HSDT |
1739 | |
/* log */
#define SSD_LOG_MAX_SZ	4096	/* max bytes fetched per log request */
#define SSD_LOG_LEVEL	SSD_LOG_LEVEL_NOTICE	/* default reporting level */
#define SSD_DIF_WITH_OLD_LOG	0x3f

/* How a log entry's data field should be interpreted/printed. */
enum ssd_log_data
{
	SSD_LOG_DATA_NONE = 0,	/* no payload */
	SSD_LOG_DATA_LOC,	/* payload is a flash location (see ssd_log_entry) */
	SSD_LOG_DATA_HEX	/* payload is a raw hex value */
};
1751 | ||
/*
 * One device log entry. The payload union gives two flash-location
 * encodings with different page/block bit splits (loc vs loc1 — which one
 * applies presumably depends on device generation; confirm at decode site)
 * plus a raw view.
 */
typedef struct ssd_log_entry
{
	union {
		struct {
			uint32_t page:10;	/* page within block */
			uint32_t block:14;	/* block number */
			uint32_t flash:8;	/* flash (channel) number */
		} loc;
		struct {
			uint32_t page:12;	/* page within block (wider split) */
			uint32_t block:12;	/* block number */
			uint32_t flash:8;	/* flash (channel) number */
		} loc1;
		uint32_t val;			/* raw payload */
	} data;
	uint16_t event:10;	/* event id (see ssd_log_desc[]) */
	uint16_t mod:6;		/* originating firmware module */
	uint16_t idx;
}__attribute__((packed))ssd_log_entry_t;
1771 | ||
/* Timestamped log record: 56-bit time, source controller, and the entry. */
typedef struct ssd_log
{
	uint64_t time:56;	/* timestamp (units per firmware — confirm) */
	uint64_t ctrl_idx:8;	/* controller that produced the entry */
	ssd_log_entry_t le;	/* the log entry itself */
} __attribute__((packed)) ssd_log_t;
1778 | ||
/*
 * Decode-table entry for a log event id: severity, payload interpretation,
 * whether block/page fields are meaningful, and a human-readable text.
 */
typedef struct ssd_log_desc
{
	uint16_t event;	/* event id matched against ssd_log_entry.event */
	uint8_t level;	/* enum ssd_log_level */
	uint8_t data;	/* enum ssd_log_data: payload interpretation */
	uint8_t sblock;	/* nonzero: block field is meaningful */
	uint8_t spage;	/* nonzero: page field is meaningful */
	char *desc;	/* display text */
} __attribute__((packed)) ssd_log_desc_t;

#define SSD_LOG_SW_IDX	0xF	/* `mod` value marking driver-injected (software) logs */
#define SSD_UNKNOWN_EVENT	((uint16_t)-1)	/* sentinel for unrecognized events */
1791 | static struct ssd_log_desc ssd_log_desc[] = { | |
1792 | /* event, level, show flash, show block, show page, desc */ | |
1793 | {0x0, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_LOC, 0, 0, "Create BBT failure"}, //g3 | |
1794 | {0x1, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_LOC, 0, 0, "Read BBT failure"}, //g3 | |
1795 | {0x2, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "Mark bad block"}, | |
1796 | {0x3, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 0, 0, "Flush BBT failure"}, | |
1797 | {0x4, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Program failure"}, | |
1798 | {0x7, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "No available blocks"}, | |
1799 | {0x8, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "Bad EC header"}, | |
1800 | {0x9, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_LOC, 1, 0, "Bad VID header"}, //g3 | |
1801 | {0xa, SSD_LOG_LEVEL_INFO, SSD_LOG_DATA_LOC, 1, 0, "Wear leveling"}, | |
1802 | {0xb, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "WL read back failure"}, | |
1803 | {0x11, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Data recovery failure"}, // err | |
1804 | {0x20, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Init: scan mapping table failure"}, // err g3 | |
1805 | {0x21, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Program failure"}, | |
1806 | {0x22, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Program failure"}, | |
1807 | {0x23, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Program failure"}, | |
1808 | {0x24, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "Merge: read mapping page failure"}, | |
1809 | {0x25, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Merge: read back failure"}, | |
1810 | {0x26, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Program failure"}, | |
1811 | {0x27, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_LOC, 1, 1, "Data corrupted for abnormal power down"}, //g3 | |
1812 | {0x28, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Merge: mapping page corrupted"}, | |
1813 | {0x29, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "Init: no mapping page"}, | |
1814 | {0x2a, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: mapping pages incomplete"}, | |
1815 | {0x2b, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Read back failure after programming failure"}, // err | |
1816 | {0xf1, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Read failure without recovery"}, // err | |
1817 | {0xf2, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 0, 0, "No available blocks"}, // maybe err g3 | |
1818 | {0xf3, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 0, "Init: RAID incomplete"}, // err g3 | |
1819 | {0xf4, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Program failure"}, | |
1820 | {0xf5, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Read failure in moving data"}, | |
1821 | {0xf6, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Program failure"}, | |
1822 | {0xf7, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_LOC, 1, 1, "Init: RAID not complete"}, | |
1823 | {0xf8, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "Init: data moving interrupted"}, | |
da3355df | 1824 | {0xfe, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Data inspection failure"}, |
361ebed5 HSDT |
1825 | {0xff, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "IO: ECC failed"}, |
1826 | ||
1827 | /* new */ | |
1828 | {0x2e, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 0, 0, "No available reserved blocks" }, // err | |
1829 | {0x30, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 0, 0, "Init: PMT membership not found"}, | |
1830 | {0x31, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Init: PMT corrupted"}, | |
1831 | {0x32, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 0, 0, "Init: PBT membership not found"}, | |
1832 | {0x33, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 0, 0, "Init: PBT not found"}, | |
1833 | {0x34, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 0, 0, "Init: PBT corrupted"}, | |
1834 | {0x35, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: PMT page read failure"}, | |
1835 | {0x36, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: PBT page read failure"}, | |
1836 | {0x37, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: PBT backup page read failure"}, | |
1837 | {0x38, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: PBMT read failure"}, | |
1838 | {0x39, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Init: PBMT scan failure"}, // err | |
1839 | {0x3a, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: first page read failure"}, | |
1840 | {0x3b, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Init: first page scan failure"}, // err | |
1841 | {0x3c, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Init: scan unclosed block failure"}, // err | |
1842 | {0x3d, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: write pointer mismatch"}, | |
1843 | {0x3e, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: PMT recovery: PBMT read failure"}, | |
1844 | {0x3f, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "Init: PMT recovery: PBMT scan failure"}, | |
1845 | {0x40, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Init: PMT recovery: data page read failure"}, //err | |
1846 | {0x41, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: PBT write pointer mismatch"}, | |
1847 | {0x42, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: PBT latest version corrupted"}, | |
1848 | {0x43, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 0, "Init: too many unclosed blocks"}, | |
1849 | {0x44, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Init: PDW block found"}, | |
1850 | {0x45, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_HEX, 0, 0, "Init: more than one PDW block found"}, //err | |
1851 | {0x46, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: first page is blank or read failure"}, | |
1852 | {0x47, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 0, 0, "Init: PDW block not found"}, | |
1853 | ||
1854 | {0x50, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 0, "Cache: hit error data"}, // err | |
1855 | {0x51, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 0, "Cache: read back failure"}, // err | |
1856 | {0x52, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Cache: unknown command"}, //? | |
1857 | {0x53, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "GC/WL read back failure"}, // err | |
1858 | ||
1859 | {0x60, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "Erase failure"}, | |
1860 | ||
1861 | {0x70, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "LPA not matched"}, | |
1862 | {0x71, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "PBN not matched"}, | |
1863 | {0x72, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Read retry failure"}, | |
1864 | {0x73, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Need raid recovery"}, | |
1865 | {0x74, SSD_LOG_LEVEL_INFO, SSD_LOG_DATA_LOC, 1, 1, "Need read retry"}, | |
1866 | {0x75, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Read invalid data page"}, | |
1867 | {0x76, SSD_LOG_LEVEL_INFO, SSD_LOG_DATA_LOC, 1, 1, "ECC error, data in cache, PBN matched"}, | |
1868 | {0x77, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "ECC error, data in cache, PBN not matched"}, | |
1869 | {0x78, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "ECC error, data in flash, PBN not matched"}, | |
1870 | {0x79, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "ECC ok, data in cache, LPA not matched"}, | |
1871 | {0x7a, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "ECC ok, data in flash, LPA not matched"}, | |
1872 | {0x7b, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID data in cache, LPA not matched"}, | |
1873 | {0x7c, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID data in flash, LPA not matched"}, | |
1874 | {0x7d, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Read data page status error"}, | |
1875 | {0x7e, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Read blank page"}, | |
1876 | {0x7f, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Access flash timeout"}, | |
1877 | ||
1878 | {0x80, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "EC overflow"}, | |
1879 | {0x81, SSD_LOG_LEVEL_INFO, SSD_LOG_DATA_NONE, 0, 0, "Scrubbing completed"}, | |
1880 | {0x82, SSD_LOG_LEVEL_INFO, SSD_LOG_DATA_LOC, 1, 0, "Unstable block(too much bit flip)"}, | |
1881 | {0x83, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "GC: ram error"}, //? | |
1882 | {0x84, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "GC: one PBMT read failure"}, | |
1883 | ||
1884 | {0x88, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "GC: mark bad block"}, | |
1885 | {0x89, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "GC: invalid page count error"}, // maybe err | |
1886 | {0x8a, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_NONE, 0, 0, "Warning: Bad Block close to limit"}, | |
1887 | {0x8b, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_NONE, 0, 0, "Error: Bad Block over limit"}, | |
1888 | {0x8c, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_NONE, 0, 0, "Warning: P/E cycles close to limit"}, | |
1889 | {0x8d, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_NONE, 0, 0, "Error: P/E cycles over limit"}, | |
1890 | ||
1197134c KM |
1891 | {0x90, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Warning: Over temperature"}, //90 |
1892 | {0x91, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Info: Temperature is OK"}, //80 | |
361ebed5 HSDT |
1893 | {0x92, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_NONE, 0, 0, "Battery fault"}, |
1894 | {0x93, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_NONE, 0, 0, "SEU fault"}, //err | |
1895 | {0x94, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_NONE, 0, 0, "DDR error"}, //err | |
1896 | {0x95, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_NONE, 0, 0, "Controller serdes error"}, //err | |
1897 | {0x96, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_NONE, 0, 0, "Bridge serdes 1 error"}, //err | |
1898 | {0x97, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_NONE, 0, 0, "Bridge serdes 2 error"}, //err | |
1899 | {0x98, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "SEU fault (corrected)"}, //err | |
1900 | {0x99, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Battery is OK"}, | |
1197134c | 1901 | {0x9a, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Info: Temperature close to limit"}, //85 |
361ebed5 HSDT |
1902 | |
1903 | {0x9b, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "SEU fault address (low)"}, | |
1904 | {0x9c, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "SEU fault address (high)"}, | |
1905 | {0x9d, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "I2C fault" }, | |
1906 | {0x9e, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "DDR single bit error" }, | |
1907 | {0x9f, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Board voltage fault" }, | |
1908 | ||
1909 | {0xa0, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "LPA not matched"}, | |
1910 | {0xa1, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Re-read data in cache"}, | |
1911 | {0xa2, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Read blank page"}, | |
1912 | {0xa3, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID recovery: Read blank page"}, | |
1913 | {0xa4, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID recovery: new data in cache"}, | |
1914 | {0xa5, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID recovery: PBN not matched"}, | |
1915 | {0xa6, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Read data with error flag"}, | |
1916 | {0xa7, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID recovery: recoverd data with error flag"}, | |
1917 | {0xa8, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Blank page in cache, PBN matched"}, | |
1918 | {0xa9, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID recovery: Blank page in cache, PBN matched"}, | |
1919 | {0xaa, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 0, 0, "Flash init failure"}, | |
1920 | {0xab, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Mapping table recovery failure"}, | |
1921 | {0xac, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID recovery: ECC failed"}, | |
da3355df SF |
1922 | {0xb0, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Warning: Temperature is 95 degrees C"}, |
1923 | {0xb1, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Warning: Temperature is 100 degrees C"}, | |
361ebed5 HSDT |
1924 | |
1925 | {0x300, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_HEX, 0, 0, "CMD timeout"}, | |
1926 | {0x301, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Power on"}, | |
1927 | {0x302, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Power off"}, | |
1928 | {0x303, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Clear log"}, | |
1929 | {0x304, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Set capacity"}, | |
1930 | {0x305, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Clear data"}, | |
1931 | {0x306, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "BM safety status"}, | |
1932 | {0x307, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_HEX, 0, 0, "I/O error"}, | |
1933 | {0x308, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "CMD error"}, | |
1934 | {0x309, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Set wmode"}, | |
1935 | {0x30a, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_HEX, 0, 0, "DDR init failed" }, | |
1936 | {0x30b, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "PCIe link status" }, | |
1937 | {0x30c, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_HEX, 0, 0, "Controller reset sync error" }, | |
1938 | {0x30d, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_HEX, 0, 0, "Clock fault" }, | |
1939 | {0x30e, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "FPGA voltage fault status" }, | |
1940 | {0x30f, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Set capacity finished"}, | |
1941 | {0x310, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Clear data finished"}, | |
1942 | {0x311, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Reset"}, | |
1943 | {0x312, SSD_LOG_LEVEL_WARNING,SSD_LOG_DATA_HEX, 0, 0, "CAP: voltage fault"}, | |
1944 | {0x313, SSD_LOG_LEVEL_WARNING,SSD_LOG_DATA_NONE, 0, 0, "CAP: learn fault"}, | |
1945 | {0x314, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "CAP status"}, | |
1946 | {0x315, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Board voltage fault status"}, | |
da3355df SF |
1947 | {0x316, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Info: Inlet temperature is 55 degrees C"}, //55 |
1948 | {0x317, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Info: Inlet temperature is 50 degrees C"}, //50 | |
1197134c KM |
1949 | {0x318, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Info: Flash over temperature"}, //70 |
1950 | {0x319, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Info: Flash temperature is OK"}, //65 | |
361ebed5 HSDT |
1951 | {0x31a, SSD_LOG_LEVEL_WARNING,SSD_LOG_DATA_NONE, 0, 0, "CAP: short circuit"}, |
1952 | {0x31b, SSD_LOG_LEVEL_WARNING,SSD_LOG_DATA_HEX, 0, 0, "Sensor fault"}, | |
1953 | {0x31c, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Erase all data"}, | |
1954 | {0x31d, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Erase all data finished"}, | |
da3355df SF |
1955 | {0x320, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Temperature sensor event"}, |
1956 | ||
1957 | {0x350, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Clear smart"}, | |
1958 | {0x351, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Clear warning"}, | |
361ebed5 HSDT |
1959 | |
1960 | {SSD_UNKNOWN_EVENT, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "unknown event"}, | |
1961 | }; | |
/* hardware-reported log event ids (subset with driver-side handling) */
#define SSD_LOG_OVER_TEMP		0x90
#define SSD_LOG_NORMAL_TEMP		0x91
#define SSD_LOG_WARN_TEMP		0x9a
#define SSD_LOG_SEU_FAULT		0x93
#define SSD_LOG_SEU_FAULT1		0x98
#define SSD_LOG_BATTERY_FAULT		0x92
#define SSD_LOG_BATTERY_OK		0x99
#define SSD_LOG_BOARD_VOLT_FAULT	0x9f

/* software log: events generated by the driver itself */
#define SSD_LOG_TIMEOUT			0x300
#define SSD_LOG_POWER_ON		0x301
#define SSD_LOG_POWER_OFF		0x302
#define SSD_LOG_CLEAR_LOG		0x303
#define SSD_LOG_SET_CAPACITY		0x304
#define SSD_LOG_CLEAR_DATA		0x305
#define SSD_LOG_BM_SFSTATUS		0x306
#define SSD_LOG_EIO			0x307
#define SSD_LOG_ECMD			0x308
#define SSD_LOG_SET_WMODE		0x309
#define SSD_LOG_DDR_INIT_ERR		0x30a
#define SSD_LOG_PCIE_LINK_STATUS	0x30b
#define SSD_LOG_CTRL_RST_SYNC		0x30c
#define SSD_LOG_CLK_FAULT		0x30d
#define SSD_LOG_VOLT_FAULT		0x30e
#define SSD_LOG_SET_CAPACITY_END	0x30F
#define SSD_LOG_CLEAR_DATA_END		0x310
#define SSD_LOG_RESET			0x311
#define SSD_LOG_CAP_VOLT_FAULT		0x312
#define SSD_LOG_CAP_LEARN_FAULT		0x313
#define SSD_LOG_CAP_STATUS		0x314
#define SSD_LOG_VOLT_STATUS		0x315
#define SSD_LOG_INLET_OVER_TEMP		0x316
#define SSD_LOG_INLET_NORMAL_TEMP	0x317
#define SSD_LOG_FLASH_OVER_TEMP		0x318
#define SSD_LOG_FLASH_NORMAL_TEMP	0x319
#define SSD_LOG_CAP_SHORT_CIRCUIT	0x31a
#define SSD_LOG_SENSOR_FAULT		0x31b
#define SSD_LOG_ERASE_ALL		0x31c
#define SSD_LOG_ERASE_ALL_END		0x31d
#define SSD_LOG_TEMP_SENSOR_EVENT	0x320
#define SSD_LOG_CLEAR_SMART		0x350
#define SSD_LOG_CLEAR_WARNING		0x351


/* sw log fifo depth */
#define SSD_LOG_FIFO_SZ			1024
2010 | ||
2011 | ||
2012 | /* done queue */ | |
2013 | static DEFINE_PER_CPU(struct list_head, ssd_doneq); | |
2014 | static DEFINE_PER_CPU(struct tasklet_struct, ssd_tasklet); | |
2015 | ||
2016 | ||
2017 | /* unloading driver */ | |
2018 | static volatile int ssd_exiting = 0; | |
2019 | ||
2020 | #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12)) | |
2021 | static struct class_simple *ssd_class; | |
2022 | #else | |
2023 | static struct class *ssd_class; | |
2024 | #endif | |
2025 | ||
2026 | static int ssd_cmajor = SSD_CMAJOR; | |
2027 | ||
2028 | /* ssd block device major, minors */ | |
2029 | static int ssd_major = SSD_MAJOR; | |
2030 | static int ssd_major_sl = SSD_MAJOR_SL; | |
2031 | static int ssd_minors = SSD_MINORS; | |
2032 | ||
2033 | /* ssd device list */ | |
2034 | static struct list_head ssd_list; | |
2035 | static unsigned long ssd_index_bits[SSD_MAX_DEV / BITS_PER_LONG + 1]; | |
2036 | static unsigned long ssd_index_bits_sl[SSD_MAX_DEV / BITS_PER_LONG + 1]; | |
2037 | static atomic_t ssd_nr; | |
2038 | ||
2039 | /* module param */ | |
2040 | enum ssd_drv_mode | |
2041 | { | |
2042 | SSD_DRV_MODE_STANDARD = 0, /* full */ | |
2043 | SSD_DRV_MODE_DEBUG = 2, /* debug */ | |
2044 | SSD_DRV_MODE_BASE /* base only */ | |
2045 | }; | |
2046 | ||
2047 | enum ssd_int_mode | |
2048 | { | |
2049 | SSD_INT_LEGACY = 0, | |
2050 | SSD_INT_MSI, | |
2051 | SSD_INT_MSIX | |
2052 | }; | |
2053 | ||
2054 | #if (defined SSD_MSIX) | |
2055 | #define SSD_INT_MODE_DEFAULT SSD_INT_MSIX | |
2056 | #elif (defined SSD_MSI) | |
2057 | #define SSD_INT_MODE_DEFAULT SSD_INT_MSI | |
2058 | #else | |
2059 | /* auto select the defaut int mode according to the kernel version*/ | |
2060 | /* suse 11 sp1 irqbalance bug: use msi instead*/ | |
2061 | #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR >= 6) || (defined RHEL_MAJOR && RHEL_MAJOR == 5 && RHEL_MINOR >= 5)) | |
2062 | #define SSD_INT_MODE_DEFAULT SSD_INT_MSIX | |
2063 | #else | |
2064 | #define SSD_INT_MODE_DEFAULT SSD_INT_MSI | |
2065 | #endif | |
2066 | #endif | |
2067 | ||
2068 | static int mode = SSD_DRV_MODE_STANDARD; | |
2069 | static int status_mask = 0xFF; | |
2070 | static int int_mode = SSD_INT_MODE_DEFAULT; | |
2071 | static int threaded_irq = 0; | |
2072 | static int log_level = SSD_LOG_LEVEL_WARNING; | |
2073 | static int ot_protect = 1; | |
2074 | static int wmode = SSD_WMODE_DEFAULT; | |
2075 | static int finject = 0; | |
2076 | ||
2077 | module_param(mode, int, 0); | |
2078 | module_param(status_mask, int, 0); | |
2079 | module_param(int_mode, int, 0); | |
2080 | module_param(threaded_irq, int, 0); | |
2081 | module_param(log_level, int, 0); | |
2082 | module_param(ot_protect, int, 0); | |
2083 | module_param(wmode, int, 0); | |
2084 | module_param(finject, int, 0); | |
2085 | ||
2086 | ||
2087 | MODULE_PARM_DESC(mode, "driver mode, 0 - standard, 1 - debug, 2 - debug without IO, 3 - basic debug mode"); | |
2088 | MODULE_PARM_DESC(status_mask, "command status mask, 0 - without command error, 0xff - with command error"); | |
2089 | MODULE_PARM_DESC(int_mode, "preferred interrupt mode, 0 - legacy, 1 - msi, 2 - msix"); | |
2090 | MODULE_PARM_DESC(threaded_irq, "threaded irq, 0 - normal irq, 1 - threaded irq"); | |
2091 | MODULE_PARM_DESC(log_level, "log level to display, 0 - info and above, 1 - notice and above, 2 - warning and above, 3 - error only"); | |
2092 | MODULE_PARM_DESC(ot_protect, "over temperature protect, 0 - disable, 1 - enable"); | |
2093 | MODULE_PARM_DESC(wmode, "write mode, 0 - write buffer (with risk for the 6xx firmware), 1 - write buffer ex, 2 - write through, 3 - auto, 4 - default"); | |
2094 | MODULE_PARM_DESC(finject, "enable fault simulation, 0 - off, 1 - on, for debug purpose only"); | |
2095 | ||
1197134c KM |
2096 | // API adaption layer |
2097 | static inline void ssd_bio_endio(struct bio *bio, int error) | |
2098 | { | |
2099 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0)) | |
91557e4a | 2100 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,13,0)) |
1197134c | 2101 | bio->bi_error = error; |
91557e4a SF |
2102 | #else |
2103 | bio->bi_status = errno_to_blk_status(error); | |
2104 | #endif | |
1197134c KM |
2105 | bio_endio(bio); |
2106 | #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)) | |
2107 | bio_endio(bio, error); | |
2108 | #else | |
2109 | bio_endio(bio, bio->bi_size, error); | |
2110 | #endif | |
2111 | } | |
2112 | ||
/*
 * Whether the bio carries a DISCARD (TRIM) request; always 0 when the
 * driver is built without SSD_TRIM or on kernels predating discard support.
 */
static inline int ssd_bio_has_discard(struct bio *bio)
{
#ifndef SSD_TRIM
	return 0;
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0))
	return bio_op(bio) == REQ_OP_DISCARD;
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
	return bio->bi_rw & REQ_DISCARD;
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
	return bio_rw_flagged(bio, BIO_RW_DISCARD);
#else
	return 0;
#endif
}
2127 | ||
2128 | static inline int ssd_bio_has_flush(struct bio *bio) | |
2129 | { | |
2130 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0)) | |
0f07eebb | 2131 | return bio_op(bio) == REQ_OP_FLUSH; |
1197134c KM |
2132 | #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) |
2133 | return bio->bi_rw & REQ_FLUSH; | |
2134 | #else | |
2135 | return 0; | |
2136 | #endif | |
2137 | } | |
2138 | ||
da3355df | 2139 | static inline int ssd_bio_has_barrier_or_fua(struct bio * bio) |
1197134c KM |
2140 | { |
2141 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0)) | |
2142 | return bio->bi_opf & REQ_FUA; | |
da3355df | 2143 | #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) |
1197134c | 2144 | return bio->bi_rw & REQ_FUA; |
da3355df SF |
2145 | #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)) |
2146 | return bio->bi_rw & REQ_HARDBARRIER; | |
2147 | #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)) | |
2148 | return bio_rw_flagged(bio, BIO_RW_BARRIER); | |
2149 | #else | |
2150 | return bio_barrier(bio); | |
1197134c KM |
2151 | #endif |
2152 | } | |
361ebed5 HSDT |
2153 | |
2154 | #ifndef MODULE | |
2155 | static int __init ssd_drv_mode(char *str) | |
2156 | { | |
2157 | mode = (int)simple_strtoul(str, NULL, 0); | |
2158 | ||
2159 | return 1; | |
2160 | } | |
2161 | ||
2162 | static int __init ssd_status_mask(char *str) | |
2163 | { | |
2164 | status_mask = (int)simple_strtoul(str, NULL, 16); | |
2165 | ||
2166 | return 1; | |
2167 | } | |
2168 | ||
2169 | static int __init ssd_int_mode(char *str) | |
2170 | { | |
2171 | int_mode = (int)simple_strtoul(str, NULL, 0); | |
2172 | ||
2173 | return 1; | |
2174 | } | |
2175 | ||
2176 | static int __init ssd_threaded_irq(char *str) | |
2177 | { | |
2178 | threaded_irq = (int)simple_strtoul(str, NULL, 0); | |
2179 | ||
2180 | return 1; | |
2181 | } | |
2182 | ||
2183 | static int __init ssd_log_level(char *str) | |
2184 | { | |
2185 | log_level = (int)simple_strtoul(str, NULL, 0); | |
2186 | ||
2187 | return 1; | |
2188 | } | |
2189 | ||
2190 | static int __init ssd_ot_protect(char *str) | |
2191 | { | |
2192 | ot_protect = (int)simple_strtoul(str, NULL, 0); | |
2193 | ||
2194 | return 1; | |
2195 | } | |
2196 | ||
2197 | static int __init ssd_wmode(char *str) | |
2198 | { | |
2199 | wmode = (int)simple_strtoul(str, NULL, 0); | |
2200 | ||
2201 | return 1; | |
2202 | } | |
2203 | ||
2204 | static int __init ssd_finject(char *str) | |
2205 | { | |
2206 | finject = (int)simple_strtoul(str, NULL, 0); | |
2207 | ||
2208 | return 1; | |
2209 | } | |
2210 | ||
2211 | __setup(MODULE_NAME"_mode=", ssd_drv_mode); | |
2212 | __setup(MODULE_NAME"_status_mask=", ssd_status_mask); | |
2213 | __setup(MODULE_NAME"_int_mode=", ssd_int_mode); | |
2214 | __setup(MODULE_NAME"_threaded_irq=", ssd_threaded_irq); | |
2215 | __setup(MODULE_NAME"_log_level=", ssd_log_level); | |
2216 | __setup(MODULE_NAME"_ot_protect=", ssd_ot_protect); | |
2217 | __setup(MODULE_NAME"_wmode=", ssd_wmode); | |
2218 | __setup(MODULE_NAME"_finject=", ssd_finject); | |
2219 | #endif | |
2220 | ||
2221 | ||
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <asm/uaccess.h>

#define SSD_PROC_DIR	MODULE_NAME
#define SSD_PROC_INFO	"info"

static struct proc_dir_entry *ssd_proc_dir = NULL;
static struct proc_dir_entry *ssd_proc_info = NULL;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0))
/*
 * Legacy read_proc handler: write driver version plus a short report per
 * registered device into `page`. Only a single read at offset 0 is served.
 * NOTE(review): ssd_list is walked without a lock here — presumably safe
 * because entries are only removed at module exit; confirm against callers.
 */
static int ssd_proc_read(char *page, char **start,
	off_t off, int count, int *eof, void *data)
{
	struct ssd_device *sdev = NULL;
	struct ssd_device *tmp = NULL;
	uint64_t cap;
	int devno;
	int written = 0;

	if (ssd_exiting || off != 0) {
		return 0;
	}

	written += snprintf((page + written), (count - written), "Driver Version:\t%s\n", DRIVER_VERSION);

	list_for_each_entry_safe(sdev, tmp, &ssd_list, list) {
		devno = sdev->idx + 1;
		cap = sdev->hw_info.size;
		do_div(cap, 1000000000);	/* bytes -> GB */

		written += snprintf((page + written), (count - written), "\n");

		written += snprintf((page + written), (count - written), "HIO %d Size:\t%uGB\n", devno, (uint32_t)cap);

		written += snprintf((page + written), (count - written), "HIO %d Bridge FW VER:\t%03X\n", devno, sdev->hw_info.bridge_ver);
		if (sdev->hw_info.ctrl_ver != 0) {
			written += snprintf((page + written), (count - written), "HIO %d Controller FW VER:\t%03X\n", devno, sdev->hw_info.ctrl_ver);
		}

		written += snprintf((page + written), (count - written), "HIO %d PCB VER:\t.%c\n", devno, sdev->hw_info.pcb_ver);

		if (sdev->hw_info.upper_pcb_ver >= 'A') {
			written += snprintf((page + written), (count - written), "HIO %d Upper PCB VER:\t.%c\n", devno, sdev->hw_info.upper_pcb_ver);
		}

		written += snprintf((page + written), (count - written), "HIO %d Device:\t%s\n", devno, sdev->name);
	}

	*eof = 1;
	return written;
}

#else

/* seq_file show: emits the same report as the legacy handler above */
static int ssd_proc_show(struct seq_file *m, void *v)
{
	struct ssd_device *sdev = NULL;
	struct ssd_device *tmp = NULL;
	uint64_t cap;
	int devno;

	if (ssd_exiting) {
		return 0;
	}

	seq_printf(m, "Driver Version:\t%s\n", DRIVER_VERSION);

	list_for_each_entry_safe(sdev, tmp, &ssd_list, list) {
		devno = sdev->idx + 1;
		cap = sdev->hw_info.size;
		do_div(cap, 1000000000);	/* bytes -> GB */

		seq_printf(m, "\n");

		seq_printf(m, "HIO %d Size:\t%uGB\n", devno, (uint32_t)cap);

		seq_printf(m, "HIO %d Bridge FW VER:\t%03X\n", devno, sdev->hw_info.bridge_ver);
		if (sdev->hw_info.ctrl_ver != 0) {
			seq_printf(m, "HIO %d Controller FW VER:\t%03X\n", devno, sdev->hw_info.ctrl_ver);
		}

		seq_printf(m, "HIO %d PCB VER:\t.%c\n", devno, sdev->hw_info.pcb_ver);

		if (sdev->hw_info.upper_pcb_ver >= 'A') {
			seq_printf(m, "HIO %d Upper PCB VER:\t.%c\n", devno, sdev->hw_info.upper_pcb_ver);
		}

		seq_printf(m, "HIO %d Device:\t%s\n", devno, sdev->name);
	}

	return 0;
}

static int ssd_proc_open(struct inode *inode, struct file *file)
{
	/* PDE()->data was replaced by the PDE_DATA() accessor in 3.10 */
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3,9,0))
	return single_open(file, ssd_proc_show, PDE(inode)->data);
#else
	return single_open(file, ssd_proc_show, PDE_DATA(inode));
#endif
}

static const struct file_operations ssd_proc_fops = {
	.open = ssd_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
#endif


/* remove /proc/<MODULE_NAME>/info and the directory, in that order */
static void ssd_cleanup_proc(void)
{
	if (ssd_proc_info) {
		remove_proc_entry(SSD_PROC_INFO, ssd_proc_dir);
		ssd_proc_info = NULL;
	}
	if (ssd_proc_dir) {
		remove_proc_entry(SSD_PROC_DIR, NULL);
		ssd_proc_dir = NULL;
	}
}

/* create /proc/<MODULE_NAME>/info; returns 0 or -ENOMEM */
static int ssd_init_proc(void)
{
	ssd_proc_dir = proc_mkdir(SSD_PROC_DIR, NULL);
	if (!ssd_proc_dir)
		goto out_proc_mkdir;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0))
	ssd_proc_info = create_proc_entry(SSD_PROC_INFO, S_IFREG | S_IRUGO | S_IWUSR, ssd_proc_dir);
	if (!ssd_proc_info)
		goto out_create_proc_entry;

	ssd_proc_info->read_proc = ssd_proc_read;

	/* kernel bug */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30))
	ssd_proc_info->owner = THIS_MODULE;
#endif
#else
	ssd_proc_info = proc_create(SSD_PROC_INFO, 0600, ssd_proc_dir, &ssd_proc_fops);
	if (!ssd_proc_info)
		goto out_create_proc_entry;
#endif

	return 0;

out_create_proc_entry:
	remove_proc_entry(SSD_PROC_DIR, NULL);
out_proc_mkdir:
	return -ENOMEM;
}

#else
static void ssd_cleanup_proc(void)
{
	return;
}
static int ssd_init_proc(void)
{
	return 0;
}
#endif /* CONFIG_PROC_FS */
2387 | ||
/* sysfs */
/* placeholder hooks: sysfs attributes are not implemented in this build */
static void ssd_unregister_sysfs(struct ssd_device *dev)
{
	return;
}

static int ssd_register_sysfs(struct ssd_device *dev)
{
	return 0;
}

static void ssd_cleanup_sysfs(void)
{
	return;
}

static int ssd_init_sysfs(void)
{
	return 0;
}
2408 | ||
2409 | static inline void ssd_put_index(int slave, int index) | |
2410 | { | |
2411 | unsigned long *index_bits = ssd_index_bits; | |
2412 | ||
2413 | if (slave) { | |
2414 | index_bits = ssd_index_bits_sl; | |
2415 | } | |
2416 | ||
2417 | if (test_and_clear_bit(index, index_bits)) { | |
2418 | atomic_dec(&ssd_nr); | |
2419 | } | |
2420 | } | |
2421 | ||
2422 | static inline int ssd_get_index(int slave) | |
2423 | { | |
2424 | unsigned long *index_bits = ssd_index_bits; | |
2425 | int index; | |
2426 | ||
2427 | if (slave) { | |
2428 | index_bits = ssd_index_bits_sl; | |
2429 | } | |
2430 | ||
2431 | find_index: | |
2432 | if ((index = find_first_zero_bit(index_bits, SSD_MAX_DEV)) >= SSD_MAX_DEV) { | |
2433 | return -1; | |
2434 | } | |
2435 | ||
2436 | if (test_and_set_bit(index, index_bits)) { | |
2437 | goto find_index; | |
2438 | } | |
2439 | ||
2440 | atomic_inc(&ssd_nr); | |
2441 | ||
2442 | return index; | |
2443 | } | |
2444 | ||
2445 | static void ssd_cleanup_index(void) | |
2446 | { | |
2447 | return; | |
2448 | } | |
2449 | ||
2450 | static int ssd_init_index(void) | |
2451 | { | |
2452 | INIT_LIST_HEAD(&ssd_list); | |
2453 | atomic_set(&ssd_nr, 0); | |
3871d789 SF |
2454 | memset(ssd_index_bits, 0, sizeof(ssd_index_bits)); |
2455 | memset(ssd_index_bits_sl, 0, sizeof(ssd_index_bits_sl)); | |
361ebed5 HSDT |
2456 | |
2457 | return 0; | |
2458 | } | |
2459 | ||
2460 | static void ssd_set_dev_name(char *name, size_t size, int idx) | |
2461 | { | |
2462 | if(idx < SSD_ALPHABET_NUM) { | |
2463 | snprintf(name, size, "%c", 'a'+idx); | |
2464 | } else { | |
2465 | idx -= SSD_ALPHABET_NUM; | |
2466 | snprintf(name, size, "%c%c", 'a'+(idx/SSD_ALPHABET_NUM), 'a'+(idx%SSD_ALPHABET_NUM)); | |
2467 | } | |
2468 | } | |
2469 | ||
2470 | /* pci register r&w */ | |
2471 | static inline void ssd_reg_write(void *addr, uint64_t val) | |
2472 | { | |
2473 | iowrite32((uint32_t)val, addr); | |
2474 | iowrite32((uint32_t)(val >> 32), addr + 4); | |
2475 | wmb(); | |
2476 | } | |
2477 | ||
2478 | static inline uint64_t ssd_reg_read(void *addr) | |
2479 | { | |
2480 | uint64_t val; | |
2481 | uint32_t val_lo, val_hi; | |
2482 | ||
2483 | val_lo = ioread32(addr); | |
2484 | val_hi = ioread32(addr + 4); | |
2485 | ||
2486 | rmb(); | |
2487 | val = val_lo | ((uint64_t)val_hi << 32); | |
2488 | ||
2489 | return val; | |
2490 | } | |
2491 | ||
2492 | ||
2493 | #define ssd_reg32_write(addr, val) writel(val, addr) | |
2494 | #define ssd_reg32_read(addr) readl(addr) | |
2495 | ||
2496 | /* alarm led */ | |
2497 | static void ssd_clear_alarm(struct ssd_device *dev) | |
2498 | { | |
2499 | uint32_t val; | |
2500 | ||
2501 | if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) { | |
2502 | return; | |
2503 | } | |
2504 | ||
2505 | val = ssd_reg32_read(dev->ctrlp + SSD_LED_REG); | |
2506 | ||
2507 | /* firmware control */ | |
2508 | val &= ~0x2; | |
2509 | ||
2510 | ssd_reg32_write(dev->ctrlp + SSD_LED_REG, val); | |
2511 | } | |
2512 | ||
2513 | static void ssd_set_alarm(struct ssd_device *dev) | |
2514 | { | |
2515 | uint32_t val; | |
2516 | ||
2517 | if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) { | |
2518 | return; | |
2519 | } | |
2520 | ||
2521 | val = ssd_reg32_read(dev->ctrlp + SSD_LED_REG); | |
2522 | ||
2523 | /* light up */ | |
2524 | val &= ~0x1; | |
2525 | /* software control */ | |
2526 | val |= 0x2; | |
2527 | ||
2528 | ssd_reg32_write(dev->ctrlp + SSD_LED_REG, val); | |
2529 | } | |
2530 | ||
/* open-coded byte-order swaps (equivalent to the kernel's swab32/swab16) */
#define u32_swap(x) \
	((uint32_t)( \
		(((uint32_t)(x) & (uint32_t)0xff000000UL) >> 24) | \
		(((uint32_t)(x) & (uint32_t)0x00ff0000UL) >>  8) | \
		(((uint32_t)(x) & (uint32_t)0x0000ff00UL) <<  8) | \
		(((uint32_t)(x) & (uint32_t)0x000000ffUL) << 24)))

#define u16_swap(x) \
	((uint16_t)( \
		(((uint16_t)(x) & (uint16_t)0xff00) >> 8) | \
		(((uint16_t)(x) & (uint16_t)0x00ff) << 8)))
2542 | ||
2543 | ||
#if 0
/*
 * Read the SPI flash JEDEC id. No lock, for init only. Currently compiled
 * out (#if 0) — kept for reference.
 */
static int ssd_spi_read_id(struct ssd_device *dev, uint32_t *id)
{
	uint32_t val;
	unsigned long st;
	int ret = 0;

	if (!dev || !id) {
		return -EINVAL;
	}

	ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_READ_ID);

	/* flush: dummy reads before polling the ready flag */
	val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);
	val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);
	val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);
	val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);

	st = jiffies;
	for (;;) {
		val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);
		if (val == 0x1000000) {
			break;
		}

		if (time_after(jiffies, (st + SSD_SPI_TIMEOUT))) {
			ret = -ETIMEDOUT;
			goto out;
		}
		cond_resched();
	}

	val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_ID);
	*id = val;

out:
	return ret;
}
#endif
2584 | ||
2585 | /* spi access */ | |
2586 | static int ssd_init_spi(struct ssd_device *dev) | |
2587 | { | |
2588 | uint32_t val; | |
2589 | unsigned long st; | |
2590 | int ret = 0; | |
2591 | ||
2592 | mutex_lock(&dev->spi_mutex); | |
2593 | st = jiffies; | |
2594 | for(;;) { | |
2595 | ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_READ_STATUS); | |
2596 | ||
2597 | do { | |
2598 | val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY); | |
2599 | ||
2600 | if (time_after(jiffies, (st + SSD_SPI_TIMEOUT))) { | |
2601 | ret = -ETIMEDOUT; | |
2602 | goto out; | |
2603 | } | |
2604 | cond_resched(); | |
2605 | } while (val != 0x1000000); | |
2606 | ||
2607 | val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_STATUS); | |
2608 | if (!(val & 0x1)) { | |
2609 | break; | |
2610 | } | |
2611 | ||
2612 | if (time_after(jiffies, (st + SSD_SPI_TIMEOUT))) { | |
2613 | ret = -ETIMEDOUT; | |
2614 | goto out; | |
2615 | } | |
2616 | cond_resched(); | |
2617 | } | |
2618 | ||
2619 | out: | |
2620 | if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) { | |
2621 | if (val & 0x1) { | |
2622 | ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_CLSR); | |
2623 | } | |
2624 | } | |
2625 | ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_W_DISABLE); | |
2626 | mutex_unlock(&dev->spi_mutex); | |
2627 | ||
2628 | ret = 0; | |
2629 | ||
2630 | return ret; | |
2631 | } | |
2632 | ||
/*
 * Read up to one page from the SPI flash, one 32-bit word per READ command.
 *
 * @buf:  destination buffer (at least @size bytes)
 * @off:  byte offset into the flash; must be 32-bit aligned
 * @size: byte count; 32-bit aligned, non-zero, at most one page, and
 *        off+size must not run past the ROM
 *
 * Returns 0 on success, -EINVAL on bad arguments, -ETIMEDOUT if the
 * controller never signals data ready. Serialized by dev->spi_mutex.
 */
static int ssd_spi_page_read(struct ssd_device *dev, void *buf, uint32_t off, uint32_t size)
{
	uint32_t val;
	uint32_t rlen = 0;	/* bytes transferred so far */
	unsigned long st;
	int ret = 0;

	if (!dev || !buf) {
		return -EINVAL;
	}

	if ((off % sizeof(uint32_t)) != 0 || (size % sizeof(uint32_t)) != 0 || size == 0 ||
		((uint64_t)off + (uint64_t)size) > dev->rom_info.size || size > dev->rom_info.page_size) {
		return -EINVAL;
	}

	mutex_lock(&dev->spi_mutex);
	while (rlen < size) {
		/* issue READ: high address byte first, then (addr << 8) | opcode;
		 * wmb() keeps the two MMIO writes in order */
		ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD_HI, ((off + rlen) >> 24));
		wmb();
		ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, (((off + rlen) << 8) | SSD_SPI_CMD_READ));

		/* four discarded READY reads before polling — presumably a
		 * hardware settle/flush requirement; TODO confirm against hw spec */
		(void)ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);
		(void)ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);
		(void)ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);
		(void)ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);

		/* poll until the controller reports the data word is ready */
		st = jiffies;
		for (;;) {
			val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);
			if (val == 0x1000000) {
				break;
			}

			if (time_after(jiffies, (st + SSD_SPI_TIMEOUT))) {
				ret = -ETIMEDOUT;
				goto out;
			}
			cond_resched();
		}

		/* data comes back byte-swapped */
		val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_RDATA);
		*(uint32_t *)(buf + rlen)= u32_swap(val);

		rlen += sizeof(uint32_t);
	}

out:
	mutex_unlock(&dev->spi_mutex);
	return ret;
}
2684 | ||
/*
 * Program up to one page of the SPI flash.
 *
 * @buf:  source data
 * @off:  byte offset into the flash; 32-bit aligned
 * @size: byte count; 32-bit aligned, non-zero, within the ROM, and the
 *        whole range must lie inside a single flash page (no page crossing)
 *
 * Returns 0 on success, -EINVAL on bad arguments, -ETIMEDOUT if the
 * controller/flash never goes ready, -EIO on a reported program error
 * (protocol >= V3_2). Serialized by dev->spi_mutex.
 */
static int ssd_spi_page_write(struct ssd_device *dev, void *buf, uint32_t off, uint32_t size)
{
	uint32_t val;
	uint32_t wlen;
	unsigned long st;
	int i;
	int ret = 0;

	if (!dev || !buf) {
		return -EINVAL;
	}

	/* last condition rejects writes that would span a page boundary */
	if ((off % sizeof(uint32_t)) != 0 || (size % sizeof(uint32_t)) != 0 || size == 0 ||
		((uint64_t)off + (uint64_t)size) > dev->rom_info.size || size > dev->rom_info.page_size ||
		(off / dev->rom_info.page_size) != ((off + size - 1) / dev->rom_info.page_size)) {
		return -EINVAL;
	}

	mutex_lock(&dev->spi_mutex);

	ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_W_ENABLE);

	/* preload the write fifo, byte-swapping each word */
	wlen = size / sizeof(uint32_t);
	for (i=0; i<(int)wlen; i++) {
		ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_WDATA, u32_swap(*((uint32_t *)buf + i)));
	}

	/* issue PROGRAM: high address byte, then (addr << 8) | opcode,
	 * with barriers to keep the MMIO writes ordered */
	wmb();
	ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD_HI, (off >> 24));
	wmb();
	ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, ((off << 8) | SSD_SPI_CMD_PROGRAM));

	udelay(1);

	/* poll flash status until write-in-progress (bit 0) clears */
	st = jiffies;
	for (;;) {
		ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_READ_STATUS);
		do {
			val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);

			if (time_after(jiffies, (st + SSD_SPI_TIMEOUT))) {
				ret = -ETIMEDOUT;
				goto out;
			}
			cond_resched();
		} while (val != 0x1000000);

		val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_STATUS);
		if (!(val & 0x1)) {
			break;
		}

		if (time_after(jiffies, (st + SSD_SPI_TIMEOUT))) {
			ret = -ETIMEDOUT;
			goto out;
		}
		cond_resched();
	}

	if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) {
		/* status bit 6 appears to flag a program failure — TODO confirm */
		if ((val >> 6) & 0x1) {
			ret = -EIO;
			goto out;
		}
	}

out:
	if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) {
		if (val & 0x1) {
			/* flash still busy/errored: clear its status register */
			ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_CLSR);
		}
	}
	ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_W_DISABLE);

	mutex_unlock(&dev->spi_mutex);

	return ret;
}
2763 | ||
/*
 * Erase one flash block.
 *
 * @off: byte offset of the block; must be block-aligned and inside the ROM
 *
 * Returns 0 on success, -EINVAL on bad arguments, -ETIMEDOUT if the
 * flash never goes idle, -EIO on a reported erase error
 * (protocol >= V3_2). Serialized by dev->spi_mutex.
 */
static int ssd_spi_block_erase(struct ssd_device *dev, uint32_t off)
{
	uint32_t val;
	unsigned long st;
	int ret = 0;

	if (!dev) {
		return -EINVAL;
	}

	if ((off % dev->rom_info.block_size) != 0 || off >= dev->rom_info.size) {
		return -EINVAL;
	}

	mutex_lock(&dev->spi_mutex);

	/* W_ENABLE is issued twice — presumably deliberate for this
	 * hardware; TODO confirm against the controller spec */
	ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_W_ENABLE);
	ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_W_ENABLE);

	/* issue ERASE: high address byte, then (addr << 8) | opcode */
	wmb();
	ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD_HI, (off >> 24));
	wmb();
	ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, ((off << 8) | SSD_SPI_CMD_ERASE));

	/* poll flash status until write-in-progress (bit 0) clears */
	st = jiffies;
	for (;;) {
		ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_READ_STATUS);

		do {
			val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);

			if (time_after(jiffies, (st + SSD_SPI_TIMEOUT))) {
				ret = -ETIMEDOUT;
				goto out;
			}
			cond_resched();
		} while (val != 0x1000000);

		val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_STATUS);
		if (!(val & 0x1)) {
			break;
		}

		if (time_after(jiffies, (st + SSD_SPI_TIMEOUT))) {
			ret = -ETIMEDOUT;
			goto out;
		}
		cond_resched();
	}

	if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) {
		/* status bit 5 appears to flag an erase failure — TODO confirm */
		if ((val >> 5) & 0x1) {
			ret = -EIO;
			goto out;
		}
	}

out:
	if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) {
		if (val & 0x1) {
			/* flash still busy/errored: clear its status register */
			ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_CLSR);
		}
	}
	ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_W_DISABLE);

	mutex_unlock(&dev->spi_mutex);

	return ret;
}
2833 | ||
2834 | static int ssd_spi_read(struct ssd_device *dev, void *buf, uint32_t off, uint32_t size) | |
2835 | { | |
2836 | uint32_t len = 0; | |
2837 | uint32_t roff; | |
2838 | uint32_t rsize; | |
2839 | int ret = 0; | |
2840 | ||
2841 | if (!dev || !buf) { | |
2842 | return -EINVAL; | |
2843 | } | |
2844 | ||
2845 | if ((off % sizeof(uint32_t)) != 0 || (size % sizeof(uint32_t)) != 0 || size == 0 || | |
2846 | ((uint64_t)off + (uint64_t)size) > dev->rom_info.size) { | |
2847 | return -EINVAL; | |
2848 | } | |
2849 | ||
2850 | while (len < size) { | |
2851 | roff = (off + len) % dev->rom_info.page_size; | |
2852 | rsize = dev->rom_info.page_size - roff; | |
2853 | if ((size - len) < rsize) { | |
2854 | rsize = (size - len); | |
2855 | } | |
2856 | roff = off + len; | |
2857 | ||
2858 | ret = ssd_spi_page_read(dev, (buf + len), roff, rsize); | |
2859 | if (ret) { | |
2860 | goto out; | |
2861 | } | |
2862 | ||
2863 | len += rsize; | |
2864 | ||
2865 | cond_resched(); | |
2866 | } | |
2867 | ||
2868 | out: | |
2869 | return ret; | |
2870 | } | |
2871 | ||
2872 | static int ssd_spi_write(struct ssd_device *dev, void *buf, uint32_t off, uint32_t size) | |
2873 | { | |
2874 | uint32_t len = 0; | |
2875 | uint32_t woff; | |
2876 | uint32_t wsize; | |
2877 | int ret = 0; | |
2878 | ||
2879 | if (!dev || !buf) { | |
2880 | return -EINVAL; | |
2881 | } | |
2882 | ||
2883 | if ((off % sizeof(uint32_t)) != 0 || (size % sizeof(uint32_t)) != 0 || size == 0 || | |
2884 | ((uint64_t)off + (uint64_t)size) > dev->rom_info.size) { | |
2885 | return -EINVAL; | |
2886 | } | |
2887 | ||
2888 | while (len < size) { | |
2889 | woff = (off + len) % dev->rom_info.page_size; | |
2890 | wsize = dev->rom_info.page_size - woff; | |
2891 | if ((size - len) < wsize) { | |
2892 | wsize = (size - len); | |
2893 | } | |
2894 | woff = off + len; | |
2895 | ||
2896 | ret = ssd_spi_page_write(dev, (buf + len), woff, wsize); | |
2897 | if (ret) { | |
2898 | goto out; | |
2899 | } | |
2900 | ||
2901 | len += wsize; | |
2902 | ||
2903 | cond_resched(); | |
2904 | } | |
2905 | ||
2906 | out: | |
2907 | return ret; | |
2908 | } | |
2909 | ||
2910 | static int ssd_spi_erase(struct ssd_device *dev, uint32_t off, uint32_t size) | |
2911 | { | |
2912 | uint32_t len = 0; | |
2913 | uint32_t eoff; | |
2914 | int ret = 0; | |
2915 | ||
2916 | if (!dev) { | |
2917 | return -EINVAL; | |
2918 | } | |
2919 | ||
2920 | if (size == 0 || ((uint64_t)off + (uint64_t)size) > dev->rom_info.size || | |
2921 | (off % dev->rom_info.block_size) != 0 || (size % dev->rom_info.block_size) != 0) { | |
2922 | return -EINVAL; | |
2923 | } | |
2924 | ||
2925 | while (len < size) { | |
2926 | eoff = (off + len); | |
2927 | ||
2928 | ret = ssd_spi_block_erase(dev, eoff); | |
2929 | if (ret) { | |
2930 | goto out; | |
2931 | } | |
2932 | ||
2933 | len += dev->rom_info.block_size; | |
2934 | ||
2935 | cond_resched(); | |
2936 | } | |
2937 | ||
2938 | out: | |
2939 | return ret; | |
2940 | } | |
2941 | ||
2942 | /* i2c access */ | |
/* Thin wrapper over ssd_reg32_read for i2c controller registers
 * (kept for symmetry with __ssd_i2c_reg32_write). */
static uint32_t __ssd_i2c_reg32_read(void *addr)
{
	return ssd_reg32_read(addr);
}
2947 | ||
/* Write an i2c controller register, then read it back — the read forces
 * the posted MMIO write to reach the device before we continue. */
static void __ssd_i2c_reg32_write(void *addr, uint32_t val)
{
	ssd_reg32_write(addr, val);
	ssd_reg32_read(addr);
}
2953 | ||
2954 | static int __ssd_i2c_clear(struct ssd_device *dev, uint8_t saddr) | |
2955 | { | |
2956 | ssd_i2c_ctrl_t ctrl; | |
2957 | ssd_i2c_data_t data; | |
2958 | uint8_t status = 0; | |
2959 | int nr_data = 0; | |
2960 | unsigned long st; | |
2961 | int ret = 0; | |
2962 | ||
2963 | check_status: | |
2964 | ctrl.bits.wdata = 0; | |
2965 | ctrl.bits.addr = SSD_I2C_STATUS_REG; | |
2966 | ctrl.bits.rw = SSD_I2C_CTRL_READ; | |
2967 | __ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val); | |
2968 | ||
2969 | st = jiffies; | |
2970 | for (;;) { | |
2971 | data.val = __ssd_i2c_reg32_read(dev->ctrlp + SSD_I2C_RDATA_REG); | |
2972 | if (data.bits.valid == 0) { | |
2973 | break; | |
2974 | } | |
2975 | ||
2976 | /* retry */ | |
2977 | if (time_after(jiffies, (st + SSD_I2C_TIMEOUT))) { | |
2978 | ret = -ETIMEDOUT; | |
2979 | goto out; | |
2980 | } | |
2981 | cond_resched(); | |
2982 | } | |
2983 | status = data.bits.rdata; | |
2984 | ||
2985 | if (!(status & 0x4)) { | |
2986 | /* clear read fifo data */ | |
2987 | ctrl.bits.wdata = 0; | |
2988 | ctrl.bits.addr = SSD_I2C_DATA_REG; | |
2989 | ctrl.bits.rw = SSD_I2C_CTRL_READ; | |
2990 | __ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val); | |
2991 | ||
2992 | st = jiffies; | |
2993 | for (;;) { | |
2994 | data.val = __ssd_i2c_reg32_read(dev->ctrlp + SSD_I2C_RDATA_REG); | |
2995 | if (data.bits.valid == 0) { | |
2996 | break; | |
2997 | } | |
2998 | ||
2999 | /* retry */ | |
3000 | if (time_after(jiffies, (st + SSD_I2C_TIMEOUT))) { | |
3001 | ret = -ETIMEDOUT; | |
3002 | goto out; | |
3003 | } | |
3004 | cond_resched(); | |
3005 | } | |
3006 | ||
3007 | nr_data++; | |
3008 | if (nr_data <= SSD_I2C_MAX_DATA) { | |
3009 | goto check_status; | |
3010 | } else { | |
3011 | goto out_reset; | |
3012 | } | |
3013 | } | |
3014 | ||
3015 | if (status & 0x3) { | |
3016 | /* clear int */ | |
3017 | ctrl.bits.wdata = 0x04; | |
3018 | ctrl.bits.addr = SSD_I2C_CMD_REG; | |
3019 | ctrl.bits.rw = SSD_I2C_CTRL_WRITE; | |
3020 | __ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val); | |
3021 | } | |
3022 | ||
3023 | if (!(status & 0x8)) { | |
3024 | out_reset: | |
3025 | /* reset i2c controller */ | |
3026 | ctrl.bits.wdata = 0x0; | |
3027 | ctrl.bits.addr = SSD_I2C_RESET_REG; | |
3028 | ctrl.bits.rw = SSD_I2C_CTRL_WRITE; | |
3029 | __ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val); | |
3030 | } | |
3031 | ||
3032 | out: | |
3033 | return ret; | |
3034 | } | |
3035 | ||
/*
 * Master-transmit @size bytes from @buf to i2c slave @saddr.
 *
 * Returns 0 on success, -ETIMEDOUT on controller timeout, -1 if the
 * transfer never completed, -2 if the bus stayed busy, -3 on missing
 * ack, -4 if post-transfer cleanup failed. Serialized by dev->i2c_mutex.
 */
static int ssd_i2c_write(struct ssd_device *dev, uint8_t saddr, uint8_t size, uint8_t *buf)
{
	ssd_i2c_ctrl_t ctrl;
	ssd_i2c_data_t data;
	uint8_t off = 0;
	uint8_t status = 0;
	unsigned long st;
	int ret = 0;

	mutex_lock(&dev->i2c_mutex);

	ctrl.val = 0;

	/* slave addr */
	ctrl.bits.wdata = saddr;
	ctrl.bits.addr = SSD_I2C_SADDR_REG;
	ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
	__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

	/* data: load the payload into the controller's write fifo */
	while (off < size) {
		ctrl.bits.wdata = buf[off];
		ctrl.bits.addr = SSD_I2C_DATA_REG;
		ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
		__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

		off++;
	}

	/* write: command 0x01 starts the master-transmit */
	ctrl.bits.wdata = 0x01;
	ctrl.bits.addr = SSD_I2C_CMD_REG;
	ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
	__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

	/* wait: poll status until write-complete (bit 0) or timeout */
	st = jiffies;
	for (;;) {
		ctrl.bits.wdata = 0;
		ctrl.bits.addr = SSD_I2C_STATUS_REG;
		ctrl.bits.rw = SSD_I2C_CTRL_READ;
		__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

		/* inner loop: wait for the status read-back to become valid */
		for (;;) {
			data.val = __ssd_i2c_reg32_read(dev->ctrlp + SSD_I2C_RDATA_REG);
			if (data.bits.valid == 0) {
				break;
			}

			/* retry */
			if (time_after(jiffies, (st + SSD_I2C_TIMEOUT))) {
				ret = -ETIMEDOUT;
				goto out_clear;
			}
			cond_resched();
		}

		status = data.bits.rdata;
		if (status & 0x1) {
			break;
		}

		if (time_after(jiffies, (st + SSD_I2C_TIMEOUT))) {
			ret = -ETIMEDOUT;
			goto out_clear;
		}
		cond_resched();
	}

	if (!(status & 0x1)) {
		ret = -1;
		goto out_clear;
	}

	/* busy ? */
	if (status & 0x20) {
		ret = -2;
		goto out_clear;
	}

	/* ack ? */
	if (status & 0x10) {
		ret = -3;
		goto out_clear;
	}

	/* clear */
out_clear:
	if (__ssd_i2c_clear(dev, saddr)) {
		if (!ret) ret = -4;
	}

	mutex_unlock(&dev->i2c_mutex);

	return ret;
}
3132 | ||
/*
 * Master-receive @size bytes from i2c slave @saddr into @buf.
 *
 * Returns 0 on success, -ETIMEDOUT on controller timeout, -1 if the
 * read never completed, -2 if the bus stayed busy, -3 on missing ack,
 * -4 if post-transfer cleanup failed. Serialized by dev->i2c_mutex.
 */
static int ssd_i2c_read(struct ssd_device *dev, uint8_t saddr, uint8_t size, uint8_t *buf)
{
	ssd_i2c_ctrl_t ctrl;
	ssd_i2c_data_t data;
	uint8_t off = 0;
	uint8_t status = 0;
	unsigned long st;
	int ret = 0;

	mutex_lock(&dev->i2c_mutex);

	ctrl.val = 0;

	/* slave addr */
	ctrl.bits.wdata = saddr;
	ctrl.bits.addr = SSD_I2C_SADDR_REG;
	ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
	__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

	/* read len */
	ctrl.bits.wdata = size;
	ctrl.bits.addr = SSD_I2C_LEN_REG;
	ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
	__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

	/* read: command 0x02 starts the master-receive */
	ctrl.bits.wdata = 0x02;
	ctrl.bits.addr = SSD_I2C_CMD_REG;
	ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
	__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

	/* wait: poll status until read-complete (bit 1) or timeout */
	st = jiffies;
	for (;;) {
		ctrl.bits.wdata = 0;
		ctrl.bits.addr = SSD_I2C_STATUS_REG;
		ctrl.bits.rw = SSD_I2C_CTRL_READ;
		__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

		/* inner loop: wait for the status read-back to become valid */
		for (;;) {
			data.val = __ssd_i2c_reg32_read(dev->ctrlp + SSD_I2C_RDATA_REG);
			if (data.bits.valid == 0) {
				break;
			}

			/* retry */
			if (time_after(jiffies, (st + SSD_I2C_TIMEOUT))) {
				ret = -ETIMEDOUT;
				goto out_clear;
			}
			cond_resched();
		}

		status = data.bits.rdata;
		if (status & 0x2) {
			break;
		}

		if (time_after(jiffies, (st + SSD_I2C_TIMEOUT))) {
			ret = -ETIMEDOUT;
			goto out_clear;
		}
		cond_resched();
	}

	if (!(status & 0x2)) {
		ret = -1;
		goto out_clear;
	}

	/* busy ? */
	if (status & 0x20) {
		ret = -2;
		goto out_clear;
	}

	/* ack ? */
	if (status & 0x10) {
		ret = -3;
		goto out_clear;
	}

	/* data: pull the received bytes out of the read fifo one at a time */
	while (off < size) {
		ctrl.bits.wdata = 0;
		ctrl.bits.addr = SSD_I2C_DATA_REG;
		ctrl.bits.rw = SSD_I2C_CTRL_READ;
		__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

		st = jiffies;
		for (;;) {
			data.val = __ssd_i2c_reg32_read(dev->ctrlp + SSD_I2C_RDATA_REG);
			if (data.bits.valid == 0) {
				break;
			}

			/* retry */
			if (time_after(jiffies, (st + SSD_I2C_TIMEOUT))) {
				ret = -ETIMEDOUT;
				goto out_clear;
			}
			cond_resched();
		}

		buf[off] = data.bits.rdata;

		off++;
	}

	/* clear */
out_clear:
	if (__ssd_i2c_clear(dev, saddr)) {
		if (!ret) ret = -4;
	}

	mutex_unlock(&dev->i2c_mutex);

	return ret;
}
3252 | ||
/*
 * Combined write-then-read transaction with i2c slave @saddr:
 * transmit @wsize bytes from @wbuf, then receive @rsize bytes into
 * @rbuf (typical register-read pattern).
 *
 * Returns 0 on success, -ETIMEDOUT on controller timeout, -1 if the
 * transfer never completed, -2 if the bus stayed busy, -3 on missing
 * ack, -4 if post-transfer cleanup failed. Serialized by dev->i2c_mutex.
 */
static int ssd_i2c_write_read(struct ssd_device *dev, uint8_t saddr, uint8_t wsize, uint8_t *wbuf, uint8_t rsize, uint8_t *rbuf)
{
	ssd_i2c_ctrl_t ctrl;
	ssd_i2c_data_t data;
	uint8_t off = 0;
	uint8_t status = 0;
	unsigned long st;
	int ret = 0;

	mutex_lock(&dev->i2c_mutex);

	ctrl.val = 0;

	/* slave addr */
	ctrl.bits.wdata = saddr;
	ctrl.bits.addr = SSD_I2C_SADDR_REG;
	ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
	__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

	/* data: load the outgoing bytes into the write fifo */
	off = 0;
	while (off < wsize) {
		ctrl.bits.wdata = wbuf[off];
		ctrl.bits.addr = SSD_I2C_DATA_REG;
		ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
		__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

		off++;
	}

	/* read len */
	ctrl.bits.wdata = rsize;
	ctrl.bits.addr = SSD_I2C_LEN_REG;
	ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
	__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

	/* write -> read: command 0x03 runs the combined transaction */
	ctrl.bits.wdata = 0x03;
	ctrl.bits.addr = SSD_I2C_CMD_REG;
	ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
	__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

	/* wait: poll status until read-complete (bit 1) or timeout */
	st = jiffies;
	for (;;) {
		ctrl.bits.wdata = 0;
		ctrl.bits.addr = SSD_I2C_STATUS_REG;
		ctrl.bits.rw = SSD_I2C_CTRL_READ;
		__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

		/* inner loop: wait for the status read-back to become valid */
		for (;;) {
			data.val = __ssd_i2c_reg32_read(dev->ctrlp + SSD_I2C_RDATA_REG);
			if (data.bits.valid == 0) {
				break;
			}

			/* retry */
			if (time_after(jiffies, (st + SSD_I2C_TIMEOUT))) {
				ret = -ETIMEDOUT;
				goto out_clear;
			}
			cond_resched();
		}

		status = data.bits.rdata;
		if (status & 0x2) {
			break;
		}

		if (time_after(jiffies, (st + SSD_I2C_TIMEOUT))) {
			ret = -ETIMEDOUT;
			goto out_clear;
		}
		cond_resched();
	}

	if (!(status & 0x2)) {
		ret = -1;
		goto out_clear;
	}

	/* busy ? */
	if (status & 0x20) {
		ret = -2;
		goto out_clear;
	}

	/* ack ? */
	if (status & 0x10) {
		ret = -3;
		goto out_clear;
	}

	/* data: pull the received bytes out of the read fifo one at a time */
	off = 0;
	while (off < rsize) {
		ctrl.bits.wdata = 0;
		ctrl.bits.addr = SSD_I2C_DATA_REG;
		ctrl.bits.rw = SSD_I2C_CTRL_READ;
		__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

		st = jiffies;
		for (;;) {
			data.val = __ssd_i2c_reg32_read(dev->ctrlp + SSD_I2C_RDATA_REG);
			if (data.bits.valid == 0) {
				break;
			}

			/* retry */
			if (time_after(jiffies, (st + SSD_I2C_TIMEOUT))) {
				ret = -ETIMEDOUT;
				goto out_clear;
			}
			cond_resched();
		}

		rbuf[off] = data.bits.rdata;

		off++;
	}

	/* clear */
out_clear:
	if (__ssd_i2c_clear(dev, saddr)) {
		if (!ret) ret = -4;
	}
	mutex_unlock(&dev->i2c_mutex);

	return ret;
}
3383 | ||
3384 | static int ssd_smbus_send_byte(struct ssd_device *dev, uint8_t saddr, uint8_t *buf) | |
3385 | { | |
3386 | int i = 0; | |
3387 | int ret = 0; | |
3388 | ||
3389 | for (;;) { | |
3390 | ret = ssd_i2c_write(dev, saddr, 1, buf); | |
3391 | if (!ret || -ETIMEDOUT == ret) { | |
3392 | break; | |
3393 | } | |
3394 | ||
3395 | i++; | |
3396 | if (i >= SSD_SMBUS_RETRY_MAX) { | |
3397 | break; | |
3398 | } | |
3399 | msleep(SSD_SMBUS_RETRY_INTERVAL); | |
3400 | } | |
3401 | ||
3402 | return ret; | |
3403 | } | |
3404 | ||
3405 | static int ssd_smbus_receive_byte(struct ssd_device *dev, uint8_t saddr, uint8_t *buf) | |
3406 | { | |
3407 | int i = 0; | |
3408 | int ret = 0; | |
3409 | ||
3410 | for (;;) { | |
3411 | ret = ssd_i2c_read(dev, saddr, 1, buf); | |
3412 | if (!ret || -ETIMEDOUT == ret) { | |
3413 | break; | |
3414 | } | |
3415 | ||
3416 | i++; | |
3417 | if (i >= SSD_SMBUS_RETRY_MAX) { | |
3418 | break; | |
3419 | } | |
3420 | msleep(SSD_SMBUS_RETRY_INTERVAL); | |
3421 | } | |
3422 | ||
3423 | return ret; | |
3424 | } | |
3425 | ||
3426 | static int ssd_smbus_write_byte(struct ssd_device *dev, uint8_t saddr, uint8_t cmd, uint8_t *buf) | |
3427 | { | |
3428 | uint8_t smb_data[SSD_SMBUS_DATA_MAX] = {0}; | |
3429 | int i = 0; | |
3430 | int ret = 0; | |
3431 | ||
3432 | smb_data[0] = cmd; | |
3433 | memcpy((smb_data + 1), buf, 1); | |
3434 | ||
3435 | for (;;) { | |
3436 | ret = ssd_i2c_write(dev, saddr, 2, smb_data); | |
3437 | if (!ret || -ETIMEDOUT == ret) { | |
3438 | break; | |
3439 | } | |
3440 | ||
3441 | i++; | |
3442 | if (i >= SSD_SMBUS_RETRY_MAX) { | |
3443 | break; | |
3444 | } | |
3445 | msleep(SSD_SMBUS_RETRY_INTERVAL); | |
3446 | } | |
3447 | ||
3448 | return ret; | |
3449 | } | |
3450 | ||
3451 | static int ssd_smbus_read_byte(struct ssd_device *dev, uint8_t saddr, uint8_t cmd, uint8_t *buf) | |
3452 | { | |
3453 | uint8_t smb_data[SSD_SMBUS_DATA_MAX] = {0}; | |
3454 | int i = 0; | |
3455 | int ret = 0; | |
3456 | ||
3457 | smb_data[0] = cmd; | |
3458 | ||
3459 | for (;;) { | |
3460 | ret = ssd_i2c_write_read(dev, saddr, 1, smb_data, 1, buf); | |
3461 | if (!ret || -ETIMEDOUT == ret) { | |
3462 | break; | |
3463 | } | |
3464 | ||
3465 | i++; | |
3466 | if (i >= SSD_SMBUS_RETRY_MAX) { | |
3467 | break; | |
3468 | } | |
3469 | msleep(SSD_SMBUS_RETRY_INTERVAL); | |
3470 | } | |
3471 | ||
3472 | return ret; | |
3473 | } | |
3474 | ||
3475 | static int ssd_smbus_write_word(struct ssd_device *dev, uint8_t saddr, uint8_t cmd, uint8_t *buf) | |
3476 | { | |
3477 | uint8_t smb_data[SSD_SMBUS_DATA_MAX] = {0}; | |
3478 | int i = 0; | |
3479 | int ret = 0; | |
3480 | ||
3481 | smb_data[0] = cmd; | |
3482 | memcpy((smb_data + 1), buf, 2); | |
3483 | ||
3484 | for (;;) { | |
3485 | ret = ssd_i2c_write(dev, saddr, 3, smb_data); | |
3486 | if (!ret || -ETIMEDOUT == ret) { | |
3487 | break; | |
3488 | } | |
3489 | ||
3490 | i++; | |
3491 | if (i >= SSD_SMBUS_RETRY_MAX) { | |
3492 | break; | |
3493 | } | |
3494 | msleep(SSD_SMBUS_RETRY_INTERVAL); | |
3495 | } | |
3496 | ||
3497 | return ret; | |
3498 | } | |
3499 | ||
3500 | static int ssd_smbus_read_word(struct ssd_device *dev, uint8_t saddr, uint8_t cmd, uint8_t *buf) | |
3501 | { | |
3502 | uint8_t smb_data[SSD_SMBUS_DATA_MAX] = {0}; | |
3503 | int i = 0; | |
3504 | int ret = 0; | |
3505 | ||
3506 | smb_data[0] = cmd; | |
3507 | ||
3508 | for (;;) { | |
3509 | ret = ssd_i2c_write_read(dev, saddr, 1, smb_data, 2, buf); | |
3510 | if (!ret || -ETIMEDOUT == ret) { | |
3511 | break; | |
3512 | } | |
3513 | ||
3514 | i++; | |
3515 | if (i >= SSD_SMBUS_RETRY_MAX) { | |
3516 | break; | |
3517 | } | |
3518 | msleep(SSD_SMBUS_RETRY_INTERVAL); | |
3519 | } | |
3520 | ||
3521 | return ret; | |
3522 | } | |
3523 | ||
3524 | static int ssd_smbus_write_block(struct ssd_device *dev, uint8_t saddr, uint8_t cmd, uint8_t size, uint8_t *buf) | |
3525 | { | |
3526 | uint8_t smb_data[SSD_SMBUS_DATA_MAX] = {0}; | |
3527 | int i = 0; | |
3528 | int ret = 0; | |
3529 | ||
3530 | smb_data[0] = cmd; | |
3531 | smb_data[1] = size; | |
3532 | memcpy((smb_data + 2), buf, size); | |
3533 | ||
3534 | for (;;) { | |
3535 | ret = ssd_i2c_write(dev, saddr, (2 + size), smb_data); | |
3536 | if (!ret || -ETIMEDOUT == ret) { | |
3537 | break; | |
3538 | } | |
3539 | ||
3540 | i++; | |
3541 | if (i >= SSD_SMBUS_RETRY_MAX) { | |
3542 | break; | |
3543 | } | |
3544 | msleep(SSD_SMBUS_RETRY_INTERVAL); | |
3545 | } | |
3546 | ||
3547 | return ret; | |
3548 | } | |
3549 | ||
3550 | static int ssd_smbus_read_block(struct ssd_device *dev, uint8_t saddr, uint8_t cmd, uint8_t size, uint8_t *buf) | |
3551 | { | |
3552 | uint8_t smb_data[SSD_SMBUS_DATA_MAX] = {0}; | |
3553 | uint8_t rsize; | |
3554 | int i = 0; | |
3555 | int ret = 0; | |
3556 | ||
3557 | smb_data[0] = cmd; | |
3558 | ||
3559 | for (;;) { | |
3560 | ret = ssd_i2c_write_read(dev, saddr, 1, smb_data, (SSD_SMBUS_BLOCK_MAX + 1), (smb_data + 1)); | |
3561 | if (!ret || -ETIMEDOUT == ret) { | |
3562 | break; | |
3563 | } | |
3564 | ||
3565 | i++; | |
3566 | if (i >= SSD_SMBUS_RETRY_MAX) { | |
3567 | break; | |
3568 | } | |
3569 | msleep(SSD_SMBUS_RETRY_INTERVAL); | |
3570 | } | |
3571 | if (ret) { | |
3572 | return ret; | |
3573 | } | |
3574 | ||
3575 | rsize = smb_data[1]; | |
3576 | ||
3577 | if (rsize > size ) { | |
3578 | rsize = size; | |
3579 | } | |
3580 | ||
3581 | memcpy(buf, (smb_data + 2), rsize); | |
3582 | ||
3583 | return 0; | |
3584 | } | |
3585 | ||
3586 | ||
3587 | static int ssd_gen_swlog(struct ssd_device *dev, uint16_t event, uint32_t data); | |
3588 | ||
3589 | /* sensor */ | |
3590 | static int ssd_init_lm75(struct ssd_device *dev, uint8_t saddr) | |
3591 | { | |
3592 | uint8_t conf = 0; | |
3593 | int ret = 0; | |
3594 | ||
3595 | ret = ssd_smbus_read_byte(dev, saddr, SSD_LM75_REG_CONF, &conf); | |
3596 | if (ret) { | |
3597 | goto out; | |
3598 | } | |
3599 | ||
3600 | conf &= (uint8_t)(~1u); | |
3601 | ||
3602 | ret = ssd_smbus_write_byte(dev, saddr, SSD_LM75_REG_CONF, &conf); | |
3603 | if (ret) { | |
3604 | goto out; | |
3605 | } | |
3606 | ||
3607 | out: | |
3608 | return ret; | |
3609 | } | |
3610 | ||
3611 | static int ssd_lm75_read(struct ssd_device *dev, uint8_t saddr, uint16_t *data) | |
3612 | { | |
3613 | uint16_t val = 0; | |
3614 | int ret; | |
3615 | ||
3616 | ret = ssd_smbus_read_word(dev, saddr, SSD_LM75_REG_TEMP, (uint8_t *)&val); | |
3617 | if (ret) { | |
3618 | return ret; | |
3619 | } | |
3620 | ||
3621 | *data = u16_swap(val); | |
3622 | ||
3623 | return 0; | |
3624 | } | |
3625 | ||
/*
 * Initialize an LM80 hardware monitor at slave address @saddr:
 * soft-init, select 11-bit temperature resolution, program per-channel
 * voltage limits, set the interrupt masks, then start monitoring.
 * Returns 0 on success or the first ssd_smbus_* error.
 */
static int ssd_init_lm80(struct ssd_device *dev, uint8_t saddr)
{
	uint8_t val;
	uint8_t low, high;
	int i;
	int ret = 0;

	/* init */
	val = 0x80;
	ret = ssd_smbus_write_byte(dev, saddr, SSD_LM80_REG_CONFIG, &val);
	if (ret) {
		goto out;
	}

	/* 11-bit temp */
	val = 0x08;
	ret = ssd_smbus_write_byte(dev, saddr, SSD_LM80_REG_RES, &val);
	if (ret) {
		goto out;
	}

	/* set volt limit */
	for (i=0; i<SSD_LM80_IN_NR; i++) {
		high = ssd_lm80_limit[i].high;
		low = ssd_lm80_limit[i].low;

		/* capacitor input: no low limit */
		if (SSD_LM80_IN_CAP == i) {
			low = 0;
		}

		/* single-controller boards: effectively disable the 1.2V limits */
		if (dev->hw_info.nr_ctrl <= 1 && SSD_LM80_IN_1V2 == i) {
			high = 0xFF;
			low = 0;
		}

		/* high limit */
		ret = ssd_smbus_write_byte(dev, saddr, SSD_LM80_REG_IN_MAX(i), &high);
		if (ret) {
			goto out;
		}

		/* low limit*/
		ret = ssd_smbus_write_byte(dev, saddr, SSD_LM80_REG_IN_MIN(i), &low);
		if (ret) {
			goto out;
		}
	}

	/* set interrupt mask: allow volt in interrupt except cap in*/
	val = 0x81;
	ret = ssd_smbus_write_byte(dev, saddr, SSD_LM80_REG_MASK1, &val);
	if (ret) {
		goto out;
	}

	/* set interrupt mask: disable others */
	val = 0xFF;
	ret = ssd_smbus_write_byte(dev, saddr, SSD_LM80_REG_MASK2, &val);
	if (ret) {
		goto out;
	}

	/* start */
	val = 0x03;
	ret = ssd_smbus_write_byte(dev, saddr, SSD_LM80_REG_CONFIG, &val);
	if (ret) {
		goto out;
	}

out:
	return ret;
}
3698 | ||
3699 | static int ssd_lm80_enable_in(struct ssd_device *dev, uint8_t saddr, int idx) | |
3700 | { | |
3701 | uint8_t val = 0; | |
3702 | int ret = 0; | |
3703 | ||
3704 | if (idx >= SSD_LM80_IN_NR || idx < 0) { | |
3705 | return -EINVAL; | |
3706 | } | |
3707 | ||
3708 | ret = ssd_smbus_read_byte(dev, saddr, SSD_LM80_REG_MASK1, &val); | |
3709 | if (ret) { | |
3710 | goto out; | |
3711 | } | |
3712 | ||
3713 | val &= ~(1UL << (uint32_t)idx); | |
3714 | ||
3715 | ret = ssd_smbus_write_byte(dev, saddr, SSD_LM80_REG_MASK1, &val); | |
3716 | if (ret) { | |
3717 | goto out; | |
3718 | } | |
3719 | ||
3720 | out: | |
3721 | return ret; | |
3722 | } | |
3723 | ||
3724 | static int ssd_lm80_disable_in(struct ssd_device *dev, uint8_t saddr, int idx) | |
3725 | { | |
3726 | uint8_t val = 0; | |
3727 | int ret = 0; | |
3728 | ||
3729 | if (idx >= SSD_LM80_IN_NR || idx < 0) { | |
3730 | return -EINVAL; | |
3731 | } | |
3732 | ||
3733 | ret = ssd_smbus_read_byte(dev, saddr, SSD_LM80_REG_MASK1, &val); | |
3734 | if (ret) { | |
3735 | goto out; | |
3736 | } | |
3737 | ||
3738 | val |= (1UL << (uint32_t)idx); | |
3739 | ||
3740 | ret = ssd_smbus_write_byte(dev, saddr, SSD_LM80_REG_MASK1, &val); | |
3741 | if (ret) { | |
3742 | goto out; | |
3743 | } | |
3744 | ||
3745 | out: | |
3746 | return ret; | |
3747 | } | |
3748 | ||
3749 | static int ssd_lm80_read_temp(struct ssd_device *dev, uint8_t saddr, uint16_t *data) | |
3750 | { | |
3751 | uint16_t val = 0; | |
3752 | int ret; | |
3753 | ||
3754 | ret = ssd_smbus_read_word(dev, saddr, SSD_LM80_REG_TEMP, (uint8_t *)&val); | |
3755 | if (ret) { | |
3756 | return ret; | |
3757 | } | |
3758 | ||
3759 | *data = u16_swap(val); | |
3760 | ||
3761 | return 0; | |
3762 | } | |
da3355df SF |
3763 | static int ssd_generate_sensor_fault_log(struct ssd_device *dev, uint16_t event, uint8_t addr,uint32_t ret) |
3764 | { | |
3765 | uint32_t data; | |
3766 | data = ((ret & 0xffff) << 16) | (addr << 8) | addr; | |
3767 | ssd_gen_swlog(dev,event,data); | |
3768 | return 0; | |
3769 | } | |
361ebed5 HSDT |
/*
 * Poll the LM80 at SMBus address @saddr for voltage alarm events.
 *
 * Reads (and thereby clears) both alarm status registers, then for each
 * voltage channel: re-enables the channel interrupt when its alarm has
 * cleared, or masks it and — after debouncing the reading — emits the
 * appropriate software log entry when an alarm is newly raised.
 * Per-channel "already reported" state is latched in dev->hwmon bits.
 *
 * Returns 0 on success or the first SMBus error encountered; a persistent
 * SMBus failure is itself logged once as a sensor fault.
 */
static int ssd_lm80_check_event(struct ssd_device *dev, uint8_t saddr)
{
	uint32_t volt;
	uint16_t val = 0, status;
	uint8_t alarm1 = 0, alarm2 = 0;
	uint32_t low, high;
	int i,j=0;
	int ret = 0;

	/* read interrupt status to clear interrupt */
	ret = ssd_smbus_read_byte(dev, saddr, SSD_LM80_REG_ALARM1, &alarm1);
	if (ret) {
		goto out;
	}

	ret = ssd_smbus_read_byte(dev, saddr, SSD_LM80_REG_ALARM2, &alarm2);
	if (ret) {
		goto out;
	}

	status = (uint16_t)alarm1 | ((uint16_t)alarm2 << 8);

	/* parse interrupt status, one bit per voltage channel */
	for (i=0; i<SSD_LM80_IN_NR; i++) {
		if (!((status >> (uint32_t)i) & 0x1)) {
			/* alarm cleared: re-enable the channel's interrupt */
			if (test_and_clear_bit(SSD_HWMON_LM80(i), &dev->hwmon)) {
				/* enable INx irq */
				ret = ssd_lm80_enable_in(dev, saddr, i);
				if (ret) {
					goto out;
				}
			}

			continue;
		}

		/* disable INx irq (avoid re-raising while the fault persists) */
		ret = ssd_lm80_disable_in(dev, saddr, i);
		if (ret) {
			goto out;
		}

		/* already latched and reported — don't log the same channel twice */
		if (test_and_set_bit(SSD_HWMON_LM80(i), &dev->hwmon)) {
			continue;
		}

		/* limits table is in units 10x smaller than the converted reading */
		high = (uint32_t)ssd_lm80_limit[i].high * (uint32_t)10;
		low = (uint32_t)ssd_lm80_limit[i].low * (uint32_t)10;

		/* debounce: require 3 consecutive out-of-range readings */
		for (j=0; j<3; j++) {
			ret = ssd_smbus_read_word(dev, saddr, SSD_LM80_REG_IN(i), (uint8_t *)&val);
			if (ret) {
				goto out;
			}
			volt = SSD_LM80_CONVERT_VOLT(u16_swap(val));
			if ((volt>high) || (volt<=low)) {
				if(j<2) {
					msleep(SSD_LM80_CONV_INTERVAL);
				}
			} else {
				break;
			}
		}

		/* a reading came back in range before 3 strikes: transient, ignore */
		if (j<3) {
			continue;
		}

		switch (i) {
		case SSD_LM80_IN_CAP: {
			/* 0V on the cap rail means a short circuit, not just drift */
			if (0 == volt) {
				ssd_gen_swlog(dev, SSD_LOG_CAP_SHORT_CIRCUIT, 0);
			} else {
				ssd_gen_swlog(dev, SSD_LOG_CAP_VOLT_FAULT, SSD_PL_CAP_VOLT(volt));
			}
			break;
		}

		case SSD_LM80_IN_1V2:
		case SSD_LM80_IN_1V2a:
		case SSD_LM80_IN_1V5:
		case SSD_LM80_IN_1V8: {
			ssd_gen_swlog(dev, SSD_LOG_VOLT_STATUS, SSD_VOLT_LOG_DATA(i, 0, volt));
			break;
		}
		case SSD_LM80_IN_FPGA_3V3:
		case SSD_LM80_IN_3V3: {
			/* 3.3V rails are divided down on-board; scale back up for the log */
			ssd_gen_swlog(dev, SSD_LOG_VOLT_STATUS, SSD_VOLT_LOG_DATA(i, 0, SSD_LM80_3V3_VOLT(volt)));
			break;
		}
		default:
			break;
		}
	}

out:
	if (ret) {
		/* SMBus access failed: log the sensor fault once until it recovers */
		if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80), &dev->hwmon)) {
			ssd_generate_sensor_fault_log(dev, SSD_LOG_SENSOR_FAULT, (uint32_t)saddr,ret);
		}
	} else {
		test_and_clear_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80), &dev->hwmon);
	}
	return ret;
}
3875 | ||
da3355df | 3876 | |
361ebed5 HSDT |
3877 | static int ssd_init_sensor(struct ssd_device *dev) |
3878 | { | |
3879 | int ret = 0; | |
3880 | ||
3881 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
3882 | goto out; | |
3883 | } | |
3884 | ||
3885 | ret = ssd_init_lm75(dev, SSD_SENSOR_LM75_SADDRESS); | |
3886 | if (ret) { | |
3887 | hio_warn("%s: init lm75 failed\n", dev->name); | |
3888 | if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM75), &dev->hwmon)) { | |
da3355df | 3889 | ssd_generate_sensor_fault_log(dev, SSD_LOG_SENSOR_FAULT, SSD_SENSOR_LM75_SADDRESS,ret); |
361ebed5 HSDT |
3890 | } |
3891 | goto out; | |
3892 | } | |
3893 | ||
3894 | if (dev->hw_info.pcb_ver >= 'B' || dev->hw_info_ext.form_factor == SSD_FORM_FACTOR_HHHL) { | |
3895 | ret = ssd_init_lm80(dev, SSD_SENSOR_LM80_SADDRESS); | |
3896 | if (ret) { | |
3897 | hio_warn("%s: init lm80 failed\n", dev->name); | |
3898 | if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80), &dev->hwmon)) { | |
da3355df | 3899 | ssd_generate_sensor_fault_log(dev, SSD_LOG_SENSOR_FAULT, SSD_SENSOR_LM80_SADDRESS,ret); |
361ebed5 HSDT |
3900 | } |
3901 | goto out; | |
3902 | } | |
3903 | } | |
3904 | ||
3905 | out: | |
3906 | /* skip error if not in standard mode */ | |
3907 | if (mode != SSD_DRV_MODE_STANDARD) { | |
3908 | ret = 0; | |
3909 | } | |
3910 | return ret; | |
3911 | } | |
3912 | ||
3913 | /* board volt */ | |
3914 | static int ssd_mon_boardvolt(struct ssd_device *dev) | |
3915 | { | |
3916 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
3917 | return 0; | |
3918 | } | |
3919 | ||
3920 | if (dev->hw_info_ext.form_factor == SSD_FORM_FACTOR_FHHL && dev->hw_info.pcb_ver < 'B') { | |
3921 | return 0; | |
3922 | } | |
3923 | ||
3924 | return ssd_lm80_check_event(dev, SSD_SENSOR_LM80_SADDRESS); | |
3925 | } | |
3926 | ||
3927 | /* temperature */ | |
3928 | static int ssd_mon_temp(struct ssd_device *dev) | |
3929 | { | |
3930 | int cur; | |
3931 | uint16_t val = 0; | |
3932 | int ret = 0; | |
3933 | ||
3934 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
3935 | return 0; | |
3936 | } | |
3937 | ||
3938 | if (dev->hw_info_ext.form_factor == SSD_FORM_FACTOR_FHHL && dev->hw_info.pcb_ver < 'B') { | |
3939 | return 0; | |
3940 | } | |
3941 | ||
3942 | /* inlet */ | |
3943 | ret = ssd_lm80_read_temp(dev, SSD_SENSOR_LM80_SADDRESS, &val); | |
3944 | if (ret) { | |
3945 | if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80), &dev->hwmon)) { | |
da3355df | 3946 | ssd_generate_sensor_fault_log(dev, SSD_LOG_TEMP_SENSOR_EVENT, SSD_SENSOR_LM80_SADDRESS,ret); |
361ebed5 HSDT |
3947 | } |
3948 | goto out; | |
3949 | } | |
3950 | test_and_clear_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80), &dev->hwmon); | |
3951 | ||
3952 | cur = SSD_SENSOR_CONVERT_TEMP(val); | |
3953 | if (cur >= SSD_INLET_OT_TEMP) { | |
3954 | if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_INLET), &dev->hwmon)) { | |
3955 | ssd_gen_swlog(dev, SSD_LOG_INLET_OVER_TEMP, (uint32_t)cur); | |
3956 | } | |
3957 | } else if(cur < SSD_INLET_OT_HYST) { | |
3958 | if (test_and_clear_bit(SSD_HWMON_TEMP(SSD_TEMP_INLET), &dev->hwmon)) { | |
3959 | ssd_gen_swlog(dev, SSD_LOG_INLET_NORMAL_TEMP, (uint32_t)cur); | |
3960 | } | |
3961 | } | |
3962 | ||
3963 | /* flash */ | |
3964 | ret = ssd_lm75_read(dev, SSD_SENSOR_LM75_SADDRESS, &val); | |
3965 | if (ret) { | |
3966 | if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM75), &dev->hwmon)) { | |
da3355df | 3967 | ssd_generate_sensor_fault_log(dev, SSD_LOG_TEMP_SENSOR_EVENT, SSD_SENSOR_LM75_SADDRESS,ret); |
361ebed5 HSDT |
3968 | } |
3969 | goto out; | |
3970 | } | |
3971 | test_and_clear_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM75), &dev->hwmon); | |
3972 | ||
3973 | cur = SSD_SENSOR_CONVERT_TEMP(val); | |
3974 | if (cur >= SSD_FLASH_OT_TEMP) { | |
3975 | if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_FLASH), &dev->hwmon)) { | |
3976 | ssd_gen_swlog(dev, SSD_LOG_FLASH_OVER_TEMP, (uint32_t)cur); | |
3977 | } | |
3978 | } else if(cur < SSD_FLASH_OT_HYST) { | |
3979 | if (test_and_clear_bit(SSD_HWMON_TEMP(SSD_TEMP_FLASH), &dev->hwmon)) { | |
3980 | ssd_gen_swlog(dev, SSD_LOG_FLASH_NORMAL_TEMP, (uint32_t)cur); | |
3981 | } | |
3982 | } | |
3983 | ||
3984 | out: | |
3985 | return ret; | |
3986 | } | |
3987 | ||
3988 | /* cmd tag */ | |
/* Release a command tag and wake any allocator sleeping in ssd_get_tag(). */
static inline void ssd_put_tag(struct ssd_device *dev, int tag)
{
	test_and_clear_bit(tag, dev->tag_map);
	wake_up(&dev->tag_wq);
}
3994 | ||
/*
 * Allocate a free command tag.
 *
 * @wait: nonzero to sleep (uninterruptibly) until a tag is available;
 *        zero to fail immediately.
 *
 * A tag is only usable while it lies below the current queue depth;
 * ssd_barrier_start() sets queue_depth to 0 to stall all new commands.
 * Returns the tag index, or -1 when @wait is 0 and none is free.
 */
static inline int ssd_get_tag(struct ssd_device *dev, int wait)
{
	int tag;

find_tag:
	while ((tag = find_first_zero_bit(dev->tag_map, dev->hw_info.cmd_fifo_sz)) >= atomic_read(&dev->queue_depth)) {
		DEFINE_WAIT(__wait);

		if (!wait) {
			return -1;
		}

		prepare_to_wait_exclusive(&dev->tag_wq, &__wait, TASK_UNINTERRUPTIBLE);
		schedule();

		finish_wait(&dev->tag_wq, &__wait);
	}

	/* lost the race for this bit to another allocator: retry */
	if (test_and_set_bit(tag, dev->tag_map)) {
		goto find_tag;
	}

	return tag;
}
4019 | ||
/* Release the barrier tag; no wakeup — the barrier path manages waiters itself. */
static void ssd_barrier_put_tag(struct ssd_device *dev, int tag)
{
	test_and_clear_bit(tag, dev->tag_map);
}
4024 | ||
4025 | static int ssd_barrier_get_tag(struct ssd_device *dev) | |
4026 | { | |
4027 | int tag = 0; | |
4028 | ||
4029 | if (test_and_set_bit(tag, dev->tag_map)) { | |
4030 | return -1; | |
4031 | } | |
4032 | ||
4033 | return tag; | |
4034 | } | |
4035 | ||
/*
 * End an I/O barrier: restore the full queue depth so tags can be taken
 * again, wake every waiter, then drop the barrier lock taken by
 * ssd_barrier_start().
 */
static void ssd_barrier_end(struct ssd_device *dev)
{
	atomic_set(&dev->queue_depth, dev->hw_info.cmd_fifo_sz);
	wake_up_all(&dev->tag_wq);

	mutex_unlock(&dev->barrier_mutex);
}
4043 | ||
4044 | static int ssd_barrier_start(struct ssd_device *dev) | |
4045 | { | |
4046 | int i; | |
4047 | ||
4048 | mutex_lock(&dev->barrier_mutex); | |
4049 | ||
4050 | atomic_set(&dev->queue_depth, 0); | |
4051 | ||
4052 | for (i=0; i<SSD_CMD_TIMEOUT; i++) { | |
4053 | if (find_first_bit(dev->tag_map, dev->hw_info.cmd_fifo_sz) >= dev->hw_info.cmd_fifo_sz) { | |
4054 | return 0; | |
4055 | } | |
4056 | ||
4057 | __set_current_state(TASK_INTERRUPTIBLE); | |
4058 | schedule_timeout(1); | |
4059 | } | |
4060 | ||
4061 | atomic_set(&dev->queue_depth, dev->hw_info.cmd_fifo_sz); | |
4062 | wake_up_all(&dev->tag_wq); | |
4063 | ||
4064 | mutex_unlock(&dev->barrier_mutex); | |
4065 | ||
4066 | return -EBUSY; | |
4067 | } | |
4068 | ||
4069 | static int ssd_busy(struct ssd_device *dev) | |
4070 | { | |
4071 | if (find_first_bit(dev->tag_map, dev->hw_info.cmd_fifo_sz) >= dev->hw_info.cmd_fifo_sz) { | |
4072 | return 0; | |
4073 | } | |
4074 | ||
4075 | return 1; | |
4076 | } | |
4077 | ||
4078 | static int ssd_wait_io(struct ssd_device *dev) | |
4079 | { | |
4080 | int i; | |
4081 | ||
4082 | for (i=0; i<SSD_CMD_TIMEOUT; i++) { | |
4083 | if (find_first_bit(dev->tag_map, dev->hw_info.cmd_fifo_sz) >= dev->hw_info.cmd_fifo_sz) { | |
4084 | return 0; | |
4085 | } | |
4086 | ||
4087 | __set_current_state(TASK_INTERRUPTIBLE); | |
4088 | schedule_timeout(1); | |
4089 | } | |
4090 | ||
4091 | return -EBUSY; | |
4092 | } | |
4093 | ||
4094 | #if 0 | |
4095 | static int ssd_in_barrier(struct ssd_device *dev) | |
4096 | { | |
4097 | return (0 == atomic_read(&dev->queue_depth)); | |
4098 | } | |
4099 | #endif | |
4100 | ||
4101 | static void ssd_cleanup_tag(struct ssd_device *dev) | |
4102 | { | |
4103 | kfree(dev->tag_map); | |
4104 | } | |
4105 | ||
4106 | static int ssd_init_tag(struct ssd_device *dev) | |
4107 | { | |
4108 | int nr_ulongs = ALIGN(dev->hw_info.cmd_fifo_sz, BITS_PER_LONG) / BITS_PER_LONG; | |
4109 | ||
4110 | mutex_init(&dev->barrier_mutex); | |
4111 | ||
4112 | atomic_set(&dev->queue_depth, dev->hw_info.cmd_fifo_sz); | |
4113 | ||
4114 | dev->tag_map = kmalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC); | |
4115 | if (!dev->tag_map) { | |
4116 | return -ENOMEM; | |
4117 | } | |
4118 | ||
4119 | memset(dev->tag_map, 0, nr_ulongs * sizeof(unsigned long)); | |
4120 | ||
4121 | init_waitqueue_head(&dev->tag_wq); | |
4122 | ||
4123 | return 0; | |
4124 | } | |
4125 | ||
4126 | /* io stat */ | |
/*
 * Account the completion of a bio: add the elapsed ticks to the disk/
 * partition statistics and decrement the in-flight counter.
 *
 * Each #if branch targets a different kernel statistics API generation;
 * pre-3.0 kernels have no per-partition in-flight locking, so the driver
 * guards the counter with its own dev->in_flight_lock there.
 */
static void ssd_end_io_acct(struct ssd_cmd *cmd)
{
	struct ssd_device *dev = cmd->dev;
	struct bio *bio = cmd->bio;
	unsigned long dur = jiffies - cmd->start_time;
	int rw = bio_data_dir(bio);
#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6 && RHEL_MINOR >= 7))
#else
	unsigned long flag;
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0))
	/* 4.14+: the generic accounting helper does everything */
	struct hd_struct *part = disk_map_sector_rcu(dev->gd, bio_start(bio));
	generic_end_io_acct(dev->rq, rw, part, cmd->start_time);
#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6 && RHEL_MINOR >= 7))
	/* per-cpu partition stats with kernel-managed in-flight counter */
	int cpu = part_stat_lock();
	struct hd_struct *part = disk_map_sector_rcu(dev->gd, bio_start(bio));
	part_round_stats(cpu, part);
	part_stat_add(cpu, part, ticks[rw], dur);
	part_dec_in_flight(part, rw);
	part_stat_unlock();
#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
	/* per-cpu stats, but in_flight must be guarded by our own lock */
	int cpu = part_stat_lock();
	struct hd_struct *part = &dev->gd->part0;
	part_round_stats(cpu, part);
	part_stat_add(cpu, part, ticks[rw], dur);

	spin_lock_irqsave(&dev->in_flight_lock,flag);
	part->in_flight[rw]--;
	spin_unlock_irqrestore(&dev->in_flight_lock,flag);

	part_stat_unlock();

#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14))
	/* whole-disk stats with direction-indexed tick counters */
	preempt_disable();
	disk_round_stats(dev->gd);
	disk_stat_add(dev->gd, ticks[rw], dur);

	spin_lock_irqsave(&dev->in_flight_lock,flag);
	dev->gd->in_flight--;
	spin_unlock_irqrestore(&dev->in_flight_lock,flag);

	preempt_enable();

#else
	/* oldest API: separate read/write tick counters */
	preempt_disable();
	disk_round_stats(dev->gd);
	if (rw == WRITE) {
		disk_stat_add(dev->gd, write_ticks, dur);
	} else {
		disk_stat_add(dev->gd, read_ticks, dur);
	}
	spin_lock_irqsave(&dev->in_flight_lock,flag);
	dev->gd->in_flight--;
	spin_unlock_irqrestore(&dev->in_flight_lock,flag);

	preempt_enable();

#endif
}
4187 | ||
/*
 * Account the start of a bio: bump the ios/sectors statistics and the
 * in-flight counter, and record the start time used later by
 * ssd_end_io_acct().
 *
 * Mirrors ssd_end_io_acct(): one #if branch per kernel statistics API
 * generation, with a driver-private in_flight_lock on pre-3.0 kernels.
 */
static void ssd_start_io_acct(struct ssd_cmd *cmd)
{
	struct ssd_device *dev = cmd->dev;
	struct bio *bio = cmd->bio;
	int rw = bio_data_dir(bio);
#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6 && RHEL_MINOR >= 7))
#else
	unsigned long flag;
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0))
	/* 4.14+: the generic accounting helper does everything */
	struct hd_struct *part = disk_map_sector_rcu(dev->gd, bio_start(bio));
	generic_start_io_acct(dev->rq, rw, bio_sectors(bio), part);
#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6 && RHEL_MINOR >= 7))
	/* per-cpu partition stats with kernel-managed in-flight counter */
	int cpu = part_stat_lock();
	struct hd_struct *part = disk_map_sector_rcu(dev->gd, bio_start(bio));
	part_round_stats(cpu, part);
	part_stat_inc(cpu, part, ios[rw]);
	part_stat_add(cpu, part, sectors[rw], bio_sectors(bio));
	part_inc_in_flight(part, rw);
	part_stat_unlock();
#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
	/* per-cpu stats, but in_flight must be guarded by our own lock */
	int cpu = part_stat_lock();
	struct hd_struct *part = &dev->gd->part0;
	part_round_stats(cpu, part);
	part_stat_inc(cpu, part, ios[rw]);
	part_stat_add(cpu, part, sectors[rw], bio_sectors(bio));

	spin_lock_irqsave(&dev->in_flight_lock,flag);
	part->in_flight[rw]++;
	spin_unlock_irqrestore(&dev->in_flight_lock,flag);

	part_stat_unlock();

#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14))
	/* whole-disk stats with direction-indexed counters */
	preempt_disable();
	disk_round_stats(dev->gd);
	disk_stat_inc(dev->gd, ios[rw]);
	disk_stat_add(dev->gd, sectors[rw], bio_sectors(bio));

	spin_lock_irqsave(&dev->in_flight_lock,flag);
	dev->gd->in_flight++;
	spin_unlock_irqrestore(&dev->in_flight_lock,flag);

	preempt_enable();
#else
	/* oldest API: separate read/write counters */
	preempt_disable();
	disk_round_stats(dev->gd);
	if (rw == WRITE) {
		disk_stat_inc(dev->gd, writes);
		disk_stat_add(dev->gd, write_sectors, bio_sectors(bio));
	} else {
		disk_stat_inc(dev->gd, reads);
		disk_stat_add(dev->gd, read_sectors, bio_sectors(bio));
	}

	spin_lock_irqsave(&dev->in_flight_lock,flag);
	dev->gd->in_flight++;
	spin_unlock_irqrestore(&dev->in_flight_lock,flag);

	preempt_enable();

#endif

	cmd->start_time = jiffies;
}
4254 | ||
4255 | /* io */ | |
/*
 * Queue a bio for the send worker thread: append it to sendq under the
 * lock, then bump the counter and wake the worker (counter/wakeup after
 * the list add so the worker always finds the bio).
 */
static void ssd_queue_bio(struct ssd_device *dev, struct bio *bio)
{
	spin_lock(&dev->sendq_lock);
	ssd_blist_add(&dev->sendq, bio);
	spin_unlock(&dev->sendq_lock);

	atomic_inc(&dev->in_sendq);
	wake_up(&dev->send_waitq);
}
4265 | ||
/*
 * Final completion of a command.
 *
 * For bio-backed commands: finish I/O accounting and DMA unmapping
 * (discards were never mapped), release the tag, then either complete
 * the bio inline or defer it to the done worker thread.  For internal
 * (bio-less) commands: signal the synchronous waiter, if any.
 */
static inline void ssd_end_request(struct ssd_cmd *cmd)
{
	struct ssd_device *dev = cmd->dev;
	struct bio *bio = cmd->bio;
	int errors = cmd->errors;
	int tag = cmd->tag;

	if (bio) {
		if (!ssd_bio_has_discard(bio)) {
			ssd_end_io_acct(cmd);
			/* cmd->flag set appears to mean the sgl was not
			 * pci_map_sg()'d for this command — TODO confirm
			 * against the submit paths */
			if (!cmd->flag) {
				pci_unmap_sg(dev->pdev, cmd->sgl, cmd->nsegs,
					bio_data_dir(bio) == READ ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
			}
		}

		cmd->bio = NULL;
		ssd_put_tag(dev, tag);

		/* complete inline for MSI-X, low tags, or errors; otherwise
		 * hand off to the doneq worker */
		if (SSD_INT_MSIX == dev->int_mode || tag < 16 || errors) {
			ssd_bio_endio(bio, errors);
		} else /* if (bio->bi_idx >= bio->bi_vcnt)*/ {
			spin_lock(&dev->doneq_lock);
			ssd_blist_add(&dev->doneq, bio);
			spin_unlock(&dev->doneq_lock);

			atomic_inc(&dev->in_doneq);
			wake_up(&dev->done_waitq);
		}
	} else {
		if (cmd->waiting) {
			complete(cmd->waiting);
		}
	}
}
4301 | ||
/*
 * Complete a command that hit its timeout.
 *
 * All queue interrupts are disabled around the completion so the normal
 * completion path cannot race with this forced one; the command is then
 * failed with -ETIMEDOUT and the alarm LED is lit.
 */
static void ssd_end_timeout_request(struct ssd_cmd *cmd)
{
	struct ssd_device *dev = cmd->dev;
	struct ssd_rw_msg *msg = (struct ssd_rw_msg *)cmd->msg;
	int i;

	for (i=0; i<dev->nr_queue; i++) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
		disable_irq(dev->entry[i].vector);
#else
		disable_irq(pci_irq_vector(dev->pdev, i));
#endif
	}

	atomic_inc(&dev->tocnt);
	//if (cmd->bio) {
	hio_err("%s: cmd timeout: tag %d fun %#x\n", dev->name, msg->tag, msg->fun);
	cmd->errors = -ETIMEDOUT;
	ssd_end_request(cmd);
	//}

	for (i=0; i<dev->nr_queue; i++) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
		enable_irq(dev->entry[i].vector);
#else
		enable_irq(pci_irq_vector(dev->pdev, i));
#endif
	}

	/* alarm led */
	ssd_set_alarm(dev);
}
4334 | ||
4335 | /* cmd timer */ | |
/*
 * Arm the per-command timeout timer so @complt fires after @timeout
 * jiffies.  The callback signature differs across the 4.15 timer API
 * change (timer_setup/from_timer vs. init_timer with a data pointer),
 * hence the split prototypes.
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0))
static void ssd_cmd_add_timer(struct ssd_cmd *cmd, int timeout, void (*complt)(struct ssd_cmd *))
#else
static void ssd_cmd_add_timer(struct ssd_cmd *cmd, int timeout, void (*complt)(struct timer_list *))
#endif
{
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0))
	init_timer(&cmd->cmd_timer);

	cmd->cmd_timer.data = (unsigned long)cmd;
	cmd->cmd_timer.function = (void (*)(unsigned long)) complt;
#else
	timer_setup(&cmd->cmd_timer, complt, 0);
#endif

	cmd->cmd_timer.expires = jiffies + timeout;
	add_timer(&cmd->cmd_timer);
}
4354 | ||
/* Cancel a command's timeout timer; nonzero means it was still pending. */
static int ssd_cmd_del_timer(struct ssd_cmd *cmd)
{
	return del_timer(&cmd->cmd_timer);
}
4359 | ||
/*
 * Arm an arbitrary driver timer to fire @complt after @timeout jiffies.
 * @data is only used by the pre-4.15 timer API; on 4.15+ callbacks
 * recover their context with from_timer() instead.
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0))
static void ssd_add_timer(struct timer_list *timer, int timeout, void (*complt)(void *), void *data)
#else
static void ssd_add_timer(struct timer_list *timer, int timeout, void (*complt)(struct timer_list *), void *data)
#endif
{
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0))
	init_timer(timer);

	timer->data = (unsigned long)data;
	timer->function = (void (*)(unsigned long)) complt;
#else
	timer_setup(timer, complt, 0);
#endif

	timer->expires = jiffies + timeout;
	add_timer(timer);
}
4378 | ||
/* Cancel a driver timer; nonzero means it was still pending. */
static int ssd_del_timer(struct timer_list *timer)
{
	return del_timer(timer);
}
4383 | ||
/*
 * Timer callback for a command that never completed: force-complete it
 * with -ETIMEDOUT and log the first word of its hardware message.
 * Signature differs across the 4.15 timer API change.
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0))
static void ssd_cmd_timeout(struct ssd_cmd *cmd)
#else
static void ssd_cmd_timeout(struct timer_list *t)
#endif
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,15,0))
	struct ssd_cmd *cmd = from_timer(cmd, t, cmd_timer);
#endif
	struct ssd_device *dev = cmd->dev;
	/* capture the message word before ssd_end_timeout_request() recycles the cmd */
	uint32_t msg = *(uint32_t *)cmd->msg;

	ssd_end_timeout_request(cmd);

	ssd_gen_swlog(dev, SSD_LOG_TIMEOUT, msg);
}
4400 | ||
4401 | ||
/*
 * Tasklet body: drain this CPU's completion list and finish each command.
 * The per-CPU list is spliced to a local list with interrupts disabled so
 * the interrupt handler can keep appending while we complete.
 */
static void __ssd_done(unsigned long data)
{
	struct ssd_cmd *cmd;
	LIST_HEAD(localq);

	local_irq_disable();
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0))
	list_splice_init(&__get_cpu_var(ssd_doneq), &localq);
#else
	list_splice_init(this_cpu_ptr(&ssd_doneq), &localq);
#endif
	local_irq_enable();

	while (!list_empty(&localq)) {
		cmd = list_entry(localq.next, struct ssd_cmd, list);
		list_del_init(&cmd->list);

		ssd_end_request(cmd);
	}
}
4422 | ||
/*
 * Debug variant of the completion tasklet: before finishing each bio,
 * inject an error if it overlaps the configured debug LBA range
 * (dev->db_info) and its direction matches the debug type — -EIO for
 * injected read errors, -EROFS for injected write errors.
 */
static void __ssd_done_db(unsigned long data)
{
	struct ssd_cmd *cmd;
	struct ssd_device *dev;
	struct bio *bio;
	LIST_HEAD(localq);

	/* splice the per-CPU completion list with irqs off, as in __ssd_done() */
	local_irq_disable();
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0))
	list_splice_init(&__get_cpu_var(ssd_doneq), &localq);
#else
	list_splice_init(this_cpu_ptr(&ssd_doneq), &localq);
#endif
	local_irq_enable();

	while (!list_empty(&localq)) {
		cmd = list_entry(localq.next, struct ssd_cmd, list);
		list_del_init(&cmd->list);

		dev = (struct ssd_device *)cmd->dev;
		bio = cmd->bio;

		if (bio) {
			sector_t off = dev->db_info.data.loc.off;
			uint32_t len = dev->db_info.data.loc.len;

			switch (dev->db_info.type) {
			case SSD_DEBUG_READ_ERR:
				/* overlap test: bio range intersects [off, off+len) */
				if (bio_data_dir(bio) == READ &&
					!((off + len) <= bio_start(bio) || off >= (bio_start(bio) + bio_sectors(bio)))) {
					cmd->errors = -EIO;
				}
				break;
			case SSD_DEBUG_WRITE_ERR:
				if (bio_data_dir(bio) == WRITE &&
					!((off + len) <= bio_start(bio) || off >= (bio_start(bio) + bio_sectors(bio)))) {
					cmd->errors = -EROFS;
				}
				break;
			case SSD_DEBUG_RW_ERR:
				if (!((off + len) <= bio_start(bio) || off >= (bio_start(bio) + bio_sectors(bio)))) {
					if (bio_data_dir(bio) == READ) {
						cmd->errors = -EIO;
					} else {
						cmd->errors = -EROFS;
					}
				}
				break;
			default:
				break;
			}
		}

		ssd_end_request(cmd);
	}
}
4479 | ||
/*
 * Interrupt-side completion: cancel the command timer, then defer the
 * actual completion to this CPU's tasklet.  If the timer has already
 * fired, the timeout path owns the command — log and bail out.
 */
static inline void ssd_done_bh(struct ssd_cmd *cmd)
{
	unsigned long flags = 0;

	if (unlikely(!ssd_cmd_del_timer(cmd))) {
		struct ssd_device *dev = cmd->dev;
		struct ssd_rw_msg *msg = (struct ssd_rw_msg *)cmd->msg;
		hio_err("%s: unknown cmd: tag %d fun %#x\n", dev->name, msg->tag, msg->fun);

		/* alarm led */
		ssd_set_alarm(dev);
		return;
	}

	local_irq_save(flags);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0))
	list_add_tail(&cmd->list, &__get_cpu_var(ssd_doneq));
	tasklet_hi_schedule(&__get_cpu_var(ssd_tasklet));
#else
	list_add_tail(&cmd->list, this_cpu_ptr(&ssd_doneq));
	tasklet_hi_schedule(this_cpu_ptr(&ssd_tasklet));
#endif
	local_irq_restore(flags);

	return;
}
4506 | ||
/*
 * Synchronous completion: cancel the command timer and finish the
 * command inline (no tasklet hand-off).  If the timer already fired,
 * the timeout path owns the command — log and bail out.
 */
static inline void ssd_done(struct ssd_cmd *cmd)
{
	if (unlikely(!ssd_cmd_del_timer(cmd))) {
		struct ssd_device *dev = cmd->dev;
		struct ssd_rw_msg *msg = (struct ssd_rw_msg *)cmd->msg;
		hio_err("%s: unknown cmd: tag %d fun %#x\n", dev->name, msg->tag, msg->fun);

		/* alarm led */
		ssd_set_alarm(dev);
		return;
	}

	ssd_end_request(cmd);

	return;
}
4523 | ||
/*
 * Hand a command to the hardware via the 64-bit request FIFO register.
 * The timeout timer is armed BEFORE the doorbell so a completion can
 * never arrive for an un-timed command; the register write is serialized
 * with cmd_lock.
 */
static inline void ssd_dispatch_cmd(struct ssd_cmd *cmd)
{
	struct ssd_device *dev = (struct ssd_device *)cmd->dev;

	ssd_cmd_add_timer(cmd, SSD_CMD_TIMEOUT, ssd_cmd_timeout);

	spin_lock(&dev->cmd_lock);
	ssd_reg_write(dev->ctrlp + SSD_REQ_FIFO_REG, cmd->msg_dma);
	spin_unlock(&dev->cmd_lock);
}
4534 | ||
/*
 * Ring the 32-bit doorbell for a command: low 16 bits carry the tag,
 * high 16 bits the segment count.  Timer armed before the doorbell,
 * as in ssd_dispatch_cmd(); no lock is needed for a 32-bit write.
 */
static inline void ssd_send_cmd(struct ssd_cmd *cmd)
{
	struct ssd_device *dev = (struct ssd_device *)cmd->dev;

	ssd_cmd_add_timer(cmd, SSD_CMD_TIMEOUT, ssd_cmd_timeout);

	ssd_reg32_write(dev->ctrlp + SSD_REQ_FIFO_REG, ((uint32_t)cmd->tag | ((uint32_t)cmd->nsegs << 16)));
}
4543 | ||
/*
 * Debug variant of ssd_send_cmd(): deliberately DROP the doorbell write
 * (returning after the timer is armed) for bios matching the configured
 * timeout-injection type, so the command times out for testing.
 */
static inline void ssd_send_cmd_db(struct ssd_cmd *cmd)
{
	struct ssd_device *dev = (struct ssd_device *)cmd->dev;
	struct bio *bio = cmd->bio;

	ssd_cmd_add_timer(cmd, SSD_CMD_TIMEOUT, ssd_cmd_timeout);

	if (bio) {
		switch (dev->db_info.type) {
		case SSD_DEBUG_READ_TO:
			if (bio_data_dir(bio) == READ) {
				return;
			}
			break;
		case SSD_DEBUG_WRITE_TO:
			if (bio_data_dir(bio) == WRITE) {
				return;
			}
			break;
		case SSD_DEBUG_RW_TO:
			return;
			break;
		default:
			break;
		}
	}

	ssd_reg32_write(dev->ctrlp + SSD_REQ_FIFO_REG, ((uint32_t)cmd->tag | ((uint32_t)cmd->nsegs << 16)));
}
4573 | ||
4574 | ||
4575 | /* fixed for BIOVEC_PHYS_MERGEABLE */ | |
4576 | #ifdef SSD_BIOVEC_PHYS_MERGEABLE_FIXED | |
4577 | #include <linux/bio.h> | |
4578 | #include <linux/io.h> | |
4579 | #include <xen/page.h> | |
4580 | ||
/*
 * Under Xen, pseudo-physically contiguous pages are not necessarily
 * contiguous in machine address space, so additionally require the
 * machine frames to be identical or directly adjacent before allowing
 * two bvecs to merge.
 */
static bool xen_biovec_phys_mergeable_fixed(const struct bio_vec *vec1,
		const struct bio_vec *vec2)
{
	unsigned long mfn1 = pfn_to_mfn(page_to_pfn(vec1->bv_page));
	unsigned long mfn2 = pfn_to_mfn(page_to_pfn(vec2->bv_page));

	return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&
		((mfn1 == mfn2) || ((mfn1+1) == mfn2));
}
4590 | ||
4591 | #ifdef BIOVEC_PHYS_MERGEABLE | |
4592 | #undef BIOVEC_PHYS_MERGEABLE | |
4593 | #endif | |
4594 | #define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \ | |
4595 | (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \ | |
4596 | (!xen_domain() || xen_biovec_phys_mergeable_fixed(vec1, vec2))) | |
4597 | ||
4598 | #endif | |
4599 | ||
653c3a30 SF |
4600 | /* |
4601 | * BIOVEC_PHYS_MERGEABLE not available from 4.20 onward, and it seems likely | |
4602 | * that all the merging that can be done has been done by the block core | |
4603 | * already. Just stub it out. | |
4604 | */ | |
4605 | #if (LINUX_VERSION_CODE > KERNEL_VERSION(4,20,0)) | |
4606 | # ifdef BIOVEC_PHYS_MERGEABLE | |
4607 | # undef BIOVEC_PHYS_MERGEABLE | |
4608 | # endif | |
4609 | # define BIOVEC_PHYS_MERGEABLE(vec1, vec2) (0) | |
4610 | #endif | |
4611 | ||
361ebed5 HSDT |
/*
 * Build a scatterlist from a bio's segments, merging physically
 * contiguous bvecs and capping the list at dev->hw_info.cmd_max_sg
 * entries.  Two implementations: the pre-3.14 pointer-based
 * bio_for_each_segment and the 3.14+ bvec_iter variant.
 *
 * Returns the number of scatterlist entries filled in @sgl.
 */
static inline int ssd_bio_map_sg(struct ssd_device *dev, struct bio *bio, struct scatterlist *sgl)
{
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0))
	struct bio_vec *bvec, *bvprv = NULL;
	struct scatterlist *sg = NULL;
	int i = 0, nsegs = 0;

#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23))
	sg_init_table(sgl, dev->hw_info.cmd_max_sg);
#endif

	/*
	 * for each segment in bio
	 */
	bio_for_each_segment(bvec, bio, i) {
		/* extend the previous entry when physically adjacent */
		if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
			sg->length += bvec->bv_len;
		} else {
			/* stop once the hardware's sg limit is reached */
			if (unlikely(nsegs >= (int)dev->hw_info.cmd_max_sg)) {
				break;
			}

			sg = sg ? (sg + 1) : sgl;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
			sg_set_page(sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset);
#else
			sg->page = bvec->bv_page;
			sg->length = bvec->bv_len;
			sg->offset = bvec->bv_offset;
#endif
			nsegs++;
		}
		bvprv = bvec;
	}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
	if (sg) {
		sg_mark_end(sg);
	}
#endif

	/* record how far we got in case not all segments fit */
	bio->bi_idx = i;

	return nsegs;
#else
	struct bio_vec bvec, bvprv;
	struct bvec_iter iter;
	struct scatterlist *sg = NULL;
	int nsegs = 0;
	int first = 1;

	sg_init_table(sgl, dev->hw_info.cmd_max_sg);

	/*
	 * for each segment in bio
	 */
	bio_for_each_segment(bvec, bio, iter) {
		/* extend the previous entry when physically adjacent */
		if (!first && BIOVEC_PHYS_MERGEABLE(&bvprv, &bvec)) {
			sg->length += bvec.bv_len;
		} else {
			/* stop once the hardware's sg limit is reached */
			if (unlikely(nsegs >= (int)dev->hw_info.cmd_max_sg)) {
				break;
			}

			sg = sg ? (sg + 1) : sgl;

			sg_set_page(sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);

			nsegs++;
			first = 0;
		}
		bvprv = bvec;
	}

	if (sg) {
		sg_mark_end(sg);
	}

	return nsegs;
#endif
}
4693 | ||
4694 | ||
4695 | static int __ssd_submit_pbio(struct ssd_device *dev, struct bio *bio, int wait) | |
4696 | { | |
4697 | struct ssd_cmd *cmd; | |
4698 | struct ssd_rw_msg *msg; | |
4699 | struct ssd_sg_entry *sge; | |
4700 | sector_t block = bio_start(bio); | |
4701 | int tag; | |
4702 | int i; | |
4703 | ||
4704 | tag = ssd_get_tag(dev, wait); | |
4705 | if (tag < 0) { | |
4706 | return -EBUSY; | |
4707 | } | |
4708 | ||
4709 | cmd = &dev->cmd[tag]; | |
4710 | cmd->bio = bio; | |
4711 | cmd->flag = 1; | |
4712 | ||
4713 | msg = (struct ssd_rw_msg *)cmd->msg; | |
4714 | ||
1197134c | 4715 | if (ssd_bio_has_discard(bio)) { |
361ebed5 HSDT |
4716 | unsigned int length = bio_sectors(bio); |
4717 | ||
4718 | //printk(KERN_WARNING "%s: discard len %u, block %llu\n", dev->name, bio_sectors(bio), block); | |
4719 | msg->tag = tag; | |
4720 | msg->fun = SSD_FUNC_TRIM; | |
4721 | ||
4722 | sge = msg->sge; | |
4723 | for (i=0; i<(dev->hw_info.cmd_max_sg); i++) { | |
4724 | sge->block = block; | |
4725 | sge->length = (length >= dev->hw_info.sg_max_sec) ? dev->hw_info.sg_max_sec : length; | |
4726 | sge->buf = 0; | |
4727 | ||
4728 | block += sge->length; | |
4729 | length -= sge->length; | |
4730 | sge++; | |
4731 | ||
4732 | if (length <= 0) { | |
1197134c | 4733 | ++i; |
361ebed5 HSDT |
4734 | break; |
4735 | } | |
4736 | } | |
1197134c | 4737 | msg->nsegs = cmd->nsegs = i; |
361ebed5 HSDT |
4738 | |
4739 | dev->scmd(cmd); | |
4740 | return 0; | |
4741 | } | |
361ebed5 HSDT |
4742 | |
4743 | //msg->nsegs = cmd->nsegs = ssd_bio_map_sg(dev, bio, sgl); | |
4744 | msg->nsegs = cmd->nsegs = bio->bi_vcnt; | |
4745 | ||
4746 | //xx | |
4747 | if (bio_data_dir(bio) == READ) { | |
4748 | msg->fun = SSD_FUNC_READ; | |
4749 | msg->flag = 0; | |
4750 | } else { | |
4751 | msg->fun = SSD_FUNC_WRITE; | |
4752 | msg->flag = dev->wmode; | |
4753 | } | |
4754 | ||
4755 | sge = msg->sge; | |
4756 | for (i=0; i<bio->bi_vcnt; i++) { | |
4757 | sge->block = block; | |
4758 | sge->length = bio->bi_io_vec[i].bv_len >> 9; | |
4759 | sge->buf = (uint64_t)((void *)bio->bi_io_vec[i].bv_page + bio->bi_io_vec[i].bv_offset); | |
4760 | ||
4761 | block += sge->length; | |
4762 | sge++; | |
4763 | } | |
4764 | ||
4765 | msg->tag = tag; | |
4766 | ||
4767 | #ifdef SSD_OT_PROTECT | |
4768 | if (unlikely(dev->ot_delay > 0 && dev->ot_protect != 0)) { | |
4769 | msleep_interruptible(dev->ot_delay); | |
4770 | } | |
4771 | #endif | |
4772 | ||
4773 | ssd_start_io_acct(cmd); | |
4774 | dev->scmd(cmd); | |
4775 | ||
4776 | return 0; | |
4777 | } | |
4778 | ||
4779 | static inline int ssd_submit_bio(struct ssd_device *dev, struct bio *bio, int wait) | |
4780 | { | |
4781 | struct ssd_cmd *cmd; | |
4782 | struct ssd_rw_msg *msg; | |
4783 | struct ssd_sg_entry *sge; | |
4784 | struct scatterlist *sgl; | |
4785 | sector_t block = bio_start(bio); | |
4786 | int tag; | |
4787 | int i; | |
4788 | ||
4789 | tag = ssd_get_tag(dev, wait); | |
4790 | if (tag < 0) { | |
4791 | return -EBUSY; | |
4792 | } | |
4793 | ||
4794 | cmd = &dev->cmd[tag]; | |
4795 | cmd->bio = bio; | |
4796 | cmd->flag = 0; | |
4797 | ||
4798 | msg = (struct ssd_rw_msg *)cmd->msg; | |
4799 | ||
4800 | sgl = cmd->sgl; | |
4801 | ||
1197134c | 4802 | if (ssd_bio_has_discard(bio)) { |
361ebed5 HSDT |
4803 | unsigned int length = bio_sectors(bio); |
4804 | ||
4805 | //printk(KERN_WARNING "%s: discard len %u, block %llu\n", dev->name, bio_sectors(bio), block); | |
4806 | msg->tag = tag; | |
4807 | msg->fun = SSD_FUNC_TRIM; | |
4808 | ||
4809 | sge = msg->sge; | |
4810 | for (i=0; i<(dev->hw_info.cmd_max_sg); i++) { | |
4811 | sge->block = block; | |
4812 | sge->length = (length >= dev->hw_info.sg_max_sec) ? dev->hw_info.sg_max_sec : length; | |
4813 | sge->buf = 0; | |
4814 | ||
4815 | block += sge->length; | |
4816 | length -= sge->length; | |
4817 | sge++; | |
4818 | ||
4819 | if (length <= 0) { | |
1197134c | 4820 | ++i; |
361ebed5 HSDT |
4821 | break; |
4822 | } | |
4823 | } | |
1197134c | 4824 | msg->nsegs = cmd->nsegs = i; |
361ebed5 HSDT |
4825 | |
4826 | dev->scmd(cmd); | |
4827 | return 0; | |
4828 | } | |
361ebed5 HSDT |
4829 | |
4830 | msg->nsegs = cmd->nsegs = ssd_bio_map_sg(dev, bio, sgl); | |
4831 | ||
4832 | //xx | |
4833 | if (bio_data_dir(bio) == READ) { | |
4834 | msg->fun = SSD_FUNC_READ; | |
4835 | msg->flag = 0; | |
4836 | pci_map_sg(dev->pdev, sgl, cmd->nsegs, PCI_DMA_FROMDEVICE); | |
4837 | } else { | |
4838 | msg->fun = SSD_FUNC_WRITE; | |
4839 | msg->flag = dev->wmode; | |
4840 | pci_map_sg(dev->pdev, sgl, cmd->nsegs, PCI_DMA_TODEVICE); | |
4841 | } | |
4842 | ||
4843 | sge = msg->sge; | |
4844 | for (i=0; i<cmd->nsegs; i++) { | |
4845 | sge->block = block; | |
4846 | sge->length = sg_dma_len(sgl) >> 9; | |
4847 | sge->buf = sg_dma_address(sgl); | |
4848 | ||
4849 | block += sge->length; | |
4850 | sgl++; | |
4851 | sge++; | |
4852 | } | |
4853 | ||
4854 | msg->tag = tag; | |
4855 | ||
4856 | #ifdef SSD_OT_PROTECT | |
4857 | if (unlikely(dev->ot_delay > 0 && dev->ot_protect != 0)) { | |
4858 | msleep_interruptible(dev->ot_delay); | |
4859 | } | |
4860 | #endif | |
4861 | ||
4862 | ssd_start_io_acct(cmd); | |
4863 | dev->scmd(cmd); | |
4864 | ||
4865 | return 0; | |
4866 | } | |
4867 | ||
/* threads */
/*
 * Completion thread ("%s/d"): drains dev->doneq and ends the bios the
 * IRQ path queued there.
 *
 * Returns 0 when stopped via kthread_stop(), -EINVAL if started
 * without a device pointer.
 */
static int ssd_done_thread(void *data)
{
	struct ssd_device *dev;
	struct bio *bio;
	struct bio *next;

	if (!data) {
		return -EINVAL;
	}
	dev = data;

	current->flags |= PF_NOFREEZE;
	//set_user_nice(current, -5);

	while (!kthread_should_stop()) {
		wait_event_interruptible(dev->done_waitq, (atomic_read(&dev->in_doneq) || kthread_should_stop()));

		while (atomic_read(&dev->in_doneq)) {
			/*
			 * With threaded_irq the producer presumably runs in
			 * process context, so a plain spin_lock suffices;
			 * otherwise the queue is filled from hard-IRQ context
			 * and interrupts must be disabled around the lock.
			 */
			if (threaded_irq) {
				spin_lock(&dev->doneq_lock);
				bio = ssd_blist_get(&dev->doneq);
				spin_unlock(&dev->doneq_lock);
			} else {
				spin_lock_irq(&dev->doneq_lock);
				bio = ssd_blist_get(&dev->doneq);
				spin_unlock_irq(&dev->doneq_lock);
			}

			/* end every bio on the chain we just detached */
			while (bio) {
				next = bio->bi_next;
				bio->bi_next = NULL;
				ssd_bio_endio(bio, 0);
				atomic_dec(&dev->in_doneq);
				bio = next;
			}

			cond_resched();

#ifdef SSD_ESCAPE_IRQ
			/* migrate off the CPU that services our IRQ */
			if (unlikely(smp_processor_id() == dev->irq_cpu)) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
				cpumask_var_t new_mask;
				if (alloc_cpumask_var(&new_mask, GFP_ATOMIC)) {
					cpumask_setall(new_mask);
					cpumask_clear_cpu(dev->irq_cpu, new_mask);
					set_cpus_allowed_ptr(current, new_mask);
					free_cpumask_var(new_mask);
				}
#else
				/* pre-2.6.28 fixed-size cpumask API */
				cpumask_t new_mask;
				cpus_setall(new_mask);
				cpu_clear(dev->irq_cpu, new_mask);
				set_cpus_allowed(current, new_mask);
#endif
			}
#endif
		}
	}
	return 0;
}
4929 | ||
/*
 * Submit thread ("%s/s"): drains dev->sendq and submits the queued
 * bios, this time blocking for a free tag (wait=1).
 *
 * Returns 0 when stopped via kthread_stop(), -EINVAL if started
 * without a device pointer.
 */
static int ssd_send_thread(void *data)
{
	struct ssd_device *dev;
	struct bio *bio;
	struct bio *next;

	if (!data) {
		return -EINVAL;
	}
	dev = data;

	current->flags |= PF_NOFREEZE;
	//set_user_nice(current, -5);

	while (!kthread_should_stop()) {
		wait_event_interruptible(dev->send_waitq, (atomic_read(&dev->in_sendq) || kthread_should_stop()));

		while (atomic_read(&dev->in_sendq)) {
			/* NOTE(review): plain spin_lock — producers are assumed
			 * not to run in hard-IRQ context; confirm */
			spin_lock(&dev->sendq_lock);
			bio = ssd_blist_get(&dev->sendq);
			spin_unlock(&dev->sendq_lock);

			while (bio) {
				next = bio->bi_next;
				bio->bi_next = NULL;
#ifdef SSD_QUEUE_PBIO
				/* physical bios take the non-DMA-mapping submit path */
				if (test_and_clear_bit(BIO_SSD_PBIO, &bio->bi_flags)) {
					__ssd_submit_pbio(dev, bio, 1);
				} else {
					ssd_submit_bio(dev, bio, 1);
				}
#else
				ssd_submit_bio(dev, bio, 1);
#endif
				atomic_dec(&dev->in_sendq);
				bio = next;
			}

			cond_resched();

#ifdef SSD_ESCAPE_IRQ
			/* migrate off the CPU that services our IRQ */
			if (unlikely(smp_processor_id() == dev->irq_cpu)) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
				cpumask_var_t new_mask;
				if (alloc_cpumask_var(&new_mask, GFP_ATOMIC)) {
					cpumask_setall(new_mask);
					cpumask_clear_cpu(dev->irq_cpu, new_mask);
					set_cpus_allowed_ptr(current, new_mask);
					free_cpumask_var(new_mask);
				}
#else
				/* pre-2.6.28 fixed-size cpumask API */
				cpumask_t new_mask;
				cpus_setall(new_mask);
				cpu_clear(dev->irq_cpu, new_mask);
				set_cpus_allowed(current, new_mask);
#endif
			}
#endif
		}
	}

	return 0;
}
4993 | ||
/*
 * Stop the worker threads created by ssd_init_thread().  The send
 * thread is stopped before the completion thread (reverse of creation
 * order).
 */
static void ssd_cleanup_thread(struct ssd_device *dev)
{
	kthread_stop(dev->send_thread);
	kthread_stop(dev->done_thread);
}
4999 | ||
5000 | static int ssd_init_thread(struct ssd_device *dev) | |
5001 | { | |
5002 | int ret; | |
5003 | ||
5004 | atomic_set(&dev->in_doneq, 0); | |
5005 | atomic_set(&dev->in_sendq, 0); | |
5006 | ||
5007 | spin_lock_init(&dev->doneq_lock); | |
5008 | spin_lock_init(&dev->sendq_lock); | |
5009 | ||
5010 | ssd_blist_init(&dev->doneq); | |
5011 | ssd_blist_init(&dev->sendq); | |
5012 | ||
5013 | init_waitqueue_head(&dev->done_waitq); | |
5014 | init_waitqueue_head(&dev->send_waitq); | |
5015 | ||
5016 | dev->done_thread = kthread_run(ssd_done_thread, dev, "%s/d", dev->name); | |
5017 | if (IS_ERR(dev->done_thread)) { | |
5018 | ret = PTR_ERR(dev->done_thread); | |
5019 | goto out_done_thread; | |
5020 | } | |
5021 | ||
5022 | dev->send_thread = kthread_run(ssd_send_thread, dev, "%s/s", dev->name); | |
5023 | if (IS_ERR(dev->send_thread)) { | |
5024 | ret = PTR_ERR(dev->send_thread); | |
5025 | goto out_send_thread; | |
5026 | } | |
5027 | ||
5028 | return 0; | |
5029 | ||
5030 | out_send_thread: | |
5031 | kthread_stop(dev->done_thread); | |
5032 | out_done_thread: | |
5033 | return ret; | |
5034 | } | |
5035 | ||
5036 | /* dcmd pool */ | |
5037 | static void ssd_put_dcmd(struct ssd_dcmd *dcmd) | |
5038 | { | |
5039 | struct ssd_device *dev = (struct ssd_device *)dcmd->dev; | |
5040 | ||
5041 | spin_lock(&dev->dcmd_lock); | |
5042 | list_add_tail(&dcmd->list, &dev->dcmd_list); | |
5043 | spin_unlock(&dev->dcmd_lock); | |
5044 | } | |
5045 | ||
5046 | static struct ssd_dcmd *ssd_get_dcmd(struct ssd_device *dev) | |
5047 | { | |
5048 | struct ssd_dcmd *dcmd = NULL; | |
5049 | ||
5050 | spin_lock(&dev->dcmd_lock); | |
5051 | if (!list_empty(&dev->dcmd_list)) { | |
5052 | dcmd = list_entry(dev->dcmd_list.next, | |
5053 | struct ssd_dcmd, list); | |
5054 | list_del_init(&dcmd->list); | |
5055 | } | |
5056 | spin_unlock(&dev->dcmd_lock); | |
5057 | ||
5058 | return dcmd; | |
5059 | } | |
5060 | ||
5061 | static void ssd_cleanup_dcmd(struct ssd_device *dev) | |
5062 | { | |
5063 | kfree(dev->dcmd); | |
5064 | } | |
5065 | ||
5066 | static int ssd_init_dcmd(struct ssd_device *dev) | |
5067 | { | |
5068 | struct ssd_dcmd *dcmd; | |
5069 | int dcmd_sz = sizeof(struct ssd_dcmd)*dev->hw_info.cmd_fifo_sz; | |
5070 | int i; | |
5071 | ||
5072 | spin_lock_init(&dev->dcmd_lock); | |
5073 | INIT_LIST_HEAD(&dev->dcmd_list); | |
5074 | init_waitqueue_head(&dev->dcmd_wq); | |
5075 | ||
5076 | dev->dcmd = kmalloc(dcmd_sz, GFP_KERNEL); | |
5077 | if (!dev->dcmd) { | |
5078 | hio_warn("%s: can not alloc dcmd\n", dev->name); | |
5079 | goto out_alloc_dcmd; | |
5080 | } | |
5081 | memset(dev->dcmd, 0, dcmd_sz); | |
5082 | ||
5083 | for (i=0, dcmd=dev->dcmd; i<(int)dev->hw_info.cmd_fifo_sz; i++, dcmd++) { | |
5084 | dcmd->dev = dev; | |
5085 | INIT_LIST_HEAD(&dcmd->list); | |
5086 | list_add_tail(&dcmd->list, &dev->dcmd_list); | |
5087 | } | |
5088 | ||
5089 | return 0; | |
5090 | ||
5091 | out_alloc_dcmd: | |
5092 | return -ENOMEM; | |
5093 | } | |
5094 | ||
5095 | static void ssd_put_dmsg(void *msg) | |
5096 | { | |
5097 | struct ssd_dcmd *dcmd = container_of(msg, struct ssd_dcmd, msg); | |
5098 | struct ssd_device *dev = (struct ssd_device *)dcmd->dev; | |
5099 | ||
5100 | memset(dcmd->msg, 0, SSD_DCMD_MAX_SZ); | |
5101 | ssd_put_dcmd(dcmd); | |
5102 | wake_up(&dev->dcmd_wq); | |
5103 | } | |
5104 | ||
/*
 * Get a direct-command message buffer, sleeping uninterruptibly until
 * one is recycled via ssd_put_dmsg() if the pool is currently empty.
 */
static void *ssd_get_dmsg(struct ssd_device *dev)
{
	struct ssd_dcmd *dcmd = ssd_get_dcmd(dev);

	/* classic prepare_to_wait loop: re-check the condition after waking */
	while (!dcmd) {
		DEFINE_WAIT(wait);
		prepare_to_wait_exclusive(&dev->dcmd_wq, &wait, TASK_UNINTERRUPTIBLE);
		schedule();

		dcmd = ssd_get_dcmd(dev);

		finish_wait(&dev->dcmd_wq, &wait);
	}
	return dcmd->msg;
}
5120 | ||
/* do direct cmd */
/*
 * Issue a direct command using a normal I/O tag and wait for it to
 * complete.
 *
 * @rw is unused in this function (the operation is encoded in @msg).
 * @done, when non-NULL, receives cmd->nr_log from the completed command.
 *
 * Returns 0 on success, -EBUSY when no tag is available, -ETIMEDOUT on
 * command timeout, or -EIO for any other device-reported error.
 */
static int ssd_do_request(struct ssd_device *dev, int rw, void *msg, int *done)
{
	DECLARE_COMPLETION(wait);
	struct ssd_cmd *cmd;
	int tag;
	int ret = 0;

	/* always wait for a free tag */
	tag = ssd_get_tag(dev, 1);
	if (tag < 0) {
		return -EBUSY;
	}

	cmd = &dev->cmd[tag];
	cmd->nsegs = 1;
	memcpy(cmd->msg, msg, SSD_DCMD_MAX_SZ);
	((struct ssd_rw_msg *)cmd->msg)->tag = tag;

	cmd->waiting = &wait;

	dev->scmd(cmd);

	wait_for_completion(cmd->waiting);
	cmd->waiting = NULL;

	/* -ETIMEDOUT is passed through; all other errors collapse to -EIO */
	if (cmd->errors == -ETIMEDOUT) {
		ret = cmd->errors;
	} else if (cmd->errors) {
		ret = -EIO;
	}

	if (done != NULL) {
		*done = cmd->nr_log;
	}
	ssd_put_tag(dev, cmd->tag);

	return ret;
}
5159 | ||
/*
 * Same as ssd_do_request() but uses the barrier tag, so the command is
 * ordered via ssd_barrier_get_tag()/ssd_barrier_put_tag() instead of
 * the normal tag pool.
 *
 * @rw is unused; @done, when non-NULL, receives cmd->nr_log.
 *
 * Returns 0 on success, -EBUSY when the barrier tag is unavailable,
 * -ETIMEDOUT on timeout, or -EIO for other device errors.
 */
static int ssd_do_barrier_request(struct ssd_device *dev, int rw, void *msg, int *done)
{
	DECLARE_COMPLETION(wait);
	struct ssd_cmd *cmd;
	int tag;
	int ret = 0;

	tag = ssd_barrier_get_tag(dev);
	if (tag < 0) {
		return -EBUSY;
	}

	cmd = &dev->cmd[tag];
	cmd->nsegs = 1;
	memcpy(cmd->msg, msg, SSD_DCMD_MAX_SZ);
	((struct ssd_rw_msg *)cmd->msg)->tag = tag;

	cmd->waiting = &wait;

	dev->scmd(cmd);

	wait_for_completion(cmd->waiting);
	cmd->waiting = NULL;

	/* -ETIMEDOUT is passed through; all other errors collapse to -EIO */
	if (cmd->errors == -ETIMEDOUT) {
		ret = cmd->errors;
	} else if (cmd->errors) {
		ret = -EIO;
	}

	if (done != NULL) {
		*done = cmd->nr_log;
	}
	ssd_barrier_put_tag(dev, cmd->tag);

	return ret;
}
5197 | ||
#ifdef SSD_OT_PROTECT
/*
 * Poll every controller's temperature register and latch/clear the
 * over-temperature state (hwmon bit and, on mid-generation protocols,
 * the write-throttling delay) depending on whether any controller is
 * at or above @temp.
 */
static void ssd_check_temperature(struct ssd_device *dev, int temp)
{
	uint64_t val;
	uint32_t off;
	int cur;
	int i;

	if (mode != SSD_DRV_MODE_STANDARD) {
		return;
	}

	/* NOTE(review): empty branch — protocol <= V3 presumably lacks
	 * these per-controller temperature registers and should return
	 * early here; confirm the intended behavior. */
	if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) {
	}

	for (i=0; i<dev->hw_info.nr_ctrl; i++) {
		off = SSD_CTRL_TEMP_REG0 + i * sizeof(uint64_t);

		val = ssd_reg_read(dev->ctrlp + off);
		if (val == 0xffffffffffffffffull) {
			/* register reads all-ones: controller not accessible */
			continue;
		}

		cur = (int)CUR_TEMP(val);
		if (cur >= temp) {
			/* latch OT state once; throttle only on >V3 && <V3.2.2 */
			if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_CTRL), &dev->hwmon)) {
				if (dev->protocol_info.ver > SSD_PROTOCOL_V3 && dev->protocol_info.ver < SSD_PROTOCOL_V3_2_2) {
					hio_warn("%s: Over temperature, please check the fans.\n", dev->name);
					dev->ot_delay = SSD_OT_DELAY;
				}
			}
			return;
		}
	}

	/* every controller below @temp: clear the latched OT state */
	if (test_and_clear_bit(SSD_HWMON_TEMP(SSD_TEMP_CTRL), &dev->hwmon)) {
		if (dev->protocol_info.ver > SSD_PROTOCOL_V3 && dev->protocol_info.ver < SSD_PROTOCOL_V3_2_2) {
			hio_warn("%s: Temperature is OK.\n", dev->name);
			dev->ot_delay = 0;
		}
	}
}
#endif
5241 | ||
5242 | static int ssd_get_ot_status(struct ssd_device *dev, int *status) | |
5243 | { | |
5244 | uint32_t off; | |
5245 | uint32_t val; | |
5246 | int i; | |
5247 | ||
5248 | if (!dev || !status) { | |
5249 | return -EINVAL; | |
5250 | } | |
5251 | ||
5252 | if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2_2) { | |
5253 | for (i=0; i<dev->hw_info.nr_ctrl; i++) { | |
5254 | off = SSD_READ_OT_REG0 + (i * SSD_CTRL_REG_ZONE_SZ); | |
5255 | val = ssd_reg32_read(dev->ctrlp + off); | |
5256 | if ((val >> 22) & 0x1) { | |
5257 | *status = 1; | |
5258 | goto out; | |
5259 | } | |
5260 | ||
5261 | ||
5262 | off = SSD_WRITE_OT_REG0 + (i * SSD_CTRL_REG_ZONE_SZ); | |
5263 | val = ssd_reg32_read(dev->ctrlp + off); | |
5264 | if ((val >> 22) & 0x1) { | |
5265 | *status = 1; | |
5266 | goto out; | |
5267 | } | |
5268 | } | |
5269 | } else { | |
5270 | *status = !!dev->ot_delay; | |
5271 | } | |
5272 | ||
5273 | out: | |
5274 | return 0; | |
5275 | } | |
5276 | ||
5277 | static void ssd_set_ot_protect(struct ssd_device *dev, int protect) | |
5278 | { | |
5279 | uint32_t off; | |
5280 | uint32_t val; | |
5281 | int i; | |
5282 | ||
5283 | mutex_lock(&dev->fw_mutex); | |
5284 | ||
5285 | dev->ot_protect = !!protect; | |
5286 | ||
5287 | if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2_2) { | |
5288 | for (i=0; i<dev->hw_info.nr_ctrl; i++) { | |
5289 | off = SSD_READ_OT_REG0 + (i * SSD_CTRL_REG_ZONE_SZ); | |
5290 | val = ssd_reg32_read(dev->ctrlp + off); | |
5291 | if (dev->ot_protect) { | |
5292 | val |= (1U << 21); | |
5293 | } else { | |
5294 | val &= ~(1U << 21); | |
5295 | } | |
5296 | ssd_reg32_write(dev->ctrlp + off, val); | |
5297 | ||
5298 | ||
5299 | off = SSD_WRITE_OT_REG0 + (i * SSD_CTRL_REG_ZONE_SZ); | |
5300 | val = ssd_reg32_read(dev->ctrlp + off); | |
5301 | if (dev->ot_protect) { | |
5302 | val |= (1U << 21); | |
5303 | } else { | |
5304 | val &= ~(1U << 21); | |
5305 | } | |
5306 | ssd_reg32_write(dev->ctrlp + off, val); | |
5307 | } | |
5308 | } | |
5309 | ||
5310 | mutex_unlock(&dev->fw_mutex); | |
5311 | } | |
5312 | ||
/*
 * Apply the module-parameter over-temperature-protect setting, then
 * sample the initial temperature state.  Always returns 0.
 */
static int ssd_init_ot_protect(struct ssd_device *dev)
{
	ssd_set_ot_protect(dev, ot_protect);

#ifdef SSD_OT_PROTECT
	ssd_check_temperature(dev, SSD_OT_TEMP);
#endif

	return 0;
}
5323 | ||
/* log */
/*
 * Read controller @ctrl_idx's log area into @buf (dev->hw_info.log_sz
 * bytes, DMA-mapped for the duration of the command).  *nr_log
 * receives the number of entries returned by the command.
 *
 * Returns 0 on success or a negative errno.
 */
static int ssd_read_log(struct ssd_device *dev, int ctrl_idx, void *buf, int *nr_log)
{
	struct ssd_log_op_msg *msg;
	struct ssd_log_msg *lmsg;
	dma_addr_t buf_dma;
	size_t length = dev->hw_info.log_sz;
	int ret = 0;

	if (ctrl_idx >= dev->hw_info.nr_ctrl) {
		return -EINVAL;
	}

	buf_dma = pci_map_single(dev->pdev, buf, length, PCI_DMA_FROMDEVICE);
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
	/* old dma_mapping_error() took only the DMA address */
	ret = dma_mapping_error(buf_dma);
#else
	ret = dma_mapping_error(&(dev->pdev->dev), buf_dma);
#endif
	if (ret) {
		hio_warn("%s: unable to map read DMA buffer\n", dev->name);
		goto out_dma_mapping;
	}

	msg = (struct ssd_log_op_msg *)ssd_get_dmsg(dev);

	/* pre-V3 protocol uses the smaller ssd_log_msg layout */
	if (dev->protocol_info.ver < SSD_PROTOCOL_V3) {
		lmsg = (struct ssd_log_msg *)msg;
		lmsg->fun = SSD_FUNC_READ_LOG;
		lmsg->ctrl_idx = ctrl_idx;
		lmsg->buf = buf_dma;
	} else {
		msg->fun = SSD_FUNC_READ_LOG;
		msg->ctrl_idx = ctrl_idx;
		msg->buf = buf_dma;
	}

	ret = ssd_do_request(dev, READ, msg, nr_log);
	ssd_put_dmsg(msg);

	pci_unmap_single(dev->pdev, buf_dma, length, PCI_DMA_FROMDEVICE);

out_dma_mapping:
	return ret;
}
5369 | ||
5370 | #define SSD_LOG_PRINT_BUF_SZ 256 | |
5371 | static int ssd_parse_log(struct ssd_device *dev, struct ssd_log *log, int print) | |
5372 | { | |
5373 | struct ssd_log_desc *log_desc = ssd_log_desc; | |
5374 | struct ssd_log_entry *le; | |
5375 | char *sn = NULL; | |
5376 | char print_buf[SSD_LOG_PRINT_BUF_SZ]; | |
5377 | int print_len; | |
5378 | ||
5379 | le = &log->le; | |
5380 | ||
5381 | /* find desc */ | |
5382 | while (log_desc->event != SSD_UNKNOWN_EVENT) { | |
5383 | if (log_desc->event == le->event) { | |
5384 | break; | |
5385 | } | |
5386 | log_desc++; | |
5387 | } | |
5388 | ||
5389 | if (!print) { | |
5390 | goto out; | |
5391 | } | |
5392 | ||
5393 | if (log_desc->level < log_level) { | |
5394 | goto out; | |
5395 | } | |
5396 | ||
5397 | /* parse */ | |
5398 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
5399 | sn = dev->label.sn; | |
5400 | } else { | |
5401 | sn = dev->labelv3.barcode; | |
5402 | } | |
5403 | ||
5404 | print_len = snprintf(print_buf, SSD_LOG_PRINT_BUF_SZ, "%s (%s): <%#x>", dev->name, sn, le->event); | |
5405 | ||
5406 | if (log->ctrl_idx != SSD_LOG_SW_IDX) { | |
5407 | print_len += snprintf((print_buf + print_len), (SSD_LOG_PRINT_BUF_SZ - print_len), " controller %d", log->ctrl_idx); | |
5408 | } | |
5409 | ||
5410 | switch (log_desc->data) { | |
5411 | case SSD_LOG_DATA_NONE: | |
5412 | break; | |
5413 | case SSD_LOG_DATA_LOC: | |
5414 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
5415 | print_len += snprintf((print_buf + print_len), (SSD_LOG_PRINT_BUF_SZ - print_len), " flash %d", le->data.loc.flash); | |
5416 | if (log_desc->sblock) { | |
5417 | print_len += snprintf((print_buf + print_len), (SSD_LOG_PRINT_BUF_SZ - print_len), " block %d", le->data.loc.block); | |
5418 | } | |
5419 | if (log_desc->spage) { | |
5420 | print_len += snprintf((print_buf + print_len), (SSD_LOG_PRINT_BUF_SZ - print_len), " page %d", le->data.loc.page); | |
5421 | } | |
5422 | } else { | |
5423 | print_len += snprintf((print_buf + print_len), (SSD_LOG_PRINT_BUF_SZ - print_len), " flash %d", le->data.loc1.flash); | |
5424 | if (log_desc->sblock) { | |
5425 | print_len += snprintf((print_buf + print_len), (SSD_LOG_PRINT_BUF_SZ - print_len), " block %d", le->data.loc1.block); | |
5426 | } | |
5427 | if (log_desc->spage) { | |
5428 | print_len += snprintf((print_buf + print_len), (SSD_LOG_PRINT_BUF_SZ - print_len), " page %d", le->data.loc1.page); | |
5429 | } | |
5430 | } | |
5431 | break; | |
5432 | case SSD_LOG_DATA_HEX: | |
5433 | print_len += snprintf((print_buf + print_len), (SSD_LOG_PRINT_BUF_SZ - print_len), " info %#x", le->data.val); | |
5434 | break; | |
5435 | default: | |
5436 | break; | |
5437 | } | |
5438 | /*print_len += */snprintf((print_buf + print_len), (SSD_LOG_PRINT_BUF_SZ - print_len), ": %s", log_desc->desc); | |
5439 | ||
5440 | switch (log_desc->level) { | |
5441 | case SSD_LOG_LEVEL_INFO: | |
5442 | hio_info("%s\n", print_buf); | |
5443 | break; | |
5444 | case SSD_LOG_LEVEL_NOTICE: | |
5445 | hio_note("%s\n", print_buf); | |
5446 | break; | |
5447 | case SSD_LOG_LEVEL_WARNING: | |
5448 | hio_warn("%s\n", print_buf); | |
5449 | break; | |
5450 | case SSD_LOG_LEVEL_ERR: | |
5451 | hio_err("%s\n", print_buf); | |
5452 | //printk(KERN_ERR MODULE_NAME": some exception occurred, please check the data or refer to FAQ."); | |
5453 | break; | |
5454 | default: | |
5455 | hio_warn("%s\n", print_buf); | |
5456 | break; | |
5457 | } | |
5458 | ||
5459 | out: | |
5460 | return log_desc->level; | |
5461 | } | |
5462 | ||
5463 | static int ssd_bm_get_sfstatus(struct ssd_device *dev, uint16_t *status); | |
5464 | static int ssd_switch_wmode(struct ssd_device *dev, int wmode); | |
5465 | ||
5466 | ||
/*
 * React to a logged event: update hardware-monitor state, adjust the
 * write mode, refresh SMART counters, and forward the event to the
 * registered event callback (if any).
 *
 * Always returns 0 at present.
 */
static int ssd_handle_event(struct ssd_device *dev, uint16_t event, int level)
{
	int ret = 0;

	switch (event) {
	case SSD_LOG_OVER_TEMP: {
#ifdef SSD_OT_PROTECT
		/* latch OT state once; throttling only on >V3 && <V3.2.2 */
		if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_CTRL), &dev->hwmon)) {
			if (dev->protocol_info.ver > SSD_PROTOCOL_V3 && dev->protocol_info.ver < SSD_PROTOCOL_V3_2_2) {
				hio_warn("%s: Over temperature, please check the fans.\n", dev->name);
				dev->ot_delay = SSD_OT_DELAY;
			}
		}
#endif
		break;
	}

	case SSD_LOG_NORMAL_TEMP: {
#ifdef SSD_OT_PROTECT
		/* need to check all controller's temperature */
		ssd_check_temperature(dev, SSD_OT_TEMP_HYST);
#endif
		break;
	}

	case SSD_LOG_BATTERY_FAULT: {
		uint16_t sfstatus;

		/* older protocols: capture the battery-module safety status too */
		if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
			if (!ssd_bm_get_sfstatus(dev, &sfstatus)) {
				ssd_gen_swlog(dev, SSD_LOG_BM_SFSTATUS, sfstatus);
			}
		}

		/* power-loss capacitor lost: re-evaluate the write mode */
		if (!test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) {
			ssd_switch_wmode(dev, dev->user_wmode);
		}
		break;
	}

	case SSD_LOG_BATTERY_OK: {
		if (test_and_clear_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) {
			ssd_switch_wmode(dev, dev->user_wmode);
		}
		break;
	}

	case SSD_LOG_BOARD_VOLT_FAULT: {
		ssd_mon_boardvolt(dev);
		break;
	}

	case SSD_LOG_CLEAR_LOG: {
		/* update smart */
		memset(&dev->smart.log_info, 0, sizeof(struct ssd_log_info));
		break;
	}

	case SSD_LOG_CAP_VOLT_FAULT:
	case SSD_LOG_CAP_LEARN_FAULT:
	case SSD_LOG_CAP_SHORT_CIRCUIT: {
		/* capacitor faults imply the same protection loss as a battery fault */
		if (!test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) {
			ssd_switch_wmode(dev, dev->user_wmode);
		}
		break;
	}

	default:
		break;
	}

	/* ssd event call */
	if (dev->event_call) {
		dev->event_call(dev->gd, event, level);

		/* FIXME: capacitor faults are additionally reported as a generic battery fault */
		if (SSD_LOG_CAP_VOLT_FAULT == event || SSD_LOG_CAP_LEARN_FAULT == event || SSD_LOG_CAP_SHORT_CIRCUIT == event) {
			dev->event_call(dev->gd, SSD_LOG_BATTERY_FAULT, level);
		}
	}

	return ret;
}
5550 | ||
/*
 * Append @log to the in-memory internal log buffer and, on protocols
 * newer than V3, persist it to the SPI-flash log area as well.
 *
 * Returns 0 on success (or when the log area is already full), or the
 * error from ssd_spi_write().
 */
static int ssd_save_log(struct ssd_device *dev, struct ssd_log *log)
{
	uint32_t off, size;
	void *internal_log;
	int ret = 0;

	mutex_lock(&dev->internal_log_mutex);

	size = sizeof(struct ssd_log);
	off = dev->internal_log.nr_log * size;

	/* log area full: drop the record.
	 * NOTE(review): the warning is gated on the two counters matching —
	 * presumably so it is emitted only once per overflow; confirm. */
	if (off == dev->rom_info.log_sz) {
		if (dev->internal_log.nr_log == dev->smart.log_info.nr_log) {
			hio_warn("%s: internal log is full\n", dev->name);
		}
		goto out;
	}

	internal_log = dev->internal_log.log + off;
	memcpy(internal_log, log, size);

	/* persist to SPI flash on newer protocols */
	if (dev->protocol_info.ver > SSD_PROTOCOL_V3) {
		off += dev->rom_info.log_base;

		ret = ssd_spi_write(dev, log, off, size);
		if (ret) {
			goto out;
		}
	}

	/* count the record only after it is (optionally) persisted */
	dev->internal_log.nr_log++;

out:
	mutex_unlock(&dev->internal_log_mutex);
	return ret;
}
5587 | ||
da3355df SF |
5588 | /** CRC table for the CRC-16. The poly is 0x8005 (x^16 + x^15 + x^2 + 1) */ |
5589 | static unsigned short const crc16_table[256] = { | |
5590 | 0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241, | |
5591 | 0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440, | |
5592 | 0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40, | |
5593 | 0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880, 0xC841, | |
5594 | 0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40, | |
5595 | 0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0, 0x1C80, 0xDC41, | |
5596 | 0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641, | |
5597 | 0xD201, 0x12C0, 0x1380, 0xD341, 0x1100, 0xD1C1, 0xD081, 0x1040, | |
5598 | 0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240, | |
5599 | 0x3600, 0xF6C1, 0xF781, 0x3740, 0xF501, 0x35C0, 0x3480, 0xF441, | |
5600 | 0x3C00, 0xFCC1, 0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41, | |
5601 | 0xFA01, 0x3AC0, 0x3B80, 0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840, | |
5602 | 0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41, | |
5603 | 0xEE01, 0x2EC0, 0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40, | |
5604 | 0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640, | |
5605 | 0x2200, 0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0, 0x2080, 0xE041, | |
5606 | 0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240, | |
5607 | 0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441, | |
5608 | 0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80, 0xAE41, | |
5609 | 0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840, | |
5610 | 0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0, 0x7A80, 0xBA41, | |
5611 | 0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40, | |
5612 | 0xB401, 0x74C0, 0x7580, 0xB541, 0x7700, 0xB7C1, 0xB681, 0x7640, | |
5613 | 0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041, | |
5614 | 0x5000, 0x90C1, 0x9181, 0x5140, 0x9301, 0x53C0, 0x5280, 0x9241, | |
5615 | 0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440, | |
5616 | 0x9C01, 0x5CC0, 0x5D80, 0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40, | |
5617 | 0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841, | |
5618 | 0x8801, 0x48C0, 0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40, | |
5619 | 0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41, | |
5620 | 0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641, | |
5621 | 0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040 | |
5622 | }; | |
5623 | ||
5624 | static unsigned short crc16_byte(unsigned short crc, const unsigned char data) | |
5625 | { | |
5626 | return (crc >> 8) ^ crc16_table[(crc ^ data) & 0xff]; | |
5627 | } | |
/**
 * crc16 - compute the CRC-16 for the data buffer
 * @crc: previous CRC value (seed)
 * @buffer: data pointer
 * @len: number of bytes in the buffer
 *
 * Returns the updated CRC value.
 *
 * Note: the loop condition is "len-- > 0" rather than "len--" so that a
 * negative @len is treated as an empty buffer instead of iterating through
 * the whole negative range of int.
 */
static unsigned short crc16(unsigned short crc, unsigned char const *buffer, int len)
{
	while (len-- > 0)
		crc = crc16_byte(crc, *buffer++);
	return crc;
}
5642 | ||
361ebed5 HSDT |
5643 | static int ssd_save_swlog(struct ssd_device *dev, uint16_t event, uint32_t data) |
5644 | { | |
5645 | struct ssd_log log; | |
361ebed5 HSDT |
5646 | int level; |
5647 | int ret = 0; | |
5648 | ||
5649 | if (unlikely(mode != SSD_DRV_MODE_STANDARD)) | |
5650 | return 0; | |
5651 | ||
5652 | memset(&log, 0, sizeof(struct ssd_log)); | |
5653 | ||
361ebed5 | 5654 | log.ctrl_idx = SSD_LOG_SW_IDX; |
57e45d44 | 5655 | log.time = ktime_get_real_seconds(); |
361ebed5 HSDT |
5656 | log.le.event = event; |
5657 | log.le.data.val = data; | |
5658 | ||
da3355df SF |
5659 | log.le.mod = SSD_DIF_WITH_OLD_LOG; |
5660 | log.le.idx = crc16(0,(const unsigned char *)&log,14); | |
361ebed5 HSDT |
5661 | level = ssd_parse_log(dev, &log, 0); |
5662 | if (level >= SSD_LOG_LEVEL) { | |
5663 | ret = ssd_save_log(dev, &log); | |
5664 | } | |
5665 | ||
5666 | /* set alarm */ | |
5667 | if (SSD_LOG_LEVEL_ERR == level) { | |
5668 | ssd_set_alarm(dev); | |
5669 | } | |
5670 | ||
5671 | /* update smart */ | |
5672 | dev->smart.log_info.nr_log++; | |
5673 | dev->smart.log_info.stat[level]++; | |
5674 | ||
5675 | /* handle event */ | |
5676 | ssd_handle_event(dev, event, level); | |
5677 | ||
5678 | return ret; | |
5679 | } | |
5680 | ||
5681 | static int ssd_gen_swlog(struct ssd_device *dev, uint16_t event, uint32_t data) | |
5682 | { | |
5683 | struct ssd_log_entry le; | |
5684 | int ret; | |
5685 | ||
5686 | if (unlikely(mode != SSD_DRV_MODE_STANDARD)) | |
5687 | return 0; | |
5688 | ||
5689 | /* slave port ? */ | |
5690 | if (dev->slave) { | |
5691 | return 0; | |
5692 | } | |
5693 | ||
5694 | memset(&le, 0, sizeof(struct ssd_log_entry)); | |
5695 | le.event = event; | |
5696 | le.data.val = data; | |
5697 | ||
5698 | ret = sfifo_put(&dev->log_fifo, &le); | |
5699 | if (ret) { | |
5700 | return ret; | |
5701 | } | |
5702 | ||
5703 | if (test_bit(SSD_INIT_WORKQ, &dev->state)) { | |
5704 | queue_work(dev->workq, &dev->log_work); | |
5705 | } | |
5706 | ||
5707 | return 0; | |
5708 | } | |
5709 | ||
5710 | static int ssd_do_swlog(struct ssd_device *dev) | |
5711 | { | |
5712 | struct ssd_log_entry le; | |
5713 | int ret = 0; | |
5714 | ||
5715 | memset(&le, 0, sizeof(struct ssd_log_entry)); | |
5716 | while (!sfifo_get(&dev->log_fifo, &le)) { | |
5717 | ret = ssd_save_swlog(dev, le.event, le.data.val); | |
5718 | if (ret) { | |
5719 | break; | |
5720 | } | |
5721 | } | |
5722 | ||
5723 | return ret; | |
5724 | } | |
5725 | ||
5726 | static int __ssd_clear_log(struct ssd_device *dev) | |
5727 | { | |
5728 | uint32_t off, length; | |
5729 | int ret; | |
5730 | ||
5731 | if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) { | |
5732 | return 0; | |
5733 | } | |
5734 | ||
5735 | if (dev->internal_log.nr_log == 0) { | |
5736 | return 0; | |
5737 | } | |
5738 | ||
5739 | mutex_lock(&dev->internal_log_mutex); | |
5740 | ||
5741 | off = dev->rom_info.log_base; | |
5742 | length = dev->rom_info.log_sz; | |
5743 | ||
5744 | ret = ssd_spi_erase(dev, off, length); | |
5745 | if (ret) { | |
5746 | hio_warn("%s: log erase: failed\n", dev->name); | |
5747 | goto out; | |
5748 | } | |
5749 | ||
5750 | dev->internal_log.nr_log = 0; | |
5751 | ||
5752 | out: | |
5753 | mutex_unlock(&dev->internal_log_mutex); | |
5754 | return ret; | |
5755 | } | |
5756 | ||
5757 | static int ssd_clear_log(struct ssd_device *dev) | |
5758 | { | |
5759 | int ret; | |
5760 | ||
5761 | ret = __ssd_clear_log(dev); | |
5762 | if(!ret) { | |
5763 | ssd_gen_swlog(dev, SSD_LOG_CLEAR_LOG, 0); | |
5764 | } | |
5765 | ||
5766 | return ret; | |
5767 | } | |
5768 | ||
5769 | static int ssd_do_log(struct ssd_device *dev, int ctrl_idx, void *buf) | |
5770 | { | |
5771 | struct ssd_log_entry *le; | |
5772 | struct ssd_log log; | |
361ebed5 HSDT |
5773 | int nr_log = 0; |
5774 | int level; | |
5775 | int ret = 0; | |
5776 | ||
5777 | ret = ssd_read_log(dev, ctrl_idx, buf, &nr_log); | |
5778 | if (ret) { | |
5779 | return ret; | |
5780 | } | |
5781 | ||
57e45d44 | 5782 | log.time = ktime_get_real_seconds(); |
361ebed5 HSDT |
5783 | log.ctrl_idx = ctrl_idx; |
5784 | ||
5785 | le = (ssd_log_entry_t *)buf; | |
5786 | while (nr_log > 0) { | |
5787 | memcpy(&log.le, le, sizeof(struct ssd_log_entry)); | |
5788 | ||
da3355df SF |
5789 | log.le.mod = SSD_DIF_WITH_OLD_LOG; |
5790 | log.le.idx = crc16(0,(const unsigned char *)&log,14); | |
361ebed5 HSDT |
5791 | level = ssd_parse_log(dev, &log, 1); |
5792 | if (level >= SSD_LOG_LEVEL) { | |
5793 | ssd_save_log(dev, &log); | |
5794 | } | |
5795 | ||
5796 | /* set alarm */ | |
5797 | if (SSD_LOG_LEVEL_ERR == level) { | |
5798 | ssd_set_alarm(dev); | |
5799 | } | |
5800 | ||
5801 | dev->smart.log_info.nr_log++; | |
5802 | if (SSD_LOG_SEU_FAULT != le->event && SSD_LOG_SEU_FAULT1 != le->event) { | |
5803 | dev->smart.log_info.stat[level]++; | |
5804 | } else { | |
5805 | /* SEU fault */ | |
5806 | ||
5807 | /* log to the volatile log info */ | |
5808 | dev->log_info.nr_log++; | |
5809 | dev->log_info.stat[level]++; | |
5810 | ||
5811 | /* do something */ | |
5812 | dev->reload_fw = 1; | |
5813 | ssd_reg32_write(dev->ctrlp + SSD_RELOAD_FW_REG, SSD_RELOAD_FLAG); | |
da3355df SF |
5814 | if (le->event != SSD_LOG_SEU_FAULT1) { |
5815 | dev->has_non_0x98_reg_access = 1; | |
5816 | } | |
361ebed5 HSDT |
5817 | |
5818 | /*dev->readonly = 1; | |
5819 | set_disk_ro(dev->gd, 1); | |
5820 | hio_warn("%s: switched to read-only mode.\n", dev->name);*/ | |
5821 | } | |
5822 | ||
5823 | /* handle event */ | |
5824 | ssd_handle_event(dev, le->event, level); | |
5825 | ||
5826 | le++; | |
5827 | nr_log--; | |
5828 | } | |
5829 | ||
5830 | return 0; | |
5831 | } | |
5832 | ||
5833 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)) | |
5834 | static void ssd_log_worker(void *data) | |
5835 | { | |
5836 | struct ssd_device *dev = (struct ssd_device *)data; | |
5837 | #else | |
5838 | static void ssd_log_worker(struct work_struct *work) | |
5839 | { | |
5840 | struct ssd_device *dev = container_of(work, struct ssd_device, log_work); | |
5841 | #endif | |
5842 | int i; | |
5843 | int ret; | |
5844 | ||
5845 | if (!test_bit(SSD_LOG_ERR, &dev->state) && test_bit(SSD_ONLINE, &dev->state)) { | |
5846 | /* alloc log buf */ | |
5847 | if (!dev->log_buf) { | |
5848 | dev->log_buf = kmalloc(dev->hw_info.log_sz, GFP_KERNEL); | |
5849 | if (!dev->log_buf) { | |
5850 | hio_warn("%s: ssd_log_worker: no mem\n", dev->name); | |
5851 | return; | |
5852 | } | |
5853 | } | |
5854 | ||
5855 | /* get log */ | |
5856 | if (test_and_clear_bit(SSD_LOG_HW, &dev->state)) { | |
5857 | for (i=0; i<dev->hw_info.nr_ctrl; i++) { | |
5858 | ret = ssd_do_log(dev, i, dev->log_buf); | |
5859 | if (ret) { | |
5860 | (void)test_and_set_bit(SSD_LOG_ERR, &dev->state); | |
5861 | hio_warn("%s: do log fail\n", dev->name); | |
5862 | } | |
5863 | } | |
5864 | } | |
5865 | } | |
5866 | ||
5867 | ret = ssd_do_swlog(dev); | |
5868 | if (ret) { | |
5869 | hio_warn("%s: do swlog fail\n", dev->name); | |
5870 | } | |
5871 | } | |
5872 | ||
5873 | static void ssd_cleanup_log(struct ssd_device *dev) | |
5874 | { | |
5875 | if (dev->log_buf) { | |
5876 | kfree(dev->log_buf); | |
5877 | dev->log_buf = NULL; | |
5878 | } | |
5879 | ||
5880 | sfifo_free(&dev->log_fifo); | |
5881 | ||
5882 | if (dev->internal_log.log) { | |
5883 | vfree(dev->internal_log.log); | |
1197134c | 5884 | dev->internal_log.nr_log = 0; |
361ebed5 HSDT |
5885 | dev->internal_log.log = NULL; |
5886 | } | |
5887 | } | |
5888 | ||
5889 | static int ssd_init_log(struct ssd_device *dev) | |
5890 | { | |
5891 | struct ssd_log *log; | |
5892 | uint32_t off, size; | |
5893 | uint32_t len = 0; | |
5894 | int ret = 0; | |
5895 | ||
5896 | mutex_init(&dev->internal_log_mutex); | |
5897 | ||
5898 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)) | |
5899 | INIT_WORK(&dev->log_work, ssd_log_worker, dev); | |
5900 | #else | |
5901 | INIT_WORK(&dev->log_work, ssd_log_worker); | |
5902 | #endif | |
5903 | ||
5904 | off = dev->rom_info.log_base; | |
5905 | size = dev->rom_info.log_sz; | |
5906 | ||
1197134c | 5907 | dev->internal_log.nr_log = 0; |
361ebed5 HSDT |
5908 | dev->internal_log.log = vmalloc(size); |
5909 | if (!dev->internal_log.log) { | |
5910 | ret = -ENOMEM; | |
5911 | goto out_alloc_log; | |
5912 | } | |
5913 | ||
5914 | ret = sfifo_alloc(&dev->log_fifo, SSD_LOG_FIFO_SZ, sizeof(struct ssd_log_entry)); | |
5915 | if (ret < 0) { | |
5916 | goto out_alloc_log_fifo; | |
5917 | } | |
5918 | ||
5919 | if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) { | |
5920 | return 0; | |
5921 | } | |
5922 | ||
5923 | log = (struct ssd_log *)dev->internal_log.log; | |
5924 | while (len < size) { | |
5925 | ret = ssd_spi_read(dev, log, off, sizeof(struct ssd_log)); | |
5926 | if (ret) { | |
5927 | goto out_read_log; | |
5928 | } | |
5929 | ||
5930 | if (log->ctrl_idx == 0xff) { | |
5931 | break; | |
5932 | } | |
5933 | ||
da3355df SF |
5934 | if (log->le.event == SSD_LOG_POWER_ON) { |
5935 | if (dev->internal_log.nr_log > dev->last_poweron_id) { | |
5936 | dev->last_poweron_id = dev->internal_log.nr_log; | |
5937 | } | |
5938 | } | |
5939 | ||
361ebed5 HSDT |
5940 | dev->internal_log.nr_log++; |
5941 | log++; | |
5942 | len += sizeof(struct ssd_log); | |
5943 | off += sizeof(struct ssd_log); | |
5944 | } | |
5945 | ||
5946 | return 0; | |
5947 | ||
5948 | out_read_log: | |
5949 | sfifo_free(&dev->log_fifo); | |
5950 | out_alloc_log_fifo: | |
5951 | vfree(dev->internal_log.log); | |
5952 | dev->internal_log.log = NULL; | |
5953 | dev->internal_log.nr_log = 0; | |
5954 | out_alloc_log: | |
5955 | /* skip error if not in standard mode */ | |
5956 | if (mode != SSD_DRV_MODE_STANDARD) { | |
5957 | ret = 0; | |
5958 | } | |
5959 | return ret; | |
5960 | } | |
5961 | ||
5962 | /* work queue */ | |
5963 | static void ssd_stop_workq(struct ssd_device *dev) | |
5964 | { | |
5965 | test_and_clear_bit(SSD_INIT_WORKQ, &dev->state); | |
5966 | flush_workqueue(dev->workq); | |
5967 | } | |
5968 | ||
5969 | static void ssd_start_workq(struct ssd_device *dev) | |
5970 | { | |
5971 | (void)test_and_set_bit(SSD_INIT_WORKQ, &dev->state); | |
5972 | ||
5973 | /* log ? */ | |
5974 | queue_work(dev->workq, &dev->log_work); | |
5975 | } | |
5976 | ||
5977 | static void ssd_cleanup_workq(struct ssd_device *dev) | |
5978 | { | |
5979 | flush_workqueue(dev->workq); | |
5980 | destroy_workqueue(dev->workq); | |
5981 | dev->workq = NULL; | |
5982 | } | |
5983 | ||
5984 | static int ssd_init_workq(struct ssd_device *dev) | |
5985 | { | |
5986 | int ret = 0; | |
5987 | ||
5988 | dev->workq = create_singlethread_workqueue(dev->name); | |
5989 | if (!dev->workq) { | |
5990 | ret = -ESRCH; | |
5991 | goto out; | |
5992 | } | |
5993 | ||
5994 | out: | |
5995 | return ret; | |
5996 | } | |
5997 | ||
5998 | /* rom */ | |
5999 | static int ssd_init_rom_info(struct ssd_device *dev) | |
6000 | { | |
6001 | uint32_t val; | |
6002 | ||
6003 | mutex_init(&dev->spi_mutex); | |
6004 | mutex_init(&dev->i2c_mutex); | |
6005 | ||
6006 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3) { | |
6007 | /* fix bug: read data to clear status */ | |
6008 | (void)ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_RDATA); | |
6009 | ||
6010 | dev->rom_info.size = SSD_ROM_SIZE; | |
6011 | dev->rom_info.block_size = SSD_ROM_BLK_SIZE; | |
6012 | dev->rom_info.page_size = SSD_ROM_PAGE_SIZE; | |
6013 | ||
6014 | dev->rom_info.bridge_fw_base = SSD_ROM_BRIDGE_FW_BASE; | |
6015 | dev->rom_info.bridge_fw_sz = SSD_ROM_BRIDGE_FW_SIZE; | |
6016 | dev->rom_info.nr_bridge_fw = SSD_ROM_NR_BRIDGE_FW; | |
6017 | ||
6018 | dev->rom_info.ctrl_fw_base = SSD_ROM_CTRL_FW_BASE; | |
6019 | dev->rom_info.ctrl_fw_sz = SSD_ROM_CTRL_FW_SIZE; | |
6020 | dev->rom_info.nr_ctrl_fw = SSD_ROM_NR_CTRL_FW; | |
6021 | ||
6022 | dev->rom_info.log_sz = SSD_ROM_LOG_SZ; | |
6023 | ||
6024 | dev->rom_info.vp_base = SSD_ROM_VP_BASE; | |
6025 | dev->rom_info.label_base = SSD_ROM_LABEL_BASE; | |
6026 | } else if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
6027 | val = ssd_reg32_read(dev->ctrlp + SSD_ROM_INFO_REG); | |
6028 | dev->rom_info.size = 0x100000 * (1U << (val & 0xFF)); | |
6029 | dev->rom_info.block_size = 0x10000 * (1U << ((val>>8) & 0xFF)); | |
6030 | dev->rom_info.page_size = (val>>16) & 0xFFFF; | |
6031 | ||
6032 | val = ssd_reg32_read(dev->ctrlp + SSD_ROM_BRIDGE_FW_INFO_REG); | |
6033 | dev->rom_info.bridge_fw_base = dev->rom_info.block_size * (val & 0xFFFF); | |
6034 | dev->rom_info.bridge_fw_sz = dev->rom_info.block_size * ((val>>16) & 0x3FFF); | |
6035 | dev->rom_info.nr_bridge_fw = ((val >> 30) & 0x3) + 1; | |
6036 | ||
6037 | val = ssd_reg32_read(dev->ctrlp + SSD_ROM_CTRL_FW_INFO_REG); | |
6038 | dev->rom_info.ctrl_fw_base = dev->rom_info.block_size * (val & 0xFFFF); | |
6039 | dev->rom_info.ctrl_fw_sz = dev->rom_info.block_size * ((val>>16) & 0x3FFF); | |
6040 | dev->rom_info.nr_ctrl_fw = ((val >> 30) & 0x3) + 1; | |
6041 | ||
6042 | dev->rom_info.bm_fw_base = dev->rom_info.ctrl_fw_base + (dev->rom_info.nr_ctrl_fw * dev->rom_info.ctrl_fw_sz); | |
6043 | dev->rom_info.bm_fw_sz = SSD_PV3_ROM_BM_FW_SZ; | |
6044 | dev->rom_info.nr_bm_fw = SSD_PV3_ROM_NR_BM_FW; | |
6045 | ||
6046 | dev->rom_info.log_base = dev->rom_info.bm_fw_base + (dev->rom_info.nr_bm_fw * dev->rom_info.bm_fw_sz); | |
6047 | dev->rom_info.log_sz = SSD_ROM_LOG_SZ; | |
6048 | ||
6049 | dev->rom_info.smart_base = dev->rom_info.log_base + dev->rom_info.log_sz; | |
6050 | dev->rom_info.smart_sz = SSD_PV3_ROM_SMART_SZ; | |
6051 | dev->rom_info.nr_smart = SSD_PV3_ROM_NR_SMART; | |
6052 | ||
6053 | val = ssd_reg32_read(dev->ctrlp + SSD_ROM_VP_INFO_REG); | |
6054 | dev->rom_info.vp_base = dev->rom_info.block_size * val; | |
6055 | dev->rom_info.label_base = dev->rom_info.vp_base + dev->rom_info.block_size; | |
6056 | if (dev->rom_info.label_base >= dev->rom_info.size) { | |
6057 | dev->rom_info.label_base = dev->rom_info.vp_base - dev->rom_info.block_size; | |
6058 | } | |
6059 | } else { | |
6060 | val = ssd_reg32_read(dev->ctrlp + SSD_ROM_INFO_REG); | |
6061 | dev->rom_info.size = 0x100000 * (1U << (val & 0xFF)); | |
6062 | dev->rom_info.block_size = 0x10000 * (1U << ((val>>8) & 0xFF)); | |
6063 | dev->rom_info.page_size = (val>>16) & 0xFFFF; | |
6064 | ||
6065 | val = ssd_reg32_read(dev->ctrlp + SSD_ROM_BRIDGE_FW_INFO_REG); | |
6066 | dev->rom_info.bridge_fw_base = dev->rom_info.block_size * (val & 0xFFFF); | |
6067 | dev->rom_info.bridge_fw_sz = dev->rom_info.block_size * ((val>>16) & 0x3FFF); | |
6068 | dev->rom_info.nr_bridge_fw = ((val >> 30) & 0x3) + 1; | |
6069 | ||
6070 | val = ssd_reg32_read(dev->ctrlp + SSD_ROM_CTRL_FW_INFO_REG); | |
6071 | dev->rom_info.ctrl_fw_base = dev->rom_info.block_size * (val & 0xFFFF); | |
6072 | dev->rom_info.ctrl_fw_sz = dev->rom_info.block_size * ((val>>16) & 0x3FFF); | |
6073 | dev->rom_info.nr_ctrl_fw = ((val >> 30) & 0x3) + 1; | |
6074 | ||
6075 | val = ssd_reg32_read(dev->ctrlp + SSD_ROM_VP_INFO_REG); | |
6076 | dev->rom_info.vp_base = dev->rom_info.block_size * val; | |
6077 | dev->rom_info.label_base = dev->rom_info.vp_base - SSD_PV3_2_ROM_SEC_SZ; | |
6078 | ||
6079 | dev->rom_info.nr_smart = SSD_PV3_ROM_NR_SMART; | |
6080 | dev->rom_info.smart_sz = SSD_PV3_2_ROM_SEC_SZ; | |
6081 | dev->rom_info.smart_base = dev->rom_info.label_base - (dev->rom_info.smart_sz * dev->rom_info.nr_smart); | |
6082 | if (dev->rom_info.smart_sz > dev->rom_info.block_size) { | |
6083 | dev->rom_info.smart_sz = dev->rom_info.block_size; | |
6084 | } | |
6085 | ||
6086 | dev->rom_info.log_sz = SSD_PV3_2_ROM_LOG_SZ; | |
6087 | dev->rom_info.log_base = dev->rom_info.smart_base - dev->rom_info.log_sz; | |
6088 | } | |
6089 | ||
6090 | return ssd_init_spi(dev); | |
6091 | } | |
6092 | ||
6093 | /* smart */ | |
6094 | static int ssd_update_smart(struct ssd_device *dev, struct ssd_smart *smart) | |
6095 | { | |
57e45d44 | 6096 | uint64_t cur_time, run_time; |
361ebed5 HSDT |
6097 | #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)) |
6098 | struct hd_struct *part; | |
6099 | int cpu; | |
6100 | #endif | |
6101 | int i, j; | |
6102 | int ret = 0; | |
6103 | ||
6104 | if (!test_bit(SSD_INIT_BD, &dev->state)) { | |
6105 | return 0; | |
6106 | } | |
6107 | ||
57e45d44 SF |
6108 | cur_time = (uint64_t)ktime_get_real_seconds(); |
6109 | if (cur_time < dev->uptime) { | |
361ebed5 HSDT |
6110 | run_time = 0; |
6111 | } else { | |
57e45d44 | 6112 | run_time = cur_time - dev->uptime; |
361ebed5 HSDT |
6113 | } |
6114 | ||
6115 | /* avoid frequently update */ | |
6116 | if (run_time >= 60) { | |
6117 | ret = 1; | |
6118 | } | |
6119 | ||
6120 | /* io stat */ | |
6121 | smart->io_stat.run_time += run_time; | |
6122 | ||
6123 | #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27)) | |
6124 | cpu = part_stat_lock(); | |
6125 | part = &dev->gd->part0; | |
b49bd764 SF |
6126 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0)) |
6127 | part_round_stats(dev->rq, cpu, part); | |
6128 | #else | |
361ebed5 | 6129 | part_round_stats(cpu, part); |
b49bd764 | 6130 | #endif |
361ebed5 HSDT |
6131 | part_stat_unlock(); |
6132 | ||
6133 | smart->io_stat.nr_read += part_stat_read(part, ios[READ]); | |
6134 | smart->io_stat.nr_write += part_stat_read(part, ios[WRITE]); | |
6135 | smart->io_stat.rsectors += part_stat_read(part, sectors[READ]); | |
6136 | smart->io_stat.wsectors += part_stat_read(part, sectors[WRITE]); | |
6137 | #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14)) | |
6138 | preempt_disable(); | |
6139 | disk_round_stats(dev->gd); | |
6140 | preempt_enable(); | |
6141 | ||
6142 | smart->io_stat.nr_read += disk_stat_read(dev->gd, ios[READ]); | |
6143 | smart->io_stat.nr_write += disk_stat_read(dev->gd, ios[WRITE]); | |
6144 | smart->io_stat.rsectors += disk_stat_read(dev->gd, sectors[READ]); | |
6145 | smart->io_stat.wsectors += disk_stat_read(dev->gd, sectors[WRITE]); | |
6146 | #else | |
6147 | preempt_disable(); | |
6148 | disk_round_stats(dev->gd); | |
6149 | preempt_enable(); | |
6150 | ||
6151 | smart->io_stat.nr_read += disk_stat_read(dev->gd, reads); | |
6152 | smart->io_stat.nr_write += disk_stat_read(dev->gd, writes); | |
6153 | smart->io_stat.rsectors += disk_stat_read(dev->gd, read_sectors); | |
6154 | smart->io_stat.wsectors += disk_stat_read(dev->gd, write_sectors); | |
6155 | #endif | |
6156 | ||
6157 | smart->io_stat.nr_to += atomic_read(&dev->tocnt); | |
6158 | ||
6159 | for (i=0; i<dev->nr_queue; i++) { | |
6160 | smart->io_stat.nr_rwerr += dev->queue[i].io_stat.nr_rwerr; | |
6161 | smart->io_stat.nr_ioerr += dev->queue[i].io_stat.nr_ioerr; | |
6162 | } | |
6163 | ||
6164 | for (i=0; i<dev->nr_queue; i++) { | |
6165 | for (j=0; j<SSD_ECC_MAX_FLIP; j++) { | |
6166 | smart->ecc_info.bitflip[j] += dev->queue[i].ecc_info.bitflip[j]; | |
6167 | } | |
6168 | } | |
6169 | ||
6170 | //dev->uptime = tv.tv_sec; | |
6171 | ||
6172 | return ret; | |
6173 | } | |
6174 | ||
da3355df | 6175 | static int __ssd_clear_smart(struct ssd_device *dev) |
361ebed5 | 6176 | { |
361ebed5 HSDT |
6177 | uint64_t sversion; |
6178 | uint32_t off, length; | |
6179 | int i; | |
6180 | int ret; | |
6181 | ||
6182 | if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) { | |
6183 | return 0; | |
6184 | } | |
6185 | ||
6186 | /* clear smart */ | |
6187 | off = dev->rom_info.smart_base; | |
6188 | length = dev->rom_info.smart_sz * dev->rom_info.nr_smart; | |
6189 | ||
6190 | ret = ssd_spi_erase(dev, off, length); | |
6191 | if (ret) { | |
6192 | hio_warn("%s: info erase: failed\n", dev->name); | |
6193 | goto out; | |
6194 | } | |
6195 | ||
6196 | sversion = dev->smart.version; | |
6197 | ||
6198 | memset(&dev->smart, 0, sizeof(struct ssd_smart)); | |
6199 | dev->smart.version = sversion + 1; | |
6200 | dev->smart.magic = SSD_SMART_MAGIC; | |
6201 | ||
6202 | /* clear all tmp acc */ | |
6203 | for (i=0; i<dev->nr_queue; i++) { | |
6204 | memset(&(dev->queue[i].io_stat), 0, sizeof(struct ssd_io_stat)); | |
6205 | memset(&(dev->queue[i].ecc_info), 0, sizeof(struct ssd_ecc_info)); | |
6206 | } | |
6207 | ||
6208 | atomic_set(&dev->tocnt, 0); | |
6209 | ||
6210 | /* clear tmp log info */ | |
6211 | memset(&dev->log_info, 0, sizeof(struct ssd_log_info)); | |
6212 | ||
57e45d44 | 6213 | dev->uptime = (uint64_t)ktime_get_real_seconds(); |
361ebed5 HSDT |
6214 | |
6215 | /* clear alarm ? */ | |
6216 | //ssd_clear_alarm(dev); | |
6217 | out: | |
6218 | return ret; | |
6219 | } | |
6220 | ||
da3355df | 6221 | static int __ssd_clear_warning(struct ssd_device *dev) |
1197134c KM |
6222 | { |
6223 | uint32_t off, size; | |
6224 | int i, ret = 0; | |
6225 | ||
6226 | if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) { | |
6227 | return 0; | |
6228 | } | |
6229 | ||
6230 | /* clear log_info warning */ | |
6231 | memset(&dev->smart.log_info, 0, sizeof(dev->smart.log_info)); | |
6232 | ||
6233 | /* clear io_stat warning */ | |
6234 | dev->smart.io_stat.nr_to = 0; | |
6235 | dev->smart.io_stat.nr_rwerr = 0; | |
6236 | dev->smart.io_stat.nr_ioerr = 0; | |
6237 | ||
6238 | /* clear ecc_info warning */ | |
6239 | memset(&dev->smart.ecc_info, 0, sizeof(dev->smart.ecc_info)); | |
6240 | ||
6241 | /* clear queued warnings */ | |
6242 | for (i=0; i<dev->nr_queue; i++) { | |
6243 | /* queued io_stat warning */ | |
6244 | dev->queue[i].io_stat.nr_to = 0; | |
6245 | dev->queue[i].io_stat.nr_rwerr = 0; | |
6246 | dev->queue[i].io_stat.nr_ioerr = 0; | |
6247 | ||
6248 | /* queued ecc_info warning */ | |
6249 | memset(&(dev->queue[i].ecc_info), 0, sizeof(dev->queue[i].ecc_info)); | |
6250 | } | |
6251 | ||
6252 | /* write smart back to nor */ | |
6253 | for (i = 0; i < dev->rom_info.nr_smart; i++) { | |
6254 | off = dev->rom_info.smart_base + (dev->rom_info.smart_sz * i); | |
6255 | size = dev->rom_info.smart_sz; | |
6256 | ||
6257 | ret = ssd_spi_erase(dev, off, size); | |
6258 | if (ret) { | |
6259 | hio_warn("%s: warning erase: failed with code 1\n", dev->name); | |
6260 | goto out; | |
6261 | } | |
6262 | ||
6263 | size = sizeof(struct ssd_smart); | |
6264 | ||
6265 | ret = ssd_spi_write(dev, &dev->smart, off, size); | |
6266 | if (ret) { | |
6267 | hio_warn("%s: warning erase: failed with code 2\n", dev->name); | |
6268 | goto out; | |
6269 | } | |
6270 | } | |
6271 | ||
6272 | dev->smart.version++; | |
6273 | ||
6274 | /* clear cmd timeout warning */ | |
6275 | atomic_set(&dev->tocnt, 0); | |
6276 | ||
6277 | /* clear tmp log info */ | |
6278 | memset(&dev->log_info, 0, sizeof(dev->log_info)); | |
6279 | ||
6280 | out: | |
6281 | return ret; | |
6282 | } | |
6283 | ||
da3355df SF |
6284 | static int ssd_clear_smart(struct ssd_device *dev) |
6285 | { | |
6286 | int ret; | |
6287 | ||
6288 | ret = __ssd_clear_smart(dev); | |
6289 | if(!ret) { | |
6290 | ssd_gen_swlog(dev, SSD_LOG_CLEAR_SMART, 0); | |
6291 | } | |
6292 | ||
6293 | return ret; | |
6294 | } | |
6295 | ||
6296 | static int ssd_clear_warning(struct ssd_device *dev) | |
6297 | { | |
6298 | int ret; | |
6299 | ||
6300 | ret = __ssd_clear_warning(dev); | |
6301 | if(!ret) { | |
6302 | ssd_gen_swlog(dev, SSD_LOG_CLEAR_WARNING, 0); | |
6303 | } | |
6304 | ||
6305 | return ret; | |
6306 | } | |
6307 | ||
361ebed5 HSDT |
6308 | static int ssd_save_smart(struct ssd_device *dev) |
6309 | { | |
6310 | uint32_t off, size; | |
6311 | int i; | |
6312 | int ret = 0; | |
6313 | ||
6314 | if (unlikely(mode != SSD_DRV_MODE_STANDARD)) | |
6315 | return 0; | |
6316 | ||
6317 | if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) { | |
6318 | return 0; | |
6319 | } | |
6320 | ||
6321 | if (!ssd_update_smart(dev, &dev->smart)) { | |
6322 | return 0; | |
6323 | } | |
6324 | ||
6325 | dev->smart.version++; | |
6326 | ||
6327 | for (i=0; i<dev->rom_info.nr_smart; i++) { | |
6328 | off = dev->rom_info.smart_base + (dev->rom_info.smart_sz * i); | |
6329 | size = dev->rom_info.smart_sz; | |
6330 | ||
6331 | ret = ssd_spi_erase(dev, off, size); | |
6332 | if (ret) { | |
6333 | hio_warn("%s: info erase failed\n", dev->name); | |
6334 | goto out; | |
6335 | } | |
6336 | ||
6337 | size = sizeof(struct ssd_smart); | |
6338 | ||
6339 | ret = ssd_spi_write(dev, &dev->smart, off, size); | |
6340 | if (ret) { | |
6341 | hio_warn("%s: info write failed\n", dev->name); | |
6342 | goto out; | |
6343 | } | |
6344 | ||
6345 | //xx | |
6346 | } | |
6347 | ||
6348 | out: | |
6349 | return ret; | |
6350 | } | |
6351 | ||
6352 | static int ssd_init_smart(struct ssd_device *dev) | |
6353 | { | |
6354 | struct ssd_smart *smart; | |
da3355df | 6355 | uint32_t off, size, val; |
361ebed5 HSDT |
6356 | int i; |
6357 | int ret = 0; | |
da3355df | 6358 | int update_smart = 0; |
361ebed5 | 6359 | |
57e45d44 | 6360 | dev->uptime = (uint64_t)ktime_get_real_seconds(); |
361ebed5 HSDT |
6361 | |
6362 | if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) { | |
6363 | return 0; | |
6364 | } | |
6365 | ||
6366 | smart = kmalloc(sizeof(struct ssd_smart) * SSD_ROM_NR_SMART_MAX, GFP_KERNEL); | |
6367 | if (!smart) { | |
6368 | ret = -ENOMEM; | |
6369 | goto out_nomem; | |
6370 | } | |
6371 | ||
6372 | memset(&dev->smart, 0, sizeof(struct ssd_smart)); | |
6373 | ||
6374 | /* read smart */ | |
6375 | for (i=0; i<dev->rom_info.nr_smart; i++) { | |
6376 | memset(&smart[i], 0, sizeof(struct ssd_smart)); | |
6377 | ||
6378 | off = dev->rom_info.smart_base + (dev->rom_info.smart_sz * i); | |
6379 | size = sizeof(struct ssd_smart); | |
6380 | ||
6381 | ret = ssd_spi_read(dev, &smart[i], off, size); | |
6382 | if (ret) { | |
6383 | hio_warn("%s: info read failed\n", dev->name); | |
6384 | goto out; | |
6385 | } | |
6386 | ||
6387 | if (smart[i].magic != SSD_SMART_MAGIC) { | |
6388 | smart[i].magic = 0; | |
6389 | smart[i].version = 0; | |
6390 | continue; | |
6391 | } | |
6392 | ||
6393 | if (smart[i].version > dev->smart.version) { | |
6394 | memcpy(&dev->smart, &smart[i], sizeof(struct ssd_smart)); | |
6395 | } | |
6396 | } | |
6397 | ||
6398 | if (dev->smart.magic != SSD_SMART_MAGIC) { | |
6399 | /* first time power up */ | |
6400 | dev->smart.magic = SSD_SMART_MAGIC; | |
6401 | dev->smart.version = 1; | |
6402 | } | |
6403 | ||
da3355df SF |
6404 | val = ssd_reg32_read(dev->ctrlp + SSD_INTR_INTERVAL_REG); |
6405 | if (!val) { | |
6406 | dev->last_poweron_id = ~0; | |
6407 | ssd_gen_swlog(dev, SSD_LOG_POWER_ON, dev->hw_info.bridge_ver); | |
6408 | if (dev->smart.io_stat.nr_to) { | |
6409 | dev->smart.io_stat.nr_to = 0; | |
6410 | update_smart = 1; | |
6411 | } | |
6412 | } | |
6413 | ||
361ebed5 HSDT |
6414 | /* check log info */ |
6415 | { | |
6416 | struct ssd_log_info log_info; | |
6417 | struct ssd_log *log = (struct ssd_log *)dev->internal_log.log; | |
6418 | ||
6419 | memset(&log_info, 0, sizeof(struct ssd_log_info)); | |
6420 | ||
6421 | while (log_info.nr_log < dev->internal_log.nr_log) { | |
da3355df SF |
6422 | int skip = 0; |
6423 | ||
6424 | switch (log->le.event) { | |
361ebed5 | 6425 | /* skip the volatile log info */ |
da3355df SF |
6426 | case SSD_LOG_SEU_FAULT: |
6427 | case SSD_LOG_SEU_FAULT1: | |
6428 | skip = 1; | |
6429 | break; | |
6430 | case SSD_LOG_TIMEOUT: | |
6431 | skip = (dev->last_poweron_id >= log_info.nr_log); | |
6432 | break; | |
6433 | } | |
6434 | ||
6435 | if (!skip) { | |
361ebed5 HSDT |
6436 | log_info.stat[ssd_parse_log(dev, log, 0)]++; |
6437 | } | |
6438 | ||
6439 | log_info.nr_log++; | |
6440 | log++; | |
6441 | } | |
6442 | ||
6443 | /* check */ | |
6444 | for (i=(SSD_LOG_NR_LEVEL-1); i>=0; i--) { | |
da3355df | 6445 | if (log_info.stat[i] != dev->smart.log_info.stat[i]) { |
361ebed5 HSDT |
6446 | /* unclean */ |
6447 | memcpy(&dev->smart.log_info, &log_info, sizeof(struct ssd_log_info)); | |
da3355df | 6448 | update_smart = 1; |
361ebed5 HSDT |
6449 | break; |
6450 | } | |
6451 | } | |
da3355df SF |
6452 | |
6453 | if (update_smart) { | |
6454 | ++dev->smart.version; | |
6455 | } | |
361ebed5 HSDT |
6456 | } |
6457 | ||
6458 | for (i=0; i<dev->rom_info.nr_smart; i++) { | |
6459 | if (smart[i].magic == SSD_SMART_MAGIC && smart[i].version == dev->smart.version) { | |
6460 | continue; | |
6461 | } | |
6462 | ||
6463 | off = dev->rom_info.smart_base + (dev->rom_info.smart_sz * i); | |
6464 | size = dev->rom_info.smart_sz; | |
6465 | ||
6466 | ret = ssd_spi_erase(dev, off, size); | |
6467 | if (ret) { | |
6468 | hio_warn("%s: info erase failed\n", dev->name); | |
6469 | goto out; | |
6470 | } | |
6471 | ||
6472 | size = sizeof(struct ssd_smart); | |
6473 | ret = ssd_spi_write(dev, &dev->smart, off, size); | |
6474 | if (ret) { | |
6475 | hio_warn("%s: info write failed\n", dev->name); | |
6476 | goto out; | |
6477 | } | |
6478 | ||
6479 | //xx | |
6480 | } | |
6481 | ||
6482 | /* sync smart with alarm led */ | |
6483 | if (dev->smart.io_stat.nr_to || dev->smart.io_stat.nr_rwerr || dev->smart.log_info.stat[SSD_LOG_LEVEL_ERR]) { | |
6484 | hio_warn("%s: some fault found in the history info\n", dev->name); | |
6485 | ssd_set_alarm(dev); | |
6486 | } | |
6487 | ||
6488 | out: | |
6489 | kfree(smart); | |
6490 | out_nomem: | |
6491 | /* skip error if not in standard mode */ | |
6492 | if (mode != SSD_DRV_MODE_STANDARD) { | |
6493 | ret = 0; | |
6494 | } | |
6495 | return ret; | |
6496 | } | |
6497 | ||
6498 | /* bm */ | |
6499 | static int __ssd_bm_get_version(struct ssd_device *dev, uint16_t *ver) | |
6500 | { | |
6501 | struct ssd_bm_manufacturer_data bm_md = {0}; | |
6502 | uint16_t sc_id = SSD_BM_SYSTEM_DATA_SUBCLASS_ID; | |
6503 | uint8_t cmd; | |
6504 | int ret = 0; | |
6505 | ||
6506 | if (!dev || !ver) { | |
6507 | return -EINVAL; | |
6508 | } | |
6509 | ||
6510 | mutex_lock(&dev->bm_mutex); | |
6511 | ||
6512 | cmd = SSD_BM_DATA_FLASH_SUBCLASS_ID; | |
6513 | ret = ssd_smbus_write_word(dev, SSD_BM_SLAVE_ADDRESS, cmd, (uint8_t *)&sc_id); | |
6514 | if (ret) { | |
6515 | goto out; | |
6516 | } | |
6517 | ||
6518 | cmd = SSD_BM_DATA_FLASH_SUBCLASS_ID_PAGE1; | |
6519 | ret = ssd_smbus_read_block(dev, SSD_BM_SLAVE_ADDRESS, cmd, sizeof(struct ssd_bm_manufacturer_data), (uint8_t *)&bm_md); | |
6520 | if (ret) { | |
6521 | goto out; | |
6522 | } | |
6523 | ||
6524 | if (bm_md.firmware_ver & 0xF000) { | |
6525 | ret = -EIO; | |
6526 | goto out; | |
6527 | } | |
6528 | ||
6529 | *ver = bm_md.firmware_ver; | |
6530 | ||
6531 | out: | |
6532 | mutex_unlock(&dev->bm_mutex); | |
6533 | return ret; | |
6534 | } | |
6535 | ||
6536 | static int ssd_bm_get_version(struct ssd_device *dev, uint16_t *ver) | |
6537 | { | |
6538 | uint16_t tmp = 0; | |
6539 | int i = SSD_BM_RETRY_MAX; | |
6540 | int ret = 0; | |
6541 | ||
6542 | while (i-- > 0) { | |
6543 | ret = __ssd_bm_get_version(dev, &tmp); | |
6544 | if (!ret) { | |
6545 | break; | |
6546 | } | |
6547 | } | |
6548 | if (ret) { | |
6549 | return ret; | |
6550 | } | |
6551 | ||
6552 | *ver = tmp; | |
6553 | ||
6554 | return 0; | |
6555 | } | |
6556 | ||
6557 | static int __ssd_bm_nr_cap(struct ssd_device *dev, int *nr_cap) | |
6558 | { | |
6559 | struct ssd_bm_configuration_registers bm_cr; | |
6560 | uint16_t sc_id = SSD_BM_CONFIGURATION_REGISTERS_ID; | |
6561 | uint8_t cmd; | |
6562 | int ret; | |
6563 | ||
6564 | mutex_lock(&dev->bm_mutex); | |
6565 | ||
6566 | cmd = SSD_BM_DATA_FLASH_SUBCLASS_ID; | |
6567 | ret = ssd_smbus_write_word(dev, SSD_BM_SLAVE_ADDRESS, cmd, (uint8_t *)&sc_id); | |
6568 | if (ret) { | |
6569 | goto out; | |
6570 | } | |
6571 | ||
6572 | cmd = SSD_BM_DATA_FLASH_SUBCLASS_ID_PAGE1; | |
6573 | ret = ssd_smbus_read_block(dev, SSD_BM_SLAVE_ADDRESS, cmd, sizeof(struct ssd_bm_configuration_registers), (uint8_t *)&bm_cr); | |
6574 | if (ret) { | |
6575 | goto out; | |
6576 | } | |
6577 | ||
6578 | if (bm_cr.operation_cfg.cc == 0 || bm_cr.operation_cfg.cc > 4) { | |
6579 | ret = -EIO; | |
6580 | goto out; | |
6581 | } | |
6582 | ||
6583 | *nr_cap = bm_cr.operation_cfg.cc + 1; | |
6584 | ||
6585 | out: | |
6586 | mutex_unlock(&dev->bm_mutex); | |
6587 | return ret; | |
6588 | } | |
6589 | ||
6590 | static int ssd_bm_nr_cap(struct ssd_device *dev, int *nr_cap) | |
6591 | { | |
6592 | int tmp = 0; | |
6593 | int i = SSD_BM_RETRY_MAX; | |
6594 | int ret = 0; | |
6595 | ||
6596 | while (i-- > 0) { | |
6597 | ret = __ssd_bm_nr_cap(dev, &tmp); | |
6598 | if (!ret) { | |
6599 | break; | |
6600 | } | |
6601 | } | |
6602 | if (ret) { | |
6603 | return ret; | |
6604 | } | |
6605 | ||
6606 | *nr_cap = tmp; | |
6607 | ||
6608 | return 0; | |
6609 | } | |
6610 | ||
6611 | static int ssd_bm_enter_cap_learning(struct ssd_device *dev) | |
6612 | { | |
6613 | uint16_t buf = SSD_BM_ENTER_CAP_LEARNING; | |
6614 | uint8_t cmd = SSD_BM_MANUFACTURERACCESS; | |
6615 | int ret; | |
6616 | ||
6617 | ret = ssd_smbus_write_word(dev, SSD_BM_SLAVE_ADDRESS, cmd, (uint8_t *)&buf); | |
6618 | if (ret) { | |
6619 | goto out; | |
6620 | } | |
6621 | ||
6622 | out: | |
6623 | return ret; | |
6624 | } | |
6625 | ||
6626 | static int ssd_bm_get_sfstatus(struct ssd_device *dev, uint16_t *status) | |
6627 | { | |
6628 | uint16_t val = 0; | |
6629 | uint8_t cmd = SSD_BM_SAFETYSTATUS; | |
6630 | int ret; | |
6631 | ||
6632 | ret = ssd_smbus_read_word(dev, SSD_BM_SLAVE_ADDRESS, cmd, (uint8_t *)&val); | |
6633 | if (ret) { | |
6634 | goto out; | |
6635 | } | |
6636 | ||
6637 | *status = val; | |
6638 | out: | |
6639 | return ret; | |
6640 | } | |
6641 | ||
6642 | static int ssd_bm_get_opstatus(struct ssd_device *dev, uint16_t *status) | |
6643 | { | |
6644 | uint16_t val = 0; | |
6645 | uint8_t cmd = SSD_BM_OPERATIONSTATUS; | |
6646 | int ret; | |
6647 | ||
6648 | ret = ssd_smbus_read_word(dev, SSD_BM_SLAVE_ADDRESS, cmd, (uint8_t *)&val); | |
6649 | if (ret) { | |
6650 | goto out; | |
6651 | } | |
6652 | ||
6653 | *status = val; | |
6654 | out: | |
6655 | return ret; | |
6656 | } | |
6657 | ||
6658 | static int ssd_get_bmstruct(struct ssd_device *dev, struct ssd_bm *bm_status_out) | |
6659 | { | |
6660 | struct sbs_cmd *bm_sbs = ssd_bm_sbs; | |
6661 | struct ssd_bm bm_status; | |
6662 | uint8_t buf[2] = {0, }; | |
6663 | uint16_t val = 0; | |
6664 | uint16_t cval; | |
6665 | int ret = 0; | |
6666 | ||
6667 | memset(&bm_status, 0, sizeof(struct ssd_bm)); | |
6668 | ||
6669 | while (bm_sbs->desc != NULL) { | |
6670 | switch (bm_sbs->size) { | |
6671 | case SBS_SIZE_BYTE: | |
6672 | ret = ssd_smbus_read_byte(dev, SSD_BM_SLAVE_ADDRESS, bm_sbs->cmd, buf); | |
6673 | if (ret) { | |
6674 | //printf("Error: smbus read byte %#x\n", bm_sbs->cmd); | |
6675 | goto out; | |
6676 | } | |
6677 | val = buf[0]; | |
6678 | break; | |
6679 | case SBS_SIZE_WORD: | |
6680 | ret = ssd_smbus_read_word(dev, SSD_BM_SLAVE_ADDRESS, bm_sbs->cmd, (uint8_t *)&val); | |
6681 | if (ret) { | |
6682 | //printf("Error: smbus read word %#x\n", bm_sbs->cmd); | |
6683 | goto out; | |
6684 | } | |
6685 | //val = *(uint16_t *)buf; | |
6686 | break; | |
6687 | default: | |
6688 | ret = -1; | |
6689 | goto out; | |
6690 | break; | |
6691 | } | |
6692 | ||
6693 | switch (bm_sbs->unit) { | |
6694 | case SBS_UNIT_VALUE: | |
6695 | *(uint16_t *)bm_var(&bm_status, bm_sbs->off) = val & bm_sbs->mask; | |
6696 | break; | |
6697 | case SBS_UNIT_TEMPERATURE: | |
6698 | cval = (uint16_t)(val - 2731) / 10; | |
6699 | *(uint16_t *)bm_var(&bm_status, bm_sbs->off) = cval; | |
6700 | break; | |
6701 | case SBS_UNIT_VOLTAGE: | |
6702 | *(uint16_t *)bm_var(&bm_status, bm_sbs->off) = val; | |
6703 | break; | |
6704 | case SBS_UNIT_CURRENT: | |
6705 | *(uint16_t *)bm_var(&bm_status, bm_sbs->off) = val; | |
6706 | break; | |
6707 | case SBS_UNIT_ESR: | |
6708 | *(uint16_t *)bm_var(&bm_status, bm_sbs->off) = val; | |
6709 | break; | |
6710 | case SBS_UNIT_PERCENT: | |
6711 | *(uint16_t *)bm_var(&bm_status, bm_sbs->off) = val; | |
6712 | break; | |
6713 | case SBS_UNIT_CAPACITANCE: | |
6714 | *(uint16_t *)bm_var(&bm_status, bm_sbs->off) = val; | |
6715 | break; | |
6716 | default: | |
6717 | ret = -1; | |
6718 | goto out; | |
6719 | break; | |
6720 | } | |
6721 | ||
6722 | bm_sbs++; | |
6723 | } | |
6724 | ||
6725 | memcpy(bm_status_out, &bm_status, sizeof(struct ssd_bm)); | |
6726 | ||
6727 | out: | |
6728 | return ret; | |
6729 | } | |
6730 | ||
6731 | static int __ssd_bm_status(struct ssd_device *dev, int *status) | |
6732 | { | |
6733 | struct ssd_bm bm_status = {0}; | |
6734 | int nr_cap = 0; | |
6735 | int i; | |
6736 | int ret = 0; | |
6737 | ||
6738 | ret = ssd_get_bmstruct(dev, &bm_status); | |
6739 | if (ret) { | |
6740 | goto out; | |
6741 | } | |
6742 | ||
6743 | /* capacitor voltage */ | |
6744 | ret = ssd_bm_nr_cap(dev, &nr_cap); | |
6745 | if (ret) { | |
6746 | goto out; | |
6747 | } | |
6748 | ||
6749 | for (i=0; i<nr_cap; i++) { | |
6750 | if (bm_status.cap_volt[i] < SSD_BM_CAP_VOLT_MIN) { | |
6751 | *status = SSD_BMSTATUS_WARNING; | |
6752 | goto out; | |
6753 | } | |
6754 | } | |
6755 | ||
6756 | /* Safety Status */ | |
6757 | if (bm_status.sf_status) { | |
6758 | *status = SSD_BMSTATUS_WARNING; | |
6759 | goto out; | |
6760 | } | |
6761 | ||
6762 | /* charge status */ | |
6763 | if (!((bm_status.op_status >> 12) & 0x1)) { | |
6764 | *status = SSD_BMSTATUS_CHARGING; | |
6765 | }else{ | |
6766 | *status = SSD_BMSTATUS_OK; | |
6767 | } | |
6768 | ||
6769 | out: | |
6770 | return ret; | |
6771 | } | |
6772 | ||
6773 | static void ssd_set_flush_timeout(struct ssd_device *dev, int mode); | |
6774 | ||
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
/* Battery-module worker (pre-2.6.20 workqueues pass a bare void *). */
static void ssd_bm_worker(void *data)
{
	struct ssd_device *dev = (struct ssd_device *)data;
#else
/*
 * Battery-module worker: when no capacitance-learning activity is
 * reported in OperationStatus, start a new learning cycle.
 * Scheduled from the bm timer via ssd_bm_routine_start().
 */
static void ssd_bm_worker(struct work_struct *work)
{
	struct ssd_device *dev = container_of(work, struct ssd_device, bm_work);
#endif

	uint16_t opstatus;
	int ret = 0;

	/* battery management only runs in standard driver mode */
	if (mode != SSD_DRV_MODE_STANDARD) {
		return;
	}

	/* no BM support before protocol V3.1.1 */
	if (dev->protocol_info.ver < SSD_PROTOCOL_V3_1_1) {
		return;
	}

	/* only relevant for supercap power-loss protection */
	if (dev->hw_info_ext.plp_type != SSD_PLP_SCAP) {
		return;
	}

	ret = ssd_bm_get_opstatus(dev, &opstatus);
	if (ret) {
		hio_warn("%s: get bm operationstatus failed\n", dev->name);
		return;
	}

	/* need cap learning ? */
	/* NOTE(review): bits 4-7 of OperationStatus appear to track the
	 * learning state; all-clear is treated as "idle" - confirm */
	if (!(opstatus & 0xF0)) {
		ret = ssd_bm_enter_cap_learning(dev);
		if (ret) {
			hio_warn("%s: enter capacitance learning failed\n", dev->name);
			return;
		}
	}
}
6815 | ||
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0))
/* Timer callback (legacy void* signature): schedule the BM/cap work. */
static void ssd_bm_routine_start(void *data)
#else
/* Timer callback (4.15+ timer_list signature): schedule the BM/cap work. */
static void ssd_bm_routine_start(struct timer_list *t)
#endif
{
	struct ssd_device *dev;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0))
	if (!data) {
		return;
	}
	dev = data;
#else
	dev = from_timer(dev, t, bm_timer);
#endif

	/* only queue while the workqueue is up (SSD_INIT_WORKQ set) */
	if (test_bit(SSD_INIT_WORKQ, &dev->state)) {
		/* pre-V3.2 protocols use the BM worker, V3.2+ the capmon worker */
		if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
			queue_work(dev->workq, &dev->bm_work);
		} else {
			queue_work(dev->workq, &dev->capmon_work);
		}
	}
}
6841 | ||
6842 | /* CAP */ | |
/*
 * Measure the power-loss-protection supercap capacitance.
 * Waits until the cap is fully charged (LM80 voltage U1), samples the
 * second point U2, triggers the hardware learning sequence and computes
 * the result via SSD_PL_CAP_LEARN(u1, u2, t).
 * Returns 0 with *cap set, or a negative errno
 * (-ETIMEDOUT, -EINVAL, or an SMBus error).
 */
static int ssd_do_cap_learn(struct ssd_device *dev, uint32_t *cap)
{
	uint32_t u1, u2, t;
	uint16_t val = 0;
	int wait = 0;
	int ret = 0;

	/* no supercap learning support before protocol V3.2 */
	if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
		*cap = 0;
		return 0;
	}

	/* NOTE(review): FHHL boards before PCB rev 'B' presumably lack the
	 * measuring circuit - learning is skipped for them */
	if (dev->hw_info_ext.form_factor == SSD_FORM_FACTOR_FHHL && dev->hw_info.pcb_ver < 'B') {
		*cap = 0;
		return 0;
	}

	/* make sure the lm80 voltage value is updated */
	msleep(SSD_LM80_CONV_INTERVAL);

	/* check if full charged */
	wait = 0;
	for (;;) {
		ret = ssd_smbus_read_word(dev, SSD_SENSOR_LM80_SADDRESS, SSD_PL_CAP_U1, (uint8_t *)&val);
		if (ret) {
			/* log the sensor fault only on the first failure */
			if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80), &dev->hwmon)) {
				ssd_generate_sensor_fault_log(dev, SSD_LOG_SENSOR_FAULT, SSD_SENSOR_LM80_SADDRESS,ret);
			}
			goto out;
		}
		/* byte-swap the LM80 word before converting to a voltage */
		u1 = SSD_LM80_CONVERT_VOLT(u16_swap(val));
		if (SSD_PL_CAP_VOLT(u1) >= SSD_PL_CAP_VOLT_FULL) {
			break;
		}

		wait++;
		if (wait > SSD_PL_CAP_CHARGE_MAX_WAIT) {
			ret = -ETIMEDOUT;
			goto out;
		}
		msleep(SSD_PL_CAP_CHARGE_WAIT);
	}

	/* sample the second measuring point */
	ret = ssd_smbus_read_word(dev, SSD_SENSOR_LM80_SADDRESS, SSD_PL_CAP_U2, (uint8_t *)&val);
	if (ret) {
		if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80), &dev->hwmon)) {
			ssd_generate_sensor_fault_log(dev, SSD_LOG_SENSOR_FAULT, SSD_SENSOR_LM80_SADDRESS,ret);
		}
		goto out;
	}
	u2 = SSD_LM80_CONVERT_VOLT(u16_swap(val));

	/* identical voltages would make the learning formula degenerate */
	if (u1 == u2) {
		ret = -EINVAL;
		goto out;
	}

	/* enter cap learn */
	ssd_reg32_write(dev->ctrlp + SSD_PL_CAP_LEARN_REG, 0x1);

	/* poll until the hardware clears the busy bit (bit 1) */
	wait = 0;
	for (;;) {
		msleep(SSD_PL_CAP_LEARN_WAIT);

		t = ssd_reg32_read(dev->ctrlp + SSD_PL_CAP_LEARN_REG);
		if (!((t >> 1) & 0x1)) {
			break;
		}

		wait++;
		if (wait > SSD_PL_CAP_LEARN_MAX_WAIT) {
			ret = -ETIMEDOUT;
			goto out;
		}
	}

	/* bit 4 set indicates the hardware timed out internally */
	if ((t >> 4) & 0x1) {
		ret = -ETIMEDOUT;
		goto out;
	}

	/* the measured time lives in bits 8 and up; zero is invalid */
	t = (t >> 8);
	if (0 == t) {
		ret = -EINVAL;
		goto out;
	}

	*cap = SSD_PL_CAP_LEARN(u1, u2, t);

out:
	return ret;
}
6935 | ||
6936 | static int ssd_cap_learn(struct ssd_device *dev, uint32_t *cap) | |
6937 | { | |
6938 | int ret = 0; | |
6939 | ||
6940 | if (!dev || !cap) { | |
6941 | return -EINVAL; | |
6942 | } | |
6943 | ||
6944 | mutex_lock(&dev->bm_mutex); | |
6945 | ||
6946 | ssd_stop_workq(dev); | |
6947 | ||
6948 | ret = ssd_do_cap_learn(dev, cap); | |
6949 | if (ret) { | |
6950 | ssd_gen_swlog(dev, SSD_LOG_CAP_LEARN_FAULT, 0); | |
6951 | goto out; | |
6952 | } | |
6953 | ||
6954 | ssd_gen_swlog(dev, SSD_LOG_CAP_STATUS, *cap); | |
6955 | ||
6956 | out: | |
6957 | ssd_start_workq(dev); | |
6958 | mutex_unlock(&dev->bm_mutex); | |
6959 | ||
6960 | return ret; | |
6961 | } | |
6962 | ||
/*
 * Power-on check of the power-loss-protection supercap: wait for its
 * voltage to reach the ready level, then program the LM80 low-voltage
 * alarm limit and enable monitoring of the cap input.
 * Errors are suppressed (returns 0) outside standard driver mode.
 */
static int ssd_check_pl_cap(struct ssd_device *dev)
{
	uint32_t u1;
	uint16_t val = 0;
	uint8_t low = 0;
	int wait = 0;
	int ret = 0;

	/* no supercap support before protocol V3.2 */
	if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
		return 0;
	}

	/* NOTE(review): FHHL boards before PCB rev 'B' presumably lack the
	 * cap monitoring circuit - check skipped for them */
	if (dev->hw_info_ext.form_factor == SSD_FORM_FACTOR_FHHL && dev->hw_info.pcb_ver < 'B') {
		return 0;
	}

	/* cap ready ? */
	wait = 0;
	for (;;) {
		ret = ssd_smbus_read_word(dev, SSD_SENSOR_LM80_SADDRESS, SSD_PL_CAP_U1, (uint8_t *)&val);
		if (ret) {
			/* log a sensor fault only on the first failure */
			if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80), &dev->hwmon)) {
				ssd_generate_sensor_fault_log(dev, SSD_LOG_SENSOR_FAULT, SSD_SENSOR_LM80_SADDRESS,ret);
			}
			goto out;
		}
		/* byte-swap the LM80 word before converting to a voltage */
		u1 = SSD_LM80_CONVERT_VOLT(u16_swap(val));
		if (SSD_PL_CAP_VOLT(u1) >= SSD_PL_CAP_VOLT_READY) {
			break;
		}

		wait++;
		if (wait > SSD_PL_CAP_CHARGE_MAX_WAIT) {
			/* charging timed out: log the voltage we got stuck at */
			ret = -ETIMEDOUT;
			ssd_gen_swlog(dev, SSD_LOG_CAP_VOLT_FAULT, SSD_PL_CAP_VOLT(u1));
			goto out;
		}
		msleep(SSD_PL_CAP_CHARGE_WAIT);
	}

	/* program the LM80 minimum-voltage alarm for the cap input */
	low = ssd_lm80_limit[SSD_LM80_IN_CAP].low;
	ret = ssd_smbus_write_byte(dev, SSD_SENSOR_LM80_SADDRESS, SSD_LM80_REG_IN_MIN(SSD_LM80_IN_CAP), &low);
	if (ret) {
		goto out;
	}

	/* enable cap INx */
	ret = ssd_lm80_enable_in(dev, SSD_SENSOR_LM80_SADDRESS, SSD_LM80_IN_CAP);
	if (ret) {
		if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80), &dev->hwmon)) {
			ssd_generate_sensor_fault_log(dev, SSD_LOG_SENSOR_FAULT, SSD_SENSOR_LM80_SADDRESS,ret);
		}
		goto out;
	}

out:
	/* skip error if not in standard mode */
	if (mode != SSD_DRV_MODE_STANDARD) {
		ret = 0;
	}
	return ret;
}
7025 | ||
7026 | static int ssd_check_pl_cap_fast(struct ssd_device *dev) | |
7027 | { | |
7028 | uint32_t u1; | |
7029 | uint16_t val = 0; | |
7030 | int ret = 0; | |
7031 | ||
7032 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
7033 | return 0; | |
7034 | } | |
7035 | ||
7036 | if (dev->hw_info_ext.form_factor == SSD_FORM_FACTOR_FHHL && dev->hw_info.pcb_ver < 'B') { | |
7037 | return 0; | |
7038 | } | |
7039 | ||
7040 | /* cap ready ? */ | |
7041 | ret = ssd_smbus_read_word(dev, SSD_SENSOR_LM80_SADDRESS, SSD_PL_CAP_U1, (uint8_t *)&val); | |
7042 | if (ret) { | |
7043 | goto out; | |
7044 | } | |
7045 | u1 = SSD_LM80_CONVERT_VOLT(u16_swap(val)); | |
7046 | if (SSD_PL_CAP_VOLT(u1) < SSD_PL_CAP_VOLT_READY) { | |
7047 | ret = 1; | |
7048 | } | |
7049 | ||
7050 | out: | |
7051 | return ret; | |
7052 | } | |
7053 | ||
7054 | static int ssd_init_pl_cap(struct ssd_device *dev) | |
7055 | { | |
7056 | int ret = 0; | |
7057 | ||
7058 | /* set here: user write mode */ | |
7059 | dev->user_wmode = wmode; | |
7060 | ||
7061 | mutex_init(&dev->bm_mutex); | |
7062 | ||
7063 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
7064 | uint32_t val; | |
7065 | val = ssd_reg32_read(dev->ctrlp + SSD_BM_FAULT_REG); | |
7066 | if ((val >> 1) & 0x1) { | |
7067 | (void)test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon); | |
7068 | } | |
7069 | } else { | |
7070 | ret = ssd_check_pl_cap(dev); | |
7071 | if (ret) { | |
7072 | (void)test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon); | |
7073 | } | |
7074 | } | |
7075 | ||
7076 | return 0; | |
7077 | } | |
7078 | ||
7079 | /* label */ | |
/*
 * Ensure the first 'len' bytes of 'str' contain a NUL terminator:
 * if none is found, the string is cleared to empty (str[0] = '\0').
 */
static void __end_str(char *str, int len)
{
	char *p = str;
	char *limit = str + len;

	while (p < limit) {
		if (*p == '\0') {
			return;
		}
		p++;
	}
	*str = '\0';
}
7090 | ||
7091 | static int ssd_init_label(struct ssd_device *dev) | |
7092 | { | |
7093 | uint32_t off; | |
7094 | uint32_t size; | |
7095 | int ret; | |
7096 | ||
7097 | /* label location */ | |
7098 | off = dev->rom_info.label_base; | |
7099 | ||
7100 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
7101 | size = sizeof(struct ssd_label); | |
7102 | ||
7103 | /* read label */ | |
7104 | ret = ssd_spi_read(dev, &dev->label, off, size); | |
7105 | if (ret) { | |
7106 | memset(&dev->label, 0, size); | |
7107 | goto out; | |
7108 | } | |
7109 | ||
7110 | __end_str(dev->label.date, SSD_LABEL_FIELD_SZ); | |
7111 | __end_str(dev->label.sn, SSD_LABEL_FIELD_SZ); | |
7112 | __end_str(dev->label.part, SSD_LABEL_FIELD_SZ); | |
7113 | __end_str(dev->label.desc, SSD_LABEL_FIELD_SZ); | |
7114 | __end_str(dev->label.other, SSD_LABEL_FIELD_SZ); | |
7115 | __end_str(dev->label.maf, SSD_LABEL_FIELD_SZ); | |
7116 | } else { | |
7117 | size = sizeof(struct ssd_labelv3); | |
7118 | ||
7119 | /* read label */ | |
7120 | ret = ssd_spi_read(dev, &dev->labelv3, off, size); | |
7121 | if (ret) { | |
7122 | memset(&dev->labelv3, 0, size); | |
7123 | goto out; | |
7124 | } | |
7125 | ||
7126 | __end_str(dev->labelv3.boardtype, SSD_LABEL_FIELD_SZ); | |
7127 | __end_str(dev->labelv3.barcode, SSD_LABEL_FIELD_SZ); | |
7128 | __end_str(dev->labelv3.item, SSD_LABEL_FIELD_SZ); | |
7129 | __end_str(dev->labelv3.description, SSD_LABEL_DESC_SZ); | |
7130 | __end_str(dev->labelv3.manufactured, SSD_LABEL_FIELD_SZ); | |
7131 | __end_str(dev->labelv3.vendorname, SSD_LABEL_FIELD_SZ); | |
7132 | __end_str(dev->labelv3.issuenumber, SSD_LABEL_FIELD_SZ); | |
7133 | __end_str(dev->labelv3.cleicode, SSD_LABEL_FIELD_SZ); | |
7134 | __end_str(dev->labelv3.bom, SSD_LABEL_FIELD_SZ); | |
7135 | } | |
7136 | ||
7137 | out: | |
7138 | /* skip error if not in standard mode */ | |
7139 | if (mode != SSD_DRV_MODE_STANDARD) { | |
7140 | ret = 0; | |
7141 | } | |
7142 | return ret; | |
7143 | } | |
7144 | ||
7145 | int ssd_get_label(struct block_device *bdev, struct ssd_label *label) | |
7146 | { | |
7147 | struct ssd_device *dev; | |
7148 | ||
7149 | if (!bdev || !label || !(bdev->bd_disk)) { | |
7150 | return -EINVAL; | |
7151 | } | |
7152 | ||
7153 | dev = bdev->bd_disk->private_data; | |
7154 | ||
7155 | if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) { | |
7156 | memset(label, 0, sizeof(struct ssd_label)); | |
7157 | memcpy(label->date, dev->labelv3.manufactured, SSD_LABEL_FIELD_SZ); | |
7158 | memcpy(label->sn, dev->labelv3.barcode, SSD_LABEL_FIELD_SZ); | |
7159 | memcpy(label->desc, dev->labelv3.boardtype, SSD_LABEL_FIELD_SZ); | |
7160 | memcpy(label->maf, dev->labelv3.vendorname, SSD_LABEL_FIELD_SZ); | |
7161 | } else { | |
7162 | memcpy(label, &dev->label, sizeof(struct ssd_label)); | |
7163 | } | |
7164 | ||
7165 | return 0; | |
7166 | } | |
7167 | ||
7168 | static int __ssd_get_version(struct ssd_device *dev, struct ssd_version_info *ver) | |
7169 | { | |
7170 | uint16_t bm_ver = 0; | |
7171 | int ret = 0; | |
7172 | ||
7173 | if (dev->protocol_info.ver > SSD_PROTOCOL_V3 && dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
7174 | ret = ssd_bm_get_version(dev, &bm_ver); | |
7175 | if(ret){ | |
7176 | goto out; | |
7177 | } | |
7178 | } | |
7179 | ||
7180 | ver->bridge_ver = dev->hw_info.bridge_ver; | |
7181 | ver->ctrl_ver = dev->hw_info.ctrl_ver; | |
7182 | ver->bm_ver = bm_ver; | |
7183 | ver->pcb_ver = dev->hw_info.pcb_ver; | |
7184 | ver->upper_pcb_ver = dev->hw_info.upper_pcb_ver; | |
7185 | ||
7186 | out: | |
7187 | return ret; | |
7188 | ||
7189 | } | |
7190 | ||
7191 | int ssd_get_version(struct block_device *bdev, struct ssd_version_info *ver) | |
7192 | { | |
7193 | struct ssd_device *dev; | |
7194 | int ret; | |
7195 | ||
7196 | if (!bdev || !ver || !(bdev->bd_disk)) { | |
7197 | return -EINVAL; | |
7198 | } | |
7199 | ||
7200 | dev = bdev->bd_disk->private_data; | |
7201 | ||
7202 | mutex_lock(&dev->fw_mutex); | |
7203 | ret = __ssd_get_version(dev, ver); | |
7204 | mutex_unlock(&dev->fw_mutex); | |
7205 | ||
7206 | return ret; | |
7207 | } | |
7208 | ||
7209 | static int __ssd_get_temperature(struct ssd_device *dev, int *temp) | |
7210 | { | |
7211 | uint64_t val; | |
7212 | uint32_t off; | |
7213 | int max = -300; | |
7214 | int cur; | |
7215 | int i; | |
7216 | ||
7217 | if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) { | |
7218 | *temp = 0; | |
7219 | return 0; | |
7220 | } | |
7221 | ||
7222 | if (finject) { | |
7223 | if (dev->db_info.type == SSD_DEBUG_LOG && | |
7224 | (dev->db_info.data.log.event == SSD_LOG_OVER_TEMP || | |
7225 | dev->db_info.data.log.event == SSD_LOG_NORMAL_TEMP || | |
7226 | dev->db_info.data.log.event == SSD_LOG_WARN_TEMP)) { | |
7227 | *temp = (int)dev->db_info.data.log.extra; | |
7228 | return 0; | |
7229 | } | |
7230 | } | |
7231 | ||
7232 | for (i=0; i<dev->hw_info.nr_ctrl; i++) { | |
7233 | off = SSD_CTRL_TEMP_REG0 + i * sizeof(uint64_t); | |
7234 | ||
7235 | val = ssd_reg_read(dev->ctrlp + off); | |
7236 | if (val == 0xffffffffffffffffull) { | |
7237 | continue; | |
7238 | } | |
7239 | ||
7240 | cur = (int)CUR_TEMP(val); | |
7241 | if (cur >= max) { | |
7242 | max = cur; | |
7243 | } | |
7244 | } | |
7245 | ||
7246 | *temp = max; | |
7247 | ||
7248 | return 0; | |
7249 | } | |
7250 | ||
7251 | int ssd_get_temperature(struct block_device *bdev, int *temp) | |
7252 | { | |
7253 | struct ssd_device *dev; | |
7254 | int ret; | |
7255 | ||
7256 | if (!bdev || !temp || !(bdev->bd_disk)) { | |
7257 | return -EINVAL; | |
7258 | } | |
7259 | ||
7260 | dev = bdev->bd_disk->private_data; | |
7261 | ||
7262 | ||
7263 | mutex_lock(&dev->fw_mutex); | |
7264 | ret = __ssd_get_temperature(dev, temp); | |
7265 | mutex_unlock(&dev->fw_mutex); | |
7266 | ||
7267 | return ret; | |
7268 | } | |
7269 | ||
7270 | int ssd_set_otprotect(struct block_device *bdev, int otprotect) | |
7271 | { | |
7272 | struct ssd_device *dev; | |
7273 | ||
7274 | if (!bdev || !(bdev->bd_disk)) { | |
7275 | return -EINVAL; | |
7276 | } | |
7277 | ||
7278 | dev = bdev->bd_disk->private_data; | |
7279 | ssd_set_ot_protect(dev, !!otprotect); | |
7280 | ||
7281 | return 0; | |
7282 | } | |
7283 | ||
7284 | int ssd_bm_status(struct block_device *bdev, int *status) | |
7285 | { | |
7286 | struct ssd_device *dev; | |
7287 | int ret = 0; | |
7288 | ||
7289 | if (!bdev || !status || !(bdev->bd_disk)) { | |
7290 | return -EINVAL; | |
7291 | } | |
7292 | ||
7293 | dev = bdev->bd_disk->private_data; | |
7294 | ||
7295 | mutex_lock(&dev->fw_mutex); | |
7296 | if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) { | |
7297 | if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) { | |
7298 | *status = SSD_BMSTATUS_WARNING; | |
7299 | } else { | |
7300 | *status = SSD_BMSTATUS_OK; | |
7301 | } | |
7302 | } else if(dev->protocol_info.ver > SSD_PROTOCOL_V3) { | |
7303 | ret = __ssd_bm_status(dev, status); | |
7304 | } else { | |
7305 | *status = SSD_BMSTATUS_OK; | |
7306 | } | |
7307 | mutex_unlock(&dev->fw_mutex); | |
7308 | ||
7309 | return ret; | |
7310 | } | |
7311 | ||
7312 | int ssd_get_pciaddr(struct block_device *bdev, struct pci_addr *paddr) | |
7313 | { | |
7314 | struct ssd_device *dev; | |
7315 | ||
7316 | if (!bdev || !paddr || !bdev->bd_disk) { | |
7317 | return -EINVAL; | |
7318 | } | |
7319 | ||
7320 | dev = bdev->bd_disk->private_data; | |
7321 | ||
7322 | paddr->domain = pci_domain_nr(dev->pdev->bus); | |
7323 | paddr->bus = dev->pdev->bus->number; | |
7324 | paddr->slot = PCI_SLOT(dev->pdev->devfn); | |
7325 | paddr->func= PCI_FUNC(dev->pdev->devfn); | |
7326 | ||
7327 | return 0; | |
7328 | } | |
7329 | ||
7330 | /* acc */ | |
7331 | static int ssd_bb_acc(struct ssd_device *dev, struct ssd_acc_info *acc) | |
7332 | { | |
7333 | uint32_t val; | |
7334 | int ctrl, chip; | |
7335 | ||
7336 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_1_1) { | |
7337 | return -EOPNOTSUPP; | |
7338 | } | |
7339 | ||
7340 | acc->threshold_l1 = ssd_reg32_read(dev->ctrlp + SSD_BB_THRESHOLD_L1_REG); | |
7341 | if (0xffffffffull == acc->threshold_l1) { | |
7342 | return -EIO; | |
7343 | } | |
7344 | acc->threshold_l2 = ssd_reg32_read(dev->ctrlp + SSD_BB_THRESHOLD_L2_REG); | |
7345 | if (0xffffffffull == acc->threshold_l2) { | |
7346 | return -EIO; | |
7347 | } | |
7348 | acc->val = 0; | |
7349 | ||
7350 | for (ctrl=0; ctrl<dev->hw_info.nr_ctrl; ctrl++) { | |
7351 | for (chip=0; chip<dev->hw_info.nr_chip; chip++) { | |
7352 | val = ssd_reg32_read(dev->ctrlp + SSD_BB_ACC_REG0 + (SSD_CTRL_REG_ZONE_SZ * ctrl) + (SSD_BB_ACC_REG_SZ * chip)); | |
7353 | if (0xffffffffull == acc->val) { | |
7354 | return -EIO; | |
7355 | } | |
7356 | if (val > acc->val) { | |
7357 | acc->val = val; | |
7358 | } | |
7359 | } | |
7360 | } | |
7361 | ||
7362 | return 0; | |
7363 | } | |
7364 | ||
7365 | static int ssd_ec_acc(struct ssd_device *dev, struct ssd_acc_info *acc) | |
7366 | { | |
7367 | uint32_t val; | |
7368 | int ctrl, chip; | |
7369 | ||
7370 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_1_1) { | |
7371 | return -EOPNOTSUPP; | |
7372 | } | |
7373 | ||
7374 | acc->threshold_l1 = ssd_reg32_read(dev->ctrlp + SSD_EC_THRESHOLD_L1_REG); | |
7375 | if (0xffffffffull == acc->threshold_l1) { | |
7376 | return -EIO; | |
7377 | } | |
7378 | acc->threshold_l2 = ssd_reg32_read(dev->ctrlp + SSD_EC_THRESHOLD_L2_REG); | |
7379 | if (0xffffffffull == acc->threshold_l2) { | |
7380 | return -EIO; | |
7381 | } | |
7382 | acc->val = 0; | |
7383 | ||
7384 | for (ctrl=0; ctrl<dev->hw_info.nr_ctrl; ctrl++) { | |
7385 | for (chip=0; chip<dev->hw_info.nr_chip; chip++) { | |
7386 | val = ssd_reg32_read(dev->ctrlp + SSD_EC_ACC_REG0 + (SSD_CTRL_REG_ZONE_SZ * ctrl) + (SSD_EC_ACC_REG_SZ * chip)); | |
7387 | if (0xffffffffull == acc->val) { | |
7388 | return -EIO; | |
7389 | } | |
7390 | ||
7391 | if (val > acc->val) { | |
7392 | acc->val = val; | |
7393 | } | |
7394 | } | |
7395 | } | |
7396 | ||
7397 | return 0; | |
7398 | } | |
7399 | ||
7400 | ||
7401 | /* ram r&w */ | |
/*
 * DMA-read a single chunk (at most ram_max_len bytes) of controller RAM
 * at offset ofs into buf. length and ofs must be ram_align aligned and
 * the range must lie within ram_size. Returns 0 or a negative errno.
 */
static int ssd_ram_read_4k(struct ssd_device *dev, void *buf, size_t length, loff_t ofs, int ctrl_idx)
{
	struct ssd_ram_op_msg *msg;
	dma_addr_t buf_dma;
	size_t len = length;
	loff_t ofs_w = ofs;
	int ret = 0;

	/* reject out-of-range, zero, oversized or misaligned requests */
	if (ctrl_idx >= dev->hw_info.nr_ctrl || (uint64_t)(ofs + length) > dev->hw_info.ram_size
		|| !length || length > dev->hw_info.ram_max_len
		|| (length & (dev->hw_info.ram_align - 1)) != 0 || ((uint64_t)ofs & (dev->hw_info.ram_align - 1)) != 0) {
		return -EINVAL;
	}

	/* the device expects length and offset in ram_align units */
	len /= dev->hw_info.ram_align;
	do_div(ofs_w, dev->hw_info.ram_align);

	buf_dma = pci_map_single(dev->pdev, buf, length, PCI_DMA_FROMDEVICE);
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
	ret = dma_mapping_error(buf_dma);
#else
	ret = dma_mapping_error(&(dev->pdev->dev), buf_dma);
#endif
	if (ret) {
		hio_warn("%s: unable to map read DMA buffer\n", dev->name);
		goto out_dma_mapping;
	}

	/* build and submit the RAM-read request message */
	msg = (struct ssd_ram_op_msg *)ssd_get_dmsg(dev);

	msg->fun = SSD_FUNC_RAM_READ;
	msg->ctrl_idx = ctrl_idx;
	msg->start = (uint32_t)ofs_w;
	msg->length = len;
	msg->buf = buf_dma;

	ret = ssd_do_request(dev, READ, msg, NULL);
	ssd_put_dmsg(msg);

	pci_unmap_single(dev->pdev, buf_dma, length, PCI_DMA_FROMDEVICE);

out_dma_mapping:
	return ret;
}
7446 | ||
/*
 * DMA-write a single chunk (at most ram_max_len bytes) from buf into
 * controller RAM at offset ofs. length and ofs must be ram_align
 * aligned and the range must lie within ram_size.
 * Returns 0 or a negative errno.
 */
static int ssd_ram_write_4k(struct ssd_device *dev, void *buf, size_t length, loff_t ofs, int ctrl_idx)
{
	struct ssd_ram_op_msg *msg;
	dma_addr_t buf_dma;
	size_t len = length;
	loff_t ofs_w = ofs;
	int ret = 0;

	/* reject out-of-range, zero, oversized or misaligned requests */
	if (ctrl_idx >= dev->hw_info.nr_ctrl || (uint64_t)(ofs + length) > dev->hw_info.ram_size
		|| !length || length > dev->hw_info.ram_max_len
		|| (length & (dev->hw_info.ram_align - 1)) != 0 || ((uint64_t)ofs & (dev->hw_info.ram_align - 1)) != 0) {
		return -EINVAL;
	}

	/* the device expects length and offset in ram_align units */
	len /= dev->hw_info.ram_align;
	do_div(ofs_w, dev->hw_info.ram_align);

	buf_dma = pci_map_single(dev->pdev, buf, length, PCI_DMA_TODEVICE);
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
	ret = dma_mapping_error(buf_dma);
#else
	ret = dma_mapping_error(&(dev->pdev->dev), buf_dma);
#endif
	if (ret) {
		hio_warn("%s: unable to map write DMA buffer\n", dev->name);
		goto out_dma_mapping;
	}

	/* build and submit the RAM-write request message */
	msg = (struct ssd_ram_op_msg *)ssd_get_dmsg(dev);

	msg->fun = SSD_FUNC_RAM_WRITE;
	msg->ctrl_idx = ctrl_idx;
	msg->start = (uint32_t)ofs_w;
	msg->length = len;
	msg->buf = buf_dma;

	ret = ssd_do_request(dev, WRITE, msg, NULL);
	ssd_put_dmsg(msg);

	pci_unmap_single(dev->pdev, buf_dma, length, PCI_DMA_TODEVICE);

out_dma_mapping:
	return ret;

}
7492 | ||
7493 | static int ssd_ram_read(struct ssd_device *dev, void *buf, size_t length, loff_t ofs, int ctrl_idx) | |
7494 | { | |
7495 | int left = length; | |
7496 | size_t len; | |
7497 | loff_t off = ofs; | |
7498 | int ret = 0; | |
7499 | ||
7500 | if (ctrl_idx >= dev->hw_info.nr_ctrl || (uint64_t)(ofs + length) > dev->hw_info.ram_size || !length | |
7501 | || (length & (dev->hw_info.ram_align - 1)) != 0 || ((uint64_t)ofs & (dev->hw_info.ram_align - 1)) != 0) { | |
7502 | return -EINVAL; | |
7503 | } | |
7504 | ||
7505 | while (left > 0) { | |
7506 | len = dev->hw_info.ram_max_len; | |
7507 | if (left < (int)dev->hw_info.ram_max_len) { | |
7508 | len = left; | |
7509 | } | |
7510 | ||
7511 | ret = ssd_ram_read_4k(dev, buf, len, off, ctrl_idx); | |
7512 | if (ret) { | |
7513 | break; | |
7514 | } | |
7515 | ||
7516 | left -= len; | |
7517 | off += len; | |
7518 | buf += len; | |
7519 | } | |
7520 | ||
7521 | return ret; | |
7522 | } | |
7523 | ||
7524 | static int ssd_ram_write(struct ssd_device *dev, void *buf, size_t length, loff_t ofs, int ctrl_idx) | |
7525 | { | |
7526 | int left = length; | |
7527 | size_t len; | |
7528 | loff_t off = ofs; | |
7529 | int ret = 0; | |
7530 | ||
7531 | if (ctrl_idx >= dev->hw_info.nr_ctrl || (uint64_t)(ofs + length) > dev->hw_info.ram_size || !length | |
7532 | || (length & (dev->hw_info.ram_align - 1)) != 0 || ((uint64_t)ofs & (dev->hw_info.ram_align - 1)) != 0) { | |
7533 | return -EINVAL; | |
7534 | } | |
7535 | ||
7536 | while (left > 0) { | |
7537 | len = dev->hw_info.ram_max_len; | |
7538 | if (left < (int)dev->hw_info.ram_max_len) { | |
7539 | len = left; | |
7540 | } | |
7541 | ||
7542 | ret = ssd_ram_write_4k(dev, buf, len, off, ctrl_idx); | |
7543 | if (ret) { | |
7544 | break; | |
7545 | } | |
7546 | ||
7547 | left -= len; | |
7548 | off += len; | |
7549 | buf += len; | |
7550 | } | |
7551 | ||
7552 | return ret; | |
7553 | } | |
7554 | ||
7555 | ||
7556 | /* flash op */ | |
7557 | static int ssd_check_flash(struct ssd_device *dev, int flash, int page, int ctrl_idx) | |
7558 | { | |
7559 | int cur_ch = flash % dev->hw_info.max_ch; | |
7560 | int cur_chip = flash /dev->hw_info.max_ch; | |
7561 | ||
7562 | if (ctrl_idx >= dev->hw_info.nr_ctrl) { | |
7563 | return -EINVAL; | |
7564 | } | |
7565 | ||
7566 | if (cur_ch >= dev->hw_info.nr_ch || cur_chip >= dev->hw_info.nr_chip) { | |
7567 | return -EINVAL; | |
7568 | } | |
7569 | ||
7570 | if (page >= (int)(dev->hw_info.block_count * dev->hw_info.page_count)) { | |
7571 | return -EINVAL; | |
7572 | } | |
7573 | return 0; | |
7574 | } | |
7575 | ||
/*
 * Read the NAND ID bytes for one flash/chip pair into 'id'
 * (SSD_NAND_ID_BUFF_SZ bytes) via a DMA request to the bridge.
 * Returns 0 on success or a negative errno.
 */
static int ssd_nand_read_id(struct ssd_device *dev, void *id, int flash, int chip, int ctrl_idx)
{
	struct ssd_nand_op_msg *msg;
	dma_addr_t buf_dma;
	int ret = 0;

	if (unlikely(!id))
		return -EINVAL;

	buf_dma = pci_map_single(dev->pdev, id, SSD_NAND_ID_BUFF_SZ, PCI_DMA_FROMDEVICE);
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
	ret = dma_mapping_error(buf_dma);
#else
	ret = dma_mapping_error(&(dev->pdev->dev), buf_dma);
#endif
	if (ret) {
		hio_warn("%s: unable to map read DMA buffer\n", dev->name);
		goto out_dma_mapping;
	}

	/* pre-V3 protocols fold the CE number into the low bit of chip_no */
	if (dev->protocol_info.ver < SSD_PROTOCOL_V3) {
		flash = ((uint32_t)flash << 1) | (uint32_t)chip;
		chip = 0;
	}

	/* build and submit the read-ID request message */
	msg = (struct ssd_nand_op_msg *)ssd_get_dmsg(dev);

	msg->fun = SSD_FUNC_NAND_READ_ID;
	msg->chip_no = flash;
	msg->chip_ce = chip;
	msg->ctrl_idx = ctrl_idx;
	msg->buf = buf_dma;

	ret = ssd_do_request(dev, READ, msg, NULL);
	ssd_put_dmsg(msg);

	pci_unmap_single(dev->pdev, buf_dma, SSD_NAND_ID_BUFF_SZ, PCI_DMA_FROMDEVICE);

out_dma_mapping:
	return ret;
}
7617 | ||
#if 0
/*
 * Disabled (compiled out): raw NAND page read without OOB data.
 * Kept for reference only; the live path is ssd_nand_read_w_oob() below.
 */
static int ssd_nand_read(struct ssd_device *dev, void *buf,
	int flash, int chip, int page, int page_count, int ctrl_idx)
{
	struct ssd_nand_op_msg *msg;
	dma_addr_t buf_dma;
	int length;
	int ret = 0;

	if (!buf) {
		return -EINVAL;
	}

	if ((page + page_count) > dev->hw_info.block_count*dev->hw_info.page_count) {
		return -EINVAL;
	}

	ret = ssd_check_flash(dev, flash, page, ctrl_idx);
	if (ret) {
		return ret;
	}

	length = page_count * dev->hw_info.page_size;

	buf_dma = pci_map_single(dev->pdev, buf, length, PCI_DMA_FROMDEVICE);
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
	ret = dma_mapping_error(buf_dma);
#else
	ret = dma_mapping_error(&(dev->pdev->dev), buf_dma);
#endif
	if (ret) {
		hio_warn("%s: unable to map read DMA buffer\n", dev->name);
		goto out_dma_mapping;
	}

	/* pre-V3 protocol folds the chip-enable into the low bit of the flash number */
	if (dev->protocol_info.ver < SSD_PROTOCOL_V3) {
		flash = (flash << 1) | chip;
		chip = 0;
	}

	msg = (struct ssd_nand_op_msg *)ssd_get_dmsg(dev);

	msg->fun = SSD_FUNC_NAND_READ;
	msg->ctrl_idx = ctrl_idx;
	msg->chip_no = flash;
	msg->chip_ce = chip;
	msg->page_no = page;
	msg->page_count = page_count;
	msg->buf = buf_dma;

	ret = ssd_do_request(dev, READ, msg, NULL);
	ssd_put_dmsg(msg);

	pci_unmap_single(dev->pdev, buf_dma, length, PCI_DMA_FROMDEVICE);

out_dma_mapping:
	return ret;
}
#endif
7677 | ||
/*
 * Read @count NAND pages including their OOB (spare) area into @buf.
 * @buf must hold count * (page_size + oob_size) bytes; it is DMA-mapped
 * here for the transfer.  Returns 0 on success or a negative errno.
 */
static int ssd_nand_read_w_oob(struct ssd_device *dev, void *buf,
	int flash, int chip, int page, int count, int ctrl_idx)
{
	struct ssd_nand_op_msg *msg;
	dma_addr_t buf_dma;
	int length;
	int ret = 0;

	if (!buf) {
		return -EINVAL;
	}

	/* the read must not run past the last page of the die */
	if ((page + count) > (int)(dev->hw_info.block_count * dev->hw_info.page_count)) {
		return -EINVAL;
	}

	ret = ssd_check_flash(dev, flash, page, ctrl_idx);
	if (ret) {
		return ret;
	}

	length = count * (dev->hw_info.page_size + dev->hw_info.oob_size);

	buf_dma = pci_map_single(dev->pdev, buf, length, PCI_DMA_FROMDEVICE);
	/* dma_mapping_error() gained a struct device argument in 2.6.27 */
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
	ret = dma_mapping_error(buf_dma);
#else
	ret = dma_mapping_error(&(dev->pdev->dev), buf_dma);
#endif
	if (ret) {
		hio_warn("%s: unable to map read DMA buffer\n", dev->name);
		goto out_dma_mapping;
	}

	/* pre-V3 protocol folds the chip-enable into the low bit of the flash number */
	if (dev->protocol_info.ver < SSD_PROTOCOL_V3) {
		flash = ((uint32_t)flash << 1) | (uint32_t)chip;
		chip = 0;
	}

	msg = (struct ssd_nand_op_msg *)ssd_get_dmsg(dev);

	msg->fun = SSD_FUNC_NAND_READ_WOOB;
	msg->ctrl_idx = ctrl_idx;
	msg->chip_no = flash;
	msg->chip_ce = chip;
	msg->page_no = page;
	msg->page_count = count;
	msg->buf = buf_dma;

	ret = ssd_do_request(dev, READ, msg, NULL);
	ssd_put_dmsg(msg);

	pci_unmap_single(dev->pdev, buf_dma, length, PCI_DMA_FROMDEVICE);

out_dma_mapping:
	return ret;
}
7735 | ||
7736 | /* write 1 page */ | |
7737 | static int ssd_nand_write(struct ssd_device *dev, void *buf, | |
7738 | int flash, int chip, int page, int count, int ctrl_idx) | |
7739 | { | |
7740 | struct ssd_nand_op_msg *msg; | |
7741 | dma_addr_t buf_dma; | |
7742 | int length; | |
7743 | int ret = 0; | |
7744 | ||
7745 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3) { | |
7746 | return -EINVAL; | |
7747 | } | |
7748 | ||
7749 | if (!buf) { | |
7750 | return -EINVAL; | |
7751 | } | |
7752 | ||
7753 | if (count != 1) { | |
7754 | return -EINVAL; | |
7755 | } | |
7756 | ||
7757 | ret = ssd_check_flash(dev, flash, page, ctrl_idx); | |
7758 | if (ret) { | |
7759 | return ret; | |
7760 | } | |
7761 | ||
7762 | length = count * (dev->hw_info.page_size + dev->hw_info.oob_size); | |
7763 | ||
7764 | /* write data to ram */ | |
7765 | /*ret = ssd_ram_write(dev, buf, length, dev->hw_info.nand_wbuff_base, ctrl_idx); | |
7766 | if (ret) { | |
7767 | return ret; | |
7768 | }*/ | |
7769 | ||
7770 | buf_dma = pci_map_single(dev->pdev, buf, length, PCI_DMA_TODEVICE); | |
7771 | #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26)) | |
7772 | ret = dma_mapping_error(buf_dma); | |
7773 | #else | |
7774 | ret = dma_mapping_error(&(dev->pdev->dev), buf_dma); | |
7775 | #endif | |
7776 | if (ret) { | |
7777 | hio_warn("%s: unable to map write DMA buffer\n", dev->name); | |
7778 | goto out_dma_mapping; | |
7779 | } | |
7780 | ||
7781 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3) { | |
7782 | flash = ((uint32_t)flash << 1) | (uint32_t)chip; | |
7783 | chip = 0; | |
7784 | } | |
7785 | ||
7786 | msg = (struct ssd_nand_op_msg *)ssd_get_dmsg(dev); | |
7787 | ||
7788 | msg->fun = SSD_FUNC_NAND_WRITE; | |
7789 | msg->ctrl_idx = ctrl_idx; | |
7790 | msg->chip_no = flash; | |
7791 | msg->chip_ce = chip; | |
7792 | ||
7793 | msg->page_no = page; | |
7794 | msg->page_count = count; | |
7795 | msg->buf = buf_dma; | |
7796 | ||
7797 | ret = ssd_do_request(dev, WRITE, msg, NULL); | |
7798 | ssd_put_dmsg(msg); | |
7799 | ||
7800 | pci_unmap_single(dev->pdev, buf_dma, length, PCI_DMA_TODEVICE); | |
7801 | ||
7802 | out_dma_mapping: | |
7803 | return ret; | |
7804 | } | |
7805 | ||
/*
 * Erase the NAND block containing @page on the selected die.
 * Returns 0 on success or a negative errno.
 */
static int ssd_nand_erase(struct ssd_device *dev, int flash, int chip, int page, int ctrl_idx)
{
	struct ssd_nand_op_msg *msg;
	int ret = 0;

	ret = ssd_check_flash(dev, flash, page, ctrl_idx);
	if (ret) {
		return ret;
	}

	/* pre-V3 protocol folds the chip-enable into the low bit of the flash number */
	if (dev->protocol_info.ver < SSD_PROTOCOL_V3) {
		flash = ((uint32_t)flash << 1) | (uint32_t)chip;
		chip = 0;
	}

	msg = (struct ssd_nand_op_msg *)ssd_get_dmsg(dev);

	msg->fun = SSD_FUNC_NAND_ERASE;
	msg->ctrl_idx = ctrl_idx;
	msg->chip_no = flash;
	msg->chip_ce = chip;
	msg->page_no = page;

	ret = ssd_do_request(dev, WRITE, msg, NULL);
	ssd_put_dmsg(msg);

	return ret;
}
7834 | ||
/*
 * Ask the firmware to update the bad-block table for one flash die,
 * via a flush command with flag 0x1.  The message layout differs by
 * protocol version: pre-V3 uses struct ssd_flush_msg (aliasing the
 * same dmsg buffer), V3+ uses struct ssd_nand_op_msg.
 * Returns 0 on success or a negative errno.
 */
static int ssd_update_bbt(struct ssd_device *dev, int flash, int ctrl_idx)
{
	struct ssd_nand_op_msg *msg;
	struct ssd_flush_msg *fmsg;
	int ret = 0;

	ret = ssd_check_flash(dev, flash, 0, ctrl_idx);
	if (ret) {
		return ret;
	}

	msg = (struct ssd_nand_op_msg *)ssd_get_dmsg(dev);

	if (dev->protocol_info.ver < SSD_PROTOCOL_V3) {
		/* reinterpret the same message buffer with the old layout */
		fmsg = (struct ssd_flush_msg *)msg;

		fmsg->fun = SSD_FUNC_FLUSH;
		fmsg->flag = 0x1;
		fmsg->flash = flash;
		fmsg->ctrl_idx = ctrl_idx;
	} else {
		msg->fun = SSD_FUNC_FLUSH;
		msg->flag = 0x1;
		msg->chip_no = flash;
		msg->ctrl_idx = ctrl_idx;
	}

	ret = ssd_do_request(dev, WRITE, msg, NULL);
	ssd_put_dmsg(msg);

	return ret;
}
7867 | ||
7868 | /* flash controller init state */ | |
7869 | static int __ssd_check_init_state(struct ssd_device *dev) | |
7870 | { | |
7871 | uint32_t *init_state = NULL; | |
7872 | int reg_base, reg_sz; | |
7873 | int max_wait = SSD_INIT_MAX_WAIT; | |
7874 | int init_wait = 0; | |
7875 | int i, j, k; | |
7876 | int ch_start = 0; | |
7877 | ||
7878 | /* | |
7879 | for (i=0; i<dev->hw_info.nr_ctrl; i++) { | |
7880 | ssd_reg32_write(dev->ctrlp + SSD_CTRL_TEST_REG0 + i * 8, test_data); | |
7881 | read_data = ssd_reg32_read(dev->ctrlp + SSD_CTRL_TEST_REG0 + i * 8); | |
7882 | if (read_data == ~test_data) { | |
7883 | //dev->hw_info.nr_ctrl++; | |
7884 | dev->hw_info.nr_ctrl_map |= 1<<i; | |
7885 | } | |
7886 | } | |
7887 | */ | |
7888 | ||
7889 | /* | |
7890 | read_data = ssd_reg32_read(dev->ctrlp + SSD_READY_REG); | |
7891 | j=0; | |
7892 | for (i=0; i<dev->hw_info.nr_ctrl; i++) { | |
7893 | if (((read_data>>i) & 0x1) == 0) { | |
7894 | j++; | |
7895 | } | |
7896 | } | |
7897 | ||
7898 | if (dev->hw_info.nr_ctrl != j) { | |
7899 | printk(KERN_WARNING "%s: nr_ctrl mismatch: %d %d\n", dev->name, dev->hw_info.nr_ctrl, j); | |
7900 | return -1; | |
7901 | } | |
7902 | */ | |
7903 | ||
7904 | /* | |
7905 | init_state = ssd_reg_read(dev->ctrlp + SSD_FLASH_INFO_REG0); | |
7906 | for (j=1; j<dev->hw_info.nr_ctrl;j++) { | |
7907 | if (init_state != ssd_reg_read(dev->ctrlp + SSD_FLASH_INFO_REG0 + j*8)) { | |
7908 | printk(KERN_WARNING "SSD_FLASH_INFO_REG[%d], not match\n", j); | |
7909 | return -1; | |
7910 | } | |
7911 | } | |
7912 | */ | |
7913 | ||
7914 | /* init_state = ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0); | |
7915 | for (j=1; j<dev->hw_info.nr_ctrl; j++) { | |
7916 | if (init_state != ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0 + j*16)) { | |
7917 | printk(KERN_WARNING "SSD_CHIP_INFO_REG Lo [%d], not match\n", j); | |
7918 | return -1; | |
7919 | } | |
7920 | } | |
7921 | ||
7922 | init_state = ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0 + 8); | |
7923 | for (j=1; j<dev->hw_info.nr_ctrl; j++) { | |
7924 | if (init_state != ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0 + 8 + j*16)) { | |
7925 | printk(KERN_WARNING "SSD_CHIP_INFO_REG Hi [%d], not match\n", j); | |
7926 | return -1; | |
7927 | } | |
7928 | } | |
7929 | */ | |
7930 | ||
7931 | if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) { | |
7932 | max_wait = SSD_INIT_MAX_WAIT_V3_2; | |
7933 | } | |
7934 | ||
7935 | reg_base = dev->protocol_info.init_state_reg; | |
7936 | reg_sz = dev->protocol_info.init_state_reg_sz; | |
7937 | ||
7938 | init_state = (uint32_t *)kmalloc(reg_sz, GFP_KERNEL); | |
7939 | if (!init_state) { | |
7940 | return -ENOMEM; | |
7941 | } | |
7942 | ||
7943 | for (i=0; i<dev->hw_info.nr_ctrl; i++) { | |
7944 | check_init: | |
7945 | for (j=0, k=0; j<reg_sz; j+=sizeof(uint32_t), k++) { | |
7946 | init_state[k] = ssd_reg32_read(dev->ctrlp + reg_base + j); | |
7947 | } | |
7948 | ||
7949 | if (dev->protocol_info.ver > SSD_PROTOCOL_V3) { | |
7950 | /* just check the last bit, no need to check all channel */ | |
7951 | ch_start = dev->hw_info.max_ch - 1; | |
7952 | } else { | |
7953 | ch_start = 0; | |
7954 | } | |
7955 | ||
7956 | for (j=0; j<dev->hw_info.nr_chip; j++) { | |
7957 | for (k=ch_start; k<dev->hw_info.max_ch; k++) { | |
7958 | if (test_bit((j*dev->hw_info.max_ch + k), (void *)init_state)) { | |
7959 | continue; | |
7960 | } | |
7961 | ||
7962 | init_wait++; | |
7963 | if (init_wait <= max_wait) { | |
7964 | msleep(SSD_INIT_WAIT); | |
7965 | goto check_init; | |
7966 | } else { | |
7967 | if (k < dev->hw_info.nr_ch) { | |
7968 | hio_warn("%s: controller %d chip %d ch %d init failed\n", | |
7969 | dev->name, i, j, k); | |
7970 | } else { | |
7971 | hio_warn("%s: controller %d chip %d init failed\n", | |
7972 | dev->name, i, j); | |
7973 | } | |
7974 | ||
7975 | kfree(init_state); | |
7976 | return -1; | |
7977 | } | |
7978 | } | |
7979 | } | |
7980 | reg_base += reg_sz; | |
7981 | } | |
7982 | //printk(KERN_WARNING "%s: init wait %d\n", dev->name, init_wait); | |
7983 | ||
7984 | kfree(init_state); | |
7985 | return 0; | |
7986 | } | |
7987 | ||
7988 | static int ssd_check_init_state(struct ssd_device *dev) | |
7989 | { | |
7990 | if (mode != SSD_DRV_MODE_STANDARD) { | |
7991 | return 0; | |
7992 | } | |
7993 | ||
7994 | return __ssd_check_init_state(dev); | |
7995 | } | |
7996 | ||
7997 | static void ssd_reset_resp_ptr(struct ssd_device *dev); | |
7998 | ||
/* reset flash controller etc */
/*
 * Perform a controller reset of the given @type (SSD_RST_NOINIT,
 * SSD_RST_NORMAL or SSD_RST_FULL), serialized by fw_mutex, then wait for
 * the flash controllers to re-initialize.  SSD_RST_FULL requires protocol
 * V3.2+.  Returns 0 on success or a negative errno / -1 on init failure.
 */
static int __ssd_reset(struct ssd_device *dev, int type)
{
	if (type < SSD_RST_NOINIT || type > SSD_RST_FULL) {
		return -EINVAL;
	}

	mutex_lock(&dev->fw_mutex);

	if (type == SSD_RST_NOINIT) { //no init
		ssd_reg32_write(dev->ctrlp + SSD_RESET_REG, SSD_RESET_NOINIT);
	} else if (type == SSD_RST_NORMAL) { //reset & init
		ssd_reg32_write(dev->ctrlp + SSD_RESET_REG, SSD_RESET);
	} else { // full reset
		/* full reset is a V3.2+ feature */
		if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
			mutex_unlock(&dev->fw_mutex);
			return -EINVAL;
		}

		ssd_reg32_write(dev->ctrlp + SSD_FULL_RESET_REG, SSD_RESET_FULL);

		/* ?? */
		ssd_reset_resp_ptr(dev);
	}

#ifdef SSD_OT_PROTECT
	/* clear any over-temperature throttling delay */
	dev->ot_delay = 0;
#endif

	/* give the controller time to come back up */
	msleep(1000);

	/* re-apply the flush timeout for the current write mode */
	ssd_set_flush_timeout(dev, dev->wmode);

	mutex_unlock(&dev->fw_mutex);
	ssd_gen_swlog(dev, SSD_LOG_RESET, (uint32_t)type);
	dev->reset_time = (uint64_t)ktime_get_real_seconds();

	return __ssd_check_init_state(dev);
}
8039 | ||
8040 | static int ssd_save_md(struct ssd_device *dev) | |
8041 | { | |
8042 | struct ssd_nand_op_msg *msg; | |
8043 | int ret = 0; | |
8044 | ||
8045 | if (unlikely(mode != SSD_DRV_MODE_STANDARD)) | |
8046 | return 0; | |
8047 | ||
8048 | if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) { | |
8049 | return 0; | |
8050 | } | |
8051 | ||
8052 | if (!dev->save_md) { | |
8053 | return 0; | |
8054 | } | |
8055 | ||
8056 | msg = (struct ssd_nand_op_msg *)ssd_get_dmsg(dev); | |
8057 | ||
8058 | msg->fun = SSD_FUNC_FLUSH; | |
8059 | msg->flag = 0x2; | |
8060 | msg->ctrl_idx = 0; | |
8061 | msg->chip_no = 0; | |
8062 | ||
8063 | ret = ssd_do_request(dev, WRITE, msg, NULL); | |
8064 | ssd_put_dmsg(msg); | |
8065 | ||
8066 | return ret; | |
8067 | } | |
8068 | ||
8069 | static int ssd_barrier_save_md(struct ssd_device *dev) | |
8070 | { | |
8071 | struct ssd_nand_op_msg *msg; | |
8072 | int ret = 0; | |
8073 | ||
8074 | if (unlikely(mode != SSD_DRV_MODE_STANDARD)) | |
8075 | return 0; | |
8076 | ||
8077 | if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) { | |
8078 | return 0; | |
8079 | } | |
8080 | ||
8081 | if (!dev->save_md) { | |
8082 | return 0; | |
8083 | } | |
8084 | ||
8085 | msg = (struct ssd_nand_op_msg *)ssd_get_dmsg(dev); | |
8086 | ||
8087 | msg->fun = SSD_FUNC_FLUSH; | |
8088 | msg->flag = 0x2; | |
8089 | msg->ctrl_idx = 0; | |
8090 | msg->chip_no = 0; | |
8091 | ||
8092 | ret = ssd_do_barrier_request(dev, WRITE, msg, NULL); | |
8093 | ssd_put_dmsg(msg); | |
8094 | ||
8095 | return ret; | |
8096 | } | |
8097 | ||
/*
 * Issue a cache-flush command (flag 0) to the device.  The message layout
 * differs by protocol version: pre-V3 uses struct ssd_flush_msg aliasing
 * the same dmsg buffer, V3+ uses struct ssd_nand_op_msg.  No-op outside
 * standard driver mode.  Returns 0 on success or a negative errno.
 */
static int ssd_flush(struct ssd_device *dev)
{
	struct ssd_nand_op_msg *msg;
	struct ssd_flush_msg *fmsg;
	int ret = 0;

	if (unlikely(mode != SSD_DRV_MODE_STANDARD))
		return 0;

	msg = (struct ssd_nand_op_msg *)ssd_get_dmsg(dev);

	if (dev->protocol_info.ver < SSD_PROTOCOL_V3) {
		/* reinterpret the same message buffer with the old layout */
		fmsg = (struct ssd_flush_msg *)msg;

		fmsg->fun = SSD_FUNC_FLUSH;
		fmsg->flag = 0;
		fmsg->ctrl_idx = 0;
		fmsg->flash = 0;
	} else {
		msg->fun = SSD_FUNC_FLUSH;
		msg->flag = 0;
		msg->ctrl_idx = 0;
		msg->chip_no = 0;
	}

	ret = ssd_do_request(dev, WRITE, msg, NULL);
	ssd_put_dmsg(msg);

	return ret;
}
8128 | ||
/*
 * Barrier variant of ssd_flush(): identical flush command, but issued
 * through ssd_do_barrier_request() for use inside an I/O barrier.
 * Returns 0 on success or a negative errno.
 */
static int ssd_barrier_flush(struct ssd_device *dev)
{
	struct ssd_nand_op_msg *msg;
	struct ssd_flush_msg *fmsg;
	int ret = 0;

	if (unlikely(mode != SSD_DRV_MODE_STANDARD))
		return 0;

	msg = (struct ssd_nand_op_msg *)ssd_get_dmsg(dev);

	if (dev->protocol_info.ver < SSD_PROTOCOL_V3) {
		/* reinterpret the same message buffer with the old layout */
		fmsg = (struct ssd_flush_msg *)msg;

		fmsg->fun = SSD_FUNC_FLUSH;
		fmsg->flag = 0;
		fmsg->ctrl_idx = 0;
		fmsg->flash = 0;
	} else {
		msg->fun = SSD_FUNC_FLUSH;
		msg->flag = 0;
		msg->ctrl_idx = 0;
		msg->chip_no = 0;
	}

	ret = ssd_do_barrier_request(dev, WRITE, msg, NULL);
	ssd_put_dmsg(msg);

	return ret;
}
8159 | ||
/* per-write-mode flush timeout values programmed into the controller */
#define SSD_WMODE_BUFFER_TIMEOUT	0x00c82710
#define SSD_WMODE_BUFFER_EX_TIMEOUT	0x000500c8
#define SSD_WMODE_FUA_TIMEOUT	0x000503E8
/*
 * Program the controller's flush-timeout register for write mode @m.
 * The register packs the mode in bits 29:28 and the timeout in the low
 * bits.  Silently does nothing for protocol < V3.1.1 (register absent)
 * or for an unrecognized mode.
 */
static void ssd_set_flush_timeout(struct ssd_device *dev, int m)
{
	uint32_t to;
	uint32_t val = 0;

	if (dev->protocol_info.ver < SSD_PROTOCOL_V3_1_1) {
		return;
	}

	switch (m) {
	case SSD_WMODE_BUFFER:
		to = SSD_WMODE_BUFFER_TIMEOUT;
		break;
	case SSD_WMODE_BUFFER_EX:
		/* V3.2.1+ uses the plain buffer timeout for buffer-ex as well */
		if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2_1) {
			to = SSD_WMODE_BUFFER_EX_TIMEOUT;
		} else {
			to = SSD_WMODE_BUFFER_TIMEOUT;
		}
		break;
	case SSD_WMODE_FUA:
		to = SSD_WMODE_FUA_TIMEOUT;
		break;
	default:
		return;
	}

	val = (((uint32_t)((uint32_t)m & 0x3) << 28) | to);

	ssd_reg32_write(dev->ctrlp + SSD_FLUSH_TIMEOUT_REG, val);
}
8194 | ||
/*
 * Switch the device to write mode @m under an I/O barrier: drain and
 * flush outstanding I/O, program the new flush timeout, then publish the
 * new mode.  Returns 0 on success or a negative errno.
 */
static int ssd_do_switch_wmode(struct ssd_device *dev, int m)
{
	int ret = 0;

	ret = ssd_barrier_start(dev);
	if (ret) {
		goto out;
	}

	ret = ssd_barrier_flush(dev);
	if (ret) {
		goto out_barrier_end;
	}

	/* set controller flush timeout */
	ssd_set_flush_timeout(dev, m);

	dev->wmode = m;
	/* make the new mode visible before the barrier is lifted */
	mb();

out_barrier_end:
	ssd_barrier_end(dev);
out:
	return ret;
}
8220 | ||
/*
 * Resolve the requested write mode @m (handling SSD_WMODE_AUTO and
 * SSD_WMODE_DEFAULT) and switch to it if it differs from the current
 * mode.  AUTO falls back to FUA when the power-loss capacitor reports a
 * fault, otherwise to the protocol's default buffered mode.
 * Returns 0 on success or a negative errno.
 */
static int ssd_switch_wmode(struct ssd_device *dev, int m)
{
	int default_wmode;
	int next_wmode;
	int ret = 0;

	if (!test_bit(SSD_ONLINE, &dev->state)) {
		return -ENODEV;
	}

	/* protocol default: plain buffer before V3.2, buffer-ex from V3.2 on */
	if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
		default_wmode = SSD_WMODE_BUFFER;
	} else {
		default_wmode = SSD_WMODE_BUFFER_EX;
	}

	if (SSD_WMODE_AUTO == m) {
		/* battery fault ? */
		if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) {
			next_wmode = SSD_WMODE_FUA;
		} else {
			next_wmode = default_wmode;
		}
	} else if (SSD_WMODE_DEFAULT == m) {
		next_wmode = default_wmode;
	} else {
		next_wmode = m;
	}

	if (next_wmode != dev->wmode) {
		hio_warn("%s: switch write mode (%d -> %d)\n", dev->name, dev->wmode, next_wmode);
		ret = ssd_do_switch_wmode(dev, next_wmode);
		if (ret) {
			hio_err("%s: can not switch write mode (%d -> %d)\n", dev->name, dev->wmode, next_wmode);
		}
	}

	return ret;
}
8260 | ||
/*
 * Set the initial write mode from dev->user_wmode at init time, without
 * a barrier (no I/O is in flight yet).  Resolution of AUTO/DEFAULT
 * mirrors ssd_switch_wmode().  Always returns 0.
 */
static int ssd_init_wmode(struct ssd_device *dev)
{
	int default_wmode;
	int ret = 0;

	/* protocol default: plain buffer before V3.2, buffer-ex from V3.2 on */
	if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
		default_wmode = SSD_WMODE_BUFFER;
	} else {
		default_wmode = SSD_WMODE_BUFFER_EX;
	}

	/* resolve the user-requested (possibly symbolic) mode */
	if (SSD_WMODE_AUTO == dev->user_wmode) {
		/* battery fault ? */
		if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) {
			dev->wmode = SSD_WMODE_FUA;
		} else {
			dev->wmode = default_wmode;
		}
	} else if (SSD_WMODE_DEFAULT == dev->user_wmode) {
		dev->wmode = default_wmode;
	} else {
		dev->wmode = dev->user_wmode;
	}
	ssd_set_flush_timeout(dev, dev->wmode);

	return ret;
}
8289 | ||
/*
 * Validate and apply a user-requested write mode: record it as the new
 * user_wmode, log the change, and switch the device.  Rejects modes
 * outside [SSD_WMODE_BUFFER, SSD_WMODE_DEFAULT] and firmware older than
 * protocol V3.1.1.  Returns 0 on success or a negative errno.
 */
static int __ssd_set_wmode(struct ssd_device *dev, int m)
{
	int ret = 0;

	/* not support old fw */
	if (dev->protocol_info.ver < SSD_PROTOCOL_V3_1_1) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (m < SSD_WMODE_BUFFER || m > SSD_WMODE_DEFAULT) {
		ret = -EINVAL;
		goto out;
	}

	ssd_gen_swlog(dev, SSD_LOG_SET_WMODE, m);

	dev->user_wmode = m;

	ret = ssd_switch_wmode(dev, dev->user_wmode);
	if (ret) {
		goto out;
	}

out:
	return ret;
}
8317 | ||
8318 | int ssd_set_wmode(struct block_device *bdev, int m) | |
8319 | { | |
8320 | struct ssd_device *dev; | |
8321 | ||
8322 | if (!bdev || !(bdev->bd_disk)) { | |
8323 | return -EINVAL; | |
8324 | } | |
8325 | ||
8326 | dev = bdev->bd_disk->private_data; | |
8327 | ||
8328 | return __ssd_set_wmode(dev, m); | |
8329 | } | |
8330 | ||
/*
 * Reset the device without flushing or saving metadata first: stop the
 * work queue, drain I/O under a barrier, reset, then resume.  If a reset
 * is already in progress the call returns 0 immediately.
 */
static int ssd_do_reset(struct ssd_device *dev)
{
	int ret = 0;

	/* only one reset at a time */
	if (test_and_set_bit(SSD_RESETING, &dev->state)) {
		return 0;
	}

	ssd_stop_workq(dev);

	ret = ssd_barrier_start(dev);
	if (ret) {
		goto out;
	}

	if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
		/* old reset */
		ret = __ssd_reset(dev, SSD_RST_NORMAL);
	} else {
		/* full reset */
		//ret = __ssd_reset(dev, SSD_RST_FULL);
		ret = __ssd_reset(dev, SSD_RST_NORMAL);
	}
	if (ret) {
		goto out_barrier_end;
	}

out_barrier_end:
	ssd_barrier_end(dev);
out:
	ssd_start_workq(dev);
	test_and_clear_bit(SSD_RESETING, &dev->state);
	return ret;
}
8365 | ||
/*
 * Orderly reset: like ssd_do_reset(), but flushes the write cache and
 * saves firmware metadata under the barrier before resetting.  If a
 * reset is already in progress the call returns 0 immediately.
 */
static int ssd_full_reset(struct ssd_device *dev)
{
	int ret = 0;

	/* only one reset at a time */
	if (test_and_set_bit(SSD_RESETING, &dev->state)) {
		return 0;
	}

	ssd_stop_workq(dev);

	ret = ssd_barrier_start(dev);
	if (ret) {
		goto out;
	}

	ret = ssd_barrier_flush(dev);
	if (ret) {
		goto out_barrier_end;
	}

	ret = ssd_barrier_save_md(dev);
	if (ret) {
		goto out_barrier_end;
	}

	if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
		/* old reset */
		ret = __ssd_reset(dev, SSD_RST_NORMAL);
	} else {
		/* full reset */
		//ret = __ssd_reset(dev, SSD_RST_FULL);
		ret = __ssd_reset(dev, SSD_RST_NORMAL);
	}
	if (ret) {
		goto out_barrier_end;
	}

out_barrier_end:
	ssd_barrier_end(dev);
out:
	ssd_start_workq(dev);
	test_and_clear_bit(SSD_RESETING, &dev->state);
	return ret;
}
8410 | ||
/*
 * Public reset entry point for the block device @bdev.  Performs a full
 * (flush + metadata save) reset; on success, triggers a firmware reload
 * unless non-0x98 register access is flagged as unavailable.
 * Returns 0 on success or a negative errno.
 */
int ssd_reset(struct block_device *bdev)
{
	int ret;
	struct ssd_device *dev;

	if (!bdev || !(bdev->bd_disk)) {
		return -EINVAL;
	}

	dev = bdev->bd_disk->private_data;

	ret = ssd_full_reset(dev);
	if (!ret) {
		/* NOTE(review): reload is skipped when has_non_0x98_reg_access
		 * is set -- confirm intended semantics of that flag */
		if (!dev->has_non_0x98_reg_access) {
			ssd_reg32_write(dev->ctrlp + SSD_RELOAD_FW_REG, 0);
		}
	}

	return ret;
}
8431 | ||
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
/*
 * Pre-2.6.20 block-layer flush hook: forward the request to ssd_flush().
 * @error_sector is never filled in.
 */
static int ssd_issue_flush_fn(struct request_queue *q, struct gendisk *disk,
	sector_t *error_sector)
{
	struct ssd_device *dev = q->queuedata;

	return ssd_flush(dev);
}
#endif
8441 | ||
/*
 * Submit a "physical" bio directly to the device, bypassing the normal
 * make_request path.  Rejects bios when the device is offline, when it
 * carries barrier/FUA flags, or on writes to a read-only device.  With
 * SSD_QUEUE_PBIO, a busy submit path falls back to the internal queue.
 */
void ssd_submit_pbio(struct request_queue *q, struct bio *bio)
{
	struct ssd_device *dev = q->queuedata;
#ifdef SSD_QUEUE_PBIO
	int ret = -EBUSY;
#endif

	if (!test_bit(SSD_ONLINE, &dev->state)) {
		ssd_bio_endio(bio, -ENODEV);
		goto out;
	}

#ifdef SSD_DEBUG_ERR
	/* debug build: fail fast once any request has timed out */
	if (atomic_read(&dev->tocnt)) {
		hio_warn("%s: IO rejected because of IO timeout!\n", dev->name);
		ssd_bio_endio(bio, -EIO);
		goto out;
	}
#endif

	/* barrier/FUA bios are not supported on this path */
	if (unlikely(ssd_bio_has_barrier_or_fua(bio))) {
		ssd_bio_endio(bio, -EOPNOTSUPP);
		goto out;
	}

	if (unlikely(dev->readonly && bio_data_dir(bio) == WRITE)) {
		ssd_bio_endio(bio, -EROFS);
		goto out;
	}

#ifdef SSD_QUEUE_PBIO
	/* try direct submit only when nothing is already queued */
	if (0 == atomic_read(&dev->in_sendq)) {
		ret = __ssd_submit_pbio(dev, bio, 0);
	}

	if (ret) {
		/* mark as pbio so the queue worker resubmits it on this path */
		(void)test_and_set_bit(BIO_SSD_PBIO, &bio->bi_flags);
		ssd_queue_bio(dev, bio);
	}
#else
	__ssd_submit_pbio(dev, bio, 1);
#endif

out:
	return;
}
8488 | ||
/*
 * Block-layer make_request entry point.  The return type changed across
 * kernel versions: blk_qc_t (4.4+), void (3.2+), int (older).  Rejects
 * bios when the device is offline or on barrier/FUA, completes empty
 * flush bios immediately, and otherwise submits (or queues, when the
 * submit path is busy) the bio.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
static blk_qc_t ssd_make_request(struct request_queue *q, struct bio *bio)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
static void ssd_make_request(struct request_queue *q, struct bio *bio)
#else
static int ssd_make_request(struct request_queue *q, struct bio *bio)
#endif
{
	struct ssd_device *dev = q->queuedata;
	int ret = -EBUSY;

	if (!test_bit(SSD_ONLINE, &dev->state)) {
		ssd_bio_endio(bio, -ENODEV);
		goto out;
	}

	/* split bios that exceed queue limits (API changed in 4.13) */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,13,0))
	blk_queue_split(q, &bio);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0))
	blk_queue_split(q, &bio, q->bio_split);
#endif

#ifdef SSD_DEBUG_ERR
	/* debug build: fail fast once any request has timed out */
	if (atomic_read(&dev->tocnt)) {
		hio_warn("%s: IO rejected because of IO timeout!\n", dev->name);
		ssd_bio_endio(bio, -EIO);
		goto out;
	}
#endif

	/* barrier/FUA bios are not supported */
	if (unlikely(ssd_bio_has_barrier_or_fua(bio))) {
		ssd_bio_endio(bio, -EOPNOTSUPP);
		goto out;
	}

	/* writeback_cache_control.txt: REQ_FLUSH requests without data can be completed successfully without doing any work */
	if (unlikely(ssd_bio_has_flush(bio) && !bio_sectors(bio))) {
		ssd_bio_endio(bio, 0);
		goto out;
	}

	/* try direct submit only when nothing is already queued */
	if (0 == atomic_read(&dev->in_sendq)) {
		ret = ssd_submit_bio(dev, bio, 0);
	}

	if (ret) {
		ssd_queue_bio(dev, bio);
	}

out:
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
	return BLK_QC_T_NONE;
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
	return;
#else
	return 0;
#endif
}
8547 | ||
8548 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)) | |
8549 | static int ssd_block_getgeo(struct block_device *bdev, struct hd_geometry *geo) | |
8550 | { | |
8551 | struct ssd_device *dev; | |
8552 | ||
8553 | if (!bdev) { | |
8554 | return -EINVAL; | |
8555 | } | |
8556 | ||
8557 | dev = bdev->bd_disk->private_data; | |
8558 | if (!dev) { | |
8559 | return -EINVAL; | |
8560 | } | |
8561 | ||
8562 | geo->heads = 4; | |
8563 | geo->sectors = 16; | |
8564 | geo->cylinders = (dev->hw_info.size & ~0x3f) >> 6; | |
8565 | return 0; | |
8566 | } | |
8567 | #endif | |
8568 | ||
1197134c KM |
8569 | static int ssd_init_queue(struct ssd_device *dev); |
8570 | static void ssd_cleanup_queue(struct ssd_device *dev); | |
361ebed5 HSDT |
8571 | static void ssd_cleanup_blkdev(struct ssd_device *dev); |
8572 | static int ssd_init_blkdev(struct ssd_device *dev); | |
8573 | static int ssd_ioctl_common(struct ssd_device *dev, unsigned int cmd, unsigned long arg) | |
8574 | { | |
8575 | void __user *argp = (void __user *)arg; | |
8576 | void __user *buf = NULL; | |
8577 | void *kbuf = NULL; | |
8578 | int ret = 0; | |
8579 | ||
8580 | switch (cmd) { | |
8581 | case SSD_CMD_GET_PROTOCOL_INFO: | |
8582 | if (copy_to_user(argp, &dev->protocol_info, sizeof(struct ssd_protocol_info))) { | |
8583 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8584 | ret = -EFAULT; | |
8585 | break; | |
8586 | } | |
8587 | break; | |
8588 | ||
8589 | case SSD_CMD_GET_HW_INFO: | |
8590 | if (copy_to_user(argp, &dev->hw_info, sizeof(struct ssd_hw_info))) { | |
8591 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8592 | ret = -EFAULT; | |
8593 | break; | |
8594 | } | |
8595 | break; | |
8596 | ||
8597 | case SSD_CMD_GET_ROM_INFO: | |
8598 | if (copy_to_user(argp, &dev->rom_info, sizeof(struct ssd_rom_info))) { | |
8599 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8600 | ret = -EFAULT; | |
8601 | break; | |
8602 | } | |
8603 | break; | |
8604 | ||
8605 | case SSD_CMD_GET_SMART: { | |
8606 | struct ssd_smart smart; | |
8607 | int i; | |
8608 | ||
8609 | memcpy(&smart, &dev->smart, sizeof(struct ssd_smart)); | |
8610 | ||
8611 | mutex_lock(&dev->gd_mutex); | |
8612 | ssd_update_smart(dev, &smart); | |
8613 | mutex_unlock(&dev->gd_mutex); | |
8614 | ||
8615 | /* combine the volatile log info */ | |
8616 | if (dev->log_info.nr_log) { | |
8617 | for (i=0; i<SSD_LOG_NR_LEVEL; i++) { | |
8618 | smart.log_info.stat[i] += dev->log_info.stat[i]; | |
8619 | } | |
8620 | } | |
8621 | ||
8622 | if (copy_to_user(argp, &smart, sizeof(struct ssd_smart))) { | |
8623 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8624 | ret = -EFAULT; | |
8625 | break; | |
8626 | } | |
8627 | ||
8628 | break; | |
8629 | } | |
8630 | ||
8631 | case SSD_CMD_GET_IDX: | |
8632 | if (copy_to_user(argp, &dev->idx, sizeof(int))) { | |
8633 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8634 | ret = -EFAULT; | |
8635 | break; | |
8636 | } | |
8637 | break; | |
8638 | ||
8639 | case SSD_CMD_GET_AMOUNT: { | |
8640 | int nr_ssd = atomic_read(&ssd_nr); | |
8641 | if (copy_to_user(argp, &nr_ssd, sizeof(int))) { | |
8642 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8643 | ret = -EFAULT; | |
8644 | break; | |
8645 | } | |
8646 | break; | |
8647 | } | |
8648 | ||
8649 | case SSD_CMD_GET_TO_INFO: { | |
8650 | int tocnt = atomic_read(&dev->tocnt); | |
8651 | ||
8652 | if (copy_to_user(argp, &tocnt, sizeof(int))) { | |
8653 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8654 | ret = -EFAULT; | |
8655 | break; | |
8656 | } | |
8657 | break; | |
8658 | } | |
8659 | ||
8660 | case SSD_CMD_GET_DRV_VER: { | |
8661 | char ver[] = DRIVER_VERSION; | |
8662 | int len = sizeof(ver); | |
8663 | ||
8664 | if (len > (DRIVER_VERSION_LEN - 1)) { | |
8665 | len = (DRIVER_VERSION_LEN - 1); | |
8666 | } | |
8667 | if (copy_to_user(argp, ver, len)) { | |
8668 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8669 | ret = -EFAULT; | |
8670 | break; | |
8671 | } | |
8672 | break; | |
8673 | } | |
8674 | ||
8675 | case SSD_CMD_GET_BBACC_INFO: { | |
8676 | struct ssd_acc_info acc; | |
8677 | ||
8678 | mutex_lock(&dev->fw_mutex); | |
8679 | ret = ssd_bb_acc(dev, &acc); | |
8680 | mutex_unlock(&dev->fw_mutex); | |
8681 | if (ret) { | |
8682 | break; | |
8683 | } | |
8684 | ||
8685 | if (copy_to_user(argp, &acc, sizeof(struct ssd_acc_info))) { | |
8686 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8687 | ret = -EFAULT; | |
8688 | break; | |
8689 | } | |
8690 | break; | |
8691 | } | |
8692 | ||
8693 | case SSD_CMD_GET_ECACC_INFO: { | |
8694 | struct ssd_acc_info acc; | |
8695 | ||
8696 | mutex_lock(&dev->fw_mutex); | |
8697 | ret = ssd_ec_acc(dev, &acc); | |
8698 | mutex_unlock(&dev->fw_mutex); | |
8699 | if (ret) { | |
8700 | break; | |
8701 | } | |
8702 | ||
8703 | if (copy_to_user(argp, &acc, sizeof(struct ssd_acc_info))) { | |
8704 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8705 | ret = -EFAULT; | |
8706 | break; | |
8707 | } | |
8708 | break; | |
8709 | } | |
8710 | ||
8711 | case SSD_CMD_GET_HW_INFO_EXT: | |
8712 | if (copy_to_user(argp, &dev->hw_info_ext, sizeof(struct ssd_hw_info_extend))) { | |
8713 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8714 | ret = -EFAULT; | |
8715 | break; | |
8716 | } | |
8717 | break; | |
8718 | ||
8719 | case SSD_CMD_REG_READ: { | |
8720 | struct ssd_reg_op_info reg_info; | |
8721 | ||
8722 | if (copy_from_user(®_info, argp, sizeof(struct ssd_reg_op_info))) { | |
8723 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
8724 | ret = -EFAULT; | |
8725 | break; | |
8726 | } | |
8727 | ||
8728 | if (reg_info.offset > dev->mmio_len-sizeof(uint32_t)) { | |
8729 | ret = -EINVAL; | |
8730 | break; | |
8731 | } | |
8732 | ||
8733 | reg_info.value = ssd_reg32_read(dev->ctrlp + reg_info.offset); | |
8734 | if (copy_to_user(argp, ®_info, sizeof(struct ssd_reg_op_info))) { | |
8735 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8736 | ret = -EFAULT; | |
8737 | break; | |
8738 | } | |
8739 | ||
8740 | break; | |
8741 | } | |
8742 | ||
8743 | case SSD_CMD_REG_WRITE: { | |
8744 | struct ssd_reg_op_info reg_info; | |
8745 | ||
8746 | if (copy_from_user(®_info, argp, sizeof(struct ssd_reg_op_info))) { | |
8747 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
8748 | ret = -EFAULT; | |
8749 | break; | |
8750 | } | |
8751 | ||
8752 | if (reg_info.offset > dev->mmio_len-sizeof(uint32_t)) { | |
8753 | ret = -EINVAL; | |
8754 | break; | |
8755 | } | |
8756 | ||
8757 | ssd_reg32_write(dev->ctrlp + reg_info.offset, reg_info.value); | |
8758 | ||
8759 | break; | |
8760 | } | |
8761 | ||
8762 | case SSD_CMD_SPI_READ: { | |
8763 | struct ssd_spi_op_info spi_info; | |
8764 | uint32_t off, size; | |
8765 | ||
8766 | if (copy_from_user(&spi_info, argp, sizeof(struct ssd_spi_op_info))) { | |
8767 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
8768 | ret = -EFAULT; | |
8769 | break; | |
8770 | } | |
8771 | ||
8772 | off = spi_info.off; | |
8773 | size = spi_info.len; | |
8774 | buf = spi_info.buf; | |
8775 | ||
8776 | if (size > dev->rom_info.size || 0 == size || (off + size) > dev->rom_info.size) { | |
8777 | ret = -EINVAL; | |
8778 | break; | |
8779 | } | |
8780 | ||
8781 | kbuf = kmalloc(size, GFP_KERNEL); | |
8782 | if (!kbuf) { | |
8783 | ret = -ENOMEM; | |
8784 | break; | |
8785 | } | |
8786 | ||
8787 | ret = ssd_spi_page_read(dev, kbuf, off, size); | |
8788 | if (ret) { | |
8789 | kfree(kbuf); | |
8790 | break; | |
8791 | } | |
8792 | ||
8793 | if (copy_to_user(buf, kbuf, size)) { | |
8794 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8795 | kfree(kbuf); | |
8796 | ret = -EFAULT; | |
8797 | break; | |
8798 | } | |
8799 | ||
8800 | kfree(kbuf); | |
8801 | ||
8802 | break; | |
8803 | } | |
8804 | ||
8805 | case SSD_CMD_SPI_WRITE: { | |
8806 | struct ssd_spi_op_info spi_info; | |
8807 | uint32_t off, size; | |
8808 | ||
8809 | if (copy_from_user(&spi_info, argp, sizeof(struct ssd_spi_op_info))) { | |
8810 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
8811 | ret = -EFAULT; | |
8812 | break; | |
8813 | } | |
8814 | ||
8815 | off = spi_info.off; | |
8816 | size = spi_info.len; | |
8817 | buf = spi_info.buf; | |
8818 | ||
8819 | if (size > dev->rom_info.size || 0 == size || (off + size) > dev->rom_info.size) { | |
8820 | ret = -EINVAL; | |
8821 | break; | |
8822 | } | |
8823 | ||
8824 | kbuf = kmalloc(size, GFP_KERNEL); | |
8825 | if (!kbuf) { | |
8826 | ret = -ENOMEM; | |
8827 | break; | |
8828 | } | |
8829 | ||
8830 | if (copy_from_user(kbuf, buf, size)) { | |
8831 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
8832 | kfree(kbuf); | |
8833 | ret = -EFAULT; | |
8834 | break; | |
8835 | } | |
8836 | ||
8837 | ret = ssd_spi_page_write(dev, kbuf, off, size); | |
8838 | if (ret) { | |
8839 | kfree(kbuf); | |
8840 | break; | |
8841 | } | |
8842 | ||
8843 | kfree(kbuf); | |
8844 | ||
8845 | break; | |
8846 | } | |
8847 | ||
8848 | case SSD_CMD_SPI_ERASE: { | |
8849 | struct ssd_spi_op_info spi_info; | |
8850 | uint32_t off; | |
8851 | ||
8852 | if (copy_from_user(&spi_info, argp, sizeof(struct ssd_spi_op_info))) { | |
8853 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
8854 | ret = -EFAULT; | |
8855 | break; | |
8856 | } | |
8857 | ||
8858 | off = spi_info.off; | |
8859 | ||
8860 | if ((off + dev->rom_info.block_size) > dev->rom_info.size) { | |
8861 | ret = -EINVAL; | |
8862 | break; | |
8863 | } | |
8864 | ||
8865 | ret = ssd_spi_block_erase(dev, off); | |
8866 | if (ret) { | |
8867 | break; | |
8868 | } | |
8869 | ||
8870 | break; | |
8871 | } | |
8872 | ||
8873 | case SSD_CMD_I2C_READ: { | |
8874 | struct ssd_i2c_op_info i2c_info; | |
8875 | uint8_t saddr; | |
8876 | uint8_t rsize; | |
8877 | ||
8878 | if (copy_from_user(&i2c_info, argp, sizeof(struct ssd_i2c_op_info))) { | |
8879 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
8880 | ret = -EFAULT; | |
8881 | break; | |
8882 | } | |
8883 | ||
8884 | saddr = i2c_info.saddr; | |
8885 | rsize = i2c_info.rsize; | |
8886 | buf = i2c_info.rbuf; | |
8887 | ||
8888 | if (rsize <= 0 || rsize > SSD_I2C_MAX_DATA) { | |
8889 | ret = -EINVAL; | |
8890 | break; | |
8891 | } | |
8892 | ||
8893 | kbuf = kmalloc(rsize, GFP_KERNEL); | |
8894 | if (!kbuf) { | |
8895 | ret = -ENOMEM; | |
8896 | break; | |
8897 | } | |
8898 | ||
8899 | ret = ssd_i2c_read(dev, saddr, rsize, kbuf); | |
8900 | if (ret) { | |
8901 | kfree(kbuf); | |
8902 | break; | |
8903 | } | |
8904 | ||
8905 | if (copy_to_user(buf, kbuf, rsize)) { | |
8906 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8907 | kfree(kbuf); | |
8908 | ret = -EFAULT; | |
8909 | break; | |
8910 | } | |
8911 | ||
8912 | kfree(kbuf); | |
8913 | ||
8914 | break; | |
8915 | } | |
8916 | ||
8917 | case SSD_CMD_I2C_WRITE: { | |
8918 | struct ssd_i2c_op_info i2c_info; | |
8919 | uint8_t saddr; | |
8920 | uint8_t wsize; | |
8921 | ||
8922 | if (copy_from_user(&i2c_info, argp, sizeof(struct ssd_i2c_op_info))) { | |
8923 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
8924 | ret = -EFAULT; | |
8925 | break; | |
8926 | } | |
8927 | ||
8928 | saddr = i2c_info.saddr; | |
8929 | wsize = i2c_info.wsize; | |
8930 | buf = i2c_info.wbuf; | |
8931 | ||
8932 | if (wsize <= 0 || wsize > SSD_I2C_MAX_DATA) { | |
8933 | ret = -EINVAL; | |
8934 | break; | |
8935 | } | |
8936 | ||
8937 | kbuf = kmalloc(wsize, GFP_KERNEL); | |
8938 | if (!kbuf) { | |
8939 | ret = -ENOMEM; | |
8940 | break; | |
8941 | } | |
8942 | ||
8943 | if (copy_from_user(kbuf, buf, wsize)) { | |
8944 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
8945 | kfree(kbuf); | |
8946 | ret = -EFAULT; | |
8947 | break; | |
8948 | } | |
8949 | ||
8950 | ret = ssd_i2c_write(dev, saddr, wsize, kbuf); | |
8951 | if (ret) { | |
8952 | kfree(kbuf); | |
8953 | break; | |
8954 | } | |
8955 | ||
8956 | kfree(kbuf); | |
8957 | ||
8958 | break; | |
8959 | } | |
8960 | ||
8961 | case SSD_CMD_I2C_WRITE_READ: { | |
8962 | struct ssd_i2c_op_info i2c_info; | |
8963 | uint8_t saddr; | |
8964 | uint8_t wsize; | |
8965 | uint8_t rsize; | |
8966 | uint8_t size; | |
8967 | ||
8968 | if (copy_from_user(&i2c_info, argp, sizeof(struct ssd_i2c_op_info))) { | |
8969 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
8970 | ret = -EFAULT; | |
8971 | break; | |
8972 | } | |
8973 | ||
8974 | saddr = i2c_info.saddr; | |
8975 | wsize = i2c_info.wsize; | |
8976 | rsize = i2c_info.rsize; | |
8977 | buf = i2c_info.wbuf; | |
8978 | ||
8979 | if (wsize <= 0 || wsize > SSD_I2C_MAX_DATA) { | |
8980 | ret = -EINVAL; | |
8981 | break; | |
8982 | } | |
8983 | ||
8984 | if (rsize <= 0 || rsize > SSD_I2C_MAX_DATA) { | |
8985 | ret = -EINVAL; | |
8986 | break; | |
8987 | } | |
8988 | ||
8989 | size = wsize + rsize; | |
8990 | ||
8991 | kbuf = kmalloc(size, GFP_KERNEL); | |
8992 | if (!kbuf) { | |
8993 | ret = -ENOMEM; | |
8994 | break; | |
8995 | } | |
8996 | ||
8997 | if (copy_from_user((kbuf + rsize), buf, wsize)) { | |
8998 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
8999 | kfree(kbuf); | |
9000 | ret = -EFAULT; | |
9001 | break; | |
9002 | } | |
9003 | ||
9004 | buf = i2c_info.rbuf; | |
9005 | ||
9006 | ret = ssd_i2c_write_read(dev, saddr, wsize, (kbuf + rsize), rsize, kbuf); | |
9007 | if (ret) { | |
9008 | kfree(kbuf); | |
9009 | break; | |
9010 | } | |
9011 | ||
9012 | if (copy_to_user(buf, kbuf, rsize)) { | |
9013 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9014 | kfree(kbuf); | |
9015 | ret = -EFAULT; | |
9016 | break; | |
9017 | } | |
9018 | ||
9019 | kfree(kbuf); | |
9020 | ||
9021 | break; | |
9022 | } | |
9023 | ||
9024 | case SSD_CMD_SMBUS_SEND_BYTE: { | |
9025 | struct ssd_smbus_op_info smbus_info; | |
9026 | uint8_t smb_data[SSD_SMBUS_BLOCK_MAX]; | |
9027 | uint8_t saddr; | |
9028 | uint8_t size; | |
9029 | ||
9030 | if (copy_from_user(&smbus_info, argp, sizeof(struct ssd_smbus_op_info))) { | |
9031 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9032 | ret = -EFAULT; | |
9033 | break; | |
9034 | } | |
9035 | ||
9036 | saddr = smbus_info.saddr; | |
9037 | buf = smbus_info.buf; | |
9038 | size = 1; | |
9039 | ||
9040 | if (copy_from_user(smb_data, buf, size)) { | |
9041 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9042 | ret = -EFAULT; | |
9043 | break; | |
9044 | } | |
9045 | ||
9046 | ret = ssd_smbus_send_byte(dev, saddr, smb_data); | |
9047 | if (ret) { | |
9048 | break; | |
9049 | } | |
9050 | ||
9051 | break; | |
9052 | } | |
9053 | ||
9054 | case SSD_CMD_SMBUS_RECEIVE_BYTE: { | |
9055 | struct ssd_smbus_op_info smbus_info; | |
9056 | uint8_t smb_data[SSD_SMBUS_BLOCK_MAX]; | |
9057 | uint8_t saddr; | |
9058 | uint8_t size; | |
9059 | ||
9060 | if (copy_from_user(&smbus_info, argp, sizeof(struct ssd_smbus_op_info))) { | |
9061 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9062 | ret = -EFAULT; | |
9063 | break; | |
9064 | } | |
9065 | ||
9066 | saddr = smbus_info.saddr; | |
9067 | buf = smbus_info.buf; | |
9068 | size = 1; | |
9069 | ||
9070 | ret = ssd_smbus_receive_byte(dev, saddr, smb_data); | |
9071 | if (ret) { | |
9072 | break; | |
9073 | } | |
9074 | ||
9075 | if (copy_to_user(buf, smb_data, size)) { | |
9076 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9077 | ret = -EFAULT; | |
9078 | break; | |
9079 | } | |
9080 | ||
9081 | break; | |
9082 | } | |
9083 | ||
9084 | case SSD_CMD_SMBUS_WRITE_BYTE: { | |
9085 | struct ssd_smbus_op_info smbus_info; | |
9086 | uint8_t smb_data[SSD_SMBUS_BLOCK_MAX]; | |
9087 | uint8_t saddr; | |
9088 | uint8_t command; | |
9089 | uint8_t size; | |
9090 | ||
9091 | if (copy_from_user(&smbus_info, argp, sizeof(struct ssd_smbus_op_info))) { | |
9092 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9093 | ret = -EFAULT; | |
9094 | break; | |
9095 | } | |
9096 | ||
9097 | saddr = smbus_info.saddr; | |
9098 | command = smbus_info.cmd; | |
9099 | buf = smbus_info.buf; | |
9100 | size = 1; | |
9101 | ||
9102 | if (copy_from_user(smb_data, buf, size)) { | |
9103 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9104 | ret = -EFAULT; | |
9105 | break; | |
9106 | } | |
9107 | ||
9108 | ret = ssd_smbus_write_byte(dev, saddr, command, smb_data); | |
9109 | if (ret) { | |
9110 | break; | |
9111 | } | |
9112 | ||
9113 | break; | |
9114 | } | |
9115 | ||
9116 | case SSD_CMD_SMBUS_READ_BYTE: { | |
9117 | struct ssd_smbus_op_info smbus_info; | |
9118 | uint8_t smb_data[SSD_SMBUS_BLOCK_MAX]; | |
9119 | uint8_t saddr; | |
9120 | uint8_t command; | |
9121 | uint8_t size; | |
9122 | ||
9123 | if (copy_from_user(&smbus_info, argp, sizeof(struct ssd_smbus_op_info))) { | |
9124 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9125 | ret = -EFAULT; | |
9126 | break; | |
9127 | } | |
9128 | ||
9129 | saddr = smbus_info.saddr; | |
9130 | command = smbus_info.cmd; | |
9131 | buf = smbus_info.buf; | |
9132 | size = 1; | |
9133 | ||
9134 | ret = ssd_smbus_read_byte(dev, saddr, command, smb_data); | |
9135 | if (ret) { | |
9136 | break; | |
9137 | } | |
9138 | ||
9139 | if (copy_to_user(buf, smb_data, size)) { | |
9140 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9141 | ret = -EFAULT; | |
9142 | break; | |
9143 | } | |
9144 | ||
9145 | break; | |
9146 | } | |
9147 | ||
9148 | case SSD_CMD_SMBUS_WRITE_WORD: { | |
9149 | struct ssd_smbus_op_info smbus_info; | |
9150 | uint8_t smb_data[SSD_SMBUS_BLOCK_MAX]; | |
9151 | uint8_t saddr; | |
9152 | uint8_t command; | |
9153 | uint8_t size; | |
9154 | ||
9155 | if (copy_from_user(&smbus_info, argp, sizeof(struct ssd_smbus_op_info))) { | |
9156 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9157 | ret = -EFAULT; | |
9158 | break; | |
9159 | } | |
9160 | ||
9161 | saddr = smbus_info.saddr; | |
9162 | command = smbus_info.cmd; | |
9163 | buf = smbus_info.buf; | |
9164 | size = 2; | |
9165 | ||
9166 | if (copy_from_user(smb_data, buf, size)) { | |
9167 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9168 | ret = -EFAULT; | |
9169 | break; | |
9170 | } | |
9171 | ||
9172 | ret = ssd_smbus_write_word(dev, saddr, command, smb_data); | |
9173 | if (ret) { | |
9174 | break; | |
9175 | } | |
9176 | ||
9177 | break; | |
9178 | } | |
9179 | ||
9180 | case SSD_CMD_SMBUS_READ_WORD: { | |
9181 | struct ssd_smbus_op_info smbus_info; | |
9182 | uint8_t smb_data[SSD_SMBUS_BLOCK_MAX]; | |
9183 | uint8_t saddr; | |
9184 | uint8_t command; | |
9185 | uint8_t size; | |
9186 | ||
9187 | if (copy_from_user(&smbus_info, argp, sizeof(struct ssd_smbus_op_info))) { | |
9188 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9189 | ret = -EFAULT; | |
9190 | break; | |
9191 | } | |
9192 | ||
9193 | saddr = smbus_info.saddr; | |
9194 | command = smbus_info.cmd; | |
9195 | buf = smbus_info.buf; | |
9196 | size = 2; | |
9197 | ||
9198 | ret = ssd_smbus_read_word(dev, saddr, command, smb_data); | |
9199 | if (ret) { | |
9200 | break; | |
9201 | } | |
9202 | ||
9203 | if (copy_to_user(buf, smb_data, size)) { | |
9204 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9205 | ret = -EFAULT; | |
9206 | break; | |
9207 | } | |
9208 | ||
9209 | break; | |
9210 | } | |
9211 | ||
9212 | case SSD_CMD_SMBUS_WRITE_BLOCK: { | |
9213 | struct ssd_smbus_op_info smbus_info; | |
9214 | uint8_t smb_data[SSD_SMBUS_BLOCK_MAX]; | |
9215 | uint8_t saddr; | |
9216 | uint8_t command; | |
9217 | uint8_t size; | |
9218 | ||
9219 | if (copy_from_user(&smbus_info, argp, sizeof(struct ssd_smbus_op_info))) { | |
9220 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9221 | ret = -EFAULT; | |
9222 | break; | |
9223 | } | |
9224 | ||
9225 | saddr = smbus_info.saddr; | |
9226 | command = smbus_info.cmd; | |
9227 | buf = smbus_info.buf; | |
9228 | size = smbus_info.size; | |
9229 | ||
9230 | if (size > SSD_SMBUS_BLOCK_MAX) { | |
9231 | ret = -EINVAL; | |
9232 | break; | |
9233 | } | |
9234 | ||
9235 | if (copy_from_user(smb_data, buf, size)) { | |
9236 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9237 | ret = -EFAULT; | |
9238 | break; | |
9239 | } | |
9240 | ||
9241 | ret = ssd_smbus_write_block(dev, saddr, command, size, smb_data); | |
9242 | if (ret) { | |
9243 | break; | |
9244 | } | |
9245 | ||
9246 | break; | |
9247 | } | |
9248 | ||
9249 | case SSD_CMD_SMBUS_READ_BLOCK: { | |
9250 | struct ssd_smbus_op_info smbus_info; | |
9251 | uint8_t smb_data[SSD_SMBUS_BLOCK_MAX]; | |
9252 | uint8_t saddr; | |
9253 | uint8_t command; | |
9254 | uint8_t size; | |
9255 | ||
9256 | if (copy_from_user(&smbus_info, argp, sizeof(struct ssd_smbus_op_info))) { | |
9257 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9258 | ret = -EFAULT; | |
9259 | break; | |
9260 | } | |
9261 | ||
9262 | saddr = smbus_info.saddr; | |
9263 | command = smbus_info.cmd; | |
9264 | buf = smbus_info.buf; | |
9265 | size = smbus_info.size; | |
9266 | ||
9267 | if (size > SSD_SMBUS_BLOCK_MAX) { | |
9268 | ret = -EINVAL; | |
9269 | break; | |
9270 | } | |
9271 | ||
9272 | ret = ssd_smbus_read_block(dev, saddr, command, size, smb_data); | |
9273 | if (ret) { | |
9274 | break; | |
9275 | } | |
9276 | ||
9277 | if (copy_to_user(buf, smb_data, size)) { | |
9278 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9279 | ret = -EFAULT; | |
9280 | break; | |
9281 | } | |
9282 | ||
9283 | break; | |
9284 | } | |
9285 | ||
9286 | case SSD_CMD_BM_GET_VER: { | |
9287 | uint16_t ver; | |
9288 | ||
9289 | ret = ssd_bm_get_version(dev, &ver); | |
9290 | if (ret) { | |
9291 | break; | |
9292 | } | |
9293 | ||
9294 | if (copy_to_user(argp, &ver, sizeof(uint16_t))) { | |
9295 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9296 | ret = -EFAULT; | |
9297 | break; | |
9298 | } | |
9299 | ||
9300 | break; | |
9301 | } | |
9302 | ||
9303 | case SSD_CMD_BM_GET_NR_CAP: { | |
9304 | int nr_cap; | |
9305 | ||
9306 | ret = ssd_bm_nr_cap(dev, &nr_cap); | |
9307 | if (ret) { | |
9308 | break; | |
9309 | } | |
9310 | ||
9311 | if (copy_to_user(argp, &nr_cap, sizeof(int))) { | |
9312 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9313 | ret = -EFAULT; | |
9314 | break; | |
9315 | } | |
9316 | ||
9317 | break; | |
9318 | } | |
9319 | ||
9320 | case SSD_CMD_BM_CAP_LEARNING: { | |
9321 | ret = ssd_bm_enter_cap_learning(dev); | |
9322 | ||
9323 | if (ret) { | |
9324 | break; | |
9325 | } | |
9326 | ||
9327 | break; | |
9328 | } | |
9329 | ||
9330 | case SSD_CMD_CAP_LEARN: { | |
9331 | uint32_t cap = 0; | |
9332 | ||
9333 | ret = ssd_cap_learn(dev, &cap); | |
9334 | if (ret) { | |
9335 | break; | |
9336 | } | |
9337 | ||
9338 | if (copy_to_user(argp, &cap, sizeof(uint32_t))) { | |
9339 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9340 | ret = -EFAULT; | |
9341 | break; | |
9342 | } | |
9343 | ||
9344 | break; | |
9345 | } | |
9346 | ||
9347 | case SSD_CMD_GET_CAP_STATUS: { | |
9348 | int cap_status = 0; | |
9349 | ||
9350 | if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) { | |
9351 | cap_status = 1; | |
9352 | } | |
9353 | ||
9354 | if (copy_to_user(argp, &cap_status, sizeof(int))) { | |
9355 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9356 | ret = -EFAULT; | |
9357 | break; | |
9358 | } | |
9359 | ||
9360 | break; | |
9361 | } | |
9362 | ||
9363 | case SSD_CMD_RAM_READ: { | |
9364 | struct ssd_ram_op_info ram_info; | |
9365 | uint64_t ofs; | |
9366 | uint32_t length; | |
9367 | size_t rlen, len = dev->hw_info.ram_max_len; | |
9368 | int ctrl_idx; | |
9369 | ||
9370 | if (copy_from_user(&ram_info, argp, sizeof(struct ssd_ram_op_info))) { | |
9371 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9372 | ret = -EFAULT; | |
9373 | break; | |
9374 | } | |
9375 | ||
9376 | ofs = ram_info.start; | |
9377 | length = ram_info.length; | |
9378 | buf = ram_info.buf; | |
9379 | ctrl_idx = ram_info.ctrl_idx; | |
9380 | ||
9381 | if (ofs >= dev->hw_info.ram_size || length > dev->hw_info.ram_size || 0 == length || (ofs + length) > dev->hw_info.ram_size) { | |
9382 | ret = -EINVAL; | |
9383 | break; | |
9384 | } | |
9385 | ||
9386 | kbuf = kmalloc(len, GFP_KERNEL); | |
9387 | if (!kbuf) { | |
9388 | ret = -ENOMEM; | |
9389 | break; | |
9390 | } | |
9391 | ||
9392 | for (rlen=0; rlen<length; rlen+=len, buf+=len, ofs+=len) { | |
9393 | if ((length - rlen) < len) { | |
9394 | len = length - rlen; | |
9395 | } | |
9396 | ||
9397 | ret = ssd_ram_read(dev, kbuf, len, ofs, ctrl_idx); | |
9398 | if (ret) { | |
9399 | break; | |
9400 | } | |
9401 | ||
9402 | if (copy_to_user(buf, kbuf, len)) { | |
9403 | ret = -EFAULT; | |
9404 | break; | |
9405 | } | |
9406 | } | |
9407 | ||
9408 | kfree(kbuf); | |
9409 | ||
9410 | break; | |
9411 | } | |
9412 | ||
9413 | case SSD_CMD_RAM_WRITE: { | |
9414 | struct ssd_ram_op_info ram_info; | |
9415 | uint64_t ofs; | |
9416 | uint32_t length; | |
9417 | size_t wlen, len = dev->hw_info.ram_max_len; | |
9418 | int ctrl_idx; | |
9419 | ||
9420 | if (copy_from_user(&ram_info, argp, sizeof(struct ssd_ram_op_info))) { | |
9421 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9422 | ret = -EFAULT; | |
9423 | break; | |
9424 | } | |
9425 | ofs = ram_info.start; | |
9426 | length = ram_info.length; | |
9427 | buf = ram_info.buf; | |
9428 | ctrl_idx = ram_info.ctrl_idx; | |
9429 | ||
9430 | if (ofs >= dev->hw_info.ram_size || length > dev->hw_info.ram_size || 0 == length || (ofs + length) > dev->hw_info.ram_size) { | |
9431 | ret = -EINVAL; | |
9432 | break; | |
9433 | } | |
9434 | ||
9435 | kbuf = kmalloc(len, GFP_KERNEL); | |
9436 | if (!kbuf) { | |
9437 | ret = -ENOMEM; | |
9438 | break; | |
9439 | } | |
9440 | ||
9441 | for (wlen=0; wlen<length; wlen+=len, buf+=len, ofs+=len) { | |
9442 | if ((length - wlen) < len) { | |
9443 | len = length - wlen; | |
9444 | } | |
9445 | ||
9446 | if (copy_from_user(kbuf, buf, len)) { | |
9447 | ret = -EFAULT; | |
9448 | break; | |
9449 | } | |
9450 | ||
9451 | ret = ssd_ram_write(dev, kbuf, len, ofs, ctrl_idx); | |
9452 | if (ret) { | |
9453 | break; | |
9454 | } | |
9455 | } | |
9456 | ||
9457 | kfree(kbuf); | |
9458 | ||
9459 | break; | |
9460 | } | |
9461 | ||
9462 | case SSD_CMD_NAND_READ_ID: { | |
9463 | struct ssd_flash_op_info flash_info; | |
9464 | int chip_no, chip_ce, length, ctrl_idx; | |
9465 | ||
9466 | if (copy_from_user(&flash_info, argp, sizeof(struct ssd_flash_op_info))) { | |
9467 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9468 | ret = -EFAULT; | |
9469 | break; | |
9470 | } | |
9471 | ||
9472 | chip_no = flash_info.flash; | |
9473 | chip_ce = flash_info.chip; | |
9474 | ctrl_idx = flash_info.ctrl_idx; | |
9475 | buf = flash_info.buf; | |
9476 | length = dev->hw_info.id_size; | |
9477 | ||
9478 | //kbuf = kmalloc(length, GFP_KERNEL); | |
9479 | kbuf = kmalloc(SSD_NAND_ID_BUFF_SZ, GFP_KERNEL); //xx | |
9480 | if (!kbuf) { | |
9481 | ret = -ENOMEM; | |
9482 | break; | |
9483 | } | |
9484 | memset(kbuf, 0, length); | |
9485 | ||
9486 | ret = ssd_nand_read_id(dev, kbuf, chip_no, chip_ce, ctrl_idx); | |
9487 | if (ret) { | |
9488 | kfree(kbuf); | |
9489 | break; | |
9490 | } | |
9491 | ||
9492 | if (copy_to_user(buf, kbuf, length)) { | |
9493 | kfree(kbuf); | |
9494 | ret = -EFAULT; | |
9495 | break; | |
9496 | } | |
9497 | ||
9498 | kfree(kbuf); | |
9499 | ||
9500 | break; | |
9501 | } | |
9502 | ||
9503 | case SSD_CMD_NAND_READ: { //with oob | |
9504 | struct ssd_flash_op_info flash_info; | |
9505 | uint32_t length; | |
9506 | int flash, chip, page, ctrl_idx; | |
9507 | int err = 0; | |
9508 | ||
9509 | if (copy_from_user(&flash_info, argp, sizeof(struct ssd_flash_op_info))) { | |
9510 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9511 | ret = -EFAULT; | |
9512 | break; | |
9513 | } | |
9514 | ||
9515 | flash = flash_info.flash; | |
9516 | chip = flash_info.chip; | |
9517 | page = flash_info.page; | |
9518 | buf = flash_info.buf; | |
9519 | ctrl_idx = flash_info.ctrl_idx; | |
9520 | ||
9521 | length = dev->hw_info.page_size + dev->hw_info.oob_size; | |
9522 | ||
9523 | kbuf = kmalloc(length, GFP_KERNEL); | |
9524 | if (!kbuf) { | |
9525 | ret = -ENOMEM; | |
9526 | break; | |
9527 | } | |
9528 | ||
9529 | err = ret = ssd_nand_read_w_oob(dev, kbuf, flash, chip, page, 1, ctrl_idx); | |
9530 | if (ret && (-EIO != ret)) { | |
9531 | kfree(kbuf); | |
9532 | break; | |
9533 | } | |
9534 | ||
9535 | if (copy_to_user(buf, kbuf, length)) { | |
9536 | kfree(kbuf); | |
9537 | ret = -EFAULT; | |
9538 | break; | |
9539 | } | |
9540 | ||
9541 | ret = err; | |
9542 | ||
9543 | kfree(kbuf); | |
9544 | break; | |
9545 | } | |
9546 | ||
9547 | case SSD_CMD_NAND_WRITE: { | |
9548 | struct ssd_flash_op_info flash_info; | |
9549 | int flash, chip, page, ctrl_idx; | |
9550 | uint32_t length; | |
9551 | ||
9552 | if (copy_from_user(&flash_info, argp, sizeof(struct ssd_flash_op_info))) { | |
9553 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9554 | ret = -EFAULT; | |
9555 | break; | |
9556 | } | |
9557 | ||
9558 | flash = flash_info.flash; | |
9559 | chip = flash_info.chip; | |
9560 | page = flash_info.page; | |
9561 | buf = flash_info.buf; | |
9562 | ctrl_idx = flash_info.ctrl_idx; | |
9563 | ||
9564 | length = dev->hw_info.page_size + dev->hw_info.oob_size; | |
9565 | ||
9566 | kbuf = kmalloc(length, GFP_KERNEL); | |
9567 | if (!kbuf) { | |
9568 | ret = -ENOMEM; | |
9569 | break; | |
9570 | } | |
9571 | ||
9572 | if (copy_from_user(kbuf, buf, length)) { | |
9573 | kfree(kbuf); | |
9574 | ret = -EFAULT; | |
9575 | break; | |
9576 | } | |
9577 | ||
9578 | ret = ssd_nand_write(dev, kbuf, flash, chip, page, 1, ctrl_idx); | |
9579 | if (ret) { | |
9580 | kfree(kbuf); | |
9581 | break; | |
9582 | } | |
9583 | ||
9584 | kfree(kbuf); | |
9585 | break; | |
9586 | } | |
9587 | ||
9588 | case SSD_CMD_NAND_ERASE: { | |
9589 | struct ssd_flash_op_info flash_info; | |
9590 | int flash, chip, page, ctrl_idx; | |
9591 | ||
9592 | if (copy_from_user(&flash_info, argp, sizeof(struct ssd_flash_op_info))) { | |
9593 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9594 | ret = -EFAULT; | |
9595 | break; | |
9596 | } | |
9597 | ||
9598 | flash = flash_info.flash; | |
9599 | chip = flash_info.chip; | |
9600 | page = flash_info.page; | |
9601 | ctrl_idx = flash_info.ctrl_idx; | |
9602 | ||
9603 | if ((page % dev->hw_info.page_count) != 0) { | |
9604 | ret = -EINVAL; | |
9605 | break; | |
9606 | } | |
9607 | ||
9608 | //hio_warn("erase fs = %llx\n", ofs); | |
9609 | ret = ssd_nand_erase(dev, flash, chip, page, ctrl_idx); | |
9610 | if (ret) { | |
9611 | break; | |
9612 | } | |
9613 | ||
9614 | break; | |
9615 | } | |
9616 | ||
9617 | case SSD_CMD_NAND_READ_EXT: { //ingore EIO | |
9618 | struct ssd_flash_op_info flash_info; | |
9619 | uint32_t length; | |
9620 | int flash, chip, page, ctrl_idx; | |
9621 | ||
9622 | if (copy_from_user(&flash_info, argp, sizeof(struct ssd_flash_op_info))) { | |
9623 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9624 | ret = -EFAULT; | |
9625 | break; | |
9626 | } | |
9627 | ||
9628 | flash = flash_info.flash; | |
9629 | chip = flash_info.chip; | |
9630 | page = flash_info.page; | |
9631 | buf = flash_info.buf; | |
9632 | ctrl_idx = flash_info.ctrl_idx; | |
9633 | ||
9634 | length = dev->hw_info.page_size + dev->hw_info.oob_size; | |
9635 | ||
9636 | kbuf = kmalloc(length, GFP_KERNEL); | |
9637 | if (!kbuf) { | |
9638 | ret = -ENOMEM; | |
9639 | break; | |
9640 | } | |
9641 | ||
9642 | ret = ssd_nand_read_w_oob(dev, kbuf, flash, chip, page, 1, ctrl_idx); | |
9643 | if (-EIO == ret) { //ingore EIO | |
9644 | ret = 0; | |
9645 | } | |
9646 | if (ret) { | |
9647 | kfree(kbuf); | |
9648 | break; | |
9649 | } | |
9650 | ||
9651 | if (copy_to_user(buf, kbuf, length)) { | |
9652 | kfree(kbuf); | |
9653 | ret = -EFAULT; | |
9654 | break; | |
9655 | } | |
9656 | ||
9657 | kfree(kbuf); | |
9658 | break; | |
9659 | } | |
9660 | ||
9661 | case SSD_CMD_UPDATE_BBT: { | |
9662 | struct ssd_flash_op_info flash_info; | |
9663 | int ctrl_idx, flash; | |
9664 | ||
9665 | if (copy_from_user(&flash_info, argp, sizeof(struct ssd_flash_op_info))) { | |
9666 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9667 | ret = -EFAULT; | |
9668 | break; | |
9669 | } | |
9670 | ||
9671 | ctrl_idx = flash_info.ctrl_idx; | |
9672 | flash = flash_info.flash; | |
9673 | ret = ssd_update_bbt(dev, flash, ctrl_idx); | |
9674 | if (ret) { | |
9675 | break; | |
9676 | } | |
9677 | ||
9678 | break; | |
9679 | } | |
9680 | ||
9681 | case SSD_CMD_CLEAR_ALARM: | |
9682 | ssd_clear_alarm(dev); | |
9683 | break; | |
9684 | ||
9685 | case SSD_CMD_SET_ALARM: | |
9686 | ssd_set_alarm(dev); | |
9687 | break; | |
9688 | ||
9689 | case SSD_CMD_RESET: | |
9690 | ret = ssd_do_reset(dev); | |
9691 | break; | |
9692 | ||
9693 | case SSD_CMD_RELOAD_FW: | |
9694 | dev->reload_fw = 1; | |
da3355df | 9695 | dev->has_non_0x98_reg_access = 1; |
361ebed5 HSDT |
9696 | if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) { |
9697 | ssd_reg32_write(dev->ctrlp + SSD_RELOAD_FW_REG, SSD_RELOAD_FLAG); | |
9698 | } else if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_1_1) { | |
9699 | ssd_reg32_write(dev->ctrlp + SSD_RELOAD_FW_REG, SSD_RELOAD_FW); | |
9700 | ||
9701 | } | |
9702 | break; | |
9703 | ||
9704 | case SSD_CMD_UNLOAD_DEV: { | |
9705 | if (atomic_read(&dev->refcnt)) { | |
9706 | ret = -EBUSY; | |
9707 | break; | |
9708 | } | |
9709 | ||
9710 | /* save smart */ | |
9711 | ssd_save_smart(dev); | |
9712 | ||
9713 | ret = ssd_flush(dev); | |
9714 | if (ret) { | |
9715 | break; | |
9716 | } | |
9717 | ||
9718 | /* cleanup the block device */ | |
9719 | if (test_and_clear_bit(SSD_INIT_BD, &dev->state)) { | |
9720 | mutex_lock(&dev->gd_mutex); | |
9721 | ssd_cleanup_blkdev(dev); | |
1197134c | 9722 | ssd_cleanup_queue(dev); |
361ebed5 HSDT |
9723 | mutex_unlock(&dev->gd_mutex); |
9724 | } | |
9725 | ||
9726 | break; | |
9727 | } | |
9728 | ||
9729 | case SSD_CMD_LOAD_DEV: { | |
9730 | ||
9731 | if (test_bit(SSD_INIT_BD, &dev->state)) { | |
9732 | ret = -EINVAL; | |
9733 | break; | |
9734 | } | |
9735 | ||
9736 | ret = ssd_init_smart(dev); | |
9737 | if (ret) { | |
9738 | hio_warn("%s: init info: failed\n", dev->name); | |
9739 | break; | |
9740 | } | |
9741 | ||
1197134c KM |
9742 | ret = ssd_init_queue(dev); |
9743 | if (ret) { | |
9744 | hio_warn("%s: init queue failed\n", dev->name); | |
9745 | break; | |
9746 | } | |
361ebed5 HSDT |
9747 | ret = ssd_init_blkdev(dev); |
9748 | if (ret) { | |
9749 | hio_warn("%s: register block device: failed\n", dev->name); | |
9750 | break; | |
9751 | } | |
9752 | (void)test_and_set_bit(SSD_INIT_BD, &dev->state); | |
9753 | ||
9754 | break; | |
9755 | } | |
9756 | ||
9757 | case SSD_CMD_UPDATE_VP: { | |
9758 | uint32_t val; | |
9759 | uint32_t new_vp, new_vp1 = 0; | |
9760 | ||
9761 | if (test_bit(SSD_INIT_BD, &dev->state)) { | |
9762 | ret = -EINVAL; | |
9763 | break; | |
9764 | } | |
9765 | ||
9766 | if (copy_from_user(&new_vp, argp, sizeof(uint32_t))) { | |
9767 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9768 | ret = -EFAULT; | |
9769 | break; | |
9770 | } | |
9771 | ||
9772 | if (new_vp > dev->hw_info.max_valid_pages || new_vp <= 0) { | |
9773 | ret = -EINVAL; | |
9774 | break; | |
9775 | } | |
9776 | ||
9777 | while (new_vp <= dev->hw_info.max_valid_pages) { | |
9778 | ssd_reg32_write(dev->ctrlp + SSD_VALID_PAGES_REG, new_vp); | |
9779 | msleep(10); | |
9780 | val = ssd_reg32_read(dev->ctrlp + SSD_VALID_PAGES_REG); | |
9781 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
9782 | new_vp1 = val & 0x3FF; | |
9783 | } else { | |
9784 | new_vp1 = val & 0x7FFF; | |
9785 | } | |
9786 | ||
9787 | if (new_vp1 == new_vp) { | |
9788 | break; | |
9789 | } | |
9790 | ||
9791 | new_vp++; | |
9792 | /*if (new_vp == dev->hw_info.valid_pages) { | |
9793 | new_vp++; | |
9794 | }*/ | |
9795 | } | |
9796 | ||
9797 | if (new_vp1 != new_vp || new_vp > dev->hw_info.max_valid_pages) { | |
9798 | /* restore */ | |
9799 | ssd_reg32_write(dev->ctrlp + SSD_VALID_PAGES_REG, dev->hw_info.valid_pages); | |
9800 | ret = -EINVAL; | |
9801 | break; | |
9802 | } | |
9803 | ||
9804 | if (copy_to_user(argp, &new_vp, sizeof(uint32_t))) { | |
9805 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9806 | ssd_reg32_write(dev->ctrlp + SSD_VALID_PAGES_REG, dev->hw_info.valid_pages); | |
9807 | ret = -EFAULT; | |
9808 | break; | |
9809 | } | |
9810 | ||
9811 | /* new */ | |
9812 | dev->hw_info.valid_pages = new_vp; | |
9813 | dev->hw_info.size = (uint64_t)dev->hw_info.valid_pages * dev->hw_info.page_size; | |
9814 | dev->hw_info.size *= (dev->hw_info.block_count - dev->hw_info.reserved_blks); | |
9815 | dev->hw_info.size *= ((uint64_t)dev->hw_info.nr_data_ch * (uint64_t)dev->hw_info.nr_chip * (uint64_t)dev->hw_info.nr_ctrl); | |
9816 | ||
9817 | break; | |
9818 | } | |
9819 | ||
9820 | case SSD_CMD_FULL_RESET: { | |
9821 | ret = ssd_full_reset(dev); | |
9822 | break; | |
9823 | } | |
9824 | ||
9825 | case SSD_CMD_GET_NR_LOG: { | |
9826 | if (copy_to_user(argp, &dev->internal_log.nr_log, sizeof(dev->internal_log.nr_log))) { | |
9827 | ret = -EFAULT; | |
9828 | break; | |
9829 | } | |
9830 | break; | |
9831 | } | |
9832 | ||
9833 | case SSD_CMD_GET_LOG: { | |
9834 | uint32_t length = dev->rom_info.log_sz; | |
9835 | ||
9836 | buf = argp; | |
9837 | ||
9838 | if (copy_to_user(buf, dev->internal_log.log, length)) { | |
9839 | ret = -EFAULT; | |
9840 | break; | |
9841 | } | |
9842 | ||
9843 | break; | |
9844 | } | |
9845 | ||
9846 | case SSD_CMD_LOG_LEVEL: { | |
9847 | int level = 0; | |
9848 | if (copy_from_user(&level, argp, sizeof(int))) { | |
9849 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9850 | ret = -EFAULT; | |
9851 | break; | |
9852 | } | |
9853 | ||
9854 | if (level >= SSD_LOG_NR_LEVEL || level < SSD_LOG_LEVEL_INFO) { | |
9855 | level = SSD_LOG_LEVEL_ERR; | |
9856 | } | |
9857 | ||
9858 | //just for showing log, no need to protect | |
9859 | log_level = level; | |
9860 | break; | |
9861 | } | |
9862 | ||
9863 | case SSD_CMD_OT_PROTECT: { | |
9864 | int protect = 0; | |
9865 | ||
9866 | if (copy_from_user(&protect, argp, sizeof(int))) { | |
9867 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9868 | ret = -EFAULT; | |
9869 | break; | |
9870 | } | |
9871 | ||
9872 | ssd_set_ot_protect(dev, !!protect); | |
9873 | break; | |
9874 | } | |
9875 | ||
9876 | case SSD_CMD_GET_OT_STATUS: { | |
9877 | int status = ssd_get_ot_status(dev, &status); | |
9878 | ||
9879 | if (copy_to_user(argp, &status, sizeof(int))) { | |
9880 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9881 | ret = -EFAULT; | |
9882 | break; | |
9883 | } | |
9884 | break; | |
9885 | } | |
9886 | ||
9887 | case SSD_CMD_CLEAR_LOG: { | |
9888 | ret = ssd_clear_log(dev); | |
9889 | break; | |
9890 | } | |
9891 | ||
9892 | case SSD_CMD_CLEAR_SMART: { | |
9893 | ret = ssd_clear_smart(dev); | |
9894 | break; | |
9895 | } | |
9896 | ||
1197134c KM |
9897 | case SSD_CMD_CLEAR_WARNING: { |
9898 | ret = ssd_clear_warning(dev); | |
9899 | break; | |
9900 | } | |
9901 | ||
361ebed5 HSDT |
9902 | case SSD_CMD_SW_LOG: { |
9903 | struct ssd_sw_log_info sw_log; | |
9904 | ||
9905 | if (copy_from_user(&sw_log, argp, sizeof(struct ssd_sw_log_info))) { | |
9906 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9907 | ret = -EFAULT; | |
9908 | break; | |
9909 | } | |
9910 | ||
9911 | ret = ssd_gen_swlog(dev, sw_log.event, sw_log.data); | |
9912 | break; | |
9913 | } | |
9914 | ||
9915 | case SSD_CMD_GET_LABEL: { | |
9916 | ||
9917 | if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) { | |
9918 | ret = -EINVAL; | |
9919 | break; | |
9920 | } | |
9921 | ||
9922 | if (copy_to_user(argp, &dev->label, sizeof(struct ssd_label))) { | |
9923 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9924 | ret = -EFAULT; | |
9925 | break; | |
9926 | } | |
9927 | break; | |
9928 | } | |
9929 | ||
9930 | case SSD_CMD_GET_VERSION: { | |
9931 | struct ssd_version_info ver; | |
9932 | ||
9933 | mutex_lock(&dev->fw_mutex); | |
9934 | ret = __ssd_get_version(dev, &ver); | |
9935 | mutex_unlock(&dev->fw_mutex); | |
9936 | if (ret) { | |
9937 | break; | |
9938 | } | |
9939 | ||
9940 | if (copy_to_user(argp, &ver, sizeof(struct ssd_version_info))) { | |
9941 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9942 | ret = -EFAULT; | |
9943 | break; | |
9944 | } | |
9945 | break; | |
9946 | } | |
9947 | ||
9948 | case SSD_CMD_GET_TEMPERATURE: { | |
9949 | int temp; | |
9950 | ||
9951 | mutex_lock(&dev->fw_mutex); | |
9952 | ret = __ssd_get_temperature(dev, &temp); | |
9953 | mutex_unlock(&dev->fw_mutex); | |
9954 | if (ret) { | |
9955 | break; | |
9956 | } | |
9957 | ||
9958 | if (copy_to_user(argp, &temp, sizeof(int))) { | |
9959 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9960 | ret = -EFAULT; | |
9961 | break; | |
9962 | } | |
9963 | break; | |
9964 | } | |
9965 | ||
9966 | case SSD_CMD_GET_BMSTATUS: { | |
9967 | int status; | |
9968 | ||
9969 | mutex_lock(&dev->fw_mutex); | |
9970 | if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) { | |
9971 | if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) { | |
9972 | status = SSD_BMSTATUS_WARNING; | |
9973 | } else { | |
9974 | status = SSD_BMSTATUS_OK; | |
9975 | } | |
9976 | } else if(dev->protocol_info.ver > SSD_PROTOCOL_V3) { | |
9977 | ret = __ssd_bm_status(dev, &status); | |
9978 | } else { | |
9979 | status = SSD_BMSTATUS_OK; | |
9980 | } | |
9981 | mutex_unlock(&dev->fw_mutex); | |
9982 | if (ret) { | |
9983 | break; | |
9984 | } | |
9985 | ||
9986 | if (copy_to_user(argp, &status, sizeof(int))) { | |
9987 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9988 | ret = -EFAULT; | |
9989 | break; | |
9990 | } | |
9991 | break; | |
9992 | } | |
9993 | ||
9994 | case SSD_CMD_GET_LABEL2: { | |
9995 | void *label; | |
9996 | int length; | |
9997 | ||
9998 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
9999 | label = &dev->label; | |
10000 | length = sizeof(struct ssd_label); | |
10001 | } else { | |
10002 | label = &dev->labelv3; | |
10003 | length = sizeof(struct ssd_labelv3); | |
10004 | } | |
10005 | ||
10006 | if (copy_to_user(argp, label, length)) { | |
10007 | ret = -EFAULT; | |
10008 | break; | |
10009 | } | |
10010 | break; | |
10011 | } | |
10012 | ||
10013 | case SSD_CMD_FLUSH: | |
10014 | ret = ssd_flush(dev); | |
10015 | if (ret) { | |
10016 | hio_warn("%s: ssd_flush: failed\n", dev->name); | |
10017 | ret = -EFAULT; | |
10018 | break; | |
10019 | } | |
10020 | break; | |
10021 | ||
10022 | case SSD_CMD_SAVE_MD: { | |
10023 | int save_md = 0; | |
10024 | ||
10025 | if (copy_from_user(&save_md, argp, sizeof(int))) { | |
10026 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
10027 | ret = -EFAULT; | |
10028 | break; | |
10029 | } | |
10030 | ||
10031 | dev->save_md = !!save_md; | |
10032 | break; | |
10033 | } | |
10034 | ||
10035 | case SSD_CMD_SET_WMODE: { | |
10036 | int new_wmode = 0; | |
10037 | ||
10038 | if (copy_from_user(&new_wmode, argp, sizeof(int))) { | |
10039 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
10040 | ret = -EFAULT; | |
10041 | break; | |
10042 | } | |
10043 | ||
10044 | ret = __ssd_set_wmode(dev, new_wmode); | |
10045 | if (ret) { | |
10046 | break; | |
10047 | } | |
10048 | ||
10049 | break; | |
10050 | } | |
10051 | ||
10052 | case SSD_CMD_GET_WMODE: { | |
10053 | if (copy_to_user(argp, &dev->wmode, sizeof(int))) { | |
10054 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
10055 | ret = -EFAULT; | |
10056 | break; | |
10057 | } | |
10058 | ||
10059 | break; | |
10060 | } | |
10061 | ||
10062 | case SSD_CMD_GET_USER_WMODE: { | |
10063 | if (copy_to_user(argp, &dev->user_wmode, sizeof(int))) { | |
10064 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
10065 | ret = -EFAULT; | |
10066 | break; | |
10067 | } | |
10068 | ||
10069 | break; | |
10070 | } | |
10071 | ||
10072 | case SSD_CMD_DEBUG: { | |
10073 | struct ssd_debug_info db_info; | |
10074 | ||
10075 | if (!finject) { | |
10076 | ret = -EOPNOTSUPP; | |
10077 | break; | |
10078 | } | |
10079 | ||
10080 | if (copy_from_user(&db_info, argp, sizeof(struct ssd_debug_info))) { | |
10081 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
10082 | ret = -EFAULT; | |
10083 | break; | |
10084 | } | |
10085 | ||
10086 | if (db_info.type < SSD_DEBUG_NONE || db_info.type >= SSD_DEBUG_NR) { | |
10087 | ret = -EINVAL; | |
10088 | break; | |
10089 | } | |
10090 | ||
10091 | /* IO */ | |
10092 | if (db_info.type >= SSD_DEBUG_READ_ERR && db_info.type <= SSD_DEBUG_RW_ERR && | |
10093 | (db_info.data.loc.off + db_info.data.loc.len) > (dev->hw_info.size >> 9)) { | |
10094 | ret = -EINVAL; | |
10095 | break; | |
10096 | } | |
10097 | ||
10098 | memcpy(&dev->db_info, &db_info, sizeof(struct ssd_debug_info)); | |
10099 | ||
10100 | #ifdef SSD_OT_PROTECT | |
10101 | /* temperature */ | |
10102 | if (db_info.type == SSD_DEBUG_NONE) { | |
10103 | ssd_check_temperature(dev, SSD_OT_TEMP); | |
10104 | } else if (db_info.type == SSD_DEBUG_LOG) { | |
10105 | if (db_info.data.log.event == SSD_LOG_OVER_TEMP) { | |
10106 | dev->ot_delay = SSD_OT_DELAY; | |
10107 | } else if (db_info.data.log.event == SSD_LOG_NORMAL_TEMP) { | |
10108 | dev->ot_delay = 0; | |
10109 | } | |
10110 | } | |
10111 | #endif | |
10112 | ||
10113 | /* offline */ | |
10114 | if (db_info.type == SSD_DEBUG_OFFLINE) { | |
10115 | test_and_clear_bit(SSD_ONLINE, &dev->state); | |
10116 | } else if (db_info.type == SSD_DEBUG_NONE) { | |
10117 | (void)test_and_set_bit(SSD_ONLINE, &dev->state); | |
10118 | } | |
10119 | ||
10120 | /* log */ | |
10121 | if (db_info.type == SSD_DEBUG_LOG && dev->event_call && dev->gd) { | |
10122 | dev->event_call(dev->gd, db_info.data.log.event, 0); | |
10123 | } | |
10124 | ||
10125 | break; | |
10126 | } | |
10127 | ||
10128 | case SSD_CMD_DRV_PARAM_INFO: { | |
10129 | struct ssd_drv_param_info drv_param; | |
10130 | ||
10131 | memset(&drv_param, 0, sizeof(struct ssd_drv_param_info)); | |
10132 | ||
10133 | drv_param.mode = mode; | |
10134 | drv_param.status_mask = status_mask; | |
10135 | drv_param.int_mode = int_mode; | |
10136 | drv_param.threaded_irq = threaded_irq; | |
10137 | drv_param.log_level = log_level; | |
10138 | drv_param.wmode = wmode; | |
10139 | drv_param.ot_protect = ot_protect; | |
10140 | drv_param.finject = finject; | |
10141 | ||
10142 | if (copy_to_user(argp, &drv_param, sizeof(struct ssd_drv_param_info))) { | |
10143 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
10144 | ret = -EFAULT; | |
10145 | break; | |
10146 | } | |
10147 | break; | |
10148 | } | |
10149 | ||
10150 | default: | |
10151 | ret = -EINVAL; | |
10152 | break; | |
10153 | } | |
10154 | ||
10155 | return ret; | |
10156 | } | |
10157 | ||
10158 | ||
10159 | #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27)) | |
10160 | static int ssd_block_ioctl(struct inode *inode, struct file *file, | |
10161 | unsigned int cmd, unsigned long arg) | |
10162 | { | |
10163 | struct ssd_device *dev; | |
10164 | void __user *argp = (void __user *)arg; | |
10165 | int ret = 0; | |
10166 | ||
10167 | if (!inode) { | |
10168 | return -EINVAL; | |
10169 | } | |
10170 | dev = inode->i_bdev->bd_disk->private_data; | |
10171 | if (!dev) { | |
10172 | return -EINVAL; | |
10173 | } | |
10174 | #else | |
10175 | static int ssd_block_ioctl(struct block_device *bdev, fmode_t mode, | |
10176 | unsigned int cmd, unsigned long arg) | |
10177 | { | |
10178 | struct ssd_device *dev; | |
10179 | void __user *argp = (void __user *)arg; | |
10180 | int ret = 0; | |
10181 | ||
10182 | if (!bdev) { | |
10183 | return -EINVAL; | |
10184 | } | |
10185 | ||
10186 | dev = bdev->bd_disk->private_data; | |
10187 | if (!dev) { | |
10188 | return -EINVAL; | |
10189 | } | |
10190 | #endif | |
10191 | ||
10192 | switch (cmd) { | |
10193 | case HDIO_GETGEO: { | |
10194 | struct hd_geometry geo; | |
10195 | geo.cylinders = (dev->hw_info.size & ~0x3f) >> 6; | |
10196 | geo.heads = 4; | |
10197 | geo.sectors = 16; | |
10198 | #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27)) | |
10199 | geo.start = get_start_sect(inode->i_bdev); | |
10200 | #else | |
10201 | geo.start = get_start_sect(bdev); | |
10202 | #endif | |
10203 | if (copy_to_user(argp, &geo, sizeof(geo))) { | |
10204 | ret = -EFAULT; | |
10205 | break; | |
10206 | } | |
10207 | ||
10208 | break; | |
10209 | } | |
10210 | ||
10211 | case BLKFLSBUF: | |
10212 | ret = ssd_flush(dev); | |
10213 | if (ret) { | |
10214 | hio_warn("%s: ssd_flush: failed\n", dev->name); | |
10215 | ret = -EFAULT; | |
10216 | break; | |
10217 | } | |
10218 | break; | |
10219 | ||
10220 | default: | |
10221 | if (!dev->slave) { | |
10222 | ret = ssd_ioctl_common(dev, cmd, arg); | |
10223 | } else { | |
10224 | ret = -EFAULT; | |
10225 | } | |
10226 | break; | |
10227 | } | |
10228 | ||
10229 | return ret; | |
10230 | } | |
10231 | ||
10232 | ||
10233 | static void ssd_free_dev(struct kref *kref) | |
10234 | { | |
10235 | struct ssd_device *dev; | |
10236 | ||
10237 | if (!kref) { | |
10238 | return; | |
10239 | } | |
10240 | ||
10241 | dev = container_of(kref, struct ssd_device, kref); | |
10242 | ||
10243 | put_disk(dev->gd); | |
10244 | ||
10245 | ssd_put_index(dev->slave, dev->idx); | |
10246 | ||
10247 | kfree(dev); | |
10248 | } | |
10249 | ||
/* Drop one reference; ssd_free_dev() runs when the last one goes away. */
static void ssd_put(struct ssd_device *dev)
{
	kref_put(&dev->kref, ssd_free_dev);
}
10254 | ||
/* Take one reference on the device. Always succeeds (returns 0). */
static int ssd_get(struct ssd_device *dev)
{
	kref_get(&dev->kref);
	return 0;
}
10260 | ||
10261 | /* block device */ | |
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
/*
 * Block-device open (pre-2.6.28 prototype): resolve the ssd_device
 * behind the gendisk, take a kref and count the open handle.
 */
static int ssd_block_open(struct inode *inode, struct file *filp)
{
	struct ssd_device *dev;

	if (!inode) {
		return -EINVAL;
	}

	dev = inode->i_bdev->bd_disk->private_data;
	if (!dev) {
		return -EINVAL;
	}
#else
/* Block-device open (2.6.28+ prototype). */
static int ssd_block_open(struct block_device *bdev, fmode_t mode)
{
	struct ssd_device *dev;

	if (!bdev) {
		return -EINVAL;
	}

	dev = bdev->bd_disk->private_data;
	if (!dev) {
		return -EINVAL;
	}
#endif

	/*if (!try_module_get(dev->owner))
		return -ENODEV;
	*/

	ssd_get(dev);

	/* refcnt counts open handles; SSD_CMD_UNLOAD_DEV refuses while non-zero */
	atomic_inc(&dev->refcnt);

	return 0;
}
10300 | ||
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
/*
 * Block-device release. Three prototypes across kernel versions:
 * inode/file (<= 2.6.27), gendisk returning int (<= 3.9),
 * gendisk returning void (> 3.9). All undo what ssd_block_open() did.
 */
static int ssd_block_release(struct inode *inode, struct file *filp)
{
	struct ssd_device *dev;

	if (!inode) {
		return -EINVAL;
	}

	dev = inode->i_bdev->bd_disk->private_data;
	if (!dev) {
		return -EINVAL;
	}
#elif (LINUX_VERSION_CODE <= KERNEL_VERSION(3,9,0))
static int ssd_block_release(struct gendisk *disk, fmode_t mode)
{
	struct ssd_device *dev;

	if (!disk) {
		return -EINVAL;
	}

	dev = disk->private_data;
	if (!dev) {
		return -EINVAL;
	}
#else
static void ssd_block_release(struct gendisk *disk, fmode_t mode)
{
	struct ssd_device *dev;

	if (!disk) {
		return;
	}

	dev = disk->private_data;
	if (!dev) {
		return;
	}
#endif

	/* one less open handle, then drop the kref taken at open time */
	atomic_dec(&dev->refcnt);

	ssd_put(dev);

	//module_put(dev->owner);
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3,9,0))
	return 0;
#endif
}
10351 | ||
/* Block-device operations; .getgeo exists only on kernels >= 2.6.16. */
static struct block_device_operations ssd_fops = {
	.owner = THIS_MODULE,
	.open = ssd_block_open,
	.release = ssd_block_release,
	.ioctl = ssd_block_ioctl,
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16))
	.getgeo = ssd_block_getgeo,
#endif
};
10361 | ||
/*
 * Enable and size TRIM/discard support on the request queue.
 * Compiled out unless SSD_TRIM is defined and the kernel is >= 2.6.32;
 * at runtime it is a no-op for protocol V3 and older devices.
 */
static void ssd_init_trim(ssd_device_t *dev)
{
#if (defined SSD_TRIM && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)))
	if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) {
		return;
	}
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,17,0))
	blk_queue_flag_set(QUEUE_FLAG_DISCARD, dev->rq);
#else
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, dev->rq);
#endif

#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33)) || (defined RHEL_MAJOR && RHEL_MAJOR >= 6))
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0))
	/* discard_zeroes_data was removed from the kernel in 4.12 */
	dev->rq->limits.discard_zeroes_data = 1;
#endif
	dev->rq->limits.discard_alignment = 4096;
	dev->rq->limits.discard_granularity = 4096;
#endif
	/* V3.2.4+ firmware can discard a full multi-sg command per request */
	if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2_4) {
		dev->rq->limits.max_discard_sectors = dev->hw_info.sg_max_sec;
	} else {
		dev->rq->limits.max_discard_sectors = (dev->hw_info.sg_max_sec) * (dev->hw_info.cmd_max_sg);
	}
#endif
}
10388 | ||
/* Drain in-flight I/O, then release the request queue and clear the pointer. */
static void ssd_cleanup_queue(struct ssd_device *dev)
{
	ssd_wait_io(dev);

	blk_cleanup_queue(dev->rq);
	dev->rq = NULL;
}
10396 | ||
/*
 * Allocate and configure the bio-based request queue for the device.
 * Segment/sector limits come from hw_info; the queue is marked
 * non-rotational and TRIM is wired up via ssd_init_trim().
 * Returns 0 on success, -ENOMEM if the queue cannot be allocated.
 */
static int ssd_init_queue(struct ssd_device *dev)
{
	dev->rq = blk_alloc_queue(GFP_KERNEL);
	if (dev->rq == NULL) {
		hio_warn("%s: alloc queue: failed\n ", dev->name);
		goto out_init_queue;
	}

	/* must be first */
	blk_queue_make_request(dev->rq, ssd_make_request);

#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34)) && !(defined RHEL_MAJOR && RHEL_MAJOR == 6))
	blk_queue_max_hw_segments(dev->rq, dev->hw_info.cmd_max_sg);
	blk_queue_max_phys_segments(dev->rq, dev->hw_info.cmd_max_sg);
	blk_queue_max_sectors(dev->rq, dev->hw_info.sg_max_sec);
#else
	blk_queue_max_segments(dev->rq, dev->hw_info.cmd_max_sg);
	blk_queue_max_hw_sectors(dev->rq, dev->hw_info.sg_max_sec);
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31))
	blk_queue_hardsect_size(dev->rq, 512);
#else
	blk_queue_logical_block_size(dev->rq, 512);
#endif
	/* not work for make_request based drivers(bio) */
	blk_queue_max_segment_size(dev->rq, dev->hw_info.sg_max_sec << 9);

	blk_queue_bounce_limit(dev->rq, BLK_BOUNCE_HIGH);

	dev->rq->queuedata = dev;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
	blk_queue_issue_flush_fn(dev->rq, ssd_issue_flush_fn);
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,17,0))
	blk_queue_flag_set(QUEUE_FLAG_NONROT, dev->rq);
#else
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, dev->rq);
#endif
#endif

	ssd_init_trim(dev);

	return 0;

out_init_queue:
	return -ENOMEM;
}
10448 | ||
/* Unregister the gendisk; the disk reference itself is dropped later
 * by ssd_free_dev() or recycled by ssd_init_blkdev(). */
static void ssd_cleanup_blkdev(struct ssd_device *dev)
{
	del_gendisk(dev->gd);
}
10453 | ||
/*
 * Allocate and register the gendisk for the device.
 * Any previous gendisk reference is released first so the function can
 * be called again after SSD_CMD_UNLOAD_DEV. Capacity is hw_info.size
 * (bytes) converted to 512-byte sectors.
 * Returns 0 on success, -ENOMEM if alloc_disk() fails.
 */
static int ssd_init_blkdev(struct ssd_device *dev)
{
	if (dev->gd) {
		put_disk(dev->gd);
	}

	dev->gd = alloc_disk(ssd_minors);
	if (!dev->gd) {
		hio_warn("%s: alloc_disk fail\n", dev->name);
		goto out_alloc_gd;
	}
	dev->gd->major = dev->major;
	dev->gd->first_minor = dev->idx * ssd_minors;
	dev->gd->fops = &ssd_fops;
	dev->gd->queue = dev->rq;
	dev->gd->private_data = dev;

	snprintf (dev->gd->disk_name, sizeof(dev->gd->disk_name), "%s", dev->name);

	set_capacity(dev->gd, dev->hw_info.size >> 9);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0))
	/* 4.8+ registers the parent device and the disk in one call */
	device_add_disk(&dev->pdev->dev, dev->gd);
#else
	dev->gd->driverfs_dev = &dev->pdev->dev;
	add_disk(dev->gd);
#endif

	return 0;

out_alloc_gd:
	return -ENOMEM;
}
10487 | ||
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,10))
/* Character-device ioctl (old inode/file prototype, <= 2.6.10). */
static int ssd_ioctl(struct inode *inode, struct file *file,
	unsigned int cmd, unsigned long arg)
#else
/* Character-device ioctl (unlocked_ioctl prototype, > 2.6.10). */
static long ssd_ioctl(struct file *file,
	unsigned int cmd, unsigned long arg)
#endif
{
	struct ssd_device *dev;

	if (!file) {
		return -EINVAL;
	}

	dev = file->private_data;
	if (!dev) {
		return -EINVAL;
	}

	/* all vendor commands go through the shared dispatcher */
	return (long)ssd_ioctl_common(dev, cmd, arg);
}
10509 | ||
10510 | static int ssd_open(struct inode *inode, struct file *file) | |
10511 | { | |
10512 | struct ssd_device *dev = NULL; | |
10513 | struct ssd_device *n = NULL; | |
10514 | int idx; | |
10515 | int ret = -ENODEV; | |
10516 | ||
10517 | if (!inode || !file) { | |
10518 | return -EINVAL; | |
10519 | } | |
10520 | ||
10521 | idx = iminor(inode); | |
10522 | ||
10523 | list_for_each_entry_safe(dev, n, &ssd_list, list) { | |
10524 | if (dev->idx == idx) { | |
10525 | ret = 0; | |
10526 | break; | |
10527 | } | |
10528 | } | |
10529 | ||
10530 | if (ret) { | |
10531 | return ret; | |
10532 | } | |
10533 | ||
10534 | file->private_data = dev; | |
10535 | ||
10536 | ssd_get(dev); | |
10537 | ||
10538 | return 0; | |
10539 | } | |
10540 | ||
10541 | static int ssd_release(struct inode *inode, struct file *file) | |
10542 | { | |
10543 | struct ssd_device *dev; | |
10544 | ||
10545 | if (!file) { | |
10546 | return -EINVAL; | |
10547 | } | |
10548 | ||
10549 | dev = file->private_data; | |
10550 | if (!dev) { | |
10551 | return -EINVAL; | |
10552 | } | |
10553 | ||
10554 | ssd_put(dev); | |
10555 | ||
10556 | file->private_data = NULL; | |
10557 | ||
10558 | return 0; | |
10559 | } | |
10560 | ||
1197134c KM |
/*
 * Re-program the hardware's message/response DMA base registers after
 * a reset so the controller resumes using the driver's existing DMA
 * buffers. Write order matters: response pointers are reset first,
 * then the base registers are restored. Always returns 0.
 */
static int ssd_reload_ssd_ptr(struct ssd_device *dev)
{
	ssd_reset_resp_ptr(dev);

	/* update base reg address (message base exists from protocol V3 on) */
	if (dev->protocol_info.ver >= SSD_PROTOCOL_V3) {

		ssd_reg_write(dev->ctrlp + SSD_MSG_BASE_REG, dev->msg_base_dma);
	}

	/* update response base reg address */
	ssd_reg_write(dev->ctrlp + SSD_RESP_FIFO_REG, dev->resp_msg_base_dma);
	ssd_reg_write(dev->ctrlp + SSD_RESP_PTR_REG, dev->resp_ptr_base_dma);

	return 0;
}
10577 | ||
361ebed5 HSDT |
/* Control character-device operations; ioctl slot picked per kernel version. */
static struct file_operations ssd_cfops = {
	.owner = THIS_MODULE,
	.open = ssd_open,
	.release = ssd_release,
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,10))
	.ioctl = ssd_ioctl,
#else
	.unlocked_ioctl = ssd_ioctl,
#endif
};
10588 | ||
/*
 * Remove the per-device control node ("c<name>"). Slave devices never
 * created one, so they return immediately. The #if ladder mirrors the
 * class/devfs API churn across kernel versions (see ssd_init_chardev()).
 */
static void ssd_cleanup_chardev(struct ssd_device *dev)
{
	if (dev->slave) {
		return;
	}

#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
	class_simple_device_remove(MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx));
	devfs_remove("c%s", dev->name);
#elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,14))
	class_device_destroy(ssd_class, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx));
	devfs_remove("c%s", dev->name);
#elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17))
	class_device_destroy(ssd_class, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx));
	devfs_remove("c%s", dev->name);
#elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,24))
	class_device_destroy(ssd_class, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx));
#else
	device_destroy(ssd_class, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx));
#endif
}
10610 | ||
/*
 * Create the per-device control node ("c<name>") under ssd_class.
 * Skipped for slave devices. Each branch of the #if ladder matches
 * one era of the devfs/class/device_create API. Returns 0 on success
 * or the devfs_mk_cdev() error on old kernels; the newer class-based
 * calls are not checked (return value discarded, as in the original).
 */
static int ssd_init_chardev(struct ssd_device *dev)
{
	int ret = 0;

	if (dev->slave) {
		return 0;
	}

#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
	ret = devfs_mk_cdev(MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx), S_IFCHR|S_IRUSR|S_IWUSR, "c%s", dev->name);
	if (ret) {
		goto out;
	}
	class_simple_device_add(ssd_class, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx), NULL, "c%s", dev->name);
out:
#elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,14))
	ret = devfs_mk_cdev(MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx), S_IFCHR|S_IRUSR|S_IWUSR, "c%s", dev->name);
	if (ret) {
		goto out;
	}
	class_device_create(ssd_class, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx), NULL, "c%s", dev->name);
out:
#elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17))
	ret = devfs_mk_cdev(MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx), S_IFCHR|S_IRUSR|S_IWUSR, "c%s", dev->name);
	if (ret) {
		goto out;
	}
	class_device_create(ssd_class, NULL, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx), NULL, "c%s", dev->name);
out:
#elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,24))
	class_device_create(ssd_class, NULL, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx), NULL, "c%s", dev->name);
#elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
	device_create(ssd_class, NULL, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx), "c%s", dev->name);
#elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27))
	device_create_drvdata(ssd_class, NULL, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx), NULL, "c%s", dev->name);
#else
	device_create(ssd_class, NULL, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx), NULL, "c%s", dev->name);
#endif

	return ret;
}
10652 | ||
10653 | static int ssd_check_hw(struct ssd_device *dev) | |
10654 | { | |
10655 | uint32_t test_data = 0x55AA5AA5; | |
10656 | uint32_t read_data; | |
10657 | ||
10658 | ssd_reg32_write(dev->ctrlp + SSD_BRIDGE_TEST_REG, test_data); | |
10659 | read_data = ssd_reg32_read(dev->ctrlp + SSD_BRIDGE_TEST_REG); | |
10660 | if (read_data != ~(test_data)) { | |
10661 | //hio_warn("%s: check bridge error: %#x\n", dev->name, read_data); | |
10662 | return -1; | |
10663 | } | |
10664 | ||
10665 | return 0; | |
10666 | } | |
10667 | ||
10668 | static int ssd_check_fw(struct ssd_device *dev) | |
10669 | { | |
10670 | uint32_t val = 0; | |
10671 | int i; | |
10672 | ||
10673 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_1_3) { | |
10674 | return 0; | |
10675 | } | |
10676 | ||
10677 | for (i=0; i<SSD_CONTROLLER_WAIT; i++) { | |
10678 | val = ssd_reg32_read(dev->ctrlp + SSD_HW_STATUS_REG); | |
10679 | if ((val & 0x1) && ((val >> 8) & 0x1)) { | |
10680 | break; | |
10681 | } | |
10682 | ||
10683 | msleep(SSD_INIT_WAIT); | |
10684 | } | |
10685 | ||
10686 | if (!(val & 0x1)) { | |
10687 | /* controller fw status */ | |
10688 | hio_warn("%s: controller firmware load failed: %#x\n", dev->name, val); | |
10689 | return -1; | |
10690 | } else if (!((val >> 8) & 0x1)) { | |
10691 | /* controller state */ | |
10692 | hio_warn("%s: controller state error: %#x\n", dev->name, val); | |
10693 | return -1; | |
10694 | } | |
10695 | ||
10696 | val = ssd_reg32_read(dev->ctrlp + SSD_RELOAD_FW_REG); | |
10697 | if (val) { | |
10698 | dev->reload_fw = 1; | |
10699 | } | |
10700 | ||
10701 | return 0; | |
10702 | } | |
10703 | ||
10704 | static int ssd_init_fw_info(struct ssd_device *dev) | |
10705 | { | |
10706 | uint32_t val; | |
10707 | int ret = 0; | |
10708 | ||
10709 | val = ssd_reg32_read(dev->ctrlp + SSD_BRIDGE_VER_REG); | |
10710 | dev->hw_info.bridge_ver = val & 0xFFF; | |
10711 | if (dev->hw_info.bridge_ver < SSD_FW_MIN) { | |
10712 | hio_warn("%s: bridge firmware version %03X is not supported\n", dev->name, dev->hw_info.bridge_ver); | |
10713 | return -EINVAL; | |
10714 | } | |
10715 | hio_info("%s: bridge firmware version: %03X\n", dev->name, dev->hw_info.bridge_ver); | |
10716 | ||
10717 | ret = ssd_check_fw(dev); | |
10718 | if (ret) { | |
10719 | goto out; | |
10720 | } | |
10721 | ||
10722 | out: | |
10723 | /* skip error if not in standard mode */ | |
10724 | if (mode != SSD_DRV_MODE_STANDARD) { | |
10725 | ret = 0; | |
10726 | } | |
10727 | return ret; | |
10728 | } | |
10729 | ||
10730 | static int ssd_check_clock(struct ssd_device *dev) | |
10731 | { | |
10732 | uint32_t val; | |
10733 | int ret = 0; | |
10734 | ||
10735 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_1_3) { | |
10736 | return 0; | |
10737 | } | |
10738 | ||
10739 | val = ssd_reg32_read(dev->ctrlp + SSD_HW_STATUS_REG); | |
10740 | ||
10741 | /* clock status */ | |
10742 | if (!((val >> 4 ) & 0x1)) { | |
10743 | if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_166M_LOST), &dev->hwmon)) { | |
10744 | hio_warn("%s: 166MHz clock losed: %#x\n", dev->name, val); | |
10745 | ssd_gen_swlog(dev, SSD_LOG_CLK_FAULT, val); | |
10746 | } | |
10747 | ret = -1; | |
10748 | } | |
10749 | ||
10750 | if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) { | |
10751 | if (!((val >> 5 ) & 0x1)) { | |
10752 | if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_166M_SKEW), &dev->hwmon)) { | |
10753 | hio_warn("%s: 166MHz clock is skew: %#x\n", dev->name, val); | |
10754 | ssd_gen_swlog(dev, SSD_LOG_CLK_FAULT, val); | |
10755 | } | |
10756 | ret = -1; | |
10757 | } | |
10758 | if (!((val >> 6 ) & 0x1)) { | |
10759 | if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_156M_LOST), &dev->hwmon)) { | |
10760 | hio_warn("%s: 156.25MHz clock lost: %#x\n", dev->name, val); | |
10761 | ssd_gen_swlog(dev, SSD_LOG_CLK_FAULT, val); | |
10762 | } | |
10763 | ret = -1; | |
10764 | } | |
10765 | if (!((val >> 7 ) & 0x1)) { | |
10766 | if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_156M_SKEW), &dev->hwmon)) { | |
10767 | hio_warn("%s: 156.25MHz clock is skew: %#x\n", dev->name, val); | |
10768 | ssd_gen_swlog(dev, SSD_LOG_CLK_FAULT, val); | |
10769 | } | |
10770 | ret = -1; | |
10771 | } | |
10772 | } | |
10773 | ||
10774 | return ret; | |
10775 | } | |
10776 | ||
/*
 * Check the 1.0V and 1.8V FPGA supply rails of every controller.
 *
 * For each rail the 64-bit register holds both a max and a min ADC
 * sample (extracted by SSD_FPGA_VOLT_MAX/MIN); either one falling
 * outside the [ADC_MIN, ADC_MAX] window is treated as a fault.
 * A faulted rail is latched in dev->hwmon so it is warned about and
 * sw-logged only once; latched rails are skipped on later calls.
 *
 * Returns 0 when all checked rails are in range, otherwise a negative
 * code identifying the last out-of-range sample seen.
 */
static int ssd_check_volt(struct ssd_device *dev)
{
	int i = 0;
	uint64_t val;
	uint32_t adc_val;
	int ret =0;

	/* Voltage monitor registers only exist from protocol v3.2 on. */
	if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
		return 0;
	}

	for (i=0; i<dev->hw_info.nr_ctrl; i++) {
		/* 1.0v */
		if (!test_bit(SSD_HWMON_FPGA(i, SSD_FPGA_1V0), &dev->hwmon)) {
			/* per-controller register zone: base + i * zone size */
			val = ssd_reg_read(dev->ctrlp + SSD_FPGA_1V0_REG0 + i * SSD_CTRL_REG_ZONE_SZ);
			adc_val = SSD_FPGA_VOLT_MAX(val);
			if (adc_val < SSD_FPGA_1V0_ADC_MIN || adc_val > SSD_FPGA_1V0_ADC_MAX) {
				/* latch the fault so it is reported only once */
				(void)test_and_set_bit(SSD_HWMON_FPGA(i, SSD_FPGA_1V0), &dev->hwmon);
				hio_warn("%s: controller %d 1.0V fault: %d mV.\n", dev->name, i, SSD_FPGA_VOLT(adc_val));
				ssd_gen_swlog(dev, SSD_LOG_VOLT_FAULT, SSD_VOLT_LOG_DATA(SSD_FPGA_1V0, i, adc_val));
				ret = -1;
			}

			adc_val = SSD_FPGA_VOLT_MIN(val);
			if (adc_val < SSD_FPGA_1V0_ADC_MIN || adc_val > SSD_FPGA_1V0_ADC_MAX) {
				(void)test_and_set_bit(SSD_HWMON_FPGA(i, SSD_FPGA_1V0), &dev->hwmon);
				hio_warn("%s: controller %d 1.0V fault: %d mV.\n", dev->name, i, SSD_FPGA_VOLT(adc_val));
				ssd_gen_swlog(dev, SSD_LOG_VOLT_FAULT, SSD_VOLT_LOG_DATA(SSD_FPGA_1V0, i, adc_val));
				ret = -2;
			}
		}

		/* 1.8v */
		if (!test_bit(SSD_HWMON_FPGA(i, SSD_FPGA_1V8), &dev->hwmon)) {
			val = ssd_reg_read(dev->ctrlp + SSD_FPGA_1V8_REG0 + i * SSD_CTRL_REG_ZONE_SZ);
			adc_val = SSD_FPGA_VOLT_MAX(val);
			if (adc_val < SSD_FPGA_1V8_ADC_MIN || adc_val > SSD_FPGA_1V8_ADC_MAX) {
				(void)test_and_set_bit(SSD_HWMON_FPGA(i, SSD_FPGA_1V8), &dev->hwmon);
				hio_warn("%s: controller %d 1.8V fault: %d mV.\n", dev->name, i, SSD_FPGA_VOLT(adc_val));
				ssd_gen_swlog(dev, SSD_LOG_VOLT_FAULT, SSD_VOLT_LOG_DATA(SSD_FPGA_1V8, i, adc_val));
				ret = -3;
			}

			adc_val = SSD_FPGA_VOLT_MIN(val);
			if (adc_val < SSD_FPGA_1V8_ADC_MIN || adc_val > SSD_FPGA_1V8_ADC_MAX) {
				(void)test_and_set_bit(SSD_HWMON_FPGA(i, SSD_FPGA_1V8), &dev->hwmon);
				hio_warn("%s: controller %d 1.8V fault: %d mV.\n", dev->name, i, SSD_FPGA_VOLT(adc_val));
				ssd_gen_swlog(dev, SSD_LOG_VOLT_FAULT, SSD_VOLT_LOG_DATA(SSD_FPGA_1V8, i, adc_val));
				ret = -4;
			}
		}
	}

	return ret;
}
10832 | ||
10833 | static int ssd_check_reset_sync(struct ssd_device *dev) | |
10834 | { | |
10835 | uint32_t val; | |
10836 | ||
10837 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_1_3) { | |
10838 | return 0; | |
10839 | } | |
10840 | ||
10841 | val = ssd_reg32_read(dev->ctrlp + SSD_HW_STATUS_REG); | |
10842 | if (!((val >> 8) & 0x1)) { | |
10843 | /* controller state */ | |
10844 | hio_warn("%s: controller state error: %#x\n", dev->name, val); | |
10845 | return -1; | |
10846 | } | |
10847 | ||
10848 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
10849 | return 0; | |
10850 | } | |
10851 | ||
10852 | if (((val >> 9 ) & 0x1)) { | |
10853 | hio_warn("%s: controller reset asynchronously: %#x\n", dev->name, val); | |
10854 | ssd_gen_swlog(dev, SSD_LOG_CTRL_RST_SYNC, val); | |
10855 | return -1; | |
10856 | } | |
10857 | ||
10858 | return 0; | |
10859 | } | |
10860 | ||
10861 | static int ssd_check_hw_bh(struct ssd_device *dev) | |
10862 | { | |
10863 | int ret; | |
10864 | ||
10865 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_1_3) { | |
10866 | return 0; | |
10867 | } | |
10868 | ||
10869 | /* clock status */ | |
10870 | ret = ssd_check_clock(dev); | |
10871 | if (ret) { | |
10872 | goto out; | |
10873 | } | |
10874 | ||
10875 | out: | |
10876 | /* skip error if not in standard mode */ | |
10877 | if (mode != SSD_DRV_MODE_STANDARD) { | |
10878 | ret = 0; | |
10879 | } | |
10880 | return ret; | |
10881 | } | |
10882 | ||
10883 | static int ssd_check_controller(struct ssd_device *dev) | |
10884 | { | |
10885 | int ret; | |
10886 | ||
10887 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_1_3) { | |
10888 | return 0; | |
10889 | } | |
10890 | ||
10891 | /* sync reset */ | |
10892 | ret = ssd_check_reset_sync(dev); | |
10893 | if (ret) { | |
10894 | goto out; | |
10895 | } | |
10896 | ||
10897 | out: | |
10898 | /* skip error if not in standard mode */ | |
10899 | if (mode != SSD_DRV_MODE_STANDARD) { | |
10900 | ret = 0; | |
10901 | } | |
10902 | return ret; | |
10903 | } | |
10904 | ||
/*
 * Bottom-half controller self-test, run only in standard mode:
 *  1. controller 0 ready bit,
 *  2. per-controller test-register write/read-back (hardware returns
 *     the bitwise complement of the written pattern),
 *  3. FPGA voltage rails,
 *  4. per-controller DDR init completion (polled with msleep),
 *  5. channel-info init completion (polled with msleep).
 * Returns 0 on success, negative on any failure.
 */
static int ssd_check_controller_bh(struct ssd_device *dev)
{
	uint32_t test_data = 0x55AA5AA5;
	uint32_t val;
	int reg_base, reg_sz;
	int init_wait = 0;
	int i;
	int ret = 0;

	if (mode != SSD_DRV_MODE_STANDARD) {
		return 0;
	}

	/* controller */
	val = ssd_reg32_read(dev->ctrlp + SSD_READY_REG);
	if (val & 0x1) {
		hio_warn("%s: controller 0 not ready\n", dev->name);
		return -1;
	}

	/* write a pattern to each controller's test register; the hardware
	 * is expected to echo back its bitwise complement */
	for (i=0; i<dev->hw_info.nr_ctrl; i++) {
		reg_base = SSD_CTRL_TEST_REG0 + i * SSD_CTRL_TEST_REG_SZ;
		ssd_reg32_write(dev->ctrlp + reg_base, test_data);
		val = ssd_reg32_read(dev->ctrlp + reg_base);
		if (val != ~(test_data)) {
			hio_warn("%s: check controller %d error: %#x\n", dev->name, i, val);
			return -1;
		}
	}

	/* voltage (NOTE: previously mislabeled "clock" - this calls ssd_check_volt) */
	ret = ssd_check_volt(dev);
	if (ret) {
		return ret;
	}

	/* ddr: poll each controller's RAM status until init-done (bit 1) */
	if (dev->protocol_info.ver > SSD_PROTOCOL_V3) {
		reg_base = SSD_PV3_RAM_STATUS_REG0;
		reg_sz = SSD_PV3_RAM_STATUS_REG_SZ;

		for (i=0; i<dev->hw_info.nr_ctrl; i++) {
check_ram_status:
			val = ssd_reg32_read(dev->ctrlp + reg_base);

			if (!((val >> 1) & 0x1)) {
				/* not done yet: retry up to SSD_RAM_INIT_MAX_WAIT times */
				init_wait++;
				if (init_wait <= SSD_RAM_INIT_MAX_WAIT) {
					msleep(SSD_INIT_WAIT);
					goto check_ram_status;
				} else {
					hio_warn("%s: controller %d ram init failed: %#x\n", dev->name, i, val);
					ssd_gen_swlog(dev, SSD_LOG_DDR_INIT_ERR, i);
					return -1;
				}
			}

			reg_base += reg_sz;
		}
	}

	/* ch info: bit 31 clears when the channel info is initialized */
	for (i=0; i<SSD_CH_INFO_MAX_WAIT; i++) {
		val = ssd_reg32_read(dev->ctrlp + SSD_CH_INFO_REG);
		if (!((val >> 31) & 0x1)) {
			break;
		}

		msleep(SSD_INIT_WAIT);
	}
	if ((val >> 31) & 0x1) {
		hio_warn("%s: channel info init failed: %#x\n", dev->name, val);
		return -1;
	}

	return 0;
}
10982 | ||
10983 | static int ssd_init_protocol_info(struct ssd_device *dev) | |
10984 | { | |
10985 | uint32_t val; | |
10986 | ||
10987 | val = ssd_reg32_read(dev->ctrlp + SSD_PROTOCOL_VER_REG); | |
10988 | if (val == (uint32_t)-1) { | |
10989 | hio_warn("%s: protocol version error: %#x\n", dev->name, val); | |
10990 | return -EINVAL; | |
10991 | } | |
10992 | dev->protocol_info.ver = val; | |
10993 | ||
10994 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3) { | |
10995 | dev->protocol_info.init_state_reg = SSD_INIT_STATE_REG0; | |
10996 | dev->protocol_info.init_state_reg_sz = SSD_INIT_STATE_REG_SZ; | |
10997 | ||
10998 | dev->protocol_info.chip_info_reg = SSD_CHIP_INFO_REG0; | |
10999 | dev->protocol_info.chip_info_reg_sz = SSD_CHIP_INFO_REG_SZ; | |
11000 | } else { | |
11001 | dev->protocol_info.init_state_reg = SSD_PV3_INIT_STATE_REG0; | |
11002 | dev->protocol_info.init_state_reg_sz = SSD_PV3_INIT_STATE_REG_SZ; | |
11003 | ||
11004 | dev->protocol_info.chip_info_reg = SSD_PV3_CHIP_INFO_REG0; | |
11005 | dev->protocol_info.chip_info_reg_sz = SSD_PV3_CHIP_INFO_REG_SZ; | |
11006 | } | |
11007 | ||
11008 | return 0; | |
11009 | } | |
11010 | ||
/*
 * Populate dev->hw_info and dev->hw_info_ext from the bridge registers:
 * response/command geometry, controller count, PCB version, channel,
 * RAM, bbt/metadata, flash and extended (form factor, PLP, work mode)
 * information.  Register read order matters: ssd_check_hw_bh() /
 * ssd_check_controller() / ssd_check_controller_bh() are interleaved
 * with the reads, and SSD_BRIDGE_INFO_REG is deliberately read again
 * after the checks.
 *
 * Many sections are gated on the driver mode; all errors are ignored
 * unless the driver runs in standard mode (see the out: label).
 * Returns 0 or -EINVAL.
 */
static int ssd_init_hw_info(struct ssd_device *dev)
{
	uint64_t val64;
	uint32_t val;
	uint32_t nr_ctrl;
	int ret = 0;

	/* base info: response pointer/message sizes, 16 * 2^field bytes */
	val = ssd_reg32_read(dev->ctrlp + SSD_RESP_INFO_REG);
	dev->hw_info.resp_ptr_sz = 16 * (1U << (val & 0xFF));
	dev->hw_info.resp_msg_sz = 16 * (1U << ((val >> 8) & 0xFF));

	if (0 == dev->hw_info.resp_ptr_sz || 0 == dev->hw_info.resp_msg_sz) {
		hio_warn("%s: response info error\n", dev->name);
		ret = -EINVAL;
		goto out;
	}

	/* command FIFO depth, max scatter-gather entries, max sectors per SG */
	val = ssd_reg32_read(dev->ctrlp + SSD_BRIDGE_INFO_REG);
	dev->hw_info.cmd_fifo_sz = 1U << ((val >> 4) & 0xF);
	dev->hw_info.cmd_max_sg = 1U << ((val >> 8) & 0xF);
	dev->hw_info.sg_max_sec = 1U << ((val >> 12) & 0xF);
	dev->hw_info.cmd_fifo_sz_mask = dev->hw_info.cmd_fifo_sz - 1;

	if (0 == dev->hw_info.cmd_fifo_sz || 0 == dev->hw_info.cmd_max_sg || 0 == dev->hw_info.sg_max_sec) {
		hio_warn("%s: cmd info error\n", dev->name);
		ret = -EINVAL;
		goto out;
	}

	/* check hw */
	if (ssd_check_hw_bh(dev)) {
		hio_warn("%s: check hardware status failed\n", dev->name);
		ret = -EINVAL;
		goto out;
	}

	if (ssd_check_controller(dev)) {
		hio_warn("%s: check controller state failed\n", dev->name);
		ret = -EINVAL;
		goto out;
	}

	/* nr controller : read again*/
	val = ssd_reg32_read(dev->ctrlp + SSD_BRIDGE_INFO_REG);
	dev->hw_info.nr_ctrl = (val >> 16) & 0xF;

	/* nr ctrl configured */
	nr_ctrl = (val >> 20) & 0xF;
	if (0 == dev->hw_info.nr_ctrl) {
		hio_warn("%s: nr controller error: %u\n", dev->name, dev->hw_info.nr_ctrl);
		ret = -EINVAL;
		goto out;
	} else if (0 != nr_ctrl && nr_ctrl != dev->hw_info.nr_ctrl) {
		/* configured vs detected mismatch: fatal only in standard mode */
		hio_warn("%s: nr controller error: configured %u but found %u\n", dev->name, nr_ctrl, dev->hw_info.nr_ctrl);
		if (mode <= SSD_DRV_MODE_STANDARD) {
			ret = -EINVAL;
			goto out;
		}
	}

	if (ssd_check_controller_bh(dev)) {
		hio_warn("%s: check controller failed\n", dev->name);
		ret = -EINVAL;
		goto out;
	}

	/* PCB version: nibbles encode letters, 1 -> 'A'; 0xF means "no upper PCB" */
	val = ssd_reg32_read(dev->ctrlp + SSD_PCB_VER_REG);
	dev->hw_info.pcb_ver = (uint8_t) ((val >> 4) & 0xF) + 'A' -1;
	if ((val & 0xF) != 0xF) {
		dev->hw_info.upper_pcb_ver = (uint8_t) (val & 0xF) + 'A' -1;
	}

	if (dev->hw_info.pcb_ver < 'A' || (0 != dev->hw_info.upper_pcb_ver && dev->hw_info.upper_pcb_ver < 'A')) {
		hio_warn("%s: PCB version error: %#x %#x\n", dev->name, dev->hw_info.pcb_ver, dev->hw_info.upper_pcb_ver);
		ret = -EINVAL;
		goto out;
	}

	/* channel info */
	if (mode <= SSD_DRV_MODE_DEBUG) {
		val = ssd_reg32_read(dev->ctrlp + SSD_CH_INFO_REG);
		dev->hw_info.nr_data_ch = val & 0xFF;
		dev->hw_info.nr_ch = dev->hw_info.nr_data_ch + ((val >> 8) & 0xFF);
		dev->hw_info.nr_chip = (val >> 16) & 0xFF;

		if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
			/* round channel count up to the next power of two */
			dev->hw_info.max_ch = 1;
			while (dev->hw_info.max_ch < dev->hw_info.nr_ch) dev->hw_info.max_ch <<= 1;
		} else {
			/* set max channel 32 */
			dev->hw_info.max_ch = 32;
		}

		if (0 == dev->hw_info.nr_chip) {
			//for debug mode
			dev->hw_info.nr_chip = 1;
		}

		//xx
		dev->hw_info.id_size = SSD_NAND_ID_SZ;
		dev->hw_info.max_ce = SSD_NAND_MAX_CE;

		if (0 == dev->hw_info.nr_data_ch || 0 == dev->hw_info.nr_ch || 0 == dev->hw_info.nr_chip) {
			hio_warn("%s: channel info error: data_ch %u ch %u chip %u\n", dev->name, dev->hw_info.nr_data_ch, dev->hw_info.nr_ch, dev->hw_info.nr_chip);
			ret = -EINVAL;
			goto out;
		}
	}

	/* ram info */
	if (mode <= SSD_DRV_MODE_DEBUG) {
		/* ram size in 64MB units scaled by 2^field; align and max-len are powers of two */
		val = ssd_reg32_read(dev->ctrlp + SSD_RAM_INFO_REG);
		dev->hw_info.ram_size = 0x4000000ull * (1ULL << (val & 0xF));
		dev->hw_info.ram_align = 1U << ((val >> 12) & 0xF);
		if (dev->hw_info.ram_align < SSD_RAM_ALIGN) {
			if (dev->protocol_info.ver < SSD_PROTOCOL_V3) {
				/* pre-v3 hardware may under-report; clamp to the driver minimum */
				dev->hw_info.ram_align = SSD_RAM_ALIGN;
			} else {
				hio_warn("%s: ram align error: %u\n", dev->name, dev->hw_info.ram_align);
				ret = -EINVAL;
				goto out;
			}
		}
		dev->hw_info.ram_max_len = 0x1000 * (1U << ((val >> 16) & 0xF));

		if (0 == dev->hw_info.ram_size || 0 == dev->hw_info.ram_align || 0 == dev->hw_info.ram_max_len || dev->hw_info.ram_align > dev->hw_info.ram_max_len) {
			hio_warn("%s: ram info error\n", dev->name);
			ret = -EINVAL;
			goto out;
		}

		if (dev->protocol_info.ver < SSD_PROTOCOL_V3) {
			dev->hw_info.log_sz = SSD_LOG_MAX_SZ;
		} else {
			val = ssd_reg32_read(dev->ctrlp + SSD_LOG_INFO_REG);
			dev->hw_info.log_sz = 0x1000 * (1U << (val & 0xFF));
		}
		if (0 == dev->hw_info.log_sz) {
			hio_warn("%s: log size error\n", dev->name);
			ret = -EINVAL;
			goto out;
		}

		/* bad-block table: base/size in 256KB units, size split per ch*chip */
		val = ssd_reg32_read(dev->ctrlp + SSD_BBT_BASE_REG);
		dev->hw_info.bbt_base = 0x40000ull * (val & 0xFFFF);
		dev->hw_info.bbt_size = 0x40000 * (((val >> 16) & 0xFFFF) + 1) / (dev->hw_info.max_ch * dev->hw_info.nr_chip);
		if (dev->protocol_info.ver < SSD_PROTOCOL_V3) {
			if (dev->hw_info.bbt_base > dev->hw_info.ram_size || 0 == dev->hw_info.bbt_size) {
				hio_warn("%s: bbt info error\n", dev->name);
				ret = -EINVAL;
				goto out;
			}
		}

		/* metadata area; per-(ch*chip) up to v3, per-chip afterwards */
		val = ssd_reg32_read(dev->ctrlp + SSD_ECT_BASE_REG);
		dev->hw_info.md_base = 0x40000ull * (val & 0xFFFF);
		if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) {
			dev->hw_info.md_size = 0x40000 * (((val >> 16) & 0xFFF) + 1) / (dev->hw_info.max_ch * dev->hw_info.nr_chip);
		} else {
			dev->hw_info.md_size = 0x40000 * (((val >> 16) & 0xFFF) + 1) / (dev->hw_info.nr_chip);
		}
		dev->hw_info.md_entry_sz = 8 * (1U << ((val >> 28) & 0xF));
		if (dev->protocol_info.ver >= SSD_PROTOCOL_V3) {
			if (dev->hw_info.md_base > dev->hw_info.ram_size || 0 == dev->hw_info.md_size ||
				0 == dev->hw_info.md_entry_sz || dev->hw_info.md_entry_sz > dev->hw_info.md_size) {
				hio_warn("%s: md info error\n", dev->name);
				ret = -EINVAL;
				goto out;
			}
		}

		if (dev->protocol_info.ver < SSD_PROTOCOL_V3) {
			/* pre-v3: no NAND write buffer; park the base past the RAM end */
			dev->hw_info.nand_wbuff_base = dev->hw_info.ram_size + 1;
		} else {
			val = ssd_reg32_read(dev->ctrlp + SSD_NAND_BUFF_BASE);
			dev->hw_info.nand_wbuff_base = 0x8000ull * val;
		}
	}

	/* flash info */
	if (mode <= SSD_DRV_MODE_DEBUG) {
		if (dev->hw_info.nr_ctrl > 1) {
			val = ssd_reg32_read(dev->ctrlp + SSD_CTRL_VER_REG);
			dev->hw_info.ctrl_ver = val & 0xFFF;
			hio_info("%s: controller firmware version: %03X\n", dev->name, dev->hw_info.ctrl_ver);
		}

		/* NAND geometry packed into one 64-bit register */
		val64 = ssd_reg_read(dev->ctrlp + SSD_FLASH_INFO_REG0);
		dev->hw_info.nand_vendor_id = ((val64 >> 56) & 0xFF);
		dev->hw_info.nand_dev_id = ((val64 >> 48) & 0xFF);

		dev->hw_info.block_count = (((val64 >> 32) & 0xFFFF) + 1);
		dev->hw_info.page_count = ((val64>>16) & 0xFFFF);
		dev->hw_info.page_size = (val64 & 0xFFFF);

		val = ssd_reg32_read(dev->ctrlp + SSD_BB_INFO_REG);
		dev->hw_info.bbf_pages = val & 0xFF;
		dev->hw_info.bbf_seek = (val >> 8) & 0x1;

		if (0 == dev->hw_info.block_count || 0 == dev->hw_info.page_count || 0 == dev->hw_info.page_size || dev->hw_info.block_count > INT_MAX) {
			hio_warn("%s: flash info error\n", dev->name);
			ret = -EINVAL;
			goto out;
		}

		//xx
		dev->hw_info.oob_size = SSD_NAND_OOB_SZ; //(dev->hw_info.page_size) >> 5;

		/* valid-page counters widened from 10 to 15 bits in protocol v3.2 */
		val = ssd_reg32_read(dev->ctrlp + SSD_VALID_PAGES_REG);
		if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
			dev->hw_info.valid_pages = val & 0x3FF;
			dev->hw_info.max_valid_pages = (val>>20) & 0x3FF;
		} else {
			dev->hw_info.valid_pages = val & 0x7FFF;
			dev->hw_info.max_valid_pages = (val>>15) & 0x7FFF;
		}
		if (0 == dev->hw_info.valid_pages || 0 == dev->hw_info.max_valid_pages ||
			dev->hw_info.valid_pages > dev->hw_info.max_valid_pages || dev->hw_info.max_valid_pages > dev->hw_info.page_count) {
			hio_warn("%s: valid page info error: valid_pages %d, max_valid_pages %d\n", dev->name, dev->hw_info.valid_pages, dev->hw_info.max_valid_pages);
			ret = -EINVAL;
			goto out;
		}

		val = ssd_reg32_read(dev->ctrlp + SSD_RESERVED_BLKS_REG);
		dev->hw_info.reserved_blks = val & 0xFFFF;
		dev->hw_info.md_reserved_blks = (val >> 16) & 0xFF;
		if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) {
			dev->hw_info.md_reserved_blks = SSD_BBT_RESERVED;
		}
		if (dev->hw_info.reserved_blks > dev->hw_info.block_count || dev->hw_info.md_reserved_blks > dev->hw_info.block_count) {
			hio_warn("%s: reserved blocks info error: reserved_blks %d, md_reserved_blks %d\n", dev->name, dev->hw_info.reserved_blks, dev->hw_info.md_reserved_blks);
			ret = -EINVAL;
			goto out;
		}
	}

	/* size: usable capacity = valid pages * page size * usable blocks * ch * chip * ctrl */
	if (mode < SSD_DRV_MODE_DEBUG) {
		dev->hw_info.size = (uint64_t)dev->hw_info.valid_pages * dev->hw_info.page_size;
		dev->hw_info.size *= (dev->hw_info.block_count - dev->hw_info.reserved_blks);
		dev->hw_info.size *= ((uint64_t)dev->hw_info.nr_data_ch * (uint64_t)dev->hw_info.nr_chip * (uint64_t)dev->hw_info.nr_ctrl);
	}

	/* extend hardware info */
	val = ssd_reg32_read(dev->ctrlp + SSD_PCB_VER_REG);
	dev->hw_info_ext.board_type = (val >> 24) & 0xF;

	dev->hw_info_ext.form_factor = SSD_FORM_FACTOR_FHHL;
	if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2_1) {
		dev->hw_info_ext.form_factor = (val >> 31) & 0x1;
	}
	/*
	dev->hw_info_ext.cap_type = (val >> 28) & 0x3;
	if (SSD_BM_CAP_VINA != dev->hw_info_ext.cap_type && SSD_BM_CAP_JH != dev->hw_info_ext.cap_type) {
		dev->hw_info_ext.cap_type = SSD_BM_CAP_VINA;
	}*/

	/* power loss protect */
	val = ssd_reg32_read(dev->ctrlp + SSD_PLP_INFO_REG);
	dev->hw_info_ext.plp_type = (val & 0x3);
	if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) {
		/* 3 or 4 cap */
		dev->hw_info_ext.cap_type = ((val >> 2)& 0x1);
	}

	/* work mode */
	val = ssd_reg32_read(dev->ctrlp + SSD_CH_INFO_REG);
	dev->hw_info_ext.work_mode = (val >> 25) & 0x1;

out:
	/* skip error if not in standard mode */
	if (mode != SSD_DRV_MODE_STANDARD) {
		ret = 0;
	}
	return ret;
}
11288 | ||
11289 | static void ssd_cleanup_response(struct ssd_device *dev) | |
11290 | { | |
11291 | int resp_msg_sz = dev->hw_info.resp_msg_sz * dev->hw_info.cmd_fifo_sz * SSD_MSIX_VEC; | |
11292 | int resp_ptr_sz = dev->hw_info.resp_ptr_sz * SSD_MSIX_VEC; | |
11293 | ||
11294 | pci_free_consistent(dev->pdev, resp_ptr_sz, dev->resp_ptr_base, dev->resp_ptr_base_dma); | |
11295 | pci_free_consistent(dev->pdev, resp_msg_sz, dev->resp_msg_base, dev->resp_msg_base_dma); | |
11296 | } | |
11297 | ||
/*
 * Allocate the coherent DMA buffers for the hardware response ring
 * (one message area and one write-back pointer area per MSI-X vector)
 * and program their bus addresses into the bridge.
 *
 * The message area is filled with 0xFF ("empty" sentinel, see the
 * interrupt handlers); resp_idx is pre-set to cmd_fifo_sz*2 - 1 so the
 * first hardware write is detected as new.  Returns 0 or -ENOMEM.
 */
static int ssd_init_response(struct ssd_device *dev)
{
	int resp_msg_sz = dev->hw_info.resp_msg_sz * dev->hw_info.cmd_fifo_sz * SSD_MSIX_VEC;
	int resp_ptr_sz = dev->hw_info.resp_ptr_sz * SSD_MSIX_VEC;

	dev->resp_msg_base = pci_alloc_consistent(dev->pdev, resp_msg_sz, &(dev->resp_msg_base_dma));
	if (!dev->resp_msg_base) {
		hio_warn("%s: unable to allocate resp msg DMA buffer\n", dev->name);
		goto out_alloc_resp_msg;
	}
	/* 0xFF marks every slot as "no response yet" */
	memset(dev->resp_msg_base, 0xFF, resp_msg_sz);

	dev->resp_ptr_base = pci_alloc_consistent(dev->pdev, resp_ptr_sz, &(dev->resp_ptr_base_dma));
	if (!dev->resp_ptr_base){
		hio_warn("%s: unable to allocate resp ptr DMA buffer\n", dev->name);
		goto out_alloc_resp_ptr;
	}
	memset(dev->resp_ptr_base, 0, resp_ptr_sz);
	/* seed both the hardware write-back slot and the driver's cached index */
	dev->resp_idx = *(uint32_t *)(dev->resp_ptr_base) = dev->hw_info.cmd_fifo_sz * 2 - 1;

	/* hand the bus addresses to the bridge (only after both are ready) */
	ssd_reg_write(dev->ctrlp + SSD_RESP_FIFO_REG, dev->resp_msg_base_dma);
	ssd_reg_write(dev->ctrlp + SSD_RESP_PTR_REG, dev->resp_ptr_base_dma);

	return 0;

out_alloc_resp_ptr:
	pci_free_consistent(dev->pdev, resp_msg_sz, dev->resp_msg_base, dev->resp_msg_base_dma);
out_alloc_resp_msg:
	return -ENOMEM;
}
11328 | ||
11329 | static int ssd_cleanup_cmd(struct ssd_device *dev) | |
11330 | { | |
11331 | int msg_sz = ALIGN(sizeof(struct ssd_rw_msg) + (dev->hw_info.cmd_max_sg - 1) * sizeof(struct ssd_sg_entry), SSD_DMA_ALIGN); | |
11332 | int i; | |
11333 | ||
11334 | for (i=0; i<(int)dev->hw_info.cmd_fifo_sz; i++) { | |
11335 | kfree(dev->cmd[i].sgl); | |
11336 | } | |
11337 | kfree(dev->cmd); | |
11338 | pci_free_consistent(dev->pdev, (msg_sz * dev->hw_info.cmd_fifo_sz), dev->msg_base, dev->msg_base_dma); | |
11339 | return 0; | |
11340 | } | |
11341 | ||
/*
 * Set up the command infrastructure: one coherent DMA message buffer
 * holding cmd_fifo_sz fixed-size message slots, a per-tag ssd_cmd
 * array, and a scatterlist per command.  Also selects the command
 * submission function (dev->scmd) based on protocol version and the
 * fault-injection module parameter.
 *
 * Returns 0 or -ENOMEM.  Note the error-path labels are named after
 * the allocation that FAILED, so each label frees everything allocated
 * before that point (out_alloc_cmd frees msg_base, etc.).
 */
static int ssd_init_cmd(struct ssd_device *dev)
{
	int sgl_sz = sizeof(struct scatterlist) * dev->hw_info.cmd_max_sg;
	int cmd_sz = sizeof(struct ssd_cmd) * dev->hw_info.cmd_fifo_sz;
	/* each message slot: header + (max_sg - 1) extra SG entries, DMA-aligned */
	int msg_sz = ALIGN(sizeof(struct ssd_rw_msg) + (dev->hw_info.cmd_max_sg - 1) * sizeof(struct ssd_sg_entry), SSD_DMA_ALIGN);
	int i;

	spin_lock_init(&dev->cmd_lock);

	dev->msg_base = pci_alloc_consistent(dev->pdev, (msg_sz * dev->hw_info.cmd_fifo_sz), &dev->msg_base_dma);
	if (!dev->msg_base) {
		hio_warn("%s: can not alloc cmd msg\n", dev->name);
		goto out_alloc_msg;
	}

	dev->cmd = kmalloc(cmd_sz, GFP_KERNEL);
	if (!dev->cmd) {
		hio_warn("%s: can not alloc cmd\n", dev->name);
		goto out_alloc_cmd;
	}
	memset(dev->cmd, 0, cmd_sz);

	for (i=0; i<(int)dev->hw_info.cmd_fifo_sz; i++) {
		dev->cmd[i].sgl = kmalloc(sgl_sz, GFP_KERNEL);
		if (!dev->cmd[i].sgl) {
			hio_warn("%s: can not alloc cmd sgl %d\n", dev->name, i);
			goto out_alloc_sgl;
		}

		/* slot i of the shared message buffer (CPU and bus views) */
		dev->cmd[i].msg = dev->msg_base + (msg_sz * i);
		dev->cmd[i].msg_dma = dev->msg_base_dma + ((dma_addr_t)msg_sz * i);

		dev->cmd[i].dev = dev;
		dev->cmd[i].tag = i;
		dev->cmd[i].flag = 0;

		INIT_LIST_HEAD(&dev->cmd[i].list);
	}

	/* pick the submission path: legacy dispatch pre-v3, doorbell-based otherwise */
	if (dev->protocol_info.ver < SSD_PROTOCOL_V3) {
		dev->scmd = ssd_dispatch_cmd;
	} else {
		ssd_reg_write(dev->ctrlp + SSD_MSG_BASE_REG, dev->msg_base_dma);
		if (finject) {
			/* fault-injection build: doorbell variant with hooks */
			dev->scmd = ssd_send_cmd_db;
		} else {
			dev->scmd = ssd_send_cmd;
		}
	}

	return 0;

out_alloc_sgl:
	/* free the scatterlists allocated before the failure */
	for (i--; i>=0; i--) {
		kfree(dev->cmd[i].sgl);
	}
	kfree(dev->cmd);
out_alloc_cmd:
	pci_free_consistent(dev->pdev, (msg_sz * dev->hw_info.cmd_fifo_sz), dev->msg_base, dev->msg_base_dma);
out_alloc_msg:
	return -ENOMEM;
}
11404 | ||
11405 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30)) | |
11406 | static irqreturn_t ssd_interrupt_check(int irq, void *dev_id) | |
11407 | { | |
11408 | struct ssd_queue *queue = (struct ssd_queue *)dev_id; | |
11409 | ||
11410 | if (*(uint32_t *)queue->resp_ptr == queue->resp_idx) { | |
11411 | return IRQ_NONE; | |
11412 | } | |
11413 | ||
11414 | return IRQ_WAKE_THREAD; | |
11415 | } | |
11416 | ||
/*
 * Threaded interrupt handler: drain the response ring from the last
 * handled index up to the hardware's write-back index.
 *
 * Each 64-bit slot is copied out, reset to the all-ones "empty"
 * sentinel, matched to its command by tag, and completed via
 * ssd_done().  Error statuses update statistics and generate software
 * log entries; read completions feed the ECC bitflip histogram.
 */
static irqreturn_t ssd_interrupt_threaded(int irq, void *dev_id)
{
	struct ssd_queue *queue = (struct ssd_queue *)dev_id;
	struct ssd_device *dev = (struct ssd_device *)queue->dev;
	struct ssd_cmd *cmd;
	union ssd_response_msq __msg;
	union ssd_response_msq *msg = &__msg;
	uint64_t *u64_msg;
	uint32_t resp_idx = queue->resp_idx;
	uint32_t new_resp_idx = *(uint32_t *)queue->resp_ptr;	/* hw write-back index */
	uint32_t end_resp_idx;

	if (unlikely(resp_idx == new_resp_idx)) {
		return IRQ_NONE;
	}

	end_resp_idx = new_resp_idx & queue->resp_idx_mask;

	do {
		resp_idx = (resp_idx + 1) & queue->resp_idx_mask;

		/* the resp msg */
		u64_msg = (uint64_t *)(queue->resp_msg + queue->resp_msg_sz * resp_idx);
		msg->u64_msg = *u64_msg;

		/* all-ones means the slot was never filled: hw/driver disagree */
		if (unlikely(msg->u64_msg == (uint64_t)(-1))) {
			hio_err("%s: empty resp msg: queue %d idx %u\n", dev->name, queue->idx, resp_idx);
			continue;
		}
		/* clear the resp msg */
		*u64_msg = (uint64_t)(-1);

		cmd = &queue->cmd[msg->resp_msg.tag];
		/*if (unlikely(!cmd->bio)) {
			printk(KERN_WARNING "%s: unknown tag %d fun %#x\n",
				dev->name, msg->resp_msg.tag, msg->resp_msg.fun);
			continue;
		}*/

		/* only status bits selected by the status_mask module param count as -EIO */
		if(unlikely(msg->resp_msg.status & (uint32_t)status_mask)) {
			cmd->errors = -EIO;
		} else {
			cmd->errors = 0;
		}
		cmd->nr_log = msg->log_resp_msg.nr_log;

		ssd_done(cmd);

		/* hardware flagged pending log entries: kick the log worker */
		if (unlikely(msg->resp_msg.fun != SSD_FUNC_READ_LOG && msg->resp_msg.log > 0)) {
			(void)test_and_set_bit(SSD_LOG_HW, &dev->state);
			if (test_bit(SSD_INIT_WORKQ, &dev->state)) {
				queue_work(dev->workq, &dev->log_work);
			}
		}

		if (unlikely(msg->resp_msg.status)) {
			if (msg->resp_msg.fun == SSD_FUNC_READ || msg->resp_msg.fun == SSD_FUNC_WRITE) {
				hio_err("%s: I/O error %d: tag %d fun %#x\n",
					dev->name, msg->resp_msg.status, msg->resp_msg.tag, msg->resp_msg.fun);

				/* alarm led */
				ssd_set_alarm(dev);
				queue->io_stat.nr_rwerr++;
				ssd_gen_swlog(dev, SSD_LOG_EIO, msg->u32_msg[0]);
			} else {
				hio_info("%s: CMD error %d: tag %d fun %#x\n",
					dev->name, msg->resp_msg.status, msg->resp_msg.tag, msg->resp_msg.fun);

				ssd_gen_swlog(dev, SSD_LOG_ECMD, msg->u32_msg[0]);
			}
			queue->io_stat.nr_ioerr++;
		}

		/* track ECC correction counts for all read-type completions */
		if (msg->resp_msg.fun == SSD_FUNC_READ ||
			msg->resp_msg.fun == SSD_FUNC_NAND_READ_WOOB ||
			msg->resp_msg.fun == SSD_FUNC_NAND_READ) {

			queue->ecc_info.bitflip[msg->resp_msg.bitflip]++;
		}
	}while (resp_idx != end_resp_idx);

	queue->resp_idx = new_resp_idx;

	return IRQ_HANDLED;
}
11502 | #endif | |
11503 | ||
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
static irqreturn_t ssd_interrupt(int irq, void *dev_id, struct pt_regs *regs)
#else
static irqreturn_t ssd_interrupt(int irq, void *dev_id)
#endif
/*
 * Non-threaded interrupt handler: same ring-drain logic as
 * ssd_interrupt_threaded(), but completes commands with ssd_done_bh()
 * and (with SSD_ESCAPE_IRQ) records the servicing CPU for non-MSI-X
 * interrupt modes.  The pt_regs parameter exists only for pre-2.6.19
 * kernels' irq handler signature.
 */
{
	struct ssd_queue *queue = (struct ssd_queue *)dev_id;
	struct ssd_device *dev = (struct ssd_device *)queue->dev;
	struct ssd_cmd *cmd;
	union ssd_response_msq __msg;
	union ssd_response_msq *msg = &__msg;
	uint64_t *u64_msg;
	uint32_t resp_idx = queue->resp_idx;
	uint32_t new_resp_idx = *(uint32_t *)queue->resp_ptr;	/* hw write-back index */
	uint32_t end_resp_idx;

	if (unlikely(resp_idx == new_resp_idx)) {
		return IRQ_NONE;
	}

#if (defined SSD_ESCAPE_IRQ)
	/* remember which CPU handled the irq (legacy/MSI modes only) */
	if (SSD_INT_MSIX != dev->int_mode) {
		dev->irq_cpu = smp_processor_id();
	}
#endif

	end_resp_idx = new_resp_idx & queue->resp_idx_mask;

	do {
		resp_idx = (resp_idx + 1) & queue->resp_idx_mask;

		/* the resp msg */
		u64_msg = (uint64_t *)(queue->resp_msg + queue->resp_msg_sz * resp_idx);
		msg->u64_msg = *u64_msg;

		/* all-ones means the slot was never filled: hw/driver disagree */
		if (unlikely(msg->u64_msg == (uint64_t)(-1))) {
			hio_err("%s: empty resp msg: queue %d idx %u\n", dev->name, queue->idx, resp_idx);
			continue;
		}
		/* clear the resp msg */
		*u64_msg = (uint64_t)(-1);

		cmd = &queue->cmd[msg->resp_msg.tag];
		/*if (unlikely(!cmd->bio)) {
			printk(KERN_WARNING "%s: unknown tag %d fun %#x\n",
				dev->name, msg->resp_msg.tag, msg->resp_msg.fun);
			continue;
		}*/

		/* only status bits selected by the status_mask module param count as -EIO */
		if(unlikely(msg->resp_msg.status & (uint32_t)status_mask)) {
			cmd->errors = -EIO;
		} else {
			cmd->errors = 0;
		}
		cmd->nr_log = msg->log_resp_msg.nr_log;

		ssd_done_bh(cmd);

		/* hardware flagged pending log entries: kick the log worker */
		if (unlikely(msg->resp_msg.fun != SSD_FUNC_READ_LOG && msg->resp_msg.log > 0)) {
			(void)test_and_set_bit(SSD_LOG_HW, &dev->state);
			if (test_bit(SSD_INIT_WORKQ, &dev->state)) {
				queue_work(dev->workq, &dev->log_work);
			}
		}

		if (unlikely(msg->resp_msg.status)) {
			if (msg->resp_msg.fun == SSD_FUNC_READ || msg->resp_msg.fun == SSD_FUNC_WRITE) {
				hio_err("%s: I/O error %d: tag %d fun %#x\n",
					dev->name, msg->resp_msg.status, msg->resp_msg.tag, msg->resp_msg.fun);

				/* alarm led */
				ssd_set_alarm(dev);
				queue->io_stat.nr_rwerr++;
				ssd_gen_swlog(dev, SSD_LOG_EIO, msg->u32_msg[0]);
			} else {
				hio_info("%s: CMD error %d: tag %d fun %#x\n",
					dev->name, msg->resp_msg.status, msg->resp_msg.tag, msg->resp_msg.fun);

				ssd_gen_swlog(dev, SSD_LOG_ECMD, msg->u32_msg[0]);
			}
			queue->io_stat.nr_ioerr++;
		}

		/* track ECC correction counts for all read-type completions */
		if (msg->resp_msg.fun == SSD_FUNC_READ ||
			msg->resp_msg.fun == SSD_FUNC_NAND_READ_WOOB ||
			msg->resp_msg.fun == SSD_FUNC_NAND_READ) {

			queue->ecc_info.bitflip[msg->resp_msg.bitflip]++;
		}
	}while (resp_idx != end_resp_idx);

	queue->resp_idx = new_resp_idx;

	return IRQ_HANDLED;
}
11599 | ||
/*
 * Legacy (INTx) interrupt handler.
 *
 * Wraps the common handler ssd_interrupt() and, when the interrupt was
 * ours (IRQ_HANDLED), explicitly acknowledges it by writing the
 * controller's clear-interrupt register — presumably required for the
 * shared/level-triggered INTx line, since the MSI/MSI-X paths install
 * ssd_interrupt() directly without this ack (see ssd_init_irq()).
 *
 * Kernels before 2.6.19 pass a third pt_regs argument to IRQ handlers.
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
static irqreturn_t ssd_interrupt_legacy(int irq, void *dev_id, struct pt_regs *regs)
#else
static irqreturn_t ssd_interrupt_legacy(int irq, void *dev_id)
#endif
{
	irqreturn_t ret;
	struct ssd_queue *queue = (struct ssd_queue *)dev_id;
	struct ssd_device *dev = (struct ssd_device *)queue->dev;

	/* Run the common response-processing handler first. */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
	ret = ssd_interrupt(irq, dev_id, regs);
#else
	ret = ssd_interrupt(irq, dev_id);
#endif

	/* clear intr */
	if (IRQ_HANDLED == ret) {
		ssd_reg32_write(dev->ctrlp + SSD_CLEAR_INTR_REG, 1);
	}

	return ret;
}
11623 | ||
11624 | static void ssd_reset_resp_ptr(struct ssd_device *dev) | |
11625 | { | |
11626 | int i; | |
11627 | ||
11628 | for (i=0; i<dev->nr_queue; i++) { | |
11629 | *(uint32_t *)dev->queue[i].resp_ptr = dev->queue[i].resp_idx = (dev->hw_info.cmd_fifo_sz * 2) - 1; | |
11630 | } | |
11631 | } | |
11632 | ||
/*
 * Release the per-queue interrupt resources acquired in ssd_init_irq().
 *
 * Teardown order mirrors init in reverse: manual affinity hints are
 * cleared first (only on pre-4.10 kernels, which set them by hand),
 * then every queue's vector is freed, and finally MSI-X/MSI is
 * disabled on the PCI device.
 */
static void ssd_free_irq(struct ssd_device *dev)
{
	int i;

#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
	/* Drop the affinity hints set during init (MSI-X mode only). */
	if (SSD_INT_MSIX == dev->int_mode) {
		for (i=0; i<dev->nr_queue; i++) {
			irq_set_affinity_hint(dev->entry[i].vector, NULL);
		}
	}
#endif

	for (i=0; i<dev->nr_queue; i++) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
		free_irq(dev->entry[i].vector, &dev->queue[i]);
#else
		/* >= 4.10: vectors came from pci_alloc_irq_vectors() */
		free_irq(pci_irq_vector(dev->pdev, i), &dev->queue[i]);
#endif
	}

	if (SSD_INT_MSIX == dev->int_mode) {
		pci_disable_msix(dev->pdev);
	} else if (SSD_INT_MSI == dev->int_mode) {
		pci_disable_msi(dev->pdev);
	}

}
11660 | ||
11661 | static int ssd_init_irq(struct ssd_device *dev) | |
11662 | { | |
b44043bd | 11663 | #if (!defined MODULE) && (defined SSD_MSIX_AFFINITY_FORCE) && (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) |
1197134c | 11664 | const struct cpumask *cpu_mask = NULL; |
361ebed5 HSDT |
11665 | static int cpu_affinity = 0; |
11666 | #endif | |
b44043bd | 11667 | #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) |
13cfa002 | 11668 | const struct cpumask *mask = NULL; |
361ebed5 HSDT |
11669 | static int cpu = 0; |
11670 | int j; | |
11671 | #endif | |
11672 | int i; | |
11673 | unsigned long flags = 0; | |
11674 | int ret = 0; | |
11675 | ||
11676 | ssd_reg32_write(dev->ctrlp + SSD_INTR_INTERVAL_REG, 0x800); | |
11677 | ||
11678 | #ifdef SSD_ESCAPE_IRQ | |
11679 | dev->irq_cpu = -1; | |
11680 | #endif | |
11681 | ||
b44043bd | 11682 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) |
361ebed5 HSDT |
11683 | if (int_mode >= SSD_INT_MSIX && pci_find_capability(dev->pdev, PCI_CAP_ID_MSIX)) { |
11684 | dev->nr_queue = SSD_MSIX_VEC; | |
b44043bd | 11685 | |
361ebed5 HSDT |
11686 | for (i=0; i<dev->nr_queue; i++) { |
11687 | dev->entry[i].entry = i; | |
11688 | } | |
11689 | for (;;) { | |
11690 | ret = pci_enable_msix(dev->pdev, dev->entry, dev->nr_queue); | |
11691 | if (ret == 0) { | |
11692 | break; | |
11693 | } else if (ret > 0) { | |
11694 | dev->nr_queue = ret; | |
11695 | } else { | |
11696 | hio_warn("%s: can not enable msix\n", dev->name); | |
11697 | /* alarm led */ | |
11698 | ssd_set_alarm(dev); | |
11699 | goto out; | |
11700 | } | |
11701 | } | |
11702 | ||
11703 | #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6)) | |
11704 | mask = (dev_to_node(&dev->pdev->dev) == -1) ? cpu_online_mask : cpumask_of_node(dev_to_node(&dev->pdev->dev)); | |
11705 | if ((0 == cpu) || (!cpumask_intersects(mask, cpumask_of(cpu)))) { | |
11706 | cpu = cpumask_first(mask); | |
11707 | } | |
11708 | for (i=0; i<dev->nr_queue; i++) { | |
11709 | irq_set_affinity_hint(dev->entry[i].vector, cpumask_of(cpu)); | |
11710 | cpu = cpumask_next(cpu, mask); | |
11711 | if (cpu >= nr_cpu_ids) { | |
11712 | cpu = cpumask_first(mask); | |
11713 | } | |
11714 | } | |
11715 | #endif | |
11716 | ||
11717 | dev->int_mode = SSD_INT_MSIX; | |
11718 | } else if (int_mode >= SSD_INT_MSI && pci_find_capability(dev->pdev, PCI_CAP_ID_MSI)) { | |
11719 | ret = pci_enable_msi(dev->pdev); | |
11720 | if (ret) { | |
11721 | hio_warn("%s: can not enable msi\n", dev->name); | |
11722 | /* alarm led */ | |
11723 | ssd_set_alarm(dev); | |
11724 | goto out; | |
11725 | } | |
11726 | ||
11727 | dev->nr_queue = 1; | |
11728 | dev->entry[0].vector = dev->pdev->irq; | |
11729 | ||
11730 | dev->int_mode = SSD_INT_MSI; | |
11731 | } else { | |
11732 | dev->nr_queue = 1; | |
11733 | dev->entry[0].vector = dev->pdev->irq; | |
11734 | ||
11735 | dev->int_mode = SSD_INT_LEGACY; | |
11736 | } | |
b44043bd SF |
11737 | #else |
11738 | if (int_mode >= SSD_INT_MSIX && pci_find_capability(dev->pdev, PCI_CAP_ID_MSIX)) { | |
11739 | dev->nr_queue = SSD_MSIX_VEC; | |
11740 | ||
11741 | dev->nr_queue = pci_alloc_irq_vectors(dev->pdev, 1, dev->nr_queue, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); | |
11742 | if (dev->nr_queue <= 0) { | |
11743 | ret = -EIO; | |
11744 | hio_warn("%s: can not enable msix\n", dev->name); | |
11745 | ssd_set_alarm(dev); | |
11746 | goto out; | |
11747 | } | |
11748 | ||
11749 | dev->int_mode = SSD_INT_MSIX; | |
11750 | } else if (int_mode >= SSD_INT_MSI && pci_find_capability(dev->pdev, PCI_CAP_ID_MSI)) { | |
11751 | ||
11752 | ret = pci_alloc_irq_vectors(dev->pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_AFFINITY); | |
11753 | if (ret <= 0) { | |
11754 | ret = -EIO; | |
11755 | hio_warn("%s: can not enable msi\n", dev->name); | |
11756 | /* alarm led */ | |
11757 | ssd_set_alarm(dev); | |
11758 | goto out; | |
11759 | } | |
11760 | dev->nr_queue = 1; | |
11761 | ||
11762 | dev->int_mode = SSD_INT_MSI; | |
11763 | } else { | |
11764 | ret = pci_alloc_irq_vectors(dev->pdev, 1, 1, PCI_IRQ_LEGACY); | |
11765 | ||
11766 | if (ret <= 0) { | |
11767 | ret = -EIO; | |
11768 | hio_warn("%s: can not enable msi\n", dev->name); | |
11769 | /* alarm led */ | |
11770 | ssd_set_alarm(dev); | |
11771 | goto out; | |
11772 | } | |
11773 | dev->nr_queue = 1; | |
11774 | ||
11775 | dev->int_mode = SSD_INT_LEGACY; | |
11776 | } | |
11777 | #endif | |
361ebed5 HSDT |
11778 | |
11779 | for (i=0; i<dev->nr_queue; i++) { | |
11780 | if (dev->nr_queue > 1) { | |
11781 | snprintf(dev->queue[i].name, SSD_QUEUE_NAME_LEN, "%s_e100-%d", dev->name, i); | |
11782 | } else { | |
11783 | snprintf(dev->queue[i].name, SSD_QUEUE_NAME_LEN, "%s_e100", dev->name); | |
11784 | } | |
11785 | ||
11786 | dev->queue[i].dev = dev; | |
11787 | dev->queue[i].idx = i; | |
11788 | ||
11789 | dev->queue[i].resp_idx = (dev->hw_info.cmd_fifo_sz * 2) - 1; | |
11790 | dev->queue[i].resp_idx_mask = dev->hw_info.cmd_fifo_sz - 1; | |
11791 | ||
11792 | dev->queue[i].resp_msg_sz = dev->hw_info.resp_msg_sz; | |
11793 | dev->queue[i].resp_msg = dev->resp_msg_base + dev->hw_info.resp_msg_sz * dev->hw_info.cmd_fifo_sz * i; | |
11794 | dev->queue[i].resp_ptr = dev->resp_ptr_base + dev->hw_info.resp_ptr_sz * i; | |
11795 | *(uint32_t *)dev->queue[i].resp_ptr = dev->queue[i].resp_idx; | |
11796 | ||
11797 | dev->queue[i].cmd = dev->cmd; | |
11798 | } | |
11799 | ||
11800 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)) | |
11801 | flags = IRQF_SHARED; | |
11802 | #else | |
11803 | flags = SA_SHIRQ; | |
11804 | #endif | |
11805 | ||
11806 | for (i=0; i<dev->nr_queue; i++) { | |
b44043bd SF |
11807 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)) |
11808 | if (dev->int_mode == SSD_INT_LEGACY) { | |
11809 | ret = request_irq(dev->entry[i].vector, &ssd_interrupt_legacy, flags, dev->queue[i].name, &dev->queue[i]); | |
11810 | } else { | |
11811 | ret = request_irq(dev->entry[i].vector, &ssd_interrupt, flags, dev->queue[i].name, &dev->queue[i]); | |
11812 | } | |
11813 | #elif (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) | |
361ebed5 HSDT |
11814 | if (threaded_irq) { |
11815 | ret = request_threaded_irq(dev->entry[i].vector, ssd_interrupt_check, ssd_interrupt_threaded, flags, dev->queue[i].name, &dev->queue[i]); | |
11816 | } else if (dev->int_mode == SSD_INT_LEGACY) { | |
11817 | ret = request_irq(dev->entry[i].vector, &ssd_interrupt_legacy, flags, dev->queue[i].name, &dev->queue[i]); | |
11818 | } else { | |
11819 | ret = request_irq(dev->entry[i].vector, &ssd_interrupt, flags, dev->queue[i].name, &dev->queue[i]); | |
11820 | } | |
11821 | #else | |
b44043bd SF |
11822 | if (threaded_irq) { |
11823 | ret = request_threaded_irq(pci_irq_vector(dev->pdev, i), ssd_interrupt_check, ssd_interrupt_threaded, flags, dev->queue[i].name, &dev->queue[i]); | |
11824 | } else if (dev->int_mode == SSD_INT_LEGACY) { | |
11825 | ret = request_irq(pci_irq_vector(dev->pdev, i), &ssd_interrupt_legacy, flags, dev->queue[i].name, &dev->queue[i]); | |
361ebed5 | 11826 | } else { |
b44043bd | 11827 | ret = request_irq(pci_irq_vector(dev->pdev, i), &ssd_interrupt, flags, dev->queue[i].name, &dev->queue[i]); |
361ebed5 HSDT |
11828 | } |
11829 | #endif | |
11830 | if (ret) { | |
11831 | hio_warn("%s: request irq failed\n", dev->name); | |
11832 | /* alarm led */ | |
11833 | ssd_set_alarm(dev); | |
11834 | goto out_request_irq; | |
11835 | } | |
11836 | ||
b44043bd | 11837 | #if (!defined MODULE) && (defined SSD_MSIX_AFFINITY_FORCE) && (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) |
361ebed5 HSDT |
11838 | cpu_mask = (dev_to_node(&dev->pdev->dev) == -1) ? cpu_online_mask : cpumask_of_node(dev_to_node(&dev->pdev->dev)); |
11839 | if (SSD_INT_MSIX == dev->int_mode) { | |
11840 | if ((0 == cpu_affinity) || (!cpumask_intersects(mask, cpumask_of(cpu_affinity)))) { | |
11841 | cpu_affinity = cpumask_first(cpu_mask); | |
11842 | } | |
11843 | ||
11844 | irq_set_affinity(dev->entry[i].vector, cpumask_of(cpu_affinity)); | |
11845 | cpu_affinity = cpumask_next(cpu_affinity, cpu_mask); | |
11846 | if (cpu_affinity >= nr_cpu_ids) { | |
11847 | cpu_affinity = cpumask_first(cpu_mask); | |
11848 | } | |
11849 | } | |
11850 | #endif | |
11851 | } | |
11852 | ||
11853 | return ret; | |
11854 | ||
11855 | out_request_irq: | |
b44043bd | 11856 | #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) |
361ebed5 HSDT |
11857 | if (SSD_INT_MSIX == dev->int_mode) { |
11858 | for (j=0; j<dev->nr_queue; j++) { | |
11859 | irq_set_affinity_hint(dev->entry[j].vector, NULL); | |
11860 | } | |
11861 | } | |
11862 | #endif | |
11863 | ||
11864 | for (i--; i>=0; i--) { | |
b44043bd | 11865 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) |
361ebed5 | 11866 | free_irq(dev->entry[i].vector, &dev->queue[i]); |
b44043bd SF |
11867 | #else |
11868 | free_irq(pci_irq_vector(dev->pdev, i), &dev->queue[i]); | |
11869 | #endif | |
361ebed5 HSDT |
11870 | } |
11871 | ||
11872 | if (SSD_INT_MSIX == dev->int_mode) { | |
11873 | pci_disable_msix(dev->pdev); | |
11874 | } else if (SSD_INT_MSI == dev->int_mode) { | |
11875 | pci_disable_msi(dev->pdev); | |
11876 | } | |
11877 | ||
11878 | out: | |
11879 | return ret; | |
11880 | } | |
11881 | ||
11882 | static void ssd_initial_log(struct ssd_device *dev) | |
11883 | { | |
11884 | uint32_t val; | |
11885 | uint32_t speed, width; | |
11886 | ||
11887 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
11888 | return; | |
11889 | } | |
11890 | ||
11891 | val = ssd_reg32_read(dev->ctrlp + SSD_POWER_ON_REG); | |
11892 | if (val) { | |
da3355df SF |
11893 | // Poweron detection switched to SSD_INTR_INTERVAL_REG in 'ssd_init_smart' |
11894 | //ssd_gen_swlog(dev, SSD_LOG_POWER_ON, dev->hw_info.bridge_ver); | |
361ebed5 HSDT |
11895 | } |
11896 | ||
11897 | val = ssd_reg32_read(dev->ctrlp + SSD_PCIE_LINKSTATUS_REG); | |
11898 | speed = val & 0xF; | |
11899 | width = (val >> 4)& 0x3F; | |
11900 | if (0x1 == speed) { | |
11901 | hio_info("%s: PCIe: 2.5GT/s, x%u\n", dev->name, width); | |
11902 | } else if (0x2 == speed) { | |
11903 | hio_info("%s: PCIe: 5GT/s, x%u\n", dev->name, width); | |
11904 | } else { | |
11905 | hio_info("%s: PCIe: unknown GT/s, x%u\n", dev->name, width); | |
11906 | } | |
11907 | ssd_gen_swlog(dev, SSD_LOG_PCIE_LINK_STATUS, val); | |
11908 | ||
11909 | return; | |
11910 | } | |
11911 | ||
/*
 * Hardware-monitor work item (queued from ssd_routine_start()).
 * Bails out if the basic hardware check fails, otherwise samples
 * clocks and voltages via the ssd_check_*/ssd_mon_* helpers.
 *
 * Pre-2.6.20 workqueues pass a raw data pointer; newer kernels pass
 * the embedded work_struct, recovered via container_of().
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
static void ssd_hwmon_worker(void *data)
{
	struct ssd_device *dev = (struct ssd_device *)data;
#else
static void ssd_hwmon_worker(struct work_struct *work)
{
	struct ssd_device *dev = container_of(work, struct ssd_device, hwmon_work);
#endif

	if (ssd_check_hw(dev)) {
		//hio_err("%s: check hardware failed\n", dev->name);
		return;
	}

	ssd_check_clock(dev);
	ssd_check_volt(dev);

	ssd_mon_boardvolt(dev);
}
11932 | ||
/*
 * Temperature-monitor work item (queued every routine tick).
 * Skipped when the basic hardware check fails; otherwise delegates
 * to ssd_mon_temp().
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
static void ssd_tempmon_worker(void *data)
{
	struct ssd_device *dev = (struct ssd_device *)data;
#else
static void ssd_tempmon_worker(struct work_struct *work)
{
	struct ssd_device *dev = container_of(work, struct ssd_device, tempmon_work);
#endif

	if (ssd_check_hw(dev)) {
		//hio_err("%s: check hardware failed\n", dev->name);
		return;
	}

	ssd_mon_temp(dev);
}
11950 | ||
11951 | ||
/*
 * Power-loss-protection capacitor monitor work item.
 *
 * Runs a capacitance "learn" measurement and raises or clears the
 * battery-fault software log when the measured value crosses the
 * threshold, with hysteresis on the recovery edge so the state does
 * not flap around the threshold.
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
static void ssd_capmon_worker(void *data)
{
	struct ssd_device *dev = (struct ssd_device *)data;
#else
static void ssd_capmon_worker(struct work_struct *work)
{
	struct ssd_device *dev = container_of(work, struct ssd_device, capmon_work);
#endif
	uint32_t cap = 0;
	uint32_t cap_threshold = SSD_PL_CAP_THRESHOLD;
	int ret = 0;

	/* Capacitor monitoring exists only on protocol >= V3.2 devices. */
	if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
		return;
	}

	/* FHHL boards before PCB rev 'B' are excluded — presumably no
	 * measuring circuit; confirm against the hardware spec. */
	if (dev->hw_info_ext.form_factor == SSD_FORM_FACTOR_FHHL && dev->hw_info.pcb_ver < 'B') {
		return;
	}

	/* fault before? */
	if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) {
		/* Quick re-check first; skip the full learn while still faulty. */
		ret = ssd_check_pl_cap_fast(dev);
		if (ret) {
			return;
		}
	}

	/* learn */
	ret = ssd_do_cap_learn(dev, &cap);
	if (ret) {
		hio_err("%s: cap learn failed\n", dev->name);
		ssd_gen_swlog(dev, SSD_LOG_CAP_LEARN_FAULT, 0);
		return;
	}

	ssd_gen_swlog(dev, SSD_LOG_CAP_STATUS, cap);

	/* The capacitor-pack type selects which threshold applies. */
	if (SSD_PL_CAP_CP == dev->hw_info_ext.cap_type) {
		cap_threshold = SSD_PL_CAP_CP_THRESHOLD;
	}

	//use the fw event id?
	if (cap < cap_threshold) {
		/* Log the fault only on the transition into the fault state. */
		if (!test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) {
			ssd_gen_swlog(dev, SSD_LOG_BATTERY_FAULT, 0);
		}
	} else if (cap >= (cap_threshold + SSD_PL_CAP_THRESHOLD_HYST)) {
		/* Hysteresis: require headroom above the threshold to clear. */
		if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) {
			ssd_gen_swlog(dev, SSD_LOG_BATTERY_OK, 0);
		}
	}
}
12006 | ||
/*
 * Periodic routine timer callback; re-arms itself at the end of every
 * run via mod_timer().  Dispatches the log, hwmon, capacitor and
 * temperature work items onto the device workqueue at their respective
 * tick intervals (all gated on SSD_INIT_WORKQ being set).
 *
 * 4.15 changed the timer callback signature to take a timer_list;
 * the owning device is then recovered with from_timer().
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0))
static void ssd_routine_start(void *data)
#else
static void ssd_routine_start(struct timer_list *t)
#endif
{
	struct ssd_device *dev;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0))
	if (!data) {
		return;
	}
	dev = data;
#else
	dev = from_timer(dev, t, routine_timer);
#endif

	dev->routine_tick++;

	/* Harvest hardware logs, but only while the device is idle. */
	if (test_bit(SSD_INIT_WORKQ, &dev->state) && !ssd_busy(dev)) {
		(void)test_and_set_bit(SSD_LOG_HW, &dev->state);
		queue_work(dev->workq, &dev->log_work);
	}

	if ((dev->routine_tick % SSD_HWMON_ROUTINE_TICK) == 0 && test_bit(SSD_INIT_WORKQ, &dev->state)) {
		queue_work(dev->workq, &dev->hwmon_work);
	}

	if ((dev->routine_tick % SSD_CAPMON_ROUTINE_TICK) == 0 && test_bit(SSD_INIT_WORKQ, &dev->state)) {
		queue_work(dev->workq, &dev->capmon_work);
	}

	/* Faulty capacitor: re-check on the (shorter) CAPMON2 cadence. */
	if ((dev->routine_tick % SSD_CAPMON2_ROUTINE_TICK) == 0 && test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon) && test_bit(SSD_INIT_WORKQ, &dev->state)) {
		/* CAP fault? check again */
		queue_work(dev->workq, &dev->capmon_work);
	}

	/* Temperature is sampled every tick. */
	if (test_bit(SSD_INIT_WORKQ, &dev->state)) {
		queue_work(dev->workq, &dev->tempmon_work);
	}

	/* schedule routine */
	mod_timer(&dev->routine_timer, jiffies + msecs_to_jiffies(SSD_ROUTINE_INTERVAL));
}
12051 | ||
12052 | static void ssd_cleanup_routine(struct ssd_device *dev) | |
12053 | { | |
12054 | if (unlikely(mode != SSD_DRV_MODE_STANDARD)) | |
12055 | return; | |
12056 | ||
12057 | (void)ssd_del_timer(&dev->routine_timer); | |
12058 | ||
12059 | (void)ssd_del_timer(&dev->bm_timer); | |
12060 | } | |
12061 | ||
/*
 * Set up the periodic monitoring machinery: the four work items, the
 * battery-manager timer and the main routine timer.  Skipped (still
 * returning 0) unless the driver runs in standard mode.
 *
 * Always returns 0.
 */
static int ssd_init_routine(struct ssd_device *dev)
{
	if (unlikely(mode != SSD_DRV_MODE_STANDARD))
		return 0;

	/* Pre-2.6.20 INIT_WORK took an explicit data pointer argument. */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
	INIT_WORK(&dev->bm_work, ssd_bm_worker, dev);
	INIT_WORK(&dev->hwmon_work, ssd_hwmon_worker, dev);
	INIT_WORK(&dev->capmon_work, ssd_capmon_worker, dev);
	INIT_WORK(&dev->tempmon_work, ssd_tempmon_worker, dev);
#else
	INIT_WORK(&dev->bm_work, ssd_bm_worker);
	INIT_WORK(&dev->hwmon_work, ssd_hwmon_worker);
	INIT_WORK(&dev->capmon_work, ssd_capmon_worker);
	INIT_WORK(&dev->tempmon_work, ssd_tempmon_worker);
#endif

	/* initial log */
	ssd_initial_log(dev);

	/* schedule bm routine */
	ssd_add_timer(&dev->bm_timer, msecs_to_jiffies(SSD_BM_CAP_LEARNING_DELAY), ssd_bm_routine_start, dev);

	/* schedule routine */
	ssd_add_timer(&dev->routine_timer, msecs_to_jiffies(SSD_ROUTINE_INTERVAL), ssd_routine_start, dev);

	return 0;
}
12090 | ||
/*
 * PCI remove callback: tear the device down in (roughly) the reverse
 * order of ssd_init_one().  The sequence matters: mark the device
 * offline and drain the workqueue first, flush caches/metadata while
 * the hardware is still reachable, release block/char devices before
 * queues, IRQs and DMA buffers, and only trigger a firmware reload
 * (when requested) once everything else is gone.
 *
 * Steps guarded by !dev->slave apply only to the master port; a slave
 * port shares those resources with its master.
 */
static void
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
__devexit
#endif
ssd_remove_one (struct pci_dev *pdev)
{
	struct ssd_device *dev;

	if (!pdev) {
		return;
	}

	dev = pci_get_drvdata(pdev);
	if (!dev) {
		return;
	}

	list_del_init(&dev->list);

	ssd_unregister_sysfs(dev);

	/* offline firstly */
	test_and_clear_bit(SSD_ONLINE, &dev->state);

	/* clean work queue first */
	if (!dev->slave) {
		test_and_clear_bit(SSD_INIT_WORKQ, &dev->state);
		ssd_cleanup_workq(dev);
	}

	/* flush cache */
	(void)ssd_flush(dev);
	(void)ssd_save_md(dev);

	/* save smart */
	if (!dev->slave) {
		ssd_save_smart(dev);
	}

	if (test_and_clear_bit(SSD_INIT_BD, &dev->state)) {
		ssd_cleanup_blkdev(dev);
	}

	if (!dev->slave) {
		ssd_cleanup_chardev(dev);
	}

	/* clean routine */
	if (!dev->slave) {
		ssd_cleanup_routine(dev);
	}

	ssd_cleanup_queue(dev);

	ssd_cleanup_tag(dev);
	ssd_cleanup_thread(dev);

	ssd_free_irq(dev);

	ssd_cleanup_dcmd(dev);
	ssd_cleanup_cmd(dev);
	ssd_cleanup_response(dev);

	if (!dev->slave) {
		ssd_cleanup_log(dev);
	}

	if (dev->reload_fw) { //reload fw
		dev->has_non_0x98_reg_access = 1;
		ssd_reg32_write(dev->ctrlp + SSD_RELOAD_FW_REG, SSD_RELOAD_FW);
	}

	/* unmap physical adress */
#ifdef LINUX_SUSE_OS
	iounmap(dev->ctrlp);
#else
	pci_iounmap(pdev, dev->ctrlp);
#endif

	release_mem_region(dev->mmio_base, dev->mmio_len);

	pci_disable_device(pdev);

	pci_set_drvdata(pdev, NULL);

	ssd_put(dev);
}
12178 | ||
12179 | static int | |
12180 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)) | |
12181 | __devinit | |
12182 | #endif | |
12183 | ssd_init_one(struct pci_dev *pdev, | |
12184 | const struct pci_device_id *ent) | |
12185 | { | |
12186 | struct ssd_device *dev; | |
12187 | int ret = 0; | |
12188 | ||
12189 | if (!pdev || !ent) { | |
12190 | ret = -EINVAL; | |
12191 | goto out; | |
12192 | } | |
12193 | ||
12194 | dev = kmalloc(sizeof(struct ssd_device), GFP_KERNEL); | |
12195 | if (!dev) { | |
12196 | ret = -ENOMEM; | |
12197 | goto out_alloc_dev; | |
12198 | } | |
12199 | memset(dev, 0, sizeof(struct ssd_device)); | |
12200 | ||
12201 | dev->owner = THIS_MODULE; | |
12202 | ||
12203 | if (SSD_SLAVE_PORT_DEVID == ent->device) { | |
12204 | dev->slave = 1; | |
12205 | } | |
12206 | ||
12207 | dev->idx = ssd_get_index(dev->slave); | |
12208 | if (dev->idx < 0) { | |
12209 | ret = -ENOMEM; | |
12210 | goto out_get_index; | |
12211 | } | |
12212 | ||
12213 | if (!dev->slave) { | |
12214 | snprintf(dev->name, SSD_DEV_NAME_LEN, SSD_DEV_NAME); | |
12215 | ssd_set_dev_name(&dev->name[strlen(SSD_DEV_NAME)], SSD_DEV_NAME_LEN-strlen(SSD_DEV_NAME), dev->idx); | |
12216 | ||
12217 | dev->major = ssd_major; | |
12218 | dev->cmajor = ssd_cmajor; | |
12219 | } else { | |
12220 | snprintf(dev->name, SSD_DEV_NAME_LEN, SSD_SDEV_NAME); | |
12221 | ssd_set_dev_name(&dev->name[strlen(SSD_SDEV_NAME)], SSD_DEV_NAME_LEN-strlen(SSD_SDEV_NAME), dev->idx); | |
12222 | dev->major = ssd_major_sl; | |
12223 | dev->cmajor = 0; | |
12224 | } | |
12225 | ||
57e45d44 | 12226 | dev->reset_time = (uint64_t)ktime_get_real_seconds(); |
1197134c | 12227 | |
361ebed5 HSDT |
12228 | atomic_set(&(dev->refcnt), 0); |
12229 | atomic_set(&(dev->tocnt), 0); | |
12230 | ||
12231 | mutex_init(&dev->fw_mutex); | |
12232 | ||
12233 | //xx | |
12234 | mutex_init(&dev->gd_mutex); | |
da3355df SF |
12235 | dev->has_non_0x98_reg_access = 0; |
12236 | ||
12237 | //init in_flight lock | |
12238 | spin_lock_init(&dev->in_flight_lock); | |
361ebed5 HSDT |
12239 | |
12240 | dev->pdev = pdev; | |
12241 | pci_set_drvdata(pdev, dev); | |
12242 | ||
12243 | kref_init(&dev->kref); | |
12244 | ||
12245 | ret = pci_enable_device(pdev); | |
12246 | if (ret) { | |
12247 | hio_warn("%s: can not enable device\n", dev->name); | |
12248 | goto out_enable_device; | |
12249 | } | |
12250 | ||
12251 | pci_set_master(pdev); | |
12252 | ||
12253 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)) | |
12254 | ret = pci_set_dma_mask(pdev, DMA_64BIT_MASK); | |
12255 | #else | |
12256 | ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); | |
12257 | #endif | |
12258 | if (ret) { | |
12259 | hio_warn("%s: set dma mask: failed\n", dev->name); | |
12260 | goto out_set_dma_mask; | |
12261 | } | |
12262 | ||
12263 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)) | |
12264 | ret = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); | |
12265 | #else | |
12266 | ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | |
12267 | #endif | |
12268 | if (ret) { | |
12269 | hio_warn("%s: set consistent dma mask: failed\n", dev->name); | |
12270 | goto out_set_dma_mask; | |
12271 | } | |
12272 | ||
12273 | dev->mmio_base = pci_resource_start(pdev, 0); | |
12274 | dev->mmio_len = pci_resource_len(pdev, 0); | |
12275 | ||
12276 | if (!request_mem_region(dev->mmio_base, dev->mmio_len, SSD_DEV_NAME)) { | |
12277 | hio_warn("%s: can not reserve MMIO region 0\n", dev->name); | |
12278 | ret = -EBUSY; | |
12279 | goto out_request_mem_region; | |
12280 | } | |
12281 | ||
12282 | /* 2.6.9 kernel bug */ | |
12283 | dev->ctrlp = pci_iomap(pdev, 0, 0); | |
12284 | if (!dev->ctrlp) { | |
12285 | hio_warn("%s: can not remap IO region 0\n", dev->name); | |
12286 | ret = -ENOMEM; | |
12287 | goto out_pci_iomap; | |
12288 | } | |
12289 | ||
12290 | ret = ssd_check_hw(dev); | |
12291 | if (ret) { | |
12292 | hio_err("%s: check hardware failed\n", dev->name); | |
12293 | goto out_check_hw; | |
12294 | } | |
12295 | ||
12296 | ret = ssd_init_protocol_info(dev); | |
12297 | if (ret) { | |
12298 | hio_err("%s: init protocol info failed\n", dev->name); | |
12299 | goto out_init_protocol_info; | |
12300 | } | |
12301 | ||
12302 | /* alarm led ? */ | |
12303 | ssd_clear_alarm(dev); | |
12304 | ||
12305 | ret = ssd_init_fw_info(dev); | |
12306 | if (ret) { | |
12307 | hio_err("%s: init firmware info failed\n", dev->name); | |
12308 | /* alarm led */ | |
12309 | ssd_set_alarm(dev); | |
12310 | goto out_init_fw_info; | |
12311 | } | |
12312 | ||
12313 | /* slave port ? */ | |
12314 | if (dev->slave) { | |
12315 | goto init_next1; | |
12316 | } | |
12317 | ||
12318 | ret = ssd_init_rom_info(dev); | |
12319 | if (ret) { | |
12320 | hio_err("%s: init rom info failed\n", dev->name); | |
12321 | /* alarm led */ | |
12322 | ssd_set_alarm(dev); | |
12323 | goto out_init_rom_info; | |
12324 | } | |
12325 | ||
12326 | ret = ssd_init_label(dev); | |
12327 | if (ret) { | |
12328 | hio_err("%s: init label failed\n", dev->name); | |
12329 | /* alarm led */ | |
12330 | ssd_set_alarm(dev); | |
12331 | goto out_init_label; | |
12332 | } | |
12333 | ||
12334 | ret = ssd_init_workq(dev); | |
12335 | if (ret) { | |
12336 | hio_warn("%s: init workq failed\n", dev->name); | |
12337 | goto out_init_workq; | |
12338 | } | |
12339 | (void)test_and_set_bit(SSD_INIT_WORKQ, &dev->state); | |
12340 | ||
12341 | ret = ssd_init_log(dev); | |
12342 | if (ret) { | |
12343 | hio_err("%s: init log failed\n", dev->name); | |
12344 | /* alarm led */ | |
12345 | ssd_set_alarm(dev); | |
12346 | goto out_init_log; | |
12347 | } | |
12348 | ||
12349 | ret = ssd_init_smart(dev); | |
12350 | if (ret) { | |
12351 | hio_err("%s: init info failed\n", dev->name); | |
12352 | /* alarm led */ | |
12353 | ssd_set_alarm(dev); | |
12354 | goto out_init_smart; | |
12355 | } | |
12356 | ||
12357 | init_next1: | |
12358 | ret = ssd_init_hw_info(dev); | |
12359 | if (ret) { | |
12360 | hio_err("%s: init hardware info failed\n", dev->name); | |
12361 | /* alarm led */ | |
12362 | ssd_set_alarm(dev); | |
12363 | goto out_init_hw_info; | |
12364 | } | |
12365 | ||
12366 | /* slave port ? */ | |
12367 | if (dev->slave) { | |
12368 | goto init_next2; | |
12369 | } | |
12370 | ||
12371 | ret = ssd_init_sensor(dev); | |
12372 | if (ret) { | |
12373 | hio_err("%s: init sensor failed\n", dev->name); | |
12374 | /* alarm led */ | |
12375 | ssd_set_alarm(dev); | |
12376 | goto out_init_sensor; | |
12377 | } | |
12378 | ||
12379 | ret = ssd_init_pl_cap(dev); | |
12380 | if (ret) { | |
12381 | hio_err("%s: int pl_cap failed\n", dev->name); | |
12382 | /* alarm led */ | |
12383 | ssd_set_alarm(dev); | |
12384 | goto out_init_pl_cap; | |
12385 | } | |
12386 | ||
12387 | init_next2: | |
12388 | ret = ssd_check_init_state(dev); | |
12389 | if (ret) { | |
12390 | hio_err("%s: check init state failed\n", dev->name); | |
12391 | /* alarm led */ | |
12392 | ssd_set_alarm(dev); | |
12393 | goto out_check_init_state; | |
12394 | } | |
12395 | ||
12396 | ret = ssd_init_response(dev); | |
12397 | if (ret) { | |
12398 | hio_warn("%s: init resp_msg failed\n", dev->name); | |
12399 | goto out_init_response; | |
12400 | } | |
12401 | ||
12402 | ret = ssd_init_cmd(dev); | |
12403 | if (ret) { | |
12404 | hio_warn("%s: init msg failed\n", dev->name); | |
12405 | goto out_init_cmd; | |
12406 | } | |
12407 | ||
12408 | ret = ssd_init_dcmd(dev); | |
12409 | if (ret) { | |
12410 | hio_warn("%s: init cmd failed\n", dev->name); | |
12411 | goto out_init_dcmd; | |
12412 | } | |
12413 | ||
12414 | ret = ssd_init_irq(dev); | |
12415 | if (ret) { | |
12416 | hio_warn("%s: init irq failed\n", dev->name); | |
12417 | goto out_init_irq; | |
12418 | } | |
12419 | ||
12420 | ret = ssd_init_thread(dev); | |
12421 | if (ret) { | |
12422 | hio_warn("%s: init thread failed\n", dev->name); | |
12423 | goto out_init_thread; | |
12424 | } | |
12425 | ||
12426 | ret = ssd_init_tag(dev); | |
12427 | if(ret) { | |
12428 | hio_warn("%s: init tags failed\n", dev->name); | |
12429 | goto out_init_tags; | |
12430 | } | |
12431 | ||
12432 | /* */ | |
12433 | (void)test_and_set_bit(SSD_ONLINE, &dev->state); | |
12434 | ||
12435 | ret = ssd_init_queue(dev); | |
12436 | if (ret) { | |
12437 | hio_warn("%s: init queue failed\n", dev->name); | |
12438 | goto out_init_queue; | |
12439 | } | |
12440 | ||
12441 | /* slave port ? */ | |
12442 | if (dev->slave) { | |
12443 | goto init_next3; | |
12444 | } | |
12445 | ||
12446 | ret = ssd_init_ot_protect(dev); | |
12447 | if (ret) { | |
12448 | hio_err("%s: int ot_protect failed\n", dev->name); | |
12449 | /* alarm led */ | |
12450 | ssd_set_alarm(dev); | |
12451 | goto out_int_ot_protect; | |
12452 | } | |
12453 | ||
12454 | ret = ssd_init_wmode(dev); | |
12455 | if (ret) { | |
12456 | hio_warn("%s: init write mode\n", dev->name); | |
12457 | goto out_init_wmode; | |
12458 | } | |
12459 | ||
12460 | /* init routine after hw is ready */ | |
12461 | ret = ssd_init_routine(dev); | |
12462 | if (ret) { | |
12463 | hio_warn("%s: init routine\n", dev->name); | |
12464 | goto out_init_routine; | |
12465 | } | |
12466 | ||
12467 | ret = ssd_init_chardev(dev); | |
12468 | if (ret) { | |
12469 | hio_warn("%s: register char device failed\n", dev->name); | |
12470 | goto out_init_chardev; | |
12471 | } | |
12472 | ||
12473 | init_next3: | |
12474 | ret = ssd_init_blkdev(dev); | |
12475 | if (ret) { | |
12476 | hio_warn("%s: register block device failed\n", dev->name); | |
12477 | goto out_init_blkdev; | |
12478 | } | |
12479 | (void)test_and_set_bit(SSD_INIT_BD, &dev->state); | |
12480 | ||
12481 | ret = ssd_register_sysfs(dev); | |
12482 | if (ret) { | |
12483 | hio_warn("%s: register sysfs failed\n", dev->name); | |
12484 | goto out_register_sysfs; | |
12485 | } | |
12486 | ||
12487 | dev->save_md = 1; | |
12488 | ||
12489 | list_add_tail(&dev->list, &ssd_list); | |
12490 | ||
12491 | return 0; | |
12492 | ||
12493 | out_register_sysfs: | |
12494 | test_and_clear_bit(SSD_INIT_BD, &dev->state); | |
12495 | ssd_cleanup_blkdev(dev); | |
12496 | out_init_blkdev: | |
12497 | /* slave port ? */ | |
12498 | if (!dev->slave) { | |
12499 | ssd_cleanup_chardev(dev); | |
12500 | } | |
12501 | out_init_chardev: | |
12502 | /* slave port ? */ | |
12503 | if (!dev->slave) { | |
12504 | ssd_cleanup_routine(dev); | |
12505 | } | |
12506 | out_init_routine: | |
12507 | out_init_wmode: | |
12508 | out_int_ot_protect: | |
12509 | ssd_cleanup_queue(dev); | |
12510 | out_init_queue: | |
12511 | test_and_clear_bit(SSD_ONLINE, &dev->state); | |
12512 | ssd_cleanup_tag(dev); | |
12513 | out_init_tags: | |
12514 | ssd_cleanup_thread(dev); | |
12515 | out_init_thread: | |
12516 | ssd_free_irq(dev); | |
12517 | out_init_irq: | |
12518 | ssd_cleanup_dcmd(dev); | |
12519 | out_init_dcmd: | |
12520 | ssd_cleanup_cmd(dev); | |
12521 | out_init_cmd: | |
12522 | ssd_cleanup_response(dev); | |
12523 | out_init_response: | |
12524 | out_check_init_state: | |
12525 | out_init_pl_cap: | |
12526 | out_init_sensor: | |
12527 | out_init_hw_info: | |
12528 | out_init_smart: | |
12529 | /* slave port ? */ | |
12530 | if (!dev->slave) { | |
12531 | ssd_cleanup_log(dev); | |
12532 | } | |
12533 | out_init_log: | |
12534 | /* slave port ? */ | |
12535 | if (!dev->slave) { | |
12536 | test_and_clear_bit(SSD_INIT_WORKQ, &dev->state); | |
12537 | ssd_cleanup_workq(dev); | |
12538 | } | |
12539 | out_init_workq: | |
12540 | out_init_label: | |
12541 | out_init_rom_info: | |
12542 | out_init_fw_info: | |
12543 | out_init_protocol_info: | |
12544 | out_check_hw: | |
12545 | #ifdef LINUX_SUSE_OS | |
12546 | iounmap(dev->ctrlp); | |
12547 | #else | |
12548 | pci_iounmap(pdev, dev->ctrlp); | |
12549 | #endif | |
12550 | out_pci_iomap: | |
12551 | release_mem_region(dev->mmio_base, dev->mmio_len); | |
12552 | out_request_mem_region: | |
12553 | out_set_dma_mask: | |
12554 | pci_disable_device(pdev); | |
12555 | out_enable_device: | |
12556 | pci_set_drvdata(pdev, NULL); | |
12557 | out_get_index: | |
12558 | kfree(dev); | |
12559 | out_alloc_dev: | |
12560 | out: | |
12561 | return ret; | |
12562 | } | |
12563 | ||
12564 | static void ssd_cleanup_tasklet(void) | |
12565 | { | |
12566 | int i; | |
12567 | for_each_online_cpu(i) { | |
12568 | tasklet_kill(&per_cpu(ssd_tasklet, i)); | |
12569 | } | |
12570 | } | |
12571 | ||
12572 | static int ssd_init_tasklet(void) | |
12573 | { | |
12574 | int i; | |
12575 | ||
12576 | for_each_online_cpu(i) { | |
12577 | INIT_LIST_HEAD(&per_cpu(ssd_doneq, i)); | |
12578 | ||
12579 | if (finject) { | |
12580 | tasklet_init(&per_cpu(ssd_tasklet, i), __ssd_done_db, 0); | |
12581 | } else { | |
12582 | tasklet_init(&per_cpu(ssd_tasklet, i), __ssd_done, 0); | |
12583 | } | |
12584 | } | |
12585 | ||
12586 | return 0; | |
12587 | } | |
12588 | ||
/*
 * PCI IDs claimed by this driver.
 * Vendor 0x10ee is Xilinx (FPGA-based g3 card); 0x19e5 is Huawei.
 */
static struct pci_device_id ssd_pci_tbl[] = {
	{ 0x10ee, 0x0007, PCI_ANY_ID, PCI_ANY_ID, }, /* g3 */
	{ 0x19e5, 0x0007, PCI_ANY_ID, PCI_ANY_ID, }, /* v1 */
	//{ 0x19e5, 0x0008, PCI_ANY_ID, PCI_ANY_ID, }, /* v1 sp*/
	{ 0x19e5, 0x0009, PCI_ANY_ID, PCI_ANY_ID, }, /* v2 */
	{ 0x19e5, 0x000a, PCI_ANY_ID, PCI_ANY_ID, }, /* v2 dp slave*/
	{ 0, }
};
361ebed5 | 12597 | |
1197134c KM |
/*
 * Power-management suspend handler.
 *
 * Tears down the device in reverse order of initialization: take it
 * offline, stop background work, flush caches and persist metadata,
 * release IRQ/threads, optionally trigger a firmware reload, then unmap
 * the BAR and disable the PCI device.  The statement order here mirrors
 * the probe teardown path and must not be rearranged.
 *
 * Pre-2.6.32 kernels call this through pci_driver.suspend (pci_dev +
 * pm_message_t); newer kernels call through dev_pm_ops (struct device).
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32))
static int ssd_hio_suspend(struct pci_dev *pdev, pm_message_t state)
{
#else
static int ssd_hio_suspend(struct device *ddev)
{
	struct pci_dev *pdev = to_pci_dev(ddev);
#endif
	struct ssd_device *dev;


	if (!pdev) {
		return -EINVAL;
	}

	dev = pci_get_drvdata(pdev);
	if (!dev) {
		return -EINVAL;
	}

	hio_warn("%s: suspend disk start.\n", dev->name);
	ssd_unregister_sysfs(dev);

	/* offline firstly so no new I/O is accepted */
	test_and_clear_bit(SSD_ONLINE, &dev->state);

	/* clean work queue first (master port only) */
	if (!dev->slave) {
		test_and_clear_bit(SSD_INIT_WORKQ, &dev->state);
		ssd_cleanup_workq(dev);
	}

	/* flush cache and persist on-device metadata */
	(void)ssd_flush(dev);
	(void)ssd_save_md(dev);

	/* save smart data (master port only) */
	if (!dev->slave) {
		ssd_save_smart(dev);
	}

	/* stop the background routine (master port only) */
	if (!dev->slave) {
		ssd_cleanup_routine(dev);
	}

	ssd_cleanup_thread(dev);

	ssd_free_irq(dev);

	if (!dev->slave) {
		ssd_cleanup_log(dev);
	}

	if (dev->reload_fw) { // ask the card to reload its firmware on next start
		dev->has_non_0x98_reg_access = 1;
		ssd_reg32_write(dev->ctrlp + SSD_RELOAD_FW_REG, SSD_RELOAD_FW);
	}

	/* unmap the BAR; guarded so a partially torn-down device is safe */
	if (dev->ctrlp) {
#ifdef LINUX_SUSE_OS
		iounmap(dev->ctrlp);
#else
		pci_iounmap(pdev, dev->ctrlp);
#endif
		dev->ctrlp = NULL;
	}

	if (dev->mmio_base) {
		release_mem_region(dev->mmio_base, dev->mmio_len);
		dev->mmio_base = 0;
	}

	pci_disable_device(pdev);

	hio_warn("%s: suspend disk finish.\n", dev->name);

	return 0;
}
12679 | ||
12680 | ||
12681 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)) | |
12682 | static int ssd_hio_resume(struct pci_dev *pdev) | |
12683 | { | |
12684 | #else | |
12685 | static int ssd_hio_resume(struct device *ddev) | |
12686 | { | |
12687 | struct pci_dev *pdev = to_pci_dev(ddev); | |
12688 | #endif | |
12689 | struct ssd_device *dev = NULL; | |
12690 | int ret = 0; | |
12691 | ||
12692 | if (!pdev ) { | |
12693 | ret = -EINVAL; | |
12694 | goto out; | |
12695 | } | |
12696 | ||
12697 | dev = pci_get_drvdata(pdev); | |
12698 | if (!dev) { | |
12699 | ret = -ENOMEM; | |
12700 | goto out_alloc_dev; | |
12701 | } | |
12702 | ||
12703 | hio_warn("%s: resume disk start.\n", dev->name); | |
12704 | ret = pci_enable_device(pdev); | |
12705 | if (ret) { | |
12706 | hio_warn("%s: can not enable device\n", dev->name); | |
12707 | goto out_enable_device; | |
12708 | } | |
12709 | ||
12710 | pci_set_master(pdev); | |
12711 | ||
12712 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)) | |
12713 | ret = pci_set_dma_mask(pdev, DMA_64BIT_MASK); | |
12714 | #else | |
12715 | ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); | |
12716 | #endif | |
12717 | if (ret) { | |
12718 | hio_warn("%s: set dma mask: failed\n", dev->name); | |
12719 | goto out_set_dma_mask; | |
12720 | } | |
12721 | ||
12722 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)) | |
12723 | ret = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); | |
12724 | #else | |
12725 | ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | |
12726 | #endif | |
12727 | if (ret) { | |
12728 | hio_warn("%s: set consistent dma mask: failed\n", dev->name); | |
12729 | goto out_set_dma_mask; | |
12730 | } | |
12731 | ||
12732 | dev->mmio_base = pci_resource_start(pdev, 0); | |
12733 | dev->mmio_len = pci_resource_len(pdev, 0); | |
12734 | ||
12735 | if (!request_mem_region(dev->mmio_base, dev->mmio_len, SSD_DEV_NAME)) { | |
12736 | hio_warn("%s: can not reserve MMIO region 0\n", dev->name); | |
12737 | ret = -EBUSY; | |
12738 | goto out_request_mem_region; | |
12739 | } | |
12740 | ||
12741 | /* 2.6.9 kernel bug */ | |
12742 | dev->ctrlp = pci_iomap(pdev, 0, 0); | |
12743 | if (!dev->ctrlp) { | |
12744 | hio_warn("%s: can not remap IO region 0\n", dev->name); | |
12745 | ret = -ENOMEM; | |
12746 | goto out_pci_iomap; | |
12747 | } | |
12748 | ||
12749 | ret = ssd_check_hw(dev); | |
12750 | if (ret) { | |
12751 | hio_err("%s: check hardware failed\n", dev->name); | |
12752 | goto out_check_hw; | |
12753 | } | |
12754 | ||
12755 | /* alarm led ? */ | |
12756 | ssd_clear_alarm(dev); | |
12757 | ||
12758 | ret = ssd_init_fw_info(dev); | |
12759 | if (ret) { | |
12760 | hio_err("%s: init firmware info failed\n", dev->name); | |
12761 | /* alarm led */ | |
12762 | ssd_set_alarm(dev); | |
12763 | goto out_init_fw_info; | |
12764 | } | |
12765 | ||
12766 | /* slave port ? */ | |
12767 | if (dev->slave) { | |
12768 | goto init_next1; | |
12769 | } | |
12770 | ||
12771 | ret = ssd_init_rom_info(dev); | |
12772 | if (ret) { | |
12773 | hio_err("%s: init rom info failed\n", dev->name); | |
12774 | /* alarm led */ | |
12775 | ssd_set_alarm(dev); | |
12776 | goto out_init_rom_info; | |
12777 | } | |
12778 | ||
12779 | ret = ssd_init_label(dev); | |
12780 | if (ret) { | |
12781 | hio_err("%s: init label failed\n", dev->name); | |
12782 | /* alarm led */ | |
12783 | ssd_set_alarm(dev); | |
12784 | goto out_init_label; | |
12785 | } | |
12786 | ||
12787 | ret = ssd_init_workq(dev); | |
12788 | if (ret) { | |
12789 | hio_warn("%s: init workq failed\n", dev->name); | |
12790 | goto out_init_workq; | |
12791 | } | |
12792 | (void)test_and_set_bit(SSD_INIT_WORKQ, &dev->state); | |
12793 | ||
12794 | ret = ssd_init_log(dev); | |
12795 | if (ret) { | |
12796 | hio_err("%s: init log failed\n", dev->name); | |
12797 | /* alarm led */ | |
12798 | ssd_set_alarm(dev); | |
12799 | goto out_init_log; | |
12800 | } | |
12801 | ||
12802 | ret = ssd_init_smart(dev); | |
12803 | if (ret) { | |
12804 | hio_err("%s: init info failed\n", dev->name); | |
12805 | /* alarm led */ | |
12806 | ssd_set_alarm(dev); | |
12807 | goto out_init_smart; | |
12808 | } | |
12809 | ||
12810 | init_next1: | |
12811 | ret = ssd_init_hw_info(dev); | |
12812 | if (ret) { | |
12813 | hio_err("%s: init hardware info failed\n", dev->name); | |
12814 | /* alarm led */ | |
12815 | ssd_set_alarm(dev); | |
12816 | goto out_init_hw_info; | |
12817 | } | |
12818 | ||
12819 | /* slave port ? */ | |
12820 | if (dev->slave) { | |
12821 | goto init_next2; | |
12822 | } | |
12823 | ||
12824 | ret = ssd_init_sensor(dev); | |
12825 | if (ret) { | |
12826 | hio_err("%s: init sensor failed\n", dev->name); | |
12827 | /* alarm led */ | |
12828 | ssd_set_alarm(dev); | |
12829 | goto out_init_sensor; | |
12830 | } | |
12831 | ||
12832 | ret = ssd_init_pl_cap(dev); | |
12833 | if (ret) { | |
12834 | hio_err("%s: int pl_cap failed\n", dev->name); | |
12835 | /* alarm led */ | |
12836 | ssd_set_alarm(dev); | |
12837 | goto out_init_pl_cap; | |
12838 | } | |
12839 | ||
12840 | init_next2: | |
12841 | ret = ssd_check_init_state(dev); | |
12842 | if (ret) { | |
12843 | hio_err("%s: check init state failed\n", dev->name); | |
12844 | /* alarm led */ | |
12845 | ssd_set_alarm(dev); | |
12846 | goto out_check_init_state; | |
12847 | } | |
12848 | ||
12849 | //flush all base pointer to ssd | |
12850 | (void)ssd_reload_ssd_ptr(dev); | |
12851 | ||
12852 | ret = ssd_init_irq(dev); | |
12853 | if (ret) { | |
12854 | hio_warn("%s: init irq failed\n", dev->name); | |
12855 | goto out_init_irq; | |
12856 | } | |
12857 | ||
12858 | ret = ssd_init_thread(dev); | |
12859 | if (ret) { | |
12860 | hio_warn("%s: init thread failed\n", dev->name); | |
12861 | goto out_init_thread; | |
12862 | } | |
12863 | ||
12864 | /* */ | |
12865 | (void)test_and_set_bit(SSD_ONLINE, &dev->state); | |
12866 | ||
12867 | /* slave port ? */ | |
12868 | if (dev->slave) { | |
12869 | goto init_next3; | |
12870 | } | |
12871 | ||
12872 | ret = ssd_init_ot_protect(dev); | |
12873 | if (ret) { | |
12874 | hio_err("%s: int ot_protect failed\n", dev->name); | |
12875 | /* alarm led */ | |
12876 | ssd_set_alarm(dev); | |
12877 | goto out_int_ot_protect; | |
12878 | } | |
12879 | ||
12880 | ret = ssd_init_wmode(dev); | |
12881 | if (ret) { | |
12882 | hio_warn("%s: init write mode\n", dev->name); | |
12883 | goto out_init_wmode; | |
12884 | } | |
12885 | ||
12886 | /* init routine after hw is ready */ | |
12887 | ret = ssd_init_routine(dev); | |
12888 | if (ret) { | |
12889 | hio_warn("%s: init routine\n", dev->name); | |
12890 | goto out_init_routine; | |
12891 | } | |
12892 | ||
12893 | init_next3: | |
12894 | (void)test_and_set_bit(SSD_INIT_BD, &dev->state); | |
12895 | ||
12896 | dev->save_md = 1; | |
12897 | ||
12898 | hio_warn("%s: resume disk finish.\n", dev->name); | |
12899 | ||
12900 | return 0; | |
12901 | ||
12902 | out_init_routine: | |
12903 | out_init_wmode: | |
12904 | out_int_ot_protect: | |
12905 | ssd_cleanup_thread(dev); | |
12906 | out_init_thread: | |
12907 | ssd_free_irq(dev); | |
12908 | out_init_irq: | |
12909 | out_check_init_state: | |
12910 | out_init_pl_cap: | |
12911 | out_init_sensor: | |
12912 | out_init_hw_info: | |
12913 | out_init_smart: | |
12914 | /* slave port ? */ | |
12915 | if (!dev->slave) { | |
12916 | ssd_cleanup_log(dev); | |
12917 | } | |
12918 | out_init_log: | |
12919 | /* slave port ? */ | |
12920 | if (!dev->slave) { | |
12921 | test_and_clear_bit(SSD_INIT_WORKQ, &dev->state); | |
12922 | ssd_cleanup_workq(dev); | |
12923 | } | |
12924 | out_init_workq: | |
12925 | out_init_label: | |
12926 | out_init_rom_info: | |
12927 | out_init_fw_info: | |
12928 | out_check_hw: | |
12929 | #ifdef LINUX_SUSE_OS | |
12930 | iounmap(dev->ctrlp); | |
12931 | #else | |
12932 | pci_iounmap(pdev, dev->ctrlp); | |
12933 | #endif | |
12934 | out_pci_iomap: | |
12935 | release_mem_region(dev->mmio_base, dev->mmio_len); | |
12936 | out_request_mem_region: | |
12937 | out_set_dma_mask: | |
12938 | pci_disable_device(pdev); | |
12939 | out_enable_device: | |
12940 | out_alloc_dev: | |
12941 | out: | |
12942 | ||
12943 | hio_warn("%s: resume disk fail.\n", dev->name); | |
12944 | ||
12945 | return ret; | |
12946 | } | |
12947 | ||
MODULE_DEVICE_TABLE(pci, ssd_pci_tbl);

/*
 * Kernels >= 2.6.32 use dev_pm_ops; older kernels wire suspend/resume
 * directly into pci_driver, so no pm_ops object is needed there.
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32))
#else
SIMPLE_DEV_PM_OPS(hio_pm_ops, ssd_hio_suspend, ssd_hio_resume);
#endif
12955 | MODULE_DEVICE_TABLE(pci, ssd_pci_tbl); | |
12956 | struct pci_driver ssd_driver = { | |
12957 | .name = MODULE_NAME, | |
12958 | .id_table = ssd_pci_tbl, | |
12959 | .probe = ssd_init_one, | |
12960 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)) | |
12961 | .remove = __devexit_p(ssd_remove_one), | |
361ebed5 HSDT |
12962 | #else |
12963 | .remove = ssd_remove_one, | |
12964 | #endif | |
1197134c KM |
12965 | |
12966 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)) | |
12967 | .suspend = ssd_hio_suspend, | |
12968 | .resume = ssd_hio_resume, | |
12969 | #else | |
12970 | .driver = { | |
12971 | .pm = &hio_pm_ops, | |
12972 | }, | |
12973 | #endif | |
361ebed5 HSDT |
12974 | }; |
12975 | ||
12976 | /* notifier block to get a notify on system shutdown/halt/reboot */ | |
12977 | static int ssd_notify_reboot(struct notifier_block *nb, unsigned long event, void *buf) | |
12978 | { | |
12979 | struct ssd_device *dev = NULL; | |
12980 | struct ssd_device *n = NULL; | |
12981 | ||
12982 | list_for_each_entry_safe(dev, n, &ssd_list, list) { | |
12983 | ssd_gen_swlog(dev, SSD_LOG_POWER_OFF, 0); | |
12984 | ||
12985 | (void)ssd_flush(dev); | |
12986 | (void)ssd_save_md(dev); | |
12987 | ||
12988 | /* slave port ? */ | |
12989 | if (!dev->slave) { | |
12990 | ssd_save_smart(dev); | |
12991 | ||
12992 | ssd_stop_workq(dev); | |
12993 | ||
12994 | if (dev->reload_fw) { | |
da3355df | 12995 | dev->has_non_0x98_reg_access = 1; |
361ebed5 HSDT |
12996 | ssd_reg32_write(dev->ctrlp + SSD_RELOAD_FW_REG, SSD_RELOAD_FW); |
12997 | } | |
12998 | } | |
12999 | } | |
13000 | ||
13001 | return NOTIFY_OK; | |
13002 | } | |
13003 | ||
13004 | static struct notifier_block ssd_notifier = { | |
13005 | ssd_notify_reboot, NULL, 0 | |
13006 | }; | |
13007 | ||
13008 | static int __init ssd_init_module(void) | |
13009 | { | |
13010 | int ret = 0; | |
13011 | ||
13012 | hio_info("driver version: %s\n", DRIVER_VERSION); | |
13013 | ||
13014 | ret = ssd_init_index(); | |
13015 | if (ret) { | |
13016 | hio_warn("init index failed\n"); | |
13017 | goto out_init_index; | |
13018 | } | |
13019 | ||
13020 | ret = ssd_init_proc(); | |
13021 | if (ret) { | |
13022 | hio_warn("init proc failed\n"); | |
13023 | goto out_init_proc; | |
13024 | } | |
13025 | ||
13026 | ret = ssd_init_sysfs(); | |
13027 | if (ret) { | |
13028 | hio_warn("init sysfs failed\n"); | |
13029 | goto out_init_sysfs; | |
13030 | } | |
13031 | ||
13032 | ret = ssd_init_tasklet(); | |
13033 | if (ret) { | |
13034 | hio_warn("init tasklet failed\n"); | |
13035 | goto out_init_tasklet; | |
13036 | } | |
13037 | ||
13038 | #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12)) | |
13039 | ssd_class = class_simple_create(THIS_MODULE, SSD_DEV_NAME); | |
13040 | #else | |
13041 | ssd_class = class_create(THIS_MODULE, SSD_DEV_NAME); | |
13042 | #endif | |
13043 | if (IS_ERR(ssd_class)) { | |
13044 | ret = PTR_ERR(ssd_class); | |
13045 | goto out_class_create; | |
13046 | } | |
13047 | ||
13048 | if (ssd_cmajor > 0) { | |
13049 | ret = register_chrdev(ssd_cmajor, SSD_CDEV_NAME, &ssd_cfops); | |
13050 | } else { | |
13051 | ret = ssd_cmajor = register_chrdev(ssd_cmajor, SSD_CDEV_NAME, &ssd_cfops); | |
13052 | } | |
13053 | if (ret < 0) { | |
13054 | hio_warn("unable to register chardev major number\n"); | |
13055 | goto out_register_chardev; | |
13056 | } | |
13057 | ||
13058 | if (ssd_major > 0) { | |
13059 | ret = register_blkdev(ssd_major, SSD_DEV_NAME); | |
13060 | } else { | |
13061 | ret = ssd_major = register_blkdev(ssd_major, SSD_DEV_NAME); | |
13062 | } | |
13063 | if (ret < 0) { | |
13064 | hio_warn("unable to register major number\n"); | |
13065 | goto out_register_blkdev; | |
13066 | } | |
13067 | ||
13068 | if (ssd_major_sl > 0) { | |
13069 | ret = register_blkdev(ssd_major_sl, SSD_SDEV_NAME); | |
13070 | } else { | |
13071 | ret = ssd_major_sl = register_blkdev(ssd_major_sl, SSD_SDEV_NAME); | |
13072 | } | |
13073 | if (ret < 0) { | |
13074 | hio_warn("unable to register slave major number\n"); | |
13075 | goto out_register_blkdev_sl; | |
13076 | } | |
13077 | ||
13078 | if (mode < SSD_DRV_MODE_STANDARD || mode > SSD_DRV_MODE_BASE) { | |
13079 | mode = SSD_DRV_MODE_STANDARD; | |
13080 | } | |
13081 | ||
13082 | /* for debug */ | |
13083 | if (mode != SSD_DRV_MODE_STANDARD) { | |
13084 | ssd_minors = 1; | |
13085 | } | |
13086 | ||
13087 | if (int_mode < SSD_INT_LEGACY || int_mode > SSD_INT_MSIX) { | |
13088 | int_mode = SSD_INT_MODE_DEFAULT; | |
13089 | } | |
13090 | ||
13091 | if (threaded_irq) { | |
13092 | int_mode = SSD_INT_MSI; | |
13093 | } | |
13094 | ||
13095 | if (log_level >= SSD_LOG_NR_LEVEL || log_level < SSD_LOG_LEVEL_INFO) { | |
13096 | log_level = SSD_LOG_LEVEL_ERR; | |
13097 | } | |
13098 | ||
13099 | if (wmode < SSD_WMODE_BUFFER || wmode > SSD_WMODE_DEFAULT) { | |
13100 | wmode = SSD_WMODE_DEFAULT; | |
13101 | } | |
13102 | ||
13103 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)) | |
13104 | ret = pci_module_init(&ssd_driver); | |
13105 | #else | |
13106 | ret = pci_register_driver(&ssd_driver); | |
13107 | #endif | |
13108 | if (ret) { | |
13109 | hio_warn("pci init failed\n"); | |
13110 | goto out_pci_init; | |
13111 | } | |
13112 | ||
13113 | ret = register_reboot_notifier(&ssd_notifier); | |
13114 | if (ret) { | |
13115 | hio_warn("register reboot notifier failed\n"); | |
13116 | goto out_register_reboot_notifier; | |
13117 | } | |
13118 | ||
13119 | return 0; | |
13120 | ||
13121 | out_register_reboot_notifier: | |
13122 | out_pci_init: | |
13123 | pci_unregister_driver(&ssd_driver); | |
13124 | unregister_blkdev(ssd_major_sl, SSD_SDEV_NAME); | |
13125 | out_register_blkdev_sl: | |
13126 | unregister_blkdev(ssd_major, SSD_DEV_NAME); | |
13127 | out_register_blkdev: | |
13128 | unregister_chrdev(ssd_cmajor, SSD_CDEV_NAME); | |
13129 | out_register_chardev: | |
13130 | #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12)) | |
13131 | class_simple_destroy(ssd_class); | |
13132 | #else | |
13133 | class_destroy(ssd_class); | |
13134 | #endif | |
13135 | out_class_create: | |
13136 | ssd_cleanup_tasklet(); | |
13137 | out_init_tasklet: | |
13138 | ssd_cleanup_sysfs(); | |
13139 | out_init_sysfs: | |
13140 | ssd_cleanup_proc(); | |
13141 | out_init_proc: | |
13142 | ssd_cleanup_index(); | |
13143 | out_init_index: | |
13144 | return ret; | |
13145 | ||
13146 | } | |
13147 | ||
/*
 * Module exit: tear everything down in the reverse order of
 * ssd_init_module().  The ssd_exiting flag is raised first so other
 * code paths can bail out early during teardown.
 */
static void __exit ssd_cleanup_module(void)
{

	hio_info("unload driver: %s\n", DRIVER_VERSION);
	/* exiting */
	ssd_exiting = 1;

	unregister_reboot_notifier(&ssd_notifier);

	pci_unregister_driver(&ssd_driver);

	unregister_blkdev(ssd_major_sl, SSD_SDEV_NAME);
	unregister_blkdev(ssd_major, SSD_DEV_NAME);
	unregister_chrdev(ssd_cmajor, SSD_CDEV_NAME);
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
	class_simple_destroy(ssd_class);
#else
	class_destroy(ssd_class);
#endif

	ssd_cleanup_tasklet();
	ssd_cleanup_sysfs();
	ssd_cleanup_proc();
	ssd_cleanup_index();
}
13173 | ||
/*
 * Register a per-disk event callback and replay stored internal log
 * entries to it.
 *
 * Only entries whose timestamp falls inside [dev->uptime, now] are
 * replayed.  SEU_FAULT1 entries older than the last reset are skipped.
 * Temperature events (over/warn/normal) are not replayed individually:
 * only the most recent one is kept, and it is delivered at the end only
 * if the current temperature is still at or above SSD_OT_TEMP_HYST.
 *
 * Returns 0 on success, -EINVAL on a NULL bdev/callback/disk.
 */
int ssd_register_event_notifier(struct block_device *bdev, ssd_event_call event_call)
{
	struct ssd_device *dev;
	struct ssd_log *le, *temp_le = NULL;
	uint64_t cur;
	int temp = 0;
	int log_nr;

	if (!bdev || !event_call || !(bdev->bd_disk)) {
		return -EINVAL;
	}

	dev = bdev->bd_disk->private_data;
	dev->event_call = event_call;

	cur = (uint64_t)ktime_get_real_seconds();

	le = (struct ssd_log *)(dev->internal_log.log);
	log_nr = dev->internal_log.nr_log;

	while (log_nr--) {
		/* replay only entries timestamped between boot-of-device and now */
		if (le->time <= cur && le->time >= dev->uptime) {
			/* stale SEU fault from before the last reset: skip */
			if ((le->le.event == SSD_LOG_SEU_FAULT1) && (le->time < dev->reset_time)) {
				le++;
				continue;
			}
			/* coalesce temperature events: remember only the newest */
			if (le->le.event == SSD_LOG_OVER_TEMP || le->le.event == SSD_LOG_NORMAL_TEMP || le->le.event == SSD_LOG_WARN_TEMP) {
				if (!temp_le || le->time >= temp_le->time) {
					temp_le = le;
				}
				le++;
				continue;
			}
			(void)dev->event_call(dev->gd, le->le.event, ssd_parse_log(dev, le, 0));
		}
		le++;
	}

	/* deliver the latest temperature event only if still relevant */
	ssd_get_temperature(bdev, &temp);
	if (temp_le && (temp >= SSD_OT_TEMP_HYST)) {
		(void)dev->event_call(dev->gd, temp_le->le.event, ssd_parse_log(dev, temp_le, 0));
	}

	return 0;
}
13219 | ||
13220 | int ssd_unregister_event_notifier(struct block_device *bdev) | |
13221 | { | |
13222 | struct ssd_device *dev; | |
13223 | ||
13224 | if (!bdev || !(bdev->bd_disk)) { | |
13225 | return -EINVAL; | |
13226 | } | |
13227 | ||
13228 | dev = bdev->bd_disk->private_data; | |
13229 | dev->event_call = NULL; | |
13230 | ||
13231 | return 0; | |
13232 | } | |
13233 | ||
/* Symbols exported for use by other kernel modules (management tooling). */
EXPORT_SYMBOL(ssd_get_label);
EXPORT_SYMBOL(ssd_get_version);
EXPORT_SYMBOL(ssd_set_otprotect);
EXPORT_SYMBOL(ssd_bm_status);
EXPORT_SYMBOL(ssd_submit_pbio);
EXPORT_SYMBOL(ssd_get_pciaddr);
EXPORT_SYMBOL(ssd_get_temperature);
EXPORT_SYMBOL(ssd_register_event_notifier);
EXPORT_SYMBOL(ssd_unregister_event_notifier);
EXPORT_SYMBOL(ssd_reset);
EXPORT_SYMBOL(ssd_set_wmode);



module_init(ssd_init_module);
module_exit(ssd_cleanup_module);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei SSD DEV Team");
MODULE_DESCRIPTION("Huawei SSD driver");