]>
Commit | Line | Data |
---|---|---|
361ebed5 HSDT |
1 | /* |
2 | * Huawei SSD device driver | |
3 | * Copyright (c) 2016, Huawei Technologies Co., Ltd. | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify it | |
6 | * under the terms and conditions of the GNU General Public License, | |
7 | * version 2, as published by the Free Software Foundation. | |
8 | * | |
9 | * This program is distributed in the hope it will be useful, but WITHOUT | |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
12 | * more details. | |
13 | */ | |
b44043bd | 14 | |
361ebed5 HSDT |
15 | #ifndef LINUX_VERSION_CODE |
16 | #include <linux/version.h> | |
17 | #endif | |
18 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)) | |
19 | #include <linux/config.h> | |
20 | #endif | |
21 | #include <linux/types.h> | |
22 | #include <linux/kernel.h> | |
23 | #include <linux/module.h> | |
24 | #include <linux/bio.h> | |
25 | #include <linux/timer.h> | |
26 | #include <linux/init.h> | |
27 | #include <linux/pci.h> | |
28 | #include <linux/slab.h> | |
29 | #include <linux/spinlock.h> | |
30 | #include <linux/blkdev.h> | |
31 | #include <linux/sched.h> | |
32 | #include <linux/fcntl.h> | |
33 | #include <linux/interrupt.h> | |
34 | #include <linux/compiler.h> | |
35 | #include <linux/bitops.h> | |
36 | #include <linux/delay.h> | |
37 | #include <linux/time.h> | |
38 | #include <linux/stat.h> | |
39 | #include <linux/fs.h> | |
40 | #include <linux/dma-mapping.h> | |
41 | #include <linux/completion.h> | |
42 | #include <linux/workqueue.h> | |
43 | #include <linux/mm.h> | |
44 | #include <linux/ioctl.h> | |
45 | #include <linux/hdreg.h> /* HDIO_GETGEO */ | |
46 | #include <linux/list.h> | |
47 | #include <linux/reboot.h> | |
48 | #include <linux/kthread.h> | |
49 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0)) | |
50 | #include <linux/seq_file.h> | |
51 | #endif | |
52 | #include <asm/uaccess.h> | |
53 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0)) | |
54 | #include <linux/scatterlist.h> | |
55 | #include <linux/vmalloc.h> | |
56 | #else | |
57 | #include <asm/scatterlist.h> | |
58 | #endif | |
59 | #include <asm/io.h> | |
60 | #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17)) | |
61 | #include <linux/devfs_fs_kernel.h> | |
62 | #endif | |
9d793ee8 PP |
63 | #if (LINUX_VERSION_CODE > KERNEL_VERSION(5,6,0)) |
64 | #include <linux/part_stat.h> | |
65 | #endif | |
361ebed5 HSDT |
66 | |
67 | /* driver */ | |
68 | #define MODULE_NAME "hio" | |
da3355df | 69 | #define DRIVER_VERSION "2.1.0.40" |
361ebed5 HSDT |
70 | #define DRIVER_VERSION_LEN 16 |
71 | ||
72 | #define SSD_FW_MIN 0x1 | |
73 | ||
74 | #define SSD_DEV_NAME MODULE_NAME | |
75 | #define SSD_DEV_NAME_LEN 16 | |
76 | #define SSD_CDEV_NAME "c"SSD_DEV_NAME | |
77 | #define SSD_SDEV_NAME "s"SSD_DEV_NAME | |
78 | ||
79 | ||
80 | #define SSD_CMAJOR 0 | |
81 | #define SSD_MAJOR 0 | |
82 | #define SSD_MAJOR_SL 0 | |
83 | #define SSD_MINORS 16 | |
84 | ||
85 | #define SSD_MAX_DEV 702 | |
86 | #define SSD_ALPHABET_NUM 26 | |
87 | ||
88 | #define hio_info(f, arg...) printk(KERN_INFO MODULE_NAME"info: " f , ## arg) | |
89 | #define hio_note(f, arg...) printk(KERN_NOTICE MODULE_NAME"note: " f , ## arg) | |
90 | #define hio_warn(f, arg...) printk(KERN_WARNING MODULE_NAME"warn: " f , ## arg) | |
91 | #define hio_err(f, arg...) printk(KERN_ERR MODULE_NAME"err: " f , ## arg) | |
92 | ||
6dec1b12 | 93 | #if (LINUX_VERSION_CODE > KERNEL_VERSION(5,6,0)) && (LINUX_VERSION_CODE < KERNEL_VERSION(5,11,0)) |
b6e5398a PP |
94 | struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, sector_t sector); |
95 | #endif | |
96 | ||
361ebed5 HSDT |
97 | /* slave port */ |
98 | #define SSD_SLAVE_PORT_DEVID 0x000a | |
99 | ||
100 | /* int mode */ | |
101 | ||
102 | /* 2.6.9 msi affinity bug, should turn msi & msi-x off */ | |
103 | //#define SSD_MSI | |
104 | #define SSD_ESCAPE_IRQ | |
105 | ||
106 | //#define SSD_MSIX | |
107 | #ifndef MODULE | |
108 | #define SSD_MSIX | |
109 | #endif | |
110 | #define SSD_MSIX_VEC 8 | |
111 | #ifdef SSD_MSIX | |
112 | #undef SSD_MSI | |
da3355df | 113 | #undef SSD_ESCAPE_IRQ |
361ebed5 HSDT |
114 | #define SSD_MSIX_AFFINITY_FORCE |
115 | #endif | |
116 | ||
117 | #define SSD_TRIM | |
118 | ||
119 | /* Over temperature protect */ | |
120 | #define SSD_OT_PROTECT | |
121 | ||
122 | #ifdef SSD_QUEUE_PBIO | |
123 | #define BIO_SSD_PBIO 20 | |
124 | #endif | |
125 | ||
126 | /* debug */ | |
127 | //#define SSD_DEBUG_ERR | |
128 | ||
129 | /* cmd timer */ | |
130 | #define SSD_CMD_TIMEOUT (60*HZ) | |
131 | ||
132 | /* i2c & smbus */ | |
133 | #define SSD_SPI_TIMEOUT (5*HZ) | |
134 | #define SSD_I2C_TIMEOUT (5*HZ) | |
135 | ||
136 | #define SSD_I2C_MAX_DATA (127) | |
137 | #define SSD_SMBUS_BLOCK_MAX (32) | |
138 | #define SSD_SMBUS_DATA_MAX (SSD_SMBUS_BLOCK_MAX + 2) | |
139 | ||
140 | /* wait for init */ | |
141 | #define SSD_INIT_WAIT (1000) //1s | |
142 | #define SSD_CONTROLLER_WAIT (20*1000/SSD_INIT_WAIT) //20s | |
143 | #define SSD_INIT_MAX_WAIT (500*1000/SSD_INIT_WAIT) //500s | |
144 | #define SSD_INIT_MAX_WAIT_V3_2 (1400*1000/SSD_INIT_WAIT) //1400s | |
145 | #define SSD_RAM_INIT_MAX_WAIT (10*1000/SSD_INIT_WAIT) //10s | |
146 | #define SSD_CH_INFO_MAX_WAIT (10*1000/SSD_INIT_WAIT) //10s | |
147 | ||
148 | /* blkdev busy wait */ | |
149 | #define SSD_DEV_BUSY_WAIT 1000 //ms | |
150 | #define SSD_DEV_BUSY_MAX_WAIT (8*1000/SSD_DEV_BUSY_WAIT) //8s | |
151 | ||
152 | /* smbus retry */ | |
153 | #define SSD_SMBUS_RETRY_INTERVAL (5) //ms | |
154 | #define SSD_SMBUS_RETRY_MAX (1000/SSD_SMBUS_RETRY_INTERVAL) | |
155 | ||
156 | #define SSD_BM_RETRY_MAX 7 | |
157 | ||
158 | /* bm routine interval */ | |
159 | #define SSD_BM_CAP_LEARNING_DELAY (10*60*1000) | |
160 | ||
161 | /* routine interval */ | |
162 | #define SSD_ROUTINE_INTERVAL (10*1000) //10s | |
163 | #define SSD_HWMON_ROUTINE_TICK (60*1000/SSD_ROUTINE_INTERVAL) | |
164 | #define SSD_CAPMON_ROUTINE_TICK ((3600*1000/SSD_ROUTINE_INTERVAL)*24*30) | |
165 | #define SSD_CAPMON2_ROUTINE_TICK (10*60*1000/SSD_ROUTINE_INTERVAL) //fault recover | |
166 | ||
167 | /* dma align */ | |
168 | #define SSD_DMA_ALIGN (16) | |
169 | ||
170 | /* some hw defalut */ | |
171 | #define SSD_LOG_MAX_SZ 4096 | |
172 | ||
173 | #define SSD_NAND_OOB_SZ 1024 | |
174 | #define SSD_NAND_ID_SZ 8 | |
175 | #define SSD_NAND_ID_BUFF_SZ 1024 | |
176 | #define SSD_NAND_MAX_CE 2 | |
177 | ||
178 | #define SSD_BBT_RESERVED 8 | |
179 | ||
180 | #define SSD_ECC_MAX_FLIP (64+1) | |
181 | ||
182 | #define SSD_RAM_ALIGN 16 | |
183 | ||
184 | ||
185 | #define SSD_RELOAD_FLAG 0x3333CCCC | |
186 | #define SSD_RELOAD_FW 0xAA5555AA | |
187 | #define SSD_RESET_NOINIT 0xAA5555AA | |
188 | #define SSD_RESET 0x55AAAA55 | |
189 | #define SSD_RESET_FULL 0x5A | |
190 | //#define SSD_RESET_WAIT 1000 //1s | |
191 | //#define SSD_RESET_MAX_WAIT (200*1000/SSD_RESET_WAIT) //200s | |
192 | ||
193 | ||
194 | /* reverion 1 */ | |
195 | #define SSD_PROTOCOL_V1 0x0 | |
196 | ||
197 | #define SSD_ROM_SIZE (16*1024*1024) | |
198 | #define SSD_ROM_BLK_SIZE (256*1024) | |
199 | #define SSD_ROM_PAGE_SIZE (256) | |
200 | #define SSD_ROM_NR_BRIDGE_FW 2 | |
201 | #define SSD_ROM_NR_CTRL_FW 2 | |
202 | #define SSD_ROM_BRIDGE_FW_BASE 0 | |
203 | #define SSD_ROM_BRIDGE_FW_SIZE (2*1024*1024) | |
204 | #define SSD_ROM_CTRL_FW_BASE (SSD_ROM_NR_BRIDGE_FW*SSD_ROM_BRIDGE_FW_SIZE) | |
205 | #define SSD_ROM_CTRL_FW_SIZE (5*1024*1024) | |
206 | #define SSD_ROM_LABEL_BASE (SSD_ROM_CTRL_FW_BASE+SSD_ROM_CTRL_FW_SIZE*SSD_ROM_NR_CTRL_FW) | |
207 | #define SSD_ROM_VP_BASE (SSD_ROM_LABEL_BASE+SSD_ROM_BLK_SIZE) | |
208 | ||
209 | /* reverion 3 */ | |
210 | #define SSD_PROTOCOL_V3 0x3000000 | |
211 | #define SSD_PROTOCOL_V3_1_1 0x3010001 | |
212 | #define SSD_PROTOCOL_V3_1_3 0x3010003 | |
213 | #define SSD_PROTOCOL_V3_2 0x3020000 | |
214 | #define SSD_PROTOCOL_V3_2_1 0x3020001 /* <4KB improved */ | |
215 | #define SSD_PROTOCOL_V3_2_2 0x3020002 /* ot protect */ | |
216 | #define SSD_PROTOCOL_V3_2_4 0x3020004 | |
217 | ||
218 | ||
219 | #define SSD_PV3_ROM_NR_BM_FW 1 | |
220 | #define SSD_PV3_ROM_BM_FW_SZ (64*1024*8) | |
221 | ||
222 | #define SSD_ROM_LOG_SZ (64*1024*4) | |
223 | ||
224 | #define SSD_ROM_NR_SMART_MAX 2 | |
225 | #define SSD_PV3_ROM_NR_SMART SSD_ROM_NR_SMART_MAX | |
226 | #define SSD_PV3_ROM_SMART_SZ (64*1024) | |
227 | ||
228 | /* reverion 3.2 */ | |
229 | #define SSD_PV3_2_ROM_LOG_SZ (64*1024*80) /* 5MB */ | |
230 | #define SSD_PV3_2_ROM_SEC_SZ (256*1024) /* 256KB */ | |
231 | ||
232 | ||
233 | /* register */ | |
234 | #define SSD_REQ_FIFO_REG 0x0000 | |
235 | #define SSD_RESP_FIFO_REG 0x0008 //0x0010 | |
236 | #define SSD_RESP_PTR_REG 0x0010 //0x0018 | |
237 | #define SSD_INTR_INTERVAL_REG 0x0018 | |
238 | #define SSD_READY_REG 0x001C | |
239 | #define SSD_BRIDGE_TEST_REG 0x0020 | |
240 | #define SSD_STRIPE_SIZE_REG 0x0028 | |
241 | #define SSD_CTRL_VER_REG 0x0030 //controller | |
242 | #define SSD_BRIDGE_VER_REG 0x0034 //bridge | |
243 | #define SSD_PCB_VER_REG 0x0038 | |
244 | #define SSD_BURN_FLAG_REG 0x0040 | |
245 | #define SSD_BRIDGE_INFO_REG 0x0044 | |
246 | ||
247 | #define SSD_WL_VAL_REG 0x0048 //32-bit | |
248 | ||
249 | #define SSD_BB_INFO_REG 0x004C | |
250 | ||
251 | #define SSD_ECC_TEST_REG 0x0050 //test only | |
252 | #define SSD_ERASE_TEST_REG 0x0058 //test only | |
253 | #define SSD_WRITE_TEST_REG 0x0060 //test only | |
254 | ||
255 | #define SSD_RESET_REG 0x0068 | |
256 | #define SSD_RELOAD_FW_REG 0x0070 | |
257 | ||
258 | #define SSD_RESERVED_BLKS_REG 0x0074 | |
259 | #define SSD_VALID_PAGES_REG 0x0078 | |
260 | #define SSD_CH_INFO_REG 0x007C | |
261 | ||
262 | #define SSD_CTRL_TEST_REG_SZ 0x8 | |
263 | #define SSD_CTRL_TEST_REG0 0x0080 | |
264 | #define SSD_CTRL_TEST_REG1 0x0088 | |
265 | #define SSD_CTRL_TEST_REG2 0x0090 | |
266 | #define SSD_CTRL_TEST_REG3 0x0098 | |
267 | #define SSD_CTRL_TEST_REG4 0x00A0 | |
268 | #define SSD_CTRL_TEST_REG5 0x00A8 | |
269 | #define SSD_CTRL_TEST_REG6 0x00B0 | |
270 | #define SSD_CTRL_TEST_REG7 0x00B8 | |
271 | ||
272 | #define SSD_FLASH_INFO_REG0 0x00C0 | |
273 | #define SSD_FLASH_INFO_REG1 0x00C8 | |
274 | #define SSD_FLASH_INFO_REG2 0x00D0 | |
275 | #define SSD_FLASH_INFO_REG3 0x00D8 | |
276 | #define SSD_FLASH_INFO_REG4 0x00E0 | |
277 | #define SSD_FLASH_INFO_REG5 0x00E8 | |
278 | #define SSD_FLASH_INFO_REG6 0x00F0 | |
279 | #define SSD_FLASH_INFO_REG7 0x00F8 | |
280 | ||
281 | #define SSD_RESP_INFO_REG 0x01B8 | |
282 | #define SSD_NAND_BUFF_BASE 0x01BC //for nand write | |
283 | ||
284 | #define SSD_CHIP_INFO_REG_SZ 0x10 | |
285 | #define SSD_CHIP_INFO_REG0 0x0100 //128 bit | |
286 | #define SSD_CHIP_INFO_REG1 0x0110 | |
287 | #define SSD_CHIP_INFO_REG2 0x0120 | |
288 | #define SSD_CHIP_INFO_REG3 0x0130 | |
289 | #define SSD_CHIP_INFO_REG4 0x0140 | |
290 | #define SSD_CHIP_INFO_REG5 0x0150 | |
291 | #define SSD_CHIP_INFO_REG6 0x0160 | |
292 | #define SSD_CHIP_INFO_REG7 0x0170 | |
293 | ||
294 | #define SSD_RAM_INFO_REG 0x01C4 | |
295 | ||
296 | #define SSD_BBT_BASE_REG 0x01C8 | |
297 | #define SSD_ECT_BASE_REG 0x01CC | |
298 | ||
299 | #define SSD_CLEAR_INTR_REG 0x01F0 | |
300 | ||
301 | #define SSD_INIT_STATE_REG_SZ 0x8 | |
302 | #define SSD_INIT_STATE_REG0 0x0200 | |
303 | #define SSD_INIT_STATE_REG1 0x0208 | |
304 | #define SSD_INIT_STATE_REG2 0x0210 | |
305 | #define SSD_INIT_STATE_REG3 0x0218 | |
306 | #define SSD_INIT_STATE_REG4 0x0220 | |
307 | #define SSD_INIT_STATE_REG5 0x0228 | |
308 | #define SSD_INIT_STATE_REG6 0x0230 | |
309 | #define SSD_INIT_STATE_REG7 0x0238 | |
310 | ||
311 | #define SSD_ROM_INFO_REG 0x0600 | |
312 | #define SSD_ROM_BRIDGE_FW_INFO_REG 0x0604 | |
313 | #define SSD_ROM_CTRL_FW_INFO_REG 0x0608 | |
314 | #define SSD_ROM_VP_INFO_REG 0x060C | |
315 | ||
316 | #define SSD_LOG_INFO_REG 0x0610 | |
317 | #define SSD_LED_REG 0x0614 | |
318 | #define SSD_MSG_BASE_REG 0x06F8 | |
319 | ||
320 | /*spi reg */ | |
321 | #define SSD_SPI_REG_CMD 0x0180 | |
322 | #define SSD_SPI_REG_CMD_HI 0x0184 | |
323 | #define SSD_SPI_REG_WDATA 0x0188 | |
324 | #define SSD_SPI_REG_ID 0x0190 | |
325 | #define SSD_SPI_REG_STATUS 0x0198 | |
326 | #define SSD_SPI_REG_RDATA 0x01A0 | |
327 | #define SSD_SPI_REG_READY 0x01A8 | |
328 | ||
329 | /* i2c register */ | |
330 | #define SSD_I2C_CTRL_REG 0x06F0 | |
331 | #define SSD_I2C_RDATA_REG 0x06F4 | |
332 | ||
333 | /* temperature reg */ | |
334 | #define SSD_BRIGE_TEMP_REG 0x0618 | |
335 | ||
336 | #define SSD_CTRL_TEMP_REG0 0x0700 | |
337 | #define SSD_CTRL_TEMP_REG1 0x0708 | |
338 | #define SSD_CTRL_TEMP_REG2 0x0710 | |
339 | #define SSD_CTRL_TEMP_REG3 0x0718 | |
340 | #define SSD_CTRL_TEMP_REG4 0x0720 | |
341 | #define SSD_CTRL_TEMP_REG5 0x0728 | |
342 | #define SSD_CTRL_TEMP_REG6 0x0730 | |
343 | #define SSD_CTRL_TEMP_REG7 0x0738 | |
344 | ||
345 | /* reversion 3 reg */ | |
346 | #define SSD_PROTOCOL_VER_REG 0x01B4 | |
347 | ||
348 | #define SSD_FLUSH_TIMEOUT_REG 0x02A4 | |
349 | #define SSD_BM_FAULT_REG 0x0660 | |
350 | ||
351 | #define SSD_PV3_RAM_STATUS_REG_SZ 0x4 | |
352 | #define SSD_PV3_RAM_STATUS_REG0 0x0260 | |
353 | #define SSD_PV3_RAM_STATUS_REG1 0x0264 | |
354 | #define SSD_PV3_RAM_STATUS_REG2 0x0268 | |
355 | #define SSD_PV3_RAM_STATUS_REG3 0x026C | |
356 | #define SSD_PV3_RAM_STATUS_REG4 0x0270 | |
357 | #define SSD_PV3_RAM_STATUS_REG5 0x0274 | |
358 | #define SSD_PV3_RAM_STATUS_REG6 0x0278 | |
359 | #define SSD_PV3_RAM_STATUS_REG7 0x027C | |
360 | ||
361 | #define SSD_PV3_CHIP_INFO_REG_SZ 0x40 | |
362 | #define SSD_PV3_CHIP_INFO_REG0 0x0300 | |
363 | #define SSD_PV3_CHIP_INFO_REG1 0x0340 | |
364 | #define SSD_PV3_CHIP_INFO_REG2 0x0380 | |
365 | #define SSD_PV3_CHIP_INFO_REG3 0x03B0 | |
366 | #define SSD_PV3_CHIP_INFO_REG4 0x0400 | |
367 | #define SSD_PV3_CHIP_INFO_REG5 0x0440 | |
368 | #define SSD_PV3_CHIP_INFO_REG6 0x0480 | |
369 | #define SSD_PV3_CHIP_INFO_REG7 0x04B0 | |
370 | ||
371 | #define SSD_PV3_INIT_STATE_REG_SZ 0x20 | |
372 | #define SSD_PV3_INIT_STATE_REG0 0x0500 | |
373 | #define SSD_PV3_INIT_STATE_REG1 0x0520 | |
374 | #define SSD_PV3_INIT_STATE_REG2 0x0540 | |
375 | #define SSD_PV3_INIT_STATE_REG3 0x0560 | |
376 | #define SSD_PV3_INIT_STATE_REG4 0x0580 | |
377 | #define SSD_PV3_INIT_STATE_REG5 0x05A0 | |
378 | #define SSD_PV3_INIT_STATE_REG6 0x05C0 | |
379 | #define SSD_PV3_INIT_STATE_REG7 0x05E0 | |
380 | ||
381 | /* reversion 3.1.1 reg */ | |
382 | #define SSD_FULL_RESET_REG 0x01B0 | |
383 | ||
384 | #define SSD_CTRL_REG_ZONE_SZ 0x800 | |
385 | ||
386 | #define SSD_BB_THRESHOLD_L1_REG 0x2C0 | |
387 | #define SSD_BB_THRESHOLD_L2_REG 0x2C4 | |
388 | ||
389 | #define SSD_BB_ACC_REG_SZ 0x4 | |
390 | #define SSD_BB_ACC_REG0 0x21C0 | |
391 | #define SSD_BB_ACC_REG1 0x29C0 | |
392 | #define SSD_BB_ACC_REG2 0x31C0 | |
393 | ||
394 | #define SSD_EC_THRESHOLD_L1_REG 0x2C8 | |
395 | #define SSD_EC_THRESHOLD_L2_REG 0x2CC | |
396 | ||
397 | #define SSD_EC_ACC_REG_SZ 0x4 | |
398 | #define SSD_EC_ACC_REG0 0x21E0 | |
399 | #define SSD_EC_ACC_REG1 0x29E0 | |
400 | #define SSD_EC_ACC_REG2 0x31E0 | |
401 | ||
402 | /* reversion 3.1.2 & 3.1.3 reg */ | |
403 | #define SSD_HW_STATUS_REG 0x02AC | |
404 | ||
405 | #define SSD_PLP_INFO_REG 0x0664 | |
406 | ||
407 | /*reversion 3.2 reg*/ | |
408 | #define SSD_POWER_ON_REG 0x01EC | |
409 | #define SSD_PCIE_LINKSTATUS_REG 0x01F8 | |
410 | #define SSD_PL_CAP_LEARN_REG 0x01FC | |
411 | ||
412 | #define SSD_FPGA_1V0_REG0 0x2070 | |
413 | #define SSD_FPGA_1V8_REG0 0x2078 | |
414 | #define SSD_FPGA_1V0_REG1 0x2870 | |
415 | #define SSD_FPGA_1V8_REG1 0x2878 | |
416 | ||
417 | /*reversion 3.2 reg*/ | |
418 | #define SSD_READ_OT_REG0 0x2260 | |
419 | #define SSD_WRITE_OT_REG0 0x2264 | |
420 | #define SSD_READ_OT_REG1 0x2A60 | |
421 | #define SSD_WRITE_OT_REG1 0x2A64 | |
422 | ||
423 | ||
424 | /* function */ | |
425 | #define SSD_FUNC_READ 0x01 | |
426 | #define SSD_FUNC_WRITE 0x02 | |
427 | #define SSD_FUNC_NAND_READ_WOOB 0x03 | |
428 | #define SSD_FUNC_NAND_READ 0x04 | |
429 | #define SSD_FUNC_NAND_WRITE 0x05 | |
430 | #define SSD_FUNC_NAND_ERASE 0x06 | |
431 | #define SSD_FUNC_NAND_READ_ID 0x07 | |
432 | #define SSD_FUNC_READ_LOG 0x08 | |
433 | #define SSD_FUNC_TRIM 0x09 | |
434 | #define SSD_FUNC_RAM_READ 0x10 | |
435 | #define SSD_FUNC_RAM_WRITE 0x11 | |
436 | #define SSD_FUNC_FLUSH 0x12 //cache / bbt | |
437 | ||
438 | /* spi function */ | |
439 | #define SSD_SPI_CMD_PROGRAM 0x02 | |
440 | #define SSD_SPI_CMD_READ 0x03 | |
441 | #define SSD_SPI_CMD_W_DISABLE 0x04 | |
442 | #define SSD_SPI_CMD_READ_STATUS 0x05 | |
443 | #define SSD_SPI_CMD_W_ENABLE 0x06 | |
444 | #define SSD_SPI_CMD_ERASE 0xd8 | |
445 | #define SSD_SPI_CMD_CLSR 0x30 | |
446 | #define SSD_SPI_CMD_READ_ID 0x9f | |
447 | ||
448 | /* i2c */ | |
449 | #define SSD_I2C_CTRL_READ 0x00 | |
450 | #define SSD_I2C_CTRL_WRITE 0x01 | |
451 | ||
452 | /* i2c internal register */ | |
453 | #define SSD_I2C_CFG_REG 0x00 | |
454 | #define SSD_I2C_DATA_REG 0x01 | |
455 | #define SSD_I2C_CMD_REG 0x02 | |
456 | #define SSD_I2C_STATUS_REG 0x03 | |
457 | #define SSD_I2C_SADDR_REG 0x04 | |
458 | #define SSD_I2C_LEN_REG 0x05 | |
459 | #define SSD_I2C_RLEN_REG 0x06 | |
460 | #define SSD_I2C_WLEN_REG 0x07 | |
461 | #define SSD_I2C_RESET_REG 0x08 //write for reset | |
462 | #define SSD_I2C_PRER_REG 0x09 | |
463 | ||
464 | ||
465 | /* hw mon */ | |
466 | /* FPGA volt = ADC_value / 4096 * 3v */ | |
467 | #define SSD_FPGA_1V0_ADC_MIN 1228 // 0.9v | |
468 | #define SSD_FPGA_1V0_ADC_MAX 1502 // 1.1v | |
469 | #define SSD_FPGA_1V8_ADC_MIN 2211 // 1.62v | |
470 | #define SSD_FPGA_1V8_ADC_MAX 2703 // 1.98 | |
471 | ||
472 | /* ADC value */ | |
473 | #define SSD_FPGA_VOLT_MAX(val) (((val) & 0xffff) >> 4) | |
474 | #define SSD_FPGA_VOLT_MIN(val) (((val >> 16) & 0xffff) >> 4) | |
475 | #define SSD_FPGA_VOLT_CUR(val) (((val >> 32) & 0xffff) >> 4) | |
476 | #define SSD_FPGA_VOLT(val) ((val * 3000) >> 12) | |
477 | ||
478 | #define SSD_VOLT_LOG_DATA(idx, ctrl, volt) (((uint32_t)idx << 24) | ((uint32_t)ctrl << 16) | ((uint32_t)volt)) | |
479 | ||
/* FPGA supply rails monitored via the on-chip ADC. */
enum ssd_fpga_volt
{
	SSD_FPGA_1V0 = 0,	/* 1.0V core rail */
	SSD_FPGA_1V8,		/* 1.8V rail */
	SSD_FPGA_VOLT_NR	/* number of monitored FPGA rails */
};

/* Clock-health fault conditions reported by the hardware monitor. */
enum ssd_clock
{
	SSD_CLOCK_166M_LOST = 0,
	SSD_CLOCK_166M_SKEW,
	SSD_CLOCK_156M_LOST,
	SSD_CLOCK_156M_SKEW,
	SSD_CLOCK_NR		/* number of clock fault types */
};
495 | ||
496 | /* sensor */ | |
497 | #define SSD_SENSOR_LM75_SADDRESS (0x49 << 1) | |
498 | #define SSD_SENSOR_LM80_SADDRESS (0x28 << 1) | |
499 | ||
500 | #define SSD_SENSOR_CONVERT_TEMP(val) ((int)(val >> 8)) | |
501 | ||
502 | #define SSD_INLET_OT_TEMP (55) //55 DegC | |
503 | #define SSD_INLET_OT_HYST (50) //50 DegC | |
504 | #define SSD_FLASH_OT_TEMP (70) //70 DegC | |
505 | #define SSD_FLASH_OT_HYST (65) //65 DegC | |
506 | ||
/* On-board temperature sensor models. */
enum ssd_sensor
{
	SSD_SENSOR_LM80 = 0,	/* LM96080 hardware monitor */
	SSD_SENSOR_LM75,	/* LM75 temperature sensor */
	SSD_SENSOR_NR		/* number of sensor types */
};


/* LM75 register map (pointer-register values). */
enum ssd_lm75_reg
{
	SSD_LM75_REG_TEMP = 0,	/* temperature reading */
	SSD_LM75_REG_CONF,	/* configuration */
	SSD_LM75_REG_THYST,	/* hysteresis threshold */
	SSD_LM75_REG_TOS	/* overtemperature shutdown threshold */
};
523 | ||
524 | /* lm96080 */ | |
525 | #define SSD_LM80_REG_IN_MAX(nr) (0x2a + (nr) * 2) | |
526 | #define SSD_LM80_REG_IN_MIN(nr) (0x2b + (nr) * 2) | |
527 | #define SSD_LM80_REG_IN(nr) (0x20 + (nr)) | |
528 | ||
529 | #define SSD_LM80_REG_FAN1 0x28 | |
530 | #define SSD_LM80_REG_FAN2 0x29 | |
531 | #define SSD_LM80_REG_FAN_MIN(nr) (0x3b + (nr)) | |
532 | ||
533 | #define SSD_LM80_REG_TEMP 0x27 | |
534 | #define SSD_LM80_REG_TEMP_HOT_MAX 0x38 | |
535 | #define SSD_LM80_REG_TEMP_HOT_HYST 0x39 | |
536 | #define SSD_LM80_REG_TEMP_OS_MAX 0x3a | |
537 | #define SSD_LM80_REG_TEMP_OS_HYST 0x3b | |
538 | ||
539 | #define SSD_LM80_REG_CONFIG 0x00 | |
540 | #define SSD_LM80_REG_ALARM1 0x01 | |
541 | #define SSD_LM80_REG_ALARM2 0x02 | |
542 | #define SSD_LM80_REG_MASK1 0x03 | |
543 | #define SSD_LM80_REG_MASK2 0x04 | |
544 | #define SSD_LM80_REG_FANDIV 0x05 | |
545 | #define SSD_LM80_REG_RES 0x06 | |
546 | ||
547 | #define SSD_LM80_CONVERT_VOLT(val) ((val * 10) >> 8) | |
548 | ||
549 | #define SSD_LM80_3V3_VOLT(val) ((val)*33/19) | |
550 | ||
551 | #define SSD_LM80_CONV_INTERVAL (1000) | |
552 | ||
553 | enum ssd_lm80_in | |
554 | { | |
555 | SSD_LM80_IN_CAP = 0, | |
556 | SSD_LM80_IN_1V2, | |
557 | SSD_LM80_IN_1V2a, | |
558 | SSD_LM80_IN_1V5, | |
559 | SSD_LM80_IN_1V8, | |
560 | SSD_LM80_IN_FPGA_3V3, | |
561 | SSD_LM80_IN_3V3, | |
562 | SSD_LM80_IN_NR | |
563 | }; | |
564 | ||
565 | struct ssd_lm80_limit | |
566 | { | |
567 | uint8_t low; | |
568 | uint8_t high; | |
569 | }; | |
570 | ||
571 | /* +/- 5% except cap in*/ | |
572 | static struct ssd_lm80_limit ssd_lm80_limit[SSD_LM80_IN_NR] = { | |
573 | {171, 217}, /* CAP in: 1710 ~ 2170 */ | |
574 | {114, 126}, | |
575 | {114, 126}, | |
576 | {142, 158}, | |
577 | {171, 189}, | |
578 | {180, 200}, | |
579 | {180, 200}, | |
580 | }; | |
581 | ||
582 | /* temperature sensors */ | |
/* Temperature measurement points on the board. */
enum ssd_temp_sensor
{
	SSD_TEMP_INLET = 0,	/* air inlet */
	SSD_TEMP_FLASH,		/* NAND flash area */
	SSD_TEMP_CTRL,		/* controller */
	SSD_TEMP_NR		/* number of measurement points */
};
590 | ||
591 | ||
592 | #ifdef SSD_OT_PROTECT | |
593 | #define SSD_OT_DELAY (60) //ms | |
594 | ||
595 | #define SSD_OT_TEMP (90) //90 DegC | |
596 | ||
597 | #define SSD_OT_TEMP_HYST (85) //85 DegC | |
598 | #endif | |
599 | ||
600 | /* fpga temperature */ | |
601 | //#define CONVERT_TEMP(val) ((float)(val)*503.975f/4096.0f-273.15f) | |
602 | #define CONVERT_TEMP(val) ((val)*504/4096-273) | |
603 | ||
604 | #define MAX_TEMP(val) CONVERT_TEMP(((val & 0xffff) >> 4)) | |
605 | #define MIN_TEMP(val) CONVERT_TEMP((((val>>16) & 0xffff) >> 4)) | |
606 | #define CUR_TEMP(val) CONVERT_TEMP((((val>>32) & 0xffff) >> 4)) | |
607 | ||
608 | ||
609 | /* CAP monitor */ | |
610 | #define SSD_PL_CAP_U1 SSD_LM80_REG_IN(SSD_LM80_IN_CAP) | |
611 | #define SSD_PL_CAP_U2 SSD_LM80_REG_IN(SSD_LM80_IN_1V8) | |
612 | #define SSD_PL_CAP_LEARN(u1, u2, t) ((t*(u1+u2))/(2*162*(u1-u2))) | |
613 | #define SSD_PL_CAP_LEARN_WAIT (20) //20ms | |
614 | #define SSD_PL_CAP_LEARN_MAX_WAIT (1000/SSD_PL_CAP_LEARN_WAIT) //1s | |
615 | ||
616 | #define SSD_PL_CAP_CHARGE_WAIT (1000) | |
617 | #define SSD_PL_CAP_CHARGE_MAX_WAIT ((120*1000)/SSD_PL_CAP_CHARGE_WAIT) //120s | |
618 | ||
619 | #define SSD_PL_CAP_VOLT(val) (val*7) | |
620 | ||
621 | #define SSD_PL_CAP_VOLT_FULL (13700) | |
622 | #define SSD_PL_CAP_VOLT_READY (12880) | |
623 | ||
624 | #define SSD_PL_CAP_THRESHOLD (8900) | |
625 | #define SSD_PL_CAP_CP_THRESHOLD (5800) | |
626 | #define SSD_PL_CAP_THRESHOLD_HYST (100) | |
627 | ||
/* Power-loss capacitor monitor slots. */
enum ssd_pl_cap_status
{
	SSD_PL_CAP = 0,
	SSD_PL_CAP_NR	/* number of monitored capacitor banks */
};

/* Power-loss capacitor bank configuration. */
enum ssd_pl_cap_type
{
	SSD_PL_CAP_DEFAULT = 0,	/* 4 cap */
	SSD_PL_CAP_CP		/* 3 cap */
};
639 | ||
640 | ||
641 | /* hwmon offset */ | |
642 | #define SSD_HWMON_OFFS_TEMP (0) | |
643 | #define SSD_HWMON_OFFS_SENSOR (SSD_HWMON_OFFS_TEMP + SSD_TEMP_NR) | |
644 | #define SSD_HWMON_OFFS_PL_CAP (SSD_HWMON_OFFS_SENSOR + SSD_SENSOR_NR) | |
645 | #define SSD_HWMON_OFFS_LM80 (SSD_HWMON_OFFS_PL_CAP + SSD_PL_CAP_NR) | |
646 | #define SSD_HWMON_OFFS_CLOCK (SSD_HWMON_OFFS_LM80 + SSD_LM80_IN_NR) | |
647 | #define SSD_HWMON_OFFS_FPGA (SSD_HWMON_OFFS_CLOCK + SSD_CLOCK_NR) | |
648 | ||
649 | #define SSD_HWMON_TEMP(idx) (SSD_HWMON_OFFS_TEMP + idx) | |
650 | #define SSD_HWMON_SENSOR(idx) (SSD_HWMON_OFFS_SENSOR + idx) | |
651 | #define SSD_HWMON_PL_CAP(idx) (SSD_HWMON_OFFS_PL_CAP + idx) | |
652 | #define SSD_HWMON_LM80(idx) (SSD_HWMON_OFFS_LM80 + idx) | |
653 | #define SSD_HWMON_CLOCK(idx) (SSD_HWMON_OFFS_CLOCK + idx) | |
654 | #define SSD_HWMON_FPGA(ctrl, idx) (SSD_HWMON_OFFS_FPGA + (ctrl * SSD_FPGA_VOLT_NR) + idx) | |
655 | ||
656 | ||
657 | ||
658 | /* fifo */ | |
659 | typedef struct sfifo | |
660 | { | |
661 | uint32_t in; | |
662 | uint32_t out; | |
663 | uint32_t size; | |
664 | uint32_t esize; | |
665 | uint32_t mask; | |
666 | spinlock_t lock; | |
667 | void *data; | |
668 | } sfifo_t; | |
669 | ||
670 | static int sfifo_alloc(struct sfifo *fifo, uint32_t size, uint32_t esize) | |
671 | { | |
672 | uint32_t __size = 1; | |
673 | ||
674 | if (!fifo || size > INT_MAX || esize == 0) { | |
675 | return -EINVAL; | |
676 | } | |
677 | ||
678 | while (__size < size) __size <<= 1; | |
679 | ||
680 | if (__size < 2) { | |
681 | return -EINVAL; | |
682 | } | |
683 | ||
684 | fifo->data = vmalloc(esize * __size); | |
685 | if (!fifo->data) { | |
686 | return -ENOMEM; | |
687 | } | |
688 | ||
689 | fifo->in = 0; | |
690 | fifo->out = 0; | |
691 | fifo->mask = __size - 1; | |
692 | fifo->size = __size; | |
693 | fifo->esize = esize; | |
694 | spin_lock_init(&fifo->lock); | |
695 | ||
696 | return 0; | |
697 | } | |
698 | ||
699 | static void sfifo_free(struct sfifo *fifo) | |
700 | { | |
701 | if (!fifo) { | |
702 | return; | |
703 | } | |
704 | ||
705 | vfree(fifo->data); | |
706 | fifo->data = NULL; | |
707 | fifo->in = 0; | |
708 | fifo->out = 0; | |
709 | fifo->mask = 0; | |
710 | fifo->size = 0; | |
711 | fifo->esize = 0; | |
712 | } | |
713 | ||
714 | static int __sfifo_put(struct sfifo *fifo, void *val) | |
715 | { | |
716 | if (((fifo->in + 1) & fifo->mask) == fifo->out) { | |
717 | return -1; | |
718 | } | |
719 | ||
720 | memcpy((fifo->data + (fifo->in * fifo->esize)), val, fifo->esize); | |
721 | fifo->in = (fifo->in + 1) & fifo->mask; | |
722 | ||
723 | return 0; | |
724 | } | |
725 | ||
726 | static int sfifo_put(struct sfifo *fifo, void *val) | |
727 | { | |
728 | int ret = 0; | |
729 | ||
730 | if (!fifo || !val) { | |
731 | return -EINVAL; | |
732 | } | |
733 | ||
734 | if (!in_interrupt()) { | |
735 | spin_lock_irq(&fifo->lock); | |
736 | ret = __sfifo_put(fifo, val); | |
737 | spin_unlock_irq(&fifo->lock); | |
738 | } else { | |
739 | spin_lock(&fifo->lock); | |
740 | ret = __sfifo_put(fifo, val); | |
741 | spin_unlock(&fifo->lock); | |
742 | } | |
743 | ||
744 | return ret; | |
745 | } | |
746 | ||
747 | static int __sfifo_get(struct sfifo *fifo, void *val) | |
748 | { | |
749 | if (fifo->out == fifo->in) { | |
750 | return -1; | |
751 | } | |
752 | ||
753 | memcpy(val, (fifo->data + (fifo->out * fifo->esize)), fifo->esize); | |
754 | fifo->out = (fifo->out + 1) & fifo->mask; | |
755 | ||
756 | return 0; | |
757 | } | |
758 | ||
759 | static int sfifo_get(struct sfifo *fifo, void *val) | |
760 | { | |
761 | int ret = 0; | |
762 | ||
763 | if (!fifo || !val) { | |
764 | return -EINVAL; | |
765 | } | |
766 | ||
767 | if (!in_interrupt()) { | |
768 | spin_lock_irq(&fifo->lock); | |
769 | ret = __sfifo_get(fifo, val); | |
770 | spin_unlock_irq(&fifo->lock); | |
771 | } else { | |
772 | spin_lock(&fifo->lock); | |
773 | ret = __sfifo_get(fifo, val); | |
774 | spin_unlock(&fifo->lock); | |
775 | } | |
776 | ||
777 | return ret; | |
778 | } | |
779 | ||
780 | /* bio list */ | |
781 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)) | |
782 | struct ssd_blist { | |
783 | struct bio *prev; | |
784 | struct bio *next; | |
785 | }; | |
786 | ||
787 | static inline void ssd_blist_init(struct ssd_blist *ssd_bl) | |
788 | { | |
789 | ssd_bl->prev = NULL; | |
790 | ssd_bl->next = NULL; | |
791 | } | |
792 | ||
793 | static inline struct bio *ssd_blist_get(struct ssd_blist *ssd_bl) | |
794 | { | |
795 | struct bio *bio = ssd_bl->prev; | |
796 | ||
797 | ssd_bl->prev = NULL; | |
798 | ssd_bl->next = NULL; | |
799 | ||
800 | return bio; | |
801 | } | |
802 | ||
803 | static inline void ssd_blist_add(struct ssd_blist *ssd_bl, struct bio *bio) | |
804 | { | |
805 | bio->bi_next = NULL; | |
806 | ||
807 | if (ssd_bl->next) { | |
808 | ssd_bl->next->bi_next = bio; | |
809 | } else { | |
810 | ssd_bl->prev = bio; | |
811 | } | |
812 | ||
813 | ssd_bl->next = bio; | |
814 | } | |
815 | ||
816 | #else | |
817 | #define ssd_blist bio_list | |
818 | #define ssd_blist_init bio_list_init | |
819 | #define ssd_blist_get bio_list_get | |
820 | #define ssd_blist_add bio_list_add | |
821 | #endif | |
822 | ||
823 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)) | |
824 | #define bio_start(bio) (bio->bi_sector) | |
825 | #else | |
826 | #define bio_start(bio) (bio->bi_iter.bi_sector) | |
827 | #endif | |
828 | ||
829 | /* mutex */ | |
830 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)) | |
831 | #define mutex_lock down | |
832 | #define mutex_unlock up | |
833 | #define mutex semaphore | |
834 | #define mutex_init init_MUTEX | |
835 | #endif | |
836 | ||
837 | /* i2c */ | |
/* I2C control register: raw 32-bit view plus decoded fields. */
typedef union ssd_i2c_ctrl {
	uint32_t val;		/* raw register value */
	struct {
		uint8_t wdata;	/* byte to write */
		uint8_t addr;	/* internal register address */
		uint16_t rw:1;	/* 0 = read, 1 = write */
		uint16_t pad:15;
	} bits;
}__attribute__((packed)) ssd_i2c_ctrl_t;

/* I2C read-data register: raw 32-bit view plus decoded fields. */
typedef union ssd_i2c_data {
	uint32_t val;			/* raw register value */
	struct {
		uint32_t rdata:8;	/* byte read back */
		uint32_t valid:1;	/* rdata holds fresh data */
		uint32_t pad:23;
	} bits;
}__attribute__((packed)) ssd_i2c_data_t;
856 | ||
857 | /* write mode */ | |
/* Write cache modes. */
enum ssd_write_mode
{
	SSD_WMODE_BUFFER = 0,	/* buffered write */
	SSD_WMODE_BUFFER_EX,	/* extended buffered write */
	SSD_WMODE_FUA,		/* force unit access */
	/* dummy */
	SSD_WMODE_AUTO,		/* pick a mode automatically */
	SSD_WMODE_DEFAULT	/* use the device default */
};

/* Reset types. */
enum ssd_reset_type
{
	SSD_RST_NOINIT = 0,	/* reset without re-init */
	SSD_RST_NORMAL,		/* ordinary reset */
	SSD_RST_FULL		/* full reset */
};
875 | ||
876 | /* ssd msg */ | |
/*
 * Hardware message formats. All structures are packed: their byte
 * layout is the wire/DMA format consumed by the device. Do not
 * reorder or resize fields.
 */

/* One scatter-gather element: 48-bit block address, 16-bit length. */
typedef struct ssd_sg_entry
{
	uint64_t block:48;
	uint64_t length:16;
	uint64_t buf;		/* DMA address of the data buffer */
}__attribute__((packed))ssd_sg_entry_t;

/* Read/write request header followed by at least one SG entry. */
typedef struct ssd_rw_msg
{
	uint8_t tag;		/* command tag */
	uint8_t flag;
	uint8_t nsegs;		/* number of SG entries that follow */
	uint8_t fun;		/* function code (SSD_FUNC_*) */
	uint32_t reserved;	/* for 64-bit align */
	struct ssd_sg_entry sge[1];	/* base; trailing entries follow */
}__attribute__((packed))ssd_rw_msg_t;

/* Completion message. */
typedef struct ssd_resp_msg
{
	uint8_t tag;		/* tag of the completed command */
	uint8_t status:2;
	uint8_t bitflip:6;	/* corrected bit-flip count */
	uint8_t log;
	uint8_t fun;		/* echoed function code */
	uint32_t reserved;
}__attribute__((packed))ssd_resp_msg_t;

/* Flush request. */
typedef struct ssd_flush_msg
{
	uint8_t tag;
	uint8_t flag:2;		/* flash cache 0 or bbt 1 */
	uint8_t flash:6;
	uint8_t ctrl_idx;	/* target controller index */
	uint8_t fun;
	uint32_t reserved;	/* align */
}__attribute__((packed))ssd_flush_msg_t;

/* Raw NAND operation request. */
typedef struct ssd_nand_op_msg
{
	uint8_t tag;
	uint8_t flag;
	uint8_t ctrl_idx;	/* target controller index */
	uint8_t fun;
	uint32_t reserved;	/* align */
	uint16_t page_count;
	uint8_t chip_ce;
	uint8_t chip_no;
	uint32_t page_no;
	uint64_t buf;		/* DMA address */
}__attribute__((packed))ssd_nand_op_msg_t;

/* RAM read/write request. */
typedef struct ssd_ram_op_msg
{
	uint8_t tag;
	uint8_t flag;
	uint8_t ctrl_idx;	/* target controller index */
	uint8_t fun;
	uint32_t reserved;	/* align */
	uint32_t start;		/* RAM offset */
	uint32_t length;	/* transfer length */
	uint64_t buf;		/* DMA address */
}__attribute__((packed))ssd_ram_op_msg_t;
939 | ||
940 | ||
/* log msg */
/*
 * Log fetch request: firmware writes log data into the host buffer
 * at bus address @buf.
 */
typedef struct ssd_log_msg
{
	uint8_t tag;
	uint8_t flag;
	uint8_t ctrl_idx;
	uint8_t fun;
	uint32_t reserved;	//align
	uint64_t buf;
}__attribute__((packed))ssd_log_msg_t;

/*
 * Extended log operation message; identical header with an extra
 * 64-bit padding word before the buffer address.
 */
typedef struct ssd_log_op_msg
{
	uint8_t tag;
	uint8_t flag;
	uint8_t ctrl_idx;
	uint8_t fun;
	uint32_t reserved;	//align
	uint64_t reserved1;	//align
	uint64_t buf;
}__attribute__((packed))ssd_log_op_msg_t;

/*
 * Completion message for a log request.  The tag byte and the 2-bit
 * status field are positioned to line up with the normal response
 * message; nr_log reports how many log entries were returned.
 */
typedef struct ssd_log_resp_msg
{
	uint8_t tag;
	uint16_t status	:2;
	uint16_t reserved1	:2;	//align with the normal resp msg
	uint16_t nr_log	:12;
	uint8_t fun;
	uint32_t reserved;
}__attribute__((packed))ssd_log_resp_msg_t;


/* resp msg */
/*
 * One 8-byte response queue entry, decodable as a normal response, a
 * log response, or raw integers (u64_msg/u32_msg) for copying/printing.
 */
typedef union ssd_response_msq
{
	ssd_resp_msg_t resp_msg;
	ssd_log_resp_msg_t log_resp_msg;
	uint64_t u64_msg;
	uint32_t u32_msg[2];
} ssd_response_msq_t;
982 | ||
983 | ||
/* custom struct */
/*
 * Protocol/register-map description: version plus the offsets and sizes
 * of the init-state and chip-info register windows.
 */
typedef struct ssd_protocol_info
{
	uint32_t ver;
	uint32_t init_state_reg;
	uint32_t init_state_reg_sz;
	uint32_t chip_info_reg;
	uint32_t chip_info_reg_sz;
} ssd_protocol_info_t;

/*
 * Hardware description read from the device at init time: firmware
 * versions, command/response queue geometry, channel/chip topology,
 * NAND geometry, and on-device memory layout (BBT, metadata, log).
 * Also exported to userspace via SSD_CMD_GET_HW_INFO, so the layout
 * is part of the ioctl ABI.
 */
typedef struct ssd_hw_info
{
	uint32_t bridge_ver;
	uint32_t ctrl_ver;

	uint32_t cmd_fifo_sz;
	uint32_t cmd_fifo_sz_mask;
	uint32_t cmd_max_sg;
	uint32_t sg_max_sec;
	uint32_t resp_ptr_sz;
	uint32_t resp_msg_sz;

	uint16_t nr_ctrl;

	uint16_t nr_data_ch;
	uint16_t nr_ch;
	uint16_t max_ch;
	uint16_t nr_chip;

	uint8_t pcb_ver;
	uint8_t upper_pcb_ver;

	uint8_t nand_vendor_id;
	uint8_t nand_dev_id;

	uint8_t max_ce;
	uint8_t id_size;
	uint16_t oob_size;

	uint16_t bbf_pages;
	uint16_t bbf_seek;	//

	uint16_t page_count;	//per block
	uint32_t page_size;
	uint32_t block_count;	//per flash

	uint64_t ram_size;
	uint32_t ram_align;
	uint32_t ram_max_len;

	uint64_t bbt_base;
	uint32_t bbt_size;
	uint64_t md_base;	//metadata
	uint32_t md_size;
	uint32_t md_entry_sz;

	uint32_t log_sz;

	uint64_t nand_wbuff_base;

	uint32_t md_reserved_blks;
	uint32_t reserved_blks;
	uint32_t valid_pages;
	uint32_t max_valid_pages;
	uint64_t size;
} ssd_hw_info_t;

/*
 * Extended hardware description (SSD_CMD_GET_HW_INFO_EXT).  Padded to a
 * fixed 64-byte record for forward compatibility.
 */
typedef struct ssd_hw_info_extend
{
	uint8_t board_type;
	uint8_t cap_type;
	uint8_t plp_type;
	uint8_t work_mode;
	uint8_t form_factor;

	uint8_t pad[59];
}ssd_hw_info_extend_t;

/*
 * SPI ROM layout: geometry plus base/size of each region stored in ROM
 * (bridge/controller/battery-manager firmware images, log, SMART, VP
 * and label areas).  Exported via SSD_CMD_GET_ROM_INFO.
 */
typedef struct ssd_rom_info
{
	uint32_t size;
	uint32_t block_size;
	uint16_t page_size;
	uint8_t nr_bridge_fw;
	uint8_t nr_ctrl_fw;
	uint8_t nr_bm_fw;
	uint8_t nr_smart;
	uint32_t bridge_fw_base;
	uint32_t bridge_fw_sz;
	uint32_t ctrl_fw_base;
	uint32_t ctrl_fw_sz;
	uint32_t bm_fw_base;
	uint32_t bm_fw_sz;
	uint32_t log_base;
	uint32_t log_sz;
	uint32_t smart_base;
	uint32_t smart_sz;
	uint32_t vp_base;
	uint32_t label_base;
} ssd_rom_info_t;
1084 | ||
/* debug info */
/*
 * Fault-injection modes settable through SSD_CMD_DEBUG: force read/write
 * errors or timeouts, inject a log event, or take the device offline.
 */
enum ssd_debug_type
{
	SSD_DEBUG_NONE = 0,
	SSD_DEBUG_READ_ERR,
	SSD_DEBUG_WRITE_ERR,
	SSD_DEBUG_RW_ERR,
	SSD_DEBUG_READ_TO,
	SSD_DEBUG_WRITE_TO,
	SSD_DEBUG_RW_TO,
	SSD_DEBUG_LOG,
	SSD_DEBUG_OFFLINE,
	SSD_DEBUG_NR
};

/*
 * Fault-injection parameters.  Which union member is valid depends on
 * @type: loc (offset/length) for the error/timeout modes, log
 * (event/extra) for SSD_DEBUG_LOG.
 */
typedef struct ssd_debug_info
{
	int type;
	union {
		struct {
			uint64_t off;
			uint32_t len;
		} loc;
		struct {
			int event;
			uint32_t extra;
		} log;
	} data;
}ssd_debug_info_t;
1114 | ||
/* label */
#define SSD_LABEL_FIELD_SZ	32
#define SSD_SN_SZ	16

/*
 * Manufacturing label, v1 format (SSD_CMD_GET_LABEL): fixed-width
 * character fields stored on the device.
 */
typedef struct ssd_label
{
	char date[SSD_LABEL_FIELD_SZ];
	char sn[SSD_LABEL_FIELD_SZ];
	char part[SSD_LABEL_FIELD_SZ];
	char desc[SSD_LABEL_FIELD_SZ];
	char other[SSD_LABEL_FIELD_SZ];
	char maf[SSD_LABEL_FIELD_SZ];
} ssd_label_t;

#define SSD_LABEL_DESC_SZ	256

/*
 * Manufacturing label, v3 format: richer field set with a long
 * description string.
 */
typedef struct ssd_labelv3
{
	char boardtype[SSD_LABEL_FIELD_SZ];
	char barcode[SSD_LABEL_FIELD_SZ];
	char item[SSD_LABEL_FIELD_SZ];
	char description[SSD_LABEL_DESC_SZ];
	char manufactured[SSD_LABEL_FIELD_SZ];
	char vendorname[SSD_LABEL_FIELD_SZ];
	char issuenumber[SSD_LABEL_FIELD_SZ];
	char cleicode[SSD_LABEL_FIELD_SZ];
	char bom[SSD_LABEL_FIELD_SZ];
} ssd_labelv3_t;
1143 | ||
/* battery */
typedef struct ssd_battery_info
{
	uint32_t fw_ver;	/* battery manager firmware version */
} ssd_battery_info_t;

/* ssd power stat */
/*
 * Lifetime power statistics: power-on count, unexpected power-loss
 * count, and failed-initialization count.
 */
typedef struct ssd_power_stat
{
	uint64_t nr_poweron;
	uint64_t nr_powerloss;
	uint64_t init_failed;
} ssd_power_stat_t;

/* io stat */
/*
 * Cumulative I/O statistics (also kept per queue in ssd_queue_t and
 * aggregated into ssd_smart_t).
 */
typedef struct ssd_io_stat
{
	uint64_t run_time;
	uint64_t nr_to;		/* timeouts */
	uint64_t nr_ioerr;
	uint64_t nr_rwerr;
	uint64_t nr_read;
	uint64_t nr_write;
	uint64_t rsectors;
	uint64_t wsectors;
} ssd_io_stat_t;

/* ecc */
/* Histogram of corrected bit flips, indexed by flip count. */
typedef struct ssd_ecc_info
{
	uint64_t bitflip[SSD_ECC_MAX_FLIP];
} ssd_ecc_info_t;

/* log */
enum ssd_log_level
{
	SSD_LOG_LEVEL_INFO	= 0,
	SSD_LOG_LEVEL_NOTICE,
	SSD_LOG_LEVEL_WARNING,
	SSD_LOG_LEVEL_ERR,
	SSD_LOG_NR_LEVEL
};

/* Log counters: total plus a per-severity breakdown. */
typedef struct ssd_log_info
{
	uint64_t nr_log;
	uint64_t stat[SSD_LOG_NR_LEVEL];
} ssd_log_info_t;

/* S.M.A.R.T. */
/* "SSDSMART" in little-endian ASCII; validates a stored smart record. */
#define SSD_SMART_MAGIC	(0x5452414D53445353ull)

/*
 * Persistent S.M.A.R.T. record: aggregated statistics plus version and
 * magic for validation.  Exported via SSD_CMD_GET_SMART.
 */
typedef struct ssd_smart
{
	struct ssd_power_stat pstat;
	struct ssd_io_stat io_stat;
	struct ssd_ecc_info ecc_info;
	struct ssd_log_info log_info;
	uint64_t version;
	uint64_t magic;
} ssd_smart_t;

/* internal log */
/* In-memory buffer of device log entries and its entry count. */
typedef struct ssd_internal_log
{
	uint32_t nr_log;
	void *log;
} ssd_internal_log_t;
1212 | ||
/* ssd cmd */
/*
 * Per-request command context: ties a bio (or private bio, see @flag)
 * to its scatterlist, hardware tag, DMA-able message, timeout timer and
 * optional synchronous-completion waiter.
 */
typedef struct ssd_cmd
{
	struct bio *bio;
	struct scatterlist *sgl;
	struct list_head list;
	void *dev;		/* owning ssd_device */
	int nsegs;
	int flag;	/*pbio(1) or bio(0)*/

	int tag;	/* hardware command tag / index into the tag map */
	void *msg;	/* message slot in msg_base */
	dma_addr_t msg_dma;

	unsigned long start_time;

	int errors;
	unsigned int nr_log;

	struct timer_list cmd_timer;
	struct completion *waiting;	/* non-NULL for synchronous commands */
} ssd_cmd_t;

/* Hardware submit hook (selected per interrupt/protocol mode). */
typedef void (*send_cmd_func)(struct ssd_cmd *);
typedef int (*ssd_event_call)(struct gendisk *, int, int); /* gendisk, event id, event level */

/* dcmd sz */
#define SSD_DCMD_MAX_SZ	32

/*
 * Direct (management) command: a preallocated message buffer taken from
 * the device's dcmd_list free pool.
 */
typedef struct ssd_dcmd
{
	struct list_head list;
	void *dev;		/* owning ssd_device */
	uint8_t msg[SSD_DCMD_MAX_SZ];
} ssd_dcmd_t;
1248 | ||
1249 | ||
/* Bits for ssd_device.state (used as bit numbers, not values). */
enum ssd_state {
	SSD_INIT_WORKQ,
	SSD_INIT_BD,
	SSD_ONLINE,
	/* full reset */
	SSD_RESETING,
	/* hw log */
	SSD_LOG_HW,
	/* log err */
	SSD_LOG_ERR,
};

#define SSD_QUEUE_NAME_LEN	16
/*
 * Per-MSI-X-vector response queue: consumer index into the response
 * ring, pointers into the shared response message/pointer areas, and
 * per-queue I/O and ECC statistics.
 */
typedef struct ssd_queue {
	char name[SSD_QUEUE_NAME_LEN];
	void *dev;		/* owning ssd_device */

	int idx;		/* queue number */

	uint32_t resp_idx;	/* consumer index */
	uint32_t resp_idx_mask;
	uint32_t resp_msg_sz;

	void *resp_msg;
	void *resp_ptr;

	struct ssd_cmd *cmd;	/* shared command table (device-wide) */

	struct ssd_io_stat io_stat;
	struct ssd_ecc_info ecc_info;
} ssd_queue_t;
1281 | ||
/*
 * Per-device state: PCI/MMIO resources, hardware description, command
 * tag/message pools, send/done worker threads, response queues, the
 * block-layer objects (request queue + gendisk), monitoring work items
 * and debug/fault-injection state.  One instance per probed card.
 */
typedef struct ssd_device {
	char name[SSD_DEV_NAME_LEN];

	int idx;		/* device index (minor base) */
	int major;
	int readonly;

	int int_mode;		/* interrupt mode (legacy/MSI/MSI-X) */
#ifdef SSD_ESCAPE_IRQ
	int irq_cpu;
#endif

	int reload_fw;

	int ot_delay;	//in ms

	atomic_t refcnt;
	atomic_t tocnt;		/* timeout count */
	atomic_t in_flight[2];	//r&w

	uint64_t uptime;

	struct list_head list;	/* link in global device list */
	struct pci_dev *pdev;

	unsigned long mmio_base;
	unsigned long mmio_len;
	void __iomem *ctrlp;	/* mapped register window */

	struct mutex spi_mutex;
	struct mutex i2c_mutex;

	struct ssd_protocol_info protocol_info;
	struct ssd_hw_info hw_info;
	struct ssd_rom_info rom_info;
	struct ssd_label label;

	struct ssd_smart smart;

	/* bios queued for submission, drained by send_thread */
	atomic_t in_sendq;
	spinlock_t sendq_lock;
	struct ssd_blist sendq;
	struct task_struct *send_thread;
	wait_queue_head_t send_waitq;

	/* completed bios, finished off by done_thread */
	atomic_t in_doneq;
	spinlock_t doneq_lock;
	struct ssd_blist doneq;
	struct task_struct *done_thread;
	wait_queue_head_t done_waitq;

	struct ssd_dcmd *dcmd;
	spinlock_t dcmd_lock;
	struct list_head dcmd_list;	/* direct cmd list */
	wait_queue_head_t dcmd_wq;

	unsigned long *tag_map;		/* bitmap of in-use command tags */
	wait_queue_head_t tag_wq;

	spinlock_t cmd_lock;
	struct ssd_cmd *cmd;		/* command table, indexed by tag */
	send_cmd_func scmd;

	ssd_event_call event_call;
	void *msg_base;			/* DMA pool of command messages */
	dma_addr_t msg_base_dma;

	uint32_t resp_idx;
	void *resp_msg_base;
	void *resp_ptr_base;
	dma_addr_t resp_msg_base_dma;
	dma_addr_t resp_ptr_base_dma;

	int nr_queue;
	struct msix_entry entry[SSD_MSIX_VEC];
	struct ssd_queue queue[SSD_MSIX_VEC];

	struct request_queue *rq;	/* The device request queue */
	struct gendisk *gd;		/* The gendisk structure */

	struct mutex internal_log_mutex;
	struct ssd_internal_log internal_log;
	struct workqueue_struct *workq;
	struct work_struct log_work;	/* get log */
	void *log_buf;

	unsigned long state;	/* device state, for example, block device inited */

	struct module *owner;

	/* extend */

	int slave;
	int cmajor;		/* char device major */
	int save_md;
	int ot_protect;		/* over-temperature protection enabled */

	struct kref kref;

	struct mutex gd_mutex;
	struct ssd_log_info log_info;	/* volatile */

	atomic_t queue_depth;
	struct mutex barrier_mutex;
	struct mutex fw_mutex;

	struct ssd_hw_info_extend hw_info_ext;
	struct ssd_labelv3 labelv3;

	int wmode;		/* effective write mode */
	int user_wmode;		/* write mode requested by the user */
	struct mutex bm_mutex;
	struct work_struct bm_work;	/* check bm */
	struct timer_list bm_timer;
	struct sfifo log_fifo;

	struct timer_list routine_timer;
	unsigned long routine_tick;
	unsigned long hwmon;

	struct work_struct hwmon_work;	/* check hw */
	struct work_struct capmon_work;	/* check battery */
	struct work_struct tempmon_work;	/* check temp */

	/* debug info */
	struct ssd_debug_info db_info;
	uint64_t reset_time;
	int has_non_0x98_reg_access;
	spinlock_t in_flight_lock;

	uint64_t last_poweron_id;

} ssd_device_t;
1415 | ||
1416 | ||
/* Ioctl struct */
/* Access-counter thresholds and current value (BB/EC accumulators). */
typedef struct ssd_acc_info {
	uint32_t threshold_l1;
	uint32_t threshold_l2;
	uint32_t val;
} ssd_acc_info_t;

/* Register read/write (SSD_CMD_REG_READ / SSD_CMD_REG_WRITE). */
typedef struct ssd_reg_op_info
{
	uint32_t offset;
	uint32_t value;
} ssd_reg_op_info_t;

/* SPI flash read/write/erase window. */
typedef struct ssd_spi_op_info
{
	void __user *buf;
	uint32_t off;
	uint32_t len;
} ssd_spi_op_info_t;

/* Raw I2C transfer: slave address plus write and/or read buffers. */
typedef struct ssd_i2c_op_info
{
	uint8_t saddr;
	uint8_t wsize;
	uint8_t rsize;
	void __user *wbuf;
	void __user *rbuf;
} ssd_i2c_op_info_t;

/* SMBus transfer: slave address, command code and data buffer. */
typedef struct ssd_smbus_op_info
{
	uint8_t saddr;
	uint8_t cmd;
	uint8_t size;
	void __user *buf;
} ssd_smbus_op_info_t;

/* Controller RAM read/write (SSD_CMD_RAM_READ / SSD_CMD_RAM_WRITE). */
typedef struct ssd_ram_op_info {
	uint8_t ctrl_idx;
	uint32_t length;
	uint64_t start;
	uint8_t __user *buf;
} ssd_ram_op_info_t;

/* Raw NAND page operation addressed by controller/chip/flash/page. */
typedef struct ssd_flash_op_info {
	uint32_t page;
	uint16_t flash;
	uint8_t chip;
	uint8_t ctrl_idx;
	uint8_t __user *buf;
} ssd_flash_op_info_t;

/* Software-generated log record (SSD_CMD_SW_LOG). */
typedef struct ssd_sw_log_info {
	uint16_t event;
	uint16_t pad;
	uint32_t data;
} ssd_sw_log_info_t;

typedef struct ssd_version_info
{
	uint32_t bridge_ver;	/* bridge fw version */
	uint32_t ctrl_ver;	/* controller fw version */
	uint32_t bm_ver;	/* battery manager fw version */
	uint8_t  pcb_ver;	/* main pcb version */
	uint8_t  upper_pcb_ver;
	uint8_t  pad0;
	uint8_t  pad1;
} ssd_version_info_t;

/* PCI location (domain:bus:slot.func) of a device. */
typedef struct pci_addr
{
	uint16_t domain;
	uint8_t bus;
	uint8_t slot;
	uint8_t func;
} pci_addr_t;

/* Snapshot of the driver's module parameters (SSD_CMD_DRV_PARAM_INFO). */
typedef struct ssd_drv_param_info {
	int mode;
	int status_mask;
	int int_mode;
	int threaded_irq;
	int log_level;
	int wmode;
	int ot_protect;
	int finject;
	int pad[8];
} ssd_drv_param_info_t;
1505 | ||
1506 | ||
/* form factor */
enum ssd_form_factor
{
	SSD_FORM_FACTOR_HHHL = 0,	/* half-height, half-length */
	SSD_FORM_FACTOR_FHHL		/* full-height, half-length */
};


/* ssd power loss protect */
enum ssd_plp_type
{
	SSD_PLP_SCAP	= 0,	/* supercapacitor backed */
	SSD_PLP_CAP,		/* capacitor backed */
	SSD_PLP_NONE
};

/* ssd bm */
#define SSD_BM_SLAVE_ADDRESS	0x16	/* battery manager SMBus address */
#define SSD_BM_CAP	5		/* number of backup capacitors */

/* SBS cmd */
#define SSD_BM_SAFETYSTATUS	0x51
#define SSD_BM_OPERATIONSTATUS	0x54

/* ManufacturerAccess */
#define SSD_BM_MANUFACTURERACCESS	0x00
#define SSD_BM_ENTER_CAP_LEARNING	0x0023	/* cap learning */

/* Data flash access */
#define SSD_BM_DATA_FLASH_SUBCLASS_ID	0x77
#define SSD_BM_DATA_FLASH_SUBCLASS_ID_PAGE1	0x78
#define SSD_BM_SYSTEM_DATA_SUBCLASS_ID	56
#define SSD_BM_CONFIGURATION_REGISTERS_ID	64

/* min cap voltage */
#define SSD_BM_CAP_VOLT_MIN	500	/* millivolts — TODO confirm unit */
1543 | ||
/*
enum ssd_bm_cap
{
	SSD_BM_CAP_VINA = 1,
	SSD_BM_CAP_JH = 3
};*/

/* Battery/capacitor module health as reported to userspace. */
enum ssd_bmstatus
{
	SSD_BMSTATUS_OK = 0,
	SSD_BMSTATUS_CHARGING,	/* not fully charged */
	SSD_BMSTATUS_WARNING
};

/* Unit tag for an SBS register value (see ssd_bm_sbs table). */
enum sbs_unit {
	SBS_UNIT_VALUE	= 0,
	SBS_UNIT_TEMPERATURE,
	SBS_UNIT_VOLTAGE,
	SBS_UNIT_CURRENT,
	SBS_UNIT_ESR,
	SBS_UNIT_PERCENT,
	SBS_UNIT_CAPACITANCE
};

/* SMBus transfer size of an SBS register. */
enum sbs_size {
	SBS_SIZE_BYTE = 1,
	SBS_SIZE_WORD,
	SBS_SIZE_BLK,
};

/*
 * One row of the SBS poll table: register code, transfer size, unit,
 * destination offset inside struct ssd_bm, and a mask applied to the
 * raw value.
 */
struct sbs_cmd {
	uint8_t cmd;
	uint8_t size;
	uint8_t unit;
	uint8_t off;
	uint16_t mask;
	char *desc;
};

/* Decoded snapshot of the battery/capacitor module's SBS registers. */
struct ssd_bm {
	uint16_t temp;
	uint16_t volt;
	uint16_t curr;
	uint16_t esr;
	uint16_t rsoc;
	uint16_t health;
	uint16_t cap;
	uint16_t chg_curr;
	uint16_t chg_volt;
	uint16_t cap_volt[SSD_BM_CAP];
	uint16_t sf_alert;
	uint16_t sf_status;
	uint16_t op_status;
	uint16_t sys_volt;
};
1599 | ||
/* ManufacturerData block as returned by the battery manager. */
struct ssd_bm_manufacturer_data
{
	uint16_t pack_lot_code;
	uint16_t pcb_lot_code;
	uint16_t firmware_ver;
	uint16_t hardware_ver;
};

/*
 * Battery manager configuration registers (data-flash subclass
 * SSD_BM_CONFIGURATION_REGISTERS_ID).  Bitfield layout mirrors the
 * device's register format — do not reorder.
 */
struct ssd_bm_configuration_registers
{
	struct {
		uint16_t cc:3;
		uint16_t rsvd:5;
		uint16_t stack:1;
		uint16_t rsvd1:2;
		uint16_t temp:2;
		uint16_t rsvd2:1;
		uint16_t lt_en:1;
		uint16_t rsvd3:1;
	} operation_cfg;
	uint16_t pad;
	uint16_t fet_action;
	uint16_t pad1;
	uint16_t fault;
};
1625 | ||
#define SBS_VALUE_MASK	0xffff

/*
 * Byte offset of member @var within struct ssd_bm.  Implemented with the
 * standard offsetof() (available through the kernel headers already
 * included) rather than the hand-rolled null-pointer-dereference form,
 * which is formally undefined behavior in C; the value is identical.
 */
#define bm_var_offset(var)	offsetof(struct ssd_bm, var)
/* Address of the field at @offset inside the ssd_bm snapshot @start. */
#define bm_var(start, offset)	((void *) start + (offset))
1630 | ||
1631 | static struct sbs_cmd ssd_bm_sbs[] = { | |
1632 | {0x08, SBS_SIZE_WORD, SBS_UNIT_TEMPERATURE, bm_var_offset(temp), SBS_VALUE_MASK, "Temperature"}, | |
1633 | {0x09, SBS_SIZE_WORD, SBS_UNIT_VOLTAGE, bm_var_offset(volt), SBS_VALUE_MASK, "Voltage"}, | |
1634 | {0x0a, SBS_SIZE_WORD, SBS_UNIT_CURRENT, bm_var_offset(curr), SBS_VALUE_MASK, "Current"}, | |
1635 | {0x0b, SBS_SIZE_WORD, SBS_UNIT_ESR, bm_var_offset(esr), SBS_VALUE_MASK, "ESR"}, | |
1636 | {0x0d, SBS_SIZE_BYTE, SBS_UNIT_PERCENT, bm_var_offset(rsoc), SBS_VALUE_MASK, "RelativeStateOfCharge"}, | |
1637 | {0x0e, SBS_SIZE_BYTE, SBS_UNIT_PERCENT, bm_var_offset(health), SBS_VALUE_MASK, "Health"}, | |
1638 | {0x10, SBS_SIZE_WORD, SBS_UNIT_CAPACITANCE, bm_var_offset(cap), SBS_VALUE_MASK, "Capacitance"}, | |
1639 | {0x14, SBS_SIZE_WORD, SBS_UNIT_CURRENT, bm_var_offset(chg_curr), SBS_VALUE_MASK, "ChargingCurrent"}, | |
1640 | {0x15, SBS_SIZE_WORD, SBS_UNIT_VOLTAGE, bm_var_offset(chg_volt), SBS_VALUE_MASK, "ChargingVoltage"}, | |
1641 | {0x3b, SBS_SIZE_WORD, SBS_UNIT_VOLTAGE, (uint8_t)bm_var_offset(cap_volt[4]), SBS_VALUE_MASK, "CapacitorVoltage5"}, | |
1642 | {0x3c, SBS_SIZE_WORD, SBS_UNIT_VOLTAGE, (uint8_t)bm_var_offset(cap_volt[3]), SBS_VALUE_MASK, "CapacitorVoltage4"}, | |
1643 | {0x3d, SBS_SIZE_WORD, SBS_UNIT_VOLTAGE, (uint8_t)bm_var_offset(cap_volt[2]), SBS_VALUE_MASK, "CapacitorVoltage3"}, | |
1644 | {0x3e, SBS_SIZE_WORD, SBS_UNIT_VOLTAGE, (uint8_t)bm_var_offset(cap_volt[1]), SBS_VALUE_MASK, "CapacitorVoltage2"}, | |
1645 | {0x3f, SBS_SIZE_WORD, SBS_UNIT_VOLTAGE, (uint8_t)bm_var_offset(cap_volt[0]), SBS_VALUE_MASK, "CapacitorVoltage1"}, | |
1646 | {0x50, SBS_SIZE_WORD, SBS_UNIT_VALUE, bm_var_offset(sf_alert), 0x870F, "SafetyAlert"}, | |
1647 | {0x51, SBS_SIZE_WORD, SBS_UNIT_VALUE, bm_var_offset(sf_status), 0xE7BF, "SafetyStatus"}, | |
1648 | {0x54, SBS_SIZE_WORD, SBS_UNIT_VALUE, bm_var_offset(op_status), 0x79F4, "OperationStatus"}, | |
1649 | {0x5a, SBS_SIZE_WORD, SBS_UNIT_VOLTAGE, bm_var_offset(sys_volt), SBS_VALUE_MASK, "SystemVoltage"}, | |
1650 | {0, 0, 0, 0, 0, NULL}, | |
1651 | }; | |
1652 | ||
/* ssd ioctl */
/*
 * Ioctl command numbers, type 'H'.  Numbers are grouped by function:
 * 100s info queries, 120s register/SPI/I2C/SMBus access, 140s battery
 * manager, 150s-160s RAM/NAND access, 180+ maintenance and control.
 * These values are userspace ABI — never renumber existing commands.
 */
#define SSD_CMD_GET_PROTOCOL_INFO	_IOR('H', 100, struct ssd_protocol_info)
#define SSD_CMD_GET_HW_INFO	_IOR('H', 101, struct ssd_hw_info)
#define SSD_CMD_GET_ROM_INFO	_IOR('H', 102, struct ssd_rom_info)
#define SSD_CMD_GET_SMART	_IOR('H', 103, struct ssd_smart)
#define SSD_CMD_GET_IDX	_IOR('H', 105, int)
#define SSD_CMD_GET_AMOUNT	_IOR('H', 106, int)
#define SSD_CMD_GET_TO_INFO	_IOR('H', 107, int)
#define SSD_CMD_GET_DRV_VER	_IOR('H', 108, char[DRIVER_VERSION_LEN])

#define SSD_CMD_GET_BBACC_INFO	_IOR('H', 109, struct ssd_acc_info)
#define SSD_CMD_GET_ECACC_INFO	_IOR('H', 110, struct ssd_acc_info)

#define SSD_CMD_GET_HW_INFO_EXT	_IOR('H', 111, struct ssd_hw_info_extend)

#define SSD_CMD_REG_READ	_IOWR('H', 120, struct ssd_reg_op_info)
#define SSD_CMD_REG_WRITE	_IOWR('H', 121, struct ssd_reg_op_info)

#define SSD_CMD_SPI_READ	_IOWR('H', 125, struct ssd_spi_op_info)
#define SSD_CMD_SPI_WRITE	_IOWR('H', 126, struct ssd_spi_op_info)
#define SSD_CMD_SPI_ERASE	_IOWR('H', 127, struct ssd_spi_op_info)

#define SSD_CMD_I2C_READ	_IOWR('H', 128, struct ssd_i2c_op_info)
#define SSD_CMD_I2C_WRITE	_IOWR('H', 129, struct ssd_i2c_op_info)
#define SSD_CMD_I2C_WRITE_READ	_IOWR('H', 130, struct ssd_i2c_op_info)

#define SSD_CMD_SMBUS_SEND_BYTE	_IOWR('H', 131, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_RECEIVE_BYTE	_IOWR('H', 132, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_WRITE_BYTE	_IOWR('H', 133, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_READ_BYTE	_IOWR('H', 135, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_WRITE_WORD	_IOWR('H', 136, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_READ_WORD	_IOWR('H', 137, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_WRITE_BLOCK	_IOWR('H', 138, struct ssd_smbus_op_info)
#define SSD_CMD_SMBUS_READ_BLOCK	_IOWR('H', 139, struct ssd_smbus_op_info)

#define SSD_CMD_BM_GET_VER	_IOR('H', 140, uint16_t)
#define SSD_CMD_BM_GET_NR_CAP	_IOR('H', 141, int)
#define SSD_CMD_BM_CAP_LEARNING	_IOW('H', 142, int)
#define SSD_CMD_CAP_LEARN	_IOR('H', 143, uint32_t)
#define SSD_CMD_GET_CAP_STATUS	_IOR('H', 144, int)

#define SSD_CMD_RAM_READ	_IOWR('H', 150, struct ssd_ram_op_info)
#define SSD_CMD_RAM_WRITE	_IOWR('H', 151, struct ssd_ram_op_info)

#define SSD_CMD_NAND_READ_ID	_IOR('H', 160, struct ssd_flash_op_info)
#define SSD_CMD_NAND_READ	_IOWR('H', 161, struct ssd_flash_op_info)	//with oob
#define SSD_CMD_NAND_WRITE	_IOWR('H', 162, struct ssd_flash_op_info)
#define SSD_CMD_NAND_ERASE	_IOWR('H', 163, struct ssd_flash_op_info)
#define SSD_CMD_NAND_READ_EXT	_IOWR('H', 164, struct ssd_flash_op_info)	//ingore EIO

#define SSD_CMD_UPDATE_BBT	_IOW('H', 180, struct ssd_flash_op_info)

#define SSD_CMD_CLEAR_ALARM	_IOW('H', 190, int)
#define SSD_CMD_SET_ALARM	_IOW('H', 191, int)

#define SSD_CMD_RESET	_IOW('H', 200, int)
#define SSD_CMD_RELOAD_FW	_IOW('H', 201, int)
#define SSD_CMD_UNLOAD_DEV	_IOW('H', 202, int)
#define SSD_CMD_LOAD_DEV	_IOW('H', 203, int)
#define SSD_CMD_UPDATE_VP	_IOWR('H', 205, uint32_t)
#define SSD_CMD_FULL_RESET	_IOW('H', 206, int)

#define SSD_CMD_GET_NR_LOG	_IOR('H', 220, uint32_t)
#define SSD_CMD_GET_LOG	_IOR('H', 221, void *)
#define SSD_CMD_LOG_LEVEL	_IOW('H', 222, int)

#define SSD_CMD_OT_PROTECT	_IOW('H', 223, int)
#define SSD_CMD_GET_OT_STATUS	_IOR('H', 224, int)

#define SSD_CMD_CLEAR_LOG	_IOW('H', 230, int)
#define SSD_CMD_CLEAR_SMART	_IOW('H', 231, int)

#define SSD_CMD_SW_LOG	_IOW('H', 232, struct ssd_sw_log_info)

#define SSD_CMD_GET_LABEL	_IOR('H', 235, struct ssd_label)
#define SSD_CMD_GET_VERSION	_IOR('H', 236, struct ssd_version_info)
#define SSD_CMD_GET_TEMPERATURE	_IOR('H', 237, int)
#define SSD_CMD_GET_BMSTATUS	_IOR('H', 238, int)
#define SSD_CMD_GET_LABEL2	_IOR('H', 239, void *)


#define SSD_CMD_FLUSH	_IOW('H', 240, int)
#define SSD_CMD_SAVE_MD	_IOW('H', 241, int)

#define SSD_CMD_SET_WMODE	_IOW('H', 242, int)
#define SSD_CMD_GET_WMODE	_IOR('H', 243, int)
#define SSD_CMD_GET_USER_WMODE	_IOR('H', 244, int)

#define SSD_CMD_DEBUG	_IOW('H', 250, struct ssd_debug_info)
#define SSD_CMD_DRV_PARAM_INFO	_IOR('H', 251, struct ssd_drv_param_info)

#define SSD_CMD_CLEAR_WARNING	_IOW('H', 260, int)
361ebed5 HSDT |
1746 | |
/* log */
#define SSD_LOG_MAX_SZ	4096	/* log fetch buffer size */
#define SSD_LOG_LEVEL	SSD_LOG_LEVEL_NOTICE	/* default report level */
#define SSD_DIF_WITH_OLD_LOG	0x3f

/* How a log entry's data word should be interpreted when printed. */
enum ssd_log_data
{
	SSD_LOG_DATA_NONE = 0,
	SSD_LOG_DATA_LOC,	/* flash/block/page location */
	SSD_LOG_DATA_HEX	/* raw hex value */
};
1758 | ||
/*
 * One device log entry.  The 32-bit data word is either a flash
 * location (two packed layouts, loc vs loc1 — which applies depends on
 * firmware generation; see enum ssd_log_data) or a raw value.  Packed:
 * this layout matches what the hardware/firmware emits.
 */
typedef struct ssd_log_entry
{
	union {
		struct {
			uint32_t page:10;
			uint32_t block:14;
			uint32_t flash:8;
		} loc;
		struct {
			uint32_t page:12;
			uint32_t block:12;
			uint32_t flash:8;
		} loc1;
		uint32_t val;
	} data;
	uint16_t event:10;	/* event code, see ssd_log_desc[] */
	uint16_t mod:6;
	uint16_t idx;
}__attribute__((packed))ssd_log_entry_t;

/* A log entry with its timestamp and originating controller. */
typedef struct ssd_log
{
	uint64_t time:56;
	uint64_t ctrl_idx:8;
	ssd_log_entry_t le;
} __attribute__((packed)) ssd_log_t;
1785 | ||
/*
 * Static description of a log event code: severity, how to render the
 * data word, whether block/page are meaningful, and a human-readable
 * message (see the ssd_log_desc[] table below).
 */
typedef struct ssd_log_desc
{
	uint16_t event;
	uint8_t level;	/* enum ssd_log_level */
	uint8_t data;	/* enum ssd_log_data */
	uint8_t sblock;	/* show block number */
	uint8_t spage;	/* show page number */
	char *desc;
} __attribute__((packed)) ssd_log_desc_t;

#define SSD_LOG_SW_IDX	0xF	/* idx value marking software-generated logs */
#define SSD_UNKNOWN_EVENT	((uint16_t)-1)
1798 | static struct ssd_log_desc ssd_log_desc[] = { | |
1799 | /* event, level, show flash, show block, show page, desc */ | |
1800 | {0x0, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_LOC, 0, 0, "Create BBT failure"}, //g3 | |
1801 | {0x1, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_LOC, 0, 0, "Read BBT failure"}, //g3 | |
1802 | {0x2, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "Mark bad block"}, | |
1803 | {0x3, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 0, 0, "Flush BBT failure"}, | |
1804 | {0x4, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Program failure"}, | |
1805 | {0x7, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "No available blocks"}, | |
1806 | {0x8, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "Bad EC header"}, | |
1807 | {0x9, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_LOC, 1, 0, "Bad VID header"}, //g3 | |
1808 | {0xa, SSD_LOG_LEVEL_INFO, SSD_LOG_DATA_LOC, 1, 0, "Wear leveling"}, | |
1809 | {0xb, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "WL read back failure"}, | |
1810 | {0x11, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Data recovery failure"}, // err | |
1811 | {0x20, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Init: scan mapping table failure"}, // err g3 | |
1812 | {0x21, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Program failure"}, | |
1813 | {0x22, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Program failure"}, | |
1814 | {0x23, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Program failure"}, | |
1815 | {0x24, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "Merge: read mapping page failure"}, | |
1816 | {0x25, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Merge: read back failure"}, | |
1817 | {0x26, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Program failure"}, | |
1818 | {0x27, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_LOC, 1, 1, "Data corrupted for abnormal power down"}, //g3 | |
1819 | {0x28, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Merge: mapping page corrupted"}, | |
1820 | {0x29, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "Init: no mapping page"}, | |
1821 | {0x2a, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: mapping pages incomplete"}, | |
1822 | {0x2b, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Read back failure after programming failure"}, // err | |
1823 | {0xf1, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Read failure without recovery"}, // err | |
1824 | {0xf2, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 0, 0, "No available blocks"}, // maybe err g3 | |
1825 | {0xf3, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 0, "Init: RAID incomplete"}, // err g3 | |
1826 | {0xf4, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Program failure"}, | |
1827 | {0xf5, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Read failure in moving data"}, | |
1828 | {0xf6, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Program failure"}, | |
1829 | {0xf7, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_LOC, 1, 1, "Init: RAID not complete"}, | |
1830 | {0xf8, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "Init: data moving interrupted"}, | |
da3355df | 1831 | {0xfe, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Data inspection failure"}, |
361ebed5 HSDT |
1832 | {0xff, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "IO: ECC failed"}, |
1833 | ||
1834 | /* new */ | |
1835 | {0x2e, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 0, 0, "No available reserved blocks" }, // err | |
1836 | {0x30, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 0, 0, "Init: PMT membership not found"}, | |
1837 | {0x31, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Init: PMT corrupted"}, | |
1838 | {0x32, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 0, 0, "Init: PBT membership not found"}, | |
1839 | {0x33, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 0, 0, "Init: PBT not found"}, | |
1840 | {0x34, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 0, 0, "Init: PBT corrupted"}, | |
1841 | {0x35, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: PMT page read failure"}, | |
1842 | {0x36, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: PBT page read failure"}, | |
1843 | {0x37, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: PBT backup page read failure"}, | |
1844 | {0x38, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: PBMT read failure"}, | |
1845 | {0x39, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Init: PBMT scan failure"}, // err | |
1846 | {0x3a, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: first page read failure"}, | |
1847 | {0x3b, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Init: first page scan failure"}, // err | |
1848 | {0x3c, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Init: scan unclosed block failure"}, // err | |
1849 | {0x3d, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: write pointer mismatch"}, | |
1850 | {0x3e, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: PMT recovery: PBMT read failure"}, | |
1851 | {0x3f, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "Init: PMT recovery: PBMT scan failure"}, | |
1852 | {0x40, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "Init: PMT recovery: data page read failure"}, //err | |
1853 | {0x41, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: PBT write pointer mismatch"}, | |
1854 | {0x42, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: PBT latest version corrupted"}, | |
1855 | {0x43, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 0, "Init: too many unclosed blocks"}, | |
1856 | {0x44, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Init: PDW block found"}, | |
1857 | {0x45, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_HEX, 0, 0, "Init: more than one PDW block found"}, //err | |
1858 | {0x46, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Init: first page is blank or read failure"}, | |
1859 | {0x47, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 0, 0, "Init: PDW block not found"}, | |
1860 | ||
1861 | {0x50, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 0, "Cache: hit error data"}, // err | |
1862 | {0x51, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 0, "Cache: read back failure"}, // err | |
1863 | {0x52, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Cache: unknown command"}, //? | |
1864 | {0x53, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_LOC, 1, 1, "GC/WL read back failure"}, // err | |
1865 | ||
1866 | {0x60, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "Erase failure"}, | |
1867 | ||
1868 | {0x70, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "LPA not matched"}, | |
1869 | {0x71, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "PBN not matched"}, | |
1870 | {0x72, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Read retry failure"}, | |
1871 | {0x73, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Need raid recovery"}, | |
1872 | {0x74, SSD_LOG_LEVEL_INFO, SSD_LOG_DATA_LOC, 1, 1, "Need read retry"}, | |
1873 | {0x75, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Read invalid data page"}, | |
1874 | {0x76, SSD_LOG_LEVEL_INFO, SSD_LOG_DATA_LOC, 1, 1, "ECC error, data in cache, PBN matched"}, | |
1875 | {0x77, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "ECC error, data in cache, PBN not matched"}, | |
1876 | {0x78, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "ECC error, data in flash, PBN not matched"}, | |
1877 | {0x79, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "ECC ok, data in cache, LPA not matched"}, | |
1878 | {0x7a, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "ECC ok, data in flash, LPA not matched"}, | |
1879 | {0x7b, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID data in cache, LPA not matched"}, | |
1880 | {0x7c, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID data in flash, LPA not matched"}, | |
1881 | {0x7d, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Read data page status error"}, | |
1882 | {0x7e, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Read blank page"}, | |
1883 | {0x7f, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Access flash timeout"}, | |
1884 | ||
1885 | {0x80, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "EC overflow"}, | |
1886 | {0x81, SSD_LOG_LEVEL_INFO, SSD_LOG_DATA_NONE, 0, 0, "Scrubbing completed"}, | |
1887 | {0x82, SSD_LOG_LEVEL_INFO, SSD_LOG_DATA_LOC, 1, 0, "Unstable block(too much bit flip)"}, | |
1888 | {0x83, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "GC: ram error"}, //? | |
1889 | {0x84, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "GC: one PBMT read failure"}, | |
1890 | ||
1891 | {0x88, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "GC: mark bad block"}, | |
1892 | {0x89, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 0, "GC: invalid page count error"}, // maybe err | |
1893 | {0x8a, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_NONE, 0, 0, "Warning: Bad Block close to limit"}, | |
1894 | {0x8b, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_NONE, 0, 0, "Error: Bad Block over limit"}, | |
1895 | {0x8c, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_NONE, 0, 0, "Warning: P/E cycles close to limit"}, | |
1896 | {0x8d, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_NONE, 0, 0, "Error: P/E cycles over limit"}, | |
1897 | ||
1197134c KM |
1898 | {0x90, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Warning: Over temperature"}, //90 |
1899 | {0x91, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Info: Temperature is OK"}, //80 | |
361ebed5 HSDT |
1900 | {0x92, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_NONE, 0, 0, "Battery fault"}, |
1901 | {0x93, SSD_LOG_LEVEL_WARNING, SSD_LOG_DATA_NONE, 0, 0, "SEU fault"}, //err | |
1902 | {0x94, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_NONE, 0, 0, "DDR error"}, //err | |
1903 | {0x95, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_NONE, 0, 0, "Controller serdes error"}, //err | |
1904 | {0x96, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_NONE, 0, 0, "Bridge serdes 1 error"}, //err | |
1905 | {0x97, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_NONE, 0, 0, "Bridge serdes 2 error"}, //err | |
1906 | {0x98, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "SEU fault (corrected)"}, //err | |
1907 | {0x99, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Battery is OK"}, | |
1197134c | 1908 | {0x9a, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Info: Temperature close to limit"}, //85 |
361ebed5 HSDT |
1909 | |
1910 | {0x9b, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "SEU fault address (low)"}, | |
1911 | {0x9c, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "SEU fault address (high)"}, | |
1912 | {0x9d, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "I2C fault" }, | |
1913 | {0x9e, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "DDR single bit error" }, | |
1914 | {0x9f, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Board voltage fault" }, | |
1915 | ||
1916 | {0xa0, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "LPA not matched"}, | |
1917 | {0xa1, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Re-read data in cache"}, | |
1918 | {0xa2, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Read blank page"}, | |
1919 | {0xa3, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID recovery: Read blank page"}, | |
1920 | {0xa4, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID recovery: new data in cache"}, | |
1921 | {0xa5, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID recovery: PBN not matched"}, | |
1922 | {0xa6, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Read data with error flag"}, | |
1923 | {0xa7, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID recovery: recoverd data with error flag"}, | |
1924 | {0xa8, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Blank page in cache, PBN matched"}, | |
1925 | {0xa9, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID recovery: Blank page in cache, PBN matched"}, | |
1926 | {0xaa, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 0, 0, "Flash init failure"}, | |
1927 | {0xab, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "Mapping table recovery failure"}, | |
1928 | {0xac, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_LOC, 1, 1, "RAID recovery: ECC failed"}, | |
da3355df SF |
1929 | {0xb0, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Warning: Temperature is 95 degrees C"}, |
1930 | {0xb1, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Warning: Temperature is 100 degrees C"}, | |
361ebed5 HSDT |
1931 | |
1932 | {0x300, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_HEX, 0, 0, "CMD timeout"}, | |
1933 | {0x301, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Power on"}, | |
1934 | {0x302, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Power off"}, | |
1935 | {0x303, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Clear log"}, | |
1936 | {0x304, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Set capacity"}, | |
1937 | {0x305, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Clear data"}, | |
1938 | {0x306, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "BM safety status"}, | |
1939 | {0x307, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_HEX, 0, 0, "I/O error"}, | |
1940 | {0x308, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "CMD error"}, | |
1941 | {0x309, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Set wmode"}, | |
1942 | {0x30a, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_HEX, 0, 0, "DDR init failed" }, | |
1943 | {0x30b, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "PCIe link status" }, | |
1944 | {0x30c, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_HEX, 0, 0, "Controller reset sync error" }, | |
1945 | {0x30d, SSD_LOG_LEVEL_ERR, SSD_LOG_DATA_HEX, 0, 0, "Clock fault" }, | |
1946 | {0x30e, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "FPGA voltage fault status" }, | |
1947 | {0x30f, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Set capacity finished"}, | |
1948 | {0x310, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Clear data finished"}, | |
1949 | {0x311, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Reset"}, | |
1950 | {0x312, SSD_LOG_LEVEL_WARNING,SSD_LOG_DATA_HEX, 0, 0, "CAP: voltage fault"}, | |
1951 | {0x313, SSD_LOG_LEVEL_WARNING,SSD_LOG_DATA_NONE, 0, 0, "CAP: learn fault"}, | |
1952 | {0x314, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "CAP status"}, | |
1953 | {0x315, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Board voltage fault status"}, | |
da3355df SF |
1954 | {0x316, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Info: Inlet temperature is 55 degrees C"}, //55 |
1955 | {0x317, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Info: Inlet temperature is 50 degrees C"}, //50 | |
1197134c KM |
1956 | {0x318, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Info: Flash over temperature"}, //70 |
1957 | {0x319, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Info: Flash temperature is OK"}, //65 | |
361ebed5 HSDT |
1958 | {0x31a, SSD_LOG_LEVEL_WARNING,SSD_LOG_DATA_NONE, 0, 0, "CAP: short circuit"}, |
1959 | {0x31b, SSD_LOG_LEVEL_WARNING,SSD_LOG_DATA_HEX, 0, 0, "Sensor fault"}, | |
1960 | {0x31c, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Erase all data"}, | |
1961 | {0x31d, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Erase all data finished"}, | |
da3355df SF |
1962 | {0x320, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "Temperature sensor event"}, |
1963 | ||
1964 | {0x350, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Clear smart"}, | |
1965 | {0x351, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_NONE, 0, 0, "Clear warning"}, | |
361ebed5 HSDT |
1966 | |
1967 | {SSD_UNKNOWN_EVENT, SSD_LOG_LEVEL_NOTICE, SSD_LOG_DATA_HEX, 0, 0, "unknown event"}, | |
1968 | }; | |
1969 | /* */ | |
1970 | #define SSD_LOG_OVER_TEMP 0x90 | |
1971 | #define SSD_LOG_NORMAL_TEMP 0x91 | |
1972 | #define SSD_LOG_WARN_TEMP 0x9a | |
1973 | #define SSD_LOG_SEU_FAULT 0x93 | |
1974 | #define SSD_LOG_SEU_FAULT1 0x98 | |
1975 | #define SSD_LOG_BATTERY_FAULT 0x92 | |
1976 | #define SSD_LOG_BATTERY_OK 0x99 | |
1977 | #define SSD_LOG_BOARD_VOLT_FAULT 0x9f | |
1978 | ||
1979 | /* software log */ | |
1980 | #define SSD_LOG_TIMEOUT 0x300 | |
1981 | #define SSD_LOG_POWER_ON 0x301 | |
1982 | #define SSD_LOG_POWER_OFF 0x302 | |
1983 | #define SSD_LOG_CLEAR_LOG 0x303 | |
1984 | #define SSD_LOG_SET_CAPACITY 0x304 | |
1985 | #define SSD_LOG_CLEAR_DATA 0x305 | |
1986 | #define SSD_LOG_BM_SFSTATUS 0x306 | |
1987 | #define SSD_LOG_EIO 0x307 | |
1988 | #define SSD_LOG_ECMD 0x308 | |
1989 | #define SSD_LOG_SET_WMODE 0x309 | |
1990 | #define SSD_LOG_DDR_INIT_ERR 0x30a | |
1991 | #define SSD_LOG_PCIE_LINK_STATUS 0x30b | |
1992 | #define SSD_LOG_CTRL_RST_SYNC 0x30c | |
1993 | #define SSD_LOG_CLK_FAULT 0x30d | |
1994 | #define SSD_LOG_VOLT_FAULT 0x30e | |
1995 | #define SSD_LOG_SET_CAPACITY_END 0x30F | |
1996 | #define SSD_LOG_CLEAR_DATA_END 0x310 | |
1997 | #define SSD_LOG_RESET 0x311 | |
1998 | #define SSD_LOG_CAP_VOLT_FAULT 0x312 | |
1999 | #define SSD_LOG_CAP_LEARN_FAULT 0x313 | |
2000 | #define SSD_LOG_CAP_STATUS 0x314 | |
2001 | #define SSD_LOG_VOLT_STATUS 0x315 | |
2002 | #define SSD_LOG_INLET_OVER_TEMP 0x316 | |
2003 | #define SSD_LOG_INLET_NORMAL_TEMP 0x317 | |
2004 | #define SSD_LOG_FLASH_OVER_TEMP 0x318 | |
2005 | #define SSD_LOG_FLASH_NORMAL_TEMP 0x319 | |
2006 | #define SSD_LOG_CAP_SHORT_CIRCUIT 0x31a | |
2007 | #define SSD_LOG_SENSOR_FAULT 0x31b | |
2008 | #define SSD_LOG_ERASE_ALL 0x31c | |
2009 | #define SSD_LOG_ERASE_ALL_END 0x31d | |
da3355df SF |
2010 | #define SSD_LOG_TEMP_SENSOR_EVENT 0x320 |
2011 | #define SSD_LOG_CLEAR_SMART 0x350 | |
2012 | #define SSD_LOG_CLEAR_WARNING 0x351 | |
361ebed5 HSDT |
2013 | |
2014 | ||
2015 | /* sw log fifo depth */ | |
2016 | #define SSD_LOG_FIFO_SZ 1024 | |
2017 | ||
2018 | ||
2019 | /* done queue */ | |
2020 | static DEFINE_PER_CPU(struct list_head, ssd_doneq); | |
2021 | static DEFINE_PER_CPU(struct tasklet_struct, ssd_tasklet); | |
2022 | ||
2023 | ||
2024 | /* unloading driver */ | |
2025 | static volatile int ssd_exiting = 0; | |
2026 | ||
2027 | #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12)) | |
2028 | static struct class_simple *ssd_class; | |
2029 | #else | |
2030 | static struct class *ssd_class; | |
2031 | #endif | |
2032 | ||
2033 | static int ssd_cmajor = SSD_CMAJOR; | |
2034 | ||
2035 | /* ssd block device major, minors */ | |
2036 | static int ssd_major = SSD_MAJOR; | |
2037 | static int ssd_major_sl = SSD_MAJOR_SL; | |
2038 | static int ssd_minors = SSD_MINORS; | |
2039 | ||
2040 | /* ssd device list */ | |
2041 | static struct list_head ssd_list; | |
2042 | static unsigned long ssd_index_bits[SSD_MAX_DEV / BITS_PER_LONG + 1]; | |
2043 | static unsigned long ssd_index_bits_sl[SSD_MAX_DEV / BITS_PER_LONG + 1]; | |
2044 | static atomic_t ssd_nr; | |
2045 | ||
2046 | /* module param */ | |
2047 | enum ssd_drv_mode | |
2048 | { | |
2049 | SSD_DRV_MODE_STANDARD = 0, /* full */ | |
2050 | SSD_DRV_MODE_DEBUG = 2, /* debug */ | |
2051 | SSD_DRV_MODE_BASE /* base only */ | |
2052 | }; | |
2053 | ||
2054 | enum ssd_int_mode | |
2055 | { | |
2056 | SSD_INT_LEGACY = 0, | |
2057 | SSD_INT_MSI, | |
2058 | SSD_INT_MSIX | |
2059 | }; | |
2060 | ||
2061 | #if (defined SSD_MSIX) | |
2062 | #define SSD_INT_MODE_DEFAULT SSD_INT_MSIX | |
2063 | #elif (defined SSD_MSI) | |
2064 | #define SSD_INT_MODE_DEFAULT SSD_INT_MSI | |
2065 | #else | |
2066 | /* auto select the defaut int mode according to the kernel version*/ | |
2067 | /* suse 11 sp1 irqbalance bug: use msi instead*/ | |
2068 | #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR >= 6) || (defined RHEL_MAJOR && RHEL_MAJOR == 5 && RHEL_MINOR >= 5)) | |
2069 | #define SSD_INT_MODE_DEFAULT SSD_INT_MSIX | |
2070 | #else | |
2071 | #define SSD_INT_MODE_DEFAULT SSD_INT_MSI | |
2072 | #endif | |
2073 | #endif | |
2074 | ||
2075 | static int mode = SSD_DRV_MODE_STANDARD; | |
2076 | static int status_mask = 0xFF; | |
2077 | static int int_mode = SSD_INT_MODE_DEFAULT; | |
2078 | static int threaded_irq = 0; | |
2079 | static int log_level = SSD_LOG_LEVEL_WARNING; | |
2080 | static int ot_protect = 1; | |
2081 | static int wmode = SSD_WMODE_DEFAULT; | |
2082 | static int finject = 0; | |
2083 | ||
2084 | module_param(mode, int, 0); | |
2085 | module_param(status_mask, int, 0); | |
2086 | module_param(int_mode, int, 0); | |
2087 | module_param(threaded_irq, int, 0); | |
2088 | module_param(log_level, int, 0); | |
2089 | module_param(ot_protect, int, 0); | |
2090 | module_param(wmode, int, 0); | |
2091 | module_param(finject, int, 0); | |
2092 | ||
2093 | ||
2094 | MODULE_PARM_DESC(mode, "driver mode, 0 - standard, 1 - debug, 2 - debug without IO, 3 - basic debug mode"); | |
2095 | MODULE_PARM_DESC(status_mask, "command status mask, 0 - without command error, 0xff - with command error"); | |
2096 | MODULE_PARM_DESC(int_mode, "preferred interrupt mode, 0 - legacy, 1 - msi, 2 - msix"); | |
2097 | MODULE_PARM_DESC(threaded_irq, "threaded irq, 0 - normal irq, 1 - threaded irq"); | |
2098 | MODULE_PARM_DESC(log_level, "log level to display, 0 - info and above, 1 - notice and above, 2 - warning and above, 3 - error only"); | |
2099 | MODULE_PARM_DESC(ot_protect, "over temperature protect, 0 - disable, 1 - enable"); | |
2100 | MODULE_PARM_DESC(wmode, "write mode, 0 - write buffer (with risk for the 6xx firmware), 1 - write buffer ex, 2 - write through, 3 - auto, 4 - default"); | |
2101 | MODULE_PARM_DESC(finject, "enable fault simulation, 0 - off, 1 - on, for debug purpose only"); | |
2102 | ||
1197134c KM |
// API adaption layer
/*
 * Complete @bio with @error, hiding the bio_endio() signature changes
 * across kernel generations:
 *   >= 4.4:   the error travels inside the bio (bi_error, or bi_status
 *             via errno_to_blk_status() from 4.13 on) and bio_endio()
 *             takes only the bio;
 *   >= 2.6.24: bio_endio(bio, error);
 *   older:     bio_endio(bio, bytes_done, error) — the whole bi_size is
 *              reported as done.
 */
static inline void ssd_bio_endio(struct bio *bio, int error)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,13,0))
	bio->bi_error = error;
#else
	/* 4.13 replaced the errno in bi_error with a blk_status_t. */
	bio->bi_status = errno_to_blk_status(error);
#endif
	bio_endio(bio);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
	bio_endio(bio, error);
#else
	bio_endio(bio, bio->bi_size, error);
#endif
}
2119 | ||
/*
 * Return nonzero when @bio is a discard (TRIM) request, across the
 * kernel flag/opcode API generations.  Always 0 when the driver is
 * built without SSD_TRIM, or on kernels older than 2.6.32 that expose
 * no discard flag at all.
 */
static inline int ssd_bio_has_discard(struct bio *bio)
{
#ifndef SSD_TRIM
	return 0;
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0))
	return bio_op(bio) == REQ_OP_DISCARD;
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36))
	return bio->bi_rw & REQ_DISCARD;
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))
	return bio_rw_flagged(bio, BIO_RW_DISCARD);
#else
	return 0;
#endif
}
2134 | ||
/*
 * Return nonzero when @bio carries a flush request.  Kernels before
 * 2.6.37 have no REQ_FLUSH equivalent here, so the check degrades to 0.
 */
static inline int ssd_bio_has_flush(struct bio *bio)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0))
	return bio_op(bio) == REQ_OP_FLUSH;
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
	return bio->bi_rw & REQ_FLUSH;
#else
	return 0;
#endif
}
2145 | ||
da3355df | 2146 | static inline int ssd_bio_has_barrier_or_fua(struct bio * bio) |
1197134c KM |
2147 | { |
2148 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0)) | |
2149 | return bio->bi_opf & REQ_FUA; | |
da3355df | 2150 | #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37)) |
1197134c | 2151 | return bio->bi_rw & REQ_FUA; |
da3355df SF |
2152 | #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,36)) |
2153 | return bio->bi_rw & REQ_HARDBARRIER; | |
2154 | #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)) | |
2155 | return bio_rw_flagged(bio, BIO_RW_BARRIER); | |
2156 | #else | |
2157 | return bio_barrier(bio); | |
1197134c KM |
2158 | #endif |
2159 | } | |
361ebed5 HSDT |
2160 | |
#ifndef MODULE
/*
 * Boot-time parameter parsing for the built-in (non-module) case.
 * Each handler mirrors one module_param() above and is bound to a
 * MODULE_NAME-prefixed kernel command-line option via __setup().
 * All return 1 to tell the setup machinery the option was consumed.
 */
static int __init ssd_drv_mode(char *str)
{
	mode = (int)simple_strtoul(str, NULL, 0);

	return 1;
}

/* Parsed base-16, unlike the others (it is a bit mask). */
static int __init ssd_status_mask(char *str)
{
	status_mask = (int)simple_strtoul(str, NULL, 16);

	return 1;
}

static int __init ssd_int_mode(char *str)
{
	int_mode = (int)simple_strtoul(str, NULL, 0);

	return 1;
}

static int __init ssd_threaded_irq(char *str)
{
	threaded_irq = (int)simple_strtoul(str, NULL, 0);

	return 1;
}

static int __init ssd_log_level(char *str)
{
	log_level = (int)simple_strtoul(str, NULL, 0);

	return 1;
}

static int __init ssd_ot_protect(char *str)
{
	ot_protect = (int)simple_strtoul(str, NULL, 0);

	return 1;
}

static int __init ssd_wmode(char *str)
{
	wmode = (int)simple_strtoul(str, NULL, 0);

	return 1;
}

static int __init ssd_finject(char *str)
{
	finject = (int)simple_strtoul(str, NULL, 0);

	return 1;
}

__setup(MODULE_NAME"_mode=", ssd_drv_mode);
__setup(MODULE_NAME"_status_mask=", ssd_status_mask);
__setup(MODULE_NAME"_int_mode=", ssd_int_mode);
__setup(MODULE_NAME"_threaded_irq=", ssd_threaded_irq);
__setup(MODULE_NAME"_log_level=", ssd_log_level);
__setup(MODULE_NAME"_ot_protect=", ssd_ot_protect);
__setup(MODULE_NAME"_wmode=", ssd_wmode);
__setup(MODULE_NAME"_finject=", ssd_finject);
#endif
2227 | ||
2228 | ||
2229 | #ifdef CONFIG_PROC_FS | |
2230 | #include <linux/proc_fs.h> | |
2231 | #include <asm/uaccess.h> | |
2232 | ||
2233 | #define SSD_PROC_DIR MODULE_NAME | |
2234 | #define SSD_PROC_INFO "info" | |
2235 | ||
2236 | static struct proc_dir_entry *ssd_proc_dir = NULL; | |
2237 | static struct proc_dir_entry *ssd_proc_info = NULL; | |
2238 | ||
2239 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0)) | |
2240 | static int ssd_proc_read(char *page, char **start, | |
2241 | off_t off, int count, int *eof, void *data) | |
2242 | { | |
2243 | struct ssd_device *dev = NULL; | |
2244 | struct ssd_device *n = NULL; | |
2245 | uint64_t size; | |
2246 | int idx; | |
2247 | int len = 0; | |
2248 | //char type; //xx | |
2249 | ||
1197134c | 2250 | if (ssd_exiting || off != 0) { |
361ebed5 HSDT |
2251 | return 0; |
2252 | } | |
2253 | ||
2254 | len += snprintf((page + len), (count - len), "Driver Version:\t%s\n", DRIVER_VERSION); | |
2255 | ||
2256 | list_for_each_entry_safe(dev, n, &ssd_list, list) { | |
2257 | idx = dev->idx + 1; | |
2258 | size = dev->hw_info.size ; | |
2259 | do_div(size, 1000000000); | |
2260 | ||
2261 | len += snprintf((page + len), (count - len), "\n"); | |
2262 | ||
2263 | len += snprintf((page + len), (count - len), "HIO %d Size:\t%uGB\n", idx, (uint32_t)size); | |
2264 | ||
2265 | len += snprintf((page + len), (count - len), "HIO %d Bridge FW VER:\t%03X\n", idx, dev->hw_info.bridge_ver); | |
2266 | if (dev->hw_info.ctrl_ver != 0) { | |
2267 | len += snprintf((page + len), (count - len), "HIO %d Controller FW VER:\t%03X\n", idx, dev->hw_info.ctrl_ver); | |
2268 | } | |
2269 | ||
2270 | len += snprintf((page + len), (count - len), "HIO %d PCB VER:\t.%c\n", idx, dev->hw_info.pcb_ver); | |
2271 | ||
2272 | if (dev->hw_info.upper_pcb_ver >= 'A') { | |
2273 | len += snprintf((page + len), (count - len), "HIO %d Upper PCB VER:\t.%c\n", idx, dev->hw_info.upper_pcb_ver); | |
2274 | } | |
2275 | ||
2276 | len += snprintf((page + len), (count - len), "HIO %d Device:\t%s\n", idx, dev->name); | |
2277 | } | |
2278 | ||
1197134c | 2279 | *eof = 1; |
361ebed5 HSDT |
2280 | return len; |
2281 | } | |
2282 | ||
2283 | #else | |
2284 | ||
2285 | static int ssd_proc_show(struct seq_file *m, void *v) | |
2286 | { | |
2287 | struct ssd_device *dev = NULL; | |
2288 | struct ssd_device *n = NULL; | |
2289 | uint64_t size; | |
2290 | int idx; | |
2291 | ||
2292 | if (ssd_exiting) { | |
2293 | return 0; | |
2294 | } | |
2295 | ||
2296 | seq_printf(m, "Driver Version:\t%s\n", DRIVER_VERSION); | |
2297 | ||
2298 | list_for_each_entry_safe(dev, n, &ssd_list, list) { | |
2299 | idx = dev->idx + 1; | |
2300 | size = dev->hw_info.size ; | |
2301 | do_div(size, 1000000000); | |
2302 | ||
2303 | seq_printf(m, "\n"); | |
2304 | ||
2305 | seq_printf(m, "HIO %d Size:\t%uGB\n", idx, (uint32_t)size); | |
2306 | ||
2307 | seq_printf(m, "HIO %d Bridge FW VER:\t%03X\n", idx, dev->hw_info.bridge_ver); | |
2308 | if (dev->hw_info.ctrl_ver != 0) { | |
2309 | seq_printf(m, "HIO %d Controller FW VER:\t%03X\n", idx, dev->hw_info.ctrl_ver); | |
2310 | } | |
2311 | ||
2312 | seq_printf(m, "HIO %d PCB VER:\t.%c\n", idx, dev->hw_info.pcb_ver); | |
2313 | ||
2314 | if (dev->hw_info.upper_pcb_ver >= 'A') { | |
2315 | seq_printf(m, "HIO %d Upper PCB VER:\t.%c\n", idx, dev->hw_info.upper_pcb_ver); | |
2316 | } | |
2317 | ||
2318 | seq_printf(m, "HIO %d Device:\t%s\n", idx, dev->name); | |
2319 | } | |
2320 | ||
2321 | return 0; | |
2322 | } | |
2323 | ||
/*
 * proc open callback: bind the seq_file single-shot show routine.
 * PDE(inode)->data was the pre-3.10 way to reach the entry's private
 * data; PDE_DATA() is its replacement.
 */
static int ssd_proc_open(struct inode *inode, struct file *file)
{
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3,9,0))
	return single_open(file, ssd_proc_show, PDE(inode)->data);
#else
	return single_open(file, ssd_proc_show, PDE_DATA(inode));
#endif
}
2332 | ||
ba619a46 | 2333 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(5,6,0)) |
361ebed5 HSDT |
2334 | static const struct file_operations ssd_proc_fops = { |
2335 | .open = ssd_proc_open, | |
2336 | .read = seq_read, | |
2337 | .llseek = seq_lseek, | |
2338 | .release = single_release, | |
2339 | }; | |
ba619a46 AR |
2340 | #else |
2341 | static const struct proc_ops ssd_proc_fops = { | |
2342 | .proc_open = ssd_proc_open, | |
2343 | .proc_read = seq_read, | |
2344 | .proc_lseek = seq_lseek, | |
2345 | .proc_release = single_release, | |
2346 | }; | |
2347 | #endif | |
361ebed5 HSDT |
2348 | #endif |
2349 | ||
2350 | ||
2351 | static void ssd_cleanup_proc(void) | |
2352 | { | |
2353 | if (ssd_proc_info) { | |
2354 | remove_proc_entry(SSD_PROC_INFO, ssd_proc_dir); | |
2355 | ssd_proc_info = NULL; | |
2356 | } | |
2357 | if (ssd_proc_dir) { | |
2358 | remove_proc_entry(SSD_PROC_DIR, NULL); | |
2359 | ssd_proc_dir = NULL; | |
2360 | } | |
2361 | } | |
/*
 * Create /proc/<MODULE_NAME>/info.  Pre-3.2 kernels use the
 * create_proc_entry()/read_proc interface; newer ones use proc_create()
 * with the fops/proc_ops table above.  Returns 0 on success, -ENOMEM
 * on failure (goto-cleanup unwinds the directory).
 */
static int ssd_init_proc(void)
{
	ssd_proc_dir = proc_mkdir(SSD_PROC_DIR, NULL);
	if (!ssd_proc_dir)
		goto out_proc_mkdir;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0))
	ssd_proc_info = create_proc_entry(SSD_PROC_INFO, S_IFREG | S_IRUGO | S_IWUSR, ssd_proc_dir);
	if (!ssd_proc_info)
		goto out_create_proc_entry;

	ssd_proc_info->read_proc = ssd_proc_read;

	/* kernel bug: pre-2.6.30 entries need an explicit owner */
	#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30))
	ssd_proc_info->owner = THIS_MODULE;
	#endif
#else
	ssd_proc_info = proc_create(SSD_PROC_INFO, 0600, ssd_proc_dir, &ssd_proc_fops);
	if (!ssd_proc_info)
		goto out_create_proc_entry;
#endif

	return 0;

out_create_proc_entry:
	remove_proc_entry(SSD_PROC_DIR, NULL);
out_proc_mkdir:
	return -ENOMEM;
}
2392 | ||
2393 | #else | |
/* No-op stubs when the kernel is built without procfs support. */
static void ssd_cleanup_proc(void)
{
	return;
}
static int ssd_init_proc(void)
{
	return 0;
}
2402 | #endif /* CONFIG_PROC_FS */ | |
2403 | ||
/* sysfs */
/*
 * Sysfs registration is not implemented; these stubs keep the
 * register/unregister call sites in the device setup path uniform.
 */
static void ssd_unregister_sysfs(struct ssd_device *dev)
{
	return;
}

static int ssd_register_sysfs(struct ssd_device *dev)
{
	return 0;
}

static void ssd_cleanup_sysfs(void)
{
	return;
}

static int ssd_init_sysfs(void)
{
	return 0;
}
2424 | ||
2425 | static inline void ssd_put_index(int slave, int index) | |
2426 | { | |
2427 | unsigned long *index_bits = ssd_index_bits; | |
2428 | ||
2429 | if (slave) { | |
2430 | index_bits = ssd_index_bits_sl; | |
2431 | } | |
2432 | ||
2433 | if (test_and_clear_bit(index, index_bits)) { | |
2434 | atomic_dec(&ssd_nr); | |
2435 | } | |
2436 | } | |
2437 | ||
2438 | static inline int ssd_get_index(int slave) | |
2439 | { | |
2440 | unsigned long *index_bits = ssd_index_bits; | |
2441 | int index; | |
2442 | ||
2443 | if (slave) { | |
2444 | index_bits = ssd_index_bits_sl; | |
2445 | } | |
2446 | ||
2447 | find_index: | |
2448 | if ((index = find_first_zero_bit(index_bits, SSD_MAX_DEV)) >= SSD_MAX_DEV) { | |
2449 | return -1; | |
2450 | } | |
2451 | ||
2452 | if (test_and_set_bit(index, index_bits)) { | |
2453 | goto find_index; | |
2454 | } | |
2455 | ||
2456 | atomic_inc(&ssd_nr); | |
2457 | ||
2458 | return index; | |
2459 | } | |
2460 | ||
/* Nothing to tear down: the index bitmaps are static storage. */
static void ssd_cleanup_index(void)
{
	return;
}
2465 | ||
2466 | static int ssd_init_index(void) | |
2467 | { | |
2468 | INIT_LIST_HEAD(&ssd_list); | |
2469 | atomic_set(&ssd_nr, 0); | |
3871d789 SF |
2470 | memset(ssd_index_bits, 0, sizeof(ssd_index_bits)); |
2471 | memset(ssd_index_bits_sl, 0, sizeof(ssd_index_bits_sl)); | |
361ebed5 HSDT |
2472 | |
2473 | return 0; | |
2474 | } | |
2475 | ||
2476 | static void ssd_set_dev_name(char *name, size_t size, int idx) | |
2477 | { | |
2478 | if(idx < SSD_ALPHABET_NUM) { | |
2479 | snprintf(name, size, "%c", 'a'+idx); | |
2480 | } else { | |
2481 | idx -= SSD_ALPHABET_NUM; | |
2482 | snprintf(name, size, "%c%c", 'a'+(idx/SSD_ALPHABET_NUM), 'a'+(idx%SSD_ALPHABET_NUM)); | |
2483 | } | |
2484 | } | |
2485 | ||
/* pci register r&w */
/*
 * Write a 64-bit value as two 32-bit MMIO stores (low word first), then
 * fence with wmb() so the pair is posted before any later access.
 * NOTE(review): the two halves are not atomic with respect to the
 * device — presumably the hardware latches on the high-word write;
 * confirm against the device spec.
 */
static inline void ssd_reg_write(void *addr, uint64_t val)
{
	iowrite32((uint32_t)val, addr);
	iowrite32((uint32_t)(val >> 32), addr + 4);
	wmb();
}
2493 | ||
2494 | static inline uint64_t ssd_reg_read(void *addr) | |
2495 | { | |
2496 | uint64_t val; | |
2497 | uint32_t val_lo, val_hi; | |
2498 | ||
2499 | val_lo = ioread32(addr); | |
2500 | val_hi = ioread32(addr + 4); | |
2501 | ||
2502 | rmb(); | |
2503 | val = val_lo | ((uint64_t)val_hi << 32); | |
2504 | ||
2505 | return val; | |
2506 | } | |
2507 | ||
2508 | ||
/* 32-bit MMIO accessors; thin aliases over writel()/readl(). */
#define ssd_reg32_write(addr, val) writel(val, addr)
#define ssd_reg32_read(addr) readl(addr)
2511 | ||
2512 | /* alarm led */ | |
2513 | static void ssd_clear_alarm(struct ssd_device *dev) | |
2514 | { | |
2515 | uint32_t val; | |
2516 | ||
2517 | if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) { | |
2518 | return; | |
2519 | } | |
2520 | ||
2521 | val = ssd_reg32_read(dev->ctrlp + SSD_LED_REG); | |
2522 | ||
2523 | /* firmware control */ | |
2524 | val &= ~0x2; | |
2525 | ||
2526 | ssd_reg32_write(dev->ctrlp + SSD_LED_REG, val); | |
2527 | } | |
2528 | ||
2529 | static void ssd_set_alarm(struct ssd_device *dev) | |
2530 | { | |
2531 | uint32_t val; | |
2532 | ||
2533 | if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) { | |
2534 | return; | |
2535 | } | |
2536 | ||
2537 | val = ssd_reg32_read(dev->ctrlp + SSD_LED_REG); | |
2538 | ||
2539 | /* light up */ | |
2540 | val &= ~0x1; | |
2541 | /* software control */ | |
2542 | val |= 0x2; | |
2543 | ||
2544 | ssd_reg32_write(dev->ctrlp + SSD_LED_REG, val); | |
2545 | } | |
2546 | ||
/*
 * Byte-order swap helpers (32-bit and 16-bit).  NOTE: function-like
 * macros that evaluate their argument several times — do not pass
 * expressions with side effects (e.g. u32_swap(x++)).
 */
#define u32_swap(x) \
	((uint32_t)( \
		(((uint32_t)(x) & (uint32_t)0x000000ffUL) << 24) | \
		(((uint32_t)(x) & (uint32_t)0x0000ff00UL) << 8) | \
		(((uint32_t)(x) & (uint32_t)0x00ff0000UL) >> 8) | \
		(((uint32_t)(x) & (uint32_t)0xff000000UL) >> 24)))

#define u16_swap(x) \
	((uint16_t)( \
		(((uint16_t)(x) & (uint16_t)0x00ff) << 8) | \
		(((uint16_t)(x) & (uint16_t)0xff00) >> 8) ))
2558 | ||
2559 | ||
/*
 * Dead code (compiled out): poll-based read of the SPI flash ID.
 * Kept for reference; lock-free, so only safe during init before any
 * concurrent SPI access.
 */
#if 0
/* No lock, for init only*/
static int ssd_spi_read_id(struct ssd_device *dev, uint32_t *id)
{
	uint32_t val;
	unsigned long st;
	int ret = 0;

	if (!dev || !id) {
		return -EINVAL;
	}

	ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_READ_ID);

	/* dummy reads — presumably to flush the posted write; confirm */
	val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);
	val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);
	val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);
	val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);

	/* poll the ready register until the command completes or times out */
	st = jiffies;
	for (;;) {
		val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);
		if (val == 0x1000000) {
			break;
		}

		if (time_after(jiffies, (st + SSD_SPI_TIMEOUT))) {
			ret = -ETIMEDOUT;
			goto out;
		}
		cond_resched();
	}

	val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_ID);
	*id = val;

out:
	return ret;
}
#endif
2600 | ||
/* spi access */
/* Quiesce the SPI flash: wait for any in-progress operation to finish,
 * clear a lingering error status (protocol >= V3.2) and drop write-enable.
 * Serialized by dev->spi_mutex. */
static int ssd_init_spi(struct ssd_device *dev)
{
	uint32_t val;
	unsigned long st;
	int ret = 0;

	mutex_lock(&dev->spi_mutex);
	st = jiffies;
	for(;;) {
		ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_READ_STATUS);

		/* inner loop: wait for the controller to latch the status byte */
		do {
			val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);

			if (time_after(jiffies, (st + SSD_SPI_TIMEOUT))) {
				ret = -ETIMEDOUT;
				goto out;
			}
			cond_resched();
		} while (val != 0x1000000);

		val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_STATUS);
		if (!(val & 0x1)) {	/* bit 0 (write-in-progress) clear: flash idle */
			break;
		}

		if (time_after(jiffies, (st + SSD_SPI_TIMEOUT))) {
			ret = -ETIMEDOUT;
			goto out;
		}
		cond_resched();
	}

out:
	/* NOTE(review): on the timeout paths val still holds the READY register
	 * value, not STATUS, when tested below - confirm intended. */
	if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) {
		if (val & 0x1) {
			ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_CLSR);
		}
	}
	ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_W_DISABLE);
	mutex_unlock(&dev->spi_mutex);

	/* NOTE(review): ret is unconditionally reset here, so a timeout above is
	 * discarded and the function always returns 0 - confirm deliberate. */
	ret = 0;

	return ret;
}
2648 | ||
/* Read up to one flash page from SPI ROM at byte offset 'off' into 'buf'.
 * off and size must be 32-bit aligned, within the ROM, and size must not
 * exceed one page. Returns 0 or -EINVAL / -ETIMEDOUT.
 * Serialized by dev->spi_mutex; on timeout buf may be partially filled. */
static int ssd_spi_page_read(struct ssd_device *dev, void *buf, uint32_t off, uint32_t size)
{
	uint32_t val;
	uint32_t rlen = 0;
	unsigned long st;
	int ret = 0;

	if (!dev || !buf) {
		return -EINVAL;
	}

	if ((off % sizeof(uint32_t)) != 0 || (size % sizeof(uint32_t)) != 0 || size == 0 ||
		((uint64_t)off + (uint64_t)size) > dev->rom_info.size || size > dev->rom_info.page_size) {
		return -EINVAL;
	}

	mutex_lock(&dev->spi_mutex);
	while (rlen < size) {
		/* address high byte first, then low 24 bits fused with the READ opcode */
		ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD_HI, ((off + rlen) >> 24));
		wmb();
		ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, (((off + rlen) << 8) | SSD_SPI_CMD_READ));

		/* four dummy reads - presumably settle time for the controller; confirm */
		(void)ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);
		(void)ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);
		(void)ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);
		(void)ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);

		st = jiffies;
		for (;;) {
			val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);
			if (val == 0x1000000) {	/* data word latched */
				break;
			}

			if (time_after(jiffies, (st + SSD_SPI_TIMEOUT))) {
				ret = -ETIMEDOUT;
				goto out;
			}
			cond_resched();
		}

		/* data comes back big-endian; swap each word to host order */
		val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_RDATA);
		*(uint32_t *)(buf + rlen)= u32_swap(val);

		rlen += sizeof(uint32_t);
	}

out:
	mutex_unlock(&dev->spi_mutex);
	return ret;
}
2700 | ||
/* Program up to one flash page at byte offset 'off' from 'buf'.
 * off/size must be 32-bit aligned, inside the ROM, and the whole write must
 * stay within a single page (checked below). Returns 0 or
 * -EINVAL / -ETIMEDOUT / -EIO. Serialized by dev->spi_mutex. */
static int ssd_spi_page_write(struct ssd_device *dev, void *buf, uint32_t off, uint32_t size)
{
	uint32_t val;
	uint32_t wlen;
	unsigned long st;
	int i;
	int ret = 0;

	if (!dev || !buf) {
		return -EINVAL;
	}

	/* last condition rejects writes that would straddle a page boundary */
	if ((off % sizeof(uint32_t)) != 0 || (size % sizeof(uint32_t)) != 0 || size == 0 ||
		((uint64_t)off + (uint64_t)size) > dev->rom_info.size || size > dev->rom_info.page_size ||
		(off / dev->rom_info.page_size) != ((off + size - 1) / dev->rom_info.page_size)) {
		return -EINVAL;
	}

	mutex_lock(&dev->spi_mutex);

	ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_W_ENABLE);

	/* preload the write fifo; data goes out big-endian */
	wlen = size / sizeof(uint32_t);
	for (i=0; i<(int)wlen; i++) {
		ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_WDATA, u32_swap(*((uint32_t *)buf + i)));
	}

	/* address high byte, then low 24 bits fused with the PROGRAM opcode */
	wmb();
	ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD_HI, (off >> 24));
	wmb();
	ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, ((off << 8) | SSD_SPI_CMD_PROGRAM));

	udelay(1);

	/* poll READ_STATUS until WIP (bit 0) clears or timeout */
	st = jiffies;
	for (;;) {
		ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_READ_STATUS);
		do {
			val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);

			if (time_after(jiffies, (st + SSD_SPI_TIMEOUT))) {
				ret = -ETIMEDOUT;
				goto out;
			}
			cond_resched();
		} while (val != 0x1000000);

		val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_STATUS);
		if (!(val & 0x1)) {
			break;
		}

		if (time_after(jiffies, (st + SSD_SPI_TIMEOUT))) {
			ret = -ETIMEDOUT;
			goto out;
		}
		cond_resched();
	}

	/* program-error flag (bit 6), reported on protocol >= V3.2 only */
	if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) {
		if ((val >> 6) & 0x1) {
			ret = -EIO;
			goto out;
		}
	}

out:
	/* NOTE(review): on the timeout paths val holds the READY register value,
	 * not STATUS, when tested below - confirm intended. */
	if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) {
		if (val & 0x1) {
			ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_CLSR);
		}
	}
	ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_W_DISABLE);

	mutex_unlock(&dev->spi_mutex);

	return ret;
}
2779 | ||
/* Erase the flash block containing byte offset 'off' (must be block-aligned
 * and inside the ROM). Returns 0 or -EINVAL / -ETIMEDOUT / -EIO.
 * Serialized by dev->spi_mutex. */
static int ssd_spi_block_erase(struct ssd_device *dev, uint32_t off)
{
	uint32_t val;
	unsigned long st;
	int ret = 0;

	if (!dev) {
		return -EINVAL;
	}

	if ((off % dev->rom_info.block_size) != 0 || off >= dev->rom_info.size) {
		return -EINVAL;
	}

	mutex_lock(&dev->spi_mutex);

	/* NOTE(review): W_ENABLE is issued twice - looks deliberate, confirm */
	ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_W_ENABLE);
	ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_W_ENABLE);

	/* address high byte, then low 24 bits fused with the ERASE opcode */
	wmb();
	ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD_HI, (off >> 24));
	wmb();
	ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, ((off << 8) | SSD_SPI_CMD_ERASE));

	/* poll READ_STATUS until WIP (bit 0) clears or timeout */
	st = jiffies;
	for (;;) {
		ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_READ_STATUS);

		do {
			val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_READY);

			if (time_after(jiffies, (st + SSD_SPI_TIMEOUT))) {
				ret = -ETIMEDOUT;
				goto out;
			}
			cond_resched();
		} while (val != 0x1000000);

		val = ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_STATUS);
		if (!(val & 0x1)) {
			break;
		}

		if (time_after(jiffies, (st + SSD_SPI_TIMEOUT))) {
			ret = -ETIMEDOUT;
			goto out;
		}
		cond_resched();
	}

	/* erase-error flag (bit 5), reported on protocol >= V3.2 only */
	if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) {
		if ((val >> 5) & 0x1) {
			ret = -EIO;
			goto out;
		}
	}

out:
	/* NOTE(review): on the timeout paths val holds the READY register value,
	 * not STATUS, when tested below - confirm intended. */
	if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) {
		if (val & 0x1) {
			ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_CLSR);
		}
	}
	ssd_reg32_write(dev->ctrlp + SSD_SPI_REG_CMD, SSD_SPI_CMD_W_DISABLE);

	mutex_unlock(&dev->spi_mutex);

	return ret;
}
2849 | ||
2850 | static int ssd_spi_read(struct ssd_device *dev, void *buf, uint32_t off, uint32_t size) | |
2851 | { | |
2852 | uint32_t len = 0; | |
2853 | uint32_t roff; | |
2854 | uint32_t rsize; | |
2855 | int ret = 0; | |
2856 | ||
2857 | if (!dev || !buf) { | |
2858 | return -EINVAL; | |
2859 | } | |
2860 | ||
2861 | if ((off % sizeof(uint32_t)) != 0 || (size % sizeof(uint32_t)) != 0 || size == 0 || | |
2862 | ((uint64_t)off + (uint64_t)size) > dev->rom_info.size) { | |
2863 | return -EINVAL; | |
2864 | } | |
2865 | ||
2866 | while (len < size) { | |
2867 | roff = (off + len) % dev->rom_info.page_size; | |
2868 | rsize = dev->rom_info.page_size - roff; | |
2869 | if ((size - len) < rsize) { | |
2870 | rsize = (size - len); | |
2871 | } | |
2872 | roff = off + len; | |
2873 | ||
2874 | ret = ssd_spi_page_read(dev, (buf + len), roff, rsize); | |
2875 | if (ret) { | |
2876 | goto out; | |
2877 | } | |
2878 | ||
2879 | len += rsize; | |
2880 | ||
2881 | cond_resched(); | |
2882 | } | |
2883 | ||
2884 | out: | |
2885 | return ret; | |
2886 | } | |
2887 | ||
2888 | static int ssd_spi_write(struct ssd_device *dev, void *buf, uint32_t off, uint32_t size) | |
2889 | { | |
2890 | uint32_t len = 0; | |
2891 | uint32_t woff; | |
2892 | uint32_t wsize; | |
2893 | int ret = 0; | |
2894 | ||
2895 | if (!dev || !buf) { | |
2896 | return -EINVAL; | |
2897 | } | |
2898 | ||
2899 | if ((off % sizeof(uint32_t)) != 0 || (size % sizeof(uint32_t)) != 0 || size == 0 || | |
2900 | ((uint64_t)off + (uint64_t)size) > dev->rom_info.size) { | |
2901 | return -EINVAL; | |
2902 | } | |
2903 | ||
2904 | while (len < size) { | |
2905 | woff = (off + len) % dev->rom_info.page_size; | |
2906 | wsize = dev->rom_info.page_size - woff; | |
2907 | if ((size - len) < wsize) { | |
2908 | wsize = (size - len); | |
2909 | } | |
2910 | woff = off + len; | |
2911 | ||
2912 | ret = ssd_spi_page_write(dev, (buf + len), woff, wsize); | |
2913 | if (ret) { | |
2914 | goto out; | |
2915 | } | |
2916 | ||
2917 | len += wsize; | |
2918 | ||
2919 | cond_resched(); | |
2920 | } | |
2921 | ||
2922 | out: | |
2923 | return ret; | |
2924 | } | |
2925 | ||
2926 | static int ssd_spi_erase(struct ssd_device *dev, uint32_t off, uint32_t size) | |
2927 | { | |
2928 | uint32_t len = 0; | |
2929 | uint32_t eoff; | |
2930 | int ret = 0; | |
2931 | ||
2932 | if (!dev) { | |
2933 | return -EINVAL; | |
2934 | } | |
2935 | ||
2936 | if (size == 0 || ((uint64_t)off + (uint64_t)size) > dev->rom_info.size || | |
2937 | (off % dev->rom_info.block_size) != 0 || (size % dev->rom_info.block_size) != 0) { | |
2938 | return -EINVAL; | |
2939 | } | |
2940 | ||
2941 | while (len < size) { | |
2942 | eoff = (off + len); | |
2943 | ||
2944 | ret = ssd_spi_block_erase(dev, eoff); | |
2945 | if (ret) { | |
2946 | goto out; | |
2947 | } | |
2948 | ||
2949 | len += dev->rom_info.block_size; | |
2950 | ||
2951 | cond_resched(); | |
2952 | } | |
2953 | ||
2954 | out: | |
2955 | return ret; | |
2956 | } | |
2957 | ||
2958 | /* i2c access */ | |
2959 | static uint32_t __ssd_i2c_reg32_read(void *addr) | |
2960 | { | |
2961 | return ssd_reg32_read(addr); | |
2962 | } | |
2963 | ||
/* 32-bit MMIO write followed by a read-back of the same register.
 * NOTE(review): the read presumably flushes the posted write so the I2C
 * controller sees it before we continue - ordering is the point here,
 * do not reorder or drop the read. */
static void __ssd_i2c_reg32_write(void *addr, uint32_t val)
{
	ssd_reg32_write(addr, val);
	ssd_reg32_read(addr);
}
2969 | ||
/* Clean up the I2C controller after a transfer: re-read STATUS, pop any
 * stale bytes left in the read fifo (at most SSD_I2C_MAX_DATA, re-checking
 * status after each), ack pending interrupts, and reset the controller if
 * it still looks wedged. Caller holds dev->i2c_mutex. Returns 0 or
 * -ETIMEDOUT. 'saddr' is unused here.
 * NOTE(review): ctrl.val is never zero-initialized in this function (unlike
 * the callers) - only wdata/addr/rw are set; confirm remaining bits don't
 * matter. */
static int __ssd_i2c_clear(struct ssd_device *dev, uint8_t saddr)
{
	ssd_i2c_ctrl_t ctrl;
	ssd_i2c_data_t data;
	uint8_t status = 0;
	int nr_data = 0;
	unsigned long st;
	int ret = 0;

check_status:
	/* request the STATUS register */
	ctrl.bits.wdata = 0;
	ctrl.bits.addr = SSD_I2C_STATUS_REG;
	ctrl.bits.rw = SSD_I2C_CTRL_READ;
	__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

	/* wait until the read-back value is no longer marked in-flight */
	st = jiffies;
	for (;;) {
		data.val = __ssd_i2c_reg32_read(dev->ctrlp + SSD_I2C_RDATA_REG);
		if (data.bits.valid == 0) {
			break;
		}

		/* retry */
		if (time_after(jiffies, (st + SSD_I2C_TIMEOUT))) {
			ret = -ETIMEDOUT;
			goto out;
		}
		cond_resched();
	}
	status = data.bits.rdata;

	if (!(status & 0x4)) {
		/* clear read fifo data */
		ctrl.bits.wdata = 0;
		ctrl.bits.addr = SSD_I2C_DATA_REG;
		ctrl.bits.rw = SSD_I2C_CTRL_READ;
		__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

		st = jiffies;
		for (;;) {
			data.val = __ssd_i2c_reg32_read(dev->ctrlp + SSD_I2C_RDATA_REG);
			if (data.bits.valid == 0) {
				break;
			}

			/* retry */
			if (time_after(jiffies, (st + SSD_I2C_TIMEOUT))) {
				ret = -ETIMEDOUT;
				goto out;
			}
			cond_resched();
		}

		/* bound the drain; if the fifo never empties, fall through to reset */
		nr_data++;
		if (nr_data <= SSD_I2C_MAX_DATA) {
			goto check_status;
		} else {
			goto out_reset;
		}
	}

	if (status & 0x3) {
		/* clear int */
		ctrl.bits.wdata = 0x04;
		ctrl.bits.addr = SSD_I2C_CMD_REG;
		ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
		__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);
	}

	if (!(status & 0x8)) {
out_reset:
		/* reset i2c controller */
		ctrl.bits.wdata = 0x0;
		ctrl.bits.addr = SSD_I2C_RESET_REG;
		ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
		__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);
	}

out:
	return ret;
}
3051 | ||
/* Write 'size' bytes from 'buf' to I2C slave 'saddr' through the indirect
 * controller interface. Returns 0 on success, -ETIMEDOUT, or -1 (no done
 * flag) / -2 (bus busy) / -3 (no ack) / -4 (cleanup failed).
 * Serialized by dev->i2c_mutex. */
static int ssd_i2c_write(struct ssd_device *dev, uint8_t saddr, uint8_t size, uint8_t *buf)
{
	ssd_i2c_ctrl_t ctrl;
	ssd_i2c_data_t data;
	uint8_t off = 0;
	uint8_t status = 0;
	unsigned long st;
	int ret = 0;

	mutex_lock(&dev->i2c_mutex);

	ctrl.val = 0;

	/* slave addr */
	ctrl.bits.wdata = saddr;
	ctrl.bits.addr = SSD_I2C_SADDR_REG;
	ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
	__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

	/* push payload bytes into the controller's write fifo */
	while (off < size) {
		ctrl.bits.wdata = buf[off];
		ctrl.bits.addr = SSD_I2C_DATA_REG;
		ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
		__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

		off++;
	}

	/* start the bus write (command 0x01) */
	ctrl.bits.wdata = 0x01;
	ctrl.bits.addr = SSD_I2C_CMD_REG;
	ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
	__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

	/* poll STATUS until write-done (bit 0); the inner loop waits for the
	 * read-back value itself to become valid. One deadline covers both. */
	st = jiffies;
	for (;;) {
		ctrl.bits.wdata = 0;
		ctrl.bits.addr = SSD_I2C_STATUS_REG;
		ctrl.bits.rw = SSD_I2C_CTRL_READ;
		__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

		for (;;) {
			data.val = __ssd_i2c_reg32_read(dev->ctrlp + SSD_I2C_RDATA_REG);
			if (data.bits.valid == 0) {
				break;
			}

			/* retry */
			if (time_after(jiffies, (st + SSD_I2C_TIMEOUT))) {
				ret = -ETIMEDOUT;
				goto out_clear;
			}
			cond_resched();
		}

		status = data.bits.rdata;
		if (status & 0x1) {	/* write finished */
			break;
		}

		if (time_after(jiffies, (st + SSD_I2C_TIMEOUT))) {
			ret = -ETIMEDOUT;
			goto out_clear;
		}
		cond_resched();
	}

	if (!(status & 0x1)) {
		ret = -1;
		goto out_clear;
	}

	/* busy ? */
	if (status & 0x20) {
		ret = -2;
		goto out_clear;
	}

	/* ack ? */
	if (status & 0x10) {
		ret = -3;
		goto out_clear;
	}

	/* clear */
out_clear:
	if (__ssd_i2c_clear(dev, saddr)) {
		if (!ret) ret = -4;
	}

	mutex_unlock(&dev->i2c_mutex);

	return ret;
}
3148 | ||
/* Read 'size' bytes from I2C slave 'saddr' into 'buf' through the indirect
 * controller interface. Returns 0 on success, -ETIMEDOUT, or -1 (no done
 * flag) / -2 (bus busy) / -3 (no ack) / -4 (cleanup failed).
 * Serialized by dev->i2c_mutex. */
static int ssd_i2c_read(struct ssd_device *dev, uint8_t saddr, uint8_t size, uint8_t *buf)
{
	ssd_i2c_ctrl_t ctrl;
	ssd_i2c_data_t data;
	uint8_t off = 0;
	uint8_t status = 0;
	unsigned long st;
	int ret = 0;

	mutex_lock(&dev->i2c_mutex);

	ctrl.val = 0;

	/* slave addr */
	ctrl.bits.wdata = saddr;
	ctrl.bits.addr = SSD_I2C_SADDR_REG;
	ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
	__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

	/* read len */
	ctrl.bits.wdata = size;
	ctrl.bits.addr = SSD_I2C_LEN_REG;
	ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
	__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

	/* start the bus read (command 0x02) */
	ctrl.bits.wdata = 0x02;
	ctrl.bits.addr = SSD_I2C_CMD_REG;
	ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
	__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

	/* poll STATUS until read-done (bit 1); the inner loop waits for the
	 * read-back value itself to become valid. One deadline covers both. */
	st = jiffies;
	for (;;) {
		ctrl.bits.wdata = 0;
		ctrl.bits.addr = SSD_I2C_STATUS_REG;
		ctrl.bits.rw = SSD_I2C_CTRL_READ;
		__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

		for (;;) {
			data.val = __ssd_i2c_reg32_read(dev->ctrlp + SSD_I2C_RDATA_REG);
			if (data.bits.valid == 0) {
				break;
			}

			/* retry */
			if (time_after(jiffies, (st + SSD_I2C_TIMEOUT))) {
				ret = -ETIMEDOUT;
				goto out_clear;
			}
			cond_resched();
		}

		status = data.bits.rdata;
		if (status & 0x2) {	/* read finished */
			break;
		}

		if (time_after(jiffies, (st + SSD_I2C_TIMEOUT))) {
			ret = -ETIMEDOUT;
			goto out_clear;
		}
		cond_resched();
	}

	if (!(status & 0x2)) {
		ret = -1;
		goto out_clear;
	}

	/* busy ? */
	if (status & 0x20) {
		ret = -2;
		goto out_clear;
	}

	/* ack ? */
	if (status & 0x10) {
		ret = -3;
		goto out_clear;
	}

	/* pop the received bytes from the controller's read fifo */
	while (off < size) {
		ctrl.bits.wdata = 0;
		ctrl.bits.addr = SSD_I2C_DATA_REG;
		ctrl.bits.rw = SSD_I2C_CTRL_READ;
		__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

		st = jiffies;
		for (;;) {
			data.val = __ssd_i2c_reg32_read(dev->ctrlp + SSD_I2C_RDATA_REG);
			if (data.bits.valid == 0) {
				break;
			}

			/* retry */
			if (time_after(jiffies, (st + SSD_I2C_TIMEOUT))) {
				ret = -ETIMEDOUT;
				goto out_clear;
			}
			cond_resched();
		}

		buf[off] = data.bits.rdata;

		off++;
	}

	/* clear */
out_clear:
	if (__ssd_i2c_clear(dev, saddr)) {
		if (!ret) ret = -4;
	}

	mutex_unlock(&dev->i2c_mutex);

	return ret;
}
3268 | ||
/* Combined write-then-read transaction (repeated-start style, command 0x03):
 * send 'wsize' bytes from 'wbuf' to slave 'saddr', then read 'rsize' bytes
 * into 'rbuf'. Returns 0 on success, -ETIMEDOUT, or -1 (no done flag) /
 * -2 (bus busy) / -3 (no ack) / -4 (cleanup failed).
 * Serialized by dev->i2c_mutex. */
static int ssd_i2c_write_read(struct ssd_device *dev, uint8_t saddr, uint8_t wsize, uint8_t *wbuf, uint8_t rsize, uint8_t *rbuf)
{
	ssd_i2c_ctrl_t ctrl;
	ssd_i2c_data_t data;
	uint8_t off = 0;
	uint8_t status = 0;
	unsigned long st;
	int ret = 0;

	mutex_lock(&dev->i2c_mutex);

	ctrl.val = 0;

	/* slave addr */
	ctrl.bits.wdata = saddr;
	ctrl.bits.addr = SSD_I2C_SADDR_REG;
	ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
	__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

	/* push outgoing bytes into the controller's write fifo */
	off = 0;
	while (off < wsize) {
		ctrl.bits.wdata = wbuf[off];
		ctrl.bits.addr = SSD_I2C_DATA_REG;
		ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
		__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

		off++;
	}

	/* read len */
	ctrl.bits.wdata = rsize;
	ctrl.bits.addr = SSD_I2C_LEN_REG;
	ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
	__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

	/* write -> read */
	ctrl.bits.wdata = 0x03;
	ctrl.bits.addr = SSD_I2C_CMD_REG;
	ctrl.bits.rw = SSD_I2C_CTRL_WRITE;
	__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

	/* poll STATUS until read-done (bit 1); the inner loop waits for the
	 * read-back value itself to become valid. One deadline covers both. */
	st = jiffies;
	for (;;) {
		ctrl.bits.wdata = 0;
		ctrl.bits.addr = SSD_I2C_STATUS_REG;
		ctrl.bits.rw = SSD_I2C_CTRL_READ;
		__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

		for (;;) {
			data.val = __ssd_i2c_reg32_read(dev->ctrlp + SSD_I2C_RDATA_REG);
			if (data.bits.valid == 0) {
				break;
			}

			/* retry */
			if (time_after(jiffies, (st + SSD_I2C_TIMEOUT))) {
				ret = -ETIMEDOUT;
				goto out_clear;
			}
			cond_resched();
		}

		status = data.bits.rdata;
		if (status & 0x2) {	/* transaction finished */
			break;
		}

		if (time_after(jiffies, (st + SSD_I2C_TIMEOUT))) {
			ret = -ETIMEDOUT;
			goto out_clear;
		}
		cond_resched();
	}

	if (!(status & 0x2)) {
		ret = -1;
		goto out_clear;
	}

	/* busy ? */
	if (status & 0x20) {
		ret = -2;
		goto out_clear;
	}

	/* ack ? */
	if (status & 0x10) {
		ret = -3;
		goto out_clear;
	}

	/* pop the received bytes from the controller's read fifo */
	off = 0;
	while (off < rsize) {
		ctrl.bits.wdata = 0;
		ctrl.bits.addr = SSD_I2C_DATA_REG;
		ctrl.bits.rw = SSD_I2C_CTRL_READ;
		__ssd_i2c_reg32_write(dev->ctrlp + SSD_I2C_CTRL_REG, ctrl.val);

		st = jiffies;
		for (;;) {
			data.val = __ssd_i2c_reg32_read(dev->ctrlp + SSD_I2C_RDATA_REG);
			if (data.bits.valid == 0) {
				break;
			}

			/* retry */
			if (time_after(jiffies, (st + SSD_I2C_TIMEOUT))) {
				ret = -ETIMEDOUT;
				goto out_clear;
			}
			cond_resched();
		}

		rbuf[off] = data.bits.rdata;

		off++;
	}

	/* clear */
out_clear:
	if (__ssd_i2c_clear(dev, saddr)) {
		if (!ret) ret = -4;
	}
	mutex_unlock(&dev->i2c_mutex);

	return ret;
}
3399 | ||
3400 | static int ssd_smbus_send_byte(struct ssd_device *dev, uint8_t saddr, uint8_t *buf) | |
3401 | { | |
3402 | int i = 0; | |
3403 | int ret = 0; | |
3404 | ||
3405 | for (;;) { | |
3406 | ret = ssd_i2c_write(dev, saddr, 1, buf); | |
3407 | if (!ret || -ETIMEDOUT == ret) { | |
3408 | break; | |
3409 | } | |
3410 | ||
3411 | i++; | |
3412 | if (i >= SSD_SMBUS_RETRY_MAX) { | |
3413 | break; | |
3414 | } | |
3415 | msleep(SSD_SMBUS_RETRY_INTERVAL); | |
3416 | } | |
3417 | ||
3418 | return ret; | |
3419 | } | |
3420 | ||
3421 | static int ssd_smbus_receive_byte(struct ssd_device *dev, uint8_t saddr, uint8_t *buf) | |
3422 | { | |
3423 | int i = 0; | |
3424 | int ret = 0; | |
3425 | ||
3426 | for (;;) { | |
3427 | ret = ssd_i2c_read(dev, saddr, 1, buf); | |
3428 | if (!ret || -ETIMEDOUT == ret) { | |
3429 | break; | |
3430 | } | |
3431 | ||
3432 | i++; | |
3433 | if (i >= SSD_SMBUS_RETRY_MAX) { | |
3434 | break; | |
3435 | } | |
3436 | msleep(SSD_SMBUS_RETRY_INTERVAL); | |
3437 | } | |
3438 | ||
3439 | return ret; | |
3440 | } | |
3441 | ||
3442 | static int ssd_smbus_write_byte(struct ssd_device *dev, uint8_t saddr, uint8_t cmd, uint8_t *buf) | |
3443 | { | |
3444 | uint8_t smb_data[SSD_SMBUS_DATA_MAX] = {0}; | |
3445 | int i = 0; | |
3446 | int ret = 0; | |
3447 | ||
3448 | smb_data[0] = cmd; | |
3449 | memcpy((smb_data + 1), buf, 1); | |
3450 | ||
3451 | for (;;) { | |
3452 | ret = ssd_i2c_write(dev, saddr, 2, smb_data); | |
3453 | if (!ret || -ETIMEDOUT == ret) { | |
3454 | break; | |
3455 | } | |
3456 | ||
3457 | i++; | |
3458 | if (i >= SSD_SMBUS_RETRY_MAX) { | |
3459 | break; | |
3460 | } | |
3461 | msleep(SSD_SMBUS_RETRY_INTERVAL); | |
3462 | } | |
3463 | ||
3464 | return ret; | |
3465 | } | |
3466 | ||
3467 | static int ssd_smbus_read_byte(struct ssd_device *dev, uint8_t saddr, uint8_t cmd, uint8_t *buf) | |
3468 | { | |
3469 | uint8_t smb_data[SSD_SMBUS_DATA_MAX] = {0}; | |
3470 | int i = 0; | |
3471 | int ret = 0; | |
3472 | ||
3473 | smb_data[0] = cmd; | |
3474 | ||
3475 | for (;;) { | |
3476 | ret = ssd_i2c_write_read(dev, saddr, 1, smb_data, 1, buf); | |
3477 | if (!ret || -ETIMEDOUT == ret) { | |
3478 | break; | |
3479 | } | |
3480 | ||
3481 | i++; | |
3482 | if (i >= SSD_SMBUS_RETRY_MAX) { | |
3483 | break; | |
3484 | } | |
3485 | msleep(SSD_SMBUS_RETRY_INTERVAL); | |
3486 | } | |
3487 | ||
3488 | return ret; | |
3489 | } | |
3490 | ||
3491 | static int ssd_smbus_write_word(struct ssd_device *dev, uint8_t saddr, uint8_t cmd, uint8_t *buf) | |
3492 | { | |
3493 | uint8_t smb_data[SSD_SMBUS_DATA_MAX] = {0}; | |
3494 | int i = 0; | |
3495 | int ret = 0; | |
3496 | ||
3497 | smb_data[0] = cmd; | |
3498 | memcpy((smb_data + 1), buf, 2); | |
3499 | ||
3500 | for (;;) { | |
3501 | ret = ssd_i2c_write(dev, saddr, 3, smb_data); | |
3502 | if (!ret || -ETIMEDOUT == ret) { | |
3503 | break; | |
3504 | } | |
3505 | ||
3506 | i++; | |
3507 | if (i >= SSD_SMBUS_RETRY_MAX) { | |
3508 | break; | |
3509 | } | |
3510 | msleep(SSD_SMBUS_RETRY_INTERVAL); | |
3511 | } | |
3512 | ||
3513 | return ret; | |
3514 | } | |
3515 | ||
3516 | static int ssd_smbus_read_word(struct ssd_device *dev, uint8_t saddr, uint8_t cmd, uint8_t *buf) | |
3517 | { | |
3518 | uint8_t smb_data[SSD_SMBUS_DATA_MAX] = {0}; | |
3519 | int i = 0; | |
3520 | int ret = 0; | |
3521 | ||
3522 | smb_data[0] = cmd; | |
3523 | ||
3524 | for (;;) { | |
3525 | ret = ssd_i2c_write_read(dev, saddr, 1, smb_data, 2, buf); | |
3526 | if (!ret || -ETIMEDOUT == ret) { | |
3527 | break; | |
3528 | } | |
3529 | ||
3530 | i++; | |
3531 | if (i >= SSD_SMBUS_RETRY_MAX) { | |
3532 | break; | |
3533 | } | |
3534 | msleep(SSD_SMBUS_RETRY_INTERVAL); | |
3535 | } | |
3536 | ||
3537 | return ret; | |
3538 | } | |
3539 | ||
3540 | static int ssd_smbus_write_block(struct ssd_device *dev, uint8_t saddr, uint8_t cmd, uint8_t size, uint8_t *buf) | |
3541 | { | |
3542 | uint8_t smb_data[SSD_SMBUS_DATA_MAX] = {0}; | |
3543 | int i = 0; | |
3544 | int ret = 0; | |
3545 | ||
3546 | smb_data[0] = cmd; | |
3547 | smb_data[1] = size; | |
3548 | memcpy((smb_data + 2), buf, size); | |
3549 | ||
3550 | for (;;) { | |
3551 | ret = ssd_i2c_write(dev, saddr, (2 + size), smb_data); | |
3552 | if (!ret || -ETIMEDOUT == ret) { | |
3553 | break; | |
3554 | } | |
3555 | ||
3556 | i++; | |
3557 | if (i >= SSD_SMBUS_RETRY_MAX) { | |
3558 | break; | |
3559 | } | |
3560 | msleep(SSD_SMBUS_RETRY_INTERVAL); | |
3561 | } | |
3562 | ||
3563 | return ret; | |
3564 | } | |
3565 | ||
3566 | static int ssd_smbus_read_block(struct ssd_device *dev, uint8_t saddr, uint8_t cmd, uint8_t size, uint8_t *buf) | |
3567 | { | |
3568 | uint8_t smb_data[SSD_SMBUS_DATA_MAX] = {0}; | |
3569 | uint8_t rsize; | |
3570 | int i = 0; | |
3571 | int ret = 0; | |
3572 | ||
3573 | smb_data[0] = cmd; | |
3574 | ||
3575 | for (;;) { | |
3576 | ret = ssd_i2c_write_read(dev, saddr, 1, smb_data, (SSD_SMBUS_BLOCK_MAX + 1), (smb_data + 1)); | |
3577 | if (!ret || -ETIMEDOUT == ret) { | |
3578 | break; | |
3579 | } | |
3580 | ||
3581 | i++; | |
3582 | if (i >= SSD_SMBUS_RETRY_MAX) { | |
3583 | break; | |
3584 | } | |
3585 | msleep(SSD_SMBUS_RETRY_INTERVAL); | |
3586 | } | |
3587 | if (ret) { | |
3588 | return ret; | |
3589 | } | |
3590 | ||
3591 | rsize = smb_data[1]; | |
3592 | ||
3593 | if (rsize > size ) { | |
3594 | rsize = size; | |
3595 | } | |
3596 | ||
3597 | memcpy(buf, (smb_data + 2), rsize); | |
3598 | ||
3599 | return 0; | |
3600 | } | |
3601 | ||
3602 | ||
3603 | static int ssd_gen_swlog(struct ssd_device *dev, uint16_t event, uint32_t data); | |
3604 | ||
3605 | /* sensor */ | |
3606 | static int ssd_init_lm75(struct ssd_device *dev, uint8_t saddr) | |
3607 | { | |
3608 | uint8_t conf = 0; | |
3609 | int ret = 0; | |
3610 | ||
3611 | ret = ssd_smbus_read_byte(dev, saddr, SSD_LM75_REG_CONF, &conf); | |
3612 | if (ret) { | |
3613 | goto out; | |
3614 | } | |
3615 | ||
3616 | conf &= (uint8_t)(~1u); | |
3617 | ||
3618 | ret = ssd_smbus_write_byte(dev, saddr, SSD_LM75_REG_CONF, &conf); | |
3619 | if (ret) { | |
3620 | goto out; | |
3621 | } | |
3622 | ||
3623 | out: | |
3624 | return ret; | |
3625 | } | |
3626 | ||
3627 | static int ssd_lm75_read(struct ssd_device *dev, uint8_t saddr, uint16_t *data) | |
3628 | { | |
3629 | uint16_t val = 0; | |
3630 | int ret; | |
3631 | ||
3632 | ret = ssd_smbus_read_word(dev, saddr, SSD_LM75_REG_TEMP, (uint8_t *)&val); | |
3633 | if (ret) { | |
3634 | return ret; | |
3635 | } | |
3636 | ||
3637 | *data = u16_swap(val); | |
3638 | ||
3639 | return 0; | |
3640 | } | |
3641 | ||
/* Configure and start the LM80 hardware monitor: reset, select 11-bit
 * temperature resolution, program per-input voltage limits, set the
 * interrupt masks, then enable monitoring. Returns 0 or the first SMBus
 * error encountered. */
static int ssd_init_lm80(struct ssd_device *dev, uint8_t saddr)
{
	uint8_t val;
	uint8_t low, high;
	int i;
	int ret = 0;

	/* init */
	val = 0x80;
	ret = ssd_smbus_write_byte(dev, saddr, SSD_LM80_REG_CONFIG, &val);
	if (ret) {
		goto out;
	}

	/* 11-bit temp */
	val = 0x08;
	ret = ssd_smbus_write_byte(dev, saddr, SSD_LM80_REG_RES, &val);
	if (ret) {
		goto out;
	}

	/* set volt limit */
	for (i=0; i<SSD_LM80_IN_NR; i++) {
		high = ssd_lm80_limit[i].high;
		low = ssd_lm80_limit[i].low;

		/* cap input: no low limit */
		if (SSD_LM80_IN_CAP == i) {
			low = 0;
		}

		/* single-controller boards: effectively disable the 1.2V limits */
		if (dev->hw_info.nr_ctrl <= 1 && SSD_LM80_IN_1V2 == i) {
			high = 0xFF;
			low = 0;
		}

		/* high limit */
		ret = ssd_smbus_write_byte(dev, saddr, SSD_LM80_REG_IN_MAX(i), &high);
		if (ret) {
			goto out;
		}

		/* low limit*/
		ret = ssd_smbus_write_byte(dev, saddr, SSD_LM80_REG_IN_MIN(i), &low);
		if (ret) {
			goto out;
		}
	}

	/* set interrupt mask: allow volt in interrupt except cap in*/
	val = 0x81;
	ret = ssd_smbus_write_byte(dev, saddr, SSD_LM80_REG_MASK1, &val);
	if (ret) {
		goto out;
	}

	/* set interrupt mask: disable others */
	val = 0xFF;
	ret = ssd_smbus_write_byte(dev, saddr, SSD_LM80_REG_MASK2, &val);
	if (ret) {
		goto out;
	}

	/* start */
	val = 0x03;
	ret = ssd_smbus_write_byte(dev, saddr, SSD_LM80_REG_CONFIG, &val);
	if (ret) {
		goto out;
	}

out:
	return ret;
}
3714 | ||
3715 | static int ssd_lm80_enable_in(struct ssd_device *dev, uint8_t saddr, int idx) | |
3716 | { | |
3717 | uint8_t val = 0; | |
3718 | int ret = 0; | |
3719 | ||
3720 | if (idx >= SSD_LM80_IN_NR || idx < 0) { | |
3721 | return -EINVAL; | |
3722 | } | |
3723 | ||
3724 | ret = ssd_smbus_read_byte(dev, saddr, SSD_LM80_REG_MASK1, &val); | |
3725 | if (ret) { | |
3726 | goto out; | |
3727 | } | |
3728 | ||
3729 | val &= ~(1UL << (uint32_t)idx); | |
3730 | ||
3731 | ret = ssd_smbus_write_byte(dev, saddr, SSD_LM80_REG_MASK1, &val); | |
3732 | if (ret) { | |
3733 | goto out; | |
3734 | } | |
3735 | ||
3736 | out: | |
3737 | return ret; | |
3738 | } | |
3739 | ||
3740 | static int ssd_lm80_disable_in(struct ssd_device *dev, uint8_t saddr, int idx) | |
3741 | { | |
3742 | uint8_t val = 0; | |
3743 | int ret = 0; | |
3744 | ||
3745 | if (idx >= SSD_LM80_IN_NR || idx < 0) { | |
3746 | return -EINVAL; | |
3747 | } | |
3748 | ||
3749 | ret = ssd_smbus_read_byte(dev, saddr, SSD_LM80_REG_MASK1, &val); | |
3750 | if (ret) { | |
3751 | goto out; | |
3752 | } | |
3753 | ||
3754 | val |= (1UL << (uint32_t)idx); | |
3755 | ||
3756 | ret = ssd_smbus_write_byte(dev, saddr, SSD_LM80_REG_MASK1, &val); | |
3757 | if (ret) { | |
3758 | goto out; | |
3759 | } | |
3760 | ||
3761 | out: | |
3762 | return ret; | |
3763 | } | |
3764 | ||
3765 | static int ssd_lm80_read_temp(struct ssd_device *dev, uint8_t saddr, uint16_t *data) | |
3766 | { | |
3767 | uint16_t val = 0; | |
3768 | int ret; | |
3769 | ||
3770 | ret = ssd_smbus_read_word(dev, saddr, SSD_LM80_REG_TEMP, (uint8_t *)&val); | |
3771 | if (ret) { | |
3772 | return ret; | |
3773 | } | |
3774 | ||
3775 | *data = u16_swap(val); | |
3776 | ||
3777 | return 0; | |
3778 | } | |
da3355df SF |
3779 | static int ssd_generate_sensor_fault_log(struct ssd_device *dev, uint16_t event, uint8_t addr,uint32_t ret) |
3780 | { | |
3781 | uint32_t data; | |
3782 | data = ((ret & 0xffff) << 16) | (addr << 8) | addr; | |
3783 | ssd_gen_swlog(dev,event,data); | |
3784 | return 0; | |
3785 | } | |
361ebed5 HSDT |
/*
 * Service an LM80 alarm: read (and thereby clear) both alarm status
 * registers, then for each voltage input either re-enable its interrupt
 * when the alarm has cleared, or mask it, debounce-confirm the
 * out-of-range reading, and emit the appropriate software log entry.
 * The dev->hwmon bitmap latches per-input and sensor-fault state so each
 * condition is logged only once per excursion.
 * Returns 0 on success or the SMBus error code.
 */
static int ssd_lm80_check_event(struct ssd_device *dev, uint8_t saddr)
{
	uint32_t volt;
	uint16_t val = 0, status;
	uint8_t alarm1 = 0, alarm2 = 0;
	uint32_t low, high;
	int i,j=0;
	int ret = 0;

	/* read interrupt status to clear interrupt */
	ret = ssd_smbus_read_byte(dev, saddr, SSD_LM80_REG_ALARM1, &alarm1);
	if (ret) {
		goto out;
	}

	ret = ssd_smbus_read_byte(dev, saddr, SSD_LM80_REG_ALARM2, &alarm2);
	if (ret) {
		goto out;
	}

	status = (uint16_t)alarm1 | ((uint16_t)alarm2 << 8);

	/* parse interrupt status, one bit per voltage input */
	for (i=0; i<SSD_LM80_IN_NR; i++) {
		if (!((status >> (uint32_t)i) & 0x1)) {
			/* alarm cleared: re-arm the input's irq if it was latched */
			if (test_and_clear_bit(SSD_HWMON_LM80(i), &dev->hwmon)) {
				/* enable INx irq */
				ret = ssd_lm80_enable_in(dev, saddr, i);
				if (ret) {
					goto out;
				}
			}

			continue;
		}

		/* disable INx irq so the alarm does not re-fire while handled */
		ret = ssd_lm80_disable_in(dev, saddr, i);
		if (ret) {
			goto out;
		}

		/* already latched -> this excursion was logged before */
		if (test_and_set_bit(SSD_HWMON_LM80(i), &dev->hwmon)) {
			continue;
		}

		/* limits are stored in 10mV-style units; readings are x10 */
		high = (uint32_t)ssd_lm80_limit[i].high * (uint32_t)10;
		low = (uint32_t)ssd_lm80_limit[i].low * (uint32_t)10;

		/* debounce: require 3 consecutive out-of-range conversions */
		for (j=0; j<3; j++) {
			ret = ssd_smbus_read_word(dev, saddr, SSD_LM80_REG_IN(i), (uint8_t *)&val);
			if (ret) {
				goto out;
			}
			volt = SSD_LM80_CONVERT_VOLT(u16_swap(val));
			if ((volt>high) || (volt<=low)) {
				if(j<2) {
					msleep(SSD_LM80_CONV_INTERVAL);
				}
			} else {
				break;
			}
		}

		/* broke out early -> reading recovered, treat as transient */
		if (j<3) {
			continue;
		}

		/* confirmed fault: log per input type */
		switch (i) {
			case SSD_LM80_IN_CAP: {
				if (0 == volt) {
					ssd_gen_swlog(dev, SSD_LOG_CAP_SHORT_CIRCUIT, 0);
				} else {
					ssd_gen_swlog(dev, SSD_LOG_CAP_VOLT_FAULT, SSD_PL_CAP_VOLT(volt));
				}
				break;
			}

			case SSD_LM80_IN_1V2:
			case SSD_LM80_IN_1V2a:
			case SSD_LM80_IN_1V5:
			case SSD_LM80_IN_1V8: {
				ssd_gen_swlog(dev, SSD_LOG_VOLT_STATUS, SSD_VOLT_LOG_DATA(i, 0, volt));
				break;
			}
			case SSD_LM80_IN_FPGA_3V3:
			case SSD_LM80_IN_3V3: {
				ssd_gen_swlog(dev, SSD_LOG_VOLT_STATUS, SSD_VOLT_LOG_DATA(i, 0, SSD_LM80_3V3_VOLT(volt)));
				break;
			}
			default:
				break;
		}
	}

out:
	/* latch/clear the sensor-fault flag so SMBus failures log once */
	if (ret) {
		if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80), &dev->hwmon)) {
			ssd_generate_sensor_fault_log(dev, SSD_LOG_SENSOR_FAULT, (uint32_t)saddr,ret);
		}
	} else {
		test_and_clear_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80), &dev->hwmon);
	}
	return ret;
}
3891 | ||
da3355df | 3892 | |
361ebed5 HSDT |
3893 | static int ssd_init_sensor(struct ssd_device *dev) |
3894 | { | |
3895 | int ret = 0; | |
3896 | ||
3897 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
3898 | goto out; | |
3899 | } | |
3900 | ||
3901 | ret = ssd_init_lm75(dev, SSD_SENSOR_LM75_SADDRESS); | |
3902 | if (ret) { | |
3903 | hio_warn("%s: init lm75 failed\n", dev->name); | |
3904 | if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM75), &dev->hwmon)) { | |
da3355df | 3905 | ssd_generate_sensor_fault_log(dev, SSD_LOG_SENSOR_FAULT, SSD_SENSOR_LM75_SADDRESS,ret); |
361ebed5 HSDT |
3906 | } |
3907 | goto out; | |
3908 | } | |
3909 | ||
3910 | if (dev->hw_info.pcb_ver >= 'B' || dev->hw_info_ext.form_factor == SSD_FORM_FACTOR_HHHL) { | |
3911 | ret = ssd_init_lm80(dev, SSD_SENSOR_LM80_SADDRESS); | |
3912 | if (ret) { | |
3913 | hio_warn("%s: init lm80 failed\n", dev->name); | |
3914 | if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80), &dev->hwmon)) { | |
da3355df | 3915 | ssd_generate_sensor_fault_log(dev, SSD_LOG_SENSOR_FAULT, SSD_SENSOR_LM80_SADDRESS,ret); |
361ebed5 HSDT |
3916 | } |
3917 | goto out; | |
3918 | } | |
3919 | } | |
3920 | ||
3921 | out: | |
3922 | /* skip error if not in standard mode */ | |
3923 | if (mode != SSD_DRV_MODE_STANDARD) { | |
3924 | ret = 0; | |
3925 | } | |
3926 | return ret; | |
3927 | } | |
3928 | ||
3929 | /* board volt */ | |
3930 | static int ssd_mon_boardvolt(struct ssd_device *dev) | |
3931 | { | |
3932 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
3933 | return 0; | |
3934 | } | |
3935 | ||
3936 | if (dev->hw_info_ext.form_factor == SSD_FORM_FACTOR_FHHL && dev->hw_info.pcb_ver < 'B') { | |
3937 | return 0; | |
3938 | } | |
3939 | ||
3940 | return ssd_lm80_check_event(dev, SSD_SENSOR_LM80_SADDRESS); | |
3941 | } | |
3942 | ||
3943 | /* temperature */ | |
/*
 * Poll inlet (LM80) and flash (LM75) temperatures and log over-temp /
 * back-to-normal transitions with hysteresis.  The dev->hwmon bitmap
 * latches each condition so every transition is logged exactly once.
 * Returns 0 on success or the sensor-read error code; a no-op on boards
 * without sensors (old protocol, or FHHL before PCB rev 'B').
 */
static int ssd_mon_temp(struct ssd_device *dev)
{
	int cur;
	uint16_t val = 0;
	int ret = 0;

	if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
		return 0;
	}

	if (dev->hw_info_ext.form_factor == SSD_FORM_FACTOR_FHHL && dev->hw_info.pcb_ver < 'B') {
		return 0;
	}

	/* inlet temperature via LM80; log the sensor fault only once */
	ret = ssd_lm80_read_temp(dev, SSD_SENSOR_LM80_SADDRESS, &val);
	if (ret) {
		if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80), &dev->hwmon)) {
			ssd_generate_sensor_fault_log(dev, SSD_LOG_TEMP_SENSOR_EVENT, SSD_SENSOR_LM80_SADDRESS,ret);
		}
		goto out;
	}
	test_and_clear_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80), &dev->hwmon);

	/* over-temp latches at OT_TEMP, releases below OT_HYST (hysteresis) */
	cur = SSD_SENSOR_CONVERT_TEMP(val);
	if (cur >= SSD_INLET_OT_TEMP) {
		if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_INLET), &dev->hwmon)) {
			ssd_gen_swlog(dev, SSD_LOG_INLET_OVER_TEMP, (uint32_t)cur);
		}
	} else if(cur < SSD_INLET_OT_HYST) {
		if (test_and_clear_bit(SSD_HWMON_TEMP(SSD_TEMP_INLET), &dev->hwmon)) {
			ssd_gen_swlog(dev, SSD_LOG_INLET_NORMAL_TEMP, (uint32_t)cur);
		}
	}

	/* flash temperature via LM75, same latch/hysteresis scheme */
	ret = ssd_lm75_read(dev, SSD_SENSOR_LM75_SADDRESS, &val);
	if (ret) {
		if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM75), &dev->hwmon)) {
			ssd_generate_sensor_fault_log(dev, SSD_LOG_TEMP_SENSOR_EVENT, SSD_SENSOR_LM75_SADDRESS,ret);
		}
		goto out;
	}
	test_and_clear_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM75), &dev->hwmon);

	cur = SSD_SENSOR_CONVERT_TEMP(val);
	if (cur >= SSD_FLASH_OT_TEMP) {
		if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_FLASH), &dev->hwmon)) {
			ssd_gen_swlog(dev, SSD_LOG_FLASH_OVER_TEMP, (uint32_t)cur);
		}
	} else if(cur < SSD_FLASH_OT_HYST) {
		if (test_and_clear_bit(SSD_HWMON_TEMP(SSD_TEMP_FLASH), &dev->hwmon)) {
			ssd_gen_swlog(dev, SSD_LOG_FLASH_NORMAL_TEMP, (uint32_t)cur);
		}
	}

out:
	return ret;
}
4003 | ||
4004 | /* cmd tag */ | |
/* Release a command tag back to the bitmap and wake one tag waiter. */
static inline void ssd_put_tag(struct ssd_device *dev, int tag)
{
	test_and_clear_bit(tag, dev->tag_map);
	wake_up(&dev->tag_wq);
}
4010 | ||
/*
 * Allocate a free command tag.  When @wait is 0 the call is non-blocking
 * and returns -1 if no tag is admissible; otherwise the caller sleeps
 * (uninterruptible, exclusive) on dev->tag_wq until one frees up.
 */
static inline int ssd_get_tag(struct ssd_device *dev, int wait)
{
	int tag;

find_tag:
	/* queue_depth doubles as an admission limit: ssd_barrier_start()
	 * drops it to 0 so every allocator blocks here during a barrier */
	while ((tag = find_first_zero_bit(dev->tag_map, dev->hw_info.cmd_fifo_sz)) >= atomic_read(&dev->queue_depth)) {
		DEFINE_WAIT(__wait);

		if (!wait) {
			return -1;
		}

		prepare_to_wait_exclusive(&dev->tag_wq, &__wait, TASK_UNINTERRUPTIBLE);
		schedule();

		finish_wait(&dev->tag_wq, &__wait);
	}

	/* a racing thread may have claimed the bit first; start over */
	if (test_and_set_bit(tag, dev->tag_map)) {
		goto find_tag;
	}

	return tag;
}
4035 | ||
/* Release the barrier tag (no wakeup: barrier holders don't queue). */
static void ssd_barrier_put_tag(struct ssd_device *dev, int tag)
{
	test_and_clear_bit(tag, dev->tag_map);
}
4040 | ||
4041 | static int ssd_barrier_get_tag(struct ssd_device *dev) | |
4042 | { | |
4043 | int tag = 0; | |
4044 | ||
4045 | if (test_and_set_bit(tag, dev->tag_map)) { | |
4046 | return -1; | |
4047 | } | |
4048 | ||
4049 | return tag; | |
4050 | } | |
4051 | ||
/*
 * Finish a barrier: restore the full queue depth, release every thread
 * blocked in ssd_get_tag(), and drop the barrier mutex taken by
 * ssd_barrier_start().
 */
static void ssd_barrier_end(struct ssd_device *dev)
{
	atomic_set(&dev->queue_depth, dev->hw_info.cmd_fifo_sz);
	wake_up_all(&dev->tag_wq);

	mutex_unlock(&dev->barrier_mutex);
}
4059 | ||
/*
 * Begin a barrier: take the barrier mutex, stop new tag allocation by
 * forcing the admission limit to 0, then poll until all in-flight tags
 * drain.  On timeout (SSD_CMD_TIMEOUT polls) the barrier is rolled back
 * and -EBUSY returned; on success the caller must end with
 * ssd_barrier_end().
 */
static int ssd_barrier_start(struct ssd_device *dev)
{
	int i;

	mutex_lock(&dev->barrier_mutex);

	/* queue_depth == 0 makes ssd_get_tag() block all new commands */
	atomic_set(&dev->queue_depth, 0);

	for (i=0; i<SSD_CMD_TIMEOUT; i++) {
		/* no bit set means every outstanding command has completed */
		if (find_first_bit(dev->tag_map, dev->hw_info.cmd_fifo_sz) >= dev->hw_info.cmd_fifo_sz) {
			return 0;
		}

		__set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(1);
	}

	/* timed out: undo the barrier and release waiters */
	atomic_set(&dev->queue_depth, dev->hw_info.cmd_fifo_sz);
	wake_up_all(&dev->tag_wq);

	mutex_unlock(&dev->barrier_mutex);

	return -EBUSY;
}
4084 | ||
4085 | static int ssd_busy(struct ssd_device *dev) | |
4086 | { | |
4087 | if (find_first_bit(dev->tag_map, dev->hw_info.cmd_fifo_sz) >= dev->hw_info.cmd_fifo_sz) { | |
4088 | return 0; | |
4089 | } | |
4090 | ||
4091 | return 1; | |
4092 | } | |
4093 | ||
4094 | static int ssd_wait_io(struct ssd_device *dev) | |
4095 | { | |
4096 | int i; | |
4097 | ||
4098 | for (i=0; i<SSD_CMD_TIMEOUT; i++) { | |
4099 | if (find_first_bit(dev->tag_map, dev->hw_info.cmd_fifo_sz) >= dev->hw_info.cmd_fifo_sz) { | |
4100 | return 0; | |
4101 | } | |
4102 | ||
4103 | __set_current_state(TASK_INTERRUPTIBLE); | |
4104 | schedule_timeout(1); | |
4105 | } | |
4106 | ||
4107 | return -EBUSY; | |
4108 | } | |
4109 | ||
#if 0
/* Unused: true while a barrier drain has forced the queue depth to 0. */
static int ssd_in_barrier(struct ssd_device *dev)
{
	return (0 == atomic_read(&dev->queue_depth));
}
#endif
4116 | ||
/* Free the command-tag bitmap allocated by ssd_init_tag(). */
static void ssd_cleanup_tag(struct ssd_device *dev)
{
	kfree(dev->tag_map);
}
4121 | ||
4122 | static int ssd_init_tag(struct ssd_device *dev) | |
4123 | { | |
4124 | int nr_ulongs = ALIGN(dev->hw_info.cmd_fifo_sz, BITS_PER_LONG) / BITS_PER_LONG; | |
4125 | ||
4126 | mutex_init(&dev->barrier_mutex); | |
4127 | ||
4128 | atomic_set(&dev->queue_depth, dev->hw_info.cmd_fifo_sz); | |
4129 | ||
4130 | dev->tag_map = kmalloc(nr_ulongs * sizeof(unsigned long), GFP_ATOMIC); | |
4131 | if (!dev->tag_map) { | |
4132 | return -ENOMEM; | |
4133 | } | |
4134 | ||
4135 | memset(dev->tag_map, 0, nr_ulongs * sizeof(unsigned long)); | |
4136 | ||
4137 | init_waitqueue_head(&dev->tag_wq); | |
4138 | ||
4139 | return 0; | |
4140 | } | |
4141 | ||
4142 | /* io stat */ | |
/*
 * Account the completion of a bio in the block-layer disk statistics
 * (ticks, in-flight count).  Each #elif branch targets a different
 * kernel accounting API generation; only one branch compiles in.
 * Pre-3.0 kernels need a manual in_flight counter under
 * dev->in_flight_lock because the partition stats have no atomic helper.
 */
static void ssd_end_io_acct(struct ssd_cmd *cmd)
{
	struct ssd_device *dev = cmd->dev;
	struct bio *bio = cmd->bio;
	unsigned long dur = jiffies - cmd->start_time;
	int rw = bio_data_dir(bio);
#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6 && RHEL_MINOR >= 7))
#else
	unsigned long flag;
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,8,0))
	/* 5.8+: single helper does duration + in-flight accounting */
	bio_end_io_acct(bio, cmd->start_time);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0))
	struct hd_struct *part = disk_map_sector_rcu(dev->gd, bio_start(bio));
	generic_end_io_acct(dev->rq, rw, part, cmd->start_time);
#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6 && RHEL_MINOR >= 7))
	int cpu = part_stat_lock();
	struct hd_struct *part = disk_map_sector_rcu(dev->gd, bio_start(bio));
	part_round_stats(cpu, part);
	part_stat_add(cpu, part, ticks[rw], dur);
	part_dec_in_flight(part, rw);
	part_stat_unlock();
#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
	int cpu = part_stat_lock();
	struct hd_struct *part = &dev->gd->part0;
	part_round_stats(cpu, part);
	part_stat_add(cpu, part, ticks[rw], dur);

	/* no part_dec_in_flight() here: decrement under our own lock */
	spin_lock_irqsave(&dev->in_flight_lock,flag);
	part->in_flight[rw]--;
	spin_unlock_irqrestore(&dev->in_flight_lock,flag);

	part_stat_unlock();

#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14))
	preempt_disable();
	disk_round_stats(dev->gd);
	disk_stat_add(dev->gd, ticks[rw], dur);

	spin_lock_irqsave(&dev->in_flight_lock,flag);
	dev->gd->in_flight--;
	spin_unlock_irqrestore(&dev->in_flight_lock,flag);

	preempt_enable();

#else
	/* oldest kernels: separate read/write tick counters */
	preempt_disable();
	disk_round_stats(dev->gd);
	if (rw == WRITE) {
		disk_stat_add(dev->gd, write_ticks, dur);
	} else {
		disk_stat_add(dev->gd, read_ticks, dur);
	}
	spin_lock_irqsave(&dev->in_flight_lock,flag);
	dev->gd->in_flight--;
	spin_unlock_irqrestore(&dev->in_flight_lock,flag);

	preempt_enable();

#endif
}
4205 | ||
/*
 * Account the start of a bio (ios, sectors, in-flight count) and record
 * cmd->start_time for the completion side in ssd_end_io_acct().  Mirror
 * of ssd_end_io_acct(): one #elif branch per kernel accounting API
 * generation; pre-3.0 kernels bump in_flight manually under
 * dev->in_flight_lock.
 */
static void ssd_start_io_acct(struct ssd_cmd *cmd)
{
	struct ssd_device *dev = cmd->dev;
	struct bio *bio = cmd->bio;
	int rw = bio_data_dir(bio);
#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6 && RHEL_MINOR >= 7))
#else
	unsigned long flag;
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,8,0))
	/* 5.8+: helper returns the start timestamp to pass to the end side */
	cmd->start_time = bio_start_io_acct(bio);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0))
	struct hd_struct *part = disk_map_sector_rcu(dev->gd, bio_start(bio));
	generic_start_io_acct(dev->rq, rw, bio_sectors(bio), part);
	cmd->start_time = jiffies;
#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6 && RHEL_MINOR >= 7))
	int cpu = part_stat_lock();
	struct hd_struct *part = disk_map_sector_rcu(dev->gd, bio_start(bio));
	part_round_stats(cpu, part);
	part_stat_inc(cpu, part, ios[rw]);
	part_stat_add(cpu, part, sectors[rw], bio_sectors(bio));
	part_inc_in_flight(part, rw);
	part_stat_unlock();
	cmd->start_time = jiffies;
#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
	int cpu = part_stat_lock();
	struct hd_struct *part = &dev->gd->part0;
	part_round_stats(cpu, part);
	part_stat_inc(cpu, part, ios[rw]);
	part_stat_add(cpu, part, sectors[rw], bio_sectors(bio));

	spin_lock_irqsave(&dev->in_flight_lock,flag);
	part->in_flight[rw]++;
	spin_unlock_irqrestore(&dev->in_flight_lock,flag);

	part_stat_unlock();
	cmd->start_time = jiffies;

#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14))
	preempt_disable();
	disk_round_stats(dev->gd);
	disk_stat_inc(dev->gd, ios[rw]);
	disk_stat_add(dev->gd, sectors[rw], bio_sectors(bio));

	spin_lock_irqsave(&dev->in_flight_lock,flag);
	dev->gd->in_flight++;
	spin_unlock_irqrestore(&dev->in_flight_lock,flag);

	preempt_enable();
	cmd->start_time = jiffies;
#else
	/* oldest kernels: separate read/write counters */
	preempt_disable();
	disk_round_stats(dev->gd);
	if (rw == WRITE) {
		disk_stat_inc(dev->gd, writes);
		disk_stat_add(dev->gd, write_sectors, bio_sectors(bio));
	} else {
		disk_stat_inc(dev->gd, reads);
		disk_stat_add(dev->gd, read_sectors, bio_sectors(bio));
	}

	spin_lock_irqsave(&dev->in_flight_lock,flag);
	dev->gd->in_flight++;
	spin_unlock_irqrestore(&dev->in_flight_lock,flag);

	preempt_enable();
	cmd->start_time = jiffies;
#endif
}
4276 | ||
4277 | /* io */ | |
/*
 * Append a bio to the send queue and wake the send worker thread.
 * sendq_lock protects only the list; the in_sendq counter is atomic.
 */
static void ssd_queue_bio(struct ssd_device *dev, struct bio *bio)
{
	spin_lock(&dev->sendq_lock);
	ssd_blist_add(&dev->sendq, bio);
	spin_unlock(&dev->sendq_lock);

	atomic_inc(&dev->in_sendq);
	wake_up(&dev->send_waitq);
}
4287 | ||
/*
 * Complete a command.  For bio-backed commands: finish I/O accounting
 * and DMA unmapping (skipped for discards and pre-mapped commands),
 * release the tag, then either end the bio directly (MSI-X mode, low
 * tags, or errors) or defer it to the done-queue worker.  For internal
 * (bio-less) commands, signal the synchronous waiter if any.
 */
static inline void ssd_end_request(struct ssd_cmd *cmd)
{
	struct ssd_device *dev = cmd->dev;
	struct bio *bio = cmd->bio;
	int errors = cmd->errors;
	int tag = cmd->tag;

	if (bio) {
		if (!ssd_bio_has_discard(bio)) {
			ssd_end_io_acct(cmd);
			/* cmd->flag set means the sgl was not mapped by us */
			if (!cmd->flag) {
				pci_unmap_sg(dev->pdev, cmd->sgl, cmd->nsegs,
					bio_data_dir(bio) == READ ? PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
			}
		}

		/* drop our bio reference before the tag becomes reusable */
		cmd->bio = NULL;
		ssd_put_tag(dev, tag);

		if (SSD_INT_MSIX == dev->int_mode || tag < 16 || errors) {
			ssd_bio_endio(bio, errors);
		} else /* if (bio->bi_idx >= bio->bi_vcnt)*/ {
			/* defer completion to the done-queue thread */
			spin_lock(&dev->doneq_lock);
			ssd_blist_add(&dev->doneq, bio);
			spin_unlock(&dev->doneq_lock);

			atomic_inc(&dev->in_doneq);
			wake_up(&dev->done_waitq);
		}
	} else {
		if (cmd->waiting) {
			complete(cmd->waiting);
		}
	}
}
4323 | ||
/*
 * Complete a command whose timer expired.  All device IRQ vectors are
 * disabled around the forced completion so the real completion interrupt
 * cannot race with us, the timeout counter is bumped, and the alarm LED
 * is lit.  The command is finished with -ETIMEDOUT.
 */
static void ssd_end_timeout_request(struct ssd_cmd *cmd)
{
	struct ssd_device *dev = cmd->dev;
	struct ssd_rw_msg *msg = (struct ssd_rw_msg *)cmd->msg;
	int i;

	for (i=0; i<dev->nr_queue; i++) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
		disable_irq(dev->entry[i].vector);
#else
		disable_irq(pci_irq_vector(dev->pdev, i));
#endif
	}

	atomic_inc(&dev->tocnt);
	//if (cmd->bio) {
	hio_err("%s: cmd timeout: tag %d fun %#x\n", dev->name, msg->tag, msg->fun);
	cmd->errors = -ETIMEDOUT;
	ssd_end_request(cmd);
	//}

	for (i=0; i<dev->nr_queue; i++) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
		enable_irq(dev->entry[i].vector);
#else
		enable_irq(pci_irq_vector(dev->pdev, i));
#endif
	}

	/* alarm led */
	ssd_set_alarm(dev);
}
4356 | ||
4357 | /* cmd timer */ | |
7e9f9829 | 4358 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0)) |
361ebed5 | 4359 | static void ssd_cmd_add_timer(struct ssd_cmd *cmd, int timeout, void (*complt)(struct ssd_cmd *)) |
7e9f9829 SF |
4360 | #else |
4361 | static void ssd_cmd_add_timer(struct ssd_cmd *cmd, int timeout, void (*complt)(struct timer_list *)) | |
4362 | #endif | |
361ebed5 | 4363 | { |
7e9f9829 | 4364 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0)) |
361ebed5 HSDT |
4365 | init_timer(&cmd->cmd_timer); |
4366 | ||
4367 | cmd->cmd_timer.data = (unsigned long)cmd; | |
361ebed5 | 4368 | cmd->cmd_timer.function = (void (*)(unsigned long)) complt; |
7e9f9829 SF |
4369 | #else |
4370 | timer_setup(&cmd->cmd_timer, complt, 0); | |
4371 | #endif | |
361ebed5 | 4372 | |
7e9f9829 | 4373 | cmd->cmd_timer.expires = jiffies + timeout; |
361ebed5 HSDT |
4374 | add_timer(&cmd->cmd_timer); |
4375 | } | |
4376 | ||
/*
 * Cancel a command's timeout timer.  Returns nonzero if the timer was
 * still pending (i.e. we own the completion), 0 if it already fired.
 */
static int ssd_cmd_del_timer(struct ssd_cmd *cmd)
{
	return del_timer(&cmd->cmd_timer);
}
4381 | ||
7e9f9829 | 4382 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0)) |
361ebed5 | 4383 | static void ssd_add_timer(struct timer_list *timer, int timeout, void (*complt)(void *), void *data) |
7e9f9829 SF |
4384 | #else |
4385 | static void ssd_add_timer(struct timer_list *timer, int timeout, void (*complt)(struct timer_list *), void *data) | |
4386 | #endif | |
361ebed5 | 4387 | { |
7e9f9829 | 4388 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0)) |
361ebed5 HSDT |
4389 | init_timer(timer); |
4390 | ||
4391 | timer->data = (unsigned long)data; | |
361ebed5 | 4392 | timer->function = (void (*)(unsigned long)) complt; |
7e9f9829 SF |
4393 | #else |
4394 | timer_setup(timer, complt, 0); | |
4395 | #endif | |
361ebed5 | 4396 | |
7e9f9829 | 4397 | timer->expires = jiffies + timeout; |
361ebed5 HSDT |
4398 | add_timer(timer); |
4399 | } | |
4400 | ||
/* Cancel a generic driver timer; nonzero if it was still pending. */
static int ssd_del_timer(struct timer_list *timer)
{
	return del_timer(timer);
}
4405 | ||
7e9f9829 | 4406 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0)) |
361ebed5 | 4407 | static void ssd_cmd_timeout(struct ssd_cmd *cmd) |
7e9f9829 SF |
4408 | #else |
4409 | static void ssd_cmd_timeout(struct timer_list *t) | |
4410 | #endif | |
361ebed5 | 4411 | { |
7e9f9829 SF |
4412 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,15,0)) |
4413 | struct ssd_cmd *cmd = from_timer(cmd, t, cmd_timer); | |
4414 | #endif | |
361ebed5 HSDT |
4415 | struct ssd_device *dev = cmd->dev; |
4416 | uint32_t msg = *(uint32_t *)cmd->msg; | |
4417 | ||
4418 | ssd_end_timeout_request(cmd); | |
4419 | ||
4420 | ssd_gen_swlog(dev, SSD_LOG_TIMEOUT, msg); | |
4421 | } | |
4422 | ||
4423 | ||
/*
 * Tasklet body: splice this CPU's done-list to a local list with IRQs
 * off (the list is filled from hard-irq context in ssd_done_bh()), then
 * complete each command outside the critical section.
 */
static void __ssd_done(unsigned long data)
{
	struct ssd_cmd *cmd;
	LIST_HEAD(localq);

	local_irq_disable();
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0))
	list_splice_init(&__get_cpu_var(ssd_doneq), &localq);
#else
	list_splice_init(this_cpu_ptr(&ssd_doneq), &localq);
#endif
	local_irq_enable();

	while (!list_empty(&localq)) {
		cmd = list_entry(localq.next, struct ssd_cmd, list);
		list_del_init(&cmd->list);

		ssd_end_request(cmd);
	}
}
4444 | ||
/*
 * Debug variant of __ssd_done(): before completing each bio-backed
 * command, inject an error (-EIO for reads, -EROFS for writes) when the
 * bio's sector range overlaps the fault window configured in
 * dev->db_info, according to the selected fault type.
 */
static void __ssd_done_db(unsigned long data)
{
	struct ssd_cmd *cmd;
	struct ssd_device *dev;
	struct bio *bio;
	LIST_HEAD(localq);

	/* same splice-under-irq-off scheme as __ssd_done() */
	local_irq_disable();
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0))
	list_splice_init(&__get_cpu_var(ssd_doneq), &localq);
#else
	list_splice_init(this_cpu_ptr(&ssd_doneq), &localq);
#endif
	local_irq_enable();

	while (!list_empty(&localq)) {
		cmd = list_entry(localq.next, struct ssd_cmd, list);
		list_del_init(&cmd->list);

		dev = (struct ssd_device *)cmd->dev;
		bio = cmd->bio;

		if (bio) {
			sector_t off = dev->db_info.data.loc.off;
			uint32_t len = dev->db_info.data.loc.len;

			/* overlap test: NOT(bio entirely before or after window) */
			switch (dev->db_info.type) {
			case SSD_DEBUG_READ_ERR:
				if (bio_data_dir(bio) == READ &&
					!((off + len) <= bio_start(bio) || off >= (bio_start(bio) + bio_sectors(bio)))) {
					cmd->errors = -EIO;
				}
				break;
			case SSD_DEBUG_WRITE_ERR:
				if (bio_data_dir(bio) == WRITE &&
					!((off + len) <= bio_start(bio) || off >= (bio_start(bio) + bio_sectors(bio)))) {
					cmd->errors = -EROFS;
				}
				break;
			case SSD_DEBUG_RW_ERR:
				if (!((off + len) <= bio_start(bio) || off >= (bio_start(bio) + bio_sectors(bio)))) {
					if (bio_data_dir(bio) == READ) {
						cmd->errors = -EIO;
					} else {
						cmd->errors = -EROFS;
					}
				}
				break;
			default:
				break;
			}
		}

		ssd_end_request(cmd);
	}
}
4501 | ||
/*
 * Interrupt-side completion: claim the command by cancelling its timer
 * (if the timer already fired, the timeout path owns it — log and bail),
 * then queue it on this CPU's done-list and schedule the tasklet that
 * runs __ssd_done()/__ssd_done_db().
 */
static inline void ssd_done_bh(struct ssd_cmd *cmd)
{
	unsigned long flags = 0;

	if (unlikely(!ssd_cmd_del_timer(cmd))) {
		struct ssd_device *dev = cmd->dev;
		struct ssd_rw_msg *msg = (struct ssd_rw_msg *)cmd->msg;
		hio_err("%s: unknown cmd: tag %d fun %#x\n", dev->name, msg->tag, msg->fun);

		/* alarm led */
		ssd_set_alarm(dev);
		return;
	}

	local_irq_save(flags);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0))
	list_add_tail(&cmd->list, &__get_cpu_var(ssd_doneq));
	tasklet_hi_schedule(&__get_cpu_var(ssd_tasklet));
#else
	list_add_tail(&cmd->list, this_cpu_ptr(&ssd_doneq));
	tasklet_hi_schedule(this_cpu_ptr(&ssd_tasklet));
#endif
	local_irq_restore(flags);

	return;
}
4528 | ||
4529 | static inline void ssd_done(struct ssd_cmd *cmd) | |
4530 | { | |
4531 | if (unlikely(!ssd_cmd_del_timer(cmd))) { | |
4532 | struct ssd_device *dev = cmd->dev; | |
4533 | struct ssd_rw_msg *msg = (struct ssd_rw_msg *)cmd->msg; | |
4534 | hio_err("%s: unknown cmd: tag %d fun %#x\n", dev->name, msg->tag, msg->fun); | |
4535 | ||
4536 | /* alarm led */ | |
4537 | ssd_set_alarm(dev); | |
4538 | return; | |
4539 | } | |
4540 | ||
4541 | ssd_end_request(cmd); | |
4542 | ||
4543 | return; | |
4544 | } | |
4545 | ||
/*
 * Submit a command by writing its DMA message address to the request
 * FIFO register; cmd_lock serializes access to the 64-bit register
 * write.  The timeout timer is armed before submission.
 */
static inline void ssd_dispatch_cmd(struct ssd_cmd *cmd)
{
	struct ssd_device *dev = (struct ssd_device *)cmd->dev;

	ssd_cmd_add_timer(cmd, SSD_CMD_TIMEOUT, ssd_cmd_timeout);

	spin_lock(&dev->cmd_lock);
	ssd_reg_write(dev->ctrlp + SSD_REQ_FIFO_REG, cmd->msg_dma);
	spin_unlock(&dev->cmd_lock);
}
4556 | ||
/*
 * Submit a command by its tag: a single 32-bit FIFO write encoding the
 * tag (low 16 bits) and segment count (high 16 bits).  No lock needed —
 * the 32-bit register write is atomic.  Timer armed before submission.
 */
static inline void ssd_send_cmd(struct ssd_cmd *cmd)
{
	struct ssd_device *dev = (struct ssd_device *)cmd->dev;

	ssd_cmd_add_timer(cmd, SSD_CMD_TIMEOUT, ssd_cmd_timeout);

	ssd_reg32_write(dev->ctrlp + SSD_REQ_FIFO_REG, ((uint32_t)cmd->tag | ((uint32_t)cmd->nsegs << 16)));
}
4565 | ||
/*
 * Debug variant of ssd_send_cmd(): when timeout fault injection is
 * configured in dev->db_info, silently drop the matching bio submission
 * (timer stays armed, so the command later times out); otherwise submit
 * normally via the request FIFO.
 */
static inline void ssd_send_cmd_db(struct ssd_cmd *cmd)
{
	struct ssd_device *dev = (struct ssd_device *)cmd->dev;
	struct bio *bio = cmd->bio;

	ssd_cmd_add_timer(cmd, SSD_CMD_TIMEOUT, ssd_cmd_timeout);

	if (bio) {
		switch (dev->db_info.type) {
		case SSD_DEBUG_READ_TO:
			if (bio_data_dir(bio) == READ) {
				return;
			}
			break;
		case SSD_DEBUG_WRITE_TO:
			if (bio_data_dir(bio) == WRITE) {
				return;
			}
			break;
		case SSD_DEBUG_RW_TO:
			return;
			break;
		default:
			break;
		}
	}

	ssd_reg32_write(dev->ctrlp + SSD_REQ_FIFO_REG, ((uint32_t)cmd->tag | ((uint32_t)cmd->nsegs << 16)));
}
4595 | ||
4596 | ||
4597 | /* fixed for BIOVEC_PHYS_MERGEABLE */ | |
4598 | #ifdef SSD_BIOVEC_PHYS_MERGEABLE_FIXED | |
4599 | #include <linux/bio.h> | |
4600 | #include <linux/io.h> | |
4601 | #include <xen/page.h> | |
4602 | ||
/*
 * Xen-aware biovec merge check: on top of the generic physical
 * mergeability test, require the machine frame numbers of the two pages
 * to be identical or consecutive, since guest-physically-contiguous
 * pages need not be machine-contiguous under Xen.
 */
static bool xen_biovec_phys_mergeable_fixed(const struct bio_vec *vec1,
			const struct bio_vec *vec2)
{
	unsigned long mfn1 = pfn_to_mfn(page_to_pfn(vec1->bv_page));
	unsigned long mfn2 = pfn_to_mfn(page_to_pfn(vec2->bv_page));

	return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&
		((mfn1 == mfn2) || ((mfn1+1) == mfn2));
}
4612 | ||
4613 | #ifdef BIOVEC_PHYS_MERGEABLE | |
4614 | #undef BIOVEC_PHYS_MERGEABLE | |
4615 | #endif | |
4616 | #define BIOVEC_PHYS_MERGEABLE(vec1, vec2) \ | |
4617 | (__BIOVEC_PHYS_MERGEABLE(vec1, vec2) && \ | |
4618 | (!xen_domain() || xen_biovec_phys_mergeable_fixed(vec1, vec2))) | |
4619 | ||
4620 | #endif | |
4621 | ||
653c3a30 SF |
4622 | /* |
4623 | * BIOVEC_PHYS_MERGEABLE not available from 4.20 onward, and it seems likely | |
4624 | * that all the merging that can be done has been done by the block core | |
4625 | * already. Just stub it out. | |
4626 | */ | |
4627 | #if (LINUX_VERSION_CODE > KERNEL_VERSION(4,20,0)) | |
4628 | # ifdef BIOVEC_PHYS_MERGEABLE | |
4629 | # undef BIOVEC_PHYS_MERGEABLE | |
4630 | # endif | |
4631 | # define BIOVEC_PHYS_MERGEABLE(vec1, vec2) (0) | |
4632 | #endif | |
4633 | ||
361ebed5 HSDT |
/*
 * Map the data segments of @bio into the scatterlist @sgl.
 *
 * Physically contiguous adjacent bio_vecs are merged into one SG entry
 * (BIOVEC_PHYS_MERGEABLE); at most dev->hw_info.cmd_max_sg entries are
 * produced — any remaining segments are silently dropped by the early
 * `break` (NOTE(review): callers presumably size bios so this cannot
 * happen; confirm against the request-queue limits set elsewhere).
 *
 * Returns the number of SG entries filled.
 *
 * Two implementations: the pre-3.14 one iterates struct bio_vec
 * pointers with an integer index, the 3.14+ one uses struct bvec_iter
 * and by-value bio_vecs.
 */
static inline int ssd_bio_map_sg(struct ssd_device *dev, struct bio *bio, struct scatterlist *sgl)
{
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0))
	struct bio_vec *bvec, *bvprv = NULL;
	struct scatterlist *sg = NULL;
	int i = 0, nsegs = 0;

#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,23))
	sg_init_table(sgl, dev->hw_info.cmd_max_sg);
#endif

	/*
	 * for each segment in bio
	 */
	bio_for_each_segment(bvec, bio, i) {
		if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
			/* physically contiguous with the previous vec: extend it */
			sg->length += bvec->bv_len;
		} else {
			if (unlikely(nsegs >= (int)dev->hw_info.cmd_max_sg)) {
				break;
			}

			/* first entry starts at sgl, later ones advance */
			sg = sg ? (sg + 1) : sgl;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
			sg_set_page(sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset);
#else
			/* pre-2.6.24 scatterlist has open-coded fields */
			sg->page = bvec->bv_page;
			sg->length = bvec->bv_len;
			sg->offset = bvec->bv_offset;
#endif
			nsegs++;
		}
		bvprv = bvec;
	}

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24))
	if (sg) {
		sg_mark_end(sg);
	}
#endif

	/* remember how far we got into the bio */
	bio->bi_idx = i;

	return nsegs;
#else
	struct bio_vec bvec, bvprv;
	struct bvec_iter iter;
	struct scatterlist *sg = NULL;
	int nsegs = 0;
	int first = 1;

	sg_init_table(sgl, dev->hw_info.cmd_max_sg);

	/*
	 * for each segment in bio
	 */
	bio_for_each_segment(bvec, bio, iter) {
		if (!first && BIOVEC_PHYS_MERGEABLE(&bvprv, &bvec)) {
			/* merge into the current SG entry */
			sg->length += bvec.bv_len;
		} else {
			if (unlikely(nsegs >= (int)dev->hw_info.cmd_max_sg)) {
				break;
			}

			sg = sg ? (sg + 1) : sgl;

			sg_set_page(sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);

			nsegs++;
			first = 0;
		}
		bvprv = bvec;
	}

	if (sg) {
		sg_mark_end(sg);
	}

	return nsegs;
#endif
}
4715 | ||
4716 | ||
/*
 * Submit a "physical" bio directly to the hardware: the bio's pages are
 * passed to the device without pci_map_sg() DMA mapping (contrast with
 * ssd_submit_bio()).  cmd->flag = 1 marks the command as a pbio so the
 * completion path can tell the two apart.
 *
 * @wait: passed through to ssd_get_tag(); non-zero presumably blocks
 *        until a tag is free — confirm in ssd_get_tag().
 * Returns 0 on success, -EBUSY when no command tag is available.
 */
static int __ssd_submit_pbio(struct ssd_device *dev, struct bio *bio, int wait)
{
	struct ssd_cmd *cmd;
	struct ssd_rw_msg *msg;
	struct ssd_sg_entry *sge;
	sector_t block = bio_start(bio);
	int tag;
	int i;

	tag = ssd_get_tag(dev, wait);
	if (tag < 0) {
		return -EBUSY;
	}

	cmd = &dev->cmd[tag];
	cmd->bio = bio;
	cmd->flag = 1;	/* pbio: pages are not DMA-mapped via scatterlist */

	msg = (struct ssd_rw_msg *)cmd->msg;

	if (ssd_bio_has_discard(bio)) {
		unsigned int length = bio_sectors(bio);

		//printk(KERN_WARNING "%s: discard len %u, block %llu\n", dev->name, bio_sectors(bio), block);
		msg->tag = tag;
		msg->fun = SSD_FUNC_TRIM;

		/* Split the discard range into SG entries of at most
		 * sg_max_sec sectors each; buf stays 0 (no data phase). */
		sge = msg->sge;
		for (i=0; i<(dev->hw_info.cmd_max_sg); i++) {
			sge->block = block;
			sge->length = (length >= dev->hw_info.sg_max_sec) ? dev->hw_info.sg_max_sec : length;
			sge->buf = 0;

			block += sge->length;
			length -= sge->length;
			sge++;

			/* length is unsigned, so this is effectively == 0 */
			if (length <= 0) {
				++i;	/* count the entry just filled */
				break;
			}
		}
		msg->nsegs = cmd->nsegs = i;

		dev->scmd(cmd);
		return 0;
	}

	//msg->nsegs = cmd->nsegs = ssd_bio_map_sg(dev, bio, sgl);
	/* pbio path trusts the bio's own vector count — no SG merging */
	msg->nsegs = cmd->nsegs = bio->bi_vcnt;

	//xx
	if (bio_data_dir(bio) == READ) {
		msg->fun = SSD_FUNC_READ;
		msg->flag = 0;
	} else {
		msg->fun = SSD_FUNC_WRITE;
		msg->flag = dev->wmode;	/* current write mode (write-back etc.) */
	}

	/* One SG entry per bio_vec; bv_len >> 9 converts bytes to sectors.
	 * NOTE(review): buf is page pointer + offset, not a bus address —
	 * presumably valid only for the identity-mapped pbio path; confirm. */
	sge = msg->sge;
	for (i=0; i<bio->bi_vcnt; i++) {
		sge->block = block;
		sge->length = bio->bi_io_vec[i].bv_len >> 9;
		sge->buf = (uint64_t)((void *)bio->bi_io_vec[i].bv_page + bio->bi_io_vec[i].bv_offset);

		block += sge->length;
		sge++;
	}

	msg->tag = tag;

#ifdef SSD_OT_PROTECT
	/* over-temperature throttle: delay submission while too hot */
	if (unlikely(dev->ot_delay > 0 && dev->ot_protect != 0)) {
		msleep_interruptible(dev->ot_delay);
	}
#endif

	ssd_start_io_acct(cmd);
	dev->scmd(cmd);

	return 0;
}
4800 | ||
4801 | static inline int ssd_submit_bio(struct ssd_device *dev, struct bio *bio, int wait) | |
4802 | { | |
4803 | struct ssd_cmd *cmd; | |
4804 | struct ssd_rw_msg *msg; | |
4805 | struct ssd_sg_entry *sge; | |
4806 | struct scatterlist *sgl; | |
4807 | sector_t block = bio_start(bio); | |
4808 | int tag; | |
4809 | int i; | |
4810 | ||
4811 | tag = ssd_get_tag(dev, wait); | |
4812 | if (tag < 0) { | |
4813 | return -EBUSY; | |
4814 | } | |
4815 | ||
4816 | cmd = &dev->cmd[tag]; | |
4817 | cmd->bio = bio; | |
4818 | cmd->flag = 0; | |
4819 | ||
4820 | msg = (struct ssd_rw_msg *)cmd->msg; | |
4821 | ||
4822 | sgl = cmd->sgl; | |
4823 | ||
1197134c | 4824 | if (ssd_bio_has_discard(bio)) { |
361ebed5 HSDT |
4825 | unsigned int length = bio_sectors(bio); |
4826 | ||
4827 | //printk(KERN_WARNING "%s: discard len %u, block %llu\n", dev->name, bio_sectors(bio), block); | |
4828 | msg->tag = tag; | |
4829 | msg->fun = SSD_FUNC_TRIM; | |
4830 | ||
4831 | sge = msg->sge; | |
4832 | for (i=0; i<(dev->hw_info.cmd_max_sg); i++) { | |
4833 | sge->block = block; | |
4834 | sge->length = (length >= dev->hw_info.sg_max_sec) ? dev->hw_info.sg_max_sec : length; | |
4835 | sge->buf = 0; | |
4836 | ||
4837 | block += sge->length; | |
4838 | length -= sge->length; | |
4839 | sge++; | |
4840 | ||
4841 | if (length <= 0) { | |
1197134c | 4842 | ++i; |
361ebed5 HSDT |
4843 | break; |
4844 | } | |
4845 | } | |
1197134c | 4846 | msg->nsegs = cmd->nsegs = i; |
361ebed5 HSDT |
4847 | |
4848 | dev->scmd(cmd); | |
4849 | return 0; | |
4850 | } | |
361ebed5 HSDT |
4851 | |
4852 | msg->nsegs = cmd->nsegs = ssd_bio_map_sg(dev, bio, sgl); | |
4853 | ||
4854 | //xx | |
4855 | if (bio_data_dir(bio) == READ) { | |
4856 | msg->fun = SSD_FUNC_READ; | |
4857 | msg->flag = 0; | |
4858 | pci_map_sg(dev->pdev, sgl, cmd->nsegs, PCI_DMA_FROMDEVICE); | |
4859 | } else { | |
4860 | msg->fun = SSD_FUNC_WRITE; | |
4861 | msg->flag = dev->wmode; | |
4862 | pci_map_sg(dev->pdev, sgl, cmd->nsegs, PCI_DMA_TODEVICE); | |
4863 | } | |
4864 | ||
4865 | sge = msg->sge; | |
4866 | for (i=0; i<cmd->nsegs; i++) { | |
4867 | sge->block = block; | |
4868 | sge->length = sg_dma_len(sgl) >> 9; | |
4869 | sge->buf = sg_dma_address(sgl); | |
4870 | ||
4871 | block += sge->length; | |
4872 | sgl++; | |
4873 | sge++; | |
4874 | } | |
4875 | ||
4876 | msg->tag = tag; | |
4877 | ||
4878 | #ifdef SSD_OT_PROTECT | |
4879 | if (unlikely(dev->ot_delay > 0 && dev->ot_protect != 0)) { | |
4880 | msleep_interruptible(dev->ot_delay); | |
4881 | } | |
4882 | #endif | |
4883 | ||
4884 | ssd_start_io_acct(cmd); | |
4885 | dev->scmd(cmd); | |
4886 | ||
4887 | return 0; | |
4888 | } | |
4889 | ||
4890 | /* threads */ | |
4891 | static int ssd_done_thread(void *data) | |
4892 | { | |
4893 | struct ssd_device *dev; | |
4894 | struct bio *bio; | |
4895 | struct bio *next; | |
361ebed5 HSDT |
4896 | |
4897 | if (!data) { | |
4898 | return -EINVAL; | |
4899 | } | |
4900 | dev = data; | |
4901 | ||
1197134c | 4902 | current->flags |= PF_NOFREEZE; |
361ebed5 HSDT |
4903 | //set_user_nice(current, -5); |
4904 | ||
4905 | while (!kthread_should_stop()) { | |
4906 | wait_event_interruptible(dev->done_waitq, (atomic_read(&dev->in_doneq) || kthread_should_stop())); | |
4907 | ||
4908 | while (atomic_read(&dev->in_doneq)) { | |
4909 | if (threaded_irq) { | |
4910 | spin_lock(&dev->doneq_lock); | |
4911 | bio = ssd_blist_get(&dev->doneq); | |
4912 | spin_unlock(&dev->doneq_lock); | |
4913 | } else { | |
4914 | spin_lock_irq(&dev->doneq_lock); | |
4915 | bio = ssd_blist_get(&dev->doneq); | |
4916 | spin_unlock_irq(&dev->doneq_lock); | |
4917 | } | |
4918 | ||
4919 | while (bio) { | |
4920 | next = bio->bi_next; | |
4921 | bio->bi_next = NULL; | |
1197134c | 4922 | ssd_bio_endio(bio, 0); |
361ebed5 HSDT |
4923 | atomic_dec(&dev->in_doneq); |
4924 | bio = next; | |
4925 | } | |
4926 | ||
4927 | cond_resched(); | |
4928 | ||
4929 | #ifdef SSD_ESCAPE_IRQ | |
4930 | if (unlikely(smp_processor_id() == dev->irq_cpu)) { | |
4931 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)) | |
aa14a110 | 4932 | cpumask_var_t new_mask; |
1197134c KM |
4933 | if (alloc_cpumask_var(&new_mask, GFP_ATOMIC)) { |
4934 | cpumask_setall(new_mask); | |
4935 | cpumask_clear_cpu(dev->irq_cpu, new_mask); | |
4936 | set_cpus_allowed_ptr(current, new_mask); | |
4937 | free_cpumask_var(new_mask); | |
4938 | } | |
361ebed5 | 4939 | #else |
aa14a110 | 4940 | cpumask_t new_mask; |
361ebed5 HSDT |
4941 | cpus_setall(new_mask); |
4942 | cpu_clear(dev->irq_cpu, new_mask); | |
4943 | set_cpus_allowed(current, new_mask); | |
4944 | #endif | |
4945 | } | |
4946 | #endif | |
4947 | } | |
4948 | } | |
4949 | return 0; | |
4950 | } | |
4951 | ||
/*
 * Deferred-submission thread: drains dev->sendq and (re)submits bios
 * that could not be issued inline (e.g. no free tag at the time).
 *
 * Bios flagged BIO_SSD_PBIO (when SSD_QUEUE_PBIO is enabled) go through
 * the pbio path, everything else through ssd_submit_bio(); both are
 * called with wait=1 so submission blocks until a tag frees up.
 *
 * Returns 0 on normal kthread exit, -EINVAL if started without data.
 */
static int ssd_send_thread(void *data)
{
	struct ssd_device *dev;
	struct bio *bio;
	struct bio *next;

	if (!data) {
		return -EINVAL;
	}
	dev = data;

	/* never freeze this thread during suspend */
	current->flags |= PF_NOFREEZE;
	//set_user_nice(current, -5);

	while (!kthread_should_stop()) {
		wait_event_interruptible(dev->send_waitq, (atomic_read(&dev->in_sendq) || kthread_should_stop()));

		while (atomic_read(&dev->in_sendq)) {
			/* sendq is only filled from process context, so a
			 * plain spin_lock is sufficient here */
			spin_lock(&dev->sendq_lock);
			bio = ssd_blist_get(&dev->sendq);
			spin_unlock(&dev->sendq_lock);

			while (bio) {
				next = bio->bi_next;
				bio->bi_next = NULL;
#ifdef SSD_QUEUE_PBIO
				if (test_and_clear_bit(BIO_SSD_PBIO, &bio->bi_flags)) {
					__ssd_submit_pbio(dev, bio, 1);
				} else {
					ssd_submit_bio(dev, bio, 1);
				}
#else
				ssd_submit_bio(dev, bio, 1);
#endif
				atomic_dec(&dev->in_sendq);
				bio = next;
			}

			cond_resched();

#ifdef SSD_ESCAPE_IRQ
			/* keep submission work off the IRQ-handling CPU */
			if (unlikely(smp_processor_id() == dev->irq_cpu)) {
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28))
				cpumask_var_t new_mask;
				if (alloc_cpumask_var(&new_mask, GFP_ATOMIC)) {
					cpumask_setall(new_mask);
					cpumask_clear_cpu(dev->irq_cpu, new_mask);
					set_cpus_allowed_ptr(current, new_mask);
					free_cpumask_var(new_mask);
				}
#else
				cpumask_t new_mask;
				cpus_setall(new_mask);
				cpu_clear(dev->irq_cpu, new_mask);
				set_cpus_allowed(current, new_mask);
#endif
			}
#endif
		}
	}

	return 0;
}
5015 | ||
/*
 * Stop both worker threads.  Send thread is stopped first so no new
 * commands are submitted while the done thread drains completions.
 */
static void ssd_cleanup_thread(struct ssd_device *dev)
{
	kthread_stop(dev->send_thread);
	kthread_stop(dev->done_thread);
}
5021 | ||
/*
 * Initialize queues, locks and wait queues, then start the completion
 * ("%s/d") and submission ("%s/s") kthreads.
 *
 * Returns 0 on success or the PTR_ERR of the failed kthread_run();
 * on failure any thread already started is stopped again (goto-based
 * unwind).
 */
static int ssd_init_thread(struct ssd_device *dev)
{
	int ret;

	atomic_set(&dev->in_doneq, 0);
	atomic_set(&dev->in_sendq, 0);

	spin_lock_init(&dev->doneq_lock);
	spin_lock_init(&dev->sendq_lock);

	ssd_blist_init(&dev->doneq);
	ssd_blist_init(&dev->sendq);

	init_waitqueue_head(&dev->done_waitq);
	init_waitqueue_head(&dev->send_waitq);

	dev->done_thread = kthread_run(ssd_done_thread, dev, "%s/d", dev->name);
	if (IS_ERR(dev->done_thread)) {
		ret = PTR_ERR(dev->done_thread);
		goto out_done_thread;
	}

	dev->send_thread = kthread_run(ssd_send_thread, dev, "%s/s", dev->name);
	if (IS_ERR(dev->send_thread)) {
		ret = PTR_ERR(dev->send_thread);
		goto out_send_thread;
	}

	return 0;

out_send_thread:
	kthread_stop(dev->done_thread);
out_done_thread:
	return ret;
}
5057 | ||
5058 | /* dcmd pool */ | |
5059 | static void ssd_put_dcmd(struct ssd_dcmd *dcmd) | |
5060 | { | |
5061 | struct ssd_device *dev = (struct ssd_device *)dcmd->dev; | |
5062 | ||
5063 | spin_lock(&dev->dcmd_lock); | |
5064 | list_add_tail(&dcmd->list, &dev->dcmd_list); | |
5065 | spin_unlock(&dev->dcmd_lock); | |
5066 | } | |
5067 | ||
5068 | static struct ssd_dcmd *ssd_get_dcmd(struct ssd_device *dev) | |
5069 | { | |
5070 | struct ssd_dcmd *dcmd = NULL; | |
5071 | ||
5072 | spin_lock(&dev->dcmd_lock); | |
5073 | if (!list_empty(&dev->dcmd_list)) { | |
5074 | dcmd = list_entry(dev->dcmd_list.next, | |
5075 | struct ssd_dcmd, list); | |
5076 | list_del_init(&dcmd->list); | |
5077 | } | |
5078 | spin_unlock(&dev->dcmd_lock); | |
5079 | ||
5080 | return dcmd; | |
5081 | } | |
5082 | ||
/* Free the dcmd descriptor array allocated by ssd_init_dcmd(). */
static void ssd_cleanup_dcmd(struct ssd_device *dev)
{
	kfree(dev->dcmd);
}
5087 | ||
/*
 * Allocate and initialize the direct-command (dcmd) pool: one
 * descriptor per hardware command FIFO slot, all linked into
 * dev->dcmd_list as the free pool.
 *
 * Returns 0 on success, -ENOMEM if the pool cannot be allocated.
 */
static int ssd_init_dcmd(struct ssd_device *dev)
{
	struct ssd_dcmd *dcmd;
	int dcmd_sz = sizeof(struct ssd_dcmd)*dev->hw_info.cmd_fifo_sz;
	int i;

	spin_lock_init(&dev->dcmd_lock);
	INIT_LIST_HEAD(&dev->dcmd_list);
	init_waitqueue_head(&dev->dcmd_wq);

	dev->dcmd = kmalloc(dcmd_sz, GFP_KERNEL);
	if (!dev->dcmd) {
		hio_warn("%s: can not alloc dcmd\n", dev->name);
		goto out_alloc_dcmd;
	}
	/* zero the pool so the embedded msg buffers start clean */
	memset(dev->dcmd, 0, dcmd_sz);

	for (i=0, dcmd=dev->dcmd; i<(int)dev->hw_info.cmd_fifo_sz; i++, dcmd++) {
		dcmd->dev = dev;
		INIT_LIST_HEAD(&dcmd->list);
		list_add_tail(&dcmd->list, &dev->dcmd_list);
	}

	return 0;

out_alloc_dcmd:
	return -ENOMEM;
}
5116 | ||
/*
 * Release a direct-command message buffer obtained via ssd_get_dmsg().
 * Scrubs the message, returns its descriptor to the pool, and wakes one
 * waiter blocked in ssd_get_dmsg().
 */
static void ssd_put_dmsg(void *msg)
{
	struct ssd_dcmd *dcmd = container_of(msg, struct ssd_dcmd, msg);
	struct ssd_device *dev = (struct ssd_device *)dcmd->dev;

	memset(dcmd->msg, 0, SSD_DCMD_MAX_SZ);
	ssd_put_dcmd(dcmd);
	wake_up(&dev->dcmd_wq);
}
5126 | ||
/*
 * Get a direct-command message buffer, sleeping (uninterruptibly,
 * exclusive wait) until one is available.  Always returns a valid
 * msg pointer; pair with ssd_put_dmsg().
 */
static void *ssd_get_dmsg(struct ssd_device *dev)
{
	struct ssd_dcmd *dcmd = ssd_get_dcmd(dev);

	while (!dcmd) {
		DEFINE_WAIT(wait);
		prepare_to_wait_exclusive(&dev->dcmd_wq, &wait, TASK_UNINTERRUPTIBLE);
		schedule();

		/* re-check after wakeup; another waiter may have won */
		dcmd = ssd_get_dcmd(dev);

		finish_wait(&dev->dcmd_wq, &wait);
	}
	return dcmd->msg;
}
5142 | ||
5143 | /* do direct cmd */ | |
/* do direct cmd */
/*
 * Synchronously execute a direct command: copy @msg into a tagged
 * command slot, submit it, and block on a completion until the
 * hardware answers.
 *
 * @rw:   direction hint (unused here; kept for interface symmetry).
 * @done: if non-NULL, receives cmd->nr_log (e.g. number of log entries
 *        returned by a READ_LOG command).
 *
 * Returns 0 on success, -EBUSY if no tag, -ETIMEDOUT on command
 * timeout, -EIO on any other command error.
 */
static int ssd_do_request(struct ssd_device *dev, int rw, void *msg, int *done)
{
	DECLARE_COMPLETION(wait);
	struct ssd_cmd *cmd;
	int tag;
	int ret = 0;

	tag = ssd_get_tag(dev, 1);
	if (tag < 0) {
		return -EBUSY;
	}

	cmd = &dev->cmd[tag];
	cmd->nsegs = 1;
	memcpy(cmd->msg, msg, SSD_DCMD_MAX_SZ);
	/* patch the slot's tag into the copied message */
	((struct ssd_rw_msg *)cmd->msg)->tag = tag;

	cmd->waiting = &wait;

	dev->scmd(cmd);

	wait_for_completion(cmd->waiting);
	cmd->waiting = NULL;

	if (cmd->errors == -ETIMEDOUT) {
		ret = cmd->errors;
	} else if (cmd->errors) {
		ret = -EIO;
	}

	if (done != NULL) {
		*done = cmd->nr_log;
	}
	ssd_put_tag(dev, cmd->tag);

	return ret;
}
5181 | ||
/*
 * Same as ssd_do_request() but uses the barrier tag allocator
 * (ssd_barrier_get_tag/ssd_barrier_put_tag) so the command is issued
 * as a barrier with respect to in-flight I/O.
 *
 * Returns 0 on success, -EBUSY if no barrier tag, -ETIMEDOUT on
 * timeout, -EIO on other command errors.
 */
static int ssd_do_barrier_request(struct ssd_device *dev, int rw, void *msg, int *done)
{
	DECLARE_COMPLETION(wait);
	struct ssd_cmd *cmd;
	int tag;
	int ret = 0;

	tag = ssd_barrier_get_tag(dev);
	if (tag < 0) {
		return -EBUSY;
	}

	cmd = &dev->cmd[tag];
	cmd->nsegs = 1;
	memcpy(cmd->msg, msg, SSD_DCMD_MAX_SZ);
	((struct ssd_rw_msg *)cmd->msg)->tag = tag;

	cmd->waiting = &wait;

	dev->scmd(cmd);

	wait_for_completion(cmd->waiting);
	cmd->waiting = NULL;

	if (cmd->errors == -ETIMEDOUT) {
		ret = cmd->errors;
	} else if (cmd->errors) {
		ret = -EIO;
	}

	if (done != NULL) {
		*done = cmd->nr_log;
	}
	ssd_barrier_put_tag(dev, cmd->tag);

	return ret;
}
5219 | ||
5220 | #ifdef SSD_OT_PROTECT | |
/*
 * Poll every controller's temperature register and engage/clear the
 * over-temperature throttle (dev->ot_delay) around threshold @temp.
 * Only active in standard driver mode.
 */
static void ssd_check_temperature(struct ssd_device *dev, int temp)
{
	uint64_t val;
	uint32_t off;
	int cur;
	int i;

	if (mode != SSD_DRV_MODE_STANDARD) {
		return;
	}

	/* NOTE(review): empty branch — looks like a lost early `return`
	 * for protocol <= V3 (those registers may not exist); confirm
	 * against the vendor source before changing. */
	if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) {
	}

	for (i=0; i<dev->hw_info.nr_ctrl; i++) {
		off = SSD_CTRL_TEMP_REG0 + i * sizeof(uint64_t);

		val = ssd_reg_read(dev->ctrlp + off);
		/* all-ones reads back from an absent/unreadable register */
		if (val == 0xffffffffffffffffull) {
			continue;
		}

		cur = (int)CUR_TEMP(val);
		if (cur >= temp) {
			/* first controller over threshold: warn once and throttle */
			if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_CTRL), &dev->hwmon)) {
				if (dev->protocol_info.ver > SSD_PROTOCOL_V3 && dev->protocol_info.ver < SSD_PROTOCOL_V3_2_2) {
					hio_warn("%s: Over temperature, please check the fans.\n", dev->name);
					dev->ot_delay = SSD_OT_DELAY;
				}
			}
			return;
		}
	}

	/* every controller is below threshold again: clear the throttle */
	if (test_and_clear_bit(SSD_HWMON_TEMP(SSD_TEMP_CTRL), &dev->hwmon)) {
		if (dev->protocol_info.ver > SSD_PROTOCOL_V3 && dev->protocol_info.ver < SSD_PROTOCOL_V3_2_2) {
			hio_warn("%s: Temperature is OK.\n", dev->name);
			dev->ot_delay = 0;
		}
	}
}
5262 | #endif | |
5263 | ||
5264 | static int ssd_get_ot_status(struct ssd_device *dev, int *status) | |
5265 | { | |
5266 | uint32_t off; | |
5267 | uint32_t val; | |
5268 | int i; | |
5269 | ||
5270 | if (!dev || !status) { | |
5271 | return -EINVAL; | |
5272 | } | |
5273 | ||
5274 | if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2_2) { | |
5275 | for (i=0; i<dev->hw_info.nr_ctrl; i++) { | |
5276 | off = SSD_READ_OT_REG0 + (i * SSD_CTRL_REG_ZONE_SZ); | |
5277 | val = ssd_reg32_read(dev->ctrlp + off); | |
5278 | if ((val >> 22) & 0x1) { | |
5279 | *status = 1; | |
5280 | goto out; | |
5281 | } | |
5282 | ||
5283 | ||
5284 | off = SSD_WRITE_OT_REG0 + (i * SSD_CTRL_REG_ZONE_SZ); | |
5285 | val = ssd_reg32_read(dev->ctrlp + off); | |
5286 | if ((val >> 22) & 0x1) { | |
5287 | *status = 1; | |
5288 | goto out; | |
5289 | } | |
5290 | } | |
5291 | } else { | |
5292 | *status = !!dev->ot_delay; | |
5293 | } | |
5294 | ||
5295 | out: | |
5296 | return 0; | |
5297 | } | |
5298 | ||
/*
 * Enable or disable hardware over-temperature protection.
 *
 * Records the normalized flag in dev->ot_protect and, on protocol
 * >= V3_2_2, read-modify-writes bit 21 of every controller's read and
 * write OT registers accordingly.  Serialized by fw_mutex against
 * other register updates.
 */
static void ssd_set_ot_protect(struct ssd_device *dev, int protect)
{
	uint32_t off;
	uint32_t val;
	int i;

	mutex_lock(&dev->fw_mutex);

	dev->ot_protect = !!protect;

	if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2_2) {
		for (i=0; i<dev->hw_info.nr_ctrl; i++) {
			/* read-path protect enable (bit 21) */
			off = SSD_READ_OT_REG0 + (i * SSD_CTRL_REG_ZONE_SZ);
			val = ssd_reg32_read(dev->ctrlp + off);
			if (dev->ot_protect) {
				val |= (1U << 21);
			} else {
				val &= ~(1U << 21);
			}
			ssd_reg32_write(dev->ctrlp + off, val);

			/* write-path protect enable (bit 21) */
			off = SSD_WRITE_OT_REG0 + (i * SSD_CTRL_REG_ZONE_SZ);
			val = ssd_reg32_read(dev->ctrlp + off);
			if (dev->ot_protect) {
				val |= (1U << 21);
			} else {
				val &= ~(1U << 21);
			}
			ssd_reg32_write(dev->ctrlp + off, val);
		}
	}

	mutex_unlock(&dev->fw_mutex);
}
5334 | ||
/*
 * One-time over-temperature setup at probe: apply the module-parameter
 * default (`ot_protect`) and take an initial temperature reading.
 * Always returns 0.
 */
static int ssd_init_ot_protect(struct ssd_device *dev)
{
	ssd_set_ot_protect(dev, ot_protect);

#ifdef SSD_OT_PROTECT
	ssd_check_temperature(dev, SSD_OT_TEMP);
#endif

	return 0;
}
5345 | ||
5346 | /* log */ | |
/* log */
/*
 * Read the hardware log of controller @ctrl_idx into @buf
 * (dev->hw_info.log_sz bytes, DMA'd from the device).
 *
 * @nr_log: out parameter receiving the number of log entries
 *          (via ssd_do_request()'s `done`).
 *
 * Pre-V3 protocol uses the older ssd_log_msg layout, V3+ the
 * ssd_log_op_msg layout; both carry the same function code and the
 * DMA address of @buf.
 *
 * Returns 0 on success or a negative errno (DMA mapping failure,
 * -ETIMEDOUT/-EIO from the command).
 */
static int ssd_read_log(struct ssd_device *dev, int ctrl_idx, void *buf, int *nr_log)
{
	struct ssd_log_op_msg *msg;
	struct ssd_log_msg *lmsg;
	dma_addr_t buf_dma;
	size_t length = dev->hw_info.log_sz;
	int ret = 0;

	if (ctrl_idx >= dev->hw_info.nr_ctrl) {
		return -EINVAL;
	}

	buf_dma = pci_map_single(dev->pdev, buf, length, PCI_DMA_FROMDEVICE);
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
	/* old dma_mapping_error() took only the dma_addr */
	ret = dma_mapping_error(buf_dma);
#else
	ret = dma_mapping_error(&(dev->pdev->dev), buf_dma);
#endif
	if (ret) {
		hio_warn("%s: unable to map read DMA buffer\n", dev->name);
		goto out_dma_mapping;
	}

	msg = (struct ssd_log_op_msg *)ssd_get_dmsg(dev);

	if (dev->protocol_info.ver < SSD_PROTOCOL_V3) {
		lmsg = (struct ssd_log_msg *)msg;
		lmsg->fun = SSD_FUNC_READ_LOG;
		lmsg->ctrl_idx = ctrl_idx;
		lmsg->buf = buf_dma;
	} else {
		msg->fun = SSD_FUNC_READ_LOG;
		msg->ctrl_idx = ctrl_idx;
		msg->buf = buf_dma;
	}

	ret = ssd_do_request(dev, READ, msg, nr_log);
	ssd_put_dmsg(msg);

	pci_unmap_single(dev->pdev, buf_dma, length, PCI_DMA_FROMDEVICE);

out_dma_mapping:
	return ret;
}
5391 | ||
5392 | #define SSD_LOG_PRINT_BUF_SZ 256 | |
/*
 * Look up a log entry's descriptor and, if @print is set and the
 * entry's level passes the module-wide `log_level` filter, format and
 * emit one line via the hio_* log helpers.
 *
 * Entries with an unknown event code fall through to the terminating
 * SSD_UNKNOWN_EVENT descriptor.  Location data uses the pre-V3.2
 * (loc) or V3.2+ (loc1) layout depending on protocol version.
 *
 * Returns the descriptor's severity level (also when not printed).
 */
static int ssd_parse_log(struct ssd_device *dev, struct ssd_log *log, int print)
{
	struct ssd_log_desc *log_desc = ssd_log_desc;
	struct ssd_log_entry *le;
	char *sn = NULL;
	char print_buf[SSD_LOG_PRINT_BUF_SZ];
	int print_len;

	le = &log->le;

	/* find desc */
	while (log_desc->event != SSD_UNKNOWN_EVENT) {
		if (log_desc->event == le->event) {
			break;
		}
		log_desc++;
	}

	if (!print) {
		goto out;
	}

	if (log_desc->level < log_level) {
		goto out;
	}

	/* parse */
	if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
		sn = dev->label.sn;
	} else {
		sn = dev->labelv3.barcode;
	}

	print_len = snprintf(print_buf, SSD_LOG_PRINT_BUF_SZ, "%s (%s): <%#x>", dev->name, sn, le->event);

	/* SSD_LOG_SW_IDX marks driver-generated (software) entries */
	if (log->ctrl_idx != SSD_LOG_SW_IDX) {
		print_len += snprintf((print_buf + print_len), (SSD_LOG_PRINT_BUF_SZ - print_len), " controller %d", log->ctrl_idx);
	}

	switch (log_desc->data) {
	case SSD_LOG_DATA_NONE:
		break;
	case SSD_LOG_DATA_LOC:
		/* append flash/block/page location; field layout differs
		 * between pre-V3.2 (loc) and V3.2+ (loc1) */
		if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
			print_len += snprintf((print_buf + print_len), (SSD_LOG_PRINT_BUF_SZ - print_len), " flash %d", le->data.loc.flash);
			if (log_desc->sblock) {
				print_len += snprintf((print_buf + print_len), (SSD_LOG_PRINT_BUF_SZ - print_len), " block %d", le->data.loc.block);
			}
			if (log_desc->spage) {
				print_len += snprintf((print_buf + print_len), (SSD_LOG_PRINT_BUF_SZ - print_len), " page %d", le->data.loc.page);
			}
		} else {
			print_len += snprintf((print_buf + print_len), (SSD_LOG_PRINT_BUF_SZ - print_len), " flash %d", le->data.loc1.flash);
			if (log_desc->sblock) {
				print_len += snprintf((print_buf + print_len), (SSD_LOG_PRINT_BUF_SZ - print_len), " block %d", le->data.loc1.block);
			}
			if (log_desc->spage) {
				print_len += snprintf((print_buf + print_len), (SSD_LOG_PRINT_BUF_SZ - print_len), " page %d", le->data.loc1.page);
			}
		}
		break;
	case SSD_LOG_DATA_HEX:
		print_len += snprintf((print_buf + print_len), (SSD_LOG_PRINT_BUF_SZ - print_len), " info %#x", le->data.val);
		break;
	default:
		break;
	}
	/*print_len += */snprintf((print_buf + print_len), (SSD_LOG_PRINT_BUF_SZ - print_len), ": %s", log_desc->desc);

	/* route to the log helper matching the entry's severity */
	switch (log_desc->level) {
	case SSD_LOG_LEVEL_INFO:
		hio_info("%s\n", print_buf);
		break;
	case SSD_LOG_LEVEL_NOTICE:
		hio_note("%s\n", print_buf);
		break;
	case SSD_LOG_LEVEL_WARNING:
		hio_warn("%s\n", print_buf);
		break;
	case SSD_LOG_LEVEL_ERR:
		hio_err("%s\n", print_buf);
		//printk(KERN_ERR MODULE_NAME": some exception occurred, please check the data or refer to FAQ.");
		break;
	default:
		hio_warn("%s\n", print_buf);
		break;
	}

out:
	return log_desc->level;
}
5484 | ||
5485 | static int ssd_bm_get_sfstatus(struct ssd_device *dev, uint16_t *status); | |
5486 | static int ssd_switch_wmode(struct ssd_device *dev, int wmode); | |
5487 | ||
5488 | ||
/*
 * React to a hardware/firmware log event: adjust driver state
 * (over-temperature throttle, write mode on battery/capacitor faults,
 * SMART counters) and forward the event to the registered event
 * callback, if any.
 *
 * Capacitor faults are additionally forwarded as SSD_LOG_BATTERY_FAULT
 * so legacy listeners see them (marked FIXME in the original).
 *
 * Always returns 0.
 */
static int ssd_handle_event(struct ssd_device *dev, uint16_t event, int level)
{
	int ret = 0;

	switch (event) {
	case SSD_LOG_OVER_TEMP: {
#ifdef SSD_OT_PROTECT
		/* engage throttle once; bit guards against repeat warnings */
		if (!test_and_set_bit(SSD_HWMON_TEMP(SSD_TEMP_CTRL), &dev->hwmon)) {
			if (dev->protocol_info.ver > SSD_PROTOCOL_V3 && dev->protocol_info.ver < SSD_PROTOCOL_V3_2_2) {
				hio_warn("%s: Over temperature, please check the fans.\n", dev->name);
				dev->ot_delay = SSD_OT_DELAY;
			}
		}
#endif
		break;
	}

	case SSD_LOG_NORMAL_TEMP: {
#ifdef SSD_OT_PROTECT
		/* need to check all controller's temperature */
		ssd_check_temperature(dev, SSD_OT_TEMP_HYST);
#endif
		break;
	}

	case SSD_LOG_BATTERY_FAULT: {
		uint16_t sfstatus;

		if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
			if (!ssd_bm_get_sfstatus(dev, &sfstatus)) {
				ssd_gen_swlog(dev, SSD_LOG_BM_SFSTATUS, sfstatus);
			}
		}

		/* power-loss capacitor unreliable: re-evaluate write mode */
		if (!test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) {
			ssd_switch_wmode(dev, dev->user_wmode);
		}
		break;
	}

	case SSD_LOG_BATTERY_OK: {
		if (test_and_clear_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) {
			ssd_switch_wmode(dev, dev->user_wmode);
		}
		break;
	}

	case SSD_LOG_BOARD_VOLT_FAULT: {
		ssd_mon_boardvolt(dev);
		break;
	}

	case SSD_LOG_CLEAR_LOG: {
		/* update smart */
		memset(&dev->smart.log_info, 0, sizeof(struct ssd_log_info));
		break;
	}

	case SSD_LOG_CAP_VOLT_FAULT:
	case SSD_LOG_CAP_LEARN_FAULT:
	case SSD_LOG_CAP_SHORT_CIRCUIT: {
		if (!test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) {
			ssd_switch_wmode(dev, dev->user_wmode);
		}
		break;
	}

	default:
		break;
	}

	/* ssd event call */
	if (dev->event_call) {
		dev->event_call(dev->gd, event, level);

		/* FIXME */
		if (SSD_LOG_CAP_VOLT_FAULT == event || SSD_LOG_CAP_LEARN_FAULT == event || SSD_LOG_CAP_SHORT_CIRCUIT == event) {
			dev->event_call(dev->gd, SSD_LOG_BATTERY_FAULT, level);
		}
	}

	return ret;
}
5572 | ||
/*
 * Append @log to the device's internal log: the in-memory copy always,
 * and (protocol > V3) the on-flash copy via SPI at log_base + offset.
 *
 * When the flash log area is full the entry is kept only until the
 * warning fires; nr_log is not advanced past rom_info.log_sz.
 * Serialized by internal_log_mutex.
 *
 * Returns 0 on success or the ssd_spi_write() error.
 */
static int ssd_save_log(struct ssd_device *dev, struct ssd_log *log)
{
	uint32_t off, size;
	void *internal_log;
	int ret = 0;

	mutex_lock(&dev->internal_log_mutex);

	size = sizeof(struct ssd_log);
	off = dev->internal_log.nr_log * size;

	if (off == dev->rom_info.log_sz) {
		/* warn only when no newer entries exist in SMART either */
		if (dev->internal_log.nr_log == dev->smart.log_info.nr_log) {
			hio_warn("%s: internal log is full\n", dev->name);
		}
		goto out;
	}

	internal_log = dev->internal_log.log + off;
	memcpy(internal_log, log, size);

	if (dev->protocol_info.ver > SSD_PROTOCOL_V3) {
		off += dev->rom_info.log_base;

		/* persist to flash before counting the entry */
		ret = ssd_spi_write(dev, log, off, size);
		if (ret) {
			goto out;
		}
	}

	dev->internal_log.nr_log++;

out:
	mutex_unlock(&dev->internal_log_mutex);
	return ret;
}
5609 | ||
da3355df SF |
/** CRC table for the CRC-16. The poly is 0x8005 (x^16 + x^15 + x^2 + 1) */
/* Byte-indexed lookup table for the reflected (LSB-first) form of the
 * polynomial, as used by crc16_byte() below. */
static unsigned short const crc16_table[256] = {
	0x0000, 0xC0C1, 0xC181, 0x0140, 0xC301, 0x03C0, 0x0280, 0xC241,
	0xC601, 0x06C0, 0x0780, 0xC741, 0x0500, 0xC5C1, 0xC481, 0x0440,
	0xCC01, 0x0CC0, 0x0D80, 0xCD41, 0x0F00, 0xCFC1, 0xCE81, 0x0E40,
	0x0A00, 0xCAC1, 0xCB81, 0x0B40, 0xC901, 0x09C0, 0x0880, 0xC841,
	0xD801, 0x18C0, 0x1980, 0xD941, 0x1B00, 0xDBC1, 0xDA81, 0x1A40,
	0x1E00, 0xDEC1, 0xDF81, 0x1F40, 0xDD01, 0x1DC0, 0x1C80, 0xDC41,
	0x1400, 0xD4C1, 0xD581, 0x1540, 0xD701, 0x17C0, 0x1680, 0xD641,
	0xD201, 0x12C0, 0x1380, 0xD341, 0x1100, 0xD1C1, 0xD081, 0x1040,
	0xF001, 0x30C0, 0x3180, 0xF141, 0x3300, 0xF3C1, 0xF281, 0x3240,
	0x3600, 0xF6C1, 0xF781, 0x3740, 0xF501, 0x35C0, 0x3480, 0xF441,
	0x3C00, 0xFCC1, 0xFD81, 0x3D40, 0xFF01, 0x3FC0, 0x3E80, 0xFE41,
	0xFA01, 0x3AC0, 0x3B80, 0xFB41, 0x3900, 0xF9C1, 0xF881, 0x3840,
	0x2800, 0xE8C1, 0xE981, 0x2940, 0xEB01, 0x2BC0, 0x2A80, 0xEA41,
	0xEE01, 0x2EC0, 0x2F80, 0xEF41, 0x2D00, 0xEDC1, 0xEC81, 0x2C40,
	0xE401, 0x24C0, 0x2580, 0xE541, 0x2700, 0xE7C1, 0xE681, 0x2640,
	0x2200, 0xE2C1, 0xE381, 0x2340, 0xE101, 0x21C0, 0x2080, 0xE041,
	0xA001, 0x60C0, 0x6180, 0xA141, 0x6300, 0xA3C1, 0xA281, 0x6240,
	0x6600, 0xA6C1, 0xA781, 0x6740, 0xA501, 0x65C0, 0x6480, 0xA441,
	0x6C00, 0xACC1, 0xAD81, 0x6D40, 0xAF01, 0x6FC0, 0x6E80, 0xAE41,
	0xAA01, 0x6AC0, 0x6B80, 0xAB41, 0x6900, 0xA9C1, 0xA881, 0x6840,
	0x7800, 0xB8C1, 0xB981, 0x7940, 0xBB01, 0x7BC0, 0x7A80, 0xBA41,
	0xBE01, 0x7EC0, 0x7F80, 0xBF41, 0x7D00, 0xBDC1, 0xBC81, 0x7C40,
	0xB401, 0x74C0, 0x7580, 0xB541, 0x7700, 0xB7C1, 0xB681, 0x7640,
	0x7200, 0xB2C1, 0xB381, 0x7340, 0xB101, 0x71C0, 0x7080, 0xB041,
	0x5000, 0x90C1, 0x9181, 0x5140, 0x9301, 0x53C0, 0x5280, 0x9241,
	0x9601, 0x56C0, 0x5780, 0x9741, 0x5500, 0x95C1, 0x9481, 0x5440,
	0x9C01, 0x5CC0, 0x5D80, 0x9D41, 0x5F00, 0x9FC1, 0x9E81, 0x5E40,
	0x5A00, 0x9AC1, 0x9B81, 0x5B40, 0x9901, 0x59C0, 0x5880, 0x9841,
	0x8801, 0x48C0, 0x4980, 0x8941, 0x4B00, 0x8BC1, 0x8A81, 0x4A40,
	0x4E00, 0x8EC1, 0x8F81, 0x4F40, 0x8D01, 0x4DC0, 0x4C80, 0x8C41,
	0x4400, 0x84C1, 0x8581, 0x4540, 0x8701, 0x47C0, 0x4680, 0x8641,
	0x8201, 0x42C0, 0x4380, 0x8341, 0x4100, 0x81C1, 0x8081, 0x4040
};
5645 | ||
5646 | static unsigned short crc16_byte(unsigned short crc, const unsigned char data) | |
5647 | { | |
5648 | return (crc >> 8) ^ crc16_table[(crc ^ data) & 0xff]; | |
5649 | } | |
5650 | /** | |
5651 | * crc16 - compute the CRC-16 for the data buffer | |
5652 | * @crc: previous CRC value | |
5653 | * @buffer: data pointer | |
5654 | * @len: number of bytes in the buffer | |
5655 | * | |
5656 | * Returns the updated CRC value. | |
5657 | */ | |
5658 | static unsigned short crc16(unsigned short crc, unsigned char const *buffer, int len) | |
5659 | { | |
5660 | while (len--) | |
5661 | crc = crc16_byte(crc, *buffer++); | |
5662 | return crc; | |
5663 | } | |
5664 | ||
361ebed5 HSDT |
/*
 * Build a software-generated log record and persist it.
 *
 * Stamps the record with the software controller index and the current
 * wall-clock time, seals it with a CRC, then saves it (if its parsed level
 * reaches SSD_LOG_LEVEL), raises the alarm on error-level events, bumps the
 * SMART log counters and finally dispatches the event handler.
 *
 * Returns 0 on success or the error code from ssd_save_log().
 * Caller context: may sleep (called from the log workqueue path).
 */
static int ssd_save_swlog(struct ssd_device *dev, uint16_t event, uint32_t data)
{
	struct ssd_log log;
	int level;
	int ret = 0;

	/* no-op unless the driver runs in standard (fully featured) mode */
	if (unlikely(mode != SSD_DRV_MODE_STANDARD))
		return 0;

	memset(&log, 0, sizeof(struct ssd_log));

	log.ctrl_idx = SSD_LOG_SW_IDX;	/* mark as software-originated */
	log.time = ktime_get_real_seconds();
	log.le.event = event;
	log.le.data.val = data;

	/* seal the record: CRC-16 over the first 14 bytes of the struct
	 * (presumably the fields preceding le.idx — TODO confirm layout) */
	log.le.mod = SSD_DIF_WITH_OLD_LOG;
	log.le.idx = crc16(0,(const unsigned char *)&log,14);
	level = ssd_parse_log(dev, &log, 0);
	if (level >= SSD_LOG_LEVEL) {
		ret = ssd_save_log(dev, &log);
	}

	/* set alarm */
	if (SSD_LOG_LEVEL_ERR == level) {
		ssd_set_alarm(dev);
	}

	/* update smart */
	dev->smart.log_info.nr_log++;
	dev->smart.log_info.stat[level]++;

	/* handle event */
	ssd_handle_event(dev, event, level);

	return ret;
}
5702 | ||
5703 | static int ssd_gen_swlog(struct ssd_device *dev, uint16_t event, uint32_t data) | |
5704 | { | |
5705 | struct ssd_log_entry le; | |
5706 | int ret; | |
5707 | ||
5708 | if (unlikely(mode != SSD_DRV_MODE_STANDARD)) | |
5709 | return 0; | |
5710 | ||
5711 | /* slave port ? */ | |
5712 | if (dev->slave) { | |
5713 | return 0; | |
5714 | } | |
5715 | ||
5716 | memset(&le, 0, sizeof(struct ssd_log_entry)); | |
5717 | le.event = event; | |
5718 | le.data.val = data; | |
5719 | ||
5720 | ret = sfifo_put(&dev->log_fifo, &le); | |
5721 | if (ret) { | |
5722 | return ret; | |
5723 | } | |
5724 | ||
5725 | if (test_bit(SSD_INIT_WORKQ, &dev->state)) { | |
5726 | queue_work(dev->workq, &dev->log_work); | |
5727 | } | |
5728 | ||
5729 | return 0; | |
5730 | } | |
5731 | ||
5732 | static int ssd_do_swlog(struct ssd_device *dev) | |
5733 | { | |
5734 | struct ssd_log_entry le; | |
5735 | int ret = 0; | |
5736 | ||
5737 | memset(&le, 0, sizeof(struct ssd_log_entry)); | |
5738 | while (!sfifo_get(&dev->log_fifo, &le)) { | |
5739 | ret = ssd_save_swlog(dev, le.event, le.data.val); | |
5740 | if (ret) { | |
5741 | break; | |
5742 | } | |
5743 | } | |
5744 | ||
5745 | return ret; | |
5746 | } | |
5747 | ||
5748 | static int __ssd_clear_log(struct ssd_device *dev) | |
5749 | { | |
5750 | uint32_t off, length; | |
5751 | int ret; | |
5752 | ||
5753 | if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) { | |
5754 | return 0; | |
5755 | } | |
5756 | ||
5757 | if (dev->internal_log.nr_log == 0) { | |
5758 | return 0; | |
5759 | } | |
5760 | ||
5761 | mutex_lock(&dev->internal_log_mutex); | |
5762 | ||
5763 | off = dev->rom_info.log_base; | |
5764 | length = dev->rom_info.log_sz; | |
5765 | ||
5766 | ret = ssd_spi_erase(dev, off, length); | |
5767 | if (ret) { | |
5768 | hio_warn("%s: log erase: failed\n", dev->name); | |
5769 | goto out; | |
5770 | } | |
5771 | ||
5772 | dev->internal_log.nr_log = 0; | |
5773 | ||
5774 | out: | |
5775 | mutex_unlock(&dev->internal_log_mutex); | |
5776 | return ret; | |
5777 | } | |
5778 | ||
5779 | static int ssd_clear_log(struct ssd_device *dev) | |
5780 | { | |
5781 | int ret; | |
5782 | ||
5783 | ret = __ssd_clear_log(dev); | |
5784 | if(!ret) { | |
5785 | ssd_gen_swlog(dev, SSD_LOG_CLEAR_LOG, 0); | |
5786 | } | |
5787 | ||
5788 | return ret; | |
5789 | } | |
5790 | ||
/*
 * Fetch and process all pending hardware log entries from one controller.
 *
 * @ctrl_idx: controller whose log is read
 * @buf:      caller-provided scratch buffer (dev->hw_info.log_sz bytes)
 *
 * Each raw entry is wrapped into a timestamped struct ssd_log, CRC-sealed,
 * parsed for severity, optionally persisted and alarmed, and accounted in
 * the SMART counters.  SEU (single-event-upset) faults get special
 * treatment: they are counted in the volatile log info only and trigger a
 * firmware-reload request in hardware.
 *
 * Returns 0 on success, or the ssd_read_log() error code.
 */
static int ssd_do_log(struct ssd_device *dev, int ctrl_idx, void *buf)
{
	struct ssd_log_entry *le;
	struct ssd_log log;
	int nr_log = 0;
	int level;
	int ret = 0;

	ret = ssd_read_log(dev, ctrl_idx, buf, &nr_log);
	if (ret) {
		return ret;
	}

	/* one timestamp/ctrl_idx shared by every entry of this batch */
	log.time = ktime_get_real_seconds();
	log.ctrl_idx = ctrl_idx;

	le = (ssd_log_entry_t *)buf;
	while (nr_log > 0) {
		memcpy(&log.le, le, sizeof(struct ssd_log_entry));

		/* seal with CRC-16 over the first 14 bytes (fields before
		 * le.idx, presumably — TODO confirm struct layout) */
		log.le.mod = SSD_DIF_WITH_OLD_LOG;
		log.le.idx = crc16(0,(const unsigned char *)&log,14);
		level = ssd_parse_log(dev, &log, 1);
		if (level >= SSD_LOG_LEVEL) {
			ssd_save_log(dev, &log);
		}

		/* set alarm */
		if (SSD_LOG_LEVEL_ERR == level) {
			ssd_set_alarm(dev);
		}

		dev->smart.log_info.nr_log++;
		if (SSD_LOG_SEU_FAULT != le->event && SSD_LOG_SEU_FAULT1 != le->event) {
			dev->smart.log_info.stat[level]++;
		} else {
			/* SEU fault */

			/* log to the volatile log info */
			dev->log_info.nr_log++;
			dev->log_info.stat[level]++;

			/* do something: ask hardware to reload firmware */
			dev->reload_fw = 1;
			ssd_reg32_write(dev->ctrlp + SSD_RELOAD_FW_REG, SSD_RELOAD_FLAG);
			if (le->event != SSD_LOG_SEU_FAULT1) {
				dev->has_non_0x98_reg_access = 1;
			}

			/*dev->readonly = 1;
			set_disk_ro(dev->gd, 1);
			hio_warn("%s: switched to read-only mode.\n", dev->name);*/
		}

		/* handle event */
		ssd_handle_event(dev, le->event, level);

		le++;
		nr_log--;
	}

	return 0;
}
5854 | ||
/*
 * Workqueue handler that drains both hardware and software logs.
 *
 * Signature differs across kernel versions: pre-2.6.20 workqueues pass an
 * opaque data pointer, later ones pass the work_struct itself.
 *
 * If the device is online and not in a log-error state, lazily allocates
 * the hardware log buffer and, when SSD_LOG_HW is pending, pulls the log
 * from every controller.  A hardware log failure latches SSD_LOG_ERR so
 * further hardware reads are skipped.  Software (FIFO) logs are always
 * drained afterwards.
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
static void ssd_log_worker(void *data)
{
	struct ssd_device *dev = (struct ssd_device *)data;
#else
static void ssd_log_worker(struct work_struct *work)
{
	struct ssd_device *dev = container_of(work, struct ssd_device, log_work);
#endif
	int i;
	int ret;

	if (!test_bit(SSD_LOG_ERR, &dev->state) && test_bit(SSD_ONLINE, &dev->state)) {
		/* alloc log buf (lazily, kept for the device's lifetime) */
		if (!dev->log_buf) {
			dev->log_buf = kmalloc(dev->hw_info.log_sz, GFP_KERNEL);
			if (!dev->log_buf) {
				hio_warn("%s: ssd_log_worker: no mem\n", dev->name);
				return;
			}
		}

		/* get log from each controller if hardware flagged new entries */
		if (test_and_clear_bit(SSD_LOG_HW, &dev->state)) {
			for (i=0; i<dev->hw_info.nr_ctrl; i++) {
				ret = ssd_do_log(dev, i, dev->log_buf);
				if (ret) {
					(void)test_and_set_bit(SSD_LOG_ERR, &dev->state);
					hio_warn("%s: do log fail\n", dev->name);
				}
			}
		}
	}

	ret = ssd_do_swlog(dev);
	if (ret) {
		hio_warn("%s: do swlog fail\n", dev->name);
	}
}
5894 | ||
5895 | static void ssd_cleanup_log(struct ssd_device *dev) | |
5896 | { | |
5897 | if (dev->log_buf) { | |
5898 | kfree(dev->log_buf); | |
5899 | dev->log_buf = NULL; | |
5900 | } | |
5901 | ||
5902 | sfifo_free(&dev->log_fifo); | |
5903 | ||
5904 | if (dev->internal_log.log) { | |
5905 | vfree(dev->internal_log.log); | |
1197134c | 5906 | dev->internal_log.nr_log = 0; |
361ebed5 HSDT |
5907 | dev->internal_log.log = NULL; |
5908 | } | |
5909 | } | |
5910 | ||
/*
 * Initialize the logging subsystem for a device.
 *
 * Sets up the internal-log mutex and work item, allocates the in-memory
 * internal log image and the software log FIFO, then (for protocol > V3)
 * scans the NOR flash log region entry by entry to rebuild the in-memory
 * copy.  A ctrl_idx of 0xff marks erased flash, i.e. end of the log; the
 * most recent SSD_LOG_POWER_ON entry index is remembered in
 * dev->last_poweron_id.
 *
 * Returns 0 on success, -ENOMEM or an SPI error; errors are suppressed
 * (0 returned) when the driver is not in standard mode.
 */
static int ssd_init_log(struct ssd_device *dev)
{
	struct ssd_log *log;
	uint32_t off, size;
	uint32_t len = 0;
	int ret = 0;

	mutex_init(&dev->internal_log_mutex);

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
	INIT_WORK(&dev->log_work, ssd_log_worker, dev);
#else
	INIT_WORK(&dev->log_work, ssd_log_worker);
#endif

	off = dev->rom_info.log_base;
	size = dev->rom_info.log_sz;

	dev->internal_log.nr_log = 0;
	dev->internal_log.log = vmalloc(size);
	if (!dev->internal_log.log) {
		ret = -ENOMEM;
		goto out_alloc_log;
	}

	ret = sfifo_alloc(&dev->log_fifo, SSD_LOG_FIFO_SZ, sizeof(struct ssd_log_entry));
	if (ret < 0) {
		goto out_alloc_log_fifo;
	}

	/* old protocols have no persistent flash log to replay */
	if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) {
		return 0;
	}

	/* replay the flash log region into the in-memory image */
	log = (struct ssd_log *)dev->internal_log.log;
	while (len < size) {
		ret = ssd_spi_read(dev, log, off, sizeof(struct ssd_log));
		if (ret) {
			goto out_read_log;
		}

		/* 0xff == erased flash: end of valid entries */
		if (log->ctrl_idx == 0xff) {
			break;
		}

		/* track the latest power-on record's position */
		if (log->le.event == SSD_LOG_POWER_ON) {
			if (dev->internal_log.nr_log > dev->last_poweron_id) {
				dev->last_poweron_id = dev->internal_log.nr_log;
			}
		}

		dev->internal_log.nr_log++;
		log++;
		len += sizeof(struct ssd_log);
		off += sizeof(struct ssd_log);
	}

	return 0;

out_read_log:
	sfifo_free(&dev->log_fifo);
out_alloc_log_fifo:
	vfree(dev->internal_log.log);
	dev->internal_log.log = NULL;
	dev->internal_log.nr_log = 0;
out_alloc_log:
	/* skip error if not in standard mode */
	if (mode != SSD_DRV_MODE_STANDARD) {
		ret = 0;
	}
	return ret;
}
5983 | ||
5984 | /* work queue */ | |
/*
 * Stop accepting new deferred work: clear SSD_INIT_WORKQ first so no new
 * items get queued, then flush whatever is already pending.
 */
static void ssd_stop_workq(struct ssd_device *dev)
{
	test_and_clear_bit(SSD_INIT_WORKQ, &dev->state);
	flush_workqueue(dev->workq);
}
5990 | ||
/*
 * Re-enable deferred work and kick the log worker once, in case log
 * entries accumulated while the workqueue was stopped.
 */
static void ssd_start_workq(struct ssd_device *dev)
{
	(void)test_and_set_bit(SSD_INIT_WORKQ, &dev->state);

	/* log ? */
	queue_work(dev->workq, &dev->log_work);
}
5998 | ||
/*
 * Tear down the device workqueue: flush pending items, destroy it and
 * NULL the pointer so a stale reference is caught immediately.
 */
static void ssd_cleanup_workq(struct ssd_device *dev)
{
	flush_workqueue(dev->workq);
	destroy_workqueue(dev->workq);
	dev->workq = NULL;
}
6005 | ||
6006 | static int ssd_init_workq(struct ssd_device *dev) | |
6007 | { | |
6008 | int ret = 0; | |
6009 | ||
6010 | dev->workq = create_singlethread_workqueue(dev->name); | |
6011 | if (!dev->workq) { | |
6012 | ret = -ESRCH; | |
6013 | goto out; | |
6014 | } | |
6015 | ||
6016 | out: | |
6017 | return ret; | |
6018 | } | |
6019 | ||
6020 | /* rom */ | |
/*
 * Discover the NOR flash (ROM) layout for this device.
 *
 * Three generations are handled:
 *  - protocol < V3:    fixed compile-time layout constants;
 *  - V3 .. < V3.2:     geometry read from registers, regions laid out
 *                      upward from the firmware areas;
 *  - >= V3.2:          geometry read from registers, smart/log regions
 *                      laid out downward from the label area.
 *
 * Also initializes the SPI and I2C access mutexes.  Returns the result of
 * ssd_init_spi(), which probes the flash itself.
 */
static int ssd_init_rom_info(struct ssd_device *dev)
{
	uint32_t val;

	mutex_init(&dev->spi_mutex);
	mutex_init(&dev->i2c_mutex);

	if (dev->protocol_info.ver < SSD_PROTOCOL_V3) {
		/* fix bug: read data to clear status */
		(void)ssd_reg32_read(dev->ctrlp + SSD_SPI_REG_RDATA);

		/* legacy devices: layout is hard-wired */
		dev->rom_info.size = SSD_ROM_SIZE;
		dev->rom_info.block_size = SSD_ROM_BLK_SIZE;
		dev->rom_info.page_size = SSD_ROM_PAGE_SIZE;

		dev->rom_info.bridge_fw_base = SSD_ROM_BRIDGE_FW_BASE;
		dev->rom_info.bridge_fw_sz = SSD_ROM_BRIDGE_FW_SIZE;
		dev->rom_info.nr_bridge_fw = SSD_ROM_NR_BRIDGE_FW;

		dev->rom_info.ctrl_fw_base = SSD_ROM_CTRL_FW_BASE;
		dev->rom_info.ctrl_fw_sz = SSD_ROM_CTRL_FW_SIZE;
		dev->rom_info.nr_ctrl_fw = SSD_ROM_NR_CTRL_FW;

		dev->rom_info.log_sz = SSD_ROM_LOG_SZ;

		dev->rom_info.vp_base = SSD_ROM_VP_BASE;
		dev->rom_info.label_base = SSD_ROM_LABEL_BASE;
	} else if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
		/* geometry: size/block/page packed into one register */
		val = ssd_reg32_read(dev->ctrlp + SSD_ROM_INFO_REG);
		dev->rom_info.size = 0x100000 * (1U << (val & 0xFF));
		dev->rom_info.block_size = 0x10000 * (1U << ((val>>8) & 0xFF));
		dev->rom_info.page_size = (val>>16) & 0xFFFF;

		/* firmware regions: base/size in block units, count in top bits */
		val = ssd_reg32_read(dev->ctrlp + SSD_ROM_BRIDGE_FW_INFO_REG);
		dev->rom_info.bridge_fw_base = dev->rom_info.block_size * (val & 0xFFFF);
		dev->rom_info.bridge_fw_sz = dev->rom_info.block_size * ((val>>16) & 0x3FFF);
		dev->rom_info.nr_bridge_fw = ((val >> 30) & 0x3) + 1;

		val = ssd_reg32_read(dev->ctrlp + SSD_ROM_CTRL_FW_INFO_REG);
		dev->rom_info.ctrl_fw_base = dev->rom_info.block_size * (val & 0xFFFF);
		dev->rom_info.ctrl_fw_sz = dev->rom_info.block_size * ((val>>16) & 0x3FFF);
		dev->rom_info.nr_ctrl_fw = ((val >> 30) & 0x3) + 1;

		/* bm-fw, log and smart regions follow the controller firmware */
		dev->rom_info.bm_fw_base = dev->rom_info.ctrl_fw_base + (dev->rom_info.nr_ctrl_fw * dev->rom_info.ctrl_fw_sz);
		dev->rom_info.bm_fw_sz = SSD_PV3_ROM_BM_FW_SZ;
		dev->rom_info.nr_bm_fw = SSD_PV3_ROM_NR_BM_FW;

		dev->rom_info.log_base = dev->rom_info.bm_fw_base + (dev->rom_info.nr_bm_fw * dev->rom_info.bm_fw_sz);
		dev->rom_info.log_sz = SSD_ROM_LOG_SZ;

		dev->rom_info.smart_base = dev->rom_info.log_base + dev->rom_info.log_sz;
		dev->rom_info.smart_sz = SSD_PV3_ROM_SMART_SZ;
		dev->rom_info.nr_smart = SSD_PV3_ROM_NR_SMART;

		val = ssd_reg32_read(dev->ctrlp + SSD_ROM_VP_INFO_REG);
		dev->rom_info.vp_base = dev->rom_info.block_size * val;
		dev->rom_info.label_base = dev->rom_info.vp_base + dev->rom_info.block_size;
		/* fall back below vp if the label would overflow the flash */
		if (dev->rom_info.label_base >= dev->rom_info.size) {
			dev->rom_info.label_base = dev->rom_info.vp_base - dev->rom_info.block_size;
		}
	} else {
		val = ssd_reg32_read(dev->ctrlp + SSD_ROM_INFO_REG);
		dev->rom_info.size = 0x100000 * (1U << (val & 0xFF));
		dev->rom_info.block_size = 0x10000 * (1U << ((val>>8) & 0xFF));
		dev->rom_info.page_size = (val>>16) & 0xFFFF;

		val = ssd_reg32_read(dev->ctrlp + SSD_ROM_BRIDGE_FW_INFO_REG);
		dev->rom_info.bridge_fw_base = dev->rom_info.block_size * (val & 0xFFFF);
		dev->rom_info.bridge_fw_sz = dev->rom_info.block_size * ((val>>16) & 0x3FFF);
		dev->rom_info.nr_bridge_fw = ((val >> 30) & 0x3) + 1;

		val = ssd_reg32_read(dev->ctrlp + SSD_ROM_CTRL_FW_INFO_REG);
		dev->rom_info.ctrl_fw_base = dev->rom_info.block_size * (val & 0xFFFF);
		dev->rom_info.ctrl_fw_sz = dev->rom_info.block_size * ((val>>16) & 0x3FFF);
		dev->rom_info.nr_ctrl_fw = ((val >> 30) & 0x3) + 1;

		/* V3.2: smart and log regions are laid out downward from label */
		val = ssd_reg32_read(dev->ctrlp + SSD_ROM_VP_INFO_REG);
		dev->rom_info.vp_base = dev->rom_info.block_size * val;
		dev->rom_info.label_base = dev->rom_info.vp_base - SSD_PV3_2_ROM_SEC_SZ;

		dev->rom_info.nr_smart = SSD_PV3_ROM_NR_SMART;
		dev->rom_info.smart_sz = SSD_PV3_2_ROM_SEC_SZ;
		dev->rom_info.smart_base = dev->rom_info.label_base - (dev->rom_info.smart_sz * dev->rom_info.nr_smart);
		/* NOTE(review): smart_base was computed with the pre-clamp
		 * smart_sz; clamp applies afterwards — confirm intended */
		if (dev->rom_info.smart_sz > dev->rom_info.block_size) {
			dev->rom_info.smart_sz = dev->rom_info.block_size;
		}

		dev->rom_info.log_sz = SSD_PV3_2_ROM_LOG_SZ;
		dev->rom_info.log_base = dev->rom_info.smart_base - dev->rom_info.log_sz;
	}

	return ssd_init_spi(dev);
}
6114 | ||
6115 | /* smart */ | |
/*
 * Refresh a SMART snapshot with current runtime, block-layer I/O counters
 * and per-queue error/ECC statistics.
 *
 * Returns 1 when at least 60 seconds of runtime have accumulated (caller
 * should persist the snapshot), 0 otherwise or when the block device is
 * not yet initialized.  The block-stat accessors differ per kernel
 * version, hence the #if ladder.
 */
static int ssd_update_smart(struct ssd_device *dev, struct ssd_smart *smart)
{
	uint64_t cur_time, run_time;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,11,0))
	struct block_device *part;
	int cpu;
#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
	struct hd_struct *part;
	int cpu;
#endif
	int i, j;
	int ret = 0;

	if (!test_bit(SSD_INIT_BD, &dev->state)) {
		return 0;
	}

	/* clamp to 0 if the wall clock went backwards */
	cur_time = (uint64_t)ktime_get_real_seconds();
	if (cur_time < dev->uptime) {
		run_time = 0;
	} else {
		run_time = cur_time - dev->uptime;
	}

	/* avoid frequently update */
	if (run_time >= 60) {
		ret = 1;
	}

	/* io stat */
	smart->io_stat.run_time += run_time;

#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,27))
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5,0,0))
	cpu = part_stat_lock();
	part = &dev->gd->part0;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0))
	part_round_stats(dev->rq, cpu, part);
#else
	part_round_stats(cpu, part);
#endif
	part_stat_unlock();
#endif

	/* NOTE(review): for kernels >= 5.0 `part` is read below without ever
	 * being assigned in this function — looks like a bug; confirm how the
	 * 5.x builds obtain part0 */
	smart->io_stat.nr_read += part_stat_read(part, ios[READ]);
	smart->io_stat.nr_write += part_stat_read(part, ios[WRITE]);
	smart->io_stat.rsectors += part_stat_read(part, sectors[READ]);
	smart->io_stat.wsectors += part_stat_read(part, sectors[WRITE]);
#elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,14))
	preempt_disable();
	disk_round_stats(dev->gd);
	preempt_enable();

	smart->io_stat.nr_read += disk_stat_read(dev->gd, ios[READ]);
	smart->io_stat.nr_write += disk_stat_read(dev->gd, ios[WRITE]);
	smart->io_stat.rsectors += disk_stat_read(dev->gd, sectors[READ]);
	smart->io_stat.wsectors += disk_stat_read(dev->gd, sectors[WRITE]);
#else
	preempt_disable();
	disk_round_stats(dev->gd);
	preempt_enable();

	smart->io_stat.nr_read += disk_stat_read(dev->gd, reads);
	smart->io_stat.nr_write += disk_stat_read(dev->gd, writes);
	smart->io_stat.rsectors += disk_stat_read(dev->gd, read_sectors);
	smart->io_stat.wsectors += disk_stat_read(dev->gd, write_sectors);
#endif

	smart->io_stat.nr_to += atomic_read(&dev->tocnt);

	/* accumulate per-queue error counters */
	for (i=0; i<dev->nr_queue; i++) {
		smart->io_stat.nr_rwerr += dev->queue[i].io_stat.nr_rwerr;
		smart->io_stat.nr_ioerr += dev->queue[i].io_stat.nr_ioerr;
	}

	/* accumulate per-queue ECC bit-flip histograms */
	for (i=0; i<dev->nr_queue; i++) {
		for (j=0; j<SSD_ECC_MAX_FLIP; j++) {
			smart->ecc_info.bitflip[j] += dev->queue[i].ecc_info.bitflip[j];
		}
	}

	//dev->uptime = tv.tv_sec;

	return ret;
}
6201 | ||
/*
 * Wipe the persistent SMART area and reset all in-memory accumulators.
 *
 * The flash copies are erased first; only on success is the in-memory
 * SMART struct reset (magic restored, version bumped so the next save
 * wins the version vote), the per-queue counters, timeout count and
 * volatile log info cleared, and the uptime baseline restarted.
 *
 * No-op for protocol <= V3.  Returns 0 or the SPI erase error.
 */
static int __ssd_clear_smart(struct ssd_device *dev)
{
	uint64_t sversion;
	uint32_t off, length;
	int i;
	int ret;

	if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) {
		return 0;
	}

	/* clear smart: erase every flash copy in one range */
	off = dev->rom_info.smart_base;
	length = dev->rom_info.smart_sz * dev->rom_info.nr_smart;

	ret = ssd_spi_erase(dev, off, length);
	if (ret) {
		hio_warn("%s: info erase: failed\n", dev->name);
		goto out;
	}

	/* keep a monotonically increasing version across the wipe */
	sversion = dev->smart.version;

	memset(&dev->smart, 0, sizeof(struct ssd_smart));
	dev->smart.version = sversion + 1;
	dev->smart.magic = SSD_SMART_MAGIC;

	/* clear all tmp acc */
	for (i=0; i<dev->nr_queue; i++) {
		memset(&(dev->queue[i].io_stat), 0, sizeof(struct ssd_io_stat));
		memset(&(dev->queue[i].ecc_info), 0, sizeof(struct ssd_ecc_info));
	}

	atomic_set(&dev->tocnt, 0);

	/* clear tmp log info */
	memset(&dev->log_info, 0, sizeof(struct ssd_log_info));

	dev->uptime = (uint64_t)ktime_get_real_seconds();

	/* clear alarm ? */
	//ssd_clear_alarm(dev);
out:
	return ret;
}
6247 | ||
/*
 * Clear only the warning-related SMART fields (log stats, timeout and
 * error counters, ECC info) — both the aggregated copy and the per-queue
 * accumulators — then write the cleaned SMART struct back to every flash
 * copy and bump its version.
 *
 * Unlike __ssd_clear_smart(), I/O statistics and runtime are preserved.
 * No-op for protocol <= V3.  Returns 0 or the first SPI error.
 */
static int __ssd_clear_warning(struct ssd_device *dev)
{
	uint32_t off, size;
	int i, ret = 0;

	if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) {
		return 0;
	}

	/* clear log_info warning */
	memset(&dev->smart.log_info, 0, sizeof(dev->smart.log_info));

	/* clear io_stat warning */
	dev->smart.io_stat.nr_to = 0;
	dev->smart.io_stat.nr_rwerr = 0;
	dev->smart.io_stat.nr_ioerr = 0;

	/* clear ecc_info warning */
	memset(&dev->smart.ecc_info, 0, sizeof(dev->smart.ecc_info));

	/* clear queued warnings */
	for (i=0; i<dev->nr_queue; i++) {
		/* queued io_stat warning */
		dev->queue[i].io_stat.nr_to = 0;
		dev->queue[i].io_stat.nr_rwerr = 0;
		dev->queue[i].io_stat.nr_ioerr = 0;

		/* queued ecc_info warning */
		memset(&(dev->queue[i].ecc_info), 0, sizeof(dev->queue[i].ecc_info));
	}

	/* write smart back to nor */
	for (i = 0; i < dev->rom_info.nr_smart; i++) {
		off = dev->rom_info.smart_base + (dev->rom_info.smart_sz * i);
		size = dev->rom_info.smart_sz;

		ret = ssd_spi_erase(dev, off, size);
		if (ret) {
			hio_warn("%s: warning erase: failed with code 1\n", dev->name);
			goto out;
		}

		/* only the struct itself is written, not the whole sector */
		size = sizeof(struct ssd_smart);

		ret = ssd_spi_write(dev, &dev->smart, off, size);
		if (ret) {
			hio_warn("%s: warning erase: failed with code 2\n", dev->name);
			goto out;
		}
	}

	dev->smart.version++;

	/* clear cmd timeout warning */
	atomic_set(&dev->tocnt, 0);

	/* clear tmp log info */
	memset(&dev->log_info, 0, sizeof(dev->log_info));

out:
	return ret;
}
6310 | ||
da3355df SF |
6311 | static int ssd_clear_smart(struct ssd_device *dev) |
6312 | { | |
6313 | int ret; | |
6314 | ||
6315 | ret = __ssd_clear_smart(dev); | |
6316 | if(!ret) { | |
6317 | ssd_gen_swlog(dev, SSD_LOG_CLEAR_SMART, 0); | |
6318 | } | |
6319 | ||
6320 | return ret; | |
6321 | } | |
6322 | ||
6323 | static int ssd_clear_warning(struct ssd_device *dev) | |
6324 | { | |
6325 | int ret; | |
6326 | ||
6327 | ret = __ssd_clear_warning(dev); | |
6328 | if(!ret) { | |
6329 | ssd_gen_swlog(dev, SSD_LOG_CLEAR_WARNING, 0); | |
6330 | } | |
6331 | ||
6332 | return ret; | |
6333 | } | |
6334 | ||
361ebed5 HSDT |
/*
 * Persist the SMART data to flash, if enough runtime has accumulated.
 *
 * ssd_update_smart() refreshes the counters and gates the save (returns 0
 * within 60s of the last baseline).  On save, the version is bumped and
 * every flash copy is erased and rewritten in turn.
 *
 * No-op outside standard mode or for protocol <= V3.
 * Returns 0 or the first SPI error.
 */
static int ssd_save_smart(struct ssd_device *dev)
{
	uint32_t off, size;
	int i;
	int ret = 0;

	if (unlikely(mode != SSD_DRV_MODE_STANDARD))
		return 0;

	if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) {
		return 0;
	}

	/* rate-limited: skip unless the snapshot says it is time to persist */
	if (!ssd_update_smart(dev, &dev->smart)) {
		return 0;
	}

	dev->smart.version++;

	for (i=0; i<dev->rom_info.nr_smart; i++) {
		off = dev->rom_info.smart_base + (dev->rom_info.smart_sz * i);
		size = dev->rom_info.smart_sz;

		ret = ssd_spi_erase(dev, off, size);
		if (ret) {
			hio_warn("%s: info erase failed\n", dev->name);
			goto out;
		}

		/* write only the struct, not the whole smart sector */
		size = sizeof(struct ssd_smart);

		ret = ssd_spi_write(dev, &dev->smart, off, size);
		if (ret) {
			hio_warn("%s: info write failed\n", dev->name);
			goto out;
		}

		//xx
	}

out:
	return ret;
}
6378 | ||
/*
 * Load and reconcile the SMART data at device bring-up.
 *
 * Phases:
 *  1. read every flash copy and keep the one with the highest version
 *     (a version vote across redundant copies);
 *  2. if none is valid, start fresh (magic + version 1);
 *  3. on a cold power-on (interval register reads 0), emit a POWER_ON
 *     log event and drop the stale timeout count;
 *  4. recount the replayed internal log and, if it disagrees with the
 *     stored per-level stats, adopt the recount ("unclean" shutdown);
 *  5. rewrite any flash copy that is missing or out of date;
 *  6. raise the alarm if the history shows timeouts/errors.
 *
 * No-op for protocol <= V3.  Errors are suppressed (0 returned) when not
 * in standard mode.
 */
static int ssd_init_smart(struct ssd_device *dev)
{
	struct ssd_smart *smart;
	uint32_t off, size, val;
	int i;
	int ret = 0;
	int update_smart = 0;

	dev->uptime = (uint64_t)ktime_get_real_seconds();

	if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) {
		return 0;
	}

	smart = kmalloc(sizeof(struct ssd_smart) * SSD_ROM_NR_SMART_MAX, GFP_KERNEL);
	if (!smart) {
		ret = -ENOMEM;
		goto out_nomem;
	}

	memset(&dev->smart, 0, sizeof(struct ssd_smart));

	/* read smart: version vote across the redundant flash copies */
	for (i=0; i<dev->rom_info.nr_smart; i++) {
		memset(&smart[i], 0, sizeof(struct ssd_smart));

		off = dev->rom_info.smart_base + (dev->rom_info.smart_sz * i);
		size = sizeof(struct ssd_smart);

		ret = ssd_spi_read(dev, &smart[i], off, size);
		if (ret) {
			hio_warn("%s: info read failed\n", dev->name);
			goto out;
		}

		if (smart[i].magic != SSD_SMART_MAGIC) {
			/* invalid copy: mark it so it gets rewritten below */
			smart[i].magic = 0;
			smart[i].version = 0;
			continue;
		}

		if (smart[i].version > dev->smart.version) {
			memcpy(&dev->smart, &smart[i], sizeof(struct ssd_smart));
		}
	}

	if (dev->smart.magic != SSD_SMART_MAGIC) {
		/* first time power up */
		dev->smart.magic = SSD_SMART_MAGIC;
		dev->smart.version = 1;
	}

	/* interval register == 0 indicates a cold power-on (presumably
	 * cleared by hardware reset — TODO confirm) */
	val = ssd_reg32_read(dev->ctrlp + SSD_INTR_INTERVAL_REG);
	if (!val) {
		dev->last_poweron_id = ~0;
		ssd_gen_swlog(dev, SSD_LOG_POWER_ON, dev->hw_info.bridge_ver);
		if (dev->smart.io_stat.nr_to) {
			dev->smart.io_stat.nr_to = 0;
			update_smart = 1;
		}
	}

	/* check log info */
	{
		struct ssd_log_info log_info;
		struct ssd_log *log = (struct ssd_log *)dev->internal_log.log;

		memset(&log_info, 0, sizeof(struct ssd_log_info));

		/* recount per-level stats from the replayed internal log */
		while (log_info.nr_log < dev->internal_log.nr_log) {
			int skip = 0;

			switch (log->le.event) {
			/* skip the volatile log info */
			case SSD_LOG_SEU_FAULT:
			case SSD_LOG_SEU_FAULT1:
				skip = 1;
				break;
			case SSD_LOG_TIMEOUT:
				/* timeouts before the last power-on are stale */
				skip = (dev->last_poweron_id >= log_info.nr_log);
				break;
			}

			if (!skip) {
				log_info.stat[ssd_parse_log(dev, log, 0)]++;
			}

			log_info.nr_log++;
			log++;
		}

		/* check: adopt the recount if any level disagrees */
		for (i=(SSD_LOG_NR_LEVEL-1); i>=0; i--) {
			if (log_info.stat[i] != dev->smart.log_info.stat[i]) {
				/* unclean */
				memcpy(&dev->smart.log_info, &log_info, sizeof(struct ssd_log_info));
				update_smart = 1;
				break;
			}
		}

		if (update_smart) {
			++dev->smart.version;
		}
	}

	/* rewrite any copy that is missing or lags the chosen version */
	for (i=0; i<dev->rom_info.nr_smart; i++) {
		if (smart[i].magic == SSD_SMART_MAGIC && smart[i].version == dev->smart.version) {
			continue;
		}

		off = dev->rom_info.smart_base + (dev->rom_info.smart_sz * i);
		size = dev->rom_info.smart_sz;

		ret = ssd_spi_erase(dev, off, size);
		if (ret) {
			hio_warn("%s: info erase failed\n", dev->name);
			goto out;
		}

		size = sizeof(struct ssd_smart);
		ret = ssd_spi_write(dev, &dev->smart, off, size);
		if (ret) {
			hio_warn("%s: info write failed\n", dev->name);
			goto out;
		}

		//xx
	}

	/* sync smart with alarm led */
	if (dev->smart.io_stat.nr_to || dev->smart.io_stat.nr_rwerr || dev->smart.log_info.stat[SSD_LOG_LEVEL_ERR]) {
		hio_warn("%s: some fault found in the history info\n", dev->name);
		ssd_set_alarm(dev);
	}

out:
	kfree(smart);
out_nomem:
	/* skip error if not in standard mode */
	if (mode != SSD_DRV_MODE_STANDARD) {
		ret = 0;
	}
	return ret;
}
6524 | ||
6525 | /* bm */ | |
/*
 * Read the battery module (BM) firmware version over SMBus.
 *
 * Transaction (under bm_mutex, since subclass selection is stateful):
 * first select the system-data subclass via a write-word, then read the
 * manufacturer-data block that contains the firmware version.  A version
 * with any of the top four bits set is treated as invalid (-EIO).
 *
 * Returns 0 and stores the version in *ver, or a negative error code.
 */
static int __ssd_bm_get_version(struct ssd_device *dev, uint16_t *ver)
{
	struct ssd_bm_manufacturer_data bm_md = {0};
	uint16_t sc_id = SSD_BM_SYSTEM_DATA_SUBCLASS_ID;
	uint8_t cmd;
	int ret = 0;

	if (!dev || !ver) {
		return -EINVAL;
	}

	mutex_lock(&dev->bm_mutex);

	/* select the subclass to expose on the data-flash page */
	cmd = SSD_BM_DATA_FLASH_SUBCLASS_ID;
	ret = ssd_smbus_write_word(dev, SSD_BM_SLAVE_ADDRESS, cmd, (uint8_t *)&sc_id);
	if (ret) {
		goto out;
	}

	cmd = SSD_BM_DATA_FLASH_SUBCLASS_ID_PAGE1;
	ret = ssd_smbus_read_block(dev, SSD_BM_SLAVE_ADDRESS, cmd, sizeof(struct ssd_bm_manufacturer_data), (uint8_t *)&bm_md);
	if (ret) {
		goto out;
	}

	/* sanity check: valid versions never have the high nibble set */
	if (bm_md.firmware_ver & 0xF000) {
		ret = -EIO;
		goto out;
	}

	*ver = bm_md.firmware_ver;

out:
	mutex_unlock(&dev->bm_mutex);
	return ret;
}
6562 | ||
6563 | static int ssd_bm_get_version(struct ssd_device *dev, uint16_t *ver) | |
6564 | { | |
6565 | uint16_t tmp = 0; | |
6566 | int i = SSD_BM_RETRY_MAX; | |
6567 | int ret = 0; | |
6568 | ||
6569 | while (i-- > 0) { | |
6570 | ret = __ssd_bm_get_version(dev, &tmp); | |
6571 | if (!ret) { | |
6572 | break; | |
6573 | } | |
6574 | } | |
6575 | if (ret) { | |
6576 | return ret; | |
6577 | } | |
6578 | ||
6579 | *ver = tmp; | |
6580 | ||
6581 | return 0; | |
6582 | } | |
6583 | ||
/*
 * Read the number of capacitor cells from the BM configuration registers.
 *
 * Same SMBus select-then-read-page protocol as __ssd_bm_get_version(),
 * serialized by bm_mutex. The cell-count field (operation_cfg.cc) is
 * sanity-checked to the 1..4 range; *nr_cap is reported as cc + 1.
 * Returns 0 on success or a negative errno.
 */
static int __ssd_bm_nr_cap(struct ssd_device *dev, int *nr_cap)
{
	struct ssd_bm_configuration_registers bm_cr;
	uint16_t sc_id = SSD_BM_CONFIGURATION_REGISTERS_ID;
	uint8_t cmd;
	int ret;

	mutex_lock(&dev->bm_mutex);

	/* select the configuration-registers subclass */
	cmd = SSD_BM_DATA_FLASH_SUBCLASS_ID;
	ret = ssd_smbus_write_word(dev, SSD_BM_SLAVE_ADDRESS, cmd, (uint8_t *)&sc_id);
	if (ret) {
		goto out;
	}

	/* read page 1 of the selected subclass */
	cmd = SSD_BM_DATA_FLASH_SUBCLASS_ID_PAGE1;
	ret = ssd_smbus_read_block(dev, SSD_BM_SLAVE_ADDRESS, cmd, sizeof(struct ssd_bm_configuration_registers), (uint8_t *)&bm_cr);
	if (ret) {
		goto out;
	}

	/* reject an implausible cell count */
	if (bm_cr.operation_cfg.cc == 0 || bm_cr.operation_cfg.cc > 4) {
		ret = -EIO;
		goto out;
	}

	*nr_cap = bm_cr.operation_cfg.cc + 1;

out:
	mutex_unlock(&dev->bm_mutex);
	return ret;
}
6616 | ||
6617 | static int ssd_bm_nr_cap(struct ssd_device *dev, int *nr_cap) | |
6618 | { | |
6619 | int tmp = 0; | |
6620 | int i = SSD_BM_RETRY_MAX; | |
6621 | int ret = 0; | |
6622 | ||
6623 | while (i-- > 0) { | |
6624 | ret = __ssd_bm_nr_cap(dev, &tmp); | |
6625 | if (!ret) { | |
6626 | break; | |
6627 | } | |
6628 | } | |
6629 | if (ret) { | |
6630 | return ret; | |
6631 | } | |
6632 | ||
6633 | *nr_cap = tmp; | |
6634 | ||
6635 | return 0; | |
6636 | } | |
6637 | ||
6638 | static int ssd_bm_enter_cap_learning(struct ssd_device *dev) | |
6639 | { | |
6640 | uint16_t buf = SSD_BM_ENTER_CAP_LEARNING; | |
6641 | uint8_t cmd = SSD_BM_MANUFACTURERACCESS; | |
6642 | int ret; | |
6643 | ||
6644 | ret = ssd_smbus_write_word(dev, SSD_BM_SLAVE_ADDRESS, cmd, (uint8_t *)&buf); | |
6645 | if (ret) { | |
6646 | goto out; | |
6647 | } | |
6648 | ||
6649 | out: | |
6650 | return ret; | |
6651 | } | |
6652 | ||
6653 | static int ssd_bm_get_sfstatus(struct ssd_device *dev, uint16_t *status) | |
6654 | { | |
6655 | uint16_t val = 0; | |
6656 | uint8_t cmd = SSD_BM_SAFETYSTATUS; | |
6657 | int ret; | |
6658 | ||
6659 | ret = ssd_smbus_read_word(dev, SSD_BM_SLAVE_ADDRESS, cmd, (uint8_t *)&val); | |
6660 | if (ret) { | |
6661 | goto out; | |
6662 | } | |
6663 | ||
6664 | *status = val; | |
6665 | out: | |
6666 | return ret; | |
6667 | } | |
6668 | ||
6669 | static int ssd_bm_get_opstatus(struct ssd_device *dev, uint16_t *status) | |
6670 | { | |
6671 | uint16_t val = 0; | |
6672 | uint8_t cmd = SSD_BM_OPERATIONSTATUS; | |
6673 | int ret; | |
6674 | ||
6675 | ret = ssd_smbus_read_word(dev, SSD_BM_SLAVE_ADDRESS, cmd, (uint8_t *)&val); | |
6676 | if (ret) { | |
6677 | goto out; | |
6678 | } | |
6679 | ||
6680 | *status = val; | |
6681 | out: | |
6682 | return ret; | |
6683 | } | |
6684 | ||
/*
 * Read the full battery-module status over SMBus.
 *
 * Walks the ssd_bm_sbs command table; each entry describes one SBS
 * register: SMBus command, access size (byte/word), value unit, and the
 * destination offset inside struct ssd_bm (resolved via bm_var()).
 * Results are accumulated in a local struct and copied to
 * @bm_status_out only when every read succeeded, so the caller never
 * observes a partially filled structure.
 *
 * Returns 0 on success, a negative errno on SMBus failure, or -1 for a
 * malformed table entry.
 */
static int ssd_get_bmstruct(struct ssd_device *dev, struct ssd_bm *bm_status_out)
{
	struct sbs_cmd *bm_sbs = ssd_bm_sbs;
	struct ssd_bm bm_status;
	uint8_t buf[2] = {0, };
	uint16_t val = 0;
	uint16_t cval;
	int ret = 0;

	memset(&bm_status, 0, sizeof(struct ssd_bm));

	/* the command table is terminated by a NULL description */
	while (bm_sbs->desc != NULL) {
		switch (bm_sbs->size) {
		case SBS_SIZE_BYTE:
			ret = ssd_smbus_read_byte(dev, SSD_BM_SLAVE_ADDRESS, bm_sbs->cmd, buf);
			if (ret) {
				goto out;
			}
			val = buf[0];
			break;
		case SBS_SIZE_WORD:
			ret = ssd_smbus_read_word(dev, SSD_BM_SLAVE_ADDRESS, bm_sbs->cmd, (uint8_t *)&val);
			if (ret) {
				goto out;
			}
			break;
		default:
			ret = -1;
			goto out;
			break;
		}

		/* store the raw value, converting units where needed */
		switch (bm_sbs->unit) {
		case SBS_UNIT_VALUE:
			*(uint16_t *)bm_var(&bm_status, bm_sbs->off) = val & bm_sbs->mask;
			break;
		case SBS_UNIT_TEMPERATURE:
			/* SBS temperature is in 0.1 K; 2731 = 273.1 K, i.e. convert to Celsius */
			cval = (uint16_t)(val - 2731) / 10;
			*(uint16_t *)bm_var(&bm_status, bm_sbs->off) = cval;
			break;
		case SBS_UNIT_VOLTAGE:
			*(uint16_t *)bm_var(&bm_status, bm_sbs->off) = val;
			break;
		case SBS_UNIT_CURRENT:
			*(uint16_t *)bm_var(&bm_status, bm_sbs->off) = val;
			break;
		case SBS_UNIT_ESR:
			*(uint16_t *)bm_var(&bm_status, bm_sbs->off) = val;
			break;
		case SBS_UNIT_PERCENT:
			*(uint16_t *)bm_var(&bm_status, bm_sbs->off) = val;
			break;
		case SBS_UNIT_CAPACITANCE:
			*(uint16_t *)bm_var(&bm_status, bm_sbs->off) = val;
			break;
		default:
			ret = -1;
			goto out;
			break;
		}

		bm_sbs++;
	}

	memcpy(bm_status_out, &bm_status, sizeof(struct ssd_bm));

out:
	return ret;
}
6757 | ||
/*
 * Derive an overall battery-module health value in *status.
 *
 * WARNING is reported when any capacitor cell voltage is below
 * SSD_BM_CAP_VOLT_MIN or when the safety-status register is non-zero;
 * otherwise bit 12 of the operation status selects between CHARGING
 * (bit clear) and OK (bit set). Returns 0 with *status set, or a
 * negative errno if the BM could not be queried (*status untouched).
 */
static int __ssd_bm_status(struct ssd_device *dev, int *status)
{
	struct ssd_bm bm_status = {0};
	int nr_cap = 0;
	int i;
	int ret = 0;

	ret = ssd_get_bmstruct(dev, &bm_status);
	if (ret) {
		goto out;
	}

	/* capacitor voltage: any cell under the minimum is a warning */
	ret = ssd_bm_nr_cap(dev, &nr_cap);
	if (ret) {
		goto out;
	}

	for (i=0; i<nr_cap; i++) {
		if (bm_status.cap_volt[i] < SSD_BM_CAP_VOLT_MIN) {
			*status = SSD_BMSTATUS_WARNING;
			goto out;
		}
	}

	/* Safety Status: any latched fault bit is a warning */
	if (bm_status.sf_status) {
		*status = SSD_BMSTATUS_WARNING;
		goto out;
	}

	/* charge status: bit 12 clear means the module is still charging */
	if (!((bm_status.op_status >> 12) & 0x1)) {
		*status = SSD_BMSTATUS_CHARGING;
	}else{
		*status = SSD_BMSTATUS_OK;
	}

out:
	return ret;
}
6799 | ||
6800 | static void ssd_set_flush_timeout(struct ssd_device *dev, int mode); | |
6801 | ||
/*
 * Deferred-work handler: check whether the battery module needs a
 * capacitance-learning cycle and start one if so.
 *
 * Runs only in standard driver mode, on protocol >= v3.1.1, and only
 * when the power-loss-protection type is SSD_PLP_SCAP. The pre-2.6.20
 * work API passes the device as a raw data pointer; newer kernels
 * recover it with container_of() from the embedded bm_work.
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
static void ssd_bm_worker(void *data)
{
	struct ssd_device *dev = (struct ssd_device *)data;
#else
static void ssd_bm_worker(struct work_struct *work)
{
	struct ssd_device *dev = container_of(work, struct ssd_device, bm_work);
#endif

	uint16_t opstatus;
	int ret = 0;

	if (mode != SSD_DRV_MODE_STANDARD) {
		return;
	}

	if (dev->protocol_info.ver < SSD_PROTOCOL_V3_1_1) {
		return;
	}

	if (dev->hw_info_ext.plp_type != SSD_PLP_SCAP) {
		return;
	}

	ret = ssd_bm_get_opstatus(dev, &opstatus);
	if (ret) {
		hio_warn("%s: get bm operationstatus failed\n", dev->name);
		return;
	}

	/* no learning-state bits (0xF0) set: kick off capacitance learning */
	if (!(opstatus & 0xF0)) {
		ret = ssd_bm_enter_cap_learning(dev);
		if (ret) {
			hio_warn("%s: enter capacitance learning failed\n", dev->name);
			return;
		}
	}
}
6842 | ||
/*
 * Timer callback: queue the periodic battery/capacitor monitor work.
 *
 * Pre-4.15 kernels deliver the device via the timer data pointer;
 * 4.15+ uses from_timer() on the embedded bm_timer. Work is only
 * queued while the workqueue is marked initialized; protocol < v3.2
 * uses the BM worker, newer protocols the capacitor monitor worker.
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0))
static void ssd_bm_routine_start(void *data)
#else
static void ssd_bm_routine_start(struct timer_list *t)
#endif
{
	struct ssd_device *dev;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0))
	if (!data) {
		return;
	}
	dev = data;
#else
	dev = from_timer(dev, t, bm_timer);
#endif

	if (test_bit(SSD_INIT_WORKQ, &dev->state)) {
		if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
			queue_work(dev->workq, &dev->bm_work);
		} else {
			queue_work(dev->workq, &dev->capmon_work);
		}
	}
}
6868 | ||
6869 | /* CAP */ | |
/*
 * Run a hardware capacitance-learning cycle for the power-loss-
 * protection (PLP) capacitor and return the measured value in *cap.
 *
 * Sequence: wait until the capacitor is fully charged (LM80 voltage U1
 * above SSD_PL_CAP_VOLT_FULL), sample U1 and U2, trigger the learn via
 * SSD_PL_CAP_LEARN_REG and poll its busy bit, then compute capacitance
 * from the two voltages and the hardware-reported time.
 *
 * Returns 0 on success (or, on hardware without a PLP capacitor, with
 * *cap forced to 0), -ETIMEDOUT on charge/learn timeout, -EINVAL on
 * implausible readings, or a negative SMBus errno.
 */
static int ssd_do_cap_learn(struct ssd_device *dev, uint32_t *cap)
{
	uint32_t u1, u2, t;
	uint16_t val = 0;
	int wait = 0;
	int ret = 0;

	/* no PLP capacitor before protocol v3.2 */
	if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
		*cap = 0;
		return 0;
	}

	/* FHHL boards before PCB rev B lack the capacitor circuitry */
	if (dev->hw_info_ext.form_factor == SSD_FORM_FACTOR_FHHL && dev->hw_info.pcb_ver < 'B') {
		*cap = 0;
		return 0;
	}

	/* make sure the lm80 voltage value is updated */
	msleep(SSD_LM80_CONV_INTERVAL);

	/* check if full charged */
	wait = 0;
	for (;;) {
		ret = ssd_smbus_read_word(dev, SSD_SENSOR_LM80_SADDRESS, SSD_PL_CAP_U1, (uint8_t *)&val);
		if (ret) {
			/* log the sensor fault only on the first occurrence */
			if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80), &dev->hwmon)) {
				ssd_generate_sensor_fault_log(dev, SSD_LOG_SENSOR_FAULT, SSD_SENSOR_LM80_SADDRESS,ret);
			}
			goto out;
		}
		/* LM80 reports the word byte-swapped */
		u1 = SSD_LM80_CONVERT_VOLT(u16_swap(val));
		if (SSD_PL_CAP_VOLT(u1) >= SSD_PL_CAP_VOLT_FULL) {
			break;
		}

		wait++;
		if (wait > SSD_PL_CAP_CHARGE_MAX_WAIT) {
			ret = -ETIMEDOUT;
			goto out;
		}
		msleep(SSD_PL_CAP_CHARGE_WAIT);
	}

	ret = ssd_smbus_read_word(dev, SSD_SENSOR_LM80_SADDRESS, SSD_PL_CAP_U2, (uint8_t *)&val);
	if (ret) {
		if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80), &dev->hwmon)) {
			ssd_generate_sensor_fault_log(dev, SSD_LOG_SENSOR_FAULT, SSD_SENSOR_LM80_SADDRESS,ret);
		}
		goto out;
	}
	u2 = SSD_LM80_CONVERT_VOLT(u16_swap(val));

	/* identical voltages would make the learn computation degenerate */
	if (u1 == u2) {
		ret = -EINVAL;
		goto out;
	}

	/* enter cap learn */
	ssd_reg32_write(dev->ctrlp + SSD_PL_CAP_LEARN_REG, 0x1);

	/* poll the busy bit (bit 1) until the learn cycle completes */
	wait = 0;
	for (;;) {
		msleep(SSD_PL_CAP_LEARN_WAIT);

		t = ssd_reg32_read(dev->ctrlp + SSD_PL_CAP_LEARN_REG);
		if (!((t >> 1) & 0x1)) {
			break;
		}

		wait++;
		if (wait > SSD_PL_CAP_LEARN_MAX_WAIT) {
			ret = -ETIMEDOUT;
			goto out;
		}
	}

	/* bit 4 set: hardware flags the learn as timed out */
	if ((t >> 4) & 0x1) {
		ret = -ETIMEDOUT;
		goto out;
	}

	/* bits 8+ carry the measured time; zero is invalid */
	t = (t >> 8);
	if (0 == t) {
		ret = -EINVAL;
		goto out;
	}

	*cap = SSD_PL_CAP_LEARN(u1, u2, t);

out:
	return ret;
}
6962 | ||
/*
 * Public wrapper around ssd_do_cap_learn(): validates arguments,
 * serializes against other BM accesses via bm_mutex, and pauses the
 * background workqueue while the slow, sleeping learn cycle runs.
 * The outcome (value or fault) is recorded in the software event log.
 */
static int ssd_cap_learn(struct ssd_device *dev, uint32_t *cap)
{
	int ret = 0;

	if (!dev || !cap) {
		return -EINVAL;
	}

	mutex_lock(&dev->bm_mutex);

	/* stop periodic workers so they don't race with the learn cycle */
	ssd_stop_workq(dev);

	ret = ssd_do_cap_learn(dev, cap);
	if (ret) {
		ssd_gen_swlog(dev, SSD_LOG_CAP_LEARN_FAULT, 0);
		goto out;
	}

	ssd_gen_swlog(dev, SSD_LOG_CAP_STATUS, *cap);

out:
	ssd_start_workq(dev);
	mutex_unlock(&dev->bm_mutex);

	return ret;
}
6989 | ||
/*
 * Verify the power-loss-protection capacitor is charged and arm its
 * low-voltage monitoring in the LM80 sensor.
 *
 * Polls the capacitor voltage (U1) until it reaches the ready
 * threshold, then programs the low limit for the capacitor input and
 * enables that input channel. Errors are logged (sensor fault or
 * voltage fault) and returned, except in non-standard driver mode
 * where they are suppressed.
 */
static int ssd_check_pl_cap(struct ssd_device *dev)
{
	uint32_t u1;
	uint16_t val = 0;
	uint8_t low = 0;
	int wait = 0;
	int ret = 0;

	/* no PLP capacitor before protocol v3.2 */
	if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
		return 0;
	}

	/* FHHL boards before PCB rev B lack the capacitor circuitry */
	if (dev->hw_info_ext.form_factor == SSD_FORM_FACTOR_FHHL && dev->hw_info.pcb_ver < 'B') {
		return 0;
	}

	/* cap ready ? */
	wait = 0;
	for (;;) {
		ret = ssd_smbus_read_word(dev, SSD_SENSOR_LM80_SADDRESS, SSD_PL_CAP_U1, (uint8_t *)&val);
		if (ret) {
			/* log the sensor fault only on the first occurrence */
			if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80), &dev->hwmon)) {
				ssd_generate_sensor_fault_log(dev, SSD_LOG_SENSOR_FAULT, SSD_SENSOR_LM80_SADDRESS,ret);
			}
			goto out;
		}
		/* LM80 reports the word byte-swapped */
		u1 = SSD_LM80_CONVERT_VOLT(u16_swap(val));
		if (SSD_PL_CAP_VOLT(u1) >= SSD_PL_CAP_VOLT_READY) {
			break;
		}

		wait++;
		if (wait > SSD_PL_CAP_CHARGE_MAX_WAIT) {
			ret = -ETIMEDOUT;
			ssd_gen_swlog(dev, SSD_LOG_CAP_VOLT_FAULT, SSD_PL_CAP_VOLT(u1));
			goto out;
		}
		msleep(SSD_PL_CAP_CHARGE_WAIT);
	}

	/* program the capacitor channel's low-voltage alarm limit */
	low = ssd_lm80_limit[SSD_LM80_IN_CAP].low;
	ret = ssd_smbus_write_byte(dev, SSD_SENSOR_LM80_SADDRESS, SSD_LM80_REG_IN_MIN(SSD_LM80_IN_CAP), &low);
	if (ret) {
		goto out;
	}

	/* enable cap INx */
	ret = ssd_lm80_enable_in(dev, SSD_SENSOR_LM80_SADDRESS, SSD_LM80_IN_CAP);
	if (ret) {
		if (!test_and_set_bit(SSD_HWMON_SENSOR(SSD_SENSOR_LM80), &dev->hwmon)) {
			ssd_generate_sensor_fault_log(dev, SSD_LOG_SENSOR_FAULT, SSD_SENSOR_LM80_SADDRESS,ret);
		}
		goto out;
	}

out:
	/* skip error if not in standard mode */
	if (mode != SSD_DRV_MODE_STANDARD) {
		ret = 0;
	}
	return ret;
}
7052 | ||
7053 | static int ssd_check_pl_cap_fast(struct ssd_device *dev) | |
7054 | { | |
7055 | uint32_t u1; | |
7056 | uint16_t val = 0; | |
7057 | int ret = 0; | |
7058 | ||
7059 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
7060 | return 0; | |
7061 | } | |
7062 | ||
7063 | if (dev->hw_info_ext.form_factor == SSD_FORM_FACTOR_FHHL && dev->hw_info.pcb_ver < 'B') { | |
7064 | return 0; | |
7065 | } | |
7066 | ||
7067 | /* cap ready ? */ | |
7068 | ret = ssd_smbus_read_word(dev, SSD_SENSOR_LM80_SADDRESS, SSD_PL_CAP_U1, (uint8_t *)&val); | |
7069 | if (ret) { | |
7070 | goto out; | |
7071 | } | |
7072 | u1 = SSD_LM80_CONVERT_VOLT(u16_swap(val)); | |
7073 | if (SSD_PL_CAP_VOLT(u1) < SSD_PL_CAP_VOLT_READY) { | |
7074 | ret = 1; | |
7075 | } | |
7076 | ||
7077 | out: | |
7078 | return ret; | |
7079 | } | |
7080 | ||
7081 | static int ssd_init_pl_cap(struct ssd_device *dev) | |
7082 | { | |
7083 | int ret = 0; | |
7084 | ||
7085 | /* set here: user write mode */ | |
7086 | dev->user_wmode = wmode; | |
7087 | ||
7088 | mutex_init(&dev->bm_mutex); | |
7089 | ||
7090 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
7091 | uint32_t val; | |
7092 | val = ssd_reg32_read(dev->ctrlp + SSD_BM_FAULT_REG); | |
7093 | if ((val >> 1) & 0x1) { | |
7094 | (void)test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon); | |
7095 | } | |
7096 | } else { | |
7097 | ret = ssd_check_pl_cap(dev); | |
7098 | if (ret) { | |
7099 | (void)test_and_set_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon); | |
7100 | } | |
7101 | } | |
7102 | ||
7103 | return 0; | |
7104 | } | |
7105 | ||
7106 | /* label */ | |
/*
 * Ensure @str is NUL-terminated somewhere within its @len bytes.
 * If no terminator is found, the buffer content is considered invalid
 * and the string is cleared by writing a NUL at the first byte.
 */
static void __end_str(char *str, int len)
{
	char *p = str;
	char *end = str + len;

	while (p < end) {
		if (*p == '\0') {
			return;
		}
		p++;
	}

	str[0] = '\0';
}
7117 | ||
/*
 * Load the board label from SPI flash into the device structure.
 *
 * Protocol < v3.2 uses the legacy struct ssd_label layout; v3.2+ uses
 * struct ssd_labelv3. Every text field is forced to be NUL-terminated
 * (__end_str) since the flash contents are untrusted. On read failure
 * the in-memory copy is zeroed. The error is suppressed unless the
 * driver is running in standard mode.
 */
static int ssd_init_label(struct ssd_device *dev)
{
	uint32_t off;
	uint32_t size;
	int ret;

	/* label location */
	off = dev->rom_info.label_base;

	if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
		size = sizeof(struct ssd_label);

		/* read label */
		ret = ssd_spi_read(dev, &dev->label, off, size);
		if (ret) {
			memset(&dev->label, 0, size);
			goto out;
		}

		/* force-terminate every text field */
		__end_str(dev->label.date, SSD_LABEL_FIELD_SZ);
		__end_str(dev->label.sn, SSD_LABEL_FIELD_SZ);
		__end_str(dev->label.part, SSD_LABEL_FIELD_SZ);
		__end_str(dev->label.desc, SSD_LABEL_FIELD_SZ);
		__end_str(dev->label.other, SSD_LABEL_FIELD_SZ);
		__end_str(dev->label.maf, SSD_LABEL_FIELD_SZ);
	} else {
		size = sizeof(struct ssd_labelv3);

		/* read label */
		ret = ssd_spi_read(dev, &dev->labelv3, off, size);
		if (ret) {
			memset(&dev->labelv3, 0, size);
			goto out;
		}

		/* force-terminate every text field */
		__end_str(dev->labelv3.boardtype, SSD_LABEL_FIELD_SZ);
		__end_str(dev->labelv3.barcode, SSD_LABEL_FIELD_SZ);
		__end_str(dev->labelv3.item, SSD_LABEL_FIELD_SZ);
		__end_str(dev->labelv3.description, SSD_LABEL_DESC_SZ);
		__end_str(dev->labelv3.manufactured, SSD_LABEL_FIELD_SZ);
		__end_str(dev->labelv3.vendorname, SSD_LABEL_FIELD_SZ);
		__end_str(dev->labelv3.issuenumber, SSD_LABEL_FIELD_SZ);
		__end_str(dev->labelv3.cleicode, SSD_LABEL_FIELD_SZ);
		__end_str(dev->labelv3.bom, SSD_LABEL_FIELD_SZ);
	}

out:
	/* skip error if not in standard mode */
	if (mode != SSD_DRV_MODE_STANDARD) {
		ret = 0;
	}
	return ret;
}
7171 | ||
7172 | int ssd_get_label(struct block_device *bdev, struct ssd_label *label) | |
7173 | { | |
7174 | struct ssd_device *dev; | |
7175 | ||
7176 | if (!bdev || !label || !(bdev->bd_disk)) { | |
7177 | return -EINVAL; | |
7178 | } | |
7179 | ||
7180 | dev = bdev->bd_disk->private_data; | |
7181 | ||
7182 | if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) { | |
7183 | memset(label, 0, sizeof(struct ssd_label)); | |
7184 | memcpy(label->date, dev->labelv3.manufactured, SSD_LABEL_FIELD_SZ); | |
7185 | memcpy(label->sn, dev->labelv3.barcode, SSD_LABEL_FIELD_SZ); | |
7186 | memcpy(label->desc, dev->labelv3.boardtype, SSD_LABEL_FIELD_SZ); | |
7187 | memcpy(label->maf, dev->labelv3.vendorname, SSD_LABEL_FIELD_SZ); | |
7188 | } else { | |
7189 | memcpy(label, &dev->label, sizeof(struct ssd_label)); | |
7190 | } | |
7191 | ||
7192 | return 0; | |
7193 | } | |
7194 | ||
7195 | static int __ssd_get_version(struct ssd_device *dev, struct ssd_version_info *ver) | |
7196 | { | |
7197 | uint16_t bm_ver = 0; | |
7198 | int ret = 0; | |
7199 | ||
7200 | if (dev->protocol_info.ver > SSD_PROTOCOL_V3 && dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
7201 | ret = ssd_bm_get_version(dev, &bm_ver); | |
7202 | if(ret){ | |
7203 | goto out; | |
7204 | } | |
7205 | } | |
7206 | ||
7207 | ver->bridge_ver = dev->hw_info.bridge_ver; | |
7208 | ver->ctrl_ver = dev->hw_info.ctrl_ver; | |
7209 | ver->bm_ver = bm_ver; | |
7210 | ver->pcb_ver = dev->hw_info.pcb_ver; | |
7211 | ver->upper_pcb_ver = dev->hw_info.upper_pcb_ver; | |
7212 | ||
7213 | out: | |
7214 | return ret; | |
7215 | ||
7216 | } | |
7217 | ||
7218 | int ssd_get_version(struct block_device *bdev, struct ssd_version_info *ver) | |
7219 | { | |
7220 | struct ssd_device *dev; | |
7221 | int ret; | |
7222 | ||
7223 | if (!bdev || !ver || !(bdev->bd_disk)) { | |
7224 | return -EINVAL; | |
7225 | } | |
7226 | ||
7227 | dev = bdev->bd_disk->private_data; | |
7228 | ||
7229 | mutex_lock(&dev->fw_mutex); | |
7230 | ret = __ssd_get_version(dev, ver); | |
7231 | mutex_unlock(&dev->fw_mutex); | |
7232 | ||
7233 | return ret; | |
7234 | } | |
7235 | ||
/*
 * Report the highest controller temperature in *temp (units as decoded
 * by CUR_TEMP()).
 *
 * Protocol <= v3 has no temperature registers, so 0 is reported. With
 * fault injection enabled, a queued temperature debug event overrides
 * the hardware reading. Controllers whose register reads back all-ones
 * (unreadable) are skipped; if every controller is skipped, *temp
 * stays at the -300 sentinel floor.
 */
static int __ssd_get_temperature(struct ssd_device *dev, int *temp)
{
	uint64_t val;
	uint32_t off;
	int max = -300;
	int cur;
	int i;

	if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) {
		*temp = 0;
		return 0;
	}

	/* fault injection: a pending temperature event supplies the value */
	if (finject) {
		if (dev->db_info.type == SSD_DEBUG_LOG &&
			(dev->db_info.data.log.event == SSD_LOG_OVER_TEMP ||
			dev->db_info.data.log.event == SSD_LOG_NORMAL_TEMP ||
			dev->db_info.data.log.event == SSD_LOG_WARN_TEMP)) {
			*temp = (int)dev->db_info.data.log.extra;
			return 0;
		}
	}

	/* scan every controller's temperature register and keep the max */
	for (i=0; i<dev->hw_info.nr_ctrl; i++) {
		off = SSD_CTRL_TEMP_REG0 + i * sizeof(uint64_t);

		val = ssd_reg_read(dev->ctrlp + off);
		if (val == 0xffffffffffffffffull) {
			continue;
		}

		cur = (int)CUR_TEMP(val);
		if (cur >= max) {
			max = cur;
		}
	}

	*temp = max;

	return 0;
}
7277 | ||
7278 | int ssd_get_temperature(struct block_device *bdev, int *temp) | |
7279 | { | |
7280 | struct ssd_device *dev; | |
7281 | int ret; | |
7282 | ||
7283 | if (!bdev || !temp || !(bdev->bd_disk)) { | |
7284 | return -EINVAL; | |
7285 | } | |
7286 | ||
7287 | dev = bdev->bd_disk->private_data; | |
7288 | ||
7289 | ||
7290 | mutex_lock(&dev->fw_mutex); | |
7291 | ret = __ssd_get_temperature(dev, temp); | |
7292 | mutex_unlock(&dev->fw_mutex); | |
7293 | ||
7294 | return ret; | |
7295 | } | |
7296 | ||
7297 | int ssd_set_otprotect(struct block_device *bdev, int otprotect) | |
7298 | { | |
7299 | struct ssd_device *dev; | |
7300 | ||
7301 | if (!bdev || !(bdev->bd_disk)) { | |
7302 | return -EINVAL; | |
7303 | } | |
7304 | ||
7305 | dev = bdev->bd_disk->private_data; | |
7306 | ssd_set_ot_protect(dev, !!otprotect); | |
7307 | ||
7308 | return 0; | |
7309 | } | |
7310 | ||
7311 | int ssd_bm_status(struct block_device *bdev, int *status) | |
7312 | { | |
7313 | struct ssd_device *dev; | |
7314 | int ret = 0; | |
7315 | ||
7316 | if (!bdev || !status || !(bdev->bd_disk)) { | |
7317 | return -EINVAL; | |
7318 | } | |
7319 | ||
7320 | dev = bdev->bd_disk->private_data; | |
7321 | ||
7322 | mutex_lock(&dev->fw_mutex); | |
7323 | if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) { | |
7324 | if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) { | |
7325 | *status = SSD_BMSTATUS_WARNING; | |
7326 | } else { | |
7327 | *status = SSD_BMSTATUS_OK; | |
7328 | } | |
7329 | } else if(dev->protocol_info.ver > SSD_PROTOCOL_V3) { | |
7330 | ret = __ssd_bm_status(dev, status); | |
7331 | } else { | |
7332 | *status = SSD_BMSTATUS_OK; | |
7333 | } | |
7334 | mutex_unlock(&dev->fw_mutex); | |
7335 | ||
7336 | return ret; | |
7337 | } | |
7338 | ||
7339 | int ssd_get_pciaddr(struct block_device *bdev, struct pci_addr *paddr) | |
7340 | { | |
7341 | struct ssd_device *dev; | |
7342 | ||
7343 | if (!bdev || !paddr || !bdev->bd_disk) { | |
7344 | return -EINVAL; | |
7345 | } | |
7346 | ||
7347 | dev = bdev->bd_disk->private_data; | |
7348 | ||
7349 | paddr->domain = pci_domain_nr(dev->pdev->bus); | |
7350 | paddr->bus = dev->pdev->bus->number; | |
7351 | paddr->slot = PCI_SLOT(dev->pdev->devfn); | |
7352 | paddr->func= PCI_FUNC(dev->pdev->devfn); | |
7353 | ||
7354 | return 0; | |
7355 | } | |
7356 | ||
7357 | /* acc */ | |
7358 | static int ssd_bb_acc(struct ssd_device *dev, struct ssd_acc_info *acc) | |
7359 | { | |
7360 | uint32_t val; | |
7361 | int ctrl, chip; | |
7362 | ||
7363 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_1_1) { | |
7364 | return -EOPNOTSUPP; | |
7365 | } | |
7366 | ||
7367 | acc->threshold_l1 = ssd_reg32_read(dev->ctrlp + SSD_BB_THRESHOLD_L1_REG); | |
7368 | if (0xffffffffull == acc->threshold_l1) { | |
7369 | return -EIO; | |
7370 | } | |
7371 | acc->threshold_l2 = ssd_reg32_read(dev->ctrlp + SSD_BB_THRESHOLD_L2_REG); | |
7372 | if (0xffffffffull == acc->threshold_l2) { | |
7373 | return -EIO; | |
7374 | } | |
7375 | acc->val = 0; | |
7376 | ||
7377 | for (ctrl=0; ctrl<dev->hw_info.nr_ctrl; ctrl++) { | |
7378 | for (chip=0; chip<dev->hw_info.nr_chip; chip++) { | |
7379 | val = ssd_reg32_read(dev->ctrlp + SSD_BB_ACC_REG0 + (SSD_CTRL_REG_ZONE_SZ * ctrl) + (SSD_BB_ACC_REG_SZ * chip)); | |
7380 | if (0xffffffffull == acc->val) { | |
7381 | return -EIO; | |
7382 | } | |
7383 | if (val > acc->val) { | |
7384 | acc->val = val; | |
7385 | } | |
7386 | } | |
7387 | } | |
7388 | ||
7389 | return 0; | |
7390 | } | |
7391 | ||
7392 | static int ssd_ec_acc(struct ssd_device *dev, struct ssd_acc_info *acc) | |
7393 | { | |
7394 | uint32_t val; | |
7395 | int ctrl, chip; | |
7396 | ||
7397 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_1_1) { | |
7398 | return -EOPNOTSUPP; | |
7399 | } | |
7400 | ||
7401 | acc->threshold_l1 = ssd_reg32_read(dev->ctrlp + SSD_EC_THRESHOLD_L1_REG); | |
7402 | if (0xffffffffull == acc->threshold_l1) { | |
7403 | return -EIO; | |
7404 | } | |
7405 | acc->threshold_l2 = ssd_reg32_read(dev->ctrlp + SSD_EC_THRESHOLD_L2_REG); | |
7406 | if (0xffffffffull == acc->threshold_l2) { | |
7407 | return -EIO; | |
7408 | } | |
7409 | acc->val = 0; | |
7410 | ||
7411 | for (ctrl=0; ctrl<dev->hw_info.nr_ctrl; ctrl++) { | |
7412 | for (chip=0; chip<dev->hw_info.nr_chip; chip++) { | |
7413 | val = ssd_reg32_read(dev->ctrlp + SSD_EC_ACC_REG0 + (SSD_CTRL_REG_ZONE_SZ * ctrl) + (SSD_EC_ACC_REG_SZ * chip)); | |
7414 | if (0xffffffffull == acc->val) { | |
7415 | return -EIO; | |
7416 | } | |
7417 | ||
7418 | if (val > acc->val) { | |
7419 | acc->val = val; | |
7420 | } | |
7421 | } | |
7422 | } | |
7423 | ||
7424 | return 0; | |
7425 | } | |
7426 | ||
7427 | ||
7428 | /* ram r&w */ | |
/*
 * Read one DMA chunk (<= ram_max_len bytes) from controller RAM.
 *
 * Both @ofs and @length must be multiples of ram_align and fit inside
 * the controller RAM. The buffer is mapped for a single DMA transfer,
 * a SSD_FUNC_RAM_READ message is issued with start/length expressed in
 * ram_align units, and the mapping is released afterwards.
 * Returns 0 on success or a negative errno.
 */
static int ssd_ram_read_4k(struct ssd_device *dev, void *buf, size_t length, loff_t ofs, int ctrl_idx)
{
	struct ssd_ram_op_msg *msg;
	dma_addr_t buf_dma;
	size_t len = length;
	loff_t ofs_w = ofs;
	int ret = 0;

	/* bounds, alignment and chunk-size validation */
	if (ctrl_idx >= dev->hw_info.nr_ctrl || (uint64_t)(ofs + length) > dev->hw_info.ram_size
		|| !length || length > dev->hw_info.ram_max_len
		|| (length & (dev->hw_info.ram_align - 1)) != 0 || ((uint64_t)ofs & (dev->hw_info.ram_align - 1)) != 0) {
		return -EINVAL;
	}

	/* hardware expects start/length in ram_align units */
	len /= dev->hw_info.ram_align;
	do_div(ofs_w, dev->hw_info.ram_align);

	buf_dma = pci_map_single(dev->pdev, buf, length, PCI_DMA_FROMDEVICE);
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
	ret = dma_mapping_error(buf_dma);
#else
	ret = dma_mapping_error(&(dev->pdev->dev), buf_dma);
#endif
	if (ret) {
		hio_warn("%s: unable to map read DMA buffer\n", dev->name);
		goto out_dma_mapping;
	}

	msg = (struct ssd_ram_op_msg *)ssd_get_dmsg(dev);

	msg->fun = SSD_FUNC_RAM_READ;
	msg->ctrl_idx = ctrl_idx;
	msg->start = (uint32_t)ofs_w;
	msg->length = len;
	msg->buf = buf_dma;

	ret = ssd_do_request(dev, READ, msg, NULL);
	ssd_put_dmsg(msg);

	pci_unmap_single(dev->pdev, buf_dma, length, PCI_DMA_FROMDEVICE);

out_dma_mapping:
	return ret;
}
7473 | ||
/*
 * Write one DMA chunk (<= ram_max_len bytes) to controller RAM.
 *
 * Mirror image of ssd_ram_read_4k(): same bounds/alignment rules, but
 * the buffer is mapped to-device and a SSD_FUNC_RAM_WRITE message is
 * issued. Returns 0 on success or a negative errno.
 */
static int ssd_ram_write_4k(struct ssd_device *dev, void *buf, size_t length, loff_t ofs, int ctrl_idx)
{
	struct ssd_ram_op_msg *msg;
	dma_addr_t buf_dma;
	size_t len = length;
	loff_t ofs_w = ofs;
	int ret = 0;

	/* bounds, alignment and chunk-size validation */
	if (ctrl_idx >= dev->hw_info.nr_ctrl || (uint64_t)(ofs + length) > dev->hw_info.ram_size
		|| !length || length > dev->hw_info.ram_max_len
		|| (length & (dev->hw_info.ram_align - 1)) != 0 || ((uint64_t)ofs & (dev->hw_info.ram_align - 1)) != 0) {
		return -EINVAL;
	}

	/* hardware expects start/length in ram_align units */
	len /= dev->hw_info.ram_align;
	do_div(ofs_w, dev->hw_info.ram_align);

	buf_dma = pci_map_single(dev->pdev, buf, length, PCI_DMA_TODEVICE);
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
	ret = dma_mapping_error(buf_dma);
#else
	ret = dma_mapping_error(&(dev->pdev->dev), buf_dma);
#endif
	if (ret) {
		hio_warn("%s: unable to map write DMA buffer\n", dev->name);
		goto out_dma_mapping;
	}

	msg = (struct ssd_ram_op_msg *)ssd_get_dmsg(dev);

	msg->fun = SSD_FUNC_RAM_WRITE;
	msg->ctrl_idx = ctrl_idx;
	msg->start = (uint32_t)ofs_w;
	msg->length = len;
	msg->buf = buf_dma;

	ret = ssd_do_request(dev, WRITE, msg, NULL);
	ssd_put_dmsg(msg);

	pci_unmap_single(dev->pdev, buf_dma, length, PCI_DMA_TODEVICE);

out_dma_mapping:
	return ret;

}
7519 | ||
7520 | static int ssd_ram_read(struct ssd_device *dev, void *buf, size_t length, loff_t ofs, int ctrl_idx) | |
7521 | { | |
7522 | int left = length; | |
7523 | size_t len; | |
7524 | loff_t off = ofs; | |
7525 | int ret = 0; | |
7526 | ||
7527 | if (ctrl_idx >= dev->hw_info.nr_ctrl || (uint64_t)(ofs + length) > dev->hw_info.ram_size || !length | |
7528 | || (length & (dev->hw_info.ram_align - 1)) != 0 || ((uint64_t)ofs & (dev->hw_info.ram_align - 1)) != 0) { | |
7529 | return -EINVAL; | |
7530 | } | |
7531 | ||
7532 | while (left > 0) { | |
7533 | len = dev->hw_info.ram_max_len; | |
7534 | if (left < (int)dev->hw_info.ram_max_len) { | |
7535 | len = left; | |
7536 | } | |
7537 | ||
7538 | ret = ssd_ram_read_4k(dev, buf, len, off, ctrl_idx); | |
7539 | if (ret) { | |
7540 | break; | |
7541 | } | |
7542 | ||
7543 | left -= len; | |
7544 | off += len; | |
7545 | buf += len; | |
7546 | } | |
7547 | ||
7548 | return ret; | |
7549 | } | |
7550 | ||
7551 | static int ssd_ram_write(struct ssd_device *dev, void *buf, size_t length, loff_t ofs, int ctrl_idx) | |
7552 | { | |
7553 | int left = length; | |
7554 | size_t len; | |
7555 | loff_t off = ofs; | |
7556 | int ret = 0; | |
7557 | ||
7558 | if (ctrl_idx >= dev->hw_info.nr_ctrl || (uint64_t)(ofs + length) > dev->hw_info.ram_size || !length | |
7559 | || (length & (dev->hw_info.ram_align - 1)) != 0 || ((uint64_t)ofs & (dev->hw_info.ram_align - 1)) != 0) { | |
7560 | return -EINVAL; | |
7561 | } | |
7562 | ||
7563 | while (left > 0) { | |
7564 | len = dev->hw_info.ram_max_len; | |
7565 | if (left < (int)dev->hw_info.ram_max_len) { | |
7566 | len = left; | |
7567 | } | |
7568 | ||
7569 | ret = ssd_ram_write_4k(dev, buf, len, off, ctrl_idx); | |
7570 | if (ret) { | |
7571 | break; | |
7572 | } | |
7573 | ||
7574 | left -= len; | |
7575 | off += len; | |
7576 | buf += len; | |
7577 | } | |
7578 | ||
7579 | return ret; | |
7580 | } | |
7581 | ||
7582 | ||
7583 | /* flash op */ | |
7584 | static int ssd_check_flash(struct ssd_device *dev, int flash, int page, int ctrl_idx) | |
7585 | { | |
7586 | int cur_ch = flash % dev->hw_info.max_ch; | |
7587 | int cur_chip = flash /dev->hw_info.max_ch; | |
7588 | ||
7589 | if (ctrl_idx >= dev->hw_info.nr_ctrl) { | |
7590 | return -EINVAL; | |
7591 | } | |
7592 | ||
7593 | if (cur_ch >= dev->hw_info.nr_ch || cur_chip >= dev->hw_info.nr_chip) { | |
7594 | return -EINVAL; | |
7595 | } | |
7596 | ||
7597 | if (page >= (int)(dev->hw_info.block_count * dev->hw_info.page_count)) { | |
7598 | return -EINVAL; | |
7599 | } | |
7600 | return 0; | |
7601 | } | |
7602 | ||
/*
 * Read the NAND ID bytes of one flash die into 'id' (a buffer of at
 * least SSD_NAND_ID_BUFF_SZ bytes) via a controller message.
 *
 * @dev:      owning device
 * @id:       kernel buffer receiving the ID data (DMA-mapped here)
 * @flash:    flash index as used by protocol >= V3
 * @chip:     chip-enable within the flash package
 * @ctrl_idx: flash controller index
 *
 * Returns 0 on success or a negative errno.
 */
static int ssd_nand_read_id(struct ssd_device *dev, void *id, int flash, int chip, int ctrl_idx)
{
	struct ssd_nand_op_msg *msg;
	dma_addr_t buf_dma;
	int ret = 0;

	if (unlikely(!id))
		return -EINVAL;

	buf_dma = pci_map_single(dev->pdev, id, SSD_NAND_ID_BUFF_SZ, PCI_DMA_FROMDEVICE);
	/* kernels <= 2.6.26 take no device argument in dma_mapping_error() */
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
	ret = dma_mapping_error(buf_dma);
#else
	ret = dma_mapping_error(&(dev->pdev->dev), buf_dma);
#endif
	if (ret) {
		hio_warn("%s: unable to map read DMA buffer\n", dev->name);
		goto out_dma_mapping;
	}

	/* pre-V3 firmware folds the chip-enable into bit 0 of the flash number */
	if (dev->protocol_info.ver < SSD_PROTOCOL_V3) {
		flash = ((uint32_t)flash << 1) | (uint32_t)chip;
		chip = 0;
	}

	msg = (struct ssd_nand_op_msg *)ssd_get_dmsg(dev);

	msg->fun = SSD_FUNC_NAND_READ_ID;
	msg->chip_no = flash;
	msg->chip_ce = chip;
	msg->ctrl_idx = ctrl_idx;
	msg->buf = buf_dma;

	ret = ssd_do_request(dev, READ, msg, NULL);
	ssd_put_dmsg(msg);

	pci_unmap_single(dev->pdev, buf_dma, SSD_NAND_ID_BUFF_SZ, PCI_DMA_FROMDEVICE);

out_dma_mapping:
	return ret;
}
7644 | ||
/* NOTE(review): legacy raw NAND read (data only, no OOB), disabled with
 * #if 0 — kept for reference.  The active read path is
 * ssd_nand_read_w_oob() below. */
#if 0
static int ssd_nand_read(struct ssd_device *dev, void *buf,
	int flash, int chip, int page, int page_count, int ctrl_idx)
{
	struct ssd_nand_op_msg *msg;
	dma_addr_t buf_dma;
	int length;
	int ret = 0;

	if (!buf) {
		return -EINVAL;
	}

	if ((page + page_count) > dev->hw_info.block_count*dev->hw_info.page_count) {
		return -EINVAL;
	}

	ret = ssd_check_flash(dev, flash, page, ctrl_idx);
	if (ret) {
		return ret;
	}

	length = page_count * dev->hw_info.page_size;

	buf_dma = pci_map_single(dev->pdev, buf, length, PCI_DMA_FROMDEVICE);
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
	ret = dma_mapping_error(buf_dma);
#else
	ret = dma_mapping_error(&(dev->pdev->dev), buf_dma);
#endif
	if (ret) {
		hio_warn("%s: unable to map read DMA buffer\n", dev->name);
		goto out_dma_mapping;
	}

	if (dev->protocol_info.ver < SSD_PROTOCOL_V3) {
		flash = (flash << 1) | chip;
		chip = 0;
	}

	msg = (struct ssd_nand_op_msg *)ssd_get_dmsg(dev);

	msg->fun = SSD_FUNC_NAND_READ;
	msg->ctrl_idx = ctrl_idx;
	msg->chip_no = flash;
	msg->chip_ce = chip;
	msg->page_no = page;
	msg->page_count = page_count;
	msg->buf = buf_dma;

	ret = ssd_do_request(dev, READ, msg, NULL);
	ssd_put_dmsg(msg);

	pci_unmap_single(dev->pdev, buf_dma, length, PCI_DMA_FROMDEVICE);

out_dma_mapping:
	return ret;
}
#endif
7704 | ||
/*
 * Read 'count' NAND pages together with their OOB (spare) areas into
 * 'buf'.  The buffer must hold count * (page_size + oob_size) bytes.
 *
 * Returns 0 on success or a negative errno.
 */
static int ssd_nand_read_w_oob(struct ssd_device *dev, void *buf,
	int flash, int chip, int page, int count, int ctrl_idx)
{
	struct ssd_nand_op_msg *msg;
	dma_addr_t buf_dma;
	int length;
	int ret = 0;

	if (!buf) {
		return -EINVAL;
	}

	/* the request must not run past the end of the chip's page space */
	if ((page + count) > (int)(dev->hw_info.block_count * dev->hw_info.page_count)) {
		return -EINVAL;
	}

	ret = ssd_check_flash(dev, flash, page, ctrl_idx);
	if (ret) {
		return ret;
	}

	/* each page transfers its data area plus its OOB area */
	length = count * (dev->hw_info.page_size + dev->hw_info.oob_size);

	buf_dma = pci_map_single(dev->pdev, buf, length, PCI_DMA_FROMDEVICE);
	/* kernels <= 2.6.26 take no device argument in dma_mapping_error() */
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26))
	ret = dma_mapping_error(buf_dma);
#else
	ret = dma_mapping_error(&(dev->pdev->dev), buf_dma);
#endif
	if (ret) {
		hio_warn("%s: unable to map read DMA buffer\n", dev->name);
		goto out_dma_mapping;
	}

	/* pre-V3 firmware folds the chip-enable into bit 0 of the flash number */
	if (dev->protocol_info.ver < SSD_PROTOCOL_V3) {
		flash = ((uint32_t)flash << 1) | (uint32_t)chip;
		chip = 0;
	}

	msg = (struct ssd_nand_op_msg *)ssd_get_dmsg(dev);

	msg->fun = SSD_FUNC_NAND_READ_WOOB;
	msg->ctrl_idx = ctrl_idx;
	msg->chip_no = flash;
	msg->chip_ce = chip;
	msg->page_no = page;
	msg->page_count = count;
	msg->buf = buf_dma;

	ret = ssd_do_request(dev, READ, msg, NULL);
	ssd_put_dmsg(msg);

	pci_unmap_single(dev->pdev, buf_dma, length, PCI_DMA_FROMDEVICE);

out_dma_mapping:
	return ret;
}
7762 | ||
7763 | /* write 1 page */ | |
7764 | static int ssd_nand_write(struct ssd_device *dev, void *buf, | |
7765 | int flash, int chip, int page, int count, int ctrl_idx) | |
7766 | { | |
7767 | struct ssd_nand_op_msg *msg; | |
7768 | dma_addr_t buf_dma; | |
7769 | int length; | |
7770 | int ret = 0; | |
7771 | ||
7772 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3) { | |
7773 | return -EINVAL; | |
7774 | } | |
7775 | ||
7776 | if (!buf) { | |
7777 | return -EINVAL; | |
7778 | } | |
7779 | ||
7780 | if (count != 1) { | |
7781 | return -EINVAL; | |
7782 | } | |
7783 | ||
7784 | ret = ssd_check_flash(dev, flash, page, ctrl_idx); | |
7785 | if (ret) { | |
7786 | return ret; | |
7787 | } | |
7788 | ||
7789 | length = count * (dev->hw_info.page_size + dev->hw_info.oob_size); | |
7790 | ||
7791 | /* write data to ram */ | |
7792 | /*ret = ssd_ram_write(dev, buf, length, dev->hw_info.nand_wbuff_base, ctrl_idx); | |
7793 | if (ret) { | |
7794 | return ret; | |
7795 | }*/ | |
7796 | ||
7797 | buf_dma = pci_map_single(dev->pdev, buf, length, PCI_DMA_TODEVICE); | |
7798 | #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26)) | |
7799 | ret = dma_mapping_error(buf_dma); | |
7800 | #else | |
7801 | ret = dma_mapping_error(&(dev->pdev->dev), buf_dma); | |
7802 | #endif | |
7803 | if (ret) { | |
7804 | hio_warn("%s: unable to map write DMA buffer\n", dev->name); | |
7805 | goto out_dma_mapping; | |
7806 | } | |
7807 | ||
7808 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3) { | |
7809 | flash = ((uint32_t)flash << 1) | (uint32_t)chip; | |
7810 | chip = 0; | |
7811 | } | |
7812 | ||
7813 | msg = (struct ssd_nand_op_msg *)ssd_get_dmsg(dev); | |
7814 | ||
7815 | msg->fun = SSD_FUNC_NAND_WRITE; | |
7816 | msg->ctrl_idx = ctrl_idx; | |
7817 | msg->chip_no = flash; | |
7818 | msg->chip_ce = chip; | |
7819 | ||
7820 | msg->page_no = page; | |
7821 | msg->page_count = count; | |
7822 | msg->buf = buf_dma; | |
7823 | ||
7824 | ret = ssd_do_request(dev, WRITE, msg, NULL); | |
7825 | ssd_put_dmsg(msg); | |
7826 | ||
7827 | pci_unmap_single(dev->pdev, buf_dma, length, PCI_DMA_TODEVICE); | |
7828 | ||
7829 | out_dma_mapping: | |
7830 | return ret; | |
7831 | } | |
7832 | ||
7833 | static int ssd_nand_erase(struct ssd_device *dev, int flash, int chip, int page, int ctrl_idx) | |
7834 | { | |
7835 | struct ssd_nand_op_msg *msg; | |
7836 | int ret = 0; | |
7837 | ||
7838 | ret = ssd_check_flash(dev, flash, page, ctrl_idx); | |
7839 | if (ret) { | |
7840 | return ret; | |
7841 | } | |
7842 | ||
7843 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3) { | |
7844 | flash = ((uint32_t)flash << 1) | (uint32_t)chip; | |
7845 | chip = 0; | |
7846 | } | |
7847 | ||
7848 | msg = (struct ssd_nand_op_msg *)ssd_get_dmsg(dev); | |
7849 | ||
7850 | msg->fun = SSD_FUNC_NAND_ERASE; | |
7851 | msg->ctrl_idx = ctrl_idx; | |
7852 | msg->chip_no = flash; | |
7853 | msg->chip_ce = chip; | |
7854 | msg->page_no = page; | |
7855 | ||
7856 | ret = ssd_do_request(dev, WRITE, msg, NULL); | |
7857 | ssd_put_dmsg(msg); | |
7858 | ||
7859 | return ret; | |
7860 | } | |
7861 | ||
/*
 * Ask the firmware to flush/update the bad-block table of one flash
 * (flush function with flag 0x1; plain cache flush uses flag 0).
 * Pre-V3 protocol uses the packed ssd_flush_msg layout, overlaid on
 * the same DMA message buffer; V3+ reuses the NAND op message.
 *
 * Returns 0 on success or a negative errno.
 */
static int ssd_update_bbt(struct ssd_device *dev, int flash, int ctrl_idx)
{
	struct ssd_nand_op_msg *msg;
	struct ssd_flush_msg *fmsg;
	int ret = 0;

	ret = ssd_check_flash(dev, flash, 0, ctrl_idx);
	if (ret) {
		return ret;
	}

	msg = (struct ssd_nand_op_msg *)ssd_get_dmsg(dev);

	if (dev->protocol_info.ver < SSD_PROTOCOL_V3) {
		/* overlay the legacy flush-message layout on the op message */
		fmsg = (struct ssd_flush_msg *)msg;

		fmsg->fun = SSD_FUNC_FLUSH;
		fmsg->flag = 0x1;
		fmsg->flash = flash;
		fmsg->ctrl_idx = ctrl_idx;
	} else {
		msg->fun = SSD_FUNC_FLUSH;
		msg->flag = 0x1;
		msg->chip_no = flash;
		msg->ctrl_idx = ctrl_idx;
	}

	ret = ssd_do_request(dev, WRITE, msg, NULL);
	ssd_put_dmsg(msg);

	return ret;
}
7894 | ||
7895 | /* flash controller init state */ | |
7896 | static int __ssd_check_init_state(struct ssd_device *dev) | |
7897 | { | |
7898 | uint32_t *init_state = NULL; | |
7899 | int reg_base, reg_sz; | |
7900 | int max_wait = SSD_INIT_MAX_WAIT; | |
7901 | int init_wait = 0; | |
7902 | int i, j, k; | |
7903 | int ch_start = 0; | |
7904 | ||
7905 | /* | |
7906 | for (i=0; i<dev->hw_info.nr_ctrl; i++) { | |
7907 | ssd_reg32_write(dev->ctrlp + SSD_CTRL_TEST_REG0 + i * 8, test_data); | |
7908 | read_data = ssd_reg32_read(dev->ctrlp + SSD_CTRL_TEST_REG0 + i * 8); | |
7909 | if (read_data == ~test_data) { | |
7910 | //dev->hw_info.nr_ctrl++; | |
7911 | dev->hw_info.nr_ctrl_map |= 1<<i; | |
7912 | } | |
7913 | } | |
7914 | */ | |
7915 | ||
7916 | /* | |
7917 | read_data = ssd_reg32_read(dev->ctrlp + SSD_READY_REG); | |
7918 | j=0; | |
7919 | for (i=0; i<dev->hw_info.nr_ctrl; i++) { | |
7920 | if (((read_data>>i) & 0x1) == 0) { | |
7921 | j++; | |
7922 | } | |
7923 | } | |
7924 | ||
7925 | if (dev->hw_info.nr_ctrl != j) { | |
7926 | printk(KERN_WARNING "%s: nr_ctrl mismatch: %d %d\n", dev->name, dev->hw_info.nr_ctrl, j); | |
7927 | return -1; | |
7928 | } | |
7929 | */ | |
7930 | ||
7931 | /* | |
7932 | init_state = ssd_reg_read(dev->ctrlp + SSD_FLASH_INFO_REG0); | |
7933 | for (j=1; j<dev->hw_info.nr_ctrl;j++) { | |
7934 | if (init_state != ssd_reg_read(dev->ctrlp + SSD_FLASH_INFO_REG0 + j*8)) { | |
7935 | printk(KERN_WARNING "SSD_FLASH_INFO_REG[%d], not match\n", j); | |
7936 | return -1; | |
7937 | } | |
7938 | } | |
7939 | */ | |
7940 | ||
7941 | /* init_state = ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0); | |
7942 | for (j=1; j<dev->hw_info.nr_ctrl; j++) { | |
7943 | if (init_state != ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0 + j*16)) { | |
7944 | printk(KERN_WARNING "SSD_CHIP_INFO_REG Lo [%d], not match\n", j); | |
7945 | return -1; | |
7946 | } | |
7947 | } | |
7948 | ||
7949 | init_state = ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0 + 8); | |
7950 | for (j=1; j<dev->hw_info.nr_ctrl; j++) { | |
7951 | if (init_state != ssd_reg_read(dev->ctrlp + SSD_CHIP_INFO_REG0 + 8 + j*16)) { | |
7952 | printk(KERN_WARNING "SSD_CHIP_INFO_REG Hi [%d], not match\n", j); | |
7953 | return -1; | |
7954 | } | |
7955 | } | |
7956 | */ | |
7957 | ||
7958 | if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) { | |
7959 | max_wait = SSD_INIT_MAX_WAIT_V3_2; | |
7960 | } | |
7961 | ||
7962 | reg_base = dev->protocol_info.init_state_reg; | |
7963 | reg_sz = dev->protocol_info.init_state_reg_sz; | |
7964 | ||
7965 | init_state = (uint32_t *)kmalloc(reg_sz, GFP_KERNEL); | |
7966 | if (!init_state) { | |
7967 | return -ENOMEM; | |
7968 | } | |
7969 | ||
7970 | for (i=0; i<dev->hw_info.nr_ctrl; i++) { | |
7971 | check_init: | |
7972 | for (j=0, k=0; j<reg_sz; j+=sizeof(uint32_t), k++) { | |
7973 | init_state[k] = ssd_reg32_read(dev->ctrlp + reg_base + j); | |
7974 | } | |
7975 | ||
7976 | if (dev->protocol_info.ver > SSD_PROTOCOL_V3) { | |
7977 | /* just check the last bit, no need to check all channel */ | |
7978 | ch_start = dev->hw_info.max_ch - 1; | |
7979 | } else { | |
7980 | ch_start = 0; | |
7981 | } | |
7982 | ||
7983 | for (j=0; j<dev->hw_info.nr_chip; j++) { | |
7984 | for (k=ch_start; k<dev->hw_info.max_ch; k++) { | |
7985 | if (test_bit((j*dev->hw_info.max_ch + k), (void *)init_state)) { | |
7986 | continue; | |
7987 | } | |
7988 | ||
7989 | init_wait++; | |
7990 | if (init_wait <= max_wait) { | |
7991 | msleep(SSD_INIT_WAIT); | |
7992 | goto check_init; | |
7993 | } else { | |
7994 | if (k < dev->hw_info.nr_ch) { | |
7995 | hio_warn("%s: controller %d chip %d ch %d init failed\n", | |
7996 | dev->name, i, j, k); | |
7997 | } else { | |
7998 | hio_warn("%s: controller %d chip %d init failed\n", | |
7999 | dev->name, i, j); | |
8000 | } | |
8001 | ||
8002 | kfree(init_state); | |
8003 | return -1; | |
8004 | } | |
8005 | } | |
8006 | } | |
8007 | reg_base += reg_sz; | |
8008 | } | |
8009 | //printk(KERN_WARNING "%s: init wait %d\n", dev->name, init_wait); | |
8010 | ||
8011 | kfree(init_state); | |
8012 | return 0; | |
8013 | } | |
8014 | ||
8015 | static int ssd_check_init_state(struct ssd_device *dev) | |
8016 | { | |
8017 | if (mode != SSD_DRV_MODE_STANDARD) { | |
8018 | return 0; | |
8019 | } | |
8020 | ||
8021 | return __ssd_check_init_state(dev); | |
8022 | } | |
8023 | ||
8024 | static void ssd_reset_resp_ptr(struct ssd_device *dev); | |
8025 | ||
/* reset flash controller etc */
/*
 * Perform a controller reset, serialized via fw_mutex.
 *
 * @type: SSD_RST_NOINIT (reset without re-init), SSD_RST_NORMAL
 *        (reset + init) or SSD_RST_FULL (protocol >= V3.2 only).
 *
 * After the reset the flush timeout is reprogrammed for the current
 * write mode, a reset event is logged, and the reset timestamp stored.
 * Returns the result of re-checking the controller init state, or a
 * negative errno for an invalid request.
 */
static int __ssd_reset(struct ssd_device *dev, int type)
{
	if (type < SSD_RST_NOINIT || type > SSD_RST_FULL) {
		return -EINVAL;
	}

	mutex_lock(&dev->fw_mutex);

	if (type == SSD_RST_NOINIT) { //no init
		ssd_reg32_write(dev->ctrlp + SSD_RESET_REG, SSD_RESET_NOINIT);
	} else if (type == SSD_RST_NORMAL) { //reset & init
		ssd_reg32_write(dev->ctrlp + SSD_RESET_REG, SSD_RESET);
	} else { // full reset
		/* full reset requires protocol >= V3.2 */
		if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
			mutex_unlock(&dev->fw_mutex);
			return -EINVAL;
		}

		ssd_reg32_write(dev->ctrlp + SSD_FULL_RESET_REG, SSD_RESET_FULL);

		/* NOTE(review): response pointer reset presumably required after a
		 * full reset — original comment was just "??"; confirm with hw spec */
		ssd_reset_resp_ptr(dev);
	}

#ifdef SSD_OT_PROTECT
	/* clear any over-temperature throttling delay after the reset */
	dev->ot_delay = 0;
#endif

	/* give the controller time to come back */
	msleep(1000);

	/* the controller lost its flush timeout setting; restore it */
	ssd_set_flush_timeout(dev, dev->wmode);

	mutex_unlock(&dev->fw_mutex);
	ssd_gen_swlog(dev, SSD_LOG_RESET, (uint32_t)type);
	dev->reset_time = (uint64_t)ktime_get_real_seconds();

	return __ssd_check_init_state(dev);
}
8066 | ||
8067 | static int ssd_save_md(struct ssd_device *dev) | |
8068 | { | |
8069 | struct ssd_nand_op_msg *msg; | |
8070 | int ret = 0; | |
8071 | ||
8072 | if (unlikely(mode != SSD_DRV_MODE_STANDARD)) | |
8073 | return 0; | |
8074 | ||
8075 | if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) { | |
8076 | return 0; | |
8077 | } | |
8078 | ||
8079 | if (!dev->save_md) { | |
8080 | return 0; | |
8081 | } | |
8082 | ||
8083 | msg = (struct ssd_nand_op_msg *)ssd_get_dmsg(dev); | |
8084 | ||
8085 | msg->fun = SSD_FUNC_FLUSH; | |
8086 | msg->flag = 0x2; | |
8087 | msg->ctrl_idx = 0; | |
8088 | msg->chip_no = 0; | |
8089 | ||
8090 | ret = ssd_do_request(dev, WRITE, msg, NULL); | |
8091 | ssd_put_dmsg(msg); | |
8092 | ||
8093 | return ret; | |
8094 | } | |
8095 | ||
8096 | static int ssd_barrier_save_md(struct ssd_device *dev) | |
8097 | { | |
8098 | struct ssd_nand_op_msg *msg; | |
8099 | int ret = 0; | |
8100 | ||
8101 | if (unlikely(mode != SSD_DRV_MODE_STANDARD)) | |
8102 | return 0; | |
8103 | ||
8104 | if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) { | |
8105 | return 0; | |
8106 | } | |
8107 | ||
8108 | if (!dev->save_md) { | |
8109 | return 0; | |
8110 | } | |
8111 | ||
8112 | msg = (struct ssd_nand_op_msg *)ssd_get_dmsg(dev); | |
8113 | ||
8114 | msg->fun = SSD_FUNC_FLUSH; | |
8115 | msg->flag = 0x2; | |
8116 | msg->ctrl_idx = 0; | |
8117 | msg->chip_no = 0; | |
8118 | ||
8119 | ret = ssd_do_barrier_request(dev, WRITE, msg, NULL); | |
8120 | ssd_put_dmsg(msg); | |
8121 | ||
8122 | return ret; | |
8123 | } | |
8124 | ||
8125 | static int ssd_flush(struct ssd_device *dev) | |
8126 | { | |
8127 | struct ssd_nand_op_msg *msg; | |
8128 | struct ssd_flush_msg *fmsg; | |
8129 | int ret = 0; | |
8130 | ||
8131 | if (unlikely(mode != SSD_DRV_MODE_STANDARD)) | |
8132 | return 0; | |
8133 | ||
8134 | msg = (struct ssd_nand_op_msg *)ssd_get_dmsg(dev); | |
8135 | ||
8136 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3) { | |
8137 | fmsg = (struct ssd_flush_msg *)msg; | |
8138 | ||
8139 | fmsg->fun = SSD_FUNC_FLUSH; | |
8140 | fmsg->flag = 0; | |
8141 | fmsg->ctrl_idx = 0; | |
8142 | fmsg->flash = 0; | |
8143 | } else { | |
8144 | msg->fun = SSD_FUNC_FLUSH; | |
8145 | msg->flag = 0; | |
8146 | msg->ctrl_idx = 0; | |
8147 | msg->chip_no = 0; | |
8148 | } | |
8149 | ||
8150 | ret = ssd_do_request(dev, WRITE, msg, NULL); | |
8151 | ssd_put_dmsg(msg); | |
8152 | ||
8153 | return ret; | |
8154 | } | |
8155 | ||
8156 | static int ssd_barrier_flush(struct ssd_device *dev) | |
8157 | { | |
8158 | struct ssd_nand_op_msg *msg; | |
8159 | struct ssd_flush_msg *fmsg; | |
8160 | int ret = 0; | |
8161 | ||
8162 | if (unlikely(mode != SSD_DRV_MODE_STANDARD)) | |
8163 | return 0; | |
8164 | ||
8165 | msg = (struct ssd_nand_op_msg *)ssd_get_dmsg(dev); | |
8166 | ||
8167 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3) { | |
8168 | fmsg = (struct ssd_flush_msg *)msg; | |
8169 | ||
8170 | fmsg->fun = SSD_FUNC_FLUSH; | |
8171 | fmsg->flag = 0; | |
8172 | fmsg->ctrl_idx = 0; | |
8173 | fmsg->flash = 0; | |
8174 | } else { | |
8175 | msg->fun = SSD_FUNC_FLUSH; | |
8176 | msg->flag = 0; | |
8177 | msg->ctrl_idx = 0; | |
8178 | msg->chip_no = 0; | |
8179 | } | |
8180 | ||
8181 | ret = ssd_do_barrier_request(dev, WRITE, msg, NULL); | |
8182 | ssd_put_dmsg(msg); | |
8183 | ||
8184 | return ret; | |
8185 | } | |
8186 | ||
/* flush-timeout register payloads; the write mode is packed into the
 * top bits by ssd_set_flush_timeout() below */
#define SSD_WMODE_BUFFER_TIMEOUT 0x00c82710
#define SSD_WMODE_BUFFER_EX_TIMEOUT 0x000500c8
#define SSD_WMODE_FUA_TIMEOUT 0x000503E8
/*
 * Program the controller flush-timeout register for write mode 'm'.
 * The register value packs the mode into bits 29:28 and a mode-specific
 * timeout constant into the low bits.  Silently ignored on firmware
 * older than protocol V3.1.1 (no such register) and for unknown modes.
 */
static void ssd_set_flush_timeout(struct ssd_device *dev, int m)
{
	uint32_t to;
	uint32_t val = 0;

	if (dev->protocol_info.ver < SSD_PROTOCOL_V3_1_1) {
		return;
	}

	switch(m) {
	case SSD_WMODE_BUFFER:
		to = SSD_WMODE_BUFFER_TIMEOUT;
		break;
	case SSD_WMODE_BUFFER_EX:
		/* older firmware (< V3.2.1) uses the shorter buffer-ex timeout */
		if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2_1) {
			to = SSD_WMODE_BUFFER_EX_TIMEOUT;
		} else {
			to = SSD_WMODE_BUFFER_TIMEOUT;
		}
		break;
	case SSD_WMODE_FUA:
		to = SSD_WMODE_FUA_TIMEOUT;
		break;
	default:
		/* unknown mode: leave the register untouched */
		return;
	}

	val = (((uint32_t)((uint32_t)m & 0x3) << 28) | to);

	ssd_reg32_write(dev->ctrlp + SSD_FLUSH_TIMEOUT_REG, val);
}
8221 | ||
8222 | static int ssd_do_switch_wmode(struct ssd_device *dev, int m) | |
8223 | { | |
8224 | int ret = 0; | |
8225 | ||
8226 | ret = ssd_barrier_start(dev); | |
8227 | if (ret) { | |
8228 | goto out; | |
8229 | } | |
8230 | ||
8231 | ret = ssd_barrier_flush(dev); | |
8232 | if (ret) { | |
8233 | goto out_barrier_end; | |
8234 | } | |
8235 | ||
8236 | /* set contoller flush timeout */ | |
8237 | ssd_set_flush_timeout(dev, m); | |
8238 | ||
8239 | dev->wmode = m; | |
8240 | mb(); | |
8241 | ||
8242 | out_barrier_end: | |
8243 | ssd_barrier_end(dev); | |
8244 | out: | |
8245 | return ret; | |
8246 | } | |
8247 | ||
8248 | static int ssd_switch_wmode(struct ssd_device *dev, int m) | |
8249 | { | |
8250 | int default_wmode; | |
8251 | int next_wmode; | |
8252 | int ret = 0; | |
8253 | ||
8254 | if (!test_bit(SSD_ONLINE, &dev->state)) { | |
8255 | return -ENODEV; | |
8256 | } | |
8257 | ||
8258 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
8259 | default_wmode = SSD_WMODE_BUFFER; | |
8260 | } else { | |
8261 | default_wmode = SSD_WMODE_BUFFER_EX; | |
8262 | } | |
8263 | ||
8264 | if (SSD_WMODE_AUTO == m) { | |
8265 | /* battery fault ? */ | |
8266 | if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) { | |
8267 | next_wmode = SSD_WMODE_FUA; | |
8268 | } else { | |
8269 | next_wmode = default_wmode; | |
8270 | } | |
8271 | } else if (SSD_WMODE_DEFAULT == m) { | |
8272 | next_wmode = default_wmode; | |
8273 | } else { | |
8274 | next_wmode = m; | |
8275 | } | |
8276 | ||
8277 | if (next_wmode != dev->wmode) { | |
8278 | hio_warn("%s: switch write mode (%d -> %d)\n", dev->name, dev->wmode, next_wmode); | |
8279 | ret = ssd_do_switch_wmode(dev, next_wmode); | |
8280 | if (ret) { | |
8281 | hio_err("%s: can not switch write mode (%d -> %d)\n", dev->name, dev->wmode, next_wmode); | |
8282 | } | |
8283 | } | |
8284 | ||
8285 | return ret; | |
8286 | } | |
8287 | ||
8288 | static int ssd_init_wmode(struct ssd_device *dev) | |
8289 | { | |
8290 | int default_wmode; | |
8291 | int ret = 0; | |
8292 | ||
8293 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
8294 | default_wmode = SSD_WMODE_BUFFER; | |
8295 | } else { | |
8296 | default_wmode = SSD_WMODE_BUFFER_EX; | |
8297 | } | |
8298 | ||
8299 | /* dummy mode */ | |
8300 | if (SSD_WMODE_AUTO == dev->user_wmode) { | |
8301 | /* battery fault ? */ | |
8302 | if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) { | |
8303 | dev->wmode = SSD_WMODE_FUA; | |
8304 | } else { | |
8305 | dev->wmode = default_wmode; | |
8306 | } | |
8307 | } else if (SSD_WMODE_DEFAULT == dev->user_wmode) { | |
8308 | dev->wmode = default_wmode; | |
8309 | } else { | |
8310 | dev->wmode = dev->user_wmode; | |
8311 | } | |
8312 | ssd_set_flush_timeout(dev, dev->wmode); | |
8313 | ||
8314 | return ret; | |
8315 | } | |
8316 | ||
8317 | static int __ssd_set_wmode(struct ssd_device *dev, int m) | |
8318 | { | |
8319 | int ret = 0; | |
8320 | ||
8321 | /* not support old fw*/ | |
8322 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_1_1) { | |
8323 | ret = -EOPNOTSUPP; | |
8324 | goto out; | |
8325 | } | |
8326 | ||
8327 | if (m < SSD_WMODE_BUFFER || m > SSD_WMODE_DEFAULT) { | |
8328 | ret = -EINVAL; | |
8329 | goto out; | |
8330 | } | |
8331 | ||
8332 | ssd_gen_swlog(dev, SSD_LOG_SET_WMODE, m); | |
8333 | ||
8334 | dev->user_wmode = m; | |
8335 | ||
8336 | ret = ssd_switch_wmode(dev, dev->user_wmode); | |
8337 | if (ret) { | |
8338 | goto out; | |
8339 | } | |
8340 | ||
8341 | out: | |
8342 | return ret; | |
8343 | } | |
8344 | ||
8345 | int ssd_set_wmode(struct block_device *bdev, int m) | |
8346 | { | |
8347 | struct ssd_device *dev; | |
8348 | ||
8349 | if (!bdev || !(bdev->bd_disk)) { | |
8350 | return -EINVAL; | |
8351 | } | |
8352 | ||
8353 | dev = bdev->bd_disk->private_data; | |
8354 | ||
8355 | return __ssd_set_wmode(dev, m); | |
8356 | } | |
8357 | ||
8358 | static int ssd_do_reset(struct ssd_device *dev) | |
8359 | { | |
8360 | int ret = 0; | |
8361 | ||
8362 | if (test_and_set_bit(SSD_RESETING, &dev->state)) { | |
8363 | return 0; | |
8364 | } | |
8365 | ||
8366 | ssd_stop_workq(dev); | |
8367 | ||
8368 | ret = ssd_barrier_start(dev); | |
8369 | if (ret) { | |
8370 | goto out; | |
8371 | } | |
8372 | ||
8373 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
8374 | /* old reset */ | |
8375 | ret = __ssd_reset(dev, SSD_RST_NORMAL); | |
8376 | } else { | |
8377 | /* full reset */ | |
8378 | //ret = __ssd_reset(dev, SSD_RST_FULL); | |
8379 | ret = __ssd_reset(dev, SSD_RST_NORMAL); | |
8380 | } | |
8381 | if (ret) { | |
8382 | goto out_barrier_end; | |
8383 | } | |
8384 | ||
8385 | out_barrier_end: | |
8386 | ssd_barrier_end(dev); | |
8387 | out: | |
8388 | ssd_start_workq(dev); | |
8389 | test_and_clear_bit(SSD_RESETING, &dev->state); | |
8390 | return ret; | |
8391 | } | |
8392 | ||
8393 | static int ssd_full_reset(struct ssd_device *dev) | |
8394 | { | |
8395 | int ret = 0; | |
8396 | ||
8397 | if (test_and_set_bit(SSD_RESETING, &dev->state)) { | |
8398 | return 0; | |
8399 | } | |
8400 | ||
8401 | ssd_stop_workq(dev); | |
8402 | ||
8403 | ret = ssd_barrier_start(dev); | |
8404 | if (ret) { | |
8405 | goto out; | |
8406 | } | |
8407 | ||
8408 | ret = ssd_barrier_flush(dev); | |
8409 | if (ret) { | |
8410 | goto out_barrier_end; | |
8411 | } | |
8412 | ||
8413 | ret = ssd_barrier_save_md(dev); | |
8414 | if (ret) { | |
8415 | goto out_barrier_end; | |
8416 | } | |
8417 | ||
8418 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
8419 | /* old reset */ | |
8420 | ret = __ssd_reset(dev, SSD_RST_NORMAL); | |
8421 | } else { | |
8422 | /* full reset */ | |
8423 | //ret = __ssd_reset(dev, SSD_RST_FULL); | |
8424 | ret = __ssd_reset(dev, SSD_RST_NORMAL); | |
8425 | } | |
8426 | if (ret) { | |
8427 | goto out_barrier_end; | |
8428 | } | |
8429 | ||
8430 | out_barrier_end: | |
8431 | ssd_barrier_end(dev); | |
8432 | out: | |
8433 | ssd_start_workq(dev); | |
8434 | test_and_clear_bit(SSD_RESETING, &dev->state); | |
8435 | return ret; | |
8436 | } | |
8437 | ||
/*
 * Block-device entry point for a device reset: runs the full reset
 * sequence and, on success, triggers a firmware reload unless the
 * device uses non-0x98 register access.
 */
int ssd_reset(struct block_device *bdev)
{
	int ret;
	struct ssd_device *dev;

	if (!bdev || !(bdev->bd_disk)) {
		return -EINVAL;
	}

	dev = bdev->bd_disk->private_data;

	ret = ssd_full_reset(dev);
	if (!ret) {
		if (!dev->has_non_0x98_reg_access) {
			ssd_reg32_write(dev->ctrlp + SSD_RELOAD_FW_REG, 0);
		}
	}

	return ret ;
}
8458 | ||
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
/* Legacy (pre-2.6.20) block-layer flush hook: forward to ssd_flush(). */
static int ssd_issue_flush_fn(struct request_queue *q, struct gendisk *disk,
				sector_t *error_sector)
{
	struct ssd_device *dev = q->queuedata;

	return ssd_flush(dev);
}
#endif
8468 | ||
/*
 * Submit a "physical" bio to the device.  The bio is completed with an
 * error when the device is offline, when barrier/FUA semantics are
 * requested (unsupported on this path), or on a write to a read-only
 * device.  With SSD_QUEUE_PBIO, bios that cannot be issued immediately
 * are queued for the send thread.
 */
void ssd_submit_pbio(struct request_queue *q, struct bio *bio)
{
	struct ssd_device *dev = q->queuedata;
#ifdef SSD_QUEUE_PBIO
	int ret = -EBUSY;
#endif

	if (!test_bit(SSD_ONLINE, &dev->state)) {
		ssd_bio_endio(bio, -ENODEV);
		goto out;
	}

#ifdef SSD_DEBUG_ERR
	/* debug build: fail fast once an IO timeout has been observed */
	if (atomic_read(&dev->tocnt)) {
		hio_warn("%s: IO rejected because of IO timeout!\n", dev->name);
		ssd_bio_endio(bio, -EIO);
		goto out;
	}
#endif

	/* barrier/FUA bios are not supported on the pbio path */
	if (unlikely(ssd_bio_has_barrier_or_fua(bio))) {
		ssd_bio_endio(bio, -EOPNOTSUPP);
		goto out;
	}

	if (unlikely(dev->readonly && bio_data_dir(bio) == WRITE)) {
		ssd_bio_endio(bio, -EROFS);
		goto out;
	}

#ifdef SSD_QUEUE_PBIO
	/* try the direct path only when the send queue is empty */
	if (0 == atomic_read(&dev->in_sendq)) {
		ret = __ssd_submit_pbio(dev, bio, 0);
	}

	if (ret) {
		/* mark as pbio so the send thread uses the right submit path */
		(void)test_and_set_bit(BIO_SSD_PBIO, &bio->bi_flags);
		ssd_queue_bio(dev, bio);
	}
#else
	__ssd_submit_pbio(dev, bio, 1);
#endif

out:
	return;
}
8515 | ||
/*
 * Main block-layer submission entry point.  Name, signature and return
 * type vary with the kernel version (hio_submit_bio on >= 5.9,
 * ssd_make_request before that).  Splits oversized bios where the
 * kernel requires it, rejects bios the device cannot handle, completes
 * empty flushes immediately, and otherwise submits or queues the bio.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,9,0))
static blk_qc_t hio_submit_bio(struct bio *bio)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
static blk_qc_t ssd_make_request(struct request_queue *q, struct bio *bio)
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
static void ssd_make_request(struct request_queue *q, struct bio *bio)
#else
static int ssd_make_request(struct request_queue *q, struct bio *bio)
#endif
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,9,0))
	/* >= 5.9 no longer passes the queue; recover it from the bio */
	struct request_queue *q = bio->bi_disk->queue;
#endif
	struct ssd_device *dev = q->queuedata;
	int ret = -EBUSY;

	if (!test_bit(SSD_ONLINE, &dev->state)) {
		ssd_bio_endio(bio, -ENODEV);
		goto out;
	}

	/* let the block layer split bios that exceed the queue limits
	 * (API changed repeatedly across 4.3 / 4.13 / 5.9) */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,9,0))
	blk_queue_split(&bio);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,13,0))
	blk_queue_split(q, &bio);
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,3,0))
	blk_queue_split(q, &bio, q->bio_split);
#endif

#ifdef SSD_DEBUG_ERR
	/* debug build: fail fast once an IO timeout has been observed */
	if (atomic_read(&dev->tocnt)) {
		hio_warn("%s: IO rejected because of IO timeout!\n", dev->name);
		ssd_bio_endio(bio, -EIO);
		goto out;
	}
#endif

	/* barrier/FUA bios are not supported */
	if (unlikely(ssd_bio_has_barrier_or_fua(bio))) {
		ssd_bio_endio(bio, -EOPNOTSUPP);
		goto out;
	}

	/* writeback_cache_control.txt: REQ_FLUSH requests without data can be completed successfully without doing any work */
	if (unlikely(ssd_bio_has_flush(bio) && !bio_sectors(bio))) {
		ssd_bio_endio(bio, 0);
		goto out;
	}

	/* try the direct path only when the send queue is empty */
	if (0 == atomic_read(&dev->in_sendq)) {
		ret = ssd_submit_bio(dev, bio, 0);
	}

	if (ret) {
		ssd_queue_bio(dev, bio);
	}

out:
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0))
	return BLK_QC_T_NONE;
#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(3,2,0))
	return;
#else
	return 0;
#endif
}
8581 | ||
8582 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)) | |
8583 | static int ssd_block_getgeo(struct block_device *bdev, struct hd_geometry *geo) | |
8584 | { | |
8585 | struct ssd_device *dev; | |
8586 | ||
8587 | if (!bdev) { | |
8588 | return -EINVAL; | |
8589 | } | |
8590 | ||
8591 | dev = bdev->bd_disk->private_data; | |
8592 | if (!dev) { | |
8593 | return -EINVAL; | |
8594 | } | |
8595 | ||
8596 | geo->heads = 4; | |
8597 | geo->sectors = 16; | |
8598 | geo->cylinders = (dev->hw_info.size & ~0x3f) >> 6; | |
8599 | return 0; | |
8600 | } | |
8601 | #endif | |
8602 | ||
1197134c KM |
8603 | static int ssd_init_queue(struct ssd_device *dev); |
8604 | static void ssd_cleanup_queue(struct ssd_device *dev); | |
361ebed5 HSDT |
8605 | static void ssd_cleanup_blkdev(struct ssd_device *dev); |
8606 | static int ssd_init_blkdev(struct ssd_device *dev); | |
8607 | static int ssd_ioctl_common(struct ssd_device *dev, unsigned int cmd, unsigned long arg) | |
8608 | { | |
8609 | void __user *argp = (void __user *)arg; | |
8610 | void __user *buf = NULL; | |
8611 | void *kbuf = NULL; | |
8612 | int ret = 0; | |
8613 | ||
8614 | switch (cmd) { | |
8615 | case SSD_CMD_GET_PROTOCOL_INFO: | |
8616 | if (copy_to_user(argp, &dev->protocol_info, sizeof(struct ssd_protocol_info))) { | |
8617 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8618 | ret = -EFAULT; | |
8619 | break; | |
8620 | } | |
8621 | break; | |
8622 | ||
8623 | case SSD_CMD_GET_HW_INFO: | |
8624 | if (copy_to_user(argp, &dev->hw_info, sizeof(struct ssd_hw_info))) { | |
8625 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8626 | ret = -EFAULT; | |
8627 | break; | |
8628 | } | |
8629 | break; | |
8630 | ||
8631 | case SSD_CMD_GET_ROM_INFO: | |
8632 | if (copy_to_user(argp, &dev->rom_info, sizeof(struct ssd_rom_info))) { | |
8633 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8634 | ret = -EFAULT; | |
8635 | break; | |
8636 | } | |
8637 | break; | |
8638 | ||
8639 | case SSD_CMD_GET_SMART: { | |
8640 | struct ssd_smart smart; | |
8641 | int i; | |
8642 | ||
8643 | memcpy(&smart, &dev->smart, sizeof(struct ssd_smart)); | |
8644 | ||
8645 | mutex_lock(&dev->gd_mutex); | |
8646 | ssd_update_smart(dev, &smart); | |
8647 | mutex_unlock(&dev->gd_mutex); | |
8648 | ||
8649 | /* combine the volatile log info */ | |
8650 | if (dev->log_info.nr_log) { | |
8651 | for (i=0; i<SSD_LOG_NR_LEVEL; i++) { | |
8652 | smart.log_info.stat[i] += dev->log_info.stat[i]; | |
8653 | } | |
8654 | } | |
8655 | ||
8656 | if (copy_to_user(argp, &smart, sizeof(struct ssd_smart))) { | |
8657 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8658 | ret = -EFAULT; | |
8659 | break; | |
8660 | } | |
8661 | ||
8662 | break; | |
8663 | } | |
8664 | ||
8665 | case SSD_CMD_GET_IDX: | |
8666 | if (copy_to_user(argp, &dev->idx, sizeof(int))) { | |
8667 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8668 | ret = -EFAULT; | |
8669 | break; | |
8670 | } | |
8671 | break; | |
8672 | ||
8673 | case SSD_CMD_GET_AMOUNT: { | |
8674 | int nr_ssd = atomic_read(&ssd_nr); | |
8675 | if (copy_to_user(argp, &nr_ssd, sizeof(int))) { | |
8676 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8677 | ret = -EFAULT; | |
8678 | break; | |
8679 | } | |
8680 | break; | |
8681 | } | |
8682 | ||
8683 | case SSD_CMD_GET_TO_INFO: { | |
8684 | int tocnt = atomic_read(&dev->tocnt); | |
8685 | ||
8686 | if (copy_to_user(argp, &tocnt, sizeof(int))) { | |
8687 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8688 | ret = -EFAULT; | |
8689 | break; | |
8690 | } | |
8691 | break; | |
8692 | } | |
8693 | ||
8694 | case SSD_CMD_GET_DRV_VER: { | |
8695 | char ver[] = DRIVER_VERSION; | |
8696 | int len = sizeof(ver); | |
8697 | ||
8698 | if (len > (DRIVER_VERSION_LEN - 1)) { | |
8699 | len = (DRIVER_VERSION_LEN - 1); | |
8700 | } | |
8701 | if (copy_to_user(argp, ver, len)) { | |
8702 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8703 | ret = -EFAULT; | |
8704 | break; | |
8705 | } | |
8706 | break; | |
8707 | } | |
8708 | ||
8709 | case SSD_CMD_GET_BBACC_INFO: { | |
8710 | struct ssd_acc_info acc; | |
8711 | ||
8712 | mutex_lock(&dev->fw_mutex); | |
8713 | ret = ssd_bb_acc(dev, &acc); | |
8714 | mutex_unlock(&dev->fw_mutex); | |
8715 | if (ret) { | |
8716 | break; | |
8717 | } | |
8718 | ||
8719 | if (copy_to_user(argp, &acc, sizeof(struct ssd_acc_info))) { | |
8720 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8721 | ret = -EFAULT; | |
8722 | break; | |
8723 | } | |
8724 | break; | |
8725 | } | |
8726 | ||
8727 | case SSD_CMD_GET_ECACC_INFO: { | |
8728 | struct ssd_acc_info acc; | |
8729 | ||
8730 | mutex_lock(&dev->fw_mutex); | |
8731 | ret = ssd_ec_acc(dev, &acc); | |
8732 | mutex_unlock(&dev->fw_mutex); | |
8733 | if (ret) { | |
8734 | break; | |
8735 | } | |
8736 | ||
8737 | if (copy_to_user(argp, &acc, sizeof(struct ssd_acc_info))) { | |
8738 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8739 | ret = -EFAULT; | |
8740 | break; | |
8741 | } | |
8742 | break; | |
8743 | } | |
8744 | ||
8745 | case SSD_CMD_GET_HW_INFO_EXT: | |
8746 | if (copy_to_user(argp, &dev->hw_info_ext, sizeof(struct ssd_hw_info_extend))) { | |
8747 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8748 | ret = -EFAULT; | |
8749 | break; | |
8750 | } | |
8751 | break; | |
8752 | ||
8753 | case SSD_CMD_REG_READ: { | |
8754 | struct ssd_reg_op_info reg_info; | |
8755 | ||
8756 | if (copy_from_user(®_info, argp, sizeof(struct ssd_reg_op_info))) { | |
8757 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
8758 | ret = -EFAULT; | |
8759 | break; | |
8760 | } | |
8761 | ||
8762 | if (reg_info.offset > dev->mmio_len-sizeof(uint32_t)) { | |
8763 | ret = -EINVAL; | |
8764 | break; | |
8765 | } | |
8766 | ||
8767 | reg_info.value = ssd_reg32_read(dev->ctrlp + reg_info.offset); | |
8768 | if (copy_to_user(argp, ®_info, sizeof(struct ssd_reg_op_info))) { | |
8769 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8770 | ret = -EFAULT; | |
8771 | break; | |
8772 | } | |
8773 | ||
8774 | break; | |
8775 | } | |
8776 | ||
8777 | case SSD_CMD_REG_WRITE: { | |
8778 | struct ssd_reg_op_info reg_info; | |
8779 | ||
8780 | if (copy_from_user(®_info, argp, sizeof(struct ssd_reg_op_info))) { | |
8781 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
8782 | ret = -EFAULT; | |
8783 | break; | |
8784 | } | |
8785 | ||
8786 | if (reg_info.offset > dev->mmio_len-sizeof(uint32_t)) { | |
8787 | ret = -EINVAL; | |
8788 | break; | |
8789 | } | |
8790 | ||
8791 | ssd_reg32_write(dev->ctrlp + reg_info.offset, reg_info.value); | |
8792 | ||
8793 | break; | |
8794 | } | |
8795 | ||
8796 | case SSD_CMD_SPI_READ: { | |
8797 | struct ssd_spi_op_info spi_info; | |
8798 | uint32_t off, size; | |
8799 | ||
8800 | if (copy_from_user(&spi_info, argp, sizeof(struct ssd_spi_op_info))) { | |
8801 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
8802 | ret = -EFAULT; | |
8803 | break; | |
8804 | } | |
8805 | ||
8806 | off = spi_info.off; | |
8807 | size = spi_info.len; | |
8808 | buf = spi_info.buf; | |
8809 | ||
8810 | if (size > dev->rom_info.size || 0 == size || (off + size) > dev->rom_info.size) { | |
8811 | ret = -EINVAL; | |
8812 | break; | |
8813 | } | |
8814 | ||
8815 | kbuf = kmalloc(size, GFP_KERNEL); | |
8816 | if (!kbuf) { | |
8817 | ret = -ENOMEM; | |
8818 | break; | |
8819 | } | |
8820 | ||
8821 | ret = ssd_spi_page_read(dev, kbuf, off, size); | |
8822 | if (ret) { | |
8823 | kfree(kbuf); | |
8824 | break; | |
8825 | } | |
8826 | ||
8827 | if (copy_to_user(buf, kbuf, size)) { | |
8828 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8829 | kfree(kbuf); | |
8830 | ret = -EFAULT; | |
8831 | break; | |
8832 | } | |
8833 | ||
8834 | kfree(kbuf); | |
8835 | ||
8836 | break; | |
8837 | } | |
8838 | ||
8839 | case SSD_CMD_SPI_WRITE: { | |
8840 | struct ssd_spi_op_info spi_info; | |
8841 | uint32_t off, size; | |
8842 | ||
8843 | if (copy_from_user(&spi_info, argp, sizeof(struct ssd_spi_op_info))) { | |
8844 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
8845 | ret = -EFAULT; | |
8846 | break; | |
8847 | } | |
8848 | ||
8849 | off = spi_info.off; | |
8850 | size = spi_info.len; | |
8851 | buf = spi_info.buf; | |
8852 | ||
8853 | if (size > dev->rom_info.size || 0 == size || (off + size) > dev->rom_info.size) { | |
8854 | ret = -EINVAL; | |
8855 | break; | |
8856 | } | |
8857 | ||
8858 | kbuf = kmalloc(size, GFP_KERNEL); | |
8859 | if (!kbuf) { | |
8860 | ret = -ENOMEM; | |
8861 | break; | |
8862 | } | |
8863 | ||
8864 | if (copy_from_user(kbuf, buf, size)) { | |
8865 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
8866 | kfree(kbuf); | |
8867 | ret = -EFAULT; | |
8868 | break; | |
8869 | } | |
8870 | ||
8871 | ret = ssd_spi_page_write(dev, kbuf, off, size); | |
8872 | if (ret) { | |
8873 | kfree(kbuf); | |
8874 | break; | |
8875 | } | |
8876 | ||
8877 | kfree(kbuf); | |
8878 | ||
8879 | break; | |
8880 | } | |
8881 | ||
8882 | case SSD_CMD_SPI_ERASE: { | |
8883 | struct ssd_spi_op_info spi_info; | |
8884 | uint32_t off; | |
8885 | ||
8886 | if (copy_from_user(&spi_info, argp, sizeof(struct ssd_spi_op_info))) { | |
8887 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
8888 | ret = -EFAULT; | |
8889 | break; | |
8890 | } | |
8891 | ||
8892 | off = spi_info.off; | |
8893 | ||
8894 | if ((off + dev->rom_info.block_size) > dev->rom_info.size) { | |
8895 | ret = -EINVAL; | |
8896 | break; | |
8897 | } | |
8898 | ||
8899 | ret = ssd_spi_block_erase(dev, off); | |
8900 | if (ret) { | |
8901 | break; | |
8902 | } | |
8903 | ||
8904 | break; | |
8905 | } | |
8906 | ||
8907 | case SSD_CMD_I2C_READ: { | |
8908 | struct ssd_i2c_op_info i2c_info; | |
8909 | uint8_t saddr; | |
8910 | uint8_t rsize; | |
8911 | ||
8912 | if (copy_from_user(&i2c_info, argp, sizeof(struct ssd_i2c_op_info))) { | |
8913 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
8914 | ret = -EFAULT; | |
8915 | break; | |
8916 | } | |
8917 | ||
8918 | saddr = i2c_info.saddr; | |
8919 | rsize = i2c_info.rsize; | |
8920 | buf = i2c_info.rbuf; | |
8921 | ||
8922 | if (rsize <= 0 || rsize > SSD_I2C_MAX_DATA) { | |
8923 | ret = -EINVAL; | |
8924 | break; | |
8925 | } | |
8926 | ||
8927 | kbuf = kmalloc(rsize, GFP_KERNEL); | |
8928 | if (!kbuf) { | |
8929 | ret = -ENOMEM; | |
8930 | break; | |
8931 | } | |
8932 | ||
8933 | ret = ssd_i2c_read(dev, saddr, rsize, kbuf); | |
8934 | if (ret) { | |
8935 | kfree(kbuf); | |
8936 | break; | |
8937 | } | |
8938 | ||
8939 | if (copy_to_user(buf, kbuf, rsize)) { | |
8940 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
8941 | kfree(kbuf); | |
8942 | ret = -EFAULT; | |
8943 | break; | |
8944 | } | |
8945 | ||
8946 | kfree(kbuf); | |
8947 | ||
8948 | break; | |
8949 | } | |
8950 | ||
8951 | case SSD_CMD_I2C_WRITE: { | |
8952 | struct ssd_i2c_op_info i2c_info; | |
8953 | uint8_t saddr; | |
8954 | uint8_t wsize; | |
8955 | ||
8956 | if (copy_from_user(&i2c_info, argp, sizeof(struct ssd_i2c_op_info))) { | |
8957 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
8958 | ret = -EFAULT; | |
8959 | break; | |
8960 | } | |
8961 | ||
8962 | saddr = i2c_info.saddr; | |
8963 | wsize = i2c_info.wsize; | |
8964 | buf = i2c_info.wbuf; | |
8965 | ||
8966 | if (wsize <= 0 || wsize > SSD_I2C_MAX_DATA) { | |
8967 | ret = -EINVAL; | |
8968 | break; | |
8969 | } | |
8970 | ||
8971 | kbuf = kmalloc(wsize, GFP_KERNEL); | |
8972 | if (!kbuf) { | |
8973 | ret = -ENOMEM; | |
8974 | break; | |
8975 | } | |
8976 | ||
8977 | if (copy_from_user(kbuf, buf, wsize)) { | |
8978 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
8979 | kfree(kbuf); | |
8980 | ret = -EFAULT; | |
8981 | break; | |
8982 | } | |
8983 | ||
8984 | ret = ssd_i2c_write(dev, saddr, wsize, kbuf); | |
8985 | if (ret) { | |
8986 | kfree(kbuf); | |
8987 | break; | |
8988 | } | |
8989 | ||
8990 | kfree(kbuf); | |
8991 | ||
8992 | break; | |
8993 | } | |
8994 | ||
8995 | case SSD_CMD_I2C_WRITE_READ: { | |
8996 | struct ssd_i2c_op_info i2c_info; | |
8997 | uint8_t saddr; | |
8998 | uint8_t wsize; | |
8999 | uint8_t rsize; | |
9000 | uint8_t size; | |
9001 | ||
9002 | if (copy_from_user(&i2c_info, argp, sizeof(struct ssd_i2c_op_info))) { | |
9003 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9004 | ret = -EFAULT; | |
9005 | break; | |
9006 | } | |
9007 | ||
9008 | saddr = i2c_info.saddr; | |
9009 | wsize = i2c_info.wsize; | |
9010 | rsize = i2c_info.rsize; | |
9011 | buf = i2c_info.wbuf; | |
9012 | ||
9013 | if (wsize <= 0 || wsize > SSD_I2C_MAX_DATA) { | |
9014 | ret = -EINVAL; | |
9015 | break; | |
9016 | } | |
9017 | ||
9018 | if (rsize <= 0 || rsize > SSD_I2C_MAX_DATA) { | |
9019 | ret = -EINVAL; | |
9020 | break; | |
9021 | } | |
9022 | ||
9023 | size = wsize + rsize; | |
9024 | ||
9025 | kbuf = kmalloc(size, GFP_KERNEL); | |
9026 | if (!kbuf) { | |
9027 | ret = -ENOMEM; | |
9028 | break; | |
9029 | } | |
9030 | ||
9031 | if (copy_from_user((kbuf + rsize), buf, wsize)) { | |
9032 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9033 | kfree(kbuf); | |
9034 | ret = -EFAULT; | |
9035 | break; | |
9036 | } | |
9037 | ||
9038 | buf = i2c_info.rbuf; | |
9039 | ||
9040 | ret = ssd_i2c_write_read(dev, saddr, wsize, (kbuf + rsize), rsize, kbuf); | |
9041 | if (ret) { | |
9042 | kfree(kbuf); | |
9043 | break; | |
9044 | } | |
9045 | ||
9046 | if (copy_to_user(buf, kbuf, rsize)) { | |
9047 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9048 | kfree(kbuf); | |
9049 | ret = -EFAULT; | |
9050 | break; | |
9051 | } | |
9052 | ||
9053 | kfree(kbuf); | |
9054 | ||
9055 | break; | |
9056 | } | |
9057 | ||
9058 | case SSD_CMD_SMBUS_SEND_BYTE: { | |
9059 | struct ssd_smbus_op_info smbus_info; | |
9060 | uint8_t smb_data[SSD_SMBUS_BLOCK_MAX]; | |
9061 | uint8_t saddr; | |
9062 | uint8_t size; | |
9063 | ||
9064 | if (copy_from_user(&smbus_info, argp, sizeof(struct ssd_smbus_op_info))) { | |
9065 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9066 | ret = -EFAULT; | |
9067 | break; | |
9068 | } | |
9069 | ||
9070 | saddr = smbus_info.saddr; | |
9071 | buf = smbus_info.buf; | |
9072 | size = 1; | |
9073 | ||
9074 | if (copy_from_user(smb_data, buf, size)) { | |
9075 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9076 | ret = -EFAULT; | |
9077 | break; | |
9078 | } | |
9079 | ||
9080 | ret = ssd_smbus_send_byte(dev, saddr, smb_data); | |
9081 | if (ret) { | |
9082 | break; | |
9083 | } | |
9084 | ||
9085 | break; | |
9086 | } | |
9087 | ||
9088 | case SSD_CMD_SMBUS_RECEIVE_BYTE: { | |
9089 | struct ssd_smbus_op_info smbus_info; | |
9090 | uint8_t smb_data[SSD_SMBUS_BLOCK_MAX]; | |
9091 | uint8_t saddr; | |
9092 | uint8_t size; | |
9093 | ||
9094 | if (copy_from_user(&smbus_info, argp, sizeof(struct ssd_smbus_op_info))) { | |
9095 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9096 | ret = -EFAULT; | |
9097 | break; | |
9098 | } | |
9099 | ||
9100 | saddr = smbus_info.saddr; | |
9101 | buf = smbus_info.buf; | |
9102 | size = 1; | |
9103 | ||
9104 | ret = ssd_smbus_receive_byte(dev, saddr, smb_data); | |
9105 | if (ret) { | |
9106 | break; | |
9107 | } | |
9108 | ||
9109 | if (copy_to_user(buf, smb_data, size)) { | |
9110 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9111 | ret = -EFAULT; | |
9112 | break; | |
9113 | } | |
9114 | ||
9115 | break; | |
9116 | } | |
9117 | ||
9118 | case SSD_CMD_SMBUS_WRITE_BYTE: { | |
9119 | struct ssd_smbus_op_info smbus_info; | |
9120 | uint8_t smb_data[SSD_SMBUS_BLOCK_MAX]; | |
9121 | uint8_t saddr; | |
9122 | uint8_t command; | |
9123 | uint8_t size; | |
9124 | ||
9125 | if (copy_from_user(&smbus_info, argp, sizeof(struct ssd_smbus_op_info))) { | |
9126 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9127 | ret = -EFAULT; | |
9128 | break; | |
9129 | } | |
9130 | ||
9131 | saddr = smbus_info.saddr; | |
9132 | command = smbus_info.cmd; | |
9133 | buf = smbus_info.buf; | |
9134 | size = 1; | |
9135 | ||
9136 | if (copy_from_user(smb_data, buf, size)) { | |
9137 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9138 | ret = -EFAULT; | |
9139 | break; | |
9140 | } | |
9141 | ||
9142 | ret = ssd_smbus_write_byte(dev, saddr, command, smb_data); | |
9143 | if (ret) { | |
9144 | break; | |
9145 | } | |
9146 | ||
9147 | break; | |
9148 | } | |
9149 | ||
9150 | case SSD_CMD_SMBUS_READ_BYTE: { | |
9151 | struct ssd_smbus_op_info smbus_info; | |
9152 | uint8_t smb_data[SSD_SMBUS_BLOCK_MAX]; | |
9153 | uint8_t saddr; | |
9154 | uint8_t command; | |
9155 | uint8_t size; | |
9156 | ||
9157 | if (copy_from_user(&smbus_info, argp, sizeof(struct ssd_smbus_op_info))) { | |
9158 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9159 | ret = -EFAULT; | |
9160 | break; | |
9161 | } | |
9162 | ||
9163 | saddr = smbus_info.saddr; | |
9164 | command = smbus_info.cmd; | |
9165 | buf = smbus_info.buf; | |
9166 | size = 1; | |
9167 | ||
9168 | ret = ssd_smbus_read_byte(dev, saddr, command, smb_data); | |
9169 | if (ret) { | |
9170 | break; | |
9171 | } | |
9172 | ||
9173 | if (copy_to_user(buf, smb_data, size)) { | |
9174 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9175 | ret = -EFAULT; | |
9176 | break; | |
9177 | } | |
9178 | ||
9179 | break; | |
9180 | } | |
9181 | ||
9182 | case SSD_CMD_SMBUS_WRITE_WORD: { | |
9183 | struct ssd_smbus_op_info smbus_info; | |
9184 | uint8_t smb_data[SSD_SMBUS_BLOCK_MAX]; | |
9185 | uint8_t saddr; | |
9186 | uint8_t command; | |
9187 | uint8_t size; | |
9188 | ||
9189 | if (copy_from_user(&smbus_info, argp, sizeof(struct ssd_smbus_op_info))) { | |
9190 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9191 | ret = -EFAULT; | |
9192 | break; | |
9193 | } | |
9194 | ||
9195 | saddr = smbus_info.saddr; | |
9196 | command = smbus_info.cmd; | |
9197 | buf = smbus_info.buf; | |
9198 | size = 2; | |
9199 | ||
9200 | if (copy_from_user(smb_data, buf, size)) { | |
9201 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9202 | ret = -EFAULT; | |
9203 | break; | |
9204 | } | |
9205 | ||
9206 | ret = ssd_smbus_write_word(dev, saddr, command, smb_data); | |
9207 | if (ret) { | |
9208 | break; | |
9209 | } | |
9210 | ||
9211 | break; | |
9212 | } | |
9213 | ||
9214 | case SSD_CMD_SMBUS_READ_WORD: { | |
9215 | struct ssd_smbus_op_info smbus_info; | |
9216 | uint8_t smb_data[SSD_SMBUS_BLOCK_MAX]; | |
9217 | uint8_t saddr; | |
9218 | uint8_t command; | |
9219 | uint8_t size; | |
9220 | ||
9221 | if (copy_from_user(&smbus_info, argp, sizeof(struct ssd_smbus_op_info))) { | |
9222 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9223 | ret = -EFAULT; | |
9224 | break; | |
9225 | } | |
9226 | ||
9227 | saddr = smbus_info.saddr; | |
9228 | command = smbus_info.cmd; | |
9229 | buf = smbus_info.buf; | |
9230 | size = 2; | |
9231 | ||
9232 | ret = ssd_smbus_read_word(dev, saddr, command, smb_data); | |
9233 | if (ret) { | |
9234 | break; | |
9235 | } | |
9236 | ||
9237 | if (copy_to_user(buf, smb_data, size)) { | |
9238 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9239 | ret = -EFAULT; | |
9240 | break; | |
9241 | } | |
9242 | ||
9243 | break; | |
9244 | } | |
9245 | ||
9246 | case SSD_CMD_SMBUS_WRITE_BLOCK: { | |
9247 | struct ssd_smbus_op_info smbus_info; | |
9248 | uint8_t smb_data[SSD_SMBUS_BLOCK_MAX]; | |
9249 | uint8_t saddr; | |
9250 | uint8_t command; | |
9251 | uint8_t size; | |
9252 | ||
9253 | if (copy_from_user(&smbus_info, argp, sizeof(struct ssd_smbus_op_info))) { | |
9254 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9255 | ret = -EFAULT; | |
9256 | break; | |
9257 | } | |
9258 | ||
9259 | saddr = smbus_info.saddr; | |
9260 | command = smbus_info.cmd; | |
9261 | buf = smbus_info.buf; | |
9262 | size = smbus_info.size; | |
9263 | ||
9264 | if (size > SSD_SMBUS_BLOCK_MAX) { | |
9265 | ret = -EINVAL; | |
9266 | break; | |
9267 | } | |
9268 | ||
9269 | if (copy_from_user(smb_data, buf, size)) { | |
9270 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9271 | ret = -EFAULT; | |
9272 | break; | |
9273 | } | |
9274 | ||
9275 | ret = ssd_smbus_write_block(dev, saddr, command, size, smb_data); | |
9276 | if (ret) { | |
9277 | break; | |
9278 | } | |
9279 | ||
9280 | break; | |
9281 | } | |
9282 | ||
9283 | case SSD_CMD_SMBUS_READ_BLOCK: { | |
9284 | struct ssd_smbus_op_info smbus_info; | |
9285 | uint8_t smb_data[SSD_SMBUS_BLOCK_MAX]; | |
9286 | uint8_t saddr; | |
9287 | uint8_t command; | |
9288 | uint8_t size; | |
9289 | ||
9290 | if (copy_from_user(&smbus_info, argp, sizeof(struct ssd_smbus_op_info))) { | |
9291 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9292 | ret = -EFAULT; | |
9293 | break; | |
9294 | } | |
9295 | ||
9296 | saddr = smbus_info.saddr; | |
9297 | command = smbus_info.cmd; | |
9298 | buf = smbus_info.buf; | |
9299 | size = smbus_info.size; | |
9300 | ||
9301 | if (size > SSD_SMBUS_BLOCK_MAX) { | |
9302 | ret = -EINVAL; | |
9303 | break; | |
9304 | } | |
9305 | ||
9306 | ret = ssd_smbus_read_block(dev, saddr, command, size, smb_data); | |
9307 | if (ret) { | |
9308 | break; | |
9309 | } | |
9310 | ||
9311 | if (copy_to_user(buf, smb_data, size)) { | |
9312 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9313 | ret = -EFAULT; | |
9314 | break; | |
9315 | } | |
9316 | ||
9317 | break; | |
9318 | } | |
9319 | ||
9320 | case SSD_CMD_BM_GET_VER: { | |
9321 | uint16_t ver; | |
9322 | ||
9323 | ret = ssd_bm_get_version(dev, &ver); | |
9324 | if (ret) { | |
9325 | break; | |
9326 | } | |
9327 | ||
9328 | if (copy_to_user(argp, &ver, sizeof(uint16_t))) { | |
9329 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9330 | ret = -EFAULT; | |
9331 | break; | |
9332 | } | |
9333 | ||
9334 | break; | |
9335 | } | |
9336 | ||
9337 | case SSD_CMD_BM_GET_NR_CAP: { | |
9338 | int nr_cap; | |
9339 | ||
9340 | ret = ssd_bm_nr_cap(dev, &nr_cap); | |
9341 | if (ret) { | |
9342 | break; | |
9343 | } | |
9344 | ||
9345 | if (copy_to_user(argp, &nr_cap, sizeof(int))) { | |
9346 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9347 | ret = -EFAULT; | |
9348 | break; | |
9349 | } | |
9350 | ||
9351 | break; | |
9352 | } | |
9353 | ||
9354 | case SSD_CMD_BM_CAP_LEARNING: { | |
9355 | ret = ssd_bm_enter_cap_learning(dev); | |
9356 | ||
9357 | if (ret) { | |
9358 | break; | |
9359 | } | |
9360 | ||
9361 | break; | |
9362 | } | |
9363 | ||
9364 | case SSD_CMD_CAP_LEARN: { | |
9365 | uint32_t cap = 0; | |
9366 | ||
9367 | ret = ssd_cap_learn(dev, &cap); | |
9368 | if (ret) { | |
9369 | break; | |
9370 | } | |
9371 | ||
9372 | if (copy_to_user(argp, &cap, sizeof(uint32_t))) { | |
9373 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9374 | ret = -EFAULT; | |
9375 | break; | |
9376 | } | |
9377 | ||
9378 | break; | |
9379 | } | |
9380 | ||
9381 | case SSD_CMD_GET_CAP_STATUS: { | |
9382 | int cap_status = 0; | |
9383 | ||
9384 | if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) { | |
9385 | cap_status = 1; | |
9386 | } | |
9387 | ||
9388 | if (copy_to_user(argp, &cap_status, sizeof(int))) { | |
9389 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9390 | ret = -EFAULT; | |
9391 | break; | |
9392 | } | |
9393 | ||
9394 | break; | |
9395 | } | |
9396 | ||
9397 | case SSD_CMD_RAM_READ: { | |
9398 | struct ssd_ram_op_info ram_info; | |
9399 | uint64_t ofs; | |
9400 | uint32_t length; | |
9401 | size_t rlen, len = dev->hw_info.ram_max_len; | |
9402 | int ctrl_idx; | |
9403 | ||
9404 | if (copy_from_user(&ram_info, argp, sizeof(struct ssd_ram_op_info))) { | |
9405 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9406 | ret = -EFAULT; | |
9407 | break; | |
9408 | } | |
9409 | ||
9410 | ofs = ram_info.start; | |
9411 | length = ram_info.length; | |
9412 | buf = ram_info.buf; | |
9413 | ctrl_idx = ram_info.ctrl_idx; | |
9414 | ||
9415 | if (ofs >= dev->hw_info.ram_size || length > dev->hw_info.ram_size || 0 == length || (ofs + length) > dev->hw_info.ram_size) { | |
9416 | ret = -EINVAL; | |
9417 | break; | |
9418 | } | |
9419 | ||
9420 | kbuf = kmalloc(len, GFP_KERNEL); | |
9421 | if (!kbuf) { | |
9422 | ret = -ENOMEM; | |
9423 | break; | |
9424 | } | |
9425 | ||
9426 | for (rlen=0; rlen<length; rlen+=len, buf+=len, ofs+=len) { | |
9427 | if ((length - rlen) < len) { | |
9428 | len = length - rlen; | |
9429 | } | |
9430 | ||
9431 | ret = ssd_ram_read(dev, kbuf, len, ofs, ctrl_idx); | |
9432 | if (ret) { | |
9433 | break; | |
9434 | } | |
9435 | ||
9436 | if (copy_to_user(buf, kbuf, len)) { | |
9437 | ret = -EFAULT; | |
9438 | break; | |
9439 | } | |
9440 | } | |
9441 | ||
9442 | kfree(kbuf); | |
9443 | ||
9444 | break; | |
9445 | } | |
9446 | ||
9447 | case SSD_CMD_RAM_WRITE: { | |
9448 | struct ssd_ram_op_info ram_info; | |
9449 | uint64_t ofs; | |
9450 | uint32_t length; | |
9451 | size_t wlen, len = dev->hw_info.ram_max_len; | |
9452 | int ctrl_idx; | |
9453 | ||
9454 | if (copy_from_user(&ram_info, argp, sizeof(struct ssd_ram_op_info))) { | |
9455 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9456 | ret = -EFAULT; | |
9457 | break; | |
9458 | } | |
9459 | ofs = ram_info.start; | |
9460 | length = ram_info.length; | |
9461 | buf = ram_info.buf; | |
9462 | ctrl_idx = ram_info.ctrl_idx; | |
9463 | ||
9464 | if (ofs >= dev->hw_info.ram_size || length > dev->hw_info.ram_size || 0 == length || (ofs + length) > dev->hw_info.ram_size) { | |
9465 | ret = -EINVAL; | |
9466 | break; | |
9467 | } | |
9468 | ||
9469 | kbuf = kmalloc(len, GFP_KERNEL); | |
9470 | if (!kbuf) { | |
9471 | ret = -ENOMEM; | |
9472 | break; | |
9473 | } | |
9474 | ||
9475 | for (wlen=0; wlen<length; wlen+=len, buf+=len, ofs+=len) { | |
9476 | if ((length - wlen) < len) { | |
9477 | len = length - wlen; | |
9478 | } | |
9479 | ||
9480 | if (copy_from_user(kbuf, buf, len)) { | |
9481 | ret = -EFAULT; | |
9482 | break; | |
9483 | } | |
9484 | ||
9485 | ret = ssd_ram_write(dev, kbuf, len, ofs, ctrl_idx); | |
9486 | if (ret) { | |
9487 | break; | |
9488 | } | |
9489 | } | |
9490 | ||
9491 | kfree(kbuf); | |
9492 | ||
9493 | break; | |
9494 | } | |
9495 | ||
9496 | case SSD_CMD_NAND_READ_ID: { | |
9497 | struct ssd_flash_op_info flash_info; | |
9498 | int chip_no, chip_ce, length, ctrl_idx; | |
9499 | ||
9500 | if (copy_from_user(&flash_info, argp, sizeof(struct ssd_flash_op_info))) { | |
9501 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9502 | ret = -EFAULT; | |
9503 | break; | |
9504 | } | |
9505 | ||
9506 | chip_no = flash_info.flash; | |
9507 | chip_ce = flash_info.chip; | |
9508 | ctrl_idx = flash_info.ctrl_idx; | |
9509 | buf = flash_info.buf; | |
9510 | length = dev->hw_info.id_size; | |
9511 | ||
9512 | //kbuf = kmalloc(length, GFP_KERNEL); | |
9513 | kbuf = kmalloc(SSD_NAND_ID_BUFF_SZ, GFP_KERNEL); //xx | |
9514 | if (!kbuf) { | |
9515 | ret = -ENOMEM; | |
9516 | break; | |
9517 | } | |
9518 | memset(kbuf, 0, length); | |
9519 | ||
9520 | ret = ssd_nand_read_id(dev, kbuf, chip_no, chip_ce, ctrl_idx); | |
9521 | if (ret) { | |
9522 | kfree(kbuf); | |
9523 | break; | |
9524 | } | |
9525 | ||
9526 | if (copy_to_user(buf, kbuf, length)) { | |
9527 | kfree(kbuf); | |
9528 | ret = -EFAULT; | |
9529 | break; | |
9530 | } | |
9531 | ||
9532 | kfree(kbuf); | |
9533 | ||
9534 | break; | |
9535 | } | |
9536 | ||
9537 | case SSD_CMD_NAND_READ: { //with oob | |
9538 | struct ssd_flash_op_info flash_info; | |
9539 | uint32_t length; | |
9540 | int flash, chip, page, ctrl_idx; | |
9541 | int err = 0; | |
9542 | ||
9543 | if (copy_from_user(&flash_info, argp, sizeof(struct ssd_flash_op_info))) { | |
9544 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9545 | ret = -EFAULT; | |
9546 | break; | |
9547 | } | |
9548 | ||
9549 | flash = flash_info.flash; | |
9550 | chip = flash_info.chip; | |
9551 | page = flash_info.page; | |
9552 | buf = flash_info.buf; | |
9553 | ctrl_idx = flash_info.ctrl_idx; | |
9554 | ||
9555 | length = dev->hw_info.page_size + dev->hw_info.oob_size; | |
9556 | ||
9557 | kbuf = kmalloc(length, GFP_KERNEL); | |
9558 | if (!kbuf) { | |
9559 | ret = -ENOMEM; | |
9560 | break; | |
9561 | } | |
9562 | ||
9563 | err = ret = ssd_nand_read_w_oob(dev, kbuf, flash, chip, page, 1, ctrl_idx); | |
9564 | if (ret && (-EIO != ret)) { | |
9565 | kfree(kbuf); | |
9566 | break; | |
9567 | } | |
9568 | ||
9569 | if (copy_to_user(buf, kbuf, length)) { | |
9570 | kfree(kbuf); | |
9571 | ret = -EFAULT; | |
9572 | break; | |
9573 | } | |
9574 | ||
9575 | ret = err; | |
9576 | ||
9577 | kfree(kbuf); | |
9578 | break; | |
9579 | } | |
9580 | ||
9581 | case SSD_CMD_NAND_WRITE: { | |
9582 | struct ssd_flash_op_info flash_info; | |
9583 | int flash, chip, page, ctrl_idx; | |
9584 | uint32_t length; | |
9585 | ||
9586 | if (copy_from_user(&flash_info, argp, sizeof(struct ssd_flash_op_info))) { | |
9587 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9588 | ret = -EFAULT; | |
9589 | break; | |
9590 | } | |
9591 | ||
9592 | flash = flash_info.flash; | |
9593 | chip = flash_info.chip; | |
9594 | page = flash_info.page; | |
9595 | buf = flash_info.buf; | |
9596 | ctrl_idx = flash_info.ctrl_idx; | |
9597 | ||
9598 | length = dev->hw_info.page_size + dev->hw_info.oob_size; | |
9599 | ||
9600 | kbuf = kmalloc(length, GFP_KERNEL); | |
9601 | if (!kbuf) { | |
9602 | ret = -ENOMEM; | |
9603 | break; | |
9604 | } | |
9605 | ||
9606 | if (copy_from_user(kbuf, buf, length)) { | |
9607 | kfree(kbuf); | |
9608 | ret = -EFAULT; | |
9609 | break; | |
9610 | } | |
9611 | ||
9612 | ret = ssd_nand_write(dev, kbuf, flash, chip, page, 1, ctrl_idx); | |
9613 | if (ret) { | |
9614 | kfree(kbuf); | |
9615 | break; | |
9616 | } | |
9617 | ||
9618 | kfree(kbuf); | |
9619 | break; | |
9620 | } | |
9621 | ||
9622 | case SSD_CMD_NAND_ERASE: { | |
9623 | struct ssd_flash_op_info flash_info; | |
9624 | int flash, chip, page, ctrl_idx; | |
9625 | ||
9626 | if (copy_from_user(&flash_info, argp, sizeof(struct ssd_flash_op_info))) { | |
9627 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9628 | ret = -EFAULT; | |
9629 | break; | |
9630 | } | |
9631 | ||
9632 | flash = flash_info.flash; | |
9633 | chip = flash_info.chip; | |
9634 | page = flash_info.page; | |
9635 | ctrl_idx = flash_info.ctrl_idx; | |
9636 | ||
9637 | if ((page % dev->hw_info.page_count) != 0) { | |
9638 | ret = -EINVAL; | |
9639 | break; | |
9640 | } | |
9641 | ||
9642 | //hio_warn("erase fs = %llx\n", ofs); | |
9643 | ret = ssd_nand_erase(dev, flash, chip, page, ctrl_idx); | |
9644 | if (ret) { | |
9645 | break; | |
9646 | } | |
9647 | ||
9648 | break; | |
9649 | } | |
9650 | ||
9651 | case SSD_CMD_NAND_READ_EXT: { //ingore EIO | |
9652 | struct ssd_flash_op_info flash_info; | |
9653 | uint32_t length; | |
9654 | int flash, chip, page, ctrl_idx; | |
9655 | ||
9656 | if (copy_from_user(&flash_info, argp, sizeof(struct ssd_flash_op_info))) { | |
9657 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9658 | ret = -EFAULT; | |
9659 | break; | |
9660 | } | |
9661 | ||
9662 | flash = flash_info.flash; | |
9663 | chip = flash_info.chip; | |
9664 | page = flash_info.page; | |
9665 | buf = flash_info.buf; | |
9666 | ctrl_idx = flash_info.ctrl_idx; | |
9667 | ||
9668 | length = dev->hw_info.page_size + dev->hw_info.oob_size; | |
9669 | ||
9670 | kbuf = kmalloc(length, GFP_KERNEL); | |
9671 | if (!kbuf) { | |
9672 | ret = -ENOMEM; | |
9673 | break; | |
9674 | } | |
9675 | ||
9676 | ret = ssd_nand_read_w_oob(dev, kbuf, flash, chip, page, 1, ctrl_idx); | |
9677 | if (-EIO == ret) { //ingore EIO | |
9678 | ret = 0; | |
9679 | } | |
9680 | if (ret) { | |
9681 | kfree(kbuf); | |
9682 | break; | |
9683 | } | |
9684 | ||
9685 | if (copy_to_user(buf, kbuf, length)) { | |
9686 | kfree(kbuf); | |
9687 | ret = -EFAULT; | |
9688 | break; | |
9689 | } | |
9690 | ||
9691 | kfree(kbuf); | |
9692 | break; | |
9693 | } | |
9694 | ||
9695 | case SSD_CMD_UPDATE_BBT: { | |
9696 | struct ssd_flash_op_info flash_info; | |
9697 | int ctrl_idx, flash; | |
9698 | ||
9699 | if (copy_from_user(&flash_info, argp, sizeof(struct ssd_flash_op_info))) { | |
9700 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9701 | ret = -EFAULT; | |
9702 | break; | |
9703 | } | |
9704 | ||
9705 | ctrl_idx = flash_info.ctrl_idx; | |
9706 | flash = flash_info.flash; | |
9707 | ret = ssd_update_bbt(dev, flash, ctrl_idx); | |
9708 | if (ret) { | |
9709 | break; | |
9710 | } | |
9711 | ||
9712 | break; | |
9713 | } | |
9714 | ||
9715 | case SSD_CMD_CLEAR_ALARM: | |
9716 | ssd_clear_alarm(dev); | |
9717 | break; | |
9718 | ||
9719 | case SSD_CMD_SET_ALARM: | |
9720 | ssd_set_alarm(dev); | |
9721 | break; | |
9722 | ||
9723 | case SSD_CMD_RESET: | |
9724 | ret = ssd_do_reset(dev); | |
9725 | break; | |
9726 | ||
9727 | case SSD_CMD_RELOAD_FW: | |
9728 | dev->reload_fw = 1; | |
da3355df | 9729 | dev->has_non_0x98_reg_access = 1; |
361ebed5 HSDT |
9730 | if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) { |
9731 | ssd_reg32_write(dev->ctrlp + SSD_RELOAD_FW_REG, SSD_RELOAD_FLAG); | |
9732 | } else if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_1_1) { | |
9733 | ssd_reg32_write(dev->ctrlp + SSD_RELOAD_FW_REG, SSD_RELOAD_FW); | |
9734 | ||
9735 | } | |
9736 | break; | |
9737 | ||
9738 | case SSD_CMD_UNLOAD_DEV: { | |
9739 | if (atomic_read(&dev->refcnt)) { | |
9740 | ret = -EBUSY; | |
9741 | break; | |
9742 | } | |
9743 | ||
9744 | /* save smart */ | |
9745 | ssd_save_smart(dev); | |
9746 | ||
9747 | ret = ssd_flush(dev); | |
9748 | if (ret) { | |
9749 | break; | |
9750 | } | |
9751 | ||
9752 | /* cleanup the block device */ | |
9753 | if (test_and_clear_bit(SSD_INIT_BD, &dev->state)) { | |
9754 | mutex_lock(&dev->gd_mutex); | |
9755 | ssd_cleanup_blkdev(dev); | |
1197134c | 9756 | ssd_cleanup_queue(dev); |
361ebed5 HSDT |
9757 | mutex_unlock(&dev->gd_mutex); |
9758 | } | |
9759 | ||
9760 | break; | |
9761 | } | |
9762 | ||
9763 | case SSD_CMD_LOAD_DEV: { | |
9764 | ||
9765 | if (test_bit(SSD_INIT_BD, &dev->state)) { | |
9766 | ret = -EINVAL; | |
9767 | break; | |
9768 | } | |
9769 | ||
9770 | ret = ssd_init_smart(dev); | |
9771 | if (ret) { | |
9772 | hio_warn("%s: init info: failed\n", dev->name); | |
9773 | break; | |
9774 | } | |
9775 | ||
1197134c KM |
9776 | ret = ssd_init_queue(dev); |
9777 | if (ret) { | |
9778 | hio_warn("%s: init queue failed\n", dev->name); | |
9779 | break; | |
9780 | } | |
361ebed5 HSDT |
9781 | ret = ssd_init_blkdev(dev); |
9782 | if (ret) { | |
9783 | hio_warn("%s: register block device: failed\n", dev->name); | |
9784 | break; | |
9785 | } | |
9786 | (void)test_and_set_bit(SSD_INIT_BD, &dev->state); | |
9787 | ||
9788 | break; | |
9789 | } | |
9790 | ||
9791 | case SSD_CMD_UPDATE_VP: { | |
9792 | uint32_t val; | |
9793 | uint32_t new_vp, new_vp1 = 0; | |
9794 | ||
9795 | if (test_bit(SSD_INIT_BD, &dev->state)) { | |
9796 | ret = -EINVAL; | |
9797 | break; | |
9798 | } | |
9799 | ||
9800 | if (copy_from_user(&new_vp, argp, sizeof(uint32_t))) { | |
9801 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9802 | ret = -EFAULT; | |
9803 | break; | |
9804 | } | |
9805 | ||
9806 | if (new_vp > dev->hw_info.max_valid_pages || new_vp <= 0) { | |
9807 | ret = -EINVAL; | |
9808 | break; | |
9809 | } | |
9810 | ||
9811 | while (new_vp <= dev->hw_info.max_valid_pages) { | |
9812 | ssd_reg32_write(dev->ctrlp + SSD_VALID_PAGES_REG, new_vp); | |
9813 | msleep(10); | |
9814 | val = ssd_reg32_read(dev->ctrlp + SSD_VALID_PAGES_REG); | |
9815 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
9816 | new_vp1 = val & 0x3FF; | |
9817 | } else { | |
9818 | new_vp1 = val & 0x7FFF; | |
9819 | } | |
9820 | ||
9821 | if (new_vp1 == new_vp) { | |
9822 | break; | |
9823 | } | |
9824 | ||
9825 | new_vp++; | |
9826 | /*if (new_vp == dev->hw_info.valid_pages) { | |
9827 | new_vp++; | |
9828 | }*/ | |
9829 | } | |
9830 | ||
9831 | if (new_vp1 != new_vp || new_vp > dev->hw_info.max_valid_pages) { | |
9832 | /* restore */ | |
9833 | ssd_reg32_write(dev->ctrlp + SSD_VALID_PAGES_REG, dev->hw_info.valid_pages); | |
9834 | ret = -EINVAL; | |
9835 | break; | |
9836 | } | |
9837 | ||
9838 | if (copy_to_user(argp, &new_vp, sizeof(uint32_t))) { | |
9839 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9840 | ssd_reg32_write(dev->ctrlp + SSD_VALID_PAGES_REG, dev->hw_info.valid_pages); | |
9841 | ret = -EFAULT; | |
9842 | break; | |
9843 | } | |
9844 | ||
9845 | /* new */ | |
9846 | dev->hw_info.valid_pages = new_vp; | |
9847 | dev->hw_info.size = (uint64_t)dev->hw_info.valid_pages * dev->hw_info.page_size; | |
9848 | dev->hw_info.size *= (dev->hw_info.block_count - dev->hw_info.reserved_blks); | |
9849 | dev->hw_info.size *= ((uint64_t)dev->hw_info.nr_data_ch * (uint64_t)dev->hw_info.nr_chip * (uint64_t)dev->hw_info.nr_ctrl); | |
9850 | ||
9851 | break; | |
9852 | } | |
9853 | ||
9854 | case SSD_CMD_FULL_RESET: { | |
9855 | ret = ssd_full_reset(dev); | |
9856 | break; | |
9857 | } | |
9858 | ||
9859 | case SSD_CMD_GET_NR_LOG: { | |
9860 | if (copy_to_user(argp, &dev->internal_log.nr_log, sizeof(dev->internal_log.nr_log))) { | |
9861 | ret = -EFAULT; | |
9862 | break; | |
9863 | } | |
9864 | break; | |
9865 | } | |
9866 | ||
9867 | case SSD_CMD_GET_LOG: { | |
9868 | uint32_t length = dev->rom_info.log_sz; | |
9869 | ||
9870 | buf = argp; | |
9871 | ||
9872 | if (copy_to_user(buf, dev->internal_log.log, length)) { | |
9873 | ret = -EFAULT; | |
9874 | break; | |
9875 | } | |
9876 | ||
9877 | break; | |
9878 | } | |
9879 | ||
9880 | case SSD_CMD_LOG_LEVEL: { | |
9881 | int level = 0; | |
9882 | if (copy_from_user(&level, argp, sizeof(int))) { | |
9883 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9884 | ret = -EFAULT; | |
9885 | break; | |
9886 | } | |
9887 | ||
9888 | if (level >= SSD_LOG_NR_LEVEL || level < SSD_LOG_LEVEL_INFO) { | |
9889 | level = SSD_LOG_LEVEL_ERR; | |
9890 | } | |
9891 | ||
9892 | //just for showing log, no need to protect | |
9893 | log_level = level; | |
9894 | break; | |
9895 | } | |
9896 | ||
9897 | case SSD_CMD_OT_PROTECT: { | |
9898 | int protect = 0; | |
9899 | ||
9900 | if (copy_from_user(&protect, argp, sizeof(int))) { | |
9901 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9902 | ret = -EFAULT; | |
9903 | break; | |
9904 | } | |
9905 | ||
9906 | ssd_set_ot_protect(dev, !!protect); | |
9907 | break; | |
9908 | } | |
9909 | ||
9910 | case SSD_CMD_GET_OT_STATUS: { | |
9911 | int status = ssd_get_ot_status(dev, &status); | |
9912 | ||
9913 | if (copy_to_user(argp, &status, sizeof(int))) { | |
9914 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9915 | ret = -EFAULT; | |
9916 | break; | |
9917 | } | |
9918 | break; | |
9919 | } | |
9920 | ||
9921 | case SSD_CMD_CLEAR_LOG: { | |
9922 | ret = ssd_clear_log(dev); | |
9923 | break; | |
9924 | } | |
9925 | ||
9926 | case SSD_CMD_CLEAR_SMART: { | |
9927 | ret = ssd_clear_smart(dev); | |
9928 | break; | |
9929 | } | |
9930 | ||
1197134c KM |
9931 | case SSD_CMD_CLEAR_WARNING: { |
9932 | ret = ssd_clear_warning(dev); | |
9933 | break; | |
9934 | } | |
9935 | ||
361ebed5 HSDT |
9936 | case SSD_CMD_SW_LOG: { |
9937 | struct ssd_sw_log_info sw_log; | |
9938 | ||
9939 | if (copy_from_user(&sw_log, argp, sizeof(struct ssd_sw_log_info))) { | |
9940 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
9941 | ret = -EFAULT; | |
9942 | break; | |
9943 | } | |
9944 | ||
9945 | ret = ssd_gen_swlog(dev, sw_log.event, sw_log.data); | |
9946 | break; | |
9947 | } | |
9948 | ||
9949 | case SSD_CMD_GET_LABEL: { | |
9950 | ||
9951 | if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) { | |
9952 | ret = -EINVAL; | |
9953 | break; | |
9954 | } | |
9955 | ||
9956 | if (copy_to_user(argp, &dev->label, sizeof(struct ssd_label))) { | |
9957 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9958 | ret = -EFAULT; | |
9959 | break; | |
9960 | } | |
9961 | break; | |
9962 | } | |
9963 | ||
9964 | case SSD_CMD_GET_VERSION: { | |
9965 | struct ssd_version_info ver; | |
9966 | ||
9967 | mutex_lock(&dev->fw_mutex); | |
9968 | ret = __ssd_get_version(dev, &ver); | |
9969 | mutex_unlock(&dev->fw_mutex); | |
9970 | if (ret) { | |
9971 | break; | |
9972 | } | |
9973 | ||
9974 | if (copy_to_user(argp, &ver, sizeof(struct ssd_version_info))) { | |
9975 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9976 | ret = -EFAULT; | |
9977 | break; | |
9978 | } | |
9979 | break; | |
9980 | } | |
9981 | ||
9982 | case SSD_CMD_GET_TEMPERATURE: { | |
9983 | int temp; | |
9984 | ||
9985 | mutex_lock(&dev->fw_mutex); | |
9986 | ret = __ssd_get_temperature(dev, &temp); | |
9987 | mutex_unlock(&dev->fw_mutex); | |
9988 | if (ret) { | |
9989 | break; | |
9990 | } | |
9991 | ||
9992 | if (copy_to_user(argp, &temp, sizeof(int))) { | |
9993 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
9994 | ret = -EFAULT; | |
9995 | break; | |
9996 | } | |
9997 | break; | |
9998 | } | |
9999 | ||
10000 | case SSD_CMD_GET_BMSTATUS: { | |
10001 | int status; | |
10002 | ||
10003 | mutex_lock(&dev->fw_mutex); | |
10004 | if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) { | |
10005 | if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) { | |
10006 | status = SSD_BMSTATUS_WARNING; | |
10007 | } else { | |
10008 | status = SSD_BMSTATUS_OK; | |
10009 | } | |
10010 | } else if(dev->protocol_info.ver > SSD_PROTOCOL_V3) { | |
10011 | ret = __ssd_bm_status(dev, &status); | |
10012 | } else { | |
10013 | status = SSD_BMSTATUS_OK; | |
10014 | } | |
10015 | mutex_unlock(&dev->fw_mutex); | |
10016 | if (ret) { | |
10017 | break; | |
10018 | } | |
10019 | ||
10020 | if (copy_to_user(argp, &status, sizeof(int))) { | |
10021 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
10022 | ret = -EFAULT; | |
10023 | break; | |
10024 | } | |
10025 | break; | |
10026 | } | |
10027 | ||
10028 | case SSD_CMD_GET_LABEL2: { | |
10029 | void *label; | |
10030 | int length; | |
10031 | ||
10032 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
10033 | label = &dev->label; | |
10034 | length = sizeof(struct ssd_label); | |
10035 | } else { | |
10036 | label = &dev->labelv3; | |
10037 | length = sizeof(struct ssd_labelv3); | |
10038 | } | |
10039 | ||
10040 | if (copy_to_user(argp, label, length)) { | |
10041 | ret = -EFAULT; | |
10042 | break; | |
10043 | } | |
10044 | break; | |
10045 | } | |
10046 | ||
10047 | case SSD_CMD_FLUSH: | |
10048 | ret = ssd_flush(dev); | |
10049 | if (ret) { | |
10050 | hio_warn("%s: ssd_flush: failed\n", dev->name); | |
10051 | ret = -EFAULT; | |
10052 | break; | |
10053 | } | |
10054 | break; | |
10055 | ||
10056 | case SSD_CMD_SAVE_MD: { | |
10057 | int save_md = 0; | |
10058 | ||
10059 | if (copy_from_user(&save_md, argp, sizeof(int))) { | |
10060 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
10061 | ret = -EFAULT; | |
10062 | break; | |
10063 | } | |
10064 | ||
10065 | dev->save_md = !!save_md; | |
10066 | break; | |
10067 | } | |
10068 | ||
10069 | case SSD_CMD_SET_WMODE: { | |
10070 | int new_wmode = 0; | |
10071 | ||
10072 | if (copy_from_user(&new_wmode, argp, sizeof(int))) { | |
10073 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
10074 | ret = -EFAULT; | |
10075 | break; | |
10076 | } | |
10077 | ||
10078 | ret = __ssd_set_wmode(dev, new_wmode); | |
10079 | if (ret) { | |
10080 | break; | |
10081 | } | |
10082 | ||
10083 | break; | |
10084 | } | |
10085 | ||
10086 | case SSD_CMD_GET_WMODE: { | |
10087 | if (copy_to_user(argp, &dev->wmode, sizeof(int))) { | |
10088 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
10089 | ret = -EFAULT; | |
10090 | break; | |
10091 | } | |
10092 | ||
10093 | break; | |
10094 | } | |
10095 | ||
10096 | case SSD_CMD_GET_USER_WMODE: { | |
10097 | if (copy_to_user(argp, &dev->user_wmode, sizeof(int))) { | |
10098 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
10099 | ret = -EFAULT; | |
10100 | break; | |
10101 | } | |
10102 | ||
10103 | break; | |
10104 | } | |
10105 | ||
10106 | case SSD_CMD_DEBUG: { | |
10107 | struct ssd_debug_info db_info; | |
10108 | ||
10109 | if (!finject) { | |
10110 | ret = -EOPNOTSUPP; | |
10111 | break; | |
10112 | } | |
10113 | ||
10114 | if (copy_from_user(&db_info, argp, sizeof(struct ssd_debug_info))) { | |
10115 | hio_warn("%s: copy_from_user: failed\n", dev->name); | |
10116 | ret = -EFAULT; | |
10117 | break; | |
10118 | } | |
10119 | ||
10120 | if (db_info.type < SSD_DEBUG_NONE || db_info.type >= SSD_DEBUG_NR) { | |
10121 | ret = -EINVAL; | |
10122 | break; | |
10123 | } | |
10124 | ||
10125 | /* IO */ | |
10126 | if (db_info.type >= SSD_DEBUG_READ_ERR && db_info.type <= SSD_DEBUG_RW_ERR && | |
10127 | (db_info.data.loc.off + db_info.data.loc.len) > (dev->hw_info.size >> 9)) { | |
10128 | ret = -EINVAL; | |
10129 | break; | |
10130 | } | |
10131 | ||
10132 | memcpy(&dev->db_info, &db_info, sizeof(struct ssd_debug_info)); | |
10133 | ||
10134 | #ifdef SSD_OT_PROTECT | |
10135 | /* temperature */ | |
10136 | if (db_info.type == SSD_DEBUG_NONE) { | |
10137 | ssd_check_temperature(dev, SSD_OT_TEMP); | |
10138 | } else if (db_info.type == SSD_DEBUG_LOG) { | |
10139 | if (db_info.data.log.event == SSD_LOG_OVER_TEMP) { | |
10140 | dev->ot_delay = SSD_OT_DELAY; | |
10141 | } else if (db_info.data.log.event == SSD_LOG_NORMAL_TEMP) { | |
10142 | dev->ot_delay = 0; | |
10143 | } | |
10144 | } | |
10145 | #endif | |
10146 | ||
10147 | /* offline */ | |
10148 | if (db_info.type == SSD_DEBUG_OFFLINE) { | |
10149 | test_and_clear_bit(SSD_ONLINE, &dev->state); | |
10150 | } else if (db_info.type == SSD_DEBUG_NONE) { | |
10151 | (void)test_and_set_bit(SSD_ONLINE, &dev->state); | |
10152 | } | |
10153 | ||
10154 | /* log */ | |
10155 | if (db_info.type == SSD_DEBUG_LOG && dev->event_call && dev->gd) { | |
10156 | dev->event_call(dev->gd, db_info.data.log.event, 0); | |
10157 | } | |
10158 | ||
10159 | break; | |
10160 | } | |
10161 | ||
10162 | case SSD_CMD_DRV_PARAM_INFO: { | |
10163 | struct ssd_drv_param_info drv_param; | |
10164 | ||
10165 | memset(&drv_param, 0, sizeof(struct ssd_drv_param_info)); | |
10166 | ||
10167 | drv_param.mode = mode; | |
10168 | drv_param.status_mask = status_mask; | |
10169 | drv_param.int_mode = int_mode; | |
10170 | drv_param.threaded_irq = threaded_irq; | |
10171 | drv_param.log_level = log_level; | |
10172 | drv_param.wmode = wmode; | |
10173 | drv_param.ot_protect = ot_protect; | |
10174 | drv_param.finject = finject; | |
10175 | ||
10176 | if (copy_to_user(argp, &drv_param, sizeof(struct ssd_drv_param_info))) { | |
10177 | hio_warn("%s: copy_to_user: failed\n", dev->name); | |
10178 | ret = -EFAULT; | |
10179 | break; | |
10180 | } | |
10181 | break; | |
10182 | } | |
10183 | ||
10184 | default: | |
10185 | ret = -EINVAL; | |
10186 | break; | |
10187 | } | |
10188 | ||
10189 | return ret; | |
10190 | } | |
10191 | ||
10192 | ||
10193 | #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27)) | |
10194 | static int ssd_block_ioctl(struct inode *inode, struct file *file, | |
10195 | unsigned int cmd, unsigned long arg) | |
10196 | { | |
10197 | struct ssd_device *dev; | |
10198 | void __user *argp = (void __user *)arg; | |
10199 | int ret = 0; | |
10200 | ||
10201 | if (!inode) { | |
10202 | return -EINVAL; | |
10203 | } | |
10204 | dev = inode->i_bdev->bd_disk->private_data; | |
10205 | if (!dev) { | |
10206 | return -EINVAL; | |
10207 | } | |
10208 | #else | |
10209 | static int ssd_block_ioctl(struct block_device *bdev, fmode_t mode, | |
10210 | unsigned int cmd, unsigned long arg) | |
10211 | { | |
10212 | struct ssd_device *dev; | |
10213 | void __user *argp = (void __user *)arg; | |
10214 | int ret = 0; | |
10215 | ||
10216 | if (!bdev) { | |
10217 | return -EINVAL; | |
10218 | } | |
10219 | ||
10220 | dev = bdev->bd_disk->private_data; | |
10221 | if (!dev) { | |
10222 | return -EINVAL; | |
10223 | } | |
10224 | #endif | |
10225 | ||
10226 | switch (cmd) { | |
10227 | case HDIO_GETGEO: { | |
10228 | struct hd_geometry geo; | |
10229 | geo.cylinders = (dev->hw_info.size & ~0x3f) >> 6; | |
10230 | geo.heads = 4; | |
10231 | geo.sectors = 16; | |
10232 | #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27)) | |
10233 | geo.start = get_start_sect(inode->i_bdev); | |
10234 | #else | |
10235 | geo.start = get_start_sect(bdev); | |
10236 | #endif | |
10237 | if (copy_to_user(argp, &geo, sizeof(geo))) { | |
10238 | ret = -EFAULT; | |
10239 | break; | |
10240 | } | |
10241 | ||
10242 | break; | |
10243 | } | |
10244 | ||
10245 | case BLKFLSBUF: | |
10246 | ret = ssd_flush(dev); | |
10247 | if (ret) { | |
10248 | hio_warn("%s: ssd_flush: failed\n", dev->name); | |
10249 | ret = -EFAULT; | |
10250 | break; | |
10251 | } | |
10252 | break; | |
10253 | ||
10254 | default: | |
10255 | if (!dev->slave) { | |
10256 | ret = ssd_ioctl_common(dev, cmd, arg); | |
10257 | } else { | |
10258 | ret = -EFAULT; | |
10259 | } | |
10260 | break; | |
10261 | } | |
10262 | ||
10263 | return ret; | |
10264 | } | |
10265 | ||
10266 | ||
10267 | static void ssd_free_dev(struct kref *kref) | |
10268 | { | |
10269 | struct ssd_device *dev; | |
10270 | ||
10271 | if (!kref) { | |
10272 | return; | |
10273 | } | |
10274 | ||
10275 | dev = container_of(kref, struct ssd_device, kref); | |
10276 | ||
10277 | put_disk(dev->gd); | |
10278 | ||
10279 | ssd_put_index(dev->slave, dev->idx); | |
10280 | ||
10281 | kfree(dev); | |
10282 | } | |
10283 | ||
10284 | static void ssd_put(struct ssd_device *dev) | |
10285 | { | |
10286 | kref_put(&dev->kref, ssd_free_dev); | |
10287 | } | |
10288 | ||
10289 | static int ssd_get(struct ssd_device *dev) | |
10290 | { | |
10291 | kref_get(&dev->kref); | |
10292 | return 0; | |
10293 | } | |
10294 | ||
10295 | /* block device */ | |
10296 | #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27)) | |
10297 | static int ssd_block_open(struct inode *inode, struct file *filp) | |
10298 | { | |
10299 | struct ssd_device *dev; | |
10300 | ||
10301 | if (!inode) { | |
10302 | return -EINVAL; | |
10303 | } | |
10304 | ||
10305 | dev = inode->i_bdev->bd_disk->private_data; | |
10306 | if (!dev) { | |
10307 | return -EINVAL; | |
10308 | } | |
10309 | #else | |
10310 | static int ssd_block_open(struct block_device *bdev, fmode_t mode) | |
10311 | { | |
10312 | struct ssd_device *dev; | |
10313 | ||
10314 | if (!bdev) { | |
10315 | return -EINVAL; | |
10316 | } | |
10317 | ||
10318 | dev = bdev->bd_disk->private_data; | |
10319 | if (!dev) { | |
10320 | return -EINVAL; | |
10321 | } | |
10322 | #endif | |
10323 | ||
10324 | /*if (!try_module_get(dev->owner)) | |
10325 | return -ENODEV; | |
10326 | */ | |
10327 | ||
10328 | ssd_get(dev); | |
10329 | ||
10330 | atomic_inc(&dev->refcnt); | |
10331 | ||
10332 | return 0; | |
10333 | } | |
10334 | ||
10335 | #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27)) | |
10336 | static int ssd_block_release(struct inode *inode, struct file *filp) | |
10337 | { | |
10338 | struct ssd_device *dev; | |
10339 | ||
10340 | if (!inode) { | |
10341 | return -EINVAL; | |
10342 | } | |
10343 | ||
10344 | dev = inode->i_bdev->bd_disk->private_data; | |
10345 | if (!dev) { | |
10346 | return -EINVAL; | |
10347 | } | |
10348 | #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(3,9,0)) | |
10349 | static int ssd_block_release(struct gendisk *disk, fmode_t mode) | |
10350 | { | |
10351 | struct ssd_device *dev; | |
10352 | ||
10353 | if (!disk) { | |
10354 | return -EINVAL; | |
10355 | } | |
10356 | ||
10357 | dev = disk->private_data; | |
10358 | if (!dev) { | |
10359 | return -EINVAL; | |
10360 | } | |
10361 | #else | |
10362 | static void ssd_block_release(struct gendisk *disk, fmode_t mode) | |
10363 | { | |
10364 | struct ssd_device *dev; | |
10365 | ||
10366 | if (!disk) { | |
10367 | return; | |
10368 | } | |
10369 | ||
10370 | dev = disk->private_data; | |
10371 | if (!dev) { | |
10372 | return; | |
10373 | } | |
10374 | #endif | |
10375 | ||
10376 | atomic_dec(&dev->refcnt); | |
10377 | ||
10378 | ssd_put(dev); | |
10379 | ||
10380 | //module_put(dev->owner); | |
10381 | #if (LINUX_VERSION_CODE <= KERNEL_VERSION(3,9,0)) | |
10382 | return 0; | |
10383 | #endif | |
10384 | } | |
10385 | ||
10386 | static struct block_device_operations ssd_fops = { | |
10387 | .owner = THIS_MODULE, | |
10388 | .open = ssd_block_open, | |
10389 | .release = ssd_block_release, | |
10390 | .ioctl = ssd_block_ioctl, | |
10391 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)) | |
10392 | .getgeo = ssd_block_getgeo, | |
10393 | #endif | |
7fd8c57f SF |
10394 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,9,0)) |
10395 | .submit_bio = hio_submit_bio, | |
10396 | #endif | |
361ebed5 HSDT |
10397 | }; |
10398 | ||
10399 | static void ssd_init_trim(ssd_device_t *dev) | |
10400 | { | |
10401 | #if (defined SSD_TRIM && (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32))) | |
10402 | if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) { | |
10403 | return; | |
10404 | } | |
65a7cac1 TLSC |
10405 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,17,0)) |
10406 | blk_queue_flag_set(QUEUE_FLAG_DISCARD, dev->rq); | |
10407 | #else | |
361ebed5 | 10408 | queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, dev->rq); |
65a7cac1 | 10409 | #endif |
361ebed5 HSDT |
10410 | |
10411 | #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,33)) || (defined RHEL_MAJOR && RHEL_MAJOR >= 6)) | |
b44043bd | 10412 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,12,0)) |
361ebed5 | 10413 | dev->rq->limits.discard_zeroes_data = 1; |
b44043bd | 10414 | #endif |
361ebed5 HSDT |
10415 | dev->rq->limits.discard_alignment = 4096; |
10416 | dev->rq->limits.discard_granularity = 4096; | |
10417 | #endif | |
10418 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2_4) { | |
10419 | dev->rq->limits.max_discard_sectors = dev->hw_info.sg_max_sec; | |
10420 | } else { | |
10421 | dev->rq->limits.max_discard_sectors = (dev->hw_info.sg_max_sec) * (dev->hw_info.cmd_max_sg); | |
10422 | } | |
10423 | #endif | |
10424 | } | |
10425 | ||
10426 | static void ssd_cleanup_queue(struct ssd_device *dev) | |
10427 | { | |
10428 | ssd_wait_io(dev); | |
10429 | ||
10430 | blk_cleanup_queue(dev->rq); | |
10431 | dev->rq = NULL; | |
10432 | } | |
10433 | ||
10434 | static int ssd_init_queue(struct ssd_device *dev) | |
10435 | { | |
f0c64078 | 10436 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(5,7,0)) |
361ebed5 HSDT |
10437 | dev->rq = blk_alloc_queue(GFP_KERNEL); |
10438 | if (dev->rq == NULL) { | |
10439 | hio_warn("%s: alloc queue: failed\n ", dev->name); | |
10440 | goto out_init_queue; | |
10441 | } | |
10442 | ||
10443 | /* must be first */ | |
10444 | blk_queue_make_request(dev->rq, ssd_make_request); | |
f0c64078 | 10445 | #else |
7fd8c57f | 10446 | # if (LINUX_VERSION_CODE < KERNEL_VERSION(5,9,0)) |
f0c64078 | 10447 | dev->rq = blk_alloc_queue(ssd_make_request, NUMA_NO_NODE); |
7fd8c57f SF |
10448 | # else |
10449 | dev->rq = blk_alloc_queue(NUMA_NO_NODE); | |
10450 | # endif | |
f0c64078 PP |
10451 | if (dev->rq == NULL) { |
10452 | hio_warn("%s: blk_alloc_queue(): failed\n ", dev->name); | |
10453 | goto out_init_queue; | |
10454 | } | |
10455 | #endif | |
361ebed5 HSDT |
10456 | #if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34)) && !(defined RHEL_MAJOR && RHEL_MAJOR == 6)) |
10457 | blk_queue_max_hw_segments(dev->rq, dev->hw_info.cmd_max_sg); | |
10458 | blk_queue_max_phys_segments(dev->rq, dev->hw_info.cmd_max_sg); | |
10459 | blk_queue_max_sectors(dev->rq, dev->hw_info.sg_max_sec); | |
10460 | #else | |
10461 | blk_queue_max_segments(dev->rq, dev->hw_info.cmd_max_sg); | |
10462 | blk_queue_max_hw_sectors(dev->rq, dev->hw_info.sg_max_sec); | |
10463 | #endif | |
10464 | ||
10465 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)) | |
10466 | blk_queue_hardsect_size(dev->rq, 512); | |
10467 | #else | |
10468 | blk_queue_logical_block_size(dev->rq, 512); | |
10469 | #endif | |
10470 | /* not work for make_request based drivers(bio) */ | |
10471 | blk_queue_max_segment_size(dev->rq, dev->hw_info.sg_max_sec << 9); | |
10472 | ||
10473 | blk_queue_bounce_limit(dev->rq, BLK_BOUNCE_HIGH); | |
10474 | ||
10475 | dev->rq->queuedata = dev; | |
10476 | ||
10477 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)) | |
10478 | blk_queue_issue_flush_fn(dev->rq, ssd_issue_flush_fn); | |
10479 | #endif | |
10480 | ||
10481 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)) | |
65a7cac1 TLSC |
10482 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,17,0)) |
10483 | blk_queue_flag_set(QUEUE_FLAG_NONROT, dev->rq); | |
10484 | #else | |
361ebed5 | 10485 | queue_flag_set_unlocked(QUEUE_FLAG_NONROT, dev->rq); |
65a7cac1 | 10486 | #endif |
361ebed5 HSDT |
10487 | #endif |
10488 | ||
10489 | ssd_init_trim(dev); | |
10490 | ||
10491 | return 0; | |
10492 | ||
10493 | out_init_queue: | |
10494 | return -ENOMEM; | |
10495 | } | |
10496 | ||
10497 | static void ssd_cleanup_blkdev(struct ssd_device *dev) | |
10498 | { | |
10499 | del_gendisk(dev->gd); | |
10500 | } | |
10501 | ||
10502 | static int ssd_init_blkdev(struct ssd_device *dev) | |
10503 | { | |
10504 | if (dev->gd) { | |
10505 | put_disk(dev->gd); | |
10506 | } | |
10507 | ||
10508 | dev->gd = alloc_disk(ssd_minors); | |
10509 | if (!dev->gd) { | |
10510 | hio_warn("%s: alloc_disk fail\n", dev->name); | |
10511 | goto out_alloc_gd; | |
10512 | } | |
10513 | dev->gd->major = dev->major; | |
10514 | dev->gd->first_minor = dev->idx * ssd_minors; | |
10515 | dev->gd->fops = &ssd_fops; | |
10516 | dev->gd->queue = dev->rq; | |
10517 | dev->gd->private_data = dev; | |
1197134c | 10518 | |
361ebed5 HSDT |
10519 | snprintf (dev->gd->disk_name, sizeof(dev->gd->disk_name), "%s", dev->name); |
10520 | ||
10521 | set_capacity(dev->gd, dev->hw_info.size >> 9); | |
10522 | ||
b323cbe4 SF |
10523 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,20,0)) |
10524 | device_add_disk(&dev->pdev->dev, dev->gd, NULL); | |
10525 | #elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0)) | |
5e004b00 | 10526 | device_add_disk(&dev->pdev->dev, dev->gd); |
1197134c KM |
10527 | #else |
10528 | dev->gd->driverfs_dev = &dev->pdev->dev; | |
10529 | add_disk(dev->gd); | |
5e004b00 | 10530 | #endif |
361ebed5 HSDT |
10531 | |
10532 | return 0; | |
10533 | ||
10534 | out_alloc_gd: | |
10535 | return -ENOMEM; | |
10536 | } | |
10537 | ||
10538 | #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,10)) | |
10539 | static int ssd_ioctl(struct inode *inode, struct file *file, | |
10540 | unsigned int cmd, unsigned long arg) | |
10541 | #else | |
10542 | static long ssd_ioctl(struct file *file, | |
10543 | unsigned int cmd, unsigned long arg) | |
10544 | #endif | |
10545 | { | |
10546 | struct ssd_device *dev; | |
10547 | ||
10548 | if (!file) { | |
10549 | return -EINVAL; | |
10550 | } | |
10551 | ||
10552 | dev = file->private_data; | |
10553 | if (!dev) { | |
10554 | return -EINVAL; | |
10555 | } | |
10556 | ||
10557 | return (long)ssd_ioctl_common(dev, cmd, arg); | |
10558 | } | |
10559 | ||
10560 | static int ssd_open(struct inode *inode, struct file *file) | |
10561 | { | |
10562 | struct ssd_device *dev = NULL; | |
10563 | struct ssd_device *n = NULL; | |
10564 | int idx; | |
10565 | int ret = -ENODEV; | |
10566 | ||
10567 | if (!inode || !file) { | |
10568 | return -EINVAL; | |
10569 | } | |
10570 | ||
10571 | idx = iminor(inode); | |
10572 | ||
10573 | list_for_each_entry_safe(dev, n, &ssd_list, list) { | |
10574 | if (dev->idx == idx) { | |
10575 | ret = 0; | |
10576 | break; | |
10577 | } | |
10578 | } | |
10579 | ||
10580 | if (ret) { | |
10581 | return ret; | |
10582 | } | |
10583 | ||
10584 | file->private_data = dev; | |
10585 | ||
10586 | ssd_get(dev); | |
10587 | ||
10588 | return 0; | |
10589 | } | |
10590 | ||
10591 | static int ssd_release(struct inode *inode, struct file *file) | |
10592 | { | |
10593 | struct ssd_device *dev; | |
10594 | ||
10595 | if (!file) { | |
10596 | return -EINVAL; | |
10597 | } | |
10598 | ||
10599 | dev = file->private_data; | |
10600 | if (!dev) { | |
10601 | return -EINVAL; | |
10602 | } | |
10603 | ||
10604 | ssd_put(dev); | |
10605 | ||
10606 | file->private_data = NULL; | |
10607 | ||
10608 | return 0; | |
10609 | } | |
10610 | ||
1197134c KM |
10611 | static int ssd_reload_ssd_ptr(struct ssd_device *dev) |
10612 | { | |
10613 | ssd_reset_resp_ptr(dev); | |
10614 | ||
10615 | //update base reg address | |
10616 | if (dev->protocol_info.ver >= SSD_PROTOCOL_V3) { | |
10617 | ||
10618 | ssd_reg_write(dev->ctrlp + SSD_MSG_BASE_REG, dev->msg_base_dma); | |
10619 | } | |
10620 | ||
10621 | //update response base reg address | |
10622 | ssd_reg_write(dev->ctrlp + SSD_RESP_FIFO_REG, dev->resp_msg_base_dma); | |
10623 | ssd_reg_write(dev->ctrlp + SSD_RESP_PTR_REG, dev->resp_ptr_base_dma); | |
10624 | ||
10625 | return 0; | |
10626 | } | |
10627 | ||
361ebed5 HSDT |
10628 | static struct file_operations ssd_cfops = { |
10629 | .owner = THIS_MODULE, | |
10630 | .open = ssd_open, | |
10631 | .release = ssd_release, | |
10632 | #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,10)) | |
10633 | .ioctl = ssd_ioctl, | |
10634 | #else | |
10635 | .unlocked_ioctl = ssd_ioctl, | |
10636 | #endif | |
10637 | }; | |
10638 | ||
10639 | static void ssd_cleanup_chardev(struct ssd_device *dev) | |
10640 | { | |
10641 | if (dev->slave) { | |
10642 | return; | |
10643 | } | |
10644 | ||
10645 | #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12)) | |
10646 | class_simple_device_remove(MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx)); | |
10647 | devfs_remove("c%s", dev->name); | |
10648 | #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,14)) | |
10649 | class_device_destroy(ssd_class, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx)); | |
10650 | devfs_remove("c%s", dev->name); | |
10651 | #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17)) | |
10652 | class_device_destroy(ssd_class, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx)); | |
10653 | devfs_remove("c%s", dev->name); | |
10654 | #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,24)) | |
10655 | class_device_destroy(ssd_class, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx)); | |
10656 | #else | |
10657 | device_destroy(ssd_class, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx)); | |
10658 | #endif | |
10659 | } | |
10660 | ||
10661 | static int ssd_init_chardev(struct ssd_device *dev) | |
10662 | { | |
10663 | int ret = 0; | |
10664 | ||
10665 | if (dev->slave) { | |
10666 | return 0; | |
10667 | } | |
10668 | ||
10669 | #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12)) | |
10670 | ret = devfs_mk_cdev(MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx), S_IFCHR|S_IRUSR|S_IWUSR, "c%s", dev->name); | |
10671 | if (ret) { | |
10672 | goto out; | |
10673 | } | |
10674 | class_simple_device_add(ssd_class, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx), NULL, "c%s", dev->name); | |
10675 | out: | |
10676 | #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,14)) | |
10677 | ret = devfs_mk_cdev(MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx), S_IFCHR|S_IRUSR|S_IWUSR, "c%s", dev->name); | |
10678 | if (ret) { | |
10679 | goto out; | |
10680 | } | |
10681 | class_device_create(ssd_class, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx), NULL, "c%s", dev->name); | |
10682 | out: | |
10683 | #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17)) | |
10684 | ret = devfs_mk_cdev(MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx), S_IFCHR|S_IRUSR|S_IWUSR, "c%s", dev->name); | |
10685 | if (ret) { | |
10686 | goto out; | |
10687 | } | |
10688 | class_device_create(ssd_class, NULL, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx), NULL, "c%s", dev->name); | |
10689 | out: | |
10690 | #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,24)) | |
10691 | class_device_create(ssd_class, NULL, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx), NULL, "c%s", dev->name); | |
10692 | #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,26)) | |
10693 | device_create(ssd_class, NULL, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx), "c%s", dev->name); | |
10694 | #elif (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,27)) | |
10695 | device_create_drvdata(ssd_class, NULL, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx), NULL, "c%s", dev->name); | |
10696 | #else | |
10697 | device_create(ssd_class, NULL, MKDEV((dev_t)dev->cmajor, (dev_t)dev->idx), NULL, "c%s", dev->name); | |
10698 | #endif | |
10699 | ||
10700 | return ret; | |
10701 | } | |
10702 | ||
10703 | static int ssd_check_hw(struct ssd_device *dev) | |
10704 | { | |
10705 | uint32_t test_data = 0x55AA5AA5; | |
10706 | uint32_t read_data; | |
10707 | ||
10708 | ssd_reg32_write(dev->ctrlp + SSD_BRIDGE_TEST_REG, test_data); | |
10709 | read_data = ssd_reg32_read(dev->ctrlp + SSD_BRIDGE_TEST_REG); | |
10710 | if (read_data != ~(test_data)) { | |
10711 | //hio_warn("%s: check bridge error: %#x\n", dev->name, read_data); | |
10712 | return -1; | |
10713 | } | |
10714 | ||
10715 | return 0; | |
10716 | } | |
10717 | ||
10718 | static int ssd_check_fw(struct ssd_device *dev) | |
10719 | { | |
10720 | uint32_t val = 0; | |
10721 | int i; | |
10722 | ||
10723 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_1_3) { | |
10724 | return 0; | |
10725 | } | |
10726 | ||
10727 | for (i=0; i<SSD_CONTROLLER_WAIT; i++) { | |
10728 | val = ssd_reg32_read(dev->ctrlp + SSD_HW_STATUS_REG); | |
10729 | if ((val & 0x1) && ((val >> 8) & 0x1)) { | |
10730 | break; | |
10731 | } | |
10732 | ||
10733 | msleep(SSD_INIT_WAIT); | |
10734 | } | |
10735 | ||
10736 | if (!(val & 0x1)) { | |
10737 | /* controller fw status */ | |
10738 | hio_warn("%s: controller firmware load failed: %#x\n", dev->name, val); | |
10739 | return -1; | |
10740 | } else if (!((val >> 8) & 0x1)) { | |
10741 | /* controller state */ | |
10742 | hio_warn("%s: controller state error: %#x\n", dev->name, val); | |
10743 | return -1; | |
10744 | } | |
10745 | ||
10746 | val = ssd_reg32_read(dev->ctrlp + SSD_RELOAD_FW_REG); | |
10747 | if (val) { | |
10748 | dev->reload_fw = 1; | |
10749 | } | |
10750 | ||
10751 | return 0; | |
10752 | } | |
10753 | ||
10754 | static int ssd_init_fw_info(struct ssd_device *dev) | |
10755 | { | |
10756 | uint32_t val; | |
10757 | int ret = 0; | |
10758 | ||
10759 | val = ssd_reg32_read(dev->ctrlp + SSD_BRIDGE_VER_REG); | |
10760 | dev->hw_info.bridge_ver = val & 0xFFF; | |
10761 | if (dev->hw_info.bridge_ver < SSD_FW_MIN) { | |
10762 | hio_warn("%s: bridge firmware version %03X is not supported\n", dev->name, dev->hw_info.bridge_ver); | |
10763 | return -EINVAL; | |
10764 | } | |
10765 | hio_info("%s: bridge firmware version: %03X\n", dev->name, dev->hw_info.bridge_ver); | |
10766 | ||
10767 | ret = ssd_check_fw(dev); | |
10768 | if (ret) { | |
10769 | goto out; | |
10770 | } | |
10771 | ||
10772 | out: | |
10773 | /* skip error if not in standard mode */ | |
10774 | if (mode != SSD_DRV_MODE_STANDARD) { | |
10775 | ret = 0; | |
10776 | } | |
10777 | return ret; | |
10778 | } | |
10779 | ||
10780 | static int ssd_check_clock(struct ssd_device *dev) | |
10781 | { | |
10782 | uint32_t val; | |
10783 | int ret = 0; | |
10784 | ||
10785 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_1_3) { | |
10786 | return 0; | |
10787 | } | |
10788 | ||
10789 | val = ssd_reg32_read(dev->ctrlp + SSD_HW_STATUS_REG); | |
10790 | ||
10791 | /* clock status */ | |
10792 | if (!((val >> 4 ) & 0x1)) { | |
10793 | if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_166M_LOST), &dev->hwmon)) { | |
10794 | hio_warn("%s: 166MHz clock losed: %#x\n", dev->name, val); | |
10795 | ssd_gen_swlog(dev, SSD_LOG_CLK_FAULT, val); | |
10796 | } | |
10797 | ret = -1; | |
10798 | } | |
10799 | ||
10800 | if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) { | |
10801 | if (!((val >> 5 ) & 0x1)) { | |
10802 | if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_166M_SKEW), &dev->hwmon)) { | |
10803 | hio_warn("%s: 166MHz clock is skew: %#x\n", dev->name, val); | |
10804 | ssd_gen_swlog(dev, SSD_LOG_CLK_FAULT, val); | |
10805 | } | |
10806 | ret = -1; | |
10807 | } | |
10808 | if (!((val >> 6 ) & 0x1)) { | |
10809 | if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_156M_LOST), &dev->hwmon)) { | |
10810 | hio_warn("%s: 156.25MHz clock lost: %#x\n", dev->name, val); | |
10811 | ssd_gen_swlog(dev, SSD_LOG_CLK_FAULT, val); | |
10812 | } | |
10813 | ret = -1; | |
10814 | } | |
10815 | if (!((val >> 7 ) & 0x1)) { | |
10816 | if (!test_and_set_bit(SSD_HWMON_CLOCK(SSD_CLOCK_156M_SKEW), &dev->hwmon)) { | |
10817 | hio_warn("%s: 156.25MHz clock is skew: %#x\n", dev->name, val); | |
10818 | ssd_gen_swlog(dev, SSD_LOG_CLK_FAULT, val); | |
10819 | } | |
10820 | ret = -1; | |
10821 | } | |
10822 | } | |
10823 | ||
10824 | return ret; | |
10825 | } | |
10826 | ||
/*
 * Check the FPGA voltage rails (1.0V and 1.8V) of every controller.
 *
 * For each rail one 64-bit register latches the ADC extremes; both the
 * max sample (SSD_FPGA_VOLT_MAX) and the min sample (SSD_FPGA_VOLT_MIN)
 * must stay inside the rail's allowed ADC window.  A fault is reported
 * (warning + swlog) only once per controller/rail: the SSD_HWMON_FPGA
 * bit in dev->hwmon is set on first detection and checked on entry, so
 * an already-faulted rail is skipped on later calls.
 *
 * Returns 0 if all rails are fine, otherwise a negative code identifying
 * the last failing check (-1/-2: 1.0V max/min, -3/-4: 1.8V max/min);
 * later failures overwrite earlier ones.  Only supported from protocol
 * V3.2 on.
 */
static int ssd_check_volt(struct ssd_device *dev)
{
	int i = 0;
	uint64_t val;
	uint32_t adc_val;
	int ret =0;

	if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
		return 0;
	}

	for (i=0; i<dev->hw_info.nr_ctrl; i++) {
		/* 1.0v */
		if (!test_bit(SSD_HWMON_FPGA(i, SSD_FPGA_1V0), &dev->hwmon)) {
			/* one register zone per controller, SSD_CTRL_REG_ZONE_SZ apart */
			val = ssd_reg_read(dev->ctrlp + SSD_FPGA_1V0_REG0 + i * SSD_CTRL_REG_ZONE_SZ);
			adc_val = SSD_FPGA_VOLT_MAX(val);
			if (adc_val < SSD_FPGA_1V0_ADC_MIN || adc_val > SSD_FPGA_1V0_ADC_MAX) {
				(void)test_and_set_bit(SSD_HWMON_FPGA(i, SSD_FPGA_1V0), &dev->hwmon);
				hio_warn("%s: controller %d 1.0V fault: %d mV.\n", dev->name, i, SSD_FPGA_VOLT(adc_val));
				ssd_gen_swlog(dev, SSD_LOG_VOLT_FAULT, SSD_VOLT_LOG_DATA(SSD_FPGA_1V0, i, adc_val));
				ret = -1;
			}

			adc_val = SSD_FPGA_VOLT_MIN(val);
			if (adc_val < SSD_FPGA_1V0_ADC_MIN || adc_val > SSD_FPGA_1V0_ADC_MAX) {
				(void)test_and_set_bit(SSD_HWMON_FPGA(i, SSD_FPGA_1V0), &dev->hwmon);
				hio_warn("%s: controller %d 1.0V fault: %d mV.\n", dev->name, i, SSD_FPGA_VOLT(adc_val));
				ssd_gen_swlog(dev, SSD_LOG_VOLT_FAULT, SSD_VOLT_LOG_DATA(SSD_FPGA_1V0, i, adc_val));
				ret = -2;
			}
		}

		/* 1.8v */
		if (!test_bit(SSD_HWMON_FPGA(i, SSD_FPGA_1V8), &dev->hwmon)) {
			val = ssd_reg_read(dev->ctrlp + SSD_FPGA_1V8_REG0 + i * SSD_CTRL_REG_ZONE_SZ);
			adc_val = SSD_FPGA_VOLT_MAX(val);
			if (adc_val < SSD_FPGA_1V8_ADC_MIN || adc_val > SSD_FPGA_1V8_ADC_MAX) {
				(void)test_and_set_bit(SSD_HWMON_FPGA(i, SSD_FPGA_1V8), &dev->hwmon);
				hio_warn("%s: controller %d 1.8V fault: %d mV.\n", dev->name, i, SSD_FPGA_VOLT(adc_val));
				ssd_gen_swlog(dev, SSD_LOG_VOLT_FAULT, SSD_VOLT_LOG_DATA(SSD_FPGA_1V8, i, adc_val));
				ret = -3;
			}

			adc_val = SSD_FPGA_VOLT_MIN(val);
			if (adc_val < SSD_FPGA_1V8_ADC_MIN || adc_val > SSD_FPGA_1V8_ADC_MAX) {
				(void)test_and_set_bit(SSD_HWMON_FPGA(i, SSD_FPGA_1V8), &dev->hwmon);
				hio_warn("%s: controller %d 1.8V fault: %d mV.\n", dev->name, i, SSD_FPGA_VOLT(adc_val));
				ssd_gen_swlog(dev, SSD_LOG_VOLT_FAULT, SSD_VOLT_LOG_DATA(SSD_FPGA_1V8, i, adc_val));
				ret = -4;
			}
		}
	}

	return ret;
}
10882 | ||
10883 | static int ssd_check_reset_sync(struct ssd_device *dev) | |
10884 | { | |
10885 | uint32_t val; | |
10886 | ||
10887 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_1_3) { | |
10888 | return 0; | |
10889 | } | |
10890 | ||
10891 | val = ssd_reg32_read(dev->ctrlp + SSD_HW_STATUS_REG); | |
10892 | if (!((val >> 8) & 0x1)) { | |
10893 | /* controller state */ | |
10894 | hio_warn("%s: controller state error: %#x\n", dev->name, val); | |
10895 | return -1; | |
10896 | } | |
10897 | ||
10898 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
10899 | return 0; | |
10900 | } | |
10901 | ||
10902 | if (((val >> 9 ) & 0x1)) { | |
10903 | hio_warn("%s: controller reset asynchronously: %#x\n", dev->name, val); | |
10904 | ssd_gen_swlog(dev, SSD_LOG_CTRL_RST_SYNC, val); | |
10905 | return -1; | |
10906 | } | |
10907 | ||
10908 | return 0; | |
10909 | } | |
10910 | ||
10911 | static int ssd_check_hw_bh(struct ssd_device *dev) | |
10912 | { | |
10913 | int ret; | |
10914 | ||
10915 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_1_3) { | |
10916 | return 0; | |
10917 | } | |
10918 | ||
10919 | /* clock status */ | |
10920 | ret = ssd_check_clock(dev); | |
10921 | if (ret) { | |
10922 | goto out; | |
10923 | } | |
10924 | ||
10925 | out: | |
10926 | /* skip error if not in standard mode */ | |
10927 | if (mode != SSD_DRV_MODE_STANDARD) { | |
10928 | ret = 0; | |
10929 | } | |
10930 | return ret; | |
10931 | } | |
10932 | ||
10933 | static int ssd_check_controller(struct ssd_device *dev) | |
10934 | { | |
10935 | int ret; | |
10936 | ||
10937 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_1_3) { | |
10938 | return 0; | |
10939 | } | |
10940 | ||
10941 | /* sync reset */ | |
10942 | ret = ssd_check_reset_sync(dev); | |
10943 | if (ret) { | |
10944 | goto out; | |
10945 | } | |
10946 | ||
10947 | out: | |
10948 | /* skip error if not in standard mode */ | |
10949 | if (mode != SSD_DRV_MODE_STANDARD) { | |
10950 | ret = 0; | |
10951 | } | |
10952 | return ret; | |
10953 | } | |
10954 | ||
/*
 * Controller check, bottom half: readiness, per-controller test pattern,
 * voltage rails, DDR init state and channel-info readiness.
 *
 * Only runs in standard driver mode; all other modes succeed trivially.
 * Returns 0 on success, non-zero on the first failure found.
 */
static int ssd_check_controller_bh(struct ssd_device *dev)
{
	uint32_t test_data = 0x55AA5AA5;
	uint32_t val;
	int reg_base, reg_sz;
	int init_wait = 0;
	int i;
	int ret = 0;

	if (mode != SSD_DRV_MODE_STANDARD) {
		return 0;
	}

	/* controller: ready register bit 0 must be clear */
	val = ssd_reg32_read(dev->ctrlp + SSD_READY_REG);
	if (val & 0x1) {
		hio_warn("%s: controller 0 not ready\n", dev->name);
		return -1;
	}

	/* each controller's test register must echo the pattern's complement */
	for (i=0; i<dev->hw_info.nr_ctrl; i++) {
		reg_base = SSD_CTRL_TEST_REG0 + i * SSD_CTRL_TEST_REG_SZ;
		ssd_reg32_write(dev->ctrlp + reg_base, test_data);
		val = ssd_reg32_read(dev->ctrlp + reg_base);
		if (val != ~(test_data)) {
			hio_warn("%s: check controller %d error: %#x\n", dev->name, i, val);
			return -1;
		}
	}

	/* voltage rails (NB: this checks voltage, not clocks) */
	ret = ssd_check_volt(dev);
	if (ret) {
		return ret;
	}

	/* ddr */
	if (dev->protocol_info.ver > SSD_PROTOCOL_V3) {
		reg_base = SSD_PV3_RAM_STATUS_REG0;
		reg_sz = SSD_PV3_RAM_STATUS_REG_SZ;

		for (i=0; i<dev->hw_info.nr_ctrl; i++) {
check_ram_status:
			val = ssd_reg32_read(dev->ctrlp + reg_base);

			/* bit 1 set => ram init done; otherwise retry.  Note that
			 * init_wait is cumulative across controllers, so the
			 * SSD_RAM_INIT_MAX_WAIT budget is shared by all of them. */
			if (!((val >> 1) & 0x1)) {
				init_wait++;
				if (init_wait <= SSD_RAM_INIT_MAX_WAIT) {
					msleep(SSD_INIT_WAIT);
					goto check_ram_status;
				} else {
					hio_warn("%s: controller %d ram init failed: %#x\n", dev->name, i, val);
					ssd_gen_swlog(dev, SSD_LOG_DDR_INIT_ERR, i);
					return -1;
				}
			}

			reg_base += reg_sz;
		}
	}

	/* ch info: wait for the busy bit (31) to clear */
	for (i=0; i<SSD_CH_INFO_MAX_WAIT; i++) {
		val = ssd_reg32_read(dev->ctrlp + SSD_CH_INFO_REG);
		if (!((val >> 31) & 0x1)) {
			break;
		}

		msleep(SSD_INIT_WAIT);
	}
	if ((val >> 31) & 0x1) {
		hio_warn("%s: channel info init failed: %#x\n", dev->name, val);
		return -1;
	}

	return 0;
}
11032 | ||
11033 | static int ssd_init_protocol_info(struct ssd_device *dev) | |
11034 | { | |
11035 | uint32_t val; | |
11036 | ||
11037 | val = ssd_reg32_read(dev->ctrlp + SSD_PROTOCOL_VER_REG); | |
11038 | if (val == (uint32_t)-1) { | |
11039 | hio_warn("%s: protocol version error: %#x\n", dev->name, val); | |
11040 | return -EINVAL; | |
11041 | } | |
11042 | dev->protocol_info.ver = val; | |
11043 | ||
11044 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3) { | |
11045 | dev->protocol_info.init_state_reg = SSD_INIT_STATE_REG0; | |
11046 | dev->protocol_info.init_state_reg_sz = SSD_INIT_STATE_REG_SZ; | |
11047 | ||
11048 | dev->protocol_info.chip_info_reg = SSD_CHIP_INFO_REG0; | |
11049 | dev->protocol_info.chip_info_reg_sz = SSD_CHIP_INFO_REG_SZ; | |
11050 | } else { | |
11051 | dev->protocol_info.init_state_reg = SSD_PV3_INIT_STATE_REG0; | |
11052 | dev->protocol_info.init_state_reg_sz = SSD_PV3_INIT_STATE_REG_SZ; | |
11053 | ||
11054 | dev->protocol_info.chip_info_reg = SSD_PV3_CHIP_INFO_REG0; | |
11055 | dev->protocol_info.chip_info_reg_sz = SSD_PV3_CHIP_INFO_REG_SZ; | |
11056 | } | |
11057 | ||
11058 | return 0; | |
11059 | } | |
11060 | ||
/*
 * Read and validate the bridge/controller hardware description registers
 * and fill in dev->hw_info / dev->hw_info_ext.
 *
 * Decodes response/command FIFO geometry, runs the hardware and
 * controller health checks, then (depending on driver mode) the channel,
 * RAM, flash and reserved-block layout, and finally derives the usable
 * device size and extended info (board type, form factor, PLP, work
 * mode).  In any mode other than standard, errors are logged but the
 * function still returns 0 so diagnostic tools can attach to a
 * partially broken card.
 */
static int ssd_init_hw_info(struct ssd_device *dev)
{
	uint64_t val64;
	uint32_t val;
	uint32_t nr_ctrl;
	int ret = 0;

	/* base info: sizes are encoded as 16 * 2^n */
	val = ssd_reg32_read(dev->ctrlp + SSD_RESP_INFO_REG);
	dev->hw_info.resp_ptr_sz = 16 * (1U << (val & 0xFF));
	dev->hw_info.resp_msg_sz = 16 * (1U << ((val >> 8) & 0xFF));

	if (0 == dev->hw_info.resp_ptr_sz || 0 == dev->hw_info.resp_msg_sz) {
		hio_warn("%s: response info error\n", dev->name);
		ret = -EINVAL;
		goto out;
	}

	/* command FIFO geometry, all power-of-two encoded */
	val = ssd_reg32_read(dev->ctrlp + SSD_BRIDGE_INFO_REG);
	dev->hw_info.cmd_fifo_sz = 1U << ((val >> 4) & 0xF);
	dev->hw_info.cmd_max_sg = 1U << ((val >> 8) & 0xF);
	dev->hw_info.sg_max_sec = 1U << ((val >> 12) & 0xF);
	dev->hw_info.cmd_fifo_sz_mask = dev->hw_info.cmd_fifo_sz - 1;

	if (0 == dev->hw_info.cmd_fifo_sz || 0 == dev->hw_info.cmd_max_sg || 0 == dev->hw_info.sg_max_sec) {
		hio_warn("%s: cmd info error\n", dev->name);
		ret = -EINVAL;
		goto out;
	}

	/* check hw */
	if (ssd_check_hw_bh(dev)) {
		hio_warn("%s: check hardware status failed\n", dev->name);
		ret = -EINVAL;
		goto out;
	}

	if (ssd_check_controller(dev)) {
		hio_warn("%s: check controller state failed\n", dev->name);
		ret = -EINVAL;
		goto out;
	}

	/* nr controller : read again*/
	val = ssd_reg32_read(dev->ctrlp + SSD_BRIDGE_INFO_REG);
	dev->hw_info.nr_ctrl = (val >> 16) & 0xF;

	/* nr ctrl configured: 0 means "not configured", otherwise must match */
	nr_ctrl = (val >> 20) & 0xF;
	if (0 == dev->hw_info.nr_ctrl) {
		hio_warn("%s: nr controller error: %u\n", dev->name, dev->hw_info.nr_ctrl);
		ret = -EINVAL;
		goto out;
	} else if (0 != nr_ctrl && nr_ctrl != dev->hw_info.nr_ctrl) {
		hio_warn("%s: nr controller error: configured %u but found %u\n", dev->name, nr_ctrl, dev->hw_info.nr_ctrl);
		if (mode <= SSD_DRV_MODE_STANDARD) {
			ret = -EINVAL;
			goto out;
		}
	}

	if (ssd_check_controller_bh(dev)) {
		hio_warn("%s: check controller failed\n", dev->name);
		ret = -EINVAL;
		goto out;
	}

	/* PCB version encoded as letter index; 0xF in the low nibble means
	 * "no upper board present" */
	val = ssd_reg32_read(dev->ctrlp + SSD_PCB_VER_REG);
	dev->hw_info.pcb_ver = (uint8_t) ((val >> 4) & 0xF) + 'A' -1;
	if ((val & 0xF) != 0xF) {
		dev->hw_info.upper_pcb_ver = (uint8_t) (val & 0xF) + 'A' -1;
	}

	if (dev->hw_info.pcb_ver < 'A' || (0 != dev->hw_info.upper_pcb_ver && dev->hw_info.upper_pcb_ver < 'A')) {
		hio_warn("%s: PCB version error: %#x %#x\n", dev->name, dev->hw_info.pcb_ver, dev->hw_info.upper_pcb_ver);
		ret = -EINVAL;
		goto out;
	}

	/* channel info */
	if (mode <= SSD_DRV_MODE_DEBUG) {
		val = ssd_reg32_read(dev->ctrlp + SSD_CH_INFO_REG);
		dev->hw_info.nr_data_ch = val & 0xFF;
		dev->hw_info.nr_ch = dev->hw_info.nr_data_ch + ((val >> 8) & 0xFF);
		dev->hw_info.nr_chip = (val >> 16) & 0xFF;

		if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
			/* round nr_ch up to the next power of two */
			dev->hw_info.max_ch = 1;
			while (dev->hw_info.max_ch < dev->hw_info.nr_ch) dev->hw_info.max_ch <<= 1;
		} else {
			/* set max channel 32 */
			dev->hw_info.max_ch = 32;
		}

		if (0 == dev->hw_info.nr_chip) {
			//for debug mode
			dev->hw_info.nr_chip = 1;
		}

		/* fixed NAND geometry constants */
		dev->hw_info.id_size = SSD_NAND_ID_SZ;
		dev->hw_info.max_ce = SSD_NAND_MAX_CE;

		if (0 == dev->hw_info.nr_data_ch || 0 == dev->hw_info.nr_ch || 0 == dev->hw_info.nr_chip) {
			hio_warn("%s: channel info error: data_ch %u ch %u chip %u\n", dev->name, dev->hw_info.nr_data_ch, dev->hw_info.nr_ch, dev->hw_info.nr_chip);
			ret = -EINVAL;
			goto out;
		}
	}

	/* ram info */
	if (mode <= SSD_DRV_MODE_DEBUG) {
		val = ssd_reg32_read(dev->ctrlp + SSD_RAM_INFO_REG);
		/* size in 64MB units, power-of-two encoded */
		dev->hw_info.ram_size = 0x4000000ull * (1ULL << (val & 0xF));
		dev->hw_info.ram_align = 1U << ((val >> 12) & 0xF);
		if (dev->hw_info.ram_align < SSD_RAM_ALIGN) {
			if (dev->protocol_info.ver < SSD_PROTOCOL_V3) {
				/* legacy hw may report too small an alignment: clamp up */
				dev->hw_info.ram_align = SSD_RAM_ALIGN;
			} else {
				hio_warn("%s: ram align error: %u\n", dev->name, dev->hw_info.ram_align);
				ret = -EINVAL;
				goto out;
			}
		}
		dev->hw_info.ram_max_len = 0x1000 * (1U << ((val >> 16) & 0xF));

		if (0 == dev->hw_info.ram_size || 0 == dev->hw_info.ram_align || 0 == dev->hw_info.ram_max_len || dev->hw_info.ram_align > dev->hw_info.ram_max_len) {
			hio_warn("%s: ram info error\n", dev->name);
			ret = -EINVAL;
			goto out;
		}

		if (dev->protocol_info.ver < SSD_PROTOCOL_V3) {
			dev->hw_info.log_sz = SSD_LOG_MAX_SZ;
		} else {
			val = ssd_reg32_read(dev->ctrlp + SSD_LOG_INFO_REG);
			dev->hw_info.log_sz = 0x1000 * (1U << (val & 0xFF));
		}
		if (0 == dev->hw_info.log_sz) {
			hio_warn("%s: log size error\n", dev->name);
			ret = -EINVAL;
			goto out;
		}

		/* bad-block table: base/size in 256KB units, split per ch/chip */
		val = ssd_reg32_read(dev->ctrlp + SSD_BBT_BASE_REG);
		dev->hw_info.bbt_base = 0x40000ull * (val & 0xFFFF);
		dev->hw_info.bbt_size = 0x40000 * (((val >> 16) & 0xFFFF) + 1) / (dev->hw_info.max_ch * dev->hw_info.nr_chip);
		if (dev->protocol_info.ver < SSD_PROTOCOL_V3) {
			if (dev->hw_info.bbt_base > dev->hw_info.ram_size || 0 == dev->hw_info.bbt_size) {
				hio_warn("%s: bbt info error\n", dev->name);
				ret = -EINVAL;
				goto out;
			}
		}

		/* metadata (erase-count table) layout */
		val = ssd_reg32_read(dev->ctrlp + SSD_ECT_BASE_REG);
		dev->hw_info.md_base = 0x40000ull * (val & 0xFFFF);
		if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) {
			dev->hw_info.md_size = 0x40000 * (((val >> 16) & 0xFFF) + 1) / (dev->hw_info.max_ch * dev->hw_info.nr_chip);
		} else {
			dev->hw_info.md_size = 0x40000 * (((val >> 16) & 0xFFF) + 1) / (dev->hw_info.nr_chip);
		}
		dev->hw_info.md_entry_sz = 8 * (1U << ((val >> 28) & 0xF));
		if (dev->protocol_info.ver >= SSD_PROTOCOL_V3) {
			if (dev->hw_info.md_base > dev->hw_info.ram_size || 0 == dev->hw_info.md_size ||
				0 == dev->hw_info.md_entry_sz || dev->hw_info.md_entry_sz > dev->hw_info.md_size) {
				hio_warn("%s: md info error\n", dev->name);
				ret = -EINVAL;
				goto out;
			}
		}

		if (dev->protocol_info.ver < SSD_PROTOCOL_V3) {
			/* legacy: place the write-buffer base past the RAM so it is
			 * never considered in-RAM */
			dev->hw_info.nand_wbuff_base = dev->hw_info.ram_size + 1;
		} else {
			val = ssd_reg32_read(dev->ctrlp + SSD_NAND_BUFF_BASE);
			dev->hw_info.nand_wbuff_base = 0x8000ull * val;
		}
	}

	/* flash info */
	if (mode <= SSD_DRV_MODE_DEBUG) {
		if (dev->hw_info.nr_ctrl > 1) {
			val = ssd_reg32_read(dev->ctrlp + SSD_CTRL_VER_REG);
			dev->hw_info.ctrl_ver = val & 0xFFF;
			hio_info("%s: controller firmware version: %03X\n", dev->name, dev->hw_info.ctrl_ver);
		}

		/* 64-bit flash descriptor: vendor/device id, blocks, pages, page size */
		val64 = ssd_reg_read(dev->ctrlp + SSD_FLASH_INFO_REG0);
		dev->hw_info.nand_vendor_id = ((val64 >> 56) & 0xFF);
		dev->hw_info.nand_dev_id = ((val64 >> 48) & 0xFF);

		dev->hw_info.block_count = (((val64 >> 32) & 0xFFFF) + 1);
		dev->hw_info.page_count = ((val64>>16) & 0xFFFF);
		dev->hw_info.page_size = (val64 & 0xFFFF);

		val = ssd_reg32_read(dev->ctrlp + SSD_BB_INFO_REG);
		dev->hw_info.bbf_pages = val & 0xFF;
		dev->hw_info.bbf_seek = (val >> 8) & 0x1;

		if (0 == dev->hw_info.block_count || 0 == dev->hw_info.page_count || 0 == dev->hw_info.page_size || dev->hw_info.block_count > INT_MAX) {
			hio_warn("%s: flash info error\n", dev->name);
			ret = -EINVAL;
			goto out;
		}

		//xx
		dev->hw_info.oob_size = SSD_NAND_OOB_SZ; //(dev->hw_info.page_size) >> 5;

		/* valid-page counters; field widths grew in protocol V3.2 */
		val = ssd_reg32_read(dev->ctrlp + SSD_VALID_PAGES_REG);
		if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
			dev->hw_info.valid_pages = val & 0x3FF;
			dev->hw_info.max_valid_pages = (val>>20) & 0x3FF;
		} else {
			dev->hw_info.valid_pages = val & 0x7FFF;
			dev->hw_info.max_valid_pages = (val>>15) & 0x7FFF;
		}
		if (0 == dev->hw_info.valid_pages || 0 == dev->hw_info.max_valid_pages ||
			dev->hw_info.valid_pages > dev->hw_info.max_valid_pages || dev->hw_info.max_valid_pages > dev->hw_info.page_count) {
			hio_warn("%s: valid page info error: valid_pages %d, max_valid_pages %d\n", dev->name, dev->hw_info.valid_pages, dev->hw_info.max_valid_pages);
			ret = -EINVAL;
			goto out;
		}

		val = ssd_reg32_read(dev->ctrlp + SSD_RESERVED_BLKS_REG);
		dev->hw_info.reserved_blks = val & 0xFFFF;
		dev->hw_info.md_reserved_blks = (val >> 16) & 0xFF;
		if (dev->protocol_info.ver <= SSD_PROTOCOL_V3) {
			dev->hw_info.md_reserved_blks = SSD_BBT_RESERVED;
		}
		if (dev->hw_info.reserved_blks > dev->hw_info.block_count || dev->hw_info.md_reserved_blks > dev->hw_info.block_count) {
			hio_warn("%s: reserved blocks info error: reserved_blks %d, md_reserved_blks %d\n", dev->name, dev->hw_info.reserved_blks, dev->hw_info.md_reserved_blks);
			ret = -EINVAL;
			goto out;
		}
	}

	/* size = valid pages * page size * usable blocks * channels * chips * ctrls */
	if (mode < SSD_DRV_MODE_DEBUG) {
		dev->hw_info.size = (uint64_t)dev->hw_info.valid_pages * dev->hw_info.page_size;
		dev->hw_info.size *= (dev->hw_info.block_count - dev->hw_info.reserved_blks);
		dev->hw_info.size *= ((uint64_t)dev->hw_info.nr_data_ch * (uint64_t)dev->hw_info.nr_chip * (uint64_t)dev->hw_info.nr_ctrl);
	}

	/* extend hardware info */
	val = ssd_reg32_read(dev->ctrlp + SSD_PCB_VER_REG);
	dev->hw_info_ext.board_type = (val >> 24) & 0xF;

	dev->hw_info_ext.form_factor = SSD_FORM_FACTOR_FHHL;
	if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2_1) {
		dev->hw_info_ext.form_factor = (val >> 31) & 0x1;
	}
	/*
	dev->hw_info_ext.cap_type = (val >> 28) & 0x3;
	if (SSD_BM_CAP_VINA != dev->hw_info_ext.cap_type && SSD_BM_CAP_JH != dev->hw_info_ext.cap_type) {
		dev->hw_info_ext.cap_type = SSD_BM_CAP_VINA;
	}*/

	/* power loss protect */
	val = ssd_reg32_read(dev->ctrlp + SSD_PLP_INFO_REG);
	dev->hw_info_ext.plp_type = (val & 0x3);
	if (dev->protocol_info.ver >= SSD_PROTOCOL_V3_2) {
		/* 3 or 4 cap */
		dev->hw_info_ext.cap_type = ((val >> 2)& 0x1);
	}

	/* work mode */
	val = ssd_reg32_read(dev->ctrlp + SSD_CH_INFO_REG);
	dev->hw_info_ext.work_mode = (val >> 25) & 0x1;

out:
	/* skip error if not in standard mode */
	if (mode != SSD_DRV_MODE_STANDARD) {
		ret = 0;
	}
	return ret;
}
11338 | ||
11339 | static void ssd_cleanup_response(struct ssd_device *dev) | |
11340 | { | |
11341 | int resp_msg_sz = dev->hw_info.resp_msg_sz * dev->hw_info.cmd_fifo_sz * SSD_MSIX_VEC; | |
11342 | int resp_ptr_sz = dev->hw_info.resp_ptr_sz * SSD_MSIX_VEC; | |
11343 | ||
11344 | pci_free_consistent(dev->pdev, resp_ptr_sz, dev->resp_ptr_base, dev->resp_ptr_base_dma); | |
11345 | pci_free_consistent(dev->pdev, resp_msg_sz, dev->resp_msg_base, dev->resp_msg_base_dma); | |
11346 | } | |
11347 | ||
11348 | static int ssd_init_response(struct ssd_device *dev) | |
11349 | { | |
11350 | int resp_msg_sz = dev->hw_info.resp_msg_sz * dev->hw_info.cmd_fifo_sz * SSD_MSIX_VEC; | |
11351 | int resp_ptr_sz = dev->hw_info.resp_ptr_sz * SSD_MSIX_VEC; | |
11352 | ||
11353 | dev->resp_msg_base = pci_alloc_consistent(dev->pdev, resp_msg_sz, &(dev->resp_msg_base_dma)); | |
11354 | if (!dev->resp_msg_base) { | |
11355 | hio_warn("%s: unable to allocate resp msg DMA buffer\n", dev->name); | |
11356 | goto out_alloc_resp_msg; | |
11357 | } | |
11358 | memset(dev->resp_msg_base, 0xFF, resp_msg_sz); | |
11359 | ||
11360 | dev->resp_ptr_base = pci_alloc_consistent(dev->pdev, resp_ptr_sz, &(dev->resp_ptr_base_dma)); | |
11361 | if (!dev->resp_ptr_base){ | |
11362 | hio_warn("%s: unable to allocate resp ptr DMA buffer\n", dev->name); | |
11363 | goto out_alloc_resp_ptr; | |
11364 | } | |
11365 | memset(dev->resp_ptr_base, 0, resp_ptr_sz); | |
11366 | dev->resp_idx = *(uint32_t *)(dev->resp_ptr_base) = dev->hw_info.cmd_fifo_sz * 2 - 1; | |
11367 | ||
11368 | ssd_reg_write(dev->ctrlp + SSD_RESP_FIFO_REG, dev->resp_msg_base_dma); | |
11369 | ssd_reg_write(dev->ctrlp + SSD_RESP_PTR_REG, dev->resp_ptr_base_dma); | |
11370 | ||
11371 | return 0; | |
11372 | ||
11373 | out_alloc_resp_ptr: | |
11374 | pci_free_consistent(dev->pdev, resp_msg_sz, dev->resp_msg_base, dev->resp_msg_base_dma); | |
11375 | out_alloc_resp_msg: | |
11376 | return -ENOMEM; | |
11377 | } | |
11378 | ||
11379 | static int ssd_cleanup_cmd(struct ssd_device *dev) | |
11380 | { | |
11381 | int msg_sz = ALIGN(sizeof(struct ssd_rw_msg) + (dev->hw_info.cmd_max_sg - 1) * sizeof(struct ssd_sg_entry), SSD_DMA_ALIGN); | |
11382 | int i; | |
11383 | ||
11384 | for (i=0; i<(int)dev->hw_info.cmd_fifo_sz; i++) { | |
11385 | kfree(dev->cmd[i].sgl); | |
11386 | } | |
11387 | kfree(dev->cmd); | |
11388 | pci_free_consistent(dev->pdev, (msg_sz * dev->hw_info.cmd_fifo_sz), dev->msg_base, dev->msg_base_dma); | |
11389 | return 0; | |
11390 | } | |
11391 | ||
/*
 * Allocate the command infrastructure: one coherent DMA message pool,
 * the ssd_cmd slot array and a scatterlist per slot, then select the
 * command submission routine for the detected protocol version.
 *
 * On failure every partially-built resource is unwound (reverse order)
 * and -ENOMEM is returned; 0 on success.
 */
static int ssd_init_cmd(struct ssd_device *dev)
{
	int sgl_sz = sizeof(struct scatterlist) * dev->hw_info.cmd_max_sg;
	int cmd_sz = sizeof(struct ssd_cmd) * dev->hw_info.cmd_fifo_sz;
	/* ssd_rw_msg already embeds one sg entry, hence cmd_max_sg - 1 extras;
	 * rounded up so each per-slot message slice stays DMA-aligned */
	int msg_sz = ALIGN(sizeof(struct ssd_rw_msg) + (dev->hw_info.cmd_max_sg - 1) * sizeof(struct ssd_sg_entry), SSD_DMA_ALIGN);
	int i;

	spin_lock_init(&dev->cmd_lock);

	dev->msg_base = pci_alloc_consistent(dev->pdev, (msg_sz * dev->hw_info.cmd_fifo_sz), &dev->msg_base_dma);
	if (!dev->msg_base) {
		hio_warn("%s: can not alloc cmd msg\n", dev->name);
		goto out_alloc_msg;
	}

	dev->cmd = kmalloc(cmd_sz, GFP_KERNEL);
	if (!dev->cmd) {
		hio_warn("%s: can not alloc cmd\n", dev->name);
		goto out_alloc_cmd;
	}
	memset(dev->cmd, 0, cmd_sz);

	for (i=0; i<(int)dev->hw_info.cmd_fifo_sz; i++) {
		dev->cmd[i].sgl = kmalloc(sgl_sz, GFP_KERNEL);
		if (!dev->cmd[i].sgl) {
			hio_warn("%s: can not alloc cmd sgl %d\n", dev->name, i);
			goto out_alloc_sgl;
		}

		/* each slot owns a fixed slice of the message pool (CPU + bus view) */
		dev->cmd[i].msg = dev->msg_base + (msg_sz * i);
		dev->cmd[i].msg_dma = dev->msg_base_dma + ((dma_addr_t)msg_sz * i);

		dev->cmd[i].dev = dev;
		dev->cmd[i].tag = i;
		dev->cmd[i].flag = 0;

		INIT_LIST_HEAD(&dev->cmd[i].list);
	}

	if (dev->protocol_info.ver < SSD_PROTOCOL_V3) {
		dev->scmd = ssd_dispatch_cmd;
	} else {
		/* V3+: hardware fetches messages directly from the pool base */
		ssd_reg_write(dev->ctrlp + SSD_MSG_BASE_REG, dev->msg_base_dma);
		if (finject) {
			/* fault-injection build: doorbell variant */
			dev->scmd = ssd_send_cmd_db;
		} else {
			dev->scmd = ssd_send_cmd;
		}
	}

	return 0;

out_alloc_sgl:
	/* free only the scatterlists allocated so far */
	for (i--; i>=0; i--) {
		kfree(dev->cmd[i].sgl);
	}
	kfree(dev->cmd);
out_alloc_cmd:
	pci_free_consistent(dev->pdev, (msg_sz * dev->hw_info.cmd_fifo_sz), dev->msg_base, dev->msg_base_dma);
out_alloc_msg:
	return -ENOMEM;
}
11454 | ||
11455 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,30)) | |
11456 | static irqreturn_t ssd_interrupt_check(int irq, void *dev_id) | |
11457 | { | |
11458 | struct ssd_queue *queue = (struct ssd_queue *)dev_id; | |
11459 | ||
11460 | if (*(uint32_t *)queue->resp_ptr == queue->resp_idx) { | |
11461 | return IRQ_NONE; | |
11462 | } | |
11463 | ||
11464 | return IRQ_WAKE_THREAD; | |
11465 | } | |
11466 | ||
/*
 * Threaded half of the interrupt: drain the response ring.
 *
 * The hardware advances the 32-bit counter at queue->resp_ptr after
 * writing each response message; queue->resp_idx holds the last index
 * the driver consumed.  Walk from resp_idx+1 up to the new hardware
 * index (masked into the ring), complete each command via ssd_done(),
 * and gather error / log / ECC bit-flip statistics along the way.
 */
static irqreturn_t ssd_interrupt_threaded(int irq, void *dev_id)
{
	struct ssd_queue *queue = (struct ssd_queue *)dev_id;
	struct ssd_device *dev = (struct ssd_device *)queue->dev;
	struct ssd_cmd *cmd;
	union ssd_response_msq __msg;
	union ssd_response_msq *msg = &__msg;
	uint64_t *u64_msg;
	uint32_t resp_idx = queue->resp_idx;
	uint32_t new_resp_idx = *(uint32_t *)queue->resp_ptr;
	uint32_t end_resp_idx;

	if (unlikely(resp_idx == new_resp_idx)) {
		/* spurious wakeup: nothing new arrived */
		return IRQ_NONE;
	}

	end_resp_idx = new_resp_idx & queue->resp_idx_mask;

	do {
		resp_idx = (resp_idx + 1) & queue->resp_idx_mask;

		/* the resp msg: copy the slot before touching it */
		u64_msg = (uint64_t *)(queue->resp_msg + queue->resp_msg_sz * resp_idx);
		msg->u64_msg = *u64_msg;

		if (unlikely(msg->u64_msg == (uint64_t)(-1))) {
			/* slot still holds the all-ones init pattern: the index ran
			 * ahead of the message data */
			hio_err("%s: empty resp msg: queue %d idx %u\n", dev->name, queue->idx, resp_idx);
			continue;
		}
		/* clear the resp msg */
		*u64_msg = (uint64_t)(-1);

		cmd = &queue->cmd[msg->resp_msg.tag];
		/*if (unlikely(!cmd->bio)) {
			printk(KERN_WARNING "%s: unknown tag %d fun %#x\n",
				dev->name, msg->resp_msg.tag, msg->resp_msg.fun);
			continue;
		}*/

		/* only status bits selected by the module's status_mask count as EIO */
		if(unlikely(msg->resp_msg.status & (uint32_t)status_mask)) {
			cmd->errors = -EIO;
		} else {
			cmd->errors = 0;
		}
		cmd->nr_log = msg->log_resp_msg.nr_log;

		ssd_done(cmd);

		/* firmware flagged new log entries: schedule the log worker */
		if (unlikely(msg->resp_msg.fun != SSD_FUNC_READ_LOG && msg->resp_msg.log > 0)) {
			(void)test_and_set_bit(SSD_LOG_HW, &dev->state);
			if (test_bit(SSD_INIT_WORKQ, &dev->state)) {
				queue_work(dev->workq, &dev->log_work);
			}
		}

		if (unlikely(msg->resp_msg.status)) {
			if (msg->resp_msg.fun == SSD_FUNC_READ || msg->resp_msg.fun == SSD_FUNC_WRITE) {
				hio_err("%s: I/O error %d: tag %d fun %#x\n",
					dev->name, msg->resp_msg.status, msg->resp_msg.tag, msg->resp_msg.fun);

				/* alarm led */
				ssd_set_alarm(dev);
				queue->io_stat.nr_rwerr++;
				ssd_gen_swlog(dev, SSD_LOG_EIO, msg->u32_msg[0]);
			} else {
				hio_info("%s: CMD error %d: tag %d fun %#x\n",
					dev->name, msg->resp_msg.status, msg->resp_msg.tag, msg->resp_msg.fun);

				ssd_gen_swlog(dev, SSD_LOG_ECMD, msg->u32_msg[0]);
			}
			queue->io_stat.nr_ioerr++;
		}

		/* ECC bit-flip histogram, read-type commands only */
		if (msg->resp_msg.fun == SSD_FUNC_READ ||
			msg->resp_msg.fun == SSD_FUNC_NAND_READ_WOOB ||
			msg->resp_msg.fun == SSD_FUNC_NAND_READ) {

			queue->ecc_info.bitflip[msg->resp_msg.bitflip]++;
		}
	}while (resp_idx != end_resp_idx);

	queue->resp_idx = new_resp_idx;

	return IRQ_HANDLED;
}
11552 | #endif | |
11553 | ||
/*
 * Primary interrupt handler: drain the completion ring of one queue.
 *
 * Walks the response ring from the last consumed slot (queue->resp_idx)
 * up to the producer index the hardware wrote at *queue->resp_ptr,
 * completing each finished command via ssd_done_bh().
 *
 * Returns IRQ_NONE when no new responses are pending (shared-irq case),
 * IRQ_HANDLED otherwise.  The pre-2.6.19 variant only differs in the
 * kernel's handler prototype (extra pt_regs argument).
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19))
static irqreturn_t ssd_interrupt(int irq, void *dev_id, struct pt_regs *regs)
#else
static irqreturn_t ssd_interrupt(int irq, void *dev_id)
#endif
{
	struct ssd_queue *queue = (struct ssd_queue *)dev_id;
	struct ssd_device *dev = (struct ssd_device *)queue->dev;
	struct ssd_cmd *cmd;
	union ssd_response_msq __msg;
	union ssd_response_msq *msg = &__msg;
	uint64_t *u64_msg;
	uint32_t resp_idx = queue->resp_idx;
	uint32_t new_resp_idx = *(uint32_t *)queue->resp_ptr;
	uint32_t end_resp_idx;

	/* producer index unchanged: nothing for us (irq may be shared) */
	if (unlikely(resp_idx == new_resp_idx)) {
		return IRQ_NONE;
	}

#if (defined SSD_ESCAPE_IRQ)
	if (SSD_INT_MSIX != dev->int_mode) {
		/* record the CPU servicing this irq so submission paths can avoid it */
		dev->irq_cpu = smp_processor_id();
	}
#endif

	end_resp_idx = new_resp_idx & queue->resp_idx_mask;

	do {
		resp_idx = (resp_idx + 1) & queue->resp_idx_mask;

		/* the resp msg */
		u64_msg = (uint64_t *)(queue->resp_msg + queue->resp_msg_sz * resp_idx);
		msg->u64_msg = *u64_msg;

		/* all-ones is the "empty slot" pattern written back after consumption */
		if (unlikely(msg->u64_msg == (uint64_t)(-1))) {
			hio_err("%s: empty resp msg: queue %d idx %u\n", dev->name, queue->idx, resp_idx);
			continue;
		}
		/* clear the resp msg */
		*u64_msg = (uint64_t)(-1);

		cmd = &queue->cmd[msg->resp_msg.tag];
		/*if (unlikely(!cmd->bio)) {
			printk(KERN_WARNING "%s: unknown tag %d fun %#x\n",
				dev->name, msg->resp_msg.tag, msg->resp_msg.fun);
			continue;
		}*/

		/* only status bits selected by the module-level status_mask count as I/O errors */
		if(unlikely(msg->resp_msg.status & (uint32_t)status_mask)) {
			cmd->errors = -EIO;
		} else {
			cmd->errors = 0;
		}
		cmd->nr_log = msg->log_resp_msg.nr_log;

		ssd_done_bh(cmd);

		/* firmware flagged pending hardware log entries: schedule the log worker */
		if (unlikely(msg->resp_msg.fun != SSD_FUNC_READ_LOG && msg->resp_msg.log > 0)) {
			(void)test_and_set_bit(SSD_LOG_HW, &dev->state);
			if (test_bit(SSD_INIT_WORKQ, &dev->state)) {
				queue_work(dev->workq, &dev->log_work);
			}
		}

		if (unlikely(msg->resp_msg.status)) {
			if (msg->resp_msg.fun == SSD_FUNC_READ || msg->resp_msg.fun == SSD_FUNC_WRITE) {
				hio_err("%s: I/O error %d: tag %d fun %#x\n",
					dev->name, msg->resp_msg.status, msg->resp_msg.tag, msg->resp_msg.fun);

				/* alarm led */
				ssd_set_alarm(dev);
				queue->io_stat.nr_rwerr++;
				ssd_gen_swlog(dev, SSD_LOG_EIO, msg->u32_msg[0]);
			} else {
				hio_info("%s: CMD error %d: tag %d fun %#x\n",
					dev->name, msg->resp_msg.status, msg->resp_msg.tag, msg->resp_msg.fun);

				ssd_gen_swlog(dev, SSD_LOG_ECMD, msg->u32_msg[0]);
			}
			queue->io_stat.nr_ioerr++;
		}

		/* per-queue ECC bit-flip histogram for read-type functions */
		if (msg->resp_msg.fun == SSD_FUNC_READ ||
			msg->resp_msg.fun == SSD_FUNC_NAND_READ_WOOB ||
			msg->resp_msg.fun == SSD_FUNC_NAND_READ) {

			queue->ecc_info.bitflip[msg->resp_msg.bitflip]++;
		}
	}while (resp_idx != end_resp_idx);

	/* store the raw (unmasked) producer index for the next IRQ_NONE check */
	queue->resp_idx = new_resp_idx;

	return IRQ_HANDLED;
}
11649 | ||
11650 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) | |
11651 | static irqreturn_t ssd_interrupt_legacy(int irq, void *dev_id, struct pt_regs *regs) | |
11652 | #else | |
11653 | static irqreturn_t ssd_interrupt_legacy(int irq, void *dev_id) | |
11654 | #endif | |
11655 | { | |
11656 | irqreturn_t ret; | |
11657 | struct ssd_queue *queue = (struct ssd_queue *)dev_id; | |
11658 | struct ssd_device *dev = (struct ssd_device *)queue->dev; | |
11659 | ||
11660 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)) | |
11661 | ret = ssd_interrupt(irq, dev_id, regs); | |
11662 | #else | |
11663 | ret = ssd_interrupt(irq, dev_id); | |
11664 | #endif | |
11665 | ||
11666 | /* clear intr */ | |
11667 | if (IRQ_HANDLED == ret) { | |
11668 | ssd_reg32_write(dev->ctrlp + SSD_CLEAR_INTR_REG, 1); | |
11669 | } | |
11670 | ||
11671 | return ret; | |
11672 | } | |
11673 | ||
11674 | static void ssd_reset_resp_ptr(struct ssd_device *dev) | |
11675 | { | |
11676 | int i; | |
11677 | ||
11678 | for (i=0; i<dev->nr_queue; i++) { | |
11679 | *(uint32_t *)dev->queue[i].resp_ptr = dev->queue[i].resp_idx = (dev->hw_info.cmd_fifo_sz * 2) - 1; | |
11680 | } | |
11681 | } | |
11682 | ||
/*
 * Release all interrupt resources acquired by ssd_init_irq():
 * drop MSI-X affinity hints, free each per-queue irq, then disable
 * MSI-X/MSI on the PCI device.
 */
static void ssd_free_irq(struct ssd_device *dev)
{
	int i;

#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
	if (SSD_INT_MSIX == dev->int_mode) {
		for (i=0; i<dev->nr_queue; i++) {
			/* clear the affinity hint set during ssd_init_irq() */
			irq_set_affinity_hint(dev->entry[i].vector, NULL);
		}
	}
#endif

	for (i=0; i<dev->nr_queue; i++) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0))
		free_irq(dev->entry[i].vector, &dev->queue[i]);
#else
		/* >= 4.10: vectors were allocated via pci_alloc_irq_vectors() */
		free_irq(pci_irq_vector(dev->pdev, i), &dev->queue[i]);
#endif
	}

	if (SSD_INT_MSIX == dev->int_mode) {
		pci_disable_msix(dev->pdev);
	} else if (SSD_INT_MSI == dev->int_mode) {
		pci_disable_msi(dev->pdev);
	}

}
11710 | ||
11711 | static int ssd_init_irq(struct ssd_device *dev) | |
11712 | { | |
b44043bd | 11713 | #if (!defined MODULE) && (defined SSD_MSIX_AFFINITY_FORCE) && (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) |
1197134c | 11714 | const struct cpumask *cpu_mask = NULL; |
361ebed5 HSDT |
11715 | static int cpu_affinity = 0; |
11716 | #endif | |
b44043bd | 11717 | #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) |
13cfa002 | 11718 | const struct cpumask *mask = NULL; |
361ebed5 HSDT |
11719 | static int cpu = 0; |
11720 | int j; | |
11721 | #endif | |
11722 | int i; | |
11723 | unsigned long flags = 0; | |
11724 | int ret = 0; | |
11725 | ||
11726 | ssd_reg32_write(dev->ctrlp + SSD_INTR_INTERVAL_REG, 0x800); | |
11727 | ||
11728 | #ifdef SSD_ESCAPE_IRQ | |
11729 | dev->irq_cpu = -1; | |
11730 | #endif | |
11731 | ||
b44043bd | 11732 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) |
361ebed5 HSDT |
11733 | if (int_mode >= SSD_INT_MSIX && pci_find_capability(dev->pdev, PCI_CAP_ID_MSIX)) { |
11734 | dev->nr_queue = SSD_MSIX_VEC; | |
b44043bd | 11735 | |
361ebed5 HSDT |
11736 | for (i=0; i<dev->nr_queue; i++) { |
11737 | dev->entry[i].entry = i; | |
11738 | } | |
11739 | for (;;) { | |
11740 | ret = pci_enable_msix(dev->pdev, dev->entry, dev->nr_queue); | |
11741 | if (ret == 0) { | |
11742 | break; | |
11743 | } else if (ret > 0) { | |
11744 | dev->nr_queue = ret; | |
11745 | } else { | |
11746 | hio_warn("%s: can not enable msix\n", dev->name); | |
11747 | /* alarm led */ | |
11748 | ssd_set_alarm(dev); | |
11749 | goto out; | |
11750 | } | |
11751 | } | |
11752 | ||
11753 | #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6)) | |
11754 | mask = (dev_to_node(&dev->pdev->dev) == -1) ? cpu_online_mask : cpumask_of_node(dev_to_node(&dev->pdev->dev)); | |
11755 | if ((0 == cpu) || (!cpumask_intersects(mask, cpumask_of(cpu)))) { | |
11756 | cpu = cpumask_first(mask); | |
11757 | } | |
11758 | for (i=0; i<dev->nr_queue; i++) { | |
11759 | irq_set_affinity_hint(dev->entry[i].vector, cpumask_of(cpu)); | |
11760 | cpu = cpumask_next(cpu, mask); | |
11761 | if (cpu >= nr_cpu_ids) { | |
11762 | cpu = cpumask_first(mask); | |
11763 | } | |
11764 | } | |
11765 | #endif | |
11766 | ||
11767 | dev->int_mode = SSD_INT_MSIX; | |
11768 | } else if (int_mode >= SSD_INT_MSI && pci_find_capability(dev->pdev, PCI_CAP_ID_MSI)) { | |
11769 | ret = pci_enable_msi(dev->pdev); | |
11770 | if (ret) { | |
11771 | hio_warn("%s: can not enable msi\n", dev->name); | |
11772 | /* alarm led */ | |
11773 | ssd_set_alarm(dev); | |
11774 | goto out; | |
11775 | } | |
11776 | ||
11777 | dev->nr_queue = 1; | |
11778 | dev->entry[0].vector = dev->pdev->irq; | |
11779 | ||
11780 | dev->int_mode = SSD_INT_MSI; | |
11781 | } else { | |
11782 | dev->nr_queue = 1; | |
11783 | dev->entry[0].vector = dev->pdev->irq; | |
11784 | ||
11785 | dev->int_mode = SSD_INT_LEGACY; | |
11786 | } | |
b44043bd SF |
11787 | #else |
11788 | if (int_mode >= SSD_INT_MSIX && pci_find_capability(dev->pdev, PCI_CAP_ID_MSIX)) { | |
11789 | dev->nr_queue = SSD_MSIX_VEC; | |
11790 | ||
11791 | dev->nr_queue = pci_alloc_irq_vectors(dev->pdev, 1, dev->nr_queue, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY); | |
11792 | if (dev->nr_queue <= 0) { | |
11793 | ret = -EIO; | |
11794 | hio_warn("%s: can not enable msix\n", dev->name); | |
11795 | ssd_set_alarm(dev); | |
11796 | goto out; | |
11797 | } | |
11798 | ||
11799 | dev->int_mode = SSD_INT_MSIX; | |
11800 | } else if (int_mode >= SSD_INT_MSI && pci_find_capability(dev->pdev, PCI_CAP_ID_MSI)) { | |
11801 | ||
11802 | ret = pci_alloc_irq_vectors(dev->pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_AFFINITY); | |
11803 | if (ret <= 0) { | |
11804 | ret = -EIO; | |
11805 | hio_warn("%s: can not enable msi\n", dev->name); | |
11806 | /* alarm led */ | |
11807 | ssd_set_alarm(dev); | |
11808 | goto out; | |
11809 | } | |
11810 | dev->nr_queue = 1; | |
11811 | ||
11812 | dev->int_mode = SSD_INT_MSI; | |
11813 | } else { | |
11814 | ret = pci_alloc_irq_vectors(dev->pdev, 1, 1, PCI_IRQ_LEGACY); | |
11815 | ||
11816 | if (ret <= 0) { | |
11817 | ret = -EIO; | |
11818 | hio_warn("%s: can not enable msi\n", dev->name); | |
11819 | /* alarm led */ | |
11820 | ssd_set_alarm(dev); | |
11821 | goto out; | |
11822 | } | |
11823 | dev->nr_queue = 1; | |
11824 | ||
11825 | dev->int_mode = SSD_INT_LEGACY; | |
11826 | } | |
11827 | #endif | |
361ebed5 HSDT |
11828 | |
11829 | for (i=0; i<dev->nr_queue; i++) { | |
11830 | if (dev->nr_queue > 1) { | |
11831 | snprintf(dev->queue[i].name, SSD_QUEUE_NAME_LEN, "%s_e100-%d", dev->name, i); | |
11832 | } else { | |
11833 | snprintf(dev->queue[i].name, SSD_QUEUE_NAME_LEN, "%s_e100", dev->name); | |
11834 | } | |
11835 | ||
11836 | dev->queue[i].dev = dev; | |
11837 | dev->queue[i].idx = i; | |
11838 | ||
11839 | dev->queue[i].resp_idx = (dev->hw_info.cmd_fifo_sz * 2) - 1; | |
11840 | dev->queue[i].resp_idx_mask = dev->hw_info.cmd_fifo_sz - 1; | |
11841 | ||
11842 | dev->queue[i].resp_msg_sz = dev->hw_info.resp_msg_sz; | |
11843 | dev->queue[i].resp_msg = dev->resp_msg_base + dev->hw_info.resp_msg_sz * dev->hw_info.cmd_fifo_sz * i; | |
11844 | dev->queue[i].resp_ptr = dev->resp_ptr_base + dev->hw_info.resp_ptr_sz * i; | |
11845 | *(uint32_t *)dev->queue[i].resp_ptr = dev->queue[i].resp_idx; | |
11846 | ||
11847 | dev->queue[i].cmd = dev->cmd; | |
11848 | } | |
11849 | ||
11850 | #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)) | |
11851 | flags = IRQF_SHARED; | |
11852 | #else | |
11853 | flags = SA_SHIRQ; | |
11854 | #endif | |
11855 | ||
11856 | for (i=0; i<dev->nr_queue; i++) { | |
b44043bd SF |
11857 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)) |
11858 | if (dev->int_mode == SSD_INT_LEGACY) { | |
11859 | ret = request_irq(dev->entry[i].vector, &ssd_interrupt_legacy, flags, dev->queue[i].name, &dev->queue[i]); | |
11860 | } else { | |
11861 | ret = request_irq(dev->entry[i].vector, &ssd_interrupt, flags, dev->queue[i].name, &dev->queue[i]); | |
11862 | } | |
11863 | #elif (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) | |
361ebed5 HSDT |
11864 | if (threaded_irq) { |
11865 | ret = request_threaded_irq(dev->entry[i].vector, ssd_interrupt_check, ssd_interrupt_threaded, flags, dev->queue[i].name, &dev->queue[i]); | |
11866 | } else if (dev->int_mode == SSD_INT_LEGACY) { | |
11867 | ret = request_irq(dev->entry[i].vector, &ssd_interrupt_legacy, flags, dev->queue[i].name, &dev->queue[i]); | |
11868 | } else { | |
11869 | ret = request_irq(dev->entry[i].vector, &ssd_interrupt, flags, dev->queue[i].name, &dev->queue[i]); | |
11870 | } | |
11871 | #else | |
b44043bd SF |
11872 | if (threaded_irq) { |
11873 | ret = request_threaded_irq(pci_irq_vector(dev->pdev, i), ssd_interrupt_check, ssd_interrupt_threaded, flags, dev->queue[i].name, &dev->queue[i]); | |
11874 | } else if (dev->int_mode == SSD_INT_LEGACY) { | |
11875 | ret = request_irq(pci_irq_vector(dev->pdev, i), &ssd_interrupt_legacy, flags, dev->queue[i].name, &dev->queue[i]); | |
361ebed5 | 11876 | } else { |
b44043bd | 11877 | ret = request_irq(pci_irq_vector(dev->pdev, i), &ssd_interrupt, flags, dev->queue[i].name, &dev->queue[i]); |
361ebed5 HSDT |
11878 | } |
11879 | #endif | |
11880 | if (ret) { | |
11881 | hio_warn("%s: request irq failed\n", dev->name); | |
11882 | /* alarm led */ | |
11883 | ssd_set_alarm(dev); | |
11884 | goto out_request_irq; | |
11885 | } | |
11886 | ||
b44043bd | 11887 | #if (!defined MODULE) && (defined SSD_MSIX_AFFINITY_FORCE) && (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) |
361ebed5 HSDT |
11888 | cpu_mask = (dev_to_node(&dev->pdev->dev) == -1) ? cpu_online_mask : cpumask_of_node(dev_to_node(&dev->pdev->dev)); |
11889 | if (SSD_INT_MSIX == dev->int_mode) { | |
11890 | if ((0 == cpu_affinity) || (!cpumask_intersects(mask, cpumask_of(cpu_affinity)))) { | |
11891 | cpu_affinity = cpumask_first(cpu_mask); | |
11892 | } | |
11893 | ||
11894 | irq_set_affinity(dev->entry[i].vector, cpumask_of(cpu_affinity)); | |
11895 | cpu_affinity = cpumask_next(cpu_affinity, cpu_mask); | |
11896 | if (cpu_affinity >= nr_cpu_ids) { | |
11897 | cpu_affinity = cpumask_first(cpu_mask); | |
11898 | } | |
11899 | } | |
11900 | #endif | |
11901 | } | |
11902 | ||
11903 | return ret; | |
11904 | ||
11905 | out_request_irq: | |
b44043bd | 11906 | #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)) || (defined RHEL_MAJOR && RHEL_MAJOR == 6)) && (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) |
361ebed5 HSDT |
11907 | if (SSD_INT_MSIX == dev->int_mode) { |
11908 | for (j=0; j<dev->nr_queue; j++) { | |
11909 | irq_set_affinity_hint(dev->entry[j].vector, NULL); | |
11910 | } | |
11911 | } | |
11912 | #endif | |
11913 | ||
11914 | for (i--; i>=0; i--) { | |
b44043bd | 11915 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) |
361ebed5 | 11916 | free_irq(dev->entry[i].vector, &dev->queue[i]); |
b44043bd SF |
11917 | #else |
11918 | free_irq(pci_irq_vector(dev->pdev, i), &dev->queue[i]); | |
11919 | #endif | |
361ebed5 HSDT |
11920 | } |
11921 | ||
11922 | if (SSD_INT_MSIX == dev->int_mode) { | |
11923 | pci_disable_msix(dev->pdev); | |
11924 | } else if (SSD_INT_MSI == dev->int_mode) { | |
11925 | pci_disable_msi(dev->pdev); | |
11926 | } | |
11927 | ||
11928 | out: | |
11929 | return ret; | |
11930 | } | |
11931 | ||
11932 | static void ssd_initial_log(struct ssd_device *dev) | |
11933 | { | |
11934 | uint32_t val; | |
11935 | uint32_t speed, width; | |
11936 | ||
11937 | if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) { | |
11938 | return; | |
11939 | } | |
11940 | ||
11941 | val = ssd_reg32_read(dev->ctrlp + SSD_POWER_ON_REG); | |
11942 | if (val) { | |
da3355df SF |
11943 | // Poweron detection switched to SSD_INTR_INTERVAL_REG in 'ssd_init_smart' |
11944 | //ssd_gen_swlog(dev, SSD_LOG_POWER_ON, dev->hw_info.bridge_ver); | |
361ebed5 HSDT |
11945 | } |
11946 | ||
11947 | val = ssd_reg32_read(dev->ctrlp + SSD_PCIE_LINKSTATUS_REG); | |
11948 | speed = val & 0xF; | |
11949 | width = (val >> 4)& 0x3F; | |
11950 | if (0x1 == speed) { | |
11951 | hio_info("%s: PCIe: 2.5GT/s, x%u\n", dev->name, width); | |
11952 | } else if (0x2 == speed) { | |
11953 | hio_info("%s: PCIe: 5GT/s, x%u\n", dev->name, width); | |
11954 | } else { | |
11955 | hio_info("%s: PCIe: unknown GT/s, x%u\n", dev->name, width); | |
11956 | } | |
11957 | ssd_gen_swlog(dev, SSD_LOG_PCIE_LINK_STATUS, val); | |
11958 | ||
11959 | return; | |
11960 | } | |
11961 | ||
/*
 * Workqueue callback: periodic hardware-health monitoring.
 * Verifies basic hardware sanity, then checks clocks and voltages and
 * samples the board voltage monitors.  The pre-2.6.20 variant receives
 * the device directly; newer kernels recover it via container_of().
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
static void ssd_hwmon_worker(void *data)
{
	struct ssd_device *dev = (struct ssd_device *)data;
#else
static void ssd_hwmon_worker(struct work_struct *work)
{
	struct ssd_device *dev = container_of(work, struct ssd_device, hwmon_work);
#endif

	/* skip monitoring entirely if the hardware is not responding sanely */
	if (ssd_check_hw(dev)) {
		//hio_err("%s: check hardware failed\n", dev->name);
		return;
	}

	ssd_check_clock(dev);
	ssd_check_volt(dev);

	ssd_mon_boardvolt(dev);
}
11982 | ||
/*
 * Workqueue callback: sample the on-board temperature sensors.
 * Bails out early if basic hardware sanity checking fails.
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
static void ssd_tempmon_worker(void *data)
{
	struct ssd_device *dev = (struct ssd_device *)data;
#else
static void ssd_tempmon_worker(struct work_struct *work)
{
	struct ssd_device *dev = container_of(work, struct ssd_device, tempmon_work);
#endif

	if (ssd_check_hw(dev)) {
		//hio_err("%s: check hardware failed\n", dev->name);
		return;
	}

	ssd_mon_temp(dev);
}
12000 | ||
12001 | ||
/*
 * Workqueue callback: power-loss-protection capacitor monitoring.
 * Runs a capacitance "learn" cycle and raises/clears the battery-fault
 * software log depending on the measured value, with hysteresis so the
 * status does not flap around the threshold.
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
static void ssd_capmon_worker(void *data)
{
	struct ssd_device *dev = (struct ssd_device *)data;
#else
static void ssd_capmon_worker(struct work_struct *work)
{
	struct ssd_device *dev = container_of(work, struct ssd_device, capmon_work);
#endif
	uint32_t cap = 0;
	uint32_t cap_threshold = SSD_PL_CAP_THRESHOLD;
	int ret = 0;

	/* capacitor monitoring only exists on protocol >= V3.2 */
	if (dev->protocol_info.ver < SSD_PROTOCOL_V3_2) {
		return;
	}

	/* early FHHL boards (PCB rev < 'B') have no monitorable capacitor */
	if (dev->hw_info_ext.form_factor == SSD_FORM_FACTOR_FHHL && dev->hw_info.pcb_ver < 'B') {
		return;
	}

	/* fault before? */
	if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) {
		ret = ssd_check_pl_cap_fast(dev);
		if (ret) {
			return;
		}
	}

	/* learn */
	ret = ssd_do_cap_learn(dev, &cap);
	if (ret) {
		hio_err("%s: cap learn failed\n", dev->name);
		ssd_gen_swlog(dev, SSD_LOG_CAP_LEARN_FAULT, 0);
		return;
	}

	ssd_gen_swlog(dev, SSD_LOG_CAP_STATUS, cap);

	/* CP-type capacitors use their own threshold */
	if (SSD_PL_CAP_CP == dev->hw_info_ext.cap_type) {
		cap_threshold = SSD_PL_CAP_CP_THRESHOLD;
	}

	//use the fw event id?
	/* log fault/ok transitions only, with SSD_PL_CAP_THRESHOLD_HYST hysteresis */
	if (cap < cap_threshold) {
		if (!test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) {
			ssd_gen_swlog(dev, SSD_LOG_BATTERY_FAULT, 0);
		}
	} else if (cap >= (cap_threshold + SSD_PL_CAP_THRESHOLD_HYST)) {
		if (test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon)) {
			ssd_gen_swlog(dev, SSD_LOG_BATTERY_OK, 0);
		}
	}
}
12056 | ||
/*
 * Periodic routine timer callback (re-armed every SSD_ROUTINE_INTERVAL ms).
 * Dispatches the log/hwmon/capacitor/temperature work items on their
 * respective tick intervals.  Kernels >= 4.15 use the timer_list API,
 * so the device is recovered via from_timer() instead of a data pointer.
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0))
static void ssd_routine_start(void *data)
#else
static void ssd_routine_start(struct timer_list *t)
#endif
{
	struct ssd_device *dev;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0))
	if (!data) {
		return;
	}
	dev = data;
#else
	dev = from_timer(dev, t, routine_timer);
#endif

	dev->routine_tick++;

	/* harvest hardware logs only when the device is idle */
	if (test_bit(SSD_INIT_WORKQ, &dev->state) && !ssd_busy(dev)) {
		(void)test_and_set_bit(SSD_LOG_HW, &dev->state);
		queue_work(dev->workq, &dev->log_work);
	}

	if ((dev->routine_tick % SSD_HWMON_ROUTINE_TICK) == 0 && test_bit(SSD_INIT_WORKQ, &dev->state)) {
		queue_work(dev->workq, &dev->hwmon_work);
	}

	if ((dev->routine_tick % SSD_CAPMON_ROUTINE_TICK) == 0 && test_bit(SSD_INIT_WORKQ, &dev->state)) {
		queue_work(dev->workq, &dev->capmon_work);
	}

	/* recheck a previously detected capacitor fault on a faster tick */
	if ((dev->routine_tick % SSD_CAPMON2_ROUTINE_TICK) == 0 && test_bit(SSD_HWMON_PL_CAP(SSD_PL_CAP), &dev->hwmon) && test_bit(SSD_INIT_WORKQ, &dev->state)) {
		/* CAP fault? check again */
		queue_work(dev->workq, &dev->capmon_work);
	}

	/* temperature is sampled on every tick */
	if (test_bit(SSD_INIT_WORKQ, &dev->state)) {
		queue_work(dev->workq, &dev->tempmon_work);
	}

	/* schedule routine */
	mod_timer(&dev->routine_timer, jiffies + msecs_to_jiffies(SSD_ROUTINE_INTERVAL));
}
12101 | ||
12102 | static void ssd_cleanup_routine(struct ssd_device *dev) | |
12103 | { | |
12104 | if (unlikely(mode != SSD_DRV_MODE_STANDARD)) | |
12105 | return; | |
12106 | ||
12107 | (void)ssd_del_timer(&dev->routine_timer); | |
12108 | ||
12109 | (void)ssd_del_timer(&dev->bm_timer); | |
12110 | } | |
12111 | ||
/*
 * Register the periodic maintenance machinery: the battery-monitor,
 * hwmon, capacitor and temperature work items plus the two timers
 * that drive them.  Skipped entirely unless the driver runs in
 * standard mode.  Always returns 0.
 */
static int ssd_init_routine(struct ssd_device *dev)
{
	if (unlikely(mode != SSD_DRV_MODE_STANDARD))
		return 0;

	/* pre-2.6.20 INIT_WORK took the callback argument explicitly */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
	INIT_WORK(&dev->bm_work, ssd_bm_worker, dev);
	INIT_WORK(&dev->hwmon_work, ssd_hwmon_worker, dev);
	INIT_WORK(&dev->capmon_work, ssd_capmon_worker, dev);
	INIT_WORK(&dev->tempmon_work, ssd_tempmon_worker, dev);
#else
	INIT_WORK(&dev->bm_work, ssd_bm_worker);
	INIT_WORK(&dev->hwmon_work, ssd_hwmon_worker);
	INIT_WORK(&dev->capmon_work, ssd_capmon_worker);
	INIT_WORK(&dev->tempmon_work, ssd_tempmon_worker);
#endif

	/* initial log */
	ssd_initial_log(dev);

	/* schedule bm routine */
	ssd_add_timer(&dev->bm_timer, msecs_to_jiffies(SSD_BM_CAP_LEARNING_DELAY), ssd_bm_routine_start, dev);

	/* schedule routine */
	ssd_add_timer(&dev->routine_timer, msecs_to_jiffies(SSD_ROUTINE_INTERVAL), ssd_routine_start, dev);

	return 0;
}
12140 | ||
/*
 * PCI remove callback: tear the device down in the reverse order of
 * ssd_init_one().  The ordering below is load-bearing: the device is
 * taken offline and the workqueue drained first, dirty data is flushed
 * and metadata/smart saved, then block/char devices, timers, queues,
 * irqs and DMA buffers are released before the BAR is unmapped.
 */
static void
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
__devexit
#endif
ssd_remove_one (struct pci_dev *pdev)
{
	struct ssd_device *dev;

	if (!pdev) {
		return;
	}

	dev = pci_get_drvdata(pdev);
	if (!dev) {
		return;
	}

	list_del_init(&dev->list);

	ssd_unregister_sysfs(dev);

	/* offline firstly */
	test_and_clear_bit(SSD_ONLINE, &dev->state);

	/* clean work queue first */
	if (!dev->slave) {
		test_and_clear_bit(SSD_INIT_WORKQ, &dev->state);
		ssd_cleanup_workq(dev);
	}

	/* flush cache */
	(void)ssd_flush(dev);
	(void)ssd_save_md(dev);

	/* save smart */
	if (!dev->slave) {
		ssd_save_smart(dev);
	}

	if (test_and_clear_bit(SSD_INIT_BD, &dev->state)) {
		ssd_cleanup_blkdev(dev);
	}

	/* slave ports have no char device of their own */
	if (!dev->slave) {
		ssd_cleanup_chardev(dev);
	}

	/* clean routine */
	if (!dev->slave) {
		ssd_cleanup_routine(dev);
	}

	ssd_cleanup_queue(dev);

	ssd_cleanup_tag(dev);
	ssd_cleanup_thread(dev);

	ssd_free_irq(dev);

	ssd_cleanup_dcmd(dev);
	ssd_cleanup_cmd(dev);
	ssd_cleanup_response(dev);

	if (!dev->slave) {
		ssd_cleanup_log(dev);
	}

	if (dev->reload_fw) { //reload fw
		dev->has_non_0x98_reg_access = 1;
		ssd_reg32_write(dev->ctrlp + SSD_RELOAD_FW_REG, SSD_RELOAD_FW);
	}

	/* unmap physical adress */
#ifdef LINUX_SUSE_OS
	iounmap(dev->ctrlp);
#else
	pci_iounmap(pdev, dev->ctrlp);
#endif

	release_mem_region(dev->mmio_base, dev->mmio_len);

	pci_disable_device(pdev);

	pci_set_drvdata(pdev, NULL);

	/* drop the probe-time reference; frees dev when the last user is gone */
	ssd_put(dev);
}
12228 | ||
12229 | static int | |
12230 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38)) | |
12231 | __devinit | |
12232 | #endif | |
12233 | ssd_init_one(struct pci_dev *pdev, | |
12234 | const struct pci_device_id *ent) | |
12235 | { | |
12236 | struct ssd_device *dev; | |
12237 | int ret = 0; | |
12238 | ||
12239 | if (!pdev || !ent) { | |
12240 | ret = -EINVAL; | |
12241 | goto out; | |
12242 | } | |
12243 | ||
12244 | dev = kmalloc(sizeof(struct ssd_device), GFP_KERNEL); | |
12245 | if (!dev) { | |
12246 | ret = -ENOMEM; | |
12247 | goto out_alloc_dev; | |
12248 | } | |
12249 | memset(dev, 0, sizeof(struct ssd_device)); | |
12250 | ||
12251 | dev->owner = THIS_MODULE; | |
12252 | ||
12253 | if (SSD_SLAVE_PORT_DEVID == ent->device) { | |
12254 | dev->slave = 1; | |
12255 | } | |
12256 | ||
12257 | dev->idx = ssd_get_index(dev->slave); | |
12258 | if (dev->idx < 0) { | |
12259 | ret = -ENOMEM; | |
12260 | goto out_get_index; | |
12261 | } | |
12262 | ||
12263 | if (!dev->slave) { | |
12264 | snprintf(dev->name, SSD_DEV_NAME_LEN, SSD_DEV_NAME); | |
12265 | ssd_set_dev_name(&dev->name[strlen(SSD_DEV_NAME)], SSD_DEV_NAME_LEN-strlen(SSD_DEV_NAME), dev->idx); | |
12266 | ||
12267 | dev->major = ssd_major; | |
12268 | dev->cmajor = ssd_cmajor; | |
12269 | } else { | |
12270 | snprintf(dev->name, SSD_DEV_NAME_LEN, SSD_SDEV_NAME); | |
12271 | ssd_set_dev_name(&dev->name[strlen(SSD_SDEV_NAME)], SSD_DEV_NAME_LEN-strlen(SSD_SDEV_NAME), dev->idx); | |
12272 | dev->major = ssd_major_sl; | |
12273 | dev->cmajor = 0; | |
12274 | } | |
12275 | ||
57e45d44 | 12276 | dev->reset_time = (uint64_t)ktime_get_real_seconds(); |
1197134c | 12277 | |
361ebed5 HSDT |
12278 | atomic_set(&(dev->refcnt), 0); |
12279 | atomic_set(&(dev->tocnt), 0); | |
12280 | ||
12281 | mutex_init(&dev->fw_mutex); | |
12282 | ||
12283 | //xx | |
12284 | mutex_init(&dev->gd_mutex); | |
da3355df SF |
12285 | dev->has_non_0x98_reg_access = 0; |
12286 | ||
12287 | //init in_flight lock | |
12288 | spin_lock_init(&dev->in_flight_lock); | |
361ebed5 HSDT |
12289 | |
12290 | dev->pdev = pdev; | |
12291 | pci_set_drvdata(pdev, dev); | |
12292 | ||
12293 | kref_init(&dev->kref); | |
12294 | ||
12295 | ret = pci_enable_device(pdev); | |
12296 | if (ret) { | |
12297 | hio_warn("%s: can not enable device\n", dev->name); | |
12298 | goto out_enable_device; | |
12299 | } | |
12300 | ||
12301 | pci_set_master(pdev); | |
12302 | ||
12303 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)) | |
12304 | ret = pci_set_dma_mask(pdev, DMA_64BIT_MASK); | |
12305 | #else | |
12306 | ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); | |
12307 | #endif | |
12308 | if (ret) { | |
12309 | hio_warn("%s: set dma mask: failed\n", dev->name); | |
12310 | goto out_set_dma_mask; | |
12311 | } | |
12312 | ||
12313 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)) | |
12314 | ret = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); | |
12315 | #else | |
12316 | ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | |
12317 | #endif | |
12318 | if (ret) { | |
12319 | hio_warn("%s: set consistent dma mask: failed\n", dev->name); | |
12320 | goto out_set_dma_mask; | |
12321 | } | |
12322 | ||
12323 | dev->mmio_base = pci_resource_start(pdev, 0); | |
12324 | dev->mmio_len = pci_resource_len(pdev, 0); | |
12325 | ||
12326 | if (!request_mem_region(dev->mmio_base, dev->mmio_len, SSD_DEV_NAME)) { | |
12327 | hio_warn("%s: can not reserve MMIO region 0\n", dev->name); | |
12328 | ret = -EBUSY; | |
12329 | goto out_request_mem_region; | |
12330 | } | |
12331 | ||
12332 | /* 2.6.9 kernel bug */ | |
12333 | dev->ctrlp = pci_iomap(pdev, 0, 0); | |
12334 | if (!dev->ctrlp) { | |
12335 | hio_warn("%s: can not remap IO region 0\n", dev->name); | |
12336 | ret = -ENOMEM; | |
12337 | goto out_pci_iomap; | |
12338 | } | |
12339 | ||
12340 | ret = ssd_check_hw(dev); | |
12341 | if (ret) { | |
12342 | hio_err("%s: check hardware failed\n", dev->name); | |
12343 | goto out_check_hw; | |
12344 | } | |
12345 | ||
12346 | ret = ssd_init_protocol_info(dev); | |
12347 | if (ret) { | |
12348 | hio_err("%s: init protocol info failed\n", dev->name); | |
12349 | goto out_init_protocol_info; | |
12350 | } | |
12351 | ||
12352 | /* alarm led ? */ | |
12353 | ssd_clear_alarm(dev); | |
12354 | ||
12355 | ret = ssd_init_fw_info(dev); | |
12356 | if (ret) { | |
12357 | hio_err("%s: init firmware info failed\n", dev->name); | |
12358 | /* alarm led */ | |
12359 | ssd_set_alarm(dev); | |
12360 | goto out_init_fw_info; | |
12361 | } | |
12362 | ||
12363 | /* slave port ? */ | |
12364 | if (dev->slave) { | |
12365 | goto init_next1; | |
12366 | } | |
12367 | ||
12368 | ret = ssd_init_rom_info(dev); | |
12369 | if (ret) { | |
12370 | hio_err("%s: init rom info failed\n", dev->name); | |
12371 | /* alarm led */ | |
12372 | ssd_set_alarm(dev); | |
12373 | goto out_init_rom_info; | |
12374 | } | |
12375 | ||
12376 | ret = ssd_init_label(dev); | |
12377 | if (ret) { | |
12378 | hio_err("%s: init label failed\n", dev->name); | |
12379 | /* alarm led */ | |
12380 | ssd_set_alarm(dev); | |
12381 | goto out_init_label; | |
12382 | } | |
12383 | ||
12384 | ret = ssd_init_workq(dev); | |
12385 | if (ret) { | |
12386 | hio_warn("%s: init workq failed\n", dev->name); | |
12387 | goto out_init_workq; | |
12388 | } | |
12389 | (void)test_and_set_bit(SSD_INIT_WORKQ, &dev->state); | |
12390 | ||
12391 | ret = ssd_init_log(dev); | |
12392 | if (ret) { | |
12393 | hio_err("%s: init log failed\n", dev->name); | |
12394 | /* alarm led */ | |
12395 | ssd_set_alarm(dev); | |
12396 | goto out_init_log; | |
12397 | } | |
12398 | ||
12399 | ret = ssd_init_smart(dev); | |
12400 | if (ret) { | |
12401 | hio_err("%s: init info failed\n", dev->name); | |
12402 | /* alarm led */ | |
12403 | ssd_set_alarm(dev); | |
12404 | goto out_init_smart; | |
12405 | } | |
12406 | ||
12407 | init_next1: | |
12408 | ret = ssd_init_hw_info(dev); | |
12409 | if (ret) { | |
12410 | hio_err("%s: init hardware info failed\n", dev->name); | |
12411 | /* alarm led */ | |
12412 | ssd_set_alarm(dev); | |
12413 | goto out_init_hw_info; | |
12414 | } | |
12415 | ||
12416 | /* slave port ? */ | |
12417 | if (dev->slave) { | |
12418 | goto init_next2; | |
12419 | } | |
12420 | ||
12421 | ret = ssd_init_sensor(dev); | |
12422 | if (ret) { | |
12423 | hio_err("%s: init sensor failed\n", dev->name); | |
12424 | /* alarm led */ | |
12425 | ssd_set_alarm(dev); | |
12426 | goto out_init_sensor; | |
12427 | } | |
12428 | ||
12429 | ret = ssd_init_pl_cap(dev); | |
12430 | if (ret) { | |
12431 | hio_err("%s: int pl_cap failed\n", dev->name); | |
12432 | /* alarm led */ | |
12433 | ssd_set_alarm(dev); | |
12434 | goto out_init_pl_cap; | |
12435 | } | |
12436 | ||
12437 | init_next2: | |
12438 | ret = ssd_check_init_state(dev); | |
12439 | if (ret) { | |
12440 | hio_err("%s: check init state failed\n", dev->name); | |
12441 | /* alarm led */ | |
12442 | ssd_set_alarm(dev); | |
12443 | goto out_check_init_state; | |
12444 | } | |
12445 | ||
12446 | ret = ssd_init_response(dev); | |
12447 | if (ret) { | |
12448 | hio_warn("%s: init resp_msg failed\n", dev->name); | |
12449 | goto out_init_response; | |
12450 | } | |
12451 | ||
12452 | ret = ssd_init_cmd(dev); | |
12453 | if (ret) { | |
12454 | hio_warn("%s: init msg failed\n", dev->name); | |
12455 | goto out_init_cmd; | |
12456 | } | |
12457 | ||
12458 | ret = ssd_init_dcmd(dev); | |
12459 | if (ret) { | |
12460 | hio_warn("%s: init cmd failed\n", dev->name); | |
12461 | goto out_init_dcmd; | |
12462 | } | |
12463 | ||
12464 | ret = ssd_init_irq(dev); | |
12465 | if (ret) { | |
12466 | hio_warn("%s: init irq failed\n", dev->name); | |
12467 | goto out_init_irq; | |
12468 | } | |
12469 | ||
12470 | ret = ssd_init_thread(dev); | |
12471 | if (ret) { | |
12472 | hio_warn("%s: init thread failed\n", dev->name); | |
12473 | goto out_init_thread; | |
12474 | } | |
12475 | ||
12476 | ret = ssd_init_tag(dev); | |
12477 | if(ret) { | |
12478 | hio_warn("%s: init tags failed\n", dev->name); | |
12479 | goto out_init_tags; | |
12480 | } | |
12481 | ||
12482 | /* */ | |
12483 | (void)test_and_set_bit(SSD_ONLINE, &dev->state); | |
12484 | ||
12485 | ret = ssd_init_queue(dev); | |
12486 | if (ret) { | |
12487 | hio_warn("%s: init queue failed\n", dev->name); | |
12488 | goto out_init_queue; | |
12489 | } | |
12490 | ||
12491 | /* slave port ? */ | |
12492 | if (dev->slave) { | |
12493 | goto init_next3; | |
12494 | } | |
12495 | ||
12496 | ret = ssd_init_ot_protect(dev); | |
12497 | if (ret) { | |
12498 | hio_err("%s: int ot_protect failed\n", dev->name); | |
12499 | /* alarm led */ | |
12500 | ssd_set_alarm(dev); | |
12501 | goto out_int_ot_protect; | |
12502 | } | |
12503 | ||
12504 | ret = ssd_init_wmode(dev); | |
12505 | if (ret) { | |
12506 | hio_warn("%s: init write mode\n", dev->name); | |
12507 | goto out_init_wmode; | |
12508 | } | |
12509 | ||
12510 | /* init routine after hw is ready */ | |
12511 | ret = ssd_init_routine(dev); | |
12512 | if (ret) { | |
12513 | hio_warn("%s: init routine\n", dev->name); | |
12514 | goto out_init_routine; | |
12515 | } | |
12516 | ||
12517 | ret = ssd_init_chardev(dev); | |
12518 | if (ret) { | |
12519 | hio_warn("%s: register char device failed\n", dev->name); | |
12520 | goto out_init_chardev; | |
12521 | } | |
12522 | ||
12523 | init_next3: | |
12524 | ret = ssd_init_blkdev(dev); | |
12525 | if (ret) { | |
12526 | hio_warn("%s: register block device failed\n", dev->name); | |
12527 | goto out_init_blkdev; | |
12528 | } | |
12529 | (void)test_and_set_bit(SSD_INIT_BD, &dev->state); | |
12530 | ||
12531 | ret = ssd_register_sysfs(dev); | |
12532 | if (ret) { | |
12533 | hio_warn("%s: register sysfs failed\n", dev->name); | |
12534 | goto out_register_sysfs; | |
12535 | } | |
12536 | ||
12537 | dev->save_md = 1; | |
12538 | ||
12539 | list_add_tail(&dev->list, &ssd_list); | |
12540 | ||
12541 | return 0; | |
12542 | ||
12543 | out_register_sysfs: | |
12544 | test_and_clear_bit(SSD_INIT_BD, &dev->state); | |
12545 | ssd_cleanup_blkdev(dev); | |
12546 | out_init_blkdev: | |
12547 | /* slave port ? */ | |
12548 | if (!dev->slave) { | |
12549 | ssd_cleanup_chardev(dev); | |
12550 | } | |
12551 | out_init_chardev: | |
12552 | /* slave port ? */ | |
12553 | if (!dev->slave) { | |
12554 | ssd_cleanup_routine(dev); | |
12555 | } | |
12556 | out_init_routine: | |
12557 | out_init_wmode: | |
12558 | out_int_ot_protect: | |
12559 | ssd_cleanup_queue(dev); | |
12560 | out_init_queue: | |
12561 | test_and_clear_bit(SSD_ONLINE, &dev->state); | |
12562 | ssd_cleanup_tag(dev); | |
12563 | out_init_tags: | |
12564 | ssd_cleanup_thread(dev); | |
12565 | out_init_thread: | |
12566 | ssd_free_irq(dev); | |
12567 | out_init_irq: | |
12568 | ssd_cleanup_dcmd(dev); | |
12569 | out_init_dcmd: | |
12570 | ssd_cleanup_cmd(dev); | |
12571 | out_init_cmd: | |
12572 | ssd_cleanup_response(dev); | |
12573 | out_init_response: | |
12574 | out_check_init_state: | |
12575 | out_init_pl_cap: | |
12576 | out_init_sensor: | |
12577 | out_init_hw_info: | |
12578 | out_init_smart: | |
12579 | /* slave port ? */ | |
12580 | if (!dev->slave) { | |
12581 | ssd_cleanup_log(dev); | |
12582 | } | |
12583 | out_init_log: | |
12584 | /* slave port ? */ | |
12585 | if (!dev->slave) { | |
12586 | test_and_clear_bit(SSD_INIT_WORKQ, &dev->state); | |
12587 | ssd_cleanup_workq(dev); | |
12588 | } | |
12589 | out_init_workq: | |
12590 | out_init_label: | |
12591 | out_init_rom_info: | |
12592 | out_init_fw_info: | |
12593 | out_init_protocol_info: | |
12594 | out_check_hw: | |
12595 | #ifdef LINUX_SUSE_OS | |
12596 | iounmap(dev->ctrlp); | |
12597 | #else | |
12598 | pci_iounmap(pdev, dev->ctrlp); | |
12599 | #endif | |
12600 | out_pci_iomap: | |
12601 | release_mem_region(dev->mmio_base, dev->mmio_len); | |
12602 | out_request_mem_region: | |
12603 | out_set_dma_mask: | |
12604 | pci_disable_device(pdev); | |
12605 | out_enable_device: | |
12606 | pci_set_drvdata(pdev, NULL); | |
12607 | out_get_index: | |
12608 | kfree(dev); | |
12609 | out_alloc_dev: | |
12610 | out: | |
12611 | return ret; | |
12612 | } | |
12613 | ||
12614 | static void ssd_cleanup_tasklet(void) | |
12615 | { | |
12616 | int i; | |
12617 | for_each_online_cpu(i) { | |
12618 | tasklet_kill(&per_cpu(ssd_tasklet, i)); | |
12619 | } | |
12620 | } | |
12621 | ||
12622 | static int ssd_init_tasklet(void) | |
12623 | { | |
12624 | int i; | |
12625 | ||
12626 | for_each_online_cpu(i) { | |
12627 | INIT_LIST_HEAD(&per_cpu(ssd_doneq, i)); | |
12628 | ||
12629 | if (finject) { | |
12630 | tasklet_init(&per_cpu(ssd_tasklet, i), __ssd_done_db, 0); | |
12631 | } else { | |
12632 | tasklet_init(&per_cpu(ssd_tasklet, i), __ssd_done, 0); | |
12633 | } | |
12634 | } | |
12635 | ||
12636 | return 0; | |
12637 | } | |
12638 | ||
/*
 * PCI vendor/device IDs this driver binds to.  Generation is noted per
 * entry; the 0x0008 "v1 sp" ID is intentionally left disabled.
 */
static struct pci_device_id ssd_pci_tbl[] = {
	{ 0x10ee, 0x0007, PCI_ANY_ID, PCI_ANY_ID, }, /* g3 */
	{ 0x19e5, 0x0007, PCI_ANY_ID, PCI_ANY_ID, }, /* v1 */
	//{ 0x19e5, 0x0008, PCI_ANY_ID, PCI_ANY_ID, }, /* v1 sp*/
	{ 0x19e5, 0x0009, PCI_ANY_ID, PCI_ANY_ID, }, /* v2 */
	{ 0x19e5, 0x000a, PCI_ANY_ID, PCI_ANY_ID, }, /* v2 dp slave*/
	{ 0, }
};
361ebed5 | 12647 | |
1197134c KM |
/*
 * Driver power management suspend handler (pm_ops on >= 2.6.32,
 * legacy pci_driver hook before that).
 *
 * Quiesces the card so it can lose power: takes it offline, drains
 * the work queue, flushes the cache and metadata, saves SMART data,
 * tears down routine/thread/IRQ infrastructure, optionally tells the
 * firmware to reload, then unmaps the BAR and disables the PCI device.
 * Mirrored by ssd_hio_resume().
 *
 * Returns 0 on success, -EINVAL if no device is bound to @pdev.
 *
 * NOTE(review): the teardown order here deliberately mirrors the
 * reverse of the probe/resume bring-up order — do not reorder.
 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32))
static int ssd_hio_suspend(struct pci_dev *pdev, pm_message_t state)
{
#else
static int ssd_hio_suspend(struct device *ddev)
{
	struct pci_dev *pdev = to_pci_dev(ddev);
#endif
	struct ssd_device *dev;


	if (!pdev) {
		return -EINVAL;
	}

	dev = pci_get_drvdata(pdev);
	if (!dev) {
		return -EINVAL;
	}

	hio_warn("%s: suspend disk start.\n", dev->name);
	ssd_unregister_sysfs(dev);

	/* offline firstly, so no new requests are accepted */
	test_and_clear_bit(SSD_ONLINE, &dev->state);

	/* clean work queue first (master port only) */
	if (!dev->slave) {
		test_and_clear_bit(SSD_INIT_WORKQ, &dev->state);
		ssd_cleanup_workq(dev);
	}

	/* flush cache and persist metadata; best effort */
	(void)ssd_flush(dev);
	(void)ssd_save_md(dev);

	/* save smart (master port only) */
	if (!dev->slave) {
		ssd_save_smart(dev);
	}

	/* clean routine (master port only) */
	if (!dev->slave) {
		ssd_cleanup_routine(dev);
	}

	ssd_cleanup_thread(dev);

	ssd_free_irq(dev);

	if (!dev->slave) {
		ssd_cleanup_log(dev);
	}

	if (dev->reload_fw) { //reload fw
		dev->has_non_0x98_reg_access = 1;
		ssd_reg32_write(dev->ctrlp + SSD_RELOAD_FW_REG, SSD_RELOAD_FW);
	}

	/* unmap physical address */
	if (dev->ctrlp) {
#ifdef LINUX_SUSE_OS
		iounmap(dev->ctrlp);
#else
		pci_iounmap(pdev, dev->ctrlp);
#endif
		dev->ctrlp = NULL;
	}

	if (dev->mmio_base) {
		release_mem_region(dev->mmio_base, dev->mmio_len);
		dev->mmio_base = 0;
	}

	pci_disable_device(pdev);

	hio_warn("%s: suspend disk finish.\n", dev->name);

	return 0;
}
12729 | ||
12730 | ||
12731 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)) | |
12732 | static int ssd_hio_resume(struct pci_dev *pdev) | |
12733 | { | |
12734 | #else | |
12735 | static int ssd_hio_resume(struct device *ddev) | |
12736 | { | |
12737 | struct pci_dev *pdev = to_pci_dev(ddev); | |
12738 | #endif | |
12739 | struct ssd_device *dev = NULL; | |
12740 | int ret = 0; | |
12741 | ||
12742 | if (!pdev ) { | |
12743 | ret = -EINVAL; | |
12744 | goto out; | |
12745 | } | |
12746 | ||
12747 | dev = pci_get_drvdata(pdev); | |
12748 | if (!dev) { | |
12749 | ret = -ENOMEM; | |
12750 | goto out_alloc_dev; | |
12751 | } | |
12752 | ||
12753 | hio_warn("%s: resume disk start.\n", dev->name); | |
12754 | ret = pci_enable_device(pdev); | |
12755 | if (ret) { | |
12756 | hio_warn("%s: can not enable device\n", dev->name); | |
12757 | goto out_enable_device; | |
12758 | } | |
12759 | ||
12760 | pci_set_master(pdev); | |
12761 | ||
12762 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)) | |
12763 | ret = pci_set_dma_mask(pdev, DMA_64BIT_MASK); | |
12764 | #else | |
12765 | ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); | |
12766 | #endif | |
12767 | if (ret) { | |
12768 | hio_warn("%s: set dma mask: failed\n", dev->name); | |
12769 | goto out_set_dma_mask; | |
12770 | } | |
12771 | ||
12772 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31)) | |
12773 | ret = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK); | |
12774 | #else | |
12775 | ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); | |
12776 | #endif | |
12777 | if (ret) { | |
12778 | hio_warn("%s: set consistent dma mask: failed\n", dev->name); | |
12779 | goto out_set_dma_mask; | |
12780 | } | |
12781 | ||
12782 | dev->mmio_base = pci_resource_start(pdev, 0); | |
12783 | dev->mmio_len = pci_resource_len(pdev, 0); | |
12784 | ||
12785 | if (!request_mem_region(dev->mmio_base, dev->mmio_len, SSD_DEV_NAME)) { | |
12786 | hio_warn("%s: can not reserve MMIO region 0\n", dev->name); | |
12787 | ret = -EBUSY; | |
12788 | goto out_request_mem_region; | |
12789 | } | |
12790 | ||
12791 | /* 2.6.9 kernel bug */ | |
12792 | dev->ctrlp = pci_iomap(pdev, 0, 0); | |
12793 | if (!dev->ctrlp) { | |
12794 | hio_warn("%s: can not remap IO region 0\n", dev->name); | |
12795 | ret = -ENOMEM; | |
12796 | goto out_pci_iomap; | |
12797 | } | |
12798 | ||
12799 | ret = ssd_check_hw(dev); | |
12800 | if (ret) { | |
12801 | hio_err("%s: check hardware failed\n", dev->name); | |
12802 | goto out_check_hw; | |
12803 | } | |
12804 | ||
12805 | /* alarm led ? */ | |
12806 | ssd_clear_alarm(dev); | |
12807 | ||
12808 | ret = ssd_init_fw_info(dev); | |
12809 | if (ret) { | |
12810 | hio_err("%s: init firmware info failed\n", dev->name); | |
12811 | /* alarm led */ | |
12812 | ssd_set_alarm(dev); | |
12813 | goto out_init_fw_info; | |
12814 | } | |
12815 | ||
12816 | /* slave port ? */ | |
12817 | if (dev->slave) { | |
12818 | goto init_next1; | |
12819 | } | |
12820 | ||
12821 | ret = ssd_init_rom_info(dev); | |
12822 | if (ret) { | |
12823 | hio_err("%s: init rom info failed\n", dev->name); | |
12824 | /* alarm led */ | |
12825 | ssd_set_alarm(dev); | |
12826 | goto out_init_rom_info; | |
12827 | } | |
12828 | ||
12829 | ret = ssd_init_label(dev); | |
12830 | if (ret) { | |
12831 | hio_err("%s: init label failed\n", dev->name); | |
12832 | /* alarm led */ | |
12833 | ssd_set_alarm(dev); | |
12834 | goto out_init_label; | |
12835 | } | |
12836 | ||
12837 | ret = ssd_init_workq(dev); | |
12838 | if (ret) { | |
12839 | hio_warn("%s: init workq failed\n", dev->name); | |
12840 | goto out_init_workq; | |
12841 | } | |
12842 | (void)test_and_set_bit(SSD_INIT_WORKQ, &dev->state); | |
12843 | ||
12844 | ret = ssd_init_log(dev); | |
12845 | if (ret) { | |
12846 | hio_err("%s: init log failed\n", dev->name); | |
12847 | /* alarm led */ | |
12848 | ssd_set_alarm(dev); | |
12849 | goto out_init_log; | |
12850 | } | |
12851 | ||
12852 | ret = ssd_init_smart(dev); | |
12853 | if (ret) { | |
12854 | hio_err("%s: init info failed\n", dev->name); | |
12855 | /* alarm led */ | |
12856 | ssd_set_alarm(dev); | |
12857 | goto out_init_smart; | |
12858 | } | |
12859 | ||
12860 | init_next1: | |
12861 | ret = ssd_init_hw_info(dev); | |
12862 | if (ret) { | |
12863 | hio_err("%s: init hardware info failed\n", dev->name); | |
12864 | /* alarm led */ | |
12865 | ssd_set_alarm(dev); | |
12866 | goto out_init_hw_info; | |
12867 | } | |
12868 | ||
12869 | /* slave port ? */ | |
12870 | if (dev->slave) { | |
12871 | goto init_next2; | |
12872 | } | |
12873 | ||
12874 | ret = ssd_init_sensor(dev); | |
12875 | if (ret) { | |
12876 | hio_err("%s: init sensor failed\n", dev->name); | |
12877 | /* alarm led */ | |
12878 | ssd_set_alarm(dev); | |
12879 | goto out_init_sensor; | |
12880 | } | |
12881 | ||
12882 | ret = ssd_init_pl_cap(dev); | |
12883 | if (ret) { | |
12884 | hio_err("%s: int pl_cap failed\n", dev->name); | |
12885 | /* alarm led */ | |
12886 | ssd_set_alarm(dev); | |
12887 | goto out_init_pl_cap; | |
12888 | } | |
12889 | ||
12890 | init_next2: | |
12891 | ret = ssd_check_init_state(dev); | |
12892 | if (ret) { | |
12893 | hio_err("%s: check init state failed\n", dev->name); | |
12894 | /* alarm led */ | |
12895 | ssd_set_alarm(dev); | |
12896 | goto out_check_init_state; | |
12897 | } | |
12898 | ||
12899 | //flush all base pointer to ssd | |
12900 | (void)ssd_reload_ssd_ptr(dev); | |
12901 | ||
12902 | ret = ssd_init_irq(dev); | |
12903 | if (ret) { | |
12904 | hio_warn("%s: init irq failed\n", dev->name); | |
12905 | goto out_init_irq; | |
12906 | } | |
12907 | ||
12908 | ret = ssd_init_thread(dev); | |
12909 | if (ret) { | |
12910 | hio_warn("%s: init thread failed\n", dev->name); | |
12911 | goto out_init_thread; | |
12912 | } | |
12913 | ||
12914 | /* */ | |
12915 | (void)test_and_set_bit(SSD_ONLINE, &dev->state); | |
12916 | ||
12917 | /* slave port ? */ | |
12918 | if (dev->slave) { | |
12919 | goto init_next3; | |
12920 | } | |
12921 | ||
12922 | ret = ssd_init_ot_protect(dev); | |
12923 | if (ret) { | |
12924 | hio_err("%s: int ot_protect failed\n", dev->name); | |
12925 | /* alarm led */ | |
12926 | ssd_set_alarm(dev); | |
12927 | goto out_int_ot_protect; | |
12928 | } | |
12929 | ||
12930 | ret = ssd_init_wmode(dev); | |
12931 | if (ret) { | |
12932 | hio_warn("%s: init write mode\n", dev->name); | |
12933 | goto out_init_wmode; | |
12934 | } | |
12935 | ||
12936 | /* init routine after hw is ready */ | |
12937 | ret = ssd_init_routine(dev); | |
12938 | if (ret) { | |
12939 | hio_warn("%s: init routine\n", dev->name); | |
12940 | goto out_init_routine; | |
12941 | } | |
12942 | ||
12943 | init_next3: | |
12944 | (void)test_and_set_bit(SSD_INIT_BD, &dev->state); | |
12945 | ||
12946 | dev->save_md = 1; | |
12947 | ||
12948 | hio_warn("%s: resume disk finish.\n", dev->name); | |
12949 | ||
12950 | return 0; | |
12951 | ||
12952 | out_init_routine: | |
12953 | out_init_wmode: | |
12954 | out_int_ot_protect: | |
12955 | ssd_cleanup_thread(dev); | |
12956 | out_init_thread: | |
12957 | ssd_free_irq(dev); | |
12958 | out_init_irq: | |
12959 | out_check_init_state: | |
12960 | out_init_pl_cap: | |
12961 | out_init_sensor: | |
12962 | out_init_hw_info: | |
12963 | out_init_smart: | |
12964 | /* slave port ? */ | |
12965 | if (!dev->slave) { | |
12966 | ssd_cleanup_log(dev); | |
12967 | } | |
12968 | out_init_log: | |
12969 | /* slave port ? */ | |
12970 | if (!dev->slave) { | |
12971 | test_and_clear_bit(SSD_INIT_WORKQ, &dev->state); | |
12972 | ssd_cleanup_workq(dev); | |
12973 | } | |
12974 | out_init_workq: | |
12975 | out_init_label: | |
12976 | out_init_rom_info: | |
12977 | out_init_fw_info: | |
12978 | out_check_hw: | |
12979 | #ifdef LINUX_SUSE_OS | |
12980 | iounmap(dev->ctrlp); | |
12981 | #else | |
12982 | pci_iounmap(pdev, dev->ctrlp); | |
12983 | #endif | |
12984 | out_pci_iomap: | |
12985 | release_mem_region(dev->mmio_base, dev->mmio_len); | |
12986 | out_request_mem_region: | |
12987 | out_set_dma_mask: | |
12988 | pci_disable_device(pdev); | |
12989 | out_enable_device: | |
12990 | out_alloc_dev: | |
12991 | out: | |
12992 | ||
12993 | hio_warn("%s: resume disk fail.\n", dev->name); | |
12994 | ||
12995 | return ret; | |
12996 | } | |
12997 | ||
MODULE_DEVICE_TABLE(pci, ssd_pci_tbl);

/* Power management: dev_pm_ops on >= 2.6.32; legacy hooks are wired
 * into the pci_driver below for older kernels. */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32))
#else
SIMPLE_DEV_PM_OPS(hio_pm_ops, ssd_hio_suspend, ssd_hio_resume);
#endif

/* PCI driver glue: probe/remove plus version-dependent PM wiring. */
struct pci_driver ssd_driver = {
	.name = MODULE_NAME,
	.id_table = ssd_pci_tbl,
	.probe = ssd_init_one,
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38))
	.remove = __devexit_p(ssd_remove_one),
#else
	.remove = ssd_remove_one,
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32))
	.suspend = ssd_hio_suspend,
	.resume = ssd_hio_resume,
#else
	.driver = {
		.pm = &hio_pm_ops,
	},
#endif
};
13024 | ||
13025 | /* notifier block to get a notify on system shutdown/halt/reboot */ | |
13026 | static int ssd_notify_reboot(struct notifier_block *nb, unsigned long event, void *buf) | |
13027 | { | |
13028 | struct ssd_device *dev = NULL; | |
13029 | struct ssd_device *n = NULL; | |
13030 | ||
13031 | list_for_each_entry_safe(dev, n, &ssd_list, list) { | |
13032 | ssd_gen_swlog(dev, SSD_LOG_POWER_OFF, 0); | |
13033 | ||
13034 | (void)ssd_flush(dev); | |
13035 | (void)ssd_save_md(dev); | |
13036 | ||
13037 | /* slave port ? */ | |
13038 | if (!dev->slave) { | |
13039 | ssd_save_smart(dev); | |
13040 | ||
13041 | ssd_stop_workq(dev); | |
13042 | ||
13043 | if (dev->reload_fw) { | |
da3355df | 13044 | dev->has_non_0x98_reg_access = 1; |
361ebed5 HSDT |
13045 | ssd_reg32_write(dev->ctrlp + SSD_RELOAD_FW_REG, SSD_RELOAD_FW); |
13046 | } | |
13047 | } | |
13048 | } | |
13049 | ||
13050 | return NOTIFY_OK; | |
13051 | } | |
13052 | ||
13053 | static struct notifier_block ssd_notifier = { | |
13054 | ssd_notify_reboot, NULL, 0 | |
13055 | }; | |
13056 | ||
13057 | static int __init ssd_init_module(void) | |
13058 | { | |
13059 | int ret = 0; | |
13060 | ||
13061 | hio_info("driver version: %s\n", DRIVER_VERSION); | |
13062 | ||
13063 | ret = ssd_init_index(); | |
13064 | if (ret) { | |
13065 | hio_warn("init index failed\n"); | |
13066 | goto out_init_index; | |
13067 | } | |
13068 | ||
13069 | ret = ssd_init_proc(); | |
13070 | if (ret) { | |
13071 | hio_warn("init proc failed\n"); | |
13072 | goto out_init_proc; | |
13073 | } | |
13074 | ||
13075 | ret = ssd_init_sysfs(); | |
13076 | if (ret) { | |
13077 | hio_warn("init sysfs failed\n"); | |
13078 | goto out_init_sysfs; | |
13079 | } | |
13080 | ||
13081 | ret = ssd_init_tasklet(); | |
13082 | if (ret) { | |
13083 | hio_warn("init tasklet failed\n"); | |
13084 | goto out_init_tasklet; | |
13085 | } | |
13086 | ||
13087 | #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12)) | |
13088 | ssd_class = class_simple_create(THIS_MODULE, SSD_DEV_NAME); | |
13089 | #else | |
13090 | ssd_class = class_create(THIS_MODULE, SSD_DEV_NAME); | |
13091 | #endif | |
13092 | if (IS_ERR(ssd_class)) { | |
13093 | ret = PTR_ERR(ssd_class); | |
13094 | goto out_class_create; | |
13095 | } | |
13096 | ||
13097 | if (ssd_cmajor > 0) { | |
13098 | ret = register_chrdev(ssd_cmajor, SSD_CDEV_NAME, &ssd_cfops); | |
13099 | } else { | |
13100 | ret = ssd_cmajor = register_chrdev(ssd_cmajor, SSD_CDEV_NAME, &ssd_cfops); | |
13101 | } | |
13102 | if (ret < 0) { | |
13103 | hio_warn("unable to register chardev major number\n"); | |
13104 | goto out_register_chardev; | |
13105 | } | |
13106 | ||
13107 | if (ssd_major > 0) { | |
13108 | ret = register_blkdev(ssd_major, SSD_DEV_NAME); | |
13109 | } else { | |
13110 | ret = ssd_major = register_blkdev(ssd_major, SSD_DEV_NAME); | |
13111 | } | |
13112 | if (ret < 0) { | |
13113 | hio_warn("unable to register major number\n"); | |
13114 | goto out_register_blkdev; | |
13115 | } | |
13116 | ||
13117 | if (ssd_major_sl > 0) { | |
13118 | ret = register_blkdev(ssd_major_sl, SSD_SDEV_NAME); | |
13119 | } else { | |
13120 | ret = ssd_major_sl = register_blkdev(ssd_major_sl, SSD_SDEV_NAME); | |
13121 | } | |
13122 | if (ret < 0) { | |
13123 | hio_warn("unable to register slave major number\n"); | |
13124 | goto out_register_blkdev_sl; | |
13125 | } | |
13126 | ||
13127 | if (mode < SSD_DRV_MODE_STANDARD || mode > SSD_DRV_MODE_BASE) { | |
13128 | mode = SSD_DRV_MODE_STANDARD; | |
13129 | } | |
13130 | ||
13131 | /* for debug */ | |
13132 | if (mode != SSD_DRV_MODE_STANDARD) { | |
13133 | ssd_minors = 1; | |
13134 | } | |
13135 | ||
13136 | if (int_mode < SSD_INT_LEGACY || int_mode > SSD_INT_MSIX) { | |
13137 | int_mode = SSD_INT_MODE_DEFAULT; | |
13138 | } | |
13139 | ||
13140 | if (threaded_irq) { | |
13141 | int_mode = SSD_INT_MSI; | |
13142 | } | |
13143 | ||
13144 | if (log_level >= SSD_LOG_NR_LEVEL || log_level < SSD_LOG_LEVEL_INFO) { | |
13145 | log_level = SSD_LOG_LEVEL_ERR; | |
13146 | } | |
13147 | ||
13148 | if (wmode < SSD_WMODE_BUFFER || wmode > SSD_WMODE_DEFAULT) { | |
13149 | wmode = SSD_WMODE_DEFAULT; | |
13150 | } | |
13151 | ||
13152 | #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)) | |
13153 | ret = pci_module_init(&ssd_driver); | |
13154 | #else | |
13155 | ret = pci_register_driver(&ssd_driver); | |
13156 | #endif | |
13157 | if (ret) { | |
13158 | hio_warn("pci init failed\n"); | |
13159 | goto out_pci_init; | |
13160 | } | |
13161 | ||
13162 | ret = register_reboot_notifier(&ssd_notifier); | |
13163 | if (ret) { | |
13164 | hio_warn("register reboot notifier failed\n"); | |
13165 | goto out_register_reboot_notifier; | |
13166 | } | |
13167 | ||
13168 | return 0; | |
13169 | ||
13170 | out_register_reboot_notifier: | |
13171 | out_pci_init: | |
13172 | pci_unregister_driver(&ssd_driver); | |
13173 | unregister_blkdev(ssd_major_sl, SSD_SDEV_NAME); | |
13174 | out_register_blkdev_sl: | |
13175 | unregister_blkdev(ssd_major, SSD_DEV_NAME); | |
13176 | out_register_blkdev: | |
13177 | unregister_chrdev(ssd_cmajor, SSD_CDEV_NAME); | |
13178 | out_register_chardev: | |
13179 | #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12)) | |
13180 | class_simple_destroy(ssd_class); | |
13181 | #else | |
13182 | class_destroy(ssd_class); | |
13183 | #endif | |
13184 | out_class_create: | |
13185 | ssd_cleanup_tasklet(); | |
13186 | out_init_tasklet: | |
13187 | ssd_cleanup_sysfs(); | |
13188 | out_init_sysfs: | |
13189 | ssd_cleanup_proc(); | |
13190 | out_init_proc: | |
13191 | ssd_cleanup_index(); | |
13192 | out_init_index: | |
13193 | return ret; | |
13194 | ||
13195 | } | |
13196 | ||
/*
 * Module exit: tear down everything ssd_init_module() set up, in
 * reverse registration order.  The ssd_exiting flag is raised first
 * so in-flight paths can bail out early.
 */
static void __exit ssd_cleanup_module(void)
{

	hio_info("unload driver: %s\n", DRIVER_VERSION);
	/* exiting */
	ssd_exiting = 1;

	unregister_reboot_notifier(&ssd_notifier);

	pci_unregister_driver(&ssd_driver);

	unregister_blkdev(ssd_major_sl, SSD_SDEV_NAME);
	unregister_blkdev(ssd_major, SSD_DEV_NAME);
	unregister_chrdev(ssd_cmajor, SSD_CDEV_NAME);
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,12))
	class_simple_destroy(ssd_class);
#else
	class_destroy(ssd_class);
#endif

	ssd_cleanup_tasklet();
	ssd_cleanup_sysfs();
	ssd_cleanup_proc();
	ssd_cleanup_index();
}
13222 | ||
/*
 * Install @event_call as the event callback for the disk behind @bdev
 * and replay relevant entries from the card's internal log to it.
 *
 * Entries are replayed only if their timestamp falls in
 * [dev->uptime, now].  SEU_FAULT1 entries older than the last reset
 * are skipped.  Temperature events are not replayed inline; instead
 * only the most recent one is remembered and delivered at the end,
 * and only if the current temperature is still at or above the
 * over-temperature hysteresis threshold.
 *
 * Returns 0 on success, -EINVAL for a NULL bdev/callback/disk.
 *
 * NOTE(review): assumes bd_disk->private_data is a struct ssd_device —
 * set elsewhere in this driver; not verifiable from this block alone.
 */
int ssd_register_event_notifier(struct block_device *bdev, ssd_event_call event_call)
{
	struct ssd_device *dev;
	struct ssd_log *le, *temp_le = NULL;
	uint64_t cur;
	int temp = 0;
	int log_nr;

	if (!bdev || !event_call || !(bdev->bd_disk)) {
		return -EINVAL;
	}

	dev = bdev->bd_disk->private_data;
	dev->event_call = event_call;

	cur = (uint64_t)ktime_get_real_seconds();

	le = (struct ssd_log *)(dev->internal_log.log);
	log_nr = dev->internal_log.nr_log;

	while (log_nr--) {
		if (le->time <= cur && le->time >= dev->uptime) {
			/* stale SEU fault from before the last reset: skip */
			if ((le->le.event == SSD_LOG_SEU_FAULT1) && (le->time < dev->reset_time)) {
				le++;
				continue;
			}
			/* temperature events: keep only the newest, deliver later */
			if (le->le.event == SSD_LOG_OVER_TEMP || le->le.event == SSD_LOG_NORMAL_TEMP || le->le.event == SSD_LOG_WARN_TEMP) {
				if (!temp_le || le->time >= temp_le->time) {
					temp_le = le;
				}
				le++;
				continue;
			}
			(void)dev->event_call(dev->gd, le->le.event, ssd_parse_log(dev, le, 0));
		}
		le++;
	}

	/* deliver the newest temperature event only if still (near) hot */
	ssd_get_temperature(bdev, &temp);
	if (temp_le && (temp >= SSD_OT_TEMP_HYST)) {
		(void)dev->event_call(dev->gd, temp_le->le.event, ssd_parse_log(dev, temp_le, 0));
	}

	return 0;
}
13268 | ||
13269 | int ssd_unregister_event_notifier(struct block_device *bdev) | |
13270 | { | |
13271 | struct ssd_device *dev; | |
13272 | ||
13273 | if (!bdev || !(bdev->bd_disk)) { | |
13274 | return -EINVAL; | |
13275 | } | |
13276 | ||
13277 | dev = bdev->bd_disk->private_data; | |
13278 | dev->event_call = NULL; | |
13279 | ||
13280 | return 0; | |
13281 | } | |
13282 | ||
/* Public in-kernel API exported for other modules (e.g. management
 * tools layered on this driver). */
EXPORT_SYMBOL(ssd_get_label);
EXPORT_SYMBOL(ssd_get_version);
EXPORT_SYMBOL(ssd_set_otprotect);
EXPORT_SYMBOL(ssd_bm_status);
EXPORT_SYMBOL(ssd_submit_pbio);
EXPORT_SYMBOL(ssd_get_pciaddr);
EXPORT_SYMBOL(ssd_get_temperature);
EXPORT_SYMBOL(ssd_register_event_notifier);
EXPORT_SYMBOL(ssd_unregister_event_notifier);
EXPORT_SYMBOL(ssd_reset);
EXPORT_SYMBOL(ssd_set_wmode);



module_init(ssd_init_module);
module_exit(ssd_cleanup_module);
MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Huawei SSD DEV Team");
MODULE_DESCRIPTION("Huawei SSD driver");