]>
Commit | Line | Data |
---|---|---|
7d17c02a ML |
1 | /* |
2 | * Copyright © 2009 - Maxim Levitsky | |
3 | * SmartMedia/xD translation layer | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify | |
6 | * it under the terms of the GNU General Public License version 2 as | |
7 | * published by the Free Software Foundation. | |
8 | */ | |
9 | ||
10 | #include <linux/kernel.h> | |
11 | #include <linux/module.h> | |
12 | #include <linux/random.h> | |
13 | #include <linux/hdreg.h> | |
14 | #include <linux/kthread.h> | |
15 | #include <linux/freezer.h> | |
16 | #include <linux/sysfs.h> | |
17 | #include <linux/bitops.h> | |
18 | #include "nand/sm_common.h" | |
19 | #include "sm_ftl.h" | |
20 | ||
21 | #ifdef CONFIG_SM_FTL_MUSEUM | |
22 | #include <linux/mtd/nand_ecc.h> | |
23 | #endif | |
24 | ||
25 | ||
26 | struct workqueue_struct *cache_flush_workqueue; | |
27 | ||
28 | static int cache_timeout = 1000; | |
29 | module_param(cache_timeout, bool, S_IRUGO); | |
30 | MODULE_PARM_DESC(cache_timeout, | |
31 | "Timeout (in ms) for cache flush (1000 ms default"); | |
32 | ||
33 | static int debug; | |
34 | module_param(debug, int, S_IRUGO | S_IWUSR); | |
35 | MODULE_PARM_DESC(debug, "Debug level (0-2)"); | |
36 | ||
37 | ||
/* ------------------- sysfs attributes ---------------------------------- */
/* Wrapper pairing a device_attribute with the buffer it exposes through
   sysfs; freed (data and wrapper) by sm_delete_sysfs_attributes(). */
struct sm_sysfs_attribute {
	struct device_attribute dev_attr;
	char *data;	/* heap-allocated payload shown by sm_attr_show() */
	int len;	/* payload length in bytes (excluding terminator) */
};
44 | ||
45 | ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr, | |
46 | char *buf) | |
47 | { | |
48 | struct sm_sysfs_attribute *sm_attr = | |
49 | container_of(attr, struct sm_sysfs_attribute, dev_attr); | |
50 | ||
51 | strncpy(buf, sm_attr->data, sm_attr->len); | |
52 | return sm_attr->len; | |
53 | } | |
54 | ||
55 | ||
56 | #define NUM_ATTRIBUTES 1 | |
57 | #define SM_CIS_VENDOR_OFFSET 0x59 | |
58 | struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl) | |
59 | { | |
60 | struct attribute_group *attr_group; | |
61 | struct attribute **attributes; | |
62 | struct sm_sysfs_attribute *vendor_attribute; | |
63 | ||
64 | int vendor_len = strnlen(ftl->cis_buffer + SM_CIS_VENDOR_OFFSET, | |
65 | SM_SMALL_PAGE - SM_CIS_VENDOR_OFFSET); | |
66 | ||
67 | char *vendor = kmalloc(vendor_len, GFP_KERNEL); | |
68 | memcpy(vendor, ftl->cis_buffer + SM_CIS_VENDOR_OFFSET, vendor_len); | |
69 | vendor[vendor_len] = 0; | |
70 | ||
71 | /* Initialize sysfs attributes */ | |
72 | vendor_attribute = | |
73 | kzalloc(sizeof(struct sm_sysfs_attribute), GFP_KERNEL); | |
74 | ||
ca7081d9 ML |
75 | sysfs_attr_init(&vendor_attribute->dev_attr.attr); |
76 | ||
7d17c02a ML |
77 | vendor_attribute->data = vendor; |
78 | vendor_attribute->len = vendor_len; | |
79 | vendor_attribute->dev_attr.attr.name = "vendor"; | |
80 | vendor_attribute->dev_attr.attr.mode = S_IRUGO; | |
81 | vendor_attribute->dev_attr.show = sm_attr_show; | |
82 | ||
83 | ||
84 | /* Create array of pointers to the attributes */ | |
85 | attributes = kzalloc(sizeof(struct attribute *) * (NUM_ATTRIBUTES + 1), | |
86 | GFP_KERNEL); | |
87 | attributes[0] = &vendor_attribute->dev_attr.attr; | |
88 | ||
89 | /* Finally create the attribute group */ | |
90 | attr_group = kzalloc(sizeof(struct attribute_group), GFP_KERNEL); | |
91 | attr_group->attrs = attributes; | |
92 | return attr_group; | |
93 | } | |
94 | ||
95 | void sm_delete_sysfs_attributes(struct sm_ftl *ftl) | |
96 | { | |
97 | struct attribute **attributes = ftl->disk_attributes->attrs; | |
98 | int i; | |
99 | ||
100 | for (i = 0; attributes[i] ; i++) { | |
101 | ||
102 | struct device_attribute *dev_attr = container_of(attributes[i], | |
103 | struct device_attribute, attr); | |
104 | ||
105 | struct sm_sysfs_attribute *sm_attr = | |
106 | container_of(dev_attr, | |
107 | struct sm_sysfs_attribute, dev_attr); | |
108 | ||
109 | kfree(sm_attr->data); | |
110 | kfree(sm_attr); | |
111 | } | |
112 | ||
113 | kfree(ftl->disk_attributes->attrs); | |
114 | kfree(ftl->disk_attributes); | |
115 | } | |
116 | ||
117 | ||
118 | /* ----------------------- oob helpers -------------------------------------- */ | |
119 | ||
120 | static int sm_get_lba(uint8_t *lba) | |
121 | { | |
122 | /* check fixed bits */ | |
123 | if ((lba[0] & 0xF8) != 0x10) | |
124 | return -2; | |
125 | ||
126 | /* check parity - endianess doesn't matter */ | |
127 | if (hweight16(*(uint16_t *)lba) & 1) | |
128 | return -2; | |
129 | ||
130 | return (lba[1] >> 1) | ((lba[0] & 0x07) << 7); | |
131 | } | |
132 | ||
133 | ||
134 | /* | |
135 | * Read LBA asscociated with block | |
136 | * returns -1, if block is erased | |
137 | * returns -2 if error happens | |
138 | */ | |
139 | static int sm_read_lba(struct sm_oob *oob) | |
140 | { | |
141 | static const uint32_t erased_pattern[4] = { | |
142 | 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }; | |
143 | ||
144 | uint16_t lba_test; | |
145 | int lba; | |
146 | ||
147 | /* First test for erased block */ | |
148 | if (!memcmp(oob, erased_pattern, SM_OOB_SIZE)) | |
149 | return -1; | |
150 | ||
151 | /* Now check is both copies of the LBA differ too much */ | |
152 | lba_test = *(uint16_t *)oob->lba_copy1 ^ *(uint16_t*)oob->lba_copy2; | |
153 | if (lba_test && !is_power_of_2(lba_test)) | |
154 | return -2; | |
155 | ||
156 | /* And read it */ | |
157 | lba = sm_get_lba(oob->lba_copy1); | |
158 | ||
159 | if (lba == -2) | |
160 | lba = sm_get_lba(oob->lba_copy2); | |
161 | ||
162 | return lba; | |
163 | } | |
164 | ||
165 | static void sm_write_lba(struct sm_oob *oob, uint16_t lba) | |
166 | { | |
167 | uint8_t tmp[2]; | |
168 | ||
169 | WARN_ON(lba >= 1000); | |
170 | ||
171 | tmp[0] = 0x10 | ((lba >> 7) & 0x07); | |
172 | tmp[1] = (lba << 1) & 0xFF; | |
173 | ||
174 | if (hweight16(*(uint16_t *)tmp) & 0x01) | |
175 | tmp[1] |= 1; | |
176 | ||
177 | oob->lba_copy1[0] = oob->lba_copy2[0] = tmp[0]; | |
178 | oob->lba_copy1[1] = oob->lba_copy2[1] = tmp[1]; | |
179 | } | |
180 | ||
181 | ||
/* Make a flat media offset from (zone, block, byte-offset) parts.
   Returns -1 when the block is unmapped (block == -1). */
static loff_t sm_mkoffset(struct sm_ftl *ftl, int zone, int block, int boffset)
{
	/* Callers must pass sector-aligned offsets and in-range indices;
	   these are diagnostics only, the value is computed regardless. */
	WARN_ON(boffset & (SM_SECTOR_SIZE - 1));
	WARN_ON(zone < 0 || zone >= ftl->zone_count);
	WARN_ON(block >= ftl->zone_size);
	WARN_ON(boffset >= ftl->block_size);

	/* -1 is the FTL's "unmapped" marker; propagate it as-is. */
	if (block == -1)
		return -1;

	/* Zones are laid out SM_MAX_ZONE_SIZE blocks apart regardless of
	   the actual zone_size, so unused block slots leave gaps. */
	return (zone * SM_MAX_ZONE_SIZE + block) * ftl->block_size + boffset;
}
195 | ||
/* Breaks a logical (sector-based) offset into zone/block/byte-offset parts.
   Sets *zone to -1 when the offset lies beyond the last zone. */
static void sm_break_offset(struct sm_ftl *ftl, loff_t offset,
			    int *zone, int *block, int *boffset)
{
	/* do_div() divides 'offset' in place and yields the remainder,
	   so these two steps must stay in exactly this order. */
	*boffset = do_div(offset, ftl->block_size);
	/* Logical space has max_lba blocks per zone (spares excluded). */
	*block = do_div(offset, ftl->max_lba);
	*zone = offset >= ftl->zone_count ? -1 : offset;
}
204 | ||
205 | /* ---------------------- low level IO ------------------------------------- */ | |
206 | ||
/* Run software ECC correction over both 256-byte halves of a sector,
   using the ECC bytes stored in the oob.
   Returns 0 on success (or when software ECC is compiled out), -EIO if
   either half is uncorrectable. */
static int sm_correct_sector(uint8_t *buffer, struct sm_oob *oob)
{
#ifdef CONFIG_SM_FTL_MUSEUM
	uint8_t ecc[3];

	/* Recompute ECC over the first half and correct against ecc1. */
	__nand_calculate_ecc(buffer, SM_SMALL_PAGE, ecc);
	if (__nand_correct_data(buffer, ecc, oob->ecc1, SM_SMALL_PAGE) < 0)
		return -EIO;

	buffer += SM_SMALL_PAGE;

	/* Same for the second half against ecc2. */
	__nand_calculate_ecc(buffer, SM_SMALL_PAGE, ecc);
	if (__nand_correct_data(buffer, ecc, oob->ecc2, SM_SMALL_PAGE) < 0)
		return -EIO;
#endif
	return 0;
}
224 | ||
/* Reads a sector + oob.
 * 'buffer' and/or 'oob' may be NULL when the caller needs only the other.
 * Retries up to twice (with a media recheck in between) before giving up.
 * Returns 0 on success, a negative error otherwise. */
static int sm_read_sector(struct sm_ftl *ftl,
			  int zone, int block, int boffset,
			  uint8_t *buffer, struct sm_oob *oob)
{
	struct mtd_info *mtd = ftl->trans->mtd;
	struct mtd_oob_ops ops;
	struct sm_oob tmp_oob;
	int ret = -EIO;
	int try = 0;

	/* FTL can contain -1 entries that are by default filled with bits */
	/* NOTE(review): assumes callers never pass buffer == NULL together
	   with block == -1 — confirm against call sites. */
	if (block == -1) {
		memset(buffer, 0xFF, SM_SECTOR_SIZE);
		return 0;
	}

	/* User might not need the oob, but we do for data verification */
	if (!oob)
		oob = &tmp_oob;

	ops.mode = ftl->smallpagenand ? MTD_OOB_RAW : MTD_OOB_PLACE;
	ops.ooboffs = 0;
	ops.ooblen = SM_OOB_SIZE;
	ops.oobbuf = (void *)oob;
	ops.len = SM_SECTOR_SIZE;
	ops.datbuf = buffer;

again:
	if (try++) {
		/* Avoid infinite recursion on CIS reads, sm_recheck_media
		   won't help anyway */
		if (zone == 0 && block == ftl->cis_block && boffset ==
			ftl->cis_boffset)
			return ret;

		/* Test if media is stable */
		if (try == 3 || sm_recheck_media(ftl))
			return ret;
	}

	/* Unfortunately, oob read will _always_ succeed,
	   despite card removal..... */
	ret = mtd->read_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);

	/* Test for unknown errors (-EUCLEAN/-EBADMSG are ECC results,
	   handled below) */
	if (ret != 0 && ret != -EUCLEAN && ret != -EBADMSG) {
		dbg("read of block %d at zone %d, failed due to error (%d)",
			block, zone, ret);
		goto again;
	}

	/* Do a basic test on the oob, to guard against returned garbage */
	if (oob->reserved != 0xFFFFFFFF && !is_power_of_2(~oob->reserved))
		goto again;

	/* This should never happen, unless there is a bug in the mtd driver */
	WARN_ON(ops.oobretlen != SM_OOB_SIZE);
	WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);

	/* oob-only read: nothing more to verify */
	if (!buffer)
		return 0;

	/* Test if sector marked as bad */
	if (!sm_sector_valid(oob)) {
		dbg("read of block %d at zone %d, failed because it is marked"
			" as bad" , block, zone);
		goto again;
	}

	/* Test ECC: hardware reported uncorrectable, or (on small-page
	   NAND) software correction failed */
	if (ret == -EBADMSG ||
		(ftl->smallpagenand && sm_correct_sector(buffer, oob))) {

		dbg("read of block %d at zone %d, failed due to ECC error",
			block, zone);
		goto again;
	}

	return 0;
}
306 | ||
/* Writes a sector (and its oob) to media.
 * Refuses to touch the CIS block, and fails fast when the media has
 * already been flagged unstable. Returns 0 on success. */
static int sm_write_sector(struct sm_ftl *ftl,
			   int zone, int block, int boffset,
			   uint8_t *buffer, struct sm_oob *oob)
{
	struct mtd_oob_ops ops;
	struct mtd_info *mtd = ftl->trans->mtd;
	int ret;

	/* Writing to a read-only device is a driver bug, not a runtime
	   condition. */
	BUG_ON(ftl->readonly);

	if (zone == 0 && (block == ftl->cis_block || block == 0)) {
		dbg("attempted to write the CIS!");
		return -EIO;
	}

	if (ftl->unstable)
		return -EIO;

	ops.mode = ftl->smallpagenand ? MTD_OOB_RAW : MTD_OOB_PLACE;
	ops.len = SM_SECTOR_SIZE;
	ops.datbuf = buffer;
	ops.ooboffs = 0;
	ops.ooblen = SM_OOB_SIZE;
	ops.oobbuf = (void *)oob;

	ret = mtd->write_oob(mtd, sm_mkoffset(ftl, zone, block, boffset), &ops);

	/* Now we assume that hardware will catch write bitflip errors */
	/* If you are paranoid, use CONFIG_MTD_NAND_VERIFY_WRITE */

	if (ret) {
		dbg("write to block %d at zone %d, failed with error %d",
			block, zone, ret);

		/* Probe the media so the unstable flag gets set if it is
		   gone; the write itself is not retried here. */
		sm_recheck_media(ftl);
		return ret;
	}

	/* This should never happen, unless there is a bug in the driver */
	WARN_ON(ops.oobretlen != SM_OOB_SIZE);
	WARN_ON(buffer && ops.retlen != SM_SECTOR_SIZE);

	return 0;
}
352 | ||
353 | /* ------------------------ block IO ------------------------------------- */ | |
354 | ||
355 | /* Write a block using data and lba, and invalid sector bitmap */ | |
356 | static int sm_write_block(struct sm_ftl *ftl, uint8_t *buf, | |
357 | int zone, int block, int lba, | |
358 | unsigned long invalid_bitmap) | |
359 | { | |
360 | struct sm_oob oob; | |
361 | int boffset; | |
362 | int retry = 0; | |
363 | ||
364 | /* Initialize the oob with requested values */ | |
365 | memset(&oob, 0xFF, SM_OOB_SIZE); | |
366 | sm_write_lba(&oob, lba); | |
367 | restart: | |
368 | if (ftl->unstable) | |
369 | return -EIO; | |
370 | ||
371 | for (boffset = 0; boffset < ftl->block_size; | |
372 | boffset += SM_SECTOR_SIZE) { | |
373 | ||
374 | oob.data_status = 0xFF; | |
375 | ||
376 | if (test_bit(boffset / SM_SECTOR_SIZE, &invalid_bitmap)) { | |
377 | ||
378 | sm_printk("sector %d of block at LBA %d of zone %d" | |
379 | " coudn't be read, marking it as invalid", | |
380 | boffset / SM_SECTOR_SIZE, lba, zone); | |
381 | ||
382 | oob.data_status = 0; | |
383 | } | |
384 | ||
385 | #ifdef CONFIG_SM_FTL_MUSEUM | |
386 | if (ftl->smallpagenand) { | |
387 | __nand_calculate_ecc(buf + boffset, | |
388 | SM_SMALL_PAGE, oob.ecc1); | |
389 | ||
390 | __nand_calculate_ecc(buf + boffset + SM_SMALL_PAGE, | |
391 | SM_SMALL_PAGE, oob.ecc2); | |
392 | } | |
393 | #endif | |
394 | if (!sm_write_sector(ftl, zone, block, boffset, | |
395 | buf + boffset, &oob)) | |
396 | continue; | |
397 | ||
398 | if (!retry) { | |
399 | ||
400 | /* If write fails. try to erase the block */ | |
401 | /* This is safe, because we never write in blocks | |
402 | that contain valuable data. | |
403 | This is intended to repair block that are marked | |
404 | as erased, but that isn't fully erased*/ | |
405 | ||
406 | if (sm_erase_block(ftl, zone, block, 0)) | |
407 | return -EIO; | |
408 | ||
409 | retry = 1; | |
410 | goto restart; | |
411 | } else { | |
412 | sm_mark_block_bad(ftl, zone, block); | |
413 | return -EIO; | |
414 | } | |
415 | } | |
416 | return 0; | |
417 | } | |
418 | ||
419 | ||
420 | /* Mark whole block at offset 'offs' as bad. */ | |
421 | static void sm_mark_block_bad(struct sm_ftl *ftl, int zone, int block) | |
422 | { | |
423 | struct sm_oob oob; | |
424 | int boffset; | |
425 | ||
426 | memset(&oob, 0xFF, SM_OOB_SIZE); | |
427 | oob.block_status = 0xF0; | |
428 | ||
429 | if (ftl->unstable) | |
430 | return; | |
431 | ||
432 | if (sm_recheck_media(ftl)) | |
433 | return; | |
434 | ||
435 | sm_printk("marking block %d of zone %d as bad", block, zone); | |
436 | ||
437 | /* We aren't checking the return value, because we don't care */ | |
438 | /* This also fails on fake xD cards, but I guess these won't expose | |
439 | any bad blocks till fail completly */ | |
440 | for (boffset = 0; boffset < ftl->block_size; boffset += SM_SECTOR_SIZE) | |
441 | sm_write_sector(ftl, zone, block, boffset, NULL, &oob); | |
442 | } | |
443 | ||
/*
 * Erase a block within a zone.
 * If the erase succeeds and 'put_free' is set, the block is returned to
 * the zone's free-block fifo; on failure the block is marked bad.
 */
static int sm_erase_block(struct sm_ftl *ftl, int zone_num, uint16_t block,
			  int put_free)
{
	struct ftl_zone *zone = &ftl->zones[zone_num];
	struct mtd_info *mtd = ftl->trans->mtd;
	struct erase_info erase;

	erase.mtd = mtd;
	erase.callback = sm_erase_callback;
	erase.addr = sm_mkoffset(ftl, zone_num, block, 0);
	erase.len = ftl->block_size;
	erase.priv = (u_long)ftl;

	if (ftl->unstable)
		return -EIO;

	/* Erasing a read-only device is a driver bug. */
	BUG_ON(ftl->readonly);

	/* Never erase the CIS block or block 0. */
	if (zone_num == 0 && (block == ftl->cis_block || block == 0)) {
		sm_printk("attempted to erase the CIS!");
		return -EIO;
	}

	if (mtd->erase(mtd, &erase)) {
		sm_printk("erase of block %d in zone %d failed",
			block, zone_num);
		goto error;
	}

	/* The erase may complete asynchronously; the callback signals
	   erase_completion when it is done. */
	if (erase.state == MTD_ERASE_PENDING)
		wait_for_completion(&ftl->erase_completion);

	if (erase.state != MTD_ERASE_DONE) {
		sm_printk("erase of block %d in zone %d failed after wait",
			block, zone_num);
		goto error;
	}

	if (put_free)
		kfifo_in(&zone->free_sectors,
			(const unsigned char *)&block, sizeof(block));

	return 0;
error:
	sm_mark_block_bad(ftl, zone_num, block);
	return -EIO;
}
495 | ||
496 | static void sm_erase_callback(struct erase_info *self) | |
497 | { | |
498 | struct sm_ftl *ftl = (struct sm_ftl *)self->priv; | |
499 | complete(&ftl->erase_completion); | |
500 | } | |
501 | ||
/* Thoroughly test that a block is valid.
 * Reads the LBA from every sector's oob and counts how many distinct
 * values appear. Returns 0 if the block is consistent, 1 if it was
 * sliced in two and has been erased, negative on error. */
static int sm_check_block(struct sm_ftl *ftl, int zone, int block)
{
	int boffset;
	struct sm_oob oob;
	/* lbas[0] = -3 is a sentinel that never matches a real sm_read_lba()
	   result, so the first sector always records its value at lbas[1]. */
	int lbas[] = { -3, 0, 0, 0 };
	int i = 0;
	int test_lba;


	/* First just check that block doesn't look fishy */
	/* Only blocks that are valid or are sliced in two parts, are
	   accepted */
	for (boffset = 0; boffset < ftl->block_size;
					boffset += SM_SECTOR_SIZE) {

		/* This shouldn't happen anyway */
		if (sm_read_sector(ftl, zone, block, boffset, NULL, &oob))
			return -2;

		test_lba = sm_read_lba(&oob);

		/* Record each newly-seen LBA value */
		if (lbas[i] != test_lba)
			lbas[++i] = test_lba;

		/* If we found three different LBAs, something is fishy */
		if (i == 3)
			return -EIO;
	}

	/* If the block is sliced (partially erased usually) erase it */
	if (i == 2) {
		sm_erase_block(ftl, zone, block, 1);
		return 1;
	}

	return 0;
}
540 | ||
541 | /* ----------------- media scanning --------------------------------- */ | |
/* ----------------- media scanning --------------------------------- */
/* Fake CHS geometry per media size.
   Columns: size (MiB), cylinders, heads, sectors; {0} terminates. */
static const struct chs_entry chs_table[] = {
	{ 1,    125,  4,  4  },
	{ 2,    125,  4,  8  },
	{ 4,    250,  4,  8  },
	{ 8,    250,  4,  16 },
	{ 16,   500,  4,  16 },
	{ 32,   500,  8,  16 },
	{ 64,   500,  8,  32 },
	{ 128,  500,  16, 32 },
	{ 256,  1000, 16, 32 },
	{ 512,  1015, 32, 63 },
	{ 1024, 985,  33, 63 },
	{ 2048, 985,  33, 63 },
	{ 0 },	/* sentinel */
};
557 | ||
558 | ||
/* First bytes of a valid CIS (Card Information Structure), used to
   recognize the CIS page during the media scan. */
static const uint8_t cis_signature[] = {
	0x01, 0x03, 0xD9, 0x01, 0xFF, 0x18, 0x02, 0xDF, 0x01, 0x20
};
/* Find out media parameters.
 * This ideally has to be based on nand id, but for now device size is enough.
 * Fills in zone/block geometry and CHS values; returns 0 on success,
 * -ENODEV when the device cannot be a SmartMedia/xD card. */
int sm_get_media_info(struct sm_ftl *ftl, struct mtd_info *mtd)
{
	int i;
	int size_in_megs = mtd->size / (1024 * 1024);

	ftl->readonly = mtd->type == MTD_ROM;

	/* Manual settings for very old devices */
	ftl->zone_count = 1;
	ftl->smallpagenand = 0;

	/* NOTE(review): no default case — sizes other than 1/2/4/8 MiB fall
	   through with the manual defaults above, relying on the >= 16 MiB
	   branch or the sanity checks below to reject odd sizes. */
	switch (size_in_megs) {
	case 1:
		/* 1 MiB flash/rom SmartMedia card (256 byte pages)*/
		ftl->zone_size = 256;
		ftl->max_lba = 250;
		ftl->block_size = 8 * SM_SECTOR_SIZE;
		ftl->smallpagenand = 1;

		break;
	case 2:
		/* 2 MiB flash SmartMedia (256 byte pages)*/
		if (mtd->writesize == SM_SMALL_PAGE) {
			ftl->zone_size = 512;
			ftl->max_lba = 500;
			ftl->block_size = 8 * SM_SECTOR_SIZE;
			ftl->smallpagenand = 1;
		/* 2 MiB rom SmartMedia */
		} else {

			if (!ftl->readonly)
				return -ENODEV;

			ftl->zone_size = 256;
			ftl->max_lba = 250;
			ftl->block_size = 16 * SM_SECTOR_SIZE;
		}
		break;
	case 4:
		/* 4 MiB flash/rom SmartMedia device */
		ftl->zone_size = 512;
		ftl->max_lba = 500;
		ftl->block_size = 16 * SM_SECTOR_SIZE;
		break;
	case 8:
		/* 8 MiB flash/rom SmartMedia device */
		ftl->zone_size = 1024;
		ftl->max_lba = 1000;
		ftl->block_size = 16 * SM_SECTOR_SIZE;
	}

	/* Minimum xD size is 16MiB. Also, all xD cards have standard zone
	   sizes. SmartMedia cards exist up to 128 MiB and have same layout*/
	if (size_in_megs >= 16) {
		ftl->zone_count = size_in_megs / 16;
		ftl->zone_size = 1024;
		ftl->max_lba = 1000;
		ftl->block_size = 32 * SM_SECTOR_SIZE;
	}

	/* Test for proper write,erase and oob sizes */
	if (mtd->erasesize > ftl->block_size)
		return -ENODEV;

	if (mtd->writesize > SM_SECTOR_SIZE)
		return -ENODEV;

	if (ftl->smallpagenand && mtd->oobsize < SM_SMALL_OOB_SIZE)
		return -ENODEV;

	if (!ftl->smallpagenand && mtd->oobsize < SM_OOB_SIZE)
		return -ENODEV;

	/* We use these functions for IO */
	if (!mtd->read_oob || !mtd->write_oob)
		return -ENODEV;

	/* Find geometry information */
	for (i = 0 ; i < ARRAY_SIZE(chs_table) ; i++) {
		if (chs_table[i].size == size_in_megs) {
			ftl->cylinders = chs_table[i].cyl;
			ftl->heads = chs_table[i].head;
			ftl->sectors = chs_table[i].sec;
			return 0;
		}
	}

	/* Unknown size: fall back to the largest known geometry. */
	sm_printk("media has unknown size : %dMiB", size_in_megs);
	ftl->cylinders = 985;
	ftl->heads = 33;
	ftl->sectors = 63;
	return 0;
}
657 | ||
658 | /* Validate the CIS */ | |
659 | static int sm_read_cis(struct sm_ftl *ftl) | |
660 | { | |
661 | struct sm_oob oob; | |
662 | ||
663 | if (sm_read_sector(ftl, | |
664 | 0, ftl->cis_block, ftl->cis_boffset, ftl->cis_buffer, &oob)) | |
665 | return -EIO; | |
666 | ||
667 | if (!sm_sector_valid(&oob) || !sm_block_valid(&oob)) | |
668 | return -EIO; | |
669 | ||
670 | if (!memcmp(ftl->cis_buffer + ftl->cis_page_offset, | |
671 | cis_signature, sizeof(cis_signature))) { | |
672 | return 0; | |
673 | } | |
674 | ||
675 | return -EIO; | |
676 | } | |
677 | ||
/* Scan the media for the CIS.
 * Finds the first valid block, then the first valid sector in it, and
 * tries the CIS signature at in-page offsets 0 and SM_SMALL_PAGE.
 * On success records cis_block/cis_boffset/cis_page_offset in 'ftl'. */
static int sm_find_cis(struct sm_ftl *ftl)
{
	struct sm_oob oob;
	int block, boffset;
	int block_found = 0;
	int cis_found = 0;

	/* Search for first valid block — the CIS must live in the spare
	   area before the logical blocks (zone_size - max_lba blocks). */
	for (block = 0 ; block < ftl->zone_size - ftl->max_lba ; block++) {

		if (sm_read_sector(ftl, 0, block, 0, NULL, &oob))
			continue;

		if (!sm_block_valid(&oob))
			continue;
		block_found = 1;
		break;
	}

	if (!block_found)
		return -EIO;

	/* Search for first valid sector in this block */
	for (boffset = 0 ; boffset < ftl->block_size;
						boffset += SM_SECTOR_SIZE) {

		if (sm_read_sector(ftl, 0, block, boffset, NULL, &oob))
			continue;

		if (!sm_sector_valid(&oob))
			continue;
		break;
	}

	if (boffset == ftl->block_size)
		return -EIO;

	ftl->cis_block = block;
	ftl->cis_boffset = boffset;
	ftl->cis_page_offset = 0;

	/* Try the signature at page offset 0 first, then SM_SMALL_PAGE. */
	cis_found = !sm_read_cis(ftl);

	if (!cis_found) {
		ftl->cis_page_offset = SM_SMALL_PAGE;
		cis_found = !sm_read_cis(ftl);
	}

	if (cis_found) {
		dbg("CIS block found at offset %x",
			block * ftl->block_size +
				boffset + ftl->cis_page_offset);
		return 0;
	}
	return -EIO;
}
735 | ||
736 | /* Basic test to determine if underlying mtd device if functional */ | |
737 | static int sm_recheck_media(struct sm_ftl *ftl) | |
738 | { | |
739 | if (sm_read_cis(ftl)) { | |
740 | ||
741 | if (!ftl->unstable) { | |
742 | sm_printk("media unstable, not allowing writes"); | |
743 | ftl->unstable = 1; | |
744 | } | |
745 | return -EIO; | |
746 | } | |
747 | return 0; | |
748 | } | |
749 | ||
/* Initialize a FTL zone: allocate its LBA->physical table and free-block
 * fifo, then scan every block, classifying it as erased (-> free fifo),
 * bad (skipped), or mapped (-> table), resolving LBA collisions.
 * Finally the free fifo is rotated by a random amount for wear leveling. */
static int sm_init_zone(struct sm_ftl *ftl, int zone_num)
{
	struct ftl_zone *zone = &ftl->zones[zone_num];
	struct sm_oob oob;
	uint16_t block;
	int lba;
	int i = 0;
	int len;

	dbg("initializing zone %d", zone_num);

	/* Allocate memory for FTL table (2 bytes per entry) */
	zone->lba_to_phys_table = kmalloc(ftl->max_lba * 2, GFP_KERNEL);

	if (!zone->lba_to_phys_table)
		return -ENOMEM;
	/* All-0xFF == -1 == "unmapped" for every entry */
	memset(zone->lba_to_phys_table, -1, ftl->max_lba * 2);


	/* Allocate memory for free sectors FIFO */
	if (kfifo_alloc(&zone->free_sectors, ftl->zone_size * 2, GFP_KERNEL)) {
		kfree(zone->lba_to_phys_table);
		return -ENOMEM;
	}

	/* Now scan the zone */
	for (block = 0 ; block < ftl->zone_size ; block++) {

		/* Skip blocks till the CIS (including) */
		if (zone_num == 0 && block <= ftl->cis_block)
			continue;

		/* Read the oob of first sector */
		if (sm_read_sector(ftl, zone_num, block, 0, NULL, &oob))
			return -EIO;

		/* Test to see if block is erased. It is enough to test
		   first sector, because erase happens in one shot */
		if (sm_block_erased(&oob)) {
			kfifo_in(&zone->free_sectors,
				(unsigned char *)&block, 2);
			continue;
		}

		/* If block is marked as bad, skip it */
		/* This assumes we can trust first sector*/
		/* However the way the block valid status is defined, ensures
		   very low probability of failure here */
		if (!sm_block_valid(&oob)) {
			dbg("PH %04d <-> <marked bad>", block);
			continue;
		}


		lba = sm_read_lba(&oob);

		/* Invalid LBA means that block is damaged. */
		/* We can try to erase it, or mark it as bad, but
		   lets leave that to recovery application */
		if (lba == -2 || lba >= ftl->max_lba) {
			dbg("PH %04d <-> LBA %04d(bad)", block, lba);
			continue;
		}


		/* If there is no collision,
		   just put the sector in the FTL table */
		if (zone->lba_to_phys_table[lba] < 0) {
			dbg_verbose("PH %04d <-> LBA %04d", block, lba);
			zone->lba_to_phys_table[lba] = block;
			continue;
		}

		sm_printk("collision"
			" of LBA %d between blocks %d and %d in zone %d",
			lba, zone->lba_to_phys_table[lba], block, zone_num);

		/* Test that this block is valid*/
		if (sm_check_block(ftl, zone_num, block))
			continue;

		/* Test now the old block */
		if (sm_check_block(ftl, zone_num,
					zone->lba_to_phys_table[lba])) {
			zone->lba_to_phys_table[lba] = block;
			continue;
		}

		/* If both blocks are valid and share same LBA, it means that
		   they hold different versions of same data. It not
		   known which is more recent, thus just erase one of them
		*/
		sm_printk("both blocks are valid, erasing the later");
		sm_erase_block(ftl, zone_num, block, 1);
	}

	dbg("zone initialized");
	zone->initialized = 1;

	/* No free sectors, means that the zone is heavily damaged, write won't
	   work, but it can still can be (partially) read */
	if (!kfifo_len(&zone->free_sectors)) {
		sm_printk("no free blocks in zone %d", zone_num);
		return 0;
	}

	/* Randomize first block we write to: rotate the free fifo by a
	   random number of entries (each entry is 2 bytes) */
	get_random_bytes(&i, 2);
	i %= (kfifo_len(&zone->free_sectors) / 2);

	while (i--) {
		len = kfifo_out(&zone->free_sectors,
					(unsigned char *)&block, 2);
		WARN_ON(len != 2);
		kfifo_in(&zone->free_sectors, (const unsigned char *)&block, 2);
	}
	return 0;
}
869 | ||
870 | /* Get and automaticly initialize an FTL mapping for one zone */ | |
871 | struct ftl_zone *sm_get_zone(struct sm_ftl *ftl, int zone_num) | |
872 | { | |
873 | struct ftl_zone *zone; | |
874 | int error; | |
875 | ||
876 | BUG_ON(zone_num >= ftl->zone_count); | |
877 | zone = &ftl->zones[zone_num]; | |
878 | ||
879 | if (!zone->initialized) { | |
880 | error = sm_init_zone(ftl, zone_num); | |
881 | ||
882 | if (error) | |
883 | return ERR_PTR(error); | |
884 | } | |
885 | return zone; | |
886 | } | |
887 | ||
888 | ||
889 | /* ----------------- cache handling ------------------------------------------*/ | |
890 | ||
891 | /* Initialize the one block cache */ | |
892 | void sm_cache_init(struct sm_ftl *ftl) | |
893 | { | |
894 | ftl->cache_data_invalid_bitmap = 0xFFFFFFFF; | |
895 | ftl->cache_clean = 1; | |
896 | ftl->cache_zone = -1; | |
897 | ftl->cache_block = -1; | |
898 | /*memset(ftl->cache_data, 0xAA, ftl->block_size);*/ | |
899 | } | |
900 | ||
901 | /* Put sector in one block cache */ | |
902 | void sm_cache_put(struct sm_ftl *ftl, char *buffer, int boffset) | |
903 | { | |
904 | memcpy(ftl->cache_data + boffset, buffer, SM_SECTOR_SIZE); | |
905 | clear_bit(boffset / SM_SECTOR_SIZE, &ftl->cache_data_invalid_bitmap); | |
906 | ftl->cache_clean = 0; | |
907 | } | |
908 | ||
909 | /* Read a sector from the cache */ | |
910 | int sm_cache_get(struct sm_ftl *ftl, char *buffer, int boffset) | |
911 | { | |
912 | if (test_bit(boffset / SM_SECTOR_SIZE, | |
913 | &ftl->cache_data_invalid_bitmap)) | |
914 | return -1; | |
915 | ||
916 | memcpy(buffer, ftl->cache_data + boffset, SM_SECTOR_SIZE); | |
917 | return 0; | |
918 | } | |
919 | ||
/* Write the cache to hardware.
 * Fills the cache's unread sectors from the old physical block, takes a
 * free block from the fifo, writes the whole block, updates the FTL
 * table, then erases/frees the old block and resets the cache.
 * On a failed block write it restarts with the next free block. */
int sm_cache_flush(struct sm_ftl *ftl)
{
	struct ftl_zone *zone;

	int sector_num;
	uint16_t write_sector;
	int zone_num = ftl->cache_zone;
	int block_num;

	if (ftl->cache_clean)
		return 0;

	if (ftl->unstable)
		return -EIO;

	/* A dirty cache must be bound to a zone. */
	BUG_ON(zone_num < 0);
	zone = &ftl->zones[zone_num];
	/* Old physical block holding this LBA (may be unmapped == -1). */
	block_num = zone->lba_to_phys_table[ftl->cache_block];


	/* Try to read all unread areas of the cache block*/
	for_each_bit(sector_num, &ftl->cache_data_invalid_bitmap,
		ftl->block_size / SM_SECTOR_SIZE) {

		if (!sm_read_sector(ftl,
			zone_num, block_num, sector_num * SM_SECTOR_SIZE,
			ftl->cache_data + sector_num * SM_SECTOR_SIZE, NULL))
			clear_bit(sector_num,
				&ftl->cache_data_invalid_bitmap);
	}
restart:

	if (ftl->unstable)
		return -EIO;

	/* If there are no spare blocks, */
	/* we could still continue by erasing/writing the current block,
	   but for such worn out media it doesn't worth the trouble,
	   and the dangers */
	if (kfifo_out(&zone->free_sectors,
				(unsigned char *)&write_sector, 2) != 2) {
		dbg("no free sectors for write!");
		return -EIO;
	}


	/* sm_write_block marks the failed block bad and we retry with the
	   next free block from the fifo */
	if (sm_write_block(ftl, ftl->cache_data, zone_num, write_sector,
		ftl->cache_block, ftl->cache_data_invalid_bitmap))
		goto restart;

	/* Update the FTL table */
	zone->lba_to_phys_table[ftl->cache_block] = write_sector;

	/* Write successful, so erase and free the old block */
	if (block_num > 0)
		sm_erase_block(ftl, zone_num, block_num, 1);

	sm_cache_init(ftl);
	return 0;
}
981 | ||
982 | ||
983 | /* flush timer, runs a second after last write */ | |
984 | static void sm_cache_flush_timer(unsigned long data) | |
985 | { | |
986 | struct sm_ftl *ftl = (struct sm_ftl *)data; | |
987 | queue_work(cache_flush_workqueue, &ftl->flush_work); | |
988 | } | |
989 | ||
990 | /* cache flush work, kicked by timer */ | |
991 | static void sm_cache_flush_work(struct work_struct *work) | |
992 | { | |
993 | struct sm_ftl *ftl = container_of(work, struct sm_ftl, flush_work); | |
994 | mutex_lock(&ftl->mutex); | |
995 | sm_cache_flush(ftl); | |
996 | mutex_unlock(&ftl->mutex); | |
997 | return; | |
998 | } | |
999 | ||
1000 | /* ---------------- outside interface -------------------------------------- */ | |
1001 | ||
1002 | /* outside interface: read a sector */ | |
1003 | static int sm_read(struct mtd_blktrans_dev *dev, | |
1004 | unsigned long sect_no, char *buf) | |
1005 | { | |
1006 | struct sm_ftl *ftl = dev->priv; | |
1007 | struct ftl_zone *zone; | |
1008 | int error = 0, in_cache = 0; | |
1009 | int zone_num, block, boffset; | |
1010 | ||
1011 | sm_break_offset(ftl, sect_no << 9, &zone_num, &block, &boffset); | |
1012 | mutex_lock(&ftl->mutex); | |
1013 | ||
1014 | ||
1015 | zone = sm_get_zone(ftl, zone_num); | |
1016 | if (IS_ERR(zone)) { | |
1017 | error = PTR_ERR(zone); | |
1018 | goto unlock; | |
1019 | } | |
1020 | ||
1021 | /* Have to look at cache first */ | |
1022 | if (ftl->cache_zone == zone_num && ftl->cache_block == block) { | |
1023 | in_cache = 1; | |
1024 | if (!sm_cache_get(ftl, buf, boffset)) | |
1025 | goto unlock; | |
1026 | } | |
1027 | ||
1028 | /* Translate the block and return if doesn't exist in the table */ | |
1029 | block = zone->lba_to_phys_table[block]; | |
1030 | ||
1031 | if (block == -1) { | |
1032 | memset(buf, 0xFF, SM_SECTOR_SIZE); | |
1033 | goto unlock; | |
1034 | } | |
1035 | ||
1036 | if (sm_read_sector(ftl, zone_num, block, boffset, buf, NULL)) { | |
1037 | error = -EIO; | |
1038 | goto unlock; | |
1039 | } | |
1040 | ||
1041 | if (in_cache) | |
1042 | sm_cache_put(ftl, buf, boffset); | |
1043 | unlock: | |
1044 | mutex_unlock(&ftl->mutex); | |
1045 | return error; | |
1046 | } | |
1047 | ||
1048 | /* outside interface: write a sector */ | |
1049 | static int sm_write(struct mtd_blktrans_dev *dev, | |
1050 | unsigned long sec_no, char *buf) | |
1051 | { | |
1052 | struct sm_ftl *ftl = dev->priv; | |
1053 | struct ftl_zone *zone; | |
1054 | int error, zone_num, block, boffset; | |
1055 | ||
1056 | BUG_ON(ftl->readonly); | |
1057 | sm_break_offset(ftl, sec_no << 9, &zone_num, &block, &boffset); | |
1058 | ||
1059 | /* No need in flush thread running now */ | |
1060 | del_timer(&ftl->timer); | |
1061 | mutex_lock(&ftl->mutex); | |
1062 | ||
1063 | zone = sm_get_zone(ftl, zone_num); | |
1064 | if (IS_ERR(zone)) { | |
1065 | error = PTR_ERR(zone); | |
1066 | goto unlock; | |
1067 | } | |
1068 | ||
1069 | /* If entry is not in cache, flush it */ | |
1070 | if (ftl->cache_block != block || ftl->cache_zone != zone_num) { | |
1071 | ||
1072 | error = sm_cache_flush(ftl); | |
1073 | if (error) | |
1074 | goto unlock; | |
1075 | ||
1076 | ftl->cache_block = block; | |
1077 | ftl->cache_zone = zone_num; | |
1078 | } | |
1079 | ||
1080 | sm_cache_put(ftl, buf, boffset); | |
1081 | unlock: | |
1082 | mod_timer(&ftl->timer, jiffies + msecs_to_jiffies(cache_timeout)); | |
1083 | mutex_unlock(&ftl->mutex); | |
1084 | return error; | |
1085 | } | |
1086 | ||
1087 | /* outside interface: flush everything */ | |
1088 | static int sm_flush(struct mtd_blktrans_dev *dev) | |
1089 | { | |
1090 | struct sm_ftl *ftl = dev->priv; | |
1091 | int retval; | |
1092 | ||
1093 | mutex_lock(&ftl->mutex); | |
1094 | retval = sm_cache_flush(ftl); | |
1095 | mutex_unlock(&ftl->mutex); | |
1096 | return retval; | |
1097 | } | |
1098 | ||
1099 | /* outside interface: device is released */ | |
1100 | static int sm_release(struct mtd_blktrans_dev *dev) | |
1101 | { | |
1102 | struct sm_ftl *ftl = dev->priv; | |
1103 | ||
1104 | mutex_lock(&ftl->mutex); | |
1105 | del_timer_sync(&ftl->timer); | |
1106 | cancel_work_sync(&ftl->flush_work); | |
1107 | sm_cache_flush(ftl); | |
1108 | mutex_unlock(&ftl->mutex); | |
1109 | return 0; | |
1110 | } | |
1111 | ||
1112 | /* outside interface: get geometry */ | |
1113 | static int sm_getgeo(struct mtd_blktrans_dev *dev, struct hd_geometry *geo) | |
1114 | { | |
1115 | struct sm_ftl *ftl = dev->priv; | |
1116 | geo->heads = ftl->heads; | |
1117 | geo->sectors = ftl->sectors; | |
1118 | geo->cylinders = ftl->cylinders; | |
1119 | return 0; | |
1120 | } | |
1121 | ||
1122 | /* external interface: main initialization function */ | |
1123 | static void sm_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) | |
1124 | { | |
1125 | struct mtd_blktrans_dev *trans; | |
1126 | struct sm_ftl *ftl; | |
1127 | ||
1128 | /* Allocate & initialize our private structure */ | |
1129 | ftl = kzalloc(sizeof(struct sm_ftl), GFP_KERNEL); | |
1130 | if (!ftl) | |
1131 | goto error1; | |
1132 | ||
1133 | ||
1134 | mutex_init(&ftl->mutex); | |
1135 | setup_timer(&ftl->timer, sm_cache_flush_timer, (unsigned long)ftl); | |
1136 | INIT_WORK(&ftl->flush_work, sm_cache_flush_work); | |
1137 | init_completion(&ftl->erase_completion); | |
1138 | ||
1139 | /* Read media information */ | |
1140 | if (sm_get_media_info(ftl, mtd)) { | |
1141 | dbg("found unsupported mtd device, aborting"); | |
1142 | goto error2; | |
1143 | } | |
1144 | ||
1145 | ||
1146 | /* Allocate temporary CIS buffer for read retry support */ | |
1147 | ftl->cis_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL); | |
1148 | if (!ftl->cis_buffer) | |
1149 | goto error2; | |
1150 | ||
1151 | /* Allocate zone array, it will be initialized on demand */ | |
1152 | ftl->zones = kzalloc(sizeof(struct ftl_zone) * ftl->zone_count, | |
1153 | GFP_KERNEL); | |
1154 | if (!ftl->zones) | |
1155 | goto error3; | |
1156 | ||
1157 | /* Allocate the cache*/ | |
1158 | ftl->cache_data = kzalloc(ftl->block_size, GFP_KERNEL); | |
1159 | ||
1160 | if (!ftl->cache_data) | |
1161 | goto error4; | |
1162 | ||
1163 | sm_cache_init(ftl); | |
1164 | ||
1165 | ||
1166 | /* Allocate upper layer structure and initialize it */ | |
1167 | trans = kzalloc(sizeof(struct mtd_blktrans_dev), GFP_KERNEL); | |
1168 | if (!trans) | |
1169 | goto error5; | |
1170 | ||
1171 | ftl->trans = trans; | |
1172 | trans->priv = ftl; | |
1173 | ||
1174 | trans->tr = tr; | |
1175 | trans->mtd = mtd; | |
1176 | trans->devnum = -1; | |
1177 | trans->size = (ftl->block_size * ftl->max_lba * ftl->zone_count) >> 9; | |
1178 | trans->readonly = ftl->readonly; | |
1179 | ||
1180 | if (sm_find_cis(ftl)) { | |
1181 | dbg("CIS not found on mtd device, aborting"); | |
1182 | goto error6; | |
1183 | } | |
1184 | ||
1185 | ftl->disk_attributes = sm_create_sysfs_attributes(ftl); | |
1186 | trans->disk_attributes = ftl->disk_attributes; | |
1187 | ||
1188 | sm_printk("Found %d MiB xD/SmartMedia FTL on mtd%d", | |
1189 | (int)(mtd->size / (1024 * 1024)), mtd->index); | |
1190 | ||
1191 | dbg("FTL layout:"); | |
1192 | dbg("%d zone(s), each consists of %d blocks (+%d spares)", | |
1193 | ftl->zone_count, ftl->max_lba, | |
1194 | ftl->zone_size - ftl->max_lba); | |
1195 | dbg("each block consists of %d bytes", | |
1196 | ftl->block_size); | |
1197 | ||
1198 | ||
1199 | /* Register device*/ | |
1200 | if (add_mtd_blktrans_dev(trans)) { | |
1201 | dbg("error in mtdblktrans layer"); | |
1202 | goto error6; | |
1203 | } | |
1204 | return; | |
1205 | error6: | |
1206 | kfree(trans); | |
1207 | error5: | |
1208 | kfree(ftl->cache_data); | |
1209 | error4: | |
1210 | kfree(ftl->zones); | |
1211 | error3: | |
1212 | kfree(ftl->cis_buffer); | |
1213 | error2: | |
1214 | kfree(ftl); | |
1215 | error1: | |
1216 | return; | |
1217 | } | |
1218 | ||
1219 | /* main interface: device {surprise,} removal */ | |
1220 | static void sm_remove_dev(struct mtd_blktrans_dev *dev) | |
1221 | { | |
1222 | struct sm_ftl *ftl = dev->priv; | |
1223 | int i; | |
1224 | ||
1225 | del_mtd_blktrans_dev(dev); | |
1226 | ftl->trans = NULL; | |
1227 | ||
1228 | for (i = 0 ; i < ftl->zone_count; i++) { | |
1229 | ||
1230 | if (!ftl->zones[i].initialized) | |
1231 | continue; | |
1232 | ||
1233 | kfree(ftl->zones[i].lba_to_phys_table); | |
1234 | kfifo_free(&ftl->zones[i].free_sectors); | |
1235 | } | |
1236 | ||
1237 | sm_delete_sysfs_attributes(ftl); | |
1238 | kfree(ftl->cis_buffer); | |
1239 | kfree(ftl->zones); | |
1240 | kfree(ftl->cache_data); | |
1241 | kfree(ftl); | |
1242 | } | |
1243 | ||
/* Operations exported to the generic mtd block translation layer.
 * major = -1 requests a dynamically allocated block-device major. */
static struct mtd_blktrans_ops sm_ftl_ops = {
	.name = "smblk",
	.major = -1,
	.part_bits = SM_FTL_PARTN_BITS,
	.blksize = SM_SECTOR_SIZE,
	.getgeo = sm_getgeo,

	/* device discovery / removal */
	.add_mtd = sm_add_mtd,
	.remove_dev = sm_remove_dev,

	/* sector I/O (serialized per-device by ftl->mutex) */
	.readsect = sm_read,
	.writesect = sm_write,

	.flush = sm_flush,
	.release = sm_release,

	.owner = THIS_MODULE,
};
1262 | ||
1263 | static __init int sm_module_init(void) | |
1264 | { | |
1265 | int error = 0; | |
1266 | cache_flush_workqueue = create_freezeable_workqueue("smflush"); | |
1267 | ||
1268 | if (IS_ERR(cache_flush_workqueue)) | |
1269 | return PTR_ERR(cache_flush_workqueue); | |
1270 | ||
1271 | error = register_mtd_blktrans(&sm_ftl_ops); | |
1272 | if (error) | |
1273 | destroy_workqueue(cache_flush_workqueue); | |
1274 | return error; | |
1275 | ||
1276 | } | |
1277 | ||
1278 | static void __exit sm_module_exit(void) | |
1279 | { | |
1280 | destroy_workqueue(cache_flush_workqueue); | |
1281 | deregister_mtd_blktrans(&sm_ftl_ops); | |
1282 | } | |
1283 | ||
1284 | module_init(sm_module_init); | |
1285 | module_exit(sm_module_exit); | |
1286 | ||
1287 | MODULE_LICENSE("GPL"); | |
1288 | MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>"); | |
1289 | MODULE_DESCRIPTION("Smartmedia/xD mtd translation layer"); |