/*
 * $Id: mtdchar.c,v 1.68 2005/02/08 19:12:50 nico Exp $
 *
 * Character-device access to raw MTD devices.
 *
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <asm/uaccess.h>

#ifdef CONFIG_DEVFS_FS
#include <linux/devfs_fs_kernel.h>

static void mtd_notify_add(struct mtd_info *mtd)
{
	if (!mtd)
		return;

	devfs_mk_cdev(MKDEV(MTD_CHAR_MAJOR, mtd->index*2),
		      S_IFCHR | S_IRUGO | S_IWUGO, "mtd/%d", mtd->index);

	devfs_mk_cdev(MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1),
		      S_IFCHR | S_IRUGO, "mtd/%dro", mtd->index);
}

static void mtd_notify_remove(struct mtd_info *mtd)
{
	if (!mtd)
		return;
	devfs_remove("mtd/%d", mtd->index);
	devfs_remove("mtd/%dro", mtd->index);
}

static struct mtd_notifier notifier = {
	.add	= mtd_notify_add,
	.remove	= mtd_notify_remove,
};

static inline void mtdchar_devfs_init(void)
{
	devfs_mk_dir("mtd");
	register_mtd_user(&notifier);
}

static inline void mtdchar_devfs_exit(void)
{
	unregister_mtd_user(&notifier);
	devfs_remove("mtd");
}
#else /* !DEVFS */
#define mtdchar_devfs_init() do { } while(0)
#define mtdchar_devfs_exit() do { } while(0)
#endif

/*
 * We use file->private_data to store a pointer to the MTD device.
 * Since alignment is at least 32 bits, we have 2 bits free for OTP
 * modes as well.
 */

#define TO_MTD(file) (struct mtd_info *)((long)((file)->private_data) & ~3L)

#define MTD_MODE_OTP_FACT	1
#define MTD_MODE_OTP_USER	2
#define MTD_MODE(file)		((long)((file)->private_data) & 3)

#define SET_MTD_MODE(file, mode) \
	do { long __p = (long)((file)->private_data); \
	     (file)->private_data = (void *)((__p & ~3L) | mode); } while (0)
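
/*
 * Example: with an mtd_info at address 0x...1000, SET_MTD_MODE(file,
 * MTD_MODE_OTP_USER) leaves 0x...1002 in file->private_data; TO_MTD()
 * masks the two mode bits off again and MTD_MODE() reads them back.
 * This relies on the mtd_info pointer being at least 4-byte aligned,
 * as the comment above notes.
 */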

static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
{
	struct mtd_info *mtd = TO_MTD(file);

	switch (orig) {
	case 0:
		/* SEEK_SET */
		file->f_pos = offset;
		break;
	case 1:
		/* SEEK_CUR */
		file->f_pos += offset;
		break;
	case 2:
		/* SEEK_END */
		file->f_pos = mtd->size + offset;
		break;
	default:
		return -EINVAL;
	}

	if (file->f_pos < 0)
		file->f_pos = 0;
	else if (file->f_pos >= mtd->size)
		file->f_pos = mtd->size - 1;

	return file->f_pos;
}



static int mtd_open(struct inode *inode, struct file *file)
{
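	/*
	 * Minor numbers come in pairs: 2*N is the read-write node for MTD
	 * device N ("mtd/%d" above) and 2*N+1 is its read-only companion
	 * ("mtd/%dro"), hence the >> 1 here and the (minor & 1) check below.
	 */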
	int minor = iminor(inode);
	int devnum = minor >> 1;
	struct mtd_info *mtd;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n");

	if (devnum >= MAX_MTD_DEVICES)
		return -ENODEV;

	/* You can't open the RO devices RW */
	if ((file->f_mode & 2) && (minor & 1))
		return -EACCES;

	mtd = get_mtd_device(NULL, devnum);

	if (!mtd)
		return -ENODEV;

	if (MTD_ABSENT == mtd->type) {
		put_mtd_device(mtd);
		return -ENODEV;
	}

	file->private_data = mtd;

	/* You can't open it RW if it's not a writeable device */
	if ((file->f_mode & 2) && !(mtd->flags & MTD_WRITEABLE)) {
		put_mtd_device(mtd);
		return -EACCES;
	}

	return 0;
} /* mtd_open */

/*====================================================================*/

static int mtd_close(struct inode *inode, struct file *file)
{
	struct mtd_info *mtd;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n");

	mtd = TO_MTD(file);

	if (mtd->sync)
		mtd->sync(mtd);

	put_mtd_device(mtd);

	return 0;
} /* mtd_close */

/* FIXME: This _really_ needs to die. In 2.5, we should lock the
   userspace buffer down and use it directly with readv/writev.
*/
#define MAX_KMALLOC_SIZE 0x20000
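
/*
 * Until then, reads and writes are staged through a kernel bounce buffer:
 * each loop iteration below kmallocs at most MAX_KMALLOC_SIZE (128KiB),
 * copies between it and userspace, and calls the MTD read/write routine
 * on that chunk before advancing *ppos.
 */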

static ssize_t mtd_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	struct mtd_info *mtd = TO_MTD(file);
	size_t retlen=0;
	size_t total_retlen=0;
	int ret=0;
	int len;
	char *kbuf;

	DEBUG(MTD_DEBUG_LEVEL0,"MTD_read\n");

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	/* FIXME: Use kiovec in 2.5 to lock down the user's buffers
	   and pass them directly to the MTD functions */
	while (count) {
		if (count > MAX_KMALLOC_SIZE)
			len = MAX_KMALLOC_SIZE;
		else
			len = count;

		kbuf=kmalloc(len,GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;

		switch (MTD_MODE(file)) {
		case MTD_MODE_OTP_FACT:
			ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		case MTD_MODE_OTP_USER:
			ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		default:
			ret = MTD_READ(mtd, *ppos, len, &retlen, kbuf);
		}
		/* NAND returns -EBADMSG on ECC errors, but it still returns
		 * the data. It is important for our userspace tools to be
		 * able to dump areas with ECC errors!
		 * Userspace software which accesses NAND this way must be
		 * aware that it is dealing with NAND.
		 */
		if (!ret || (ret == -EBADMSG)) {
			*ppos += retlen;
			if (copy_to_user(buf, kbuf, retlen)) {
				kfree(kbuf);
				return -EFAULT;
			}
			else
				total_retlen += retlen;

			count -= retlen;
			buf += retlen;
			if (retlen == 0)
				count = 0;
		}
		else {
			kfree(kbuf);
			return ret;
		}

		kfree(kbuf);
	}

	return total_retlen;
} /* mtd_read */

static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	struct mtd_info *mtd = TO_MTD(file);
	char *kbuf;
	size_t retlen;
	size_t total_retlen=0;
	int ret=0;
	int len;

	DEBUG(MTD_DEBUG_LEVEL0,"MTD_write\n");

	if (*ppos == mtd->size)
		return -ENOSPC;

	if (*ppos + count > mtd->size)
		count = mtd->size - *ppos;

	if (!count)
		return 0;

	while (count) {
		if (count > MAX_KMALLOC_SIZE)
			len = MAX_KMALLOC_SIZE;
		else
			len = count;

		kbuf=kmalloc(len,GFP_KERNEL);
		if (!kbuf) {
			printk("kmalloc is null\n");
			return -ENOMEM;
		}

		if (copy_from_user(kbuf, buf, len)) {
			kfree(kbuf);
			return -EFAULT;
		}

		switch (MTD_MODE(file)) {
		case MTD_MODE_OTP_FACT:
			ret = -EROFS;
			break;
		case MTD_MODE_OTP_USER:
			if (!mtd->write_user_prot_reg) {
				ret = -EOPNOTSUPP;
				break;
			}
			ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
			break;
		default:
			ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
		}
		if (!ret) {
			*ppos += retlen;
			total_retlen += retlen;
			count -= retlen;
			buf += retlen;
		}
		else {
			kfree(kbuf);
			return ret;
		}

		kfree(kbuf);
	}

	return total_retlen;
} /* mtd_write */

/*======================================================================

  IOCTL calls for getting device parameters.

======================================================================*/
static void mtdchar_erase_callback (struct erase_info *instr)
{
	wake_up((wait_queue_head_t *)instr->priv);
}
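
/*
 * The callback above is what makes MEMERASE synchronous for userspace:
 * mtd->erase() completes asynchronously, so the ioctl below sleeps on a
 * wait queue passed through erase_info->priv and is woken from this
 * callback once the erase has finished or failed.
 */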

static int mtd_ioctl(struct inode *inode, struct file *file,
		     u_int cmd, u_long arg)
{
	struct mtd_info *mtd = TO_MTD(file);
	void __user *argp = (void __user *)arg;
	int ret = 0;
	u_long size;

	DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");

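	/*
	 * The argument size is encoded in the ioctl number itself, so the
	 * user pointer can be verified up front for that many bytes, in
	 * whichever direction(s) the command declares.
	 */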
	size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
	if (cmd & IOC_IN) {
		if (!access_ok(VERIFY_READ, argp, size))
			return -EFAULT;
	}
	if (cmd & IOC_OUT) {
		if (!access_ok(VERIFY_WRITE, argp, size))
			return -EFAULT;
	}

	switch (cmd) {
	case MEMGETREGIONCOUNT:
		if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
			return -EFAULT;
		break;

	case MEMGETREGIONINFO:
	{
		struct region_info_user ur;

		if (copy_from_user(&ur, argp, sizeof(struct region_info_user)))
			return -EFAULT;

		if (ur.regionindex >= mtd->numeraseregions)
			return -EINVAL;
		if (copy_to_user(argp, &(mtd->eraseregions[ur.regionindex]),
				sizeof(struct mtd_erase_region_info)))
			return -EFAULT;
		break;
	}

	case MEMGETINFO:
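		/*
		 * struct mtd_info_user is laid out to match the leading
		 * fields of struct mtd_info, so the user copy is taken
		 * straight from the start of *mtd.
		 */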
		if (copy_to_user(argp, mtd, sizeof(struct mtd_info_user)))
			return -EFAULT;
		break;

	case MEMERASE:
	{
		struct erase_info *erase;

		if(!(file->f_mode & 2))
			return -EPERM;

		erase=kmalloc(sizeof(struct erase_info),GFP_KERNEL);
		if (!erase)
			ret = -ENOMEM;
		else {
			wait_queue_head_t waitq;
			DECLARE_WAITQUEUE(wait, current);

			init_waitqueue_head(&waitq);

			memset (erase,0,sizeof(struct erase_info));
			if (copy_from_user(&erase->addr, argp,
					   sizeof(struct erase_info_user))) {
				kfree(erase);
				return -EFAULT;
			}
			erase->mtd = mtd;
			erase->callback = mtdchar_erase_callback;
			erase->priv = (unsigned long)&waitq;

			/*
			  FIXME: Allow INTERRUPTIBLE. Which means
			  not having the wait_queue head on the stack.

			  If the wq_head is on the stack, and we
			  leave because we got interrupted, then the
			  wq_head is no longer there when the
			  callback routine tries to wake us up.
			*/
			ret = mtd->erase(mtd, erase);
			if (!ret) {
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&waitq, &wait);
				if (erase->state != MTD_ERASE_DONE &&
				    erase->state != MTD_ERASE_FAILED)
					schedule();
				remove_wait_queue(&waitq, &wait);
				set_current_state(TASK_RUNNING);

				ret = (erase->state == MTD_ERASE_FAILED)?-EIO:0;
			}
			kfree(erase);
		}
		break;
	}

	case MEMWRITEOOB:
	{
		struct mtd_oob_buf buf;
		void *databuf;
		ssize_t retlen;

		if(!(file->f_mode & 2))
			return -EPERM;

		if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
			return -EFAULT;

		if (buf.length > 0x4096)
			return -EINVAL;

		if (!mtd->write_oob)
			ret = -EOPNOTSUPP;
		else
			ret = access_ok(VERIFY_READ, buf.ptr,
					buf.length) ? 0 : -EFAULT;

		if (ret)
			return ret;

		databuf = kmalloc(buf.length, GFP_KERNEL);
		if (!databuf)
			return -ENOMEM;

		if (copy_from_user(databuf, buf.ptr, buf.length)) {
			kfree(databuf);
			return -EFAULT;
		}

		ret = (mtd->write_oob)(mtd, buf.start, buf.length, &retlen, databuf);

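		/*
		 * Report the number of OOB bytes actually written back into
		 * the second 32-bit word of the user's struct mtd_oob_buf,
		 * i.e. its length field.
		 */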
		if (copy_to_user(argp + sizeof(uint32_t), &retlen, sizeof(uint32_t)))
			ret = -EFAULT;

		kfree(databuf);
		break;

	}

	case MEMREADOOB:
	{
		struct mtd_oob_buf buf;
		void *databuf;
		ssize_t retlen;

		if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
			return -EFAULT;

		if (buf.length > 0x4096)
			return -EINVAL;

		if (!mtd->read_oob)
			ret = -EOPNOTSUPP;
		else
			ret = access_ok(VERIFY_WRITE, buf.ptr,
					buf.length) ? 0 : -EFAULT;

		if (ret)
			return ret;

		databuf = kmalloc(buf.length, GFP_KERNEL);
		if (!databuf)
			return -ENOMEM;

		ret = (mtd->read_oob)(mtd, buf.start, buf.length, &retlen, databuf);

		if (put_user(retlen, (uint32_t __user *)argp))
			ret = -EFAULT;
		else if (retlen && copy_to_user(buf.ptr, databuf, retlen))
			ret = -EFAULT;

		kfree(databuf);
		break;
	}

	case MEMLOCK:
	{
		struct erase_info_user info;

		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;

		if (!mtd->lock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->lock(mtd, info.start, info.length);
		break;
	}

	case MEMUNLOCK:
	{
		struct erase_info_user info;

		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;

		if (!mtd->unlock)
			ret = -EOPNOTSUPP;
		else
			ret = mtd->unlock(mtd, info.start, info.length);
		break;
	}

	case MEMSETOOBSEL:
	{
		if (copy_from_user(&mtd->oobinfo, argp, sizeof(struct nand_oobinfo)))
			return -EFAULT;
		break;
	}

	case MEMGETOOBSEL:
	{
		if (copy_to_user(argp, &(mtd->oobinfo), sizeof(struct nand_oobinfo)))
			return -EFAULT;
		break;
	}

	case MEMGETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		if (!mtd->block_isbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_isbad(mtd, offs);
		break;
	}

	case MEMSETBADBLOCK:
	{
		loff_t offs;

		if (copy_from_user(&offs, argp, sizeof(loff_t)))
			return -EFAULT;
		if (!mtd->block_markbad)
			ret = -EOPNOTSUPP;
		else
			return mtd->block_markbad(mtd, offs);
		break;
	}

#ifdef CONFIG_MTD_OTP
	case OTPSELECT:
	{
		int mode;
		if (copy_from_user(&mode, argp, sizeof(int)))
			return -EFAULT;
		SET_MTD_MODE(file, 0);
		switch (mode) {
		case MTD_OTP_FACTORY:
			if (!mtd->read_fact_prot_reg)
				ret = -EOPNOTSUPP;
			else
				SET_MTD_MODE(file, MTD_MODE_OTP_FACT);
			break;
		case MTD_OTP_USER:
			if (!mtd->read_fact_prot_reg)
				ret = -EOPNOTSUPP;
			else
				SET_MTD_MODE(file, MTD_MODE_OTP_USER);
			break;
		default:
			ret = -EINVAL;
		case MTD_OTP_OFF:
			break;
		}
		break;
	}

	case OTPGETREGIONCOUNT:
	case OTPGETREGIONINFO:
	{
		struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		ret = -EOPNOTSUPP;
		switch (MTD_MODE(file)) {
		case MTD_MODE_OTP_FACT:
			if (mtd->get_fact_prot_info)
				ret = mtd->get_fact_prot_info(mtd, buf, 4096);
			break;
		case MTD_MODE_OTP_USER:
			if (mtd->get_user_prot_info)
				ret = mtd->get_user_prot_info(mtd, buf, 4096);
			break;
		}
		if (ret >= 0) {
			if (cmd == OTPGETREGIONCOUNT) {
				int nbr = ret / sizeof(struct otp_info);
				ret = copy_to_user(argp, &nbr, sizeof(int));
			} else
				ret = copy_to_user(argp, buf, ret);
			if (ret)
				ret = -EFAULT;
		}
		kfree(buf);
		break;
	}

	case OTPLOCK:
	{
		struct otp_info info;

		if (MTD_MODE(file) != MTD_MODE_OTP_USER)
			return -EINVAL;
		if (copy_from_user(&info, argp, sizeof(info)))
			return -EFAULT;
		if (!mtd->lock_user_prot_reg)
			return -EOPNOTSUPP;
		ret = mtd->lock_user_prot_reg(mtd, info.start, info.length);
		break;
	}
#endif

	default:
		ret = -ENOTTY;
	}

	return ret;
} /* memory_ioctl */

static struct file_operations mtd_fops = {
	.owner		= THIS_MODULE,
	.llseek		= mtd_lseek,
	.read		= mtd_read,
	.write		= mtd_write,
	.ioctl		= mtd_ioctl,
	.open		= mtd_open,
	.release	= mtd_close,
};

static int __init init_mtdchar(void)
{
	if (register_chrdev(MTD_CHAR_MAJOR, "mtd", &mtd_fops)) {
		printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
		       MTD_CHAR_MAJOR);
		return -EAGAIN;
	}

	mtdchar_devfs_init();
	return 0;
}

static void __exit cleanup_mtdchar(void)
{
	mtdchar_devfs_exit();
	unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
}

module_init(init_mtdchar);
module_exit(cleanup_mtdchar);


MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Direct character-device access to MTD devices");
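
/*
 * Illustrative userspace usage of this character device -- a sketch only,
 * not part of the driver.  It assumes the userspace ABI header
 * <mtd/mtd-user.h> (header location may differ on older systems) and a
 * writable /dev/mtd0.  It queries the device geometry with MEMGETINFO and
 * erases the first eraseblock with MEMERASE before it could be rewritten:
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <mtd/mtd-user.h>
 *
 *	int main(void)
 *	{
 *		struct mtd_info_user info;
 *		struct erase_info_user ei;
 *		int fd = open("/dev/mtd0", O_RDWR);
 *
 *		if (fd < 0 || ioctl(fd, MEMGETINFO, &info) < 0)
 *			return 1;
 *
 *		ei.start = 0;			// first eraseblock
 *		ei.length = info.erasesize;	// one eraseblock
 *		if (ioctl(fd, MEMERASE, &ei) < 0)
 *			return 1;
 *
 *		close(fd);
 *		return 0;
 *	}
 */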