/*
 * $Id: mtdchar.c,v 1.76 2005/11/07 11:14:20 gleixner Exp $
 *
 * Character-device access to raw MTD devices.
 *
 */

#include <linux/config.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>

#include <asm/uaccess.h>

static struct class *mtd_class;

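/*
 * Each registered MTD device is exposed as a pair of character device
 * minors: minor (index * 2) is the read-write node ("mtd%d") and minor
 * (index * 2 + 1) is the read-only node ("mtd%dro").  mtd_open() below
 * recovers the device number with (minor >> 1) and uses the low bit to
 * refuse read-write opens of the read-only node.
 */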
static void mtd_notify_add(struct mtd_info* mtd)
{
        if (!mtd)
                return;

        class_device_create(mtd_class, NULL, MKDEV(MTD_CHAR_MAJOR, mtd->index*2),
                            NULL, "mtd%d", mtd->index);

        class_device_create(mtd_class, NULL,
                            MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1),
                            NULL, "mtd%dro", mtd->index);
}

static void mtd_notify_remove(struct mtd_info* mtd)
{
        if (!mtd)
                return;

        class_device_destroy(mtd_class, MKDEV(MTD_CHAR_MAJOR, mtd->index*2));
        class_device_destroy(mtd_class, MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1));
}

static struct mtd_notifier notifier = {
        .add    = mtd_notify_add,
        .remove = mtd_notify_remove,
};

/*
 * Data structure to hold the pointer to the mtd device as well
 * as mode information for various use cases.
 */
struct mtd_file_info {
        struct mtd_info *mtd;
        enum mtd_file_modes mode;
};

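/*
 * mfi->mode starts out as MTD_MODE_NORMAL and is switched with the
 * OTPSELECT and MTDFILEMODE ioctls; mtd_read() and mtd_write() use it
 * to route I/O through the normal, OTP or raw (MTD_OOB_RAW) paths.
 */
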
static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
{
        struct mtd_file_info *mfi = file->private_data;
        struct mtd_info *mtd = mfi->mtd;

        switch (orig) {
        case 0:
                /* SEEK_SET */
                break;
        case 1:
                /* SEEK_CUR */
                offset += file->f_pos;
                break;
        case 2:
                /* SEEK_END */
                offset += mtd->size;
                break;
        default:
                return -EINVAL;
        }

        if (offset >= 0 && offset < mtd->size)
                return file->f_pos = offset;

        return -EINVAL;
}

static int mtd_open(struct inode *inode, struct file *file)
{
        int minor = iminor(inode);
        int devnum = minor >> 1;
        struct mtd_info *mtd;
        struct mtd_file_info *mfi;

        DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n");

        if (devnum >= MAX_MTD_DEVICES)
                return -ENODEV;

        /* You can't open the RO devices RW */
        if ((file->f_mode & 2) && (minor & 1))
                return -EACCES;

        mtd = get_mtd_device(NULL, devnum);

        if (!mtd)
                return -ENODEV;

        if (MTD_ABSENT == mtd->type) {
                put_mtd_device(mtd);
                return -ENODEV;
        }

        /* You can't open it RW if it's not a writeable device */
        if ((file->f_mode & 2) && !(mtd->flags & MTD_WRITEABLE)) {
                put_mtd_device(mtd);
                return -EACCES;
        }

        mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
        if (!mfi) {
                put_mtd_device(mtd);
                return -ENOMEM;
        }
        mfi->mtd = mtd;
        file->private_data = mfi;

        return 0;
} /* mtd_open */

/*====================================================================*/

static int mtd_close(struct inode *inode, struct file *file)
{
        struct mtd_file_info *mfi = file->private_data;
        struct mtd_info *mtd = mfi->mtd;

        DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n");

        if (mtd->sync)
                mtd->sync(mtd);

        put_mtd_device(mtd);
        file->private_data = NULL;
        kfree(mfi);

        return 0;
} /* mtd_close */

/* FIXME: This _really_ needs to die. In 2.5, we should lock the
   userspace buffer down and use it directly with readv/writev.
*/
#define MAX_KMALLOC_SIZE 0x20000

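/*
 * mtd_read()/mtd_write() below bounce data through a kernel buffer of
 * at most MAX_KMALLOC_SIZE (0x20000 = 128 KiB), looping until the
 * requested count has been transferred or the device returns an error.
 */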
static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t *ppos)
{
        struct mtd_file_info *mfi = file->private_data;
        struct mtd_info *mtd = mfi->mtd;
        size_t retlen=0;
        size_t total_retlen=0;
        int ret=0;
        int len;
        char *kbuf;

        DEBUG(MTD_DEBUG_LEVEL0,"MTD_read\n");

        if (*ppos + count > mtd->size)
                count = mtd->size - *ppos;

        if (!count)
                return 0;

        /* FIXME: Use kiovec in 2.5 to lock down the user's buffers
           and pass them directly to the MTD functions */

        if (count > MAX_KMALLOC_SIZE)
                kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
        else
                kbuf=kmalloc(count, GFP_KERNEL);

        if (!kbuf)
                return -ENOMEM;

        while (count) {

                if (count > MAX_KMALLOC_SIZE)
                        len = MAX_KMALLOC_SIZE;
                else
                        len = count;

                switch (mfi->mode) {
                case MTD_MODE_OTP_FACTORY:
                        ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
                        break;
                case MTD_MODE_OTP_USER:
                        ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
                        break;
                case MTD_MODE_RAW:
                {
                        struct mtd_oob_ops ops;

                        ops.mode = MTD_OOB_RAW;
                        ops.datbuf = kbuf;
                        ops.oobbuf = NULL;
                        ops.len = len;

                        ret = mtd->read_oob(mtd, *ppos, &ops);
                        retlen = ops.retlen;
                        break;
                }
                default:
                        ret = mtd->read(mtd, *ppos, len, &retlen, kbuf);
                }
                /* NAND returns -EBADMSG on ECC errors, but it returns
                 * the data. For our userspace tools it is important
                 * to dump areas with ECC errors!
                 * For kernel internal usage it also might return -EUCLEAN
                 * to signal the caller that a bitflip has occurred and has
                 * been corrected by the ECC algorithm.
                 * Userspace software which accesses NAND this way
                 * must be aware of the fact that it deals with NAND.
                 */
                if (!ret || (ret == -EUCLEAN) || (ret == -EBADMSG)) {
                        *ppos += retlen;
                        if (copy_to_user(buf, kbuf, retlen)) {
                                kfree(kbuf);
                                return -EFAULT;
                        }
                        else
                                total_retlen += retlen;

                        count -= retlen;
                        buf += retlen;
                        if (retlen == 0)
                                count = 0;
                }
                else {
                        kfree(kbuf);
                        return ret;
                }

        }

        kfree(kbuf);
        return total_retlen;
} /* mtd_read */

static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count,loff_t *ppos)
{
        struct mtd_file_info *mfi = file->private_data;
        struct mtd_info *mtd = mfi->mtd;
        char *kbuf;
        size_t retlen;
        size_t total_retlen=0;
        int ret=0;
        int len;

        DEBUG(MTD_DEBUG_LEVEL0,"MTD_write\n");

        if (*ppos == mtd->size)
                return -ENOSPC;

        if (*ppos + count > mtd->size)
                count = mtd->size - *ppos;

        if (!count)
                return 0;

        if (count > MAX_KMALLOC_SIZE)
                kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
        else
                kbuf=kmalloc(count, GFP_KERNEL);

        if (!kbuf)
                return -ENOMEM;

        while (count) {

                if (count > MAX_KMALLOC_SIZE)
                        len = MAX_KMALLOC_SIZE;
                else
                        len = count;

                if (copy_from_user(kbuf, buf, len)) {
                        kfree(kbuf);
                        return -EFAULT;
                }

                switch (mfi->mode) {
                case MTD_MODE_OTP_FACTORY:
                        ret = -EROFS;
                        break;
                case MTD_MODE_OTP_USER:
                        if (!mtd->write_user_prot_reg) {
                                ret = -EOPNOTSUPP;
                                break;
                        }
                        ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
                        break;

                case MTD_MODE_RAW:
                {
                        struct mtd_oob_ops ops;

                        ops.mode = MTD_OOB_RAW;
                        ops.datbuf = kbuf;
                        ops.oobbuf = NULL;
                        ops.len = len;

                        ret = mtd->write_oob(mtd, *ppos, &ops);
                        retlen = ops.retlen;
                        break;
                }

                default:
                        ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
                }
                if (!ret) {
                        *ppos += retlen;
                        total_retlen += retlen;
                        count -= retlen;
                        buf += retlen;
                }
                else {
                        kfree(kbuf);
                        return ret;
                }
        }

        kfree(kbuf);
        return total_retlen;
} /* mtd_write */

/*======================================================================

    IOCTL calls for getting device parameters.

======================================================================*/
static void mtdchar_erase_callback (struct erase_info *instr)
{
        wake_up((wait_queue_head_t *)instr->priv);
}

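/*
 * The MEMERASE ioctl below is synchronous: it queues the erase with the
 * driver and then sleeps on a wait queue until mtdchar_erase_callback()
 * is invoked with the erase_info marked MTD_ERASE_DONE or
 * MTD_ERASE_FAILED.
 */
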
#if defined(CONFIG_MTD_OTP) || defined(CONFIG_MTD_ONENAND_OTP)
static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
{
        struct mtd_info *mtd = mfi->mtd;
        int ret = 0;

        switch (mode) {
        case MTD_OTP_FACTORY:
                if (!mtd->read_fact_prot_reg)
                        ret = -EOPNOTSUPP;
                else
                        mfi->mode = MTD_MODE_OTP_FACTORY;
                break;
        case MTD_OTP_USER:
                if (!mtd->read_user_prot_reg)
                        ret = -EOPNOTSUPP;
                else
                        mfi->mode = MTD_MODE_OTP_USER;
                break;
        default:
                ret = -EINVAL;
        case MTD_OTP_OFF:
                break;
        }
        return ret;
}
#else
# define otp_select_filemode(f,m)       -EOPNOTSUPP
#endif

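/*
 * OTP (one-time programmable) regions are accessed through the regular
 * read()/write() paths once OTPSELECT has switched the file mode:
 * factory OTP data is always read-only (writes fail with -EROFS above),
 * while user OTP regions may be written and later locked via OTPLOCK.
 */
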
static int mtd_ioctl(struct inode *inode, struct file *file,
                     u_int cmd, u_long arg)
{
        struct mtd_file_info *mfi = file->private_data;
        struct mtd_info *mtd = mfi->mtd;
        void __user *argp = (void __user *)arg;
        int ret = 0;
        u_long size;
        struct mtd_info_user info;

        DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");

        size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
        if (cmd & IOC_IN) {
                if (!access_ok(VERIFY_READ, argp, size))
                        return -EFAULT;
        }
        if (cmd & IOC_OUT) {
                if (!access_ok(VERIFY_WRITE, argp, size))
                        return -EFAULT;
        }

        switch (cmd) {
        case MEMGETREGIONCOUNT:
                if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
                        return -EFAULT;
                break;

        case MEMGETREGIONINFO:
        {
                struct region_info_user ur;

                if (copy_from_user(&ur, argp, sizeof(struct region_info_user)))
                        return -EFAULT;

                if (ur.regionindex >= mtd->numeraseregions)
                        return -EINVAL;
                if (copy_to_user(argp, &(mtd->eraseregions[ur.regionindex]),
                                sizeof(struct mtd_erase_region_info)))
                        return -EFAULT;
                break;
        }

        case MEMGETINFO:
                info.type = mtd->type;
                info.flags = mtd->flags;
                info.size = mtd->size;
                info.erasesize = mtd->erasesize;
                info.writesize = mtd->writesize;
                info.oobsize = mtd->oobsize;
                info.ecctype = mtd->ecctype;
                info.eccsize = mtd->eccsize;
                if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
                        return -EFAULT;
                break;

        case MEMERASE:
        {
                struct erase_info *erase;

                if(!(file->f_mode & 2))
                        return -EPERM;

                erase=kmalloc(sizeof(struct erase_info),GFP_KERNEL);
                if (!erase)
                        ret = -ENOMEM;
                else {
                        wait_queue_head_t waitq;
                        DECLARE_WAITQUEUE(wait, current);

                        init_waitqueue_head(&waitq);

                        memset (erase,0,sizeof(struct erase_info));
                        if (copy_from_user(&erase->addr, argp,
                                    sizeof(struct erase_info_user))) {
                                kfree(erase);
                                return -EFAULT;
                        }
                        erase->mtd = mtd;
                        erase->callback = mtdchar_erase_callback;
                        erase->priv = (unsigned long)&waitq;

                        /*
                          FIXME: Allow INTERRUPTIBLE. Which means
                          not having the wait_queue head on the stack.

                          If the wq_head is on the stack, and we
                          leave because we got interrupted, then the
                          wq_head is no longer there when the
                          callback routine tries to wake us up.
                        */
                        ret = mtd->erase(mtd, erase);
                        if (!ret) {
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&waitq, &wait);
                                if (erase->state != MTD_ERASE_DONE &&
                                    erase->state != MTD_ERASE_FAILED)
                                        schedule();
                                remove_wait_queue(&waitq, &wait);
                                set_current_state(TASK_RUNNING);

                                ret = (erase->state == MTD_ERASE_FAILED)?-EIO:0;
                        }
                        kfree(erase);
                }
                break;
        }

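        /*
         * For MEMWRITEOOB/MEMREADOOB, buf.start addresses the flash page
         * while its low bits (within mtd->oobsize) select the starting
         * offset inside that page's OOB area; the offset is split off
         * into ops.ooboffs and buf.start is aligned down before calling
         * the driver.
         */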
        case MEMWRITEOOB:
        {
                struct mtd_oob_buf buf;
                struct mtd_oob_ops ops;

                if(!(file->f_mode & 2))
                        return -EPERM;

                if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
                        return -EFAULT;

                if (buf.length > 4096)
                        return -EINVAL;

                if (!mtd->write_oob)
                        ret = -EOPNOTSUPP;
                else
                        ret = access_ok(VERIFY_READ, buf.ptr,
                                        buf.length) ? 0 : -EFAULT;

                if (ret)
                        return ret;

                ops.len = buf.length;
                ops.ooblen = mtd->oobsize;
                ops.ooboffs = buf.start & (mtd->oobsize - 1);
                ops.datbuf = NULL;
                ops.mode = MTD_OOB_PLACE;

                if (ops.ooboffs && ops.len > (ops.ooblen - ops.ooboffs))
                        return -EINVAL;

                ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
                if (!ops.oobbuf)
                        return -ENOMEM;

                if (copy_from_user(ops.oobbuf, buf.ptr, buf.length)) {
                        kfree(ops.oobbuf);
                        return -EFAULT;
                }

                buf.start &= ~(mtd->oobsize - 1);
                ret = mtd->write_oob(mtd, buf.start, &ops);

                if (copy_to_user(argp + sizeof(uint32_t), &ops.retlen,
                                 sizeof(uint32_t)))
                        ret = -EFAULT;

                kfree(ops.oobbuf);
                break;

        }

        case MEMREADOOB:
        {
                struct mtd_oob_buf buf;
                struct mtd_oob_ops ops;

                if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
                        return -EFAULT;

                if (buf.length > 4096)
                        return -EINVAL;

                if (!mtd->read_oob)
                        ret = -EOPNOTSUPP;
                else
                        ret = access_ok(VERIFY_WRITE, buf.ptr,
                                        buf.length) ? 0 : -EFAULT;
                if (ret)
                        return ret;

                ops.len = buf.length;
                ops.ooblen = mtd->oobsize;
                ops.ooboffs = buf.start & (mtd->oobsize - 1);
                ops.datbuf = NULL;
                ops.mode = MTD_OOB_PLACE;

                if (ops.ooboffs && ops.len > (ops.ooblen - ops.ooboffs))
                        return -EINVAL;

                ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
                if (!ops.oobbuf)
                        return -ENOMEM;

                buf.start &= ~(mtd->oobsize - 1);
                ret = mtd->read_oob(mtd, buf.start, &ops);

                if (put_user(ops.retlen, (uint32_t __user *)argp))
                        ret = -EFAULT;
                else if (ops.retlen && copy_to_user(buf.ptr, ops.oobbuf,
                                                    ops.retlen))
                        ret = -EFAULT;

                kfree(ops.oobbuf);
                break;
        }

        case MEMLOCK:
        {
                struct erase_info_user info;

                if (copy_from_user(&info, argp, sizeof(info)))
                        return -EFAULT;

                if (!mtd->lock)
                        ret = -EOPNOTSUPP;
                else
                        ret = mtd->lock(mtd, info.start, info.length);
                break;
        }

        case MEMUNLOCK:
        {
                struct erase_info_user info;

                if (copy_from_user(&info, argp, sizeof(info)))
                        return -EFAULT;

                if (!mtd->unlock)
                        ret = -EOPNOTSUPP;
                else
                        ret = mtd->unlock(mtd, info.start, info.length);
                break;
        }

        /* Legacy interface */
        case MEMGETOOBSEL:
        {
                struct nand_oobinfo oi;

                if (!mtd->ecclayout)
                        return -EOPNOTSUPP;
                if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
                        return -EINVAL;

                oi.useecc = MTD_NANDECC_AUTOPLACE;
                memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
                memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
                       sizeof(oi.oobfree));

                if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
                        return -EFAULT;
                break;
        }

        case MEMGETBADBLOCK:
        {
                loff_t offs;

                if (copy_from_user(&offs, argp, sizeof(loff_t)))
                        return -EFAULT;
                if (!mtd->block_isbad)
                        ret = -EOPNOTSUPP;
                else
                        return mtd->block_isbad(mtd, offs);
                break;
        }

        case MEMSETBADBLOCK:
        {
                loff_t offs;

                if (copy_from_user(&offs, argp, sizeof(loff_t)))
                        return -EFAULT;
                if (!mtd->block_markbad)
                        ret = -EOPNOTSUPP;
                else
                        return mtd->block_markbad(mtd, offs);
                break;
        }

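        /*
         * The MEMGETBADBLOCK/MEMSETBADBLOCK cases above hand the driver
         * result straight back to userspace when the operation is
         * supported: block_isbad() reports a positive value for a bad
         * block, 0 for a good one, or a negative errno.
         */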
#if defined(CONFIG_MTD_OTP) || defined(CONFIG_MTD_ONENAND_OTP)
        case OTPSELECT:
        {
                int mode;
                if (copy_from_user(&mode, argp, sizeof(int)))
                        return -EFAULT;

                mfi->mode = MTD_MODE_NORMAL;

                ret = otp_select_filemode(mfi, mode);

                file->f_pos = 0;
                break;
        }

        case OTPGETREGIONCOUNT:
        case OTPGETREGIONINFO:
        {
                struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;
                ret = -EOPNOTSUPP;
                switch (mfi->mode) {
                case MTD_MODE_OTP_FACTORY:
                        if (mtd->get_fact_prot_info)
                                ret = mtd->get_fact_prot_info(mtd, buf, 4096);
                        break;
                case MTD_MODE_OTP_USER:
                        if (mtd->get_user_prot_info)
                                ret = mtd->get_user_prot_info(mtd, buf, 4096);
                        break;
                default:
                        break;
                }
                if (ret >= 0) {
                        if (cmd == OTPGETREGIONCOUNT) {
                                int nbr = ret / sizeof(struct otp_info);
                                ret = copy_to_user(argp, &nbr, sizeof(int));
                        } else
                                ret = copy_to_user(argp, buf, ret);
                        if (ret)
                                ret = -EFAULT;
                }
                kfree(buf);
                break;
        }

        case OTPLOCK:
        {
                struct otp_info info;

                if (mfi->mode != MTD_MODE_OTP_USER)
                        return -EINVAL;
                if (copy_from_user(&info, argp, sizeof(info)))
                        return -EFAULT;
                if (!mtd->lock_user_prot_reg)
                        return -EOPNOTSUPP;
                ret = mtd->lock_user_prot_reg(mtd, info.start, info.length);
                break;
        }
#endif

        case ECCGETLAYOUT:
        {
                if (!mtd->ecclayout)
                        return -EOPNOTSUPP;

                if (copy_to_user(argp, mtd->ecclayout,
                                 sizeof(struct nand_ecclayout)))
                        return -EFAULT;
                break;
        }

        case ECCGETSTATS:
        {
                if (copy_to_user(argp, &mtd->ecc_stats,
                                 sizeof(struct mtd_ecc_stats)))
                        return -EFAULT;
                break;
        }

        case MTDFILEMODE:
        {
                mfi->mode = 0;

                switch(arg) {
                case MTD_MODE_OTP_FACTORY:
                case MTD_MODE_OTP_USER:
                        ret = otp_select_filemode(mfi, arg);
                        break;

                case MTD_MODE_RAW:
                        if (!mtd->read_oob || !mtd->write_oob)
                                return -EOPNOTSUPP;
                        mfi->mode = arg;
                        /* fall through */

                case MTD_MODE_NORMAL:
                        break;
                default:
                        ret = -EINVAL;
                }
                file->f_pos = 0;
                break;
        }

        default:
                ret = -ENOTTY;
        }

        return ret;
} /* memory_ioctl */

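/*
 * Illustrative sketch (not part of this driver) of how userspace is
 * expected to talk to these nodes, e.g. querying geometry with
 * MEMGETINFO before erasing the first block:
 *
 *      int fd = open("/dev/mtd0", O_RDWR);
 *      struct mtd_info_user info;
 *      struct erase_info_user ei;
 *
 *      ioctl(fd, MEMGETINFO, &info);
 *      ei.start = 0;
 *      ei.length = info.erasesize;
 *      ioctl(fd, MEMERASE, &ei);
 */
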
static struct file_operations mtd_fops = {
        .owner          = THIS_MODULE,
        .llseek         = mtd_lseek,
        .read           = mtd_read,
        .write          = mtd_write,
        .ioctl          = mtd_ioctl,
        .open           = mtd_open,
        .release        = mtd_close,
};

static int __init init_mtdchar(void)
{
        if (register_chrdev(MTD_CHAR_MAJOR, "mtd", &mtd_fops)) {
                printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
                       MTD_CHAR_MAJOR);
                return -EAGAIN;
        }

        mtd_class = class_create(THIS_MODULE, "mtd");

        if (IS_ERR(mtd_class)) {
                printk(KERN_ERR "Error creating mtd class.\n");
                unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
                return PTR_ERR(mtd_class);
        }

        register_mtd_user(&notifier);
        return 0;
}

static void __exit cleanup_mtdchar(void)
{
        unregister_mtd_user(&notifier);
        class_destroy(mtd_class);
        unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
}

module_init(init_mtdchar);
module_exit(cleanup_mtdchar);


MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Direct character-device access to MTD devices");