/*
 * Core registration and callback routines for MTD
 * drivers and users.
 *
 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
 * Copyright © 2006 Red Hat UK Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/ioctl.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/idr.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/reboot.h>
#include <linux/kconfig.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

#include "mtdcore.h"

static struct backing_dev_info mtd_bdi = {
};

#ifdef CONFIG_PM_SLEEP

static int mtd_cls_suspend(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	return mtd ? mtd_suspend(mtd) : 0;
}

static int mtd_cls_resume(struct device *dev)
{
	struct mtd_info *mtd = dev_get_drvdata(dev);

	if (mtd)
		mtd_resume(mtd);
	return 0;
}

static SIMPLE_DEV_PM_OPS(mtd_cls_pm_ops, mtd_cls_suspend, mtd_cls_resume);
#define MTD_CLS_PM_OPS (&mtd_cls_pm_ops)
#else
#define MTD_CLS_PM_OPS NULL
#endif

static struct class mtd_class = {
	.name = "mtd",
	.owner = THIS_MODULE,
	.pm = MTD_CLS_PM_OPS,
};

static DEFINE_IDR(mtd_idr);

/* These are exported solely for the purpose of mtd_blkdevs.c. You
   should not use them for _anything_ else */
DEFINE_MUTEX(mtd_table_mutex);
EXPORT_SYMBOL_GPL(mtd_table_mutex);

struct mtd_info *__mtd_next_device(int i)
{
	return idr_get_next(&mtd_idr, &i);
}
EXPORT_SYMBOL_GPL(__mtd_next_device);

static LIST_HEAD(mtd_notifiers);


#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)
98
99/* REVISIT once MTD uses the driver model better, whoever allocates
100 * the mtd_info will probably want to use the release() hook...
101 */
102static void mtd_release(struct device *dev)
103{
104 struct mtd_info *mtd = dev_get_drvdata(dev);
105 dev_t index = MTD_DEVT(mtd->index);
106
107 /* remove /dev/mtdXro node */
108 device_destroy(&mtd_class, index + 1);
109}
110
111static ssize_t mtd_type_show(struct device *dev,
112 struct device_attribute *attr, char *buf)
113{
114 struct mtd_info *mtd = dev_get_drvdata(dev);
115 char *type;
116
117 switch (mtd->type) {
118 case MTD_ABSENT:
119 type = "absent";
120 break;
121 case MTD_RAM:
122 type = "ram";
123 break;
124 case MTD_ROM:
125 type = "rom";
126 break;
127 case MTD_NORFLASH:
128 type = "nor";
129 break;
130 case MTD_NANDFLASH:
131 type = "nand";
132 break;
133 case MTD_DATAFLASH:
134 type = "dataflash";
135 break;
136 case MTD_UBIVOLUME:
137 type = "ubi";
138 break;
139 case MTD_MLCNANDFLASH:
140 type = "mlc-nand";
141 break;
142 default:
143 type = "unknown";
144 }
145
146 return snprintf(buf, PAGE_SIZE, "%s\n", type);
147}
148static DEVICE_ATTR(type, S_IRUGO, mtd_type_show, NULL);
149
150static ssize_t mtd_flags_show(struct device *dev,
151 struct device_attribute *attr, char *buf)
152{
153 struct mtd_info *mtd = dev_get_drvdata(dev);
154
155 return snprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)mtd->flags);
156
157}
158static DEVICE_ATTR(flags, S_IRUGO, mtd_flags_show, NULL);
159
160static ssize_t mtd_size_show(struct device *dev,
161 struct device_attribute *attr, char *buf)
162{
163 struct mtd_info *mtd = dev_get_drvdata(dev);
164
165 return snprintf(buf, PAGE_SIZE, "%llu\n",
166 (unsigned long long)mtd->size);
167
168}
169static DEVICE_ATTR(size, S_IRUGO, mtd_size_show, NULL);
170
171static ssize_t mtd_erasesize_show(struct device *dev,
172 struct device_attribute *attr, char *buf)
173{
174 struct mtd_info *mtd = dev_get_drvdata(dev);
175
176 return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->erasesize);
177
178}
179static DEVICE_ATTR(erasesize, S_IRUGO, mtd_erasesize_show, NULL);
180
181static ssize_t mtd_writesize_show(struct device *dev,
182 struct device_attribute *attr, char *buf)
183{
184 struct mtd_info *mtd = dev_get_drvdata(dev);
185
186 return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->writesize);
187
188}
189static DEVICE_ATTR(writesize, S_IRUGO, mtd_writesize_show, NULL);
190
191static ssize_t mtd_subpagesize_show(struct device *dev,
192 struct device_attribute *attr, char *buf)
193{
194 struct mtd_info *mtd = dev_get_drvdata(dev);
195 unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;
196
197 return snprintf(buf, PAGE_SIZE, "%u\n", subpagesize);
198
199}
200static DEVICE_ATTR(subpagesize, S_IRUGO, mtd_subpagesize_show, NULL);
201
202static ssize_t mtd_oobsize_show(struct device *dev,
203 struct device_attribute *attr, char *buf)
204{
205 struct mtd_info *mtd = dev_get_drvdata(dev);
206
207 return snprintf(buf, PAGE_SIZE, "%lu\n", (unsigned long)mtd->oobsize);
208
209}
210static DEVICE_ATTR(oobsize, S_IRUGO, mtd_oobsize_show, NULL);
211
212static ssize_t mtd_numeraseregions_show(struct device *dev,
213 struct device_attribute *attr, char *buf)
214{
215 struct mtd_info *mtd = dev_get_drvdata(dev);
216
217 return snprintf(buf, PAGE_SIZE, "%u\n", mtd->numeraseregions);
218
219}
220static DEVICE_ATTR(numeraseregions, S_IRUGO, mtd_numeraseregions_show,
221 NULL);
222
223static ssize_t mtd_name_show(struct device *dev,
224 struct device_attribute *attr, char *buf)
225{
226 struct mtd_info *mtd = dev_get_drvdata(dev);
227
228 return snprintf(buf, PAGE_SIZE, "%s\n", mtd->name);
229
230}
231static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL);
232
233static ssize_t mtd_ecc_strength_show(struct device *dev,
234 struct device_attribute *attr, char *buf)
235{
236 struct mtd_info *mtd = dev_get_drvdata(dev);
237
238 return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_strength);
239}
240static DEVICE_ATTR(ecc_strength, S_IRUGO, mtd_ecc_strength_show, NULL);
241
242static ssize_t mtd_bitflip_threshold_show(struct device *dev,
243 struct device_attribute *attr,
244 char *buf)
245{
246 struct mtd_info *mtd = dev_get_drvdata(dev);
247
248 return snprintf(buf, PAGE_SIZE, "%u\n", mtd->bitflip_threshold);
249}
250
251static ssize_t mtd_bitflip_threshold_store(struct device *dev,
252 struct device_attribute *attr,
253 const char *buf, size_t count)
254{
255 struct mtd_info *mtd = dev_get_drvdata(dev);
256 unsigned int bitflip_threshold;
257 int retval;
258
259 retval = kstrtouint(buf, 0, &bitflip_threshold);
260 if (retval)
261 return retval;
262
263 mtd->bitflip_threshold = bitflip_threshold;
264 return count;
265}
266static DEVICE_ATTR(bitflip_threshold, S_IRUGO | S_IWUSR,
267 mtd_bitflip_threshold_show,
268 mtd_bitflip_threshold_store);
269
270static ssize_t mtd_ecc_step_size_show(struct device *dev,
271 struct device_attribute *attr, char *buf)
272{
273 struct mtd_info *mtd = dev_get_drvdata(dev);
274
275 return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_step_size);
276
277}
278static DEVICE_ATTR(ecc_step_size, S_IRUGO, mtd_ecc_step_size_show, NULL);
279
280static ssize_t mtd_ecc_stats_corrected_show(struct device *dev,
281 struct device_attribute *attr, char *buf)
282{
283 struct mtd_info *mtd = dev_get_drvdata(dev);
284 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
285
286 return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->corrected);
287}
288static DEVICE_ATTR(corrected_bits, S_IRUGO,
289 mtd_ecc_stats_corrected_show, NULL);
290
291static ssize_t mtd_ecc_stats_errors_show(struct device *dev,
292 struct device_attribute *attr, char *buf)
293{
294 struct mtd_info *mtd = dev_get_drvdata(dev);
295 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
296
297 return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->failed);
298}
299static DEVICE_ATTR(ecc_failures, S_IRUGO, mtd_ecc_stats_errors_show, NULL);
300
301static ssize_t mtd_badblocks_show(struct device *dev,
302 struct device_attribute *attr, char *buf)
303{
304 struct mtd_info *mtd = dev_get_drvdata(dev);
305 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
306
307 return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->badblocks);
308}
309static DEVICE_ATTR(bad_blocks, S_IRUGO, mtd_badblocks_show, NULL);
310
311static ssize_t mtd_bbtblocks_show(struct device *dev,
312 struct device_attribute *attr, char *buf)
313{
314 struct mtd_info *mtd = dev_get_drvdata(dev);
315 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
316
317 return snprintf(buf, PAGE_SIZE, "%u\n", ecc_stats->bbtblocks);
318}
319static DEVICE_ATTR(bbt_blocks, S_IRUGO, mtd_bbtblocks_show, NULL);
320
321static struct attribute *mtd_attrs[] = {
322 &dev_attr_type.attr,
323 &dev_attr_flags.attr,
324 &dev_attr_size.attr,
325 &dev_attr_erasesize.attr,
326 &dev_attr_writesize.attr,
327 &dev_attr_subpagesize.attr,
328 &dev_attr_oobsize.attr,
329 &dev_attr_numeraseregions.attr,
330 &dev_attr_name.attr,
331 &dev_attr_ecc_strength.attr,
332 &dev_attr_ecc_step_size.attr,
333 &dev_attr_corrected_bits.attr,
334 &dev_attr_ecc_failures.attr,
335 &dev_attr_bad_blocks.attr,
336 &dev_attr_bbt_blocks.attr,
337 &dev_attr_bitflip_threshold.attr,
338 NULL,
339};
340ATTRIBUTE_GROUPS(mtd);
341
342static struct device_type mtd_devtype = {
343 .name = "mtd",
344 .groups = mtd_groups,
345 .release = mtd_release,
346};

#ifndef CONFIG_MMU
unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
{
	switch (mtd->type) {
	case MTD_RAM:
		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
			NOMMU_MAP_READ | NOMMU_MAP_WRITE;
	case MTD_ROM:
		return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
			NOMMU_MAP_READ;
	default:
		return NOMMU_MAP_COPY;
	}
}
EXPORT_SYMBOL_GPL(mtd_mmap_capabilities);
#endif

static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
			       void *cmd)
{
	struct mtd_info *mtd;

	mtd = container_of(n, struct mtd_info, reboot_notifier);
	mtd->_reboot(mtd);

	return NOTIFY_DONE;
}

/**
 * add_mtd_device - register an MTD device
 * @mtd: pointer to new MTD device info structure
 *
 * Add a device to the list of MTD devices present in the system, and
 * notify each currently active MTD 'user' of its arrival. Returns
 * zero on success or non-zero on failure.
 */

int add_mtd_device(struct mtd_info *mtd)
{
	struct mtd_notifier *not;
	int i, error;

	/*
	 * May occur, for instance, on buggy drivers which call
	 * mtd_device_parse_register() multiple times on the same master MTD,
	 * especially with CONFIG_MTD_PARTITIONED_MASTER=y.
	 */
	if (WARN_ONCE(mtd->backing_dev_info, "MTD already registered\n"))
		return -EEXIST;

	mtd->backing_dev_info = &mtd_bdi;

	BUG_ON(mtd->writesize == 0);
	mutex_lock(&mtd_table_mutex);

	i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
	if (i < 0) {
		error = i;
		goto fail_locked;
	}

	mtd->index = i;
	mtd->usecount = 0;

	/* default value if not set by driver */
	if (mtd->bitflip_threshold == 0)
		mtd->bitflip_threshold = mtd->ecc_strength;

	if (is_power_of_2(mtd->erasesize))
		mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
	else
		mtd->erasesize_shift = 0;

	if (is_power_of_2(mtd->writesize))
		mtd->writesize_shift = ffs(mtd->writesize) - 1;
	else
		mtd->writesize_shift = 0;

	mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
	mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;

	/* Some chips always power up locked. Unlock them now */
	if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
		error = mtd_unlock(mtd, 0, mtd->size);
		if (error && error != -EOPNOTSUPP)
			printk(KERN_WARNING
			       "%s: unlock failed, writes may not work\n",
			       mtd->name);
		/* Ignore unlock failures? */
		error = 0;
	}

	/* Caller should have set dev.parent to match the
	 * physical device, if appropriate.
	 */
	mtd->dev.type = &mtd_devtype;
	mtd->dev.class = &mtd_class;
	mtd->dev.devt = MTD_DEVT(i);
	dev_set_name(&mtd->dev, "mtd%d", i);
	dev_set_drvdata(&mtd->dev, mtd);
	error = device_register(&mtd->dev);
	if (error)
		goto fail_added;

	device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
		      "mtd%dro", i);

	pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->add(mtd);

	mutex_unlock(&mtd_table_mutex);
	/* We _know_ we aren't being removed, because
	   our caller is still holding us here. So none
	   of this try_ nonsense, and no bitching about it
	   either. :) */
	__module_get(THIS_MODULE);
	return 0;

fail_added:
	idr_remove(&mtd_idr, i);
fail_locked:
	mutex_unlock(&mtd_table_mutex);
	return error;
}

/**
 * del_mtd_device - unregister an MTD device
 * @mtd: pointer to MTD device info structure
 *
 * Remove a device from the list of MTD devices present in the system,
 * and notify each currently active MTD 'user' of its departure.
 * Returns zero on success or a negative error code on failure, which
 * currently will happen if the requested device does not appear to be
 * present in the list.
 */

int del_mtd_device(struct mtd_info *mtd)
{
	int ret;
	struct mtd_notifier *not;

	mutex_lock(&mtd_table_mutex);

	if (idr_find(&mtd_idr, mtd->index) != mtd) {
		ret = -ENODEV;
		goto out_error;
	}

	/* No need to get a refcount on the module containing
	   the notifier, since we hold the mtd_table_mutex */
	list_for_each_entry(not, &mtd_notifiers, list)
		not->remove(mtd);

	if (mtd->usecount) {
		printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
		       mtd->index, mtd->name, mtd->usecount);
		ret = -EBUSY;
	} else {
		device_unregister(&mtd->dev);

		idr_remove(&mtd_idr, mtd->index);

		module_put(THIS_MODULE);
		ret = 0;
	}

out_error:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}

static int mtd_add_device_partitions(struct mtd_info *mtd,
				     struct mtd_partition *real_parts,
				     int nbparts)
{
	int ret;

	if (nbparts == 0 || IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
		ret = add_mtd_device(mtd);
		if (ret)
			return ret;
	}

	if (nbparts > 0) {
		ret = add_mtd_partitions(mtd, real_parts, nbparts);
		if (ret && IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
			del_mtd_device(mtd);
		return ret;
	}

	return 0;
}

/*
 * Set a few defaults based on the parent devices, if not provided by the
 * driver
 */
static void mtd_set_dev_defaults(struct mtd_info *mtd)
{
	if (mtd->dev.parent) {
		if (!mtd->owner && mtd->dev.parent->driver)
			mtd->owner = mtd->dev.parent->driver->owner;
		if (!mtd->name)
			mtd->name = dev_name(mtd->dev.parent);
	} else {
		pr_debug("mtd device won't show a device symlink in sysfs\n");
	}
}

/**
 * mtd_device_parse_register - parse partitions and register an MTD device.
 *
 * @mtd: the MTD device to register
 * @types: the list of MTD partition probes to try, see
 *         'parse_mtd_partitions()' for more information
 * @parser_data: MTD partition parser-specific data
 * @parts: fallback partition information to register, if parsing fails;
 *         only valid if %nr_parts > %0
 * @nr_parts: the number of partitions in parts, if zero then the full
 *            MTD device is registered if no partition info is found
 *
 * This function aggregates MTD partitions parsing (done by
 * 'parse_mtd_partitions()') and MTD device and partitions registering. It
 * basically follows the most common pattern found in many MTD drivers:
 *
 * * It first tries to probe partitions on MTD device @mtd using parsers
 *   specified in @types (if @types is %NULL, then the default list of parsers
 *   is used, see 'parse_mtd_partitions()' for more information). If none are
 *   found this function tries to fall back to information specified in
 *   @parts/@nr_parts.
 * * If any partitioning info was found, this function registers the found
 *   partitions. If the MTD_PARTITIONED_MASTER option is set, then the device
 *   as a whole is registered first.
 * * If no partitions were found this function just registers the MTD device
 *   @mtd and exits.
 *
 * Returns zero in case of success and a negative error code in case of failure.
 */
int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
			      struct mtd_part_parser_data *parser_data,
			      const struct mtd_partition *parts,
			      int nr_parts)
{
	int ret;
	struct mtd_partition *real_parts = NULL;

	mtd_set_dev_defaults(mtd);

	ret = parse_mtd_partitions(mtd, types, &real_parts, parser_data);
	if (ret <= 0 && nr_parts && parts) {
		real_parts = kmemdup(parts, sizeof(*parts) * nr_parts,
				     GFP_KERNEL);
		if (!real_parts)
			ret = -ENOMEM;
		else
			ret = nr_parts;
	}
	/* Didn't come up with either parsed OR fallback partitions */
	if (ret < 0) {
		pr_info("mtd: failed to find partitions; one or more parsers reports errors (%d)\n",
			ret);
		/* Don't abort on errors; we can still use unpartitioned MTD */
		ret = 0;
	}

	ret = mtd_add_device_partitions(mtd, real_parts, ret);
	if (ret)
		goto out;

	/*
	 * FIXME: some drivers unfortunately call this function more than once.
	 * So we have to check if we've already assigned the reboot notifier.
	 *
	 * Generally, we can make multiple calls work for most cases, but it
	 * does cause problems with parse_mtd_partitions() above (e.g.,
	 * cmdlineparts will register partitions more than once).
	 */
	WARN_ONCE(mtd->_reboot && mtd->reboot_notifier.notifier_call,
		  "MTD already registered\n");
	if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) {
		mtd->reboot_notifier.notifier_call = mtd_reboot_notifier;
		register_reboot_notifier(&mtd->reboot_notifier);
	}

out:
	kfree(real_parts);
	return ret;
}
EXPORT_SYMBOL_GPL(mtd_device_parse_register);
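
/*
 * Example (illustrative sketch only, not part of this file): a hypothetical
 * flash driver probe routine would typically register its master device with
 * a fallback partition table along these lines. The "mydrv" names, the
 * mydrv_setup_chip() helper and the fixed layout below are invented for the
 * sketch:
 *
 *	static const struct mtd_partition mydrv_parts[] = {
 *		{ .name = "boot",   .offset = 0,       .size = SZ_512K },
 *		{ .name = "rootfs", .offset = SZ_512K, .size = MTDPART_SIZ_FULL },
 *	};
 *
 *	static int mydrv_probe(struct platform_device *pdev)
 *	{
 *		struct mtd_info *mtd = mydrv_setup_chip(pdev); // hypothetical helper
 *
 *		mtd->dev.parent = &pdev->dev;
 *		// types == NULL: try the default list of partition parsers first
 *		return mtd_device_parse_register(mtd, NULL, NULL,
 *						 mydrv_parts,
 *						 ARRAY_SIZE(mydrv_parts));
 *	}
 */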

/**
 * mtd_device_unregister - unregister an existing MTD device.
 *
 * @master: the MTD device to unregister. This will unregister both the master
 *          and any partitions if registered.
 */
int mtd_device_unregister(struct mtd_info *master)
{
	int err;

	if (master->_reboot)
		unregister_reboot_notifier(&master->reboot_notifier);

	err = del_mtd_partitions(master);
	if (err)
		return err;

	if (!device_is_registered(&master->dev))
		return 0;

	return del_mtd_device(master);
}
EXPORT_SYMBOL_GPL(mtd_device_unregister);
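
/*
 * Example (illustrative sketch only): the matching remove path of the
 * hypothetical driver sketched above simply hands the master back to the
 * core, which tears down any partitions that were registered for it:
 *
 *	static int mydrv_remove(struct platform_device *pdev)
 *	{
 *		struct mtd_info *mtd = platform_get_drvdata(pdev);
 *
 *		return mtd_device_unregister(mtd);
 *	}
 */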

/**
 * register_mtd_user - register a 'user' of MTD devices.
 * @new: pointer to notifier info structure
 *
 * Registers a pair of callback functions to be called upon addition
 * or removal of MTD devices. Causes the 'add' callback to be immediately
 * invoked for each MTD device currently present in the system.
 */
void register_mtd_user (struct mtd_notifier *new)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	list_add(&new->list, &mtd_notifiers);

	__module_get(THIS_MODULE);

	mtd_for_each_device(mtd)
		new->add(mtd);

	mutex_unlock(&mtd_table_mutex);
}
EXPORT_SYMBOL_GPL(register_mtd_user);
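
/*
 * Example (illustrative sketch only): an MTD "user" such as a block
 * translation layer supplies an add/remove callback pair in a
 * struct mtd_notifier. The "myuser" names below are invented:
 *
 *	static void myuser_add_mtd(struct mtd_info *mtd)
 *	{
 *		pr_info("myuser: mtd%d (%s) appeared\n", mtd->index, mtd->name);
 *	}
 *
 *	static void myuser_remove_mtd(struct mtd_info *mtd)
 *	{
 *		pr_info("myuser: mtd%d is going away\n", mtd->index);
 *	}
 *
 *	static struct mtd_notifier myuser_notifier = {
 *		.add = myuser_add_mtd,
 *		.remove = myuser_remove_mtd,
 *	};
 *
 *	register_mtd_user(&myuser_notifier); // add() fires for existing devices
 */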

/**
 * unregister_mtd_user - unregister a 'user' of MTD devices.
 * @old: pointer to notifier info structure
 *
 * Removes a callback function pair from the list of 'users' to be
 * notified upon addition or removal of MTD devices. Causes the
 * 'remove' callback to be immediately invoked for each MTD device
 * currently present in the system.
 */
int unregister_mtd_user (struct mtd_notifier *old)
{
	struct mtd_info *mtd;

	mutex_lock(&mtd_table_mutex);

	module_put(THIS_MODULE);

	mtd_for_each_device(mtd)
		old->remove(mtd);

	list_del(&old->list);
	mutex_unlock(&mtd_table_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(unregister_mtd_user);

/**
 * get_mtd_device - obtain a validated handle for an MTD device
 * @mtd: last known address of the required MTD device
 * @num: internal device number of the required MTD device
 *
 * Given a number and NULL address, return the num'th entry in the device
 * table, if any. Given an address and num == -1, search the device table
 * for a device with that address and return if it's still present. Given
 * both, return the num'th driver only if its address matches. Return
 * error code if not.
 */
struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
{
	struct mtd_info *ret = NULL, *other;
	int err = -ENODEV;

	mutex_lock(&mtd_table_mutex);

	if (num == -1) {
		mtd_for_each_device(other) {
			if (other == mtd) {
				ret = mtd;
				break;
			}
		}
	} else if (num >= 0) {
		ret = idr_find(&mtd_idr, num);
		if (mtd && mtd != ret)
			ret = NULL;
	}

	if (!ret) {
		ret = ERR_PTR(err);
		goto out;
	}

	err = __get_mtd_device(ret);
	if (err)
		ret = ERR_PTR(err);
out:
	mutex_unlock(&mtd_table_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(get_mtd_device);


int __get_mtd_device(struct mtd_info *mtd)
{
	int err;

	if (!try_module_get(mtd->owner))
		return -ENODEV;

	if (mtd->_get_device) {
		err = mtd->_get_device(mtd);

		if (err) {
			module_put(mtd->owner);
			return err;
		}
	}
	mtd->usecount++;
	return 0;
}
EXPORT_SYMBOL_GPL(__get_mtd_device);

/**
 * get_mtd_device_nm - obtain a validated handle for an MTD device by
 * device name
 * @name: MTD device name to open
 *
 * This function returns the MTD device description structure in case of
 * success and an error code in case of failure.
 */
struct mtd_info *get_mtd_device_nm(const char *name)
{
	int err = -ENODEV;
	struct mtd_info *mtd = NULL, *other;

	mutex_lock(&mtd_table_mutex);

	mtd_for_each_device(other) {
		if (!strcmp(name, other->name)) {
			mtd = other;
			break;
		}
	}

	if (!mtd)
		goto out_unlock;

	err = __get_mtd_device(mtd);
	if (err)
		goto out_unlock;

	mutex_unlock(&mtd_table_mutex);
	return mtd;

out_unlock:
	mutex_unlock(&mtd_table_mutex);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(get_mtd_device_nm);
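
/*
 * Example (illustrative sketch only): a caller that wants a short-lived
 * reference to a device it knows by name pairs the lookup with
 * put_mtd_device(); looking up by number works the same way through
 * get_mtd_device(NULL, num). The "rootfs" name is invented:
 *
 *	struct mtd_info *mtd = get_mtd_device_nm("rootfs");
 *
 *	if (IS_ERR(mtd))
 *		return PTR_ERR(mtd);
 *	pr_info("rootfs is mtd%d, %llu bytes\n", mtd->index,
 *		(unsigned long long)mtd->size);
 *	put_mtd_device(mtd);
 */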

void put_mtd_device(struct mtd_info *mtd)
{
	mutex_lock(&mtd_table_mutex);
	__put_mtd_device(mtd);
	mutex_unlock(&mtd_table_mutex);

}
EXPORT_SYMBOL_GPL(put_mtd_device);

void __put_mtd_device(struct mtd_info *mtd)
{
	--mtd->usecount;
	BUG_ON(mtd->usecount < 0);

	if (mtd->_put_device)
		mtd->_put_device(mtd);

	module_put(mtd->owner);
}
EXPORT_SYMBOL_GPL(__put_mtd_device);

/*
 * Erase is an asynchronous operation. Device drivers are supposed
 * to call instr->callback() whenever the operation completes, even
 * if it completes with a failure.
 * Callers are supposed to pass a callback function and wait for it
 * to be called before writing to the block.
 */
int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
	if (!instr->len) {
		instr->state = MTD_ERASE_DONE;
		mtd_erase_callback(instr);
		return 0;
	}
	return mtd->_erase(mtd, instr);
}
EXPORT_SYMBOL_GPL(mtd_erase);
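
/*
 * Example (illustrative sketch only): because the erase above completes
 * through instr->callback(), a caller that needs synchronous behaviour
 * typically waits on a wait queue passed through instr->priv, roughly as
 * mtdchar does. The "myerase" names are invented for the sketch:
 *
 *	static void myerase_callback(struct erase_info *instr)
 *	{
 *		wake_up((wait_queue_head_t *)instr->priv);
 *	}
 *
 *	static int myerase_block(struct mtd_info *mtd, loff_t ofs)
 *	{
 *		DECLARE_WAITQUEUE(wait, current);
 *		wait_queue_head_t waitq;
 *		struct erase_info instr = {
 *			.mtd = mtd,
 *			.addr = ofs,
 *			.len = mtd->erasesize,
 *			.callback = myerase_callback,
 *		};
 *		int ret;
 *
 *		init_waitqueue_head(&waitq);
 *		instr.priv = (u_long)&waitq;
 *
 *		ret = mtd_erase(mtd, &instr);
 *		if (!ret) {
 *			set_current_state(TASK_UNINTERRUPTIBLE);
 *			add_wait_queue(&waitq, &wait);
 *			if (instr.state != MTD_ERASE_DONE &&
 *			    instr.state != MTD_ERASE_FAILED)
 *				schedule();
 *			remove_wait_queue(&waitq, &wait);
 *			set_current_state(TASK_RUNNING);
 *			ret = (instr.state == MTD_ERASE_FAILED) ? -EIO : 0;
 *		}
 *		return ret;
 *	}
 */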

/*
 * This stuff for eXecute-In-Place. phys is optional and may be set to NULL.
 */
int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	      void **virt, resource_size_t *phys)
{
	*retlen = 0;
	*virt = NULL;
	if (phys)
		*phys = 0;
	if (!mtd->_point)
		return -EOPNOTSUPP;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_point(mtd, from, len, retlen, virt, phys);
}
EXPORT_SYMBOL_GPL(mtd_point);

/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	if (!mtd->_point)
		return -EOPNOTSUPP;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_unpoint(mtd, from, len);
}
EXPORT_SYMBOL_GPL(mtd_unpoint);

/*
 * Allow NOMMU mmap() to directly map the device (if not NULL)
 * - return the address to which the offset maps
 * - return -ENOSYS to indicate refusal to do the mapping
 */
unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
				    unsigned long offset, unsigned long flags)
{
	if (!mtd->_get_unmapped_area)
		return -EOPNOTSUPP;
	if (offset >= mtd->size || len > mtd->size - offset)
		return -EINVAL;
	return mtd->_get_unmapped_area(mtd, len, offset, flags);
}
EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);

int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
	     u_char *buf)
{
	int ret_code;
	*retlen = 0;
	if (from < 0 || from >= mtd->size || len > mtd->size - from)
		return -EINVAL;
	if (!len)
		return 0;

	/*
	 * In the absence of an error, drivers return a non-negative integer
	 * representing the maximum number of bitflips that were corrected on
	 * any one ecc region (if applicable; zero otherwise).
	 */
	ret_code = mtd->_read(mtd, from, len, retlen, buf);
	if (unlikely(ret_code < 0))
		return ret_code;
	if (mtd->ecc_strength == 0)
		return 0;	/* device lacks ecc */
	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}
EXPORT_SYMBOL_GPL(mtd_read);
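
/*
 * Example (illustrative sketch only): callers should treat -EUCLEAN as
 * "data is good, but the area needs scrubbing soon", not as a hard failure;
 * mtd_is_bitflip() from <linux/mtd/mtd.h> tests exactly that. The
 * myfs_schedule_scrub() helper is invented for the sketch:
 *
 *	size_t retlen;
 *	int err = mtd_read(mtd, addr, len, &retlen, buf);
 *
 *	if (mtd_is_bitflip(err))
 *		myfs_schedule_scrub(mtd, addr);	// data in buf is still valid
 *	else if (err)
 *		return err;			// real read/ECC failure
 */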

int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
	      const u_char *buf)
{
	*retlen = 0;
	if (to < 0 || to >= mtd->size || len > mtd->size - to)
		return -EINVAL;
	if (!mtd->_write || !(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (!len)
		return 0;
	return mtd->_write(mtd, to, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_write);

/*
 * In blackbox flight recorder like scenarios we want to make successful writes
 * in interrupt context. panic_write() is only intended to be called when it's
 * known the kernel is about to panic and we need the write to succeed. Since
 * the kernel is not going to be running for much longer, this function can
 * break locks and delay to ensure the write succeeds (but not sleep).
 */
int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
		    const u_char *buf)
{
	*retlen = 0;
	if (!mtd->_panic_write)
		return -EOPNOTSUPP;
	if (to < 0 || to >= mtd->size || len > mtd->size - to)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (!len)
		return 0;
	return mtd->_panic_write(mtd, to, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_panic_write);
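
/*
 * Example (illustrative sketch only): a crash/"flight recorder" logger might
 * hook a panic notifier and push its last buffer out with mtd_panic_write(),
 * since a regular mtd_write() may sleep or take locks. All "mylog" names
 * below are invented:
 *
 *	static int mylog_panic(struct notifier_block *nb, unsigned long ev,
 *			       void *unused)
 *	{
 *		size_t retlen;
 *
 *		mtd_panic_write(mylog_mtd, mylog_offset, mylog_len, &retlen,
 *				mylog_buf);
 *		return NOTIFY_DONE;
 *	}
 */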

int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
{
	int ret_code;
	ops->retlen = ops->oobretlen = 0;
	if (!mtd->_read_oob)
		return -EOPNOTSUPP;
	/*
	 * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
	 * similar to mtd->_read(), returning a non-negative integer
	 * representing max bitflips. In other cases, mtd->_read_oob() may
	 * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
	 */
	ret_code = mtd->_read_oob(mtd, from, ops);
	if (unlikely(ret_code < 0))
		return ret_code;
	if (mtd->ecc_strength == 0)
		return 0;	/* device lacks ecc */
	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}
EXPORT_SYMBOL_GPL(mtd_read_oob);
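
/*
 * Example (illustrative sketch only): a page-plus-OOB read is described with
 * a struct mtd_oob_ops; here one NAND page and its spare area are read into
 * caller-supplied buffers, with sizes taken from the mtd itself:
 *
 *	struct mtd_oob_ops ops = {
 *		.mode   = MTD_OPS_PLACE_OOB,
 *		.len    = mtd->writesize,
 *		.ooblen = mtd->oobsize,
 *		.datbuf = data_buf,
 *		.oobbuf = oob_buf,
 *	};
 *	int err = mtd_read_oob(mtd, page_addr, &ops);
 *
 *	// ops.retlen / ops.oobretlen report how much was actually transferred
 */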

/*
 * Method to access the protection register area, present in some flash
 * devices. The user data is one time programmable but the factory data is read
 * only.
 */
int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf)
{
	if (!mtd->_get_fact_prot_info)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_get_fact_prot_info(mtd, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);

int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf)
{
	*retlen = 0;
	if (!mtd->_read_fact_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_read_fact_prot_reg(mtd, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);

int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
			   struct otp_info *buf)
{
	if (!mtd->_get_user_prot_info)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_get_user_prot_info(mtd, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);

int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
			   size_t *retlen, u_char *buf)
{
	*retlen = 0;
	if (!mtd->_read_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_read_user_prot_reg(mtd, from, len, retlen, buf);
}
EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);

int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
			    size_t *retlen, u_char *buf)
{
	int ret;

	*retlen = 0;
	if (!mtd->_write_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	ret = mtd->_write_user_prot_reg(mtd, to, len, retlen, buf);
	if (ret)
		return ret;

	/*
	 * If no data could be written at all, we are out of memory and
	 * must return -ENOSPC.
	 */
	return (*retlen) ? 0 : -ENOSPC;
}
EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);

int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
{
	if (!mtd->_lock_user_prot_reg)
		return -EOPNOTSUPP;
	if (!len)
		return 0;
	return mtd->_lock_user_prot_reg(mtd, from, len);
}
EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);

/* Chip-supported device locking */
int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	if (!mtd->_lock)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_lock(mtd, ofs, len);
}
EXPORT_SYMBOL_GPL(mtd_lock);

int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	if (!mtd->_unlock)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_unlock(mtd, ofs, len);
}
EXPORT_SYMBOL_GPL(mtd_unlock);

int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	if (!mtd->_is_locked)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
		return -EINVAL;
	if (!len)
		return 0;
	return mtd->_is_locked(mtd, ofs, len);
}
EXPORT_SYMBOL_GPL(mtd_is_locked);

int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
{
	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	if (!mtd->_block_isreserved)
		return 0;
	return mtd->_block_isreserved(mtd, ofs);
}
EXPORT_SYMBOL_GPL(mtd_block_isreserved);

int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	if (!mtd->_block_isbad)
		return 0;
	return mtd->_block_isbad(mtd, ofs);
}
EXPORT_SYMBOL_GPL(mtd_block_isbad);

int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	if (!mtd->_block_markbad)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs >= mtd->size)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	return mtd->_block_markbad(mtd, ofs);
}
EXPORT_SYMBOL_GPL(mtd_block_markbad);
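
/*
 * Example (illustrative sketch only): NAND-aware callers usually walk the
 * device eraseblock by eraseblock and skip anything the driver reports as
 * bad before erasing or programming it:
 *
 *	loff_t ofs;
 *
 *	for (ofs = 0; ofs < mtd->size; ofs += mtd->erasesize) {
 *		int bad = mtd_block_isbad(mtd, ofs);
 *
 *		if (bad < 0)
 *			return bad;	// -EINVAL etc.
 *		if (bad)
 *			continue;	// leave bad blocks alone
 *		// ... erase/program this block ...
 *	}
 */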

/*
 * default_mtd_writev - the default writev method
 * @mtd: mtd device description object pointer
 * @vecs: the vectors to write
 * @count: count of vectors in @vecs
 * @to: the MTD device offset to write to
 * @retlen: on exit contains the count of bytes written to the MTD device.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
			      unsigned long count, loff_t to, size_t *retlen)
{
	unsigned long i;
	size_t totlen = 0, thislen;
	int ret = 0;

	for (i = 0; i < count; i++) {
		if (!vecs[i].iov_len)
			continue;
		ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
				vecs[i].iov_base);
		totlen += thislen;
		if (ret || thislen != vecs[i].iov_len)
			break;
		to += vecs[i].iov_len;
	}
	*retlen = totlen;
	return ret;
}

/*
 * mtd_writev - the vector-based MTD write method
 * @mtd: mtd device description object pointer
 * @vecs: the vectors to write
 * @count: count of vectors in @vecs
 * @to: the MTD device offset to write to
 * @retlen: on exit contains the count of bytes written to the MTD device.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
	       unsigned long count, loff_t to, size_t *retlen)
{
	*retlen = 0;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	if (!mtd->_writev)
		return default_mtd_writev(mtd, vecs, count, to, retlen);
	return mtd->_writev(mtd, vecs, count, to, retlen);
}
EXPORT_SYMBOL_GPL(mtd_writev);
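
/*
 * Example (illustrative sketch only): a caller with a scattered source,
 * say a header plus a payload, can hand both pieces to mtd_writev() in one
 * call instead of copying them into a bounce buffer first. The hdr/payload
 * variables are assumed to exist in the caller:
 *
 *	struct kvec vecs[2] = {
 *		{ .iov_base = &hdr,    .iov_len = sizeof(hdr) },
 *		{ .iov_base = payload, .iov_len = payload_len },
 *	};
 *	size_t retlen;
 *	int err = mtd_writev(mtd, vecs, 2, to, &retlen);
 */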

/**
 * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
 * @mtd: mtd device description object pointer
 * @size: a pointer to the ideal or maximum size of the allocation, points
 *        to the actual allocation size on success.
 *
 * This routine attempts to allocate a contiguous kernel buffer up to
 * the specified size, backing off the size of the request exponentially
 * until the request succeeds or until the allocation size falls below
 * the system page size. This attempts to make sure it does not adversely
 * impact system performance, so when allocating more than one page, we
 * ask the memory allocator to avoid re-trying, swapping, writing back
 * or performing I/O.
 *
 * Note, this function also makes sure that the allocated buffer is aligned to
 * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
 *
 * This is called, for example by mtd_{read,write} and jffs2_scan_medium,
 * to handle smaller (i.e. degraded) buffer allocations under low- or
 * fragmented-memory situations where such reduced allocations, from a
 * requested ideal, are allowed.
 *
 * Returns a pointer to the allocated buffer on success; otherwise, NULL.
 */
void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
{
	gfp_t flags = __GFP_NOWARN | __GFP_DIRECT_RECLAIM | __GFP_NORETRY;
	size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
	void *kbuf;

	*size = min_t(size_t, *size, KMALLOC_MAX_SIZE);

	while (*size > min_alloc) {
		kbuf = kmalloc(*size, flags);
		if (kbuf)
			return kbuf;

		*size >>= 1;
		*size = ALIGN(*size, mtd->writesize);
	}

	/*
	 * For the last resort allocation allow 'kmalloc()' to do all sorts of
	 * things (write-back, dropping caches, etc) by using GFP_KERNEL.
	 */
	return kmalloc(*size, GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
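
/*
 * Example (illustrative sketch only): a scanner that would like one big
 * buffer but can live with less asks for its ideal size and then consumes
 * the device in whatever chunk size the allocator could actually provide:
 *
 *	size_t chunk = mtd->erasesize;	// ideal: one eraseblock at a time
 *	u_char *buf = mtd_kmalloc_up_to(mtd, &chunk);
 *	size_t retlen;
 *	loff_t ofs;
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	for (ofs = 0; ofs < mtd->size; ofs += retlen) {
 *		size_t len = min_t(size_t, chunk, mtd->size - ofs);
 *		int err = mtd_read(mtd, ofs, len, &retlen, buf);
 *
 *		if (err && !mtd_is_bitflip(err))
 *			break;
 *		// ... scan retlen bytes in buf ...
 *	}
 *	kfree(buf);
 */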

#ifdef CONFIG_PROC_FS

/*====================================================================*/
/* Support for /proc/mtd */

static int mtd_proc_show(struct seq_file *m, void *v)
{
	struct mtd_info *mtd;

	seq_puts(m, "dev:    size   erasesize  name\n");
	mutex_lock(&mtd_table_mutex);
	mtd_for_each_device(mtd) {
		seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
			   mtd->index, (unsigned long long)mtd->size,
			   mtd->erasesize, mtd->name);
	}
	mutex_unlock(&mtd_table_mutex);
	return 0;
}

static int mtd_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, mtd_proc_show, NULL);
}

static const struct file_operations mtd_proc_ops = {
	.open		= mtd_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif /* CONFIG_PROC_FS */

/*====================================================================*/
/* Init code */

static int __init mtd_bdi_init(struct backing_dev_info *bdi, const char *name)
{
	int ret;

	ret = bdi_init(bdi);
	if (!ret)
		ret = bdi_register(bdi, NULL, "%s", name);

	if (ret)
		bdi_destroy(bdi);

	return ret;
}

static struct proc_dir_entry *proc_mtd;

static int __init init_mtd(void)
{
	int ret;

	ret = class_register(&mtd_class);
	if (ret)
		goto err_reg;

	ret = mtd_bdi_init(&mtd_bdi, "mtd");
	if (ret)
		goto err_bdi;

	proc_mtd = proc_create("mtd", 0, NULL, &mtd_proc_ops);

	ret = init_mtdchar();
	if (ret)
		goto out_procfs;

	return 0;

out_procfs:
	if (proc_mtd)
		remove_proc_entry("mtd", NULL);
err_bdi:
	class_unregister(&mtd_class);
err_reg:
	pr_err("Error registering mtd class or bdi: %d\n", ret);
	return ret;
}

static void __exit cleanup_mtd(void)
{
	cleanup_mtdchar();
	if (proc_mtd)
		remove_proc_entry("mtd", NULL);
	class_unregister(&mtd_class);
	bdi_destroy(&mtd_bdi);
	idr_destroy(&mtd_idr);
}

module_init(init_mtd);
module_exit(cleanup_mtd);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
MODULE_DESCRIPTION("Core MTD registration and access routines");