Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Core registration and callback routines for MTD
4 * drivers and users.
5 *
6 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
7 * Copyright © 2006 Red Hat UK Limited
8 */
9
10#include <linux/module.h>
11#include <linux/kernel.h>
12#include <linux/ptrace.h>
13#include <linux/seq_file.h>
14#include <linux/string.h>
15#include <linux/timer.h>
16#include <linux/major.h>
17#include <linux/fs.h>
18#include <linux/err.h>
19#include <linux/ioctl.h>
20#include <linux/init.h>
21#include <linux/of.h>
22#include <linux/proc_fs.h>
23#include <linux/idr.h>
24#include <linux/backing-dev.h>
25#include <linux/gfp.h>
26#include <linux/random.h>
27#include <linux/slab.h>
28#include <linux/reboot.h>
29#include <linux/leds.h>
30#include <linux/debugfs.h>
31#include <linux/nvmem-provider.h>
32#include <linux/root_dev.h>
33#include <linux/error-injection.h>
34
35#include <linux/mtd/mtd.h>
36#include <linux/mtd/partitions.h>
37
38#include "mtdcore.h"
39
40struct backing_dev_info *mtd_bdi;
41
42#ifdef CONFIG_PM_SLEEP
43
44static int mtd_cls_suspend(struct device *dev)
45{
46 struct mtd_info *mtd = dev_get_drvdata(dev);
47
48 return mtd ? mtd_suspend(mtd) : 0;
49}
50
51static int mtd_cls_resume(struct device *dev)
52{
53 struct mtd_info *mtd = dev_get_drvdata(dev);
54
55 if (mtd)
56 mtd_resume(mtd);
57 return 0;
58}
59
60static SIMPLE_DEV_PM_OPS(mtd_cls_pm_ops, mtd_cls_suspend, mtd_cls_resume);
61#define MTD_CLS_PM_OPS (&mtd_cls_pm_ops)
62#else
63#define MTD_CLS_PM_OPS NULL
64#endif
65
66static struct class mtd_class = {
67 .name = "mtd",
68 .pm = MTD_CLS_PM_OPS,
69};
70
71static DEFINE_IDR(mtd_idr);
72
73/* These are exported solely for the purpose of mtd_blkdevs.c. You
74 should not use them for _anything_ else */
75DEFINE_MUTEX(mtd_table_mutex);
76EXPORT_SYMBOL_GPL(mtd_table_mutex);
77
78struct mtd_info *__mtd_next_device(int i)
79{
80 return idr_get_next(&mtd_idr, &i);
81}
82EXPORT_SYMBOL_GPL(__mtd_next_device);
83
84static LIST_HEAD(mtd_notifiers);
85
86
87#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)
88
89/* REVISIT once MTD uses the driver model better, whoever allocates
90 * the mtd_info will probably want to use the release() hook...
91 */
92static void mtd_release(struct device *dev)
93{
94 struct mtd_info *mtd = dev_get_drvdata(dev);
95 dev_t index = MTD_DEVT(mtd->index);
96
97 idr_remove(&mtd_idr, mtd->index);
98 of_node_put(mtd_get_of_node(mtd));
99
100 if (mtd_is_partition(mtd))
101 release_mtd_partition(mtd);
102
103 /* remove /dev/mtdXro node */
104 device_destroy(&mtd_class, index + 1);
105}
106
107static void mtd_device_release(struct kref *kref)
108{
109 struct mtd_info *mtd = container_of(kref, struct mtd_info, refcnt);
110 bool is_partition = mtd_is_partition(mtd);
111
112 debugfs_remove_recursive(mtd->dbg.dfs_dir);
113
114 /* Try to remove the NVMEM provider */
115 nvmem_unregister(mtd->nvmem);
116
117 device_unregister(&mtd->dev);
118
119 /*
120 * Clear dev so mtd can be safely re-registered later if desired.
121 * Should not be done for partitions,
122 * as it was already destroyed in device_unregister().
123 */
124 if (!is_partition)
125 memset(&mtd->dev, 0, sizeof(mtd->dev));
126
127 module_put(THIS_MODULE);
128}
129
130#define MTD_DEVICE_ATTR_RO(name) \
131static DEVICE_ATTR(name, 0444, mtd_##name##_show, NULL)
132
133#define MTD_DEVICE_ATTR_RW(name) \
134static DEVICE_ATTR(name, 0644, mtd_##name##_show, mtd_##name##_store)
135
136static ssize_t mtd_type_show(struct device *dev,
137 struct device_attribute *attr, char *buf)
138{
139 struct mtd_info *mtd = dev_get_drvdata(dev);
140 char *type;
141
142 switch (mtd->type) {
143 case MTD_ABSENT:
144 type = "absent";
145 break;
146 case MTD_RAM:
147 type = "ram";
148 break;
149 case MTD_ROM:
150 type = "rom";
151 break;
152 case MTD_NORFLASH:
153 type = "nor";
154 break;
155 case MTD_NANDFLASH:
156 type = "nand";
157 break;
158 case MTD_DATAFLASH:
159 type = "dataflash";
160 break;
161 case MTD_UBIVOLUME:
162 type = "ubi";
163 break;
164 case MTD_MLCNANDFLASH:
165 type = "mlc-nand";
166 break;
167 default:
168 type = "unknown";
169 }
170
171 return sysfs_emit(buf, "%s\n", type);
172}
173MTD_DEVICE_ATTR_RO(type);
174
175static ssize_t mtd_flags_show(struct device *dev,
176 struct device_attribute *attr, char *buf)
177{
178 struct mtd_info *mtd = dev_get_drvdata(dev);
179
180 return sysfs_emit(buf, "0x%lx\n", (unsigned long)mtd->flags);
181}
182MTD_DEVICE_ATTR_RO(flags);
183
184static ssize_t mtd_size_show(struct device *dev,
185 struct device_attribute *attr, char *buf)
186{
187 struct mtd_info *mtd = dev_get_drvdata(dev);
188
189 return sysfs_emit(buf, "%llu\n", (unsigned long long)mtd->size);
190}
191MTD_DEVICE_ATTR_RO(size);
192
193static ssize_t mtd_erasesize_show(struct device *dev,
194 struct device_attribute *attr, char *buf)
195{
196 struct mtd_info *mtd = dev_get_drvdata(dev);
197
198 return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->erasesize);
199}
200MTD_DEVICE_ATTR_RO(erasesize);
201
202static ssize_t mtd_writesize_show(struct device *dev,
203 struct device_attribute *attr, char *buf)
204{
205 struct mtd_info *mtd = dev_get_drvdata(dev);
206
207 return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->writesize);
208}
209MTD_DEVICE_ATTR_RO(writesize);
210
211static ssize_t mtd_subpagesize_show(struct device *dev,
212 struct device_attribute *attr, char *buf)
213{
214 struct mtd_info *mtd = dev_get_drvdata(dev);
215 unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;
216
217 return sysfs_emit(buf, "%u\n", subpagesize);
218}
219MTD_DEVICE_ATTR_RO(subpagesize);
220
221static ssize_t mtd_oobsize_show(struct device *dev,
222 struct device_attribute *attr, char *buf)
223{
224 struct mtd_info *mtd = dev_get_drvdata(dev);
225
226 return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->oobsize);
227}
228MTD_DEVICE_ATTR_RO(oobsize);
229
230static ssize_t mtd_oobavail_show(struct device *dev,
231 struct device_attribute *attr, char *buf)
232{
233 struct mtd_info *mtd = dev_get_drvdata(dev);
234
235 return sysfs_emit(buf, "%u\n", mtd->oobavail);
236}
237MTD_DEVICE_ATTR_RO(oobavail);
238
239static ssize_t mtd_numeraseregions_show(struct device *dev,
240 struct device_attribute *attr, char *buf)
241{
242 struct mtd_info *mtd = dev_get_drvdata(dev);
243
244 return sysfs_emit(buf, "%u\n", mtd->numeraseregions);
245}
246MTD_DEVICE_ATTR_RO(numeraseregions);
247
248static ssize_t mtd_name_show(struct device *dev,
249 struct device_attribute *attr, char *buf)
250{
251 struct mtd_info *mtd = dev_get_drvdata(dev);
252
253 return sysfs_emit(buf, "%s\n", mtd->name);
254}
255MTD_DEVICE_ATTR_RO(name);
256
257static ssize_t mtd_ecc_strength_show(struct device *dev,
258 struct device_attribute *attr, char *buf)
259{
260 struct mtd_info *mtd = dev_get_drvdata(dev);
261
262 return sysfs_emit(buf, "%u\n", mtd->ecc_strength);
263}
264MTD_DEVICE_ATTR_RO(ecc_strength);
265
266static ssize_t mtd_bitflip_threshold_show(struct device *dev,
267 struct device_attribute *attr,
268 char *buf)
269{
270 struct mtd_info *mtd = dev_get_drvdata(dev);
271
272 return sysfs_emit(buf, "%u\n", mtd->bitflip_threshold);
273}
274
275static ssize_t mtd_bitflip_threshold_store(struct device *dev,
276 struct device_attribute *attr,
277 const char *buf, size_t count)
278{
279 struct mtd_info *mtd = dev_get_drvdata(dev);
280 unsigned int bitflip_threshold;
281 int retval;
282
283 retval = kstrtouint(buf, 0, &bitflip_threshold);
284 if (retval)
285 return retval;
286
287 mtd->bitflip_threshold = bitflip_threshold;
288 return count;
289}
290MTD_DEVICE_ATTR_RW(bitflip_threshold);
291
292static ssize_t mtd_ecc_step_size_show(struct device *dev,
293 struct device_attribute *attr, char *buf)
294{
295 struct mtd_info *mtd = dev_get_drvdata(dev);
296
297 return sysfs_emit(buf, "%u\n", mtd->ecc_step_size);
298
299}
300MTD_DEVICE_ATTR_RO(ecc_step_size);
301
302static ssize_t mtd_corrected_bits_show(struct device *dev,
303 struct device_attribute *attr, char *buf)
304{
305 struct mtd_info *mtd = dev_get_drvdata(dev);
306 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
307
308 return sysfs_emit(buf, "%u\n", ecc_stats->corrected);
309}
310MTD_DEVICE_ATTR_RO(corrected_bits); /* ecc stats corrected */
311
312static ssize_t mtd_ecc_failures_show(struct device *dev,
313 struct device_attribute *attr, char *buf)
314{
315 struct mtd_info *mtd = dev_get_drvdata(dev);
316 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
317
318 return sysfs_emit(buf, "%u\n", ecc_stats->failed);
319}
320MTD_DEVICE_ATTR_RO(ecc_failures); /* ecc stats errors */
321
322static ssize_t mtd_bad_blocks_show(struct device *dev,
323 struct device_attribute *attr, char *buf)
324{
325 struct mtd_info *mtd = dev_get_drvdata(dev);
326 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
327
328 return sysfs_emit(buf, "%u\n", ecc_stats->badblocks);
329}
330MTD_DEVICE_ATTR_RO(bad_blocks);
331
332static ssize_t mtd_bbt_blocks_show(struct device *dev,
333 struct device_attribute *attr, char *buf)
334{
335 struct mtd_info *mtd = dev_get_drvdata(dev);
336 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
337
338 return sysfs_emit(buf, "%u\n", ecc_stats->bbtblocks);
339}
340MTD_DEVICE_ATTR_RO(bbt_blocks);
341
342static struct attribute *mtd_attrs[] = {
343 &dev_attr_type.attr,
344 &dev_attr_flags.attr,
345 &dev_attr_size.attr,
346 &dev_attr_erasesize.attr,
347 &dev_attr_writesize.attr,
348 &dev_attr_subpagesize.attr,
349 &dev_attr_oobsize.attr,
350 &dev_attr_oobavail.attr,
351 &dev_attr_numeraseregions.attr,
352 &dev_attr_name.attr,
353 &dev_attr_ecc_strength.attr,
354 &dev_attr_ecc_step_size.attr,
355 &dev_attr_corrected_bits.attr,
356 &dev_attr_ecc_failures.attr,
357 &dev_attr_bad_blocks.attr,
358 &dev_attr_bbt_blocks.attr,
359 &dev_attr_bitflip_threshold.attr,
360 NULL,
361};
362ATTRIBUTE_GROUPS(mtd);
363
364static const struct device_type mtd_devtype = {
365 .name = "mtd",
366 .groups = mtd_groups,
367 .release = mtd_release,
368};
369
370static bool mtd_expert_analysis_mode;
371
372#ifdef CONFIG_DEBUG_FS
373bool mtd_check_expert_analysis_mode(void)
374{
375 const char *mtd_expert_analysis_warning =
376 "Bad block checks have been entirely disabled.\n"
377 "This is only reserved for post-mortem forensics and debug purposes.\n"
378 "Never enable this mode if you do not know what you are doing!\n";
379
380 return WARN_ONCE(mtd_expert_analysis_mode, mtd_expert_analysis_warning);
381}
382EXPORT_SYMBOL_GPL(mtd_check_expert_analysis_mode);
383#endif
384
385static struct dentry *dfs_dir_mtd;
386
387static void mtd_debugfs_populate(struct mtd_info *mtd)
388{
389 struct device *dev = &mtd->dev;
390
391 if (IS_ERR_OR_NULL(dfs_dir_mtd))
392 return;
393
394 mtd->dbg.dfs_dir = debugfs_create_dir(dev_name(dev), dfs_dir_mtd);
395}
396
397#ifndef CONFIG_MMU
398unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
399{
400 switch (mtd->type) {
401 case MTD_RAM:
402 return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
403 NOMMU_MAP_READ | NOMMU_MAP_WRITE;
404 case MTD_ROM:
405 return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
406 NOMMU_MAP_READ;
407 default:
408 return NOMMU_MAP_COPY;
409 }
410}
411EXPORT_SYMBOL_GPL(mtd_mmap_capabilities);
412#endif
413
414static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
415 void *cmd)
416{
417 struct mtd_info *mtd;
418
419 mtd = container_of(n, struct mtd_info, reboot_notifier);
420 mtd->_reboot(mtd);
421
422 return NOTIFY_DONE;
423}
424
425/**
426 * mtd_wunit_to_pairing_info - get pairing information of a wunit
427 * @mtd: pointer to new MTD device info structure
428 * @wunit: write unit we are interested in
429 * @info: returned pairing information
430 *
431 * Retrieve pairing information associated with the wunit.
432 * This is mainly useful when dealing with MLC/TLC NANDs where pages can be
433 * paired together, and where programming a page may influence the page it is
434 * paired with.
435 * The notion of page is replaced by the term wunit (write-unit) to stay
436 * consistent with the ->writesize field.
437 *
438 * The @wunit argument can be extracted from an absolute offset using
439 * mtd_offset_to_wunit(). @info is filled with the pairing information attached
440 * to @wunit.
441 *
442 * From the pairing info the MTD user can find all the wunits paired with
443 * @wunit using the following loop:
444 *
445 * for (i = 0; i < mtd_pairing_groups(mtd); i++) {
446 * info.group = i;
447 * mtd_pairing_info_to_wunit(mtd, &info);
448 * ...
449 * }
450 */
451int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
452 struct mtd_pairing_info *info)
453{
454 struct mtd_info *master = mtd_get_master(mtd);
455 int npairs = mtd_wunit_per_eb(master) / mtd_pairing_groups(master);
456
457 if (wunit < 0 || wunit >= npairs)
458 return -EINVAL;
459
460 if (master->pairing && master->pairing->get_info)
461 return master->pairing->get_info(master, wunit, info);
462
463 info->group = 0;
464 info->pair = wunit;
465
466 return 0;
467}
468EXPORT_SYMBOL_GPL(mtd_wunit_to_pairing_info);
469
470/**
471 * mtd_pairing_info_to_wunit - get wunit from pairing information
472 * @mtd: pointer to new MTD device info structure
473 * @info: pairing information struct
474 *
475 * Returns a positive number representing the wunit associated to the info
476 * struct, or a negative error code.
477 *
478 * This is the reverse of mtd_wunit_to_pairing_info(), and can help one to
479 * iterate over all wunits of a given pair (see mtd_wunit_to_pairing_info()
480 * doc).
481 *
482 * It can also be used to only program the first page of each pair (i.e.
483 * page attached to group 0), which allows one to use an MLC NAND in
484 * software-emulated SLC mode:
485 *
486 * info.group = 0;
487 * npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);
488 * for (info.pair = 0; info.pair < npairs; info.pair++) {
489 * wunit = mtd_pairing_info_to_wunit(mtd, &info);
490 * mtd_write(mtd, mtd_wunit_to_offset(mtd, blkoffs, wunit),
491 * mtd->writesize, &retlen, buf + (info.pair * mtd->writesize));
492 * }
493 */
494int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
495 const struct mtd_pairing_info *info)
496{
497 struct mtd_info *master = mtd_get_master(mtd);
498 int ngroups = mtd_pairing_groups(master);
499 int npairs = mtd_wunit_per_eb(master) / ngroups;
500
501 if (!info || info->pair < 0 || info->pair >= npairs ||
502 info->group < 0 || info->group >= ngroups)
503 return -EINVAL;
504
505 if (master->pairing && master->pairing->get_wunit)
506 return mtd->pairing->get_wunit(master, info);
507
508 return info->pair;
509}
510EXPORT_SYMBOL_GPL(mtd_pairing_info_to_wunit);
511
512/**
513 * mtd_pairing_groups - get the number of pairing groups
514 * @mtd: pointer to new MTD device info structure
515 *
516 * Returns the number of pairing groups.
517 *
518 * This number is usually equal to the number of bits exposed by a single
519 * cell, and can be used in conjunction with mtd_pairing_info_to_wunit()
520 * to iterate over all pages of a given pair.
521 */
522int mtd_pairing_groups(struct mtd_info *mtd)
523{
524 struct mtd_info *master = mtd_get_master(mtd);
525
526 if (!master->pairing || !master->pairing->ngroups)
527 return 1;
528
529 return master->pairing->ngroups;
530}
531EXPORT_SYMBOL_GPL(mtd_pairing_groups);
532
533static int mtd_nvmem_reg_read(void *priv, unsigned int offset,
534 void *val, size_t bytes)
535{
536 struct mtd_info *mtd = priv;
537 size_t retlen;
538 int err;
539
540 err = mtd_read(mtd, offset, bytes, &retlen, val);
541 if (err && err != -EUCLEAN)
542 return err;
543
544 return retlen == bytes ? 0 : -EIO;
545}
546
547static int mtd_nvmem_add(struct mtd_info *mtd)
548{
549 struct device_node *node = mtd_get_of_node(mtd);
550 struct nvmem_config config = {};
551
552 config.id = NVMEM_DEVID_NONE;
553 config.dev = &mtd->dev;
554 config.name = dev_name(&mtd->dev);
555 config.owner = THIS_MODULE;
556 config.add_legacy_fixed_of_cells = of_device_is_compatible(node, "nvmem-cells");
557 config.reg_read = mtd_nvmem_reg_read;
558 config.size = mtd->size;
559 config.word_size = 1;
560 config.stride = 1;
561 config.read_only = true;
562 config.root_only = true;
563 config.ignore_wp = true;
564 config.priv = mtd;
565
566 mtd->nvmem = nvmem_register(&config);
567 if (IS_ERR(mtd->nvmem)) {
568 /* Just ignore if there is no NVMEM support in the kernel */
569 if (PTR_ERR(mtd->nvmem) == -EOPNOTSUPP)
570 mtd->nvmem = NULL;
571 else
572 return dev_err_probe(&mtd->dev, PTR_ERR(mtd->nvmem),
573 "Failed to register NVMEM device\n");
574 }
575
576 return 0;
577}
578
579static void mtd_check_of_node(struct mtd_info *mtd)
580{
581 struct device_node *partitions, *parent_dn, *mtd_dn = NULL;
582 const char *pname, *prefix = "partition-";
583 int plen, mtd_name_len, offset, prefix_len;
584
585 /* Check if MTD already has a device node */
586 if (mtd_get_of_node(mtd))
587 return;
588
589 if (!mtd_is_partition(mtd))
590 return;
591
592 parent_dn = of_node_get(mtd_get_of_node(mtd->parent));
593 if (!parent_dn)
594 return;
595
596 if (mtd_is_partition(mtd->parent))
597 partitions = of_node_get(parent_dn);
598 else
599 partitions = of_get_child_by_name(parent_dn, "partitions");
600 if (!partitions)
601 goto exit_parent;
602
603 prefix_len = strlen(prefix);
604 mtd_name_len = strlen(mtd->name);
605
606 /* Search if a partition is defined with the same name */
607 for_each_child_of_node(partitions, mtd_dn) {
608 /* Skip partition with no/wrong prefix */
609 if (!of_node_name_prefix(mtd_dn, prefix))
610 continue;
611
612 /* Labels have priority. Check that first */
613 if (!of_property_read_string(mtd_dn, "label", &pname)) {
614 offset = 0;
615 } else {
616 pname = mtd_dn->name;
617 offset = prefix_len;
618 }
619
620 plen = strlen(pname) - offset;
621 if (plen == mtd_name_len &&
622 !strncmp(mtd->name, pname + offset, plen)) {
623 mtd_set_of_node(mtd, mtd_dn);
624 break;
625 }
626 }
627
628 of_node_put(partitions);
629exit_parent:
630 of_node_put(parent_dn);
631}
632
633/**
634 * add_mtd_device - register an MTD device
635 * @mtd: pointer to new MTD device info structure
636 *
637 * Add a device to the list of MTD devices present in the system, and
638 * notify each currently active MTD 'user' of its arrival. Returns
639 * zero on success or non-zero on failure.
640 */
641
642int add_mtd_device(struct mtd_info *mtd)
643{
644 struct device_node *np = mtd_get_of_node(mtd);
645 struct mtd_info *master = mtd_get_master(mtd);
646 struct mtd_notifier *not;
647 int i, error, ofidx;
648
649 /*
650 * May occur, for instance, on buggy drivers which call
651 * mtd_device_parse_register() multiple times on the same master MTD,
652 * especially with CONFIG_MTD_PARTITIONED_MASTER=y.
653 */
654 if (WARN_ONCE(mtd->dev.type, "MTD already registered\n"))
655 return -EEXIST;
656
657 BUG_ON(mtd->writesize == 0);
658
659 /*
660 * MTD drivers should implement ->_{write,read}() or
661 * ->_{write,read}_oob(), but not both.
662 */
663 if (WARN_ON((mtd->_write && mtd->_write_oob) ||
664 (mtd->_read && mtd->_read_oob)))
665 return -EINVAL;
666
667 if (WARN_ON((!mtd->erasesize || !master->_erase) &&
668 !(mtd->flags & MTD_NO_ERASE)))
669 return -EINVAL;
670
671 /*
672 * MTD_SLC_ON_MLC_EMULATION can only be set on partitions, when the
673 * master is an MLC NAND and has a proper pairing scheme defined.
674 * We also reject masters that implement ->_writev() for now, because
675 * NAND controller drivers don't implement this hook, and adding the
676 * SLC -> MLC address/length conversion to this path is useless if we
677 * don't have a user.
678 */
679 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION &&
680 (!mtd_is_partition(mtd) || master->type != MTD_MLCNANDFLASH ||
681 !master->pairing || master->_writev))
682 return -EINVAL;
683
684 mutex_lock(&mtd_table_mutex);
685
686 ofidx = -1;
687 if (np)
688 ofidx = of_alias_get_id(np, "mtd");
689 if (ofidx >= 0)
690 i = idr_alloc(&mtd_idr, mtd, ofidx, ofidx + 1, GFP_KERNEL);
691 else
692 i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
693 if (i < 0) {
694 error = i;
695 goto fail_locked;
696 }
697
698 mtd->index = i;
699 kref_init(&mtd->refcnt);
700
701 /* default value if not set by driver */
702 if (mtd->bitflip_threshold == 0)
703 mtd->bitflip_threshold = mtd->ecc_strength;
704
705 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
706 int ngroups = mtd_pairing_groups(master);
707
708 mtd->erasesize /= ngroups;
709 mtd->size = (u64)mtd_div_by_eb(mtd->size, master) *
710 mtd->erasesize;
711 }
712
713 if (is_power_of_2(mtd->erasesize))
714 mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
715 else
716 mtd->erasesize_shift = 0;
717
718 if (is_power_of_2(mtd->writesize))
719 mtd->writesize_shift = ffs(mtd->writesize) - 1;
720 else
721 mtd->writesize_shift = 0;
722
723 mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
724 mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
725
726 /* Some chips always power up locked. Unlock them now */
727 if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
728 error = mtd_unlock(mtd, 0, mtd->size);
729 if (error && error != -EOPNOTSUPP)
730 printk(KERN_WARNING
731 "%s: unlock failed, writes may not work\n",
732 mtd->name);
733 /* Ignore unlock failures? */
734 error = 0;
735 }
736
737 /* Caller should have set dev.parent to match the
738 * physical device, if appropriate.
739 */
740 mtd->dev.type = &mtd_devtype;
741 mtd->dev.class = &mtd_class;
742 mtd->dev.devt = MTD_DEVT(i);
743 dev_set_name(&mtd->dev, "mtd%d", i);
744 dev_set_drvdata(&mtd->dev, mtd);
745 mtd_check_of_node(mtd);
746 of_node_get(mtd_get_of_node(mtd));
747 error = device_register(&mtd->dev);
748 if (error) {
749 put_device(&mtd->dev);
750 goto fail_added;
751 }
752
753 /* Add the nvmem provider */
754 error = mtd_nvmem_add(mtd);
755 if (error)
756 goto fail_nvmem_add;
757
758 mtd_debugfs_populate(mtd);
759
760 device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
761 "mtd%dro", i);
762
763 pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
764 /* No need to get a refcount on the module containing
765 the notifier, since we hold the mtd_table_mutex */
766 list_for_each_entry(not, &mtd_notifiers, list)
767 not->add(mtd);
768
769 mutex_unlock(&mtd_table_mutex);
770
771 if (of_property_read_bool(mtd_get_of_node(mtd), "linux,rootfs")) {
772 if (IS_BUILTIN(CONFIG_MTD)) {
773 pr_info("mtd: setting mtd%d (%s) as root device\n", mtd->index, mtd->name);
774 ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, mtd->index);
775 } else {
776 pr_warn("mtd: can't set mtd%d (%s) as root device - mtd must be builtin\n",
777 mtd->index, mtd->name);
778 }
779 }
780
781 /* We _know_ we aren't being removed, because
782 our caller is still holding us here. So none
783 of this try_ nonsense, and no bitching about it
784 either. :) */
785 __module_get(THIS_MODULE);
786 return 0;
787
788fail_nvmem_add:
789 device_unregister(&mtd->dev);
790fail_added:
791 of_node_put(mtd_get_of_node(mtd));
792 idr_remove(&mtd_idr, i);
793fail_locked:
794 mutex_unlock(&mtd_table_mutex);
795 return error;
796}
797
798/**
799 * del_mtd_device - unregister an MTD device
800 * @mtd: pointer to MTD device info structure
801 *
802 * Remove a device from the list of MTD devices present in the system,
803 * and notify each currently active MTD 'user' of its departure.
804 * Returns zero on success or -ENODEV on failure, which currently will happen
805 * if the requested device does not appear to be present in the list.
806 */
807
808int del_mtd_device(struct mtd_info *mtd)
809{
810 int ret;
811 struct mtd_notifier *not;
812
813 mutex_lock(&mtd_table_mutex);
814
815 if (idr_find(&mtd_idr, mtd->index) != mtd) {
816 ret = -ENODEV;
817 goto out_error;
818 }
819
820 /* No need to get a refcount on the module containing
821 the notifier, since we hold the mtd_table_mutex */
822 list_for_each_entry(not, &mtd_notifiers, list)
823 not->remove(mtd);
824
825 kref_put(&mtd->refcnt, mtd_device_release);
826 ret = 0;
827
828out_error:
829 mutex_unlock(&mtd_table_mutex);
830 return ret;
831}
832
833/*
834 * Set a few defaults based on the parent devices, if not provided by the
835 * driver
836 */
837static void mtd_set_dev_defaults(struct mtd_info *mtd)
838{
839 if (mtd->dev.parent) {
840 if (!mtd->owner && mtd->dev.parent->driver)
841 mtd->owner = mtd->dev.parent->driver->owner;
842 if (!mtd->name)
843 mtd->name = dev_name(mtd->dev.parent);
844 } else {
845 pr_debug("mtd device won't show a device symlink in sysfs\n");
846 }
847
848 INIT_LIST_HEAD(&mtd->partitions);
849 mutex_init(&mtd->master.partitions_lock);
850 mutex_init(&mtd->master.chrdev_lock);
851}
852
853static ssize_t mtd_otp_size(struct mtd_info *mtd, bool is_user)
854{
855 struct otp_info *info;
856 ssize_t size = 0;
857 unsigned int i;
858 size_t retlen;
859 int ret;
860
861 info = kmalloc(PAGE_SIZE, GFP_KERNEL);
862 if (!info)
863 return -ENOMEM;
864
865 if (is_user)
866 ret = mtd_get_user_prot_info(mtd, PAGE_SIZE, &retlen, info);
867 else
868 ret = mtd_get_fact_prot_info(mtd, PAGE_SIZE, &retlen, info);
869 if (ret)
870 goto err;
871
872 for (i = 0; i < retlen / sizeof(*info); i++)
873 size += info[i].length;
874
875 kfree(info);
876 return size;
877
878err:
879 kfree(info);
880
881 /* ENODATA means there is no OTP region. */
882 return ret == -ENODATA ? 0 : ret;
883}
884
885static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
886 const char *compatible,
887 int size,
888 nvmem_reg_read_t reg_read)
889{
890 struct nvmem_device *nvmem = NULL;
891 struct nvmem_config config = {};
892 struct device_node *np;
893
894 /* DT binding is optional */
895 np = of_get_compatible_child(mtd->dev.of_node, compatible);
896
897 /* OTP nvmem will be registered on the physical device */
898 config.dev = mtd->dev.parent;
899 config.name = compatible;
900 config.id = NVMEM_DEVID_AUTO;
901 config.owner = THIS_MODULE;
902 config.add_legacy_fixed_of_cells = true;
903 config.type = NVMEM_TYPE_OTP;
904 config.root_only = true;
905 config.ignore_wp = true;
906 config.reg_read = reg_read;
907 config.size = size;
908 config.of_node = np;
909 config.priv = mtd;
910
911 nvmem = nvmem_register(&config);
912 /* Just ignore if there is no NVMEM support in the kernel */
913 if (IS_ERR(nvmem) && PTR_ERR(nvmem) == -EOPNOTSUPP)
914 nvmem = NULL;
915
916 of_node_put(np);
917
918 return nvmem;
919}
920
921static int mtd_nvmem_user_otp_reg_read(void *priv, unsigned int offset,
922 void *val, size_t bytes)
923{
924 struct mtd_info *mtd = priv;
925 size_t retlen;
926 int ret;
927
928 ret = mtd_read_user_prot_reg(mtd, offset, bytes, &retlen, val);
929 if (ret)
930 return ret;
931
932 return retlen == bytes ? 0 : -EIO;
933}
934
935static int mtd_nvmem_fact_otp_reg_read(void *priv, unsigned int offset,
936 void *val, size_t bytes)
937{
938 struct mtd_info *mtd = priv;
939 size_t retlen;
940 int ret;
941
942 ret = mtd_read_fact_prot_reg(mtd, offset, bytes, &retlen, val);
943 if (ret)
944 return ret;
945
946 return retlen == bytes ? 0 : -EIO;
947}
948
949static int mtd_otp_nvmem_add(struct mtd_info *mtd)
950{
951 struct device *dev = mtd->dev.parent;
952 struct nvmem_device *nvmem;
953 ssize_t size;
954 int err;
955
956 if (mtd->_get_user_prot_info && mtd->_read_user_prot_reg) {
957 size = mtd_otp_size(mtd, true);
958 if (size < 0)
959 return size;
960
961 if (size > 0) {
962 nvmem = mtd_otp_nvmem_register(mtd, "user-otp", size,
963 mtd_nvmem_user_otp_reg_read);
964 if (IS_ERR(nvmem)) {
965 err = PTR_ERR(nvmem);
966 goto err;
967 }
968 mtd->otp_user_nvmem = nvmem;
969 }
970 }
971
972 if (mtd->_get_fact_prot_info && mtd->_read_fact_prot_reg) {
973 size = mtd_otp_size(mtd, false);
974 if (size < 0) {
975 err = size;
976 goto err;
977 }
978
979 if (size > 0) {
980 /*
981 * The factory OTP contains things such as a unique serial
982 * number and is small, so let's read it out and put it
983 * into the entropy pool.
984 */
985 void *otp;
986
987 otp = kmalloc(size, GFP_KERNEL);
988 if (!otp) {
989 err = -ENOMEM;
990 goto err;
991 }
992 err = mtd_nvmem_fact_otp_reg_read(mtd, 0, otp, size);
993 if (err < 0) {
994 kfree(otp);
995 goto err;
996 }
997 add_device_randomness(otp, err);
998 kfree(otp);
999
1000 nvmem = mtd_otp_nvmem_register(mtd, "factory-otp", size,
1001 mtd_nvmem_fact_otp_reg_read);
1002 if (IS_ERR(nvmem)) {
1003 err = PTR_ERR(nvmem);
1004 goto err;
1005 }
1006 mtd->otp_factory_nvmem = nvmem;
1007 }
1008 }
1009
1010 return 0;
1011
1012err:
1013 nvmem_unregister(mtd->otp_user_nvmem);
1014 return dev_err_probe(dev, err, "Failed to register OTP NVMEM device\n");
1015}
1016
1017/**
1018 * mtd_device_parse_register - parse partitions and register an MTD device.
1019 *
1020 * @mtd: the MTD device to register
1021 * @types: the list of MTD partition probes to try, see
1022 * 'parse_mtd_partitions()' for more information
1023 * @parser_data: MTD partition parser-specific data
1024 * @parts: fallback partition information to register, if parsing fails;
1025 * only valid if %nr_parts > %0
1026 * @nr_parts: the number of partitions in parts, if zero then the full
1027 * MTD device is registered if no partition info is found
1028 *
1029 * This function aggregates MTD partitions parsing (done by
1030 * 'parse_mtd_partitions()') and MTD device and partitions registering. It
1031 * basically follows the most common pattern found in many MTD drivers:
1032 *
1033 * * If the MTD_PARTITIONED_MASTER option is set, then the device as a whole is
1034 * registered first.
1035 * * Then it tries to probe partitions on MTD device @mtd using parsers
1036 * specified in @types (if @types is %NULL, then the default list of parsers
1037 * is used, see 'parse_mtd_partitions()' for more information). If none are
1038 * found, this function tries to fall back to information specified in
1039 * @parts/@nr_parts.
1040 * * If no partitions were found this function just registers the MTD device
1041 * @mtd and exits.
1042 *
1043 * Returns zero in case of success and a negative error code in case of failure.
1044 */
1045int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
1046 struct mtd_part_parser_data *parser_data,
1047 const struct mtd_partition *parts,
1048 int nr_parts)
1049{
1050 int ret;
1051
1052 mtd_set_dev_defaults(mtd);
1053
1054 ret = mtd_otp_nvmem_add(mtd);
1055 if (ret)
1056 goto out;
1057
1058 if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
1059 ret = add_mtd_device(mtd);
1060 if (ret)
1061 goto out;
1062 }
1063
1064 /* Prefer parsed partitions over driver-provided fallback */
1065 ret = parse_mtd_partitions(mtd, types, parser_data);
1066 if (ret == -EPROBE_DEFER)
1067 goto out;
1068
1069 if (ret > 0)
1070 ret = 0;
1071 else if (nr_parts)
1072 ret = add_mtd_partitions(mtd, parts, nr_parts);
1073 else if (!device_is_registered(&mtd->dev))
1074 ret = add_mtd_device(mtd);
1075 else
1076 ret = 0;
1077
1078 if (ret)
1079 goto out;
1080
1081 /*
1082 * FIXME: some drivers unfortunately call this function more than once.
1083 * So we have to check if we've already assigned the reboot notifier.
1084 *
1085 * Generally, we can make multiple calls work for most cases, but it
1086 * does cause problems with parse_mtd_partitions() above (e.g.,
1087 * cmdlineparts will register partitions more than once).
1088 */
1089 WARN_ONCE(mtd->_reboot && mtd->reboot_notifier.notifier_call,
1090 "MTD already registered\n");
1091 if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) {
1092 mtd->reboot_notifier.notifier_call = mtd_reboot_notifier;
1093 register_reboot_notifier(&mtd->reboot_notifier);
1094 }
1095
1096out:
1097 if (ret) {
1098 nvmem_unregister(mtd->otp_user_nvmem);
1099 nvmem_unregister(mtd->otp_factory_nvmem);
1100 }
1101
1102 if (ret && device_is_registered(&mtd->dev))
1103 del_mtd_device(mtd);
1104
1105 return ret;
1106}
1107EXPORT_SYMBOL_GPL(mtd_device_parse_register);
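
/*
 * Illustrative driver-side sketch (an assumption, not part of this file):
 * a typical flash driver fills in its mtd_info, optionally provides fallback
 * partitions, and lets mtd_device_parse_register() handle both parsing and
 * registration. The parser list, partition layout and names below are made
 * up for illustration only.
 *
 *   static const char * const probe_types[] = { "cmdlinepart", "ofpart", NULL };
 *   static const struct mtd_partition fallback_parts[] = {
 *           { .name = "boot", .offset = 0,     .size = SZ_1M },
 *           { .name = "data", .offset = SZ_1M, .size = MTDPART_SIZ_FULL },
 *   };
 *
 *   ret = mtd_device_parse_register(mtd, probe_types, NULL, fallback_parts,
 *                                   ARRAY_SIZE(fallback_parts));
 *   if (ret)
 *           return ret;
 *
 * The matching remove path is a single call to mtd_device_unregister(mtd).
 */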
1108
1109/**
1110 * mtd_device_unregister - unregister an existing MTD device.
1111 *
1112 * @master: the MTD device to unregister. This will unregister both the master
1113 * and any partitions if registered.
1114 */
1115int mtd_device_unregister(struct mtd_info *master)
1116{
1117 int err;
1118
1119 if (master->_reboot) {
1120 unregister_reboot_notifier(&master->reboot_notifier);
1121 memset(&master->reboot_notifier, 0, sizeof(master->reboot_notifier));
1122 }
1123
1124 nvmem_unregister(master->otp_user_nvmem);
1125 nvmem_unregister(master->otp_factory_nvmem);
1126
1127 err = del_mtd_partitions(master);
1128 if (err)
1129 return err;
1130
1131 if (!device_is_registered(&master->dev))
1132 return 0;
1133
1134 return del_mtd_device(master);
1135}
1136EXPORT_SYMBOL_GPL(mtd_device_unregister);
1137
1138/**
1139 * register_mtd_user - register a 'user' of MTD devices.
1140 * @new: pointer to notifier info structure
1141 *
1142 * Registers a pair of callback functions to be called upon addition
1143 * or removal of MTD devices. Causes the 'add' callback to be immediately
1144 * invoked for each MTD device currently present in the system.
1145 */
1146void register_mtd_user (struct mtd_notifier *new)
1147{
1148 struct mtd_info *mtd;
1149
1150 mutex_lock(&mtd_table_mutex);
1151
1152 list_add(&new->list, &mtd_notifiers);
1153
1154 __module_get(THIS_MODULE);
1155
1156 mtd_for_each_device(mtd)
1157 new->add(mtd);
1158
1159 mutex_unlock(&mtd_table_mutex);
1160}
1161EXPORT_SYMBOL_GPL(register_mtd_user);
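
/*
 * Illustrative sketch (an assumption, not part of this file): an MTD 'user'
 * such as a block translation layer registers add/remove callbacks once and
 * is then notified about every present and future MTD device.
 *
 *   static void example_add_mtd(struct mtd_info *mtd)
 *   {
 *           pr_info("mtd%d (%s) appeared\n", mtd->index, mtd->name);
 *   }
 *
 *   static void example_remove_mtd(struct mtd_info *mtd)
 *   {
 *           pr_info("mtd%d is going away\n", mtd->index);
 *   }
 *
 *   static struct mtd_notifier example_notifier = {
 *           .add    = example_add_mtd,
 *           .remove = example_remove_mtd,
 *   };
 *
 *   register_mtd_user(&example_notifier);
 */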
1162
1163/**
1164 * unregister_mtd_user - unregister a 'user' of MTD devices.
1165 * @old: pointer to notifier info structure
1166 *
1167 * Removes a callback function pair from the list of 'users' to be
1168 * notified upon addition or removal of MTD devices. Causes the
1169 * 'remove' callback to be immediately invoked for each MTD device
1170 * currently present in the system.
1171 */
1172int unregister_mtd_user (struct mtd_notifier *old)
1173{
1174 struct mtd_info *mtd;
1175
1176 mutex_lock(&mtd_table_mutex);
1177
1178 module_put(THIS_MODULE);
1179
1180 mtd_for_each_device(mtd)
1181 old->remove(mtd);
1182
1183 list_del(&old->list);
1184 mutex_unlock(&mtd_table_mutex);
1185 return 0;
1186}
1187EXPORT_SYMBOL_GPL(unregister_mtd_user);
1188
1189/**
1190 * get_mtd_device - obtain a validated handle for an MTD device
1191 * @mtd: last known address of the required MTD device
1192 * @num: internal device number of the required MTD device
1193 *
1194 * Given a number and NULL address, return the num'th entry in the device
1195 * table, if any. Given an address and num == -1, search the device table
1196 * for a device with that address and return if it's still present. Given
1197 * both, return the num'th driver only if its address matches. Return
1198 * error code if not.
1199 */
1200struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
1201{
1202 struct mtd_info *ret = NULL, *other;
1203 int err = -ENODEV;
1204
1205 mutex_lock(&mtd_table_mutex);
1206
1207 if (num == -1) {
1208 mtd_for_each_device(other) {
1209 if (other == mtd) {
1210 ret = mtd;
1211 break;
1212 }
1213 }
1214 } else if (num >= 0) {
1215 ret = idr_find(&mtd_idr, num);
1216 if (mtd && mtd != ret)
1217 ret = NULL;
1218 }
1219
1220 if (!ret) {
1221 ret = ERR_PTR(err);
1222 goto out;
1223 }
1224
1225 err = __get_mtd_device(ret);
1226 if (err)
1227 ret = ERR_PTR(err);
1228out:
1229 mutex_unlock(&mtd_table_mutex);
1230 return ret;
1231}
1232EXPORT_SYMBOL_GPL(get_mtd_device);
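
/*
 * Illustrative sketch (an assumption, not part of this file): look up mtd0 by
 * number, use it, then drop the reference again.
 *
 *   struct mtd_info *mtd = get_mtd_device(NULL, 0);
 *
 *   if (IS_ERR(mtd))
 *           return PTR_ERR(mtd);
 *   ... use mtd_read()/mtd_write() on it ...
 *   put_mtd_device(mtd);
 */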
1233
1234
1235int __get_mtd_device(struct mtd_info *mtd)
1236{
1237 struct mtd_info *master = mtd_get_master(mtd);
1238 int err;
1239
1240 if (master->_get_device) {
1241 err = master->_get_device(mtd);
1242 if (err)
1243 return err;
1244 }
1245
1246 if (!try_module_get(master->owner)) {
1247 if (master->_put_device)
1248 master->_put_device(master);
1249 return -ENODEV;
1250 }
1251
1252 while (mtd) {
1253 if (mtd != master)
1254 kref_get(&mtd->refcnt);
1255 mtd = mtd->parent;
1256 }
1257
1258 if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
1259 kref_get(&master->refcnt);
1260
1261 return 0;
1262}
1263EXPORT_SYMBOL_GPL(__get_mtd_device);
1264
1265/**
1266 * of_get_mtd_device_by_node - obtain an MTD device associated with a given node
1267 *
1268 * @np: device tree node
1269 */
1270struct mtd_info *of_get_mtd_device_by_node(struct device_node *np)
1271{
1272 struct mtd_info *mtd = NULL;
1273 struct mtd_info *tmp;
1274 int err;
1275
1276 mutex_lock(&mtd_table_mutex);
1277
1278 err = -EPROBE_DEFER;
1279 mtd_for_each_device(tmp) {
1280 if (mtd_get_of_node(tmp) == np) {
1281 mtd = tmp;
1282 err = __get_mtd_device(mtd);
1283 break;
1284 }
1285 }
1286
1287 mutex_unlock(&mtd_table_mutex);
1288
1289 return err ? ERR_PTR(err) : mtd;
1290}
1291EXPORT_SYMBOL_GPL(of_get_mtd_device_by_node);
1292
1293/**
1294 * get_mtd_device_nm - obtain a validated handle for an MTD device by
1295 * device name
1296 * @name: MTD device name to open
1297 *
1298 * This function returns MTD device description structure in case of
1299 * success and an error code in case of failure.
1300 */
1301struct mtd_info *get_mtd_device_nm(const char *name)
1302{
1303 int err = -ENODEV;
1304 struct mtd_info *mtd = NULL, *other;
1305
1306 mutex_lock(&mtd_table_mutex);
1307
1308 mtd_for_each_device(other) {
1309 if (!strcmp(name, other->name)) {
1310 mtd = other;
1311 break;
1312 }
1313 }
1314
1315 if (!mtd)
1316 goto out_unlock;
1317
1318 err = __get_mtd_device(mtd);
1319 if (err)
1320 goto out_unlock;
1321
1322 mutex_unlock(&mtd_table_mutex);
1323 return mtd;
1324
1325out_unlock:
1326 mutex_unlock(&mtd_table_mutex);
1327 return ERR_PTR(err);
1328}
1329EXPORT_SYMBOL_GPL(get_mtd_device_nm);
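
/*
 * Illustrative sketch (an assumption, not part of this file): the name-based
 * lookup is convenient when a partition is referenced by label; "env" is a
 * made-up partition name.
 *
 *   struct mtd_info *mtd = get_mtd_device_nm("env");
 *
 *   if (IS_ERR(mtd))
 *           return PTR_ERR(mtd);
 *   ...
 *   put_mtd_device(mtd);
 */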
1330
1331void put_mtd_device(struct mtd_info *mtd)
1332{
1333 mutex_lock(&mtd_table_mutex);
1334 __put_mtd_device(mtd);
1335 mutex_unlock(&mtd_table_mutex);
1336
1337}
1338EXPORT_SYMBOL_GPL(put_mtd_device);
1339
1340void __put_mtd_device(struct mtd_info *mtd)
1341{
1342 struct mtd_info *master = mtd_get_master(mtd);
1343
1344 while (mtd) {
1345 /* kref_put() can release mtd, so keep a reference to mtd->parent */
1346 struct mtd_info *parent = mtd->parent;
1347
1348 if (mtd != master)
1349 kref_put(&mtd->refcnt, mtd_device_release);
1350 mtd = parent;
1351 }
1352
1353 if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER))
1354 kref_put(&master->refcnt, mtd_device_release);
1355
1356 module_put(master->owner);
1357
1358 /* must be the last as master can be freed in the _put_device */
1359 if (master->_put_device)
1360 master->_put_device(master);
1361}
1362EXPORT_SYMBOL_GPL(__put_mtd_device);
1363
1364/*
1365 * Erase is a synchronous operation. Device drivers are expected to return a
1366 * negative error code if the operation failed and update instr->fail_addr
1367 * to point to the portion that was not properly erased.
1368 */
1369int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
1370{
1371 struct mtd_info *master = mtd_get_master(mtd);
1372 u64 mst_ofs = mtd_get_master_ofs(mtd, 0);
1373 struct erase_info adjinstr;
1374 int ret;
1375
1376 instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
1377 adjinstr = *instr;
1378
1379 if (!mtd->erasesize || !master->_erase)
1380 return -ENOTSUPP;
1381
1382 if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr)
1383 return -EINVAL;
1384 if (!(mtd->flags & MTD_WRITEABLE))
1385 return -EROFS;
1386
1387 if (!instr->len)
1388 return 0;
1389
1390 ledtrig_mtd_activity();
1391
1392 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
1393 adjinstr.addr = (loff_t)mtd_div_by_eb(instr->addr, mtd) *
1394 master->erasesize;
1395 adjinstr.len = ((u64)mtd_div_by_eb(instr->addr + instr->len, mtd) *
1396 master->erasesize) -
1397 adjinstr.addr;
1398 }
1399
1400 adjinstr.addr += mst_ofs;
1401
1402 ret = master->_erase(master, &adjinstr);
1403
1404 if (adjinstr.fail_addr != MTD_FAIL_ADDR_UNKNOWN) {
1405 instr->fail_addr = adjinstr.fail_addr - mst_ofs;
1406 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
1407 instr->fail_addr = mtd_div_by_eb(instr->fail_addr,
1408 master);
1409 instr->fail_addr *= mtd->erasesize;
1410 }
1411 }
1412
1413 return ret;
1414}
1415EXPORT_SYMBOL_GPL(mtd_erase);
1416ALLOW_ERROR_INJECTION(mtd_erase, ERRNO);
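
/*
 * Illustrative sketch (an assumption, not part of this file): erase a single
 * erase block at "offs". Only addr/len need to be filled in; fail_addr is
 * updated by the driver on failure, as described above.
 *
 *   struct erase_info ei = {
 *           .addr = offs,
 *           .len  = mtd->erasesize,
 *   };
 *
 *   ret = mtd_erase(mtd, &ei);
 *   if (ret)
 *           pr_err("erase failed at 0x%llx\n",
 *                  (unsigned long long)ei.fail_addr);
 */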
1417
1418/*
1419 * This stuff is for eXecute-In-Place (XIP). phys is optional and may be set to NULL.
1420 */
1421int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
1422 void **virt, resource_size_t *phys)
1423{
1424 struct mtd_info *master = mtd_get_master(mtd);
1425
1426 *retlen = 0;
1427 *virt = NULL;
1428 if (phys)
1429 *phys = 0;
1430 if (!master->_point)
1431 return -EOPNOTSUPP;
1432 if (from < 0 || from >= mtd->size || len > mtd->size - from)
1433 return -EINVAL;
1434 if (!len)
1435 return 0;
1436
1437 from = mtd_get_master_ofs(mtd, from);
1438 return master->_point(master, from, len, retlen, virt, phys);
1439}
1440EXPORT_SYMBOL_GPL(mtd_point);
1441
1442/* We probably shouldn't allow XIP if the unpoint isn't a NULL */
1443int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1444{
1445 struct mtd_info *master = mtd_get_master(mtd);
1446
1447 if (!master->_unpoint)
1448 return -EOPNOTSUPP;
1449 if (from < 0 || from >= mtd->size || len > mtd->size - from)
1450 return -EINVAL;
1451 if (!len)
1452 return 0;
1453 return master->_unpoint(master, mtd_get_master_ofs(mtd, from), len);
1454}
1455EXPORT_SYMBOL_GPL(mtd_unpoint);
1456
1457/*
1458 * Allow NOMMU mmap() to directly map the device (if not NULL)
1459 * - return the address to which the offset maps
1460 * - return -ENOSYS to indicate refusal to do the mapping
1461 */
1462unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
1463 unsigned long offset, unsigned long flags)
1464{
1465 size_t retlen;
1466 void *virt;
1467 int ret;
1468
1469 ret = mtd_point(mtd, offset, len, &retlen, &virt, NULL);
1470 if (ret)
1471 return ret;
1472 if (retlen != len) {
1473 mtd_unpoint(mtd, offset, retlen);
1474 return -ENOSYS;
1475 }
1476 return (unsigned long)virt;
1477}
1478EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
1479
1480static void mtd_update_ecc_stats(struct mtd_info *mtd, struct mtd_info *master,
1481 const struct mtd_ecc_stats *old_stats)
1482{
1483 struct mtd_ecc_stats diff;
1484
1485 if (master == mtd)
1486 return;
1487
1488 diff = master->ecc_stats;
1489 diff.failed -= old_stats->failed;
1490 diff.corrected -= old_stats->corrected;
1491
1492 while (mtd->parent) {
1493 mtd->ecc_stats.failed += diff.failed;
1494 mtd->ecc_stats.corrected += diff.corrected;
1495 mtd = mtd->parent;
1496 }
1497}
1498
1499int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
1500 u_char *buf)
1501{
1502 struct mtd_oob_ops ops = {
1503 .len = len,
1504 .datbuf = buf,
1505 };
1506 int ret;
1507
1508 ret = mtd_read_oob(mtd, from, &ops);
1509 *retlen = ops.retlen;
1510
1511 WARN_ON_ONCE(*retlen != len && mtd_is_bitflip_or_eccerr(ret));
1512
1513 return ret;
1514}
1515EXPORT_SYMBOL_GPL(mtd_read);
1516ALLOW_ERROR_INJECTION(mtd_read, ERRNO);
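
/*
 * Illustrative sketch (an assumption, not part of this file): callers should
 * treat -EUCLEAN as "data is valid, but the number of corrected bitflips
 * reached the threshold", i.e. the block is a candidate for scrubbing rather
 * than a failed read. scrub_block() is a made-up placeholder.
 *
 *   size_t retlen;
 *
 *   ret = mtd_read(mtd, offs, len, &retlen, buf);
 *   if (mtd_is_bitflip(ret))
 *           scrub_block(offs);
 *   else if (ret)
 *           return ret;
 */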
1517
1518int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
1519 const u_char *buf)
1520{
1521 struct mtd_oob_ops ops = {
1522 .len = len,
1523 .datbuf = (u8 *)buf,
1524 };
1525 int ret;
1526
1527 ret = mtd_write_oob(mtd, to, &ops);
1528 *retlen = ops.retlen;
1529
1530 return ret;
1531}
1532EXPORT_SYMBOL_GPL(mtd_write);
1533ALLOW_ERROR_INJECTION(mtd_write, ERRNO);
1534
1535/*
1536 * In blackbox flight recorder like scenarios we want to make successful writes
1537 * in interrupt context. panic_write() is only intended to be called when it's
1538 * known the kernel is about to panic and we need the write to succeed. Since
1539 * the kernel is not going to be running for much longer, this function can
1540 * break locks and delay to ensure the write succeeds (but not sleep).
1541 */
1542int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
1543 const u_char *buf)
1544{
1545 struct mtd_info *master = mtd_get_master(mtd);
1546
1547 *retlen = 0;
1548 if (!master->_panic_write)
1549 return -EOPNOTSUPP;
1550 if (to < 0 || to >= mtd->size || len > mtd->size - to)
1551 return -EINVAL;
1552 if (!(mtd->flags & MTD_WRITEABLE))
1553 return -EROFS;
1554 if (!len)
1555 return 0;
1556 if (!master->oops_panic_write)
1557 master->oops_panic_write = true;
1558
1559 return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len,
1560 retlen, buf);
1561}
1562EXPORT_SYMBOL_GPL(mtd_panic_write);
1563
1564static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
1565 struct mtd_oob_ops *ops)
1566{
1567 /*
1568 * Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving
1569 * ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in
1570 * this case.
1571 */
1572 if (!ops->datbuf)
1573 ops->len = 0;
1574
1575 if (!ops->oobbuf)
1576 ops->ooblen = 0;
1577
1578 if (offs < 0 || offs + ops->len > mtd->size)
1579 return -EINVAL;
1580
1581 if (ops->ooblen) {
1582 size_t maxooblen;
1583
1584 if (ops->ooboffs >= mtd_oobavail(mtd, ops))
1585 return -EINVAL;
1586
1587 maxooblen = ((size_t)(mtd_div_by_ws(mtd->size, mtd) -
1588 mtd_div_by_ws(offs, mtd)) *
1589 mtd_oobavail(mtd, ops)) - ops->ooboffs;
1590 if (ops->ooblen > maxooblen)
1591 return -EINVAL;
1592 }
1593
1594 return 0;
1595}
1596
1597static int mtd_read_oob_std(struct mtd_info *mtd, loff_t from,
1598 struct mtd_oob_ops *ops)
1599{
1600 struct mtd_info *master = mtd_get_master(mtd);
1601 int ret;
1602
1603 from = mtd_get_master_ofs(mtd, from);
1604 if (master->_read_oob)
1605 ret = master->_read_oob(master, from, ops);
1606 else
1607 ret = master->_read(master, from, ops->len, &ops->retlen,
1608 ops->datbuf);
1609
1610 return ret;
1611}
1612
1613static int mtd_write_oob_std(struct mtd_info *mtd, loff_t to,
1614 struct mtd_oob_ops *ops)
1615{
1616 struct mtd_info *master = mtd_get_master(mtd);
1617 int ret;
1618
1619 to = mtd_get_master_ofs(mtd, to);
1620 if (master->_write_oob)
1621 ret = master->_write_oob(master, to, ops);
1622 else
1623 ret = master->_write(master, to, ops->len, &ops->retlen,
1624 ops->datbuf);
1625
1626 return ret;
1627}
1628
1629static int mtd_io_emulated_slc(struct mtd_info *mtd, loff_t start, bool read,
1630 struct mtd_oob_ops *ops)
1631{
1632 struct mtd_info *master = mtd_get_master(mtd);
1633 int ngroups = mtd_pairing_groups(master);
1634 int npairs = mtd_wunit_per_eb(master) / ngroups;
1635 struct mtd_oob_ops adjops = *ops;
1636 unsigned int wunit, oobavail;
1637 struct mtd_pairing_info info;
1638 int max_bitflips = 0;
1639 u32 ebofs, pageofs;
1640 loff_t base, pos;
1641
1642 ebofs = mtd_mod_by_eb(start, mtd);
1643 base = (loff_t)mtd_div_by_eb(start, mtd) * master->erasesize;
1644 info.group = 0;
1645 info.pair = mtd_div_by_ws(ebofs, mtd);
1646 pageofs = mtd_mod_by_ws(ebofs, mtd);
1647 oobavail = mtd_oobavail(mtd, ops);
1648
1649 while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
1650 int ret;
1651
1652 if (info.pair >= npairs) {
1653 info.pair = 0;
1654 base += master->erasesize;
1655 }
1656
1657 wunit = mtd_pairing_info_to_wunit(master, &info);
1658 pos = mtd_wunit_to_offset(mtd, base, wunit);
1659
1660 adjops.len = ops->len - ops->retlen;
1661 if (adjops.len > mtd->writesize - pageofs)
1662 adjops.len = mtd->writesize - pageofs;
1663
1664 adjops.ooblen = ops->ooblen - ops->oobretlen;
1665 if (adjops.ooblen > oobavail - adjops.ooboffs)
1666 adjops.ooblen = oobavail - adjops.ooboffs;
1667
1668 if (read) {
1669 ret = mtd_read_oob_std(mtd, pos + pageofs, &adjops);
1670 if (ret > 0)
1671 max_bitflips = max(max_bitflips, ret);
1672 } else {
1673 ret = mtd_write_oob_std(mtd, pos + pageofs, &adjops);
1674 }
1675
1676 if (ret < 0)
1677 return ret;
1678
1679 max_bitflips = max(max_bitflips, ret);
1680 ops->retlen += adjops.retlen;
1681 ops->oobretlen += adjops.oobretlen;
1682 adjops.datbuf += adjops.retlen;
1683 adjops.oobbuf += adjops.oobretlen;
1684 adjops.ooboffs = 0;
1685 pageofs = 0;
1686 info.pair++;
1687 }
1688
1689 return max_bitflips;
1690}
1691
1692int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
1693{
1694 struct mtd_info *master = mtd_get_master(mtd);
1695 struct mtd_ecc_stats old_stats = master->ecc_stats;
1696 int ret_code;
1697
1698 ops->retlen = ops->oobretlen = 0;
1699
1700 ret_code = mtd_check_oob_ops(mtd, from, ops);
1701 if (ret_code)
1702 return ret_code;
1703
1704 ledtrig_mtd_activity();
1705
1706 /* Check the validity of a potential fallback on mtd->_read */
1707 if (!master->_read_oob && (!master->_read || ops->oobbuf))
1708 return -EOPNOTSUPP;
1709
1710 if (ops->stats)
1711 memset(ops->stats, 0, sizeof(*ops->stats));
1712
1713 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
1714 ret_code = mtd_io_emulated_slc(mtd, from, true, ops);
1715 else
1716 ret_code = mtd_read_oob_std(mtd, from, ops);
1717
1718 mtd_update_ecc_stats(mtd, master, &old_stats);
1719
1720 /*
1721 * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
1722 * similar to mtd->_read(), returning a non-negative integer
1723 * representing max bitflips. In other cases, mtd->_read_oob() may
1724 * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
1725 */
1726 if (unlikely(ret_code < 0))
1727 return ret_code;
1728 if (mtd->ecc_strength == 0)
1729 return 0; /* device lacks ecc */
1730 if (ops->stats)
1731 ops->stats->max_bitflips = ret_code;
1732 return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
1733}
1734EXPORT_SYMBOL_GPL(mtd_read_oob);
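
/*
 * Illustrative sketch (an assumption, not part of this file): read one page
 * of data together with its available OOB bytes. MTD_OPS_AUTO_OOB asks the
 * driver to return only the free OOB bytes, skipping ECC/protected areas.
 *
 *   struct mtd_oob_ops ops = {
 *           .mode   = MTD_OPS_AUTO_OOB,
 *           .len    = mtd->writesize,
 *           .ooblen = mtd->oobavail,
 *           .datbuf = databuf,
 *           .oobbuf = oobbuf,
 *   };
 *
 *   ret = mtd_read_oob(mtd, page_offs, &ops);
 */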
1735
1736int mtd_write_oob(struct mtd_info *mtd, loff_t to,
1737 struct mtd_oob_ops *ops)
1738{
1739 struct mtd_info *master = mtd_get_master(mtd);
1740 int ret;
1741
1742 ops->retlen = ops->oobretlen = 0;
1743
1744 if (!(mtd->flags & MTD_WRITEABLE))
1745 return -EROFS;
1746
1747 ret = mtd_check_oob_ops(mtd, to, ops);
1748 if (ret)
1749 return ret;
1750
1751 ledtrig_mtd_activity();
1752
1753 /* Check the validity of a potential fallback on mtd->_write */
1754 if (!master->_write_oob && (!master->_write || ops->oobbuf))
1755 return -EOPNOTSUPP;
1756
1757 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
1758 return mtd_io_emulated_slc(mtd, to, false, ops);
1759
1760 return mtd_write_oob_std(mtd, to, ops);
1761}
1762EXPORT_SYMBOL_GPL(mtd_write_oob);
1763
1764/**
1765 * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section
1766 * @mtd: MTD device structure
1767 * @section: ECC section. Depending on the layout you may have all the ECC
1768 * bytes stored in a single contiguous section, or one section
1769 * per ECC chunk (and sometimes several sections for a single
1770 * ECC chunk)
1771 * @oobecc: OOB region struct filled with the appropriate ECC position
1772 * information
1773 *
1774 * This function returns ECC section information in the OOB area. If you want
1775 * to get all the ECC bytes information, then you should call
1776 * mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE.
1777 *
1778 * Returns zero on success, a negative error code otherwise.
1779 */
1780int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
1781 struct mtd_oob_region *oobecc)
1782{
1783 struct mtd_info *master = mtd_get_master(mtd);
1784
1785 memset(oobecc, 0, sizeof(*oobecc));
1786
1787 if (!master || section < 0)
1788 return -EINVAL;
1789
1790 if (!master->ooblayout || !master->ooblayout->ecc)
1791 return -ENOTSUPP;
1792
1793 return master->ooblayout->ecc(master, section, oobecc);
1794}
1795EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
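
/*
 * Illustrative sketch (an assumption, not part of this file): walk every ECC
 * section of the layout, as suggested in the kerneldoc above, stopping when
 * the iterator returns -ERANGE.
 *
 *   struct mtd_oob_region region;
 *   int section = 0, err;
 *
 *   while (!(err = mtd_ooblayout_ecc(mtd, section++, &region)))
 *           pr_debug("ECC bytes at oob[%u..%u]\n", region.offset,
 *                    region.offset + region.length - 1);
 *
 *   if (err != -ERANGE)
 *           return err;
 */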
1796
1797/**
1798 * mtd_ooblayout_free - Get the OOB region definition of a specific free
1799 * section
1800 * @mtd: MTD device structure
1801 * @section: Free section you are interested in. Depending on the layout
1802 * you may have all the free bytes stored in a single contiguous
1803 * section, or one section per ECC chunk plus an extra section
1804 * for the remaining bytes (or other funky layout).
1805 * @oobfree: OOB region struct filled with the appropriate free position
1806 * information
1807 *
1808 * This function returns free bytes position in the OOB area. If you want
1809 * to get all the free bytes information, then you should call
1810 * mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE.
1811 *
1812 * Returns zero on success, a negative error code otherwise.
1813 */
1814int mtd_ooblayout_free(struct mtd_info *mtd, int section,
1815 struct mtd_oob_region *oobfree)
1816{
1817 struct mtd_info *master = mtd_get_master(mtd);
1818
1819 memset(oobfree, 0, sizeof(*oobfree));
1820
1821 if (!master || section < 0)
1822 return -EINVAL;
1823
1824 if (!master->ooblayout || !master->ooblayout->free)
1825 return -ENOTSUPP;
1826
1827 return master->ooblayout->free(master, section, oobfree);
1828}
1829EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
1830
1831/**
1832 * mtd_ooblayout_find_region - Find the region attached to a specific byte
1833 * @mtd: mtd info structure
1834 * @byte: the byte we are searching for
1835 * @sectionp: pointer where the section id will be stored
1836 * @oobregion: used to retrieve the ECC position
1837 * @iter: iterator function. Should be either mtd_ooblayout_free or
1838 * mtd_ooblayout_ecc depending on the region type you're searching for
1839 *
1840 * This function returns the section id and oobregion information of a
1841 * specific byte. For example, say you want to know where the 4th ECC byte is
1842 * stored, you'll use:
1843 *
1844 * mtd_ooblayout_find_region(mtd, 3, &section, &oobregion, mtd_ooblayout_ecc);
1845 *
1846 * Returns zero on success, a negative error code otherwise.
1847 */
1848static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
1849 int *sectionp, struct mtd_oob_region *oobregion,
1850 int (*iter)(struct mtd_info *,
1851 int section,
1852 struct mtd_oob_region *oobregion))
1853{
1854 int pos = 0, ret, section = 0;
1855
1856 memset(oobregion, 0, sizeof(*oobregion));
1857
1858 while (1) {
1859 ret = iter(mtd, section, oobregion);
1860 if (ret)
1861 return ret;
1862
1863 if (pos + oobregion->length > byte)
1864 break;
1865
1866 pos += oobregion->length;
1867 section++;
1868 }
1869
1870 /*
1871 * Adjust region info to make it start at the beginning of the
1872 * 'start' ECC byte.
1873 */
1874 oobregion->offset += byte - pos;
1875 oobregion->length -= byte - pos;
1876 *sectionp = section;
1877
1878 return 0;
1879}
1880
1881/**
1882 * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific
1883 * ECC byte
1884 * @mtd: mtd info structure
1885 * @eccbyte: the byte we are searching for
1886 * @section: pointer where the section id will be stored
1887 * @oobregion: OOB region information
1888 *
1889 * Works like mtd_ooblayout_find_region() except it searches for a specific ECC
1890 * byte.
1891 *
1892 * Returns zero on success, a negative error code otherwise.
1893 */
1894int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
1895 int *section,
1896 struct mtd_oob_region *oobregion)
1897{
1898 return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
1899 mtd_ooblayout_ecc);
1900}
1901EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion);
1902
1903/**
1904 * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer
1905 * @mtd: mtd info structure
1906 * @buf: destination buffer to store OOB bytes
1907 * @oobbuf: OOB buffer
1908 * @start: first byte to retrieve
1909 * @nbytes: number of bytes to retrieve
1910 * @iter: section iterator
1911 *
1912 * Extract bytes attached to a specific category (ECC or free)
1913 * from the OOB buffer and copy them into buf.
1914 *
1915 * Returns zero on success, a negative error code otherwise.
1916 */
1917static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
1918 const u8 *oobbuf, int start, int nbytes,
1919 int (*iter)(struct mtd_info *,
1920 int section,
1921 struct mtd_oob_region *oobregion))
1922{
1923 struct mtd_oob_region oobregion;
1924 int section, ret;
1925
1926 ret = mtd_ooblayout_find_region(mtd, start, &section,
1927 &oobregion, iter);
1928
1929 while (!ret) {
1930 int cnt;
1931
1932 cnt = min_t(int, nbytes, oobregion.length);
1933 memcpy(buf, oobbuf + oobregion.offset, cnt);
1934 buf += cnt;
1935 nbytes -= cnt;
1936
1937 if (!nbytes)
1938 break;
1939
1940 ret = iter(mtd, ++section, &oobregion);
1941 }
1942
1943 return ret;
1944}
1945
1946/**
1947 * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer
1948 * @mtd: mtd info structure
1949 * @buf: source buffer to get OOB bytes from
1950 * @oobbuf: OOB buffer
1951 * @start: first OOB byte to set
1952 * @nbytes: number of OOB bytes to set
1953 * @iter: section iterator
1954 *
1955 * Fill the OOB buffer with data provided in buf. The category (ECC or free)
1956 * is selected by passing the appropriate iterator.
1957 *
1958 * Returns zero on success, a negative error code otherwise.
1959 */
1960static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
1961 u8 *oobbuf, int start, int nbytes,
1962 int (*iter)(struct mtd_info *,
1963 int section,
1964 struct mtd_oob_region *oobregion))
1965{
1966 struct mtd_oob_region oobregion;
1967 int section, ret;
1968
1969 ret = mtd_ooblayout_find_region(mtd, start, &section,
1970 &oobregion, iter);
1971
1972 while (!ret) {
1973 int cnt;
1974
1975 cnt = min_t(int, nbytes, oobregion.length);
1976 memcpy(oobbuf + oobregion.offset, buf, cnt);
1977 buf += cnt;
1978 nbytes -= cnt;
1979
1980 if (!nbytes)
1981 break;
1982
1983 ret = iter(mtd, ++section, &oobregion);
1984 }
1985
1986 return ret;
1987}
1988
1989/**
1990 * mtd_ooblayout_count_bytes - count the number of bytes in an OOB category
1991 * @mtd: mtd info structure
1992 * @iter: category iterator
1993 *
1994 * Count the number of bytes in a given category.
1995 *
1996 * Returns the number of bytes in the category on success, a negative error code otherwise.
1997 */
1998static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
1999 int (*iter)(struct mtd_info *,
2000 int section,
2001 struct mtd_oob_region *oobregion))
2002{
2003 struct mtd_oob_region oobregion;
2004 int section = 0, ret, nbytes = 0;
2005
2006 while (1) {
2007 ret = iter(mtd, section++, &oobregion);
2008 if (ret) {
2009 if (ret == -ERANGE)
2010 ret = nbytes;
2011 break;
2012 }
2013
2014 nbytes += oobregion.length;
2015 }
2016
2017 return ret;
2018}
2019
2020/**
2021 * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer
2022 * @mtd: mtd info structure
2023 * @eccbuf: destination buffer to store ECC bytes
2024 * @oobbuf: OOB buffer
2025 * @start: first ECC byte to retrieve
2026 * @nbytes: number of ECC bytes to retrieve
2027 *
2028 * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes.
2029 *
2030 * Returns zero on success, a negative error code otherwise.
2031 */
2032int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
2033 const u8 *oobbuf, int start, int nbytes)
2034{
2035 return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
2036 mtd_ooblayout_ecc);
2037}
2038EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes);
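
/*
 * Example (illustrative sketch, not part of the original file): read a
 * page's raw OOB area and extract its ECC bytes with
 * mtd_ooblayout_get_eccbytes(). The helper name and parameters are
 * hypothetical; mtd_read_oob(), struct mtd_oob_ops and MTD_OPS_RAW are the
 * real interfaces used.
 */
static int __maybe_unused example_read_ecc_bytes(struct mtd_info *mtd,
						 loff_t page_addr,
						 u8 *eccbuf, int ecclen)
{
	struct mtd_oob_ops ops = { };
	u8 *oobbuf;
	int ret;

	oobbuf = kmalloc(mtd->oobsize, GFP_KERNEL);
	if (!oobbuf)
		return -ENOMEM;

	ops.mode = MTD_OPS_RAW;
	ops.ooblen = mtd->oobsize;
	ops.oobbuf = oobbuf;

	ret = mtd_read_oob(mtd, page_addr, &ops);
	if (!ret)
		ret = mtd_ooblayout_get_eccbytes(mtd, eccbuf, oobbuf, 0, ecclen);

	kfree(oobbuf);
	return ret;
}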
2039
2040/**
2041 * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer
2042 * @mtd: mtd info structure
2043 * @eccbuf: source buffer to get ECC bytes from
2044 * @oobbuf: OOB buffer
2045 * @start: first ECC byte to set
2046 * @nbytes: number of ECC bytes to set
2047 *
2048 * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes.
2049 *
2050 * Returns zero on success, a negative error code otherwise.
2051 */
2052int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
2053 u8 *oobbuf, int start, int nbytes)
2054{
2055 return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
2056 mtd_ooblayout_ecc);
2057}
2058EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes);
2059
2060/**
2061 * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer
2062 * @mtd: mtd info structure
2063 * @databuf: destination buffer to store data bytes
2064 * @oobbuf: OOB buffer
2065 * @start: first data byte to retrieve
2066 * @nbytes: number of data bytes to retrieve
2067 *
2068 * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
2069 *
2070 * Returns zero on success, a negative error code otherwise.
2071 */
2072int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
2073 const u8 *oobbuf, int start, int nbytes)
2074{
2075 return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
2076 mtd_ooblayout_free);
2077}
2078EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);
2079
2080/**
2081 * mtd_ooblayout_set_databytes - set data bytes into the oob buffer
2082 * @mtd: mtd info structure
2083 * @databuf: source buffer to get data bytes from
2084 * @oobbuf: OOB buffer
2085 * @start: first data byte to set
2086 * @nbytes: number of data bytes to set
2087 *
2088 * Works like mtd_ooblayout_set_bytes(), except it acts on free bytes.
2089 *
2090 * Returns zero on success, a negative error code otherwise.
2091 */
2092int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
2093 u8 *oobbuf, int start, int nbytes)
2094{
2095 return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
2096 mtd_ooblayout_free);
2097}
2098EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes);
2099
2100/**
2101 * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB
2102 * @mtd: mtd info structure
2103 *
2104 * Works like mtd_ooblayout_count_bytes(), except it counts free bytes.
2105 *
2106 * Returns the number of free bytes on success, a negative error code otherwise.
2107 */
2108int mtd_ooblayout_count_freebytes(struct mtd_info *mtd)
2109{
2110 return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
2111}
2112EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes);
2113
2114/**
2115 * mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB
2116 * @mtd: mtd info structure
2117 *
2118 * Works like mtd_ooblayout_count_bytes(), except it counts ECC bytes.
2119 *
2120 * Returns the number of ECC bytes on success, a negative error code otherwise.
2121 */
2122int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd)
2123{
2124 return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
2125}
2126EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);
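
/*
 * Example (illustrative sketch, not part of the original file): use the
 * counting helpers to check that an OOB layout does not describe more
 * bytes than the device's OOB area provides. The helper name is
 * hypothetical.
 */
static int __maybe_unused example_check_oob_layout(struct mtd_info *mtd)
{
	int eccbytes = mtd_ooblayout_count_eccbytes(mtd);
	int freebytes = mtd_ooblayout_count_freebytes(mtd);

	if (eccbytes < 0)
		return eccbytes;
	if (freebytes < 0)
		return freebytes;

	if (eccbytes + freebytes > mtd->oobsize)
		return -EINVAL;

	return 0;
}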
2127
2128/*
2129 * Methods to access the protection register area present in some flash
2130 * devices. The user data is one-time programmable, but the factory data is
2131 * read-only.
2132 */
2133int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
2134 struct otp_info *buf)
2135{
2136 struct mtd_info *master = mtd_get_master(mtd);
2137
2138 if (!master->_get_fact_prot_info)
2139 return -EOPNOTSUPP;
2140 if (!len)
2141 return 0;
2142 return master->_get_fact_prot_info(master, len, retlen, buf);
2143}
2144EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);
2145
2146int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
2147 size_t *retlen, u_char *buf)
2148{
2149 struct mtd_info *master = mtd_get_master(mtd);
2150
2151 *retlen = 0;
2152 if (!master->_read_fact_prot_reg)
2153 return -EOPNOTSUPP;
2154 if (!len)
2155 return 0;
2156 return master->_read_fact_prot_reg(master, from, len, retlen, buf);
2157}
2158EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);
2159
2160int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
2161 struct otp_info *buf)
2162{
2163 struct mtd_info *master = mtd_get_master(mtd);
2164
2165 if (!master->_get_user_prot_info)
2166 return -EOPNOTSUPP;
2167 if (!len)
2168 return 0;
2169 return master->_get_user_prot_info(master, len, retlen, buf);
2170}
2171EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);
2172
2173int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
2174 size_t *retlen, u_char *buf)
2175{
2176 struct mtd_info *master = mtd_get_master(mtd);
2177
2178 *retlen = 0;
2179 if (!master->_read_user_prot_reg)
2180 return -EOPNOTSUPP;
2181 if (!len)
2182 return 0;
2183 return master->_read_user_prot_reg(master, from, len, retlen, buf);
2184}
2185EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
2186
2187int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
2188 size_t *retlen, const u_char *buf)
2189{
2190 struct mtd_info *master = mtd_get_master(mtd);
2191 int ret;
2192
2193 *retlen = 0;
2194 if (!master->_write_user_prot_reg)
2195 return -EOPNOTSUPP;
2196 if (!len)
2197 return 0;
2198 ret = master->_write_user_prot_reg(master, to, len, retlen, buf);
2199 if (ret)
2200 return ret;
2201
2202 /*
2203	 * If no data could be written at all, the OTP area is exhausted
2204	 * and we must return -ENOSPC.
2205 */
2206 return (*retlen) ? 0 : -ENOSPC;
2207}
2208EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);
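
/*
 * Example (illustrative sketch, not part of the original file): write a
 * blob into the user OTP area and read it back. The offset 0 and the
 * helper name are hypothetical; a real caller would first query the
 * available regions with mtd_get_user_prot_info().
 */
static int __maybe_unused example_otp_roundtrip(struct mtd_info *mtd,
						const u8 *data, size_t len,
						u8 *verify)
{
	size_t retlen;
	int ret;

	ret = mtd_write_user_prot_reg(mtd, 0, len, &retlen, data);
	if (ret)
		return ret;

	ret = mtd_read_user_prot_reg(mtd, 0, len, &retlen, verify);
	if (ret)
		return ret;

	return retlen == len ? 0 : -EIO;
}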
2209
2210int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
2211{
2212 struct mtd_info *master = mtd_get_master(mtd);
2213
2214 if (!master->_lock_user_prot_reg)
2215 return -EOPNOTSUPP;
2216 if (!len)
2217 return 0;
2218 return master->_lock_user_prot_reg(master, from, len);
2219}
2220EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);
2221
2222int mtd_erase_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
2223{
2224 struct mtd_info *master = mtd_get_master(mtd);
2225
2226 if (!master->_erase_user_prot_reg)
2227 return -EOPNOTSUPP;
2228 if (!len)
2229 return 0;
2230 return master->_erase_user_prot_reg(master, from, len);
2231}
2232EXPORT_SYMBOL_GPL(mtd_erase_user_prot_reg);
2233
2234/* Chip-supported device locking */
2235int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2236{
2237 struct mtd_info *master = mtd_get_master(mtd);
2238
2239 if (!master->_lock)
2240 return -EOPNOTSUPP;
2241 if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2242 return -EINVAL;
2243 if (!len)
2244 return 0;
2245
2246 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2247 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2248 len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2249 }
2250
2251 return master->_lock(master, mtd_get_master_ofs(mtd, ofs), len);
2252}
2253EXPORT_SYMBOL_GPL(mtd_lock);
2254
2255int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2256{
2257 struct mtd_info *master = mtd_get_master(mtd);
2258
2259 if (!master->_unlock)
2260 return -EOPNOTSUPP;
2261 if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2262 return -EINVAL;
2263 if (!len)
2264 return 0;
2265
2266 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2267 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2268 len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2269 }
2270
2271 return master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len);
2272}
2273EXPORT_SYMBOL_GPL(mtd_unlock);
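
/*
 * Example (illustrative sketch, not part of the original file): run a
 * caller-supplied operation on one erase block with hardware protection
 * temporarily lifted. The helper name is hypothetical; devices without
 * lock support report -EOPNOTSUPP, which is treated as "nothing to do".
 */
static int __maybe_unused example_with_block_unlocked(struct mtd_info *mtd, loff_t ofs,
						      int (*fn)(struct mtd_info *, loff_t))
{
	int ret;

	ret = mtd_unlock(mtd, ofs, mtd->erasesize);
	if (ret && ret != -EOPNOTSUPP)
		return ret;

	ret = fn(mtd, ofs);

	/* Best-effort re-lock; ignore the result for unprotected parts. */
	mtd_lock(mtd, ofs, mtd->erasesize);

	return ret;
}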
2274
2275int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2276{
2277 struct mtd_info *master = mtd_get_master(mtd);
2278
2279 if (!master->_is_locked)
2280 return -EOPNOTSUPP;
2281 if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2282 return -EINVAL;
2283 if (!len)
2284 return 0;
2285
2286 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2287 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2288 len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2289 }
2290
2291 return master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len);
2292}
2293EXPORT_SYMBOL_GPL(mtd_is_locked);
2294
2295int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
2296{
2297 struct mtd_info *master = mtd_get_master(mtd);
2298
2299 if (ofs < 0 || ofs >= mtd->size)
2300 return -EINVAL;
2301 if (!master->_block_isreserved)
2302 return 0;
2303
2304 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2305 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2306
2307 return master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs));
2308}
2309EXPORT_SYMBOL_GPL(mtd_block_isreserved);
2310
2311int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
2312{
2313 struct mtd_info *master = mtd_get_master(mtd);
2314
2315 if (ofs < 0 || ofs >= mtd->size)
2316 return -EINVAL;
2317 if (!master->_block_isbad)
2318 return 0;
2319
2320 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2321 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2322
2323 return master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs));
2324}
2325EXPORT_SYMBOL_GPL(mtd_block_isbad);
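
/*
 * Example (illustrative sketch, not part of the original file): count the
 * bad blocks on a device by walking it one erase block at a time.
 * mtd_block_isbad() returns 0 (good), a positive value (bad) or a negative
 * error code. The helper name is hypothetical.
 */
static int __maybe_unused example_count_bad_blocks(struct mtd_info *mtd)
{
	loff_t ofs;
	int bad = 0, ret;

	for (ofs = 0; ofs < mtd->size; ofs += mtd->erasesize) {
		ret = mtd_block_isbad(mtd, ofs);
		if (ret < 0)
			return ret;
		if (ret)
			bad++;
	}

	return bad;
}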
2326
2327int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
2328{
2329 struct mtd_info *master = mtd_get_master(mtd);
2330 int ret;
2331
2332 if (!master->_block_markbad)
2333 return -EOPNOTSUPP;
2334 if (ofs < 0 || ofs >= mtd->size)
2335 return -EINVAL;
2336 if (!(mtd->flags & MTD_WRITEABLE))
2337 return -EROFS;
2338
2339 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2340 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2341
2342 ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs));
2343 if (ret)
2344 return ret;
2345
2346 while (mtd->parent) {
2347 mtd->ecc_stats.badblocks++;
2348 mtd = mtd->parent;
2349 }
2350
2351 return 0;
2352}
2353EXPORT_SYMBOL_GPL(mtd_block_markbad);
2354ALLOW_ERROR_INJECTION(mtd_block_markbad, ERRNO);
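
/*
 * Example (illustrative sketch, not part of the original file): the usual
 * caller pattern around mtd_block_markbad() - when an erase fails, retire
 * the block so higher layers skip it from then on. 'ofs' is assumed to be
 * erase-block aligned; the helper name is hypothetical.
 */
static int __maybe_unused example_erase_or_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct erase_info ei = {
		.addr = ofs,
		.len = mtd->erasesize,
	};
	int ret;

	ret = mtd_erase(mtd, &ei);
	if (!ret)
		return 0;

	/* Erase failed: mark the block bad. */
	return mtd_block_markbad(mtd, ofs);
}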
2355
2356/*
2357 * default_mtd_writev - the default writev method
2358 * @mtd: mtd device description object pointer
2359 * @vecs: the vectors to write
2360 * @count: count of vectors in @vecs
2361 * @to: the MTD device offset to write to
2362 * @retlen: on exit contains the count of bytes written to the MTD device.
2363 *
2364 * This function returns zero in case of success and a negative error code in
2365 * case of failure.
2366 */
2367static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
2368 unsigned long count, loff_t to, size_t *retlen)
2369{
2370 unsigned long i;
2371 size_t totlen = 0, thislen;
2372 int ret = 0;
2373
2374 for (i = 0; i < count; i++) {
2375 if (!vecs[i].iov_len)
2376 continue;
2377 ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
2378 vecs[i].iov_base);
2379 totlen += thislen;
2380 if (ret || thislen != vecs[i].iov_len)
2381 break;
2382 to += vecs[i].iov_len;
2383 }
2384 *retlen = totlen;
2385 return ret;
2386}
2387
2388/*
2389 * mtd_writev - the vector-based MTD write method
2390 * @mtd: mtd device description object pointer
2391 * @vecs: the vectors to write
2392 * @count: count of vectors in @vecs
2393 * @to: the MTD device offset to write to
2394 * @retlen: on exit contains the count of bytes written to the MTD device.
2395 *
2396 * This function returns zero in case of success and a negative error code in
2397 * case of failure.
2398 */
2399int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
2400 unsigned long count, loff_t to, size_t *retlen)
2401{
2402 struct mtd_info *master = mtd_get_master(mtd);
2403
2404 *retlen = 0;
2405 if (!(mtd->flags & MTD_WRITEABLE))
2406 return -EROFS;
2407
2408 if (!master->_writev)
2409 return default_mtd_writev(mtd, vecs, count, to, retlen);
2410
2411 return master->_writev(master, vecs, count,
2412 mtd_get_master_ofs(mtd, to), retlen);
2413}
2414EXPORT_SYMBOL_GPL(mtd_writev);
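
/*
 * Example (illustrative sketch, not part of the original file): write a
 * header and a payload in a single vectored call. struct kvec and
 * mtd_writev() are the real interfaces; the buffers and the helper name
 * are hypothetical.
 */
static int __maybe_unused example_write_header_and_payload(struct mtd_info *mtd, loff_t to,
							   void *hdr, size_t hdrlen,
							   void *payload, size_t paylen)
{
	struct kvec vecs[2] = {
		{ .iov_base = hdr,     .iov_len = hdrlen },
		{ .iov_base = payload, .iov_len = paylen },
	};
	size_t retlen;
	int ret;

	ret = mtd_writev(mtd, vecs, ARRAY_SIZE(vecs), to, &retlen);
	if (ret)
		return ret;

	return retlen == hdrlen + paylen ? 0 : -EIO;
}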
2415
2416/**
2417 * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
2418 * @mtd: mtd device description object pointer
2419 * @size: a pointer to the ideal or maximum size of the allocation, points
2420 * to the actual allocation size on success.
2421 *
2422 * This routine attempts to allocate a contiguous kernel buffer up to
2423 * the specified size, backing off the size of the request exponentially
2424 * until the request succeeds or until the allocation size falls below
2425 * the system page size. This attempts to make sure it does not adversely
2426 * impact system performance, so when allocating more than one page, we
2427 * ask the memory allocator to avoid re-trying, swapping, writing back
2428 * or performing I/O.
2429 *
2430 * Note, this function also makes sure that the allocated buffer is aligned to
2431 * the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
2432 *
2433 * This is called, for example, by mtd_{read,write} and jffs2_scan_medium
2434 * to fall back to smaller (i.e. degraded) buffer allocations under
2435 * low-memory or fragmented-memory conditions, where a reduction from the
2436 * requested ideal size is acceptable.
2437 *
2438 * Returns a pointer to the allocated buffer on success; otherwise, NULL.
2439 */
2440void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
2441{
2442 gfp_t flags = __GFP_NOWARN | __GFP_DIRECT_RECLAIM | __GFP_NORETRY;
2443 size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
2444 void *kbuf;
2445
2446 *size = min_t(size_t, *size, KMALLOC_MAX_SIZE);
2447
2448 while (*size > min_alloc) {
2449 kbuf = kmalloc(*size, flags);
2450 if (kbuf)
2451 return kbuf;
2452
2453 *size >>= 1;
2454 *size = ALIGN(*size, mtd->writesize);
2455 }
2456
2457 /*
2458	 * For the last-resort allocation, allow kmalloc() to do all sorts of
2459	 * things (write-back, dropping caches, etc.) by using GFP_KERNEL.
2460 */
2461 return kmalloc(*size, GFP_KERNEL);
2462}
2463EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
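
/*
 * Example (illustrative sketch, not part of the original file): request one
 * erase block worth of buffer but accept whatever smaller, writesize-aligned
 * size mtd_kmalloc_up_to() manages to allocate under memory pressure. The
 * caller frees the buffer with kfree(); the helper name is hypothetical.
 */
static __maybe_unused void *example_alloc_chunk(struct mtd_info *mtd, size_t *chunk)
{
	*chunk = mtd->erasesize;

	return mtd_kmalloc_up_to(mtd, chunk);
}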
2464
2465#ifdef CONFIG_PROC_FS
2466
2467/*====================================================================*/
2468/* Support for /proc/mtd */
2469
2470static int mtd_proc_show(struct seq_file *m, void *v)
2471{
2472 struct mtd_info *mtd;
2473
2474 seq_puts(m, "dev: size erasesize name\n");
2475 mutex_lock(&mtd_table_mutex);
2476 mtd_for_each_device(mtd) {
2477 seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
2478 mtd->index, (unsigned long long)mtd->size,
2479 mtd->erasesize, mtd->name);
2480 }
2481 mutex_unlock(&mtd_table_mutex);
2482 return 0;
2483}
2484#endif /* CONFIG_PROC_FS */
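
/*
 * For reference, mtd_proc_show() produces /proc/mtd output of the form
 * below (device names and sizes are illustrative only):
 *
 *   dev:    size   erasesize  name
 *   mtd0: 01000000 00020000 "boot"
 *   mtd1: 0f000000 00020000 "rootfs"
 */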
2485
2486/*====================================================================*/
2487/* Init code */
2488
2489static struct backing_dev_info * __init mtd_bdi_init(const char *name)
2490{
2491 struct backing_dev_info *bdi;
2492 int ret;
2493
2494 bdi = bdi_alloc(NUMA_NO_NODE);
2495 if (!bdi)
2496 return ERR_PTR(-ENOMEM);
2497 bdi->ra_pages = 0;
2498 bdi->io_pages = 0;
2499
2500 /*
2501	 * We append a '-0' suffix to the name to keep the historical name
2502	 * format. Since this is called only once, the name is unique.
2503 */
2504 ret = bdi_register(bdi, "%.28s-0", name);
2505 if (ret)
2506 bdi_put(bdi);
2507
2508 return ret ? ERR_PTR(ret) : bdi;
2509}
2510
2511static struct proc_dir_entry *proc_mtd;
2512
2513static int __init init_mtd(void)
2514{
2515 int ret;
2516
2517 ret = class_register(&mtd_class);
2518 if (ret)
2519 goto err_reg;
2520
2521 mtd_bdi = mtd_bdi_init("mtd");
2522 if (IS_ERR(mtd_bdi)) {
2523 ret = PTR_ERR(mtd_bdi);
2524 goto err_bdi;
2525 }
2526
2527 proc_mtd = proc_create_single("mtd", 0, NULL, mtd_proc_show);
2528
2529 ret = init_mtdchar();
2530 if (ret)
2531 goto out_procfs;
2532
2533 dfs_dir_mtd = debugfs_create_dir("mtd", NULL);
2534 debugfs_create_bool("expert_analysis_mode", 0600, dfs_dir_mtd,
2535 &mtd_expert_analysis_mode);
2536
2537 return 0;
2538
2539out_procfs:
2540 if (proc_mtd)
2541 remove_proc_entry("mtd", NULL);
2542 bdi_unregister(mtd_bdi);
2543 bdi_put(mtd_bdi);
2544err_bdi:
2545 class_unregister(&mtd_class);
2546err_reg:
2547 pr_err("Error registering mtd class or bdi: %d\n", ret);
2548 return ret;
2549}
2550
2551static void __exit cleanup_mtd(void)
2552{
2553 debugfs_remove_recursive(dfs_dir_mtd);
2554 cleanup_mtdchar();
2555 if (proc_mtd)
2556 remove_proc_entry("mtd", NULL);
2557 class_unregister(&mtd_class);
2558 bdi_unregister(mtd_bdi);
2559 bdi_put(mtd_bdi);
2560 idr_destroy(&mtd_idr);
2561}
2562
2563module_init(init_mtd);
2564module_exit(cleanup_mtd);
2565
2566MODULE_LICENSE("GPL");
2567MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
2568MODULE_DESCRIPTION("Core MTD registration and access routines");