1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Core registration and callback routines for MTD
4 * drivers and users.
5 *
6 * Copyright © 1999-2010 David Woodhouse <dwmw2@infradead.org>
7 * Copyright © 2006 Red Hat UK Limited
8 */
9
10#include <linux/module.h>
11#include <linux/kernel.h>
12#include <linux/ptrace.h>
13#include <linux/seq_file.h>
14#include <linux/string.h>
15#include <linux/timer.h>
16#include <linux/major.h>
17#include <linux/fs.h>
18#include <linux/err.h>
19#include <linux/ioctl.h>
20#include <linux/init.h>
21#include <linux/of.h>
22#include <linux/proc_fs.h>
23#include <linux/idr.h>
24#include <linux/backing-dev.h>
25#include <linux/gfp.h>
26#include <linux/slab.h>
27#include <linux/reboot.h>
28#include <linux/leds.h>
29#include <linux/debugfs.h>
30#include <linux/nvmem-provider.h>
31
32#include <linux/mtd/mtd.h>
33#include <linux/mtd/partitions.h>
34
35#include "mtdcore.h"
36
37struct backing_dev_info *mtd_bdi;
38
39#ifdef CONFIG_PM_SLEEP
40
41static int mtd_cls_suspend(struct device *dev)
42{
43 struct mtd_info *mtd = dev_get_drvdata(dev);
44
45 return mtd ? mtd_suspend(mtd) : 0;
46}
47
48static int mtd_cls_resume(struct device *dev)
49{
50 struct mtd_info *mtd = dev_get_drvdata(dev);
51
52 if (mtd)
53 mtd_resume(mtd);
54 return 0;
55}
56
57static SIMPLE_DEV_PM_OPS(mtd_cls_pm_ops, mtd_cls_suspend, mtd_cls_resume);
58#define MTD_CLS_PM_OPS (&mtd_cls_pm_ops)
59#else
60#define MTD_CLS_PM_OPS NULL
61#endif
62
63static struct class mtd_class = {
64 .name = "mtd",
65 .owner = THIS_MODULE,
66 .pm = MTD_CLS_PM_OPS,
67};
68
69static DEFINE_IDR(mtd_idr);
70
71/* These are exported solely for the purpose of mtd_blkdevs.c. You
72 should not use them for _anything_ else */
73DEFINE_MUTEX(mtd_table_mutex);
74EXPORT_SYMBOL_GPL(mtd_table_mutex);
75
76struct mtd_info *__mtd_next_device(int i)
77{
78 return idr_get_next(&mtd_idr, &i);
79}
80EXPORT_SYMBOL_GPL(__mtd_next_device);
81
82static LIST_HEAD(mtd_notifiers);
83
84
85#define MTD_DEVT(index) MKDEV(MTD_CHAR_MAJOR, (index)*2)
86
87/* REVISIT once MTD uses the driver model better, whoever allocates
88 * the mtd_info will probably want to use the release() hook...
89 */
90static void mtd_release(struct device *dev)
91{
92 struct mtd_info *mtd = dev_get_drvdata(dev);
93 dev_t index = MTD_DEVT(mtd->index);
94
95 /* remove /dev/mtdXro node */
96 device_destroy(&mtd_class, index + 1);
97}
98
99#define MTD_DEVICE_ATTR_RO(name) \
100static DEVICE_ATTR(name, 0444, mtd_##name##_show, NULL)
101
102#define MTD_DEVICE_ATTR_RW(name) \
103static DEVICE_ATTR(name, 0644, mtd_##name##_show, mtd_##name##_store)
104
105static ssize_t mtd_type_show(struct device *dev,
106 struct device_attribute *attr, char *buf)
107{
108 struct mtd_info *mtd = dev_get_drvdata(dev);
109 char *type;
110
111 switch (mtd->type) {
112 case MTD_ABSENT:
113 type = "absent";
114 break;
115 case MTD_RAM:
116 type = "ram";
117 break;
118 case MTD_ROM:
119 type = "rom";
120 break;
121 case MTD_NORFLASH:
122 type = "nor";
123 break;
124 case MTD_NANDFLASH:
125 type = "nand";
126 break;
127 case MTD_DATAFLASH:
128 type = "dataflash";
129 break;
130 case MTD_UBIVOLUME:
131 type = "ubi";
132 break;
133 case MTD_MLCNANDFLASH:
134 type = "mlc-nand";
135 break;
136 default:
137 type = "unknown";
138 }
139
140 return sysfs_emit(buf, "%s\n", type);
141}
142MTD_DEVICE_ATTR_RO(type);
143
144static ssize_t mtd_flags_show(struct device *dev,
145 struct device_attribute *attr, char *buf)
146{
147 struct mtd_info *mtd = dev_get_drvdata(dev);
148
149 return sysfs_emit(buf, "0x%lx\n", (unsigned long)mtd->flags);
150}
151MTD_DEVICE_ATTR_RO(flags);
152
153static ssize_t mtd_size_show(struct device *dev,
154 struct device_attribute *attr, char *buf)
155{
156 struct mtd_info *mtd = dev_get_drvdata(dev);
157
158 return sysfs_emit(buf, "%llu\n", (unsigned long long)mtd->size);
159}
160MTD_DEVICE_ATTR_RO(size);
161
162static ssize_t mtd_erasesize_show(struct device *dev,
163 struct device_attribute *attr, char *buf)
164{
165 struct mtd_info *mtd = dev_get_drvdata(dev);
166
167 return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->erasesize);
168}
169MTD_DEVICE_ATTR_RO(erasesize);
170
171static ssize_t mtd_writesize_show(struct device *dev,
172 struct device_attribute *attr, char *buf)
173{
174 struct mtd_info *mtd = dev_get_drvdata(dev);
175
176 return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->writesize);
177}
178MTD_DEVICE_ATTR_RO(writesize);
179
180static ssize_t mtd_subpagesize_show(struct device *dev,
181 struct device_attribute *attr, char *buf)
182{
183 struct mtd_info *mtd = dev_get_drvdata(dev);
184 unsigned int subpagesize = mtd->writesize >> mtd->subpage_sft;
185
186 return sysfs_emit(buf, "%u\n", subpagesize);
187}
188MTD_DEVICE_ATTR_RO(subpagesize);
189
190static ssize_t mtd_oobsize_show(struct device *dev,
191 struct device_attribute *attr, char *buf)
192{
193 struct mtd_info *mtd = dev_get_drvdata(dev);
194
195 return sysfs_emit(buf, "%lu\n", (unsigned long)mtd->oobsize);
196}
197MTD_DEVICE_ATTR_RO(oobsize);
198
199static ssize_t mtd_oobavail_show(struct device *dev,
200 struct device_attribute *attr, char *buf)
201{
202 struct mtd_info *mtd = dev_get_drvdata(dev);
203
204 return sysfs_emit(buf, "%u\n", mtd->oobavail);
205}
206MTD_DEVICE_ATTR_RO(oobavail);
207
208static ssize_t mtd_numeraseregions_show(struct device *dev,
209 struct device_attribute *attr, char *buf)
210{
211 struct mtd_info *mtd = dev_get_drvdata(dev);
212
213 return sysfs_emit(buf, "%u\n", mtd->numeraseregions);
214}
215MTD_DEVICE_ATTR_RO(numeraseregions);
216
217static ssize_t mtd_name_show(struct device *dev,
218 struct device_attribute *attr, char *buf)
219{
220 struct mtd_info *mtd = dev_get_drvdata(dev);
221
222 return sysfs_emit(buf, "%s\n", mtd->name);
223}
224MTD_DEVICE_ATTR_RO(name);
225
226static ssize_t mtd_ecc_strength_show(struct device *dev,
227 struct device_attribute *attr, char *buf)
228{
229 struct mtd_info *mtd = dev_get_drvdata(dev);
230
231 return sysfs_emit(buf, "%u\n", mtd->ecc_strength);
232}
233MTD_DEVICE_ATTR_RO(ecc_strength);
234
235static ssize_t mtd_bitflip_threshold_show(struct device *dev,
236 struct device_attribute *attr,
237 char *buf)
238{
239 struct mtd_info *mtd = dev_get_drvdata(dev);
240
241 return sysfs_emit(buf, "%u\n", mtd->bitflip_threshold);
242}
243
244static ssize_t mtd_bitflip_threshold_store(struct device *dev,
245 struct device_attribute *attr,
246 const char *buf, size_t count)
247{
248 struct mtd_info *mtd = dev_get_drvdata(dev);
249 unsigned int bitflip_threshold;
250 int retval;
251
252 retval = kstrtouint(buf, 0, &bitflip_threshold);
253 if (retval)
254 return retval;
255
256 mtd->bitflip_threshold = bitflip_threshold;
257 return count;
258}
259MTD_DEVICE_ATTR_RW(bitflip_threshold);
260
261static ssize_t mtd_ecc_step_size_show(struct device *dev,
262 struct device_attribute *attr, char *buf)
263{
264 struct mtd_info *mtd = dev_get_drvdata(dev);
265
266 return sysfs_emit(buf, "%u\n", mtd->ecc_step_size);
267
268}
269MTD_DEVICE_ATTR_RO(ecc_step_size);
270
271static ssize_t mtd_corrected_bits_show(struct device *dev,
272 struct device_attribute *attr, char *buf)
273{
274 struct mtd_info *mtd = dev_get_drvdata(dev);
275 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
276
277 return sysfs_emit(buf, "%u\n", ecc_stats->corrected);
278}
279MTD_DEVICE_ATTR_RO(corrected_bits); /* ecc stats corrected */
280
281static ssize_t mtd_ecc_failures_show(struct device *dev,
282 struct device_attribute *attr, char *buf)
283{
284 struct mtd_info *mtd = dev_get_drvdata(dev);
285 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
286
287 return sysfs_emit(buf, "%u\n", ecc_stats->failed);
288}
289MTD_DEVICE_ATTR_RO(ecc_failures); /* ecc stats errors */
290
291static ssize_t mtd_bad_blocks_show(struct device *dev,
292 struct device_attribute *attr, char *buf)
293{
294 struct mtd_info *mtd = dev_get_drvdata(dev);
295 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
296
297 return sysfs_emit(buf, "%u\n", ecc_stats->badblocks);
298}
299MTD_DEVICE_ATTR_RO(bad_blocks);
300
301static ssize_t mtd_bbt_blocks_show(struct device *dev,
302 struct device_attribute *attr, char *buf)
303{
304 struct mtd_info *mtd = dev_get_drvdata(dev);
305 struct mtd_ecc_stats *ecc_stats = &mtd->ecc_stats;
306
307 return sysfs_emit(buf, "%u\n", ecc_stats->bbtblocks);
308}
309MTD_DEVICE_ATTR_RO(bbt_blocks);
310
311static struct attribute *mtd_attrs[] = {
312 &dev_attr_type.attr,
313 &dev_attr_flags.attr,
314 &dev_attr_size.attr,
315 &dev_attr_erasesize.attr,
316 &dev_attr_writesize.attr,
317 &dev_attr_subpagesize.attr,
318 &dev_attr_oobsize.attr,
319 &dev_attr_oobavail.attr,
320 &dev_attr_numeraseregions.attr,
321 &dev_attr_name.attr,
322 &dev_attr_ecc_strength.attr,
323 &dev_attr_ecc_step_size.attr,
324 &dev_attr_corrected_bits.attr,
325 &dev_attr_ecc_failures.attr,
326 &dev_attr_bad_blocks.attr,
327 &dev_attr_bbt_blocks.attr,
328 &dev_attr_bitflip_threshold.attr,
329 NULL,
330};
331ATTRIBUTE_GROUPS(mtd);
332
333static const struct device_type mtd_devtype = {
334 .name = "mtd",
335 .groups = mtd_groups,
336 .release = mtd_release,
337};
338
339static int mtd_partid_debug_show(struct seq_file *s, void *p)
340{
341 struct mtd_info *mtd = s->private;
342
343 seq_printf(s, "%s\n", mtd->dbg.partid);
344
345 return 0;
346}
347
348DEFINE_SHOW_ATTRIBUTE(mtd_partid_debug);
349
350static int mtd_partname_debug_show(struct seq_file *s, void *p)
351{
352 struct mtd_info *mtd = s->private;
353
354 seq_printf(s, "%s\n", mtd->dbg.partname);
355
356 return 0;
357}
358
359DEFINE_SHOW_ATTRIBUTE(mtd_partname_debug);
360
361static struct dentry *dfs_dir_mtd;
362
363static void mtd_debugfs_populate(struct mtd_info *mtd)
364{
365 struct mtd_info *master = mtd_get_master(mtd);
366 struct device *dev = &mtd->dev;
367 struct dentry *root;
368
369 if (IS_ERR_OR_NULL(dfs_dir_mtd))
370 return;
371
372 root = debugfs_create_dir(dev_name(dev), dfs_dir_mtd);
373 mtd->dbg.dfs_dir = root;
374
375 if (master->dbg.partid)
376 debugfs_create_file("partid", 0400, root, master,
377 &mtd_partid_debug_fops);
378
379 if (master->dbg.partname)
380 debugfs_create_file("partname", 0400, root, master,
381 &mtd_partname_debug_fops);
382}
383
384#ifndef CONFIG_MMU
385unsigned mtd_mmap_capabilities(struct mtd_info *mtd)
386{
387 switch (mtd->type) {
388 case MTD_RAM:
389 return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
390 NOMMU_MAP_READ | NOMMU_MAP_WRITE;
391 case MTD_ROM:
392 return NOMMU_MAP_COPY | NOMMU_MAP_DIRECT | NOMMU_MAP_EXEC |
393 NOMMU_MAP_READ;
394 default:
395 return NOMMU_MAP_COPY;
396 }
397}
398EXPORT_SYMBOL_GPL(mtd_mmap_capabilities);
399#endif
400
401static int mtd_reboot_notifier(struct notifier_block *n, unsigned long state,
402 void *cmd)
403{
404 struct mtd_info *mtd;
405
406 mtd = container_of(n, struct mtd_info, reboot_notifier);
407 mtd->_reboot(mtd);
408
409 return NOTIFY_DONE;
410}
411
412/**
413 * mtd_wunit_to_pairing_info - get pairing information of a wunit
414 * @mtd: pointer to the MTD device info structure
415 * @wunit: write unit we are interested in
416 * @info: returned pairing information
417 *
418 * Retrieve the pairing information associated with the wunit.
419 * This is mainly useful when dealing with MLC/TLC NANDs where pages can be
420 * paired together, and where programming a page may influence the page it is
421 * paired with.
422 * The notion of page is replaced by the term wunit (write-unit) to stay
423 * consistent with the ->writesize field.
424 *
425 * The @wunit argument can be extracted from an absolute offset using
426 * mtd_offset_to_wunit(). @info is filled with the pairing information attached
427 * to @wunit.
428 *
429 * From the pairing info the MTD user can find all the wunits paired with
430 * @wunit using the following loop:
431 *
432 * for (i = 0; i < mtd_pairing_groups(mtd); i++) {
433 * info.pair = i;
434 * mtd_pairing_info_to_wunit(mtd, &info);
435 * ...
436 * }
437 */
438int mtd_wunit_to_pairing_info(struct mtd_info *mtd, int wunit,
439 struct mtd_pairing_info *info)
440{
441 struct mtd_info *master = mtd_get_master(mtd);
442 int npairs = mtd_wunit_per_eb(master) / mtd_pairing_groups(master);
443
444 if (wunit < 0 || wunit >= npairs)
445 return -EINVAL;
446
447 if (master->pairing && master->pairing->get_info)
448 return master->pairing->get_info(master, wunit, info);
449
450 info->group = 0;
451 info->pair = wunit;
452
453 return 0;
454}
455EXPORT_SYMBOL_GPL(mtd_wunit_to_pairing_info);
456
457/**
458 * mtd_pairing_info_to_wunit - get wunit from pairing information
459 * @mtd: pointer to the MTD device info structure
460 * @info: pairing information struct
461 *
462 * Returns a positive number representing the wunit associated with the info
463 * struct, or a negative error code.
464 *
465 * This is the reverse of mtd_wunit_to_pairing_info(), and can help one to
466 * iterate over all wunits of a given pair (see mtd_wunit_to_pairing_info()
467 * doc).
468 *
469 * It can also be used to only program the first page of each pair (i.e.
470 * page attached to group 0), which allows one to use an MLC NAND in
471 * software-emulated SLC mode:
472 *
473 * info.group = 0;
474 * npairs = mtd_wunit_per_eb(mtd) / mtd_pairing_groups(mtd);
475 * for (info.pair = 0; info.pair < npairs; info.pair++) {
476 * wunit = mtd_pairing_info_to_wunit(mtd, &info);
477 * mtd_write(mtd, mtd_wunit_to_offset(mtd, blkoffs, wunit),
478 * mtd->writesize, &retlen, buf + (i * mtd->writesize));
479 * }
480 */
481int mtd_pairing_info_to_wunit(struct mtd_info *mtd,
482 const struct mtd_pairing_info *info)
483{
484 struct mtd_info *master = mtd_get_master(mtd);
485 int ngroups = mtd_pairing_groups(master);
486 int npairs = mtd_wunit_per_eb(master) / ngroups;
487
488 if (!info || info->pair < 0 || info->pair >= npairs ||
489 info->group < 0 || info->group >= ngroups)
490 return -EINVAL;
491
492 if (master->pairing && master->pairing->get_wunit)
493 return mtd->pairing->get_wunit(master, info);
494
495 return info->pair;
496}
497EXPORT_SYMBOL_GPL(mtd_pairing_info_to_wunit);
498
499/**
500 * mtd_pairing_groups - get the number of pairing groups
501 * @mtd: pointer to the MTD device info structure
502 *
503 * Returns the number of pairing groups.
504 *
505 * This number is usually equal to the number of bits exposed by a single
506 * cell, and can be used in conjunction with mtd_pairing_info_to_wunit()
507 * to iterate over all pages of a given pair.
508 */
509int mtd_pairing_groups(struct mtd_info *mtd)
510{
511 struct mtd_info *master = mtd_get_master(mtd);
512
513 if (!master->pairing || !master->pairing->ngroups)
514 return 1;
515
516 return master->pairing->ngroups;
517}
518EXPORT_SYMBOL_GPL(mtd_pairing_groups);
519
520static int mtd_nvmem_reg_read(void *priv, unsigned int offset,
521 void *val, size_t bytes)
522{
523 struct mtd_info *mtd = priv;
524 size_t retlen;
525 int err;
526
527 err = mtd_read(mtd, offset, bytes, &retlen, val);
528 if (err && err != -EUCLEAN)
529 return err;
530
531 return retlen == bytes ? 0 : -EIO;
532}
533
534static int mtd_nvmem_add(struct mtd_info *mtd)
535{
536 struct device_node *node = mtd_get_of_node(mtd);
537 struct nvmem_config config = {};
538
539 config.id = -1;
540 config.dev = &mtd->dev;
541 config.name = dev_name(&mtd->dev);
542 config.owner = THIS_MODULE;
543 config.reg_read = mtd_nvmem_reg_read;
544 config.size = mtd->size;
545 config.word_size = 1;
546 config.stride = 1;
547 config.read_only = true;
548 config.root_only = true;
549 config.no_of_node = !of_device_is_compatible(node, "nvmem-cells");
550 config.priv = mtd;
551
552 mtd->nvmem = nvmem_register(&config);
553 if (IS_ERR(mtd->nvmem)) {
554 /* Just ignore if there is no NVMEM support in the kernel */
555 if (PTR_ERR(mtd->nvmem) == -EOPNOTSUPP) {
556 mtd->nvmem = NULL;
557 } else {
558 dev_err(&mtd->dev, "Failed to register NVMEM device\n");
559 return PTR_ERR(mtd->nvmem);
560 }
561 }
562
563 return 0;
564}
565
566/**
567 * add_mtd_device - register an MTD device
568 * @mtd: pointer to new MTD device info structure
569 *
570 * Add a device to the list of MTD devices present in the system, and
571 * notify each currently active MTD 'user' of its arrival. Returns
572 * zero on success or non-zero on failure.
573 */
574
575int add_mtd_device(struct mtd_info *mtd)
576{
577 struct mtd_info *master = mtd_get_master(mtd);
578 struct mtd_notifier *not;
579 int i, error;
580
581 /*
582 * May occur, for instance, on buggy drivers which call
583 * mtd_device_parse_register() multiple times on the same master MTD,
584 * especially with CONFIG_MTD_PARTITIONED_MASTER=y.
585 */
586 if (WARN_ONCE(mtd->dev.type, "MTD already registered\n"))
587 return -EEXIST;
588
589 BUG_ON(mtd->writesize == 0);
590
591 /*
592 * MTD drivers should implement ->_{write,read}() or
593 * ->_{write,read}_oob(), but not both.
594 */
595 if (WARN_ON((mtd->_write && mtd->_write_oob) ||
596 (mtd->_read && mtd->_read_oob)))
597 return -EINVAL;
598
599 if (WARN_ON((!mtd->erasesize || !master->_erase) &&
600 !(mtd->flags & MTD_NO_ERASE)))
601 return -EINVAL;
602
603 /*
604 * MTD_SLC_ON_MLC_EMULATION can only be set on partitions, when the
605 * master is an MLC NAND and has a proper pairing scheme defined.
606 * We also reject masters that implement ->_writev() for now, because
607 * NAND controller drivers don't implement this hook, and adding the
608 * SLC -> MLC address/length conversion to this path is useless if we
609 * don't have a user.
610 */
611 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION &&
612 (!mtd_is_partition(mtd) || master->type != MTD_MLCNANDFLASH ||
613 !master->pairing || master->_writev))
614 return -EINVAL;
615
616 mutex_lock(&mtd_table_mutex);
617
618 i = idr_alloc(&mtd_idr, mtd, 0, 0, GFP_KERNEL);
619 if (i < 0) {
620 error = i;
621 goto fail_locked;
622 }
623
624 mtd->index = i;
625 mtd->usecount = 0;
626
627 /* default value if not set by driver */
628 if (mtd->bitflip_threshold == 0)
629 mtd->bitflip_threshold = mtd->ecc_strength;
630
631 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
632 int ngroups = mtd_pairing_groups(master);
633
634 mtd->erasesize /= ngroups;
635 mtd->size = (u64)mtd_div_by_eb(mtd->size, master) *
636 mtd->erasesize;
637 }
638
639 if (is_power_of_2(mtd->erasesize))
640 mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
641 else
642 mtd->erasesize_shift = 0;
643
644 if (is_power_of_2(mtd->writesize))
645 mtd->writesize_shift = ffs(mtd->writesize) - 1;
646 else
647 mtd->writesize_shift = 0;
648
649 mtd->erasesize_mask = (1 << mtd->erasesize_shift) - 1;
650 mtd->writesize_mask = (1 << mtd->writesize_shift) - 1;
651
652 /* Some chips always power up locked. Unlock them now */
653 if ((mtd->flags & MTD_WRITEABLE) && (mtd->flags & MTD_POWERUP_LOCK)) {
654 error = mtd_unlock(mtd, 0, mtd->size);
655 if (error && error != -EOPNOTSUPP)
656 printk(KERN_WARNING
657 "%s: unlock failed, writes may not work\n",
658 mtd->name);
659 /* Ignore unlock failures? */
660 error = 0;
661 }
662
663 /* Caller should have set dev.parent to match the
664 * physical device, if appropriate.
665 */
666 mtd->dev.type = &mtd_devtype;
667 mtd->dev.class = &mtd_class;
668 mtd->dev.devt = MTD_DEVT(i);
669 dev_set_name(&mtd->dev, "mtd%d", i);
670 dev_set_drvdata(&mtd->dev, mtd);
671 of_node_get(mtd_get_of_node(mtd));
672 error = device_register(&mtd->dev);
673 if (error)
674 goto fail_added;
675
676 /* Add the nvmem provider */
677 error = mtd_nvmem_add(mtd);
678 if (error)
679 goto fail_nvmem_add;
680
681 mtd_debugfs_populate(mtd);
682
683 device_create(&mtd_class, mtd->dev.parent, MTD_DEVT(i) + 1, NULL,
684 "mtd%dro", i);
685
686 pr_debug("mtd: Giving out device %d to %s\n", i, mtd->name);
687 /* No need to get a refcount on the module containing
688 the notifier, since we hold the mtd_table_mutex */
689 list_for_each_entry(not, &mtd_notifiers, list)
690 not->add(mtd);
691
692 mutex_unlock(&mtd_table_mutex);
693 /* We _know_ we aren't being removed, because
694 our caller is still holding us here. So none
695 of this try_ nonsense, and no bitching about it
696 either. :) */
697 __module_get(THIS_MODULE);
698 return 0;
699
700fail_nvmem_add:
701 device_unregister(&mtd->dev);
702fail_added:
703 of_node_put(mtd_get_of_node(mtd));
704 idr_remove(&mtd_idr, i);
705fail_locked:
706 mutex_unlock(&mtd_table_mutex);
707 return error;
708}
709
710/**
711 * del_mtd_device - unregister an MTD device
712 * @mtd: pointer to MTD device info structure
713 *
714 * Remove a device from the list of MTD devices present in the system,
715 * and notify each currently active MTD 'user' of its departure.
716 * Returns zero on success or a negative error code on failure, which
717 * currently happens if the requested device is not present or still in use.
718 */
719
720int del_mtd_device(struct mtd_info *mtd)
721{
722 int ret;
723 struct mtd_notifier *not;
724
725 mutex_lock(&mtd_table_mutex);
726
727 debugfs_remove_recursive(mtd->dbg.dfs_dir);
728
729 if (idr_find(&mtd_idr, mtd->index) != mtd) {
730 ret = -ENODEV;
731 goto out_error;
732 }
733
734 /* No need to get a refcount on the module containing
735 the notifier, since we hold the mtd_table_mutex */
736 list_for_each_entry(not, &mtd_notifiers, list)
737 not->remove(mtd);
738
739 if (mtd->usecount) {
740 printk(KERN_NOTICE "Removing MTD device #%d (%s) with use count %d\n",
741 mtd->index, mtd->name, mtd->usecount);
742 ret = -EBUSY;
743 } else {
744 /* Try to remove the NVMEM provider */
745 if (mtd->nvmem)
746 nvmem_unregister(mtd->nvmem);
747
748 device_unregister(&mtd->dev);
749
750 idr_remove(&mtd_idr, mtd->index);
751 of_node_put(mtd_get_of_node(mtd));
752
753 module_put(THIS_MODULE);
754 ret = 0;
755 }
756
757out_error:
758 mutex_unlock(&mtd_table_mutex);
759 return ret;
760}
761
762/*
763 * Set a few defaults based on the parent device, if not provided by the
764 * driver
765 */
766static void mtd_set_dev_defaults(struct mtd_info *mtd)
767{
768 if (mtd->dev.parent) {
769 if (!mtd->owner && mtd->dev.parent->driver)
770 mtd->owner = mtd->dev.parent->driver->owner;
771 if (!mtd->name)
772 mtd->name = dev_name(mtd->dev.parent);
773 } else {
774 pr_debug("mtd device won't show a device symlink in sysfs\n");
775 }
776
777 INIT_LIST_HEAD(&mtd->partitions);
778 mutex_init(&mtd->master.partitions_lock);
779 mutex_init(&mtd->master.chrdev_lock);
780}
781
782static ssize_t mtd_otp_size(struct mtd_info *mtd, bool is_user)
783{
784 struct otp_info *info;
785 ssize_t size = 0;
786 unsigned int i;
787 size_t retlen;
788 int ret;
789
790 info = kmalloc(PAGE_SIZE, GFP_KERNEL);
791 if (!info)
792 return -ENOMEM;
793
794 if (is_user)
795 ret = mtd_get_user_prot_info(mtd, PAGE_SIZE, &retlen, info);
796 else
797 ret = mtd_get_fact_prot_info(mtd, PAGE_SIZE, &retlen, info);
798 if (ret)
799 goto err;
800
801 for (i = 0; i < retlen / sizeof(*info); i++)
802 size += info[i].length;
803
804 kfree(info);
805 return size;
806
807err:
808 kfree(info);
809 return ret;
810}
811
812static struct nvmem_device *mtd_otp_nvmem_register(struct mtd_info *mtd,
813 const char *compatible,
814 int size,
815 nvmem_reg_read_t reg_read)
816{
817 struct nvmem_device *nvmem = NULL;
818 struct nvmem_config config = {};
819 struct device_node *np;
820
821 /* DT binding is optional */
822 np = of_get_compatible_child(mtd->dev.of_node, compatible);
823
824 /* OTP nvmem will be registered on the physical device */
825 config.dev = mtd->dev.parent;
826 /* just reuse the compatible as name */
827 config.name = compatible;
828 config.id = NVMEM_DEVID_NONE;
829 config.owner = THIS_MODULE;
830 config.type = NVMEM_TYPE_OTP;
831 config.root_only = true;
832 config.reg_read = reg_read;
833 config.size = size;
834 config.of_node = np;
835 config.priv = mtd;
836
837 nvmem = nvmem_register(&config);
838 /* Just ignore if there is no NVMEM support in the kernel */
839 if (IS_ERR(nvmem) && PTR_ERR(nvmem) == -EOPNOTSUPP)
840 nvmem = NULL;
841
842 of_node_put(np);
843
844 return nvmem;
845}
846
847static int mtd_nvmem_user_otp_reg_read(void *priv, unsigned int offset,
848 void *val, size_t bytes)
849{
850 struct mtd_info *mtd = priv;
851 size_t retlen;
852 int ret;
853
854 ret = mtd_read_user_prot_reg(mtd, offset, bytes, &retlen, val);
855 if (ret)
856 return ret;
857
858 return retlen == bytes ? 0 : -EIO;
859}
860
861static int mtd_nvmem_fact_otp_reg_read(void *priv, unsigned int offset,
862 void *val, size_t bytes)
863{
864 struct mtd_info *mtd = priv;
865 size_t retlen;
866 int ret;
867
868 ret = mtd_read_fact_prot_reg(mtd, offset, bytes, &retlen, val);
869 if (ret)
870 return ret;
871
872 return retlen == bytes ? 0 : -EIO;
873}
874
875static int mtd_otp_nvmem_add(struct mtd_info *mtd)
876{
877 struct nvmem_device *nvmem;
878 ssize_t size;
879 int err;
880
881 if (mtd->_get_user_prot_info && mtd->_read_user_prot_reg) {
882 size = mtd_otp_size(mtd, true);
883 if (size < 0)
884 return size;
885
886 if (size > 0) {
887 nvmem = mtd_otp_nvmem_register(mtd, "user-otp", size,
888 mtd_nvmem_user_otp_reg_read);
889 if (IS_ERR(nvmem)) {
890 dev_err(&mtd->dev, "Failed to register OTP NVMEM device\n");
891 return PTR_ERR(nvmem);
892 }
893 mtd->otp_user_nvmem = nvmem;
894 }
895 }
896
897 if (mtd->_get_fact_prot_info && mtd->_read_fact_prot_reg) {
898 size = mtd_otp_size(mtd, false);
899 if (size < 0) {
900 err = size;
901 goto err;
902 }
903
904 if (size > 0) {
905 nvmem = mtd_otp_nvmem_register(mtd, "factory-otp", size,
906 mtd_nvmem_fact_otp_reg_read);
907 if (IS_ERR(nvmem)) {
908 dev_err(&mtd->dev, "Failed to register OTP NVMEM device\n");
909 err = PTR_ERR(nvmem);
910 goto err;
911 }
912 mtd->otp_factory_nvmem = nvmem;
913 }
914 }
915
916 return 0;
917
918err:
919 if (mtd->otp_user_nvmem)
920 nvmem_unregister(mtd->otp_user_nvmem);
921 return err;
922}
923
924/**
925 * mtd_device_parse_register - parse partitions and register an MTD device.
926 *
927 * @mtd: the MTD device to register
928 * @types: the list of MTD partition probes to try, see
929 * 'parse_mtd_partitions()' for more information
930 * @parser_data: MTD partition parser-specific data
931 * @parts: fallback partition information to register, if parsing fails;
932 * only valid if %nr_parts > %0
933 * @nr_parts: the number of partitions in parts, if zero then the full
934 * MTD device is registered if no partition info is found
935 *
936 * This function aggregates MTD partitions parsing (done by
937 * 'parse_mtd_partitions()') and MTD device and partitions registering. It
938 * basically follows the most common pattern found in many MTD drivers:
939 *
940 * * If the MTD_PARTITIONED_MASTER option is set, then the device as a whole is
941 * registered first.
942 * Then it tries to probe partitions on MTD device @mtd using parsers
943 * specified in @types (if @types is %NULL, then the default list of parsers
944 * is used, see 'parse_mtd_partitions()' for more information). If none are
945 * found, this function tries to fall back to the information specified in
946 * @parts/@nr_parts.
947 * * If no partitions were found this function just registers the MTD device
948 * @mtd and exits.
949 *
950 * Returns zero in case of success and a negative error code in case of failure.
951 */
952int mtd_device_parse_register(struct mtd_info *mtd, const char * const *types,
953 struct mtd_part_parser_data *parser_data,
954 const struct mtd_partition *parts,
955 int nr_parts)
956{
957 int ret;
958
959 mtd_set_dev_defaults(mtd);
960
961 if (IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER)) {
962 ret = add_mtd_device(mtd);
963 if (ret)
964 return ret;
965 }
966
967 /* Prefer parsed partitions over driver-provided fallback */
968 ret = parse_mtd_partitions(mtd, types, parser_data);
969 if (ret == -EPROBE_DEFER)
970 goto out;
971
972 if (ret > 0)
973 ret = 0;
974 else if (nr_parts)
975 ret = add_mtd_partitions(mtd, parts, nr_parts);
976 else if (!device_is_registered(&mtd->dev))
977 ret = add_mtd_device(mtd);
978 else
979 ret = 0;
980
981 if (ret)
982 goto out;
983
984 /*
985 * FIXME: some drivers unfortunately call this function more than once.
986 * So we have to check if we've already assigned the reboot notifier.
987 *
988 * Generally, we can make multiple calls work for most cases, but it
989 * does cause problems with parse_mtd_partitions() above (e.g.,
990 * cmdlineparts will register partitions more than once).
991 */
992 WARN_ONCE(mtd->_reboot && mtd->reboot_notifier.notifier_call,
993 "MTD already registered\n");
994 if (mtd->_reboot && !mtd->reboot_notifier.notifier_call) {
995 mtd->reboot_notifier.notifier_call = mtd_reboot_notifier;
996 register_reboot_notifier(&mtd->reboot_notifier);
997 }
998
999 ret = mtd_otp_nvmem_add(mtd);
1000
1001out:
1002 if (ret && device_is_registered(&mtd->dev))
1003 del_mtd_device(mtd);
1004
1005 return ret;
1006}
1007EXPORT_SYMBOL_GPL(mtd_device_parse_register);
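
/*
 * Illustrative sketch (not part of this file): a typical flash driver probe
 * path ends with mtd_device_parse_register(). The driver and helper names
 * below are hypothetical; only the MTD calls are real.
 *
 *	static int my_flash_probe(struct platform_device *pdev)
 *	{
 *		struct mtd_info *mtd;
 *
 *		mtd = my_flash_setup(pdev);	// fills size, erasesize, ...
 *		if (IS_ERR(mtd))
 *			return PTR_ERR(mtd);
 *
 *		mtd->dev.parent = &pdev->dev;
 *		// NULL types: use the default partition parsers
 *		return mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
 *	}
 */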
1008
1009/**
1010 * mtd_device_unregister - unregister an existing MTD device.
1011 *
1012 * @master: the MTD device to unregister. This will unregister both the master
1013 * and any partitions if registered.
1014 */
1015int mtd_device_unregister(struct mtd_info *master)
1016{
1017 int err;
1018
1019 if (master->_reboot)
1020 unregister_reboot_notifier(&master->reboot_notifier);
1021
1022 if (master->otp_user_nvmem)
1023 nvmem_unregister(master->otp_user_nvmem);
1024
1025 if (master->otp_factory_nvmem)
1026 nvmem_unregister(master->otp_factory_nvmem);
1027
1028 err = del_mtd_partitions(master);
1029 if (err)
1030 return err;
1031
1032 if (!device_is_registered(&master->dev))
1033 return 0;
1034
1035 return del_mtd_device(master);
1036}
1037EXPORT_SYMBOL_GPL(mtd_device_unregister);
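
/*
 * Illustrative sketch: the matching remove path unregisters the master
 * (partitions included). my_flash_remove() and my_flash_teardown() are
 * hypothetical driver helpers.
 *
 *	static int my_flash_remove(struct platform_device *pdev)
 *	{
 *		struct mtd_info *mtd = platform_get_drvdata(pdev);
 *		int ret;
 *
 *		ret = mtd_device_unregister(mtd);
 *		if (ret)
 *			return ret;
 *
 *		my_flash_teardown(mtd);
 *		return 0;
 *	}
 */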
1038
1039/**
1040 * register_mtd_user - register a 'user' of MTD devices.
1041 * @new: pointer to notifier info structure
1042 *
1043 * Registers a pair of callback functions to be called upon addition
1044 * or removal of MTD devices. Causes the 'add' callback to be immediately
1045 * invoked for each MTD device currently present in the system.
1046 */
1047void register_mtd_user (struct mtd_notifier *new)
1048{
1049 struct mtd_info *mtd;
1050
1051 mutex_lock(&mtd_table_mutex);
1052
1053 list_add(&new->list, &mtd_notifiers);
1054
1055 __module_get(THIS_MODULE);
1056
1057 mtd_for_each_device(mtd)
1058 new->add(mtd);
1059
1060 mutex_unlock(&mtd_table_mutex);
1061}
1062EXPORT_SYMBOL_GPL(register_mtd_user);
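
/*
 * Illustrative sketch: an MTD 'user' (e.g. a block translation layer) hooks
 * device arrival/departure with a struct mtd_notifier. The callback names
 * are hypothetical.
 *
 *	static void my_user_add(struct mtd_info *mtd)    { ... }
 *	static void my_user_remove(struct mtd_info *mtd) { ... }
 *
 *	static struct mtd_notifier my_user_notifier = {
 *		.add	= my_user_add,
 *		.remove	= my_user_remove,
 *	};
 *
 *	// add() is invoked immediately for every MTD device already present
 *	register_mtd_user(&my_user_notifier);
 */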
1063
1064/**
1065 * unregister_mtd_user - unregister a 'user' of MTD devices.
1066 * @old: pointer to notifier info structure
1067 *
1068 * Removes a callback function pair from the list of 'users' to be
1069 * notified upon addition or removal of MTD devices. Causes the
1070 * 'remove' callback to be immediately invoked for each MTD device
1071 * currently present in the system.
1072 */
1073int unregister_mtd_user (struct mtd_notifier *old)
1074{
1075 struct mtd_info *mtd;
1076
1077 mutex_lock(&mtd_table_mutex);
1078
1079 module_put(THIS_MODULE);
1080
1081 mtd_for_each_device(mtd)
1082 old->remove(mtd);
1083
1084 list_del(&old->list);
1085 mutex_unlock(&mtd_table_mutex);
1086 return 0;
1087}
1088EXPORT_SYMBOL_GPL(unregister_mtd_user);
1089
1090/**
1091 * get_mtd_device - obtain a validated handle for an MTD device
1092 * @mtd: last known address of the required MTD device
1093 * @num: internal device number of the required MTD device
1094 *
1095 * Given a number and NULL address, return the num'th entry in the device
1096 * table, if any. Given an address and num == -1, search the device table
1097 * for a device with that address and return if it's still present. Given
1098 * both, return the num'th driver only if its address matches. Return
1099 * error code if not.
1100 */
1101struct mtd_info *get_mtd_device(struct mtd_info *mtd, int num)
1102{
1103 struct mtd_info *ret = NULL, *other;
1104 int err = -ENODEV;
1105
1106 mutex_lock(&mtd_table_mutex);
1107
1108 if (num == -1) {
1109 mtd_for_each_device(other) {
1110 if (other == mtd) {
1111 ret = mtd;
1112 break;
1113 }
1114 }
1115 } else if (num >= 0) {
1116 ret = idr_find(&mtd_idr, num);
1117 if (mtd && mtd != ret)
1118 ret = NULL;
1119 }
1120
1121 if (!ret) {
1122 ret = ERR_PTR(err);
1123 goto out;
1124 }
1125
1126 err = __get_mtd_device(ret);
1127 if (err)
1128 ret = ERR_PTR(err);
1129out:
1130 mutex_unlock(&mtd_table_mutex);
1131 return ret;
1132}
1133EXPORT_SYMBOL_GPL(get_mtd_device);
1134
1135
1136int __get_mtd_device(struct mtd_info *mtd)
1137{
1138 struct mtd_info *master = mtd_get_master(mtd);
1139 int err;
1140
1141 if (!try_module_get(master->owner))
1142 return -ENODEV;
1143
1144 if (master->_get_device) {
1145 err = master->_get_device(mtd);
1146
1147 if (err) {
1148 module_put(master->owner);
1149 return err;
1150 }
1151 }
1152
1153 master->usecount++;
1154
1155 while (mtd->parent) {
1156 mtd->usecount++;
1157 mtd = mtd->parent;
1158 }
1159
1160 return 0;
1161}
1162EXPORT_SYMBOL_GPL(__get_mtd_device);
1163
1164/**
1165 * get_mtd_device_nm - obtain a validated handle for an MTD device by
1166 * device name
1167 * @name: MTD device name to open
1168 *
1169 * This function returns an MTD device description structure in case of
1170 * success and an error code in case of failure.
1171 */
1172struct mtd_info *get_mtd_device_nm(const char *name)
1173{
1174 int err = -ENODEV;
1175 struct mtd_info *mtd = NULL, *other;
1176
1177 mutex_lock(&mtd_table_mutex);
1178
1179 mtd_for_each_device(other) {
1180 if (!strcmp(name, other->name)) {
1181 mtd = other;
1182 break;
1183 }
1184 }
1185
1186 if (!mtd)
1187 goto out_unlock;
1188
1189 err = __get_mtd_device(mtd);
1190 if (err)
1191 goto out_unlock;
1192
1193 mutex_unlock(&mtd_table_mutex);
1194 return mtd;
1195
1196out_unlock:
1197 mutex_unlock(&mtd_table_mutex);
1198 return ERR_PTR(err);
1199}
1200EXPORT_SYMBOL_GPL(get_mtd_device_nm);
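
/*
 * Illustrative sketch: look a device up by name, use it, then drop the
 * reference with put_mtd_device(). The device name is only an example.
 *
 *	struct mtd_info *mtd = get_mtd_device_nm("spi0.0");
 *
 *	if (IS_ERR(mtd))
 *		return PTR_ERR(mtd);
 *	// ... mtd_read()/mtd_write() ...
 *	put_mtd_device(mtd);
 */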
1201
1202void put_mtd_device(struct mtd_info *mtd)
1203{
1204 mutex_lock(&mtd_table_mutex);
1205 __put_mtd_device(mtd);
1206 mutex_unlock(&mtd_table_mutex);
1207
1208}
1209EXPORT_SYMBOL_GPL(put_mtd_device);
1210
1211void __put_mtd_device(struct mtd_info *mtd)
1212{
1213 struct mtd_info *master = mtd_get_master(mtd);
1214
1215 while (mtd->parent) {
1216 --mtd->usecount;
1217 BUG_ON(mtd->usecount < 0);
1218 mtd = mtd->parent;
1219 }
1220
1221 master->usecount--;
1222
1223 if (master->_put_device)
1224 master->_put_device(master);
1225
1226 module_put(master->owner);
1227}
1228EXPORT_SYMBOL_GPL(__put_mtd_device);
1229
1230/*
1231 * Erase is a synchronous operation. Device drivers are expected to return a
1232 * negative error code if the operation failed and to update instr->fail_addr
1233 * to point to the portion that was not properly erased.
1234 */
1235int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
1236{
1237 struct mtd_info *master = mtd_get_master(mtd);
1238 u64 mst_ofs = mtd_get_master_ofs(mtd, 0);
1239 struct erase_info adjinstr;
1240 int ret;
1241
1242 instr->fail_addr = MTD_FAIL_ADDR_UNKNOWN;
1243 adjinstr = *instr;
1244
1245 if (!mtd->erasesize || !master->_erase)
1246 return -ENOTSUPP;
1247
1248 if (instr->addr >= mtd->size || instr->len > mtd->size - instr->addr)
1249 return -EINVAL;
1250 if (!(mtd->flags & MTD_WRITEABLE))
1251 return -EROFS;
1252
1253 if (!instr->len)
1254 return 0;
1255
1256 ledtrig_mtd_activity();
1257
1258 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
1259 adjinstr.addr = (loff_t)mtd_div_by_eb(instr->addr, mtd) *
1260 master->erasesize;
1261 adjinstr.len = ((u64)mtd_div_by_eb(instr->addr + instr->len, mtd) *
1262 master->erasesize) -
1263 adjinstr.addr;
1264 }
1265
1266 adjinstr.addr += mst_ofs;
1267
1268 ret = master->_erase(master, &adjinstr);
1269
1270 if (adjinstr.fail_addr != MTD_FAIL_ADDR_UNKNOWN) {
1271 instr->fail_addr = adjinstr.fail_addr - mst_ofs;
1272 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
1273 instr->fail_addr = mtd_div_by_eb(instr->fail_addr,
1274 master);
1275 instr->fail_addr *= mtd->erasesize;
1276 }
1277 }
1278
1279 return ret;
1280}
1281EXPORT_SYMBOL_GPL(mtd_erase);
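
/*
 * Illustrative sketch: erase one eraseblock at offset 'ofs' (assumed to be
 * eraseblock-aligned). On failure, instr.fail_addr points at the first
 * offset that was not properly erased, if known.
 *
 *	struct erase_info instr = {
 *		.addr = ofs,
 *		.len  = mtd->erasesize,
 *	};
 *	int err = mtd_erase(mtd, &instr);
 *
 *	if (err)
 *		pr_warn("erase failed at 0x%llx\n",
 *			(unsigned long long)instr.fail_addr);
 */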
1282
1283/*
1284 * This stuff is for eXecute-In-Place (XIP). phys is optional and may be set to NULL.
1285 */
1286int mtd_point(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
1287 void **virt, resource_size_t *phys)
1288{
1289 struct mtd_info *master = mtd_get_master(mtd);
1290
1291 *retlen = 0;
1292 *virt = NULL;
1293 if (phys)
1294 *phys = 0;
1295 if (!master->_point)
1296 return -EOPNOTSUPP;
1297 if (from < 0 || from >= mtd->size || len > mtd->size - from)
1298 return -EINVAL;
1299 if (!len)
1300 return 0;
1301
1302 from = mtd_get_master_ofs(mtd, from);
1303 return master->_point(master, from, len, retlen, virt, phys);
1304}
1305EXPORT_SYMBOL_GPL(mtd_point);
1306
1307/* We probably shouldn't allow XIP if the unpoint isn't NULL */
1308int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1309{
1310 struct mtd_info *master = mtd_get_master(mtd);
1311
1312 if (!master->_unpoint)
1313 return -EOPNOTSUPP;
1314 if (from < 0 || from >= mtd->size || len > mtd->size - from)
1315 return -EINVAL;
1316 if (!len)
1317 return 0;
1318 return master->_unpoint(master, mtd_get_master_ofs(mtd, from), len);
1319}
1320EXPORT_SYMBOL_GPL(mtd_unpoint);
1321
1322/*
1323 * Allow NOMMU mmap() to directly map the device (if not NULL)
1324 * - return the address to which the offset maps
1325 * - return -ENOSYS to indicate refusal to do the mapping
1326 */
1327unsigned long mtd_get_unmapped_area(struct mtd_info *mtd, unsigned long len,
1328 unsigned long offset, unsigned long flags)
1329{
1330 size_t retlen;
1331 void *virt;
1332 int ret;
1333
1334 ret = mtd_point(mtd, offset, len, &retlen, &virt, NULL);
1335 if (ret)
1336 return ret;
1337 if (retlen != len) {
1338 mtd_unpoint(mtd, offset, retlen);
1339 return -ENOSYS;
1340 }
1341 return (unsigned long)virt;
1342}
1343EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
1344
1345static void mtd_update_ecc_stats(struct mtd_info *mtd, struct mtd_info *master,
1346 const struct mtd_ecc_stats *old_stats)
1347{
1348 struct mtd_ecc_stats diff;
1349
1350 if (master == mtd)
1351 return;
1352
1353 diff = master->ecc_stats;
1354 diff.failed -= old_stats->failed;
1355 diff.corrected -= old_stats->corrected;
1356
1357 while (mtd->parent) {
1358 mtd->ecc_stats.failed += diff.failed;
1359 mtd->ecc_stats.corrected += diff.corrected;
1360 mtd = mtd->parent;
1361 }
1362}
1363
1364int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
1365 u_char *buf)
1366{
1367 struct mtd_oob_ops ops = {
1368 .len = len,
1369 .datbuf = buf,
1370 };
1371 int ret;
1372
1373 ret = mtd_read_oob(mtd, from, &ops);
1374 *retlen = ops.retlen;
1375
1376 return ret;
1377}
1378EXPORT_SYMBOL_GPL(mtd_read);
1379
1380int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
1381 const u_char *buf)
1382{
1383 struct mtd_oob_ops ops = {
1384 .len = len,
1385 .datbuf = (u8 *)buf,
1386 };
1387 int ret;
1388
1389 ret = mtd_write_oob(mtd, to, &ops);
1390 *retlen = ops.retlen;
1391
1392 return ret;
1393}
1394EXPORT_SYMBOL_GPL(mtd_write);
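
/*
 * Illustrative sketch: plain data read/write through the OOB-less wrappers.
 * -EUCLEAN from mtd_read() means the data was corrected but the number of
 * bitflips reached mtd->bitflip_threshold, so the block should be scrubbed.
 *
 *	size_t retlen;
 *	int err = mtd_read(mtd, from, len, &retlen, buf);
 *
 *	if (err == -EUCLEAN)
 *		schedule_scrub(mtd, from);	// hypothetical helper
 *	else if (err)
 *		return err;
 *
 *	err = mtd_write(mtd, to, retlen, &retlen, buf);
 */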
1395
1396/*
1397 * In blackbox flight-recorder-like scenarios we want to make successful writes
1398 * in interrupt context. panic_write() is only intended to be called when it is
1399 * known the kernel is about to panic and we need the write to succeed. Since
1400 * the kernel is not going to be running for much longer, this function can
1401 * break locks and delay to ensure the write succeeds (but not sleep).
1402 */
1403int mtd_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
1404 const u_char *buf)
1405{
1406 struct mtd_info *master = mtd_get_master(mtd);
1407
1408 *retlen = 0;
1409 if (!master->_panic_write)
1410 return -EOPNOTSUPP;
1411 if (to < 0 || to >= mtd->size || len > mtd->size - to)
1412 return -EINVAL;
1413 if (!(mtd->flags & MTD_WRITEABLE))
1414 return -EROFS;
1415 if (!len)
1416 return 0;
1417 if (!master->oops_panic_write)
1418 master->oops_panic_write = true;
1419
1420 return master->_panic_write(master, mtd_get_master_ofs(mtd, to), len,
1421 retlen, buf);
1422}
1423EXPORT_SYMBOL_GPL(mtd_panic_write);
1424
1425static int mtd_check_oob_ops(struct mtd_info *mtd, loff_t offs,
1426 struct mtd_oob_ops *ops)
1427{
1428 /*
1429 * Some users are setting ->datbuf or ->oobbuf to NULL, but are leaving
1430 * ->len or ->ooblen uninitialized. Force ->len and ->ooblen to 0 in
1431 * this case.
1432 */
1433 if (!ops->datbuf)
1434 ops->len = 0;
1435
1436 if (!ops->oobbuf)
1437 ops->ooblen = 0;
1438
1439 if (offs < 0 || offs + ops->len > mtd->size)
1440 return -EINVAL;
1441
1442 if (ops->ooblen) {
1443 size_t maxooblen;
1444
1445 if (ops->ooboffs >= mtd_oobavail(mtd, ops))
1446 return -EINVAL;
1447
1448 maxooblen = ((size_t)(mtd_div_by_ws(mtd->size, mtd) -
1449 mtd_div_by_ws(offs, mtd)) *
1450 mtd_oobavail(mtd, ops)) - ops->ooboffs;
1451 if (ops->ooblen > maxooblen)
1452 return -EINVAL;
1453 }
1454
1455 return 0;
1456}
1457
1458static int mtd_read_oob_std(struct mtd_info *mtd, loff_t from,
1459 struct mtd_oob_ops *ops)
1460{
1461 struct mtd_info *master = mtd_get_master(mtd);
1462 int ret;
1463
1464 from = mtd_get_master_ofs(mtd, from);
1465 if (master->_read_oob)
1466 ret = master->_read_oob(master, from, ops);
1467 else
1468 ret = master->_read(master, from, ops->len, &ops->retlen,
1469 ops->datbuf);
1470
1471 return ret;
1472}
1473
1474static int mtd_write_oob_std(struct mtd_info *mtd, loff_t to,
1475 struct mtd_oob_ops *ops)
1476{
1477 struct mtd_info *master = mtd_get_master(mtd);
1478 int ret;
1479
1480 to = mtd_get_master_ofs(mtd, to);
1481 if (master->_write_oob)
1482 ret = master->_write_oob(master, to, ops);
1483 else
1484 ret = master->_write(master, to, ops->len, &ops->retlen,
1485 ops->datbuf);
1486
1487 return ret;
1488}
1489
1490static int mtd_io_emulated_slc(struct mtd_info *mtd, loff_t start, bool read,
1491 struct mtd_oob_ops *ops)
1492{
1493 struct mtd_info *master = mtd_get_master(mtd);
1494 int ngroups = mtd_pairing_groups(master);
1495 int npairs = mtd_wunit_per_eb(master) / ngroups;
1496 struct mtd_oob_ops adjops = *ops;
1497 unsigned int wunit, oobavail;
1498 struct mtd_pairing_info info;
1499 int max_bitflips = 0;
1500 u32 ebofs, pageofs;
1501 loff_t base, pos;
1502
1503 ebofs = mtd_mod_by_eb(start, mtd);
1504 base = (loff_t)mtd_div_by_eb(start, mtd) * master->erasesize;
1505 info.group = 0;
1506 info.pair = mtd_div_by_ws(ebofs, mtd);
1507 pageofs = mtd_mod_by_ws(ebofs, mtd);
1508 oobavail = mtd_oobavail(mtd, ops);
1509
1510 while (ops->retlen < ops->len || ops->oobretlen < ops->ooblen) {
1511 int ret;
1512
1513 if (info.pair >= npairs) {
1514 info.pair = 0;
1515 base += master->erasesize;
1516 }
1517
1518 wunit = mtd_pairing_info_to_wunit(master, &info);
1519 pos = mtd_wunit_to_offset(mtd, base, wunit);
1520
1521 adjops.len = ops->len - ops->retlen;
1522 if (adjops.len > mtd->writesize - pageofs)
1523 adjops.len = mtd->writesize - pageofs;
1524
1525 adjops.ooblen = ops->ooblen - ops->oobretlen;
1526 if (adjops.ooblen > oobavail - adjops.ooboffs)
1527 adjops.ooblen = oobavail - adjops.ooboffs;
1528
1529 if (read) {
1530 ret = mtd_read_oob_std(mtd, pos + pageofs, &adjops);
1531 if (ret > 0)
1532 max_bitflips = max(max_bitflips, ret);
1533 } else {
1534 ret = mtd_write_oob_std(mtd, pos + pageofs, &adjops);
1535 }
1536
1537 if (ret < 0)
1538 return ret;
1539
1540 max_bitflips = max(max_bitflips, ret);
1541 ops->retlen += adjops.retlen;
1542 ops->oobretlen += adjops.oobretlen;
1543 adjops.datbuf += adjops.retlen;
1544 adjops.oobbuf += adjops.oobretlen;
1545 adjops.ooboffs = 0;
1546 pageofs = 0;
1547 info.pair++;
1548 }
1549
1550 return max_bitflips;
1551}
1552
1553int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
1554{
1555 struct mtd_info *master = mtd_get_master(mtd);
1556 struct mtd_ecc_stats old_stats = master->ecc_stats;
1557 int ret_code;
1558
1559 ops->retlen = ops->oobretlen = 0;
1560
1561 ret_code = mtd_check_oob_ops(mtd, from, ops);
1562 if (ret_code)
1563 return ret_code;
1564
1565 ledtrig_mtd_activity();
1566
1567 /* Check the validity of a potential fallback on mtd->_read */
1568 if (!master->_read_oob && (!master->_read || ops->oobbuf))
1569 return -EOPNOTSUPP;
1570
1571 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
1572 ret_code = mtd_io_emulated_slc(mtd, from, true, ops);
1573 else
1574 ret_code = mtd_read_oob_std(mtd, from, ops);
1575
1576 mtd_update_ecc_stats(mtd, master, &old_stats);
1577
1578 /*
1579 * In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
1580 * similar to mtd->_read(), returning a non-negative integer
1581 * representing max bitflips. In other cases, mtd->_read_oob() may
1582 * return -EUCLEAN. In all cases, perform similar logic to mtd_read().
1583 */
1584 if (unlikely(ret_code < 0))
1585 return ret_code;
1586 if (mtd->ecc_strength == 0)
1587 return 0; /* device lacks ecc */
1588 return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
1589}
1590EXPORT_SYMBOL_GPL(mtd_read_oob);
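
/*
 * Illustrative sketch: read one page of data plus its free OOB bytes in a
 * single call. MTD_OPS_AUTO_OOB asks the driver to place OOB data in the
 * free regions described by the OOB layout.
 *
 *	struct mtd_oob_ops ops = {
 *		.mode	= MTD_OPS_AUTO_OOB,
 *		.len	= mtd->writesize,
 *		.ooblen	= mtd->oobavail,
 *		.datbuf	= databuf,
 *		.oobbuf	= oobbuf,
 *	};
 *	int err = mtd_read_oob(mtd, page_offset, &ops);
 */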
1591
1592int mtd_write_oob(struct mtd_info *mtd, loff_t to,
1593 struct mtd_oob_ops *ops)
1594{
1595 struct mtd_info *master = mtd_get_master(mtd);
1596 int ret;
1597
1598 ops->retlen = ops->oobretlen = 0;
1599
1600 if (!(mtd->flags & MTD_WRITEABLE))
1601 return -EROFS;
1602
1603 ret = mtd_check_oob_ops(mtd, to, ops);
1604 if (ret)
1605 return ret;
1606
1607 ledtrig_mtd_activity();
1608
1609 /* Check the validity of a potential fallback on mtd->_write */
1610 if (!master->_write_oob && (!master->_write || ops->oobbuf))
1611 return -EOPNOTSUPP;
1612
1613 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
1614 return mtd_io_emulated_slc(mtd, to, false, ops);
1615
1616 return mtd_write_oob_std(mtd, to, ops);
1617}
1618EXPORT_SYMBOL_GPL(mtd_write_oob);
1619
1620/**
1621 * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section
1622 * @mtd: MTD device structure
1623 * @section: ECC section. Depending on the layout you may have all the ECC
1624 * bytes stored in a single contiguous section, or one section
1625 * per ECC chunk (and sometimes several sections for a single
1626 * ECC chunk)
1627 * @oobecc: OOB region struct filled with the appropriate ECC position
1628 * information
1629 *
1630 * This function returns ECC section information in the OOB area. If you want
1631 * to get all the ECC bytes information, then you should call
1632 * mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE.
1633 *
1634 * Returns zero on success, a negative error code otherwise.
1635 */
1636int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
1637 struct mtd_oob_region *oobecc)
1638{
1639 struct mtd_info *master = mtd_get_master(mtd);
1640
1641 memset(oobecc, 0, sizeof(*oobecc));
1642
1643 if (!master || section < 0)
1644 return -EINVAL;
1645
1646 if (!master->ooblayout || !master->ooblayout->ecc)
1647 return -ENOTSUPP;
1648
1649 return master->ooblayout->ecc(master, section, oobecc);
1650}
1651EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
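
/*
 * Illustrative sketch of the iteration pattern described above: walk all ECC
 * regions of the OOB layout until -ERANGE is returned.
 *
 *	struct mtd_oob_region region;
 *	int section = 0, ret;
 *
 *	while (!(ret = mtd_ooblayout_ecc(mtd, section++, &region)))
 *		pr_info("ECC bytes at OOB offset %u, length %u\n",
 *			region.offset, region.length);
 *
 *	if (ret != -ERANGE)
 *		return ret;	// real error
 */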
1652
1653/**
1654 * mtd_ooblayout_free - Get the OOB region definition of a specific free
1655 * section
1656 * @mtd: MTD device structure
1657 * @section: Free section you are interested in. Depending on the layout
1658 * you may have all the free bytes stored in a single contiguous
1659 * section, or one section per ECC chunk plus an extra section
1660 * for the remaining bytes (or other funky layout).
1661 * @oobfree: OOB region struct filled with the appropriate free position
1662 * information
1663 *
1664 * This function returns free bytes position in the OOB area. If you want
1665 * to get all the free bytes information, then you should call
1666 * mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE.
1667 *
1668 * Returns zero on success, a negative error code otherwise.
1669 */
1670int mtd_ooblayout_free(struct mtd_info *mtd, int section,
1671 struct mtd_oob_region *oobfree)
1672{
1673 struct mtd_info *master = mtd_get_master(mtd);
1674
1675 memset(oobfree, 0, sizeof(*oobfree));
1676
1677 if (!master || section < 0)
1678 return -EINVAL;
1679
1680 if (!master->ooblayout || !master->ooblayout->free)
1681 return -ENOTSUPP;
1682
1683 return master->ooblayout->free(master, section, oobfree);
1684}
1685EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
1686
1687/**
1688 * mtd_ooblayout_find_region - Find the region attached to a specific byte
1689 * @mtd: mtd info structure
1690 * @byte: the byte we are searching for
1691 * @sectionp: pointer where the section id will be stored
1692 * @oobregion: used to retrieve the ECC position
1693 * @iter: iterator function. Should be either mtd_ooblayout_free or
1694 * mtd_ooblayout_ecc depending on the region type you're searching for
1695 *
1696 * This function returns the section id and oobregion information of a
1697 * specific byte. For example, say you want to know where the 4th ECC byte is
1698 * stored, you'll use:
1699 *
1700 * mtd_ooblayout_find_region(mtd, 3, &section, &oobregion, mtd_ooblayout_ecc);
1701 *
1702 * Returns zero on success, a negative error code otherwise.
1703 */
1704static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
1705 int *sectionp, struct mtd_oob_region *oobregion,
1706 int (*iter)(struct mtd_info *,
1707 int section,
1708 struct mtd_oob_region *oobregion))
1709{
1710 int pos = 0, ret, section = 0;
1711
1712 memset(oobregion, 0, sizeof(*oobregion));
1713
1714 while (1) {
1715 ret = iter(mtd, section, oobregion);
1716 if (ret)
1717 return ret;
1718
1719 if (pos + oobregion->length > byte)
1720 break;
1721
1722 pos += oobregion->length;
1723 section++;
1724 }
1725
1726 /*
1727	 * Adjust region info to make it start at the beginning of the
1728	 * 'start' byte.
1729 */
1730 oobregion->offset += byte - pos;
1731 oobregion->length -= byte - pos;
1732 *sectionp = section;
1733
1734 return 0;
1735}
1736
1737/**
1738 * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific
1739 * ECC byte
1740 * @mtd: mtd info structure
1741 * @eccbyte: the byte we are searching for
1742 * @section: pointer where the section id will be stored
1743 * @oobregion: OOB region information
1744 *
1745 * Works like mtd_ooblayout_find_region() except it searches for a specific ECC
1746 * byte.
1747 *
1748 * Returns zero on success, a negative error code otherwise.
1749 */
1750int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
1751 int *section,
1752 struct mtd_oob_region *oobregion)
1753{
1754 return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
1755 mtd_ooblayout_ecc);
1756}
1757EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion);
1758
1759/**
1760 * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer
1761 * @mtd: mtd info structure
1762 * @buf: destination buffer to store OOB bytes
1763 * @oobbuf: OOB buffer
1764 * @start: first byte to retrieve
1765 * @nbytes: number of bytes to retrieve
1766 * @iter: section iterator
1767 *
1768 * Extract bytes attached to a specific category (ECC or free)
1769 * from the OOB buffer and copy them into buf.
1770 *
1771 * Returns zero on success, a negative error code otherwise.
1772 */
1773static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
1774 const u8 *oobbuf, int start, int nbytes,
1775 int (*iter)(struct mtd_info *,
1776 int section,
1777 struct mtd_oob_region *oobregion))
1778{
1779 struct mtd_oob_region oobregion;
1780 int section, ret;
1781
1782	ret = mtd_ooblayout_find_region(mtd, start, &section,
1783 &oobregion, iter);
1784
1785 while (!ret) {
1786 int cnt;
1787
1788 cnt = min_t(int, nbytes, oobregion.length);
1789 memcpy(buf, oobbuf + oobregion.offset, cnt);
1790 buf += cnt;
1791 nbytes -= cnt;
1792
1793 if (!nbytes)
1794 break;
1795
1796 ret = iter(mtd, ++section, &oobregion);
1797 }
1798
1799 return ret;
1800}
1801
1802/**
1803 * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer
1804 * @mtd: mtd info structure
1805 * @buf: source buffer to get OOB bytes from
1806 * @oobbuf: OOB buffer
1807 * @start: first OOB byte to set
1808 * @nbytes: number of OOB bytes to set
1809 * @iter: section iterator
1810 *
1811 * Fill the OOB buffer with data provided in buf. The category (ECC or free)
1812 * is selected by passing the appropriate iterator.
1813 *
1814 * Returns zero on success, a negative error code otherwise.
1815 */
1816static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
1817 u8 *oobbuf, int start, int nbytes,
1818 int (*iter)(struct mtd_info *,
1819 int section,
1820 struct mtd_oob_region *oobregion))
1821{
1822 struct mtd_oob_region oobregion;
1823 int section, ret;
1824
1825	ret = mtd_ooblayout_find_region(mtd, start, &section,
1826 &oobregion, iter);
1827
1828 while (!ret) {
1829 int cnt;
1830
1831 cnt = min_t(int, nbytes, oobregion.length);
1832 memcpy(oobbuf + oobregion.offset, buf, cnt);
1833 buf += cnt;
1834 nbytes -= cnt;
1835
1836 if (!nbytes)
1837 break;
1838
1839 ret = iter(mtd, ++section, &oobregion);
1840 }
1841
1842 return ret;
1843}
1844
1845/**
1846 * mtd_ooblayout_count_bytes - count the number of bytes in an OOB category
1847 * @mtd: mtd info structure
1848 * @iter: category iterator
1849 *
1850 * Count the number of bytes in a given category.
1851 *
1852 * Returns a positive value on success, a negative error code otherwise.
1853 */
1854static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
1855 int (*iter)(struct mtd_info *,
1856 int section,
1857 struct mtd_oob_region *oobregion))
1858{
1859 struct mtd_oob_region oobregion;
1860 int section = 0, ret, nbytes = 0;
1861
1862 while (1) {
1863 ret = iter(mtd, section++, &oobregion);
1864 if (ret) {
1865 if (ret == -ERANGE)
1866 ret = nbytes;
1867 break;
1868 }
1869
1870 nbytes += oobregion.length;
1871 }
1872
1873 return ret;
1874}
1875
1876/**
1877 * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer
1878 * @mtd: mtd info structure
1879 * @eccbuf: destination buffer to store ECC bytes
1880 * @oobbuf: OOB buffer
1881 * @start: first ECC byte to retrieve
1882 * @nbytes: number of ECC bytes to retrieve
1883 *
1884 * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes.
1885 *
1886 * Returns zero on success, a negative error code otherwise.
1887 */
1888int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
1889 const u8 *oobbuf, int start, int nbytes)
1890{
1891 return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
1892 mtd_ooblayout_ecc);
1893}
1894EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes);
1895
1896/**
1897 * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer
1898 * @mtd: mtd info structure
1899 * @eccbuf: source buffer to get ECC bytes from
1900 * @oobbuf: OOB buffer
1901 * @start: first ECC byte to set
1902 * @nbytes: number of ECC bytes to set
1903 *
1904 * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes.
1905 *
1906 * Returns zero on success, a negative error code otherwise.
1907 */
1908int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
1909 u8 *oobbuf, int start, int nbytes)
1910{
1911 return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
1912 mtd_ooblayout_ecc);
1913}
1914EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes);
1915
1916/**
1917 * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer
1918 * @mtd: mtd info structure
1919 * @databuf: destination buffer to store ECC bytes
1920 * @oobbuf: OOB buffer
1921 * @start: first ECC byte to retrieve
1922 * @nbytes: number of ECC bytes to retrieve
1923 *
1924 * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
1925 *
1926 * Returns zero on success, a negative error code otherwise.
1927 */
1928int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
1929 const u8 *oobbuf, int start, int nbytes)
1930{
1931 return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
1932 mtd_ooblayout_free);
1933}
1934EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);
1935
1936/**
1937 * mtd_ooblayout_set_databytes - set data bytes into the oob buffer
1938 * @mtd: mtd info structure
1939 * @databuf: source buffer to get data bytes from
1940 * @oobbuf: OOB buffer
1941 * @start: first data byte to set
1942 * @nbytes: number of data bytes to set
1943 *
1944 * Works like mtd_ooblayout_set_bytes(), except it acts on free bytes.
1945 *
1946 * Returns zero on success, a negative error code otherwise.
1947 */
1948int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
1949 u8 *oobbuf, int start, int nbytes)
1950{
1951 return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
1952 mtd_ooblayout_free);
1953}
1954EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes);
1955
1956/**
1957 * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB
1958 * @mtd: mtd info structure
1959 *
1960 * Works like mtd_ooblayout_count_bytes(), except it counts free bytes.
1961 *
1962 * Returns the number of free bytes on success, a negative error code otherwise.
1963 */
1964int mtd_ooblayout_count_freebytes(struct mtd_info *mtd)
1965{
1966 return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
1967}
1968EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes);
1969
1970/**
1971 * mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB
1972 * @mtd: mtd info structure
1973 *
1974 * Works like mtd_ooblayout_count_bytes(), except it counts ECC bytes.
1975 *
1976 * Returns the number of ECC bytes on success, a negative error code otherwise.
1977 */
1978int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd)
1979{
1980 return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
1981}
1982EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);
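/*
 * Illustrative sketch (assumption): the two counters above are commonly
 * used to size the buffers handed to the get/set helpers:
 *
 *	int nfree = mtd_ooblayout_count_freebytes(mtd);
 *	int necc = mtd_ooblayout_count_eccbytes(mtd);
 *	u8 *free_buf, *ecc_buf;
 *
 *	if (nfree < 0 || necc < 0)
 *		return nfree < 0 ? nfree : necc;
 *
 *	free_buf = kmalloc(nfree, GFP_KERNEL);
 *	ecc_buf = kmalloc(necc, GFP_KERNEL);
 *	if (!free_buf || !ecc_buf)
 *		goto out_free;
 */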
1983
1984/*
1985 * Methods to access the protection register area, present in some flash
1986 * devices. The user data is one-time programmable but the factory data is
1987 * read-only.
1988 */
1989int mtd_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
1990 struct otp_info *buf)
1991{
1992 struct mtd_info *master = mtd_get_master(mtd);
1993
1994 if (!master->_get_fact_prot_info)
1995 return -EOPNOTSUPP;
1996 if (!len)
1997 return 0;
1998 return master->_get_fact_prot_info(master, len, retlen, buf);
1999}
2000EXPORT_SYMBOL_GPL(mtd_get_fact_prot_info);
2001
2002int mtd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
2003 size_t *retlen, u_char *buf)
2004{
2005 struct mtd_info *master = mtd_get_master(mtd);
2006
2007 *retlen = 0;
2008 if (!master->_read_fact_prot_reg)
2009 return -EOPNOTSUPP;
2010 if (!len)
2011 return 0;
2012 return master->_read_fact_prot_reg(master, from, len, retlen, buf);
2013}
2014EXPORT_SYMBOL_GPL(mtd_read_fact_prot_reg);
2015
2016int mtd_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen,
2017 struct otp_info *buf)
2018{
2019 struct mtd_info *master = mtd_get_master(mtd);
2020
2021 if (!master->_get_user_prot_info)
2022 return -EOPNOTSUPP;
2023 if (!len)
2024 return 0;
2025 return master->_get_user_prot_info(master, len, retlen, buf);
2026}
2027EXPORT_SYMBOL_GPL(mtd_get_user_prot_info);
2028
2029int mtd_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len,
2030 size_t *retlen, u_char *buf)
2031{
2032 struct mtd_info *master = mtd_get_master(mtd);
2033
2034 *retlen = 0;
2035 if (!master->_read_user_prot_reg)
2036 return -EOPNOTSUPP;
2037 if (!len)
2038 return 0;
2039 return master->_read_user_prot_reg(master, from, len, retlen, buf);
2040}
2041EXPORT_SYMBOL_GPL(mtd_read_user_prot_reg);
2042
2043int mtd_write_user_prot_reg(struct mtd_info *mtd, loff_t to, size_t len,
2044 size_t *retlen, const u_char *buf)
2045{
2046 struct mtd_info *master = mtd_get_master(mtd);
2047 int ret;
2048
2049 *retlen = 0;
2050 if (!master->_write_user_prot_reg)
2051 return -EOPNOTSUPP;
2052 if (!len)
2053 return 0;
2054 ret = master->_write_user_prot_reg(master, to, len, retlen, buf);
2055 if (ret)
2056 return ret;
2057
2058 /*
2059	 * If no data could be written at all, the OTP area is full and we
2060	 * must return -ENOSPC.
2061 */
2062 return (*retlen) ? 0 : -ENOSPC;
2063}
2064EXPORT_SYMBOL_GPL(mtd_write_user_prot_reg);
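/*
 * Illustrative sketch (assumption, not from this file): a caller probing
 * the user OTP regions and then programming a hypothetical "serial"
 * buffer into them:
 *
 *	struct otp_info otp[4];
 *	u8 serial[16];
 *	size_t retlen;
 *	int ret;
 *
 *	ret = mtd_get_user_prot_info(mtd, sizeof(otp), &retlen, otp);
 *	if (ret)
 *		return ret;
 *
 *	ret = mtd_write_user_prot_reg(mtd, 0, sizeof(serial), &retlen, serial);
 *	if (ret == -ENOSPC)
 *		return ret;	/- nothing could be programmed, see comment above -/
 */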
2065
2066int mtd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
2067{
2068 struct mtd_info *master = mtd_get_master(mtd);
2069
2070 if (!master->_lock_user_prot_reg)
2071 return -EOPNOTSUPP;
2072 if (!len)
2073 return 0;
2074 return master->_lock_user_prot_reg(master, from, len);
2075}
2076EXPORT_SYMBOL_GPL(mtd_lock_user_prot_reg);
2077
2078int mtd_erase_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len)
2079{
2080 struct mtd_info *master = mtd_get_master(mtd);
2081
2082 if (!master->_erase_user_prot_reg)
2083 return -EOPNOTSUPP;
2084 if (!len)
2085 return 0;
2086 return master->_erase_user_prot_reg(master, from, len);
2087}
2088EXPORT_SYMBOL_GPL(mtd_erase_user_prot_reg);
2089
2090/* Chip-supported device locking */
2091int mtd_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2092{
2093 struct mtd_info *master = mtd_get_master(mtd);
2094
2095 if (!master->_lock)
2096 return -EOPNOTSUPP;
2097 if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2098 return -EINVAL;
2099 if (!len)
2100 return 0;
2101
2102 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2103 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2104 len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2105 }
2106
2107 return master->_lock(master, mtd_get_master_ofs(mtd, ofs), len);
2108}
2109EXPORT_SYMBOL_GPL(mtd_lock);
2110
2111int mtd_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2112{
2113 struct mtd_info *master = mtd_get_master(mtd);
2114
2115 if (!master->_unlock)
2116 return -EOPNOTSUPP;
2117 if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2118 return -EINVAL;
2119 if (!len)
2120 return 0;
2121
2122 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2123 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2124 len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2125 }
2126
2127 return master->_unlock(master, mtd_get_master_ofs(mtd, ofs), len);
2128}
2129EXPORT_SYMBOL_GPL(mtd_unlock);
2130
2131int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2132{
2133 struct mtd_info *master = mtd_get_master(mtd);
2134
2135 if (!master->_is_locked)
2136 return -EOPNOTSUPP;
2137 if (ofs < 0 || ofs >= mtd->size || len > mtd->size - ofs)
2138 return -EINVAL;
2139 if (!len)
2140 return 0;
2141
2142 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION) {
2143 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2144 len = (u64)mtd_div_by_eb(len, mtd) * master->erasesize;
2145 }
2146
2147 return master->_is_locked(master, mtd_get_master_ofs(mtd, ofs), len);
2148}
2149EXPORT_SYMBOL_GPL(mtd_is_locked);
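/*
 * Illustrative sketch (assumption): a flash update path that unlocks the
 * region it is about to rewrite and re-locks it afterwards. "ofs" and
 * "len" are assumed to be erase-block aligned:
 *
 *	if (mtd_is_locked(mtd, ofs, len) > 0) {
 *		ret = mtd_unlock(mtd, ofs, len);
 *		if (ret)
 *			return ret;
 *	}
 *
 *	/- ... erase and rewrite the region here ... -/
 *
 *	ret = mtd_lock(mtd, ofs, len);
 */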
2150
2151int mtd_block_isreserved(struct mtd_info *mtd, loff_t ofs)
2152{
2153 struct mtd_info *master = mtd_get_master(mtd);
2154
2155 if (ofs < 0 || ofs >= mtd->size)
2156 return -EINVAL;
2157 if (!master->_block_isreserved)
2158 return 0;
2159
2160 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2161 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2162
2163 return master->_block_isreserved(master, mtd_get_master_ofs(mtd, ofs));
2164}
2165EXPORT_SYMBOL_GPL(mtd_block_isreserved);
2166
2167int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
2168{
2169 struct mtd_info *master = mtd_get_master(mtd);
2170
2171 if (ofs < 0 || ofs >= mtd->size)
2172 return -EINVAL;
2173 if (!master->_block_isbad)
2174 return 0;
2175
2176 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2177 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2178
2179 return master->_block_isbad(master, mtd_get_master_ofs(mtd, ofs));
2180}
2181EXPORT_SYMBOL_GPL(mtd_block_isbad);
2182
2183int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
2184{
2185 struct mtd_info *master = mtd_get_master(mtd);
2186 int ret;
2187
2188 if (!master->_block_markbad)
2189 return -EOPNOTSUPP;
2190 if (ofs < 0 || ofs >= mtd->size)
2191 return -EINVAL;
2192 if (!(mtd->flags & MTD_WRITEABLE))
2193 return -EROFS;
2194
2195 if (mtd->flags & MTD_SLC_ON_MLC_EMULATION)
2196 ofs = (loff_t)mtd_div_by_eb(ofs, mtd) * master->erasesize;
2197
2198 ret = master->_block_markbad(master, mtd_get_master_ofs(mtd, ofs));
2199 if (ret)
2200 return ret;
2201
2202 while (mtd->parent) {
2203 mtd->ecc_stats.badblocks++;
2204 mtd = mtd->parent;
2205 }
2206
2207 return 0;
2208}
2209EXPORT_SYMBOL_GPL(mtd_block_markbad);
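/*
 * Illustrative sketch (assumption): walking a device eraseblock by
 * eraseblock and skipping blocks that are reserved or bad, as a
 * UBI-like scanner would do:
 *
 *	loff_t ofs;
 *
 *	for (ofs = 0; ofs < mtd->size; ofs += mtd->erasesize) {
 *		if (mtd_block_isreserved(mtd, ofs) > 0 ||
 *		    mtd_block_isbad(mtd, ofs) > 0)
 *			continue;
 *
 *		/- ... use this eraseblock ... -/
 *	}
 */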
2210
2211/*
2212 * default_mtd_writev - the default writev method
2213 * @mtd: mtd device description object pointer
2214 * @vecs: the vectors to write
2215 * @count: count of vectors in @vecs
2216 * @to: the MTD device offset to write to
2217 * @retlen: on exit contains the count of bytes written to the MTD device.
2218 *
2219 * This function returns zero in case of success and a negative error code in
2220 * case of failure.
2221 */
2222static int default_mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
2223 unsigned long count, loff_t to, size_t *retlen)
2224{
2225 unsigned long i;
2226 size_t totlen = 0, thislen;
2227 int ret = 0;
2228
2229 for (i = 0; i < count; i++) {
2230 if (!vecs[i].iov_len)
2231 continue;
2232 ret = mtd_write(mtd, to, vecs[i].iov_len, &thislen,
2233 vecs[i].iov_base);
2234 totlen += thislen;
2235 if (ret || thislen != vecs[i].iov_len)
2236 break;
2237 to += vecs[i].iov_len;
2238 }
2239 *retlen = totlen;
2240 return ret;
2241}
2242
2243/*
2244 * mtd_writev - the vector-based MTD write method
2245 * @mtd: mtd device description object pointer
2246 * @vecs: the vectors to write
2247 * @count: count of vectors in @vecs
2248 * @to: the MTD device offset to write to
2249 * @retlen: on exit contains the count of bytes written to the MTD device.
2250 *
2251 * This function returns zero in case of success and a negative error code in
2252 * case of failure.
2253 */
2254int mtd_writev(struct mtd_info *mtd, const struct kvec *vecs,
2255 unsigned long count, loff_t to, size_t *retlen)
2256{
2257 struct mtd_info *master = mtd_get_master(mtd);
2258
2259 *retlen = 0;
2260 if (!(mtd->flags & MTD_WRITEABLE))
2261 return -EROFS;
2262
2263 if (!master->_writev)
2264 return default_mtd_writev(mtd, vecs, count, to, retlen);
2265
2266 return master->_writev(master, vecs, count,
2267 mtd_get_master_ofs(mtd, to), retlen);
2268}
2269EXPORT_SYMBOL_GPL(mtd_writev);
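/*
 * Illustrative sketch (assumption): writing a header and a payload that
 * live in two separate kernel buffers with a single mtd_writev() call.
 * "hdr", "payload", "payload_len" and "to" are hypothetical:
 *
 *	struct kvec vecs[2] = {
 *		{ .iov_base = hdr, .iov_len = sizeof(*hdr) },
 *		{ .iov_base = payload, .iov_len = payload_len },
 *	};
 *	size_t retlen;
 *	int ret;
 *
 *	ret = mtd_writev(mtd, vecs, ARRAY_SIZE(vecs), to, &retlen);
 *	if (!ret && retlen != sizeof(*hdr) + payload_len)
 *		ret = -EIO;
 */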
2270
2271/**
2272 * mtd_kmalloc_up_to - allocate a contiguous buffer up to the specified size
2273 * @mtd: mtd device description object pointer
2274 * @size: a pointer to the ideal or maximum size of the allocation, points
2275 * to the actual allocation size on success.
2276 *
2277 * This routine attempts to allocate a contiguous kernel buffer up to
2278 * the specified size, backing off the size of the request exponentially
2279 * until the request succeeds or until the allocation size falls below
2280 * the system page size. This attempts to make sure it does not adversely
2281 * impact system performance, so when allocating more than one page, we
2282 * ask the memory allocator to avoid re-trying, swapping, writing back
2283 * or performing I/O.
2284 *
2285 * Note, this function also makes sure that the size of the allocated buffer is
2286 * a multiple of the MTD device's min. I/O unit, i.e. the "mtd->writesize" value.
2287 *
2288 * This is called, for example, by mtd_{read,write} and jffs2_scan_medium,
2289 * to handle smaller (i.e. degraded) buffer allocations under low- or
2290 * fragmented-memory situations where such reduced allocations, from a
2291 * requested ideal, are allowed.
2292 *
2293 * Returns a pointer to the allocated buffer on success; otherwise, NULL.
2294 */
2295void *mtd_kmalloc_up_to(const struct mtd_info *mtd, size_t *size)
2296{
2297 gfp_t flags = __GFP_NOWARN | __GFP_DIRECT_RECLAIM | __GFP_NORETRY;
2298 size_t min_alloc = max_t(size_t, mtd->writesize, PAGE_SIZE);
2299 void *kbuf;
2300
2301 *size = min_t(size_t, *size, KMALLOC_MAX_SIZE);
2302
2303 while (*size > min_alloc) {
2304 kbuf = kmalloc(*size, flags);
2305 if (kbuf)
2306 return kbuf;
2307
2308 *size >>= 1;
2309 *size = ALIGN(*size, mtd->writesize);
2310 }
2311
2312 /*
2313 * For the last resort allocation allow 'kmalloc()' to do all sorts of
2314 * things (write-back, dropping caches, etc) by using GFP_KERNEL.
2315 */
2316 return kmalloc(*size, GFP_KERNEL);
2317}
2318EXPORT_SYMBOL_GPL(mtd_kmalloc_up_to);
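/*
 * Illustrative sketch (assumption): asking for a whole-eraseblock buffer
 * but accepting whatever smaller, writesize-aligned size the allocator
 * can provide, then working in chunks of the returned size:
 *
 *	size_t size = mtd->erasesize;
 *	void *buf = mtd_kmalloc_up_to(mtd, &size);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *
 *	/- "size" now holds the size actually allocated; read or write the
 *	   eraseblock in chunks of at most "size" bytes. -/
 *
 *	kfree(buf);
 */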
2319
2320#ifdef CONFIG_PROC_FS
2321
2322/*====================================================================*/
2323/* Support for /proc/mtd */
2324
2325static int mtd_proc_show(struct seq_file *m, void *v)
2326{
2327 struct mtd_info *mtd;
2328
2329 seq_puts(m, "dev: size erasesize name\n");
2330 mutex_lock(&mtd_table_mutex);
2331 mtd_for_each_device(mtd) {
2332 seq_printf(m, "mtd%d: %8.8llx %8.8x \"%s\"\n",
2333 mtd->index, (unsigned long long)mtd->size,
2334 mtd->erasesize, mtd->name);
2335 }
2336 mutex_unlock(&mtd_table_mutex);
2337 return 0;
2338}
2339#endif /* CONFIG_PROC_FS */
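/*
 * Example of the /proc/mtd output produced by mtd_proc_show() above
 * (device names and sizes are made up for illustration):
 *
 *	dev: size erasesize name
 *	mtd0: 00100000 00020000 "u-boot"
 *	mtd1: 07f00000 00020000 "rootfs"
 */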
2340
2341/*====================================================================*/
2342/* Init code */
2343
2344static struct backing_dev_info * __init mtd_bdi_init(const char *name)
2345{
2346 struct backing_dev_info *bdi;
2347 int ret;
2348
2349 bdi = bdi_alloc(NUMA_NO_NODE);
2350 if (!bdi)
2351 return ERR_PTR(-ENOMEM);
2352 bdi->ra_pages = 0;
2353 bdi->io_pages = 0;
2354
2355 /*
2356	 * We append a '-0' suffix to the name to keep the same name format as
2357	 * before. Since this is called only once, the name is unique.
2358 */
2359 ret = bdi_register(bdi, "%.28s-0", name);
2360 if (ret)
2361 bdi_put(bdi);
2362
2363 return ret ? ERR_PTR(ret) : bdi;
2364}
2365
2366static struct proc_dir_entry *proc_mtd;
2367
2368static int __init init_mtd(void)
2369{
2370 int ret;
2371
2372 ret = class_register(&mtd_class);
2373 if (ret)
2374 goto err_reg;
2375
2376 mtd_bdi = mtd_bdi_init("mtd");
2377 if (IS_ERR(mtd_bdi)) {
2378 ret = PTR_ERR(mtd_bdi);
2379 goto err_bdi;
2380 }
2381
2382 proc_mtd = proc_create_single("mtd", 0, NULL, mtd_proc_show);
2383
2384 ret = init_mtdchar();
2385 if (ret)
2386 goto out_procfs;
2387
2388 dfs_dir_mtd = debugfs_create_dir("mtd", NULL);
2389
2390 return 0;
2391
2392out_procfs:
2393 if (proc_mtd)
2394 remove_proc_entry("mtd", NULL);
2395 bdi_put(mtd_bdi);
2396err_bdi:
2397 class_unregister(&mtd_class);
2398err_reg:
2399 pr_err("Error registering mtd class or bdi: %d\n", ret);
2400 return ret;
2401}
2402
2403static void __exit cleanup_mtd(void)
2404{
2405 debugfs_remove_recursive(dfs_dir_mtd);
2406 cleanup_mtdchar();
2407 if (proc_mtd)
2408 remove_proc_entry("mtd", NULL);
2409 class_unregister(&mtd_class);
2410 bdi_put(mtd_bdi);
2411 idr_destroy(&mtd_idr);
2412}
2413
2414module_init(init_mtd);
2415module_exit(cleanup_mtd);
2416
2417MODULE_LICENSE("GPL");
2418MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
2419MODULE_DESCRIPTION("Core MTD registration and access routines");