/*
 * Copyright (c) International Business Machines Corp., 2006
 * Copyright (c) Nokia Corporation, 2007
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Author: Artem Bityutskiy (Битюцкий Артём),
 *         Frank Haverkamp
 */

/*
 * This file includes UBI initialization and building of UBI devices.
 *
 * When UBI is initialized, it attaches all the MTD devices specified as the
 * module load parameters or the kernel boot parameters. If MTD devices were
 * not specified, UBI does not attach any MTD device, but it is possible to do
 * it later using the "UBI control device".
 *
 * At the moment we only attach UBI devices by scanning, which will become a
 * bottleneck when flashes reach a certain large size. Then one may improve UBI
 * and add other methods, although it does not seem to be easy to do.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/namei.h>
#include <linux/stat.h>
#include <linux/miscdevice.h>
#include <linux/log2.h>
#include <linux/kthread.h>
#include <linux/reboot.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include "ubi.h"

/* Maximum length of the 'mtd=' parameter */
#define MTD_PARAM_LEN_MAX 64

/**
 * struct mtd_dev_param - MTD device parameter description data structure.
 * @name: MTD character device node path, MTD device name, or MTD device number
 *        string
 * @vid_hdr_offs: VID header offset
 */
struct mtd_dev_param {
	char name[MTD_PARAM_LEN_MAX];
	int vid_hdr_offs;
};

/* Number of elements set in the @mtd_dev_param array */
static int __initdata mtd_devs;

/* MTD devices specification parameters */
static struct mtd_dev_param __initdata mtd_dev_param[UBI_MAX_DEVICES];

/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
struct class *ubi_class;

/* Slab cache for wear-leveling entries */
struct kmem_cache *ubi_wl_entry_slab;

/* UBI control character device */
static struct miscdevice ubi_ctrl_cdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "ubi_ctrl",
	.fops = &ubi_ctrl_cdev_operations,
};

/* All UBI devices in system */
static struct ubi_device *ubi_devices[UBI_MAX_DEVICES];

/* Serializes UBI devices creations and removals */
DEFINE_MUTEX(ubi_devices_mutex);

/* Protects @ubi_devices and @ubi->ref_count */
static DEFINE_SPINLOCK(ubi_devices_lock);

/* "Show" method for files in '/<sysfs>/class/ubi/' */
static ssize_t ubi_version_show(struct class *class, struct class_attribute *attr,
				char *buf)
{
	return sprintf(buf, "%d\n", UBI_VERSION);
}

/* UBI version attribute ('/<sysfs>/class/ubi/version') */
static struct class_attribute ubi_version =
	__ATTR(version, S_IRUGO, ubi_version_show, NULL);

static ssize_t dev_attribute_show(struct device *dev,
				  struct device_attribute *attr, char *buf);

/* UBI device attributes (correspond to files in '/<sysfs>/class/ubi/ubiX') */
static struct device_attribute dev_eraseblock_size =
	__ATTR(eraseblock_size, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_avail_eraseblocks =
	__ATTR(avail_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_total_eraseblocks =
	__ATTR(total_eraseblocks, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_volumes_count =
	__ATTR(volumes_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_max_ec =
	__ATTR(max_ec, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_reserved_for_bad =
	__ATTR(reserved_for_bad, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_bad_peb_count =
	__ATTR(bad_peb_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_max_vol_count =
	__ATTR(max_vol_count, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_min_io_size =
	__ATTR(min_io_size, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_bgt_enabled =
	__ATTR(bgt_enabled, S_IRUGO, dev_attribute_show, NULL);
static struct device_attribute dev_mtd_num =
	__ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL);

/**
 * ubi_volume_notify - send a volume change notification.
 * @ubi: UBI device description object
 * @vol: volume description object of the changed volume
 * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
 *
 * This is a helper function which notifies all subscribers about a volume
 * change event (creation, removal, re-sizing, re-naming, updating). Returns
 * zero in case of success and a negative error code in case of failure.
 */
int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
{
	struct ubi_notification nt;

	ubi_do_get_device_info(ubi, &nt.di);
	ubi_do_get_volume_info(ubi, vol, &nt.vi);
	return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt);
}

/**
 * ubi_notify_all - send a notification to all volumes.
 * @ubi: UBI device description object
 * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc)
 * @nb: the notifier to call
 *
 * This function walks all volumes of UBI device @ubi and sends the @ntype
 * notification for each volume. If @nb is %NULL, then all registered notifiers
 * are called, otherwise only the @nb notifier is called. Returns the number of
 * sent notifications.
 */
int ubi_notify_all(struct ubi_device *ubi, int ntype, struct notifier_block *nb)
{
	struct ubi_notification nt;
	int i, count = 0;

	ubi_do_get_device_info(ubi, &nt.di);

	mutex_lock(&ubi->device_mutex);
	for (i = 0; i < ubi->vtbl_slots; i++) {
		/*
		 * Since the @ubi->device_mutex is held, and we are not going
		 * to change @ubi->volumes, we do not have to lock
		 * @ubi->volumes_lock.
		 */
		if (!ubi->volumes[i])
			continue;

		ubi_do_get_volume_info(ubi, ubi->volumes[i], &nt.vi);
		if (nb)
			nb->notifier_call(nb, ntype, &nt);
		else
			blocking_notifier_call_chain(&ubi_notifiers, ntype,
						     &nt);
		count += 1;
	}
	mutex_unlock(&ubi->device_mutex);

	return count;
}

/**
 * ubi_enumerate_volumes - send "add" notification for all existing volumes.
 * @nb: the notifier to call
 *
 * This function walks all UBI devices and volumes and sends the
 * %UBI_VOLUME_ADDED notification for each volume. If @nb is %NULL, then all
 * registered notifiers are called, otherwise only the @nb notifier is called.
 * Returns the number of sent notifications.
 */
int ubi_enumerate_volumes(struct notifier_block *nb)
{
	int i, count = 0;

	/*
	 * Since the @ubi_devices_mutex is locked, and we are not going to
	 * change @ubi_devices, we do not have to lock @ubi_devices_lock.
	 */
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		struct ubi_device *ubi = ubi_devices[i];

		if (!ubi)
			continue;
		count += ubi_notify_all(ubi, UBI_VOLUME_ADDED, nb);
	}

	return count;
}

/**
 * ubi_get_device - get UBI device.
 * @ubi_num: UBI device number
 *
 * This function returns the UBI device description object for UBI device
 * number @ubi_num, or %NULL if the device does not exist. This function
 * increases the device reference count to prevent removal of the device. In
 * other words, the device cannot be removed if its reference count is not
 * zero.
 */
struct ubi_device *ubi_get_device(int ubi_num)
{
	struct ubi_device *ubi;

	spin_lock(&ubi_devices_lock);
	ubi = ubi_devices[ubi_num];
	if (ubi) {
		ubi_assert(ubi->ref_count >= 0);
		ubi->ref_count += 1;
		get_device(&ubi->dev);
	}
	spin_unlock(&ubi_devices_lock);

	return ubi;
}

/**
 * ubi_put_device - drop an UBI device reference.
 * @ubi: UBI device description object
 */
void ubi_put_device(struct ubi_device *ubi)
{
	spin_lock(&ubi_devices_lock);
	ubi->ref_count -= 1;
	put_device(&ubi->dev);
	spin_unlock(&ubi_devices_lock);
}

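/*
 * Illustration of the reference-counting pair above (a sketch, not code from
 * this file; the device number 0 is only an example): a caller pins the
 * device for the duration of an operation, then drops the reference.
 *
 *	struct ubi_device *ubi = ubi_get_device(0);
 *	if (!ubi)
 *		return -ENODEV;
 *	... work with @ubi ...
 *	ubi_put_device(ubi);
 *
 * 'dev_attribute_show()' below follows exactly this pattern.
 */
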
/**
 * ubi_get_by_major - get UBI device by character device major number.
 * @major: major number
 *
 * This function is similar to 'ubi_get_device()', but it searches the device
 * by its major number.
 */
struct ubi_device *ubi_get_by_major(int major)
{
	int i;
	struct ubi_device *ubi;

	spin_lock(&ubi_devices_lock);
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		ubi = ubi_devices[i];
		if (ubi && MAJOR(ubi->cdev.dev) == major) {
			ubi_assert(ubi->ref_count >= 0);
			ubi->ref_count += 1;
			get_device(&ubi->dev);
			spin_unlock(&ubi_devices_lock);
			return ubi;
		}
	}
	spin_unlock(&ubi_devices_lock);

	return NULL;
}

/**
 * ubi_major2num - get UBI device number by character device major number.
 * @major: major number
 *
 * This function searches for the UBI device by its major number. If the UBI
 * device is not found, this function returns %-ENODEV, otherwise the UBI
 * device number is returned.
 */
int ubi_major2num(int major)
{
	int i, ubi_num = -ENODEV;

	spin_lock(&ubi_devices_lock);
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		struct ubi_device *ubi = ubi_devices[i];

		if (ubi && MAJOR(ubi->cdev.dev) == major) {
			ubi_num = ubi->ubi_num;
			break;
		}
	}
	spin_unlock(&ubi_devices_lock);

	return ubi_num;
}

/* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */
static ssize_t dev_attribute_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	ssize_t ret;
	struct ubi_device *ubi;

	/*
	 * The below code looks weird, but it actually makes sense. We get the
	 * UBI device reference from the contained 'struct ubi_device'. But it
	 * is unclear if the device was removed or not yet. Indeed, if the
	 * device was removed before we increased its reference count,
	 * 'ubi_get_device()' will return %NULL and we fail with %-ENODEV.
	 *
	 * Remember, 'struct ubi_device' is freed in the release function, so
	 * we still can use 'ubi->ubi_num'.
	 */
	ubi = container_of(dev, struct ubi_device, dev);
	ubi = ubi_get_device(ubi->ubi_num);
	if (!ubi)
		return -ENODEV;

	if (attr == &dev_eraseblock_size)
		ret = sprintf(buf, "%d\n", ubi->leb_size);
	else if (attr == &dev_avail_eraseblocks)
		ret = sprintf(buf, "%d\n", ubi->avail_pebs);
	else if (attr == &dev_total_eraseblocks)
		ret = sprintf(buf, "%d\n", ubi->good_peb_count);
	else if (attr == &dev_volumes_count)
		ret = sprintf(buf, "%d\n", ubi->vol_count - UBI_INT_VOL_COUNT);
	else if (attr == &dev_max_ec)
		ret = sprintf(buf, "%d\n", ubi->max_ec);
	else if (attr == &dev_reserved_for_bad)
		ret = sprintf(buf, "%d\n", ubi->beb_rsvd_pebs);
	else if (attr == &dev_bad_peb_count)
		ret = sprintf(buf, "%d\n", ubi->bad_peb_count);
	else if (attr == &dev_max_vol_count)
		ret = sprintf(buf, "%d\n", ubi->vtbl_slots);
	else if (attr == &dev_min_io_size)
		ret = sprintf(buf, "%d\n", ubi->min_io_size);
	else if (attr == &dev_bgt_enabled)
		ret = sprintf(buf, "%d\n", ubi->thread_enabled);
	else if (attr == &dev_mtd_num)
		ret = sprintf(buf, "%d\n", ubi->mtd->index);
	else
		ret = -EINVAL;

	ubi_put_device(ubi);
	return ret;
}

static void dev_release(struct device *dev)
{
	struct ubi_device *ubi = container_of(dev, struct ubi_device, dev);

	kfree(ubi);
}

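/*
 * For reference (an illustration, not code from this file): with the class
 * and attributes defined above, and the files created by 'ubi_sysfs_init()'
 * below, UBI device 0 shows up in sysfs roughly as
 *
 *	/sys/class/ubi/version
 *	/sys/class/ubi/ubi0/eraseblock_size
 *	/sys/class/ubi/ubi0/avail_eraseblocks
 *	/sys/class/ubi/ubi0/mtd_num
 *	...
 *
 * all read-only (S_IRUGO) and backed by 'dev_attribute_show()' above.
 */
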
/**
 * ubi_sysfs_init - initialize sysfs for an UBI device.
 * @ubi: UBI device description object
 * @ref: set to %1 on exit in case of failure if a reference to @ubi->dev was
 *       taken
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int ubi_sysfs_init(struct ubi_device *ubi, int *ref)
{
	int err;

	ubi->dev.release = dev_release;
	ubi->dev.devt = ubi->cdev.dev;
	ubi->dev.class = ubi_class;
	dev_set_name(&ubi->dev, UBI_NAME_STR"%d", ubi->ubi_num);
	err = device_register(&ubi->dev);
	if (err)
		return err;

	*ref = 1;
	err = device_create_file(&ubi->dev, &dev_eraseblock_size);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_avail_eraseblocks);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_total_eraseblocks);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_volumes_count);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_max_ec);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_reserved_for_bad);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_bad_peb_count);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_max_vol_count);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_min_io_size);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_bgt_enabled);
	if (err)
		return err;
	err = device_create_file(&ubi->dev, &dev_mtd_num);
	return err;
}

/**
 * ubi_sysfs_close - close sysfs for an UBI device.
 * @ubi: UBI device description object
 */
static void ubi_sysfs_close(struct ubi_device *ubi)
{
	device_remove_file(&ubi->dev, &dev_mtd_num);
	device_remove_file(&ubi->dev, &dev_bgt_enabled);
	device_remove_file(&ubi->dev, &dev_min_io_size);
	device_remove_file(&ubi->dev, &dev_max_vol_count);
	device_remove_file(&ubi->dev, &dev_bad_peb_count);
	device_remove_file(&ubi->dev, &dev_reserved_for_bad);
	device_remove_file(&ubi->dev, &dev_max_ec);
	device_remove_file(&ubi->dev, &dev_volumes_count);
	device_remove_file(&ubi->dev, &dev_total_eraseblocks);
	device_remove_file(&ubi->dev, &dev_avail_eraseblocks);
	device_remove_file(&ubi->dev, &dev_eraseblock_size);
	device_unregister(&ubi->dev);
}

/**
 * kill_volumes - destroy all user volumes.
 * @ubi: UBI device description object
 */
static void kill_volumes(struct ubi_device *ubi)
{
	int i;

	for (i = 0; i < ubi->vtbl_slots; i++)
		if (ubi->volumes[i])
			ubi_free_volume(ubi, ubi->volumes[i]);
}

/**
 * uif_init - initialize user interfaces for an UBI device.
 * @ubi: UBI device description object
 * @ref: set to %1 on exit in case of failure if a reference to @ubi->dev was
 *       taken, otherwise set to %0
 *
 * This function initializes various user interfaces for an UBI device. If the
 * initialization fails at an early stage, this function frees all the
 * resources it allocated, returns an error, and @ref is set to %0. However,
 * if the initialization fails after the UBI device was registered in the
 * driver core subsystem, this function takes a reference to @ubi->dev, because
 * otherwise the release function ('dev_release()') would free the whole @ubi
 * object. The @ref argument is set to %1 in this case. The caller has to put
 * this reference.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int uif_init(struct ubi_device *ubi, int *ref)
{
	int i, err;
	dev_t dev;

	*ref = 0;
	sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num);

	/*
	 * Major numbers for the UBI character devices are allocated
	 * dynamically. Major numbers of volume character devices are
	 * equivalent to ones of the corresponding UBI character device. Minor
	 * numbers of UBI character devices are 0, while minor numbers of
	 * volume character devices start from 1. Thus, we allocate one major
	 * number and ubi->vtbl_slots + 1 minor numbers.
	 */
	err = alloc_chrdev_region(&dev, 0, ubi->vtbl_slots + 1, ubi->ubi_name);
	if (err) {
		ubi_err("cannot register UBI character devices");
		return err;
	}

	ubi_assert(MINOR(dev) == 0);
	cdev_init(&ubi->cdev, &ubi_cdev_operations);
	dbg_gen("%s major is %u", ubi->ubi_name, MAJOR(dev));
	ubi->cdev.owner = THIS_MODULE;

	err = cdev_add(&ubi->cdev, dev, 1);
	if (err) {
		ubi_err("cannot add character device");
		goto out_unreg;
	}

	err = ubi_sysfs_init(ubi, ref);
	if (err)
		goto out_sysfs;

	for (i = 0; i < ubi->vtbl_slots; i++)
		if (ubi->volumes[i]) {
			err = ubi_add_volume(ubi, ubi->volumes[i]);
			if (err) {
				ubi_err("cannot add volume %d", i);
				goto out_volumes;
			}
		}

	return 0;

out_volumes:
	kill_volumes(ubi);
out_sysfs:
	if (*ref)
		get_device(&ubi->dev);
	ubi_sysfs_close(ubi);
	cdev_del(&ubi->cdev);
out_unreg:
	unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
	ubi_err("cannot initialize UBI %s, error %d", ubi->ubi_name, err);
	return err;
}

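/*
 * To illustrate the numbering scheme described in uif_init() (a sketch; the
 * volume character devices themselves are set up by the volume management
 * code, not in this file): with the dynamically allocated major M,
 *
 *	/dev/ubi0   = MKDEV(M, 0) - the UBI device itself
 *	/dev/ubi0_0 = MKDEV(M, 1) - volume 0
 *	/dev/ubi0_1 = MKDEV(M, 2) - volume 1, and so on,
 *
 * i.e. volume @vol_id uses minor number @vol_id + 1.
 */
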
/**
 * uif_close - close user interfaces for an UBI device.
 * @ubi: UBI device description object
 *
 * Note, since this function un-registers UBI volume device objects (@vol->dev),
 * the memory allocated for the volumes is freed as well (in the release
 * function).
 */
static void uif_close(struct ubi_device *ubi)
{
	kill_volumes(ubi);
	ubi_sysfs_close(ubi);
	cdev_del(&ubi->cdev);
	unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
}

/**
 * free_internal_volumes - free internal volumes.
 * @ubi: UBI device description object
 */
static void free_internal_volumes(struct ubi_device *ubi)
{
	int i;

	for (i = ubi->vtbl_slots;
	     i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
		kfree(ubi->volumes[i]->eba_tbl);
		kfree(ubi->volumes[i]);
	}
}

/**
 * attach_by_scanning - attach an MTD device using scanning method.
 * @ubi: UBI device descriptor
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 *
 * Note, currently this is the only method to attach UBI devices. Hopefully in
 * the future we'll have more scalable attaching methods and avoid full media
 * scanning. But even in this case scanning will be needed as a fall-back
 * attaching method if there are some on-flash table corruptions.
 */
static int attach_by_scanning(struct ubi_device *ubi)
{
	int err;
	struct ubi_scan_info *si;

	si = ubi_scan(ubi);
	if (IS_ERR(si))
		return PTR_ERR(si);

	ubi->bad_peb_count = si->bad_peb_count;
	ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count;
	ubi->max_ec = si->max_ec;
	ubi->mean_ec = si->mean_ec;

	err = ubi_read_volume_table(ubi, si);
	if (err)
		goto out_si;

	err = ubi_wl_init_scan(ubi, si);
	if (err)
		goto out_vtbl;

	err = ubi_eba_init_scan(ubi, si);
	if (err)
		goto out_wl;

	ubi_scan_destroy_si(si);
	return 0;

out_wl:
	ubi_wl_close(ubi);
out_vtbl:
	free_internal_volumes(ubi);
	vfree(ubi->vtbl);
out_si:
	ubi_scan_destroy_si(si);
	return err;
}

/**
 * io_init - initialize I/O sub-system for a given UBI device.
 * @ubi: UBI device description object
 *
 * If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are
 * assumed:
 *   o EC header is always at offset zero - this cannot be changed;
 *   o VID header starts just after the EC header at the closest address
 *     aligned to @io->hdrs_min_io_size;
 *   o data starts just after the VID header at the closest address aligned to
 *     @io->min_io_size
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int io_init(struct ubi_device *ubi)
{
	if (ubi->mtd->numeraseregions != 0) {
		/*
		 * Some flashes have several erase regions. Different regions
		 * may have different eraseblock size and other
		 * characteristics. It looks like mostly multi-region flashes
		 * have one "main" region and one or more small regions to
		 * store boot loader code or boot parameters or whatever. I
		 * guess we should just pick the largest region. But this is
		 * not implemented.
		 */
		ubi_err("multiple regions, not implemented");
		return -EINVAL;
	}

	if (ubi->vid_hdr_offset < 0)
		return -EINVAL;

	/*
	 * Note, in this implementation we support MTD devices with 0x7FFFFFFF
	 * physical eraseblocks maximum.
	 */

	ubi->peb_size = ubi->mtd->erasesize;
	ubi->peb_count = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
	ubi->flash_size = ubi->mtd->size;

	if (ubi->mtd->block_isbad && ubi->mtd->block_markbad)
		ubi->bad_allowed = 1;

	if (ubi->mtd->type == MTD_NORFLASH) {
		ubi_assert(ubi->mtd->writesize == 1);
		ubi->nor_flash = 1;
	}

	ubi->min_io_size = ubi->mtd->writesize;
	ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;

	/*
	 * Make sure minimal I/O unit is power of 2. Note, there is no
	 * fundamental reason for this assumption. It is just an optimization
	 * which allows us to avoid costly division operations.
	 */
	if (!is_power_of_2(ubi->min_io_size)) {
		ubi_err("min. I/O unit (%d) is not power of 2",
			ubi->min_io_size);
		return -EINVAL;
	}

	ubi_assert(ubi->hdrs_min_io_size > 0);
	ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size);
	ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0);

	/* Calculate default aligned sizes of EC and VID headers */
	ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
	ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);

	dbg_msg("min_io_size %d", ubi->min_io_size);
	dbg_msg("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
	dbg_msg("ec_hdr_alsize %d", ubi->ec_hdr_alsize);
	dbg_msg("vid_hdr_alsize %d", ubi->vid_hdr_alsize);

	if (ubi->vid_hdr_offset == 0)
		/* Default offset */
		ubi->vid_hdr_offset = ubi->vid_hdr_aloffset =
				      ubi->ec_hdr_alsize;
	else {
		ubi->vid_hdr_aloffset = ubi->vid_hdr_offset &
					~(ubi->hdrs_min_io_size - 1);
		ubi->vid_hdr_shift = ubi->vid_hdr_offset -
					ubi->vid_hdr_aloffset;
	}

	/* Similar for the data offset */
	ubi->leb_start = ubi->vid_hdr_offset + UBI_EC_HDR_SIZE;
	ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);

	dbg_msg("vid_hdr_offset %d", ubi->vid_hdr_offset);
	dbg_msg("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
	dbg_msg("vid_hdr_shift %d", ubi->vid_hdr_shift);
	dbg_msg("leb_start %d", ubi->leb_start);

	/* The shift must be aligned to 32-bit boundary */
	if (ubi->vid_hdr_shift % 4) {
		ubi_err("unaligned VID header shift %d",
			ubi->vid_hdr_shift);
		return -EINVAL;
	}

	/* Check sanity */
	if (ubi->vid_hdr_offset < UBI_EC_HDR_SIZE ||
	    ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE ||
	    ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE ||
	    ubi->leb_start & (ubi->min_io_size - 1)) {
		ubi_err("bad VID header (%d) or data offsets (%d)",
			ubi->vid_hdr_offset, ubi->leb_start);
		return -EINVAL;
	}

	/*
	 * Set maximum amount of physical erroneous eraseblocks to be 10%.
	 * Erroneous PEBs are those which have read errors.
	 */
	ubi->max_erroneous = ubi->peb_count / 10;
	if (ubi->max_erroneous < 16)
		ubi->max_erroneous = 16;
	dbg_msg("max_erroneous %d", ubi->max_erroneous);

	/*
	 * It may happen that EC and VID headers are situated in one minimal
	 * I/O unit. In this case we can only accept this UBI image in
	 * read-only mode.
	 */
	if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) {
		ubi_warn("EC and VID headers are in the same minimal I/O unit, "
			 "switch to read-only mode");
		ubi->ro_mode = 1;
	}

	ubi->leb_size = ubi->peb_size - ubi->leb_start;

	if (!(ubi->mtd->flags & MTD_WRITEABLE)) {
		ubi_msg("MTD device %d is write-protected, attach in "
			"read-only mode", ubi->mtd->index);
		ubi->ro_mode = 1;
	}

	ubi_msg("physical eraseblock size: %d bytes (%d KiB)",
		ubi->peb_size, ubi->peb_size >> 10);
	ubi_msg("logical eraseblock size: %d bytes", ubi->leb_size);
	ubi_msg("smallest flash I/O unit: %d", ubi->min_io_size);
	if (ubi->hdrs_min_io_size != ubi->min_io_size)
		ubi_msg("sub-page size: %d",
			ubi->hdrs_min_io_size);
	ubi_msg("VID header offset: %d (aligned %d)",
		ubi->vid_hdr_offset, ubi->vid_hdr_aloffset);
	ubi_msg("data offset: %d", ubi->leb_start);

	/*
	 * Note, ideally, we have to initialize ubi->bad_peb_count here. But
	 * unfortunately, MTD does not provide this information. We would have
	 * to loop over all physical eraseblocks and invoke
	 * mtd->block_is_bad() for each physical eraseblock. So, we leave
	 * ubi->bad_peb_count uninitialized here and initialize it after
	 * scanning.
	 */

	return 0;
}

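/*
 * A worked example of the default layout computed above (an illustration with
 * assumed flash geometry, not taken from this file): for a NAND flash with
 * 2048-byte pages, 512-byte sub-pages and 128 KiB eraseblocks, and the
 * 64-byte EC/VID header sizes checked in ubi_init(),
 *
 *	min_io_size      = 2048
 *	hdrs_min_io_size = 512
 *	ec_hdr_alsize    = ALIGN(64, 512)        = 512
 *	vid_hdr_offset   = vid_hdr_aloffset      = 512, vid_hdr_shift = 0
 *	leb_start        = ALIGN(512 + 64, 2048) = 2048
 *	leb_size         = 131072 - 2048         = 129024 bytes
 */
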
/**
 * autoresize - re-size the volume which has the "auto-resize" flag set.
 * @ubi: UBI device description object
 * @vol_id: ID of the volume to re-size
 *
 * This function re-sizes the volume marked by the @UBI_VTBL_AUTORESIZE_FLG in
 * the volume table to the largest possible size. See comments in ubi-header.h
 * for more description of the flag. Returns zero in case of success and a
 * negative error code in case of failure.
 */
static int autoresize(struct ubi_device *ubi, int vol_id)
{
	struct ubi_volume_desc desc;
	struct ubi_volume *vol = ubi->volumes[vol_id];
	int err, old_reserved_pebs = vol->reserved_pebs;

	/*
	 * Clear the auto-resize flag in the volume in-memory copy of the
	 * volume table, and 'ubi_resize_volume()' will propagate this change
	 * to the flash.
	 */
	ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG;

	if (ubi->avail_pebs == 0) {
		struct ubi_vtbl_record vtbl_rec;

		/*
		 * No available PEBs to re-size the volume, clear the flag on
		 * flash and exit.
		 */
		memcpy(&vtbl_rec, &ubi->vtbl[vol_id],
		       sizeof(struct ubi_vtbl_record));
		err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
		if (err)
			ubi_err("cannot clean auto-resize flag for volume %d",
				vol_id);
	} else {
		desc.vol = vol;
		err = ubi_resize_volume(&desc,
					old_reserved_pebs + ubi->avail_pebs);
		if (err)
			ubi_err("cannot auto-resize volume %d", vol_id);
	}

	if (err)
		return err;

	ubi_msg("volume %d (\"%s\") re-sized from %d to %d LEBs", vol_id,
		vol->name, old_reserved_pebs, vol->reserved_pebs);
	return 0;
}

/**
 * ubi_reboot_notifier - halt UBI transactions immediately prior to a reboot.
 * @n: reboot notifier object
 * @state: SYS_RESTART, SYS_HALT, or SYS_POWER_OFF
 * @cmd: pointer to command string for RESTART2
 *
 * This function stops the UBI background thread so that the flash device
 * remains quiescent when Linux restarts the system. Any queued work will be
 * discarded, but this function will block until do_work() finishes if an
 * operation is already in progress.
 *
 * This function solves a real-life problem observed on NOR flashes when a
 * PEB erase operation starts, then the system is rebooted before the erase
 * finishes, and the boot loader gets confused and dies. So we prefer to
 * finish the ongoing operation before rebooting.
 */
static int ubi_reboot_notifier(struct notifier_block *n, unsigned long state,
			       void *cmd)
{
	struct ubi_device *ubi;

	ubi = container_of(n, struct ubi_device, reboot_notifier);
	if (ubi->bgt_thread)
		kthread_stop(ubi->bgt_thread);
	ubi_sync(ubi->ubi_num);
	return NOTIFY_DONE;
}

/**
 * ubi_attach_mtd_dev - attach an MTD device.
 * @mtd: MTD device description object
 * @ubi_num: number to assign to the new UBI device
 * @vid_hdr_offset: VID header offset
 *
 * This function attaches MTD device @mtd_dev to UBI and assigns @ubi_num
 * number to the newly created UBI device, unless @ubi_num is
 * %UBI_DEV_NUM_AUTO, in which case this function finds a vacant device number
 * and assigns it automatically. Returns the new UBI device number in case of
 * success and a
 * negative error code in case of failure.
 *
 * Note, the invocations of this function have to be serialized by the
 * @ubi_devices_mutex.
 */
int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
{
	struct ubi_device *ubi;
	int i, err, ref = 0;

	/*
	 * Check if we already have the same MTD device attached.
	 *
	 * Note, this function assumes that UBI devices creations and deletions
	 * are serialized, so it does not take the &ubi_devices_lock.
	 */
	for (i = 0; i < UBI_MAX_DEVICES; i++) {
		ubi = ubi_devices[i];
		if (ubi && mtd->index == ubi->mtd->index) {
			dbg_err("mtd%d is already attached to ubi%d",
				mtd->index, i);
			return -EEXIST;
		}
	}

	/*
	 * Make sure this MTD device is not emulated on top of an UBI volume
	 * already. Well, generally this recursion works fine, but there are
	 * different problems like the UBI module takes a reference to itself
	 * by attaching (and thus, opening) the emulated MTD device. This
	 * results in inability to unload the module. And in general it makes
	 * no sense to attach emulated MTD devices, so we prohibit this.
	 */
	if (mtd->type == MTD_UBIVOLUME) {
		ubi_err("refuse attaching mtd%d - it is already emulated on "
			"top of UBI", mtd->index);
		return -EINVAL;
	}

	if (ubi_num == UBI_DEV_NUM_AUTO) {
		/* Search for an empty slot in the @ubi_devices array */
		for (ubi_num = 0; ubi_num < UBI_MAX_DEVICES; ubi_num++)
			if (!ubi_devices[ubi_num])
				break;
		if (ubi_num == UBI_MAX_DEVICES) {
			dbg_err("only %d UBI devices may be created",
				UBI_MAX_DEVICES);
			return -ENFILE;
		}
	} else {
		if (ubi_num >= UBI_MAX_DEVICES)
			return -EINVAL;

		/* Make sure ubi_num is not busy */
		if (ubi_devices[ubi_num]) {
			dbg_err("ubi%d already exists", ubi_num);
			return -EEXIST;
		}
	}

	ubi = kzalloc(sizeof(struct ubi_device), GFP_KERNEL);
	if (!ubi)
		return -ENOMEM;

	ubi->mtd = mtd;
	ubi->ubi_num = ubi_num;
	ubi->vid_hdr_offset = vid_hdr_offset;
	ubi->autoresize_vol_id = -1;

	mutex_init(&ubi->buf_mutex);
	mutex_init(&ubi->ckvol_mutex);
	mutex_init(&ubi->device_mutex);
	spin_lock_init(&ubi->volumes_lock);

	ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);

	err = io_init(ubi);
	if (err)
		goto out_free;

	err = -ENOMEM;
	ubi->peb_buf1 = vmalloc(ubi->peb_size);
	if (!ubi->peb_buf1)
		goto out_free;

	ubi->peb_buf2 = vmalloc(ubi->peb_size);
	if (!ubi->peb_buf2)
		goto out_free;

#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
	mutex_init(&ubi->dbg_buf_mutex);
	ubi->dbg_peb_buf = vmalloc(ubi->peb_size);
	if (!ubi->dbg_peb_buf)
		goto out_free;
#endif

	err = attach_by_scanning(ubi);
	if (err) {
		dbg_err("failed to attach by scanning, error %d", err);
		goto out_free;
	}

	if (ubi->autoresize_vol_id != -1) {
		err = autoresize(ubi, ubi->autoresize_vol_id);
		if (err)
			goto out_detach;
	}

	err = uif_init(ubi, &ref);
	if (err)
		goto out_detach;

	ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
	if (IS_ERR(ubi->bgt_thread)) {
		err = PTR_ERR(ubi->bgt_thread);
		ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name,
			err);
		goto out_uif;
	}

	ubi_msg("attached mtd%d to ubi%d", mtd->index, ubi_num);
	ubi_msg("MTD device name: \"%s\"", mtd->name);
	ubi_msg("MTD device size: %llu MiB", ubi->flash_size >> 20);
	ubi_msg("number of good PEBs: %d", ubi->good_peb_count);
	ubi_msg("number of bad PEBs: %d", ubi->bad_peb_count);
	ubi_msg("max. allowed volumes: %d", ubi->vtbl_slots);
	ubi_msg("wear-leveling threshold: %d", CONFIG_MTD_UBI_WL_THRESHOLD);
	ubi_msg("number of internal volumes: %d", UBI_INT_VOL_COUNT);
	ubi_msg("number of user volumes: %d",
		ubi->vol_count - UBI_INT_VOL_COUNT);
	ubi_msg("available PEBs: %d", ubi->avail_pebs);
	ubi_msg("total number of reserved PEBs: %d", ubi->rsvd_pebs);
	ubi_msg("number of PEBs reserved for bad PEB handling: %d",
		ubi->beb_rsvd_pebs);
	ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec);
	ubi_msg("image sequence number: %d", ubi->image_seq);

	/*
	 * The below lock makes sure we do not race with 'ubi_thread()' which
	 * checks @ubi->thread_enabled. Otherwise we may fail to wake it up.
	 */
	spin_lock(&ubi->wl_lock);
	if (!DBG_DISABLE_BGT)
		ubi->thread_enabled = 1;
	wake_up_process(ubi->bgt_thread);
	spin_unlock(&ubi->wl_lock);

	/* Flash device priority is 0 - UBI needs to shut down first */
	ubi->reboot_notifier.priority = 1;
	ubi->reboot_notifier.notifier_call = ubi_reboot_notifier;
	register_reboot_notifier(&ubi->reboot_notifier);

	ubi_devices[ubi_num] = ubi;
	ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL);
	return ubi_num;

out_uif:
	uif_close(ubi);
out_detach:
	ubi_wl_close(ubi);
	free_internal_volumes(ubi);
	vfree(ubi->vtbl);
out_free:
	vfree(ubi->peb_buf1);
	vfree(ubi->peb_buf2);
#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
	vfree(ubi->dbg_peb_buf);
#endif
	if (ref)
		put_device(&ubi->dev);
	else
		kfree(ubi);
	return err;
}

/**
 * ubi_detach_mtd_dev - detach an MTD device.
 * @ubi_num: UBI device number to detach from
 * @anyway: detach MTD even if device reference count is not zero
 *
 * This function destroys an UBI device number @ubi_num and detaches the
 * underlying MTD device. Returns zero in case of success and %-EBUSY if the
 * UBI device is busy and cannot be destroyed, and %-EINVAL if it does not
 * exist.
 *
 * Note, the invocations of this function have to be serialized by the
 * @ubi_devices_mutex.
 */
int ubi_detach_mtd_dev(int ubi_num, int anyway)
{
	struct ubi_device *ubi;

	if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
		return -EINVAL;

	ubi = ubi_get_device(ubi_num);
	if (!ubi)
		return -EINVAL;

	spin_lock(&ubi_devices_lock);
	put_device(&ubi->dev);
	ubi->ref_count -= 1;
	if (ubi->ref_count) {
		if (!anyway) {
			spin_unlock(&ubi_devices_lock);
			return -EBUSY;
		}
		/* This may only happen if there is a bug */
		ubi_err("%s reference count %d, destroy anyway",
			ubi->ubi_name, ubi->ref_count);
	}
	ubi_devices[ubi_num] = NULL;
	spin_unlock(&ubi_devices_lock);

	ubi_assert(ubi_num == ubi->ubi_num);
	ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL);
	dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);

	/*
	 * Before freeing anything, we have to stop the background thread to
	 * prevent it from doing anything on this device while we are freeing.
	 */
	unregister_reboot_notifier(&ubi->reboot_notifier);
	if (ubi->bgt_thread)
		kthread_stop(ubi->bgt_thread);

	/*
	 * Get a reference to the device in order to prevent 'dev_release()'
	 * from freeing the @ubi object.
	 */
	get_device(&ubi->dev);

	uif_close(ubi);
	ubi_wl_close(ubi);
	free_internal_volumes(ubi);
	vfree(ubi->vtbl);
	put_mtd_device(ubi->mtd);
	vfree(ubi->peb_buf1);
	vfree(ubi->peb_buf2);
#ifdef CONFIG_MTD_UBI_DEBUG_PARANOID
	vfree(ubi->dbg_peb_buf);
#endif
	ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
	put_device(&ubi->dev);
	return 0;
}

/**
 * open_mtd_by_chdev - open an MTD device by its character device node path.
 * @mtd_dev: MTD character device node path
 *
 * This helper function opens an MTD device by its character device node path.
 * Returns MTD device description object in case of success and a negative
 * error code in case of failure.
 */
static struct mtd_info * __init open_mtd_by_chdev(const char *mtd_dev)
{
	int err, major, minor, mode;
	struct path path;

	/* Probably this is an MTD character device node path */
	err = kern_path(mtd_dev, LOOKUP_FOLLOW, &path);
	if (err)
		return ERR_PTR(err);

	/* MTD device number is defined by the major / minor numbers */
	major = imajor(path.dentry->d_inode);
	minor = iminor(path.dentry->d_inode);
	mode = path.dentry->d_inode->i_mode;
	path_put(&path);
	if (major != MTD_CHAR_MAJOR || !S_ISCHR(mode))
		return ERR_PTR(-EINVAL);

	if (minor & 1)
		/*
		 * We do not think support for the "/dev/mtdrX" devices is
		 * needed, so we do not support them to avoid doing extra
		 * work.
		 */
		return ERR_PTR(-EINVAL);

	return get_mtd_device(NULL, minor / 2);
}

/**
 * open_mtd_device - open MTD device by name, character device path, or number.
 * @mtd_dev: name, character device node path, or MTD device number
 *
 * This function tries to open an MTD device described by the @mtd_dev string,
 * which is first treated as an ASCII MTD device number, and if that fails, as
 * an MTD device name, and if that also fails, as an MTD character device node
 * path. Returns MTD device description object in case of success and a
 * negative error code in case of failure.
 */
static struct mtd_info * __init open_mtd_device(const char *mtd_dev)
{
	struct mtd_info *mtd;
	int mtd_num;
	char *endp;

	mtd_num = simple_strtoul(mtd_dev, &endp, 0);
	if (*endp != '\0' || mtd_dev == endp) {
		/*
		 * This does not look like an ASCII integer, probably this is
		 * MTD device name.
		 */
		mtd = get_mtd_device_nm(mtd_dev);
		if (IS_ERR(mtd) && PTR_ERR(mtd) == -ENODEV)
			/* Probably this is an MTD character device node path */
			mtd = open_mtd_by_chdev(mtd_dev);
	} else
		mtd = get_mtd_device(NULL, mtd_num);

	return mtd;
}

static int __init ubi_init(void)
{
	int err, i, k;

	/* Ensure that EC and VID headers have correct size */
	BUILD_BUG_ON(sizeof(struct ubi_ec_hdr) != 64);
	BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);

	if (mtd_devs > UBI_MAX_DEVICES) {
		ubi_err("too many MTD devices, maximum is %d", UBI_MAX_DEVICES);
		return -EINVAL;
	}

	/* Create base sysfs directory and sysfs files */
	ubi_class = class_create(THIS_MODULE, UBI_NAME_STR);
	if (IS_ERR(ubi_class)) {
		err = PTR_ERR(ubi_class);
		ubi_err("cannot create UBI class");
		goto out;
	}

	err = class_create_file(ubi_class, &ubi_version);
	if (err) {
		ubi_err("cannot create sysfs file");
		goto out_class;
	}

	err = misc_register(&ubi_ctrl_cdev);
	if (err) {
		ubi_err("cannot register device");
		goto out_version;
	}

	ubi_wl_entry_slab = kmem_cache_create("ubi_wl_entry_slab",
					      sizeof(struct ubi_wl_entry),
					      0, 0, NULL);
	if (!ubi_wl_entry_slab)
		goto out_dev_unreg;

	/* Attach MTD devices */
	for (i = 0; i < mtd_devs; i++) {
		struct mtd_dev_param *p = &mtd_dev_param[i];
		struct mtd_info *mtd;

		cond_resched();

		mtd = open_mtd_device(p->name);
		if (IS_ERR(mtd)) {
			err = PTR_ERR(mtd);
			goto out_detach;
		}

		mutex_lock(&ubi_devices_mutex);
		err = ubi_attach_mtd_dev(mtd, UBI_DEV_NUM_AUTO,
					 p->vid_hdr_offs);
		mutex_unlock(&ubi_devices_mutex);
		if (err < 0) {
			put_mtd_device(mtd);
			ubi_err("cannot attach mtd%d", mtd->index);
			goto out_detach;
		}
	}

	return 0;

out_detach:
	for (k = 0; k < i; k++)
		if (ubi_devices[k]) {
			mutex_lock(&ubi_devices_mutex);
			ubi_detach_mtd_dev(ubi_devices[k]->ubi_num, 1);
			mutex_unlock(&ubi_devices_mutex);
		}
	kmem_cache_destroy(ubi_wl_entry_slab);
out_dev_unreg:
	misc_deregister(&ubi_ctrl_cdev);
out_version:
	class_remove_file(ubi_class, &ubi_version);
out_class:
	class_destroy(ubi_class);
out:
	ubi_err("UBI error: cannot initialize UBI, error %d", err);
	return err;
}
module_init(ubi_init);

static void __exit ubi_exit(void)
{
	int i;

	for (i = 0; i < UBI_MAX_DEVICES; i++)
		if (ubi_devices[i]) {
			mutex_lock(&ubi_devices_mutex);
			ubi_detach_mtd_dev(ubi_devices[i]->ubi_num, 1);
			mutex_unlock(&ubi_devices_mutex);
		}
	kmem_cache_destroy(ubi_wl_entry_slab);
	misc_deregister(&ubi_ctrl_cdev);
	class_remove_file(ubi_class, &ubi_version);
	class_destroy(ubi_class);
}
module_exit(ubi_exit);

/**
 * bytes_str_to_int - convert a number of bytes string into an integer.
 * @str: the string to convert
 *
 * This function returns the resulting positive integer in case of success and
 * a negative error code in case of failure.
 */
static int __init bytes_str_to_int(const char *str)
{
	char *endp;
	unsigned long result;

	result = simple_strtoul(str, &endp, 0);
	if (str == endp || result >= INT_MAX) {
		printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
		       str);
		return -EINVAL;
	}

	switch (*endp) {
	case 'G':
		result *= 1024;
	case 'M':
		result *= 1024;
	case 'K':
		result *= 1024;
		if (endp[1] == 'i' && endp[2] == 'B')
			endp += 2;
	case '\0':
		break;
	default:
		printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
		       str);
		return -EINVAL;
	}

	return result;
}

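/*
 * The fall-through in the switch above multiplies once per suffix level, so
 * the suffixes behave as KiB/MiB/GiB multipliers. A few example values (an
 * illustration, worked out from the code above):
 *
 *	bytes_str_to_int("2048") = 2048
 *	bytes_str_to_int("4KiB") = 4096
 *	bytes_str_to_int("1M")   = 1048576
 *	bytes_str_to_int("1GiB") = 1073741824
 */
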
/**
 * ubi_mtd_param_parse - parse the 'mtd=' UBI parameter.
 * @val: the parameter value to parse
 * @kp: not used
 *
 * This function returns zero in case of success and a negative error code in
 * case of error.
 */
static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
{
	int i, len;
	struct mtd_dev_param *p;
	char buf[MTD_PARAM_LEN_MAX];
	char *pbuf = &buf[0];
	char *tokens[2] = {NULL, NULL};

	if (!val)
		return -EINVAL;

	if (mtd_devs == UBI_MAX_DEVICES) {
		printk(KERN_ERR "UBI error: too many parameters, max. is %d\n",
		       UBI_MAX_DEVICES);
		return -EINVAL;
	}

	len = strnlen(val, MTD_PARAM_LEN_MAX);
	if (len == MTD_PARAM_LEN_MAX) {
		printk(KERN_ERR "UBI error: parameter \"%s\" is too long, "
		       "max. is %d\n", val, MTD_PARAM_LEN_MAX);
		return -EINVAL;
	}

	if (len == 0) {
		printk(KERN_WARNING "UBI warning: empty 'mtd=' parameter - "
		       "ignored\n");
		return 0;
	}

	strcpy(buf, val);

	/* Get rid of the final newline */
	if (buf[len - 1] == '\n')
		buf[len - 1] = '\0';

	for (i = 0; i < 2; i++)
		tokens[i] = strsep(&pbuf, ",");

	if (pbuf) {
		printk(KERN_ERR "UBI error: too many arguments at \"%s\"\n",
		       val);
		return -EINVAL;
	}

	p = &mtd_dev_param[mtd_devs];
	strcpy(&p->name[0], tokens[0]);

	if (tokens[1])
		p->vid_hdr_offs = bytes_str_to_int(tokens[1]);

	if (p->vid_hdr_offs < 0)
		return p->vid_hdr_offs;

	mtd_devs += 1;
	return 0;
}

module_param_call(mtd, ubi_mtd_param_parse, NULL, NULL, 000);
MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: "
		      "mtd=<name|num|path>[,<vid_hdr_offs>].\n"
		      "Multiple \"mtd\" parameters may be specified.\n"
		      "MTD devices may be specified by their number, name, or "
		      "path to the MTD character device node.\n"
		      "Optional \"vid_hdr_offs\" parameter specifies UBI VID "
		      "header position to be used by UBI.\n"
		      "Example 1: mtd=/dev/mtd0 - attach MTD device "
		      "/dev/mtd0.\n"
		      "Example 2: mtd=content,1984 mtd=4 - attach MTD device "
		      "with name \"content\" using VID header offset 1984, and "
		      "MTD device number 4 with default VID header offset.");

MODULE_VERSION(__stringify(UBI_VERSION));
MODULE_DESCRIPTION("UBI - Unsorted Block Images");
MODULE_AUTHOR("Artem Bityutskiy");
MODULE_LICENSE("GPL");
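
/*
 * Usage reminder (an illustration, not part of the module source): when UBI
 * is built into the kernel, the same 'mtd=' parameter is given on the kernel
 * command line with the module-name prefix, e.g.
 *
 *	ubi.mtd=content,1984 ubi.mtd=4
 *
 * and when built as a module it is passed at load time, e.g.
 *
 *	modprobe ubi mtd=/dev/mtd0
 */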