Merge branch 'linux-next' of git://git.infradead.org/~dedekind/ubi-2.6

* 'linux-next' of git://git.infradead.org/~dedekind/ubi-2.6: (22 commits)
UBI: always start the background thread
UBI: fix gcc warning
UBI: remove pre-sqnum images support
UBI: fix kernel-doc errors and warnings
UBI: fix checkpatch.pl errors and warnings
UBI: bugfix - do not torture PEB needlessly
UBI: rework scrubbing messages
UBI: implement multiple volumes rename
UBI: fix and re-work debugging stuff
UBI: amend commentaries
UBI: fix error message
UBI: improve mkvol request validation
UBI: add ubi_sync() interface
UBI: fix 64-bit calculations
UBI: fix LEB locking
UBI: fix memory leak on error path
UBI: do not forget to free internal volumes
UBI: fix memory leak
UBI: avoid unnecessary division operations
UBI: fix buffer padding
...

+1008 -616
+73 -26
drivers/mtd/ubi/build.c
··· 51 * @name: MTD device name or number string 52 * @vid_hdr_offs: VID header offset 53 */ 54 - struct mtd_dev_param 55 - { 56 char name[MTD_PARAM_LEN_MAX]; 57 int vid_hdr_offs; 58 }; 59 60 /* Numbers of elements set in the @mtd_dev_param array */ 61 - static int mtd_devs = 0; 62 63 /* MTD devices specification parameters */ 64 static struct mtd_dev_param mtd_dev_param[UBI_MAX_DEVICES]; ··· 159 } 160 161 /** 162 - * ubi_get_by_major - get UBI device description object by character device 163 - * major number. 164 * @major: major number 165 * 166 * This function is similar to 'ubi_get_device()', but it searches the device ··· 353 } 354 355 /** 356 * uif_init - initialize user interfaces for an UBI device. 357 * @ubi: UBI device description object 358 * 359 * This function returns zero in case of success and a negative error code in 360 - * case of failure. 361 */ 362 static int uif_init(struct ubi_device *ubi) 363 { 364 - int i, err; 365 dev_t dev; 366 367 sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num); ··· 401 402 ubi_assert(MINOR(dev) == 0); 403 cdev_init(&ubi->cdev, &ubi_cdev_operations); 404 - dbg_msg("%s major is %u", ubi->ubi_name, MAJOR(dev)); 405 ubi->cdev.owner = THIS_MODULE; 406 407 err = cdev_add(&ubi->cdev, dev, 1); ··· 427 428 out_volumes: 429 kill_volumes(ubi); 430 out_sysfs: 431 ubi_sysfs_close(ubi); 432 cdev_del(&ubi->cdev); 433 out_unreg: 434 unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1); 435 ubi_err("cannot initialize UBI %s, error %d", ubi->ubi_name, err); 436 return err; ··· 442 /** 443 * uif_close - close user interfaces for an UBI device. 
444 * @ubi: UBI device description object 445 */ 446 static void uif_close(struct ubi_device *ubi) 447 { ··· 453 ubi_sysfs_close(ubi); 454 cdev_del(&ubi->cdev); 455 unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1); 456 } 457 458 /** ··· 514 out_wl: 515 ubi_wl_close(ubi); 516 out_vtbl: 517 vfree(ubi->vtbl); 518 out_si: 519 ubi_scan_destroy_si(si); ··· 522 } 523 524 /** 525 - * io_init - initialize I/O unit for a given UBI device. 526 * @ubi: UBI device description object 527 * 528 * If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are ··· 570 ubi->min_io_size = ubi->mtd->writesize; 571 ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft; 572 573 - /* Make sure minimal I/O unit is power of 2 */ 574 if (!is_power_of_2(ubi->min_io_size)) { 575 ubi_err("min. I/O unit (%d) is not power of 2", 576 ubi->min_io_size); ··· 625 if (ubi->vid_hdr_offset < UBI_EC_HDR_SIZE || 626 ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE || 627 ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE || 628 - ubi->leb_start % ubi->min_io_size) { 629 ubi_err("bad VID header (%d) or data offsets (%d)", 630 ubi->vid_hdr_offset, ubi->leb_start); 631 return -EINVAL; ··· 690 691 /* 692 * Clear the auto-resize flag in the volume in-memory copy of the 693 - * volume table, and 'ubi_resize_volume()' will propogate this change 694 * to the flash. 695 */ 696 ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG; ··· 699 struct ubi_vtbl_record vtbl_rec; 700 701 /* 702 - * No avalilable PEBs to re-size the volume, clear the flag on 703 * flash and exit. 704 */ 705 memcpy(&vtbl_rec, &ubi->vtbl[vol_id], ··· 726 727 /** 728 * ubi_attach_mtd_dev - attach an MTD device. 
729 - * @mtd_dev: MTD device description object 730 * @ubi_num: number to assign to the new UBI device 731 * @vid_hdr_offset: VID header offset 732 * 733 * This function attaches MTD device @mtd_dev to UBI and assign @ubi_num number 734 * to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in 735 - * which case this function finds a vacant device nubert and assings it 736 * automatically. Returns the new UBI device number in case of success and a 737 * negative error code in case of failure. 738 * ··· 742 int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset) 743 { 744 struct ubi_device *ubi; 745 - int i, err; 746 747 /* 748 * Check if we already have the same MTD device attached. ··· 779 if (!ubi_devices[ubi_num]) 780 break; 781 if (ubi_num == UBI_MAX_DEVICES) { 782 - dbg_err("only %d UBI devices may be created", UBI_MAX_DEVICES); 783 return -ENFILE; 784 } 785 } else { ··· 805 806 mutex_init(&ubi->buf_mutex); 807 mutex_init(&ubi->ckvol_mutex); 808 mutex_init(&ubi->volumes_mutex); 809 spin_lock_init(&ubi->volumes_lock); 810 ··· 844 845 err = uif_init(ubi); 846 if (err) 847 - goto out_detach; 848 849 ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name); 850 if (IS_ERR(ubi->bgt_thread)) { ··· 870 ubi->beb_rsvd_pebs); 871 ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec); 872 873 - /* Enable the background thread */ 874 - if (!DBG_DISABLE_BGT) { 875 ubi->thread_enabled = 1; 876 - wake_up_process(ubi->bgt_thread); 877 - } 878 879 ubi_devices[ubi_num] = ubi; 880 return ubi_num; 881 882 out_uif: 883 uif_close(ubi); 884 out_detach: 885 - ubi_eba_close(ubi); 886 ubi_wl_close(ubi); 887 vfree(ubi->vtbl); 888 out_free: 889 vfree(ubi->peb_buf1); ··· 947 kthread_stop(ubi->bgt_thread); 948 949 uif_close(ubi); 950 - ubi_eba_close(ubi); 951 ubi_wl_close(ubi); 952 vfree(ubi->vtbl); 953 put_mtd_device(ubi->mtd); 954 vfree(ubi->peb_buf1); ··· 1092 module_exit(ubi_exit); 1093 1094 /** 1095 - * bytes_str_to_int - 
convert a string representing number of bytes to an 1096 - * integer. 1097 * @str: the string to convert 1098 * 1099 * This function returns positive resulting integer in case of success and a
··· 51 * @name: MTD device name or number string 52 * @vid_hdr_offs: VID header offset 53 */ 54 + struct mtd_dev_param { 55 char name[MTD_PARAM_LEN_MAX]; 56 int vid_hdr_offs; 57 }; 58 59 /* Numbers of elements set in the @mtd_dev_param array */ 60 + static int mtd_devs; 61 62 /* MTD devices specification parameters */ 63 static struct mtd_dev_param mtd_dev_param[UBI_MAX_DEVICES]; ··· 160 } 161 162 /** 163 + * ubi_get_by_major - get UBI device by character device major number. 164 * @major: major number 165 * 166 * This function is similar to 'ubi_get_device()', but it searches the device ··· 355 } 356 357 /** 358 + * free_user_volumes - free all user volumes. 359 + * @ubi: UBI device description object 360 + * 361 + * Normally the volumes are freed at the release function of the volume device 362 + * objects. However, on error paths the volumes have to be freed before the 363 + * device objects have been initialized. 364 + */ 365 + static void free_user_volumes(struct ubi_device *ubi) 366 + { 367 + int i; 368 + 369 + for (i = 0; i < ubi->vtbl_slots; i++) 370 + if (ubi->volumes[i]) { 371 + kfree(ubi->volumes[i]->eba_tbl); 372 + kfree(ubi->volumes[i]); 373 + } 374 + } 375 + 376 + /** 377 * uif_init - initialize user interfaces for an UBI device. 378 * @ubi: UBI device description object 379 * 380 * This function returns zero in case of success and a negative error code in 381 + * case of failure. Note, this function destroys all volumes if it failes. 
382 */ 383 static int uif_init(struct ubi_device *ubi) 384 { 385 + int i, err, do_free = 0; 386 dev_t dev; 387 388 sprintf(ubi->ubi_name, UBI_NAME_STR "%d", ubi->ubi_num); ··· 384 385 ubi_assert(MINOR(dev) == 0); 386 cdev_init(&ubi->cdev, &ubi_cdev_operations); 387 + dbg_gen("%s major is %u", ubi->ubi_name, MAJOR(dev)); 388 ubi->cdev.owner = THIS_MODULE; 389 390 err = cdev_add(&ubi->cdev, dev, 1); ··· 410 411 out_volumes: 412 kill_volumes(ubi); 413 + do_free = 0; 414 out_sysfs: 415 ubi_sysfs_close(ubi); 416 cdev_del(&ubi->cdev); 417 out_unreg: 418 + if (do_free) 419 + free_user_volumes(ubi); 420 unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1); 421 ubi_err("cannot initialize UBI %s, error %d", ubi->ubi_name, err); 422 return err; ··· 422 /** 423 * uif_close - close user interfaces for an UBI device. 424 * @ubi: UBI device description object 425 + * 426 + * Note, since this function un-registers UBI volume device objects (@vol->dev), 427 + * the memory allocated voe the volumes is freed as well (in the release 428 + * function). 429 */ 430 static void uif_close(struct ubi_device *ubi) 431 { ··· 429 ubi_sysfs_close(ubi); 430 cdev_del(&ubi->cdev); 431 unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1); 432 + } 433 + 434 + /** 435 + * free_internal_volumes - free internal volumes. 436 + * @ubi: UBI device description object 437 + */ 438 + static void free_internal_volumes(struct ubi_device *ubi) 439 + { 440 + int i; 441 + 442 + for (i = ubi->vtbl_slots; 443 + i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) { 444 + kfree(ubi->volumes[i]->eba_tbl); 445 + kfree(ubi->volumes[i]); 446 + } 447 } 448 449 /** ··· 475 out_wl: 476 ubi_wl_close(ubi); 477 out_vtbl: 478 + free_internal_volumes(ubi); 479 vfree(ubi->vtbl); 480 out_si: 481 ubi_scan_destroy_si(si); ··· 482 } 483 484 /** 485 + * io_init - initialize I/O sub-system for a given UBI device. 
486 * @ubi: UBI device description object 487 * 488 * If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are ··· 530 ubi->min_io_size = ubi->mtd->writesize; 531 ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft; 532 533 + /* 534 + * Make sure minimal I/O unit is power of 2. Note, there is no 535 + * fundamental reason for this assumption. It is just an optimization 536 + * which allows us to avoid costly division operations. 537 + */ 538 if (!is_power_of_2(ubi->min_io_size)) { 539 ubi_err("min. I/O unit (%d) is not power of 2", 540 ubi->min_io_size); ··· 581 if (ubi->vid_hdr_offset < UBI_EC_HDR_SIZE || 582 ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE || 583 ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE || 584 + ubi->leb_start & (ubi->min_io_size - 1)) { 585 ubi_err("bad VID header (%d) or data offsets (%d)", 586 ubi->vid_hdr_offset, ubi->leb_start); 587 return -EINVAL; ··· 646 647 /* 648 * Clear the auto-resize flag in the volume in-memory copy of the 649 + * volume table, and 'ubi_resize_volume()' will propagate this change 650 * to the flash. 651 */ 652 ubi->vtbl[vol_id].flags &= ~UBI_VTBL_AUTORESIZE_FLG; ··· 655 struct ubi_vtbl_record vtbl_rec; 656 657 /* 658 + * No available PEBs to re-size the volume, clear the flag on 659 * flash and exit. 660 */ 661 memcpy(&vtbl_rec, &ubi->vtbl[vol_id], ··· 682 683 /** 684 * ubi_attach_mtd_dev - attach an MTD device. 685 + * @mtd: MTD device description object 686 * @ubi_num: number to assign to the new UBI device 687 * @vid_hdr_offset: VID header offset 688 * 689 * This function attaches MTD device @mtd_dev to UBI and assign @ubi_num number 690 * to the newly created UBI device, unless @ubi_num is %UBI_DEV_NUM_AUTO, in 691 + * which case this function finds a vacant device number and assigns it 692 * automatically. Returns the new UBI device number in case of success and a 693 * negative error code in case of failure. 
694 * ··· 698 int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset) 699 { 700 struct ubi_device *ubi; 701 + int i, err, do_free = 1; 702 703 /* 704 * Check if we already have the same MTD device attached. ··· 735 if (!ubi_devices[ubi_num]) 736 break; 737 if (ubi_num == UBI_MAX_DEVICES) { 738 + dbg_err("only %d UBI devices may be created", 739 + UBI_MAX_DEVICES); 740 return -ENFILE; 741 } 742 } else { ··· 760 761 mutex_init(&ubi->buf_mutex); 762 mutex_init(&ubi->ckvol_mutex); 763 + mutex_init(&ubi->mult_mutex); 764 mutex_init(&ubi->volumes_mutex); 765 spin_lock_init(&ubi->volumes_lock); 766 ··· 798 799 err = uif_init(ubi); 800 if (err) 801 + goto out_nofree; 802 803 ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name); 804 if (IS_ERR(ubi->bgt_thread)) { ··· 824 ubi->beb_rsvd_pebs); 825 ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec); 826 827 + if (!DBG_DISABLE_BGT) 828 ubi->thread_enabled = 1; 829 + wake_up_process(ubi->bgt_thread); 830 831 ubi_devices[ubi_num] = ubi; 832 return ubi_num; 833 834 out_uif: 835 uif_close(ubi); 836 + out_nofree: 837 + do_free = 0; 838 out_detach: 839 ubi_wl_close(ubi); 840 + if (do_free) 841 + free_user_volumes(ubi); 842 + free_internal_volumes(ubi); 843 vfree(ubi->vtbl); 844 out_free: 845 vfree(ubi->peb_buf1); ··· 899 kthread_stop(ubi->bgt_thread); 900 901 uif_close(ubi); 902 ubi_wl_close(ubi); 903 + free_internal_volumes(ubi); 904 vfree(ubi->vtbl); 905 put_mtd_device(ubi->mtd); 906 vfree(ubi->peb_buf1); ··· 1044 module_exit(ubi_exit); 1045 1046 /** 1047 + * bytes_str_to_int - convert a number of bytes string into an integer. 1048 * @str: the string to convert 1049 * 1050 * This function returns positive resulting integer in case of success and a
+212 -22
drivers/mtd/ubi/cdev.c
··· 39 #include <linux/stat.h> 40 #include <linux/ioctl.h> 41 #include <linux/capability.h> 42 #include <linux/smp_lock.h> 43 #include <mtd/ubi-user.h> 44 - #include <asm/uaccess.h> 45 #include <asm/div64.h> 46 #include "ubi.h" 47 ··· 116 else 117 mode = UBI_READONLY; 118 119 - dbg_msg("open volume %d, mode %d", vol_id, mode); 120 121 desc = ubi_open_volume(ubi_num, vol_id, mode); 122 unlock_kernel(); ··· 132 struct ubi_volume_desc *desc = file->private_data; 133 struct ubi_volume *vol = desc->vol; 134 135 - dbg_msg("release volume %d, mode %d", vol->vol_id, desc->mode); 136 137 if (vol->updating) { 138 ubi_warn("update of volume %d not finished, volume is damaged", ··· 141 vol->updating = 0; 142 vfree(vol->upd_buf); 143 } else if (vol->changing_leb) { 144 - dbg_msg("only %lld of %lld bytes received for atomic LEB change" 145 " for volume %d:%d, cancel", vol->upd_received, 146 vol->upd_bytes, vol->ubi->ubi_num, vol->vol_id); 147 vol->changing_leb = 0; ··· 183 return -EINVAL; 184 } 185 186 - dbg_msg("seek volume %d, offset %lld, origin %d, new offset %lld", 187 vol->vol_id, offset, origin, new_offset); 188 189 file->f_pos = new_offset; ··· 201 void *tbuf; 202 uint64_t tmp; 203 204 - dbg_msg("read %zd bytes from offset %lld of volume %d", 205 count, *offp, vol->vol_id); 206 207 if (vol->updating) { ··· 216 return 0; 217 218 if (vol->corrupted) 219 - dbg_msg("read from corrupted volume %d", vol->vol_id); 220 221 if (*offp + count > vol->used_bytes) 222 count_save = count = vol->used_bytes - *offp; ··· 285 char *tbuf; 286 uint64_t tmp; 287 288 - dbg_msg("requested: write %zd bytes to offset %lld of volume %u", 289 count, *offp, vol->vol_id); 290 291 if (vol->vol_type == UBI_STATIC_VOLUME) ··· 295 off = do_div(tmp, vol->usable_leb_size); 296 lnum = tmp; 297 298 - if (off % ubi->min_io_size) { 299 dbg_err("unaligned position"); 300 return -EINVAL; 301 } ··· 304 count_save = count = vol->used_bytes - *offp; 305 306 /* We can write only in fractions of the minimum I/O unit 
*/ 307 - if (count % ubi->min_io_size) { 308 dbg_err("unaligned write length"); 309 return -EINVAL; 310 } ··· 352 } 353 354 #else 355 - #define vol_cdev_direct_write(file, buf, count, offp) -EPERM 356 #endif /* CONFIG_MTD_UBI_DEBUG_USERSPACE_IO */ 357 358 static ssize_t vol_cdev_write(struct file *file, const char __user *buf, ··· 437 break; 438 } 439 440 - rsvd_bytes = vol->reserved_pebs * (ubi->leb_size-vol->data_pad); 441 if (bytes < 0 || bytes > rsvd_bytes) { 442 err = -EINVAL; 443 break; ··· 514 break; 515 } 516 517 - dbg_msg("erase LEB %d:%d", vol->vol_id, lnum); 518 err = ubi_eba_unmap_leb(ubi, vol, lnum); 519 if (err) 520 break; ··· 565 if (req->alignment > ubi->leb_size) 566 goto bad; 567 568 - n = req->alignment % ubi->min_io_size; 569 if (req->alignment != 1 && n) 570 goto bad; 571 ··· 573 err = -ENAMETOOLONG; 574 goto bad; 575 } 576 577 return 0; 578 ··· 605 return 0; 606 } 607 608 static int ubi_cdev_ioctl(struct inode *inode, struct file *file, 609 unsigned int cmd, unsigned long arg) 610 { ··· 786 { 787 struct ubi_mkvol_req req; 788 789 - dbg_msg("create volume"); 790 err = copy_from_user(&req, argp, sizeof(struct ubi_mkvol_req)); 791 if (err) { 792 err = -EFAULT; 793 break; 794 } 795 796 err = verify_mkvol_req(ubi, &req); 797 if (err) 798 break; 799 - 800 - req.name[req.name_len] = '\0'; 801 802 mutex_lock(&ubi->volumes_mutex); 803 err = ubi_create_volume(ubi, &req); ··· 816 { 817 int vol_id; 818 819 - dbg_msg("remove volume"); 820 err = get_user(vol_id, (__user int32_t *)argp); 821 if (err) { 822 err = -EFAULT; ··· 830 } 831 832 mutex_lock(&ubi->volumes_mutex); 833 - err = ubi_remove_volume(desc); 834 mutex_unlock(&ubi->volumes_mutex); 835 836 /* ··· 849 uint64_t tmp; 850 struct ubi_rsvol_req req; 851 852 - dbg_msg("re-size volume"); 853 err = copy_from_user(&req, argp, sizeof(struct ubi_rsvol_req)); 854 if (err) { 855 err = -EFAULT; ··· 874 err = ubi_resize_volume(desc, pebs); 875 mutex_unlock(&ubi->volumes_mutex); 876 ubi_close_volume(desc); 877 
break; 878 } 879 ··· 928 struct ubi_attach_req req; 929 struct mtd_info *mtd; 930 931 - dbg_msg("attach MTD device"); 932 err = copy_from_user(&req, argp, sizeof(struct ubi_attach_req)); 933 if (err) { 934 err = -EFAULT; ··· 968 { 969 int ubi_num; 970 971 - dbg_msg("dettach MTD device"); 972 err = get_user(ubi_num, (__user int32_t *)argp); 973 if (err) { 974 err = -EFAULT;
··· 39 #include <linux/stat.h> 40 #include <linux/ioctl.h> 41 #include <linux/capability.h> 42 + #include <linux/uaccess.h> 43 #include <linux/smp_lock.h> 44 #include <mtd/ubi-user.h> 45 #include <asm/div64.h> 46 #include "ubi.h" 47 ··· 116 else 117 mode = UBI_READONLY; 118 119 + dbg_gen("open volume %d, mode %d", vol_id, mode); 120 121 desc = ubi_open_volume(ubi_num, vol_id, mode); 122 unlock_kernel(); ··· 132 struct ubi_volume_desc *desc = file->private_data; 133 struct ubi_volume *vol = desc->vol; 134 135 + dbg_gen("release volume %d, mode %d", vol->vol_id, desc->mode); 136 137 if (vol->updating) { 138 ubi_warn("update of volume %d not finished, volume is damaged", ··· 141 vol->updating = 0; 142 vfree(vol->upd_buf); 143 } else if (vol->changing_leb) { 144 + dbg_gen("only %lld of %lld bytes received for atomic LEB change" 145 " for volume %d:%d, cancel", vol->upd_received, 146 vol->upd_bytes, vol->ubi->ubi_num, vol->vol_id); 147 vol->changing_leb = 0; ··· 183 return -EINVAL; 184 } 185 186 + dbg_gen("seek volume %d, offset %lld, origin %d, new offset %lld", 187 vol->vol_id, offset, origin, new_offset); 188 189 file->f_pos = new_offset; ··· 201 void *tbuf; 202 uint64_t tmp; 203 204 + dbg_gen("read %zd bytes from offset %lld of volume %d", 205 count, *offp, vol->vol_id); 206 207 if (vol->updating) { ··· 216 return 0; 217 218 if (vol->corrupted) 219 + dbg_gen("read from corrupted volume %d", vol->vol_id); 220 221 if (*offp + count > vol->used_bytes) 222 count_save = count = vol->used_bytes - *offp; ··· 285 char *tbuf; 286 uint64_t tmp; 287 288 + dbg_gen("requested: write %zd bytes to offset %lld of volume %u", 289 count, *offp, vol->vol_id); 290 291 if (vol->vol_type == UBI_STATIC_VOLUME) ··· 295 off = do_div(tmp, vol->usable_leb_size); 296 lnum = tmp; 297 298 + if (off & (ubi->min_io_size - 1)) { 299 dbg_err("unaligned position"); 300 return -EINVAL; 301 } ··· 304 count_save = count = vol->used_bytes - *offp; 305 306 /* We can write only in fractions of the minimum 
I/O unit */ 307 + if (count & (ubi->min_io_size - 1)) { 308 dbg_err("unaligned write length"); 309 return -EINVAL; 310 } ··· 352 } 353 354 #else 355 + #define vol_cdev_direct_write(file, buf, count, offp) (-EPERM) 356 #endif /* CONFIG_MTD_UBI_DEBUG_USERSPACE_IO */ 357 358 static ssize_t vol_cdev_write(struct file *file, const char __user *buf, ··· 437 break; 438 } 439 440 + rsvd_bytes = (long long)vol->reserved_pebs * 441 + ubi->leb_size-vol->data_pad; 442 if (bytes < 0 || bytes > rsvd_bytes) { 443 err = -EINVAL; 444 break; ··· 513 break; 514 } 515 516 + dbg_gen("erase LEB %d:%d", vol->vol_id, lnum); 517 err = ubi_eba_unmap_leb(ubi, vol, lnum); 518 if (err) 519 break; ··· 564 if (req->alignment > ubi->leb_size) 565 goto bad; 566 567 + n = req->alignment & (ubi->min_io_size - 1); 568 if (req->alignment != 1 && n) 569 goto bad; 570 ··· 572 err = -ENAMETOOLONG; 573 goto bad; 574 } 575 + 576 + n = strnlen(req->name, req->name_len + 1); 577 + if (n != req->name_len) 578 + goto bad; 579 580 return 0; 581 ··· 600 return 0; 601 } 602 603 + /** 604 + * rename_volumes - rename UBI volumes. 605 + * @ubi: UBI device description object 606 + * @req: volumes re-name request 607 + * 608 + * This is a helper function for the volume re-name IOCTL which validates the 609 + * the request, opens the volume and calls corresponding volumes management 610 + * function. Returns zero in case of success and a negative error code in case 611 + * of failure. 
612 + */ 613 + static int rename_volumes(struct ubi_device *ubi, 614 + struct ubi_rnvol_req *req) 615 + { 616 + int i, n, err; 617 + struct list_head rename_list; 618 + struct ubi_rename_entry *re, *re1; 619 + 620 + if (req->count < 0 || req->count > UBI_MAX_RNVOL) 621 + return -EINVAL; 622 + 623 + if (req->count == 0) 624 + return 0; 625 + 626 + /* Validate volume IDs and names in the request */ 627 + for (i = 0; i < req->count; i++) { 628 + if (req->ents[i].vol_id < 0 || 629 + req->ents[i].vol_id >= ubi->vtbl_slots) 630 + return -EINVAL; 631 + if (req->ents[i].name_len < 0) 632 + return -EINVAL; 633 + if (req->ents[i].name_len > UBI_VOL_NAME_MAX) 634 + return -ENAMETOOLONG; 635 + req->ents[i].name[req->ents[i].name_len] = '\0'; 636 + n = strlen(req->ents[i].name); 637 + if (n != req->ents[i].name_len) 638 + err = -EINVAL; 639 + } 640 + 641 + /* Make sure volume IDs and names are unique */ 642 + for (i = 0; i < req->count - 1; i++) { 643 + for (n = i + 1; n < req->count; n++) { 644 + if (req->ents[i].vol_id == req->ents[n].vol_id) { 645 + dbg_err("duplicated volume id %d", 646 + req->ents[i].vol_id); 647 + return -EINVAL; 648 + } 649 + if (!strcmp(req->ents[i].name, req->ents[n].name)) { 650 + dbg_err("duplicated volume name \"%s\"", 651 + req->ents[i].name); 652 + return -EINVAL; 653 + } 654 + } 655 + } 656 + 657 + /* Create the re-name list */ 658 + INIT_LIST_HEAD(&rename_list); 659 + for (i = 0; i < req->count; i++) { 660 + int vol_id = req->ents[i].vol_id; 661 + int name_len = req->ents[i].name_len; 662 + const char *name = req->ents[i].name; 663 + 664 + re = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL); 665 + if (!re) { 666 + err = -ENOMEM; 667 + goto out_free; 668 + } 669 + 670 + re->desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE); 671 + if (IS_ERR(re->desc)) { 672 + err = PTR_ERR(re->desc); 673 + dbg_err("cannot open volume %d, error %d", vol_id, err); 674 + kfree(re); 675 + goto out_free; 676 + } 677 + 678 + /* Skip this re-naming if 
the name does not really change */ 679 + if (re->desc->vol->name_len == name_len && 680 + !memcmp(re->desc->vol->name, name, name_len)) { 681 + ubi_close_volume(re->desc); 682 + kfree(re); 683 + continue; 684 + } 685 + 686 + re->new_name_len = name_len; 687 + memcpy(re->new_name, name, name_len); 688 + list_add_tail(&re->list, &rename_list); 689 + dbg_msg("will rename volume %d from \"%s\" to \"%s\"", 690 + vol_id, re->desc->vol->name, name); 691 + } 692 + 693 + if (list_empty(&rename_list)) 694 + return 0; 695 + 696 + /* Find out the volumes which have to be removed */ 697 + list_for_each_entry(re, &rename_list, list) { 698 + struct ubi_volume_desc *desc; 699 + int no_remove_needed = 0; 700 + 701 + /* 702 + * Volume @re->vol_id is going to be re-named to 703 + * @re->new_name, while its current name is @name. If a volume 704 + * with name @re->new_name currently exists, it has to be 705 + * removed, unless it is also re-named in the request (@req). 706 + */ 707 + list_for_each_entry(re1, &rename_list, list) { 708 + if (re->new_name_len == re1->desc->vol->name_len && 709 + !memcmp(re->new_name, re1->desc->vol->name, 710 + re1->desc->vol->name_len)) { 711 + no_remove_needed = 1; 712 + break; 713 + } 714 + } 715 + 716 + if (no_remove_needed) 717 + continue; 718 + 719 + /* 720 + * It seems we need to remove volume with name @re->new_name, 721 + * if it exists. 
722 + */ 723 + desc = ubi_open_volume_nm(ubi->ubi_num, re->new_name, UBI_EXCLUSIVE); 724 + if (IS_ERR(desc)) { 725 + err = PTR_ERR(desc); 726 + if (err == -ENODEV) 727 + /* Re-naming into a non-existing volume name */ 728 + continue; 729 + 730 + /* The volume exists but busy, or an error occurred */ 731 + dbg_err("cannot open volume \"%s\", error %d", 732 + re->new_name, err); 733 + goto out_free; 734 + } 735 + 736 + re = kzalloc(sizeof(struct ubi_rename_entry), GFP_KERNEL); 737 + if (!re) { 738 + err = -ENOMEM; 739 + ubi_close_volume(desc); 740 + goto out_free; 741 + } 742 + 743 + re->remove = 1; 744 + re->desc = desc; 745 + list_add(&re->list, &rename_list); 746 + dbg_msg("will remove volume %d, name \"%s\"", 747 + re->desc->vol->vol_id, re->desc->vol->name); 748 + } 749 + 750 + mutex_lock(&ubi->volumes_mutex); 751 + err = ubi_rename_volumes(ubi, &rename_list); 752 + mutex_unlock(&ubi->volumes_mutex); 753 + 754 + out_free: 755 + list_for_each_entry_safe(re, re1, &rename_list, list) { 756 + ubi_close_volume(re->desc); 757 + list_del(&re->list); 758 + kfree(re); 759 + } 760 + return err; 761 + } 762 + 763 static int ubi_cdev_ioctl(struct inode *inode, struct file *file, 764 unsigned int cmd, unsigned long arg) 765 { ··· 621 { 622 struct ubi_mkvol_req req; 623 624 + dbg_gen("create volume"); 625 err = copy_from_user(&req, argp, sizeof(struct ubi_mkvol_req)); 626 if (err) { 627 err = -EFAULT; 628 break; 629 } 630 631 + req.name[req.name_len] = '\0'; 632 err = verify_mkvol_req(ubi, &req); 633 if (err) 634 break; 635 636 mutex_lock(&ubi->volumes_mutex); 637 err = ubi_create_volume(ubi, &req); ··· 652 { 653 int vol_id; 654 655 + dbg_gen("remove volume"); 656 err = get_user(vol_id, (__user int32_t *)argp); 657 if (err) { 658 err = -EFAULT; ··· 666 } 667 668 mutex_lock(&ubi->volumes_mutex); 669 + err = ubi_remove_volume(desc, 0); 670 mutex_unlock(&ubi->volumes_mutex); 671 672 /* ··· 685 uint64_t tmp; 686 struct ubi_rsvol_req req; 687 688 + dbg_gen("re-size volume"); 689 
err = copy_from_user(&req, argp, sizeof(struct ubi_rsvol_req)); 690 if (err) { 691 err = -EFAULT; ··· 710 err = ubi_resize_volume(desc, pebs); 711 mutex_unlock(&ubi->volumes_mutex); 712 ubi_close_volume(desc); 713 + break; 714 + } 715 + 716 + /* Re-name volumes command */ 717 + case UBI_IOCRNVOL: 718 + { 719 + struct ubi_rnvol_req *req; 720 + 721 + dbg_msg("re-name volumes"); 722 + req = kmalloc(sizeof(struct ubi_rnvol_req), GFP_KERNEL); 723 + if (!req) { 724 + err = -ENOMEM; 725 + break; 726 + }; 727 + 728 + err = copy_from_user(req, argp, sizeof(struct ubi_rnvol_req)); 729 + if (err) { 730 + err = -EFAULT; 731 + kfree(req); 732 + break; 733 + } 734 + 735 + mutex_lock(&ubi->mult_mutex); 736 + err = rename_volumes(ubi, req); 737 + mutex_unlock(&ubi->mult_mutex); 738 + kfree(req); 739 break; 740 } 741 ··· 738 struct ubi_attach_req req; 739 struct mtd_info *mtd; 740 741 + dbg_gen("attach MTD device"); 742 err = copy_from_user(&req, argp, sizeof(struct ubi_attach_req)); 743 if (err) { 744 err = -EFAULT; ··· 778 { 779 int ubi_num; 780 781 + dbg_gen("dettach MTD device"); 782 err = get_user(ubi_num, (__user int32_t *)argp); 783 if (err) { 784 err = -EFAULT;
+82 -76
drivers/mtd/ubi/debug.c
··· 24 * changes. 25 */ 26 27 - #ifdef CONFIG_MTD_UBI_DEBUG_MSG 28 29 #include "ubi.h" 30 ··· 34 */ 35 void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr) 36 { 37 - dbg_msg("erase counter header dump:"); 38 - dbg_msg("magic %#08x", be32_to_cpu(ec_hdr->magic)); 39 - dbg_msg("version %d", (int)ec_hdr->version); 40 - dbg_msg("ec %llu", (long long)be64_to_cpu(ec_hdr->ec)); 41 - dbg_msg("vid_hdr_offset %d", be32_to_cpu(ec_hdr->vid_hdr_offset)); 42 - dbg_msg("data_offset %d", be32_to_cpu(ec_hdr->data_offset)); 43 - dbg_msg("hdr_crc %#08x", be32_to_cpu(ec_hdr->hdr_crc)); 44 - dbg_msg("erase counter header hexdump:"); 45 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, 46 ec_hdr, UBI_EC_HDR_SIZE, 1); 47 } ··· 57 */ 58 void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr) 59 { 60 - dbg_msg("volume identifier header dump:"); 61 - dbg_msg("magic %08x", be32_to_cpu(vid_hdr->magic)); 62 - dbg_msg("version %d", (int)vid_hdr->version); 63 - dbg_msg("vol_type %d", (int)vid_hdr->vol_type); 64 - dbg_msg("copy_flag %d", (int)vid_hdr->copy_flag); 65 - dbg_msg("compat %d", (int)vid_hdr->compat); 66 - dbg_msg("vol_id %d", be32_to_cpu(vid_hdr->vol_id)); 67 - dbg_msg("lnum %d", be32_to_cpu(vid_hdr->lnum)); 68 - dbg_msg("leb_ver %u", be32_to_cpu(vid_hdr->leb_ver)); 69 - dbg_msg("data_size %d", be32_to_cpu(vid_hdr->data_size)); 70 - dbg_msg("used_ebs %d", be32_to_cpu(vid_hdr->used_ebs)); 71 - dbg_msg("data_pad %d", be32_to_cpu(vid_hdr->data_pad)); 72 - dbg_msg("sqnum %llu", 73 (unsigned long long)be64_to_cpu(vid_hdr->sqnum)); 74 - dbg_msg("hdr_crc %08x", be32_to_cpu(vid_hdr->hdr_crc)); 75 - dbg_msg("volume identifier header hexdump:"); 76 } 77 78 /** ··· 82 */ 83 void ubi_dbg_dump_vol_info(const struct ubi_volume *vol) 84 { 85 - dbg_msg("volume information dump:"); 86 - dbg_msg("vol_id %d", vol->vol_id); 87 - dbg_msg("reserved_pebs %d", vol->reserved_pebs); 88 - dbg_msg("alignment %d", vol->alignment); 89 - dbg_msg("data_pad %d", vol->data_pad); 90 - dbg_msg("vol_type 
%d", vol->vol_type); 91 - dbg_msg("name_len %d", vol->name_len); 92 - dbg_msg("usable_leb_size %d", vol->usable_leb_size); 93 - dbg_msg("used_ebs %d", vol->used_ebs); 94 - dbg_msg("used_bytes %lld", vol->used_bytes); 95 - dbg_msg("last_eb_bytes %d", vol->last_eb_bytes); 96 - dbg_msg("corrupted %d", vol->corrupted); 97 - dbg_msg("upd_marker %d", vol->upd_marker); 98 99 if (vol->name_len <= UBI_VOL_NAME_MAX && 100 strnlen(vol->name, vol->name_len + 1) == vol->name_len) { 101 - dbg_msg("name %s", vol->name); 102 } else { 103 - dbg_msg("the 1st 5 characters of the name: %c%c%c%c%c", 104 - vol->name[0], vol->name[1], vol->name[2], 105 - vol->name[3], vol->name[4]); 106 } 107 } 108 ··· 115 { 116 int name_len = be16_to_cpu(r->name_len); 117 118 - dbg_msg("volume table record %d dump:", idx); 119 - dbg_msg("reserved_pebs %d", be32_to_cpu(r->reserved_pebs)); 120 - dbg_msg("alignment %d", be32_to_cpu(r->alignment)); 121 - dbg_msg("data_pad %d", be32_to_cpu(r->data_pad)); 122 - dbg_msg("vol_type %d", (int)r->vol_type); 123 - dbg_msg("upd_marker %d", (int)r->upd_marker); 124 - dbg_msg("name_len %d", name_len); 125 126 if (r->name[0] == '\0') { 127 - dbg_msg("name NULL"); 128 return; 129 } 130 131 if (name_len <= UBI_VOL_NAME_MAX && 132 strnlen(&r->name[0], name_len + 1) == name_len) { 133 - dbg_msg("name %s", &r->name[0]); 134 } else { 135 - dbg_msg("1st 5 characters of the name: %c%c%c%c%c", 136 r->name[0], r->name[1], r->name[2], r->name[3], 137 r->name[4]); 138 } 139 - dbg_msg("crc %#08x", be32_to_cpu(r->crc)); 140 } 141 142 /** ··· 146 */ 147 void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv) 148 { 149 - dbg_msg("volume scanning information dump:"); 150 - dbg_msg("vol_id %d", sv->vol_id); 151 - dbg_msg("highest_lnum %d", sv->highest_lnum); 152 - dbg_msg("leb_count %d", sv->leb_count); 153 - dbg_msg("compat %d", sv->compat); 154 - dbg_msg("vol_type %d", sv->vol_type); 155 - dbg_msg("used_ebs %d", sv->used_ebs); 156 - dbg_msg("last_data_size %d", sv->last_data_size); 157 
- dbg_msg("data_pad %d", sv->data_pad); 158 } 159 160 /** ··· 164 */ 165 void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type) 166 { 167 - dbg_msg("eraseblock scanning information dump:"); 168 - dbg_msg("ec %d", seb->ec); 169 - dbg_msg("pnum %d", seb->pnum); 170 if (type == 0) { 171 - dbg_msg("lnum %d", seb->lnum); 172 - dbg_msg("scrub %d", seb->scrub); 173 - dbg_msg("sqnum %llu", seb->sqnum); 174 - dbg_msg("leb_ver %u", seb->leb_ver); 175 } 176 } 177 ··· 182 { 183 char nm[17]; 184 185 - dbg_msg("volume creation request dump:"); 186 - dbg_msg("vol_id %d", req->vol_id); 187 - dbg_msg("alignment %d", req->alignment); 188 - dbg_msg("bytes %lld", (long long)req->bytes); 189 - dbg_msg("vol_type %d", req->vol_type); 190 - dbg_msg("name_len %d", req->name_len); 191 192 memcpy(nm, req->name, 16); 193 nm[16] = 0; 194 - dbg_msg("the 1st 16 characters of the name: %s", nm); 195 } 196 197 - #endif /* CONFIG_MTD_UBI_DEBUG_MSG */
··· 24 * changes. 25 */ 26 27 + #ifdef CONFIG_MTD_UBI_DEBUG 28 29 #include "ubi.h" 30 ··· 34 */ 35 void ubi_dbg_dump_ec_hdr(const struct ubi_ec_hdr *ec_hdr) 36 { 37 + printk(KERN_DEBUG "Erase counter header dump:\n"); 38 + printk(KERN_DEBUG "\tmagic %#08x\n", 39 + be32_to_cpu(ec_hdr->magic)); 40 + printk(KERN_DEBUG "\tversion %d\n", (int)ec_hdr->version); 41 + printk(KERN_DEBUG "\tec %llu\n", 42 + (long long)be64_to_cpu(ec_hdr->ec)); 43 + printk(KERN_DEBUG "\tvid_hdr_offset %d\n", 44 + be32_to_cpu(ec_hdr->vid_hdr_offset)); 45 + printk(KERN_DEBUG "\tdata_offset %d\n", 46 + be32_to_cpu(ec_hdr->data_offset)); 47 + printk(KERN_DEBUG "\thdr_crc %#08x\n", 48 + be32_to_cpu(ec_hdr->hdr_crc)); 49 + printk(KERN_DEBUG "erase counter header hexdump:\n"); 50 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, 51 ec_hdr, UBI_EC_HDR_SIZE, 1); 52 } ··· 52 */ 53 void ubi_dbg_dump_vid_hdr(const struct ubi_vid_hdr *vid_hdr) 54 { 55 + printk(KERN_DEBUG "Volume identifier header dump:\n"); 56 + printk(KERN_DEBUG "\tmagic %08x\n", be32_to_cpu(vid_hdr->magic)); 57 + printk(KERN_DEBUG "\tversion %d\n", (int)vid_hdr->version); 58 + printk(KERN_DEBUG "\tvol_type %d\n", (int)vid_hdr->vol_type); 59 + printk(KERN_DEBUG "\tcopy_flag %d\n", (int)vid_hdr->copy_flag); 60 + printk(KERN_DEBUG "\tcompat %d\n", (int)vid_hdr->compat); 61 + printk(KERN_DEBUG "\tvol_id %d\n", be32_to_cpu(vid_hdr->vol_id)); 62 + printk(KERN_DEBUG "\tlnum %d\n", be32_to_cpu(vid_hdr->lnum)); 63 + printk(KERN_DEBUG "\tdata_size %d\n", be32_to_cpu(vid_hdr->data_size)); 64 + printk(KERN_DEBUG "\tused_ebs %d\n", be32_to_cpu(vid_hdr->used_ebs)); 65 + printk(KERN_DEBUG "\tdata_pad %d\n", be32_to_cpu(vid_hdr->data_pad)); 66 + printk(KERN_DEBUG "\tsqnum %llu\n", 67 (unsigned long long)be64_to_cpu(vid_hdr->sqnum)); 68 + printk(KERN_DEBUG "\thdr_crc %08x\n", be32_to_cpu(vid_hdr->hdr_crc)); 69 + printk(KERN_DEBUG "Volume identifier header hexdump:\n"); 70 + print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, 71 + vid_hdr, 
UBI_VID_HDR_SIZE, 1); 72 } 73 74 /** ··· 76 */ 77 void ubi_dbg_dump_vol_info(const struct ubi_volume *vol) 78 { 79 + printk(KERN_DEBUG "Volume information dump:\n"); 80 + printk(KERN_DEBUG "\tvol_id %d\n", vol->vol_id); 81 + printk(KERN_DEBUG "\treserved_pebs %d\n", vol->reserved_pebs); 82 + printk(KERN_DEBUG "\talignment %d\n", vol->alignment); 83 + printk(KERN_DEBUG "\tdata_pad %d\n", vol->data_pad); 84 + printk(KERN_DEBUG "\tvol_type %d\n", vol->vol_type); 85 + printk(KERN_DEBUG "\tname_len %d\n", vol->name_len); 86 + printk(KERN_DEBUG "\tusable_leb_size %d\n", vol->usable_leb_size); 87 + printk(KERN_DEBUG "\tused_ebs %d\n", vol->used_ebs); 88 + printk(KERN_DEBUG "\tused_bytes %lld\n", vol->used_bytes); 89 + printk(KERN_DEBUG "\tlast_eb_bytes %d\n", vol->last_eb_bytes); 90 + printk(KERN_DEBUG "\tcorrupted %d\n", vol->corrupted); 91 + printk(KERN_DEBUG "\tupd_marker %d\n", vol->upd_marker); 92 93 if (vol->name_len <= UBI_VOL_NAME_MAX && 94 strnlen(vol->name, vol->name_len + 1) == vol->name_len) { 95 + printk(KERN_DEBUG "\tname %s\n", vol->name); 96 } else { 97 + printk(KERN_DEBUG "\t1st 5 characters of name: %c%c%c%c%c\n", 98 + vol->name[0], vol->name[1], vol->name[2], 99 + vol->name[3], vol->name[4]); 100 } 101 } 102 ··· 109 { 110 int name_len = be16_to_cpu(r->name_len); 111 112 + printk(KERN_DEBUG "Volume table record %d dump:\n", idx); 113 + printk(KERN_DEBUG "\treserved_pebs %d\n", 114 + be32_to_cpu(r->reserved_pebs)); 115 + printk(KERN_DEBUG "\talignment %d\n", be32_to_cpu(r->alignment)); 116 + printk(KERN_DEBUG "\tdata_pad %d\n", be32_to_cpu(r->data_pad)); 117 + printk(KERN_DEBUG "\tvol_type %d\n", (int)r->vol_type); 118 + printk(KERN_DEBUG "\tupd_marker %d\n", (int)r->upd_marker); 119 + printk(KERN_DEBUG "\tname_len %d\n", name_len); 120 121 if (r->name[0] == '\0') { 122 + printk(KERN_DEBUG "\tname NULL\n"); 123 return; 124 } 125 126 if (name_len <= UBI_VOL_NAME_MAX && 127 strnlen(&r->name[0], name_len + 1) == name_len) { 128 + printk(KERN_DEBUG "\tname 
%s\n", &r->name[0]); 129 } else { 130 + printk(KERN_DEBUG "\t1st 5 characters of name: %c%c%c%c%c\n", 131 r->name[0], r->name[1], r->name[2], r->name[3], 132 r->name[4]); 133 } 134 + printk(KERN_DEBUG "\tcrc %#08x\n", be32_to_cpu(r->crc)); 135 } 136 137 /** ··· 139 */ 140 void ubi_dbg_dump_sv(const struct ubi_scan_volume *sv) 141 { 142 + printk(KERN_DEBUG "Volume scanning information dump:\n"); 143 + printk(KERN_DEBUG "\tvol_id %d\n", sv->vol_id); 144 + printk(KERN_DEBUG "\thighest_lnum %d\n", sv->highest_lnum); 145 + printk(KERN_DEBUG "\tleb_count %d\n", sv->leb_count); 146 + printk(KERN_DEBUG "\tcompat %d\n", sv->compat); 147 + printk(KERN_DEBUG "\tvol_type %d\n", sv->vol_type); 148 + printk(KERN_DEBUG "\tused_ebs %d\n", sv->used_ebs); 149 + printk(KERN_DEBUG "\tlast_data_size %d\n", sv->last_data_size); 150 + printk(KERN_DEBUG "\tdata_pad %d\n", sv->data_pad); 151 } 152 153 /** ··· 157 */ 158 void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type) 159 { 160 + printk(KERN_DEBUG "eraseblock scanning information dump:\n"); 161 + printk(KERN_DEBUG "\tec %d\n", seb->ec); 162 + printk(KERN_DEBUG "\tpnum %d\n", seb->pnum); 163 if (type == 0) { 164 + printk(KERN_DEBUG "\tlnum %d\n", seb->lnum); 165 + printk(KERN_DEBUG "\tscrub %d\n", seb->scrub); 166 + printk(KERN_DEBUG "\tsqnum %llu\n", seb->sqnum); 167 } 168 } 169 ··· 176 { 177 char nm[17]; 178 179 + printk(KERN_DEBUG "Volume creation request dump:\n"); 180 + printk(KERN_DEBUG "\tvol_id %d\n", req->vol_id); 181 + printk(KERN_DEBUG "\talignment %d\n", req->alignment); 182 + printk(KERN_DEBUG "\tbytes %lld\n", (long long)req->bytes); 183 + printk(KERN_DEBUG "\tvol_type %d\n", req->vol_type); 184 + printk(KERN_DEBUG "\tname_len %d\n", req->name_len); 185 186 memcpy(nm, req->name, 16); 187 nm[16] = 0; 188 + printk(KERN_DEBUG "\t1st 16 characters of name: %s\n", nm); 189 } 190 191 + #endif /* CONFIG_MTD_UBI_DEBUG */
+47 -27
drivers/mtd/ubi/debug.h
··· 24 #ifdef CONFIG_MTD_UBI_DEBUG 25 #include <linux/random.h> 26 27 - #define ubi_assert(expr) BUG_ON(!(expr)) 28 #define dbg_err(fmt, ...) ubi_err(fmt, ##__VA_ARGS__) 29 - #else 30 - #define ubi_assert(expr) ({}) 31 - #define dbg_err(fmt, ...) ({}) 32 - #endif 33 34 - #ifdef CONFIG_MTD_UBI_DEBUG_DISABLE_BGT 35 - #define DBG_DISABLE_BGT 1 36 - #else 37 - #define DBG_DISABLE_BGT 0 38 - #endif 39 40 - #ifdef CONFIG_MTD_UBI_DEBUG_MSG 41 - /* Generic debugging message */ 42 #define dbg_msg(fmt, ...) \ 43 printk(KERN_DEBUG "UBI DBG (pid %d): %s: " fmt "\n", \ 44 current->pid, __func__, ##__VA_ARGS__) ··· 56 void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type); 57 void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req); 58 59 #else 60 - 61 - #define dbg_msg(fmt, ...) ({}) 62 - #define ubi_dbg_dump_stack() ({}) 63 - #define ubi_dbg_dump_ec_hdr(ec_hdr) ({}) 64 - #define ubi_dbg_dump_vid_hdr(vid_hdr) ({}) 65 - #define ubi_dbg_dump_vol_info(vol) ({}) 66 - #define ubi_dbg_dump_vtbl_record(r, idx) ({}) 67 - #define ubi_dbg_dump_sv(sv) ({}) 68 - #define ubi_dbg_dump_seb(seb, type) ({}) 69 - #define ubi_dbg_dump_mkvol_req(req) ({}) 70 - 71 - #endif /* CONFIG_MTD_UBI_DEBUG_MSG */ 72 73 #ifdef CONFIG_MTD_UBI_DEBUG_MSG_EBA 74 - /* Messages from the eraseblock association unit */ 75 #define dbg_eba(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) 76 #else 77 #define dbg_eba(fmt, ...) ({}) 78 #endif 79 80 #ifdef CONFIG_MTD_UBI_DEBUG_MSG_WL 81 - /* Messages from the wear-leveling unit */ 82 #define dbg_wl(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) 83 #else 84 #define dbg_wl(fmt, ...) ({}) 85 #endif 86 87 #ifdef CONFIG_MTD_UBI_DEBUG_MSG_IO 88 - /* Messages from the input/output unit */ 89 #define dbg_io(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) 90 #else 91 #define dbg_io(fmt, ...) ({}) ··· 91 #else 92 #define dbg_bld(fmt, ...) 
({}) 93 #define UBI_IO_DEBUG 0 94 #endif 95 96 #ifdef CONFIG_MTD_UBI_DEBUG_EMULATE_BITFLIPS ··· 143 #define ubi_dbg_is_erase_failure() 0 144 #endif 145 146 #endif /* !__UBI_DEBUG_H__ */
··· 24 #ifdef CONFIG_MTD_UBI_DEBUG 25 #include <linux/random.h> 26 27 #define dbg_err(fmt, ...) ubi_err(fmt, ##__VA_ARGS__) 28 29 + #define ubi_assert(expr) do { \ 30 + if (unlikely(!(expr))) { \ 31 + printk(KERN_CRIT "UBI assert failed in %s at %u (pid %d)\n", \ 32 + __func__, __LINE__, current->pid); \ 33 + ubi_dbg_dump_stack(); \ 34 + } \ 35 + } while (0) 36 37 #define dbg_msg(fmt, ...) \ 38 printk(KERN_DEBUG "UBI DBG (pid %d): %s: " fmt "\n", \ 39 current->pid, __func__, ##__VA_ARGS__) ··· 61 void ubi_dbg_dump_seb(const struct ubi_scan_leb *seb, int type); 62 void ubi_dbg_dump_mkvol_req(const struct ubi_mkvol_req *req); 63 64 + #ifdef CONFIG_MTD_UBI_DEBUG_MSG 65 + /* General debugging messages */ 66 + #define dbg_gen(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) 67 #else 68 + #define dbg_gen(fmt, ...) ({}) 69 + #endif 70 71 #ifdef CONFIG_MTD_UBI_DEBUG_MSG_EBA 72 + /* Messages from the eraseblock association sub-system */ 73 #define dbg_eba(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) 74 #else 75 #define dbg_eba(fmt, ...) ({}) 76 #endif 77 78 #ifdef CONFIG_MTD_UBI_DEBUG_MSG_WL 79 + /* Messages from the wear-leveling sub-system */ 80 #define dbg_wl(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) 81 #else 82 #define dbg_wl(fmt, ...) ({}) 83 #endif 84 85 #ifdef CONFIG_MTD_UBI_DEBUG_MSG_IO 86 + /* Messages from the input/output sub-system */ 87 #define dbg_io(fmt, ...) dbg_msg(fmt, ##__VA_ARGS__) 88 #else 89 #define dbg_io(fmt, ...) ({}) ··· 103 #else 104 #define dbg_bld(fmt, ...) ({}) 105 #define UBI_IO_DEBUG 0 106 + #endif 107 + 108 + #ifdef CONFIG_MTD_UBI_DEBUG_DISABLE_BGT 109 + #define DBG_DISABLE_BGT 1 110 + #else 111 + #define DBG_DISABLE_BGT 0 112 #endif 113 114 #ifdef CONFIG_MTD_UBI_DEBUG_EMULATE_BITFLIPS ··· 149 #define ubi_dbg_is_erase_failure() 0 150 #endif 151 152 + #else 153 + 154 + #define ubi_assert(expr) ({}) 155 + #define dbg_err(fmt, ...) ({}) 156 + #define dbg_msg(fmt, ...) ({}) 157 + #define dbg_gen(fmt, ...) ({}) 158 + #define dbg_eba(fmt, ...) 
({}) 159 + #define dbg_wl(fmt, ...) ({}) 160 + #define dbg_io(fmt, ...) ({}) 161 + #define dbg_bld(fmt, ...) ({}) 162 + #define ubi_dbg_dump_stack() ({}) 163 + #define ubi_dbg_dump_ec_hdr(ec_hdr) ({}) 164 + #define ubi_dbg_dump_vid_hdr(vid_hdr) ({}) 165 + #define ubi_dbg_dump_vol_info(vol) ({}) 166 + #define ubi_dbg_dump_vtbl_record(r, idx) ({}) 167 + #define ubi_dbg_dump_sv(sv) ({}) 168 + #define ubi_dbg_dump_seb(seb, type) ({}) 169 + #define ubi_dbg_dump_mkvol_req(req) ({}) 170 + 171 + #define UBI_IO_DEBUG 0 172 + #define DBG_DISABLE_BGT 0 173 + #define ubi_dbg_is_bitflip() 0 174 + #define ubi_dbg_is_write_failure() 0 175 + #define ubi_dbg_is_erase_failure() 0 176 + 177 + #endif /* !CONFIG_MTD_UBI_DEBUG */ 178 #endif /* !__UBI_DEBUG_H__ */
+22 -55
drivers/mtd/ubi/eba.c
··· 19 */ 20 21 /* 22 - * The UBI Eraseblock Association (EBA) unit. 23 * 24 - * This unit is responsible for I/O to/from logical eraseblock. 25 * 26 * Although in this implementation the EBA table is fully kept and managed in 27 * RAM, which assumes poor scalability, it might be (partially) maintained on 28 * flash in future implementations. 29 * 30 - * The EBA unit implements per-logical eraseblock locking. Before accessing a 31 - * logical eraseblock it is locked for reading or writing. The per-logical 32 - * eraseblock locking is implemented by means of the lock tree. The lock tree 33 - * is an RB-tree which refers all the currently locked logical eraseblocks. The 34 - * lock tree elements are &struct ubi_ltree_entry objects. They are indexed by 35 - * (@vol_id, @lnum) pairs. 36 * 37 * EBA also maintains the global sequence counter which is incremented each 38 * time a logical eraseblock is mapped to a physical eraseblock and it is ··· 189 le->users += 1; 190 spin_unlock(&ubi->ltree_lock); 191 192 - if (le_free) 193 - kfree(le_free); 194 - 195 return le; 196 } 197 ··· 221 */ 222 static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum) 223 { 224 - int free = 0; 225 struct ubi_ltree_entry *le; 226 227 spin_lock(&ubi->ltree_lock); 228 le = ltree_lookup(ubi, vol_id, lnum); 229 le->users -= 1; 230 ubi_assert(le->users >= 0); 231 if (le->users == 0) { 232 rb_erase(&le->rb, &ubi->ltree); 233 - free = 1; 234 } 235 spin_unlock(&ubi->ltree_lock); 236 - 237 - up_read(&le->mutex); 238 - if (free) 239 - kfree(le); 240 } 241 242 /** ··· 268 */ 269 static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum) 270 { 271 - int free; 272 struct ubi_ltree_entry *le; 273 274 le = ltree_add_entry(ubi, vol_id, lnum); ··· 282 ubi_assert(le->users >= 0); 283 if (le->users == 0) { 284 rb_erase(&le->rb, &ubi->ltree); 285 - free = 1; 286 - } else 287 - free = 0; 288 - spin_unlock(&ubi->ltree_lock); 289 - if (free) 290 kfree(le); 291 292 return 1; 293 } ··· 
297 */ 298 static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum) 299 { 300 - int free; 301 struct ubi_ltree_entry *le; 302 303 spin_lock(&ubi->ltree_lock); 304 le = ltree_lookup(ubi, vol_id, lnum); 305 le->users -= 1; 306 ubi_assert(le->users >= 0); 307 if (le->users == 0) { 308 rb_erase(&le->rb, &ubi->ltree); 309 - free = 1; 310 - } else 311 - free = 0; 312 - spin_unlock(&ubi->ltree_lock); 313 - 314 - up_write(&le->mutex); 315 - if (free) 316 kfree(le); 317 } 318 319 /** ··· 501 struct ubi_vid_hdr *vid_hdr; 502 503 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); 504 - if (!vid_hdr) { 505 return -ENOMEM; 506 - } 507 508 mutex_lock(&ubi->buf_mutex); 509 ··· 736 /* If this is the last LEB @len may be unaligned */ 737 len = ALIGN(data_size, ubi->min_io_size); 738 else 739 - ubi_assert(len % ubi->min_io_size == 0); 740 741 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); 742 if (!vid_hdr) ··· 903 } 904 905 if (vol->eba_tbl[lnum] >= 0) { 906 - err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 1); 907 if (err) 908 goto out_leb_unlock; 909 } ··· 1125 } 1126 1127 /** 1128 - * ubi_eba_init_scan - initialize the EBA unit using scanning information. 1129 * @ubi: UBI device description object 1130 * @si: scanning information 1131 * ··· 1140 struct ubi_scan_leb *seb; 1141 struct rb_node *rb; 1142 1143 - dbg_eba("initialize EBA unit"); 1144 1145 spin_lock_init(&ubi->ltree_lock); 1146 mutex_init(&ubi->alc_mutex); ··· 1206 ubi->rsvd_pebs += ubi->beb_rsvd_pebs; 1207 } 1208 1209 - dbg_eba("EBA unit is initialized"); 1210 return 0; 1211 1212 out_free: ··· 1216 kfree(ubi->volumes[i]->eba_tbl); 1217 } 1218 return err; 1219 - } 1220 - 1221 - /** 1222 - * ubi_eba_close - close EBA unit. 
1223 - * @ubi: UBI device description object 1224 - */ 1225 - void ubi_eba_close(const struct ubi_device *ubi) 1226 - { 1227 - int i, num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT; 1228 - 1229 - dbg_eba("close EBA unit"); 1230 - 1231 - for (i = 0; i < num_volumes; i++) { 1232 - if (!ubi->volumes[i]) 1233 - continue; 1234 - kfree(ubi->volumes[i]->eba_tbl); 1235 - } 1236 }
··· 19 */ 20 21 /* 22 + * The UBI Eraseblock Association (EBA) sub-system. 23 * 24 + * This sub-system is responsible for I/O to/from logical eraseblock. 25 * 26 * Although in this implementation the EBA table is fully kept and managed in 27 * RAM, which assumes poor scalability, it might be (partially) maintained on 28 * flash in future implementations. 29 * 30 + * The EBA sub-system implements per-logical eraseblock locking. Before 31 + * accessing a logical eraseblock it is locked for reading or writing. The 32 + * per-logical eraseblock locking is implemented by means of the lock tree. The 33 + * lock tree is an RB-tree which refers all the currently locked logical 34 + * eraseblocks. The lock tree elements are &struct ubi_ltree_entry objects. 35 + * They are indexed by (@vol_id, @lnum) pairs. 36 * 37 * EBA also maintains the global sequence counter which is incremented each 38 * time a logical eraseblock is mapped to a physical eraseblock and it is ··· 189 le->users += 1; 190 spin_unlock(&ubi->ltree_lock); 191 192 + kfree(le_free); 193 return le; 194 } 195 ··· 223 */ 224 static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum) 225 { 226 struct ubi_ltree_entry *le; 227 228 spin_lock(&ubi->ltree_lock); 229 le = ltree_lookup(ubi, vol_id, lnum); 230 le->users -= 1; 231 ubi_assert(le->users >= 0); 232 + up_read(&le->mutex); 233 if (le->users == 0) { 234 rb_erase(&le->rb, &ubi->ltree); 235 + kfree(le); 236 } 237 spin_unlock(&ubi->ltree_lock); 238 } 239 240 /** ··· 274 */ 275 static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum) 276 { 277 struct ubi_ltree_entry *le; 278 279 le = ltree_add_entry(ubi, vol_id, lnum); ··· 289 ubi_assert(le->users >= 0); 290 if (le->users == 0) { 291 rb_erase(&le->rb, &ubi->ltree); 292 kfree(le); 293 + } 294 + spin_unlock(&ubi->ltree_lock); 295 296 return 1; 297 } ··· 307 */ 308 static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum) 309 { 310 struct ubi_ltree_entry *le; 311 312 
spin_lock(&ubi->ltree_lock); 313 le = ltree_lookup(ubi, vol_id, lnum); 314 le->users -= 1; 315 ubi_assert(le->users >= 0); 316 + up_write(&le->mutex); 317 if (le->users == 0) { 318 rb_erase(&le->rb, &ubi->ltree); 319 kfree(le); 320 + } 321 + spin_unlock(&ubi->ltree_lock); 322 } 323 324 /** ··· 516 struct ubi_vid_hdr *vid_hdr; 517 518 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); 519 + if (!vid_hdr) 520 return -ENOMEM; 521 522 mutex_lock(&ubi->buf_mutex); 523 ··· 752 /* If this is the last LEB @len may be unaligned */ 753 len = ALIGN(data_size, ubi->min_io_size); 754 else 755 + ubi_assert(!(len & (ubi->min_io_size - 1))); 756 757 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); 758 if (!vid_hdr) ··· 919 } 920 921 if (vol->eba_tbl[lnum] >= 0) { 922 + err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 0); 923 if (err) 924 goto out_leb_unlock; 925 } ··· 1141 } 1142 1143 /** 1144 + * ubi_eba_init_scan - initialize the EBA sub-system using scanning information. 1145 * @ubi: UBI device description object 1146 * @si: scanning information 1147 * ··· 1156 struct ubi_scan_leb *seb; 1157 struct rb_node *rb; 1158 1159 + dbg_eba("initialize EBA sub-system"); 1160 1161 spin_lock_init(&ubi->ltree_lock); 1162 mutex_init(&ubi->alc_mutex); ··· 1222 ubi->rsvd_pebs += ubi->beb_rsvd_pebs; 1223 } 1224 1225 + dbg_eba("EBA sub-system is initialized"); 1226 return 0; 1227 1228 out_free: ··· 1232 kfree(ubi->volumes[i]->eba_tbl); 1233 } 1234 return err; 1235 }
+8 -8
drivers/mtd/ubi/gluebi.c
··· 111 struct ubi_device *ubi; 112 uint64_t tmp = from; 113 114 - dbg_msg("read %zd bytes from offset %lld", len, from); 115 116 if (len < 0 || from < 0 || from + len > mtd->size) 117 return -EINVAL; ··· 162 struct ubi_device *ubi; 163 uint64_t tmp = to; 164 165 - dbg_msg("write %zd bytes to offset %lld", len, to); 166 167 if (len < 0 || to < 0 || len + to > mtd->size) 168 return -EINVAL; ··· 215 struct ubi_volume *vol; 216 struct ubi_device *ubi; 217 218 - dbg_msg("erase %u bytes at offset %u", instr->len, instr->addr); 219 220 if (instr->addr < 0 || instr->addr > mtd->size - mtd->erasesize) 221 return -EINVAL; ··· 249 if (err) 250 goto out_err; 251 252 - instr->state = MTD_ERASE_DONE; 253 - mtd_erase_callback(instr); 254 return 0; 255 256 out_err: ··· 299 mtd->size = vol->used_bytes; 300 301 if (add_mtd_device(mtd)) { 302 - ubi_err("cannot not add MTD device\n"); 303 kfree(mtd->name); 304 return -ENFILE; 305 } 306 307 - dbg_msg("added mtd%d (\"%s\"), size %u, EB size %u", 308 mtd->index, mtd->name, mtd->size, mtd->erasesize); 309 return 0; 310 } ··· 322 int err; 323 struct mtd_info *mtd = &vol->gluebi_mtd; 324 325 - dbg_msg("remove mtd%d", mtd->index); 326 err = del_mtd_device(mtd); 327 if (err) 328 return err;
··· 111 struct ubi_device *ubi; 112 uint64_t tmp = from; 113 114 + dbg_gen("read %zd bytes from offset %lld", len, from); 115 116 if (len < 0 || from < 0 || from + len > mtd->size) 117 return -EINVAL; ··· 162 struct ubi_device *ubi; 163 uint64_t tmp = to; 164 165 + dbg_gen("write %zd bytes to offset %lld", len, to); 166 167 if (len < 0 || to < 0 || len + to > mtd->size) 168 return -EINVAL; ··· 215 struct ubi_volume *vol; 216 struct ubi_device *ubi; 217 218 + dbg_gen("erase %u bytes at offset %u", instr->len, instr->addr); 219 220 if (instr->addr < 0 || instr->addr > mtd->size - mtd->erasesize) 221 return -EINVAL; ··· 249 if (err) 250 goto out_err; 251 252 + instr->state = MTD_ERASE_DONE; 253 + mtd_erase_callback(instr); 254 return 0; 255 256 out_err: ··· 299 mtd->size = vol->used_bytes; 300 301 if (add_mtd_device(mtd)) { 302 + ubi_err("cannot not add MTD device"); 303 kfree(mtd->name); 304 return -ENFILE; 305 } 306 307 + dbg_gen("added mtd%d (\"%s\"), size %u, EB size %u", 308 mtd->index, mtd->name, mtd->size, mtd->erasesize); 309 return 0; 310 } ··· 322 int err; 323 struct mtd_info *mtd = &vol->gluebi_mtd; 324 325 + dbg_gen("remove mtd%d", mtd->index); 326 err = del_mtd_device(mtd); 327 if (err) 328 return err;
+26 -22
drivers/mtd/ubi/io.c
··· 20 */ 21 22 /* 23 - * UBI input/output unit. 24 * 25 - * This unit provides a uniform way to work with all kinds of the underlying 26 - * MTD devices. It also implements handy functions for reading and writing UBI 27 - * headers. 28 * 29 * We are trying to have a paranoid mindset and not to trust to what we read 30 - * from the flash media in order to be more secure and robust. So this unit 31 - * validates every single header it reads from the flash media. 32 * 33 * Some words about how the eraseblock headers are stored. 34 * ··· 79 * 512-byte chunks, we have to allocate one more buffer and copy our VID header 80 * to offset 448 of this buffer. 81 * 82 - * The I/O unit does the following trick in order to avoid this extra copy. 83 - * It always allocates a @ubi->vid_hdr_alsize bytes buffer for the VID header 84 - * and returns a pointer to offset @ubi->vid_hdr_shift of this buffer. When the 85 - * VID header is being written out, it shifts the VID header pointer back and 86 - * writes the whole sub-page. 87 */ 88 89 #include <linux/crc32.h> ··· 156 /* 157 * -EUCLEAN is reported if there was a bit-flip which 158 * was corrected, so this is harmless. 
159 */ 160 - ubi_msg("fixable bit-flip detected at PEB %d", pnum); 161 ubi_assert(len == read); 162 return UBI_IO_BITFLIPS; 163 } 164 165 if (read != len && retries++ < UBI_IO_RETRIES) { 166 - dbg_io("error %d while reading %d bytes from PEB %d:%d, " 167 - "read only %zd bytes, retry", 168 err, len, pnum, offset, read); 169 yield(); 170 goto retry; ··· 191 ubi_assert(len == read); 192 193 if (ubi_dbg_is_bitflip()) { 194 - dbg_msg("bit-flip (emulated)"); 195 err = UBI_IO_BITFLIPS; 196 } 197 } ··· 395 { 396 int err, i, patt_count; 397 398 patt_count = ARRAY_SIZE(patterns); 399 ubi_assert(patt_count > 0); 400 ··· 439 } 440 441 err = patt_count; 442 443 out: 444 mutex_unlock(&ubi->buf_mutex); ··· 705 706 if (hdr_crc != crc) { 707 if (verbose) { 708 - ubi_warn("bad EC header CRC at PEB %d, calculated %#08x," 709 - " read %#08x", pnum, crc, hdr_crc); 710 ubi_dbg_dump_ec_hdr(ec_hdr); 711 } 712 return UBI_IO_BAD_EC_HDR; ··· 1101 } 1102 1103 /** 1104 - * paranoid_check_peb_ec_hdr - check that the erase counter header of a 1105 - * physical eraseblock is in-place and is all right. 1106 * @ubi: UBI device description object 1107 * @pnum: the physical eraseblock number to check 1108 * ··· 1179 } 1180 1181 /** 1182 - * paranoid_check_peb_vid_hdr - check that the volume identifier header of a 1183 - * physical eraseblock is in-place and is all right. 1184 * @ubi: UBI device description object 1185 * @pnum: the physical eraseblock number to check 1186 * ··· 1260 1261 fail: 1262 ubi_err("paranoid check failed for PEB %d", pnum); 1263 - dbg_msg("hex dump of the %d-%d region", offset, offset + len); 1264 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, 1265 ubi->dbg_peb_buf, len, 1); 1266 err = 1;
··· 20 */ 21 22 /* 23 + * UBI input/output sub-system. 24 * 25 + * This sub-system provides a uniform way to work with all kinds of the 26 + * underlying MTD devices. It also implements handy functions for reading and 27 + * writing UBI headers. 28 * 29 * We are trying to have a paranoid mindset and not to trust to what we read 30 + * from the flash media in order to be more secure and robust. So this 31 + * sub-system validates every single header it reads from the flash media. 32 * 33 * Some words about how the eraseblock headers are stored. 34 * ··· 79 * 512-byte chunks, we have to allocate one more buffer and copy our VID header 80 * to offset 448 of this buffer. 81 * 82 + * The I/O sub-system does the following trick in order to avoid this extra 83 + * copy. It always allocates a @ubi->vid_hdr_alsize bytes buffer for the VID 84 + * header and returns a pointer to offset @ubi->vid_hdr_shift of this buffer. 85 + * When the VID header is being written out, it shifts the VID header pointer 86 + * back and writes the whole sub-page. 87 */ 88 89 #include <linux/crc32.h> ··· 156 /* 157 * -EUCLEAN is reported if there was a bit-flip which 158 * was corrected, so this is harmless. 159 + * 160 + * We do not report about it here unless debugging is 161 + * enabled. A corresponding message will be printed 162 + * later, when it is has been scrubbed. 
163 */ 164 + dbg_msg("fixable bit-flip detected at PEB %d", pnum); 165 ubi_assert(len == read); 166 return UBI_IO_BITFLIPS; 167 } 168 169 if (read != len && retries++ < UBI_IO_RETRIES) { 170 + dbg_io("error %d while reading %d bytes from PEB %d:%d," 171 + " read only %zd bytes, retry", 172 err, len, pnum, offset, read); 173 yield(); 174 goto retry; ··· 187 ubi_assert(len == read); 188 189 if (ubi_dbg_is_bitflip()) { 190 + dbg_gen("bit-flip (emulated)"); 191 err = UBI_IO_BITFLIPS; 192 } 193 } ··· 391 { 392 int err, i, patt_count; 393 394 + ubi_msg("run torture test for PEB %d", pnum); 395 patt_count = ARRAY_SIZE(patterns); 396 ubi_assert(patt_count > 0); 397 ··· 434 } 435 436 err = patt_count; 437 + ubi_msg("PEB %d passed torture test, do not mark it a bad", pnum); 438 439 out: 440 mutex_unlock(&ubi->buf_mutex); ··· 699 700 if (hdr_crc != crc) { 701 if (verbose) { 702 + ubi_warn("bad EC header CRC at PEB %d, calculated " 703 + "%#08x, read %#08x", pnum, crc, hdr_crc); 704 ubi_dbg_dump_ec_hdr(ec_hdr); 705 } 706 return UBI_IO_BAD_EC_HDR; ··· 1095 } 1096 1097 /** 1098 + * paranoid_check_peb_ec_hdr - check erase counter header. 1099 * @ubi: UBI device description object 1100 * @pnum: the physical eraseblock number to check 1101 * ··· 1174 } 1175 1176 /** 1177 + * paranoid_check_peb_vid_hdr - check volume identifier header. 1178 * @ubi: UBI device description object 1179 * @pnum: the physical eraseblock number to check 1180 * ··· 1256 1257 fail: 1258 ubi_err("paranoid check failed for PEB %d", pnum); 1259 + ubi_msg("hex dump of the %d-%d region", offset, offset + len); 1260 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, 1261 ubi->dbg_peb_buf, len, 1); 1262 err = 1;
+37 -13
drivers/mtd/ubi/kapi.c
··· 106 struct ubi_device *ubi; 107 struct ubi_volume *vol; 108 109 - dbg_msg("open device %d volume %d, mode %d", ubi_num, vol_id, mode); 110 111 if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES) 112 return ERR_PTR(-EINVAL); ··· 215 struct ubi_device *ubi; 216 struct ubi_volume_desc *ret; 217 218 - dbg_msg("open volume %s, mode %d", name, mode); 219 220 if (!name) 221 return ERR_PTR(-EINVAL); ··· 266 struct ubi_volume *vol = desc->vol; 267 struct ubi_device *ubi = vol->ubi; 268 269 - dbg_msg("close volume %d, mode %d", vol->vol_id, desc->mode); 270 271 spin_lock(&ubi->volumes_lock); 272 switch (desc->mode) { ··· 323 struct ubi_device *ubi = vol->ubi; 324 int err, vol_id = vol->vol_id; 325 326 - dbg_msg("read %d bytes from LEB %d:%d:%d", len, vol_id, lnum, offset); 327 328 if (vol_id < 0 || vol_id >= ubi->vtbl_slots || lnum < 0 || 329 lnum >= vol->used_ebs || offset < 0 || len < 0 || ··· 388 struct ubi_device *ubi = vol->ubi; 389 int vol_id = vol->vol_id; 390 391 - dbg_msg("write %d bytes to LEB %d:%d:%d", len, vol_id, lnum, offset); 392 393 if (vol_id < 0 || vol_id >= ubi->vtbl_slots) 394 return -EINVAL; ··· 397 return -EROFS; 398 399 if (lnum < 0 || lnum >= vol->reserved_pebs || offset < 0 || len < 0 || 400 - offset + len > vol->usable_leb_size || offset % ubi->min_io_size || 401 - len % ubi->min_io_size) 402 return -EINVAL; 403 404 if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM && ··· 438 struct ubi_device *ubi = vol->ubi; 439 int vol_id = vol->vol_id; 440 441 - dbg_msg("atomically write %d bytes to LEB %d:%d", len, vol_id, lnum); 442 443 if (vol_id < 0 || vol_id >= ubi->vtbl_slots) 444 return -EINVAL; ··· 447 return -EROFS; 448 449 if (lnum < 0 || lnum >= vol->reserved_pebs || len < 0 || 450 - len > vol->usable_leb_size || len % ubi->min_io_size) 451 return -EINVAL; 452 453 if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM && ··· 482 struct ubi_device *ubi = vol->ubi; 483 int err; 484 485 - dbg_msg("erase LEB %d:%d", vol->vol_id, lnum); 486 487 if 
(desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) 488 return -EROFS; ··· 542 struct ubi_volume *vol = desc->vol; 543 struct ubi_device *ubi = vol->ubi; 544 545 - dbg_msg("unmap LEB %d:%d", vol->vol_id, lnum); 546 547 if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) 548 return -EROFS; ··· 579 struct ubi_volume *vol = desc->vol; 580 struct ubi_device *ubi = vol->ubi; 581 582 - dbg_msg("unmap LEB %d:%d", vol->vol_id, lnum); 583 584 if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) 585 return -EROFS; ··· 621 { 622 struct ubi_volume *vol = desc->vol; 623 624 - dbg_msg("test LEB %d:%d", vol->vol_id, lnum); 625 626 if (lnum < 0 || lnum >= vol->reserved_pebs) 627 return -EINVAL; ··· 632 return vol->eba_tbl[lnum] >= 0; 633 } 634 EXPORT_SYMBOL_GPL(ubi_is_mapped);
··· 106 struct ubi_device *ubi; 107 struct ubi_volume *vol; 108 109 + dbg_gen("open device %d volume %d, mode %d", ubi_num, vol_id, mode); 110 111 if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES) 112 return ERR_PTR(-EINVAL); ··· 215 struct ubi_device *ubi; 216 struct ubi_volume_desc *ret; 217 218 + dbg_gen("open volume %s, mode %d", name, mode); 219 220 if (!name) 221 return ERR_PTR(-EINVAL); ··· 266 struct ubi_volume *vol = desc->vol; 267 struct ubi_device *ubi = vol->ubi; 268 269 + dbg_gen("close volume %d, mode %d", vol->vol_id, desc->mode); 270 271 spin_lock(&ubi->volumes_lock); 272 switch (desc->mode) { ··· 323 struct ubi_device *ubi = vol->ubi; 324 int err, vol_id = vol->vol_id; 325 326 + dbg_gen("read %d bytes from LEB %d:%d:%d", len, vol_id, lnum, offset); 327 328 if (vol_id < 0 || vol_id >= ubi->vtbl_slots || lnum < 0 || 329 lnum >= vol->used_ebs || offset < 0 || len < 0 || ··· 388 struct ubi_device *ubi = vol->ubi; 389 int vol_id = vol->vol_id; 390 391 + dbg_gen("write %d bytes to LEB %d:%d:%d", len, vol_id, lnum, offset); 392 393 if (vol_id < 0 || vol_id >= ubi->vtbl_slots) 394 return -EINVAL; ··· 397 return -EROFS; 398 399 if (lnum < 0 || lnum >= vol->reserved_pebs || offset < 0 || len < 0 || 400 + offset + len > vol->usable_leb_size || 401 + offset & (ubi->min_io_size - 1) || len & (ubi->min_io_size - 1)) 402 return -EINVAL; 403 404 if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM && ··· 438 struct ubi_device *ubi = vol->ubi; 439 int vol_id = vol->vol_id; 440 441 + dbg_gen("atomically write %d bytes to LEB %d:%d", len, vol_id, lnum); 442 443 if (vol_id < 0 || vol_id >= ubi->vtbl_slots) 444 return -EINVAL; ··· 447 return -EROFS; 448 449 if (lnum < 0 || lnum >= vol->reserved_pebs || len < 0 || 450 + len > vol->usable_leb_size || len & (ubi->min_io_size - 1)) 451 return -EINVAL; 452 453 if (dtype != UBI_LONGTERM && dtype != UBI_SHORTTERM && ··· 482 struct ubi_device *ubi = vol->ubi; 483 int err; 484 485 + dbg_gen("erase LEB %d:%d", vol->vol_id, lnum); 
486 487 if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) 488 return -EROFS; ··· 542 struct ubi_volume *vol = desc->vol; 543 struct ubi_device *ubi = vol->ubi; 544 545 + dbg_gen("unmap LEB %d:%d", vol->vol_id, lnum); 546 547 if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) 548 return -EROFS; ··· 579 struct ubi_volume *vol = desc->vol; 580 struct ubi_device *ubi = vol->ubi; 581 582 + dbg_gen("unmap LEB %d:%d", vol->vol_id, lnum); 583 584 if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME) 585 return -EROFS; ··· 621 { 622 struct ubi_volume *vol = desc->vol; 623 624 + dbg_gen("test LEB %d:%d", vol->vol_id, lnum); 625 626 if (lnum < 0 || lnum >= vol->reserved_pebs) 627 return -EINVAL; ··· 632 return vol->eba_tbl[lnum] >= 0; 633 } 634 EXPORT_SYMBOL_GPL(ubi_is_mapped); 635 + 636 + /** 637 + * ubi_sync - synchronize UBI device buffers. 638 + * @ubi_num: UBI device to synchronize 639 + * 640 + * The underlying MTD device may cache data in hardware or in software. This 641 + * function ensures the caches are flushed. Returns zero in case of success and 642 + * a negative error code in case of failure. 643 + */ 644 + int ubi_sync(int ubi_num) 645 + { 646 + struct ubi_device *ubi; 647 + 648 + ubi = ubi_get_device(ubi_num); 649 + if (!ubi) 650 + return -ENODEV; 651 + 652 + if (ubi->mtd->sync) 653 + ubi->mtd->sync(ubi->mtd); 654 + 655 + ubi_put_device(ubi); 656 + return 0; 657 + } 658 + EXPORT_SYMBOL_GPL(ubi_sync);
+1 -1
drivers/mtd/ubi/misc.c
··· 37 { 38 int i; 39 40 - ubi_assert(length % ubi->min_io_size == 0); 41 42 for (i = length - 1; i >= 0; i--) 43 if (((const uint8_t *)buf)[i] != 0xFF)
··· 37 { 38 int i; 39 40 + ubi_assert(!(length & (ubi->min_io_size - 1))); 41 42 for (i = length - 1; i >= 0; i--) 43 if (((const uint8_t *)buf)[i] != 0xFF)
+47 -89
drivers/mtd/ubi/scan.c
··· 19 */ 20 21 /* 22 - * UBI scanning unit. 23 * 24 - * This unit is responsible for scanning the flash media, checking UBI 25 * headers and providing complete information about the UBI flash image. 26 * 27 * The scanning information is represented by a &struct ubi_scan_info' object. ··· 93 } 94 95 /** 96 - * validate_vid_hdr - check that volume identifier header is correct and 97 - * consistent. 98 * @vid_hdr: the volume identifier header to check 99 * @sv: information about the volume this logical eraseblock belongs to 100 * @pnum: physical eraseblock number the VID header came from ··· 102 * non-zero if an inconsistency was found and zero if not. 103 * 104 * Note, UBI does sanity check of everything it reads from the flash media. 105 - * Most of the checks are done in the I/O unit. Here we check that the 106 * information in the VID header is consistent to the information in other VID 107 * headers of the same volume. 108 */ ··· 246 struct ubi_vid_hdr *vh = NULL; 247 unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum); 248 249 - if (seb->sqnum == 0 && sqnum2 == 0) { 250 - long long abs, v1 = seb->leb_ver, v2 = be32_to_cpu(vid_hdr->leb_ver); 251 - 252 /* 253 - * UBI constantly increases the logical eraseblock version 254 - * number and it can overflow. Thus, we have to bear in mind 255 - * that versions that are close to %0xFFFFFFFF are less then 256 - * versions that are close to %0. 257 - * 258 - * The UBI WL unit guarantees that the number of pending tasks 259 - * is not greater then %0x7FFFFFFF. So, if the difference 260 - * between any two versions is greater or equivalent to 261 - * %0x7FFFFFFF, there was an overflow and the logical 262 - * eraseblock with lower version is actually newer then the one 263 - * with higher version. 264 - * 265 - * FIXME: but this is anyway obsolete and will be removed at 266 - * some point. 
267 */ 268 - dbg_bld("using old crappy leb_ver stuff"); 269 270 - if (v1 == v2) { 271 - ubi_err("PEB %d and PEB %d have the same version %lld", 272 - seb->pnum, pnum, v1); 273 - return -EINVAL; 274 - } 275 - 276 - abs = v1 - v2; 277 - if (abs < 0) 278 - abs = -abs; 279 - 280 - if (abs < 0x7FFFFFFF) 281 - /* Non-overflow situation */ 282 - second_is_newer = (v2 > v1); 283 - else 284 - second_is_newer = (v2 < v1); 285 - } else 286 - /* Obviously the LEB with lower sequence counter is older */ 287 - second_is_newer = sqnum2 > seb->sqnum; 288 289 /* 290 * Now we know which copy is newer. If the copy flag of the PEB with ··· 268 * check data CRC. For the second PEB we already have the VID header, 269 * for the first one - we'll need to re-read it from flash. 270 * 271 - * FIXME: this may be optimized so that we wouldn't read twice. 272 */ 273 274 if (second_is_newer) { ··· 354 } 355 356 /** 357 - * ubi_scan_add_used - add information about a physical eraseblock to the 358 - * scanning information. 359 * @ubi: UBI device description object 360 * @si: scanning information 361 * @pnum: the physical eraseblock number ··· 374 int bitflips) 375 { 376 int err, vol_id, lnum; 377 - uint32_t leb_ver; 378 unsigned long long sqnum; 379 struct ubi_scan_volume *sv; 380 struct ubi_scan_leb *seb; ··· 382 vol_id = be32_to_cpu(vid_hdr->vol_id); 383 lnum = be32_to_cpu(vid_hdr->lnum); 384 sqnum = be64_to_cpu(vid_hdr->sqnum); 385 - leb_ver = be32_to_cpu(vid_hdr->leb_ver); 386 387 - dbg_bld("PEB %d, LEB %d:%d, EC %d, sqnum %llu, ver %u, bitflips %d", 388 - pnum, vol_id, lnum, ec, sqnum, leb_ver, bitflips); 389 390 sv = add_volume(si, vol_id, pnum, vid_hdr); 391 if (IS_ERR(sv) < 0) ··· 417 */ 418 419 dbg_bld("this LEB already exists: PEB %d, sqnum %llu, " 420 - "LEB ver %u, EC %d", seb->pnum, seb->sqnum, 421 - seb->leb_ver, seb->ec); 422 - 423 - /* 424 - * Make sure that the logical eraseblocks have different 425 - * versions. Otherwise the image is bad. 
426 - */ 427 - if (seb->leb_ver == leb_ver && leb_ver != 0) { 428 - ubi_err("two LEBs with same version %u", leb_ver); 429 - ubi_dbg_dump_seb(seb, 0); 430 - ubi_dbg_dump_vid_hdr(vid_hdr); 431 - return -EINVAL; 432 - } 433 434 /* 435 * Make sure that the logical eraseblocks have different 436 * sequence numbers. Otherwise the image is bad. 437 * 438 - * FIXME: remove 'sqnum != 0' check when leb_ver is removed. 439 */ 440 if (seb->sqnum == sqnum && sqnum != 0) { 441 ubi_err("two LEBs with same sequence number %llu", ··· 470 seb->pnum = pnum; 471 seb->scrub = ((cmp_res & 2) || bitflips); 472 seb->sqnum = sqnum; 473 - seb->leb_ver = leb_ver; 474 475 if (sv->highest_lnum == lnum) 476 sv->last_data_size = ··· 506 seb->lnum = lnum; 507 seb->sqnum = sqnum; 508 seb->scrub = bitflips; 509 - seb->leb_ver = leb_ver; 510 511 if (sv->highest_lnum <= lnum) { 512 sv->highest_lnum = lnum; ··· 519 } 520 521 /** 522 - * ubi_scan_find_sv - find information about a particular volume in the 523 - * scanning information. 524 * @si: scanning information 525 * @vol_id: the requested volume ID 526 * ··· 548 } 549 550 /** 551 - * ubi_scan_find_seb - find information about a particular logical 552 - * eraseblock in the volume scanning information. 553 * @sv: a pointer to the volume scanning information 554 * @lnum: the requested logical eraseblock 555 * ··· 608 * 609 * This function erases physical eraseblock 'pnum', and writes the erase 610 * counter header to it. This function should only be used on UBI device 611 - * initialization stages, when the EBA unit had not been yet initialized. This 612 - * function returns zero in case of success and a negative error code in case 613 - * of failure. 614 */ 615 int ubi_scan_erase_peb(struct ubi_device *ubi, const struct ubi_scan_info *si, 616 int pnum, int ec) ··· 650 * @si: scanning information 651 * 652 * This function returns a free physical eraseblock. 
It is supposed to be 653 - * called on the UBI initialization stages when the wear-leveling unit is not 654 - * initialized yet. This function picks a physical eraseblocks from one of the 655 - * lists, writes the EC header if it is needed, and removes it from the list. 656 * 657 * This function returns scanning physical eraseblock information in case of 658 * success and an error code in case of failure. ··· 706 } 707 708 /** 709 - * process_eb - read UBI headers, check them and add corresponding data 710 - * to the scanning information. 711 * @ubi: UBI device description object 712 * @si: scanning information 713 * @pnum: the physical eraseblock number ··· 714 * This function returns a zero if the physical eraseblock was successfully 715 * handled and a negative error code in case of failure. 716 */ 717 - static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum) 718 { 719 long long uninitialized_var(ec); 720 int err, bitflips = 0, vol_id, ec_corr = 0; ··· 728 return err; 729 else if (err) { 730 /* 731 - * FIXME: this is actually duty of the I/O unit to initialize 732 - * this, but MTD does not provide enough information. 733 */ 734 si->bad_peb_count += 1; 735 return 0; ··· 895 for (pnum = 0; pnum < ubi->peb_count; pnum++) { 896 cond_resched(); 897 898 - dbg_msg("process PEB %d", pnum); 899 err = process_eb(ubi, si, pnum); 900 if (err < 0) 901 goto out_vidh; ··· 1044 #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID 1045 1046 /** 1047 - * paranoid_check_si - check if the scanning information is correct and 1048 - * consistent. 
1049 * @ubi: UBI device description object 1050 * @si: scanning information 1051 * ··· 1229 ubi_err("bad data_pad %d", sv->data_pad); 1230 goto bad_vid_hdr; 1231 } 1232 - 1233 - if (seb->leb_ver != be32_to_cpu(vidh->leb_ver)) { 1234 - ubi_err("bad leb_ver %u", seb->leb_ver); 1235 - goto bad_vid_hdr; 1236 - } 1237 } 1238 1239 if (!last_seb) ··· 1258 if (err < 0) { 1259 kfree(buf); 1260 return err; 1261 - } 1262 - else if (err) 1263 buf[pnum] = 1; 1264 } 1265
··· 19 */ 20 21 /* 22 + * UBI scanning sub-system. 23 * 24 + * This sub-system is responsible for scanning the flash media, checking UBI 25 * headers and providing complete information about the UBI flash image. 26 * 27 * The scanning information is represented by a &struct ubi_scan_info' object. ··· 93 } 94 95 /** 96 + * validate_vid_hdr - check volume identifier header. 97 * @vid_hdr: the volume identifier header to check 98 * @sv: information about the volume this logical eraseblock belongs to 99 * @pnum: physical eraseblock number the VID header came from ··· 103 * non-zero if an inconsistency was found and zero if not. 104 * 105 * Note, UBI does sanity check of everything it reads from the flash media. 106 + * Most of the checks are done in the I/O sub-system. Here we check that the 107 * information in the VID header is consistent to the information in other VID 108 * headers of the same volume. 109 */ ··· 247 struct ubi_vid_hdr *vh = NULL; 248 unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum); 249 250 + if (sqnum2 == seb->sqnum) { 251 /* 252 + * This must be a really ancient UBI image which has been 253 + * created before sequence numbers support has been added. At 254 + * that time we used 32-bit LEB versions stored in logical 255 + * eraseblocks. That was before UBI got into mainline. We do not 256 + * support these images anymore. Well, those images will 257 + * still work, but only if no unclean reboots happened. 258 */ 259 + ubi_err("unsupported on-flash UBI format\n"); 260 + return -EINVAL; 261 + } 262 263 + /* Obviously the LEB with lower sequence counter is older */ 264 + second_is_newer = !!(sqnum2 > seb->sqnum); 265 266 /* 267 * Now we know which copy is newer. If the copy flag of the PEB with ··· 293 * check data CRC. For the second PEB we already have the VID header, 294 * for the first one - we'll need to re-read it from flash. 295 * 296 + * Note: this may be optimized so that we wouldn't read twice. 
297 */ 298 299 if (second_is_newer) { ··· 379 } 380 381 /** 382 + * ubi_scan_add_used - add physical eraseblock to the scanning information. 383 * @ubi: UBI device description object 384 * @si: scanning information 385 * @pnum: the physical eraseblock number ··· 400 int bitflips) 401 { 402 int err, vol_id, lnum; 403 unsigned long long sqnum; 404 struct ubi_scan_volume *sv; 405 struct ubi_scan_leb *seb; ··· 409 vol_id = be32_to_cpu(vid_hdr->vol_id); 410 lnum = be32_to_cpu(vid_hdr->lnum); 411 sqnum = be64_to_cpu(vid_hdr->sqnum); 412 413 + dbg_bld("PEB %d, LEB %d:%d, EC %d, sqnum %llu, bitflips %d", 414 + pnum, vol_id, lnum, ec, sqnum, bitflips); 415 416 sv = add_volume(si, vol_id, pnum, vid_hdr); 417 if (IS_ERR(sv) < 0) ··· 445 */ 446 447 dbg_bld("this LEB already exists: PEB %d, sqnum %llu, " 448 + "EC %d", seb->pnum, seb->sqnum, seb->ec); 449 450 /* 451 * Make sure that the logical eraseblocks have different 452 * sequence numbers. Otherwise the image is bad. 453 * 454 + * However, if the sequence number is zero, we assume it must 455 + * be an ancient UBI image from the era when UBI did not have 456 + * sequence numbers. We still can attach these images, unless 457 + * there is a need to distinguish between old and new 458 + * eraseblocks, in which case we'll refuse the image in 459 + * 'compare_lebs()'. In other words, we attach old clean 460 + * images, but refuse attaching old images with duplicated 461 + * logical eraseblocks because there was an unclean reboot. 462 */ 463 if (seb->sqnum == sqnum && sqnum != 0) { 464 ubi_err("two LEBs with same sequence number %llu", ··· 503 seb->pnum = pnum; 504 seb->scrub = ((cmp_res & 2) || bitflips); 505 seb->sqnum = sqnum; 506 507 if (sv->highest_lnum == lnum) 508 sv->last_data_size = ··· 540 seb->lnum = lnum; 541 seb->sqnum = sqnum; 542 seb->scrub = bitflips; 543 544 if (sv->highest_lnum <= lnum) { 545 sv->highest_lnum = lnum; ··· 554 } 555 556 /** 557 + * ubi_scan_find_sv - find volume in the scanning information. 
558 * @si: scanning information 559 * @vol_id: the requested volume ID 560 * ··· 584 } 585 586 /** 587 + * ubi_scan_find_seb - find LEB in the volume scanning information. 588 * @sv: a pointer to the volume scanning information 589 * @lnum: the requested logical eraseblock 590 * ··· 645 * 646 * This function erases physical eraseblock 'pnum', and writes the erase 647 * counter header to it. This function should only be used on UBI device 648 + * initialization stages, when the EBA sub-system had not been yet initialized. 649 + * This function returns zero in case of success and a negative error code in 650 + * case of failure. 651 */ 652 int ubi_scan_erase_peb(struct ubi_device *ubi, const struct ubi_scan_info *si, 653 int pnum, int ec) ··· 687 * @si: scanning information 688 * 689 * This function returns a free physical eraseblock. It is supposed to be 690 + * called on the UBI initialization stages when the wear-leveling sub-system is 691 + * not initialized yet. This function picks a physical eraseblocks from one of 692 + * the lists, writes the EC header if it is needed, and removes it from the 693 + * list. 694 * 695 * This function returns scanning physical eraseblock information in case of 696 * success and an error code in case of failure. ··· 742 } 743 744 /** 745 + * process_eb - read, check UBI headers, and add them to scanning information. 746 * @ubi: UBI device description object 747 * @si: scanning information 748 * @pnum: the physical eraseblock number ··· 751 * This function returns a zero if the physical eraseblock was successfully 752 * handled and a negative error code in case of failure. 
753 */ 754 + static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, 755 + int pnum) 756 { 757 long long uninitialized_var(ec); 758 int err, bitflips = 0, vol_id, ec_corr = 0; ··· 764 return err; 765 else if (err) { 766 /* 767 + * FIXME: this is actually duty of the I/O sub-system to 768 + * initialize this, but MTD does not provide enough 769 + * information. 770 */ 771 si->bad_peb_count += 1; 772 return 0; ··· 930 for (pnum = 0; pnum < ubi->peb_count; pnum++) { 931 cond_resched(); 932 933 + dbg_gen("process PEB %d", pnum); 934 err = process_eb(ubi, si, pnum); 935 if (err < 0) 936 goto out_vidh; ··· 1079 #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID 1080 1081 /** 1082 + * paranoid_check_si - check the scanning information. 1083 * @ubi: UBI device description object 1084 * @si: scanning information 1085 * ··· 1265 ubi_err("bad data_pad %d", sv->data_pad); 1266 goto bad_vid_hdr; 1267 } 1268 } 1269 1270 if (!last_seb) ··· 1299 if (err < 0) { 1300 kfree(buf); 1301 return err; 1302 + } else if (err) 1303 buf[pnum] = 1; 1304 } 1305
+9 -12
drivers/mtd/ubi/scan.h
··· 34 * @u: unions RB-tree or @list links 35 * @u.rb: link in the per-volume RB-tree of &struct ubi_scan_leb objects 36 * @u.list: link in one of the eraseblock lists 37 - * @leb_ver: logical eraseblock version (obsolete) 38 * 39 * One object of this type is allocated for each physical eraseblock during 40 * scanning. ··· 48 struct rb_node rb; 49 struct list_head list; 50 } u; 51 - uint32_t leb_ver; 52 }; 53 54 /** ··· 57 * @leb_count: number of logical eraseblocks in this volume 58 * @vol_type: volume type 59 * @used_ebs: number of used logical eraseblocks in this volume (only for 60 - * static volumes) 61 * @last_data_size: amount of data in the last logical eraseblock of this 62 - * volume (always equivalent to the usable logical eraseblock size in case of 63 - * dynamic volumes) 64 * @data_pad: how many bytes at the end of logical eraseblocks of this volume 65 - * are not used (due to volume alignment) 66 * @compat: compatibility flags of this volume 67 * @rb: link in the volume RB-tree 68 * @root: root of the RB-tree containing all the eraseblock belonging to this 69 - * volume (&struct ubi_scan_leb objects) 70 * 71 * One object of this type is allocated for each volume during scanning. 72 */ ··· 90 * @free: list of free physical eraseblocks 91 * @erase: list of physical eraseblocks which have to be erased 92 * @alien: list of physical eraseblocks which should not be used by UBI (e.g., 93 * @bad_peb_count: count of bad physical eraseblocks 94 - * those belonging to "preserve"-compatible internal volumes) 95 * @vols_found: number of volumes found during scanning 96 * @highest_vol_id: highest volume ID 97 * @alien_peb_count: count of physical eraseblocks in the @alien list ··· 104 * @ec_count: a temporary variable used when calculating @mean_ec 105 * 106 * This data structure contains the result of scanning and may be used by other 107 - * UBI units to build final UBI data structures, further error-recovery and so 108 - * on. 
109 */ 110 struct ubi_scan_info { 111 struct rb_root volumes; ··· 130 struct ubi_vid_hdr; 131 132 /* 133 - * ubi_scan_move_to_list - move a physical eraseblock from the volume tree to a 134 - * list. 135 * 136 * @sv: volume scanning information 137 * @seb: scanning eraseblock information
··· 34 * @u: unions RB-tree or @list links 35 * @u.rb: link in the per-volume RB-tree of &struct ubi_scan_leb objects 36 * @u.list: link in one of the eraseblock lists 37 * 38 * One object of this type is allocated for each physical eraseblock during 39 * scanning. ··· 49 struct rb_node rb; 50 struct list_head list; 51 } u; 52 }; 53 54 /** ··· 59 * @leb_count: number of logical eraseblocks in this volume 60 * @vol_type: volume type 61 * @used_ebs: number of used logical eraseblocks in this volume (only for 62 + * static volumes) 63 * @last_data_size: amount of data in the last logical eraseblock of this 64 + * volume (always equivalent to the usable logical eraseblock 65 + * size in case of dynamic volumes) 66 * @data_pad: how many bytes at the end of logical eraseblocks of this volume 67 + * are not used (due to volume alignment) 68 * @compat: compatibility flags of this volume 69 * @rb: link in the volume RB-tree 70 * @root: root of the RB-tree containing all the eraseblock belonging to this 71 + * volume (&struct ubi_scan_leb objects) 72 * 73 * One object of this type is allocated for each volume during scanning. 74 */ ··· 92 * @free: list of free physical eraseblocks 93 * @erase: list of physical eraseblocks which have to be erased 94 * @alien: list of physical eraseblocks which should not be used by UBI (e.g., 95 + * those belonging to "preserve"-compatible internal volumes) 96 * @bad_peb_count: count of bad physical eraseblocks 97 * @vols_found: number of volumes found during scanning 98 * @highest_vol_id: highest volume ID 99 * @alien_peb_count: count of physical eraseblocks in the @alien list ··· 106 * @ec_count: a temporary variable used when calculating @mean_ec 107 * 108 * This data structure contains the result of scanning and may be used by other 109 + * UBI sub-systems to build final UBI data structures, further error-recovery 110 + * and so on. 
111 */ 112 struct ubi_scan_info { 113 struct rb_root volumes; ··· 132 struct ubi_vid_hdr; 133 134 /* 135 + * ubi_scan_move_to_list - move a PEB from the volume tree to a list. 136 * 137 * @sv: volume scanning information 138 * @seb: scanning eraseblock information
+17 -21
drivers/mtd/ubi/ubi-media.h
··· 98 * Compatibility constants used by internal volumes. 99 * 100 * @UBI_COMPAT_DELETE: delete this internal volume before anything is written 101 - * to the flash 102 * @UBI_COMPAT_RO: attach this device in read-only mode 103 * @UBI_COMPAT_PRESERVE: preserve this internal volume - do not touch its 104 - * physical eraseblocks, don't allow the wear-leveling unit to move them 105 * @UBI_COMPAT_REJECT: reject this UBI image 106 */ 107 enum { ··· 124 * struct ubi_ec_hdr - UBI erase counter header. 125 * @magic: erase counter header magic number (%UBI_EC_HDR_MAGIC) 126 * @version: version of UBI implementation which is supposed to accept this 127 - * UBI image 128 * @padding1: reserved for future, zeroes 129 * @ec: the erase counter 130 * @vid_hdr_offset: where the VID header starts ··· 160 * struct ubi_vid_hdr - on-flash UBI volume identifier header. 161 * @magic: volume identifier header magic number (%UBI_VID_HDR_MAGIC) 162 * @version: UBI implementation version which is supposed to accept this UBI 163 - * image (%UBI_VERSION) 164 * @vol_type: volume type (%UBI_VID_DYNAMIC or %UBI_VID_STATIC) 165 * @copy_flag: if this logical eraseblock was copied from another physical 166 - * eraseblock (for wear-leveling reasons) 167 * @compat: compatibility of this volume (%0, %UBI_COMPAT_DELETE, 168 - * %UBI_COMPAT_IGNORE, %UBI_COMPAT_PRESERVE, or %UBI_COMPAT_REJECT) 169 * @vol_id: ID of this volume 170 * @lnum: logical eraseblock number 171 - * @leb_ver: version of this logical eraseblock (IMPORTANT: obsolete, to be 172 - * removed, kept only for not breaking older UBI users) 173 * @data_size: how many bytes of data this logical eraseblock contains 174 * @used_ebs: total number of used logical eraseblocks in this volume 175 * @data_pad: how many bytes at the end of this physical eraseblock are not 176 - * used 177 * @data_crc: CRC checksum of the data stored in this logical eraseblock 178 - * @padding1: reserved for future, zeroes 179 - * @sqnum: sequence number 180 * 
@padding2: reserved for future, zeroes 181 * @hdr_crc: volume identifier header CRC checksum 182 * 183 * The @sqnum is the value of the global sequence counter at the time when this ··· 224 * checksum is correct, this physical eraseblock is selected (P1). Otherwise 225 * the older one (P) is selected. 226 * 227 - * Note, there is an obsolete @leb_ver field which was used instead of @sqnum 228 - * in the past. But it is not used anymore and we keep it in order to be able 229 - * to deal with old UBI images. It will be removed at some point. 230 - * 231 * There are 2 sorts of volumes in UBI: user volumes and internal volumes. 232 * Internal volumes are not seen from outside and are used for various internal 233 * UBI purposes. In this implementation there is only one internal volume - the ··· 244 * The @data_crc field contains the CRC checksum of the contents of the logical 245 * eraseblock if this is a static volume. In case of dynamic volumes, it does 246 * not contain the CRC checksum as a rule. The only exception is when the 247 - * data of the physical eraseblock was moved by the wear-leveling unit, then 248 - * the wear-leveling unit calculates the data CRC and stores it in the 249 - * @data_crc field. And of course, the @copy_flag is %in this case. 250 * 251 * The @data_size field is used only for static volumes because UBI has to know 252 * how many bytes of data are stored in this eraseblock. For dynamic volumes, ··· 273 __u8 compat; 274 __be32 vol_id; 275 __be32 lnum; 276 - __be32 leb_ver; /* obsolete, to be removed, don't use */ 277 __be32 data_size; 278 __be32 used_ebs; 279 __be32 data_pad; 280 __be32 data_crc; 281 - __u8 padding1[4]; 282 __be64 sqnum; 283 - __u8 padding2[12]; 284 __be32 hdr_crc; 285 } __attribute__ ((packed)); 286
··· 98 * Compatibility constants used by internal volumes. 99 * 100 * @UBI_COMPAT_DELETE: delete this internal volume before anything is written 101 + * to the flash 102 * @UBI_COMPAT_RO: attach this device in read-only mode 103 * @UBI_COMPAT_PRESERVE: preserve this internal volume - do not touch its 104 + * physical eraseblocks, don't allow the wear-leveling 105 + * sub-system to move them 106 * @UBI_COMPAT_REJECT: reject this UBI image 107 */ 108 enum { ··· 123 * struct ubi_ec_hdr - UBI erase counter header. 124 * @magic: erase counter header magic number (%UBI_EC_HDR_MAGIC) 125 * @version: version of UBI implementation which is supposed to accept this 126 + * UBI image 127 * @padding1: reserved for future, zeroes 128 * @ec: the erase counter 129 * @vid_hdr_offset: where the VID header starts ··· 159 * struct ubi_vid_hdr - on-flash UBI volume identifier header. 160 * @magic: volume identifier header magic number (%UBI_VID_HDR_MAGIC) 161 * @version: UBI implementation version which is supposed to accept this UBI 162 + * image (%UBI_VERSION) 163 * @vol_type: volume type (%UBI_VID_DYNAMIC or %UBI_VID_STATIC) 164 * @copy_flag: if this logical eraseblock was copied from another physical 165 + * eraseblock (for wear-leveling reasons) 166 * @compat: compatibility of this volume (%0, %UBI_COMPAT_DELETE, 167 + * %UBI_COMPAT_IGNORE, %UBI_COMPAT_PRESERVE, or %UBI_COMPAT_REJECT) 168 * @vol_id: ID of this volume 169 * @lnum: logical eraseblock number 170 + * @padding1: reserved for future, zeroes 171 * @data_size: how many bytes of data this logical eraseblock contains 172 * @used_ebs: total number of used logical eraseblocks in this volume 173 * @data_pad: how many bytes at the end of this physical eraseblock are not 174 + * used 175 * @data_crc: CRC checksum of the data stored in this logical eraseblock 176 * @padding2: reserved for future, zeroes 177 + * @sqnum: sequence number 178 + * @padding3: reserved for future, zeroes 179 * @hdr_crc: volume identifier header CRC 
checksum 180 * 181 * The @sqnum is the value of the global sequence counter at the time when this ··· 224 * checksum is correct, this physical eraseblock is selected (P1). Otherwise 225 * the older one (P) is selected. 226 * 227 * There are 2 sorts of volumes in UBI: user volumes and internal volumes. 228 * Internal volumes are not seen from outside and are used for various internal 229 * UBI purposes. In this implementation there is only one internal volume - the ··· 248 * The @data_crc field contains the CRC checksum of the contents of the logical 249 * eraseblock if this is a static volume. In case of dynamic volumes, it does 250 * not contain the CRC checksum as a rule. The only exception is when the 251 + * data of the physical eraseblock was moved by the wear-leveling sub-system, 252 + * then the wear-leveling sub-system calculates the data CRC and stores it in 253 + * the @data_crc field. And of course, the @copy_flag is %1 in this case. 254 * 255 * The @data_size field is used only for static volumes because UBI has to know 256 * how many bytes of data are stored in this eraseblock. For dynamic volumes, ··· 277 __u8 compat; 278 __be32 vol_id; 279 __be32 lnum; 280 + __u8 padding1[4]; 281 __be32 data_size; 282 __be32 used_ebs; 283 __be32 data_pad; 284 __be32 data_crc; 285 + __u8 padding2[4]; 286 __be64 sqnum; 287 + __u8 padding3[12]; 288 __be32 hdr_crc; 289 } __attribute__ ((packed)); 290
+51 -24
drivers/mtd/ubi/ubi.h
··· 74 #define UBI_IO_RETRIES 3 75 76 /* 77 - * Error codes returned by the I/O unit. 78 * 79 * UBI_IO_PEB_EMPTY: the physical eraseblock is empty, i.e. it contains only 80 - * 0xFF bytes 81 * UBI_IO_PEB_FREE: the physical eraseblock is free, i.e. it contains only a 82 - * valid erase counter header, and the rest are %0xFF bytes 83 * UBI_IO_BAD_EC_HDR: the erase counter header is corrupted (bad magic or CRC) 84 * UBI_IO_BAD_VID_HDR: the volume identifier header is corrupted (bad magic or 85 - * CRC) 86 * UBI_IO_BITFLIPS: bit-flips were detected and corrected 87 */ 88 enum { ··· 99 * @ec: erase counter 100 * @pnum: physical eraseblock number 101 * 102 - * This data structure is used in the WL unit. Each physical eraseblock has a 103 - * corresponding &struct wl_entry object which may be kept in different 104 - * RB-trees. See WL unit for details. 105 */ 106 struct ubi_wl_entry { 107 struct rb_node rb; ··· 118 * @mutex: read/write mutex to implement read/write access serialization to 119 * the (@vol_id, @lnum) logical eraseblock 120 * 121 - * This data structure is used in the EBA unit to implement per-LEB locking. 122 - * When a logical eraseblock is being locked - corresponding 123 * &struct ubi_ltree_entry object is inserted to the lock tree (@ubi->ltree). 124 - * See EBA unit for details. 125 */ 126 struct ubi_ltree_entry { 127 struct rb_node rb; ··· 129 int lnum; 130 int users; 131 struct rw_semaphore mutex; 132 }; 133 134 struct ubi_volume_desc; ··· 227 int alignment; 228 int data_pad; 229 int name_len; 230 - char name[UBI_VOL_NAME_MAX+1]; 231 232 int upd_ebs; 233 int ch_lnum; ··· 246 #ifdef CONFIG_MTD_UBI_GLUEBI 247 /* 248 * Gluebi-related stuff may be compiled out. 249 - * TODO: this should not be built into UBI but should be a separate 250 * ubimtd driver which works on top of UBI and emulates MTD devices. 
251 */ 252 struct ubi_volume_desc *gluebi_desc; ··· 256 }; 257 258 /** 259 - * struct ubi_volume_desc - descriptor of the UBI volume returned when it is 260 - * opened. 261 * @vol: reference to the corresponding volume description object 262 * @mode: open mode (%UBI_READONLY, %UBI_READWRITE, or %UBI_EXCLUSIVE) 263 */ ··· 293 * @vtbl_size: size of the volume table in bytes 294 * @vtbl: in-RAM volume table copy 295 * @volumes_mutex: protects on-flash volume table and serializes volume 296 - * changes, like creation, deletion, update, resize 297 * 298 * @max_ec: current highest erase counter value 299 * @mean_ec: current mean erase counter value ··· 313 * @move_to, @move_to_put @erase_pending, @wl_scheduled, and @works 314 * fields 315 * @move_mutex: serializes eraseblock moves 316 * @wl_scheduled: non-zero if the wear-leveling was scheduled 317 * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any 318 * physical eraseblock ··· 337 * @ro_mode: if the UBI device is in read-only mode 338 * @leb_size: logical eraseblock size 339 * @leb_start: starting offset of logical eraseblocks within physical 340 - * eraseblocks 341 * @ec_hdr_alsize: size of the EC header aligned to @hdrs_min_io_size 342 * @vid_hdr_alsize: size of the VID header aligned to @hdrs_min_io_size 343 * @vid_hdr_offset: starting offset of the volume identifier header (might be 344 - * unaligned) 345 * @vid_hdr_aloffset: starting offset of the VID header aligned to 346 * @hdrs_min_io_size 347 * @vid_hdr_shift: contains @vid_hdr_offset - @vid_hdr_aloffset ··· 352 * @peb_buf1: a buffer of PEB size used for different purposes 353 * @peb_buf2: another buffer of PEB size used for different purposes 354 * @buf_mutex: proptects @peb_buf1 and @peb_buf2 355 * @dbg_peb_buf: buffer of PEB size used for debugging 356 * @dbg_buf_mutex: proptects @dbg_peb_buf 357 */ ··· 379 struct mutex volumes_mutex; 380 381 int max_ec; 382 - /* TODO: mean_ec is not updated run-time, fix */ 383 int mean_ec; 384 385 
- /* EBA unit's stuff */ 386 unsigned long long global_sqnum; 387 spinlock_t ltree_lock; 388 struct rb_root ltree; 389 struct mutex alc_mutex; 390 391 - /* Wear-leveling unit's stuff */ 392 struct rb_root used; 393 struct rb_root free; 394 struct rb_root scrub; ··· 411 int thread_enabled; 412 char bgt_name[sizeof(UBI_BGT_NAME_PATTERN)+2]; 413 414 - /* I/O unit's stuff */ 415 long long flash_size; 416 int peb_count; 417 int peb_size; ··· 434 void *peb_buf2; 435 struct mutex buf_mutex; 436 struct mutex ckvol_mutex; 437 #ifdef CONFIG_MTD_UBI_DEBUG 438 void *dbg_peb_buf; 439 struct mutex dbg_buf_mutex; ··· 451 /* vtbl.c */ 452 int ubi_change_vtbl_record(struct ubi_device *ubi, int idx, 453 struct ubi_vtbl_record *vtbl_rec); 454 int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si); 455 456 /* vmt.c */ 457 int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req); 458 - int ubi_remove_volume(struct ubi_volume_desc *desc); 459 int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs); 460 int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol); 461 void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol); 462 ··· 474 const void __user *buf, int count); 475 476 /* misc.c */ 477 - int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf, int length); 478 int ubi_check_volume(struct ubi_device *ubi, int vol_id); 479 void ubi_calculate_reserved(struct ubi_device *ubi); 480 ··· 505 int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, 506 struct ubi_vid_hdr *vid_hdr); 507 int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si); 508 - void ubi_eba_close(const struct ubi_device *ubi); 509 510 /* wl.c */ 511 int ubi_wl_get_peb(struct ubi_device *ubi, int dtype);
··· 74 #define UBI_IO_RETRIES 3 75 76 /* 77 + * Error codes returned by the I/O sub-system. 78 * 79 * UBI_IO_PEB_EMPTY: the physical eraseblock is empty, i.e. it contains only 80 + * %0xFF bytes 81 * UBI_IO_PEB_FREE: the physical eraseblock is free, i.e. it contains only a 82 + * valid erase counter header, and the rest are %0xFF bytes 83 * UBI_IO_BAD_EC_HDR: the erase counter header is corrupted (bad magic or CRC) 84 * UBI_IO_BAD_VID_HDR: the volume identifier header is corrupted (bad magic or 85 + * CRC) 86 * UBI_IO_BITFLIPS: bit-flips were detected and corrected 87 */ 88 enum { ··· 99 * @ec: erase counter 100 * @pnum: physical eraseblock number 101 * 102 + * This data structure is used in the WL sub-system. Each physical eraseblock 103 + * has a corresponding &struct wl_entry object which may be kept in different 104 + * RB-trees. See WL sub-system for details. 105 */ 106 struct ubi_wl_entry { 107 struct rb_node rb; ··· 118 * @mutex: read/write mutex to implement read/write access serialization to 119 * the (@vol_id, @lnum) logical eraseblock 120 * 121 + * This data structure is used in the EBA sub-system to implement per-LEB 122 + * locking. When a logical eraseblock is being locked - corresponding 123 * &struct ubi_ltree_entry object is inserted to the lock tree (@ubi->ltree). 124 + * See EBA sub-system for details. 125 */ 126 struct ubi_ltree_entry { 127 struct rb_node rb; ··· 129 int lnum; 130 int users; 131 struct rw_semaphore mutex; 132 + }; 133 + 134 + /** 135 + * struct ubi_rename_entry - volume re-name description data structure. 136 + * @new_name_len: new volume name length 137 + * @new_name: new volume name 138 + * @remove: if not zero, this volume should be removed, not re-named 139 + * @desc: descriptor of the volume 140 + * @list: links re-name entries into a list 141 + * 142 + * This data structure is utilized in the multiple volume re-name code. 
Namely, 143 + * UBI first creates a list of &struct ubi_rename_entry objects from the 144 + * &struct ubi_rnvol_req request object, and then utilizes this list to do all 145 + * the job. 146 + */ 147 + struct ubi_rename_entry { 148 + int new_name_len; 149 + char new_name[UBI_VOL_NAME_MAX + 1]; 150 + int remove; 151 + struct ubi_volume_desc *desc; 152 + struct list_head list; 153 }; 154 155 struct ubi_volume_desc; ··· 206 int alignment; 207 int data_pad; 208 int name_len; 209 + char name[UBI_VOL_NAME_MAX + 1]; 210 211 int upd_ebs; 212 int ch_lnum; ··· 225 #ifdef CONFIG_MTD_UBI_GLUEBI 226 /* 227 * Gluebi-related stuff may be compiled out. 228 + * Note: this should not be built into UBI but should be a separate 229 * ubimtd driver which works on top of UBI and emulates MTD devices. 230 */ 231 struct ubi_volume_desc *gluebi_desc; ··· 235 }; 236 237 /** 238 + * struct ubi_volume_desc - UBI volume descriptor returned when it is opened. 239 * @vol: reference to the corresponding volume description object 240 * @mode: open mode (%UBI_READONLY, %UBI_READWRITE, or %UBI_EXCLUSIVE) 241 */ ··· 273 * @vtbl_size: size of the volume table in bytes 274 * @vtbl: in-RAM volume table copy 275 * @volumes_mutex: protects on-flash volume table and serializes volume 276 + * changes, like creation, deletion, update, re-size and re-name 277 * 278 * @max_ec: current highest erase counter value 279 * @mean_ec: current mean erase counter value ··· 293 * @move_to, @move_to_put @erase_pending, @wl_scheduled, and @works 294 * fields 295 * @move_mutex: serializes eraseblock moves 296 + * @work_sem: sycnhronizes the WL worker with use tasks 297 * @wl_scheduled: non-zero if the wear-leveling was scheduled 298 * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any 299 * physical eraseblock ··· 316 * @ro_mode: if the UBI device is in read-only mode 317 * @leb_size: logical eraseblock size 318 * @leb_start: starting offset of logical eraseblocks within physical 319 + * eraseblocks 
320 * @ec_hdr_alsize: size of the EC header aligned to @hdrs_min_io_size 321 * @vid_hdr_alsize: size of the VID header aligned to @hdrs_min_io_size 322 * @vid_hdr_offset: starting offset of the volume identifier header (might be 323 + * unaligned) 324 * @vid_hdr_aloffset: starting offset of the VID header aligned to 325 * @hdrs_min_io_size 326 * @vid_hdr_shift: contains @vid_hdr_offset - @vid_hdr_aloffset ··· 331 * @peb_buf1: a buffer of PEB size used for different purposes 332 * @peb_buf2: another buffer of PEB size used for different purposes 333 * @buf_mutex: proptects @peb_buf1 and @peb_buf2 334 + * @ckvol_mutex: serializes static volume checking when opening 335 + * @mult_mutex: serializes operations on multiple volumes, like re-nameing 336 * @dbg_peb_buf: buffer of PEB size used for debugging 337 * @dbg_buf_mutex: proptects @dbg_peb_buf 338 */ ··· 356 struct mutex volumes_mutex; 357 358 int max_ec; 359 + /* Note, mean_ec is not updated run-time - should be fixed */ 360 int mean_ec; 361 362 + /* EBA sub-system's stuff */ 363 unsigned long long global_sqnum; 364 spinlock_t ltree_lock; 365 struct rb_root ltree; 366 struct mutex alc_mutex; 367 368 + /* Wear-leveling sub-system's stuff */ 369 struct rb_root used; 370 struct rb_root free; 371 struct rb_root scrub; ··· 388 int thread_enabled; 389 char bgt_name[sizeof(UBI_BGT_NAME_PATTERN)+2]; 390 391 + /* I/O sub-system's stuff */ 392 long long flash_size; 393 int peb_count; 394 int peb_size; ··· 411 void *peb_buf2; 412 struct mutex buf_mutex; 413 struct mutex ckvol_mutex; 414 + struct mutex mult_mutex; 415 #ifdef CONFIG_MTD_UBI_DEBUG 416 void *dbg_peb_buf; 417 struct mutex dbg_buf_mutex; ··· 427 /* vtbl.c */ 428 int ubi_change_vtbl_record(struct ubi_device *ubi, int idx, 429 struct ubi_vtbl_record *vtbl_rec); 430 + int ubi_vtbl_rename_volumes(struct ubi_device *ubi, 431 + struct list_head *rename_list); 432 int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_scan_info *si); 433 434 /* vmt.c */ 435 int 
ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req); 436 + int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl); 437 int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs); 438 + int ubi_rename_volumes(struct ubi_device *ubi, struct list_head *rename_list); 439 int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol); 440 void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol); 441 ··· 447 const void __user *buf, int count); 448 449 /* misc.c */ 450 + int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf, 451 + int length); 452 int ubi_check_volume(struct ubi_device *ubi, int vol_id); 453 void ubi_calculate_reserved(struct ubi_device *ubi); 454 ··· 477 int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, 478 struct ubi_vid_hdr *vid_hdr); 479 int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si); 480 481 /* wl.c */ 482 int ubi_wl_get_peb(struct ubi_device *ubi, int dtype);
+18 -14
drivers/mtd/ubi/upd.c
··· 39 */ 40 41 #include <linux/err.h> 42 - #include <asm/uaccess.h> 43 #include <asm/div64.h> 44 #include "ubi.h" 45 ··· 56 int err; 57 struct ubi_vtbl_record vtbl_rec; 58 59 - dbg_msg("set update marker for volume %d", vol->vol_id); 60 61 if (vol->upd_marker) { 62 ubi_assert(ubi->vtbl[vol->vol_id].upd_marker); 63 - dbg_msg("already set"); 64 return 0; 65 } 66 ··· 92 uint64_t tmp; 93 struct ubi_vtbl_record vtbl_rec; 94 95 - dbg_msg("clear update marker for volume %d", vol->vol_id); 96 97 memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id], 98 sizeof(struct ubi_vtbl_record)); ··· 133 int i, err; 134 uint64_t tmp; 135 136 - dbg_msg("start update of volume %d, %llu bytes", vol->vol_id, bytes); 137 ubi_assert(!vol->updating && !vol->changing_leb); 138 vol->updating = 1; 139 ··· 183 { 184 ubi_assert(!vol->updating && !vol->changing_leb); 185 186 - dbg_msg("start changing LEB %d:%d, %u bytes", 187 vol->vol_id, req->lnum, req->bytes); 188 if (req->bytes == 0) 189 return ubi_eba_atomic_leb_change(ubi, vol, req->lnum, NULL, 0, ··· 237 int err; 238 239 if (vol->vol_type == UBI_DYNAMIC_VOLUME) { 240 - len = ALIGN(len, ubi->min_io_size); 241 - memset(buf + len, 0xFF, len - len); 242 243 - len = ubi_calc_data_len(ubi, buf, len); 244 if (len == 0) { 245 - dbg_msg("all %d bytes contain 0xFF - skip", len); 246 return 0; 247 } 248 249 - err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len, UBI_UNKNOWN); 250 } else { 251 /* 252 * When writing static volume, and this is the last logical ··· 268 269 /** 270 * ubi_more_update_data - write more update data. 271 * @vol: volume description object 272 * @buf: write data (user-space memory buffer) 273 * @count: how much bytes to write ··· 285 uint64_t tmp; 286 int lnum, offs, err = 0, len, to_write = count; 287 288 - dbg_msg("write %d of %lld bytes, %lld already passed", 289 count, vol->upd_bytes, vol->upd_received); 290 291 if (ubi->ro_mode) ··· 386 387 /** 388 * ubi_more_leb_change_data - accept more data for atomic LEB change. 
389 * @vol: volume description object 390 * @buf: write data (user-space memory buffer) 391 * @count: how much bytes to write ··· 403 { 404 int err; 405 406 - dbg_msg("write %d of %lld bytes, %lld already passed", 407 count, vol->upd_bytes, vol->upd_received); 408 409 if (ubi->ro_mode) ··· 421 if (vol->upd_received == vol->upd_bytes) { 422 int len = ALIGN((int)vol->upd_bytes, ubi->min_io_size); 423 424 - memset(vol->upd_buf + vol->upd_bytes, 0xFF, len - vol->upd_bytes); 425 len = ubi_calc_data_len(ubi, vol->upd_buf, len); 426 err = ubi_eba_atomic_leb_change(ubi, vol, vol->ch_lnum, 427 vol->upd_buf, len, UBI_UNKNOWN);
··· 39 */ 40 41 #include <linux/err.h> 42 + #include <linux/uaccess.h> 43 #include <asm/div64.h> 44 #include "ubi.h" 45 ··· 56 int err; 57 struct ubi_vtbl_record vtbl_rec; 58 59 + dbg_gen("set update marker for volume %d", vol->vol_id); 60 61 if (vol->upd_marker) { 62 ubi_assert(ubi->vtbl[vol->vol_id].upd_marker); 63 + dbg_gen("already set"); 64 return 0; 65 } 66 ··· 92 uint64_t tmp; 93 struct ubi_vtbl_record vtbl_rec; 94 95 + dbg_gen("clear update marker for volume %d", vol->vol_id); 96 97 memcpy(&vtbl_rec, &ubi->vtbl[vol->vol_id], 98 sizeof(struct ubi_vtbl_record)); ··· 133 int i, err; 134 uint64_t tmp; 135 136 + dbg_gen("start update of volume %d, %llu bytes", vol->vol_id, bytes); 137 ubi_assert(!vol->updating && !vol->changing_leb); 138 vol->updating = 1; 139 ··· 183 { 184 ubi_assert(!vol->updating && !vol->changing_leb); 185 186 + dbg_gen("start changing LEB %d:%d, %u bytes", 187 vol->vol_id, req->lnum, req->bytes); 188 if (req->bytes == 0) 189 return ubi_eba_atomic_leb_change(ubi, vol, req->lnum, NULL, 0, ··· 237 int err; 238 239 if (vol->vol_type == UBI_DYNAMIC_VOLUME) { 240 + int l = ALIGN(len, ubi->min_io_size); 241 242 + memset(buf + len, 0xFF, l - len); 243 + len = ubi_calc_data_len(ubi, buf, l); 244 if (len == 0) { 245 + dbg_gen("all %d bytes contain 0xFF - skip", len); 246 return 0; 247 } 248 249 + err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len, 250 + UBI_UNKNOWN); 251 } else { 252 /* 253 * When writing static volume, and this is the last logical ··· 267 268 /** 269 * ubi_more_update_data - write more update data. 
270 + * @ubi: UBI device description object 271 * @vol: volume description object 272 * @buf: write data (user-space memory buffer) 273 * @count: how much bytes to write ··· 283 uint64_t tmp; 284 int lnum, offs, err = 0, len, to_write = count; 285 286 + dbg_gen("write %d of %lld bytes, %lld already passed", 287 count, vol->upd_bytes, vol->upd_received); 288 289 if (ubi->ro_mode) ··· 384 385 /** 386 * ubi_more_leb_change_data - accept more data for atomic LEB change. 387 + * @ubi: UBI device description object 388 * @vol: volume description object 389 * @buf: write data (user-space memory buffer) 390 * @count: how much bytes to write ··· 400 { 401 int err; 402 403 + dbg_gen("write %d of %lld bytes, %lld already passed", 404 count, vol->upd_bytes, vol->upd_received); 405 406 if (ubi->ro_mode) ··· 418 if (vol->upd_received == vol->upd_bytes) { 419 int len = ALIGN((int)vol->upd_bytes, ubi->min_io_size); 420 421 + memset(vol->upd_buf + vol->upd_bytes, 0xFF, 422 + len - vol->upd_bytes); 423 len = ubi_calc_data_len(ubi, vol->upd_buf, len); 424 err = ubi_eba_atomic_leb_change(ubi, vol, vol->ch_lnum, 425 vol->upd_buf, len, UBI_UNKNOWN);
+95 -53
drivers/mtd/ubi/vmt.c
··· 28 #include "ubi.h" 29 30 #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID 31 - static void paranoid_check_volumes(struct ubi_device *ubi); 32 #else 33 - #define paranoid_check_volumes(ubi) 34 #endif 35 36 static ssize_t vol_attribute_show(struct device *dev, ··· 127 { 128 struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev); 129 130 kfree(vol); 131 } 132 ··· 202 */ 203 int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) 204 { 205 - int i, err, vol_id = req->vol_id, dont_free = 0; 206 struct ubi_volume *vol; 207 struct ubi_vtbl_record vtbl_rec; 208 uint64_t bytes; ··· 218 spin_lock(&ubi->volumes_lock); 219 if (vol_id == UBI_VOL_NUM_AUTO) { 220 /* Find unused volume ID */ 221 - dbg_msg("search for vacant volume ID"); 222 for (i = 0; i < ubi->vtbl_slots; i++) 223 if (!ubi->volumes[i]) { 224 vol_id = i; ··· 233 req->vol_id = vol_id; 234 } 235 236 - dbg_msg("volume ID %d, %llu bytes, type %d, name %s", 237 vol_id, (unsigned long long)req->bytes, 238 (int)req->vol_type, req->name); 239 ··· 253 goto out_unlock; 254 } 255 256 - /* Calculate how many eraseblocks are requested */ 257 vol->usable_leb_size = ubi->leb_size - ubi->leb_size % req->alignment; 258 bytes = req->bytes; 259 if (do_div(bytes, vol->usable_leb_size)) ··· 275 vol->data_pad = ubi->leb_size % vol->alignment; 276 vol->vol_type = req->vol_type; 277 vol->name_len = req->name_len; 278 - memcpy(vol->name, req->name, vol->name_len + 1); 279 vol->ubi = ubi; 280 281 /* ··· 350 vtbl_rec.vol_type = UBI_VID_DYNAMIC; 351 else 352 vtbl_rec.vol_type = UBI_VID_STATIC; 353 - memcpy(vtbl_rec.name, vol->name, vol->name_len + 1); 354 355 err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); 356 if (err) ··· 361 ubi->vol_count += 1; 362 spin_unlock(&ubi->volumes_lock); 363 364 - paranoid_check_volumes(ubi); 365 - return 0; 366 367 out_sysfs: 368 /* 369 - * We have registered our device, we should not free the volume* 370 * description object in this function in case of an error - it is 371 * freed 
by the release function. 372 * 373 * Get device reference to prevent the release function from being 374 * called just after sysfs has been closed. 375 */ 376 - dont_free = 1; 377 get_device(&vol->dev); 378 volume_sysfs_close(vol); 379 out_gluebi: ··· 383 out_cdev: 384 cdev_del(&vol->cdev); 385 out_mapping: 386 - kfree(vol->eba_tbl); 387 out_acc: 388 spin_lock(&ubi->volumes_lock); 389 ubi->rsvd_pebs -= vol->reserved_pebs; 390 ubi->avail_pebs += vol->reserved_pebs; 391 out_unlock: 392 spin_unlock(&ubi->volumes_lock); 393 - if (dont_free) 394 - put_device(&vol->dev); 395 - else 396 kfree(vol); 397 ubi_err("cannot create volume %d, error %d", vol_id, err); 398 return err; 399 } ··· 402 /** 403 * ubi_remove_volume - remove volume. 404 * @desc: volume descriptor 405 * 406 * This function removes volume described by @desc. The volume has to be opened 407 * in "exclusive" mode. Returns zero in case of success and a negative error 408 * code in case of failure. The caller has to have the @ubi->volumes_mutex 409 * locked. 
410 */ 411 - int ubi_remove_volume(struct ubi_volume_desc *desc) 412 { 413 struct ubi_volume *vol = desc->vol; 414 struct ubi_device *ubi = vol->ubi; 415 int i, err, vol_id = vol->vol_id, reserved_pebs = vol->reserved_pebs; 416 417 - dbg_msg("remove UBI volume %d", vol_id); 418 ubi_assert(desc->mode == UBI_EXCLUSIVE); 419 ubi_assert(vol == ubi->volumes[vol_id]); 420 ··· 438 if (err) 439 goto out_err; 440 441 - err = ubi_change_vtbl_record(ubi, vol_id, NULL); 442 - if (err) 443 - goto out_err; 444 445 for (i = 0; i < vol->reserved_pebs; i++) { 446 err = ubi_eba_unmap_leb(ubi, vol, i); ··· 450 goto out_err; 451 } 452 453 - kfree(vol->eba_tbl); 454 - vol->eba_tbl = NULL; 455 cdev_del(&vol->cdev); 456 volume_sysfs_close(vol); 457 ··· 468 ubi->vol_count -= 1; 469 spin_unlock(&ubi->volumes_lock); 470 471 - paranoid_check_volumes(ubi); 472 - return 0; 473 474 out_err: 475 ubi_err("cannot remove volume %d, error %d", vol_id, err); ··· 501 if (ubi->ro_mode) 502 return -EROFS; 503 504 - dbg_msg("re-size volume %d to from %d to %d PEBs", 505 vol_id, vol->reserved_pebs, reserved_pebs); 506 507 if (vol->vol_type == UBI_STATIC_VOLUME && ··· 590 (long long)vol->used_ebs * vol->usable_leb_size; 591 } 592 593 - paranoid_check_volumes(ubi); 594 - return 0; 595 596 out_acc: 597 if (pebs > 0) { ··· 602 } 603 out_free: 604 kfree(new_mapping); 605 return err; 606 } 607 ··· 657 int err, vol_id = vol->vol_id; 658 dev_t dev; 659 660 - dbg_msg("add volume %d", vol_id); 661 - ubi_dbg_dump_vol_info(vol); 662 663 /* Register character device for the volume */ 664 cdev_init(&vol->cdev, &ubi_vol_cdev_operations); ··· 691 return err; 692 } 693 694 - paranoid_check_volumes(ubi); 695 - return 0; 696 697 out_gluebi: 698 err = ubi_destroy_gluebi(vol); ··· 713 { 714 int err; 715 716 - dbg_msg("free volume %d", vol->vol_id); 717 718 ubi->volumes[vol->vol_id] = NULL; 719 err = ubi_destroy_gluebi(vol); ··· 727 * paranoid_check_volume - check volume information. 
728 * @ubi: UBI device description object 729 * @vol_id: volume ID 730 */ 731 - static void paranoid_check_volume(struct ubi_device *ubi, int vol_id) 732 { 733 int idx = vol_id2idx(ubi, vol_id); 734 int reserved_pebs, alignment, data_pad, vol_type, name_len, upd_marker; ··· 748 goto fail; 749 } 750 spin_unlock(&ubi->volumes_lock); 751 - return; 752 - } 753 - 754 - if (vol->exclusive) { 755 - /* 756 - * The volume may be being created at the moment, do not check 757 - * it (e.g., it may be in the middle of ubi_create_volume(). 758 - */ 759 - spin_unlock(&ubi->volumes_lock); 760 - return; 761 } 762 763 if (vol->reserved_pebs < 0 || vol->alignment < 0 || vol->data_pad < 0 || ··· 761 goto fail; 762 } 763 764 - n = vol->alignment % ubi->min_io_size; 765 if (vol->alignment != 1 && n) { 766 ubi_err("alignment is not multiple of min I/O unit"); 767 goto fail; ··· 858 859 if (alignment != vol->alignment || data_pad != vol->data_pad || 860 upd_marker != vol->upd_marker || vol_type != vol->vol_type || 861 - name_len!= vol->name_len || strncmp(name, vol->name, name_len)) { 862 ubi_err("volume info is different"); 863 goto fail; 864 } 865 866 spin_unlock(&ubi->volumes_lock); 867 - return; 868 869 fail: 870 ubi_err("paranoid check failed for volume %d", vol_id); 871 - ubi_dbg_dump_vol_info(vol); 872 ubi_dbg_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id); 873 spin_unlock(&ubi->volumes_lock); 874 - BUG(); 875 } 876 877 /** 878 * paranoid_check_volumes - check information about all volumes. 879 * @ubi: UBI device description object 880 */ 881 - static void paranoid_check_volumes(struct ubi_device *ubi) 882 { 883 - int i; 884 885 - for (i = 0; i < ubi->vtbl_slots; i++) 886 - paranoid_check_volume(ubi, i); 887 } 888 #endif
··· 28 #include "ubi.h" 29 30 #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID 31 + static int paranoid_check_volumes(struct ubi_device *ubi); 32 #else 33 + #define paranoid_check_volumes(ubi) 0 34 #endif 35 36 static ssize_t vol_attribute_show(struct device *dev, ··· 127 { 128 struct ubi_volume *vol = container_of(dev, struct ubi_volume, dev); 129 130 + kfree(vol->eba_tbl); 131 kfree(vol); 132 } 133 ··· 201 */ 202 int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) 203 { 204 + int i, err, vol_id = req->vol_id, do_free = 1; 205 struct ubi_volume *vol; 206 struct ubi_vtbl_record vtbl_rec; 207 uint64_t bytes; ··· 217 spin_lock(&ubi->volumes_lock); 218 if (vol_id == UBI_VOL_NUM_AUTO) { 219 /* Find unused volume ID */ 220 + dbg_gen("search for vacant volume ID"); 221 for (i = 0; i < ubi->vtbl_slots; i++) 222 if (!ubi->volumes[i]) { 223 vol_id = i; ··· 232 req->vol_id = vol_id; 233 } 234 235 + dbg_gen("volume ID %d, %llu bytes, type %d, name %s", 236 vol_id, (unsigned long long)req->bytes, 237 (int)req->vol_type, req->name); 238 ··· 252 goto out_unlock; 253 } 254 255 + /* Calculate how many eraseblocks are requested */ 256 vol->usable_leb_size = ubi->leb_size - ubi->leb_size % req->alignment; 257 bytes = req->bytes; 258 if (do_div(bytes, vol->usable_leb_size)) ··· 274 vol->data_pad = ubi->leb_size % vol->alignment; 275 vol->vol_type = req->vol_type; 276 vol->name_len = req->name_len; 277 + memcpy(vol->name, req->name, vol->name_len); 278 vol->ubi = ubi; 279 280 /* ··· 349 vtbl_rec.vol_type = UBI_VID_DYNAMIC; 350 else 351 vtbl_rec.vol_type = UBI_VID_STATIC; 352 + memcpy(vtbl_rec.name, vol->name, vol->name_len); 353 354 err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); 355 if (err) ··· 360 ubi->vol_count += 1; 361 spin_unlock(&ubi->volumes_lock); 362 363 + err = paranoid_check_volumes(ubi); 364 + return err; 365 366 out_sysfs: 367 /* 368 + * We have registered our device, we should not free the volume 369 * description object in this function in case of an 
error - it is 370 * freed by the release function. 371 * 372 * Get device reference to prevent the release function from being 373 * called just after sysfs has been closed. 374 */ 375 + do_free = 0; 376 get_device(&vol->dev); 377 volume_sysfs_close(vol); 378 out_gluebi: ··· 382 out_cdev: 383 cdev_del(&vol->cdev); 384 out_mapping: 385 + if (do_free) 386 + kfree(vol->eba_tbl); 387 out_acc: 388 spin_lock(&ubi->volumes_lock); 389 ubi->rsvd_pebs -= vol->reserved_pebs; 390 ubi->avail_pebs += vol->reserved_pebs; 391 out_unlock: 392 spin_unlock(&ubi->volumes_lock); 393 + if (do_free) 394 kfree(vol); 395 + else 396 + put_device(&vol->dev); 397 ubi_err("cannot create volume %d, error %d", vol_id, err); 398 return err; 399 } ··· 400 /** 401 * ubi_remove_volume - remove volume. 402 * @desc: volume descriptor 403 + * @no_vtbl: do not change volume table if not zero 404 * 405 * This function removes volume described by @desc. The volume has to be opened 406 * in "exclusive" mode. Returns zero in case of success and a negative error 407 * code in case of failure. The caller has to have the @ubi->volumes_mutex 408 * locked. 
409 */ 410 + int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl) 411 { 412 struct ubi_volume *vol = desc->vol; 413 struct ubi_device *ubi = vol->ubi; 414 int i, err, vol_id = vol->vol_id, reserved_pebs = vol->reserved_pebs; 415 416 + dbg_gen("remove UBI volume %d", vol_id); 417 ubi_assert(desc->mode == UBI_EXCLUSIVE); 418 ubi_assert(vol == ubi->volumes[vol_id]); 419 ··· 435 if (err) 436 goto out_err; 437 438 + if (!no_vtbl) { 439 + err = ubi_change_vtbl_record(ubi, vol_id, NULL); 440 + if (err) 441 + goto out_err; 442 + } 443 444 for (i = 0; i < vol->reserved_pebs; i++) { 445 err = ubi_eba_unmap_leb(ubi, vol, i); ··· 445 goto out_err; 446 } 447 448 cdev_del(&vol->cdev); 449 volume_sysfs_close(vol); 450 ··· 465 ubi->vol_count -= 1; 466 spin_unlock(&ubi->volumes_lock); 467 468 + if (!no_vtbl) 469 + err = paranoid_check_volumes(ubi); 470 + return err; 471 472 out_err: 473 ubi_err("cannot remove volume %d, error %d", vol_id, err); ··· 497 if (ubi->ro_mode) 498 return -EROFS; 499 500 + dbg_gen("re-size volume %d to from %d to %d PEBs", 501 vol_id, vol->reserved_pebs, reserved_pebs); 502 503 if (vol->vol_type == UBI_STATIC_VOLUME && ··· 586 (long long)vol->used_ebs * vol->usable_leb_size; 587 } 588 589 + err = paranoid_check_volumes(ubi); 590 + return err; 591 592 out_acc: 593 if (pebs > 0) { ··· 598 } 599 out_free: 600 kfree(new_mapping); 601 + return err; 602 + } 603 + 604 + /** 605 + * ubi_rename_volumes - re-name UBI volumes. 606 + * @ubi: UBI device description object 607 + * @rename_list: list of &struct ubi_rename_entry objects 608 + * 609 + * This function re-names or removes volumes specified in the re-name list. 610 + * Returns zero in case of success and a negative error code in case of 611 + * failure. 
612 + */ 613 + int ubi_rename_volumes(struct ubi_device *ubi, struct list_head *rename_list) 614 + { 615 + int err; 616 + struct ubi_rename_entry *re; 617 + 618 + err = ubi_vtbl_rename_volumes(ubi, rename_list); 619 + if (err) 620 + return err; 621 + 622 + list_for_each_entry(re, rename_list, list) { 623 + if (re->remove) { 624 + err = ubi_remove_volume(re->desc, 1); 625 + if (err) 626 + break; 627 + } else { 628 + struct ubi_volume *vol = re->desc->vol; 629 + 630 + spin_lock(&ubi->volumes_lock); 631 + vol->name_len = re->new_name_len; 632 + memcpy(vol->name, re->new_name, re->new_name_len + 1); 633 + spin_unlock(&ubi->volumes_lock); 634 + } 635 + } 636 + 637 + if (!err) 638 + err = paranoid_check_volumes(ubi); 639 return err; 640 } 641 ··· 615 int err, vol_id = vol->vol_id; 616 dev_t dev; 617 618 + dbg_gen("add volume %d", vol_id); 619 620 /* Register character device for the volume */ 621 cdev_init(&vol->cdev, &ubi_vol_cdev_operations); ··· 650 return err; 651 } 652 653 + err = paranoid_check_volumes(ubi); 654 + return err; 655 656 out_gluebi: 657 err = ubi_destroy_gluebi(vol); ··· 672 { 673 int err; 674 675 + dbg_gen("free volume %d", vol->vol_id); 676 677 ubi->volumes[vol->vol_id] = NULL; 678 err = ubi_destroy_gluebi(vol); ··· 686 * paranoid_check_volume - check volume information. 687 * @ubi: UBI device description object 688 * @vol_id: volume ID 689 + * 690 + * Returns zero if volume is all right and a a negative error code if not. 
691 */ 692 + static int paranoid_check_volume(struct ubi_device *ubi, int vol_id) 693 { 694 int idx = vol_id2idx(ubi, vol_id); 695 int reserved_pebs, alignment, data_pad, vol_type, name_len, upd_marker; ··· 705 goto fail; 706 } 707 spin_unlock(&ubi->volumes_lock); 708 + return 0; 709 } 710 711 if (vol->reserved_pebs < 0 || vol->alignment < 0 || vol->data_pad < 0 || ··· 727 goto fail; 728 } 729 730 + n = vol->alignment & (ubi->min_io_size - 1); 731 if (vol->alignment != 1 && n) { 732 ubi_err("alignment is not multiple of min I/O unit"); 733 goto fail; ··· 824 825 if (alignment != vol->alignment || data_pad != vol->data_pad || 826 upd_marker != vol->upd_marker || vol_type != vol->vol_type || 827 + name_len != vol->name_len || strncmp(name, vol->name, name_len)) { 828 ubi_err("volume info is different"); 829 goto fail; 830 } 831 832 spin_unlock(&ubi->volumes_lock); 833 + return 0; 834 835 fail: 836 ubi_err("paranoid check failed for volume %d", vol_id); 837 + if (vol) 838 + ubi_dbg_dump_vol_info(vol); 839 ubi_dbg_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id); 840 spin_unlock(&ubi->volumes_lock); 841 + return -EINVAL; 842 } 843 844 /** 845 * paranoid_check_volumes - check information about all volumes. 846 * @ubi: UBI device description object 847 + * 848 + * Returns zero if volumes are all right and a a negative error code if not. 849 */ 850 + static int paranoid_check_volumes(struct ubi_device *ubi) 851 { 852 + int i, err = 0; 853 854 + for (i = 0; i < ubi->vtbl_slots; i++) { 855 + err = paranoid_check_volume(ubi, i); 856 + if (err) 857 + break; 858 + } 859 + 860 + return err; 861 } 862 #endif
+93 -34
drivers/mtd/ubi/vtbl.c
··· 115 } 116 117 /** 118 - * vtbl_check - check if volume table is not corrupted and contains sensible 119 - * data. 120 * @ubi: UBI device description object 121 * @vtbl: volume table 122 * ··· 177 const struct ubi_vtbl_record *vtbl) 178 { 179 int i, n, reserved_pebs, alignment, data_pad, vol_type, name_len; 180 - int upd_marker; 181 uint32_t crc; 182 const char *name; 183 ··· 203 if (reserved_pebs == 0) { 204 if (memcmp(&vtbl[i], &empty_vtbl_record, 205 UBI_VTBL_RECORD_SIZE)) { 206 - dbg_err("bad empty record"); 207 goto bad; 208 } 209 continue; ··· 211 212 if (reserved_pebs < 0 || alignment < 0 || data_pad < 0 || 213 name_len < 0) { 214 - dbg_err("negative values"); 215 goto bad; 216 } 217 218 if (alignment > ubi->leb_size || alignment == 0) { 219 - dbg_err("bad alignment"); 220 goto bad; 221 } 222 223 - n = alignment % ubi->min_io_size; 224 if (alignment != 1 && n) { 225 - dbg_err("alignment is not multiple of min I/O unit"); 226 goto bad; 227 } 228 229 n = ubi->leb_size % alignment; 230 if (data_pad != n) { 231 dbg_err("bad data_pad, has to be %d", n); 232 goto bad; 233 } 234 235 if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) { 236 - dbg_err("bad vol_type"); 237 goto bad; 238 } 239 240 if (upd_marker != 0 && upd_marker != 1) { 241 - dbg_err("bad upd_marker"); 242 goto bad; 243 } 244 245 if (reserved_pebs > ubi->good_peb_count) { 246 dbg_err("too large reserved_pebs, good PEBs %d", 247 ubi->good_peb_count); 248 goto bad; 249 } 250 251 if (name_len > UBI_VOL_NAME_MAX) { 252 - dbg_err("too long volume name, max %d", 253 - UBI_VOL_NAME_MAX); 254 goto bad; 255 } 256 257 if (name[0] == '\0') { 258 - dbg_err("NULL volume name"); 259 goto bad; 260 } 261 262 if (name_len != strnlen(name, name_len + 1)) { 263 - dbg_err("bad name_len"); 264 goto bad; 265 } 266 } ··· 286 return 0; 287 288 bad: 289 - ubi_err("volume table check failed, record %d", i); 290 ubi_dbg_dump_vtbl_record(&vtbl[i], i); 291 return -EINVAL; 292 } ··· 338 vid_hdr->data_pad = 
cpu_to_be32(0); 339 vid_hdr->lnum = cpu_to_be32(copy); 340 vid_hdr->sqnum = cpu_to_be64(++si->max_sqnum); 341 - vid_hdr->leb_ver = cpu_to_be32(old_seb ? old_seb->leb_ver + 1: 0); 342 343 /* The EC header is already there, write the VID header */ 344 err = ubi_io_write_vid_hdr(ubi, new_seb->pnum, vid_hdr); ··· 420 * to LEB 0. 421 */ 422 423 - dbg_msg("check layout volume"); 424 425 /* Read both LEB 0 and LEB 1 into memory */ 426 ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) { ··· 434 err = ubi_io_read_data(ubi, leb[seb->lnum], seb->pnum, 0, 435 ubi->vtbl_size); 436 if (err == UBI_IO_BITFLIPS || err == -EBADMSG) 437 - /* Scrub the PEB later */ 438 seb->scrub = 1; 439 else if (err) 440 goto out_free; ··· 459 if (!leb_corrupted[0]) { 460 /* LEB 0 is OK */ 461 if (leb[1]) 462 - leb_corrupted[1] = memcmp(leb[0], leb[1], ubi->vtbl_size); 463 if (leb_corrupted[1]) { 464 ubi_warn("volume table copy #2 is corrupted"); 465 err = create_vtbl(ubi, si, 1, leb[0]); ··· 680 static int check_sv(const struct ubi_volume *vol, 681 const struct ubi_scan_volume *sv) 682 { 683 if (sv->highest_lnum >= vol->reserved_pebs) { 684 - dbg_err("bad highest_lnum"); 685 goto bad; 686 } 687 if (sv->leb_count > vol->reserved_pebs) { 688 - dbg_err("bad leb_count"); 689 goto bad; 690 } 691 if (sv->vol_type != vol->vol_type) { 692 - dbg_err("bad vol_type"); 693 goto bad; 694 } 695 if (sv->used_ebs > vol->reserved_pebs) { 696 - dbg_err("bad used_ebs"); 697 goto bad; 698 } 699 if (sv->data_pad != vol->data_pad) { 700 - dbg_err("bad data_pad"); 701 goto bad; 702 } 703 return 0; 704 705 bad: 706 - ubi_err("bad scanning information"); 707 ubi_dbg_dump_sv(sv); 708 ubi_dbg_dump_vol_info(vol); 709 return -EINVAL; ··· 734 return -EINVAL; 735 } 736 737 - if (si->highest_vol_id >= ubi->vtbl_slots + UBI_INT_VOL_COUNT&& 738 si->highest_vol_id < UBI_INTERNAL_VOL_START) { 739 ubi_err("too large volume ID %d found by scanning", 740 si->highest_vol_id); 741 return -EINVAL; 742 } 743 - 744 745 for (i = 0; i < 
ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) { 746 cond_resched(); ··· 778 } 779 780 /** 781 - * ubi_read_volume_table - read volume table. 782 - * information. 783 * @ubi: UBI device description object 784 * @si: scanning information 785 * ··· 857 858 out_free: 859 vfree(ubi->vtbl); 860 - for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) 861 - if (ubi->volumes[i]) { 862 - kfree(ubi->volumes[i]); 863 - ubi->volumes[i] = NULL; 864 - } 865 return err; 866 } 867
··· 115 } 116 117 /** 118 + * ubi_vtbl_rename_volumes - rename UBI volumes in the volume table. 119 + * @ubi: UBI device description object 120 + * @rename_list: list of &struct ubi_rename_entry objects 121 + * 122 + * This function re-names multiple volumes specified in @req in the volume 123 + * table. Returns zero in case of success and a negative error code in case of 124 + * failure. 125 + */ 126 + int ubi_vtbl_rename_volumes(struct ubi_device *ubi, 127 + struct list_head *rename_list) 128 + { 129 + int i, err; 130 + struct ubi_rename_entry *re; 131 + struct ubi_volume *layout_vol; 132 + 133 + list_for_each_entry(re, rename_list, list) { 134 + uint32_t crc; 135 + struct ubi_volume *vol = re->desc->vol; 136 + struct ubi_vtbl_record *vtbl_rec = &ubi->vtbl[vol->vol_id]; 137 + 138 + if (re->remove) { 139 + memcpy(vtbl_rec, &empty_vtbl_record, 140 + sizeof(struct ubi_vtbl_record)); 141 + continue; 142 + } 143 + 144 + vtbl_rec->name_len = cpu_to_be16(re->new_name_len); 145 + memcpy(vtbl_rec->name, re->new_name, re->new_name_len); 146 + memset(vtbl_rec->name + re->new_name_len, 0, 147 + UBI_VOL_NAME_MAX + 1 - re->new_name_len); 148 + crc = crc32(UBI_CRC32_INIT, vtbl_rec, 149 + UBI_VTBL_RECORD_SIZE_CRC); 150 + vtbl_rec->crc = cpu_to_be32(crc); 151 + } 152 + 153 + layout_vol = ubi->volumes[vol_id2idx(ubi, UBI_LAYOUT_VOLUME_ID)]; 154 + for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) { 155 + err = ubi_eba_unmap_leb(ubi, layout_vol, i); 156 + if (err) 157 + return err; 158 + 159 + err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0, 160 + ubi->vtbl_size, UBI_LONGTERM); 161 + if (err) 162 + return err; 163 + } 164 + 165 + return 0; 166 + } 167 + 168 + /** 169 + * vtbl_check - check if volume table is not corrupted and sensible. 
170 * @ubi: UBI device description object 171 * @vtbl: volume table 172 * ··· 127 const struct ubi_vtbl_record *vtbl) 128 { 129 int i, n, reserved_pebs, alignment, data_pad, vol_type, name_len; 130 + int upd_marker, err; 131 uint32_t crc; 132 const char *name; 133 ··· 153 if (reserved_pebs == 0) { 154 if (memcmp(&vtbl[i], &empty_vtbl_record, 155 UBI_VTBL_RECORD_SIZE)) { 156 + err = 2; 157 goto bad; 158 } 159 continue; ··· 161 162 if (reserved_pebs < 0 || alignment < 0 || data_pad < 0 || 163 name_len < 0) { 164 + err = 3; 165 goto bad; 166 } 167 168 if (alignment > ubi->leb_size || alignment == 0) { 169 + err = 4; 170 goto bad; 171 } 172 173 + n = alignment & (ubi->min_io_size - 1); 174 if (alignment != 1 && n) { 175 + err = 5; 176 goto bad; 177 } 178 179 n = ubi->leb_size % alignment; 180 if (data_pad != n) { 181 dbg_err("bad data_pad, has to be %d", n); 182 + err = 6; 183 goto bad; 184 } 185 186 if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) { 187 + err = 7; 188 goto bad; 189 } 190 191 if (upd_marker != 0 && upd_marker != 1) { 192 + err = 8; 193 goto bad; 194 } 195 196 if (reserved_pebs > ubi->good_peb_count) { 197 dbg_err("too large reserved_pebs, good PEBs %d", 198 ubi->good_peb_count); 199 + err = 9; 200 goto bad; 201 } 202 203 if (name_len > UBI_VOL_NAME_MAX) { 204 + err = 10; 205 goto bad; 206 } 207 208 if (name[0] == '\0') { 209 + err = 11; 210 goto bad; 211 } 212 213 if (name_len != strnlen(name, name_len + 1)) { 214 + err = 12; 215 goto bad; 216 } 217 } ··· 235 return 0; 236 237 bad: 238 + ubi_err("volume table check failed: record %d, error %d", i, err); 239 ubi_dbg_dump_vtbl_record(&vtbl[i], i); 240 return -EINVAL; 241 } ··· 287 vid_hdr->data_pad = cpu_to_be32(0); 288 vid_hdr->lnum = cpu_to_be32(copy); 289 vid_hdr->sqnum = cpu_to_be64(++si->max_sqnum); 290 291 /* The EC header is already there, write the VID header */ 292 err = ubi_io_write_vid_hdr(ubi, new_seb->pnum, vid_hdr); ··· 370 * to LEB 0. 
371 */ 372 373 + dbg_gen("check layout volume"); 374 375 /* Read both LEB 0 and LEB 1 into memory */ 376 ubi_rb_for_each_entry(rb, seb, &sv->root, u.rb) { ··· 384 err = ubi_io_read_data(ubi, leb[seb->lnum], seb->pnum, 0, 385 ubi->vtbl_size); 386 if (err == UBI_IO_BITFLIPS || err == -EBADMSG) 387 + /* 388 + * Scrub the PEB later. Note, -EBADMSG indicates an 389 + * uncorrectable ECC error, but we have our own CRC and 390 + * the data will be checked later. If the data is OK, 391 + * the PEB will be scrubbed (because we set 392 + * seb->scrub). If the data is not OK, the contents of 393 + * the PEB will be recovered from the second copy, and 394 + * seb->scrub will be cleared in 395 + * 'ubi_scan_add_used()'. 396 + */ 397 seb->scrub = 1; 398 else if (err) 399 goto out_free; ··· 400 if (!leb_corrupted[0]) { 401 /* LEB 0 is OK */ 402 if (leb[1]) 403 + leb_corrupted[1] = memcmp(leb[0], leb[1], 404 + ubi->vtbl_size); 405 if (leb_corrupted[1]) { 406 ubi_warn("volume table copy #2 is corrupted"); 407 err = create_vtbl(ubi, si, 1, leb[0]); ··· 620 static int check_sv(const struct ubi_volume *vol, 621 const struct ubi_scan_volume *sv) 622 { 623 + int err; 624 + 625 if (sv->highest_lnum >= vol->reserved_pebs) { 626 + err = 1; 627 goto bad; 628 } 629 if (sv->leb_count > vol->reserved_pebs) { 630 + err = 2; 631 goto bad; 632 } 633 if (sv->vol_type != vol->vol_type) { 634 + err = 3; 635 goto bad; 636 } 637 if (sv->used_ebs > vol->reserved_pebs) { 638 + err = 4; 639 goto bad; 640 } 641 if (sv->data_pad != vol->data_pad) { 642 + err = 5; 643 goto bad; 644 } 645 return 0; 646 647 bad: 648 + ubi_err("bad scanning information, error %d", err); 649 ubi_dbg_dump_sv(sv); 650 ubi_dbg_dump_vol_info(vol); 651 return -EINVAL; ··· 672 return -EINVAL; 673 } 674 675 + if (si->highest_vol_id >= ubi->vtbl_slots + UBI_INT_VOL_COUNT && 676 si->highest_vol_id < UBI_INTERNAL_VOL_START) { 677 ubi_err("too large volume ID %d found by scanning", 678 si->highest_vol_id); 679 return -EINVAL; 680 } 681 
682 for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) { 683 cond_resched(); ··· 717 } 718 719 /** 720 + * ubi_read_volume_table - read the volume table. 721 * @ubi: UBI device description object 722 * @si: scanning information 723 * ··· 797 798 out_free: 799 vfree(ubi->vtbl); 800 + for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) { 801 + kfree(ubi->volumes[i]); 802 + ubi->volumes[i] = NULL; 803 + } 804 return err; 805 } 806
+102 -106
drivers/mtd/ubi/wl.c
··· 19 */ 20 21 /* 22 - * UBI wear-leveling unit. 23 * 24 - * This unit is responsible for wear-leveling. It works in terms of physical 25 - * eraseblocks and erase counters and knows nothing about logical eraseblocks, 26 - * volumes, etc. From this unit's perspective all physical eraseblocks are of 27 - * two types - used and free. Used physical eraseblocks are those that were 28 - * "get" by the 'ubi_wl_get_peb()' function, and free physical eraseblocks are 29 - * those that were put by the 'ubi_wl_put_peb()' function. 30 * 31 * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only erase counter 32 - * header. The rest of the physical eraseblock contains only 0xFF bytes. 33 * 34 - * When physical eraseblocks are returned to the WL unit by means of the 35 * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is 36 * done asynchronously in context of the per-UBI device background thread, 37 - * which is also managed by the WL unit. 38 * 39 * The wear-leveling is ensured by means of moving the contents of used 40 * physical eraseblocks with low erase counter to free physical eraseblocks ··· 43 * The 'ubi_wl_get_peb()' function accepts data type hints which help to pick 44 * an "optimal" physical eraseblock. For example, when it is known that the 45 * physical eraseblock will be "put" soon because it contains short-term data, 46 - * the WL unit may pick a free physical eraseblock with low erase counter, and 47 - * so forth. 48 * 49 - * If the WL unit fails to erase a physical eraseblock, it marks it as bad. 50 * 51 - * This unit is also responsible for scrubbing. If a bit-flip is detected in a 52 - * physical eraseblock, it has to be moved. Technically this is the same as 53 - * moving it for wear-leveling reasons. 54 * 55 - * As it was said, for the UBI unit all physical eraseblocks are either "free" 56 - * or "used". 
Free eraseblock are kept in the @wl->free RB-tree, while used 57 - * eraseblocks are kept in a set of different RB-trees: @wl->used, 58 * @wl->prot.pnum, @wl->prot.aec, and @wl->scrub. 59 * 60 * Note, in this implementation, we keep a small in-RAM object for each physical 61 * eraseblock. This is surely not a scalable solution. But it appears to be good 62 * enough for moderately large flashes and it is simple. In future, one may 63 - * re-work this unit and make it more scalable. 64 * 65 - * At the moment this unit does not utilize the sequence number, which was 66 - * introduced relatively recently. But it would be wise to do this because the 67 - * sequence number of a logical eraseblock characterizes how old is it. For 68 * example, when we move a PEB with low erase counter, and we need to pick the 69 * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we 70 * pick target PEB with an average EC if our PEB is not very "old". This is a 71 - * room for future re-works of the WL unit. 72 * 73 - * FIXME: looks too complex, should be simplified (later). 74 */ 75 76 #include <linux/slab.h> ··· 94 95 /* 96 * Maximum difference between two erase counters. If this threshold is 97 - * exceeded, the WL unit starts moving data from used physical eraseblocks with 98 - * low erase counter to free physical eraseblocks with high erase counter. 99 */ 100 #define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD 101 102 /* 103 - * When a physical eraseblock is moved, the WL unit has to pick the target 104 * physical eraseblock to move to. The simplest way would be just to pick the 105 * one with the highest erase counter. But in certain workloads this could lead 106 * to an unlimited wear of one or few physical eraseblock. Indeed, imagine a 107 * situation when the picked physical eraseblock is constantly erased after the 108 * data is written to it. So, we have a constant which limits the highest erase 109 - * counter of the free physical eraseblock to pick. 
Namely, the WL unit does 110 - * not pick eraseblocks with erase counter greater then the lowest erase 111 * counter plus %WL_FREE_MAX_DIFF. 112 */ 113 #define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD) ··· 126 * @abs_ec: the absolute erase counter value when the protection ends 127 * @e: the wear-leveling entry of the physical eraseblock under protection 128 * 129 - * When the WL unit returns a physical eraseblock, the physical eraseblock is 130 - * protected from being moved for some "time". For this reason, the physical 131 - * eraseblock is not directly moved from the @wl->free tree to the @wl->used 132 - * tree. There is one more tree in between where this physical eraseblock is 133 - * temporarily stored (@wl->prot). 134 * 135 * All this protection stuff is needed because: 136 * o we don't want to move physical eraseblocks just after we have given them ··· 178 * @list: a link in the list of pending works 179 * @func: worker function 180 * @priv: private data of the worker function 181 - * 182 * @e: physical eraseblock to erase 183 * @torture: if the physical eraseblock has to be tortured 184 * ··· 475 } 476 477 switch (dtype) { 478 - case UBI_LONGTERM: 479 - /* 480 - * For long term data we pick a physical eraseblock 481 - * with high erase counter. But the highest erase 482 - * counter we can pick is bounded by the the lowest 483 - * erase counter plus %WL_FREE_MAX_DIFF. 484 - */ 485 - e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); 486 - protect = LT_PROTECTION; 487 - break; 488 - case UBI_UNKNOWN: 489 - /* 490 - * For unknown data we pick a physical eraseblock with 491 - * medium erase counter. But we by no means can pick a 492 - * physical eraseblock with erase counter greater or 493 - * equivalent than the lowest erase counter plus 494 - * %WL_FREE_MAX_DIFF. 
495 - */ 496 - first = rb_entry(rb_first(&ubi->free), 497 - struct ubi_wl_entry, rb); 498 - last = rb_entry(rb_last(&ubi->free), 499 - struct ubi_wl_entry, rb); 500 501 - if (last->ec - first->ec < WL_FREE_MAX_DIFF) 502 - e = rb_entry(ubi->free.rb_node, 503 - struct ubi_wl_entry, rb); 504 - else { 505 - medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2; 506 - e = find_wl_entry(&ubi->free, medium_ec); 507 - } 508 - protect = U_PROTECTION; 509 - break; 510 - case UBI_SHORTTERM: 511 - /* 512 - * For short term data we pick a physical eraseblock 513 - * with the lowest erase counter as we expect it will 514 - * be erased soon. 515 - */ 516 - e = rb_entry(rb_first(&ubi->free), 517 - struct ubi_wl_entry, rb); 518 - protect = ST_PROTECTION; 519 - break; 520 - default: 521 - protect = 0; 522 - e = NULL; 523 - BUG(); 524 } 525 526 /* ··· 579 * This function returns zero in case of success and a negative error code in 580 * case of failure. 581 */ 582 - static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int torture) 583 { 584 int err; 585 struct ubi_ec_hdr *ec_hdr; ··· 632 } 633 634 /** 635 - * check_protection_over - check if it is time to stop protecting some 636 - * physical eraseblocks. 637 * @ubi: UBI device description object 638 * 639 * This function is called after each erase operation, when the absolute erase ··· 868 } 869 870 ubi_free_vid_hdr(ubi, vid_hdr); 871 spin_lock(&ubi->wl_lock); 872 if (protect) 873 prot_tree_add(ubi, e1, pe, protect); ··· 1055 spin_unlock(&ubi->wl_lock); 1056 1057 /* 1058 - * One more erase operation has happened, take care about protected 1059 - * physical eraseblocks. 1060 */ 1061 check_protection_over(ubi); 1062 ··· 1137 } 1138 1139 /** 1140 - * ubi_wl_put_peb - return a physical eraseblock to the wear-leveling unit. 
1141 * @ubi: UBI device description object 1142 * @pnum: physical eraseblock to return 1143 * @torture: if this physical eraseblock has to be tortured ··· 1176 /* 1177 * User is putting the physical eraseblock which was selected 1178 * as the target the data is moved to. It may happen if the EBA 1179 - * unit already re-mapped the LEB in 'ubi_eba_copy_leb()' but 1180 - * the WL unit has not put the PEB to the "used" tree yet, but 1181 - * it is about to do this. So we just set a flag which will 1182 - * tell the WL worker that the PEB is not needed anymore and 1183 - * should be scheduled for erasure. 1184 */ 1185 dbg_wl("PEB %d is the target of data moving", pnum); 1186 ubi_assert(!ubi->move_to_put); ··· 1230 { 1231 struct ubi_wl_entry *e; 1232 1233 - ubi_msg("schedule PEB %d for scrubbing", pnum); 1234 1235 retry: 1236 spin_lock(&ubi->wl_lock); ··· 1369 int err; 1370 1371 if (kthread_should_stop()) 1372 - goto out; 1373 1374 if (try_to_freeze()) 1375 continue; ··· 1404 cond_resched(); 1405 } 1406 1407 - out: 1408 dbg_wl("background thread \"%s\" is killed", ubi->bgt_name); 1409 return 0; 1410 } ··· 1426 } 1427 1428 /** 1429 - * ubi_wl_init_scan - initialize the wear-leveling unit using scanning 1430 - * information. 1431 * @ubi: UBI device description object 1432 * @si: scanning information 1433 * ··· 1583 } 1584 1585 /** 1586 - * ubi_wl_close - close the wear-leveling unit. 1587 * @ubi: UBI device description object 1588 */ 1589 void ubi_wl_close(struct ubi_device *ubi) 1590 { 1591 - dbg_wl("close the UBI wear-leveling unit"); 1592 - 1593 cancel_pending(ubi); 1594 protection_trees_destroy(ubi); 1595 tree_destroy(&ubi->used); ··· 1600 #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID 1601 1602 /** 1603 - * paranoid_check_ec - make sure that the erase counter of a physical eraseblock 1604 - * is correct. 
1605 * @ubi: UBI device description object 1606 * @pnum: the physical eraseblock number to check 1607 * @ec: the erase counter to check ··· 1641 } 1642 1643 /** 1644 - * paranoid_check_in_wl_tree - make sure that a wear-leveling entry is present 1645 - * in a WL RB-tree. 1646 * @e: the wear-leveling entry to check 1647 * @root: the root of the tree 1648 * 1649 - * This function returns zero if @e is in the @root RB-tree and %1 if it 1650 - * is not. 1651 */ 1652 static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e, 1653 struct rb_root *root)
··· 19 */ 20 21 /* 22 + * UBI wear-leveling sub-system. 23 * 24 + * This sub-system is responsible for wear-leveling. It works in terms of 25 + * physical* eraseblocks and erase counters and knows nothing about logical 26 + * eraseblocks, volumes, etc. From this sub-system's perspective all physical 27 + * eraseblocks are of two types - used and free. Used physical eraseblocks are 28 + * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical 29 + * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function. 30 * 31 * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only erase counter 32 + * header. The rest of the physical eraseblock contains only %0xFF bytes. 33 * 34 + * When physical eraseblocks are returned to the WL sub-system by means of the 35 * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is 36 * done asynchronously in context of the per-UBI device background thread, 37 + * which is also managed by the WL sub-system. 38 * 39 * The wear-leveling is ensured by means of moving the contents of used 40 * physical eraseblocks with low erase counter to free physical eraseblocks ··· 43 * The 'ubi_wl_get_peb()' function accepts data type hints which help to pick 44 * an "optimal" physical eraseblock. For example, when it is known that the 45 * physical eraseblock will be "put" soon because it contains short-term data, 46 + * the WL sub-system may pick a free physical eraseblock with low erase 47 + * counter, and so forth. 48 * 49 + * If the WL sub-system fails to erase a physical eraseblock, it marks it as 50 + * bad. 51 * 52 + * This sub-system is also responsible for scrubbing. If a bit-flip is detected 53 + * in a physical eraseblock, it has to be moved. Technically this is the same 54 + * as moving it for wear-leveling reasons. 55 * 56 + * As it was said, for the UBI sub-system all physical eraseblocks are either 57 + * "free" or "used". 
Free eraseblock are kept in the @wl->free RB-tree, while 58 + * used eraseblocks are kept in a set of different RB-trees: @wl->used, 59 * @wl->prot.pnum, @wl->prot.aec, and @wl->scrub. 60 * 61 * Note, in this implementation, we keep a small in-RAM object for each physical 62 * eraseblock. This is surely not a scalable solution. But it appears to be good 63 * enough for moderately large flashes and it is simple. In future, one may 64 + * re-work this sub-system and make it more scalable. 65 * 66 + * At the moment this sub-system does not utilize the sequence number, which 67 + * was introduced relatively recently. But it would be wise to do this because 68 + * the sequence number of a logical eraseblock characterizes how old is it. For 69 * example, when we move a PEB with low erase counter, and we need to pick the 70 * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we 71 * pick target PEB with an average EC if our PEB is not very "old". This is a 72 + * room for future re-works of the WL sub-system. 73 * 74 + * Note: the stuff with protection trees looks too complex and is difficult to 75 + * understand. Should be fixed. 76 */ 77 78 #include <linux/slab.h> ··· 92 93 /* 94 * Maximum difference between two erase counters. If this threshold is 95 + * exceeded, the WL sub-system starts moving data from used physical 96 + * eraseblocks with low erase counter to free physical eraseblocks with high 97 + * erase counter. 98 */ 99 #define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD 100 101 /* 102 + * When a physical eraseblock is moved, the WL sub-system has to pick the target 103 * physical eraseblock to move to. The simplest way would be just to pick the 104 * one with the highest erase counter. But in certain workloads this could lead 105 * to an unlimited wear of one or few physical eraseblock. Indeed, imagine a 106 * situation when the picked physical eraseblock is constantly erased after the 107 * data is written to it. 
So, we have a constant which limits the highest erase 108 + * counter of the free physical eraseblock to pick. Namely, the WL sub-system 109 + * does not pick eraseblocks with erase counter greater then the lowest erase 110 * counter plus %WL_FREE_MAX_DIFF. 111 */ 112 #define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD) ··· 123 * @abs_ec: the absolute erase counter value when the protection ends 124 * @e: the wear-leveling entry of the physical eraseblock under protection 125 * 126 + * When the WL sub-system returns a physical eraseblock, the physical 127 + * eraseblock is protected from being moved for some "time". For this reason, 128 + * the physical eraseblock is not directly moved from the @wl->free tree to the 129 + * @wl->used tree. There is one more tree in between where this physical 130 + * eraseblock is temporarily stored (@wl->prot). 131 * 132 * All this protection stuff is needed because: 133 * o we don't want to move physical eraseblocks just after we have given them ··· 175 * @list: a link in the list of pending works 176 * @func: worker function 177 * @priv: private data of the worker function 178 * @e: physical eraseblock to erase 179 * @torture: if the physical eraseblock has to be tortured 180 * ··· 473 } 474 475 switch (dtype) { 476 + case UBI_LONGTERM: 477 + /* 478 + * For long term data we pick a physical eraseblock with high 479 + * erase counter. But the highest erase counter we can pick is 480 + * bounded by the the lowest erase counter plus 481 + * %WL_FREE_MAX_DIFF. 482 + */ 483 + e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); 484 + protect = LT_PROTECTION; 485 + break; 486 + case UBI_UNKNOWN: 487 + /* 488 + * For unknown data we pick a physical eraseblock with medium 489 + * erase counter. But we by no means can pick a physical 490 + * eraseblock with erase counter greater or equivalent than the 491 + * lowest erase counter plus %WL_FREE_MAX_DIFF. 
492 + */ 493 + first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, rb); 494 + last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, rb); 495 496 + if (last->ec - first->ec < WL_FREE_MAX_DIFF) 497 + e = rb_entry(ubi->free.rb_node, 498 + struct ubi_wl_entry, rb); 499 + else { 500 + medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2; 501 + e = find_wl_entry(&ubi->free, medium_ec); 502 + } 503 + protect = U_PROTECTION; 504 + break; 505 + case UBI_SHORTTERM: 506 + /* 507 + * For short term data we pick a physical eraseblock with the 508 + * lowest erase counter as we expect it will be erased soon. 509 + */ 510 + e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, rb); 511 + protect = ST_PROTECTION; 512 + break; 513 + default: 514 + protect = 0; 515 + e = NULL; 516 + BUG(); 517 } 518 519 /* ··· 582 * This function returns zero in case of success and a negative error code in 583 * case of failure. 584 */ 585 + static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, 586 + int torture) 587 { 588 int err; 589 struct ubi_ec_hdr *ec_hdr; ··· 634 } 635 636 /** 637 + * check_protection_over - check if it is time to stop protecting some PEBs. 638 * @ubi: UBI device description object 639 * 640 * This function is called after each erase operation, when the absolute erase ··· 871 } 872 873 ubi_free_vid_hdr(ubi, vid_hdr); 874 + if (scrubbing && !protect) 875 + ubi_msg("scrubbed PEB %d, data moved to PEB %d", 876 + e1->pnum, e2->pnum); 877 + 878 spin_lock(&ubi->wl_lock); 879 if (protect) 880 prot_tree_add(ubi, e1, pe, protect); ··· 1054 spin_unlock(&ubi->wl_lock); 1055 1056 /* 1057 + * One more erase operation has happened, take care about 1058 + * protected physical eraseblocks. 1059 */ 1060 check_protection_over(ubi); 1061 ··· 1136 } 1137 1138 /** 1139 + * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system. 
1140 * @ubi: UBI device description object 1141 * @pnum: physical eraseblock to return 1142 * @torture: if this physical eraseblock has to be tortured ··· 1175 /* 1176 * User is putting the physical eraseblock which was selected 1177 * as the target the data is moved to. It may happen if the EBA 1178 + * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()' 1179 + * but the WL sub-system has not put the PEB to the "used" tree 1180 + * yet, but it is about to do this. So we just set a flag which 1181 + * will tell the WL worker that the PEB is not needed anymore 1182 + * and should be scheduled for erasure. 1183 */ 1184 dbg_wl("PEB %d is the target of data moving", pnum); 1185 ubi_assert(!ubi->move_to_put); ··· 1229 { 1230 struct ubi_wl_entry *e; 1231 1232 + dbg_msg("schedule PEB %d for scrubbing", pnum); 1233 1234 retry: 1235 spin_lock(&ubi->wl_lock); ··· 1368 int err; 1369 1370 if (kthread_should_stop()) 1371 + break; 1372 1373 if (try_to_freeze()) 1374 continue; ··· 1403 cond_resched(); 1404 } 1405 1406 dbg_wl("background thread \"%s\" is killed", ubi->bgt_name); 1407 return 0; 1408 } ··· 1426 } 1427 1428 /** 1429 + * ubi_wl_init_scan - initialize the WL sub-system using scanning information. 1430 * @ubi: UBI device description object 1431 * @si: scanning information 1432 * ··· 1584 } 1585 1586 /** 1587 + * ubi_wl_close - close the wear-leveling sub-system. 1588 * @ubi: UBI device description object 1589 */ 1590 void ubi_wl_close(struct ubi_device *ubi) 1591 { 1592 + dbg_wl("close the WL sub-system"); 1593 cancel_pending(ubi); 1594 protection_trees_destroy(ubi); 1595 tree_destroy(&ubi->used); ··· 1602 #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID 1603 1604 /** 1605 + * paranoid_check_ec - make sure that the erase counter of a PEB is correct. 
1606 * @ubi: UBI device description object 1607 * @pnum: the physical eraseblock number to check 1608 * @ec: the erase counter to check ··· 1644 } 1645 1646 /** 1647 + * paranoid_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree. 1648 * @e: the wear-leveling entry to check 1649 * @root: the root of the tree 1650 * 1651 + * This function returns zero if @e is in the @root RB-tree and %1 if it is 1652 + * not. 1653 */ 1654 static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e, 1655 struct rb_root *root)
+3 -2
include/linux/mtd/ubi.h
··· 45 * @size: how many physical eraseblocks are reserved for this volume 46 * @used_bytes: how many bytes of data this volume contains 47 * @used_ebs: how many physical eraseblocks of this volume actually contain any 48 - * data 49 * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME) 50 * @corrupted: non-zero if the volume is corrupted (static volumes only) 51 * @upd_marker: non-zero if the volume has update marker set 52 * @alignment: volume alignment 53 * @usable_leb_size: how many bytes are available in logical eraseblocks of 54 - * this volume 55 * @name_len: volume name length 56 * @name: volume name 57 * @cdev: UBI volume character device major and minor numbers ··· 152 int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum); 153 int ubi_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype); 154 int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum); 155 156 /* 157 * This function is the same as the 'ubi_leb_read()' function, but it does not
··· 45 * @size: how many physical eraseblocks are reserved for this volume 46 * @used_bytes: how many bytes of data this volume contains 47 * @used_ebs: how many physical eraseblocks of this volume actually contain any 48 + * data 49 * @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME) 50 * @corrupted: non-zero if the volume is corrupted (static volumes only) 51 * @upd_marker: non-zero if the volume has update marker set 52 * @alignment: volume alignment 53 * @usable_leb_size: how many bytes are available in logical eraseblocks of 54 + * this volume 55 * @name_len: volume name length 56 * @name: volume name 57 * @cdev: UBI volume character device major and minor numbers ··· 152 int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum); 153 int ubi_leb_map(struct ubi_volume_desc *desc, int lnum, int dtype); 154 int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum); 155 + int ubi_sync(int ubi_num); 156 157 /* 158 * This function is the same as the 'ubi_leb_read()' function, but it does not
+65 -11
include/mtd/ubi-user.h
··· 58 * device should be used. A &struct ubi_rsvol_req object has to be properly 59 * filled and a pointer to it has to be passed to the IOCTL. 60 * 61 * UBI volume update 62 * ~~~~~~~~~~~~~~~~~ 63 * ··· 111 #define UBI_IOCRMVOL _IOW(UBI_IOC_MAGIC, 1, int32_t) 112 /* Re-size an UBI volume */ 113 #define UBI_IOCRSVOL _IOW(UBI_IOC_MAGIC, 2, struct ubi_rsvol_req) 114 115 /* IOCTL commands of the UBI control character device */ 116 ··· 136 137 /* Maximum MTD device name length supported by UBI */ 138 #define MAX_UBI_MTD_NAME_LEN 127 139 140 /* 141 * UBI data type hint constants. ··· 188 * it will be 512 in case of a 2KiB page NAND flash with 4 512-byte sub-pages. 189 * 190 * But in rare cases, if this optimizes things, the VID header may be placed to 191 - * a different offset. For example, the boot-loader might do things faster if the 192 - * VID header sits at the end of the first 2KiB NAND page with 4 sub-pages. As 193 - * the boot-loader would not normally need to read EC headers (unless it needs 194 - * UBI in RW mode), it might be faster to calculate ECC. This is weird example, 195 - * but it real-life example. So, in this example, @vid_hdr_offer would be 196 - * 2KiB-64 bytes = 1984. Note, that this position is not even 512-bytes 197 - * aligned, which is OK, as UBI is clever enough to realize this is 4th sub-page 198 - * of the first page and add needed padding. 199 */ 200 struct ubi_attach_req { 201 int32_t ubi_num; 202 int32_t mtd_num; 203 int32_t vid_hdr_offset; 204 - uint8_t padding[12]; 205 }; 206 207 /** ··· 263 } __attribute__ ((packed)); 264 265 /** 266 * struct ubi_leb_change_req - a data structure used in atomic logical 267 * eraseblock change requests. 268 * @lnum: logical eraseblock number to change ··· 315 struct ubi_leb_change_req { 316 int32_t lnum; 317 int32_t bytes; 318 - uint8_t dtype; 319 - uint8_t padding[7]; 320 } __attribute__ ((packed)); 321 322 #endif /* __UBI_USER_H__ */
··· 58 * device should be used. A &struct ubi_rsvol_req object has to be properly 59 * filled and a pointer to it has to be passed to the IOCTL. 60 * 61 + * UBI volumes re-name 62 + * ~~~~~~~~~~~~~~~~~~~ 63 + * 64 + * To re-name several volumes atomically at one go, the %UBI_IOCRNVOL command 65 + * of the UBI character device should be used. A &struct ubi_rnvol_req object 66 + * has to be properly filled and a pointer to it has to be passed to the IOCTL. 67 + * 68 * UBI volume update 69 * ~~~~~~~~~~~~~~~~~ 70 * ··· 104 #define UBI_IOCRMVOL _IOW(UBI_IOC_MAGIC, 1, int32_t) 105 /* Re-size an UBI volume */ 106 #define UBI_IOCRSVOL _IOW(UBI_IOC_MAGIC, 2, struct ubi_rsvol_req) 107 + /* Re-name volumes */ 108 + #define UBI_IOCRNVOL _IOW(UBI_IOC_MAGIC, 3, struct ubi_rnvol_req) 109 110 /* IOCTL commands of the UBI control character device */ 111 ··· 127 128 /* Maximum MTD device name length supported by UBI */ 129 #define MAX_UBI_MTD_NAME_LEN 127 130 + 131 + /* Maximum amount of UBI volumes that can be re-named at one go */ 132 + #define UBI_MAX_RNVOL 32 133 134 /* 135 * UBI data type hint constants. ··· 176 * it will be 512 in case of a 2KiB page NAND flash with 4 512-byte sub-pages. 177 * 178 * But in rare cases, if this optimizes things, the VID header may be placed to 179 + * a different offset. For example, the boot-loader might do things faster if 180 + * the VID header sits at the end of the first 2KiB NAND page with 4 sub-pages. 181 + * As the boot-loader would not normally need to read EC headers (unless it 182 + * needs UBI in RW mode), it might be faster to calculate ECC. This is weird 183 + * example, but it real-life example. So, in this example, @vid_hdr_offer would 184 + * be 2KiB-64 bytes = 1984. Note, that this position is not even 512-bytes 185 + * aligned, which is OK, as UBI is clever enough to realize this is 4th 186 + * sub-page of the first page and add needed padding. 
187 */ 188 struct ubi_attach_req { 189 int32_t ubi_num; 190 int32_t mtd_num; 191 int32_t vid_hdr_offset; 192 + int8_t padding[12]; 193 }; 194 195 /** ··· 251 } __attribute__ ((packed)); 252 253 /** 254 + * struct ubi_rnvol_req - volumes re-name request. 255 + * @count: count of volumes to re-name 256 + * @padding1: reserved for future, not used, has to be zeroed 257 + * @vol_id: ID of the volume to re-name 258 + * @name_len: name length 259 + * @padding2: reserved for future, not used, has to be zeroed 260 + * @name: new volume name 261 + * 262 + * UBI allows to re-name up to %32 volumes at one go. The count of volumes to 263 + * re-name is specified in the @count field. The ID of the volumes to re-name 264 + * and the new names are specified in the @vol_id and @name fields. 265 + * 266 + * The UBI volume re-name operation is atomic, which means that should power cut 267 + * happen, the volumes will have either old name or new name. So the possible 268 + * use-cases of this command is atomic upgrade. Indeed, to upgrade, say, volumes 269 + * A and B one may create temporary volumes %A1 and %B1 with the new contents, 270 + * then atomically re-name A1->A and B1->B, in which case old %A and %B will 271 + * be removed. 272 + * 273 + * If it is not desirable to remove old A and B, the re-name request has to 274 + * contain 4 entries: A1->A, A->A1, B1->B, B->B1, in which case old A1 and B1 275 + * become A and B, and old A and B will become A1 and B1. 276 + * 277 + * It is also OK to request: A1->A, A1->X, B1->B, B->Y, in which case old A1 278 + * and B1 become A and B, and old A and B become X and Y. 279 + * 280 + * In other words, in case of re-naming into an existing volume name, the 281 + * existing volume is removed, unless it is re-named as well at the same 282 + * re-name request. 
283 + */ 284 + struct ubi_rnvol_req { 285 + int32_t count; 286 + int8_t padding1[12]; 287 + struct { 288 + int32_t vol_id; 289 + int16_t name_len; 290 + int8_t padding2[2]; 291 + char name[UBI_MAX_VOLUME_NAME + 1]; 292 + } ents[UBI_MAX_RNVOL]; 293 + } __attribute__ ((packed)); 294 + 295 + /** 296 * struct ubi_leb_change_req - a data structure used in atomic logical 297 * eraseblock change requests. 298 * @lnum: logical eraseblock number to change ··· 261 struct ubi_leb_change_req { 262 int32_t lnum; 263 int32_t bytes; 264 + int8_t dtype; 265 + int8_t padding[7]; 266 } __attribute__ ((packed)); 267 268 #endif /* __UBI_USER_H__ */