UBI: fix checkpatch.pl errors and warnings

Just out of curiosity I ran checkpatch.pl for the whole of UBI,
and discovered there are quite a few stylistic issues.
Fix them.

Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>

+86 -89
+4 -4
drivers/mtd/ubi/build.c
··· 51 51 * @name: MTD device name or number string 52 52 * @vid_hdr_offs: VID header offset 53 53 */ 54 - struct mtd_dev_param 55 - { 54 + struct mtd_dev_param { 56 55 char name[MTD_PARAM_LEN_MAX]; 57 56 int vid_hdr_offs; 58 57 }; 59 58 60 59 /* Numbers of elements set in the @mtd_dev_param array */ 61 - static int mtd_devs = 0; 60 + static int mtd_devs; 62 61 63 62 /* MTD devices specification parameters */ 64 63 static struct mtd_dev_param mtd_dev_param[UBI_MAX_DEVICES]; ··· 780 781 if (!ubi_devices[ubi_num]) 781 782 break; 782 783 if (ubi_num == UBI_MAX_DEVICES) { 783 - dbg_err("only %d UBI devices may be created", UBI_MAX_DEVICES); 784 + dbg_err("only %d UBI devices may be created", 785 + UBI_MAX_DEVICES); 784 786 return -ENFILE; 785 787 } 786 788 } else {
+2 -2
drivers/mtd/ubi/cdev.c
··· 39 39 #include <linux/stat.h> 40 40 #include <linux/ioctl.h> 41 41 #include <linux/capability.h> 42 + #include <linux/uaccess.h> 42 43 #include <linux/smp_lock.h> 43 44 #include <mtd/ubi-user.h> 44 - #include <asm/uaccess.h> 45 45 #include <asm/div64.h> 46 46 #include "ubi.h" 47 47 ··· 352 352 } 353 353 354 354 #else 355 - #define vol_cdev_direct_write(file, buf, count, offp) -EPERM 355 + #define vol_cdev_direct_write(file, buf, count, offp) (-EPERM) 356 356 #endif /* CONFIG_MTD_UBI_DEBUG_USERSPACE_IO */ 357 357 358 358 static ssize_t vol_cdev_write(struct file *file, const char __user *buf,
+2 -5
drivers/mtd/ubi/eba.c
··· 189 189 le->users += 1; 190 190 spin_unlock(&ubi->ltree_lock); 191 191 192 - if (le_free) 193 - kfree(le_free); 194 - 192 + kfree(le_free); 195 193 return le; 196 194 } 197 195 ··· 501 503 struct ubi_vid_hdr *vid_hdr; 502 504 503 505 vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); 504 - if (!vid_hdr) { 506 + if (!vid_hdr) 505 507 return -ENOMEM; 506 - } 507 508 508 509 mutex_lock(&ubi->buf_mutex); 509 510
+2 -2
drivers/mtd/ubi/gluebi.c
··· 249 249 if (err) 250 250 goto out_err; 251 251 252 - instr->state = MTD_ERASE_DONE; 253 - mtd_erase_callback(instr); 252 + instr->state = MTD_ERASE_DONE; 253 + mtd_erase_callback(instr); 254 254 return 0; 255 255 256 256 out_err:
+4 -4
drivers/mtd/ubi/io.c
··· 167 167 } 168 168 169 169 if (read != len && retries++ < UBI_IO_RETRIES) { 170 - dbg_io("error %d while reading %d bytes from PEB %d:%d, " 171 - "read only %zd bytes, retry", 170 + dbg_io("error %d while reading %d bytes from PEB %d:%d," 171 + " read only %zd bytes, retry", 172 172 err, len, pnum, offset, read); 173 173 yield(); 174 174 goto retry; ··· 705 705 706 706 if (hdr_crc != crc) { 707 707 if (verbose) { 708 - ubi_warn("bad EC header CRC at PEB %d, calculated %#08x," 709 - " read %#08x", pnum, crc, hdr_crc); 708 + ubi_warn("bad EC header CRC at PEB %d, calculated " 709 + "%#08x, read %#08x", pnum, crc, hdr_crc); 710 710 ubi_dbg_dump_ec_hdr(ec_hdr); 711 711 } 712 712 return UBI_IO_BAD_EC_HDR;
+5 -4
drivers/mtd/ubi/scan.c
··· 248 248 unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum); 249 249 250 250 if (seb->sqnum == 0 && sqnum2 == 0) { 251 - long long abs, v1 = seb->leb_ver, v2 = be32_to_cpu(vid_hdr->leb_ver); 251 + long long abs; 252 + long long v1 = seb->leb_ver, v2 = be32_to_cpu(vid_hdr->leb_ver); 252 253 253 254 /* 254 255 * UBI constantly increases the logical eraseblock version ··· 753 752 * This function returns a zero if the physical eraseblock was successfully 754 753 * handled and a negative error code in case of failure. 755 754 */ 756 - static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, int pnum) 755 + static int process_eb(struct ubi_device *ubi, struct ubi_scan_info *si, 756 + int pnum) 757 757 { 758 758 long long uninitialized_var(ec); 759 759 int err, bitflips = 0, vol_id, ec_corr = 0; ··· 1303 1301 if (err < 0) { 1304 1302 kfree(buf); 1305 1303 return err; 1306 - } 1307 - else if (err) 1304 + } else if (err) 1308 1305 buf[pnum] = 1; 1309 1306 } 1310 1307
+2 -1
drivers/mtd/ubi/ubi.h
··· 473 473 const void __user *buf, int count); 474 474 475 475 /* misc.c */ 476 - int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf, int length); 476 + int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf, 477 + int length); 477 478 int ubi_check_volume(struct ubi_device *ubi, int vol_id); 478 479 void ubi_calculate_reserved(struct ubi_device *ubi); 479 480
+5 -3
drivers/mtd/ubi/upd.c
··· 39 39 */ 40 40 41 41 #include <linux/err.h> 42 - #include <asm/uaccess.h> 42 + #include <linux/uaccess.h> 43 43 #include <asm/div64.h> 44 44 #include "ubi.h" 45 45 ··· 246 246 return 0; 247 247 } 248 248 249 - err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len, UBI_UNKNOWN); 249 + err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, len, 250 + UBI_UNKNOWN); 250 251 } else { 251 252 /* 252 253 * When writing static volume, and this is the last logical ··· 419 418 if (vol->upd_received == vol->upd_bytes) { 420 419 int len = ALIGN((int)vol->upd_bytes, ubi->min_io_size); 421 420 422 - memset(vol->upd_buf + vol->upd_bytes, 0xFF, len - vol->upd_bytes); 421 + memset(vol->upd_buf + vol->upd_bytes, 0xFF, 422 + len - vol->upd_bytes); 423 423 len = ubi_calc_data_len(ubi, vol->upd_buf, len); 424 424 err = ubi_eba_atomic_leb_change(ubi, vol, vol->ch_lnum, 425 425 vol->upd_buf, len, UBI_UNKNOWN);
+2 -2
drivers/mtd/ubi/vmt.c
··· 253 253 goto out_unlock; 254 254 } 255 255 256 - /* Calculate how many eraseblocks are requested */ 256 + /* Calculate how many eraseblocks are requested */ 257 257 vol->usable_leb_size = ubi->leb_size - ubi->leb_size % req->alignment; 258 258 bytes = req->bytes; 259 259 if (do_div(bytes, vol->usable_leb_size)) ··· 858 858 859 859 if (alignment != vol->alignment || data_pad != vol->data_pad || 860 860 upd_marker != vol->upd_marker || vol_type != vol->vol_type || 861 - name_len!= vol->name_len || strncmp(name, vol->name, name_len)) { 861 + name_len != vol->name_len || strncmp(name, vol->name, name_len)) { 862 862 ubi_err("volume info is different"); 863 863 goto fail; 864 864 }
+6 -6
drivers/mtd/ubi/vtbl.c
··· 461 461 if (!leb_corrupted[0]) { 462 462 /* LEB 0 is OK */ 463 463 if (leb[1]) 464 - leb_corrupted[1] = memcmp(leb[0], leb[1], ubi->vtbl_size); 464 + leb_corrupted[1] = memcmp(leb[0], leb[1], 465 + ubi->vtbl_size); 465 466 if (leb_corrupted[1]) { 466 467 ubi_warn("volume table copy #2 is corrupted"); 467 468 err = create_vtbl(ubi, si, 1, leb[0]); ··· 860 859 861 860 out_free: 862 861 vfree(ubi->vtbl); 863 - for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) 864 - if (ubi->volumes[i]) { 865 - kfree(ubi->volumes[i]); 866 - ubi->volumes[i] = NULL; 867 - } 862 + for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) { 863 + kfree(ubi->volumes[i]); 864 + ubi->volumes[i] = NULL; 865 + } 868 866 return err; 869 867 } 870 868
+44 -48
drivers/mtd/ubi/wl.c
··· 475 475 } 476 476 477 477 switch (dtype) { 478 - case UBI_LONGTERM: 479 - /* 480 - * For long term data we pick a physical eraseblock 481 - * with high erase counter. But the highest erase 482 - * counter we can pick is bounded by the the lowest 483 - * erase counter plus %WL_FREE_MAX_DIFF. 484 - */ 485 - e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); 486 - protect = LT_PROTECTION; 487 - break; 488 - case UBI_UNKNOWN: 489 - /* 490 - * For unknown data we pick a physical eraseblock with 491 - * medium erase counter. But we by no means can pick a 492 - * physical eraseblock with erase counter greater or 493 - * equivalent than the lowest erase counter plus 494 - * %WL_FREE_MAX_DIFF. 495 - */ 496 - first = rb_entry(rb_first(&ubi->free), 497 - struct ubi_wl_entry, rb); 498 - last = rb_entry(rb_last(&ubi->free), 499 - struct ubi_wl_entry, rb); 478 + case UBI_LONGTERM: 479 + /* 480 + * For long term data we pick a physical eraseblock with high 481 + * erase counter. But the highest erase counter we can pick is 482 + * bounded by the the lowest erase counter plus 483 + * %WL_FREE_MAX_DIFF. 484 + */ 485 + e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF); 486 + protect = LT_PROTECTION; 487 + break; 488 + case UBI_UNKNOWN: 489 + /* 490 + * For unknown data we pick a physical eraseblock with medium 491 + * erase counter. But we by no means can pick a physical 492 + * eraseblock with erase counter greater or equivalent than the 493 + * lowest erase counter plus %WL_FREE_MAX_DIFF. 
494 + */ 495 + first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, rb); 496 + last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, rb); 500 497 501 - if (last->ec - first->ec < WL_FREE_MAX_DIFF) 502 - e = rb_entry(ubi->free.rb_node, 503 - struct ubi_wl_entry, rb); 504 - else { 505 - medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2; 506 - e = find_wl_entry(&ubi->free, medium_ec); 507 - } 508 - protect = U_PROTECTION; 509 - break; 510 - case UBI_SHORTTERM: 511 - /* 512 - * For short term data we pick a physical eraseblock 513 - * with the lowest erase counter as we expect it will 514 - * be erased soon. 515 - */ 516 - e = rb_entry(rb_first(&ubi->free), 517 - struct ubi_wl_entry, rb); 518 - protect = ST_PROTECTION; 519 - break; 520 - default: 521 - protect = 0; 522 - e = NULL; 523 - BUG(); 498 + if (last->ec - first->ec < WL_FREE_MAX_DIFF) 499 + e = rb_entry(ubi->free.rb_node, 500 + struct ubi_wl_entry, rb); 501 + else { 502 + medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2; 503 + e = find_wl_entry(&ubi->free, medium_ec); 504 + } 505 + protect = U_PROTECTION; 506 + break; 507 + case UBI_SHORTTERM: 508 + /* 509 + * For short term data we pick a physical eraseblock with the 510 + * lowest erase counter as we expect it will be erased soon. 511 + */ 512 + e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, rb); 513 + protect = ST_PROTECTION; 514 + break; 515 + default: 516 + protect = 0; 517 + e = NULL; 518 + BUG(); 524 519 } 525 520 526 521 /* ··· 579 584 * This function returns zero in case of success and a negative error code in 580 585 * case of failure. 
581 586 */ 582 - static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int torture) 587 + static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, 588 + int torture) 583 589 { 584 590 int err; 585 591 struct ubi_ec_hdr *ec_hdr; ··· 1056 1060 spin_unlock(&ubi->wl_lock); 1057 1061 1058 1062 /* 1059 - * One more erase operation has happened, take care about protected 1060 - * physical eraseblocks. 1063 + * One more erase operation has happened, take care about 1064 + * protected physical eraseblocks. 1061 1065 */ 1062 1066 check_protection_over(ubi); 1063 1067
+8 -8
include/mtd/ubi-user.h
··· 188 188 * it will be 512 in case of a 2KiB page NAND flash with 4 512-byte sub-pages. 189 189 * 190 190 * But in rare cases, if this optimizes things, the VID header may be placed to 191 - * a different offset. For example, the boot-loader might do things faster if the 192 - * VID header sits at the end of the first 2KiB NAND page with 4 sub-pages. As 193 - * the boot-loader would not normally need to read EC headers (unless it needs 194 - * UBI in RW mode), it might be faster to calculate ECC. This is weird example, 195 - * but it real-life example. So, in this example, @vid_hdr_offer would be 196 - * 2KiB-64 bytes = 1984. Note, that this position is not even 512-bytes 197 - * aligned, which is OK, as UBI is clever enough to realize this is 4th sub-page 198 - * of the first page and add needed padding. 191 + * a different offset. For example, the boot-loader might do things faster if 192 + * the VID header sits at the end of the first 2KiB NAND page with 4 sub-pages. 193 + * As the boot-loader would not normally need to read EC headers (unless it 194 + * needs UBI in RW mode), it might be faster to calculate ECC. This is weird 195 + * example, but it real-life example. So, in this example, @vid_hdr_offer would 196 + * be 2KiB-64 bytes = 1984. Note, that this position is not even 512-bytes 197 + * aligned, which is OK, as UBI is clever enough to realize this is 4th 198 + * sub-page of the first page and add needed padding. 199 199 */ 200 200 struct ubi_attach_req { 201 201 int32_t ubi_num;