Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'upstream-3.17-rc1' of git://git.infradead.org/linux-ubifs

Pull UBI/UBIFS changes from Artem Bityutskiy:
"No significant changes, mostly small fixes here and there. The more
important fixes are:

- UBI deleted list items while iterating the list with
'list_for_each_entry'
- The UBI block driver did not work properly with very large UBI
volumes"

* tag 'upstream-3.17-rc1' of git://git.infradead.org/linux-ubifs: (21 commits)
UBIFS: Add log overlap assertions
Revert "UBIFS: add a log overlap assertion"
UBI: bugfix in ubi_wl_flush()
UBI: block: Avoid disk size integer overflow
UBI: block: Set disk_capacity out of the mutex
UBI: block: Make ubiblock_resize return something
UBIFS: add a log overlap assertion
UBIFS: remove unnecessary check
UBIFS: remove mst_mutex
UBIFS: kernel-doc warning fix
UBI: init_volumes: Ignore volumes with no LEBs
UBIFS: replace seq_printf by seq_puts
UBIFS: replace count*size kzalloc by kcalloc
UBIFS: kernel-doc warning fix
UBIFS: fix error path in create_default_filesystem()
UBIFS: fix spelling of "scanned"
UBIFS: fix some comments
UBIFS: remove useless @ecc in struct ubifs_scan_leb
UBIFS: remove useless statements
UBIFS: Add missing break statements in dbg_chk_pnode()
...

+53 -55
+12 -6
drivers/mtd/ubi/block.c
··· 378 378 { 379 379 struct ubiblock *dev; 380 380 struct gendisk *gd; 381 - int disk_capacity; 381 + u64 disk_capacity = ((u64)vi->size * vi->usable_leb_size) >> 9; 382 382 int ret; 383 383 384 + if ((sector_t)disk_capacity != disk_capacity) 385 + return -EFBIG; 384 386 /* Check that the volume isn't already handled */ 385 387 mutex_lock(&devices_mutex); 386 388 if (find_dev_nolock(vi->ubi_num, vi->vol_id)) { ··· 414 412 gd->first_minor = dev->ubi_num * UBI_MAX_VOLUMES + dev->vol_id; 415 413 gd->private_data = dev; 416 414 sprintf(gd->disk_name, "ubiblock%d_%d", dev->ubi_num, dev->vol_id); 417 - disk_capacity = (vi->size * vi->usable_leb_size) >> 9; 418 415 set_capacity(gd, disk_capacity); 419 416 dev->gd = gd; 420 417 ··· 499 498 return 0; 500 499 } 501 500 502 - static void ubiblock_resize(struct ubi_volume_info *vi) 501 + static int ubiblock_resize(struct ubi_volume_info *vi) 503 502 { 504 503 struct ubiblock *dev; 505 - int disk_capacity; 504 + u64 disk_capacity = ((u64)vi->size * vi->usable_leb_size) >> 9; 506 505 507 506 /* 508 507 * Need to lock the device list until we stop using the device, 509 508 * otherwise the device struct might get released in ··· 518 512 dev = find_dev_nolock(vi->ubi_num, vi->vol_id); 519 513 if (!dev) { 520 514 mutex_unlock(&devices_mutex); 521 - return; 515 + return -ENODEV; 522 516 } 517 + if ((sector_t)disk_capacity != disk_capacity) { 518 + mutex_unlock(&devices_mutex); 519 + ubi_warn("%s: the volume is too big, cannot resize (%d LEBs)", 520 + dev->gd->disk_name, vi->size); 521 + return -EFBIG; 522 + } 523 523 524 524 mutex_lock(&dev->dev_mutex); 525 - disk_capacity = (vi->size * vi->usable_leb_size) >> 9; 526 525 set_capacity(dev->gd, disk_capacity); 527 526 ubi_msg("%s resized to %d LEBs", dev->gd->disk_name, vi->size); 528 527 mutex_unlock(&dev->dev_mutex); 529 528 mutex_unlock(&devices_mutex); 529 + return 0; 530 530 } 531 531 532 532 static int ubiblock_notify(struct notifier_block *nb,
+1 -1
drivers/mtd/ubi/vtbl.c
··· 591 591 592 592 /* Static volumes only */ 593 593 av = ubi_find_av(ai, i); 594 - if (!av) { 594 + if (!av || !av->leb_count) { 595 595 /* 596 596 * No eraseblocks belonging to this volume found. We 597 597 * don't actually know whether this static volume is
+2 -2
drivers/mtd/ubi/wl.c
··· 1718 1718 vol_id, lnum, ubi->works_count); 1719 1719 1720 1720 while (found) { 1721 - struct ubi_work *wrk; 1721 + struct ubi_work *wrk, *tmp; 1722 1722 found = 0; 1723 1723 1724 1724 down_read(&ubi->work_sem); 1725 1725 spin_lock(&ubi->wl_lock); 1726 - list_for_each_entry(wrk, &ubi->works, list) { 1726 + list_for_each_entry_safe(wrk, tmp, &ubi->works, list) { 1727 1727 if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) && 1728 1728 (lnum == UBI_ALL || wrk->lnum == lnum)) { 1729 1729 list_del(&wrk->list);
-2
fs/ubifs/commit.c
··· 174 174 if (err) 175 175 goto out; 176 176 177 - mutex_lock(&c->mst_mutex); 178 177 c->mst_node->cmt_no = cpu_to_le64(c->cmt_no); 179 178 c->mst_node->log_lnum = cpu_to_le32(new_ltail_lnum); 180 179 c->mst_node->root_lnum = cpu_to_le32(zroot.lnum); ··· 203 204 else 204 205 c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_NO_ORPHS); 205 206 err = ubifs_write_master(c); 206 - mutex_unlock(&c->mst_mutex); 207 207 if (err) 208 208 goto out; 209 209
+1 -1
fs/ubifs/io.c
··· 431 431 432 432 /** 433 433 * wbuf_timer_callback - write-buffer timer callback function. 434 - * @data: timer data (write-buffer descriptor) 434 + * @timer: timer data (write-buffer descriptor) 435 435 * 436 436 * This function is called when the write-buffer timer expires. 437 437 */
+6 -6
fs/ubifs/log.c
··· 240 240 241 241 if (c->lhead_offs > c->leb_size - c->ref_node_alsz) { 242 242 c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum); 243 + ubifs_assert(c->lhead_lnum != c->ltail_lnum); 243 244 c->lhead_offs = 0; 244 245 } 245 246 ··· 405 404 /* Switch to the next log LEB */ 406 405 if (c->lhead_offs) { 407 406 c->lhead_lnum = ubifs_next_log_lnum(c, c->lhead_lnum); 407 + ubifs_assert(c->lhead_lnum != c->ltail_lnum); 408 408 c->lhead_offs = 0; 409 409 } 410 410 411 - if (c->lhead_offs == 0) { 412 - /* Must ensure next LEB has been unmapped */ 413 - err = ubifs_leb_unmap(c, c->lhead_lnum); 414 - if (err) 415 - goto out; 416 - } 411 + /* Must ensure next LEB has been unmapped */ 412 + err = ubifs_leb_unmap(c, c->lhead_lnum); 413 + if (err) 414 + goto out; 417 415 418 416 len = ALIGN(len, c->min_io_size); 419 417 dbg_log("writing commit start at LEB %d:0, len %d", c->lhead_lnum, len);
+2 -3
fs/ubifs/lpt.c
··· 1464 1464 return ERR_CAST(nnode); 1465 1465 } 1466 1466 iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1)); 1467 - shft -= UBIFS_LPT_FANOUT_SHIFT; 1468 1467 pnode = ubifs_get_pnode(c, nnode, iip); 1469 1468 if (IS_ERR(pnode)) 1470 1469 return ERR_CAST(pnode); ··· 1603 1604 return ERR_CAST(nnode); 1604 1605 } 1605 1606 iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1)); 1606 - shft -= UBIFS_LPT_FANOUT_SHIFT; 1607 1607 pnode = ubifs_get_pnode(c, nnode, iip); 1608 1608 if (IS_ERR(pnode)) 1609 1609 return ERR_CAST(pnode); ··· 1962 1964 } 1963 1965 } 1964 1966 iip = ((i >> shft) & (UBIFS_LPT_FANOUT - 1)); 1965 - shft -= UBIFS_LPT_FANOUT_SHIFT; 1966 1967 pnode = scan_get_pnode(c, path + h, nnode, iip); 1967 1968 if (IS_ERR(pnode)) { 1968 1969 err = PTR_ERR(pnode); ··· 2195 2198 lprops->dirty); 2196 2199 return -EINVAL; 2197 2200 } 2201 + break; 2198 2202 case LPROPS_FREEABLE: 2199 2203 case LPROPS_FRDI_IDX: 2200 2204 if (lprops->free + lprops->dirty != c->leb_size) { ··· 2204 2206 lprops->dirty); 2205 2207 return -EINVAL; 2206 2208 } 2209 + break; 2207 2210 } 2208 2211 } 2209 2212 return 0;
+5 -2
fs/ubifs/lpt_commit.c
··· 304 304 ubifs_assert(lnum >= c->lpt_first && 305 305 lnum <= c->lpt_last); 306 306 } 307 - done_ltab = 1; 308 307 c->ltab_lnum = lnum; 309 308 c->ltab_offs = offs; 310 309 offs += c->ltab_sz; ··· 513 514 if (err) 514 515 return err; 515 516 } 516 - done_ltab = 1; 517 517 ubifs_pack_ltab(c, buf + offs, c->ltab_cmt); 518 518 offs += c->ltab_sz; 519 519 dbg_chk_lpt_sz(c, 1, c->ltab_sz); ··· 1939 1941 pr_err("LEB %d:%d, nnode, ", 1940 1942 lnum, offs); 1941 1943 err = ubifs_unpack_nnode(c, p, &nnode); 1944 + if (err) { 1945 + pr_err("failed to unpack_node, error %d\n", 1946 + err); 1947 + break; 1948 + } 1942 1949 for (i = 0; i < UBIFS_LPT_FANOUT; i++) { 1943 1950 pr_cont("%d:%d", nnode.nbranch[i].lnum, 1944 1951 nnode.nbranch[i].offs);
+3 -4
fs/ubifs/master.c
··· 352 352 * ubifs_write_master - write master node. 353 353 * @c: UBIFS file-system description object 354 354 * 355 - * This function writes the master node. The caller has to take the 356 - * @c->mst_mutex lock before calling this function. Returns zero in case of 357 - * success and a negative error code in case of failure. The master node is 358 - * written twice to enable recovery. 355 + * This function writes the master node. Returns zero in case of success and a 356 + * negative error code in case of failure. The master node is written twice to 357 + * enable recovery. 359 358 */ 360 359 int ubifs_write_master(struct ubifs_info *c) 361 360 {
-1
fs/ubifs/orphan.c
··· 346 346 int lnum; 347 347 348 348 /* Unmap any unused LEBs after consolidation */ 349 - lnum = c->ohead_lnum + 1; 350 349 for (lnum = c->ohead_lnum + 1; lnum <= c->orph_last; lnum++) { 351 350 err = ubifs_leb_unmap(c, lnum); 352 351 if (err)
+2 -3
fs/ubifs/recovery.c
··· 596 596 * drop_last_node - drop the last node. 597 597 * @sleb: scanned LEB information 598 598 * @offs: offset of dropped nodes is returned here 599 - * @grouped: non-zero if whole group of nodes have to be dropped 600 599 * 601 600 * This is a helper function for 'ubifs_recover_leb()' which drops the last 602 601 * node of the scanned LEB. ··· 628 629 * 629 630 * This function does a scan of a LEB, but caters for errors that might have 630 631 * been caused by the unclean unmount from which we are attempting to recover. 631 - * Returns %0 in case of success, %-EUCLEAN if an unrecoverable corruption is 632 - * found, and a negative error code in case of failure. 632 + * Returns the scanned information on success and a negative error code on 633 + * failure. 633 634 */ 634 635 struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum, 635 636 int offs, void *sbuf, int jhead)
+3 -1
fs/ubifs/sb.c
··· 332 332 cs->ch.node_type = UBIFS_CS_NODE; 333 333 err = ubifs_write_node(c, cs, UBIFS_CS_NODE_SZ, UBIFS_LOG_LNUM, 0); 334 334 kfree(cs); 335 + if (err) 336 + return err; 335 337 336 338 ubifs_msg("default file-system created"); 337 339 return 0; ··· 449 447 goto failed; 450 448 } 451 449 452 - if (c->default_compr < 0 || c->default_compr >= UBIFS_COMPR_TYPES_CNT) { 450 + if (c->default_compr >= UBIFS_COMPR_TYPES_CNT) { 453 451 err = 13; 454 452 goto failed; 455 453 }
+7 -7
fs/ubifs/scan.c
··· 131 131 * @offs: offset to start at (usually zero) 132 132 * @sbuf: scan buffer (must be c->leb_size) 133 133 * 134 - * This function returns %0 on success and a negative error code on failure. 134 + * This function returns the scanned information on success and a negative error 135 + * code on failure. 135 136 */ 136 137 struct ubifs_scan_leb *ubifs_start_scan(const struct ubifs_info *c, int lnum, 137 138 int offs, void *sbuf) ··· 158 157 return ERR_PTR(err); 159 158 } 160 159 161 - if (err == -EBADMSG) 162 - sleb->ecc = 1; 163 - 160 + /* 161 + * Note, we ignore integrity errors (EBADMSG) because all the nodes are 162 + * protected by CRC checksums. 163 + */ 164 164 return sleb; 165 165 166 166 ··· 171 169 * @sleb: scanning information 172 170 * @lnum: logical eraseblock number 173 171 * @offs: offset to start at (usually zero) 174 - * 175 - * This function returns %0 on success and a negative error code on failure. 176 172 */ 177 173 void ubifs_end_scan(const struct ubifs_info *c, struct ubifs_scan_leb *sleb, 178 174 int lnum, int offs) ··· 257 257 * @quiet: print no messages 258 258 * 259 259 * This function scans LEB number @lnum and returns complete information about 260 - * its contents. Returns the scaned information in case of success and, 260 + * its contents. Returns the scanned information in case of success and, 261 261 * %-EUCLEAN if the LEB needs recovery, and other negative error codes in case 262 262 * of failure. 263 263 *
+9 -10
fs/ubifs/super.c
··· 75 75 return 1; 76 76 } 77 77 78 - if (ui->compr_type < 0 || ui->compr_type >= UBIFS_COMPR_TYPES_CNT) { 78 + if (ui->compr_type >= UBIFS_COMPR_TYPES_CNT) { 79 79 ubifs_err("unknown compression type %d", ui->compr_type); 80 80 return 2; 81 81 } ··· 424 424 struct ubifs_info *c = root->d_sb->s_fs_info; 425 425 426 426 if (c->mount_opts.unmount_mode == 2) 427 - seq_printf(s, ",fast_unmount"); 427 + seq_puts(s, ",fast_unmount"); 428 428 else if (c->mount_opts.unmount_mode == 1) 429 - seq_printf(s, ",norm_unmount"); 429 + seq_puts(s, ",norm_unmount"); 430 430 431 431 if (c->mount_opts.bulk_read == 2) 432 - seq_printf(s, ",bulk_read"); 432 + seq_puts(s, ",bulk_read"); 433 433 else if (c->mount_opts.bulk_read == 1) 434 - seq_printf(s, ",no_bulk_read"); 434 + seq_puts(s, ",no_bulk_read"); 435 435 436 436 if (c->mount_opts.chk_data_crc == 2) 437 - seq_printf(s, ",chk_data_crc"); 437 + seq_puts(s, ",chk_data_crc"); 438 438 else if (c->mount_opts.chk_data_crc == 1) 439 - seq_printf(s, ",no_chk_data_crc"); 439 + seq_puts(s, ",no_chk_data_crc"); 440 440 441 441 if (c->mount_opts.override_compr) { 442 442 seq_printf(s, ",compr=%s", ··· 796 796 { 797 797 int i, err; 798 798 799 - c->jheads = kzalloc(c->jhead_cnt * sizeof(struct ubifs_jhead), 800 - GFP_KERNEL); 799 + c->jheads = kcalloc(c->jhead_cnt, sizeof(struct ubifs_jhead), 800 + GFP_KERNEL); 801 801 if (!c->jheads) 802 802 return -ENOMEM; 803 803 ··· 1963 1963 mutex_init(&c->lp_mutex); 1964 1964 mutex_init(&c->tnc_mutex); 1965 1965 mutex_init(&c->log_mutex); 1966 - mutex_init(&c->mst_mutex); 1967 1966 mutex_init(&c->umount_mutex); 1968 1967 mutex_init(&c->bu_mutex); 1969 1968 mutex_init(&c->write_reserve_mutex);
-1
fs/ubifs/tnc.c
··· 3294 3294 goto out_unlock; 3295 3295 3296 3296 if (err) { 3297 - err = -EINVAL; 3298 3297 key = &from_key; 3299 3298 goto out_dump; 3300 3299 }
-1
fs/ubifs/tnc_commit.c
··· 389 389 ubifs_dump_lprops(c); 390 390 } 391 391 /* Try to commit anyway */ 392 - err = 0; 393 392 break; 394 393 } 395 394 p++;
-4
fs/ubifs/ubifs.h
··· 314 314 * @nodes_cnt: number of nodes scanned 315 315 * @nodes: list of struct ubifs_scan_node 316 316 * @endpt: end point (and therefore the start of empty space) 317 - * @ecc: read returned -EBADMSG 318 317 * @buf: buffer containing entire LEB scanned 319 318 */ 320 319 struct ubifs_scan_leb { ··· 321 322 int nodes_cnt; 322 323 struct list_head nodes; 323 324 int endpt; 324 - int ecc; 325 325 void *buf; 326 326 }; 327 327 ··· 1049 1051 * 1050 1052 * @mst_node: master node 1051 1053 * @mst_offs: offset of valid master node 1052 - * @mst_mutex: protects the master node area, @mst_node, and @mst_offs 1053 1054 * 1054 1055 * @max_bu_buf_len: maximum bulk-read buffer length 1055 1056 * @bu_mutex: protects the pre-allocated bulk-read buffer and @c->bu ··· 1289 1292 1290 1293 struct ubifs_mst_node *mst_node; 1291 1294 int mst_offs; 1292 - struct mutex mst_mutex; 1293 1295 1294 1296 int max_bu_buf_len; 1295 1297 struct mutex bu_mutex;