Merge branch 'linux-next' of git://git.infradead.org/ubifs-2.6

* 'linux-next' of git://git.infradead.org/ubifs-2.6:
UBIFS: pre-allocate bulk-read buffer
UBIFS: do not allocate too much
UBIFS: do not print scary memory allocation warnings
UBIFS: allow for gaps when dirtying the LPT
UBIFS: fix compilation warnings
MAINTAINERS: change UBI/UBIFS git tree URLs
UBIFS: endian handling fixes and annotations
UBIFS: remove printk

+223 -111
+2 -2
MAINTAINERS
··· 4236 4236 P: Adrian Hunter 4237 4237 M: ext-adrian.hunter@nokia.com 4238 4238 L: linux-mtd@lists.infradead.org 4239 - T: git git://git.infradead.org/~dedekind/ubifs-2.6.git 4239 + T: git git://git.infradead.org/ubifs-2.6.git 4240 4240 W: http://www.linux-mtd.infradead.org/doc/ubifs.html 4241 4241 S: Maintained 4242 4242 ··· 4290 4290 M: dedekind@infradead.org 4291 4291 W: http://www.linux-mtd.infradead.org/ 4292 4292 L: linux-mtd@lists.infradead.org 4293 - T: git git://git.infradead.org/~dedekind/ubi-2.6.git 4293 + T: git git://git.infradead.org/ubi-2.6.git 4294 4294 S: Maintained 4295 4295 4296 4296 USB ACM DRIVER
+2 -2
fs/ubifs/commit.c
··· 234 234 int err; 235 235 struct ubifs_info *c = info; 236 236 237 - ubifs_msg("background thread \"%s\" started, PID %d", 238 - c->bgt_name, current->pid); 237 + dbg_msg("background thread \"%s\" started, PID %d", 238 + c->bgt_name, current->pid); 239 239 set_freezable(); 240 240 241 241 while (1) {
+39 -27
fs/ubifs/debug.c
··· 101 101 if (c->key_fmt == UBIFS_SIMPLE_KEY_FMT) { 102 102 switch (type) { 103 103 case UBIFS_INO_KEY: 104 - sprintf(p, "(%lu, %s)", key_inum(c, key), 104 + sprintf(p, "(%lu, %s)", (unsigned long)key_inum(c, key), 105 105 get_key_type(type)); 106 106 break; 107 107 case UBIFS_DENT_KEY: 108 108 case UBIFS_XENT_KEY: 109 - sprintf(p, "(%lu, %s, %#08x)", key_inum(c, key), 109 + sprintf(p, "(%lu, %s, %#08x)", 110 + (unsigned long)key_inum(c, key), 110 111 get_key_type(type), key_hash(c, key)); 111 112 break; 112 113 case UBIFS_DATA_KEY: 113 - sprintf(p, "(%lu, %s, %u)", key_inum(c, key), 114 + sprintf(p, "(%lu, %s, %u)", 115 + (unsigned long)key_inum(c, key), 114 116 get_key_type(type), key_block(c, key)); 115 117 break; 116 118 case UBIFS_TRUN_KEY: 117 119 sprintf(p, "(%lu, %s)", 118 - key_inum(c, key), get_key_type(type)); 120 + (unsigned long)key_inum(c, key), 121 + get_key_type(type)); 119 122 break; 120 123 default: 121 124 sprintf(p, "(bad key type: %#08x, %#08x)", ··· 367 364 le32_to_cpu(mst->ihead_lnum)); 368 365 printk(KERN_DEBUG "\tihead_offs %u\n", 369 366 le32_to_cpu(mst->ihead_offs)); 370 - printk(KERN_DEBUG "\tindex_size %u\n", 371 - le32_to_cpu(mst->index_size)); 367 + printk(KERN_DEBUG "\tindex_size %llu\n", 368 + (unsigned long long)le64_to_cpu(mst->index_size)); 372 369 printk(KERN_DEBUG "\tlpt_lnum %u\n", 373 370 le32_to_cpu(mst->lpt_lnum)); 374 371 printk(KERN_DEBUG "\tlpt_offs %u\n", ··· 1592 1589 1593 1590 if (inum > c->highest_inum) { 1594 1591 ubifs_err("too high inode number, max. is %lu",
1595 - c->highest_inum); 1592 + (unsigned long)c->highest_inum); 1596 1593 return ERR_PTR(-EINVAL); 1597 1594 } 1598 1595 ··· 1671 1668 ino_key_init(c, &key, inum); 1672 1669 err = ubifs_lookup_level0(c, &key, &znode, &n); 1673 1670 if (!err) { 1674 - ubifs_err("inode %lu not found in index", inum); 1671 + ubifs_err("inode %lu not found in index", (unsigned long)inum); 1675 1672 return ERR_PTR(-ENOENT); 1676 1673 } else if (err < 0) { 1677 - ubifs_err("error %d while looking up inode %lu", err, inum); 1674 + ubifs_err("error %d while looking up inode %lu", 1675 + err, (unsigned long)inum); 1678 1676 return ERR_PTR(err); 1679 1677 } 1680 1678 1681 1679 zbr = &znode->zbranch[n]; 1682 1680 if (zbr->len < UBIFS_INO_NODE_SZ) { 1683 - ubifs_err("bad node %lu node length %d", inum, zbr->len); 1681 + ubifs_err("bad node %lu node length %d", 1682 + (unsigned long)inum, zbr->len); 1684 1683 return ERR_PTR(-EINVAL); 1685 1684 } 1686 1685 ··· 1702 1697 kfree(ino); 1703 1698 if (IS_ERR(fscki)) { 1704 1699 ubifs_err("error %ld while adding inode %lu node", 1705 - PTR_ERR(fscki), inum); 1700 + PTR_ERR(fscki), (unsigned long)inum); 1706 1701 return fscki; 1707 1702 } 1708 1703 ··· 1791 1786 if (IS_ERR(fscki)) { 1792 1787 err = PTR_ERR(fscki); 1793 1788 ubifs_err("error %d while processing data node and " 1794 - "trying to find inode node %lu", err, inum); 1789 + "trying to find inode node %lu", 1790 + err, (unsigned long)inum); 1795 1791 goto out_dump; 1796 1792 } 1797 1793 ··· 1825 1819 if (IS_ERR(fscki)) { 1826 1820 err = PTR_ERR(fscki); 1827 1821 ubifs_err("error %d while processing entry node and " 1828 - "trying to find inode node %lu", err, inum); 1822 + "trying to find inode node %lu", 1823 + err, (unsigned long)inum); 1829 1824 goto out_dump; 1830 1825 } 1831 1826 ··· 1839 1832 err = PTR_ERR(fscki); 1840 1833 ubifs_err("error %d while processing entry node and " 1841 1834 "trying to find parent inode node %lu", 1842 - err, inum); 1835 + err, (unsigned long)inum); 
1843 1836 goto out_dump; 1844 1837 } 1845 1838 ··· 1930 1923 fscki->references != 1) { 1931 1924 ubifs_err("directory inode %lu has %d " 1932 1925 "direntries which refer it, but " 1933 - "should be 1", fscki->inum, 1926 + "should be 1", 1927 + (unsigned long)fscki->inum, 1934 1928 fscki->references); 1935 1929 goto out_dump; 1936 1930 } ··· 1939 1931 fscki->references != 0) { 1940 1932 ubifs_err("root inode %lu has non-zero (%d) " 1941 1933 "direntries which refer it", 1942 - fscki->inum, fscki->references); 1934 + (unsigned long)fscki->inum, 1935 + fscki->references); 1943 1936 goto out_dump; 1944 1937 } 1945 1938 if (fscki->calc_sz != fscki->size) { 1946 1939 ubifs_err("directory inode %lu size is %lld, " 1947 1940 "but calculated size is %lld", 1948 - fscki->inum, fscki->size, 1949 - fscki->calc_sz); 1941 + (unsigned long)fscki->inum, 1942 + fscki->size, fscki->calc_sz); 1950 1943 goto out_dump; 1951 1944 } 1952 1945 if (fscki->calc_cnt != fscki->nlink) { 1953 1946 ubifs_err("directory inode %lu nlink is %d, " 1954 1947 "but calculated nlink is %d", 1955 - fscki->inum, fscki->nlink, 1956 - fscki->calc_cnt); 1948 + (unsigned long)fscki->inum, 1949 + fscki->nlink, fscki->calc_cnt); 1957 1950 goto out_dump; 1958 1951 } 1959 1952 } else { 1960 1953 if (fscki->references != fscki->nlink) { 1961 1954 ubifs_err("inode %lu nlink is %d, but " 1962 - "calculated nlink is %d", fscki->inum, 1955 + "calculated nlink is %d", 1956 + (unsigned long)fscki->inum, 1963 1957 fscki->nlink, fscki->references); 1964 1958 goto out_dump; 1965 1959 } ··· 1969 1959 if (fscki->xattr_sz != fscki->calc_xsz) { 1970 1960 ubifs_err("inode %lu has xattr size %u, but " 1971 1961 "calculated size is %lld", 1972 - fscki->inum, fscki->xattr_sz, 1962 + (unsigned long)fscki->inum, fscki->xattr_sz, 1973 1963 fscki->calc_xsz); 1974 1964 goto out_dump; 1975 1965 } 1976 1966 if (fscki->xattr_cnt != fscki->calc_xcnt) { 1977 1967 ubifs_err("inode %lu has %u xattrs, but " 1978 - "calculated count is %lld", 
fscki->inum, 1968 + "calculated count is %lld", 1969 + (unsigned long)fscki->inum, 1979 1970 fscki->xattr_cnt, fscki->calc_xcnt); 1980 1971 goto out_dump; 1981 1972 } 1982 1973 if (fscki->xattr_nms != fscki->calc_xnms) { 1983 1974 ubifs_err("inode %lu has xattr names' size %u, but " 1984 1975 "calculated names' size is %lld", 1985 - fscki->inum, fscki->xattr_nms, 1976 + (unsigned long)fscki->inum, fscki->xattr_nms, 1986 1977 fscki->calc_xnms); 1987 1978 goto out_dump; 1988 1979 } ··· 1996 1985 ino_key_init(c, &key, fscki->inum); 1997 1986 err = ubifs_lookup_level0(c, &key, &znode, &n); 1998 1987 if (!err) { 1999 - ubifs_err("inode %lu not found in index", fscki->inum); 1988 + ubifs_err("inode %lu not found in index", 1989 + (unsigned long)fscki->inum); 2000 1990 return -ENOENT; 2001 1991 } else if (err < 0) { 2002 1992 ubifs_err("error %d while looking up inode %lu", 2003 - err, fscki->inum); 1993 + err, (unsigned long)fscki->inum); 2004 1994 return err; 2005 1995 } 2006 1996 ··· 2019 2007 } 2020 2008 2021 2009 ubifs_msg("dump of the inode %lu sitting in LEB %d:%d", 2022 - fscki->inum, zbr->lnum, zbr->offs); 2010 + (unsigned long)fscki->inum, zbr->lnum, zbr->offs); 2023 2011 dbg_dump_node(c, ino); 2024 2012 kfree(ino); 2025 2013 return -EINVAL;
+3 -2
fs/ubifs/dir.c
··· 161 161 return ERR_PTR(-EINVAL); 162 162 } 163 163 ubifs_warn("running out of inode numbers (current %lu, max %d)", 164 - c->highest_inum, INUM_WATERMARK); 164 + (unsigned long)c->highest_inum, INUM_WATERMARK); 165 165 } 166 166 167 167 inode->i_ino = ++c->highest_inum; ··· 428 428 dbg_gen("feed '%s', ino %llu, new f_pos %#x", 429 429 dent->name, (unsigned long long)le64_to_cpu(dent->inum), 430 430 key_hash_flash(c, &dent->key)); 431 - ubifs_assert(dent->ch.sqnum > ubifs_inode(dir)->creat_sqnum); 431 + ubifs_assert(le64_to_cpu(dent->ch.sqnum) > 432 + ubifs_inode(dir)->creat_sqnum); 432 433 433 434 nm.len = le16_to_cpu(dent->nlen); 434 435 over = filldir(dirent, dent->name, nm.len, file->f_pos,
+63 -28
fs/ubifs/file.c
··· 72 72 return err; 73 73 } 74 74 75 - ubifs_assert(dn->ch.sqnum > ubifs_inode(inode)->creat_sqnum); 75 + ubifs_assert(le64_to_cpu(dn->ch.sqnum) > ubifs_inode(inode)->creat_sqnum); 76 76 77 77 len = le32_to_cpu(dn->size); 78 78 if (len <= 0 || len > UBIFS_BLOCK_SIZE) ··· 626 626 627 627 dn = bu->buf + (bu->zbranch[nn].offs - offs); 628 628 629 - ubifs_assert(dn->ch.sqnum > 629 + ubifs_assert(le64_to_cpu(dn->ch.sqnum) > 630 630 ubifs_inode(inode)->creat_sqnum); 631 631 632 632 len = le32_to_cpu(dn->size); ··· 691 691 /** 692 692 * ubifs_do_bulk_read - do bulk-read. 693 693 * @c: UBIFS file-system description object 694 - * @page1: first page 694 + * @bu: bulk-read information 695 + * @page1: first page to read 695 696 * 696 697 * This function returns %1 if the bulk-read is done, otherwise %0 is returned. 697 698 */ 698 - static int ubifs_do_bulk_read(struct ubifs_info *c, struct page *page1) 699 + static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu, 700 + struct page *page1) 699 701 { 700 702 pgoff_t offset = page1->index, end_index; 701 703 struct address_space *mapping = page1->mapping; 702 704 struct inode *inode = mapping->host; 703 705 struct ubifs_inode *ui = ubifs_inode(inode); 704 - struct bu_info *bu; 705 706 int err, page_idx, page_cnt, ret = 0, n = 0; 707 + int allocate = bu->buf ? 0 : 1; 706 708 loff_t isize; 707 - 708 - bu = kmalloc(sizeof(struct bu_info), GFP_NOFS); 709 - if (!bu) 710 - return 0; 711 - 712 - bu->buf_len = c->bulk_read_buf_size; 713 - bu->buf = kmalloc(bu->buf_len, GFP_NOFS); 714 - if (!bu->buf) 715 - goto out_free; 716 - 717 - data_key_init(c, &bu->key, inode->i_ino, 718 - offset << UBIFS_BLOCKS_PER_PAGE_SHIFT); 719 709 720 710 err = ubifs_tnc_get_bu_keys(c, bu); 721 711 if (err) ··· 725 735 * together. If all the pages were like this, bulk-read would 726 736 * reduce performance, so we turn it off for a while. 
727 737 */ 728 - ui->read_in_a_row = 0; 729 - ui->bulk_read = 0; 730 - goto out_free; 738 + goto out_bu_off; 731 739 } 732 740 733 741 if (bu->cnt) { 742 + if (allocate) { 743 + /* 744 + * Allocate bulk-read buffer depending on how many data 745 + * nodes we are going to read. 746 + */ 747 + bu->buf_len = bu->zbranch[bu->cnt - 1].offs + 748 + bu->zbranch[bu->cnt - 1].len - 749 + bu->zbranch[0].offs; 750 + ubifs_assert(bu->buf_len > 0); 751 + ubifs_assert(bu->buf_len <= c->leb_size); 752 + bu->buf = kmalloc(bu->buf_len, GFP_NOFS | __GFP_NOWARN); 753 + if (!bu->buf) 754 + goto out_bu_off; 755 + } 756 + 734 757 err = ubifs_tnc_bulk_read(c, bu); 735 758 if (err) 736 759 goto out_warn; ··· 782 779 ui->last_page_read = offset + page_idx - 1; 783 780 784 781 out_free: 785 - kfree(bu->buf); 786 - kfree(bu); 782 + if (allocate) 783 + kfree(bu->buf); 787 784 return ret; 788 785 789 786 out_warn: 790 787 ubifs_warn("ignoring error %d and skipping bulk-read", err); 788 + goto out_free; 789 + 790 + out_bu_off: 791 + ui->read_in_a_row = ui->bulk_read = 0; 791 792 goto out_free; 792 793 } 793 794 ··· 810 803 struct ubifs_info *c = inode->i_sb->s_fs_info; 811 804 struct ubifs_inode *ui = ubifs_inode(inode); 812 805 pgoff_t index = page->index, last_page_read = ui->last_page_read; 813 - int ret = 0; 806 + struct bu_info *bu; 807 + int err = 0, allocated = 0; 814 808 815 809 ui->last_page_read = index; 816 - 817 810 if (!c->bulk_read) 818 811 return 0; 812 + 819 813 /* 820 - * Bulk-read is protected by ui_mutex, but it is an optimization, so 821 - * don't bother if we cannot lock the mutex. 814 + * Bulk-read is protected by @ui->ui_mutex, but it is an optimization, 815 + * so don't bother if we cannot lock the mutex. 
822 816 */ 823 817 if (!mutex_trylock(&ui->ui_mutex)) 824 818 return 0; 819 + 825 820 if (index != last_page_read + 1) { 826 821 /* Turn off bulk-read if we stop reading sequentially */ 827 822 ui->read_in_a_row = 1; ··· 831 822 ui->bulk_read = 0; 832 823 goto out_unlock; 833 824 } 825 + 834 826 if (!ui->bulk_read) { 835 827 ui->read_in_a_row += 1; 836 828 if (ui->read_in_a_row < 3) ··· 839 829 /* Three reads in a row, so switch on bulk-read */ 840 830 ui->bulk_read = 1; 841 831 } 842 - ret = ubifs_do_bulk_read(c, page); 832 + 833 + /* 834 + * If possible, try to use pre-allocated bulk-read information, which 835 + * is protected by @c->bu_mutex. 836 + */ 837 + if (mutex_trylock(&c->bu_mutex)) 838 + bu = &c->bu; 839 + else { 840 + bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN); 841 + if (!bu) 842 + goto out_unlock; 843 + 844 + bu->buf = NULL; 845 + allocated = 1; 846 + } 847 + 848 + bu->buf_len = c->max_bu_buf_len; 849 + data_key_init(c, &bu->key, inode->i_ino, 850 + page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT); 851 + err = ubifs_do_bulk_read(c, bu, page); 852 + 853 + if (!allocated) 854 + mutex_unlock(&c->bu_mutex); 855 + else 856 + kfree(bu); 857 + 843 858 out_unlock: 844 859 mutex_unlock(&ui->ui_mutex); 845 - return ret; 860 + return err; 846 861 } 847 862 848 863 static int ubifs_readpage(struct file *file, struct page *page)
+5 -3
fs/ubifs/journal.c
··· 690 690 int dlen = UBIFS_DATA_NODE_SZ + UBIFS_BLOCK_SIZE * WORST_COMPR_FACTOR; 691 691 struct ubifs_inode *ui = ubifs_inode(inode); 692 692 693 - dbg_jnl("ino %lu, blk %u, len %d, key %s", key_inum(c, key), 694 - key_block(c, key), len, DBGKEY(key)); 693 + dbg_jnl("ino %lu, blk %u, len %d, key %s", 694 + (unsigned long)key_inum(c, key), key_block(c, key), len, 695 + DBGKEY(key)); 695 696 ubifs_assert(len <= UBIFS_BLOCK_SIZE); 696 697 697 698 data = kmalloc(dlen, GFP_NOFS); ··· 1129 1128 ino_t inum = inode->i_ino; 1130 1129 unsigned int blk; 1131 1130 1132 - dbg_jnl("ino %lu, size %lld -> %lld", inum, old_size, new_size); 1131 + dbg_jnl("ino %lu, size %lld -> %lld", 1132 + (unsigned long)inum, old_size, new_size); 1133 1133 ubifs_assert(!ui->data_len); 1134 1134 ubifs_assert(S_ISREG(inode->i_mode)); 1135 1135 ubifs_assert(mutex_is_locked(&ui->ui_mutex));
+2 -2
fs/ubifs/key.h
··· 345 345 { 346 346 const union ubifs_key *key = k; 347 347 348 - return le32_to_cpu(key->u32[1]) >> UBIFS_S_KEY_BLOCK_BITS; 348 + return le32_to_cpu(key->j32[1]) >> UBIFS_S_KEY_BLOCK_BITS; 349 349 } 350 350 351 351 /** ··· 416 416 { 417 417 const union ubifs_key *key = k; 418 418 419 - return le32_to_cpu(key->u32[1]) & UBIFS_S_KEY_BLOCK_MASK; 419 + return le32_to_cpu(key->j32[1]) & UBIFS_S_KEY_BLOCK_MASK; 420 420 } 421 421 422 422 /**
-2
fs/ubifs/lpt_commit.c
··· 571 571 /* We assume here that LEB zero is never an LPT LEB */ 572 572 if (nnode->nbranch[iip].lnum) 573 573 return ubifs_get_pnode(c, nnode, iip); 574 - else 575 - return NULL; 576 574 } 577 575 578 576 /* Go up while can't go right */
+16 -12
fs/ubifs/orphan.c
··· 105 105 list_add_tail(&orphan->list, &c->orph_list); 106 106 list_add_tail(&orphan->new_list, &c->orph_new); 107 107 spin_unlock(&c->orphan_lock); 108 - dbg_gen("ino %lu", inum); 108 + dbg_gen("ino %lu", (unsigned long)inum); 109 109 return 0; 110 110 } 111 111 ··· 132 132 else { 133 133 if (o->dnext) { 134 134 spin_unlock(&c->orphan_lock); 135 - dbg_gen("deleted twice ino %lu", inum); 135 + dbg_gen("deleted twice ino %lu", 136 + (unsigned long)inum); 136 137 return; 137 138 } 138 139 if (o->cnext) { 139 140 o->dnext = c->orph_dnext; 140 141 c->orph_dnext = o; 141 142 spin_unlock(&c->orphan_lock); 142 - dbg_gen("delete later ino %lu", inum); 143 + dbg_gen("delete later ino %lu", 144 + (unsigned long)inum); 143 145 return; 144 146 } 145 147 rb_erase(p, &c->orph_tree); ··· 153 151 } 154 152 spin_unlock(&c->orphan_lock); 155 153 kfree(o); 156 - dbg_gen("inum %lu", inum); 154 + dbg_gen("inum %lu", (unsigned long)inum); 157 155 return; 158 156 } 159 157 } 160 158 spin_unlock(&c->orphan_lock); 161 - dbg_err("missing orphan ino %lu", inum); 159 + dbg_err("missing orphan ino %lu", (unsigned long)inum); 162 160 dbg_dump_stack(); 163 161 } 164 162 ··· 450 448 rb_erase(&orphan->rb, &c->orph_tree); 451 449 list_del(&orphan->list); 452 450 c->tot_orphans -= 1; 453 - dbg_gen("deleting orphan ino %lu", orphan->inum); 451 + dbg_gen("deleting orphan ino %lu", (unsigned long)orphan->inum); 454 452 kfree(orphan); 455 453 } 456 454 c->orph_dnext = NULL; ··· 538 536 list_add_tail(&orphan->list, &c->orph_list); 539 537 orphan->dnext = c->orph_dnext; 540 538 c->orph_dnext = orphan; 541 - dbg_mnt("ino %lu, new %d, tot %d", 542 - inum, c->new_orphans, c->tot_orphans); 539 + dbg_mnt("ino %lu, new %d, tot %d", (unsigned long)inum, 540 + c->new_orphans, c->tot_orphans); 543 541 return 0; 544 542 } 545 543 ··· 611 609 n = (le32_to_cpu(orph->ch.len) - UBIFS_ORPH_NODE_SZ) >> 3; 612 610 for (i = 0; i < n; i++) { 613 611 inum = le64_to_cpu(orph->inos[i]); 614 - dbg_rcvry("deleting orphaned inode %lu", inum);
612 + dbg_rcvry("deleting orphaned inode %lu", 613 + (unsigned long)inum); 615 614 err = ubifs_tnc_remove_ino(c, inum); 616 615 if (err) 617 616 return err; ··· 843 840 if (inum != ci->last_ino) { 844 841 /* Lowest node type is the inode node, so it comes first */ 845 842 if (key_type(c, &zbr->key) != UBIFS_INO_KEY) 846 - ubifs_err("found orphan node ino %lu, type %d", inum, 847 - key_type(c, &zbr->key)); 843 + ubifs_err("found orphan node ino %lu, type %d", 844 + (unsigned long)inum, key_type(c, &zbr->key)); 848 845 ci->last_ino = inum; 849 846 ci->tot_inos += 1; 850 847 err = ubifs_tnc_read_node(c, zbr, ci->node); ··· 856 853 /* Must be recorded as an orphan */ 857 854 if (!dbg_find_check_orphan(&ci->root, inum) && 858 855 !dbg_find_orphan(c, inum)) { 859 - ubifs_err("missing orphan, ino %lu", inum); 856 + ubifs_err("missing orphan, ino %lu", 857 + (unsigned long)inum); 860 858 ci->missing += 1; 861 859 }
+9 -8
fs/ubifs/recovery.c
··· 168 168 struct ubifs_mst_node *mst) 169 169 { 170 170 int err = 0, lnum = UBIFS_MST_LNUM, sz = c->mst_node_alsz; 171 - uint32_t save_flags; 171 + __le32 save_flags; 172 172 173 173 dbg_rcvry("recovery"); 174 174 175 175 save_flags = mst->flags; 176 - mst->flags = cpu_to_le32(le32_to_cpu(mst->flags) | UBIFS_MST_RCVRY); 176 + mst->flags |= cpu_to_le32(UBIFS_MST_RCVRY); 177 177 178 178 ubifs_prepare_node(c, mst, UBIFS_MST_NODE_SZ, 1); 179 179 err = ubi_leb_change(c->ubi, lnum, mst, sz, UBI_SHORTTERM); ··· 1435 1435 err = ubi_leb_change(c->ubi, lnum, c->sbuf, len, UBI_UNKNOWN); 1436 1436 if (err) 1437 1437 goto out; 1438 - dbg_rcvry("inode %lu at %d:%d size %lld -> %lld ", e->inum, lnum, offs, 1439 - i_size, e->d_size); 1438 + dbg_rcvry("inode %lu at %d:%d size %lld -> %lld ", 1439 + (unsigned long)e->inum, lnum, offs, i_size, e->d_size); 1440 1440 return 0; 1441 1441 1442 1442 out: 1443 1443 ubifs_warn("inode %lu failed to fix size %lld -> %lld error %d", 1444 - e->inum, e->i_size, e->d_size, err); 1444 + (unsigned long)e->inum, e->i_size, e->d_size, err); 1445 1445 return err; 1446 1446 } 1447 1447 ··· 1472 1472 return err; 1473 1473 if (err == -ENOENT) { 1474 1474 /* Remove data nodes that have no inode */ 1475 - dbg_rcvry("removing ino %lu", e->inum); 1475 + dbg_rcvry("removing ino %lu", 1476 + (unsigned long)e->inum); 1476 1477 err = ubifs_tnc_remove_ino(c, e->inum); 1477 1478 if (err) 1478 1479 return err; ··· 1494 1493 return PTR_ERR(inode); 1495 1494 if (inode->i_size < e->d_size) { 1496 1495 dbg_rcvry("ino %lu size %lld -> %lld", 1497 - e->inum, e->d_size, 1498 - inode->i_size); 1496 + (unsigned long)e->inum, 1497 + e->d_size, inode->i_size); 1499 1498 inode->i_size = e->d_size; 1500 1499 ubifs_inode(inode)->ui_size = e->d_size; 1501 1500 e->inode = inode;
+1 -1
fs/ubifs/replay.c
··· 1065 1065 ubifs_assert(c->bud_bytes <= c->max_bud_bytes || c->need_recovery); 1066 1066 dbg_mnt("finished, log head LEB %d:%d, max_sqnum %llu, " 1067 1067 "highest_inum %lu", c->lhead_lnum, c->lhead_offs, c->max_sqnum, 1068 - c->highest_inum); 1068 + (unsigned long)c->highest_inum); 1069 1069 out: 1070 1070 destroy_replay_tree(c); 1071 1071 destroy_bud_list(c);
+5 -4
fs/ubifs/sb.c
··· 81 81 int lpt_lebs, lpt_first, orph_lebs, big_lpt, ino_waste, sup_flags = 0; 82 82 int min_leb_cnt = UBIFS_MIN_LEB_CNT; 83 83 uint64_t tmp64, main_bytes; 84 + __le64 tmp_le64; 84 85 85 86 /* Some functions called from here depend on the @c->key_len filed */ 86 87 c->key_len = UBIFS_SK_LEN; ··· 296 295 ino->ch.node_type = UBIFS_INO_NODE; 297 296 ino->creat_sqnum = cpu_to_le64(++c->max_sqnum); 298 297 ino->nlink = cpu_to_le32(2); 299 - tmp = cpu_to_le64(CURRENT_TIME_SEC.tv_sec); 300 - ino->atime_sec = tmp; 301 - ino->ctime_sec = tmp; 302 - ino->mtime_sec = tmp; 298 + tmp_le64 = cpu_to_le64(CURRENT_TIME_SEC.tv_sec); 299 + ino->atime_sec = tmp_le64; 300 + ino->ctime_sec = tmp_le64; 301 + ino->mtime_sec = tmp_le64; 303 302 ino->atime_nsec = 0; 304 303 ino->ctime_nsec = 0; 305 304 ino->mtime_nsec = 0;
+58 -12
fs/ubifs/super.c
··· 36 36 #include <linux/mount.h> 37 37 #include "ubifs.h" 38 38 39 + /* 40 + * Maximum amount of memory we may 'kmalloc()' without worrying that we are 41 + * allocating too much. 42 + */ 43 + #define UBIFS_KMALLOC_OK (128*1024) 44 + 39 45 /* Slab cache for UBIFS inodes */ 40 46 struct kmem_cache *ubifs_inode_slab; 41 47 ··· 567 561 * calculations when reporting free space. 568 562 */ 569 563 c->leb_overhead = c->leb_size % UBIFS_MAX_DATA_NODE_SZ; 570 - /* Buffer size for bulk-reads */ 571 - c->bulk_read_buf_size = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ; 572 - if (c->bulk_read_buf_size > c->leb_size) 573 - c->bulk_read_buf_size = c->leb_size; 574 - if (c->bulk_read_buf_size > 128 * 1024) { 575 - /* Check if we can kmalloc more than 128KiB */ 576 - void *try = kmalloc(c->bulk_read_buf_size, GFP_KERNEL); 577 564 578 - kfree(try); 579 - if (!try) 580 - c->bulk_read_buf_size = 128 * 1024; 581 - } 565 + /* Buffer size for bulk-reads */ 566 + c->max_bu_buf_len = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ; 567 + if (c->max_bu_buf_len > c->leb_size) 568 + c->max_bu_buf_len = c->leb_size; 582 569 return 0; 583 570 } 584 571 ··· 991 992 } 992 993 993 994 /** 995 + * bu_init - initialize bulk-read information. 
996 + * @c: UBIFS file-system description object 997 + */ 998 + static void bu_init(struct ubifs_info *c) 999 + { 1000 + ubifs_assert(c->bulk_read == 1); 1001 + 1002 + if (c->bu.buf) 1003 + return; /* Already initialized */ 1004 + 1005 + again: 1006 + c->bu.buf = kmalloc(c->max_bu_buf_len, GFP_KERNEL | __GFP_NOWARN); 1007 + if (!c->bu.buf) { 1008 + if (c->max_bu_buf_len > UBIFS_KMALLOC_OK) { 1009 + c->max_bu_buf_len = UBIFS_KMALLOC_OK; 1010 + goto again; 1011 + } 1012 + 1013 + /* Just disable bulk-read */ 1014 + ubifs_warn("Cannot allocate %d bytes of memory for bulk-read, " 1015 + "disabling it", c->max_bu_buf_len); 1016 + c->mount_opts.bulk_read = 1; 1017 + c->bulk_read = 0; 1018 + return; 1019 + } 1020 + } 1021 + 1022 + /** 994 1023 * mount_ubifs - mount UBIFS file-system. 995 1024 * @c: UBIFS file-system description object 996 1025 * ··· 1086 1059 goto out_free; 1087 1060 } 1088 1061 1062 + if (c->bulk_read == 1) 1063 + bu_init(c); 1064 + 1065 + /* 1066 + * We have to check all CRCs, even for data nodes, when we mount the FS 1067 + * (specifically, when we are replaying). 
1068 + */ 1089 1069 c->always_chk_crc = 1; 1090 1070 1091 1071 err = ubifs_read_superblock(c); ··· 1323 1289 out_dereg: 1324 1290 dbg_failure_mode_deregistration(c); 1325 1291 out_free: 1292 + kfree(c->bu.buf); 1326 1293 vfree(c->ileb_buf); 1327 1294 vfree(c->sbuf); 1328 1295 kfree(c->bottom_up_buf); ··· 1360 1325 kfree(c->cbuf); 1361 1326 kfree(c->rcvrd_mst_node); 1362 1327 kfree(c->mst_node); 1328 + kfree(c->bu.buf); 1329 + vfree(c->ileb_buf); 1363 1330 vfree(c->sbuf); 1364 1331 kfree(c->bottom_up_buf); 1365 1332 UBIFS_DBG(vfree(c->dbg_buf)); 1366 - vfree(c->ileb_buf); 1367 1333 dbg_failure_mode_deregistration(c); 1368 1334 } 1369 1335 ··· 1662 1626 ubifs_err("invalid or unknown remount parameter"); 1663 1627 return err; 1664 1628 } 1629 + 1665 1630 if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) { 1666 1631 err = ubifs_remount_rw(c); 1667 1632 if (err) 1668 1633 return err; 1669 1634 } else if (!(sb->s_flags & MS_RDONLY) && (*flags & MS_RDONLY)) 1670 1635 ubifs_remount_ro(c); 1636 + 1637 + if (c->bulk_read == 1) 1638 + bu_init(c); 1639 + else { 1640 + dbg_gen("disable bulk-read"); 1641 + kfree(c->bu.buf); 1642 + c->bu.buf = NULL; 1643 + } 1671 1644 1672 1645 return 0; 1673 1646 } ··· 1768 1723 mutex_init(&c->log_mutex); 1769 1724 mutex_init(&c->mst_mutex); 1770 1725 mutex_init(&c->umount_mutex); 1726 + mutex_init(&c->bu_mutex); 1771 1727 init_waitqueue_head(&c->cmt_wq); 1772 1728 c->buds = RB_ROOT; 1773 1729 c->old_idx = RB_ROOT;
+9 -3
fs/ubifs/tnc.c
··· 1501 1501 * @bu: bulk-read parameters and results 1502 1502 * 1503 1503 * Lookup consecutive data node keys for the same inode that reside 1504 - * consecutively in the same LEB. 1504 + * consecutively in the same LEB. This function returns zero in case of success 1505 + * and a negative error code in case of failure. 1506 + * 1507 + * Note, if the bulk-read buffer length (@bu->buf_len) is known, this function 1508 + * makes sure bulk-read nodes fit the buffer. Otherwise, this function prepares 1509 + * maxumum possible amount of nodes for bulk-read. 1505 1510 */ 1506 1511 int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu) 1507 1512 { ··· 2682 2677 struct ubifs_dent_node *xent, *pxent = NULL; 2683 2678 struct qstr nm = { .name = NULL }; 2684 2679 2685 - dbg_tnc("ino %lu", inum); 2680 + dbg_tnc("ino %lu", (unsigned long)inum); 2686 2681 2687 2682 /* 2688 2683 * Walk all extended attribute entries and remove them together with ··· 2702 2697 } 2703 2698 2704 2699 xattr_inum = le64_to_cpu(xent->inum); 2705 - dbg_tnc("xent '%s', ino %lu", xent->name, xattr_inum); 2700 + dbg_tnc("xent '%s', ino %lu", xent->name, 2701 + (unsigned long)xattr_inum); 2706 2702 2707 2703 nm.name = xent->name; 2708 2704 nm.len = le16_to_cpu(xent->nlen);
+9 -3
fs/ubifs/ubifs.h
··· 753 753 }; 754 754 755 755 /** 756 - * struct bu_info - bulk-read information 756 + * struct bu_info - bulk-read information. 757 757 * @key: first data node key 758 758 * @zbranch: zbranches of data nodes to bulk read 759 759 * @buf: buffer to read into ··· 969 969 * @mst_node: master node 970 970 * @mst_offs: offset of valid master node 971 971 * @mst_mutex: protects the master node area, @mst_node, and @mst_offs 972 - * @bulk_read_buf_size: buffer size for bulk-reads 972 + * 973 + * @max_bu_buf_len: maximum bulk-read buffer length 974 + * @bu_mutex: protects the pre-allocated bulk-read buffer and @c->bu 975 + * @bu: pre-allocated bulk-read information 973 976 * 974 977 * @log_lebs: number of logical eraseblocks in the log 975 978 * @log_bytes: log size in bytes ··· 1220 1217 struct ubifs_mst_node *mst_node; 1221 1218 int mst_offs; 1222 1219 struct mutex mst_mutex; 1223 - int bulk_read_buf_size; 1220 + 1221 + int max_bu_buf_len; 1222 + struct mutex bu_mutex; 1223 + struct bu_info bu; 1224 1224 1225 1225 int log_lebs; 1226 1226 long long log_bytes;