Merge branch 'linux-next' of git://git.infradead.org/ubifs-2.6

* 'linux-next' of git://git.infradead.org/ubifs-2.6:
UBIFS: pre-allocate bulk-read buffer
UBIFS: do not allocate too much
UBIFS: do not print scary memory allocation warnings
UBIFS: allow for gaps when dirtying the LPT
UBIFS: fix compilation warnings
MAINTAINERS: change UBI/UBIFS git tree URLs
UBIFS: endian handling fixes and annotations
UBIFS: remove printk

+223 -111
+2 -2
MAINTAINERS
··· 4236 P: Adrian Hunter 4237 M: ext-adrian.hunter@nokia.com 4238 L: linux-mtd@lists.infradead.org 4239 - T: git git://git.infradead.org/~dedekind/ubifs-2.6.git 4240 W: http://www.linux-mtd.infradead.org/doc/ubifs.html 4241 S: Maintained 4242 ··· 4290 M: dedekind@infradead.org 4291 W: http://www.linux-mtd.infradead.org/ 4292 L: linux-mtd@lists.infradead.org 4293 - T: git git://git.infradead.org/~dedekind/ubi-2.6.git 4294 S: Maintained 4295 4296 USB ACM DRIVER
··· 4236 P: Adrian Hunter 4237 M: ext-adrian.hunter@nokia.com 4238 L: linux-mtd@lists.infradead.org 4239 + T: git git://git.infradead.org/ubifs-2.6.git 4240 W: http://www.linux-mtd.infradead.org/doc/ubifs.html 4241 S: Maintained 4242 ··· 4290 M: dedekind@infradead.org 4291 W: http://www.linux-mtd.infradead.org/ 4292 L: linux-mtd@lists.infradead.org 4293 + T: git git://git.infradead.org/ubi-2.6.git 4294 S: Maintained 4295 4296 USB ACM DRIVER
+2 -2
fs/ubifs/commit.c
··· 234 int err; 235 struct ubifs_info *c = info; 236 237 - ubifs_msg("background thread \"%s\" started, PID %d", 238 - c->bgt_name, current->pid); 239 set_freezable(); 240 241 while (1) {
··· 234 int err; 235 struct ubifs_info *c = info; 236 237 + dbg_msg("background thread \"%s\" started, PID %d", 238 + c->bgt_name, current->pid); 239 set_freezable(); 240 241 while (1) {
+39 -27
fs/ubifs/debug.c
··· 101 if (c->key_fmt == UBIFS_SIMPLE_KEY_FMT) { 102 switch (type) { 103 case UBIFS_INO_KEY: 104 - sprintf(p, "(%lu, %s)", key_inum(c, key), 105 get_key_type(type)); 106 break; 107 case UBIFS_DENT_KEY: 108 case UBIFS_XENT_KEY: 109 - sprintf(p, "(%lu, %s, %#08x)", key_inum(c, key), 110 get_key_type(type), key_hash(c, key)); 111 break; 112 case UBIFS_DATA_KEY: 113 - sprintf(p, "(%lu, %s, %u)", key_inum(c, key), 114 get_key_type(type), key_block(c, key)); 115 break; 116 case UBIFS_TRUN_KEY: 117 sprintf(p, "(%lu, %s)", 118 - key_inum(c, key), get_key_type(type)); 119 break; 120 default: 121 sprintf(p, "(bad key type: %#08x, %#08x)", ··· 367 le32_to_cpu(mst->ihead_lnum)); 368 printk(KERN_DEBUG "\tihead_offs %u\n", 369 le32_to_cpu(mst->ihead_offs)); 370 - printk(KERN_DEBUG "\tindex_size %u\n", 371 - le32_to_cpu(mst->index_size)); 372 printk(KERN_DEBUG "\tlpt_lnum %u\n", 373 le32_to_cpu(mst->lpt_lnum)); 374 printk(KERN_DEBUG "\tlpt_offs %u\n", ··· 1592 1593 if (inum > c->highest_inum) { 1594 ubifs_err("too high inode number, max. 
is %lu", 1595 - c->highest_inum); 1596 return ERR_PTR(-EINVAL); 1597 } 1598 ··· 1671 ino_key_init(c, &key, inum); 1672 err = ubifs_lookup_level0(c, &key, &znode, &n); 1673 if (!err) { 1674 - ubifs_err("inode %lu not found in index", inum); 1675 return ERR_PTR(-ENOENT); 1676 } else if (err < 0) { 1677 - ubifs_err("error %d while looking up inode %lu", err, inum); 1678 return ERR_PTR(err); 1679 } 1680 1681 zbr = &znode->zbranch[n]; 1682 if (zbr->len < UBIFS_INO_NODE_SZ) { 1683 - ubifs_err("bad node %lu node length %d", inum, zbr->len); 1684 return ERR_PTR(-EINVAL); 1685 } 1686 ··· 1702 kfree(ino); 1703 if (IS_ERR(fscki)) { 1704 ubifs_err("error %ld while adding inode %lu node", 1705 - PTR_ERR(fscki), inum); 1706 return fscki; 1707 } 1708 ··· 1791 if (IS_ERR(fscki)) { 1792 err = PTR_ERR(fscki); 1793 ubifs_err("error %d while processing data node and " 1794 - "trying to find inode node %lu", err, inum); 1795 goto out_dump; 1796 } 1797 ··· 1825 if (IS_ERR(fscki)) { 1826 err = PTR_ERR(fscki); 1827 ubifs_err("error %d while processing entry node and " 1828 - "trying to find inode node %lu", err, inum); 1829 goto out_dump; 1830 } 1831 ··· 1839 err = PTR_ERR(fscki); 1840 ubifs_err("error %d while processing entry node and " 1841 "trying to find parent inode node %lu", 1842 - err, inum); 1843 goto out_dump; 1844 } 1845 ··· 1930 fscki->references != 1) { 1931 ubifs_err("directory inode %lu has %d " 1932 "direntries which refer it, but " 1933 - "should be 1", fscki->inum, 1934 fscki->references); 1935 goto out_dump; 1936 } ··· 1939 fscki->references != 0) { 1940 ubifs_err("root inode %lu has non-zero (%d) " 1941 "direntries which refer it", 1942 - fscki->inum, fscki->references); 1943 goto out_dump; 1944 } 1945 if (fscki->calc_sz != fscki->size) { 1946 ubifs_err("directory inode %lu size is %lld, " 1947 "but calculated size is %lld", 1948 - fscki->inum, fscki->size, 1949 - fscki->calc_sz); 1950 goto out_dump; 1951 } 1952 if (fscki->calc_cnt != fscki->nlink) { 1953 
ubifs_err("directory inode %lu nlink is %d, " 1954 "but calculated nlink is %d", 1955 - fscki->inum, fscki->nlink, 1956 - fscki->calc_cnt); 1957 goto out_dump; 1958 } 1959 } else { 1960 if (fscki->references != fscki->nlink) { 1961 ubifs_err("inode %lu nlink is %d, but " 1962 - "calculated nlink is %d", fscki->inum, 1963 fscki->nlink, fscki->references); 1964 goto out_dump; 1965 } ··· 1969 if (fscki->xattr_sz != fscki->calc_xsz) { 1970 ubifs_err("inode %lu has xattr size %u, but " 1971 "calculated size is %lld", 1972 - fscki->inum, fscki->xattr_sz, 1973 fscki->calc_xsz); 1974 goto out_dump; 1975 } 1976 if (fscki->xattr_cnt != fscki->calc_xcnt) { 1977 ubifs_err("inode %lu has %u xattrs, but " 1978 - "calculated count is %lld", fscki->inum, 1979 fscki->xattr_cnt, fscki->calc_xcnt); 1980 goto out_dump; 1981 } 1982 if (fscki->xattr_nms != fscki->calc_xnms) { 1983 ubifs_err("inode %lu has xattr names' size %u, but " 1984 "calculated names' size is %lld", 1985 - fscki->inum, fscki->xattr_nms, 1986 fscki->calc_xnms); 1987 goto out_dump; 1988 } ··· 1996 ino_key_init(c, &key, fscki->inum); 1997 err = ubifs_lookup_level0(c, &key, &znode, &n); 1998 if (!err) { 1999 - ubifs_err("inode %lu not found in index", fscki->inum); 2000 return -ENOENT; 2001 } else if (err < 0) { 2002 ubifs_err("error %d while looking up inode %lu", 2003 - err, fscki->inum); 2004 return err; 2005 } 2006 ··· 2019 } 2020 2021 ubifs_msg("dump of the inode %lu sitting in LEB %d:%d", 2022 - fscki->inum, zbr->lnum, zbr->offs); 2023 dbg_dump_node(c, ino); 2024 kfree(ino); 2025 return -EINVAL;
··· 101 if (c->key_fmt == UBIFS_SIMPLE_KEY_FMT) { 102 switch (type) { 103 case UBIFS_INO_KEY: 104 + sprintf(p, "(%lu, %s)", (unsigned long)key_inum(c, key), 105 get_key_type(type)); 106 break; 107 case UBIFS_DENT_KEY: 108 case UBIFS_XENT_KEY: 109 + sprintf(p, "(%lu, %s, %#08x)", 110 + (unsigned long)key_inum(c, key), 111 get_key_type(type), key_hash(c, key)); 112 break; 113 case UBIFS_DATA_KEY: 114 + sprintf(p, "(%lu, %s, %u)", 115 + (unsigned long)key_inum(c, key), 116 get_key_type(type), key_block(c, key)); 117 break; 118 case UBIFS_TRUN_KEY: 119 sprintf(p, "(%lu, %s)", 120 + (unsigned long)key_inum(c, key), 121 + get_key_type(type)); 122 break; 123 default: 124 sprintf(p, "(bad key type: %#08x, %#08x)", ··· 364 le32_to_cpu(mst->ihead_lnum)); 365 printk(KERN_DEBUG "\tihead_offs %u\n", 366 le32_to_cpu(mst->ihead_offs)); 367 + printk(KERN_DEBUG "\tindex_size %llu\n", 368 + (unsigned long long)le64_to_cpu(mst->index_size)); 369 printk(KERN_DEBUG "\tlpt_lnum %u\n", 370 le32_to_cpu(mst->lpt_lnum)); 371 printk(KERN_DEBUG "\tlpt_offs %u\n", ··· 1589 1590 if (inum > c->highest_inum) { 1591 ubifs_err("too high inode number, max. 
is %lu", 1592 + (unsigned long)c->highest_inum); 1593 return ERR_PTR(-EINVAL); 1594 } 1595 ··· 1668 ino_key_init(c, &key, inum); 1669 err = ubifs_lookup_level0(c, &key, &znode, &n); 1670 if (!err) { 1671 + ubifs_err("inode %lu not found in index", (unsigned long)inum); 1672 return ERR_PTR(-ENOENT); 1673 } else if (err < 0) { 1674 + ubifs_err("error %d while looking up inode %lu", 1675 + err, (unsigned long)inum); 1676 return ERR_PTR(err); 1677 } 1678 1679 zbr = &znode->zbranch[n]; 1680 if (zbr->len < UBIFS_INO_NODE_SZ) { 1681 + ubifs_err("bad node %lu node length %d", 1682 + (unsigned long)inum, zbr->len); 1683 return ERR_PTR(-EINVAL); 1684 } 1685 ··· 1697 kfree(ino); 1698 if (IS_ERR(fscki)) { 1699 ubifs_err("error %ld while adding inode %lu node", 1700 + PTR_ERR(fscki), (unsigned long)inum); 1701 return fscki; 1702 } 1703 ··· 1786 if (IS_ERR(fscki)) { 1787 err = PTR_ERR(fscki); 1788 ubifs_err("error %d while processing data node and " 1789 + "trying to find inode node %lu", 1790 + err, (unsigned long)inum); 1791 goto out_dump; 1792 } 1793 ··· 1819 if (IS_ERR(fscki)) { 1820 err = PTR_ERR(fscki); 1821 ubifs_err("error %d while processing entry node and " 1822 + "trying to find inode node %lu", 1823 + err, (unsigned long)inum); 1824 goto out_dump; 1825 } 1826 ··· 1832 err = PTR_ERR(fscki); 1833 ubifs_err("error %d while processing entry node and " 1834 "trying to find parent inode node %lu", 1835 + err, (unsigned long)inum); 1836 goto out_dump; 1837 } 1838 ··· 1923 fscki->references != 1) { 1924 ubifs_err("directory inode %lu has %d " 1925 "direntries which refer it, but " 1926 + "should be 1", 1927 + (unsigned long)fscki->inum, 1928 fscki->references); 1929 goto out_dump; 1930 } ··· 1931 fscki->references != 0) { 1932 ubifs_err("root inode %lu has non-zero (%d) " 1933 "direntries which refer it", 1934 + (unsigned long)fscki->inum, 1935 + fscki->references); 1936 goto out_dump; 1937 } 1938 if (fscki->calc_sz != fscki->size) { 1939 ubifs_err("directory inode %lu size 
is %lld, " 1940 "but calculated size is %lld", 1941 + (unsigned long)fscki->inum, 1942 + fscki->size, fscki->calc_sz); 1943 goto out_dump; 1944 } 1945 if (fscki->calc_cnt != fscki->nlink) { 1946 ubifs_err("directory inode %lu nlink is %d, " 1947 "but calculated nlink is %d", 1948 + (unsigned long)fscki->inum, 1949 + fscki->nlink, fscki->calc_cnt); 1950 goto out_dump; 1951 } 1952 } else { 1953 if (fscki->references != fscki->nlink) { 1954 ubifs_err("inode %lu nlink is %d, but " 1955 + "calculated nlink is %d", 1956 + (unsigned long)fscki->inum, 1957 fscki->nlink, fscki->references); 1958 goto out_dump; 1959 } ··· 1959 if (fscki->xattr_sz != fscki->calc_xsz) { 1960 ubifs_err("inode %lu has xattr size %u, but " 1961 "calculated size is %lld", 1962 + (unsigned long)fscki->inum, fscki->xattr_sz, 1963 fscki->calc_xsz); 1964 goto out_dump; 1965 } 1966 if (fscki->xattr_cnt != fscki->calc_xcnt) { 1967 ubifs_err("inode %lu has %u xattrs, but " 1968 + "calculated count is %lld", 1969 + (unsigned long)fscki->inum, 1970 fscki->xattr_cnt, fscki->calc_xcnt); 1971 goto out_dump; 1972 } 1973 if (fscki->xattr_nms != fscki->calc_xnms) { 1974 ubifs_err("inode %lu has xattr names' size %u, but " 1975 "calculated names' size is %lld", 1976 + (unsigned long)fscki->inum, fscki->xattr_nms, 1977 fscki->calc_xnms); 1978 goto out_dump; 1979 } ··· 1985 ino_key_init(c, &key, fscki->inum); 1986 err = ubifs_lookup_level0(c, &key, &znode, &n); 1987 if (!err) { 1988 + ubifs_err("inode %lu not found in index", 1989 + (unsigned long)fscki->inum); 1990 return -ENOENT; 1991 } else if (err < 0) { 1992 ubifs_err("error %d while looking up inode %lu", 1993 + err, (unsigned long)fscki->inum); 1994 return err; 1995 } 1996 ··· 2007 } 2008 2009 ubifs_msg("dump of the inode %lu sitting in LEB %d:%d", 2010 + (unsigned long)fscki->inum, zbr->lnum, zbr->offs); 2011 dbg_dump_node(c, ino); 2012 kfree(ino); 2013 return -EINVAL;
+3 -2
fs/ubifs/dir.c
··· 161 return ERR_PTR(-EINVAL); 162 } 163 ubifs_warn("running out of inode numbers (current %lu, max %d)", 164 - c->highest_inum, INUM_WATERMARK); 165 } 166 167 inode->i_ino = ++c->highest_inum; ··· 428 dbg_gen("feed '%s', ino %llu, new f_pos %#x", 429 dent->name, (unsigned long long)le64_to_cpu(dent->inum), 430 key_hash_flash(c, &dent->key)); 431 - ubifs_assert(dent->ch.sqnum > ubifs_inode(dir)->creat_sqnum); 432 433 nm.len = le16_to_cpu(dent->nlen); 434 over = filldir(dirent, dent->name, nm.len, file->f_pos,
··· 161 return ERR_PTR(-EINVAL); 162 } 163 ubifs_warn("running out of inode numbers (current %lu, max %d)", 164 + (unsigned long)c->highest_inum, INUM_WATERMARK); 165 } 166 167 inode->i_ino = ++c->highest_inum; ··· 428 dbg_gen("feed '%s', ino %llu, new f_pos %#x", 429 dent->name, (unsigned long long)le64_to_cpu(dent->inum), 430 key_hash_flash(c, &dent->key)); 431 + ubifs_assert(le64_to_cpu(dent->ch.sqnum) > 432 + ubifs_inode(dir)->creat_sqnum); 433 434 nm.len = le16_to_cpu(dent->nlen); 435 over = filldir(dirent, dent->name, nm.len, file->f_pos,
+63 -28
fs/ubifs/file.c
··· 72 return err; 73 } 74 75 - ubifs_assert(dn->ch.sqnum > ubifs_inode(inode)->creat_sqnum); 76 77 len = le32_to_cpu(dn->size); 78 if (len <= 0 || len > UBIFS_BLOCK_SIZE) ··· 626 627 dn = bu->buf + (bu->zbranch[nn].offs - offs); 628 629 - ubifs_assert(dn->ch.sqnum > 630 ubifs_inode(inode)->creat_sqnum); 631 632 len = le32_to_cpu(dn->size); ··· 691 /** 692 * ubifs_do_bulk_read - do bulk-read. 693 * @c: UBIFS file-system description object 694 - * @page1: first page 695 * 696 * This function returns %1 if the bulk-read is done, otherwise %0 is returned. 697 */ 698 - static int ubifs_do_bulk_read(struct ubifs_info *c, struct page *page1) 699 { 700 pgoff_t offset = page1->index, end_index; 701 struct address_space *mapping = page1->mapping; 702 struct inode *inode = mapping->host; 703 struct ubifs_inode *ui = ubifs_inode(inode); 704 - struct bu_info *bu; 705 int err, page_idx, page_cnt, ret = 0, n = 0; 706 loff_t isize; 707 - 708 - bu = kmalloc(sizeof(struct bu_info), GFP_NOFS); 709 - if (!bu) 710 - return 0; 711 - 712 - bu->buf_len = c->bulk_read_buf_size; 713 - bu->buf = kmalloc(bu->buf_len, GFP_NOFS); 714 - if (!bu->buf) 715 - goto out_free; 716 - 717 - data_key_init(c, &bu->key, inode->i_ino, 718 - offset << UBIFS_BLOCKS_PER_PAGE_SHIFT); 719 720 err = ubifs_tnc_get_bu_keys(c, bu); 721 if (err) ··· 725 * together. If all the pages were like this, bulk-read would 726 * reduce performance, so we turn it off for a while. 
727 */ 728 - ui->read_in_a_row = 0; 729 - ui->bulk_read = 0; 730 - goto out_free; 731 } 732 733 if (bu->cnt) { 734 err = ubifs_tnc_bulk_read(c, bu); 735 if (err) 736 goto out_warn; ··· 782 ui->last_page_read = offset + page_idx - 1; 783 784 out_free: 785 - kfree(bu->buf); 786 - kfree(bu); 787 return ret; 788 789 out_warn: 790 ubifs_warn("ignoring error %d and skipping bulk-read", err); 791 goto out_free; 792 } 793 ··· 810 struct ubifs_info *c = inode->i_sb->s_fs_info; 811 struct ubifs_inode *ui = ubifs_inode(inode); 812 pgoff_t index = page->index, last_page_read = ui->last_page_read; 813 - int ret = 0; 814 815 ui->last_page_read = index; 816 - 817 if (!c->bulk_read) 818 return 0; 819 /* 820 - * Bulk-read is protected by ui_mutex, but it is an optimization, so 821 - * don't bother if we cannot lock the mutex. 822 */ 823 if (!mutex_trylock(&ui->ui_mutex)) 824 return 0; 825 if (index != last_page_read + 1) { 826 /* Turn off bulk-read if we stop reading sequentially */ 827 ui->read_in_a_row = 1; ··· 831 ui->bulk_read = 0; 832 goto out_unlock; 833 } 834 if (!ui->bulk_read) { 835 ui->read_in_a_row += 1; 836 if (ui->read_in_a_row < 3) ··· 839 /* Three reads in a row, so switch on bulk-read */ 840 ui->bulk_read = 1; 841 } 842 - ret = ubifs_do_bulk_read(c, page); 843 out_unlock: 844 mutex_unlock(&ui->ui_mutex); 845 - return ret; 846 } 847 848 static int ubifs_readpage(struct file *file, struct page *page)
··· 72 return err; 73 } 74 75 + ubifs_assert(le64_to_cpu(dn->ch.sqnum) > ubifs_inode(inode)->creat_sqnum); 76 77 len = le32_to_cpu(dn->size); 78 if (len <= 0 || len > UBIFS_BLOCK_SIZE) ··· 626 627 dn = bu->buf + (bu->zbranch[nn].offs - offs); 628 629 + ubifs_assert(le64_to_cpu(dn->ch.sqnum) > 630 ubifs_inode(inode)->creat_sqnum); 631 632 len = le32_to_cpu(dn->size); ··· 691 /** 692 * ubifs_do_bulk_read - do bulk-read. 693 * @c: UBIFS file-system description object 694 + * @bu: bulk-read information 695 + * @page1: first page to read 696 * 697 * This function returns %1 if the bulk-read is done, otherwise %0 is returned. 698 */ 699 + static int ubifs_do_bulk_read(struct ubifs_info *c, struct bu_info *bu, 700 + struct page *page1) 701 { 702 pgoff_t offset = page1->index, end_index; 703 struct address_space *mapping = page1->mapping; 704 struct inode *inode = mapping->host; 705 struct ubifs_inode *ui = ubifs_inode(inode); 706 int err, page_idx, page_cnt, ret = 0, n = 0; 707 + int allocate = bu->buf ? 0 : 1; 708 loff_t isize; 709 710 err = ubifs_tnc_get_bu_keys(c, bu); 711 if (err) ··· 735 * together. If all the pages were like this, bulk-read would 736 * reduce performance, so we turn it off for a while. 737 */ 738 + goto out_bu_off; 739 } 740 741 if (bu->cnt) { 742 + if (allocate) { 743 + /* 744 + * Allocate bulk-read buffer depending on how many data 745 + * nodes we are going to read. 
746 + */ 747 + bu->buf_len = bu->zbranch[bu->cnt - 1].offs + 748 + bu->zbranch[bu->cnt - 1].len - 749 + bu->zbranch[0].offs; 750 + ubifs_assert(bu->buf_len > 0); 751 + ubifs_assert(bu->buf_len <= c->leb_size); 752 + bu->buf = kmalloc(bu->buf_len, GFP_NOFS | __GFP_NOWARN); 753 + if (!bu->buf) 754 + goto out_bu_off; 755 + } 756 + 757 err = ubifs_tnc_bulk_read(c, bu); 758 if (err) 759 goto out_warn; ··· 779 ui->last_page_read = offset + page_idx - 1; 780 781 out_free: 782 + if (allocate) 783 + kfree(bu->buf); 784 return ret; 785 786 out_warn: 787 ubifs_warn("ignoring error %d and skipping bulk-read", err); 788 + goto out_free; 789 + 790 + out_bu_off: 791 + ui->read_in_a_row = ui->bulk_read = 0; 792 goto out_free; 793 } 794 ··· 803 struct ubifs_info *c = inode->i_sb->s_fs_info; 804 struct ubifs_inode *ui = ubifs_inode(inode); 805 pgoff_t index = page->index, last_page_read = ui->last_page_read; 806 + struct bu_info *bu; 807 + int err = 0, allocated = 0; 808 809 ui->last_page_read = index; 810 if (!c->bulk_read) 811 return 0; 812 + 813 /* 814 + * Bulk-read is protected by @ui->ui_mutex, but it is an optimization, 815 + * so don't bother if we cannot lock the mutex. 816 */ 817 if (!mutex_trylock(&ui->ui_mutex)) 818 return 0; 819 + 820 if (index != last_page_read + 1) { 821 /* Turn off bulk-read if we stop reading sequentially */ 822 ui->read_in_a_row = 1; ··· 822 ui->bulk_read = 0; 823 goto out_unlock; 824 } 825 + 826 if (!ui->bulk_read) { 827 ui->read_in_a_row += 1; 828 if (ui->read_in_a_row < 3) ··· 829 /* Three reads in a row, so switch on bulk-read */ 830 ui->bulk_read = 1; 831 } 832 + 833 + /* 834 + * If possible, try to use pre-allocated bulk-read information, which 835 + * is protected by @c->bu_mutex. 
836 + */ 837 + if (mutex_trylock(&c->bu_mutex)) 838 + bu = &c->bu; 839 + else { 840 + bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN); 841 + if (!bu) 842 + goto out_unlock; 843 + 844 + bu->buf = NULL; 845 + allocated = 1; 846 + } 847 + 848 + bu->buf_len = c->max_bu_buf_len; 849 + data_key_init(c, &bu->key, inode->i_ino, 850 + page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT); 851 + err = ubifs_do_bulk_read(c, bu, page); 852 + 853 + if (!allocated) 854 + mutex_unlock(&c->bu_mutex); 855 + else 856 + kfree(bu); 857 + 858 out_unlock: 859 mutex_unlock(&ui->ui_mutex); 860 + return err; 861 } 862 863 static int ubifs_readpage(struct file *file, struct page *page)
+5 -3
fs/ubifs/journal.c
··· 690 int dlen = UBIFS_DATA_NODE_SZ + UBIFS_BLOCK_SIZE * WORST_COMPR_FACTOR; 691 struct ubifs_inode *ui = ubifs_inode(inode); 692 693 - dbg_jnl("ino %lu, blk %u, len %d, key %s", key_inum(c, key), 694 - key_block(c, key), len, DBGKEY(key)); 695 ubifs_assert(len <= UBIFS_BLOCK_SIZE); 696 697 data = kmalloc(dlen, GFP_NOFS); ··· 1129 ino_t inum = inode->i_ino; 1130 unsigned int blk; 1131 1132 - dbg_jnl("ino %lu, size %lld -> %lld", inum, old_size, new_size); 1133 ubifs_assert(!ui->data_len); 1134 ubifs_assert(S_ISREG(inode->i_mode)); 1135 ubifs_assert(mutex_is_locked(&ui->ui_mutex));
··· 690 int dlen = UBIFS_DATA_NODE_SZ + UBIFS_BLOCK_SIZE * WORST_COMPR_FACTOR; 691 struct ubifs_inode *ui = ubifs_inode(inode); 692 693 + dbg_jnl("ino %lu, blk %u, len %d, key %s", 694 + (unsigned long)key_inum(c, key), key_block(c, key), len, 695 + DBGKEY(key)); 696 ubifs_assert(len <= UBIFS_BLOCK_SIZE); 697 698 data = kmalloc(dlen, GFP_NOFS); ··· 1128 ino_t inum = inode->i_ino; 1129 unsigned int blk; 1130 1131 + dbg_jnl("ino %lu, size %lld -> %lld", 1132 + (unsigned long)inum, old_size, new_size); 1133 ubifs_assert(!ui->data_len); 1134 ubifs_assert(S_ISREG(inode->i_mode)); 1135 ubifs_assert(mutex_is_locked(&ui->ui_mutex));
+2 -2
fs/ubifs/key.h
··· 345 { 346 const union ubifs_key *key = k; 347 348 - return le32_to_cpu(key->u32[1]) >> UBIFS_S_KEY_BLOCK_BITS; 349 } 350 351 /** ··· 416 { 417 const union ubifs_key *key = k; 418 419 - return le32_to_cpu(key->u32[1]) & UBIFS_S_KEY_BLOCK_MASK; 420 } 421 422 /**
··· 345 { 346 const union ubifs_key *key = k; 347 348 + return le32_to_cpu(key->j32[1]) >> UBIFS_S_KEY_BLOCK_BITS; 349 } 350 351 /** ··· 416 { 417 const union ubifs_key *key = k; 418 419 + return le32_to_cpu(key->j32[1]) & UBIFS_S_KEY_BLOCK_MASK; 420 } 421 422 /**
-2
fs/ubifs/lpt_commit.c
··· 571 /* We assume here that LEB zero is never an LPT LEB */ 572 if (nnode->nbranch[iip].lnum) 573 return ubifs_get_pnode(c, nnode, iip); 574 - else 575 - return NULL; 576 } 577 578 /* Go up while can't go right */
··· 571 /* We assume here that LEB zero is never an LPT LEB */ 572 if (nnode->nbranch[iip].lnum) 573 return ubifs_get_pnode(c, nnode, iip); 574 } 575 576 /* Go up while can't go right */
+16 -12
fs/ubifs/orphan.c
··· 105 list_add_tail(&orphan->list, &c->orph_list); 106 list_add_tail(&orphan->new_list, &c->orph_new); 107 spin_unlock(&c->orphan_lock); 108 - dbg_gen("ino %lu", inum); 109 return 0; 110 } 111 ··· 132 else { 133 if (o->dnext) { 134 spin_unlock(&c->orphan_lock); 135 - dbg_gen("deleted twice ino %lu", inum); 136 return; 137 } 138 if (o->cnext) { 139 o->dnext = c->orph_dnext; 140 c->orph_dnext = o; 141 spin_unlock(&c->orphan_lock); 142 - dbg_gen("delete later ino %lu", inum); 143 return; 144 } 145 rb_erase(p, &c->orph_tree); ··· 153 } 154 spin_unlock(&c->orphan_lock); 155 kfree(o); 156 - dbg_gen("inum %lu", inum); 157 return; 158 } 159 } 160 spin_unlock(&c->orphan_lock); 161 - dbg_err("missing orphan ino %lu", inum); 162 dbg_dump_stack(); 163 } 164 ··· 450 rb_erase(&orphan->rb, &c->orph_tree); 451 list_del(&orphan->list); 452 c->tot_orphans -= 1; 453 - dbg_gen("deleting orphan ino %lu", orphan->inum); 454 kfree(orphan); 455 } 456 c->orph_dnext = NULL; ··· 538 list_add_tail(&orphan->list, &c->orph_list); 539 orphan->dnext = c->orph_dnext; 540 c->orph_dnext = orphan; 541 - dbg_mnt("ino %lu, new %d, tot %d", 542 - inum, c->new_orphans, c->tot_orphans); 543 return 0; 544 } 545 ··· 611 n = (le32_to_cpu(orph->ch.len) - UBIFS_ORPH_NODE_SZ) >> 3; 612 for (i = 0; i < n; i++) { 613 inum = le64_to_cpu(orph->inos[i]); 614 - dbg_rcvry("deleting orphaned inode %lu", inum); 615 err = ubifs_tnc_remove_ino(c, inum); 616 if (err) 617 return err; ··· 843 if (inum != ci->last_ino) { 844 /* Lowest node type is the inode node, so it comes first */ 845 if (key_type(c, &zbr->key) != UBIFS_INO_KEY) 846 - ubifs_err("found orphan node ino %lu, type %d", inum, 847 - key_type(c, &zbr->key)); 848 ci->last_ino = inum; 849 ci->tot_inos += 1; 850 err = ubifs_tnc_read_node(c, zbr, ci->node); ··· 856 /* Must be recorded as an orphan */ 857 if (!dbg_find_check_orphan(&ci->root, inum) && 858 !dbg_find_orphan(c, inum)) { 859 - ubifs_err("missing orphan, ino %lu", inum); 860 ci->missing += 1; 861 } 862 }
··· 105 list_add_tail(&orphan->list, &c->orph_list); 106 list_add_tail(&orphan->new_list, &c->orph_new); 107 spin_unlock(&c->orphan_lock); 108 + dbg_gen("ino %lu", (unsigned long)inum); 109 return 0; 110 } 111 ··· 132 else { 133 if (o->dnext) { 134 spin_unlock(&c->orphan_lock); 135 + dbg_gen("deleted twice ino %lu", 136 + (unsigned long)inum); 137 return; 138 } 139 if (o->cnext) { 140 o->dnext = c->orph_dnext; 141 c->orph_dnext = o; 142 spin_unlock(&c->orphan_lock); 143 + dbg_gen("delete later ino %lu", 144 + (unsigned long)inum); 145 return; 146 } 147 rb_erase(p, &c->orph_tree); ··· 151 } 152 spin_unlock(&c->orphan_lock); 153 kfree(o); 154 + dbg_gen("inum %lu", (unsigned long)inum); 155 return; 156 } 157 } 158 spin_unlock(&c->orphan_lock); 159 + dbg_err("missing orphan ino %lu", (unsigned long)inum); 160 dbg_dump_stack(); 161 } 162 ··· 448 rb_erase(&orphan->rb, &c->orph_tree); 449 list_del(&orphan->list); 450 c->tot_orphans -= 1; 451 + dbg_gen("deleting orphan ino %lu", (unsigned long)orphan->inum); 452 kfree(orphan); 453 } 454 c->orph_dnext = NULL; ··· 536 list_add_tail(&orphan->list, &c->orph_list); 537 orphan->dnext = c->orph_dnext; 538 c->orph_dnext = orphan; 539 + dbg_mnt("ino %lu, new %d, tot %d", (unsigned long)inum, 540 + c->new_orphans, c->tot_orphans); 541 return 0; 542 } 543 ··· 609 n = (le32_to_cpu(orph->ch.len) - UBIFS_ORPH_NODE_SZ) >> 3; 610 for (i = 0; i < n; i++) { 611 inum = le64_to_cpu(orph->inos[i]); 612 + dbg_rcvry("deleting orphaned inode %lu", 613 + (unsigned long)inum); 614 err = ubifs_tnc_remove_ino(c, inum); 615 if (err) 616 return err; ··· 840 if (inum != ci->last_ino) { 841 /* Lowest node type is the inode node, so it comes first */ 842 if (key_type(c, &zbr->key) != UBIFS_INO_KEY) 843 + ubifs_err("found orphan node ino %lu, type %d", 844 + (unsigned long)inum, key_type(c, &zbr->key)); 845 ci->last_ino = inum; 846 ci->tot_inos += 1; 847 err = ubifs_tnc_read_node(c, zbr, ci->node); ··· 853 /* Must be recorded as an orphan */ 854 if 
(!dbg_find_check_orphan(&ci->root, inum) && 855 !dbg_find_orphan(c, inum)) { 856 + ubifs_err("missing orphan, ino %lu", 857 + (unsigned long)inum); 858 ci->missing += 1; 859 } 860 }
+9 -8
fs/ubifs/recovery.c
··· 168 struct ubifs_mst_node *mst) 169 { 170 int err = 0, lnum = UBIFS_MST_LNUM, sz = c->mst_node_alsz; 171 - uint32_t save_flags; 172 173 dbg_rcvry("recovery"); 174 175 save_flags = mst->flags; 176 - mst->flags = cpu_to_le32(le32_to_cpu(mst->flags) | UBIFS_MST_RCVRY); 177 178 ubifs_prepare_node(c, mst, UBIFS_MST_NODE_SZ, 1); 179 err = ubi_leb_change(c->ubi, lnum, mst, sz, UBI_SHORTTERM); ··· 1435 err = ubi_leb_change(c->ubi, lnum, c->sbuf, len, UBI_UNKNOWN); 1436 if (err) 1437 goto out; 1438 - dbg_rcvry("inode %lu at %d:%d size %lld -> %lld ", e->inum, lnum, offs, 1439 - i_size, e->d_size); 1440 return 0; 1441 1442 out: 1443 ubifs_warn("inode %lu failed to fix size %lld -> %lld error %d", 1444 - e->inum, e->i_size, e->d_size, err); 1445 return err; 1446 } 1447 ··· 1472 return err; 1473 if (err == -ENOENT) { 1474 /* Remove data nodes that have no inode */ 1475 - dbg_rcvry("removing ino %lu", e->inum); 1476 err = ubifs_tnc_remove_ino(c, e->inum); 1477 if (err) 1478 return err; ··· 1494 return PTR_ERR(inode); 1495 if (inode->i_size < e->d_size) { 1496 dbg_rcvry("ino %lu size %lld -> %lld", 1497 - e->inum, e->d_size, 1498 - inode->i_size); 1499 inode->i_size = e->d_size; 1500 ubifs_inode(inode)->ui_size = e->d_size; 1501 e->inode = inode;
··· 168 struct ubifs_mst_node *mst) 169 { 170 int err = 0, lnum = UBIFS_MST_LNUM, sz = c->mst_node_alsz; 171 + __le32 save_flags; 172 173 dbg_rcvry("recovery"); 174 175 save_flags = mst->flags; 176 + mst->flags |= cpu_to_le32(UBIFS_MST_RCVRY); 177 178 ubifs_prepare_node(c, mst, UBIFS_MST_NODE_SZ, 1); 179 err = ubi_leb_change(c->ubi, lnum, mst, sz, UBI_SHORTTERM); ··· 1435 err = ubi_leb_change(c->ubi, lnum, c->sbuf, len, UBI_UNKNOWN); 1436 if (err) 1437 goto out; 1438 + dbg_rcvry("inode %lu at %d:%d size %lld -> %lld ", 1439 + (unsigned long)e->inum, lnum, offs, i_size, e->d_size); 1440 return 0; 1441 1442 out: 1443 ubifs_warn("inode %lu failed to fix size %lld -> %lld error %d", 1444 + (unsigned long)e->inum, e->i_size, e->d_size, err); 1445 return err; 1446 } 1447 ··· 1472 return err; 1473 if (err == -ENOENT) { 1474 /* Remove data nodes that have no inode */ 1475 + dbg_rcvry("removing ino %lu", 1476 + (unsigned long)e->inum); 1477 err = ubifs_tnc_remove_ino(c, e->inum); 1478 if (err) 1479 return err; ··· 1493 return PTR_ERR(inode); 1494 if (inode->i_size < e->d_size) { 1495 dbg_rcvry("ino %lu size %lld -> %lld", 1496 + (unsigned long)e->inum, 1497 + e->d_size, inode->i_size); 1498 inode->i_size = e->d_size; 1499 ubifs_inode(inode)->ui_size = e->d_size; 1500 e->inode = inode;
+1 -1
fs/ubifs/replay.c
··· 1065 ubifs_assert(c->bud_bytes <= c->max_bud_bytes || c->need_recovery); 1066 dbg_mnt("finished, log head LEB %d:%d, max_sqnum %llu, " 1067 "highest_inum %lu", c->lhead_lnum, c->lhead_offs, c->max_sqnum, 1068 - c->highest_inum); 1069 out: 1070 destroy_replay_tree(c); 1071 destroy_bud_list(c);
··· 1065 ubifs_assert(c->bud_bytes <= c->max_bud_bytes || c->need_recovery); 1066 dbg_mnt("finished, log head LEB %d:%d, max_sqnum %llu, " 1067 "highest_inum %lu", c->lhead_lnum, c->lhead_offs, c->max_sqnum, 1068 + (unsigned long)c->highest_inum); 1069 out: 1070 destroy_replay_tree(c); 1071 destroy_bud_list(c);
+5 -4
fs/ubifs/sb.c
··· 81 int lpt_lebs, lpt_first, orph_lebs, big_lpt, ino_waste, sup_flags = 0; 82 int min_leb_cnt = UBIFS_MIN_LEB_CNT; 83 uint64_t tmp64, main_bytes; 84 85 /* Some functions called from here depend on the @c->key_len filed */ 86 c->key_len = UBIFS_SK_LEN; ··· 296 ino->ch.node_type = UBIFS_INO_NODE; 297 ino->creat_sqnum = cpu_to_le64(++c->max_sqnum); 298 ino->nlink = cpu_to_le32(2); 299 - tmp = cpu_to_le64(CURRENT_TIME_SEC.tv_sec); 300 - ino->atime_sec = tmp; 301 - ino->ctime_sec = tmp; 302 - ino->mtime_sec = tmp; 303 ino->atime_nsec = 0; 304 ino->ctime_nsec = 0; 305 ino->mtime_nsec = 0;
··· 81 int lpt_lebs, lpt_first, orph_lebs, big_lpt, ino_waste, sup_flags = 0; 82 int min_leb_cnt = UBIFS_MIN_LEB_CNT; 83 uint64_t tmp64, main_bytes; 84 + __le64 tmp_le64; 85 86 /* Some functions called from here depend on the @c->key_len filed */ 87 c->key_len = UBIFS_SK_LEN; ··· 295 ino->ch.node_type = UBIFS_INO_NODE; 296 ino->creat_sqnum = cpu_to_le64(++c->max_sqnum); 297 ino->nlink = cpu_to_le32(2); 298 + tmp_le64 = cpu_to_le64(CURRENT_TIME_SEC.tv_sec); 299 + ino->atime_sec = tmp_le64; 300 + ino->ctime_sec = tmp_le64; 301 + ino->mtime_sec = tmp_le64; 302 ino->atime_nsec = 0; 303 ino->ctime_nsec = 0; 304 ino->mtime_nsec = 0;
+58 -12
fs/ubifs/super.c
··· 36 #include <linux/mount.h> 37 #include "ubifs.h" 38 39 /* Slab cache for UBIFS inodes */ 40 struct kmem_cache *ubifs_inode_slab; 41 ··· 567 * calculations when reporting free space. 568 */ 569 c->leb_overhead = c->leb_size % UBIFS_MAX_DATA_NODE_SZ; 570 - /* Buffer size for bulk-reads */ 571 - c->bulk_read_buf_size = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ; 572 - if (c->bulk_read_buf_size > c->leb_size) 573 - c->bulk_read_buf_size = c->leb_size; 574 - if (c->bulk_read_buf_size > 128 * 1024) { 575 - /* Check if we can kmalloc more than 128KiB */ 576 - void *try = kmalloc(c->bulk_read_buf_size, GFP_KERNEL); 577 578 - kfree(try); 579 - if (!try) 580 - c->bulk_read_buf_size = 128 * 1024; 581 - } 582 return 0; 583 } 584 ··· 991 } 992 993 /** 994 * mount_ubifs - mount UBIFS file-system. 995 * @c: UBIFS file-system description object 996 * ··· 1086 goto out_free; 1087 } 1088 1089 c->always_chk_crc = 1; 1090 1091 err = ubifs_read_superblock(c); ··· 1323 out_dereg: 1324 dbg_failure_mode_deregistration(c); 1325 out_free: 1326 vfree(c->ileb_buf); 1327 vfree(c->sbuf); 1328 kfree(c->bottom_up_buf); ··· 1360 kfree(c->cbuf); 1361 kfree(c->rcvrd_mst_node); 1362 kfree(c->mst_node); 1363 vfree(c->sbuf); 1364 kfree(c->bottom_up_buf); 1365 UBIFS_DBG(vfree(c->dbg_buf)); 1366 - vfree(c->ileb_buf); 1367 dbg_failure_mode_deregistration(c); 1368 } 1369 ··· 1662 ubifs_err("invalid or unknown remount parameter"); 1663 return err; 1664 } 1665 if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) { 1666 err = ubifs_remount_rw(c); 1667 if (err) 1668 return err; 1669 } else if (!(sb->s_flags & MS_RDONLY) && (*flags & MS_RDONLY)) 1670 ubifs_remount_ro(c); 1671 1672 return 0; 1673 } ··· 1768 mutex_init(&c->log_mutex); 1769 mutex_init(&c->mst_mutex); 1770 mutex_init(&c->umount_mutex); 1771 init_waitqueue_head(&c->cmt_wq); 1772 c->buds = RB_ROOT; 1773 c->old_idx = RB_ROOT;
··· 36 #include <linux/mount.h> 37 #include "ubifs.h" 38 39 + /* 40 + * Maximum amount of memory we may 'kmalloc()' without worrying that we are 41 + * allocating too much. 42 + */ 43 + #define UBIFS_KMALLOC_OK (128*1024) 44 + 45 /* Slab cache for UBIFS inodes */ 46 struct kmem_cache *ubifs_inode_slab; 47 ··· 561 * calculations when reporting free space. 562 */ 563 c->leb_overhead = c->leb_size % UBIFS_MAX_DATA_NODE_SZ; 564 565 + /* Buffer size for bulk-reads */ 566 + c->max_bu_buf_len = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ; 567 + if (c->max_bu_buf_len > c->leb_size) 568 + c->max_bu_buf_len = c->leb_size; 569 return 0; 570 } 571 ··· 992 } 993 994 /** 995 + * bu_init - initialize bulk-read information. 996 + * @c: UBIFS file-system description object 997 + */ 998 + static void bu_init(struct ubifs_info *c) 999 + { 1000 + ubifs_assert(c->bulk_read == 1); 1001 + 1002 + if (c->bu.buf) 1003 + return; /* Already initialized */ 1004 + 1005 + again: 1006 + c->bu.buf = kmalloc(c->max_bu_buf_len, GFP_KERNEL | __GFP_NOWARN); 1007 + if (!c->bu.buf) { 1008 + if (c->max_bu_buf_len > UBIFS_KMALLOC_OK) { 1009 + c->max_bu_buf_len = UBIFS_KMALLOC_OK; 1010 + goto again; 1011 + } 1012 + 1013 + /* Just disable bulk-read */ 1014 + ubifs_warn("Cannot allocate %d bytes of memory for bulk-read, " 1015 + "disabling it", c->max_bu_buf_len); 1016 + c->mount_opts.bulk_read = 1; 1017 + c->bulk_read = 0; 1018 + return; 1019 + } 1020 + } 1021 + 1022 + /** 1023 * mount_ubifs - mount UBIFS file-system. 1024 * @c: UBIFS file-system description object 1025 * ··· 1059 goto out_free; 1060 } 1061 1062 + if (c->bulk_read == 1) 1063 + bu_init(c); 1064 + 1065 + /* 1066 + * We have to check all CRCs, even for data nodes, when we mount the FS 1067 + * (specifically, when we are replaying). 
1068 + */ 1069 c->always_chk_crc = 1; 1070 1071 err = ubifs_read_superblock(c); ··· 1289 out_dereg: 1290 dbg_failure_mode_deregistration(c); 1291 out_free: 1292 + kfree(c->bu.buf); 1293 vfree(c->ileb_buf); 1294 vfree(c->sbuf); 1295 kfree(c->bottom_up_buf); ··· 1325 kfree(c->cbuf); 1326 kfree(c->rcvrd_mst_node); 1327 kfree(c->mst_node); 1328 + kfree(c->bu.buf); 1329 + vfree(c->ileb_buf); 1330 vfree(c->sbuf); 1331 kfree(c->bottom_up_buf); 1332 UBIFS_DBG(vfree(c->dbg_buf)); 1333 dbg_failure_mode_deregistration(c); 1334 } 1335 ··· 1626 ubifs_err("invalid or unknown remount parameter"); 1627 return err; 1628 } 1629 + 1630 if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) { 1631 err = ubifs_remount_rw(c); 1632 if (err) 1633 return err; 1634 } else if (!(sb->s_flags & MS_RDONLY) && (*flags & MS_RDONLY)) 1635 ubifs_remount_ro(c); 1636 + 1637 + if (c->bulk_read == 1) 1638 + bu_init(c); 1639 + else { 1640 + dbg_gen("disable bulk-read"); 1641 + kfree(c->bu.buf); 1642 + c->bu.buf = NULL; 1643 + } 1644 1645 return 0; 1646 } ··· 1723 mutex_init(&c->log_mutex); 1724 mutex_init(&c->mst_mutex); 1725 mutex_init(&c->umount_mutex); 1726 + mutex_init(&c->bu_mutex); 1727 init_waitqueue_head(&c->cmt_wq); 1728 c->buds = RB_ROOT; 1729 c->old_idx = RB_ROOT;
+9 -3
fs/ubifs/tnc.c
··· 1501 * @bu: bulk-read parameters and results 1502 * 1503 * Lookup consecutive data node keys for the same inode that reside 1504 - * consecutively in the same LEB. 1505 */ 1506 int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu) 1507 { ··· 2682 struct ubifs_dent_node *xent, *pxent = NULL; 2683 struct qstr nm = { .name = NULL }; 2684 2685 - dbg_tnc("ino %lu", inum); 2686 2687 /* 2688 * Walk all extended attribute entries and remove them together with ··· 2702 } 2703 2704 xattr_inum = le64_to_cpu(xent->inum); 2705 - dbg_tnc("xent '%s', ino %lu", xent->name, xattr_inum); 2706 2707 nm.name = xent->name; 2708 nm.len = le16_to_cpu(xent->nlen);
··· 1501 * @bu: bulk-read parameters and results 1502 * 1503 * Lookup consecutive data node keys for the same inode that reside 1504 + * consecutively in the same LEB. This function returns zero in case of success 1505 + * and a negative error code in case of failure. 1506 + * 1507 + * Note, if the bulk-read buffer length (@bu->buf_len) is known, this function 1508 + * makes sure bulk-read nodes fit the buffer. Otherwise, this function prepares 1509 + * maximum possible amount of nodes for bulk-read. 1510 */ 1511 int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu) 1512 { ··· 2677 struct ubifs_dent_node *xent, *pxent = NULL; 2678 struct qstr nm = { .name = NULL }; 2679 2680 + dbg_tnc("ino %lu", (unsigned long)inum); 2681 2682 /* 2683 * Walk all extended attribute entries and remove them together with ··· 2697 } 2698 2699 xattr_inum = le64_to_cpu(xent->inum); 2700 + dbg_tnc("xent '%s', ino %lu", xent->name, 2701 + (unsigned long)xattr_inum); 2702 2703 nm.name = xent->name; 2704 nm.len = le16_to_cpu(xent->nlen);
+9 -3
fs/ubifs/ubifs.h
··· 753 }; 754 755 /** 756 - * struct bu_info - bulk-read information 757 * @key: first data node key 758 * @zbranch: zbranches of data nodes to bulk read 759 * @buf: buffer to read into ··· 969 * @mst_node: master node 970 * @mst_offs: offset of valid master node 971 * @mst_mutex: protects the master node area, @mst_node, and @mst_offs 972 - * @bulk_read_buf_size: buffer size for bulk-reads 973 * 974 * @log_lebs: number of logical eraseblocks in the log 975 * @log_bytes: log size in bytes ··· 1220 struct ubifs_mst_node *mst_node; 1221 int mst_offs; 1222 struct mutex mst_mutex; 1223 - int bulk_read_buf_size; 1224 1225 int log_lebs; 1226 long long log_bytes;
··· 753 }; 754 755 /** 756 + * struct bu_info - bulk-read information. 757 * @key: first data node key 758 * @zbranch: zbranches of data nodes to bulk read 759 * @buf: buffer to read into ··· 969 * @mst_node: master node 970 * @mst_offs: offset of valid master node 971 * @mst_mutex: protects the master node area, @mst_node, and @mst_offs 972 + * 973 + * @max_bu_buf_len: maximum bulk-read buffer length 974 + * @bu_mutex: protects the pre-allocated bulk-read buffer and @c->bu 975 + * @bu: pre-allocated bulk-read information 976 * 977 * @log_lebs: number of logical eraseblocks in the log 978 * @log_bytes: log size in bytes ··· 1217 struct ubifs_mst_node *mst_node; 1218 int mst_offs; 1219 struct mutex mst_mutex; 1220 + 1221 + int max_bu_buf_len; 1222 + struct mutex bu_mutex; 1223 + struct bu_info bu; 1224 1225 int log_lebs; 1226 long long log_bytes;