UBIFS: pre-allocate bulk-read buffer

To avoid memory allocation failure during bulk-read, pre-allocate
a bulk-read buffer, so that if there is only one bulk-reader at
a time, it will just use the pre-allocated buffer and will not
do any memory allocation. However, if there is more than one
bulk-reader at a time, only one reader will use the pre-allocated
buffer, while the others will allocate buffers for themselves.

Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>

 fs/ubifs/file.c  | 31 ++++++++++++++++++++++++---------
 fs/ubifs/super.c | 57 ++++++++++++++++++++++++++++++++++++++++++++++++---------
 fs/ubifs/ubifs.h |  6 ++++++
 3 files changed, 76 insertions(+), 18 deletions(-)
fs/ubifs/file.c

···
         struct ubifs_inode *ui = ubifs_inode(inode);
         pgoff_t index = page->index, last_page_read = ui->last_page_read;
         struct bu_info *bu;
-        int err = 0;
+        int err = 0, allocated = 0;
 
         ui->last_page_read = index;
         if (!c->bulk_read)
                 return 0;
 
         /*
-         * Bulk-read is protected by ui_mutex, but it is an optimization, so
-         * don't bother if we cannot lock the mutex.
+         * Bulk-read is protected by @ui->ui_mutex, but it is an optimization,
+         * so don't bother if we cannot lock the mutex.
          */
         if (!mutex_trylock(&ui->ui_mutex))
                 return 0;
···
                 ui->bulk_read = 1;
         }
 
-        bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN);
-        if (!bu)
-                return 0;
+        /*
+         * If possible, try to use pre-allocated bulk-read information, which
+         * is protected by @c->bu_mutex.
+         */
+        if (mutex_trylock(&c->bu_mutex))
+                bu = &c->bu;
+        else {
+                bu = kmalloc(sizeof(struct bu_info), GFP_NOFS | __GFP_NOWARN);
+                if (!bu)
+                        goto out_unlock;
 
-        bu->buf = NULL;
+                bu->buf = NULL;
+                allocated = 1;
+        }
+
         bu->buf_len = c->max_bu_buf_len;
         data_key_init(c, &bu->key, inode->i_ino,
                       page->index << UBIFS_BLOCKS_PER_PAGE_SHIFT);
-
         err = ubifs_do_bulk_read(c, bu, page);
-        kfree(bu);
+
+        if (!allocated)
+                mutex_unlock(&c->bu_mutex);
+        else
+                kfree(bu);
 
 out_unlock:
         mutex_unlock(&ui->ui_mutex);
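
The heart of the change is in the bulk-read path above: an uncontended reader takes c->bu_mutex with mutex_trylock() and uses the shared pre-allocated bu_info for free, while a contended reader falls back to the old kmalloc() path; the new "allocated" flag records which case happened, so cleanup either unlocks the mutex or frees the private buffer. The same trylock-then-fallback pattern is easy to demonstrate outside the kernel. Here is a minimal userspace sketch, with pthreads and malloc() standing in for the kernel primitives; all names in it are illustrative, not UBIFS code:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define BUF_LEN 4096

/* Shared pre-allocated buffer, analogous to @c->bu and @c->bu_mutex. */
static pthread_mutex_t bu_mutex = PTHREAD_MUTEX_INITIALIZER;
static char prealloc_buf[BUF_LEN];

static int do_bulk_work(const char *data)
{
        char *buf;
        int allocated = 0;

        if (pthread_mutex_trylock(&bu_mutex) == 0) {
                /* Uncontended case: use the shared pre-allocated buffer. */
                buf = prealloc_buf;
        } else {
                /* Contended case: fall back to a private allocation. */
                buf = malloc(BUF_LEN);
                if (!buf)
                        return -1;
                allocated = 1;
        }

        snprintf(buf, BUF_LEN, "processed: %s", data);
        puts(buf);

        /* Release whichever resource this caller actually took. */
        if (allocated)
                free(buf);
        else
                pthread_mutex_unlock(&bu_mutex);
        return 0;
}

int main(void)
{
        return do_bulk_work("hello") ? EXIT_FAILURE : EXIT_SUCCESS;
}

Because mutex_trylock() never sleeps, the fast path costs little more than one atomic operation, and under contention behaviour degrades gracefully to exactly what the code did before this patch.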

fs/ubifs/super.c

···
         c->max_bu_buf_len = UBIFS_MAX_BULK_READ * UBIFS_MAX_DATA_NODE_SZ;
         if (c->max_bu_buf_len > c->leb_size)
                 c->max_bu_buf_len = c->leb_size;
-        if (c->max_bu_buf_len > UBIFS_KMALLOC_OK) {
-                /* Check if we can kmalloc that much */
-                void *try = kmalloc(c->max_bu_buf_len,
-                                    GFP_KERNEL | __GFP_NOWARN);
-                kfree(try);
-                if (!try)
-                        c->max_bu_buf_len = UBIFS_KMALLOC_OK;
-        }
         return 0;
 }
···
 }
 
 /**
+ * bu_init - initialize bulk-read information.
+ * @c: UBIFS file-system description object
+ */
+static void bu_init(struct ubifs_info *c)
+{
+        ubifs_assert(c->bulk_read == 1);
+
+        if (c->bu.buf)
+                return; /* Already initialized */
+
+again:
+        c->bu.buf = kmalloc(c->max_bu_buf_len, GFP_KERNEL | __GFP_NOWARN);
+        if (!c->bu.buf) {
+                if (c->max_bu_buf_len > UBIFS_KMALLOC_OK) {
+                        c->max_bu_buf_len = UBIFS_KMALLOC_OK;
+                        goto again;
+                }
+
+                /* Just disable bulk-read */
+                ubifs_warn("Cannot allocate %d bytes of memory for bulk-read, "
+                           "disabling it", c->max_bu_buf_len);
+                c->mount_opts.bulk_read = 1;
+                c->bulk_read = 0;
+                return;
+        }
+}
+
+/**
  * mount_ubifs - mount UBIFS file-system.
  * @c: UBIFS file-system description object
  *
···
                 goto out_free;
         }
 
+        if (c->bulk_read == 1)
+                bu_init(c);
+
+        /*
+         * We have to check all CRCs, even for data nodes, when we mount the FS
+         * (specifically, when we are replaying).
+         */
         c->always_chk_crc = 1;
 
         err = ubifs_read_superblock(c);
···
 out_dereg:
         dbg_failure_mode_deregistration(c);
 out_free:
+        kfree(c->bu.buf);
         vfree(c->ileb_buf);
         vfree(c->sbuf);
         kfree(c->bottom_up_buf);
···
         kfree(c->cbuf);
         kfree(c->rcvrd_mst_node);
         kfree(c->mst_node);
+        kfree(c->bu.buf);
+        vfree(c->ileb_buf);
         vfree(c->sbuf);
         kfree(c->bottom_up_buf);
         UBIFS_DBG(vfree(c->dbg_buf));
-        vfree(c->ileb_buf);
         dbg_failure_mode_deregistration(c);
 }
···
                 ubifs_err("invalid or unknown remount parameter");
                 return err;
         }
+
         if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) {
                 err = ubifs_remount_rw(c);
                 if (err)
                         return err;
         } else if (!(sb->s_flags & MS_RDONLY) && (*flags & MS_RDONLY))
                 ubifs_remount_ro(c);
+
+        if (c->bulk_read == 1)
+                bu_init(c);
+        else {
+                dbg_gen("disable bulk-read");
+                kfree(c->bu.buf);
+                c->bu.buf = NULL;
+        }
 
         return 0;
 }
···
         mutex_init(&c->log_mutex);
         mutex_init(&c->mst_mutex);
         mutex_init(&c->umount_mutex);
+        mutex_init(&c->bu_mutex);
         init_waitqueue_head(&c->cmt_wq);
         c->buds = RB_ROOT;
         c->old_idx = RB_ROOT;
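
The new bu_init() helper also shows a robust allocation strategy for an optional feature: try the preferred size with __GFP_NOWARN, shrink to a known-safe limit (UBIFS_KMALLOC_OK) and retry, and if even that fails, disable the feature rather than fail the mount. Below is a hedged userspace approximation of the same shape; the size constants and variable names are illustrative only:

#include <stdio.h>
#include <stdlib.h>

/* Illustrative fallback limit, standing in for UBIFS_KMALLOC_OK. */
#define SMALL_OK (128 * 1024)

static void *bu_buf;
static size_t max_bu_buf_len = 8 * 1024 * 1024; /* preferred size */
static int bulk_read_enabled = 1;

static void bu_init(void)
{
        if (bu_buf)
                return; /* Already initialized */
again:
        bu_buf = malloc(max_bu_buf_len);
        if (!bu_buf) {
                if (max_bu_buf_len > SMALL_OK) {
                        /* Shrink the request and retry. */
                        max_bu_buf_len = SMALL_OK;
                        goto again;
                }
                /* Give up: disable the optimization, do not fail startup. */
                fprintf(stderr, "cannot allocate %zu bytes, disabling bulk-read\n",
                        max_bu_buf_len);
                bulk_read_enabled = 0;
        }
}

int main(void)
{
        bu_init();
        printf("bulk-read %s (buffer of %zu bytes)\n",
               bulk_read_enabled ? "enabled" : "disabled",
               bulk_read_enabled ? max_bu_buf_len : 0);
        free(bu_buf);
        return 0;
}

Note that remount takes the complementary action in the hunk above: enabling bulk_read calls bu_init(), while disabling it frees c->bu.buf and resets the pointer, so the buffer's lifetime always matches the mount option.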

fs/ubifs/ubifs.h

···
  * @mst_node: master node
  * @mst_offs: offset of valid master node
  * @mst_mutex: protects the master node area, @mst_node, and @mst_offs
+ *
  * @max_bu_buf_len: maximum bulk-read buffer length
+ * @bu_mutex: protects the pre-allocated bulk-read buffer and @c->bu
+ * @bu: pre-allocated bulk-read information
  *
  * @log_lebs: number of logical eraseblocks in the log
  * @log_bytes: log size in bytes
···
         struct ubifs_mst_node *mst_node;
         int mst_offs;
         struct mutex mst_mutex;
+
         int max_bu_buf_len;
+        struct mutex bu_mutex;
+        struct bu_info bu;
 
         int log_lebs;
         long long log_bytes;