Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ufs: move truncate code into inode.c

It is closely tied to the block pointer handling there and can benefit
from existing helpers, etc. — no point in keeping them apart.

Trimmed the trailing whitespace in inode.c at the same time.

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>

Al Viro 010d331f 0d23cf76

+470 -533
+1 -1
fs/ufs/Makefile
··· 5 5 obj-$(CONFIG_UFS_FS) += ufs.o 6 6 7 7 ufs-objs := balloc.o cylinder.o dir.o file.o ialloc.o inode.o \ 8 - namei.o super.o symlink.o truncate.o util.o 8 + namei.o super.o symlink.o util.o 9 9 ccflags-$(CONFIG_UFS_DEBUG) += -DDEBUG
+468 -12
fs/ufs/inode.c
··· 126 126 struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; 127 127 u64 mask = (u64) uspi->s_apbmask>>uspi->s_fpbshift; 128 128 int shift = uspi->s_apbshift-uspi->s_fpbshift; 129 - sector_t offsets[4], *p; 129 + unsigned offsets[4], *p; 130 130 Indirect chain[4], *q = chain; 131 131 int depth = ufs_block_to_path(inode, frag >> uspi->s_fpbshift, offsets); 132 132 unsigned flags = UFS_SB(sb)->s_flags; ··· 290 290 return NULL; 291 291 } 292 292 lastfrag = ufsi->i_lastfrag; 293 - 293 + 294 294 } 295 295 tmp = ufs_data_ptr_to_cpu(sb, 296 296 ufs_get_direct_data_ptr(uspi, ufsi, 297 297 lastblock)); 298 298 if (tmp) 299 299 goal = tmp + uspi->s_fpb; 300 - tmp = ufs_new_fragments (inode, p, fragment - blockoff, 300 + tmp = ufs_new_fragments (inode, p, fragment - blockoff, 301 301 goal, required + blockoff, 302 302 err, 303 303 phys != NULL ? locked_page : NULL); ··· 436 436 if (ufs_data_ptr_to_cpu(sb, p)) 437 437 goto repeat; 438 438 goto out; 439 - } 439 + } 440 440 441 441 442 442 if (!phys) { ··· 463 463 * readpage, writepage and so on 464 464 */ 465 465 466 - int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create) 466 + static int ufs_getfrag_block(struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create) 467 467 { 468 468 struct super_block * sb = inode->i_sb; 469 469 struct ufs_sb_info * sbi = UFS_SB(sb); ··· 472 472 int ret, err, new; 473 473 unsigned long ptr,phys; 474 474 u64 phys64 = 0; 475 - 475 + 476 476 if (!create) { 477 477 phys64 = ufs_frag_map(inode, fragment); 478 478 UFSD("phys64 = %llu\n", (unsigned long long)phys64); ··· 498 498 499 499 err = 0; 500 500 ptr = fragment; 501 - 501 + 502 502 /* 503 503 * ok, these macros clean the logic up a bit and make 504 504 * it much more readable: ··· 573 573 { 574 574 return __block_write_begin(page, pos, len, ufs_getfrag_block); 575 575 } 576 + 577 + static void ufs_truncate_blocks(struct inode *); 576 578 577 579 static void 
ufs_write_failed(struct address_space *mapping, loff_t to) 578 580 { ··· 663 661 ufs_error (sb, "ufs_read_inode", "inode %lu has zero nlink\n", inode->i_ino); 664 662 return -1; 665 663 } 666 - 664 + 667 665 /* 668 666 * Linux now has 32-bit uid and gid, so we can support EFT. 669 667 */ ··· 683 681 ufsi->i_shadow = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_shadow); 684 682 ufsi->i_oeftflag = fs32_to_cpu(sb, ufs_inode->ui_u3.ui_sun.ui_oeftflag); 685 683 686 - 684 + 687 685 if (S_ISCHR(mode) || S_ISBLK(mode) || inode->i_blocks) { 688 686 memcpy(ufsi->i_u1.i_data, &ufs_inode->ui_u2.ui_addr, 689 687 sizeof(ufs_inode->ui_u2.ui_addr)); ··· 817 815 818 816 ufs_set_inode_uid(sb, ufs_inode, i_uid_read(inode)); 819 817 ufs_set_inode_gid(sb, ufs_inode, i_gid_read(inode)); 820 - 818 + 821 819 ufs_inode->ui_size = cpu_to_fs64(sb, inode->i_size); 822 820 ufs_inode->ui_atime.tv_sec = cpu_to_fs32(sb, inode->i_atime.tv_sec); 823 821 ufs_inode->ui_atime.tv_usec = 0; ··· 919 917 920 918 ufs1_update_inode(inode, ufs_inode + ufs_inotofsbo(inode->i_ino)); 921 919 } 922 - 920 + 923 921 mark_buffer_dirty(bh); 924 922 if (do_sync) 925 923 sync_dirty_buffer(bh); 926 924 brelse (bh); 927 - 925 + 928 926 UFSD("EXIT\n"); 929 927 return 0; 930 928 } ··· 959 957 if (want_delete) 960 958 ufs_free_inode(inode); 961 959 } 960 + 961 + #define DIRECT_BLOCK ((inode->i_size + uspi->s_bsize - 1) >> uspi->s_bshift) 962 + #define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift) 963 + 964 + static void ufs_trunc_direct(struct inode *inode) 965 + { 966 + struct ufs_inode_info *ufsi = UFS_I(inode); 967 + struct super_block * sb; 968 + struct ufs_sb_private_info * uspi; 969 + void *p; 970 + u64 frag1, frag2, frag3, frag4, block1, block2; 971 + unsigned frag_to_free, free_count; 972 + unsigned i, tmp; 973 + 974 + UFSD("ENTER: ino %lu\n", inode->i_ino); 975 + 976 + sb = inode->i_sb; 977 + uspi = UFS_SB(sb)->s_uspi; 978 + 979 + frag_to_free = 0; 980 + free_count = 0; 981 + 982 + frag1 = 
DIRECT_FRAGMENT; 983 + frag4 = min_t(u64, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag); 984 + frag2 = ((frag1 & uspi->s_fpbmask) ? ((frag1 | uspi->s_fpbmask) + 1) : frag1); 985 + frag3 = frag4 & ~uspi->s_fpbmask; 986 + block1 = block2 = 0; 987 + if (frag2 > frag3) { 988 + frag2 = frag4; 989 + frag3 = frag4 = 0; 990 + } else if (frag2 < frag3) { 991 + block1 = ufs_fragstoblks (frag2); 992 + block2 = ufs_fragstoblks (frag3); 993 + } 994 + 995 + UFSD("ino %lu, frag1 %llu, frag2 %llu, block1 %llu, block2 %llu," 996 + " frag3 %llu, frag4 %llu\n", inode->i_ino, 997 + (unsigned long long)frag1, (unsigned long long)frag2, 998 + (unsigned long long)block1, (unsigned long long)block2, 999 + (unsigned long long)frag3, (unsigned long long)frag4); 1000 + 1001 + if (frag1 >= frag2) 1002 + goto next1; 1003 + 1004 + /* 1005 + * Free first free fragments 1006 + */ 1007 + p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag1)); 1008 + tmp = ufs_data_ptr_to_cpu(sb, p); 1009 + if (!tmp ) 1010 + ufs_panic (sb, "ufs_trunc_direct", "internal error"); 1011 + frag2 -= frag1; 1012 + frag1 = ufs_fragnum (frag1); 1013 + 1014 + ufs_free_fragments(inode, tmp + frag1, frag2); 1015 + mark_inode_dirty(inode); 1016 + frag_to_free = tmp + frag1; 1017 + 1018 + next1: 1019 + /* 1020 + * Free whole blocks 1021 + */ 1022 + for (i = block1 ; i < block2; i++) { 1023 + p = ufs_get_direct_data_ptr(uspi, ufsi, i); 1024 + tmp = ufs_data_ptr_to_cpu(sb, p); 1025 + if (!tmp) 1026 + continue; 1027 + write_seqlock(&ufsi->meta_lock); 1028 + ufs_data_ptr_clear(uspi, p); 1029 + write_sequnlock(&ufsi->meta_lock); 1030 + 1031 + if (free_count == 0) { 1032 + frag_to_free = tmp; 1033 + free_count = uspi->s_fpb; 1034 + } else if (free_count > 0 && frag_to_free == tmp - free_count) 1035 + free_count += uspi->s_fpb; 1036 + else { 1037 + ufs_free_blocks (inode, frag_to_free, free_count); 1038 + frag_to_free = tmp; 1039 + free_count = uspi->s_fpb; 1040 + } 1041 + mark_inode_dirty(inode); 1042 + } 1043 + 1044 + if (free_count 
> 0) 1045 + ufs_free_blocks (inode, frag_to_free, free_count); 1046 + 1047 + if (frag3 >= frag4) 1048 + goto next3; 1049 + 1050 + /* 1051 + * Free last free fragments 1052 + */ 1053 + p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag3)); 1054 + tmp = ufs_data_ptr_to_cpu(sb, p); 1055 + if (!tmp ) 1056 + ufs_panic(sb, "ufs_truncate_direct", "internal error"); 1057 + frag4 = ufs_fragnum (frag4); 1058 + write_seqlock(&ufsi->meta_lock); 1059 + ufs_data_ptr_clear(uspi, p); 1060 + write_sequnlock(&ufsi->meta_lock); 1061 + 1062 + ufs_free_fragments (inode, tmp, frag4); 1063 + mark_inode_dirty(inode); 1064 + next3: 1065 + 1066 + UFSD("EXIT: ino %lu\n", inode->i_ino); 1067 + } 1068 + 1069 + 1070 + static void ufs_trunc_indirect(struct inode *inode, u64 offset, void *p) 1071 + { 1072 + struct super_block * sb; 1073 + struct ufs_sb_private_info * uspi; 1074 + struct ufs_buffer_head * ind_ubh; 1075 + void *ind; 1076 + u64 tmp, indirect_block, i, frag_to_free; 1077 + unsigned free_count; 1078 + 1079 + UFSD("ENTER: ino %lu, offset %llu, p: %p\n", 1080 + inode->i_ino, (unsigned long long)offset, p); 1081 + 1082 + BUG_ON(!p); 1083 + 1084 + sb = inode->i_sb; 1085 + uspi = UFS_SB(sb)->s_uspi; 1086 + 1087 + frag_to_free = 0; 1088 + free_count = 0; 1089 + 1090 + tmp = ufs_data_ptr_to_cpu(sb, p); 1091 + if (!tmp) 1092 + return; 1093 + ind_ubh = ubh_bread(sb, tmp, uspi->s_bsize); 1094 + if (!ind_ubh) { 1095 + write_seqlock(&UFS_I(inode)->meta_lock); 1096 + ufs_data_ptr_clear(uspi, p); 1097 + write_sequnlock(&UFS_I(inode)->meta_lock); 1098 + return; 1099 + } 1100 + 1101 + indirect_block = (DIRECT_BLOCK > offset) ? 
(DIRECT_BLOCK - offset) : 0; 1102 + for (i = indirect_block; i < uspi->s_apb; i++) { 1103 + ind = ubh_get_data_ptr(uspi, ind_ubh, i); 1104 + tmp = ufs_data_ptr_to_cpu(sb, ind); 1105 + if (!tmp) 1106 + continue; 1107 + 1108 + write_seqlock(&UFS_I(inode)->meta_lock); 1109 + ufs_data_ptr_clear(uspi, ind); 1110 + write_sequnlock(&UFS_I(inode)->meta_lock); 1111 + ubh_mark_buffer_dirty(ind_ubh); 1112 + if (free_count == 0) { 1113 + frag_to_free = tmp; 1114 + free_count = uspi->s_fpb; 1115 + } else if (free_count > 0 && frag_to_free == tmp - free_count) 1116 + free_count += uspi->s_fpb; 1117 + else { 1118 + ufs_free_blocks (inode, frag_to_free, free_count); 1119 + frag_to_free = tmp; 1120 + free_count = uspi->s_fpb; 1121 + } 1122 + 1123 + mark_inode_dirty(inode); 1124 + } 1125 + 1126 + if (free_count > 0) { 1127 + ufs_free_blocks (inode, frag_to_free, free_count); 1128 + } 1129 + for (i = 0; i < uspi->s_apb; i++) 1130 + if (!ufs_is_data_ptr_zero(uspi, 1131 + ubh_get_data_ptr(uspi, ind_ubh, i))) 1132 + break; 1133 + if (i >= uspi->s_apb) { 1134 + tmp = ufs_data_ptr_to_cpu(sb, p); 1135 + write_seqlock(&UFS_I(inode)->meta_lock); 1136 + ufs_data_ptr_clear(uspi, p); 1137 + write_sequnlock(&UFS_I(inode)->meta_lock); 1138 + 1139 + ubh_bforget(ind_ubh); 1140 + ufs_free_blocks (inode, tmp, uspi->s_fpb); 1141 + mark_inode_dirty(inode); 1142 + ind_ubh = NULL; 1143 + } 1144 + if (IS_SYNC(inode) && ind_ubh && ubh_buffer_dirty(ind_ubh)) 1145 + ubh_sync_block(ind_ubh); 1146 + ubh_brelse (ind_ubh); 1147 + 1148 + UFSD("EXIT: ino %lu\n", inode->i_ino); 1149 + } 1150 + 1151 + static void ufs_trunc_dindirect(struct inode *inode, u64 offset, void *p) 1152 + { 1153 + struct super_block * sb; 1154 + struct ufs_sb_private_info * uspi; 1155 + struct ufs_buffer_head *dind_bh; 1156 + u64 i, tmp, dindirect_block; 1157 + void *dind; 1158 + 1159 + UFSD("ENTER: ino %lu\n", inode->i_ino); 1160 + 1161 + sb = inode->i_sb; 1162 + uspi = UFS_SB(sb)->s_uspi; 1163 + 1164 + dindirect_block = (DIRECT_BLOCK > 
offset) 1165 + ? ((DIRECT_BLOCK - offset) >> uspi->s_apbshift) : 0; 1166 + 1167 + tmp = ufs_data_ptr_to_cpu(sb, p); 1168 + if (!tmp) 1169 + return; 1170 + dind_bh = ubh_bread(sb, tmp, uspi->s_bsize); 1171 + if (!dind_bh) { 1172 + write_seqlock(&UFS_I(inode)->meta_lock); 1173 + ufs_data_ptr_clear(uspi, p); 1174 + write_sequnlock(&UFS_I(inode)->meta_lock); 1175 + return; 1176 + } 1177 + 1178 + for (i = dindirect_block ; i < uspi->s_apb ; i++) { 1179 + dind = ubh_get_data_ptr(uspi, dind_bh, i); 1180 + tmp = ufs_data_ptr_to_cpu(sb, dind); 1181 + if (!tmp) 1182 + continue; 1183 + ufs_trunc_indirect (inode, offset + (i << uspi->s_apbshift), dind); 1184 + ubh_mark_buffer_dirty(dind_bh); 1185 + } 1186 + 1187 + for (i = 0; i < uspi->s_apb; i++) 1188 + if (!ufs_is_data_ptr_zero(uspi, 1189 + ubh_get_data_ptr(uspi, dind_bh, i))) 1190 + break; 1191 + if (i >= uspi->s_apb) { 1192 + tmp = ufs_data_ptr_to_cpu(sb, p); 1193 + write_seqlock(&UFS_I(inode)->meta_lock); 1194 + ufs_data_ptr_clear(uspi, p); 1195 + write_sequnlock(&UFS_I(inode)->meta_lock); 1196 + 1197 + ubh_bforget(dind_bh); 1198 + ufs_free_blocks(inode, tmp, uspi->s_fpb); 1199 + mark_inode_dirty(inode); 1200 + dind_bh = NULL; 1201 + } 1202 + if (IS_SYNC(inode) && dind_bh && ubh_buffer_dirty(dind_bh)) 1203 + ubh_sync_block(dind_bh); 1204 + ubh_brelse (dind_bh); 1205 + 1206 + UFSD("EXIT: ino %lu\n", inode->i_ino); 1207 + } 1208 + 1209 + static void ufs_trunc_tindirect(struct inode *inode) 1210 + { 1211 + struct super_block *sb = inode->i_sb; 1212 + struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; 1213 + struct ufs_inode_info *ufsi = UFS_I(inode); 1214 + struct ufs_buffer_head * tind_bh; 1215 + u64 tindirect_block, tmp, i; 1216 + void *tind, *p; 1217 + 1218 + UFSD("ENTER: ino %lu\n", inode->i_ino); 1219 + 1220 + tindirect_block = (DIRECT_BLOCK > (UFS_NDADDR + uspi->s_apb + uspi->s_2apb)) 1221 + ? 
((DIRECT_BLOCK - UFS_NDADDR - uspi->s_apb - uspi->s_2apb) >> uspi->s_2apbshift) : 0; 1222 + 1223 + p = ufs_get_direct_data_ptr(uspi, ufsi, UFS_TIND_BLOCK); 1224 + if (!(tmp = ufs_data_ptr_to_cpu(sb, p))) 1225 + return; 1226 + tind_bh = ubh_bread (sb, tmp, uspi->s_bsize); 1227 + if (!tind_bh) { 1228 + write_seqlock(&ufsi->meta_lock); 1229 + ufs_data_ptr_clear(uspi, p); 1230 + write_sequnlock(&ufsi->meta_lock); 1231 + return; 1232 + } 1233 + 1234 + for (i = tindirect_block ; i < uspi->s_apb ; i++) { 1235 + tind = ubh_get_data_ptr(uspi, tind_bh, i); 1236 + ufs_trunc_dindirect(inode, UFS_NDADDR + 1237 + uspi->s_apb + ((i + 1) << uspi->s_2apbshift), tind); 1238 + ubh_mark_buffer_dirty(tind_bh); 1239 + } 1240 + for (i = 0; i < uspi->s_apb; i++) 1241 + if (!ufs_is_data_ptr_zero(uspi, 1242 + ubh_get_data_ptr(uspi, tind_bh, i))) 1243 + break; 1244 + if (i >= uspi->s_apb) { 1245 + tmp = ufs_data_ptr_to_cpu(sb, p); 1246 + write_seqlock(&ufsi->meta_lock); 1247 + ufs_data_ptr_clear(uspi, p); 1248 + write_sequnlock(&ufsi->meta_lock); 1249 + 1250 + ubh_bforget(tind_bh); 1251 + ufs_free_blocks(inode, tmp, uspi->s_fpb); 1252 + mark_inode_dirty(inode); 1253 + tind_bh = NULL; 1254 + } 1255 + if (IS_SYNC(inode) && tind_bh && ubh_buffer_dirty(tind_bh)) 1256 + ubh_sync_block(tind_bh); 1257 + ubh_brelse (tind_bh); 1258 + 1259 + UFSD("EXIT: ino %lu\n", inode->i_ino); 1260 + } 1261 + 1262 + static int ufs_alloc_lastblock(struct inode *inode, loff_t size) 1263 + { 1264 + int err = 0; 1265 + struct super_block *sb = inode->i_sb; 1266 + struct address_space *mapping = inode->i_mapping; 1267 + struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; 1268 + unsigned i, end; 1269 + sector_t lastfrag; 1270 + struct page *lastpage; 1271 + struct buffer_head *bh; 1272 + u64 phys64; 1273 + 1274 + lastfrag = (size + uspi->s_fsize - 1) >> uspi->s_fshift; 1275 + 1276 + if (!lastfrag) 1277 + goto out; 1278 + 1279 + lastfrag--; 1280 + 1281 + lastpage = ufs_get_locked_page(mapping, lastfrag >> 1282 + 
(PAGE_CACHE_SHIFT - inode->i_blkbits)); 1283 + if (IS_ERR(lastpage)) { 1284 + err = -EIO; 1285 + goto out; 1286 + } 1287 + 1288 + end = lastfrag & ((1 << (PAGE_CACHE_SHIFT - inode->i_blkbits)) - 1); 1289 + bh = page_buffers(lastpage); 1290 + for (i = 0; i < end; ++i) 1291 + bh = bh->b_this_page; 1292 + 1293 + 1294 + err = ufs_getfrag_block(inode, lastfrag, bh, 1); 1295 + 1296 + if (unlikely(err)) 1297 + goto out_unlock; 1298 + 1299 + if (buffer_new(bh)) { 1300 + clear_buffer_new(bh); 1301 + unmap_underlying_metadata(bh->b_bdev, 1302 + bh->b_blocknr); 1303 + /* 1304 + * we do not zeroize fragment, because of 1305 + * if it maped to hole, it already contains zeroes 1306 + */ 1307 + set_buffer_uptodate(bh); 1308 + mark_buffer_dirty(bh); 1309 + set_page_dirty(lastpage); 1310 + } 1311 + 1312 + if (lastfrag >= UFS_IND_FRAGMENT) { 1313 + end = uspi->s_fpb - ufs_fragnum(lastfrag) - 1; 1314 + phys64 = bh->b_blocknr + 1; 1315 + for (i = 0; i < end; ++i) { 1316 + bh = sb_getblk(sb, i + phys64); 1317 + lock_buffer(bh); 1318 + memset(bh->b_data, 0, sb->s_blocksize); 1319 + set_buffer_uptodate(bh); 1320 + mark_buffer_dirty(bh); 1321 + unlock_buffer(bh); 1322 + sync_dirty_buffer(bh); 1323 + brelse(bh); 1324 + } 1325 + } 1326 + out_unlock: 1327 + ufs_put_locked_page(lastpage); 1328 + out: 1329 + return err; 1330 + } 1331 + 1332 + static void __ufs_truncate_blocks(struct inode *inode) 1333 + { 1334 + struct ufs_inode_info *ufsi = UFS_I(inode); 1335 + struct super_block *sb = inode->i_sb; 1336 + struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; 1337 + 1338 + mutex_lock(&ufsi->truncate_mutex); 1339 + ufs_trunc_direct(inode); 1340 + ufs_trunc_indirect(inode, UFS_IND_BLOCK, 1341 + ufs_get_direct_data_ptr(uspi, ufsi, UFS_IND_BLOCK)); 1342 + ufs_trunc_dindirect(inode, UFS_IND_BLOCK + uspi->s_apb, 1343 + ufs_get_direct_data_ptr(uspi, ufsi, UFS_DIND_BLOCK)); 1344 + ufs_trunc_tindirect(inode); 1345 + ufsi->i_lastfrag = DIRECT_FRAGMENT; 1346 + mutex_unlock(&ufsi->truncate_mutex); 1347 + 
} 1348 + 1349 + static int ufs_truncate(struct inode *inode, loff_t size) 1350 + { 1351 + int err = 0; 1352 + 1353 + UFSD("ENTER: ino %lu, i_size: %llu, old_i_size: %llu\n", 1354 + inode->i_ino, (unsigned long long)size, 1355 + (unsigned long long)i_size_read(inode)); 1356 + 1357 + if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 1358 + S_ISLNK(inode->i_mode))) 1359 + return -EINVAL; 1360 + if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) 1361 + return -EPERM; 1362 + 1363 + err = ufs_alloc_lastblock(inode, size); 1364 + 1365 + if (err) 1366 + goto out; 1367 + 1368 + block_truncate_page(inode->i_mapping, size, ufs_getfrag_block); 1369 + 1370 + truncate_setsize(inode, size); 1371 + 1372 + __ufs_truncate_blocks(inode); 1373 + inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; 1374 + mark_inode_dirty(inode); 1375 + out: 1376 + UFSD("EXIT: err %d\n", err); 1377 + return err; 1378 + } 1379 + 1380 + void ufs_truncate_blocks(struct inode *inode) 1381 + { 1382 + if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 1383 + S_ISLNK(inode->i_mode))) 1384 + return; 1385 + if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) 1386 + return; 1387 + __ufs_truncate_blocks(inode); 1388 + } 1389 + 1390 + int ufs_setattr(struct dentry *dentry, struct iattr *attr) 1391 + { 1392 + struct inode *inode = d_inode(dentry); 1393 + unsigned int ia_valid = attr->ia_valid; 1394 + int error; 1395 + 1396 + error = inode_change_ok(inode, attr); 1397 + if (error) 1398 + return error; 1399 + 1400 + if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) { 1401 + error = ufs_truncate(inode, attr->ia_size); 1402 + if (error) 1403 + return error; 1404 + } 1405 + 1406 + setattr_copy(inode, attr); 1407 + mark_inode_dirty(inode); 1408 + return 0; 1409 + } 1410 + 1411 + const struct inode_operations ufs_file_inode_operations = { 1412 + .setattr = ufs_setattr, 1413 + };
-515
fs/ufs/truncate.c
··· 1 - /* 2 - * linux/fs/ufs/truncate.c 3 - * 4 - * Copyright (C) 1998 5 - * Daniel Pirkl <daniel.pirkl@email.cz> 6 - * Charles University, Faculty of Mathematics and Physics 7 - * 8 - * from 9 - * 10 - * linux/fs/ext2/truncate.c 11 - * 12 - * Copyright (C) 1992, 1993, 1994, 1995 13 - * Remy Card (card@masi.ibp.fr) 14 - * Laboratoire MASI - Institut Blaise Pascal 15 - * Universite Pierre et Marie Curie (Paris VI) 16 - * 17 - * from 18 - * 19 - * linux/fs/minix/truncate.c 20 - * 21 - * Copyright (C) 1991, 1992 Linus Torvalds 22 - * 23 - * Big-endian to little-endian byte-swapping/bitmaps by 24 - * David S. Miller (davem@caip.rutgers.edu), 1995 25 - */ 26 - 27 - /* 28 - * Real random numbers for secure rm added 94/02/18 29 - * Idea from Pierre del Perugia <delperug@gla.ecoledoc.ibp.fr> 30 - */ 31 - 32 - /* 33 - * Adoptation to use page cache and UFS2 write support by 34 - * Evgeniy Dushistov <dushistov@mail.ru>, 2006-2007 35 - */ 36 - 37 - #include <linux/errno.h> 38 - #include <linux/fs.h> 39 - #include <linux/fcntl.h> 40 - #include <linux/time.h> 41 - #include <linux/stat.h> 42 - #include <linux/string.h> 43 - #include <linux/buffer_head.h> 44 - #include <linux/blkdev.h> 45 - #include <linux/sched.h> 46 - 47 - #include "ufs_fs.h" 48 - #include "ufs.h" 49 - #include "swab.h" 50 - #include "util.h" 51 - 52 - /* 53 - * Secure deletion currently doesn't work. It interacts very badly 54 - * with buffers shared with memory mappings, and for that reason 55 - * can't be done in the truncate() routines. It should instead be 56 - * done separately in "release()" before calling the truncate routines 57 - * that will release the actual file blocks. 
58 - * 59 - * Linus 60 - */ 61 - 62 - #define DIRECT_BLOCK ((inode->i_size + uspi->s_bsize - 1) >> uspi->s_bshift) 63 - #define DIRECT_FRAGMENT ((inode->i_size + uspi->s_fsize - 1) >> uspi->s_fshift) 64 - 65 - 66 - static void ufs_trunc_direct(struct inode *inode) 67 - { 68 - struct ufs_inode_info *ufsi = UFS_I(inode); 69 - struct super_block * sb; 70 - struct ufs_sb_private_info * uspi; 71 - void *p; 72 - u64 frag1, frag2, frag3, frag4, block1, block2; 73 - unsigned frag_to_free, free_count; 74 - unsigned i, tmp; 75 - 76 - UFSD("ENTER: ino %lu\n", inode->i_ino); 77 - 78 - sb = inode->i_sb; 79 - uspi = UFS_SB(sb)->s_uspi; 80 - 81 - frag_to_free = 0; 82 - free_count = 0; 83 - 84 - frag1 = DIRECT_FRAGMENT; 85 - frag4 = min_t(u64, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag); 86 - frag2 = ((frag1 & uspi->s_fpbmask) ? ((frag1 | uspi->s_fpbmask) + 1) : frag1); 87 - frag3 = frag4 & ~uspi->s_fpbmask; 88 - block1 = block2 = 0; 89 - if (frag2 > frag3) { 90 - frag2 = frag4; 91 - frag3 = frag4 = 0; 92 - } else if (frag2 < frag3) { 93 - block1 = ufs_fragstoblks (frag2); 94 - block2 = ufs_fragstoblks (frag3); 95 - } 96 - 97 - UFSD("ino %lu, frag1 %llu, frag2 %llu, block1 %llu, block2 %llu," 98 - " frag3 %llu, frag4 %llu\n", inode->i_ino, 99 - (unsigned long long)frag1, (unsigned long long)frag2, 100 - (unsigned long long)block1, (unsigned long long)block2, 101 - (unsigned long long)frag3, (unsigned long long)frag4); 102 - 103 - if (frag1 >= frag2) 104 - goto next1; 105 - 106 - /* 107 - * Free first free fragments 108 - */ 109 - p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag1)); 110 - tmp = ufs_data_ptr_to_cpu(sb, p); 111 - if (!tmp ) 112 - ufs_panic (sb, "ufs_trunc_direct", "internal error"); 113 - frag2 -= frag1; 114 - frag1 = ufs_fragnum (frag1); 115 - 116 - ufs_free_fragments(inode, tmp + frag1, frag2); 117 - mark_inode_dirty(inode); 118 - frag_to_free = tmp + frag1; 119 - 120 - next1: 121 - /* 122 - * Free whole blocks 123 - */ 124 - for (i = block1 ; i < block2; i++) 
{ 125 - p = ufs_get_direct_data_ptr(uspi, ufsi, i); 126 - tmp = ufs_data_ptr_to_cpu(sb, p); 127 - if (!tmp) 128 - continue; 129 - write_seqlock(&ufsi->meta_lock); 130 - ufs_data_ptr_clear(uspi, p); 131 - write_sequnlock(&ufsi->meta_lock); 132 - 133 - if (free_count == 0) { 134 - frag_to_free = tmp; 135 - free_count = uspi->s_fpb; 136 - } else if (free_count > 0 && frag_to_free == tmp - free_count) 137 - free_count += uspi->s_fpb; 138 - else { 139 - ufs_free_blocks (inode, frag_to_free, free_count); 140 - frag_to_free = tmp; 141 - free_count = uspi->s_fpb; 142 - } 143 - mark_inode_dirty(inode); 144 - } 145 - 146 - if (free_count > 0) 147 - ufs_free_blocks (inode, frag_to_free, free_count); 148 - 149 - if (frag3 >= frag4) 150 - goto next3; 151 - 152 - /* 153 - * Free last free fragments 154 - */ 155 - p = ufs_get_direct_data_ptr(uspi, ufsi, ufs_fragstoblks(frag3)); 156 - tmp = ufs_data_ptr_to_cpu(sb, p); 157 - if (!tmp ) 158 - ufs_panic(sb, "ufs_truncate_direct", "internal error"); 159 - frag4 = ufs_fragnum (frag4); 160 - write_seqlock(&ufsi->meta_lock); 161 - ufs_data_ptr_clear(uspi, p); 162 - write_sequnlock(&ufsi->meta_lock); 163 - 164 - ufs_free_fragments (inode, tmp, frag4); 165 - mark_inode_dirty(inode); 166 - next3: 167 - 168 - UFSD("EXIT: ino %lu\n", inode->i_ino); 169 - } 170 - 171 - 172 - static void ufs_trunc_indirect(struct inode *inode, u64 offset, void *p) 173 - { 174 - struct super_block * sb; 175 - struct ufs_sb_private_info * uspi; 176 - struct ufs_buffer_head * ind_ubh; 177 - void *ind; 178 - u64 tmp, indirect_block, i, frag_to_free; 179 - unsigned free_count; 180 - 181 - UFSD("ENTER: ino %lu, offset %llu, p: %p\n", 182 - inode->i_ino, (unsigned long long)offset, p); 183 - 184 - BUG_ON(!p); 185 - 186 - sb = inode->i_sb; 187 - uspi = UFS_SB(sb)->s_uspi; 188 - 189 - frag_to_free = 0; 190 - free_count = 0; 191 - 192 - tmp = ufs_data_ptr_to_cpu(sb, p); 193 - if (!tmp) 194 - return; 195 - ind_ubh = ubh_bread(sb, tmp, uspi->s_bsize); 196 - if (!ind_ubh) { 
197 - write_seqlock(&UFS_I(inode)->meta_lock); 198 - ufs_data_ptr_clear(uspi, p); 199 - write_sequnlock(&UFS_I(inode)->meta_lock); 200 - return; 201 - } 202 - 203 - indirect_block = (DIRECT_BLOCK > offset) ? (DIRECT_BLOCK - offset) : 0; 204 - for (i = indirect_block; i < uspi->s_apb; i++) { 205 - ind = ubh_get_data_ptr(uspi, ind_ubh, i); 206 - tmp = ufs_data_ptr_to_cpu(sb, ind); 207 - if (!tmp) 208 - continue; 209 - 210 - write_seqlock(&UFS_I(inode)->meta_lock); 211 - ufs_data_ptr_clear(uspi, ind); 212 - write_sequnlock(&UFS_I(inode)->meta_lock); 213 - ubh_mark_buffer_dirty(ind_ubh); 214 - if (free_count == 0) { 215 - frag_to_free = tmp; 216 - free_count = uspi->s_fpb; 217 - } else if (free_count > 0 && frag_to_free == tmp - free_count) 218 - free_count += uspi->s_fpb; 219 - else { 220 - ufs_free_blocks (inode, frag_to_free, free_count); 221 - frag_to_free = tmp; 222 - free_count = uspi->s_fpb; 223 - } 224 - 225 - mark_inode_dirty(inode); 226 - } 227 - 228 - if (free_count > 0) { 229 - ufs_free_blocks (inode, frag_to_free, free_count); 230 - } 231 - for (i = 0; i < uspi->s_apb; i++) 232 - if (!ufs_is_data_ptr_zero(uspi, 233 - ubh_get_data_ptr(uspi, ind_ubh, i))) 234 - break; 235 - if (i >= uspi->s_apb) { 236 - tmp = ufs_data_ptr_to_cpu(sb, p); 237 - write_seqlock(&UFS_I(inode)->meta_lock); 238 - ufs_data_ptr_clear(uspi, p); 239 - write_sequnlock(&UFS_I(inode)->meta_lock); 240 - 241 - ubh_bforget(ind_ubh); 242 - ufs_free_blocks (inode, tmp, uspi->s_fpb); 243 - mark_inode_dirty(inode); 244 - ind_ubh = NULL; 245 - } 246 - if (IS_SYNC(inode) && ind_ubh && ubh_buffer_dirty(ind_ubh)) 247 - ubh_sync_block(ind_ubh); 248 - ubh_brelse (ind_ubh); 249 - 250 - UFSD("EXIT: ino %lu\n", inode->i_ino); 251 - } 252 - 253 - static void ufs_trunc_dindirect(struct inode *inode, u64 offset, void *p) 254 - { 255 - struct super_block * sb; 256 - struct ufs_sb_private_info * uspi; 257 - struct ufs_buffer_head *dind_bh; 258 - u64 i, tmp, dindirect_block; 259 - void *dind; 260 - 261 - 
UFSD("ENTER: ino %lu\n", inode->i_ino); 262 - 263 - sb = inode->i_sb; 264 - uspi = UFS_SB(sb)->s_uspi; 265 - 266 - dindirect_block = (DIRECT_BLOCK > offset) 267 - ? ((DIRECT_BLOCK - offset) >> uspi->s_apbshift) : 0; 268 - 269 - tmp = ufs_data_ptr_to_cpu(sb, p); 270 - if (!tmp) 271 - return; 272 - dind_bh = ubh_bread(sb, tmp, uspi->s_bsize); 273 - if (!dind_bh) { 274 - write_seqlock(&UFS_I(inode)->meta_lock); 275 - ufs_data_ptr_clear(uspi, p); 276 - write_sequnlock(&UFS_I(inode)->meta_lock); 277 - return; 278 - } 279 - 280 - for (i = dindirect_block ; i < uspi->s_apb ; i++) { 281 - dind = ubh_get_data_ptr(uspi, dind_bh, i); 282 - tmp = ufs_data_ptr_to_cpu(sb, dind); 283 - if (!tmp) 284 - continue; 285 - ufs_trunc_indirect (inode, offset + (i << uspi->s_apbshift), dind); 286 - ubh_mark_buffer_dirty(dind_bh); 287 - } 288 - 289 - for (i = 0; i < uspi->s_apb; i++) 290 - if (!ufs_is_data_ptr_zero(uspi, 291 - ubh_get_data_ptr(uspi, dind_bh, i))) 292 - break; 293 - if (i >= uspi->s_apb) { 294 - tmp = ufs_data_ptr_to_cpu(sb, p); 295 - write_seqlock(&UFS_I(inode)->meta_lock); 296 - ufs_data_ptr_clear(uspi, p); 297 - write_sequnlock(&UFS_I(inode)->meta_lock); 298 - 299 - ubh_bforget(dind_bh); 300 - ufs_free_blocks(inode, tmp, uspi->s_fpb); 301 - mark_inode_dirty(inode); 302 - dind_bh = NULL; 303 - } 304 - if (IS_SYNC(inode) && dind_bh && ubh_buffer_dirty(dind_bh)) 305 - ubh_sync_block(dind_bh); 306 - ubh_brelse (dind_bh); 307 - 308 - UFSD("EXIT: ino %lu\n", inode->i_ino); 309 - } 310 - 311 - static void ufs_trunc_tindirect(struct inode *inode) 312 - { 313 - struct super_block *sb = inode->i_sb; 314 - struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; 315 - struct ufs_inode_info *ufsi = UFS_I(inode); 316 - struct ufs_buffer_head * tind_bh; 317 - u64 tindirect_block, tmp, i; 318 - void *tind, *p; 319 - 320 - UFSD("ENTER: ino %lu\n", inode->i_ino); 321 - 322 - tindirect_block = (DIRECT_BLOCK > (UFS_NDADDR + uspi->s_apb + uspi->s_2apb)) 323 - ? 
((DIRECT_BLOCK - UFS_NDADDR - uspi->s_apb - uspi->s_2apb) >> uspi->s_2apbshift) : 0; 324 - 325 - p = ufs_get_direct_data_ptr(uspi, ufsi, UFS_TIND_BLOCK); 326 - if (!(tmp = ufs_data_ptr_to_cpu(sb, p))) 327 - return; 328 - tind_bh = ubh_bread (sb, tmp, uspi->s_bsize); 329 - if (!tind_bh) { 330 - write_seqlock(&ufsi->meta_lock); 331 - ufs_data_ptr_clear(uspi, p); 332 - write_sequnlock(&ufsi->meta_lock); 333 - return; 334 - } 335 - 336 - for (i = tindirect_block ; i < uspi->s_apb ; i++) { 337 - tind = ubh_get_data_ptr(uspi, tind_bh, i); 338 - ufs_trunc_dindirect(inode, UFS_NDADDR + 339 - uspi->s_apb + ((i + 1) << uspi->s_2apbshift), tind); 340 - ubh_mark_buffer_dirty(tind_bh); 341 - } 342 - for (i = 0; i < uspi->s_apb; i++) 343 - if (!ufs_is_data_ptr_zero(uspi, 344 - ubh_get_data_ptr(uspi, tind_bh, i))) 345 - break; 346 - if (i >= uspi->s_apb) { 347 - tmp = ufs_data_ptr_to_cpu(sb, p); 348 - write_seqlock(&ufsi->meta_lock); 349 - ufs_data_ptr_clear(uspi, p); 350 - write_sequnlock(&ufsi->meta_lock); 351 - 352 - ubh_bforget(tind_bh); 353 - ufs_free_blocks(inode, tmp, uspi->s_fpb); 354 - mark_inode_dirty(inode); 355 - tind_bh = NULL; 356 - } 357 - if (IS_SYNC(inode) && tind_bh && ubh_buffer_dirty(tind_bh)) 358 - ubh_sync_block(tind_bh); 359 - ubh_brelse (tind_bh); 360 - 361 - UFSD("EXIT: ino %lu\n", inode->i_ino); 362 - } 363 - 364 - static int ufs_alloc_lastblock(struct inode *inode, loff_t size) 365 - { 366 - int err = 0; 367 - struct super_block *sb = inode->i_sb; 368 - struct address_space *mapping = inode->i_mapping; 369 - struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; 370 - unsigned i, end; 371 - sector_t lastfrag; 372 - struct page *lastpage; 373 - struct buffer_head *bh; 374 - u64 phys64; 375 - 376 - lastfrag = (size + uspi->s_fsize - 1) >> uspi->s_fshift; 377 - 378 - if (!lastfrag) 379 - goto out; 380 - 381 - lastfrag--; 382 - 383 - lastpage = ufs_get_locked_page(mapping, lastfrag >> 384 - (PAGE_CACHE_SHIFT - inode->i_blkbits)); 385 - if (IS_ERR(lastpage)) 
{ 386 - err = -EIO; 387 - goto out; 388 - } 389 - 390 - end = lastfrag & ((1 << (PAGE_CACHE_SHIFT - inode->i_blkbits)) - 1); 391 - bh = page_buffers(lastpage); 392 - for (i = 0; i < end; ++i) 393 - bh = bh->b_this_page; 394 - 395 - 396 - err = ufs_getfrag_block(inode, lastfrag, bh, 1); 397 - 398 - if (unlikely(err)) 399 - goto out_unlock; 400 - 401 - if (buffer_new(bh)) { 402 - clear_buffer_new(bh); 403 - unmap_underlying_metadata(bh->b_bdev, 404 - bh->b_blocknr); 405 - /* 406 - * we do not zeroize fragment, because of 407 - * if it maped to hole, it already contains zeroes 408 - */ 409 - set_buffer_uptodate(bh); 410 - mark_buffer_dirty(bh); 411 - set_page_dirty(lastpage); 412 - } 413 - 414 - if (lastfrag >= UFS_IND_FRAGMENT) { 415 - end = uspi->s_fpb - ufs_fragnum(lastfrag) - 1; 416 - phys64 = bh->b_blocknr + 1; 417 - for (i = 0; i < end; ++i) { 418 - bh = sb_getblk(sb, i + phys64); 419 - lock_buffer(bh); 420 - memset(bh->b_data, 0, sb->s_blocksize); 421 - set_buffer_uptodate(bh); 422 - mark_buffer_dirty(bh); 423 - unlock_buffer(bh); 424 - sync_dirty_buffer(bh); 425 - brelse(bh); 426 - } 427 - } 428 - out_unlock: 429 - ufs_put_locked_page(lastpage); 430 - out: 431 - return err; 432 - } 433 - 434 - static void __ufs_truncate_blocks(struct inode *inode) 435 - { 436 - struct ufs_inode_info *ufsi = UFS_I(inode); 437 - struct super_block *sb = inode->i_sb; 438 - struct ufs_sb_private_info *uspi = UFS_SB(sb)->s_uspi; 439 - 440 - mutex_lock(&ufsi->truncate_mutex); 441 - ufs_trunc_direct(inode); 442 - ufs_trunc_indirect(inode, UFS_IND_BLOCK, 443 - ufs_get_direct_data_ptr(uspi, ufsi, UFS_IND_BLOCK)); 444 - ufs_trunc_dindirect(inode, UFS_IND_BLOCK + uspi->s_apb, 445 - ufs_get_direct_data_ptr(uspi, ufsi, UFS_DIND_BLOCK)); 446 - ufs_trunc_tindirect(inode); 447 - ufsi->i_lastfrag = DIRECT_FRAGMENT; 448 - mutex_unlock(&ufsi->truncate_mutex); 449 - } 450 - 451 - int ufs_truncate(struct inode *inode, loff_t size) 452 - { 453 - int err = 0; 454 - 455 - UFSD("ENTER: ino %lu, 
i_size: %llu, old_i_size: %llu\n", 456 - inode->i_ino, (unsigned long long)size, 457 - (unsigned long long)i_size_read(inode)); 458 - 459 - if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 460 - S_ISLNK(inode->i_mode))) 461 - return -EINVAL; 462 - if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) 463 - return -EPERM; 464 - 465 - err = ufs_alloc_lastblock(inode, size); 466 - 467 - if (err) 468 - goto out; 469 - 470 - block_truncate_page(inode->i_mapping, size, ufs_getfrag_block); 471 - 472 - truncate_setsize(inode, size); 473 - 474 - __ufs_truncate_blocks(inode); 475 - inode->i_mtime = inode->i_ctime = CURRENT_TIME_SEC; 476 - mark_inode_dirty(inode); 477 - out: 478 - UFSD("EXIT: err %d\n", err); 479 - return err; 480 - } 481 - 482 - void ufs_truncate_blocks(struct inode *inode) 483 - { 484 - if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) || 485 - S_ISLNK(inode->i_mode))) 486 - return; 487 - if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) 488 - return; 489 - __ufs_truncate_blocks(inode); 490 - } 491 - 492 - int ufs_setattr(struct dentry *dentry, struct iattr *attr) 493 - { 494 - struct inode *inode = d_inode(dentry); 495 - unsigned int ia_valid = attr->ia_valid; 496 - int error; 497 - 498 - error = inode_change_ok(inode, attr); 499 - if (error) 500 - return error; 501 - 502 - if (ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) { 503 - error = ufs_truncate(inode, attr->ia_size); 504 - if (error) 505 - return error; 506 - } 507 - 508 - setattr_copy(inode, attr); 509 - mark_inode_dirty(inode); 510 - return 0; 511 - } 512 - 513 - const struct inode_operations ufs_file_inode_operations = { 514 - .setattr = ufs_setattr, 515 - };
+1 -5
fs/ufs/ufs.h
··· 122 122 extern int ufs_write_inode (struct inode *, struct writeback_control *); 123 123 extern int ufs_sync_inode (struct inode *); 124 124 extern void ufs_evict_inode (struct inode *); 125 - extern int ufs_getfrag_block (struct inode *inode, sector_t fragment, struct buffer_head *bh_result, int create); 125 + extern int ufs_setattr(struct dentry *dentry, struct iattr *attr); 126 126 127 127 /* namei.c */ 128 128 extern const struct file_operations ufs_dir_operations; ··· 139 139 /* symlink.c */ 140 140 extern const struct inode_operations ufs_fast_symlink_inode_operations; 141 141 extern const struct inode_operations ufs_symlink_inode_operations; 142 - 143 - /* truncate.c */ 144 - extern void ufs_truncate_blocks(struct inode *); 145 - extern int ufs_setattr(struct dentry *dentry, struct iattr *attr); 146 142 147 143 static inline struct ufs_sb_info *UFS_SB(struct super_block *sb) 148 144 {