Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

bcachefs: bch2_extent_update_i_size_sectors()

In the io path, when we do the extent update, we also have to update the
inode - for the i_size and i_sectors updates, as well as for bi_journal_seq
for fsync.

This factors that out into a new helper which will be used in the new
nocow mode, in the unwritten extent conversion path.

Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>

+60 -54
fs/bcachefs/io.c
··· 243 243 return ret; 244 244 } 245 245 246 + static inline int bch2_extent_update_i_size_sectors(struct btree_trans *trans, 247 + struct btree_iter *extent_iter, 248 + u64 new_i_size, 249 + s64 i_sectors_delta) 250 + { 251 + struct btree_iter iter; 252 + struct bkey_i *k; 253 + struct bkey_i_inode_v3 *inode; 254 + unsigned inode_update_flags = BTREE_UPDATE_NOJOURNAL; 255 + int ret; 256 + 257 + bch2_trans_iter_init(trans, &iter, BTREE_ID_inodes, 258 + SPOS(0, 259 + extent_iter->pos.inode, 260 + extent_iter->snapshot), 261 + BTREE_ITER_INTENT|BTREE_ITER_CACHED); 262 + k = bch2_bkey_get_mut(trans, &iter); 263 + ret = PTR_ERR_OR_ZERO(k); 264 + if (unlikely(ret)) 265 + goto err; 266 + 267 + if (unlikely(k->k.type != KEY_TYPE_inode_v3)) { 268 + k = bch2_inode_to_v3(trans, k); 269 + ret = PTR_ERR_OR_ZERO(k); 270 + if (unlikely(ret)) 271 + goto err; 272 + } 273 + 274 + inode = bkey_i_to_inode_v3(k); 275 + 276 + if (!(le64_to_cpu(inode->v.bi_flags) & BCH_INODE_I_SIZE_DIRTY) && 277 + new_i_size > le64_to_cpu(inode->v.bi_size)) { 278 + inode->v.bi_size = cpu_to_le64(new_i_size); 279 + inode_update_flags = 0; 280 + } 281 + 282 + if (i_sectors_delta) { 283 + le64_add_cpu(&inode->v.bi_sectors, i_sectors_delta); 284 + inode_update_flags = 0; 285 + } 286 + 287 + if (inode->k.p.snapshot != iter.snapshot) { 288 + inode->k.p.snapshot = iter.snapshot; 289 + inode_update_flags = 0; 290 + } 291 + 292 + ret = bch2_trans_update(trans, &iter, &inode->k_i, 293 + BTREE_UPDATE_INTERNAL_SNAPSHOT_NODE| 294 + inode_update_flags); 295 + err: 296 + bch2_trans_iter_exit(trans, &iter); 297 + return ret; 298 + } 299 + 246 300 int bch2_extent_update(struct btree_trans *trans, 247 301 subvol_inum inum, 248 302 struct btree_iter *iter, ··· 306 252 s64 *i_sectors_delta_total, 307 253 bool check_enospc) 308 254 { 309 - struct btree_iter inode_iter = { NULL }; 310 - struct bkey_s_c inode_k; 311 - struct bkey_s_c_inode_v3 inode; 312 - struct bkey_i_inode_v3 *new_inode; 313 255 struct bpos next_pos; 314 
256 bool usage_increasing; 315 - unsigned inode_update_flags = BTREE_UPDATE_NOJOURNAL; 316 257 s64 i_sectors_delta = 0, disk_sectors_delta = 0; 317 258 int ret; 318 259 ··· 325 276 if (ret) 326 277 return ret; 327 278 328 - new_i_size = min(k->k.p.offset << 9, new_i_size); 329 279 next_pos = k->k.p; 330 280 331 281 ret = bch2_sum_sector_overwrites(trans, iter, k, ··· 344 296 return ret; 345 297 } 346 298 347 - bch2_trans_iter_init(trans, &inode_iter, BTREE_ID_inodes, 348 - SPOS(0, inum.inum, iter->snapshot), 349 - BTREE_ITER_INTENT|BTREE_ITER_CACHED); 350 - inode_k = bch2_btree_iter_peek_slot(&inode_iter); 351 - ret = bkey_err(inode_k); 352 - if (unlikely(ret)) 353 - goto err; 354 - 355 - ret = bkey_is_inode(inode_k.k) ? 0 : -ENOENT; 356 - if (unlikely(ret)) 357 - goto err; 358 - 359 - if (unlikely(inode_k.k->type != KEY_TYPE_inode_v3)) { 360 - inode_k = bch2_inode_to_v3(trans, inode_k); 361 - ret = bkey_err(inode_k); 362 - if (unlikely(ret)) 363 - goto err; 364 - } 365 - 366 - inode = bkey_s_c_to_inode_v3(inode_k); 367 - 368 - new_inode = bch2_trans_kmalloc(trans, bkey_bytes(inode_k.k)); 369 - ret = PTR_ERR_OR_ZERO(new_inode); 370 - if (unlikely(ret)) 371 - goto err; 372 - 373 - bkey_reassemble(&new_inode->k_i, inode.s_c); 374 - 375 - if (!(le64_to_cpu(inode.v->bi_flags) & BCH_INODE_I_SIZE_DIRTY) && 376 - new_i_size > le64_to_cpu(inode.v->bi_size)) { 377 - new_inode->v.bi_size = cpu_to_le64(new_i_size); 378 - inode_update_flags = 0; 379 - } 380 - 381 - if (i_sectors_delta) { 382 - le64_add_cpu(&new_inode->v.bi_sectors, i_sectors_delta); 383 - inode_update_flags = 0; 384 - } 385 - 386 - new_inode->k.p.snapshot = iter->snapshot; 387 - 388 299 /* 389 300 * Note: 390 - * We always have to do an inode updated - even when i_size/i_sectors 301 + * We always have to do an inode update - even when i_size/i_sectors 391 302 * aren't changing - for fsync to work properly; fsync relies on 392 303 * inode->bi_journal_seq which is updated by the trigger code: 393 304 */ 394 - 
ret = bch2_trans_update(trans, &inode_iter, &new_inode->k_i, 395 - inode_update_flags) ?: 305 + ret = bch2_extent_update_i_size_sectors(trans, iter, 306 + min(k->k.p.offset << 9, new_i_size), 307 + i_sectors_delta) ?: 396 308 bch2_trans_update(trans, iter, k, 0) ?: 397 309 bch2_trans_commit(trans, disk_res, NULL, 398 310 BTREE_INSERT_NOCHECK_RW| 399 311 BTREE_INSERT_NOFAIL); 400 312 if (unlikely(ret)) 401 - goto err; 313 + return ret; 402 314 403 315 if (i_sectors_delta_total) 404 316 *i_sectors_delta_total += i_sectors_delta; 405 317 bch2_btree_iter_set_pos(iter, next_pos); 406 - err: 407 - bch2_trans_iter_exit(trans, &inode_iter); 408 - return ret; 318 + return 0; 409 319 } 410 320 411 321 /* Overwrites whatever was present with zeroes: */