Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

xfs: fold dfops into the transaction

struct xfs_defer_ops has now been reduced to a single list_head. The
external dfops mechanism is unused and thus everywhere a (permanent)
transaction is accessible the associated dfops structure is as well.

Remove the xfs_defer_ops structure and fold the list_head into the
transaction. Also remove the last remnant of external dfops in
xfs_trans_dup().

Signed-off-by: Brian Foster <bfoster@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>

Authored by Brian Foster; committed by Darrick J. Wong.

Commit 9d9e6233 (parent c03edc9e)

+46 -96
-1
fs/xfs/libxfs/xfs_bmap.c
··· 4286 4286 bma.ip = ip; 4287 4287 bma.total = total; 4288 4288 bma.datatype = 0; 4289 - ASSERT(!tp || tp->t_dfops); 4290 4289 4291 4290 while (bno < end && n < *nmap) { 4292 4291 bool need_alloc = false, wasdelay = false;
-1
fs/xfs/libxfs/xfs_btree.h
··· 7 7 #define __XFS_BTREE_H__ 8 8 9 9 struct xfs_buf; 10 - struct xfs_defer_ops; 11 10 struct xfs_inode; 12 11 struct xfs_mount; 13 12 struct xfs_trans;
-1
fs/xfs/libxfs/xfs_da_btree.h
··· 7 7 #ifndef __XFS_DA_BTREE_H__ 8 8 #define __XFS_DA_BTREE_H__ 9 9 10 - struct xfs_defer_ops; 11 10 struct xfs_inode; 12 11 struct xfs_trans; 13 12 struct zone;
+19 -48
fs/xfs/libxfs/xfs_defer.c
··· 183 183 xfs_defer_create_intents( 184 184 struct xfs_trans *tp) 185 185 { 186 - struct xfs_defer_ops *dop = tp->t_dfops; 187 186 struct list_head *li; 188 187 struct xfs_defer_pending *dfp; 189 188 190 - list_for_each_entry(dfp, &dop->dop_intake, dfp_list) { 189 + list_for_each_entry(dfp, &tp->t_dfops, dfp_list) { 191 190 dfp->dfp_intent = dfp->dfp_type->create_intent(tp, 192 191 dfp->dfp_count); 193 192 trace_xfs_defer_create_intent(tp->t_mountp, dfp); ··· 203 204 struct xfs_trans *tp, 204 205 struct list_head *dop_pending) 205 206 { 206 - struct xfs_defer_ops *dop = tp->t_dfops; 207 207 struct xfs_defer_pending *dfp; 208 208 209 - trace_xfs_defer_trans_abort(tp->t_mountp, dop, _RET_IP_); 209 + trace_xfs_defer_trans_abort(tp, _RET_IP_); 210 210 211 211 /* Abort intent items that don't have a done item. */ 212 212 list_for_each_entry(dfp, dop_pending, dfp_list) { ··· 264 266 } 265 267 } 266 268 267 - trace_xfs_defer_trans_roll(tp->t_mountp, tp->t_dfops, _RET_IP_); 269 + trace_xfs_defer_trans_roll(tp, _RET_IP_); 268 270 269 271 /* Roll the transaction. */ 270 272 error = xfs_trans_roll(tpp); 271 273 tp = *tpp; 272 274 if (error) { 273 - trace_xfs_defer_trans_roll_error(tp->t_mountp, 274 - tp->t_dfops, error); 275 + trace_xfs_defer_trans_roll_error(tp, error); 275 276 return error; 276 277 } 277 278 ··· 294 297 xfs_defer_reset( 295 298 struct xfs_trans *tp) 296 299 { 297 - ASSERT(list_empty(&tp->t_dfops->dop_intake)); 300 + ASSERT(list_empty(&tp->t_dfops)); 298 301 299 302 /* 300 303 * Low mode state transfers across transaction rolls to mirror dfops ··· 355 358 356 359 ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES); 357 360 358 - trace_xfs_defer_finish((*tp)->t_mountp, (*tp)->t_dfops, _RET_IP_); 361 + trace_xfs_defer_finish(*tp, _RET_IP_); 359 362 360 363 /* Until we run out of pending work to finish... 
*/ 361 - while (!list_empty(&dop_pending) || 362 - !list_empty(&(*tp)->t_dfops->dop_intake)) { 364 + while (!list_empty(&dop_pending) || !list_empty(&(*tp)->t_dfops)) { 363 365 /* log intents and pull in intake items */ 364 366 xfs_defer_create_intents(*tp); 365 - list_splice_tail_init(&(*tp)->t_dfops->dop_intake, 366 - &dop_pending); 367 + list_splice_tail_init(&(*tp)->t_dfops, &dop_pending); 367 368 368 369 /* 369 370 * Roll the transaction. ··· 433 438 if (error) { 434 439 xfs_defer_trans_abort(*tp, &dop_pending); 435 440 xfs_force_shutdown((*tp)->t_mountp, SHUTDOWN_CORRUPT_INCORE); 436 - trace_xfs_defer_finish_error((*tp)->t_mountp, (*tp)->t_dfops, 437 - error); 441 + trace_xfs_defer_finish_error(*tp, error); 438 442 xfs_defer_cancel_list((*tp)->t_mountp, &dop_pending); 439 443 xfs_defer_cancel(*tp); 440 444 return error; 441 445 } 442 446 443 - trace_xfs_defer_finish_done((*tp)->t_mountp, (*tp)->t_dfops, _RET_IP_); 447 + trace_xfs_defer_finish_done(*tp, _RET_IP_); 444 448 return 0; 445 449 } 446 450 ··· 474 480 { 475 481 struct xfs_mount *mp = tp->t_mountp; 476 482 477 - trace_xfs_defer_cancel(mp, tp->t_dfops, _RET_IP_); 478 - xfs_defer_cancel_list(mp, &tp->t_dfops->dop_intake); 483 + trace_xfs_defer_cancel(tp, _RET_IP_); 484 + xfs_defer_cancel_list(mp, &tp->t_dfops); 479 485 } 480 486 481 487 /* Add an item for later deferred processing. */ ··· 485 491 enum xfs_defer_ops_type type, 486 492 struct list_head *li) 487 493 { 488 - struct xfs_defer_ops *dop = tp->t_dfops; 489 494 struct xfs_defer_pending *dfp = NULL; 490 495 491 496 ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES); ··· 494 501 * If the last pending item has the same type, reuse it. Else, 495 502 * create a new pending item at the end of the intake list. 
496 503 */ 497 - if (!list_empty(&dop->dop_intake)) { 498 - dfp = list_last_entry(&dop->dop_intake, 504 + if (!list_empty(&tp->t_dfops)) { 505 + dfp = list_last_entry(&tp->t_dfops, 499 506 struct xfs_defer_pending, dfp_list); 500 507 if (dfp->dfp_type->type != type || 501 508 (dfp->dfp_type->max_items && ··· 510 517 dfp->dfp_done = NULL; 511 518 dfp->dfp_count = 0; 512 519 INIT_LIST_HEAD(&dfp->dfp_work); 513 - list_add_tail(&dfp->dfp_list, &dop->dop_intake); 520 + list_add_tail(&dfp->dfp_list, &tp->t_dfops); 514 521 } 515 522 516 523 list_add_tail(li, &dfp->dfp_work); ··· 525 532 defer_op_types[type->type] = type; 526 533 } 527 534 528 - /* Initialize a deferred operation. */ 529 - void 530 - xfs_defer_init( 531 - struct xfs_trans *tp, 532 - struct xfs_defer_ops *dop) 533 - { 534 - struct xfs_mount *mp = NULL; 535 - 536 - memset(dop, 0, sizeof(struct xfs_defer_ops)); 537 - INIT_LIST_HEAD(&dop->dop_intake); 538 - if (tp) { 539 - ASSERT(tp->t_firstblock == NULLFSBLOCK); 540 - tp->t_dfops = dop; 541 - mp = tp->t_mountp; 542 - } 543 - trace_xfs_defer_init(mp, dop, _RET_IP_); 544 - } 545 - 546 535 /* 547 - * Move state from one xfs_defer_ops to another and reset the source to initial 548 - * state. This is primarily used to carry state forward across transaction rolls 549 - * with internal dfops. 536 + * Move deferred ops from one transaction to another and reset the source to 537 + * initial state. This is primarily used to carry state forward across 538 + * transaction rolls with pending dfops. 550 539 */ 551 540 void 552 541 xfs_defer_move( 553 542 struct xfs_trans *dtp, 554 543 struct xfs_trans *stp) 555 544 { 556 - struct xfs_defer_ops *dst = dtp->t_dfops; 557 - struct xfs_defer_ops *src = stp->t_dfops; 558 - ASSERT(dst != src); 559 - 560 - list_splice_init(&src->dop_intake, &dst->dop_intake); 545 + list_splice_init(&stp->t_dfops, &dtp->t_dfops); 561 546 562 547 /* 563 548 * Low free space mode was historically controlled by a dfops field.
-2
fs/xfs/libxfs/xfs_defer.h
··· 7 7 #define __XFS_DEFER_H__ 8 8 9 9 struct xfs_defer_op_type; 10 - struct xfs_defer_ops; 11 10 12 11 /* 13 12 * Save a log intent item and a list of extents, so that we can replay ··· 39 40 int xfs_defer_finish_noroll(struct xfs_trans **tp); 40 41 int xfs_defer_finish(struct xfs_trans **tp); 41 42 void xfs_defer_cancel(struct xfs_trans *); 42 - void xfs_defer_init(struct xfs_trans *tp, struct xfs_defer_ops *dop); 43 43 void xfs_defer_move(struct xfs_trans *dtp, struct xfs_trans *stp); 44 44 45 45 /* Description of a deferred type. */
-2
fs/xfs/libxfs/xfs_dir2.c
··· 424 424 int v; /* type-checking value */ 425 425 426 426 ASSERT(S_ISDIR(VFS_I(dp)->i_mode)); 427 - ASSERT(tp->t_dfops); 428 427 XFS_STATS_INC(dp->i_mount, xs_dir_remove); 429 428 430 429 args = kmem_zalloc(sizeof(*args), KM_SLEEP | KM_NOFS); ··· 482 483 int v; /* type-checking value */ 483 484 484 485 ASSERT(S_ISDIR(VFS_I(dp)->i_mode)); 485 - ASSERT(tp->t_dfops); 486 486 487 487 rval = xfs_dir_ino_validate(tp->t_mountp, inum); 488 488 if (rval)
-1
fs/xfs/libxfs/xfs_dir2.h
··· 9 9 #include "xfs_da_format.h" 10 10 #include "xfs_da_btree.h" 11 11 12 - struct xfs_defer_ops; 13 12 struct xfs_da_args; 14 13 struct xfs_inode; 15 14 struct xfs_mount;
-1
fs/xfs/xfs_inode.h
··· 15 15 struct xfs_dinode; 16 16 struct xfs_inode; 17 17 struct xfs_buf; 18 - struct xfs_defer_ops; 19 18 struct xfs_bmbt_irec; 20 19 struct xfs_inode_log_item; 21 20 struct xfs_mount;
+2 -3
fs/xfs/xfs_reflink.c
··· 502 502 if (error) 503 503 break; 504 504 } else if (del.br_state == XFS_EXT_UNWRITTEN || cancel_real) { 505 - ASSERT((*tpp)->t_dfops); 506 505 ASSERT((*tpp)->t_firstblock == NULLFSBLOCK); 507 506 508 507 /* Free the CoW orphan record. */ ··· 677 678 goto prev_extent; 678 679 679 680 /* Unmap the old blocks in the data fork. */ 680 - ASSERT(tp->t_dfops && tp->t_firstblock == NULLFSBLOCK); 681 + ASSERT(tp->t_firstblock == NULLFSBLOCK); 681 682 rlen = del.br_blockcount; 682 683 error = __xfs_bunmapi(tp, ip, del.br_startoff, &rlen, 0, 1); 683 684 if (error) ··· 1020 1021 /* Unmap the old blocks in the data fork. */ 1021 1022 rlen = unmap_len; 1022 1023 while (rlen) { 1023 - ASSERT(tp->t_dfops && tp->t_firstblock == NULLFSBLOCK); 1024 + ASSERT(tp->t_firstblock == NULLFSBLOCK); 1024 1025 error = __xfs_bunmapi(tp, ip, destoff, &rlen, 0, 1); 1025 1026 if (error) 1026 1027 goto out_cancel;
+18 -22
fs/xfs/xfs_trace.h
··· 2213 2213 2214 2214 /* deferred ops */ 2215 2215 struct xfs_defer_pending; 2216 - struct xfs_defer_ops; 2217 2216 2218 2217 DECLARE_EVENT_CLASS(xfs_defer_class, 2219 - TP_PROTO(struct xfs_mount *mp, struct xfs_defer_ops *dop, 2220 - unsigned long caller_ip), 2221 - TP_ARGS(mp, dop, caller_ip), 2218 + TP_PROTO(struct xfs_trans *tp, unsigned long caller_ip), 2219 + TP_ARGS(tp, caller_ip), 2222 2220 TP_STRUCT__entry( 2223 2221 __field(dev_t, dev) 2224 - __field(void *, dop) 2222 + __field(struct xfs_trans *, tp) 2225 2223 __field(char, committed) 2226 2224 __field(unsigned long, caller_ip) 2227 2225 ), 2228 2226 TP_fast_assign( 2229 - __entry->dev = mp ? mp->m_super->s_dev : 0; 2230 - __entry->dop = dop; 2227 + __entry->dev = tp->t_mountp->m_super->s_dev; 2228 + __entry->tp = tp; 2231 2229 __entry->caller_ip = caller_ip; 2232 2230 ), 2233 - TP_printk("dev %d:%d ops %p caller %pS", 2231 + TP_printk("dev %d:%d tp %p caller %pS", 2234 2232 MAJOR(__entry->dev), MINOR(__entry->dev), 2235 - __entry->dop, 2233 + __entry->tp, 2236 2234 (char *)__entry->caller_ip) 2237 2235 ) 2238 2236 #define DEFINE_DEFER_EVENT(name) \ 2239 2237 DEFINE_EVENT(xfs_defer_class, name, \ 2240 - TP_PROTO(struct xfs_mount *mp, struct xfs_defer_ops *dop, \ 2241 - unsigned long caller_ip), \ 2242 - TP_ARGS(mp, dop, caller_ip)) 2238 + TP_PROTO(struct xfs_trans *tp, unsigned long caller_ip), \ 2239 + TP_ARGS(tp, caller_ip)) 2243 2240 2244 2241 DECLARE_EVENT_CLASS(xfs_defer_error_class, 2245 - TP_PROTO(struct xfs_mount *mp, struct xfs_defer_ops *dop, int error), 2246 - TP_ARGS(mp, dop, error), 2242 + TP_PROTO(struct xfs_trans *tp, int error), 2243 + TP_ARGS(tp, error), 2247 2244 TP_STRUCT__entry( 2248 2245 __field(dev_t, dev) 2249 - __field(void *, dop) 2246 + __field(struct xfs_trans *, tp) 2250 2247 __field(char, committed) 2251 2248 __field(int, error) 2252 2249 ), 2253 2250 TP_fast_assign( 2254 - __entry->dev = mp ? 
mp->m_super->s_dev : 0; 2255 - __entry->dop = dop; 2251 + __entry->dev = tp->t_mountp->m_super->s_dev; 2252 + __entry->tp = tp; 2256 2253 __entry->error = error; 2257 2254 ), 2258 - TP_printk("dev %d:%d ops %p err %d", 2255 + TP_printk("dev %d:%d tp %p err %d", 2259 2256 MAJOR(__entry->dev), MINOR(__entry->dev), 2260 - __entry->dop, 2257 + __entry->tp, 2261 2258 __entry->error) 2262 2259 ) 2263 2260 #define DEFINE_DEFER_ERROR_EVENT(name) \ 2264 2261 DEFINE_EVENT(xfs_defer_error_class, name, \ 2265 - TP_PROTO(struct xfs_mount *mp, struct xfs_defer_ops *dop, int error), \ 2266 - TP_ARGS(mp, dop, error)) 2262 + TP_PROTO(struct xfs_trans *tp, int error), \ 2263 + TP_ARGS(tp, error)) 2267 2264 2268 2265 DECLARE_EVENT_CLASS(xfs_defer_pending_class, 2269 2266 TP_PROTO(struct xfs_mount *mp, struct xfs_defer_pending *dfp), ··· 2379 2382 xfs_exntst_t state), \ 2380 2383 TP_ARGS(mp, agno, op, agbno, ino, whichfork, offset, len, state)) 2381 2384 2382 - DEFINE_DEFER_EVENT(xfs_defer_init); 2383 2385 DEFINE_DEFER_EVENT(xfs_defer_cancel); 2384 2386 DEFINE_DEFER_EVENT(xfs_defer_trans_roll); 2385 2387 DEFINE_DEFER_EVENT(xfs_defer_trans_abort);
+5 -8
fs/xfs/xfs_trans.c
··· 100 100 ntp->t_mountp = tp->t_mountp; 101 101 INIT_LIST_HEAD(&ntp->t_items); 102 102 INIT_LIST_HEAD(&ntp->t_busy); 103 + INIT_LIST_HEAD(&ntp->t_dfops); 103 104 ntp->t_firstblock = NULLFSBLOCK; 104 105 105 106 ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES); ··· 121 120 tp->t_rtx_res = tp->t_rtx_res_used; 122 121 ntp->t_pflags = tp->t_pflags; 123 122 124 - /* copy the dfops pointer if it's external, otherwise move it */ 125 - xfs_defer_init(ntp, &ntp->t_dfops_internal); 126 - if (tp->t_dfops != &tp->t_dfops_internal) 127 - ntp->t_dfops = tp->t_dfops; 128 - else 129 - xfs_defer_move(ntp, tp); 123 + /* move deferred ops over to the new tp */ 124 + xfs_defer_move(ntp, tp); 130 125 131 126 xfs_trans_dup_dqinfo(tp, ntp); 132 127 ··· 277 280 tp->t_mountp = mp; 278 281 INIT_LIST_HEAD(&tp->t_items); 279 282 INIT_LIST_HEAD(&tp->t_busy); 283 + INIT_LIST_HEAD(&tp->t_dfops); 280 284 tp->t_firstblock = NULLFSBLOCK; 281 - xfs_defer_init(tp, &tp->t_dfops_internal); 282 285 283 286 error = xfs_trans_reserve(tp, resp, blocks, rtextents); 284 287 if (error) { ··· 926 929 * Finish deferred items on final commit. Only permanent transactions 927 930 * should ever have deferred ops. 928 931 */ 929 - WARN_ON_ONCE(!list_empty(&tp->t_dfops->dop_intake) && 932 + WARN_ON_ONCE(!list_empty(&tp->t_dfops) && 930 933 !(tp->t_flags & XFS_TRANS_PERM_LOG_RES)); 931 934 if (!regrant && (tp->t_flags & XFS_TRANS_PERM_LOG_RES)) { 932 935 error = xfs_defer_finish_noroll(&tp);
+2 -6
fs/xfs/xfs_trans.h
··· 90 90 #define XFS_ITEM_FLUSHING 3 91 91 92 92 /* 93 - * Deferred operations tracking structure. 93 + * Deferred operation item relogging limits. 94 94 */ 95 95 #define XFS_DEFER_OPS_NR_INODES 2 /* join up to two inodes */ 96 96 #define XFS_DEFER_OPS_NR_BUFS 2 /* join up to two buffers */ 97 - struct xfs_defer_ops { 98 - struct list_head dop_intake; /* unlogged pending work */ 99 - }; 100 97 101 98 /* 102 99 * This is the structure maintained for every active transaction. ··· 111 114 struct xlog_ticket *t_ticket; /* log mgr ticket */ 112 115 struct xfs_mount *t_mountp; /* ptr to fs mount struct */ 113 116 struct xfs_dquot_acct *t_dqinfo; /* acctg info for dquots */ 114 - struct xfs_defer_ops *t_dfops; /* dfops reference */ 115 117 int64_t t_icount_delta; /* superblock icount change */ 116 118 int64_t t_ifree_delta; /* superblock ifree change */ 117 119 int64_t t_fdblocks_delta; /* superblock fdblocks chg */ ··· 132 136 int64_t t_rextslog_delta;/* superblocks rextslog chg */ 133 137 struct list_head t_items; /* log item descriptors */ 134 138 struct list_head t_busy; /* list of busy extents */ 139 + struct list_head t_dfops; /* deferred operations */ 135 140 unsigned long t_pflags; /* saved process flags state */ 136 - struct xfs_defer_ops t_dfops_internal; 137 141 } xfs_trans_t; 138 142 139 143 /*