// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_buf_item.h"
#include "xfs_inode.h"
#include "xfs_inode_item.h"
#include "xfs_trace.h"
#include "xfs_icache.h"
#include "xfs_log.h"
#include "xfs_rmap.h"
#include "xfs_refcount.h"
#include "xfs_bmap.h"
#include "xfs_alloc.h"
#include "xfs_buf.h"
#include "xfs_da_format.h"
#include "xfs_da_btree.h"
#include "xfs_attr.h"

static struct kmem_cache	*xfs_defer_pending_cache;

/*
 * Deferred Operations in XFS
 *
 * Due to the way locking rules work in XFS, certain transactions (block
 * mapping and unmapping, typically) have permanent reservations so that
 * we can roll the transaction to adhere to AG locking order rules and
 * to unlock buffers between metadata updates. Prior to rmap/reflink,
 * the mapping code had a mechanism to perform these deferrals for
 * extents that were going to be freed; this code makes that facility
 * more generic.
 *
 * When adding the reverse mapping and reflink features, it became
 * necessary to perform complex remapping multi-transactions to comply
 * with AG locking order rules, and to be able to spread a single
 * refcount update operation (an operation on an n-block extent can
 * update as many as n records!) among multiple transactions. XFS can
 * roll a transaction to facilitate this, but using this facility
 * requires us to log "intent" items in case log recovery needs to
 * redo the operation, and to log "done" items to indicate that redo
 * is not necessary.
 *
 * Deferred work is tracked in xfs_defer_pending items. Each pending
 * item tracks one type of deferred work. Incoming work items (which
 * have not yet had an intent logged) are attached to a pending item
 * on the dop_intake list, where they wait for the caller to finish
 * the deferred operations.
 *
 * Finishing a set of deferred operations is an involved process. To
 * start, we define "rolling a deferred-op transaction" as follows:
 *
 * > For each xfs_defer_pending item on the dop_intake list,
 *   - Sort the work items in AG order. XFS locking
 *     order rules require us to lock buffers in AG order.
 *   - Create a log intent item for that type.
 *   - Attach it to the pending item.
 *   - Move the pending item from the dop_intake list to the
 *     dop_pending list.
 * > Roll the transaction.
 *
 * NOTE: To avoid exceeding the transaction reservation, we limit the
 * number of items that we attach to a given xfs_defer_pending.
 *
 * The actual finishing process looks like this:
 *
 * > For each xfs_defer_pending in the dop_pending list,
 *   - Roll the deferred-op transaction as above.
 *   - Create a log done item for that type, and attach it to the
 *     log intent item.
 *   - For each work item attached to the log intent item,
 *     * Perform the described action.
 *     * Attach the work item to the log done item.
 *     * If the result of doing the work was -EAGAIN, ->finish work
 *       wants a new transaction. See the "Requesting a Fresh
 *       Transaction while Finishing Deferred Work" section below for
 *       details.
 *
 * The key here is that we must log an intent item for all pending
 * work items every time we roll the transaction, and that we must log
 * a done item as soon as the work is completed. With this mechanism
 * we can perform complex remapping operations, chaining intent items
 * as needed.
 *
 * Requesting a Fresh Transaction while Finishing Deferred Work
 *
 * If ->finish_item decides that it needs a fresh transaction to
 * finish the work, it must ask its caller (xfs_defer_finish) for a
 * continuation. The most likely cause of this circumstance is the
 * refcount adjust functions deciding that they've logged enough items
 * to be at risk of exceeding the transaction reservation.
 *
 * To get a fresh transaction, we want to log the existing log done
 * item to prevent the log intent item from replaying, immediately log
 * a new log intent item with the unfinished work items, roll the
 * transaction, and re-call ->finish_item wherever it left off. The
 * log done item and the new log intent item must be in the same
 * transaction or atomicity cannot be guaranteed; defer_finish ensures
 * that this happens.
 *
 * This requires some coordination between ->finish_item and
 * defer_finish. Upon deciding to request a new transaction,
 * ->finish_item should update the current work item to reflect the
 * unfinished work. Next, it should reset the log done item's list
 * count to the number of items finished, and return -EAGAIN.
 * defer_finish sees the -EAGAIN, logs the new log intent item
 * with the remaining work items, and leaves the xfs_defer_pending
 * item at the head of the dop_work queue. Then it rolls the
 * transaction and picks up processing where it left off. ->finish_item
 * must be careful to leave enough transaction reservation to fit the
 * new log intent item.
 *
 * This is an example of remapping the extent (E, E+B) into file X at
 * offset A and dealing with the extent (C, C+B) already being mapped
 * there:
 * +-------------------------------------------------+
 * | Unmap file X startblock C offset A length B     | t0
 * | Intent to reduce refcount for extent (C, B)     |
 * | Intent to remove rmap (X, C, A, B)              |
 * | Intent to free extent (D, 1) (bmbt block)       |
 * | Intent to map (X, A, B) at startblock E         |
 * +-------------------------------------------------+
 * | Map file X startblock E offset A length B       | t1
 * | Done mapping (X, E, A, B)                       |
 * | Intent to increase refcount for extent (E, B)   |
 * | Intent to add rmap (X, E, A, B)                 |
 * +-------------------------------------------------+
 * | Reduce refcount for extent (C, B)               | t2
 * | Done reducing refcount for extent (C, 9)        |
 * | Intent to reduce refcount for extent (C+9, B-9) |
 * | (ran out of space after 9 refcount updates)     |
 * +-------------------------------------------------+
 * | Reduce refcount for extent (C+9, B-9)           | t3
 * | Done reducing refcount for extent (C+9, B-9)    |
 * | Increase refcount for extent (E, B)             |
 * | Done increasing refcount for extent (E, B)      |
 * | Intent to free extent (C, B)                    |
 * | Intent to free extent (F, 1) (refcountbt block) |
 * | Intent to remove rmap (F, 1, REFC)              |
 * +-------------------------------------------------+
 * | Remove rmap (X, C, A, B)                        | t4
 * | Done removing rmap (X, C, A, B)                 |
 * | Add rmap (X, E, A, B)                           |
 * | Done adding rmap (X, E, A, B)                   |
 * | Remove rmap (F, 1, REFC)                        |
 * | Done removing rmap (F, 1, REFC)                 |
 * +-------------------------------------------------+
 * | Free extent (C, B)                              | t5
 * | Done freeing extent (C, B)                      |
 * | Free extent (D, 1)                              |
 * | Done freeing extent (D, 1)                      |
 * | Free extent (F, 1)                              |
 * | Done freeing extent (F, 1)                      |
 * +-------------------------------------------------+
 *
 * If we should crash before t2 commits, log recovery replays
 * the following intent items:
 *
 * - Intent to reduce refcount for extent (C, B)
 * - Intent to remove rmap (X, C, A, B)
 * - Intent to free extent (D, 1) (bmbt block)
 * - Intent to increase refcount for extent (E, B)
 * - Intent to add rmap (X, E, A, B)
 *
 * In the process of recovering, it should also generate and take care
 * of these intent items:
 *
 * - Intent to free extent (C, B)
 * - Intent to free extent (F, 1) (refcountbt block)
 * - Intent to remove rmap (F, 1, REFC)
 *
 * Note that the continuation requested between t2 and t3 is likely to
 * reoccur.
 */

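/*
 * Editorial example (not part of the original file): a minimal sketch of the
 * caller pattern described above. Work items are queued on a transaction
 * that has a permanent reservation, and the whole intent chain is logged,
 * rolled, and finished by xfs_defer_finish(). The xfs_foo_item structure and
 * xfs_foo_defer_work() wrapper are hypothetical; the deferred-op type
 * constant is reused here purely for illustration.
 */
#if 0	/* illustrative sketch; never compiled */
struct xfs_foo_item {
	struct list_head	foo_list;	/* linked into dfp_work */
	/* ...description of the deferred work... */
};

static int
xfs_foo_defer_work(
	struct xfs_trans	**tpp,
	struct xfs_foo_item	*foo)
{
	/* Queue the work; an intent item is logged at the next roll. */
	xfs_defer_add(*tpp, XFS_DEFER_OPS_TYPE_FREE, &foo->foo_list);

	/* Log intents, roll the transaction, and finish all pending work. */
	return xfs_defer_finish(tpp);
}
#endif
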
static const struct xfs_defer_op_type *defer_op_types[] = {
	[XFS_DEFER_OPS_TYPE_BMAP]	= &xfs_bmap_update_defer_type,
	[XFS_DEFER_OPS_TYPE_REFCOUNT]	= &xfs_refcount_update_defer_type,
	[XFS_DEFER_OPS_TYPE_RMAP]	= &xfs_rmap_update_defer_type,
	[XFS_DEFER_OPS_TYPE_FREE]	= &xfs_extent_free_defer_type,
	[XFS_DEFER_OPS_TYPE_AGFL_FREE]	= &xfs_agfl_free_defer_type,
	[XFS_DEFER_OPS_TYPE_ATTR]	= &xfs_attr_defer_type,
};

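/*
 * Editorial note (not part of the original file): struct xfs_defer_op_type
 * is declared in xfs_defer.h; the sketch below is reconstructed from the
 * call sites in this file and abridged, so treat the exact member list and
 * ordering as an assumption.
 */
#if 0	/* illustrative sketch; never compiled */
struct xfs_defer_op_type {
	struct xfs_log_item *(*create_intent)(struct xfs_trans *tp,
			struct list_head *items, unsigned int count,
			bool sort);
	void (*abort_intent)(struct xfs_log_item *intent);
	struct xfs_log_item *(*create_done)(struct xfs_trans *tp,
			struct xfs_log_item *intent, unsigned int count);
	int (*finish_item)(struct xfs_trans *tp, struct xfs_log_item *done,
			struct list_head *item, struct xfs_btree_cur **state);
	void (*finish_cleanup)(struct xfs_trans *tp,
			struct xfs_btree_cur *state, int error);
	void (*cancel_item)(struct list_head *item);
	unsigned int		max_items;
};
#endif
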
/*
 * Ensure there's a log intent item associated with this deferred work item if
 * the operation must be restarted on crash. Returns 1 if there's a log item;
 * 0 if there isn't; or a negative errno.
 */
static int
xfs_defer_create_intent(
	struct xfs_trans		*tp,
	struct xfs_defer_pending	*dfp,
	bool				sort)
{
	const struct xfs_defer_op_type	*ops = defer_op_types[dfp->dfp_type];
	struct xfs_log_item		*lip;

	if (dfp->dfp_intent)
		return 1;

	lip = ops->create_intent(tp, &dfp->dfp_work, dfp->dfp_count, sort);
	if (!lip)
		return 0;
	if (IS_ERR(lip))
		return PTR_ERR(lip);

	dfp->dfp_intent = lip;
	return 1;
}

/*
 * For each pending item in the intake list, log its intent item and the
 * associated extents, then add the entire intake list to the end of
 * the pending list.
 *
 * Returns 1 if at least one log item was associated with the deferred work;
 * 0 if there are no log items; or a negative errno.
 */
static int
xfs_defer_create_intents(
	struct xfs_trans	*tp)
{
	struct xfs_defer_pending	*dfp;
	int				ret = 0;

	list_for_each_entry(dfp, &tp->t_dfops, dfp_list) {
		int		ret2;

		trace_xfs_defer_create_intent(tp->t_mountp, dfp);
		ret2 = xfs_defer_create_intent(tp, dfp, true);
		if (ret2 < 0)
			return ret2;
		ret |= ret2;
	}
	return ret;
}

/* Abort all the intents that were committed. */
STATIC void
xfs_defer_trans_abort(
	struct xfs_trans		*tp,
	struct list_head		*dop_pending)
{
	struct xfs_defer_pending	*dfp;
	const struct xfs_defer_op_type	*ops;

	trace_xfs_defer_trans_abort(tp, _RET_IP_);

	/* Abort intent items that don't have a done item. */
	list_for_each_entry(dfp, dop_pending, dfp_list) {
		ops = defer_op_types[dfp->dfp_type];
		trace_xfs_defer_pending_abort(tp->t_mountp, dfp);
		if (dfp->dfp_intent && !dfp->dfp_done) {
			ops->abort_intent(dfp->dfp_intent);
			dfp->dfp_intent = NULL;
		}
	}
}

/*
 * Capture resources that the caller said not to release ("held") when the
 * transaction commits. Caller is responsible for zero-initializing @dres.
 */
static int
xfs_defer_save_resources(
	struct xfs_defer_resources	*dres,
	struct xfs_trans		*tp)
{
	struct xfs_buf_log_item		*bli;
	struct xfs_inode_log_item	*ili;
	struct xfs_log_item		*lip;

	BUILD_BUG_ON(NBBY * sizeof(dres->dr_ordered) < XFS_DEFER_OPS_NR_BUFS);

	list_for_each_entry(lip, &tp->t_items, li_trans) {
		switch (lip->li_type) {
		case XFS_LI_BUF:
			bli = container_of(lip, struct xfs_buf_log_item,
					   bli_item);
			if (bli->bli_flags & XFS_BLI_HOLD) {
				if (dres->dr_bufs >= XFS_DEFER_OPS_NR_BUFS) {
					ASSERT(0);
					return -EFSCORRUPTED;
				}
				if (bli->bli_flags & XFS_BLI_ORDERED)
					dres->dr_ordered |=
							(1U << dres->dr_bufs);
				else
					xfs_trans_dirty_buf(tp, bli->bli_buf);
				dres->dr_bp[dres->dr_bufs++] = bli->bli_buf;
			}
			break;
		case XFS_LI_INODE:
			ili = container_of(lip, struct xfs_inode_log_item,
					   ili_item);
			if (ili->ili_lock_flags == 0) {
				if (dres->dr_inos >= XFS_DEFER_OPS_NR_INODES) {
					ASSERT(0);
					return -EFSCORRUPTED;
				}
				xfs_trans_log_inode(tp, ili->ili_inode,
						    XFS_ILOG_CORE);
				dres->dr_ip[dres->dr_inos++] = ili->ili_inode;
			}
			break;
		default:
			break;
		}
	}

	return 0;
}

/* Attach the held resources to the transaction. */
static void
xfs_defer_restore_resources(
	struct xfs_trans		*tp,
	struct xfs_defer_resources	*dres)
{
	unsigned short			i;

	/* Rejoin the joined inodes. */
	for (i = 0; i < dres->dr_inos; i++)
		xfs_trans_ijoin(tp, dres->dr_ip[i], 0);

	/* Rejoin the buffers and dirty them so the log moves forward. */
	for (i = 0; i < dres->dr_bufs; i++) {
		xfs_trans_bjoin(tp, dres->dr_bp[i]);
		if (dres->dr_ordered & (1U << i))
			xfs_trans_ordered_buf(tp, dres->dr_bp[i]);
		xfs_trans_bhold(tp, dres->dr_bp[i]);
	}
}

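/*
 * Editorial example (not part of the original file): how a caller keeps a
 * buffer locked and joined across the transaction rolls that finishing
 * deferred work performs. xfs_trans_bhold() sets XFS_BLI_HOLD, which is what
 * xfs_defer_save_resources() looks for; inodes joined with lock_flags == 0
 * are carried the same way.
 */
#if 0	/* illustrative sketch; never compiled */
	xfs_trans_bjoin(tp, bp);	/* join the locked buffer */
	xfs_trans_bhold(tp, bp);	/* keep it held across rolls */
	error = xfs_defer_finish(&tp);	/* bp is still joined on return */
	if (error)
		goto out_release;
#endif
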
/* Roll a transaction so we can do some deferred op processing. */
STATIC int
xfs_defer_trans_roll(
	struct xfs_trans		**tpp)
{
	struct xfs_defer_resources	dres = { };
	int				error;

	error = xfs_defer_save_resources(&dres, *tpp);
	if (error)
		return error;

	trace_xfs_defer_trans_roll(*tpp, _RET_IP_);

	/*
	 * Roll the transaction. Rolling always gives back a new transaction
	 * (even if committing the old one fails!) to hand to the caller, so
	 * we join the held resources to that new transaction; this way we
	 * always return with the held resources joined to @tpp, no matter
	 * what happened.
	 */
	error = xfs_trans_roll(tpp);

	xfs_defer_restore_resources(*tpp, &dres);

	if (error)
		trace_xfs_defer_trans_roll_error(*tpp, error);
	return error;
}

/*
 * Free up any items left in the list.
 */
static void
xfs_defer_cancel_list(
	struct xfs_mount		*mp,
	struct list_head		*dop_list)
{
	struct xfs_defer_pending	*dfp;
	struct xfs_defer_pending	*pli;
	struct list_head		*pwi;
	struct list_head		*n;
	const struct xfs_defer_op_type	*ops;

	/*
	 * Free the pending items. Caller should already have arranged
	 * for the intent items to be released.
	 */
	list_for_each_entry_safe(dfp, pli, dop_list, dfp_list) {
		ops = defer_op_types[dfp->dfp_type];
		trace_xfs_defer_cancel_list(mp, dfp);
		list_del(&dfp->dfp_list);
		list_for_each_safe(pwi, n, &dfp->dfp_work) {
			list_del(pwi);
			dfp->dfp_count--;
			ops->cancel_item(pwi);
		}
		ASSERT(dfp->dfp_count == 0);
		kmem_cache_free(xfs_defer_pending_cache, dfp);
	}
}

/*
 * Prevent a log intent item from pinning the tail of the log by logging a
 * done item to release the intent item; and then log a new intent item.
 * The caller should provide a fresh transaction and roll it after we're done.
 */
static int
xfs_defer_relog(
	struct xfs_trans		**tpp,
	struct list_head		*dfops)
{
	struct xlog			*log = (*tpp)->t_mountp->m_log;
	struct xfs_defer_pending	*dfp;
	xfs_lsn_t			threshold_lsn = NULLCOMMITLSN;

	ASSERT((*tpp)->t_flags & XFS_TRANS_PERM_LOG_RES);

	list_for_each_entry(dfp, dfops, dfp_list) {
		/*
		 * If the log intent item for this deferred op is not a part of
		 * the current log checkpoint, relog the intent item to keep
		 * the log tail moving forward. We're ok with this being racy
		 * because an incorrect decision means we'll be a little slower
		 * at pushing the tail.
		 */
		if (dfp->dfp_intent == NULL ||
		    xfs_log_item_in_current_chkpt(dfp->dfp_intent))
			continue;

		/*
		 * Figure out where we need the tail to be in order to maintain
		 * the minimum required free space in the log. Only sample
		 * the log threshold once per call.
		 */
		if (threshold_lsn == NULLCOMMITLSN) {
			threshold_lsn = xlog_grant_push_threshold(log, 0);
			if (threshold_lsn == NULLCOMMITLSN)
				break;
		}
		if (XFS_LSN_CMP(dfp->dfp_intent->li_lsn, threshold_lsn) >= 0)
			continue;

		trace_xfs_defer_relog_intent((*tpp)->t_mountp, dfp);
		XFS_STATS_INC((*tpp)->t_mountp, defer_relog);
		dfp->dfp_intent = xfs_trans_item_relog(dfp->dfp_intent, *tpp);
	}

	if ((*tpp)->t_flags & XFS_TRANS_DIRTY)
		return xfs_defer_trans_roll(tpp);
	return 0;
}

/*
 * Log an intent-done item for the first pending intent, and finish the work
 * items.
 */
static int
xfs_defer_finish_one(
	struct xfs_trans		*tp,
	struct xfs_defer_pending	*dfp)
{
	const struct xfs_defer_op_type	*ops = defer_op_types[dfp->dfp_type];
	struct xfs_btree_cur		*state = NULL;
	struct list_head		*li, *n;
	int				error;

	trace_xfs_defer_pending_finish(tp->t_mountp, dfp);

	dfp->dfp_done = ops->create_done(tp, dfp->dfp_intent, dfp->dfp_count);
	list_for_each_safe(li, n, &dfp->dfp_work) {
		list_del(li);
		dfp->dfp_count--;
		error = ops->finish_item(tp, dfp->dfp_done, li, &state);
		if (error == -EAGAIN) {
			int		ret;

			/*
			 * Caller wants a fresh transaction; put the work item
			 * back on the list and log a new log intent item to
			 * replace the old one. See "Requesting a Fresh
			 * Transaction while Finishing Deferred Work" above.
			 */
			list_add(li, &dfp->dfp_work);
			dfp->dfp_count++;
			dfp->dfp_done = NULL;
			dfp->dfp_intent = NULL;
			ret = xfs_defer_create_intent(tp, dfp, false);
			if (ret < 0)
				error = ret;
		}

		if (error)
			goto out;
	}

	/* Done with the dfp, free it. */
	list_del(&dfp->dfp_list);
	kmem_cache_free(xfs_defer_pending_cache, dfp);
out:
	if (ops->finish_cleanup)
		ops->finish_cleanup(tp, state, error);
	return error;
}

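/*
 * Editorial example (not part of the original file): the shape of a
 * ->finish_item hook that requests a fresh transaction, following the
 * "Requesting a Fresh Transaction while Finishing Deferred Work" protocol
 * documented at the top of this file. All xfs_foo_* names are hypothetical.
 */
#if 0	/* illustrative sketch; never compiled */
static int
xfs_foo_finish_item(
	struct xfs_trans	*tp,
	struct xfs_log_item	*done,
	struct list_head	*item,
	struct xfs_btree_cur	**state)
{
	struct xfs_foo_item	*foo;
	int			error;

	foo = container_of(item, struct xfs_foo_item, foo_list);
	error = xfs_foo_do_work(tp, foo, state);
	if (error == -EAGAIN) {
		/*
		 * Out of reservation: record the unfinished remainder in
		 * @foo, fix up the done item's count to what was actually
		 * completed, and let xfs_defer_finish_one() relog a new
		 * intent item and roll the transaction.
		 */
		xfs_foo_update_done_count(done, foo);
	}
	return error;
}
#endif
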
/*
 * Finish all the pending work. This involves logging intent items for
 * any work items that wandered in since the last transaction roll (if
 * one has even happened), rolling the transaction, and finishing the
 * work items in the first item on the logged-and-pending list.
 *
 * Held resources are rejoined to each new transaction as we roll.
 */
int
xfs_defer_finish_noroll(
	struct xfs_trans		**tp)
{
	struct xfs_defer_pending	*dfp = NULL;
	int				error = 0;
	LIST_HEAD(dop_pending);

	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);

	trace_xfs_defer_finish(*tp, _RET_IP_);

	/* Until we run out of pending work to finish... */
	while (!list_empty(&dop_pending) || !list_empty(&(*tp)->t_dfops)) {
		/*
		 * Deferred items that are created in the process of finishing
		 * other deferred work items should be queued at the head of
		 * the pending list, which puts them ahead of the deferred work
		 * that was created by the caller. This keeps the number of
		 * pending work items to a minimum, which decreases the amount
		 * of time that any one intent item can stick around in memory,
		 * pinning the log tail.
		 */
		int has_intents = xfs_defer_create_intents(*tp);

		list_splice_init(&(*tp)->t_dfops, &dop_pending);

		if (has_intents < 0) {
			error = has_intents;
			goto out_shutdown;
		}
		if (has_intents || dfp) {
			error = xfs_defer_trans_roll(tp);
			if (error)
				goto out_shutdown;

			/* Relog intent items to keep the log moving. */
			error = xfs_defer_relog(tp, &dop_pending);
			if (error)
				goto out_shutdown;
		}

		dfp = list_first_entry(&dop_pending, struct xfs_defer_pending,
				       dfp_list);
		error = xfs_defer_finish_one(*tp, dfp);
		if (error && error != -EAGAIN)
			goto out_shutdown;
	}

	trace_xfs_defer_finish_done(*tp, _RET_IP_);
	return 0;

out_shutdown:
	xfs_defer_trans_abort(*tp, &dop_pending);
	xfs_force_shutdown((*tp)->t_mountp, SHUTDOWN_CORRUPT_INCORE);
	trace_xfs_defer_finish_error(*tp, error);
	xfs_defer_cancel_list((*tp)->t_mountp, &dop_pending);
	xfs_defer_cancel(*tp);
	return error;
}

int
xfs_defer_finish(
	struct xfs_trans	**tp)
{
	int			error;

	/*
	 * Finish and roll the transaction once more to avoid returning to the
	 * caller with a dirty transaction.
	 */
	error = xfs_defer_finish_noroll(tp);
	if (error)
		return error;
	if ((*tp)->t_flags & XFS_TRANS_DIRTY) {
		error = xfs_defer_trans_roll(tp);
		if (error) {
			xfs_force_shutdown((*tp)->t_mountp,
					   SHUTDOWN_CORRUPT_INCORE);
			return error;
		}
	}

	/* Reset LOWMODE now that we've finished all the dfops. */
	ASSERT(list_empty(&(*tp)->t_dfops));
	(*tp)->t_flags &= ~XFS_TRANS_LOWMODE;
	return 0;
}

void
xfs_defer_cancel(
	struct xfs_trans	*tp)
{
	struct xfs_mount	*mp = tp->t_mountp;

	trace_xfs_defer_cancel(tp, _RET_IP_);
	xfs_defer_cancel_list(mp, &tp->t_dfops);
}

/* Add an item for later deferred processing. */
void
xfs_defer_add(
	struct xfs_trans	*tp,
	enum xfs_defer_ops_type	type,
	struct list_head	*li)
{
	struct xfs_defer_pending	*dfp = NULL;
	const struct xfs_defer_op_type	*ops;

	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	BUILD_BUG_ON(ARRAY_SIZE(defer_op_types) != XFS_DEFER_OPS_TYPE_MAX);

	/*
	 * Add the item to a pending item at the end of the intake list.
	 * If the last pending item has the same type, reuse it. Else,
	 * create a new pending item at the end of the intake list.
	 */
	if (!list_empty(&tp->t_dfops)) {
		dfp = list_last_entry(&tp->t_dfops,
				      struct xfs_defer_pending, dfp_list);
		ops = defer_op_types[dfp->dfp_type];
		if (dfp->dfp_type != type ||
		    (ops->max_items && dfp->dfp_count >= ops->max_items))
			dfp = NULL;
	}
	if (!dfp) {
		dfp = kmem_cache_zalloc(xfs_defer_pending_cache,
					GFP_NOFS | __GFP_NOFAIL);
		dfp->dfp_type = type;
		dfp->dfp_intent = NULL;
		dfp->dfp_done = NULL;
		dfp->dfp_count = 0;
		INIT_LIST_HEAD(&dfp->dfp_work);
		list_add_tail(&dfp->dfp_list, &tp->t_dfops);
	}

	list_add_tail(li, &dfp->dfp_work);
	dfp->dfp_count++;
}

/*
 * Move deferred ops from one transaction to another and reset the source to
 * initial state. This is primarily used to carry state forward across
 * transaction rolls with pending dfops.
 */
void
xfs_defer_move(
	struct xfs_trans	*dtp,
	struct xfs_trans	*stp)
{
	list_splice_init(&stp->t_dfops, &dtp->t_dfops);

	/*
	 * Low free space mode was historically controlled by a dfops field.
	 * This meant that low mode state potentially carried across multiple
	 * transaction rolls. Transfer low mode on a dfops move to preserve
	 * that behavior.
	 */
	dtp->t_flags |= (stp->t_flags & XFS_TRANS_LOWMODE);
	stp->t_flags &= ~XFS_TRANS_LOWMODE;
}

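/*
 * Editorial note (not part of the original file): the main caller of
 * xfs_defer_move() is the transaction roll path (xfs_trans_dup() in
 * xfs_trans.c), which hands the pending dfops to the replacement
 * transaction. Abridged to the relevant step:
 */
#if 0	/* illustrative sketch; never compiled */
	/* inside a duplicate-and-roll helper, with @ntp newly created: */
	xfs_defer_move(ntp, tp);	/* ntp now owns dfops and LOWMODE */
#endif
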
/*
 * Prepare a chain of fresh deferred ops work items to be completed later. Log
 * recovery requires the ability to put off until later the actual finishing
 * work so that it can process unfinished items recovered from the log in
 * correct order.
 *
 * Create and log intent items for all the work that we're capturing so that we
 * can be assured that the items will get replayed if the system goes down
 * before log recovery gets a chance to finish the work it put off. The entire
 * deferred ops state is transferred to the capture structure and the
 * transaction is then ready for the caller to commit it. If there are no
 * intent items to capture, this function returns NULL.
 *
 * The capture structure grabs extra references to the captured inodes and
 * buffers, since callers are expected to drop their own references once the
 * transaction commits.
 */
static struct xfs_defer_capture *
xfs_defer_ops_capture(
	struct xfs_trans		*tp)
{
	struct xfs_defer_capture	*dfc;
	unsigned short			i;
	int				error;

	if (list_empty(&tp->t_dfops))
		return NULL;

	error = xfs_defer_create_intents(tp);
	if (error < 0)
		return ERR_PTR(error);

	/* Create an object to capture the defer ops. */
	dfc = kmem_zalloc(sizeof(*dfc), KM_NOFS);
	INIT_LIST_HEAD(&dfc->dfc_list);
	INIT_LIST_HEAD(&dfc->dfc_dfops);

	/* Move the dfops chain and transaction state to the capture struct. */
	list_splice_init(&tp->t_dfops, &dfc->dfc_dfops);
	dfc->dfc_tpflags = tp->t_flags & XFS_TRANS_LOWMODE;
	tp->t_flags &= ~XFS_TRANS_LOWMODE;

	/* Capture the remaining block reservations along with the dfops. */
	dfc->dfc_blkres = tp->t_blk_res - tp->t_blk_res_used;
	dfc->dfc_rtxres = tp->t_rtx_res - tp->t_rtx_res_used;

	/* Preserve the log reservation size. */
	dfc->dfc_logres = tp->t_log_res;

	error = xfs_defer_save_resources(&dfc->dfc_held, tp);
	if (error) {
		/*
		 * Resource capture should never fail, but if it does, we
		 * still have to shut down the log and release things
		 * properly.
		 */
		xfs_force_shutdown(tp->t_mountp, SHUTDOWN_CORRUPT_INCORE);
	}

	/*
	 * Grab extra references to the inodes and buffers because callers are
	 * expected to release their held references after we commit the
	 * transaction.
	 */
	for (i = 0; i < dfc->dfc_held.dr_inos; i++) {
		ASSERT(xfs_isilocked(dfc->dfc_held.dr_ip[i], XFS_ILOCK_EXCL));
		ihold(VFS_I(dfc->dfc_held.dr_ip[i]));
	}

	for (i = 0; i < dfc->dfc_held.dr_bufs; i++)
		xfs_buf_hold(dfc->dfc_held.dr_bp[i]);

	return dfc;
}

/* Release all resources that we used to capture deferred ops. */
void
xfs_defer_ops_capture_free(
	struct xfs_mount		*mp,
	struct xfs_defer_capture	*dfc)
{
	unsigned short			i;

	xfs_defer_cancel_list(mp, &dfc->dfc_dfops);

	for (i = 0; i < dfc->dfc_held.dr_bufs; i++)
		xfs_buf_relse(dfc->dfc_held.dr_bp[i]);

	for (i = 0; i < dfc->dfc_held.dr_inos; i++)
		xfs_irele(dfc->dfc_held.dr_ip[i]);

	kmem_free(dfc);
}

/*
 * Capture any deferred ops and commit the transaction. This is the last step
 * needed to finish a log intent item that we recovered from the log. Any
 * inodes held by the transaction are captured automatically and their
 * references transferred to the capture structure. The caller must hold
 * ILOCK_EXCL on those inodes, and must unlock them before calling
 * xfs_defer_ops_continue.
 */
int
xfs_defer_ops_capture_and_commit(
	struct xfs_trans		*tp,
	struct list_head		*capture_list)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_defer_capture	*dfc;
	int				error;

	/* If we don't capture anything, commit transaction and exit. */
	dfc = xfs_defer_ops_capture(tp);
	if (IS_ERR(dfc)) {
		xfs_trans_cancel(tp);
		return PTR_ERR(dfc);
	}
	if (!dfc)
		return xfs_trans_commit(tp);

	/* Commit the transaction and add the capture structure to the list. */
	error = xfs_trans_commit(tp);
	if (error) {
		xfs_defer_ops_capture_free(mp, dfc);
		return error;
	}

	list_add_tail(&dfc->dfc_list, capture_list);
	return 0;
}

/*
 * Attach a chain of captured deferred ops to a new transaction and free the
 * capture structure. If an inode was captured, it will be passed back to the
 * caller with ILOCK_EXCL held and joined to the transaction with lockflags==0.
 * The caller now owns the inode reference.
 */
void
xfs_defer_ops_continue(
	struct xfs_defer_capture	*dfc,
	struct xfs_trans		*tp,
	struct xfs_defer_resources	*dres)
{
	unsigned int			i;

	ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
	ASSERT(!(tp->t_flags & XFS_TRANS_DIRTY));

	/* Lock the captured resources to the new transaction. */
	if (dfc->dfc_held.dr_inos == 2)
		xfs_lock_two_inodes(dfc->dfc_held.dr_ip[0], XFS_ILOCK_EXCL,
				    dfc->dfc_held.dr_ip[1], XFS_ILOCK_EXCL);
	else if (dfc->dfc_held.dr_inos == 1)
		xfs_ilock(dfc->dfc_held.dr_ip[0], XFS_ILOCK_EXCL);

	for (i = 0; i < dfc->dfc_held.dr_bufs; i++)
		xfs_buf_lock(dfc->dfc_held.dr_bp[i]);

	/* Join the captured resources to the new transaction. */
	xfs_defer_restore_resources(tp, &dfc->dfc_held);
	memcpy(dres, &dfc->dfc_held, sizeof(struct xfs_defer_resources));
	dres->dr_bufs = 0;

	/* Move captured dfops chain and state to the transaction. */
	list_splice_init(&dfc->dfc_dfops, &tp->t_dfops);
	tp->t_flags |= dfc->dfc_tpflags;

	kmem_free(dfc);
}

/* Release the resources captured and continued during recovery. */
void
xfs_defer_resources_rele(
	struct xfs_defer_resources	*dres)
{
	unsigned short			i;

	for (i = 0; i < dres->dr_inos; i++) {
		xfs_iunlock(dres->dr_ip[i], XFS_ILOCK_EXCL);
		xfs_irele(dres->dr_ip[i]);
		dres->dr_ip[i] = NULL;
	}

	for (i = 0; i < dres->dr_bufs; i++) {
		xfs_buf_relse(dres->dr_bp[i]);
		dres->dr_bp[i] = NULL;
	}

	dres->dr_inos = 0;
	dres->dr_bufs = 0;
	dres->dr_ordered = 0;
}

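/*
 * Editorial example (not part of the original file): how the capture,
 * continue, and release helpers above chain together during log recovery,
 * loosely modeled on the recovery code in xfs_log_recover.c. Transaction
 * allocation and error handling are elided, and the exact sequence is an
 * assumption.
 */
#if 0	/* illustrative sketch; never compiled */
	struct xfs_defer_resources	dres;
	struct xfs_defer_capture	*dfc, *next;
	struct xfs_trans		*tp;
	int				error;
	LIST_HEAD(capture_list);

	/* Each recovered intent item finishes its work, then captures: */
	error = xfs_defer_ops_capture_and_commit(tp, &capture_list);

	/* Later, with a fresh transaction sized from dfc->dfc_logres: */
	list_for_each_entry_safe(dfc, next, &capture_list, dfc_list) {
		list_del_init(&dfc->dfc_list);
		xfs_defer_ops_continue(dfc, tp, &dres);
		error = xfs_trans_commit(tp);
		xfs_defer_resources_rele(&dres);
	}
#endif
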
static inline int __init
xfs_defer_init_cache(void)
{
	xfs_defer_pending_cache = kmem_cache_create("xfs_defer_pending",
			sizeof(struct xfs_defer_pending),
			0, 0, NULL);

	return xfs_defer_pending_cache != NULL ? 0 : -ENOMEM;
}

static inline void
xfs_defer_destroy_cache(void)
{
	kmem_cache_destroy(xfs_defer_pending_cache);
	xfs_defer_pending_cache = NULL;
}

/* Set up caches for deferred work items. */
int __init
xfs_defer_init_item_caches(void)
{
	int				error;

	error = xfs_defer_init_cache();
	if (error)
		return error;
	error = xfs_rmap_intent_init_cache();
	if (error)
		goto err;
	error = xfs_refcount_intent_init_cache();
	if (error)
		goto err;
	error = xfs_bmap_intent_init_cache();
	if (error)
		goto err;
	error = xfs_extfree_intent_init_cache();
	if (error)
		goto err;
	error = xfs_attr_intent_init_cache();
	if (error)
		goto err;
	return 0;
err:
	xfs_defer_destroy_item_caches();
	return error;
}

/* Destroy all the deferred work item caches, if they've been allocated. */
void
xfs_defer_destroy_item_caches(void)
{
	xfs_attr_intent_destroy_cache();
	xfs_extfree_intent_destroy_cache();
	xfs_bmap_intent_destroy_cache();
	xfs_refcount_intent_destroy_cache();
	xfs_rmap_intent_destroy_cache();
	xfs_defer_destroy_cache();
}