[XFS] Move AIL pushing into its own thread

When many hundreds to thousands of threads all try to do simultaneous
transactions and the log is in a tail-pushing situation (i.e. full), we
can get multiple threads walking the AIL list and contending on the AIL
lock.

The AIL push is, in effect, a simple I/O dispatch algorithm complicated by
the ordering constraints placed on it by the transaction subsystem. It
really does not need multiple threads to push on it - even when only a
single CPU is pushing the AIL, it can push the I/O out far faster than
pretty much any disk subsystem can handle.

So, to avoid contention problems stemming from multiple list walkers, move
the list walk off into another thread and simply provide a "target" to
push to. When a thread requires a push, it sets the target and wakes the
push thread, then goes to sleep waiting for the required amount of space
to become available in the log.
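
For illustration only, here is a minimal userspace sketch of that
set-target-and-wake handshake, using POSIX threads in place of the kernel's
kthread and wait-queue primitives. Every name in it (push_target,
pusher_main, request_push) is invented for the example and is not part of
the patch; the kernel version also checks the target locklessly, whereas
the mutex here just keeps the sketch simple.

/*
 * Userspace analogue of the target/wakeup handshake: many writers raise
 * a shared target LSN and poke a single pusher thread; only the pusher
 * ever walks the list. Build with: cc -std=c99 sketch.c -lpthread
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t lsn_t;

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wake = PTHREAD_COND_INITIALIZER;
static lsn_t push_target;       /* highest LSN requested so far */
static int   stop;

/* The single pusher: sleep until a new target appears, then "push" to it. */
static void *pusher_main(void *arg)
{
        lsn_t pushed = 0;

        (void)arg;
        pthread_mutex_lock(&lock);
        for (;;) {
                while (pushed >= push_target && !stop)
                        pthread_cond_wait(&wake, &lock);
                if (stop)
                        break;
                lsn_t target = push_target;
                pthread_mutex_unlock(&lock);

                /* stand-in for walking the AIL and dispatching I/O */
                printf("pushing AIL items below lsn %llu\n",
                       (unsigned long long)target);
                pushed = target;

                pthread_mutex_lock(&lock);
        }
        pthread_mutex_unlock(&lock);
        return NULL;
}

/* A transaction thread: raise the target, wake the pusher, then (in the
 * real code) sleep waiting for log space to become available. */
static void request_push(lsn_t threshold_lsn)
{
        pthread_mutex_lock(&lock);
        if (threshold_lsn > push_target) {
                push_target = threshold_lsn;
                pthread_cond_signal(&wake);
        }
        pthread_mutex_unlock(&lock);
}

int main(void)
{
        pthread_t tid;

        pthread_create(&tid, NULL, pusher_main, NULL);
        request_push(100);
        request_push(250);      /* may be coalesced with the first push */

        pthread_mutex_lock(&lock);
        stop = 1;
        pthread_cond_signal(&wake);
        pthread_mutex_unlock(&lock);
        pthread_join(tid, NULL);
        return 0;
}

The point is that writers only ever raise the shared target and poke one
pusher; they never contend on the list walk itself.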

This mechanism should also be a lot fairer under heavy load as the waiters
will queue in arrival order, rather than queuing in "who completed a push
first" order.

Also, by moving the pushing to a separate thread, we can do overload
detection and prevention more effectively, because we can keep context
from one loop iteration to the next. That is, we can push only part of
the list each loop and not have to go back to the start of the list
every time we run. This should also help by reducing the number of
items we try to lock, and by avoiding repeated pushes of items that we
cannot move (a sketch of this follows below).
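
As a rough sketch of what keeping that context buys us: the list, item
type, per-pass limit, and function names below are simplified stand-ins
invented for the example, mirroring (but not reproducing) the patch's
last_pushed_lsn bookkeeping and xfs_trans_first_push_ail().

/*
 * Sketch of resuming a list scan between pusher wakeups instead of
 * restarting from the head each time. Build with: cc -std=c99 scan.c
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t lsn_t;

struct item {
        lsn_t           lsn;
        struct item     *next;
};

/* Resume the scan: skip items already pushed on an earlier pass. */
static struct item *first_push_item(struct item *head, lsn_t last_pushed_lsn)
{
        struct item *ip = head;

        if (last_pushed_lsn == 0)
                return ip;              /* fresh scan from the head */
        while (ip && ip->lsn < last_pushed_lsn)
                ip = ip->next;          /* handled on a previous pass */
        return ip;
}

/* One pass: push at most max_push items below target, remember how far
 * we got so the next pass can continue from there. */
static void push_pass(struct item *head, lsn_t target, int max_push,
                      lsn_t *last_pushed_lsn)
{
        struct item *ip = first_push_item(head, *last_pushed_lsn);
        int count = 0;

        while (ip && ip->lsn < target && count++ < max_push) {
                printf("push item at lsn %llu\n", (unsigned long long)ip->lsn);
                *last_pushed_lsn = ip->lsn;     /* scan context for next pass */
                ip = ip->next;
        }
        if (!ip || ip->lsn >= target)
                *last_pushed_lsn = 0;   /* target reached: restart next scan */
}

int main(void)
{
        struct item c = { 30, NULL }, b = { 20, &c }, a = { 10, &b };
        lsn_t last = 0;

        push_pass(&a, 40, 2, &last);    /* pushes 10 and 20, remembers 20 */
        push_pass(&a, 40, 2, &last);    /* resumes near 20, not at the head */
        return 0;
}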

Note that this patch is not intended to solve the inefficiencies in the
AIL structure and the associated issues with extremely large list
contents. That needs to be addressed separately; parallel access would
cause problems for any new structure as well, so I'm only aiming to isolate
the structure from unbounded parallelism here.

SGI-PV: 972759
SGI-Modid: xfs-linux-melb:xfs-kern:30371a

Signed-off-by: David Chinner <dgc@sgi.com>
Signed-off-by: Lachlan McIlroy <lachlan@sgi.com>

Authored by David Chinner, committed by Lachlan McIlroy (249a8c11, 4576758d)

+282 -108
+59
fs/xfs/linux-2.6/xfs_super.c
···
 #include "xfs_vfsops.h"
 #include "xfs_version.h"
 #include "xfs_log_priv.h"
+#include "xfs_trans_priv.h"

 #include <linux/namei.h>
 #include <linux/init.h>
···
 {
        blkdev_issue_flush(buftarg->bt_bdev, NULL);
 }
+
+/*
+ * XFS AIL push thread support
+ */
+void
+xfsaild_wakeup(
+       xfs_mount_t             *mp,
+       xfs_lsn_t               threshold_lsn)
+{
+       mp->m_ail.xa_target = threshold_lsn;
+       wake_up_process(mp->m_ail.xa_task);
+}
+
+int
+xfsaild(
+       void    *data)
+{
+       xfs_mount_t     *mp = (xfs_mount_t *)data;
+       xfs_lsn_t       last_pushed_lsn = 0;
+       long            tout = 0;
+
+       while (!kthread_should_stop()) {
+               if (tout)
+                       schedule_timeout_interruptible(msecs_to_jiffies(tout));
+               tout = 1000;
+
+               /* swsusp */
+               try_to_freeze();
+
+               ASSERT(mp->m_log);
+               if (XFS_FORCED_SHUTDOWN(mp))
+                       continue;
+
+               tout = xfsaild_push(mp, &last_pushed_lsn);
+       }
+
+       return 0;
+}      /* xfsaild */
+
+int
+xfsaild_start(
+       xfs_mount_t     *mp)
+{
+       mp->m_ail.xa_target = 0;
+       mp->m_ail.xa_task = kthread_run(xfsaild, mp, "xfsaild");
+       if (IS_ERR(mp->m_ail.xa_task))
+               return -PTR_ERR(mp->m_ail.xa_task);
+       return 0;
+}
+
+void
+xfsaild_stop(
+       xfs_mount_t     *mp)
+{
+       kthread_stop(mp->m_ail.xa_task);
+}
+
+

 STATIC struct inode *
 xfs_fs_alloc_inode(
+26 -7
fs/xfs/xfs_log.c
···
  * Return error or zero.
  */
 int
-xfs_log_mount(xfs_mount_t      *mp,
-              xfs_buftarg_t    *log_target,
-              xfs_daddr_t      blk_offset,
-              int              num_bblks)
+xfs_log_mount(
+       xfs_mount_t     *mp,
+       xfs_buftarg_t   *log_target,
+       xfs_daddr_t     blk_offset,
+       int             num_bblks)
 {
+       int             error;
+
        if (!(mp->m_flags & XFS_MOUNT_NORECOVERY))
                cmn_err(CE_NOTE, "XFS mounting filesystem %s", mp->m_fsname);
        else {
···
        mp->m_log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks);

        /*
+        * Initialize the AIL now we have a log.
+        */
+       spin_lock_init(&mp->m_ail_lock);
+       error = xfs_trans_ail_init(mp);
+       if (error) {
+               cmn_err(CE_WARN, "XFS: AIL initialisation failed: error %d", error);
+               goto error;
+       }
+
+       /*
         * skip log recovery on a norecovery mount.  pretend it all
         * just worked.
         */
        if (!(mp->m_flags & XFS_MOUNT_NORECOVERY)) {
-               int     error, readonly = (mp->m_flags & XFS_MOUNT_RDONLY);
+               int     readonly = (mp->m_flags & XFS_MOUNT_RDONLY);

                if (readonly)
                        mp->m_flags &= ~XFS_MOUNT_RDONLY;
···
                        mp->m_flags |= XFS_MOUNT_RDONLY;
                if (error) {
                        cmn_err(CE_WARN, "XFS: log mount/recovery failed: error %d", error);
-                       xlog_dealloc_log(mp->m_log);
-                       return error;
+                       goto error;
                }
        }
···

        /* End mounting message in xfs_log_mount_finish */
        return 0;
+error:
+       xfs_log_unmount_dealloc(mp);
+       return error;
 }     /* xfs_log_mount */

 /*
···

 /*
  * Deallocate log structures for unmount/relocation.
+ *
+ * We need to stop the aild from running before we destroy
+ * and deallocate the log as the aild references the log.
  */
 void
 xfs_log_unmount_dealloc(xfs_mount_t *mp)
 {
+       xfs_trans_ail_destroy(mp);
        xlog_dealloc_log(mp->m_log);
 }
-6
fs/xfs/xfs_mount.c
···
                mp->m_flags |= XFS_MOUNT_NO_PERCPU_SB;
        }

-       spin_lock_init(&mp->m_ail_lock);
        spin_lock_init(&mp->m_sb_lock);
        mutex_init(&mp->m_ilock);
        mutex_init(&mp->m_growlock);
-       /*
-        * Initialize the AIL.
-        */
-       xfs_trans_ail_init(mp);
-
        atomic_set(&mp->m_active_trans, 0);

        return mp;
+8 -2
fs/xfs/xfs_mount.h
···
 #define xfs_icsb_sync_counters_flags(mp, flags)       do { } while (0)
 #endif

+typedef struct xfs_ail {
+       xfs_ail_entry_t         xa_ail;
+       uint                    xa_gen;
+       struct task_struct      *xa_task;
+       xfs_lsn_t               xa_target;
+} xfs_ail_t;
+
 typedef struct xfs_mount {
        struct super_block      *m_super;
        xfs_tid_t               m_tid;          /* next unused tid for fs */
        spinlock_t              m_ail_lock;     /* fs AIL mutex */
-       xfs_ail_entry_t         m_ail;          /* fs active log item list */
-       uint                    m_ail_gen;      /* fs AIL generation count */
+       xfs_ail_t               m_ail;          /* fs active log item list */
        xfs_sb_t                m_sb;           /* copy of fs superblock */
        spinlock_t              m_sb_lock;      /* sb counter lock */
        struct xfs_buf          *m_sb_bp;       /* buffer for superblock */
+3 -2
fs/xfs/xfs_trans.h
···
                                  int *);
 #define xfs_trans_commit(tp, flags)   _xfs_trans_commit(tp, flags, NULL)
 void           xfs_trans_cancel(xfs_trans_t *, int);
-void           xfs_trans_ail_init(struct xfs_mount *);
-xfs_lsn_t      xfs_trans_push_ail(struct xfs_mount *, xfs_lsn_t);
+int            xfs_trans_ail_init(struct xfs_mount *);
+void           xfs_trans_ail_destroy(struct xfs_mount *);
+void           xfs_trans_push_ail(struct xfs_mount *, xfs_lsn_t);
 xfs_lsn_t      xfs_trans_tail_ail(struct xfs_mount *);
 void           xfs_trans_unlocked_item(struct xfs_mount *,
                                        xfs_log_item_t *);
+178 -91
fs/xfs/xfs_trans_ail.c
···
        xfs_log_item_t  *lip;

        spin_lock(&mp->m_ail_lock);
-       lip = xfs_ail_min(&(mp->m_ail));
+       lip = xfs_ail_min(&(mp->m_ail.xa_ail));
        if (lip == NULL) {
                lsn = (xfs_lsn_t)0;
        } else {
···
 /*
  * xfs_trans_push_ail
  *
- * This routine is called to move the tail of the AIL
- * forward.  It does this by trying to flush items in the AIL
- * whose lsns are below the given threshold_lsn.
+ * This routine is called to move the tail of the AIL forward.  It does this by
+ * trying to flush items in the AIL whose lsns are below the given
+ * threshold_lsn.
  *
- * The routine returns the lsn of the tail of the log.
+ * The push is run asynchronously in a separate thread, so we return the tail
+ * of the log right now instead of the tail after the push. This means we will
+ * either continue right away, or we will sleep waiting on the async thread to
+ * do its work.
+ *
+ * We do this unlocked - we only need to know whether there is anything in the
+ * AIL at the time we are called. We don't need to access the contents of
+ * any of the objects, so the lock is not needed.
  */
-xfs_lsn_t
+void
 xfs_trans_push_ail(
        xfs_mount_t             *mp,
        xfs_lsn_t               threshold_lsn)
 {
-       xfs_lsn_t               lsn;
        xfs_log_item_t          *lip;
-       int                     gen;
-       int                     restarts;
-       int                     lock_result;
-       int                     flush_log;

-#define        XFS_TRANS_PUSH_AIL_RESTARTS     1000
+       lip = xfs_ail_min(&mp->m_ail.xa_ail);
+       if (lip && !XFS_FORCED_SHUTDOWN(mp)) {
+               if (XFS_LSN_CMP(threshold_lsn, mp->m_ail.xa_target) > 0)
+                       xfsaild_wakeup(mp, threshold_lsn);
+       }
+}
+
+/*
+ * Return the item in the AIL with the current lsn.
+ * Return the current tree generation number for use
+ * in calls to xfs_trans_next_ail().
+ */
+STATIC xfs_log_item_t *
+xfs_trans_first_push_ail(
+       xfs_mount_t     *mp,
+       int             *gen,
+       xfs_lsn_t       lsn)
+{
+       xfs_log_item_t  *lip;
+
+       lip = xfs_ail_min(&(mp->m_ail.xa_ail));
+       *gen = (int)mp->m_ail.xa_gen;
+       if (lsn == 0)
+               return lip;
+
+       while (lip && (XFS_LSN_CMP(lip->li_lsn, lsn) < 0))
+               lip = lip->li_ail.ail_forw;
+
+       return lip;
+}
+
+/*
+ * Function that does the work of pushing on the AIL
+ */
+long
+xfsaild_push(
+       xfs_mount_t     *mp,
+       xfs_lsn_t       *last_lsn)
+{
+       long            tout = 1000; /* milliseconds */
+       xfs_lsn_t       last_pushed_lsn = *last_lsn;
+       xfs_lsn_t       target = mp->m_ail.xa_target;
+       xfs_lsn_t       lsn;
+       xfs_log_item_t  *lip;
+       int             gen;
+       int             restarts;
+       int             flush_log, count, stuck;
+
+#define        XFS_TRANS_PUSH_AIL_RESTARTS     10

        spin_lock(&mp->m_ail_lock);
-       lip = xfs_trans_first_ail(mp, &gen);
-       if (lip == NULL || XFS_FORCED_SHUTDOWN(mp)) {
+       lip = xfs_trans_first_push_ail(mp, &gen, *last_lsn);
+       if (!lip || XFS_FORCED_SHUTDOWN(mp)) {
                /*
-                * Just return if the AIL is empty.
+                * AIL is empty or our push has reached the end.
                 */
                spin_unlock(&mp->m_ail_lock);
-               return (xfs_lsn_t)0;
+               last_pushed_lsn = 0;
+               goto out;
        }

        XFS_STATS_INC(xs_push_ail);

        /*
         * While the item we are looking at is below the given threshold
-        * try to flush it out.  Make sure to limit the number of times
-        * we allow xfs_trans_next_ail() to restart scanning from the
-        * beginning of the list.  We'd like not to stop until we've at least
+        * try to flush it out. We'd like not to stop until we've at least
         * tried to push on everything in the AIL with an LSN less than
-        * the given threshold.  However, we may give up before that if
-        * we realize that we've been holding the AIL lock for 'too long',
-        * blocking interrupts.  Currently, too long is < 500us roughly.
+        * the given threshold.
+        *
+        * However, we will stop after a certain number of pushes and wait
+        * for a reduced timeout to fire before pushing further. This
+        * prevents us from spinning when we can't do anything or there is
+        * lots of contention on the AIL lists.
         */
-       flush_log = 0;
-       restarts = 0;
-       while (((restarts < XFS_TRANS_PUSH_AIL_RESTARTS) &&
-               (XFS_LSN_CMP(lip->li_lsn, threshold_lsn) < 0))) {
+       tout = 10;
+       lsn = lip->li_lsn;
+       flush_log = stuck = count = restarts = 0;
+       while ((XFS_LSN_CMP(lip->li_lsn, target) < 0)) {
+               int     lock_result;
                /*
-                * If we can lock the item without sleeping, unlock
-                * the AIL lock and flush the item.  Then re-grab the
-                * AIL lock so we can look for the next item on the
-                * AIL.  Since we unlock the AIL while we flush the
-                * item, the next routine may start over again at the
-                * the beginning of the list if anything has changed.
-                * That is what the generation count is for.
+                * If we can lock the item without sleeping, unlock the AIL
+                * lock and flush the item.  Then re-grab the AIL lock so we
+                * can look for the next item on the AIL. List changes are
+                * handled by the AIL lookup functions internally.
                 *
-                * If we can't lock the item, either its holder will flush
-                * it or it is already being flushed or it is being relogged.
-                * In any of these case it is being taken care of and we
-                * can just skip to the next item in the list.
+                * If we can't lock the item, either its holder will flush it
+                * or it is already being flushed or it is being relogged.  In
+                * any of these cases it is being taken care of and we can
+                * just skip to the next item in the list.
                 */
                lock_result = IOP_TRYLOCK(lip);
+               spin_unlock(&mp->m_ail_lock);
                switch (lock_result) {
-                     case XFS_ITEM_SUCCESS:
-                       spin_unlock(&mp->m_ail_lock);
+               case XFS_ITEM_SUCCESS:
                        XFS_STATS_INC(xs_push_ail_success);
                        IOP_PUSH(lip);
-                       spin_lock(&mp->m_ail_lock);
+                       last_pushed_lsn = lsn;
                        break;

-                     case XFS_ITEM_PUSHBUF:
-                       spin_unlock(&mp->m_ail_lock);
+               case XFS_ITEM_PUSHBUF:
                        XFS_STATS_INC(xs_push_ail_pushbuf);
-#ifdef XFSRACEDEBUG
-                       delay_for_intr();
-                       delay(300);
-#endif
-                       ASSERT(lip->li_ops->iop_pushbuf);
-                       ASSERT(lip);
                        IOP_PUSHBUF(lip);
-                       spin_lock(&mp->m_ail_lock);
+                       last_pushed_lsn = lsn;
                        break;

-                     case XFS_ITEM_PINNED:
+               case XFS_ITEM_PINNED:
                        XFS_STATS_INC(xs_push_ail_pinned);
+                       stuck++;
                        flush_log = 1;
                        break;

-                     case XFS_ITEM_LOCKED:
+               case XFS_ITEM_LOCKED:
                        XFS_STATS_INC(xs_push_ail_locked);
+                       last_pushed_lsn = lsn;
+                       stuck++;
                        break;

-                     case XFS_ITEM_FLUSHING:
+               case XFS_ITEM_FLUSHING:
                        XFS_STATS_INC(xs_push_ail_flushing);
+                       last_pushed_lsn = lsn;
+                       stuck++;
                        break;

-                     default:
+               default:
                        ASSERT(0);
                        break;
                }

-               lip = xfs_trans_next_ail(mp, lip, &gen, &restarts);
-               if (lip == NULL) {
+               spin_lock(&mp->m_ail_lock);
+               /* should we bother continuing? */
+               if (XFS_FORCED_SHUTDOWN(mp))
                        break;
-               }
-               if (XFS_FORCED_SHUTDOWN(mp)) {
-                       /*
-                        * Just return if we shut down during the last try.
-                        */
-                       spin_unlock(&mp->m_ail_lock);
-                       return (xfs_lsn_t)0;
-               }
+               ASSERT(mp->m_log);

+               count++;
+
+               /*
+                * Are there too many items we can't do anything with?
+                * If we are skipping too many items because we can't flush
+                * them or they are already being flushed, we back off and
+                * give them time to complete whatever operation is being
+                * done. i.e. remove pressure from the AIL while we can't make
+                * progress so traversals don't slow down further inserts and
+                * removals to/from the AIL.
+                *
+                * The value of 100 is an arbitrary magic number based on
+                * observation.
+                */
+               if (stuck > 100)
+                       break;
+
+               lip = xfs_trans_next_ail(mp, lip, &gen, &restarts);
+               if (lip == NULL)
+                       break;
+               if (restarts > XFS_TRANS_PUSH_AIL_RESTARTS)
+                       break;
+               lsn = lip->li_lsn;
        }
+       spin_unlock(&mp->m_ail_lock);

        if (flush_log) {
                /*
···
                 * push out the log so it will become unpinned and
                 * move forward in the AIL.
                 */
-               spin_unlock(&mp->m_ail_lock);
                XFS_STATS_INC(xs_push_ail_flush);
                xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
-               spin_lock(&mp->m_ail_lock);
        }

-       lip = xfs_ail_min(&(mp->m_ail));
-       if (lip == NULL) {
-               lsn = (xfs_lsn_t)0;
-       } else {
-               lsn = lip->li_lsn;
+       /*
+        * We reached the target so wait a bit longer for I/O to complete and
+        * remove pushed items from the AIL before we start the next scan from
+        * the start of the AIL.
+        */
+       if ((XFS_LSN_CMP(lsn, target) >= 0)) {
+               tout += 20;
+               last_pushed_lsn = 0;
+       } else if ((restarts > XFS_TRANS_PUSH_AIL_RESTARTS) ||
+                  (count && ((stuck * 100) / count > 90))) {
+               /*
+                * Either there is a lot of contention on the AIL or we
+                * are stuck due to operations in progress. "Stuck" in this
+                * case is defined as >90% of the items we tried to push
+                * were stuck.
+                *
+                * Back off a bit more to allow some I/O to complete before
+                * continuing from where we were.
+                */
+               tout += 10;
        }
-
-       spin_unlock(&mp->m_ail_lock);
-       return lsn;
-}      /* xfs_trans_push_ail */
+out:
+       *last_lsn = last_pushed_lsn;
+       return tout;
+}      /* xfsaild_push */


 /*
···
         * the call to xfs_log_move_tail() doesn't do anything if there's
         * not enough free space to wake people up so we're safe calling it.
         */
-       min_lip = xfs_ail_min(&mp->m_ail);
+       min_lip = xfs_ail_min(&mp->m_ail.xa_ail);

        if (min_lip == lip)
                xfs_log_move_tail(mp, 1);
···
        xfs_log_item_t  *dlip=NULL;
        xfs_log_item_t  *mlip;  /* ptr to minimum lip */

-       ailp = &(mp->m_ail);
+       ailp = &(mp->m_ail.xa_ail);
        mlip = xfs_ail_min(ailp);

        if (lip->li_flags & XFS_LI_IN_AIL) {
···
        lip->li_lsn = lsn;

        xfs_ail_insert(ailp, lip);
-       mp->m_ail_gen++;
+       mp->m_ail.xa_gen++;

        if (mlip == dlip) {
-               mlip = xfs_ail_min(&(mp->m_ail));
+               mlip = xfs_ail_min(&(mp->m_ail.xa_ail));
                spin_unlock(&mp->m_ail_lock);
                xfs_log_move_tail(mp, mlip->li_lsn);
        } else {
···
        xfs_log_item_t  *mlip;

        if (lip->li_flags & XFS_LI_IN_AIL) {
-               ailp = &(mp->m_ail);
+               ailp = &(mp->m_ail.xa_ail);
                mlip = xfs_ail_min(ailp);
                dlip = xfs_ail_delete(ailp, lip);
                ASSERT(dlip == lip);
···
                lip->li_flags &= ~XFS_LI_IN_AIL;
                lip->li_lsn = 0;
-               mp->m_ail_gen++;
+               mp->m_ail.xa_gen++;

                if (mlip == dlip) {
-                       mlip = xfs_ail_min(&(mp->m_ail));
+                       mlip = xfs_ail_min(&(mp->m_ail.xa_ail));
                        spin_unlock(&mp->m_ail_lock);
                        xfs_log_move_tail(mp, (mlip ? mlip->li_lsn : 0));
                } else {
···
 {
        xfs_log_item_t  *lip;

-       lip = xfs_ail_min(&(mp->m_ail));
-       *gen = (int)mp->m_ail_gen;
+       lip = xfs_ail_min(&(mp->m_ail.xa_ail));
+       *gen = (int)mp->m_ail.xa_gen;

-       return (lip);
+       return lip;
 }

 /*
···
        xfs_log_item_t  *nlip;

        ASSERT(mp && lip && gen);
-       if (mp->m_ail_gen == *gen) {
-               nlip = xfs_ail_next(&(mp->m_ail), lip);
+       if (mp->m_ail.xa_gen == *gen) {
+               nlip = xfs_ail_next(&(mp->m_ail.xa_ail), lip);
        } else {
-               nlip = xfs_ail_min(&(mp->m_ail));
-               *gen = (int)mp->m_ail_gen;
+               nlip = xfs_ail_min(&(mp->m_ail).xa_ail);
+               *gen = (int)mp->m_ail.xa_gen;
                if (restarts != NULL) {
                        XFS_STATS_INC(xs_push_ail_restarts);
                        (*restarts)++;
···
 /*
  * Initialize the doubly linked list to point only to itself.
  */
-void
+int
 xfs_trans_ail_init(
        xfs_mount_t     *mp)
 {
-       mp->m_ail.ail_forw = (xfs_log_item_t*)&(mp->m_ail);
-       mp->m_ail.ail_back = (xfs_log_item_t*)&(mp->m_ail);
+       mp->m_ail.xa_ail.ail_forw = (xfs_log_item_t*)&mp->m_ail.xa_ail;
+       mp->m_ail.xa_ail.ail_back = (xfs_log_item_t*)&mp->m_ail.xa_ail;
+       return xfsaild_start(mp);
+}
+
+void
+xfs_trans_ail_destroy(
+       xfs_mount_t     *mp)
+{
+       xfsaild_stop(mp);
 }

 /*
+8
fs/xfs/xfs_trans_priv.h
···
                                     struct xfs_log_item *, int *, int *);


+/*
+ * AIL push thread support
+ */
+long   xfsaild_push(struct xfs_mount *, xfs_lsn_t *);
+void   xfsaild_wakeup(struct xfs_mount *, xfs_lsn_t);
+int    xfsaild_start(struct xfs_mount *);
+void   xfsaild_stop(struct xfs_mount *);
+
 #endif /* __XFS_TRANS_PRIV_H__ */