Merge branch 'for-linus' of git://oss.sgi.com/xfs/xfs

* 'for-linus' of git://oss.sgi.com/xfs/xfs:
xfs: Ensure we force all busy extents in range to disk
xfs: Don't flush stale inodes
xfs: fix timestamp handling in xfs_setattr
xfs: use DECLARE_EVENT_CLASS

+674 -633
+2 -1
fs/xfs/linux-2.6/xfs_acl.c
···
 	if (mode != inode->i_mode) {
 		struct iattr iattr;
 
-		iattr.ia_valid = ATTR_MODE;
+		iattr.ia_valid = ATTR_MODE | ATTR_CTIME;
 		iattr.ia_mode = mode;
+		iattr.ia_ctime = current_fs_time(inode->i_sb);
 
 		error = -xfs_setattr(XFS_I(inode), &iattr, XFS_ATTR_NOACL);
 	}
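Note on the hunk above: it tracks the xfs_setattr() timestamp rework later in this merge. xfs_setattr() no longer bumps ctime on the caller's behalf, so in-kernel callers that modify the inode must now request the timestamp update themselves via ATTR_CTIME. A minimal sketch of that caller-side pattern (example_chmod_update() and new_mode are illustrative names, not from the patch):

static int example_chmod_update(struct inode *inode, umode_t new_mode)
{
	struct iattr iattr;

	iattr.ia_valid = ATTR_MODE | ATTR_CTIME;	/* name every field being changed */
	iattr.ia_mode = new_mode;
	iattr.ia_ctime = current_fs_time(inode->i_sb);	/* explicit ctime bump */

	return -xfs_setattr(XFS_I(inode), &iattr, XFS_ATTR_NOACL);
}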
+605 -552
fs/xfs/linux-2.6/xfs_trace.h
··· 33 33 struct xlog_ticket; 34 34 struct log; 35 35 36 - #define DEFINE_ATTR_LIST_EVENT(name) \ 37 - TRACE_EVENT(name, \ 38 - TP_PROTO(struct xfs_attr_list_context *ctx), \ 39 - TP_ARGS(ctx), \ 40 - TP_STRUCT__entry( \ 41 - __field(dev_t, dev) \ 42 - __field(xfs_ino_t, ino) \ 43 - __field(u32, hashval) \ 44 - __field(u32, blkno) \ 45 - __field(u32, offset) \ 46 - __field(void *, alist) \ 47 - __field(int, bufsize) \ 48 - __field(int, count) \ 49 - __field(int, firstu) \ 50 - __field(int, dupcnt) \ 51 - __field(int, flags) \ 52 - ), \ 53 - TP_fast_assign( \ 54 - __entry->dev = VFS_I(ctx->dp)->i_sb->s_dev; \ 55 - __entry->ino = ctx->dp->i_ino; \ 56 - __entry->hashval = ctx->cursor->hashval; \ 57 - __entry->blkno = ctx->cursor->blkno; \ 58 - __entry->offset = ctx->cursor->offset; \ 59 - __entry->alist = ctx->alist; \ 60 - __entry->bufsize = ctx->bufsize; \ 61 - __entry->count = ctx->count; \ 62 - __entry->firstu = ctx->firstu; \ 63 - __entry->flags = ctx->flags; \ 64 - ), \ 65 - TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u " \ 66 - "alist 0x%p size %u count %u firstu %u flags %d %s", \ 67 - MAJOR(__entry->dev), MINOR(__entry->dev), \ 68 - __entry->ino, \ 69 - __entry->hashval, \ 70 - __entry->blkno, \ 71 - __entry->offset, \ 72 - __entry->dupcnt, \ 73 - __entry->alist, \ 74 - __entry->bufsize, \ 75 - __entry->count, \ 76 - __entry->firstu, \ 77 - __entry->flags, \ 78 - __print_flags(__entry->flags, "|", XFS_ATTR_FLAGS) \ 79 - ) \ 36 + DECLARE_EVENT_CLASS(xfs_attr_list_class, 37 + TP_PROTO(struct xfs_attr_list_context *ctx), 38 + TP_ARGS(ctx), 39 + TP_STRUCT__entry( 40 + __field(dev_t, dev) 41 + __field(xfs_ino_t, ino) 42 + __field(u32, hashval) 43 + __field(u32, blkno) 44 + __field(u32, offset) 45 + __field(void *, alist) 46 + __field(int, bufsize) 47 + __field(int, count) 48 + __field(int, firstu) 49 + __field(int, dupcnt) 50 + __field(int, flags) 51 + ), 52 + TP_fast_assign( 53 + __entry->dev = VFS_I(ctx->dp)->i_sb->s_dev; 54 + __entry->ino = ctx->dp->i_ino; 55 + __entry->hashval = ctx->cursor->hashval; 56 + __entry->blkno = ctx->cursor->blkno; 57 + __entry->offset = ctx->cursor->offset; 58 + __entry->alist = ctx->alist; 59 + __entry->bufsize = ctx->bufsize; 60 + __entry->count = ctx->count; 61 + __entry->firstu = ctx->firstu; 62 + __entry->flags = ctx->flags; 63 + ), 64 + TP_printk("dev %d:%d ino 0x%llx cursor h/b/o 0x%x/0x%x/%u dupcnt %u " 65 + "alist 0x%p size %u count %u firstu %u flags %d %s", 66 + MAJOR(__entry->dev), MINOR(__entry->dev), 67 + __entry->ino, 68 + __entry->hashval, 69 + __entry->blkno, 70 + __entry->offset, 71 + __entry->dupcnt, 72 + __entry->alist, 73 + __entry->bufsize, 74 + __entry->count, 75 + __entry->firstu, 76 + __entry->flags, 77 + __print_flags(__entry->flags, "|", XFS_ATTR_FLAGS) 78 + ) 80 79 ) 80 + 81 + #define DEFINE_ATTR_LIST_EVENT(name) \ 82 + DEFINE_EVENT(xfs_attr_list_class, name, \ 83 + TP_PROTO(struct xfs_attr_list_context *ctx), \ 84 + TP_ARGS(ctx)) 81 85 DEFINE_ATTR_LIST_EVENT(xfs_attr_list_sf); 82 86 DEFINE_ATTR_LIST_EVENT(xfs_attr_list_sf_all); 83 87 DEFINE_ATTR_LIST_EVENT(xfs_attr_list_leaf); ··· 182 178 (char *)__entry->caller_ip) 183 179 ); 184 180 185 - #define DEFINE_BMAP_EVENT(name) \ 186 - TRACE_EVENT(name, \ 187 - TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx, int state, \ 188 - unsigned long caller_ip), \ 189 - TP_ARGS(ip, idx, state, caller_ip), \ 190 - TP_STRUCT__entry( \ 191 - __field(dev_t, dev) \ 192 - __field(xfs_ino_t, ino) \ 193 - __field(xfs_extnum_t, idx) \ 194 - __field(xfs_fileoff_t, startoff) \ 195 - 
__field(xfs_fsblock_t, startblock) \ 196 - __field(xfs_filblks_t, blockcount) \ 197 - __field(xfs_exntst_t, state) \ 198 - __field(int, bmap_state) \ 199 - __field(unsigned long, caller_ip) \ 200 - ), \ 201 - TP_fast_assign( \ 202 - struct xfs_ifork *ifp = (state & BMAP_ATTRFORK) ? \ 203 - ip->i_afp : &ip->i_df; \ 204 - struct xfs_bmbt_irec r; \ 205 - \ 206 - xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &r); \ 207 - __entry->dev = VFS_I(ip)->i_sb->s_dev; \ 208 - __entry->ino = ip->i_ino; \ 209 - __entry->idx = idx; \ 210 - __entry->startoff = r.br_startoff; \ 211 - __entry->startblock = r.br_startblock; \ 212 - __entry->blockcount = r.br_blockcount; \ 213 - __entry->state = r.br_state; \ 214 - __entry->bmap_state = state; \ 215 - __entry->caller_ip = caller_ip; \ 216 - ), \ 217 - TP_printk("dev %d:%d ino 0x%llx state %s idx %ld " \ 218 - "offset %lld block %s count %lld flag %d caller %pf", \ 219 - MAJOR(__entry->dev), MINOR(__entry->dev), \ 220 - __entry->ino, \ 221 - __print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS), \ 222 - (long)__entry->idx, \ 223 - __entry->startoff, \ 224 - xfs_fmtfsblock(__entry->startblock), \ 225 - __entry->blockcount, \ 226 - __entry->state, \ 227 - (char *)__entry->caller_ip) \ 181 + DECLARE_EVENT_CLASS(xfs_bmap_class, 182 + TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx, int state, 183 + unsigned long caller_ip), 184 + TP_ARGS(ip, idx, state, caller_ip), 185 + TP_STRUCT__entry( 186 + __field(dev_t, dev) 187 + __field(xfs_ino_t, ino) 188 + __field(xfs_extnum_t, idx) 189 + __field(xfs_fileoff_t, startoff) 190 + __field(xfs_fsblock_t, startblock) 191 + __field(xfs_filblks_t, blockcount) 192 + __field(xfs_exntst_t, state) 193 + __field(int, bmap_state) 194 + __field(unsigned long, caller_ip) 195 + ), 196 + TP_fast_assign( 197 + struct xfs_ifork *ifp = (state & BMAP_ATTRFORK) ? 
198 + ip->i_afp : &ip->i_df; 199 + struct xfs_bmbt_irec r; 200 + 201 + xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &r); 202 + __entry->dev = VFS_I(ip)->i_sb->s_dev; 203 + __entry->ino = ip->i_ino; 204 + __entry->idx = idx; 205 + __entry->startoff = r.br_startoff; 206 + __entry->startblock = r.br_startblock; 207 + __entry->blockcount = r.br_blockcount; 208 + __entry->state = r.br_state; 209 + __entry->bmap_state = state; 210 + __entry->caller_ip = caller_ip; 211 + ), 212 + TP_printk("dev %d:%d ino 0x%llx state %s idx %ld " 213 + "offset %lld block %s count %lld flag %d caller %pf", 214 + MAJOR(__entry->dev), MINOR(__entry->dev), 215 + __entry->ino, 216 + __print_flags(__entry->bmap_state, "|", XFS_BMAP_EXT_FLAGS), 217 + (long)__entry->idx, 218 + __entry->startoff, 219 + xfs_fmtfsblock(__entry->startblock), 220 + __entry->blockcount, 221 + __entry->state, 222 + (char *)__entry->caller_ip) 228 223 ) 229 224 225 + #define DEFINE_BMAP_EVENT(name) \ 226 + DEFINE_EVENT(xfs_bmap_class, name, \ 227 + TP_PROTO(struct xfs_inode *ip, xfs_extnum_t idx, int state, \ 228 + unsigned long caller_ip), \ 229 + TP_ARGS(ip, idx, state, caller_ip)) 230 230 DEFINE_BMAP_EVENT(xfs_iext_remove); 231 231 DEFINE_BMAP_EVENT(xfs_bmap_pre_update); 232 232 DEFINE_BMAP_EVENT(xfs_bmap_post_update); 233 233 DEFINE_BMAP_EVENT(xfs_extlist); 234 234 235 - #define DEFINE_BUF_EVENT(tname) \ 236 - TRACE_EVENT(tname, \ 237 - TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip), \ 238 - TP_ARGS(bp, caller_ip), \ 239 - TP_STRUCT__entry( \ 240 - __field(dev_t, dev) \ 241 - __field(xfs_daddr_t, bno) \ 242 - __field(size_t, buffer_length) \ 243 - __field(int, hold) \ 244 - __field(int, pincount) \ 245 - __field(unsigned, lockval) \ 246 - __field(unsigned, flags) \ 247 - __field(unsigned long, caller_ip) \ 248 - ), \ 249 - TP_fast_assign( \ 250 - __entry->dev = bp->b_target->bt_dev; \ 251 - __entry->bno = bp->b_bn; \ 252 - __entry->buffer_length = bp->b_buffer_length; \ 253 - __entry->hold = atomic_read(&bp->b_hold); \ 254 - __entry->pincount = atomic_read(&bp->b_pin_count); \ 255 - __entry->lockval = xfs_buf_lock_value(bp); \ 256 - __entry->flags = bp->b_flags; \ 257 - __entry->caller_ip = caller_ip; \ 258 - ), \ 259 - TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d " \ 260 - "lock %d flags %s caller %pf", \ 261 - MAJOR(__entry->dev), MINOR(__entry->dev), \ 262 - (unsigned long long)__entry->bno, \ 263 - __entry->buffer_length, \ 264 - __entry->hold, \ 265 - __entry->pincount, \ 266 - __entry->lockval, \ 267 - __print_flags(__entry->flags, "|", XFS_BUF_FLAGS), \ 268 - (void *)__entry->caller_ip) \ 235 + DECLARE_EVENT_CLASS(xfs_buf_class, 236 + TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip), 237 + TP_ARGS(bp, caller_ip), 238 + TP_STRUCT__entry( 239 + __field(dev_t, dev) 240 + __field(xfs_daddr_t, bno) 241 + __field(size_t, buffer_length) 242 + __field(int, hold) 243 + __field(int, pincount) 244 + __field(unsigned, lockval) 245 + __field(unsigned, flags) 246 + __field(unsigned long, caller_ip) 247 + ), 248 + TP_fast_assign( 249 + __entry->dev = bp->b_target->bt_dev; 250 + __entry->bno = bp->b_bn; 251 + __entry->buffer_length = bp->b_buffer_length; 252 + __entry->hold = atomic_read(&bp->b_hold); 253 + __entry->pincount = atomic_read(&bp->b_pin_count); 254 + __entry->lockval = xfs_buf_lock_value(bp); 255 + __entry->flags = bp->b_flags; 256 + __entry->caller_ip = caller_ip; 257 + ), 258 + TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d " 259 + "lock %d flags %s caller %pf", 260 + MAJOR(__entry->dev), 
MINOR(__entry->dev), 261 + (unsigned long long)__entry->bno, 262 + __entry->buffer_length, 263 + __entry->hold, 264 + __entry->pincount, 265 + __entry->lockval, 266 + __print_flags(__entry->flags, "|", XFS_BUF_FLAGS), 267 + (void *)__entry->caller_ip) 269 268 ) 269 + 270 + #define DEFINE_BUF_EVENT(name) \ 271 + DEFINE_EVENT(xfs_buf_class, name, \ 272 + TP_PROTO(struct xfs_buf *bp, unsigned long caller_ip), \ 273 + TP_ARGS(bp, caller_ip)) 270 274 DEFINE_BUF_EVENT(xfs_buf_init); 271 275 DEFINE_BUF_EVENT(xfs_buf_free); 272 276 DEFINE_BUF_EVENT(xfs_buf_hold); ··· 311 299 DEFINE_BUF_EVENT(xfs_inode_item_push); 312 300 313 301 /* pass flags explicitly */ 314 - #define DEFINE_BUF_FLAGS_EVENT(tname) \ 315 - TRACE_EVENT(tname, \ 316 - TP_PROTO(struct xfs_buf *bp, unsigned flags, unsigned long caller_ip), \ 317 - TP_ARGS(bp, flags, caller_ip), \ 318 - TP_STRUCT__entry( \ 319 - __field(dev_t, dev) \ 320 - __field(xfs_daddr_t, bno) \ 321 - __field(size_t, buffer_length) \ 322 - __field(int, hold) \ 323 - __field(int, pincount) \ 324 - __field(unsigned, lockval) \ 325 - __field(unsigned, flags) \ 326 - __field(unsigned long, caller_ip) \ 327 - ), \ 328 - TP_fast_assign( \ 329 - __entry->dev = bp->b_target->bt_dev; \ 330 - __entry->bno = bp->b_bn; \ 331 - __entry->buffer_length = bp->b_buffer_length; \ 332 - __entry->flags = flags; \ 333 - __entry->hold = atomic_read(&bp->b_hold); \ 334 - __entry->pincount = atomic_read(&bp->b_pin_count); \ 335 - __entry->lockval = xfs_buf_lock_value(bp); \ 336 - __entry->caller_ip = caller_ip; \ 337 - ), \ 338 - TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d " \ 339 - "lock %d flags %s caller %pf", \ 340 - MAJOR(__entry->dev), MINOR(__entry->dev), \ 341 - (unsigned long long)__entry->bno, \ 342 - __entry->buffer_length, \ 343 - __entry->hold, \ 344 - __entry->pincount, \ 345 - __entry->lockval, \ 346 - __print_flags(__entry->flags, "|", XFS_BUF_FLAGS), \ 347 - (void *)__entry->caller_ip) \ 302 + DECLARE_EVENT_CLASS(xfs_buf_flags_class, 303 + TP_PROTO(struct xfs_buf *bp, unsigned flags, unsigned long caller_ip), 304 + TP_ARGS(bp, flags, caller_ip), 305 + TP_STRUCT__entry( 306 + __field(dev_t, dev) 307 + __field(xfs_daddr_t, bno) 308 + __field(size_t, buffer_length) 309 + __field(int, hold) 310 + __field(int, pincount) 311 + __field(unsigned, lockval) 312 + __field(unsigned, flags) 313 + __field(unsigned long, caller_ip) 314 + ), 315 + TP_fast_assign( 316 + __entry->dev = bp->b_target->bt_dev; 317 + __entry->bno = bp->b_bn; 318 + __entry->buffer_length = bp->b_buffer_length; 319 + __entry->flags = flags; 320 + __entry->hold = atomic_read(&bp->b_hold); 321 + __entry->pincount = atomic_read(&bp->b_pin_count); 322 + __entry->lockval = xfs_buf_lock_value(bp); 323 + __entry->caller_ip = caller_ip; 324 + ), 325 + TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d " 326 + "lock %d flags %s caller %pf", 327 + MAJOR(__entry->dev), MINOR(__entry->dev), 328 + (unsigned long long)__entry->bno, 329 + __entry->buffer_length, 330 + __entry->hold, 331 + __entry->pincount, 332 + __entry->lockval, 333 + __print_flags(__entry->flags, "|", XFS_BUF_FLAGS), 334 + (void *)__entry->caller_ip) 348 335 ) 336 + 337 + #define DEFINE_BUF_FLAGS_EVENT(name) \ 338 + DEFINE_EVENT(xfs_buf_flags_class, name, \ 339 + TP_PROTO(struct xfs_buf *bp, unsigned flags, unsigned long caller_ip), \ 340 + TP_ARGS(bp, flags, caller_ip)) 349 341 DEFINE_BUF_FLAGS_EVENT(xfs_buf_find); 350 342 DEFINE_BUF_FLAGS_EVENT(xfs_buf_get); 351 343 DEFINE_BUF_FLAGS_EVENT(xfs_buf_read); ··· 392 376 (void 
*)__entry->caller_ip) 393 377 ); 394 378 395 - #define DEFINE_BUF_ITEM_EVENT(tname) \ 396 - TRACE_EVENT(tname, \ 397 - TP_PROTO(struct xfs_buf_log_item *bip), \ 398 - TP_ARGS(bip), \ 399 - TP_STRUCT__entry( \ 400 - __field(dev_t, dev) \ 401 - __field(xfs_daddr_t, buf_bno) \ 402 - __field(size_t, buf_len) \ 403 - __field(int, buf_hold) \ 404 - __field(int, buf_pincount) \ 405 - __field(int, buf_lockval) \ 406 - __field(unsigned, buf_flags) \ 407 - __field(unsigned, bli_recur) \ 408 - __field(int, bli_refcount) \ 409 - __field(unsigned, bli_flags) \ 410 - __field(void *, li_desc) \ 411 - __field(unsigned, li_flags) \ 412 - ), \ 413 - TP_fast_assign( \ 414 - __entry->dev = bip->bli_buf->b_target->bt_dev; \ 415 - __entry->bli_flags = bip->bli_flags; \ 416 - __entry->bli_recur = bip->bli_recur; \ 417 - __entry->bli_refcount = atomic_read(&bip->bli_refcount); \ 418 - __entry->buf_bno = bip->bli_buf->b_bn; \ 419 - __entry->buf_len = bip->bli_buf->b_buffer_length; \ 420 - __entry->buf_flags = bip->bli_buf->b_flags; \ 421 - __entry->buf_hold = atomic_read(&bip->bli_buf->b_hold); \ 422 - __entry->buf_pincount = \ 423 - atomic_read(&bip->bli_buf->b_pin_count); \ 424 - __entry->buf_lockval = xfs_buf_lock_value(bip->bli_buf); \ 425 - __entry->li_desc = bip->bli_item.li_desc; \ 426 - __entry->li_flags = bip->bli_item.li_flags; \ 427 - ), \ 428 - TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d " \ 429 - "lock %d flags %s recur %d refcount %d bliflags %s " \ 430 - "lidesc 0x%p liflags %s", \ 431 - MAJOR(__entry->dev), MINOR(__entry->dev), \ 432 - (unsigned long long)__entry->buf_bno, \ 433 - __entry->buf_len, \ 434 - __entry->buf_hold, \ 435 - __entry->buf_pincount, \ 436 - __entry->buf_lockval, \ 437 - __print_flags(__entry->buf_flags, "|", XFS_BUF_FLAGS), \ 438 - __entry->bli_recur, \ 439 - __entry->bli_refcount, \ 440 - __print_flags(__entry->bli_flags, "|", XFS_BLI_FLAGS), \ 441 - __entry->li_desc, \ 442 - __print_flags(__entry->li_flags, "|", XFS_LI_FLAGS)) \ 379 + DECLARE_EVENT_CLASS(xfs_buf_item_class, 380 + TP_PROTO(struct xfs_buf_log_item *bip), 381 + TP_ARGS(bip), 382 + TP_STRUCT__entry( 383 + __field(dev_t, dev) 384 + __field(xfs_daddr_t, buf_bno) 385 + __field(size_t, buf_len) 386 + __field(int, buf_hold) 387 + __field(int, buf_pincount) 388 + __field(int, buf_lockval) 389 + __field(unsigned, buf_flags) 390 + __field(unsigned, bli_recur) 391 + __field(int, bli_refcount) 392 + __field(unsigned, bli_flags) 393 + __field(void *, li_desc) 394 + __field(unsigned, li_flags) 395 + ), 396 + TP_fast_assign( 397 + __entry->dev = bip->bli_buf->b_target->bt_dev; 398 + __entry->bli_flags = bip->bli_flags; 399 + __entry->bli_recur = bip->bli_recur; 400 + __entry->bli_refcount = atomic_read(&bip->bli_refcount); 401 + __entry->buf_bno = bip->bli_buf->b_bn; 402 + __entry->buf_len = bip->bli_buf->b_buffer_length; 403 + __entry->buf_flags = bip->bli_buf->b_flags; 404 + __entry->buf_hold = atomic_read(&bip->bli_buf->b_hold); 405 + __entry->buf_pincount = atomic_read(&bip->bli_buf->b_pin_count); 406 + __entry->buf_lockval = xfs_buf_lock_value(bip->bli_buf); 407 + __entry->li_desc = bip->bli_item.li_desc; 408 + __entry->li_flags = bip->bli_item.li_flags; 409 + ), 410 + TP_printk("dev %d:%d bno 0x%llx len 0x%zx hold %d pincount %d " 411 + "lock %d flags %s recur %d refcount %d bliflags %s " 412 + "lidesc 0x%p liflags %s", 413 + MAJOR(__entry->dev), MINOR(__entry->dev), 414 + (unsigned long long)__entry->buf_bno, 415 + __entry->buf_len, 416 + __entry->buf_hold, 417 + __entry->buf_pincount, 418 + 
__entry->buf_lockval, 419 + __print_flags(__entry->buf_flags, "|", XFS_BUF_FLAGS), 420 + __entry->bli_recur, 421 + __entry->bli_refcount, 422 + __print_flags(__entry->bli_flags, "|", XFS_BLI_FLAGS), 423 + __entry->li_desc, 424 + __print_flags(__entry->li_flags, "|", XFS_LI_FLAGS)) 443 425 ) 426 + 427 + #define DEFINE_BUF_ITEM_EVENT(name) \ 428 + DEFINE_EVENT(xfs_buf_item_class, name, \ 429 + TP_PROTO(struct xfs_buf_log_item *bip), \ 430 + TP_ARGS(bip)) 444 431 DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size); 445 432 DEFINE_BUF_ITEM_EVENT(xfs_buf_item_size_stale); 446 433 DEFINE_BUF_ITEM_EVENT(xfs_buf_item_format); ··· 469 450 DEFINE_BUF_ITEM_EVENT(xfs_trans_bhold_release); 470 451 DEFINE_BUF_ITEM_EVENT(xfs_trans_binval); 471 452 472 - #define DEFINE_LOCK_EVENT(name) \ 473 - TRACE_EVENT(name, \ 474 - TP_PROTO(struct xfs_inode *ip, unsigned lock_flags, \ 475 - unsigned long caller_ip), \ 476 - TP_ARGS(ip, lock_flags, caller_ip), \ 477 - TP_STRUCT__entry( \ 478 - __field(dev_t, dev) \ 479 - __field(xfs_ino_t, ino) \ 480 - __field(int, lock_flags) \ 481 - __field(unsigned long, caller_ip) \ 482 - ), \ 483 - TP_fast_assign( \ 484 - __entry->dev = VFS_I(ip)->i_sb->s_dev; \ 485 - __entry->ino = ip->i_ino; \ 486 - __entry->lock_flags = lock_flags; \ 487 - __entry->caller_ip = caller_ip; \ 488 - ), \ 489 - TP_printk("dev %d:%d ino 0x%llx flags %s caller %pf", \ 490 - MAJOR(__entry->dev), MINOR(__entry->dev), \ 491 - __entry->ino, \ 492 - __print_flags(__entry->lock_flags, "|", XFS_LOCK_FLAGS), \ 493 - (void *)__entry->caller_ip) \ 453 + DECLARE_EVENT_CLASS(xfs_lock_class, 454 + TP_PROTO(struct xfs_inode *ip, unsigned lock_flags, 455 + unsigned long caller_ip), 456 + TP_ARGS(ip, lock_flags, caller_ip), 457 + TP_STRUCT__entry( 458 + __field(dev_t, dev) 459 + __field(xfs_ino_t, ino) 460 + __field(int, lock_flags) 461 + __field(unsigned long, caller_ip) 462 + ), 463 + TP_fast_assign( 464 + __entry->dev = VFS_I(ip)->i_sb->s_dev; 465 + __entry->ino = ip->i_ino; 466 + __entry->lock_flags = lock_flags; 467 + __entry->caller_ip = caller_ip; 468 + ), 469 + TP_printk("dev %d:%d ino 0x%llx flags %s caller %pf", 470 + MAJOR(__entry->dev), MINOR(__entry->dev), 471 + __entry->ino, 472 + __print_flags(__entry->lock_flags, "|", XFS_LOCK_FLAGS), 473 + (void *)__entry->caller_ip) 494 474 ) 495 475 476 + #define DEFINE_LOCK_EVENT(name) \ 477 + DEFINE_EVENT(xfs_lock_class, name, \ 478 + TP_PROTO(struct xfs_inode *ip, unsigned lock_flags, \ 479 + unsigned long caller_ip), \ 480 + TP_ARGS(ip, lock_flags, caller_ip)) 496 481 DEFINE_LOCK_EVENT(xfs_ilock); 497 482 DEFINE_LOCK_EVENT(xfs_ilock_nowait); 498 483 DEFINE_LOCK_EVENT(xfs_ilock_demote); 499 484 DEFINE_LOCK_EVENT(xfs_iunlock); 500 485 501 - #define DEFINE_IGET_EVENT(name) \ 502 - TRACE_EVENT(name, \ 503 - TP_PROTO(struct xfs_inode *ip), \ 504 - TP_ARGS(ip), \ 505 - TP_STRUCT__entry( \ 506 - __field(dev_t, dev) \ 507 - __field(xfs_ino_t, ino) \ 508 - ), \ 509 - TP_fast_assign( \ 510 - __entry->dev = VFS_I(ip)->i_sb->s_dev; \ 511 - __entry->ino = ip->i_ino; \ 512 - ), \ 513 - TP_printk("dev %d:%d ino 0x%llx", \ 514 - MAJOR(__entry->dev), MINOR(__entry->dev), \ 515 - __entry->ino) \ 486 + DECLARE_EVENT_CLASS(xfs_iget_class, 487 + TP_PROTO(struct xfs_inode *ip), 488 + TP_ARGS(ip), 489 + TP_STRUCT__entry( 490 + __field(dev_t, dev) 491 + __field(xfs_ino_t, ino) 492 + ), 493 + TP_fast_assign( 494 + __entry->dev = VFS_I(ip)->i_sb->s_dev; 495 + __entry->ino = ip->i_ino; 496 + ), 497 + TP_printk("dev %d:%d ino 0x%llx", 498 + MAJOR(__entry->dev), MINOR(__entry->dev), 499 + 
__entry->ino) 516 500 ) 501 + 502 + #define DEFINE_IGET_EVENT(name) \ 503 + DEFINE_EVENT(xfs_iget_class, name, \ 504 + TP_PROTO(struct xfs_inode *ip), \ 505 + TP_ARGS(ip)) 517 506 DEFINE_IGET_EVENT(xfs_iget_skip); 518 507 DEFINE_IGET_EVENT(xfs_iget_reclaim); 519 508 DEFINE_IGET_EVENT(xfs_iget_found); 520 509 DEFINE_IGET_EVENT(xfs_iget_alloc); 521 510 522 - #define DEFINE_INODE_EVENT(name) \ 523 - TRACE_EVENT(name, \ 524 - TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip), \ 525 - TP_ARGS(ip, caller_ip), \ 526 - TP_STRUCT__entry( \ 527 - __field(dev_t, dev) \ 528 - __field(xfs_ino_t, ino) \ 529 - __field(int, count) \ 530 - __field(unsigned long, caller_ip) \ 531 - ), \ 532 - TP_fast_assign( \ 533 - __entry->dev = VFS_I(ip)->i_sb->s_dev; \ 534 - __entry->ino = ip->i_ino; \ 535 - __entry->count = atomic_read(&VFS_I(ip)->i_count); \ 536 - __entry->caller_ip = caller_ip; \ 537 - ), \ 538 - TP_printk("dev %d:%d ino 0x%llx count %d caller %pf", \ 539 - MAJOR(__entry->dev), MINOR(__entry->dev), \ 540 - __entry->ino, \ 541 - __entry->count, \ 542 - (char *)__entry->caller_ip) \ 511 + DECLARE_EVENT_CLASS(xfs_inode_class, 512 + TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip), 513 + TP_ARGS(ip, caller_ip), 514 + TP_STRUCT__entry( 515 + __field(dev_t, dev) 516 + __field(xfs_ino_t, ino) 517 + __field(int, count) 518 + __field(unsigned long, caller_ip) 519 + ), 520 + TP_fast_assign( 521 + __entry->dev = VFS_I(ip)->i_sb->s_dev; 522 + __entry->ino = ip->i_ino; 523 + __entry->count = atomic_read(&VFS_I(ip)->i_count); 524 + __entry->caller_ip = caller_ip; 525 + ), 526 + TP_printk("dev %d:%d ino 0x%llx count %d caller %pf", 527 + MAJOR(__entry->dev), MINOR(__entry->dev), 528 + __entry->ino, 529 + __entry->count, 530 + (char *)__entry->caller_ip) 543 531 ) 532 + 533 + #define DEFINE_INODE_EVENT(name) \ 534 + DEFINE_EVENT(xfs_inode_class, name, \ 535 + TP_PROTO(struct xfs_inode *ip, unsigned long caller_ip), \ 536 + TP_ARGS(ip, caller_ip)) 544 537 DEFINE_INODE_EVENT(xfs_ihold); 545 538 DEFINE_INODE_EVENT(xfs_irele); 546 539 /* the old xfs_itrace_entry tracer - to be replaced by s.th. 
in the VFS */ ··· 560 529 #define xfs_itrace_entry(ip) \ 561 530 trace_xfs_inode(ip, _THIS_IP_) 562 531 563 - #define DEFINE_DQUOT_EVENT(tname) \ 564 - TRACE_EVENT(tname, \ 565 - TP_PROTO(struct xfs_dquot *dqp), \ 566 - TP_ARGS(dqp), \ 567 - TP_STRUCT__entry( \ 568 - __field(dev_t, dev) \ 569 - __field(__be32, id) \ 570 - __field(unsigned, flags) \ 571 - __field(unsigned, nrefs) \ 572 - __field(unsigned long long, res_bcount) \ 573 - __field(unsigned long long, bcount) \ 574 - __field(unsigned long long, icount) \ 575 - __field(unsigned long long, blk_hardlimit) \ 576 - __field(unsigned long long, blk_softlimit) \ 577 - __field(unsigned long long, ino_hardlimit) \ 578 - __field(unsigned long long, ino_softlimit) \ 532 + DECLARE_EVENT_CLASS(xfs_dquot_class, 533 + TP_PROTO(struct xfs_dquot *dqp), 534 + TP_ARGS(dqp), 535 + TP_STRUCT__entry( 536 + __field(dev_t, dev) 537 + __field(__be32, id) 538 + __field(unsigned, flags) 539 + __field(unsigned, nrefs) 540 + __field(unsigned long long, res_bcount) 541 + __field(unsigned long long, bcount) 542 + __field(unsigned long long, icount) 543 + __field(unsigned long long, blk_hardlimit) 544 + __field(unsigned long long, blk_softlimit) 545 + __field(unsigned long long, ino_hardlimit) 546 + __field(unsigned long long, ino_softlimit) 579 547 ), \ 580 - TP_fast_assign( \ 581 - __entry->dev = dqp->q_mount->m_super->s_dev; \ 582 - __entry->id = dqp->q_core.d_id; \ 583 - __entry->flags = dqp->dq_flags; \ 584 - __entry->nrefs = dqp->q_nrefs; \ 585 - __entry->res_bcount = dqp->q_res_bcount; \ 586 - __entry->bcount = be64_to_cpu(dqp->q_core.d_bcount); \ 587 - __entry->icount = be64_to_cpu(dqp->q_core.d_icount); \ 588 - __entry->blk_hardlimit = \ 589 - be64_to_cpu(dqp->q_core.d_blk_hardlimit); \ 590 - __entry->blk_softlimit = \ 591 - be64_to_cpu(dqp->q_core.d_blk_softlimit); \ 592 - __entry->ino_hardlimit = \ 593 - be64_to_cpu(dqp->q_core.d_ino_hardlimit); \ 594 - __entry->ino_softlimit = \ 595 - be64_to_cpu(dqp->q_core.d_ino_softlimit); \ 596 - ), \ 597 - TP_printk("dev %d:%d id 0x%x flags %s nrefs %u res_bc 0x%llx " \ 598 - "bcnt 0x%llx [hard 0x%llx | soft 0x%llx] " \ 599 - "icnt 0x%llx [hard 0x%llx | soft 0x%llx]", \ 600 - MAJOR(__entry->dev), MINOR(__entry->dev), \ 601 - be32_to_cpu(__entry->id), \ 602 - __print_flags(__entry->flags, "|", XFS_DQ_FLAGS), \ 603 - __entry->nrefs, \ 604 - __entry->res_bcount, \ 605 - __entry->bcount, \ 606 - __entry->blk_hardlimit, \ 607 - __entry->blk_softlimit, \ 608 - __entry->icount, \ 609 - __entry->ino_hardlimit, \ 610 - __entry->ino_softlimit) \ 548 + TP_fast_assign( 549 + __entry->dev = dqp->q_mount->m_super->s_dev; 550 + __entry->id = dqp->q_core.d_id; 551 + __entry->flags = dqp->dq_flags; 552 + __entry->nrefs = dqp->q_nrefs; 553 + __entry->res_bcount = dqp->q_res_bcount; 554 + __entry->bcount = be64_to_cpu(dqp->q_core.d_bcount); 555 + __entry->icount = be64_to_cpu(dqp->q_core.d_icount); 556 + __entry->blk_hardlimit = 557 + be64_to_cpu(dqp->q_core.d_blk_hardlimit); 558 + __entry->blk_softlimit = 559 + be64_to_cpu(dqp->q_core.d_blk_softlimit); 560 + __entry->ino_hardlimit = 561 + be64_to_cpu(dqp->q_core.d_ino_hardlimit); 562 + __entry->ino_softlimit = 563 + be64_to_cpu(dqp->q_core.d_ino_softlimit); 564 + ), 565 + TP_printk("dev %d:%d id 0x%x flags %s nrefs %u res_bc 0x%llx " 566 + "bcnt 0x%llx [hard 0x%llx | soft 0x%llx] " 567 + "icnt 0x%llx [hard 0x%llx | soft 0x%llx]", 568 + MAJOR(__entry->dev), MINOR(__entry->dev), 569 + be32_to_cpu(__entry->id), 570 + __print_flags(__entry->flags, "|", XFS_DQ_FLAGS), 571 + 
__entry->nrefs, 572 + __entry->res_bcount, 573 + __entry->bcount, 574 + __entry->blk_hardlimit, 575 + __entry->blk_softlimit, 576 + __entry->icount, 577 + __entry->ino_hardlimit, 578 + __entry->ino_softlimit) 611 579 ) 580 + 581 + #define DEFINE_DQUOT_EVENT(name) \ 582 + DEFINE_EVENT(xfs_dquot_class, name, \ 583 + TP_PROTO(struct xfs_dquot *dqp), \ 584 + TP_ARGS(dqp)) 612 585 DEFINE_DQUOT_EVENT(xfs_dqadjust); 613 586 DEFINE_DQUOT_EVENT(xfs_dqshake_dirty); 614 587 DEFINE_DQUOT_EVENT(xfs_dqshake_unlink); ··· 645 610 DEFINE_IGET_EVENT(xfs_dquot_dqalloc); 646 611 DEFINE_IGET_EVENT(xfs_dquot_dqdetach); 647 612 648 - 649 - #define DEFINE_LOGGRANT_EVENT(tname) \ 650 - TRACE_EVENT(tname, \ 651 - TP_PROTO(struct log *log, struct xlog_ticket *tic), \ 652 - TP_ARGS(log, tic), \ 653 - TP_STRUCT__entry( \ 654 - __field(dev_t, dev) \ 655 - __field(unsigned, trans_type) \ 656 - __field(char, ocnt) \ 657 - __field(char, cnt) \ 658 - __field(int, curr_res) \ 659 - __field(int, unit_res) \ 660 - __field(unsigned int, flags) \ 661 - __field(void *, reserve_headq) \ 662 - __field(void *, write_headq) \ 663 - __field(int, grant_reserve_cycle) \ 664 - __field(int, grant_reserve_bytes) \ 665 - __field(int, grant_write_cycle) \ 666 - __field(int, grant_write_bytes) \ 667 - __field(int, curr_cycle) \ 668 - __field(int, curr_block) \ 669 - __field(xfs_lsn_t, tail_lsn) \ 670 - ), \ 671 - TP_fast_assign( \ 672 - __entry->dev = log->l_mp->m_super->s_dev; \ 673 - __entry->trans_type = tic->t_trans_type; \ 674 - __entry->ocnt = tic->t_ocnt; \ 675 - __entry->cnt = tic->t_cnt; \ 676 - __entry->curr_res = tic->t_curr_res; \ 677 - __entry->unit_res = tic->t_unit_res; \ 678 - __entry->flags = tic->t_flags; \ 679 - __entry->reserve_headq = log->l_reserve_headq; \ 680 - __entry->write_headq = log->l_write_headq; \ 681 - __entry->grant_reserve_cycle = log->l_grant_reserve_cycle; \ 682 - __entry->grant_reserve_bytes = log->l_grant_reserve_bytes; \ 683 - __entry->grant_write_cycle = log->l_grant_write_cycle; \ 684 - __entry->grant_write_bytes = log->l_grant_write_bytes; \ 685 - __entry->curr_cycle = log->l_curr_cycle; \ 686 - __entry->curr_block = log->l_curr_block; \ 687 - __entry->tail_lsn = log->l_tail_lsn; \ 688 - ), \ 689 - TP_printk("dev %d:%d type %s t_ocnt %u t_cnt %u t_curr_res %u " \ 690 - "t_unit_res %u t_flags %s reserve_headq 0x%p " \ 691 - "write_headq 0x%p grant_reserve_cycle %d " \ 692 - "grant_reserve_bytes %d grant_write_cycle %d " \ 693 - "grant_write_bytes %d curr_cycle %d curr_block %d " \ 694 - "tail_cycle %d tail_block %d", \ 695 - MAJOR(__entry->dev), MINOR(__entry->dev), \ 696 - __print_symbolic(__entry->trans_type, XFS_TRANS_TYPES), \ 697 - __entry->ocnt, \ 698 - __entry->cnt, \ 699 - __entry->curr_res, \ 700 - __entry->unit_res, \ 701 - __print_flags(__entry->flags, "|", XLOG_TIC_FLAGS), \ 702 - __entry->reserve_headq, \ 703 - __entry->write_headq, \ 704 - __entry->grant_reserve_cycle, \ 705 - __entry->grant_reserve_bytes, \ 706 - __entry->grant_write_cycle, \ 707 - __entry->grant_write_bytes, \ 708 - __entry->curr_cycle, \ 709 - __entry->curr_block, \ 710 - CYCLE_LSN(__entry->tail_lsn), \ 711 - BLOCK_LSN(__entry->tail_lsn) \ 712 - ) \ 613 + DECLARE_EVENT_CLASS(xfs_loggrant_class, 614 + TP_PROTO(struct log *log, struct xlog_ticket *tic), 615 + TP_ARGS(log, tic), 616 + TP_STRUCT__entry( 617 + __field(dev_t, dev) 618 + __field(unsigned, trans_type) 619 + __field(char, ocnt) 620 + __field(char, cnt) 621 + __field(int, curr_res) 622 + __field(int, unit_res) 623 + __field(unsigned int, flags) 624 + 
__field(void *, reserve_headq) 625 + __field(void *, write_headq) 626 + __field(int, grant_reserve_cycle) 627 + __field(int, grant_reserve_bytes) 628 + __field(int, grant_write_cycle) 629 + __field(int, grant_write_bytes) 630 + __field(int, curr_cycle) 631 + __field(int, curr_block) 632 + __field(xfs_lsn_t, tail_lsn) 633 + ), 634 + TP_fast_assign( 635 + __entry->dev = log->l_mp->m_super->s_dev; 636 + __entry->trans_type = tic->t_trans_type; 637 + __entry->ocnt = tic->t_ocnt; 638 + __entry->cnt = tic->t_cnt; 639 + __entry->curr_res = tic->t_curr_res; 640 + __entry->unit_res = tic->t_unit_res; 641 + __entry->flags = tic->t_flags; 642 + __entry->reserve_headq = log->l_reserve_headq; 643 + __entry->write_headq = log->l_write_headq; 644 + __entry->grant_reserve_cycle = log->l_grant_reserve_cycle; 645 + __entry->grant_reserve_bytes = log->l_grant_reserve_bytes; 646 + __entry->grant_write_cycle = log->l_grant_write_cycle; 647 + __entry->grant_write_bytes = log->l_grant_write_bytes; 648 + __entry->curr_cycle = log->l_curr_cycle; 649 + __entry->curr_block = log->l_curr_block; 650 + __entry->tail_lsn = log->l_tail_lsn; 651 + ), 652 + TP_printk("dev %d:%d type %s t_ocnt %u t_cnt %u t_curr_res %u " 653 + "t_unit_res %u t_flags %s reserve_headq 0x%p " 654 + "write_headq 0x%p grant_reserve_cycle %d " 655 + "grant_reserve_bytes %d grant_write_cycle %d " 656 + "grant_write_bytes %d curr_cycle %d curr_block %d " 657 + "tail_cycle %d tail_block %d", 658 + MAJOR(__entry->dev), MINOR(__entry->dev), 659 + __print_symbolic(__entry->trans_type, XFS_TRANS_TYPES), 660 + __entry->ocnt, 661 + __entry->cnt, 662 + __entry->curr_res, 663 + __entry->unit_res, 664 + __print_flags(__entry->flags, "|", XLOG_TIC_FLAGS), 665 + __entry->reserve_headq, 666 + __entry->write_headq, 667 + __entry->grant_reserve_cycle, 668 + __entry->grant_reserve_bytes, 669 + __entry->grant_write_cycle, 670 + __entry->grant_write_bytes, 671 + __entry->curr_cycle, 672 + __entry->curr_block, 673 + CYCLE_LSN(__entry->tail_lsn), 674 + BLOCK_LSN(__entry->tail_lsn) 675 + ) 713 676 ) 677 + 678 + #define DEFINE_LOGGRANT_EVENT(name) \ 679 + DEFINE_EVENT(xfs_loggrant_class, name, \ 680 + TP_PROTO(struct log *log, struct xlog_ticket *tic), \ 681 + TP_ARGS(log, tic)) 714 682 DEFINE_LOGGRANT_EVENT(xfs_log_done_nonperm); 715 683 DEFINE_LOGGRANT_EVENT(xfs_log_done_perm); 716 684 DEFINE_LOGGRANT_EVENT(xfs_log_reserve); ··· 935 897 __entry->toss_finish) 936 898 ); 937 899 938 - #define DEFINE_ITRUNC_EVENT(name) \ 939 - TRACE_EVENT(name, \ 940 - TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size), \ 941 - TP_ARGS(ip, new_size), \ 942 - TP_STRUCT__entry( \ 943 - __field(dev_t, dev) \ 944 - __field(xfs_ino_t, ino) \ 945 - __field(xfs_fsize_t, size) \ 946 - __field(xfs_fsize_t, new_size) \ 947 - ), \ 948 - TP_fast_assign( \ 949 - __entry->dev = VFS_I(ip)->i_sb->s_dev; \ 950 - __entry->ino = ip->i_ino; \ 951 - __entry->size = ip->i_d.di_size; \ 952 - __entry->new_size = new_size; \ 953 - ), \ 954 - TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx", \ 955 - MAJOR(__entry->dev), MINOR(__entry->dev), \ 956 - __entry->ino, \ 957 - __entry->size, \ 958 - __entry->new_size) \ 900 + DECLARE_EVENT_CLASS(xfs_itrunc_class, 901 + TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size), 902 + TP_ARGS(ip, new_size), 903 + TP_STRUCT__entry( 904 + __field(dev_t, dev) 905 + __field(xfs_ino_t, ino) 906 + __field(xfs_fsize_t, size) 907 + __field(xfs_fsize_t, new_size) 908 + ), 909 + TP_fast_assign( 910 + __entry->dev = VFS_I(ip)->i_sb->s_dev; 911 + __entry->ino = ip->i_ino; 
912 + __entry->size = ip->i_d.di_size; 913 + __entry->new_size = new_size; 914 + ), 915 + TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx", 916 + MAJOR(__entry->dev), MINOR(__entry->dev), 917 + __entry->ino, 918 + __entry->size, 919 + __entry->new_size) 959 920 ) 921 + 922 + #define DEFINE_ITRUNC_EVENT(name) \ 923 + DEFINE_EVENT(xfs_itrunc_class, name, \ 924 + TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size), \ 925 + TP_ARGS(ip, new_size)) 960 926 DEFINE_ITRUNC_EVENT(xfs_itruncate_finish_start); 961 927 DEFINE_ITRUNC_EVENT(xfs_itruncate_finish_end); 962 928 ··· 1079 1037 1080 1038 TRACE_EVENT(xfs_alloc_busysearch, 1081 1039 TP_PROTO(struct xfs_mount *mp, xfs_agnumber_t agno, xfs_agblock_t agbno, 1082 - xfs_extlen_t len, int found), 1083 - TP_ARGS(mp, agno, agbno, len, found), 1040 + xfs_extlen_t len, xfs_lsn_t lsn), 1041 + TP_ARGS(mp, agno, agbno, len, lsn), 1084 1042 TP_STRUCT__entry( 1085 1043 __field(dev_t, dev) 1086 1044 __field(xfs_agnumber_t, agno) 1087 1045 __field(xfs_agblock_t, agbno) 1088 1046 __field(xfs_extlen_t, len) 1089 - __field(int, found) 1047 + __field(xfs_lsn_t, lsn) 1090 1048 ), 1091 1049 TP_fast_assign( 1092 1050 __entry->dev = mp->m_super->s_dev; 1093 1051 __entry->agno = agno; 1094 1052 __entry->agbno = agbno; 1095 1053 __entry->len = len; 1096 - __entry->found = found; 1054 + __entry->lsn = lsn; 1097 1055 ), 1098 - TP_printk("dev %d:%d agno %u agbno %u len %u %s", 1056 + TP_printk("dev %d:%d agno %u agbno %u len %u force lsn 0x%llx", 1099 1057 MAJOR(__entry->dev), MINOR(__entry->dev), 1100 1058 __entry->agno, 1101 1059 __entry->agbno, 1102 1060 __entry->len, 1103 - __print_symbolic(__entry->found, XFS_BUSY_STATES)) 1061 + __entry->lsn) 1104 1062 ); 1105 1063 1106 1064 TRACE_EVENT(xfs_agf, ··· 1194 1152 1195 1153 ); 1196 1154 1197 - #define DEFINE_ALLOC_EVENT(name) \ 1198 - TRACE_EVENT(name, \ 1199 - TP_PROTO(struct xfs_alloc_arg *args), \ 1200 - TP_ARGS(args), \ 1201 - TP_STRUCT__entry( \ 1202 - __field(dev_t, dev) \ 1203 - __field(xfs_agnumber_t, agno) \ 1204 - __field(xfs_agblock_t, agbno) \ 1205 - __field(xfs_extlen_t, minlen) \ 1206 - __field(xfs_extlen_t, maxlen) \ 1207 - __field(xfs_extlen_t, mod) \ 1208 - __field(xfs_extlen_t, prod) \ 1209 - __field(xfs_extlen_t, minleft) \ 1210 - __field(xfs_extlen_t, total) \ 1211 - __field(xfs_extlen_t, alignment) \ 1212 - __field(xfs_extlen_t, minalignslop) \ 1213 - __field(xfs_extlen_t, len) \ 1214 - __field(short, type) \ 1215 - __field(short, otype) \ 1216 - __field(char, wasdel) \ 1217 - __field(char, wasfromfl) \ 1218 - __field(char, isfl) \ 1219 - __field(char, userdata) \ 1220 - __field(xfs_fsblock_t, firstblock) \ 1221 - ), \ 1222 - TP_fast_assign( \ 1223 - __entry->dev = args->mp->m_super->s_dev; \ 1224 - __entry->agno = args->agno; \ 1225 - __entry->agbno = args->agbno; \ 1226 - __entry->minlen = args->minlen; \ 1227 - __entry->maxlen = args->maxlen; \ 1228 - __entry->mod = args->mod; \ 1229 - __entry->prod = args->prod; \ 1230 - __entry->minleft = args->minleft; \ 1231 - __entry->total = args->total; \ 1232 - __entry->alignment = args->alignment; \ 1233 - __entry->minalignslop = args->minalignslop; \ 1234 - __entry->len = args->len; \ 1235 - __entry->type = args->type; \ 1236 - __entry->otype = args->otype; \ 1237 - __entry->wasdel = args->wasdel; \ 1238 - __entry->wasfromfl = args->wasfromfl; \ 1239 - __entry->isfl = args->isfl; \ 1240 - __entry->userdata = args->userdata; \ 1241 - __entry->firstblock = args->firstblock; \ 1242 - ), \ 1243 - TP_printk("dev %d:%d agno %u agbno %u minlen %u 
maxlen %u mod %u " \ 1244 - "prod %u minleft %u total %u alignment %u minalignslop %u " \ 1245 - "len %u type %s otype %s wasdel %d wasfromfl %d isfl %d " \ 1246 - "userdata %d firstblock %s", \ 1247 - MAJOR(__entry->dev), MINOR(__entry->dev), \ 1248 - __entry->agno, \ 1249 - __entry->agbno, \ 1250 - __entry->minlen, \ 1251 - __entry->maxlen, \ 1252 - __entry->mod, \ 1253 - __entry->prod, \ 1254 - __entry->minleft, \ 1255 - __entry->total, \ 1256 - __entry->alignment, \ 1257 - __entry->minalignslop, \ 1258 - __entry->len, \ 1259 - __print_symbolic(__entry->type, XFS_ALLOC_TYPES), \ 1260 - __print_symbolic(__entry->otype, XFS_ALLOC_TYPES), \ 1261 - __entry->wasdel, \ 1262 - __entry->wasfromfl, \ 1263 - __entry->isfl, \ 1264 - __entry->userdata, \ 1265 - xfs_fmtfsblock(__entry->firstblock)) \ 1155 + DECLARE_EVENT_CLASS(xfs_alloc_class, 1156 + TP_PROTO(struct xfs_alloc_arg *args), 1157 + TP_ARGS(args), 1158 + TP_STRUCT__entry( 1159 + __field(dev_t, dev) 1160 + __field(xfs_agnumber_t, agno) 1161 + __field(xfs_agblock_t, agbno) 1162 + __field(xfs_extlen_t, minlen) 1163 + __field(xfs_extlen_t, maxlen) 1164 + __field(xfs_extlen_t, mod) 1165 + __field(xfs_extlen_t, prod) 1166 + __field(xfs_extlen_t, minleft) 1167 + __field(xfs_extlen_t, total) 1168 + __field(xfs_extlen_t, alignment) 1169 + __field(xfs_extlen_t, minalignslop) 1170 + __field(xfs_extlen_t, len) 1171 + __field(short, type) 1172 + __field(short, otype) 1173 + __field(char, wasdel) 1174 + __field(char, wasfromfl) 1175 + __field(char, isfl) 1176 + __field(char, userdata) 1177 + __field(xfs_fsblock_t, firstblock) 1178 + ), 1179 + TP_fast_assign( 1180 + __entry->dev = args->mp->m_super->s_dev; 1181 + __entry->agno = args->agno; 1182 + __entry->agbno = args->agbno; 1183 + __entry->minlen = args->minlen; 1184 + __entry->maxlen = args->maxlen; 1185 + __entry->mod = args->mod; 1186 + __entry->prod = args->prod; 1187 + __entry->minleft = args->minleft; 1188 + __entry->total = args->total; 1189 + __entry->alignment = args->alignment; 1190 + __entry->minalignslop = args->minalignslop; 1191 + __entry->len = args->len; 1192 + __entry->type = args->type; 1193 + __entry->otype = args->otype; 1194 + __entry->wasdel = args->wasdel; 1195 + __entry->wasfromfl = args->wasfromfl; 1196 + __entry->isfl = args->isfl; 1197 + __entry->userdata = args->userdata; 1198 + __entry->firstblock = args->firstblock; 1199 + ), 1200 + TP_printk("dev %d:%d agno %u agbno %u minlen %u maxlen %u mod %u " 1201 + "prod %u minleft %u total %u alignment %u minalignslop %u " 1202 + "len %u type %s otype %s wasdel %d wasfromfl %d isfl %d " 1203 + "userdata %d firstblock 0x%llx", 1204 + MAJOR(__entry->dev), MINOR(__entry->dev), 1205 + __entry->agno, 1206 + __entry->agbno, 1207 + __entry->minlen, 1208 + __entry->maxlen, 1209 + __entry->mod, 1210 + __entry->prod, 1211 + __entry->minleft, 1212 + __entry->total, 1213 + __entry->alignment, 1214 + __entry->minalignslop, 1215 + __entry->len, 1216 + __print_symbolic(__entry->type, XFS_ALLOC_TYPES), 1217 + __print_symbolic(__entry->otype, XFS_ALLOC_TYPES), 1218 + __entry->wasdel, 1219 + __entry->wasfromfl, 1220 + __entry->isfl, 1221 + __entry->userdata, 1222 + __entry->firstblock) 1266 1223 ) 1267 1224 1225 + #define DEFINE_ALLOC_EVENT(name) \ 1226 + DEFINE_EVENT(xfs_alloc_class, name, \ 1227 + TP_PROTO(struct xfs_alloc_arg *args), \ 1228 + TP_ARGS(args)) 1268 1229 DEFINE_ALLOC_EVENT(xfs_alloc_exact_done); 1269 1230 DEFINE_ALLOC_EVENT(xfs_alloc_exact_error); 1270 1231 DEFINE_ALLOC_EVENT(xfs_alloc_near_nominleft); ··· 1290 1245 
DEFINE_ALLOC_EVENT(xfs_alloc_vextent_loopfailed); 1291 1246 DEFINE_ALLOC_EVENT(xfs_alloc_vextent_allfailed); 1292 1247 1293 - #define DEFINE_DIR2_TRACE(tname) \ 1294 - TRACE_EVENT(tname, \ 1295 - TP_PROTO(struct xfs_da_args *args), \ 1296 - TP_ARGS(args), \ 1297 - TP_STRUCT__entry( \ 1298 - __field(dev_t, dev) \ 1299 - __field(xfs_ino_t, ino) \ 1300 - __dynamic_array(char, name, args->namelen) \ 1301 - __field(int, namelen) \ 1302 - __field(xfs_dahash_t, hashval) \ 1303 - __field(xfs_ino_t, inumber) \ 1304 - __field(int, op_flags) \ 1305 - ), \ 1306 - TP_fast_assign( \ 1307 - __entry->dev = VFS_I(args->dp)->i_sb->s_dev; \ 1308 - __entry->ino = args->dp->i_ino; \ 1309 - if (args->namelen) \ 1310 - memcpy(__get_str(name), args->name, args->namelen); \ 1311 - __entry->namelen = args->namelen; \ 1312 - __entry->hashval = args->hashval; \ 1313 - __entry->inumber = args->inumber; \ 1314 - __entry->op_flags = args->op_flags; \ 1315 - ), \ 1316 - TP_printk("dev %d:%d ino 0x%llx name %.*s namelen %d hashval 0x%x " \ 1317 - "inumber 0x%llx op_flags %s", \ 1318 - MAJOR(__entry->dev), MINOR(__entry->dev), \ 1319 - __entry->ino, \ 1320 - __entry->namelen, \ 1321 - __entry->namelen ? __get_str(name) : NULL, \ 1322 - __entry->namelen, \ 1323 - __entry->hashval, \ 1324 - __entry->inumber, \ 1325 - __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS)) \ 1248 + DECLARE_EVENT_CLASS(xfs_dir2_class, 1249 + TP_PROTO(struct xfs_da_args *args), 1250 + TP_ARGS(args), 1251 + TP_STRUCT__entry( 1252 + __field(dev_t, dev) 1253 + __field(xfs_ino_t, ino) 1254 + __dynamic_array(char, name, args->namelen) 1255 + __field(int, namelen) 1256 + __field(xfs_dahash_t, hashval) 1257 + __field(xfs_ino_t, inumber) 1258 + __field(int, op_flags) 1259 + ), 1260 + TP_fast_assign( 1261 + __entry->dev = VFS_I(args->dp)->i_sb->s_dev; 1262 + __entry->ino = args->dp->i_ino; 1263 + if (args->namelen) 1264 + memcpy(__get_str(name), args->name, args->namelen); 1265 + __entry->namelen = args->namelen; 1266 + __entry->hashval = args->hashval; 1267 + __entry->inumber = args->inumber; 1268 + __entry->op_flags = args->op_flags; 1269 + ), 1270 + TP_printk("dev %d:%d ino 0x%llx name %.*s namelen %d hashval 0x%x " 1271 + "inumber 0x%llx op_flags %s", 1272 + MAJOR(__entry->dev), MINOR(__entry->dev), 1273 + __entry->ino, 1274 + __entry->namelen, 1275 + __entry->namelen ? 
__get_str(name) : NULL, 1276 + __entry->namelen, 1277 + __entry->hashval, 1278 + __entry->inumber, 1279 + __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS)) 1326 1280 ) 1327 - DEFINE_DIR2_TRACE(xfs_dir2_sf_addname); 1328 - DEFINE_DIR2_TRACE(xfs_dir2_sf_create); 1329 - DEFINE_DIR2_TRACE(xfs_dir2_sf_lookup); 1330 - DEFINE_DIR2_TRACE(xfs_dir2_sf_replace); 1331 - DEFINE_DIR2_TRACE(xfs_dir2_sf_removename); 1332 - DEFINE_DIR2_TRACE(xfs_dir2_sf_toino4); 1333 - DEFINE_DIR2_TRACE(xfs_dir2_sf_toino8); 1334 - DEFINE_DIR2_TRACE(xfs_dir2_sf_to_block); 1335 - DEFINE_DIR2_TRACE(xfs_dir2_block_addname); 1336 - DEFINE_DIR2_TRACE(xfs_dir2_block_lookup); 1337 - DEFINE_DIR2_TRACE(xfs_dir2_block_replace); 1338 - DEFINE_DIR2_TRACE(xfs_dir2_block_removename); 1339 - DEFINE_DIR2_TRACE(xfs_dir2_block_to_sf); 1340 - DEFINE_DIR2_TRACE(xfs_dir2_block_to_leaf); 1341 - DEFINE_DIR2_TRACE(xfs_dir2_leaf_addname); 1342 - DEFINE_DIR2_TRACE(xfs_dir2_leaf_lookup); 1343 - DEFINE_DIR2_TRACE(xfs_dir2_leaf_replace); 1344 - DEFINE_DIR2_TRACE(xfs_dir2_leaf_removename); 1345 - DEFINE_DIR2_TRACE(xfs_dir2_leaf_to_block); 1346 - DEFINE_DIR2_TRACE(xfs_dir2_leaf_to_node); 1347 - DEFINE_DIR2_TRACE(xfs_dir2_node_addname); 1348 - DEFINE_DIR2_TRACE(xfs_dir2_node_lookup); 1349 - DEFINE_DIR2_TRACE(xfs_dir2_node_replace); 1350 - DEFINE_DIR2_TRACE(xfs_dir2_node_removename); 1351 - DEFINE_DIR2_TRACE(xfs_dir2_node_to_leaf); 1352 1281 1353 - #define DEFINE_DIR2_SPACE_TRACE(tname) \ 1354 - TRACE_EVENT(tname, \ 1355 - TP_PROTO(struct xfs_da_args *args, int idx), \ 1356 - TP_ARGS(args, idx), \ 1357 - TP_STRUCT__entry( \ 1358 - __field(dev_t, dev) \ 1359 - __field(xfs_ino_t, ino) \ 1360 - __field(int, op_flags) \ 1361 - __field(int, idx) \ 1362 - ), \ 1363 - TP_fast_assign( \ 1364 - __entry->dev = VFS_I(args->dp)->i_sb->s_dev; \ 1365 - __entry->ino = args->dp->i_ino; \ 1366 - __entry->op_flags = args->op_flags; \ 1367 - __entry->idx = idx; \ 1368 - ), \ 1369 - TP_printk("dev %d:%d ino 0x%llx op_flags %s index %d", \ 1370 - MAJOR(__entry->dev), MINOR(__entry->dev), \ 1371 - __entry->ino, \ 1372 - __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS), \ 1373 - __entry->idx) \ 1282 + #define DEFINE_DIR2_EVENT(name) \ 1283 + DEFINE_EVENT(xfs_dir2_class, name, \ 1284 + TP_PROTO(struct xfs_da_args *args), \ 1285 + TP_ARGS(args)) 1286 + DEFINE_DIR2_EVENT(xfs_dir2_sf_addname); 1287 + DEFINE_DIR2_EVENT(xfs_dir2_sf_create); 1288 + DEFINE_DIR2_EVENT(xfs_dir2_sf_lookup); 1289 + DEFINE_DIR2_EVENT(xfs_dir2_sf_replace); 1290 + DEFINE_DIR2_EVENT(xfs_dir2_sf_removename); 1291 + DEFINE_DIR2_EVENT(xfs_dir2_sf_toino4); 1292 + DEFINE_DIR2_EVENT(xfs_dir2_sf_toino8); 1293 + DEFINE_DIR2_EVENT(xfs_dir2_sf_to_block); 1294 + DEFINE_DIR2_EVENT(xfs_dir2_block_addname); 1295 + DEFINE_DIR2_EVENT(xfs_dir2_block_lookup); 1296 + DEFINE_DIR2_EVENT(xfs_dir2_block_replace); 1297 + DEFINE_DIR2_EVENT(xfs_dir2_block_removename); 1298 + DEFINE_DIR2_EVENT(xfs_dir2_block_to_sf); 1299 + DEFINE_DIR2_EVENT(xfs_dir2_block_to_leaf); 1300 + DEFINE_DIR2_EVENT(xfs_dir2_leaf_addname); 1301 + DEFINE_DIR2_EVENT(xfs_dir2_leaf_lookup); 1302 + DEFINE_DIR2_EVENT(xfs_dir2_leaf_replace); 1303 + DEFINE_DIR2_EVENT(xfs_dir2_leaf_removename); 1304 + DEFINE_DIR2_EVENT(xfs_dir2_leaf_to_block); 1305 + DEFINE_DIR2_EVENT(xfs_dir2_leaf_to_node); 1306 + DEFINE_DIR2_EVENT(xfs_dir2_node_addname); 1307 + DEFINE_DIR2_EVENT(xfs_dir2_node_lookup); 1308 + DEFINE_DIR2_EVENT(xfs_dir2_node_replace); 1309 + DEFINE_DIR2_EVENT(xfs_dir2_node_removename); 1310 + DEFINE_DIR2_EVENT(xfs_dir2_node_to_leaf); 1311 + 1312 + 
DECLARE_EVENT_CLASS(xfs_dir2_space_class, 1313 + TP_PROTO(struct xfs_da_args *args, int idx), 1314 + TP_ARGS(args, idx), 1315 + TP_STRUCT__entry( 1316 + __field(dev_t, dev) 1317 + __field(xfs_ino_t, ino) 1318 + __field(int, op_flags) 1319 + __field(int, idx) 1320 + ), 1321 + TP_fast_assign( 1322 + __entry->dev = VFS_I(args->dp)->i_sb->s_dev; 1323 + __entry->ino = args->dp->i_ino; 1324 + __entry->op_flags = args->op_flags; 1325 + __entry->idx = idx; 1326 + ), 1327 + TP_printk("dev %d:%d ino 0x%llx op_flags %s index %d", 1328 + MAJOR(__entry->dev), MINOR(__entry->dev), 1329 + __entry->ino, 1330 + __print_flags(__entry->op_flags, "|", XFS_DA_OP_FLAGS), 1331 + __entry->idx) 1374 1332 ) 1375 - DEFINE_DIR2_SPACE_TRACE(xfs_dir2_leafn_add); 1376 - DEFINE_DIR2_SPACE_TRACE(xfs_dir2_leafn_remove); 1377 - DEFINE_DIR2_SPACE_TRACE(xfs_dir2_grow_inode); 1378 - DEFINE_DIR2_SPACE_TRACE(xfs_dir2_shrink_inode); 1333 + 1334 + #define DEFINE_DIR2_SPACE_EVENT(name) \ 1335 + DEFINE_EVENT(xfs_dir2_space_class, name, \ 1336 + TP_PROTO(struct xfs_da_args *args, int idx), \ 1337 + TP_ARGS(args, idx)) 1338 + DEFINE_DIR2_SPACE_EVENT(xfs_dir2_leafn_add); 1339 + DEFINE_DIR2_SPACE_EVENT(xfs_dir2_leafn_remove); 1340 + DEFINE_DIR2_SPACE_EVENT(xfs_dir2_grow_inode); 1341 + DEFINE_DIR2_SPACE_EVENT(xfs_dir2_shrink_inode); 1379 1342 1380 1343 TRACE_EVENT(xfs_dir2_leafn_moveents, 1381 1344 TP_PROTO(struct xfs_da_args *args, int src_idx, int dst_idx, int count),
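The entire xfs_trace.h hunk above is one mechanical transformation: each family of near-identical TRACE_EVENT() definitions becomes a single DECLARE_EVENT_CLASS() that carries the TP_STRUCT__entry/TP_fast_assign/TP_printk body once, and every tracepoint name is then declared with a lightweight DEFINE_EVENT() of that class, which is what shrinks the generated tracing code. A minimal sketch of the pattern with made-up names (foo_class, foo_read, foo_write), not taken from the patch:

DECLARE_EVENT_CLASS(foo_class,
	TP_PROTO(struct inode *inode),
	TP_ARGS(inode),
	TP_STRUCT__entry(
		__field(dev_t,		dev)
		__field(unsigned long,	ino)
	),
	TP_fast_assign(
		__entry->dev = inode->i_sb->s_dev;
		__entry->ino = inode->i_ino;
	),
	TP_printk("dev %d:%d ino 0x%lx",
		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->ino)
);

/* each event is now one line instead of a full TRACE_EVENT() body */
#define DEFINE_FOO_EVENT(name) \
DEFINE_EVENT(foo_class, name, \
	TP_PROTO(struct inode *inode), \
	TP_ARGS(inode))
DEFINE_FOO_EVENT(foo_read);
DEFINE_FOO_EVENT(foo_write);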
+21 -23
fs/xfs/xfs_alloc.c
···
 	xfs_mount_t		*mp;
 	xfs_perag_busy_t	*bsy;
 	xfs_agblock_t		uend, bend;
-	xfs_lsn_t		lsn;
+	xfs_lsn_t		lsn = 0;
 	int			cnt;
 
 	mp = tp->t_mountp;
 
 	spin_lock(&mp->m_perag[agno].pagb_lock);
-	cnt = mp->m_perag[agno].pagb_count;
 
 	uend = bno + len - 1;
 
-	/* search pagb_list for this slot, skipping open slots */
-	for (bsy = mp->m_perag[agno].pagb_list; cnt; bsy++) {
+	/*
+	 * search pagb_list for this slot, skipping open slots. We have to
+	 * search the entire array as there may be multiple overlaps and
+	 * we have to get the most recent LSN for the log force to push out
+	 * all the transactions that span the range.
+	 */
+	for (cnt = 0; cnt < mp->m_perag[agno].pagb_count; cnt++) {
+		bsy = &mp->m_perag[agno].pagb_list[cnt];
+		if (!bsy->busy_tp)
+			continue;
 
-		/*
-		 * (start1,length1) within (start2, length2)
-		 */
-		if (bsy->busy_tp != NULL) {
-			bend = bsy->busy_start + bsy->busy_length - 1;
-			if ((bno > bend) || (uend < bsy->busy_start)) {
-				cnt--;
-			} else {
-				break;
-			}
-		}
+		bend = bsy->busy_start + bsy->busy_length - 1;
+		if (bno > bend || uend < bsy->busy_start)
+			continue;
+
+		/* (start1,length1) within (start2, length2) */
+		if (XFS_LSN_CMP(bsy->busy_tp->t_commit_lsn, lsn) > 0)
+			lsn = bsy->busy_tp->t_commit_lsn;
 	}
-
-	trace_xfs_alloc_busysearch(mp, agno, bno, len, !!cnt);
+	spin_unlock(&mp->m_perag[agno].pagb_lock);
+	trace_xfs_alloc_busysearch(tp->t_mountp, agno, bno, len, lsn);
 
 	/*
 	 * If a block was found, force the log through the LSN of the
 	 * transaction that freed the block
 	 */
-	if (cnt) {
-		lsn = bsy->busy_tp->t_commit_lsn;
-		spin_unlock(&mp->m_perag[agno].pagb_lock);
+	if (lsn)
 		xfs_log_force(mp, lsn, XFS_LOG_FORCE|XFS_LOG_SYNC);
-	} else {
-		spin_unlock(&mp->m_perag[agno].pagb_lock);
-	}
 }
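What changed above: instead of stopping at the first overlapping busy extent and forcing the log to that one transaction, the busy-extent search now walks the whole pagb_list, remembers the highest commit LSN among every busy extent that overlaps the range being reused, and issues a single log force to that LSN, which is what guarantees all busy extents in the range reach disk. The core of that search, pulled out into a hypothetical helper for readability (example_max_busy_lsn() is not part of the patch):

static xfs_lsn_t
example_max_busy_lsn(xfs_perag_busy_t *list, int count,
		     xfs_agblock_t bno, xfs_agblock_t uend)
{
	xfs_lsn_t	lsn = 0;
	int		i;

	for (i = 0; i < count; i++) {
		xfs_agblock_t	bend;

		if (!list[i].busy_tp)		/* unused slot */
			continue;
		bend = list[i].busy_start + list[i].busy_length - 1;
		if (bno > bend || uend < list[i].busy_start)
			continue;		/* no overlap with [bno, uend] */
		if (XFS_LSN_CMP(list[i].busy_tp->t_commit_lsn, lsn) > 0)
			lsn = list[i].busy_tp->t_commit_lsn;
	}
	return lsn;				/* 0 means nothing to force */
}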
+7 -3
fs/xfs/xfs_inode.c
···
 	mp = ip->i_mount;
 
 	/*
-	 * If the inode isn't dirty, then just release the inode
-	 * flush lock and do nothing.
+	 * If the inode isn't dirty, then just release the inode flush lock and
+	 * do nothing. Treat stale inodes the same; we cannot rely on the
+	 * backing buffer remaining stale in cache for the remaining life of
+	 * the stale inode and so xfs_itobp() below may give us a buffer that
+	 * no longer contains inodes below. Doing this stale check here also
+	 * avoids forcing the log on pinned, stale inodes.
 	 */
-	if (xfs_inode_clean(ip)) {
+	if (xfs_inode_clean(ip) || xfs_iflags_test(ip, XFS_ISTALE)) {
 		xfs_ifunlock(ip);
 		return 0;
 	}
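For context on the new check: XFS_ISTALE is the XFS-private inode flag set when the inode's cluster buffer is invalidated (e.g. when the cluster is freed), and xfs_iflags_test() is the existing locked accessor for those flags. Conceptually the accessor amounts to the sketch below; this is illustrative only, the real helper lives in xfs_inode.h:

static inline int example_iflags_test(struct xfs_inode *ip, unsigned short flags)
{
	int ret;

	spin_lock(&ip->i_flags_lock);	/* serialise against concurrent flag updates */
	ret = (ip->i_flags & flags) != 0;
	spin_unlock(&ip->i_flags_lock);
	return ret;
}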
+39 -54
fs/xfs/xfs_vnodeops.c
···
 	uint			commit_flags=0;
 	uid_t			uid=0, iuid=0;
 	gid_t			gid=0, igid=0;
-	int			timeflags = 0;
 	struct xfs_dquot	*udqp, *gdqp, *olddquot1, *olddquot2;
 	int			need_iolock = 1;
···
 	if (flags & XFS_ATTR_NOLOCK)
 		need_iolock = 0;
 	if (!(mask & ATTR_SIZE)) {
-		if ((mask != (ATTR_CTIME|ATTR_ATIME|ATTR_MTIME)) ||
-		    (mp->m_flags & XFS_MOUNT_WSYNC)) {
-			tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
-			commit_flags = 0;
-			if ((code = xfs_trans_reserve(tp, 0,
-					XFS_ICHANGE_LOG_RES(mp), 0,
-					0, 0))) {
-				lock_flags = 0;
-				goto error_return;
-			}
+		tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
+		commit_flags = 0;
+		code = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp),
+					 0, 0, 0);
+		if (code) {
+			lock_flags = 0;
+			goto error_return;
 		}
 	} else {
 		if (DM_EVENT_ENABLED(ip, DM_EVENT_TRUNCATE) &&
···
 		 * or we are explicitly asked to change it. This handles
 		 * the semantic difference between truncate() and ftruncate()
 		 * as implemented in the VFS.
+		 *
+		 * The regular truncate() case without ATTR_CTIME and ATTR_MTIME
+		 * is a special case where we need to update the times despite
+		 * not having these flags set. For all other operations the
+		 * VFS set these flags explicitly if it wants a timestamp
+		 * update.
 		 */
-		if (iattr->ia_size != ip->i_size || (mask & ATTR_CTIME))
-			timeflags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
+		if (iattr->ia_size != ip->i_size &&
+		    (!(mask & (ATTR_CTIME | ATTR_MTIME)))) {
+			iattr->ia_ctime = iattr->ia_mtime =
+				current_fs_time(inode->i_sb);
+			mask |= ATTR_CTIME | ATTR_MTIME;
+		}
 
 		if (iattr->ia_size > ip->i_size) {
 			ip->i_d.di_size = iattr->ia_size;
 			ip->i_size = iattr->ia_size;
-			if (!(flags & XFS_ATTR_DMI))
-				xfs_ichgtime(ip, XFS_ICHGTIME_CHG);
 			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 		} else if (iattr->ia_size <= ip->i_size ||
 			   (iattr->ia_size == 0 && ip->i_d.di_nextents)) {
···
 			ip->i_d.di_gid = gid;
 			inode->i_gid = gid;
 		}
-
-		xfs_trans_log_inode (tp, ip, XFS_ILOG_CORE);
-		timeflags |= XFS_ICHGTIME_CHG;
 	}
 
 	/*
···
 
 		inode->i_mode &= S_IFMT;
 		inode->i_mode |= mode & ~S_IFMT;
-
-		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-		timeflags |= XFS_ICHGTIME_CHG;
 	}
 
 	/*
 	 * Change file access or modified times.
 	 */
-	if (mask & (ATTR_ATIME|ATTR_MTIME)) {
-		if (mask & ATTR_ATIME) {
-			inode->i_atime = iattr->ia_atime;
-			ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec;
-			ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec;
-			ip->i_update_core = 1;
-		}
-		if (mask & ATTR_MTIME) {
-			inode->i_mtime = iattr->ia_mtime;
-			ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec;
-			ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec;
-			timeflags &= ~XFS_ICHGTIME_MOD;
-			timeflags |= XFS_ICHGTIME_CHG;
-		}
-		if (tp && (mask & (ATTR_MTIME_SET|ATTR_ATIME_SET)))
-			xfs_trans_log_inode (tp, ip, XFS_ILOG_CORE);
+	if (mask & ATTR_ATIME) {
+		inode->i_atime = iattr->ia_atime;
+		ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec;
+		ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec;
+		ip->i_update_core = 1;
 	}
-
-	/*
-	 * Change file inode change time only if ATTR_CTIME set
-	 * AND we have been called by a DMI function.
-	 */
-
-	if ((flags & XFS_ATTR_DMI) && (mask & ATTR_CTIME)) {
+	if (mask & ATTR_CTIME) {
 		inode->i_ctime = iattr->ia_ctime;
 		ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec;
 		ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec;
 		ip->i_update_core = 1;
-		timeflags &= ~XFS_ICHGTIME_CHG;
+	}
+	if (mask & ATTR_MTIME) {
+		inode->i_mtime = iattr->ia_mtime;
+		ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec;
+		ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec;
+		ip->i_update_core = 1;
 	}
 
 	/*
-	 * Send out timestamp changes that need to be set to the
-	 * current time. Not done when called by a DMI function.
+	 * And finally, log the inode core if any attribute in it
+	 * has been changed.
 	 */
-	if (timeflags && !(flags & XFS_ATTR_DMI))
-		xfs_ichgtime(ip, timeflags);
+	if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE|
+		    ATTR_ATIME|ATTR_CTIME|ATTR_MTIME))
+		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 
 	XFS_STATS_INC(xs_ig_attrchg);
 
···
 	 * mix so this probably isn't worth the trouble to optimize.
 	 */
 	code = 0;
-	if (tp) {
-		if (mp->m_flags & XFS_MOUNT_WSYNC)
-			xfs_trans_set_sync(tp);
+	if (mp->m_flags & XFS_MOUNT_WSYNC)
+		xfs_trans_set_sync(tp);
 
-		code = xfs_trans_commit(tp, commit_flags);
-	}
+	code = xfs_trans_commit(tp, commit_flags);
 
 	xfs_iunlock(ip, lock_flags);
 
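Taken together, the xfs_setattr() changes above mean the non-size path always runs inside a transaction, timestamps are written only when the corresponding ATTR_* bits are set (with bare truncate() handled as the one special case), and the inode core is logged once at the end. Condensed into a single flow for readability (every call below appears in the hunks above; locking and most error handling are elided):

	tp = xfs_trans_alloc(mp, XFS_TRANS_SETATTR_NOT_SIZE);
	code = xfs_trans_reserve(tp, 0, XFS_ICHANGE_LOG_RES(mp), 0, 0, 0);
	if (code)
		goto error_return;

	/* ... apply uid/gid/mode and any ATTR_ATIME/ATTR_CTIME/ATTR_MTIME values ... */

	if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE|
		    ATTR_ATIME|ATTR_CTIME|ATTR_MTIME))
		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	if (mp->m_flags & XFS_MOUNT_WSYNC)
		xfs_trans_set_sync(tp);		/* wsync mounts commit synchronously */
	code = xfs_trans_commit(tp, commit_flags);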