Source: Linux kernel mirror (for testing), git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Tags: kernel, os, linux

xfs: add online scrub for superblock counters

Teach online scrub how to check the filesystem summary counters. We use
the incore delalloc block counter along with the incore AG headers to
compute expected values for fdblocks, icount, and ifree, and then check
that the percpu counter is within a certain threshold of the expected
value. This is done to avoid having to freeze or otherwise lock the
filesystem, which means that we're only checking that the counters are
fairly close, not that they're exactly correct.

Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Brian Foster <bfoster@redhat.com>

+461 -3
+1
fs/xfs/Makefile
··· 143 143 common.o \ 144 144 dabtree.o \ 145 145 dir.o \ 146 + fscounters.o \ 146 147 health.o \ 147 148 ialloc.o \ 148 149 inode.o \
+2 -1
fs/xfs/libxfs/xfs_fs.h
··· 578 578 #define XFS_SCRUB_TYPE_UQUOTA 21 /* user quotas */ 579 579 #define XFS_SCRUB_TYPE_GQUOTA 22 /* group quotas */ 580 580 #define XFS_SCRUB_TYPE_PQUOTA 23 /* project quotas */ 581 + #define XFS_SCRUB_TYPE_FSCOUNTERS 24 /* fs summary counters */ 581 582 582 583 /* Number of scrub subcommands. */ 583 - #define XFS_SCRUB_TYPE_NR 24 584 + #define XFS_SCRUB_TYPE_NR 25 584 585 585 586 /* i: Repair this metadata. */ 586 587 #define XFS_SCRUB_IFLAG_REPAIR (1 << 0)
+1 -1
fs/xfs/libxfs/xfs_types.c
··· 185 185 } 186 186 187 187 /* Calculate the range of valid icount values. */ 188 - static void 188 + void 189 189 xfs_icount_range( 190 190 struct xfs_mount *mp, 191 191 unsigned long long *min,
+2
fs/xfs/libxfs/xfs_types.h
··· 191 191 bool xfs_verify_rtbno(struct xfs_mount *mp, xfs_rtblock_t rtbno); 192 192 bool xfs_verify_icount(struct xfs_mount *mp, unsigned long long icount); 193 193 bool xfs_verify_dablk(struct xfs_mount *mp, xfs_fileoff_t off); 194 + void xfs_icount_range(struct xfs_mount *mp, unsigned long long *min, 195 + unsigned long long *max); 194 196 195 197 #endif /* __XFS_TYPES_H__ */
+9
fs/xfs/scrub/common.c
··· 209 209 trace_xchk_ino_preen(sc, ino, __return_address); 210 210 } 211 211 212 + /* Record something being wrong with the filesystem primary superblock. */ 213 + void 214 + xchk_set_corrupt( 215 + struct xfs_scrub *sc) 216 + { 217 + sc->sm->sm_flags |= XFS_SCRUB_OFLAG_CORRUPT; 218 + trace_xchk_fs_error(sc, 0, __return_address); 219 + } 220 + 212 221 /* Record a corrupt block. */ 213 222 void 214 223 xchk_block_set_corrupt(
+2
fs/xfs/scrub/common.h
··· 39 39 struct xfs_buf *bp); 40 40 void xchk_ino_set_preen(struct xfs_scrub *sc, xfs_ino_t ino); 41 41 42 + void xchk_set_corrupt(struct xfs_scrub *sc); 42 43 void xchk_block_set_corrupt(struct xfs_scrub *sc, 43 44 struct xfs_buf *bp); 44 45 void xchk_ino_set_corrupt(struct xfs_scrub *sc, xfs_ino_t ino); ··· 106 105 return -ENOENT; 107 106 } 108 107 #endif 108 + int xchk_setup_fscounters(struct xfs_scrub *sc, struct xfs_inode *ip); 109 109 110 110 void xchk_ag_free(struct xfs_scrub *sc, struct xchk_ag *sa); 111 111 int xchk_ag_init(struct xfs_scrub *sc, xfs_agnumber_t agno,
+366
fs/xfs/scrub/fscounters.c
··· 1 + // SPDX-License-Identifier: GPL-2.0+ 2 + /* 3 + * Copyright (C) 2019 Oracle. All Rights Reserved. 4 + * Author: Darrick J. Wong <darrick.wong@oracle.com> 5 + */ 6 + #include "xfs.h" 7 + #include "xfs_fs.h" 8 + #include "xfs_shared.h" 9 + #include "xfs_format.h" 10 + #include "xfs_trans_resv.h" 11 + #include "xfs_mount.h" 12 + #include "xfs_defer.h" 13 + #include "xfs_btree.h" 14 + #include "xfs_bit.h" 15 + #include "xfs_log_format.h" 16 + #include "xfs_trans.h" 17 + #include "xfs_sb.h" 18 + #include "xfs_inode.h" 19 + #include "xfs_alloc.h" 20 + #include "xfs_ialloc.h" 21 + #include "xfs_rmap.h" 22 + #include "xfs_error.h" 23 + #include "xfs_errortag.h" 24 + #include "xfs_icache.h" 25 + #include "xfs_health.h" 26 + #include "xfs_bmap.h" 27 + #include "scrub/xfs_scrub.h" 28 + #include "scrub/scrub.h" 29 + #include "scrub/common.h" 30 + #include "scrub/trace.h" 31 + 32 + /* 33 + * FS Summary Counters 34 + * =================== 35 + * 36 + * The basics of filesystem summary counter checking are that we iterate the 37 + * AGs counting the number of free blocks, free space btree blocks, per-AG 38 + * reservations, inodes, delayed allocation reservations, and free inodes. 39 + * Then we compare what we computed against the in-core counters. 40 + * 41 + * However, the reality is that summary counters are a tricky beast to check. 42 + * While we /could/ freeze the filesystem and scramble around the AGs counting 43 + * the free blocks, in practice we prefer not do that for a scan because 44 + * freezing is costly. To get around this, we added a per-cpu counter of the 45 + * delalloc reservations so that we can rotor around the AGs relatively 46 + * quickly, and we allow the counts to be slightly off because we're not taking 47 + * any locks while we do this. 48 + * 49 + * So the first thing we do is warm up the buffer cache in the setup routine by 50 + * walking all the AGs to make sure the incore per-AG structure has been 51 + * initialized. 
The expected value calculation then iterates the incore per-AG 52 + * structures as quickly as it can. We snapshot the percpu counters before and 53 + * after this operation and use the difference in counter values to guess at 54 + * our tolerance for mismatch between expected and actual counter values. 55 + */ 56 + 57 + /* 58 + * Since the expected value computation is lockless but only browses incore 59 + * values, the percpu counters should be fairly close to each other. However, 60 + * we'll allow ourselves to be off by at least this (arbitrary) amount. 61 + */ 62 + #define XCHK_FSCOUNT_MIN_VARIANCE (512) 63 + 64 + /* 65 + * Make sure the per-AG structure has been initialized from the on-disk header 66 + * contents and trust that the incore counters match the ondisk counters. (The 67 + * AGF and AGI scrubbers check them, and a normal xfs_scrub run checks the 68 + * summary counters after checking all AG headers). Do this from the setup 69 + * function so that the inner AG aggregation loop runs as quickly as possible. 70 + * 71 + * This function runs during the setup phase /before/ we start checking any 72 + * metadata. 73 + */ 74 + STATIC int 75 + xchk_fscount_warmup( 76 + struct xfs_scrub *sc) 77 + { 78 + struct xfs_mount *mp = sc->mp; 79 + struct xfs_buf *agi_bp = NULL; 80 + struct xfs_buf *agf_bp = NULL; 81 + struct xfs_perag *pag = NULL; 82 + xfs_agnumber_t agno; 83 + int error = 0; 84 + 85 + for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) { 86 + pag = xfs_perag_get(mp, agno); 87 + 88 + if (pag->pagi_init && pag->pagf_init) 89 + goto next_loop_perag; 90 + 91 + /* Lock both AG headers. */ 92 + error = xfs_ialloc_read_agi(mp, sc->tp, agno, &agi_bp); 93 + if (error) 94 + break; 95 + error = xfs_alloc_read_agf(mp, sc->tp, agno, 0, &agf_bp); 96 + if (error) 97 + break; 98 + error = -ENOMEM; 99 + if (!agf_bp || !agi_bp) 100 + break; 101 + 102 + /* 103 + * These are supposed to be initialized by the header read 104 + * function. 
105 + */ 106 + error = -EFSCORRUPTED; 107 + if (!pag->pagi_init || !pag->pagf_init) 108 + break; 109 + 110 + xfs_buf_relse(agf_bp); 111 + agf_bp = NULL; 112 + xfs_buf_relse(agi_bp); 113 + agi_bp = NULL; 114 + next_loop_perag: 115 + xfs_perag_put(pag); 116 + pag = NULL; 117 + error = 0; 118 + 119 + if (fatal_signal_pending(current)) 120 + break; 121 + } 122 + 123 + if (agf_bp) 124 + xfs_buf_relse(agf_bp); 125 + if (agi_bp) 126 + xfs_buf_relse(agi_bp); 127 + if (pag) 128 + xfs_perag_put(pag); 129 + return error; 130 + } 131 + 132 + int 133 + xchk_setup_fscounters( 134 + struct xfs_scrub *sc, 135 + struct xfs_inode *ip) 136 + { 137 + struct xchk_fscounters *fsc; 138 + int error; 139 + 140 + sc->buf = kmem_zalloc(sizeof(struct xchk_fscounters), KM_SLEEP); 141 + if (!sc->buf) 142 + return -ENOMEM; 143 + fsc = sc->buf; 144 + 145 + xfs_icount_range(sc->mp, &fsc->icount_min, &fsc->icount_max); 146 + 147 + /* We must get the incore counters set up before we can proceed. */ 148 + error = xchk_fscount_warmup(sc); 149 + if (error) 150 + return error; 151 + 152 + /* 153 + * Pause background reclaim while we're scrubbing to reduce the 154 + * likelihood of background perturbations to the counters throwing off 155 + * our calculations. 156 + */ 157 + xchk_stop_reaping(sc); 158 + 159 + return xchk_trans_alloc(sc, 0); 160 + } 161 + 162 + /* 163 + * Calculate what the global in-core counters ought to be from the incore 164 + * per-AG structure. Callers can compare this to the actual in-core counters 165 + * to estimate by how much both in-core and on-disk counters need to be 166 + * adjusted. 
167 + */ 168 + STATIC int 169 + xchk_fscount_aggregate_agcounts( 170 + struct xfs_scrub *sc, 171 + struct xchk_fscounters *fsc) 172 + { 173 + struct xfs_mount *mp = sc->mp; 174 + struct xfs_perag *pag; 175 + uint64_t delayed; 176 + xfs_agnumber_t agno; 177 + int tries = 8; 178 + 179 + retry: 180 + fsc->icount = 0; 181 + fsc->ifree = 0; 182 + fsc->fdblocks = 0; 183 + 184 + for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) { 185 + pag = xfs_perag_get(mp, agno); 186 + 187 + /* This somehow got unset since the warmup? */ 188 + if (!pag->pagi_init || !pag->pagf_init) { 189 + xfs_perag_put(pag); 190 + return -EFSCORRUPTED; 191 + } 192 + 193 + /* Count all the inodes */ 194 + fsc->icount += pag->pagi_count; 195 + fsc->ifree += pag->pagi_freecount; 196 + 197 + /* Add up the free/freelist/bnobt/cntbt blocks */ 198 + fsc->fdblocks += pag->pagf_freeblks; 199 + fsc->fdblocks += pag->pagf_flcount; 200 + fsc->fdblocks += pag->pagf_btreeblks; 201 + 202 + /* 203 + * Per-AG reservations are taken out of the incore counters, 204 + * so they must be left out of the free blocks computation. 205 + */ 206 + fsc->fdblocks -= pag->pag_meta_resv.ar_reserved; 207 + fsc->fdblocks -= pag->pag_rmapbt_resv.ar_orig_reserved; 208 + 209 + xfs_perag_put(pag); 210 + 211 + if (fatal_signal_pending(current)) 212 + break; 213 + } 214 + 215 + /* 216 + * The global incore space reservation is taken from the incore 217 + * counters, so leave that out of the computation. 218 + */ 219 + fsc->fdblocks -= mp->m_resblks_avail; 220 + 221 + /* 222 + * Delayed allocation reservations are taken out of the incore counters 223 + * but not recorded on disk, so leave them and their indlen blocks out 224 + * of the computation. 225 + */ 226 + delayed = percpu_counter_sum(&mp->m_delalloc_blks); 227 + fsc->fdblocks -= delayed; 228 + 229 + trace_xchk_fscounters_calc(mp, fsc->icount, fsc->ifree, fsc->fdblocks, 230 + delayed); 231 + 232 + 233 + /* Bail out if the values we compute are totally nonsense. 
*/ 234 + if (fsc->icount < fsc->icount_min || fsc->icount > fsc->icount_max || 235 + fsc->fdblocks > mp->m_sb.sb_dblocks || 236 + fsc->ifree > fsc->icount_max) 237 + return -EFSCORRUPTED; 238 + 239 + /* 240 + * If ifree > icount then we probably had some perturbation in the 241 + * counters while we were calculating things. We'll try a few times 242 + * to maintain ifree <= icount before giving up. 243 + */ 244 + if (fsc->ifree > fsc->icount) { 245 + if (tries--) 246 + goto retry; 247 + xchk_set_incomplete(sc); 248 + return 0; 249 + } 250 + 251 + return 0; 252 + } 253 + 254 + /* 255 + * Is the @counter reasonably close to the @expected value? 256 + * 257 + * We neither locked nor froze anything in the filesystem while aggregating the 258 + * per-AG data to compute the @expected value, which means that the counter 259 + * could have changed. We know the @old_value of the summation of the counter 260 + * before the aggregation, and we re-sum the counter now. If the expected 261 + * value falls between the two summations, we're ok. 262 + * 263 + * Otherwise, we /might/ have a problem. If the change in the summations is 264 + * more than we want to tolerate, the filesystem is probably busy and we should 265 + * just send back INCOMPLETE and see if userspace will try again. 266 + */ 267 + static inline bool 268 + xchk_fscount_within_range( 269 + struct xfs_scrub *sc, 270 + const int64_t old_value, 271 + struct percpu_counter *counter, 272 + uint64_t expected) 273 + { 274 + int64_t min_value, max_value; 275 + int64_t curr_value = percpu_counter_sum(counter); 276 + 277 + trace_xchk_fscounters_within_range(sc->mp, expected, curr_value, 278 + old_value); 279 + 280 + /* Negative values are always wrong. */ 281 + if (curr_value < 0) 282 + return false; 283 + 284 + /* Exact matches are always ok. 
*/ 285 + if (curr_value == expected) 286 + return true; 287 + 288 + min_value = min(old_value, curr_value); 289 + max_value = max(old_value, curr_value); 290 + 291 + /* Within the before-and-after range is ok. */ 292 + if (expected >= min_value && expected <= max_value) 293 + return true; 294 + 295 + /* 296 + * If the difference between the two summations is too large, the fs 297 + * might just be busy and so we'll mark the scrub incomplete. Return 298 + * true here so that we don't mark the counter corrupt. 299 + * 300 + * XXX: In the future when userspace can grant scrub permission to 301 + * quiesce the filesystem to solve the outsized variance problem, this 302 + * check should be moved up and the return code changed to signal to 303 + * userspace that we need quiesce permission. 304 + */ 305 + if (max_value - min_value >= XCHK_FSCOUNT_MIN_VARIANCE) { 306 + xchk_set_incomplete(sc); 307 + return true; 308 + } 309 + 310 + return false; 311 + } 312 + 313 + /* Check the superblock counters. */ 314 + int 315 + xchk_fscounters( 316 + struct xfs_scrub *sc) 317 + { 318 + struct xfs_mount *mp = sc->mp; 319 + struct xchk_fscounters *fsc = sc->buf; 320 + int64_t icount, ifree, fdblocks; 321 + int error; 322 + 323 + /* Snapshot the percpu counters. */ 324 + icount = percpu_counter_sum(&mp->m_icount); 325 + ifree = percpu_counter_sum(&mp->m_ifree); 326 + fdblocks = percpu_counter_sum(&mp->m_fdblocks); 327 + 328 + /* No negative values, please! */ 329 + if (icount < 0 || ifree < 0 || fdblocks < 0) 330 + xchk_set_corrupt(sc); 331 + 332 + /* See if icount is obviously wrong. */ 333 + if (icount < fsc->icount_min || icount > fsc->icount_max) 334 + xchk_set_corrupt(sc); 335 + 336 + /* See if fdblocks is obviously wrong. */ 337 + if (fdblocks > mp->m_sb.sb_dblocks) 338 + xchk_set_corrupt(sc); 339 + 340 + /* 341 + * If ifree exceeds icount by more than the minimum variance then 342 + * something's probably wrong with the counters. 
343 + */ 344 + if (ifree > icount && ifree - icount > XCHK_FSCOUNT_MIN_VARIANCE) 345 + xchk_set_corrupt(sc); 346 + 347 + /* Walk the incore AG headers to calculate the expected counters. */ 348 + error = xchk_fscount_aggregate_agcounts(sc, fsc); 349 + if (!xchk_process_error(sc, 0, XFS_SB_BLOCK(mp), &error)) 350 + return error; 351 + if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_INCOMPLETE) 352 + return 0; 353 + 354 + /* Compare the in-core counters with whatever we counted. */ 355 + if (!xchk_fscount_within_range(sc, icount, &mp->m_icount, fsc->icount)) 356 + xchk_set_corrupt(sc); 357 + 358 + if (!xchk_fscount_within_range(sc, ifree, &mp->m_ifree, fsc->ifree)) 359 + xchk_set_corrupt(sc); 360 + 361 + if (!xchk_fscount_within_range(sc, fdblocks, &mp->m_fdblocks, 362 + fsc->fdblocks)) 363 + xchk_set_corrupt(sc); 364 + 365 + return 0; 366 + }
+1
fs/xfs/scrub/health.c
··· 109 109 [XFS_SCRUB_TYPE_UQUOTA] = { XHG_FS, XFS_SICK_FS_UQUOTA }, 110 110 [XFS_SCRUB_TYPE_GQUOTA] = { XHG_FS, XFS_SICK_FS_GQUOTA }, 111 111 [XFS_SCRUB_TYPE_PQUOTA] = { XHG_FS, XFS_SICK_FS_PQUOTA }, 112 + [XFS_SCRUB_TYPE_FSCOUNTERS] = { XHG_FS, XFS_SICK_FS_COUNTERS }, 112 113 }; 113 114 114 115 /* Return the health status mask for this scrub type. */
+6
fs/xfs/scrub/scrub.c
··· 352 352 .scrub = xchk_quota, 353 353 .repair = xrep_notsupported, 354 354 }, 355 + [XFS_SCRUB_TYPE_FSCOUNTERS] = { /* fs summary counters */ 356 + .type = ST_FS, 357 + .setup = xchk_setup_fscounters, 358 + .scrub = xchk_fscounters, 359 + .repair = xrep_notsupported, 360 + }, 355 361 }; 356 362 357 363 /* This isn't a stable feature, warn once per day. */
+9
fs/xfs/scrub/scrub.h
··· 127 127 return -ENOENT; 128 128 } 129 129 #endif 130 + int xchk_fscounters(struct xfs_scrub *sc); 130 131 131 132 /* cross-referencing helpers */ 132 133 void xchk_xref_is_used_space(struct xfs_scrub *sc, xfs_agblock_t agbno, ··· 152 151 #else 153 152 # define xchk_xref_is_used_rt_space(sc, rtbno, len) do { } while (0) 154 153 #endif 154 + 155 + struct xchk_fscounters { 156 + uint64_t icount; 157 + uint64_t ifree; 158 + uint64_t fdblocks; 159 + unsigned long long icount_min; 160 + unsigned long long icount_max; 161 + }; 155 162 156 163 #endif /* __XFS_SCRUB_SCRUB_H__ */
+62 -1
fs/xfs/scrub/trace.h
··· 50 50 TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_UQUOTA); 51 51 TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_GQUOTA); 52 52 TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_PQUOTA); 53 + TRACE_DEFINE_ENUM(XFS_SCRUB_TYPE_FSCOUNTERS); 53 54 54 55 #define XFS_SCRUB_TYPE_STRINGS \ 55 56 { XFS_SCRUB_TYPE_PROBE, "probe" }, \ ··· 76 75 { XFS_SCRUB_TYPE_RTSUM, "rtsummary" }, \ 77 76 { XFS_SCRUB_TYPE_UQUOTA, "usrquota" }, \ 78 77 { XFS_SCRUB_TYPE_GQUOTA, "grpquota" }, \ 79 - { XFS_SCRUB_TYPE_PQUOTA, "prjquota" } 78 + { XFS_SCRUB_TYPE_PQUOTA, "prjquota" }, \ 79 + { XFS_SCRUB_TYPE_FSCOUNTERS, "fscounters" } 80 80 81 81 DECLARE_EVENT_CLASS(xchk_class, 82 82 TP_PROTO(struct xfs_inode *ip, struct xfs_scrub_metadata *sm, ··· 225 223 void *ret_ip), \ 226 224 TP_ARGS(sc, daddr, ret_ip)) 227 225 226 + DEFINE_SCRUB_BLOCK_ERROR_EVENT(xchk_fs_error); 228 227 DEFINE_SCRUB_BLOCK_ERROR_EVENT(xchk_block_error); 229 228 DEFINE_SCRUB_BLOCK_ERROR_EVENT(xchk_block_preen); 230 229 ··· 591 588 __entry->cluster_mask, 592 589 __entry->holemask, 593 590 __entry->cluster_ino) 591 + ) 592 + 593 + TRACE_EVENT(xchk_fscounters_calc, 594 + TP_PROTO(struct xfs_mount *mp, uint64_t icount, uint64_t ifree, 595 + uint64_t fdblocks, uint64_t delalloc), 596 + TP_ARGS(mp, icount, ifree, fdblocks, delalloc), 597 + TP_STRUCT__entry( 598 + __field(dev_t, dev) 599 + __field(int64_t, icount_sb) 600 + __field(uint64_t, icount_calculated) 601 + __field(int64_t, ifree_sb) 602 + __field(uint64_t, ifree_calculated) 603 + __field(int64_t, fdblocks_sb) 604 + __field(uint64_t, fdblocks_calculated) 605 + __field(uint64_t, delalloc) 606 + ), 607 + TP_fast_assign( 608 + __entry->dev = mp->m_super->s_dev; 609 + __entry->icount_sb = mp->m_sb.sb_icount; 610 + __entry->icount_calculated = icount; 611 + __entry->ifree_sb = mp->m_sb.sb_ifree; 612 + __entry->ifree_calculated = ifree; 613 + __entry->fdblocks_sb = mp->m_sb.sb_fdblocks; 614 + __entry->fdblocks_calculated = fdblocks; 615 + __entry->delalloc = delalloc; 616 + ), 617 + TP_printk("dev %d:%d icount %lld:%llu 
ifree %lld::%llu fdblocks %lld::%llu delalloc %llu", 618 + MAJOR(__entry->dev), MINOR(__entry->dev), 619 + __entry->icount_sb, 620 + __entry->icount_calculated, 621 + __entry->ifree_sb, 622 + __entry->ifree_calculated, 623 + __entry->fdblocks_sb, 624 + __entry->fdblocks_calculated, 625 + __entry->delalloc) 626 + ) 627 + 628 + TRACE_EVENT(xchk_fscounters_within_range, 629 + TP_PROTO(struct xfs_mount *mp, uint64_t expected, int64_t curr_value, 630 + int64_t old_value), 631 + TP_ARGS(mp, expected, curr_value, old_value), 632 + TP_STRUCT__entry( 633 + __field(dev_t, dev) 634 + __field(uint64_t, expected) 635 + __field(int64_t, curr_value) 636 + __field(int64_t, old_value) 637 + ), 638 + TP_fast_assign( 639 + __entry->dev = mp->m_super->s_dev; 640 + __entry->expected = expected; 641 + __entry->curr_value = curr_value; 642 + __entry->old_value = old_value; 643 + ), 644 + TP_printk("dev %d:%d expected %llu curr_value %lld old_value %lld", 645 + MAJOR(__entry->dev), MINOR(__entry->dev), 646 + __entry->expected, 647 + __entry->curr_value, 648 + __entry->old_value) 594 649 ) 595 650 596 651 /* repair tracepoints */