Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

block: fix diskstats access

There are two variants of stat functions - ones prefixed with double
underbars which don't care about preemption and ones without which
disable preemption before manipulating per-cpu counters. It's unclear
whether the underbarred ones assume that preemption is disabled on
entry as some callers don't do that.

This patch unifies diskstats access by implementing disk_stat_lock()
and disk_stat_unlock() which take care of both RCU (for partition
access) and preemption (for per-cpu counter access). diskstats access
should always be enclosed between the two functions. As such, there's
no need for the versions which disable preemption. They're removed
and double underbars ones are renamed to drop the underbars. As an
extra argument is added, there's no danger of using the old version
unconverted.

disk_stat_lock() uses get_cpu() and returns the cpu index and all
diskstat functions which access per-cpu counters now have a @cpu
argument to help RT.

This change adds RCU or preemption operations at some places but also
collapses several preemption ops into one at others. Overall, the
performance difference should be negligible as all involved ops are
very lightweight per-cpu ones.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>

authored by

Tejun Heo and committed by
Jens Axboe
c9959059 e71bf0d0

+156 -154
+27 -25
block/blk-core.c
··· 56 56 { 57 57 struct hd_struct *part; 58 58 int rw = rq_data_dir(rq); 59 + int cpu; 59 60 60 61 if (!blk_fs_request(rq) || !rq->rq_disk) 61 62 return; 62 63 63 - rcu_read_lock(); 64 - 64 + cpu = disk_stat_lock(); 65 65 part = disk_map_sector_rcu(rq->rq_disk, rq->sector); 66 + 66 67 if (!new_io) 67 - __all_stat_inc(rq->rq_disk, part, merges[rw], rq->sector); 68 + all_stat_inc(cpu, rq->rq_disk, part, merges[rw], rq->sector); 68 69 else { 69 - disk_round_stats(rq->rq_disk); 70 + disk_round_stats(cpu, rq->rq_disk); 70 71 rq->rq_disk->in_flight++; 71 72 if (part) { 72 - part_round_stats(part); 73 + part_round_stats(cpu, part); 73 74 part->in_flight++; 74 75 } 75 76 } 76 77 77 - rcu_read_unlock(); 78 + disk_stat_unlock(); 78 79 } 79 80 80 81 void blk_queue_congestion_threshold(struct request_queue *q) ··· 998 997 * /proc/diskstats. This accounts immediately for all queue usage up to 999 998 * the current jiffies and restarts the counters again. 1000 999 */ 1001 - void disk_round_stats(struct gendisk *disk) 1000 + void disk_round_stats(int cpu, struct gendisk *disk) 1002 1001 { 1003 1002 unsigned long now = jiffies; 1004 1003 ··· 1006 1005 return; 1007 1006 1008 1007 if (disk->in_flight) { 1009 - __disk_stat_add(disk, time_in_queue, 1010 - disk->in_flight * (now - disk->stamp)); 1011 - __disk_stat_add(disk, io_ticks, (now - disk->stamp)); 1008 + disk_stat_add(cpu, disk, time_in_queue, 1009 + disk->in_flight * (now - disk->stamp)); 1010 + disk_stat_add(cpu, disk, io_ticks, (now - disk->stamp)); 1012 1011 } 1013 1012 disk->stamp = now; 1014 1013 } 1015 1014 EXPORT_SYMBOL_GPL(disk_round_stats); 1016 1015 1017 - void part_round_stats(struct hd_struct *part) 1016 + void part_round_stats(int cpu, struct hd_struct *part) 1018 1017 { 1019 1018 unsigned long now = jiffies; 1020 1019 ··· 1022 1021 return; 1023 1022 1024 1023 if (part->in_flight) { 1025 - __part_stat_add(part, time_in_queue, 1026 - part->in_flight * (now - part->stamp)); 1027 - __part_stat_add(part, io_ticks, 
(now - part->stamp)); 1024 + part_stat_add(cpu, part, time_in_queue, 1025 + part->in_flight * (now - part->stamp)); 1026 + part_stat_add(cpu, part, io_ticks, (now - part->stamp)); 1028 1027 } 1029 1028 part->stamp = now; 1030 1029 } ··· 1564 1563 if (blk_fs_request(req) && req->rq_disk) { 1565 1564 const int rw = rq_data_dir(req); 1566 1565 struct hd_struct *part; 1566 + int cpu; 1567 1567 1568 - rcu_read_lock(); 1568 + cpu = disk_stat_lock(); 1569 1569 part = disk_map_sector_rcu(req->rq_disk, req->sector); 1570 - all_stat_add(req->rq_disk, part, sectors[rw], 1571 - nr_bytes >> 9, req->sector); 1572 - rcu_read_unlock(); 1570 + all_stat_add(cpu, req->rq_disk, part, sectors[rw], 1571 + nr_bytes >> 9, req->sector); 1572 + disk_stat_unlock(); 1573 1573 } 1574 1574 1575 1575 total_bytes = bio_nbytes = 0; ··· 1755 1753 unsigned long duration = jiffies - req->start_time; 1756 1754 const int rw = rq_data_dir(req); 1757 1755 struct hd_struct *part; 1756 + int cpu; 1758 1757 1759 - rcu_read_lock(); 1760 - 1758 + cpu = disk_stat_lock(); 1761 1759 part = disk_map_sector_rcu(disk, req->sector); 1762 1760 1763 - __all_stat_inc(disk, part, ios[rw], req->sector); 1764 - __all_stat_add(disk, part, ticks[rw], duration, req->sector); 1765 - disk_round_stats(disk); 1761 + all_stat_inc(cpu, disk, part, ios[rw], req->sector); 1762 + all_stat_add(cpu, disk, part, ticks[rw], duration, req->sector); 1763 + disk_round_stats(cpu, disk); 1766 1764 disk->in_flight--; 1767 1765 if (part) { 1768 - part_round_stats(part); 1766 + part_round_stats(cpu, part); 1769 1767 part->in_flight--; 1770 1768 } 1771 1769 1772 - rcu_read_unlock(); 1770 + disk_stat_unlock(); 1773 1771 } 1774 1772 1775 1773 if (req->end_io)
+6 -5
block/blk-merge.c
··· 388 388 389 389 if (req->rq_disk) { 390 390 struct hd_struct *part; 391 + int cpu; 391 392 392 - rcu_read_lock(); 393 - 393 + cpu = disk_stat_lock(); 394 394 part = disk_map_sector_rcu(req->rq_disk, req->sector); 395 - disk_round_stats(req->rq_disk); 395 + 396 + disk_round_stats(cpu, req->rq_disk); 396 397 req->rq_disk->in_flight--; 397 398 if (part) { 398 - part_round_stats(part); 399 + part_round_stats(cpu, part); 399 400 part->in_flight--; 400 401 } 401 402 402 - rcu_read_unlock(); 403 + disk_stat_unlock(); 403 404 } 404 405 405 406 req->ioprio = ioprio_best(req->ioprio, next->ioprio);
+11 -9
block/genhd.c
··· 633 633 struct device_attribute *attr, char *buf) 634 634 { 635 635 struct gendisk *disk = dev_to_disk(dev); 636 + int cpu; 636 637 637 - preempt_disable(); 638 - disk_round_stats(disk); 639 - preempt_enable(); 638 + cpu = disk_stat_lock(); 639 + disk_round_stats(cpu, disk); 640 + disk_stat_unlock(); 640 641 return sprintf(buf, 641 642 "%8lu %8lu %8llu %8u " 642 643 "%8lu %8lu %8llu %8u " ··· 750 749 struct disk_part_iter piter; 751 750 struct hd_struct *hd; 752 751 char buf[BDEVNAME_SIZE]; 752 + int cpu; 753 753 754 754 /* 755 755 if (&gp->dev.kobj.entry == block_class.devices.next) ··· 760 758 "\n\n"); 761 759 */ 762 760 763 - preempt_disable(); 764 - disk_round_stats(gp); 765 - preempt_enable(); 761 + cpu = disk_stat_lock(); 762 + disk_round_stats(cpu, gp); 763 + disk_stat_unlock(); 766 764 seq_printf(seqf, "%4d %4d %s %lu %lu %llu %u %lu %lu %llu %u %u %u %u\n", 767 765 MAJOR(disk_devt(gp)), MINOR(disk_devt(gp)), 768 766 disk_name(gp, 0, buf), ··· 779 777 /* now show all non-0 size partitions of it */ 780 778 disk_part_iter_init(&piter, gp, 0); 781 779 while ((hd = disk_part_iter_next(&piter))) { 782 - preempt_disable(); 783 - part_round_stats(hd); 784 - preempt_enable(); 780 + cpu = disk_stat_lock(); 781 + part_round_stats(cpu, hd); 782 + disk_stat_unlock(); 785 783 seq_printf(seqf, "%4d %4d %s %lu %lu %llu " 786 784 "%u %lu %lu %llu %u %u %u %u\n", 787 785 MAJOR(part_devt(hd)), MINOR(part_devt(hd)),
+8 -7
drivers/block/aoe/aoecmd.c
··· 756 756 unsigned long n_sect = bio->bi_size >> 9; 757 757 const int rw = bio_data_dir(bio); 758 758 struct hd_struct *part; 759 + int cpu; 759 760 760 - rcu_read_lock(); 761 - 761 + cpu = disk_stat_lock(); 762 762 part = disk_map_sector_rcu(disk, sector); 763 - all_stat_inc(disk, part, ios[rw], sector); 764 - all_stat_add(disk, part, ticks[rw], duration, sector); 765 - all_stat_add(disk, part, sectors[rw], n_sect, sector); 766 - all_stat_add(disk, part, io_ticks, duration, sector); 767 763 768 - rcu_read_unlock(); 764 + all_stat_inc(cpu, disk, part, ios[rw], sector); 765 + all_stat_add(cpu, disk, part, ticks[rw], duration, sector); 766 + all_stat_add(cpu, disk, part, sectors[rw], n_sect, sector); 767 + all_stat_add(cpu, disk, part, io_ticks, duration, sector); 768 + 769 + disk_stat_unlock(); 769 770 } 770 771 771 772 void
+15 -11
drivers/md/dm.c
··· 377 377 static void start_io_acct(struct dm_io *io) 378 378 { 379 379 struct mapped_device *md = io->md; 380 + int cpu; 380 381 381 382 io->start_time = jiffies; 382 383 383 - preempt_disable(); 384 - disk_round_stats(dm_disk(md)); 385 - preempt_enable(); 384 + cpu = disk_stat_lock(); 385 + disk_round_stats(cpu, dm_disk(md)); 386 + disk_stat_unlock(); 386 387 dm_disk(md)->in_flight = atomic_inc_return(&md->pending); 387 388 } 388 389 ··· 392 391 struct mapped_device *md = io->md; 393 392 struct bio *bio = io->bio; 394 393 unsigned long duration = jiffies - io->start_time; 395 - int pending; 394 + int pending, cpu; 396 395 int rw = bio_data_dir(bio); 397 396 398 - preempt_disable(); 399 - disk_round_stats(dm_disk(md)); 400 - preempt_enable(); 401 - dm_disk(md)->in_flight = pending = atomic_dec_return(&md->pending); 397 + cpu = disk_stat_lock(); 398 + disk_round_stats(cpu, dm_disk(md)); 399 + disk_stat_add(cpu, dm_disk(md), ticks[rw], duration); 400 + disk_stat_unlock(); 402 401 403 - disk_stat_add(dm_disk(md), ticks[rw], duration); 402 + dm_disk(md)->in_flight = pending = atomic_dec_return(&md->pending); 404 403 405 404 return !pending; 406 405 } ··· 886 885 int r = -EIO; 887 886 int rw = bio_data_dir(bio); 888 887 struct mapped_device *md = q->queuedata; 888 + int cpu; 889 889 890 890 /* 891 891 * There is no use in forwarding any barrier request since we can't ··· 899 897 900 898 down_read(&md->io_lock); 901 899 902 - disk_stat_inc(dm_disk(md), ios[rw]); 903 - disk_stat_add(dm_disk(md), sectors[rw], bio_sectors(bio)); 900 + cpu = disk_stat_lock(); 901 + disk_stat_inc(cpu, dm_disk(md), ios[rw]); 902 + disk_stat_add(cpu, dm_disk(md), sectors[rw], bio_sectors(bio)); 903 + disk_stat_unlock(); 904 904 905 905 /* 906 906 * If we're suspended we have to queue
+5 -2
drivers/md/linear.c
··· 318 318 mddev_t *mddev = q->queuedata; 319 319 dev_info_t *tmp_dev; 320 320 sector_t block; 321 + int cpu; 321 322 322 323 if (unlikely(bio_barrier(bio))) { 323 324 bio_endio(bio, -EOPNOTSUPP); 324 325 return 0; 325 326 } 326 327 327 - disk_stat_inc(mddev->gendisk, ios[rw]); 328 - disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio)); 328 + cpu = disk_stat_lock(); 329 + disk_stat_inc(cpu, mddev->gendisk, ios[rw]); 330 + disk_stat_add(cpu, mddev->gendisk, sectors[rw], bio_sectors(bio)); 331 + disk_stat_unlock(); 329 332 330 333 tmp_dev = which_dev(mddev, bio->bi_sector); 331 334 block = bio->bi_sector >> 1;
+5 -2
drivers/md/multipath.c
··· 147 147 struct multipath_bh * mp_bh; 148 148 struct multipath_info *multipath; 149 149 const int rw = bio_data_dir(bio); 150 + int cpu; 150 151 151 152 if (unlikely(bio_barrier(bio))) { 152 153 bio_endio(bio, -EOPNOTSUPP); ··· 159 158 mp_bh->master_bio = bio; 160 159 mp_bh->mddev = mddev; 161 160 162 - disk_stat_inc(mddev->gendisk, ios[rw]); 163 - disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio)); 161 + cpu = disk_stat_lock(); 162 + disk_stat_inc(cpu, mddev->gendisk, ios[rw]); 163 + disk_stat_add(cpu, mddev->gendisk, sectors[rw], bio_sectors(bio)); 164 + disk_stat_unlock(); 164 165 165 166 mp_bh->path = multipath_map(conf); 166 167 if (mp_bh->path < 0) {
+5 -2
drivers/md/raid0.c
··· 399 399 sector_t chunk; 400 400 sector_t block, rsect; 401 401 const int rw = bio_data_dir(bio); 402 + int cpu; 402 403 403 404 if (unlikely(bio_barrier(bio))) { 404 405 bio_endio(bio, -EOPNOTSUPP); 405 406 return 0; 406 407 } 407 408 408 - disk_stat_inc(mddev->gendisk, ios[rw]); 409 - disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio)); 409 + cpu = disk_stat_lock(); 410 + disk_stat_inc(cpu, mddev->gendisk, ios[rw]); 411 + disk_stat_add(cpu, mddev->gendisk, sectors[rw], bio_sectors(bio)); 412 + disk_stat_unlock(); 410 413 411 414 chunk_size = mddev->chunk_size >> 10; 412 415 chunk_sects = mddev->chunk_size >> 9;
+5 -3
drivers/md/raid1.c
··· 779 779 struct page **behind_pages = NULL; 780 780 const int rw = bio_data_dir(bio); 781 781 const int do_sync = bio_sync(bio); 782 - int do_barriers; 782 + int cpu, do_barriers; 783 783 mdk_rdev_t *blocked_rdev; 784 784 785 785 /* ··· 804 804 805 805 bitmap = mddev->bitmap; 806 806 807 - disk_stat_inc(mddev->gendisk, ios[rw]); 808 - disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio)); 807 + cpu = disk_stat_lock(); 808 + disk_stat_inc(cpu, mddev->gendisk, ios[rw]); 809 + disk_stat_add(cpu, mddev->gendisk, sectors[rw], bio_sectors(bio)); 810 + disk_stat_unlock(); 809 811 810 812 /* 811 813 * make_request() can abort the operation when READA is being
+5 -2
drivers/md/raid10.c
··· 789 789 mirror_info_t *mirror; 790 790 r10bio_t *r10_bio; 791 791 struct bio *read_bio; 792 + int cpu; 792 793 int i; 793 794 int chunk_sects = conf->chunk_mask + 1; 794 795 const int rw = bio_data_dir(bio); ··· 844 843 */ 845 844 wait_barrier(conf); 846 845 847 - disk_stat_inc(mddev->gendisk, ios[rw]); 848 - disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bio)); 846 + cpu = disk_stat_lock(); 847 + disk_stat_inc(cpu, mddev->gendisk, ios[rw]); 848 + disk_stat_add(cpu, mddev->gendisk, sectors[rw], bio_sectors(bio)); 849 + disk_stat_unlock(); 849 850 850 851 r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO); 851 852
+5 -3
drivers/md/raid5.c
··· 3387 3387 sector_t logical_sector, last_sector; 3388 3388 struct stripe_head *sh; 3389 3389 const int rw = bio_data_dir(bi); 3390 - int remaining; 3390 + int cpu, remaining; 3391 3391 3392 3392 if (unlikely(bio_barrier(bi))) { 3393 3393 bio_endio(bi, -EOPNOTSUPP); ··· 3396 3396 3397 3397 md_write_start(mddev, bi); 3398 3398 3399 - disk_stat_inc(mddev->gendisk, ios[rw]); 3400 - disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bi)); 3399 + cpu = disk_stat_lock(); 3400 + disk_stat_inc(cpu, mddev->gendisk, ios[rw]); 3401 + disk_stat_add(cpu, mddev->gendisk, sectors[rw], bio_sectors(bi)); 3402 + disk_stat_unlock(); 3401 3403 3402 3404 if (rw == READ && 3403 3405 mddev->reshape_position == MaxSector &&
+4 -3
fs/partitions/check.c
··· 219 219 struct device_attribute *attr, char *buf) 220 220 { 221 221 struct hd_struct *p = dev_to_part(dev); 222 + int cpu; 222 223 223 - preempt_disable(); 224 - part_round_stats(p); 225 - preempt_enable(); 224 + cpu = disk_stat_lock(); 225 + part_round_stats(cpu, p); 226 + disk_stat_unlock(); 226 227 return sprintf(buf, 227 228 "%8lu %8lu %8llu %8u " 228 229 "%8lu %8lu %8llu %8u "
+55 -80
include/linux/genhd.h
··· 209 209 extern struct hd_struct *disk_map_sector_rcu(struct gendisk *disk, 210 210 sector_t sector); 211 211 212 - /* 212 + /* 213 213 * Macros to operate on percpu disk statistics: 214 214 * 215 - * The __ variants should only be called in critical sections. The full 216 - * variants disable/enable preemption. 215 + * {disk|part|all}_stat_{add|sub|inc|dec}() modify the stat counters 216 + * and should be called between disk_stat_lock() and 217 + * disk_stat_unlock(). 218 + * 219 + * part_stat_read() can be called at any time. 220 + * 221 + * part_stat_{add|set_all}() and {init|free}_part_stats are for 222 + * internal use only. 217 223 */ 218 - 219 224 #ifdef CONFIG_SMP 220 - #define __disk_stat_add(gendiskp, field, addnd) \ 221 - (per_cpu_ptr(gendiskp->dkstats, smp_processor_id())->field += addnd) 225 + #define disk_stat_lock() ({ rcu_read_lock(); get_cpu(); }) 226 + #define disk_stat_unlock() do { put_cpu(); rcu_read_unlock(); } while (0) 227 + 228 + #define disk_stat_add(cpu, gendiskp, field, addnd) \ 229 + (per_cpu_ptr(gendiskp->dkstats, cpu)->field += addnd) 222 230 223 231 #define disk_stat_read(gendiskp, field) \ 224 232 ({ \ ··· 237 229 res; \ 238 230 }) 239 231 240 - static inline void disk_stat_set_all(struct gendisk *gendiskp, int value) { 232 + static inline void disk_stat_set_all(struct gendisk *gendiskp, int value) 233 + { 241 234 int i; 242 235 243 236 for_each_possible_cpu(i) ··· 246 237 sizeof(struct disk_stats)); 247 238 } 248 239 249 - #define __part_stat_add(part, field, addnd) \ 250 - (per_cpu_ptr(part->dkstats, smp_processor_id())->field += addnd) 240 + #define part_stat_add(cpu, part, field, addnd) \ 241 + (per_cpu_ptr(part->dkstats, cpu)->field += addnd) 251 242 252 - #define __all_stat_add(gendiskp, part, field, addnd, sector) \ 253 - ({ \ 254 - if (part) \ 255 - __part_stat_add(part, field, addnd); \ 256 - __disk_stat_add(gendiskp, field, addnd); \ 243 + #define all_stat_add(cpu, gendiskp, part, field, addnd, sector) \ 244 + ({ \ 245 
+ if (part) \ 246 + part_stat_add(cpu, part, field, addnd); \ 247 + disk_stat_add(cpu, gendiskp, field, addnd); \ 257 248 }) 258 249 259 250 #define part_stat_read(part, field) \ ··· 273 264 memset(per_cpu_ptr(part->dkstats, i), value, 274 265 sizeof(struct disk_stats)); 275 266 } 276 - 267 + 277 268 #else /* !CONFIG_SMP */ 278 - #define __disk_stat_add(gendiskp, field, addnd) \ 279 - (gendiskp->dkstats.field += addnd) 269 + #define disk_stat_lock() ({ rcu_read_lock(); 0; }) 270 + #define disk_stat_unlock() rcu_read_unlock() 271 + 272 + #define disk_stat_add(cpu, gendiskp, field, addnd) \ 273 + (gendiskp->dkstats.field += addnd) 280 274 #define disk_stat_read(gendiskp, field) (gendiskp->dkstats.field) 281 275 282 276 static inline void disk_stat_set_all(struct gendisk *gendiskp, int value) ··· 287 275 memset(&gendiskp->dkstats, value, sizeof (struct disk_stats)); 288 276 } 289 277 290 - #define __part_stat_add(part, field, addnd) \ 278 + #define part_stat_add(cpu, part, field, addnd) \ 291 279 (part->dkstats.field += addnd) 292 280 293 - #define __all_stat_add(gendiskp, part, field, addnd, sector) \ 294 - ({ \ 295 - if (part) \ 296 - part->dkstats.field += addnd; \ 297 - __disk_stat_add(gendiskp, field, addnd); \ 281 + #define all_stat_add(cpu, gendiskp, part, field, addnd, sector) \ 282 + ({ \ 283 + if (part) \ 284 + part_stat_add(cpu, part, field, addnd); \ 285 + disk_stat_add(cpu, gendiskp, field, addnd); \ 298 286 }) 299 287 300 288 #define part_stat_read(part, field) (part->dkstats.field) ··· 306 294 307 295 #endif /* CONFIG_SMP */ 308 296 309 - #define disk_stat_add(gendiskp, field, addnd) \ 310 - do { \ 311 - preempt_disable(); \ 312 - __disk_stat_add(gendiskp, field, addnd); \ 313 - preempt_enable(); \ 314 - } while (0) 297 + #define disk_stat_dec(cpu, gendiskp, field) \ 298 + disk_stat_add(cpu, gendiskp, field, -1) 299 + #define disk_stat_inc(cpu, gendiskp, field) \ 300 + disk_stat_add(cpu, gendiskp, field, 1) 301 + #define disk_stat_sub(cpu, gendiskp, 
field, subnd) \ 302 + disk_stat_add(cpu, gendiskp, field, -subnd) 315 303 316 - #define __disk_stat_dec(gendiskp, field) __disk_stat_add(gendiskp, field, -1) 317 - #define disk_stat_dec(gendiskp, field) disk_stat_add(gendiskp, field, -1) 304 + #define part_stat_dec(cpu, gendiskp, field) \ 305 + part_stat_add(cpu, gendiskp, field, -1) 306 + #define part_stat_inc(cpu, gendiskp, field) \ 307 + part_stat_add(cpu, gendiskp, field, 1) 308 + #define part_stat_sub(cpu, gendiskp, field, subnd) \ 309 + part_stat_add(cpu, gendiskp, field, -subnd) 318 310 319 - #define __disk_stat_inc(gendiskp, field) __disk_stat_add(gendiskp, field, 1) 320 - #define disk_stat_inc(gendiskp, field) disk_stat_add(gendiskp, field, 1) 321 - 322 - #define __disk_stat_sub(gendiskp, field, subnd) \ 323 - __disk_stat_add(gendiskp, field, -subnd) 324 - #define disk_stat_sub(gendiskp, field, subnd) \ 325 - disk_stat_add(gendiskp, field, -subnd) 326 - 327 - #define part_stat_add(gendiskp, field, addnd) \ 328 - do { \ 329 - preempt_disable(); \ 330 - __part_stat_add(gendiskp, field, addnd);\ 331 - preempt_enable(); \ 332 - } while (0) 333 - 334 - #define __part_stat_dec(gendiskp, field) __part_stat_add(gendiskp, field, -1) 335 - #define part_stat_dec(gendiskp, field) part_stat_add(gendiskp, field, -1) 336 - 337 - #define __part_stat_inc(gendiskp, field) __part_stat_add(gendiskp, field, 1) 338 - #define part_stat_inc(gendiskp, field) part_stat_add(gendiskp, field, 1) 339 - 340 - #define __part_stat_sub(gendiskp, field, subnd) \ 341 - __part_stat_add(gendiskp, field, -subnd) 342 - #define part_stat_sub(gendiskp, field, subnd) \ 343 - part_stat_add(gendiskp, field, -subnd) 344 - 345 - #define all_stat_add(gendiskp, part, field, addnd, sector) \ 346 - do { \ 347 - preempt_disable(); \ 348 - __all_stat_add(gendiskp, part, field, addnd, sector); \ 349 - preempt_enable(); \ 350 - } while (0) 351 - 352 - #define __all_stat_dec(gendiskp, field, sector) \ 353 - __all_stat_add(gendiskp, field, -1, sector) 354 - 
#define all_stat_dec(gendiskp, field, sector) \ 355 - all_stat_add(gendiskp, field, -1, sector) 356 - 357 - #define __all_stat_inc(gendiskp, part, field, sector) \ 358 - __all_stat_add(gendiskp, part, field, 1, sector) 359 - #define all_stat_inc(gendiskp, part, field, sector) \ 360 - all_stat_add(gendiskp, part, field, 1, sector) 361 - 362 - #define __all_stat_sub(gendiskp, part, field, subnd, sector) \ 363 - __all_stat_add(gendiskp, part, field, -subnd, sector) 364 - #define all_stat_sub(gendiskp, part, field, subnd, sector) \ 365 - all_stat_add(gendiskp, part, field, -subnd, sector) 311 + #define all_stat_dec(cpu, gendiskp, field, sector) \ 312 + all_stat_add(cpu, gendiskp, field, -1, sector) 313 + #define all_stat_inc(cpu, gendiskp, part, field, sector) \ 314 + all_stat_add(cpu, gendiskp, part, field, 1, sector) 315 + #define all_stat_sub(cpu, gendiskp, part, field, subnd, sector) \ 316 + all_stat_add(cpu, gendiskp, part, field, -subnd, sector) 366 317 367 318 /* Inlines to alloc and free disk stats in struct gendisk */ 368 319 #ifdef CONFIG_SMP ··· 376 401 #endif /* CONFIG_SMP */ 377 402 378 403 /* drivers/block/ll_rw_blk.c */ 379 - extern void disk_round_stats(struct gendisk *disk); 380 - extern void part_round_stats(struct hd_struct *part); 404 + extern void disk_round_stats(int cpu, struct gendisk *disk); 405 + extern void part_round_stats(int cpu, struct hd_struct *part); 381 406 382 407 /* drivers/block/genhd.c */ 383 408 extern int get_blkdev_list(char *, int);