Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git

writeback: remove bdi->congested_fn

Except for pktcdvd, the only places setting congested bits are file
systems that allocate their own backing_dev_info structures. And
pktcdvd is a deprecated driver that isn't useful in stacked setups
either. So remove the dead congested_fn stacking infrastructure.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Song Liu <song@kernel.org>
Acked-by: David Sterba <dsterba@suse.com>
[axboe: fixup unused variables in bcache/request.c]
Signed-off-by: Jens Axboe <axboe@kernel.dk>
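
The core of the change sits in the writeback path: wb_congested() no longer indirects through a driver-supplied callback and simply reads the per-writeback congestion bits. A minimal before/after sketch, condensed from the include/linux/backing-dev.h hunk below (the _old/_new suffixes are illustrative only, not kernel identifiers):

/* Before this commit: a stacking driver (md/dm/btrfs) could hook
 * congestion reporting by filling in bdi->congested_fn.
 */
static inline int wb_congested_old(struct bdi_writeback *wb, int cong_bits)
{
	struct backing_dev_info *bdi = wb->bdi;

	if (bdi->congested_fn)	/* was set only by md/dm/btrfs */
		return bdi->congested_fn(bdi->congested_data, cong_bits);
	return wb->congested & cong_bits;
}

/* After this commit: only the per-writeback congestion bits remain. */
static inline int wb_congested_new(struct bdi_writeback *wb, int cong_bits)
{
	return wb->congested & cong_bits;
}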

Authored by Christoph Hellwig and committed by Jens Axboe
21cf8661 8c911f3d

23 files changed: +1 -468
drivers/block/drbd/drbd_main.c (-59)

···
 	pr_info("module cleanup done.\n");
 }
 
-/**
- * drbd_congested() - Callback for the flusher thread
- * @congested_data:	User data
- * @bdi_bits:		Bits the BDI flusher thread is currently interested in
- *
- * Returns 1<<WB_async_congested and/or 1<<WB_sync_congested if we are congested.
- */
-static int drbd_congested(void *congested_data, int bdi_bits)
-{
-	struct drbd_device *device = congested_data;
-	struct request_queue *q;
-	char reason = '-';
-	int r = 0;
-
-	if (!may_inc_ap_bio(device)) {
-		/* DRBD has frozen IO */
-		r = bdi_bits;
-		reason = 'd';
-		goto out;
-	}
-
-	if (test_bit(CALLBACK_PENDING, &first_peer_device(device)->connection->flags)) {
-		r |= (1 << WB_async_congested);
-		/* Without good local data, we would need to read from remote,
-		 * and that would need the worker thread as well, which is
-		 * currently blocked waiting for that usermode helper to
-		 * finish.
-		 */
-		if (!get_ldev_if_state(device, D_UP_TO_DATE))
-			r |= (1 << WB_sync_congested);
-		else
-			put_ldev(device);
-		r &= bdi_bits;
-		reason = 'c';
-		goto out;
-	}
-
-	if (get_ldev(device)) {
-		q = bdev_get_queue(device->ldev->backing_bdev);
-		r = bdi_congested(q->backing_dev_info, bdi_bits);
-		put_ldev(device);
-		if (r)
-			reason = 'b';
-	}
-
-	if (bdi_bits & (1 << WB_async_congested) &&
-	    test_bit(NET_CONGESTED, &first_peer_device(device)->connection->flags)) {
-		r |= (1 << WB_async_congested);
-		reason = reason == 'b' ? 'a' : 'n';
-	}
-
-out:
-	device->congestion_reason = reason;
-	return r;
-}
-
 static void drbd_init_workqueue(struct drbd_work_queue* wq)
 {
 	spin_lock_init(&wq->q_lock);
···
 	device->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
 	/* we have no partitions. we contain only ourselves. */
 	device->this_bdev->bd_contains = device->this_bdev;
-
-	q->backing_dev_info->congested_fn = drbd_congested;
-	q->backing_dev_info->congested_data = device;
 
 	blk_queue_write_cache(q, true, true);
 	/* Setting the max_hw_sectors to an odd value of 8kibyte here
drivers/md/bcache/request.c (-47)

···
 	return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
 }
 
-static int cached_dev_congested(void *data, int bits)
-{
-	struct bcache_device *d = data;
-	struct cached_dev *dc = container_of(d, struct cached_dev, disk);
-	struct request_queue *q = bdev_get_queue(dc->bdev);
-	int ret = 0;
-
-	if (bdi_congested(q->backing_dev_info, bits))
-		return 1;
-
-	if (cached_dev_get(dc)) {
-		unsigned int i;
-		struct cache *ca;
-
-		for_each_cache(ca, d->c, i) {
-			q = bdev_get_queue(ca->bdev);
-			ret |= bdi_congested(q->backing_dev_info, bits);
-		}
-
-		cached_dev_put(dc);
-	}
-
-	return ret;
-}
-
 void bch_cached_dev_request_init(struct cached_dev *dc)
 {
-	struct gendisk *g = dc->disk.disk;
-
-	g->queue->backing_dev_info->congested_fn = cached_dev_congested;
 	dc->disk.cache_miss = cached_dev_cache_miss;
 	dc->disk.ioctl = cached_dev_ioctl;
 }
···
 	return -ENOTTY;
 }
 
-static int flash_dev_congested(void *data, int bits)
-{
-	struct bcache_device *d = data;
-	struct request_queue *q;
-	struct cache *ca;
-	unsigned int i;
-	int ret = 0;
-
-	for_each_cache(ca, d->c, i) {
-		q = bdev_get_queue(ca->bdev);
-		ret |= bdi_congested(q->backing_dev_info, bits);
-	}
-
-	return ret;
-}
-
 void bch_flash_dev_request_init(struct bcache_device *d)
 {
-	struct gendisk *g = d->disk;
-
-	g->queue->backing_dev_info->congested_fn = flash_dev_congested;
 	d->cache_miss = flash_dev_cache_miss;
 	d->ioctl = flash_dev_ioctl;
 }
drivers/md/bcache/super.c (-1)

···
 		return -ENOMEM;
 
 	d->disk->queue = q;
-	q->backing_dev_info->congested_data = d;
 	q->limits.max_hw_sectors	= UINT_MAX;
 	q->limits.max_sectors		= UINT_MAX;
 	q->limits.max_segment_size	= UINT_MAX;
drivers/md/dm-cache-target.c (-19)

···
 
 	struct rw_semaphore quiesce_lock;
 
-	struct dm_target_callbacks callbacks;
-
 	/*
 	 * origin_blocks entries, discarded if set.
 	 */
···
 	cache->cache_size = size;
 }
 
-static int is_congested(struct dm_dev *dev, int bdi_bits)
-{
-	struct request_queue *q = bdev_get_queue(dev->bdev);
-	return bdi_congested(q->backing_dev_info, bdi_bits);
-}
-
-static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
-{
-	struct cache *cache = container_of(cb, struct cache, callbacks);
-
-	return is_congested(cache->origin_dev, bdi_bits) ||
-		is_congested(cache->cache_dev, bdi_bits);
-}
-
 #define DEFAULT_MIGRATION_THRESHOLD 2048
 
 static int cache_create(struct cache_args *ca, struct cache **result)
···
 		if (r)
 			goto bad;
 	}
-
-	cache->callbacks.congested_fn = cache_is_congested;
-	dm_table_add_target_callbacks(ti->table, &cache->callbacks);
 
 	cache->metadata_dev = ca->metadata_dev;
 	cache->origin_dev = ca->origin_dev;
drivers/md/dm-clone-target.c (-15)

···
 
 struct clone {
 	struct dm_target *ti;
-	struct dm_target_callbacks callbacks;
 
 	struct dm_dev *metadata_dev;
 	struct dm_dev *dest_dev;
···
 		DMEMIT("Error");
 }
 
-static int clone_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
-{
-	struct request_queue *dest_q, *source_q;
-	struct clone *clone = container_of(cb, struct clone, callbacks);
-
-	source_q = bdev_get_queue(clone->source_dev->bdev);
-	dest_q = bdev_get_queue(clone->dest_dev->bdev);
-
-	return (bdi_congested(dest_q->backing_dev_info, bdi_bits) |
-		bdi_congested(source_q->backing_dev_info, bdi_bits));
-}
-
 static sector_t get_dev_size(struct dm_dev *dev)
 {
 	return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
···
 		goto out_with_mempool;
 
 	mutex_init(&clone->commit_lock);
-	clone->callbacks.congested_fn = clone_is_congested;
-	dm_table_add_target_callbacks(ti->table, &clone->callbacks);
 
 	/* Enable flushes */
 	ti->num_flush_bios = 1;
drivers/md/dm-era-target.c (-15)

···
 
 struct era {
 	struct dm_target *ti;
-	struct dm_target_callbacks callbacks;
 
 	struct dm_dev *metadata_dev;
 	struct dm_dev *origin_dev;
···
 /*----------------------------------------------------------------
  * Target methods
  *--------------------------------------------------------------*/
-static int dev_is_congested(struct dm_dev *dev, int bdi_bits)
-{
-	struct request_queue *q = bdev_get_queue(dev->bdev);
-	return bdi_congested(q->backing_dev_info, bdi_bits);
-}
-
-static int era_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
-{
-	struct era *era = container_of(cb, struct era, callbacks);
-	return dev_is_congested(era->origin_dev, bdi_bits);
-}
-
 static void era_destroy(struct era *era)
 {
 	if (era->md)
···
 	ti->flush_supported = true;
 
 	ti->num_discard_bios = 1;
-	era->callbacks.congested_fn = era_is_congested;
-	dm_table_add_target_callbacks(ti->table, &era->callbacks);
 
 	return 0;
 }
drivers/md/dm-raid.c (-12)

···
 
 	struct mddev md;
 	struct raid_type *raid_type;
-	struct dm_target_callbacks callbacks;
 
 	sector_t array_sectors;
 	sector_t dev_sectors;
···
 		rs_set_capacity(rs);
 	}
 	dm_table_event(rs->ti->table);
 }
 
-static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
-{
-	struct raid_set *rs = container_of(cb, struct raid_set, callbacks);
-
-	return mddev_congested(&rs->md, bits);
-}
-
 /*
···
 		goto bad_md_start;
 	}
 
-	rs->callbacks.congested_fn = raid_is_congested;
-	dm_table_add_target_callbacks(ti->table, &rs->callbacks);
-
 	/* If raid4/5/6 journal mode explicitly requested (only possible with journal dev) -> set it */
 	if (test_bit(__CTR_FLAG_JOURNAL_MODE, &rs->ctr_flags)) {
 		r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode);
···
 {
 	struct raid_set *rs = ti->private;
 
-	list_del_init(&rs->callbacks.list);
 	md_stop(&rs->md);
 	raid_set_free(rs);
 }
drivers/md/dm-table.c (+1 -36)

···
 	void *event_context;
 
 	struct dm_md_mempools *mempools;
-
-	struct list_head target_callbacks;
 };
···
 		return -ENOMEM;
 
 	INIT_LIST_HEAD(&t->devices);
-	INIT_LIST_HEAD(&t->target_callbacks);
 
 	if (!num_targets)
 		num_targets = KEYS_PER_NODE;
···
  * This upgrades the mode on an already open dm_dev, being
  * careful to leave things as they were if we fail to reopen the
  * device and not to touch the existing bdev field in case
- * it is accessed concurrently inside dm_table_any_congested().
+ * it is accessed concurrently.
  */
 static int upgrade_mode(struct dm_dev_internal *dd, fmode_t new_mode,
 			struct mapped_device *md)
···
 	}
 
 	return 0;
-}
-
-void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb)
-{
-	list_add(&cb->list, &t->target_callbacks);
-}
-EXPORT_SYMBOL_GPL(dm_table_add_target_callbacks);
-
-int dm_table_any_congested(struct dm_table *t, int bdi_bits)
-{
-	struct dm_dev_internal *dd;
-	struct list_head *devices = dm_table_get_devices(t);
-	struct dm_target_callbacks *cb;
-	int r = 0;
-
-	list_for_each_entry(dd, devices, list) {
-		struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev);
-		char b[BDEVNAME_SIZE];
-
-		if (likely(q))
-			r |= bdi_congested(q->backing_dev_info, bdi_bits);
-		else
-			DMWARN_LIMIT("%s: any_congested: nonexistent device %s",
-				     dm_device_name(t->md),
-				     bdevname(dd->dm_dev->bdev, b));
-	}
-
-	list_for_each_entry(cb, &t->target_callbacks, list)
-		if (cb->congested_fn)
-			r |= cb->congested_fn(cb, bdi_bits);
-
-	return r;
 }
 
 struct mapped_device *dm_table_get_md(struct dm_table *t)
drivers/md/dm-thin.c (-16)

···
 	struct pool *pool;
 	struct dm_dev *data_dev;
 	struct dm_dev *metadata_dev;
-	struct dm_target_callbacks callbacks;
 
 	dm_block_t low_water_blocks;
 	struct pool_features requested_pf; /* Features requested during table load */
···
 		}
 	}
 
-static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
-{
-	struct pool_c *pt = container_of(cb, struct pool_c, callbacks);
-	struct request_queue *q;
-
-	if (get_pool_mode(pt->pool) == PM_OUT_OF_DATA_SPACE)
-		return 1;
-
-	q = bdev_get_queue(pt->data_dev->bdev);
-	return bdi_congested(q->backing_dev_info, bdi_bits);
-}
-
 static void requeue_bios(struct pool *pool)
 {
 	struct thin_c *tc;
···
 
 	dm_pool_register_pre_commit_callback(pool->pmd,
 					     metadata_pre_commit_callback, pool);
-
-	pt->callbacks.congested_fn = pool_is_congested;
-	dm_table_add_target_callbacks(ti->table, &pt->callbacks);
 
 	mutex_unlock(&dm_thin_pool_table.mutex);
drivers/md/dm.c (-33)

···
 	return ret;
 }
 
-static int dm_any_congested(void *congested_data, int bdi_bits)
-{
-	int r = bdi_bits;
-	struct mapped_device *md = congested_data;
-	struct dm_table *map;
-
-	if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
-		if (dm_request_based(md)) {
-			/*
-			 * With request-based DM we only need to check the
-			 * top-level queue for congestion.
-			 */
-			struct backing_dev_info *bdi = md->queue->backing_dev_info;
-			r = bdi->wb.congested & bdi_bits;
-		} else {
-			map = dm_get_live_table_fast(md);
-			if (map)
-				r = dm_table_any_congested(map, bdi_bits);
-			dm_put_live_table_fast(md);
-		}
-	}
-
-	return r;
-}
-
 /*-----------------------------------------------------------------
  * An IDR is used to keep track of allocated minor numbers.
  *---------------------------------------------------------------*/
···
 }
 EXPORT_SYMBOL_GPL(dm_get_queue_limits);
 
-static void dm_init_congested_fn(struct mapped_device *md)
-{
-	md->queue->backing_dev_info->congested_data = md;
-	md->queue->backing_dev_info->congested_fn = dm_any_congested;
-}
-
 /*
  * Setup the DM device's queue based on md's type
  */
···
 			DMERR("Cannot initialize queue for request-based dm-mq mapped device");
 			return r;
 		}
-		dm_init_congested_fn(md);
 		break;
 	case DM_TYPE_BIO_BASED:
 	case DM_TYPE_DAX_BIO_BASED:
 	case DM_TYPE_NVME_BIO_BASED:
-		dm_init_congested_fn(md);
 		break;
 	case DM_TYPE_NONE:
 		WARN_ON_ONCE(true);
drivers/md/dm.h (-1)

···
 void dm_table_presuspend_undo_targets(struct dm_table *t);
 void dm_table_postsuspend_targets(struct dm_table *t);
 int dm_table_resume_targets(struct dm_table *t);
-int dm_table_any_congested(struct dm_table *t, int bdi_bits);
 enum dm_queue_mode dm_table_get_type(struct dm_table *t);
 struct target_type *dm_table_get_immutable_target_type(struct dm_table *t);
 struct dm_target *dm_table_get_immutable_target(struct dm_table *t);
drivers/md/md-linear.c (-24)

···
 	return conf->disks + lo;
 }
 
-/*
- * In linear_congested() conf->raid_disks is used as a copy of
- * mddev->raid_disks to iterate conf->disks[], because conf->raid_disks
- * and conf->disks[] are created in linear_conf(), they are always
- * consitent with each other, but mddev->raid_disks does not.
- */
-static int linear_congested(struct mddev *mddev, int bits)
-{
-	struct linear_conf *conf;
-	int i, ret = 0;
-
-	rcu_read_lock();
-	conf = rcu_dereference(mddev->private);
-
-	for (i = 0; i < conf->raid_disks && !ret ; i++) {
-		struct request_queue *q = bdev_get_queue(conf->disks[i].rdev->bdev);
-		ret |= bdi_congested(q->backing_dev_info, bits);
-	}
-
-	rcu_read_unlock();
-	return ret;
-}
-
 static sector_t linear_size(struct mddev *mddev, sector_t sectors, int raid_disks)
 {
 	struct linear_conf *conf;
···
 	.hot_add_disk	= linear_add,
 	.size		= linear_size,
 	.quiesce	= linear_quiesce,
-	.congested	= linear_congested,
 };
 
 static int __init linear_init (void)
drivers/md/md-multipath.c (-23)

···
 	seq_putc(seq, ']');
 }
 
-static int multipath_congested(struct mddev *mddev, int bits)
-{
-	struct mpconf *conf = mddev->private;
-	int i, ret = 0;
-
-	rcu_read_lock();
-	for (i = 0; i < mddev->raid_disks ; i++) {
-		struct md_rdev *rdev = rcu_dereference(conf->multipaths[i].rdev);
-		if (rdev && !test_bit(Faulty, &rdev->flags)) {
-			struct request_queue *q = bdev_get_queue(rdev->bdev);
-
-			ret |= bdi_congested(q->backing_dev_info, bits);
-			/* Just like multipath_map, we just check the
-			 * first available device
-			 */
-			break;
-		}
-	}
-	rcu_read_unlock();
-	return ret;
-}
-
 /*
  * Careful, this can execute in IRQ contexts as well!
  */
···
 	.hot_add_disk	= multipath_add_disk,
 	.hot_remove_disk= multipath_remove_disk,
 	.size		= multipath_size,
-	.congested	= multipath_congested,
 };
 
 static int __init multipath_init (void)
drivers/md/md.c (-23)

···
 }
 EXPORT_SYMBOL_GPL(mddev_resume);
 
-int mddev_congested(struct mddev *mddev, int bits)
-{
-	struct md_personality *pers = mddev->pers;
-	int ret = 0;
-
-	rcu_read_lock();
-	if (mddev->suspended)
-		ret = 1;
-	else if (pers && pers->congested)
-		ret = pers->congested(mddev, bits);
-	rcu_read_unlock();
-	return ret;
-}
-EXPORT_SYMBOL_GPL(mddev_congested);
-static int md_congested(void *data, int bits)
-{
-	struct mddev *mddev = data;
-	return mddev_congested(mddev, bits);
-}
-
 /*
  * Generic flush handling for md
  */
···
 			blk_queue_flag_set(QUEUE_FLAG_NONROT, mddev->queue);
 		else
 			blk_queue_flag_clear(QUEUE_FLAG_NONROT, mddev->queue);
-		mddev->queue->backing_dev_info->congested_data = mddev;
-		mddev->queue->backing_dev_info->congested_fn = md_congested;
 	}
 	if (pers->sync_request) {
 		if (mddev->kobj.sd &&
···
 
 	__md_stop_writes(mddev);
 	__md_stop(mddev);
-	mddev->queue->backing_dev_info->congested_fn = NULL;
 
 	/* tell userspace to handle 'inactive' */
 	sysfs_notify_dirent_safe(mddev->sysfs_state);
drivers/md/md.h (-4)

···
 	 * array.
 	 */
 	void *(*takeover) (struct mddev *mddev);
-	/* congested implements bdi.congested_fn().
-	 * Will not be called while array is 'suspended' */
-	int (*congested)(struct mddev *mddev, int bits);
 	/* Changes the consistency policy of an active array. */
 	int (*change_consistency_policy)(struct mddev *mddev, const char *buf);
 };
···
 extern void md_error(struct mddev *mddev, struct md_rdev *rdev);
 extern void md_finish_reshape(struct mddev *mddev);
 
-extern int mddev_congested(struct mddev *mddev, int bits);
 extern bool __must_check md_flush_request(struct mddev *mddev, struct bio *bio);
 extern void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
 			   sector_t sector, int size, struct page *page);
drivers/md/raid0.c (-16)

···
 	 (1L << MD_HAS_PPL) |		\
 	 (1L << MD_HAS_MULTIPLE_PPLS))
 
-static int raid0_congested(struct mddev *mddev, int bits)
-{
-	struct r0conf *conf = mddev->private;
-	struct md_rdev **devlist = conf->devlist;
-	int raid_disks = conf->strip_zone[0].nb_dev;
-	int i, ret = 0;
-
-	for (i = 0; i < raid_disks && !ret ; i++) {
-		struct request_queue *q = bdev_get_queue(devlist[i]->bdev);
-
-		ret |= bdi_congested(q->backing_dev_info, bits);
-	}
-	return ret;
-}
-
 /*
  * inform the user of the raid configuration
  */
···
 	.size		= raid0_size,
 	.takeover	= raid0_takeover,
 	.quiesce	= raid0_quiesce,
-	.congested	= raid0_congested,
 };
 
 static int __init raid0_init (void)
drivers/md/raid1.c (-31)

···
 	return best_disk;
 }
 
-static int raid1_congested(struct mddev *mddev, int bits)
-{
-	struct r1conf *conf = mddev->private;
-	int i, ret = 0;
-
-	if ((bits & (1 << WB_async_congested)) &&
-	    conf->pending_count >= max_queued_requests)
-		return 1;
-
-	rcu_read_lock();
-	for (i = 0; i < conf->raid_disks * 2; i++) {
-		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
-		if (rdev && !test_bit(Faulty, &rdev->flags)) {
-			struct request_queue *q = bdev_get_queue(rdev->bdev);
-
-			BUG_ON(!q);
-
-			/* Note the '|| 1' - when read_balance prefers
-			 * non-congested targets, it can be removed
-			 */
-			if ((bits & (1 << WB_async_congested)) || 1)
-				ret |= bdi_congested(q->backing_dev_info, bits);
-			else
-				ret &= bdi_congested(q->backing_dev_info, bits);
-		}
-	}
-	rcu_read_unlock();
-	return ret;
-}
-
 static void flush_bio_list(struct r1conf *conf, struct bio *bio)
 {
 	/* flush any pending bitmap writes to disk before proceeding w/ I/O */
···
 	.check_reshape	= raid1_reshape,
 	.quiesce	= raid1_quiesce,
 	.takeover	= raid1_takeover,
-	.congested	= raid1_congested,
 };
 
 static int __init raid_init(void)
drivers/md/raid10.c (-26)

···
 	return rdev;
 }
 
-static int raid10_congested(struct mddev *mddev, int bits)
-{
-	struct r10conf *conf = mddev->private;
-	int i, ret = 0;
-
-	if ((bits & (1 << WB_async_congested)) &&
-	    conf->pending_count >= max_queued_requests)
-		return 1;
-
-	rcu_read_lock();
-	for (i = 0;
-	     (i < conf->geo.raid_disks || i < conf->prev.raid_disks)
-		     && ret == 0;
-	     i++) {
-		struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
-		if (rdev && !test_bit(Faulty, &rdev->flags)) {
-			struct request_queue *q = bdev_get_queue(rdev->bdev);
-
-			ret |= bdi_congested(q->backing_dev_info, bits);
-		}
-	}
-	rcu_read_unlock();
-	return ret;
-}
-
 static void flush_pending_writes(struct r10conf *conf)
 {
 	/* Any writes that have been queued but are awaiting
···
 	.start_reshape	= raid10_start_reshape,
 	.finish_reshape	= raid10_finish_reshape,
 	.update_reshape_pos = raid10_update_reshape_pos,
-	.congested	= raid10_congested,
 };
 
 static int __init raid_init(void)
drivers/md/raid5.c (-25)

···
 	}
 }
 
-static int raid5_congested(struct mddev *mddev, int bits)
-{
-	struct r5conf *conf = mddev->private;
-
-	/* No difference between reads and writes.  Just check
-	 * how busy the stripe_cache is
-	 */
-
-	if (test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state))
-		return 1;
-
-	/* Also checks whether there is pressure on r5cache log space */
-	if (test_bit(R5C_LOG_TIGHT, &conf->cache_state))
-		return 1;
-	if (conf->quiesce)
-		return 1;
-	if (atomic_read(&conf->empty_inactive_list_nr))
-		return 1;
-
-	return 0;
-}
-
 static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
 {
 	struct r5conf *conf = mddev->private;
···
 	.finish_reshape = raid5_finish_reshape,
 	.quiesce	= raid5_quiesce,
 	.takeover	= raid6_takeover,
-	.congested	= raid5_congested,
 	.change_consistency_policy = raid5_change_consistency_policy,
 };
 static struct md_personality raid5_personality =
···
 	.finish_reshape = raid5_finish_reshape,
 	.quiesce	= raid5_quiesce,
 	.takeover	= raid5_takeover,
-	.congested	= raid5_congested,
 	.change_consistency_policy = raid5_change_consistency_policy,
 };
 
···
 	.finish_reshape = raid5_finish_reshape,
 	.quiesce	= raid5_quiesce,
 	.takeover	= raid4_takeover,
-	.congested	= raid5_congested,
 	.change_consistency_policy = raid5_change_consistency_policy,
 };
 
fs/btrfs/disk-io.c (-23)

···
 	return ERR_PTR(ret);
 }
 
-static int btrfs_congested_fn(void *congested_data, int bdi_bits)
-{
-	struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data;
-	int ret = 0;
-	struct btrfs_device *device;
-	struct backing_dev_info *bdi;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) {
-		if (!device->bdev)
-			continue;
-		bdi = device->bdev->bd_bdi;
-		if (bdi_congested(bdi, bdi_bits)) {
-			ret = 1;
-			break;
-		}
-	}
-	rcu_read_unlock();
-	return ret;
-}
-
 /*
  * called by the kthread helper functions to finally call the bio end_io
  * functions. This is where read checksum verification actually happens
···
 		goto fail_sb_buffer;
 	}
 
-	sb->s_bdi->congested_fn = btrfs_congested_fn;
-	sb->s_bdi->congested_data = fs_info;
 	sb->s_bdi->capabilities |= BDI_CAP_CGROUP_WRITEBACK;
 	sb->s_bdi->ra_pages = VM_READAHEAD_PAGES;
 	sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super);
include/linux/backing-dev-defs.h (-4)

···
 	WB_sync_congested,	/* The sync queue is getting full */
 };
 
-typedef int (congested_fn)(void *, int);
-
 enum wb_stat_item {
 	WB_RECLAIMABLE,
 	WB_WRITEBACK,
···
 	struct list_head bdi_list;
 	unsigned long ra_pages;	/* max readahead in PAGE_SIZE units */
 	unsigned long io_pages;	/* max allowed IO size */
-	congested_fn *congested_fn; /* Function pointer if device is md/dm */
-	void *congested_data;	/* Pointer to aux data for congested func */
 
 	struct kref refcnt;	/* Reference counter for the structure */
 	unsigned int capabilities; /* Device capabilities */
include/linux/backing-dev.h (-4)

···
 
 static inline int wb_congested(struct bdi_writeback *wb, int cong_bits)
 {
-	struct backing_dev_info *bdi = wb->bdi;
-
-	if (bdi->congested_fn)
-		return bdi->congested_fn(bdi->congested_data, cong_bits);
 	return wb->congested & cong_bits;
 }
 
include/linux/device-mapper.h (-11)

···
 	bool discards_supported:1;
 };
 
-/* Each target can link one of these into the table */
-struct dm_target_callbacks {
-	struct list_head list;
-	int (*congested_fn) (struct dm_target_callbacks *, int);
-};
-
 void *dm_per_bio_data(struct bio *bio, size_t data_size);
 struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size);
 unsigned dm_bio_get_target_bio_nr(const struct bio *bio);
···
  */
 int dm_table_add_target(struct dm_table *t, const char *type,
 			sector_t start, sector_t len, char *params);
-
-/*
- * Target_ctr should call this if it needs to add any callbacks.
- */
-void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callbacks *cb);
 
 /*
  * Target can use this to set the table's type.