[PATCH] drivers/block/ll_rw_blk.c: cleanups

This patch contains the following cleanups:
- make needlessly global code static
- remove the following unused global functions:
- blkdev_scsi_issue_flush_fn
- __blk_attempt_remerge
- remove the following unused EXPORT_SYMBOLs:
- blk_phys_contig_segment
- blk_hw_contig_segment
- blkdev_scsi_issue_flush_fn
- __blk_attempt_remerge

Signed-off-by: Adrian Bunk <bunk@stusta.de>
Acked-by: Jens Axboe <axboe@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>

Authored by Adrian Bunk and committed by Linus Torvalds (commit 93d17d3d, parent e8e1c729).

Overall diffstat: +8 -65

drivers/block/ll_rw_blk.c: +8 -59
(columns: old line, new line; "-" = line removed, "+" = line added)

···
  37    37
  38    38   static void blk_unplug_work(void *data);
  39    39   static void blk_unplug_timeout(unsigned long data);
        40 + static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io);
  40    41
  41    42   /*
  42    43    * For the allocated request tables
···
1138  1137   }
1139  1138
1140  1139
1141       - int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
      1140 + static int blk_phys_contig_segment(request_queue_t *q, struct bio *bio,
1142  1141                               struct bio *nxt)
1143  1142   {
1144  1143           if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER)))
···
1159  1158           return 0;
1160  1159   }
1161  1160
1162       - EXPORT_SYMBOL(blk_phys_contig_segment);
1163       -
1164       - int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
      1161 + static int blk_hw_contig_segment(request_queue_t *q, struct bio *bio,
1165  1162                             struct bio *nxt)
1166  1163   {
1167  1164           if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
···
1174  1175
1175  1176           return 1;
1176  1177   }
1177       -
1178       - EXPORT_SYMBOL(blk_hw_contig_segment);
1179  1178
1180  1179   /*
1181  1180    * map a request to scatterlist, return number of sg entries setup. Caller
···
1822  1825    * is the behaviour we want though - once it gets a wakeup it should be given
1823  1826    * a nice run.
1824  1827    */
1825       - void ioc_set_batching(request_queue_t *q, struct io_context *ioc)
      1828 + static void ioc_set_batching(request_queue_t *q, struct io_context *ioc)
1826  1829   {
1827  1830           if (!ioc || ioc_batching(q, ioc))
1828  1831                   return;
···
2251  2254
2252  2255   EXPORT_SYMBOL(blkdev_issue_flush);
2253  2256
2254       - /**
2255       -  * blkdev_scsi_issue_flush_fn - issue flush for SCSI devices
2256       -  * @q: device queue
2257       -  * @disk: gendisk
2258       -  * @error_sector: error offset
2259       -  *
2260       -  * Description:
2261       -  *    Devices understanding the SCSI command set, can use this function as
2262       -  *    a helper for issuing a cache flush. Note: driver is required to store
2263       -  *    the error offset (in case of error flushing) in ->sector of struct
2264       -  *    request.
2265       -  */
2266       - int blkdev_scsi_issue_flush_fn(request_queue_t *q, struct gendisk *disk,
2267       -                                sector_t *error_sector)
2268       - {
2269       -         struct request *rq = blk_get_request(q, WRITE, __GFP_WAIT);
2270       -         int ret;
2271       -
2272       -         rq->flags |= REQ_BLOCK_PC | REQ_SOFTBARRIER;
2273       -         rq->sector = 0;
2274       -         memset(rq->cmd, 0, sizeof(rq->cmd));
2275       -         rq->cmd[0] = 0x35;
2276       -         rq->cmd_len = 12;
2277       -         rq->data = NULL;
2278       -         rq->data_len = 0;
2279       -         rq->timeout = 60 * HZ;
2280       -
2281       -         ret = blk_execute_rq(q, disk, rq);
2282       -
2283       -         if (ret && error_sector)
2284       -                 *error_sector = rq->sector;
2285       -
2286       -         blk_put_request(rq);
2287       -         return ret;
2288       - }
2289       -
2290       - EXPORT_SYMBOL(blkdev_scsi_issue_flush_fn);
2291       -
2292       - void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
      2257 + static void drive_stat_acct(struct request *rq, int nr_sectors, int new_io)
2293  2258   {
2294  2259           int rw = rq_data_dir(rq);
2295  2260
···
2509  2550   }
2510  2551
2511  2552   EXPORT_SYMBOL(blk_attempt_remerge);
2512       -
2513       - /*
2514       -  * Non-locking blk_attempt_remerge variant.
2515       -  */
2516       - void __blk_attempt_remerge(request_queue_t *q, struct request *rq)
2517       - {
2518       -         attempt_back_merge(q, rq);
2519       - }
2520       -
2521       - EXPORT_SYMBOL(__blk_attempt_remerge);
2522  2553
2523  2554   static int __make_request(request_queue_t *q, struct bio *bio)
2524  2555   {
···
2920  2971
2921  2972   EXPORT_SYMBOL(submit_bio);
2922  2973
2923       - void blk_recalc_rq_segments(struct request *rq)
      2974 + static void blk_recalc_rq_segments(struct request *rq)
2924  2975   {
2925  2976           struct bio *bio, *prevbio = NULL;
2926  2977           int nr_phys_segs, nr_hw_segs;
···
2962  3013           rq->nr_hw_segments = nr_hw_segs;
2963  3014   }
2964  3015
2965       - void blk_recalc_rq_sectors(struct request *rq, int nsect)
      3016 + static void blk_recalc_rq_sectors(struct request *rq, int nsect)
2966  3017   {
2967  3018           if (blk_fs_request(rq)) {
2968  3019                   rq->hard_sector += nsect;
···
3550  3601           .store = queue_attr_store,
3551  3602   };
3552  3603
3553       - struct kobj_type queue_ktype = {
      3604 + static struct kobj_type queue_ktype = {
3554  3605           .sysfs_ops = &queue_sysfs_ops,
3555  3606           .default_attrs = default_attrs,
3556  3607   };
include/linux/blkdev.h: +0 -6
(columns: old line, new line; "-" = line removed)

···
 539   539   extern void blk_put_request(struct request *);
 540   540   extern void blk_end_sync_rq(struct request *rq);
 541   541   extern void blk_attempt_remerge(request_queue_t *, struct request *);
 542       - extern void __blk_attempt_remerge(request_queue_t *, struct request *);
 543   542   extern struct request *blk_get_request(request_queue_t *, int, int);
 544   543   extern void blk_insert_request(request_queue_t *, struct request *, int, void *);
 545   544   extern void blk_requeue_request(request_queue_t *, struct request *);
 546   545   extern void blk_plug_device(request_queue_t *);
 547   546   extern int blk_remove_plug(request_queue_t *);
 548   547   extern void blk_recount_segments(request_queue_t *, struct bio *);
 549       - extern int blk_phys_contig_segment(request_queue_t *q, struct bio *, struct bio *);
 550       - extern int blk_hw_contig_segment(request_queue_t *q, struct bio *, struct bio *);
 551   548   extern int scsi_cmd_ioctl(struct file *, struct gendisk *, unsigned int, void __user *);
 552   549   extern void blk_start_queue(request_queue_t *q);
 553   550   extern void blk_stop_queue(request_queue_t *q);
···
 628   631   extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 629   632   extern void blk_queue_ordered(request_queue_t *, int);
 630   633   extern void blk_queue_issue_flush_fn(request_queue_t *, issue_flush_fn *);
 631       - extern int blkdev_scsi_issue_flush_fn(request_queue_t *, struct gendisk *, sector_t *);
 632   634   extern struct request *blk_start_pre_flush(request_queue_t *,struct request *);
 633   635   extern int blk_complete_barrier_rq(request_queue_t *, struct request *, int);
 634   636   extern int blk_complete_barrier_rq_locked(request_queue_t *, struct request *, int);
···
 670   674   #define MAX_SEGMENT_SIZE 65536
 671   675
 672   676   #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
 673       -
 674       - extern void drive_stat_acct(struct request *, int, int);
 675   677
 676   678   static inline int queue_hardsect_size(request_queue_t *q)
 677   679   {