Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block

* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
block: kill swap_io_context()
as-iosched: fix inconsistent ioc->lock context
ide-cd: fix leftover data BUG
block: make elevator lib checkpatch compliant
cfq-iosched: make checkpatch compliant
block: make core bits checkpatch compliant
block: new end request handling interface should take unsigned byte counts
unexport add_disk_randomness
block/sunvdc.c:print_version() must be __devinit
splice: always update atime in direct splice

+219 -253
+6 -18
block/as-iosched.c
··· 170 170 171 171 static void as_trim(struct io_context *ioc) 172 172 { 173 - spin_lock(&ioc->lock); 173 + spin_lock_irq(&ioc->lock); 174 174 if (ioc->aic) 175 175 free_as_io_context(ioc->aic); 176 176 ioc->aic = NULL; 177 - spin_unlock(&ioc->lock); 177 + spin_unlock_irq(&ioc->lock); 178 178 } 179 179 180 180 /* Called when the task exits */ ··· 235 235 aic = RQ_IOC(rq)->aic; 236 236 237 237 if (rq_is_sync(rq) && aic) { 238 - spin_lock(&aic->lock); 238 + unsigned long flags; 239 + 240 + spin_lock_irqsave(&aic->lock, flags); 239 241 set_bit(AS_TASK_IORUNNING, &aic->state); 240 242 aic->last_end_request = jiffies; 241 - spin_unlock(&aic->lock); 243 + spin_unlock_irqrestore(&aic->lock, flags); 242 244 } 243 245 244 246 put_io_context(RQ_IOC(rq)); ··· 1268 1266 */ 1269 1267 if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) { 1270 1268 if (time_before(rq_fifo_time(next), rq_fifo_time(req))) { 1271 - struct io_context *rioc = RQ_IOC(req); 1272 - struct io_context *nioc = RQ_IOC(next); 1273 - 1274 1269 list_move(&req->queuelist, &next->queuelist); 1275 1270 rq_set_fifo_time(req, rq_fifo_time(next)); 1276 - /* 1277 - * Don't copy here but swap, because when anext is 1278 - * removed below, it must contain the unused context 1279 - */ 1280 - if (rioc != nioc) { 1281 - double_spin_lock(&rioc->lock, &nioc->lock, 1282 - rioc < nioc); 1283 - swap_io_context(&rioc, &nioc); 1284 - double_spin_unlock(&rioc->lock, &nioc->lock, 1285 - rioc < nioc); 1286 - } 1287 1271 } 1288 1272 } 1289 1273
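
A quick note on the locking change above: the as-iosched hunks replace plain spin_lock() with the IRQ-disabling variants because these locks can also be taken from request-completion context. Below is a minimal, self-contained sketch of the two patterns, using a made-up demo_lock and timestamp rather than the scheduler's real ioc/aic structures: spin_lock_irq() for process context where interrupts are known to be enabled, spin_lock_irqsave() where the caller's interrupt state is unknown.

#include <linux/spinlock.h>
#include <linux/jiffies.h>

static DEFINE_SPINLOCK(demo_lock);      /* hypothetical, stands in for ioc->lock */
static unsigned long demo_stamp;

/*
 * Process context with interrupts known to be enabled: spin_lock_irq()
 * turns them off so a completion running in interrupt context on this
 * CPU cannot try to take demo_lock and deadlock against us.
 */
static void demo_trim(void)
{
        spin_lock_irq(&demo_lock);
        demo_stamp = 0;
        spin_unlock_irq(&demo_lock);
}

/*
 * Caller's interrupt state is unknown (it may already run with IRQs
 * disabled): the irqsave/irqrestore pair preserves whatever it was.
 */
static void demo_completed(void)
{
        unsigned long flags;

        spin_lock_irqsave(&demo_lock, flags);
        demo_stamp = jiffies;
        spin_unlock_irqrestore(&demo_lock, flags);
}
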
+2 -3
block/blk-barrier.c
··· 26 26 { 27 27 if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) && 28 28 prepare_flush_fn == NULL) { 29 - printk(KERN_ERR "blk_queue_ordered: prepare_flush_fn required\n"); 29 + printk(KERN_ERR "%s: prepare_flush_fn required\n", 30 + __FUNCTION__); 30 31 return -EINVAL; 31 32 } 32 33 ··· 48 47 49 48 return 0; 50 49 } 51 - 52 50 EXPORT_SYMBOL(blk_queue_ordered); 53 51 54 52 /* ··· 315 315 bio_put(bio); 316 316 return ret; 317 317 } 318 - 319 318 EXPORT_SYMBOL(blkdev_issue_flush);
+80 -87
block/blk-core.c
··· 3 3 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics 4 4 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE 5 5 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de> 6 - * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au> - July2000 6 + * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au> 7 + * - July2000 7 8 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001 8 9 */ 9 10 ··· 43 42 /* 44 43 * For queue allocation 45 44 */ 46 - struct kmem_cache *blk_requestq_cachep = NULL; 45 + struct kmem_cache *blk_requestq_cachep; 47 46 48 47 /* 49 48 * Controlling structure to kblockd ··· 138 137 error = -EIO; 139 138 140 139 if (unlikely(nbytes > bio->bi_size)) { 141 - printk("%s: want %u bytes done, only %u left\n", 140 + printk(KERN_ERR "%s: want %u bytes done, %u left\n", 142 141 __FUNCTION__, nbytes, bio->bi_size); 143 142 nbytes = bio->bi_size; 144 143 } ··· 162 161 { 163 162 int bit; 164 163 165 - printk("%s: dev %s: type=%x, flags=%x\n", msg, 164 + printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg, 166 165 rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type, 167 166 rq->cmd_flags); 168 167 169 - printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector, 170 - rq->nr_sectors, 171 - rq->current_nr_sectors); 172 - printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len); 168 + printk(KERN_INFO " sector %llu, nr/cnr %lu/%u\n", 169 + (unsigned long long)rq->sector, 170 + rq->nr_sectors, 171 + rq->current_nr_sectors); 172 + printk(KERN_INFO " bio %p, biotail %p, buffer %p, data %p, len %u\n", 173 + rq->bio, rq->biotail, 174 + rq->buffer, rq->data, 175 + rq->data_len); 173 176 174 177 if (blk_pc_request(rq)) { 175 - printk("cdb: "); 178 + printk(KERN_INFO " cdb: "); 176 179 for (bit = 0; bit < sizeof(rq->cmd); bit++) 177 180 printk("%02x ", rq->cmd[bit]); 178 181 printk("\n"); 179 182 } 180 183 } 181 - 182 184 EXPORT_SYMBOL(blk_dump_rq_flags); 183 185 184 186 /* ··· 208 204 blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG); 209 205 } 210 206 } 211 - 212 207 EXPORT_SYMBOL(blk_plug_device); 213 208 214 209 /* ··· 224 221 del_timer(&q->unplug_timer); 225 222 return 1; 226 223 } 227 - 228 224 EXPORT_SYMBOL(blk_remove_plug); 229 225 230 226 /* ··· 330 328 kblockd_schedule_work(&q->unplug_work); 331 329 } 332 330 } 333 - 334 331 EXPORT_SYMBOL(blk_start_queue); 335 332 336 333 /** ··· 409 408 } 410 409 EXPORT_SYMBOL(blk_put_queue); 411 410 412 - void blk_cleanup_queue(struct request_queue * q) 411 + void blk_cleanup_queue(struct request_queue *q) 413 412 { 414 413 mutex_lock(&q->sysfs_lock); 415 414 set_bit(QUEUE_FLAG_DEAD, &q->queue_flags); ··· 420 419 421 420 blk_put_queue(q); 422 421 } 423 - 424 422 EXPORT_SYMBOL(blk_cleanup_queue); 425 423 426 424 static int blk_init_free_list(struct request_queue *q) ··· 575 575 576 576 return 1; 577 577 } 578 - 579 578 EXPORT_SYMBOL(blk_get_queue); 580 579 581 580 static inline void blk_free_request(struct request_queue *q, struct request *rq) ··· 773 774 */ 774 775 if (ioc_batching(q, ioc)) 775 776 ioc->nr_batch_requests--; 776 - 777 + 777 778 rq_init(q, rq); 778 779 779 780 blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ); ··· 887 888 888 889 elv_requeue_request(q, rq); 889 890 } 890 - 891 891 EXPORT_SYMBOL(blk_requeue_request); 892 892 893 893 /** ··· 937 939 blk_start_queueing(q); 938 940 spin_unlock_irqrestore(q->queue_lock, flags); 939 941 } 940 - 941 942 
EXPORT_SYMBOL(blk_insert_request); 942 943 943 944 /* ··· 944 947 * queue lock is held and interrupts disabled, as we muck with the 945 948 * request queue list. 946 949 */ 947 - static inline void add_request(struct request_queue * q, struct request * req) 950 + static inline void add_request(struct request_queue *q, struct request *req) 948 951 { 949 952 drive_stat_acct(req, 1); 950 953 ··· 954 957 */ 955 958 __elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0); 956 959 } 957 - 960 + 958 961 /* 959 962 * disk_round_stats() - Round off the performance stats on a struct 960 963 * disk_stats. ··· 984 987 } 985 988 disk->stamp = now; 986 989 } 987 - 988 990 EXPORT_SYMBOL_GPL(disk_round_stats); 989 991 990 992 /* ··· 1013 1017 freed_request(q, rw, priv); 1014 1018 } 1015 1019 } 1016 - 1017 1020 EXPORT_SYMBOL_GPL(__blk_put_request); 1018 1021 1019 1022 void blk_put_request(struct request *req) ··· 1030 1035 spin_unlock_irqrestore(q->queue_lock, flags); 1031 1036 } 1032 1037 } 1033 - 1034 1038 EXPORT_SYMBOL(blk_put_request); 1035 1039 1036 1040 void init_request_from_bio(struct request *req, struct bio *bio) ··· 1090 1096 1091 1097 el_ret = elv_merge(q, &req, bio); 1092 1098 switch (el_ret) { 1093 - case ELEVATOR_BACK_MERGE: 1094 - BUG_ON(!rq_mergeable(req)); 1099 + case ELEVATOR_BACK_MERGE: 1100 + BUG_ON(!rq_mergeable(req)); 1095 1101 1096 - if (!ll_back_merge_fn(q, req, bio)) 1097 - break; 1102 + if (!ll_back_merge_fn(q, req, bio)) 1103 + break; 1098 1104 1099 - blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE); 1105 + blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE); 1100 1106 1101 - req->biotail->bi_next = bio; 1102 - req->biotail = bio; 1103 - req->nr_sectors = req->hard_nr_sectors += nr_sectors; 1104 - req->ioprio = ioprio_best(req->ioprio, prio); 1105 - drive_stat_acct(req, 0); 1106 - if (!attempt_back_merge(q, req)) 1107 - elv_merged_request(q, req, el_ret); 1108 - goto out; 1107 + req->biotail->bi_next = bio; 1108 + req->biotail = bio; 1109 + req->nr_sectors = req->hard_nr_sectors += nr_sectors; 1110 + req->ioprio = ioprio_best(req->ioprio, prio); 1111 + drive_stat_acct(req, 0); 1112 + if (!attempt_back_merge(q, req)) 1113 + elv_merged_request(q, req, el_ret); 1114 + goto out; 1109 1115 1110 - case ELEVATOR_FRONT_MERGE: 1111 - BUG_ON(!rq_mergeable(req)); 1116 + case ELEVATOR_FRONT_MERGE: 1117 + BUG_ON(!rq_mergeable(req)); 1112 1118 1113 - if (!ll_front_merge_fn(q, req, bio)) 1114 - break; 1119 + if (!ll_front_merge_fn(q, req, bio)) 1120 + break; 1115 1121 1116 - blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE); 1122 + blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE); 1117 1123 1118 - bio->bi_next = req->bio; 1119 - req->bio = bio; 1124 + bio->bi_next = req->bio; 1125 + req->bio = bio; 1120 1126 1121 - /* 1122 - * may not be valid. if the low level driver said 1123 - * it didn't need a bounce buffer then it better 1124 - * not touch req->buffer either... 1125 - */ 1126 - req->buffer = bio_data(bio); 1127 - req->current_nr_sectors = bio_cur_sectors(bio); 1128 - req->hard_cur_sectors = req->current_nr_sectors; 1129 - req->sector = req->hard_sector = bio->bi_sector; 1130 - req->nr_sectors = req->hard_nr_sectors += nr_sectors; 1131 - req->ioprio = ioprio_best(req->ioprio, prio); 1132 - drive_stat_acct(req, 0); 1133 - if (!attempt_front_merge(q, req)) 1134 - elv_merged_request(q, req, el_ret); 1135 - goto out; 1127 + /* 1128 + * may not be valid. if the low level driver said 1129 + * it didn't need a bounce buffer then it better 1130 + * not touch req->buffer either... 
1131 + */ 1132 + req->buffer = bio_data(bio); 1133 + req->current_nr_sectors = bio_cur_sectors(bio); 1134 + req->hard_cur_sectors = req->current_nr_sectors; 1135 + req->sector = req->hard_sector = bio->bi_sector; 1136 + req->nr_sectors = req->hard_nr_sectors += nr_sectors; 1137 + req->ioprio = ioprio_best(req->ioprio, prio); 1138 + drive_stat_acct(req, 0); 1139 + if (!attempt_front_merge(q, req)) 1140 + elv_merged_request(q, req, el_ret); 1141 + goto out; 1136 1142 1137 - /* ELV_NO_MERGE: elevator says don't/can't merge. */ 1138 - default: 1139 - ; 1143 + /* ELV_NO_MERGE: elevator says don't/can't merge. */ 1144 + default: 1145 + ; 1140 1146 } 1141 1147 1142 1148 get_rq: ··· 1344 1350 } 1345 1351 1346 1352 if (unlikely(nr_sectors > q->max_hw_sectors)) { 1347 - printk("bio too big device %s (%u > %u)\n", 1353 + printk(KERN_ERR "bio too big device %s (%u > %u)\n", 1348 1354 bdevname(bio->bi_bdev, b), 1349 1355 bio_sectors(bio), 1350 1356 q->max_hw_sectors); ··· 1433 1439 } while (bio); 1434 1440 current->bio_tail = NULL; /* deactivate */ 1435 1441 } 1436 - 1437 1442 EXPORT_SYMBOL(generic_make_request); 1438 1443 1439 1444 /** ··· 1473 1480 current->comm, task_pid_nr(current), 1474 1481 (rw & WRITE) ? "WRITE" : "READ", 1475 1482 (unsigned long long)bio->bi_sector, 1476 - bdevname(bio->bi_bdev,b)); 1483 + bdevname(bio->bi_bdev, b)); 1477 1484 } 1478 1485 } 1479 1486 1480 1487 generic_make_request(bio); 1481 1488 } 1482 - 1483 1489 EXPORT_SYMBOL(submit_bio); 1484 1490 1485 1491 /** ··· 1510 1518 if (!blk_pc_request(req)) 1511 1519 req->errors = 0; 1512 1520 1513 - if (error) { 1514 - if (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET)) 1515 - printk("end_request: I/O error, dev %s, sector %llu\n", 1521 + if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) { 1522 + printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n", 1516 1523 req->rq_disk ? 
req->rq_disk->disk_name : "?", 1517 1524 (unsigned long long)req->sector); 1518 1525 } ··· 1545 1554 1546 1555 if (unlikely(bio->bi_idx >= bio->bi_vcnt)) { 1547 1556 blk_dump_rq_flags(req, "__end_that"); 1548 - printk("%s: bio idx %d >= vcnt %d\n", 1549 - __FUNCTION__, 1550 - bio->bi_idx, bio->bi_vcnt); 1557 + printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n", 1558 + __FUNCTION__, bio->bi_idx, 1559 + bio->bi_vcnt); 1551 1560 break; 1552 1561 } 1553 1562 ··· 1573 1582 total_bytes += nbytes; 1574 1583 nr_bytes -= nbytes; 1575 1584 1576 - if ((bio = req->bio)) { 1585 + bio = req->bio; 1586 + if (bio) { 1577 1587 /* 1578 1588 * end more in this run, or just return 'not-done' 1579 1589 */ ··· 1618 1626 local_irq_enable(); 1619 1627 1620 1628 while (!list_empty(&local_list)) { 1621 - struct request *rq = list_entry(local_list.next, struct request, donelist); 1629 + struct request *rq; 1622 1630 1631 + rq = list_entry(local_list.next, struct request, donelist); 1623 1632 list_del_init(&rq->donelist); 1624 1633 rq->q->softirq_done_fn(rq); 1625 1634 } 1626 1635 } 1627 1636 1628 - static int __cpuinit blk_cpu_notify(struct notifier_block *self, unsigned long action, 1629 - void *hcpu) 1637 + static int __cpuinit blk_cpu_notify(struct notifier_block *self, 1638 + unsigned long action, void *hcpu) 1630 1639 { 1631 1640 /* 1632 1641 * If a CPU goes away, splice its entries to the current CPU ··· 1669 1676 unsigned long flags; 1670 1677 1671 1678 BUG_ON(!req->q->softirq_done_fn); 1672 - 1679 + 1673 1680 local_irq_save(flags); 1674 1681 1675 1682 cpu_list = &__get_cpu_var(blk_cpu_done); ··· 1678 1685 1679 1686 local_irq_restore(flags); 1680 1687 } 1681 - 1682 1688 EXPORT_SYMBOL(blk_complete_request); 1683 - 1689 + 1684 1690 /* 1685 1691 * queue lock must be held 1686 1692 */ ··· 1838 1846 * 0 - we are done with this request 1839 1847 * 1 - this request is not freed yet, it still has pending buffers. 
1840 1848 **/ 1841 - static int blk_end_io(struct request *rq, int error, int nr_bytes, 1842 - int bidi_bytes, int (drv_callback)(struct request *)) 1849 + static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes, 1850 + unsigned int bidi_bytes, 1851 + int (drv_callback)(struct request *)) 1843 1852 { 1844 1853 struct request_queue *q = rq->q; 1845 1854 unsigned long flags = 0UL; ··· 1882 1889 * 0 - we are done with this request 1883 1890 * 1 - still buffers pending for this request 1884 1891 **/ 1885 - int blk_end_request(struct request *rq, int error, int nr_bytes) 1892 + int blk_end_request(struct request *rq, int error, unsigned int nr_bytes) 1886 1893 { 1887 1894 return blk_end_io(rq, error, nr_bytes, 0, NULL); 1888 1895 } ··· 1901 1908 * 0 - we are done with this request 1902 1909 * 1 - still buffers pending for this request 1903 1910 **/ 1904 - int __blk_end_request(struct request *rq, int error, int nr_bytes) 1911 + int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes) 1905 1912 { 1906 1913 if (blk_fs_request(rq) || blk_pc_request(rq)) { 1907 1914 if (__end_that_request_first(rq, error, nr_bytes)) ··· 1930 1937 * 0 - we are done with this request 1931 1938 * 1 - still buffers pending for this request 1932 1939 **/ 1933 - int blk_end_bidi_request(struct request *rq, int error, int nr_bytes, 1934 - int bidi_bytes) 1940 + int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes, 1941 + unsigned int bidi_bytes) 1935 1942 { 1936 1943 return blk_end_io(rq, error, nr_bytes, bidi_bytes, NULL); 1937 1944 } ··· 1962 1969 * this request still has pending buffers or 1963 1970 * the driver doesn't want to finish this request yet. 1964 1971 **/ 1965 - int blk_end_request_callback(struct request *rq, int error, int nr_bytes, 1972 + int blk_end_request_callback(struct request *rq, int error, 1973 + unsigned int nr_bytes, 1966 1974 int (drv_callback)(struct request *)) 1967 1975 { 1968 1976 return blk_end_io(rq, error, nr_bytes, 0, drv_callback); ··· 1994 2000 { 1995 2001 return queue_work(kblockd_workqueue, work); 1996 2002 } 1997 - 1998 2003 EXPORT_SYMBOL(kblockd_schedule_work); 1999 2004 2000 2005 void kblockd_flush_work(struct work_struct *work)
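
Much of the blk-core.c churn above is checkpatch-driven; in particular every bare printk() gains an explicit KERN_* severity so messages stop going out at the default loglevel. A tiny stand-alone sketch of the pattern follows (all names are hypothetical; this tree still spells the macro __FUNCTION__, which later kernels replace with __func__):

#include <linux/kernel.h>
#include <linux/errno.h>

static int demo_check(unsigned int want, unsigned int left)
{
        /*
         * The old form, printk("..."), logs at the default loglevel and
         * draws a checkpatch warning; always spell out the severity.
         */
        if (want > left) {
                printk(KERN_ERR "%s: want %u bytes done, %u left\n",
                       __FUNCTION__, want, left);
                return -EIO;
        }
        return 0;
}
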
-1
block/blk-exec.c
··· 101 101 102 102 return err; 103 103 } 104 - 105 104 EXPORT_SYMBOL(blk_execute_rq);
-9
block/blk-ioc.c
··· 176 176 } 177 177 EXPORT_SYMBOL(copy_io_context); 178 178 179 - void swap_io_context(struct io_context **ioc1, struct io_context **ioc2) 180 - { 181 - struct io_context *temp; 182 - temp = *ioc1; 183 - *ioc1 = *ioc2; 184 - *ioc2 = temp; 185 - } 186 - EXPORT_SYMBOL(swap_io_context); 187 - 188 179 int __init blk_ioc_init(void) 189 180 { 190 181 iocontext_cachep = kmem_cache_create("blkdev_ioc",
+4 -6
block/blk-map.c
··· 53 53 * direct dma. else, set up kernel bounce buffers 54 54 */ 55 55 uaddr = (unsigned long) ubuf; 56 - if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q))) 56 + if (!(uaddr & queue_dma_alignment(q)) && 57 + !(len & queue_dma_alignment(q))) 57 58 bio = bio_map_user(q, NULL, uaddr, len, reading); 58 59 else 59 60 bio = bio_copy_user(q, uaddr, len, reading); ··· 145 144 blk_rq_unmap_user(bio); 146 145 return ret; 147 146 } 148 - 149 147 EXPORT_SYMBOL(blk_rq_map_user); 150 148 151 149 /** ··· 179 179 /* we don't allow misaligned data like bio_map_user() does. If the 180 180 * user is using sg, they're expected to know the alignment constraints 181 181 * and respect them accordingly */ 182 - bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq)== READ); 182 + bio = bio_map_user_iov(q, NULL, iov, iov_count, 183 + rq_data_dir(rq) == READ); 183 184 if (IS_ERR(bio)) 184 185 return PTR_ERR(bio); 185 186 ··· 195 194 rq->buffer = rq->data = NULL; 196 195 return 0; 197 196 } 198 - 199 197 EXPORT_SYMBOL(blk_rq_map_user_iov); 200 198 201 199 /** ··· 227 227 228 228 return ret; 229 229 } 230 - 231 230 EXPORT_SYMBOL(blk_rq_unmap_user); 232 231 233 232 /** ··· 259 260 rq->buffer = rq->data = NULL; 260 261 return 0; 261 262 } 262 - 263 263 EXPORT_SYMBOL(blk_rq_map_kern);
+6 -6
block/blk-merge.c
··· 32 32 * size, something has gone terribly wrong 33 33 */ 34 34 if (rq->nr_sectors < rq->current_nr_sectors) { 35 - printk("blk: request botched\n"); 35 + printk(KERN_ERR "blk: request botched\n"); 36 36 rq->nr_sectors = rq->current_nr_sectors; 37 37 } 38 38 } ··· 235 235 236 236 return nsegs; 237 237 } 238 - 239 238 EXPORT_SYMBOL(blk_rq_map_sg); 240 239 241 240 static inline int ll_new_mergeable(struct request_queue *q, ··· 304 305 if (unlikely(!bio_flagged(bio, BIO_SEG_VALID))) 305 306 blk_recount_segments(q, bio); 306 307 len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size; 307 - if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio)) && 308 - !BIOVEC_VIRT_OVERSIZE(len)) { 308 + if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio)) 309 + && !BIOVEC_VIRT_OVERSIZE(len)) { 309 310 int mergeable = ll_new_mergeable(q, req, bio); 310 311 311 312 if (mergeable) { ··· 320 321 return ll_new_hw_segment(q, req, bio); 321 322 } 322 323 323 - int ll_front_merge_fn(struct request_queue *q, struct request *req, 324 + int ll_front_merge_fn(struct request_queue *q, struct request *req, 324 325 struct bio *bio) 325 326 { 326 327 unsigned short max_sectors; ··· 387 388 388 389 total_hw_segments = req->nr_hw_segments + next->nr_hw_segments; 389 390 if (blk_hw_contig_segment(q, req->biotail, next->bio)) { 390 - int len = req->biotail->bi_hw_back_size + next->bio->bi_hw_front_size; 391 + int len = req->biotail->bi_hw_back_size + 392 + next->bio->bi_hw_front_size; 391 393 /* 392 394 * propagate the combined length to the end of the requests 393 395 */
+27 -34
block/blk-settings.c
··· 10 10 11 11 #include "blk.h" 12 12 13 - unsigned long blk_max_low_pfn, blk_max_pfn; 13 + unsigned long blk_max_low_pfn; 14 14 EXPORT_SYMBOL(blk_max_low_pfn); 15 + 16 + unsigned long blk_max_pfn; 15 17 EXPORT_SYMBOL(blk_max_pfn); 16 18 17 19 /** ··· 31 29 { 32 30 q->prep_rq_fn = pfn; 33 31 } 34 - 35 32 EXPORT_SYMBOL(blk_queue_prep_rq); 36 33 37 34 /** ··· 53 52 { 54 53 q->merge_bvec_fn = mbfn; 55 54 } 56 - 57 55 EXPORT_SYMBOL(blk_queue_merge_bvec); 58 56 59 57 void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn) 60 58 { 61 59 q->softirq_done_fn = fn; 62 60 } 63 - 64 61 EXPORT_SYMBOL(blk_queue_softirq_done); 65 62 66 63 /** ··· 83 84 * __bio_kmap_atomic() to get a temporary kernel mapping, or by calling 84 85 * blk_queue_bounce() to create a buffer in normal memory. 85 86 **/ 86 - void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn) 87 + void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn) 87 88 { 88 89 /* 89 90 * set defaults ··· 92 93 blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS); 93 94 blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS); 94 95 q->make_request_fn = mfn; 95 - q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; 96 + q->backing_dev_info.ra_pages = 97 + (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; 96 98 q->backing_dev_info.state = 0; 97 99 q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY; 98 100 blk_queue_max_sectors(q, SAFE_MAX_SECTORS); ··· 117 117 */ 118 118 blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); 119 119 } 120 - 121 120 EXPORT_SYMBOL(blk_queue_make_request); 122 121 123 122 /** ··· 132 133 **/ 133 134 void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr) 134 135 { 135 - unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT; 136 + unsigned long b_pfn = dma_addr >> PAGE_SHIFT; 136 137 int dma = 0; 137 138 138 139 q->bounce_gfp = GFP_NOIO; ··· 140 141 /* Assume anything <= 4GB can be handled by IOMMU. 141 142 Actually some IOMMUs can handle everything, but I don't 142 143 know of a way to test this here. 
*/ 143 - if (bounce_pfn < (min_t(u64,0xffffffff,BLK_BOUNCE_HIGH) >> PAGE_SHIFT)) 144 + if (b_pfn < (min_t(u64, 0xffffffff, BLK_BOUNCE_HIGH) >> PAGE_SHIFT)) 144 145 dma = 1; 145 146 q->bounce_pfn = max_low_pfn; 146 147 #else 147 - if (bounce_pfn < blk_max_low_pfn) 148 + if (b_pfn < blk_max_low_pfn) 148 149 dma = 1; 149 - q->bounce_pfn = bounce_pfn; 150 + q->bounce_pfn = b_pfn; 150 151 #endif 151 152 if (dma) { 152 153 init_emergency_isa_pool(); 153 154 q->bounce_gfp = GFP_NOIO | GFP_DMA; 154 - q->bounce_pfn = bounce_pfn; 155 + q->bounce_pfn = b_pfn; 155 156 } 156 157 } 157 - 158 158 EXPORT_SYMBOL(blk_queue_bounce_limit); 159 159 160 160 /** ··· 169 171 { 170 172 if ((max_sectors << 9) < PAGE_CACHE_SIZE) { 171 173 max_sectors = 1 << (PAGE_CACHE_SHIFT - 9); 172 - printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors); 174 + printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__, 175 + max_sectors); 173 176 } 174 177 175 178 if (BLK_DEF_MAX_SECTORS > max_sectors) ··· 180 181 q->max_hw_sectors = max_sectors; 181 182 } 182 183 } 183 - 184 184 EXPORT_SYMBOL(blk_queue_max_sectors); 185 185 186 186 /** ··· 197 199 { 198 200 if (!max_segments) { 199 201 max_segments = 1; 200 - printk("%s: set to minimum %d\n", __FUNCTION__, max_segments); 202 + printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__, 203 + max_segments); 201 204 } 202 205 203 206 q->max_phys_segments = max_segments; 204 207 } 205 - 206 208 EXPORT_SYMBOL(blk_queue_max_phys_segments); 207 209 208 210 /** ··· 221 223 { 222 224 if (!max_segments) { 223 225 max_segments = 1; 224 - printk("%s: set to minimum %d\n", __FUNCTION__, max_segments); 226 + printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__, 227 + max_segments); 225 228 } 226 229 227 230 q->max_hw_segments = max_segments; 228 231 } 229 - 230 232 EXPORT_SYMBOL(blk_queue_max_hw_segments); 231 233 232 234 /** ··· 242 244 { 243 245 if (max_size < PAGE_CACHE_SIZE) { 244 246 max_size = PAGE_CACHE_SIZE; 245 - printk("%s: set to minimum %d\n", __FUNCTION__, max_size); 247 + printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__, 248 + max_size); 246 249 } 247 250 248 251 q->max_segment_size = max_size; 249 252 } 250 - 251 253 EXPORT_SYMBOL(blk_queue_max_segment_size); 252 254 253 255 /** ··· 265 267 { 266 268 q->hardsect_size = size; 267 269 } 268 - 269 270 EXPORT_SYMBOL(blk_queue_hardsect_size); 270 271 271 272 /* ··· 280 283 void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) 281 284 { 282 285 /* zero is "infinity" */ 283 - t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors); 284 - t->max_hw_sectors = min_not_zero(t->max_hw_sectors,b->max_hw_sectors); 286 + t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors); 287 + t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors); 285 288 286 - t->max_phys_segments = min(t->max_phys_segments,b->max_phys_segments); 287 - t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments); 288 - t->max_segment_size = min(t->max_segment_size,b->max_segment_size); 289 - t->hardsect_size = max(t->hardsect_size,b->hardsect_size); 289 + t->max_phys_segments = min(t->max_phys_segments, b->max_phys_segments); 290 + t->max_hw_segments = min(t->max_hw_segments, b->max_hw_segments); 291 + t->max_segment_size = min(t->max_segment_size, b->max_segment_size); 292 + t->hardsect_size = max(t->hardsect_size, b->hardsect_size); 290 293 if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) 291 294 clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags); 292 295 } 293 - 294 296 
EXPORT_SYMBOL(blk_queue_stack_limits); 295 297 296 298 /** ··· 328 332 329 333 return 0; 330 334 } 331 - 332 335 EXPORT_SYMBOL_GPL(blk_queue_dma_drain); 333 336 334 337 /** ··· 339 344 { 340 345 if (mask < PAGE_CACHE_SIZE - 1) { 341 346 mask = PAGE_CACHE_SIZE - 1; 342 - printk("%s: set to minimum %lx\n", __FUNCTION__, mask); 347 + printk(KERN_INFO "%s: set to minimum %lx\n", __FUNCTION__, 348 + mask); 343 349 } 344 350 345 351 q->seg_boundary_mask = mask; 346 352 } 347 - 348 353 EXPORT_SYMBOL(blk_queue_segment_boundary); 349 354 350 355 /** ··· 361 366 { 362 367 q->dma_alignment = mask; 363 368 } 364 - 365 369 EXPORT_SYMBOL(blk_queue_dma_alignment); 366 370 367 371 /** ··· 384 390 if (mask > q->dma_alignment) 385 391 q->dma_alignment = mask; 386 392 } 387 - 388 393 EXPORT_SYMBOL(blk_queue_update_dma_alignment); 389 394 390 395 int __init blk_settings_init(void)
+3 -2
block/blk-sysfs.c
··· 207 207 const char *page, size_t length) 208 208 { 209 209 struct queue_sysfs_entry *entry = to_queue(attr); 210 - struct request_queue *q = container_of(kobj, struct request_queue, kobj); 211 - 210 + struct request_queue *q; 212 211 ssize_t res; 213 212 214 213 if (!entry->store) 215 214 return -EIO; 215 + 216 + q = container_of(kobj, struct request_queue, kobj); 216 217 mutex_lock(&q->sysfs_lock); 217 218 if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) { 218 219 mutex_unlock(&q->sysfs_lock);
+3 -9
block/blk-tag.c
··· 21 21 { 22 22 return blk_map_queue_find_tag(q->queue_tags, tag); 23 23 } 24 - 25 24 EXPORT_SYMBOL(blk_queue_find_tag); 26 25 27 26 /** ··· 98 99 { 99 100 clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags); 100 101 } 101 - 102 102 EXPORT_SYMBOL(blk_queue_free_tags); 103 103 104 104 static int ··· 183 185 if (!tags) 184 186 goto fail; 185 187 } else if (q->queue_tags) { 186 - if ((rc = blk_queue_resize_tags(q, depth))) 188 + rc = blk_queue_resize_tags(q, depth); 189 + if (rc) 187 190 return rc; 188 191 set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags); 189 192 return 0; ··· 202 203 kfree(tags); 203 204 return -ENOMEM; 204 205 } 205 - 206 206 EXPORT_SYMBOL(blk_queue_init_tags); 207 207 208 208 /** ··· 258 260 kfree(tag_map); 259 261 return 0; 260 262 } 261 - 262 263 EXPORT_SYMBOL(blk_queue_resize_tags); 263 264 264 265 /** ··· 310 313 clear_bit_unlock(tag, bqt->tag_map); 311 314 bqt->busy--; 312 315 } 313 - 314 316 EXPORT_SYMBOL(blk_queue_end_tag); 315 317 316 318 /** ··· 336 340 int tag; 337 341 338 342 if (unlikely((rq->cmd_flags & REQ_QUEUED))) { 339 - printk(KERN_ERR 343 + printk(KERN_ERR 340 344 "%s: request %p for device [%s] already tagged %d", 341 345 __FUNCTION__, rq, 342 346 rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag); ··· 366 370 bqt->busy++; 367 371 return 0; 368 372 } 369 - 370 373 EXPORT_SYMBOL(blk_queue_start_tag); 371 374 372 375 /** ··· 387 392 list_for_each_safe(tmp, n, &q->tag_busy_list) 388 393 blk_requeue_request(q, list_entry_rq(tmp)); 389 394 } 390 - 391 395 EXPORT_SYMBOL(blk_queue_invalidate_tags);
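
The blk-tag.c hunk above also shows the checkpatch rule against hiding an assignment inside an if condition: make the call first, then test the result. A small sketch of the same transformation, with demo_resize() as a made-up stand-in for blk_queue_resize_tags():

#include <linux/errno.h>

/* Hypothetical helper standing in for blk_queue_resize_tags(). */
static int demo_resize(int depth)
{
        return depth > 0 ? 0 : -EINVAL;
}

static int demo_setup(int depth)
{
        int rc;

        /*
         * Old form, flagged by checkpatch:
         *      if ((rc = demo_resize(depth)))
         *              return rc;
         */
        rc = demo_resize(depth);
        if (rc)
                return rc;

        return 0;
}
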
+46 -37
block/cfq-iosched.c
··· 15 15 /* 16 16 * tunables 17 17 */ 18 - static const int cfq_quantum = 4; /* max queue in one round of service */ 18 + /* max queue in one round of service */ 19 + static const int cfq_quantum = 4; 19 20 static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 }; 20 - static const int cfq_back_max = 16 * 1024; /* maximum backwards seek, in KiB */ 21 - static const int cfq_back_penalty = 2; /* penalty of a backwards seek */ 22 - 21 + /* maximum backwards seek, in KiB */ 22 + static const int cfq_back_max = 16 * 1024; 23 + /* penalty of a backwards seek */ 24 + static const int cfq_back_penalty = 2; 23 25 static const int cfq_slice_sync = HZ / 10; 24 26 static int cfq_slice_async = HZ / 25; 25 27 static const int cfq_slice_async_rq = 2; ··· 39 37 40 38 #define CFQ_SLICE_SCALE (5) 41 39 42 - #define RQ_CIC(rq) ((struct cfq_io_context*)(rq)->elevator_private) 40 + #define RQ_CIC(rq) \ 41 + ((struct cfq_io_context *) (rq)->elevator_private) 43 42 #define RQ_CFQQ(rq) ((rq)->elevator_private2) 44 43 45 44 static struct kmem_cache *cfq_pool; ··· 174 171 #define CFQ_CFQQ_FNS(name) \ 175 172 static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \ 176 173 { \ 177 - cfqq->flags |= (1 << CFQ_CFQQ_FLAG_##name); \ 174 + (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name); \ 178 175 } \ 179 176 static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \ 180 177 { \ 181 - cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \ 178 + (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \ 182 179 } \ 183 180 static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \ 184 181 { \ 185 - return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \ 182 + return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \ 186 183 } 187 184 188 185 CFQ_CFQQ_FNS(on_rr); ··· 1008 1005 /* 1009 1006 * follow expired path, else get first next available 1010 1007 */ 1011 - if ((rq = cfq_check_fifo(cfqq)) == NULL) 1008 + rq = cfq_check_fifo(cfqq); 1009 + if (rq == NULL) 1012 1010 rq = cfqq->next_rq; 1013 1011 1014 1012 /* ··· 1298 1294 1299 1295 ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio); 1300 1296 switch (ioprio_class) { 1301 - default: 1302 - printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class); 1303 - case IOPRIO_CLASS_NONE: 1304 - /* 1305 - * no prio set, place us in the middle of the BE classes 1306 - */ 1307 - cfqq->ioprio = task_nice_ioprio(tsk); 1308 - cfqq->ioprio_class = IOPRIO_CLASS_BE; 1309 - break; 1310 - case IOPRIO_CLASS_RT: 1311 - cfqq->ioprio = task_ioprio(ioc); 1312 - cfqq->ioprio_class = IOPRIO_CLASS_RT; 1313 - break; 1314 - case IOPRIO_CLASS_BE: 1315 - cfqq->ioprio = task_ioprio(ioc); 1316 - cfqq->ioprio_class = IOPRIO_CLASS_BE; 1317 - break; 1318 - case IOPRIO_CLASS_IDLE: 1319 - cfqq->ioprio_class = IOPRIO_CLASS_IDLE; 1320 - cfqq->ioprio = 7; 1321 - cfq_clear_cfqq_idle_window(cfqq); 1322 - break; 1297 + default: 1298 + printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class); 1299 + case IOPRIO_CLASS_NONE: 1300 + /* 1301 + * no prio set, place us in the middle of the BE classes 1302 + */ 1303 + cfqq->ioprio = task_nice_ioprio(tsk); 1304 + cfqq->ioprio_class = IOPRIO_CLASS_BE; 1305 + break; 1306 + case IOPRIO_CLASS_RT: 1307 + cfqq->ioprio = task_ioprio(ioc); 1308 + cfqq->ioprio_class = IOPRIO_CLASS_RT; 1309 + break; 1310 + case IOPRIO_CLASS_BE: 1311 + cfqq->ioprio = task_ioprio(ioc); 1312 + cfqq->ioprio_class = IOPRIO_CLASS_BE; 1313 + break; 1314 + case IOPRIO_CLASS_IDLE: 1315 + cfqq->ioprio_class = IOPRIO_CLASS_IDLE; 1316 + cfqq->ioprio = 7; 1317 + cfq_clear_cfqq_idle_window(cfqq); 1318 + break; 1323 1319 } 
1324 1320 1325 1321 /* ··· 1431 1427 static struct cfq_queue ** 1432 1428 cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio) 1433 1429 { 1434 - switch(ioprio_class) { 1430 + switch (ioprio_class) { 1435 1431 case IOPRIO_CLASS_RT: 1436 1432 return &cfqd->async_cfqq[0][ioprio]; 1437 1433 case IOPRIO_CLASS_BE: ··· 2022 2018 2023 2019 spin_lock_irqsave(cfqd->queue->queue_lock, flags); 2024 2020 2025 - if ((cfqq = cfqd->active_queue) != NULL) { 2021 + cfqq = cfqd->active_queue; 2022 + if (cfqq) { 2026 2023 timed_out = 0; 2027 2024 2028 2025 /* ··· 2217 2212 return ret; \ 2218 2213 } 2219 2214 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0); 2220 - STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1); 2221 - STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1); 2215 + STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, 2216 + UINT_MAX, 1); 2217 + STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, 2218 + UINT_MAX, 1); 2222 2219 STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0); 2223 - STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0); 2220 + STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, 2221 + UINT_MAX, 0); 2224 2222 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1); 2225 2223 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1); 2226 2224 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1); 2227 - STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0); 2225 + STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, 2226 + UINT_MAX, 0); 2228 2227 #undef STORE_FUNCTION 2229 2228 2230 2229 #define CFQ_ATTR(name) \
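
The big cfq_init_prio_data() hunk is pure re-indentation: CodingStyle (and checkpatch) want case labels at the same indent level as the switch statement itself. A compact sketch with a hypothetical classifier, where only the layout is the point:

static const char *demo_class_name(int ioprio_class)
{
        switch (ioprio_class) {
        case 1:                 /* e.g. IOPRIO_CLASS_RT */
                return "rt";
        case 2:                 /* e.g. IOPRIO_CLASS_BE */
                return "be";
        default:                /* the old layout indented each case one extra level */
                return "none";
        }
}
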
+30 -27
block/elevator.c
··· 45 45 */ 46 46 static const int elv_hash_shift = 6; 47 47 #define ELV_HASH_BLOCK(sec) ((sec) >> 3) 48 - #define ELV_HASH_FN(sec) (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift)) 48 + #define ELV_HASH_FN(sec) \ 49 + (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift)) 49 50 #define ELV_HASH_ENTRIES (1 << elv_hash_shift) 50 51 #define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors) 51 52 #define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash)) ··· 225 224 q->end_sector = 0; 226 225 q->boundary_rq = NULL; 227 226 228 - if (name && !(e = elevator_get(name))) 229 - return -EINVAL; 227 + if (name) { 228 + e = elevator_get(name); 229 + if (!e) 230 + return -EINVAL; 231 + } 230 232 231 - if (!e && *chosen_elevator && !(e = elevator_get(chosen_elevator))) 232 - printk("I/O scheduler %s not found\n", chosen_elevator); 233 + if (!e && *chosen_elevator) { 234 + e = elevator_get(chosen_elevator); 235 + if (!e) 236 + printk(KERN_ERR "I/O scheduler %s not found\n", 237 + chosen_elevator); 238 + } 233 239 234 - if (!e && !(e = elevator_get(CONFIG_DEFAULT_IOSCHED))) { 235 - printk("Default I/O scheduler not found, using no-op\n"); 236 - e = elevator_get("noop"); 240 + if (!e) { 241 + e = elevator_get(CONFIG_DEFAULT_IOSCHED); 242 + if (!e) { 243 + printk(KERN_ERR 244 + "Default I/O scheduler not found. " \ 245 + "Using noop.\n"); 246 + e = elevator_get("noop"); 247 + } 237 248 } 238 249 239 250 eq = elevator_alloc(q, e); ··· 261 248 elevator_attach(q, eq, data); 262 249 return ret; 263 250 } 264 - 265 251 EXPORT_SYMBOL(elevator_init); 266 252 267 253 void elevator_exit(elevator_t *e) ··· 273 261 274 262 kobject_put(&e->kobj); 275 263 } 276 - 277 264 EXPORT_SYMBOL(elevator_exit); 278 265 279 266 static void elv_activate_rq(struct request_queue *q, struct request *rq) ··· 364 353 rb_insert_color(&rq->rb_node, root); 365 354 return NULL; 366 355 } 367 - 368 356 EXPORT_SYMBOL(elv_rb_add); 369 357 370 358 void elv_rb_del(struct rb_root *root, struct request *rq) ··· 372 362 rb_erase(&rq->rb_node, root); 373 363 RB_CLEAR_NODE(&rq->rb_node); 374 364 } 375 - 376 365 EXPORT_SYMBOL(elv_rb_del); 377 366 378 367 struct request *elv_rb_find(struct rb_root *root, sector_t sector) ··· 392 383 393 384 return NULL; 394 385 } 395 - 396 386 EXPORT_SYMBOL(elv_rb_find); 397 387 398 388 /* ··· 403 395 { 404 396 sector_t boundary; 405 397 struct list_head *entry; 398 + int stop_flags; 406 399 407 400 if (q->last_merge == rq) 408 401 q->last_merge = NULL; ··· 413 404 q->nr_sorted--; 414 405 415 406 boundary = q->end_sector; 416 - 407 + stop_flags = REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED; 417 408 list_for_each_prev(entry, &q->queue_head) { 418 409 struct request *pos = list_entry_rq(entry); 419 410 420 411 if (rq_data_dir(rq) != rq_data_dir(pos)) 421 412 break; 422 - if (pos->cmd_flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED)) 413 + if (pos->cmd_flags & stop_flags) 423 414 break; 424 415 if (rq->sector >= boundary) { 425 416 if (pos->sector < boundary) ··· 434 425 435 426 list_add(&rq->queuelist, entry); 436 427 } 437 - 438 428 EXPORT_SYMBOL(elv_dispatch_sort); 439 429 440 430 /* ··· 454 446 q->boundary_rq = rq; 455 447 list_add_tail(&rq->queuelist, &q->queue_head); 456 448 } 457 - 458 449 EXPORT_SYMBOL(elv_dispatch_add_tail); 459 450 460 451 int elv_merge(struct request_queue *q, struct request **req, struct bio *bio) ··· 672 665 q->end_sector = rq_end_sector(rq); 673 666 q->boundary_rq = rq; 674 667 } 675 - } else if (!(rq->cmd_flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT) 668 + } else if 
(!(rq->cmd_flags & REQ_ELVPRIV) && 669 + where == ELEVATOR_INSERT_SORT) 676 670 where = ELEVATOR_INSERT_BACK; 677 671 678 672 if (plug) ··· 681 673 682 674 elv_insert(q, rq, where); 683 675 } 684 - 685 676 EXPORT_SYMBOL(__elv_add_request); 686 677 687 678 void elv_add_request(struct request_queue *q, struct request *rq, int where, ··· 692 685 __elv_add_request(q, rq, where, plug); 693 686 spin_unlock_irqrestore(q->queue_lock, flags); 694 687 } 695 - 696 688 EXPORT_SYMBOL(elv_add_request); 697 689 698 690 static inline struct request *__elv_next_request(struct request_queue *q) ··· 798 792 799 793 return rq; 800 794 } 801 - 802 795 EXPORT_SYMBOL(elv_next_request); 803 796 804 797 void elv_dequeue_request(struct request_queue *q, struct request *rq) ··· 815 810 if (blk_account_rq(rq)) 816 811 q->in_flight++; 817 812 } 818 - 819 813 EXPORT_SYMBOL(elv_dequeue_request); 820 814 821 815 int elv_queue_empty(struct request_queue *q) ··· 829 825 830 826 return 1; 831 827 } 832 - 833 828 EXPORT_SYMBOL(elv_queue_empty); 834 829 835 830 struct request *elv_latter_request(struct request_queue *q, struct request *rq) ··· 997 994 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED))) 998 995 def = " (default)"; 999 996 1000 - printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name, def); 997 + printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name, 998 + def); 1001 999 } 1002 1000 EXPORT_SYMBOL_GPL(elv_register); 1003 1001 ··· 1130 1126 } 1131 1127 1132 1128 if (!elevator_switch(q, e)) 1133 - printk(KERN_ERR "elevator: switch to %s failed\n",elevator_name); 1129 + printk(KERN_ERR "elevator: switch to %s failed\n", 1130 + elevator_name); 1134 1131 return count; 1135 1132 } 1136 1133 ··· 1165 1160 1166 1161 return NULL; 1167 1162 } 1168 - 1169 1163 EXPORT_SYMBOL(elv_rb_former_request); 1170 1164 1171 1165 struct request *elv_rb_latter_request(struct request_queue *q, ··· 1177 1173 1178 1174 return NULL; 1179 1175 } 1180 - 1181 1176 EXPORT_SYMBOL(elv_rb_latter_request);
+1 -1
drivers/block/sunvdc.c
··· 732 732 .handshake_complete = vdc_handshake_complete, 733 733 }; 734 734 735 - static void print_version(void) 735 + static void __devinit print_version(void) 736 736 { 737 737 static int version_printed; 738 738
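
print_version() is referenced only from the driver's __devinit probe path, so annotating it __devinit keeps it in the same init section as its callers; the usual symptom otherwise is a modpost section-mismatch warning. A minimal sketch with hypothetical names (these annotations existed in kernels of this era and were removed from mainline years later):

#include <linux/init.h>
#include <linux/kernel.h>

static char demo_version[] __devinitdata = "demo: driver v1.0";

/* Only ever called from __devinit probe code, so mark it the same way. */
static void __devinit demo_print_version(void)
{
        static int printed;

        if (!printed) {
                printed = 1;
                printk(KERN_INFO "%s\n", demo_version);
        }
}

static int __devinit demo_probe(void)
{
        demo_print_version();
        return 0;
}
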
-2
drivers/char/random.c
··· 667 667 add_timer_randomness(disk->random, 668 668 0x100 + MKDEV(disk->major, disk->first_minor)); 669 669 } 670 - 671 - EXPORT_SYMBOL(add_disk_randomness); 672 670 #endif 673 671 674 672 #define EXTRACT_SIZE 10
+1 -1
drivers/ide/ide-cd.c
··· 1722 1722 */ 1723 1723 if ((stat & DRQ_STAT) == 0) { 1724 1724 spin_lock_irqsave(&ide_lock, flags); 1725 - if (__blk_end_request(rq, 0, 0)) 1725 + if (__blk_end_request(rq, 0, rq->data_len)) 1726 1726 BUG(); 1727 1727 HWGROUP(drive)->rq = NULL; 1728 1728 spin_unlock_irqrestore(&ide_lock, flags);
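
The ide-cd fix ties into the new end-request interface: the byte count now tells the block layer how much of the request to retire, so completing a packet request that still has rq->data_len bytes attached with a count of 0 leaves it pending, __blk_end_request() returns 1, and the BUG() check fires. Passing rq->data_len retires the request completely. Below is a hedged sketch of the corrected pattern in a hypothetical completion path; the real driver also clears HWGROUP(drive)->rq and uses ide_lock, which the IDE layer registers as the queue lock, but those details are omitted here.

#include <linux/blkdev.h>
#include <linux/kernel.h>

static void demo_finish_pc_request(struct request *rq)
{
        unsigned long flags;

        /* __blk_end_request() expects the queue lock to be held. */
        spin_lock_irqsave(rq->q->queue_lock, flags);
        /*
         * Retire the whole request, including the data still attached;
         * passing fewer bytes than are left would make this return 1.
         */
        if (__blk_end_request(rq, 0, rq->data_len))
                BUG();
        spin_unlock_irqrestore(rq->q->queue_lock, flags);
}
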
+1 -3
fs/splice.c
··· 1033 1033 1034 1034 done: 1035 1035 pipe->nrbufs = pipe->curbuf = 0; 1036 - if (bytes > 0) 1037 - file_accessed(in); 1038 - 1036 + file_accessed(in); 1039 1037 return bytes; 1040 1038 1041 1039 out_release:
+9 -7
include/linux/blkdev.h
··· 39 39 struct io_context *get_io_context(gfp_t gfp_flags, int node); 40 40 struct io_context *alloc_io_context(gfp_t gfp_flags, int node); 41 41 void copy_io_context(struct io_context **pdst, struct io_context **psrc); 42 - void swap_io_context(struct io_context **ioc1, struct io_context **ioc2); 43 42 44 43 struct request; 45 44 typedef void (rq_end_io_fn)(struct request *, int); ··· 654 655 * blk_end_request() for parts of the original function. 655 656 * This prevents code duplication in drivers. 656 657 */ 657 - extern int blk_end_request(struct request *rq, int error, int nr_bytes); 658 - extern int __blk_end_request(struct request *rq, int error, int nr_bytes); 659 - extern int blk_end_bidi_request(struct request *rq, int error, int nr_bytes, 660 - int bidi_bytes); 658 + extern int blk_end_request(struct request *rq, int error, 659 + unsigned int nr_bytes); 660 + extern int __blk_end_request(struct request *rq, int error, 661 + unsigned int nr_bytes); 662 + extern int blk_end_bidi_request(struct request *rq, int error, 663 + unsigned int nr_bytes, unsigned int bidi_bytes); 661 664 extern void end_request(struct request *, int); 662 665 extern void end_queued_request(struct request *, int); 663 666 extern void end_dequeued_request(struct request *, int); 664 - extern int blk_end_request_callback(struct request *rq, int error, int nr_bytes, 665 - int (drv_callback)(struct request *)); 667 + extern int blk_end_request_callback(struct request *rq, int error, 668 + unsigned int nr_bytes, 669 + int (drv_callback)(struct request *)); 666 670 extern void blk_complete_request(struct request *); 667 671 668 672 /*
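
The prototypes above switch the byte counts to unsigned int so a large request's size cannot wrap a signed int. The sketch below shows the intended call from a driver that is not holding the queue lock (blk_end_request() acquires it internally, unlike __blk_end_request() in the ide-cd hunk). demo_end_whole_request() and the use of hard_nr_sectors << 9 as "everything left" are assumptions for illustration only.

#include <linux/blkdev.h>
#include <linux/kernel.h>

static void demo_end_whole_request(struct request *rq, int error)
{
        unsigned int bytes = rq->hard_nr_sectors << 9;

        /* A non-zero return means part of the request is still pending. */
        if (blk_end_request(rq, error, bytes))
                printk(KERN_WARNING "demo: request %p not fully completed\n",
                       rq);
}
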