Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block

* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
block: kill swap_io_context()
as-iosched: fix inconsistent ioc->lock context
ide-cd: fix leftover data BUG
block: make elevator lib checkpatch compliant
cfq-iosched: make checkpatch compliant
block: make core bits checkpatch compliant
block: new end request handling interface should take unsigned byte counts
unexport add_disk_randomness
block/sunvdc.c:print_version() must be __devinit
splice: always update atime in direct splice

+219 -253
+6 -18
block/as-iosched.c
··· 170 171 static void as_trim(struct io_context *ioc) 172 { 173 - spin_lock(&ioc->lock); 174 if (ioc->aic) 175 free_as_io_context(ioc->aic); 176 ioc->aic = NULL; 177 - spin_unlock(&ioc->lock); 178 } 179 180 /* Called when the task exits */ ··· 235 aic = RQ_IOC(rq)->aic; 236 237 if (rq_is_sync(rq) && aic) { 238 - spin_lock(&aic->lock); 239 set_bit(AS_TASK_IORUNNING, &aic->state); 240 aic->last_end_request = jiffies; 241 - spin_unlock(&aic->lock); 242 } 243 244 put_io_context(RQ_IOC(rq)); ··· 1268 */ 1269 if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) { 1270 if (time_before(rq_fifo_time(next), rq_fifo_time(req))) { 1271 - struct io_context *rioc = RQ_IOC(req); 1272 - struct io_context *nioc = RQ_IOC(next); 1273 - 1274 list_move(&req->queuelist, &next->queuelist); 1275 rq_set_fifo_time(req, rq_fifo_time(next)); 1276 - /* 1277 - * Don't copy here but swap, because when anext is 1278 - * removed below, it must contain the unused context 1279 - */ 1280 - if (rioc != nioc) { 1281 - double_spin_lock(&rioc->lock, &nioc->lock, 1282 - rioc < nioc); 1283 - swap_io_context(&rioc, &nioc); 1284 - double_spin_unlock(&rioc->lock, &nioc->lock, 1285 - rioc < nioc); 1286 - } 1287 } 1288 } 1289
··· 170 171 static void as_trim(struct io_context *ioc) 172 { 173 + spin_lock_irq(&ioc->lock); 174 if (ioc->aic) 175 free_as_io_context(ioc->aic); 176 ioc->aic = NULL; 177 + spin_unlock_irq(&ioc->lock); 178 } 179 180 /* Called when the task exits */ ··· 235 aic = RQ_IOC(rq)->aic; 236 237 if (rq_is_sync(rq) && aic) { 238 + unsigned long flags; 239 + 240 + spin_lock_irqsave(&aic->lock, flags); 241 set_bit(AS_TASK_IORUNNING, &aic->state); 242 aic->last_end_request = jiffies; 243 + spin_unlock_irqrestore(&aic->lock, flags); 244 } 245 246 put_io_context(RQ_IOC(rq)); ··· 1266 */ 1267 if (!list_empty(&req->queuelist) && !list_empty(&next->queuelist)) { 1268 if (time_before(rq_fifo_time(next), rq_fifo_time(req))) { 1269 list_move(&req->queuelist, &next->queuelist); 1270 rq_set_fifo_time(req, rq_fifo_time(next)); 1271 } 1272 } 1273
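The as-iosched hunk above is the "fix inconsistent ioc->lock context" change from the shortlog: the plain spin_lock() calls are replaced with the IRQ-safe variants, presumably because these locks are also taken on paths where interrupts may be disabled (the second hunk is in the request-completion code). A minimal sketch of that locking rule follows; demo_ctx, demo_trim and demo_complete are made-up names, not code from this merge.

#include <linux/spinlock.h>

struct demo_ctx {
	spinlock_t lock;		/* initialised with spin_lock_init() */
	unsigned long last_event;
};

/*
 * Process context: because the same lock is also taken while interrupts
 * are disabled (e.g. from request completion), a plain spin_lock() here
 * could deadlock if that path ran on this CPU; disable local IRQs.
 */
static void demo_trim(struct demo_ctx *ctx)
{
	spin_lock_irq(&ctx->lock);
	ctx->last_event = 0;
	spin_unlock_irq(&ctx->lock);
}

/*
 * Completion path: the caller's IRQ state is unknown, so save and
 * restore the interrupt flags instead of unconditionally re-enabling.
 */
static void demo_complete(struct demo_ctx *ctx, unsigned long now)
{
	unsigned long flags;

	spin_lock_irqsave(&ctx->lock, flags);
	ctx->last_event = now;
	spin_unlock_irqrestore(&ctx->lock, flags);
}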
+2 -3
block/blk-barrier.c
··· 26 { 27 if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) && 28 prepare_flush_fn == NULL) { 29 - printk(KERN_ERR "blk_queue_ordered: prepare_flush_fn required\n"); 30 return -EINVAL; 31 } 32 ··· 48 49 return 0; 50 } 51 - 52 EXPORT_SYMBOL(blk_queue_ordered); 53 54 /* ··· 315 bio_put(bio); 316 return ret; 317 } 318 - 319 EXPORT_SYMBOL(blkdev_issue_flush);
··· 26 { 27 if (ordered & (QUEUE_ORDERED_PREFLUSH | QUEUE_ORDERED_POSTFLUSH) && 28 prepare_flush_fn == NULL) { 29 + printk(KERN_ERR "%s: prepare_flush_fn required\n", 30 + __FUNCTION__); 31 return -EINVAL; 32 } 33 ··· 47 48 return 0; 49 } 50 EXPORT_SYMBOL(blk_queue_ordered); 51 52 /* ··· 315 bio_put(bio); 316 return ret; 317 } 318 EXPORT_SYMBOL(blkdev_issue_flush);
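Most of the checkpatch hunks in this merge are variations on the printk() cleanup shown above: every message gets an explicit KERN_ level and hard-coded function names are replaced with "%s" plus __FUNCTION__ (the C99 spelling __func__ does the same job). A hedged, stand-alone example of the preferred form, with a made-up message:

#include <linux/kernel.h>

static void demo_warn_missing_hook(const char *what)
{
	/* Explicit severity, and the function name comes from the
	 * compiler rather than being spelled out in the format string. */
	printk(KERN_ERR "%s: %s required\n", __FUNCTION__, what);
}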
+80 -87
block/blk-core.c
··· 3 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics 4 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE 5 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de> 6 - * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au> - July2000 7 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001 8 */ 9 ··· 43 /* 44 * For queue allocation 45 */ 46 - struct kmem_cache *blk_requestq_cachep = NULL; 47 48 /* 49 * Controlling structure to kblockd ··· 138 error = -EIO; 139 140 if (unlikely(nbytes > bio->bi_size)) { 141 - printk("%s: want %u bytes done, only %u left\n", 142 __FUNCTION__, nbytes, bio->bi_size); 143 nbytes = bio->bi_size; 144 } ··· 162 { 163 int bit; 164 165 - printk("%s: dev %s: type=%x, flags=%x\n", msg, 166 rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type, 167 rq->cmd_flags); 168 169 - printk("\nsector %llu, nr/cnr %lu/%u\n", (unsigned long long)rq->sector, 170 - rq->nr_sectors, 171 - rq->current_nr_sectors); 172 - printk("bio %p, biotail %p, buffer %p, data %p, len %u\n", rq->bio, rq->biotail, rq->buffer, rq->data, rq->data_len); 173 174 if (blk_pc_request(rq)) { 175 - printk("cdb: "); 176 for (bit = 0; bit < sizeof(rq->cmd); bit++) 177 printk("%02x ", rq->cmd[bit]); 178 printk("\n"); 179 } 180 } 181 - 182 EXPORT_SYMBOL(blk_dump_rq_flags); 183 184 /* ··· 208 blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG); 209 } 210 } 211 - 212 EXPORT_SYMBOL(blk_plug_device); 213 214 /* ··· 224 del_timer(&q->unplug_timer); 225 return 1; 226 } 227 - 228 EXPORT_SYMBOL(blk_remove_plug); 229 230 /* ··· 330 kblockd_schedule_work(&q->unplug_work); 331 } 332 } 333 - 334 EXPORT_SYMBOL(blk_start_queue); 335 336 /** ··· 409 } 410 EXPORT_SYMBOL(blk_put_queue); 411 412 - void blk_cleanup_queue(struct request_queue * q) 413 { 414 mutex_lock(&q->sysfs_lock); 415 set_bit(QUEUE_FLAG_DEAD, &q->queue_flags); ··· 420 421 blk_put_queue(q); 422 } 423 - 424 EXPORT_SYMBOL(blk_cleanup_queue); 425 426 static int blk_init_free_list(struct request_queue *q) ··· 575 576 return 1; 577 } 578 - 579 EXPORT_SYMBOL(blk_get_queue); 580 581 static inline void blk_free_request(struct request_queue *q, struct request *rq) ··· 773 */ 774 if (ioc_batching(q, ioc)) 775 ioc->nr_batch_requests--; 776 - 777 rq_init(q, rq); 778 779 blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ); ··· 887 888 elv_requeue_request(q, rq); 889 } 890 - 891 EXPORT_SYMBOL(blk_requeue_request); 892 893 /** ··· 937 blk_start_queueing(q); 938 spin_unlock_irqrestore(q->queue_lock, flags); 939 } 940 - 941 EXPORT_SYMBOL(blk_insert_request); 942 943 /* ··· 944 * queue lock is held and interrupts disabled, as we muck with the 945 * request queue list. 946 */ 947 - static inline void add_request(struct request_queue * q, struct request * req) 948 { 949 drive_stat_acct(req, 1); 950 ··· 954 */ 955 __elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0); 956 } 957 - 958 /* 959 * disk_round_stats() - Round off the performance stats on a struct 960 * disk_stats. 
··· 984 } 985 disk->stamp = now; 986 } 987 - 988 EXPORT_SYMBOL_GPL(disk_round_stats); 989 990 /* ··· 1013 freed_request(q, rw, priv); 1014 } 1015 } 1016 - 1017 EXPORT_SYMBOL_GPL(__blk_put_request); 1018 1019 void blk_put_request(struct request *req) ··· 1030 spin_unlock_irqrestore(q->queue_lock, flags); 1031 } 1032 } 1033 - 1034 EXPORT_SYMBOL(blk_put_request); 1035 1036 void init_request_from_bio(struct request *req, struct bio *bio) ··· 1090 1091 el_ret = elv_merge(q, &req, bio); 1092 switch (el_ret) { 1093 - case ELEVATOR_BACK_MERGE: 1094 - BUG_ON(!rq_mergeable(req)); 1095 1096 - if (!ll_back_merge_fn(q, req, bio)) 1097 - break; 1098 1099 - blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE); 1100 1101 - req->biotail->bi_next = bio; 1102 - req->biotail = bio; 1103 - req->nr_sectors = req->hard_nr_sectors += nr_sectors; 1104 - req->ioprio = ioprio_best(req->ioprio, prio); 1105 - drive_stat_acct(req, 0); 1106 - if (!attempt_back_merge(q, req)) 1107 - elv_merged_request(q, req, el_ret); 1108 - goto out; 1109 1110 - case ELEVATOR_FRONT_MERGE: 1111 - BUG_ON(!rq_mergeable(req)); 1112 1113 - if (!ll_front_merge_fn(q, req, bio)) 1114 - break; 1115 1116 - blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE); 1117 1118 - bio->bi_next = req->bio; 1119 - req->bio = bio; 1120 1121 - /* 1122 - * may not be valid. if the low level driver said 1123 - * it didn't need a bounce buffer then it better 1124 - * not touch req->buffer either... 1125 - */ 1126 - req->buffer = bio_data(bio); 1127 - req->current_nr_sectors = bio_cur_sectors(bio); 1128 - req->hard_cur_sectors = req->current_nr_sectors; 1129 - req->sector = req->hard_sector = bio->bi_sector; 1130 - req->nr_sectors = req->hard_nr_sectors += nr_sectors; 1131 - req->ioprio = ioprio_best(req->ioprio, prio); 1132 - drive_stat_acct(req, 0); 1133 - if (!attempt_front_merge(q, req)) 1134 - elv_merged_request(q, req, el_ret); 1135 - goto out; 1136 1137 - /* ELV_NO_MERGE: elevator says don't/can't merge. */ 1138 - default: 1139 - ; 1140 } 1141 1142 get_rq: ··· 1344 } 1345 1346 if (unlikely(nr_sectors > q->max_hw_sectors)) { 1347 - printk("bio too big device %s (%u > %u)\n", 1348 bdevname(bio->bi_bdev, b), 1349 bio_sectors(bio), 1350 q->max_hw_sectors); ··· 1433 } while (bio); 1434 current->bio_tail = NULL; /* deactivate */ 1435 } 1436 - 1437 EXPORT_SYMBOL(generic_make_request); 1438 1439 /** ··· 1473 current->comm, task_pid_nr(current), 1474 (rw & WRITE) ? "WRITE" : "READ", 1475 (unsigned long long)bio->bi_sector, 1476 - bdevname(bio->bi_bdev,b)); 1477 } 1478 } 1479 1480 generic_make_request(bio); 1481 } 1482 - 1483 EXPORT_SYMBOL(submit_bio); 1484 1485 /** ··· 1510 if (!blk_pc_request(req)) 1511 req->errors = 0; 1512 1513 - if (error) { 1514 - if (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET)) 1515 - printk("end_request: I/O error, dev %s, sector %llu\n", 1516 req->rq_disk ? 
req->rq_disk->disk_name : "?", 1517 (unsigned long long)req->sector); 1518 } ··· 1545 1546 if (unlikely(bio->bi_idx >= bio->bi_vcnt)) { 1547 blk_dump_rq_flags(req, "__end_that"); 1548 - printk("%s: bio idx %d >= vcnt %d\n", 1549 - __FUNCTION__, 1550 - bio->bi_idx, bio->bi_vcnt); 1551 break; 1552 } 1553 ··· 1573 total_bytes += nbytes; 1574 nr_bytes -= nbytes; 1575 1576 - if ((bio = req->bio)) { 1577 /* 1578 * end more in this run, or just return 'not-done' 1579 */ ··· 1618 local_irq_enable(); 1619 1620 while (!list_empty(&local_list)) { 1621 - struct request *rq = list_entry(local_list.next, struct request, donelist); 1622 1623 list_del_init(&rq->donelist); 1624 rq->q->softirq_done_fn(rq); 1625 } 1626 } 1627 1628 - static int __cpuinit blk_cpu_notify(struct notifier_block *self, unsigned long action, 1629 - void *hcpu) 1630 { 1631 /* 1632 * If a CPU goes away, splice its entries to the current CPU ··· 1669 unsigned long flags; 1670 1671 BUG_ON(!req->q->softirq_done_fn); 1672 - 1673 local_irq_save(flags); 1674 1675 cpu_list = &__get_cpu_var(blk_cpu_done); ··· 1678 1679 local_irq_restore(flags); 1680 } 1681 - 1682 EXPORT_SYMBOL(blk_complete_request); 1683 - 1684 /* 1685 * queue lock must be held 1686 */ ··· 1838 * 0 - we are done with this request 1839 * 1 - this request is not freed yet, it still has pending buffers. 1840 **/ 1841 - static int blk_end_io(struct request *rq, int error, int nr_bytes, 1842 - int bidi_bytes, int (drv_callback)(struct request *)) 1843 { 1844 struct request_queue *q = rq->q; 1845 unsigned long flags = 0UL; ··· 1882 * 0 - we are done with this request 1883 * 1 - still buffers pending for this request 1884 **/ 1885 - int blk_end_request(struct request *rq, int error, int nr_bytes) 1886 { 1887 return blk_end_io(rq, error, nr_bytes, 0, NULL); 1888 } ··· 1901 * 0 - we are done with this request 1902 * 1 - still buffers pending for this request 1903 **/ 1904 - int __blk_end_request(struct request *rq, int error, int nr_bytes) 1905 { 1906 if (blk_fs_request(rq) || blk_pc_request(rq)) { 1907 if (__end_that_request_first(rq, error, nr_bytes)) ··· 1930 * 0 - we are done with this request 1931 * 1 - still buffers pending for this request 1932 **/ 1933 - int blk_end_bidi_request(struct request *rq, int error, int nr_bytes, 1934 - int bidi_bytes) 1935 { 1936 return blk_end_io(rq, error, nr_bytes, bidi_bytes, NULL); 1937 } ··· 1962 * this request still has pending buffers or 1963 * the driver doesn't want to finish this request yet. 1964 **/ 1965 - int blk_end_request_callback(struct request *rq, int error, int nr_bytes, 1966 int (drv_callback)(struct request *)) 1967 { 1968 return blk_end_io(rq, error, nr_bytes, 0, drv_callback); ··· 1994 { 1995 return queue_work(kblockd_workqueue, work); 1996 } 1997 - 1998 EXPORT_SYMBOL(kblockd_schedule_work); 1999 2000 void kblockd_flush_work(struct work_struct *work)
··· 3 * Copyright (C) 1994, Karl Keyte: Added support for disk statistics 4 * Elevator latency, (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE 5 * Queue request tables / lock, selectable elevator, Jens Axboe <axboe@suse.de> 6 + * kernel-doc documentation started by NeilBrown <neilb@cse.unsw.edu.au> 7 + * - July2000 8 * bio rewrite, highmem i/o, etc, Jens Axboe <axboe@suse.de> - may 2001 9 */ 10 ··· 42 /* 43 * For queue allocation 44 */ 45 + struct kmem_cache *blk_requestq_cachep; 46 47 /* 48 * Controlling structure to kblockd ··· 137 error = -EIO; 138 139 if (unlikely(nbytes > bio->bi_size)) { 140 + printk(KERN_ERR "%s: want %u bytes done, %u left\n", 141 __FUNCTION__, nbytes, bio->bi_size); 142 nbytes = bio->bi_size; 143 } ··· 161 { 162 int bit; 163 164 + printk(KERN_INFO "%s: dev %s: type=%x, flags=%x\n", msg, 165 rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type, 166 rq->cmd_flags); 167 168 + printk(KERN_INFO " sector %llu, nr/cnr %lu/%u\n", 169 + (unsigned long long)rq->sector, 170 + rq->nr_sectors, 171 + rq->current_nr_sectors); 172 + printk(KERN_INFO " bio %p, biotail %p, buffer %p, data %p, len %u\n", 173 + rq->bio, rq->biotail, 174 + rq->buffer, rq->data, 175 + rq->data_len); 176 177 if (blk_pc_request(rq)) { 178 + printk(KERN_INFO " cdb: "); 179 for (bit = 0; bit < sizeof(rq->cmd); bit++) 180 printk("%02x ", rq->cmd[bit]); 181 printk("\n"); 182 } 183 } 184 EXPORT_SYMBOL(blk_dump_rq_flags); 185 186 /* ··· 204 blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG); 205 } 206 } 207 EXPORT_SYMBOL(blk_plug_device); 208 209 /* ··· 221 del_timer(&q->unplug_timer); 222 return 1; 223 } 224 EXPORT_SYMBOL(blk_remove_plug); 225 226 /* ··· 328 kblockd_schedule_work(&q->unplug_work); 329 } 330 } 331 EXPORT_SYMBOL(blk_start_queue); 332 333 /** ··· 408 } 409 EXPORT_SYMBOL(blk_put_queue); 410 411 + void blk_cleanup_queue(struct request_queue *q) 412 { 413 mutex_lock(&q->sysfs_lock); 414 set_bit(QUEUE_FLAG_DEAD, &q->queue_flags); ··· 419 420 blk_put_queue(q); 421 } 422 EXPORT_SYMBOL(blk_cleanup_queue); 423 424 static int blk_init_free_list(struct request_queue *q) ··· 575 576 return 1; 577 } 578 EXPORT_SYMBOL(blk_get_queue); 579 580 static inline void blk_free_request(struct request_queue *q, struct request *rq) ··· 774 */ 775 if (ioc_batching(q, ioc)) 776 ioc->nr_batch_requests--; 777 + 778 rq_init(q, rq); 779 780 blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ); ··· 888 889 elv_requeue_request(q, rq); 890 } 891 EXPORT_SYMBOL(blk_requeue_request); 892 893 /** ··· 939 blk_start_queueing(q); 940 spin_unlock_irqrestore(q->queue_lock, flags); 941 } 942 EXPORT_SYMBOL(blk_insert_request); 943 944 /* ··· 947 * queue lock is held and interrupts disabled, as we muck with the 948 * request queue list. 949 */ 950 + static inline void add_request(struct request_queue *q, struct request *req) 951 { 952 drive_stat_acct(req, 1); 953 ··· 957 */ 958 __elv_add_request(q, req, ELEVATOR_INSERT_SORT, 0); 959 } 960 + 961 /* 962 * disk_round_stats() - Round off the performance stats on a struct 963 * disk_stats. 
··· 987 } 988 disk->stamp = now; 989 } 990 EXPORT_SYMBOL_GPL(disk_round_stats); 991 992 /* ··· 1017 freed_request(q, rw, priv); 1018 } 1019 } 1020 EXPORT_SYMBOL_GPL(__blk_put_request); 1021 1022 void blk_put_request(struct request *req) ··· 1035 spin_unlock_irqrestore(q->queue_lock, flags); 1036 } 1037 } 1038 EXPORT_SYMBOL(blk_put_request); 1039 1040 void init_request_from_bio(struct request *req, struct bio *bio) ··· 1096 1097 el_ret = elv_merge(q, &req, bio); 1098 switch (el_ret) { 1099 + case ELEVATOR_BACK_MERGE: 1100 + BUG_ON(!rq_mergeable(req)); 1101 1102 + if (!ll_back_merge_fn(q, req, bio)) 1103 + break; 1104 1105 + blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE); 1106 1107 + req->biotail->bi_next = bio; 1108 + req->biotail = bio; 1109 + req->nr_sectors = req->hard_nr_sectors += nr_sectors; 1110 + req->ioprio = ioprio_best(req->ioprio, prio); 1111 + drive_stat_acct(req, 0); 1112 + if (!attempt_back_merge(q, req)) 1113 + elv_merged_request(q, req, el_ret); 1114 + goto out; 1115 1116 + case ELEVATOR_FRONT_MERGE: 1117 + BUG_ON(!rq_mergeable(req)); 1118 1119 + if (!ll_front_merge_fn(q, req, bio)) 1120 + break; 1121 1122 + blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE); 1123 1124 + bio->bi_next = req->bio; 1125 + req->bio = bio; 1126 1127 + /* 1128 + * may not be valid. if the low level driver said 1129 + * it didn't need a bounce buffer then it better 1130 + * not touch req->buffer either... 1131 + */ 1132 + req->buffer = bio_data(bio); 1133 + req->current_nr_sectors = bio_cur_sectors(bio); 1134 + req->hard_cur_sectors = req->current_nr_sectors; 1135 + req->sector = req->hard_sector = bio->bi_sector; 1136 + req->nr_sectors = req->hard_nr_sectors += nr_sectors; 1137 + req->ioprio = ioprio_best(req->ioprio, prio); 1138 + drive_stat_acct(req, 0); 1139 + if (!attempt_front_merge(q, req)) 1140 + elv_merged_request(q, req, el_ret); 1141 + goto out; 1142 1143 + /* ELV_NO_MERGE: elevator says don't/can't merge. */ 1144 + default: 1145 + ; 1146 } 1147 1148 get_rq: ··· 1350 } 1351 1352 if (unlikely(nr_sectors > q->max_hw_sectors)) { 1353 + printk(KERN_ERR "bio too big device %s (%u > %u)\n", 1354 bdevname(bio->bi_bdev, b), 1355 bio_sectors(bio), 1356 q->max_hw_sectors); ··· 1439 } while (bio); 1440 current->bio_tail = NULL; /* deactivate */ 1441 } 1442 EXPORT_SYMBOL(generic_make_request); 1443 1444 /** ··· 1480 current->comm, task_pid_nr(current), 1481 (rw & WRITE) ? "WRITE" : "READ", 1482 (unsigned long long)bio->bi_sector, 1483 + bdevname(bio->bi_bdev, b)); 1484 } 1485 } 1486 1487 generic_make_request(bio); 1488 } 1489 EXPORT_SYMBOL(submit_bio); 1490 1491 /** ··· 1518 if (!blk_pc_request(req)) 1519 req->errors = 0; 1520 1521 + if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) { 1522 + printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n", 1523 req->rq_disk ? 
req->rq_disk->disk_name : "?", 1524 (unsigned long long)req->sector); 1525 } ··· 1554 1555 if (unlikely(bio->bi_idx >= bio->bi_vcnt)) { 1556 blk_dump_rq_flags(req, "__end_that"); 1557 + printk(KERN_ERR "%s: bio idx %d >= vcnt %d\n", 1558 + __FUNCTION__, bio->bi_idx, 1559 + bio->bi_vcnt); 1560 break; 1561 } 1562 ··· 1582 total_bytes += nbytes; 1583 nr_bytes -= nbytes; 1584 1585 + bio = req->bio; 1586 + if (bio) { 1587 /* 1588 * end more in this run, or just return 'not-done' 1589 */ ··· 1626 local_irq_enable(); 1627 1628 while (!list_empty(&local_list)) { 1629 + struct request *rq; 1630 1631 + rq = list_entry(local_list.next, struct request, donelist); 1632 list_del_init(&rq->donelist); 1633 rq->q->softirq_done_fn(rq); 1634 } 1635 } 1636 1637 + static int __cpuinit blk_cpu_notify(struct notifier_block *self, 1638 + unsigned long action, void *hcpu) 1639 { 1640 /* 1641 * If a CPU goes away, splice its entries to the current CPU ··· 1676 unsigned long flags; 1677 1678 BUG_ON(!req->q->softirq_done_fn); 1679 + 1680 local_irq_save(flags); 1681 1682 cpu_list = &__get_cpu_var(blk_cpu_done); ··· 1685 1686 local_irq_restore(flags); 1687 } 1688 EXPORT_SYMBOL(blk_complete_request); 1689 + 1690 /* 1691 * queue lock must be held 1692 */ ··· 1846 * 0 - we are done with this request 1847 * 1 - this request is not freed yet, it still has pending buffers. 1848 **/ 1849 + static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes, 1850 + unsigned int bidi_bytes, 1851 + int (drv_callback)(struct request *)) 1852 { 1853 struct request_queue *q = rq->q; 1854 unsigned long flags = 0UL; ··· 1889 * 0 - we are done with this request 1890 * 1 - still buffers pending for this request 1891 **/ 1892 + int blk_end_request(struct request *rq, int error, unsigned int nr_bytes) 1893 { 1894 return blk_end_io(rq, error, nr_bytes, 0, NULL); 1895 } ··· 1908 * 0 - we are done with this request 1909 * 1 - still buffers pending for this request 1910 **/ 1911 + int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes) 1912 { 1913 if (blk_fs_request(rq) || blk_pc_request(rq)) { 1914 if (__end_that_request_first(rq, error, nr_bytes)) ··· 1937 * 0 - we are done with this request 1938 * 1 - still buffers pending for this request 1939 **/ 1940 + int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes, 1941 + unsigned int bidi_bytes) 1942 { 1943 return blk_end_io(rq, error, nr_bytes, bidi_bytes, NULL); 1944 } ··· 1969 * this request still has pending buffers or 1970 * the driver doesn't want to finish this request yet. 1971 **/ 1972 + int blk_end_request_callback(struct request *rq, int error, 1973 + unsigned int nr_bytes, 1974 int (drv_callback)(struct request *)) 1975 { 1976 return blk_end_io(rq, error, nr_bytes, 0, drv_callback); ··· 2000 { 2001 return queue_work(kblockd_workqueue, work); 2002 } 2003 EXPORT_SYMBOL(kblockd_schedule_work); 2004 2005 void kblockd_flush_work(struct work_struct *work)
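The signature changes at the bottom of blk-core.c are the "unsigned byte counts" commit from the shortlog: nr_bytes and bidi_bytes are sizes, so blk_end_io() and its public wrappers now take unsigned int rather than int. Per the kernel-doc quoted above, the helpers return 0 once the request is fully completed and 1 while buffers are still pending. A hedged sketch of a driver-side call; demo_complete_rq() is hypothetical:

#include <linux/kernel.h>
#include <linux/blkdev.h>

static void demo_complete_rq(struct request *rq, int error)
{
	unsigned int nr_bytes = rq->data_len;	/* a byte count, never negative */

	/* Returns 0 when the whole request is finished, 1 while buffers
	 * remain pending (see the kernel-doc in blk-core.c above). */
	if (blk_end_request(rq, error, nr_bytes))
		printk(KERN_DEBUG "%s: request %p not fully completed\n",
		       __FUNCTION__, rq);
}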
-1
block/blk-exec.c
··· 101 102 return err; 103 } 104 - 105 EXPORT_SYMBOL(blk_execute_rq);
··· 101 102 return err; 103 } 104 EXPORT_SYMBOL(blk_execute_rq);
-9
block/blk-ioc.c
··· 176 } 177 EXPORT_SYMBOL(copy_io_context); 178 179 - void swap_io_context(struct io_context **ioc1, struct io_context **ioc2) 180 - { 181 - struct io_context *temp; 182 - temp = *ioc1; 183 - *ioc1 = *ioc2; 184 - *ioc2 = temp; 185 - } 186 - EXPORT_SYMBOL(swap_io_context); 187 - 188 int __init blk_ioc_init(void) 189 { 190 iocontext_cachep = kmem_cache_create("blkdev_ioc",
··· 176 } 177 EXPORT_SYMBOL(copy_io_context); 178 179 int __init blk_ioc_init(void) 180 { 181 iocontext_cachep = kmem_cache_create("blkdev_ioc",
+4 -6
block/blk-map.c
··· 53 * direct dma. else, set up kernel bounce buffers 54 */ 55 uaddr = (unsigned long) ubuf; 56 - if (!(uaddr & queue_dma_alignment(q)) && !(len & queue_dma_alignment(q))) 57 bio = bio_map_user(q, NULL, uaddr, len, reading); 58 else 59 bio = bio_copy_user(q, uaddr, len, reading); ··· 145 blk_rq_unmap_user(bio); 146 return ret; 147 } 148 - 149 EXPORT_SYMBOL(blk_rq_map_user); 150 151 /** ··· 179 /* we don't allow misaligned data like bio_map_user() does. If the 180 * user is using sg, they're expected to know the alignment constraints 181 * and respect them accordingly */ 182 - bio = bio_map_user_iov(q, NULL, iov, iov_count, rq_data_dir(rq)== READ); 183 if (IS_ERR(bio)) 184 return PTR_ERR(bio); 185 ··· 195 rq->buffer = rq->data = NULL; 196 return 0; 197 } 198 - 199 EXPORT_SYMBOL(blk_rq_map_user_iov); 200 201 /** ··· 227 228 return ret; 229 } 230 - 231 EXPORT_SYMBOL(blk_rq_unmap_user); 232 233 /** ··· 259 rq->buffer = rq->data = NULL; 260 return 0; 261 } 262 - 263 EXPORT_SYMBOL(blk_rq_map_kern);
··· 53 * direct dma. else, set up kernel bounce buffers 54 */ 55 uaddr = (unsigned long) ubuf; 56 + if (!(uaddr & queue_dma_alignment(q)) && 57 + !(len & queue_dma_alignment(q))) 58 bio = bio_map_user(q, NULL, uaddr, len, reading); 59 else 60 bio = bio_copy_user(q, uaddr, len, reading); ··· 144 blk_rq_unmap_user(bio); 145 return ret; 146 } 147 EXPORT_SYMBOL(blk_rq_map_user); 148 149 /** ··· 179 /* we don't allow misaligned data like bio_map_user() does. If the 180 * user is using sg, they're expected to know the alignment constraints 181 * and respect them accordingly */ 182 + bio = bio_map_user_iov(q, NULL, iov, iov_count, 183 + rq_data_dir(rq) == READ); 184 if (IS_ERR(bio)) 185 return PTR_ERR(bio); 186 ··· 194 rq->buffer = rq->data = NULL; 195 return 0; 196 } 197 EXPORT_SYMBOL(blk_rq_map_user_iov); 198 199 /** ··· 227 228 return ret; 229 } 230 EXPORT_SYMBOL(blk_rq_unmap_user); 231 232 /** ··· 260 rq->buffer = rq->data = NULL; 261 return 0; 262 } 263 EXPORT_SYMBOL(blk_rq_map_kern);
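For context on the wrapped condition in blk_rq_map_user(): queue_dma_alignment() returns the queue's DMA alignment as a mask, so a buffer qualifies for direct mapping exactly when both its start address and its length AND to zero against that mask; otherwise the data is bounced through a copy. A hedged restatement with a made-up helper name:

#include <linux/blkdev.h>

/* demo_can_map_directly() is hypothetical; it just restates the test. */
static int demo_can_map_directly(struct request_queue *q,
				 unsigned long uaddr, unsigned int len)
{
	unsigned long mask = queue_dma_alignment(q);

	/* Both the start address and the length must be aligned. */
	return !(uaddr & mask) && !(len & mask);
}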
+6 -6
block/blk-merge.c
··· 32 * size, something has gone terribly wrong 33 */ 34 if (rq->nr_sectors < rq->current_nr_sectors) { 35 - printk("blk: request botched\n"); 36 rq->nr_sectors = rq->current_nr_sectors; 37 } 38 } ··· 235 236 return nsegs; 237 } 238 - 239 EXPORT_SYMBOL(blk_rq_map_sg); 240 241 static inline int ll_new_mergeable(struct request_queue *q, ··· 304 if (unlikely(!bio_flagged(bio, BIO_SEG_VALID))) 305 blk_recount_segments(q, bio); 306 len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size; 307 - if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio)) && 308 - !BIOVEC_VIRT_OVERSIZE(len)) { 309 int mergeable = ll_new_mergeable(q, req, bio); 310 311 if (mergeable) { ··· 320 return ll_new_hw_segment(q, req, bio); 321 } 322 323 - int ll_front_merge_fn(struct request_queue *q, struct request *req, 324 struct bio *bio) 325 { 326 unsigned short max_sectors; ··· 387 388 total_hw_segments = req->nr_hw_segments + next->nr_hw_segments; 389 if (blk_hw_contig_segment(q, req->biotail, next->bio)) { 390 - int len = req->biotail->bi_hw_back_size + next->bio->bi_hw_front_size; 391 /* 392 * propagate the combined length to the end of the requests 393 */
··· 32 * size, something has gone terribly wrong 33 */ 34 if (rq->nr_sectors < rq->current_nr_sectors) { 35 + printk(KERN_ERR "blk: request botched\n"); 36 rq->nr_sectors = rq->current_nr_sectors; 37 } 38 } ··· 235 236 return nsegs; 237 } 238 EXPORT_SYMBOL(blk_rq_map_sg); 239 240 static inline int ll_new_mergeable(struct request_queue *q, ··· 305 if (unlikely(!bio_flagged(bio, BIO_SEG_VALID))) 306 blk_recount_segments(q, bio); 307 len = req->biotail->bi_hw_back_size + bio->bi_hw_front_size; 308 + if (BIOVEC_VIRT_MERGEABLE(__BVEC_END(req->biotail), __BVEC_START(bio)) 309 + && !BIOVEC_VIRT_OVERSIZE(len)) { 310 int mergeable = ll_new_mergeable(q, req, bio); 311 312 if (mergeable) { ··· 321 return ll_new_hw_segment(q, req, bio); 322 } 323 324 + int ll_front_merge_fn(struct request_queue *q, struct request *req, 325 struct bio *bio) 326 { 327 unsigned short max_sectors; ··· 388 389 total_hw_segments = req->nr_hw_segments + next->nr_hw_segments; 390 if (blk_hw_contig_segment(q, req->biotail, next->bio)) { 391 + int len = req->biotail->bi_hw_back_size + 392 + next->bio->bi_hw_front_size; 393 /* 394 * propagate the combined length to the end of the requests 395 */
+27 -34
block/blk-settings.c
··· 10 11 #include "blk.h" 12 13 - unsigned long blk_max_low_pfn, blk_max_pfn; 14 EXPORT_SYMBOL(blk_max_low_pfn); 15 EXPORT_SYMBOL(blk_max_pfn); 16 17 /** ··· 31 { 32 q->prep_rq_fn = pfn; 33 } 34 - 35 EXPORT_SYMBOL(blk_queue_prep_rq); 36 37 /** ··· 53 { 54 q->merge_bvec_fn = mbfn; 55 } 56 - 57 EXPORT_SYMBOL(blk_queue_merge_bvec); 58 59 void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn) 60 { 61 q->softirq_done_fn = fn; 62 } 63 - 64 EXPORT_SYMBOL(blk_queue_softirq_done); 65 66 /** ··· 83 * __bio_kmap_atomic() to get a temporary kernel mapping, or by calling 84 * blk_queue_bounce() to create a buffer in normal memory. 85 **/ 86 - void blk_queue_make_request(struct request_queue * q, make_request_fn * mfn) 87 { 88 /* 89 * set defaults ··· 92 blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS); 93 blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS); 94 q->make_request_fn = mfn; 95 - q->backing_dev_info.ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; 96 q->backing_dev_info.state = 0; 97 q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY; 98 blk_queue_max_sectors(q, SAFE_MAX_SECTORS); ··· 117 */ 118 blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); 119 } 120 - 121 EXPORT_SYMBOL(blk_queue_make_request); 122 123 /** ··· 132 **/ 133 void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr) 134 { 135 - unsigned long bounce_pfn = dma_addr >> PAGE_SHIFT; 136 int dma = 0; 137 138 q->bounce_gfp = GFP_NOIO; ··· 140 /* Assume anything <= 4GB can be handled by IOMMU. 141 Actually some IOMMUs can handle everything, but I don't 142 know of a way to test this here. */ 143 - if (bounce_pfn < (min_t(u64,0xffffffff,BLK_BOUNCE_HIGH) >> PAGE_SHIFT)) 144 dma = 1; 145 q->bounce_pfn = max_low_pfn; 146 #else 147 - if (bounce_pfn < blk_max_low_pfn) 148 dma = 1; 149 - q->bounce_pfn = bounce_pfn; 150 #endif 151 if (dma) { 152 init_emergency_isa_pool(); 153 q->bounce_gfp = GFP_NOIO | GFP_DMA; 154 - q->bounce_pfn = bounce_pfn; 155 } 156 } 157 - 158 EXPORT_SYMBOL(blk_queue_bounce_limit); 159 160 /** ··· 169 { 170 if ((max_sectors << 9) < PAGE_CACHE_SIZE) { 171 max_sectors = 1 << (PAGE_CACHE_SHIFT - 9); 172 - printk("%s: set to minimum %d\n", __FUNCTION__, max_sectors); 173 } 174 175 if (BLK_DEF_MAX_SECTORS > max_sectors) ··· 180 q->max_hw_sectors = max_sectors; 181 } 182 } 183 - 184 EXPORT_SYMBOL(blk_queue_max_sectors); 185 186 /** ··· 197 { 198 if (!max_segments) { 199 max_segments = 1; 200 - printk("%s: set to minimum %d\n", __FUNCTION__, max_segments); 201 } 202 203 q->max_phys_segments = max_segments; 204 } 205 - 206 EXPORT_SYMBOL(blk_queue_max_phys_segments); 207 208 /** ··· 221 { 222 if (!max_segments) { 223 max_segments = 1; 224 - printk("%s: set to minimum %d\n", __FUNCTION__, max_segments); 225 } 226 227 q->max_hw_segments = max_segments; 228 } 229 - 230 EXPORT_SYMBOL(blk_queue_max_hw_segments); 231 232 /** ··· 242 { 243 if (max_size < PAGE_CACHE_SIZE) { 244 max_size = PAGE_CACHE_SIZE; 245 - printk("%s: set to minimum %d\n", __FUNCTION__, max_size); 246 } 247 248 q->max_segment_size = max_size; 249 } 250 - 251 EXPORT_SYMBOL(blk_queue_max_segment_size); 252 253 /** ··· 265 { 266 q->hardsect_size = size; 267 } 268 - 269 EXPORT_SYMBOL(blk_queue_hardsect_size); 270 271 /* ··· 280 void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) 281 { 282 /* zero is "infinity" */ 283 - t->max_sectors = min_not_zero(t->max_sectors,b->max_sectors); 284 - t->max_hw_sectors = min_not_zero(t->max_hw_sectors,b->max_hw_sectors); 285 286 - t->max_phys_segments = 
min(t->max_phys_segments,b->max_phys_segments); 287 - t->max_hw_segments = min(t->max_hw_segments,b->max_hw_segments); 288 - t->max_segment_size = min(t->max_segment_size,b->max_segment_size); 289 - t->hardsect_size = max(t->hardsect_size,b->hardsect_size); 290 if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) 291 clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags); 292 } 293 - 294 EXPORT_SYMBOL(blk_queue_stack_limits); 295 296 /** ··· 328 329 return 0; 330 } 331 - 332 EXPORT_SYMBOL_GPL(blk_queue_dma_drain); 333 334 /** ··· 339 { 340 if (mask < PAGE_CACHE_SIZE - 1) { 341 mask = PAGE_CACHE_SIZE - 1; 342 - printk("%s: set to minimum %lx\n", __FUNCTION__, mask); 343 } 344 345 q->seg_boundary_mask = mask; 346 } 347 - 348 EXPORT_SYMBOL(blk_queue_segment_boundary); 349 350 /** ··· 361 { 362 q->dma_alignment = mask; 363 } 364 - 365 EXPORT_SYMBOL(blk_queue_dma_alignment); 366 367 /** ··· 384 if (mask > q->dma_alignment) 385 q->dma_alignment = mask; 386 } 387 - 388 EXPORT_SYMBOL(blk_queue_update_dma_alignment); 389 390 int __init blk_settings_init(void)
··· 10 11 #include "blk.h" 12 13 + unsigned long blk_max_low_pfn; 14 EXPORT_SYMBOL(blk_max_low_pfn); 15 + 16 + unsigned long blk_max_pfn; 17 EXPORT_SYMBOL(blk_max_pfn); 18 19 /** ··· 29 { 30 q->prep_rq_fn = pfn; 31 } 32 EXPORT_SYMBOL(blk_queue_prep_rq); 33 34 /** ··· 52 { 53 q->merge_bvec_fn = mbfn; 54 } 55 EXPORT_SYMBOL(blk_queue_merge_bvec); 56 57 void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn) 58 { 59 q->softirq_done_fn = fn; 60 } 61 EXPORT_SYMBOL(blk_queue_softirq_done); 62 63 /** ··· 84 * __bio_kmap_atomic() to get a temporary kernel mapping, or by calling 85 * blk_queue_bounce() to create a buffer in normal memory. 86 **/ 87 + void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn) 88 { 89 /* 90 * set defaults ··· 93 blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS); 94 blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS); 95 q->make_request_fn = mfn; 96 + q->backing_dev_info.ra_pages = 97 + (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE; 98 q->backing_dev_info.state = 0; 99 q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY; 100 blk_queue_max_sectors(q, SAFE_MAX_SECTORS); ··· 117 */ 118 blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH); 119 } 120 EXPORT_SYMBOL(blk_queue_make_request); 121 122 /** ··· 133 **/ 134 void blk_queue_bounce_limit(struct request_queue *q, u64 dma_addr) 135 { 136 + unsigned long b_pfn = dma_addr >> PAGE_SHIFT; 137 int dma = 0; 138 139 q->bounce_gfp = GFP_NOIO; ··· 141 /* Assume anything <= 4GB can be handled by IOMMU. 142 Actually some IOMMUs can handle everything, but I don't 143 know of a way to test this here. */ 144 + if (b_pfn < (min_t(u64, 0xffffffff, BLK_BOUNCE_HIGH) >> PAGE_SHIFT)) 145 dma = 1; 146 q->bounce_pfn = max_low_pfn; 147 #else 148 + if (b_pfn < blk_max_low_pfn) 149 dma = 1; 150 + q->bounce_pfn = b_pfn; 151 #endif 152 if (dma) { 153 init_emergency_isa_pool(); 154 q->bounce_gfp = GFP_NOIO | GFP_DMA; 155 + q->bounce_pfn = b_pfn; 156 } 157 } 158 EXPORT_SYMBOL(blk_queue_bounce_limit); 159 160 /** ··· 171 { 172 if ((max_sectors << 9) < PAGE_CACHE_SIZE) { 173 max_sectors = 1 << (PAGE_CACHE_SHIFT - 9); 174 + printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__, 175 + max_sectors); 176 } 177 178 if (BLK_DEF_MAX_SECTORS > max_sectors) ··· 181 q->max_hw_sectors = max_sectors; 182 } 183 } 184 EXPORT_SYMBOL(blk_queue_max_sectors); 185 186 /** ··· 199 { 200 if (!max_segments) { 201 max_segments = 1; 202 + printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__, 203 + max_segments); 204 } 205 206 q->max_phys_segments = max_segments; 207 } 208 EXPORT_SYMBOL(blk_queue_max_phys_segments); 209 210 /** ··· 223 { 224 if (!max_segments) { 225 max_segments = 1; 226 + printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__, 227 + max_segments); 228 } 229 230 q->max_hw_segments = max_segments; 231 } 232 EXPORT_SYMBOL(blk_queue_max_hw_segments); 233 234 /** ··· 244 { 245 if (max_size < PAGE_CACHE_SIZE) { 246 max_size = PAGE_CACHE_SIZE; 247 + printk(KERN_INFO "%s: set to minimum %d\n", __FUNCTION__, 248 + max_size); 249 } 250 251 q->max_segment_size = max_size; 252 } 253 EXPORT_SYMBOL(blk_queue_max_segment_size); 254 255 /** ··· 267 { 268 q->hardsect_size = size; 269 } 270 EXPORT_SYMBOL(blk_queue_hardsect_size); 271 272 /* ··· 283 void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) 284 { 285 /* zero is "infinity" */ 286 + t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors); 287 + t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors); 288 289 + t->max_phys_segments = 
min(t->max_phys_segments, b->max_phys_segments); 290 + t->max_hw_segments = min(t->max_hw_segments, b->max_hw_segments); 291 + t->max_segment_size = min(t->max_segment_size, b->max_segment_size); 292 + t->hardsect_size = max(t->hardsect_size, b->hardsect_size); 293 if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) 294 clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags); 295 } 296 EXPORT_SYMBOL(blk_queue_stack_limits); 297 298 /** ··· 332 333 return 0; 334 } 335 EXPORT_SYMBOL_GPL(blk_queue_dma_drain); 336 337 /** ··· 344 { 345 if (mask < PAGE_CACHE_SIZE - 1) { 346 mask = PAGE_CACHE_SIZE - 1; 347 + printk(KERN_INFO "%s: set to minimum %lx\n", __FUNCTION__, 348 + mask); 349 } 350 351 q->seg_boundary_mask = mask; 352 } 353 EXPORT_SYMBOL(blk_queue_segment_boundary); 354 355 /** ··· 366 { 367 q->dma_alignment = mask; 368 } 369 EXPORT_SYMBOL(blk_queue_dma_alignment); 370 371 /** ··· 390 if (mask > q->dma_alignment) 391 q->dma_alignment = mask; 392 } 393 EXPORT_SYMBOL(blk_queue_update_dma_alignment); 394 395 int __init blk_settings_init(void)
+3 -2
block/blk-sysfs.c
··· 207 const char *page, size_t length) 208 { 209 struct queue_sysfs_entry *entry = to_queue(attr); 210 - struct request_queue *q = container_of(kobj, struct request_queue, kobj); 211 - 212 ssize_t res; 213 214 if (!entry->store) 215 return -EIO; 216 mutex_lock(&q->sysfs_lock); 217 if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) { 218 mutex_unlock(&q->sysfs_lock);
··· 207 const char *page, size_t length) 208 { 209 struct queue_sysfs_entry *entry = to_queue(attr); 210 + struct request_queue *q; 211 ssize_t res; 212 213 if (!entry->store) 214 return -EIO; 215 + 216 + q = container_of(kobj, struct request_queue, kobj); 217 mutex_lock(&q->sysfs_lock); 218 if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) { 219 mutex_unlock(&q->sysfs_lock);
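The blk-sysfs.c change only moves the container_of() lookup below the early -EIO return to keep the line short. container_of() recovers a pointer to the enclosing structure (here the request_queue) from a pointer to one of its members (the embedded kobject). A hedged userspace restatement of the idiom, with made-up types, that compiles and runs as-is:

#include <stddef.h>
#include <stdio.h>

/* Same pointer arithmetic as the kernel macro, minus its type checking. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct demo_kobj { int refcount; };

struct demo_queue {
	int id;
	struct demo_kobj kobj;	/* embedded member, like request_queue.kobj */
};

int main(void)
{
	struct demo_queue q = { .id = 42, .kobj = { .refcount = 1 } };
	struct demo_kobj *kp = &q.kobj;
	struct demo_queue *back = container_of(kp, struct demo_queue, kobj);

	printf("recovered id = %d\n", back->id);	/* prints 42 */
	return 0;
}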
+3 -9
block/blk-tag.c
··· 21 { 22 return blk_map_queue_find_tag(q->queue_tags, tag); 23 } 24 - 25 EXPORT_SYMBOL(blk_queue_find_tag); 26 27 /** ··· 98 { 99 clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags); 100 } 101 - 102 EXPORT_SYMBOL(blk_queue_free_tags); 103 104 static int ··· 183 if (!tags) 184 goto fail; 185 } else if (q->queue_tags) { 186 - if ((rc = blk_queue_resize_tags(q, depth))) 187 return rc; 188 set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags); 189 return 0; ··· 202 kfree(tags); 203 return -ENOMEM; 204 } 205 - 206 EXPORT_SYMBOL(blk_queue_init_tags); 207 208 /** ··· 258 kfree(tag_map); 259 return 0; 260 } 261 - 262 EXPORT_SYMBOL(blk_queue_resize_tags); 263 264 /** ··· 310 clear_bit_unlock(tag, bqt->tag_map); 311 bqt->busy--; 312 } 313 - 314 EXPORT_SYMBOL(blk_queue_end_tag); 315 316 /** ··· 336 int tag; 337 338 if (unlikely((rq->cmd_flags & REQ_QUEUED))) { 339 - printk(KERN_ERR 340 "%s: request %p for device [%s] already tagged %d", 341 __FUNCTION__, rq, 342 rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag); ··· 366 bqt->busy++; 367 return 0; 368 } 369 - 370 EXPORT_SYMBOL(blk_queue_start_tag); 371 372 /** ··· 387 list_for_each_safe(tmp, n, &q->tag_busy_list) 388 blk_requeue_request(q, list_entry_rq(tmp)); 389 } 390 - 391 EXPORT_SYMBOL(blk_queue_invalidate_tags);
··· 21 { 22 return blk_map_queue_find_tag(q->queue_tags, tag); 23 } 24 EXPORT_SYMBOL(blk_queue_find_tag); 25 26 /** ··· 99 { 100 clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags); 101 } 102 EXPORT_SYMBOL(blk_queue_free_tags); 103 104 static int ··· 185 if (!tags) 186 goto fail; 187 } else if (q->queue_tags) { 188 + rc = blk_queue_resize_tags(q, depth); 189 + if (rc) 190 return rc; 191 set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags); 192 return 0; ··· 203 kfree(tags); 204 return -ENOMEM; 205 } 206 EXPORT_SYMBOL(blk_queue_init_tags); 207 208 /** ··· 260 kfree(tag_map); 261 return 0; 262 } 263 EXPORT_SYMBOL(blk_queue_resize_tags); 264 265 /** ··· 313 clear_bit_unlock(tag, bqt->tag_map); 314 bqt->busy--; 315 } 316 EXPORT_SYMBOL(blk_queue_end_tag); 317 318 /** ··· 340 int tag; 341 342 if (unlikely((rq->cmd_flags & REQ_QUEUED))) { 343 + printk(KERN_ERR 344 "%s: request %p for device [%s] already tagged %d", 345 __FUNCTION__, rq, 346 rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag); ··· 370 bqt->busy++; 371 return 0; 372 } 373 EXPORT_SYMBOL(blk_queue_start_tag); 374 375 /** ··· 392 list_for_each_safe(tmp, n, &q->tag_busy_list) 393 blk_requeue_request(q, list_entry_rq(tmp)); 394 } 395 EXPORT_SYMBOL(blk_queue_invalidate_tags);
+46 -37
block/cfq-iosched.c
··· 15 /* 16 * tunables 17 */ 18 - static const int cfq_quantum = 4; /* max queue in one round of service */ 19 static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 }; 20 - static const int cfq_back_max = 16 * 1024; /* maximum backwards seek, in KiB */ 21 - static const int cfq_back_penalty = 2; /* penalty of a backwards seek */ 22 - 23 static const int cfq_slice_sync = HZ / 10; 24 static int cfq_slice_async = HZ / 25; 25 static const int cfq_slice_async_rq = 2; ··· 39 40 #define CFQ_SLICE_SCALE (5) 41 42 - #define RQ_CIC(rq) ((struct cfq_io_context*)(rq)->elevator_private) 43 #define RQ_CFQQ(rq) ((rq)->elevator_private2) 44 45 static struct kmem_cache *cfq_pool; ··· 174 #define CFQ_CFQQ_FNS(name) \ 175 static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \ 176 { \ 177 - cfqq->flags |= (1 << CFQ_CFQQ_FLAG_##name); \ 178 } \ 179 static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \ 180 { \ 181 - cfqq->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \ 182 } \ 183 static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \ 184 { \ 185 - return (cfqq->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \ 186 } 187 188 CFQ_CFQQ_FNS(on_rr); ··· 1008 /* 1009 * follow expired path, else get first next available 1010 */ 1011 - if ((rq = cfq_check_fifo(cfqq)) == NULL) 1012 rq = cfqq->next_rq; 1013 1014 /* ··· 1298 1299 ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio); 1300 switch (ioprio_class) { 1301 - default: 1302 - printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class); 1303 - case IOPRIO_CLASS_NONE: 1304 - /* 1305 - * no prio set, place us in the middle of the BE classes 1306 - */ 1307 - cfqq->ioprio = task_nice_ioprio(tsk); 1308 - cfqq->ioprio_class = IOPRIO_CLASS_BE; 1309 - break; 1310 - case IOPRIO_CLASS_RT: 1311 - cfqq->ioprio = task_ioprio(ioc); 1312 - cfqq->ioprio_class = IOPRIO_CLASS_RT; 1313 - break; 1314 - case IOPRIO_CLASS_BE: 1315 - cfqq->ioprio = task_ioprio(ioc); 1316 - cfqq->ioprio_class = IOPRIO_CLASS_BE; 1317 - break; 1318 - case IOPRIO_CLASS_IDLE: 1319 - cfqq->ioprio_class = IOPRIO_CLASS_IDLE; 1320 - cfqq->ioprio = 7; 1321 - cfq_clear_cfqq_idle_window(cfqq); 1322 - break; 1323 } 1324 1325 /* ··· 1431 static struct cfq_queue ** 1432 cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio) 1433 { 1434 - switch(ioprio_class) { 1435 case IOPRIO_CLASS_RT: 1436 return &cfqd->async_cfqq[0][ioprio]; 1437 case IOPRIO_CLASS_BE: ··· 2022 2023 spin_lock_irqsave(cfqd->queue->queue_lock, flags); 2024 2025 - if ((cfqq = cfqd->active_queue) != NULL) { 2026 timed_out = 0; 2027 2028 /* ··· 2217 return ret; \ 2218 } 2219 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0); 2220 - STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1); 2221 - STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1); 2222 STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0); 2223 - STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0); 2224 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1); 2225 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1); 2226 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1); 2227 - STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX, 0); 2228 #undef STORE_FUNCTION 2229 2230 #define CFQ_ATTR(name) \
··· 15 /* 16 * tunables 17 */ 18 + /* max queue in one round of service */ 19 + static const int cfq_quantum = 4; 20 static const int cfq_fifo_expire[2] = { HZ / 4, HZ / 8 }; 21 + /* maximum backwards seek, in KiB */ 22 + static const int cfq_back_max = 16 * 1024; 23 + /* penalty of a backwards seek */ 24 + static const int cfq_back_penalty = 2; 25 static const int cfq_slice_sync = HZ / 10; 26 static int cfq_slice_async = HZ / 25; 27 static const int cfq_slice_async_rq = 2; ··· 37 38 #define CFQ_SLICE_SCALE (5) 39 40 + #define RQ_CIC(rq) \ 41 + ((struct cfq_io_context *) (rq)->elevator_private) 42 #define RQ_CFQQ(rq) ((rq)->elevator_private2) 43 44 static struct kmem_cache *cfq_pool; ··· 171 #define CFQ_CFQQ_FNS(name) \ 172 static inline void cfq_mark_cfqq_##name(struct cfq_queue *cfqq) \ 173 { \ 174 + (cfqq)->flags |= (1 << CFQ_CFQQ_FLAG_##name); \ 175 } \ 176 static inline void cfq_clear_cfqq_##name(struct cfq_queue *cfqq) \ 177 { \ 178 + (cfqq)->flags &= ~(1 << CFQ_CFQQ_FLAG_##name); \ 179 } \ 180 static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \ 181 { \ 182 + return ((cfqq)->flags & (1 << CFQ_CFQQ_FLAG_##name)) != 0; \ 183 } 184 185 CFQ_CFQQ_FNS(on_rr); ··· 1005 /* 1006 * follow expired path, else get first next available 1007 */ 1008 + rq = cfq_check_fifo(cfqq); 1009 + if (rq == NULL) 1010 rq = cfqq->next_rq; 1011 1012 /* ··· 1294 1295 ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio); 1296 switch (ioprio_class) { 1297 + default: 1298 + printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class); 1299 + case IOPRIO_CLASS_NONE: 1300 + /* 1301 + * no prio set, place us in the middle of the BE classes 1302 + */ 1303 + cfqq->ioprio = task_nice_ioprio(tsk); 1304 + cfqq->ioprio_class = IOPRIO_CLASS_BE; 1305 + break; 1306 + case IOPRIO_CLASS_RT: 1307 + cfqq->ioprio = task_ioprio(ioc); 1308 + cfqq->ioprio_class = IOPRIO_CLASS_RT; 1309 + break; 1310 + case IOPRIO_CLASS_BE: 1311 + cfqq->ioprio = task_ioprio(ioc); 1312 + cfqq->ioprio_class = IOPRIO_CLASS_BE; 1313 + break; 1314 + case IOPRIO_CLASS_IDLE: 1315 + cfqq->ioprio_class = IOPRIO_CLASS_IDLE; 1316 + cfqq->ioprio = 7; 1317 + cfq_clear_cfqq_idle_window(cfqq); 1318 + break; 1319 } 1320 1321 /* ··· 1427 static struct cfq_queue ** 1428 cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio) 1429 { 1430 + switch (ioprio_class) { 1431 case IOPRIO_CLASS_RT: 1432 return &cfqd->async_cfqq[0][ioprio]; 1433 case IOPRIO_CLASS_BE: ··· 2018 2019 spin_lock_irqsave(cfqd->queue->queue_lock, flags); 2020 2021 + cfqq = cfqd->active_queue; 2022 + if (cfqq) { 2023 timed_out = 0; 2024 2025 /* ··· 2212 return ret; \ 2213 } 2214 STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0); 2215 + STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, 2216 + UINT_MAX, 1); 2217 + STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, 2218 + UINT_MAX, 1); 2219 STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0); 2220 + STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, 2221 + UINT_MAX, 0); 2222 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1); 2223 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1); 2224 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1); 2225 + STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, 2226 + UINT_MAX, 0); 2227 #undef STORE_FUNCTION 2228 2229 #define CFQ_ATTR(name) \
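The only non-mechanical bit in the cfq hunks is CFQ_CFQQ_FNS(), where the macro argument gains parentheses as checkpatch asks. The macro itself stamps out a mark/clear/test helper triple for each flag bit; a hedged userspace restatement of that pattern (all names made up) is:

#include <stdio.h>

enum { DEMO_FLAG_on_rr, DEMO_FLAG_idle_window };

struct demo_queue { unsigned int flags; };

/* One macro invocation per flag generates three tiny accessors. */
#define DEMO_FLAG_FNS(name)						\
static inline void demo_mark_##name(struct demo_queue *q)		\
{									\
	q->flags |= (1U << DEMO_FLAG_##name);				\
}									\
static inline void demo_clear_##name(struct demo_queue *q)		\
{									\
	q->flags &= ~(1U << DEMO_FLAG_##name);				\
}									\
static inline int demo_##name(const struct demo_queue *q)		\
{									\
	return (q->flags & (1U << DEMO_FLAG_##name)) != 0;		\
}

DEMO_FLAG_FNS(on_rr)
DEMO_FLAG_FNS(idle_window)

int main(void)
{
	struct demo_queue q = { 0 };

	demo_mark_on_rr(&q);
	printf("on_rr=%d idle_window=%d\n", demo_on_rr(&q), demo_idle_window(&q));
	return 0;
}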
+30 -27
block/elevator.c
··· 45 */ 46 static const int elv_hash_shift = 6; 47 #define ELV_HASH_BLOCK(sec) ((sec) >> 3) 48 - #define ELV_HASH_FN(sec) (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift)) 49 #define ELV_HASH_ENTRIES (1 << elv_hash_shift) 50 #define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors) 51 #define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash)) ··· 225 q->end_sector = 0; 226 q->boundary_rq = NULL; 227 228 - if (name && !(e = elevator_get(name))) 229 - return -EINVAL; 230 231 - if (!e && *chosen_elevator && !(e = elevator_get(chosen_elevator))) 232 - printk("I/O scheduler %s not found\n", chosen_elevator); 233 234 - if (!e && !(e = elevator_get(CONFIG_DEFAULT_IOSCHED))) { 235 - printk("Default I/O scheduler not found, using no-op\n"); 236 - e = elevator_get("noop"); 237 } 238 239 eq = elevator_alloc(q, e); ··· 261 elevator_attach(q, eq, data); 262 return ret; 263 } 264 - 265 EXPORT_SYMBOL(elevator_init); 266 267 void elevator_exit(elevator_t *e) ··· 273 274 kobject_put(&e->kobj); 275 } 276 - 277 EXPORT_SYMBOL(elevator_exit); 278 279 static void elv_activate_rq(struct request_queue *q, struct request *rq) ··· 364 rb_insert_color(&rq->rb_node, root); 365 return NULL; 366 } 367 - 368 EXPORT_SYMBOL(elv_rb_add); 369 370 void elv_rb_del(struct rb_root *root, struct request *rq) ··· 372 rb_erase(&rq->rb_node, root); 373 RB_CLEAR_NODE(&rq->rb_node); 374 } 375 - 376 EXPORT_SYMBOL(elv_rb_del); 377 378 struct request *elv_rb_find(struct rb_root *root, sector_t sector) ··· 392 393 return NULL; 394 } 395 - 396 EXPORT_SYMBOL(elv_rb_find); 397 398 /* ··· 403 { 404 sector_t boundary; 405 struct list_head *entry; 406 407 if (q->last_merge == rq) 408 q->last_merge = NULL; ··· 413 q->nr_sorted--; 414 415 boundary = q->end_sector; 416 - 417 list_for_each_prev(entry, &q->queue_head) { 418 struct request *pos = list_entry_rq(entry); 419 420 if (rq_data_dir(rq) != rq_data_dir(pos)) 421 break; 422 - if (pos->cmd_flags & (REQ_SOFTBARRIER|REQ_HARDBARRIER|REQ_STARTED)) 423 break; 424 if (rq->sector >= boundary) { 425 if (pos->sector < boundary) ··· 434 435 list_add(&rq->queuelist, entry); 436 } 437 - 438 EXPORT_SYMBOL(elv_dispatch_sort); 439 440 /* ··· 454 q->boundary_rq = rq; 455 list_add_tail(&rq->queuelist, &q->queue_head); 456 } 457 - 458 EXPORT_SYMBOL(elv_dispatch_add_tail); 459 460 int elv_merge(struct request_queue *q, struct request **req, struct bio *bio) ··· 672 q->end_sector = rq_end_sector(rq); 673 q->boundary_rq = rq; 674 } 675 - } else if (!(rq->cmd_flags & REQ_ELVPRIV) && where == ELEVATOR_INSERT_SORT) 676 where = ELEVATOR_INSERT_BACK; 677 678 if (plug) ··· 681 682 elv_insert(q, rq, where); 683 } 684 - 685 EXPORT_SYMBOL(__elv_add_request); 686 687 void elv_add_request(struct request_queue *q, struct request *rq, int where, ··· 692 __elv_add_request(q, rq, where, plug); 693 spin_unlock_irqrestore(q->queue_lock, flags); 694 } 695 - 696 EXPORT_SYMBOL(elv_add_request); 697 698 static inline struct request *__elv_next_request(struct request_queue *q) ··· 798 799 return rq; 800 } 801 - 802 EXPORT_SYMBOL(elv_next_request); 803 804 void elv_dequeue_request(struct request_queue *q, struct request *rq) ··· 815 if (blk_account_rq(rq)) 816 q->in_flight++; 817 } 818 - 819 EXPORT_SYMBOL(elv_dequeue_request); 820 821 int elv_queue_empty(struct request_queue *q) ··· 829 830 return 1; 831 } 832 - 833 EXPORT_SYMBOL(elv_queue_empty); 834 835 struct request *elv_latter_request(struct request_queue *q, struct request *rq) ··· 997 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED))) 998 def = " (default)"; 999 1000 - 
printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name, def); 1001 } 1002 EXPORT_SYMBOL_GPL(elv_register); 1003 ··· 1130 } 1131 1132 if (!elevator_switch(q, e)) 1133 - printk(KERN_ERR "elevator: switch to %s failed\n",elevator_name); 1134 return count; 1135 } 1136 ··· 1165 1166 return NULL; 1167 } 1168 - 1169 EXPORT_SYMBOL(elv_rb_former_request); 1170 1171 struct request *elv_rb_latter_request(struct request_queue *q, ··· 1177 1178 return NULL; 1179 } 1180 - 1181 EXPORT_SYMBOL(elv_rb_latter_request);
··· 45 */ 46 static const int elv_hash_shift = 6; 47 #define ELV_HASH_BLOCK(sec) ((sec) >> 3) 48 + #define ELV_HASH_FN(sec) \ 49 + (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift)) 50 #define ELV_HASH_ENTRIES (1 << elv_hash_shift) 51 #define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors) 52 #define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash)) ··· 224 q->end_sector = 0; 225 q->boundary_rq = NULL; 226 227 + if (name) { 228 + e = elevator_get(name); 229 + if (!e) 230 + return -EINVAL; 231 + } 232 233 + if (!e && *chosen_elevator) { 234 + e = elevator_get(chosen_elevator); 235 + if (!e) 236 + printk(KERN_ERR "I/O scheduler %s not found\n", 237 + chosen_elevator); 238 + } 239 240 + if (!e) { 241 + e = elevator_get(CONFIG_DEFAULT_IOSCHED); 242 + if (!e) { 243 + printk(KERN_ERR 244 + "Default I/O scheduler not found. " \ 245 + "Using noop.\n"); 246 + e = elevator_get("noop"); 247 + } 248 } 249 250 eq = elevator_alloc(q, e); ··· 248 elevator_attach(q, eq, data); 249 return ret; 250 } 251 EXPORT_SYMBOL(elevator_init); 252 253 void elevator_exit(elevator_t *e) ··· 261 262 kobject_put(&e->kobj); 263 } 264 EXPORT_SYMBOL(elevator_exit); 265 266 static void elv_activate_rq(struct request_queue *q, struct request *rq) ··· 353 rb_insert_color(&rq->rb_node, root); 354 return NULL; 355 } 356 EXPORT_SYMBOL(elv_rb_add); 357 358 void elv_rb_del(struct rb_root *root, struct request *rq) ··· 362 rb_erase(&rq->rb_node, root); 363 RB_CLEAR_NODE(&rq->rb_node); 364 } 365 EXPORT_SYMBOL(elv_rb_del); 366 367 struct request *elv_rb_find(struct rb_root *root, sector_t sector) ··· 383 384 return NULL; 385 } 386 EXPORT_SYMBOL(elv_rb_find); 387 388 /* ··· 395 { 396 sector_t boundary; 397 struct list_head *entry; 398 + int stop_flags; 399 400 if (q->last_merge == rq) 401 q->last_merge = NULL; ··· 404 q->nr_sorted--; 405 406 boundary = q->end_sector; 407 + stop_flags = REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED; 408 list_for_each_prev(entry, &q->queue_head) { 409 struct request *pos = list_entry_rq(entry); 410 411 if (rq_data_dir(rq) != rq_data_dir(pos)) 412 break; 413 + if (pos->cmd_flags & stop_flags) 414 break; 415 if (rq->sector >= boundary) { 416 if (pos->sector < boundary) ··· 425 426 list_add(&rq->queuelist, entry); 427 } 428 EXPORT_SYMBOL(elv_dispatch_sort); 429 430 /* ··· 446 q->boundary_rq = rq; 447 list_add_tail(&rq->queuelist, &q->queue_head); 448 } 449 EXPORT_SYMBOL(elv_dispatch_add_tail); 450 451 int elv_merge(struct request_queue *q, struct request **req, struct bio *bio) ··· 665 q->end_sector = rq_end_sector(rq); 666 q->boundary_rq = rq; 667 } 668 + } else if (!(rq->cmd_flags & REQ_ELVPRIV) && 669 + where == ELEVATOR_INSERT_SORT) 670 where = ELEVATOR_INSERT_BACK; 671 672 if (plug) ··· 673 674 elv_insert(q, rq, where); 675 } 676 EXPORT_SYMBOL(__elv_add_request); 677 678 void elv_add_request(struct request_queue *q, struct request *rq, int where, ··· 685 __elv_add_request(q, rq, where, plug); 686 spin_unlock_irqrestore(q->queue_lock, flags); 687 } 688 EXPORT_SYMBOL(elv_add_request); 689 690 static inline struct request *__elv_next_request(struct request_queue *q) ··· 792 793 return rq; 794 } 795 EXPORT_SYMBOL(elv_next_request); 796 797 void elv_dequeue_request(struct request_queue *q, struct request *rq) ··· 810 if (blk_account_rq(rq)) 811 q->in_flight++; 812 } 813 EXPORT_SYMBOL(elv_dequeue_request); 814 815 int elv_queue_empty(struct request_queue *q) ··· 825 826 return 1; 827 } 828 EXPORT_SYMBOL(elv_queue_empty); 829 830 struct request *elv_latter_request(struct request_queue *q, struct request *rq) 
··· 994 !strcmp(e->elevator_name, CONFIG_DEFAULT_IOSCHED))) 995 def = " (default)"; 996 997 + printk(KERN_INFO "io scheduler %s registered%s\n", e->elevator_name, 998 + def); 999 } 1000 EXPORT_SYMBOL_GPL(elv_register); 1001 ··· 1126 } 1127 1128 if (!elevator_switch(q, e)) 1129 + printk(KERN_ERR "elevator: switch to %s failed\n", 1130 + elevator_name); 1131 return count; 1132 } 1133 ··· 1160 1161 return NULL; 1162 } 1163 EXPORT_SYMBOL(elv_rb_former_request); 1164 1165 struct request *elv_rb_latter_request(struct request_queue *q, ··· 1173 1174 return NULL; 1175 } 1176 EXPORT_SYMBOL(elv_rb_latter_request);
+1 -1
drivers/block/sunvdc.c
··· 732 .handshake_complete = vdc_handshake_complete, 733 }; 734 735 - static void print_version(void) 736 { 737 static int version_printed; 738
··· 732 .handshake_complete = vdc_handshake_complete, 733 }; 734 735 + static void __devinit print_version(void) 736 { 737 static int version_printed; 738
-2
drivers/char/random.c
··· 667 add_timer_randomness(disk->random, 668 0x100 + MKDEV(disk->major, disk->first_minor)); 669 } 670 - 671 - EXPORT_SYMBOL(add_disk_randomness); 672 #endif 673 674 #define EXTRACT_SIZE 10
··· 667 add_timer_randomness(disk->random, 668 0x100 + MKDEV(disk->major, disk->first_minor)); 669 } 670 #endif 671 672 #define EXTRACT_SIZE 10
+1 -1
drivers/ide/ide-cd.c
··· 1722 */ 1723 if ((stat & DRQ_STAT) == 0) { 1724 spin_lock_irqsave(&ide_lock, flags); 1725 - if (__blk_end_request(rq, 0, 0)) 1726 BUG(); 1727 HWGROUP(drive)->rq = NULL; 1728 spin_unlock_irqrestore(&ide_lock, flags);
··· 1722 */ 1723 if ((stat & DRQ_STAT) == 0) { 1724 spin_lock_irqsave(&ide_lock, flags); 1725 + if (__blk_end_request(rq, 0, rq->data_len)) 1726 BUG(); 1727 HWGROUP(drive)->rq = NULL; 1728 spin_unlock_irqrestore(&ide_lock, flags);
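This is the "leftover data BUG" fix from the shortlog: with the reworked completion interface, passing 0 bytes to __blk_end_request() completes nothing, so a request that still carries data is reported as pending and the BUG() fires; passing rq->data_len finishes whatever is left. A hedged sketch of the resulting pattern (demo_finish_rq() is made up; the double-underscore variant is the flavor meant for callers that already hold the queue lock):

#include <linux/blkdev.h>
#include <linux/bug.h>

static void demo_finish_rq(struct request *rq)
{
	/* Complete every remaining byte; only then is "still pending"
	 * a genuine bug. */
	if (__blk_end_request(rq, 0, rq->data_len))
		BUG();
}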
+1 -3
fs/splice.c
··· 1033 1034 done: 1035 pipe->nrbufs = pipe->curbuf = 0; 1036 - if (bytes > 0) 1037 - file_accessed(in); 1038 - 1039 return bytes; 1040 1041 out_release:
··· 1033 1034 done: 1035 pipe->nrbufs = pipe->curbuf = 0; 1036 + file_accessed(in); 1037 return bytes; 1038 1039 out_release:
+9 -7
include/linux/blkdev.h
··· 39 struct io_context *get_io_context(gfp_t gfp_flags, int node); 40 struct io_context *alloc_io_context(gfp_t gfp_flags, int node); 41 void copy_io_context(struct io_context **pdst, struct io_context **psrc); 42 - void swap_io_context(struct io_context **ioc1, struct io_context **ioc2); 43 44 struct request; 45 typedef void (rq_end_io_fn)(struct request *, int); ··· 654 * blk_end_request() for parts of the original function. 655 * This prevents code duplication in drivers. 656 */ 657 - extern int blk_end_request(struct request *rq, int error, int nr_bytes); 658 - extern int __blk_end_request(struct request *rq, int error, int nr_bytes); 659 - extern int blk_end_bidi_request(struct request *rq, int error, int nr_bytes, 660 - int bidi_bytes); 661 extern void end_request(struct request *, int); 662 extern void end_queued_request(struct request *, int); 663 extern void end_dequeued_request(struct request *, int); 664 - extern int blk_end_request_callback(struct request *rq, int error, int nr_bytes, 665 - int (drv_callback)(struct request *)); 666 extern void blk_complete_request(struct request *); 667 668 /*
··· 39 struct io_context *get_io_context(gfp_t gfp_flags, int node); 40 struct io_context *alloc_io_context(gfp_t gfp_flags, int node); 41 void copy_io_context(struct io_context **pdst, struct io_context **psrc); 42 43 struct request; 44 typedef void (rq_end_io_fn)(struct request *, int); ··· 655 * blk_end_request() for parts of the original function. 656 * This prevents code duplication in drivers. 657 */ 658 + extern int blk_end_request(struct request *rq, int error, 659 + unsigned int nr_bytes); 660 + extern int __blk_end_request(struct request *rq, int error, 661 + unsigned int nr_bytes); 662 + extern int blk_end_bidi_request(struct request *rq, int error, 663 + unsigned int nr_bytes, unsigned int bidi_bytes); 664 extern void end_request(struct request *, int); 665 extern void end_queued_request(struct request *, int); 666 extern void end_dequeued_request(struct request *, int); 667 + extern int blk_end_request_callback(struct request *rq, int error, 668 + unsigned int nr_bytes, 669 + int (drv_callback)(struct request *)); 670 extern void blk_complete_request(struct request *); 671 672 /*