Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block

* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
block: Skip I/O merges when disabled
block: add large command support
block: replace sizeof(rq->cmd) with BLK_MAX_CDB
ide: use blk_rq_init() to initialize the request
block: use blk_rq_init() to initialize the request
block: rename and export rq_init()
block: no need to initialize rq->cmd with blk_get_request
block: no need to initialize rq->cmd in prepare_flush_fn hook
block/blk-barrier.c:blk_ordered_cur_seq() mustn't be inline
block/elevator.c:elv_rq_merge_ok() mustn't be inline
block: make queue flags non-atomic
block: add dma alignment and padding support to blk_rq_map_kern
unexport blk_max_pfn
ps3disk: Remove superfluous cast
block: make rq_init() do a full memset()
relay: fix splice problem

+274 -119
+3 -8
block/blk-barrier.c
··· 53 53 /* 54 54 * Cache flushing for ordered writes handling 55 55 */ 56 - inline unsigned blk_ordered_cur_seq(struct request_queue *q) 56 + unsigned blk_ordered_cur_seq(struct request_queue *q) 57 57 { 58 58 if (!q->ordseq) 59 59 return 0; ··· 143 143 end_io = post_flush_end_io; 144 144 } 145 145 146 + blk_rq_init(q, rq); 146 147 rq->cmd_flags = REQ_HARDBARRIER; 147 - rq_init(q, rq); 148 - rq->elevator_private = NULL; 149 - rq->elevator_private2 = NULL; 150 148 rq->rq_disk = q->bar_rq.rq_disk; 151 149 rq->end_io = end_io; 152 150 q->prepare_flush_fn(q, rq); ··· 165 167 blkdev_dequeue_request(rq); 166 168 q->orig_bar_rq = rq; 167 169 rq = &q->bar_rq; 168 - rq->cmd_flags = 0; 169 - rq_init(q, rq); 170 + blk_rq_init(q, rq); 170 171 if (bio_data_dir(q->orig_bar_rq->bio) == WRITE) 171 172 rq->cmd_flags |= REQ_RW; 172 173 if (q->ordered & QUEUE_ORDERED_FUA) 173 174 rq->cmd_flags |= REQ_FUA; 174 - rq->elevator_private = NULL; 175 - rq->elevator_private2 = NULL; 176 175 init_request_from_bio(rq, q->orig_bar_rq->bio); 177 176 rq->end_io = bar_end_io; 178 177
+34 -41
block/blk-core.c
··· 107 107 } 108 108 EXPORT_SYMBOL(blk_get_backing_dev_info); 109 109 110 - /* 111 - * We can't just memset() the structure, since the allocation path 112 - * already stored some information in the request. 113 - */ 114 - void rq_init(struct request_queue *q, struct request *rq) 110 + void blk_rq_init(struct request_queue *q, struct request *rq) 115 111 { 112 + memset(rq, 0, sizeof(*rq)); 113 + 116 114 INIT_LIST_HEAD(&rq->queuelist); 117 115 INIT_LIST_HEAD(&rq->donelist); 118 116 rq->q = q; 119 117 rq->sector = rq->hard_sector = (sector_t) -1; 120 - rq->nr_sectors = rq->hard_nr_sectors = 0; 121 - rq->current_nr_sectors = rq->hard_cur_sectors = 0; 122 - rq->bio = rq->biotail = NULL; 123 118 INIT_HLIST_NODE(&rq->hash); 124 119 RB_CLEAR_NODE(&rq->rb_node); 125 - rq->rq_disk = NULL; 126 - rq->nr_phys_segments = 0; 127 - rq->nr_hw_segments = 0; 128 - rq->ioprio = 0; 129 - rq->special = NULL; 130 - rq->buffer = NULL; 120 + rq->cmd = rq->__cmd; 131 121 rq->tag = -1; 132 - rq->errors = 0; 133 122 rq->ref_count = 1; 134 - rq->cmd_len = 0; 135 - memset(rq->cmd, 0, sizeof(rq->cmd)); 136 - rq->data_len = 0; 137 - rq->extra_len = 0; 138 - rq->sense_len = 0; 139 - rq->data = NULL; 140 - rq->sense = NULL; 141 - rq->end_io = NULL; 142 - rq->end_io_data = NULL; 143 - rq->next_rq = NULL; 144 123 } 124 + EXPORT_SYMBOL(blk_rq_init); 145 125 146 126 static void req_bio_endio(struct request *rq, struct bio *bio, 147 127 unsigned int nbytes, int error) ··· 174 194 175 195 if (blk_pc_request(rq)) { 176 196 printk(KERN_INFO " cdb: "); 177 - for (bit = 0; bit < sizeof(rq->cmd); bit++) 197 + for (bit = 0; bit < BLK_MAX_CDB; bit++) 178 198 printk("%02x ", rq->cmd[bit]); 179 199 printk("\n"); 180 200 } ··· 200 220 if (blk_queue_stopped(q)) 201 221 return; 202 222 203 - if (!test_and_set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) { 223 + if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) { 224 + __set_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags); 204 225 mod_timer(&q->unplug_timer, jiffies + 
q->unplug_delay); 205 226 blk_add_trace_generic(q, NULL, 0, BLK_TA_PLUG); 206 227 } ··· 216 235 { 217 236 WARN_ON(!irqs_disabled()); 218 237 219 - if (!test_and_clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) 238 + if (!test_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags)) 220 239 return 0; 221 240 241 + queue_flag_clear(QUEUE_FLAG_PLUGGED, q); 222 242 del_timer(&q->unplug_timer); 223 243 return 1; 224 244 } ··· 315 333 { 316 334 WARN_ON(!irqs_disabled()); 317 335 318 - clear_bit(QUEUE_FLAG_STOPPED, &q->queue_flags); 336 + queue_flag_clear(QUEUE_FLAG_STOPPED, q); 319 337 320 338 /* 321 339 * one level of recursion is ok and is much faster than kicking 322 340 * the unplug handling 323 341 */ 324 - if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) { 342 + if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) { 343 + queue_flag_set(QUEUE_FLAG_REENTER, q); 325 344 q->request_fn(q); 326 - clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags); 345 + queue_flag_clear(QUEUE_FLAG_REENTER, q); 327 346 } else { 328 347 blk_plug_device(q); 329 348 kblockd_schedule_work(&q->unplug_work); ··· 349 366 void blk_stop_queue(struct request_queue *q) 350 367 { 351 368 blk_remove_plug(q); 352 - set_bit(QUEUE_FLAG_STOPPED, &q->queue_flags); 369 + queue_flag_set(QUEUE_FLAG_STOPPED, q); 353 370 } 354 371 EXPORT_SYMBOL(blk_stop_queue); 355 372 ··· 378 395 * blk_run_queue - run a single device queue 379 396 * @q: The queue to run 380 397 */ 381 - void blk_run_queue(struct request_queue *q) 398 + void __blk_run_queue(struct request_queue *q) 382 399 { 383 - unsigned long flags; 384 - 385 - spin_lock_irqsave(q->queue_lock, flags); 386 400 blk_remove_plug(q); 387 401 388 402 /* ··· 387 407 * handling reinvoke the handler shortly if we already got there. 
388 408 */ 389 409 if (!elv_queue_empty(q)) { 390 - if (!test_and_set_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) { 410 + if (!test_bit(QUEUE_FLAG_REENTER, &q->queue_flags)) { 411 + queue_flag_set(QUEUE_FLAG_REENTER, q); 391 412 q->request_fn(q); 392 - clear_bit(QUEUE_FLAG_REENTER, &q->queue_flags); 413 + queue_flag_clear(QUEUE_FLAG_REENTER, q); 393 414 } else { 394 415 blk_plug_device(q); 395 416 kblockd_schedule_work(&q->unplug_work); 396 417 } 397 418 } 419 + } 420 + EXPORT_SYMBOL(__blk_run_queue); 398 421 422 + /** 423 + * blk_run_queue - run a single device queue 424 + * @q: The queue to run 425 + */ 426 + void blk_run_queue(struct request_queue *q) 427 + { 428 + unsigned long flags; 429 + 430 + spin_lock_irqsave(q->queue_lock, flags); 431 + __blk_run_queue(q); 399 432 spin_unlock_irqrestore(q->queue_lock, flags); 400 433 } 401 434 EXPORT_SYMBOL(blk_run_queue); ··· 421 428 void blk_cleanup_queue(struct request_queue *q) 422 429 { 423 430 mutex_lock(&q->sysfs_lock); 424 - set_bit(QUEUE_FLAG_DEAD, &q->queue_flags); 431 + queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q); 425 432 mutex_unlock(&q->sysfs_lock); 426 433 427 434 if (q->elevator) ··· 599 606 600 607 if (!rq) 601 608 return NULL; 609 + 610 + blk_rq_init(q, rq); 602 611 603 612 /* 604 613 * first three bits are identical in rq->cmd_flags and bio->bi_rw, ··· 783 788 */ 784 789 if (ioc_batching(q, ioc)) 785 790 ioc->nr_batch_requests--; 786 - 787 - rq_init(q, rq); 788 791 789 792 blk_add_trace_generic(q, bio, rw, BLK_TA_GETRQ); 790 793 out:
+20 -1
block/blk-map.c
··· 255 255 * @kbuf: the kernel buffer 256 256 * @len: length of user data 257 257 * @gfp_mask: memory allocation flags 258 + * 259 + * Description: 260 + * Data will be mapped directly if possible. Otherwise a bounce 261 + * buffer is used. 258 262 */ 259 263 int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, 260 264 unsigned int len, gfp_t gfp_mask) 261 265 { 266 + unsigned long kaddr; 267 + unsigned int alignment; 268 + int reading = rq_data_dir(rq) == READ; 269 + int do_copy = 0; 262 270 struct bio *bio; 263 271 264 272 if (len > (q->max_hw_sectors << 9)) ··· 274 266 if (!len || !kbuf) 275 267 return -EINVAL; 276 268 277 - bio = bio_map_kern(q, kbuf, len, gfp_mask); 269 + kaddr = (unsigned long)kbuf; 270 + alignment = queue_dma_alignment(q) | q->dma_pad_mask; 271 + do_copy = ((kaddr & alignment) || (len & alignment)); 272 + 273 + if (do_copy) 274 + bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading); 275 + else 276 + bio = bio_map_kern(q, kbuf, len, gfp_mask); 277 + 278 278 if (IS_ERR(bio)) 279 279 return PTR_ERR(bio); 280 280 281 281 if (rq_data_dir(rq) == WRITE) 282 282 bio->bi_rw |= (1 << BIO_RW); 283 + 284 + if (do_copy) 285 + rq->cmd_flags |= REQ_COPY_USER; 283 286 284 287 blk_rq_bio_prep(q, rq, bio); 285 288 blk_queue_bounce(q, &rq->bio);
+3 -3
block/blk-merge.c
··· 55 55 if (!rq->bio) 56 56 return; 57 57 58 - cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER); 58 + cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags); 59 59 hw_seg_size = seg_size = 0; 60 60 phys_size = hw_size = nr_phys_segs = nr_hw_segs = 0; 61 61 rq_for_each_segment(bv, rq, iter) { ··· 128 128 static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio, 129 129 struct bio *nxt) 130 130 { 131 - if (!(q->queue_flags & (1 << QUEUE_FLAG_CLUSTER))) 131 + if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags)) 132 132 return 0; 133 133 134 134 if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt))) ··· 175 175 int nsegs, cluster; 176 176 177 177 nsegs = 0; 178 - cluster = q->queue_flags & (1 << QUEUE_FLAG_CLUSTER); 178 + cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags); 179 179 180 180 /* 181 181 * for each bio in rq
+1 -2
block/blk-settings.c
··· 14 14 EXPORT_SYMBOL(blk_max_low_pfn); 15 15 16 16 unsigned long blk_max_pfn; 17 - EXPORT_SYMBOL(blk_max_pfn); 18 17 19 18 /** 20 19 * blk_queue_prep_rq - set a prepare_request function for queue ··· 287 288 t->max_segment_size = min(t->max_segment_size, b->max_segment_size); 288 289 t->hardsect_size = max(t->hardsect_size, b->hardsect_size); 289 290 if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) 290 - clear_bit(QUEUE_FLAG_CLUSTER, &t->queue_flags); 291 + queue_flag_clear(QUEUE_FLAG_CLUSTER, t); 291 292 } 292 293 EXPORT_SYMBOL(blk_queue_stack_limits); 293 294
+26
block/blk-sysfs.c
··· 135 135 return queue_var_show(max_hw_sectors_kb, (page)); 136 136 } 137 137 138 + static ssize_t queue_nomerges_show(struct request_queue *q, char *page) 139 + { 140 + return queue_var_show(blk_queue_nomerges(q), page); 141 + } 142 + 143 + static ssize_t queue_nomerges_store(struct request_queue *q, const char *page, 144 + size_t count) 145 + { 146 + unsigned long nm; 147 + ssize_t ret = queue_var_store(&nm, page, count); 148 + 149 + if (nm) 150 + set_bit(QUEUE_FLAG_NOMERGES, &q->queue_flags); 151 + else 152 + clear_bit(QUEUE_FLAG_NOMERGES, &q->queue_flags); 153 + 154 + return ret; 155 + } 156 + 138 157 139 158 static struct queue_sysfs_entry queue_requests_entry = { 140 159 .attr = {.name = "nr_requests", .mode = S_IRUGO | S_IWUSR }, ··· 189 170 .show = queue_hw_sector_size_show, 190 171 }; 191 172 173 + static struct queue_sysfs_entry queue_nomerges_entry = { 174 + .attr = {.name = "nomerges", .mode = S_IRUGO | S_IWUSR }, 175 + .show = queue_nomerges_show, 176 + .store = queue_nomerges_store, 177 + }; 178 + 192 179 static struct attribute *default_attrs[] = { 193 180 &queue_requests_entry.attr, 194 181 &queue_ra_entry.attr, ··· 202 177 &queue_max_sectors_entry.attr, 203 178 &queue_iosched_entry.attr, 204 179 &queue_hw_sector_size_entry.attr, 180 + &queue_nomerges_entry.attr, 205 181 NULL, 206 182 }; 207 183
+4 -4
block/blk-tag.c
··· 70 70 __blk_free_tags(bqt); 71 71 72 72 q->queue_tags = NULL; 73 - q->queue_flags &= ~(1 << QUEUE_FLAG_QUEUED); 73 + queue_flag_clear(QUEUE_FLAG_QUEUED, q); 74 74 } 75 75 76 76 /** ··· 98 98 **/ 99 99 void blk_queue_free_tags(struct request_queue *q) 100 100 { 101 - clear_bit(QUEUE_FLAG_QUEUED, &q->queue_flags); 101 + queue_flag_clear(QUEUE_FLAG_QUEUED, q); 102 102 } 103 103 EXPORT_SYMBOL(blk_queue_free_tags); 104 104 ··· 188 188 rc = blk_queue_resize_tags(q, depth); 189 189 if (rc) 190 190 return rc; 191 - set_bit(QUEUE_FLAG_QUEUED, &q->queue_flags); 191 + queue_flag_set(QUEUE_FLAG_QUEUED, q); 192 192 return 0; 193 193 } else 194 194 atomic_inc(&tags->refcnt); ··· 197 197 * assign it, all done 198 198 */ 199 199 q->queue_tags = tags; 200 - q->queue_flags |= (1 << QUEUE_FLAG_QUEUED); 200 + queue_flag_set(QUEUE_FLAG_QUEUED, q); 201 201 INIT_LIST_HEAD(&q->tag_busy_list); 202 202 return 0; 203 203 fail:
-1
block/blk.h
··· 10 10 extern struct kmem_cache *blk_requestq_cachep; 11 11 extern struct kobj_type blk_queue_ktype; 12 12 13 - void rq_init(struct request_queue *q, struct request *rq); 14 13 void init_request_from_bio(struct request *req, struct bio *bio); 15 14 void blk_rq_bio_prep(struct request_queue *q, struct request *rq, 16 15 struct bio *bio);
+14 -4
block/elevator.c
··· 69 69 /* 70 70 * can we safely merge with this request? 71 71 */ 72 - inline int elv_rq_merge_ok(struct request *rq, struct bio *bio) 72 + int elv_rq_merge_ok(struct request *rq, struct bio *bio) 73 73 { 74 74 if (!rq_mergeable(rq)) 75 75 return 0; ··· 487 487 return ret; 488 488 } 489 489 } 490 + 491 + if (blk_queue_nomerges(q)) 492 + return ELEVATOR_NO_MERGE; 490 493 491 494 /* 492 495 * See if our hash lookup can find a potential backmerge. ··· 1073 1070 */ 1074 1071 spin_lock_irq(q->queue_lock); 1075 1072 1076 - set_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); 1073 + queue_flag_set(QUEUE_FLAG_ELVSWITCH, q); 1077 1074 1078 1075 elv_drain_elevator(q); 1079 1076 ··· 1107 1104 * finally exit old elevator and turn off BYPASS. 1108 1105 */ 1109 1106 elevator_exit(old_elevator); 1110 - clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); 1107 + spin_lock_irq(q->queue_lock); 1108 + queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q); 1109 + spin_unlock_irq(q->queue_lock); 1110 + 1111 1111 return 1; 1112 1112 1113 1113 fail_register: ··· 1121 1115 elevator_exit(e); 1122 1116 q->elevator = old_elevator; 1123 1117 elv_register_queue(q); 1124 - clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); 1118 + 1119 + spin_lock_irq(q->queue_lock); 1120 + queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q); 1121 + spin_unlock_irq(q->queue_lock); 1122 + 1125 1123 return 0; 1126 1124 } 1127 1125
-3
block/scsi_ioctl.c
··· 217 217 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq, 218 218 struct sg_io_hdr *hdr, int has_write_perm) 219 219 { 220 - memset(rq->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */ 221 - 222 220 if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len)) 223 221 return -EFAULT; 224 222 if (blk_verify_command(rq->cmd, has_write_perm)) ··· 529 531 rq->data_len = 0; 530 532 rq->extra_len = 0; 531 533 rq->timeout = BLK_DEFAULT_SG_TIMEOUT; 532 - memset(rq->cmd, 0, sizeof(rq->cmd)); 533 534 rq->cmd[0] = cmd; 534 535 rq->cmd[4] = data; 535 536 rq->cmd_len = 6;
+1 -1
drivers/block/loop.c
··· 546 546 { 547 547 struct loop_device *lo = q->queuedata; 548 548 549 - clear_bit(QUEUE_FLAG_PLUGGED, &q->queue_flags); 549 + queue_flag_clear_unlocked(QUEUE_FLAG_PLUGGED, q); 550 550 blk_run_address_space(lo->lo_backing_file->f_mapping); 551 551 } 552 552
+1
drivers/block/nbd.c
··· 577 577 switch (cmd) { 578 578 case NBD_DISCONNECT: 579 579 printk(KERN_INFO "%s: NBD_DISCONNECT\n", lo->disk->disk_name); 580 + blk_rq_init(NULL, &sreq); 580 581 sreq.cmd_type = REQ_TYPE_SPECIAL; 581 582 nbd_cmd(&sreq) = NBD_CMD_DISC; 582 583 /*
+1 -3
drivers/block/paride/pd.c
··· 716 716 struct request rq; 717 717 int err = 0; 718 718 719 - memset(&rq, 0, sizeof(rq)); 720 - rq.errors = 0; 719 + blk_rq_init(NULL, &rq); 721 720 rq.rq_disk = disk->gd; 722 - rq.ref_count = 1; 723 721 rq.end_io_data = &wait; 724 722 rq.end_io = blk_end_sync_rq; 725 723 blk_insert_request(disk->gd->queue, &rq, 0, func);
-2
drivers/block/pktcdvd.c
··· 776 776 777 777 rq->cmd_len = COMMAND_SIZE(cgc->cmd[0]); 778 778 memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE); 779 - if (sizeof(rq->cmd) > CDROM_PACKET_SIZE) 780 - memset(rq->cmd + CDROM_PACKET_SIZE, 0, sizeof(rq->cmd) - CDROM_PACKET_SIZE); 781 779 782 780 rq->timeout = 60*HZ; 783 781 rq->cmd_type = REQ_TYPE_BLOCK_PC;
+1 -3
drivers/block/ps3disk.c
··· 102 102 dev_dbg(&dev->sbd.core, 103 103 "%s:%u: bio %u: %u segs %u sectors from %lu\n", 104 104 __func__, __LINE__, i, bio_segments(iter.bio), 105 - bio_sectors(iter.bio), 106 - (unsigned long)iter.bio->bi_sector); 105 + bio_sectors(iter.bio), iter.bio->bi_sector); 107 106 108 107 size = bvec->bv_len; 109 108 buf = bvec_kmap_irq(bvec, &flags); ··· 405 406 406 407 dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__); 407 408 408 - memset(req->cmd, 0, sizeof(req->cmd)); 409 409 req->cmd_type = REQ_TYPE_FLUSH; 410 410 } 411 411
+1 -1
drivers/block/ub.c
··· 2399 2399 del_gendisk(lun->disk); 2400 2400 /* 2401 2401 * I wish I could do: 2402 - * set_bit(QUEUE_FLAG_DEAD, &q->queue_flags); 2402 + * queue_flag_set(QUEUE_FLAG_DEAD, q); 2403 2403 * As it is, we rely on our internal poisoning and let 2404 2404 * the upper levels to spin furiously failing all the I/O. 2405 2405 */
-1
drivers/cdrom/cdrom.c
··· 2194 2194 if (ret) 2195 2195 break; 2196 2196 2197 - memset(rq->cmd, 0, sizeof(rq->cmd)); 2198 2197 rq->cmd[0] = GPCMD_READ_CD; 2199 2198 rq->cmd[1] = 1 << 2; 2200 2199 rq->cmd[2] = (lba >> 24) & 0xff;
+2 -2
drivers/ide/ide-cd.c
··· 782 782 783 783 sector_div(frame, queue_hardsect_size(drive->queue) >> SECTOR_BITS); 784 784 785 - memset(rq->cmd, 0, sizeof(rq->cmd)); 785 + memset(rq->cmd, 0, BLK_MAX_CDB); 786 786 rq->cmd[0] = GPCMD_SEEK; 787 787 put_unaligned(cpu_to_be32(frame), (unsigned int *) &rq->cmd[2]); 788 788 ··· 1694 1694 long block = (long)rq->hard_sector / (hard_sect >> 9); 1695 1695 unsigned long blocks = rq->hard_nr_sectors / (hard_sect >> 9); 1696 1696 1697 - memset(rq->cmd, 0, sizeof(rq->cmd)); 1697 + memset(rq->cmd, 0, BLK_MAX_CDB); 1698 1698 1699 1699 if (rq_data_dir(rq) == READ) 1700 1700 rq->cmd[0] = GPCMD_READ_10;
+1 -1
drivers/ide/ide-cd_verbose.c
··· 326 326 327 327 printk(KERN_ERR " The failed \"%s\" packet command " 328 328 "was: \n \"", s); 329 - for (i = 0; i < sizeof(failed_command->cmd); i++) 329 + for (i = 0; i < BLK_MAX_CDB; i++) 330 330 printk(KERN_CONT "%02x ", failed_command->cmd[i]); 331 331 printk(KERN_CONT "\"\n"); 332 332 }
+1 -2
drivers/ide/ide-io.c
··· 1550 1550 1551 1551 void ide_init_drive_cmd (struct request *rq) 1552 1552 { 1553 - memset(rq, 0, sizeof(*rq)); 1554 - rq->ref_count = 1; 1553 + blk_rq_init(NULL, rq); 1555 1554 } 1556 1555 1557 1556 EXPORT_SYMBOL(ide_init_drive_cmd);
+1 -1
drivers/ide/ide-tape.c
··· 662 662 663 663 static void idetape_init_rq(struct request *rq, u8 cmd) 664 664 { 665 - memset(rq, 0, sizeof(*rq)); 665 + blk_rq_init(NULL, rq); 666 666 rq->cmd_type = REQ_TYPE_SPECIAL; 667 667 rq->cmd[0] = cmd; 668 668 }
+1 -2
drivers/ide/ide-taskfile.c
··· 494 494 { 495 495 struct request rq; 496 496 497 - memset(&rq, 0, sizeof(rq)); 498 - rq.ref_count = 1; 497 + blk_rq_init(NULL, &rq); 499 498 rq.cmd_type = REQ_TYPE_ATA_TASKFILE; 500 499 rq.buffer = buf; 501 500
+2 -2
drivers/ide/ide.c
··· 564 564 if (!(drive->dn % 2)) 565 565 ide_acpi_get_timing(hwif); 566 566 567 - memset(&rq, 0, sizeof(rq)); 567 + blk_rq_init(NULL, &rq); 568 568 memset(&rqpm, 0, sizeof(rqpm)); 569 569 memset(&args, 0, sizeof(args)); 570 570 rq.cmd_type = REQ_TYPE_PM_SUSPEND; ··· 602 602 603 603 ide_acpi_exec_tfs(drive); 604 604 605 - memset(&rq, 0, sizeof(rq)); 605 + blk_rq_init(NULL, &rq); 606 606 memset(&rqpm, 0, sizeof(rqpm)); 607 607 memset(&args, 0, sizeof(args)); 608 608 rq.cmd_type = REQ_TYPE_PM_RESUME;
-2
drivers/md/dm-emc.c
··· 110 110 memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE); 111 111 rq->sense_len = 0; 112 112 113 - memset(&rq->cmd, 0, BLK_MAX_CDB); 114 - 115 113 rq->timeout = EMC_FAILOVER_TIMEOUT; 116 114 rq->cmd_type = REQ_TYPE_BLOCK_PC; 117 115 rq->cmd_flags |= REQ_FAILFAST | REQ_NOMERGE;
-1
drivers/md/dm-mpath-hp-sw.c
··· 137 137 req->sense = h->sense; 138 138 memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE); 139 139 140 - memset(&req->cmd, 0, BLK_MAX_CDB); 141 140 req->cmd[0] = START_STOP; 142 141 req->cmd[4] = 1; 143 142 req->cmd_len = COMMAND_SIZE(req->cmd[0]);
-1
drivers/md/dm-mpath-rdac.c
··· 284 284 return NULL; 285 285 } 286 286 287 - memset(&rq->cmd, 0, BLK_MAX_CDB); 288 287 rq->sense = h->sense; 289 288 memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE); 290 289 rq->sense_len = 0;
+5 -2
drivers/md/dm-table.c
··· 873 873 q->max_hw_sectors = t->limits.max_hw_sectors; 874 874 q->seg_boundary_mask = t->limits.seg_boundary_mask; 875 875 q->bounce_pfn = t->limits.bounce_pfn; 876 + /* XXX: the below will probably go bug. must ensure there can be no 877 + * concurrency on queue_flags, and use the unlocked versions... 878 + */ 876 879 if (t->limits.no_cluster) 877 - q->queue_flags &= ~(1 << QUEUE_FLAG_CLUSTER); 880 + queue_flag_clear(QUEUE_FLAG_CLUSTER, q); 878 881 else 879 - q->queue_flags |= (1 << QUEUE_FLAG_CLUSTER); 882 + queue_flag_set(QUEUE_FLAG_CLUSTER, q); 880 883 881 884 } 882 885
+2 -1
drivers/md/md.c
··· 282 282 kfree(new); 283 283 return NULL; 284 284 } 285 - set_bit(QUEUE_FLAG_CLUSTER, &new->queue->queue_flags); 285 + /* Can be unlocked because the queue is new: no concurrency */ 286 + queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, new->queue); 286 287 287 288 blk_queue_make_request(new->queue, md_fail_request); 288 289
+1 -1
drivers/scsi/scsi_debug.c
··· 1773 1773 if (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) 1774 1774 printk(KERN_INFO "scsi_debug: slave_alloc <%u %u %u %u>\n", 1775 1775 sdp->host->host_no, sdp->channel, sdp->id, sdp->lun); 1776 - set_bit(QUEUE_FLAG_BIDI, &sdp->request_queue->queue_flags); 1776 + queue_flag_set_unlocked(QUEUE_FLAG_BIDI, sdp->request_queue); 1777 1777 return 0; 1778 1778 } 1779 1779
+1
drivers/scsi/scsi_error.c
··· 1771 1771 unsigned long flags; 1772 1772 int rtn; 1773 1773 1774 + blk_rq_init(NULL, &req); 1774 1775 scmd->request = &req; 1775 1776 memset(&scmd->eh_timeout, 0, sizeof(scmd->eh_timeout)); 1776 1777
+17 -12
drivers/scsi/scsi_lib.c
··· 536 536 !shost->host_blocked && !shost->host_self_blocked && 537 537 !((shost->can_queue > 0) && 538 538 (shost->host_busy >= shost->can_queue))) { 539 + 540 + int flagset; 541 + 539 542 /* 540 543 * As long as shost is accepting commands and we have 541 544 * starved queues, call blk_run_queue. scsi_request_fn ··· 552 549 sdev = list_entry(shost->starved_list.next, 553 550 struct scsi_device, starved_entry); 554 551 list_del_init(&sdev->starved_entry); 555 - spin_unlock_irqrestore(shost->host_lock, flags); 552 + spin_unlock(shost->host_lock); 556 553 554 + spin_lock(sdev->request_queue->queue_lock); 555 + flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) && 556 + !test_bit(QUEUE_FLAG_REENTER, 557 + &sdev->request_queue->queue_flags); 558 + if (flagset) 559 + queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue); 560 + __blk_run_queue(sdev->request_queue); 561 + if (flagset) 562 + queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue); 563 + spin_unlock(sdev->request_queue->queue_lock); 557 564 558 - if (test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) && 559 - !test_and_set_bit(QUEUE_FLAG_REENTER, 560 - &sdev->request_queue->queue_flags)) { 561 - blk_run_queue(sdev->request_queue); 562 - clear_bit(QUEUE_FLAG_REENTER, 563 - &sdev->request_queue->queue_flags); 564 - } else 565 - blk_run_queue(sdev->request_queue); 566 - 567 - spin_lock_irqsave(shost->host_lock, flags); 565 + spin_lock(shost->host_lock); 568 566 if (unlikely(!list_empty(&sdev->starved_entry))) 569 567 /* 570 568 * sdev lost a race, and was put back on the ··· 1589 1585 1590 1586 blk_queue_max_segment_size(q, dma_get_max_seg_size(dev)); 1591 1587 1588 + /* New queue, no concurrency on queue_flags */ 1592 1589 if (!shost->use_clustering) 1593 - clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags); 1590 + queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q); 1594 1591 1595 1592 /* 1596 1593 * set a reasonable default alignment on word boundaries: the
+1 -2
drivers/scsi/scsi_transport_sas.c
··· 248 248 else 249 249 q->queuedata = shost; 250 250 251 - set_bit(QUEUE_FLAG_BIDI, &q->queue_flags); 252 - 251 + queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q); 253 252 return 0; 254 253 } 255 254
-1
drivers/scsi/sd.c
··· 860 860 861 861 static void sd_prepare_flush(struct request_queue *q, struct request *rq) 862 862 { 863 - memset(rq->cmd, 0, sizeof(rq->cmd)); 864 863 rq->cmd_type = REQ_TYPE_BLOCK_PC; 865 864 rq->timeout = SD_TIMEOUT; 866 865 rq->cmd[0] = SYNCHRONIZE_CACHE;
+90
fs/bio.c
··· 937 937 return ERR_PTR(-EINVAL); 938 938 } 939 939 940 + static void bio_copy_kern_endio(struct bio *bio, int err) 941 + { 942 + struct bio_vec *bvec; 943 + const int read = bio_data_dir(bio) == READ; 944 + char *p = bio->bi_private; 945 + int i; 946 + 947 + __bio_for_each_segment(bvec, bio, i, 0) { 948 + char *addr = page_address(bvec->bv_page); 949 + 950 + if (read && !err) 951 + memcpy(p, addr, bvec->bv_len); 952 + 953 + __free_page(bvec->bv_page); 954 + p += bvec->bv_len; 955 + } 956 + 957 + bio_put(bio); 958 + } 959 + 960 + /** 961 + * bio_copy_kern - copy kernel address into bio 962 + * @q: the struct request_queue for the bio 963 + * @data: pointer to buffer to copy 964 + * @len: length in bytes 965 + * @gfp_mask: allocation flags for bio and page allocation 966 + * 967 + * copy the kernel address into a bio suitable for io to a block 968 + * device. Returns an error pointer in case of error. 969 + */ 970 + struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len, 971 + gfp_t gfp_mask, int reading) 972 + { 973 + unsigned long kaddr = (unsigned long)data; 974 + unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT; 975 + unsigned long start = kaddr >> PAGE_SHIFT; 976 + const int nr_pages = end - start; 977 + struct bio *bio; 978 + struct bio_vec *bvec; 979 + int i, ret; 980 + 981 + bio = bio_alloc(gfp_mask, nr_pages); 982 + if (!bio) 983 + return ERR_PTR(-ENOMEM); 984 + 985 + while (len) { 986 + struct page *page; 987 + unsigned int bytes = PAGE_SIZE; 988 + 989 + if (bytes > len) 990 + bytes = len; 991 + 992 + page = alloc_page(q->bounce_gfp | gfp_mask); 993 + if (!page) { 994 + ret = -ENOMEM; 995 + goto cleanup; 996 + } 997 + 998 + if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes) { 999 + ret = -EINVAL; 1000 + goto cleanup; 1001 + } 1002 + 1003 + len -= bytes; 1004 + } 1005 + 1006 + if (!reading) { 1007 + void *p = data; 1008 + 1009 + bio_for_each_segment(bvec, bio, i) { 1010 + char *addr = page_address(bvec->bv_page); 
1011 + 1012 + memcpy(addr, p, bvec->bv_len); 1013 + p += bvec->bv_len; 1014 + } 1015 + } 1016 + 1017 + bio->bi_private = data; 1018 + bio->bi_end_io = bio_copy_kern_endio; 1019 + return bio; 1020 + cleanup: 1021 + bio_for_each_segment(bvec, bio, i) 1022 + __free_page(bvec->bv_page); 1023 + 1024 + bio_put(bio); 1025 + 1026 + return ERR_PTR(ret); 1027 + } 1028 + 940 1029 /* 941 1030 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions 942 1031 * for performing direct-IO in BIOs. ··· 1362 1273 EXPORT_SYMBOL(bio_map_user); 1363 1274 EXPORT_SYMBOL(bio_unmap_user); 1364 1275 EXPORT_SYMBOL(bio_map_kern); 1276 + EXPORT_SYMBOL(bio_copy_kern); 1365 1277 EXPORT_SYMBOL(bio_pair_release); 1366 1278 EXPORT_SYMBOL(bio_split); 1367 1279 EXPORT_SYMBOL(bio_split_pool);
+1 -1
fs/splice.c
··· 1075 1075 1076 1076 ret = splice_direct_to_actor(in, &sd, direct_splice_actor); 1077 1077 if (ret > 0) 1078 - *ppos += ret; 1078 + *ppos = sd.pos; 1079 1079 1080 1080 return ret; 1081 1081 }
+2
include/linux/bio.h
··· 324 324 extern void bio_unmap_user(struct bio *); 325 325 extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int, 326 326 gfp_t); 327 + extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int, 328 + gfp_t, int); 327 329 extern void bio_set_pages_dirty(struct bio *bio); 328 330 extern void bio_check_pages_dirty(struct bio *bio); 329 331 extern struct bio *bio_copy_user(struct request_queue *, unsigned long, unsigned int, int);
+35 -6
include/linux/blkdev.h
··· 215 215 /* 216 216 * when request is used as a packet command carrier 217 217 */ 218 - unsigned int cmd_len; 219 - unsigned char cmd[BLK_MAX_CDB]; 218 + unsigned short cmd_len; 219 + unsigned char __cmd[BLK_MAX_CDB]; 220 + unsigned char *cmd; 220 221 221 222 unsigned int data_len; 222 223 unsigned int extra_len; /* length of alignment and padding */ ··· 408 407 #define QUEUE_FLAG_PLUGGED 7 /* queue is plugged */ 409 408 #define QUEUE_FLAG_ELVSWITCH 8 /* don't use elevator, just do FIFO */ 410 409 #define QUEUE_FLAG_BIDI 9 /* queue supports bidi requests */ 410 + #define QUEUE_FLAG_NOMERGES 10 /* disable merge attempts */ 411 + 412 + static inline void queue_flag_set_unlocked(unsigned int flag, 413 + struct request_queue *q) 414 + { 415 + __set_bit(flag, &q->queue_flags); 416 + } 417 + 418 + static inline void queue_flag_set(unsigned int flag, struct request_queue *q) 419 + { 420 + WARN_ON_ONCE(!spin_is_locked(q->queue_lock)); 421 + __set_bit(flag, &q->queue_flags); 422 + } 423 + 424 + static inline void queue_flag_clear_unlocked(unsigned int flag, 425 + struct request_queue *q) 426 + { 427 + __clear_bit(flag, &q->queue_flags); 428 + } 429 + 430 + static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) 431 + { 432 + WARN_ON_ONCE(!spin_is_locked(q->queue_lock)); 433 + __clear_bit(flag, &q->queue_flags); 434 + } 411 435 412 436 enum { 413 437 /* ··· 477 451 #define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags) 478 452 #define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) 479 453 #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) 454 + #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) 480 455 #define blk_queue_flushing(q) ((q)->ordseq) 481 456 482 457 #define blk_fs_request(rq) ((rq)->cmd_type == REQ_TYPE_FS) ··· 523 496 static inline void blk_set_queue_full(struct request_queue *q, int rw) 524 497 { 525 498 if (rw == READ) 526 - 
set_bit(QUEUE_FLAG_READFULL, &q->queue_flags); 499 + queue_flag_set(QUEUE_FLAG_READFULL, q); 527 500 else 528 - set_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags); 501 + queue_flag_set(QUEUE_FLAG_WRITEFULL, q); 529 502 } 530 503 531 504 static inline void blk_clear_queue_full(struct request_queue *q, int rw) 532 505 { 533 506 if (rw == READ) 534 - clear_bit(QUEUE_FLAG_READFULL, &q->queue_flags); 507 + queue_flag_clear(QUEUE_FLAG_READFULL, q); 535 508 else 536 - clear_bit(QUEUE_FLAG_WRITEFULL, &q->queue_flags); 509 + queue_flag_clear(QUEUE_FLAG_WRITEFULL, q); 537 510 } 538 511 539 512 ··· 610 583 extern void blk_unregister_queue(struct gendisk *disk); 611 584 extern void register_disk(struct gendisk *dev); 612 585 extern void generic_make_request(struct bio *bio); 586 + extern void blk_rq_init(struct request_queue *q, struct request *rq); 613 587 extern void blk_put_request(struct request *); 614 588 extern void __blk_put_request(struct request_queue *, struct request *); 615 589 extern void blk_end_sync_rq(struct request *rq, int error); ··· 654 626 extern void blk_stop_queue(struct request_queue *q); 655 627 extern void blk_sync_queue(struct request_queue *q); 656 628 extern void __blk_stop_queue(struct request_queue *q); 629 + extern void __blk_run_queue(struct request_queue *); 657 630 extern void blk_run_queue(struct request_queue *); 658 631 extern void blk_start_queueing(struct request_queue *); 659 632 extern int blk_rq_map_user(struct request_queue *, struct request *, void __user *, unsigned long);
+1 -1
kernel/relay.c
··· 1191 1191 ret = 0; 1192 1192 spliced = 0; 1193 1193 1194 - while (len) { 1194 + while (len && !spliced) { 1195 1195 ret = subbuf_splice_actor(in, ppos, pipe, len, flags, &nonpad_ret); 1196 1196 if (ret < 0) 1197 1197 break;