Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-2.6.31' of git://git.kernel.dk/linux-2.6-block

* 'for-2.6.31' of git://git.kernel.dk/linux-2.6-block: (153 commits)
block: add request clone interface (v2)
floppy: fix hibernation
ramdisk: remove long-deprecated "ramdisk=" boot-time parameter
fs/bio.c: add missing __user annotation
block: prevent possible io_context->refcount overflow
Add serial number support for virtio_blk, V4a
block: Add missing bounce_pfn stacking and fix comments
Revert "block: Fix bounce limit setting in DM"
cciss: decode unit attention in SCSI error handling code
cciss: Remove no longer needed sendcmd reject processing code
cciss: change SCSI error handling routines to work with interrupts enabled.
cciss: separate error processing and command retrying code in sendcmd_withirq_core()
cciss: factor out fix target status processing code from sendcmd functions
cciss: simplify interface of sendcmd() and sendcmd_withirq()
cciss: factor out core of sendcmd_withirq() for use by SCSI error handling code
cciss: Use schedule_timeout_uninterruptible in SCSI error handling code
block: needs to set the residual length of a bidi request
Revert "block: implement blkdev_readpages"
block: Fix bounce limit setting in DM
Removed reference to non-existent file Documentation/PCI/PCI-DMA-mapping.txt
...

Manually fix conflicts with tracing updates in:
block/blk-sysfs.c
drivers/ide/ide-atapi.c
drivers/ide/ide-cd.c
drivers/ide/ide-floppy.c
drivers/ide/ide-tape.c
include/trace/events/block.h
kernel/trace/blktrace.c

+3801 -2771
+59
Documentation/ABI/testing/sysfs-block
··· 60 60 Indicates whether the block layer should automatically 61 61 generate checksums for write requests bound for 62 62 devices that support receiving integrity metadata. 63 + 64 + What: /sys/block/<disk>/alignment_offset 65 + Date: April 2009 66 + Contact: Martin K. Petersen <martin.petersen@oracle.com> 67 + Description: 68 + Storage devices may report a physical block size that is 69 + bigger than the logical block size (for instance a drive 70 + with 4KB physical sectors exposing 512-byte logical 71 + blocks to the operating system). This parameter 72 + indicates how many bytes the beginning of the device is 73 + offset from the disk's natural alignment. 74 + 75 + What: /sys/block/<disk>/<partition>/alignment_offset 76 + Date: April 2009 77 + Contact: Martin K. Petersen <martin.petersen@oracle.com> 78 + Description: 79 + Storage devices may report a physical block size that is 80 + bigger than the logical block size (for instance a drive 81 + with 4KB physical sectors exposing 512-byte logical 82 + blocks to the operating system). This parameter 83 + indicates how many bytes the beginning of the partition 84 + is offset from the disk's natural alignment. 85 + 86 + What: /sys/block/<disk>/queue/logical_block_size 87 + Date: May 2009 88 + Contact: Martin K. Petersen <martin.petersen@oracle.com> 89 + Description: 90 + This is the smallest unit the storage device can 91 + address. It is typically 512 bytes. 92 + 93 + What: /sys/block/<disk>/queue/physical_block_size 94 + Date: May 2009 95 + Contact: Martin K. Petersen <martin.petersen@oracle.com> 96 + Description: 97 + This is the smallest unit the storage device can write 98 + without resorting to read-modify-write operation. It is 99 + usually the same as the logical block size but may be 100 + bigger. One example is SATA drives with 4KB sectors 101 + that expose a 512-byte logical block size to the 102 + operating system. 
103 + 104 + What: /sys/block/<disk>/queue/minimum_io_size 105 + Date: April 2009 106 + Contact: Martin K. Petersen <martin.petersen@oracle.com> 107 + Description: 108 + Storage devices may report a preferred minimum I/O size, 109 + which is the smallest request the device can perform 110 + without incurring a read-modify-write penalty. For disk 111 + drives this is often the physical block size. For RAID 112 + arrays it is often the stripe chunk size. 113 + 114 + What: /sys/block/<disk>/queue/optimal_io_size 115 + Date: April 2009 116 + Contact: Martin K. Petersen <martin.petersen@oracle.com> 117 + Description: 118 + Storage devices may report an optimal I/O size, which is 119 + the device's preferred unit of receiving I/O. This is 120 + rarely reported for disk drives. For RAID devices it is 121 + usually the stripe width or the internal block size.
+33
Documentation/ABI/testing/sysfs-bus-pci-devices-cciss
··· 1 + Where: /sys/bus/pci/devices/<dev>/ccissX/cXdY/model 2 + Date: March 2009 3 + Kernel Version: 2.6.30 4 + Contact: iss_storagedev@hp.com 5 + Description: Displays the SCSI INQUIRY page 0 model for logical drive 6 + Y of controller X. 7 + 8 + Where: /sys/bus/pci/devices/<dev>/ccissX/cXdY/rev 9 + Date: March 2009 10 + Kernel Version: 2.6.30 11 + Contact: iss_storagedev@hp.com 12 + Description: Displays the SCSI INQUIRY page 0 revision for logical 13 + drive Y of controller X. 14 + 15 + Where: /sys/bus/pci/devices/<dev>/ccissX/cXdY/unique_id 16 + Date: March 2009 17 + Kernel Version: 2.6.30 18 + Contact: iss_storagedev@hp.com 19 + Description: Displays the SCSI INQUIRY page 83 serial number for logical 20 + drive Y of controller X. 21 + 22 + Where: /sys/bus/pci/devices/<dev>/ccissX/cXdY/vendor 23 + Date: March 2009 24 + Kernel Version: 2.6.30 25 + Contact: iss_storagedev@hp.com 26 + Description: Displays the SCSI INQUIRY page 0 vendor for logical drive 27 + Y of controller X. 28 + 29 + Where: /sys/bus/pci/devices/<dev>/ccissX/cXdY/block:cciss!cXdY 30 + Date: March 2009 31 + Kernel Version: 2.6.30 32 + Contact: iss_storagedev@hp.com 33 + Description: A symbolic link to /sys/block/cciss!cXdY
+1 -1
Documentation/block/biodoc.txt
··· 186 186 do not have a corresponding kernel virtual address space mapping) and 187 187 low-memory pages. 188 188 189 - Note: Please refer to Documentation/PCI/PCI-DMA-mapping.txt for a discussion 189 + Note: Please refer to Documentation/DMA-mapping.txt for a discussion 190 190 on PCI high mem DMA aspects and mapping of scatter gather lists, and support 191 191 for 64 bit PCI. 192 192
+40 -23
arch/arm/plat-omap/mailbox.c
··· 147 147 return ret; 148 148 } 149 149 150 + struct omap_msg_tx_data { 151 + mbox_msg_t msg; 152 + void *arg; 153 + }; 154 + 155 + static void omap_msg_tx_end_io(struct request *rq, int error) 156 + { 157 + kfree(rq->special); 158 + __blk_put_request(rq->q, rq); 159 + } 160 + 150 161 int omap_mbox_msg_send(struct omap_mbox *mbox, mbox_msg_t msg, void* arg) 151 162 { 163 + struct omap_msg_tx_data *tx_data; 152 164 struct request *rq; 153 165 struct request_queue *q = mbox->txq->queue; 154 - int ret = 0; 166 + 167 + tx_data = kmalloc(sizeof(*tx_data), GFP_ATOMIC); 168 + if (unlikely(!tx_data)) 169 + return -ENOMEM; 155 170 156 171 rq = blk_get_request(q, WRITE, GFP_ATOMIC); 157 172 if (unlikely(!rq)) { 158 - ret = -ENOMEM; 159 - goto fail; 173 + kfree(tx_data); 174 + return -ENOMEM; 160 175 } 161 176 162 - rq->data = (void *)msg; 163 - blk_insert_request(q, rq, 0, arg); 177 + tx_data->msg = msg; 178 + tx_data->arg = arg; 179 + rq->end_io = omap_msg_tx_end_io; 180 + blk_insert_request(q, rq, 0, tx_data); 164 181 165 182 schedule_work(&mbox->txq->work); 166 - fail: 167 - return ret; 183 + return 0; 168 184 } 169 185 EXPORT_SYMBOL(omap_mbox_msg_send); 170 186 ··· 194 178 struct request_queue *q = mbox->txq->queue; 195 179 196 180 while (1) { 181 + struct omap_msg_tx_data *tx_data; 182 + 197 183 spin_lock(q->queue_lock); 198 - rq = elv_next_request(q); 184 + rq = blk_fetch_request(q); 199 185 spin_unlock(q->queue_lock); 200 186 201 187 if (!rq) 202 188 break; 203 189 204 - ret = __mbox_msg_send(mbox, (mbox_msg_t) rq->data, rq->special); 190 + tx_data = rq->special; 191 + 192 + ret = __mbox_msg_send(mbox, tx_data->msg, tx_data->arg); 205 193 if (ret) { 206 194 enable_mbox_irq(mbox, IRQ_TX); 195 + spin_lock(q->queue_lock); 196 + blk_requeue_request(q, rq); 197 + spin_unlock(q->queue_lock); 207 198 return; 208 199 } 209 200 210 201 spin_lock(q->queue_lock); 211 - if (__blk_end_request(rq, 0, 0)) 212 - BUG(); 202 + __blk_end_request_all(rq, 0); 213 203 
spin_unlock(q->queue_lock); 214 204 } 215 205 } ··· 240 218 241 219 while (1) { 242 220 spin_lock_irqsave(q->queue_lock, flags); 243 - rq = elv_next_request(q); 221 + rq = blk_fetch_request(q); 244 222 spin_unlock_irqrestore(q->queue_lock, flags); 245 223 if (!rq) 246 224 break; 247 225 248 - msg = (mbox_msg_t) rq->data; 249 - 250 - if (blk_end_request(rq, 0, 0)) 251 - BUG(); 252 - 226 + msg = (mbox_msg_t)rq->special; 227 + blk_end_request_all(rq, 0); 253 228 mbox->rxq->callback((void *)msg); 254 229 } 255 230 } ··· 283 264 goto nomem; 284 265 285 266 msg = mbox_fifo_read(mbox); 286 - rq->data = (void *)msg; 287 267 288 268 if (unlikely(mbox_seq_test(mbox, msg))) { 289 269 pr_info("mbox: Illegal seq bit!(%08x)\n", msg); ··· 290 272 mbox->err_notify(); 291 273 } 292 274 293 - blk_insert_request(q, rq, 0, NULL); 275 + blk_insert_request(q, rq, 0, (void *)msg); 294 276 if (mbox->ops->type == OMAP_MBOX_TYPE1) 295 277 break; 296 278 } ··· 347 329 348 330 while (1) { 349 331 spin_lock_irqsave(q->queue_lock, flags); 350 - rq = elv_next_request(q); 332 + rq = blk_fetch_request(q); 351 333 spin_unlock_irqrestore(q->queue_lock, flags); 352 334 353 335 if (!rq) 354 336 break; 355 337 356 - *p = (mbox_msg_t) rq->data; 338 + *p = (mbox_msg_t)rq->special; 357 339 358 - if (blk_end_request(rq, 0, 0)) 359 - BUG(); 340 + blk_end_request_all(rq, 0); 360 341 361 342 if (unlikely(mbox_seq_test(mbox, *p))) { 362 343 pr_info("mbox: Illegal seq bit!(%08x) ignored\n", *p);
+1 -1
arch/powerpc/sysdev/axonram.c
··· 250 250 251 251 set_capacity(bank->disk, bank->size >> AXON_RAM_SECTOR_SHIFT); 252 252 blk_queue_make_request(bank->disk->queue, axon_ram_make_request); 253 - blk_queue_hardsect_size(bank->disk->queue, AXON_RAM_SECTOR_SIZE); 253 + blk_queue_logical_block_size(bank->disk->queue, AXON_RAM_SECTOR_SIZE); 254 254 add_disk(bank->disk); 255 255 256 256 bank->irq_id = irq_of_parse_and_map(device->node, 0);
+7 -29
arch/um/drivers/ubd_kern.c
··· 451 451 452 452 /* Only changed by ubd_init, which is an initcall. */ 453 453 static int thread_fd = -1; 454 - 455 - static void ubd_end_request(struct request *req, int bytes, int error) 456 - { 457 - blk_end_request(req, error, bytes); 458 - } 459 - 460 - /* Callable only from interrupt context - otherwise you need to do 461 - * spin_lock_irq()/spin_lock_irqsave() */ 462 - static inline void ubd_finish(struct request *req, int bytes) 463 - { 464 - if(bytes < 0){ 465 - ubd_end_request(req, 0, -EIO); 466 - return; 467 - } 468 - ubd_end_request(req, bytes, 0); 469 - } 470 - 471 454 static LIST_HEAD(restart); 472 455 473 456 /* XXX - move this inside ubd_intr. */ ··· 458 475 static void ubd_handler(void) 459 476 { 460 477 struct io_thread_req *req; 461 - struct request *rq; 462 478 struct ubd *ubd; 463 479 struct list_head *list, *next_ele; 464 480 unsigned long flags; ··· 474 492 return; 475 493 } 476 494 477 - rq = req->req; 478 - rq->nr_sectors -= req->length >> 9; 479 - if(rq->nr_sectors == 0) 480 - ubd_finish(rq, rq->hard_nr_sectors << 9); 495 + blk_end_request(req->req, 0, req->length); 481 496 kfree(req); 482 497 } 483 498 reactivate_fd(thread_fd, UBD_IRQ); ··· 1222 1243 { 1223 1244 struct io_thread_req *io_req; 1224 1245 struct request *req; 1225 - int n, last_sectors; 1246 + sector_t sector; 1247 + int n; 1226 1248 1227 1249 while(1){ 1228 1250 struct ubd *dev = q->queuedata; 1229 1251 if(dev->end_sg == 0){ 1230 - struct request *req = elv_next_request(q); 1252 + struct request *req = blk_fetch_request(q); 1231 1253 if(req == NULL) 1232 1254 return; 1233 1255 1234 1256 dev->request = req; 1235 - blkdev_dequeue_request(req); 1236 1257 dev->start_sg = 0; 1237 1258 dev->end_sg = blk_rq_map_sg(q, req, dev->sg); 1238 1259 } 1239 1260 1240 1261 req = dev->request; 1241 - last_sectors = 0; 1262 + sector = blk_rq_pos(req); 1242 1263 while(dev->start_sg < dev->end_sg){ 1243 1264 struct scatterlist *sg = &dev->sg[dev->start_sg]; 1244 1265 1245 - req->sector += 
last_sectors; 1246 1266 io_req = kmalloc(sizeof(struct io_thread_req), 1247 1267 GFP_ATOMIC); 1248 1268 if(io_req == NULL){ ··· 1250 1272 return; 1251 1273 } 1252 1274 prepare_request(req, io_req, 1253 - (unsigned long long) req->sector << 9, 1275 + (unsigned long long)sector << 9, 1254 1276 sg->offset, sg->length, sg_page(sg)); 1255 1277 1256 - last_sectors = sg->length >> 9; 1278 + sector += sg->length >> 9; 1257 1279 n = os_write_file(thread_fd, &io_req, 1258 1280 sizeof(struct io_thread_req *)); 1259 1281 if(n != sizeof(struct io_thread_req *)){
+7 -4
block/Kconfig
··· 26 26 config LBD 27 27 bool "Support for large block devices and files" 28 28 depends on !64BIT 29 + default y 29 30 help 30 31 Enable block devices or files of size 2TB and larger. 31 32 ··· 39 38 40 39 The ext4 filesystem requires that this feature be enabled in 41 40 order to support filesystems that have the huge_file feature 42 - enabled. Otherwise, it will refuse to mount any filesystems 43 - that use the huge_file feature, which is enabled by default 44 - by mke2fs.ext4. The GFS2 filesystem also requires this feature. 41 + enabled. Otherwise, it will refuse to mount in the read-write 42 + mode any filesystems that use the huge_file feature, which is 43 + enabled by default by mke2fs.ext4. 45 44 46 - If unsure, say N. 45 + The GFS2 filesystem also requires this feature. 46 + 47 + If unsure, say Y. 47 48 48 49 config BLK_DEV_BSG 49 50 bool "Block layer SG support v4 (EXPERIMENTAL)"
+11 -13
block/as-iosched.c
··· 306 306 data_dir = rq_is_sync(rq1); 307 307 308 308 last = ad->last_sector[data_dir]; 309 - s1 = rq1->sector; 310 - s2 = rq2->sector; 309 + s1 = blk_rq_pos(rq1); 310 + s2 = blk_rq_pos(rq2); 311 311 312 312 BUG_ON(data_dir != rq_is_sync(rq2)); 313 313 ··· 566 566 as_update_thinktime(ad, aic, thinktime); 567 567 568 568 /* Calculate read -> read seek distance */ 569 - if (aic->last_request_pos < rq->sector) 570 - seek_dist = rq->sector - aic->last_request_pos; 569 + if (aic->last_request_pos < blk_rq_pos(rq)) 570 + seek_dist = blk_rq_pos(rq) - 571 + aic->last_request_pos; 571 572 else 572 - seek_dist = aic->last_request_pos - rq->sector; 573 + seek_dist = aic->last_request_pos - 574 + blk_rq_pos(rq); 573 575 as_update_seekdist(ad, aic, seek_dist); 574 576 } 575 - aic->last_request_pos = rq->sector + rq->nr_sectors; 577 + aic->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq); 576 578 set_bit(AS_TASK_IOSTARTED, &aic->state); 577 579 spin_unlock(&aic->lock); 578 580 } ··· 589 587 { 590 588 unsigned long delay; /* jiffies */ 591 589 sector_t last = ad->last_sector[ad->batch_data_dir]; 592 - sector_t next = rq->sector; 590 + sector_t next = blk_rq_pos(rq); 593 591 sector_t delta; /* acceptable close offset (in sectors) */ 594 592 sector_t s; 595 593 ··· 983 981 * This has to be set in order to be correctly updated by 984 982 * as_find_next_rq 985 983 */ 986 - ad->last_sector[data_dir] = rq->sector + rq->nr_sectors; 984 + ad->last_sector[data_dir] = blk_rq_pos(rq) + blk_rq_sectors(rq); 987 985 988 986 if (data_dir == BLK_RW_SYNC) { 989 987 struct io_context *ioc = RQ_IOC(rq); ··· 1314 1312 static void as_work_handler(struct work_struct *work) 1315 1313 { 1316 1314 struct as_data *ad = container_of(work, struct as_data, antic_work); 1317 - struct request_queue *q = ad->q; 1318 - unsigned long flags; 1319 1315 1320 - spin_lock_irqsave(q->queue_lock, flags); 1321 - blk_start_queueing(q); 1322 - spin_unlock_irqrestore(q->queue_lock, flags); 1316 + 
blk_run_queue(ad->q); 1323 1317 } 1324 1318 1325 1319 static int as_may_queue(struct request_queue *q, int rw)
+11 -16
block/blk-barrier.c
··· 106 106 */ 107 107 q->ordseq = 0; 108 108 rq = q->orig_bar_rq; 109 - 110 - if (__blk_end_request(rq, q->orderr, blk_rq_bytes(rq))) 111 - BUG(); 112 - 109 + __blk_end_request_all(rq, q->orderr); 113 110 return true; 114 111 } 115 112 ··· 163 166 * For an empty barrier, there's no actual BAR request, which 164 167 * in turn makes POSTFLUSH unnecessary. Mask them off. 165 168 */ 166 - if (!rq->hard_nr_sectors) { 169 + if (!blk_rq_sectors(rq)) { 167 170 q->ordered &= ~(QUEUE_ORDERED_DO_BAR | 168 171 QUEUE_ORDERED_DO_POSTFLUSH); 169 172 /* ··· 180 183 } 181 184 182 185 /* stash away the original request */ 183 - elv_dequeue_request(q, rq); 186 + blk_dequeue_request(rq); 184 187 q->orig_bar_rq = rq; 185 188 rq = NULL; 186 189 ··· 218 221 } else 219 222 skip |= QUEUE_ORDSEQ_PREFLUSH; 220 223 221 - if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && q->in_flight) 224 + if ((q->ordered & QUEUE_ORDERED_BY_DRAIN) && queue_in_flight(q)) 222 225 rq = NULL; 223 226 else 224 227 skip |= QUEUE_ORDSEQ_DRAIN; ··· 248 251 * Queue ordering not supported. Terminate 249 252 * with prejudice. 250 253 */ 251 - elv_dequeue_request(q, rq); 252 - if (__blk_end_request(rq, -EOPNOTSUPP, 253 - blk_rq_bytes(rq))) 254 - BUG(); 254 + blk_dequeue_request(rq); 255 + __blk_end_request_all(rq, -EOPNOTSUPP); 255 256 *rqp = NULL; 256 257 return false; 257 258 } ··· 324 329 /* 325 330 * The driver must store the error location in ->bi_sector, if 326 331 * it supports it. For non-stacked drivers, this should be copied 327 - * from rq->sector. 332 + * from blk_rq_pos(rq). 
328 333 */ 329 334 if (error_sector) 330 335 *error_sector = bio->bi_sector; ··· 388 393 389 394 bio->bi_sector = sector; 390 395 391 - if (nr_sects > q->max_hw_sectors) { 392 - bio->bi_size = q->max_hw_sectors << 9; 393 - nr_sects -= q->max_hw_sectors; 394 - sector += q->max_hw_sectors; 396 + if (nr_sects > queue_max_hw_sectors(q)) { 397 + bio->bi_size = queue_max_hw_sectors(q) << 9; 398 + nr_sects -= queue_max_hw_sectors(q); 399 + sector += queue_max_hw_sectors(q); 395 400 } else { 396 401 bio->bi_size = nr_sects << 9; 397 402 nr_sects = 0;
+555 -311
block/blk-core.c
··· 60 60 int rw = rq_data_dir(rq); 61 61 int cpu; 62 62 63 - if (!blk_fs_request(rq) || !blk_do_io_stat(rq)) 63 + if (!blk_do_io_stat(rq)) 64 64 return; 65 65 66 66 cpu = part_stat_lock(); 67 - part = disk_map_sector_rcu(rq->rq_disk, rq->sector); 67 + part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq)); 68 68 69 69 if (!new_io) 70 70 part_stat_inc(cpu, part, merges[rw]); ··· 119 119 INIT_LIST_HEAD(&rq->timeout_list); 120 120 rq->cpu = -1; 121 121 rq->q = q; 122 - rq->sector = rq->hard_sector = (sector_t) -1; 122 + rq->__sector = (sector_t) -1; 123 123 INIT_HLIST_NODE(&rq->hash); 124 124 RB_CLEAR_NODE(&rq->rb_node); 125 125 rq->cmd = rq->__cmd; 126 126 rq->cmd_len = BLK_MAX_CDB; 127 127 rq->tag = -1; 128 128 rq->ref_count = 1; 129 + rq->start_time = jiffies; 129 130 } 130 131 EXPORT_SYMBOL(blk_rq_init); 131 132 ··· 177 176 rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type, 178 177 rq->cmd_flags); 179 178 180 - printk(KERN_INFO " sector %llu, nr/cnr %lu/%u\n", 181 - (unsigned long long)rq->sector, 182 - rq->nr_sectors, 183 - rq->current_nr_sectors); 184 - printk(KERN_INFO " bio %p, biotail %p, buffer %p, data %p, len %u\n", 185 - rq->bio, rq->biotail, 186 - rq->buffer, rq->data, 187 - rq->data_len); 179 + printk(KERN_INFO " sector %llu, nr/cnr %u/%u\n", 180 + (unsigned long long)blk_rq_pos(rq), 181 + blk_rq_sectors(rq), blk_rq_cur_sectors(rq)); 182 + printk(KERN_INFO " bio %p, biotail %p, buffer %p, len %u\n", 183 + rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq)); 188 184 189 185 if (blk_pc_request(rq)) { 190 186 printk(KERN_INFO " cdb: "); ··· 323 325 } 324 326 EXPORT_SYMBOL(blk_unplug); 325 327 326 - static void blk_invoke_request_fn(struct request_queue *q) 327 - { 328 - if (unlikely(blk_queue_stopped(q))) 329 - return; 330 - 331 - /* 332 - * one level of recursion is ok and is much faster than kicking 333 - * the unplug handling 334 - */ 335 - if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) { 336 - q->request_fn(q); 337 - 
queue_flag_clear(QUEUE_FLAG_REENTER, q); 338 - } else { 339 - queue_flag_set(QUEUE_FLAG_PLUGGED, q); 340 - kblockd_schedule_work(q, &q->unplug_work); 341 - } 342 - } 343 - 344 328 /** 345 329 * blk_start_queue - restart a previously stopped queue 346 330 * @q: The &struct request_queue in question ··· 337 357 WARN_ON(!irqs_disabled()); 338 358 339 359 queue_flag_clear(QUEUE_FLAG_STOPPED, q); 340 - blk_invoke_request_fn(q); 360 + __blk_run_queue(q); 341 361 } 342 362 EXPORT_SYMBOL(blk_start_queue); 343 363 ··· 397 417 { 398 418 blk_remove_plug(q); 399 419 420 + if (unlikely(blk_queue_stopped(q))) 421 + return; 422 + 423 + if (elv_queue_empty(q)) 424 + return; 425 + 400 426 /* 401 427 * Only recurse once to avoid overrunning the stack, let the unplug 402 428 * handling reinvoke the handler shortly if we already got there. 403 429 */ 404 - if (!elv_queue_empty(q)) 405 - blk_invoke_request_fn(q); 430 + if (!queue_flag_test_and_set(QUEUE_FLAG_REENTER, q)) { 431 + q->request_fn(q); 432 + queue_flag_clear(QUEUE_FLAG_REENTER, q); 433 + } else { 434 + queue_flag_set(QUEUE_FLAG_PLUGGED, q); 435 + kblockd_schedule_work(q, &q->unplug_work); 436 + } 406 437 } 407 438 EXPORT_SYMBOL(__blk_run_queue); 408 439 ··· 423 432 * 424 433 * Description: 425 434 * Invoke request handling on this queue, if it has pending work to do. 426 - * May be used to restart queueing when a request has completed. Also 427 - * See @blk_start_queueing. 428 - * 435 + * May be used to restart queueing when a request has completed. 429 436 */ 430 437 void blk_run_queue(struct request_queue *q) 431 438 { ··· 883 894 EXPORT_SYMBOL(blk_get_request); 884 895 885 896 /** 886 - * blk_start_queueing - initiate dispatch of requests to device 887 - * @q: request queue to kick into gear 897 + * blk_make_request - given a bio, allocate a corresponding struct request. 
888 898 * 889 - * This is basically a helper to remove the need to know whether a queue 890 - * is plugged or not if someone just wants to initiate dispatch of requests 891 - * for this queue. Should be used to start queueing on a device outside 892 - * of ->request_fn() context. Also see @blk_run_queue. 899 + * @bio: The bio describing the memory mappings that will be submitted for IO. 900 + * It may be a chained-bio properly constructed by block/bio layer. 893 901 * 894 - * The queue lock must be held with interrupts disabled. 902 + * blk_make_request is the parallel of generic_make_request for BLOCK_PC 903 + * type commands. Where the struct request needs to be farther initialized by 904 + * the caller. It is passed a &struct bio, which describes the memory info of 905 + * the I/O transfer. 906 + * 907 + * The caller of blk_make_request must make sure that bi_io_vec 908 + * are set to describe the memory buffers. That bio_data_dir() will return 909 + * the needed direction of the request. (And all bio's in the passed bio-chain 910 + * are properly set accordingly) 911 + * 912 + * If called under none-sleepable conditions, mapped bio buffers must not 913 + * need bouncing, by calling the appropriate masked or flagged allocator, 914 + * suitable for the target device. Otherwise the call to blk_queue_bounce will 915 + * BUG. 916 + * 917 + * WARNING: When allocating/cloning a bio-chain, careful consideration should be 918 + * given to how you allocate bios. In particular, you cannot use __GFP_WAIT for 919 + * anything but the first bio in the chain. Otherwise you risk waiting for IO 920 + * completion of a bio that hasn't been submitted yet, thus resulting in a 921 + * deadlock. Alternatively bios should be allocated using bio_kmalloc() instead 922 + * of bio_alloc(), as that avoids the mempool deadlock. 923 + * If possible a big IO should be split into smaller parts when allocation 924 + * fails. Partial allocation should not be an error, or you risk a live-lock. 
895 925 */ 896 - void blk_start_queueing(struct request_queue *q) 926 + struct request *blk_make_request(struct request_queue *q, struct bio *bio, 927 + gfp_t gfp_mask) 897 928 { 898 - if (!blk_queue_plugged(q)) { 899 - if (unlikely(blk_queue_stopped(q))) 900 - return; 901 - q->request_fn(q); 902 - } else 903 - __generic_unplug_device(q); 929 + struct request *rq = blk_get_request(q, bio_data_dir(bio), gfp_mask); 930 + 931 + if (unlikely(!rq)) 932 + return ERR_PTR(-ENOMEM); 933 + 934 + for_each_bio(bio) { 935 + struct bio *bounce_bio = bio; 936 + int ret; 937 + 938 + blk_queue_bounce(q, &bounce_bio); 939 + ret = blk_rq_append_bio(q, rq, bounce_bio); 940 + if (unlikely(ret)) { 941 + blk_put_request(rq); 942 + return ERR_PTR(ret); 943 + } 944 + } 945 + 946 + return rq; 904 947 } 905 - EXPORT_SYMBOL(blk_start_queueing); 948 + EXPORT_SYMBOL(blk_make_request); 906 949 907 950 /** 908 951 * blk_requeue_request - put a request back on queue ··· 954 933 955 934 if (blk_rq_tagged(rq)) 956 935 blk_queue_end_tag(q, rq); 936 + 937 + BUG_ON(blk_queued_rq(rq)); 957 938 958 939 elv_requeue_request(q, rq); 959 940 } ··· 992 969 * barrier 993 970 */ 994 971 rq->cmd_type = REQ_TYPE_SPECIAL; 995 - rq->cmd_flags |= REQ_SOFTBARRIER; 996 972 997 973 rq->special = data; 998 974 ··· 1005 983 1006 984 drive_stat_acct(rq, 1); 1007 985 __elv_add_request(q, rq, where, 0); 1008 - blk_start_queueing(q); 986 + __blk_run_queue(q); 1009 987 spin_unlock_irqrestore(q->queue_lock, flags); 1010 988 } 1011 989 EXPORT_SYMBOL(blk_insert_request); ··· 1127 1105 if (bio_failfast_driver(bio)) 1128 1106 req->cmd_flags |= REQ_FAILFAST_DRIVER; 1129 1107 1130 - /* 1131 - * REQ_BARRIER implies no merging, but lets make it explicit 1132 - */ 1133 1108 if (unlikely(bio_discard(bio))) { 1134 1109 req->cmd_flags |= REQ_DISCARD; 1135 1110 if (bio_barrier(bio)) 1136 1111 req->cmd_flags |= REQ_SOFTBARRIER; 1137 1112 req->q->prepare_discard_fn(req->q, req); 1138 1113 } else if (unlikely(bio_barrier(bio))) 1139 - 
req->cmd_flags |= (REQ_HARDBARRIER | REQ_NOMERGE); 1114 + req->cmd_flags |= REQ_HARDBARRIER; 1140 1115 1141 1116 if (bio_sync(bio)) 1142 1117 req->cmd_flags |= REQ_RW_SYNC; ··· 1143 1124 req->cmd_flags |= REQ_NOIDLE; 1144 1125 1145 1126 req->errors = 0; 1146 - req->hard_sector = req->sector = bio->bi_sector; 1127 + req->__sector = bio->bi_sector; 1147 1128 req->ioprio = bio_prio(bio); 1148 - req->start_time = jiffies; 1149 1129 blk_rq_bio_prep(req->q, req, bio); 1150 1130 } 1151 1131 ··· 1160 1142 static int __make_request(struct request_queue *q, struct bio *bio) 1161 1143 { 1162 1144 struct request *req; 1163 - int el_ret, nr_sectors; 1145 + int el_ret; 1146 + unsigned int bytes = bio->bi_size; 1164 1147 const unsigned short prio = bio_prio(bio); 1165 1148 const int sync = bio_sync(bio); 1166 1149 const int unplug = bio_unplug(bio); 1167 1150 int rw_flags; 1168 - 1169 - nr_sectors = bio_sectors(bio); 1170 1151 1171 1152 /* 1172 1153 * low level driver can indicate that it wants pages above a ··· 1191 1174 1192 1175 req->biotail->bi_next = bio; 1193 1176 req->biotail = bio; 1194 - req->nr_sectors = req->hard_nr_sectors += nr_sectors; 1177 + req->__data_len += bytes; 1195 1178 req->ioprio = ioprio_best(req->ioprio, prio); 1196 1179 if (!blk_rq_cpu_valid(req)) 1197 1180 req->cpu = bio->bi_comp_cpu; ··· 1217 1200 * not touch req->buffer either... 
1218 1201 */ 1219 1202 req->buffer = bio_data(bio); 1220 - req->current_nr_sectors = bio_cur_sectors(bio); 1221 - req->hard_cur_sectors = req->current_nr_sectors; 1222 - req->sector = req->hard_sector = bio->bi_sector; 1223 - req->nr_sectors = req->hard_nr_sectors += nr_sectors; 1203 + req->__sector = bio->bi_sector; 1204 + req->__data_len += bytes; 1224 1205 req->ioprio = ioprio_best(req->ioprio, prio); 1225 1206 if (!blk_rq_cpu_valid(req)) 1226 1207 req->cpu = bio->bi_comp_cpu; ··· 1429 1414 goto end_io; 1430 1415 } 1431 1416 1432 - if (unlikely(nr_sectors > q->max_hw_sectors)) { 1417 + if (unlikely(nr_sectors > queue_max_hw_sectors(q))) { 1433 1418 printk(KERN_ERR "bio too big device %s (%u > %u)\n", 1434 - bdevname(bio->bi_bdev, b), 1435 - bio_sectors(bio), 1436 - q->max_hw_sectors); 1419 + bdevname(bio->bi_bdev, b), 1420 + bio_sectors(bio), 1421 + queue_max_hw_sectors(q)); 1437 1422 goto end_io; 1438 1423 } 1439 1424 ··· 1599 1584 */ 1600 1585 int blk_rq_check_limits(struct request_queue *q, struct request *rq) 1601 1586 { 1602 - if (rq->nr_sectors > q->max_sectors || 1603 - rq->data_len > q->max_hw_sectors << 9) { 1587 + if (blk_rq_sectors(rq) > queue_max_sectors(q) || 1588 + blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) { 1604 1589 printk(KERN_ERR "%s: over max size limit.\n", __func__); 1605 1590 return -EIO; 1606 1591 } ··· 1612 1597 * limitation. 
1613 1598 */ 1614 1599 blk_recalc_rq_segments(rq); 1615 - if (rq->nr_phys_segments > q->max_phys_segments || 1616 - rq->nr_phys_segments > q->max_hw_segments) { 1600 + if (rq->nr_phys_segments > queue_max_phys_segments(q) || 1601 + rq->nr_phys_segments > queue_max_hw_segments(q)) { 1617 1602 printk(KERN_ERR "%s: over max segments limit.\n", __func__); 1618 1603 return -EIO; 1619 1604 } ··· 1657 1642 } 1658 1643 EXPORT_SYMBOL_GPL(blk_insert_cloned_request); 1659 1644 1660 - /** 1661 - * blkdev_dequeue_request - dequeue request and start timeout timer 1662 - * @req: request to dequeue 1663 - * 1664 - * Dequeue @req and start timeout timer on it. This hands off the 1665 - * request to the driver. 1666 - * 1667 - * Block internal functions which don't want to start timer should 1668 - * call elv_dequeue_request(). 1669 - */ 1670 - void blkdev_dequeue_request(struct request *req) 1671 - { 1672 - elv_dequeue_request(req->q, req); 1673 - 1674 - /* 1675 - * We are now handing the request to the hardware, add the 1676 - * timeout handler. 1677 - */ 1678 - blk_add_timer(req); 1679 - } 1680 - EXPORT_SYMBOL(blkdev_dequeue_request); 1681 - 1682 1645 static void blk_account_io_completion(struct request *req, unsigned int bytes) 1683 1646 { 1684 - if (!blk_do_io_stat(req)) 1685 - return; 1686 - 1687 - if (blk_fs_request(req)) { 1647 + if (blk_do_io_stat(req)) { 1688 1648 const int rw = rq_data_dir(req); 1689 1649 struct hd_struct *part; 1690 1650 int cpu; 1691 1651 1692 1652 cpu = part_stat_lock(); 1693 - part = disk_map_sector_rcu(req->rq_disk, req->sector); 1653 + part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req)); 1694 1654 part_stat_add(cpu, part, sectors[rw], bytes >> 9); 1695 1655 part_stat_unlock(); 1696 1656 } ··· 1673 1683 1674 1684 static void blk_account_io_done(struct request *req) 1675 1685 { 1676 - if (!blk_do_io_stat(req)) 1677 - return; 1678 - 1679 1686 /* 1680 1687 * Account IO completion. 
bar_rq isn't accounted as a normal 1681 1688 * IO on queueing nor completion. Accounting the containing 1682 1689 * request is enough. 1683 1690 */ 1684 - if (blk_fs_request(req) && req != &req->q->bar_rq) { 1691 + if (blk_do_io_stat(req) && req != &req->q->bar_rq) { 1685 1692 unsigned long duration = jiffies - req->start_time; 1686 1693 const int rw = rq_data_dir(req); 1687 1694 struct hd_struct *part; 1688 1695 int cpu; 1689 1696 1690 1697 cpu = part_stat_lock(); 1691 - part = disk_map_sector_rcu(req->rq_disk, req->sector); 1698 + part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req)); 1692 1699 1693 1700 part_stat_inc(cpu, part, ios[rw]); 1694 1701 part_stat_add(cpu, part, ticks[rw], duration); ··· 1697 1710 } 1698 1711 1699 1712 /** 1700 - * __end_that_request_first - end I/O on a request 1701 - * @req: the request being processed 1702 - * @error: %0 for success, < %0 for error 1703 - * @nr_bytes: number of bytes to complete 1713 + * blk_peek_request - peek at the top of a request queue 1714 + * @q: request queue to peek at 1704 1715 * 1705 1716 * Description: 1706 - * Ends I/O on a number of bytes attached to @req, and sets it up 1707 - * for the next range of segments (if any) in the cluster. 1717 + * Return the request at the top of @q. The returned request 1718 + * should be started using blk_start_request() before LLD starts 1719 + * processing it. 1708 1720 * 1709 1721 * Return: 1710 - * %0 - we are done with this request, call end_that_request_last() 1711 - * %1 - still buffers pending for this request 1722 + * Pointer to the request at the top of @q if available. Null 1723 + * otherwise. 1724 + * 1725 + * Context: 1726 + * queue_lock must be held. 
1727 + */ 1728 + struct request *blk_peek_request(struct request_queue *q) 1729 + { 1730 + struct request *rq; 1731 + int ret; 1732 + 1733 + while ((rq = __elv_next_request(q)) != NULL) { 1734 + if (!(rq->cmd_flags & REQ_STARTED)) { 1735 + /* 1736 + * This is the first time the device driver 1737 + * sees this request (possibly after 1738 + * requeueing). Notify IO scheduler. 1739 + */ 1740 + if (blk_sorted_rq(rq)) 1741 + elv_activate_rq(q, rq); 1742 + 1743 + /* 1744 + * just mark as started even if we don't start 1745 + * it, a request that has been delayed should 1746 + * not be passed by new incoming requests 1747 + */ 1748 + rq->cmd_flags |= REQ_STARTED; 1749 + trace_block_rq_issue(q, rq); 1750 + } 1751 + 1752 + if (!q->boundary_rq || q->boundary_rq == rq) { 1753 + q->end_sector = rq_end_sector(rq); 1754 + q->boundary_rq = NULL; 1755 + } 1756 + 1757 + if (rq->cmd_flags & REQ_DONTPREP) 1758 + break; 1759 + 1760 + if (q->dma_drain_size && blk_rq_bytes(rq)) { 1761 + /* 1762 + * make sure space for the drain appears we 1763 + * know we can do this because max_hw_segments 1764 + * has been adjusted to be one fewer than the 1765 + * device can handle 1766 + */ 1767 + rq->nr_phys_segments++; 1768 + } 1769 + 1770 + if (!q->prep_rq_fn) 1771 + break; 1772 + 1773 + ret = q->prep_rq_fn(q, rq); 1774 + if (ret == BLKPREP_OK) { 1775 + break; 1776 + } else if (ret == BLKPREP_DEFER) { 1777 + /* 1778 + * the request may have been (partially) prepped. 1779 + * we need to keep this request in the front to 1780 + * avoid resource deadlock. REQ_STARTED will 1781 + * prevent other fs requests from passing this one. 
1782 + */ 1783 + if (q->dma_drain_size && blk_rq_bytes(rq) && 1784 + !(rq->cmd_flags & REQ_DONTPREP)) { 1785 + /* 1786 + * remove the space for the drain we added 1787 + * so that we don't add it again 1788 + */ 1789 + --rq->nr_phys_segments; 1790 + } 1791 + 1792 + rq = NULL; 1793 + break; 1794 + } else if (ret == BLKPREP_KILL) { 1795 + rq->cmd_flags |= REQ_QUIET; 1796 + /* 1797 + * Mark this request as started so we don't trigger 1798 + * any debug logic in the end I/O path. 1799 + */ 1800 + blk_start_request(rq); 1801 + __blk_end_request_all(rq, -EIO); 1802 + } else { 1803 + printk(KERN_ERR "%s: bad return=%d\n", __func__, ret); 1804 + break; 1805 + } 1806 + } 1807 + 1808 + return rq; 1809 + } 1810 + EXPORT_SYMBOL(blk_peek_request); 1811 + 1812 + void blk_dequeue_request(struct request *rq) 1813 + { 1814 + struct request_queue *q = rq->q; 1815 + 1816 + BUG_ON(list_empty(&rq->queuelist)); 1817 + BUG_ON(ELV_ON_HASH(rq)); 1818 + 1819 + list_del_init(&rq->queuelist); 1820 + 1821 + /* 1822 + * the time frame between a request being removed from the lists 1823 + * and to it is freed is accounted as io that is in progress at 1824 + * the driver side. 1825 + */ 1826 + if (blk_account_rq(rq)) 1827 + q->in_flight[rq_is_sync(rq)]++; 1828 + } 1829 + 1830 + /** 1831 + * blk_start_request - start request processing on the driver 1832 + * @req: request to dequeue 1833 + * 1834 + * Description: 1835 + * Dequeue @req and start timeout timer on it. This hands off the 1836 + * request to the driver. 1837 + * 1838 + * Block internal functions which don't want to start timer should 1839 + * call blk_dequeue_request(). 1840 + * 1841 + * Context: 1842 + * queue_lock must be held. 1843 + */ 1844 + void blk_start_request(struct request *req) 1845 + { 1846 + blk_dequeue_request(req); 1847 + 1848 + /* 1849 + * We are now handing the request to the hardware, initialize 1850 + * resid_len to full count and add the timeout handler. 
1851 + */ 1852 + req->resid_len = blk_rq_bytes(req); 1853 + if (unlikely(blk_bidi_rq(req))) 1854 + req->next_rq->resid_len = blk_rq_bytes(req->next_rq); 1855 + 1856 + blk_add_timer(req); 1857 + } 1858 + EXPORT_SYMBOL(blk_start_request); 1859 + 1860 + /** 1861 + * blk_fetch_request - fetch a request from a request queue 1862 + * @q: request queue to fetch a request from 1863 + * 1864 + * Description: 1865 + * Return the request at the top of @q. The request is started on 1866 + * return and LLD can start processing it immediately. 1867 + * 1868 + * Return: 1869 + * Pointer to the request at the top of @q if available. Null 1870 + * otherwise. 1871 + * 1872 + * Context: 1873 + * queue_lock must be held. 1874 + */ 1875 + struct request *blk_fetch_request(struct request_queue *q) 1876 + { 1877 + struct request *rq; 1878 + 1879 + rq = blk_peek_request(q); 1880 + if (rq) 1881 + blk_start_request(rq); 1882 + return rq; 1883 + } 1884 + EXPORT_SYMBOL(blk_fetch_request); 1885 + 1886 + /** 1887 + * blk_update_request - Special helper function for request stacking drivers 1888 + * @rq: the request being processed 1889 + * @error: %0 for success, < %0 for error 1890 + * @nr_bytes: number of bytes to complete @rq 1891 + * 1892 + * Description: 1893 + * Ends I/O on a number of bytes attached to @rq, but doesn't complete 1894 + * the request structure even if @rq doesn't have leftover. 1895 + * If @rq has leftover, sets it up for the next range of segments. 1896 + * 1897 + * This special helper function is only for request stacking drivers 1898 + * (e.g. request-based dm) so that they can handle partial completion. 1899 + * Actual device drivers should use blk_end_request instead. 1900 + * 1901 + * Passing the result of blk_rq_bytes() as @nr_bytes guarantees 1902 + * %false return from this function. 
1903 + * 1904 + * Return: 1905 + * %false - this request doesn't have any more data 1906 + * %true - this request has more data 1712 1907 **/ 1713 - static int __end_that_request_first(struct request *req, int error, 1714 - int nr_bytes) 1908 + bool blk_update_request(struct request *req, int error, unsigned int nr_bytes) 1715 1909 { 1716 1910 int total_bytes, bio_nbytes, next_idx = 0; 1717 1911 struct bio *bio; 1912 + 1913 + if (!req->bio) 1914 + return false; 1718 1915 1719 1916 trace_block_rq_complete(req->q, req); 1720 1917 ··· 1916 1745 if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) { 1917 1746 printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n", 1918 1747 req->rq_disk ? req->rq_disk->disk_name : "?", 1919 - (unsigned long long)req->sector); 1748 + (unsigned long long)blk_rq_pos(req)); 1920 1749 } 1921 1750 1922 1751 blk_account_io_completion(req, nr_bytes); ··· 1976 1805 /* 1977 1806 * completely done 1978 1807 */ 1979 - if (!req->bio) 1980 - return 0; 1808 + if (!req->bio) { 1809 + /* 1810 + * Reset counters so that the request stacking driver 1811 + * can find how many bytes remain in the request 1812 + * later. 1813 + */ 1814 + req->__data_len = 0; 1815 + return false; 1816 + } 1981 1817 1982 1818 /* 1983 1819 * if the request wasn't completed, update state ··· 1996 1818 bio_iovec(bio)->bv_len -= nr_bytes; 1997 1819 } 1998 1820 1999 - blk_recalc_rq_sectors(req, total_bytes >> 9); 1821 + req->__data_len -= total_bytes; 1822 + req->buffer = bio_data(req->bio); 1823 + 1824 + /* update sector only for requests with clear definition of sector */ 1825 + if (blk_fs_request(req) || blk_discard_rq(req)) 1826 + req->__sector += total_bytes >> 9; 1827 + 1828 + /* 1829 + * If total number of sectors is less than the first segment 1830 + * size, something has gone terribly wrong. 
1831 + */ 1832 + if (blk_rq_bytes(req) < blk_rq_cur_bytes(req)) { 1833 + printk(KERN_ERR "blk: request botched\n"); 1834 + req->__data_len = blk_rq_cur_bytes(req); 1835 + } 1836 + 1837 + /* recalculate the number of segments */ 2000 1838 blk_recalc_rq_segments(req); 2001 - return 1; 1839 + 1840 + return true; 1841 + } 1842 + EXPORT_SYMBOL_GPL(blk_update_request); 1843 + 1844 + static bool blk_update_bidi_request(struct request *rq, int error, 1845 + unsigned int nr_bytes, 1846 + unsigned int bidi_bytes) 1847 + { 1848 + if (blk_update_request(rq, error, nr_bytes)) 1849 + return true; 1850 + 1851 + /* Bidi request must be completed as a whole */ 1852 + if (unlikely(blk_bidi_rq(rq)) && 1853 + blk_update_request(rq->next_rq, error, bidi_bytes)) 1854 + return true; 1855 + 1856 + add_disk_randomness(rq->rq_disk); 1857 + 1858 + return false; 2002 1859 } 2003 1860 2004 1861 /* 2005 1862 * queue lock must be held 2006 1863 */ 2007 - static void end_that_request_last(struct request *req, int error) 1864 + static void blk_finish_request(struct request *req, int error) 2008 1865 { 2009 1866 if (blk_rq_tagged(req)) 2010 1867 blk_queue_end_tag(req->q, req); 2011 1868 2012 - if (blk_queued_rq(req)) 2013 - elv_dequeue_request(req->q, req); 1869 + BUG_ON(blk_queued_rq(req)); 2014 1870 2015 1871 if (unlikely(laptop_mode) && blk_fs_request(req)) 2016 1872 laptop_io_completion(); ··· 2064 1852 } 2065 1853 2066 1854 /** 2067 - * blk_rq_bytes - Returns bytes left to complete in the entire request 2068 - * @rq: the request being processed 2069 - **/ 2070 - unsigned int blk_rq_bytes(struct request *rq) 2071 - { 2072 - if (blk_fs_request(rq)) 2073 - return rq->hard_nr_sectors << 9; 2074 - 2075 - return rq->data_len; 2076 - } 2077 - EXPORT_SYMBOL_GPL(blk_rq_bytes); 2078 - 2079 - /** 2080 - * blk_rq_cur_bytes - Returns bytes left to complete in the current segment 2081 - * @rq: the request being processed 2082 - **/ 2083 - unsigned int blk_rq_cur_bytes(struct request *rq) 2084 - { 2085 - if 
(blk_fs_request(rq)) 2086 - return rq->current_nr_sectors << 9; 2087 - 2088 - if (rq->bio) 2089 - return rq->bio->bi_size; 2090 - 2091 - return rq->data_len; 2092 - } 2093 - EXPORT_SYMBOL_GPL(blk_rq_cur_bytes); 2094 - 2095 - /** 2096 - * end_request - end I/O on the current segment of the request 2097 - * @req: the request being processed 2098 - * @uptodate: error value or %0/%1 uptodate flag 2099 - * 2100 - * Description: 2101 - * Ends I/O on the current segment of a request. If that is the only 2102 - * remaining segment, the request is also completed and freed. 2103 - * 2104 - * This is a remnant of how older block drivers handled I/O completions. 2105 - * Modern drivers typically end I/O on the full request in one go, unless 2106 - * they have a residual value to account for. For that case this function 2107 - * isn't really useful, unless the residual just happens to be the 2108 - * full current segment. In other words, don't use this function in new 2109 - * code. Use blk_end_request() or __blk_end_request() to end a request. 2110 - **/ 2111 - void end_request(struct request *req, int uptodate) 2112 - { 2113 - int error = 0; 2114 - 2115 - if (uptodate <= 0) 2116 - error = uptodate ? uptodate : -EIO; 2117 - 2118 - __blk_end_request(req, error, req->hard_cur_sectors << 9); 2119 - } 2120 - EXPORT_SYMBOL(end_request); 2121 - 2122 - static int end_that_request_data(struct request *rq, int error, 2123 - unsigned int nr_bytes, unsigned int bidi_bytes) 2124 - { 2125 - if (rq->bio) { 2126 - if (__end_that_request_first(rq, error, nr_bytes)) 2127 - return 1; 2128 - 2129 - /* Bidi request must be completed as a whole */ 2130 - if (blk_bidi_rq(rq) && 2131 - __end_that_request_first(rq->next_rq, error, bidi_bytes)) 2132 - return 1; 2133 - } 2134 - 2135 - return 0; 2136 - } 2137 - 2138 - /** 2139 - * blk_end_io - Generic end_io function to complete a request. 
2140 - * @rq: the request being processed 2141 - * @error: %0 for success, < %0 for error 2142 - * @nr_bytes: number of bytes to complete @rq 2143 - * @bidi_bytes: number of bytes to complete @rq->next_rq 2144 - * @drv_callback: function called between completion of bios in the request 2145 - * and completion of the request. 2146 - * If the callback returns non %0, this helper returns without 2147 - * completion of the request. 1855 + * blk_end_bidi_request - Complete a bidi request 1856 + * @rq: the request to complete 1857 + * @error: %0 for success, < %0 for error 1858 + * @nr_bytes: number of bytes to complete @rq 1859 + * @bidi_bytes: number of bytes to complete @rq->next_rq 2148 1860 * 2149 1861 * Description: 2150 1862 * Ends I/O on a number of bytes attached to @rq and @rq->next_rq. 2151 - * If @rq has leftover, sets it up for the next range of segments. 1863 + * Drivers that supports bidi can safely call this member for any 1864 + * type of request, bidi or uni. In the later case @bidi_bytes is 1865 + * just ignored. 2152 1866 * 2153 1867 * Return: 2154 - * %0 - we are done with this request 2155 - * %1 - this request is not freed yet, it still has pending buffers. 
1868 + * %false - we are done with this request 1869 + * %true - still buffers pending for this request 2156 1870 **/ 2157 - static int blk_end_io(struct request *rq, int error, unsigned int nr_bytes, 2158 - unsigned int bidi_bytes, 2159 - int (drv_callback)(struct request *)) 1871 + static bool blk_end_bidi_request(struct request *rq, int error, 1872 + unsigned int nr_bytes, unsigned int bidi_bytes) 2160 1873 { 2161 1874 struct request_queue *q = rq->q; 2162 - unsigned long flags = 0UL; 1875 + unsigned long flags; 2163 1876 2164 - if (end_that_request_data(rq, error, nr_bytes, bidi_bytes)) 2165 - return 1; 2166 - 2167 - /* Special feature for tricky drivers */ 2168 - if (drv_callback && drv_callback(rq)) 2169 - return 1; 2170 - 2171 - add_disk_randomness(rq->rq_disk); 1877 + if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) 1878 + return true; 2172 1879 2173 1880 spin_lock_irqsave(q->queue_lock, flags); 2174 - end_that_request_last(rq, error); 1881 + blk_finish_request(rq, error); 2175 1882 spin_unlock_irqrestore(q->queue_lock, flags); 2176 1883 2177 - return 0; 1884 + return false; 1885 + } 1886 + 1887 + /** 1888 + * __blk_end_bidi_request - Complete a bidi request with queue lock held 1889 + * @rq: the request to complete 1890 + * @error: %0 for success, < %0 for error 1891 + * @nr_bytes: number of bytes to complete @rq 1892 + * @bidi_bytes: number of bytes to complete @rq->next_rq 1893 + * 1894 + * Description: 1895 + * Identical to blk_end_bidi_request() except that queue lock is 1896 + * assumed to be locked on entry and remains so on return. 
1897 + * 1898 + * Return: 1899 + * %false - we are done with this request 1900 + * %true - still buffers pending for this request 1901 + **/ 1902 + static bool __blk_end_bidi_request(struct request *rq, int error, 1903 + unsigned int nr_bytes, unsigned int bidi_bytes) 1904 + { 1905 + if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes)) 1906 + return true; 1907 + 1908 + blk_finish_request(rq, error); 1909 + 1910 + return false; 2178 1911 } 2179 1912 2180 1913 /** ··· 2133 1976 * If @rq has leftover, sets it up for the next range of segments. 2134 1977 * 2135 1978 * Return: 2136 - * %0 - we are done with this request 2137 - * %1 - still buffers pending for this request 1979 + * %false - we are done with this request 1980 + * %true - still buffers pending for this request 2138 1981 **/ 2139 - int blk_end_request(struct request *rq, int error, unsigned int nr_bytes) 1982 + bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes) 2140 1983 { 2141 - return blk_end_io(rq, error, nr_bytes, 0, NULL); 1984 + return blk_end_bidi_request(rq, error, nr_bytes, 0); 2142 1985 } 2143 1986 EXPORT_SYMBOL_GPL(blk_end_request); 1987 + 1988 + /** 1989 + * blk_end_request_all - Helper function for drives to finish the request. 1990 + * @rq: the request to finish 1991 + * @err: %0 for success, < %0 for error 1992 + * 1993 + * Description: 1994 + * Completely finish @rq. 1995 + */ 1996 + void blk_end_request_all(struct request *rq, int error) 1997 + { 1998 + bool pending; 1999 + unsigned int bidi_bytes = 0; 2000 + 2001 + if (unlikely(blk_bidi_rq(rq))) 2002 + bidi_bytes = blk_rq_bytes(rq->next_rq); 2003 + 2004 + pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); 2005 + BUG_ON(pending); 2006 + } 2007 + EXPORT_SYMBOL_GPL(blk_end_request_all); 2008 + 2009 + /** 2010 + * blk_end_request_cur - Helper function to finish the current request chunk. 
2011 + * @rq: the request to finish the current chunk for 2012 + * @err: %0 for success, < %0 for error 2013 + * 2014 + * Description: 2015 + * Complete the current consecutively mapped chunk from @rq. 2016 + * 2017 + * Return: 2018 + * %false - we are done with this request 2019 + * %true - still buffers pending for this request 2020 + */ 2021 + bool blk_end_request_cur(struct request *rq, int error) 2022 + { 2023 + return blk_end_request(rq, error, blk_rq_cur_bytes(rq)); 2024 + } 2025 + EXPORT_SYMBOL_GPL(blk_end_request_cur); 2144 2026 2145 2027 /** 2146 2028 * __blk_end_request - Helper function for drivers to complete the request. ··· 2191 1995 * Must be called with queue lock held unlike blk_end_request(). 2192 1996 * 2193 1997 * Return: 2194 - * %0 - we are done with this request 2195 - * %1 - still buffers pending for this request 1998 + * %false - we are done with this request 1999 + * %true - still buffers pending for this request 2196 2000 **/ 2197 - int __blk_end_request(struct request *rq, int error, unsigned int nr_bytes) 2001 + bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes) 2198 2002 { 2199 - if (rq->bio && __end_that_request_first(rq, error, nr_bytes)) 2200 - return 1; 2201 - 2202 - add_disk_randomness(rq->rq_disk); 2203 - 2204 - end_that_request_last(rq, error); 2205 - 2206 - return 0; 2003 + return __blk_end_bidi_request(rq, error, nr_bytes, 0); 2207 2004 } 2208 2005 EXPORT_SYMBOL_GPL(__blk_end_request); 2209 2006 2210 2007 /** 2211 - * blk_end_bidi_request - Helper function for drivers to complete bidi request. 2212 - * @rq: the bidi request being processed 2213 - * @error: %0 for success, < %0 for error 2214 - * @nr_bytes: number of bytes to complete @rq 2215 - * @bidi_bytes: number of bytes to complete @rq->next_rq 2008 + * __blk_end_request_all - Helper function for drives to finish the request. 
2009 + * @rq: the request to finish 2010 + * @err: %0 for success, < %0 for error 2216 2011 * 2217 2012 * Description: 2218 - * Ends I/O on a number of bytes attached to @rq and @rq->next_rq. 2219 - * 2220 - * Return: 2221 - * %0 - we are done with this request 2222 - * %1 - still buffers pending for this request 2223 - **/ 2224 - int blk_end_bidi_request(struct request *rq, int error, unsigned int nr_bytes, 2225 - unsigned int bidi_bytes) 2226 - { 2227 - return blk_end_io(rq, error, nr_bytes, bidi_bytes, NULL); 2228 - } 2229 - EXPORT_SYMBOL_GPL(blk_end_bidi_request); 2230 - 2231 - /** 2232 - * blk_update_request - Special helper function for request stacking drivers 2233 - * @rq: the request being processed 2234 - * @error: %0 for success, < %0 for error 2235 - * @nr_bytes: number of bytes to complete @rq 2236 - * 2237 - * Description: 2238 - * Ends I/O on a number of bytes attached to @rq, but doesn't complete 2239 - * the request structure even if @rq doesn't have leftover. 2240 - * If @rq has leftover, sets it up for the next range of segments. 2241 - * 2242 - * This special helper function is only for request stacking drivers 2243 - * (e.g. request-based dm) so that they can handle partial completion. 2244 - * Actual device drivers should use blk_end_request instead. 2013 + * Completely finish @rq. Must be called with queue lock held. 2245 2014 */ 2246 - void blk_update_request(struct request *rq, int error, unsigned int nr_bytes) 2015 + void __blk_end_request_all(struct request *rq, int error) 2247 2016 { 2248 - if (!end_that_request_data(rq, error, nr_bytes, 0)) { 2249 - /* 2250 - * These members are not updated in end_that_request_data() 2251 - * when all bios are completed. 2252 - * Update them so that the request stacking driver can find 2253 - * how many bytes remain in the request later. 
2254 - */ 2255 - rq->nr_sectors = rq->hard_nr_sectors = 0; 2256 - rq->current_nr_sectors = rq->hard_cur_sectors = 0; 2257 - } 2017 + bool pending; 2018 + unsigned int bidi_bytes = 0; 2019 + 2020 + if (unlikely(blk_bidi_rq(rq))) 2021 + bidi_bytes = blk_rq_bytes(rq->next_rq); 2022 + 2023 + pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes); 2024 + BUG_ON(pending); 2258 2025 } 2259 - EXPORT_SYMBOL_GPL(blk_update_request); 2026 + EXPORT_SYMBOL_GPL(__blk_end_request_all); 2260 2027 2261 2028 /** 2262 - * blk_end_request_callback - Special helper function for tricky drivers 2263 - * @rq: the request being processed 2264 - * @error: %0 for success, < %0 for error 2265 - * @nr_bytes: number of bytes to complete 2266 - * @drv_callback: function called between completion of bios in the request 2267 - * and completion of the request. 2268 - * If the callback returns non %0, this helper returns without 2269 - * completion of the request. 2029 + * __blk_end_request_cur - Helper function to finish the current request chunk. 2030 + * @rq: the request to finish the current chunk for 2031 + * @err: %0 for success, < %0 for error 2270 2032 * 2271 2033 * Description: 2272 - * Ends I/O on a number of bytes attached to @rq. 2273 - * If @rq has leftover, sets it up for the next range of segments. 2274 - * 2275 - * This special helper function is used only for existing tricky drivers. 2276 - * (e.g. cdrom_newpc_intr() of ide-cd) 2277 - * This interface will be removed when such drivers are rewritten. 2278 - * Don't use this interface in other places anymore. 2034 + * Complete the current consecutively mapped chunk from @rq. Must 2035 + * be called with queue lock held. 2279 2036 * 2280 2037 * Return: 2281 - * %0 - we are done with this request 2282 - * %1 - this request is not freed yet. 2283 - * this request still has pending buffers or 2284 - * the driver doesn't want to finish this request yet. 
2285 - **/ 2286 - int blk_end_request_callback(struct request *rq, int error, 2287 - unsigned int nr_bytes, 2288 - int (drv_callback)(struct request *)) 2038 + * %false - we are done with this request 2039 + * %true - still buffers pending for this request 2040 + */ 2041 + bool __blk_end_request_cur(struct request *rq, int error) 2289 2042 { 2290 - return blk_end_io(rq, error, nr_bytes, 0, drv_callback); 2043 + return __blk_end_request(rq, error, blk_rq_cur_bytes(rq)); 2291 2044 } 2292 - EXPORT_SYMBOL_GPL(blk_end_request_callback); 2045 + EXPORT_SYMBOL_GPL(__blk_end_request_cur); 2293 2046 2294 2047 void blk_rq_bio_prep(struct request_queue *q, struct request *rq, 2295 2048 struct bio *bio) ··· 2251 2106 rq->nr_phys_segments = bio_phys_segments(q, bio); 2252 2107 rq->buffer = bio_data(bio); 2253 2108 } 2254 - rq->current_nr_sectors = bio_cur_sectors(bio); 2255 - rq->hard_cur_sectors = rq->current_nr_sectors; 2256 - rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio); 2257 - rq->data_len = bio->bi_size; 2258 - 2109 + rq->__data_len = bio->bi_size; 2259 2110 rq->bio = rq->biotail = bio; 2260 2111 2261 2112 if (bio->bi_bdev) ··· 2286 2145 } 2287 2146 EXPORT_SYMBOL_GPL(blk_lld_busy); 2288 2147 2148 + /** 2149 + * blk_rq_unprep_clone - Helper function to free all bios in a cloned request 2150 + * @rq: the clone request to be cleaned up 2151 + * 2152 + * Description: 2153 + * Free all bios in @rq for a cloned request. 2154 + */ 2155 + void blk_rq_unprep_clone(struct request *rq) 2156 + { 2157 + struct bio *bio; 2158 + 2159 + while ((bio = rq->bio) != NULL) { 2160 + rq->bio = bio->bi_next; 2161 + 2162 + bio_put(bio); 2163 + } 2164 + } 2165 + EXPORT_SYMBOL_GPL(blk_rq_unprep_clone); 2166 + 2167 + /* 2168 + * Copy attributes of the original request to the clone request. 2169 + * The actual data parts (e.g. ->cmd, ->buffer, ->sense) are not copied. 
2170 + */ 2171 + static void __blk_rq_prep_clone(struct request *dst, struct request *src) 2172 + { 2173 + dst->cpu = src->cpu; 2174 + dst->cmd_flags = (rq_data_dir(src) | REQ_NOMERGE); 2175 + dst->cmd_type = src->cmd_type; 2176 + dst->__sector = blk_rq_pos(src); 2177 + dst->__data_len = blk_rq_bytes(src); 2178 + dst->nr_phys_segments = src->nr_phys_segments; 2179 + dst->ioprio = src->ioprio; 2180 + dst->extra_len = src->extra_len; 2181 + } 2182 + 2183 + /** 2184 + * blk_rq_prep_clone - Helper function to setup clone request 2185 + * @rq: the request to be setup 2186 + * @rq_src: original request to be cloned 2187 + * @bs: bio_set that bios for clone are allocated from 2188 + * @gfp_mask: memory allocation mask for bio 2189 + * @bio_ctr: setup function to be called for each clone bio. 2190 + * Returns %0 for success, non %0 for failure. 2191 + * @data: private data to be passed to @bio_ctr 2192 + * 2193 + * Description: 2194 + * Clones bios in @rq_src to @rq, and copies attributes of @rq_src to @rq. 2195 + * The actual data parts of @rq_src (e.g. ->cmd, ->buffer, ->sense) 2196 + * are not copied, and copying such parts is the caller's responsibility. 2197 + * Also, pages which the original bios are pointing to are not copied 2198 + * and the cloned bios just point same pages. 2199 + * So cloned bios must be completed before original bios, which means 2200 + * the caller must complete @rq before @rq_src. 
2201 + */ 2202 + int blk_rq_prep_clone(struct request *rq, struct request *rq_src, 2203 + struct bio_set *bs, gfp_t gfp_mask, 2204 + int (*bio_ctr)(struct bio *, struct bio *, void *), 2205 + void *data) 2206 + { 2207 + struct bio *bio, *bio_src; 2208 + 2209 + if (!bs) 2210 + bs = fs_bio_set; 2211 + 2212 + blk_rq_init(NULL, rq); 2213 + 2214 + __rq_for_each_bio(bio_src, rq_src) { 2215 + bio = bio_alloc_bioset(gfp_mask, bio_src->bi_max_vecs, bs); 2216 + if (!bio) 2217 + goto free_and_out; 2218 + 2219 + __bio_clone(bio, bio_src); 2220 + 2221 + if (bio_integrity(bio_src) && 2222 + bio_integrity_clone(bio, bio_src, gfp_mask)) 2223 + goto free_and_out; 2224 + 2225 + if (bio_ctr && bio_ctr(bio, bio_src, data)) 2226 + goto free_and_out; 2227 + 2228 + if (rq->bio) { 2229 + rq->biotail->bi_next = bio; 2230 + rq->biotail = bio; 2231 + } else 2232 + rq->bio = rq->biotail = bio; 2233 + } 2234 + 2235 + __blk_rq_prep_clone(rq, rq_src); 2236 + 2237 + return 0; 2238 + 2239 + free_and_out: 2240 + if (bio) 2241 + bio_free(bio, bs); 2242 + blk_rq_unprep_clone(rq); 2243 + 2244 + return -ENOMEM; 2245 + } 2246 + EXPORT_SYMBOL_GPL(blk_rq_prep_clone); 2247 + 2289 2248 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work) 2290 2249 { 2291 2250 return queue_work(kblockd_workqueue, work); ··· 2394 2153 2395 2154 int __init blk_dev_init(void) 2396 2155 { 2156 + BUILD_BUG_ON(__REQ_NR_BITS > 8 * 2157 + sizeof(((struct request *)0)->cmd_flags)); 2158 + 2397 2159 kblockd_workqueue = create_workqueue("kblockd"); 2398 2160 if (!kblockd_workqueue) 2399 2161 panic("Failed to create kblockd\n");
-1
block/blk-exec.c
··· 51 51 int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK; 52 52 53 53 rq->rq_disk = bd_disk; 54 - rq->cmd_flags |= REQ_NOMERGE; 55 54 rq->end_io = done; 56 55 WARN_ON(irqs_disabled()); 57 56 spin_lock_irq(q->queue_lock);
+1 -1
block/blk-integrity.c
··· 340 340 kobject_uevent(&bi->kobj, KOBJ_ADD); 341 341 342 342 bi->flags |= INTEGRITY_FLAG_READ | INTEGRITY_FLAG_WRITE; 343 - bi->sector_size = disk->queue->hardsect_size; 343 + bi->sector_size = queue_logical_block_size(disk->queue); 344 344 disk->integrity = bi; 345 345 } else 346 346 bi = disk->integrity;
+6 -6
block/blk-ioc.c
··· 35 35 if (ioc == NULL) 36 36 return 1; 37 37 38 - BUG_ON(atomic_read(&ioc->refcount) == 0); 38 + BUG_ON(atomic_long_read(&ioc->refcount) == 0); 39 39 40 - if (atomic_dec_and_test(&ioc->refcount)) { 40 + if (atomic_long_dec_and_test(&ioc->refcount)) { 41 41 rcu_read_lock(); 42 42 if (ioc->aic && ioc->aic->dtor) 43 43 ioc->aic->dtor(ioc->aic); ··· 90 90 91 91 ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node); 92 92 if (ret) { 93 - atomic_set(&ret->refcount, 1); 93 + atomic_long_set(&ret->refcount, 1); 94 94 atomic_set(&ret->nr_tasks, 1); 95 95 spin_lock_init(&ret->lock); 96 96 ret->ioprio_changed = 0; ··· 151 151 ret = current_io_context(gfp_flags, node); 152 152 if (unlikely(!ret)) 153 153 break; 154 - } while (!atomic_inc_not_zero(&ret->refcount)); 154 + } while (!atomic_long_inc_not_zero(&ret->refcount)); 155 155 156 156 return ret; 157 157 } ··· 163 163 struct io_context *dst = *pdst; 164 164 165 165 if (src) { 166 - BUG_ON(atomic_read(&src->refcount) == 0); 167 - atomic_inc(&src->refcount); 166 + BUG_ON(atomic_long_read(&src->refcount) == 0); 167 + atomic_long_inc(&src->refcount); 168 168 put_io_context(dst); 169 169 *pdst = src; 170 170 }
+16 -9
block/blk-map.c
··· 20 20 rq->biotail->bi_next = bio; 21 21 rq->biotail = bio; 22 22 23 - rq->data_len += bio->bi_size; 23 + rq->__data_len += bio->bi_size; 24 24 } 25 25 return 0; 26 26 } 27 - EXPORT_SYMBOL(blk_rq_append_bio); 28 27 29 28 static int __blk_rq_unmap_user(struct bio *bio) 30 29 { ··· 115 116 struct bio *bio = NULL; 116 117 int ret; 117 118 118 - if (len > (q->max_hw_sectors << 9)) 119 + if (len > (queue_max_hw_sectors(q) << 9)) 119 120 return -EINVAL; 120 121 if (!len) 121 122 return -EINVAL; ··· 155 156 if (!bio_flagged(bio, BIO_USER_MAPPED)) 156 157 rq->cmd_flags |= REQ_COPY_USER; 157 158 158 - rq->buffer = rq->data = NULL; 159 + rq->buffer = NULL; 159 160 return 0; 160 161 unmap_rq: 161 162 blk_rq_unmap_user(bio); ··· 234 235 blk_queue_bounce(q, &bio); 235 236 bio_get(bio); 236 237 blk_rq_bio_prep(q, rq, bio); 237 - rq->buffer = rq->data = NULL; 238 + rq->buffer = NULL; 238 239 return 0; 239 240 } 240 241 EXPORT_SYMBOL(blk_rq_map_user_iov); ··· 281 282 * 282 283 * Description: 283 284 * Data will be mapped directly if possible. Otherwise a bounce 284 - * buffer is used. 285 + * buffer is used. Can be called multple times to append multple 286 + * buffers. 
285 287 */ 286 288 int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf, 287 289 unsigned int len, gfp_t gfp_mask) ··· 290 290 int reading = rq_data_dir(rq) == READ; 291 291 int do_copy = 0; 292 292 struct bio *bio; 293 + int ret; 293 294 294 - if (len > (q->max_hw_sectors << 9)) 295 + if (len > (queue_max_hw_sectors(q) << 9)) 295 296 return -EINVAL; 296 297 if (!len || !kbuf) 297 298 return -EINVAL; ··· 312 311 if (do_copy) 313 312 rq->cmd_flags |= REQ_COPY_USER; 314 313 315 - blk_rq_bio_prep(q, rq, bio); 314 + ret = blk_rq_append_bio(q, rq, bio); 315 + if (unlikely(ret)) { 316 + /* request is too big */ 317 + bio_put(bio); 318 + return ret; 319 + } 320 + 316 321 blk_queue_bounce(q, &rq->bio); 317 - rq->buffer = rq->data = NULL; 322 + rq->buffer = NULL; 318 323 return 0; 319 324 } 320 325 EXPORT_SYMBOL(blk_rq_map_kern);
+22 -49
block/blk-merge.c
··· 9 9 10 10 #include "blk.h" 11 11 12 - void blk_recalc_rq_sectors(struct request *rq, int nsect) 13 - { 14 - if (blk_fs_request(rq) || blk_discard_rq(rq)) { 15 - rq->hard_sector += nsect; 16 - rq->hard_nr_sectors -= nsect; 17 - 18 - /* 19 - * Move the I/O submission pointers ahead if required. 20 - */ 21 - if ((rq->nr_sectors >= rq->hard_nr_sectors) && 22 - (rq->sector <= rq->hard_sector)) { 23 - rq->sector = rq->hard_sector; 24 - rq->nr_sectors = rq->hard_nr_sectors; 25 - rq->hard_cur_sectors = bio_cur_sectors(rq->bio); 26 - rq->current_nr_sectors = rq->hard_cur_sectors; 27 - rq->buffer = bio_data(rq->bio); 28 - } 29 - 30 - /* 31 - * if total number of sectors is less than the first segment 32 - * size, something has gone terribly wrong 33 - */ 34 - if (rq->nr_sectors < rq->current_nr_sectors) { 35 - printk(KERN_ERR "blk: request botched\n"); 36 - rq->nr_sectors = rq->current_nr_sectors; 37 - } 38 - } 39 - } 40 - 41 12 static unsigned int __blk_recalc_rq_segments(struct request_queue *q, 42 13 struct bio *bio) 43 14 { ··· 32 61 * never considered part of another segment, since that 33 62 * might change with the bounce page. 
34 63 */ 35 - high = page_to_pfn(bv->bv_page) > q->bounce_pfn; 64 + high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q); 36 65 if (high || highprv) 37 66 goto new_segment; 38 67 if (cluster) { 39 - if (seg_size + bv->bv_len > q->max_segment_size) 68 + if (seg_size + bv->bv_len 69 + > queue_max_segment_size(q)) 40 70 goto new_segment; 41 71 if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv)) 42 72 goto new_segment; ··· 92 120 return 0; 93 121 94 122 if (bio->bi_seg_back_size + nxt->bi_seg_front_size > 95 - q->max_segment_size) 123 + queue_max_segment_size(q)) 96 124 return 0; 97 125 98 126 if (!bio_has_data(bio)) ··· 135 163 int nbytes = bvec->bv_len; 136 164 137 165 if (bvprv && cluster) { 138 - if (sg->length + nbytes > q->max_segment_size) 166 + if (sg->length + nbytes > queue_max_segment_size(q)) 139 167 goto new_segment; 140 168 141 169 if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) ··· 171 199 172 200 173 201 if (unlikely(rq->cmd_flags & REQ_COPY_USER) && 174 - (rq->data_len & q->dma_pad_mask)) { 175 - unsigned int pad_len = (q->dma_pad_mask & ~rq->data_len) + 1; 202 + (blk_rq_bytes(rq) & q->dma_pad_mask)) { 203 + unsigned int pad_len = 204 + (q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1; 176 205 177 206 sg->length += pad_len; 178 207 rq->extra_len += pad_len; ··· 206 233 { 207 234 int nr_phys_segs = bio_phys_segments(q, bio); 208 235 209 - if (req->nr_phys_segments + nr_phys_segs > q->max_hw_segments 210 - || req->nr_phys_segments + nr_phys_segs > q->max_phys_segments) { 236 + if (req->nr_phys_segments + nr_phys_segs > queue_max_hw_segments(q) || 237 + req->nr_phys_segments + nr_phys_segs > queue_max_phys_segments(q)) { 211 238 req->cmd_flags |= REQ_NOMERGE; 212 239 if (req == q->last_merge) 213 240 q->last_merge = NULL; ··· 228 255 unsigned short max_sectors; 229 256 230 257 if (unlikely(blk_pc_request(req))) 231 - max_sectors = q->max_hw_sectors; 258 + max_sectors = queue_max_hw_sectors(q); 232 259 else 233 - max_sectors = q->max_sectors; 260 + max_sectors = 
queue_max_sectors(q); 234 261 235 - if (req->nr_sectors + bio_sectors(bio) > max_sectors) { 262 + if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) { 236 263 req->cmd_flags |= REQ_NOMERGE; 237 264 if (req == q->last_merge) 238 265 q->last_merge = NULL; ··· 252 279 unsigned short max_sectors; 253 280 254 281 if (unlikely(blk_pc_request(req))) 255 - max_sectors = q->max_hw_sectors; 282 + max_sectors = queue_max_hw_sectors(q); 256 283 else 257 - max_sectors = q->max_sectors; 284 + max_sectors = queue_max_sectors(q); 258 285 259 286 260 - if (req->nr_sectors + bio_sectors(bio) > max_sectors) { 287 + if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) { 261 288 req->cmd_flags |= REQ_NOMERGE; 262 289 if (req == q->last_merge) 263 290 q->last_merge = NULL; ··· 288 315 /* 289 316 * Will it become too large? 290 317 */ 291 - if ((req->nr_sectors + next->nr_sectors) > q->max_sectors) 318 + if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > queue_max_sectors(q)) 292 319 return 0; 293 320 294 321 total_phys_segments = req->nr_phys_segments + next->nr_phys_segments; ··· 300 327 total_phys_segments--; 301 328 } 302 329 303 - if (total_phys_segments > q->max_phys_segments) 330 + if (total_phys_segments > queue_max_phys_segments(q)) 304 331 return 0; 305 332 306 - if (total_phys_segments > q->max_hw_segments) 333 + if (total_phys_segments > queue_max_hw_segments(q)) 307 334 return 0; 308 335 309 336 /* Merge is OK... 
*/ ··· 318 345 int cpu; 319 346 320 347 cpu = part_stat_lock(); 321 - part = disk_map_sector_rcu(req->rq_disk, req->sector); 348 + part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req)); 322 349 323 350 part_round_stats(cpu, part); 324 351 part_dec_in_flight(part); ··· 339 366 /* 340 367 * not contiguous 341 368 */ 342 - if (req->sector + req->nr_sectors != next->sector) 369 + if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next)) 343 370 return 0; 344 371 345 372 if (rq_data_dir(req) != rq_data_dir(next) ··· 371 398 req->biotail->bi_next = next->bio; 372 399 req->biotail = next->biotail; 373 400 374 - req->nr_sectors = req->hard_nr_sectors += next->hard_nr_sectors; 401 + req->__data_len += blk_rq_bytes(next); 375 402 376 403 elv_merge_requests(q, req, next); 377 404
+239 -30
block/blk-settings.c
··· 134 134 q->backing_dev_info.state = 0; 135 135 q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY; 136 136 blk_queue_max_sectors(q, SAFE_MAX_SECTORS); 137 - blk_queue_hardsect_size(q, 512); 137 + blk_queue_logical_block_size(q, 512); 138 138 blk_queue_dma_alignment(q, 511); 139 139 blk_queue_congestion_threshold(q); 140 140 q->nr_batching = BLK_BATCH_REQ; ··· 179 179 */ 180 180 if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT)) 181 181 dma = 1; 182 - q->bounce_pfn = max_low_pfn; 182 + q->limits.bounce_pfn = max_low_pfn; 183 183 #else 184 184 if (b_pfn < blk_max_low_pfn) 185 185 dma = 1; 186 - q->bounce_pfn = b_pfn; 186 + q->limits.bounce_pfn = b_pfn; 187 187 #endif 188 188 if (dma) { 189 189 init_emergency_isa_pool(); 190 190 q->bounce_gfp = GFP_NOIO | GFP_DMA; 191 - q->bounce_pfn = b_pfn; 191 + q->limits.bounce_pfn = b_pfn; 192 192 } 193 193 } 194 194 EXPORT_SYMBOL(blk_queue_bounce_limit); ··· 211 211 } 212 212 213 213 if (BLK_DEF_MAX_SECTORS > max_sectors) 214 - q->max_hw_sectors = q->max_sectors = max_sectors; 214 + q->limits.max_hw_sectors = q->limits.max_sectors = max_sectors; 215 215 else { 216 - q->max_sectors = BLK_DEF_MAX_SECTORS; 217 - q->max_hw_sectors = max_sectors; 216 + q->limits.max_sectors = BLK_DEF_MAX_SECTORS; 217 + q->limits.max_hw_sectors = max_sectors; 218 218 } 219 219 } 220 220 EXPORT_SYMBOL(blk_queue_max_sectors); 221 + 222 + void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_sectors) 223 + { 224 + if (BLK_DEF_MAX_SECTORS > max_sectors) 225 + q->limits.max_hw_sectors = BLK_DEF_MAX_SECTORS; 226 + else 227 + q->limits.max_hw_sectors = max_sectors; 228 + } 229 + EXPORT_SYMBOL(blk_queue_max_hw_sectors); 221 230 222 231 /** 223 232 * blk_queue_max_phys_segments - set max phys segments for a request for this queue ··· 247 238 __func__, max_segments); 248 239 } 249 240 250 - q->max_phys_segments = max_segments; 241 + q->limits.max_phys_segments = max_segments; 251 242 } 252 243 
EXPORT_SYMBOL(blk_queue_max_phys_segments); 253 244 ··· 271 262 __func__, max_segments); 272 263 } 273 264 274 - q->max_hw_segments = max_segments; 265 + q->limits.max_hw_segments = max_segments; 275 266 } 276 267 EXPORT_SYMBOL(blk_queue_max_hw_segments); 277 268 ··· 292 283 __func__, max_size); 293 284 } 294 285 295 - q->max_segment_size = max_size; 286 + q->limits.max_segment_size = max_size; 296 287 } 297 288 EXPORT_SYMBOL(blk_queue_max_segment_size); 298 289 299 290 /** 300 - * blk_queue_hardsect_size - set hardware sector size for the queue 291 + * blk_queue_logical_block_size - set logical block size for the queue 301 292 * @q: the request queue for the device 302 - * @size: the hardware sector size, in bytes 293 + * @size: the logical block size, in bytes 303 294 * 304 295 * Description: 305 - * This should typically be set to the lowest possible sector size 306 - * that the hardware can operate on (possible without reverting to 307 - * even internal read-modify-write operations). Usually the default 308 - * of 512 covers most hardware. 296 + * This should be set to the lowest possible block size that the 297 + * storage device can address. The default of 512 covers most 298 + * hardware. 
309 299 **/ 310 - void blk_queue_hardsect_size(struct request_queue *q, unsigned short size) 300 + void blk_queue_logical_block_size(struct request_queue *q, unsigned short size) 311 301 { 312 - q->hardsect_size = size; 302 + q->limits.logical_block_size = size; 303 + 304 + if (q->limits.physical_block_size < size) 305 + q->limits.physical_block_size = size; 306 + 307 + if (q->limits.io_min < q->limits.physical_block_size) 308 + q->limits.io_min = q->limits.physical_block_size; 313 309 } 314 - EXPORT_SYMBOL(blk_queue_hardsect_size); 310 + EXPORT_SYMBOL(blk_queue_logical_block_size); 311 + 312 + /** 313 + * blk_queue_physical_block_size - set physical block size for the queue 314 + * @q: the request queue for the device 315 + * @size: the physical block size, in bytes 316 + * 317 + * Description: 318 + * This should be set to the lowest possible sector size that the 319 + * hardware can operate on without reverting to read-modify-write 320 + * operations. 321 + */ 322 + void blk_queue_physical_block_size(struct request_queue *q, unsigned short size) 323 + { 324 + q->limits.physical_block_size = size; 325 + 326 + if (q->limits.physical_block_size < q->limits.logical_block_size) 327 + q->limits.physical_block_size = q->limits.logical_block_size; 328 + 329 + if (q->limits.io_min < q->limits.physical_block_size) 330 + q->limits.io_min = q->limits.physical_block_size; 331 + } 332 + EXPORT_SYMBOL(blk_queue_physical_block_size); 333 + 334 + /** 335 + * blk_queue_alignment_offset - set physical block alignment offset 336 + * @q: the request queue for the device 337 + * @alignment: alignment offset in bytes 338 + * 339 + * Description: 340 + * Some devices are naturally misaligned to compensate for things like 341 + * the legacy DOS partition table 63-sector offset. Low-level drivers 342 + * should call this function for devices whose first sector is not 343 + * naturally aligned. 
344 + */ 345 + void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset) 346 + { 347 + q->limits.alignment_offset = 348 + offset & (q->limits.physical_block_size - 1); 349 + q->limits.misaligned = 0; 350 + } 351 + EXPORT_SYMBOL(blk_queue_alignment_offset); 352 + 353 + /** 354 + * blk_queue_io_min - set minimum request size for the queue 355 + * @q: the request queue for the device 356 + * @io_min: smallest I/O size in bytes 357 + * 358 + * Description: 359 + * Some devices have an internal block size bigger than the reported 360 + * hardware sector size. This function can be used to signal the 361 + * smallest I/O the device can perform without incurring a performance 362 + * penalty. 363 + */ 364 + void blk_queue_io_min(struct request_queue *q, unsigned int min) 365 + { 366 + q->limits.io_min = min; 367 + 368 + if (q->limits.io_min < q->limits.logical_block_size) 369 + q->limits.io_min = q->limits.logical_block_size; 370 + 371 + if (q->limits.io_min < q->limits.physical_block_size) 372 + q->limits.io_min = q->limits.physical_block_size; 373 + } 374 + EXPORT_SYMBOL(blk_queue_io_min); 375 + 376 + /** 377 + * blk_queue_io_opt - set optimal request size for the queue 378 + * @q: the request queue for the device 379 + * @io_opt: optimal request size in bytes 380 + * 381 + * Description: 382 + * Drivers can call this function to set the preferred I/O request 383 + * size for devices that report such a value. 384 + */ 385 + void blk_queue_io_opt(struct request_queue *q, unsigned int opt) 386 + { 387 + q->limits.io_opt = opt; 388 + } 389 + EXPORT_SYMBOL(blk_queue_io_opt); 315 390 316 391 /* 317 392 * Returns the minimum that is _not_ zero, unless both are zero. 
··· 410 317 void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b) 411 318 { 412 319 /* zero is "infinity" */ 413 - t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors); 414 - t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors); 415 - t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask, b->seg_boundary_mask); 320 + t->limits.max_sectors = min_not_zero(queue_max_sectors(t), 321 + queue_max_sectors(b)); 416 322 417 - t->max_phys_segments = min_not_zero(t->max_phys_segments, b->max_phys_segments); 418 - t->max_hw_segments = min_not_zero(t->max_hw_segments, b->max_hw_segments); 419 - t->max_segment_size = min_not_zero(t->max_segment_size, b->max_segment_size); 420 - t->hardsect_size = max(t->hardsect_size, b->hardsect_size); 323 + t->limits.max_hw_sectors = min_not_zero(queue_max_hw_sectors(t), 324 + queue_max_hw_sectors(b)); 325 + 326 + t->limits.seg_boundary_mask = min_not_zero(queue_segment_boundary(t), 327 + queue_segment_boundary(b)); 328 + 329 + t->limits.max_phys_segments = min_not_zero(queue_max_phys_segments(t), 330 + queue_max_phys_segments(b)); 331 + 332 + t->limits.max_hw_segments = min_not_zero(queue_max_hw_segments(t), 333 + queue_max_hw_segments(b)); 334 + 335 + t->limits.max_segment_size = min_not_zero(queue_max_segment_size(t), 336 + queue_max_segment_size(b)); 337 + 338 + t->limits.logical_block_size = max(queue_logical_block_size(t), 339 + queue_logical_block_size(b)); 340 + 421 341 if (!t->queue_lock) 422 342 WARN_ON_ONCE(1); 423 343 else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) { ··· 441 335 } 442 336 } 443 337 EXPORT_SYMBOL(blk_queue_stack_limits); 338 + 339 + /** 340 + * blk_stack_limits - adjust queue_limits for stacked devices 341 + * @t: the stacking driver limits (top) 342 + * @b: the underlying queue limits (bottom) 343 + * @offset: offset to beginning of data within component device 344 + * 345 + * Description: 346 + * Merges two queue_limit structs. 
Returns 0 if alignment didn't 347 + * change. Returns -1 if adding the bottom device caused 348 + * misalignment. 349 + */ 350 + int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, 351 + sector_t offset) 352 + { 353 + t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors); 354 + t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors); 355 + t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn); 356 + 357 + t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask, 358 + b->seg_boundary_mask); 359 + 360 + t->max_phys_segments = min_not_zero(t->max_phys_segments, 361 + b->max_phys_segments); 362 + 363 + t->max_hw_segments = min_not_zero(t->max_hw_segments, 364 + b->max_hw_segments); 365 + 366 + t->max_segment_size = min_not_zero(t->max_segment_size, 367 + b->max_segment_size); 368 + 369 + t->logical_block_size = max(t->logical_block_size, 370 + b->logical_block_size); 371 + 372 + t->physical_block_size = max(t->physical_block_size, 373 + b->physical_block_size); 374 + 375 + t->io_min = max(t->io_min, b->io_min); 376 + t->no_cluster |= b->no_cluster; 377 + 378 + /* Bottom device offset aligned? */ 379 + if (offset && 380 + (offset & (b->physical_block_size - 1)) != b->alignment_offset) { 381 + t->misaligned = 1; 382 + return -1; 383 + } 384 + 385 + /* If top has no alignment offset, inherit from bottom */ 386 + if (!t->alignment_offset) 387 + t->alignment_offset = 388 + b->alignment_offset & (b->physical_block_size - 1); 389 + 390 + /* Top device aligned on logical block boundary? 
*/ 391 + if (t->alignment_offset & (t->logical_block_size - 1)) { 392 + t->misaligned = 1; 393 + return -1; 394 + } 395 + 396 + return 0; 397 + } 398 + EXPORT_SYMBOL(blk_stack_limits); 399 + 400 + /** 401 + * disk_stack_limits - adjust queue limits for stacked drivers 402 + * @disk: MD/DM gendisk (top) 403 + * @bdev: the underlying block device (bottom) 404 + * @offset: offset to beginning of data within component device 405 + * 406 + * Description: 407 + * Merges the limits for two queues. Returns 0 if alignment 408 + * didn't change. Returns -1 if adding the bottom device caused 409 + * misalignment. 410 + */ 411 + void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, 412 + sector_t offset) 413 + { 414 + struct request_queue *t = disk->queue; 415 + struct request_queue *b = bdev_get_queue(bdev); 416 + 417 + offset += get_start_sect(bdev) << 9; 418 + 419 + if (blk_stack_limits(&t->limits, &b->limits, offset) < 0) { 420 + char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE]; 421 + 422 + disk_name(disk, 0, top); 423 + bdevname(bdev, bottom); 424 + 425 + printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n", 426 + top, bottom); 427 + } 428 + 429 + if (!t->queue_lock) 430 + WARN_ON_ONCE(1); 431 + else if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) { 432 + unsigned long flags; 433 + 434 + spin_lock_irqsave(t->queue_lock, flags); 435 + if (!test_bit(QUEUE_FLAG_CLUSTER, &b->queue_flags)) 436 + queue_flag_clear(QUEUE_FLAG_CLUSTER, t); 437 + spin_unlock_irqrestore(t->queue_lock, flags); 438 + } 439 + } 440 + EXPORT_SYMBOL(disk_stack_limits); 444 441 445 442 /** 446 443 * blk_queue_dma_pad - set pad mask ··· 605 396 dma_drain_needed_fn *dma_drain_needed, 606 397 void *buf, unsigned int size) 607 398 { 608 - if (q->max_hw_segments < 2 || q->max_phys_segments < 2) 399 + if (queue_max_hw_segments(q) < 2 || queue_max_phys_segments(q) < 2) 609 400 return -EINVAL; 610 401 /* make room for appending the drain */ 611 - --q->max_hw_segments; 612 - 
--q->max_phys_segments; 402 + blk_queue_max_hw_segments(q, queue_max_hw_segments(q) - 1); 403 + blk_queue_max_phys_segments(q, queue_max_phys_segments(q) - 1); 613 404 q->dma_drain_needed = dma_drain_needed; 614 405 q->dma_drain_buffer = buf; 615 406 q->dma_drain_size = size; ··· 631 422 __func__, mask); 632 423 } 633 424 634 - q->seg_boundary_mask = mask; 425 + q->limits.seg_boundary_mask = mask; 635 426 } 636 427 EXPORT_SYMBOL(blk_queue_segment_boundary); 637 428
+50 -12
block/blk-sysfs.c
··· 95 95 96 96 static ssize_t queue_max_sectors_show(struct request_queue *q, char *page) 97 97 { 98 - int max_sectors_kb = q->max_sectors >> 1; 98 + int max_sectors_kb = queue_max_sectors(q) >> 1; 99 99 100 100 return queue_var_show(max_sectors_kb, (page)); 101 101 } 102 102 103 - static ssize_t queue_hw_sector_size_show(struct request_queue *q, char *page) 103 + static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page) 104 104 { 105 - return queue_var_show(q->hardsect_size, page); 105 + return queue_var_show(queue_logical_block_size(q), page); 106 + } 107 + 108 + static ssize_t queue_physical_block_size_show(struct request_queue *q, char *page) 109 + { 110 + return queue_var_show(queue_physical_block_size(q), page); 111 + } 112 + 113 + static ssize_t queue_io_min_show(struct request_queue *q, char *page) 114 + { 115 + return queue_var_show(queue_io_min(q), page); 116 + } 117 + 118 + static ssize_t queue_io_opt_show(struct request_queue *q, char *page) 119 + { 120 + return queue_var_show(queue_io_opt(q), page); 106 121 } 107 122 108 123 static ssize_t 109 124 queue_max_sectors_store(struct request_queue *q, const char *page, size_t count) 110 125 { 111 126 unsigned long max_sectors_kb, 112 - max_hw_sectors_kb = q->max_hw_sectors >> 1, 127 + max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1, 113 128 page_kb = 1 << (PAGE_CACHE_SHIFT - 10); 114 129 ssize_t ret = queue_var_store(&max_sectors_kb, page, count); 115 130 ··· 132 117 return -EINVAL; 133 118 134 119 spin_lock_irq(q->queue_lock); 135 - q->max_sectors = max_sectors_kb << 1; 120 + blk_queue_max_sectors(q, max_sectors_kb << 1); 136 121 spin_unlock_irq(q->queue_lock); 137 122 138 123 return ret; ··· 140 125 141 126 static ssize_t queue_max_hw_sectors_show(struct request_queue *q, char *page) 142 127 { 143 - int max_hw_sectors_kb = q->max_hw_sectors >> 1; 128 + int max_hw_sectors_kb = queue_max_hw_sectors(q) >> 1; 144 129 145 130 return queue_var_show(max_hw_sectors_kb, (page)); 146 131 } 
··· 264 249 265 250 static struct queue_sysfs_entry queue_hw_sector_size_entry = { 266 251 .attr = {.name = "hw_sector_size", .mode = S_IRUGO }, 267 - .show = queue_hw_sector_size_show, 252 + .show = queue_logical_block_size_show, 253 + }; 254 + 255 + static struct queue_sysfs_entry queue_logical_block_size_entry = { 256 + .attr = {.name = "logical_block_size", .mode = S_IRUGO }, 257 + .show = queue_logical_block_size_show, 258 + }; 259 + 260 + static struct queue_sysfs_entry queue_physical_block_size_entry = { 261 + .attr = {.name = "physical_block_size", .mode = S_IRUGO }, 262 + .show = queue_physical_block_size_show, 263 + }; 264 + 265 + static struct queue_sysfs_entry queue_io_min_entry = { 266 + .attr = {.name = "minimum_io_size", .mode = S_IRUGO }, 267 + .show = queue_io_min_show, 268 + }; 269 + 270 + static struct queue_sysfs_entry queue_io_opt_entry = { 271 + .attr = {.name = "optimal_io_size", .mode = S_IRUGO }, 272 + .show = queue_io_opt_show, 268 273 }; 269 274 270 275 static struct queue_sysfs_entry queue_nonrot_entry = { ··· 318 283 &queue_max_sectors_entry.attr, 319 284 &queue_iosched_entry.attr, 320 285 &queue_hw_sector_size_entry.attr, 286 + &queue_logical_block_size_entry.attr, 287 + &queue_physical_block_size_entry.attr, 288 + &queue_io_min_entry.attr, 289 + &queue_io_opt_entry.attr, 321 290 &queue_nonrot_entry.attr, 322 291 &queue_nomerges_entry.attr, 323 292 &queue_rq_affinity_entry.attr, ··· 433 394 if (ret) 434 395 return ret; 435 396 436 - if (!q->request_fn) 437 - return 0; 438 - 439 - ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), 440 - "%s", "queue"); 397 + ret = kobject_add(&q->kobj, kobject_get(&dev->kobj), "%s", "queue"); 441 398 if (ret < 0) 442 399 return ret; 443 400 444 401 kobject_uevent(&q->kobj, KOBJ_ADD); 402 + 403 + if (!q->request_fn) 404 + return 0; 445 405 446 406 ret = elv_register_queue(q); 447 407 if (ret) {
+10 -7
block/blk-tag.c
··· 336 336 int blk_queue_start_tag(struct request_queue *q, struct request *rq) 337 337 { 338 338 struct blk_queue_tag *bqt = q->queue_tags; 339 - unsigned max_depth, offset; 339 + unsigned max_depth; 340 340 int tag; 341 341 342 342 if (unlikely((rq->cmd_flags & REQ_QUEUED))) { ··· 355 355 * to starve sync IO on behalf of flooding async IO. 356 356 */ 357 357 max_depth = bqt->max_depth; 358 - if (rq_is_sync(rq)) 359 - offset = 0; 360 - else 361 - offset = max_depth >> 2; 358 + if (!rq_is_sync(rq) && max_depth > 1) { 359 + max_depth -= 2; 360 + if (!max_depth) 361 + max_depth = 1; 362 + if (q->in_flight[0] > max_depth) 363 + return 1; 364 + } 362 365 363 366 do { 364 - tag = find_next_zero_bit(bqt->tag_map, max_depth, offset); 367 + tag = find_first_zero_bit(bqt->tag_map, max_depth); 365 368 if (tag >= max_depth) 366 369 return 1; 367 370 ··· 377 374 rq->cmd_flags |= REQ_QUEUED; 378 375 rq->tag = tag; 379 376 bqt->tag_index[tag] = rq; 380 - blkdev_dequeue_request(rq); 377 + blk_start_request(rq); 381 378 list_add(&rq->queuelist, &q->tag_busy_list); 382 379 return 0; 383 380 }
+9 -13
block/blk-timeout.c
··· 122 122 if (blk_mark_rq_complete(rq)) 123 123 continue; 124 124 blk_rq_timed_out(rq); 125 - } else { 126 - if (!next || time_after(next, rq->deadline)) 127 - next = rq->deadline; 128 - } 125 + } else if (!next || time_after(next, rq->deadline)) 126 + next = rq->deadline; 129 127 } 130 128 131 129 /* ··· 174 176 BUG_ON(!list_empty(&req->timeout_list)); 175 177 BUG_ON(test_bit(REQ_ATOM_COMPLETE, &req->atomic_flags)); 176 178 177 - if (req->timeout) 178 - req->deadline = jiffies + req->timeout; 179 - else { 180 - req->deadline = jiffies + q->rq_timeout; 181 - /* 182 - * Some LLDs, like scsi, peek at the timeout to prevent 183 - * a command from being retried forever. 184 - */ 179 + /* 180 + * Some LLDs, like scsi, peek at the timeout to prevent a 181 + * command from being retried forever. 182 + */ 183 + if (!req->timeout) 185 184 req->timeout = q->rq_timeout; 186 - } 185 + 186 + req->deadline = jiffies + req->timeout; 187 187 list_add_tail(&req->timeout_list, &q->timeout_list); 188 188 189 189 /*
+49 -2
block/blk.h
··· 13 13 void init_request_from_bio(struct request *req, struct bio *bio); 14 14 void blk_rq_bio_prep(struct request_queue *q, struct request *rq, 15 15 struct bio *bio); 16 + int blk_rq_append_bio(struct request_queue *q, struct request *rq, 17 + struct bio *bio); 18 + void blk_dequeue_request(struct request *rq); 16 19 void __blk_queue_free_tags(struct request_queue *q); 17 20 18 21 void blk_unplug_work(struct work_struct *work); ··· 46 43 clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags); 47 44 } 48 45 46 + /* 47 + * Internal elevator interface 48 + */ 49 + #define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash)) 50 + 51 + static inline struct request *__elv_next_request(struct request_queue *q) 52 + { 53 + struct request *rq; 54 + 55 + while (1) { 56 + while (!list_empty(&q->queue_head)) { 57 + rq = list_entry_rq(q->queue_head.next); 58 + if (blk_do_ordered(q, &rq)) 59 + return rq; 60 + } 61 + 62 + if (!q->elevator->ops->elevator_dispatch_fn(q, 0)) 63 + return NULL; 64 + } 65 + } 66 + 67 + static inline void elv_activate_rq(struct request_queue *q, struct request *rq) 68 + { 69 + struct elevator_queue *e = q->elevator; 70 + 71 + if (e->ops->elevator_activate_req_fn) 72 + e->ops->elevator_activate_req_fn(q, rq); 73 + } 74 + 75 + static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq) 76 + { 77 + struct elevator_queue *e = q->elevator; 78 + 79 + if (e->ops->elevator_deactivate_req_fn) 80 + e->ops->elevator_deactivate_req_fn(q, rq); 81 + } 82 + 49 83 #ifdef CONFIG_FAIL_IO_TIMEOUT 50 84 int blk_should_fake_timeout(struct request_queue *); 51 85 ssize_t part_timeout_show(struct device *, struct device_attribute *, char *); ··· 104 64 int attempt_back_merge(struct request_queue *q, struct request *rq); 105 65 int attempt_front_merge(struct request_queue *q, struct request *rq); 106 66 void blk_recalc_rq_segments(struct request *rq); 107 - void blk_recalc_rq_sectors(struct request *rq, int nsect); 108 67 109 68 void 
blk_queue_congestion_threshold(struct request_queue *q); 110 69 ··· 151 112 #endif 152 113 } 153 114 115 + /* 116 + * Contribute to IO statistics IFF: 117 + * 118 + * a) it's attached to a gendisk, and 119 + * b) the queue had IO stats enabled when this request was started, and 120 + * c) it's a file system request or a discard request 121 + */ 154 122 static inline int blk_do_io_stat(struct request *rq) 155 123 { 156 - return rq->rq_disk && blk_rq_io_stat(rq); 124 + return rq->rq_disk && blk_rq_io_stat(rq) && 125 + (blk_fs_request(rq) || blk_discard_rq(rq)); 157 126 } 158 127 159 128 #endif
+4 -4
block/bsg.c
··· 446 446 } 447 447 448 448 if (rq->next_rq) { 449 - hdr->dout_resid = rq->data_len; 450 - hdr->din_resid = rq->next_rq->data_len; 449 + hdr->dout_resid = rq->resid_len; 450 + hdr->din_resid = rq->next_rq->resid_len; 451 451 blk_rq_unmap_user(bidi_bio); 452 452 rq->next_rq->bio = NULL; 453 453 blk_put_request(rq->next_rq); 454 454 } else if (rq_data_dir(rq) == READ) 455 - hdr->din_resid = rq->data_len; 455 + hdr->din_resid = rq->resid_len; 456 456 else 457 - hdr->dout_resid = rq->data_len; 457 + hdr->dout_resid = rq->resid_len; 458 458 459 459 /* 460 460 * If the request generated a negative error number, return it
+19 -19
block/cfq-iosched.c
··· 349 349 else if (rq_is_meta(rq2) && !rq_is_meta(rq1)) 350 350 return rq2; 351 351 352 - s1 = rq1->sector; 353 - s2 = rq2->sector; 352 + s1 = blk_rq_pos(rq1); 353 + s2 = blk_rq_pos(rq2); 354 354 355 355 last = cfqd->last_position; 356 356 ··· 579 579 * Sort strictly based on sector. Smallest to the left, 580 580 * largest to the right. 581 581 */ 582 - if (sector > cfqq->next_rq->sector) 582 + if (sector > blk_rq_pos(cfqq->next_rq)) 583 583 n = &(*p)->rb_right; 584 - else if (sector < cfqq->next_rq->sector) 584 + else if (sector < blk_rq_pos(cfqq->next_rq)) 585 585 n = &(*p)->rb_left; 586 586 else 587 587 break; ··· 611 611 return; 612 612 613 613 cfqq->p_root = &cfqd->prio_trees[cfqq->org_ioprio]; 614 - __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root, cfqq->next_rq->sector, 615 - &parent, &p); 614 + __cfqq = cfq_prio_tree_lookup(cfqd, cfqq->p_root, 615 + blk_rq_pos(cfqq->next_rq), &parent, &p); 616 616 if (!__cfqq) { 617 617 rb_link_node(&cfqq->p_node, parent, p); 618 618 rb_insert_color(&cfqq->p_node, cfqq->p_root); ··· 760 760 cfq_log_cfqq(cfqd, RQ_CFQQ(rq), "activate rq, drv=%d", 761 761 cfqd->rq_in_driver); 762 762 763 - cfqd->last_position = rq->hard_sector + rq->hard_nr_sectors; 763 + cfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq); 764 764 } 765 765 766 766 static void cfq_deactivate_request(struct request_queue *q, struct request *rq) ··· 949 949 static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd, 950 950 struct request *rq) 951 951 { 952 - if (rq->sector >= cfqd->last_position) 953 - return rq->sector - cfqd->last_position; 952 + if (blk_rq_pos(rq) >= cfqd->last_position) 953 + return blk_rq_pos(rq) - cfqd->last_position; 954 954 else 955 - return cfqd->last_position - rq->sector; 955 + return cfqd->last_position - blk_rq_pos(rq); 956 956 } 957 957 958 958 #define CIC_SEEK_THR 8 * 1024 ··· 996 996 if (cfq_rq_close(cfqd, __cfqq->next_rq)) 997 997 return __cfqq; 998 998 999 - if (__cfqq->next_rq->sector < sector) 999 + if 
(blk_rq_pos(__cfqq->next_rq) < sector) 1000 1000 node = rb_next(&__cfqq->p_node); 1001 1001 else 1002 1002 node = rb_prev(&__cfqq->p_node); ··· 1282 1282 if (!cfqd->active_cic) { 1283 1283 struct cfq_io_context *cic = RQ_CIC(rq); 1284 1284 1285 - atomic_inc(&cic->ioc->refcount); 1285 + atomic_long_inc(&cic->ioc->refcount); 1286 1286 cfqd->active_cic = cic; 1287 1287 } 1288 1288 } ··· 1918 1918 1919 1919 if (!cic->last_request_pos) 1920 1920 sdist = 0; 1921 - else if (cic->last_request_pos < rq->sector) 1922 - sdist = rq->sector - cic->last_request_pos; 1921 + else if (cic->last_request_pos < blk_rq_pos(rq)) 1922 + sdist = blk_rq_pos(rq) - cic->last_request_pos; 1923 1923 else 1924 - sdist = cic->last_request_pos - rq->sector; 1924 + sdist = cic->last_request_pos - blk_rq_pos(rq); 1925 1925 1926 1926 /* 1927 1927 * Don't allow the seek distance to get too large from the ··· 2071 2071 cfq_update_io_seektime(cfqd, cic, rq); 2072 2072 cfq_update_idle_window(cfqd, cfqq, cic); 2073 2073 2074 - cic->last_request_pos = rq->sector + rq->nr_sectors; 2074 + cic->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq); 2075 2075 2076 2076 if (cfqq == cfqd->active_queue) { 2077 2077 /* ··· 2088 2088 if (blk_rq_bytes(rq) > PAGE_CACHE_SIZE || 2089 2089 cfqd->busy_queues > 1) { 2090 2090 del_timer(&cfqd->idle_slice_timer); 2091 - blk_start_queueing(cfqd->queue); 2091 + __blk_run_queue(cfqd->queue); 2092 2092 } 2093 2093 cfq_mark_cfqq_must_dispatch(cfqq); 2094 2094 } ··· 2100 2100 * this new queue is RT and the current one is BE 2101 2101 */ 2102 2102 cfq_preempt_queue(cfqd, cfqq); 2103 - blk_start_queueing(cfqd->queue); 2103 + __blk_run_queue(cfqd->queue); 2104 2104 } 2105 2105 } 2106 2106 ··· 2345 2345 struct request_queue *q = cfqd->queue; 2346 2346 2347 2347 spin_lock_irq(q->queue_lock); 2348 - blk_start_queueing(q); 2348 + __blk_run_queue(cfqd->queue); 2349 2349 spin_unlock_irq(q->queue_lock); 2350 2350 } 2351 2351
+2 -2
block/compat_ioctl.c
··· 763 763 case BLKBSZGET_32: /* get the logical block size (cf. BLKSSZGET) */ 764 764 return compat_put_int(arg, block_size(bdev)); 765 765 case BLKSSZGET: /* get block device hardware sector size */ 766 - return compat_put_int(arg, bdev_hardsect_size(bdev)); 766 + return compat_put_int(arg, bdev_logical_block_size(bdev)); 767 767 case BLKSECTGET: 768 768 return compat_put_ushort(arg, 769 - bdev_get_queue(bdev)->max_sectors); 769 + queue_max_sectors(bdev_get_queue(bdev))); 770 770 case BLKRASET: /* compatible, but no compat_ptr (!) */ 771 771 case BLKFRASET: 772 772 if (!capable(CAP_SYS_ADMIN))
+1 -1
block/deadline-iosched.c
··· 138 138 139 139 __rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector); 140 140 if (__rq) { 141 - BUG_ON(sector != __rq->sector); 141 + BUG_ON(sector != blk_rq_pos(__rq)); 142 142 143 143 if (elv_rq_merge_ok(__rq, bio)) { 144 144 ret = ELEVATOR_FRONT_MERGE;
+36 -149
block/elevator.c
··· 51 51 #define ELV_HASH_FN(sec) \ 52 52 (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift)) 53 53 #define ELV_HASH_ENTRIES (1 << elv_hash_shift) 54 - #define rq_hash_key(rq) ((rq)->sector + (rq)->nr_sectors) 55 - #define ELV_ON_HASH(rq) (!hlist_unhashed(&(rq)->hash)) 54 + #define rq_hash_key(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq)) 56 55 57 56 /* 58 57 * Query io scheduler to see if the current process issuing bio may be ··· 115 116 * we can merge and sequence is ok, check if it's possible 116 117 */ 117 118 if (elv_rq_merge_ok(__rq, bio)) { 118 - if (__rq->sector + __rq->nr_sectors == bio->bi_sector) 119 + if (blk_rq_pos(__rq) + blk_rq_sectors(__rq) == bio->bi_sector) 119 120 ret = ELEVATOR_BACK_MERGE; 120 - else if (__rq->sector - bio_sectors(bio) == bio->bi_sector) 121 + else if (blk_rq_pos(__rq) - bio_sectors(bio) == bio->bi_sector) 121 122 ret = ELEVATOR_FRONT_MERGE; 122 123 } 123 124 ··· 305 306 } 306 307 EXPORT_SYMBOL(elevator_exit); 307 308 308 - static void elv_activate_rq(struct request_queue *q, struct request *rq) 309 - { 310 - struct elevator_queue *e = q->elevator; 311 - 312 - if (e->ops->elevator_activate_req_fn) 313 - e->ops->elevator_activate_req_fn(q, rq); 314 - } 315 - 316 - static void elv_deactivate_rq(struct request_queue *q, struct request *rq) 317 - { 318 - struct elevator_queue *e = q->elevator; 319 - 320 - if (e->ops->elevator_deactivate_req_fn) 321 - e->ops->elevator_deactivate_req_fn(q, rq); 322 - } 323 - 324 309 static inline void __elv_rqhash_del(struct request *rq) 325 310 { 326 311 hlist_del_init(&rq->hash); ··· 366 383 parent = *p; 367 384 __rq = rb_entry(parent, struct request, rb_node); 368 385 369 - if (rq->sector < __rq->sector) 386 + if (blk_rq_pos(rq) < blk_rq_pos(__rq)) 370 387 p = &(*p)->rb_left; 371 - else if (rq->sector > __rq->sector) 388 + else if (blk_rq_pos(rq) > blk_rq_pos(__rq)) 372 389 p = &(*p)->rb_right; 373 390 else 374 391 return __rq; ··· 396 413 while (n) { 397 414 rq = rb_entry(n, struct request, rb_node); 
398 415 399 - if (sector < rq->sector) 416 + if (sector < blk_rq_pos(rq)) 400 417 n = n->rb_left; 401 - else if (sector > rq->sector) 418 + else if (sector > blk_rq_pos(rq)) 402 419 n = n->rb_right; 403 420 else 404 421 return rq; ··· 437 454 break; 438 455 if (pos->cmd_flags & stop_flags) 439 456 break; 440 - if (rq->sector >= boundary) { 441 - if (pos->sector < boundary) 457 + if (blk_rq_pos(rq) >= boundary) { 458 + if (blk_rq_pos(pos) < boundary) 442 459 continue; 443 460 } else { 444 - if (pos->sector >= boundary) 461 + if (blk_rq_pos(pos) >= boundary) 445 462 break; 446 463 } 447 - if (rq->sector >= pos->sector) 464 + if (blk_rq_pos(rq) >= blk_rq_pos(pos)) 448 465 break; 449 466 } 450 467 ··· 542 559 * in_flight count again 543 560 */ 544 561 if (blk_account_rq(rq)) { 545 - q->in_flight--; 562 + q->in_flight[rq_is_sync(rq)]--; 546 563 if (blk_sorted_rq(rq)) 547 564 elv_deactivate_rq(q, rq); 548 565 } ··· 571 588 */ 572 589 void elv_quiesce_start(struct request_queue *q) 573 590 { 591 + if (!q->elevator) 592 + return; 593 + 574 594 queue_flag_set(QUEUE_FLAG_ELVSWITCH, q); 575 595 576 596 /* ··· 581 595 */ 582 596 elv_drain_elevator(q); 583 597 while (q->rq.elvpriv) { 584 - blk_start_queueing(q); 598 + __blk_run_queue(q); 585 599 spin_unlock_irq(q->queue_lock); 586 600 msleep(10); 587 601 spin_lock_irq(q->queue_lock); ··· 625 639 * with anything. There's no point in delaying queue 626 640 * processing. 
627 641 */ 628 - blk_remove_plug(q); 629 - blk_start_queueing(q); 642 + __blk_run_queue(q); 630 643 break; 631 644 632 645 case ELEVATOR_INSERT_SORT: ··· 684 699 685 700 if (unplug_it && blk_queue_plugged(q)) { 686 701 int nrq = q->rq.count[BLK_RW_SYNC] + q->rq.count[BLK_RW_ASYNC] 687 - - q->in_flight; 702 + - queue_in_flight(q); 688 703 689 704 if (nrq >= q->unplug_thresh) 690 705 __generic_unplug_device(q); ··· 739 754 spin_unlock_irqrestore(q->queue_lock, flags); 740 755 } 741 756 EXPORT_SYMBOL(elv_add_request); 742 - 743 - static inline struct request *__elv_next_request(struct request_queue *q) 744 - { 745 - struct request *rq; 746 - 747 - while (1) { 748 - while (!list_empty(&q->queue_head)) { 749 - rq = list_entry_rq(q->queue_head.next); 750 - if (blk_do_ordered(q, &rq)) 751 - return rq; 752 - } 753 - 754 - if (!q->elevator->ops->elevator_dispatch_fn(q, 0)) 755 - return NULL; 756 - } 757 - } 758 - 759 - struct request *elv_next_request(struct request_queue *q) 760 - { 761 - struct request *rq; 762 - int ret; 763 - 764 - while ((rq = __elv_next_request(q)) != NULL) { 765 - if (!(rq->cmd_flags & REQ_STARTED)) { 766 - /* 767 - * This is the first time the device driver 768 - * sees this request (possibly after 769 - * requeueing). Notify IO scheduler. 
770 - */ 771 - if (blk_sorted_rq(rq)) 772 - elv_activate_rq(q, rq); 773 - 774 - /* 775 - * just mark as started even if we don't start 776 - * it, a request that has been delayed should 777 - * not be passed by new incoming requests 778 - */ 779 - rq->cmd_flags |= REQ_STARTED; 780 - trace_block_rq_issue(q, rq); 781 - } 782 - 783 - if (!q->boundary_rq || q->boundary_rq == rq) { 784 - q->end_sector = rq_end_sector(rq); 785 - q->boundary_rq = NULL; 786 - } 787 - 788 - if (rq->cmd_flags & REQ_DONTPREP) 789 - break; 790 - 791 - if (q->dma_drain_size && rq->data_len) { 792 - /* 793 - * make sure space for the drain appears we 794 - * know we can do this because max_hw_segments 795 - * has been adjusted to be one fewer than the 796 - * device can handle 797 - */ 798 - rq->nr_phys_segments++; 799 - } 800 - 801 - if (!q->prep_rq_fn) 802 - break; 803 - 804 - ret = q->prep_rq_fn(q, rq); 805 - if (ret == BLKPREP_OK) { 806 - break; 807 - } else if (ret == BLKPREP_DEFER) { 808 - /* 809 - * the request may have been (partially) prepped. 810 - * we need to keep this request in the front to 811 - * avoid resource deadlock. REQ_STARTED will 812 - * prevent other fs requests from passing this one. 
813 - */ 814 - if (q->dma_drain_size && rq->data_len && 815 - !(rq->cmd_flags & REQ_DONTPREP)) { 816 - /* 817 - * remove the space for the drain we added 818 - * so that we don't add it again 819 - */ 820 - --rq->nr_phys_segments; 821 - } 822 - 823 - rq = NULL; 824 - break; 825 - } else if (ret == BLKPREP_KILL) { 826 - rq->cmd_flags |= REQ_QUIET; 827 - __blk_end_request(rq, -EIO, blk_rq_bytes(rq)); 828 - } else { 829 - printk(KERN_ERR "%s: bad return=%d\n", __func__, ret); 830 - break; 831 - } 832 - } 833 - 834 - return rq; 835 - } 836 - EXPORT_SYMBOL(elv_next_request); 837 - 838 - void elv_dequeue_request(struct request_queue *q, struct request *rq) 839 - { 840 - BUG_ON(list_empty(&rq->queuelist)); 841 - BUG_ON(ELV_ON_HASH(rq)); 842 - 843 - list_del_init(&rq->queuelist); 844 - 845 - /* 846 - * the time frame between a request being removed from the lists 847 - * and to it is freed is accounted as io that is in progress at 848 - * the driver side. 849 - */ 850 - if (blk_account_rq(rq)) 851 - q->in_flight++; 852 - } 853 757 854 758 int elv_queue_empty(struct request_queue *q) 855 759 { ··· 809 935 rq = list_entry_rq(q->queue_head.next); 810 936 rq->cmd_flags |= REQ_QUIET; 811 937 trace_block_rq_abort(q, rq); 812 - __blk_end_request(rq, -EIO, blk_rq_bytes(rq)); 938 + /* 939 + * Mark this request as started so we don't trigger 940 + * any debug logic in the end I/O path. 
941 + */ 942 + blk_start_request(rq); 943 + __blk_end_request_all(rq, -EIO); 813 944 } 814 945 } 815 946 EXPORT_SYMBOL(elv_abort_queue); ··· 827 948 * request is released from the driver, io must be done 828 949 */ 829 950 if (blk_account_rq(rq)) { 830 - q->in_flight--; 951 + q->in_flight[rq_is_sync(rq)]--; 831 952 if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn) 832 953 e->ops->elevator_completed_req_fn(q, rq); 833 954 } ··· 842 963 if (!list_empty(&q->queue_head)) 843 964 next = list_entry_rq(q->queue_head.next); 844 965 845 - if (!q->in_flight && 966 + if (!queue_in_flight(q) && 846 967 blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN && 847 968 (!next || blk_ordered_req_seq(next) > QUEUE_ORDSEQ_DRAIN)) { 848 969 blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0); 849 - blk_start_queueing(q); 970 + __blk_run_queue(q); 850 971 } 851 972 } 852 973 } ··· 1054 1175 char elevator_name[ELV_NAME_MAX]; 1055 1176 struct elevator_type *e; 1056 1177 1178 + if (!q->elevator) 1179 + return count; 1180 + 1057 1181 strlcpy(elevator_name, name, sizeof(elevator_name)); 1058 1182 strstrip(elevator_name); 1059 1183 ··· 1080 1198 ssize_t elv_iosched_show(struct request_queue *q, char *name) 1081 1199 { 1082 1200 struct elevator_queue *e = q->elevator; 1083 - struct elevator_type *elv = e->elevator_type; 1201 + struct elevator_type *elv; 1084 1202 struct elevator_type *__e; 1085 1203 int len = 0; 1204 + 1205 + if (!q->elevator) 1206 + return sprintf(name, "none\n"); 1207 + 1208 + elv = e->elevator_type; 1086 1209 1087 1210 spin_lock(&elv_list_lock); 1088 1211 list_for_each_entry(__e, &elv_list, list) {
+11
block/genhd.c
··· 852 852 return sprintf(buf, "%x\n", disk->flags); 853 853 } 854 854 855 + static ssize_t disk_alignment_offset_show(struct device *dev, 856 + struct device_attribute *attr, 857 + char *buf) 858 + { 859 + struct gendisk *disk = dev_to_disk(dev); 860 + 861 + return sprintf(buf, "%d\n", queue_alignment_offset(disk->queue)); 862 + } 863 + 855 864 static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL); 856 865 static DEVICE_ATTR(ext_range, S_IRUGO, disk_ext_range_show, NULL); 857 866 static DEVICE_ATTR(removable, S_IRUGO, disk_removable_show, NULL); 858 867 static DEVICE_ATTR(ro, S_IRUGO, disk_ro_show, NULL); 859 868 static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL); 869 + static DEVICE_ATTR(alignment_offset, S_IRUGO, disk_alignment_offset_show, NULL); 860 870 static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL); 861 871 static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL); 862 872 #ifdef CONFIG_FAIL_MAKE_REQUEST ··· 885 875 &dev_attr_removable.attr, 886 876 &dev_attr_ro.attr, 887 877 &dev_attr_size.attr, 878 + &dev_attr_alignment_offset.attr, 888 879 &dev_attr_capability.attr, 889 880 &dev_attr_stat.attr, 890 881 #ifdef CONFIG_FAIL_MAKE_REQUEST
+6 -6
block/ioctl.c
··· 152 152 bio->bi_private = &wait; 153 153 bio->bi_sector = start; 154 154 155 - if (len > q->max_hw_sectors) { 156 - bio->bi_size = q->max_hw_sectors << 9; 157 - len -= q->max_hw_sectors; 158 - start += q->max_hw_sectors; 155 + if (len > queue_max_hw_sectors(q)) { 156 + bio->bi_size = queue_max_hw_sectors(q) << 9; 157 + len -= queue_max_hw_sectors(q); 158 + start += queue_max_hw_sectors(q); 159 159 } else { 160 160 bio->bi_size = len << 9; 161 161 len = 0; ··· 311 311 case BLKBSZGET: /* get the logical block size (cf. BLKSSZGET) */ 312 312 return put_int(arg, block_size(bdev)); 313 313 case BLKSSZGET: /* get block device hardware sector size */ 314 - return put_int(arg, bdev_hardsect_size(bdev)); 314 + return put_int(arg, bdev_logical_block_size(bdev)); 315 315 case BLKSECTGET: 316 - return put_ushort(arg, bdev_get_queue(bdev)->max_sectors); 316 + return put_ushort(arg, queue_max_sectors(bdev_get_queue(bdev))); 317 317 case BLKRASET: 318 318 case BLKFRASET: 319 319 if(!capable(CAP_SYS_ADMIN))
+5 -8
block/scsi_ioctl.c
··· 75 75 76 76 static int sg_get_reserved_size(struct request_queue *q, int __user *p) 77 77 { 78 - unsigned val = min(q->sg_reserved_size, q->max_sectors << 9); 78 + unsigned val = min(q->sg_reserved_size, queue_max_sectors(q) << 9); 79 79 80 80 return put_user(val, p); 81 81 } ··· 89 89 90 90 if (size < 0) 91 91 return -EINVAL; 92 - if (size > (q->max_sectors << 9)) 93 - size = q->max_sectors << 9; 92 + if (size > (queue_max_sectors(q) << 9)) 93 + size = queue_max_sectors(q) << 9; 94 94 95 95 q->sg_reserved_size = size; 96 96 return 0; ··· 230 230 hdr->info = 0; 231 231 if (hdr->masked_status || hdr->host_status || hdr->driver_status) 232 232 hdr->info |= SG_INFO_CHECK; 233 - hdr->resid = rq->data_len; 233 + hdr->resid = rq->resid_len; 234 234 hdr->sb_len_wr = 0; 235 235 236 236 if (rq->sense_len && hdr->sbp) { ··· 264 264 if (hdr->cmd_len > BLK_MAX_CDB) 265 265 return -EINVAL; 266 266 267 - if (hdr->dxfer_len > (q->max_hw_sectors << 9)) 267 + if (hdr->dxfer_len > (queue_max_hw_sectors(q) << 9)) 268 268 return -EIO; 269 269 270 270 if (hdr->dxfer_len) ··· 500 500 501 501 rq = blk_get_request(q, WRITE, __GFP_WAIT); 502 502 rq->cmd_type = REQ_TYPE_BLOCK_PC; 503 - rq->data = NULL; 504 - rq->data_len = 0; 505 - rq->extra_len = 0; 506 503 rq->timeout = BLK_DEFAULT_SG_TIMEOUT; 507 504 rq->cmd[0] = cmd; 508 505 rq->cmd[4] = data;
+1 -1
drivers/ata/libata-scsi.c
··· 1084 1084 if (likely(!blk_pc_request(rq))) 1085 1085 return 0; 1086 1086 1087 - if (!rq->data_len || (rq->cmd_flags & REQ_RW)) 1087 + if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_RW)) 1088 1088 return 0; 1089 1089 1090 1090 return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC;
+5 -5
drivers/block/DAC960.c
··· 3321 3321 DAC960_Command_T *Command; 3322 3322 3323 3323 while(1) { 3324 - Request = elv_next_request(req_q); 3324 + Request = blk_peek_request(req_q); 3325 3325 if (!Request) 3326 3326 return 1; 3327 3327 ··· 3338 3338 } 3339 3339 Command->Completion = Request->end_io_data; 3340 3340 Command->LogicalDriveNumber = (long)Request->rq_disk->private_data; 3341 - Command->BlockNumber = Request->sector; 3342 - Command->BlockCount = Request->nr_sectors; 3341 + Command->BlockNumber = blk_rq_pos(Request); 3342 + Command->BlockCount = blk_rq_sectors(Request); 3343 3343 Command->Request = Request; 3344 - blkdev_dequeue_request(Request); 3344 + blk_start_request(Request); 3345 3345 Command->SegmentCount = blk_rq_map_sg(req_q, 3346 3346 Command->Request, Command->cmd_sglist); 3347 3347 /* pci_map_sg MAY change the value of SegCount */ ··· 3431 3431 * successfully as possible. 3432 3432 */ 3433 3433 Command->SegmentCount = 1; 3434 - Command->BlockNumber = Request->sector; 3434 + Command->BlockNumber = blk_rq_pos(Request); 3435 3435 Command->BlockCount = 1; 3436 3436 DAC960_QueueReadWriteCommand(Command); 3437 3437 return;
+1 -1
drivers/block/Kconfig
··· 412 412 413 413 config MG_DISK 414 414 tristate "mGine mflash, gflash support" 415 - depends on ARM && ATA && GPIOLIB 415 + depends on ARM && GPIOLIB 416 416 help 417 417 mGine mFlash(gFlash) block device driver 418 418
+23 -31
drivers/block/amiflop.c
··· 112 112 MODULE_LICENSE("GPL"); 113 113 114 114 static struct request_queue *floppy_queue; 115 - #define QUEUE (floppy_queue) 116 - #define CURRENT elv_next_request(floppy_queue) 117 115 118 116 /* 119 117 * Macros ··· 1333 1335 1334 1336 static void redo_fd_request(void) 1335 1337 { 1338 + struct request *rq; 1336 1339 unsigned int cnt, block, track, sector; 1337 1340 int drive; 1338 1341 struct amiga_floppy_struct *floppy; 1339 1342 char *data; 1340 1343 unsigned long flags; 1344 + int err; 1341 1345 1342 - repeat: 1343 - if (!CURRENT) { 1346 + next_req: 1347 + rq = blk_fetch_request(floppy_queue); 1348 + if (!rq) { 1344 1349 /* Nothing left to do */ 1345 1350 return; 1346 1351 } 1347 1352 1348 - floppy = CURRENT->rq_disk->private_data; 1353 + floppy = rq->rq_disk->private_data; 1349 1354 drive = floppy - unit; 1350 1355 1356 + next_segment: 1351 1357 /* Here someone could investigate to be more efficient */ 1352 - for (cnt = 0; cnt < CURRENT->current_nr_sectors; cnt++) { 1358 + for (cnt = 0, err = 0; cnt < blk_rq_cur_sectors(rq); cnt++) { 1353 1359 #ifdef DEBUG 1354 1360 printk("fd: sector %ld + %d requested for %s\n", 1355 - CURRENT->sector,cnt, 1356 - (rq_data_dir(CURRENT) == READ) ? "read" : "write"); 1361 + blk_rq_pos(rq), cnt, 1362 + (rq_data_dir(rq) == READ) ? 
"read" : "write"); 1357 1363 #endif 1358 - block = CURRENT->sector + cnt; 1364 + block = blk_rq_pos(rq) + cnt; 1359 1365 if ((int)block > floppy->blocks) { 1360 - end_request(CURRENT, 0); 1361 - goto repeat; 1366 + err = -EIO; 1367 + break; 1362 1368 } 1363 1369 1364 1370 track = block / (floppy->dtype->sects * floppy->type->sect_mult); 1365 1371 sector = block % (floppy->dtype->sects * floppy->type->sect_mult); 1366 - data = CURRENT->buffer + 512 * cnt; 1372 + data = rq->buffer + 512 * cnt; 1367 1373 #ifdef DEBUG 1368 1374 printk("access to track %d, sector %d, with buffer at " 1369 1375 "0x%08lx\n", track, sector, data); 1370 1376 #endif 1371 1377 1372 - if ((rq_data_dir(CURRENT) != READ) && (rq_data_dir(CURRENT) != WRITE)) { 1373 - printk(KERN_WARNING "do_fd_request: unknown command\n"); 1374 - end_request(CURRENT, 0); 1375 - goto repeat; 1376 - } 1377 1378 if (get_track(drive, track) == -1) { 1378 - end_request(CURRENT, 0); 1379 - goto repeat; 1379 + err = -EIO; 1380 + break; 1380 1381 } 1381 1382 1382 - switch (rq_data_dir(CURRENT)) { 1383 - case READ: 1383 + if (rq_data_dir(rq) == READ) { 1384 1384 memcpy(data, floppy->trackbuf + sector * 512, 512); 1385 - break; 1386 - 1387 - case WRITE: 1385 + } else { 1388 1386 memcpy(floppy->trackbuf + sector * 512, data, 512); 1389 1387 1390 1388 /* keep the drive spinning while writes are scheduled */ 1391 1389 if (!fd_motor_on(drive)) { 1392 - end_request(CURRENT, 0); 1393 - goto repeat; 1390 + err = -EIO; 1391 + break; 1394 1392 } 1395 1393 /* 1396 1394 * setup a callback to write the track buffer ··· 1398 1404 /* reset the timer */ 1399 1405 mod_timer (flush_track_timer + drive, jiffies + 1); 1400 1406 local_irq_restore(flags); 1401 - break; 1402 1407 } 1403 1408 } 1404 - CURRENT->nr_sectors -= CURRENT->current_nr_sectors; 1405 - CURRENT->sector += CURRENT->current_nr_sectors; 1406 1409 1407 - end_request(CURRENT, 1); 1408 - goto repeat; 1410 + if (__blk_end_request_cur(rq, err)) 1411 + goto next_segment; 1412 + goto 
next_req; 1409 1413 } 1410 1414 1411 1415 static void do_fd_request(struct request_queue * q)
+34 -32
drivers/block/ataflop.c
··· 79 79 #undef DEBUG 80 80 81 81 static struct request_queue *floppy_queue; 82 - 83 - #define QUEUE (floppy_queue) 84 - #define CURRENT elv_next_request(floppy_queue) 82 + static struct request *fd_request; 85 83 86 84 /* Disk types: DD, HD, ED */ 87 85 static struct atari_disk_type { ··· 374 376 static DEFINE_TIMER(timeout_timer, fd_times_out, 0, 0); 375 377 static DEFINE_TIMER(fd_timer, check_change, 0, 0); 376 378 379 + static void fd_end_request_cur(int err) 380 + { 381 + if (!__blk_end_request_cur(fd_request, err)) 382 + fd_request = NULL; 383 + } 384 + 377 385 static inline void start_motor_off_timer(void) 378 386 { 379 387 mod_timer(&motor_off_timer, jiffies + FD_MOTOR_OFF_DELAY); ··· 610 606 return; 611 607 } 612 608 613 - if (!CURRENT) 609 + if (!fd_request) 614 610 return; 615 611 616 - CURRENT->errors++; 617 - if (CURRENT->errors >= MAX_ERRORS) { 612 + fd_request->errors++; 613 + if (fd_request->errors >= MAX_ERRORS) { 618 614 printk(KERN_ERR "fd%d: too many errors.\n", SelectedDrive ); 619 - end_request(CURRENT, 0); 615 + fd_end_request_cur(-EIO); 620 616 } 621 - else if (CURRENT->errors == RECALIBRATE_ERRORS) { 617 + else if (fd_request->errors == RECALIBRATE_ERRORS) { 622 618 printk(KERN_WARNING "fd%d: recalibrating\n", SelectedDrive ); 623 619 if (SelectedDrive != -1) 624 620 SUD.track = -1; ··· 729 725 if (IS_BUFFERED( drive, ReqSide, ReqTrack )) { 730 726 if (ReqCmd == READ) { 731 727 copy_buffer( SECTOR_BUFFER(ReqSector), ReqData ); 732 - if (++ReqCnt < CURRENT->current_nr_sectors) { 728 + if (++ReqCnt < blk_rq_cur_sectors(fd_request)) { 733 729 /* read next sector */ 734 730 setup_req_params( drive ); 735 731 goto repeat; 736 732 } 737 733 else { 738 734 /* all sectors finished */ 739 - CURRENT->nr_sectors -= CURRENT->current_nr_sectors; 740 - CURRENT->sector += CURRENT->current_nr_sectors; 741 - end_request(CURRENT, 1); 735 + fd_end_request_cur(0); 742 736 redo_fd_request(); 743 737 return; 744 738 } ··· 1134 1132 } 1135 1133 } 1136 1134 1137 
- if (++ReqCnt < CURRENT->current_nr_sectors) { 1135 + if (++ReqCnt < blk_rq_cur_sectors(fd_request)) { 1138 1136 /* read next sector */ 1139 1137 setup_req_params( SelectedDrive ); 1140 1138 do_fd_action( SelectedDrive ); 1141 1139 } 1142 1140 else { 1143 1141 /* all sectors finished */ 1144 - CURRENT->nr_sectors -= CURRENT->current_nr_sectors; 1145 - CURRENT->sector += CURRENT->current_nr_sectors; 1146 - end_request(CURRENT, 1); 1142 + fd_end_request_cur(0); 1147 1143 redo_fd_request(); 1148 1144 } 1149 1145 return; ··· 1382 1382 ReqData = ReqBuffer + 512 * ReqCnt; 1383 1383 1384 1384 if (UseTrackbuffer) 1385 - read_track = (ReqCmd == READ && CURRENT->errors == 0); 1385 + read_track = (ReqCmd == READ && fd_request->errors == 0); 1386 1386 else 1387 1387 read_track = 0; 1388 1388 ··· 1396 1396 int drive, type; 1397 1397 struct atari_floppy_struct *floppy; 1398 1398 1399 - DPRINT(("redo_fd_request: CURRENT=%p dev=%s CURRENT->sector=%ld\n", 1400 - CURRENT, CURRENT ? CURRENT->rq_disk->disk_name : "", 1401 - CURRENT ? CURRENT->sector : 0 )); 1399 + DPRINT(("redo_fd_request: fd_request=%p dev=%s fd_request->sector=%ld\n", 1400 + fd_request, fd_request ? fd_request->rq_disk->disk_name : "", 1401 + fd_request ? 
blk_rq_pos(fd_request) : 0 )); 1402 1402 1403 1403 IsFormatting = 0; 1404 1404 1405 1405 repeat: 1406 + if (!fd_request) { 1407 + fd_request = blk_fetch_request(floppy_queue); 1408 + if (!fd_request) 1409 + goto the_end; 1410 + } 1406 1411 1407 - if (!CURRENT) 1408 - goto the_end; 1409 - 1410 - floppy = CURRENT->rq_disk->private_data; 1412 + floppy = fd_request->rq_disk->private_data; 1411 1413 drive = floppy - unit; 1412 1414 type = floppy->type; 1413 1415 1414 1416 if (!UD.connected) { 1415 1417 /* drive not connected */ 1416 1418 printk(KERN_ERR "Unknown Device: fd%d\n", drive ); 1417 - end_request(CURRENT, 0); 1419 + fd_end_request_cur(-EIO); 1418 1420 goto repeat; 1419 1421 } 1420 1422 ··· 1432 1430 /* user supplied disk type */ 1433 1431 if (--type >= NUM_DISK_MINORS) { 1434 1432 printk(KERN_WARNING "fd%d: invalid disk format", drive ); 1435 - end_request(CURRENT, 0); 1433 + fd_end_request_cur(-EIO); 1436 1434 goto repeat; 1437 1435 } 1438 1436 if (minor2disktype[type].drive_types > DriveType) { 1439 1437 printk(KERN_WARNING "fd%d: unsupported disk format", drive ); 1440 - end_request(CURRENT, 0); 1438 + fd_end_request_cur(-EIO); 1441 1439 goto repeat; 1442 1440 } 1443 1441 type = minor2disktype[type].index; ··· 1446 1444 UD.autoprobe = 0; 1447 1445 } 1448 1446 1449 - if (CURRENT->sector + 1 > UDT->blocks) { 1450 - end_request(CURRENT, 0); 1447 + if (blk_rq_pos(fd_request) + 1 > UDT->blocks) { 1448 + fd_end_request_cur(-EIO); 1451 1449 goto repeat; 1452 1450 } 1453 1451 ··· 1455 1453 del_timer( &motor_off_timer ); 1456 1454 1457 1455 ReqCnt = 0; 1458 - ReqCmd = rq_data_dir(CURRENT); 1459 - ReqBlock = CURRENT->sector; 1460 - ReqBuffer = CURRENT->buffer; 1456 + ReqCmd = rq_data_dir(fd_request); 1457 + ReqBlock = blk_rq_pos(fd_request); 1458 + ReqBuffer = fd_request->buffer; 1461 1459 setup_req_params( drive ); 1462 1460 do_fd_action( drive ); 1463 1461
+1 -6
drivers/block/brd.c
··· 407 407 rd_size = simple_strtol(str, NULL, 0); 408 408 return 1; 409 409 } 410 - static int __init ramdisk_size2(char *str) 411 - { 412 - return ramdisk_size(str); 413 - } 414 - __setup("ramdisk=", ramdisk_size); 415 - __setup("ramdisk_size=", ramdisk_size2); 410 + __setup("ramdisk_size=", ramdisk_size); 416 411 #endif 417 412 418 413 /*
+561 -368
drivers/block/cciss.c
··· 180 180 __u32); 181 181 static void start_io(ctlr_info_t *h); 182 182 static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, 183 - unsigned int use_unit_num, unsigned int log_unit, 184 183 __u8 page_code, unsigned char *scsi3addr, int cmd_type); 185 184 static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size, 186 - unsigned int use_unit_num, unsigned int log_unit, 187 - __u8 page_code, int cmd_type); 185 + __u8 page_code, unsigned char scsi3addr[], 186 + int cmd_type); 187 + static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c, 188 + int attempt_retry); 189 + static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c); 188 190 189 191 static void fail_all_cmds(unsigned long ctlr); 190 192 static int scan_thread(void *data); ··· 438 436 &cciss_proc_fops, hba[i]); 439 437 } 440 438 #endif /* CONFIG_PROC_FS */ 439 + 440 + #define MAX_PRODUCT_NAME_LEN 19 441 + 442 + #define to_hba(n) container_of(n, struct ctlr_info, dev) 443 + #define to_drv(n) container_of(n, drive_info_struct, dev) 444 + 445 + static struct device_type cciss_host_type = { 446 + .name = "cciss_host", 447 + }; 448 + 449 + static ssize_t dev_show_unique_id(struct device *dev, 450 + struct device_attribute *attr, 451 + char *buf) 452 + { 453 + drive_info_struct *drv = to_drv(dev); 454 + struct ctlr_info *h = to_hba(drv->dev.parent); 455 + __u8 sn[16]; 456 + unsigned long flags; 457 + int ret = 0; 458 + 459 + spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); 460 + if (h->busy_configuring) 461 + ret = -EBUSY; 462 + else 463 + memcpy(sn, drv->serial_no, sizeof(sn)); 464 + spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 465 + 466 + if (ret) 467 + return ret; 468 + else 469 + return snprintf(buf, 16 * 2 + 2, 470 + "%02X%02X%02X%02X%02X%02X%02X%02X" 471 + "%02X%02X%02X%02X%02X%02X%02X%02X\n", 472 + sn[0], sn[1], sn[2], sn[3], 473 + sn[4], sn[5], sn[6], sn[7], 474 + sn[8], sn[9], sn[10], sn[11], 475 + sn[12], sn[13], sn[14], sn[15]); 476 + } 477 + 
DEVICE_ATTR(unique_id, S_IRUGO, dev_show_unique_id, NULL); 478 + 479 + static ssize_t dev_show_vendor(struct device *dev, 480 + struct device_attribute *attr, 481 + char *buf) 482 + { 483 + drive_info_struct *drv = to_drv(dev); 484 + struct ctlr_info *h = to_hba(drv->dev.parent); 485 + char vendor[VENDOR_LEN + 1]; 486 + unsigned long flags; 487 + int ret = 0; 488 + 489 + spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); 490 + if (h->busy_configuring) 491 + ret = -EBUSY; 492 + else 493 + memcpy(vendor, drv->vendor, VENDOR_LEN + 1); 494 + spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 495 + 496 + if (ret) 497 + return ret; 498 + else 499 + return snprintf(buf, sizeof(vendor) + 1, "%s\n", drv->vendor); 500 + } 501 + DEVICE_ATTR(vendor, S_IRUGO, dev_show_vendor, NULL); 502 + 503 + static ssize_t dev_show_model(struct device *dev, 504 + struct device_attribute *attr, 505 + char *buf) 506 + { 507 + drive_info_struct *drv = to_drv(dev); 508 + struct ctlr_info *h = to_hba(drv->dev.parent); 509 + char model[MODEL_LEN + 1]; 510 + unsigned long flags; 511 + int ret = 0; 512 + 513 + spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); 514 + if (h->busy_configuring) 515 + ret = -EBUSY; 516 + else 517 + memcpy(model, drv->model, MODEL_LEN + 1); 518 + spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 519 + 520 + if (ret) 521 + return ret; 522 + else 523 + return snprintf(buf, sizeof(model) + 1, "%s\n", drv->model); 524 + } 525 + DEVICE_ATTR(model, S_IRUGO, dev_show_model, NULL); 526 + 527 + static ssize_t dev_show_rev(struct device *dev, 528 + struct device_attribute *attr, 529 + char *buf) 530 + { 531 + drive_info_struct *drv = to_drv(dev); 532 + struct ctlr_info *h = to_hba(drv->dev.parent); 533 + char rev[REV_LEN + 1]; 534 + unsigned long flags; 535 + int ret = 0; 536 + 537 + spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); 538 + if (h->busy_configuring) 539 + ret = -EBUSY; 540 + else 541 + memcpy(rev, drv->rev, REV_LEN + 1); 542 + spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), 
flags); 543 + 544 + if (ret) 545 + return ret; 546 + else 547 + return snprintf(buf, sizeof(rev) + 1, "%s\n", drv->rev); 548 + } 549 + DEVICE_ATTR(rev, S_IRUGO, dev_show_rev, NULL); 550 + 551 + static struct attribute *cciss_dev_attrs[] = { 552 + &dev_attr_unique_id.attr, 553 + &dev_attr_model.attr, 554 + &dev_attr_vendor.attr, 555 + &dev_attr_rev.attr, 556 + NULL 557 + }; 558 + 559 + static struct attribute_group cciss_dev_attr_group = { 560 + .attrs = cciss_dev_attrs, 561 + }; 562 + 563 + static struct attribute_group *cciss_dev_attr_groups[] = { 564 + &cciss_dev_attr_group, 565 + NULL 566 + }; 567 + 568 + static struct device_type cciss_dev_type = { 569 + .name = "cciss_device", 570 + .groups = cciss_dev_attr_groups, 571 + }; 572 + 573 + static struct bus_type cciss_bus_type = { 574 + .name = "cciss", 575 + }; 576 + 577 + 578 + /* 579 + * Initialize sysfs entry for each controller. This sets up and registers 580 + * the 'cciss#' directory for each individual controller under 581 + * /sys/bus/pci/devices/<dev>/. 582 + */ 583 + static int cciss_create_hba_sysfs_entry(struct ctlr_info *h) 584 + { 585 + device_initialize(&h->dev); 586 + h->dev.type = &cciss_host_type; 587 + h->dev.bus = &cciss_bus_type; 588 + dev_set_name(&h->dev, "%s", h->devname); 589 + h->dev.parent = &h->pdev->dev; 590 + 591 + return device_add(&h->dev); 592 + } 593 + 594 + /* 595 + * Remove sysfs entries for an hba. 596 + */ 597 + static void cciss_destroy_hba_sysfs_entry(struct ctlr_info *h) 598 + { 599 + device_del(&h->dev); 600 + } 601 + 602 + /* 603 + * Initialize sysfs for each logical drive. This sets up and registers 604 + * the 'c#d#' directory for each individual logical drive under 605 + * /sys/bus/pci/devices/<dev/ccis#/. We also create a link from 606 + * /sys/block/cciss!c#d# to this entry. 
607 + */ 608 + static int cciss_create_ld_sysfs_entry(struct ctlr_info *h, 609 + drive_info_struct *drv, 610 + int drv_index) 611 + { 612 + device_initialize(&drv->dev); 613 + drv->dev.type = &cciss_dev_type; 614 + drv->dev.bus = &cciss_bus_type; 615 + dev_set_name(&drv->dev, "c%dd%d", h->ctlr, drv_index); 616 + drv->dev.parent = &h->dev; 617 + return device_add(&drv->dev); 618 + } 619 + 620 + /* 621 + * Remove sysfs entries for a logical drive. 622 + */ 623 + static void cciss_destroy_ld_sysfs_entry(drive_info_struct *drv) 624 + { 625 + device_del(&drv->dev); 626 + } 441 627 442 628 /* 443 629 * For operations that cannot sleep, a command block is allocated at init, ··· 1489 1299 { 1490 1300 CommandList_struct *cmd = rq->completion_data; 1491 1301 ctlr_info_t *h = hba[cmd->ctlr]; 1492 - unsigned int nr_bytes; 1493 1302 unsigned long flags; 1494 1303 u64bit temp64; 1495 1304 int i, ddir; ··· 1510 1321 printk("Done with %p\n", rq); 1511 1322 #endif /* CCISS_DEBUG */ 1512 1323 1513 - /* 1514 - * Store the full size and set the residual count for pc requests 1515 - */ 1516 - nr_bytes = blk_rq_bytes(rq); 1324 + /* set the residual count for pc requests */ 1517 1325 if (blk_pc_request(rq)) 1518 - rq->data_len = cmd->err_info->ResidualCnt; 1326 + rq->resid_len = cmd->err_info->ResidualCnt; 1519 1327 1520 - if (blk_end_request(rq, (rq->errors == 0) ? 0 : -EIO, nr_bytes)) 1521 - BUG(); 1328 + blk_end_request_all(rq, (rq->errors == 0) ? 
0 : -EIO); 1522 1329 1523 1330 spin_lock_irqsave(&h->lock, flags); 1524 1331 cmd_free(h, cmd, 1); 1525 1332 cciss_check_queues(h); 1526 1333 spin_unlock_irqrestore(&h->lock, flags); 1334 + } 1335 + 1336 + static void log_unit_to_scsi3addr(ctlr_info_t *h, unsigned char scsi3addr[], 1337 + uint32_t log_unit) 1338 + { 1339 + log_unit = h->drv[log_unit].LunID & 0x03fff; 1340 + memset(&scsi3addr[4], 0, 4); 1341 + memcpy(&scsi3addr[0], &log_unit, 4); 1342 + scsi3addr[3] |= 0x40; 1343 + } 1344 + 1345 + /* This function gets the SCSI vendor, model, and revision of a logical drive 1346 + * via the inquiry page 0. Model, vendor, and rev are set to empty strings if 1347 + * they cannot be read. 1348 + */ 1349 + static void cciss_get_device_descr(int ctlr, int logvol, int withirq, 1350 + char *vendor, char *model, char *rev) 1351 + { 1352 + int rc; 1353 + InquiryData_struct *inq_buf; 1354 + unsigned char scsi3addr[8]; 1355 + 1356 + *vendor = '\0'; 1357 + *model = '\0'; 1358 + *rev = '\0'; 1359 + 1360 + inq_buf = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL); 1361 + if (!inq_buf) 1362 + return; 1363 + 1364 + log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol); 1365 + if (withirq) 1366 + rc = sendcmd_withirq(CISS_INQUIRY, ctlr, inq_buf, 1367 + sizeof(InquiryData_struct), 0, 1368 + scsi3addr, TYPE_CMD); 1369 + else 1370 + rc = sendcmd(CISS_INQUIRY, ctlr, inq_buf, 1371 + sizeof(InquiryData_struct), 0, 1372 + scsi3addr, TYPE_CMD); 1373 + if (rc == IO_OK) { 1374 + memcpy(vendor, &inq_buf->data_byte[8], VENDOR_LEN); 1375 + vendor[VENDOR_LEN] = '\0'; 1376 + memcpy(model, &inq_buf->data_byte[16], MODEL_LEN); 1377 + model[MODEL_LEN] = '\0'; 1378 + memcpy(rev, &inq_buf->data_byte[32], REV_LEN); 1379 + rev[REV_LEN] = '\0'; 1380 + } 1381 + 1382 + kfree(inq_buf); 1383 + return; 1527 1384 } 1528 1385 1529 1386 /* This function gets the serial number of a logical drive via ··· 1583 1348 #define PAGE_83_INQ_BYTES 64 1584 1349 int rc; 1585 1350 unsigned char *buf; 1351 + unsigned char 
scsi3addr[8]; 1586 1352 1587 1353 if (buflen > 16) 1588 1354 buflen = 16; ··· 1592 1356 if (!buf) 1593 1357 return; 1594 1358 memset(serial_no, 0, buflen); 1359 + log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol); 1595 1360 if (withirq) 1596 1361 rc = sendcmd_withirq(CISS_INQUIRY, ctlr, buf, 1597 - PAGE_83_INQ_BYTES, 1, logvol, 0x83, TYPE_CMD); 1362 + PAGE_83_INQ_BYTES, 0x83, scsi3addr, TYPE_CMD); 1598 1363 else 1599 1364 rc = sendcmd(CISS_INQUIRY, ctlr, buf, 1600 - PAGE_83_INQ_BYTES, 1, logvol, 0x83, NULL, TYPE_CMD); 1365 + PAGE_83_INQ_BYTES, 0x83, scsi3addr, TYPE_CMD); 1601 1366 if (rc == IO_OK) 1602 1367 memcpy(serial_no, &buf[8], buflen); 1603 1368 kfree(buf); ··· 1614 1377 disk->first_minor = drv_index << NWD_SHIFT; 1615 1378 disk->fops = &cciss_fops; 1616 1379 disk->private_data = &h->drv[drv_index]; 1617 - disk->driverfs_dev = &h->pdev->dev; 1380 + disk->driverfs_dev = &h->drv[drv_index].dev; 1618 1381 1619 1382 /* Set up queue information */ 1620 1383 blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask); ··· 1631 1394 1632 1395 disk->queue->queuedata = h; 1633 1396 1634 - blk_queue_hardsect_size(disk->queue, 1635 - h->drv[drv_index].block_size); 1397 + blk_queue_logical_block_size(disk->queue, 1398 + h->drv[drv_index].block_size); 1636 1399 1637 1400 /* Make sure all queue data is written out before */ 1638 1401 /* setting h->drv[drv_index].queue, as setting this */ ··· 1705 1468 drvinfo->block_size = block_size; 1706 1469 drvinfo->nr_blocks = total_size + 1; 1707 1470 1471 + cciss_get_device_descr(ctlr, drv_index, 1, drvinfo->vendor, 1472 + drvinfo->model, drvinfo->rev); 1708 1473 cciss_get_serial_no(ctlr, drv_index, 1, drvinfo->serial_no, 1709 1474 sizeof(drvinfo->serial_no)); 1710 1475 ··· 1756 1517 h->drv[drv_index].cylinders = drvinfo->cylinders; 1757 1518 h->drv[drv_index].raid_level = drvinfo->raid_level; 1758 1519 memcpy(h->drv[drv_index].serial_no, drvinfo->serial_no, 16); 1520 + memcpy(h->drv[drv_index].vendor, drvinfo->vendor, VENDOR_LEN + 
1); 1521 + memcpy(h->drv[drv_index].model, drvinfo->model, MODEL_LEN + 1); 1522 + memcpy(h->drv[drv_index].rev, drvinfo->rev, REV_LEN + 1); 1759 1523 1760 1524 ++h->num_luns; 1761 1525 disk = h->gendisk[drv_index]; ··· 1833 1591 } 1834 1592 } 1835 1593 h->drv[drv_index].LunID = lunid; 1594 + if (cciss_create_ld_sysfs_entry(h, &h->drv[drv_index], drv_index)) 1595 + goto err_free_disk; 1836 1596 1837 1597 /* Don't need to mark this busy because nobody */ 1838 1598 /* else knows about this disk yet to contend */ ··· 1842 1598 h->drv[drv_index].busy_configuring = 0; 1843 1599 wmb(); 1844 1600 return drv_index; 1601 + 1602 + err_free_disk: 1603 + put_disk(h->gendisk[drv_index]); 1604 + h->gendisk[drv_index] = NULL; 1605 + return -1; 1845 1606 } 1846 1607 1847 1608 /* This is for the special case of a controller which ··· 1917 1668 goto mem_msg; 1918 1669 1919 1670 return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff, 1920 - sizeof(ReportLunData_struct), 0, 1921 - 0, 0, TYPE_CMD); 1671 + sizeof(ReportLunData_struct), 1672 + 0, CTLR_LUNID, TYPE_CMD); 1922 1673 1923 1674 if (return_code == IO_OK) 1924 1675 listlength = be32_to_cpu(*(__be32 *) ld_buff->LUNListLength); ··· 1967 1718 h->drv[i].busy_configuring = 1; 1968 1719 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 1969 1720 return_code = deregister_disk(h, i, 1); 1721 + cciss_destroy_ld_sysfs_entry(&h->drv[i]); 1970 1722 h->drv[i].busy_configuring = 0; 1971 1723 } 1972 1724 } ··· 2127 1877 return 0; 2128 1878 } 2129 1879 2130 - static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller, 2131 - 1: address logical volume log_unit, 2132 - 2: periph device address is scsi3addr */ 2133 - unsigned int log_unit, __u8 page_code, 2134 - unsigned char *scsi3addr, int cmd_type) 1880 + static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, 1881 + size_t size, __u8 page_code, unsigned char *scsi3addr, 1882 + int 
cmd_type) 2135 1883 { 2136 1884 ctlr_info_t *h = hba[ctlr]; 2137 1885 u64bit buff_dma_handle; ··· 2145 1897 c->Header.SGTotal = 0; 2146 1898 } 2147 1899 c->Header.Tag.lower = c->busaddr; 1900 + memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 8); 2148 1901 2149 1902 c->Request.Type.Type = cmd_type; 2150 1903 if (cmd_type == TYPE_CMD) { 2151 1904 switch (cmd) { 2152 1905 case CISS_INQUIRY: 2153 - /* If the logical unit number is 0 then, this is going 2154 - to controller so It's a physical command 2155 - mode = 0 target = 0. So we have nothing to write. 2156 - otherwise, if use_unit_num == 1, 2157 - mode = 1(volume set addressing) target = LUNID 2158 - otherwise, if use_unit_num == 2, 2159 - mode = 0(periph dev addr) target = scsi3addr */ 2160 - if (use_unit_num == 1) { 2161 - c->Header.LUN.LogDev.VolId = 2162 - h->drv[log_unit].LunID; 2163 - c->Header.LUN.LogDev.Mode = 1; 2164 - } else if (use_unit_num == 2) { 2165 - memcpy(c->Header.LUN.LunAddrBytes, scsi3addr, 2166 - 8); 2167 - c->Header.LUN.LogDev.Mode = 0; 2168 - } 2169 1906 /* are we trying to read a vital product page */ 2170 1907 if (page_code != 0) { 2171 1908 c->Request.CDB[1] = 0x01; ··· 2180 1947 break; 2181 1948 2182 1949 case CCISS_READ_CAPACITY: 2183 - c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID; 2184 - c->Header.LUN.LogDev.Mode = 1; 2185 1950 c->Request.CDBLen = 10; 2186 1951 c->Request.Type.Attribute = ATTR_SIMPLE; 2187 1952 c->Request.Type.Direction = XFER_READ; ··· 2187 1956 c->Request.CDB[0] = cmd; 2188 1957 break; 2189 1958 case CCISS_READ_CAPACITY_16: 2190 - c->Header.LUN.LogDev.VolId = h->drv[log_unit].LunID; 2191 - c->Header.LUN.LogDev.Mode = 1; 2192 1959 c->Request.CDBLen = 16; 2193 1960 c->Request.Type.Attribute = ATTR_SIMPLE; 2194 1961 c->Request.Type.Direction = XFER_READ; ··· 2208 1979 c->Request.CDB[0] = BMIC_WRITE; 2209 1980 c->Request.CDB[6] = BMIC_CACHE_FLUSH; 2210 1981 break; 1982 + case TEST_UNIT_READY: 1983 + c->Request.CDBLen = 6; 1984 + c->Request.Type.Attribute = 
ATTR_SIMPLE; 1985 + c->Request.Type.Direction = XFER_NONE; 1986 + c->Request.Timeout = 0; 1987 + break; 2211 1988 default: 2212 1989 printk(KERN_WARNING 2213 1990 "cciss%d: Unknown Command 0x%c\n", ctlr, cmd); ··· 2232 1997 memcpy(&c->Request.CDB[4], buff, 8); 2233 1998 break; 2234 1999 case 1: /* RESET message */ 2235 - c->Request.CDBLen = 12; 2000 + c->Request.CDBLen = 16; 2236 2001 c->Request.Type.Attribute = ATTR_SIMPLE; 2237 - c->Request.Type.Direction = XFER_WRITE; 2002 + c->Request.Type.Direction = XFER_NONE; 2238 2003 c->Request.Timeout = 0; 2239 2004 memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB)); 2240 2005 c->Request.CDB[0] = cmd; /* reset */ 2241 - c->Request.CDB[1] = 0x04; /* reset a LUN */ 2006 + c->Request.CDB[1] = 0x03; /* reset a target */ 2242 2007 break; 2243 2008 case 3: /* No-Op message */ 2244 2009 c->Request.CDBLen = 1; ··· 2270 2035 return status; 2271 2036 } 2272 2037 2273 - static int sendcmd_withirq(__u8 cmd, 2274 - int ctlr, 2275 - void *buff, 2276 - size_t size, 2277 - unsigned int use_unit_num, 2278 - unsigned int log_unit, __u8 page_code, int cmd_type) 2038 + static int check_target_status(ctlr_info_t *h, CommandList_struct *c) 2279 2039 { 2280 - ctlr_info_t *h = hba[ctlr]; 2281 - CommandList_struct *c; 2040 + switch (c->err_info->ScsiStatus) { 2041 + case SAM_STAT_GOOD: 2042 + return IO_OK; 2043 + case SAM_STAT_CHECK_CONDITION: 2044 + switch (0xf & c->err_info->SenseInfo[2]) { 2045 + case 0: return IO_OK; /* no sense */ 2046 + case 1: return IO_OK; /* recovered error */ 2047 + default: 2048 + printk(KERN_WARNING "cciss%d: cmd 0x%02x " 2049 + "check condition, sense key = 0x%02x\n", 2050 + h->ctlr, c->Request.CDB[0], 2051 + c->err_info->SenseInfo[2]); 2052 + } 2053 + break; 2054 + default: 2055 + printk(KERN_WARNING "cciss%d: cmd 0x%02x" 2056 + "scsi status = 0x%02x\n", h->ctlr, 2057 + c->Request.CDB[0], c->err_info->ScsiStatus); 2058 + break; 2059 + } 2060 + return IO_ERROR; 2061 + } 2062 + 2063 + static int 
process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c) 2064 + { 2065 + int return_status = IO_OK; 2066 + 2067 + if (c->err_info->CommandStatus == CMD_SUCCESS) 2068 + return IO_OK; 2069 + 2070 + switch (c->err_info->CommandStatus) { 2071 + case CMD_TARGET_STATUS: 2072 + return_status = check_target_status(h, c); 2073 + break; 2074 + case CMD_DATA_UNDERRUN: 2075 + case CMD_DATA_OVERRUN: 2076 + /* expected for inquiry and report lun commands */ 2077 + break; 2078 + case CMD_INVALID: 2079 + printk(KERN_WARNING "cciss: cmd 0x%02x is " 2080 + "reported invalid\n", c->Request.CDB[0]); 2081 + return_status = IO_ERROR; 2082 + break; 2083 + case CMD_PROTOCOL_ERR: 2084 + printk(KERN_WARNING "cciss: cmd 0x%02x has " 2085 + "protocol error \n", c->Request.CDB[0]); 2086 + return_status = IO_ERROR; 2087 + break; 2088 + case CMD_HARDWARE_ERR: 2089 + printk(KERN_WARNING "cciss: cmd 0x%02x had " 2090 + " hardware error\n", c->Request.CDB[0]); 2091 + return_status = IO_ERROR; 2092 + break; 2093 + case CMD_CONNECTION_LOST: 2094 + printk(KERN_WARNING "cciss: cmd 0x%02x had " 2095 + "connection lost\n", c->Request.CDB[0]); 2096 + return_status = IO_ERROR; 2097 + break; 2098 + case CMD_ABORTED: 2099 + printk(KERN_WARNING "cciss: cmd 0x%02x was " 2100 + "aborted\n", c->Request.CDB[0]); 2101 + return_status = IO_ERROR; 2102 + break; 2103 + case CMD_ABORT_FAILED: 2104 + printk(KERN_WARNING "cciss: cmd 0x%02x reports " 2105 + "abort failed\n", c->Request.CDB[0]); 2106 + return_status = IO_ERROR; 2107 + break; 2108 + case CMD_UNSOLICITED_ABORT: 2109 + printk(KERN_WARNING 2110 + "cciss%d: unsolicited abort 0x%02x\n", h->ctlr, 2111 + c->Request.CDB[0]); 2112 + return_status = IO_NEEDS_RETRY; 2113 + break; 2114 + default: 2115 + printk(KERN_WARNING "cciss: cmd 0x%02x returned " 2116 + "unknown status %x\n", c->Request.CDB[0], 2117 + c->err_info->CommandStatus); 2118 + return_status = IO_ERROR; 2119 + } 2120 + return return_status; 2121 + } 2122 + 2123 + static int 
sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c, 2124 + int attempt_retry) 2125 + { 2126 + DECLARE_COMPLETION_ONSTACK(wait); 2282 2127 u64bit buff_dma_handle; 2283 2128 unsigned long flags; 2284 - int return_status; 2285 - DECLARE_COMPLETION_ONSTACK(wait); 2129 + int return_status = IO_OK; 2286 2130 2287 - if ((c = cmd_alloc(h, 0)) == NULL) 2288 - return -ENOMEM; 2289 - return_status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num, 2290 - log_unit, page_code, NULL, cmd_type); 2291 - if (return_status != IO_OK) { 2292 - cmd_free(h, c, 0); 2293 - return return_status; 2294 - } 2295 - resend_cmd2: 2131 + resend_cmd2: 2296 2132 c->waiting = &wait; 2297 - 2298 2133 /* Put the request on the tail of the queue and send it */ 2299 - spin_lock_irqsave(CCISS_LOCK(ctlr), flags); 2134 + spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); 2300 2135 addQ(&h->reqQ, c); 2301 2136 h->Qdepth++; 2302 2137 start_io(h); 2303 - spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); 2138 + spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 2304 2139 2305 2140 wait_for_completion(&wait); 2306 2141 2307 - if (c->err_info->CommandStatus != 0) { /* an error has occurred */ 2308 - switch (c->err_info->CommandStatus) { 2309 - case CMD_TARGET_STATUS: 2310 - printk(KERN_WARNING "cciss: cmd %p has " 2311 - " completed with errors\n", c); 2312 - if (c->err_info->ScsiStatus) { 2313 - printk(KERN_WARNING "cciss: cmd %p " 2314 - "has SCSI Status = %x\n", 2315 - c, c->err_info->ScsiStatus); 2316 - } 2142 + if (c->err_info->CommandStatus == 0 || !attempt_retry) 2143 + goto command_done; 2317 2144 2318 - break; 2319 - case CMD_DATA_UNDERRUN: 2320 - case CMD_DATA_OVERRUN: 2321 - /* expected for inquire and report lun commands */ 2322 - break; 2323 - case CMD_INVALID: 2324 - printk(KERN_WARNING "cciss: Cmd %p is " 2325 - "reported invalid\n", c); 2326 - return_status = IO_ERROR; 2327 - break; 2328 - case CMD_PROTOCOL_ERR: 2329 - printk(KERN_WARNING "cciss: cmd %p has " 2330 - "protocol error \n", c); 
2331 - return_status = IO_ERROR; 2332 - break; 2333 - case CMD_HARDWARE_ERR: 2334 - printk(KERN_WARNING "cciss: cmd %p had " 2335 - " hardware error\n", c); 2336 - return_status = IO_ERROR; 2337 - break; 2338 - case CMD_CONNECTION_LOST: 2339 - printk(KERN_WARNING "cciss: cmd %p had " 2340 - "connection lost\n", c); 2341 - return_status = IO_ERROR; 2342 - break; 2343 - case CMD_ABORTED: 2344 - printk(KERN_WARNING "cciss: cmd %p was " 2345 - "aborted\n", c); 2346 - return_status = IO_ERROR; 2347 - break; 2348 - case CMD_ABORT_FAILED: 2349 - printk(KERN_WARNING "cciss: cmd %p reports " 2350 - "abort failed\n", c); 2351 - return_status = IO_ERROR; 2352 - break; 2353 - case CMD_UNSOLICITED_ABORT: 2354 - printk(KERN_WARNING 2355 - "cciss%d: unsolicited abort %p\n", ctlr, c); 2356 - if (c->retry_count < MAX_CMD_RETRIES) { 2357 - printk(KERN_WARNING 2358 - "cciss%d: retrying %p\n", ctlr, c); 2359 - c->retry_count++; 2360 - /* erase the old error information */ 2361 - memset(c->err_info, 0, 2362 - sizeof(ErrorInfo_struct)); 2363 - return_status = IO_OK; 2364 - INIT_COMPLETION(wait); 2365 - goto resend_cmd2; 2366 - } 2367 - return_status = IO_ERROR; 2368 - break; 2369 - default: 2370 - printk(KERN_WARNING "cciss: cmd %p returned " 2371 - "unknown status %x\n", c, 2372 - c->err_info->CommandStatus); 2373 - return_status = IO_ERROR; 2374 - } 2145 + return_status = process_sendcmd_error(h, c); 2146 + 2147 + if (return_status == IO_NEEDS_RETRY && 2148 + c->retry_count < MAX_CMD_RETRIES) { 2149 + printk(KERN_WARNING "cciss%d: retrying 0x%02x\n", h->ctlr, 2150 + c->Request.CDB[0]); 2151 + c->retry_count++; 2152 + /* erase the old error information */ 2153 + memset(c->err_info, 0, sizeof(ErrorInfo_struct)); 2154 + return_status = IO_OK; 2155 + INIT_COMPLETION(wait); 2156 + goto resend_cmd2; 2375 2157 } 2158 + 2159 + command_done: 2376 2160 /* unlock the buffers from DMA */ 2377 2161 buff_dma_handle.val32.lower = c->SG[0].Addr.lower; 2378 2162 buff_dma_handle.val32.upper = 
c->SG[0].Addr.upper; 2379 2163 pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val, 2380 2164 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL); 2165 + return return_status; 2166 + } 2167 + 2168 + static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size, 2169 + __u8 page_code, unsigned char scsi3addr[], 2170 + int cmd_type) 2171 + { 2172 + ctlr_info_t *h = hba[ctlr]; 2173 + CommandList_struct *c; 2174 + int return_status; 2175 + 2176 + c = cmd_alloc(h, 0); 2177 + if (!c) 2178 + return -ENOMEM; 2179 + return_status = fill_cmd(c, cmd, ctlr, buff, size, page_code, 2180 + scsi3addr, cmd_type); 2181 + if (return_status == IO_OK) 2182 + return_status = sendcmd_withirq_core(h, c, 1); 2183 + 2381 2184 cmd_free(h, c, 0); 2382 2185 return return_status; 2383 2186 } ··· 2428 2155 { 2429 2156 int return_code; 2430 2157 unsigned long t; 2158 + unsigned char scsi3addr[8]; 2431 2159 2432 2160 memset(inq_buff, 0, sizeof(InquiryData_struct)); 2161 + log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol); 2433 2162 if (withirq) 2434 2163 return_code = sendcmd_withirq(CISS_INQUIRY, ctlr, 2435 - inq_buff, sizeof(*inq_buff), 1, 2436 - logvol, 0xC1, TYPE_CMD); 2164 + inq_buff, sizeof(*inq_buff), 2165 + 0xC1, scsi3addr, TYPE_CMD); 2437 2166 else 2438 2167 return_code = sendcmd(CISS_INQUIRY, ctlr, inq_buff, 2439 - sizeof(*inq_buff), 1, logvol, 0xC1, NULL, 2168 + sizeof(*inq_buff), 0xC1, scsi3addr, 2440 2169 TYPE_CMD); 2441 2170 if (return_code == IO_OK) { 2442 2171 if (inq_buff->data_byte[8] == 0xFF) { ··· 2479 2204 { 2480 2205 ReadCapdata_struct *buf; 2481 2206 int return_code; 2207 + unsigned char scsi3addr[8]; 2482 2208 2483 2209 buf = kzalloc(sizeof(ReadCapdata_struct), GFP_KERNEL); 2484 2210 if (!buf) { ··· 2487 2211 return; 2488 2212 } 2489 2213 2214 + log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol); 2490 2215 if (withirq) 2491 2216 return_code = sendcmd_withirq(CCISS_READ_CAPACITY, 2492 2217 ctlr, buf, sizeof(ReadCapdata_struct), 2493 - 1, logvol, 0, TYPE_CMD); 2218 + 0, 
scsi3addr, TYPE_CMD); 2494 2219 else 2495 2220 return_code = sendcmd(CCISS_READ_CAPACITY, 2496 2221 ctlr, buf, sizeof(ReadCapdata_struct), 2497 - 1, logvol, 0, NULL, TYPE_CMD); 2222 + 0, scsi3addr, TYPE_CMD); 2498 2223 if (return_code == IO_OK) { 2499 2224 *total_size = be32_to_cpu(*(__be32 *) buf->total_size); 2500 2225 *block_size = be32_to_cpu(*(__be32 *) buf->block_size); ··· 2515 2238 { 2516 2239 ReadCapdata_struct_16 *buf; 2517 2240 int return_code; 2241 + unsigned char scsi3addr[8]; 2518 2242 2519 2243 buf = kzalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL); 2520 2244 if (!buf) { ··· 2523 2245 return; 2524 2246 } 2525 2247 2248 + log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol); 2526 2249 if (withirq) { 2527 2250 return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16, 2528 2251 ctlr, buf, sizeof(ReadCapdata_struct_16), 2529 - 1, logvol, 0, TYPE_CMD); 2252 + 0, scsi3addr, TYPE_CMD); 2530 2253 } 2531 2254 else { 2532 2255 return_code = sendcmd(CCISS_READ_CAPACITY_16, 2533 2256 ctlr, buf, sizeof(ReadCapdata_struct_16), 2534 - 1, logvol, 0, NULL, TYPE_CMD); 2257 + 0, scsi3addr, TYPE_CMD); 2535 2258 } 2536 2259 if (return_code == IO_OK) { 2537 2260 *total_size = be64_to_cpu(*(__be64 *) buf->total_size); ··· 2582 2303 cciss_geometry_inquiry(h->ctlr, logvol, 1, total_size, block_size, 2583 2304 inq_buff, drv); 2584 2305 2585 - blk_queue_hardsect_size(drv->queue, drv->block_size); 2306 + blk_queue_logical_block_size(drv->queue, drv->block_size); 2586 2307 set_capacity(disk, drv->nr_blocks); 2587 2308 2588 2309 kfree(inq_buff); ··· 2612 2333 return 1; 2613 2334 } 2614 2335 2615 - static int add_sendcmd_reject(__u8 cmd, int ctlr, unsigned long complete) 2616 - { 2617 - /* We get in here if sendcmd() is polling for completions 2618 - and gets some command back that it wasn't expecting -- 2619 - something other than that which it just sent down. 
2620 - Ordinarily, that shouldn't happen, but it can happen when 2621 - the scsi tape stuff gets into error handling mode, and 2622 - starts using sendcmd() to try to abort commands and 2623 - reset tape drives. In that case, sendcmd may pick up 2624 - completions of commands that were sent to logical drives 2625 - through the block i/o system, or cciss ioctls completing, etc. 2626 - In that case, we need to save those completions for later 2627 - processing by the interrupt handler. 2628 - */ 2629 - 2630 - #ifdef CONFIG_CISS_SCSI_TAPE 2631 - struct sendcmd_reject_list *srl = &hba[ctlr]->scsi_rejects; 2632 - 2633 - /* If it's not the scsi tape stuff doing error handling, (abort */ 2634 - /* or reset) then we don't expect anything weird. */ 2635 - if (cmd != CCISS_RESET_MSG && cmd != CCISS_ABORT_MSG) { 2636 - #endif 2637 - printk(KERN_WARNING "cciss cciss%d: SendCmd " 2638 - "Invalid command list address returned! (%lx)\n", 2639 - ctlr, complete); 2640 - /* not much we can do. */ 2641 - #ifdef CONFIG_CISS_SCSI_TAPE 2642 - return 1; 2643 - } 2644 - 2645 - /* We've sent down an abort or reset, but something else 2646 - has completed */ 2647 - if (srl->ncompletions >= (hba[ctlr]->nr_cmds + 2)) { 2648 - /* Uh oh. No room to save it for later... */ 2649 - printk(KERN_WARNING "cciss%d: Sendcmd: Invalid command addr, " 2650 - "reject list overflow, command lost!\n", ctlr); 2651 - return 1; 2652 - } 2653 - /* Save it for later */ 2654 - srl->complete[srl->ncompletions] = complete; 2655 - srl->ncompletions++; 2656 - #endif 2657 - return 0; 2658 - } 2659 - 2660 - /* 2661 - * Send a command to the controller, and wait for it to complete. 2662 - * Only used at init time. 2336 + /* Send command c to controller h and poll for it to complete. 2337 + * Turns interrupts off on the board. Used at driver init time 2338 + * and during SCSI error recovery. 
2663 2339 */ 2664 - static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, unsigned int use_unit_num, /* 0: address the controller, 2665 - 1: address logical volume log_unit, 2666 - 2: periph device address is scsi3addr */ 2667 - unsigned int log_unit, 2668 - __u8 page_code, unsigned char *scsi3addr, int cmd_type) 2340 + static int sendcmd_core(ctlr_info_t *h, CommandList_struct *c) 2669 2341 { 2670 - CommandList_struct *c; 2671 2342 int i; 2672 2343 unsigned long complete; 2673 - ctlr_info_t *info_p = hba[ctlr]; 2344 + int status = IO_ERROR; 2674 2345 u64bit buff_dma_handle; 2675 - int status, done = 0; 2676 2346 2677 - if ((c = cmd_alloc(info_p, 1)) == NULL) { 2678 - printk(KERN_WARNING "cciss: unable to get memory"); 2679 - return IO_ERROR; 2680 - } 2681 - status = fill_cmd(c, cmd, ctlr, buff, size, use_unit_num, 2682 - log_unit, page_code, scsi3addr, cmd_type); 2683 - if (status != IO_OK) { 2684 - cmd_free(info_p, c, 1); 2685 - return status; 2686 - } 2687 - resend_cmd1: 2688 - /* 2689 - * Disable interrupt 2690 - */ 2691 - #ifdef CCISS_DEBUG 2692 - printk(KERN_DEBUG "cciss: turning intr off\n"); 2693 - #endif /* CCISS_DEBUG */ 2694 - info_p->access.set_intr_mask(info_p, CCISS_INTR_OFF); 2347 + resend_cmd1: 2348 + 2349 + /* Disable interrupt on the board. */ 2350 + h->access.set_intr_mask(h, CCISS_INTR_OFF); 2695 2351 2696 2352 /* Make sure there is room in the command FIFO */ 2697 2353 /* Actually it should be completely empty at this time */ ··· 2634 2420 /* tape side of the driver. 
*/ 2635 2421 for (i = 200000; i > 0; i--) { 2636 2422 /* if fifo isn't full go */ 2637 - if (!(info_p->access.fifo_full(info_p))) { 2638 - 2423 + if (!(h->access.fifo_full(h))) 2639 2424 break; 2640 - } 2641 2425 udelay(10); 2642 2426 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full," 2643 - " waiting!\n", ctlr); 2427 + " waiting!\n", h->ctlr); 2644 2428 } 2645 - /* 2646 - * Send the cmd 2647 - */ 2648 - info_p->access.submit_command(info_p, c); 2649 - done = 0; 2429 + h->access.submit_command(h, c); /* Send the cmd */ 2650 2430 do { 2651 - complete = pollcomplete(ctlr); 2431 + complete = pollcomplete(h->ctlr); 2652 2432 2653 2433 #ifdef CCISS_DEBUG 2654 2434 printk(KERN_DEBUG "cciss: command completed\n"); ··· 2651 2443 if (complete == 1) { 2652 2444 printk(KERN_WARNING 2653 2445 "cciss cciss%d: SendCmd Timeout out, " 2654 - "No command list address returned!\n", ctlr); 2446 + "No command list address returned!\n", h->ctlr); 2655 2447 status = IO_ERROR; 2656 - done = 1; 2657 2448 break; 2658 2449 } 2659 2450 2660 - /* This will need to change for direct lookup completions */ 2661 - if ((complete & CISS_ERROR_BIT) 2662 - && (complete & ~CISS_ERROR_BIT) == c->busaddr) { 2663 - /* if data overrun or underun on Report command 2664 - ignore it 2665 - */ 2666 - if (((c->Request.CDB[0] == CISS_REPORT_LOG) || 2667 - (c->Request.CDB[0] == CISS_REPORT_PHYS) || 2668 - (c->Request.CDB[0] == CISS_INQUIRY)) && 2669 - ((c->err_info->CommandStatus == 2670 - CMD_DATA_OVERRUN) || 2671 - (c->err_info->CommandStatus == CMD_DATA_UNDERRUN) 2672 - )) { 2673 - complete = c->busaddr; 2674 - } else { 2675 - if (c->err_info->CommandStatus == 2676 - CMD_UNSOLICITED_ABORT) { 2677 - printk(KERN_WARNING "cciss%d: " 2678 - "unsolicited abort %p\n", 2679 - ctlr, c); 2680 - if (c->retry_count < MAX_CMD_RETRIES) { 2681 - printk(KERN_WARNING 2682 - "cciss%d: retrying %p\n", 2683 - ctlr, c); 2684 - c->retry_count++; 2685 - /* erase the old error */ 2686 - /* information */ 2687 - 
memset(c->err_info, 0, 2688 - sizeof 2689 - (ErrorInfo_struct)); 2690 - goto resend_cmd1; 2691 - } else { 2692 - printk(KERN_WARNING 2693 - "cciss%d: retried %p too " 2694 - "many times\n", ctlr, c); 2695 - status = IO_ERROR; 2696 - goto cleanup1; 2697 - } 2698 - } else if (c->err_info->CommandStatus == 2699 - CMD_UNABORTABLE) { 2700 - printk(KERN_WARNING 2701 - "cciss%d: command could not be aborted.\n", 2702 - ctlr); 2703 - status = IO_ERROR; 2704 - goto cleanup1; 2705 - } 2706 - printk(KERN_WARNING "ciss ciss%d: sendcmd" 2707 - " Error %x \n", ctlr, 2708 - c->err_info->CommandStatus); 2709 - printk(KERN_WARNING "ciss ciss%d: sendcmd" 2710 - " offensive info\n" 2711 - " size %x\n num %x value %x\n", 2712 - ctlr, 2713 - c->err_info->MoreErrInfo.Invalid_Cmd. 2714 - offense_size, 2715 - c->err_info->MoreErrInfo.Invalid_Cmd. 2716 - offense_num, 2717 - c->err_info->MoreErrInfo.Invalid_Cmd. 2718 - offense_value); 2719 - status = IO_ERROR; 2720 - goto cleanup1; 2721 - } 2722 - } 2723 - /* This will need changing for direct lookup completions */ 2724 - if (complete != c->busaddr) { 2725 - if (add_sendcmd_reject(cmd, ctlr, complete) != 0) { 2726 - BUG(); /* we are pretty much hosed if we get here. */ 2727 - } 2451 + /* Make sure it's the command we're expecting. */ 2452 + if ((complete & ~CISS_ERROR_BIT) != c->busaddr) { 2453 + printk(KERN_WARNING "cciss%d: Unexpected command " 2454 + "completion.\n", h->ctlr); 2728 2455 continue; 2729 - } else 2730 - done = 1; 2731 - } while (!done); 2456 + } 2732 2457 2733 - cleanup1: 2458 + /* It is our command. If no error, we're done. */ 2459 + if (!(complete & CISS_ERROR_BIT)) { 2460 + status = IO_OK; 2461 + break; 2462 + } 2463 + 2464 + /* There is an error... 
*/ 2465 + 2466 + /* if data overrun or underun on Report command ignore it */ 2467 + if (((c->Request.CDB[0] == CISS_REPORT_LOG) || 2468 + (c->Request.CDB[0] == CISS_REPORT_PHYS) || 2469 + (c->Request.CDB[0] == CISS_INQUIRY)) && 2470 + ((c->err_info->CommandStatus == CMD_DATA_OVERRUN) || 2471 + (c->err_info->CommandStatus == CMD_DATA_UNDERRUN))) { 2472 + complete = c->busaddr; 2473 + status = IO_OK; 2474 + break; 2475 + } 2476 + 2477 + if (c->err_info->CommandStatus == CMD_UNSOLICITED_ABORT) { 2478 + printk(KERN_WARNING "cciss%d: unsolicited abort %p\n", 2479 + h->ctlr, c); 2480 + if (c->retry_count < MAX_CMD_RETRIES) { 2481 + printk(KERN_WARNING "cciss%d: retrying %p\n", 2482 + h->ctlr, c); 2483 + c->retry_count++; 2484 + /* erase the old error information */ 2485 + memset(c->err_info, 0, sizeof(c->err_info)); 2486 + goto resend_cmd1; 2487 + } 2488 + printk(KERN_WARNING "cciss%d: retried %p too many " 2489 + "times\n", h->ctlr, c); 2490 + status = IO_ERROR; 2491 + break; 2492 + } 2493 + 2494 + if (c->err_info->CommandStatus == CMD_UNABORTABLE) { 2495 + printk(KERN_WARNING "cciss%d: command could not be " 2496 + "aborted.\n", h->ctlr); 2497 + status = IO_ERROR; 2498 + break; 2499 + } 2500 + 2501 + if (c->err_info->CommandStatus == CMD_TARGET_STATUS) { 2502 + status = check_target_status(h, c); 2503 + break; 2504 + } 2505 + 2506 + printk(KERN_WARNING "cciss%d: sendcmd error\n", h->ctlr); 2507 + printk(KERN_WARNING "cmd = 0x%02x, CommandStatus = 0x%02x\n", 2508 + c->Request.CDB[0], c->err_info->CommandStatus); 2509 + status = IO_ERROR; 2510 + break; 2511 + 2512 + } while (1); 2513 + 2734 2514 /* unlock the data buffer from DMA */ 2735 2515 buff_dma_handle.val32.lower = c->SG[0].Addr.lower; 2736 2516 buff_dma_handle.val32.upper = c->SG[0].Addr.upper; 2737 - pci_unmap_single(info_p->pdev, (dma_addr_t) buff_dma_handle.val, 2517 + pci_unmap_single(h->pdev, (dma_addr_t) buff_dma_handle.val, 2738 2518 c->SG[0].Len, PCI_DMA_BIDIRECTIONAL); 2739 - #ifdef 
CONFIG_CISS_SCSI_TAPE 2740 - /* if we saved some commands for later, process them now. */ 2741 - if (info_p->scsi_rejects.ncompletions > 0) 2742 - do_cciss_intr(0, info_p); 2743 - #endif 2744 - cmd_free(info_p, c, 1); 2519 + return status; 2520 + } 2521 + 2522 + /* 2523 + * Send a command to the controller, and wait for it to complete. 2524 + * Used at init time, and during SCSI error recovery. 2525 + */ 2526 + static int sendcmd(__u8 cmd, int ctlr, void *buff, size_t size, 2527 + __u8 page_code, unsigned char *scsi3addr, int cmd_type) 2528 + { 2529 + CommandList_struct *c; 2530 + int status; 2531 + 2532 + c = cmd_alloc(hba[ctlr], 1); 2533 + if (!c) { 2534 + printk(KERN_WARNING "cciss: unable to get memory"); 2535 + return IO_ERROR; 2536 + } 2537 + status = fill_cmd(c, cmd, ctlr, buff, size, page_code, 2538 + scsi3addr, cmd_type); 2539 + if (status == IO_OK) 2540 + status = sendcmd_core(hba[ctlr], c); 2541 + cmd_free(hba[ctlr], c, 1); 2745 2542 return status; 2746 2543 } 2747 2544 ··· 2904 2691 printk(KERN_WARNING "cciss: cmd %p has" 2905 2692 " completed with data underrun " 2906 2693 "reported\n", cmd); 2907 - cmd->rq->data_len = cmd->err_info->ResidualCnt; 2694 + cmd->rq->resid_len = cmd->err_info->ResidualCnt; 2908 2695 } 2909 2696 break; 2910 2697 case CMD_DATA_OVERRUN: ··· 3019 2806 goto startio; 3020 2807 3021 2808 queue: 3022 - creq = elv_next_request(q); 2809 + creq = blk_peek_request(q); 3023 2810 if (!creq) 3024 2811 goto startio; 3025 2812 ··· 3028 2815 if ((c = cmd_alloc(h, 1)) == NULL) 3029 2816 goto full; 3030 2817 3031 - blkdev_dequeue_request(creq); 2818 + blk_start_request(creq); 3032 2819 3033 2820 spin_unlock_irq(q->queue_lock); 3034 2821 ··· 3053 2840 c->Request.Timeout = 0; // Don't time out 3054 2841 c->Request.CDB[0] = 3055 2842 (rq_data_dir(creq) == READ) ? 
h->cciss_read : h->cciss_write; 3056 - start_blk = creq->sector; 2843 + start_blk = blk_rq_pos(creq); 3057 2844 #ifdef CCISS_DEBUG 3058 - printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n", (int)creq->sector, 3059 - (int)creq->nr_sectors); 2845 + printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n", 2846 + (int)blk_rq_pos(creq), (int)blk_rq_sectors(creq)); 3060 2847 #endif /* CCISS_DEBUG */ 3061 2848 3062 2849 sg_init_table(tmp_sg, MAXSGENTRIES); ··· 3082 2869 h->maxSG = seg; 3083 2870 3084 2871 #ifdef CCISS_DEBUG 3085 - printk(KERN_DEBUG "cciss: Submitting %lu sectors in %d segments\n", 3086 - creq->nr_sectors, seg); 2872 + printk(KERN_DEBUG "cciss: Submitting %u sectors in %d segments\n", 2873 + blk_rq_sectors(creq), seg); 3087 2874 #endif /* CCISS_DEBUG */ 3088 2875 3089 2876 c->Header.SGList = c->Header.SGTotal = seg; ··· 3095 2882 c->Request.CDB[4] = (start_blk >> 8) & 0xff; 3096 2883 c->Request.CDB[5] = start_blk & 0xff; 3097 2884 c->Request.CDB[6] = 0; // (sect >> 24) & 0xff; MSB 3098 - c->Request.CDB[7] = (creq->nr_sectors >> 8) & 0xff; 3099 - c->Request.CDB[8] = creq->nr_sectors & 0xff; 2885 + c->Request.CDB[7] = (blk_rq_sectors(creq) >> 8) & 0xff; 2886 + c->Request.CDB[8] = blk_rq_sectors(creq) & 0xff; 3100 2887 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0; 3101 2888 } else { 3102 2889 u32 upper32 = upper_32_bits(start_blk); ··· 3111 2898 c->Request.CDB[7]= (start_blk >> 16) & 0xff; 3112 2899 c->Request.CDB[8]= (start_blk >> 8) & 0xff; 3113 2900 c->Request.CDB[9]= start_blk & 0xff; 3114 - c->Request.CDB[10]= (creq->nr_sectors >> 24) & 0xff; 3115 - c->Request.CDB[11]= (creq->nr_sectors >> 16) & 0xff; 3116 - c->Request.CDB[12]= (creq->nr_sectors >> 8) & 0xff; 3117 - c->Request.CDB[13]= creq->nr_sectors & 0xff; 2901 + c->Request.CDB[10]= (blk_rq_sectors(creq) >> 24) & 0xff; 2902 + c->Request.CDB[11]= (blk_rq_sectors(creq) >> 16) & 0xff; 2903 + c->Request.CDB[12]= (blk_rq_sectors(creq) >> 8) & 0xff; 2904 + c->Request.CDB[13]= 
blk_rq_sectors(creq) & 0xff; 3118 2905 c->Request.CDB[14] = c->Request.CDB[15] = 0; 3119 2906 } 3120 2907 } else if (blk_pc_request(creq)) { ··· 3144 2931 3145 2932 static inline unsigned long get_next_completion(ctlr_info_t *h) 3146 2933 { 3147 - #ifdef CONFIG_CISS_SCSI_TAPE 3148 - /* Any rejects from sendcmd() lying around? Process them first */ 3149 - if (h->scsi_rejects.ncompletions == 0) 3150 - return h->access.command_completed(h); 3151 - else { 3152 - struct sendcmd_reject_list *srl; 3153 - int n; 3154 - srl = &h->scsi_rejects; 3155 - n = --srl->ncompletions; 3156 - /* printk("cciss%d: processing saved reject\n", h->ctlr); */ 3157 - printk("p"); 3158 - return srl->complete[n]; 3159 - } 3160 - #else 3161 2934 return h->access.command_completed(h); 3162 - #endif 3163 2935 } 3164 2936 3165 2937 static inline int interrupt_pending(ctlr_info_t *h) 3166 2938 { 3167 - #ifdef CONFIG_CISS_SCSI_TAPE 3168 - return (h->access.intr_pending(h) 3169 - || (h->scsi_rejects.ncompletions > 0)); 3170 - #else 3171 2939 return h->access.intr_pending(h); 3172 - #endif 3173 2940 } 3174 2941 3175 2942 static inline long interrupt_not_for_us(ctlr_info_t *h) 3176 2943 { 3177 - #ifdef CONFIG_CISS_SCSI_TAPE 3178 - return (((h->access.intr_pending(h) == 0) || 3179 - (h->interrupts_enabled == 0)) 3180 - && (h->scsi_rejects.ncompletions == 0)); 3181 - #else 3182 2944 return (((h->access.intr_pending(h) == 0) || 3183 2945 (h->interrupts_enabled == 0))); 3184 - #endif 3185 2946 } 3186 2947 3187 2948 static irqreturn_t do_cciss_intr(int irq, void *dev_id) ··· 3910 3723 INIT_HLIST_HEAD(&hba[i]->reqQ); 3911 3724 3912 3725 if (cciss_pci_init(hba[i], pdev) != 0) 3913 - goto clean1; 3726 + goto clean0; 3914 3727 3915 3728 sprintf(hba[i]->devname, "cciss%d", i); 3916 3729 hba[i]->ctlr = i; 3917 3730 hba[i]->pdev = pdev; 3731 + 3732 + if (cciss_create_hba_sysfs_entry(hba[i])) 3733 + goto clean0; 3918 3734 3919 3735 /* configure PCI DMA stuff */ 3920 3736 if (!pci_set_dma_mask(pdev, 
DMA_BIT_MASK(64))) ··· 3977 3787 printk(KERN_ERR "cciss: out of memory"); 3978 3788 goto clean4; 3979 3789 } 3980 - #ifdef CONFIG_CISS_SCSI_TAPE 3981 - hba[i]->scsi_rejects.complete = 3982 - kmalloc(sizeof(hba[i]->scsi_rejects.complete[0]) * 3983 - (hba[i]->nr_cmds + 5), GFP_KERNEL); 3984 - if (hba[i]->scsi_rejects.complete == NULL) { 3985 - printk(KERN_ERR "cciss: out of memory"); 3986 - goto clean4; 3987 - } 3988 - #endif 3989 3790 spin_lock_init(&hba[i]->lock); 3990 3791 3991 3792 /* Initialize the pdev driver private data. ··· 4009 3828 } 4010 3829 4011 3830 return_code = sendcmd_withirq(CISS_INQUIRY, i, inq_buff, 4012 - sizeof(InquiryData_struct), 0, 0 , 0, TYPE_CMD); 3831 + sizeof(InquiryData_struct), 0, CTLR_LUNID, TYPE_CMD); 4013 3832 if (return_code == IO_OK) { 4014 3833 hba[i]->firm_ver[0] = inq_buff->data_byte[32]; 4015 3834 hba[i]->firm_ver[1] = inq_buff->data_byte[33]; ··· 4036 3855 4037 3856 clean4: 4038 3857 kfree(inq_buff); 4039 - #ifdef CONFIG_CISS_SCSI_TAPE 4040 - kfree(hba[i]->scsi_rejects.complete); 4041 - #endif 4042 3858 kfree(hba[i]->cmd_pool_bits); 4043 3859 if (hba[i]->cmd_pool) 4044 3860 pci_free_consistent(hba[i]->pdev, ··· 4050 3872 clean2: 4051 3873 unregister_blkdev(hba[i]->major, hba[i]->devname); 4052 3874 clean1: 3875 + cciss_destroy_hba_sysfs_entry(hba[i]); 3876 + clean0: 4053 3877 hba[i]->busy_initializing = 0; 4054 3878 /* cleanup any queues that may have been initialized */ 4055 3879 for (j=0; j <= hba[i]->highest_lun; j++){ ··· 4087 3907 /* sendcmd will turn off interrupt, and send the flush... 
4088 3908 * To write all data in the battery backed cache to disks */ 4089 3909 memset(flush_buf, 0, 4); 4090 - return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 0, 0, NULL, 4091 - TYPE_CMD); 3910 + return_code = sendcmd(CCISS_CACHE_FLUSH, i, flush_buf, 4, 0, 3911 + CTLR_LUNID, TYPE_CMD); 4092 3912 if (return_code == IO_OK) { 4093 3913 printk(KERN_INFO "Completed flushing cache on controller %d\n", i); 4094 3914 } else { ··· 4153 3973 pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct), 4154 3974 hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle); 4155 3975 kfree(hba[i]->cmd_pool_bits); 4156 - #ifdef CONFIG_CISS_SCSI_TAPE 4157 - kfree(hba[i]->scsi_rejects.complete); 4158 - #endif 4159 3976 /* 4160 3977 * Deliberately omit pci_disable_device(): it does something nasty to 4161 3978 * Smart Array controllers that pci_enable_device does not undo 4162 3979 */ 4163 3980 pci_release_regions(pdev); 4164 3981 pci_set_drvdata(pdev, NULL); 3982 + cciss_destroy_hba_sysfs_entry(hba[i]); 4165 3983 free_hba(i); 4166 3984 } 4167 3985 ··· 4177 3999 */ 4178 4000 static int __init cciss_init(void) 4179 4001 { 4002 + int err; 4003 + 4180 4004 /* 4181 4005 * The hardware requires that commands are aligned on a 64-bit 4182 4006 * boundary. 
Given that we use pci_alloc_consistent() to allocate an ··· 4188 4008 4189 4009 printk(KERN_INFO DRIVER_NAME "\n"); 4190 4010 4011 + err = bus_register(&cciss_bus_type); 4012 + if (err) 4013 + return err; 4014 + 4191 4015 /* Register for our PCI devices */ 4192 - return pci_register_driver(&cciss_pci_driver); 4016 + err = pci_register_driver(&cciss_pci_driver); 4017 + if (err) 4018 + goto err_bus_register; 4019 + 4020 + return 0; 4021 + 4022 + err_bus_register: 4023 + bus_unregister(&cciss_bus_type); 4024 + return err; 4193 4025 } 4194 4026 4195 4027 static void __exit cciss_cleanup(void) ··· 4218 4026 } 4219 4027 } 4220 4028 remove_proc_entry("driver/cciss", NULL); 4029 + bus_unregister(&cciss_bus_type); 4221 4030 } 4222 4031 4223 4032 static void fail_all_cmds(unsigned long ctlr)
+18 -16
drivers/block/cciss.h
··· 11 11 12 12 #define IO_OK 0 13 13 #define IO_ERROR 1 14 + #define IO_NEEDS_RETRY 3 15 + 16 + #define VENDOR_LEN 8 17 + #define MODEL_LEN 16 18 + #define REV_LEN 4 14 19 15 20 struct ctlr_info; 16 21 typedef struct ctlr_info ctlr_info_t; ··· 39 34 int cylinders; 40 35 int raid_level; /* set to -1 to indicate that 41 36 * the drive is not in use/configured 42 - */ 43 - int busy_configuring; /*This is set when the drive is being removed 44 - *to prevent it from being opened or it's queue 45 - *from being started. 46 - */ 47 - __u8 serial_no[16]; /* from inquiry page 0x83, */ 48 - /* not necc. null terminated. */ 37 + */ 38 + int busy_configuring; /* This is set when a drive is being removed 39 + * to prevent it from being opened or it's 40 + * queue from being started. 41 + */ 42 + struct device dev; 43 + __u8 serial_no[16]; /* from inquiry page 0x83, 44 + * not necc. null terminated. 45 + */ 46 + char vendor[VENDOR_LEN + 1]; /* SCSI vendor string */ 47 + char model[MODEL_LEN + 1]; /* SCSI model string */ 48 + char rev[REV_LEN + 1]; /* SCSI revision string */ 49 49 } drive_info_struct; 50 50 51 - #ifdef CONFIG_CISS_SCSI_TAPE 52 - 53 - struct sendcmd_reject_list { 54 - int ncompletions; 55 - unsigned long *complete; /* array of NR_CMDS tags */ 56 - }; 57 - 58 - #endif 59 51 struct ctlr_info 60 52 { 61 53 int ctlr; ··· 120 118 void *scsi_ctlr; /* ptr to structure containing scsi related stuff */ 121 119 /* list of block side commands the scsi error handling sucked up */ 122 120 /* and saved for later processing */ 123 - struct sendcmd_reject_list scsi_rejects; 124 121 #endif 125 122 unsigned char alive; 126 123 struct completion *rescan_wait; 127 124 struct task_struct *cciss_scan_thread; 125 + struct device dev; 128 126 }; 129 127 130 128 /* Defining the diffent access_menthods */
+2
drivers/block/cciss_cmd.h
··· 217 217 LogDevAddr_struct LogDev; 218 218 } LUNAddr_struct; 219 219 220 + #define CTLR_LUNID "\0\0\0\0\0\0\0\0" 221 + 220 222 typedef struct _CommandListHeader_struct { 221 223 BYTE ReplyQueue; 222 224 BYTE SGList;
+83 -26
drivers/block/cciss_scsi.c
··· 44 44 #define CCISS_ABORT_MSG 0x00 45 45 #define CCISS_RESET_MSG 0x01 46 46 47 - /* some prototypes... */ 48 - static int sendcmd( 49 - __u8 cmd, 50 - int ctlr, 51 - void *buff, 52 - size_t size, 53 - unsigned int use_unit_num, /* 0: address the controller, 54 - 1: address logical volume log_unit, 55 - 2: address is in scsi3addr */ 56 - unsigned int log_unit, 57 - __u8 page_code, 58 - unsigned char *scsi3addr, 47 + static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, 48 + size_t size, 49 + __u8 page_code, unsigned char *scsi3addr, 59 50 int cmd_type); 60 51 52 + static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool); 53 + static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool); 61 54 62 55 static int cciss_scsi_proc_info( 63 56 struct Scsi_Host *sh, ··· 1568 1575 CPQ_TAPE_UNLOCK(ctlr, flags); 1569 1576 } 1570 1577 1578 + static int wait_for_device_to_become_ready(ctlr_info_t *h, 1579 + unsigned char lunaddr[]) 1580 + { 1581 + int rc; 1582 + int count = 0; 1583 + int waittime = HZ; 1584 + CommandList_struct *c; 1585 + 1586 + c = cmd_alloc(h, 1); 1587 + if (!c) { 1588 + printk(KERN_WARNING "cciss%d: out of memory in " 1589 + "wait_for_device_to_become_ready.\n", h->ctlr); 1590 + return IO_ERROR; 1591 + } 1592 + 1593 + /* Send test unit ready until device ready, or give up. */ 1594 + while (count < 20) { 1595 + 1596 + /* Wait for a bit. do this first, because if we send 1597 + * the TUR right away, the reset will just abort it. 1598 + */ 1599 + schedule_timeout_uninterruptible(waittime); 1600 + count++; 1601 + 1602 + /* Increase wait time with each try, up to a point. 
*/ 1603 + if (waittime < (HZ * 30)) 1604 + waittime = waittime * 2; 1605 + 1606 + /* Send the Test Unit Ready */ 1607 + rc = fill_cmd(c, TEST_UNIT_READY, h->ctlr, NULL, 0, 0, 1608 + lunaddr, TYPE_CMD); 1609 + if (rc == 0) 1610 + rc = sendcmd_withirq_core(h, c, 0); 1611 + 1612 + (void) process_sendcmd_error(h, c); 1613 + 1614 + if (rc != 0) 1615 + goto retry_tur; 1616 + 1617 + if (c->err_info->CommandStatus == CMD_SUCCESS) 1618 + break; 1619 + 1620 + if (c->err_info->CommandStatus == CMD_TARGET_STATUS && 1621 + c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION) { 1622 + if (c->err_info->SenseInfo[2] == NO_SENSE) 1623 + break; 1624 + if (c->err_info->SenseInfo[2] == UNIT_ATTENTION) { 1625 + unsigned char asc; 1626 + asc = c->err_info->SenseInfo[12]; 1627 + check_for_unit_attention(h, c); 1628 + if (asc == POWER_OR_RESET) 1629 + break; 1630 + } 1631 + } 1632 + retry_tur: 1633 + printk(KERN_WARNING "cciss%d: Waiting %d secs " 1634 + "for device to become ready.\n", 1635 + h->ctlr, waittime / HZ); 1636 + rc = 1; /* device not ready. */ 1637 + } 1638 + 1639 + if (rc) 1640 + printk("cciss%d: giving up on device.\n", h->ctlr); 1641 + else 1642 + printk(KERN_WARNING "cciss%d: device is ready.\n", h->ctlr); 1643 + 1644 + cmd_free(h, c, 1); 1645 + return rc; 1646 + } 1571 1647 1572 1648 /* Need at least one of these error handlers to keep ../scsi/hosts.c from 1573 1649 * complaining. Doing a host- or bus-reset can't do anything good here. 
··· 1653 1591 { 1654 1592 int rc; 1655 1593 CommandList_struct *cmd_in_trouble; 1594 + unsigned char lunaddr[8]; 1656 1595 ctlr_info_t **c; 1657 1596 int ctlr; 1658 1597 ··· 1663 1600 return FAILED; 1664 1601 ctlr = (*c)->ctlr; 1665 1602 printk(KERN_WARNING "cciss%d: resetting tape drive or medium changer.\n", ctlr); 1666 - 1667 1603 /* find the command that's giving us trouble */ 1668 1604 cmd_in_trouble = (CommandList_struct *) scsicmd->host_scribble; 1669 - if (cmd_in_trouble == NULL) { /* paranoia */ 1605 + if (cmd_in_trouble == NULL) /* paranoia */ 1670 1606 return FAILED; 1671 - } 1607 + memcpy(lunaddr, &cmd_in_trouble->Header.LUN.LunAddrBytes[0], 8); 1672 1608 /* send a reset to the SCSI LUN which the command was sent to */ 1673 - rc = sendcmd(CCISS_RESET_MSG, ctlr, NULL, 0, 2, 0, 0, 1674 - (unsigned char *) &cmd_in_trouble->Header.LUN.LunAddrBytes[0], 1609 + rc = sendcmd_withirq(CCISS_RESET_MSG, ctlr, NULL, 0, 0, lunaddr, 1675 1610 TYPE_MSG); 1676 - /* sendcmd turned off interrupts on the board, turn 'em back on. */ 1677 - (*c)->access.set_intr_mask(*c, CCISS_INTR_ON); 1678 - if (rc == 0) 1611 + if (rc == 0 && wait_for_device_to_become_ready(*c, lunaddr) == 0) 1679 1612 return SUCCESS; 1680 1613 printk(KERN_WARNING "cciss%d: resetting device failed.\n", ctlr); 1681 1614 return FAILED; ··· 1681 1622 { 1682 1623 int rc; 1683 1624 CommandList_struct *cmd_to_abort; 1625 + unsigned char lunaddr[8]; 1684 1626 ctlr_info_t **c; 1685 1627 int ctlr; 1686 1628 ··· 1696 1636 cmd_to_abort = (CommandList_struct *) scsicmd->host_scribble; 1697 1637 if (cmd_to_abort == NULL) /* paranoia */ 1698 1638 return FAILED; 1699 - rc = sendcmd(CCISS_ABORT_MSG, ctlr, &cmd_to_abort->Header.Tag, 1700 - 0, 2, 0, 0, 1701 - (unsigned char *) &cmd_to_abort->Header.LUN.LunAddrBytes[0], 1702 - TYPE_MSG); 1703 - /* sendcmd turned off interrupts on the board, turn 'em back on. 
*/ 1704 - (*c)->access.set_intr_mask(*c, CCISS_INTR_ON); 1639 + memcpy(lunaddr, &cmd_to_abort->Header.LUN.LunAddrBytes[0], 8); 1640 + rc = sendcmd_withirq(CCISS_ABORT_MSG, ctlr, &cmd_to_abort->Header.Tag, 1641 + 0, 0, lunaddr, TYPE_MSG); 1705 1642 if (rc == 0) 1706 1643 return SUCCESS; 1707 1644 return FAILED;
+10 -10
drivers/block/cpqarray.c
··· 474 474 disk->fops = &ida_fops; 475 475 if (j && !drv->nr_blks) 476 476 continue; 477 - blk_queue_hardsect_size(hba[i]->queue, drv->blk_size); 477 + blk_queue_logical_block_size(hba[i]->queue, drv->blk_size); 478 478 set_capacity(disk, drv->nr_blks); 479 479 disk->queue = hba[i]->queue; 480 480 disk->private_data = drv; ··· 903 903 goto startio; 904 904 905 905 queue_next: 906 - creq = elv_next_request(q); 906 + creq = blk_peek_request(q); 907 907 if (!creq) 908 908 goto startio; 909 909 ··· 912 912 if ((c = cmd_alloc(h,1)) == NULL) 913 913 goto startio; 914 914 915 - blkdev_dequeue_request(creq); 915 + blk_start_request(creq); 916 916 917 917 c->ctlr = h->ctlr; 918 918 c->hdr.unit = (drv_info_t *)(creq->rq_disk->private_data) - h->drv; 919 919 c->hdr.size = sizeof(rblk_t) >> 2; 920 920 c->size += sizeof(rblk_t); 921 921 922 - c->req.hdr.blk = creq->sector; 922 + c->req.hdr.blk = blk_rq_pos(creq); 923 923 c->rq = creq; 924 924 DBGPX( 925 - printk("sector=%d, nr_sectors=%d\n", creq->sector, creq->nr_sectors); 925 + printk("sector=%d, nr_sectors=%u\n", 926 + blk_rq_pos(creq), blk_rq_sectors(creq)); 926 927 ); 927 928 sg_init_table(tmp_sg, SG_MAX); 928 929 seg = blk_rq_map_sg(q, creq, tmp_sg); ··· 941 940 tmp_sg[i].offset, 942 941 tmp_sg[i].length, dir); 943 942 } 944 - DBGPX( printk("Submitting %d sectors in %d segments\n", creq->nr_sectors, seg); ); 943 + DBGPX( printk("Submitting %u sectors in %d segments\n", blk_rq_sectors(creq), seg); ); 945 944 c->req.hdr.sg_cnt = seg; 946 - c->req.hdr.blk_cnt = creq->nr_sectors; 945 + c->req.hdr.blk_cnt = blk_rq_sectors(creq); 947 946 c->req.hdr.cmd = (rq_data_dir(creq) == READ) ? 
IDA_READ : IDA_WRITE; 948 947 c->type = CMD_RWREQ; 949 948 ··· 1025 1024 cmd->req.sg[i].size, ddir); 1026 1025 1027 1026 DBGPX(printk("Done with %p\n", rq);); 1028 - if (__blk_end_request(rq, error, blk_rq_bytes(rq))) 1029 - BUG(); 1027 + __blk_end_request_all(rq, error); 1030 1028 } 1031 1029 1032 1030 /* ··· 1546 1546 drv_info_t *drv = &host->drv[i]; 1547 1547 if (i && !drv->nr_blks) 1548 1548 continue; 1549 - blk_queue_hardsect_size(host->queue, drv->blk_size); 1549 + blk_queue_logical_block_size(host->queue, drv->blk_size); 1550 1550 set_capacity(disk, drv->nr_blks); 1551 1551 disk->queue = host->queue; 1552 1552 disk->private_data = drv;
+55 -30
drivers/block/floppy.c
··· 931 931 del_timer(&fd_timeout); 932 932 cont = NULL; 933 933 clear_bit(0, &fdc_busy); 934 - if (elv_next_request(floppy_queue)) 934 + if (current_req || blk_peek_request(floppy_queue)) 935 935 do_fd_request(floppy_queue); 936 936 spin_unlock_irqrestore(&floppy_lock, flags); 937 937 wake_up(&fdc_wait); ··· 2303 2303 2304 2304 /* current_count_sectors can be zero if transfer failed */ 2305 2305 if (error) 2306 - nr_sectors = req->current_nr_sectors; 2306 + nr_sectors = blk_rq_cur_sectors(req); 2307 2307 if (__blk_end_request(req, error, nr_sectors << 9)) 2308 2308 return; 2309 2309 ··· 2332 2332 if (uptodate) { 2333 2333 /* maintain values for invalidation on geometry 2334 2334 * change */ 2335 - block = current_count_sectors + req->sector; 2335 + block = current_count_sectors + blk_rq_pos(req); 2336 2336 INFBOUND(DRS->maxblock, block); 2337 2337 if (block > _floppy->sect) 2338 2338 DRS->maxtrack = 1; ··· 2346 2346 /* record write error information */ 2347 2347 DRWE->write_errors++; 2348 2348 if (DRWE->write_errors == 1) { 2349 - DRWE->first_error_sector = req->sector; 2349 + DRWE->first_error_sector = blk_rq_pos(req); 2350 2350 DRWE->first_error_generation = DRS->generation; 2351 2351 } 2352 - DRWE->last_error_sector = req->sector; 2352 + DRWE->last_error_sector = blk_rq_pos(req); 2353 2353 DRWE->last_error_generation = DRS->generation; 2354 2354 } 2355 2355 spin_lock_irqsave(q->queue_lock, flags); ··· 2503 2503 2504 2504 max_sector = transfer_size(ssize, 2505 2505 min(max_sector, max_sector_2), 2506 - current_req->nr_sectors); 2506 + blk_rq_sectors(current_req)); 2507 2507 2508 2508 if (current_count_sectors <= 0 && CT(COMMAND) == FD_WRITE && 2509 - buffer_max > fsector_t + current_req->nr_sectors) 2509 + buffer_max > fsector_t + blk_rq_sectors(current_req)) 2510 2510 current_count_sectors = min_t(int, buffer_max - fsector_t, 2511 - current_req->nr_sectors); 2511 + blk_rq_sectors(current_req)); 2512 2512 2513 2513 remaining = current_count_sectors << 9; 2514 
2514 #ifdef FLOPPY_SANITY_CHECK 2515 - if ((remaining >> 9) > current_req->nr_sectors && 2516 - CT(COMMAND) == FD_WRITE) { 2515 + if (remaining > blk_rq_bytes(current_req) && CT(COMMAND) == FD_WRITE) { 2517 2516 DPRINT("in copy buffer\n"); 2518 2517 printk("current_count_sectors=%ld\n", current_count_sectors); 2519 2518 printk("remaining=%d\n", remaining >> 9); 2520 - printk("current_req->nr_sectors=%ld\n", 2521 - current_req->nr_sectors); 2519 + printk("current_req->nr_sectors=%u\n", 2520 + blk_rq_sectors(current_req)); 2522 2521 printk("current_req->current_nr_sectors=%u\n", 2523 - current_req->current_nr_sectors); 2522 + blk_rq_cur_sectors(current_req)); 2524 2523 printk("max_sector=%d\n", max_sector); 2525 2524 printk("ssize=%d\n", ssize); 2526 2525 } ··· 2529 2530 2530 2531 dma_buffer = floppy_track_buffer + ((fsector_t - buffer_min) << 9); 2531 2532 2532 - size = current_req->current_nr_sectors << 9; 2533 + size = blk_rq_cur_bytes(current_req); 2533 2534 2534 2535 rq_for_each_segment(bv, current_req, iter) { 2535 2536 if (!remaining) ··· 2647 2648 2648 2649 max_sector = _floppy->sect * _floppy->head; 2649 2650 2650 - TRACK = (int)current_req->sector / max_sector; 2651 - fsector_t = (int)current_req->sector % max_sector; 2651 + TRACK = (int)blk_rq_pos(current_req) / max_sector; 2652 + fsector_t = (int)blk_rq_pos(current_req) % max_sector; 2652 2653 if (_floppy->track && TRACK >= _floppy->track) { 2653 - if (current_req->current_nr_sectors & 1) { 2654 + if (blk_rq_cur_sectors(current_req) & 1) { 2654 2655 current_count_sectors = 1; 2655 2656 return 1; 2656 2657 } else ··· 2668 2669 if (fsector_t >= max_sector) { 2669 2670 current_count_sectors = 2670 2671 min_t(int, _floppy->sect - fsector_t, 2671 - current_req->nr_sectors); 2672 + blk_rq_sectors(current_req)); 2672 2673 return 1; 2673 2674 } 2674 2675 SIZECODE = 2; ··· 2719 2720 2720 2721 in_sector_offset = (fsector_t % _floppy->sect) % ssize; 2721 2722 aligned_sector_t = fsector_t - in_sector_offset; 2722 - 
max_size = current_req->nr_sectors; 2723 + max_size = blk_rq_sectors(current_req); 2723 2724 if ((raw_cmd->track == buffer_track) && 2724 2725 (current_drive == buffer_drive) && 2725 2726 (fsector_t >= buffer_min) && (fsector_t < buffer_max)) { ··· 2728 2729 copy_buffer(1, max_sector, buffer_max); 2729 2730 return 1; 2730 2731 } 2731 - } else if (in_sector_offset || current_req->nr_sectors < ssize) { 2732 + } else if (in_sector_offset || blk_rq_sectors(current_req) < ssize) { 2732 2733 if (CT(COMMAND) == FD_WRITE) { 2733 - if (fsector_t + current_req->nr_sectors > ssize && 2734 - fsector_t + current_req->nr_sectors < ssize + ssize) 2734 + if (fsector_t + blk_rq_sectors(current_req) > ssize && 2735 + fsector_t + blk_rq_sectors(current_req) < ssize + ssize) 2735 2736 max_size = ssize + ssize; 2736 2737 else 2737 2738 max_size = ssize; ··· 2775 2776 (indirect * 2 > direct * 3 && 2776 2777 *errors < DP->max_errors.read_track && ((!probing 2777 2778 || (DP->read_track & (1 << DRS->probed_format)))))) { 2778 - max_size = current_req->nr_sectors; 2779 + max_size = blk_rq_sectors(current_req); 2779 2780 } else { 2780 2781 raw_cmd->kernel_data = current_req->buffer; 2781 2782 raw_cmd->length = current_count_sectors << 9; ··· 2800 2801 fsector_t > buffer_max || 2801 2802 fsector_t < buffer_min || 2802 2803 ((CT(COMMAND) == FD_READ || 2803 - (!in_sector_offset && current_req->nr_sectors >= ssize)) && 2804 + (!in_sector_offset && blk_rq_sectors(current_req) >= ssize)) && 2804 2805 max_sector > 2 * max_buffer_sectors + buffer_min && 2805 2806 max_size + fsector_t > 2 * max_buffer_sectors + buffer_min) 2806 2807 /* not enough space */ ··· 2878 2879 printk("write\n"); 2879 2880 return 0; 2880 2881 } 2881 - } else if (raw_cmd->length > current_req->nr_sectors << 9 || 2882 - current_count_sectors > current_req->nr_sectors) { 2882 + } else if (raw_cmd->length > blk_rq_bytes(current_req) || 2883 + current_count_sectors > blk_rq_sectors(current_req)) { 2883 2884 DPRINT("buffer overrun 
in direct transfer\n"); 2884 2885 return 0; 2885 2886 } else if (raw_cmd->length < current_count_sectors << 9) { ··· 2912 2913 struct request *req; 2913 2914 2914 2915 spin_lock_irq(floppy_queue->queue_lock); 2915 - req = elv_next_request(floppy_queue); 2916 + req = blk_fetch_request(floppy_queue); 2916 2917 spin_unlock_irq(floppy_queue->queue_lock); 2917 2918 if (!req) { 2918 2919 do_floppy = NULL; ··· 2989 2990 if (usage_count == 0) { 2990 2991 printk("warning: usage count=0, current_req=%p exiting\n", 2991 2992 current_req); 2992 - printk("sect=%ld type=%x flags=%x\n", (long)current_req->sector, 2993 - current_req->cmd_type, current_req->cmd_flags); 2993 + printk("sect=%ld type=%x flags=%x\n", 2994 + (long)blk_rq_pos(current_req), current_req->cmd_type, 2995 + current_req->cmd_flags); 2994 2996 return; 2995 2997 } 2996 2998 if (test_bit(0, &fdc_busy)) { ··· 4148 4148 { 4149 4149 } 4150 4150 4151 + static int floppy_resume(struct platform_device *dev) 4152 + { 4153 + int fdc; 4154 + 4155 + for (fdc = 0; fdc < N_FDC; fdc++) 4156 + if (FDCS->address != -1) 4157 + user_reset_fdc(-1, FD_RESET_ALWAYS, 0); 4158 + 4159 + return 0; 4160 + } 4161 + 4162 + static struct platform_driver floppy_driver = { 4163 + .resume = floppy_resume, 4164 + .driver = { 4165 + .name = "floppy", 4166 + }, 4167 + }; 4168 + 4151 4169 static struct platform_device floppy_device[N_DRIVE]; 4152 4170 4153 4171 static struct kobject *floppy_find(dev_t dev, int *part, void *data) ··· 4214 4196 if (err) 4215 4197 goto out_put_disk; 4216 4198 4199 + err = platform_driver_register(&floppy_driver); 4200 + if (err) 4201 + goto out_unreg_blkdev; 4202 + 4217 4203 floppy_queue = blk_init_queue(do_fd_request, &floppy_lock); 4218 4204 if (!floppy_queue) { 4219 4205 err = -ENOMEM; 4220 - goto out_unreg_blkdev; 4206 + goto out_unreg_driver; 4221 4207 } 4222 4208 blk_queue_max_sectors(floppy_queue, 64); 4223 4209 ··· 4368 4346 out_unreg_region: 4369 4347 blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256); 4370 
4348 blk_cleanup_queue(floppy_queue); 4349 + out_unreg_driver: 4350 + platform_driver_unregister(&floppy_driver); 4371 4351 out_unreg_blkdev: 4372 4352 unregister_blkdev(FLOPPY_MAJOR, "fd"); 4373 4353 out_put_disk: ··· 4590 4566 4591 4567 blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256); 4592 4568 unregister_blkdev(FLOPPY_MAJOR, "fd"); 4569 + platform_driver_unregister(&floppy_driver); 4593 4570 4594 4571 for (drive = 0; drive < N_DRIVE; drive++) { 4595 4572 del_timer_sync(&motor_off_timer[drive]);
+56 -52
drivers/block/hd.c
··· 98 98 99 99 static DEFINE_SPINLOCK(hd_lock); 100 100 static struct request_queue *hd_queue; 101 + static struct request *hd_req; 101 102 102 103 #define MAJOR_NR HD_MAJOR 103 - #define QUEUE (hd_queue) 104 - #define CURRENT elv_next_request(hd_queue) 105 104 106 105 #define TIMEOUT_VALUE (6*HZ) 107 106 #define HD_DELAY 0 ··· 194 195 NR_HD = hdind+1; 195 196 } 196 197 198 + static bool hd_end_request(int err, unsigned int bytes) 199 + { 200 + if (__blk_end_request(hd_req, err, bytes)) 201 + return true; 202 + hd_req = NULL; 203 + return false; 204 + } 205 + 206 + static bool hd_end_request_cur(int err) 207 + { 208 + return hd_end_request(err, blk_rq_cur_bytes(hd_req)); 209 + } 210 + 197 211 static void dump_status(const char *msg, unsigned int stat) 198 212 { 199 213 char *name = "hd?"; 200 - if (CURRENT) 201 - name = CURRENT->rq_disk->disk_name; 214 + if (hd_req) 215 + name = hd_req->rq_disk->disk_name; 202 216 203 217 #ifdef VERBOSE_ERRORS 204 218 printk("%s: %s: status=0x%02x { ", name, msg, stat & 0xff); ··· 239 227 if (hd_error & (BBD_ERR|ECC_ERR|ID_ERR|MARK_ERR)) { 240 228 printk(", CHS=%d/%d/%d", (inb(HD_HCYL)<<8) + inb(HD_LCYL), 241 229 inb(HD_CURRENT) & 0xf, inb(HD_SECTOR)); 242 - if (CURRENT) 243 - printk(", sector=%ld", CURRENT->sector); 230 + if (hd_req) 231 + printk(", sector=%ld", blk_rq_pos(hd_req)); 244 232 } 245 233 printk("\n"); 246 234 } ··· 418 406 */ 419 407 static void bad_rw_intr(void) 420 408 { 421 - struct request *req = CURRENT; 409 + struct request *req = hd_req; 410 + 422 411 if (req != NULL) { 423 412 struct hd_i_struct *disk = req->rq_disk->private_data; 424 413 if (++req->errors >= MAX_ERRORS || (hd_error & BBD_ERR)) { 425 - end_request(req, 0); 414 + hd_end_request_cur(-EIO); 426 415 disk->special_op = disk->recalibrate = 1; 427 416 } else if (req->errors % RESET_FREQ == 0) 428 417 reset = 1; ··· 465 452 bad_rw_intr(); 466 453 hd_request(); 467 454 return; 455 + 468 456 ok_to_read: 469 - req = CURRENT; 457 + req = hd_req; 470 458 
insw(HD_DATA, req->buffer, 256); 471 - req->sector++; 472 - req->buffer += 512; 473 - req->errors = 0; 474 - i = --req->nr_sectors; 475 - --req->current_nr_sectors; 476 459 #ifdef DEBUG 477 - printk("%s: read: sector %ld, remaining = %ld, buffer=%p\n", 478 - req->rq_disk->disk_name, req->sector, req->nr_sectors, 479 - req->buffer+512); 460 + printk("%s: read: sector %ld, remaining = %u, buffer=%p\n", 461 + req->rq_disk->disk_name, blk_rq_pos(req) + 1, 462 + blk_rq_sectors(req) - 1, req->buffer+512); 480 463 #endif 481 - if (req->current_nr_sectors <= 0) 482 - end_request(req, 1); 483 - if (i > 0) { 464 + if (hd_end_request(0, 512)) { 484 465 SET_HANDLER(&read_intr); 485 466 return; 486 467 } 468 + 487 469 (void) inb_p(HD_STATUS); 488 470 #if (HD_DELAY > 0) 489 471 last_req = read_timer(); 490 472 #endif 491 - if (elv_next_request(QUEUE)) 492 - hd_request(); 493 - return; 473 + hd_request(); 494 474 } 495 475 496 476 static void write_intr(void) 497 477 { 498 - struct request *req = CURRENT; 478 + struct request *req = hd_req; 499 479 int i; 500 480 int retries = 100000; 501 481 ··· 498 492 continue; 499 493 if (!OK_STATUS(i)) 500 494 break; 501 - if ((req->nr_sectors <= 1) || (i & DRQ_STAT)) 495 + if ((blk_rq_sectors(req) <= 1) || (i & DRQ_STAT)) 502 496 goto ok_to_write; 503 497 } while (--retries > 0); 504 498 dump_status("write_intr", i); 505 499 bad_rw_intr(); 506 500 hd_request(); 507 501 return; 502 + 508 503 ok_to_write: 509 - req->sector++; 510 - i = --req->nr_sectors; 511 - --req->current_nr_sectors; 512 - req->buffer += 512; 513 - if (!i || (req->bio && req->current_nr_sectors <= 0)) 514 - end_request(req, 1); 515 - if (i > 0) { 504 + if (hd_end_request(0, 512)) { 516 505 SET_HANDLER(&write_intr); 517 506 outsw(HD_DATA, req->buffer, 256); 518 - } else { 519 - #if (HD_DELAY > 0) 520 - last_req = read_timer(); 521 - #endif 522 - hd_request(); 507 + return; 523 508 } 524 - return; 509 + 510 + #if (HD_DELAY > 0) 511 + last_req = read_timer(); 512 + #endif 513 
+ hd_request(); 525 514 } 526 515 527 516 static void recal_intr(void) ··· 538 537 539 538 do_hd = NULL; 540 539 541 - if (!CURRENT) 540 + if (!hd_req) 542 541 return; 543 542 544 543 spin_lock_irq(hd_queue->queue_lock); 545 544 reset = 1; 546 - name = CURRENT->rq_disk->disk_name; 545 + name = hd_req->rq_disk->disk_name; 547 546 printk("%s: timeout\n", name); 548 - if (++CURRENT->errors >= MAX_ERRORS) { 547 + if (++hd_req->errors >= MAX_ERRORS) { 549 548 #ifdef DEBUG 550 549 printk("%s: too many errors\n", name); 551 550 #endif 552 - end_request(CURRENT, 0); 551 + hd_end_request_cur(-EIO); 553 552 } 554 553 hd_request(); 555 554 spin_unlock_irq(hd_queue->queue_lock); ··· 564 563 } 565 564 if (disk->head > 16) { 566 565 printk("%s: cannot handle device with more than 16 heads - giving up\n", req->rq_disk->disk_name); 567 - end_request(req, 0); 566 + hd_end_request_cur(-EIO); 568 567 } 569 568 disk->special_op = 0; 570 569 return 1; ··· 591 590 repeat: 592 591 del_timer(&device_timer); 593 592 594 - req = CURRENT; 595 - if (!req) { 596 - do_hd = NULL; 597 - return; 593 + if (!hd_req) { 594 + hd_req = blk_fetch_request(hd_queue); 595 + if (!hd_req) { 596 + do_hd = NULL; 597 + return; 598 + } 598 599 } 600 + req = hd_req; 599 601 600 602 if (reset) { 601 603 reset_hd(); 602 604 return; 603 605 } 604 606 disk = req->rq_disk->private_data; 605 - block = req->sector; 606 - nsect = req->nr_sectors; 607 + block = blk_rq_pos(req); 608 + nsect = blk_rq_sectors(req); 607 609 if (block >= get_capacity(req->rq_disk) || 608 610 ((block+nsect) > get_capacity(req->rq_disk))) { 609 611 printk("%s: bad access: block=%d, count=%d\n", 610 612 req->rq_disk->disk_name, block, nsect); 611 - end_request(req, 0); 613 + hd_end_request_cur(-EIO); 612 614 goto repeat; 613 615 } 614 616 ··· 651 647 break; 652 648 default: 653 649 printk("unknown hd-command\n"); 654 - end_request(req, 0); 650 + hd_end_request_cur(-EIO); 655 651 break; 656 652 } 657 653 } ··· 724 720 
blk_queue_max_sectors(hd_queue, 255); 725 721 init_timer(&device_timer); 726 722 device_timer.function = hd_times_out; 727 - blk_queue_hardsect_size(hd_queue, 512); 723 + blk_queue_logical_block_size(hd_queue, 512); 728 724 729 725 if (!NR_HD) { 730 726 /*
+8 -29
drivers/block/loop.c
··· 511 511 */ 512 512 static void loop_add_bio(struct loop_device *lo, struct bio *bio) 513 513 { 514 - if (lo->lo_biotail) { 515 - lo->lo_biotail->bi_next = bio; 516 - lo->lo_biotail = bio; 517 - } else 518 - lo->lo_bio = lo->lo_biotail = bio; 514 + bio_list_add(&lo->lo_bio_list, bio); 519 515 } 520 516 521 517 /* ··· 519 523 */ 520 524 static struct bio *loop_get_bio(struct loop_device *lo) 521 525 { 522 - struct bio *bio; 523 - 524 - if ((bio = lo->lo_bio)) { 525 - if (bio == lo->lo_biotail) 526 - lo->lo_biotail = NULL; 527 - lo->lo_bio = bio->bi_next; 528 - bio->bi_next = NULL; 529 - } 530 - 531 - return bio; 526 + return bio_list_pop(&lo->lo_bio_list); 532 527 } 533 528 534 529 static int loop_make_request(struct request_queue *q, struct bio *old_bio) ··· 596 609 597 610 set_user_nice(current, -20); 598 611 599 - while (!kthread_should_stop() || lo->lo_bio) { 612 + while (!kthread_should_stop() || !bio_list_empty(&lo->lo_bio_list)) { 600 613 601 614 wait_event_interruptible(lo->lo_event, 602 - lo->lo_bio || kthread_should_stop()); 615 + !bio_list_empty(&lo->lo_bio_list) || 616 + kthread_should_stop()); 603 617 604 - if (!lo->lo_bio) 618 + if (bio_list_empty(&lo->lo_bio_list)) 605 619 continue; 606 620 spin_lock_irq(&lo->lo_lock); 607 621 bio = loop_get_bio(lo); ··· 709 721 if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode)) 710 722 goto out_putf; 711 723 712 - /* new backing store needs to support loop (eg splice_read) */ 713 - if (!inode->i_fop->splice_read) 714 - goto out_putf; 715 - 716 724 /* size of the new backing store needs to be the same */ 717 725 if (get_loop_size(lo, file) != get_loop_size(lo, old_file)) 718 726 goto out_putf; ··· 784 800 error = -EINVAL; 785 801 if (S_ISREG(inode->i_mode) || S_ISBLK(inode->i_mode)) { 786 802 const struct address_space_operations *aops = mapping->a_ops; 787 - /* 788 - * If we can't read - sorry. If we only can't write - well, 789 - * it's going to be read-only. 
790 - */ 791 - if (!file->f_op->splice_read) 792 - goto out_putf; 803 + 793 804 if (aops->write_begin) 794 805 lo_flags |= LO_FLAGS_USE_AOPS; 795 806 if (!(lo_flags & LO_FLAGS_USE_AOPS) && !file->f_op->write) ··· 820 841 lo->old_gfp_mask = mapping_gfp_mask(mapping); 821 842 mapping_set_gfp_mask(mapping, lo->old_gfp_mask & ~(__GFP_IO|__GFP_FS)); 822 843 823 - lo->lo_bio = lo->lo_biotail = NULL; 844 + bio_list_init(&lo->lo_bio_list); 824 845 825 846 /* 826 847 * set queue make_request_fn, and add limits based on lower level
+323 -214
drivers/block/mg_disk.c
··· 17 17 #include <linux/fs.h> 18 18 #include <linux/blkdev.h> 19 19 #include <linux/hdreg.h> 20 - #include <linux/libata.h> 20 + #include <linux/ata.h> 21 21 #include <linux/interrupt.h> 22 22 #include <linux/delay.h> 23 23 #include <linux/platform_device.h> 24 24 #include <linux/gpio.h> 25 - #include <linux/mg_disk.h> 26 25 27 26 #define MG_RES_SEC (CONFIG_MG_DISK_RES << 1) 28 27 28 + /* name for block device */ 29 + #define MG_DISK_NAME "mgd" 30 + /* name for platform device */ 31 + #define MG_DEV_NAME "mg_disk" 32 + 33 + #define MG_DISK_MAJ 0 34 + #define MG_DISK_MAX_PART 16 35 + #define MG_SECTOR_SIZE 512 36 + #define MG_MAX_SECTS 256 37 + 38 + /* Register offsets */ 39 + #define MG_BUFF_OFFSET 0x8000 40 + #define MG_STORAGE_BUFFER_SIZE 0x200 41 + #define MG_REG_OFFSET 0xC000 42 + #define MG_REG_FEATURE (MG_REG_OFFSET + 2) /* write case */ 43 + #define MG_REG_ERROR (MG_REG_OFFSET + 2) /* read case */ 44 + #define MG_REG_SECT_CNT (MG_REG_OFFSET + 4) 45 + #define MG_REG_SECT_NUM (MG_REG_OFFSET + 6) 46 + #define MG_REG_CYL_LOW (MG_REG_OFFSET + 8) 47 + #define MG_REG_CYL_HIGH (MG_REG_OFFSET + 0xA) 48 + #define MG_REG_DRV_HEAD (MG_REG_OFFSET + 0xC) 49 + #define MG_REG_COMMAND (MG_REG_OFFSET + 0xE) /* write case */ 50 + #define MG_REG_STATUS (MG_REG_OFFSET + 0xE) /* read case */ 51 + #define MG_REG_DRV_CTRL (MG_REG_OFFSET + 0x10) 52 + #define MG_REG_BURST_CTRL (MG_REG_OFFSET + 0x12) 53 + 54 + /* handy status */ 55 + #define MG_STAT_READY (ATA_DRDY | ATA_DSC) 56 + #define MG_READY_OK(s) (((s) & (MG_STAT_READY | (ATA_BUSY | ATA_DF | \ 57 + ATA_ERR))) == MG_STAT_READY) 58 + 59 + /* error code for others */ 60 + #define MG_ERR_NONE 0 61 + #define MG_ERR_TIMEOUT 0x100 62 + #define MG_ERR_INIT_STAT 0x101 63 + #define MG_ERR_TRANSLATION 0x102 64 + #define MG_ERR_CTRL_RST 0x103 65 + #define MG_ERR_INV_STAT 0x104 66 + #define MG_ERR_RSTOUT 0x105 67 + 68 + #define MG_MAX_ERRORS 6 /* Max read/write errors */ 69 + 70 + /* command */ 71 + #define MG_CMD_RD 0x20 72 + #define 
MG_CMD_WR 0x30 73 + #define MG_CMD_SLEEP 0x99 74 + #define MG_CMD_WAKEUP 0xC3 75 + #define MG_CMD_ID 0xEC 76 + #define MG_CMD_WR_CONF 0x3C 77 + #define MG_CMD_RD_CONF 0x40 78 + 79 + /* operation mode */ 80 + #define MG_OP_CASCADE (1 << 0) 81 + #define MG_OP_CASCADE_SYNC_RD (1 << 1) 82 + #define MG_OP_CASCADE_SYNC_WR (1 << 2) 83 + #define MG_OP_INTERLEAVE (1 << 3) 84 + 85 + /* synchronous */ 86 + #define MG_BURST_LAT_4 (3 << 4) 87 + #define MG_BURST_LAT_5 (4 << 4) 88 + #define MG_BURST_LAT_6 (5 << 4) 89 + #define MG_BURST_LAT_7 (6 << 4) 90 + #define MG_BURST_LAT_8 (7 << 4) 91 + #define MG_BURST_LEN_4 (1 << 1) 92 + #define MG_BURST_LEN_8 (2 << 1) 93 + #define MG_BURST_LEN_16 (3 << 1) 94 + #define MG_BURST_LEN_32 (4 << 1) 95 + #define MG_BURST_LEN_CONT (0 << 1) 96 + 97 + /* timeout value (unit: ms) */ 98 + #define MG_TMAX_CONF_TO_CMD 1 99 + #define MG_TMAX_WAIT_RD_DRQ 10 100 + #define MG_TMAX_WAIT_WR_DRQ 500 101 + #define MG_TMAX_RST_TO_BUSY 10 102 + #define MG_TMAX_HDRST_TO_RDY 500 103 + #define MG_TMAX_SWRST_TO_RDY 500 104 + #define MG_TMAX_RSTOUT 3000 105 + 106 + /* device attribution */ 107 + /* use mflash as boot device */ 108 + #define MG_BOOT_DEV (1 << 0) 109 + /* use mflash as storage device */ 110 + #define MG_STORAGE_DEV (1 << 1) 111 + /* same as MG_STORAGE_DEV, but bootloader already done reset sequence */ 112 + #define MG_STORAGE_DEV_SKIP_RST (1 << 2) 113 + 114 + #define MG_DEV_MASK (MG_BOOT_DEV | MG_STORAGE_DEV | MG_STORAGE_DEV_SKIP_RST) 115 + 116 + /* names of GPIO resource */ 117 + #define MG_RST_PIN "mg_rst" 118 + /* except MG_BOOT_DEV, reset-out pin should be assigned */ 119 + #define MG_RSTOUT_PIN "mg_rstout" 120 + 121 + /* private driver data */ 122 + struct mg_drv_data { 123 + /* disk resource */ 124 + u32 use_polling; 125 + 126 + /* device attribution */ 127 + u32 dev_attr; 128 + 129 + /* internally used */ 130 + struct mg_host *host; 131 + }; 132 + 133 + /* main structure for mflash driver */ 134 + struct mg_host { 135 + struct device *dev; 136 + 
137 + struct request_queue *breq; 138 + struct request *req; 139 + spinlock_t lock; 140 + struct gendisk *gd; 141 + 142 + struct timer_list timer; 143 + void (*mg_do_intr) (struct mg_host *); 144 + 145 + u16 id[ATA_ID_WORDS]; 146 + 147 + u16 cyls; 148 + u16 heads; 149 + u16 sectors; 150 + u32 n_sectors; 151 + u32 nres_sectors; 152 + 153 + void __iomem *dev_base; 154 + unsigned int irq; 155 + unsigned int rst; 156 + unsigned int rstout; 157 + 158 + u32 major; 159 + u32 error; 160 + }; 161 + 162 + /* 163 + * Debugging macro and defines 164 + */ 165 + #undef DO_MG_DEBUG 166 + #ifdef DO_MG_DEBUG 167 + # define MG_DBG(fmt, args...) \ 168 + printk(KERN_DEBUG "%s:%d "fmt, __func__, __LINE__, ##args) 169 + #else /* CONFIG_MG_DEBUG */ 170 + # define MG_DBG(fmt, args...) do { } while (0) 171 + #endif /* CONFIG_MG_DEBUG */ 172 + 29 173 static void mg_request(struct request_queue *); 174 + 175 + static bool mg_end_request(struct mg_host *host, int err, unsigned int nr_bytes) 176 + { 177 + if (__blk_end_request(host->req, err, nr_bytes)) 178 + return true; 179 + 180 + host->req = NULL; 181 + return false; 182 + } 183 + 184 + static bool mg_end_request_cur(struct mg_host *host, int err) 185 + { 186 + return mg_end_request(host, err, blk_rq_cur_bytes(host->req)); 187 + } 30 188 31 189 static void mg_dump_status(const char *msg, unsigned int stat, 32 190 struct mg_host *host) 33 191 { 34 192 char *name = MG_DISK_NAME; 35 - struct request *req; 36 193 37 - if (host->breq) { 38 - req = elv_next_request(host->breq); 39 - if (req) 40 - name = req->rq_disk->disk_name; 41 - } 194 + if (host->req) 195 + name = host->req->rq_disk->disk_name; 42 196 43 197 printk(KERN_ERR "%s: %s: status=0x%02x { ", name, msg, stat & 0xff); 44 - if (stat & MG_REG_STATUS_BIT_BUSY) 198 + if (stat & ATA_BUSY) 45 199 printk("Busy "); 46 - if (stat & MG_REG_STATUS_BIT_READY) 200 + if (stat & ATA_DRDY) 47 201 printk("DriveReady "); 48 - if (stat & MG_REG_STATUS_BIT_WRITE_FAULT) 202 + if (stat & ATA_DF) 49 203 
printk("WriteFault "); 50 - if (stat & MG_REG_STATUS_BIT_SEEK_DONE) 204 + if (stat & ATA_DSC) 51 205 printk("SeekComplete "); 52 - if (stat & MG_REG_STATUS_BIT_DATA_REQ) 206 + if (stat & ATA_DRQ) 53 207 printk("DataRequest "); 54 - if (stat & MG_REG_STATUS_BIT_CORRECTED_ERROR) 208 + if (stat & ATA_CORR) 55 209 printk("CorrectedError "); 56 - if (stat & MG_REG_STATUS_BIT_ERROR) 210 + if (stat & ATA_ERR) 57 211 printk("Error "); 58 212 printk("}\n"); 59 - if ((stat & MG_REG_STATUS_BIT_ERROR) == 0) { 213 + if ((stat & ATA_ERR) == 0) { 60 214 host->error = 0; 61 215 } else { 62 216 host->error = inb((unsigned long)host->dev_base + MG_REG_ERROR); 63 217 printk(KERN_ERR "%s: %s: error=0x%02x { ", name, msg, 64 218 host->error & 0xff); 65 - if (host->error & MG_REG_ERR_BBK) 219 + if (host->error & ATA_BBK) 66 220 printk("BadSector "); 67 - if (host->error & MG_REG_ERR_UNC) 221 + if (host->error & ATA_UNC) 68 222 printk("UncorrectableError "); 69 - if (host->error & MG_REG_ERR_IDNF) 223 + if (host->error & ATA_IDNF) 70 224 printk("SectorIdNotFound "); 71 - if (host->error & MG_REG_ERR_ABRT) 225 + if (host->error & ATA_ABORTED) 72 226 printk("DriveStatusError "); 73 - if (host->error & MG_REG_ERR_AMNF) 227 + if (host->error & ATA_AMNF) 74 228 printk("AddrMarkNotFound "); 75 229 printk("}"); 76 - if (host->error & 77 - (MG_REG_ERR_BBK | MG_REG_ERR_UNC | 78 - MG_REG_ERR_IDNF | MG_REG_ERR_AMNF)) { 79 - if (host->breq) { 80 - req = elv_next_request(host->breq); 81 - if (req) 82 - printk(", sector=%u", (u32)req->sector); 83 - } 84 - 230 + if (host->error & (ATA_BBK | ATA_UNC | ATA_IDNF | ATA_AMNF)) { 231 + if (host->req) 232 + printk(", sector=%u", 233 + (unsigned int)blk_rq_pos(host->req)); 85 234 } 86 235 printk("\n"); 87 236 } ··· 249 100 250 101 do { 251 102 cur_jiffies = jiffies; 252 - if (status & MG_REG_STATUS_BIT_BUSY) { 253 - if (expect == MG_REG_STATUS_BIT_BUSY) 103 + if (status & ATA_BUSY) { 104 + if (expect == ATA_BUSY) 254 105 break; 255 106 } else { 256 107 /* 
Check the error condition! */ 257 - if (status & MG_REG_STATUS_BIT_ERROR) { 108 + if (status & ATA_ERR) { 258 109 mg_dump_status("mg_wait", status, host); 259 110 break; 260 111 } ··· 263 114 if (MG_READY_OK(status)) 264 115 break; 265 116 266 - if (expect == MG_REG_STATUS_BIT_DATA_REQ) 267 - if (status & MG_REG_STATUS_BIT_DATA_REQ) 117 + if (expect == ATA_DRQ) 118 + if (status & ATA_DRQ) 268 119 break; 269 120 } 270 121 if (!msec) { ··· 322 173 return IRQ_HANDLED; 323 174 } 324 175 176 + /* local copy of ata_id_string() */ 177 + static void mg_id_string(const u16 *id, unsigned char *s, 178 + unsigned int ofs, unsigned int len) 179 + { 180 + unsigned int c; 181 + 182 + BUG_ON(len & 1); 183 + 184 + while (len > 0) { 185 + c = id[ofs] >> 8; 186 + *s = c; 187 + s++; 188 + 189 + c = id[ofs] & 0xff; 190 + *s = c; 191 + s++; 192 + 193 + ofs++; 194 + len -= 2; 195 + } 196 + } 197 + 198 + /* local copy of ata_id_c_string() */ 199 + static void mg_id_c_string(const u16 *id, unsigned char *s, 200 + unsigned int ofs, unsigned int len) 201 + { 202 + unsigned char *p; 203 + 204 + mg_id_string(id, s, ofs, len - 1); 205 + 206 + p = s + strnlen(s, len - 1); 207 + while (p > s && p[-1] == ' ') 208 + p--; 209 + *p = '\0'; 210 + } 211 + 325 212 static int mg_get_disk_id(struct mg_host *host) 326 213 { 327 214 u32 i; ··· 369 184 char serial[ATA_ID_SERNO_LEN + 1]; 370 185 371 186 if (!prv_data->use_polling) 372 - outb(MG_REG_CTRL_INTR_DISABLE, 373 - (unsigned long)host->dev_base + 374 - MG_REG_DRV_CTRL); 187 + outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL); 375 188 376 189 outb(MG_CMD_ID, (unsigned long)host->dev_base + MG_REG_COMMAND); 377 - err = mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ, MG_TMAX_WAIT_RD_DRQ); 190 + err = mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_RD_DRQ); 378 191 if (err) 379 192 return err; 380 193 ··· 402 219 host->n_sectors -= host->nres_sectors; 403 220 } 404 221 405 - ata_id_c_string(id, fwrev, ATA_ID_FW_REV, sizeof(fwrev)); 406 - ata_id_c_string(id, 
model, ATA_ID_PROD, sizeof(model)); 407 - ata_id_c_string(id, serial, ATA_ID_SERNO, sizeof(serial)); 222 + mg_id_c_string(id, fwrev, ATA_ID_FW_REV, sizeof(fwrev)); 223 + mg_id_c_string(id, model, ATA_ID_PROD, sizeof(model)); 224 + mg_id_c_string(id, serial, ATA_ID_SERNO, sizeof(serial)); 408 225 printk(KERN_INFO "mg_disk: model: %s\n", model); 409 226 printk(KERN_INFO "mg_disk: firm: %.8s\n", fwrev); 410 227 printk(KERN_INFO "mg_disk: serial: %s\n", serial); ··· 412 229 host->n_sectors, host->nres_sectors); 413 230 414 231 if (!prv_data->use_polling) 415 - outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base + 416 - MG_REG_DRV_CTRL); 232 + outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL); 417 233 418 234 return err; 419 235 } ··· 426 244 427 245 /* hdd rst low */ 428 246 gpio_set_value(host->rst, 0); 429 - err = mg_wait(host, MG_REG_STATUS_BIT_BUSY, MG_TMAX_RST_TO_BUSY); 247 + err = mg_wait(host, ATA_BUSY, MG_TMAX_RST_TO_BUSY); 430 248 if (err) 431 249 return err; 432 250 ··· 437 255 return err; 438 256 439 257 /* soft reset on */ 440 - outb(MG_REG_CTRL_RESET | 441 - (prv_data->use_polling ? MG_REG_CTRL_INTR_DISABLE : 442 - MG_REG_CTRL_INTR_ENABLE), 258 + outb(ATA_SRST | (prv_data->use_polling ? ATA_NIEN : 0), 443 259 (unsigned long)host->dev_base + MG_REG_DRV_CTRL); 444 - err = mg_wait(host, MG_REG_STATUS_BIT_BUSY, MG_TMAX_RST_TO_BUSY); 260 + err = mg_wait(host, ATA_BUSY, MG_TMAX_RST_TO_BUSY); 445 261 if (err) 446 262 return err; 447 263 448 264 /* soft reset off */ 449 - outb(prv_data->use_polling ? MG_REG_CTRL_INTR_DISABLE : 450 - MG_REG_CTRL_INTR_ENABLE, 265 + outb(prv_data->use_polling ? 
ATA_NIEN : 0, 451 266 (unsigned long)host->dev_base + MG_REG_DRV_CTRL); 452 267 err = mg_wait(host, MG_STAT_READY, MG_TMAX_SWRST_TO_RDY); 453 268 if (err) ··· 460 281 461 282 static void mg_bad_rw_intr(struct mg_host *host) 462 283 { 463 - struct request *req = elv_next_request(host->breq); 464 - if (req != NULL) 465 - if (++req->errors >= MG_MAX_ERRORS || 466 - host->error == MG_ERR_TIMEOUT) 467 - end_request(req, 0); 284 + if (host->req) 285 + if (++host->req->errors >= MG_MAX_ERRORS || 286 + host->error == MG_ERR_TIMEOUT) 287 + mg_end_request_cur(host, -EIO); 468 288 } 469 289 470 290 static unsigned int mg_out(struct mg_host *host, ··· 489 311 MG_REG_CYL_LOW); 490 312 outb((u8)(sect_num >> 16), (unsigned long)host->dev_base + 491 313 MG_REG_CYL_HIGH); 492 - outb((u8)((sect_num >> 24) | MG_REG_HEAD_LBA_MODE), 314 + outb((u8)((sect_num >> 24) | ATA_LBA | ATA_DEVICE_OBS), 493 315 (unsigned long)host->dev_base + MG_REG_DRV_HEAD); 494 316 outb(cmd, (unsigned long)host->dev_base + MG_REG_COMMAND); 495 317 return MG_ERR_NONE; ··· 497 319 498 320 static void mg_read(struct request *req) 499 321 { 500 - u32 remains, j; 322 + u32 j; 501 323 struct mg_host *host = req->rq_disk->private_data; 502 324 503 - remains = req->nr_sectors; 504 - 505 - if (mg_out(host, req->sector, req->nr_sectors, MG_CMD_RD, NULL) != 506 - MG_ERR_NONE) 325 + if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req), 326 + MG_CMD_RD, NULL) != MG_ERR_NONE) 507 327 mg_bad_rw_intr(host); 508 328 509 329 MG_DBG("requested %d sects (from %ld), buffer=0x%p\n", 510 - remains, req->sector, req->buffer); 330 + blk_rq_sectors(req), blk_rq_pos(req), req->buffer); 511 331 512 - while (remains) { 513 - if (mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ, 514 - MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) { 332 + do { 333 + u16 *buff = (u16 *)req->buffer; 334 + 335 + if (mg_wait(host, ATA_DRQ, 336 + MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) { 515 337 mg_bad_rw_intr(host); 516 338 return; 517 339 } 518 - for (j = 0; j < MG_SECTOR_SIZE 
>> 1; j++) { 519 - *(u16 *)req->buffer = 520 - inw((unsigned long)host->dev_base + 521 - MG_BUFF_OFFSET + (j << 1)); 522 - req->buffer += 2; 523 - } 524 - 525 - req->sector++; 526 - req->errors = 0; 527 - remains = --req->nr_sectors; 528 - --req->current_nr_sectors; 529 - 530 - if (req->current_nr_sectors <= 0) { 531 - MG_DBG("remain : %d sects\n", remains); 532 - end_request(req, 1); 533 - if (remains > 0) 534 - req = elv_next_request(host->breq); 535 - } 340 + for (j = 0; j < MG_SECTOR_SIZE >> 1; j++) 341 + *buff++ = inw((unsigned long)host->dev_base + 342 + MG_BUFF_OFFSET + (j << 1)); 536 343 537 344 outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + 538 345 MG_REG_COMMAND); 539 - } 346 + } while (mg_end_request(host, 0, MG_SECTOR_SIZE)); 540 347 } 541 348 542 349 static void mg_write(struct request *req) 543 350 { 544 - u32 remains, j; 351 + u32 j; 545 352 struct mg_host *host = req->rq_disk->private_data; 546 353 547 - remains = req->nr_sectors; 548 - 549 - if (mg_out(host, req->sector, req->nr_sectors, MG_CMD_WR, NULL) != 550 - MG_ERR_NONE) { 354 + if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req), 355 + MG_CMD_WR, NULL) != MG_ERR_NONE) { 551 356 mg_bad_rw_intr(host); 552 357 return; 553 358 } 554 359 555 - 556 360 MG_DBG("requested %d sects (from %ld), buffer=0x%p\n", 557 - remains, req->sector, req->buffer); 558 - while (remains) { 559 - if (mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ, 560 - MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) { 361 + blk_rq_sectors(req), blk_rq_pos(req), req->buffer); 362 + 363 + do { 364 + u16 *buff = (u16 *)req->buffer; 365 + 366 + if (mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) { 561 367 mg_bad_rw_intr(host); 562 368 return; 563 369 } 564 - for (j = 0; j < MG_SECTOR_SIZE >> 1; j++) { 565 - outw(*(u16 *)req->buffer, 566 - (unsigned long)host->dev_base + 567 - MG_BUFF_OFFSET + (j << 1)); 568 - req->buffer += 2; 569 - } 570 - req->sector++; 571 - remains = --req->nr_sectors; 572 - --req->current_nr_sectors; 573 - 574 - 
if (req->current_nr_sectors <= 0) { 575 - MG_DBG("remain : %d sects\n", remains); 576 - end_request(req, 1); 577 - if (remains > 0) 578 - req = elv_next_request(host->breq); 579 - } 370 + for (j = 0; j < MG_SECTOR_SIZE >> 1; j++) 371 + outw(*buff++, (unsigned long)host->dev_base + 372 + MG_BUFF_OFFSET + (j << 1)); 580 373 581 374 outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + 582 375 MG_REG_COMMAND); 583 - } 376 + } while (mg_end_request(host, 0, MG_SECTOR_SIZE)); 584 377 } 585 378 586 379 static void mg_read_intr(struct mg_host *host) 587 380 { 381 + struct request *req = host->req; 588 382 u32 i; 589 - struct request *req; 383 + u16 *buff; 590 384 591 385 /* check status */ 592 386 do { 593 387 i = inb((unsigned long)host->dev_base + MG_REG_STATUS); 594 - if (i & MG_REG_STATUS_BIT_BUSY) 388 + if (i & ATA_BUSY) 595 389 break; 596 390 if (!MG_READY_OK(i)) 597 391 break; 598 - if (i & MG_REG_STATUS_BIT_DATA_REQ) 392 + if (i & ATA_DRQ) 599 393 goto ok_to_read; 600 394 } while (0); 601 395 mg_dump_status("mg_read_intr", i, host); ··· 577 427 578 428 ok_to_read: 579 429 /* get current segment of request */ 580 - req = elv_next_request(host->breq); 430 + buff = (u16 *)req->buffer; 581 431 582 432 /* read 1 sector */ 583 - for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) { 584 - *(u16 *)req->buffer = 585 - inw((unsigned long)host->dev_base + MG_BUFF_OFFSET + 586 - (i << 1)); 587 - req->buffer += 2; 588 - } 433 + for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) 434 + *buff++ = inw((unsigned long)host->dev_base + MG_BUFF_OFFSET + 435 + (i << 1)); 589 436 590 - /* manipulate request */ 591 437 MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n", 592 - req->sector, req->nr_sectors - 1, req->buffer); 593 - 594 - req->sector++; 595 - req->errors = 0; 596 - i = --req->nr_sectors; 597 - --req->current_nr_sectors; 598 - 599 - /* let know if current segment done */ 600 - if (req->current_nr_sectors <= 0) 601 - end_request(req, 1); 602 - 603 - /* set handler if read remains */ 604 - if (i > 
0) { 605 - host->mg_do_intr = mg_read_intr; 606 - mod_timer(&host->timer, jiffies + 3 * HZ); 607 - } 438 + blk_rq_pos(req), blk_rq_sectors(req) - 1, req->buffer); 608 439 609 440 /* send read confirm */ 610 441 outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND); 611 442 612 - /* goto next request */ 613 - if (!i) 443 + if (mg_end_request(host, 0, MG_SECTOR_SIZE)) { 444 + /* set handler if read remains */ 445 + host->mg_do_intr = mg_read_intr; 446 + mod_timer(&host->timer, jiffies + 3 * HZ); 447 + } else /* goto next request */ 614 448 mg_request(host->breq); 615 449 } 616 450 617 451 static void mg_write_intr(struct mg_host *host) 618 452 { 453 + struct request *req = host->req; 619 454 u32 i, j; 620 455 u16 *buff; 621 - struct request *req; 622 - 623 - /* get current segment of request */ 624 - req = elv_next_request(host->breq); 456 + bool rem; 625 457 626 458 /* check status */ 627 459 do { 628 460 i = inb((unsigned long)host->dev_base + MG_REG_STATUS); 629 - if (i & MG_REG_STATUS_BIT_BUSY) 461 + if (i & ATA_BUSY) 630 462 break; 631 463 if (!MG_READY_OK(i)) 632 464 break; 633 - if ((req->nr_sectors <= 1) || (i & MG_REG_STATUS_BIT_DATA_REQ)) 465 + if ((blk_rq_sectors(req) <= 1) || (i & ATA_DRQ)) 634 466 goto ok_to_write; 635 467 } while (0); 636 468 mg_dump_status("mg_write_intr", i, host); ··· 621 489 return; 622 490 623 491 ok_to_write: 624 - /* manipulate request */ 625 - req->sector++; 626 - i = --req->nr_sectors; 627 - --req->current_nr_sectors; 628 - req->buffer += MG_SECTOR_SIZE; 629 - 630 - /* let know if current segment or all done */ 631 - if (!i || (req->bio && req->current_nr_sectors <= 0)) 632 - end_request(req, 1); 633 - 634 - /* write 1 sector and set handler if remains */ 635 - if (i > 0) { 492 + if ((rem = mg_end_request(host, 0, MG_SECTOR_SIZE))) { 493 + /* write 1 sector and set handler if remains */ 636 494 buff = (u16 *)req->buffer; 637 495 for (j = 0; j < MG_STORAGE_BUFFER_SIZE >> 1; j++) { 638 496 outw(*buff, (unsigned 
long)host->dev_base + ··· 630 508 buff++; 631 509 } 632 510 MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n", 633 - req->sector, req->nr_sectors, req->buffer); 511 + blk_rq_pos(req), blk_rq_sectors(req), req->buffer); 634 512 host->mg_do_intr = mg_write_intr; 635 513 mod_timer(&host->timer, jiffies + 3 * HZ); 636 514 } ··· 638 516 /* send write confirm */ 639 517 outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND); 640 518 641 - if (!i) 519 + if (!rem) 642 520 mg_request(host->breq); 643 521 } 644 522 ··· 646 524 { 647 525 struct mg_host *host = (struct mg_host *)data; 648 526 char *name; 649 - struct request *req; 650 527 651 528 spin_lock_irq(&host->lock); 652 529 653 - req = elv_next_request(host->breq); 654 - if (!req) 530 + if (!host->req) 655 531 goto out_unlock; 656 532 657 533 host->mg_do_intr = NULL; 658 534 659 - name = req->rq_disk->disk_name; 535 + name = host->req->rq_disk->disk_name; 660 536 printk(KERN_DEBUG "%s: timeout\n", name); 661 537 662 538 host->error = MG_ERR_TIMEOUT; 663 539 mg_bad_rw_intr(host); 664 540 665 - mg_request(host->breq); 666 541 out_unlock: 542 + mg_request(host->breq); 667 543 spin_unlock_irq(&host->lock); 668 544 } 669 545 670 546 static void mg_request_poll(struct request_queue *q) 671 547 { 672 - struct request *req; 673 - struct mg_host *host; 548 + struct mg_host *host = q->queuedata; 674 549 675 - while ((req = elv_next_request(q)) != NULL) { 676 - host = req->rq_disk->private_data; 677 - if (blk_fs_request(req)) { 678 - switch (rq_data_dir(req)) { 679 - case READ: 680 - mg_read(req); 550 + while (1) { 551 + if (!host->req) { 552 + host->req = blk_fetch_request(q); 553 + if (!host->req) 681 554 break; 682 - case WRITE: 683 - mg_write(req); 684 - break; 685 - default: 686 - printk(KERN_WARNING "%s:%d unknown command\n", 687 - __func__, __LINE__); 688 - end_request(req, 0); 689 - break; 690 - } 691 555 } 556 + 557 + if (unlikely(!blk_fs_request(host->req))) { 558 + mg_end_request_cur(host, -EIO); 559 + 
continue; 560 + } 561 + 562 + if (rq_data_dir(host->req) == READ) 563 + mg_read(host->req); 564 + else 565 + mg_write(host->req); 692 566 } 693 567 } 694 568 ··· 706 588 break; 707 589 case WRITE: 708 590 /* TODO : handler */ 709 - outb(MG_REG_CTRL_INTR_DISABLE, 710 - (unsigned long)host->dev_base + 711 - MG_REG_DRV_CTRL); 591 + outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL); 712 592 if (mg_out(host, sect_num, sect_cnt, MG_CMD_WR, &mg_write_intr) 713 593 != MG_ERR_NONE) { 714 594 mg_bad_rw_intr(host); 715 595 return host->error; 716 596 } 717 597 del_timer(&host->timer); 718 - mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ, MG_TMAX_WAIT_WR_DRQ); 719 - outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base + 720 - MG_REG_DRV_CTRL); 598 + mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ); 599 + outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL); 721 600 if (host->error) { 722 601 mg_bad_rw_intr(host); 723 602 return host->error; ··· 729 614 outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + 730 615 MG_REG_COMMAND); 731 616 break; 732 - default: 733 - printk(KERN_WARNING "%s:%d unknown command\n", 734 - __func__, __LINE__); 735 - end_request(req, 0); 736 - break; 737 617 } 738 618 return MG_ERR_NONE; 739 619 } ··· 736 626 /* This function also called from IRQ context */ 737 627 static void mg_request(struct request_queue *q) 738 628 { 629 + struct mg_host *host = q->queuedata; 739 630 struct request *req; 740 - struct mg_host *host; 741 631 u32 sect_num, sect_cnt; 742 632 743 633 while (1) { 744 - req = elv_next_request(q); 745 - if (!req) 746 - return; 747 - 748 - host = req->rq_disk->private_data; 634 + if (!host->req) { 635 + host->req = blk_fetch_request(q); 636 + if (!host->req) 637 + break; 638 + } 639 + req = host->req; 749 640 750 641 /* check unwanted request call */ 751 642 if (host->mg_do_intr) ··· 754 643 755 644 del_timer(&host->timer); 756 645 757 - sect_num = req->sector; 646 + sect_num = blk_rq_pos(req); 758 647 /* deal whole segments 
*/ 759 - sect_cnt = req->nr_sectors; 648 + sect_cnt = blk_rq_sectors(req); 760 649 761 650 /* sanity check */ 762 651 if (sect_num >= get_capacity(req->rq_disk) || ··· 766 655 "%s: bad access: sector=%d, count=%d\n", 767 656 req->rq_disk->disk_name, 768 657 sect_num, sect_cnt); 769 - end_request(req, 0); 658 + mg_end_request_cur(host, -EIO); 770 659 continue; 771 660 } 772 661 773 - if (!blk_fs_request(req)) 774 - return; 662 + if (unlikely(!blk_fs_request(req))) { 663 + mg_end_request_cur(host, -EIO); 664 + continue; 665 + } 775 666 776 667 if (!mg_issue_req(req, host, sect_num, sect_cnt)) 777 668 return; ··· 803 690 return -EIO; 804 691 805 692 if (!prv_data->use_polling) 806 - outb(MG_REG_CTRL_INTR_DISABLE, 807 - (unsigned long)host->dev_base + 808 - MG_REG_DRV_CTRL); 693 + outb(ATA_NIEN, (unsigned long)host->dev_base + MG_REG_DRV_CTRL); 809 694 810 695 outb(MG_CMD_SLEEP, (unsigned long)host->dev_base + MG_REG_COMMAND); 811 696 /* wait until mflash deep sleep */ ··· 811 700 812 701 if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) { 813 702 if (!prv_data->use_polling) 814 - outb(MG_REG_CTRL_INTR_ENABLE, 815 - (unsigned long)host->dev_base + 816 - MG_REG_DRV_CTRL); 703 + outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL); 817 704 return -EIO; 818 705 } 819 706 ··· 834 725 return -EIO; 835 726 836 727 if (!prv_data->use_polling) 837 - outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base + 838 - MG_REG_DRV_CTRL); 728 + outb(0, (unsigned long)host->dev_base + MG_REG_DRV_CTRL); 839 729 840 730 return 0; 841 731 } ··· 985 877 __func__, __LINE__); 986 878 goto probe_err_5; 987 879 } 880 + host->breq->queuedata = host; 988 881 989 882 /* mflash is random device, thanx for the noop */ 990 883 elevator_exit(host->breq->elevator); ··· 996 887 goto probe_err_6; 997 888 } 998 889 blk_queue_max_sectors(host->breq, MG_MAX_SECTS); 999 - blk_queue_hardsect_size(host->breq, MG_SECTOR_SIZE); 890 + blk_queue_logical_block_size(host->breq, MG_SECTOR_SIZE); 1000 891 
1001 892 init_timer(&host->timer); 1002 893 host->timer.function = mg_times_out;
+7 -16
drivers/block/nbd.c
··· 110 110 req, error ? "failed" : "done"); 111 111 112 112 spin_lock_irqsave(q->queue_lock, flags); 113 - __blk_end_request(req, error, req->nr_sectors << 9); 113 + __blk_end_request_all(req, error); 114 114 spin_unlock_irqrestore(q->queue_lock, flags); 115 115 } 116 116 ··· 231 231 { 232 232 int result, flags; 233 233 struct nbd_request request; 234 - unsigned long size = req->nr_sectors << 9; 234 + unsigned long size = blk_rq_bytes(req); 235 235 236 236 request.magic = htonl(NBD_REQUEST_MAGIC); 237 237 request.type = htonl(nbd_cmd(req)); 238 - request.from = cpu_to_be64((u64) req->sector << 9); 238 + request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9); 239 239 request.len = htonl(size); 240 240 memcpy(request.handle, &req, sizeof(req)); 241 241 242 - dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%luB)\n", 242 + dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%uB)\n", 243 243 lo->disk->disk_name, req, 244 244 nbdcmd_to_ascii(nbd_cmd(req)), 245 - (unsigned long long)req->sector << 9, 246 - req->nr_sectors << 9); 245 + (unsigned long long)blk_rq_pos(req) << 9, 246 + blk_rq_bytes(req)); 247 247 result = sock_xmit(lo, 1, &request, sizeof(request), 248 248 (nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0); 249 249 if (result <= 0) { ··· 533 533 { 534 534 struct request *req; 535 535 536 - while ((req = elv_next_request(q)) != NULL) { 536 + while ((req = blk_fetch_request(q)) != NULL) { 537 537 struct nbd_device *lo; 538 - 539 - blkdev_dequeue_request(req); 540 538 541 539 spin_unlock_irq(q->queue_lock); 542 540 ··· 578 580 blk_rq_init(NULL, &sreq); 579 581 sreq.cmd_type = REQ_TYPE_SPECIAL; 580 582 nbd_cmd(&sreq) = NBD_CMD_DISC; 581 - /* 582 - * Set these to sane values in case server implementation 583 - * fails to check the request type first and also to keep 584 - * debugging output cleaner. 585 - */ 586 - sreq.sector = 0; 587 - sreq.nr_sectors = 0; 588 583 if (!lo->sock) 589 584 return -EINVAL; 590 585 nbd_send_req(lo, &sreq);
+17 -12
drivers/block/paride/pcd.c
··· 719 719 if (pcd_busy) 720 720 return; 721 721 while (1) { 722 - pcd_req = elv_next_request(q); 723 - if (!pcd_req) 724 - return; 722 + if (!pcd_req) { 723 + pcd_req = blk_fetch_request(q); 724 + if (!pcd_req) 725 + return; 726 + } 725 727 726 728 if (rq_data_dir(pcd_req) == READ) { 727 729 struct pcd_unit *cd = pcd_req->rq_disk->private_data; 728 730 if (cd != pcd_current) 729 731 pcd_bufblk = -1; 730 732 pcd_current = cd; 731 - pcd_sector = pcd_req->sector; 732 - pcd_count = pcd_req->current_nr_sectors; 733 + pcd_sector = blk_rq_pos(pcd_req); 734 + pcd_count = blk_rq_cur_sectors(pcd_req); 733 735 pcd_buf = pcd_req->buffer; 734 736 pcd_busy = 1; 735 737 ps_set_intr(do_pcd_read, NULL, 0, nice); 736 738 return; 737 - } else 738 - end_request(pcd_req, 0); 739 + } else { 740 + __blk_end_request_all(pcd_req, -EIO); 741 + pcd_req = NULL; 742 + } 739 743 } 740 744 } 741 745 742 - static inline void next_request(int success) 746 + static inline void next_request(int err) 743 747 { 744 748 unsigned long saved_flags; 745 749 746 750 spin_lock_irqsave(&pcd_lock, saved_flags); 747 - end_request(pcd_req, success); 751 + if (!__blk_end_request_cur(pcd_req, err)) 752 + pcd_req = NULL; 748 753 pcd_busy = 0; 749 754 do_pcd_request(pcd_queue); 750 755 spin_unlock_irqrestore(&pcd_lock, saved_flags); ··· 786 781 787 782 if (pcd_command(pcd_current, rd_cmd, 2048, "read block")) { 788 783 pcd_bufblk = -1; 789 - next_request(0); 784 + next_request(-EIO); 790 785 return; 791 786 } 792 787 ··· 801 796 pcd_retries = 0; 802 797 pcd_transfer(); 803 798 if (!pcd_count) { 804 - next_request(1); 799 + next_request(0); 805 800 return; 806 801 } 807 802 ··· 820 815 return; 821 816 } 822 817 pcd_bufblk = -1; 823 - next_request(0); 818 + next_request(-EIO); 824 819 return; 825 820 } 826 821
+12 -10
drivers/block/paride/pd.c
··· 410 410 pd_claimed = 0; 411 411 phase = NULL; 412 412 spin_lock_irqsave(&pd_lock, saved_flags); 413 - end_request(pd_req, res); 414 - pd_req = elv_next_request(pd_queue); 415 - if (!pd_req) 416 - stop = 1; 413 + if (!__blk_end_request_cur(pd_req, 414 + res == Ok ? 0 : -EIO)) { 415 + pd_req = blk_fetch_request(pd_queue); 416 + if (!pd_req) 417 + stop = 1; 418 + } 417 419 spin_unlock_irqrestore(&pd_lock, saved_flags); 418 420 if (stop) 419 421 return; ··· 445 443 446 444 pd_cmd = rq_data_dir(pd_req); 447 445 if (pd_cmd == READ || pd_cmd == WRITE) { 448 - pd_block = pd_req->sector; 449 - pd_count = pd_req->current_nr_sectors; 446 + pd_block = blk_rq_pos(pd_req); 447 + pd_count = blk_rq_cur_sectors(pd_req); 450 448 if (pd_block + pd_count > get_capacity(pd_req->rq_disk)) 451 449 return Fail; 452 - pd_run = pd_req->nr_sectors; 450 + pd_run = blk_rq_sectors(pd_req); 453 451 pd_buf = pd_req->buffer; 454 452 pd_retries = 0; 455 453 if (pd_cmd == READ) ··· 479 477 if (pd_count) 480 478 return 0; 481 479 spin_lock_irqsave(&pd_lock, saved_flags); 482 - end_request(pd_req, 1); 483 - pd_count = pd_req->current_nr_sectors; 480 + __blk_end_request_cur(pd_req, 0); 481 + pd_count = blk_rq_cur_sectors(pd_req); 484 482 pd_buf = pd_req->buffer; 485 483 spin_unlock_irqrestore(&pd_lock, saved_flags); 486 484 return 0; ··· 704 702 { 705 703 if (pd_req) 706 704 return; 707 - pd_req = elv_next_request(q); 705 + pd_req = blk_fetch_request(q); 708 706 if (!pd_req) 709 707 return; 710 708
+23 -24
drivers/block/paride/pf.c
··· 750 750 751 751 static struct request_queue *pf_queue; 752 752 753 - static void pf_end_request(int uptodate) 753 + static void pf_end_request(int err) 754 754 { 755 - if (pf_req) { 756 - end_request(pf_req, uptodate); 755 + if (pf_req && !__blk_end_request_cur(pf_req, err)) 757 756 pf_req = NULL; 758 - } 759 757 } 760 758 761 759 static void do_pf_request(struct request_queue * q) ··· 761 763 if (pf_busy) 762 764 return; 763 765 repeat: 764 - pf_req = elv_next_request(q); 765 - if (!pf_req) 766 - return; 766 + if (!pf_req) { 767 + pf_req = blk_fetch_request(q); 768 + if (!pf_req) 769 + return; 770 + } 767 771 768 772 pf_current = pf_req->rq_disk->private_data; 769 - pf_block = pf_req->sector; 770 - pf_run = pf_req->nr_sectors; 771 - pf_count = pf_req->current_nr_sectors; 773 + pf_block = blk_rq_pos(pf_req); 774 + pf_run = blk_rq_sectors(pf_req); 775 + pf_count = blk_rq_cur_sectors(pf_req); 772 776 773 777 if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) { 774 - pf_end_request(0); 778 + pf_end_request(-EIO); 775 779 goto repeat; 776 780 } 777 781 ··· 788 788 pi_do_claimed(pf_current->pi, do_pf_write); 789 789 else { 790 790 pf_busy = 0; 791 - pf_end_request(0); 791 + pf_end_request(-EIO); 792 792 goto repeat; 793 793 } 794 794 } ··· 805 805 return 1; 806 806 if (!pf_count) { 807 807 spin_lock_irqsave(&pf_spin_lock, saved_flags); 808 - pf_end_request(1); 809 - pf_req = elv_next_request(pf_queue); 808 + pf_end_request(0); 810 809 spin_unlock_irqrestore(&pf_spin_lock, saved_flags); 811 810 if (!pf_req) 812 811 return 1; 813 - pf_count = pf_req->current_nr_sectors; 812 + pf_count = blk_rq_cur_sectors(pf_req); 814 813 pf_buf = pf_req->buffer; 815 814 } 816 815 return 0; 817 816 } 818 817 819 - static inline void next_request(int success) 818 + static inline void next_request(int err) 820 819 { 821 820 unsigned long saved_flags; 822 821 823 822 spin_lock_irqsave(&pf_spin_lock, saved_flags); 824 - pf_end_request(success); 823 + pf_end_request(err); 825 824 
pf_busy = 0; 826 825 do_pf_request(pf_queue); 827 826 spin_unlock_irqrestore(&pf_spin_lock, saved_flags); ··· 843 844 pi_do_claimed(pf_current->pi, do_pf_read_start); 844 845 return; 845 846 } 846 - next_request(0); 847 + next_request(-EIO); 847 848 return; 848 849 } 849 850 pf_mask = STAT_DRQ; ··· 862 863 pi_do_claimed(pf_current->pi, do_pf_read_start); 863 864 return; 864 865 } 865 - next_request(0); 866 + next_request(-EIO); 866 867 return; 867 868 } 868 869 pi_read_block(pf_current->pi, pf_buf, 512); ··· 870 871 break; 871 872 } 872 873 pi_disconnect(pf_current->pi); 873 - next_request(1); 874 + next_request(0); 874 875 } 875 876 876 877 static void do_pf_write(void) ··· 889 890 pi_do_claimed(pf_current->pi, do_pf_write_start); 890 891 return; 891 892 } 892 - next_request(0); 893 + next_request(-EIO); 893 894 return; 894 895 } 895 896 ··· 902 903 pi_do_claimed(pf_current->pi, do_pf_write_start); 903 904 return; 904 905 } 905 - next_request(0); 906 + next_request(-EIO); 906 907 return; 907 908 } 908 909 pi_write_block(pf_current->pi, pf_buf, 512); ··· 922 923 pi_do_claimed(pf_current->pi, do_pf_write_start); 923 924 return; 924 925 } 925 - next_request(0); 926 + next_request(-EIO); 926 927 return; 927 928 } 928 929 pi_disconnect(pf_current->pi); 929 - next_request(1); 930 + next_request(0); 930 931 } 931 932 932 933 static int __init pf_init(void)
+5 -3
drivers/block/pktcdvd.c
··· 991 991 */ 992 992 static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q) 993 993 { 994 - if ((pd->settings.size << 9) / CD_FRAMESIZE <= q->max_phys_segments) { 994 + if ((pd->settings.size << 9) / CD_FRAMESIZE 995 + <= queue_max_phys_segments(q)) { 995 996 /* 996 997 * The cdrom device can handle one segment/frame 997 998 */ 998 999 clear_bit(PACKET_MERGE_SEGS, &pd->flags); 999 1000 return 0; 1000 - } else if ((pd->settings.size << 9) / PAGE_SIZE <= q->max_phys_segments) { 1001 + } else if ((pd->settings.size << 9) / PAGE_SIZE 1002 + <= queue_max_phys_segments(q)) { 1001 1003 /* 1002 1004 * We can handle this case at the expense of some extra memory 1003 1005 * copies during write operations ··· 2659 2657 struct request_queue *q = pd->disk->queue; 2660 2658 2661 2659 blk_queue_make_request(q, pkt_make_request); 2662 - blk_queue_hardsect_size(q, CD_FRAMESIZE); 2660 + blk_queue_logical_block_size(q, CD_FRAMESIZE); 2663 2661 blk_queue_max_sectors(q, PACKET_MAX_SECTORS); 2664 2662 blk_queue_merge_bvec(q, pkt_merge_bvec); 2665 2663 q->queuedata = pd;
+10 -14
drivers/block/ps3disk.c
··· 134 134 rq_for_each_segment(bv, req, iter) 135 135 n++; 136 136 dev_dbg(&dev->sbd.core, 137 - "%s:%u: %s req has %u bvecs for %lu sectors %lu hard sectors\n", 138 - __func__, __LINE__, op, n, req->nr_sectors, 139 - req->hard_nr_sectors); 137 + "%s:%u: %s req has %u bvecs for %u sectors\n", 138 + __func__, __LINE__, op, n, blk_rq_sectors(req)); 140 139 #endif 141 140 142 - start_sector = req->sector * priv->blocking_factor; 143 - sectors = req->nr_sectors * priv->blocking_factor; 141 + start_sector = blk_rq_pos(req) * priv->blocking_factor; 142 + sectors = blk_rq_sectors(req) * priv->blocking_factor; 144 143 dev_dbg(&dev->sbd.core, "%s:%u: %s %llu sectors starting at %llu\n", 145 144 __func__, __LINE__, op, sectors, start_sector); 146 145 ··· 157 158 if (res) { 158 159 dev_err(&dev->sbd.core, "%s:%u: %s failed %d\n", __func__, 159 160 __LINE__, op, res); 160 - end_request(req, 0); 161 + __blk_end_request_all(req, -EIO); 161 162 return 0; 162 163 } 163 164 ··· 179 180 if (res) { 180 181 dev_err(&dev->sbd.core, "%s:%u: sync cache failed 0x%llx\n", 181 182 __func__, __LINE__, res); 182 - end_request(req, 0); 183 + __blk_end_request_all(req, -EIO); 183 184 return 0; 184 185 } 185 186 ··· 194 195 195 196 dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__); 196 197 197 - while ((req = elv_next_request(q))) { 198 + while ((req = blk_fetch_request(q))) { 198 199 if (blk_fs_request(req)) { 199 200 if (ps3disk_submit_request_sg(dev, req)) 200 201 break; ··· 204 205 break; 205 206 } else { 206 207 blk_dump_rq_flags(req, DEVICE_NAME " bad request"); 207 - end_request(req, 0); 208 + __blk_end_request_all(req, -EIO); 208 209 continue; 209 210 } 210 211 } ··· 230 231 struct request *req; 231 232 int res, read, error; 232 233 u64 tag, status; 233 - unsigned long num_sectors; 234 234 const char *op; 235 235 236 236 res = lv1_storage_get_async_status(dev->sbd.dev_id, &tag, &status); ··· 259 261 if (req->cmd_type == REQ_TYPE_LINUX_BLOCK && 260 262 req->cmd[0] == 
REQ_LB_OP_FLUSH) { 261 263 read = 0; 262 - num_sectors = req->hard_cur_sectors; 263 264 op = "flush"; 264 265 } else { 265 266 read = !rq_data_dir(req); 266 - num_sectors = req->nr_sectors; 267 267 op = read ? "read" : "write"; 268 268 } 269 269 if (status) { ··· 277 281 } 278 282 279 283 spin_lock(&priv->lock); 280 - __blk_end_request(req, error, num_sectors << 9); 284 + __blk_end_request_all(req, error); 281 285 priv->req = NULL; 282 286 ps3disk_do_request(dev, priv->queue); 283 287 spin_unlock(&priv->lock); ··· 477 481 blk_queue_max_sectors(queue, dev->bounce_size >> 9); 478 482 blk_queue_segment_boundary(queue, -1UL); 479 483 blk_queue_dma_alignment(queue, dev->blk_size-1); 480 - blk_queue_hardsect_size(queue, dev->blk_size); 484 + blk_queue_logical_block_size(queue, dev->blk_size); 481 485 482 486 blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH, 483 487 ps3disk_prepare_flush);
+4 -10
drivers/block/sunvdc.c
··· 212 212 vdc_finish(&port->vio, -err, WAITING_FOR_GEN_CMD); 213 213 } 214 214 215 - static void vdc_end_request(struct request *req, int error, int num_sectors) 216 - { 217 - __blk_end_request(req, error, num_sectors << 9); 218 - } 219 - 220 215 static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr, 221 216 unsigned int index) 222 217 { ··· 234 239 235 240 rqe->req = NULL; 236 241 237 - vdc_end_request(req, (desc->status ? -EIO : 0), desc->size >> 9); 242 + __blk_end_request(req, (desc->status ? -EIO : 0), desc->size); 238 243 239 244 if (blk_queue_stopped(port->disk->queue)) 240 245 blk_start_queue(port->disk->queue); ··· 416 421 desc->slice = 0; 417 422 } 418 423 desc->status = ~0; 419 - desc->offset = (req->sector << 9) / port->vdisk_block_size; 424 + desc->offset = (blk_rq_pos(req) << 9) / port->vdisk_block_size; 420 425 desc->size = len; 421 426 desc->ncookies = err; 422 427 ··· 441 446 static void do_vdc_request(struct request_queue *q) 442 447 { 443 448 while (1) { 444 - struct request *req = elv_next_request(q); 449 + struct request *req = blk_fetch_request(q); 445 450 446 451 if (!req) 447 452 break; 448 453 449 - blkdev_dequeue_request(req); 450 454 if (__send_request(req) < 0) 451 - vdc_end_request(req, -EIO, req->hard_nr_sectors); 455 + __blk_end_request_all(req, -EIO); 452 456 } 453 457 } 454 458
+17 -31
drivers/block/swim.c
··· 514 514 ret = swim_read_sector(fs, side, track, sector, 515 515 buffer); 516 516 if (try-- == 0) 517 - return -1; 517 + return -EIO; 518 518 } while (ret != 512); 519 519 520 520 buffer += ret; ··· 528 528 struct request *req; 529 529 struct floppy_state *fs; 530 530 531 - while ((req = elv_next_request(q))) { 531 + req = blk_fetch_request(q); 532 + while (req) { 533 + int err = -EIO; 532 534 533 535 fs = req->rq_disk->private_data; 534 - if (req->sector < 0 || req->sector >= fs->total_secs) { 535 - end_request(req, 0); 536 - continue; 537 - } 538 - if (req->current_nr_sectors == 0) { 539 - end_request(req, 1); 540 - continue; 541 - } 542 - if (!fs->disk_in) { 543 - end_request(req, 0); 544 - continue; 545 - } 546 - if (rq_data_dir(req) == WRITE) { 547 - if (fs->write_protected) { 548 - end_request(req, 0); 549 - continue; 550 - } 551 - } 536 + if (blk_rq_pos(req) >= fs->total_secs) 537 + goto done; 538 + if (!fs->disk_in) 539 + goto done; 540 + if (rq_data_dir(req) == WRITE && fs->write_protected) 541 + goto done; 542 + 552 543 switch (rq_data_dir(req)) { 553 544 case WRITE: 554 545 /* NOT IMPLEMENTED */ 555 - end_request(req, 0); 556 546 break; 557 547 case READ: 558 - if (floppy_read_sectors(fs, req->sector, 559 - req->current_nr_sectors, 560 - req->buffer)) { 561 - end_request(req, 0); 562 - continue; 563 - } 564 - req->nr_sectors -= req->current_nr_sectors; 565 - req->sector += req->current_nr_sectors; 566 - req->buffer += req->current_nr_sectors * 512; 567 - end_request(req, 1); 548 + err = floppy_read_sectors(fs, blk_rq_pos(req), 549 + blk_rq_cur_sectors(req), 550 + req->buffer); 568 551 break; 569 552 } 553 + done: 554 + if (!__blk_end_request_cur(req, err)) 555 + req = blk_fetch_request(q); 570 556 } 571 557 } 572 558
+54 -53
drivers/block/swim3.c
··· 251 251 static int floppy_check_change(struct gendisk *disk); 252 252 static int floppy_revalidate(struct gendisk *disk); 253 253 254 + static bool swim3_end_request(int err, unsigned int nr_bytes) 255 + { 256 + if (__blk_end_request(fd_req, err, nr_bytes)) 257 + return true; 258 + 259 + fd_req = NULL; 260 + return false; 261 + } 262 + 263 + static bool swim3_end_request_cur(int err) 264 + { 265 + return swim3_end_request(err, blk_rq_cur_bytes(fd_req)); 266 + } 267 + 254 268 static void swim3_select(struct floppy_state *fs, int sel) 255 269 { 256 270 struct swim3 __iomem *sw = fs->swim3; ··· 324 310 wake_up(&fs->wait); 325 311 return; 326 312 } 327 - while (fs->state == idle && (req = elv_next_request(swim3_queue))) { 313 + while (fs->state == idle) { 314 + if (!fd_req) { 315 + fd_req = blk_fetch_request(swim3_queue); 316 + if (!fd_req) 317 + break; 318 + } 319 + req = fd_req; 328 320 #if 0 329 - printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%ld buf=%p\n", 321 + printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n", 330 322 req->rq_disk->disk_name, req->cmd, 331 - (long)req->sector, req->nr_sectors, req->buffer); 332 - printk(" errors=%d current_nr_sectors=%ld\n", 333 - req->errors, req->current_nr_sectors); 323 + (long)blk_rq_pos(req), blk_rq_sectors(req), req->buffer); 324 + printk(" errors=%d current_nr_sectors=%u\n", 325 + req->errors, blk_rq_cur_sectors(req)); 334 326 #endif 335 327 336 - if (req->sector < 0 || req->sector >= fs->total_secs) { 337 - end_request(req, 0); 338 - continue; 339 - } 340 - if (req->current_nr_sectors == 0) { 341 - end_request(req, 1); 328 + if (blk_rq_pos(req) >= fs->total_secs) { 329 + swim3_end_request_cur(-EIO); 342 330 continue; 343 331 } 344 332 if (fs->ejected) { 345 - end_request(req, 0); 333 + swim3_end_request_cur(-EIO); 346 334 continue; 347 335 } 348 336 ··· 352 336 if (fs->write_prot < 0) 353 337 fs->write_prot = swim3_readbit(fs, WRITE_PROT); 354 338 if (fs->write_prot) { 355 - end_request(req, 0); 339 + 
swim3_end_request_cur(-EIO); 356 340 continue; 357 341 } 358 342 } 359 343 360 - /* Do not remove the cast. req->sector is now a sector_t and 361 - * can be 64 bits, but it will never go past 32 bits for this 362 - * driver anyway, so we can safely cast it down and not have 363 - * to do a 64/32 division 344 + /* Do not remove the cast. blk_rq_pos(req) is now a 345 + * sector_t and can be 64 bits, but it will never go 346 + * past 32 bits for this driver anyway, so we can 347 + * safely cast it down and not have to do a 64/32 348 + * division 364 349 */ 365 - fs->req_cyl = ((long)req->sector) / fs->secpercyl; 366 - x = ((long)req->sector) % fs->secpercyl; 350 + fs->req_cyl = ((long)blk_rq_pos(req)) / fs->secpercyl; 351 + x = ((long)blk_rq_pos(req)) % fs->secpercyl; 367 352 fs->head = x / fs->secpertrack; 368 353 fs->req_sector = x % fs->secpertrack + 1; 369 354 fd_req = req; ··· 441 424 struct dbdma_cmd *cp = fs->dma_cmd; 442 425 struct dbdma_regs __iomem *dr = fs->dma; 443 426 444 - if (fd_req->current_nr_sectors <= 0) { 427 + if (blk_rq_cur_sectors(fd_req) <= 0) { 445 428 printk(KERN_ERR "swim3: transfer 0 sectors?\n"); 446 429 return; 447 430 } ··· 449 432 n = 1; 450 433 else { 451 434 n = fs->secpertrack - fs->req_sector + 1; 452 - if (n > fd_req->current_nr_sectors) 453 - n = fd_req->current_nr_sectors; 435 + if (n > blk_rq_cur_sectors(fd_req)) 436 + n = blk_rq_cur_sectors(fd_req); 454 437 } 455 438 fs->scount = n; 456 439 swim3_select(fs, fs->head? 
READ_DATA_1: READ_DATA_0); ··· 525 508 case do_transfer: 526 509 if (fs->cur_cyl != fs->req_cyl) { 527 510 if (fs->retries > 5) { 528 - end_request(fd_req, 0); 511 + swim3_end_request_cur(-EIO); 529 512 fs->state = idle; 530 513 return; 531 514 } ··· 557 540 out_8(&sw->intr_enable, 0); 558 541 fs->cur_cyl = -1; 559 542 if (fs->retries > 5) { 560 - end_request(fd_req, 0); 543 + swim3_end_request_cur(-EIO); 561 544 fs->state = idle; 562 545 start_request(fs); 563 546 } else { ··· 576 559 out_8(&sw->select, RELAX); 577 560 out_8(&sw->intr_enable, 0); 578 561 printk(KERN_ERR "swim3: seek timeout\n"); 579 - end_request(fd_req, 0); 562 + swim3_end_request_cur(-EIO); 580 563 fs->state = idle; 581 564 start_request(fs); 582 565 } ··· 600 583 return; 601 584 } 602 585 printk(KERN_ERR "swim3: seek settle timeout\n"); 603 - end_request(fd_req, 0); 586 + swim3_end_request_cur(-EIO); 604 587 fs->state = idle; 605 588 start_request(fs); 606 589 } ··· 610 593 struct floppy_state *fs = (struct floppy_state *) data; 611 594 struct swim3 __iomem *sw = fs->swim3; 612 595 struct dbdma_regs __iomem *dr = fs->dma; 613 - struct dbdma_cmd *cp = fs->dma_cmd; 614 - unsigned long s; 615 596 int n; 616 597 617 598 fs->timeout_pending = 0; ··· 620 605 out_8(&sw->intr_enable, 0); 621 606 out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION); 622 607 out_8(&sw->select, RELAX); 623 - if (rq_data_dir(fd_req) == WRITE) 624 - ++cp; 625 - if (ld_le16(&cp->xfer_status) != 0) 626 - s = fs->scount - ((ld_le16(&cp->res_count) + 511) >> 9); 627 - else 628 - s = 0; 629 - fd_req->sector += s; 630 - fd_req->current_nr_sectors -= s; 631 608 printk(KERN_ERR "swim3: timeout %sing sector %ld\n", 632 - (rq_data_dir(fd_req)==WRITE? "writ": "read"), (long)fd_req->sector); 633 - end_request(fd_req, 0); 609 + (rq_data_dir(fd_req)==WRITE? 
"writ": "read"), 610 + (long)blk_rq_pos(fd_req)); 611 + swim3_end_request_cur(-EIO); 634 612 fs->state = idle; 635 613 start_request(fs); 636 614 } ··· 654 646 printk(KERN_ERR "swim3: seen sector but cyl=ff?\n"); 655 647 fs->cur_cyl = -1; 656 648 if (fs->retries > 5) { 657 - end_request(fd_req, 0); 649 + swim3_end_request_cur(-EIO); 658 650 fs->state = idle; 659 651 start_request(fs); 660 652 } else { ··· 727 719 if (intr & ERROR_INTR) { 728 720 n = fs->scount - 1 - resid / 512; 729 721 if (n > 0) { 730 - fd_req->sector += n; 731 - fd_req->current_nr_sectors -= n; 732 - fd_req->buffer += n * 512; 722 + blk_update_request(fd_req, 0, n << 9); 733 723 fs->req_sector += n; 734 724 } 735 725 if (fs->retries < 5) { ··· 736 730 } else { 737 731 printk("swim3: error %sing block %ld (err=%x)\n", 738 732 rq_data_dir(fd_req) == WRITE? "writ": "read", 739 - (long)fd_req->sector, err); 740 - end_request(fd_req, 0); 733 + (long)blk_rq_pos(fd_req), err); 734 + swim3_end_request_cur(-EIO); 741 735 fs->state = idle; 742 736 } 743 737 } else { ··· 746 740 printk(KERN_ERR "swim3: fd dma: stat=%x resid=%d\n", stat, resid); 747 741 printk(KERN_ERR " state=%d, dir=%x, intr=%x, err=%x\n", 748 742 fs->state, rq_data_dir(fd_req), intr, err); 749 - end_request(fd_req, 0); 743 + swim3_end_request_cur(-EIO); 750 744 fs->state = idle; 751 745 start_request(fs); 752 746 break; 753 747 } 754 - fd_req->sector += fs->scount; 755 - fd_req->current_nr_sectors -= fs->scount; 756 - fd_req->buffer += fs->scount * 512; 757 - if (fd_req->current_nr_sectors <= 0) { 758 - end_request(fd_req, 1); 759 - fs->state = idle; 760 - } else { 748 + if (swim3_end_request(0, fs->scount << 9)) { 761 749 fs->req_sector += fs->scount; 762 750 if (fs->req_sector > fs->secpertrack) { 763 751 fs->req_sector -= fs->secpertrack; ··· 761 761 } 762 762 } 763 763 act(fs); 764 - } 764 + } else 765 + fs->state = idle; 765 766 } 766 767 if (fs->state == idle) 767 768 start_request(fs);
+7 -10
drivers/block/sx8.c
··· 749 749 struct request *req = crq->rq; 750 750 int rc; 751 751 752 - rc = __blk_end_request(req, error, blk_rq_bytes(req)); 753 - assert(rc == 0); 752 + __blk_end_request_all(req, error); 754 753 755 754 rc = carm_put_request(host, crq); 756 755 assert(rc == 0); ··· 810 811 811 812 while (1) { 812 813 DPRINTK("get req\n"); 813 - rq = elv_next_request(q); 814 + rq = blk_fetch_request(q); 814 815 if (!rq) 815 816 break; 816 - 817 - blkdev_dequeue_request(rq); 818 817 819 818 crq = rq->special; 820 819 assert(crq != NULL); ··· 844 847 845 848 queue_one_request: 846 849 VPRINTK("get req\n"); 847 - rq = elv_next_request(q); 850 + rq = blk_peek_request(q); 848 851 if (!rq) 849 852 return; 850 853 ··· 855 858 } 856 859 crq->rq = rq; 857 860 858 - blkdev_dequeue_request(rq); 861 + blk_start_request(rq); 859 862 860 863 if (rq_data_dir(rq) == WRITE) { 861 864 writing = 1; ··· 901 904 msg->sg_count = n_elem; 902 905 msg->sg_type = SGT_32BIT; 903 906 msg->handle = cpu_to_le32(TAG_ENCODE(crq->tag)); 904 - msg->lba = cpu_to_le32(rq->sector & 0xffffffff); 905 - tmp = (rq->sector >> 16) >> 16; 907 + msg->lba = cpu_to_le32(blk_rq_pos(rq) & 0xffffffff); 908 + tmp = (blk_rq_pos(rq) >> 16) >> 16; 906 909 msg->lba_high = cpu_to_le16( (u16) tmp ); 907 - msg->lba_count = cpu_to_le16(rq->nr_sectors); 910 + msg->lba_count = cpu_to_le16(blk_rq_sectors(rq)); 908 911 909 912 msg_size = sizeof(struct carm_msg_rw) - sizeof(msg->sg); 910 913 for (i = 0; i < n_elem; i++) {
+22 -32
drivers/block/ub.c
··· 360 360 static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun, 361 361 struct ub_scsi_cmd *cmd, struct ub_request *urq); 362 362 static void ub_rw_cmd_done(struct ub_dev *sc, struct ub_scsi_cmd *cmd); 363 - static void ub_end_rq(struct request *rq, unsigned int status, 364 - unsigned int cmd_len); 363 + static void ub_end_rq(struct request *rq, unsigned int status); 365 364 static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun, 366 365 struct ub_request *urq, struct ub_scsi_cmd *cmd); 367 366 static int ub_submit_scsi(struct ub_dev *sc, struct ub_scsi_cmd *cmd); ··· 626 627 struct ub_lun *lun = q->queuedata; 627 628 struct request *rq; 628 629 629 - while ((rq = elv_next_request(q)) != NULL) { 630 + while ((rq = blk_peek_request(q)) != NULL) { 630 631 if (ub_request_fn_1(lun, rq) != 0) { 631 632 blk_stop_queue(q); 632 633 break; ··· 642 643 int n_elem; 643 644 644 645 if (atomic_read(&sc->poison)) { 645 - blkdev_dequeue_request(rq); 646 - ub_end_rq(rq, DID_NO_CONNECT << 16, blk_rq_bytes(rq)); 646 + blk_start_request(rq); 647 + ub_end_rq(rq, DID_NO_CONNECT << 16); 647 648 return 0; 648 649 } 649 650 650 651 if (lun->changed && !blk_pc_request(rq)) { 651 - blkdev_dequeue_request(rq); 652 - ub_end_rq(rq, SAM_STAT_CHECK_CONDITION, blk_rq_bytes(rq)); 652 + blk_start_request(rq); 653 + ub_end_rq(rq, SAM_STAT_CHECK_CONDITION); 653 654 return 0; 654 655 } 655 656 ··· 659 660 return -1; 660 661 memset(cmd, 0, sizeof(struct ub_scsi_cmd)); 661 662 662 - blkdev_dequeue_request(rq); 663 + blk_start_request(rq); 663 664 664 665 urq = &lun->urq; 665 666 memset(urq, 0, sizeof(struct ub_request)); ··· 701 702 702 703 drop: 703 704 ub_put_cmd(lun, cmd); 704 - ub_end_rq(rq, DID_ERROR << 16, blk_rq_bytes(rq)); 705 + ub_end_rq(rq, DID_ERROR << 16); 705 706 return 0; 706 707 } 707 708 ··· 722 723 /* 723 724 * build the command 724 725 * 725 - * The call to blk_queue_hardsect_size() guarantees that request 726 + * The call to blk_queue_logical_block_size() 
guarantees that request 726 727 * is aligned, but it is given in terms of 512 byte units, always. 727 728 */ 728 - block = rq->sector >> lun->capacity.bshift; 729 - nblks = rq->nr_sectors >> lun->capacity.bshift; 729 + block = blk_rq_pos(rq) >> lun->capacity.bshift; 730 + nblks = blk_rq_sectors(rq) >> lun->capacity.bshift; 730 731 731 732 cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? READ_10: WRITE_10; 732 733 /* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */ ··· 738 739 cmd->cdb[8] = nblks; 739 740 cmd->cdb_len = 10; 740 741 741 - cmd->len = rq->nr_sectors * 512; 742 + cmd->len = blk_rq_bytes(rq); 742 743 } 743 744 744 745 static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun, ··· 746 747 { 747 748 struct request *rq = urq->rq; 748 749 749 - if (rq->data_len == 0) { 750 + if (blk_rq_bytes(rq) == 0) { 750 751 cmd->dir = UB_DIR_NONE; 751 752 } else { 752 753 if (rq_data_dir(rq) == WRITE) ··· 761 762 memcpy(&cmd->cdb, rq->cmd, rq->cmd_len); 762 763 cmd->cdb_len = rq->cmd_len; 763 764 764 - cmd->len = rq->data_len; 765 + cmd->len = blk_rq_bytes(rq); 765 766 766 767 /* 767 768 * To reapply this to every URB is not as incorrect as it looks. 
··· 776 777 struct ub_request *urq = cmd->back; 777 778 struct request *rq; 778 779 unsigned int scsi_status; 779 - unsigned int cmd_len; 780 780 781 781 rq = urq->rq; 782 782 783 783 if (cmd->error == 0) { 784 784 if (blk_pc_request(rq)) { 785 - if (cmd->act_len >= rq->data_len) 786 - rq->data_len = 0; 785 + if (cmd->act_len >= rq->resid_len) 786 + rq->resid_len = 0; 787 787 else 788 - rq->data_len -= cmd->act_len; 788 + rq->resid_len -= cmd->act_len; 789 789 scsi_status = 0; 790 790 } else { 791 791 if (cmd->act_len != cmd->len) { ··· 816 818 817 819 urq->rq = NULL; 818 820 819 - cmd_len = cmd->len; 820 821 ub_put_cmd(lun, cmd); 821 - ub_end_rq(rq, scsi_status, cmd_len); 822 + ub_end_rq(rq, scsi_status); 822 823 blk_start_queue(lun->disk->queue); 823 824 } 824 825 825 - static void ub_end_rq(struct request *rq, unsigned int scsi_status, 826 - unsigned int cmd_len) 826 + static void ub_end_rq(struct request *rq, unsigned int scsi_status) 827 827 { 828 828 int error; 829 - long rqlen; 830 829 831 830 if (scsi_status == 0) { 832 831 error = 0; ··· 831 836 error = -EIO; 832 837 rq->errors = scsi_status; 833 838 } 834 - rqlen = blk_rq_bytes(rq); /* Oddly enough, this is the residue. */ 835 - if (__blk_end_request(rq, error, cmd_len)) { 836 - printk(KERN_WARNING DRV_NAME 837 - ": __blk_end_request blew, %s-cmd total %u rqlen %ld\n", 838 - blk_pc_request(rq)? 
"pc": "fs", cmd_len, rqlen); 839 - } 839 + __blk_end_request_all(rq, error); 840 840 } 841 841 842 842 static int ub_rw_cmd_retry(struct ub_dev *sc, struct ub_lun *lun, ··· 1749 1759 ub_revalidate(lun->udev, lun); 1750 1760 1751 1761 /* XXX Support sector size switching like in sr.c */ 1752 - blk_queue_hardsect_size(disk->queue, lun->capacity.bsize); 1762 + blk_queue_logical_block_size(disk->queue, lun->capacity.bsize); 1753 1763 set_capacity(disk, lun->capacity.nsec); 1754 1764 // set_disk_ro(sdkp->disk, lun->readonly); 1755 1765 ··· 2324 2334 blk_queue_max_phys_segments(q, UB_MAX_REQ_SG); 2325 2335 blk_queue_segment_boundary(q, 0xffffffff); /* Dubious. */ 2326 2336 blk_queue_max_sectors(q, UB_MAX_SECTORS); 2327 - blk_queue_hardsect_size(q, lun->capacity.bsize); 2337 + blk_queue_logical_block_size(q, lun->capacity.bsize); 2328 2338 2329 2339 lun->disk = disk; 2330 2340 q->queuedata = lun;
+5 -7
drivers/block/viodasd.c
··· 252 252 struct viodasd_device *d; 253 253 unsigned long flags; 254 254 255 - start = (u64)req->sector << 9; 255 + start = (u64)blk_rq_pos(req) << 9; 256 256 257 257 if (rq_data_dir(req) == READ) { 258 258 direction = DMA_FROM_DEVICE; ··· 361 361 * back later. 362 362 */ 363 363 while (num_req_outstanding < VIOMAXREQ) { 364 - req = elv_next_request(q); 364 + req = blk_fetch_request(q); 365 365 if (req == NULL) 366 366 return; 367 - /* dequeue the current request from the queue */ 368 - blkdev_dequeue_request(req); 369 367 /* check that request contains a valid command */ 370 368 if (!blk_fs_request(req)) { 371 - viodasd_end_request(req, -EIO, req->hard_nr_sectors); 369 + viodasd_end_request(req, -EIO, blk_rq_sectors(req)); 372 370 continue; 373 371 } 374 372 /* Try sending the request */ 375 373 if (send_request(req) != 0) 376 - viodasd_end_request(req, -EIO, req->hard_nr_sectors); 374 + viodasd_end_request(req, -EIO, blk_rq_sectors(req)); 377 375 } 378 376 } 379 377 ··· 588 590 err = vio_lookup_rc(viodasd_err_table, bevent->sub_result); 589 591 printk(VIOD_KERN_WARNING "read/write error %d:0x%04x (%s)\n", 590 592 event->xRc, bevent->sub_result, err->msg); 591 - num_sect = req->hard_nr_sectors; 593 + num_sect = blk_rq_sectors(req); 592 594 } 593 595 qlock = req->q->queue_lock; 594 596 spin_lock_irqsave(qlock, irq_flags);
+88 -22
drivers/block/virtio_blk.c
··· 37 37 struct list_head list; 38 38 struct request *req; 39 39 struct virtio_blk_outhdr out_hdr; 40 + struct virtio_scsi_inhdr in_hdr; 40 41 u8 status; 41 42 }; 42 43 ··· 51 50 spin_lock_irqsave(&vblk->lock, flags); 52 51 while ((vbr = vblk->vq->vq_ops->get_buf(vblk->vq, &len)) != NULL) { 53 52 int error; 53 + 54 54 switch (vbr->status) { 55 55 case VIRTIO_BLK_S_OK: 56 56 error = 0; ··· 64 62 break; 65 63 } 66 64 67 - __blk_end_request(vbr->req, error, blk_rq_bytes(vbr->req)); 65 + if (blk_pc_request(vbr->req)) { 66 + vbr->req->resid_len = vbr->in_hdr.residual; 67 + vbr->req->sense_len = vbr->in_hdr.sense_len; 68 + vbr->req->errors = vbr->in_hdr.errors; 69 + } 70 + 71 + __blk_end_request_all(vbr->req, error); 68 72 list_del(&vbr->list); 69 73 mempool_free(vbr, vblk->pool); 70 74 } ··· 82 74 static bool do_req(struct request_queue *q, struct virtio_blk *vblk, 83 75 struct request *req) 84 76 { 85 - unsigned long num, out, in; 77 + unsigned long num, out = 0, in = 0; 86 78 struct virtblk_req *vbr; 87 79 88 80 vbr = mempool_alloc(vblk->pool, GFP_ATOMIC); ··· 93 85 vbr->req = req; 94 86 if (blk_fs_request(vbr->req)) { 95 87 vbr->out_hdr.type = 0; 96 - vbr->out_hdr.sector = vbr->req->sector; 88 + vbr->out_hdr.sector = blk_rq_pos(vbr->req); 97 89 vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); 98 90 } else if (blk_pc_request(vbr->req)) { 99 91 vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD; ··· 107 99 if (blk_barrier_rq(vbr->req)) 108 100 vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER; 109 101 110 - sg_set_buf(&vblk->sg[0], &vbr->out_hdr, sizeof(vbr->out_hdr)); 111 - num = blk_rq_map_sg(q, vbr->req, vblk->sg+1); 112 - sg_set_buf(&vblk->sg[num+1], &vbr->status, sizeof(vbr->status)); 102 + sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr)); 113 103 114 - if (rq_data_dir(vbr->req) == WRITE) { 115 - vbr->out_hdr.type |= VIRTIO_BLK_T_OUT; 116 - out = 1 + num; 117 - in = 1; 118 - } else { 119 - vbr->out_hdr.type |= VIRTIO_BLK_T_IN; 120 - out = 1; 121 - in = 1 + num; 
104 + /* 105 + * If this is a packet command we need a couple of additional headers. 106 + * Behind the normal outhdr we put a segment with the scsi command 107 + * block, and before the normal inhdr we put the sense data and the 108 + * inhdr with additional status information before the normal inhdr. 109 + */ 110 + if (blk_pc_request(vbr->req)) 111 + sg_set_buf(&vblk->sg[out++], vbr->req->cmd, vbr->req->cmd_len); 112 + 113 + num = blk_rq_map_sg(q, vbr->req, vblk->sg + out); 114 + 115 + if (blk_pc_request(vbr->req)) { 116 + sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, 96); 117 + sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr, 118 + sizeof(vbr->in_hdr)); 119 + } 120 + 121 + sg_set_buf(&vblk->sg[num + out + in++], &vbr->status, 122 + sizeof(vbr->status)); 123 + 124 + if (num) { 125 + if (rq_data_dir(vbr->req) == WRITE) { 126 + vbr->out_hdr.type |= VIRTIO_BLK_T_OUT; 127 + out += num; 128 + } else { 129 + vbr->out_hdr.type |= VIRTIO_BLK_T_IN; 130 + in += num; 131 + } 122 132 } 123 133 124 134 if (vblk->vq->vq_ops->add_buf(vblk->vq, vblk->sg, out, in, vbr)) { ··· 150 124 151 125 static void do_virtblk_request(struct request_queue *q) 152 126 { 153 - struct virtio_blk *vblk = NULL; 127 + struct virtio_blk *vblk = q->queuedata; 154 128 struct request *req; 155 129 unsigned int issued = 0; 156 130 157 - while ((req = elv_next_request(q)) != NULL) { 158 - vblk = req->rq_disk->private_data; 131 + while ((req = blk_peek_request(q)) != NULL) { 159 132 BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems); 160 133 161 134 /* If this request fails, stop queue and wait for something to ··· 163 138 blk_stop_queue(q); 164 139 break; 165 140 } 166 - blkdev_dequeue_request(req); 141 + blk_start_request(req); 167 142 issued++; 168 143 } 169 144 ··· 171 146 vblk->vq->vq_ops->kick(vblk->vq); 172 147 } 173 148 149 + /* return ATA identify data 150 + */ 151 + static int virtblk_identify(struct gendisk *disk, void *argp) 152 + { 153 + struct virtio_blk *vblk = 
disk->private_data; 154 + void *opaque; 155 + int err = -ENOMEM; 156 + 157 + opaque = kmalloc(VIRTIO_BLK_ID_BYTES, GFP_KERNEL); 158 + if (!opaque) 159 + goto out; 160 + 161 + err = virtio_config_buf(vblk->vdev, VIRTIO_BLK_F_IDENTIFY, 162 + offsetof(struct virtio_blk_config, identify), opaque, 163 + VIRTIO_BLK_ID_BYTES); 164 + 165 + if (err) 166 + goto out_kfree; 167 + 168 + if (copy_to_user(argp, opaque, VIRTIO_BLK_ID_BYTES)) 169 + err = -EFAULT; 170 + 171 + out_kfree: 172 + kfree(opaque); 173 + out: 174 + return err; 175 + } 176 + 174 177 static int virtblk_ioctl(struct block_device *bdev, fmode_t mode, 175 178 unsigned cmd, unsigned long data) 176 179 { 177 - return scsi_cmd_ioctl(bdev->bd_disk->queue, 178 - bdev->bd_disk, mode, cmd, 179 - (void __user *)data); 180 + struct gendisk *disk = bdev->bd_disk; 181 + struct virtio_blk *vblk = disk->private_data; 182 + void __user *argp = (void __user *)data; 183 + 184 + if (cmd == HDIO_GET_IDENTITY) 185 + return virtblk_identify(disk, argp); 186 + 187 + /* 188 + * Only allow the generic SCSI ioctls if the host can support it. 
189 + */ 190 + if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI)) 191 + return -ENOIOCTLCMD; 192 + 193 + return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, argp); 180 194 } 181 195 182 196 /* We provide getgeo only to please some old bootloader/partitioning tools */ ··· 313 249 goto out_put_disk; 314 250 } 315 251 252 + vblk->disk->queue->queuedata = vblk; 316 253 queue_flag_set_unlocked(QUEUE_FLAG_VIRT, vblk->disk->queue); 317 254 318 255 if (index < 26) { ··· 378 313 offsetof(struct virtio_blk_config, blk_size), 379 314 &blk_size); 380 315 if (!err) 381 - blk_queue_hardsect_size(vblk->disk->queue, blk_size); 316 + blk_queue_logical_block_size(vblk->disk->queue, blk_size); 382 317 383 318 add_disk(vblk->disk); 384 319 return 0; ··· 421 356 static unsigned int features[] = { 422 357 VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, 423 358 VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, 359 + VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_IDENTIFY 424 360 }; 425 361 426 362 static struct virtio_driver virtio_blk = {
+18 -23
drivers/block/xd.c
··· 305 305 if (xdc_busy) 306 306 return; 307 307 308 - while ((req = elv_next_request(q)) != NULL) { 309 - unsigned block = req->sector; 310 - unsigned count = req->nr_sectors; 311 - int rw = rq_data_dir(req); 308 + req = blk_fetch_request(q); 309 + while (req) { 310 + unsigned block = blk_rq_pos(req); 311 + unsigned count = blk_rq_cur_sectors(req); 312 312 XD_INFO *disk = req->rq_disk->private_data; 313 - int res = 0; 313 + int res = -EIO; 314 314 int retry; 315 315 316 - if (!blk_fs_request(req)) { 317 - end_request(req, 0); 318 - continue; 319 - } 320 - if (block + count > get_capacity(req->rq_disk)) { 321 - end_request(req, 0); 322 - continue; 323 - } 324 - if (rw != READ && rw != WRITE) { 325 - printk("do_xd_request: unknown request\n"); 326 - end_request(req, 0); 327 - continue; 328 - } 316 + if (!blk_fs_request(req)) 317 + goto done; 318 + if (block + count > get_capacity(req->rq_disk)) 319 + goto done; 329 320 for (retry = 0; (retry < XD_RETRIES) && !res; retry++) 330 - res = xd_readwrite(rw, disk, req->buffer, block, count); 331 - end_request(req, res); /* wrap up, 0 = fail, 1 = success */ 321 + res = xd_readwrite(rq_data_dir(req), disk, req->buffer, 322 + block, count); 323 + done: 324 + /* wrap up, 0 = success, -errno = fail */ 325 + if (!__blk_end_request_cur(req, res)) 326 + req = blk_fetch_request(q); 332 327 } 333 328 } 334 329 ··· 413 418 printk("xd%c: %s timeout, recalibrating drive\n",'a'+drive,(operation == READ ? "read" : "write")); 414 419 xd_recalibrate(drive); 415 420 spin_lock_irq(&xd_lock); 416 - return (0); 421 + return -EIO; 417 422 case 2: 418 423 if (sense[0] & 0x30) { 419 424 printk("xd%c: %s - ",'a'+drive,(operation == READ ? 
"reading" : "writing")); ··· 434 439 else 435 440 printk(" - no valid disk address\n"); 436 441 spin_lock_irq(&xd_lock); 437 - return (0); 442 + return -EIO; 438 443 } 439 444 if (xd_dma_buffer) 440 445 for (i=0; i < (temp * 0x200); i++) ··· 443 448 count -= temp, buffer += temp * 0x200, block += temp; 444 449 } 445 450 spin_lock_irq(&xd_lock); 446 - return (1); 451 + return 0; 447 452 } 448 453 449 454 /* xd_recalibrate: recalibrate a given drive and reset controller if necessary */
+16 -18
drivers/block/xen-blkfront.c
··· 122 122 static int get_id_from_freelist(struct blkfront_info *info) 123 123 { 124 124 unsigned long free = info->shadow_free; 125 - BUG_ON(free > BLK_RING_SIZE); 125 + BUG_ON(free >= BLK_RING_SIZE); 126 126 info->shadow_free = info->shadow[free].req.id; 127 127 info->shadow[free].req.id = 0x0fffffee; /* debug */ 128 128 return free; ··· 231 231 info->shadow[id].request = (unsigned long)req; 232 232 233 233 ring_req->id = id; 234 - ring_req->sector_number = (blkif_sector_t)req->sector; 234 + ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req); 235 235 ring_req->handle = info->handle; 236 236 237 237 ring_req->operation = rq_data_dir(req) ? ··· 299 299 300 300 queued = 0; 301 301 302 - while ((req = elv_next_request(rq)) != NULL) { 302 + while ((req = blk_peek_request(rq)) != NULL) { 303 303 info = req->rq_disk->private_data; 304 - if (!blk_fs_request(req)) { 305 - end_request(req, 0); 306 - continue; 307 - } 308 304 309 305 if (RING_FULL(&info->ring)) 310 306 goto wait; 311 307 308 + blk_start_request(req); 309 + 310 + if (!blk_fs_request(req)) { 311 + __blk_end_request_all(req, -EIO); 312 + continue; 313 + } 314 + 312 315 pr_debug("do_blk_req %p: cmd %p, sec %lx, " 313 - "(%u/%li) buffer:%p [%s]\n", 314 - req, req->cmd, (unsigned long)req->sector, 315 - req->current_nr_sectors, 316 - req->nr_sectors, req->buffer, 317 - rq_data_dir(req) ? "write" : "read"); 316 + "(%u/%u) buffer:%p [%s]\n", 317 + req, req->cmd, (unsigned long)blk_rq_pos(req), 318 + blk_rq_cur_sectors(req), blk_rq_sectors(req), 319 + req->buffer, rq_data_dir(req) ? "write" : "read"); 318 320 319 - 320 - blkdev_dequeue_request(req); 321 321 if (blkif_queue_request(req)) { 322 322 blk_requeue_request(rq, req); 323 323 wait: ··· 344 344 queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq); 345 345 346 346 /* Hard sector size and max sectors impersonate the equiv. hardware. 
*/ 347 - blk_queue_hardsect_size(rq, sector_size); 347 + blk_queue_logical_block_size(rq, sector_size); 348 348 blk_queue_max_sectors(rq, 512); 349 349 350 350 /* Each segment in a request is up to an aligned page in size. */ ··· 551 551 552 552 for (i = info->ring.rsp_cons; i != rp; i++) { 553 553 unsigned long id; 554 - int ret; 555 554 556 555 bret = RING_GET_RESPONSE(&info->ring, i); 557 556 id = bret->id; ··· 577 578 dev_dbg(&info->xbdev->dev, "Bad return from blkdev data " 578 579 "request: %x\n", bret->status); 579 580 580 - ret = __blk_end_request(req, error, blk_rq_bytes(req)); 581 - BUG_ON(ret); 581 + __blk_end_request_all(req, error); 582 582 break; 583 583 default: 584 584 BUG();
+26 -20
drivers/block/xsysace.c
··· 463 463 { 464 464 struct request *req; 465 465 466 - while ((req = elv_next_request(q)) != NULL) { 466 + while ((req = blk_peek_request(q)) != NULL) { 467 467 if (blk_fs_request(req)) 468 468 break; 469 - end_request(req, 0); 469 + blk_start_request(req); 470 + __blk_end_request_all(req, -EIO); 470 471 } 471 472 return req; 472 473 } ··· 493 492 set_capacity(ace->gd, 0); 494 493 dev_info(ace->dev, "No CF in slot\n"); 495 494 496 - /* Drop all pending requests */ 497 - while ((req = elv_next_request(ace->queue)) != NULL) 498 - end_request(req, 0); 495 + /* Drop all in-flight and pending requests */ 496 + if (ace->req) { 497 + __blk_end_request_all(ace->req, -EIO); 498 + ace->req = NULL; 499 + } 500 + while ((req = blk_fetch_request(ace->queue)) != NULL) 501 + __blk_end_request_all(req, -EIO); 499 502 500 503 /* Drop back to IDLE state and notify waiters */ 501 504 ace->fsm_state = ACE_FSM_STATE_IDLE; ··· 647 642 ace->fsm_state = ACE_FSM_STATE_IDLE; 648 643 break; 649 644 } 645 + blk_start_request(req); 650 646 651 647 /* Okay, it's a data request, set it up for transfer */ 652 648 dev_dbg(ace->dev, 653 - "request: sec=%llx hcnt=%lx, ccnt=%x, dir=%i\n", 654 - (unsigned long long) req->sector, req->hard_nr_sectors, 655 - req->current_nr_sectors, rq_data_dir(req)); 649 + "request: sec=%llx hcnt=%x, ccnt=%x, dir=%i\n", 650 + (unsigned long long)blk_rq_pos(req), 651 + blk_rq_sectors(req), blk_rq_cur_sectors(req), 652 + rq_data_dir(req)); 656 653 657 654 ace->req = req; 658 655 ace->data_ptr = req->buffer; 659 - ace->data_count = req->current_nr_sectors * ACE_BUF_PER_SECTOR; 660 - ace_out32(ace, ACE_MPULBA, req->sector & 0x0FFFFFFF); 656 + ace->data_count = blk_rq_cur_sectors(req) * ACE_BUF_PER_SECTOR; 657 + ace_out32(ace, ACE_MPULBA, blk_rq_pos(req) & 0x0FFFFFFF); 661 658 662 - count = req->hard_nr_sectors; 659 + count = blk_rq_sectors(req); 663 660 if (rq_data_dir(req)) { 664 661 /* Kick off write request */ 665 662 dev_dbg(ace->dev, "write data\n"); ··· 695 688 
dev_dbg(ace->dev, 696 689 "CFBSY set; t=%i iter=%i c=%i dc=%i irq=%i\n", 697 690 ace->fsm_task, ace->fsm_iter_num, 698 - ace->req->current_nr_sectors * 16, 691 + blk_rq_cur_sectors(ace->req) * 16, 699 692 ace->data_count, ace->in_irq); 700 693 ace_fsm_yield(ace); /* need to poll CFBSY bit */ 701 694 break; ··· 704 697 dev_dbg(ace->dev, 705 698 "DATABUF not set; t=%i iter=%i c=%i dc=%i irq=%i\n", 706 699 ace->fsm_task, ace->fsm_iter_num, 707 - ace->req->current_nr_sectors * 16, 700 + blk_rq_cur_sectors(ace->req) * 16, 708 701 ace->data_count, ace->in_irq); 709 702 ace_fsm_yieldirq(ace); 710 703 break; ··· 724 717 } 725 718 726 719 /* bio finished; is there another one? */ 727 - if (__blk_end_request(ace->req, 0, 728 - blk_rq_cur_bytes(ace->req))) { 729 - /* dev_dbg(ace->dev, "next block; h=%li c=%i\n", 730 - * ace->req->hard_nr_sectors, 731 - * ace->req->current_nr_sectors); 720 + if (__blk_end_request_cur(ace->req, 0)) { 721 + /* dev_dbg(ace->dev, "next block; h=%u c=%u\n", 722 + * blk_rq_sectors(ace->req), 723 + * blk_rq_cur_sectors(ace->req)); 732 724 */ 733 725 ace->data_ptr = ace->req->buffer; 734 - ace->data_count = ace->req->current_nr_sectors * 16; 726 + ace->data_count = blk_rq_cur_sectors(ace->req) * 16; 735 727 ace_fsm_yieldirq(ace); 736 728 break; 737 729 } ··· 984 978 ace->queue = blk_init_queue(ace_request, &ace->lock); 985 979 if (ace->queue == NULL) 986 980 goto err_blk_initq; 987 - blk_queue_hardsect_size(ace->queue, 512); 981 + blk_queue_logical_block_size(ace->queue, 512); 988 982 989 983 /* 990 984 * Allocate and initialize GD structure
+12 -7
drivers/block/z2ram.c
··· 70 70 static void do_z2_request(struct request_queue *q) 71 71 { 72 72 struct request *req; 73 - while ((req = elv_next_request(q)) != NULL) { 74 - unsigned long start = req->sector << 9; 75 - unsigned long len = req->current_nr_sectors << 9; 73 + 74 + req = blk_fetch_request(q); 75 + while (req) { 76 + unsigned long start = blk_rq_pos(req) << 9; 77 + unsigned long len = blk_rq_cur_bytes(req); 78 + int err = 0; 76 79 77 80 if (start + len > z2ram_size) { 78 81 printk( KERN_ERR DEVICE_NAME ": bad access: block=%lu, count=%u\n", 79 - req->sector, req->current_nr_sectors); 80 - end_request(req, 0); 81 - continue; 82 + blk_rq_pos(req), blk_rq_cur_sectors(req)); 83 + err = -EIO; 84 + goto done; 82 85 } 83 86 while (len) { 84 87 unsigned long addr = start & Z2RAM_CHUNKMASK; ··· 96 93 start += size; 97 94 len -= size; 98 95 } 99 - end_request(req, 1); 96 + done: 97 + if (!__blk_end_request_cur(req, err)) 98 + req = blk_fetch_request(q); 100 99 } 101 100 } 102 101
+2 -2
drivers/cdrom/cdrom.c
··· 2101 2101 nr = nframes; 2102 2102 if (cdi->cdda_method == CDDA_BPC_SINGLE) 2103 2103 nr = 1; 2104 - if (nr * CD_FRAMESIZE_RAW > (q->max_sectors << 9)) 2105 - nr = (q->max_sectors << 9) / CD_FRAMESIZE_RAW; 2104 + if (nr * CD_FRAMESIZE_RAW > (queue_max_sectors(q) << 9)) 2105 + nr = (queue_max_sectors(q) << 9) / CD_FRAMESIZE_RAW; 2106 2106 2107 2107 len = nr * CD_FRAMESIZE_RAW; 2108 2108
+16 -20
drivers/cdrom/gdrom.c
··· 584 584 list_for_each_safe(elem, next, &gdrom_deferred) { 585 585 req = list_entry(elem, struct request, queuelist); 586 586 spin_unlock(&gdrom_lock); 587 - block = req->sector/GD_TO_BLK + GD_SESSION_OFFSET; 588 - block_cnt = req->nr_sectors/GD_TO_BLK; 587 + block = blk_rq_pos(req)/GD_TO_BLK + GD_SESSION_OFFSET; 588 + block_cnt = blk_rq_sectors(req)/GD_TO_BLK; 589 589 ctrl_outl(PHYSADDR(req->buffer), GDROM_DMA_STARTADDR_REG); 590 590 ctrl_outl(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG); 591 591 ctrl_outl(1, GDROM_DMA_DIRECTION_REG); ··· 632 632 * before handling ending the request */ 633 633 spin_lock(&gdrom_lock); 634 634 list_del_init(&req->queuelist); 635 - __blk_end_request(req, err, blk_rq_bytes(req)); 635 + __blk_end_request_all(req, err); 636 636 } 637 637 spin_unlock(&gdrom_lock); 638 638 kfree(read_command); 639 - } 640 - 641 - static void gdrom_request_handler_dma(struct request *req) 642 - { 643 - /* dequeue, add to list of deferred work 644 - * and then schedule workqueue */ 645 - blkdev_dequeue_request(req); 646 - list_add_tail(&req->queuelist, &gdrom_deferred); 647 - schedule_work(&work); 648 639 } 649 640 650 641 static void gdrom_request(struct request_queue *rq) 651 642 { 652 643 struct request *req; 653 644 654 - while ((req = elv_next_request(rq)) != NULL) { 645 + while ((req = blk_fetch_request(rq)) != NULL) { 655 646 if (!blk_fs_request(req)) { 656 647 printk(KERN_DEBUG "GDROM: Non-fs request ignored\n"); 657 - end_request(req, 0); 648 + __blk_end_request_all(req, -EIO); 649 + continue; 658 650 } 659 651 if (rq_data_dir(req) != READ) { 660 652 printk(KERN_NOTICE "GDROM: Read only device -"); 661 653 printk(" write request ignored\n"); 662 - end_request(req, 0); 654 + __blk_end_request_all(req, -EIO); 655 + continue; 663 656 } 664 - if (req->nr_sectors) 665 - gdrom_request_handler_dma(req); 666 - else 667 - end_request(req, 0); 657 + 658 + /* 659 + * Add to list of deferred work and then schedule 660 + * workqueue. 
661 + */ 662 + list_add_tail(&req->queuelist, &gdrom_deferred); 663 + schedule_work(&work); 668 664 } 669 665 } 670 666 ··· 739 743 740 744 static int __devinit probe_gdrom_setupqueue(void) 741 745 { 742 - blk_queue_hardsect_size(gd.gdrom_rq, GDROM_HARD_SECTOR); 746 + blk_queue_logical_block_size(gd.gdrom_rq, GDROM_HARD_SECTOR); 743 747 /* using DMA so memory will need to be contiguous */ 744 748 blk_queue_max_hw_segments(gd.gdrom_rq, 1); 745 749 /* set a large max size to get most from DMA */
+8 -25
drivers/cdrom/viocd.c
··· 282 282 viopath_targetinst(viopath_hostLp), 283 283 (u64)req, VIOVERSION << 16, 284 284 ((u64)DEVICE_NR(diskinfo) << 48) | dmaaddr, 285 - (u64)req->sector * 512, len, 0); 285 + (u64)blk_rq_pos(req) * 512, len, 0); 286 286 if (hvrc != HvLpEvent_Rc_Good) { 287 287 printk(VIOCD_KERN_WARNING "hv error on op %d\n", (int)hvrc); 288 288 return -1; ··· 291 291 return 0; 292 292 } 293 293 294 - static void viocd_end_request(struct request *req, int error) 295 - { 296 - int nsectors = req->hard_nr_sectors; 297 - 298 - /* 299 - * Make sure it's fully ended, and ensure that we process 300 - * at least one sector. 301 - */ 302 - if (blk_pc_request(req)) 303 - nsectors = (req->data_len + 511) >> 9; 304 - if (!nsectors) 305 - nsectors = 1; 306 - 307 - if (__blk_end_request(req, error, nsectors << 9)) 308 - BUG(); 309 - } 310 - 311 294 static int rwreq; 312 295 313 296 static void do_viocd_request(struct request_queue *q) 314 297 { 315 298 struct request *req; 316 299 317 - while ((rwreq == 0) && ((req = elv_next_request(q)) != NULL)) { 300 + while ((rwreq == 0) && ((req = blk_fetch_request(q)) != NULL)) { 318 301 if (!blk_fs_request(req)) 319 - viocd_end_request(req, -EIO); 302 + __blk_end_request_all(req, -EIO); 320 303 else if (send_request(req) < 0) { 321 304 printk(VIOCD_KERN_WARNING 322 305 "unable to send message to OS/400!"); 323 - viocd_end_request(req, -EIO); 306 + __blk_end_request_all(req, -EIO); 324 307 } else 325 308 rwreq++; 326 309 } ··· 469 486 case viocdopen: 470 487 if (event->xRc == 0) { 471 488 di = &viocd_diskinfo[bevent->disk]; 472 - blk_queue_hardsect_size(di->viocd_disk->queue, 473 - bevent->block_size); 489 + blk_queue_logical_block_size(di->viocd_disk->queue, 490 + bevent->block_size); 474 491 set_capacity(di->viocd_disk, 475 492 bevent->media_size * 476 493 bevent->block_size / 512); ··· 514 531 "with rc %d:0x%04X: %s\n", 515 532 req, event->xRc, 516 533 bevent->sub_result, err->msg); 517 - viocd_end_request(req, -EIO); 534 + 
__blk_end_request_all(req, -EIO); 518 535 } else 519 - viocd_end_request(req, 0); 536 + __blk_end_request_all(req, 0); 520 537 521 538 /* restart handling of incoming requests */ 522 539 spin_unlock_irqrestore(&viocd_reqlock, flags);
+1 -1
drivers/char/raw.c
··· 71 71 err = bd_claim(bdev, raw_open); 72 72 if (err) 73 73 goto out1; 74 - err = set_blocksize(bdev, bdev_hardsect_size(bdev)); 74 + err = set_blocksize(bdev, bdev_logical_block_size(bdev)); 75 75 if (err) 76 76 goto out2; 77 77 filp->f_flags |= O_DIRECT;
+15 -6
drivers/ide/ide-atapi.c
··· 246 246 */ 247 247 void ide_retry_pc(ide_drive_t *drive) 248 248 { 249 + struct request *failed_rq = drive->hwif->rq; 249 250 struct request *sense_rq = &drive->sense_rq; 250 251 struct ide_atapi_pc *pc = &drive->request_sense_pc; 251 252 ··· 256 255 ide_init_pc(pc); 257 256 memcpy(pc->c, sense_rq->cmd, 12); 258 257 pc->buf = bio_data(sense_rq->bio); /* pointer to mapped address */ 259 - pc->req_xfer = sense_rq->data_len; 258 + pc->req_xfer = blk_rq_bytes(sense_rq); 260 259 261 260 if (drive->media == ide_tape) 262 261 set_bit(IDE_AFLAG_IGNORE_DSC, &drive->atapi_flags); 263 262 264 - if (ide_queue_sense_rq(drive, pc)) 265 - ide_complete_rq(drive, -EIO, blk_rq_bytes(drive->hwif->rq)); 263 + /* 264 + * Push back the failed request and put request sense on top 265 + * of it. The failed command will be retried after sense data 266 + * is acquired. 267 + */ 268 + blk_requeue_request(failed_rq->q, failed_rq); 269 + drive->hwif->rq = NULL; 270 + if (ide_queue_sense_rq(drive, pc)) { 271 + blk_start_request(failed_rq); 272 + ide_complete_rq(drive, -EIO, blk_rq_bytes(failed_rq)); 273 + } 266 274 } 267 275 EXPORT_SYMBOL_GPL(ide_retry_pc); 268 276 ··· 313 303 return 32768; 314 304 else if (blk_sense_request(rq) || blk_pc_request(rq) || 315 305 rq->cmd_type == REQ_TYPE_ATA_PC) 316 - return rq->data_len; 306 + return blk_rq_bytes(rq); 317 307 else 318 308 return 0; 319 309 } ··· 377 367 /* No more interrupts */ 378 368 if ((stat & ATA_DRQ) == 0) { 379 369 int uptodate, error; 380 - unsigned int done; 381 370 382 371 debug_log("Packet command completed, %d bytes transferred\n", 383 372 pc->xferred); ··· 440 431 error = uptodate ? 0 : -EIO; 441 432 } 442 433 443 - ide_complete_rq(drive, error, done); 434 + ide_complete_rq(drive, error, blk_rq_bytes(rq)); 444 435 return ide_stopped; 445 436 } 446 437
+21 -43
drivers/ide/ide-cd.c
··· 182 182 (sense->information[2] << 8) | 183 183 (sense->information[3]); 184 184 185 - if (drive->queue->hardsect_size == 2048) 185 + if (queue_logical_block_size(drive->queue) == 2048) 186 186 /* device sector size is 2K */ 187 187 sector <<= 2; 188 188 ··· 404 404 405 405 end_request: 406 406 if (stat & ATA_ERR) { 407 - struct request_queue *q = drive->queue; 408 - unsigned long flags; 409 - 410 - spin_lock_irqsave(q->queue_lock, flags); 411 - blkdev_dequeue_request(rq); 412 - spin_unlock_irqrestore(q->queue_lock, flags); 413 - 414 407 hwif->rq = NULL; 415 - 416 408 return ide_queue_sense_rq(drive, rq) ? 2 : 1; 417 409 } else 418 410 return 2; ··· 510 518 error = blk_execute_rq(drive->queue, info->disk, rq, 0); 511 519 512 520 if (buffer) 513 - *bufflen = rq->data_len; 521 + *bufflen = rq->resid_len; 514 522 515 523 flags = rq->cmd_flags; 516 524 blk_put_request(rq); ··· 568 576 struct request *rq = hwif->rq; 569 577 ide_expiry_t *expiry = NULL; 570 578 int dma_error = 0, dma, thislen, uptodate = 0; 571 - int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0, nsectors; 579 + int write = (rq_data_dir(rq) == WRITE) ? 
1 : 0, rc = 0; 572 580 int sense = blk_sense_request(rq); 573 581 unsigned int timeout; 574 582 u16 len; ··· 698 706 699 707 out_end: 700 708 if (blk_pc_request(rq) && rc == 0) { 701 - unsigned int dlen = rq->data_len; 702 - 703 - rq->data_len = 0; 704 - 705 - if (blk_end_request(rq, 0, dlen)) 706 - BUG(); 707 - 709 + rq->resid_len = 0; 710 + blk_end_request_all(rq, 0); 708 711 hwif->rq = NULL; 709 712 } else { 710 713 if (sense && uptodate) ··· 717 730 ide_cd_error_cmd(drive, cmd); 718 731 719 732 /* make sure it's fully ended */ 720 - if (blk_pc_request(rq)) 721 - nsectors = (rq->data_len + 511) >> 9; 722 - else 723 - nsectors = rq->hard_nr_sectors; 724 - 725 - if (nsectors == 0) 726 - nsectors = 1; 727 - 728 733 if (blk_fs_request(rq) == 0) { 729 - rq->data_len -= (cmd->nbytes - cmd->nleft); 734 + rq->resid_len -= cmd->nbytes - cmd->nleft; 730 735 if (uptodate == 0 && (cmd->tf_flags & IDE_TFLAG_WRITE)) 731 - rq->data_len += cmd->last_xfer_len; 736 + rq->resid_len += cmd->last_xfer_len; 732 737 } 733 738 734 - ide_complete_rq(drive, uptodate ? 0 : -EIO, nsectors << 9); 739 + ide_complete_rq(drive, uptodate ? 
0 : -EIO, blk_rq_bytes(rq)); 735 740 736 741 if (sense && rc == 2) 737 742 ide_error(drive, "request sense failure", stat); ··· 737 758 struct request_queue *q = drive->queue; 738 759 int write = rq_data_dir(rq) == WRITE; 739 760 unsigned short sectors_per_frame = 740 - queue_hardsect_size(q) >> SECTOR_BITS; 761 + queue_logical_block_size(q) >> SECTOR_BITS; 741 762 742 763 ide_debug_log(IDE_DBG_RQ, "rq->cmd[0]: 0x%x, rq->cmd_flags: 0x%x, " 743 764 "secs_per_frame: %u", ··· 756 777 } 757 778 758 779 /* fs requests *must* be hardware frame aligned */ 759 - if ((rq->nr_sectors & (sectors_per_frame - 1)) || 760 - (rq->sector & (sectors_per_frame - 1))) 780 + if ((blk_rq_sectors(rq) & (sectors_per_frame - 1)) || 781 + (blk_rq_pos(rq) & (sectors_per_frame - 1))) 761 782 return ide_stopped; 762 783 763 784 /* use DMA, if possible */ ··· 800 821 */ 801 822 alignment = queue_dma_alignment(q) | q->dma_pad_mask; 802 823 if ((unsigned long)buf & alignment 803 - || rq->data_len & q->dma_pad_mask 824 + || blk_rq_bytes(rq) & q->dma_pad_mask 804 825 || object_is_on_stack(buf)) 805 826 drive->dma = 0; 806 827 } ··· 848 869 849 870 cmd.rq = rq; 850 871 851 - if (blk_fs_request(rq) || rq->data_len) { 852 - ide_init_sg_cmd(&cmd, blk_fs_request(rq) ? 
(rq->nr_sectors << 9) 853 - : rq->data_len); 872 + if (blk_fs_request(rq) || blk_rq_bytes(rq)) { 873 + ide_init_sg_cmd(&cmd, blk_rq_bytes(rq)); 854 874 ide_map_sg(drive, &cmd); 855 875 } 856 876 857 877 return ide_issue_pc(drive, &cmd); 858 878 out_end: 859 - nsectors = rq->hard_nr_sectors; 879 + nsectors = blk_rq_sectors(rq); 860 880 861 881 if (nsectors == 0) 862 882 nsectors = 1; ··· 1021 1043 /* save a private copy of the TOC capacity for error handling */ 1022 1044 drive->probed_capacity = toc->capacity * sectors_per_frame; 1023 1045 1024 - blk_queue_hardsect_size(drive->queue, 1025 - sectors_per_frame << SECTOR_BITS); 1046 + blk_queue_logical_block_size(drive->queue, 1047 + sectors_per_frame << SECTOR_BITS); 1026 1048 1027 1049 /* first read just the header, so we know how long the TOC is */ 1028 1050 stat = cdrom_read_tocentry(drive, 0, 1, 0, (char *) &toc->hdr, ··· 1338 1360 /* standard prep_rq_fn that builds 10 byte cmds */ 1339 1361 static int ide_cdrom_prep_fs(struct request_queue *q, struct request *rq) 1340 1362 { 1341 - int hard_sect = queue_hardsect_size(q); 1342 - long block = (long)rq->hard_sector / (hard_sect >> 9); 1343 - unsigned long blocks = rq->hard_nr_sectors / (hard_sect >> 9); 1363 + int hard_sect = queue_logical_block_size(q); 1364 + long block = (long)blk_rq_pos(rq) / (hard_sect >> 9); 1365 + unsigned long blocks = blk_rq_sectors(rq) / (hard_sect >> 9); 1344 1366 1345 1367 memset(rq->cmd, 0, BLK_MAX_CDB); 1346 1368 ··· 1543 1565 1544 1566 nslots = ide_cdrom_probe_capabilities(drive); 1545 1567 1546 - blk_queue_hardsect_size(q, CD_FRAMESIZE); 1568 + blk_queue_logical_block_size(q, CD_FRAMESIZE); 1547 1569 1548 1570 if (ide_cdrom_register(drive, nslots)) { 1549 1571 printk(KERN_ERR PFX "%s: %s failed to register device with the"
+5 -5
drivers/ide/ide-disk.c
··· 82 82 sector_t block) 83 83 { 84 84 ide_hwif_t *hwif = drive->hwif; 85 - u16 nsectors = (u16)rq->nr_sectors; 85 + u16 nsectors = (u16)blk_rq_sectors(rq); 86 86 u8 lba48 = !!(drive->dev_flags & IDE_DFLAG_LBA48); 87 87 u8 dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA); 88 88 struct ide_cmd cmd; ··· 90 90 ide_startstop_t rc; 91 91 92 92 if ((hwif->host_flags & IDE_HFLAG_NO_LBA48_DMA) && lba48 && dma) { 93 - if (block + rq->nr_sectors > 1ULL << 28) 93 + if (block + blk_rq_sectors(rq) > 1ULL << 28) 94 94 dma = 0; 95 95 else 96 96 lba48 = 0; ··· 195 195 196 196 ledtrig_ide_activity(); 197 197 198 - pr_debug("%s: %sing: block=%llu, sectors=%lu, buffer=0x%08lx\n", 198 + pr_debug("%s: %sing: block=%llu, sectors=%u, buffer=0x%08lx\n", 199 199 drive->name, rq_data_dir(rq) == READ ? "read" : "writ", 200 - (unsigned long long)block, rq->nr_sectors, 200 + (unsigned long long)block, blk_rq_sectors(rq), 201 201 (unsigned long)rq->buffer); 202 202 203 203 if (hwif->rw_disk) ··· 639 639 } 640 640 641 641 printk(KERN_INFO "%s: max request size: %dKiB\n", drive->name, 642 - q->max_sectors / 2); 642 + queue_max_sectors(q) / 2); 643 643 644 644 if (ata_id_is_ssd(id)) 645 645 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
+1 -1
drivers/ide/ide-dma.c
··· 103 103 ide_finish_cmd(drive, cmd, stat); 104 104 else 105 105 ide_complete_rq(drive, 0, 106 - cmd->rq->nr_sectors << 9); 106 + blk_rq_sectors(cmd->rq) << 9); 107 107 return ide_stopped; 108 108 } 109 109 printk(KERN_ERR "%s: %s: bad DMA status (0x%02x)\n",
+5 -5
drivers/ide/ide-floppy.c
··· 194 194 { 195 195 struct ide_disk_obj *floppy = drive->driver_data; 196 196 int block = sector / floppy->bs_factor; 197 - int blocks = rq->nr_sectors / floppy->bs_factor; 197 + int blocks = blk_rq_sectors(rq) / floppy->bs_factor; 198 198 int cmd = rq_data_dir(rq); 199 199 200 200 ide_debug_log(IDE_DBG_FUNC, "block: %d, blocks: %d", block, blocks); ··· 220 220 ide_init_pc(pc); 221 221 memcpy(pc->c, rq->cmd, sizeof(pc->c)); 222 222 pc->rq = rq; 223 - if (rq->data_len) { 223 + if (blk_rq_bytes(rq)) { 224 224 pc->flags |= PC_FLAG_DMA_OK; 225 225 if (rq_data_dir(rq) == WRITE) 226 226 pc->flags |= PC_FLAG_WRITING; 227 227 } 228 228 /* pio will be performed by ide_pio_bytes() which handles sg fine */ 229 229 pc->buf = NULL; 230 - pc->req_xfer = pc->buf_size = rq->data_len; 230 + pc->req_xfer = pc->buf_size = blk_rq_bytes(rq); 231 231 } 232 232 233 233 static ide_startstop_t ide_floppy_do_request(ide_drive_t *drive, ··· 259 259 goto out_end; 260 260 } 261 261 if (blk_fs_request(rq)) { 262 - if (((long)rq->sector % floppy->bs_factor) || 263 - (rq->nr_sectors % floppy->bs_factor)) { 262 + if (((long)blk_rq_pos(rq) % floppy->bs_factor) || 263 + (blk_rq_sectors(rq) % floppy->bs_factor)) { 264 264 printk(KERN_ERR PFX "%s: unsupported r/w rq size\n", 265 265 drive->name); 266 266 goto out_end;
+30 -13
drivers/ide/ide-io.c
··· 116 116 unsigned int ide_rq_bytes(struct request *rq) 117 117 { 118 118 if (blk_pc_request(rq)) 119 - return rq->data_len; 119 + return blk_rq_bytes(rq); 120 120 else 121 - return rq->hard_cur_sectors << 9; 121 + return blk_rq_cur_sectors(rq) << 9; 122 122 } 123 123 EXPORT_SYMBOL_GPL(ide_rq_bytes); 124 124 ··· 133 133 * and complete the whole request right now 134 134 */ 135 135 if (blk_noretry_request(rq) && error <= 0) 136 - nr_bytes = rq->hard_nr_sectors << 9; 136 + nr_bytes = blk_rq_sectors(rq) << 9; 137 137 138 138 rc = ide_end_rq(drive, rq, error, nr_bytes); 139 139 if (rc == 0) ··· 279 279 280 280 if (cmd) { 281 281 if (cmd->protocol == ATA_PROT_PIO) { 282 - ide_init_sg_cmd(cmd, rq->nr_sectors << 9); 282 + ide_init_sg_cmd(cmd, blk_rq_sectors(rq) << 9); 283 283 ide_map_sg(drive, cmd); 284 284 } 285 285 ··· 387 387 388 388 drv = *(struct ide_driver **)rq->rq_disk->private_data; 389 389 390 - return drv->do_request(drive, rq, rq->sector); 390 + return drv->do_request(drive, rq, blk_rq_pos(rq)); 391 391 } 392 392 return do_special(drive); 393 393 kill_rq: ··· 487 487 488 488 if (!ide_lock_port(hwif)) { 489 489 ide_hwif_t *prev_port; 490 + 491 + WARN_ON_ONCE(hwif->rq); 490 492 repeat: 491 493 prev_port = hwif->host->cur_port; 492 - hwif->rq = NULL; 493 - 494 494 if (drive->dev_flags & IDE_DFLAG_SLEEPING && 495 495 time_after(drive->sleep, jiffies)) { 496 496 ide_unlock_port(hwif); ··· 519 519 * we know that the queue isn't empty, but this can happen 520 520 * if the q->prep_rq_fn() decides to kill a request 521 521 */ 522 - rq = elv_next_request(drive->queue); 522 + if (!rq) 523 + rq = blk_fetch_request(drive->queue); 524 + 523 525 spin_unlock_irq(q->queue_lock); 524 526 spin_lock_irq(&hwif->lock); 525 527 ··· 533 531 /* 534 532 * Sanity: don't accept a request that isn't a PM request 535 533 * if we are currently power managed. 
This is very important as 536 - * blk_stop_queue() doesn't prevent the elv_next_request() 534 + * blk_stop_queue() doesn't prevent the blk_fetch_request() 537 535 * above to return us whatever is in the queue. Since we call 538 536 * ide_do_request() ourselves, we end up taking requests while 539 537 * the queue is blocked... ··· 557 555 startstop = start_request(drive, rq); 558 556 spin_lock_irq(&hwif->lock); 559 557 560 - if (startstop == ide_stopped) 558 + if (startstop == ide_stopped) { 559 + rq = hwif->rq; 560 + hwif->rq = NULL; 561 561 goto repeat; 562 + } 562 563 } else 563 564 goto plug_device; 564 565 out: ··· 577 572 plug_device_2: 578 573 spin_lock_irq(q->queue_lock); 579 574 575 + if (rq) 576 + blk_requeue_request(q, rq); 580 577 if (!elv_queue_empty(q)) 581 578 blk_plug_device(q); 582 579 } 583 580 584 - static void ide_plug_device(ide_drive_t *drive) 581 + static void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq) 585 582 { 586 583 struct request_queue *q = drive->queue; 587 584 unsigned long flags; 588 585 589 586 spin_lock_irqsave(q->queue_lock, flags); 587 + 588 + if (rq) 589 + blk_requeue_request(q, rq); 590 590 if (!elv_queue_empty(q)) 591 591 blk_plug_device(q); 592 + 592 593 spin_unlock_irqrestore(q->queue_lock, flags); 593 594 } 594 595 ··· 643 632 unsigned long flags; 644 633 int wait = -1; 645 634 int plug_device = 0; 635 + struct request *uninitialized_var(rq_in_flight); 646 636 647 637 spin_lock_irqsave(&hwif->lock, flags); 648 638 ··· 705 693 spin_lock_irq(&hwif->lock); 706 694 enable_irq(hwif->irq); 707 695 if (startstop == ide_stopped && hwif->polling == 0) { 696 + rq_in_flight = hwif->rq; 697 + hwif->rq = NULL; 708 698 ide_unlock_port(hwif); 709 699 plug_device = 1; 710 700 } ··· 715 701 716 702 if (plug_device) { 717 703 ide_unlock_host(hwif->host); 718 - ide_plug_device(drive); 704 + ide_requeue_and_plug(drive, rq_in_flight); 719 705 } 720 706 } 721 707 ··· 801 787 ide_startstop_t startstop; 802 788 irqreturn_t irq_ret 
= IRQ_NONE; 803 789 int plug_device = 0; 790 + struct request *uninitialized_var(rq_in_flight); 804 791 805 792 if (host->host_flags & IDE_HFLAG_SERIALIZE) { 806 793 if (hwif != host->cur_port) ··· 881 866 */ 882 867 if (startstop == ide_stopped && hwif->polling == 0) { 883 868 BUG_ON(hwif->handler); 869 + rq_in_flight = hwif->rq; 870 + hwif->rq = NULL; 884 871 ide_unlock_port(hwif); 885 872 plug_device = 1; 886 873 } ··· 892 875 out_early: 893 876 if (plug_device) { 894 877 ide_unlock_host(hwif->host); 895 - ide_plug_device(drive); 878 + ide_requeue_and_plug(drive, rq_in_flight); 896 879 } 897 880 898 881 return irq_ret;
+1 -1
drivers/ide/ide-lib.c
··· 96 96 97 97 if (rq) 98 98 printk(KERN_CONT ", sector=%llu", 99 - (unsigned long long)rq->sector); 99 + (unsigned long long)blk_rq_pos(rq)); 100 100 } 101 101 printk(KERN_CONT "\n"); 102 102 }
+6 -6
drivers/ide/ide-tape.c
··· 380 380 } 381 381 382 382 tape->first_frame += blocks; 383 - rq->data_len -= blocks * tape->blk_size; 383 + rq->resid_len -= blocks * tape->blk_size; 384 384 385 385 if (pc->error) { 386 386 uptodate = 0; ··· 586 586 struct ide_atapi_pc *pc, struct request *rq, 587 587 u8 opcode) 588 588 { 589 - unsigned int length = rq->nr_sectors; 589 + unsigned int length = blk_rq_sectors(rq); 590 590 591 591 ide_init_pc(pc); 592 592 put_unaligned(cpu_to_be32(length), (unsigned int *) &pc->c[1]); ··· 617 617 struct ide_cmd cmd; 618 618 u8 stat; 619 619 620 - debug_log(DBG_SENSE, "sector: %llu, nr_sectors: %lu\n", 621 - (unsigned long long)rq->sector, rq->nr_sectors); 620 + debug_log(DBG_SENSE, "sector: %llu, nr_sectors: %u\n" 621 + (unsigned long long)blk_rq_pos(rq), blk_rq_sectors(rq)); 622 622 623 623 if (!(blk_special_request(rq) || blk_sense_request(rq))) { 624 624 /* We do not support buffer cache originated requests. */ ··· 892 892 rq->cmd_type = REQ_TYPE_SPECIAL; 893 893 rq->cmd[13] = cmd; 894 894 rq->rq_disk = tape->disk; 895 - rq->sector = tape->first_frame; 895 + rq->__sector = tape->first_frame; 896 896 897 897 if (size) { 898 898 ret = blk_rq_map_kern(drive->queue, rq, tape->buf, size, ··· 904 904 blk_execute_rq(drive->queue, tape->disk, rq, 0); 905 905 906 906 /* calculate the number of transferred bytes and update buffer state */ 907 - size -= rq->data_len; 907 + size -= rq->resid_len; 908 908 tape->cur = tape->buf; 909 909 if (cmd == REQ_IDETAPE_READ) 910 910 tape->valid = size;
+1 -1
drivers/ide/ide-taskfile.c
··· 385 385 if ((cmd->tf_flags & IDE_TFLAG_FS) == 0) 386 386 ide_finish_cmd(drive, cmd, stat); 387 387 else 388 - ide_complete_rq(drive, 0, cmd->rq->nr_sectors << 9); 388 + ide_complete_rq(drive, 0, blk_rq_sectors(cmd->rq) << 9); 389 389 return ide_stopped; 390 390 out_err: 391 391 ide_error_cmd(drive, cmd);
+1 -1
drivers/ide/pdc202xx_old.c
··· 177 177 u8 clock = inb(high_16 + 0x11); 178 178 179 179 outb(clock | (hwif->channel ? 0x08 : 0x02), high_16 + 0x11); 180 - word_count = (rq->nr_sectors << 8); 180 + word_count = (blk_rq_sectors(rq) << 8); 181 181 word_count = (rq_data_dir(rq) == READ) ? 182 182 word_count | 0x05000000 : 183 183 word_count | 0x06000000;
+1 -1
drivers/ide/tc86c001.c
··· 112 112 ide_hwif_t *hwif = drive->hwif; 113 113 unsigned long sc_base = hwif->config_data; 114 114 unsigned long twcr_port = sc_base + (drive->dn ? 0x06 : 0x04); 115 - unsigned long nsectors = hwif->rq->nr_sectors; 115 + unsigned long nsectors = blk_rq_sectors(hwif->rq); 116 116 117 117 /* 118 118 * We have to manually load the sector count and size into
+1 -1
drivers/ide/tx4939ide.c
··· 307 307 tx4939ide_writew(SECTOR_SIZE / 2, base, drive->dn ? 308 308 TX4939IDE_Xfer_Cnt_2 : TX4939IDE_Xfer_Cnt_1); 309 309 310 - tx4939ide_writew(cmd->rq->nr_sectors, base, TX4939IDE_Sec_Cnt); 310 + tx4939ide_writew(blk_rq_sectors(cmd->rq), base, TX4939IDE_Sec_Cnt); 311 311 312 312 return 0; 313 313 }
+2 -2
drivers/md/bitmap.c
··· 232 232 target = rdev->sb_start + offset + index * (PAGE_SIZE/512); 233 233 234 234 if (sync_page_io(rdev->bdev, target, 235 - roundup(size, bdev_hardsect_size(rdev->bdev)), 235 + roundup(size, bdev_logical_block_size(rdev->bdev)), 236 236 page, READ)) { 237 237 page->index = index; 238 238 attach_page_buffers(page, NULL); /* so that free_buffer will ··· 287 287 int size = PAGE_SIZE; 288 288 if (page->index == bitmap->file_pages-1) 289 289 size = roundup(bitmap->last_page_size, 290 - bdev_hardsect_size(rdev->bdev)); 290 + bdev_logical_block_size(rdev->bdev)); 291 291 /* Just make sure we aren't corrupting data or 292 292 * metadata 293 293 */
+1 -1
drivers/md/dm-exception-store.c
··· 178 178 } 179 179 180 180 /* Validate the chunk size against the device block size */ 181 - if (chunk_size_ulong % (bdev_hardsect_size(store->cow->bdev) >> 9)) { 181 + if (chunk_size_ulong % (bdev_logical_block_size(store->cow->bdev) >> 9)) { 182 182 *error = "Chunk size is not a multiple of device blocksize"; 183 183 return -EINVAL; 184 184 }
+2 -1
drivers/md/dm-log.c
··· 413 413 * Buffer holds both header and bitset. 414 414 */ 415 415 buf_size = dm_round_up((LOG_OFFSET << SECTOR_SHIFT) + 416 - bitset_size, ti->limits.hardsect_size); 416 + bitset_size, 417 + ti->limits.logical_block_size); 417 418 418 419 if (buf_size > dev->bdev->bd_inode->i_size) { 419 420 DMWARN("log device %s too small: need %llu bytes",
+1 -1
drivers/md/dm-snap-persistent.c
··· 282 282 */ 283 283 if (!ps->store->chunk_size) { 284 284 ps->store->chunk_size = max(DM_CHUNK_SIZE_DEFAULT_SECTORS, 285 - bdev_hardsect_size(ps->store->cow->bdev) >> 9); 285 + bdev_logical_block_size(ps->store->cow->bdev) >> 9); 286 286 ps->store->chunk_mask = ps->store->chunk_size - 1; 287 287 ps->store->chunk_shift = ffs(ps->store->chunk_size) - 1; 288 288 chunk_size_supplied = 0;
+20 -18
drivers/md/dm-table.c
··· 108 108 lhs->max_hw_segments = 109 109 min_not_zero(lhs->max_hw_segments, rhs->max_hw_segments); 110 110 111 - lhs->hardsect_size = max(lhs->hardsect_size, rhs->hardsect_size); 111 + lhs->logical_block_size = max(lhs->logical_block_size, 112 + rhs->logical_block_size); 112 113 113 114 lhs->max_segment_size = 114 115 min_not_zero(lhs->max_segment_size, rhs->max_segment_size); ··· 510 509 * combine_restrictions_low() 511 510 */ 512 511 rs->max_sectors = 513 - min_not_zero(rs->max_sectors, q->max_sectors); 512 + min_not_zero(rs->max_sectors, queue_max_sectors(q)); 514 513 515 514 /* 516 515 * Check if merge fn is supported. ··· 525 524 526 525 rs->max_phys_segments = 527 526 min_not_zero(rs->max_phys_segments, 528 - q->max_phys_segments); 527 + queue_max_phys_segments(q)); 529 528 530 529 rs->max_hw_segments = 531 - min_not_zero(rs->max_hw_segments, q->max_hw_segments); 530 + min_not_zero(rs->max_hw_segments, queue_max_hw_segments(q)); 532 531 533 - rs->hardsect_size = max(rs->hardsect_size, q->hardsect_size); 532 + rs->logical_block_size = max(rs->logical_block_size, 533 + queue_logical_block_size(q)); 534 534 535 535 rs->max_segment_size = 536 - min_not_zero(rs->max_segment_size, q->max_segment_size); 536 + min_not_zero(rs->max_segment_size, queue_max_segment_size(q)); 537 537 538 538 rs->max_hw_sectors = 539 - min_not_zero(rs->max_hw_sectors, q->max_hw_sectors); 539 + min_not_zero(rs->max_hw_sectors, queue_max_hw_sectors(q)); 540 540 541 541 rs->seg_boundary_mask = 542 542 min_not_zero(rs->seg_boundary_mask, 543 - q->seg_boundary_mask); 543 + queue_segment_boundary(q)); 544 544 545 - rs->bounce_pfn = min_not_zero(rs->bounce_pfn, q->bounce_pfn); 545 + rs->bounce_pfn = min_not_zero(rs->bounce_pfn, queue_bounce_pfn(q)); 546 546 547 547 rs->no_cluster |= !test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags); 548 548 } ··· 685 683 rs->max_phys_segments = MAX_PHYS_SEGMENTS; 686 684 if (!rs->max_hw_segments) 687 685 rs->max_hw_segments = MAX_HW_SEGMENTS; 688 - if 
(!rs->hardsect_size) 689 - rs->hardsect_size = 1 << SECTOR_SHIFT; 686 + if (!rs->logical_block_size) 687 + rs->logical_block_size = 1 << SECTOR_SHIFT; 690 688 if (!rs->max_segment_size) 691 689 rs->max_segment_size = MAX_SEGMENT_SIZE; 692 690 if (!rs->seg_boundary_mask) ··· 914 912 * restrictions. 915 913 */ 916 914 blk_queue_max_sectors(q, t->limits.max_sectors); 917 - q->max_phys_segments = t->limits.max_phys_segments; 918 - q->max_hw_segments = t->limits.max_hw_segments; 919 - q->hardsect_size = t->limits.hardsect_size; 920 - q->max_segment_size = t->limits.max_segment_size; 921 - q->max_hw_sectors = t->limits.max_hw_sectors; 922 - q->seg_boundary_mask = t->limits.seg_boundary_mask; 923 - q->bounce_pfn = t->limits.bounce_pfn; 915 + blk_queue_max_phys_segments(q, t->limits.max_phys_segments); 916 + blk_queue_max_hw_segments(q, t->limits.max_hw_segments); 917 + blk_queue_logical_block_size(q, t->limits.logical_block_size); 918 + blk_queue_max_segment_size(q, t->limits.max_segment_size); 919 + blk_queue_max_hw_sectors(q, t->limits.max_hw_sectors); 920 + blk_queue_segment_boundary(q, t->limits.seg_boundary_mask); 921 + blk_queue_bounce_limit(q, t->limits.bounce_pfn); 924 922 925 923 if (t->limits.no_cluster) 926 924 queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);
+1 -1
drivers/md/linear.c
··· 146 146 * a one page request is never in violation. 147 147 */ 148 148 if (rdev->bdev->bd_disk->queue->merge_bvec_fn && 149 - mddev->queue->max_sectors > (PAGE_SIZE>>9)) 149 + queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) 150 150 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); 151 151 152 152 disk->num_sectors = rdev->sectors;
+1 -1
drivers/md/md.c
··· 1202 1202 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read)); 1203 1203 1204 1204 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256; 1205 - bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1; 1205 + bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1; 1206 1206 if (rdev->sb_size & bmask) 1207 1207 rdev->sb_size = (rdev->sb_size | bmask) + 1; 1208 1208
+2 -2
drivers/md/multipath.c
··· 303 303 * merge_bvec_fn will be involved in multipath.) 304 304 */ 305 305 if (q->merge_bvec_fn && 306 - mddev->queue->max_sectors > (PAGE_SIZE>>9)) 306 + queue_max_sectors(q) > (PAGE_SIZE>>9)) 307 307 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); 308 308 309 309 conf->working_disks++; ··· 467 467 * violating it, not that we ever expect a device with 468 468 * a merge_bvec_fn to be involved in multipath */ 469 469 if (rdev->bdev->bd_disk->queue->merge_bvec_fn && 470 - mddev->queue->max_sectors > (PAGE_SIZE>>9)) 470 + queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) 471 471 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); 472 472 473 473 if (!test_bit(Faulty, &rdev->flags))
+1 -1
drivers/md/raid0.c
··· 144 144 */ 145 145 146 146 if (rdev1->bdev->bd_disk->queue->merge_bvec_fn && 147 - mddev->queue->max_sectors > (PAGE_SIZE>>9)) 147 + queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) 148 148 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); 149 149 150 150 if (!smallest || (rdev1->sectors < smallest->sectors))
+2 -2
drivers/md/raid1.c
··· 1130 1130 * a one page request is never in violation. 1131 1131 */ 1132 1132 if (rdev->bdev->bd_disk->queue->merge_bvec_fn && 1133 - mddev->queue->max_sectors > (PAGE_SIZE>>9)) 1133 + queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) 1134 1134 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); 1135 1135 1136 1136 p->head_position = 0; ··· 1996 1996 * a one page request is never in violation. 1997 1997 */ 1998 1998 if (rdev->bdev->bd_disk->queue->merge_bvec_fn && 1999 - mddev->queue->max_sectors > (PAGE_SIZE>>9)) 1999 + queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) 2000 2000 blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); 2001 2001 2002 2002 disk->head_position = 0;
+4 -4
drivers/md/raid10.c
··· 1158 1158 * a one page request is never in violation. 1159 1159 */ 1160 1160 if (rdev->bdev->bd_disk->queue->merge_bvec_fn && 1161 - mddev->queue->max_sectors > (PAGE_SIZE>>9)) 1162 - mddev->queue->max_sectors = (PAGE_SIZE>>9); 1161 + queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) 1162 + blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); 1163 1163 1164 1164 p->head_position = 0; 1165 1165 rdev->raid_disk = mirror; ··· 2145 2145 * a one page request is never in violation. 2146 2146 */ 2147 2147 if (rdev->bdev->bd_disk->queue->merge_bvec_fn && 2148 - mddev->queue->max_sectors > (PAGE_SIZE>>9)) 2149 - mddev->queue->max_sectors = (PAGE_SIZE>>9); 2148 + queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9)) 2149 + blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9); 2150 2150 2151 2151 disk->head_position = 0; 2152 2152 }
+2 -2
drivers/md/raid5.c
··· 3463 3463 { 3464 3464 struct request_queue *q = bdev_get_queue(bi->bi_bdev); 3465 3465 3466 - if ((bi->bi_size>>9) > q->max_sectors) 3466 + if ((bi->bi_size>>9) > queue_max_sectors(q)) 3467 3467 return 0; 3468 3468 blk_recount_segments(q, bi); 3469 - if (bi->bi_phys_segments > q->max_phys_segments) 3469 + if (bi->bi_phys_segments > queue_max_phys_segments(q)) 3470 3470 return 0; 3471 3471 3472 3472 if (q->merge_bvec_fn)
+9 -10
drivers/memstick/core/mspro_block.c
··· 672 672 msb->req_sg); 673 673 674 674 if (!msb->seg_count) { 675 - chunk = __blk_end_request(msb->block_req, -ENOMEM, 676 - blk_rq_cur_bytes(msb->block_req)); 675 + chunk = __blk_end_request_cur(msb->block_req, -ENOMEM); 677 676 continue; 678 677 } 679 678 680 - t_sec = msb->block_req->sector << 9; 679 + t_sec = blk_rq_pos(msb->block_req) << 9; 681 680 sector_div(t_sec, msb->page_size); 682 681 683 - count = msb->block_req->nr_sectors << 9; 682 + count = blk_rq_bytes(msb->block_req); 684 683 count /= msb->page_size; 685 684 686 685 param.system = msb->system; ··· 704 705 return 0; 705 706 } 706 707 707 - dev_dbg(&card->dev, "elv_next\n"); 708 - msb->block_req = elv_next_request(msb->queue); 708 + dev_dbg(&card->dev, "blk_fetch\n"); 709 + msb->block_req = blk_fetch_request(msb->queue); 709 710 if (!msb->block_req) { 710 711 dev_dbg(&card->dev, "issue end\n"); 711 712 return -EAGAIN; ··· 744 745 t_len *= msb->page_size; 745 746 } 746 747 } else 747 - t_len = msb->block_req->nr_sectors << 9; 748 + t_len = blk_rq_bytes(msb->block_req); 748 749 749 750 dev_dbg(&card->dev, "transferred %x (%d)\n", t_len, error); 750 751 ··· 824 825 return; 825 826 826 827 if (msb->eject) { 827 - while ((req = elv_next_request(q)) != NULL) 828 - __blk_end_request(req, -ENODEV, blk_rq_bytes(req)); 828 + while ((req = blk_fetch_request(q)) != NULL) 829 + __blk_end_request_all(req, -ENODEV); 829 830 830 831 return; 831 832 } ··· 1242 1243 1243 1244 sprintf(msb->disk->disk_name, "mspblk%d", disk_id); 1244 1245 1245 - blk_queue_hardsect_size(msb->queue, msb->page_size); 1246 + blk_queue_logical_block_size(msb->queue, msb->page_size); 1246 1247 1247 1248 capacity = be16_to_cpu(sys_info->user_block_count); 1248 1249 capacity *= be16_to_cpu(sys_info->block_size);
+11 -11
drivers/message/fusion/mptsas.c
··· 1277 1277 /* do we need to support multiple segments? */ 1278 1278 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) { 1279 1279 printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n", 1280 - ioc->name, __func__, req->bio->bi_vcnt, req->data_len, 1281 - rsp->bio->bi_vcnt, rsp->data_len); 1280 + ioc->name, __func__, req->bio->bi_vcnt, blk_rq_bytes(req), 1281 + rsp->bio->bi_vcnt, blk_rq_bytes(rsp)); 1282 1282 return -EINVAL; 1283 1283 } 1284 1284 ··· 1295 1295 smpreq = (SmpPassthroughRequest_t *)mf; 1296 1296 memset(smpreq, 0, sizeof(*smpreq)); 1297 1297 1298 - smpreq->RequestDataLength = cpu_to_le16(req->data_len - 4); 1298 + smpreq->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4); 1299 1299 smpreq->Function = MPI_FUNCTION_SMP_PASSTHROUGH; 1300 1300 1301 1301 if (rphy) ··· 1321 1321 MPI_SGE_FLAGS_END_OF_BUFFER | 1322 1322 MPI_SGE_FLAGS_DIRECTION | 1323 1323 mpt_addr_size()) << MPI_SGE_FLAGS_SHIFT; 1324 - flagsLength |= (req->data_len - 4); 1324 + flagsLength |= (blk_rq_bytes(req) - 4); 1325 1325 1326 1326 dma_addr_out = pci_map_single(ioc->pcidev, bio_data(req->bio), 1327 - req->data_len, PCI_DMA_BIDIRECTIONAL); 1327 + blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL); 1328 1328 if (!dma_addr_out) 1329 1329 goto put_mf; 1330 1330 mpt_add_sge(psge, flagsLength, dma_addr_out); ··· 1332 1332 1333 1333 /* response */ 1334 1334 flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ; 1335 - flagsLength |= rsp->data_len + 4; 1335 + flagsLength |= blk_rq_bytes(rsp) + 4; 1336 1336 dma_addr_in = pci_map_single(ioc->pcidev, bio_data(rsp->bio), 1337 - rsp->data_len, PCI_DMA_BIDIRECTIONAL); 1337 + blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL); 1338 1338 if (!dma_addr_in) 1339 1339 goto unmap; 1340 1340 mpt_add_sge(psge, flagsLength, dma_addr_in); ··· 1357 1357 smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply; 1358 1358 memcpy(req->sense, smprep, sizeof(*smprep)); 1359 1359 req->sense_len = sizeof(*smprep); 1360 - req->data_len = 0; 1361 - rsp->data_len -= 
smprep->ResponseDataLength; 1360 + req->resid_len = 0; 1361 + rsp->resid_len -= smprep->ResponseDataLength; 1362 1362 } else { 1363 1363 printk(MYIOC_s_ERR_FMT "%s: smp passthru reply failed to be returned\n", 1364 1364 ioc->name, __func__); ··· 1366 1366 } 1367 1367 unmap: 1368 1368 if (dma_addr_out) 1369 - pci_unmap_single(ioc->pcidev, dma_addr_out, req->data_len, 1369 + pci_unmap_single(ioc->pcidev, dma_addr_out, blk_rq_bytes(req), 1370 1370 PCI_DMA_BIDIRECTIONAL); 1371 1371 if (dma_addr_in) 1372 - pci_unmap_single(ioc->pcidev, dma_addr_in, rsp->data_len, 1372 + pci_unmap_single(ioc->pcidev, dma_addr_in, blk_rq_bytes(rsp), 1373 1373 PCI_DMA_BIDIRECTIONAL); 1374 1374 put_mf: 1375 1375 if (mf)
+20 -23
drivers/message/i2o/i2o_block.c
··· 426 426 struct request_queue *q = req->q; 427 427 unsigned long flags; 428 428 429 - if (blk_end_request(req, error, nr_bytes)) { 430 - int leftover = (req->hard_nr_sectors << KERNEL_SECTOR_SHIFT); 431 - 432 - if (blk_pc_request(req)) 433 - leftover = req->data_len; 434 - 429 + if (blk_end_request(req, error, nr_bytes)) 435 430 if (error) 436 - blk_end_request(req, -EIO, leftover); 437 - } 431 + blk_end_request_all(req, -EIO); 438 432 439 433 spin_lock_irqsave(q->queue_lock, flags); 440 434 ··· 755 761 break; 756 762 757 763 case CACHE_SMARTFETCH: 758 - if (req->nr_sectors > 16) 764 + if (blk_rq_sectors(req) > 16) 759 765 ctl_flags = 0x201F0008; 760 766 else 761 767 ctl_flags = 0x001F0000; ··· 775 781 ctl_flags = 0x001F0010; 776 782 break; 777 783 case CACHE_SMARTBACK: 778 - if (req->nr_sectors > 16) 784 + if (blk_rq_sectors(req) > 16) 779 785 ctl_flags = 0x001F0004; 780 786 else 781 787 ctl_flags = 0x001F0010; 782 788 break; 783 789 case CACHE_SMARTTHROUGH: 784 - if (req->nr_sectors > 16) 790 + if (blk_rq_sectors(req) > 16) 785 791 ctl_flags = 0x001F0004; 786 792 else 787 793 ctl_flags = 0x001F0010; ··· 794 800 if (c->adaptec) { 795 801 u8 cmd[10]; 796 802 u32 scsi_flags; 797 - u16 hwsec = queue_hardsect_size(req->q) >> KERNEL_SECTOR_SHIFT; 803 + u16 hwsec; 798 804 805 + hwsec = queue_logical_block_size(req->q) >> KERNEL_SECTOR_SHIFT; 799 806 memset(cmd, 0, 10); 800 807 801 808 sgl_offset = SGL_OFFSET_12; ··· 822 827 823 828 *mptr++ = cpu_to_le32(scsi_flags); 824 829 825 - *((u32 *) & cmd[2]) = cpu_to_be32(req->sector * hwsec); 826 - *((u16 *) & cmd[7]) = cpu_to_be16(req->nr_sectors * hwsec); 830 + *((u32 *) & cmd[2]) = cpu_to_be32(blk_rq_pos(req) * hwsec); 831 + *((u16 *) & cmd[7]) = cpu_to_be16(blk_rq_sectors(req) * hwsec); 827 832 828 833 memcpy(mptr, cmd, 10); 829 834 mptr += 4; 830 - *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT); 835 + *mptr++ = cpu_to_le32(blk_rq_bytes(req)); 831 836 } else 832 837 #endif 833 838 { 834 839 msg->u.head[1] 
= cpu_to_le32(cmd | HOST_TID << 12 | tid); 835 840 *mptr++ = cpu_to_le32(ctl_flags); 836 - *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT); 841 + *mptr++ = cpu_to_le32(blk_rq_bytes(req)); 837 842 *mptr++ = 838 - cpu_to_le32((u32) (req->sector << KERNEL_SECTOR_SHIFT)); 843 + cpu_to_le32((u32) (blk_rq_pos(req) << KERNEL_SECTOR_SHIFT)); 839 844 *mptr++ = 840 - cpu_to_le32(req->sector >> (32 - KERNEL_SECTOR_SHIFT)); 845 + cpu_to_le32(blk_rq_pos(req) >> (32 - KERNEL_SECTOR_SHIFT)); 841 846 } 842 847 843 848 if (!i2o_block_sglist_alloc(c, ireq, &mptr)) { ··· 878 883 struct request *req; 879 884 880 885 while (!blk_queue_plugged(q)) { 881 - req = elv_next_request(q); 886 + req = blk_peek_request(q); 882 887 if (!req) 883 888 break; 884 889 ··· 891 896 892 897 if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) { 893 898 if (!i2o_block_transfer(req)) { 894 - blkdev_dequeue_request(req); 899 + blk_start_request(req); 895 900 continue; 896 901 } else 897 902 osm_info("transfer error\n"); ··· 917 922 blk_stop_queue(q); 918 923 break; 919 924 } 920 - } else 921 - end_request(req, 0); 925 + } else { 926 + blk_start_request(req); 927 + __blk_end_request_all(req, -EIO); 928 + } 922 929 } 923 930 }; 924 931 ··· 1079 1082 */ 1080 1083 if (!i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) || 1081 1084 !i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) { 1082 - blk_queue_hardsect_size(queue, le32_to_cpu(blocksize)); 1085 + blk_queue_logical_block_size(queue, le32_to_cpu(blocksize)); 1083 1086 } else 1084 1087 osm_warn("unable to get blocksize of %s\n", gd->disk_name); 1085 1088
+6 -6
drivers/mmc/card/block.c
··· 243 243 brq.mrq.cmd = &brq.cmd; 244 244 brq.mrq.data = &brq.data; 245 245 246 - brq.cmd.arg = req->sector; 246 + brq.cmd.arg = blk_rq_pos(req); 247 247 if (!mmc_card_blockaddr(card)) 248 248 brq.cmd.arg <<= 9; 249 249 brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; ··· 251 251 brq.stop.opcode = MMC_STOP_TRANSMISSION; 252 252 brq.stop.arg = 0; 253 253 brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; 254 - brq.data.blocks = req->nr_sectors; 254 + brq.data.blocks = blk_rq_sectors(req); 255 255 256 256 /* 257 257 * The block layer doesn't support all sector count ··· 301 301 * Adjust the sg list so it is the same size as the 302 302 * request. 303 303 */ 304 - if (brq.data.blocks != req->nr_sectors) { 304 + if (brq.data.blocks != blk_rq_sectors(req)) { 305 305 int i, data_size = brq.data.blocks << 9; 306 306 struct scatterlist *sg; 307 307 ··· 352 352 printk(KERN_ERR "%s: error %d transferring data," 353 353 " sector %u, nr %u, card status %#x\n", 354 354 req->rq_disk->disk_name, brq.data.error, 355 - (unsigned)req->sector, 356 - (unsigned)req->nr_sectors, status); 355 + (unsigned)blk_rq_pos(req), 356 + (unsigned)blk_rq_sectors(req), status); 357 357 } 358 358 359 359 if (brq.stop.error) { ··· 521 521 522 522 sprintf(md->disk->disk_name, "mmcblk%d", devidx); 523 523 524 - blk_queue_hardsect_size(md->queue.queue, 512); 524 + blk_queue_logical_block_size(md->queue.queue, 512); 525 525 526 526 if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) { 527 527 /*
+3 -8
drivers/mmc/card/queue.c
··· 55 55 spin_lock_irq(q->queue_lock); 56 56 set_current_state(TASK_INTERRUPTIBLE); 57 57 if (!blk_queue_plugged(q)) 58 - req = elv_next_request(q); 58 + req = blk_fetch_request(q); 59 59 mq->req = req; 60 60 spin_unlock_irq(q->queue_lock); 61 61 ··· 88 88 { 89 89 struct mmc_queue *mq = q->queuedata; 90 90 struct request *req; 91 - int ret; 92 91 93 92 if (!mq) { 94 93 printk(KERN_ERR "MMC: killing requests for dead queue\n"); 95 - while ((req = elv_next_request(q)) != NULL) { 96 - do { 97 - ret = __blk_end_request(req, -EIO, 98 - blk_rq_cur_bytes(req)); 99 - } while (ret); 100 - } 94 + while ((req = blk_fetch_request(q)) != NULL) 95 + __blk_end_request_all(req, -EIO); 101 96 return; 102 97 } 103 98
+24 -19
drivers/mtd/mtd_blkdevs.c
··· 47 47 unsigned long block, nsect; 48 48 char *buf; 49 49 50 - block = req->sector << 9 >> tr->blkshift; 51 - nsect = req->current_nr_sectors << 9 >> tr->blkshift; 50 + block = blk_rq_pos(req) << 9 >> tr->blkshift; 51 + nsect = blk_rq_cur_bytes(req) >> tr->blkshift; 52 52 53 53 buf = req->buffer; 54 54 55 55 if (req->cmd_type == REQ_TYPE_LINUX_BLOCK && 56 56 req->cmd[0] == REQ_LB_OP_DISCARD) 57 - return !tr->discard(dev, block, nsect); 57 + return tr->discard(dev, block, nsect); 58 58 59 59 if (!blk_fs_request(req)) 60 - return 0; 60 + return -EIO; 61 61 62 - if (req->sector + req->current_nr_sectors > get_capacity(req->rq_disk)) 63 - return 0; 62 + if (blk_rq_pos(req) + blk_rq_cur_sectors(req) > 63 + get_capacity(req->rq_disk)) 64 + return -EIO; 64 65 65 66 switch(rq_data_dir(req)) { 66 67 case READ: 67 68 for (; nsect > 0; nsect--, block++, buf += tr->blksize) 68 69 if (tr->readsect(dev, block, buf)) 69 - return 0; 70 - return 1; 70 + return -EIO; 71 + return 0; 71 72 72 73 case WRITE: 73 74 if (!tr->writesect) 74 - return 0; 75 + return -EIO; 75 76 76 77 for (; nsect > 0; nsect--, block++, buf += tr->blksize) 77 78 if (tr->writesect(dev, block, buf)) 78 - return 0; 79 - return 1; 79 + return -EIO; 80 + return 0; 80 81 81 82 default: 82 83 printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req)); 83 - return 0; 84 + return -EIO; 84 85 } 85 86 } 86 87 ··· 89 88 { 90 89 struct mtd_blktrans_ops *tr = arg; 91 90 struct request_queue *rq = tr->blkcore_priv->rq; 91 + struct request *req = NULL; 92 92 93 93 /* we might get involved when memory gets low, so use PF_MEMALLOC */ 94 94 current->flags |= PF_MEMALLOC; 95 95 96 96 spin_lock_irq(rq->queue_lock); 97 + 97 98 while (!kthread_should_stop()) { 98 - struct request *req; 99 99 struct mtd_blktrans_dev *dev; 100 - int res = 0; 100 + int res; 101 101 102 - req = elv_next_request(rq); 103 - 104 - if (!req) { 102 + if (!req && !(req = blk_fetch_request(rq))) { 105 103 set_current_state(TASK_INTERRUPTIBLE); 106 104 
spin_unlock_irq(rq->queue_lock); 107 105 schedule(); ··· 119 119 120 120 spin_lock_irq(rq->queue_lock); 121 121 122 - end_request(req, res); 122 + if (!__blk_end_request_cur(req, res)) 123 + req = NULL; 123 124 } 125 + 126 + if (req) 127 + __blk_end_request_all(req, -EIO); 128 + 124 129 spin_unlock_irq(rq->queue_lock); 125 130 126 131 return 0; ··· 378 373 } 379 374 380 375 tr->blkcore_priv->rq->queuedata = tr; 381 - blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize); 376 + blk_queue_logical_block_size(tr->blkcore_priv->rq, tr->blksize); 382 377 if (tr->discard) 383 378 blk_queue_set_discard(tr->blkcore_priv->rq, 384 379 blktrans_discard_request);
+11 -26
drivers/s390/block/dasd.c
··· 603 603 if (dasd_profile_level != DASD_PROFILE_ON) 604 604 return; 605 605 606 - sectors = req->nr_sectors; 606 + sectors = blk_rq_sectors(req); 607 607 if (!cqr->buildclk || !cqr->startclk || 608 608 !cqr->stopclk || !cqr->endclk || 609 609 !sectors) ··· 1614 1614 } 1615 1615 1616 1616 /* 1617 - * posts the buffer_cache about a finalized request 1618 - */ 1619 - static inline void dasd_end_request(struct request *req, int error) 1620 - { 1621 - if (__blk_end_request(req, error, blk_rq_bytes(req))) 1622 - BUG(); 1623 - } 1624 - 1625 - /* 1626 1617 * Process finished error recovery ccw. 1627 1618 */ 1628 1619 static inline void __dasd_block_process_erp(struct dasd_block *block, ··· 1656 1665 if (basedev->state < DASD_STATE_READY) 1657 1666 return; 1658 1667 /* Now we try to fetch requests from the request queue */ 1659 - while (!blk_queue_plugged(queue) && 1660 - elv_next_request(queue)) { 1661 - 1662 - req = elv_next_request(queue); 1663 - 1668 + while (!blk_queue_plugged(queue) && (req = blk_peek_request(queue))) { 1664 1669 if (basedev->features & DASD_FEATURE_READONLY && 1665 1670 rq_data_dir(req) == WRITE) { 1666 1671 DBF_DEV_EVENT(DBF_ERR, basedev, 1667 1672 "Rejecting write request %p", 1668 1673 req); 1669 - blkdev_dequeue_request(req); 1670 - dasd_end_request(req, -EIO); 1674 + blk_start_request(req); 1675 + __blk_end_request_all(req, -EIO); 1671 1676 continue; 1672 1677 } 1673 1678 cqr = basedev->discipline->build_cp(basedev, block, req); ··· 1691 1704 "CCW creation failed (rc=%ld) " 1692 1705 "on request %p", 1693 1706 PTR_ERR(cqr), req); 1694 - blkdev_dequeue_request(req); 1695 - dasd_end_request(req, -EIO); 1707 + blk_start_request(req); 1708 + __blk_end_request_all(req, -EIO); 1696 1709 continue; 1697 1710 } 1698 1711 /* ··· 1701 1714 */ 1702 1715 cqr->callback_data = (void *) req; 1703 1716 cqr->status = DASD_CQR_FILLED; 1704 - blkdev_dequeue_request(req); 1717 + blk_start_request(req); 1705 1718 list_add_tail(&cqr->blocklist, &block->ccw_queue); 
1706 1719 dasd_profile_start(block, cqr, req); 1707 1720 } ··· 1718 1731 status = cqr->block->base->discipline->free_cp(cqr, req); 1719 1732 if (status <= 0) 1720 1733 error = status ? status : -EIO; 1721 - dasd_end_request(req, error); 1734 + __blk_end_request_all(req, error); 1722 1735 } 1723 1736 1724 1737 /* ··· 1990 2003 { 1991 2004 int max; 1992 2005 1993 - blk_queue_hardsect_size(block->request_queue, block->bp_block); 2006 + blk_queue_logical_block_size(block->request_queue, block->bp_block); 1994 2007 max = block->base->discipline->max_blocks << block->s2b_shift; 1995 2008 blk_queue_max_sectors(block->request_queue, max); 1996 2009 blk_queue_max_phys_segments(block->request_queue, -1L); ··· 2025 2038 return; 2026 2039 2027 2040 spin_lock_irq(&block->request_queue_lock); 2028 - while ((req = elv_next_request(block->request_queue))) { 2029 - blkdev_dequeue_request(req); 2030 - dasd_end_request(req, -EIO); 2031 - } 2041 + while ((req = blk_fetch_request(block->request_queue))) 2042 + __blk_end_request_all(req, -EIO); 2032 2043 spin_unlock_irq(&block->request_queue_lock); 2033 2044 } 2034 2045
+3 -2
drivers/s390/block/dasd_diag.c
··· 505 505 return ERR_PTR(-EINVAL); 506 506 blksize = block->bp_block; 507 507 /* Calculate record id of first and last block. */ 508 - first_rec = req->sector >> block->s2b_shift; 509 - last_rec = (req->sector + req->nr_sectors - 1) >> block->s2b_shift; 508 + first_rec = blk_rq_pos(req) >> block->s2b_shift; 509 + last_rec = 510 + (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift; 510 511 /* Check struct bio and count the number of blocks for the request. */ 511 512 count = 0; 512 513 rq_for_each_segment(bv, req, iter) {
+3 -3
drivers/s390/block/dasd_eckd.c
··· 2354 2354 blksize = block->bp_block; 2355 2355 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize); 2356 2356 /* Calculate record id of first and last block. */ 2357 - first_rec = first_trk = req->sector >> block->s2b_shift; 2357 + first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift; 2358 2358 first_offs = sector_div(first_trk, blk_per_trk); 2359 2359 last_rec = last_trk = 2360 - (req->sector + req->nr_sectors - 1) >> block->s2b_shift; 2360 + (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift; 2361 2361 last_offs = sector_div(last_trk, blk_per_trk); 2362 2362 cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk); 2363 2363 ··· 2420 2420 private = (struct dasd_eckd_private *) cqr->block->base->private; 2421 2421 blksize = cqr->block->bp_block; 2422 2422 blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize); 2423 - recid = req->sector >> cqr->block->s2b_shift; 2423 + recid = blk_rq_pos(req) >> cqr->block->s2b_shift; 2424 2424 ccw = cqr->cpaddr; 2425 2425 /* Skip over define extent & locate record. */ 2426 2426 ccw++;
+4 -3
drivers/s390/block/dasd_fba.c
··· 270 270 return ERR_PTR(-EINVAL); 271 271 blksize = block->bp_block; 272 272 /* Calculate record id of first and last block. */ 273 - first_rec = req->sector >> block->s2b_shift; 274 - last_rec = (req->sector + req->nr_sectors - 1) >> block->s2b_shift; 273 + first_rec = blk_rq_pos(req) >> block->s2b_shift; 274 + last_rec = 275 + (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift; 275 276 /* Check struct bio and count the number of blocks for the request. */ 276 277 count = 0; 277 278 cidaw = 0; ··· 310 309 ccw = cqr->cpaddr; 311 310 /* First ccw is define extent. */ 312 311 define_extent(ccw++, cqr->data, rq_data_dir(req), 313 - block->bp_block, req->sector, req->nr_sectors); 312 + block->bp_block, blk_rq_pos(req), blk_rq_sectors(req)); 314 313 /* Build locate_record + read/write ccws. */ 315 314 idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data)); 316 315 LO_data = (struct LO_fba_data *) (idaws + cidaw);
+1 -1
drivers/s390/block/dcssblk.c
··· 602 602 dev_info->gd->private_data = dev_info; 603 603 dev_info->gd->driverfs_dev = &dev_info->dev; 604 604 blk_queue_make_request(dev_info->dcssblk_queue, dcssblk_make_request); 605 - blk_queue_hardsect_size(dev_info->dcssblk_queue, 4096); 605 + blk_queue_logical_block_size(dev_info->dcssblk_queue, 4096); 606 606 607 607 seg_byte_size = (dev_info->end - dev_info->start + 1); 608 608 set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors
+1 -1
drivers/s390/block/xpram.c
··· 343 343 goto out; 344 344 } 345 345 blk_queue_make_request(xpram_queues[i], xpram_make_request); 346 - blk_queue_hardsect_size(xpram_queues[i], 4096); 346 + blk_queue_logical_block_size(xpram_queues[i], 4096); 347 347 } 348 348 349 349 /*
+1 -1
drivers/s390/char/tape_34xx.c
··· 1134 1134 /* Setup ccws. */ 1135 1135 request->op = TO_BLOCK; 1136 1136 start_block = (struct tape_34xx_block_id *) request->cpdata; 1137 - start_block->block = req->sector >> TAPEBLOCK_HSEC_S2B; 1137 + start_block->block = blk_rq_pos(req) >> TAPEBLOCK_HSEC_S2B; 1138 1138 DBF_EVENT(6, "start_block = %i\n", start_block->block); 1139 1139 1140 1140 ccw = request->cpaddr;
+1 -1
drivers/s390/char/tape_3590.c
··· 633 633 struct req_iterator iter; 634 634 635 635 DBF_EVENT(6, "xBREDid:"); 636 - start_block = req->sector >> TAPEBLOCK_HSEC_S2B; 636 + start_block = blk_rq_pos(req) >> TAPEBLOCK_HSEC_S2B; 637 637 DBF_EVENT(6, "start_block = %i\n", start_block); 638 638 639 639 rq_for_each_segment(bv, req, iter)
+8 -18
drivers/s390/char/tape_block.c
··· 74 74 * Post finished request. 75 75 */ 76 76 static void 77 - tapeblock_end_request(struct request *req, int error) 78 - { 79 - if (blk_end_request(req, error, blk_rq_bytes(req))) 80 - BUG(); 81 - } 82 - 83 - static void 84 77 __tapeblock_end_request(struct tape_request *ccw_req, void *data) 85 78 { 86 79 struct tape_device *device; ··· 83 90 84 91 device = ccw_req->device; 85 92 req = (struct request *) data; 86 - tapeblock_end_request(req, (ccw_req->rc == 0) ? 0 : -EIO); 93 + blk_end_request_all(req, (ccw_req->rc == 0) ? 0 : -EIO); 87 94 if (ccw_req->rc == 0) 88 95 /* Update position. */ 89 96 device->blk_data.block_position = 90 - (req->sector + req->nr_sectors) >> TAPEBLOCK_HSEC_S2B; 97 + (blk_rq_pos(req) + blk_rq_sectors(req)) >> TAPEBLOCK_HSEC_S2B; 91 98 else 92 99 /* We lost the position information due to an error. */ 93 100 device->blk_data.block_position = -1; 94 101 device->discipline->free_bread(ccw_req); 95 102 if (!list_empty(&device->req_queue) || 96 - elv_next_request(device->blk_data.request_queue)) 103 + blk_peek_request(device->blk_data.request_queue)) 97 104 tapeblock_trigger_requeue(device); 98 105 } 99 106 ··· 111 118 ccw_req = device->discipline->bread(device, req); 112 119 if (IS_ERR(ccw_req)) { 113 120 DBF_EVENT(1, "TBLOCK: bread failed\n"); 114 - tapeblock_end_request(req, -EIO); 121 + blk_end_request_all(req, -EIO); 115 122 return PTR_ERR(ccw_req); 116 123 } 117 124 ccw_req->callback = __tapeblock_end_request; ··· 124 131 * Start/enqueueing failed. No retries in 125 132 * this case. 
126 133 */ 127 - tapeblock_end_request(req, -EIO); 134 + blk_end_request_all(req, -EIO); 128 135 device->discipline->free_bread(ccw_req); 129 136 } 130 137 ··· 162 169 spin_lock_irq(&device->blk_data.request_queue_lock); 163 170 while ( 164 171 !blk_queue_plugged(queue) && 165 - elv_next_request(queue) && 172 + (req = blk_fetch_request(queue)) && 166 173 nr_queued < TAPEBLOCK_MIN_REQUEUE 167 174 ) { 168 - req = elv_next_request(queue); 169 175 if (rq_data_dir(req) == WRITE) { 170 176 DBF_EVENT(1, "TBLOCK: Rejecting write request\n"); 171 - blkdev_dequeue_request(req); 172 177 spin_unlock_irq(&device->blk_data.request_queue_lock); 173 - tapeblock_end_request(req, -EIO); 178 + blk_end_request_all(req, -EIO); 174 179 spin_lock_irq(&device->blk_data.request_queue_lock); 175 180 continue; 176 181 } 177 - blkdev_dequeue_request(req); 178 182 nr_queued++; 179 183 spin_unlock_irq(&device->blk_data.request_queue_lock); 180 184 rc = tapeblock_start_request(device, req); ··· 222 232 if (rc) 223 233 goto cleanup_queue; 224 234 225 - blk_queue_hardsect_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE); 235 + blk_queue_logical_block_size(blkdat->request_queue, TAPEBLOCK_HSEC_SIZE); 226 236 blk_queue_max_sectors(blkdat->request_queue, TAPEBLOCK_MAX_SEC); 227 237 blk_queue_max_phys_segments(blkdat->request_queue, -1L); 228 238 blk_queue_max_hw_segments(blkdat->request_queue, -1L);
+13 -13
drivers/sbus/char/jsflash.c
··· 186 186 { 187 187 struct request *req; 188 188 189 - while ((req = elv_next_request(q)) != NULL) { 189 + req = blk_fetch_request(q); 190 + while (req) { 190 191 struct jsfd_part *jdp = req->rq_disk->private_data; 191 - unsigned long offset = req->sector << 9; 192 - size_t len = req->current_nr_sectors << 9; 192 + unsigned long offset = blk_rq_pos(req) << 9; 193 + size_t len = blk_rq_cur_bytes(req); 194 + int err = -EIO; 193 195 194 - if ((offset + len) > jdp->dsize) { 195 - end_request(req, 0); 196 - continue; 197 - } 196 + if ((offset + len) > jdp->dsize) 197 + goto end; 198 198 199 199 if (rq_data_dir(req) != READ) { 200 200 printk(KERN_ERR "jsfd: write\n"); 201 - end_request(req, 0); 202 - continue; 201 + goto end; 203 202 } 204 203 205 204 if ((jdp->dbase & 0xff000000) != 0x20000000) { 206 205 printk(KERN_ERR "jsfd: bad base %x\n", (int)jdp->dbase); 207 - end_request(req, 0); 208 - continue; 206 + goto end; 209 207 } 210 208 211 209 jsfd_read(req->buffer, jdp->dbase + offset, len); 212 - 213 - end_request(req, 1); 210 + err = 0; 211 + end: 212 + if (!__blk_end_request_cur(req, err)) 213 + req = blk_fetch_request(q); 214 214 } 215 215 } 216 216
+12 -12
drivers/scsi/eata.c
··· 1825 1825 if (linked_comm && SCpnt->device->queue_depth > 2 1826 1826 && TLDEV(SCpnt->device->type)) { 1827 1827 ha->cp_stat[i] = READY; 1828 - flush_dev(SCpnt->device, SCpnt->request->sector, ha, 0); 1828 + flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), ha, 0); 1829 1829 return 0; 1830 1830 } 1831 1831 ··· 2144 2144 if (!cpp->din) 2145 2145 input_only = 0; 2146 2146 2147 - if (SCpnt->request->sector < minsec) 2148 - minsec = SCpnt->request->sector; 2149 - if (SCpnt->request->sector > maxsec) 2150 - maxsec = SCpnt->request->sector; 2147 + if (blk_rq_pos(SCpnt->request) < minsec) 2148 + minsec = blk_rq_pos(SCpnt->request); 2149 + if (blk_rq_pos(SCpnt->request) > maxsec) 2150 + maxsec = blk_rq_pos(SCpnt->request); 2151 2151 2152 - sl[n] = SCpnt->request->sector; 2153 - ioseek += SCpnt->request->nr_sectors; 2152 + sl[n] = blk_rq_pos(SCpnt->request); 2153 + ioseek += blk_rq_sectors(SCpnt->request); 2154 2154 2155 2155 if (!n) 2156 2156 continue; ··· 2190 2190 k = il[n]; 2191 2191 cpp = &ha->cp[k]; 2192 2192 SCpnt = cpp->SCpnt; 2193 - ll[n] = SCpnt->request->nr_sectors; 2193 + ll[n] = blk_rq_sectors(SCpnt->request); 2194 2194 pl[n] = SCpnt->serial_number; 2195 2195 2196 2196 if (!n) ··· 2236 2236 cpp = &ha->cp[k]; 2237 2237 SCpnt = cpp->SCpnt; 2238 2238 scmd_printk(KERN_INFO, SCpnt, 2239 - "%s pid %ld mb %d fc %d nr %d sec %ld ns %ld" 2239 + "%s pid %ld mb %d fc %d nr %d sec %ld ns %u" 2240 2240 " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n", 2241 2241 (ihdlr ? 
"ihdlr" : "qcomm"), 2242 2242 SCpnt->serial_number, k, flushcount, 2243 - n_ready, SCpnt->request->sector, 2244 - SCpnt->request->nr_sectors, cursec, YESNO(s), 2243 + n_ready, blk_rq_pos(SCpnt->request), 2244 + blk_rq_sectors(SCpnt->request), cursec, YESNO(s), 2245 2245 YESNO(r), YESNO(rev), YESNO(input_only), 2246 2246 YESNO(overlap), cpp->din); 2247 2247 } ··· 2408 2408 2409 2409 if (linked_comm && SCpnt->device->queue_depth > 2 2410 2410 && TLDEV(SCpnt->device->type)) 2411 - flush_dev(SCpnt->device, SCpnt->request->sector, ha, 1); 2411 + flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), ha, 1); 2412 2412 2413 2413 tstatus = status_byte(spp->target_status); 2414 2414
+8 -8
drivers/scsi/libsas/sas_expander.c
··· 1927 1927 /* do we need to support multiple segments? */ 1928 1928 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) { 1929 1929 printk("%s: multiple segments req %u %u, rsp %u %u\n", 1930 - __func__, req->bio->bi_vcnt, req->data_len, 1931 - rsp->bio->bi_vcnt, rsp->data_len); 1930 + __func__, req->bio->bi_vcnt, blk_rq_bytes(req), 1931 + rsp->bio->bi_vcnt, blk_rq_bytes(rsp)); 1932 1932 return -EINVAL; 1933 1933 } 1934 1934 1935 - ret = smp_execute_task(dev, bio_data(req->bio), req->data_len, 1936 - bio_data(rsp->bio), rsp->data_len); 1935 + ret = smp_execute_task(dev, bio_data(req->bio), blk_rq_bytes(req), 1936 + bio_data(rsp->bio), blk_rq_bytes(rsp)); 1937 1937 if (ret > 0) { 1938 1938 /* positive number is the untransferred residual */ 1939 - rsp->data_len = ret; 1940 - req->data_len = 0; 1939 + rsp->resid_len = ret; 1940 + req->resid_len = 0; 1941 1941 ret = 0; 1942 1942 } else if (ret == 0) { 1943 - rsp->data_len = 0; 1944 - req->data_len = 0; 1943 + rsp->resid_len = 0; 1944 + req->resid_len = 0; 1945 1945 } 1946 1946 1947 1947 return ret;
+24 -25
drivers/scsi/libsas/sas_host_smp.c
··· 134 134 { 135 135 u8 *req_data = NULL, *resp_data = NULL, *buf; 136 136 struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost); 137 - int error = -EINVAL, resp_data_len = rsp->data_len; 137 + int error = -EINVAL; 138 138 139 139 /* eight is the minimum size for request and response frames */ 140 - if (req->data_len < 8 || rsp->data_len < 8) 140 + if (blk_rq_bytes(req) < 8 || blk_rq_bytes(rsp) < 8) 141 141 goto out; 142 142 143 - if (bio_offset(req->bio) + req->data_len > PAGE_SIZE || 144 - bio_offset(rsp->bio) + rsp->data_len > PAGE_SIZE) { 143 + if (bio_offset(req->bio) + blk_rq_bytes(req) > PAGE_SIZE || 144 + bio_offset(rsp->bio) + blk_rq_bytes(rsp) > PAGE_SIZE) { 145 145 shost_printk(KERN_ERR, shost, 146 146 "SMP request/response frame crosses page boundary"); 147 147 goto out; 148 148 } 149 149 150 - req_data = kzalloc(req->data_len, GFP_KERNEL); 150 + req_data = kzalloc(blk_rq_bytes(req), GFP_KERNEL); 151 151 152 152 /* make sure frame can always be built ... we copy 153 153 * back only the requested length */ 154 - resp_data = kzalloc(max(rsp->data_len, 128U), GFP_KERNEL); 154 + resp_data = kzalloc(max(blk_rq_bytes(rsp), 128U), GFP_KERNEL); 155 155 156 156 if (!req_data || !resp_data) { 157 157 error = -ENOMEM; ··· 160 160 161 161 local_irq_disable(); 162 162 buf = kmap_atomic(bio_page(req->bio), KM_USER0) + bio_offset(req->bio); 163 - memcpy(req_data, buf, req->data_len); 163 + memcpy(req_data, buf, blk_rq_bytes(req)); 164 164 kunmap_atomic(buf - bio_offset(req->bio), KM_USER0); 165 165 local_irq_enable(); 166 166 ··· 178 178 179 179 switch (req_data[1]) { 180 180 case SMP_REPORT_GENERAL: 181 - req->data_len -= 8; 182 - resp_data_len -= 32; 181 + req->resid_len -= 8; 182 + rsp->resid_len -= 32; 183 183 resp_data[2] = SMP_RESP_FUNC_ACC; 184 184 resp_data[9] = sas_ha->num_phys; 185 185 break; 186 186 187 187 case SMP_REPORT_MANUF_INFO: 188 - req->data_len -= 8; 189 - resp_data_len -= 64; 188 + req->resid_len -= 8; 189 + rsp->resid_len -= 64; 190 190 
resp_data[2] = SMP_RESP_FUNC_ACC; 191 191 memcpy(resp_data + 12, shost->hostt->name, 192 192 SAS_EXPANDER_VENDOR_ID_LEN); ··· 199 199 break; 200 200 201 201 case SMP_DISCOVER: 202 - req->data_len -= 16; 203 - if ((int)req->data_len < 0) { 204 - req->data_len = 0; 202 + req->resid_len -= 16; 203 + if ((int)req->resid_len < 0) { 204 + req->resid_len = 0; 205 205 error = -EINVAL; 206 206 goto out; 207 207 } 208 - resp_data_len -= 56; 208 + rsp->resid_len -= 56; 209 209 sas_host_smp_discover(sas_ha, resp_data, req_data[9]); 210 210 break; 211 211 ··· 215 215 break; 216 216 217 217 case SMP_REPORT_PHY_SATA: 218 - req->data_len -= 16; 219 - if ((int)req->data_len < 0) { 220 - req->data_len = 0; 218 + req->resid_len -= 16; 219 + if ((int)req->resid_len < 0) { 220 + req->resid_len = 0; 221 221 error = -EINVAL; 222 222 goto out; 223 223 } 224 - resp_data_len -= 60; 224 + rsp->resid_len -= 60; 225 225 sas_report_phy_sata(sas_ha, resp_data, req_data[9]); 226 226 break; 227 227 ··· 238 238 break; 239 239 240 240 case SMP_PHY_CONTROL: 241 - req->data_len -= 44; 242 - if ((int)req->data_len < 0) { 243 - req->data_len = 0; 241 + req->resid_len -= 44; 242 + if ((int)req->resid_len < 0) { 243 + req->resid_len = 0; 244 244 error = -EINVAL; 245 245 goto out; 246 246 } 247 - resp_data_len -= 8; 247 + rsp->resid_len -= 8; 248 248 sas_phy_control(sas_ha, req_data[9], req_data[10], 249 249 req_data[32] >> 4, req_data[33] >> 4, 250 250 resp_data); ··· 261 261 262 262 local_irq_disable(); 263 263 buf = kmap_atomic(bio_page(rsp->bio), KM_USER0) + bio_offset(rsp->bio); 264 - memcpy(buf, resp_data, rsp->data_len); 264 + memcpy(buf, resp_data, blk_rq_bytes(rsp)); 265 265 flush_kernel_dcache_page(bio_page(rsp->bio)); 266 266 kunmap_atomic(buf - bio_offset(rsp->bio), KM_USER0); 267 267 local_irq_enable(); 268 - rsp->data_len = resp_data_len; 269 268 270 269 out: 271 270 kfree(req_data);
+11 -11
drivers/scsi/lpfc/lpfc_scsi.c
··· 1312 1312 uint32_t bgstat = bgf->bgstat; 1313 1313 uint64_t failing_sector = 0; 1314 1314 1315 - printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%lx " 1315 + printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%x " 1316 1316 "bgstat=0x%x bghm=0x%x\n", 1317 1317 cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd), 1318 - cmd->request->nr_sectors, bgstat, bghm); 1318 + blk_rq_sectors(cmd->request), bgstat, bghm); 1319 1319 1320 1320 spin_lock(&_dump_buf_lock); 1321 1321 if (!_dump_buf_done) { ··· 2378 2378 if (cmnd->cmnd[0] == READ_10) 2379 2379 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2380 2380 "9035 BLKGRD: READ @ sector %llu, " 2381 - "count %lu\n", 2382 - (unsigned long long)scsi_get_lba(cmnd), 2383 - cmnd->request->nr_sectors); 2381 + "count %u\n", 2382 + (unsigned long long)scsi_get_lba(cmnd), 2383 + blk_rq_sectors(cmnd->request)); 2384 2384 else if (cmnd->cmnd[0] == WRITE_10) 2385 2385 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2386 2386 "9036 BLKGRD: WRITE @ sector %llu, " 2387 - "count %lu cmd=%p\n", 2387 + "count %u cmd=%p\n", 2388 2388 (unsigned long long)scsi_get_lba(cmnd), 2389 - cmnd->request->nr_sectors, 2389 + blk_rq_sectors(cmnd->request), 2390 2390 cmnd); 2391 2391 2392 2392 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd); ··· 2406 2406 if (cmnd->cmnd[0] == READ_10) 2407 2407 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2408 2408 "9040 dbg: READ @ sector %llu, " 2409 - "count %lu\n", 2409 + "count %u\n", 2410 2410 (unsigned long long)scsi_get_lba(cmnd), 2411 - cmnd->request->nr_sectors); 2411 + blk_rq_sectors(cmnd->request)); 2412 2412 else if (cmnd->cmnd[0] == WRITE_10) 2413 2413 lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG, 2414 2414 "9041 dbg: WRITE @ sector %llu, " 2415 - "count %lu cmd=%p\n", 2415 + "count %u cmd=%p\n", 2416 2416 (unsigned long long)scsi_get_lba(cmnd), 2417 - cmnd->request->nr_sectors, cmnd); 2417 + blk_rq_sectors(cmnd->request), cmnd); 2418 2418 else 2419 2419 lpfc_printf_vlog(vport, 
KERN_WARNING, LOG_BG, 2420 2420 "9042 dbg: parser not implemented\n");
+11 -12
drivers/scsi/mpt2sas/mpt2sas_transport.c
··· 1041 1041 if (req->bio->bi_vcnt > 1 || rsp->bio->bi_vcnt > 1) { 1042 1042 printk(MPT2SAS_ERR_FMT "%s: multiple segments req %u %u, " 1043 1043 "rsp %u %u\n", ioc->name, __func__, req->bio->bi_vcnt, 1044 - req->data_len, rsp->bio->bi_vcnt, rsp->data_len); 1044 + blk_rq_bytes(req), rsp->bio->bi_vcnt, blk_rq_bytes(rsp)); 1045 1045 return -EINVAL; 1046 1046 } 1047 1047 ··· 1104 1104 *((u64 *)&mpi_request->SASAddress) = (rphy) ? 1105 1105 cpu_to_le64(rphy->identify.sas_address) : 1106 1106 cpu_to_le64(ioc->sas_hba.sas_address); 1107 - mpi_request->RequestDataLength = cpu_to_le16(req->data_len - 4); 1107 + mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4); 1108 1108 psge = &mpi_request->SGL; 1109 1109 1110 1110 /* WRITE sgel first */ ··· 1112 1112 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC); 1113 1113 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; 1114 1114 dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio), 1115 - req->data_len, PCI_DMA_BIDIRECTIONAL); 1115 + blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL); 1116 1116 if (!dma_addr_out) { 1117 1117 mpt2sas_base_free_smid(ioc, le16_to_cpu(smid)); 1118 1118 goto unmap; 1119 1119 } 1120 1120 1121 - ioc->base_add_sg_single(psge, sgl_flags | (req->data_len - 4), 1121 + ioc->base_add_sg_single(psge, sgl_flags | (blk_rq_bytes(req) - 4), 1122 1122 dma_addr_out); 1123 1123 1124 1124 /* incr sgel */ ··· 1129 1129 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | 1130 1130 MPI2_SGE_FLAGS_END_OF_LIST); 1131 1131 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; 1132 - dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio), 1133 - rsp->data_len, PCI_DMA_BIDIRECTIONAL); 1132 + dma_addr_in = pci_map_single(ioc->pdev, bio_data(rsp->bio), 1133 + blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL); 1134 1134 if (!dma_addr_in) { 1135 1135 mpt2sas_base_free_smid(ioc, le16_to_cpu(smid)); 1136 1136 goto unmap; 1137 1137 } 1138 1138 1139 - ioc->base_add_sg_single(psge, sgl_flags | (rsp->data_len + 4), 
1139 + ioc->base_add_sg_single(psge, sgl_flags | (blk_rq_bytes(rsp) + 4), 1140 1140 dma_addr_in); 1141 1141 1142 1142 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s - " ··· 1170 1170 1171 1171 memcpy(req->sense, mpi_reply, sizeof(*mpi_reply)); 1172 1172 req->sense_len = sizeof(*mpi_reply); 1173 - req->data_len = 0; 1174 - rsp->data_len -= mpi_reply->ResponseDataLength; 1175 - 1173 + req->resid_len = 0; 1174 + rsp->resid_len -= mpi_reply->ResponseDataLength; 1176 1175 } else { 1177 1176 dtransportprintk(ioc, printk(MPT2SAS_DEBUG_FMT 1178 1177 "%s - no reply\n", ioc->name, __func__)); ··· 1187 1188 1188 1189 unmap: 1189 1190 if (dma_addr_out) 1190 - pci_unmap_single(ioc->pdev, dma_addr_out, req->data_len, 1191 + pci_unmap_single(ioc->pdev, dma_addr_out, blk_rq_bytes(req), 1191 1192 PCI_DMA_BIDIRECTIONAL); 1192 1193 if (dma_addr_in) 1193 - pci_unmap_single(ioc->pdev, dma_addr_in, rsp->data_len, 1194 + pci_unmap_single(ioc->pdev, dma_addr_in, blk_rq_bytes(rsp), 1194 1195 PCI_DMA_BIDIRECTIONAL); 1195 1196 1196 1197 out:
+25 -47
drivers/scsi/osd/osd_initiator.c
··· 889 889 } 890 890 EXPORT_SYMBOL(osd_req_add_set_attr_list); 891 891 892 - static int _append_map_kern(struct request *req, 893 - void *buff, unsigned len, gfp_t flags) 894 - { 895 - struct bio *bio; 896 - int ret; 897 - 898 - bio = bio_map_kern(req->q, buff, len, flags); 899 - if (IS_ERR(bio)) { 900 - OSD_ERR("Failed bio_map_kern(%p, %d) => %ld\n", buff, len, 901 - PTR_ERR(bio)); 902 - return PTR_ERR(bio); 903 - } 904 - ret = blk_rq_append_bio(req->q, req, bio); 905 - if (ret) { 906 - OSD_ERR("Failed blk_rq_append_bio(%p) => %d\n", bio, ret); 907 - bio_put(bio); 908 - } 909 - return ret; 910 - } 911 - 912 892 static int _req_append_segment(struct osd_request *or, 913 893 unsigned padding, struct _osd_req_data_segment *seg, 914 894 struct _osd_req_data_segment *last_seg, struct _osd_io_info *io) ··· 904 924 else 905 925 pad_buff = io->pad_buff; 906 926 907 - ret = _append_map_kern(io->req, pad_buff, padding, 927 + ret = blk_rq_map_kern(io->req->q, io->req, pad_buff, padding, 908 928 or->alloc_flags); 909 929 if (ret) 910 930 return ret; 911 931 io->total_bytes += padding; 912 932 } 913 933 914 - ret = _append_map_kern(io->req, seg->buff, seg->total_bytes, 934 + ret = blk_rq_map_kern(io->req->q, io->req, seg->buff, seg->total_bytes, 915 935 or->alloc_flags); 916 936 if (ret) 917 937 return ret; ··· 1273 1293 /* 1274 1294 * osd_finalize_request and helpers 1275 1295 */ 1296 + static struct request *_make_request(struct request_queue *q, bool has_write, 1297 + struct _osd_io_info *oii, gfp_t flags) 1298 + { 1299 + if (oii->bio) 1300 + return blk_make_request(q, oii->bio, flags); 1301 + else { 1302 + struct request *req; 1303 + 1304 + req = blk_get_request(q, has_write ? 
WRITE : READ, flags); 1305 + if (unlikely(!req)) 1306 + return ERR_PTR(-ENOMEM); 1307 + 1308 + return req; 1309 + } 1310 + } 1276 1311 1277 1312 static int _init_blk_request(struct osd_request *or, 1278 1313 bool has_in, bool has_out) ··· 1296 1301 struct scsi_device *scsi_device = or->osd_dev->scsi_device; 1297 1302 struct request_queue *q = scsi_device->request_queue; 1298 1303 struct request *req; 1299 - int ret = -ENOMEM; 1304 + int ret; 1300 1305 1301 - req = blk_get_request(q, has_out, flags); 1302 - if (!req) 1306 + req = _make_request(q, has_out, has_out ? &or->out : &or->in, flags); 1307 + if (IS_ERR(req)) { 1308 + ret = PTR_ERR(req); 1303 1309 goto out; 1310 + } 1304 1311 1305 1312 or->request = req; 1306 1313 req->cmd_type = REQ_TYPE_BLOCK_PC; ··· 1315 1318 or->out.req = req; 1316 1319 if (has_in) { 1317 1320 /* allocate bidi request */ 1318 - req = blk_get_request(q, READ, flags); 1319 - if (!req) { 1321 + req = _make_request(q, false, &or->in, flags); 1322 + if (IS_ERR(req)) { 1320 1323 OSD_DEBUG("blk_get_request for bidi failed\n"); 1324 + ret = PTR_ERR(req); 1321 1325 goto out; 1322 1326 } 1323 1327 req->cmd_type = REQ_TYPE_BLOCK_PC; ··· 1360 1362 if (ret) { 1361 1363 OSD_DEBUG("_init_blk_request failed\n"); 1362 1364 return ret; 1363 - } 1364 - 1365 - if (or->out.bio) { 1366 - ret = blk_rq_append_bio(or->request->q, or->out.req, 1367 - or->out.bio); 1368 - if (ret) { 1369 - OSD_DEBUG("blk_rq_append_bio out failed\n"); 1370 - return ret; 1371 - } 1372 - OSD_DEBUG("out bytes=%llu (bytes_req=%u)\n", 1373 - _LLU(or->out.total_bytes), or->out.req->data_len); 1374 - } 1375 - if (or->in.bio) { 1376 - ret = blk_rq_append_bio(or->request->q, or->in.req, or->in.bio); 1377 - if (ret) { 1378 - OSD_DEBUG("blk_rq_append_bio in failed\n"); 1379 - return ret; 1380 - } 1381 - OSD_DEBUG("in bytes=%llu (bytes_req=%u)\n", 1382 - _LLU(or->in.total_bytes), or->in.req->data_len); 1383 1365 } 1384 1366 1385 1367 or->out.pad_buff = sg_out_pad_buffer;
+29 -58
drivers/scsi/scsi_lib.c
··· 240 240 * is invalid. Prevent the garbage from being misinterpreted 241 241 * and prevent security leaks by zeroing out the excess data. 242 242 */ 243 - if (unlikely(req->data_len > 0 && req->data_len <= bufflen)) 244 - memset(buffer + (bufflen - req->data_len), 0, req->data_len); 243 + if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen)) 244 + memset(buffer + (bufflen - req->resid_len), 0, req->resid_len); 245 245 246 246 if (resid) 247 - *resid = req->data_len; 247 + *resid = req->resid_len; 248 248 ret = req->errors; 249 249 out: 250 250 blk_put_request(req); ··· 546 546 * to queue the remainder of them. 547 547 */ 548 548 if (blk_end_request(req, error, bytes)) { 549 - int leftover = (req->hard_nr_sectors << 9); 550 - 551 - if (blk_pc_request(req)) 552 - leftover = req->data_len; 553 - 554 549 /* kill remainder if no retrys */ 555 550 if (error && scsi_noretry_cmd(cmd)) 556 - blk_end_request(req, error, leftover); 551 + blk_end_request_all(req, error); 557 552 else { 558 553 if (requeue) { 559 554 /* ··· 668 673 EXPORT_SYMBOL(scsi_release_buffers); 669 674 670 675 /* 671 - * Bidi commands Must be complete as a whole, both sides at once. 672 - * If part of the bytes were written and lld returned 673 - * scsi_in()->resid and/or scsi_out()->resid this information will be left 674 - * in req->data_len and req->next_rq->data_len. The upper-layer driver can 675 - * decide what to do with this information. 
676 - */ 677 - static void scsi_end_bidi_request(struct scsi_cmnd *cmd) 678 - { 679 - struct request *req = cmd->request; 680 - unsigned int dlen = req->data_len; 681 - unsigned int next_dlen = req->next_rq->data_len; 682 - 683 - req->data_len = scsi_out(cmd)->resid; 684 - req->next_rq->data_len = scsi_in(cmd)->resid; 685 - 686 - /* The req and req->next_rq have not been completed */ 687 - BUG_ON(blk_end_bidi_request(req, 0, dlen, next_dlen)); 688 - 689 - scsi_release_buffers(cmd); 690 - 691 - /* 692 - * This will goose the queue request function at the end, so we don't 693 - * need to worry about launching another command. 694 - */ 695 - scsi_next_command(cmd); 696 - } 697 - 698 - /* 699 676 * Function: scsi_io_completion() 700 677 * 701 678 * Purpose: Completion processing for block device I/O requests. ··· 706 739 void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) 707 740 { 708 741 int result = cmd->result; 709 - int this_count; 710 742 struct request_queue *q = cmd->device->request_queue; 711 743 struct request *req = cmd->request; 712 744 int error = 0; ··· 739 773 if (!sense_deferred) 740 774 error = -EIO; 741 775 } 776 + 777 + req->resid_len = scsi_get_resid(cmd); 778 + 742 779 if (scsi_bidi_cmnd(cmd)) { 743 - /* will also release_buffers */ 744 - scsi_end_bidi_request(cmd); 780 + /* 781 + * Bidi commands Must be complete as a whole, 782 + * both sides at once. 783 + */ 784 + req->next_rq->resid_len = scsi_in(cmd)->resid; 785 + 786 + blk_end_request_all(req, 0); 787 + 788 + scsi_release_buffers(cmd); 789 + scsi_next_command(cmd); 745 790 return; 746 791 } 747 - req->data_len = scsi_get_resid(cmd); 748 792 } 749 793 750 794 BUG_ON(blk_bidi_rq(req)); /* bidi not support for !blk_pc_request yet */ ··· 763 787 * Next deal with any sectors which we were able to correctly 764 788 * handle. 
765 789 */ 766 - SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, " 790 + SCSI_LOG_HLCOMPLETE(1, printk("%u sectors total, " 767 791 "%d bytes done.\n", 768 - req->nr_sectors, good_bytes)); 792 + blk_rq_sectors(req), good_bytes)); 769 793 770 794 /* 771 795 * Recovered errors need reporting, but they're always treated ··· 788 812 */ 789 813 if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL) 790 814 return; 791 - this_count = blk_rq_bytes(req); 792 815 793 816 error = -EIO; 794 817 ··· 897 922 if (driver_byte(result) & DRIVER_SENSE) 898 923 scsi_print_sense("", cmd); 899 924 } 900 - blk_end_request(req, -EIO, blk_rq_bytes(req)); 925 + blk_end_request_all(req, -EIO); 901 926 scsi_next_command(cmd); 902 927 break; 903 928 case ACTION_REPREP: ··· 940 965 count = blk_rq_map_sg(req->q, req, sdb->table.sgl); 941 966 BUG_ON(count > sdb->table.nents); 942 967 sdb->table.nents = count; 943 - if (blk_pc_request(req)) 944 - sdb->length = req->data_len; 945 - else 946 - sdb->length = req->nr_sectors << 9; 968 + sdb->length = blk_rq_bytes(req); 947 969 return BLKPREP_OK; 948 970 } 949 971 ··· 1059 1087 if (unlikely(ret)) 1060 1088 return ret; 1061 1089 } else { 1062 - BUG_ON(req->data_len); 1063 - BUG_ON(req->data); 1090 + BUG_ON(blk_rq_bytes(req)); 1064 1091 1065 1092 memset(&cmd->sdb, 0, sizeof(cmd->sdb)); 1066 1093 req->buffer = NULL; 1067 1094 } 1068 1095 1069 1096 cmd->cmd_len = req->cmd_len; 1070 - if (!req->data_len) 1097 + if (!blk_rq_bytes(req)) 1071 1098 cmd->sc_data_direction = DMA_NONE; 1072 1099 else if (rq_data_dir(req) == WRITE) 1073 1100 cmd->sc_data_direction = DMA_TO_DEVICE; 1074 1101 else 1075 1102 cmd->sc_data_direction = DMA_FROM_DEVICE; 1076 1103 1077 - cmd->transfersize = req->data_len; 1104 + cmd->transfersize = blk_rq_bytes(req); 1078 1105 cmd->allowed = req->retries; 1079 1106 return BLKPREP_OK; 1080 1107 } ··· 1183 1212 break; 1184 1213 case BLKPREP_DEFER: 1185 1214 /* 1186 - * If we defer, the elv_next_request() returns NULL, but the 
1215 + * If we defer, the blk_peek_request() returns NULL, but the 1187 1216 * queue must be restarted, so we plug here if no returning 1188 1217 * command will automatically do that. 1189 1218 */ ··· 1359 1388 struct scsi_target *starget = scsi_target(sdev); 1360 1389 struct Scsi_Host *shost = sdev->host; 1361 1390 1362 - blkdev_dequeue_request(req); 1391 + blk_start_request(req); 1363 1392 1364 1393 if (unlikely(cmd == NULL)) { 1365 1394 printk(KERN_CRIT "impossible request in %s.\n", ··· 1451 1480 1452 1481 if (!sdev) { 1453 1482 printk("scsi: killing requests for dead queue\n"); 1454 - while ((req = elv_next_request(q)) != NULL) 1483 + while ((req = blk_peek_request(q)) != NULL) 1455 1484 scsi_kill_request(req, q); 1456 1485 return; 1457 1486 } ··· 1472 1501 * that the request is fully prepared even if we cannot 1473 1502 * accept it. 1474 1503 */ 1475 - req = elv_next_request(q); 1504 + req = blk_peek_request(q); 1476 1505 if (!req || !scsi_dev_queue_ready(q, sdev)) 1477 1506 break; 1478 1507 ··· 1488 1517 * Remove the request from the request list. 1489 1518 */ 1490 1519 if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req))) 1491 - blkdev_dequeue_request(req); 1520 + blk_start_request(req); 1492 1521 sdev->device_busy++; 1493 1522 1494 1523 spin_unlock(q->queue_lock);
+1 -1
drivers/scsi/scsi_tgt_lib.c
··· 387 387 * we use REQ_TYPE_BLOCK_PC so scsi_init_io doesn't set the 388 388 * length for us. 389 389 */ 390 - cmd->sdb.length = rq->data_len; 390 + cmd->sdb.length = blk_rq_bytes(rq); 391 391 392 392 return 0; 393 393
+1 -3
drivers/scsi/scsi_transport_sas.c
··· 163 163 int (*handler)(struct Scsi_Host *, struct sas_rphy *, struct request *); 164 164 165 165 while (!blk_queue_plugged(q)) { 166 - req = elv_next_request(q); 166 + req = blk_fetch_request(q); 167 167 if (!req) 168 168 break; 169 - 170 - blkdev_dequeue_request(req); 171 169 172 170 spin_unlock_irq(q->queue_lock); 173 171
+13 -13
drivers/scsi/sd.c
··· 384 384 struct scsi_device *sdp = q->queuedata; 385 385 struct gendisk *disk = rq->rq_disk; 386 386 struct scsi_disk *sdkp; 387 - sector_t block = rq->sector; 387 + sector_t block = blk_rq_pos(rq); 388 388 sector_t threshold; 389 - unsigned int this_count = rq->nr_sectors; 389 + unsigned int this_count = blk_rq_sectors(rq); 390 390 int ret, host_dif; 391 391 392 392 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { ··· 413 413 this_count)); 414 414 415 415 if (!sdp || !scsi_device_online(sdp) || 416 - block + rq->nr_sectors > get_capacity(disk)) { 416 + block + blk_rq_sectors(rq) > get_capacity(disk)) { 417 417 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, 418 - "Finishing %ld sectors\n", 419 - rq->nr_sectors)); 418 + "Finishing %u sectors\n", 419 + blk_rq_sectors(rq))); 420 420 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, 421 421 "Retry with 0x%p\n", SCpnt)); 422 422 goto out; ··· 463 463 * for this. 464 464 */ 465 465 if (sdp->sector_size == 1024) { 466 - if ((block & 1) || (rq->nr_sectors & 1)) { 466 + if ((block & 1) || (blk_rq_sectors(rq) & 1)) { 467 467 scmd_printk(KERN_ERR, SCpnt, 468 468 "Bad block number requested\n"); 469 469 goto out; ··· 473 473 } 474 474 } 475 475 if (sdp->sector_size == 2048) { 476 - if ((block & 3) || (rq->nr_sectors & 3)) { 476 + if ((block & 3) || (blk_rq_sectors(rq) & 3)) { 477 477 scmd_printk(KERN_ERR, SCpnt, 478 478 "Bad block number requested\n"); 479 479 goto out; ··· 483 483 } 484 484 } 485 485 if (sdp->sector_size == 4096) { 486 - if ((block & 7) || (rq->nr_sectors & 7)) { 486 + if ((block & 7) || (blk_rq_sectors(rq) & 7)) { 487 487 scmd_printk(KERN_ERR, SCpnt, 488 488 "Bad block number requested\n"); 489 489 goto out; ··· 512 512 } 513 513 514 514 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt, 515 - "%s %d/%ld 512 byte blocks.\n", 515 + "%s %d/%u 512 byte blocks.\n", 516 516 (rq_data_dir(rq) == WRITE) ? 
517 517 "writing" : "reading", this_count, 518 - rq->nr_sectors)); 518 + blk_rq_sectors(rq))); 519 519 520 520 /* Set RDPROTECT/WRPROTECT if disk is formatted with DIF */ 521 521 host_dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type); ··· 971 971 972 972 static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd) 973 973 { 974 - u64 start_lba = scmd->request->sector; 975 - u64 end_lba = scmd->request->sector + (scsi_bufflen(scmd) / 512); 974 + u64 start_lba = blk_rq_pos(scmd->request); 975 + u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512); 976 976 u64 bad_lba; 977 977 int info_valid; 978 978 ··· 1510 1510 */ 1511 1511 sector_size = 512; 1512 1512 } 1513 - blk_queue_hardsect_size(sdp->request_queue, sector_size); 1513 + blk_queue_logical_block_size(sdp->request_queue, sector_size); 1514 1514 1515 1515 { 1516 1516 char cap_str_2[10], cap_str_10[10];
+1 -1
drivers/scsi/sd_dif.c
··· 507 507 sector_sz = scmd->device->sector_size; 508 508 sectors = good_bytes / sector_sz; 509 509 510 - phys = scmd->request->sector & 0xffffffff; 510 + phys = blk_rq_pos(scmd->request) & 0xffffffff; 511 511 if (sector_sz == 4096) 512 512 phys >>= 3; 513 513
+9 -8
drivers/scsi/sg.c
··· 289 289 if (list_empty(&sdp->sfds)) { /* no existing opens on this device */ 290 290 sdp->sgdebug = 0; 291 291 q = sdp->device->request_queue; 292 - sdp->sg_tablesize = min(q->max_hw_segments, 293 - q->max_phys_segments); 292 + sdp->sg_tablesize = min(queue_max_hw_segments(q), 293 + queue_max_phys_segments(q)); 294 294 } 295 295 if ((sfp = sg_add_sfp(sdp, dev))) 296 296 filp->private_data = sfp; ··· 909 909 if (val < 0) 910 910 return -EINVAL; 911 911 val = min_t(int, val, 912 - sdp->device->request_queue->max_sectors * 512); 912 + queue_max_sectors(sdp->device->request_queue) * 512); 913 913 if (val != sfp->reserve.bufflen) { 914 914 if (sg_res_in_use(sfp) || sfp->mmap_called) 915 915 return -EBUSY; ··· 919 919 return 0; 920 920 case SG_GET_RESERVED_SIZE: 921 921 val = min_t(int, sfp->reserve.bufflen, 922 - sdp->device->request_queue->max_sectors * 512); 922 + queue_max_sectors(sdp->device->request_queue) * 512); 923 923 return put_user(val, ip); 924 924 case SG_SET_COMMAND_Q: 925 925 result = get_user(val, ip); ··· 1059 1059 return -ENODEV; 1060 1060 return scsi_ioctl(sdp->device, cmd_in, p); 1061 1061 case BLKSECTGET: 1062 - return put_user(sdp->device->request_queue->max_sectors * 512, 1062 + return put_user(queue_max_sectors(sdp->device->request_queue) * 512, 1063 1063 ip); 1064 1064 case BLKTRACESETUP: 1065 1065 return blk_trace_setup(sdp->device->request_queue, ··· 1261 1261 1262 1262 sense = rq->sense; 1263 1263 result = rq->errors; 1264 - resid = rq->data_len; 1264 + resid = rq->resid_len; 1265 1265 1266 1266 SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n", 1267 1267 sdp->disk->disk_name, srp->header.pack_id, result)); ··· 1378 1378 sdp->device = scsidp; 1379 1379 INIT_LIST_HEAD(&sdp->sfds); 1380 1380 init_waitqueue_head(&sdp->o_excl_wait); 1381 - sdp->sg_tablesize = min(q->max_hw_segments, q->max_phys_segments); 1381 + sdp->sg_tablesize = min(queue_max_hw_segments(q), 1382 + queue_max_phys_segments(q)); 1382 1383 sdp->index = k; 
1383 1384 kref_init(&sdp->d_ref); 1384 1385 ··· 2057 2056 sg_big_buff = def_reserved_size; 2058 2057 2059 2058 bufflen = min_t(int, sg_big_buff, 2060 - sdp->device->request_queue->max_sectors * 512); 2059 + queue_max_sectors(sdp->device->request_queue) * 512); 2061 2060 sg_build_reserve(sfp, bufflen); 2062 2061 SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n", 2063 2062 sfp->reserve.bufflen, sfp->reserve.k_use_sg));
+9 -8
drivers/scsi/sr.c
··· 292 292 if (cd->device->sector_size == 2048) 293 293 error_sector <<= 2; 294 294 error_sector &= ~(block_sectors - 1); 295 - good_bytes = (error_sector - SCpnt->request->sector) << 9; 295 + good_bytes = (error_sector - 296 + blk_rq_pos(SCpnt->request)) << 9; 296 297 if (good_bytes < 0 || good_bytes >= this_count) 297 298 good_bytes = 0; 298 299 /* ··· 350 349 cd->disk->disk_name, block)); 351 350 352 351 if (!cd->device || !scsi_device_online(cd->device)) { 353 - SCSI_LOG_HLQUEUE(2, printk("Finishing %ld sectors\n", 354 - rq->nr_sectors)); 352 + SCSI_LOG_HLQUEUE(2, printk("Finishing %u sectors\n", 353 + blk_rq_sectors(rq))); 355 354 SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt)); 356 355 goto out; 357 356 } ··· 414 413 /* 415 414 * request doesn't start on hw block boundary, add scatter pads 416 415 */ 417 - if (((unsigned int)rq->sector % (s_size >> 9)) || 416 + if (((unsigned int)blk_rq_pos(rq) % (s_size >> 9)) || 418 417 (scsi_bufflen(SCpnt) % s_size)) { 419 418 scmd_printk(KERN_NOTICE, SCpnt, "unaligned transfer\n"); 420 419 goto out; ··· 423 422 this_count = (scsi_bufflen(SCpnt) >> 9) / (s_size >> 9); 424 423 425 424 426 - SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%ld 512 byte blocks.\n", 425 + SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%u 512 byte blocks.\n", 427 426 cd->cdi.name, 428 427 (rq_data_dir(rq) == WRITE) ? 429 428 "writing" : "reading", 430 - this_count, rq->nr_sectors)); 429 + this_count, blk_rq_sectors(rq))); 431 430 432 431 SCpnt->cmnd[1] = 0; 433 - block = (unsigned int)rq->sector / (s_size >> 9); 432 + block = (unsigned int)blk_rq_pos(rq) / (s_size >> 9); 434 433 435 434 if (this_count > 0xffff) { 436 435 this_count = 0xffff; ··· 727 726 } 728 727 729 728 queue = cd->device->request_queue; 730 - blk_queue_hardsect_size(queue, sector_size); 729 + blk_queue_logical_block_size(queue, sector_size); 731 730 732 731 return; 733 732 }
+3 -3
drivers/scsi/st.c
··· 463 463 struct scsi_tape *STp = SRpnt->stp; 464 464 465 465 STp->buffer->cmdstat.midlevel_result = SRpnt->result = req->errors; 466 - STp->buffer->cmdstat.residual = req->data_len; 466 + STp->buffer->cmdstat.residual = req->resid_len; 467 467 468 468 if (SRpnt->waiting) 469 469 complete(SRpnt->waiting); ··· 3983 3983 return -ENODEV; 3984 3984 } 3985 3985 3986 - i = min(SDp->request_queue->max_hw_segments, 3987 - SDp->request_queue->max_phys_segments); 3986 + i = min(queue_max_hw_segments(SDp->request_queue), 3987 + queue_max_phys_segments(SDp->request_queue)); 3988 3988 if (st_max_sg_segs < i) 3989 3989 i = st_max_sg_segs; 3990 3990 buffer = new_tape_buffer((SDp->host)->unchecked_isa_dma, i);
+12 -10
drivers/scsi/u14-34f.c
··· 1306 1306 if (linked_comm && SCpnt->device->queue_depth > 2 1307 1307 && TLDEV(SCpnt->device->type)) { 1308 1308 HD(j)->cp_stat[i] = READY; 1309 - flush_dev(SCpnt->device, SCpnt->request->sector, j, FALSE); 1309 + flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), j, FALSE); 1310 1310 return 0; 1311 1311 } 1312 1312 ··· 1610 1610 1611 1611 if (!(cpp->xdir == DTD_IN)) input_only = FALSE; 1612 1612 1613 - if (SCpnt->request->sector < minsec) minsec = SCpnt->request->sector; 1614 - if (SCpnt->request->sector > maxsec) maxsec = SCpnt->request->sector; 1613 + if (blk_rq_pos(SCpnt->request) < minsec) 1614 + minsec = blk_rq_pos(SCpnt->request); 1615 + if (blk_rq_pos(SCpnt->request) > maxsec) 1616 + maxsec = blk_rq_pos(SCpnt->request); 1615 1617 1616 - sl[n] = SCpnt->request->sector; 1617 - ioseek += SCpnt->request->nr_sectors; 1618 + sl[n] = blk_rq_pos(SCpnt->request); 1619 + ioseek += blk_rq_sectors(SCpnt->request); 1618 1620 1619 1621 if (!n) continue; 1620 1622 ··· 1644 1642 1645 1643 if (!input_only) for (n = 0; n < n_ready; n++) { 1646 1644 k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt; 1647 - ll[n] = SCpnt->request->nr_sectors; pl[n] = SCpnt->serial_number; 1645 + ll[n] = blk_rq_sectors(SCpnt->request); pl[n] = SCpnt->serial_number; 1648 1646 1649 1647 if (!n) continue; 1650 1648 ··· 1668 1666 if (link_statistics && (overlap || !(flushcount % link_statistics))) 1669 1667 for (n = 0; n < n_ready; n++) { 1670 1668 k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt; 1671 - printk("%s %d.%d:%d pid %ld mb %d fc %d nr %d sec %ld ns %ld"\ 1669 + printk("%s %d.%d:%d pid %ld mb %d fc %d nr %d sec %ld ns %u"\ 1672 1670 " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n", 1673 1671 (ihdlr ? 
"ihdlr" : "qcomm"), SCpnt->channel, SCpnt->target, 1674 1672 SCpnt->lun, SCpnt->serial_number, k, flushcount, n_ready, 1675 - SCpnt->request->sector, SCpnt->request->nr_sectors, cursec, 1676 - YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only), 1673 + blk_rq_pos(SCpnt->request), blk_rq_sectors(SCpnt->request), 1674 + cursec, YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only), 1677 1675 YESNO(overlap), cpp->xdir); 1678 1676 } 1679 1677 #endif ··· 1801 1799 1802 1800 if (linked_comm && SCpnt->device->queue_depth > 2 1803 1801 && TLDEV(SCpnt->device->type)) 1804 - flush_dev(SCpnt->device, SCpnt->request->sector, j, TRUE); 1802 + flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), j, TRUE); 1805 1803 1806 1804 tstatus = status_byte(spp->target_status); 1807 1805
+2 -2
drivers/usb/storage/scsiglue.c
··· 132 132 133 133 if (us->fflags & US_FL_MAX_SECTORS_MIN) 134 134 max_sectors = PAGE_CACHE_SIZE >> 9; 135 - if (sdev->request_queue->max_sectors > max_sectors) 135 + if (queue_max_sectors(sdev->request_queue) > max_sectors) 136 136 blk_queue_max_sectors(sdev->request_queue, 137 137 max_sectors); 138 138 } else if (sdev->type == TYPE_TAPE) { ··· 483 483 { 484 484 struct scsi_device *sdev = to_scsi_device(dev); 485 485 486 - return sprintf(buf, "%u\n", sdev->request_queue->max_sectors); 486 + return sprintf(buf, "%u\n", queue_max_sectors(sdev->request_queue)); 487 487 } 488 488 489 489 /* Input routine for the sysfs max_sectors file */
+14 -12
fs/bio.c
··· 498 498 struct request_queue *q = bdev_get_queue(bdev); 499 499 int nr_pages; 500 500 501 - nr_pages = ((q->max_sectors << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT; 502 - if (nr_pages > q->max_phys_segments) 503 - nr_pages = q->max_phys_segments; 504 - if (nr_pages > q->max_hw_segments) 505 - nr_pages = q->max_hw_segments; 501 + nr_pages = ((queue_max_sectors(q) << 9) + PAGE_SIZE - 1) >> PAGE_SHIFT; 502 + if (nr_pages > queue_max_phys_segments(q)) 503 + nr_pages = queue_max_phys_segments(q); 504 + if (nr_pages > queue_max_hw_segments(q)) 505 + nr_pages = queue_max_hw_segments(q); 506 506 507 507 return nr_pages; 508 508 } ··· 561 561 * make this too complex. 562 562 */ 563 563 564 - while (bio->bi_phys_segments >= q->max_phys_segments 565 - || bio->bi_phys_segments >= q->max_hw_segments) { 564 + while (bio->bi_phys_segments >= queue_max_phys_segments(q) 565 + || bio->bi_phys_segments >= queue_max_hw_segments(q)) { 566 566 567 567 if (retried_segments) 568 568 return 0; ··· 633 633 int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page *page, 634 634 unsigned int len, unsigned int offset) 635 635 { 636 - return __bio_add_page(q, bio, page, len, offset, q->max_hw_sectors); 636 + return __bio_add_page(q, bio, page, len, offset, 637 + queue_max_hw_sectors(q)); 637 638 } 638 639 639 640 /** ··· 654 653 unsigned int offset) 655 654 { 656 655 struct request_queue *q = bdev_get_queue(bio->bi_bdev); 657 - return __bio_add_page(q, bio, page, len, offset, q->max_sectors); 656 + return __bio_add_page(q, bio, page, len, offset, queue_max_sectors(q)); 658 657 } 659 658 660 659 struct bio_map_data { ··· 721 720 722 721 while (bv_len && iov_idx < iov_count) { 723 722 unsigned int bytes; 724 - char *iov_addr; 723 + char __user *iov_addr; 725 724 726 725 bytes = min_t(unsigned int, 727 726 iov[iov_idx].iov_len - iov_off, bv_len); ··· 1201 1200 char *addr = page_address(bvec->bv_page); 1202 1201 int len = bmd->iovecs[i].bv_len; 1203 1202 1204 - if (read && !err) 1203 
+ if (read) 1205 1204 memcpy(p, addr, len); 1206 1205 1207 1206 __free_page(bvec->bv_page); ··· 1490 1489 sector_t bio_sector_offset(struct bio *bio, unsigned short index, 1491 1490 unsigned int offset) 1492 1491 { 1493 - unsigned int sector_sz = queue_hardsect_size(bio->bi_bdev->bd_disk->queue); 1492 + unsigned int sector_sz; 1494 1493 struct bio_vec *bv; 1495 1494 sector_t sectors; 1496 1495 int i; 1497 1496 1497 + sector_sz = queue_logical_block_size(bio->bi_bdev->bd_disk->queue); 1498 1498 sectors = 0; 1499 1499 1500 1500 if (index >= bio->bi_idx)
+3 -3
fs/block_dev.c
··· 76 76 return -EINVAL; 77 77 78 78 /* Size cannot be smaller than the size supported by the device */ 79 - if (size < bdev_hardsect_size(bdev)) 79 + if (size < bdev_logical_block_size(bdev)) 80 80 return -EINVAL; 81 81 82 82 /* Don't change the size if it is same as current */ ··· 106 106 107 107 int sb_min_blocksize(struct super_block *sb, int size) 108 108 { 109 - int minsize = bdev_hardsect_size(sb->s_bdev); 109 + int minsize = bdev_logical_block_size(sb->s_bdev); 110 110 if (size < minsize) 111 111 size = minsize; 112 112 return sb_set_blocksize(sb, size); ··· 1111 1111 1112 1112 void bd_set_size(struct block_device *bdev, loff_t size) 1113 1113 { 1114 - unsigned bsize = bdev_hardsect_size(bdev); 1114 + unsigned bsize = bdev_logical_block_size(bdev); 1115 1115 1116 1116 bdev->bd_inode->i_size = size; 1117 1117 while (bsize < PAGE_CACHE_SIZE) {
+3 -3
fs/buffer.c
··· 1085 1085 __getblk_slow(struct block_device *bdev, sector_t block, int size) 1086 1086 { 1087 1087 /* Size must be multiple of hard sectorsize */ 1088 - if (unlikely(size & (bdev_hardsect_size(bdev)-1) || 1088 + if (unlikely(size & (bdev_logical_block_size(bdev)-1) || 1089 1089 (size < 512 || size > PAGE_SIZE))) { 1090 1090 printk(KERN_ERR "getblk(): invalid block size %d requested\n", 1091 1091 size); 1092 - printk(KERN_ERR "hardsect size: %d\n", 1093 - bdev_hardsect_size(bdev)); 1092 + printk(KERN_ERR "logical block size: %d\n", 1093 + bdev_logical_block_size(bdev)); 1094 1094 1095 1095 dump_stack(); 1096 1096 return NULL;
+6 -3
fs/coda/file.c
··· 47 47 struct pipe_inode_info *pipe, size_t count, 48 48 unsigned int flags) 49 49 { 50 + ssize_t (*splice_read)(struct file *, loff_t *, 51 + struct pipe_inode_info *, size_t, unsigned int); 50 52 struct coda_file_info *cfi; 51 53 struct file *host_file; 52 54 ··· 56 54 BUG_ON(!cfi || cfi->cfi_magic != CODA_MAGIC); 57 55 host_file = cfi->cfi_container; 58 56 59 - if (!host_file->f_op || !host_file->f_op->splice_read) 60 - return -EINVAL; 57 + splice_read = host_file->f_op->splice_read; 58 + if (!splice_read) 59 + splice_read = default_file_splice_read; 61 60 62 - return host_file->f_op->splice_read(host_file, ppos, pipe, count,flags); 61 + return splice_read(host_file, ppos, pipe, count, flags); 63 62 } 64 63 65 64 static ssize_t
+1 -1
fs/direct-io.c
··· 1127 1127 rw = WRITE_ODIRECT; 1128 1128 1129 1129 if (bdev) 1130 - bdev_blkbits = blksize_bits(bdev_hardsect_size(bdev)); 1130 + bdev_blkbits = blksize_bits(bdev_logical_block_size(bdev)); 1131 1131 1132 1132 if (offset & blocksize_mask) { 1133 1133 if (bdev)
+2 -2
fs/exofs/osd.c
··· 50 50 51 51 /* FIXME: should be include in osd_sense_info */ 52 52 if (in_resid) 53 - *in_resid = or->in.req ? or->in.req->data_len : 0; 53 + *in_resid = or->in.req ? or->in.req->resid_len : 0; 54 54 55 55 if (out_resid) 56 - *out_resid = or->out.req ? or->out.req->data_len : 0; 56 + *out_resid = or->out.req ? or->out.req->resid_len : 0; 57 57 58 58 return ret; 59 59 }
+2 -2
fs/ext3/super.c
··· 1696 1696 goto failed_mount; 1697 1697 } 1698 1698 1699 - hblock = bdev_hardsect_size(sb->s_bdev); 1699 + hblock = bdev_logical_block_size(sb->s_bdev); 1700 1700 if (sb->s_blocksize != blocksize) { 1701 1701 /* 1702 1702 * Make sure the blocksize for the filesystem is larger ··· 2120 2120 } 2121 2121 2122 2122 blocksize = sb->s_blocksize; 2123 - hblock = bdev_hardsect_size(bdev); 2123 + hblock = bdev_logical_block_size(bdev); 2124 2124 if (blocksize < hblock) { 2125 2125 printk(KERN_ERR 2126 2126 "EXT3-fs: blocksize too small for journal device.\n");
+1 -1
fs/ext4/super.c
··· 3035 3035 } 3036 3036 3037 3037 blocksize = sb->s_blocksize; 3038 - hblock = bdev_hardsect_size(bdev); 3038 + hblock = bdev_logical_block_size(bdev); 3039 3039 if (blocksize < hblock) { 3040 3040 ext4_msg(sb, KERN_ERR, 3041 3041 "blocksize too small for journal device");
+2 -2
fs/gfs2/ops_fstype.c
··· 525 525 } 526 526 527 527 /* Set up the buffer cache and SB for real */ 528 - if (sdp->sd_sb.sb_bsize < bdev_hardsect_size(sb->s_bdev)) { 528 + if (sdp->sd_sb.sb_bsize < bdev_logical_block_size(sb->s_bdev)) { 529 529 ret = -EINVAL; 530 530 fs_err(sdp, "FS block size (%u) is too small for device " 531 531 "block size (%u)\n", 532 - sdp->sd_sb.sb_bsize, bdev_hardsect_size(sb->s_bdev)); 532 + sdp->sd_sb.sb_bsize, bdev_logical_block_size(sb->s_bdev)); 533 533 goto out; 534 534 } 535 535 if (sdp->sd_sb.sb_bsize > PAGE_SIZE) {
+1 -1
fs/gfs2/rgrp.c
··· 842 842 struct super_block *sb = sdp->sd_vfs; 843 843 struct block_device *bdev = sb->s_bdev; 844 844 const unsigned int sects_per_blk = sdp->sd_sb.sb_bsize / 845 - bdev_hardsect_size(sb->s_bdev); 845 + bdev_logical_block_size(sb->s_bdev); 846 846 u64 blk; 847 847 sector_t start = 0; 848 848 sector_t nr_sects = 0;
+1 -1
fs/nilfs2/the_nilfs.c
··· 515 515 516 516 blocksize = BLOCK_SIZE << le32_to_cpu(sbp->s_log_block_size); 517 517 if (sb->s_blocksize != blocksize) { 518 - int hw_blocksize = bdev_hardsect_size(sb->s_bdev); 518 + int hw_blocksize = bdev_logical_block_size(sb->s_bdev); 519 519 520 520 if (blocksize < hw_blocksize) { 521 521 printk(KERN_ERR
+3 -3
fs/ntfs/super.c
··· 25 25 #include <linux/slab.h> 26 26 #include <linux/string.h> 27 27 #include <linux/spinlock.h> 28 - #include <linux/blkdev.h> /* For bdev_hardsect_size(). */ 28 + #include <linux/blkdev.h> /* For bdev_logical_block_size(). */ 29 29 #include <linux/backing-dev.h> 30 30 #include <linux/buffer_head.h> 31 31 #include <linux/vfs.h> ··· 2785 2785 goto err_out_now; 2786 2786 2787 2787 /* We support sector sizes up to the PAGE_CACHE_SIZE. */ 2788 - if (bdev_hardsect_size(sb->s_bdev) > PAGE_CACHE_SIZE) { 2788 + if (bdev_logical_block_size(sb->s_bdev) > PAGE_CACHE_SIZE) { 2789 2789 if (!silent) 2790 2790 ntfs_error(sb, "Device has unsupported sector size " 2791 2791 "(%i). The maximum supported sector " 2792 2792 "size on this architecture is %lu " 2793 2793 "bytes.", 2794 - bdev_hardsect_size(sb->s_bdev), 2794 + bdev_logical_block_size(sb->s_bdev), 2795 2795 PAGE_CACHE_SIZE); 2796 2796 goto err_out_now; 2797 2797 }
+1 -1
fs/ocfs2/cluster/heartbeat.c
··· 1371 1371 1372 1372 bdevname(reg->hr_bdev, reg->hr_dev_name); 1373 1373 1374 - sectsize = bdev_hardsect_size(reg->hr_bdev); 1374 + sectsize = bdev_logical_block_size(reg->hr_bdev); 1375 1375 if (sectsize != reg->hr_block_bytes) { 1376 1376 mlog(ML_ERROR, 1377 1377 "blocksize %u incorrect for device, expected %d",
+1 -1
fs/ocfs2/super.c
··· 713 713 *bh = NULL; 714 714 715 715 /* may be > 512 */ 716 - *sector_size = bdev_hardsect_size(sb->s_bdev); 716 + *sector_size = bdev_logical_block_size(sb->s_bdev); 717 717 if (*sector_size > OCFS2_MAX_BLOCKSIZE) { 718 718 mlog(ML_ERROR, "Hardware sector size too large: %d (max=%d)\n", 719 719 *sector_size, OCFS2_MAX_BLOCKSIZE);
+10
fs/partitions/check.c
··· 219 219 return sprintf(buf, "%llu\n",(unsigned long long)p->nr_sects); 220 220 } 221 221 222 + ssize_t part_alignment_offset_show(struct device *dev, 223 + struct device_attribute *attr, char *buf) 224 + { 225 + struct hd_struct *p = dev_to_part(dev); 226 + return sprintf(buf, "%llu\n", (unsigned long long)p->alignment_offset); 227 + } 228 + 222 229 ssize_t part_stat_show(struct device *dev, 223 230 struct device_attribute *attr, char *buf) 224 231 { ··· 279 272 static DEVICE_ATTR(partition, S_IRUGO, part_partition_show, NULL); 280 273 static DEVICE_ATTR(start, S_IRUGO, part_start_show, NULL); 281 274 static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL); 275 + static DEVICE_ATTR(alignment_offset, S_IRUGO, part_alignment_offset_show, NULL); 282 276 static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL); 283 277 #ifdef CONFIG_FAIL_MAKE_REQUEST 284 278 static struct device_attribute dev_attr_fail = ··· 290 282 &dev_attr_partition.attr, 291 283 &dev_attr_start.attr, 292 284 &dev_attr_size.attr, 285 + &dev_attr_alignment_offset.attr, 293 286 &dev_attr_stat.attr, 294 287 #ifdef CONFIG_FAIL_MAKE_REQUEST 295 288 &dev_attr_fail.attr, ··· 392 383 pdev = part_to_dev(p); 393 384 394 385 p->start_sect = start; 386 + p->alignment_offset = queue_sector_alignment_offset(disk->queue, start); 395 387 p->nr_sects = len; 396 388 p->partno = partno; 397 389 p->policy = get_disk_ro(disk);
+1 -1
fs/partitions/ibm.c
··· 76 76 Sector sect; 77 77 78 78 res = 0; 79 - blocksize = bdev_hardsect_size(bdev); 79 + blocksize = bdev_logical_block_size(bdev); 80 80 if (blocksize <= 0) 81 81 goto out_exit; 82 82 i_size = i_size_read(bdev->bd_inode);
+2 -2
fs/partitions/msdos.c
··· 110 110 Sector sect; 111 111 unsigned char *data; 112 112 u32 this_sector, this_size; 113 - int sector_size = bdev_hardsect_size(bdev) / 512; 113 + int sector_size = bdev_logical_block_size(bdev) / 512; 114 114 int loopct = 0; /* number of links followed 115 115 without finding a data partition */ 116 116 int i; ··· 415 415 416 416 int msdos_partition(struct parsed_partitions *state, struct block_device *bdev) 417 417 { 418 - int sector_size = bdev_hardsect_size(bdev) / 512; 418 + int sector_size = bdev_logical_block_size(bdev) / 512; 419 419 Sector sect; 420 420 unsigned char *data; 421 421 struct partition *p;
+14
fs/pipe.c
··· 302 302 return 0; 303 303 } 304 304 305 + /** 306 + * generic_pipe_buf_release - put a reference to a &struct pipe_buffer 307 + * @pipe: the pipe that the buffer belongs to 308 + * @buf: the buffer to put a reference to 309 + * 310 + * Description: 311 + * This function releases a reference to @buf. 312 + */ 313 + void generic_pipe_buf_release(struct pipe_inode_info *pipe, 314 + struct pipe_buffer *buf) 315 + { 316 + page_cache_release(buf->page); 317 + } 318 + 305 319 static const struct pipe_buf_operations anon_pipe_buf_ops = { 306 320 .can_merge = 1, 307 321 .map = generic_pipe_buf_map,
+1 -6
fs/read_write.c
··· 805 805 goto out; 806 806 if (!(in_file->f_mode & FMODE_READ)) 807 807 goto fput_in; 808 - retval = -EINVAL; 809 - in_inode = in_file->f_path.dentry->d_inode; 810 - if (!in_inode) 811 - goto fput_in; 812 - if (!in_file->f_op || !in_file->f_op->splice_read) 813 - goto fput_in; 814 808 retval = -ESPIPE; 815 809 if (!ppos) 816 810 ppos = &in_file->f_pos; ··· 828 834 retval = -EINVAL; 829 835 if (!out_file->f_op || !out_file->f_op->sendpage) 830 836 goto fput_out; 837 + in_inode = in_file->f_path.dentry->d_inode; 831 838 out_inode = out_file->f_path.dentry->d_inode; 832 839 retval = rw_verify_area(WRITE, out_file, &out_file->f_pos, count); 833 840 if (retval < 0)
+318 -20
fs/splice.c
··· 507 507 508 508 return ret; 509 509 } 510 - 511 510 EXPORT_SYMBOL(generic_file_splice_read); 511 + 512 + static const struct pipe_buf_operations default_pipe_buf_ops = { 513 + .can_merge = 0, 514 + .map = generic_pipe_buf_map, 515 + .unmap = generic_pipe_buf_unmap, 516 + .confirm = generic_pipe_buf_confirm, 517 + .release = generic_pipe_buf_release, 518 + .steal = generic_pipe_buf_steal, 519 + .get = generic_pipe_buf_get, 520 + }; 521 + 522 + static ssize_t kernel_readv(struct file *file, const struct iovec *vec, 523 + unsigned long vlen, loff_t offset) 524 + { 525 + mm_segment_t old_fs; 526 + loff_t pos = offset; 527 + ssize_t res; 528 + 529 + old_fs = get_fs(); 530 + set_fs(get_ds()); 531 + /* The cast to a user pointer is valid due to the set_fs() */ 532 + res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos); 533 + set_fs(old_fs); 534 + 535 + return res; 536 + } 537 + 538 + static ssize_t kernel_write(struct file *file, const char *buf, size_t count, 539 + loff_t pos) 540 + { 541 + mm_segment_t old_fs; 542 + ssize_t res; 543 + 544 + old_fs = get_fs(); 545 + set_fs(get_ds()); 546 + /* The cast to a user pointer is valid due to the set_fs() */ 547 + res = vfs_write(file, (const char __user *)buf, count, &pos); 548 + set_fs(old_fs); 549 + 550 + return res; 551 + } 552 + 553 + ssize_t default_file_splice_read(struct file *in, loff_t *ppos, 554 + struct pipe_inode_info *pipe, size_t len, 555 + unsigned int flags) 556 + { 557 + unsigned int nr_pages; 558 + unsigned int nr_freed; 559 + size_t offset; 560 + struct page *pages[PIPE_BUFFERS]; 561 + struct partial_page partial[PIPE_BUFFERS]; 562 + struct iovec vec[PIPE_BUFFERS]; 563 + pgoff_t index; 564 + ssize_t res; 565 + size_t this_len; 566 + int error; 567 + int i; 568 + struct splice_pipe_desc spd = { 569 + .pages = pages, 570 + .partial = partial, 571 + .flags = flags, 572 + .ops = &default_pipe_buf_ops, 573 + .spd_release = spd_release_page, 574 + }; 575 + 576 + index = *ppos >> PAGE_CACHE_SHIFT; 
577 + offset = *ppos & ~PAGE_CACHE_MASK; 578 + nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 579 + 580 + for (i = 0; i < nr_pages && i < PIPE_BUFFERS && len; i++) { 581 + struct page *page; 582 + 583 + page = alloc_page(GFP_USER); 584 + error = -ENOMEM; 585 + if (!page) 586 + goto err; 587 + 588 + this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset); 589 + vec[i].iov_base = (void __user *) page_address(page); 590 + vec[i].iov_len = this_len; 591 + pages[i] = page; 592 + spd.nr_pages++; 593 + len -= this_len; 594 + offset = 0; 595 + } 596 + 597 + res = kernel_readv(in, vec, spd.nr_pages, *ppos); 598 + if (res < 0) { 599 + error = res; 600 + goto err; 601 + } 602 + 603 + error = 0; 604 + if (!res) 605 + goto err; 606 + 607 + nr_freed = 0; 608 + for (i = 0; i < spd.nr_pages; i++) { 609 + this_len = min_t(size_t, vec[i].iov_len, res); 610 + partial[i].offset = 0; 611 + partial[i].len = this_len; 612 + if (!this_len) { 613 + __free_page(pages[i]); 614 + pages[i] = NULL; 615 + nr_freed++; 616 + } 617 + res -= this_len; 618 + } 619 + spd.nr_pages -= nr_freed; 620 + 621 + res = splice_to_pipe(pipe, &spd); 622 + if (res > 0) 623 + *ppos += res; 624 + 625 + return res; 626 + 627 + err: 628 + for (i = 0; i < spd.nr_pages; i++) 629 + __free_page(pages[i]); 630 + 631 + return error; 632 + } 633 + EXPORT_SYMBOL(default_file_splice_read); 512 634 513 635 /* 514 636 * Send 'sd->len' bytes to socket from 'sd->file' at position 'sd->pos' ··· 1003 881 1004 882 EXPORT_SYMBOL(generic_file_splice_write); 1005 883 884 + static int write_pipe_buf(struct pipe_inode_info *pipe, struct pipe_buffer *buf, 885 + struct splice_desc *sd) 886 + { 887 + int ret; 888 + void *data; 889 + 890 + ret = buf->ops->confirm(pipe, buf); 891 + if (ret) 892 + return ret; 893 + 894 + data = buf->ops->map(pipe, buf, 0); 895 + ret = kernel_write(sd->u.file, data + buf->offset, sd->len, sd->pos); 896 + buf->ops->unmap(pipe, buf, data); 897 + 898 + return ret; 899 + } 900 + 901 + static 
ssize_t default_file_splice_write(struct pipe_inode_info *pipe, 902 + struct file *out, loff_t *ppos, 903 + size_t len, unsigned int flags) 904 + { 905 + ssize_t ret; 906 + 907 + ret = splice_from_pipe(pipe, out, ppos, len, flags, write_pipe_buf); 908 + if (ret > 0) 909 + *ppos += ret; 910 + 911 + return ret; 912 + } 913 + 1006 914 /** 1007 915 * generic_splice_sendpage - splice data from a pipe to a socket 1008 916 * @pipe: pipe to splice from ··· 1060 908 static long do_splice_from(struct pipe_inode_info *pipe, struct file *out, 1061 909 loff_t *ppos, size_t len, unsigned int flags) 1062 910 { 911 + ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, 912 + loff_t *, size_t, unsigned int); 1063 913 int ret; 1064 - 1065 - if (unlikely(!out->f_op || !out->f_op->splice_write)) 1066 - return -EINVAL; 1067 914 1068 915 if (unlikely(!(out->f_mode & FMODE_WRITE))) 1069 916 return -EBADF; ··· 1074 923 if (unlikely(ret < 0)) 1075 924 return ret; 1076 925 1077 - return out->f_op->splice_write(pipe, out, ppos, len, flags); 926 + splice_write = out->f_op->splice_write; 927 + if (!splice_write) 928 + splice_write = default_file_splice_write; 929 + 930 + return splice_write(pipe, out, ppos, len, flags); 1078 931 } 1079 932 1080 933 /* ··· 1088 933 struct pipe_inode_info *pipe, size_t len, 1089 934 unsigned int flags) 1090 935 { 936 + ssize_t (*splice_read)(struct file *, loff_t *, 937 + struct pipe_inode_info *, size_t, unsigned int); 1091 938 int ret; 1092 - 1093 - if (unlikely(!in->f_op || !in->f_op->splice_read)) 1094 - return -EINVAL; 1095 939 1096 940 if (unlikely(!(in->f_mode & FMODE_READ))) 1097 941 return -EBADF; ··· 1099 945 if (unlikely(ret < 0)) 1100 946 return ret; 1101 947 1102 - return in->f_op->splice_read(in, ppos, pipe, len, flags); 948 + splice_read = in->f_op->splice_read; 949 + if (!splice_read) 950 + splice_read = default_file_splice_read; 951 + 952 + return splice_read(in, ppos, pipe, len, flags); 1103 953 } 1104 954 1105 955 /** ··· 1270 1112 
return ret; 1271 1113 } 1272 1114 1115 + static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe, 1116 + struct pipe_inode_info *opipe, 1117 + size_t len, unsigned int flags); 1273 1118 /* 1274 1119 * After the inode slimming patch, i_pipe/i_bdev/i_cdev share the same 1275 1120 * location, so checking ->i_pipe is not enough to verify that this is a ··· 1293 1132 struct file *out, loff_t __user *off_out, 1294 1133 size_t len, unsigned int flags) 1295 1134 { 1296 - struct pipe_inode_info *pipe; 1135 + struct pipe_inode_info *ipipe; 1136 + struct pipe_inode_info *opipe; 1297 1137 loff_t offset, *off; 1298 1138 long ret; 1299 1139 1300 - pipe = pipe_info(in->f_path.dentry->d_inode); 1301 - if (pipe) { 1140 + ipipe = pipe_info(in->f_path.dentry->d_inode); 1141 + opipe = pipe_info(out->f_path.dentry->d_inode); 1142 + 1143 + if (ipipe && opipe) { 1144 + if (off_in || off_out) 1145 + return -ESPIPE; 1146 + 1147 + if (!(in->f_mode & FMODE_READ)) 1148 + return -EBADF; 1149 + 1150 + if (!(out->f_mode & FMODE_WRITE)) 1151 + return -EBADF; 1152 + 1153 + /* Splicing to self would be fun, but... 
*/ 1154 + if (ipipe == opipe) 1155 + return -EINVAL; 1156 + 1157 + return splice_pipe_to_pipe(ipipe, opipe, len, flags); 1158 + } 1159 + 1160 + if (ipipe) { 1302 1161 if (off_in) 1303 1162 return -ESPIPE; 1304 1163 if (off_out) { ··· 1330 1149 } else 1331 1150 off = &out->f_pos; 1332 1151 1333 - ret = do_splice_from(pipe, out, off, len, flags); 1152 + ret = do_splice_from(ipipe, out, off, len, flags); 1334 1153 1335 1154 if (off_out && copy_to_user(off_out, off, sizeof(loff_t))) 1336 1155 ret = -EFAULT; ··· 1338 1157 return ret; 1339 1158 } 1340 1159 1341 - pipe = pipe_info(out->f_path.dentry->d_inode); 1342 - if (pipe) { 1160 + if (opipe) { 1343 1161 if (off_out) 1344 1162 return -ESPIPE; 1345 1163 if (off_in) { ··· 1350 1170 } else 1351 1171 off = &in->f_pos; 1352 1172 1353 - ret = do_splice_to(in, off, pipe, len, flags); 1173 + ret = do_splice_to(in, off, opipe, len, flags); 1354 1174 1355 1175 if (off_in && copy_to_user(off_in, off, sizeof(loff_t))) 1356 1176 ret = -EFAULT; ··· 1691 1511 * Make sure there's data to read. Wait for input if we can, otherwise 1692 1512 * return an appropriate error. 1693 1513 */ 1694 - static int link_ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags) 1514 + static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags) 1695 1515 { 1696 1516 int ret; 1697 1517 ··· 1729 1549 * Make sure there's writeable room. Wait for room if we can, otherwise 1730 1550 * return an appropriate error. 1731 1551 */ 1732 - static int link_opipe_prep(struct pipe_inode_info *pipe, unsigned int flags) 1552 + static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags) 1733 1553 { 1734 1554 int ret; 1735 1555 ··· 1763 1583 } 1764 1584 1765 1585 pipe_unlock(pipe); 1586 + return ret; 1587 + } 1588 + 1589 + /* 1590 + * Splice contents of ipipe to opipe. 
1591 + */ 1592 + static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe, 1593 + struct pipe_inode_info *opipe, 1594 + size_t len, unsigned int flags) 1595 + { 1596 + struct pipe_buffer *ibuf, *obuf; 1597 + int ret = 0, nbuf; 1598 + bool input_wakeup = false; 1599 + 1600 + 1601 + retry: 1602 + ret = ipipe_prep(ipipe, flags); 1603 + if (ret) 1604 + return ret; 1605 + 1606 + ret = opipe_prep(opipe, flags); 1607 + if (ret) 1608 + return ret; 1609 + 1610 + /* 1611 + * Potential ABBA deadlock, work around it by ordering lock 1612 + * grabbing by pipe info address. Otherwise two different processes 1613 + * could deadlock (one doing tee from A -> B, the other from B -> A). 1614 + */ 1615 + pipe_double_lock(ipipe, opipe); 1616 + 1617 + do { 1618 + if (!opipe->readers) { 1619 + send_sig(SIGPIPE, current, 0); 1620 + if (!ret) 1621 + ret = -EPIPE; 1622 + break; 1623 + } 1624 + 1625 + if (!ipipe->nrbufs && !ipipe->writers) 1626 + break; 1627 + 1628 + /* 1629 + * Cannot make any progress, because either the input 1630 + * pipe is empty or the output pipe is full. 1631 + */ 1632 + if (!ipipe->nrbufs || opipe->nrbufs >= PIPE_BUFFERS) { 1633 + /* Already processed some buffers, break */ 1634 + if (ret) 1635 + break; 1636 + 1637 + if (flags & SPLICE_F_NONBLOCK) { 1638 + ret = -EAGAIN; 1639 + break; 1640 + } 1641 + 1642 + /* 1643 + * We raced with another reader/writer and haven't 1644 + * managed to process any buffers. A zero return 1645 + * value means EOF, so retry instead. 
1646 + */ 1647 + pipe_unlock(ipipe); 1648 + pipe_unlock(opipe); 1649 + goto retry; 1650 + } 1651 + 1652 + ibuf = ipipe->bufs + ipipe->curbuf; 1653 + nbuf = (opipe->curbuf + opipe->nrbufs) % PIPE_BUFFERS; 1654 + obuf = opipe->bufs + nbuf; 1655 + 1656 + if (len >= ibuf->len) { 1657 + /* 1658 + * Simply move the whole buffer from ipipe to opipe 1659 + */ 1660 + *obuf = *ibuf; 1661 + ibuf->ops = NULL; 1662 + opipe->nrbufs++; 1663 + ipipe->curbuf = (ipipe->curbuf + 1) % PIPE_BUFFERS; 1664 + ipipe->nrbufs--; 1665 + input_wakeup = true; 1666 + } else { 1667 + /* 1668 + * Get a reference to this pipe buffer, 1669 + * so we can copy the contents over. 1670 + */ 1671 + ibuf->ops->get(ipipe, ibuf); 1672 + *obuf = *ibuf; 1673 + 1674 + /* 1675 + * Don't inherit the gift flag, we need to 1676 + * prevent multiple steals of this page. 1677 + */ 1678 + obuf->flags &= ~PIPE_BUF_FLAG_GIFT; 1679 + 1680 + obuf->len = len; 1681 + opipe->nrbufs++; 1682 + ibuf->offset += obuf->len; 1683 + ibuf->len -= obuf->len; 1684 + } 1685 + ret += obuf->len; 1686 + len -= obuf->len; 1687 + } while (len); 1688 + 1689 + pipe_unlock(ipipe); 1690 + pipe_unlock(opipe); 1691 + 1692 + /* 1693 + * If we put data in the output pipe, wakeup any potential readers. 1694 + */ 1695 + if (ret > 0) { 1696 + smp_mb(); 1697 + if (waitqueue_active(&opipe->wait)) 1698 + wake_up_interruptible(&opipe->wait); 1699 + kill_fasync(&opipe->fasync_readers, SIGIO, POLL_IN); 1700 + } 1701 + if (input_wakeup) 1702 + wakeup_pipe_writers(ipipe); 1703 + 1766 1704 return ret; 1767 1705 } 1768 1706 ··· 1988 1690 * Keep going, unless we encounter an error. The ipipe/opipe 1989 1691 * ordering doesn't really matter. 1990 1692 */ 1991 - ret = link_ipipe_prep(ipipe, flags); 1693 + ret = ipipe_prep(ipipe, flags); 1992 1694 if (!ret) { 1993 - ret = link_opipe_prep(opipe, flags); 1695 + ret = opipe_prep(opipe, flags); 1994 1696 if (!ret) 1995 1697 ret = link_pipe(ipipe, opipe, len, flags); 1996 1698 }
+1 -1
fs/udf/super.c
··· 1915 1915 if (uopt.flags & (1 << UDF_FLAG_BLOCKSIZE_SET)) { 1916 1916 ret = udf_load_vrs(sb, &uopt, silent, &fileset); 1917 1917 } else { 1918 - uopt.blocksize = bdev_hardsect_size(sb->s_bdev); 1918 + uopt.blocksize = bdev_logical_block_size(sb->s_bdev); 1919 1919 ret = udf_load_vrs(sb, &uopt, silent, &fileset); 1920 1920 if (!ret && uopt.blocksize != UDF_DEFAULT_BLOCKSIZE) { 1921 1921 if (!silent)
+1 -1
fs/xfs/linux-2.6/xfs_buf.c
··· 1501 1501 struct block_device *bdev) 1502 1502 { 1503 1503 return xfs_setsize_buftarg_flags(btp, 1504 - PAGE_CACHE_SIZE, bdev_hardsect_size(bdev), 0); 1504 + PAGE_CACHE_SIZE, bdev_logical_block_size(bdev), 0); 1505 1505 } 1506 1506 1507 1507 int
+5 -5
include/linux/bio.h
··· 218 218 #define bio_sectors(bio) ((bio)->bi_size >> 9) 219 219 #define bio_empty_barrier(bio) (bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio)) 220 220 221 - static inline unsigned int bio_cur_sectors(struct bio *bio) 221 + static inline unsigned int bio_cur_bytes(struct bio *bio) 222 222 { 223 223 if (bio->bi_vcnt) 224 - return bio_iovec(bio)->bv_len >> 9; 224 + return bio_iovec(bio)->bv_len; 225 225 else /* dataless requests such as discard */ 226 - return bio->bi_size >> 9; 226 + return bio->bi_size; 227 227 } 228 228 229 229 static inline void *bio_data(struct bio *bio) ··· 279 279 #define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \ 280 280 (((addr1) | (mask)) == (((addr2) - 1) | (mask))) 281 281 #define BIOVEC_SEG_BOUNDARY(q, b1, b2) \ 282 - __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, (q)->seg_boundary_mask) 282 + __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q))) 283 283 #define BIO_SEG_BOUNDARY(q, b1, b2) \ 284 284 BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2))) 285 285 ··· 506 506 } 507 507 508 508 /* 509 - * BIO list managment for use by remapping drivers (e.g. DM or MD). 509 + * BIO list management for use by remapping drivers (e.g. DM or MD) and loop. 510 510 * 511 511 * A bio_list anchors a singly-linked list of bios chained through the bi_next 512 512 * member of the bio. The bio_list also caches the last list member to allow
+181 -64
include/linux/blkdev.h
··· 166 166 enum rq_cmd_type_bits cmd_type; 167 167 unsigned long atomic_flags; 168 168 169 - /* Maintain bio traversal state for part by part I/O submission. 170 - * hard_* are block layer internals, no driver should touch them! 171 - */ 172 - 173 - sector_t sector; /* next sector to submit */ 174 - sector_t hard_sector; /* next sector to complete */ 175 - unsigned long nr_sectors; /* no. of sectors left to submit */ 176 - unsigned long hard_nr_sectors; /* no. of sectors left to complete */ 177 - /* no. of sectors left to submit in the current segment */ 178 - unsigned int current_nr_sectors; 179 - 180 - /* no. of sectors left to complete in the current segment */ 181 - unsigned int hard_cur_sectors; 169 + /* the following two fields are internal, NEVER access directly */ 170 + sector_t __sector; /* sector cursor */ 171 + unsigned int __data_len; /* total data len */ 182 172 183 173 struct bio *bio; 184 174 struct bio *biotail; ··· 201 211 202 212 unsigned short ioprio; 203 213 204 - void *special; 205 - char *buffer; 214 + void *special; /* opaque pointer available for LLD use */ 215 + char *buffer; /* kaddr of the current segment if available */ 206 216 207 217 int tag; 208 218 int errors; ··· 216 226 unsigned char __cmd[BLK_MAX_CDB]; 217 227 unsigned char *cmd; 218 228 219 - unsigned int data_len; 220 229 unsigned int extra_len; /* length of alignment and padding */ 221 230 unsigned int sense_len; 222 - void *data; 231 + unsigned int resid_len; /* residual count */ 223 232 void *sense; 224 233 225 234 unsigned long deadline; ··· 307 318 struct kobject kobj; 308 319 }; 309 320 321 + struct queue_limits { 322 + unsigned long bounce_pfn; 323 + unsigned long seg_boundary_mask; 324 + 325 + unsigned int max_hw_sectors; 326 + unsigned int max_sectors; 327 + unsigned int max_segment_size; 328 + unsigned int physical_block_size; 329 + unsigned int alignment_offset; 330 + unsigned int io_min; 331 + unsigned int io_opt; 332 + 333 + unsigned short logical_block_size; 334 + 
unsigned short max_hw_segments; 335 + unsigned short max_phys_segments; 336 + 337 + unsigned char misaligned; 338 + unsigned char no_cluster; 339 + }; 340 + 310 341 struct request_queue 311 342 { 312 343 /* ··· 378 369 /* 379 370 * queue needs bounce pages for pages above this limit 380 371 */ 381 - unsigned long bounce_pfn; 382 372 gfp_t bounce_gfp; 383 373 384 374 /* ··· 406 398 unsigned int nr_congestion_off; 407 399 unsigned int nr_batching; 408 400 409 - unsigned int max_sectors; 410 - unsigned int max_hw_sectors; 411 - unsigned short max_phys_segments; 412 - unsigned short max_hw_segments; 413 - unsigned short hardsect_size; 414 - unsigned int max_segment_size; 415 - 416 - unsigned long seg_boundary_mask; 417 401 void *dma_drain_buffer; 418 402 unsigned int dma_drain_size; 419 403 unsigned int dma_pad_mask; ··· 415 415 struct list_head tag_busy_list; 416 416 417 417 unsigned int nr_sorted; 418 - unsigned int in_flight; 418 + unsigned int in_flight[2]; 419 419 420 420 unsigned int rq_timeout; 421 421 struct timer_list timeout; 422 422 struct list_head timeout_list; 423 + 424 + struct queue_limits limits; 423 425 424 426 /* 425 427 * sg stuff ··· 522 520 struct request_queue *q) 523 521 { 524 522 __clear_bit(flag, &q->queue_flags); 523 + } 524 + 525 + static inline int queue_in_flight(struct request_queue *q) 526 + { 527 + return q->in_flight[0] + q->in_flight[1]; 525 528 } 526 529 527 530 static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) ··· 759 752 extern void blk_put_request(struct request *); 760 753 extern void __blk_put_request(struct request_queue *, struct request *); 761 754 extern struct request *blk_get_request(struct request_queue *, int, gfp_t); 755 + extern struct request *blk_make_request(struct request_queue *, struct bio *, 756 + gfp_t); 762 757 extern void blk_insert_request(struct request_queue *, struct request *, int, void *); 763 758 extern void blk_requeue_request(struct request_queue *, struct request *); 
764 759 extern int blk_rq_check_limits(struct request_queue *q, struct request *rq); 765 760 extern int blk_lld_busy(struct request_queue *q); 761 + extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, 762 + struct bio_set *bs, gfp_t gfp_mask, 763 + int (*bio_ctr)(struct bio *, struct bio *, void *), 764 + void *data); 765 + extern void blk_rq_unprep_clone(struct request *rq); 766 766 extern int blk_insert_cloned_request(struct request_queue *q, 767 767 struct request *rq); 768 768 extern void blk_plug_device(struct request_queue *); ··· 780 766 unsigned int, void __user *); 781 767 extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, 782 768 struct scsi_ioctl_command __user *); 783 - 784 - /* 785 - * Temporary export, until SCSI gets fixed up. 786 - */ 787 - extern int blk_rq_append_bio(struct request_queue *q, struct request *rq, 788 - struct bio *bio); 789 769 790 770 /* 791 771 * A queue has just exitted congestion. Note this in the global counter of ··· 806 798 extern void __blk_stop_queue(struct request_queue *q); 807 799 extern void __blk_run_queue(struct request_queue *); 808 800 extern void blk_run_queue(struct request_queue *); 809 - extern void blk_start_queueing(struct request_queue *); 810 801 extern int blk_rq_map_user(struct request_queue *, struct request *, 811 802 struct rq_map_data *, void __user *, unsigned long, 812 803 gfp_t); ··· 838 831 blk_run_backing_dev(mapping->backing_dev_info, NULL); 839 832 } 840 833 841 - extern void blkdev_dequeue_request(struct request *req); 834 + /* 835 + * blk_rq_pos() : the current sector 836 + * blk_rq_bytes() : bytes left in the entire request 837 + * blk_rq_cur_bytes() : bytes left in the current segment 838 + * blk_rq_sectors() : sectors left in the entire request 839 + * blk_rq_cur_sectors() : sectors left in the current segment 840 + */ 841 + static inline sector_t blk_rq_pos(const struct request *rq) 842 + { 843 + return rq->__sector; 844 + } 845 + 846 + static 
inline unsigned int blk_rq_bytes(const struct request *rq) 847 + { 848 + return rq->__data_len; 849 + } 850 + 851 + static inline int blk_rq_cur_bytes(const struct request *rq) 852 + { 853 + return rq->bio ? bio_cur_bytes(rq->bio) : 0; 854 + } 855 + 856 + static inline unsigned int blk_rq_sectors(const struct request *rq) 857 + { 858 + return blk_rq_bytes(rq) >> 9; 859 + } 860 + 861 + static inline unsigned int blk_rq_cur_sectors(const struct request *rq) 862 + { 863 + return blk_rq_cur_bytes(rq) >> 9; 864 + } 842 865 843 866 /* 844 - * blk_end_request() and friends. 845 - * __blk_end_request() and end_request() must be called with 846 - * the request queue spinlock acquired. 867 + * Request issue related functions. 868 + */ 869 + extern struct request *blk_peek_request(struct request_queue *q); 870 + extern void blk_start_request(struct request *rq); 871 + extern struct request *blk_fetch_request(struct request_queue *q); 872 + 873 + /* 874 + * Request completion related functions. 875 + * 876 + * blk_update_request() completes given number of bytes and updates 877 + * the request without completing it. 878 + * 879 + * blk_end_request() and friends. __blk_end_request() must be called 880 + * with the request queue spinlock acquired. 847 881 * 848 882 * Several drivers define their own end_request and call 849 883 * blk_end_request() for parts of the original function. 850 884 * This prevents code duplication in drivers. 
851 885 */ 852 - extern int blk_end_request(struct request *rq, int error, 853 - unsigned int nr_bytes); 854 - extern int __blk_end_request(struct request *rq, int error, 855 - unsigned int nr_bytes); 856 - extern int blk_end_bidi_request(struct request *rq, int error, 857 - unsigned int nr_bytes, unsigned int bidi_bytes); 858 - extern void end_request(struct request *, int); 859 - extern int blk_end_request_callback(struct request *rq, int error, 860 - unsigned int nr_bytes, 861 - int (drv_callback)(struct request *)); 886 + extern bool blk_update_request(struct request *rq, int error, 887 + unsigned int nr_bytes); 888 + extern bool blk_end_request(struct request *rq, int error, 889 + unsigned int nr_bytes); 890 + extern void blk_end_request_all(struct request *rq, int error); 891 + extern bool blk_end_request_cur(struct request *rq, int error); 892 + extern bool __blk_end_request(struct request *rq, int error, 893 + unsigned int nr_bytes); 894 + extern void __blk_end_request_all(struct request *rq, int error); 895 + extern bool __blk_end_request_cur(struct request *rq, int error); 896 + 862 897 extern void blk_complete_request(struct request *); 863 898 extern void __blk_complete_request(struct request *); 864 899 extern void blk_abort_request(struct request *); 865 900 extern void blk_abort_queue(struct request_queue *); 866 - extern void blk_update_request(struct request *rq, int error, 867 - unsigned int nr_bytes); 868 - 869 - /* 870 - * blk_end_request() takes bytes instead of sectors as a complete size. 871 - * blk_rq_bytes() returns bytes left to complete in the entire request. 872 - * blk_rq_cur_bytes() returns bytes left to complete in the current segment. 
873 - */ 874 - extern unsigned int blk_rq_bytes(struct request *rq); 875 - extern unsigned int blk_rq_cur_bytes(struct request *rq); 876 901 877 902 /* 878 903 * Access functions for manipulating queue properties ··· 916 877 extern void blk_queue_make_request(struct request_queue *, make_request_fn *); 917 878 extern void blk_queue_bounce_limit(struct request_queue *, u64); 918 879 extern void blk_queue_max_sectors(struct request_queue *, unsigned int); 880 + extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int); 919 881 extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short); 920 882 extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short); 921 883 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); 922 - extern void blk_queue_hardsect_size(struct request_queue *, unsigned short); 884 + extern void blk_queue_logical_block_size(struct request_queue *, unsigned short); 885 + extern void blk_queue_physical_block_size(struct request_queue *, unsigned short); 886 + extern void blk_queue_alignment_offset(struct request_queue *q, 887 + unsigned int alignment); 888 + extern void blk_queue_io_min(struct request_queue *q, unsigned int min); 889 + extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt); 890 + extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, 891 + sector_t offset); 892 + extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev, 893 + sector_t offset); 923 894 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); 924 895 extern void blk_queue_dma_pad(struct request_queue *, unsigned int); 925 896 extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int); ··· 1016 967 1017 968 #define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist) 1018 969 1019 - static inline int queue_hardsect_size(struct request_queue *q) 970 + static inline 
unsigned long queue_bounce_pfn(struct request_queue *q) 971 + { 972 + return q->limits.bounce_pfn; 973 + } 974 + 975 + static inline unsigned long queue_segment_boundary(struct request_queue *q) 976 + { 977 + return q->limits.seg_boundary_mask; 978 + } 979 + 980 + static inline unsigned int queue_max_sectors(struct request_queue *q) 981 + { 982 + return q->limits.max_sectors; 983 + } 984 + 985 + static inline unsigned int queue_max_hw_sectors(struct request_queue *q) 986 + { 987 + return q->limits.max_hw_sectors; 988 + } 989 + 990 + static inline unsigned short queue_max_hw_segments(struct request_queue *q) 991 + { 992 + return q->limits.max_hw_segments; 993 + } 994 + 995 + static inline unsigned short queue_max_phys_segments(struct request_queue *q) 996 + { 997 + return q->limits.max_phys_segments; 998 + } 999 + 1000 + static inline unsigned int queue_max_segment_size(struct request_queue *q) 1001 + { 1002 + return q->limits.max_segment_size; 1003 + } 1004 + 1005 + static inline unsigned short queue_logical_block_size(struct request_queue *q) 1020 1006 { 1021 1007 int retval = 512; 1022 1008 1023 - if (q && q->hardsect_size) 1024 - retval = q->hardsect_size; 1009 + if (q && q->limits.logical_block_size) 1010 + retval = q->limits.logical_block_size; 1025 1011 1026 1012 return retval; 1027 1013 } 1028 1014 1029 - static inline int bdev_hardsect_size(struct block_device *bdev) 1015 + static inline unsigned short bdev_logical_block_size(struct block_device *bdev) 1030 1016 { 1031 - return queue_hardsect_size(bdev_get_queue(bdev)); 1017 + return queue_logical_block_size(bdev_get_queue(bdev)); 1018 + } 1019 + 1020 + static inline unsigned int queue_physical_block_size(struct request_queue *q) 1021 + { 1022 + return q->limits.physical_block_size; 1023 + } 1024 + 1025 + static inline unsigned int queue_io_min(struct request_queue *q) 1026 + { 1027 + return q->limits.io_min; 1028 + } 1029 + 1030 + static inline unsigned int queue_io_opt(struct request_queue *q) 1031 + { 
1032 + return q->limits.io_opt; 1033 + } 1034 + 1035 + static inline int queue_alignment_offset(struct request_queue *q) 1036 + { 1037 + if (q && q->limits.misaligned) 1038 + return -1; 1039 + 1040 + if (q && q->limits.alignment_offset) 1041 + return q->limits.alignment_offset; 1042 + 1043 + return 0; 1044 + } 1045 + 1046 + static inline int queue_sector_alignment_offset(struct request_queue *q, 1047 + sector_t sector) 1048 + { 1049 + return ((sector << 9) - q->limits.alignment_offset) 1050 + & (q->limits.io_min - 1); 1032 1051 } 1033 1052 1034 1053 static inline int queue_dma_alignment(struct request_queue *q)
+1 -1
include/linux/device-mapper.h
··· 149 149 unsigned max_hw_sectors; 150 150 unsigned max_sectors; 151 151 unsigned max_segment_size; 152 - unsigned short hardsect_size; 152 + unsigned short logical_block_size; 153 153 unsigned short max_hw_segments; 154 154 unsigned short max_phys_segments; 155 155 unsigned char no_cluster; /* inverted so that 0 is default */
+1 -3
include/linux/elevator.h
··· 103 103 extern void elv_merge_requests(struct request_queue *, struct request *, 104 104 struct request *); 105 105 extern void elv_merged_request(struct request_queue *, struct request *, int); 106 - extern void elv_dequeue_request(struct request_queue *, struct request *); 107 106 extern void elv_requeue_request(struct request_queue *, struct request *); 108 107 extern int elv_queue_empty(struct request_queue *); 109 - extern struct request *elv_next_request(struct request_queue *q); 110 108 extern struct request *elv_former_request(struct request_queue *, struct request *); 111 109 extern struct request *elv_latter_request(struct request_queue *, struct request *); 112 110 extern int elv_register_queue(struct request_queue *q); ··· 169 171 ELV_MQUEUE_MUST, 170 172 }; 171 173 172 - #define rq_end_sector(rq) ((rq)->sector + (rq)->nr_sectors) 174 + #define rq_end_sector(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq)) 173 175 #define rb_entry_rq(node) rb_entry((node), struct request, rb_node) 174 176 175 177 /*
+2
include/linux/fs.h
··· 2205 2205 /* fs/splice.c */ 2206 2206 extern ssize_t generic_file_splice_read(struct file *, loff_t *, 2207 2207 struct pipe_inode_info *, size_t, unsigned int); 2208 + extern ssize_t default_file_splice_read(struct file *, loff_t *, 2209 + struct pipe_inode_info *, size_t, unsigned int); 2208 2210 extern ssize_t generic_file_splice_write(struct pipe_inode_info *, 2209 2211 struct file *, loff_t *, size_t, unsigned int); 2210 2212 extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
+1
include/linux/genhd.h
··· 90 90 struct hd_struct { 91 91 sector_t start_sect; 92 92 sector_t nr_sects; 93 + sector_t alignment_offset; 93 94 struct device __dev; 94 95 struct kobject *holder_dir; 95 96 int policy, partno;
+3 -3
include/linux/iocontext.h
··· 64 64 * and kmalloc'ed. These could be shared between processes. 65 65 */ 66 66 struct io_context { 67 - atomic_t refcount; 67 + atomic_long_t refcount; 68 68 atomic_t nr_tasks; 69 69 70 70 /* all the fields below are protected by this lock */ ··· 91 91 * if ref count is zero, don't allow sharing (ioc is going away, it's 92 92 * a race). 93 93 */ 94 - if (ioc && atomic_inc_not_zero(&ioc->refcount)) { 95 - atomic_inc(&ioc->nr_tasks); 94 + if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) { 95 + atomic_long_inc(&ioc->refcount); 96 96 return ioc; 97 97 } 98 98
+1 -2
include/linux/loop.h
··· 56 56 gfp_t old_gfp_mask; 57 57 58 58 spinlock_t lo_lock; 59 - struct bio *lo_bio; 60 - struct bio *lo_biotail; 59 + struct bio_list lo_bio_list; 61 60 int lo_state; 62 61 struct mutex lo_ctl_mutex; 63 62 struct task_struct *lo_thread;
-206
include/linux/mg_disk.h
··· 1 - /* 2 - * include/linux/mg_disk.c 3 - * 4 - * Support for the mGine m[g]flash IO mode. 5 - * Based on legacy hd.c 6 - * 7 - * (c) 2008 mGine Co.,LTD 8 - * (c) 2008 unsik Kim <donari75@gmail.com> 9 - * 10 - * This program is free software; you can redistribute it and/or modify 11 - * it under the terms of the GNU General Public License version 2 as 12 - * published by the Free Software Foundation. 13 - */ 14 - 15 - #ifndef __MG_DISK_H__ 16 - #define __MG_DISK_H__ 17 - 18 - #include <linux/blkdev.h> 19 - #include <linux/ata.h> 20 - 21 - /* name for block device */ 22 - #define MG_DISK_NAME "mgd" 23 - /* name for platform device */ 24 - #define MG_DEV_NAME "mg_disk" 25 - 26 - #define MG_DISK_MAJ 0 27 - #define MG_DISK_MAX_PART 16 28 - #define MG_SECTOR_SIZE 512 29 - #define MG_MAX_SECTS 256 30 - 31 - /* Register offsets */ 32 - #define MG_BUFF_OFFSET 0x8000 33 - #define MG_STORAGE_BUFFER_SIZE 0x200 34 - #define MG_REG_OFFSET 0xC000 35 - #define MG_REG_FEATURE (MG_REG_OFFSET + 2) /* write case */ 36 - #define MG_REG_ERROR (MG_REG_OFFSET + 2) /* read case */ 37 - #define MG_REG_SECT_CNT (MG_REG_OFFSET + 4) 38 - #define MG_REG_SECT_NUM (MG_REG_OFFSET + 6) 39 - #define MG_REG_CYL_LOW (MG_REG_OFFSET + 8) 40 - #define MG_REG_CYL_HIGH (MG_REG_OFFSET + 0xA) 41 - #define MG_REG_DRV_HEAD (MG_REG_OFFSET + 0xC) 42 - #define MG_REG_COMMAND (MG_REG_OFFSET + 0xE) /* write case */ 43 - #define MG_REG_STATUS (MG_REG_OFFSET + 0xE) /* read case */ 44 - #define MG_REG_DRV_CTRL (MG_REG_OFFSET + 0x10) 45 - #define MG_REG_BURST_CTRL (MG_REG_OFFSET + 0x12) 46 - 47 - /* "Drive Select/Head Register" bit values */ 48 - #define MG_REG_HEAD_MUST_BE_ON 0xA0 /* These 2 bits are always on */ 49 - #define MG_REG_HEAD_DRIVE_MASTER (0x00 | MG_REG_HEAD_MUST_BE_ON) 50 - #define MG_REG_HEAD_DRIVE_SLAVE (0x10 | MG_REG_HEAD_MUST_BE_ON) 51 - #define MG_REG_HEAD_LBA_MODE (0x40 | MG_REG_HEAD_MUST_BE_ON) 52 - 53 - 54 - /* "Device Control Register" bit values */ 55 - #define MG_REG_CTRL_INTR_ENABLE 0x0 56 
- #define MG_REG_CTRL_INTR_DISABLE (0x1<<1) 57 - #define MG_REG_CTRL_RESET (0x1<<2) 58 - #define MG_REG_CTRL_INTR_POLA_ACTIVE_HIGH 0x0 59 - #define MG_REG_CTRL_INTR_POLA_ACTIVE_LOW (0x1<<4) 60 - #define MG_REG_CTRL_DPD_POLA_ACTIVE_LOW 0x0 61 - #define MG_REG_CTRL_DPD_POLA_ACTIVE_HIGH (0x1<<5) 62 - #define MG_REG_CTRL_DPD_DISABLE 0x0 63 - #define MG_REG_CTRL_DPD_ENABLE (0x1<<6) 64 - 65 - /* Status register bit */ 66 - /* error bit in status register */ 67 - #define MG_REG_STATUS_BIT_ERROR 0x01 68 - /* corrected error in status register */ 69 - #define MG_REG_STATUS_BIT_CORRECTED_ERROR 0x04 70 - /* data request bit in status register */ 71 - #define MG_REG_STATUS_BIT_DATA_REQ 0x08 72 - /* DSC - Drive Seek Complete */ 73 - #define MG_REG_STATUS_BIT_SEEK_DONE 0x10 74 - /* DWF - Drive Write Fault */ 75 - #define MG_REG_STATUS_BIT_WRITE_FAULT 0x20 76 - #define MG_REG_STATUS_BIT_READY 0x40 77 - #define MG_REG_STATUS_BIT_BUSY 0x80 78 - 79 - /* handy status */ 80 - #define MG_STAT_READY (MG_REG_STATUS_BIT_READY | MG_REG_STATUS_BIT_SEEK_DONE) 81 - #define MG_READY_OK(s) (((s) & (MG_STAT_READY | \ 82 - (MG_REG_STATUS_BIT_BUSY | \ 83 - MG_REG_STATUS_BIT_WRITE_FAULT | \ 84 - MG_REG_STATUS_BIT_ERROR))) == MG_STAT_READY) 85 - 86 - /* Error register */ 87 - #define MG_REG_ERR_AMNF 0x01 88 - #define MG_REG_ERR_ABRT 0x04 89 - #define MG_REG_ERR_IDNF 0x10 90 - #define MG_REG_ERR_UNC 0x40 91 - #define MG_REG_ERR_BBK 0x80 92 - 93 - /* error code for others */ 94 - #define MG_ERR_NONE 0 95 - #define MG_ERR_TIMEOUT 0x100 96 - #define MG_ERR_INIT_STAT 0x101 97 - #define MG_ERR_TRANSLATION 0x102 98 - #define MG_ERR_CTRL_RST 0x103 99 - #define MG_ERR_INV_STAT 0x104 100 - #define MG_ERR_RSTOUT 0x105 101 - 102 - #define MG_MAX_ERRORS 6 /* Max read/write errors */ 103 - 104 - /* command */ 105 - #define MG_CMD_RD 0x20 106 - #define MG_CMD_WR 0x30 107 - #define MG_CMD_SLEEP 0x99 108 - #define MG_CMD_WAKEUP 0xC3 109 - #define MG_CMD_ID 0xEC 110 - #define MG_CMD_WR_CONF 0x3C 111 - #define 
MG_CMD_RD_CONF 0x40 112 - 113 - /* operation mode */ 114 - #define MG_OP_CASCADE (1 << 0) 115 - #define MG_OP_CASCADE_SYNC_RD (1 << 1) 116 - #define MG_OP_CASCADE_SYNC_WR (1 << 2) 117 - #define MG_OP_INTERLEAVE (1 << 3) 118 - 119 - /* synchronous */ 120 - #define MG_BURST_LAT_4 (3 << 4) 121 - #define MG_BURST_LAT_5 (4 << 4) 122 - #define MG_BURST_LAT_6 (5 << 4) 123 - #define MG_BURST_LAT_7 (6 << 4) 124 - #define MG_BURST_LAT_8 (7 << 4) 125 - #define MG_BURST_LEN_4 (1 << 1) 126 - #define MG_BURST_LEN_8 (2 << 1) 127 - #define MG_BURST_LEN_16 (3 << 1) 128 - #define MG_BURST_LEN_32 (4 << 1) 129 - #define MG_BURST_LEN_CONT (0 << 1) 130 - 131 - /* timeout value (unit: ms) */ 132 - #define MG_TMAX_CONF_TO_CMD 1 133 - #define MG_TMAX_WAIT_RD_DRQ 10 134 - #define MG_TMAX_WAIT_WR_DRQ 500 135 - #define MG_TMAX_RST_TO_BUSY 10 136 - #define MG_TMAX_HDRST_TO_RDY 500 137 - #define MG_TMAX_SWRST_TO_RDY 500 138 - #define MG_TMAX_RSTOUT 3000 139 - 140 - /* device attribution */ 141 - /* use mflash as boot device */ 142 - #define MG_BOOT_DEV (1 << 0) 143 - /* use mflash as storage device */ 144 - #define MG_STORAGE_DEV (1 << 1) 145 - /* same as MG_STORAGE_DEV, but bootloader already done reset sequence */ 146 - #define MG_STORAGE_DEV_SKIP_RST (1 << 2) 147 - 148 - #define MG_DEV_MASK (MG_BOOT_DEV | MG_STORAGE_DEV | MG_STORAGE_DEV_SKIP_RST) 149 - 150 - /* names of GPIO resource */ 151 - #define MG_RST_PIN "mg_rst" 152 - /* except MG_BOOT_DEV, reset-out pin should be assigned */ 153 - #define MG_RSTOUT_PIN "mg_rstout" 154 - 155 - /* private driver data */ 156 - struct mg_drv_data { 157 - /* disk resource */ 158 - u32 use_polling; 159 - 160 - /* device attribution */ 161 - u32 dev_attr; 162 - 163 - /* internally used */ 164 - struct mg_host *host; 165 - }; 166 - 167 - /* main structure for mflash driver */ 168 - struct mg_host { 169 - struct device *dev; 170 - 171 - struct request_queue *breq; 172 - spinlock_t lock; 173 - struct gendisk *gd; 174 - 175 - struct timer_list timer; 176 - 
void (*mg_do_intr) (struct mg_host *); 177 - 178 - u16 id[ATA_ID_WORDS]; 179 - 180 - u16 cyls; 181 - u16 heads; 182 - u16 sectors; 183 - u32 n_sectors; 184 - u32 nres_sectors; 185 - 186 - void __iomem *dev_base; 187 - unsigned int irq; 188 - unsigned int rst; 189 - unsigned int rstout; 190 - 191 - u32 major; 192 - u32 error; 193 - }; 194 - 195 - /* 196 - * Debugging macro and defines 197 - */ 198 - #undef DO_MG_DEBUG 199 - #ifdef DO_MG_DEBUG 200 - # define MG_DBG(fmt, args...) \ 201 - printk(KERN_DEBUG "%s:%d "fmt, __func__, __LINE__, ##args) 202 - #else /* CONFIG_MG_DEBUG */ 203 - # define MG_DBG(fmt, args...) do { } while (0) 204 - #endif /* CONFIG_MG_DEBUG */ 205 - 206 - #endif
+1
include/linux/pipe_fs_i.h
··· 152 152 void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *); 153 153 int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *); 154 154 int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *); 155 + void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *); 155 156 156 157 #endif
+1 -2
include/linux/splice.h
··· 11 11 #include <linux/pipe_fs_i.h> 12 12 13 13 /* 14 - * splice is tied to pipes as a transport (at least for now), so we'll just 15 - * add the splice flags here. 14 + * Flags passed in from splice/tee/vmsplice 16 15 */ 17 16 #define SPLICE_F_MOVE (0x01) /* move pages instead of copying */ 18 17 #define SPLICE_F_NONBLOCK (0x02) /* don't block on the pipe splicing (but */
+12
include/linux/virtio_blk.h
··· 15 15 #define VIRTIO_BLK_F_GEOMETRY 4 /* Legacy geometry available */ 16 16 #define VIRTIO_BLK_F_RO 5 /* Disk is read-only */ 17 17 #define VIRTIO_BLK_F_BLK_SIZE 6 /* Block size of disk is available*/ 18 + #define VIRTIO_BLK_F_SCSI 7 /* Supports scsi command passthru */ 19 + #define VIRTIO_BLK_F_IDENTIFY 8 /* ATA IDENTIFY supported */ 20 + 21 + #define VIRTIO_BLK_ID_BYTES (sizeof(__u16[256])) /* IDENTIFY DATA */ 18 22 19 23 struct virtio_blk_config 20 24 { ··· 36 32 } geometry; 37 33 /* block size of device (if VIRTIO_BLK_F_BLK_SIZE) */ 38 34 __u32 blk_size; 35 + __u8 identify[VIRTIO_BLK_ID_BYTES]; 39 36 } __attribute__((packed)); 40 37 41 38 /* These two define direction. */ ··· 58 53 __u32 ioprio; 59 54 /* Sector (ie. 512 byte offset) */ 60 55 __u64 sector; 56 + }; 57 + 58 + struct virtio_scsi_inhdr { 59 + __u32 errors; 60 + __u32 data_len; 61 + __u32 sense_len; 62 + __u32 residual; 61 63 }; 62 64 63 65 /* And this is the final byte of the write scatter-gather list. */
+1 -1
include/scsi/scsi_cmnd.h
··· 270 270 271 271 static inline sector_t scsi_get_lba(struct scsi_cmnd *scmd) 272 272 { 273 - return scmd->request->sector; 273 + return blk_rq_pos(scmd->request); 274 274 } 275 275 276 276 static inline unsigned scsi_prot_sg_count(struct scsi_cmnd *cmd)
+12 -17
include/trace/events/block.h
··· 25 25 26 26 TP_fast_assign( 27 27 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; 28 - __entry->sector = blk_pc_request(rq) ? 0 : rq->hard_sector; 29 - __entry->nr_sector = blk_pc_request(rq) ? 30 - 0 : rq->hard_nr_sectors; 28 + __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq); 29 + __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq); 31 30 __entry->errors = rq->errors; 32 31 33 32 blk_fill_rwbs_rq(__entry->rwbs, rq); ··· 58 59 59 60 TP_fast_assign( 60 61 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; 61 - __entry->sector = blk_pc_request(rq) ? 0 : rq->hard_sector; 62 - __entry->nr_sector = blk_pc_request(rq) ? 63 - 0 : rq->hard_nr_sectors; 64 - __entry->bytes = blk_pc_request(rq) ? rq->data_len : 0; 62 + __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq); 63 + __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq); 64 + __entry->bytes = blk_pc_request(rq) ? blk_rq_bytes(rq) : 0; 65 65 66 66 blk_fill_rwbs_rq(__entry->rwbs, rq); 67 67 blk_dump_cmd(__get_str(cmd), rq); ··· 92 94 93 95 TP_fast_assign( 94 96 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; 95 - __entry->sector = blk_pc_request(rq) ? 0 : rq->hard_sector; 96 - __entry->nr_sector = blk_pc_request(rq) ? 97 - 0 : rq->hard_nr_sectors; 98 - __entry->bytes = blk_pc_request(rq) ? rq->data_len : 0; 97 + __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq); 98 + __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq); 99 + __entry->bytes = blk_pc_request(rq) ? blk_rq_bytes(rq) : 0; 99 100 100 101 blk_fill_rwbs_rq(__entry->rwbs, rq); 101 102 blk_dump_cmd(__get_str(cmd), rq); ··· 125 128 126 129 TP_fast_assign( 127 130 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; 128 - __entry->sector = blk_pc_request(rq) ? 0 : rq->hard_sector; 129 - __entry->nr_sector = blk_pc_request(rq) ? 130 - 0 : rq->hard_nr_sectors; 131 + __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq); 132 + __entry->nr_sector = blk_pc_request(rq) ? 
0 : blk_rq_sectors(rq); 131 133 __entry->errors = rq->errors; 132 134 133 135 blk_fill_rwbs_rq(__entry->rwbs, rq); ··· 157 161 158 162 TP_fast_assign( 159 163 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; 160 - __entry->sector = blk_pc_request(rq) ? 0 : rq->hard_sector; 161 - __entry->nr_sector = blk_pc_request(rq) ? 162 - 0 : rq->hard_nr_sectors; 164 + __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq); 165 + __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq); 163 166 __entry->errors = rq->errors; 164 167 165 168 blk_fill_rwbs_rq(__entry->rwbs, rq);
+9 -12
kernel/trace/blktrace.c
··· 669 669 670 670 if (blk_pc_request(rq)) { 671 671 what |= BLK_TC_ACT(BLK_TC_PC); 672 - __blk_add_trace(bt, 0, rq->data_len, rw, what, rq->errors, 673 - rq->cmd_len, rq->cmd); 672 + __blk_add_trace(bt, 0, blk_rq_bytes(rq), rw, 673 + what, rq->errors, rq->cmd_len, rq->cmd); 674 674 } else { 675 675 what |= BLK_TC_ACT(BLK_TC_FS); 676 - __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, 677 - rw, what, rq->errors, 0, NULL); 676 + __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), rw, 677 + what, rq->errors, 0, NULL); 678 678 } 679 679 } 680 680 ··· 881 881 return; 882 882 883 883 if (blk_pc_request(rq)) 884 - __blk_add_trace(bt, 0, rq->data_len, 0, BLK_TA_DRV_DATA, 885 - rq->errors, len, data); 884 + __blk_add_trace(bt, 0, blk_rq_bytes(rq), 0, 885 + BLK_TA_DRV_DATA, rq->errors, len, data); 886 886 else 887 - __blk_add_trace(bt, rq->hard_sector, rq->hard_nr_sectors << 9, 888 - 0, BLK_TA_DRV_DATA, rq->errors, len, data); 887 + __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 0, 888 + BLK_TA_DRV_DATA, rq->errors, len, data); 889 889 } 890 890 EXPORT_SYMBOL_GPL(blk_add_driver_data); 891 891 ··· 1724 1724 if (blk_discard_rq(rq)) 1725 1725 rw |= (1 << BIO_RW_DISCARD); 1726 1726 1727 - if (blk_pc_request(rq)) 1728 - bytes = rq->data_len; 1729 - else 1730 - bytes = rq->hard_nr_sectors << 9; 1727 + bytes = blk_rq_bytes(rq); 1731 1728 1732 1729 blk_fill_rwbs(rwbs, rw, bytes); 1733 1730 }
+2 -2
mm/bounce.c
··· 191 191 /* 192 192 * is destination page below bounce pfn? 193 193 */ 194 - if (page_to_pfn(page) <= q->bounce_pfn) 194 + if (page_to_pfn(page) <= queue_bounce_pfn(q)) 195 195 continue; 196 196 197 197 /* ··· 283 283 * don't waste time iterating over bio segments 284 284 */ 285 285 if (!(q->bounce_gfp & GFP_DMA)) { 286 - if (q->bounce_pfn >= blk_max_pfn) 286 + if (queue_bounce_pfn(q) >= blk_max_pfn) 287 287 return; 288 288 pool = page_pool; 289 289 } else {