
block: convert to pos and nr_sectors accessors

With recent cleanups, there is no place where a low level driver
directly manipulates request fields. This means that the 'hard'
request fields always equal their non-hard counterparts. Convert all
rq->sectors, nr_sectors and current_nr_sectors references to
accessors.

While at it, drop the superfluous blk_rq_pos() < 0 test in swim.c;
blk_rq_pos() returns an unsigned sector_t, so the test can never be true.

[ Impact: use pos and nr_sectors accessors ]
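
For reference, the accessors this patch converts callers to were added one
patch earlier in the series. Below is a minimal sketch of their shape,
assuming the include/linux/blkdev.h helpers at this point simply wrap the
block-layer-owned 'hard' fields (the duplicated fields themselves are
consolidated by later patches in the series):

/* Sketch only: accessor shape assumed from this point in the series. */
static inline sector_t blk_rq_pos(const struct request *rq)
{
        return rq->hard_sector;         /* next sector to be issued */
}

static inline unsigned int blk_rq_sectors(const struct request *rq)
{
        return rq->hard_nr_sectors;     /* sectors left in the request */
}

static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
{
        return rq->hard_cur_sectors;    /* sectors left in current segment */
}

The per-driver conversion below is then mechanical: rq->sector becomes
blk_rq_pos(rq), rq->nr_sectors becomes blk_rq_sectors(rq), and
rq->current_nr_sectors becomes blk_rq_cur_sectors(rq). Because the
sector-count accessors return unsigned int rather than unsigned long,
several printk format strings change from %lu/%ld to %u as well.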

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Geert Uytterhoeven <Geert.Uytterhoeven@sonycom.com>
Tested-by: Grant Likely <grant.likely@secretlab.ca>
Acked-by: Grant Likely <grant.likely@secretlab.ca>
Tested-by: Adrian McMenamin <adrian@mcmen.demon.co.uk>
Acked-by: Adrian McMenamin <adrian@mcmen.demon.co.uk>
Acked-by: Mike Miller <mike.miller@hp.com>
Cc: James Bottomley <James.Bottomley@HansenPartnership.com>
Cc: Bartlomiej Zolnierkiewicz <bzolnier@gmail.com>
Cc: Borislav Petkov <petkovbb@googlemail.com>
Cc: Sergei Shtylyov <sshtylyov@ru.mvista.com>
Cc: Eric Moore <Eric.Moore@lsi.com>
Cc: Alan Stern <stern@rowland.harvard.edu>
Cc: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Pete Zaitcev <zaitcev@redhat.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Cc: Paul Clements <paul.clements@steeleye.com>
Cc: Tim Waugh <tim@cyberelk.net>
Cc: Jeff Garzik <jgarzik@pobox.com>
Cc: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Alex Dubov <oakad@yahoo.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Dario Ballabio <ballabio_dario@emc.com>
Cc: David S. Miller <davem@davemloft.net>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: unsik Kim <donari75@gmail.com>
Cc: Laurent Vivier <Laurent@lvivier.info>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>

Authored by Tejun Heo, committed by Jens Axboe · 83096ebf 5b93629b

Diffstat: +292 -279

arch/um/drivers/ubd_kern.c | +1 -1
···
         }
 
         req = dev->request;
-        sector = req->sector;
+        sector = blk_rq_pos(req);
         while(dev->start_sg < dev->end_sg){
                 struct scatterlist *sg = &dev->sg[dev->start_sg];
 

block/as-iosched.c | +10 -8
···
         data_dir = rq_is_sync(rq1);
 
         last = ad->last_sector[data_dir];
-        s1 = rq1->sector;
-        s2 = rq2->sector;
+        s1 = blk_rq_pos(rq1);
+        s2 = blk_rq_pos(rq2);
 
         BUG_ON(data_dir != rq_is_sync(rq2));
 
···
                 as_update_thinktime(ad, aic, thinktime);
 
                 /* Calculate read -> read seek distance */
-                if (aic->last_request_pos < rq->sector)
-                        seek_dist = rq->sector - aic->last_request_pos;
+                if (aic->last_request_pos < blk_rq_pos(rq))
+                        seek_dist = blk_rq_pos(rq) -
+                                        aic->last_request_pos;
                 else
-                        seek_dist = aic->last_request_pos - rq->sector;
+                        seek_dist = aic->last_request_pos -
+                                        blk_rq_pos(rq);
                 as_update_seekdist(ad, aic, seek_dist);
         }
-        aic->last_request_pos = rq->sector + rq->nr_sectors;
+        aic->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
         set_bit(AS_TASK_IOSTARTED, &aic->state);
         spin_unlock(&aic->lock);
 }
···
 {
         unsigned long delay;    /* jiffies */
         sector_t last = ad->last_sector[ad->batch_data_dir];
-        sector_t next = rq->sector;
+        sector_t next = blk_rq_pos(rq);
         sector_t delta; /* acceptable close offset (in sectors) */
         sector_t s;
 
···
          * This has to be set in order to be correctly updated by
          * as_find_next_rq
          */
-        ad->last_sector[data_dir] = rq->sector + rq->nr_sectors;
+        ad->last_sector[data_dir] = blk_rq_pos(rq) + blk_rq_sectors(rq);
 
         if (data_dir == BLK_RW_SYNC) {
                 struct io_context *ioc = RQ_IOC(rq);

block/blk-barrier.c | +1 -1
···
         /*
          * The driver must store the error location in ->bi_sector, if
          * it supports it. For non-stacked drivers, this should be copied
-         * from rq->sector.
+         * from blk_rq_pos(rq).
          */
         if (error_sector)
                 *error_sector = bio->bi_sector;

block/blk-core.c | +8 -9
···
                 return;
 
         cpu = part_stat_lock();
-        part = disk_map_sector_rcu(rq->rq_disk, rq->sector);
+        part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq));
 
         if (!new_io)
                 part_stat_inc(cpu, part, merges[rw]);
···
                 rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->cmd_type,
                 rq->cmd_flags);
 
-        printk(KERN_INFO "  sector %llu, nr/cnr %lu/%u\n",
-               (unsigned long long)rq->sector,
-               rq->nr_sectors,
-               rq->current_nr_sectors);
+        printk(KERN_INFO "  sector %llu, nr/cnr %u/%u\n",
+               (unsigned long long)blk_rq_pos(rq),
+               blk_rq_sectors(rq), blk_rq_cur_sectors(rq));
         printk(KERN_INFO "  bio %p, biotail %p, buffer %p, len %u\n",
                rq->bio, rq->biotail,
                rq->buffer, rq->data_len);
···
  */
 int blk_rq_check_limits(struct request_queue *q, struct request *rq)
 {
-        if (rq->nr_sectors > q->max_sectors ||
+        if (blk_rq_sectors(rq) > q->max_sectors ||
             rq->data_len > q->max_hw_sectors << 9) {
                 printk(KERN_ERR "%s: over max size limit.\n", __func__);
                 return -EIO;
···
                 int cpu;
 
                 cpu = part_stat_lock();
-                part = disk_map_sector_rcu(req->rq_disk, req->sector);
+                part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
                 part_stat_add(cpu, part, sectors[rw], bytes >> 9);
                 part_stat_unlock();
         }
···
                 int cpu;
 
                 cpu = part_stat_lock();
-                part = disk_map_sector_rcu(req->rq_disk, req->sector);
+                part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
 
                 part_stat_inc(cpu, part, ios[rw]);
                 part_stat_add(cpu, part, ticks[rw], duration);
···
         if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) {
                 printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n",
                                 req->rq_disk ? req->rq_disk->disk_name : "?",
-                                (unsigned long long)req->sector);
+                                (unsigned long long)blk_rq_pos(req));
         }
 
         blk_account_io_completion(req, nr_bytes);

block/blk-merge.c | +5 -5
···
         else
                 max_sectors = q->max_sectors;
 
-        if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
+        if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
                 req->cmd_flags |= REQ_NOMERGE;
                 if (req == q->last_merge)
                         q->last_merge = NULL;
···
                 max_sectors = q->max_sectors;
 
 
-        if (req->nr_sectors + bio_sectors(bio) > max_sectors) {
+        if (blk_rq_sectors(req) + bio_sectors(bio) > max_sectors) {
                 req->cmd_flags |= REQ_NOMERGE;
                 if (req == q->last_merge)
                         q->last_merge = NULL;
···
         /*
          * Will it become too large?
          */
-        if ((req->nr_sectors + next->nr_sectors) > q->max_sectors)
+        if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > q->max_sectors)
                 return 0;
 
         total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
···
                 int cpu;
 
                 cpu = part_stat_lock();
-                part = disk_map_sector_rcu(req->rq_disk, req->sector);
+                part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req));
 
                 part_round_stats(cpu, part);
                 part_dec_in_flight(part);
···
         /*
          * not contiguous
          */
-        if (req->sector + req->nr_sectors != next->sector)
+        if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
                 return 0;
 
         if (rq_data_dir(req) != rq_data_dir(next)

block/cfq-iosched.c | +9 -9
···
         else if (rq_is_meta(rq2) && !rq_is_meta(rq1))
                 return rq2;
 
-        s1 = rq1->sector;
-        s2 = rq2->sector;
+        s1 = blk_rq_pos(rq1);
+        s2 = blk_rq_pos(rq2);
 
         last = cfqd->last_position;
 
···
 static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
                                           struct request *rq)
 {
-        if (rq->sector >= cfqd->last_position)
-                return rq->sector - cfqd->last_position;
+        if (blk_rq_pos(rq) >= cfqd->last_position)
+                return blk_rq_pos(rq) - cfqd->last_position;
         else
-                return cfqd->last_position - rq->sector;
+                return cfqd->last_position - blk_rq_pos(rq);
 }
 
 #define CIC_SEEK_THR  8 * 1024
···
 
         if (!cic->last_request_pos)
                 sdist = 0;
-        else if (cic->last_request_pos < rq->sector)
-                sdist = rq->sector - cic->last_request_pos;
+        else if (cic->last_request_pos < blk_rq_pos(rq))
+                sdist = blk_rq_pos(rq) - cic->last_request_pos;
         else
-                sdist = cic->last_request_pos - rq->sector;
+                sdist = cic->last_request_pos - blk_rq_pos(rq);
 
         /*
          * Don't allow the seek distance to get too large from the
···
         cfq_update_io_seektime(cfqd, cic, rq);
         cfq_update_idle_window(cfqd, cfqq, cic);
 
-        cic->last_request_pos = rq->sector + rq->nr_sectors;
+        cic->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
 
         if (cfqq == cfqd->active_queue) {
                 /*

block/deadline-iosched.c | +1 -1
···
 
         __rq = elv_rb_find(&dd->sort_list[bio_data_dir(bio)], sector);
         if (__rq) {
-                BUG_ON(sector != __rq->sector);
+                BUG_ON(sector != blk_rq_pos(__rq));
 
                 if (elv_rq_merge_ok(__rq, bio)) {
                         ret = ELEVATOR_FRONT_MERGE;

block/elevator.c | +11 -11
···
 #define ELV_HASH_FN(sec)        \
                 (hash_long(ELV_HASH_BLOCK((sec)), elv_hash_shift))
 #define ELV_HASH_ENTRIES        (1 << elv_hash_shift)
-#define rq_hash_key(rq)         ((rq)->sector + (rq)->nr_sectors)
+#define rq_hash_key(rq)         (blk_rq_pos(rq) + blk_rq_sectors(rq))
 
 DEFINE_TRACE(block_rq_insert);
 DEFINE_TRACE(block_rq_issue);
···
          * we can merge and sequence is ok, check if it's possible
          */
         if (elv_rq_merge_ok(__rq, bio)) {
-                if (__rq->sector + __rq->nr_sectors == bio->bi_sector)
+                if (blk_rq_pos(__rq) + blk_rq_sectors(__rq) == bio->bi_sector)
                         ret = ELEVATOR_BACK_MERGE;
-                else if (__rq->sector - bio_sectors(bio) == bio->bi_sector)
+                else if (blk_rq_pos(__rq) - bio_sectors(bio) == bio->bi_sector)
                         ret = ELEVATOR_FRONT_MERGE;
         }
 
···
                 parent = *p;
                 __rq = rb_entry(parent, struct request, rb_node);
 
-                if (rq->sector < __rq->sector)
+                if (blk_rq_pos(rq) < blk_rq_pos(__rq))
                         p = &(*p)->rb_left;
-                else if (rq->sector > __rq->sector)
+                else if (blk_rq_pos(rq) > blk_rq_pos(__rq))
                         p = &(*p)->rb_right;
                 else
                         return __rq;
···
         while (n) {
                 rq = rb_entry(n, struct request, rb_node);
 
-                if (sector < rq->sector)
+                if (sector < blk_rq_pos(rq))
                         n = n->rb_left;
-                else if (sector > rq->sector)
+                else if (sector > blk_rq_pos(rq))
                         n = n->rb_right;
                 else
                         return rq;
···
                         break;
                 if (pos->cmd_flags & stop_flags)
                         break;
-                if (rq->sector >= boundary) {
-                        if (pos->sector < boundary)
+                if (blk_rq_pos(rq) >= boundary) {
+                        if (blk_rq_pos(pos) < boundary)
                                 continue;
                 } else {
-                        if (pos->sector >= boundary)
+                        if (blk_rq_pos(pos) >= boundary)
                                 break;
                 }
-                if (rq->sector >= pos->sector)
+                if (blk_rq_pos(rq) >= blk_rq_pos(pos))
                         break;
         }
 

drivers/block/DAC960.c | +3 -3
···
   }
   Command->Completion = Request->end_io_data;
   Command->LogicalDriveNumber = (long)Request->rq_disk->private_data;
-  Command->BlockNumber = Request->sector;
-  Command->BlockCount = Request->nr_sectors;
+  Command->BlockNumber = blk_rq_pos(Request);
+  Command->BlockCount = blk_rq_sectors(Request);
   Command->Request = Request;
   blkdev_dequeue_request(Request);
   Command->SegmentCount = blk_rq_map_sg(req_q,
···
      * successfully as possible.
      */
     Command->SegmentCount = 1;
-    Command->BlockNumber = Request->sector;
+    Command->BlockNumber = blk_rq_pos(Request);
     Command->BlockCount = 1;
     DAC960_QueueReadWriteCommand(Command);
     return;

drivers/block/amiflop.c | +3 -3
···
         drive = floppy - unit;
 
         /* Here someone could investigate to be more efficient */
-        for (cnt = 0; cnt < CURRENT->current_nr_sectors; cnt++) {
+        for (cnt = 0; cnt < blk_rq_cur_sectors(CURRENT); cnt++) {
 #ifdef DEBUG
                 printk("fd: sector %ld + %d requested for %s\n",
-                       CURRENT->sector,cnt,
+                       blk_rq_pos(CURRENT), cnt,
                        (rq_data_dir(CURRENT) == READ) ? "read" : "write");
 #endif
-                block = CURRENT->sector + cnt;
+                block = blk_rq_pos(CURRENT) + cnt;
                 if ((int)block > floppy->blocks) {
                         __blk_end_request_cur(CURRENT, -EIO);
                         goto repeat;

drivers/block/ataflop.c | +5 -5
···
         if (IS_BUFFERED( drive, ReqSide, ReqTrack )) {
                 if (ReqCmd == READ) {
                         copy_buffer( SECTOR_BUFFER(ReqSector), ReqData );
-                        if (++ReqCnt < CURRENT->current_nr_sectors) {
+                        if (++ReqCnt < blk_rq_cur_sectors(CURRENT)) {
                                 /* read next sector */
                                 setup_req_params( drive );
                                 goto repeat;
···
                 }
         }
 
-        if (++ReqCnt < CURRENT->current_nr_sectors) {
+        if (++ReqCnt < blk_rq_cur_sectors(CURRENT)) {
                 /* read next sector */
                 setup_req_params( SelectedDrive );
                 do_fd_action( SelectedDrive );
···
 
         DPRINT(("redo_fd_request: CURRENT=%p dev=%s CURRENT->sector=%ld\n",
                 CURRENT, CURRENT ? CURRENT->rq_disk->disk_name : "",
-                CURRENT ? CURRENT->sector : 0 ));
+                CURRENT ? blk_rq_pos(CURRENT) : 0 ));
 
         IsFormatting = 0;
 
···
                 UD.autoprobe = 0;
         }
 
-        if (CURRENT->sector + 1 > UDT->blocks) {
+        if (blk_rq_pos(CURRENT) + 1 > UDT->blocks) {
                 __blk_end_request_cur(CURRENT, -EIO);
                 goto repeat;
         }
···
 
         ReqCnt = 0;
         ReqCmd = rq_data_dir(CURRENT);
-        ReqBlock = CURRENT->sector;
+        ReqBlock = blk_rq_pos(CURRENT);
         ReqBuffer = CURRENT->buffer;
         setup_req_params( drive );
         do_fd_action( drive );

drivers/block/cciss.c | +11 -11
···
         c->Request.Timeout = 0; // Don't time out
         c->Request.CDB[0] =
                 (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write;
-        start_blk = creq->sector;
+        start_blk = blk_rq_pos(creq);
 #ifdef CCISS_DEBUG
-        printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n", (int)creq->sector,
-               (int)creq->nr_sectors);
+        printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n",
+               (int)blk_rq_pos(creq), (int)blk_rq_sectors(creq));
 #endif /* CCISS_DEBUG */
 
         sg_init_table(tmp_sg, MAXSGENTRIES);
···
                 h->maxSG = seg;
 
 #ifdef CCISS_DEBUG
-        printk(KERN_DEBUG "cciss: Submitting %lu sectors in %d segments\n",
-               creq->nr_sectors, seg);
+        printk(KERN_DEBUG "cciss: Submitting %u sectors in %d segments\n",
+               blk_rq_sectors(creq), seg);
 #endif /* CCISS_DEBUG */
 
         c->Header.SGList = c->Header.SGTotal = seg;
···
                 c->Request.CDB[4] = (start_blk >> 8) & 0xff;
                 c->Request.CDB[5] = start_blk & 0xff;
                 c->Request.CDB[6] = 0; // (sect >> 24) & 0xff; MSB
-                c->Request.CDB[7] = (creq->nr_sectors >> 8) & 0xff;
-                c->Request.CDB[8] = creq->nr_sectors & 0xff;
+                c->Request.CDB[7] = (blk_rq_sectors(creq) >> 8) & 0xff;
+                c->Request.CDB[8] = blk_rq_sectors(creq) & 0xff;
                 c->Request.CDB[9] = c->Request.CDB[11] = c->Request.CDB[12] = 0;
         } else {
                 u32 upper32 = upper_32_bits(start_blk);
···
                 c->Request.CDB[7]= (start_blk >> 16) & 0xff;
                 c->Request.CDB[8]= (start_blk >> 8) & 0xff;
                 c->Request.CDB[9]= start_blk & 0xff;
-                c->Request.CDB[10]= (creq->nr_sectors >> 24) & 0xff;
-                c->Request.CDB[11]= (creq->nr_sectors >> 16) & 0xff;
-                c->Request.CDB[12]= (creq->nr_sectors >> 8) & 0xff;
-                c->Request.CDB[13]= creq->nr_sectors & 0xff;
+                c->Request.CDB[10]= (blk_rq_sectors(creq) >> 24) & 0xff;
+                c->Request.CDB[11]= (blk_rq_sectors(creq) >> 16) & 0xff;
+                c->Request.CDB[12]= (blk_rq_sectors(creq) >> 8) & 0xff;
+                c->Request.CDB[13]= blk_rq_sectors(creq) & 0xff;
                 c->Request.CDB[14] = c->Request.CDB[15] = 0;
         }
 } else if (blk_pc_request(creq)) {

drivers/block/cpqarray.c | +5 -4
···
         c->hdr.size = sizeof(rblk_t) >> 2;
         c->size += sizeof(rblk_t);
 
-        c->req.hdr.blk = creq->sector;
+        c->req.hdr.blk = blk_rq_pos(creq);
         c->rq = creq;
 DBGPX(
-        printk("sector=%d, nr_sectors=%d\n", creq->sector, creq->nr_sectors);
+        printk("sector=%d, nr_sectors=%u\n",
+               blk_rq_pos(creq), blk_rq_sectors(creq));
 );
         sg_init_table(tmp_sg, SG_MAX);
         seg = blk_rq_map_sg(q, creq, tmp_sg);
···
                         tmp_sg[i].offset,
                         tmp_sg[i].length, dir);
         }
-DBGPX(  printk("Submitting %d sectors in %d segments\n", creq->nr_sectors, seg); );
+DBGPX(  printk("Submitting %u sectors in %d segments\n", blk_rq_sectors(creq), seg); );
         c->req.hdr.sg_cnt = seg;
-        c->req.hdr.blk_cnt = creq->nr_sectors;
+        c->req.hdr.blk_cnt = blk_rq_sectors(creq);
         c->req.hdr.cmd = (rq_data_dir(creq) == READ) ? IDA_READ : IDA_WRITE;
         c->type = CMD_RWREQ;
 

drivers/block/floppy.c | +27 -26
···
 
         /* current_count_sectors can be zero if transfer failed */
         if (error)
-                nr_sectors = req->current_nr_sectors;
+                nr_sectors = blk_rq_cur_sectors(req);
         if (__blk_end_request(req, error, nr_sectors << 9))
                 return;
 
···
         if (uptodate) {
                 /* maintain values for invalidation on geometry
                  * change */
-                block = current_count_sectors + req->sector;
+                block = current_count_sectors + blk_rq_pos(req);
                 INFBOUND(DRS->maxblock, block);
                 if (block > _floppy->sect)
                         DRS->maxtrack = 1;
···
                 /* record write error information */
                 DRWE->write_errors++;
                 if (DRWE->write_errors == 1) {
-                        DRWE->first_error_sector = req->sector;
+                        DRWE->first_error_sector = blk_rq_pos(req);
                         DRWE->first_error_generation = DRS->generation;
                 }
-                DRWE->last_error_sector = req->sector;
+                DRWE->last_error_sector = blk_rq_pos(req);
                 DRWE->last_error_generation = DRS->generation;
         }
         spin_lock_irqsave(q->queue_lock, flags);
···
 
         max_sector = transfer_size(ssize,
                                    min(max_sector, max_sector_2),
-                                   current_req->nr_sectors);
+                                   blk_rq_sectors(current_req));
 
         if (current_count_sectors <= 0 && CT(COMMAND) == FD_WRITE &&
-            buffer_max > fsector_t + current_req->nr_sectors)
+            buffer_max > fsector_t + blk_rq_sectors(current_req))
                 current_count_sectors = min_t(int, buffer_max - fsector_t,
-                                              current_req->nr_sectors);
+                                              blk_rq_sectors(current_req));
 
         remaining = current_count_sectors << 9;
 #ifdef FLOPPY_SANITY_CHECK
-        if ((remaining >> 9) > current_req->nr_sectors &&
+        if ((remaining >> 9) > blk_rq_sectors(current_req) &&
             CT(COMMAND) == FD_WRITE) {
                 DPRINT("in copy buffer\n");
                 printk("current_count_sectors=%ld\n", current_count_sectors);
                 printk("remaining=%d\n", remaining >> 9);
-                printk("current_req->nr_sectors=%ld\n",
-                       current_req->nr_sectors);
+                printk("current_req->nr_sectors=%u\n",
+                       blk_rq_sectors(current_req));
                 printk("current_req->current_nr_sectors=%u\n",
-                       current_req->current_nr_sectors);
+                       blk_rq_cur_sectors(current_req));
                 printk("max_sector=%d\n", max_sector);
                 printk("ssize=%d\n", ssize);
         }
···
 
         dma_buffer = floppy_track_buffer + ((fsector_t - buffer_min) << 9);
 
-        size = current_req->current_nr_sectors << 9;
+        size = blk_rq_cur_sectors(current_req) << 9;
 
         rq_for_each_segment(bv, current_req, iter) {
                 if (!remaining)
···
 
         max_sector = _floppy->sect * _floppy->head;
 
-        TRACK = (int)current_req->sector / max_sector;
-        fsector_t = (int)current_req->sector % max_sector;
+        TRACK = (int)blk_rq_pos(current_req) / max_sector;
+        fsector_t = (int)blk_rq_pos(current_req) % max_sector;
         if (_floppy->track && TRACK >= _floppy->track) {
-                if (current_req->current_nr_sectors & 1) {
+                if (blk_rq_cur_sectors(current_req) & 1) {
                         current_count_sectors = 1;
                         return 1;
                 } else
···
         if (fsector_t >= max_sector) {
                 current_count_sectors =
                     min_t(int, _floppy->sect - fsector_t,
-                          current_req->nr_sectors);
+                          blk_rq_sectors(current_req));
                 return 1;
         }
         SIZECODE = 2;
···
 
         in_sector_offset = (fsector_t % _floppy->sect) % ssize;
         aligned_sector_t = fsector_t - in_sector_offset;
-        max_size = current_req->nr_sectors;
+        max_size = blk_rq_sectors(current_req);
         if ((raw_cmd->track == buffer_track) &&
             (current_drive == buffer_drive) &&
             (fsector_t >= buffer_min) && (fsector_t < buffer_max)) {
···
                         copy_buffer(1, max_sector, buffer_max);
                         return 1;
                 }
-        } else if (in_sector_offset || current_req->nr_sectors < ssize) {
+        } else if (in_sector_offset || blk_rq_sectors(current_req) < ssize) {
                 if (CT(COMMAND) == FD_WRITE) {
-                        if (fsector_t + current_req->nr_sectors > ssize &&
-                            fsector_t + current_req->nr_sectors < ssize + ssize)
+                        if (fsector_t + blk_rq_sectors(current_req) > ssize &&
+                            fsector_t + blk_rq_sectors(current_req) < ssize + ssize)
                                 max_size = ssize + ssize;
                         else
                                 max_size = ssize;
···
             (indirect * 2 > direct * 3 &&
              *errors < DP->max_errors.read_track && ((!probing
                || (DP->read_track & (1 << DRS->probed_format)))))) {
-                max_size = current_req->nr_sectors;
+                max_size = blk_rq_sectors(current_req);
         } else {
                 raw_cmd->kernel_data = current_req->buffer;
                 raw_cmd->length = current_count_sectors << 9;
···
             fsector_t > buffer_max ||
             fsector_t < buffer_min ||
             ((CT(COMMAND) == FD_READ ||
-              (!in_sector_offset && current_req->nr_sectors >= ssize)) &&
+              (!in_sector_offset && blk_rq_sectors(current_req) >= ssize)) &&
              max_sector > 2 * max_buffer_sectors + buffer_min &&
              max_size + fsector_t > 2 * max_buffer_sectors + buffer_min)
             /* not enough space */
···
                         printk("write\n");
                         return 0;
                 }
-        } else if (raw_cmd->length > current_req->nr_sectors << 9 ||
-                   current_count_sectors > current_req->nr_sectors) {
+        } else if (raw_cmd->length > blk_rq_sectors(current_req) << 9 ||
+                   current_count_sectors > blk_rq_sectors(current_req)) {
                 DPRINT("buffer overrun in direct transfer\n");
                 return 0;
         } else if (raw_cmd->length < current_count_sectors << 9) {
···
         if (usage_count == 0) {
                 printk("warning: usage count=0, current_req=%p exiting\n",
                        current_req);
-                printk("sect=%ld type=%x flags=%x\n", (long)current_req->sector,
-                       current_req->cmd_type, current_req->cmd_flags);
+                printk("sect=%ld type=%x flags=%x\n",
+                       (long)blk_rq_pos(current_req), current_req->cmd_type,
+                       current_req->cmd_flags);
                 return;
         }
         if (test_bit(0, &fdc_busy)) {

drivers/block/hd.c | +7 -7
···
                 printk(", CHS=%d/%d/%d", (inb(HD_HCYL)<<8) + inb(HD_LCYL),
                         inb(HD_CURRENT) & 0xf, inb(HD_SECTOR));
                 if (CURRENT)
-                        printk(", sector=%ld", CURRENT->sector);
+                        printk(", sector=%ld", blk_rq_pos(CURRENT));
         }
         printk("\n");
 }
···
         req = CURRENT;
         insw(HD_DATA, req->buffer, 256);
 #ifdef DEBUG
-        printk("%s: read: sector %ld, remaining = %ld, buffer=%p\n",
-               req->rq_disk->disk_name, req->sector + 1, req->nr_sectors - 1,
-               req->buffer+512);
+        printk("%s: read: sector %ld, remaining = %u, buffer=%p\n",
+               req->rq_disk->disk_name, blk_rq_pos(req) + 1,
+               blk_rq_sectors(req) - 1, req->buffer+512);
 #endif
         if (__blk_end_request(req, 0, 512)) {
                 SET_HANDLER(&read_intr);
···
                         continue;
                 if (!OK_STATUS(i))
                         break;
-                if ((req->nr_sectors <= 1) || (i & DRQ_STAT))
+                if ((blk_rq_sectors(req) <= 1) || (i & DRQ_STAT))
                         goto ok_to_write;
         } while (--retries > 0);
         dump_status("write_intr", i);
···
                 return;
         }
         disk = req->rq_disk->private_data;
-        block = req->sector;
-        nsect = req->nr_sectors;
+        block = blk_rq_pos(req);
+        nsect = blk_rq_sectors(req);
         if (block >= get_capacity(req->rq_disk) ||
             ((block+nsect) > get_capacity(req->rq_disk))) {
                 printk("%s: bad access: block=%d, count=%d\n",

drivers/block/mg_disk.c | +13 -12
···
         if (host->breq) {
                 req = elv_next_request(host->breq);
                 if (req)
-                        printk(", sector=%u", (u32)req->sector);
+                        printk(", sector=%u",
+                               (unsigned int)blk_rq_pos(req));
         }
 
 }
···
         u32 j;
         struct mg_host *host = req->rq_disk->private_data;
 
-        if (mg_out(host, req->sector, req->nr_sectors, MG_CMD_RD, NULL) !=
-                        MG_ERR_NONE)
+        if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req),
+                   MG_CMD_RD, NULL) != MG_ERR_NONE)
                 mg_bad_rw_intr(host);
 
         MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
-                        req->nr_sectors, req->sector, req->buffer);
+               blk_rq_sectors(req), blk_rq_pos(req), req->buffer);
 
         do {
                 u16 *buff = (u16 *)req->buffer;
···
         u32 j;
         struct mg_host *host = req->rq_disk->private_data;
 
-        if (mg_out(host, req->sector, req->nr_sectors, MG_CMD_WR, NULL) !=
-                        MG_ERR_NONE) {
+        if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req),
+                   MG_CMD_WR, NULL) != MG_ERR_NONE) {
                 mg_bad_rw_intr(host);
                 return;
         }
 
         MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
-                        req->nr_sectors, req->sector, req->buffer);
+               blk_rq_sectors(req), blk_rq_pos(req), req->buffer);
 
         do {
                 u16 *buff = (u16 *)req->buffer;
···
                                                         (i << 1));
 
         MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
-                        req->sector, req->nr_sectors - 1, req->buffer);
+               blk_rq_pos(req), blk_rq_sectors(req) - 1, req->buffer);
 
         /* send read confirm */
         outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);
···
                         break;
                 if (!MG_READY_OK(i))
                         break;
-                if ((req->nr_sectors <= 1) || (i & ATA_DRQ))
+                if ((blk_rq_sectors(req) <= 1) || (i & ATA_DRQ))
                         goto ok_to_write;
         } while (0);
         mg_dump_status("mg_write_intr", i, host);
···
                         buff++;
                 }
                 MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
-                                req->sector, req->nr_sectors, req->buffer);
+                       blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
                 host->mg_do_intr = mg_write_intr;
                 mod_timer(&host->timer, jiffies + 3 * HZ);
         }
···
 
         del_timer(&host->timer);
 
-        sect_num = req->sector;
+        sect_num = blk_rq_pos(req);
         /* deal whole segments */
-        sect_cnt = req->nr_sectors;
+        sect_cnt = blk_rq_sectors(req);
 
         /* sanity check */
         if (sect_num >= get_capacity(req->rq_disk) ||

drivers/block/nbd.c | +6 -6
···
                         req, error ? "failed" : "done");
 
         spin_lock_irqsave(q->queue_lock, flags);
-        __blk_end_request(req, error, req->nr_sectors << 9);
+        __blk_end_request(req, error, blk_rq_sectors(req) << 9);
         spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
···
 {
         int result, flags;
         struct nbd_request request;
-        unsigned long size = req->nr_sectors << 9;
+        unsigned long size = blk_rq_sectors(req) << 9;
 
         request.magic = htonl(NBD_REQUEST_MAGIC);
         request.type = htonl(nbd_cmd(req));
-        request.from = cpu_to_be64((u64) req->sector << 9);
+        request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
         request.len = htonl(size);
         memcpy(request.handle, &req, sizeof(req));
 
-        dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%luB)\n",
+        dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%uB)\n",
                         lo->disk->disk_name, req,
                         nbdcmd_to_ascii(nbd_cmd(req)),
-                        (unsigned long long)req->sector << 9,
-                        req->nr_sectors << 9);
+                        (unsigned long long)blk_rq_pos(req) << 9,
+                        blk_rq_sectors(req) << 9);
         result = sock_xmit(lo, 1, &request, sizeof(request),
                         (nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0);
         if (result <= 0) {

drivers/block/paride/pcd.c | +2 -2
···
         if (cd != pcd_current)
                 pcd_bufblk = -1;
         pcd_current = cd;
-        pcd_sector = pcd_req->sector;
-        pcd_count = pcd_req->current_nr_sectors;
+        pcd_sector = blk_rq_pos(pcd_req);
+        pcd_count = blk_rq_cur_sectors(pcd_req);
         pcd_buf = pcd_req->buffer;
         pcd_busy = 1;
         ps_set_intr(do_pcd_read, NULL, 0, nice);

drivers/block/paride/pd.c | +4 -4
···
 
         pd_cmd = rq_data_dir(pd_req);
         if (pd_cmd == READ || pd_cmd == WRITE) {
-                pd_block = pd_req->sector;
-                pd_count = pd_req->current_nr_sectors;
+                pd_block = blk_rq_pos(pd_req);
+                pd_count = blk_rq_cur_sectors(pd_req);
                 if (pd_block + pd_count > get_capacity(pd_req->rq_disk))
                         return Fail;
-                pd_run = pd_req->nr_sectors;
+                pd_run = blk_rq_sectors(pd_req);
                 pd_buf = pd_req->buffer;
                 pd_retries = 0;
                 if (pd_cmd == READ)
···
                         return 0;
                 spin_lock_irqsave(&pd_lock, saved_flags);
                 __blk_end_request_cur(pd_req, 0);
-                pd_count = pd_req->current_nr_sectors;
+                pd_count = blk_rq_cur_sectors(pd_req);
                 pd_buf = pd_req->buffer;
                 spin_unlock_irqrestore(&pd_lock, saved_flags);
                 return 0;

drivers/block/paride/pf.c | +4 -4
···
                 return;
 
         pf_current = pf_req->rq_disk->private_data;
-        pf_block = pf_req->sector;
-        pf_run = pf_req->nr_sectors;
-        pf_count = pf_req->current_nr_sectors;
+        pf_block = blk_rq_pos(pf_req);
+        pf_run = blk_rq_sectors(pf_req);
+        pf_count = blk_rq_cur_sectors(pf_req);
 
         if (pf_block + pf_count > get_capacity(pf_req->rq_disk)) {
                 pf_end_request(-EIO);
···
                 spin_unlock_irqrestore(&pf_spin_lock, saved_flags);
                 if (!pf_req)
                         return 1;
-                pf_count = pf_req->current_nr_sectors;
+                pf_count = blk_rq_cur_sectors(pf_req);
                 pf_buf = pf_req->buffer;
         }
         return 0;

drivers/block/ps3disk.c | +4 -5
···
         rq_for_each_segment(bv, req, iter)
                 n++;
         dev_dbg(&dev->sbd.core,
-                "%s:%u: %s req has %u bvecs for %lu sectors %lu hard sectors\n",
-                __func__, __LINE__, op, n, req->nr_sectors,
-                blk_rq_sectors(req));
+                "%s:%u: %s req has %u bvecs for %u sectors\n",
+                __func__, __LINE__, op, n, blk_rq_sectors(req));
 #endif
 
-        start_sector = req->sector * priv->blocking_factor;
-        sectors = req->nr_sectors * priv->blocking_factor;
+        start_sector = blk_rq_pos(req) * priv->blocking_factor;
+        sectors = blk_rq_sectors(req) * priv->blocking_factor;
         dev_dbg(&dev->sbd.core, "%s:%u: %s %llu sectors starting at %llu\n",
                 __func__, __LINE__, op, sectors, start_sector);
 

drivers/block/sunvdc.c | +1 -1
···
                 desc->slice = 0;
         }
         desc->status = ~0;
-        desc->offset = (req->sector << 9) / port->vdisk_block_size;
+        desc->offset = (blk_rq_pos(req) << 9) / port->vdisk_block_size;
         desc->size = len;
         desc->ncookies = err;
 

drivers/block/swim.c | +3 -3
···
         while ((req = elv_next_request(q))) {
 
                 fs = req->rq_disk->private_data;
-                if (req->sector < 0 || req->sector >= fs->total_secs) {
+                if (blk_rq_pos(req) >= fs->total_secs) {
                         __blk_end_request_cur(req, -EIO);
                         continue;
                 }
···
                         __blk_end_request_cur(req, -EIO);
                         break;
                 case READ:
-                        if (floppy_read_sectors(fs, req->sector,
-                                                req->current_nr_sectors,
+                        if (floppy_read_sectors(fs, blk_rq_pos(req),
+                                                blk_rq_cur_sectors(req),
                                                 req->buffer)) {
                                 __blk_end_request_cur(req, -EIO);
                                 continue;

drivers/block/swim3.c | +18 -16
···
         }
         while (fs->state == idle && (req = elv_next_request(swim3_queue))) {
 #if 0
-                printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%ld buf=%p\n",
+                printk("do_fd_req: dev=%s cmd=%d sec=%ld nr_sec=%u buf=%p\n",
                        req->rq_disk->disk_name, req->cmd,
-                       (long)req->sector, req->nr_sectors, req->buffer);
-                printk("           errors=%d current_nr_sectors=%ld\n",
-                       req->errors, req->current_nr_sectors);
+                       (long)blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
+                printk("           errors=%d current_nr_sectors=%u\n",
+                       req->errors, blk_rq_cur_sectors(req));
 #endif
 
-                if (req->sector >= fs->total_secs) {
+                if (blk_rq_pos(req) >= fs->total_secs) {
                         __blk_end_request_cur(req, -EIO);
                         continue;
                 }
···
                         }
                 }
 
-                /* Do not remove the cast. req->sector is now a sector_t and
-                 * can be 64 bits, but it will never go past 32 bits for this
-                 * driver anyway, so we can safely cast it down and not have
-                 * to do a 64/32 division
+                /* Do not remove the cast. blk_rq_pos(req) is now a
+                 * sector_t and can be 64 bits, but it will never go
+                 * past 32 bits for this driver anyway, so we can
+                 * safely cast it down and not have to do a 64/32
+                 * division
                  */
-                fs->req_cyl = ((long)req->sector) / fs->secpercyl;
-                x = ((long)req->sector) % fs->secpercyl;
+                fs->req_cyl = ((long)blk_rq_pos(req)) / fs->secpercyl;
+                x = ((long)blk_rq_pos(req)) % fs->secpercyl;
                 fs->head = x / fs->secpertrack;
                 fs->req_sector = x % fs->secpertrack + 1;
                 fd_req = req;
···
         struct dbdma_cmd *cp = fs->dma_cmd;
         struct dbdma_regs __iomem *dr = fs->dma;
 
-        if (fd_req->current_nr_sectors <= 0) {
+        if (blk_rq_cur_sectors(fd_req) <= 0) {
                 printk(KERN_ERR "swim3: transfer 0 sectors?\n");
                 return;
         }
···
                 n = 1;
         else {
                 n = fs->secpertrack - fs->req_sector + 1;
-                if (n > fd_req->current_nr_sectors)
-                        n = fd_req->current_nr_sectors;
+                if (n > blk_rq_cur_sectors(fd_req))
+                        n = blk_rq_cur_sectors(fd_req);
         }
         fs->scount = n;
         swim3_select(fs, fs->head? READ_DATA_1: READ_DATA_0);
···
                 out_8(&sw->control_bic, WRITE_SECTORS | DO_ACTION);
                 out_8(&sw->select, RELAX);
                 printk(KERN_ERR "swim3: timeout %sing sector %ld\n",
-                       (rq_data_dir(fd_req)==WRITE? "writ": "read"), (long)fd_req->sector);
+                       (rq_data_dir(fd_req)==WRITE? "writ": "read"),
+                       (long)blk_rq_pos(fd_req));
                 __blk_end_request_cur(fd_req, -EIO);
                 fs->state = idle;
                 start_request(fs);
···
                 } else {
                         printk("swim3: error %sing block %ld (err=%x)\n",
                                rq_data_dir(fd_req) == WRITE? "writ": "read",
-                               (long)fd_req->sector, err);
+                               (long)blk_rq_pos(fd_req), err);
                         __blk_end_request_cur(fd_req, -EIO);
                         fs->state = idle;
                 }

drivers/block/sx8.c | +3 -3
···
         msg->sg_count   = n_elem;
         msg->sg_type    = SGT_32BIT;
         msg->handle     = cpu_to_le32(TAG_ENCODE(crq->tag));
-        msg->lba        = cpu_to_le32(rq->sector & 0xffffffff);
-        tmp             = (rq->sector >> 16) >> 16;
+        msg->lba        = cpu_to_le32(blk_rq_pos(rq) & 0xffffffff);
+        tmp             = (blk_rq_pos(rq) >> 16) >> 16;
         msg->lba_high   = cpu_to_le16( (u16) tmp );
-        msg->lba_count  = cpu_to_le16(rq->nr_sectors);
+        msg->lba_count  = cpu_to_le16(blk_rq_sectors(rq));
 
         msg_size        = sizeof(struct carm_msg_rw) - sizeof(msg->sg);
         for (i = 0; i < n_elem; i++) {

drivers/block/ub.c | +3 -3
···
          * The call to blk_queue_hardsect_size() guarantees that request
          * is aligned, but it is given in terms of 512 byte units, always.
          */
-        block = rq->sector >> lun->capacity.bshift;
-        nblks = rq->nr_sectors >> lun->capacity.bshift;
+        block = blk_rq_pos(rq) >> lun->capacity.bshift;
+        nblks = blk_rq_sectors(rq) >> lun->capacity.bshift;
 
         cmd->cdb[0] = (cmd->dir == UB_DIR_READ)? READ_10: WRITE_10;
         /* 10-byte uses 4 bytes of LBA: 2147483648KB, 2097152MB, 2048GB */
···
         cmd->cdb[8] = nblks;
         cmd->cdb_len = 10;
 
-        cmd->len = rq->nr_sectors * 512;
+        cmd->len = blk_rq_sectors(rq) * 512;
 }
 
 static void ub_cmd_build_packet(struct ub_dev *sc, struct ub_lun *lun,

drivers/block/viodasd.c | +1 -1
···
         struct viodasd_device *d;
         unsigned long flags;
 
-        start = (u64)req->sector << 9;
+        start = (u64)blk_rq_pos(req) << 9;
 
         if (rq_data_dir(req) == READ) {
                 direction = DMA_FROM_DEVICE;

drivers/block/virtio_blk.c | +1 -1
···
         vbr->req = req;
         if (blk_fs_request(vbr->req)) {
                 vbr->out_hdr.type = 0;
-                vbr->out_hdr.sector = vbr->req->sector;
+                vbr->out_hdr.sector = blk_rq_pos(vbr->req);
                 vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
         } else if (blk_pc_request(vbr->req)) {
                 vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;

drivers/block/xd.c | +2 -2
···
                 return;
 
         while ((req = elv_next_request(q)) != NULL) {
-                unsigned block = req->sector;
-                unsigned count = req->nr_sectors;
+                unsigned block = blk_rq_pos(req);
+                unsigned count = blk_rq_sectors(req);
                 XD_INFO *disk = req->rq_disk->private_data;
                 int res = 0;
                 int retry;

drivers/block/xen-blkfront.c | +5 -6
···
         info->shadow[id].request = (unsigned long)req;
 
         ring_req->id = id;
-        ring_req->sector_number = (blkif_sector_t)req->sector;
+        ring_req->sector_number = (blkif_sector_t)blk_rq_pos(req);
         ring_req->handle = info->handle;
 
         ring_req->operation = rq_data_dir(req) ?
···
                         goto wait;
 
                 pr_debug("do_blk_req %p: cmd %p, sec %lx, "
-                         "(%u/%li) buffer:%p [%s]\n",
-                         req, req->cmd, (unsigned long)req->sector,
-                         req->current_nr_sectors,
-                         req->nr_sectors, req->buffer,
-                         rq_data_dir(req) ? "write" : "read");
+                         "(%u/%u) buffer:%p [%s]\n",
+                         req, req->cmd, (unsigned long)blk_rq_pos(req),
+                         blk_rq_cur_sectors(req), blk_rq_sectors(req),
+                         req->buffer, rq_data_dir(req) ? "write" : "read");
 
 
                 blkdev_dequeue_request(req);

drivers/block/xsysace.c | +9 -8
···
         /* Okay, it's a data request, set it up for transfer */
         dev_dbg(ace->dev,
                 "request: sec=%llx hcnt=%x, ccnt=%x, dir=%i\n",
-                (unsigned long long) req->sector, blk_rq_sectors(req),
-                req->current_nr_sectors, rq_data_dir(req));
+                (unsigned long long)blk_rq_pos(req),
+                blk_rq_sectors(req), blk_rq_cur_sectors(req),
+                rq_data_dir(req));
 
         ace->req = req;
         ace->data_ptr = req->buffer;
-        ace->data_count = req->current_nr_sectors * ACE_BUF_PER_SECTOR;
-        ace_out32(ace, ACE_MPULBA, req->sector & 0x0FFFFFFF);
+        ace->data_count = blk_rq_cur_sectors(req) * ACE_BUF_PER_SECTOR;
+        ace_out32(ace, ACE_MPULBA, blk_rq_pos(req) & 0x0FFFFFFF);
 
         count = blk_rq_sectors(req);
         if (rq_data_dir(req)) {
···
                         dev_dbg(ace->dev,
                                 "CFBSY set; t=%i iter=%i c=%i dc=%i irq=%i\n",
                                 ace->fsm_task, ace->fsm_iter_num,
-                                ace->req->current_nr_sectors * 16,
+                                blk_rq_cur_sectors(ace->req) * 16,
                                 ace->data_count, ace->in_irq);
                         ace_fsm_yield(ace);     /* need to poll CFBSY bit */
                         break;
···
                         dev_dbg(ace->dev,
                                 "DATABUF not set; t=%i iter=%i c=%i dc=%i irq=%i\n",
                                 ace->fsm_task, ace->fsm_iter_num,
-                                ace->req->current_nr_sectors * 16,
+                                blk_rq_cur_sectors(ace->req) * 16,
                                 ace->data_count, ace->in_irq);
                         ace_fsm_yieldirq(ace);
                         break;
···
                                                blk_rq_cur_bytes(ace->req))) {
                         /* dev_dbg(ace->dev, "next block; h=%u c=%u\n",
                          *      blk_rq_sectors(ace->req),
-                         *      ace->req->current_nr_sectors);
+                         *      blk_rq_cur_sectors(ace->req));
                          */
                         ace->data_ptr = ace->req->buffer;
-                        ace->data_count = ace->req->current_nr_sectors * 16;
+                        ace->data_count = blk_rq_cur_sectors(ace->req) * 16;
                         ace_fsm_yieldirq(ace);
                         break;
                 }

drivers/block/z2ram.c | +3 -3
···
 {
         struct request *req;
         while ((req = elv_next_request(q)) != NULL) {
-                unsigned long start = req->sector << 9;
-                unsigned long len = req->current_nr_sectors << 9;
+                unsigned long start = blk_rq_pos(req) << 9;
+                unsigned long len = blk_rq_cur_sectors(req) << 9;
 
                 if (start + len > z2ram_size) {
                         printk( KERN_ERR DEVICE_NAME ": bad access: block=%lu, count=%u\n",
-                                req->sector, req->current_nr_sectors);
+                                blk_rq_pos(req), blk_rq_cur_sectors(req));
                         __blk_end_request_cur(req, -EIO);
                         continue;
                 }

drivers/cdrom/gdrom.c | +3 -3
···
         list_for_each_safe(elem, next, &gdrom_deferred) {
                 req = list_entry(elem, struct request, queuelist);
                 spin_unlock(&gdrom_lock);
-                block = req->sector/GD_TO_BLK + GD_SESSION_OFFSET;
-                block_cnt = req->nr_sectors/GD_TO_BLK;
+                block = blk_rq_pos(req)/GD_TO_BLK + GD_SESSION_OFFSET;
+                block_cnt = blk_rq_sectors(req)/GD_TO_BLK;
                 ctrl_outl(PHYSADDR(req->buffer), GDROM_DMA_STARTADDR_REG);
                 ctrl_outl(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG);
                 ctrl_outl(1, GDROM_DMA_DIRECTION_REG);
···
                         printk("  write request ignored\n");
                         __blk_end_request_cur(req, -EIO);
                 }
-                if (req->nr_sectors)
+                if (blk_rq_sectors(req))
                         gdrom_request_handler_dma(req);
                 else
                         __blk_end_request_cur(req, -EIO);

drivers/cdrom/viocd.c | +1 -1
···
                         viopath_targetinst(viopath_hostLp),
                         (u64)req, VIOVERSION << 16,
                         ((u64)DEVICE_NR(diskinfo) << 48) | dmaaddr,
-                        (u64)req->sector * 512, len, 0);
+                        (u64)blk_rq_pos(req) * 512, len, 0);
         if (hvrc != HvLpEvent_Rc_Good) {
                 printk(VIOCD_KERN_WARNING "hv error on op %d\n", (int)hvrc);
                 return -1;

drivers/memstick/core/mspro_block.c | +3 -3
···
                         continue;
                 }
 
-                t_sec = msb->block_req->sector << 9;
+                t_sec = blk_rq_pos(msb->block_req) << 9;
                 sector_div(t_sec, msb->page_size);
 
-                count = msb->block_req->nr_sectors << 9;
+                count = blk_rq_sectors(msb->block_req) << 9;
                 count /= msb->page_size;
 
                 param.system = msb->system;
···
                                 t_len *= msb->page_size;
                         }
                 } else
-                        t_len = msb->block_req->nr_sectors << 9;
+                        t_len = blk_rq_sectors(msb->block_req) << 9;
 
                 dev_dbg(&card->dev, "transferred %x (%d)\n", t_len, error);
 

drivers/message/i2o/i2o_block.c | +11 -9
···
                         break;
 
                 case CACHE_SMARTFETCH:
-                        if (req->nr_sectors > 16)
+                        if (blk_rq_sectors(req) > 16)
                                 ctl_flags = 0x201F0008;
                         else
                                 ctl_flags = 0x001F0000;
···
                                 ctl_flags = 0x001F0010;
                         break;
                 case CACHE_SMARTBACK:
-                        if (req->nr_sectors > 16)
+                        if (blk_rq_sectors(req) > 16)
                                 ctl_flags = 0x001F0004;
                         else
                                 ctl_flags = 0x001F0010;
                         break;
                 case CACHE_SMARTTHROUGH:
-                        if (req->nr_sectors > 16)
+                        if (blk_rq_sectors(req) > 16)
                                 ctl_flags = 0x001F0004;
                         else
                                 ctl_flags = 0x001F0010;
···
 
                 *mptr++ = cpu_to_le32(scsi_flags);
 
-                *((u32 *) & cmd[2]) = cpu_to_be32(req->sector * hwsec);
-                *((u16 *) & cmd[7]) = cpu_to_be16(req->nr_sectors * hwsec);
+                *((u32 *) & cmd[2]) = cpu_to_be32(blk_rq_pos(req) * hwsec);
+                *((u16 *) & cmd[7]) = cpu_to_be16(blk_rq_sectors(req) * hwsec);
 
                 memcpy(mptr, cmd, 10);
                 mptr += 4;
-                *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT);
+                *mptr++ =
+                    cpu_to_le32(blk_rq_sectors(req) << KERNEL_SECTOR_SHIFT);
         } else
 #endif
         {
                 msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid);
                 *mptr++ = cpu_to_le32(ctl_flags);
-                *mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT);
                 *mptr++ =
-                    cpu_to_le32((u32) (req->sector << KERNEL_SECTOR_SHIFT));
+                    cpu_to_le32(blk_rq_sectors(req) << KERNEL_SECTOR_SHIFT);
                 *mptr++ =
-                    cpu_to_le32(req->sector >> (32 - KERNEL_SECTOR_SHIFT));
+                    cpu_to_le32((u32) (blk_rq_pos(req) << KERNEL_SECTOR_SHIFT));
+                *mptr++ =
+                    cpu_to_le32(blk_rq_pos(req) >> (32 - KERNEL_SECTOR_SHIFT));
         }
 
         if (!i2o_block_sglist_alloc(c, ireq, &mptr)) {

drivers/mmc/card/block.c | +5 -5
···
                 brq.mrq.cmd = &brq.cmd;
                 brq.mrq.data = &brq.data;
 
-                brq.cmd.arg = req->sector;
+                brq.cmd.arg = blk_rq_pos(req);
                 if (!mmc_card_blockaddr(card))
                         brq.cmd.arg <<= 9;
                 brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
···
                 brq.stop.opcode = MMC_STOP_TRANSMISSION;
                 brq.stop.arg = 0;
                 brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
-                brq.data.blocks = req->nr_sectors;
+                brq.data.blocks = blk_rq_sectors(req);
 
                 /*
                  * After a read error, we redo the request one sector at a time
···
                  * Adjust the sg list so it is the same size as the
                  * request.
                  */
-                if (brq.data.blocks != req->nr_sectors) {
+                if (brq.data.blocks != blk_rq_sectors(req)) {
                         int i, data_size = brq.data.blocks << 9;
                         struct scatterlist *sg;
 
···
                         printk(KERN_ERR "%s: error %d transferring data,"
                                " sector %u, nr %u, card status %#x\n",
                                req->rq_disk->disk_name, brq.data.error,
-                               (unsigned)req->sector,
-                               (unsigned)req->nr_sectors, status);
+                               (unsigned)blk_rq_pos(req),
+                               (unsigned)blk_rq_sectors(req), status);
                 }
 
                 if (brq.stop.error) {

drivers/mtd/mtd_blkdevs.c | +4 -3
···
         unsigned long block, nsect;
         char *buf;
 
-        block = req->sector << 9 >> tr->blkshift;
-        nsect = req->current_nr_sectors << 9 >> tr->blkshift;
+        block = blk_rq_pos(req) << 9 >> tr->blkshift;
+        nsect = blk_rq_cur_sectors(req) << 9 >> tr->blkshift;
 
         buf = req->buffer;
 
···
         if (!blk_fs_request(req))
                 return -EIO;
 
-        if (req->sector + req->current_nr_sectors > get_capacity(req->rq_disk))
+        if (blk_rq_pos(req) + blk_rq_cur_sectors(req) >
+            get_capacity(req->rq_disk))
                 return -EIO;
 
         switch(rq_data_dir(req)) {

drivers/s390/block/dasd.c | +1 -1
···
         if (dasd_profile_level != DASD_PROFILE_ON)
                 return;
 
-        sectors = req->nr_sectors;
+        sectors = blk_rq_sectors(req);
         if (!cqr->buildclk || !cqr->startclk ||
             !cqr->stopclk || !cqr->endclk ||
             !sectors)

drivers/s390/block/dasd_diag.c | +3 -2
···
                 return ERR_PTR(-EINVAL);
         blksize = block->bp_block;
         /* Calculate record id of first and last block. */
-        first_rec = req->sector >> block->s2b_shift;
-        last_rec = (req->sector + req->nr_sectors - 1) >> block->s2b_shift;
+        first_rec = blk_rq_pos(req) >> block->s2b_shift;
+        last_rec =
+                (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
         /* Check struct bio and count the number of blocks for the request. */
         count = 0;
         rq_for_each_segment(bv, req, iter) {

drivers/s390/block/dasd_eckd.c | +3 -3
···
         blksize = block->bp_block;
         blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
         /* Calculate record id of first and last block. */
-        first_rec = first_trk = req->sector >> block->s2b_shift;
+        first_rec = first_trk = blk_rq_pos(req) >> block->s2b_shift;
         first_offs = sector_div(first_trk, blk_per_trk);
         last_rec = last_trk =
-                (req->sector + req->nr_sectors - 1) >> block->s2b_shift;
+                (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
         last_offs = sector_div(last_trk, blk_per_trk);
         cdlspecial = (private->uses_cdl && first_rec < 2*blk_per_trk);
 
···
         private = (struct dasd_eckd_private *) cqr->block->base->private;
         blksize = cqr->block->bp_block;
         blk_per_trk = recs_per_track(&private->rdc_data, 0, blksize);
-        recid = req->sector >> cqr->block->s2b_shift;
+        recid = blk_rq_pos(req) >> cqr->block->s2b_shift;
         ccw = cqr->cpaddr;
         /* Skip over define extent & locate record. */
         ccw++;

drivers/s390/block/dasd_fba.c | +4 -3
···
                 return ERR_PTR(-EINVAL);
         blksize = block->bp_block;
         /* Calculate record id of first and last block. */
-        first_rec = req->sector >> block->s2b_shift;
-        last_rec = (req->sector + req->nr_sectors - 1) >> block->s2b_shift;
+        first_rec = blk_rq_pos(req) >> block->s2b_shift;
+        last_rec =
+                (blk_rq_pos(req) + blk_rq_sectors(req) - 1) >> block->s2b_shift;
         /* Check struct bio and count the number of blocks for the request. */
         count = 0;
         cidaw = 0;
···
         ccw = cqr->cpaddr;
         /* First ccw is define extent. */
         define_extent(ccw++, cqr->data, rq_data_dir(req),
-                      block->bp_block, req->sector, req->nr_sectors);
+                      block->bp_block, blk_rq_pos(req), blk_rq_sectors(req));
         /* Build locate_record + read/write ccws. */
         idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data));
         LO_data = (struct LO_fba_data *) (idaws + cidaw);

drivers/s390/char/tape_34xx.c | +1 -1
···
         /* Setup ccws. */
         request->op = TO_BLOCK;
         start_block = (struct tape_34xx_block_id *) request->cpdata;
-        start_block->block = req->sector >> TAPEBLOCK_HSEC_S2B;
+        start_block->block = blk_rq_pos(req) >> TAPEBLOCK_HSEC_S2B;
         DBF_EVENT(6, "start_block = %i\n", start_block->block);
 
         ccw = request->cpaddr;

drivers/s390/char/tape_3590.c | +1 -1
···
         struct req_iterator iter;
 
         DBF_EVENT(6, "xBREDid:");
-        start_block = req->sector >> TAPEBLOCK_HSEC_S2B;
+        start_block = blk_rq_pos(req) >> TAPEBLOCK_HSEC_S2B;
         DBF_EVENT(6, "start_block = %i\n", start_block);
 
         rq_for_each_segment(bv, req, iter)

drivers/s390/char/tape_block.c | +1 -1
···
         if (ccw_req->rc == 0)
                 /* Update position. */
                 device->blk_data.block_position =
-                        (req->sector + req->nr_sectors) >> TAPEBLOCK_HSEC_S2B;
+                        (blk_rq_pos(req) + blk_rq_sectors(req)) >> TAPEBLOCK_HSEC_S2B;
         else
                 /* We lost the position information due to an error. */
                 device->blk_data.block_position = -1;

drivers/sbus/char/jsflash.c | +2 -2
···
 
         while ((req = elv_next_request(q)) != NULL) {
                 struct jsfd_part *jdp = req->rq_disk->private_data;
-                unsigned long offset = req->sector << 9;
-                size_t len = req->current_nr_sectors << 9;
+                unsigned long offset = blk_rq_pos(req) << 9;
+                size_t len = blk_rq_cur_sectors(req) << 9;
 
                 if ((offset + len) > jdp->dsize) {
                         __blk_end_request_cur(req, -EIO);

drivers/scsi/eata.c | +12 -12
···
         if (linked_comm && SCpnt->device->queue_depth > 2
             && TLDEV(SCpnt->device->type)) {
                 ha->cp_stat[i] = READY;
-                flush_dev(SCpnt->device, SCpnt->request->sector, ha, 0);
+                flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), ha, 0);
                 return 0;
         }
 
···
                 if (!cpp->din)
                         input_only = 0;
 
-                if (SCpnt->request->sector < minsec)
-                        minsec = SCpnt->request->sector;
-                if (SCpnt->request->sector > maxsec)
-                        maxsec = SCpnt->request->sector;
+                if (blk_rq_pos(SCpnt->request) < minsec)
+                        minsec = blk_rq_pos(SCpnt->request);
+                if (blk_rq_pos(SCpnt->request) > maxsec)
+                        maxsec = blk_rq_pos(SCpnt->request);
 
-                sl[n] = SCpnt->request->sector;
-                ioseek += SCpnt->request->nr_sectors;
+                sl[n] = blk_rq_pos(SCpnt->request);
+                ioseek += blk_rq_sectors(SCpnt->request);
 
                 if (!n)
                         continue;
···
                 k = il[n];
                 cpp = &ha->cp[k];
                 SCpnt = cpp->SCpnt;
-                ll[n] = SCpnt->request->nr_sectors;
+                ll[n] = blk_rq_sectors(SCpnt->request);
                 pl[n] = SCpnt->serial_number;
 
                 if (!n)
···
                         cpp = &ha->cp[k];
                         SCpnt = cpp->SCpnt;
                         scmd_printk(KERN_INFO, SCpnt,
-                            "%s pid %ld mb %d fc %d nr %d sec %ld ns %ld"
+                            "%s pid %ld mb %d fc %d nr %d sec %ld ns %u"
                             " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n",
                             (ihdlr ? "ihdlr" : "qcomm"),
                             SCpnt->serial_number, k, flushcount,
-                            n_ready, SCpnt->request->sector,
-                            SCpnt->request->nr_sectors, cursec, YESNO(s),
+                            n_ready, blk_rq_pos(SCpnt->request),
+                            blk_rq_sectors(SCpnt->request), cursec, YESNO(s),
                             YESNO(r), YESNO(rev), YESNO(input_only),
                             YESNO(overlap), cpp->din);
                 }
···
 
         if (linked_comm && SCpnt->device->queue_depth > 2
             && TLDEV(SCpnt->device->type))
-                flush_dev(SCpnt->device, SCpnt->request->sector, ha, 1);
+                flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), ha, 1);
 
         tstatus = status_byte(spp->target_status);
 

drivers/scsi/lpfc/lpfc_scsi.c | +11 -11
···
         uint32_t bgstat = bgf->bgstat;
         uint64_t failing_sector = 0;
 
-        printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%lx "
+        printk(KERN_ERR "BG ERROR in cmd 0x%x lba 0x%llx blk cnt 0x%x "
                         "bgstat=0x%x bghm=0x%x\n",
                         cmd->cmnd[0], (unsigned long long)scsi_get_lba(cmd),
-                        cmd->request->nr_sectors, bgstat, bghm);
+                        blk_rq_sectors(cmd->request), bgstat, bghm);
 
         spin_lock(&_dump_buf_lock);
         if (!_dump_buf_done) {
···
                 if (cmnd->cmnd[0] == READ_10)
                         lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
                                         "9035 BLKGRD: READ @ sector %llu, "
-                                        "count %lu\n",
+                                        "count %u\n",
                                         (unsigned long long)scsi_get_lba(cmnd),
-                                        cmnd->request->nr_sectors);
+                                        blk_rq_sectors(cmnd->request));
                 else if (cmnd->cmnd[0] == WRITE_10)
                         lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
                                         "9036 BLKGRD: WRITE @ sector %llu, "
-                                        "count %lu cmd=%p\n",
+                                        "count %u cmd=%p\n",
                                         (unsigned long long)scsi_get_lba(cmnd),
-                                        cmnd->request->nr_sectors,
+                                        blk_rq_sectors(cmnd->request),
                                         cmnd);
 
                 err = lpfc_bg_scsi_prep_dma_buf(phba, lpfc_cmd);
···
                 if (cmnd->cmnd[0] == READ_10)
                         lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
                                         "9040 dbg: READ @ sector %llu, "
-                                        "count %lu\n",
+                                        "count %u\n",
                                         (unsigned long long)scsi_get_lba(cmnd),
-                                        cmnd->request->nr_sectors);
+                                        blk_rq_sectors(cmnd->request));
                 else if (cmnd->cmnd[0] == WRITE_10)
                         lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
                                         "9041 dbg: WRITE @ sector %llu, "
-                                        "count %lu cmd=%p\n",
+                                        "count %u cmd=%p\n",
                                         (unsigned long long)scsi_get_lba(cmnd),
-                                        cmnd->request->nr_sectors, cmnd);
+                                        blk_rq_sectors(cmnd->request), cmnd);
                 else
                         lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
                                         "9042 dbg: parser not implemented\n");

drivers/scsi/scsi_lib.c | +3 -3
···
          * Next deal with any sectors which we were able to correctly
          * handle.
          */
-        SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, "
+        SCSI_LOG_HLCOMPLETE(1, printk("%u sectors total, "
                                       "%d bytes done.\n",
-                                      req->nr_sectors, good_bytes));
+                                      blk_rq_sectors(req), good_bytes));
 
         /*
          * Recovered errors need reporting, but they're always treated
···
         if (blk_pc_request(req))
                 sdb->length = req->data_len;
         else
-                sdb->length = req->nr_sectors << 9;
+                sdb->length = blk_rq_sectors(req) << 9;
         return BLKPREP_OK;
 }
 
+12 -12
drivers/scsi/sd.c
··· 383 383 struct scsi_device *sdp = q->queuedata;
384 384 struct gendisk *disk = rq->rq_disk;
385 385 struct scsi_disk *sdkp;
386 - sector_t block = rq->sector;
386 + sector_t block = blk_rq_pos(rq);
387 387 sector_t threshold;
388 - unsigned int this_count = rq->nr_sectors;
388 + unsigned int this_count = blk_rq_sectors(rq);
389 389 int ret, host_dif;
390 390
391 391 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
··· 412 412 this_count));
413 413
414 414 if (!sdp || !scsi_device_online(sdp) ||
415 - block + rq->nr_sectors > get_capacity(disk)) {
415 + block + blk_rq_sectors(rq) > get_capacity(disk)) {
416 416 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
417 - "Finishing %ld sectors\n",
418 - rq->nr_sectors));
417 + "Finishing %u sectors\n",
418 + blk_rq_sectors(rq)));
419 419 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
420 420 "Retry with 0x%p\n", SCpnt));
421 421 goto out;
··· 462 462 * for this.
463 463 */
464 464 if (sdp->sector_size == 1024) {
465 - if ((block & 1) || (rq->nr_sectors & 1)) {
465 + if ((block & 1) || (blk_rq_sectors(rq) & 1)) {
466 466 scmd_printk(KERN_ERR, SCpnt,
467 467 "Bad block number requested\n");
468 468 goto out;
··· 472 472 }
473 473 }
474 474 if (sdp->sector_size == 2048) {
475 - if ((block & 3) || (rq->nr_sectors & 3)) {
475 + if ((block & 3) || (blk_rq_sectors(rq) & 3)) {
476 476 scmd_printk(KERN_ERR, SCpnt,
477 477 "Bad block number requested\n");
478 478 goto out;
··· 482 482 }
483 483 }
484 484 if (sdp->sector_size == 4096) {
485 - if ((block & 7) || (rq->nr_sectors & 7)) {
485 + if ((block & 7) || (blk_rq_sectors(rq) & 7)) {
486 486 scmd_printk(KERN_ERR, SCpnt,
487 487 "Bad block number requested\n");
488 488 goto out;
··· 511 511 }
512 512
513 513 SCSI_LOG_HLQUEUE(2, scmd_printk(KERN_INFO, SCpnt,
514 - "%s %d/%ld 512 byte blocks.\n",
514 + "%s %d/%u 512 byte blocks.\n",
515 515 (rq_data_dir(rq) == WRITE) ?
516 516 "writing" : "reading", this_count,
517 - rq->nr_sectors));
517 + blk_rq_sectors(rq)));
518 518
519 519 /* Set RDPROTECT/WRPROTECT if disk is formatted with DIF */
520 520 host_dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
··· 970 970
971 971 static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
972 972 {
973 - u64 start_lba = scmd->request->sector;
974 - u64 end_lba = scmd->request->sector + (scsi_bufflen(scmd) / 512);
973 + u64 start_lba = blk_rq_pos(scmd->request);
974 + u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512);
975 975 u64 bad_lba;
976 976 int info_valid;
977 977
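The middle hunks are sd's sanity check that a request is expressible in the disk's logical block size: with 1024-, 2048- or 4096-byte blocks, both the starting 512-byte sector and the 512-byte sector count must be multiples of 2, 4 or 8, tested with the masks 1, 3 and 7. A standalone sketch of the same arithmetic, using hypothetical request values:

    #include <stdio.h>

    /* Nonzero if a request starting at 'pos' (in 512-byte sectors) and
     * spanning 'nr' sectors is aligned to a logical block of 'lbs' bytes. */
    static int rq_aligned(unsigned long long pos, unsigned int nr, unsigned int lbs)
    {
        unsigned int mask = (lbs >> 9) - 1;    /* 1, 3 or 7 for 1k/2k/4k blocks */

        return !(pos & mask) && !(nr & mask);
    }

    int main(void)
    {
        unsigned long long pos = 2048;
        unsigned int nr = 7;                   /* odd count: invalid on a 4k disk */

        if (rq_aligned(pos, nr, 4096))
            printf("block %llu, count %u\n", pos >> 3, nr >> 3);
        else
            printf("Bad block number requested\n");
        return 0;
    }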
+1 -1
drivers/scsi/sd_dif.c
··· 507 507 sector_sz = scmd->device->sector_size;
508 508 sectors = good_bytes / sector_sz;
509 509
510 - phys = scmd->request->sector & 0xffffffff;
510 + phys = blk_rq_pos(scmd->request) & 0xffffffff;
511 511 if (sector_sz == 4096)
512 512 phys >>= 3;
513 513
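Context for the one-liner: sd_dif derives a DIF reference tag from the request position, and the tag is a 32-bit field, hence the & 0xffffffff truncation; on 4096-byte-sector disks the 512-byte sector index is then shifted down by 3 to count logical blocks. A sketch of the computation, with an illustrative position chosen to exceed 32 bits:

    #include <stdio.h>

    int main(void)
    {
        unsigned long long pos = 0x100000010ULL;   /* request start, 512-byte sectors */
        unsigned int sector_sz = 4096;             /* hypothetical logical block size */

        unsigned int phys = pos & 0xffffffff;      /* the tag field is 32 bits wide */
        if (sector_sz == 4096)
            phys >>= 3;                            /* 512-byte sectors -> 4k blocks */

        printf("reference tag base 0x%x\n", phys); /* 0x10 >> 3 = 0x2 */
        return 0;
    }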
+8 -7
drivers/scsi/sr.c
··· 292 292 if (cd->device->sector_size == 2048)
293 293 error_sector <<= 2;
294 294 error_sector &= ~(block_sectors - 1);
295 - good_bytes = (error_sector - SCpnt->request->sector) << 9;
295 + good_bytes = (error_sector -
296 + blk_rq_pos(SCpnt->request)) << 9;
296 297 if (good_bytes < 0 || good_bytes >= this_count)
297 298 good_bytes = 0;
298 299 /*
··· 350 349 cd->disk->disk_name, block));
351 350
352 351 if (!cd->device || !scsi_device_online(cd->device)) {
353 - SCSI_LOG_HLQUEUE(2, printk("Finishing %ld sectors\n",
354 - rq->nr_sectors));
352 + SCSI_LOG_HLQUEUE(2, printk("Finishing %u sectors\n",
353 + blk_rq_sectors(rq)));
355 354 SCSI_LOG_HLQUEUE(2, printk("Retry with 0x%p\n", SCpnt));
356 355 goto out;
357 356 }
··· 414 413 /*
415 414 * request doesn't start on hw block boundary, add scatter pads
416 415 */
417 - if (((unsigned int)rq->sector % (s_size >> 9)) ||
416 + if (((unsigned int)blk_rq_pos(rq) % (s_size >> 9)) ||
418 417 (scsi_bufflen(SCpnt) % s_size)) {
419 418 scmd_printk(KERN_NOTICE, SCpnt, "unaligned transfer\n");
420 419 goto out;
··· 423 422 this_count = (scsi_bufflen(SCpnt) >> 9) / (s_size >> 9);
424 423
425 424
426 - SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%ld 512 byte blocks.\n",
425 + SCSI_LOG_HLQUEUE(2, printk("%s : %s %d/%u 512 byte blocks.\n",
427 426 cd->cdi.name,
428 427 (rq_data_dir(rq) == WRITE) ?
429 428 "writing" : "reading",
430 - this_count, rq->nr_sectors));
429 + this_count, blk_rq_sectors(rq)));
431 430
432 431 SCpnt->cmnd[1] = 0;
433 - block = (unsigned int)rq->sector / (s_size >> 9);
432 + block = (unsigned int)blk_rq_pos(rq) / (s_size >> 9);
434 433
435 434 if (this_count > 0xffff) {
436 435 this_count = 0xffff;
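The first hunk is worth a second look: sr rounds a reported error LBA down to a hardware-block boundary (error_sector &= ~(block_sectors - 1)) and converts the distance from the request start into bytes, so only the cleanly completed prefix counts as good. A worked sketch with made-up sector numbers and 2048-byte CD blocks (block_sectors = 4):

    #include <stdio.h>

    int main(void)
    {
        unsigned long long rq_pos = 100;       /* request start, 512-byte sectors */
        unsigned long long error_sector = 110; /* reported failing 512-byte sector */
        unsigned int block_sectors = 4;        /* one 2048-byte hw block */

        /* Round down to the hw block containing the error: 110 -> 108. */
        error_sector &= ~(unsigned long long)(block_sectors - 1);

        /* Bytes completed before the failing block: (108 - 100) * 512 = 4096. */
        long long good_bytes = (long long)(error_sector - rq_pos) << 9;
        if (good_bytes < 0)
            good_bytes = 0;

        printf("good_bytes = %lld\n", good_bytes);
        return 0;
    }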
+12 -10
drivers/scsi/u14-34f.c
··· 1306 1306 if (linked_comm && SCpnt->device->queue_depth > 2
1307 1307 && TLDEV(SCpnt->device->type)) {
1308 1308 HD(j)->cp_stat[i] = READY;
1309 - flush_dev(SCpnt->device, SCpnt->request->sector, j, FALSE);
1309 + flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), j, FALSE);
1310 1310 return 0;
1311 1311 }
1312 1312
··· 1610 1610
1611 1611 if (!(cpp->xdir == DTD_IN)) input_only = FALSE;
1612 1612
1613 - if (SCpnt->request->sector < minsec) minsec = SCpnt->request->sector;
1614 - if (SCpnt->request->sector > maxsec) maxsec = SCpnt->request->sector;
1613 + if (blk_rq_pos(SCpnt->request) < minsec)
1614 + minsec = blk_rq_pos(SCpnt->request);
1615 + if (blk_rq_pos(SCpnt->request) > maxsec)
1616 + maxsec = blk_rq_pos(SCpnt->request);
1615 1617
1616 - sl[n] = SCpnt->request->sector;
1617 - ioseek += SCpnt->request->nr_sectors;
1618 + sl[n] = blk_rq_pos(SCpnt->request);
1619 + ioseek += blk_rq_sectors(SCpnt->request);
1618 1620
1619 1621 if (!n) continue;
··· 1644 1642
1645 1643 if (!input_only) for (n = 0; n < n_ready; n++) {
1646 1644 k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
1647 - ll[n] = SCpnt->request->nr_sectors; pl[n] = SCpnt->serial_number;
1645 + ll[n] = blk_rq_sectors(SCpnt->request); pl[n] = SCpnt->serial_number;
1648 1646
1649 1647 if (!n) continue;
··· 1668 1666 if (link_statistics && (overlap || !(flushcount % link_statistics)))
1669 1667 for (n = 0; n < n_ready; n++) {
1670 1668 k = il[n]; cpp = &HD(j)->cp[k]; SCpnt = cpp->SCpnt;
1671 - printk("%s %d.%d:%d pid %ld mb %d fc %d nr %d sec %ld ns %ld"\
1669 + printk("%s %d.%d:%d pid %ld mb %d fc %d nr %d sec %ld ns %u"\
1672 1670 " cur %ld s:%c r:%c rev:%c in:%c ov:%c xd %d.\n",
1673 1671 (ihdlr ? "ihdlr" : "qcomm"), SCpnt->channel, SCpnt->target,
1674 1672 SCpnt->lun, SCpnt->serial_number, k, flushcount, n_ready,
1675 - SCpnt->request->sector, SCpnt->request->nr_sectors, cursec,
1676 - YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only),
1673 + blk_rq_pos(SCpnt->request), blk_rq_sectors(SCpnt->request),
1674 + cursec, YESNO(s), YESNO(r), YESNO(rev), YESNO(input_only),
1677 1675 YESNO(overlap), cpp->xdir);
1678 1676 }
1679 1677 #endif
··· 1801 1799
1802 1800 if (linked_comm && SCpnt->device->queue_depth > 2
1803 1801 && TLDEV(SCpnt->device->type))
1804 - flush_dev(SCpnt->device, SCpnt->request->sector, j, TRUE);
1802 + flush_dev(SCpnt->device, blk_rq_pos(SCpnt->request), j, TRUE);
1805 1803
1806 1804 tstatus = status_byte(spp->target_status);
1807 1805
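Like its sibling driver earlier in the patch, u14-34f batches queued commands per device: it records each request's start sector in sl[], tracks the lowest and highest start (minsec/maxsec, the seek window) and accumulates the total transfer in ioseek when deciding whether flushing linked commands pays off. A compact sketch of that bookkeeping over a hypothetical queue:

    #include <stdio.h>

    struct queued { unsigned long long pos; unsigned int nr; };

    int main(void)
    {
        /* Hypothetical queued requests: (start sector, sector count). */
        struct queued q[] = { { 500, 8 }, { 100, 16 }, { 900, 4 } };
        unsigned long long minsec = ~0ULL, maxsec = 0;
        unsigned int ioseek = 0;

        for (unsigned int n = 0; n < sizeof(q) / sizeof(q[0]); n++) {
            if (q[n].pos < minsec) minsec = q[n].pos;  /* low end of seek window */
            if (q[n].pos > maxsec) maxsec = q[n].pos;  /* high end of seek window */
            ioseek += q[n].nr;                         /* total sectors queued */
        }
        printf("window %llu..%llu, %u sectors queued\n", minsec, maxsec, ioseek);
        return 0;
    }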
+1 -1
include/scsi/scsi_cmnd.h
··· 270 270
271 271 static inline sector_t scsi_get_lba(struct scsi_cmnd *scmd)
272 272 {
273 - return scmd->request->sector;
273 + return blk_rq_pos(scmd->request);
274 274 }
275 275
276 276 static inline unsigned scsi_prot_sg_count(struct scsi_cmnd *cmd)
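With scsi_get_lba() forwarding to blk_rq_pos(), SCSI code keeps a single point of truth for a command's starting LBA; the lpfc and sd_dif call sites above all go through it. A final userspace sketch of the wrapper pattern, again with stub types standing in for the real structures:

    #include <stdio.h>

    typedef unsigned long long sector_t;            /* stand-in width */

    struct request { sector_t hard_sector; };       /* stub */
    struct scsi_cmnd { struct request *request; };  /* stub */

    static inline sector_t blk_rq_pos(const struct request *rq)
    {
        return rq->hard_sector;
    }

    /* One thin wrapper: a future layout change touches exactly one line. */
    static inline sector_t scsi_get_lba(struct scsi_cmnd *scmd)
    {
        return blk_rq_pos(scmd->request);
    }

    int main(void)
    {
        struct request rq = { .hard_sector = 4096 };
        struct scsi_cmnd cmd = { .request = &rq };

        printf("lba %llu\n", (unsigned long long)scsi_get_lba(&cmd));
        return 0;
    }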