Merge branch 'for-2.6.36' of git://git.kernel.dk/linux-2.6-block

* 'for-2.6.36' of git://git.kernel.dk/linux-2.6-block: (149 commits)
block: make sure that REQ_* types are seen even with CONFIG_BLOCK=n
xen-blkfront: fix missing out label
blkdev: fix blkdev_issue_zeroout return value
block: update request stacking methods to support discards
block: fix missing export of blk_types.h
writeback: fix bad _bh spinlock nesting
drbd: revert "delay probes", feature is being re-implemented differently
drbd: Initialize all members of sync_conf to their defaults [Bugz 315]
drbd: Disable delay probes for the upcoming release
writeback: cleanup bdi_register
writeback: add new tracepoints
writeback: remove unnecessary init_timer call
writeback: optimize periodic bdi thread wakeups
writeback: prevent unnecessary bdi threads wakeups
writeback: move bdi threads exiting logic to the forker thread
writeback: restructure bdi forker loop a little
writeback: move last_active to bdi
writeback: do not remove bdi from bdi_list
writeback: simplify bdi code a little
writeback: do not lose wake-ups in bdi threads
...

Fixed up pretty trivial conflicts in drivers/block/virtio_blk.c and
drivers/scsi/scsi_error.c as per Jens.
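
The bulk of the diff below is mechanical: request type checks move to rq->cmd_type, and the old blk_fs_request()/blk_pc_request()/blk_discard_rq() helpers and bio_rw_flagged(bio, BIO_RW_*) tests are replaced by the shared REQ_* bits from the new blk_types.h. As a rough sketch of the resulting convention (hypothetical driver helpers, not code from this merge):

static bool mydrv_handles_request(struct request *rq)
{
	/* regular filesystem I/O and discards only; the type now lives in rq->cmd_type */
	if (rq->cmd_type != REQ_TYPE_FS && !(rq->cmd_flags & REQ_DISCARD))
		return false;

	/* attribute bits are plain REQ_* flags in rq->cmd_flags */
	if (rq->cmd_flags & REQ_HARDBARRIER)
		return false;

	return true;
}

static bool mydrv_bio_is_sync_write(struct bio *bio)
{
	/* the same REQ_* bits are tested directly on bio->bi_rw */
	return (bio->bi_rw & REQ_WRITE) && (bio->bi_rw & REQ_SYNC);
}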

+4312 -3237
-2
arch/alpha/include/asm/scatterlist.h
··· 3 4 #include <asm-generic/scatterlist.h> 5 6 - #define ISA_DMA_THRESHOLD (~0UL) 7 - 8 #endif /* !(_ALPHA_SCATTERLIST_H) */
··· 3 4 #include <asm-generic/scatterlist.h> 5 6 #endif /* !(_ALPHA_SCATTERLIST_H) */
-2
arch/avr32/include/asm/scatterlist.h
··· 3 4 #include <asm-generic/scatterlist.h> 5 6 - #define ISA_DMA_THRESHOLD (0xffffffff) 7 - 8 #endif /* __ASM_AVR32_SCATTERLIST_H */
··· 3 4 #include <asm-generic/scatterlist.h> 5 6 #endif /* __ASM_AVR32_SCATTERLIST_H */
-2
arch/blackfin/include/asm/scatterlist.h
··· 3 4 #include <asm-generic/scatterlist.h> 5 6 - #define ISA_DMA_THRESHOLD (0xffffffff) 7 - 8 #endif /* !(_BLACKFIN_SCATTERLIST_H) */
··· 3 4 #include <asm-generic/scatterlist.h> 5 6 #endif /* !(_BLACKFIN_SCATTERLIST_H) */
-2
arch/cris/include/asm/scatterlist.h
··· 3 4 #include <asm-generic/scatterlist.h> 5 6 - #define ISA_DMA_THRESHOLD (0x1fffffff) 7 - 8 #endif /* !(__ASM_CRIS_SCATTERLIST_H) */
··· 3 4 #include <asm-generic/scatterlist.h> 5 6 #endif /* !(__ASM_CRIS_SCATTERLIST_H) */
-2
arch/frv/include/asm/scatterlist.h
··· 3 4 #include <asm-generic/scatterlist.h> 5 6 - #define ISA_DMA_THRESHOLD (0xffffffffUL) 7 - 8 #endif /* !_ASM_SCATTERLIST_H */
··· 3 4 #include <asm-generic/scatterlist.h> 5 6 #endif /* !_ASM_SCATTERLIST_H */
-2
arch/h8300/include/asm/scatterlist.h
··· 3 4 #include <asm-generic/scatterlist.h> 5 6 - #define ISA_DMA_THRESHOLD (0xffffffff) 7 - 8 #endif /* !(_H8300_SCATTERLIST_H) */
··· 3 4 #include <asm-generic/scatterlist.h> 5 6 #endif /* !(_H8300_SCATTERLIST_H) */
-9
arch/ia64/include/asm/scatterlist.h
··· 2 #define _ASM_IA64_SCATTERLIST_H 3 4 #include <asm-generic/scatterlist.h> 5 - /* 6 - * It used to be that ISA_DMA_THRESHOLD had something to do with the 7 - * DMA-limits of ISA-devices. Nowadays, its only remaining use (apart 8 - * from the aha1542.c driver, which isn't 64-bit clean anyhow) is to 9 - * tell the block-layer (via BLK_BOUNCE_ISA) what the max. physical 10 - * address of a page is that is allocated with GFP_DMA. On IA-64, 11 - * that's 4GB - 1. 12 - */ 13 - #define ISA_DMA_THRESHOLD 0xffffffff 14 #define ARCH_HAS_SG_CHAIN 15 16 #endif /* _ASM_IA64_SCATTERLIST_H */
··· 2 #define _ASM_IA64_SCATTERLIST_H 3 4 #include <asm-generic/scatterlist.h> 5 #define ARCH_HAS_SG_CHAIN 6 7 #endif /* _ASM_IA64_SCATTERLIST_H */
-2
arch/m32r/include/asm/scatterlist.h
··· 3 4 #include <asm-generic/scatterlist.h> 5 6 - #define ISA_DMA_THRESHOLD (0x1fffffff) 7 - 8 #endif /* _ASM_M32R_SCATTERLIST_H */
··· 3 4 #include <asm-generic/scatterlist.h> 5 6 #endif /* _ASM_M32R_SCATTERLIST_H */
-3
arch/m68k/include/asm/scatterlist.h
··· 3 4 #include <asm-generic/scatterlist.h> 5 6 - /* This is bogus and should go away. */ 7 - #define ISA_DMA_THRESHOLD (0x00ffffff) 8 - 9 #endif /* !(_M68K_SCATTERLIST_H) */
··· 3 4 #include <asm-generic/scatterlist.h> 5 6 #endif /* !(_M68K_SCATTERLIST_H) */
-2
arch/microblaze/include/asm/scatterlist.h
··· 1 #include <asm-generic/scatterlist.h> 2 - 3 - #define ISA_DMA_THRESHOLD (~0UL)
··· 1 #include <asm-generic/scatterlist.h>
-2
arch/mips/include/asm/scatterlist.h
··· 3 4 #include <asm-generic/scatterlist.h> 5 6 - #define ISA_DMA_THRESHOLD (0x00ffffffUL) 7 - 8 #endif /* __ASM_SCATTERLIST_H */
··· 3 4 #include <asm-generic/scatterlist.h> 5 6 #endif /* __ASM_SCATTERLIST_H */
-2
arch/mn10300/include/asm/scatterlist.h
··· 13 14 #include <asm-generic/scatterlist.h> 15 16 - #define ISA_DMA_THRESHOLD (0x00ffffff) 17 - 18 #endif /* _ASM_SCATTERLIST_H */
··· 13 14 #include <asm-generic/scatterlist.h> 15 16 #endif /* _ASM_SCATTERLIST_H */
-1
arch/parisc/include/asm/scatterlist.h
··· 5 #include <asm/types.h> 6 #include <asm-generic/scatterlist.h> 7 8 - #define ISA_DMA_THRESHOLD (~0UL) 9 #define sg_virt_addr(sg) ((unsigned long)sg_virt(sg)) 10 11 #endif /* _ASM_PARISC_SCATTERLIST_H */
··· 5 #include <asm/types.h> 6 #include <asm-generic/scatterlist.h> 7 8 #define sg_virt_addr(sg) ((unsigned long)sg_virt(sg)) 9 10 #endif /* _ASM_PARISC_SCATTERLIST_H */
-3
arch/powerpc/include/asm/scatterlist.h
··· 12 #include <asm/dma.h> 13 #include <asm-generic/scatterlist.h> 14 15 - #ifdef __powerpc64__ 16 - #define ISA_DMA_THRESHOLD (~0UL) 17 - #endif 18 #define ARCH_HAS_SG_CHAIN 19 20 #endif /* _ASM_POWERPC_SCATTERLIST_H */
··· 12 #include <asm/dma.h> 13 #include <asm-generic/scatterlist.h> 14 15 #define ARCH_HAS_SG_CHAIN 16 17 #endif /* _ASM_POWERPC_SCATTERLIST_H */
-2
arch/s390/include/asm/scatterlist.h
··· 1 - #define ISA_DMA_THRESHOLD (~0UL) 2 - 3 #include <asm-generic/scatterlist.h>
··· 1 #include <asm-generic/scatterlist.h>
-2
arch/score/include/asm/scatterlist.h
··· 1 #ifndef _ASM_SCORE_SCATTERLIST_H 2 #define _ASM_SCORE_SCATTERLIST_H 3 4 - #define ISA_DMA_THRESHOLD (~0UL) 5 - 6 #include <asm-generic/scatterlist.h> 7 8 #endif /* _ASM_SCORE_SCATTERLIST_H */
··· 1 #ifndef _ASM_SCORE_SCATTERLIST_H 2 #define _ASM_SCORE_SCATTERLIST_H 3 4 #include <asm-generic/scatterlist.h> 5 6 #endif /* _ASM_SCORE_SCATTERLIST_H */
-2
arch/sh/include/asm/scatterlist.h
··· 1 #ifndef __ASM_SH_SCATTERLIST_H 2 #define __ASM_SH_SCATTERLIST_H 3 4 - #define ISA_DMA_THRESHOLD phys_addr_mask() 5 - 6 #include <asm-generic/scatterlist.h> 7 8 #endif /* __ASM_SH_SCATTERLIST_H */
··· 1 #ifndef __ASM_SH_SCATTERLIST_H 2 #define __ASM_SH_SCATTERLIST_H 3 4 #include <asm-generic/scatterlist.h> 5 6 #endif /* __ASM_SH_SCATTERLIST_H */
-1
arch/sparc/include/asm/scatterlist.h
··· 3 4 #include <asm-generic/scatterlist.h> 5 6 - #define ISA_DMA_THRESHOLD (~0UL) 7 #define ARCH_HAS_SG_CHAIN 8 9 #endif /* !(_SPARC_SCATTERLIST_H) */
··· 3 4 #include <asm-generic/scatterlist.h> 5 6 #define ARCH_HAS_SG_CHAIN 7 8 #endif /* !(_SPARC_SCATTERLIST_H) */
+6 -1
arch/um/drivers/ubd_kern.c
··· 33 #include "linux/mm.h" 34 #include "linux/slab.h" 35 #include "linux/vmalloc.h" 36 #include "linux/blkpg.h" 37 #include "linux/genhd.h" 38 #include "linux/spinlock.h" ··· 1099 struct ubd *ubd_dev = disk->private_data; 1100 int err = 0; 1101 1102 if(ubd_dev->count == 0){ 1103 err = ubd_open_dev(ubd_dev); 1104 if(err){ ··· 1117 if(--ubd_dev->count == 0) ubd_close_dev(ubd_dev); 1118 err = -EROFS; 1119 }*/ 1120 - out: 1121 return err; 1122 } 1123 ··· 1126 { 1127 struct ubd *ubd_dev = disk->private_data; 1128 1129 if(--ubd_dev->count == 0) 1130 ubd_close_dev(ubd_dev); 1131 return 0; 1132 } 1133
··· 33 #include "linux/mm.h" 34 #include "linux/slab.h" 35 #include "linux/vmalloc.h" 36 + #include "linux/smp_lock.h" 37 #include "linux/blkpg.h" 38 #include "linux/genhd.h" 39 #include "linux/spinlock.h" ··· 1098 struct ubd *ubd_dev = disk->private_data; 1099 int err = 0; 1100 1101 + lock_kernel(); 1102 if(ubd_dev->count == 0){ 1103 err = ubd_open_dev(ubd_dev); 1104 if(err){ ··· 1115 if(--ubd_dev->count == 0) ubd_close_dev(ubd_dev); 1116 err = -EROFS; 1117 }*/ 1118 + out: 1119 + unlock_kernel(); 1120 return err; 1121 } 1122 ··· 1123 { 1124 struct ubd *ubd_dev = disk->private_data; 1125 1126 + lock_kernel(); 1127 if(--ubd_dev->count == 0) 1128 ubd_close_dev(ubd_dev); 1129 + unlock_kernel(); 1130 return 0; 1131 } 1132
-1
arch/x86/include/asm/scatterlist.h
··· 3 4 #include <asm-generic/scatterlist.h> 5 6 - #define ISA_DMA_THRESHOLD (0x00ffffff) 7 #define ARCH_HAS_SG_CHAIN 8 9 #endif /* _ASM_X86_SCATTERLIST_H */
··· 3 4 #include <asm-generic/scatterlist.h> 5 6 #define ARCH_HAS_SG_CHAIN 7 8 #endif /* _ASM_X86_SCATTERLIST_H */
-2
arch/xtensa/include/asm/scatterlist.h
··· 13 14 #include <asm-generic/scatterlist.h> 15 16 - #define ISA_DMA_THRESHOLD (~0UL) 17 - 18 #endif /* _XTENSA_SCATTERLIST_H */
··· 13 14 #include <asm-generic/scatterlist.h> 15 16 #endif /* _XTENSA_SCATTERLIST_H */
+18 -17
block/blk-barrier.c
··· 13 * blk_queue_ordered - does this queue support ordered writes 14 * @q: the request queue 15 * @ordered: one of QUEUE_ORDERED_* 16 - * @prepare_flush_fn: rq setup helper for cache flush ordered writes 17 * 18 * Description: 19 * For journalled file systems, doing ordered writes on a commit ··· 21 * feature should call this function and indicate so. 22 * 23 **/ 24 - int blk_queue_ordered(struct request_queue *q, unsigned ordered, 25 - prepare_flush_fn *prepare_flush_fn) 26 { 27 - if (!prepare_flush_fn && (ordered & (QUEUE_ORDERED_DO_PREFLUSH | 28 - QUEUE_ORDERED_DO_POSTFLUSH))) { 29 - printk(KERN_ERR "%s: prepare_flush_fn required\n", __func__); 30 - return -EINVAL; 31 - } 32 - 33 if (ordered != QUEUE_ORDERED_NONE && 34 ordered != QUEUE_ORDERED_DRAIN && 35 ordered != QUEUE_ORDERED_DRAIN_FLUSH && ··· 36 37 q->ordered = ordered; 38 q->next_ordered = ordered; 39 - q->prepare_flush_fn = prepare_flush_fn; 40 41 return 0; 42 } ··· 70 * 71 * http://thread.gmane.org/gmane.linux.kernel/537473 72 */ 73 - if (!blk_fs_request(rq)) 74 return QUEUE_ORDSEQ_DRAIN; 75 76 if ((rq->cmd_flags & REQ_ORDERED_COLOR) == ··· 134 } 135 136 blk_rq_init(q, rq); 137 - rq->cmd_flags = REQ_HARDBARRIER; 138 - rq->rq_disk = q->bar_rq.rq_disk; 139 rq->end_io = end_io; 140 - q->prepare_flush_fn(q, rq); 141 142 elv_insert(q, rq, ELEVATOR_INSERT_FRONT); 143 } ··· 194 /* initialize proxy request and queue it */ 195 blk_rq_init(q, rq); 196 if (bio_data_dir(q->orig_bar_rq->bio) == WRITE) 197 - rq->cmd_flags |= REQ_RW; 198 if (q->ordered & QUEUE_ORDERED_DO_FUA) 199 rq->cmd_flags |= REQ_FUA; 200 init_request_from_bio(rq, q->orig_bar_rq->bio); ··· 227 bool blk_do_ordered(struct request_queue *q, struct request **rqp) 228 { 229 struct request *rq = *rqp; 230 - const int is_barrier = blk_fs_request(rq) && blk_barrier_rq(rq); 231 232 if (!q->ordseq) { 233 if (!is_barrier) ··· 253 */ 254 255 /* Special requests are not subject to ordering rules. */ 256 - if (!blk_fs_request(rq) && 257 rq != &q->pre_flush_rq && rq != &q->post_flush_rq) 258 return true; 259 ··· 309 310 q = bdev_get_queue(bdev); 311 if (!q) 312 return -ENXIO; 313 314 bio = bio_alloc(gfp_mask, 0);
··· 13 * blk_queue_ordered - does this queue support ordered writes 14 * @q: the request queue 15 * @ordered: one of QUEUE_ORDERED_* 16 * 17 * Description: 18 * For journalled file systems, doing ordered writes on a commit ··· 22 * feature should call this function and indicate so. 23 * 24 **/ 25 + int blk_queue_ordered(struct request_queue *q, unsigned ordered) 26 { 27 if (ordered != QUEUE_ORDERED_NONE && 28 ordered != QUEUE_ORDERED_DRAIN && 29 ordered != QUEUE_ORDERED_DRAIN_FLUSH && ··· 44 45 q->ordered = ordered; 46 q->next_ordered = ordered; 47 48 return 0; 49 } ··· 79 * 80 * http://thread.gmane.org/gmane.linux.kernel/537473 81 */ 82 + if (rq->cmd_type != REQ_TYPE_FS) 83 return QUEUE_ORDSEQ_DRAIN; 84 85 if ((rq->cmd_flags & REQ_ORDERED_COLOR) == ··· 143 } 144 145 blk_rq_init(q, rq); 146 + rq->cmd_type = REQ_TYPE_FS; 147 + rq->cmd_flags = REQ_HARDBARRIER | REQ_FLUSH; 148 + rq->rq_disk = q->orig_bar_rq->rq_disk; 149 rq->end_io = end_io; 150 151 elv_insert(q, rq, ELEVATOR_INSERT_FRONT); 152 } ··· 203 /* initialize proxy request and queue it */ 204 blk_rq_init(q, rq); 205 if (bio_data_dir(q->orig_bar_rq->bio) == WRITE) 206 + rq->cmd_flags |= REQ_WRITE; 207 if (q->ordered & QUEUE_ORDERED_DO_FUA) 208 rq->cmd_flags |= REQ_FUA; 209 init_request_from_bio(rq, q->orig_bar_rq->bio); ··· 236 bool blk_do_ordered(struct request_queue *q, struct request **rqp) 237 { 238 struct request *rq = *rqp; 239 + const int is_barrier = rq->cmd_type == REQ_TYPE_FS && 240 + (rq->cmd_flags & REQ_HARDBARRIER); 241 242 if (!q->ordseq) { 243 if (!is_barrier) ··· 261 */ 262 263 /* Special requests are not subject to ordering rules. */ 264 + if (rq->cmd_type != REQ_TYPE_FS && 265 rq != &q->pre_flush_rq && rq != &q->post_flush_rq) 266 return true; 267 ··· 317 318 q = bdev_get_queue(bdev); 319 if (!q) 320 + return -ENXIO; 321 + 322 + /* 323 + * some block devices may not have their queue correctly set up here 324 + * (e.g. loop device without a backing file) and so issuing a flush 325 + * here will panic. Ensure there is a request function before issuing 326 + * the barrier. 327 + */ 328 + if (!q->make_request_fn) 329 return -ENXIO; 330 331 bio = bio_alloc(gfp_mask, 0);
+82 -35
block/blk-core.c
··· 184 printk(KERN_INFO " bio %p, biotail %p, buffer %p, len %u\n", 185 rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq)); 186 187 - if (blk_pc_request(rq)) { 188 printk(KERN_INFO " cdb: "); 189 for (bit = 0; bit < BLK_MAX_CDB; bit++) 190 printk("%02x ", rq->cmd[bit]); ··· 608 609 q->request_fn = rfn; 610 q->prep_rq_fn = NULL; 611 q->unplug_fn = generic_unplug_device; 612 q->queue_flags = QUEUE_FLAG_DEFAULT; 613 q->queue_lock = lock; ··· 1136 } 1137 EXPORT_SYMBOL(blk_put_request); 1138 1139 void init_request_from_bio(struct request *req, struct bio *bio) 1140 { 1141 req->cpu = bio->bi_comp_cpu; 1142 req->cmd_type = REQ_TYPE_FS; 1143 1144 - /* 1145 - * Inherit FAILFAST from bio (for read-ahead, and explicit 1146 - * FAILFAST). FAILFAST flags are identical for req and bio. 1147 - */ 1148 - if (bio_rw_flagged(bio, BIO_RW_AHEAD)) 1149 req->cmd_flags |= REQ_FAILFAST_MASK; 1150 - else 1151 - req->cmd_flags |= bio->bi_rw & REQ_FAILFAST_MASK; 1152 - 1153 - if (bio_rw_flagged(bio, BIO_RW_DISCARD)) 1154 - req->cmd_flags |= REQ_DISCARD; 1155 - if (bio_rw_flagged(bio, BIO_RW_BARRIER)) 1156 - req->cmd_flags |= REQ_HARDBARRIER; 1157 - if (bio_rw_flagged(bio, BIO_RW_SYNCIO)) 1158 - req->cmd_flags |= REQ_RW_SYNC; 1159 - if (bio_rw_flagged(bio, BIO_RW_META)) 1160 - req->cmd_flags |= REQ_RW_META; 1161 - if (bio_rw_flagged(bio, BIO_RW_NOIDLE)) 1162 - req->cmd_flags |= REQ_NOIDLE; 1163 1164 req->errors = 0; 1165 req->__sector = bio->bi_sector; ··· 1198 int el_ret; 1199 unsigned int bytes = bio->bi_size; 1200 const unsigned short prio = bio_prio(bio); 1201 - const bool sync = bio_rw_flagged(bio, BIO_RW_SYNCIO); 1202 - const bool unplug = bio_rw_flagged(bio, BIO_RW_UNPLUG); 1203 const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK; 1204 int rw_flags; 1205 1206 - if (bio_rw_flagged(bio, BIO_RW_BARRIER) && 1207 (q->next_ordered == QUEUE_ORDERED_NONE)) { 1208 bio_endio(bio, -EOPNOTSUPP); 1209 return 0; ··· 1217 1218 spin_lock_irq(q->queue_lock); 1219 1220 - if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER)) || elv_queue_empty(q)) 1221 goto get_rq; 1222 1223 el_ret = elv_merge(q, &req, bio); ··· 1292 */ 1293 rw_flags = bio_data_dir(bio); 1294 if (sync) 1295 - rw_flags |= REQ_RW_SYNC; 1296 1297 /* 1298 * Grab a free request. This is might sleep but can not fail. ··· 1481 goto end_io; 1482 } 1483 1484 - if (unlikely(!bio_rw_flagged(bio, BIO_RW_DISCARD) && 1485 nr_sectors > queue_max_hw_sectors(q))) { 1486 printk(KERN_ERR "bio too big device %s (%u > %u)\n", 1487 bdevname(bio->bi_bdev, b), ··· 1514 if (bio_check_eod(bio, nr_sectors)) 1515 goto end_io; 1516 1517 - if (bio_rw_flagged(bio, BIO_RW_DISCARD) && 1518 - !blk_queue_discard(q)) { 1519 err = -EOPNOTSUPP; 1520 goto end_io; 1521 } ··· 1599 * If it's a regular read/write or a barrier with data attached, 1600 * go through the normal accounting stuff before submission. 1601 */ 1602 - if (bio_has_data(bio) && !(rw & (1 << BIO_RW_DISCARD))) { 1603 if (rw & WRITE) { 1604 count_vm_events(PGPGOUT, count); 1605 } else { ··· 1644 */ 1645 int blk_rq_check_limits(struct request_queue *q, struct request *rq) 1646 { 1647 if (blk_rq_sectors(rq) > queue_max_sectors(q) || 1648 blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) { 1649 printk(KERN_ERR "%s: over max size limit.\n", __func__); ··· 1815 * sees this request (possibly after 1816 * requeueing). Notify IO scheduler. 1817 */ 1818 - if (blk_sorted_rq(rq)) 1819 elv_activate_rq(q, rq); 1820 1821 /* ··· 2003 * TODO: tj: This is too subtle. It would be better to let 2004 * low level drivers do what they see fit. 
2005 */ 2006 - if (blk_fs_request(req)) 2007 req->errors = 0; 2008 2009 - if (error && (blk_fs_request(req) && !(req->cmd_flags & REQ_QUIET))) { 2010 printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n", 2011 req->rq_disk ? req->rq_disk->disk_name : "?", 2012 (unsigned long long)blk_rq_pos(req)); ··· 2094 req->buffer = bio_data(req->bio); 2095 2096 /* update sector only for requests with clear definition of sector */ 2097 - if (blk_fs_request(req) || blk_discard_rq(req)) 2098 req->__sector += total_bytes >> 9; 2099 2100 /* mixed attributes always follow the first bio */ ··· 2131 blk_update_request(rq->next_rq, error, bidi_bytes)) 2132 return true; 2133 2134 - add_disk_randomness(rq->rq_disk); 2135 2136 return false; 2137 } 2138 2139 /* 2140 * queue lock must be held ··· 2167 2168 BUG_ON(blk_queued_rq(req)); 2169 2170 - if (unlikely(laptop_mode) && blk_fs_request(req)) 2171 laptop_io_completion(&req->q->backing_dev_info); 2172 2173 blk_delete_timer(req); 2174 2175 blk_account_io_done(req); 2176 ··· 2408 struct bio *bio) 2409 { 2410 /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */ 2411 - rq->cmd_flags |= bio->bi_rw & REQ_RW; 2412 2413 if (bio_has_data(bio)) { 2414 rq->nr_phys_segments = bio_phys_segments(q, bio); ··· 2495 { 2496 dst->cpu = src->cpu; 2497 dst->cmd_flags = (rq_data_dir(src) | REQ_NOMERGE); 2498 dst->cmd_type = src->cmd_type; 2499 dst->__sector = blk_rq_pos(src); 2500 dst->__data_len = blk_rq_bytes(src);
··· 184 printk(KERN_INFO " bio %p, biotail %p, buffer %p, len %u\n", 185 rq->bio, rq->biotail, rq->buffer, blk_rq_bytes(rq)); 186 187 + if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 188 printk(KERN_INFO " cdb: "); 189 for (bit = 0; bit < BLK_MAX_CDB; bit++) 190 printk("%02x ", rq->cmd[bit]); ··· 608 609 q->request_fn = rfn; 610 q->prep_rq_fn = NULL; 611 + q->unprep_rq_fn = NULL; 612 q->unplug_fn = generic_unplug_device; 613 q->queue_flags = QUEUE_FLAG_DEFAULT; 614 q->queue_lock = lock; ··· 1135 } 1136 EXPORT_SYMBOL(blk_put_request); 1137 1138 + /** 1139 + * blk_add_request_payload - add a payload to a request 1140 + * @rq: request to update 1141 + * @page: page backing the payload 1142 + * @len: length of the payload. 1143 + * 1144 + * This allows to later add a payload to an already submitted request by 1145 + * a block driver. The driver needs to take care of freeing the payload 1146 + * itself. 1147 + * 1148 + * Note that this is a quite horrible hack and nothing but handling of 1149 + * discard requests should ever use it. 1150 + */ 1151 + void blk_add_request_payload(struct request *rq, struct page *page, 1152 + unsigned int len) 1153 + { 1154 + struct bio *bio = rq->bio; 1155 + 1156 + bio->bi_io_vec->bv_page = page; 1157 + bio->bi_io_vec->bv_offset = 0; 1158 + bio->bi_io_vec->bv_len = len; 1159 + 1160 + bio->bi_size = len; 1161 + bio->bi_vcnt = 1; 1162 + bio->bi_phys_segments = 1; 1163 + 1164 + rq->__data_len = rq->resid_len = len; 1165 + rq->nr_phys_segments = 1; 1166 + rq->buffer = bio_data(bio); 1167 + } 1168 + EXPORT_SYMBOL_GPL(blk_add_request_payload); 1169 + 1170 void init_request_from_bio(struct request *req, struct bio *bio) 1171 { 1172 req->cpu = bio->bi_comp_cpu; 1173 req->cmd_type = REQ_TYPE_FS; 1174 1175 + req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK; 1176 + if (bio->bi_rw & REQ_RAHEAD) 1177 req->cmd_flags |= REQ_FAILFAST_MASK; 1178 1179 req->errors = 0; 1180 req->__sector = bio->bi_sector; ··· 1181 int el_ret; 1182 unsigned int bytes = bio->bi_size; 1183 const unsigned short prio = bio_prio(bio); 1184 + const bool sync = (bio->bi_rw & REQ_SYNC); 1185 + const bool unplug = (bio->bi_rw & REQ_UNPLUG); 1186 const unsigned int ff = bio->bi_rw & REQ_FAILFAST_MASK; 1187 int rw_flags; 1188 1189 + if ((bio->bi_rw & REQ_HARDBARRIER) && 1190 (q->next_ordered == QUEUE_ORDERED_NONE)) { 1191 bio_endio(bio, -EOPNOTSUPP); 1192 return 0; ··· 1200 1201 spin_lock_irq(q->queue_lock); 1202 1203 + if (unlikely((bio->bi_rw & REQ_HARDBARRIER)) || elv_queue_empty(q)) 1204 goto get_rq; 1205 1206 el_ret = elv_merge(q, &req, bio); ··· 1275 */ 1276 rw_flags = bio_data_dir(bio); 1277 if (sync) 1278 + rw_flags |= REQ_SYNC; 1279 1280 /* 1281 * Grab a free request. This is might sleep but can not fail. ··· 1464 goto end_io; 1465 } 1466 1467 + if (unlikely(!(bio->bi_rw & REQ_DISCARD) && 1468 nr_sectors > queue_max_hw_sectors(q))) { 1469 printk(KERN_ERR "bio too big device %s (%u > %u)\n", 1470 bdevname(bio->bi_bdev, b), ··· 1497 if (bio_check_eod(bio, nr_sectors)) 1498 goto end_io; 1499 1500 + if ((bio->bi_rw & REQ_DISCARD) && !blk_queue_discard(q)) { 1501 err = -EOPNOTSUPP; 1502 goto end_io; 1503 } ··· 1583 * If it's a regular read/write or a barrier with data attached, 1584 * go through the normal accounting stuff before submission. 
1585 */ 1586 + if (bio_has_data(bio) && !(rw & REQ_DISCARD)) { 1587 if (rw & WRITE) { 1588 count_vm_events(PGPGOUT, count); 1589 } else { ··· 1628 */ 1629 int blk_rq_check_limits(struct request_queue *q, struct request *rq) 1630 { 1631 + if (rq->cmd_flags & REQ_DISCARD) 1632 + return 0; 1633 + 1634 if (blk_rq_sectors(rq) > queue_max_sectors(q) || 1635 blk_rq_bytes(rq) > queue_max_hw_sectors(q) << 9) { 1636 printk(KERN_ERR "%s: over max size limit.\n", __func__); ··· 1796 * sees this request (possibly after 1797 * requeueing). Notify IO scheduler. 1798 */ 1799 + if (rq->cmd_flags & REQ_SORTED) 1800 elv_activate_rq(q, rq); 1801 1802 /* ··· 1984 * TODO: tj: This is too subtle. It would be better to let 1985 * low level drivers do what they see fit. 1986 */ 1987 + if (req->cmd_type == REQ_TYPE_FS) 1988 req->errors = 0; 1989 1990 + if (error && req->cmd_type == REQ_TYPE_FS && 1991 + !(req->cmd_flags & REQ_QUIET)) { 1992 printk(KERN_ERR "end_request: I/O error, dev %s, sector %llu\n", 1993 req->rq_disk ? req->rq_disk->disk_name : "?", 1994 (unsigned long long)blk_rq_pos(req)); ··· 2074 req->buffer = bio_data(req->bio); 2075 2076 /* update sector only for requests with clear definition of sector */ 2077 + if (req->cmd_type == REQ_TYPE_FS || (req->cmd_flags & REQ_DISCARD)) 2078 req->__sector += total_bytes >> 9; 2079 2080 /* mixed attributes always follow the first bio */ ··· 2111 blk_update_request(rq->next_rq, error, bidi_bytes)) 2112 return true; 2113 2114 + if (blk_queue_add_random(rq->q)) 2115 + add_disk_randomness(rq->rq_disk); 2116 2117 return false; 2118 } 2119 + 2120 + /** 2121 + * blk_unprep_request - unprepare a request 2122 + * @req: the request 2123 + * 2124 + * This function makes a request ready for complete resubmission (or 2125 + * completion). It happens only after all error handling is complete, 2126 + * so represents the appropriate moment to deallocate any resources 2127 + * that were allocated to the request in the prep_rq_fn. The queue 2128 + * lock is held when calling this. 2129 + */ 2130 + void blk_unprep_request(struct request *req) 2131 + { 2132 + struct request_queue *q = req->q; 2133 + 2134 + req->cmd_flags &= ~REQ_DONTPREP; 2135 + if (q->unprep_rq_fn) 2136 + q->unprep_rq_fn(q, req); 2137 + } 2138 + EXPORT_SYMBOL_GPL(blk_unprep_request); 2139 2140 /* 2141 * queue lock must be held ··· 2126 2127 BUG_ON(blk_queued_rq(req)); 2128 2129 + if (unlikely(laptop_mode) && req->cmd_type == REQ_TYPE_FS) 2130 laptop_io_completion(&req->q->backing_dev_info); 2131 2132 blk_delete_timer(req); 2133 + 2134 + if (req->cmd_flags & REQ_DONTPREP) 2135 + blk_unprep_request(req); 2136 + 2137 2138 blk_account_io_done(req); 2139 ··· 2363 struct bio *bio) 2364 { 2365 /* Bit 0 (R/W) is identical in rq->cmd_flags and bio->bi_rw */ 2366 + rq->cmd_flags |= bio->bi_rw & REQ_WRITE; 2367 2368 if (bio_has_data(bio)) { 2369 rq->nr_phys_segments = bio_phys_segments(q, bio); ··· 2450 { 2451 dst->cpu = src->cpu; 2452 dst->cmd_flags = (rq_data_dir(src) | REQ_NOMERGE); 2453 + if (src->cmd_flags & REQ_DISCARD) 2454 + dst->cmd_flags |= REQ_DISCARD; 2455 dst->cmd_type = src->cmd_type; 2456 dst->__sector = blk_rq_pos(src); 2457 dst->__data_len = blk_rq_bytes(src);
+1 -1
block/blk-exec.c
··· 57 __elv_add_request(q, rq, where, 1); 58 __generic_unplug_device(q); 59 /* the queue is stopped so it won't be plugged+unplugged */ 60 - if (blk_pm_resume_request(rq)) 61 q->request_fn(q); 62 spin_unlock_irq(q->queue_lock); 63 }
··· 57 __elv_add_request(q, rq, where, 1); 58 __generic_unplug_device(q); 59 /* the queue is stopped so it won't be plugged+unplugged */ 60 + if (rq->cmd_type == REQ_TYPE_PM_RESUME) 61 q->request_fn(q); 62 spin_unlock_irq(q->queue_lock); 63 }
+24 -32
block/blk-lib.c
··· 19 20 if (bio->bi_private) 21 complete(bio->bi_private); 22 - __free_page(bio_page(bio)); 23 24 bio_put(bio); 25 } ··· 41 struct request_queue *q = bdev_get_queue(bdev); 42 int type = flags & BLKDEV_IFL_BARRIER ? 43 DISCARD_BARRIER : DISCARD_NOBARRIER; 44 struct bio *bio; 45 - struct page *page; 46 int ret = 0; 47 48 if (!q) ··· 51 if (!blk_queue_discard(q)) 52 return -EOPNOTSUPP; 53 54 - while (nr_sects && !ret) { 55 - unsigned int sector_size = q->limits.logical_block_size; 56 - unsigned int max_discard_sectors = 57 - min(q->limits.max_discard_sectors, UINT_MAX >> 9); 58 59 bio = bio_alloc(gfp_mask, 1); 60 - if (!bio) 61 - goto out; 62 bio->bi_sector = sector; 63 bio->bi_end_io = blkdev_discard_end_io; 64 bio->bi_bdev = bdev; 65 if (flags & BLKDEV_IFL_WAIT) 66 bio->bi_private = &wait; 67 68 - /* 69 - * Add a zeroed one-sector payload as that's what 70 - * our current implementations need. If we'll ever need 71 - * more the interface will need revisiting. 72 - */ 73 - page = alloc_page(gfp_mask | __GFP_ZERO); 74 - if (!page) 75 - goto out_free_bio; 76 - if (bio_add_pc_page(q, bio, page, sector_size, 0) < sector_size) 77 - goto out_free_page; 78 - 79 - /* 80 - * And override the bio size - the way discard works we 81 - * touch many more blocks on disk than the actual payload 82 - * length. 83 - */ 84 if (nr_sects > max_discard_sectors) { 85 bio->bi_size = max_discard_sectors << 9; 86 nr_sects -= max_discard_sectors; ··· 96 ret = -EIO; 97 bio_put(bio); 98 } 99 return ret; 100 - out_free_page: 101 - __free_page(page); 102 - out_free_bio: 103 - bio_put(bio); 104 - out: 105 - return -ENOMEM; 106 } 107 EXPORT_SYMBOL(blkdev_issue_discard); 108 ··· 145 int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, 146 sector_t nr_sects, gfp_t gfp_mask, unsigned long flags) 147 { 148 - int ret = 0; 149 struct bio *bio; 150 struct bio_batch bb; 151 unsigned int sz, issued = 0; ··· 163 return ret; 164 } 165 submit: 166 while (nr_sects != 0) { 167 bio = bio_alloc(gfp_mask, 168 min(nr_sects, (sector_t)BIO_MAX_PAGES)); 169 - if (!bio) 170 break; 171 172 bio->bi_sector = sector; 173 bio->bi_bdev = bdev; ··· 189 if (ret < (sz << 9)) 190 break; 191 } 192 issued++; 193 submit_bio(WRITE, bio); 194 }
··· 19 20 if (bio->bi_private) 21 complete(bio->bi_private); 22 23 bio_put(bio); 24 } ··· 42 struct request_queue *q = bdev_get_queue(bdev); 43 int type = flags & BLKDEV_IFL_BARRIER ? 44 DISCARD_BARRIER : DISCARD_NOBARRIER; 45 + unsigned int max_discard_sectors; 46 struct bio *bio; 47 int ret = 0; 48 49 if (!q) ··· 52 if (!blk_queue_discard(q)) 53 return -EOPNOTSUPP; 54 55 + /* 56 + * Ensure that max_discard_sectors is of the proper 57 + * granularity 58 + */ 59 + max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9); 60 + if (q->limits.discard_granularity) { 61 + unsigned int disc_sects = q->limits.discard_granularity >> 9; 62 63 + max_discard_sectors &= ~(disc_sects - 1); 64 + } 65 + 66 + while (nr_sects && !ret) { 67 bio = bio_alloc(gfp_mask, 1); 68 + if (!bio) { 69 + ret = -ENOMEM; 70 + break; 71 + } 72 + 73 bio->bi_sector = sector; 74 bio->bi_end_io = blkdev_discard_end_io; 75 bio->bi_bdev = bdev; 76 if (flags & BLKDEV_IFL_WAIT) 77 bio->bi_private = &wait; 78 79 if (nr_sects > max_discard_sectors) { 80 bio->bi_size = max_discard_sectors << 9; 81 nr_sects -= max_discard_sectors; ··· 103 ret = -EIO; 104 bio_put(bio); 105 } 106 + 107 return ret; 108 } 109 EXPORT_SYMBOL(blkdev_issue_discard); 110 ··· 157 int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, 158 sector_t nr_sects, gfp_t gfp_mask, unsigned long flags) 159 { 160 + int ret; 161 struct bio *bio; 162 struct bio_batch bb; 163 unsigned int sz, issued = 0; ··· 175 return ret; 176 } 177 submit: 178 + ret = 0; 179 while (nr_sects != 0) { 180 bio = bio_alloc(gfp_mask, 181 min(nr_sects, (sector_t)BIO_MAX_PAGES)); 182 + if (!bio) { 183 + ret = -ENOMEM; 184 break; 185 + } 186 187 bio->bi_sector = sector; 188 bio->bi_bdev = bdev; ··· 198 if (ret < (sz << 9)) 199 break; 200 } 201 + ret = 0; 202 issued++; 203 submit_bio(WRITE, bio); 204 }
+1 -1
block/blk-map.c
··· 307 return PTR_ERR(bio); 308 309 if (rq_data_dir(rq) == WRITE) 310 - bio->bi_rw |= (1 << BIO_RW); 311 312 if (do_copy) 313 rq->cmd_flags |= REQ_COPY_USER;
··· 307 return PTR_ERR(bio); 308 309 if (rq_data_dir(rq) == WRITE) 310 + bio->bi_rw |= (1 << REQ_WRITE); 311 312 if (do_copy) 313 rq->cmd_flags |= REQ_COPY_USER;
+4 -5
block/blk-merge.c
··· 12 static unsigned int __blk_recalc_rq_segments(struct request_queue *q, 13 struct bio *bio) 14 { 15 - unsigned int phys_size; 16 struct bio_vec *bv, *bvprv = NULL; 17 int cluster, i, high, highprv = 1; 18 unsigned int seg_size, nr_phys_segs; ··· 23 fbio = bio; 24 cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags); 25 seg_size = 0; 26 - phys_size = nr_phys_segs = 0; 27 for_each_bio(bio) { 28 bio_for_each_segment(bv, bio, i) { 29 /* ··· 179 } 180 181 if (q->dma_drain_size && q->dma_drain_needed(rq)) { 182 - if (rq->cmd_flags & REQ_RW) 183 memset(q->dma_drain_buffer, 0, q->dma_drain_size); 184 185 sg->page_link &= ~0x02; ··· 225 { 226 unsigned short max_sectors; 227 228 - if (unlikely(blk_pc_request(req))) 229 max_sectors = queue_max_hw_sectors(q); 230 else 231 max_sectors = queue_max_sectors(q); ··· 249 { 250 unsigned short max_sectors; 251 252 - if (unlikely(blk_pc_request(req))) 253 max_sectors = queue_max_hw_sectors(q); 254 else 255 max_sectors = queue_max_sectors(q);
··· 12 static unsigned int __blk_recalc_rq_segments(struct request_queue *q, 13 struct bio *bio) 14 { 15 struct bio_vec *bv, *bvprv = NULL; 16 int cluster, i, high, highprv = 1; 17 unsigned int seg_size, nr_phys_segs; ··· 24 fbio = bio; 25 cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags); 26 seg_size = 0; 27 + nr_phys_segs = 0; 28 for_each_bio(bio) { 29 bio_for_each_segment(bv, bio, i) { 30 /* ··· 180 } 181 182 if (q->dma_drain_size && q->dma_drain_needed(rq)) { 183 + if (rq->cmd_flags & REQ_WRITE) 184 memset(q->dma_drain_buffer, 0, q->dma_drain_size); 185 186 sg->page_link &= ~0x02; ··· 226 { 227 unsigned short max_sectors; 228 229 + if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC)) 230 max_sectors = queue_max_hw_sectors(q); 231 else 232 max_sectors = queue_max_sectors(q); ··· 250 { 251 unsigned short max_sectors; 252 253 + if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC)) 254 max_sectors = queue_max_hw_sectors(q); 255 else 256 max_sectors = queue_max_sectors(q);
+17
block/blk-settings.c
··· 37 EXPORT_SYMBOL(blk_queue_prep_rq); 38 39 /** 40 * blk_queue_merge_bvec - set a merge_bvec function for queue 41 * @q: queue 42 * @mbfn: merge_bvec_fn
··· 37 EXPORT_SYMBOL(blk_queue_prep_rq); 38 39 /** 40 + * blk_queue_unprep_rq - set an unprepare_request function for queue 41 + * @q: queue 42 + * @ufn: unprepare_request function 43 + * 44 + * It's possible for a queue to register an unprepare_request callback 45 + * which is invoked before the request is finally completed. The goal 46 + * of the function is to deallocate any data that was allocated in the 47 + * prepare_request callback. 48 + * 49 + */ 50 + void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn) 51 + { 52 + q->unprep_rq_fn = ufn; 53 + } 54 + EXPORT_SYMBOL(blk_queue_unprep_rq); 55 + 56 + /** 57 * blk_queue_merge_bvec - set a merge_bvec function for queue 58 * @q: queue 59 * @mbfn: merge_bvec_fn
+39 -43
block/blk-sysfs.c
··· 180 return queue_var_show(max_hw_sectors_kb, (page)); 181 } 182 183 - static ssize_t queue_nonrot_show(struct request_queue *q, char *page) 184 - { 185 - return queue_var_show(!blk_queue_nonrot(q), page); 186 } 187 188 - static ssize_t queue_nonrot_store(struct request_queue *q, const char *page, 189 - size_t count) 190 - { 191 - unsigned long nm; 192 - ssize_t ret = queue_var_store(&nm, page, count); 193 - 194 - spin_lock_irq(q->queue_lock); 195 - if (nm) 196 - queue_flag_clear(QUEUE_FLAG_NONROT, q); 197 - else 198 - queue_flag_set(QUEUE_FLAG_NONROT, q); 199 - spin_unlock_irq(q->queue_lock); 200 - 201 - return ret; 202 - } 203 204 static ssize_t queue_nomerges_show(struct request_queue *q, char *page) 205 { ··· 257 queue_flag_clear(QUEUE_FLAG_SAME_COMP, q); 258 spin_unlock_irq(q->queue_lock); 259 #endif 260 - return ret; 261 - } 262 - 263 - static ssize_t queue_iostats_show(struct request_queue *q, char *page) 264 - { 265 - return queue_var_show(blk_queue_io_stat(q), page); 266 - } 267 - 268 - static ssize_t queue_iostats_store(struct request_queue *q, const char *page, 269 - size_t count) 270 - { 271 - unsigned long stats; 272 - ssize_t ret = queue_var_store(&stats, page, count); 273 - 274 - spin_lock_irq(q->queue_lock); 275 - if (stats) 276 - queue_flag_set(QUEUE_FLAG_IO_STAT, q); 277 - else 278 - queue_flag_clear(QUEUE_FLAG_IO_STAT, q); 279 - spin_unlock_irq(q->queue_lock); 280 - 281 return ret; 282 } 283 ··· 341 342 static struct queue_sysfs_entry queue_nonrot_entry = { 343 .attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR }, 344 - .show = queue_nonrot_show, 345 - .store = queue_nonrot_store, 346 }; 347 348 static struct queue_sysfs_entry queue_nomerges_entry = { ··· 359 360 static struct queue_sysfs_entry queue_iostats_entry = { 361 .attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR }, 362 - .show = queue_iostats_show, 363 - .store = queue_iostats_store, 364 }; 365 366 static struct attribute *default_attrs[] = { ··· 389 &queue_nomerges_entry.attr, 390 &queue_rq_affinity_entry.attr, 391 &queue_iostats_entry.attr, 392 NULL, 393 }; 394
··· 180 return queue_var_show(max_hw_sectors_kb, (page)); 181 } 182 183 + #define QUEUE_SYSFS_BIT_FNS(name, flag, neg) \ 184 + static ssize_t \ 185 + queue_show_##name(struct request_queue *q, char *page) \ 186 + { \ 187 + int bit; \ 188 + bit = test_bit(QUEUE_FLAG_##flag, &q->queue_flags); \ 189 + return queue_var_show(neg ? !bit : bit, page); \ 190 + } \ 191 + static ssize_t \ 192 + queue_store_##name(struct request_queue *q, const char *page, size_t count) \ 193 + { \ 194 + unsigned long val; \ 195 + ssize_t ret; \ 196 + ret = queue_var_store(&val, page, count); \ 197 + if (neg) \ 198 + val = !val; \ 199 + \ 200 + spin_lock_irq(q->queue_lock); \ 201 + if (val) \ 202 + queue_flag_set(QUEUE_FLAG_##flag, q); \ 203 + else \ 204 + queue_flag_clear(QUEUE_FLAG_##flag, q); \ 205 + spin_unlock_irq(q->queue_lock); \ 206 + return ret; \ 207 } 208 209 + QUEUE_SYSFS_BIT_FNS(nonrot, NONROT, 1); 210 + QUEUE_SYSFS_BIT_FNS(random, ADD_RANDOM, 0); 211 + QUEUE_SYSFS_BIT_FNS(iostats, IO_STAT, 0); 212 + #undef QUEUE_SYSFS_BIT_FNS 213 214 static ssize_t queue_nomerges_show(struct request_queue *q, char *page) 215 { ··· 247 queue_flag_clear(QUEUE_FLAG_SAME_COMP, q); 248 spin_unlock_irq(q->queue_lock); 249 #endif 250 return ret; 251 } 252 ··· 352 353 static struct queue_sysfs_entry queue_nonrot_entry = { 354 .attr = {.name = "rotational", .mode = S_IRUGO | S_IWUSR }, 355 + .show = queue_show_nonrot, 356 + .store = queue_store_nonrot, 357 }; 358 359 static struct queue_sysfs_entry queue_nomerges_entry = { ··· 370 371 static struct queue_sysfs_entry queue_iostats_entry = { 372 .attr = {.name = "iostats", .mode = S_IRUGO | S_IWUSR }, 373 + .show = queue_show_iostats, 374 + .store = queue_store_iostats, 375 + }; 376 + 377 + static struct queue_sysfs_entry queue_random_entry = { 378 + .attr = {.name = "add_random", .mode = S_IRUGO | S_IWUSR }, 379 + .show = queue_show_random, 380 + .store = queue_store_random, 381 }; 382 383 static struct attribute *default_attrs[] = { ··· 394 &queue_nomerges_entry.attr, 395 &queue_rq_affinity_entry.attr, 396 &queue_iostats_entry.attr, 397 + &queue_random_entry.attr, 398 NULL, 399 }; 400
+4 -2
block/blk.h
··· 161 */ 162 static inline int blk_do_io_stat(struct request *rq) 163 { 164 - return rq->rq_disk && blk_rq_io_stat(rq) && 165 - (blk_fs_request(rq) || blk_discard_rq(rq)); 166 } 167 168 #endif
··· 161 */ 162 static inline int blk_do_io_stat(struct request *rq) 163 { 164 + return rq->rq_disk && 165 + (rq->cmd_flags & REQ_IO_STAT) && 166 + (rq->cmd_type == REQ_TYPE_FS || 167 + (rq->cmd_flags & REQ_DISCARD)); 168 } 169 170 #endif
+12 -9
block/cfq-iosched.c
··· 458 */ 459 static inline bool cfq_bio_sync(struct bio *bio) 460 { 461 - return bio_data_dir(bio) == READ || bio_rw_flagged(bio, BIO_RW_SYNCIO); 462 } 463 464 /* ··· 646 return rq1; 647 else if (rq_is_sync(rq2) && !rq_is_sync(rq1)) 648 return rq2; 649 - if (rq_is_meta(rq1) && !rq_is_meta(rq2)) 650 return rq1; 651 - else if (rq_is_meta(rq2) && !rq_is_meta(rq1)) 652 return rq2; 653 654 s1 = blk_rq_pos(rq1); ··· 1485 cfqq->cfqd->rq_queued--; 1486 cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg, 1487 rq_data_dir(rq), rq_is_sync(rq)); 1488 - if (rq_is_meta(rq)) { 1489 WARN_ON(!cfqq->meta_pending); 1490 cfqq->meta_pending--; 1491 } ··· 3177 * So both queues are sync. Let the new request get disk time if 3178 * it's a metadata request and the current queue is doing regular IO. 3179 */ 3180 - if (rq_is_meta(rq) && !cfqq->meta_pending) 3181 return true; 3182 3183 /* ··· 3231 struct cfq_io_context *cic = RQ_CIC(rq); 3232 3233 cfqd->rq_queued++; 3234 - if (rq_is_meta(rq)) 3235 cfqq->meta_pending++; 3236 3237 cfq_update_io_thinktime(cfqd, cic); ··· 3366 unsigned long now; 3367 3368 now = jiffies; 3369 - cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d", !!rq_noidle(rq)); 3370 3371 cfq_update_hw_tag(cfqd); 3372 ··· 3421 cfq_slice_expired(cfqd, 1); 3422 else if (sync && cfqq_empty && 3423 !cfq_close_cooperator(cfqd, cfqq)) { 3424 - cfqd->noidle_tree_requires_idle |= !rq_noidle(rq); 3425 /* 3426 * Idling is enabled for SYNC_WORKLOAD. 3427 * SYNC_NOIDLE_WORKLOAD idles at the end of the tree 3428 - * only if we processed at least one !rq_noidle request 3429 */ 3430 if (cfqd->serving_type == SYNC_WORKLOAD 3431 || cfqd->noidle_tree_requires_idle
··· 458 */ 459 static inline bool cfq_bio_sync(struct bio *bio) 460 { 461 + return bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC); 462 } 463 464 /* ··· 646 return rq1; 647 else if (rq_is_sync(rq2) && !rq_is_sync(rq1)) 648 return rq2; 649 + if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META)) 650 return rq1; 651 + else if ((rq2->cmd_flags & REQ_META) && 652 + !(rq1->cmd_flags & REQ_META)) 653 return rq2; 654 655 s1 = blk_rq_pos(rq1); ··· 1484 cfqq->cfqd->rq_queued--; 1485 cfq_blkiocg_update_io_remove_stats(&(RQ_CFQG(rq))->blkg, 1486 rq_data_dir(rq), rq_is_sync(rq)); 1487 + if (rq->cmd_flags & REQ_META) { 1488 WARN_ON(!cfqq->meta_pending); 1489 cfqq->meta_pending--; 1490 } ··· 3176 * So both queues are sync. Let the new request get disk time if 3177 * it's a metadata request and the current queue is doing regular IO. 3178 */ 3179 + if ((rq->cmd_flags & REQ_META) && !cfqq->meta_pending) 3180 return true; 3181 3182 /* ··· 3230 struct cfq_io_context *cic = RQ_CIC(rq); 3231 3232 cfqd->rq_queued++; 3233 + if (rq->cmd_flags & REQ_META) 3234 cfqq->meta_pending++; 3235 3236 cfq_update_io_thinktime(cfqd, cic); ··· 3365 unsigned long now; 3366 3367 now = jiffies; 3368 + cfq_log_cfqq(cfqd, cfqq, "complete rqnoidle %d", 3369 + !!(rq->cmd_flags & REQ_NOIDLE)); 3370 3371 cfq_update_hw_tag(cfqd); 3372 ··· 3419 cfq_slice_expired(cfqd, 1); 3420 else if (sync && cfqq_empty && 3421 !cfq_close_cooperator(cfqd, cfqq)) { 3422 + cfqd->noidle_tree_requires_idle |= 3423 + !(rq->cmd_flags & REQ_NOIDLE); 3424 /* 3425 * Idling is enabled for SYNC_WORKLOAD. 3426 * SYNC_NOIDLE_WORKLOAD idles at the end of the tree 3427 + * only if we processed at least one !REQ_NOIDLE request 3428 */ 3429 if (cfqd->serving_type == SYNC_WORKLOAD 3430 || cfqd->noidle_tree_requires_idle
-56
block/compat_ioctl.c
··· 535 return err; 536 } 537 538 - struct compat_blk_user_trace_setup { 539 - char name[32]; 540 - u16 act_mask; 541 - u32 buf_size; 542 - u32 buf_nr; 543 - compat_u64 start_lba; 544 - compat_u64 end_lba; 545 - u32 pid; 546 - }; 547 - #define BLKTRACESETUP32 _IOWR(0x12, 115, struct compat_blk_user_trace_setup) 548 - 549 - static int compat_blk_trace_setup(struct block_device *bdev, char __user *arg) 550 - { 551 - struct blk_user_trace_setup buts; 552 - struct compat_blk_user_trace_setup cbuts; 553 - struct request_queue *q; 554 - char b[BDEVNAME_SIZE]; 555 - int ret; 556 - 557 - q = bdev_get_queue(bdev); 558 - if (!q) 559 - return -ENXIO; 560 - 561 - if (copy_from_user(&cbuts, arg, sizeof(cbuts))) 562 - return -EFAULT; 563 - 564 - bdevname(bdev, b); 565 - 566 - buts = (struct blk_user_trace_setup) { 567 - .act_mask = cbuts.act_mask, 568 - .buf_size = cbuts.buf_size, 569 - .buf_nr = cbuts.buf_nr, 570 - .start_lba = cbuts.start_lba, 571 - .end_lba = cbuts.end_lba, 572 - .pid = cbuts.pid, 573 - }; 574 - memcpy(&buts.name, &cbuts.name, 32); 575 - 576 - mutex_lock(&bdev->bd_mutex); 577 - ret = do_blk_trace_setup(q, b, bdev->bd_dev, bdev, &buts); 578 - mutex_unlock(&bdev->bd_mutex); 579 - if (ret) 580 - return ret; 581 - 582 - if (copy_to_user(arg, &buts.name, 32)) 583 - return -EFAULT; 584 - 585 - return 0; 586 - } 587 - 588 static int compat_blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode, 589 unsigned cmd, unsigned long arg) 590 { ··· 752 return compat_put_u64(arg, bdev->bd_inode->i_size); 753 754 case BLKTRACESETUP32: 755 - lock_kernel(); 756 - ret = compat_blk_trace_setup(bdev, compat_ptr(arg)); 757 - unlock_kernel(); 758 - return ret; 759 case BLKTRACESTART: /* compatible */ 760 case BLKTRACESTOP: /* compatible */ 761 case BLKTRACETEARDOWN: /* compatible */ 762 - lock_kernel(); 763 ret = blk_trace_ioctl(bdev, cmd, compat_ptr(arg)); 764 - unlock_kernel(); 765 return ret; 766 default: 767 if (disk->fops->compat_ioctl)
··· 535 return err; 536 } 537 538 static int compat_blkdev_driver_ioctl(struct block_device *bdev, fmode_t mode, 539 unsigned cmd, unsigned long arg) 540 { ··· 802 return compat_put_u64(arg, bdev->bd_inode->i_size); 803 804 case BLKTRACESETUP32: 805 case BLKTRACESTART: /* compatible */ 806 case BLKTRACESTOP: /* compatible */ 807 case BLKTRACETEARDOWN: /* compatible */ 808 ret = blk_trace_ioctl(bdev, cmd, compat_ptr(arg)); 809 return ret; 810 default: 811 if (disk->fops->compat_ioctl)
+11 -8
block/elevator.c
··· 79 /* 80 * Don't merge file system requests and discard requests 81 */ 82 - if (bio_rw_flagged(bio, BIO_RW_DISCARD) != 83 - bio_rw_flagged(rq->bio, BIO_RW_DISCARD)) 84 return 0; 85 86 /* ··· 427 list_for_each_prev(entry, &q->queue_head) { 428 struct request *pos = list_entry_rq(entry); 429 430 - if (blk_discard_rq(rq) != blk_discard_rq(pos)) 431 break; 432 if (rq_data_dir(rq) != rq_data_dir(pos)) 433 break; ··· 558 */ 559 if (blk_account_rq(rq)) { 560 q->in_flight[rq_is_sync(rq)]--; 561 - if (blk_sorted_rq(rq)) 562 elv_deactivate_rq(q, rq); 563 } 564 ··· 644 break; 645 646 case ELEVATOR_INSERT_SORT: 647 - BUG_ON(!blk_fs_request(rq) && !blk_discard_rq(rq)); 648 rq->cmd_flags |= REQ_SORTED; 649 q->nr_sorted++; 650 if (rq_mergeable(rq)) { ··· 717 /* 718 * toggle ordered color 719 */ 720 - if (blk_barrier_rq(rq)) 721 q->ordcolor ^= 1; 722 723 /* ··· 730 * this request is scheduling boundary, update 731 * end_sector 732 */ 733 - if (blk_fs_request(rq) || blk_discard_rq(rq)) { 734 q->end_sector = rq_end_sector(rq); 735 q->boundary_rq = rq; 736 } ··· 845 */ 846 if (blk_account_rq(rq)) { 847 q->in_flight[rq_is_sync(rq)]--; 848 - if (blk_sorted_rq(rq) && e->ops->elevator_completed_req_fn) 849 e->ops->elevator_completed_req_fn(q, rq); 850 } 851
··· 79 /* 80 * Don't merge file system requests and discard requests 81 */ 82 + if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD)) 83 return 0; 84 85 /* ··· 428 list_for_each_prev(entry, &q->queue_head) { 429 struct request *pos = list_entry_rq(entry); 430 431 + if ((rq->cmd_flags & REQ_DISCARD) != 432 + (pos->cmd_flags & REQ_DISCARD)) 433 break; 434 if (rq_data_dir(rq) != rq_data_dir(pos)) 435 break; ··· 558 */ 559 if (blk_account_rq(rq)) { 560 q->in_flight[rq_is_sync(rq)]--; 561 + if (rq->cmd_flags & REQ_SORTED) 562 elv_deactivate_rq(q, rq); 563 } 564 ··· 644 break; 645 646 case ELEVATOR_INSERT_SORT: 647 + BUG_ON(rq->cmd_type != REQ_TYPE_FS && 648 + !(rq->cmd_flags & REQ_DISCARD)); 649 rq->cmd_flags |= REQ_SORTED; 650 q->nr_sorted++; 651 if (rq_mergeable(rq)) { ··· 716 /* 717 * toggle ordered color 718 */ 719 + if (rq->cmd_flags & REQ_HARDBARRIER) 720 q->ordcolor ^= 1; 721 722 /* ··· 729 * this request is scheduling boundary, update 730 * end_sector 731 */ 732 + if (rq->cmd_type == REQ_TYPE_FS || 733 + (rq->cmd_flags & REQ_DISCARD)) { 734 q->end_sector = rq_end_sector(rq); 735 q->boundary_rq = rq; 736 } ··· 843 */ 844 if (blk_account_rq(rq)) { 845 q->in_flight[rq_is_sync(rq)]--; 846 + if ((rq->cmd_flags & REQ_SORTED) && 847 + e->ops->elevator_completed_req_fn) 848 e->ops->elevator_completed_req_fn(q, rq); 849 } 850
+1 -20
block/ioctl.c
··· 163 unsigned cmd, unsigned long arg) 164 { 165 struct gendisk *disk = bdev->bd_disk; 166 - int ret; 167 168 if (disk->fops->ioctl) 169 return disk->fops->ioctl(bdev, mode, cmd, arg); 170 - 171 - if (disk->fops->locked_ioctl) { 172 - lock_kernel(); 173 - ret = disk->fops->locked_ioctl(bdev, mode, cmd, arg); 174 - unlock_kernel(); 175 - return ret; 176 - } 177 178 return -ENOTTY; 179 } ··· 177 EXPORT_SYMBOL_GPL(__blkdev_driver_ioctl); 178 179 /* 180 - * always keep this in sync with compat_blkdev_ioctl() and 181 - * compat_blkdev_locked_ioctl() 182 */ 183 int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, 184 unsigned long arg) ··· 197 if (ret != -EINVAL && ret != -ENOTTY) 198 return ret; 199 200 - lock_kernel(); 201 fsync_bdev(bdev); 202 invalidate_bdev(bdev); 203 - unlock_kernel(); 204 return 0; 205 206 case BLKROSET: ··· 210 return -EACCES; 211 if (get_user(n, (int __user *)(arg))) 212 return -EFAULT; 213 - lock_kernel(); 214 set_device_ro(bdev, n); 215 - unlock_kernel(); 216 return 0; 217 218 case BLKDISCARD: { ··· 296 bd_release(bdev); 297 return ret; 298 case BLKPG: 299 - lock_kernel(); 300 ret = blkpg_ioctl(bdev, (struct blkpg_ioctl_arg __user *) arg); 301 - unlock_kernel(); 302 break; 303 case BLKRRPART: 304 - lock_kernel(); 305 ret = blkdev_reread_part(bdev); 306 - unlock_kernel(); 307 break; 308 case BLKGETSIZE: 309 size = bdev->bd_inode->i_size; ··· 312 case BLKTRACESTOP: 313 case BLKTRACESETUP: 314 case BLKTRACETEARDOWN: 315 - lock_kernel(); 316 ret = blk_trace_ioctl(bdev, cmd, (char __user *) arg); 317 - unlock_kernel(); 318 break; 319 default: 320 ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
··· 163 unsigned cmd, unsigned long arg) 164 { 165 struct gendisk *disk = bdev->bd_disk; 166 167 if (disk->fops->ioctl) 168 return disk->fops->ioctl(bdev, mode, cmd, arg); 169 170 return -ENOTTY; 171 } ··· 185 EXPORT_SYMBOL_GPL(__blkdev_driver_ioctl); 186 187 /* 188 + * always keep this in sync with compat_blkdev_ioctl() 189 */ 190 int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd, 191 unsigned long arg) ··· 206 if (ret != -EINVAL && ret != -ENOTTY) 207 return ret; 208 209 fsync_bdev(bdev); 210 invalidate_bdev(bdev); 211 return 0; 212 213 case BLKROSET: ··· 221 return -EACCES; 222 if (get_user(n, (int __user *)(arg))) 223 return -EFAULT; 224 set_device_ro(bdev, n); 225 return 0; 226 227 case BLKDISCARD: { ··· 309 bd_release(bdev); 310 return ret; 311 case BLKPG: 312 ret = blkpg_ioctl(bdev, (struct blkpg_ioctl_arg __user *) arg); 313 break; 314 case BLKRRPART: 315 ret = blkdev_reread_part(bdev); 316 break; 317 case BLKGETSIZE: 318 size = bdev->bd_inode->i_size; ··· 329 case BLKTRACESTOP: 330 case BLKTRACESETUP: 331 case BLKTRACETEARDOWN: 332 ret = blk_trace_ioctl(bdev, cmd, (char __user *) arg); 333 break; 334 default: 335 ret = __blkdev_driver_ioctl(bdev, mode, cmd, arg);
+2 -2
drivers/ata/libata-scsi.c
··· 1111 */ 1112 static int atapi_drain_needed(struct request *rq) 1113 { 1114 - if (likely(!blk_pc_request(rq))) 1115 return 0; 1116 1117 - if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_RW)) 1118 return 0; 1119 1120 return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC;
··· 1111 */ 1112 static int atapi_drain_needed(struct request *rq) 1113 { 1114 + if (likely(rq->cmd_type != REQ_TYPE_BLOCK_PC)) 1115 return 0; 1116 1117 + if (!blk_rq_bytes(rq) || (rq->cmd_flags & REQ_WRITE)) 1118 return 0; 1119 1120 return atapi_cmd_type(rq->cmd[0]) == ATAPI_MISC;
+9 -4
drivers/block/DAC960.c
··· 79 struct gendisk *disk = bdev->bd_disk; 80 DAC960_Controller_T *p = disk->queue->queuedata; 81 int drive_nr = (long)disk->private_data; 82 83 if (p->FirmwareType == DAC960_V1_Controller) { 84 if (p->V1.LogicalDriveInformation[drive_nr]. 85 LogicalDriveState == DAC960_V1_LogicalDrive_Offline) 86 - return -ENXIO; 87 } else { 88 DAC960_V2_LogicalDeviceInfo_T *i = 89 p->V2.LogicalDeviceInformation[drive_nr]; 90 if (!i || i->LogicalDeviceState == DAC960_V2_LogicalDevice_Offline) 91 - return -ENXIO; 92 } 93 94 check_disk_change(bdev); 95 96 if (!get_capacity(p->disks[drive_nr])) 97 - return -ENXIO; 98 - return 0; 99 } 100 101 static int DAC960_getgeo(struct block_device *bdev, struct hd_geometry *geo)
··· 79 struct gendisk *disk = bdev->bd_disk; 80 DAC960_Controller_T *p = disk->queue->queuedata; 81 int drive_nr = (long)disk->private_data; 82 + int ret = -ENXIO; 83 84 + lock_kernel(); 85 if (p->FirmwareType == DAC960_V1_Controller) { 86 if (p->V1.LogicalDriveInformation[drive_nr]. 87 LogicalDriveState == DAC960_V1_LogicalDrive_Offline) 88 + goto out; 89 } else { 90 DAC960_V2_LogicalDeviceInfo_T *i = 91 p->V2.LogicalDeviceInformation[drive_nr]; 92 if (!i || i->LogicalDeviceState == DAC960_V2_LogicalDevice_Offline) 93 + goto out; 94 } 95 96 check_disk_change(bdev); 97 98 if (!get_capacity(p->disks[drive_nr])) 99 + goto out; 100 + ret = 0; 101 + out: 102 + unlock_kernel(); 103 + return ret; 104 } 105 106 static int DAC960_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+25 -4
drivers/block/amiflop.c
··· 60 #include <linux/hdreg.h> 61 #include <linux/delay.h> 62 #include <linux/init.h> 63 #include <linux/amifdreg.h> 64 #include <linux/amifd.h> 65 #include <linux/buffer_head.h> ··· 1424 return 0; 1425 } 1426 1427 - static int fd_ioctl(struct block_device *bdev, fmode_t mode, 1428 unsigned int cmd, unsigned long param) 1429 { 1430 struct amiga_floppy_struct *p = bdev->bd_disk->private_data; ··· 1501 return 0; 1502 } 1503 1504 static void fd_probe(int dev) 1505 { 1506 unsigned long code; ··· 1555 int old_dev; 1556 unsigned long flags; 1557 1558 old_dev = fd_device[drive]; 1559 1560 - if (fd_ref[drive] && old_dev != system) 1561 return -EBUSY; 1562 1563 if (mode & (FMODE_READ|FMODE_WRITE)) { 1564 check_disk_change(bdev); ··· 1574 fd_deselect (drive); 1575 rel_fdc(); 1576 1577 - if (wrprot) 1578 return -EROFS; 1579 } 1580 } 1581 ··· 1594 printk(KERN_INFO "fd%d: accessing %s-disk with %s-layout\n",drive, 1595 unit[drive].type->name, data_types[system].name); 1596 1597 return 0; 1598 } 1599 ··· 1603 struct amiga_floppy_struct *p = disk->private_data; 1604 int drive = p - unit; 1605 1606 if (unit[drive].dirty == 1) { 1607 del_timer (flush_track_timer + drive); 1608 non_int_flush_track (drive); ··· 1617 /* the mod_use counter is handled this way */ 1618 floppy_off (drive | 0x40000000); 1619 #endif 1620 return 0; 1621 } 1622 ··· 1659 .owner = THIS_MODULE, 1660 .open = floppy_open, 1661 .release = floppy_release, 1662 - .locked_ioctl = fd_ioctl, 1663 .getgeo = fd_getgeo, 1664 .media_changed = amiga_floppy_change, 1665 };
··· 60 #include <linux/hdreg.h> 61 #include <linux/delay.h> 62 #include <linux/init.h> 63 + #include <linux/smp_lock.h> 64 #include <linux/amifdreg.h> 65 #include <linux/amifd.h> 66 #include <linux/buffer_head.h> ··· 1423 return 0; 1424 } 1425 1426 + static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, 1427 unsigned int cmd, unsigned long param) 1428 { 1429 struct amiga_floppy_struct *p = bdev->bd_disk->private_data; ··· 1500 return 0; 1501 } 1502 1503 + static int fd_ioctl(struct block_device *bdev, fmode_t mode, 1504 + unsigned int cmd, unsigned long param) 1505 + { 1506 + int ret; 1507 + 1508 + lock_kernel(); 1509 + ret = fd_locked_ioctl(bdev, mode, cmd, param); 1510 + unlock_kernel(); 1511 + 1512 + return ret; 1513 + } 1514 + 1515 static void fd_probe(int dev) 1516 { 1517 unsigned long code; ··· 1542 int old_dev; 1543 unsigned long flags; 1544 1545 + lock_kernel(); 1546 old_dev = fd_device[drive]; 1547 1548 + if (fd_ref[drive] && old_dev != system) { 1549 + unlock_kernel(); 1550 return -EBUSY; 1551 + } 1552 1553 if (mode & (FMODE_READ|FMODE_WRITE)) { 1554 check_disk_change(bdev); ··· 1558 fd_deselect (drive); 1559 rel_fdc(); 1560 1561 + if (wrprot) { 1562 + unlock_kernel(); 1563 return -EROFS; 1564 + } 1565 } 1566 } 1567 ··· 1576 printk(KERN_INFO "fd%d: accessing %s-disk with %s-layout\n",drive, 1577 unit[drive].type->name, data_types[system].name); 1578 1579 + unlock_kernel(); 1580 return 0; 1581 } 1582 ··· 1584 struct amiga_floppy_struct *p = disk->private_data; 1585 int drive = p - unit; 1586 1587 + lock_kernel(); 1588 if (unit[drive].dirty == 1) { 1589 del_timer (flush_track_timer + drive); 1590 non_int_flush_track (drive); ··· 1597 /* the mod_use counter is handled this way */ 1598 floppy_off (drive | 0x40000000); 1599 #endif 1600 + unlock_kernel(); 1601 return 0; 1602 } 1603 ··· 1638 .owner = THIS_MODULE, 1639 .open = floppy_open, 1640 .release = floppy_release, 1641 + .ioctl = fd_ioctl, 1642 .getgeo = fd_getgeo, 1643 .media_changed = amiga_floppy_change, 1644 };
+5 -1
drivers/block/aoe/aoeblk.c
··· 12 #include <linux/slab.h> 13 #include <linux/genhd.h> 14 #include <linux/netdevice.h> 15 #include "aoe.h" 16 17 static struct kmem_cache *buf_pool_cache; ··· 125 struct aoedev *d = bdev->bd_disk->private_data; 126 ulong flags; 127 128 spin_lock_irqsave(&d->lock, flags); 129 if (d->flags & DEVFL_UP) { 130 d->nopen++; 131 spin_unlock_irqrestore(&d->lock, flags); 132 return 0; 133 } 134 spin_unlock_irqrestore(&d->lock, flags); 135 return -ENODEV; 136 } 137 ··· 177 BUG(); 178 bio_endio(bio, -ENXIO); 179 return 0; 180 - } else if (bio_rw_flagged(bio, BIO_RW_BARRIER)) { 181 bio_endio(bio, -EOPNOTSUPP); 182 return 0; 183 } else if (bio->bi_io_vec == NULL) {
··· 12 #include <linux/slab.h> 13 #include <linux/genhd.h> 14 #include <linux/netdevice.h> 15 + #include <linux/smp_lock.h> 16 #include "aoe.h" 17 18 static struct kmem_cache *buf_pool_cache; ··· 124 struct aoedev *d = bdev->bd_disk->private_data; 125 ulong flags; 126 127 + lock_kernel(); 128 spin_lock_irqsave(&d->lock, flags); 129 if (d->flags & DEVFL_UP) { 130 d->nopen++; 131 spin_unlock_irqrestore(&d->lock, flags); 132 + unlock_kernel(); 133 return 0; 134 } 135 spin_unlock_irqrestore(&d->lock, flags); 136 + unlock_kernel(); 137 return -ENODEV; 138 } 139 ··· 173 BUG(); 174 bio_endio(bio, -ENXIO); 175 return 0; 176 + } else if (bio->bi_rw & REQ_HARDBARRIER) { 177 bio_endio(bio, -EOPNOTSUPP); 178 return 0; 179 } else if (bio->bi_io_vec == NULL) {
+28 -4
drivers/block/ataflop.c
··· 67 #include <linux/delay.h> 68 #include <linux/init.h> 69 #include <linux/blkdev.h> 70 71 #include <asm/atafd.h> 72 #include <asm/atafdreg.h> ··· 360 static void finish_fdc_done( int dummy ); 361 static void setup_req_params( int drive ); 362 static void redo_fd_request( void); 363 - static int fd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int 364 cmd, unsigned long param); 365 static void fd_probe( int drive ); 366 static int fd_test_drive_present( int drive ); ··· 1481 atari_enable_irq( IRQ_MFP_FDC ); 1482 } 1483 1484 - static int fd_ioctl(struct block_device *bdev, fmode_t mode, 1485 unsigned int cmd, unsigned long param) 1486 { 1487 struct gendisk *disk = bdev->bd_disk; ··· 1666 } 1667 } 1668 1669 1670 /* Initialize the 'unit' variable for drive 'drive' */ 1671 ··· 1850 return 0; 1851 } 1852 1853 1854 static int floppy_release(struct gendisk *disk, fmode_t mode) 1855 { 1856 struct atari_floppy_struct *p = disk->private_data; 1857 if (p->ref < 0) 1858 p->ref = 0; 1859 else if (!p->ref--) { 1860 printk(KERN_ERR "floppy_release with fd_ref == 0"); 1861 p->ref = 0; 1862 } 1863 return 0; 1864 } 1865 1866 static const struct block_device_operations floppy_fops = { 1867 .owner = THIS_MODULE, 1868 - .open = floppy_open, 1869 .release = floppy_release, 1870 - .locked_ioctl = fd_ioctl, 1871 .media_changed = check_floppy_change, 1872 .revalidate_disk= floppy_revalidate, 1873 };
··· 67 #include <linux/delay.h> 68 #include <linux/init.h> 69 #include <linux/blkdev.h> 70 + #include <linux/smp_lock.h> 71 72 #include <asm/atafd.h> 73 #include <asm/atafdreg.h> ··· 359 static void finish_fdc_done( int dummy ); 360 static void setup_req_params( int drive ); 361 static void redo_fd_request( void); 362 + static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int 363 cmd, unsigned long param); 364 static void fd_probe( int drive ); 365 static int fd_test_drive_present( int drive ); ··· 1480 atari_enable_irq( IRQ_MFP_FDC ); 1481 } 1482 1483 + static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, 1484 unsigned int cmd, unsigned long param) 1485 { 1486 struct gendisk *disk = bdev->bd_disk; ··· 1665 } 1666 } 1667 1668 + static int fd_ioctl(struct block_device *bdev, fmode_t mode, 1669 + unsigned int cmd, unsigned long arg) 1670 + { 1671 + int ret; 1672 + 1673 + lock_kernel(); 1674 + ret = fd_locked_ioctl(bdev, mode, cmd, arg); 1675 + unlock_kernel(); 1676 + 1677 + return ret; 1678 + } 1679 1680 /* Initialize the 'unit' variable for drive 'drive' */ 1681 ··· 1838 return 0; 1839 } 1840 1841 + static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode) 1842 + { 1843 + int ret; 1844 + 1845 + lock_kernel(); 1846 + ret = floppy_open(bdev, mode); 1847 + unlock_kernel(); 1848 + 1849 + return ret; 1850 + } 1851 1852 static int floppy_release(struct gendisk *disk, fmode_t mode) 1853 { 1854 struct atari_floppy_struct *p = disk->private_data; 1855 + lock_kernel(); 1856 if (p->ref < 0) 1857 p->ref = 0; 1858 else if (!p->ref--) { 1859 printk(KERN_ERR "floppy_release with fd_ref == 0"); 1860 p->ref = 0; 1861 } 1862 + unlock_kernel(); 1863 return 0; 1864 } 1865 1866 static const struct block_device_operations floppy_fops = { 1867 .owner = THIS_MODULE, 1868 + .open = floppy_unlocked_open, 1869 .release = floppy_release, 1870 + .ioctl = fd_ioctl, 1871 .media_changed = check_floppy_change, 1872 .revalidate_disk= floppy_revalidate, 1873 };
+6 -3
drivers/block/brd.c
··· 15 #include <linux/blkdev.h> 16 #include <linux/bio.h> 17 #include <linux/highmem.h> 18 #include <linux/radix-tree.h> 19 #include <linux/buffer_head.h> /* invalidate_bh_lrus() */ 20 #include <linux/slab.h> ··· 341 get_capacity(bdev->bd_disk)) 342 goto out; 343 344 - if (unlikely(bio_rw_flagged(bio, BIO_RW_DISCARD))) { 345 err = 0; 346 discard_from_brd(brd, sector, bio->bi_size); 347 goto out; ··· 402 * ram device BLKFLSBUF has special semantics, we want to actually 403 * release and destroy the ramdisk data. 404 */ 405 mutex_lock(&bdev->bd_mutex); 406 error = -EBUSY; 407 if (bdev->bd_openers <= 1) { ··· 419 error = 0; 420 } 421 mutex_unlock(&bdev->bd_mutex); 422 423 return error; 424 } 425 426 static const struct block_device_operations brd_fops = { 427 .owner = THIS_MODULE, 428 - .locked_ioctl = brd_ioctl, 429 #ifdef CONFIG_BLK_DEV_XIP 430 .direct_access = brd_direct_access, 431 #endif ··· 482 if (!brd->brd_queue) 483 goto out_free_dev; 484 blk_queue_make_request(brd->brd_queue, brd_make_request); 485 - blk_queue_ordered(brd->brd_queue, QUEUE_ORDERED_TAG, NULL); 486 blk_queue_max_hw_sectors(brd->brd_queue, 1024); 487 blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY); 488
··· 15 #include <linux/blkdev.h> 16 #include <linux/bio.h> 17 #include <linux/highmem.h> 18 + #include <linux/smp_lock.h> 19 #include <linux/radix-tree.h> 20 #include <linux/buffer_head.h> /* invalidate_bh_lrus() */ 21 #include <linux/slab.h> ··· 340 get_capacity(bdev->bd_disk)) 341 goto out; 342 343 + if (unlikely(bio->bi_rw & REQ_DISCARD)) { 344 err = 0; 345 discard_from_brd(brd, sector, bio->bi_size); 346 goto out; ··· 401 * ram device BLKFLSBUF has special semantics, we want to actually 402 * release and destroy the ramdisk data. 403 */ 404 + lock_kernel(); 405 mutex_lock(&bdev->bd_mutex); 406 error = -EBUSY; 407 if (bdev->bd_openers <= 1) { ··· 417 error = 0; 418 } 419 mutex_unlock(&bdev->bd_mutex); 420 + unlock_kernel(); 421 422 return error; 423 } 424 425 static const struct block_device_operations brd_fops = { 426 .owner = THIS_MODULE, 427 + .ioctl = brd_ioctl, 428 #ifdef CONFIG_BLK_DEV_XIP 429 .direct_access = brd_direct_access, 430 #endif ··· 479 if (!brd->brd_queue) 480 goto out_free_dev; 481 blk_queue_make_request(brd->brd_queue, brd_make_request); 482 + blk_queue_ordered(brd->brd_queue, QUEUE_ORDERED_TAG); 483 blk_queue_max_hw_sectors(brd->brd_queue, 1024); 484 blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY); 485
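The brd hunk also picks up the reduced blk_queue_ordered() signature, which no longer takes a prepare-flush callback. A small queue-setup sketch assuming only what the hunk itself shows; ex_setup_queue is a hypothetical helper:

#include <linux/blkdev.h>

static void ex_setup_queue(struct request_queue *q)
{
	/* ordered mode only; the old third argument (prepare_flush_fn) is gone */
	blk_queue_ordered(q, QUEUE_ORDERED_TAG);
	blk_queue_max_hw_sectors(q, 1024);
	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
}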
+1296 -899
drivers/block/cciss.c
··· 56 #include <linux/kthread.h> 57 58 #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin)) 59 - #define DRIVER_NAME "HP CISS Driver (v 3.6.20)" 60 - #define DRIVER_VERSION CCISS_DRIVER_VERSION(3, 6, 20) 61 62 /* Embedded module documentation macros - see modules.h */ 63 MODULE_AUTHOR("Hewlett-Packard Company"); 64 MODULE_DESCRIPTION("Driver for HP Smart Array Controllers"); 65 - MODULE_SUPPORTED_DEVICE("HP SA5i SA5i+ SA532 SA5300 SA5312 SA641 SA642 SA6400" 66 - " SA6i P600 P800 P400 P400i E200 E200i E500 P700m" 67 - " Smart Array G2 Series SAS/SATA Controllers"); 68 - MODULE_VERSION("3.6.20"); 69 MODULE_LICENSE("GPL"); 70 71 static int cciss_allow_hpsa; ··· 105 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249}, 106 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A}, 107 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B}, 108 {0,} 109 }; 110 ··· 149 {0x3249103C, "Smart Array P812", &SA5_access}, 150 {0x324A103C, "Smart Array P712m", &SA5_access}, 151 {0x324B103C, "Smart Array P711m", &SA5_access}, 152 }; 153 154 /* How long to wait (in milliseconds) for board to go into simple mode */ ··· 175 static LIST_HEAD(scan_q); 176 177 static void do_cciss_request(struct request_queue *q); 178 - static irqreturn_t do_cciss_intr(int irq, void *dev_id); 179 static int cciss_open(struct block_device *bdev, fmode_t mode); 180 static int cciss_release(struct gendisk *disk, fmode_t mode); 181 static int cciss_ioctl(struct block_device *bdev, fmode_t mode, 182 unsigned int cmd, unsigned long arg); 183 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo); ··· 191 static int deregister_disk(ctlr_info_t *h, int drv_index, 192 int clear_all, int via_ioctl); 193 194 - static void cciss_read_capacity(int ctlr, int logvol, 195 sector_t *total_size, unsigned int *block_size); 196 - static void cciss_read_capacity_16(int ctlr, int logvol, 197 sector_t *total_size, unsigned int *block_size); 198 - static void cciss_geometry_inquiry(int ctlr, int logvol, 199 sector_t total_size, 200 unsigned int block_size, InquiryData_struct *inq_buff, 201 drive_info_struct *drv); 202 - static void __devinit cciss_interrupt_mode(ctlr_info_t *, struct pci_dev *, 203 - __u32); 204 static void start_io(ctlr_info_t *h); 205 - static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size, 206 __u8 page_code, unsigned char scsi3addr[], 207 int cmd_type); 208 static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c, 209 int attempt_retry); 210 static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c); 211 212 - static void fail_all_cmds(unsigned long ctlr); 213 static int add_to_scan_list(struct ctlr_info *h); 214 static int scan_thread(void *data); 215 static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c); ··· 215 static void cciss_device_release(struct device *dev); 216 static void cciss_free_gendisk(ctlr_info_t *h, int drv_index); 217 static void cciss_free_drive_info(ctlr_info_t *h, int drv_index); 218 219 #ifdef CONFIG_PROC_FS 220 - static void cciss_procinit(int i); 221 #else 222 - static void cciss_procinit(int i) 223 { 224 } 225 #endif /* CONFIG_PROC_FS */ ··· 243 244 static const struct block_device_operations cciss_fops = { 245 .owner = THIS_MODULE, 246 - .open = cciss_open, 247 .release = cciss_release, 248 - .locked_ioctl = cciss_ioctl, 249 .getgeo = cciss_getgeo, 250 #ifdef CONFIG_COMPAT 251 .compat_ioctl = cciss_compat_ioctl, 252 #endif 253 .revalidate_disk = cciss_revalidate, 254 }; 255 256 /* 257 * 
Enqueuing and dequeuing functions for cmdlists. ··· 287 } 288 289 hlist_del_init(&c->list); 290 } 291 292 static void cciss_free_sg_chain_blocks(SGDescriptor_struct **cmd_sg_list, ··· 410 h->product_name, 411 (unsigned long)h->board_id, 412 h->firm_ver[0], h->firm_ver[1], h->firm_ver[2], 413 - h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT], 414 h->num_luns, 415 h->Qdepth, h->commands_outstanding, 416 h->maxQsinceinit, h->max_outstanding, h->maxSG); 417 418 #ifdef CONFIG_CISS_SCSI_TAPE 419 - cciss_seq_tape_report(seq, h->ctlr); 420 #endif /* CONFIG_CISS_SCSI_TAPE */ 421 } 422 423 static void *cciss_seq_start(struct seq_file *seq, loff_t *pos) 424 { 425 ctlr_info_t *h = seq->private; 426 - unsigned ctlr = h->ctlr; 427 unsigned long flags; 428 429 /* prevent displaying bogus info during configuration 430 * or deconfiguration of a logical volume 431 */ 432 - spin_lock_irqsave(CCISS_LOCK(ctlr), flags); 433 if (h->busy_configuring) { 434 - spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); 435 return ERR_PTR(-EBUSY); 436 } 437 h->busy_configuring = 1; 438 - spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); 439 440 if (*pos == 0) 441 cciss_seq_show_header(seq); ··· 542 struct seq_file *seq = file->private_data; 543 ctlr_info_t *h = seq->private; 544 545 - err = cciss_engage_scsi(h->ctlr); 546 if (err == 0) 547 err = length; 548 } else ··· 565 .write = cciss_proc_write, 566 }; 567 568 - static void __devinit cciss_procinit(int i) 569 { 570 struct proc_dir_entry *pde; 571 ··· 573 proc_cciss = proc_mkdir("driver/cciss", NULL); 574 if (!proc_cciss) 575 return; 576 - pde = proc_create_data(hba[i]->devname, S_IWUSR | S_IRUSR | S_IRGRP | 577 S_IROTH, proc_cciss, 578 - &cciss_proc_fops, hba[i]); 579 } 580 #endif /* CONFIG_PROC_FS */ 581 ··· 608 unsigned long flags; 609 int ret = 0; 610 611 - spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); 612 if (h->busy_configuring) 613 ret = -EBUSY; 614 else 615 memcpy(sn, drv->serial_no, sizeof(sn)); 616 - spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 617 618 if (ret) 619 return ret; ··· 638 unsigned long flags; 639 int ret = 0; 640 641 - spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); 642 if (h->busy_configuring) 643 ret = -EBUSY; 644 else 645 memcpy(vendor, drv->vendor, VENDOR_LEN + 1); 646 - spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 647 648 if (ret) 649 return ret; ··· 662 unsigned long flags; 663 int ret = 0; 664 665 - spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); 666 if (h->busy_configuring) 667 ret = -EBUSY; 668 else 669 memcpy(model, drv->model, MODEL_LEN + 1); 670 - spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 671 672 if (ret) 673 return ret; ··· 686 unsigned long flags; 687 int ret = 0; 688 689 - spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); 690 if (h->busy_configuring) 691 ret = -EBUSY; 692 else 693 memcpy(rev, drv->rev, REV_LEN + 1); 694 - spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 695 696 if (ret) 697 return ret; ··· 708 unsigned long flags; 709 unsigned char lunid[8]; 710 711 - spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); 712 if (h->busy_configuring) { 713 - spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 714 return -EBUSY; 715 } 716 if (!drv->heads) { 717 - spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 718 return -ENOTTY; 719 } 720 memcpy(lunid, drv->LunID, sizeof(lunid)); 721 - spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 722 return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 723 lunid[0], lunid[1], lunid[2], lunid[3], 724 lunid[4], lunid[5], lunid[6], lunid[7]); ··· 733 int raid; 734 
unsigned long flags; 735 736 - spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); 737 if (h->busy_configuring) { 738 - spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 739 return -EBUSY; 740 } 741 raid = drv->raid_level; 742 - spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 743 if (raid < 0 || raid > RAID_UNKNOWN) 744 raid = RAID_UNKNOWN; 745 ··· 756 unsigned long flags; 757 int count; 758 759 - spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); 760 if (h->busy_configuring) { 761 - spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 762 return -EBUSY; 763 } 764 count = drv->usage_count; 765 - spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 766 return snprintf(buf, 20, "%d\n", count); 767 } 768 static DEVICE_ATTR(usage_count, S_IRUGO, cciss_show_usage_count, NULL); ··· 907 /* 908 * For operations that cannot sleep, a command block is allocated at init, 909 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track 910 - * which ones are free or in use. For operations that can wait for kmalloc 911 - * to possible sleep, this routine can be called with get_from_pool set to 0. 912 - * cmd_free() MUST be called with a got_from_pool set to 0 if cmd_alloc was. 913 */ 914 - static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool) 915 { 916 CommandList_struct *c; 917 int i; 918 u64bit temp64; 919 dma_addr_t cmd_dma_handle, err_dma_handle; 920 921 - if (!get_from_pool) { 922 - c = (CommandList_struct *) pci_alloc_consistent(h->pdev, 923 - sizeof(CommandList_struct), &cmd_dma_handle); 924 - if (c == NULL) 925 return NULL; 926 - memset(c, 0, sizeof(CommandList_struct)); 927 928 - c->cmdindex = -1; 929 - 930 - c->err_info = (ErrorInfo_struct *) 931 - pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct), 932 - &err_dma_handle); 933 - 934 - if (c->err_info == NULL) { 935 - pci_free_consistent(h->pdev, 936 - sizeof(CommandList_struct), c, cmd_dma_handle); 937 - return NULL; 938 - } 939 - memset(c->err_info, 0, sizeof(ErrorInfo_struct)); 940 - } else { /* get it out of the controllers pool */ 941 - 942 - do { 943 - i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds); 944 - if (i == h->nr_cmds) 945 - return NULL; 946 - } while (test_and_set_bit 947 - (i & (BITS_PER_LONG - 1), 948 - h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0); 949 - #ifdef CCISS_DEBUG 950 - printk(KERN_DEBUG "cciss: using command buffer %d\n", i); 951 - #endif 952 - c = h->cmd_pool + i; 953 - memset(c, 0, sizeof(CommandList_struct)); 954 - cmd_dma_handle = h->cmd_pool_dhandle 955 - + i * sizeof(CommandList_struct); 956 - c->err_info = h->errinfo_pool + i; 957 - memset(c->err_info, 0, sizeof(ErrorInfo_struct)); 958 - err_dma_handle = h->errinfo_pool_dhandle 959 - + i * sizeof(ErrorInfo_struct); 960 - h->nr_allocs++; 961 - 962 - c->cmdindex = i; 963 - } 964 965 INIT_HLIST_NODE(&c->list); 966 c->busaddr = (__u32) cmd_dma_handle; ··· 944 return c; 945 } 946 947 - /* 948 - * Frees a command block that was previously allocated with cmd_alloc(). 
949 */ 950 - static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool) 951 { 952 int i; 953 u64bit temp64; 954 955 - if (!got_from_pool) { 956 - temp64.val32.lower = c->ErrDesc.Addr.lower; 957 - temp64.val32.upper = c->ErrDesc.Addr.upper; 958 - pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct), 959 - c->err_info, (dma_addr_t) temp64.val); 960 - pci_free_consistent(h->pdev, sizeof(CommandList_struct), 961 - c, (dma_addr_t) c->busaddr); 962 - } else { 963 - i = c - h->cmd_pool; 964 - clear_bit(i & (BITS_PER_LONG - 1), 965 - h->cmd_pool_bits + (i / BITS_PER_LONG)); 966 - h->nr_frees++; 967 - } 968 } 969 970 static inline ctlr_info_t *get_host(struct gendisk *disk) ··· 1020 */ 1021 static int cciss_open(struct block_device *bdev, fmode_t mode) 1022 { 1023 - ctlr_info_t *host = get_host(bdev->bd_disk); 1024 drive_info_struct *drv = get_drv(bdev->bd_disk); 1025 1026 - #ifdef CCISS_DEBUG 1027 - printk(KERN_DEBUG "cciss_open %s\n", bdev->bd_disk->disk_name); 1028 - #endif /* CCISS_DEBUG */ 1029 - 1030 if (drv->busy_configuring) 1031 return -EBUSY; 1032 /* ··· 1049 return -EPERM; 1050 } 1051 drv->usage_count++; 1052 - host->usage_count++; 1053 return 0; 1054 } 1055 1056 /* ··· 1069 */ 1070 static int cciss_release(struct gendisk *disk, fmode_t mode) 1071 { 1072 - ctlr_info_t *host = get_host(disk); 1073 - drive_info_struct *drv = get_drv(disk); 1074 1075 - #ifdef CCISS_DEBUG 1076 - printk(KERN_DEBUG "cciss_release %s\n", disk->disk_name); 1077 - #endif /* CCISS_DEBUG */ 1078 - 1079 drv->usage_count--; 1080 - host->usage_count--; 1081 return 0; 1082 } 1083 - 1084 - #ifdef CONFIG_COMPAT 1085 1086 static int do_ioctl(struct block_device *bdev, fmode_t mode, 1087 unsigned cmd, unsigned long arg) ··· 1091 unlock_kernel(); 1092 return ret; 1093 } 1094 1095 static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode, 1096 unsigned cmd, unsigned long arg); ··· 1224 return 0; 1225 } 1226 1227 - static void check_ioctl_unit_attention(ctlr_info_t *host, CommandList_struct *c) 1228 { 1229 if (c->err_info->CommandStatus == CMD_TARGET_STATUS && 1230 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) 1231 - (void)check_for_unit_attention(host, c); 1232 } 1233 /* 1234 * ioctl ··· 1237 unsigned int cmd, unsigned long arg) 1238 { 1239 struct gendisk *disk = bdev->bd_disk; 1240 - ctlr_info_t *host = get_host(disk); 1241 drive_info_struct *drv = get_drv(disk); 1242 - int ctlr = host->ctlr; 1243 void __user *argp = (void __user *)arg; 1244 1245 - #ifdef CCISS_DEBUG 1246 - printk(KERN_DEBUG "cciss_ioctl: Called with cmd=%x %lx\n", cmd, arg); 1247 - #endif /* CCISS_DEBUG */ 1248 - 1249 switch (cmd) { 1250 case CCISS_GETPCIINFO: 1251 { ··· 1250 1251 if (!arg) 1252 return -EINVAL; 1253 - pciinfo.domain = pci_domain_nr(host->pdev->bus); 1254 - pciinfo.bus = host->pdev->bus->number; 1255 - pciinfo.dev_fn = host->pdev->devfn; 1256 - pciinfo.board_id = host->board_id; 1257 if (copy_to_user 1258 (argp, &pciinfo, sizeof(cciss_pci_info_struct))) 1259 return -EFAULT; ··· 1265 if (!arg) 1266 return -EINVAL; 1267 intinfo.delay = 1268 - readl(&host->cfgtable->HostWrite.CoalIntDelay); 1269 intinfo.count = 1270 - readl(&host->cfgtable->HostWrite.CoalIntCount); 1271 if (copy_to_user 1272 (argp, &intinfo, sizeof(cciss_coalint_struct))) 1273 return -EFAULT; ··· 1287 (&intinfo, argp, sizeof(cciss_coalint_struct))) 1288 return -EFAULT; 1289 if ((intinfo.delay == 0) && (intinfo.count == 0)) 1290 - { 1291 - // printk("cciss_ioctl: delay and count cannot be 0\n"); 1292 return -EINVAL; 1293 - } 1294 - 
spin_lock_irqsave(CCISS_LOCK(ctlr), flags); 1295 /* Update the field, and then ring the doorbell */ 1296 writel(intinfo.delay, 1297 - &(host->cfgtable->HostWrite.CoalIntDelay)); 1298 writel(intinfo.count, 1299 - &(host->cfgtable->HostWrite.CoalIntCount)); 1300 - writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL); 1301 1302 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) { 1303 - if (!(readl(host->vaddr + SA5_DOORBELL) 1304 & CFGTBL_ChangeReq)) 1305 break; 1306 /* delay and try again */ 1307 udelay(1000); 1308 } 1309 - spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); 1310 if (i >= MAX_IOCTL_CONFIG_WAIT) 1311 return -EAGAIN; 1312 return 0; ··· 1317 return -EINVAL; 1318 for (i = 0; i < 16; i++) 1319 NodeName[i] = 1320 - readb(&host->cfgtable->ServerName[i]); 1321 if (copy_to_user(argp, NodeName, sizeof(NodeName_type))) 1322 return -EFAULT; 1323 return 0; ··· 1337 (NodeName, argp, sizeof(NodeName_type))) 1338 return -EFAULT; 1339 1340 - spin_lock_irqsave(CCISS_LOCK(ctlr), flags); 1341 1342 /* Update the field, and then ring the doorbell */ 1343 for (i = 0; i < 16; i++) 1344 writeb(NodeName[i], 1345 - &host->cfgtable->ServerName[i]); 1346 1347 - writel(CFGTBL_ChangeReq, host->vaddr + SA5_DOORBELL); 1348 1349 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) { 1350 - if (!(readl(host->vaddr + SA5_DOORBELL) 1351 & CFGTBL_ChangeReq)) 1352 break; 1353 /* delay and try again */ 1354 udelay(1000); 1355 } 1356 - spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); 1357 if (i >= MAX_IOCTL_CONFIG_WAIT) 1358 return -EAGAIN; 1359 return 0; ··· 1365 1366 if (!arg) 1367 return -EINVAL; 1368 - heartbeat = readl(&host->cfgtable->HeartBeat); 1369 if (copy_to_user 1370 (argp, &heartbeat, sizeof(Heartbeat_type))) 1371 return -EFAULT; ··· 1377 1378 if (!arg) 1379 return -EINVAL; 1380 - BusTypes = readl(&host->cfgtable->BusTypes); 1381 if (copy_to_user 1382 (argp, &BusTypes, sizeof(BusTypes_type))) 1383 return -EFAULT; ··· 1389 1390 if (!arg) 1391 return -EINVAL; 1392 - memcpy(firmware, host->firm_ver, 4); 1393 1394 if (copy_to_user 1395 (argp, firmware, sizeof(FirmwareVer_type))) ··· 1412 case CCISS_DEREGDISK: 1413 case CCISS_REGNEWD: 1414 case CCISS_REVALIDVOLS: 1415 - return rebuild_lun_table(host, 0, 1); 1416 1417 case CCISS_GETLUNINFO:{ 1418 LogvolInfo_struct luninfo; ··· 1432 CommandList_struct *c; 1433 char *buff = NULL; 1434 u64bit temp64; 1435 - unsigned long flags; 1436 DECLARE_COMPLETION_ONSTACK(wait); 1437 1438 if (!arg) ··· 1467 } else { 1468 memset(buff, 0, iocommand.buf_size); 1469 } 1470 - if ((c = cmd_alloc(host, 0)) == NULL) { 1471 kfree(buff); 1472 return -ENOMEM; 1473 } ··· 1494 1495 /* Fill in the scatter gather information */ 1496 if (iocommand.buf_size > 0) { 1497 - temp64.val = pci_map_single(host->pdev, buff, 1498 iocommand.buf_size, 1499 PCI_DMA_BIDIRECTIONAL); 1500 c->SG[0].Addr.lower = temp64.val32.lower; ··· 1504 } 1505 c->waiting = &wait; 1506 1507 - /* Put the request on the tail of the request queue */ 1508 - spin_lock_irqsave(CCISS_LOCK(ctlr), flags); 1509 - addQ(&host->reqQ, c); 1510 - host->Qdepth++; 1511 - start_io(host); 1512 - spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); 1513 - 1514 wait_for_completion(&wait); 1515 1516 /* unlock the buffers from DMA */ 1517 temp64.val32.lower = c->SG[0].Addr.lower; 1518 temp64.val32.upper = c->SG[0].Addr.upper; 1519 - pci_unmap_single(host->pdev, (dma_addr_t) temp64.val, 1520 iocommand.buf_size, 1521 PCI_DMA_BIDIRECTIONAL); 1522 1523 - check_ioctl_unit_attention(host, c); 1524 1525 /* Copy the error information out */ 1526 iocommand.error_info = 
*(c->err_info); 1527 if (copy_to_user 1528 (argp, &iocommand, sizeof(IOCTL_Command_struct))) { 1529 kfree(buff); 1530 - cmd_free(host, c, 0); 1531 return -EFAULT; 1532 } 1533 ··· 1530 if (copy_to_user 1531 (iocommand.buf, buff, iocommand.buf_size)) { 1532 kfree(buff); 1533 - cmd_free(host, c, 0); 1534 return -EFAULT; 1535 } 1536 } 1537 kfree(buff); 1538 - cmd_free(host, c, 0); 1539 return 0; 1540 } 1541 case CCISS_BIG_PASSTHRU:{ ··· 1544 unsigned char **buff = NULL; 1545 int *buff_size = NULL; 1546 u64bit temp64; 1547 - unsigned long flags; 1548 BYTE sg_used = 0; 1549 int status = 0; 1550 int i; ··· 1617 data_ptr += sz; 1618 sg_used++; 1619 } 1620 - if ((c = cmd_alloc(host, 0)) == NULL) { 1621 status = -ENOMEM; 1622 goto cleanup1; 1623 } ··· 1639 if (ioc->buf_size > 0) { 1640 for (i = 0; i < sg_used; i++) { 1641 temp64.val = 1642 - pci_map_single(host->pdev, buff[i], 1643 buff_size[i], 1644 PCI_DMA_BIDIRECTIONAL); 1645 c->SG[i].Addr.lower = ··· 1651 } 1652 } 1653 c->waiting = &wait; 1654 - /* Put the request on the tail of the request queue */ 1655 - spin_lock_irqsave(CCISS_LOCK(ctlr), flags); 1656 - addQ(&host->reqQ, c); 1657 - host->Qdepth++; 1658 - start_io(host); 1659 - spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); 1660 wait_for_completion(&wait); 1661 /* unlock the buffers from DMA */ 1662 for (i = 0; i < sg_used; i++) { 1663 temp64.val32.lower = c->SG[i].Addr.lower; 1664 temp64.val32.upper = c->SG[i].Addr.upper; 1665 - pci_unmap_single(host->pdev, 1666 (dma_addr_t) temp64.val, buff_size[i], 1667 PCI_DMA_BIDIRECTIONAL); 1668 } 1669 - check_ioctl_unit_attention(host, c); 1670 /* Copy the error information out */ 1671 ioc->error_info = *(c->err_info); 1672 if (copy_to_user(argp, ioc, sizeof(*ioc))) { 1673 - cmd_free(host, c, 0); 1674 status = -EFAULT; 1675 goto cleanup1; 1676 } ··· 1675 for (i = 0; i < sg_used; i++) { 1676 if (copy_to_user 1677 (ptr, buff[i], buff_size[i])) { 1678 - cmd_free(host, c, 0); 1679 status = -EFAULT; 1680 goto cleanup1; 1681 } 1682 ptr += buff_size[i]; 1683 } 1684 } 1685 - cmd_free(host, c, 0); 1686 status = 0; 1687 cleanup1: 1688 if (buff) { ··· 1770 1771 static void cciss_softirq_done(struct request *rq) 1772 { 1773 - CommandList_struct *cmd = rq->completion_data; 1774 - ctlr_info_t *h = hba[cmd->ctlr]; 1775 - SGDescriptor_struct *curr_sg = cmd->SG; 1776 - unsigned long flags; 1777 u64bit temp64; 1778 int i, ddir; 1779 int sg_index = 0; 1780 1781 - if (cmd->Request.Type.Direction == XFER_READ) 1782 ddir = PCI_DMA_FROMDEVICE; 1783 else 1784 ddir = PCI_DMA_TODEVICE; 1785 1786 /* command did not need to be retried */ 1787 /* unmap the DMA mapping for all the scatter gather elements */ 1788 - for (i = 0; i < cmd->Header.SGList; i++) { 1789 if (curr_sg[sg_index].Ext == CCISS_SG_CHAIN) { 1790 - cciss_unmap_sg_chain_block(h, cmd); 1791 /* Point to the next block */ 1792 - curr_sg = h->cmd_sg_list[cmd->cmdindex]; 1793 sg_index = 0; 1794 } 1795 temp64.val32.lower = curr_sg[sg_index].Addr.lower; ··· 1799 ++sg_index; 1800 } 1801 1802 - #ifdef CCISS_DEBUG 1803 - printk("Done with %p\n", rq); 1804 - #endif /* CCISS_DEBUG */ 1805 1806 /* set the residual count for pc requests */ 1807 - if (blk_pc_request(rq)) 1808 - rq->resid_len = cmd->err_info->ResidualCnt; 1809 1810 blk_end_request_all(rq, (rq->errors == 0) ? 0 : -EIO); 1811 1812 spin_lock_irqsave(&h->lock, flags); 1813 - cmd_free(h, cmd, 1); 1814 cciss_check_queues(h); 1815 spin_unlock_irqrestore(&h->lock, flags); 1816 } ··· 1824 * via the inquiry page 0. 
Model, vendor, and rev are set to empty strings if 1825 * they cannot be read. 1826 */ 1827 - static void cciss_get_device_descr(int ctlr, int logvol, 1828 char *vendor, char *model, char *rev) 1829 { 1830 int rc; ··· 1839 if (!inq_buf) 1840 return; 1841 1842 - log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol); 1843 - rc = sendcmd_withirq(CISS_INQUIRY, ctlr, inq_buf, sizeof(*inq_buf), 0, 1844 scsi3addr, TYPE_CMD); 1845 if (rc == IO_OK) { 1846 memcpy(vendor, &inq_buf->data_byte[8], VENDOR_LEN); ··· 1860 * number cannot be had, for whatever reason, 16 bytes of 0xff 1861 * are returned instead. 1862 */ 1863 - static void cciss_get_serial_no(int ctlr, int logvol, 1864 unsigned char *serial_no, int buflen) 1865 { 1866 #define PAGE_83_INQ_BYTES 64 ··· 1875 if (!buf) 1876 return; 1877 memset(serial_no, 0, buflen); 1878 - log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol); 1879 - rc = sendcmd_withirq(CISS_INQUIRY, ctlr, buf, 1880 PAGE_83_INQ_BYTES, 0x83, scsi3addr, TYPE_CMD); 1881 if (rc == IO_OK) 1882 memcpy(serial_no, &buf[8], buflen); ··· 1942 * is also the controller node. Any changes to disk 0 will show up on 1943 * the next reboot. 1944 */ 1945 - static void cciss_update_drive_info(int ctlr, int drv_index, int first_time, 1946 - int via_ioctl) 1947 { 1948 - ctlr_info_t *h = hba[ctlr]; 1949 struct gendisk *disk; 1950 InquiryData_struct *inq_buff = NULL; 1951 unsigned int block_size; ··· 1961 1962 /* testing to see if 16-byte CDBs are already being used */ 1963 if (h->cciss_read == CCISS_READ_16) { 1964 - cciss_read_capacity_16(h->ctlr, drv_index, 1965 &total_size, &block_size); 1966 1967 } else { 1968 - cciss_read_capacity(ctlr, drv_index, &total_size, &block_size); 1969 /* if read_capacity returns all F's this volume is >2TB */ 1970 /* in size so we switch to 16-byte CDB's for all */ 1971 /* read/write ops */ 1972 if (total_size == 0xFFFFFFFFULL) { 1973 - cciss_read_capacity_16(ctlr, drv_index, 1974 &total_size, &block_size); 1975 h->cciss_read = CCISS_READ_16; 1976 h->cciss_write = CCISS_WRITE_16; ··· 1980 } 1981 } 1982 1983 - cciss_geometry_inquiry(ctlr, drv_index, total_size, block_size, 1984 inq_buff, drvinfo); 1985 drvinfo->block_size = block_size; 1986 drvinfo->nr_blocks = total_size + 1; 1987 1988 - cciss_get_device_descr(ctlr, drv_index, drvinfo->vendor, 1989 drvinfo->model, drvinfo->rev); 1990 - cciss_get_serial_no(ctlr, drv_index, drvinfo->serial_no, 1991 sizeof(drvinfo->serial_no)); 1992 /* Save the lunid in case we deregister the disk, below. */ 1993 memcpy(drvinfo->LunID, h->drv[drv_index]->LunID, ··· 2012 * (unless it's the first disk (for the controller node). 
2013 */ 2014 if (h->drv[drv_index]->raid_level != -1 && drv_index != 0) { 2015 - printk(KERN_WARNING "disk %d has changed.\n", drv_index); 2016 - spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); 2017 h->drv[drv_index]->busy_configuring = 1; 2018 - spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 2019 2020 /* deregister_disk sets h->drv[drv_index]->queue = NULL 2021 * which keeps the interrupt handler from starting ··· 2065 if (cciss_add_disk(h, disk, drv_index) != 0) { 2066 cciss_free_gendisk(h, drv_index); 2067 cciss_free_drive_info(h, drv_index); 2068 - printk(KERN_WARNING "cciss:%d could not update " 2069 - "disk %d\n", h->ctlr, drv_index); 2070 --h->num_luns; 2071 } 2072 } ··· 2076 kfree(drvinfo); 2077 return; 2078 mem_msg: 2079 - printk(KERN_ERR "cciss: out of memory\n"); 2080 goto freeret; 2081 } 2082 ··· 2168 h->gendisk[drv_index] = 2169 alloc_disk(1 << NWD_SHIFT); 2170 if (!h->gendisk[drv_index]) { 2171 - printk(KERN_ERR "cciss%d: could not " 2172 - "allocate a new disk %d\n", 2173 - h->ctlr, drv_index); 2174 goto err_free_drive_info; 2175 } 2176 } ··· 2221 cciss_free_gendisk(h, drv_index); 2222 cciss_free_drive_info(h, drv_index); 2223 error: 2224 - printk(KERN_WARNING "cciss%d: could not " 2225 - "add disk 0.\n", h->ctlr); 2226 return; 2227 } 2228 ··· 2236 static int rebuild_lun_table(ctlr_info_t *h, int first_time, 2237 int via_ioctl) 2238 { 2239 - int ctlr = h->ctlr; 2240 int num_luns; 2241 ReportLunData_struct *ld_buff = NULL; 2242 int return_code; ··· 2250 return -EPERM; 2251 2252 /* Set busy_configuring flag for this operation */ 2253 - spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); 2254 if (h->busy_configuring) { 2255 - spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 2256 return -EBUSY; 2257 } 2258 h->busy_configuring = 1; 2259 - spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 2260 2261 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL); 2262 if (ld_buff == NULL) 2263 goto mem_msg; 2264 2265 - return_code = sendcmd_withirq(CISS_REPORT_LOG, ctlr, ld_buff, 2266 sizeof(ReportLunData_struct), 2267 0, CTLR_LUNID, TYPE_CMD); 2268 2269 if (return_code == IO_OK) 2270 listlength = be32_to_cpu(*(__be32 *) ld_buff->LUNListLength); 2271 else { /* reading number of logical volumes failed */ 2272 - printk(KERN_WARNING "cciss: report logical volume" 2273 - " command failed\n"); 2274 listlength = 0; 2275 goto freeret; 2276 } ··· 2278 num_luns = listlength / 8; /* 8 bytes per entry */ 2279 if (num_luns > CISS_MAX_LUN) { 2280 num_luns = CISS_MAX_LUN; 2281 - printk(KERN_WARNING "cciss: more luns configured" 2282 " on controller than can be handled by" 2283 " this driver.\n"); 2284 } ··· 2309 } 2310 if (!drv_found) { 2311 /* Deregister it from the OS, it's gone. 
*/ 2312 - spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); 2313 h->drv[i]->busy_configuring = 1; 2314 - spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 2315 return_code = deregister_disk(h, i, 1, via_ioctl); 2316 if (h->drv[i] != NULL) 2317 h->drv[i]->busy_configuring = 0; ··· 2350 if (drv_index == -1) 2351 goto freeret; 2352 } 2353 - cciss_update_drive_info(ctlr, drv_index, first_time, 2354 - via_ioctl); 2355 } /* end for */ 2356 2357 freeret: ··· 2362 */ 2363 return -1; 2364 mem_msg: 2365 - printk(KERN_ERR "cciss: out of memory\n"); 2366 h->busy_configuring = 0; 2367 goto freeret; 2368 } ··· 2482 return 0; 2483 } 2484 2485 - static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, 2486 size_t size, __u8 page_code, unsigned char *scsi3addr, 2487 int cmd_type) 2488 { 2489 - ctlr_info_t *h = hba[ctlr]; 2490 u64bit buff_dma_handle; 2491 int status = IO_OK; 2492 ··· 2569 c->Request.Timeout = 0; 2570 break; 2571 default: 2572 - printk(KERN_WARNING 2573 - "cciss%d: Unknown Command 0x%c\n", ctlr, cmd); 2574 return IO_ERROR; 2575 } 2576 } else if (cmd_type == TYPE_MSG) { ··· 2601 c->Request.CDB[0] = cmd; 2602 break; 2603 default: 2604 - printk(KERN_WARNING 2605 - "cciss%d: unknown message type %d\n", ctlr, cmd); 2606 return IO_ERROR; 2607 } 2608 } else { 2609 - printk(KERN_WARNING 2610 - "cciss%d: unknown command type %d\n", ctlr, cmd_type); 2611 return IO_ERROR; 2612 } 2613 /* Fill in the scatter gather information */ ··· 2634 default: 2635 if (check_for_unit_attention(h, c)) 2636 return IO_NEEDS_RETRY; 2637 - printk(KERN_WARNING "cciss%d: cmd 0x%02x " 2638 "check condition, sense key = 0x%02x\n", 2639 - h->ctlr, c->Request.CDB[0], 2640 - c->err_info->SenseInfo[2]); 2641 } 2642 break; 2643 default: 2644 - printk(KERN_WARNING "cciss%d: cmd 0x%02x" 2645 - "scsi status = 0x%02x\n", h->ctlr, 2646 c->Request.CDB[0], c->err_info->ScsiStatus); 2647 break; 2648 } ··· 2664 /* expected for inquiry and report lun commands */ 2665 break; 2666 case CMD_INVALID: 2667 - printk(KERN_WARNING "cciss: cmd 0x%02x is " 2668 "reported invalid\n", c->Request.CDB[0]); 2669 return_status = IO_ERROR; 2670 break; 2671 case CMD_PROTOCOL_ERR: 2672 - printk(KERN_WARNING "cciss: cmd 0x%02x has " 2673 - "protocol error \n", c->Request.CDB[0]); 2674 return_status = IO_ERROR; 2675 break; 2676 case CMD_HARDWARE_ERR: 2677 - printk(KERN_WARNING "cciss: cmd 0x%02x had " 2678 " hardware error\n", c->Request.CDB[0]); 2679 return_status = IO_ERROR; 2680 break; 2681 case CMD_CONNECTION_LOST: 2682 - printk(KERN_WARNING "cciss: cmd 0x%02x had " 2683 "connection lost\n", c->Request.CDB[0]); 2684 return_status = IO_ERROR; 2685 break; 2686 case CMD_ABORTED: 2687 - printk(KERN_WARNING "cciss: cmd 0x%02x was " 2688 "aborted\n", c->Request.CDB[0]); 2689 return_status = IO_ERROR; 2690 break; 2691 case CMD_ABORT_FAILED: 2692 - printk(KERN_WARNING "cciss: cmd 0x%02x reports " 2693 "abort failed\n", c->Request.CDB[0]); 2694 return_status = IO_ERROR; 2695 break; 2696 case CMD_UNSOLICITED_ABORT: 2697 - printk(KERN_WARNING 2698 - "cciss%d: unsolicited abort 0x%02x\n", h->ctlr, 2699 c->Request.CDB[0]); 2700 return_status = IO_NEEDS_RETRY; 2701 break; 2702 default: 2703 - printk(KERN_WARNING "cciss: cmd 0x%02x returned " 2704 "unknown status %x\n", c->Request.CDB[0], 2705 c->err_info->CommandStatus); 2706 return_status = IO_ERROR; ··· 2712 { 2713 DECLARE_COMPLETION_ONSTACK(wait); 2714 u64bit buff_dma_handle; 2715 - unsigned long flags; 2716 int return_status = IO_OK; 2717 2718 resend_cmd2: 2719 c->waiting = &wait; 2720 - /* Put the 
request on the tail of the queue and send it */ 2721 - spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); 2722 - addQ(&h->reqQ, c); 2723 - h->Qdepth++; 2724 - start_io(h); 2725 - spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 2726 2727 wait_for_completion(&wait); 2728 ··· 2727 2728 if (return_status == IO_NEEDS_RETRY && 2729 c->retry_count < MAX_CMD_RETRIES) { 2730 - printk(KERN_WARNING "cciss%d: retrying 0x%02x\n", h->ctlr, 2731 c->Request.CDB[0]); 2732 c->retry_count++; 2733 /* erase the old error information */ ··· 2746 return return_status; 2747 } 2748 2749 - static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size, 2750 __u8 page_code, unsigned char scsi3addr[], 2751 int cmd_type) 2752 { 2753 - ctlr_info_t *h = hba[ctlr]; 2754 CommandList_struct *c; 2755 int return_status; 2756 2757 - c = cmd_alloc(h, 0); 2758 if (!c) 2759 return -ENOMEM; 2760 - return_status = fill_cmd(c, cmd, ctlr, buff, size, page_code, 2761 scsi3addr, cmd_type); 2762 if (return_status == IO_OK) 2763 return_status = sendcmd_withirq_core(h, c, 1); 2764 2765 - cmd_free(h, c, 0); 2766 return return_status; 2767 } 2768 2769 - static void cciss_geometry_inquiry(int ctlr, int logvol, 2770 sector_t total_size, 2771 unsigned int block_size, 2772 InquiryData_struct *inq_buff, ··· 2776 unsigned char scsi3addr[8]; 2777 2778 memset(inq_buff, 0, sizeof(InquiryData_struct)); 2779 - log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol); 2780 - return_code = sendcmd_withirq(CISS_INQUIRY, ctlr, inq_buff, 2781 sizeof(*inq_buff), 0xC1, scsi3addr, TYPE_CMD); 2782 if (return_code == IO_OK) { 2783 if (inq_buff->data_byte[8] == 0xFF) { 2784 - printk(KERN_WARNING 2785 - "cciss: reading geometry failed, volume " 2786 "does not support reading geometry\n"); 2787 drv->heads = 255; 2788 drv->sectors = 32; /* Sectors per track */ ··· 2806 drv->cylinders = real_size; 2807 } 2808 } else { /* Get geometry failed */ 2809 - printk(KERN_WARNING "cciss: reading geometry failed\n"); 2810 } 2811 } 2812 2813 static void 2814 - cciss_read_capacity(int ctlr, int logvol, sector_t *total_size, 2815 unsigned int *block_size) 2816 { 2817 ReadCapdata_struct *buf; ··· 2820 2821 buf = kzalloc(sizeof(ReadCapdata_struct), GFP_KERNEL); 2822 if (!buf) { 2823 - printk(KERN_WARNING "cciss: out of memory\n"); 2824 return; 2825 } 2826 2827 - log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol); 2828 - return_code = sendcmd_withirq(CCISS_READ_CAPACITY, ctlr, buf, 2829 sizeof(ReadCapdata_struct), 0, scsi3addr, TYPE_CMD); 2830 if (return_code == IO_OK) { 2831 *total_size = be32_to_cpu(*(__be32 *) buf->total_size); 2832 *block_size = be32_to_cpu(*(__be32 *) buf->block_size); 2833 } else { /* read capacity command failed */ 2834 - printk(KERN_WARNING "cciss: read capacity failed\n"); 2835 *total_size = 0; 2836 *block_size = BLOCK_SIZE; 2837 } 2838 kfree(buf); 2839 } 2840 2841 - static void cciss_read_capacity_16(int ctlr, int logvol, 2842 sector_t *total_size, unsigned int *block_size) 2843 { 2844 ReadCapdata_struct_16 *buf; ··· 2847 2848 buf = kzalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL); 2849 if (!buf) { 2850 - printk(KERN_WARNING "cciss: out of memory\n"); 2851 return; 2852 } 2853 2854 - log_unit_to_scsi3addr(hba[ctlr], scsi3addr, logvol); 2855 - return_code = sendcmd_withirq(CCISS_READ_CAPACITY_16, 2856 - ctlr, buf, sizeof(ReadCapdata_struct_16), 2857 0, scsi3addr, TYPE_CMD); 2858 if (return_code == IO_OK) { 2859 *total_size = be64_to_cpu(*(__be64 *) buf->total_size); 2860 *block_size = be32_to_cpu(*(__be32 *) buf->block_size); 2861 } else { /* read 
capacity command failed */ 2862 - printk(KERN_WARNING "cciss: read capacity failed\n"); 2863 *total_size = 0; 2864 *block_size = BLOCK_SIZE; 2865 } 2866 - printk(KERN_INFO " blocks= %llu block_size= %d\n", 2867 (unsigned long long)*total_size+1, *block_size); 2868 kfree(buf); 2869 } ··· 2891 2892 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL); 2893 if (inq_buff == NULL) { 2894 - printk(KERN_WARNING "cciss: out of memory\n"); 2895 return 1; 2896 } 2897 if (h->cciss_read == CCISS_READ_10) { 2898 - cciss_read_capacity(h->ctlr, logvol, 2899 &total_size, &block_size); 2900 } else { 2901 - cciss_read_capacity_16(h->ctlr, logvol, 2902 &total_size, &block_size); 2903 } 2904 - cciss_geometry_inquiry(h->ctlr, logvol, total_size, block_size, 2905 inq_buff, drv); 2906 2907 blk_queue_logical_block_size(drv->queue, drv->block_size); ··· 2935 c = hlist_entry(h->reqQ.first, CommandList_struct, list); 2936 /* can't do anything if fifo is full */ 2937 if ((h->access.fifo_full(h))) { 2938 - printk(KERN_WARNING "cciss: fifo full\n"); 2939 break; 2940 } 2941 ··· 2951 } 2952 } 2953 2954 - /* Assumes that CCISS_LOCK(h->ctlr) is held. */ 2955 /* Zeros out the error record and then resends the command back */ 2956 /* to the controller */ 2957 static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c) ··· 2992 driver_byte = DRIVER_OK; 2993 msg_byte = cmd->err_info->CommandStatus; /* correct? seems too device specific */ 2994 2995 - if (blk_pc_request(cmd->rq)) 2996 host_byte = DID_PASSTHROUGH; 2997 else 2998 host_byte = DID_OK; ··· 3001 host_byte, driver_byte); 3002 3003 if (cmd->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) { 3004 - if (!blk_pc_request(cmd->rq)) 3005 - printk(KERN_WARNING "cciss: cmd %p " 3006 "has SCSI Status 0x%x\n", 3007 cmd, cmd->err_info->ScsiStatus); 3008 return error_value; ··· 3011 /* check the sense key */ 3012 sense_key = 0xf & cmd->err_info->SenseInfo[2]; 3013 /* no status or recovered error */ 3014 - if (((sense_key == 0x0) || (sense_key == 0x1)) && !blk_pc_request(cmd->rq)) 3015 error_value = 0; 3016 3017 if (check_for_unit_attention(h, cmd)) { 3018 - *retry_cmd = !blk_pc_request(cmd->rq); 3019 return 0; 3020 } 3021 3022 - if (!blk_pc_request(cmd->rq)) { /* Not SG_IO or similar? */ 3023 if (error_value != 0) 3024 - printk(KERN_WARNING "cciss: cmd %p has CHECK CONDITION" 3025 " sense key = 0x%x\n", cmd, sense_key); 3026 return error_value; 3027 } ··· 3063 rq->errors = evaluate_target_status(h, cmd, &retry_cmd); 3064 break; 3065 case CMD_DATA_UNDERRUN: 3066 - if (blk_fs_request(cmd->rq)) { 3067 - printk(KERN_WARNING "cciss: cmd %p has" 3068 " completed with data underrun " 3069 "reported\n", cmd); 3070 cmd->rq->resid_len = cmd->err_info->ResidualCnt; 3071 } 3072 break; 3073 case CMD_DATA_OVERRUN: 3074 - if (blk_fs_request(cmd->rq)) 3075 - printk(KERN_WARNING "cciss: cmd %p has" 3076 " completed with data overrun " 3077 "reported\n", cmd); 3078 break; 3079 case CMD_INVALID: 3080 - printk(KERN_WARNING "cciss: cmd %p is " 3081 "reported invalid\n", cmd); 3082 rq->errors = make_status_bytes(SAM_STAT_GOOD, 3083 cmd->err_info->CommandStatus, DRIVER_OK, 3084 - blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR); 3085 break; 3086 case CMD_PROTOCOL_ERR: 3087 - printk(KERN_WARNING "cciss: cmd %p has " 3088 - "protocol error \n", cmd); 3089 rq->errors = make_status_bytes(SAM_STAT_GOOD, 3090 cmd->err_info->CommandStatus, DRIVER_OK, 3091 - blk_pc_request(cmd->rq) ? 
DID_PASSTHROUGH : DID_ERROR); 3092 break; 3093 case CMD_HARDWARE_ERR: 3094 - printk(KERN_WARNING "cciss: cmd %p had " 3095 " hardware error\n", cmd); 3096 rq->errors = make_status_bytes(SAM_STAT_GOOD, 3097 cmd->err_info->CommandStatus, DRIVER_OK, 3098 - blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR); 3099 break; 3100 case CMD_CONNECTION_LOST: 3101 - printk(KERN_WARNING "cciss: cmd %p had " 3102 "connection lost\n", cmd); 3103 rq->errors = make_status_bytes(SAM_STAT_GOOD, 3104 cmd->err_info->CommandStatus, DRIVER_OK, 3105 - blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR); 3106 break; 3107 case CMD_ABORTED: 3108 - printk(KERN_WARNING "cciss: cmd %p was " 3109 "aborted\n", cmd); 3110 rq->errors = make_status_bytes(SAM_STAT_GOOD, 3111 cmd->err_info->CommandStatus, DRIVER_OK, 3112 - blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT); 3113 break; 3114 case CMD_ABORT_FAILED: 3115 - printk(KERN_WARNING "cciss: cmd %p reports " 3116 "abort failed\n", cmd); 3117 rq->errors = make_status_bytes(SAM_STAT_GOOD, 3118 cmd->err_info->CommandStatus, DRIVER_OK, 3119 - blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR); 3120 break; 3121 case CMD_UNSOLICITED_ABORT: 3122 - printk(KERN_WARNING "cciss%d: unsolicited " 3123 "abort %p\n", h->ctlr, cmd); 3124 if (cmd->retry_count < MAX_CMD_RETRIES) { 3125 retry_cmd = 1; 3126 - printk(KERN_WARNING 3127 - "cciss%d: retrying %p\n", h->ctlr, cmd); 3128 cmd->retry_count++; 3129 } else 3130 - printk(KERN_WARNING 3131 - "cciss%d: %p retried too " 3132 - "many times\n", h->ctlr, cmd); 3133 rq->errors = make_status_bytes(SAM_STAT_GOOD, 3134 cmd->err_info->CommandStatus, DRIVER_OK, 3135 - blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ABORT); 3136 break; 3137 case CMD_TIMEOUT: 3138 - printk(KERN_WARNING "cciss: cmd %p timedout\n", cmd); 3139 rq->errors = make_status_bytes(SAM_STAT_GOOD, 3140 cmd->err_info->CommandStatus, DRIVER_OK, 3141 - blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR); 3142 break; 3143 default: 3144 - printk(KERN_WARNING "cciss: cmd %p returned " 3145 "unknown status %x\n", cmd, 3146 cmd->err_info->CommandStatus); 3147 rq->errors = make_status_bytes(SAM_STAT_GOOD, 3148 cmd->err_info->CommandStatus, DRIVER_OK, 3149 - blk_pc_request(cmd->rq) ? DID_PASSTHROUGH : DID_ERROR); 3150 } 3151 3152 after_error_processing: ··· 3165 } 3166 cmd->rq->completion_data = cmd; 3167 blk_complete_request(cmd->rq); 3168 } 3169 3170 /* ··· 3226 3227 BUG_ON(creq->nr_phys_segments > h->maxsgentries); 3228 3229 - if ((c = cmd_alloc(h, 1)) == NULL) 3230 goto full; 3231 3232 blk_start_request(creq); ··· 3244 /* got command from pool, so use the command block index instead */ 3245 /* for direct lookups. */ 3246 /* The first 2 bits are reserved for controller error reporting. */ 3247 - c->Header.Tag.lower = (c->cmdindex << 3); 3248 - c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */ 3249 memcpy(&c->Header.LUN, drv->LunID, sizeof(drv->LunID)); 3250 c->Request.CDBLen = 10; /* 12 byte commands not in FW yet; */ 3251 c->Request.Type.Type = TYPE_CMD; /* It is a command. */ ··· 3256 c->Request.CDB[0] = 3257 (rq_data_dir(creq) == READ) ? 
h->cciss_read : h->cciss_write; 3258 start_blk = blk_rq_pos(creq); 3259 - #ifdef CCISS_DEBUG 3260 - printk(KERN_DEBUG "ciss: sector =%d nr_sectors=%d\n", 3261 (int)blk_rq_pos(creq), (int)blk_rq_sectors(creq)); 3262 - #endif /* CCISS_DEBUG */ 3263 - 3264 sg_init_table(tmp_sg, h->maxsgentries); 3265 seg = blk_rq_map_sg(q, creq, tmp_sg); 3266 ··· 3297 if (seg > h->maxSG) 3298 h->maxSG = seg; 3299 3300 - #ifdef CCISS_DEBUG 3301 - printk(KERN_DEBUG "cciss: Submitting %ld sectors in %d segments " 3302 "chained[%d]\n", 3303 blk_rq_sectors(creq), seg, chained); 3304 - #endif /* CCISS_DEBUG */ 3305 3306 - c->Header.SGList = c->Header.SGTotal = seg + chained; 3307 - if (seg > h->max_cmd_sgentries) 3308 c->Header.SGList = h->max_cmd_sgentries; 3309 3310 - if (likely(blk_fs_request(creq))) { 3311 if(h->cciss_read == CCISS_READ_10) { 3312 c->Request.CDB[1] = 0; 3313 c->Request.CDB[2] = (start_blk >> 24) & 0xff; /* MSB */ ··· 3338 c->Request.CDB[13]= blk_rq_sectors(creq) & 0xff; 3339 c->Request.CDB[14] = c->Request.CDB[15] = 0; 3340 } 3341 - } else if (blk_pc_request(creq)) { 3342 c->Request.CDBLen = creq->cmd_len; 3343 memcpy(c->Request.CDB, creq->cmd, BLK_MAX_CDB); 3344 } else { 3345 - printk(KERN_WARNING "cciss%d: bad request type %d\n", h->ctlr, creq->cmd_type); 3346 BUG(); 3347 } 3348 ··· 3376 3377 static inline long interrupt_not_for_us(ctlr_info_t *h) 3378 { 3379 - return (((h->access.intr_pending(h) == 0) || 3380 - (h->interrupts_enabled == 0))); 3381 } 3382 3383 - static irqreturn_t do_cciss_intr(int irq, void *dev_id) 3384 { 3385 ctlr_info_t *h = dev_id; 3386 - CommandList_struct *c; 3387 unsigned long flags; 3388 - __u32 a, a1, a2; 3389 3390 if (interrupt_not_for_us(h)) 3391 return IRQ_NONE; 3392 - /* 3393 - * If there are completed commands in the completion queue, 3394 - * we had better do something about it. 
3395 - */ 3396 - spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); 3397 while (interrupt_pending(h)) { 3398 - while ((a = get_next_completion(h)) != FIFO_EMPTY) { 3399 - a1 = a; 3400 - if ((a & 0x04)) { 3401 - a2 = (a >> 3); 3402 - if (a2 >= h->nr_cmds) { 3403 - printk(KERN_WARNING 3404 - "cciss: controller cciss%d failed, stopping.\n", 3405 - h->ctlr); 3406 - spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 3407 - fail_all_cmds(h->ctlr); 3408 - return IRQ_HANDLED; 3409 - } 3410 - 3411 - c = h->cmd_pool + a2; 3412 - a = c->busaddr; 3413 - 3414 - } else { 3415 - struct hlist_node *tmp; 3416 - 3417 - a &= ~3; 3418 - c = NULL; 3419 - hlist_for_each_entry(c, tmp, &h->cmpQ, list) { 3420 - if (c->busaddr == a) 3421 - break; 3422 - } 3423 - } 3424 - /* 3425 - * If we've found the command, take it off the 3426 - * completion Q and free it 3427 - */ 3428 - if (c && c->busaddr == a) { 3429 - removeQ(c); 3430 - if (c->cmd_type == CMD_RWREQ) { 3431 - complete_command(h, c, 0); 3432 - } else if (c->cmd_type == CMD_IOCTL_PEND) { 3433 - complete(c->waiting); 3434 - } 3435 - # ifdef CONFIG_CISS_SCSI_TAPE 3436 - else if (c->cmd_type == CMD_SCSI) 3437 - complete_scsi_command(c, 0, a1); 3438 - # endif 3439 - continue; 3440 - } 3441 } 3442 } 3443 3444 - spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 3445 return IRQ_HANDLED; 3446 } 3447 ··· 3632 3633 switch (c->err_info->SenseInfo[12]) { 3634 case STATE_CHANGED: 3635 - printk(KERN_WARNING "cciss%d: a state change " 3636 - "detected, command retried\n", h->ctlr); 3637 return 1; 3638 break; 3639 case LUN_FAILED: 3640 - printk(KERN_WARNING "cciss%d: LUN failure " 3641 - "detected, action required\n", h->ctlr); 3642 return 1; 3643 break; 3644 case REPORT_LUNS_CHANGED: 3645 - printk(KERN_WARNING "cciss%d: report LUN data " 3646 - "changed\n", h->ctlr); 3647 /* 3648 * Here, we could call add_to_scan_list and wake up the scan thread, 3649 * except that it's quite likely that we will get more than one ··· 3662 return 1; 3663 break; 3664 case POWER_OR_RESET: 3665 - printk(KERN_WARNING "cciss%d: a power on " 3666 - "or device reset detected\n", h->ctlr); 3667 return 1; 3668 break; 3669 case UNIT_ATTENTION_CLEARED: 3670 - printk(KERN_WARNING "cciss%d: unit attention " 3671 - "cleared by another initiator\n", h->ctlr); 3672 return 1; 3673 break; 3674 default: 3675 - printk(KERN_WARNING "cciss%d: unknown " 3676 - "unit attention detected\n", h->ctlr); 3677 - return 1; 3678 } 3679 } 3680 ··· 3682 * the io functions. 3683 * This is for debug only. 
3684 */ 3685 - #ifdef CCISS_DEBUG 3686 - static void print_cfg_table(CfgTable_struct *tb) 3687 { 3688 int i; 3689 char temp_name[17]; 3690 3691 - printk("Controller Configuration information\n"); 3692 - printk("------------------------------------\n"); 3693 for (i = 0; i < 4; i++) 3694 temp_name[i] = readb(&(tb->Signature[i])); 3695 temp_name[4] = '\0'; 3696 - printk(" Signature = %s\n", temp_name); 3697 - printk(" Spec Number = %d\n", readl(&(tb->SpecValence))); 3698 - printk(" Transport methods supported = 0x%x\n", 3699 readl(&(tb->TransportSupport))); 3700 - printk(" Transport methods active = 0x%x\n", 3701 readl(&(tb->TransportActive))); 3702 - printk(" Requested transport Method = 0x%x\n", 3703 readl(&(tb->HostWrite.TransportRequest))); 3704 - printk(" Coalesce Interrupt Delay = 0x%x\n", 3705 readl(&(tb->HostWrite.CoalIntDelay))); 3706 - printk(" Coalesce Interrupt Count = 0x%x\n", 3707 readl(&(tb->HostWrite.CoalIntCount))); 3708 - printk(" Max outstanding commands = 0x%d\n", 3709 readl(&(tb->CmdsOutMax))); 3710 - printk(" Bus Types = 0x%x\n", readl(&(tb->BusTypes))); 3711 for (i = 0; i < 16; i++) 3712 temp_name[i] = readb(&(tb->ServerName[i])); 3713 temp_name[16] = '\0'; 3714 - printk(" Server Name = %s\n", temp_name); 3715 - printk(" Heartbeat Counter = 0x%x\n\n\n", readl(&(tb->HeartBeat))); 3716 } 3717 - #endif /* CCISS_DEBUG */ 3718 3719 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr) 3720 { ··· 3740 offset += 8; 3741 break; 3742 default: /* reserved in PCI 2.2 */ 3743 - printk(KERN_WARNING 3744 "Base address is invalid\n"); 3745 return -1; 3746 break; ··· 3752 return -1; 3753 } 3754 3755 /* If MSI/MSI-X is supported by the kernel we will try to enable it on 3756 * controllers that are capable. If not, we use IO-APIC mode. 
3757 */ 3758 3759 - static void __devinit cciss_interrupt_mode(ctlr_info_t *c, 3760 - struct pci_dev *pdev, __u32 board_id) 3761 { 3762 #ifdef CONFIG_PCI_MSI 3763 int err; ··· 3936 }; 3937 3938 /* Some boards advertise MSI but don't really support it */ 3939 - if ((board_id == 0x40700E11) || 3940 - (board_id == 0x40800E11) || 3941 - (board_id == 0x40820E11) || (board_id == 0x40830E11)) 3942 goto default_int_mode; 3943 3944 - if (pci_find_capability(pdev, PCI_CAP_ID_MSIX)) { 3945 - err = pci_enable_msix(pdev, cciss_msix_entries, 4); 3946 if (!err) { 3947 - c->intr[0] = cciss_msix_entries[0].vector; 3948 - c->intr[1] = cciss_msix_entries[1].vector; 3949 - c->intr[2] = cciss_msix_entries[2].vector; 3950 - c->intr[3] = cciss_msix_entries[3].vector; 3951 - c->msix_vector = 1; 3952 return; 3953 } 3954 if (err > 0) { 3955 - printk(KERN_WARNING "cciss: only %d MSI-X vectors " 3956 - "available\n", err); 3957 goto default_int_mode; 3958 } else { 3959 - printk(KERN_WARNING "cciss: MSI-X init failed %d\n", 3960 - err); 3961 goto default_int_mode; 3962 } 3963 } 3964 - if (pci_find_capability(pdev, PCI_CAP_ID_MSI)) { 3965 - if (!pci_enable_msi(pdev)) { 3966 - c->msi_vector = 1; 3967 - } else { 3968 - printk(KERN_WARNING "cciss: MSI init failed\n"); 3969 - } 3970 } 3971 default_int_mode: 3972 #endif /* CONFIG_PCI_MSI */ 3973 /* if we get here we're going to use the default interrupt mode */ 3974 - c->intr[SIMPLE_MODE_INT] = pdev->irq; 3975 return; 3976 } 3977 3978 - static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev) 3979 { 3980 - ushort subsystem_vendor_id, subsystem_device_id, command; 3981 - __u32 board_id, scratchpad = 0; 3982 - __u64 cfg_offset; 3983 - __u32 cfg_base_addr; 3984 - __u64 cfg_base_addr_index; 3985 - int i, prod_index, err; 3986 3987 subsystem_vendor_id = pdev->subsystem_vendor; 3988 subsystem_device_id = pdev->subsystem_device; 3989 - board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) | 3990 - subsystem_vendor_id); 3991 3992 for (i = 0; i < ARRAY_SIZE(products); i++) { 3993 /* Stand aside for hpsa driver on request */ 3994 if (cciss_allow_hpsa && products[i].board_id == HPSA_BOUNDARY) 3995 return -ENODEV; 3996 - if (board_id == products[i].board_id) 3997 - break; 3998 } 3999 - prod_index = i; 4000 - if (prod_index == ARRAY_SIZE(products)) { 4001 - dev_warn(&pdev->dev, 4002 - "unrecognized board ID: 0x%08lx, ignoring.\n", 4003 - (unsigned long) board_id); 4004 return -ENODEV; 4005 } 4006 4007 - /* check to see if controller has been disabled */ 4008 - /* BEFORE trying to enable it */ 4009 - (void)pci_read_config_word(pdev, PCI_COMMAND, &command); 4010 - if (!(command & 0x02)) { 4011 - printk(KERN_WARNING 4012 - "cciss: controller appears to be disabled\n"); 4013 return -ENODEV; 4014 } 4015 - 4016 - err = pci_enable_device(pdev); 4017 if (err) { 4018 - printk(KERN_ERR "cciss: Unable to Enable PCI device\n"); 4019 return err; 4020 } 4021 4022 - err = pci_request_regions(pdev, "cciss"); 4023 if (err) { 4024 - printk(KERN_ERR "cciss: Cannot obtain PCI resources, " 4025 - "aborting\n"); 4026 return err; 4027 } 4028 4029 - #ifdef CCISS_DEBUG 4030 - printk("command = %x\n", command); 4031 - printk("irq = %x\n", pdev->irq); 4032 - printk("board_id = %x\n", board_id); 4033 - #endif /* CCISS_DEBUG */ 4034 4035 /* If the kernel supports MSI/MSI-X we will try to enable that functionality, 4036 * else we use the IO-APIC interrupt assigned to us by system ROM. 
4037 */ 4038 - cciss_interrupt_mode(c, pdev, board_id); 4039 - 4040 - /* find the memory BAR */ 4041 - for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 4042 - if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) 4043 - break; 4044 } 4045 - if (i == DEVICE_COUNT_RESOURCE) { 4046 - printk(KERN_WARNING "cciss: No memory BAR found\n"); 4047 err = -ENODEV; 4048 goto err_out_free_res; 4049 } 4050 - 4051 - c->paddr = pci_resource_start(pdev, i); /* addressing mode bits 4052 - * already removed 4053 - */ 4054 - 4055 - #ifdef CCISS_DEBUG 4056 - printk("address 0 = %lx\n", c->paddr); 4057 - #endif /* CCISS_DEBUG */ 4058 - c->vaddr = remap_pci_mem(c->paddr, 0x250); 4059 - 4060 - /* Wait for the board to become ready. (PCI hotplug needs this.) 4061 - * We poll for up to 120 secs, once per 100ms. */ 4062 - for (i = 0; i < 1200; i++) { 4063 - scratchpad = readl(c->vaddr + SA5_SCRATCHPAD_OFFSET); 4064 - if (scratchpad == CCISS_FIRMWARE_READY) 4065 - break; 4066 - set_current_state(TASK_INTERRUPTIBLE); 4067 - schedule_timeout(msecs_to_jiffies(100)); /* wait 100ms */ 4068 - } 4069 - if (scratchpad != CCISS_FIRMWARE_READY) { 4070 - printk(KERN_WARNING "cciss: Board not ready. Timed out.\n"); 4071 - err = -ENODEV; 4072 - goto err_out_free_res; 4073 - } 4074 - 4075 - /* get the address index number */ 4076 - cfg_base_addr = readl(c->vaddr + SA5_CTCFG_OFFSET); 4077 - cfg_base_addr &= (__u32) 0x0000ffff; 4078 - #ifdef CCISS_DEBUG 4079 - printk("cfg base address = %x\n", cfg_base_addr); 4080 - #endif /* CCISS_DEBUG */ 4081 - cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr); 4082 - #ifdef CCISS_DEBUG 4083 - printk("cfg base address index = %llx\n", 4084 - (unsigned long long)cfg_base_addr_index); 4085 - #endif /* CCISS_DEBUG */ 4086 - if (cfg_base_addr_index == -1) { 4087 - printk(KERN_WARNING "cciss: Cannot find cfg_base_addr_index\n"); 4088 - err = -ENODEV; 4089 - goto err_out_free_res; 4090 - } 4091 - 4092 - cfg_offset = readl(c->vaddr + SA5_CTMEM_OFFSET); 4093 - #ifdef CCISS_DEBUG 4094 - printk("cfg offset = %llx\n", (unsigned long long)cfg_offset); 4095 - #endif /* CCISS_DEBUG */ 4096 - c->cfgtable = remap_pci_mem(pci_resource_start(pdev, 4097 - cfg_base_addr_index) + 4098 - cfg_offset, sizeof(CfgTable_struct)); 4099 - c->board_id = board_id; 4100 - 4101 - #ifdef CCISS_DEBUG 4102 - print_cfg_table(c->cfgtable); 4103 - #endif /* CCISS_DEBUG */ 4104 - 4105 - /* Some controllers support Zero Memory Raid (ZMR). 4106 - * When configured in ZMR mode the number of supported 4107 - * commands drops to 64. So instead of just setting an 4108 - * arbitrary value we make the driver a little smarter. 4109 - * We read the config table to tell us how many commands 4110 - * are supported on the controller then subtract 4 to 4111 - * leave a little room for ioctl calls. 4112 - */ 4113 - c->max_commands = readl(&(c->cfgtable->CmdsOutMax)); 4114 - c->maxsgentries = readl(&(c->cfgtable->MaxSGElements)); 4115 - 4116 - /* 4117 - * Limit native command to 32 s/g elements to save dma'able memory. 
4118 - * Howvever spec says if 0, use 31 4119 - */ 4120 - 4121 - c->max_cmd_sgentries = 31; 4122 - if (c->maxsgentries > 512) { 4123 - c->max_cmd_sgentries = 32; 4124 - c->chainsize = c->maxsgentries - c->max_cmd_sgentries + 1; 4125 - c->maxsgentries -= 1; /* account for chain pointer */ 4126 - } else { 4127 - c->maxsgentries = 31; /* Default to traditional value */ 4128 - c->chainsize = 0; /* traditional */ 4129 - } 4130 - 4131 - c->product_name = products[prod_index].product_name; 4132 - c->access = *(products[prod_index].access); 4133 - c->nr_cmds = c->max_commands - 4; 4134 - if ((readb(&c->cfgtable->Signature[0]) != 'C') || 4135 - (readb(&c->cfgtable->Signature[1]) != 'I') || 4136 - (readb(&c->cfgtable->Signature[2]) != 'S') || 4137 - (readb(&c->cfgtable->Signature[3]) != 'S')) { 4138 - printk("Does not appear to be a valid CISS config table\n"); 4139 - err = -ENODEV; 4140 - goto err_out_free_res; 4141 - } 4142 - #ifdef CONFIG_X86 4143 - { 4144 - /* Need to enable prefetch in the SCSI core for 6400 in x86 */ 4145 - __u32 prefetch; 4146 - prefetch = readl(&(c->cfgtable->SCSI_Prefetch)); 4147 - prefetch |= 0x100; 4148 - writel(prefetch, &(c->cfgtable->SCSI_Prefetch)); 4149 - } 4150 - #endif 4151 - 4152 - /* Disabling DMA prefetch and refetch for the P600. 4153 - * An ASIC bug may result in accesses to invalid memory addresses. 4154 - * We've disabled prefetch for some time now. Testing with XEN 4155 - * kernels revealed a bug in the refetch if dom0 resides on a P600. 4156 - */ 4157 - if(board_id == 0x3225103C) { 4158 - __u32 dma_prefetch; 4159 - __u32 dma_refetch; 4160 - dma_prefetch = readl(c->vaddr + I2O_DMA1_CFG); 4161 - dma_prefetch |= 0x8000; 4162 - writel(dma_prefetch, c->vaddr + I2O_DMA1_CFG); 4163 - pci_read_config_dword(pdev, PCI_COMMAND_PARITY, &dma_refetch); 4164 - dma_refetch |= 0x1; 4165 - pci_write_config_dword(pdev, PCI_COMMAND_PARITY, dma_refetch); 4166 - } 4167 - 4168 - #ifdef CCISS_DEBUG 4169 - printk("Trying to put board into Simple mode\n"); 4170 - #endif /* CCISS_DEBUG */ 4171 - c->max_commands = readl(&(c->cfgtable->CmdsOutMax)); 4172 - /* Update the field, and then ring the doorbell */ 4173 - writel(CFGTBL_Trans_Simple, &(c->cfgtable->HostWrite.TransportRequest)); 4174 - writel(CFGTBL_ChangeReq, c->vaddr + SA5_DOORBELL); 4175 - 4176 - /* under certain very rare conditions, this can take awhile. 4177 - * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right 4178 - * as we enter this code.) 
*/ 4179 - for (i = 0; i < MAX_CONFIG_WAIT; i++) { 4180 - if (!(readl(c->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq)) 4181 - break; 4182 - /* delay and try again */ 4183 - set_current_state(TASK_INTERRUPTIBLE); 4184 - schedule_timeout(msecs_to_jiffies(1)); 4185 - } 4186 - 4187 - #ifdef CCISS_DEBUG 4188 - printk(KERN_DEBUG "I counter got to %d %x\n", i, 4189 - readl(c->vaddr + SA5_DOORBELL)); 4190 - #endif /* CCISS_DEBUG */ 4191 - #ifdef CCISS_DEBUG 4192 - print_cfg_table(c->cfgtable); 4193 - #endif /* CCISS_DEBUG */ 4194 - 4195 - if (!(readl(&(c->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) { 4196 - printk(KERN_WARNING "cciss: unable to get board into" 4197 - " simple mode\n"); 4198 - err = -ENODEV; 4199 - goto err_out_free_res; 4200 - } 4201 return 0; 4202 4203 err_out_free_res: ··· 4220 * Deliberately omit pci_disable_device(): it does something nasty to 4221 * Smart Array controllers that pci_enable_device does not undo 4222 */ 4223 - pci_release_regions(pdev); 4224 return err; 4225 } 4226 4227 /* Function to find the first free pointer into our hba[] array 4228 * Returns -1 if no free entries are left. 4229 */ 4230 - static int alloc_cciss_hba(void) 4231 { 4232 int i; 4233 4234 for (i = 0; i < MAX_CTLR; i++) { 4235 if (!hba[i]) { 4236 - ctlr_info_t *p; 4237 4238 - p = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL); 4239 - if (!p) 4240 goto Enomem; 4241 - hba[i] = p; 4242 return i; 4243 } 4244 } 4245 - printk(KERN_WARNING "cciss: This driver supports a maximum" 4246 " of %d controllers.\n", MAX_CTLR); 4247 return -1; 4248 Enomem: 4249 - printk(KERN_ERR "cciss: out of memory.\n"); 4250 return -1; 4251 } 4252 4253 - static void free_hba(int n) 4254 { 4255 - ctlr_info_t *h = hba[n]; 4256 int i; 4257 4258 - hba[n] = NULL; 4259 for (i = 0; i < h->highest_lun + 1; i++) 4260 if (h->gendisk[i] != NULL) 4261 put_disk(h->gendisk[i]); ··· 4340 /* we leak the DMA buffer here ... no choice since the controller could 4341 still complete the command. */ 4342 if (i == 10) { 4343 - printk(KERN_ERR "cciss: controller message %02x:%02x timed out\n", 4344 opcode, type); 4345 return -ETIMEDOUT; 4346 } ··· 4349 pci_free_consistent(pdev, cmd_sz, cmd, paddr64); 4350 4351 if (tag & 2) { 4352 - printk(KERN_ERR "cciss: controller message %02x:%02x failed\n", 4353 opcode, type); 4354 return -EIO; 4355 } 4356 4357 - printk(KERN_INFO "cciss: controller message %02x:%02x succeeded\n", 4358 opcode, type); 4359 return 0; 4360 } ··· 4375 if (pos) { 4376 pci_read_config_word(pdev, msi_control_reg(pos), &control); 4377 if (control & PCI_MSI_FLAGS_ENABLE) { 4378 - printk(KERN_INFO "cciss: resetting MSI\n"); 4379 pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSI_FLAGS_ENABLE); 4380 } 4381 } ··· 4384 if (pos) { 4385 pci_read_config_word(pdev, msi_control_reg(pos), &control); 4386 if (control & PCI_MSIX_FLAGS_ENABLE) { 4387 - printk(KERN_INFO "cciss: resetting MSI-X\n"); 4388 pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSIX_FLAGS_ENABLE); 4389 } 4390 } ··· 4392 return 0; 4393 } 4394 4395 - /* This does a hard reset of the controller using PCI power management 4396 - * states. 
*/ 4397 - static __devinit int cciss_hard_reset_controller(struct pci_dev *pdev) 4398 { 4399 - u16 pmcsr, saved_config_space[32]; 4400 - int i, pos; 4401 4402 - printk(KERN_INFO "cciss: using PCI PM to reset controller\n"); 4403 4404 - /* This is very nearly the same thing as 4405 4406 - pci_save_state(pci_dev); 4407 - pci_set_power_state(pci_dev, PCI_D3hot); 4408 - pci_set_power_state(pci_dev, PCI_D0); 4409 - pci_restore_state(pci_dev); 4410 4411 - but we can't use these nice canned kernel routines on 4412 - kexec, because they also check the MSI/MSI-X state in PCI 4413 - configuration space and do the wrong thing when it is 4414 - set/cleared. Also, the pci_save/restore_state functions 4415 - violate the ordering requirements for restoring the 4416 - configuration space from the CCISS document (see the 4417 - comment below). So we roll our own .... */ 4418 4419 for (i = 0; i < 32; i++) 4420 pci_read_config_word(pdev, 2*i, &saved_config_space[i]); 4421 4422 - pos = pci_find_capability(pdev, PCI_CAP_ID_PM); 4423 - if (pos == 0) { 4424 - printk(KERN_ERR "cciss_reset_controller: PCI PM not supported\n"); 4425 - return -ENODEV; 4426 } 4427 4428 - /* Quoting from the Open CISS Specification: "The Power 4429 - * Management Control/Status Register (CSR) controls the power 4430 - * state of the device. The normal operating state is D0, 4431 - * CSR=00h. The software off state is D3, CSR=03h. To reset 4432 - * the controller, place the interface device in D3 then to 4433 - * D0, this causes a secondary PCI reset which will reset the 4434 - * controller." */ 4435 4436 - /* enter the D3hot power management state */ 4437 - pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr); 4438 - pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 4439 - pmcsr |= PCI_D3hot; 4440 - pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); 4441 - 4442 - schedule_timeout_uninterruptible(HZ >> 1); 4443 - 4444 - /* enter the D0 power management state */ 4445 - pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 4446 - pmcsr |= PCI_D0; 4447 - pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); 4448 - 4449 - schedule_timeout_uninterruptible(HZ >> 1); 4450 4451 /* Restore the PCI configuration space. The Open CISS 4452 * Specification says, "Restore the PCI Configuration 4453 * Registers, offsets 00h through 60h. It is important to 4454 * restore the command register, 16-bits at offset 04h, 4455 * last. Do not restore the configuration status register, 4456 - * 16-bits at offset 06h." Note that the offset is 2*i. */ 4457 for (i = 0; i < 32; i++) { 4458 if (i == 2 || i == 3) 4459 continue; ··· 4538 wmb(); 4539 pci_write_config_word(pdev, 4, saved_config_space[2]); 4540 4541 return 0; 4542 } 4543 ··· 4612 int rc; 4613 int dac, return_code; 4614 InquiryData_struct *inq_buff; 4615 4616 - if (reset_devices) { 4617 - /* Reset the controller with a PCI power-cycle */ 4618 - if (cciss_hard_reset_controller(pdev) || cciss_reset_msi(pdev)) 4619 - return -ENODEV; 4620 - 4621 - /* Now try to get the controller to respond to a no-op. Some 4622 - devices (notably the HP Smart Array 5i Controller) need 4623 - up to 30 seconds to respond. 
*/ 4624 - for (i=0; i<30; i++) { 4625 - if (cciss_noop(pdev) == 0) 4626 - break; 4627 - 4628 - schedule_timeout_uninterruptible(HZ); 4629 - } 4630 - if (i == 30) { 4631 - printk(KERN_ERR "cciss: controller seems dead\n"); 4632 - return -EBUSY; 4633 - } 4634 - } 4635 - 4636 - i = alloc_cciss_hba(); 4637 if (i < 0) 4638 return -1; 4639 4640 - hba[i]->busy_initializing = 1; 4641 - INIT_HLIST_HEAD(&hba[i]->cmpQ); 4642 - INIT_HLIST_HEAD(&hba[i]->reqQ); 4643 - mutex_init(&hba[i]->busy_shutting_down); 4644 4645 - if (cciss_pci_init(hba[i], pdev) != 0) 4646 goto clean_no_release_regions; 4647 4648 - sprintf(hba[i]->devname, "cciss%d", i); 4649 - hba[i]->ctlr = i; 4650 - hba[i]->pdev = pdev; 4651 4652 - init_completion(&hba[i]->scan_wait); 4653 4654 - if (cciss_create_hba_sysfs_entry(hba[i])) 4655 goto clean0; 4656 4657 /* configure PCI DMA stuff */ ··· 4645 else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) 4646 dac = 0; 4647 else { 4648 - printk(KERN_ERR "cciss: no suitable DMA available\n"); 4649 goto clean1; 4650 } 4651 ··· 4655 * 8 controller support. 4656 */ 4657 if (i < MAX_CTLR_ORIG) 4658 - hba[i]->major = COMPAQ_CISS_MAJOR + i; 4659 - rc = register_blkdev(hba[i]->major, hba[i]->devname); 4660 if (rc == -EBUSY || rc == -EINVAL) { 4661 - printk(KERN_ERR 4662 - "cciss: Unable to get major number %d for %s " 4663 - "on hba %d\n", hba[i]->major, hba[i]->devname, i); 4664 goto clean1; 4665 } else { 4666 if (i >= MAX_CTLR_ORIG) 4667 - hba[i]->major = rc; 4668 } 4669 4670 /* make sure the board interrupts are off */ 4671 - hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF); 4672 - if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr, 4673 - IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) { 4674 - printk(KERN_ERR "cciss: Unable to get irq %d for %s\n", 4675 - hba[i]->intr[SIMPLE_MODE_INT], hba[i]->devname); 4676 - goto clean2; 4677 } 4678 4679 - printk(KERN_INFO "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n", 4680 - hba[i]->devname, pdev->device, pci_name(pdev), 4681 - hba[i]->intr[SIMPLE_MODE_INT], dac ? "" : " not"); 4682 4683 - hba[i]->cmd_pool_bits = 4684 - kmalloc(DIV_ROUND_UP(hba[i]->nr_cmds, BITS_PER_LONG) 4685 * sizeof(unsigned long), GFP_KERNEL); 4686 - hba[i]->cmd_pool = (CommandList_struct *) 4687 - pci_alloc_consistent(hba[i]->pdev, 4688 - hba[i]->nr_cmds * sizeof(CommandList_struct), 4689 - &(hba[i]->cmd_pool_dhandle)); 4690 - hba[i]->errinfo_pool = (ErrorInfo_struct *) 4691 - pci_alloc_consistent(hba[i]->pdev, 4692 - hba[i]->nr_cmds * sizeof(ErrorInfo_struct), 4693 - &(hba[i]->errinfo_pool_dhandle)); 4694 - if ((hba[i]->cmd_pool_bits == NULL) 4695 - || (hba[i]->cmd_pool == NULL) 4696 - || (hba[i]->errinfo_pool == NULL)) { 4697 - printk(KERN_ERR "cciss: out of memory"); 4698 goto clean4; 4699 } 4700 4701 /* Need space for temp scatter list */ 4702 - hba[i]->scatter_list = kmalloc(hba[i]->max_commands * 4703 sizeof(struct scatterlist *), 4704 GFP_KERNEL); 4705 - for (k = 0; k < hba[i]->nr_cmds; k++) { 4706 - hba[i]->scatter_list[k] = kmalloc(sizeof(struct scatterlist) * 4707 - hba[i]->maxsgentries, 4708 GFP_KERNEL); 4709 - if (hba[i]->scatter_list[k] == NULL) { 4710 - printk(KERN_ERR "cciss%d: could not allocate " 4711 - "s/g lists\n", i); 4712 goto clean4; 4713 } 4714 } 4715 - hba[i]->cmd_sg_list = cciss_allocate_sg_chain_blocks(hba[i], 4716 - hba[i]->chainsize, hba[i]->nr_cmds); 4717 - if (!hba[i]->cmd_sg_list && hba[i]->chainsize > 0) 4718 goto clean4; 4719 4720 - spin_lock_init(&hba[i]->lock); 4721 4722 /* Initialize the pdev driver private data. 
4723 - have it point to hba[i]. */ 4724 - pci_set_drvdata(pdev, hba[i]); 4725 /* command and error info recs zeroed out before 4726 they are used */ 4727 - memset(hba[i]->cmd_pool_bits, 0, 4728 - DIV_ROUND_UP(hba[i]->nr_cmds, BITS_PER_LONG) 4729 * sizeof(unsigned long)); 4730 4731 - hba[i]->num_luns = 0; 4732 - hba[i]->highest_lun = -1; 4733 for (j = 0; j < CISS_MAX_LUN; j++) { 4734 - hba[i]->drv[j] = NULL; 4735 - hba[i]->gendisk[j] = NULL; 4736 } 4737 4738 - cciss_scsi_setup(i); 4739 4740 /* Turn the interrupts on so we can service requests */ 4741 - hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON); 4742 4743 /* Get the firmware version */ 4744 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL); 4745 if (inq_buff == NULL) { 4746 - printk(KERN_ERR "cciss: out of memory\n"); 4747 goto clean4; 4748 } 4749 4750 - return_code = sendcmd_withirq(CISS_INQUIRY, i, inq_buff, 4751 sizeof(InquiryData_struct), 0, CTLR_LUNID, TYPE_CMD); 4752 if (return_code == IO_OK) { 4753 - hba[i]->firm_ver[0] = inq_buff->data_byte[32]; 4754 - hba[i]->firm_ver[1] = inq_buff->data_byte[33]; 4755 - hba[i]->firm_ver[2] = inq_buff->data_byte[34]; 4756 - hba[i]->firm_ver[3] = inq_buff->data_byte[35]; 4757 } else { /* send command failed */ 4758 - printk(KERN_WARNING "cciss: unable to determine firmware" 4759 " version of controller\n"); 4760 } 4761 kfree(inq_buff); 4762 4763 - cciss_procinit(i); 4764 4765 - hba[i]->cciss_max_sectors = 8192; 4766 4767 - rebuild_lun_table(hba[i], 1, 0); 4768 - hba[i]->busy_initializing = 0; 4769 return 1; 4770 4771 clean4: 4772 - kfree(hba[i]->cmd_pool_bits); 4773 /* Free up sg elements */ 4774 - for (k = 0; k < hba[i]->nr_cmds; k++) 4775 - kfree(hba[i]->scatter_list[k]); 4776 - kfree(hba[i]->scatter_list); 4777 - cciss_free_sg_chain_blocks(hba[i]->cmd_sg_list, hba[i]->nr_cmds); 4778 - if (hba[i]->cmd_pool) 4779 - pci_free_consistent(hba[i]->pdev, 4780 - hba[i]->nr_cmds * sizeof(CommandList_struct), 4781 - hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle); 4782 - if (hba[i]->errinfo_pool) 4783 - pci_free_consistent(hba[i]->pdev, 4784 - hba[i]->nr_cmds * sizeof(ErrorInfo_struct), 4785 - hba[i]->errinfo_pool, 4786 - hba[i]->errinfo_pool_dhandle); 4787 - free_irq(hba[i]->intr[SIMPLE_MODE_INT], hba[i]); 4788 clean2: 4789 - unregister_blkdev(hba[i]->major, hba[i]->devname); 4790 clean1: 4791 - cciss_destroy_hba_sysfs_entry(hba[i]); 4792 clean0: 4793 pci_release_regions(pdev); 4794 clean_no_release_regions: 4795 - hba[i]->busy_initializing = 0; 4796 4797 /* 4798 * Deliberately omit pci_disable_device(): it does something nasty to 4799 * Smart Array controllers that pci_enable_device does not undo 4800 */ 4801 pci_set_drvdata(pdev, NULL); 4802 - free_hba(i); 4803 return -1; 4804 } 4805 ··· 4822 h = pci_get_drvdata(pdev); 4823 flush_buf = kzalloc(4, GFP_KERNEL); 4824 if (!flush_buf) { 4825 - printk(KERN_WARNING 4826 - "cciss:%d cache not flushed, out of memory.\n", 4827 - h->ctlr); 4828 return; 4829 } 4830 /* write all data in the battery backed cache to disk */ 4831 memset(flush_buf, 0, 4); 4832 - return_code = sendcmd_withirq(CCISS_CACHE_FLUSH, h->ctlr, flush_buf, 4833 4, 0, CTLR_LUNID, TYPE_CMD); 4834 kfree(flush_buf); 4835 if (return_code != IO_OK) 4836 - printk(KERN_WARNING "cciss%d: Error flushing cache\n", 4837 - h->ctlr); 4838 h->access.set_intr_mask(h, CCISS_INTR_OFF); 4839 - free_irq(h->intr[2], h); 4840 } 4841 4842 static void __devexit cciss_remove_one(struct pci_dev *pdev) 4843 { 4844 - ctlr_info_t *tmp_ptr; 4845 int i, j; 4846 4847 if (pci_get_drvdata(pdev) == NULL) { 4848 - 
printk(KERN_ERR "cciss: Unable to remove device \n"); 4849 return; 4850 } 4851 4852 - tmp_ptr = pci_get_drvdata(pdev); 4853 - i = tmp_ptr->ctlr; 4854 if (hba[i] == NULL) { 4855 - printk(KERN_ERR "cciss: device appears to " 4856 - "already be removed \n"); 4857 return; 4858 } 4859 4860 - mutex_lock(&hba[i]->busy_shutting_down); 4861 4862 - remove_from_scan_list(hba[i]); 4863 - remove_proc_entry(hba[i]->devname, proc_cciss); 4864 - unregister_blkdev(hba[i]->major, hba[i]->devname); 4865 4866 /* remove it from the disk list */ 4867 for (j = 0; j < CISS_MAX_LUN; j++) { 4868 - struct gendisk *disk = hba[i]->gendisk[j]; 4869 if (disk) { 4870 struct request_queue *q = disk->queue; 4871 4872 if (disk->flags & GENHD_FL_UP) { 4873 - cciss_destroy_ld_sysfs_entry(hba[i], j, 1); 4874 del_gendisk(disk); 4875 } 4876 if (q) ··· 4875 } 4876 4877 #ifdef CONFIG_CISS_SCSI_TAPE 4878 - cciss_unregister_scsi(i); /* unhook from SCSI subsystem */ 4879 #endif 4880 4881 cciss_shutdown(pdev); 4882 4883 #ifdef CONFIG_PCI_MSI 4884 - if (hba[i]->msix_vector) 4885 - pci_disable_msix(hba[i]->pdev); 4886 - else if (hba[i]->msi_vector) 4887 - pci_disable_msi(hba[i]->pdev); 4888 #endif /* CONFIG_PCI_MSI */ 4889 4890 - iounmap(hba[i]->vaddr); 4891 4892 - pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(CommandList_struct), 4893 - hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle); 4894 - pci_free_consistent(hba[i]->pdev, hba[i]->nr_cmds * sizeof(ErrorInfo_struct), 4895 - hba[i]->errinfo_pool, hba[i]->errinfo_pool_dhandle); 4896 - kfree(hba[i]->cmd_pool_bits); 4897 /* Free up sg elements */ 4898 - for (j = 0; j < hba[i]->nr_cmds; j++) 4899 - kfree(hba[i]->scatter_list[j]); 4900 - kfree(hba[i]->scatter_list); 4901 - cciss_free_sg_chain_blocks(hba[i]->cmd_sg_list, hba[i]->nr_cmds); 4902 /* 4903 * Deliberately omit pci_disable_device(): it does something nasty to 4904 * Smart Array controllers that pci_enable_device does not undo 4905 */ 4906 pci_release_regions(pdev); 4907 pci_set_drvdata(pdev, NULL); 4908 - cciss_destroy_hba_sysfs_entry(hba[i]); 4909 - mutex_unlock(&hba[i]->busy_shutting_down); 4910 - free_hba(i); 4911 } 4912 4913 static struct pci_driver cciss_pci_driver = { ··· 4934 * array of them, the size must be a multiple of 8 bytes. 4935 */ 4936 BUILD_BUG_ON(sizeof(CommandList_struct) % COMMANDLIST_ALIGNMENT); 4937 - 4938 printk(KERN_INFO DRIVER_NAME "\n"); 4939 4940 err = bus_register(&cciss_bus_type); ··· 4970 /* double check that all controller entrys have been removed */ 4971 for (i = 0; i < MAX_CTLR; i++) { 4972 if (hba[i] != NULL) { 4973 - printk(KERN_WARNING "cciss: had to remove" 4974 - " controller %d\n", i); 4975 cciss_remove_one(hba[i]->pdev); 4976 } 4977 } 4978 kthread_stop(cciss_scan_thread); 4979 remove_proc_entry("driver/cciss", NULL); 4980 bus_unregister(&cciss_bus_type); 4981 - } 4982 - 4983 - static void fail_all_cmds(unsigned long ctlr) 4984 - { 4985 - /* If we get here, the board is apparently dead. */ 4986 - ctlr_info_t *h = hba[ctlr]; 4987 - CommandList_struct *c; 4988 - unsigned long flags; 4989 - 4990 - printk(KERN_WARNING "cciss%d: controller not responding.\n", h->ctlr); 4991 - h->alive = 0; /* the controller apparently died... */ 4992 - 4993 - spin_lock_irqsave(CCISS_LOCK(ctlr), flags); 4994 - 4995 - pci_disable_device(h->pdev); /* Make sure it is really dead. 
*/ 4996 - 4997 - /* move everything off the request queue onto the completed queue */ 4998 - while (!hlist_empty(&h->reqQ)) { 4999 - c = hlist_entry(h->reqQ.first, CommandList_struct, list); 5000 - removeQ(c); 5001 - h->Qdepth--; 5002 - addQ(&h->cmpQ, c); 5003 - } 5004 - 5005 - /* Now, fail everything on the completed queue with a HW error */ 5006 - while (!hlist_empty(&h->cmpQ)) { 5007 - c = hlist_entry(h->cmpQ.first, CommandList_struct, list); 5008 - removeQ(c); 5009 - if (c->cmd_type != CMD_MSG_STALE) 5010 - c->err_info->CommandStatus = CMD_HARDWARE_ERR; 5011 - if (c->cmd_type == CMD_RWREQ) { 5012 - complete_command(h, c, 0); 5013 - } else if (c->cmd_type == CMD_IOCTL_PEND) 5014 - complete(c->waiting); 5015 - #ifdef CONFIG_CISS_SCSI_TAPE 5016 - else if (c->cmd_type == CMD_SCSI) 5017 - complete_scsi_command(c, 0, 0); 5018 - #endif 5019 - } 5020 - spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); 5021 - return; 5022 } 5023 5024 module_init(cciss_init);
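The fail_all_cmds() path removed above captures the driver's dead-board handling: every command still sitting on the request queue (reqQ) is moved onto the completed queue (cmpQ) and then failed with a hardware error so that anything waiting on it is released. The sketch below is a minimal userspace model of that drain-and-fail pattern, assuming plain singly linked lists in place of the kernel's hlist and spinlock machinery; struct cmd, reqQ, cmpQ and CMD_HARDWARE_ERR only mirror the driver's names and are not the driver's actual types.

/*
 * Minimal model of the drain-and-fail pattern used by the removed
 * fail_all_cmds(): move everything off the request queue onto the
 * completed queue, then mark each entry with a hardware error.
 * Userspace sketch only; no locking, no real completion machinery.
 */
#include <stdio.h>

enum cmd_status { CMD_PENDING, CMD_HARDWARE_ERR };

struct cmd {
	int tag;
	enum cmd_status status;
	struct cmd *next;
};

/* Pop the head of one list and push it onto another. */
static struct cmd *move_head(struct cmd **from, struct cmd **to)
{
	struct cmd *c = *from;

	if (!c)
		return NULL;
	*from = c->next;
	c->next = *to;
	*to = c;
	return c;
}

static void fail_all_cmds(struct cmd **reqQ, struct cmd **cmpQ)
{
	struct cmd *c;

	/* Move everything off the request queue onto the completed queue. */
	while (*reqQ)
		move_head(reqQ, cmpQ);

	/* Now fail everything on the completed queue with a HW error. */
	for (c = *cmpQ; c; c = c->next) {
		c->status = CMD_HARDWARE_ERR;
		printf("cmd %d failed with hardware error\n", c->tag);
	}
}

int main(void)
{
	struct cmd cmds[3] = {
		{ .tag = 0, .status = CMD_PENDING, .next = &cmds[1] },
		{ .tag = 1, .status = CMD_PENDING, .next = &cmds[2] },
		{ .tag = 2, .status = CMD_PENDING, .next = NULL },
	};
	struct cmd *reqQ = &cmds[0], *cmpQ = NULL;

	fail_all_cmds(&reqQ, &cmpQ);
	return 0;
}

In the real driver the second walk also has to distinguish command types (CMD_RWREQ, CMD_IOCTL_PEND, CMD_SCSI) so each waiter is completed through the right mechanism; the model above collapses that to a single status update.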
··· 56 #include <linux/kthread.h> 57 58 #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin)) 59 + #define DRIVER_NAME "HP CISS Driver (v 3.6.26)" 60 + #define DRIVER_VERSION CCISS_DRIVER_VERSION(3, 6, 26) 61 62 /* Embedded module documentation macros - see modules.h */ 63 MODULE_AUTHOR("Hewlett-Packard Company"); 64 MODULE_DESCRIPTION("Driver for HP Smart Array Controllers"); 65 + MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers"); 66 + MODULE_VERSION("3.6.26"); 67 MODULE_LICENSE("GPL"); 68 69 static int cciss_allow_hpsa; ··· 107 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249}, 108 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A}, 109 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B}, 110 + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3250}, 111 + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3251}, 112 + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3252}, 113 + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3253}, 114 + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3254}, 115 {0,} 116 }; 117 ··· 146 {0x3249103C, "Smart Array P812", &SA5_access}, 147 {0x324A103C, "Smart Array P712m", &SA5_access}, 148 {0x324B103C, "Smart Array P711m", &SA5_access}, 149 + {0x3250103C, "Smart Array", &SA5_access}, 150 + {0x3251103C, "Smart Array", &SA5_access}, 151 + {0x3252103C, "Smart Array", &SA5_access}, 152 + {0x3253103C, "Smart Array", &SA5_access}, 153 + {0x3254103C, "Smart Array", &SA5_access}, 154 }; 155 156 /* How long to wait (in milliseconds) for board to go into simple mode */ ··· 167 static LIST_HEAD(scan_q); 168 169 static void do_cciss_request(struct request_queue *q); 170 + static irqreturn_t do_cciss_intx(int irq, void *dev_id); 171 + static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id); 172 static int cciss_open(struct block_device *bdev, fmode_t mode); 173 + static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode); 174 static int cciss_release(struct gendisk *disk, fmode_t mode); 175 + static int do_ioctl(struct block_device *bdev, fmode_t mode, 176 + unsigned int cmd, unsigned long arg); 177 static int cciss_ioctl(struct block_device *bdev, fmode_t mode, 178 unsigned int cmd, unsigned long arg); 179 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo); ··· 179 static int deregister_disk(ctlr_info_t *h, int drv_index, 180 int clear_all, int via_ioctl); 181 182 + static void cciss_read_capacity(ctlr_info_t *h, int logvol, 183 sector_t *total_size, unsigned int *block_size); 184 + static void cciss_read_capacity_16(ctlr_info_t *h, int logvol, 185 sector_t *total_size, unsigned int *block_size); 186 + static void cciss_geometry_inquiry(ctlr_info_t *h, int logvol, 187 sector_t total_size, 188 unsigned int block_size, InquiryData_struct *inq_buff, 189 drive_info_struct *drv); 190 + static void __devinit cciss_interrupt_mode(ctlr_info_t *); 191 static void start_io(ctlr_info_t *h); 192 + static int sendcmd_withirq(ctlr_info_t *h, __u8 cmd, void *buff, size_t size, 193 __u8 page_code, unsigned char scsi3addr[], 194 int cmd_type); 195 static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c, 196 int attempt_retry); 197 static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c); 198 199 static int add_to_scan_list(struct ctlr_info *h); 200 static int scan_thread(void *data); 201 static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c); ··· 205 static void cciss_device_release(struct device *dev); 206 static void 
cciss_free_gendisk(ctlr_info_t *h, int drv_index); 207 static void cciss_free_drive_info(ctlr_info_t *h, int drv_index); 208 + static inline u32 next_command(ctlr_info_t *h); 209 + static int __devinit cciss_find_cfg_addrs(struct pci_dev *pdev, 210 + void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index, 211 + u64 *cfg_offset); 212 + static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev, 213 + unsigned long *memory_bar); 214 + 215 + 216 + /* performant mode helper functions */ 217 + static void calc_bucket_map(int *bucket, int num_buckets, int nsgs, 218 + int *bucket_map); 219 + static void cciss_put_controller_into_performant_mode(ctlr_info_t *h); 220 221 #ifdef CONFIG_PROC_FS 222 + static void cciss_procinit(ctlr_info_t *h); 223 #else 224 + static void cciss_procinit(ctlr_info_t *h) 225 { 226 } 227 #endif /* CONFIG_PROC_FS */ ··· 221 222 static const struct block_device_operations cciss_fops = { 223 .owner = THIS_MODULE, 224 + .open = cciss_unlocked_open, 225 .release = cciss_release, 226 + .ioctl = do_ioctl, 227 .getgeo = cciss_getgeo, 228 #ifdef CONFIG_COMPAT 229 .compat_ioctl = cciss_compat_ioctl, 230 #endif 231 .revalidate_disk = cciss_revalidate, 232 }; 233 + 234 + /* set_performant_mode: Modify the tag for cciss performant 235 + * set bit 0 for pull model, bits 3-1 for block fetch 236 + * register number 237 + */ 238 + static void set_performant_mode(ctlr_info_t *h, CommandList_struct *c) 239 + { 240 + if (likely(h->transMethod == CFGTBL_Trans_Performant)) 241 + c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); 242 + } 243 244 /* 245 * Enqueuing and dequeuing functions for cmdlists. ··· 255 } 256 257 hlist_del_init(&c->list); 258 + } 259 + 260 + static void enqueue_cmd_and_start_io(ctlr_info_t *h, 261 + CommandList_struct *c) 262 + { 263 + unsigned long flags; 264 + set_performant_mode(h, c); 265 + spin_lock_irqsave(&h->lock, flags); 266 + addQ(&h->reqQ, c); 267 + h->Qdepth++; 268 + start_io(h); 269 + spin_unlock_irqrestore(&h->lock, flags); 270 } 271 272 static void cciss_free_sg_chain_blocks(SGDescriptor_struct **cmd_sg_list, ··· 366 h->product_name, 367 (unsigned long)h->board_id, 368 h->firm_ver[0], h->firm_ver[1], h->firm_ver[2], 369 + h->firm_ver[3], (unsigned int)h->intr[PERF_MODE_INT], 370 h->num_luns, 371 h->Qdepth, h->commands_outstanding, 372 h->maxQsinceinit, h->max_outstanding, h->maxSG); 373 374 #ifdef CONFIG_CISS_SCSI_TAPE 375 + cciss_seq_tape_report(seq, h); 376 #endif /* CONFIG_CISS_SCSI_TAPE */ 377 } 378 379 static void *cciss_seq_start(struct seq_file *seq, loff_t *pos) 380 { 381 ctlr_info_t *h = seq->private; 382 unsigned long flags; 383 384 /* prevent displaying bogus info during configuration 385 * or deconfiguration of a logical volume 386 */ 387 + spin_lock_irqsave(&h->lock, flags); 388 if (h->busy_configuring) { 389 + spin_unlock_irqrestore(&h->lock, flags); 390 return ERR_PTR(-EBUSY); 391 } 392 h->busy_configuring = 1; 393 + spin_unlock_irqrestore(&h->lock, flags); 394 395 if (*pos == 0) 396 cciss_seq_show_header(seq); ··· 499 struct seq_file *seq = file->private_data; 500 ctlr_info_t *h = seq->private; 501 502 + err = cciss_engage_scsi(h); 503 if (err == 0) 504 err = length; 505 } else ··· 522 .write = cciss_proc_write, 523 }; 524 525 + static void __devinit cciss_procinit(ctlr_info_t *h) 526 { 527 struct proc_dir_entry *pde; 528 ··· 530 proc_cciss = proc_mkdir("driver/cciss", NULL); 531 if (!proc_cciss) 532 return; 533 + pde = proc_create_data(h->devname, S_IWUSR | S_IRUSR | S_IRGRP | 534 S_IROTH, proc_cciss, 535 + 
&cciss_proc_fops, h); 536 } 537 #endif /* CONFIG_PROC_FS */ 538 ··· 565 unsigned long flags; 566 int ret = 0; 567 568 + spin_lock_irqsave(&h->lock, flags); 569 if (h->busy_configuring) 570 ret = -EBUSY; 571 else 572 memcpy(sn, drv->serial_no, sizeof(sn)); 573 + spin_unlock_irqrestore(&h->lock, flags); 574 575 if (ret) 576 return ret; ··· 595 unsigned long flags; 596 int ret = 0; 597 598 + spin_lock_irqsave(&h->lock, flags); 599 if (h->busy_configuring) 600 ret = -EBUSY; 601 else 602 memcpy(vendor, drv->vendor, VENDOR_LEN + 1); 603 + spin_unlock_irqrestore(&h->lock, flags); 604 605 if (ret) 606 return ret; ··· 619 unsigned long flags; 620 int ret = 0; 621 622 + spin_lock_irqsave(&h->lock, flags); 623 if (h->busy_configuring) 624 ret = -EBUSY; 625 else 626 memcpy(model, drv->model, MODEL_LEN + 1); 627 + spin_unlock_irqrestore(&h->lock, flags); 628 629 if (ret) 630 return ret; ··· 643 unsigned long flags; 644 int ret = 0; 645 646 + spin_lock_irqsave(&h->lock, flags); 647 if (h->busy_configuring) 648 ret = -EBUSY; 649 else 650 memcpy(rev, drv->rev, REV_LEN + 1); 651 + spin_unlock_irqrestore(&h->lock, flags); 652 653 if (ret) 654 return ret; ··· 665 unsigned long flags; 666 unsigned char lunid[8]; 667 668 + spin_lock_irqsave(&h->lock, flags); 669 if (h->busy_configuring) { 670 + spin_unlock_irqrestore(&h->lock, flags); 671 return -EBUSY; 672 } 673 if (!drv->heads) { 674 + spin_unlock_irqrestore(&h->lock, flags); 675 return -ENOTTY; 676 } 677 memcpy(lunid, drv->LunID, sizeof(lunid)); 678 + spin_unlock_irqrestore(&h->lock, flags); 679 return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 680 lunid[0], lunid[1], lunid[2], lunid[3], 681 lunid[4], lunid[5], lunid[6], lunid[7]); ··· 690 int raid; 691 unsigned long flags; 692 693 + spin_lock_irqsave(&h->lock, flags); 694 if (h->busy_configuring) { 695 + spin_unlock_irqrestore(&h->lock, flags); 696 return -EBUSY; 697 } 698 raid = drv->raid_level; 699 + spin_unlock_irqrestore(&h->lock, flags); 700 if (raid < 0 || raid > RAID_UNKNOWN) 701 raid = RAID_UNKNOWN; 702 ··· 713 unsigned long flags; 714 int count; 715 716 + spin_lock_irqsave(&h->lock, flags); 717 if (h->busy_configuring) { 718 + spin_unlock_irqrestore(&h->lock, flags); 719 return -EBUSY; 720 } 721 count = drv->usage_count; 722 + spin_unlock_irqrestore(&h->lock, flags); 723 return snprintf(buf, 20, "%d\n", count); 724 } 725 static DEVICE_ATTR(usage_count, S_IRUGO, cciss_show_usage_count, NULL); ··· 864 /* 865 * For operations that cannot sleep, a command block is allocated at init, 866 * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track 867 + * which ones are free or in use. 
868 */ 869 + static CommandList_struct *cmd_alloc(ctlr_info_t *h) 870 { 871 CommandList_struct *c; 872 int i; 873 u64bit temp64; 874 dma_addr_t cmd_dma_handle, err_dma_handle; 875 876 + do { 877 + i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds); 878 + if (i == h->nr_cmds) 879 return NULL; 880 + } while (test_and_set_bit(i & (BITS_PER_LONG - 1), 881 + h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0); 882 + c = h->cmd_pool + i; 883 + memset(c, 0, sizeof(CommandList_struct)); 884 + cmd_dma_handle = h->cmd_pool_dhandle + i * sizeof(CommandList_struct); 885 + c->err_info = h->errinfo_pool + i; 886 + memset(c->err_info, 0, sizeof(ErrorInfo_struct)); 887 + err_dma_handle = h->errinfo_pool_dhandle 888 + + i * sizeof(ErrorInfo_struct); 889 + h->nr_allocs++; 890 891 + c->cmdindex = i; 892 893 INIT_HLIST_NODE(&c->list); 894 c->busaddr = (__u32) cmd_dma_handle; ··· 930 return c; 931 } 932 933 + /* allocate a command using pci_alloc_consistent, used for ioctls, 934 + * etc., not for the main i/o path. 935 */ 936 + static CommandList_struct *cmd_special_alloc(ctlr_info_t *h) 937 + { 938 + CommandList_struct *c; 939 + u64bit temp64; 940 + dma_addr_t cmd_dma_handle, err_dma_handle; 941 + 942 + c = (CommandList_struct *) pci_alloc_consistent(h->pdev, 943 + sizeof(CommandList_struct), &cmd_dma_handle); 944 + if (c == NULL) 945 + return NULL; 946 + memset(c, 0, sizeof(CommandList_struct)); 947 + 948 + c->cmdindex = -1; 949 + 950 + c->err_info = (ErrorInfo_struct *) 951 + pci_alloc_consistent(h->pdev, sizeof(ErrorInfo_struct), 952 + &err_dma_handle); 953 + 954 + if (c->err_info == NULL) { 955 + pci_free_consistent(h->pdev, 956 + sizeof(CommandList_struct), c, cmd_dma_handle); 957 + return NULL; 958 + } 959 + memset(c->err_info, 0, sizeof(ErrorInfo_struct)); 960 + 961 + INIT_HLIST_NODE(&c->list); 962 + c->busaddr = (__u32) cmd_dma_handle; 963 + temp64.val = (__u64) err_dma_handle; 964 + c->ErrDesc.Addr.lower = temp64.val32.lower; 965 + c->ErrDesc.Addr.upper = temp64.val32.upper; 966 + c->ErrDesc.Len = sizeof(ErrorInfo_struct); 967 + 968 + c->ctlr = h->ctlr; 969 + return c; 970 + } 971 + 972 + static void cmd_free(ctlr_info_t *h, CommandList_struct *c) 973 { 974 int i; 975 + 976 + i = c - h->cmd_pool; 977 + clear_bit(i & (BITS_PER_LONG - 1), 978 + h->cmd_pool_bits + (i / BITS_PER_LONG)); 979 + h->nr_frees++; 980 + } 981 + 982 + static void cmd_special_free(ctlr_info_t *h, CommandList_struct *c) 983 + { 984 u64bit temp64; 985 986 + temp64.val32.lower = c->ErrDesc.Addr.lower; 987 + temp64.val32.upper = c->ErrDesc.Addr.upper; 988 + pci_free_consistent(h->pdev, sizeof(ErrorInfo_struct), 989 + c->err_info, (dma_addr_t) temp64.val); 990 + pci_free_consistent(h->pdev, sizeof(CommandList_struct), 991 + c, (dma_addr_t) c->busaddr); 992 } 993 994 static inline ctlr_info_t *get_host(struct gendisk *disk) ··· 968 */ 969 static int cciss_open(struct block_device *bdev, fmode_t mode) 970 { 971 + ctlr_info_t *h = get_host(bdev->bd_disk); 972 drive_info_struct *drv = get_drv(bdev->bd_disk); 973 974 + dev_dbg(&h->pdev->dev, "cciss_open %s\n", bdev->bd_disk->disk_name); 975 if (drv->busy_configuring) 976 return -EBUSY; 977 /* ··· 1000 return -EPERM; 1001 } 1002 drv->usage_count++; 1003 + h->usage_count++; 1004 return 0; 1005 + } 1006 + 1007 + static int cciss_unlocked_open(struct block_device *bdev, fmode_t mode) 1008 + { 1009 + int ret; 1010 + 1011 + lock_kernel(); 1012 + ret = cciss_open(bdev, mode); 1013 + unlock_kernel(); 1014 + 1015 + return ret; 1016 } 1017 1018 /* ··· 1009 */ 1010 static int cciss_release(struct 
gendisk *disk, fmode_t mode) 1011 { 1012 + ctlr_info_t *h; 1013 + drive_info_struct *drv; 1014 1015 + lock_kernel(); 1016 + h = get_host(disk); 1017 + drv = get_drv(disk); 1018 + dev_dbg(&h->pdev->dev, "cciss_release %s\n", disk->disk_name); 1019 drv->usage_count--; 1020 + h->usage_count--; 1021 + unlock_kernel(); 1022 return 0; 1023 } 1024 1025 static int do_ioctl(struct block_device *bdev, fmode_t mode, 1026 unsigned cmd, unsigned long arg) ··· 1032 unlock_kernel(); 1033 return ret; 1034 } 1035 + 1036 + #ifdef CONFIG_COMPAT 1037 1038 static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode, 1039 unsigned cmd, unsigned long arg); ··· 1163 return 0; 1164 } 1165 1166 + static void check_ioctl_unit_attention(ctlr_info_t *h, CommandList_struct *c) 1167 { 1168 if (c->err_info->CommandStatus == CMD_TARGET_STATUS && 1169 c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) 1170 + (void)check_for_unit_attention(h, c); 1171 } 1172 /* 1173 * ioctl ··· 1176 unsigned int cmd, unsigned long arg) 1177 { 1178 struct gendisk *disk = bdev->bd_disk; 1179 + ctlr_info_t *h = get_host(disk); 1180 drive_info_struct *drv = get_drv(disk); 1181 void __user *argp = (void __user *)arg; 1182 1183 + dev_dbg(&h->pdev->dev, "cciss_ioctl: Called with cmd=%x %lx\n", 1184 + cmd, arg); 1185 switch (cmd) { 1186 case CCISS_GETPCIINFO: 1187 { ··· 1192 1193 if (!arg) 1194 return -EINVAL; 1195 + pciinfo.domain = pci_domain_nr(h->pdev->bus); 1196 + pciinfo.bus = h->pdev->bus->number; 1197 + pciinfo.dev_fn = h->pdev->devfn; 1198 + pciinfo.board_id = h->board_id; 1199 if (copy_to_user 1200 (argp, &pciinfo, sizeof(cciss_pci_info_struct))) 1201 return -EFAULT; ··· 1207 if (!arg) 1208 return -EINVAL; 1209 intinfo.delay = 1210 + readl(&h->cfgtable->HostWrite.CoalIntDelay); 1211 intinfo.count = 1212 + readl(&h->cfgtable->HostWrite.CoalIntCount); 1213 if (copy_to_user 1214 (argp, &intinfo, sizeof(cciss_coalint_struct))) 1215 return -EFAULT; ··· 1229 (&intinfo, argp, sizeof(cciss_coalint_struct))) 1230 return -EFAULT; 1231 if ((intinfo.delay == 0) && (intinfo.count == 0)) 1232 return -EINVAL; 1233 + spin_lock_irqsave(&h->lock, flags); 1234 /* Update the field, and then ring the doorbell */ 1235 writel(intinfo.delay, 1236 + &(h->cfgtable->HostWrite.CoalIntDelay)); 1237 writel(intinfo.count, 1238 + &(h->cfgtable->HostWrite.CoalIntCount)); 1239 + writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); 1240 1241 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) { 1242 + if (!(readl(h->vaddr + SA5_DOORBELL) 1243 & CFGTBL_ChangeReq)) 1244 break; 1245 /* delay and try again */ 1246 udelay(1000); 1247 } 1248 + spin_unlock_irqrestore(&h->lock, flags); 1249 if (i >= MAX_IOCTL_CONFIG_WAIT) 1250 return -EAGAIN; 1251 return 0; ··· 1262 return -EINVAL; 1263 for (i = 0; i < 16; i++) 1264 NodeName[i] = 1265 + readb(&h->cfgtable->ServerName[i]); 1266 if (copy_to_user(argp, NodeName, sizeof(NodeName_type))) 1267 return -EFAULT; 1268 return 0; ··· 1282 (NodeName, argp, sizeof(NodeName_type))) 1283 return -EFAULT; 1284 1285 + spin_lock_irqsave(&h->lock, flags); 1286 1287 /* Update the field, and then ring the doorbell */ 1288 for (i = 0; i < 16; i++) 1289 writeb(NodeName[i], 1290 + &h->cfgtable->ServerName[i]); 1291 1292 + writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); 1293 1294 for (i = 0; i < MAX_IOCTL_CONFIG_WAIT; i++) { 1295 + if (!(readl(h->vaddr + SA5_DOORBELL) 1296 & CFGTBL_ChangeReq)) 1297 break; 1298 /* delay and try again */ 1299 udelay(1000); 1300 } 1301 + spin_unlock_irqrestore(&h->lock, flags); 1302 if (i >= MAX_IOCTL_CONFIG_WAIT) 
1303 return -EAGAIN; 1304 return 0; ··· 1310 1311 if (!arg) 1312 return -EINVAL; 1313 + heartbeat = readl(&h->cfgtable->HeartBeat); 1314 if (copy_to_user 1315 (argp, &heartbeat, sizeof(Heartbeat_type))) 1316 return -EFAULT; ··· 1322 1323 if (!arg) 1324 return -EINVAL; 1325 + BusTypes = readl(&h->cfgtable->BusTypes); 1326 if (copy_to_user 1327 (argp, &BusTypes, sizeof(BusTypes_type))) 1328 return -EFAULT; ··· 1334 1335 if (!arg) 1336 return -EINVAL; 1337 + memcpy(firmware, h->firm_ver, 4); 1338 1339 if (copy_to_user 1340 (argp, firmware, sizeof(FirmwareVer_type))) ··· 1357 case CCISS_DEREGDISK: 1358 case CCISS_REGNEWD: 1359 case CCISS_REVALIDVOLS: 1360 + return rebuild_lun_table(h, 0, 1); 1361 1362 case CCISS_GETLUNINFO:{ 1363 LogvolInfo_struct luninfo; ··· 1377 CommandList_struct *c; 1378 char *buff = NULL; 1379 u64bit temp64; 1380 DECLARE_COMPLETION_ONSTACK(wait); 1381 1382 if (!arg) ··· 1413 } else { 1414 memset(buff, 0, iocommand.buf_size); 1415 } 1416 + c = cmd_special_alloc(h); 1417 + if (!c) { 1418 kfree(buff); 1419 return -ENOMEM; 1420 } ··· 1439 1440 /* Fill in the scatter gather information */ 1441 if (iocommand.buf_size > 0) { 1442 + temp64.val = pci_map_single(h->pdev, buff, 1443 iocommand.buf_size, 1444 PCI_DMA_BIDIRECTIONAL); 1445 c->SG[0].Addr.lower = temp64.val32.lower; ··· 1449 } 1450 c->waiting = &wait; 1451 1452 + enqueue_cmd_and_start_io(h, c); 1453 wait_for_completion(&wait); 1454 1455 /* unlock the buffers from DMA */ 1456 temp64.val32.lower = c->SG[0].Addr.lower; 1457 temp64.val32.upper = c->SG[0].Addr.upper; 1458 + pci_unmap_single(h->pdev, (dma_addr_t) temp64.val, 1459 iocommand.buf_size, 1460 PCI_DMA_BIDIRECTIONAL); 1461 1462 + check_ioctl_unit_attention(h, c); 1463 1464 /* Copy the error information out */ 1465 iocommand.error_info = *(c->err_info); 1466 if (copy_to_user 1467 (argp, &iocommand, sizeof(IOCTL_Command_struct))) { 1468 kfree(buff); 1469 + cmd_special_free(h, c); 1470 return -EFAULT; 1471 } 1472 ··· 1481 if (copy_to_user 1482 (iocommand.buf, buff, iocommand.buf_size)) { 1483 kfree(buff); 1484 + cmd_special_free(h, c); 1485 return -EFAULT; 1486 } 1487 } 1488 kfree(buff); 1489 + cmd_special_free(h, c); 1490 return 0; 1491 } 1492 case CCISS_BIG_PASSTHRU:{ ··· 1495 unsigned char **buff = NULL; 1496 int *buff_size = NULL; 1497 u64bit temp64; 1498 BYTE sg_used = 0; 1499 int status = 0; 1500 int i; ··· 1569 data_ptr += sz; 1570 sg_used++; 1571 } 1572 + c = cmd_special_alloc(h); 1573 + if (!c) { 1574 status = -ENOMEM; 1575 goto cleanup1; 1576 } ··· 1590 if (ioc->buf_size > 0) { 1591 for (i = 0; i < sg_used; i++) { 1592 temp64.val = 1593 + pci_map_single(h->pdev, buff[i], 1594 buff_size[i], 1595 PCI_DMA_BIDIRECTIONAL); 1596 c->SG[i].Addr.lower = ··· 1602 } 1603 } 1604 c->waiting = &wait; 1605 + enqueue_cmd_and_start_io(h, c); 1606 wait_for_completion(&wait); 1607 /* unlock the buffers from DMA */ 1608 for (i = 0; i < sg_used; i++) { 1609 temp64.val32.lower = c->SG[i].Addr.lower; 1610 temp64.val32.upper = c->SG[i].Addr.upper; 1611 + pci_unmap_single(h->pdev, 1612 (dma_addr_t) temp64.val, buff_size[i], 1613 PCI_DMA_BIDIRECTIONAL); 1614 } 1615 + check_ioctl_unit_attention(h, c); 1616 /* Copy the error information out */ 1617 ioc->error_info = *(c->err_info); 1618 if (copy_to_user(argp, ioc, sizeof(*ioc))) { 1619 + cmd_special_free(h, c); 1620 status = -EFAULT; 1621 goto cleanup1; 1622 } ··· 1631 for (i = 0; i < sg_used; i++) { 1632 if (copy_to_user 1633 (ptr, buff[i], buff_size[i])) { 1634 + cmd_special_free(h, c); 1635 status = -EFAULT; 1636 goto cleanup1; 1637 } 
1638 ptr += buff_size[i]; 1639 } 1640 } 1641 + cmd_special_free(h, c); 1642 status = 0; 1643 cleanup1: 1644 if (buff) { ··· 1726 1727 static void cciss_softirq_done(struct request *rq) 1728 { 1729 + CommandList_struct *c = rq->completion_data; 1730 + ctlr_info_t *h = hba[c->ctlr]; 1731 + SGDescriptor_struct *curr_sg = c->SG; 1732 u64bit temp64; 1733 + unsigned long flags; 1734 int i, ddir; 1735 int sg_index = 0; 1736 1737 + if (c->Request.Type.Direction == XFER_READ) 1738 ddir = PCI_DMA_FROMDEVICE; 1739 else 1740 ddir = PCI_DMA_TODEVICE; 1741 1742 /* command did not need to be retried */ 1743 /* unmap the DMA mapping for all the scatter gather elements */ 1744 + for (i = 0; i < c->Header.SGList; i++) { 1745 if (curr_sg[sg_index].Ext == CCISS_SG_CHAIN) { 1746 + cciss_unmap_sg_chain_block(h, c); 1747 /* Point to the next block */ 1748 + curr_sg = h->cmd_sg_list[c->cmdindex]; 1749 sg_index = 0; 1750 } 1751 temp64.val32.lower = curr_sg[sg_index].Addr.lower; ··· 1755 ++sg_index; 1756 } 1757 1758 + dev_dbg(&h->pdev->dev, "Done with %p\n", rq); 1759 1760 /* set the residual count for pc requests */ 1761 + if (rq->cmd_type == REQ_TYPE_BLOCK_PC) 1762 + rq->resid_len = c->err_info->ResidualCnt; 1763 1764 blk_end_request_all(rq, (rq->errors == 0) ? 0 : -EIO); 1765 1766 spin_lock_irqsave(&h->lock, flags); 1767 + cmd_free(h, c); 1768 cciss_check_queues(h); 1769 spin_unlock_irqrestore(&h->lock, flags); 1770 } ··· 1782 * via the inquiry page 0. Model, vendor, and rev are set to empty strings if 1783 * they cannot be read. 1784 */ 1785 + static void cciss_get_device_descr(ctlr_info_t *h, int logvol, 1786 char *vendor, char *model, char *rev) 1787 { 1788 int rc; ··· 1797 if (!inq_buf) 1798 return; 1799 1800 + log_unit_to_scsi3addr(h, scsi3addr, logvol); 1801 + rc = sendcmd_withirq(h, CISS_INQUIRY, inq_buf, sizeof(*inq_buf), 0, 1802 scsi3addr, TYPE_CMD); 1803 if (rc == IO_OK) { 1804 memcpy(vendor, &inq_buf->data_byte[8], VENDOR_LEN); ··· 1818 * number cannot be had, for whatever reason, 16 bytes of 0xff 1819 * are returned instead. 1820 */ 1821 + static void cciss_get_serial_no(ctlr_info_t *h, int logvol, 1822 unsigned char *serial_no, int buflen) 1823 { 1824 #define PAGE_83_INQ_BYTES 64 ··· 1833 if (!buf) 1834 return; 1835 memset(serial_no, 0, buflen); 1836 + log_unit_to_scsi3addr(h, scsi3addr, logvol); 1837 + rc = sendcmd_withirq(h, CISS_INQUIRY, buf, 1838 PAGE_83_INQ_BYTES, 0x83, scsi3addr, TYPE_CMD); 1839 if (rc == IO_OK) 1840 memcpy(serial_no, &buf[8], buflen); ··· 1900 * is also the controller node. Any changes to disk 0 will show up on 1901 * the next reboot. 
1902 */ 1903 + static void cciss_update_drive_info(ctlr_info_t *h, int drv_index, 1904 + int first_time, int via_ioctl) 1905 { 1906 struct gendisk *disk; 1907 InquiryData_struct *inq_buff = NULL; 1908 unsigned int block_size; ··· 1920 1921 /* testing to see if 16-byte CDBs are already being used */ 1922 if (h->cciss_read == CCISS_READ_16) { 1923 + cciss_read_capacity_16(h, drv_index, 1924 &total_size, &block_size); 1925 1926 } else { 1927 + cciss_read_capacity(h, drv_index, &total_size, &block_size); 1928 /* if read_capacity returns all F's this volume is >2TB */ 1929 /* in size so we switch to 16-byte CDB's for all */ 1930 /* read/write ops */ 1931 if (total_size == 0xFFFFFFFFULL) { 1932 + cciss_read_capacity_16(h, drv_index, 1933 &total_size, &block_size); 1934 h->cciss_read = CCISS_READ_16; 1935 h->cciss_write = CCISS_WRITE_16; ··· 1939 } 1940 } 1941 1942 + cciss_geometry_inquiry(h, drv_index, total_size, block_size, 1943 inq_buff, drvinfo); 1944 drvinfo->block_size = block_size; 1945 drvinfo->nr_blocks = total_size + 1; 1946 1947 + cciss_get_device_descr(h, drv_index, drvinfo->vendor, 1948 drvinfo->model, drvinfo->rev); 1949 + cciss_get_serial_no(h, drv_index, drvinfo->serial_no, 1950 sizeof(drvinfo->serial_no)); 1951 /* Save the lunid in case we deregister the disk, below. */ 1952 memcpy(drvinfo->LunID, h->drv[drv_index]->LunID, ··· 1971 * (unless it's the first disk (for the controller node). 1972 */ 1973 if (h->drv[drv_index]->raid_level != -1 && drv_index != 0) { 1974 + dev_warn(&h->pdev->dev, "disk %d has changed.\n", drv_index); 1975 + spin_lock_irqsave(&h->lock, flags); 1976 h->drv[drv_index]->busy_configuring = 1; 1977 + spin_unlock_irqrestore(&h->lock, flags); 1978 1979 /* deregister_disk sets h->drv[drv_index]->queue = NULL 1980 * which keeps the interrupt handler from starting ··· 2024 if (cciss_add_disk(h, disk, drv_index) != 0) { 2025 cciss_free_gendisk(h, drv_index); 2026 cciss_free_drive_info(h, drv_index); 2027 + dev_warn(&h->pdev->dev, "could not update disk %d\n", 2028 + drv_index); 2029 --h->num_luns; 2030 } 2031 } ··· 2035 kfree(drvinfo); 2036 return; 2037 mem_msg: 2038 + dev_err(&h->pdev->dev, "out of memory\n"); 2039 goto freeret; 2040 } 2041 ··· 2127 h->gendisk[drv_index] = 2128 alloc_disk(1 << NWD_SHIFT); 2129 if (!h->gendisk[drv_index]) { 2130 + dev_err(&h->pdev->dev, 2131 + "could not allocate a new disk %d\n", 2132 + drv_index); 2133 goto err_free_drive_info; 2134 } 2135 } ··· 2180 cciss_free_gendisk(h, drv_index); 2181 cciss_free_drive_info(h, drv_index); 2182 error: 2183 + dev_warn(&h->pdev->dev, "could not add disk 0.\n"); 2184 return; 2185 } 2186 ··· 2196 static int rebuild_lun_table(ctlr_info_t *h, int first_time, 2197 int via_ioctl) 2198 { 2199 int num_luns; 2200 ReportLunData_struct *ld_buff = NULL; 2201 int return_code; ··· 2211 return -EPERM; 2212 2213 /* Set busy_configuring flag for this operation */ 2214 + spin_lock_irqsave(&h->lock, flags); 2215 if (h->busy_configuring) { 2216 + spin_unlock_irqrestore(&h->lock, flags); 2217 return -EBUSY; 2218 } 2219 h->busy_configuring = 1; 2220 + spin_unlock_irqrestore(&h->lock, flags); 2221 2222 ld_buff = kzalloc(sizeof(ReportLunData_struct), GFP_KERNEL); 2223 if (ld_buff == NULL) 2224 goto mem_msg; 2225 2226 + return_code = sendcmd_withirq(h, CISS_REPORT_LOG, ld_buff, 2227 sizeof(ReportLunData_struct), 2228 0, CTLR_LUNID, TYPE_CMD); 2229 2230 if (return_code == IO_OK) 2231 listlength = be32_to_cpu(*(__be32 *) ld_buff->LUNListLength); 2232 else { /* reading number of logical volumes failed */ 2233 + 
dev_warn(&h->pdev->dev, 2234 + "report logical volume command failed\n"); 2235 listlength = 0; 2236 goto freeret; 2237 } ··· 2239 num_luns = listlength / 8; /* 8 bytes per entry */ 2240 if (num_luns > CISS_MAX_LUN) { 2241 num_luns = CISS_MAX_LUN; 2242 + dev_warn(&h->pdev->dev, "more luns configured" 2243 " on controller than can be handled by" 2244 " this driver.\n"); 2245 } ··· 2270 } 2271 if (!drv_found) { 2272 /* Deregister it from the OS, it's gone. */ 2273 + spin_lock_irqsave(&h->lock, flags); 2274 h->drv[i]->busy_configuring = 1; 2275 + spin_unlock_irqrestore(&h->lock, flags); 2276 return_code = deregister_disk(h, i, 1, via_ioctl); 2277 if (h->drv[i] != NULL) 2278 h->drv[i]->busy_configuring = 0; ··· 2311 if (drv_index == -1) 2312 goto freeret; 2313 } 2314 + cciss_update_drive_info(h, drv_index, first_time, via_ioctl); 2315 } /* end for */ 2316 2317 freeret: ··· 2324 */ 2325 return -1; 2326 mem_msg: 2327 + dev_err(&h->pdev->dev, "out of memory\n"); 2328 h->busy_configuring = 0; 2329 goto freeret; 2330 } ··· 2444 return 0; 2445 } 2446 2447 + static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff, 2448 size_t size, __u8 page_code, unsigned char *scsi3addr, 2449 int cmd_type) 2450 { 2451 u64bit buff_dma_handle; 2452 int status = IO_OK; 2453 ··· 2532 c->Request.Timeout = 0; 2533 break; 2534 default: 2535 + dev_warn(&h->pdev->dev, "Unknown Command 0x%c\n", cmd); 2536 return IO_ERROR; 2537 } 2538 } else if (cmd_type == TYPE_MSG) { ··· 2565 c->Request.CDB[0] = cmd; 2566 break; 2567 default: 2568 + dev_warn(&h->pdev->dev, 2569 + "unknown message type %d\n", cmd); 2570 return IO_ERROR; 2571 } 2572 } else { 2573 + dev_warn(&h->pdev->dev, "unknown command type %d\n", cmd_type); 2574 return IO_ERROR; 2575 } 2576 /* Fill in the scatter gather information */ ··· 2599 default: 2600 if (check_for_unit_attention(h, c)) 2601 return IO_NEEDS_RETRY; 2602 + dev_warn(&h->pdev->dev, "cmd 0x%02x " 2603 "check condition, sense key = 0x%02x\n", 2604 + c->Request.CDB[0], c->err_info->SenseInfo[2]); 2605 } 2606 break; 2607 default: 2608 + dev_warn(&h->pdev->dev, "cmd 0x%02x" 2609 + "scsi status = 0x%02x\n", 2610 c->Request.CDB[0], c->err_info->ScsiStatus); 2611 break; 2612 } ··· 2630 /* expected for inquiry and report lun commands */ 2631 break; 2632 case CMD_INVALID: 2633 + dev_warn(&h->pdev->dev, "cmd 0x%02x is " 2634 "reported invalid\n", c->Request.CDB[0]); 2635 return_status = IO_ERROR; 2636 break; 2637 case CMD_PROTOCOL_ERR: 2638 + dev_warn(&h->pdev->dev, "cmd 0x%02x has " 2639 + "protocol error\n", c->Request.CDB[0]); 2640 return_status = IO_ERROR; 2641 break; 2642 case CMD_HARDWARE_ERR: 2643 + dev_warn(&h->pdev->dev, "cmd 0x%02x had " 2644 " hardware error\n", c->Request.CDB[0]); 2645 return_status = IO_ERROR; 2646 break; 2647 case CMD_CONNECTION_LOST: 2648 + dev_warn(&h->pdev->dev, "cmd 0x%02x had " 2649 "connection lost\n", c->Request.CDB[0]); 2650 return_status = IO_ERROR; 2651 break; 2652 case CMD_ABORTED: 2653 + dev_warn(&h->pdev->dev, "cmd 0x%02x was " 2654 "aborted\n", c->Request.CDB[0]); 2655 return_status = IO_ERROR; 2656 break; 2657 case CMD_ABORT_FAILED: 2658 + dev_warn(&h->pdev->dev, "cmd 0x%02x reports " 2659 "abort failed\n", c->Request.CDB[0]); 2660 return_status = IO_ERROR; 2661 break; 2662 case CMD_UNSOLICITED_ABORT: 2663 + dev_warn(&h->pdev->dev, "unsolicited abort 0x%02x\n", 2664 c->Request.CDB[0]); 2665 return_status = IO_NEEDS_RETRY; 2666 break; 2667 default: 2668 + dev_warn(&h->pdev->dev, "cmd 0x%02x returned " 2669 "unknown status %x\n", c->Request.CDB[0], 
2670 c->err_info->CommandStatus); 2671 return_status = IO_ERROR; ··· 2679 { 2680 DECLARE_COMPLETION_ONSTACK(wait); 2681 u64bit buff_dma_handle; 2682 int return_status = IO_OK; 2683 2684 resend_cmd2: 2685 c->waiting = &wait; 2686 + enqueue_cmd_and_start_io(h, c); 2687 2688 wait_for_completion(&wait); 2689 ··· 2700 2701 if (return_status == IO_NEEDS_RETRY && 2702 c->retry_count < MAX_CMD_RETRIES) { 2703 + dev_warn(&h->pdev->dev, "retrying 0x%02x\n", 2704 c->Request.CDB[0]); 2705 c->retry_count++; 2706 /* erase the old error information */ ··· 2719 return return_status; 2720 } 2721 2722 + static int sendcmd_withirq(ctlr_info_t *h, __u8 cmd, void *buff, size_t size, 2723 __u8 page_code, unsigned char scsi3addr[], 2724 int cmd_type) 2725 { 2726 CommandList_struct *c; 2727 int return_status; 2728 2729 + c = cmd_special_alloc(h); 2730 if (!c) 2731 return -ENOMEM; 2732 + return_status = fill_cmd(h, c, cmd, buff, size, page_code, 2733 scsi3addr, cmd_type); 2734 if (return_status == IO_OK) 2735 return_status = sendcmd_withirq_core(h, c, 1); 2736 2737 + cmd_special_free(h, c); 2738 return return_status; 2739 } 2740 2741 + static void cciss_geometry_inquiry(ctlr_info_t *h, int logvol, 2742 sector_t total_size, 2743 unsigned int block_size, 2744 InquiryData_struct *inq_buff, ··· 2750 unsigned char scsi3addr[8]; 2751 2752 memset(inq_buff, 0, sizeof(InquiryData_struct)); 2753 + log_unit_to_scsi3addr(h, scsi3addr, logvol); 2754 + return_code = sendcmd_withirq(h, CISS_INQUIRY, inq_buff, 2755 sizeof(*inq_buff), 0xC1, scsi3addr, TYPE_CMD); 2756 if (return_code == IO_OK) { 2757 if (inq_buff->data_byte[8] == 0xFF) { 2758 + dev_warn(&h->pdev->dev, 2759 + "reading geometry failed, volume " 2760 "does not support reading geometry\n"); 2761 drv->heads = 255; 2762 drv->sectors = 32; /* Sectors per track */ ··· 2780 drv->cylinders = real_size; 2781 } 2782 } else { /* Get geometry failed */ 2783 + dev_warn(&h->pdev->dev, "reading geometry failed\n"); 2784 } 2785 } 2786 2787 static void 2788 + cciss_read_capacity(ctlr_info_t *h, int logvol, sector_t *total_size, 2789 unsigned int *block_size) 2790 { 2791 ReadCapdata_struct *buf; ··· 2794 2795 buf = kzalloc(sizeof(ReadCapdata_struct), GFP_KERNEL); 2796 if (!buf) { 2797 + dev_warn(&h->pdev->dev, "out of memory\n"); 2798 return; 2799 } 2800 2801 + log_unit_to_scsi3addr(h, scsi3addr, logvol); 2802 + return_code = sendcmd_withirq(h, CCISS_READ_CAPACITY, buf, 2803 sizeof(ReadCapdata_struct), 0, scsi3addr, TYPE_CMD); 2804 if (return_code == IO_OK) { 2805 *total_size = be32_to_cpu(*(__be32 *) buf->total_size); 2806 *block_size = be32_to_cpu(*(__be32 *) buf->block_size); 2807 } else { /* read capacity command failed */ 2808 + dev_warn(&h->pdev->dev, "read capacity failed\n"); 2809 *total_size = 0; 2810 *block_size = BLOCK_SIZE; 2811 } 2812 kfree(buf); 2813 } 2814 2815 + static void cciss_read_capacity_16(ctlr_info_t *h, int logvol, 2816 sector_t *total_size, unsigned int *block_size) 2817 { 2818 ReadCapdata_struct_16 *buf; ··· 2821 2822 buf = kzalloc(sizeof(ReadCapdata_struct_16), GFP_KERNEL); 2823 if (!buf) { 2824 + dev_warn(&h->pdev->dev, "out of memory\n"); 2825 return; 2826 } 2827 2828 + log_unit_to_scsi3addr(h, scsi3addr, logvol); 2829 + return_code = sendcmd_withirq(h, CCISS_READ_CAPACITY_16, 2830 + buf, sizeof(ReadCapdata_struct_16), 2831 0, scsi3addr, TYPE_CMD); 2832 if (return_code == IO_OK) { 2833 *total_size = be64_to_cpu(*(__be64 *) buf->total_size); 2834 *block_size = be32_to_cpu(*(__be32 *) buf->block_size); 2835 } else { /* read capacity command failed */ 2836 + 
dev_warn(&h->pdev->dev, "read capacity failed\n"); 2837 *total_size = 0; 2838 *block_size = BLOCK_SIZE; 2839 } 2840 + dev_info(&h->pdev->dev, " blocks= %llu block_size= %d\n", 2841 (unsigned long long)*total_size+1, *block_size); 2842 kfree(buf); 2843 } ··· 2865 2866 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL); 2867 if (inq_buff == NULL) { 2868 + dev_warn(&h->pdev->dev, "out of memory\n"); 2869 return 1; 2870 } 2871 if (h->cciss_read == CCISS_READ_10) { 2872 + cciss_read_capacity(h, logvol, 2873 &total_size, &block_size); 2874 } else { 2875 + cciss_read_capacity_16(h, logvol, 2876 &total_size, &block_size); 2877 } 2878 + cciss_geometry_inquiry(h, logvol, total_size, block_size, 2879 inq_buff, drv); 2880 2881 blk_queue_logical_block_size(drv->queue, drv->block_size); ··· 2909 c = hlist_entry(h->reqQ.first, CommandList_struct, list); 2910 /* can't do anything if fifo is full */ 2911 if ((h->access.fifo_full(h))) { 2912 + dev_warn(&h->pdev->dev, "fifo full\n"); 2913 break; 2914 } 2915 ··· 2925 } 2926 } 2927 2928 + /* Assumes that h->lock is held. */ 2929 /* Zeros out the error record and then resends the command back */ 2930 /* to the controller */ 2931 static inline void resend_cciss_cmd(ctlr_info_t *h, CommandList_struct *c) ··· 2966 driver_byte = DRIVER_OK; 2967 msg_byte = cmd->err_info->CommandStatus; /* correct? seems too device specific */ 2968 2969 + if (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) 2970 host_byte = DID_PASSTHROUGH; 2971 else 2972 host_byte = DID_OK; ··· 2975 host_byte, driver_byte); 2976 2977 if (cmd->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION) { 2978 + if (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC) 2979 + dev_warn(&h->pdev->dev, "cmd %p " 2980 "has SCSI Status 0x%x\n", 2981 cmd, cmd->err_info->ScsiStatus); 2982 return error_value; ··· 2985 /* check the sense key */ 2986 sense_key = 0xf & cmd->err_info->SenseInfo[2]; 2987 /* no status or recovered error */ 2988 + if (((sense_key == 0x0) || (sense_key == 0x1)) && 2989 + (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC)) 2990 error_value = 0; 2991 2992 if (check_for_unit_attention(h, cmd)) { 2993 + *retry_cmd = !(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC); 2994 return 0; 2995 } 2996 2997 + /* Not SG_IO or similar? */ 2998 + if (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC) { 2999 if (error_value != 0) 3000 + dev_warn(&h->pdev->dev, "cmd %p has CHECK CONDITION" 3001 " sense key = 0x%x\n", cmd, sense_key); 3002 return error_value; 3003 } ··· 3035 rq->errors = evaluate_target_status(h, cmd, &retry_cmd); 3036 break; 3037 case CMD_DATA_UNDERRUN: 3038 + if (cmd->rq->cmd_type == REQ_TYPE_FS) { 3039 + dev_warn(&h->pdev->dev, "cmd %p has" 3040 " completed with data underrun " 3041 "reported\n", cmd); 3042 cmd->rq->resid_len = cmd->err_info->ResidualCnt; 3043 } 3044 break; 3045 case CMD_DATA_OVERRUN: 3046 + if (cmd->rq->cmd_type == REQ_TYPE_FS) 3047 + dev_warn(&h->pdev->dev, "cciss: cmd %p has" 3048 " completed with data overrun " 3049 "reported\n", cmd); 3050 break; 3051 case CMD_INVALID: 3052 + dev_warn(&h->pdev->dev, "cciss: cmd %p is " 3053 "reported invalid\n", cmd); 3054 rq->errors = make_status_bytes(SAM_STAT_GOOD, 3055 cmd->err_info->CommandStatus, DRIVER_OK, 3056 + (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 3057 + DID_PASSTHROUGH : DID_ERROR); 3058 break; 3059 case CMD_PROTOCOL_ERR: 3060 + dev_warn(&h->pdev->dev, "cciss: cmd %p has " 3061 + "protocol error\n", cmd); 3062 rq->errors = make_status_bytes(SAM_STAT_GOOD, 3063 cmd->err_info->CommandStatus, DRIVER_OK, 3064 + (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 
3065 + DID_PASSTHROUGH : DID_ERROR); 3066 break; 3067 case CMD_HARDWARE_ERR: 3068 + dev_warn(&h->pdev->dev, "cciss: cmd %p had " 3069 " hardware error\n", cmd); 3070 rq->errors = make_status_bytes(SAM_STAT_GOOD, 3071 cmd->err_info->CommandStatus, DRIVER_OK, 3072 + (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 3073 + DID_PASSTHROUGH : DID_ERROR); 3074 break; 3075 case CMD_CONNECTION_LOST: 3076 + dev_warn(&h->pdev->dev, "cciss: cmd %p had " 3077 "connection lost\n", cmd); 3078 rq->errors = make_status_bytes(SAM_STAT_GOOD, 3079 cmd->err_info->CommandStatus, DRIVER_OK, 3080 + (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 3081 + DID_PASSTHROUGH : DID_ERROR); 3082 break; 3083 case CMD_ABORTED: 3084 + dev_warn(&h->pdev->dev, "cciss: cmd %p was " 3085 "aborted\n", cmd); 3086 rq->errors = make_status_bytes(SAM_STAT_GOOD, 3087 cmd->err_info->CommandStatus, DRIVER_OK, 3088 + (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 3089 + DID_PASSTHROUGH : DID_ABORT); 3090 break; 3091 case CMD_ABORT_FAILED: 3092 + dev_warn(&h->pdev->dev, "cciss: cmd %p reports " 3093 "abort failed\n", cmd); 3094 rq->errors = make_status_bytes(SAM_STAT_GOOD, 3095 cmd->err_info->CommandStatus, DRIVER_OK, 3096 + (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 3097 + DID_PASSTHROUGH : DID_ERROR); 3098 break; 3099 case CMD_UNSOLICITED_ABORT: 3100 + dev_warn(&h->pdev->dev, "cciss%d: unsolicited " 3101 "abort %p\n", h->ctlr, cmd); 3102 if (cmd->retry_count < MAX_CMD_RETRIES) { 3103 retry_cmd = 1; 3104 + dev_warn(&h->pdev->dev, "retrying %p\n", cmd); 3105 cmd->retry_count++; 3106 } else 3107 + dev_warn(&h->pdev->dev, 3108 + "%p retried too many times\n", cmd); 3109 rq->errors = make_status_bytes(SAM_STAT_GOOD, 3110 cmd->err_info->CommandStatus, DRIVER_OK, 3111 + (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 3112 + DID_PASSTHROUGH : DID_ABORT); 3113 break; 3114 case CMD_TIMEOUT: 3115 + dev_warn(&h->pdev->dev, "cmd %p timedout\n", cmd); 3116 rq->errors = make_status_bytes(SAM_STAT_GOOD, 3117 cmd->err_info->CommandStatus, DRIVER_OK, 3118 + (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 3119 + DID_PASSTHROUGH : DID_ERROR); 3120 break; 3121 default: 3122 + dev_warn(&h->pdev->dev, "cmd %p returned " 3123 "unknown status %x\n", cmd, 3124 cmd->err_info->CommandStatus); 3125 rq->errors = make_status_bytes(SAM_STAT_GOOD, 3126 cmd->err_info->CommandStatus, DRIVER_OK, 3127 + (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 
3128 + DID_PASSTHROUGH : DID_ERROR); 3129 } 3130 3131 after_error_processing: ··· 3130 } 3131 cmd->rq->completion_data = cmd; 3132 blk_complete_request(cmd->rq); 3133 + } 3134 + 3135 + static inline u32 cciss_tag_contains_index(u32 tag) 3136 + { 3137 + #define DIRECT_LOOKUP_BIT 0x10 3138 + return tag & DIRECT_LOOKUP_BIT; 3139 + } 3140 + 3141 + static inline u32 cciss_tag_to_index(u32 tag) 3142 + { 3143 + #define DIRECT_LOOKUP_SHIFT 5 3144 + return tag >> DIRECT_LOOKUP_SHIFT; 3145 + } 3146 + 3147 + static inline u32 cciss_tag_discard_error_bits(u32 tag) 3148 + { 3149 + #define CCISS_ERROR_BITS 0x03 3150 + return tag & ~CCISS_ERROR_BITS; 3151 + } 3152 + 3153 + static inline void cciss_mark_tag_indexed(u32 *tag) 3154 + { 3155 + *tag |= DIRECT_LOOKUP_BIT; 3156 + } 3157 + 3158 + static inline void cciss_set_tag_index(u32 *tag, u32 index) 3159 + { 3160 + *tag |= (index << DIRECT_LOOKUP_SHIFT); 3161 } 3162 3163 /* ··· 3163 3164 BUG_ON(creq->nr_phys_segments > h->maxsgentries); 3165 3166 + c = cmd_alloc(h); 3167 + if (!c) 3168 goto full; 3169 3170 blk_start_request(creq); ··· 3180 /* got command from pool, so use the command block index instead */ 3181 /* for direct lookups. */ 3182 /* The first 2 bits are reserved for controller error reporting. */ 3183 + cciss_set_tag_index(&c->Header.Tag.lower, c->cmdindex); 3184 + cciss_mark_tag_indexed(&c->Header.Tag.lower); 3185 memcpy(&c->Header.LUN, drv->LunID, sizeof(drv->LunID)); 3186 c->Request.CDBLen = 10; /* 12 byte commands not in FW yet; */ 3187 c->Request.Type.Type = TYPE_CMD; /* It is a command. */ ··· 3192 c->Request.CDB[0] = 3193 (rq_data_dir(creq) == READ) ? h->cciss_read : h->cciss_write; 3194 start_blk = blk_rq_pos(creq); 3195 + dev_dbg(&h->pdev->dev, "sector =%d nr_sectors=%d\n", 3196 (int)blk_rq_pos(creq), (int)blk_rq_sectors(creq)); 3197 sg_init_table(tmp_sg, h->maxsgentries); 3198 seg = blk_rq_map_sg(q, creq, tmp_sg); 3199 ··· 3236 if (seg > h->maxSG) 3237 h->maxSG = seg; 3238 3239 + dev_dbg(&h->pdev->dev, "Submitting %u sectors in %d segments " 3240 "chained[%d]\n", 3241 blk_rq_sectors(creq), seg, chained); 3242 3243 + c->Header.SGTotal = seg + chained; 3244 + if (seg <= h->max_cmd_sgentries) 3245 + c->Header.SGList = c->Header.SGTotal; 3246 + else 3247 c->Header.SGList = h->max_cmd_sgentries; 3248 + set_performant_mode(h, c); 3249 3250 + if (likely(creq->cmd_type == REQ_TYPE_FS)) { 3251 if(h->cciss_read == CCISS_READ_10) { 3252 c->Request.CDB[1] = 0; 3253 c->Request.CDB[2] = (start_blk >> 24) & 0xff; /* MSB */ ··· 3276 c->Request.CDB[13]= blk_rq_sectors(creq) & 0xff; 3277 c->Request.CDB[14] = c->Request.CDB[15] = 0; 3278 } 3279 + } else if (creq->cmd_type == REQ_TYPE_BLOCK_PC) { 3280 c->Request.CDBLen = creq->cmd_len; 3281 memcpy(c->Request.CDB, creq->cmd, BLK_MAX_CDB); 3282 } else { 3283 + dev_warn(&h->pdev->dev, "bad request type %d\n", 3284 + creq->cmd_type); 3285 BUG(); 3286 } 3287 ··· 3313 3314 static inline long interrupt_not_for_us(ctlr_info_t *h) 3315 { 3316 + return ((h->access.intr_pending(h) == 0) || 3317 + (h->interrupts_enabled == 0)); 3318 } 3319 3320 + static inline int bad_tag(ctlr_info_t *h, u32 tag_index, 3321 + u32 raw_tag) 3322 + { 3323 + if (unlikely(tag_index >= h->nr_cmds)) { 3324 + dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag); 3325 + return 1; 3326 + } 3327 + return 0; 3328 + } 3329 + 3330 + static inline void finish_cmd(ctlr_info_t *h, CommandList_struct *c, 3331 + u32 raw_tag) 3332 + { 3333 + removeQ(c); 3334 + if (likely(c->cmd_type == CMD_RWREQ)) 3335 + complete_command(h, c, 0); 3336 + else 
if (c->cmd_type == CMD_IOCTL_PEND) 3337 + complete(c->waiting); 3338 + #ifdef CONFIG_CISS_SCSI_TAPE 3339 + else if (c->cmd_type == CMD_SCSI) 3340 + complete_scsi_command(c, 0, raw_tag); 3341 + #endif 3342 + } 3343 + 3344 + static inline u32 next_command(ctlr_info_t *h) 3345 + { 3346 + u32 a; 3347 + 3348 + if (unlikely(h->transMethod != CFGTBL_Trans_Performant)) 3349 + return h->access.command_completed(h); 3350 + 3351 + if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) { 3352 + a = *(h->reply_pool_head); /* Next cmd in ring buffer */ 3353 + (h->reply_pool_head)++; 3354 + h->commands_outstanding--; 3355 + } else { 3356 + a = FIFO_EMPTY; 3357 + } 3358 + /* Check for wraparound */ 3359 + if (h->reply_pool_head == (h->reply_pool + h->max_commands)) { 3360 + h->reply_pool_head = h->reply_pool; 3361 + h->reply_pool_wraparound ^= 1; 3362 + } 3363 + return a; 3364 + } 3365 + 3366 + /* process completion of an indexed ("direct lookup") command */ 3367 + static inline u32 process_indexed_cmd(ctlr_info_t *h, u32 raw_tag) 3368 + { 3369 + u32 tag_index; 3370 + CommandList_struct *c; 3371 + 3372 + tag_index = cciss_tag_to_index(raw_tag); 3373 + if (bad_tag(h, tag_index, raw_tag)) 3374 + return next_command(h); 3375 + c = h->cmd_pool + tag_index; 3376 + finish_cmd(h, c, raw_tag); 3377 + return next_command(h); 3378 + } 3379 + 3380 + /* process completion of a non-indexed command */ 3381 + static inline u32 process_nonindexed_cmd(ctlr_info_t *h, u32 raw_tag) 3382 + { 3383 + u32 tag; 3384 + CommandList_struct *c = NULL; 3385 + struct hlist_node *tmp; 3386 + __u32 busaddr_masked, tag_masked; 3387 + 3388 + tag = cciss_tag_discard_error_bits(raw_tag); 3389 + hlist_for_each_entry(c, tmp, &h->cmpQ, list) { 3390 + busaddr_masked = cciss_tag_discard_error_bits(c->busaddr); 3391 + tag_masked = cciss_tag_discard_error_bits(tag); 3392 + if (busaddr_masked == tag_masked) { 3393 + finish_cmd(h, c, raw_tag); 3394 + return next_command(h); 3395 + } 3396 + } 3397 + bad_tag(h, h->nr_cmds + 1, raw_tag); 3398 + return next_command(h); 3399 + } 3400 + 3401 + static irqreturn_t do_cciss_intx(int irq, void *dev_id) 3402 { 3403 ctlr_info_t *h = dev_id; 3404 unsigned long flags; 3405 + u32 raw_tag; 3406 3407 if (interrupt_not_for_us(h)) 3408 return IRQ_NONE; 3409 + spin_lock_irqsave(&h->lock, flags); 3410 while (interrupt_pending(h)) { 3411 + raw_tag = get_next_completion(h); 3412 + while (raw_tag != FIFO_EMPTY) { 3413 + if (cciss_tag_contains_index(raw_tag)) 3414 + raw_tag = process_indexed_cmd(h, raw_tag); 3415 + else 3416 + raw_tag = process_nonindexed_cmd(h, raw_tag); 3417 } 3418 } 3419 + spin_unlock_irqrestore(&h->lock, flags); 3420 + return IRQ_HANDLED; 3421 + } 3422 3423 + /* Add a second interrupt handler for MSI/MSI-X mode. In this mode we never 3424 + * check the interrupt pending register because it is not set. 
3425 + */ 3426 + static irqreturn_t do_cciss_msix_intr(int irq, void *dev_id) 3427 + { 3428 + ctlr_info_t *h = dev_id; 3429 + unsigned long flags; 3430 + u32 raw_tag; 3431 + 3432 + spin_lock_irqsave(&h->lock, flags); 3433 + raw_tag = get_next_completion(h); 3434 + while (raw_tag != FIFO_EMPTY) { 3435 + if (cciss_tag_contains_index(raw_tag)) 3436 + raw_tag = process_indexed_cmd(h, raw_tag); 3437 + else 3438 + raw_tag = process_nonindexed_cmd(h, raw_tag); 3439 + } 3440 + spin_unlock_irqrestore(&h->lock, flags); 3441 return IRQ_HANDLED; 3442 } 3443 ··· 3510 3511 switch (c->err_info->SenseInfo[12]) { 3512 case STATE_CHANGED: 3513 + dev_warn(&h->pdev->dev, "a state change " 3514 + "detected, command retried\n"); 3515 return 1; 3516 break; 3517 case LUN_FAILED: 3518 + dev_warn(&h->pdev->dev, "LUN failure " 3519 + "detected, action required\n"); 3520 return 1; 3521 break; 3522 case REPORT_LUNS_CHANGED: 3523 + dev_warn(&h->pdev->dev, "report LUN data changed\n"); 3524 /* 3525 * Here, we could call add_to_scan_list and wake up the scan thread, 3526 * except that it's quite likely that we will get more than one ··· 3541 return 1; 3542 break; 3543 case POWER_OR_RESET: 3544 + dev_warn(&h->pdev->dev, 3545 + "a power on or device reset detected\n"); 3546 return 1; 3547 break; 3548 case UNIT_ATTENTION_CLEARED: 3549 + dev_warn(&h->pdev->dev, 3550 + "unit attention cleared by another initiator\n"); 3551 return 1; 3552 break; 3553 default: 3554 + dev_warn(&h->pdev->dev, "unknown unit attention detected\n"); 3555 + return 1; 3556 } 3557 } 3558 ··· 3562 * the io functions. 3563 * This is for debug only. 3564 */ 3565 + static void print_cfg_table(ctlr_info_t *h) 3566 { 3567 int i; 3568 char temp_name[17]; 3569 + CfgTable_struct *tb = h->cfgtable; 3570 3571 + dev_dbg(&h->pdev->dev, "Controller Configuration information\n"); 3572 + dev_dbg(&h->pdev->dev, "------------------------------------\n"); 3573 for (i = 0; i < 4; i++) 3574 temp_name[i] = readb(&(tb->Signature[i])); 3575 temp_name[4] = '\0'; 3576 + dev_dbg(&h->pdev->dev, " Signature = %s\n", temp_name); 3577 + dev_dbg(&h->pdev->dev, " Spec Number = %d\n", 3578 + readl(&(tb->SpecValence))); 3579 + dev_dbg(&h->pdev->dev, " Transport methods supported = 0x%x\n", 3580 readl(&(tb->TransportSupport))); 3581 + dev_dbg(&h->pdev->dev, " Transport methods active = 0x%x\n", 3582 readl(&(tb->TransportActive))); 3583 + dev_dbg(&h->pdev->dev, " Requested transport Method = 0x%x\n", 3584 readl(&(tb->HostWrite.TransportRequest))); 3585 + dev_dbg(&h->pdev->dev, " Coalesce Interrupt Delay = 0x%x\n", 3586 readl(&(tb->HostWrite.CoalIntDelay))); 3587 + dev_dbg(&h->pdev->dev, " Coalesce Interrupt Count = 0x%x\n", 3588 readl(&(tb->HostWrite.CoalIntCount))); 3589 + dev_dbg(&h->pdev->dev, " Max outstanding commands = 0x%d\n", 3590 readl(&(tb->CmdsOutMax))); 3591 + dev_dbg(&h->pdev->dev, " Bus Types = 0x%x\n", 3592 + readl(&(tb->BusTypes))); 3593 for (i = 0; i < 16; i++) 3594 temp_name[i] = readb(&(tb->ServerName[i])); 3595 temp_name[16] = '\0'; 3596 + dev_dbg(&h->pdev->dev, " Server Name = %s\n", temp_name); 3597 + dev_dbg(&h->pdev->dev, " Heartbeat Counter = 0x%x\n\n\n", 3598 + readl(&(tb->HeartBeat))); 3599 } 3600 3601 static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr) 3602 { ··· 3618 offset += 8; 3619 break; 3620 default: /* reserved in PCI 2.2 */ 3621 + dev_warn(&pdev->dev, 3622 "Base address is invalid\n"); 3623 return -1; 3624 break; ··· 3630 return -1; 3631 } 3632 3633 + /* Fill in bucket_map[], given nsgs (the max number of 3634 + * scatter 
gather elements supported) and bucket[], 3635 + * which is an array of 8 integers. The bucket[] array 3636 + * contains 8 different DMA transfer sizes (in 16 3637 + * byte increments) which the controller uses to fetch 3638 + * commands. This function fills in bucket_map[], which 3639 + * maps a given number of scatter gather elements to one of 3640 + * the 8 DMA transfer sizes. The point of it is to allow the 3641 + * controller to only do as much DMA as needed to fetch the 3642 + * command, with the DMA transfer size encoded in the lower 3643 + * bits of the command address. 3644 + */ 3645 + static void calc_bucket_map(int bucket[], int num_buckets, 3646 + int nsgs, int *bucket_map) 3647 + { 3648 + int i, j, b, size; 3649 + 3650 + /* even a command with 0 SGs requires 4 blocks */ 3651 + #define MINIMUM_TRANSFER_BLOCKS 4 3652 + #define NUM_BUCKETS 8 3653 + /* Note, bucket_map must have nsgs+1 entries. */ 3654 + for (i = 0; i <= nsgs; i++) { 3655 + /* Compute size of a command with i SG entries */ 3656 + size = i + MINIMUM_TRANSFER_BLOCKS; 3657 + b = num_buckets; /* Assume the biggest bucket */ 3658 + /* Find the bucket that is just big enough */ 3659 + for (j = 0; j < 8; j++) { 3660 + if (bucket[j] >= size) { 3661 + b = j; 3662 + break; 3663 + } 3664 + } 3665 + /* for a command with i SG entries, use bucket b. */ 3666 + bucket_map[i] = b; 3667 + } 3668 + } 3669 + 3670 + static void __devinit cciss_wait_for_mode_change_ack(ctlr_info_t *h) 3671 + { 3672 + int i; 3673 + 3674 + /* under certain very rare conditions, this can take awhile. 3675 + * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right 3676 + * as we enter this code.) */ 3677 + for (i = 0; i < MAX_CONFIG_WAIT; i++) { 3678 + if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq)) 3679 + break; 3680 + msleep(10); 3681 + } 3682 + } 3683 + 3684 + static __devinit void cciss_enter_performant_mode(ctlr_info_t *h) 3685 + { 3686 + /* This is a bit complicated. There are 8 registers on 3687 + * the controller which we write to to tell it 8 different 3688 + * sizes of commands which there may be. It's a way of 3689 + * reducing the DMA done to fetch each command. Encoded into 3690 + * each command's tag are 3 bits which communicate to the controller 3691 + * which of the eight sizes that command fits within. The size of 3692 + * each command depends on how many scatter gather entries there are. 3693 + * Each SG entry requires 16 bytes. The eight registers are programmed 3694 + * with the number of 16-byte blocks a command of that size requires. 3695 + * The smallest command possible requires 5 such 16 byte blocks. 3696 + * the largest command possible requires MAXSGENTRIES + 4 16-byte 3697 + * blocks. Note, this only extends to the SG entries contained 3698 + * within the command block, and does not extend to chained blocks 3699 + * of SG elements. bft[] contains the eight values we write to 3700 + * the registers. They are not evenly distributed, but have more 3701 + * sizes for small commands, and fewer sizes for larger commands. 3702 + */ 3703 + __u32 trans_offset; 3704 + int bft[8] = { 5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4}; 3705 + /* 3706 + * 5 = 1 s/g entry or 4k 3707 + * 6 = 2 s/g entry or 8k 3708 + * 8 = 4 s/g entry or 16k 3709 + * 10 = 6 s/g entry or 24k 3710 + */ 3711 + unsigned long register_value; 3712 + BUILD_BUG_ON(28 > MAXSGENTRIES + 4); 3713 + 3714 + h->reply_pool_wraparound = 1; /* spec: init to 1 */ 3715 + 3716 + /* Controller spec: zero out this buffer. 
*/ 3717 + memset(h->reply_pool, 0, h->max_commands * sizeof(__u64)); 3718 + h->reply_pool_head = h->reply_pool; 3719 + 3720 + trans_offset = readl(&(h->cfgtable->TransMethodOffset)); 3721 + calc_bucket_map(bft, ARRAY_SIZE(bft), h->maxsgentries, 3722 + h->blockFetchTable); 3723 + writel(bft[0], &h->transtable->BlockFetch0); 3724 + writel(bft[1], &h->transtable->BlockFetch1); 3725 + writel(bft[2], &h->transtable->BlockFetch2); 3726 + writel(bft[3], &h->transtable->BlockFetch3); 3727 + writel(bft[4], &h->transtable->BlockFetch4); 3728 + writel(bft[5], &h->transtable->BlockFetch5); 3729 + writel(bft[6], &h->transtable->BlockFetch6); 3730 + writel(bft[7], &h->transtable->BlockFetch7); 3731 + 3732 + /* size of controller ring buffer */ 3733 + writel(h->max_commands, &h->transtable->RepQSize); 3734 + writel(1, &h->transtable->RepQCount); 3735 + writel(0, &h->transtable->RepQCtrAddrLow32); 3736 + writel(0, &h->transtable->RepQCtrAddrHigh32); 3737 + writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32); 3738 + writel(0, &h->transtable->RepQAddr0High32); 3739 + writel(CFGTBL_Trans_Performant, 3740 + &(h->cfgtable->HostWrite.TransportRequest)); 3741 + 3742 + writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); 3743 + cciss_wait_for_mode_change_ack(h); 3744 + register_value = readl(&(h->cfgtable->TransportActive)); 3745 + if (!(register_value & CFGTBL_Trans_Performant)) 3746 + dev_warn(&h->pdev->dev, "cciss: unable to get board into" 3747 + " performant mode\n"); 3748 + } 3749 + 3750 + static void __devinit cciss_put_controller_into_performant_mode(ctlr_info_t *h) 3751 + { 3752 + __u32 trans_support; 3753 + 3754 + dev_dbg(&h->pdev->dev, "Trying to put board into Performant mode\n"); 3755 + /* Attempt to put controller into performant mode if supported */ 3756 + /* Does board support performant mode? */ 3757 + trans_support = readl(&(h->cfgtable->TransportSupport)); 3758 + if (!(trans_support & PERFORMANT_MODE)) 3759 + return; 3760 + 3761 + dev_dbg(&h->pdev->dev, "Placing controller into performant mode\n"); 3762 + /* Performant mode demands commands on a 32 byte boundary 3763 + * pci_alloc_consistent aligns on page boundarys already. 
3764 + * Just need to check if divisible by 32 3765 + */ 3766 + if ((sizeof(CommandList_struct) % 32) != 0) { 3767 + dev_warn(&h->pdev->dev, "%s %d %s\n", 3768 + "cciss info: command size[", 3769 + (int)sizeof(CommandList_struct), 3770 + "] not divisible by 32, no performant mode..\n"); 3771 + return; 3772 + } 3773 + 3774 + /* Performant mode ring buffer and supporting data structures */ 3775 + h->reply_pool = (__u64 *)pci_alloc_consistent( 3776 + h->pdev, h->max_commands * sizeof(__u64), 3777 + &(h->reply_pool_dhandle)); 3778 + 3779 + /* Need a block fetch table for performant mode */ 3780 + h->blockFetchTable = kmalloc(((h->maxsgentries+1) * 3781 + sizeof(__u32)), GFP_KERNEL); 3782 + 3783 + if ((h->reply_pool == NULL) || (h->blockFetchTable == NULL)) 3784 + goto clean_up; 3785 + 3786 + cciss_enter_performant_mode(h); 3787 + 3788 + /* Change the access methods to the performant access methods */ 3789 + h->access = SA5_performant_access; 3790 + h->transMethod = CFGTBL_Trans_Performant; 3791 + 3792 + return; 3793 + clean_up: 3794 + kfree(h->blockFetchTable); 3795 + if (h->reply_pool) 3796 + pci_free_consistent(h->pdev, 3797 + h->max_commands * sizeof(__u64), 3798 + h->reply_pool, 3799 + h->reply_pool_dhandle); 3800 + return; 3801 + 3802 + } /* cciss_put_controller_into_performant_mode */ 3803 + 3804 /* If MSI/MSI-X is supported by the kernel we will try to enable it on 3805 * controllers that are capable. If not, we use IO-APIC mode. 3806 */ 3807 3808 + static void __devinit cciss_interrupt_mode(ctlr_info_t *h) 3809 { 3810 #ifdef CONFIG_PCI_MSI 3811 int err; ··· 3644 }; 3645 3646 /* Some boards advertise MSI but don't really support it */ 3647 + if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) || 3648 + (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11)) 3649 goto default_int_mode; 3650 3651 + if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) { 3652 + err = pci_enable_msix(h->pdev, cciss_msix_entries, 4); 3653 if (!err) { 3654 + h->intr[0] = cciss_msix_entries[0].vector; 3655 + h->intr[1] = cciss_msix_entries[1].vector; 3656 + h->intr[2] = cciss_msix_entries[2].vector; 3657 + h->intr[3] = cciss_msix_entries[3].vector; 3658 + h->msix_vector = 1; 3659 return; 3660 } 3661 if (err > 0) { 3662 + dev_warn(&h->pdev->dev, 3663 + "only %d MSI-X vectors available\n", err); 3664 goto default_int_mode; 3665 } else { 3666 + dev_warn(&h->pdev->dev, 3667 + "MSI-X init failed %d\n", err); 3668 goto default_int_mode; 3669 } 3670 } 3671 + if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) { 3672 + if (!pci_enable_msi(h->pdev)) 3673 + h->msi_vector = 1; 3674 + else 3675 + dev_warn(&h->pdev->dev, "MSI init failed\n"); 3676 } 3677 default_int_mode: 3678 #endif /* CONFIG_PCI_MSI */ 3679 /* if we get here we're going to use the default interrupt mode */ 3680 + h->intr[PERF_MODE_INT] = h->pdev->irq; 3681 return; 3682 } 3683 3684 + static int __devinit cciss_lookup_board_id(struct pci_dev *pdev, u32 *board_id) 3685 { 3686 + int i; 3687 + u32 subsystem_vendor_id, subsystem_device_id; 3688 3689 subsystem_vendor_id = pdev->subsystem_vendor; 3690 subsystem_device_id = pdev->subsystem_device; 3691 + *board_id = ((subsystem_device_id << 16) & 0xffff0000) | 3692 + subsystem_vendor_id; 3693 3694 for (i = 0; i < ARRAY_SIZE(products); i++) { 3695 /* Stand aside for hpsa driver on request */ 3696 if (cciss_allow_hpsa && products[i].board_id == HPSA_BOUNDARY) 3697 return -ENODEV; 3698 + if (*board_id == products[i].board_id) 3699 + return i; 3700 } 3701 + dev_warn(&pdev->dev, "unrecognized board ID: 
0x%08x, ignoring.\n", 3702 + *board_id); 3703 + return -ENODEV; 3704 + } 3705 + 3706 + static inline bool cciss_board_disabled(ctlr_info_t *h) 3707 + { 3708 + u16 command; 3709 + 3710 + (void) pci_read_config_word(h->pdev, PCI_COMMAND, &command); 3711 + return ((command & PCI_COMMAND_MEMORY) == 0); 3712 + } 3713 + 3714 + static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev, 3715 + unsigned long *memory_bar) 3716 + { 3717 + int i; 3718 + 3719 + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) 3720 + if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) { 3721 + /* addressing mode bits already removed */ 3722 + *memory_bar = pci_resource_start(pdev, i); 3723 + dev_dbg(&pdev->dev, "memory BAR = %lx\n", 3724 + *memory_bar); 3725 + return 0; 3726 + } 3727 + dev_warn(&pdev->dev, "no memory BAR found\n"); 3728 + return -ENODEV; 3729 + } 3730 + 3731 + static int __devinit cciss_wait_for_board_ready(ctlr_info_t *h) 3732 + { 3733 + int i; 3734 + u32 scratchpad; 3735 + 3736 + for (i = 0; i < CCISS_BOARD_READY_ITERATIONS; i++) { 3737 + scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); 3738 + if (scratchpad == CCISS_FIRMWARE_READY) 3739 + return 0; 3740 + msleep(CCISS_BOARD_READY_POLL_INTERVAL_MSECS); 3741 + } 3742 + dev_warn(&h->pdev->dev, "board not ready, timed out.\n"); 3743 + return -ENODEV; 3744 + } 3745 + 3746 + static int __devinit cciss_find_cfg_addrs(struct pci_dev *pdev, 3747 + void __iomem *vaddr, u32 *cfg_base_addr, u64 *cfg_base_addr_index, 3748 + u64 *cfg_offset) 3749 + { 3750 + *cfg_base_addr = readl(vaddr + SA5_CTCFG_OFFSET); 3751 + *cfg_offset = readl(vaddr + SA5_CTMEM_OFFSET); 3752 + *cfg_base_addr &= (u32) 0x0000ffff; 3753 + *cfg_base_addr_index = find_PCI_BAR_index(pdev, *cfg_base_addr); 3754 + if (*cfg_base_addr_index == -1) { 3755 + dev_warn(&pdev->dev, "cannot find cfg_base_addr_index, " 3756 + "*cfg_base_addr = 0x%08x\n", *cfg_base_addr); 3757 return -ENODEV; 3758 } 3759 + return 0; 3760 + } 3761 3762 + static int __devinit cciss_find_cfgtables(ctlr_info_t *h) 3763 + { 3764 + u64 cfg_offset; 3765 + u32 cfg_base_addr; 3766 + u64 cfg_base_addr_index; 3767 + u32 trans_offset; 3768 + int rc; 3769 + 3770 + rc = cciss_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr, 3771 + &cfg_base_addr_index, &cfg_offset); 3772 + if (rc) 3773 + return rc; 3774 + h->cfgtable = remap_pci_mem(pci_resource_start(h->pdev, 3775 + cfg_base_addr_index) + cfg_offset, sizeof(h->cfgtable)); 3776 + if (!h->cfgtable) 3777 + return -ENOMEM; 3778 + /* Find performant mode table. */ 3779 + trans_offset = readl(&h->cfgtable->TransMethodOffset); 3780 + h->transtable = remap_pci_mem(pci_resource_start(h->pdev, 3781 + cfg_base_addr_index)+cfg_offset+trans_offset, 3782 + sizeof(*h->transtable)); 3783 + if (!h->transtable) 3784 + return -ENOMEM; 3785 + return 0; 3786 + } 3787 + 3788 + static void __devinit cciss_get_max_perf_mode_cmds(struct ctlr_info *h) 3789 + { 3790 + h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands)); 3791 + if (h->max_commands < 16) { 3792 + dev_warn(&h->pdev->dev, "Controller reports " 3793 + "max supported commands of %d, an obvious lie. " 3794 + "Using 16. Ensure that firmware is up to date.\n", 3795 + h->max_commands); 3796 + h->max_commands = 16; 3797 + } 3798 + } 3799 + 3800 + /* Interrogate the hardware for some limits: 3801 + * max commands, max SG elements without chaining, and with chaining, 3802 + * SG chain block size, etc. 
3803 + */ 3804 + static void __devinit cciss_find_board_params(ctlr_info_t *h) 3805 + { 3806 + cciss_get_max_perf_mode_cmds(h); 3807 + h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */ 3808 + h->maxsgentries = readl(&(h->cfgtable->MaxSGElements)); 3809 + /* 3810 + * Limit in-command s/g elements to 32 save dma'able memory. 3811 + * Howvever spec says if 0, use 31 3812 + */ 3813 + h->max_cmd_sgentries = 31; 3814 + if (h->maxsgentries > 512) { 3815 + h->max_cmd_sgentries = 32; 3816 + h->chainsize = h->maxsgentries - h->max_cmd_sgentries + 1; 3817 + h->maxsgentries--; /* save one for chain pointer */ 3818 + } else { 3819 + h->maxsgentries = 31; /* default to traditional values */ 3820 + h->chainsize = 0; 3821 + } 3822 + } 3823 + 3824 + static inline bool CISS_signature_present(ctlr_info_t *h) 3825 + { 3826 + if ((readb(&h->cfgtable->Signature[0]) != 'C') || 3827 + (readb(&h->cfgtable->Signature[1]) != 'I') || 3828 + (readb(&h->cfgtable->Signature[2]) != 'S') || 3829 + (readb(&h->cfgtable->Signature[3]) != 'S')) { 3830 + dev_warn(&h->pdev->dev, "not a valid CISS config table\n"); 3831 + return false; 3832 + } 3833 + return true; 3834 + } 3835 + 3836 + /* Need to enable prefetch in the SCSI core for 6400 in x86 */ 3837 + static inline void cciss_enable_scsi_prefetch(ctlr_info_t *h) 3838 + { 3839 + #ifdef CONFIG_X86 3840 + u32 prefetch; 3841 + 3842 + prefetch = readl(&(h->cfgtable->SCSI_Prefetch)); 3843 + prefetch |= 0x100; 3844 + writel(prefetch, &(h->cfgtable->SCSI_Prefetch)); 3845 + #endif 3846 + } 3847 + 3848 + /* Disable DMA prefetch for the P600. Otherwise an ASIC bug may result 3849 + * in a prefetch beyond physical memory. 3850 + */ 3851 + static inline void cciss_p600_dma_prefetch_quirk(ctlr_info_t *h) 3852 + { 3853 + u32 dma_prefetch; 3854 + __u32 dma_refetch; 3855 + 3856 + if (h->board_id != 0x3225103C) 3857 + return; 3858 + dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG); 3859 + dma_prefetch |= 0x8000; 3860 + writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG); 3861 + pci_read_config_dword(h->pdev, PCI_COMMAND_PARITY, &dma_refetch); 3862 + dma_refetch |= 0x1; 3863 + pci_write_config_dword(h->pdev, PCI_COMMAND_PARITY, dma_refetch); 3864 + } 3865 + 3866 + static int __devinit cciss_pci_init(ctlr_info_t *h) 3867 + { 3868 + int prod_index, err; 3869 + 3870 + prod_index = cciss_lookup_board_id(h->pdev, &h->board_id); 3871 + if (prod_index < 0) 3872 + return -ENODEV; 3873 + h->product_name = products[prod_index].product_name; 3874 + h->access = *(products[prod_index].access); 3875 + 3876 + if (cciss_board_disabled(h)) { 3877 + dev_warn(&h->pdev->dev, "controller appears to be disabled\n"); 3878 return -ENODEV; 3879 } 3880 + err = pci_enable_device(h->pdev); 3881 if (err) { 3882 + dev_warn(&h->pdev->dev, "Unable to Enable PCI device\n"); 3883 return err; 3884 } 3885 3886 + err = pci_request_regions(h->pdev, "cciss"); 3887 if (err) { 3888 + dev_warn(&h->pdev->dev, 3889 + "Cannot obtain PCI resources, aborting\n"); 3890 return err; 3891 } 3892 3893 + dev_dbg(&h->pdev->dev, "irq = %x\n", h->pdev->irq); 3894 + dev_dbg(&h->pdev->dev, "board_id = %x\n", h->board_id); 3895 3896 /* If the kernel supports MSI/MSI-X we will try to enable that functionality, 3897 * else we use the IO-APIC interrupt assigned to us by system ROM. 
3898 */ 3899 + cciss_interrupt_mode(h); 3900 + err = cciss_pci_find_memory_BAR(h->pdev, &h->paddr); 3901 + if (err) 3902 + goto err_out_free_res; 3903 + h->vaddr = remap_pci_mem(h->paddr, 0x250); 3904 + if (!h->vaddr) { 3905 + err = -ENOMEM; 3906 + goto err_out_free_res; 3907 } 3908 + err = cciss_wait_for_board_ready(h); 3909 + if (err) 3910 + goto err_out_free_res; 3911 + err = cciss_find_cfgtables(h); 3912 + if (err) 3913 + goto err_out_free_res; 3914 + print_cfg_table(h); 3915 + cciss_find_board_params(h); 3916 + 3917 + if (!CISS_signature_present(h)) { 3918 err = -ENODEV; 3919 goto err_out_free_res; 3920 } 3921 + cciss_enable_scsi_prefetch(h); 3922 + cciss_p600_dma_prefetch_quirk(h); 3923 + cciss_put_controller_into_performant_mode(h); 3924 return 0; 3925 3926 err_out_free_res: ··· 3913 * Deliberately omit pci_disable_device(): it does something nasty to 3914 * Smart Array controllers that pci_enable_device does not undo 3915 */ 3916 + if (h->transtable) 3917 + iounmap(h->transtable); 3918 + if (h->cfgtable) 3919 + iounmap(h->cfgtable); 3920 + if (h->vaddr) 3921 + iounmap(h->vaddr); 3922 + pci_release_regions(h->pdev); 3923 return err; 3924 } 3925 3926 /* Function to find the first free pointer into our hba[] array 3927 * Returns -1 if no free entries are left. 3928 */ 3929 + static int alloc_cciss_hba(struct pci_dev *pdev) 3930 { 3931 int i; 3932 3933 for (i = 0; i < MAX_CTLR; i++) { 3934 if (!hba[i]) { 3935 + ctlr_info_t *h; 3936 3937 + h = kzalloc(sizeof(ctlr_info_t), GFP_KERNEL); 3938 + if (!h) 3939 goto Enomem; 3940 + hba[i] = h; 3941 return i; 3942 } 3943 } 3944 + dev_warn(&pdev->dev, "This driver supports a maximum" 3945 " of %d controllers.\n", MAX_CTLR); 3946 return -1; 3947 Enomem: 3948 + dev_warn(&pdev->dev, "out of memory.\n"); 3949 return -1; 3950 } 3951 3952 + static void free_hba(ctlr_info_t *h) 3953 { 3954 int i; 3955 3956 + hba[h->ctlr] = NULL; 3957 for (i = 0; i < h->highest_lun + 1; i++) 3958 if (h->gendisk[i] != NULL) 3959 put_disk(h->gendisk[i]); ··· 4028 /* we leak the DMA buffer here ... no choice since the controller could 4029 still complete the command. */ 4030 if (i == 10) { 4031 + dev_err(&pdev->dev, 4032 + "controller message %02x:%02x timed out\n", 4033 opcode, type); 4034 return -ETIMEDOUT; 4035 } ··· 4036 pci_free_consistent(pdev, cmd_sz, cmd, paddr64); 4037 4038 if (tag & 2) { 4039 + dev_err(&pdev->dev, "controller message %02x:%02x failed\n", 4040 opcode, type); 4041 return -EIO; 4042 } 4043 4044 + dev_info(&pdev->dev, "controller message %02x:%02x succeeded\n", 4045 opcode, type); 4046 return 0; 4047 } ··· 4062 if (pos) { 4063 pci_read_config_word(pdev, msi_control_reg(pos), &control); 4064 if (control & PCI_MSI_FLAGS_ENABLE) { 4065 + dev_info(&pdev->dev, "resetting MSI\n"); 4066 pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSI_FLAGS_ENABLE); 4067 } 4068 } ··· 4071 if (pos) { 4072 pci_read_config_word(pdev, msi_control_reg(pos), &control); 4073 if (control & PCI_MSIX_FLAGS_ENABLE) { 4074 + dev_info(&pdev->dev, "resetting MSI-X\n"); 4075 pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSIX_FLAGS_ENABLE); 4076 } 4077 } ··· 4079 return 0; 4080 } 4081 4082 + static int cciss_controller_hard_reset(struct pci_dev *pdev, 4083 + void * __iomem vaddr, bool use_doorbell) 4084 { 4085 + u16 pmcsr; 4086 + int pos; 4087 4088 + if (use_doorbell) { 4089 + /* For everything after the P600, the PCI power state method 4090 + * of resetting the controller doesn't work, so we have this 4091 + * other way using the doorbell register. 
4092 + */ 4093 + dev_info(&pdev->dev, "using doorbell to reset controller\n"); 4094 + writel(DOORBELL_CTLR_RESET, vaddr + SA5_DOORBELL); 4095 + msleep(1000); 4096 + } else { /* Try to do it the PCI power state way */ 4097 4098 + /* Quoting from the Open CISS Specification: "The Power 4099 + * Management Control/Status Register (CSR) controls the power 4100 + * state of the device. The normal operating state is D0, 4101 + * CSR=00h. The software off state is D3, CSR=03h. To reset 4102 + * the controller, place the interface device in D3 then to D0, 4103 + * this causes a secondary PCI reset which will reset the 4104 + * controller." */ 4105 4106 + pos = pci_find_capability(pdev, PCI_CAP_ID_PM); 4107 + if (pos == 0) { 4108 + dev_err(&pdev->dev, 4109 + "cciss_controller_hard_reset: " 4110 + "PCI PM not supported\n"); 4111 + return -ENODEV; 4112 + } 4113 + dev_info(&pdev->dev, "using PCI PM to reset controller\n"); 4114 + /* enter the D3hot power management state */ 4115 + pci_read_config_word(pdev, pos + PCI_PM_CTRL, &pmcsr); 4116 + pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 4117 + pmcsr |= PCI_D3hot; 4118 + pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); 4119 4120 + msleep(500); 4121 + 4122 + /* enter the D0 power management state */ 4123 + pmcsr &= ~PCI_PM_CTRL_STATE_MASK; 4124 + pmcsr |= PCI_D0; 4125 + pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr); 4126 + 4127 + msleep(500); 4128 + } 4129 + return 0; 4130 + } 4131 + 4132 + /* This does a hard reset of the controller using PCI power management 4133 + * states or using the doorbell register. */ 4134 + static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev) 4135 + { 4136 + u16 saved_config_space[32]; 4137 + u64 cfg_offset; 4138 + u32 cfg_base_addr; 4139 + u64 cfg_base_addr_index; 4140 + void __iomem *vaddr; 4141 + unsigned long paddr; 4142 + u32 misc_fw_support, active_transport; 4143 + int rc, i; 4144 + CfgTable_struct __iomem *cfgtable; 4145 + bool use_doorbell; 4146 + u32 board_id; 4147 + 4148 + /* For controllers as old a the p600, this is very nearly 4149 + * the same thing as 4150 + * 4151 + * pci_save_state(pci_dev); 4152 + * pci_set_power_state(pci_dev, PCI_D3hot); 4153 + * pci_set_power_state(pci_dev, PCI_D0); 4154 + * pci_restore_state(pci_dev); 4155 + * 4156 + * but we can't use these nice canned kernel routines on 4157 + * kexec, because they also check the MSI/MSI-X state in PCI 4158 + * configuration space and do the wrong thing when it is 4159 + * set/cleared. Also, the pci_save/restore_state functions 4160 + * violate the ordering requirements for restoring the 4161 + * configuration space from the CCISS document (see the 4162 + * comment below). So we roll our own .... 4163 + * 4164 + * For controllers newer than the P600, the pci power state 4165 + * method of resetting doesn't work so we have another way 4166 + * using the doorbell register. 4167 + */ 4168 + 4169 + /* Exclude 640x boards. These are two pci devices in one slot 4170 + * which share a battery backed cache module. One controls the 4171 + * cache, the other accesses the cache through the one that controls 4172 + * it. If we reset the one controlling the cache, the other will 4173 + * likely not be happy. Just forbid resetting this conjoined mess. 
4174 + */ 4175 + cciss_lookup_board_id(pdev, &board_id); 4176 + if (board_id == 0x409C0E11 || board_id == 0x409D0E11) { 4177 + dev_warn(&pdev->dev, "Cannot reset Smart Array 640x " 4178 + "due to shared cache module."); 4179 + return -ENODEV; 4180 + } 4181 4182 for (i = 0; i < 32; i++) 4183 pci_read_config_word(pdev, 2*i, &saved_config_space[i]); 4184 4185 + /* find the first memory BAR, so we can find the cfg table */ 4186 + rc = cciss_pci_find_memory_BAR(pdev, &paddr); 4187 + if (rc) 4188 + return rc; 4189 + vaddr = remap_pci_mem(paddr, 0x250); 4190 + if (!vaddr) 4191 + return -ENOMEM; 4192 + 4193 + /* find cfgtable in order to check if reset via doorbell is supported */ 4194 + rc = cciss_find_cfg_addrs(pdev, vaddr, &cfg_base_addr, 4195 + &cfg_base_addr_index, &cfg_offset); 4196 + if (rc) 4197 + goto unmap_vaddr; 4198 + cfgtable = remap_pci_mem(pci_resource_start(pdev, 4199 + cfg_base_addr_index) + cfg_offset, sizeof(*cfgtable)); 4200 + if (!cfgtable) { 4201 + rc = -ENOMEM; 4202 + goto unmap_vaddr; 4203 } 4204 4205 + /* If reset via doorbell register is supported, use that. */ 4206 + misc_fw_support = readl(&cfgtable->misc_fw_support); 4207 + use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET; 4208 4209 + rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell); 4210 + if (rc) 4211 + goto unmap_cfgtable; 4212 4213 /* Restore the PCI configuration space. The Open CISS 4214 * Specification says, "Restore the PCI Configuration 4215 * Registers, offsets 00h through 60h. It is important to 4216 * restore the command register, 16-bits at offset 04h, 4217 * last. Do not restore the configuration status register, 4218 + * 16-bits at offset 06h." Note that the offset is 2*i. 4219 + */ 4220 for (i = 0; i < 32; i++) { 4221 if (i == 2 || i == 3) 4222 continue; ··· 4149 wmb(); 4150 pci_write_config_word(pdev, 4, saved_config_space[2]); 4151 4152 + /* Some devices (notably the HP Smart Array 5i Controller) 4153 + need a little pause here */ 4154 + msleep(CCISS_POST_RESET_PAUSE_MSECS); 4155 + 4156 + /* Controller should be in simple mode at this point. If it's not, 4157 + * It means we're on one of those controllers which doesn't support 4158 + * the doorbell reset method and on which the PCI power management reset 4159 + * method doesn't work (P800, for example.) 4160 + * In those cases, don't try to proceed, as it generally doesn't work. 4161 + */ 4162 + active_transport = readl(&cfgtable->TransportActive); 4163 + if (active_transport & PERFORMANT_MODE) { 4164 + dev_warn(&pdev->dev, "Unable to successfully reset controller," 4165 + " Ignoring controller.\n"); 4166 + rc = -ENODEV; 4167 + } 4168 + 4169 + unmap_cfgtable: 4170 + iounmap(cfgtable); 4171 + 4172 + unmap_vaddr: 4173 + iounmap(vaddr); 4174 + return rc; 4175 + } 4176 + 4177 + static __devinit int cciss_init_reset_devices(struct pci_dev *pdev) 4178 + { 4179 + int rc, i; 4180 + 4181 + if (!reset_devices) 4182 + return 0; 4183 + 4184 + /* Reset the controller with a PCI power-cycle or via doorbell */ 4185 + rc = cciss_kdump_hard_reset_controller(pdev); 4186 + 4187 + /* -ENOTSUPP here means we cannot reset the controller 4188 + * but it's already (and still) up and running in 4189 + * "performant mode". Or, it might be 640x, which can't reset 4190 + * due to concerns about shared bbwc between 6402/6404 pair. 4191 + */ 4192 + if (rc == -ENOTSUPP) 4193 + return 0; /* just try to do the kdump anyhow. 
*/ 4194 + if (rc) 4195 + return -ENODEV; 4196 + if (cciss_reset_msi(pdev)) 4197 + return -ENODEV; 4198 + 4199 + /* Now try to get the controller to respond to a no-op */ 4200 + for (i = 0; i < CCISS_POST_RESET_NOOP_RETRIES; i++) { 4201 + if (cciss_noop(pdev) == 0) 4202 + break; 4203 + else 4204 + dev_warn(&pdev->dev, "no-op failed%s\n", 4205 + (i < CCISS_POST_RESET_NOOP_RETRIES - 1 ? 4206 + "; re-trying" : "")); 4207 + msleep(CCISS_POST_RESET_NOOP_INTERVAL_MSECS); 4208 + } 4209 return 0; 4210 } 4211 ··· 4166 int rc; 4167 int dac, return_code; 4168 InquiryData_struct *inq_buff; 4169 + ctlr_info_t *h; 4170 4171 + rc = cciss_init_reset_devices(pdev); 4172 + if (rc) 4173 + return rc; 4174 + i = alloc_cciss_hba(pdev); 4175 if (i < 0) 4176 return -1; 4177 4178 + h = hba[i]; 4179 + h->pdev = pdev; 4180 + h->busy_initializing = 1; 4181 + INIT_HLIST_HEAD(&h->cmpQ); 4182 + INIT_HLIST_HEAD(&h->reqQ); 4183 + mutex_init(&h->busy_shutting_down); 4184 4185 + if (cciss_pci_init(h) != 0) 4186 goto clean_no_release_regions; 4187 4188 + sprintf(h->devname, "cciss%d", i); 4189 + h->ctlr = i; 4190 4191 + init_completion(&h->scan_wait); 4192 4193 + if (cciss_create_hba_sysfs_entry(h)) 4194 goto clean0; 4195 4196 /* configure PCI DMA stuff */ ··· 4214 else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) 4215 dac = 0; 4216 else { 4217 + dev_err(&h->pdev->dev, "no suitable DMA available\n"); 4218 goto clean1; 4219 } 4220 ··· 4224 * 8 controller support. 4225 */ 4226 if (i < MAX_CTLR_ORIG) 4227 + h->major = COMPAQ_CISS_MAJOR + i; 4228 + rc = register_blkdev(h->major, h->devname); 4229 if (rc == -EBUSY || rc == -EINVAL) { 4230 + dev_err(&h->pdev->dev, 4231 + "Unable to get major number %d for %s " 4232 + "on hba %d\n", h->major, h->devname, i); 4233 goto clean1; 4234 } else { 4235 if (i >= MAX_CTLR_ORIG) 4236 + h->major = rc; 4237 } 4238 4239 /* make sure the board interrupts are off */ 4240 + h->access.set_intr_mask(h, CCISS_INTR_OFF); 4241 + if (h->msi_vector || h->msix_vector) { 4242 + if (request_irq(h->intr[PERF_MODE_INT], 4243 + do_cciss_msix_intr, 4244 + IRQF_DISABLED, h->devname, h)) { 4245 + dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n", 4246 + h->intr[PERF_MODE_INT], h->devname); 4247 + goto clean2; 4248 + } 4249 + } else { 4250 + if (request_irq(h->intr[PERF_MODE_INT], do_cciss_intx, 4251 + IRQF_DISABLED, h->devname, h)) { 4252 + dev_err(&h->pdev->dev, "Unable to get irq %d for %s\n", 4253 + h->intr[PERF_MODE_INT], h->devname); 4254 + goto clean2; 4255 + } 4256 } 4257 4258 + dev_info(&h->pdev->dev, "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n", 4259 + h->devname, pdev->device, pci_name(pdev), 4260 + h->intr[PERF_MODE_INT], dac ? 
"" : " not"); 4261 4262 + h->cmd_pool_bits = 4263 + kmalloc(DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) 4264 * sizeof(unsigned long), GFP_KERNEL); 4265 + h->cmd_pool = (CommandList_struct *) 4266 + pci_alloc_consistent(h->pdev, 4267 + h->nr_cmds * sizeof(CommandList_struct), 4268 + &(h->cmd_pool_dhandle)); 4269 + h->errinfo_pool = (ErrorInfo_struct *) 4270 + pci_alloc_consistent(h->pdev, 4271 + h->nr_cmds * sizeof(ErrorInfo_struct), 4272 + &(h->errinfo_pool_dhandle)); 4273 + if ((h->cmd_pool_bits == NULL) 4274 + || (h->cmd_pool == NULL) 4275 + || (h->errinfo_pool == NULL)) { 4276 + dev_err(&h->pdev->dev, "out of memory"); 4277 goto clean4; 4278 } 4279 4280 /* Need space for temp scatter list */ 4281 + h->scatter_list = kmalloc(h->max_commands * 4282 sizeof(struct scatterlist *), 4283 GFP_KERNEL); 4284 + for (k = 0; k < h->nr_cmds; k++) { 4285 + h->scatter_list[k] = kmalloc(sizeof(struct scatterlist) * 4286 + h->maxsgentries, 4287 GFP_KERNEL); 4288 + if (h->scatter_list[k] == NULL) { 4289 + dev_err(&h->pdev->dev, 4290 + "could not allocate s/g lists\n"); 4291 goto clean4; 4292 } 4293 } 4294 + h->cmd_sg_list = cciss_allocate_sg_chain_blocks(h, 4295 + h->chainsize, h->nr_cmds); 4296 + if (!h->cmd_sg_list && h->chainsize > 0) 4297 goto clean4; 4298 4299 + spin_lock_init(&h->lock); 4300 4301 /* Initialize the pdev driver private data. 4302 + have it point to h. */ 4303 + pci_set_drvdata(pdev, h); 4304 /* command and error info recs zeroed out before 4305 they are used */ 4306 + memset(h->cmd_pool_bits, 0, 4307 + DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) 4308 * sizeof(unsigned long)); 4309 4310 + h->num_luns = 0; 4311 + h->highest_lun = -1; 4312 for (j = 0; j < CISS_MAX_LUN; j++) { 4313 + h->drv[j] = NULL; 4314 + h->gendisk[j] = NULL; 4315 } 4316 4317 + cciss_scsi_setup(h); 4318 4319 /* Turn the interrupts on so we can service requests */ 4320 + h->access.set_intr_mask(h, CCISS_INTR_ON); 4321 4322 /* Get the firmware version */ 4323 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL); 4324 if (inq_buff == NULL) { 4325 + dev_err(&h->pdev->dev, "out of memory\n"); 4326 goto clean4; 4327 } 4328 4329 + return_code = sendcmd_withirq(h, CISS_INQUIRY, inq_buff, 4330 sizeof(InquiryData_struct), 0, CTLR_LUNID, TYPE_CMD); 4331 if (return_code == IO_OK) { 4332 + h->firm_ver[0] = inq_buff->data_byte[32]; 4333 + h->firm_ver[1] = inq_buff->data_byte[33]; 4334 + h->firm_ver[2] = inq_buff->data_byte[34]; 4335 + h->firm_ver[3] = inq_buff->data_byte[35]; 4336 } else { /* send command failed */ 4337 + dev_warn(&h->pdev->dev, "unable to determine firmware" 4338 " version of controller\n"); 4339 } 4340 kfree(inq_buff); 4341 4342 + cciss_procinit(h); 4343 4344 + h->cciss_max_sectors = 8192; 4345 4346 + rebuild_lun_table(h, 1, 0); 4347 + h->busy_initializing = 0; 4348 return 1; 4349 4350 clean4: 4351 + kfree(h->cmd_pool_bits); 4352 /* Free up sg elements */ 4353 + for (k = 0; k < h->nr_cmds; k++) 4354 + kfree(h->scatter_list[k]); 4355 + kfree(h->scatter_list); 4356 + cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds); 4357 + if (h->cmd_pool) 4358 + pci_free_consistent(h->pdev, 4359 + h->nr_cmds * sizeof(CommandList_struct), 4360 + h->cmd_pool, h->cmd_pool_dhandle); 4361 + if (h->errinfo_pool) 4362 + pci_free_consistent(h->pdev, 4363 + h->nr_cmds * sizeof(ErrorInfo_struct), 4364 + h->errinfo_pool, 4365 + h->errinfo_pool_dhandle); 4366 + free_irq(h->intr[PERF_MODE_INT], h); 4367 clean2: 4368 + unregister_blkdev(h->major, h->devname); 4369 clean1: 4370 + cciss_destroy_hba_sysfs_entry(h); 4371 clean0: 4372 
pci_release_regions(pdev); 4373 clean_no_release_regions: 4374 + h->busy_initializing = 0; 4375 4376 /* 4377 * Deliberately omit pci_disable_device(): it does something nasty to 4378 * Smart Array controllers that pci_enable_device does not undo 4379 */ 4380 pci_set_drvdata(pdev, NULL); 4381 + free_hba(h); 4382 return -1; 4383 } 4384 ··· 4381 h = pci_get_drvdata(pdev); 4382 flush_buf = kzalloc(4, GFP_KERNEL); 4383 if (!flush_buf) { 4384 + dev_warn(&h->pdev->dev, "cache not flushed, out of memory.\n"); 4385 return; 4386 } 4387 /* write all data in the battery backed cache to disk */ 4388 memset(flush_buf, 0, 4); 4389 + return_code = sendcmd_withirq(h, CCISS_CACHE_FLUSH, flush_buf, 4390 4, 0, CTLR_LUNID, TYPE_CMD); 4391 kfree(flush_buf); 4392 if (return_code != IO_OK) 4393 + dev_warn(&h->pdev->dev, "Error flushing cache\n"); 4394 h->access.set_intr_mask(h, CCISS_INTR_OFF); 4395 + free_irq(h->intr[PERF_MODE_INT], h); 4396 } 4397 4398 static void __devexit cciss_remove_one(struct pci_dev *pdev) 4399 { 4400 + ctlr_info_t *h; 4401 int i, j; 4402 4403 if (pci_get_drvdata(pdev) == NULL) { 4404 + dev_err(&pdev->dev, "Unable to remove device\n"); 4405 return; 4406 } 4407 4408 + h = pci_get_drvdata(pdev); 4409 + i = h->ctlr; 4410 if (hba[i] == NULL) { 4411 + dev_err(&pdev->dev, "device appears to already be removed\n"); 4412 return; 4413 } 4414 4415 + mutex_lock(&h->busy_shutting_down); 4416 4417 + remove_from_scan_list(h); 4418 + remove_proc_entry(h->devname, proc_cciss); 4419 + unregister_blkdev(h->major, h->devname); 4420 4421 /* remove it from the disk list */ 4422 for (j = 0; j < CISS_MAX_LUN; j++) { 4423 + struct gendisk *disk = h->gendisk[j]; 4424 if (disk) { 4425 struct request_queue *q = disk->queue; 4426 4427 if (disk->flags & GENHD_FL_UP) { 4428 + cciss_destroy_ld_sysfs_entry(h, j, 1); 4429 del_gendisk(disk); 4430 } 4431 if (q) ··· 4438 } 4439 4440 #ifdef CONFIG_CISS_SCSI_TAPE 4441 + cciss_unregister_scsi(h); /* unhook from SCSI subsystem */ 4442 #endif 4443 4444 cciss_shutdown(pdev); 4445 4446 #ifdef CONFIG_PCI_MSI 4447 + if (h->msix_vector) 4448 + pci_disable_msix(h->pdev); 4449 + else if (h->msi_vector) 4450 + pci_disable_msi(h->pdev); 4451 #endif /* CONFIG_PCI_MSI */ 4452 4453 + iounmap(h->transtable); 4454 + iounmap(h->cfgtable); 4455 + iounmap(h->vaddr); 4456 4457 + pci_free_consistent(h->pdev, h->nr_cmds * sizeof(CommandList_struct), 4458 + h->cmd_pool, h->cmd_pool_dhandle); 4459 + pci_free_consistent(h->pdev, h->nr_cmds * sizeof(ErrorInfo_struct), 4460 + h->errinfo_pool, h->errinfo_pool_dhandle); 4461 + kfree(h->cmd_pool_bits); 4462 /* Free up sg elements */ 4463 + for (j = 0; j < h->nr_cmds; j++) 4464 + kfree(h->scatter_list[j]); 4465 + kfree(h->scatter_list); 4466 + cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds); 4467 /* 4468 * Deliberately omit pci_disable_device(): it does something nasty to 4469 * Smart Array controllers that pci_enable_device does not undo 4470 */ 4471 pci_release_regions(pdev); 4472 pci_set_drvdata(pdev, NULL); 4473 + cciss_destroy_hba_sysfs_entry(h); 4474 + mutex_unlock(&h->busy_shutting_down); 4475 + free_hba(h); 4476 } 4477 4478 static struct pci_driver cciss_pci_driver = { ··· 4495 * array of them, the size must be a multiple of 8 bytes. 
4496 */ 4497 BUILD_BUG_ON(sizeof(CommandList_struct) % COMMANDLIST_ALIGNMENT); 4498 printk(KERN_INFO DRIVER_NAME "\n"); 4499 4500 err = bus_register(&cciss_bus_type); ··· 4532 /* double check that all controller entrys have been removed */ 4533 for (i = 0; i < MAX_CTLR; i++) { 4534 if (hba[i] != NULL) { 4535 + dev_warn(&hba[i]->pdev->dev, 4536 + "had to remove controller\n"); 4537 cciss_remove_one(hba[i]->pdev); 4538 } 4539 } 4540 kthread_stop(cciss_scan_thread); 4541 remove_proc_entry("driver/cciss", NULL); 4542 bus_unregister(&cciss_bus_type); 4543 } 4544 4545 module_init(cciss_init);
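Note on the cciss.c hunks above: they introduce two related conventions. An "indexed" tag puts the command-pool index in bits 5 and up, sets DIRECT_LOOKUP_BIT at bit 4, and leaves the two low bits for controller error reporting; and next_command() consumes the new reply ring by comparing each slot's low bit against a wraparound parity flag that starts at 1 and flips on every wrap. The user-space sketch below only illustrates that bit layout and parity check; make_tag(), the four-entry ring and main() are invented for the example and are not driver code.

#include <stdio.h>
#include <stdint.h>

#define DIRECT_LOOKUP_BIT   0x10	/* bit 4: tag carries a cmd_pool index */
#define DIRECT_LOOKUP_SHIFT 5		/* the index lives in bits 5 and up    */
#define CCISS_ERROR_BITS    0x03	/* low two bits: controller error bits */

static uint32_t make_tag(uint32_t index)
{
	/* cciss_set_tag_index() + cciss_mark_tag_indexed() in one step */
	return (index << DIRECT_LOOKUP_SHIFT) | DIRECT_LOOKUP_BIT;
}

int main(void)
{
	uint32_t tag = make_tag(42);
	uint64_t ring[4] = { 0 };	/* stand-in for h->reply_pool      */
	uint64_t *head = ring;		/* stand-in for h->reply_pool_head */
	unsigned wraparound = 1;	/* spec says: initialize to 1      */

	/* Decode the tag the way process_indexed_cmd() does. */
	if (tag & DIRECT_LOOKUP_BIT)
		printf("tag 0x%08x -> cmd_pool index %u\n",
		       tag, tag >> DIRECT_LOOKUP_SHIFT);

	/*
	 * Reply-ring consumption as in next_command(): a slot belongs to the
	 * host only while its low bit matches the wraparound parity; the
	 * parity flips each time the head wraps, so the controller can reuse
	 * the ring without an explicit producer/consumer index exchange.
	 */
	ring[0] = make_tag(7) | 1;	/* pretend the controller posted one completion */
	if ((*head & 1) == wraparound) {
		printf("completed tag 0x%08llx\n",
		       (unsigned long long)(*head & ~(uint64_t)CCISS_ERROR_BITS));
		head++;
	}
	if (head == ring + 4) {		/* wrap check, as at the end of next_command() */
		head = ring;
		wraparound ^= 1;
	}
	return 0;
}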
+124 -11
drivers/block/cciss.h
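The header changes below extend the access_method dispatch table: intr_pending now returns bool, and a new SA5_performant_access table is added so cciss_put_controller_into_performant_mode() can swap the whole table at init time. A minimal, self-contained model of that dispatch pattern follows; everything in it is mocked for the example, and only the shape of the struct echoes the header.

#include <stdio.h>
#include <stdbool.h>

struct access_method {
	bool (*intr_pending)(void *h);
	unsigned long (*command_completed)(void *h);
};

static bool simple_intr_pending(void *h)	{ (void)h; return true; }
static unsigned long simple_completed(void *h)	{ (void)h; return 0x10; }
static bool perf_intr_pending(void *h)		{ (void)h; return true; }
static unsigned long perf_completed(void *h)	{ (void)h; return 0x20; }

static const struct access_method simple_access = {
	simple_intr_pending, simple_completed
};
static const struct access_method performant_access = {
	perf_intr_pending, perf_completed
};

int main(void)
{
	/* board lookup picks one table at probe time ... */
	const struct access_method *access = &simple_access;

	/* ... and performant-mode setup swaps in another one later. */
	access = &performant_access;
	if (access->intr_pending(NULL))
		printf("completed tag 0x%lx\n", access->command_completed(NULL));
	return 0;
}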
··· 25 void (*submit_command)(ctlr_info_t *h, CommandList_struct *c); 26 void (*set_intr_mask)(ctlr_info_t *h, unsigned long val); 27 unsigned long (*fifo_full)(ctlr_info_t *h); 28 - unsigned long (*intr_pending)(ctlr_info_t *h); 29 unsigned long (*command_completed)(ctlr_info_t *h); 30 }; 31 typedef struct _drive_info_struct ··· 85 int max_cmd_sgentries; 86 SGDescriptor_struct **cmd_sg_list; 87 88 - # define DOORBELL_INT 0 89 - # define PERF_MODE_INT 1 90 # define SIMPLE_MODE_INT 2 91 # define MEMQ_MODE_INT 3 92 unsigned int intr[4]; ··· 137 struct list_head scan_list; 138 struct completion scan_wait; 139 struct device dev; 140 }; 141 142 - /* Defining the diffent access_menthods */ 143 - /* 144 * Memory mapped FIFO interface (SMART 53xx cards) 145 */ 146 #define SA5_DOORBELL 0x20 ··· 176 #define SA5B_INTR_PENDING 0x04 177 #define FIFO_EMPTY 0xffffffff 178 #define CCISS_FIRMWARE_READY 0xffff0000 /* value in scratchpad register */ 179 180 #define CISS_ERROR_BIT 0x02 181 182 #define CCISS_INTR_ON 1 183 #define CCISS_INTR_OFF 0 184 /* 185 Send the command to the hardware 186 */ 187 static void SA5_submit_command( ctlr_info_t *h, CommandList_struct *c) 188 { 189 #ifdef CCISS_DEBUG 190 - printk("Sending %x - down to controller\n", c->busaddr ); 191 - #endif /* CCISS_DEBUG */ 192 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); 193 h->commands_outstanding++; 194 if ( h->commands_outstanding > h->max_outstanding) ··· 259 h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); 260 } 261 } 262 /* 263 * Returns true if fifo is full. 264 * ··· 309 return ( register_value); 310 311 } 312 /* 313 * Returns true if an interrupt is pending.. 314 */ 315 - static unsigned long SA5_intr_pending(ctlr_info_t *h) 316 { 317 unsigned long register_value = 318 readl(h->vaddr + SA5_INTR_STATUS); ··· 361 /* 362 * Returns true if an interrupt is pending.. 363 */ 364 - static unsigned long SA5B_intr_pending(ctlr_info_t *h) 365 { 366 unsigned long register_value = 367 readl(h->vaddr + SA5_INTR_STATUS); ··· 373 return 0 ; 374 } 375 376 377 static struct access_method SA5_access = { 378 SA5_submit_command, ··· 404 SA5_completed, 405 }; 406 407 struct board_type { 408 __u32 board_id; 409 char *product_name; 410 struct access_method *access; 411 int nr_cmds; /* Max cmds this kind of ctlr can handle. */ 412 }; 413 - 414 - #define CCISS_LOCK(i) (&hba[i]->lock) 415 416 #endif /* CCISS_H */
··· 25 void (*submit_command)(ctlr_info_t *h, CommandList_struct *c); 26 void (*set_intr_mask)(ctlr_info_t *h, unsigned long val); 27 unsigned long (*fifo_full)(ctlr_info_t *h); 28 + bool (*intr_pending)(ctlr_info_t *h); 29 unsigned long (*command_completed)(ctlr_info_t *h); 30 }; 31 typedef struct _drive_info_struct ··· 85 int max_cmd_sgentries; 86 SGDescriptor_struct **cmd_sg_list; 87 88 + # define PERF_MODE_INT 0 89 + # define DOORBELL_INT 1 90 # define SIMPLE_MODE_INT 2 91 # define MEMQ_MODE_INT 3 92 unsigned int intr[4]; ··· 137 struct list_head scan_list; 138 struct completion scan_wait; 139 struct device dev; 140 + /* 141 + * Performant mode tables. 142 + */ 143 + u32 trans_support; 144 + u32 trans_offset; 145 + struct TransTable_struct *transtable; 146 + unsigned long transMethod; 147 + 148 + /* 149 + * Performant mode completion buffer 150 + */ 151 + u64 *reply_pool; 152 + dma_addr_t reply_pool_dhandle; 153 + u64 *reply_pool_head; 154 + size_t reply_pool_size; 155 + unsigned char reply_pool_wraparound; 156 + u32 *blockFetchTable; 157 }; 158 159 + /* Defining the diffent access_methods 160 + * 161 * Memory mapped FIFO interface (SMART 53xx cards) 162 */ 163 #define SA5_DOORBELL 0x20 ··· 159 #define SA5B_INTR_PENDING 0x04 160 #define FIFO_EMPTY 0xffffffff 161 #define CCISS_FIRMWARE_READY 0xffff0000 /* value in scratchpad register */ 162 + /* Perf. mode flags */ 163 + #define SA5_PERF_INTR_PENDING 0x04 164 + #define SA5_PERF_INTR_OFF 0x05 165 + #define SA5_OUTDB_STATUS_PERF_BIT 0x01 166 + #define SA5_OUTDB_CLEAR_PERF_BIT 0x01 167 + #define SA5_OUTDB_CLEAR 0xA0 168 + #define SA5_OUTDB_CLEAR_PERF_BIT 0x01 169 + #define SA5_OUTDB_STATUS 0x9C 170 + 171 172 #define CISS_ERROR_BIT 0x02 173 174 #define CCISS_INTR_ON 1 175 #define CCISS_INTR_OFF 0 176 + 177 + 178 + /* CCISS_BOARD_READY_WAIT_SECS is how long to wait for a board 179 + * to become ready, in seconds, before giving up on it. 180 + * CCISS_BOARD_READY_POLL_INTERVAL_MSECS * is how long to wait 181 + * between polling the board to see if it is ready, in 182 + * milliseconds. CCISS_BOARD_READY_ITERATIONS is derived 183 + * the above. 184 + */ 185 + #define CCISS_BOARD_READY_WAIT_SECS (120) 186 + #define CCISS_BOARD_READY_POLL_INTERVAL_MSECS (100) 187 + #define CCISS_BOARD_READY_ITERATIONS \ 188 + ((CCISS_BOARD_READY_WAIT_SECS * 1000) / \ 189 + CCISS_BOARD_READY_POLL_INTERVAL_MSECS) 190 + #define CCISS_POST_RESET_PAUSE_MSECS (3000) 191 + #define CCISS_POST_RESET_NOOP_INTERVAL_MSECS (1000) 192 + #define CCISS_POST_RESET_NOOP_RETRIES (12) 193 + 194 /* 195 Send the command to the hardware 196 */ 197 static void SA5_submit_command( ctlr_info_t *h, CommandList_struct *c) 198 { 199 #ifdef CCISS_DEBUG 200 + printk(KERN_WARNING "cciss%d: Sending %08x - down to controller\n", 201 + h->ctlr, c->busaddr); 202 + #endif /* CCISS_DEBUG */ 203 writel(c->busaddr, h->vaddr + SA5_REQUEST_PORT_OFFSET); 204 h->commands_outstanding++; 205 if ( h->commands_outstanding > h->max_outstanding) ··· 214 h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); 215 } 216 } 217 + 218 + /* Performant mode intr_mask */ 219 + static void SA5_performant_intr_mask(ctlr_info_t *h, unsigned long val) 220 + { 221 + if (val) { /* turn on interrupts */ 222 + h->interrupts_enabled = 1; 223 + writel(0, h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); 224 + } else { 225 + h->interrupts_enabled = 0; 226 + writel(SA5_PERF_INTR_OFF, 227 + h->vaddr + SA5_REPLY_INTR_MASK_OFFSET); 228 + } 229 + } 230 + 231 /* 232 * Returns true if fifo is full. 
233 * ··· 250 return ( register_value); 251 252 } 253 + 254 + /* Performant mode command completed */ 255 + static unsigned long SA5_performant_completed(ctlr_info_t *h) 256 + { 257 + unsigned long register_value = FIFO_EMPTY; 258 + 259 + /* flush the controller write of the reply queue by reading 260 + * outbound doorbell status register. 261 + */ 262 + register_value = readl(h->vaddr + SA5_OUTDB_STATUS); 263 + /* msi auto clears the interrupt pending bit. */ 264 + if (!(h->msi_vector || h->msix_vector)) { 265 + writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR); 266 + /* Do a read in order to flush the write to the controller 267 + * (as per spec.) 268 + */ 269 + register_value = readl(h->vaddr + SA5_OUTDB_STATUS); 270 + } 271 + 272 + if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) { 273 + register_value = *(h->reply_pool_head); 274 + (h->reply_pool_head)++; 275 + h->commands_outstanding--; 276 + } else { 277 + register_value = FIFO_EMPTY; 278 + } 279 + /* Check for wraparound */ 280 + if (h->reply_pool_head == (h->reply_pool + h->max_commands)) { 281 + h->reply_pool_head = h->reply_pool; 282 + h->reply_pool_wraparound ^= 1; 283 + } 284 + 285 + return register_value; 286 + } 287 /* 288 * Returns true if an interrupt is pending.. 289 */ 290 + static bool SA5_intr_pending(ctlr_info_t *h) 291 { 292 unsigned long register_value = 293 readl(h->vaddr + SA5_INTR_STATUS); ··· 268 /* 269 * Returns true if an interrupt is pending.. 270 */ 271 + static bool SA5B_intr_pending(ctlr_info_t *h) 272 { 273 unsigned long register_value = 274 readl(h->vaddr + SA5_INTR_STATUS); ··· 280 return 0 ; 281 } 282 283 + static bool SA5_performant_intr_pending(ctlr_info_t *h) 284 + { 285 + unsigned long register_value = readl(h->vaddr + SA5_INTR_STATUS); 286 + 287 + if (!register_value) 288 + return false; 289 + 290 + if (h->msi_vector || h->msix_vector) 291 + return true; 292 + 293 + /* Read outbound doorbell to flush */ 294 + register_value = readl(h->vaddr + SA5_OUTDB_STATUS); 295 + return register_value & SA5_OUTDB_STATUS_PERF_BIT; 296 + } 297 298 static struct access_method SA5_access = { 299 SA5_submit_command, ··· 297 SA5_completed, 298 }; 299 300 + static struct access_method SA5_performant_access = { 301 + SA5_submit_command, 302 + SA5_performant_intr_mask, 303 + SA5_fifo_full, 304 + SA5_performant_intr_pending, 305 + SA5_performant_completed, 306 + }; 307 + 308 struct board_type { 309 __u32 board_id; 310 char *product_name; 311 struct access_method *access; 312 int nr_cmds; /* Max cmds this kind of ctlr can handle. */ 313 }; 314 315 #endif /* CCISS_H */
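One detail from the header above that is easy to misread: CCISS_BOARD_READY_ITERATIONS is a derived poll budget, (120 s * 1000) / 100 ms = 1200 polls, and that budget is what bounds the loop in cciss_wait_for_board_ready() earlier in this series. Below is a small self-contained sketch of the same poll shape; read_scratchpad() here is a fake that reports ready after three polls, whereas the driver does a readl() of the SA5 scratchpad register and sleeps with msleep().

#include <stdio.h>
#include <stdint.h>

#define CCISS_FIRMWARE_READY			0xffff0000u
#define CCISS_BOARD_READY_WAIT_SECS		120
#define CCISS_BOARD_READY_POLL_INTERVAL_MSECS	100
#define CCISS_BOARD_READY_ITERATIONS \
	((CCISS_BOARD_READY_WAIT_SECS * 1000) / CCISS_BOARD_READY_POLL_INTERVAL_MSECS)

static unsigned polls;	/* how many polls the fake board needed */

/* Fake scratchpad: reports firmware-ready on the third poll. */
static uint32_t read_scratchpad(void)
{
	return ++polls >= 3 ? CCISS_FIRMWARE_READY : 0;
}

static int wait_for_board_ready(void)
{
	int i;

	for (i = 0; i < CCISS_BOARD_READY_ITERATIONS; i++) {
		if (read_scratchpad() == CCISS_FIRMWARE_READY)
			return 0;
		/* the driver sleeps here: msleep(CCISS_BOARD_READY_POLL_INTERVAL_MSECS) */
	}
	return -1;
}

int main(void)
{
	int rc = wait_for_board_ready();

	/* (120 s * 1000) / 100 ms = 1200 polls worst case */
	printf("poll budget: %d iterations\n", CCISS_BOARD_READY_ITERATIONS);
	printf("board %s after %u polls\n", rc ? "timed out" : "ready", polls);
	return 0;
}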
+33 -3
drivers/block/cciss_cmd.h
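The TransTable_struct added below is what cciss_enter_performant_mode() programs: the eight BlockFetch registers receive bft[] = { 5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4 }, the command sizes in 16-byte blocks, and calc_bucket_map() (added in cciss.c above) maps every possible scatter-gather count to the smallest of those eight sizes so the controller fetches no more of the command than it needs. Here is a stand-alone rendering of that mapping; the MAXSGENTRIES value and the demo in main() are assumptions made for the example.

#include <stdio.h>

#define MAXSGENTRIES		32	/* assumed in-command SG limit for the demo */
#define MINIMUM_TRANSFER_BLOCKS	4	/* a command with 0 SG entries still needs 4 blocks */

static void calc_bucket_map(const int bucket[], int num_buckets,
			    int nsgs, int *bucket_map)
{
	int i, j, b, size;

	for (i = 0; i <= nsgs; i++) {
		size = i + MINIMUM_TRANSFER_BLOCKS;	/* command size, in 16-byte blocks */
		b = num_buckets;			/* assume the biggest bucket */
		for (j = 0; j < num_buckets; j++) {
			if (bucket[j] >= size) {
				b = j;			/* first bucket that is big enough */
				break;
			}
		}
		bucket_map[i] = b;
	}
}

int main(void)
{
	/* same table the driver writes into BlockFetch0..7 */
	int bft[8] = { 5, 6, 8, 10, 12, 20, 28, MAXSGENTRIES + 4 };
	int map[MAXSGENTRIES + 1];
	int i;

	/* bft[7] = MAXSGENTRIES + 4 always fits, so every entry maps to a real bucket */
	calc_bucket_map(bft, 8, MAXSGENTRIES, map);
	for (i = 0; i <= MAXSGENTRIES; i += 8)
		printf("%2d SG entries -> bucket %d = %2d blocks (%3d bytes fetched)\n",
		       i, map[i], bft[map[i]], bft[map[i]] * 16);
	return 0;
}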
··· 52 /* Configuration Table */ 53 #define CFGTBL_ChangeReq 0x00000001l 54 #define CFGTBL_AccCmds 0x00000001l 55 56 #define CFGTBL_Trans_Simple 0x00000002l 57 58 #define CFGTBL_BusType_Ultra2 0x00000001l 59 #define CFGTBL_BusType_Ultra3 0x00000002l ··· 175 * PAD_64 can be adjusted independently as needed for 32-bit 176 * and 64-bits systems. 177 */ 178 - #define COMMANDLIST_ALIGNMENT (8) 179 #define IS_64_BIT ((sizeof(long) - 4)/4) 180 #define IS_32_BIT (!IS_64_BIT) 181 #define PAD_32 (0) 182 #define PAD_64 (4) 183 #define PADSIZE (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64) 184 typedef struct _CommandList_struct { 185 CommandListHeader_struct Header; 186 RequestBlock_struct Request; ··· 200 struct completion *waiting; 201 int retry_count; 202 void * scsi_cmd; 203 - char pad[PADSIZE]; 204 } CommandList_struct; 205 206 /* Configuration Table Structure */ ··· 214 typedef struct _CfgTable_struct { 215 BYTE Signature[4]; 216 DWORD SpecValence; 217 DWORD TransportSupport; 218 DWORD TransportActive; 219 HostWrite_struct HostWrite; 220 DWORD CmdsOutMax; 221 DWORD BusTypes; 222 - DWORD Reserved; 223 BYTE ServerName[16]; 224 DWORD HeartBeat; 225 DWORD SCSI_Prefetch; ··· 230 DWORD MaxLogicalUnits; 231 DWORD MaxPhysicalDrives; 232 DWORD MaxPhysicalDrivesPerLogicalUnit; 233 } CfgTable_struct; 234 #pragma pack() 235 #endif /* CCISS_CMD_H */
··· 52 /* Configuration Table */ 53 #define CFGTBL_ChangeReq 0x00000001l 54 #define CFGTBL_AccCmds 0x00000001l 55 + #define DOORBELL_CTLR_RESET 0x00000004l 56 57 #define CFGTBL_Trans_Simple 0x00000002l 58 + #define CFGTBL_Trans_Performant 0x00000004l 59 60 #define CFGTBL_BusType_Ultra2 0x00000001l 61 #define CFGTBL_BusType_Ultra3 0x00000002l ··· 173 * PAD_64 can be adjusted independently as needed for 32-bit 174 * and 64-bits systems. 175 */ 176 + #define COMMANDLIST_ALIGNMENT (32) 177 #define IS_64_BIT ((sizeof(long) - 4)/4) 178 #define IS_32_BIT (!IS_64_BIT) 179 #define PAD_32 (0) 180 #define PAD_64 (4) 181 #define PADSIZE (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64) 182 + #define DIRECT_LOOKUP_BIT 0x10 183 + #define DIRECT_LOOKUP_SHIFT 5 184 + 185 typedef struct _CommandList_struct { 186 CommandListHeader_struct Header; 187 RequestBlock_struct Request; ··· 195 struct completion *waiting; 196 int retry_count; 197 void * scsi_cmd; 198 + char pad[PADSIZE]; 199 } CommandList_struct; 200 201 /* Configuration Table Structure */ ··· 209 typedef struct _CfgTable_struct { 210 BYTE Signature[4]; 211 DWORD SpecValence; 212 + #define SIMPLE_MODE 0x02 213 + #define PERFORMANT_MODE 0x04 214 + #define MEMQ_MODE 0x08 215 DWORD TransportSupport; 216 DWORD TransportActive; 217 HostWrite_struct HostWrite; 218 DWORD CmdsOutMax; 219 DWORD BusTypes; 220 + DWORD TransMethodOffset; 221 BYTE ServerName[16]; 222 DWORD HeartBeat; 223 DWORD SCSI_Prefetch; ··· 222 DWORD MaxLogicalUnits; 223 DWORD MaxPhysicalDrives; 224 DWORD MaxPhysicalDrivesPerLogicalUnit; 225 + DWORD MaxPerformantModeCommands; 226 + u8 reserved[0x78 - 0x58]; 227 + u32 misc_fw_support; /* offset 0x78 */ 228 + #define MISC_FW_DOORBELL_RESET (0x02) 229 } CfgTable_struct; 230 + 231 + struct TransTable_struct { 232 + u32 BlockFetch0; 233 + u32 BlockFetch1; 234 + u32 BlockFetch2; 235 + u32 BlockFetch3; 236 + u32 BlockFetch4; 237 + u32 BlockFetch5; 238 + u32 BlockFetch6; 239 + u32 BlockFetch7; 240 + u32 RepQSize; 241 + u32 RepQCount; 242 + u32 RepQCtrAddrLow32; 243 + u32 RepQCtrAddrHigh32; 244 + u32 RepQAddr0Low32; 245 + u32 RepQAddr0High32; 246 + }; 247 + 248 #pragma pack() 249 #endif /* CCISS_CMD_H */
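A note on why COMMANDLIST_ALIGNMENT above jumps from 8 to 32: the performant-mode comments in cciss.c state that the DMA fetch size is encoded in the lower bits of the command address and that cciss_put_controller_into_performant_mode() bails out unless sizeof(CommandList_struct) is a multiple of 32, and the new DIRECT_LOOKUP_SHIFT/DIRECT_LOOKUP_BIT likewise assume the bottom five bits of an indexed tag are free for metadata. The static asserts below only restate those relationships between the published constants; they are an illustration, not driver code.

#include <stdio.h>

#define COMMANDLIST_ALIGNMENT	32
#define DIRECT_LOOKUP_SHIFT	5
#define DIRECT_LOOKUP_BIT	0x10
#define CCISS_ERROR_BITS	0x03

/* C11 static asserts; the driver expresses similar invariants with BUILD_BUG_ON(). */
_Static_assert((1 << DIRECT_LOOKUP_SHIFT) == COMMANDLIST_ALIGNMENT,
	       "the indexed part of a tag must start above the alignment's low bits");
_Static_assert((DIRECT_LOOKUP_BIT | CCISS_ERROR_BITS) < COMMANDLIST_ALIGNMENT,
	       "marker and error bits must fit in the bits cleared by 32-byte alignment");

int main(void)
{
	unsigned long busaddr = 0x12340060ul;	/* hypothetical 32-byte aligned bus address */

	printf("busaddr 0x%lx leaves its low 5 bits free: %s\n", busaddr,
	       (busaddr % COMMANDLIST_ALIGNMENT) ? "no" : "yes");
	return 0;
}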
+315 -355
drivers/block/cciss_scsi.c
··· 44 #define CCISS_ABORT_MSG 0x00 45 #define CCISS_RESET_MSG 0x01 46 47 - static int fill_cmd(CommandList_struct *c, __u8 cmd, int ctlr, void *buff, 48 size_t size, 49 __u8 page_code, unsigned char *scsi3addr, 50 int cmd_type); 51 52 - static CommandList_struct *cmd_alloc(ctlr_info_t *h, int get_from_pool); 53 - static void cmd_free(ctlr_info_t *h, CommandList_struct *c, int got_from_pool); 54 55 static int cciss_scsi_proc_info( 56 struct Scsi_Host *sh, ··· 95 96 #pragma pack(1) 97 98 - #define SCSI_PAD_32 0 99 - #define SCSI_PAD_64 0 100 101 struct cciss_scsi_cmd_stack_elem_t { 102 CommandList_struct cmd; ··· 129 spinlock_t lock; // to protect ccissscsi[ctlr]; 130 }; 131 132 - #define CPQ_TAPE_LOCK(ctlr, flags) spin_lock_irqsave( \ 133 - &hba[ctlr]->scsi_ctlr->lock, flags); 134 - #define CPQ_TAPE_UNLOCK(ctlr, flags) spin_unlock_irqrestore( \ 135 - &hba[ctlr]->scsi_ctlr->lock, flags); 136 137 static CommandList_struct * 138 scsi_cmd_alloc(ctlr_info_t *h) 139 { 140 /* assume only one process in here at a time, locking done by caller. */ 141 - /* use CCISS_LOCK(ctlr) */ 142 /* might be better to rewrite how we allocate scsi commands in a way that */ 143 /* needs no locking at all. */ 144 ··· 179 } 180 181 static void 182 - scsi_cmd_free(ctlr_info_t *h, CommandList_struct *cmd) 183 { 184 /* assume only one process in here at a time, locking done by caller. */ 185 - /* use CCISS_LOCK(ctlr) */ 186 /* drop the free memory chunk on top of the stack. */ 187 188 struct cciss_scsi_adapter_data_t *sa; ··· 192 stk = &sa->cmd_stack; 193 stk->top++; 194 if (stk->top >= CMD_STACK_SIZE) { 195 - printk("cciss: scsi_cmd_free called too many times.\n"); 196 BUG(); 197 } 198 - stk->elem[stk->top] = (struct cciss_scsi_cmd_stack_elem_t *) cmd; 199 } 200 201 static int 202 - scsi_cmd_stack_setup(int ctlr, struct cciss_scsi_adapter_data_t *sa) 203 { 204 int i; 205 struct cciss_scsi_cmd_stack_t *stk; 206 size_t size; 207 208 - sa->cmd_sg_list = cciss_allocate_sg_chain_blocks(hba[ctlr], 209 - hba[ctlr]->chainsize, CMD_STACK_SIZE); 210 - if (!sa->cmd_sg_list && hba[ctlr]->chainsize > 0) 211 return -ENOMEM; 212 213 stk = &sa->cmd_stack; ··· 218 BUILD_BUG_ON((sizeof(*stk->pool) % COMMANDLIST_ALIGNMENT) != 0); 219 /* pci_alloc_consistent guarantees 32-bit DMA address will be used */ 220 stk->pool = (struct cciss_scsi_cmd_stack_elem_t *) 221 - pci_alloc_consistent(hba[ctlr]->pdev, size, &stk->cmd_pool_handle); 222 223 if (stk->pool == NULL) { 224 cciss_free_sg_chain_blocks(sa->cmd_sg_list, CMD_STACK_SIZE); ··· 237 } 238 239 static void 240 - scsi_cmd_stack_free(int ctlr) 241 { 242 struct cciss_scsi_adapter_data_t *sa; 243 struct cciss_scsi_cmd_stack_t *stk; 244 size_t size; 245 246 - sa = hba[ctlr]->scsi_ctlr; 247 stk = &sa->cmd_stack; 248 if (stk->top != CMD_STACK_SIZE-1) { 249 - printk( "cciss: %d scsi commands are still outstanding.\n", 250 CMD_STACK_SIZE - stk->top); 251 - // BUG(); 252 - printk("WE HAVE A BUG HERE!!! 
stk=0x%p\n", stk); 253 } 254 size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * CMD_STACK_SIZE; 255 256 - pci_free_consistent(hba[ctlr]->pdev, size, stk->pool, stk->cmd_pool_handle); 257 stk->pool = NULL; 258 cciss_free_sg_chain_blocks(sa->cmd_sg_list, CMD_STACK_SIZE); 259 } ··· 344 #endif 345 346 static int 347 - find_bus_target_lun(int ctlr, int *bus, int *target, int *lun) 348 { 349 /* finds an unused bus, target, lun for a new device */ 350 - /* assumes hba[ctlr]->scsi_ctlr->lock is held */ 351 int i, found=0; 352 unsigned char target_taken[CCISS_MAX_SCSI_DEVS_PER_HBA]; 353 354 memset(&target_taken[0], 0, CCISS_MAX_SCSI_DEVS_PER_HBA); 355 356 target_taken[SELF_SCSI_ID] = 1; 357 - for (i=0;i<ccissscsi[ctlr].ndevices;i++) 358 - target_taken[ccissscsi[ctlr].dev[i].target] = 1; 359 360 - for (i=0;i<CCISS_MAX_SCSI_DEVS_PER_HBA;i++) { 361 if (!target_taken[i]) { 362 *bus = 0; *target=i; *lun = 0; found=1; 363 break; ··· 371 }; 372 373 static int 374 - cciss_scsi_add_entry(int ctlr, int hostno, 375 struct cciss_scsi_dev_t *device, 376 struct scsi2map *added, int *nadded) 377 { 378 - /* assumes hba[ctlr]->scsi_ctlr->lock is held */ 379 - int n = ccissscsi[ctlr].ndevices; 380 struct cciss_scsi_dev_t *sd; 381 int i, bus, target, lun; 382 unsigned char addr1[8], addr2[8]; 383 384 if (n >= CCISS_MAX_SCSI_DEVS_PER_HBA) { 385 - printk("cciss%d: Too many devices, " 386 - "some will be inaccessible.\n", ctlr); 387 return -1; 388 } 389 ··· 399 memcpy(addr1, device->scsi3addr, 8); 400 addr1[4] = 0; 401 for (i = 0; i < n; i++) { 402 - sd = &ccissscsi[ctlr].dev[i]; 403 memcpy(addr2, sd->scsi3addr, 8); 404 addr2[4] = 0; 405 /* differ only in byte 4? */ ··· 412 } 413 } 414 415 - sd = &ccissscsi[ctlr].dev[n]; 416 if (lun == 0) { 417 - if (find_bus_target_lun(ctlr, 418 &sd->bus, &sd->target, &sd->lun) != 0) 419 return -1; 420 } else { ··· 433 memcpy(sd->device_id, device->device_id, sizeof(sd->device_id)); 434 sd->devtype = device->devtype; 435 436 - ccissscsi[ctlr].ndevices++; 437 438 /* initially, (before registering with scsi layer) we don't 439 know our hostno and we don't want to print anything first 440 time anyway (the scsi layer's inquiries will show that info) */ 441 if (hostno != -1) 442 - printk("cciss%d: %s device c%db%dt%dl%d added.\n", 443 - ctlr, scsi_device_type(sd->devtype), hostno, 444 sd->bus, sd->target, sd->lun); 445 return 0; 446 } 447 448 static void 449 - cciss_scsi_remove_entry(int ctlr, int hostno, int entry, 450 struct scsi2map *removed, int *nremoved) 451 { 452 - /* assumes hba[ctlr]->scsi_ctlr->lock is held */ 453 int i; 454 struct cciss_scsi_dev_t sd; 455 456 if (entry < 0 || entry >= CCISS_MAX_SCSI_DEVS_PER_HBA) return; 457 - sd = ccissscsi[ctlr].dev[entry]; 458 removed[*nremoved].bus = sd.bus; 459 removed[*nremoved].target = sd.target; 460 removed[*nremoved].lun = sd.lun; 461 (*nremoved)++; 462 - for (i=entry;i<ccissscsi[ctlr].ndevices-1;i++) 463 - ccissscsi[ctlr].dev[i] = ccissscsi[ctlr].dev[i+1]; 464 - ccissscsi[ctlr].ndevices--; 465 - printk("cciss%d: %s device c%db%dt%dl%d removed.\n", 466 - ctlr, scsi_device_type(sd.devtype), hostno, 467 sd.bus, sd.target, sd.lun); 468 } 469 ··· 478 (a)[1] == (b)[1] && \ 479 (a)[0] == (b)[0]) 480 481 - static void fixup_botched_add(int ctlr, char *scsi3addr) 482 { 483 /* called when scsi_add_device fails in order to re-adjust */ 484 /* ccissscsi[] to match the mid layer's view. 
*/ 485 unsigned long flags; 486 int i, j; 487 - CPQ_TAPE_LOCK(ctlr, flags); 488 - for (i = 0; i < ccissscsi[ctlr].ndevices; i++) { 489 if (memcmp(scsi3addr, 490 - ccissscsi[ctlr].dev[i].scsi3addr, 8) == 0) { 491 - for (j = i; j < ccissscsi[ctlr].ndevices-1; j++) 492 - ccissscsi[ctlr].dev[j] = 493 - ccissscsi[ctlr].dev[j+1]; 494 - ccissscsi[ctlr].ndevices--; 495 break; 496 } 497 } 498 - CPQ_TAPE_UNLOCK(ctlr, flags); 499 } 500 501 static int device_is_the_same(struct cciss_scsi_dev_t *dev1, ··· 515 } 516 517 static int 518 - adjust_cciss_scsi_table(int ctlr, int hostno, 519 struct cciss_scsi_dev_t sd[], int nsds) 520 { 521 /* sd contains scsi3 addresses and devtypes, but ··· 536 GFP_KERNEL); 537 538 if (!added || !removed) { 539 - printk(KERN_WARNING "cciss%d: Out of memory in " 540 - "adjust_cciss_scsi_table\n", ctlr); 541 goto free_and_out; 542 } 543 544 - CPQ_TAPE_LOCK(ctlr, flags); 545 546 if (hostno != -1) /* if it's not the first time... */ 547 - sh = hba[ctlr]->scsi_ctlr->scsi_host; 548 549 /* find any devices in ccissscsi[] that are not in 550 sd[] and remove them from ccissscsi[] */ ··· 552 i = 0; 553 nremoved = 0; 554 nadded = 0; 555 - while(i<ccissscsi[ctlr].ndevices) { 556 - csd = &ccissscsi[ctlr].dev[i]; 557 found=0; 558 for (j=0;j<nsds;j++) { 559 if (SCSI3ADDR_EQ(sd[j].scsi3addr, ··· 568 569 if (found == 0) { /* device no longer present. */ 570 changes++; 571 - /* printk("cciss%d: %s device c%db%dt%dl%d removed.\n", 572 - ctlr, scsi_device_type(csd->devtype), hostno, 573 - csd->bus, csd->target, csd->lun); */ 574 - cciss_scsi_remove_entry(ctlr, hostno, i, 575 removed, &nremoved); 576 /* remove ^^^, hence i not incremented */ 577 } else if (found == 1) { /* device is different in some way */ 578 changes++; 579 - printk("cciss%d: device c%db%dt%dl%d has changed.\n", 580 - ctlr, hostno, csd->bus, csd->target, csd->lun); 581 - cciss_scsi_remove_entry(ctlr, hostno, i, 582 removed, &nremoved); 583 /* remove ^^^, hence i not incremented */ 584 - if (cciss_scsi_add_entry(ctlr, hostno, &sd[j], 585 added, &nadded) != 0) 586 /* we just removed one, so add can't fail. */ 587 BUG(); ··· 601 602 for (i=0;i<nsds;i++) { 603 found=0; 604 - for (j=0;j<ccissscsi[ctlr].ndevices;j++) { 605 - csd = &ccissscsi[ctlr].dev[j]; 606 if (SCSI3ADDR_EQ(sd[i].scsi3addr, 607 csd->scsi3addr)) { 608 if (device_is_the_same(&sd[i], csd)) ··· 614 } 615 if (!found) { 616 changes++; 617 - if (cciss_scsi_add_entry(ctlr, hostno, &sd[i], 618 added, &nadded) != 0) 619 break; 620 } else if (found == 1) { 621 /* should never happen... */ 622 changes++; 623 - printk(KERN_WARNING "cciss%d: device " 624 - "unexpectedly changed\n", ctlr); 625 /* but if it does happen, we just ignore that device */ 626 } 627 } 628 - CPQ_TAPE_UNLOCK(ctlr, flags); 629 630 /* Don't notify scsi mid layer of any changes the first time through */ 631 /* (or if there are no changes) scsi_scan_host will do it later the */ ··· 645 /* We don't expect to get here. */ 646 /* future cmds to this device will get selection */ 647 /* timeout as if the device was gone. 
*/ 648 - printk(KERN_WARNING "cciss%d: didn't find " 649 "c%db%dt%dl%d\n for removal.", 650 - ctlr, hostno, removed[i].bus, 651 removed[i].target, removed[i].lun); 652 } 653 } ··· 659 added[i].target, added[i].lun); 660 if (rc == 0) 661 continue; 662 - printk(KERN_WARNING "cciss%d: scsi_add_device " 663 "c%db%dt%dl%d failed, device not added.\n", 664 - ctlr, hostno, 665 - added[i].bus, added[i].target, added[i].lun); 666 /* now we have to remove it from ccissscsi, */ 667 /* since it didn't get added to scsi mid layer */ 668 - fixup_botched_add(ctlr, added[i].scsi3addr); 669 } 670 671 free_and_out: ··· 674 } 675 676 static int 677 - lookup_scsi3addr(int ctlr, int bus, int target, int lun, char *scsi3addr) 678 { 679 int i; 680 struct cciss_scsi_dev_t *sd; 681 unsigned long flags; 682 683 - CPQ_TAPE_LOCK(ctlr, flags); 684 - for (i=0;i<ccissscsi[ctlr].ndevices;i++) { 685 - sd = &ccissscsi[ctlr].dev[i]; 686 if (sd->bus == bus && 687 sd->target == target && 688 sd->lun == lun) { 689 memcpy(scsi3addr, &sd->scsi3addr[0], 8); 690 - CPQ_TAPE_UNLOCK(ctlr, flags); 691 return 0; 692 } 693 } 694 - CPQ_TAPE_UNLOCK(ctlr, flags); 695 return -1; 696 } 697 698 static void 699 - cciss_scsi_setup(int cntl_num) 700 { 701 struct cciss_scsi_adapter_data_t * shba; 702 703 - ccissscsi[cntl_num].ndevices = 0; 704 shba = (struct cciss_scsi_adapter_data_t *) 705 kmalloc(sizeof(*shba), GFP_KERNEL); 706 if (shba == NULL) ··· 708 shba->scsi_host = NULL; 709 spin_lock_init(&shba->lock); 710 shba->registered = 0; 711 - if (scsi_cmd_stack_setup(cntl_num, shba) != 0) { 712 kfree(shba); 713 shba = NULL; 714 } 715 - hba[cntl_num]->scsi_ctlr = shba; 716 return; 717 } 718 719 - static void 720 - complete_scsi_command( CommandList_struct *cp, int timeout, __u32 tag) 721 { 722 struct scsi_cmnd *cmd; 723 - ctlr_info_t *ctlr; 724 ErrorInfo_struct *ei; 725 726 - ei = cp->err_info; 727 728 /* First, see if it was a message rather than a command */ 729 - if (cp->Request.Type.Type == TYPE_MSG) { 730 - cp->cmd_type = CMD_MSG_DONE; 731 return; 732 } 733 734 - cmd = (struct scsi_cmnd *) cp->scsi_cmd; 735 - ctlr = hba[cp->ctlr]; 736 737 scsi_dma_unmap(cmd); 738 - if (cp->Header.SGTotal > ctlr->max_cmd_sgentries) 739 - cciss_unmap_sg_chain_block(ctlr, cp); 740 741 cmd->result = (DID_OK << 16); /* host byte */ 742 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */ ··· 763 { 764 #if 0 765 printk(KERN_WARNING "cciss: cmd %p " 766 - "has SCSI Status = %x\n", 767 - cp, 768 - ei->ScsiStatus); 769 #endif 770 cmd->result |= (ei->ScsiStatus << 1); 771 } ··· 784 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ 785 break; 786 case CMD_DATA_OVERRUN: 787 - printk(KERN_WARNING "cciss: cp %p has" 788 " completed with data overrun " 789 - "reported\n", cp); 790 break; 791 case CMD_INVALID: { 792 - /* print_bytes(cp, sizeof(*cp), 1, 0); 793 - print_cmd(cp); */ 794 /* We get CMD_INVALID if you address a non-existent tape drive instead 795 of a selection timeout (no response). You will see this if you yank 796 out a tape drive, then try to access it. 
This is kind of a shame ··· 800 } 801 break; 802 case CMD_PROTOCOL_ERR: 803 - printk(KERN_WARNING "cciss: cp %p has " 804 - "protocol error \n", cp); 805 break; 806 case CMD_HARDWARE_ERR: 807 cmd->result = DID_ERROR << 16; 808 - printk(KERN_WARNING "cciss: cp %p had " 809 - " hardware error\n", cp); 810 break; 811 case CMD_CONNECTION_LOST: 812 cmd->result = DID_ERROR << 16; 813 - printk(KERN_WARNING "cciss: cp %p had " 814 - "connection lost\n", cp); 815 break; 816 case CMD_ABORTED: 817 cmd->result = DID_ABORT << 16; 818 - printk(KERN_WARNING "cciss: cp %p was " 819 - "aborted\n", cp); 820 break; 821 case CMD_ABORT_FAILED: 822 cmd->result = DID_ERROR << 16; 823 - printk(KERN_WARNING "cciss: cp %p reports " 824 - "abort failed\n", cp); 825 break; 826 case CMD_UNSOLICITED_ABORT: 827 cmd->result = DID_ABORT << 16; 828 - printk(KERN_WARNING "cciss: cp %p aborted " 829 - "do to an unsolicited abort\n", cp); 830 break; 831 case CMD_TIMEOUT: 832 cmd->result = DID_TIME_OUT << 16; 833 - printk(KERN_WARNING "cciss: cp %p timedout\n", 834 - cp); 835 break; 836 default: 837 cmd->result = DID_ERROR << 16; 838 - printk(KERN_WARNING "cciss: cp %p returned " 839 - "unknown status %x\n", cp, 840 ei->CommandStatus); 841 } 842 } 843 - // printk("c:%p:c%db%dt%dl%d ", cmd, ctlr->ctlr, cmd->channel, 844 - // cmd->target, cmd->lun); 845 cmd->scsi_done(cmd); 846 - scsi_cmd_free(ctlr, cp); 847 } 848 849 static int 850 - cciss_scsi_detect(int ctlr) 851 { 852 struct Scsi_Host *sh; 853 int error; ··· 854 sh->io_port = 0; // good enough? FIXME, 855 sh->n_io_port = 0; // I don't think we use these two... 856 sh->this_id = SELF_SCSI_ID; 857 - sh->sg_tablesize = hba[ctlr]->maxsgentries; 858 sh->max_cmd_len = MAX_COMMAND_SIZE; 859 860 ((struct cciss_scsi_adapter_data_t *) 861 - hba[ctlr]->scsi_ctlr)->scsi_host = sh; 862 - sh->hostdata[0] = (unsigned long) hba[ctlr]; 863 - sh->irq = hba[ctlr]->intr[SIMPLE_MODE_INT]; 864 sh->unique_id = sh->irq; 865 - error = scsi_add_host(sh, &hba[ctlr]->pdev->dev); 866 if (error) 867 goto fail_host_put; 868 scsi_scan_host(sh); ··· 876 877 static void 878 cciss_unmap_one(struct pci_dev *pdev, 879 - CommandList_struct *cp, 880 size_t buflen, 881 int data_direction) 882 { 883 u64bit addr64; 884 885 - addr64.val32.lower = cp->SG[0].Addr.lower; 886 - addr64.val32.upper = cp->SG[0].Addr.upper; 887 pci_unmap_single(pdev, (dma_addr_t) addr64.val, buflen, data_direction); 888 } 889 890 static void 891 cciss_map_one(struct pci_dev *pdev, 892 - CommandList_struct *cp, 893 unsigned char *buf, 894 size_t buflen, 895 int data_direction) ··· 897 __u64 addr64; 898 899 addr64 = (__u64) pci_map_single(pdev, buf, buflen, data_direction); 900 - cp->SG[0].Addr.lower = 901 (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF); 902 - cp->SG[0].Addr.upper = 903 (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF); 904 - cp->SG[0].Len = buflen; 905 - cp->Header.SGList = (__u8) 1; /* no. 
SGs contig in this cmd */ 906 - cp->Header.SGTotal = (__u16) 1; /* total sgs in this cmd list */ 907 } 908 909 static int 910 - cciss_scsi_do_simple_cmd(ctlr_info_t *c, 911 - CommandList_struct *cp, 912 unsigned char *scsi3addr, 913 unsigned char *cdb, 914 unsigned char cdblen, 915 unsigned char *buf, int bufsize, 916 int direction) 917 { 918 - unsigned long flags; 919 DECLARE_COMPLETION_ONSTACK(wait); 920 921 - cp->cmd_type = CMD_IOCTL_PEND; // treat this like an ioctl 922 - cp->scsi_cmd = NULL; 923 - cp->Header.ReplyQueue = 0; // unused in simple mode 924 - memcpy(&cp->Header.LUN, scsi3addr, sizeof(cp->Header.LUN)); 925 - cp->Header.Tag.lower = cp->busaddr; // Use k. address of cmd as tag 926 // Fill in the request block... 927 928 /* printk("Using scsi3addr 0x%02x%0x2%0x2%0x2%0x2%0x2%0x2%0x2\n", 929 scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3], 930 scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]); */ 931 932 - memset(cp->Request.CDB, 0, sizeof(cp->Request.CDB)); 933 - memcpy(cp->Request.CDB, cdb, cdblen); 934 - cp->Request.Timeout = 0; 935 - cp->Request.CDBLen = cdblen; 936 - cp->Request.Type.Type = TYPE_CMD; 937 - cp->Request.Type.Attribute = ATTR_SIMPLE; 938 - cp->Request.Type.Direction = direction; 939 940 /* Fill in the SG list and do dma mapping */ 941 - cciss_map_one(c->pdev, cp, (unsigned char *) buf, 942 bufsize, DMA_FROM_DEVICE); 943 944 - cp->waiting = &wait; 945 - 946 - /* Put the request on the tail of the request queue */ 947 - spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags); 948 - addQ(&c->reqQ, cp); 949 - c->Qdepth++; 950 - start_io(c); 951 - spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags); 952 - 953 wait_for_completion(&wait); 954 955 /* undo the dma mapping */ 956 - cciss_unmap_one(c->pdev, cp, bufsize, DMA_FROM_DEVICE); 957 return(0); 958 } 959 960 static void 961 - cciss_scsi_interpret_error(CommandList_struct *cp) 962 { 963 ErrorInfo_struct *ei; 964 965 - ei = cp->err_info; 966 switch(ei->CommandStatus) 967 { 968 case CMD_TARGET_STATUS: 969 - printk(KERN_WARNING "cciss: cmd %p has " 970 - "completed with errors\n", cp); 971 - printk(KERN_WARNING "cciss: cmd %p " 972 - "has SCSI Status = %x\n", 973 - cp, 974 - ei->ScsiStatus); 975 if (ei->ScsiStatus == 0) 976 - printk(KERN_WARNING 977 - "cciss:SCSI status is abnormally zero. " 978 "(probably indicates selection timeout " 979 "reported incorrectly due to a known " 980 "firmware bug, circa July, 2001.)\n"); 981 break; 982 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ 983 - printk("UNDERRUN\n"); 984 break; 985 case CMD_DATA_OVERRUN: 986 - printk(KERN_WARNING "cciss: cp %p has" 987 " completed with data overrun " 988 - "reported\n", cp); 989 break; 990 case CMD_INVALID: { 991 /* controller unfortunately reports SCSI passthru's */ 992 /* to non-existent targets as invalid commands. 
*/ 993 - printk(KERN_WARNING "cciss: cp %p is " 994 - "reported invalid (probably means " 995 - "target device no longer present)\n", 996 - cp); 997 - /* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0); 998 - print_cmd(cp); */ 999 } 1000 break; 1001 case CMD_PROTOCOL_ERR: 1002 - printk(KERN_WARNING "cciss: cp %p has " 1003 - "protocol error \n", cp); 1004 break; 1005 case CMD_HARDWARE_ERR: 1006 /* cmd->result = DID_ERROR << 16; */ 1007 - printk(KERN_WARNING "cciss: cp %p had " 1008 - " hardware error\n", cp); 1009 break; 1010 case CMD_CONNECTION_LOST: 1011 - printk(KERN_WARNING "cciss: cp %p had " 1012 - "connection lost\n", cp); 1013 break; 1014 case CMD_ABORTED: 1015 - printk(KERN_WARNING "cciss: cp %p was " 1016 - "aborted\n", cp); 1017 break; 1018 case CMD_ABORT_FAILED: 1019 - printk(KERN_WARNING "cciss: cp %p reports " 1020 - "abort failed\n", cp); 1021 break; 1022 case CMD_UNSOLICITED_ABORT: 1023 - printk(KERN_WARNING "cciss: cp %p aborted " 1024 - "do to an unsolicited abort\n", cp); 1025 break; 1026 case CMD_TIMEOUT: 1027 - printk(KERN_WARNING "cciss: cp %p timedout\n", 1028 - cp); 1029 break; 1030 default: 1031 - printk(KERN_WARNING "cciss: cp %p returned " 1032 - "unknown status %x\n", cp, 1033 - ei->CommandStatus); 1034 } 1035 } 1036 1037 static int 1038 - cciss_scsi_do_inquiry(ctlr_info_t *c, unsigned char *scsi3addr, 1039 unsigned char page, unsigned char *buf, 1040 unsigned char bufsize) 1041 { 1042 int rc; 1043 - CommandList_struct *cp; 1044 char cdb[6]; 1045 ErrorInfo_struct *ei; 1046 unsigned long flags; 1047 1048 - spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags); 1049 - cp = scsi_cmd_alloc(c); 1050 - spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags); 1051 1052 - if (cp == NULL) { /* trouble... */ 1053 printk("cmd_alloc returned NULL!\n"); 1054 return -1; 1055 } 1056 1057 - ei = cp->err_info; 1058 1059 cdb[0] = CISS_INQUIRY; 1060 cdb[1] = (page != 0); ··· 1047 cdb[3] = 0; 1048 cdb[4] = bufsize; 1049 cdb[5] = 0; 1050 - rc = cciss_scsi_do_simple_cmd(c, cp, scsi3addr, cdb, 1051 6, buf, bufsize, XFER_READ); 1052 1053 if (rc != 0) return rc; /* something went wrong */ 1054 1055 if (ei->CommandStatus != 0 && 1056 ei->CommandStatus != CMD_DATA_UNDERRUN) { 1057 - cciss_scsi_interpret_error(cp); 1058 rc = -1; 1059 } 1060 - spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags); 1061 - scsi_cmd_free(c, cp); 1062 - spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags); 1063 return rc; 1064 } 1065 1066 /* Get the device id from inquiry page 0x83 */ 1067 - static int cciss_scsi_get_device_id(ctlr_info_t *c, unsigned char *scsi3addr, 1068 unsigned char *device_id, int buflen) 1069 { 1070 int rc; ··· 1075 buf = kzalloc(64, GFP_KERNEL); 1076 if (!buf) 1077 return -1; 1078 - rc = cciss_scsi_do_inquiry(c, scsi3addr, 0x83, buf, 64); 1079 if (rc == 0) 1080 memcpy(device_id, &buf[8], buflen); 1081 kfree(buf); ··· 1083 } 1084 1085 static int 1086 - cciss_scsi_do_report_phys_luns(ctlr_info_t *c, 1087 ReportLunData_struct *buf, int bufsize) 1088 { 1089 int rc; 1090 - CommandList_struct *cp; 1091 unsigned char cdb[12]; 1092 unsigned char scsi3addr[8]; 1093 ErrorInfo_struct *ei; 1094 unsigned long flags; 1095 1096 - spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags); 1097 - cp = scsi_cmd_alloc(c); 1098 - spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags); 1099 - if (cp == NULL) { /* trouble... 
*/ 1100 printk("cmd_alloc returned NULL!\n"); 1101 return -1; 1102 } ··· 1115 cdb[10] = 0; 1116 cdb[11] = 0; 1117 1118 - rc = cciss_scsi_do_simple_cmd(c, cp, scsi3addr, 1119 cdb, 12, 1120 (unsigned char *) buf, 1121 bufsize, XFER_READ); 1122 1123 if (rc != 0) return rc; /* something went wrong */ 1124 1125 - ei = cp->err_info; 1126 if (ei->CommandStatus != 0 && 1127 ei->CommandStatus != CMD_DATA_UNDERRUN) { 1128 - cciss_scsi_interpret_error(cp); 1129 rc = -1; 1130 } 1131 - spin_lock_irqsave(CCISS_LOCK(c->ctlr), flags); 1132 - scsi_cmd_free(c, cp); 1133 - spin_unlock_irqrestore(CCISS_LOCK(c->ctlr), flags); 1134 return rc; 1135 } 1136 1137 static void 1138 - cciss_update_non_disk_devices(int cntl_num, int hostno) 1139 { 1140 /* the idea here is we could get notified from /proc 1141 that some devices have changed, so we do a report ··· 1168 ReportLunData_struct *ld_buff; 1169 unsigned char *inq_buff; 1170 unsigned char scsi3addr[8]; 1171 - ctlr_info_t *c; 1172 __u32 num_luns=0; 1173 unsigned char *ch; 1174 struct cciss_scsi_dev_t *currentsd, *this_device; ··· 1175 int reportlunsize = sizeof(*ld_buff) + CISS_MAX_PHYS_LUN * 8; 1176 int i; 1177 1178 - c = (ctlr_info_t *) hba[cntl_num]; 1179 ld_buff = kzalloc(reportlunsize, GFP_KERNEL); 1180 inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); 1181 currentsd = kzalloc(sizeof(*currentsd) * ··· 1184 goto out; 1185 } 1186 this_device = &currentsd[CCISS_MAX_SCSI_DEVS_PER_HBA]; 1187 - if (cciss_scsi_do_report_phys_luns(c, ld_buff, reportlunsize) == 0) { 1188 ch = &ld_buff->LUNListLength[0]; 1189 num_luns = ((ch[0]<<24) | (ch[1]<<16) | (ch[2]<<8) | ch[3]) / 8; 1190 if (num_luns > CISS_MAX_PHYS_LUN) { ··· 1208 memset(inq_buff, 0, OBDR_TAPE_INQ_SIZE); 1209 memcpy(&scsi3addr[0], &ld_buff->LUN[i][0], 8); 1210 1211 - if (cciss_scsi_do_inquiry(hba[cntl_num], scsi3addr, 0, inq_buff, 1212 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) 1213 /* Inquiry failed (msg printed already) */ 1214 continue; /* so we will skip this device. */ ··· 1226 sizeof(this_device->revision)); 1227 memset(this_device->device_id, 0, 1228 sizeof(this_device->device_id)); 1229 - cciss_scsi_get_device_id(hba[cntl_num], scsi3addr, 1230 this_device->device_id, sizeof(this_device->device_id)); 1231 1232 switch (this_device->devtype) ··· 1253 case 0x08: /* medium changer */ 1254 if (ncurrent >= CCISS_MAX_SCSI_DEVS_PER_HBA) { 1255 printk(KERN_INFO "cciss%d: %s ignored, " 1256 - "too many devices.\n", cntl_num, 1257 scsi_device_type(this_device->devtype)); 1258 break; 1259 } ··· 1265 } 1266 } 1267 1268 - adjust_cciss_scsi_table(cntl_num, hostno, currentsd, ncurrent); 1269 out: 1270 kfree(inq_buff); 1271 kfree(ld_buff); ··· 1284 } 1285 1286 static int 1287 - cciss_scsi_user_command(int ctlr, int hostno, char *buffer, int length) 1288 { 1289 int arg_len; 1290 1291 if ((arg_len = is_keyword(buffer, length, "rescan")) != 0) 1292 - cciss_update_non_disk_devices(ctlr, hostno); 1293 else 1294 return -EINVAL; 1295 return length; ··· 1306 { 1307 1308 int buflen, datalen; 1309 - ctlr_info_t *ci; 1310 int i; 1311 - int cntl_num; 1312 1313 - 1314 - ci = (ctlr_info_t *) sh->hostdata[0]; 1315 - if (ci == NULL) /* This really shouldn't ever happen. 
*/ 1316 return -EINVAL; 1317 - 1318 - cntl_num = ci->ctlr; /* Get our index into the hba[] array */ 1319 1320 if (func == 0) { /* User is reading from /proc/scsi/ciss*?/?* */ 1321 buflen = sprintf(buffer, "cciss%d: SCSI host: %d\n", 1322 - cntl_num, sh->host_no); 1323 1324 /* this information is needed by apps to know which cciss 1325 device corresponds to which scsi host number without ··· 1325 this info is for an app to be able to use to know how to 1326 get them back in sync. */ 1327 1328 - for (i=0;i<ccissscsi[cntl_num].ndevices;i++) { 1329 - struct cciss_scsi_dev_t *sd = &ccissscsi[cntl_num].dev[i]; 1330 buflen += sprintf(&buffer[buflen], "c%db%dt%dl%d %02d " 1331 "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 1332 sh->host_no, sd->bus, sd->target, sd->lun, ··· 1345 *start = buffer + offset; 1346 return(datalen); 1347 } else /* User is writing to /proc/scsi/cciss*?/?* ... */ 1348 - return cciss_scsi_user_command(cntl_num, sh->host_no, 1349 buffer, length); 1350 } 1351 1352 /* cciss_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci 1353 dma mapping and fills in the scatter gather entries of the 1354 - cciss command, cp. */ 1355 1356 - static void cciss_scatter_gather(ctlr_info_t *h, CommandList_struct *cp, 1357 struct scsi_cmnd *cmd) 1358 { 1359 unsigned int len; ··· 1367 1368 chained = 0; 1369 sg_index = 0; 1370 - curr_sg = cp->SG; 1371 request_nsgs = scsi_dma_map(cmd); 1372 if (request_nsgs) { 1373 scsi_for_each_sg(cmd, sg, request_nsgs, i) { ··· 1375 !chained && request_nsgs - i > 1) { 1376 chained = 1; 1377 sg_index = 0; 1378 - curr_sg = sa->cmd_sg_list[cp->cmdindex]; 1379 } 1380 addr64 = (__u64) sg_dma_address(sg); 1381 len = sg_dma_len(sg); ··· 1388 ++sg_index; 1389 } 1390 if (chained) 1391 - cciss_map_sg_chain_block(h, cp, 1392 - sa->cmd_sg_list[cp->cmdindex], 1393 (request_nsgs - (h->max_cmd_sgentries - 1)) * 1394 sizeof(SGDescriptor_struct)); 1395 } 1396 /* track how many SG entries we are using */ 1397 if (request_nsgs > h->maxSG) 1398 h->maxSG = request_nsgs; 1399 - cp->Header.SGTotal = (__u8) request_nsgs + chained; 1400 if (request_nsgs > h->max_cmd_sgentries) 1401 - cp->Header.SGList = h->max_cmd_sgentries; 1402 else 1403 - cp->Header.SGList = cp->Header.SGTotal; 1404 return; 1405 } 1406 ··· 1408 static int 1409 cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *)) 1410 { 1411 - ctlr_info_t *c; 1412 - int ctlr, rc; 1413 unsigned char scsi3addr[8]; 1414 - CommandList_struct *cp; 1415 unsigned long flags; 1416 1417 // Get the ptr to our adapter structure (hba[i]) out of cmd->host. 1418 // We violate cmd->host privacy here. (Is there another way?) 1419 - c = (ctlr_info_t *) cmd->device->host->hostdata[0]; 1420 - ctlr = c->ctlr; 1421 1422 - rc = lookup_scsi3addr(ctlr, cmd->device->channel, cmd->device->id, 1423 cmd->device->lun, scsi3addr); 1424 if (rc != 0) { 1425 /* the scsi nexus does not match any that we presented... */ ··· 1430 return 0; 1431 } 1432 1433 - /* printk("cciss_queue_command, p=%p, cmd=0x%02x, c%db%dt%dl%d\n", 1434 - cmd, cmd->cmnd[0], ctlr, cmd->channel, cmd->target, cmd->lun);*/ 1435 - // printk("q:%p:c%db%dt%dl%d ", cmd, ctlr, cmd->channel, 1436 - // cmd->target, cmd->lun); 1437 - 1438 /* Ok, we have a reasonable scsi nexus, so send the cmd down, and 1439 see what the device thinks of it. */ 1440 1441 - spin_lock_irqsave(CCISS_LOCK(ctlr), flags); 1442 - cp = scsi_cmd_alloc(c); 1443 - spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); 1444 - if (cp == NULL) { /* trouble... 
*/ 1445 - printk("scsi_cmd_alloc returned NULL!\n"); 1446 /* FIXME: next 3 lines are -> BAD! <- */ 1447 cmd->result = DID_NO_CONNECT << 16; 1448 done(cmd); ··· 1448 1449 cmd->scsi_done = done; // save this for use by completion code 1450 1451 - // save cp in case we have to abort it 1452 - cmd->host_scribble = (unsigned char *) cp; 1453 1454 - cp->cmd_type = CMD_SCSI; 1455 - cp->scsi_cmd = cmd; 1456 - cp->Header.ReplyQueue = 0; // unused in simple mode 1457 - memcpy(&cp->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8); 1458 - cp->Header.Tag.lower = cp->busaddr; // Use k. address of cmd as tag 1459 1460 // Fill in the request block... 1461 1462 - cp->Request.Timeout = 0; 1463 - memset(cp->Request.CDB, 0, sizeof(cp->Request.CDB)); 1464 - BUG_ON(cmd->cmd_len > sizeof(cp->Request.CDB)); 1465 - cp->Request.CDBLen = cmd->cmd_len; 1466 - memcpy(cp->Request.CDB, cmd->cmnd, cmd->cmd_len); 1467 - cp->Request.Type.Type = TYPE_CMD; 1468 - cp->Request.Type.Attribute = ATTR_SIMPLE; 1469 switch(cmd->sc_data_direction) 1470 { 1471 - case DMA_TO_DEVICE: cp->Request.Type.Direction = XFER_WRITE; break; 1472 - case DMA_FROM_DEVICE: cp->Request.Type.Direction = XFER_READ; break; 1473 - case DMA_NONE: cp->Request.Type.Direction = XFER_NONE; break; 1474 case DMA_BIDIRECTIONAL: 1475 // This can happen if a buggy application does a scsi passthru 1476 // and sets both inlen and outlen to non-zero. ( see 1477 // ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() ) 1478 1479 - cp->Request.Type.Direction = XFER_RSVD; 1480 // This is technically wrong, and cciss controllers should 1481 // reject it with CMD_INVALID, which is the most correct 1482 // response, but non-fibre backends appear to let it ··· 1493 break; 1494 1495 default: 1496 - printk("cciss: unknown data direction: %d\n", 1497 cmd->sc_data_direction); 1498 BUG(); 1499 break; 1500 } 1501 - cciss_scatter_gather(c, cp, cmd); 1502 - 1503 - /* Put the request on the tail of the request queue */ 1504 - 1505 - spin_lock_irqsave(CCISS_LOCK(ctlr), flags); 1506 - addQ(&c->reqQ, cp); 1507 - c->Qdepth++; 1508 - start_io(c); 1509 - spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); 1510 - 1511 /* the cmd'll come back via intr handler in complete_scsi_command() */ 1512 return 0; 1513 } 1514 1515 - static void 1516 - cciss_unregister_scsi(int ctlr) 1517 { 1518 struct cciss_scsi_adapter_data_t *sa; 1519 struct cciss_scsi_cmd_stack_t *stk; ··· 1512 1513 /* we are being forcibly unloaded, and may not refuse. 
*/ 1514 1515 - spin_lock_irqsave(CCISS_LOCK(ctlr), flags); 1516 - sa = hba[ctlr]->scsi_ctlr; 1517 stk = &sa->cmd_stack; 1518 1519 /* if we weren't ever actually registered, don't unregister */ 1520 if (sa->registered) { 1521 - spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); 1522 scsi_remove_host(sa->scsi_host); 1523 scsi_host_put(sa->scsi_host); 1524 - spin_lock_irqsave(CCISS_LOCK(ctlr), flags); 1525 } 1526 1527 /* set scsi_host to NULL so our detect routine will 1528 find us on register */ 1529 sa->scsi_host = NULL; 1530 - spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); 1531 - scsi_cmd_stack_free(ctlr); 1532 kfree(sa); 1533 } 1534 1535 - static int 1536 - cciss_engage_scsi(int ctlr) 1537 { 1538 struct cciss_scsi_adapter_data_t *sa; 1539 struct cciss_scsi_cmd_stack_t *stk; 1540 unsigned long flags; 1541 1542 - spin_lock_irqsave(CCISS_LOCK(ctlr), flags); 1543 - sa = hba[ctlr]->scsi_ctlr; 1544 stk = &sa->cmd_stack; 1545 1546 if (sa->registered) { 1547 - printk("cciss%d: SCSI subsystem already engaged.\n", ctlr); 1548 - spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); 1549 return -ENXIO; 1550 } 1551 sa->registered = 1; 1552 - spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); 1553 - cciss_update_non_disk_devices(ctlr, -1); 1554 - cciss_scsi_detect(ctlr); 1555 return 0; 1556 } 1557 1558 static void 1559 - cciss_seq_tape_report(struct seq_file *seq, int ctlr) 1560 { 1561 unsigned long flags; 1562 1563 - CPQ_TAPE_LOCK(ctlr, flags); 1564 seq_printf(seq, 1565 "Sequential access devices: %d\n\n", 1566 - ccissscsi[ctlr].ndevices); 1567 - CPQ_TAPE_UNLOCK(ctlr, flags); 1568 } 1569 1570 static int wait_for_device_to_become_ready(ctlr_info_t *h, ··· 1574 int waittime = HZ; 1575 CommandList_struct *c; 1576 1577 - c = cmd_alloc(h, 1); 1578 if (!c) { 1579 - printk(KERN_WARNING "cciss%d: out of memory in " 1580 - "wait_for_device_to_become_ready.\n", h->ctlr); 1581 return IO_ERROR; 1582 } 1583 ··· 1595 waittime = waittime * 2; 1596 1597 /* Send the Test Unit Ready */ 1598 - rc = fill_cmd(c, TEST_UNIT_READY, h->ctlr, NULL, 0, 0, 1599 lunaddr, TYPE_CMD); 1600 if (rc == 0) 1601 rc = sendcmd_withirq_core(h, c, 0); ··· 1621 } 1622 } 1623 retry_tur: 1624 - printk(KERN_WARNING "cciss%d: Waiting %d secs " 1625 "for device to become ready.\n", 1626 - h->ctlr, waittime / HZ); 1627 rc = 1; /* device not ready. 
*/ 1628 } 1629 1630 if (rc) 1631 - printk("cciss%d: giving up on device.\n", h->ctlr); 1632 else 1633 - printk(KERN_WARNING "cciss%d: device is ready.\n", h->ctlr); 1634 1635 - cmd_free(h, c, 1); 1636 return rc; 1637 } 1638 ··· 1652 int rc; 1653 CommandList_struct *cmd_in_trouble; 1654 unsigned char lunaddr[8]; 1655 - ctlr_info_t *c; 1656 - int ctlr; 1657 1658 /* find the controller to which the command to be aborted was sent */ 1659 - c = (ctlr_info_t *) scsicmd->device->host->hostdata[0]; 1660 - if (c == NULL) /* paranoia */ 1661 return FAILED; 1662 - ctlr = c->ctlr; 1663 - printk(KERN_WARNING "cciss%d: resetting tape drive or medium changer.\n", ctlr); 1664 /* find the command that's giving us trouble */ 1665 cmd_in_trouble = (CommandList_struct *) scsicmd->host_scribble; 1666 if (cmd_in_trouble == NULL) /* paranoia */ 1667 return FAILED; 1668 memcpy(lunaddr, &cmd_in_trouble->Header.LUN.LunAddrBytes[0], 8); 1669 /* send a reset to the SCSI LUN which the command was sent to */ 1670 - rc = sendcmd_withirq(CCISS_RESET_MSG, ctlr, NULL, 0, 0, lunaddr, 1671 TYPE_MSG); 1672 - if (rc == 0 && wait_for_device_to_become_ready(c, lunaddr) == 0) 1673 return SUCCESS; 1674 - printk(KERN_WARNING "cciss%d: resetting device failed.\n", ctlr); 1675 return FAILED; 1676 } 1677 ··· 1678 int rc; 1679 CommandList_struct *cmd_to_abort; 1680 unsigned char lunaddr[8]; 1681 - ctlr_info_t *c; 1682 - int ctlr; 1683 1684 /* find the controller to which the command to be aborted was sent */ 1685 - c = (ctlr_info_t *) scsicmd->device->host->hostdata[0]; 1686 - if (c == NULL) /* paranoia */ 1687 return FAILED; 1688 - ctlr = c->ctlr; 1689 - printk(KERN_WARNING "cciss%d: aborting tardy SCSI cmd\n", ctlr); 1690 1691 /* find the command to be aborted */ 1692 cmd_to_abort = (CommandList_struct *) scsicmd->host_scribble; 1693 if (cmd_to_abort == NULL) /* paranoia */ 1694 return FAILED; 1695 memcpy(lunaddr, &cmd_to_abort->Header.LUN.LunAddrBytes[0], 8); 1696 - rc = sendcmd_withirq(CCISS_ABORT_MSG, ctlr, &cmd_to_abort->Header.Tag, 1697 0, 0, lunaddr, TYPE_MSG); 1698 if (rc == 0) 1699 return SUCCESS;
··· 44 #define CCISS_ABORT_MSG 0x00 45 #define CCISS_RESET_MSG 0x01 46 47 + static int fill_cmd(ctlr_info_t *h, CommandList_struct *c, __u8 cmd, void *buff, 48 size_t size, 49 __u8 page_code, unsigned char *scsi3addr, 50 int cmd_type); 51 52 + static CommandList_struct *cmd_alloc(ctlr_info_t *h); 53 + static CommandList_struct *cmd_special_alloc(ctlr_info_t *h); 54 + static void cmd_free(ctlr_info_t *h, CommandList_struct *c); 55 + static void cmd_special_free(ctlr_info_t *h, CommandList_struct *c); 56 57 static int cciss_scsi_proc_info( 58 struct Scsi_Host *sh, ··· 93 94 #pragma pack(1) 95 96 + #define SCSI_PAD_32 8 97 + #define SCSI_PAD_64 8 98 99 struct cciss_scsi_cmd_stack_elem_t { 100 CommandList_struct cmd; ··· 127 spinlock_t lock; // to protect ccissscsi[ctlr]; 128 }; 129 130 + #define CPQ_TAPE_LOCK(h, flags) spin_lock_irqsave( \ 131 + &h->scsi_ctlr->lock, flags); 132 + #define CPQ_TAPE_UNLOCK(h, flags) spin_unlock_irqrestore( \ 133 + &h->scsi_ctlr->lock, flags); 134 135 static CommandList_struct * 136 scsi_cmd_alloc(ctlr_info_t *h) 137 { 138 /* assume only one process in here at a time, locking done by caller. */ 139 + /* use h->lock */ 140 /* might be better to rewrite how we allocate scsi commands in a way that */ 141 /* needs no locking at all. */ 142 ··· 177 } 178 179 static void 180 + scsi_cmd_free(ctlr_info_t *h, CommandList_struct *c) 181 { 182 /* assume only one process in here at a time, locking done by caller. */ 183 + /* use h->lock */ 184 /* drop the free memory chunk on top of the stack. */ 185 186 struct cciss_scsi_adapter_data_t *sa; ··· 190 stk = &sa->cmd_stack; 191 stk->top++; 192 if (stk->top >= CMD_STACK_SIZE) { 193 + dev_err(&h->pdev->dev, 194 + "scsi_cmd_free called too many times.\n"); 195 BUG(); 196 } 197 + stk->elem[stk->top] = (struct cciss_scsi_cmd_stack_elem_t *) c; 198 } 199 200 static int 201 + scsi_cmd_stack_setup(ctlr_info_t *h, struct cciss_scsi_adapter_data_t *sa) 202 { 203 int i; 204 struct cciss_scsi_cmd_stack_t *stk; 205 size_t size; 206 207 + sa->cmd_sg_list = cciss_allocate_sg_chain_blocks(h, 208 + h->chainsize, CMD_STACK_SIZE); 209 + if (!sa->cmd_sg_list && h->chainsize > 0) 210 return -ENOMEM; 211 212 stk = &sa->cmd_stack; ··· 215 BUILD_BUG_ON((sizeof(*stk->pool) % COMMANDLIST_ALIGNMENT) != 0); 216 /* pci_alloc_consistent guarantees 32-bit DMA address will be used */ 217 stk->pool = (struct cciss_scsi_cmd_stack_elem_t *) 218 + pci_alloc_consistent(h->pdev, size, &stk->cmd_pool_handle); 219 220 if (stk->pool == NULL) { 221 cciss_free_sg_chain_blocks(sa->cmd_sg_list, CMD_STACK_SIZE); ··· 234 } 235 236 static void 237 + scsi_cmd_stack_free(ctlr_info_t *h) 238 { 239 struct cciss_scsi_adapter_data_t *sa; 240 struct cciss_scsi_cmd_stack_t *stk; 241 size_t size; 242 243 + sa = h->scsi_ctlr; 244 stk = &sa->cmd_stack; 245 if (stk->top != CMD_STACK_SIZE-1) { 246 + dev_warn(&h->pdev->dev, 247 + "bug: %d scsi commands are still outstanding.\n", 248 CMD_STACK_SIZE - stk->top); 249 } 250 size = sizeof(struct cciss_scsi_cmd_stack_elem_t) * CMD_STACK_SIZE; 251 252 + pci_free_consistent(h->pdev, size, stk->pool, stk->cmd_pool_handle); 253 stk->pool = NULL; 254 cciss_free_sg_chain_blocks(sa->cmd_sg_list, CMD_STACK_SIZE); 255 } ··· 342 #endif 343 344 static int 345 + find_bus_target_lun(ctlr_info_t *h, int *bus, int *target, int *lun) 346 { 347 /* finds an unused bus, target, lun for a new device */ 348 + /* assumes h->scsi_ctlr->lock is held */ 349 int i, found=0; 350 unsigned char target_taken[CCISS_MAX_SCSI_DEVS_PER_HBA]; 351 352 memset(&target_taken[0], 0, 
CCISS_MAX_SCSI_DEVS_PER_HBA); 353 354 target_taken[SELF_SCSI_ID] = 1; 355 + for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++) 356 + target_taken[ccissscsi[h->ctlr].dev[i].target] = 1; 357 358 + for (i = 0; i < CCISS_MAX_SCSI_DEVS_PER_HBA; i++) { 359 if (!target_taken[i]) { 360 *bus = 0; *target=i; *lun = 0; found=1; 361 break; ··· 369 }; 370 371 static int 372 + cciss_scsi_add_entry(ctlr_info_t *h, int hostno, 373 struct cciss_scsi_dev_t *device, 374 struct scsi2map *added, int *nadded) 375 { 376 + /* assumes h->scsi_ctlr->lock is held */ 377 + int n = ccissscsi[h->ctlr].ndevices; 378 struct cciss_scsi_dev_t *sd; 379 int i, bus, target, lun; 380 unsigned char addr1[8], addr2[8]; 381 382 if (n >= CCISS_MAX_SCSI_DEVS_PER_HBA) { 383 + dev_warn(&h->pdev->dev, "Too many devices, " 384 + "some will be inaccessible.\n"); 385 return -1; 386 } 387 ··· 397 memcpy(addr1, device->scsi3addr, 8); 398 addr1[4] = 0; 399 for (i = 0; i < n; i++) { 400 + sd = &ccissscsi[h->ctlr].dev[i]; 401 memcpy(addr2, sd->scsi3addr, 8); 402 addr2[4] = 0; 403 /* differ only in byte 4? */ ··· 410 } 411 } 412 413 + sd = &ccissscsi[h->ctlr].dev[n]; 414 if (lun == 0) { 415 + if (find_bus_target_lun(h, 416 &sd->bus, &sd->target, &sd->lun) != 0) 417 return -1; 418 } else { ··· 431 memcpy(sd->device_id, device->device_id, sizeof(sd->device_id)); 432 sd->devtype = device->devtype; 433 434 + ccissscsi[h->ctlr].ndevices++; 435 436 /* initially, (before registering with scsi layer) we don't 437 know our hostno and we don't want to print anything first 438 time anyway (the scsi layer's inquiries will show that info) */ 439 if (hostno != -1) 440 + dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n", 441 + scsi_device_type(sd->devtype), hostno, 442 sd->bus, sd->target, sd->lun); 443 return 0; 444 } 445 446 static void 447 + cciss_scsi_remove_entry(ctlr_info_t *h, int hostno, int entry, 448 struct scsi2map *removed, int *nremoved) 449 { 450 + /* assumes h->ctlr]->scsi_ctlr->lock is held */ 451 int i; 452 struct cciss_scsi_dev_t sd; 453 454 if (entry < 0 || entry >= CCISS_MAX_SCSI_DEVS_PER_HBA) return; 455 + sd = ccissscsi[h->ctlr].dev[entry]; 456 removed[*nremoved].bus = sd.bus; 457 removed[*nremoved].target = sd.target; 458 removed[*nremoved].lun = sd.lun; 459 (*nremoved)++; 460 + for (i = entry; i < ccissscsi[h->ctlr].ndevices-1; i++) 461 + ccissscsi[h->ctlr].dev[i] = ccissscsi[h->ctlr].dev[i+1]; 462 + ccissscsi[h->ctlr].ndevices--; 463 + dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n", 464 + scsi_device_type(sd.devtype), hostno, 465 sd.bus, sd.target, sd.lun); 466 } 467 ··· 476 (a)[1] == (b)[1] && \ 477 (a)[0] == (b)[0]) 478 479 + static void fixup_botched_add(ctlr_info_t *h, char *scsi3addr) 480 { 481 /* called when scsi_add_device fails in order to re-adjust */ 482 /* ccissscsi[] to match the mid layer's view. 
*/ 483 unsigned long flags; 484 int i, j; 485 + CPQ_TAPE_LOCK(h, flags); 486 + for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++) { 487 if (memcmp(scsi3addr, 488 + ccissscsi[h->ctlr].dev[i].scsi3addr, 8) == 0) { 489 + for (j = i; j < ccissscsi[h->ctlr].ndevices-1; j++) 490 + ccissscsi[h->ctlr].dev[j] = 491 + ccissscsi[h->ctlr].dev[j+1]; 492 + ccissscsi[h->ctlr].ndevices--; 493 break; 494 } 495 } 496 + CPQ_TAPE_UNLOCK(h, flags); 497 } 498 499 static int device_is_the_same(struct cciss_scsi_dev_t *dev1, ··· 513 } 514 515 static int 516 + adjust_cciss_scsi_table(ctlr_info_t *h, int hostno, 517 struct cciss_scsi_dev_t sd[], int nsds) 518 { 519 /* sd contains scsi3 addresses and devtypes, but ··· 534 GFP_KERNEL); 535 536 if (!added || !removed) { 537 + dev_warn(&h->pdev->dev, 538 + "Out of memory in adjust_cciss_scsi_table\n"); 539 goto free_and_out; 540 } 541 542 + CPQ_TAPE_LOCK(h, flags); 543 544 if (hostno != -1) /* if it's not the first time... */ 545 + sh = h->scsi_ctlr->scsi_host; 546 547 /* find any devices in ccissscsi[] that are not in 548 sd[] and remove them from ccissscsi[] */ ··· 550 i = 0; 551 nremoved = 0; 552 nadded = 0; 553 + while (i < ccissscsi[h->ctlr].ndevices) { 554 + csd = &ccissscsi[h->ctlr].dev[i]; 555 found=0; 556 for (j=0;j<nsds;j++) { 557 if (SCSI3ADDR_EQ(sd[j].scsi3addr, ··· 566 567 if (found == 0) { /* device no longer present. */ 568 changes++; 569 + cciss_scsi_remove_entry(h, hostno, i, 570 removed, &nremoved); 571 /* remove ^^^, hence i not incremented */ 572 } else if (found == 1) { /* device is different in some way */ 573 changes++; 574 + dev_info(&h->pdev->dev, 575 + "device c%db%dt%dl%d has changed.\n", 576 + hostno, csd->bus, csd->target, csd->lun); 577 + cciss_scsi_remove_entry(h, hostno, i, 578 removed, &nremoved); 579 /* remove ^^^, hence i not incremented */ 580 + if (cciss_scsi_add_entry(h, hostno, &sd[j], 581 added, &nadded) != 0) 582 /* we just removed one, so add can't fail. */ 583 BUG(); ··· 601 602 for (i=0;i<nsds;i++) { 603 found=0; 604 + for (j = 0; j < ccissscsi[h->ctlr].ndevices; j++) { 605 + csd = &ccissscsi[h->ctlr].dev[j]; 606 if (SCSI3ADDR_EQ(sd[i].scsi3addr, 607 csd->scsi3addr)) { 608 if (device_is_the_same(&sd[i], csd)) ··· 614 } 615 if (!found) { 616 changes++; 617 + if (cciss_scsi_add_entry(h, hostno, &sd[i], 618 added, &nadded) != 0) 619 break; 620 } else if (found == 1) { 621 /* should never happen... */ 622 changes++; 623 + dev_warn(&h->pdev->dev, 624 + "device unexpectedly changed\n"); 625 /* but if it does happen, we just ignore that device */ 626 } 627 } 628 + CPQ_TAPE_UNLOCK(h, flags); 629 630 /* Don't notify scsi mid layer of any changes the first time through */ 631 /* (or if there are no changes) scsi_scan_host will do it later the */ ··· 645 /* We don't expect to get here. */ 646 /* future cmds to this device will get selection */ 647 /* timeout as if the device was gone. 
*/ 648 + dev_warn(&h->pdev->dev, "didn't find " 649 "c%db%dt%dl%d\n for removal.", 650 + hostno, removed[i].bus, 651 removed[i].target, removed[i].lun); 652 } 653 } ··· 659 added[i].target, added[i].lun); 660 if (rc == 0) 661 continue; 662 + dev_warn(&h->pdev->dev, "scsi_add_device " 663 "c%db%dt%dl%d failed, device not added.\n", 664 + hostno, added[i].bus, added[i].target, added[i].lun); 665 /* now we have to remove it from ccissscsi, */ 666 /* since it didn't get added to scsi mid layer */ 667 + fixup_botched_add(h, added[i].scsi3addr); 668 } 669 670 free_and_out: ··· 675 } 676 677 static int 678 + lookup_scsi3addr(ctlr_info_t *h, int bus, int target, int lun, char *scsi3addr) 679 { 680 int i; 681 struct cciss_scsi_dev_t *sd; 682 unsigned long flags; 683 684 + CPQ_TAPE_LOCK(h, flags); 685 + for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++) { 686 + sd = &ccissscsi[h->ctlr].dev[i]; 687 if (sd->bus == bus && 688 sd->target == target && 689 sd->lun == lun) { 690 memcpy(scsi3addr, &sd->scsi3addr[0], 8); 691 + CPQ_TAPE_UNLOCK(h, flags); 692 return 0; 693 } 694 } 695 + CPQ_TAPE_UNLOCK(h, flags); 696 return -1; 697 } 698 699 static void 700 + cciss_scsi_setup(ctlr_info_t *h) 701 { 702 struct cciss_scsi_adapter_data_t * shba; 703 704 + ccissscsi[h->ctlr].ndevices = 0; 705 shba = (struct cciss_scsi_adapter_data_t *) 706 kmalloc(sizeof(*shba), GFP_KERNEL); 707 if (shba == NULL) ··· 709 shba->scsi_host = NULL; 710 spin_lock_init(&shba->lock); 711 shba->registered = 0; 712 + if (scsi_cmd_stack_setup(h, shba) != 0) { 713 kfree(shba); 714 shba = NULL; 715 } 716 + h->scsi_ctlr = shba; 717 return; 718 } 719 720 + static void complete_scsi_command(CommandList_struct *c, int timeout, 721 + __u32 tag) 722 { 723 struct scsi_cmnd *cmd; 724 + ctlr_info_t *h; 725 ErrorInfo_struct *ei; 726 727 + ei = c->err_info; 728 729 /* First, see if it was a message rather than a command */ 730 + if (c->Request.Type.Type == TYPE_MSG) { 731 + c->cmd_type = CMD_MSG_DONE; 732 return; 733 } 734 735 + cmd = (struct scsi_cmnd *) c->scsi_cmd; 736 + h = hba[c->ctlr]; 737 738 scsi_dma_unmap(cmd); 739 + if (c->Header.SGTotal > h->max_cmd_sgentries) 740 + cciss_unmap_sg_chain_block(h, c); 741 742 cmd->result = (DID_OK << 16); /* host byte */ 743 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */ ··· 764 { 765 #if 0 766 printk(KERN_WARNING "cciss: cmd %p " 767 + "has SCSI Status = %x\n", 768 + c, ei->ScsiStatus); 769 #endif 770 cmd->result |= (ei->ScsiStatus << 1); 771 } ··· 786 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ 787 break; 788 case CMD_DATA_OVERRUN: 789 + dev_warn(&h->pdev->dev, "%p has" 790 " completed with data overrun " 791 + "reported\n", c); 792 break; 793 case CMD_INVALID: { 794 + /* print_bytes(c, sizeof(*c), 1, 0); 795 + print_cmd(c); */ 796 /* We get CMD_INVALID if you address a non-existent tape drive instead 797 of a selection timeout (no response). You will see this if you yank 798 out a tape drive, then try to access it. 
This is kind of a shame ··· 802 } 803 break; 804 case CMD_PROTOCOL_ERR: 805 + dev_warn(&h->pdev->dev, 806 + "%p has protocol error\n", c); 807 break; 808 case CMD_HARDWARE_ERR: 809 cmd->result = DID_ERROR << 16; 810 + dev_warn(&h->pdev->dev, 811 + "%p had hardware error\n", c); 812 break; 813 case CMD_CONNECTION_LOST: 814 cmd->result = DID_ERROR << 16; 815 + dev_warn(&h->pdev->dev, 816 + "%p had connection lost\n", c); 817 break; 818 case CMD_ABORTED: 819 cmd->result = DID_ABORT << 16; 820 + dev_warn(&h->pdev->dev, "%p was aborted\n", c); 821 break; 822 case CMD_ABORT_FAILED: 823 cmd->result = DID_ERROR << 16; 824 + dev_warn(&h->pdev->dev, 825 + "%p reports abort failed\n", c); 826 break; 827 case CMD_UNSOLICITED_ABORT: 828 cmd->result = DID_ABORT << 16; 829 + dev_warn(&h->pdev->dev, "%p aborted do to an " 830 + "unsolicited abort\n", c); 831 break; 832 case CMD_TIMEOUT: 833 cmd->result = DID_TIME_OUT << 16; 834 + dev_warn(&h->pdev->dev, "%p timedout\n", c); 835 break; 836 default: 837 cmd->result = DID_ERROR << 16; 838 + dev_warn(&h->pdev->dev, 839 + "%p returned unknown status %x\n", c, 840 ei->CommandStatus); 841 } 842 } 843 cmd->scsi_done(cmd); 844 + scsi_cmd_free(h, c); 845 } 846 847 static int 848 + cciss_scsi_detect(ctlr_info_t *h) 849 { 850 struct Scsi_Host *sh; 851 int error; ··· 860 sh->io_port = 0; // good enough? FIXME, 861 sh->n_io_port = 0; // I don't think we use these two... 862 sh->this_id = SELF_SCSI_ID; 863 + sh->sg_tablesize = h->maxsgentries; 864 sh->max_cmd_len = MAX_COMMAND_SIZE; 865 866 ((struct cciss_scsi_adapter_data_t *) 867 + h->scsi_ctlr)->scsi_host = sh; 868 + sh->hostdata[0] = (unsigned long) h; 869 + sh->irq = h->intr[SIMPLE_MODE_INT]; 870 sh->unique_id = sh->irq; 871 + error = scsi_add_host(sh, &h->pdev->dev); 872 if (error) 873 goto fail_host_put; 874 scsi_scan_host(sh); ··· 882 883 static void 884 cciss_unmap_one(struct pci_dev *pdev, 885 + CommandList_struct *c, 886 size_t buflen, 887 int data_direction) 888 { 889 u64bit addr64; 890 891 + addr64.val32.lower = c->SG[0].Addr.lower; 892 + addr64.val32.upper = c->SG[0].Addr.upper; 893 pci_unmap_single(pdev, (dma_addr_t) addr64.val, buflen, data_direction); 894 } 895 896 static void 897 cciss_map_one(struct pci_dev *pdev, 898 + CommandList_struct *c, 899 unsigned char *buf, 900 size_t buflen, 901 int data_direction) ··· 903 __u64 addr64; 904 905 addr64 = (__u64) pci_map_single(pdev, buf, buflen, data_direction); 906 + c->SG[0].Addr.lower = 907 (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF); 908 + c->SG[0].Addr.upper = 909 (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF); 910 + c->SG[0].Len = buflen; 911 + c->Header.SGList = (__u8) 1; /* no. SGs contig in this cmd */ 912 + c->Header.SGTotal = (__u16) 1; /* total sgs in this cmd list */ 913 } 914 915 static int 916 + cciss_scsi_do_simple_cmd(ctlr_info_t *h, 917 + CommandList_struct *c, 918 unsigned char *scsi3addr, 919 unsigned char *cdb, 920 unsigned char cdblen, 921 unsigned char *buf, int bufsize, 922 int direction) 923 { 924 DECLARE_COMPLETION_ONSTACK(wait); 925 926 + c->cmd_type = CMD_IOCTL_PEND; /* treat this like an ioctl */ 927 + c->scsi_cmd = NULL; 928 + c->Header.ReplyQueue = 0; /* unused in simple mode */ 929 + memcpy(&c->Header.LUN, scsi3addr, sizeof(c->Header.LUN)); 930 + c->Header.Tag.lower = c->busaddr; /* Use k. address of cmd as tag */ 931 // Fill in the request block... 
932 933 /* printk("Using scsi3addr 0x%02x%0x2%0x2%0x2%0x2%0x2%0x2%0x2\n", 934 scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3], 935 scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]); */ 936 937 + memset(c->Request.CDB, 0, sizeof(c->Request.CDB)); 938 + memcpy(c->Request.CDB, cdb, cdblen); 939 + c->Request.Timeout = 0; 940 + c->Request.CDBLen = cdblen; 941 + c->Request.Type.Type = TYPE_CMD; 942 + c->Request.Type.Attribute = ATTR_SIMPLE; 943 + c->Request.Type.Direction = direction; 944 945 /* Fill in the SG list and do dma mapping */ 946 + cciss_map_one(h->pdev, c, (unsigned char *) buf, 947 bufsize, DMA_FROM_DEVICE); 948 949 + c->waiting = &wait; 950 + enqueue_cmd_and_start_io(h, c); 951 wait_for_completion(&wait); 952 953 /* undo the dma mapping */ 954 + cciss_unmap_one(h->pdev, c, bufsize, DMA_FROM_DEVICE); 955 return(0); 956 } 957 958 static void 959 + cciss_scsi_interpret_error(ctlr_info_t *h, CommandList_struct *c) 960 { 961 ErrorInfo_struct *ei; 962 963 + ei = c->err_info; 964 switch(ei->CommandStatus) 965 { 966 case CMD_TARGET_STATUS: 967 + dev_warn(&h->pdev->dev, 968 + "cmd %p has completed with errors\n", c); 969 + dev_warn(&h->pdev->dev, 970 + "cmd %p has SCSI Status = %x\n", 971 + c, ei->ScsiStatus); 972 if (ei->ScsiStatus == 0) 973 + dev_warn(&h->pdev->dev, 974 + "SCSI status is abnormally zero. " 975 "(probably indicates selection timeout " 976 "reported incorrectly due to a known " 977 "firmware bug, circa July, 2001.)\n"); 978 break; 979 case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ 980 + dev_info(&h->pdev->dev, "UNDERRUN\n"); 981 break; 982 case CMD_DATA_OVERRUN: 983 + dev_warn(&h->pdev->dev, "%p has" 984 " completed with data overrun " 985 + "reported\n", c); 986 break; 987 case CMD_INVALID: { 988 /* controller unfortunately reports SCSI passthru's */ 989 /* to non-existent targets as invalid commands. */ 990 + dev_warn(&h->pdev->dev, 991 + "%p is reported invalid (probably means " 992 + "target device no longer present)\n", c); 993 + /* print_bytes((unsigned char *) c, sizeof(*c), 1, 0); 994 + print_cmd(c); */ 995 } 996 break; 997 case CMD_PROTOCOL_ERR: 998 + dev_warn(&h->pdev->dev, "%p has protocol error\n", c); 999 break; 1000 case CMD_HARDWARE_ERR: 1001 /* cmd->result = DID_ERROR << 16; */ 1002 + dev_warn(&h->pdev->dev, "%p had hardware error\n", c); 1003 break; 1004 case CMD_CONNECTION_LOST: 1005 + dev_warn(&h->pdev->dev, "%p had connection lost\n", c); 1006 break; 1007 case CMD_ABORTED: 1008 + dev_warn(&h->pdev->dev, "%p was aborted\n", c); 1009 break; 1010 case CMD_ABORT_FAILED: 1011 + dev_warn(&h->pdev->dev, 1012 + "%p reports abort failed\n", c); 1013 break; 1014 case CMD_UNSOLICITED_ABORT: 1015 + dev_warn(&h->pdev->dev, 1016 + "%p aborted do to an unsolicited abort\n", c); 1017 break; 1018 case CMD_TIMEOUT: 1019 + dev_warn(&h->pdev->dev, "%p timedout\n", c); 1020 break; 1021 default: 1022 + dev_warn(&h->pdev->dev, 1023 + "%p returned unknown status %x\n", 1024 + c, ei->CommandStatus); 1025 } 1026 } 1027 1028 static int 1029 + cciss_scsi_do_inquiry(ctlr_info_t *h, unsigned char *scsi3addr, 1030 unsigned char page, unsigned char *buf, 1031 unsigned char bufsize) 1032 { 1033 int rc; 1034 + CommandList_struct *c; 1035 char cdb[6]; 1036 ErrorInfo_struct *ei; 1037 unsigned long flags; 1038 1039 + spin_lock_irqsave(&h->lock, flags); 1040 + c = scsi_cmd_alloc(h); 1041 + spin_unlock_irqrestore(&h->lock, flags); 1042 1043 + if (c == NULL) { /* trouble... 
*/ 1044 printk("cmd_alloc returned NULL!\n"); 1045 return -1; 1046 } 1047 1048 + ei = c->err_info; 1049 1050 cdb[0] = CISS_INQUIRY; 1051 cdb[1] = (page != 0); ··· 1068 cdb[3] = 0; 1069 cdb[4] = bufsize; 1070 cdb[5] = 0; 1071 + rc = cciss_scsi_do_simple_cmd(h, c, scsi3addr, cdb, 1072 6, buf, bufsize, XFER_READ); 1073 1074 if (rc != 0) return rc; /* something went wrong */ 1075 1076 if (ei->CommandStatus != 0 && 1077 ei->CommandStatus != CMD_DATA_UNDERRUN) { 1078 + cciss_scsi_interpret_error(h, c); 1079 rc = -1; 1080 } 1081 + spin_lock_irqsave(&h->lock, flags); 1082 + scsi_cmd_free(h, c); 1083 + spin_unlock_irqrestore(&h->lock, flags); 1084 return rc; 1085 } 1086 1087 /* Get the device id from inquiry page 0x83 */ 1088 + static int cciss_scsi_get_device_id(ctlr_info_t *h, unsigned char *scsi3addr, 1089 unsigned char *device_id, int buflen) 1090 { 1091 int rc; ··· 1096 buf = kzalloc(64, GFP_KERNEL); 1097 if (!buf) 1098 return -1; 1099 + rc = cciss_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64); 1100 if (rc == 0) 1101 memcpy(device_id, &buf[8], buflen); 1102 kfree(buf); ··· 1104 } 1105 1106 static int 1107 + cciss_scsi_do_report_phys_luns(ctlr_info_t *h, 1108 ReportLunData_struct *buf, int bufsize) 1109 { 1110 int rc; 1111 + CommandList_struct *c; 1112 unsigned char cdb[12]; 1113 unsigned char scsi3addr[8]; 1114 ErrorInfo_struct *ei; 1115 unsigned long flags; 1116 1117 + spin_lock_irqsave(&h->lock, flags); 1118 + c = scsi_cmd_alloc(h); 1119 + spin_unlock_irqrestore(&h->lock, flags); 1120 + if (c == NULL) { /* trouble... */ 1121 printk("cmd_alloc returned NULL!\n"); 1122 return -1; 1123 } ··· 1136 cdb[10] = 0; 1137 cdb[11] = 0; 1138 1139 + rc = cciss_scsi_do_simple_cmd(h, c, scsi3addr, 1140 cdb, 12, 1141 (unsigned char *) buf, 1142 bufsize, XFER_READ); 1143 1144 if (rc != 0) return rc; /* something went wrong */ 1145 1146 + ei = c->err_info; 1147 if (ei->CommandStatus != 0 && 1148 ei->CommandStatus != CMD_DATA_UNDERRUN) { 1149 + cciss_scsi_interpret_error(h, c); 1150 rc = -1; 1151 } 1152 + spin_lock_irqsave(&h->lock, flags); 1153 + scsi_cmd_free(h, c); 1154 + spin_unlock_irqrestore(&h->lock, flags); 1155 return rc; 1156 } 1157 1158 static void 1159 + cciss_update_non_disk_devices(ctlr_info_t *h, int hostno) 1160 { 1161 /* the idea here is we could get notified from /proc 1162 that some devices have changed, so we do a report ··· 1189 ReportLunData_struct *ld_buff; 1190 unsigned char *inq_buff; 1191 unsigned char scsi3addr[8]; 1192 __u32 num_luns=0; 1193 unsigned char *ch; 1194 struct cciss_scsi_dev_t *currentsd, *this_device; ··· 1197 int reportlunsize = sizeof(*ld_buff) + CISS_MAX_PHYS_LUN * 8; 1198 int i; 1199 1200 ld_buff = kzalloc(reportlunsize, GFP_KERNEL); 1201 inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); 1202 currentsd = kzalloc(sizeof(*currentsd) * ··· 1207 goto out; 1208 } 1209 this_device = &currentsd[CCISS_MAX_SCSI_DEVS_PER_HBA]; 1210 + if (cciss_scsi_do_report_phys_luns(h, ld_buff, reportlunsize) == 0) { 1211 ch = &ld_buff->LUNListLength[0]; 1212 num_luns = ((ch[0]<<24) | (ch[1]<<16) | (ch[2]<<8) | ch[3]) / 8; 1213 if (num_luns > CISS_MAX_PHYS_LUN) { ··· 1231 memset(inq_buff, 0, OBDR_TAPE_INQ_SIZE); 1232 memcpy(&scsi3addr[0], &ld_buff->LUN[i][0], 8); 1233 1234 + if (cciss_scsi_do_inquiry(h, scsi3addr, 0, inq_buff, 1235 (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) 1236 /* Inquiry failed (msg printed already) */ 1237 continue; /* so we will skip this device. 
*/ ··· 1249 sizeof(this_device->revision)); 1250 memset(this_device->device_id, 0, 1251 sizeof(this_device->device_id)); 1252 + cciss_scsi_get_device_id(h, scsi3addr, 1253 this_device->device_id, sizeof(this_device->device_id)); 1254 1255 switch (this_device->devtype) ··· 1276 case 0x08: /* medium changer */ 1277 if (ncurrent >= CCISS_MAX_SCSI_DEVS_PER_HBA) { 1278 printk(KERN_INFO "cciss%d: %s ignored, " 1279 + "too many devices.\n", h->ctlr, 1280 scsi_device_type(this_device->devtype)); 1281 break; 1282 } ··· 1288 } 1289 } 1290 1291 + adjust_cciss_scsi_table(h, hostno, currentsd, ncurrent); 1292 out: 1293 kfree(inq_buff); 1294 kfree(ld_buff); ··· 1307 } 1308 1309 static int 1310 + cciss_scsi_user_command(ctlr_info_t *h, int hostno, char *buffer, int length) 1311 { 1312 int arg_len; 1313 1314 if ((arg_len = is_keyword(buffer, length, "rescan")) != 0) 1315 + cciss_update_non_disk_devices(h, hostno); 1316 else 1317 return -EINVAL; 1318 return length; ··· 1329 { 1330 1331 int buflen, datalen; 1332 + ctlr_info_t *h; 1333 int i; 1334 1335 + h = (ctlr_info_t *) sh->hostdata[0]; 1336 + if (h == NULL) /* This really shouldn't ever happen. */ 1337 return -EINVAL; 1338 1339 if (func == 0) { /* User is reading from /proc/scsi/ciss*?/?* */ 1340 buflen = sprintf(buffer, "cciss%d: SCSI host: %d\n", 1341 + h->ctlr, sh->host_no); 1342 1343 /* this information is needed by apps to know which cciss 1344 device corresponds to which scsi host number without ··· 1352 this info is for an app to be able to use to know how to 1353 get them back in sync. */ 1354 1355 + for (i = 0; i < ccissscsi[h->ctlr].ndevices; i++) { 1356 + struct cciss_scsi_dev_t *sd = 1357 + &ccissscsi[h->ctlr].dev[i]; 1358 buflen += sprintf(&buffer[buflen], "c%db%dt%dl%d %02d " 1359 "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 1360 sh->host_no, sd->bus, sd->target, sd->lun, ··· 1371 *start = buffer + offset; 1372 return(datalen); 1373 } else /* User is writing to /proc/scsi/cciss*?/?* ... */ 1374 + return cciss_scsi_user_command(h, sh->host_no, 1375 buffer, length); 1376 } 1377 1378 /* cciss_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci 1379 dma mapping and fills in the scatter gather entries of the 1380 + cciss command, c. 
*/ 1381 1382 + static void cciss_scatter_gather(ctlr_info_t *h, CommandList_struct *c, 1383 struct scsi_cmnd *cmd) 1384 { 1385 unsigned int len; ··· 1393 1394 chained = 0; 1395 sg_index = 0; 1396 + curr_sg = c->SG; 1397 request_nsgs = scsi_dma_map(cmd); 1398 if (request_nsgs) { 1399 scsi_for_each_sg(cmd, sg, request_nsgs, i) { ··· 1401 !chained && request_nsgs - i > 1) { 1402 chained = 1; 1403 sg_index = 0; 1404 + curr_sg = sa->cmd_sg_list[c->cmdindex]; 1405 } 1406 addr64 = (__u64) sg_dma_address(sg); 1407 len = sg_dma_len(sg); ··· 1414 ++sg_index; 1415 } 1416 if (chained) 1417 + cciss_map_sg_chain_block(h, c, 1418 + sa->cmd_sg_list[c->cmdindex], 1419 (request_nsgs - (h->max_cmd_sgentries - 1)) * 1420 sizeof(SGDescriptor_struct)); 1421 } 1422 /* track how many SG entries we are using */ 1423 if (request_nsgs > h->maxSG) 1424 h->maxSG = request_nsgs; 1425 + c->Header.SGTotal = (__u8) request_nsgs + chained; 1426 if (request_nsgs > h->max_cmd_sgentries) 1427 + c->Header.SGList = h->max_cmd_sgentries; 1428 else 1429 + c->Header.SGList = c->Header.SGTotal; 1430 return; 1431 } 1432 ··· 1434 static int 1435 cciss_scsi_queue_command (struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *)) 1436 { 1437 + ctlr_info_t *h; 1438 + int rc; 1439 unsigned char scsi3addr[8]; 1440 + CommandList_struct *c; 1441 unsigned long flags; 1442 1443 // Get the ptr to our adapter structure (hba[i]) out of cmd->host. 1444 // We violate cmd->host privacy here. (Is there another way?) 1445 + h = (ctlr_info_t *) cmd->device->host->hostdata[0]; 1446 1447 + rc = lookup_scsi3addr(h, cmd->device->channel, cmd->device->id, 1448 cmd->device->lun, scsi3addr); 1449 if (rc != 0) { 1450 /* the scsi nexus does not match any that we presented... */ ··· 1457 return 0; 1458 } 1459 1460 /* Ok, we have a reasonable scsi nexus, so send the cmd down, and 1461 see what the device thinks of it. */ 1462 1463 + spin_lock_irqsave(&h->lock, flags); 1464 + c = scsi_cmd_alloc(h); 1465 + spin_unlock_irqrestore(&h->lock, flags); 1466 + if (c == NULL) { /* trouble... */ 1467 + dev_warn(&h->pdev->dev, "scsi_cmd_alloc returned NULL!\n"); 1468 /* FIXME: next 3 lines are -> BAD! <- */ 1469 cmd->result = DID_NO_CONNECT << 16; 1470 done(cmd); ··· 1480 1481 cmd->scsi_done = done; // save this for use by completion code 1482 1483 + /* save c in case we have to abort it */ 1484 + cmd->host_scribble = (unsigned char *) c; 1485 1486 + c->cmd_type = CMD_SCSI; 1487 + c->scsi_cmd = cmd; 1488 + c->Header.ReplyQueue = 0; /* unused in simple mode */ 1489 + memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8); 1490 + c->Header.Tag.lower = c->busaddr; /* Use k. address of cmd as tag */ 1491 1492 // Fill in the request block... 1493 1494 + c->Request.Timeout = 0; 1495 + memset(c->Request.CDB, 0, sizeof(c->Request.CDB)); 1496 + BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB)); 1497 + c->Request.CDBLen = cmd->cmd_len; 1498 + memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len); 1499 + c->Request.Type.Type = TYPE_CMD; 1500 + c->Request.Type.Attribute = ATTR_SIMPLE; 1501 switch(cmd->sc_data_direction) 1502 { 1503 + case DMA_TO_DEVICE: 1504 + c->Request.Type.Direction = XFER_WRITE; 1505 + break; 1506 + case DMA_FROM_DEVICE: 1507 + c->Request.Type.Direction = XFER_READ; 1508 + break; 1509 + case DMA_NONE: 1510 + c->Request.Type.Direction = XFER_NONE; 1511 + break; 1512 case DMA_BIDIRECTIONAL: 1513 // This can happen if a buggy application does a scsi passthru 1514 // and sets both inlen and outlen to non-zero. 
( see 1515 // ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() ) 1516 1517 + c->Request.Type.Direction = XFER_RSVD; 1518 // This is technically wrong, and cciss controllers should 1519 // reject it with CMD_INVALID, which is the most correct 1520 // response, but non-fibre backends appear to let it ··· 1519 break; 1520 1521 default: 1522 + dev_warn(&h->pdev->dev, "unknown data direction: %d\n", 1523 cmd->sc_data_direction); 1524 BUG(); 1525 break; 1526 } 1527 + cciss_scatter_gather(h, c, cmd); 1528 + enqueue_cmd_and_start_io(h, c); 1529 /* the cmd'll come back via intr handler in complete_scsi_command() */ 1530 return 0; 1531 } 1532 1533 + static void cciss_unregister_scsi(ctlr_info_t *h) 1534 { 1535 struct cciss_scsi_adapter_data_t *sa; 1536 struct cciss_scsi_cmd_stack_t *stk; ··· 1547 1548 /* we are being forcibly unloaded, and may not refuse. */ 1549 1550 + spin_lock_irqsave(&h->lock, flags); 1551 + sa = h->scsi_ctlr; 1552 stk = &sa->cmd_stack; 1553 1554 /* if we weren't ever actually registered, don't unregister */ 1555 if (sa->registered) { 1556 + spin_unlock_irqrestore(&h->lock, flags); 1557 scsi_remove_host(sa->scsi_host); 1558 scsi_host_put(sa->scsi_host); 1559 + spin_lock_irqsave(&h->lock, flags); 1560 } 1561 1562 /* set scsi_host to NULL so our detect routine will 1563 find us on register */ 1564 sa->scsi_host = NULL; 1565 + spin_unlock_irqrestore(&h->lock, flags); 1566 + scsi_cmd_stack_free(h); 1567 kfree(sa); 1568 } 1569 1570 + static int cciss_engage_scsi(ctlr_info_t *h) 1571 { 1572 struct cciss_scsi_adapter_data_t *sa; 1573 struct cciss_scsi_cmd_stack_t *stk; 1574 unsigned long flags; 1575 1576 + spin_lock_irqsave(&h->lock, flags); 1577 + sa = h->scsi_ctlr; 1578 stk = &sa->cmd_stack; 1579 1580 if (sa->registered) { 1581 + dev_info(&h->pdev->dev, "SCSI subsystem already engaged.\n"); 1582 + spin_unlock_irqrestore(&h->lock, flags); 1583 return -ENXIO; 1584 } 1585 sa->registered = 1; 1586 + spin_unlock_irqrestore(&h->lock, flags); 1587 + cciss_update_non_disk_devices(h, -1); 1588 + cciss_scsi_detect(h); 1589 return 0; 1590 } 1591 1592 static void 1593 + cciss_seq_tape_report(struct seq_file *seq, ctlr_info_t *h) 1594 { 1595 unsigned long flags; 1596 1597 + CPQ_TAPE_LOCK(h, flags); 1598 seq_printf(seq, 1599 "Sequential access devices: %d\n\n", 1600 + ccissscsi[h->ctlr].ndevices); 1601 + CPQ_TAPE_UNLOCK(h, flags); 1602 } 1603 1604 static int wait_for_device_to_become_ready(ctlr_info_t *h, ··· 1610 int waittime = HZ; 1611 CommandList_struct *c; 1612 1613 + c = cmd_alloc(h); 1614 if (!c) { 1615 + dev_warn(&h->pdev->dev, "out of memory in " 1616 + "wait_for_device_to_become_ready.\n"); 1617 return IO_ERROR; 1618 } 1619 ··· 1631 waittime = waittime * 2; 1632 1633 /* Send the Test Unit Ready */ 1634 + rc = fill_cmd(h, c, TEST_UNIT_READY, NULL, 0, 0, 1635 lunaddr, TYPE_CMD); 1636 if (rc == 0) 1637 rc = sendcmd_withirq_core(h, c, 0); ··· 1657 } 1658 } 1659 retry_tur: 1660 + dev_warn(&h->pdev->dev, "Waiting %d secs " 1661 "for device to become ready.\n", 1662 + waittime / HZ); 1663 rc = 1; /* device not ready. 
*/ 1664 } 1665 1666 if (rc) 1667 + dev_warn(&h->pdev->dev, "giving up on device.\n"); 1668 else 1669 + dev_warn(&h->pdev->dev, "device is ready.\n"); 1670 1671 + cmd_free(h, c); 1672 return rc; 1673 } 1674 ··· 1688 int rc; 1689 CommandList_struct *cmd_in_trouble; 1690 unsigned char lunaddr[8]; 1691 + ctlr_info_t *h; 1692 1693 /* find the controller to which the command to be aborted was sent */ 1694 + h = (ctlr_info_t *) scsicmd->device->host->hostdata[0]; 1695 + if (h == NULL) /* paranoia */ 1696 return FAILED; 1697 + dev_warn(&h->pdev->dev, "resetting tape drive or medium changer.\n"); 1698 /* find the command that's giving us trouble */ 1699 cmd_in_trouble = (CommandList_struct *) scsicmd->host_scribble; 1700 if (cmd_in_trouble == NULL) /* paranoia */ 1701 return FAILED; 1702 memcpy(lunaddr, &cmd_in_trouble->Header.LUN.LunAddrBytes[0], 8); 1703 /* send a reset to the SCSI LUN which the command was sent to */ 1704 + rc = sendcmd_withirq(h, CCISS_RESET_MSG, NULL, 0, 0, lunaddr, 1705 TYPE_MSG); 1706 + if (rc == 0 && wait_for_device_to_become_ready(h, lunaddr) == 0) 1707 return SUCCESS; 1708 + dev_warn(&h->pdev->dev, "resetting device failed.\n"); 1709 return FAILED; 1710 } 1711 ··· 1716 int rc; 1717 CommandList_struct *cmd_to_abort; 1718 unsigned char lunaddr[8]; 1719 + ctlr_info_t *h; 1720 1721 /* find the controller to which the command to be aborted was sent */ 1722 + h = (ctlr_info_t *) scsicmd->device->host->hostdata[0]; 1723 + if (h == NULL) /* paranoia */ 1724 return FAILED; 1725 + dev_warn(&h->pdev->dev, "aborting tardy SCSI cmd\n"); 1726 1727 /* find the command to be aborted */ 1728 cmd_to_abort = (CommandList_struct *) scsicmd->host_scribble; 1729 if (cmd_to_abort == NULL) /* paranoia */ 1730 return FAILED; 1731 memcpy(lunaddr, &cmd_to_abort->Header.LUN.LunAddrBytes[0], 8); 1732 + rc = sendcmd_withirq(h, CCISS_ABORT_MSG, &cmd_to_abort->Header.Tag, 1733 0, 0, lunaddr, TYPE_MSG); 1734 if (rc == 0) 1735 return SUCCESS;
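Note on the cciss_scsi.c hunks above: the SCSI half of cciss now passes the controller pointer (ctlr_info_t *h) and command pointer explicitly, and the bare printk() calls in the error interpreter become dev_warn()/dev_info() against &h->pdev->dev so messages carry the PCI device name. A minimal sketch of that logging pattern; the helper name is hypothetical, not part of the driver:

	#include <linux/device.h>
	#include <linux/pci.h>

	/* Hypothetical helper: report a completed command against the PCI
	 * device, the same dev_warn()/dev_info() usage the converted
	 * cciss_scsi error handler uses instead of bare printk(). */
	static void report_cmd_status(struct pci_dev *pdev, void *cmd, int status)
	{
		if (status)
			dev_warn(&pdev->dev, "cmd %p completed with status 0x%x\n",
				 cmd, status);
		else
			dev_info(&pdev->dev, "cmd %p completed cleanly\n", cmd);
	}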
+49 -29
drivers/block/cpqarray.c
··· 35 #include <linux/seq_file.h> 36 #include <linux/init.h> 37 #include <linux/hdreg.h> 38 #include <linux/spinlock.h> 39 #include <linux/blkdev.h> 40 #include <linux/genhd.h> ··· 158 unsigned int blkcnt, 159 unsigned int log_unit ); 160 161 - static int ida_open(struct block_device *bdev, fmode_t mode); 162 static int ida_release(struct gendisk *disk, fmode_t mode); 163 static int ida_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg); 164 static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo); ··· 196 197 static const struct block_device_operations ida_fops = { 198 .owner = THIS_MODULE, 199 - .open = ida_open, 200 .release = ida_release, 201 - .locked_ioctl = ida_ioctl, 202 .getgeo = ida_getgeo, 203 .revalidate_disk= ida_revalidate, 204 }; ··· 841 return 0; 842 } 843 844 /* 845 * Close. Sync first. 846 */ 847 static int ida_release(struct gendisk *disk, fmode_t mode) 848 { 849 - ctlr_info_t *host = get_host(disk); 850 host->usage_count--; 851 return 0; 852 } 853 ··· 1145 * ida_ioctl does some miscellaneous stuff like reporting drive geometry, 1146 * setting readahead and submitting commands from userspace to the controller. 1147 */ 1148 - static int ida_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) 1149 { 1150 drv_info_t *drv = get_drv(bdev->bd_disk); 1151 ctlr_info_t *host = get_host(bdev->bd_disk); ··· 1179 return error; 1180 case IDAGETCTLRSIG: 1181 if (!arg) return -EINVAL; 1182 - put_user(host->ctlr_sig, (int __user *)arg); 1183 return 0; 1184 case IDAREVALIDATEVOLS: 1185 if (MINOR(bdev->bd_dev) != 0) ··· 1188 return revalidate_allvol(host); 1189 case IDADRIVERVERSION: 1190 if (!arg) return -EINVAL; 1191 - put_user(DRIVER_VERSION, (unsigned long __user *)arg); 1192 return 0; 1193 case IDAGETPCIINFO: 1194 { ··· 1211 } 1212 1213 } 1214 /* 1215 * ida_ctlr_ioctl is for passing commands to the controller from userspace. 1216 * The command block (io) has already been copied to kernel space for us, ··· 1257 /* Pre submit processing */ 1258 switch(io->cmd) { 1259 case PASSTHRU_A: 1260 - p = kmalloc(io->sg[0].size, GFP_KERNEL); 1261 - if (!p) 1262 - { 1263 - error = -ENOMEM; 1264 - cmd_free(h, c, 0); 1265 - return(error); 1266 - } 1267 - if (copy_from_user(p, io->sg[0].addr, io->sg[0].size)) { 1268 - kfree(p); 1269 - cmd_free(h, c, 0); 1270 - return -EFAULT; 1271 } 1272 c->req.hdr.blk = pci_map_single(h->pci_dev, &(io->c), 1273 sizeof(ida_ioctl_t), ··· 1292 case DIAG_PASS_THRU: 1293 case COLLECT_BUFFER: 1294 case WRITE_FLASH_ROM: 1295 - p = kmalloc(io->sg[0].size, GFP_KERNEL); 1296 - if (!p) 1297 - { 1298 - error = -ENOMEM; 1299 - cmd_free(h, c, 0); 1300 - return(error); 1301 } 1302 - if (copy_from_user(p, io->sg[0].addr, io->sg[0].size)) { 1303 - kfree(p); 1304 - cmd_free(h, c, 0); 1305 - return -EFAULT; 1306 - } 1307 c->req.sg[0].size = io->sg[0].size; 1308 c->req.sg[0].addr = pci_map_single(h->pci_dev, p, 1309 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
··· 35 #include <linux/seq_file.h> 36 #include <linux/init.h> 37 #include <linux/hdreg.h> 38 + #include <linux/smp_lock.h> 39 #include <linux/spinlock.h> 40 #include <linux/blkdev.h> 41 #include <linux/genhd.h> ··· 157 unsigned int blkcnt, 158 unsigned int log_unit ); 159 160 + static int ida_unlocked_open(struct block_device *bdev, fmode_t mode); 161 static int ida_release(struct gendisk *disk, fmode_t mode); 162 static int ida_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg); 163 static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo); ··· 195 196 static const struct block_device_operations ida_fops = { 197 .owner = THIS_MODULE, 198 + .open = ida_unlocked_open, 199 .release = ida_release, 200 + .ioctl = ida_ioctl, 201 .getgeo = ida_getgeo, 202 .revalidate_disk= ida_revalidate, 203 }; ··· 840 return 0; 841 } 842 843 + static int ida_unlocked_open(struct block_device *bdev, fmode_t mode) 844 + { 845 + int ret; 846 + 847 + lock_kernel(); 848 + ret = ida_open(bdev, mode); 849 + unlock_kernel(); 850 + 851 + return ret; 852 + } 853 + 854 /* 855 * Close. Sync first. 856 */ 857 static int ida_release(struct gendisk *disk, fmode_t mode) 858 { 859 + ctlr_info_t *host; 860 + 861 + lock_kernel(); 862 + host = get_host(disk); 863 host->usage_count--; 864 + unlock_kernel(); 865 + 866 return 0; 867 } 868 ··· 1128 * ida_ioctl does some miscellaneous stuff like reporting drive geometry, 1129 * setting readahead and submitting commands from userspace to the controller. 1130 */ 1131 + static int ida_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) 1132 { 1133 drv_info_t *drv = get_drv(bdev->bd_disk); 1134 ctlr_info_t *host = get_host(bdev->bd_disk); ··· 1162 return error; 1163 case IDAGETCTLRSIG: 1164 if (!arg) return -EINVAL; 1165 + if (put_user(host->ctlr_sig, (int __user *)arg)) 1166 + return -EFAULT; 1167 return 0; 1168 case IDAREVALIDATEVOLS: 1169 if (MINOR(bdev->bd_dev) != 0) ··· 1170 return revalidate_allvol(host); 1171 case IDADRIVERVERSION: 1172 if (!arg) return -EINVAL; 1173 + if (put_user(DRIVER_VERSION, (unsigned long __user *)arg)) 1174 + return -EFAULT; 1175 return 0; 1176 case IDAGETPCIINFO: 1177 { ··· 1192 } 1193 1194 } 1195 + 1196 + static int ida_ioctl(struct block_device *bdev, fmode_t mode, 1197 + unsigned int cmd, unsigned long param) 1198 + { 1199 + int ret; 1200 + 1201 + lock_kernel(); 1202 + ret = ida_locked_ioctl(bdev, mode, cmd, param); 1203 + unlock_kernel(); 1204 + 1205 + return ret; 1206 + } 1207 + 1208 /* 1209 * ida_ctlr_ioctl is for passing commands to the controller from userspace. 1210 * The command block (io) has already been copied to kernel space for us, ··· 1225 /* Pre submit processing */ 1226 switch(io->cmd) { 1227 case PASSTHRU_A: 1228 + p = memdup_user(io->sg[0].addr, io->sg[0].size); 1229 + if (IS_ERR(p)) { 1230 + error = PTR_ERR(p); 1231 + cmd_free(h, c, 0); 1232 + return error; 1233 } 1234 c->req.hdr.blk = pci_map_single(h->pci_dev, &(io->c), 1235 sizeof(ida_ioctl_t), ··· 1266 case DIAG_PASS_THRU: 1267 case COLLECT_BUFFER: 1268 case WRITE_FLASH_ROM: 1269 + p = memdup_user(io->sg[0].addr, io->sg[0].size); 1270 + if (IS_ERR(p)) { 1271 + error = PTR_ERR(p); 1272 + cmd_free(h, c, 0); 1273 + return error; 1274 } 1275 c->req.sg[0].size = io->sg[0].size; 1276 c->req.sg[0].addr = pci_map_single(h->pci_dev, p, 1277 c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
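The cpqarray.c ioctl path stops open-coding kmalloc() plus copy_from_user() and uses memdup_user(), which returns an ERR_PTR on failure; the open and ioctl entry points are also wrapped in lock_kernel()/unlock_kernel() now that the block layer no longer takes the BKL for drivers. A small sketch of the memdup_user() error handling; "fetch_user_buf" is an illustrative name, not part of cpqarray:

	#include <linux/err.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	/* memdup_user() allocates and copies in one step and encodes the
	 * error (-ENOMEM or -EFAULT) in the returned pointer. */
	static int fetch_user_buf(void __user *uaddr, size_t len, void **out)
	{
		void *p = memdup_user(uaddr, len);

		if (IS_ERR(p))
			return PTR_ERR(p);
		*out = p;		/* caller kfree()s when done */
		return 0;
	}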
+4 -4
drivers/block/drbd/drbd_actlog.c
··· 79 md_io.error = 0; 80 81 if ((rw & WRITE) && !test_bit(MD_NO_BARRIER, &mdev->flags)) 82 - rw |= (1 << BIO_RW_BARRIER); 83 - rw |= ((1<<BIO_RW_UNPLUG) | (1<<BIO_RW_SYNCIO)); 84 85 retry: 86 bio = bio_alloc(GFP_NOIO, 1); ··· 103 /* check for unsupported barrier op. 104 * would rather check on EOPNOTSUPP, but that is not reliable. 105 * don't try again for ANY return value != 0 */ 106 - if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER) && !ok)) { 107 /* Try again with no barrier */ 108 dev_warn(DEV, "Barriers not supported on meta data device - disabling\n"); 109 set_bit(MD_NO_BARRIER, &mdev->flags); 110 - rw &= ~(1 << BIO_RW_BARRIER); 111 bio_put(bio); 112 goto retry; 113 }
··· 79 md_io.error = 0; 80 81 if ((rw & WRITE) && !test_bit(MD_NO_BARRIER, &mdev->flags)) 82 + rw |= REQ_HARDBARRIER; 83 + rw |= REQ_UNPLUG | REQ_SYNC; 84 85 retry: 86 bio = bio_alloc(GFP_NOIO, 1); ··· 103 /* check for unsupported barrier op. 104 * would rather check on EOPNOTSUPP, but that is not reliable. 105 * don't try again for ANY return value != 0 */ 106 + if (unlikely((bio->bi_rw & REQ_HARDBARRIER) && !ok)) { 107 /* Try again with no barrier */ 108 dev_warn(DEV, "Barriers not supported on meta data device - disabling\n"); 109 set_bit(MD_NO_BARRIER, &mdev->flags); 110 + rw &= ~REQ_HARDBARRIER; 111 bio_put(bio); 112 goto retry; 113 }
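This is the mechanical part of the block layer's BIO_RW_* to REQ_* flag unification: barrier, unplug and sync hints are OR-ed into the rw value as REQ_HARDBARRIER, REQ_UNPLUG and REQ_SYNC, and tested directly on bio->bi_rw instead of via bio_rw_flagged(). A sketch of composing those flags, assuming the 2.6.36-era flag set (REQ_HARDBARRIER was removed again in later kernels); the helper name is mine:

	#include <linux/types.h>
	#include <linux/fs.h>
	#include <linux/blk_types.h>

	/* Build the rw value for a synchronous metadata write, mirroring
	 * the flag handling in the hunk above. */
	static unsigned long md_io_rw_flags(bool use_barrier)
	{
		unsigned long rw = WRITE | REQ_UNPLUG | REQ_SYNC;

		if (use_barrier)
			rw |= REQ_HARDBARRIER;
		return rw;
	}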
+1 -15
drivers/block/drbd/drbd_int.h
··· 550 u32 offset; /* usecs the probe got sent after the reference time point */ 551 } __packed; 552 553 - struct delay_probe { 554 - struct list_head list; 555 - unsigned int seq_num; 556 - struct timeval time; 557 - }; 558 - 559 /* DCBP: Drbd Compressed Bitmap Packet ... */ 560 static inline enum drbd_bitmap_code 561 DCBP_get_code(struct p_compressed_bm *p) ··· 936 unsigned int ko_count; 937 struct drbd_work resync_work, 938 unplug_work, 939 - md_sync_work, 940 - delay_probe_work; 941 struct timer_list resync_timer; 942 struct timer_list md_sync_timer; 943 - struct timer_list delay_probe_timer; 944 945 /* Used after attach while negotiating new disk state. */ 946 union drbd_state new_state_tmp; ··· 1054 u64 ed_uuid; /* UUID of the exposed data */ 1055 struct mutex state_mutex; 1056 char congestion_reason; /* Why we where congested... */ 1057 - struct list_head delay_probes; /* protected by peer_seq_lock */ 1058 - int data_delay; /* Delay of packets on the data-sock behind meta-sock */ 1059 - unsigned int delay_seq; /* To generate sequence numbers of delay probes */ 1060 - struct timeval dps_time; /* delay-probes-start-time */ 1061 - unsigned int dp_volume_last; /* send_cnt of last delay probe */ 1062 - int c_sync_rate; /* current resync rate after delay_probe magic */ 1063 }; 1064 1065 static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
··· 550 u32 offset; /* usecs the probe got sent after the reference time point */ 551 } __packed; 552 553 /* DCBP: Drbd Compressed Bitmap Packet ... */ 554 static inline enum drbd_bitmap_code 555 DCBP_get_code(struct p_compressed_bm *p) ··· 942 unsigned int ko_count; 943 struct drbd_work resync_work, 944 unplug_work, 945 + md_sync_work; 946 struct timer_list resync_timer; 947 struct timer_list md_sync_timer; 948 949 /* Used after attach while negotiating new disk state. */ 950 union drbd_state new_state_tmp; ··· 1062 u64 ed_uuid; /* UUID of the exposed data */ 1063 struct mutex state_mutex; 1064 char congestion_reason; /* Why we where congested... */ 1065 }; 1066 1067 static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
+21 -81
drivers/block/drbd/drbd_main.c
··· 2184 return ok; 2185 } 2186 2187 - static int drbd_send_delay_probe(struct drbd_conf *mdev, struct drbd_socket *ds) 2188 - { 2189 - struct p_delay_probe dp; 2190 - int offset, ok = 0; 2191 - struct timeval now; 2192 - 2193 - mutex_lock(&ds->mutex); 2194 - if (likely(ds->socket)) { 2195 - do_gettimeofday(&now); 2196 - offset = now.tv_usec - mdev->dps_time.tv_usec + 2197 - (now.tv_sec - mdev->dps_time.tv_sec) * 1000000; 2198 - dp.seq_num = cpu_to_be32(mdev->delay_seq); 2199 - dp.offset = cpu_to_be32(offset); 2200 - 2201 - ok = _drbd_send_cmd(mdev, ds->socket, P_DELAY_PROBE, 2202 - (struct p_header *)&dp, sizeof(dp), 0); 2203 - } 2204 - mutex_unlock(&ds->mutex); 2205 - 2206 - return ok; 2207 - } 2208 - 2209 - static int drbd_send_delay_probes(struct drbd_conf *mdev) 2210 - { 2211 - int ok; 2212 - 2213 - mdev->delay_seq++; 2214 - do_gettimeofday(&mdev->dps_time); 2215 - ok = drbd_send_delay_probe(mdev, &mdev->meta); 2216 - ok = ok && drbd_send_delay_probe(mdev, &mdev->data); 2217 - 2218 - mdev->dp_volume_last = mdev->send_cnt; 2219 - mod_timer(&mdev->delay_probe_timer, jiffies + mdev->sync_conf.dp_interval * HZ / 10); 2220 - 2221 - return ok; 2222 - } 2223 - 2224 /* called on sndtimeo 2225 * returns FALSE if we should retry, 2226 * TRUE if we think connection is dead ··· 2332 return 1; 2333 } 2334 2335 - static void consider_delay_probes(struct drbd_conf *mdev) 2336 - { 2337 - if (mdev->state.conn != C_SYNC_SOURCE || mdev->agreed_pro_version < 93) 2338 - return; 2339 - 2340 - if (mdev->dp_volume_last + mdev->sync_conf.dp_volume * 2 < mdev->send_cnt) 2341 - drbd_send_delay_probes(mdev); 2342 - } 2343 - 2344 - static int w_delay_probes(struct drbd_conf *mdev, struct drbd_work *w, int cancel) 2345 - { 2346 - if (!cancel && mdev->state.conn == C_SYNC_SOURCE) 2347 - drbd_send_delay_probes(mdev); 2348 - 2349 - return 1; 2350 - } 2351 - 2352 - static void delay_probe_timer_fn(unsigned long data) 2353 - { 2354 - struct drbd_conf *mdev = (struct drbd_conf *) data; 2355 - 2356 - if (list_empty(&mdev->delay_probe_work.list)) 2357 - drbd_queue_work(&mdev->data.work, &mdev->delay_probe_work); 2358 - } 2359 - 2360 /* Used to send write requests 2361 * R_PRIMARY -> Peer (P_DATA) 2362 */ ··· 2363 /* NOTE: no need to check if barriers supported here as we would 2364 * not pass the test in make_request_common in that case 2365 */ 2366 - if (bio_rw_flagged(req->master_bio, BIO_RW_BARRIER)) { 2367 dev_err(DEV, "ASSERT FAILED would have set DP_HARDBARRIER\n"); 2368 /* dp_flags |= DP_HARDBARRIER; */ 2369 } 2370 - if (bio_rw_flagged(req->master_bio, BIO_RW_SYNCIO)) 2371 dp_flags |= DP_RW_SYNC; 2372 /* for now handle SYNCIO and UNPLUG 2373 * as if they still were one and the same flag */ 2374 - if (bio_rw_flagged(req->master_bio, BIO_RW_UNPLUG)) 2375 dp_flags |= DP_RW_SYNC; 2376 if (mdev->state.conn >= C_SYNC_SOURCE && 2377 mdev->state.conn <= C_PAUSED_SYNC_T) ··· 2394 } 2395 2396 drbd_put_data_sock(mdev); 2397 - 2398 - if (ok) 2399 - consider_delay_probes(mdev); 2400 2401 return ok; 2402 } ··· 2440 ok = _drbd_send_zc_ee(mdev, e); 2441 2442 drbd_put_data_sock(mdev); 2443 - 2444 - if (ok) 2445 - consider_delay_probes(mdev); 2446 2447 return ok; 2448 } ··· 2536 unsigned long flags; 2537 int rv = 0; 2538 2539 spin_lock_irqsave(&mdev->req_lock, flags); 2540 /* to have a stable mdev->state.role 2541 * and no race with updating open_cnt */ ··· 2551 if (!rv) 2552 mdev->open_cnt++; 2553 spin_unlock_irqrestore(&mdev->req_lock, flags); 2554 2555 return rv; 2556 } ··· 2559 static int drbd_release(struct gendisk *gd, fmode_t 
mode) 2560 { 2561 struct drbd_conf *mdev = gd->private_data; 2562 mdev->open_cnt--; 2563 return 0; 2564 } 2565 ··· 2596 2597 static void drbd_set_defaults(struct drbd_conf *mdev) 2598 { 2599 - mdev->sync_conf.after = DRBD_AFTER_DEF; 2600 - mdev->sync_conf.rate = DRBD_RATE_DEF; 2601 - mdev->sync_conf.al_extents = DRBD_AL_EXTENTS_DEF; 2602 mdev->state = (union drbd_state) { 2603 { .role = R_SECONDARY, 2604 .peer = R_UNKNOWN, ··· 2668 INIT_LIST_HEAD(&mdev->unplug_work.list); 2669 INIT_LIST_HEAD(&mdev->md_sync_work.list); 2670 INIT_LIST_HEAD(&mdev->bm_io_work.w.list); 2671 - INIT_LIST_HEAD(&mdev->delay_probes); 2672 - INIT_LIST_HEAD(&mdev->delay_probe_work.list); 2673 2674 mdev->resync_work.cb = w_resync_inactive; 2675 mdev->unplug_work.cb = w_send_write_hint; 2676 mdev->md_sync_work.cb = w_md_sync; 2677 mdev->bm_io_work.w.cb = w_bitmap_io; 2678 - mdev->delay_probe_work.cb = w_delay_probes; 2679 init_timer(&mdev->resync_timer); 2680 init_timer(&mdev->md_sync_timer); 2681 - init_timer(&mdev->delay_probe_timer); 2682 mdev->resync_timer.function = resync_timer_fn; 2683 mdev->resync_timer.data = (unsigned long) mdev; 2684 mdev->md_sync_timer.function = md_sync_timer_fn; 2685 mdev->md_sync_timer.data = (unsigned long) mdev; 2686 - mdev->delay_probe_timer.function = delay_probe_timer_fn; 2687 - mdev->delay_probe_timer.data = (unsigned long) mdev; 2688 - 2689 2690 init_waitqueue_head(&mdev->misc_wait); 2691 init_waitqueue_head(&mdev->state_wait);
··· 2184 return ok; 2185 } 2186 2187 /* called on sndtimeo 2188 * returns FALSE if we should retry, 2189 * TRUE if we think connection is dead ··· 2369 return 1; 2370 } 2371 2372 /* Used to send write requests 2373 * R_PRIMARY -> Peer (P_DATA) 2374 */ ··· 2425 /* NOTE: no need to check if barriers supported here as we would 2426 * not pass the test in make_request_common in that case 2427 */ 2428 + if (req->master_bio->bi_rw & REQ_HARDBARRIER) { 2429 dev_err(DEV, "ASSERT FAILED would have set DP_HARDBARRIER\n"); 2430 /* dp_flags |= DP_HARDBARRIER; */ 2431 } 2432 + if (req->master_bio->bi_rw & REQ_SYNC) 2433 dp_flags |= DP_RW_SYNC; 2434 /* for now handle SYNCIO and UNPLUG 2435 * as if they still were one and the same flag */ 2436 + if (req->master_bio->bi_rw & REQ_UNPLUG) 2437 dp_flags |= DP_RW_SYNC; 2438 if (mdev->state.conn >= C_SYNC_SOURCE && 2439 mdev->state.conn <= C_PAUSED_SYNC_T) ··· 2456 } 2457 2458 drbd_put_data_sock(mdev); 2459 2460 return ok; 2461 } ··· 2505 ok = _drbd_send_zc_ee(mdev, e); 2506 2507 drbd_put_data_sock(mdev); 2508 2509 return ok; 2510 } ··· 2604 unsigned long flags; 2605 int rv = 0; 2606 2607 + lock_kernel(); 2608 spin_lock_irqsave(&mdev->req_lock, flags); 2609 /* to have a stable mdev->state.role 2610 * and no race with updating open_cnt */ ··· 2618 if (!rv) 2619 mdev->open_cnt++; 2620 spin_unlock_irqrestore(&mdev->req_lock, flags); 2621 + unlock_kernel(); 2622 2623 return rv; 2624 } ··· 2625 static int drbd_release(struct gendisk *gd, fmode_t mode) 2626 { 2627 struct drbd_conf *mdev = gd->private_data; 2628 + lock_kernel(); 2629 mdev->open_cnt--; 2630 + unlock_kernel(); 2631 return 0; 2632 } 2633 ··· 2660 2661 static void drbd_set_defaults(struct drbd_conf *mdev) 2662 { 2663 + /* This way we get a compile error when sync_conf grows, 2664 + and we forgot to initialize it here */ 2665 + mdev->sync_conf = (struct syncer_conf) { 2666 + /* .rate = */ DRBD_RATE_DEF, 2667 + /* .after = */ DRBD_AFTER_DEF, 2668 + /* .al_extents = */ DRBD_AL_EXTENTS_DEF, 2669 + /* .verify_alg = */ {}, 0, 2670 + /* .cpu_mask = */ {}, 0, 2671 + /* .csums_alg = */ {}, 0, 2672 + /* .use_rle = */ 0 2673 + }; 2674 + 2675 + /* Have to use that way, because the layout differs between 2676 + big endian and little endian */ 2677 mdev->state = (union drbd_state) { 2678 { .role = R_SECONDARY, 2679 .peer = R_UNKNOWN, ··· 2721 INIT_LIST_HEAD(&mdev->unplug_work.list); 2722 INIT_LIST_HEAD(&mdev->md_sync_work.list); 2723 INIT_LIST_HEAD(&mdev->bm_io_work.w.list); 2724 2725 mdev->resync_work.cb = w_resync_inactive; 2726 mdev->unplug_work.cb = w_send_write_hint; 2727 mdev->md_sync_work.cb = w_md_sync; 2728 mdev->bm_io_work.w.cb = w_bitmap_io; 2729 init_timer(&mdev->resync_timer); 2730 init_timer(&mdev->md_sync_timer); 2731 mdev->resync_timer.function = resync_timer_fn; 2732 mdev->resync_timer.data = (unsigned long) mdev; 2733 mdev->md_sync_timer.function = md_sync_timer_fn; 2734 mdev->md_sync_timer.data = (unsigned long) mdev; 2735 2736 init_waitqueue_head(&mdev->misc_wait); 2737 init_waitqueue_head(&mdev->state_wait);
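In drbd_main.c, drbd_set_defaults() now fills sync_conf through a single compound literal so every member is assigned in one place, and drbd_open()/drbd_release() take the BKL themselves since the block layer stopped doing it for them. A sketch of the compound-literal idiom with a made-up struct; names and values are illustrative only, not drbd's:

	/* Illustrative struct, not drbd's syncer_conf. */
	struct my_sync_conf {
		int rate;		/* KiB/s */
		int after;
		int al_extents;
	};

	static void my_set_defaults(struct my_sync_conf *c)
	{
		/* one expression assigns the whole struct; members not
		 * named here are zeroed, so nothing stale survives */
		*c = (struct my_sync_conf) {
			.rate		= 250,
			.after		= -1,
			.al_extents	= 127,
		};
	}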
-4
drivers/block/drbd/drbd_nl.c
··· 1557 sc.rate = DRBD_RATE_DEF; 1558 sc.after = DRBD_AFTER_DEF; 1559 sc.al_extents = DRBD_AL_EXTENTS_DEF; 1560 - sc.dp_volume = DRBD_DP_VOLUME_DEF; 1561 - sc.dp_interval = DRBD_DP_INTERVAL_DEF; 1562 - sc.throttle_th = DRBD_RS_THROTTLE_TH_DEF; 1563 - sc.hold_off_th = DRBD_RS_HOLD_OFF_TH_DEF; 1564 } else 1565 memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf)); 1566
··· 1557 sc.rate = DRBD_RATE_DEF; 1558 sc.after = DRBD_AFTER_DEF; 1559 sc.al_extents = DRBD_AL_EXTENTS_DEF; 1560 } else 1561 memcpy(&sc, &mdev->sync_conf, sizeof(struct syncer_conf)); 1562
+2 -17
drivers/block/drbd/drbd_proc.c
··· 73 seq_printf(seq, "sync'ed:%3u.%u%% ", res / 10, res % 10); 74 /* if more than 1 GB display in MB */ 75 if (mdev->rs_total > 0x100000L) 76 - seq_printf(seq, "(%lu/%lu)M", 77 (unsigned long) Bit2KB(rs_left >> 10), 78 (unsigned long) Bit2KB(mdev->rs_total >> 10)); 79 else 80 - seq_printf(seq, "(%lu/%lu)K", 81 (unsigned long) Bit2KB(rs_left), 82 (unsigned long) Bit2KB(mdev->rs_total)); 83 - 84 - if (mdev->state.conn == C_SYNC_TARGET) 85 - seq_printf(seq, " queue_delay: %d.%d ms\n\t", 86 - mdev->data_delay / 1000, 87 - (mdev->data_delay % 1000) / 100); 88 - else if (mdev->state.conn == C_SYNC_SOURCE) 89 - seq_printf(seq, " delay_probe: %u\n\t", mdev->delay_seq); 90 91 /* see drivers/md/md.c 92 * We do not want to overflow, so the order of operands and ··· 127 dbdt/1000, dbdt % 1000); 128 else 129 seq_printf(seq, " (%ld)", dbdt); 130 - 131 - if (mdev->state.conn == C_SYNC_TARGET) { 132 - if (mdev->c_sync_rate > 1000) 133 - seq_printf(seq, " want: %d,%03d", 134 - mdev->c_sync_rate / 1000, mdev->c_sync_rate % 1000); 135 - else 136 - seq_printf(seq, " want: %d", mdev->c_sync_rate); 137 - } 138 139 seq_printf(seq, " K/sec\n"); 140 }
··· 73 seq_printf(seq, "sync'ed:%3u.%u%% ", res / 10, res % 10); 74 /* if more than 1 GB display in MB */ 75 if (mdev->rs_total > 0x100000L) 76 + seq_printf(seq, "(%lu/%lu)M\n\t", 77 (unsigned long) Bit2KB(rs_left >> 10), 78 (unsigned long) Bit2KB(mdev->rs_total >> 10)); 79 else 80 + seq_printf(seq, "(%lu/%lu)K\n\t", 81 (unsigned long) Bit2KB(rs_left), 82 (unsigned long) Bit2KB(mdev->rs_total)); 83 84 /* see drivers/md/md.c 85 * We do not want to overflow, so the order of operands and ··· 134 dbdt/1000, dbdt % 1000); 135 else 136 seq_printf(seq, " (%ld)", dbdt); 137 138 seq_printf(seq, " K/sec\n"); 139 }
+29 -106
drivers/block/drbd/drbd_receiver.c
··· 1180 bio->bi_sector = sector; 1181 bio->bi_bdev = mdev->ldev->backing_bdev; 1182 /* we special case some flags in the multi-bio case, see below 1183 - * (BIO_RW_UNPLUG, BIO_RW_BARRIER) */ 1184 bio->bi_rw = rw; 1185 bio->bi_private = e; 1186 bio->bi_end_io = drbd_endio_sec; ··· 1209 bios = bios->bi_next; 1210 bio->bi_next = NULL; 1211 1212 - /* strip off BIO_RW_UNPLUG unless it is the last bio */ 1213 if (bios) 1214 - bio->bi_rw &= ~(1<<BIO_RW_UNPLUG); 1215 1216 drbd_generic_make_request(mdev, fault_type, bio); 1217 1218 - /* strip off BIO_RW_BARRIER, 1219 * unless it is the first or last bio */ 1220 if (bios && bios->bi_next) 1221 - bios->bi_rw &= ~(1<<BIO_RW_BARRIER); 1222 } while (bios); 1223 maybe_kick_lo(mdev); 1224 return 0; ··· 1233 } 1234 1235 /** 1236 - * w_e_reissue() - Worker callback; Resubmit a bio, without BIO_RW_BARRIER set 1237 * @mdev: DRBD device. 1238 * @w: work object. 1239 * @cancel: The connection will be closed anyways (unused in this callback) ··· 1245 (and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch) 1246 so that we can finish that epoch in drbd_may_finish_epoch(). 1247 That is necessary if we already have a long chain of Epochs, before 1248 - we realize that BIO_RW_BARRIER is actually not supported */ 1249 1250 /* As long as the -ENOTSUPP on the barrier is reported immediately 1251 that will never trigger. If it is reported late, we will just ··· 1824 epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list); 1825 if (epoch == e->epoch) { 1826 set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags); 1827 - rw |= (1<<BIO_RW_BARRIER); 1828 e->flags |= EE_IS_BARRIER; 1829 } else { 1830 if (atomic_read(&epoch->epoch_size) > 1 || 1831 !test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) { 1832 set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags); 1833 set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags); 1834 - rw |= (1<<BIO_RW_BARRIER); 1835 e->flags |= EE_IS_BARRIER; 1836 } 1837 } ··· 1841 dp_flags = be32_to_cpu(p->dp_flags); 1842 if (dp_flags & DP_HARDBARRIER) { 1843 dev_err(DEV, "ASSERT FAILED would have submitted barrier request\n"); 1844 - /* rw |= (1<<BIO_RW_BARRIER); */ 1845 } 1846 if (dp_flags & DP_RW_SYNC) 1847 - rw |= (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG); 1848 if (dp_flags & DP_MAY_SET_IN_SYNC) 1849 e->flags |= EE_MAY_SET_IN_SYNC; 1850 ··· 3555 return ok; 3556 } 3557 3558 - static int receive_skip(struct drbd_conf *mdev, struct p_header *h) 3559 { 3560 /* TODO zero copy sink :) */ 3561 static char sink[128]; 3562 int size, want, r; 3563 3564 - dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n", 3565 - h->command, h->length); 3566 3567 size = h->length; 3568 while (size > 0) { ··· 3575 return size == 0; 3576 } 3577 3578 static int receive_UnplugRemote(struct drbd_conf *mdev, struct p_header *h) 3579 { 3580 if (mdev->state.disk >= D_INCONSISTENT) ··· 3594 * with the data requests being unplugged */ 3595 drbd_tcp_quickack(mdev->data.socket); 3596 3597 - return TRUE; 3598 - } 3599 - 3600 - static void timeval_sub_us(struct timeval* tv, unsigned int us) 3601 - { 3602 - tv->tv_sec -= us / 1000000; 3603 - us = us % 1000000; 3604 - if (tv->tv_usec > us) { 3605 - tv->tv_usec += 1000000; 3606 - tv->tv_sec--; 3607 - } 3608 - tv->tv_usec -= us; 3609 - } 3610 - 3611 - static void got_delay_probe(struct drbd_conf *mdev, int from, struct p_delay_probe *p) 3612 - { 3613 - struct delay_probe *dp; 3614 - struct list_head *le; 3615 - struct timeval now; 3616 - int seq_num; 3617 - int offset; 3618 - int data_delay; 3619 - 3620 - seq_num = 
be32_to_cpu(p->seq_num); 3621 - offset = be32_to_cpu(p->offset); 3622 - 3623 - spin_lock(&mdev->peer_seq_lock); 3624 - if (!list_empty(&mdev->delay_probes)) { 3625 - if (from == USE_DATA_SOCKET) 3626 - le = mdev->delay_probes.next; 3627 - else 3628 - le = mdev->delay_probes.prev; 3629 - 3630 - dp = list_entry(le, struct delay_probe, list); 3631 - 3632 - if (dp->seq_num == seq_num) { 3633 - list_del(le); 3634 - spin_unlock(&mdev->peer_seq_lock); 3635 - do_gettimeofday(&now); 3636 - timeval_sub_us(&now, offset); 3637 - data_delay = 3638 - now.tv_usec - dp->time.tv_usec + 3639 - (now.tv_sec - dp->time.tv_sec) * 1000000; 3640 - 3641 - if (data_delay > 0) 3642 - mdev->data_delay = data_delay; 3643 - 3644 - kfree(dp); 3645 - return; 3646 - } 3647 - 3648 - if (dp->seq_num > seq_num) { 3649 - spin_unlock(&mdev->peer_seq_lock); 3650 - dev_warn(DEV, "Previous allocation failure of struct delay_probe?\n"); 3651 - return; /* Do not alloca a struct delay_probe.... */ 3652 - } 3653 - } 3654 - spin_unlock(&mdev->peer_seq_lock); 3655 - 3656 - dp = kmalloc(sizeof(struct delay_probe), GFP_NOIO); 3657 - if (!dp) { 3658 - dev_warn(DEV, "Failed to allocate a struct delay_probe, do not worry.\n"); 3659 - return; 3660 - } 3661 - 3662 - dp->seq_num = seq_num; 3663 - do_gettimeofday(&dp->time); 3664 - timeval_sub_us(&dp->time, offset); 3665 - 3666 - spin_lock(&mdev->peer_seq_lock); 3667 - if (from == USE_DATA_SOCKET) 3668 - list_add(&dp->list, &mdev->delay_probes); 3669 - else 3670 - list_add_tail(&dp->list, &mdev->delay_probes); 3671 - spin_unlock(&mdev->peer_seq_lock); 3672 - } 3673 - 3674 - static int receive_delay_probe(struct drbd_conf *mdev, struct p_header *h) 3675 - { 3676 - struct p_delay_probe *p = (struct p_delay_probe *)h; 3677 - 3678 - ERR_IF(h->length != (sizeof(*p)-sizeof(*h))) return FALSE; 3679 - if (drbd_recv(mdev, h->payload, h->length) != h->length) 3680 - return FALSE; 3681 - 3682 - got_delay_probe(mdev, USE_DATA_SOCKET, p); 3683 return TRUE; 3684 } 3685 ··· 3620 [P_OV_REQUEST] = receive_DataRequest, 3621 [P_OV_REPLY] = receive_DataRequest, 3622 [P_CSUM_RS_REQUEST] = receive_DataRequest, 3623 - [P_DELAY_PROBE] = receive_delay_probe, 3624 /* anything missing from this table is in 3625 * the asender_tbl, see get_asender_cmd */ 3626 [P_MAX_CMD] = NULL, ··· 4397 return TRUE; 4398 } 4399 4400 - static int got_delay_probe_m(struct drbd_conf *mdev, struct p_header *h) 4401 { 4402 - struct p_delay_probe *p = (struct p_delay_probe *)h; 4403 - 4404 - got_delay_probe(mdev, USE_META_SOCKET, p); 4405 return TRUE; 4406 } 4407 ··· 4427 [P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck }, 4428 [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply }, 4429 [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync }, 4430 - [P_DELAY_PROBE] = { sizeof(struct p_delay_probe), got_delay_probe_m }, 4431 [P_MAX_CMD] = { 0, NULL }, 4432 }; 4433 if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
··· 1180 bio->bi_sector = sector; 1181 bio->bi_bdev = mdev->ldev->backing_bdev; 1182 /* we special case some flags in the multi-bio case, see below 1183 + * (REQ_UNPLUG, REQ_HARDBARRIER) */ 1184 bio->bi_rw = rw; 1185 bio->bi_private = e; 1186 bio->bi_end_io = drbd_endio_sec; ··· 1209 bios = bios->bi_next; 1210 bio->bi_next = NULL; 1211 1212 + /* strip off REQ_UNPLUG unless it is the last bio */ 1213 if (bios) 1214 + bio->bi_rw &= ~REQ_UNPLUG; 1215 1216 drbd_generic_make_request(mdev, fault_type, bio); 1217 1218 + /* strip off REQ_HARDBARRIER, 1219 * unless it is the first or last bio */ 1220 if (bios && bios->bi_next) 1221 + bios->bi_rw &= ~REQ_HARDBARRIER; 1222 } while (bios); 1223 maybe_kick_lo(mdev); 1224 return 0; ··· 1233 } 1234 1235 /** 1236 + * w_e_reissue() - Worker callback; Resubmit a bio, without REQ_HARDBARRIER set 1237 * @mdev: DRBD device. 1238 * @w: work object. 1239 * @cancel: The connection will be closed anyways (unused in this callback) ··· 1245 (and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch) 1246 so that we can finish that epoch in drbd_may_finish_epoch(). 1247 That is necessary if we already have a long chain of Epochs, before 1248 + we realize that REQ_HARDBARRIER is actually not supported */ 1249 1250 /* As long as the -ENOTSUPP on the barrier is reported immediately 1251 that will never trigger. If it is reported late, we will just ··· 1824 epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list); 1825 if (epoch == e->epoch) { 1826 set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags); 1827 + rw |= REQ_HARDBARRIER; 1828 e->flags |= EE_IS_BARRIER; 1829 } else { 1830 if (atomic_read(&epoch->epoch_size) > 1 || 1831 !test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) { 1832 set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags); 1833 set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags); 1834 + rw |= REQ_HARDBARRIER; 1835 e->flags |= EE_IS_BARRIER; 1836 } 1837 } ··· 1841 dp_flags = be32_to_cpu(p->dp_flags); 1842 if (dp_flags & DP_HARDBARRIER) { 1843 dev_err(DEV, "ASSERT FAILED would have submitted barrier request\n"); 1844 + /* rw |= REQ_HARDBARRIER; */ 1845 } 1846 if (dp_flags & DP_RW_SYNC) 1847 + rw |= REQ_SYNC | REQ_UNPLUG; 1848 if (dp_flags & DP_MAY_SET_IN_SYNC) 1849 e->flags |= EE_MAY_SET_IN_SYNC; 1850 ··· 3555 return ok; 3556 } 3557 3558 + static int receive_skip_(struct drbd_conf *mdev, struct p_header *h, int silent) 3559 { 3560 /* TODO zero copy sink :) */ 3561 static char sink[128]; 3562 int size, want, r; 3563 3564 + if (!silent) 3565 + dev_warn(DEV, "skipping unknown optional packet type %d, l: %d!\n", 3566 + h->command, h->length); 3567 3568 size = h->length; 3569 while (size > 0) { ··· 3574 return size == 0; 3575 } 3576 3577 + static int receive_skip(struct drbd_conf *mdev, struct p_header *h) 3578 + { 3579 + return receive_skip_(mdev, h, 0); 3580 + } 3581 + 3582 + static int receive_skip_silent(struct drbd_conf *mdev, struct p_header *h) 3583 + { 3584 + return receive_skip_(mdev, h, 1); 3585 + } 3586 + 3587 static int receive_UnplugRemote(struct drbd_conf *mdev, struct p_header *h) 3588 { 3589 if (mdev->state.disk >= D_INCONSISTENT) ··· 3583 * with the data requests being unplugged */ 3584 drbd_tcp_quickack(mdev->data.socket); 3585 3586 return TRUE; 3587 } 3588 ··· 3695 [P_OV_REQUEST] = receive_DataRequest, 3696 [P_OV_REPLY] = receive_DataRequest, 3697 [P_CSUM_RS_REQUEST] = receive_DataRequest, 3698 + [P_DELAY_PROBE] = receive_skip_silent, 3699 /* anything missing from this table is in 3700 * the asender_tbl, see get_asender_cmd */ 3701 
[P_MAX_CMD] = NULL, ··· 4472 return TRUE; 4473 } 4474 4475 + static int got_something_to_ignore_m(struct drbd_conf *mdev, struct p_header *h) 4476 { 4477 + /* IGNORE */ 4478 return TRUE; 4479 } 4480 ··· 4504 [P_BARRIER_ACK] = { sizeof(struct p_barrier_ack), got_BarrierAck }, 4505 [P_STATE_CHG_REPLY] = { sizeof(struct p_req_state_reply), got_RqSReply }, 4506 [P_RS_IS_IN_SYNC] = { sizeof(struct p_block_ack), got_IsInSync }, 4507 + [P_DELAY_PROBE] = { sizeof(struct p_delay_probe), got_something_to_ignore_m }, 4508 [P_MAX_CMD] = { 0, NULL }, 4509 }; 4510 if (cmd > P_MAX_CMD || asender_tbl[cmd].process == NULL)
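With delay probes reverted, P_DELAY_PROBE packets from peers that still send them are drained silently on the data socket (receive_skip_silent) and ignored on the meta socket (got_something_to_ignore_m) instead of being treated as unknown packets. A minimal sketch of that dispatch-table shape; every name here is hypothetical and the slot numbers are arbitrary:

	struct pkt_header { unsigned int command; unsigned int length; };
	typedef int (*pkt_handler)(struct pkt_header *h);

	static int recv_payload(struct pkt_header *h)
	{
		/* placeholder: a real handler would read h->length bytes */
		return 1;
	}

	static int recv_ignore(struct pkt_header *h)
	{
		(void)h;	/* drain/ignore silently */
		return 1;
	}

	/* sparse, designated-initializer table: slots left NULL are treated
	 * as protocol errors by the caller */
	static pkt_handler pkt_table[] = {
		[0] = recv_payload,
		[9] = recv_ignore,	/* an obsolete-but-harmless packet type */
	};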
+1 -1
drivers/block/drbd/drbd_req.c
··· 997 * because of those XXX, this is not yet enabled, 998 * i.e. in drbd_init_set_defaults we set the NO_BARRIER_SUPP bit. 999 */ 1000 - if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER) && test_bit(NO_BARRIER_SUPP, &mdev->flags))) { 1001 /* dev_warn(DEV, "Rejecting barrier request as underlying device does not support\n"); */ 1002 bio_endio(bio, -EOPNOTSUPP); 1003 return 0;
··· 997 * because of those XXX, this is not yet enabled, 998 * i.e. in drbd_init_set_defaults we set the NO_BARRIER_SUPP bit. 999 */ 1000 + if (unlikely(bio->bi_rw & REQ_HARDBARRIER) && test_bit(NO_BARRIER_SUPP, &mdev->flags)) { 1001 /* dev_warn(DEV, "Rejecting barrier request as underlying device does not support\n"); */ 1002 bio_endio(bio, -EOPNOTSUPP); 1003 return 0;
+1 -14
drivers/block/drbd/drbd_worker.c
··· 424 drbd_queue_work(&mdev->data.work, &mdev->resync_work); 425 } 426 427 - static int calc_resync_rate(struct drbd_conf *mdev) 428 - { 429 - int d = mdev->data_delay / 1000; /* us -> ms */ 430 - int td = mdev->sync_conf.throttle_th * 100; /* 0.1s -> ms */ 431 - int hd = mdev->sync_conf.hold_off_th * 100; /* 0.1s -> ms */ 432 - int cr = mdev->sync_conf.rate; 433 - 434 - return d <= td ? cr : 435 - d >= hd ? 0 : 436 - cr + (cr * (td - d) / (hd - td)); 437 - } 438 - 439 int w_make_resync_request(struct drbd_conf *mdev, 440 struct drbd_work *w, int cancel) 441 { ··· 461 max_segment_size = mdev->agreed_pro_version < 94 ? 462 queue_max_segment_size(mdev->rq_queue) : DRBD_MAX_SEGMENT_SIZE; 463 464 - mdev->c_sync_rate = calc_resync_rate(mdev); 465 - number = SLEEP_TIME * mdev->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ); 466 pe = atomic_read(&mdev->rs_pending_cnt); 467 468 mutex_lock(&mdev->data.mutex);
··· 424 drbd_queue_work(&mdev->data.work, &mdev->resync_work); 425 } 426 427 int w_make_resync_request(struct drbd_conf *mdev, 428 struct drbd_work *w, int cancel) 429 { ··· 473 max_segment_size = mdev->agreed_pro_version < 94 ? 474 queue_max_segment_size(mdev->rq_queue) : DRBD_MAX_SEGMENT_SIZE; 475 476 + number = SLEEP_TIME * mdev->sync_conf.rate / ((BM_BLOCK_SIZE / 1024) * HZ); 477 pe = atomic_read(&mdev->rs_pending_cnt); 478 479 mutex_lock(&mdev->data.mutex);
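With the delay-probe throttling gone, w_make_resync_request() derives its per-cycle request budget straight from sync_conf.rate again. Assuming SLEEP_TIME is HZ/10 and BM_BLOCK_SIZE is 4096 bytes (their usual definitions in drbd), the expression reduces to rate/40 requests per wake-up; a hedged helper spelling that out, with a name of my own:

	#include <linux/param.h>

	/* 4 KiB resync requests to issue per 100 ms cycle for a configured
	 * rate in KiB/s; e.g. rate = 1000 gives 1000 / 40 = 25. */
	static inline unsigned int resync_reqs_per_cycle(unsigned int rate_kib_s)
	{
		return (HZ / 10) * rate_kib_s / ((4096 / 1024) * HZ);
	}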
+70 -112
drivers/block/floppy.c
··· 178 #include <linux/slab.h> 179 #include <linux/mm.h> 180 #include <linux/bio.h> 181 #include <linux/string.h> 182 #include <linux/jiffies.h> 183 #include <linux/fcntl.h> ··· 515 static DECLARE_WAIT_QUEUE_HEAD(fdc_wait); 516 static DECLARE_WAIT_QUEUE_HEAD(command_done); 517 518 - #define NO_SIGNAL (!interruptible || !signal_pending(current)) 519 - 520 /* Errors during formatting are counted here. */ 521 static int format_errors; 522 ··· 538 539 static int *errors; 540 typedef void (*done_f)(int); 541 - static struct cont_t { 542 void (*interrupt)(void); 543 /* this is called after the interrupt of the 544 * main command */ ··· 577 #define NEED_1_RECAL -2 578 #define NEED_2_RECAL -3 579 580 - static int usage_count; 581 582 /* buffer related variables */ 583 static int buffer_track = -1; ··· 857 } 858 859 /* locks the driver */ 860 - static int _lock_fdc(int drive, bool interruptible, int line) 861 { 862 - if (!usage_count) { 863 - pr_err("Trying to lock fdc while usage count=0 at line %d\n", 864 - line); 865 return -1; 866 - } 867 868 - if (test_and_set_bit(0, &fdc_busy)) { 869 - DECLARE_WAITQUEUE(wait, current); 870 - add_wait_queue(&fdc_wait, &wait); 871 872 - for (;;) { 873 - set_current_state(TASK_INTERRUPTIBLE); 874 - 875 - if (!test_and_set_bit(0, &fdc_busy)) 876 - break; 877 - 878 - schedule(); 879 - 880 - if (!NO_SIGNAL) { 881 - remove_wait_queue(&fdc_wait, &wait); 882 - return -EINTR; 883 - } 884 - } 885 - 886 - set_current_state(TASK_RUNNING); 887 - remove_wait_queue(&fdc_wait, &wait); 888 - flush_scheduled_work(); 889 - } 890 command_status = FD_COMMAND_NONE; 891 892 __reschedule_timeout(drive, "lock fdc"); ··· 873 return 0; 874 } 875 876 - #define lock_fdc(drive, interruptible) \ 877 - _lock_fdc(drive, interruptible, __LINE__) 878 - 879 /* unlocks the driver */ 880 - static inline void unlock_fdc(void) 881 { 882 unsigned long flags; 883 ··· 1199 /* Set perpendicular mode as required, based on data rate, if supported. 1200 * 82077 Now tested. 1Mbps data rate only possible with 82077-1. 1201 */ 1202 - static inline void perpendicular_mode(void) 1203 { 1204 unsigned char perp_mode; 1205 ··· 1970 wake_up(&command_done); 1971 } 1972 1973 - static struct cont_t wakeup_cont = { 1974 .interrupt = empty, 1975 .redo = do_wakeup, 1976 .error = empty, 1977 .done = (done_f)empty 1978 }; 1979 1980 - static struct cont_t intr_cont = { 1981 .interrupt = empty, 1982 .redo = process_fd_request, 1983 .error = empty, ··· 1990 1991 schedule_bh(handler); 1992 1993 - if (command_status < 2 && NO_SIGNAL) { 1994 - DECLARE_WAITQUEUE(wait, current); 1995 - 1996 - add_wait_queue(&command_done, &wait); 1997 - for (;;) { 1998 - set_current_state(interruptible ? 
1999 - TASK_INTERRUPTIBLE : 2000 - TASK_UNINTERRUPTIBLE); 2001 - 2002 - if (command_status >= 2 || !NO_SIGNAL) 2003 - break; 2004 - 2005 - is_alive(__func__, ""); 2006 - schedule(); 2007 - } 2008 - 2009 - set_current_state(TASK_RUNNING); 2010 - remove_wait_queue(&command_done, &wait); 2011 - } 2012 2013 if (command_status < 2) { 2014 cancel_activity(); ··· 2183 debugt(__func__, "queue format request"); 2184 } 2185 2186 - static struct cont_t format_cont = { 2187 .interrupt = format_interrupt, 2188 .redo = redo_format, 2189 .error = bad_flp_intr, ··· 2543 int tracksize; 2544 int ssize; 2545 2546 - if (max_buffer_sectors == 0) { 2547 - pr_info("VFS: Block I/O scheduled on unopened device\n"); 2548 return 0; 2549 - } 2550 2551 set_fdc((long)current_req->rq_disk->private_data); 2552 ··· 2879 return; 2880 } 2881 2882 - static struct cont_t rw_cont = { 2883 .interrupt = rw_interrupt, 2884 .redo = redo_fd_request, 2885 .error = bad_flp_intr, ··· 2894 2895 static void do_fd_request(struct request_queue *q) 2896 { 2897 - if (max_buffer_sectors == 0) { 2898 - pr_info("VFS: %s called on non-open device\n", __func__); 2899 return; 2900 - } 2901 2902 - if (usage_count == 0) { 2903 - pr_info("warning: usage count=0, current_req=%p exiting\n", 2904 - current_req); 2905 - pr_info("sect=%ld type=%x flags=%x\n", 2906 - (long)blk_rq_pos(current_req), current_req->cmd_type, 2907 - current_req->cmd_flags); 2908 return; 2909 - } 2910 if (test_bit(0, &fdc_busy)) { 2911 /* fdc busy, this new request will be treated when the 2912 current one is done */ ··· 2915 is_alive(__func__, ""); 2916 } 2917 2918 - static struct cont_t poll_cont = { 2919 .interrupt = success_and_wakeup, 2920 .redo = floppy_ready, 2921 .error = generic_failure, ··· 2946 pr_info("weird, reset interrupt called\n"); 2947 } 2948 2949 - static struct cont_t reset_cont = { 2950 .interrupt = reset_intr, 2951 .redo = success_and_wakeup, 2952 .error = generic_failure, ··· 2988 return copy_from_user(address, param, size) ? 
-EFAULT : 0; 2989 } 2990 2991 - static inline const char *drive_name(int type, int drive) 2992 { 2993 struct floppy_struct *floppy; 2994 ··· 3051 generic_done(flag); 3052 } 3053 3054 - static struct cont_t raw_cmd_cont = { 3055 .interrupt = success_and_wakeup, 3056 .redo = floppy_start, 3057 .error = generic_failure, 3058 .done = raw_cmd_done 3059 }; 3060 3061 - static inline int raw_cmd_copyout(int cmd, void __user *param, 3062 struct floppy_raw_cmd *ptr) 3063 { 3064 int ret; ··· 3103 } 3104 } 3105 3106 - static inline int raw_cmd_copyin(int cmd, void __user *param, 3107 struct floppy_raw_cmd **rcmd) 3108 { 3109 struct floppy_raw_cmd *ptr; ··· 3221 return 0; 3222 } 3223 3224 - static inline int set_geometry(unsigned int cmd, struct floppy_struct *g, 3225 int drive, int type, struct block_device *bdev) 3226 { 3227 int cnt; ··· 3292 } 3293 3294 /* handle obsolete ioctl's */ 3295 - static int ioctl_table[] = { 3296 FDCLRPRM, 3297 FDSETPRM, 3298 FDDEFPRM, ··· 3320 FDTWADDLE 3321 }; 3322 3323 - static inline int normalize_ioctl(int *cmd, int *size) 3324 { 3325 int i; 3326 ··· 3372 return 0; 3373 } 3374 3375 - static int fd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, 3376 unsigned long param) 3377 { 3378 int drive = (long)bdev->bd_disk->private_data; ··· 3548 return 0; 3549 } 3550 3551 static void __init config_types(void) 3552 { 3553 bool has_drive = false; ··· 3616 { 3617 int drive = (long)disk->private_data; 3618 3619 mutex_lock(&open_lock); 3620 if (UDRS->fd_ref < 0) 3621 UDRS->fd_ref = 0; ··· 3627 if (!UDRS->fd_ref) 3628 opened_bdev[drive] = NULL; 3629 mutex_unlock(&open_lock); 3630 3631 return 0; 3632 } ··· 3645 int res = -EBUSY; 3646 char *tmp; 3647 3648 mutex_lock(&open_lock); 3649 old_dev = UDRS->fd_device; 3650 if (opened_bdev[drive] && opened_bdev[drive] != bdev) ··· 3722 goto out; 3723 } 3724 mutex_unlock(&open_lock); 3725 return 0; 3726 out: 3727 if (UDRS->fd_ref < 0) ··· 3733 opened_bdev[drive] = NULL; 3734 out2: 3735 mutex_unlock(&open_lock); 3736 return res; 3737 } 3738 ··· 3801 bio.bi_size = size; 3802 bio.bi_bdev = bdev; 3803 bio.bi_sector = 0; 3804 init_completion(&complete); 3805 bio.bi_private = &complete; 3806 bio.bi_end_io = floppy_rb0_complete; ··· 3830 if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) || 3831 test_bit(FD_VERIFY_BIT, &UDRS->flags) || 3832 test_bit(drive, &fake_change) || NO_GEOM) { 3833 - if (usage_count == 0) { 3834 - pr_info("VFS: revalidate called on non-open device.\n"); 3835 return -EFAULT; 3836 - } 3837 lock_fdc(drive, false); 3838 cf = (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) || 3839 test_bit(FD_VERIFY_BIT, &UDRS->flags)); ··· 3866 .owner = THIS_MODULE, 3867 .open = floppy_open, 3868 .release = floppy_release, 3869 - .locked_ioctl = fd_ioctl, 3870 .getgeo = fd_getgeo, 3871 .media_changed = check_floppy_change, 3872 .revalidate_disk = floppy_revalidate, ··· 4099 return sprintf(buf, "%X\n", UDP->cmos); 4100 } 4101 4102 - DEVICE_ATTR(cmos, S_IRUGO, floppy_cmos_show, NULL); 4103 4104 static void floppy_device_release(struct device *dev) 4105 { ··· 4147 { 4148 int i, unit, drive; 4149 int err, dr; 4150 4151 #if defined(CONFIG_PPC) 4152 if (check_legacy_ioport(FDC1)) ··· 4329 platform_device_unregister(&floppy_device[drive]); 4330 out_flush_work: 4331 flush_scheduled_work(); 4332 - if (usage_count) 4333 floppy_release_irq_and_dma(); 4334 out_unreg_region: 4335 blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256); ··· 4345 } 4346 return err; 4347 } 4348 - 4349 - static DEFINE_SPINLOCK(floppy_usage_lock); 4350 4351 static const struct 
io_region { 4352 int offset; ··· 4391 4392 static int floppy_grab_irq_and_dma(void) 4393 { 4394 - unsigned long flags; 4395 - 4396 - spin_lock_irqsave(&floppy_usage_lock, flags); 4397 - if (usage_count++) { 4398 - spin_unlock_irqrestore(&floppy_usage_lock, flags); 4399 return 0; 4400 - } 4401 - spin_unlock_irqrestore(&floppy_usage_lock, flags); 4402 4403 /* 4404 * We might have scheduled a free_irq(), wait it to ··· 4403 if (fd_request_irq()) { 4404 DPRINT("Unable to grab IRQ%d for the floppy driver\n", 4405 FLOPPY_IRQ); 4406 - spin_lock_irqsave(&floppy_usage_lock, flags); 4407 - usage_count--; 4408 - spin_unlock_irqrestore(&floppy_usage_lock, flags); 4409 return -1; 4410 } 4411 if (fd_request_dma()) { ··· 4413 use_virtual_dma = can_use_virtual_dma = 1; 4414 if (!(can_use_virtual_dma & 1)) { 4415 fd_free_irq(); 4416 - spin_lock_irqsave(&floppy_usage_lock, flags); 4417 - usage_count--; 4418 - spin_unlock_irqrestore(&floppy_usage_lock, flags); 4419 return -1; 4420 } 4421 } ··· 4448 fd_free_dma(); 4449 while (--fdc >= 0) 4450 floppy_release_regions(fdc); 4451 - spin_lock_irqsave(&floppy_usage_lock, flags); 4452 - usage_count--; 4453 - spin_unlock_irqrestore(&floppy_usage_lock, flags); 4454 return -1; 4455 } 4456 ··· 4460 #endif 4461 long tmpsize; 4462 unsigned long tmpaddr; 4463 - unsigned long flags; 4464 4465 - spin_lock_irqsave(&floppy_usage_lock, flags); 4466 - if (--usage_count) { 4467 - spin_unlock_irqrestore(&floppy_usage_lock, flags); 4468 return; 4469 - } 4470 - spin_unlock_irqrestore(&floppy_usage_lock, flags); 4471 if (irqdma_allocated) { 4472 fd_disable_dma(); 4473 fd_free_dma(); ··· 4556 del_timer_sync(&fd_timer); 4557 blk_cleanup_queue(floppy_queue); 4558 4559 - if (usage_count) 4560 floppy_release_irq_and_dma(); 4561 4562 /* eject disk, if any */
··· 178 #include <linux/slab.h> 179 #include <linux/mm.h> 180 #include <linux/bio.h> 181 + #include <linux/smp_lock.h> 182 #include <linux/string.h> 183 #include <linux/jiffies.h> 184 #include <linux/fcntl.h> ··· 514 static DECLARE_WAIT_QUEUE_HEAD(fdc_wait); 515 static DECLARE_WAIT_QUEUE_HEAD(command_done); 516 517 /* Errors during formatting are counted here. */ 518 static int format_errors; 519 ··· 539 540 static int *errors; 541 typedef void (*done_f)(int); 542 + static const struct cont_t { 543 void (*interrupt)(void); 544 /* this is called after the interrupt of the 545 * main command */ ··· 578 #define NEED_1_RECAL -2 579 #define NEED_2_RECAL -3 580 581 + static atomic_t usage_count = ATOMIC_INIT(0); 582 583 /* buffer related variables */ 584 static int buffer_track = -1; ··· 858 } 859 860 /* locks the driver */ 861 + static int lock_fdc(int drive, bool interruptible) 862 { 863 + if (WARN(atomic_read(&usage_count) == 0, 864 + "Trying to lock fdc while usage count=0\n")) 865 return -1; 866 867 + if (wait_event_interruptible(fdc_wait, !test_and_set_bit(0, &fdc_busy))) 868 + return -EINTR; 869 870 command_status = FD_COMMAND_NONE; 871 872 __reschedule_timeout(drive, "lock fdc"); ··· 895 return 0; 896 } 897 898 /* unlocks the driver */ 899 + static void unlock_fdc(void) 900 { 901 unsigned long flags; 902 ··· 1224 /* Set perpendicular mode as required, based on data rate, if supported. 1225 * 82077 Now tested. 1Mbps data rate only possible with 82077-1. 1226 */ 1227 + static void perpendicular_mode(void) 1228 { 1229 unsigned char perp_mode; 1230 ··· 1995 wake_up(&command_done); 1996 } 1997 1998 + static const struct cont_t wakeup_cont = { 1999 .interrupt = empty, 2000 .redo = do_wakeup, 2001 .error = empty, 2002 .done = (done_f)empty 2003 }; 2004 2005 + static const struct cont_t intr_cont = { 2006 .interrupt = empty, 2007 .redo = process_fd_request, 2008 .error = empty, ··· 2015 2016 schedule_bh(handler); 2017 2018 + if (interruptible) 2019 + wait_event_interruptible(command_done, command_status >= 2); 2020 + else 2021 + wait_event(command_done, command_status >= 2); 2022 2023 if (command_status < 2) { 2024 cancel_activity(); ··· 2223 debugt(__func__, "queue format request"); 2224 } 2225 2226 + static const struct cont_t format_cont = { 2227 .interrupt = format_interrupt, 2228 .redo = redo_format, 2229 .error = bad_flp_intr, ··· 2583 int tracksize; 2584 int ssize; 2585 2586 + if (WARN(max_buffer_sectors == 0, "VFS: Block I/O scheduled on unopened device\n")) 2587 return 0; 2588 2589 set_fdc((long)current_req->rq_disk->private_data); 2590 ··· 2921 return; 2922 } 2923 2924 + static const struct cont_t rw_cont = { 2925 .interrupt = rw_interrupt, 2926 .redo = redo_fd_request, 2927 .error = bad_flp_intr, ··· 2936 2937 static void do_fd_request(struct request_queue *q) 2938 { 2939 + if (WARN(max_buffer_sectors == 0, 2940 + "VFS: %s called on non-open device\n", __func__)) 2941 return; 2942 2943 + if (WARN(atomic_read(&usage_count) == 0, 2944 + "warning: usage count=0, current_req=%p sect=%ld type=%x flags=%x\n", 2945 + current_req, (long)blk_rq_pos(current_req), current_req->cmd_type, 2946 + current_req->cmd_flags)) 2947 return; 2948 + 2949 if (test_bit(0, &fdc_busy)) { 2950 /* fdc busy, this new request will be treated when the 2951 current one is done */ ··· 2960 is_alive(__func__, ""); 2961 } 2962 2963 + static const struct cont_t poll_cont = { 2964 .interrupt = success_and_wakeup, 2965 .redo = floppy_ready, 2966 .error = generic_failure, ··· 2991 pr_info("weird, reset interrupt called\n"); 
2992 } 2993 2994 + static const struct cont_t reset_cont = { 2995 .interrupt = reset_intr, 2996 .redo = success_and_wakeup, 2997 .error = generic_failure, ··· 3033 return copy_from_user(address, param, size) ? -EFAULT : 0; 3034 } 3035 3036 + static const char *drive_name(int type, int drive) 3037 { 3038 struct floppy_struct *floppy; 3039 ··· 3096 generic_done(flag); 3097 } 3098 3099 + static const struct cont_t raw_cmd_cont = { 3100 .interrupt = success_and_wakeup, 3101 .redo = floppy_start, 3102 .error = generic_failure, 3103 .done = raw_cmd_done 3104 }; 3105 3106 + static int raw_cmd_copyout(int cmd, void __user *param, 3107 struct floppy_raw_cmd *ptr) 3108 { 3109 int ret; ··· 3148 } 3149 } 3150 3151 + static int raw_cmd_copyin(int cmd, void __user *param, 3152 struct floppy_raw_cmd **rcmd) 3153 { 3154 struct floppy_raw_cmd *ptr; ··· 3266 return 0; 3267 } 3268 3269 + static int set_geometry(unsigned int cmd, struct floppy_struct *g, 3270 int drive, int type, struct block_device *bdev) 3271 { 3272 int cnt; ··· 3337 } 3338 3339 /* handle obsolete ioctl's */ 3340 + static unsigned int ioctl_table[] = { 3341 FDCLRPRM, 3342 FDSETPRM, 3343 FDDEFPRM, ··· 3365 FDTWADDLE 3366 }; 3367 3368 + static int normalize_ioctl(unsigned int *cmd, int *size) 3369 { 3370 int i; 3371 ··· 3417 return 0; 3418 } 3419 3420 + static int fd_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, 3421 unsigned long param) 3422 { 3423 int drive = (long)bdev->bd_disk->private_data; ··· 3593 return 0; 3594 } 3595 3596 + static int fd_ioctl(struct block_device *bdev, fmode_t mode, 3597 + unsigned int cmd, unsigned long param) 3598 + { 3599 + int ret; 3600 + 3601 + lock_kernel(); 3602 + ret = fd_locked_ioctl(bdev, mode, cmd, param); 3603 + unlock_kernel(); 3604 + 3605 + return ret; 3606 + } 3607 + 3608 static void __init config_types(void) 3609 { 3610 bool has_drive = false; ··· 3649 { 3650 int drive = (long)disk->private_data; 3651 3652 + lock_kernel(); 3653 mutex_lock(&open_lock); 3654 if (UDRS->fd_ref < 0) 3655 UDRS->fd_ref = 0; ··· 3659 if (!UDRS->fd_ref) 3660 opened_bdev[drive] = NULL; 3661 mutex_unlock(&open_lock); 3662 + unlock_kernel(); 3663 3664 return 0; 3665 } ··· 3676 int res = -EBUSY; 3677 char *tmp; 3678 3679 + lock_kernel(); 3680 mutex_lock(&open_lock); 3681 old_dev = UDRS->fd_device; 3682 if (opened_bdev[drive] && opened_bdev[drive] != bdev) ··· 3752 goto out; 3753 } 3754 mutex_unlock(&open_lock); 3755 + unlock_kernel(); 3756 return 0; 3757 out: 3758 if (UDRS->fd_ref < 0) ··· 3762 opened_bdev[drive] = NULL; 3763 out2: 3764 mutex_unlock(&open_lock); 3765 + unlock_kernel(); 3766 return res; 3767 } 3768 ··· 3829 bio.bi_size = size; 3830 bio.bi_bdev = bdev; 3831 bio.bi_sector = 0; 3832 + bio.bi_flags = BIO_QUIET; 3833 init_completion(&complete); 3834 bio.bi_private = &complete; 3835 bio.bi_end_io = floppy_rb0_complete; ··· 3857 if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) || 3858 test_bit(FD_VERIFY_BIT, &UDRS->flags) || 3859 test_bit(drive, &fake_change) || NO_GEOM) { 3860 + if (WARN(atomic_read(&usage_count) == 0, 3861 + "VFS: revalidate called on non-open device.\n")) 3862 return -EFAULT; 3863 + 3864 lock_fdc(drive, false); 3865 cf = (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) || 3866 test_bit(FD_VERIFY_BIT, &UDRS->flags)); ··· 3893 .owner = THIS_MODULE, 3894 .open = floppy_open, 3895 .release = floppy_release, 3896 + .ioctl = fd_ioctl, 3897 .getgeo = fd_getgeo, 3898 .media_changed = check_floppy_change, 3899 .revalidate_disk = floppy_revalidate, ··· 4126 return sprintf(buf, "%X\n", 
UDP->cmos); 4127 } 4128 4129 + static DEVICE_ATTR(cmos, S_IRUGO, floppy_cmos_show, NULL); 4130 4131 static void floppy_device_release(struct device *dev) 4132 { ··· 4174 { 4175 int i, unit, drive; 4176 int err, dr; 4177 + 4178 + set_debugt(); 4179 + interruptjiffies = resultjiffies = jiffies; 4180 4181 #if defined(CONFIG_PPC) 4182 if (check_legacy_ioport(FDC1)) ··· 4353 platform_device_unregister(&floppy_device[drive]); 4354 out_flush_work: 4355 flush_scheduled_work(); 4356 + if (atomic_read(&usage_count)) 4357 floppy_release_irq_and_dma(); 4358 out_unreg_region: 4359 blk_unregister_region(MKDEV(FLOPPY_MAJOR, 0), 256); ··· 4369 } 4370 return err; 4371 } 4372 4373 static const struct io_region { 4374 int offset; ··· 4417 4418 static int floppy_grab_irq_and_dma(void) 4419 { 4420 + if (atomic_inc_return(&usage_count) > 1) 4421 return 0; 4422 4423 /* 4424 * We might have scheduled a free_irq(), wait it to ··· 4435 if (fd_request_irq()) { 4436 DPRINT("Unable to grab IRQ%d for the floppy driver\n", 4437 FLOPPY_IRQ); 4438 + atomic_dec(&usage_count); 4439 return -1; 4440 } 4441 if (fd_request_dma()) { ··· 4447 use_virtual_dma = can_use_virtual_dma = 1; 4448 if (!(can_use_virtual_dma & 1)) { 4449 fd_free_irq(); 4450 + atomic_dec(&usage_count); 4451 return -1; 4452 } 4453 } ··· 4484 fd_free_dma(); 4485 while (--fdc >= 0) 4486 floppy_release_regions(fdc); 4487 + atomic_dec(&usage_count); 4488 return -1; 4489 } 4490 ··· 4498 #endif 4499 long tmpsize; 4500 unsigned long tmpaddr; 4501 4502 + if (!atomic_dec_and_test(&usage_count)) 4503 return; 4504 + 4505 if (irqdma_allocated) { 4506 fd_disable_dma(); 4507 fd_free_dma(); ··· 4598 del_timer_sync(&fd_timer); 4599 blk_cleanup_queue(floppy_queue); 4600 4601 + if (atomic_read(&usage_count)) 4602 floppy_release_irq_and_dma(); 4603 4604 /* eject disk, if any */
+1 -1
drivers/block/hd.c
··· 627 req_data_dir(req) == READ ? "read" : "writ", 628 cyl, head, sec, nsect, req->buffer); 629 #endif 630 - if (blk_fs_request(req)) { 631 switch (rq_data_dir(req)) { 632 case READ: 633 hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_READ,
··· 627 req_data_dir(req) == READ ? "read" : "writ", 628 cyl, head, sec, nsect, req->buffer); 629 #endif 630 + if (req->cmd_type == REQ_TYPE_FS) { 631 switch (rq_data_dir(req)) { 632 case READ: 633 hd_out(disk, nsect, sec, head, cyl, ATA_CMD_PIO_READ,
+7 -2
drivers/block/loop.c
··· 67 #include <linux/compat.h> 68 #include <linux/suspend.h> 69 #include <linux/freezer.h> 70 #include <linux/writeback.h> 71 #include <linux/buffer_head.h> /* for invalidate_bdev() */ 72 #include <linux/completion.h> ··· 477 pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset; 478 479 if (bio_rw(bio) == WRITE) { 480 - bool barrier = bio_rw_flagged(bio, BIO_RW_BARRIER); 481 struct file *file = lo->lo_backing_file; 482 483 if (barrier) { ··· 832 lo->lo_queue->unplug_fn = loop_unplug; 833 834 if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync) 835 - blk_queue_ordered(lo->lo_queue, QUEUE_ORDERED_DRAIN, NULL); 836 837 set_capacity(lo->lo_disk, size); 838 bd_set_size(bdev, size << 9); ··· 1409 { 1410 struct loop_device *lo = bdev->bd_disk->private_data; 1411 1412 mutex_lock(&lo->lo_ctl_mutex); 1413 lo->lo_refcnt++; 1414 mutex_unlock(&lo->lo_ctl_mutex); 1415 1416 return 0; 1417 } ··· 1423 struct loop_device *lo = disk->private_data; 1424 int err; 1425 1426 mutex_lock(&lo->lo_ctl_mutex); 1427 1428 if (--lo->lo_refcnt) ··· 1448 out: 1449 mutex_unlock(&lo->lo_ctl_mutex); 1450 out_unlocked: 1451 return 0; 1452 } 1453
··· 67 #include <linux/compat.h> 68 #include <linux/suspend.h> 69 #include <linux/freezer.h> 70 + #include <linux/smp_lock.h> 71 #include <linux/writeback.h> 72 #include <linux/buffer_head.h> /* for invalidate_bdev() */ 73 #include <linux/completion.h> ··· 476 pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset; 477 478 if (bio_rw(bio) == WRITE) { 479 + bool barrier = (bio->bi_rw & REQ_HARDBARRIER); 480 struct file *file = lo->lo_backing_file; 481 482 if (barrier) { ··· 831 lo->lo_queue->unplug_fn = loop_unplug; 832 833 if (!(lo_flags & LO_FLAGS_READ_ONLY) && file->f_op->fsync) 834 + blk_queue_ordered(lo->lo_queue, QUEUE_ORDERED_DRAIN); 835 836 set_capacity(lo->lo_disk, size); 837 bd_set_size(bdev, size << 9); ··· 1408 { 1409 struct loop_device *lo = bdev->bd_disk->private_data; 1410 1411 + lock_kernel(); 1412 mutex_lock(&lo->lo_ctl_mutex); 1413 lo->lo_refcnt++; 1414 mutex_unlock(&lo->lo_ctl_mutex); 1415 + unlock_kernel(); 1416 1417 return 0; 1418 } ··· 1420 struct loop_device *lo = disk->private_data; 1421 int err; 1422 1423 + lock_kernel(); 1424 mutex_lock(&lo->lo_ctl_mutex); 1425 1426 if (--lo->lo_refcnt) ··· 1444 out: 1445 mutex_unlock(&lo->lo_ctl_mutex); 1446 out_unlocked: 1447 + lock_kernel(); 1448 return 0; 1449 } 1450
+2 -2
drivers/block/mg_disk.c
··· 670 break; 671 } 672 673 - if (unlikely(!blk_fs_request(host->req))) { 674 mg_end_request_cur(host, -EIO); 675 continue; 676 } ··· 756 continue; 757 } 758 759 - if (unlikely(!blk_fs_request(req))) { 760 mg_end_request_cur(host, -EIO); 761 continue; 762 }
··· 670 break; 671 } 672 673 + if (unlikely(host->req->cmd_type != REQ_TYPE_FS)) { 674 mg_end_request_cur(host, -EIO); 675 continue; 676 } ··· 756 continue; 757 } 758 759 + if (unlikely(req->cmd_type != REQ_TYPE_FS)) { 760 mg_end_request_cur(host, -EIO); 761 continue; 762 }
+5 -2
drivers/block/nbd.c
··· 24 #include <linux/errno.h> 25 #include <linux/file.h> 26 #include <linux/ioctl.h> 27 #include <linux/compiler.h> 28 #include <linux/err.h> 29 #include <linux/kernel.h> ··· 449 450 static void nbd_handle_req(struct nbd_device *lo, struct request *req) 451 { 452 - if (!blk_fs_request(req)) 453 goto error_out; 454 455 nbd_cmd(req) = NBD_CMD_READ; ··· 717 dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n", 718 lo->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg); 719 720 mutex_lock(&lo->tx_lock); 721 error = __nbd_ioctl(bdev, lo, cmd, arg); 722 mutex_unlock(&lo->tx_lock); 723 724 return error; 725 } ··· 729 static const struct block_device_operations nbd_fops = 730 { 731 .owner = THIS_MODULE, 732 - .locked_ioctl = nbd_ioctl, 733 }; 734 735 /*
··· 24 #include <linux/errno.h> 25 #include <linux/file.h> 26 #include <linux/ioctl.h> 27 + #include <linux/smp_lock.h> 28 #include <linux/compiler.h> 29 #include <linux/err.h> 30 #include <linux/kernel.h> ··· 448 449 static void nbd_handle_req(struct nbd_device *lo, struct request *req) 450 { 451 + if (req->cmd_type != REQ_TYPE_FS) 452 goto error_out; 453 454 nbd_cmd(req) = NBD_CMD_READ; ··· 716 dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n", 717 lo->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg); 718 719 + lock_kernel(); 720 mutex_lock(&lo->tx_lock); 721 error = __nbd_ioctl(bdev, lo, cmd, arg); 722 mutex_unlock(&lo->tx_lock); 723 + unlock_kernel(); 724 725 return error; 726 } ··· 726 static const struct block_device_operations nbd_fops = 727 { 728 .owner = THIS_MODULE, 729 + .ioctl = nbd_ioctl, 730 }; 731 732 /*
+4 -11
drivers/block/osdblk.c
··· 310 break; 311 312 /* filter out block requests we don't understand */ 313 - if (!blk_fs_request(rq) && !blk_barrier_rq(rq)) { 314 blk_end_request_all(rq, 0); 315 continue; 316 } ··· 323 * driver-specific, etc. 324 */ 325 326 - do_flush = (rq->special == (void *) 0xdeadbeefUL); 327 do_write = (rq_data_dir(rq) == WRITE); 328 329 if (!do_flush) { /* osd_flush does not use a bio */ ··· 378 */ 379 rq->special = NULL; 380 } 381 - } 382 - 383 - static void osdblk_prepare_flush(struct request_queue *q, struct request *rq) 384 - { 385 - /* add driver-specific marker, to indicate that this request 386 - * is a flush command 387 - */ 388 - rq->special = (void *) 0xdeadbeefUL; 389 } 390 391 static void osdblk_free_disk(struct osdblk_device *osdev) ··· 439 blk_queue_stack_limits(q, osd_request_queue(osdev->osd)); 440 441 blk_queue_prep_rq(q, blk_queue_start_tag); 442 - blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, osdblk_prepare_flush); 443 444 disk->queue = q; 445
··· 310 break; 311 312 /* filter out block requests we don't understand */ 313 + if (rq->cmd_type != REQ_TYPE_FS && 314 + !(rq->cmd_flags & REQ_HARDBARRIER)) { 315 blk_end_request_all(rq, 0); 316 continue; 317 } ··· 322 * driver-specific, etc. 323 */ 324 325 + do_flush = rq->cmd_flags & REQ_FLUSH; 326 do_write = (rq_data_dir(rq) == WRITE); 327 328 if (!do_flush) { /* osd_flush does not use a bio */ ··· 377 */ 378 rq->special = NULL; 379 } 380 } 381 382 static void osdblk_free_disk(struct osdblk_device *osdev) ··· 446 blk_queue_stack_limits(q, osd_request_queue(osdev->osd)); 447 448 blk_queue_prep_rq(q, blk_queue_start_tag); 449 + blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH); 450 451 disk->queue = q; 452
+18 -3
drivers/block/paride/pcd.c
··· 138 #include <linux/cdrom.h> 139 #include <linux/spinlock.h> 140 #include <linux/blkdev.h> 141 #include <asm/uaccess.h> 142 143 static DEFINE_SPINLOCK(pcd_lock); ··· 225 static int pcd_block_open(struct block_device *bdev, fmode_t mode) 226 { 227 struct pcd_unit *cd = bdev->bd_disk->private_data; 228 - return cdrom_open(&cd->info, bdev, mode); 229 } 230 231 static int pcd_block_release(struct gendisk *disk, fmode_t mode) 232 { 233 struct pcd_unit *cd = disk->private_data; 234 cdrom_release(&cd->info, mode); 235 return 0; 236 } 237 ··· 247 unsigned cmd, unsigned long arg) 248 { 249 struct pcd_unit *cd = bdev->bd_disk->private_data; 250 - return cdrom_ioctl(&cd->info, bdev, mode, cmd, arg); 251 } 252 253 static int pcd_block_media_changed(struct gendisk *disk) ··· 266 .owner = THIS_MODULE, 267 .open = pcd_block_open, 268 .release = pcd_block_release, 269 - .locked_ioctl = pcd_block_ioctl, 270 .media_changed = pcd_block_media_changed, 271 }; 272
··· 138 #include <linux/cdrom.h> 139 #include <linux/spinlock.h> 140 #include <linux/blkdev.h> 141 + #include <linux/smp_lock.h> 142 #include <asm/uaccess.h> 143 144 static DEFINE_SPINLOCK(pcd_lock); ··· 224 static int pcd_block_open(struct block_device *bdev, fmode_t mode) 225 { 226 struct pcd_unit *cd = bdev->bd_disk->private_data; 227 + int ret; 228 + 229 + lock_kernel(); 230 + ret = cdrom_open(&cd->info, bdev, mode); 231 + unlock_kernel(); 232 + 233 + return ret; 234 } 235 236 static int pcd_block_release(struct gendisk *disk, fmode_t mode) 237 { 238 struct pcd_unit *cd = disk->private_data; 239 + lock_kernel(); 240 cdrom_release(&cd->info, mode); 241 + unlock_kernel(); 242 return 0; 243 } 244 ··· 238 unsigned cmd, unsigned long arg) 239 { 240 struct pcd_unit *cd = bdev->bd_disk->private_data; 241 + int ret; 242 + 243 + lock_kernel(); 244 + ret = cdrom_ioctl(&cd->info, bdev, mode, cmd, arg); 245 + unlock_kernel(); 246 + 247 + return ret; 248 } 249 250 static int pcd_block_media_changed(struct gendisk *disk) ··· 251 .owner = THIS_MODULE, 252 .open = pcd_block_open, 253 .release = pcd_block_release, 254 + .ioctl = pcd_block_ioctl, 255 .media_changed = pcd_block_media_changed, 256 }; 257
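The pcd.c hunk above shows the shape repeated across this series: with the block core no longer taking the big kernel lock on the driver's behalf, the method moves from .locked_ioctl to plain .ioctl and the driver brackets the old handler with lock_kernel()/unlock_kernel() itself, passing the return value through. Below is a minimal userspace sketch of that wrapper shape, with a pthread mutex standing in for the BKL; every name in it is illustrative, not a kernel API.

#include <pthread.h>
#include <stdio.h>

/* Stand-in for the big kernel lock. */
static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

/* The pre-existing handler, written assuming callers are serialized. */
static int cd_ioctl_locked(unsigned int cmd, unsigned long arg)
{
	printf("handling cmd %u, arg %lu (callers serialized)\n", cmd, arg);
	return 0;
}

/*
 * New entry point: the core no longer serializes callers, so the
 * driver takes the coarse lock around the old handler and passes the
 * return value through -- the same shape as pcd_block_ioctl(),
 * fd_ioctl() and the various *_unlocked_open() wrappers in the diff.
 */
static int cd_ioctl(unsigned int cmd, unsigned long arg)
{
	int ret;

	pthread_mutex_lock(&big_lock);
	ret = cd_ioctl_locked(cmd, arg);
	pthread_mutex_unlock(&big_lock);

	return ret;
}

int main(void)
{
	return cd_ioctl(0x1234, 42);
}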
+9 -2
drivers/block/paride/pd.c
··· 153 #include <linux/blkdev.h> 154 #include <linux/blkpg.h> 155 #include <linux/kernel.h> 156 #include <asm/uaccess.h> 157 #include <linux/workqueue.h> 158 ··· 440 441 static enum action do_pd_io_start(void) 442 { 443 - if (blk_special_request(pd_req)) { 444 phase = pd_special; 445 return pd_special(); 446 } ··· 736 { 737 struct pd_unit *disk = bdev->bd_disk->private_data; 738 739 disk->access++; 740 741 if (disk->removable) { 742 pd_special_command(disk, pd_media_check); 743 pd_special_command(disk, pd_door_lock); 744 } 745 return 0; 746 } 747 ··· 771 772 switch (cmd) { 773 case CDROMEJECT: 774 if (disk->access == 1) 775 pd_special_command(disk, pd_eject); 776 return 0; 777 default: 778 return -EINVAL; ··· 785 { 786 struct pd_unit *disk = p->private_data; 787 788 if (!--disk->access && disk->removable) 789 pd_special_command(disk, pd_door_unlock); 790 791 return 0; 792 } ··· 819 .owner = THIS_MODULE, 820 .open = pd_open, 821 .release = pd_release, 822 - .locked_ioctl = pd_ioctl, 823 .getgeo = pd_getgeo, 824 .media_changed = pd_check_media, 825 .revalidate_disk= pd_revalidate
··· 153 #include <linux/blkdev.h> 154 #include <linux/blkpg.h> 155 #include <linux/kernel.h> 156 + #include <linux/smp_lock.h> 157 #include <asm/uaccess.h> 158 #include <linux/workqueue.h> 159 ··· 439 440 static enum action do_pd_io_start(void) 441 { 442 + if (pd_req->cmd_type == REQ_TYPE_SPECIAL) { 443 phase = pd_special; 444 return pd_special(); 445 } ··· 735 { 736 struct pd_unit *disk = bdev->bd_disk->private_data; 737 738 + lock_kernel(); 739 disk->access++; 740 741 if (disk->removable) { 742 pd_special_command(disk, pd_media_check); 743 pd_special_command(disk, pd_door_lock); 744 } 745 + unlock_kernel(); 746 return 0; 747 } 748 ··· 768 769 switch (cmd) { 770 case CDROMEJECT: 771 + lock_kernel(); 772 if (disk->access == 1) 773 pd_special_command(disk, pd_eject); 774 + unlock_kernel(); 775 return 0; 776 default: 777 return -EINVAL; ··· 780 { 781 struct pd_unit *disk = p->private_data; 782 783 + lock_kernel(); 784 if (!--disk->access && disk->removable) 785 pd_special_command(disk, pd_door_unlock); 786 + unlock_kernel(); 787 788 return 0; 789 } ··· 812 .owner = THIS_MODULE, 813 .open = pd_open, 814 .release = pd_release, 815 + .ioctl = pd_ioctl, 816 .getgeo = pd_getgeo, 817 .media_changed = pd_check_media, 818 .revalidate_disk= pd_revalidate
+20 -6
drivers/block/paride/pf.c
··· 152 #include <linux/spinlock.h> 153 #include <linux/blkdev.h> 154 #include <linux/blkpg.h> 155 #include <asm/uaccess.h> 156 157 static DEFINE_SPINLOCK(pf_spin_lock); ··· 267 .owner = THIS_MODULE, 268 .open = pf_open, 269 .release = pf_release, 270 - .locked_ioctl = pf_ioctl, 271 .getgeo = pf_getgeo, 272 .media_changed = pf_check_media, 273 }; ··· 300 static int pf_open(struct block_device *bdev, fmode_t mode) 301 { 302 struct pf_unit *pf = bdev->bd_disk->private_data; 303 304 pf_identify(pf); 305 306 if (pf->media_status == PF_NM) 307 - return -ENODEV; 308 309 if ((pf->media_status == PF_RO) && (mode & FMODE_WRITE)) 310 - return -EROFS; 311 312 pf->access++; 313 if (pf->removable) 314 pf_lock(pf, 1); 315 - 316 - return 0; 317 } 318 319 static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo) ··· 349 350 if (pf->access != 1) 351 return -EBUSY; 352 pf_eject(pf); 353 return 0; 354 } 355 ··· 360 { 361 struct pf_unit *pf = disk->private_data; 362 363 - if (pf->access <= 0) 364 return -EINVAL; 365 366 pf->access--; 367 368 if (!pf->access && pf->removable) 369 pf_lock(pf, 0); 370 371 return 0; 372 373 }
··· 152 #include <linux/spinlock.h> 153 #include <linux/blkdev.h> 154 #include <linux/blkpg.h> 155 + #include <linux/smp_lock.h> 156 #include <asm/uaccess.h> 157 158 static DEFINE_SPINLOCK(pf_spin_lock); ··· 266 .owner = THIS_MODULE, 267 .open = pf_open, 268 .release = pf_release, 269 + .ioctl = pf_ioctl, 270 .getgeo = pf_getgeo, 271 .media_changed = pf_check_media, 272 }; ··· 299 static int pf_open(struct block_device *bdev, fmode_t mode) 300 { 301 struct pf_unit *pf = bdev->bd_disk->private_data; 302 + int ret; 303 304 + lock_kernel(); 305 pf_identify(pf); 306 307 + ret = -ENODEV; 308 if (pf->media_status == PF_NM) 309 + goto out; 310 311 + ret = -EROFS; 312 if ((pf->media_status == PF_RO) && (mode & FMODE_WRITE)) 313 + goto out; 314 315 + ret = 0; 316 pf->access++; 317 if (pf->removable) 318 pf_lock(pf, 1); 319 + out: 320 + unlock_kernel(); 321 + return ret; 322 } 323 324 static int pf_getgeo(struct block_device *bdev, struct hd_geometry *geo) ··· 342 343 if (pf->access != 1) 344 return -EBUSY; 345 + lock_kernel(); 346 pf_eject(pf); 347 + unlock_kernel(); 348 + 349 return 0; 350 } 351 ··· 350 { 351 struct pf_unit *pf = disk->private_data; 352 353 + lock_kernel(); 354 + if (pf->access <= 0) { 355 + unlock_kernel(); 356 return -EINVAL; 357 + } 358 359 pf->access--; 360 361 if (!pf->access && pf->removable) 362 pf_lock(pf, 0); 363 364 + unlock_kernel(); 365 return 0; 366 367 }
+15 -5
drivers/block/pktcdvd.c
··· 57 #include <linux/seq_file.h> 58 #include <linux/miscdevice.h> 59 #include <linux/freezer.h> 60 #include <linux/mutex.h> 61 #include <linux/slab.h> 62 #include <scsi/scsi_cmnd.h> ··· 1222 pkt->bio->bi_flags = 1 << BIO_UPTODATE; 1223 pkt->bio->bi_idx = 0; 1224 1225 - BUG_ON(pkt->bio->bi_rw != (1 << BIO_RW)); 1226 BUG_ON(pkt->bio->bi_vcnt != pkt->frames); 1227 BUG_ON(pkt->bio->bi_size != pkt->frames * CD_FRAMESIZE); 1228 BUG_ON(pkt->bio->bi_end_io != pkt_end_io_packet_write); ··· 2383 2384 VPRINTK(DRIVER_NAME": entering open\n"); 2385 2386 mutex_lock(&ctl_mutex); 2387 pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev)); 2388 if (!pd) { ··· 2411 } 2412 2413 mutex_unlock(&ctl_mutex); 2414 return 0; 2415 2416 out_dec: ··· 2419 out: 2420 VPRINTK(DRIVER_NAME": failed open (%d)\n", ret); 2421 mutex_unlock(&ctl_mutex); 2422 return ret; 2423 } 2424 ··· 2428 struct pktcdvd_device *pd = disk->private_data; 2429 int ret = 0; 2430 2431 mutex_lock(&ctl_mutex); 2432 pd->refcnt--; 2433 BUG_ON(pd->refcnt < 0); ··· 2437 pkt_release_dev(pd, flush); 2438 } 2439 mutex_unlock(&ctl_mutex); 2440 return ret; 2441 } 2442 ··· 2768 static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) 2769 { 2770 struct pktcdvd_device *pd = bdev->bd_disk->private_data; 2771 2772 VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd, 2773 MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev)); 2774 2775 switch (cmd) { 2776 case CDROMEJECT: 2777 /* ··· 2791 case CDROM_LAST_WRITTEN: 2792 case CDROM_SEND_PACKET: 2793 case SCSI_IOCTL_SEND_COMMAND: 2794 - return __blkdev_driver_ioctl(pd->bdev, mode, cmd, arg); 2795 2796 default: 2797 VPRINTK(DRIVER_NAME": Unknown ioctl for %s (%x)\n", pd->name, cmd); 2798 - return -ENOTTY; 2799 } 2800 2801 - return 0; 2802 } 2803 2804 static int pkt_media_changed(struct gendisk *disk) ··· 2822 .owner = THIS_MODULE, 2823 .open = pkt_open, 2824 .release = pkt_close, 2825 - .locked_ioctl = pkt_ioctl, 2826 .media_changed = pkt_media_changed, 2827 }; 2828
··· 57 #include <linux/seq_file.h> 58 #include <linux/miscdevice.h> 59 #include <linux/freezer.h> 60 + #include <linux/smp_lock.h> 61 #include <linux/mutex.h> 62 #include <linux/slab.h> 63 #include <scsi/scsi_cmnd.h> ··· 1221 pkt->bio->bi_flags = 1 << BIO_UPTODATE; 1222 pkt->bio->bi_idx = 0; 1223 1224 + BUG_ON(pkt->bio->bi_rw != REQ_WRITE); 1225 BUG_ON(pkt->bio->bi_vcnt != pkt->frames); 1226 BUG_ON(pkt->bio->bi_size != pkt->frames * CD_FRAMESIZE); 1227 BUG_ON(pkt->bio->bi_end_io != pkt_end_io_packet_write); ··· 2382 2383 VPRINTK(DRIVER_NAME": entering open\n"); 2384 2385 + lock_kernel(); 2386 mutex_lock(&ctl_mutex); 2387 pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev)); 2388 if (!pd) { ··· 2409 } 2410 2411 mutex_unlock(&ctl_mutex); 2412 + unlock_kernel(); 2413 return 0; 2414 2415 out_dec: ··· 2416 out: 2417 VPRINTK(DRIVER_NAME": failed open (%d)\n", ret); 2418 mutex_unlock(&ctl_mutex); 2419 + unlock_kernel(); 2420 return ret; 2421 } 2422 ··· 2424 struct pktcdvd_device *pd = disk->private_data; 2425 int ret = 0; 2426 2427 + lock_kernel(); 2428 mutex_lock(&ctl_mutex); 2429 pd->refcnt--; 2430 BUG_ON(pd->refcnt < 0); ··· 2432 pkt_release_dev(pd, flush); 2433 } 2434 mutex_unlock(&ctl_mutex); 2435 + unlock_kernel(); 2436 return ret; 2437 } 2438 ··· 2762 static int pkt_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) 2763 { 2764 struct pktcdvd_device *pd = bdev->bd_disk->private_data; 2765 + int ret; 2766 2767 VPRINTK("pkt_ioctl: cmd %x, dev %d:%d\n", cmd, 2768 MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev)); 2769 2770 + lock_kernel(); 2771 switch (cmd) { 2772 case CDROMEJECT: 2773 /* ··· 2783 case CDROM_LAST_WRITTEN: 2784 case CDROM_SEND_PACKET: 2785 case SCSI_IOCTL_SEND_COMMAND: 2786 + ret = __blkdev_driver_ioctl(pd->bdev, mode, cmd, arg); 2787 + break; 2788 2789 default: 2790 VPRINTK(DRIVER_NAME": Unknown ioctl for %s (%x)\n", pd->name, cmd); 2791 + ret = -ENOTTY; 2792 } 2793 + unlock_kernel(); 2794 2795 + return ret; 2796 } 2797 2798 static int pkt_media_changed(struct gendisk *disk) ··· 2812 .owner = THIS_MODULE, 2813 .open = pkt_open, 2814 .release = pkt_close, 2815 + .ioctl = pkt_ioctl, 2816 .media_changed = pkt_media_changed, 2817 }; 2818
+6 -19
drivers/block/ps3disk.c
··· 196 dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__); 197 198 while ((req = blk_fetch_request(q))) { 199 - if (blk_fs_request(req)) { 200 - if (ps3disk_submit_request_sg(dev, req)) 201 - break; 202 - } else if (req->cmd_type == REQ_TYPE_LINUX_BLOCK && 203 - req->cmd[0] == REQ_LB_OP_FLUSH) { 204 if (ps3disk_submit_flush_request(dev, req)) 205 break; 206 } else { 207 blk_dump_rq_flags(req, DEVICE_NAME " bad request"); ··· 256 return IRQ_HANDLED; 257 } 258 259 - if (req->cmd_type == REQ_TYPE_LINUX_BLOCK && 260 - req->cmd[0] == REQ_LB_OP_FLUSH) { 261 read = 0; 262 op = "flush"; 263 } else { ··· 396 return 0; 397 } 398 399 - static void ps3disk_prepare_flush(struct request_queue *q, struct request *req) 400 - { 401 - struct ps3_storage_device *dev = q->queuedata; 402 - 403 - dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__); 404 - 405 - req->cmd_type = REQ_TYPE_LINUX_BLOCK; 406 - req->cmd[0] = REQ_LB_OP_FLUSH; 407 - } 408 - 409 static unsigned long ps3disk_mask; 410 411 static DEFINE_MUTEX(ps3disk_mask_mutex); ··· 468 blk_queue_dma_alignment(queue, dev->blk_size-1); 469 blk_queue_logical_block_size(queue, dev->blk_size); 470 471 - blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH, 472 - ps3disk_prepare_flush); 473 474 blk_queue_max_segments(queue, -1); 475 blk_queue_max_segment_size(queue, dev->bounce_size);
··· 196 dev_dbg(&dev->sbd.core, "%s:%u\n", __func__, __LINE__); 197 198 while ((req = blk_fetch_request(q))) { 199 + if (req->cmd_flags & REQ_FLUSH) { 200 if (ps3disk_submit_flush_request(dev, req)) 201 + break; 202 + } else if (req->cmd_type == REQ_TYPE_FS) { 203 + if (ps3disk_submit_request_sg(dev, req)) 204 break; 205 } else { 206 blk_dump_rq_flags(req, DEVICE_NAME " bad request"); ··· 257 return IRQ_HANDLED; 258 } 259 260 + if (req->cmd_flags & REQ_FLUSH) { 261 read = 0; 262 op = "flush"; 263 } else { ··· 398 return 0; 399 } 400 401 static unsigned long ps3disk_mask; 402 403 static DEFINE_MUTEX(ps3disk_mask_mutex); ··· 480 blk_queue_dma_alignment(queue, dev->blk_size-1); 481 blk_queue_logical_block_size(queue, dev->blk_size); 482 483 + blk_queue_ordered(queue, QUEUE_ORDERED_DRAIN_FLUSH); 484 485 blk_queue_max_segments(queue, -1); 486 blk_queue_max_segment_size(queue, dev->bounce_size);
+18 -2
drivers/block/swim.c
··· 20 #include <linux/fd.h> 21 #include <linux/slab.h> 22 #include <linux/blkdev.h> 23 #include <linux/hdreg.h> 24 #include <linux/kernel.h> 25 #include <linux/delay.h> ··· 662 return err; 663 } 664 665 static int floppy_release(struct gendisk *disk, fmode_t mode) 666 { 667 struct floppy_state *fs = disk->private_data; 668 struct swim __iomem *base = fs->swd->base; 669 670 if (fs->ref_count < 0) 671 fs->ref_count = 0; 672 else if (fs->ref_count > 0) ··· 686 687 if (fs->ref_count == 0) 688 swim_motor(base, OFF); 689 690 return 0; 691 } ··· 704 case FDEJECT: 705 if (fs->ref_count != 1) 706 return -EBUSY; 707 err = floppy_eject(fs); 708 return err; 709 710 case FDGETPRM: ··· 767 768 static const struct block_device_operations floppy_fops = { 769 .owner = THIS_MODULE, 770 - .open = floppy_open, 771 .release = floppy_release, 772 - .locked_ioctl = floppy_ioctl, 773 .getgeo = floppy_getgeo, 774 .media_changed = floppy_check_change, 775 .revalidate_disk = floppy_revalidate,
··· 20 #include <linux/fd.h> 21 #include <linux/slab.h> 22 #include <linux/blkdev.h> 23 + #include <linux/smp_lock.h> 24 #include <linux/hdreg.h> 25 #include <linux/kernel.h> 26 #include <linux/delay.h> ··· 661 return err; 662 } 663 664 + static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode) 665 + { 666 + int ret; 667 + 668 + lock_kernel(); 669 + ret = floppy_open(bdev, mode); 670 + unlock_kernel(); 671 + 672 + return ret; 673 + } 674 + 675 static int floppy_release(struct gendisk *disk, fmode_t mode) 676 { 677 struct floppy_state *fs = disk->private_data; 678 struct swim __iomem *base = fs->swd->base; 679 680 + lock_kernel(); 681 if (fs->ref_count < 0) 682 fs->ref_count = 0; 683 else if (fs->ref_count > 0) ··· 673 674 if (fs->ref_count == 0) 675 swim_motor(base, OFF); 676 + unlock_kernel(); 677 678 return 0; 679 } ··· 690 case FDEJECT: 691 if (fs->ref_count != 1) 692 return -EBUSY; 693 + lock_kernel(); 694 err = floppy_eject(fs); 695 + unlock_kernel(); 696 return err; 697 698 case FDGETPRM: ··· 751 752 static const struct block_device_operations floppy_fops = { 753 .owner = THIS_MODULE, 754 + .open = floppy_unlocked_open, 755 .release = floppy_release, 756 + .ioctl = floppy_ioctl, 757 .getgeo = floppy_getgeo, 758 .media_changed = floppy_check_change, 759 .revalidate_disk = floppy_revalidate,
+29 -3
drivers/block/swim3.c
··· 25 #include <linux/ioctl.h> 26 #include <linux/blkdev.h> 27 #include <linux/interrupt.h> 28 #include <linux/module.h> 29 #include <linux/spinlock.h> 30 #include <asm/io.h> ··· 840 static struct floppy_struct floppy_type = 841 { 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,NULL }; /* 7 1.44MB 3.5" */ 842 843 - static int floppy_ioctl(struct block_device *bdev, fmode_t mode, 844 unsigned int cmd, unsigned long param) 845 { 846 struct floppy_state *fs = bdev->bd_disk->private_data; ··· 866 return 0; 867 } 868 return -ENOTTY; 869 } 870 871 static int floppy_open(struct block_device *bdev, fmode_t mode) ··· 949 return 0; 950 } 951 952 static int floppy_release(struct gendisk *disk, fmode_t mode) 953 { 954 struct floppy_state *fs = disk->private_data; 955 struct swim3 __iomem *sw = fs->swim3; 956 if (fs->ref_count > 0 && --fs->ref_count == 0) { 957 swim3_action(fs, MOTOR_OFF); 958 out_8(&sw->control_bic, 0xff); 959 swim3_select(fs, RELAX); 960 } 961 return 0; 962 } 963 ··· 1021 } 1022 1023 static const struct block_device_operations floppy_fops = { 1024 - .open = floppy_open, 1025 .release = floppy_release, 1026 - .locked_ioctl = floppy_ioctl, 1027 .media_changed = floppy_check_change, 1028 .revalidate_disk= floppy_revalidate, 1029 };
··· 25 #include <linux/ioctl.h> 26 #include <linux/blkdev.h> 27 #include <linux/interrupt.h> 28 + #include <linux/smp_lock.h> 29 #include <linux/module.h> 30 #include <linux/spinlock.h> 31 #include <asm/io.h> ··· 839 static struct floppy_struct floppy_type = 840 { 2880,18,2,80,0,0x1B,0x00,0xCF,0x6C,NULL }; /* 7 1.44MB 3.5" */ 841 842 + static int floppy_locked_ioctl(struct block_device *bdev, fmode_t mode, 843 unsigned int cmd, unsigned long param) 844 { 845 struct floppy_state *fs = bdev->bd_disk->private_data; ··· 865 return 0; 866 } 867 return -ENOTTY; 868 + } 869 + 870 + static int floppy_ioctl(struct block_device *bdev, fmode_t mode, 871 + unsigned int cmd, unsigned long param) 872 + { 873 + int ret; 874 + 875 + lock_kernel(); 876 + ret = floppy_locked_ioctl(bdev, mode, cmd, param); 877 + unlock_kernel(); 878 + 879 + return ret; 880 } 881 882 static int floppy_open(struct block_device *bdev, fmode_t mode) ··· 936 return 0; 937 } 938 939 + static int floppy_unlocked_open(struct block_device *bdev, fmode_t mode) 940 + { 941 + int ret; 942 + 943 + lock_kernel(); 944 + ret = floppy_open(bdev, mode); 945 + unlock_kernel(); 946 + 947 + return ret; 948 + } 949 + 950 static int floppy_release(struct gendisk *disk, fmode_t mode) 951 { 952 struct floppy_state *fs = disk->private_data; 953 struct swim3 __iomem *sw = fs->swim3; 954 + lock_kernel(); 955 if (fs->ref_count > 0 && --fs->ref_count == 0) { 956 swim3_action(fs, MOTOR_OFF); 957 out_8(&sw->control_bic, 0xff); 958 swim3_select(fs, RELAX); 959 } 960 + unlock_kernel(); 961 return 0; 962 } 963 ··· 995 } 996 997 static const struct block_device_operations floppy_fops = { 998 + .open = floppy_unlocked_open, 999 .release = floppy_release, 1000 + .ioctl = floppy_ioctl, 1001 .media_changed = floppy_check_change, 1002 .revalidate_disk= floppy_revalidate, 1003 };
+28 -7
drivers/block/ub.c
··· 28 #include <linux/timer.h> 29 #include <linux/scatterlist.h> 30 #include <linux/slab.h> 31 #include <scsi/scsi.h> 32 33 #define DRV_NAME "ub" ··· 649 return 0; 650 } 651 652 - if (lun->changed && !blk_pc_request(rq)) { 653 blk_start_request(rq); 654 ub_end_rq(rq, SAM_STAT_CHECK_CONDITION); 655 return 0; ··· 685 } 686 urq->nsg = n_elem; 687 688 - if (blk_pc_request(rq)) { 689 ub_cmd_build_packet(sc, lun, cmd, urq); 690 } else { 691 ub_cmd_build_block(sc, lun, cmd, urq); ··· 782 rq = urq->rq; 783 784 if (cmd->error == 0) { 785 - if (blk_pc_request(rq)) { 786 if (cmd->act_len >= rq->resid_len) 787 rq->resid_len = 0; 788 else ··· 796 } 797 } 798 } else { 799 - if (blk_pc_request(rq)) { 800 /* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */ 801 memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE); 802 rq->sense_len = UB_SENSE_SIZE; ··· 1711 return rc; 1712 } 1713 1714 /* 1715 */ 1716 static int ub_bd_release(struct gendisk *disk, fmode_t mode) ··· 1730 struct ub_lun *lun = disk->private_data; 1731 struct ub_dev *sc = lun->udev; 1732 1733 ub_put(sc); 1734 return 0; 1735 } 1736 ··· 1745 { 1746 struct gendisk *disk = bdev->bd_disk; 1747 void __user *usermem = (void __user *) arg; 1748 1749 - return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, usermem); 1750 } 1751 1752 /* ··· 1813 1814 static const struct block_device_operations ub_bd_fops = { 1815 .owner = THIS_MODULE, 1816 - .open = ub_bd_open, 1817 .release = ub_bd_release, 1818 - .locked_ioctl = ub_bd_ioctl, 1819 .media_changed = ub_bd_media_changed, 1820 .revalidate_disk = ub_bd_revalidate, 1821 };
··· 28 #include <linux/timer.h> 29 #include <linux/scatterlist.h> 30 #include <linux/slab.h> 31 + #include <linux/smp_lock.h> 32 #include <scsi/scsi.h> 33 34 #define DRV_NAME "ub" ··· 648 return 0; 649 } 650 651 + if (lun->changed && rq->cmd_type != REQ_TYPE_BLOCK_PC) { 652 blk_start_request(rq); 653 ub_end_rq(rq, SAM_STAT_CHECK_CONDITION); 654 return 0; ··· 684 } 685 urq->nsg = n_elem; 686 687 + if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 688 ub_cmd_build_packet(sc, lun, cmd, urq); 689 } else { 690 ub_cmd_build_block(sc, lun, cmd, urq); ··· 781 rq = urq->rq; 782 783 if (cmd->error == 0) { 784 + if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 785 if (cmd->act_len >= rq->resid_len) 786 rq->resid_len = 0; 787 else ··· 795 } 796 } 797 } else { 798 + if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 799 /* UB_SENSE_SIZE is smaller than SCSI_SENSE_BUFFERSIZE */ 800 memcpy(rq->sense, sc->top_sense, UB_SENSE_SIZE); 801 rq->sense_len = UB_SENSE_SIZE; ··· 1710 return rc; 1711 } 1712 1713 + static int ub_bd_unlocked_open(struct block_device *bdev, fmode_t mode) 1714 + { 1715 + int ret; 1716 + 1717 + lock_kernel(); 1718 + ret = ub_bd_open(bdev, mode); 1719 + unlock_kernel(); 1720 + 1721 + return ret; 1722 + } 1723 + 1724 + 1725 /* 1726 */ 1727 static int ub_bd_release(struct gendisk *disk, fmode_t mode) ··· 1717 struct ub_lun *lun = disk->private_data; 1718 struct ub_dev *sc = lun->udev; 1719 1720 + lock_kernel(); 1721 ub_put(sc); 1722 + unlock_kernel(); 1723 + 1724 return 0; 1725 } 1726 ··· 1729 { 1730 struct gendisk *disk = bdev->bd_disk; 1731 void __user *usermem = (void __user *) arg; 1732 + int ret; 1733 1734 + lock_kernel(); 1735 + ret = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, usermem); 1736 + unlock_kernel(); 1737 + 1738 + return ret; 1739 } 1740 1741 /* ··· 1792 1793 static const struct block_device_operations ub_bd_fops = { 1794 .owner = THIS_MODULE, 1795 + .open = ub_bd_unlocked_open, 1796 .release = ub_bd_release, 1797 + .ioctl = ub_bd_ioctl, 1798 .media_changed = ub_bd_media_changed, 1799 .revalidate_disk = ub_bd_revalidate, 1800 };
+1 -1
drivers/block/umem.c
··· 478 le32_to_cpu(desc->local_addr)>>9, 479 le32_to_cpu(desc->transfer_size)); 480 dump_dmastat(card, control); 481 - } else if (test_bit(BIO_RW, &bio->bi_rw) && 482 le32_to_cpu(desc->local_addr) >> 9 == 483 card->init_size) { 484 card->init_size += le32_to_cpu(desc->transfer_size) >> 9;
··· 478 le32_to_cpu(desc->local_addr)>>9, 479 le32_to_cpu(desc->transfer_size)); 480 dump_dmastat(card, control); 481 + } else if ((bio->bi_rw & REQ_WRITE) && 482 le32_to_cpu(desc->local_addr) >> 9 == 483 card->init_size) { 484 card->init_size += le32_to_cpu(desc->transfer_size) >> 9;
+19 -2
drivers/block/viodasd.c
··· 41 #include <linux/errno.h> 42 #include <linux/init.h> 43 #include <linux/string.h> 44 #include <linux/dma-mapping.h> 45 #include <linux/completion.h> 46 #include <linux/device.h> ··· 176 return 0; 177 } 178 179 /* 180 * External release entry point. 181 */ ··· 196 struct viodasd_device *d = disk->private_data; 197 HvLpEvent_Rc hvrc; 198 199 /* Send the event to OS/400. We DON'T expect a response */ 200 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp, 201 HvLpEvent_Type_VirtualIo, ··· 209 0, 0, 0); 210 if (hvrc != 0) 211 pr_warning("HV close call failed %d\n", (int)hvrc); 212 return 0; 213 } 214 ··· 236 */ 237 static const struct block_device_operations viodasd_fops = { 238 .owner = THIS_MODULE, 239 - .open = viodasd_open, 240 .release = viodasd_release, 241 .getgeo = viodasd_getgeo, 242 }; ··· 378 if (req == NULL) 379 return; 380 /* check that request contains a valid command */ 381 - if (!blk_fs_request(req)) { 382 viodasd_end_request(req, -EIO, blk_rq_sectors(req)); 383 continue; 384 }
··· 41 #include <linux/errno.h> 42 #include <linux/init.h> 43 #include <linux/string.h> 44 + #include <linux/smp_lock.h> 45 #include <linux/dma-mapping.h> 46 #include <linux/completion.h> 47 #include <linux/device.h> ··· 175 return 0; 176 } 177 178 + static int viodasd_unlocked_open(struct block_device *bdev, fmode_t mode) 179 + { 180 + int ret; 181 + 182 + lock_kernel(); 183 + ret = viodasd_open(bdev, mode); 184 + unlock_kernel(); 185 + 186 + return ret; 187 + } 188 + 189 + 190 /* 191 * External release entry point. 192 */ ··· 183 struct viodasd_device *d = disk->private_data; 184 HvLpEvent_Rc hvrc; 185 186 + lock_kernel(); 187 /* Send the event to OS/400. We DON'T expect a response */ 188 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp, 189 HvLpEvent_Type_VirtualIo, ··· 195 0, 0, 0); 196 if (hvrc != 0) 197 pr_warning("HV close call failed %d\n", (int)hvrc); 198 + 199 + unlock_kernel(); 200 + 201 return 0; 202 } 203 ··· 219 */ 220 static const struct block_device_operations viodasd_fops = { 221 .owner = THIS_MODULE, 222 + .open = viodasd_unlocked_open, 223 .release = viodasd_release, 224 .getgeo = viodasd_getgeo, 225 }; ··· 361 if (req == NULL) 362 return; 363 /* check that request contains a valid command */ 364 + if (req->cmd_type != REQ_TYPE_FS) { 365 viodasd_end_request(req, -EIO, blk_rq_sectors(req)); 366 continue; 367 }
+49 -39
drivers/block/virtio_blk.c
··· 2 #include <linux/spinlock.h> 3 #include <linux/slab.h> 4 #include <linux/blkdev.h> 5 #include <linux/hdreg.h> 6 #include <linux/virtio.h> 7 #include <linux/virtio_blk.h> ··· 66 break; 67 } 68 69 - if (blk_pc_request(vbr->req)) { 70 vbr->req->resid_len = vbr->in_hdr.residual; 71 vbr->req->sense_len = vbr->in_hdr.sense_len; 72 vbr->req->errors = vbr->in_hdr.errors; 73 - } 74 - if (blk_special_request(vbr->req)) 75 vbr->req->errors = (error != 0); 76 77 __blk_end_request_all(vbr->req, error); 78 list_del(&vbr->list); ··· 100 return false; 101 102 vbr->req = req; 103 - switch (req->cmd_type) { 104 - case REQ_TYPE_FS: 105 - vbr->out_hdr.type = 0; 106 - vbr->out_hdr.sector = blk_rq_pos(vbr->req); 107 - vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); 108 - break; 109 - case REQ_TYPE_BLOCK_PC: 110 - vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD; 111 vbr->out_hdr.sector = 0; 112 vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); 113 - break; 114 - case REQ_TYPE_SPECIAL: 115 - vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID; 116 - vbr->out_hdr.sector = 0; 117 - vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); 118 - break; 119 - case REQ_TYPE_LINUX_BLOCK: 120 - if (req->cmd[0] == REQ_LB_OP_FLUSH) { 121 - vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH; 122 vbr->out_hdr.sector = 0; 123 vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); 124 break; 125 } 126 - /*FALLTHRU*/ 127 - default: 128 - /* We don't put anything else in the queue. */ 129 - BUG(); 130 } 131 132 - if (blk_barrier_rq(vbr->req)) 133 vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER; 134 135 sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr)); ··· 139 * block, and before the normal inhdr we put the sense data and the 140 * inhdr with additional status information before the normal inhdr. 141 */ 142 - if (blk_pc_request(vbr->req)) 143 sg_set_buf(&vblk->sg[out++], vbr->req->cmd, vbr->req->cmd_len); 144 145 num = blk_rq_map_sg(q, vbr->req, vblk->sg + out); 146 147 - if (blk_pc_request(vbr->req)) { 148 sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, 96); 149 sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr, 150 sizeof(vbr->in_hdr)); ··· 195 virtqueue_kick(vblk->vq); 196 } 197 198 - static void virtblk_prepare_flush(struct request_queue *q, struct request *req) 199 - { 200 - req->cmd_type = REQ_TYPE_LINUX_BLOCK; 201 - req->cmd[0] = REQ_LB_OP_FLUSH; 202 - } 203 - 204 /* return id (s/n) string for *disk to *id_str 205 */ 206 static int virtblk_get_id(struct gendisk *disk, char *id_str) ··· 218 return blk_execute_rq(vblk->disk->queue, vblk->disk, req, false); 219 } 220 221 - static int virtblk_ioctl(struct block_device *bdev, fmode_t mode, 222 unsigned cmd, unsigned long data) 223 { 224 struct gendisk *disk = bdev->bd_disk; ··· 232 233 return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, 234 (void __user *)data); 235 } 236 237 /* We provide getgeo only to please some old bootloader/partitioning tools */ ··· 272 } 273 274 static const struct block_device_operations virtblk_fops = { 275 - .locked_ioctl = virtblk_ioctl, 276 .owner = THIS_MODULE, 277 .getgeo = virtblk_getgeo, 278 }; ··· 394 * flushing a volatile write cache on the host. Use that 395 * to implement write barrier support. 396 */ 397 - blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH, 398 - virtblk_prepare_flush); 399 } else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER)) { 400 /* 401 * If the BARRIER feature is supported the host expects us ··· 403 * never re-orders outstanding I/O. This feature is not 404 * useful for real life scenarious and deprecated. 
405 */ 406 - blk_queue_ordered(q, QUEUE_ORDERED_TAG, NULL); 407 } else { 408 /* 409 * If the FLUSH feature is not supported we must assume that ··· 411 * caching. We still need to drain the queue to provider 412 * proper barrier semantics. 413 */ 414 - blk_queue_ordered(q, QUEUE_ORDERED_DRAIN, NULL); 415 } 416 417 /* If disk is read-only in the host, the guest should obey */
··· 2 #include <linux/spinlock.h> 3 #include <linux/slab.h> 4 #include <linux/blkdev.h> 5 + #include <linux/smp_lock.h> 6 #include <linux/hdreg.h> 7 #include <linux/virtio.h> 8 #include <linux/virtio_blk.h> ··· 65 break; 66 } 67 68 + switch (vbr->req->cmd_type) { 69 + case REQ_TYPE_BLOCK_PC: 70 vbr->req->resid_len = vbr->in_hdr.residual; 71 vbr->req->sense_len = vbr->in_hdr.sense_len; 72 vbr->req->errors = vbr->in_hdr.errors; 73 + break; 74 + case REQ_TYPE_SPECIAL: 75 vbr->req->errors = (error != 0); 76 + break; 77 + default: 78 + break; 79 + } 80 81 __blk_end_request_all(vbr->req, error); 82 list_del(&vbr->list); ··· 94 return false; 95 96 vbr->req = req; 97 + 98 + if (req->cmd_flags & REQ_FLUSH) { 99 + vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH; 100 vbr->out_hdr.sector = 0; 101 vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); 102 + } else { 103 + switch (req->cmd_type) { 104 + case REQ_TYPE_FS: 105 + vbr->out_hdr.type = 0; 106 + vbr->out_hdr.sector = blk_rq_pos(vbr->req); 107 + vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); 108 + break; 109 + case REQ_TYPE_BLOCK_PC: 110 + vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD; 111 vbr->out_hdr.sector = 0; 112 vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); 113 break; 114 + case REQ_TYPE_SPECIAL: 115 + vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID; 116 + vbr->out_hdr.sector = 0; 117 + vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); 118 + break; 119 + default: 120 + /* We don't put anything else in the queue. */ 121 + BUG(); 122 } 123 } 124 125 + if (vbr->req->cmd_flags & REQ_HARDBARRIER) 126 vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER; 127 128 sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr)); ··· 134 * block, and before the normal inhdr we put the sense data and the 135 * inhdr with additional status information before the normal inhdr. 136 */ 137 + if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC) 138 sg_set_buf(&vblk->sg[out++], vbr->req->cmd, vbr->req->cmd_len); 139 140 num = blk_rq_map_sg(q, vbr->req, vblk->sg + out); 141 142 + if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC) { 143 sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, 96); 144 sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr, 145 sizeof(vbr->in_hdr)); ··· 190 virtqueue_kick(vblk->vq); 191 } 192 193 /* return id (s/n) string for *disk to *id_str 194 */ 195 static int virtblk_get_id(struct gendisk *disk, char *id_str) ··· 219 return blk_execute_rq(vblk->disk->queue, vblk->disk, req, false); 220 } 221 222 + static int virtblk_locked_ioctl(struct block_device *bdev, fmode_t mode, 223 unsigned cmd, unsigned long data) 224 { 225 struct gendisk *disk = bdev->bd_disk; ··· 233 234 return scsi_cmd_ioctl(disk->queue, disk, mode, cmd, 235 (void __user *)data); 236 + } 237 + 238 + static int virtblk_ioctl(struct block_device *bdev, fmode_t mode, 239 + unsigned int cmd, unsigned long param) 240 + { 241 + int ret; 242 + 243 + lock_kernel(); 244 + ret = virtblk_locked_ioctl(bdev, mode, cmd, param); 245 + unlock_kernel(); 246 + 247 + return ret; 248 } 249 250 /* We provide getgeo only to please some old bootloader/partitioning tools */ ··· 261 } 262 263 static const struct block_device_operations virtblk_fops = { 264 + .ioctl = virtblk_ioctl, 265 .owner = THIS_MODULE, 266 .getgeo = virtblk_getgeo, 267 }; ··· 383 * flushing a volatile write cache on the host. Use that 384 * to implement write barrier support. 
385 */ 386 + blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH); 387 } else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER)) { 388 /* 389 * If the BARRIER feature is supported the host expects us ··· 393 * never re-orders outstanding I/O. This feature is not 394 * useful for real life scenarious and deprecated. 395 */ 396 + blk_queue_ordered(q, QUEUE_ORDERED_TAG); 397 } else { 398 /* 399 * If the FLUSH feature is not supported we must assume that ··· 401 * caching. We still need to drain the queue to provider 402 * proper barrier semantics. 403 */ 404 + blk_queue_ordered(q, QUEUE_ORDERED_DRAIN); 405 } 406 407 /* If disk is read-only in the host, the guest should obey */
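The reworked virtio_blk request setup above keys first off REQ_FLUSH in cmd_flags and only then switches on cmd_type, replacing the old blk_pc_request()/REQ_TYPE_LINUX_BLOCK tests. Below is a small standalone C sketch of that dispatch order only; the constants, struct and classify() helper are made-up stand-ins for the block-layer definitions, not the driver's actual code.

#include <stdio.h>

/* Illustrative stand-ins for the block-layer constants. */
enum req_type { REQ_TYPE_FS, REQ_TYPE_BLOCK_PC, REQ_TYPE_SPECIAL };
#define REQ_FLUSH	(1u << 0)

struct request {
	enum req_type	cmd_type;
	unsigned int	cmd_flags;
};

static const char *classify(const struct request *rq)
{
	/* Flushes are marked in cmd_flags and are tested before cmd_type. */
	if (rq->cmd_flags & REQ_FLUSH)
		return "flush";

	switch (rq->cmd_type) {
	case REQ_TYPE_FS:
		return "fs read/write";
	case REQ_TYPE_BLOCK_PC:
		return "scsi passthrough";
	case REQ_TYPE_SPECIAL:
		return "serial number (get-id)";
	default:
		return "unexpected";	/* the real driver BUG()s here */
	}
}

int main(void)
{
	struct request flush = { REQ_TYPE_FS, REQ_FLUSH };
	struct request fs = { REQ_TYPE_FS, 0 };
	struct request pc = { REQ_TYPE_BLOCK_PC, 0 };

	printf("%s\n%s\n%s\n", classify(&flush), classify(&fs), classify(&pc));
	return 0;
}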
+16 -3
drivers/block/xd.c
··· 46 #include <linux/init.h> 47 #include <linux/wait.h> 48 #include <linux/blkdev.h> 49 #include <linux/blkpg.h> 50 #include <linux/delay.h> 51 #include <linux/io.h> ··· 134 135 static const struct block_device_operations xd_fops = { 136 .owner = THIS_MODULE, 137 - .locked_ioctl = xd_ioctl, 138 .getgeo = xd_getgeo, 139 }; 140 static DECLARE_WAIT_QUEUE_HEAD(xd_wait_int); ··· 323 int res = -EIO; 324 int retry; 325 326 - if (!blk_fs_request(req)) 327 goto done; 328 if (block + count > get_capacity(req->rq_disk)) 329 goto done; ··· 348 } 349 350 /* xd_ioctl: handle device ioctl's */ 351 - static int xd_ioctl(struct block_device *bdev, fmode_t mode, u_int cmd, u_long arg) 352 { 353 switch (cmd) { 354 case HDIO_SET_DMA: ··· 374 default: 375 return -EINVAL; 376 } 377 } 378 379 /* xd_readwrite: handle a read/write request */
··· 46 #include <linux/init.h> 47 #include <linux/wait.h> 48 #include <linux/blkdev.h> 49 + #include <linux/smp_lock.h> 50 #include <linux/blkpg.h> 51 #include <linux/delay.h> 52 #include <linux/io.h> ··· 133 134 static const struct block_device_operations xd_fops = { 135 .owner = THIS_MODULE, 136 + .ioctl = xd_ioctl, 137 .getgeo = xd_getgeo, 138 }; 139 static DECLARE_WAIT_QUEUE_HEAD(xd_wait_int); ··· 322 int res = -EIO; 323 int retry; 324 325 + if (req->cmd_type != REQ_TYPE_FS) 326 goto done; 327 if (block + count > get_capacity(req->rq_disk)) 328 goto done; ··· 347 } 348 349 /* xd_ioctl: handle device ioctl's */ 350 + static int xd_locked_ioctl(struct block_device *bdev, fmode_t mode, u_int cmd, u_long arg) 351 { 352 switch (cmd) { 353 case HDIO_SET_DMA: ··· 373 default: 374 return -EINVAL; 375 } 376 + } 377 + 378 + static int xd_ioctl(struct block_device *bdev, fmode_t mode, 379 + unsigned int cmd, unsigned long param) 380 + { 381 + int ret; 382 + 383 + lock_kernel(); 384 + ret = xd_locked_ioctl(bdev, mode, cmd, param); 385 + unlock_kernel(); 386 + 387 + return ret; 388 } 389 390 /* xd_readwrite: handle a read/write request */
+298 -101
drivers/block/xen-blkfront.c
··· 41 #include <linux/cdrom.h> 42 #include <linux/module.h> 43 #include <linux/slab.h> 44 #include <linux/scatterlist.h> 45 46 #include <xen/xen.h> ··· 80 */ 81 struct blkfront_info 82 { 83 struct xenbus_device *xbdev; 84 struct gendisk *gd; 85 int vdevice; ··· 97 unsigned long shadow_free; 98 int feature_barrier; 99 int is_ready; 100 - 101 - /** 102 - * The number of people holding this device open. We won't allow a 103 - * hot-unplug unless this is 0. 104 - */ 105 - int users; 106 }; 107 108 static DEFINE_SPINLOCK(blkif_io_lock); 109 110 #define MAXIMUM_OUTSTANDING_BLOCK_REQS \ 111 (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE) ··· 137 info->shadow[id].req.id = info->shadow_free; 138 info->shadow[id].request = 0; 139 info->shadow_free = id; 140 } 141 142 static void blkif_restart_queue_callback(void *arg) ··· 288 289 ring_req->operation = rq_data_dir(req) ? 290 BLKIF_OP_WRITE : BLKIF_OP_READ; 291 - if (blk_barrier_rq(req)) 292 ring_req->operation = BLKIF_OP_WRITE_BARRIER; 293 294 ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg); ··· 359 360 blk_start_request(req); 361 362 - if (!blk_fs_request(req)) { 363 __blk_end_request_all(req, -EIO); 364 continue; 365 } ··· 421 static int xlvbd_barrier(struct blkfront_info *info) 422 { 423 int err; 424 425 - err = blk_queue_ordered(info->rq, 426 - info->feature_barrier ? QUEUE_ORDERED_DRAIN : QUEUE_ORDERED_NONE, 427 - NULL); 428 429 if (err) 430 return err; 431 432 printk(KERN_INFO "blkfront: %s: barriers %s\n", 433 - info->gd->disk_name, 434 - info->feature_barrier ? "enabled" : "disabled"); 435 return 0; 436 } 437 ··· 472 if ((minor % nr_parts) == 0) 473 nr_minors = nr_parts; 474 475 gd = alloc_disk(nr_minors); 476 if (gd == NULL) 477 - goto out; 478 479 offset = minor / nr_parts; 480 ··· 510 511 if (xlvbd_init_blk_queue(gd, sector_size)) { 512 del_gendisk(gd); 513 - goto out; 514 } 515 516 info->rq = gd->queue; 517 info->gd = gd; 518 519 - if (info->feature_barrier) 520 - xlvbd_barrier(info); 521 522 if (vdisk_info & VDISK_READONLY) 523 set_disk_ro(gd, 1); ··· 529 530 return 0; 531 532 out: 533 return err; 534 } 535 536 static void kick_pending_request_queues(struct blkfront_info *info) ··· 662 printk(KERN_WARNING "blkfront: %s: write barrier op failed\n", 663 info->gd->disk_name); 664 error = -EOPNOTSUPP; 665 - info->feature_barrier = 0; 666 xlvbd_barrier(info); 667 } 668 /* fall through */ ··· 745 746 747 /* Common code used when first setting up, and when resuming. */ 748 - static int talk_to_backend(struct xenbus_device *dev, 749 struct blkfront_info *info) 750 { 751 const char *message = NULL; ··· 804 out: 805 return err; 806 } 807 - 808 809 /** 810 * Entry point to this code when a new device is created. 
Allocate the basic ··· 865 return -ENOMEM; 866 } 867 868 info->xbdev = dev; 869 info->vdevice = vdevice; 870 info->connected = BLKIF_STATE_DISCONNECTED; ··· 879 info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0); 880 dev_set_drvdata(&dev->dev, info); 881 882 - err = talk_to_backend(dev, info); 883 if (err) { 884 kfree(info); 885 dev_set_drvdata(&dev->dev, NULL); ··· 974 975 blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); 976 977 - err = talk_to_backend(dev, info); 978 if (info->connected == BLKIF_STATE_SUSPENDED && !err) 979 err = blkif_recover(info); 980 981 return err; 982 } 983 984 985 /* 986 * Invoked when the backend is finally 'ready' (and has told produced ··· 1029 unsigned long sector_size; 1030 unsigned int binfo; 1031 int err; 1032 1033 - if ((info->connected == BLKIF_STATE_CONNECTED) || 1034 - (info->connected == BLKIF_STATE_SUSPENDED) ) 1035 return; 1036 1037 dev_dbg(&info->xbdev->dev, "%s:%s.\n", 1038 __func__, info->xbdev->otherend); ··· 1070 } 1071 1072 err = xenbus_gather(XBT_NIL, info->xbdev->otherend, 1073 - "feature-barrier", "%lu", &info->feature_barrier, 1074 NULL); 1075 if (err) 1076 - info->feature_barrier = 0; 1077 1078 err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size); 1079 if (err) { ··· 1112 } 1113 1114 /** 1115 - * Handle the change of state of the backend to Closing. We must delete our 1116 - * device-layer structures now, to ensure that writes are flushed through to 1117 - * the backend. Once is this done, we can switch to Closed in 1118 - * acknowledgement. 1119 - */ 1120 - static void blkfront_closing(struct xenbus_device *dev) 1121 - { 1122 - struct blkfront_info *info = dev_get_drvdata(&dev->dev); 1123 - unsigned long flags; 1124 - 1125 - dev_dbg(&dev->dev, "blkfront_closing: %s removed\n", dev->nodename); 1126 - 1127 - if (info->rq == NULL) 1128 - goto out; 1129 - 1130 - spin_lock_irqsave(&blkif_io_lock, flags); 1131 - 1132 - /* No more blkif_request(). */ 1133 - blk_stop_queue(info->rq); 1134 - 1135 - /* No more gnttab callback work. */ 1136 - gnttab_cancel_free_callback(&info->callback); 1137 - spin_unlock_irqrestore(&blkif_io_lock, flags); 1138 - 1139 - /* Flush gnttab callback work. Must be done with no locks held. */ 1140 - flush_scheduled_work(); 1141 - 1142 - blk_cleanup_queue(info->rq); 1143 - info->rq = NULL; 1144 - 1145 - del_gendisk(info->gd); 1146 - 1147 - out: 1148 - xenbus_frontend_closed(dev); 1149 - } 1150 - 1151 - /** 1152 * Callback received when the backend's state changes. 
1153 */ 1154 - static void backend_changed(struct xenbus_device *dev, 1155 enum xenbus_state backend_state) 1156 { 1157 struct blkfront_info *info = dev_get_drvdata(&dev->dev); 1158 - struct block_device *bd; 1159 1160 - dev_dbg(&dev->dev, "blkfront:backend_changed.\n"); 1161 1162 switch (backend_state) { 1163 case XenbusStateInitialising: ··· 1134 break; 1135 1136 case XenbusStateClosing: 1137 - if (info->gd == NULL) { 1138 - xenbus_frontend_closed(dev); 1139 - break; 1140 - } 1141 - bd = bdget_disk(info->gd, 0); 1142 - if (bd == NULL) 1143 - xenbus_dev_fatal(dev, -ENODEV, "bdget failed"); 1144 - 1145 - mutex_lock(&bd->bd_mutex); 1146 - if (info->users > 0) 1147 - xenbus_dev_error(dev, -EBUSY, 1148 - "Device in use; refusing to close"); 1149 - else 1150 - blkfront_closing(dev); 1151 - mutex_unlock(&bd->bd_mutex); 1152 - bdput(bd); 1153 break; 1154 } 1155 } 1156 1157 - static int blkfront_remove(struct xenbus_device *dev) 1158 { 1159 - struct blkfront_info *info = dev_get_drvdata(&dev->dev); 1160 1161 - dev_dbg(&dev->dev, "blkfront_remove: %s removed\n", dev->nodename); 1162 1163 blkif_free(info, 0); 1164 1165 - kfree(info); 1166 1167 return 0; 1168 } ··· 1192 { 1193 struct blkfront_info *info = dev_get_drvdata(&dev->dev); 1194 1195 - return info->is_ready; 1196 } 1197 1198 static int blkif_open(struct block_device *bdev, fmode_t mode) 1199 { 1200 - struct blkfront_info *info = bdev->bd_disk->private_data; 1201 - info->users++; 1202 - return 0; 1203 } 1204 1205 static int blkif_release(struct gendisk *disk, fmode_t mode) 1206 { 1207 struct blkfront_info *info = disk->private_data; 1208 - info->users--; 1209 - if (info->users == 0) { 1210 - /* Check whether we have been instructed to close. We will 1211 - have ignored this request initially, as the device was 1212 - still mounted. */ 1213 - struct xenbus_device *dev = info->xbdev; 1214 - enum xenbus_state state = xenbus_read_driver_state(dev->otherend); 1215 1216 - if (state == XenbusStateClosing && info->is_ready) 1217 - blkfront_closing(dev); 1218 } 1219 return 0; 1220 } 1221 ··· 1273 .open = blkif_open, 1274 .release = blkif_release, 1275 .getgeo = blkif_getgeo, 1276 - .locked_ioctl = blkif_ioctl, 1277 }; 1278 1279 ··· 1289 .probe = blkfront_probe, 1290 .remove = blkfront_remove, 1291 .resume = blkfront_resume, 1292 - .otherend_changed = backend_changed, 1293 .is_ready = blkfront_is_ready, 1294 }; 1295
··· 41 #include <linux/cdrom.h> 42 #include <linux/module.h> 43 #include <linux/slab.h> 44 + #include <linux/smp_lock.h> 45 #include <linux/scatterlist.h> 46 47 #include <xen/xen.h> ··· 79 */ 80 struct blkfront_info 81 { 82 + struct mutex mutex; 83 struct xenbus_device *xbdev; 84 struct gendisk *gd; 85 int vdevice; ··· 95 unsigned long shadow_free; 96 int feature_barrier; 97 int is_ready; 98 }; 99 100 static DEFINE_SPINLOCK(blkif_io_lock); 101 + 102 + static unsigned int nr_minors; 103 + static unsigned long *minors; 104 + static DEFINE_SPINLOCK(minor_lock); 105 106 #define MAXIMUM_OUTSTANDING_BLOCK_REQS \ 107 (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE) ··· 137 info->shadow[id].req.id = info->shadow_free; 138 info->shadow[id].request = 0; 139 info->shadow_free = id; 140 + } 141 + 142 + static int xlbd_reserve_minors(unsigned int minor, unsigned int nr) 143 + { 144 + unsigned int end = minor + nr; 145 + int rc; 146 + 147 + if (end > nr_minors) { 148 + unsigned long *bitmap, *old; 149 + 150 + bitmap = kzalloc(BITS_TO_LONGS(end) * sizeof(*bitmap), 151 + GFP_KERNEL); 152 + if (bitmap == NULL) 153 + return -ENOMEM; 154 + 155 + spin_lock(&minor_lock); 156 + if (end > nr_minors) { 157 + old = minors; 158 + memcpy(bitmap, minors, 159 + BITS_TO_LONGS(nr_minors) * sizeof(*bitmap)); 160 + minors = bitmap; 161 + nr_minors = BITS_TO_LONGS(end) * BITS_PER_LONG; 162 + } else 163 + old = bitmap; 164 + spin_unlock(&minor_lock); 165 + kfree(old); 166 + } 167 + 168 + spin_lock(&minor_lock); 169 + if (find_next_bit(minors, end, minor) >= end) { 170 + for (; minor < end; ++minor) 171 + __set_bit(minor, minors); 172 + rc = 0; 173 + } else 174 + rc = -EBUSY; 175 + spin_unlock(&minor_lock); 176 + 177 + return rc; 178 + } 179 + 180 + static void xlbd_release_minors(unsigned int minor, unsigned int nr) 181 + { 182 + unsigned int end = minor + nr; 183 + 184 + BUG_ON(end > nr_minors); 185 + spin_lock(&minor_lock); 186 + for (; minor < end; ++minor) 187 + __clear_bit(minor, minors); 188 + spin_unlock(&minor_lock); 189 } 190 191 static void blkif_restart_queue_callback(void *arg) ··· 239 240 ring_req->operation = rq_data_dir(req) ? 
241 BLKIF_OP_WRITE : BLKIF_OP_READ; 242 + if (req->cmd_flags & REQ_HARDBARRIER) 243 ring_req->operation = BLKIF_OP_WRITE_BARRIER; 244 245 ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg); ··· 310 311 blk_start_request(req); 312 313 + if (req->cmd_type != REQ_TYPE_FS) { 314 __blk_end_request_all(req, -EIO); 315 continue; 316 } ··· 372 static int xlvbd_barrier(struct blkfront_info *info) 373 { 374 int err; 375 + const char *barrier; 376 377 + switch (info->feature_barrier) { 378 + case QUEUE_ORDERED_DRAIN: barrier = "enabled (drain)"; break; 379 + case QUEUE_ORDERED_TAG: barrier = "enabled (tag)"; break; 380 + case QUEUE_ORDERED_NONE: barrier = "disabled"; break; 381 + default: return -EINVAL; 382 + } 383 + 384 + err = blk_queue_ordered(info->rq, info->feature_barrier); 385 386 if (err) 387 return err; 388 389 printk(KERN_INFO "blkfront: %s: barriers %s\n", 390 + info->gd->disk_name, barrier); 391 return 0; 392 } 393 ··· 418 if ((minor % nr_parts) == 0) 419 nr_minors = nr_parts; 420 421 + err = xlbd_reserve_minors(minor, nr_minors); 422 + if (err) 423 + goto out; 424 + err = -ENODEV; 425 + 426 gd = alloc_disk(nr_minors); 427 if (gd == NULL) 428 + goto release; 429 430 offset = minor / nr_parts; 431 ··· 451 452 if (xlvbd_init_blk_queue(gd, sector_size)) { 453 del_gendisk(gd); 454 + goto release; 455 } 456 457 info->rq = gd->queue; 458 info->gd = gd; 459 460 + xlvbd_barrier(info); 461 462 if (vdisk_info & VDISK_READONLY) 463 set_disk_ro(gd, 1); ··· 471 472 return 0; 473 474 + release: 475 + xlbd_release_minors(minor, nr_minors); 476 out: 477 return err; 478 + } 479 + 480 + static void xlvbd_release_gendisk(struct blkfront_info *info) 481 + { 482 + unsigned int minor, nr_minors; 483 + unsigned long flags; 484 + 485 + if (info->rq == NULL) 486 + return; 487 + 488 + spin_lock_irqsave(&blkif_io_lock, flags); 489 + 490 + /* No more blkif_request(). */ 491 + blk_stop_queue(info->rq); 492 + 493 + /* No more gnttab callback work. */ 494 + gnttab_cancel_free_callback(&info->callback); 495 + spin_unlock_irqrestore(&blkif_io_lock, flags); 496 + 497 + /* Flush gnttab callback work. Must be done with no locks held. */ 498 + flush_scheduled_work(); 499 + 500 + del_gendisk(info->gd); 501 + 502 + minor = info->gd->first_minor; 503 + nr_minors = info->gd->minors; 504 + xlbd_release_minors(minor, nr_minors); 505 + 506 + blk_cleanup_queue(info->rq); 507 + info->rq = NULL; 508 + 509 + put_disk(info->gd); 510 + info->gd = NULL; 511 } 512 513 static void kick_pending_request_queues(struct blkfront_info *info) ··· 569 printk(KERN_WARNING "blkfront: %s: write barrier op failed\n", 570 info->gd->disk_name); 571 error = -EOPNOTSUPP; 572 + info->feature_barrier = QUEUE_ORDERED_NONE; 573 xlvbd_barrier(info); 574 } 575 /* fall through */ ··· 652 653 654 /* Common code used when first setting up, and when resuming. */ 655 + static int talk_to_blkback(struct xenbus_device *dev, 656 struct blkfront_info *info) 657 { 658 const char *message = NULL; ··· 711 out: 712 return err; 713 } 714 715 /** 716 * Entry point to this code when a new device is created. 
Allocate the basic ··· 773 return -ENOMEM; 774 } 775 776 + mutex_init(&info->mutex); 777 info->xbdev = dev; 778 info->vdevice = vdevice; 779 info->connected = BLKIF_STATE_DISCONNECTED; ··· 786 info->handle = simple_strtoul(strrchr(dev->nodename, '/')+1, NULL, 0); 787 dev_set_drvdata(&dev->dev, info); 788 789 + err = talk_to_blkback(dev, info); 790 if (err) { 791 kfree(info); 792 dev_set_drvdata(&dev->dev, NULL); ··· 881 882 blkif_free(info, info->connected == BLKIF_STATE_CONNECTED); 883 884 + err = talk_to_blkback(dev, info); 885 if (info->connected == BLKIF_STATE_SUSPENDED && !err) 886 err = blkif_recover(info); 887 888 return err; 889 } 890 891 + static void 892 + blkfront_closing(struct blkfront_info *info) 893 + { 894 + struct xenbus_device *xbdev = info->xbdev; 895 + struct block_device *bdev = NULL; 896 + 897 + mutex_lock(&info->mutex); 898 + 899 + if (xbdev->state == XenbusStateClosing) { 900 + mutex_unlock(&info->mutex); 901 + return; 902 + } 903 + 904 + if (info->gd) 905 + bdev = bdget_disk(info->gd, 0); 906 + 907 + mutex_unlock(&info->mutex); 908 + 909 + if (!bdev) { 910 + xenbus_frontend_closed(xbdev); 911 + return; 912 + } 913 + 914 + mutex_lock(&bdev->bd_mutex); 915 + 916 + if (bdev->bd_openers) { 917 + xenbus_dev_error(xbdev, -EBUSY, 918 + "Device in use; refusing to close"); 919 + xenbus_switch_state(xbdev, XenbusStateClosing); 920 + } else { 921 + xlvbd_release_gendisk(info); 922 + xenbus_frontend_closed(xbdev); 923 + } 924 + 925 + mutex_unlock(&bdev->bd_mutex); 926 + bdput(bdev); 927 + } 928 929 /* 930 * Invoked when the backend is finally 'ready' (and has told produced ··· 899 unsigned long sector_size; 900 unsigned int binfo; 901 int err; 902 + int barrier; 903 904 + switch (info->connected) { 905 + case BLKIF_STATE_CONNECTED: 906 + /* 907 + * Potentially, the back-end may be signalling 908 + * a capacity change; update the capacity. 909 + */ 910 + err = xenbus_scanf(XBT_NIL, info->xbdev->otherend, 911 + "sectors", "%Lu", &sectors); 912 + if (XENBUS_EXIST_ERR(err)) 913 + return; 914 + printk(KERN_INFO "Setting capacity to %Lu\n", 915 + sectors); 916 + set_capacity(info->gd, sectors); 917 + revalidate_disk(info->gd); 918 + 919 + /* fall through */ 920 + case BLKIF_STATE_SUSPENDED: 921 return; 922 + 923 + default: 924 + break; 925 + } 926 927 dev_dbg(&info->xbdev->dev, "%s:%s.\n", 928 __func__, info->xbdev->otherend); ··· 920 } 921 922 err = xenbus_gather(XBT_NIL, info->xbdev->otherend, 923 + "feature-barrier", "%lu", &barrier, 924 NULL); 925 + 926 + /* 927 + * If there's no "feature-barrier" defined, then it means 928 + * we're dealing with a very old backend which writes 929 + * synchronously; draining will do what needs to get done. 930 + * 931 + * If there are barriers, then we can do full queued writes 932 + * with tagged barriers. 933 + * 934 + * If barriers are not supported, then there's no much we can 935 + * do, so just set ordering to NONE. 936 + */ 937 if (err) 938 + info->feature_barrier = QUEUE_ORDERED_DRAIN; 939 + else if (barrier) 940 + info->feature_barrier = QUEUE_ORDERED_TAG; 941 + else 942 + info->feature_barrier = QUEUE_ORDERED_NONE; 943 944 err = xlvbd_alloc_gendisk(sectors, info, binfo, sector_size); 945 if (err) { ··· 946 } 947 948 /** 949 * Callback received when the backend's state changes. 
950 */ 951 + static void blkback_changed(struct xenbus_device *dev, 952 enum xenbus_state backend_state) 953 { 954 struct blkfront_info *info = dev_get_drvdata(&dev->dev); 955 956 + dev_dbg(&dev->dev, "blkfront:blkback_changed to state %d.\n", backend_state); 957 958 switch (backend_state) { 959 case XenbusStateInitialising: ··· 1006 break; 1007 1008 case XenbusStateClosing: 1009 + blkfront_closing(info); 1010 break; 1011 } 1012 } 1013 1014 + static int blkfront_remove(struct xenbus_device *xbdev) 1015 { 1016 + struct blkfront_info *info = dev_get_drvdata(&xbdev->dev); 1017 + struct block_device *bdev = NULL; 1018 + struct gendisk *disk; 1019 1020 + dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename); 1021 1022 blkif_free(info, 0); 1023 1024 + mutex_lock(&info->mutex); 1025 + 1026 + disk = info->gd; 1027 + if (disk) 1028 + bdev = bdget_disk(disk, 0); 1029 + 1030 + info->xbdev = NULL; 1031 + mutex_unlock(&info->mutex); 1032 + 1033 + if (!bdev) { 1034 + kfree(info); 1035 + return 0; 1036 + } 1037 + 1038 + /* 1039 + * The xbdev was removed before we reached the Closed 1040 + * state. See if it's safe to remove the disk. If the bdev 1041 + * isn't closed yet, we let release take care of it. 1042 + */ 1043 + 1044 + mutex_lock(&bdev->bd_mutex); 1045 + info = disk->private_data; 1046 + 1047 + dev_warn(disk_to_dev(disk), 1048 + "%s was hot-unplugged, %d stale handles\n", 1049 + xbdev->nodename, bdev->bd_openers); 1050 + 1051 + if (info && !bdev->bd_openers) { 1052 + xlvbd_release_gendisk(info); 1053 + disk->private_data = NULL; 1054 + kfree(info); 1055 + } 1056 + 1057 + mutex_unlock(&bdev->bd_mutex); 1058 + bdput(bdev); 1059 1060 return 0; 1061 } ··· 1043 { 1044 struct blkfront_info *info = dev_get_drvdata(&dev->dev); 1045 1046 + return info->is_ready && info->xbdev; 1047 } 1048 1049 static int blkif_open(struct block_device *bdev, fmode_t mode) 1050 { 1051 + struct gendisk *disk = bdev->bd_disk; 1052 + struct blkfront_info *info; 1053 + int err = 0; 1054 + 1055 + lock_kernel(); 1056 + 1057 + info = disk->private_data; 1058 + if (!info) { 1059 + /* xbdev gone */ 1060 + err = -ERESTARTSYS; 1061 + goto out; 1062 + } 1063 + 1064 + mutex_lock(&info->mutex); 1065 + 1066 + if (!info->gd) 1067 + /* xbdev is closed */ 1068 + err = -ERESTARTSYS; 1069 + 1070 + mutex_unlock(&info->mutex); 1071 + 1072 + out: 1073 + unlock_kernel(); 1074 + return err; 1075 } 1076 1077 static int blkif_release(struct gendisk *disk, fmode_t mode) 1078 { 1079 struct blkfront_info *info = disk->private_data; 1080 + struct block_device *bdev; 1081 + struct xenbus_device *xbdev; 1082 1083 + lock_kernel(); 1084 + 1085 + bdev = bdget_disk(disk, 0); 1086 + bdput(bdev); 1087 + 1088 + if (bdev->bd_openers) 1089 + goto out; 1090 + 1091 + /* 1092 + * Check if we have been instructed to close. We will have 1093 + * deferred this request, because the bdev was still open. 
1094 + */ 1095 + 1096 + mutex_lock(&info->mutex); 1097 + xbdev = info->xbdev; 1098 + 1099 + if (xbdev && xbdev->state == XenbusStateClosing) { 1100 + /* pending switch to state closed */ 1101 + dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n"); 1102 + xlvbd_release_gendisk(info); 1103 + xenbus_frontend_closed(info->xbdev); 1104 + } 1105 + 1106 + mutex_unlock(&info->mutex); 1107 + 1108 + if (!xbdev) { 1109 + /* sudden device removal */ 1110 + dev_info(disk_to_dev(bdev->bd_disk), "releasing disk\n"); 1111 + xlvbd_release_gendisk(info); 1112 + disk->private_data = NULL; 1113 + kfree(info); 1114 } 1115 + 1116 + out: 1117 + unlock_kernel(); 1118 return 0; 1119 } 1120 ··· 1076 .open = blkif_open, 1077 .release = blkif_release, 1078 .getgeo = blkif_getgeo, 1079 + .ioctl = blkif_ioctl, 1080 }; 1081 1082 ··· 1092 .probe = blkfront_probe, 1093 .remove = blkfront_remove, 1094 .resume = blkfront_resume, 1095 + .otherend_changed = blkback_changed, 1096 .is_ready = blkfront_is_ready, 1097 }; 1098
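The xlbd_reserve_minors()/xlbd_release_minors() pair added above claims a contiguous range of minor numbers only when every bit in the range is still clear, growing the bitmap under minor_lock when the range runs past the current size. Below is a minimal userspace sketch of the same reserve/release idea, with a fixed-size array instead of a growable bitmap and no locking; all names here are illustrative, not the driver's:

    #include <stdio.h>
    #include <string.h>

    #define MAX_MINORS 4096
    static unsigned char minors[MAX_MINORS];   /* 0 = free, 1 = reserved */

    static int reserve_minors(unsigned int minor, unsigned int nr)
    {
            unsigned int i;

            /* refuse the whole range if any minor in it is taken */
            for (i = minor; i < minor + nr; i++)
                    if (i >= MAX_MINORS || minors[i])
                            return -1;
            for (i = minor; i < minor + nr; i++)
                    minors[i] = 1;
            return 0;
    }

    static void release_minors(unsigned int minor, unsigned int nr)
    {
            memset(&minors[minor], 0, nr);
    }

    int main(void)
    {
            printf("reserve 0-15:  %d\n", reserve_minors(0, 16));   /* 0, succeeds */
            printf("reserve 8-9:   %d\n", reserve_minors(8, 2));    /* -1, overlaps */
            release_minors(0, 16);
            printf("after release: %d\n", reserve_minors(8, 2));    /* 0, succeeds */
            return 0;
    }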
+7 -1
drivers/block/xsysace.c
··· 89 #include <linux/delay.h> 90 #include <linux/slab.h> 91 #include <linux/blkdev.h> 92 #include <linux/ata.h> 93 #include <linux/hdreg.h> 94 #include <linux/platform_device.h> ··· 466 struct request *req; 467 468 while ((req = blk_peek_request(q)) != NULL) { 469 - if (blk_fs_request(req)) 470 break; 471 blk_start_request(req); 472 __blk_end_request_all(req, -EIO); ··· 902 903 dev_dbg(ace->dev, "ace_open() users=%i\n", ace->users + 1); 904 905 spin_lock_irqsave(&ace->lock, flags); 906 ace->users++; 907 spin_unlock_irqrestore(&ace->lock, flags); 908 909 check_disk_change(bdev); 910 return 0; 911 } 912 ··· 921 922 dev_dbg(ace->dev, "ace_release() users=%i\n", ace->users - 1); 923 924 spin_lock_irqsave(&ace->lock, flags); 925 ace->users--; 926 if (ace->users == 0) { ··· 929 ace_out(ace, ACE_CTRL, val & ~ACE_CTRL_LOCKREQ); 930 } 931 spin_unlock_irqrestore(&ace->lock, flags); 932 return 0; 933 } 934
··· 89 #include <linux/delay.h> 90 #include <linux/slab.h> 91 #include <linux/blkdev.h> 92 + #include <linux/smp_lock.h> 93 #include <linux/ata.h> 94 #include <linux/hdreg.h> 95 #include <linux/platform_device.h> ··· 465 struct request *req; 466 467 while ((req = blk_peek_request(q)) != NULL) { 468 + if (req->cmd_type == REQ_TYPE_FS) 469 break; 470 blk_start_request(req); 471 __blk_end_request_all(req, -EIO); ··· 901 902 dev_dbg(ace->dev, "ace_open() users=%i\n", ace->users + 1); 903 904 + lock_kernel(); 905 spin_lock_irqsave(&ace->lock, flags); 906 ace->users++; 907 spin_unlock_irqrestore(&ace->lock, flags); 908 909 check_disk_change(bdev); 910 + unlock_kernel(); 911 + 912 return 0; 913 } 914 ··· 917 918 dev_dbg(ace->dev, "ace_release() users=%i\n", ace->users - 1); 919 920 + lock_kernel(); 921 spin_lock_irqsave(&ace->lock, flags); 922 ace->users--; 923 if (ace->users == 0) { ··· 924 ace_out(ace, ACE_CTRL, val & ~ACE_CTRL_LOCKREQ); 925 } 926 spin_unlock_irqrestore(&ace->lock, flags); 927 + unlock_kernel(); 928 return 0; 929 } 930
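xsysace is one of several drivers in this series (z2ram, gdrom, viocd and the ide drivers further down follow the same pattern) where the big kernel lock stops being taken by the block core and is instead pushed down into the driver's own open/release paths via lock_kernel()/unlock_kernel(). A rough userspace analogue of that pushdown, with a pthread mutex standing in for the BKL; the point is only that the lock moves from the caller into each method body, where it can later be narrowed or dropped per driver:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
    static int users;

    static int dev_open(void)
    {
            pthread_mutex_lock(&big_lock);    /* previously taken by the caller */
            users++;
            pthread_mutex_unlock(&big_lock);
            return 0;
    }

    static int dev_release(void)
    {
            pthread_mutex_lock(&big_lock);
            users--;
            pthread_mutex_unlock(&big_lock);
            return 0;
    }

    int main(void)
    {
            dev_open();
            dev_release();
            printf("users=%d\n", users);
            return 0;
    }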
+10 -3
drivers/block/z2ram.c
··· 33 #include <linux/module.h> 34 #include <linux/blkdev.h> 35 #include <linux/bitops.h> 36 #include <linux/slab.h> 37 38 #include <asm/setup.h> ··· 154 155 device = MINOR(bdev->bd_dev); 156 157 if ( current_device != -1 && current_device != device ) 158 { 159 rc = -EBUSY; ··· 296 set_capacity(z2ram_gendisk, z2ram_size >> 9); 297 } 298 299 return 0; 300 301 err_out_kfree: 302 kfree(z2ram_map); 303 err_out: 304 return rc; 305 } 306 307 static int 308 z2_release(struct gendisk *disk, fmode_t mode) 309 { 310 - if ( current_device == -1 ) 311 - return 0; 312 - 313 /* 314 * FIXME: unmap memory 315 */
··· 33 #include <linux/module.h> 34 #include <linux/blkdev.h> 35 #include <linux/bitops.h> 36 + #include <linux/smp_lock.h> 37 #include <linux/slab.h> 38 39 #include <asm/setup.h> ··· 153 154 device = MINOR(bdev->bd_dev); 155 156 + lock_kernel(); 157 if ( current_device != -1 && current_device != device ) 158 { 159 rc = -EBUSY; ··· 294 set_capacity(z2ram_gendisk, z2ram_size >> 9); 295 } 296 297 + unlock_kernel(); 298 return 0; 299 300 err_out_kfree: 301 kfree(z2ram_map); 302 err_out: 303 + unlock_kernel(); 304 return rc; 305 } 306 307 static int 308 z2_release(struct gendisk *disk, fmode_t mode) 309 { 310 + lock_kernel(); 311 + if ( current_device == -1 ) { 312 + unlock_kernel(); 313 + return 0; 314 + } 315 + unlock_kernel(); 316 /* 317 * FIXME: unmap memory 318 */
+26 -20
drivers/cdrom/cdrom.c
··· 242 243 -------------------------------------------------------------------------*/ 244 245 #define REVISION "Revision: 3.20" 246 #define VERSION "Id: cdrom.c 3.20 2003/12/17" 247 ··· 316 static const char *mrw_address_space[] = { "DMA", "GAA" }; 317 318 #if (ERRLOGMASK!=CD_NOTHING) 319 - #define cdinfo(type, fmt, args...) \ 320 - if ((ERRLOGMASK & type) || debug==1 ) \ 321 - printk(KERN_INFO "cdrom: " fmt, ## args) 322 #else 323 - #define cdinfo(type, fmt, args...) 324 #endif 325 326 /* These are used to simplify getting data in from and back to user land */ ··· 403 if (cdo->open == NULL || cdo->release == NULL) 404 return -EINVAL; 405 if (!banner_printed) { 406 - printk(KERN_INFO "Uniform CD-ROM driver " REVISION "\n"); 407 banner_printed = 1; 408 cdrom_sysctl_register(); 409 } ··· 554 unsigned char buffer[12]; 555 int ret; 556 557 - printk(KERN_INFO "cdrom: %sstarting format\n", cont ? "Re" : ""); 558 559 /* 560 * FmtData bit set (bit 4), format type is 1 ··· 584 585 ret = cdi->ops->generic_packet(cdi, &cgc); 586 if (ret) 587 - printk(KERN_INFO "cdrom: bgformat failed\n"); 588 589 return ret; 590 } ··· 630 631 ret = 0; 632 if (di.mrw_status == CDM_MRW_BGFORMAT_ACTIVE) { 633 - printk(KERN_INFO "cdrom: issuing MRW back ground " 634 - "format suspend\n"); 635 ret = cdrom_mrw_bgformat_susp(cdi, 0); 636 } 637 ··· 665 if ((ret = cdrom_mode_select(cdi, &cgc))) 666 return ret; 667 668 - printk(KERN_INFO "cdrom: %s: mrw address space %s selected\n", cdi->name, mrw_address_space[space]); 669 return 0; 670 } 671 ··· 770 * always reset to DMA lba space on open 771 */ 772 if (cdrom_mrw_set_lba_space(cdi, MRW_LBA_DMA)) { 773 - printk(KERN_ERR "cdrom: failed setting lba address space\n"); 774 return 1; 775 } 776 ··· 789 * 3 - MRW formatting complete 790 */ 791 ret = 0; 792 - printk(KERN_INFO "cdrom open: mrw_status '%s'\n", 793 - mrw_format_status[di.mrw_status]); 794 if (!di.mrw_status) 795 ret = 1; 796 else if (di.mrw_status == CDM_MRW_BGFORMAT_INACTIVE && ··· 939 return; 940 } 941 942 - printk(KERN_INFO "cdrom: %s: dirty DVD+RW media, \"finalizing\"\n", 943 - cdi->name); 944 945 init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); 946 cgc.cmd[0] = GPCMD_FLUSH_CACHE; ··· 2182 * frame dma, so drop to single frame dma if we need to 2183 */ 2184 if (cdi->cdda_method == CDDA_BPC_FULL && nframes > 1) { 2185 - printk("cdrom: dropping to single frame dma\n"); 2186 cdi->cdda_method = CDDA_BPC_SINGLE; 2187 goto retry; 2188 } ··· 2195 if (cdi->last_sense != 0x04 && cdi->last_sense != 0x0b) 2196 return ret; 2197 2198 - printk("cdrom: dropping to old style cdda (sense=%x)\n", cdi->last_sense); 2199 cdi->cdda_method = CDDA_OLD; 2200 return cdrom_read_cdda_old(cdi, ubuf, lba, nframes); 2201 } ··· 3407 "\t%d", CDROM_CAN(val) != 0); 3408 break; 3409 default: 3410 - printk(KERN_INFO "cdrom: invalid option%d\n", option); 3411 return 1; 3412 } 3413 if (!ret) ··· 3497 mutex_unlock(&cdrom_mutex); 3498 return proc_dostring(ctl, write, buffer, lenp, ppos); 3499 done: 3500 - printk(KERN_INFO "cdrom: info buffer too small\n"); 3501 goto doit; 3502 } 3503 ··· 3671 3672 static void __exit cdrom_exit(void) 3673 { 3674 - printk(KERN_INFO "Uniform CD-ROM driver unloaded\n"); 3675 cdrom_sysctl_unregister(); 3676 } 3677
··· 242 243 -------------------------------------------------------------------------*/ 244 245 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 246 + 247 #define REVISION "Revision: 3.20" 248 #define VERSION "Id: cdrom.c 3.20 2003/12/17" 249 ··· 314 static const char *mrw_address_space[] = { "DMA", "GAA" }; 315 316 #if (ERRLOGMASK!=CD_NOTHING) 317 + #define cdinfo(type, fmt, args...) \ 318 + do { \ 319 + if ((ERRLOGMASK & type) || debug == 1) \ 320 + pr_info(fmt, ##args); \ 321 + } while (0) 322 #else 323 + #define cdinfo(type, fmt, args...) \ 324 + do { \ 325 + if (0 && (ERRLOGMASK & type) || debug == 1) \ 326 + pr_info(fmt, ##args); \ 327 + } while (0) 328 #endif 329 330 /* These are used to simplify getting data in from and back to user land */ ··· 395 if (cdo->open == NULL || cdo->release == NULL) 396 return -EINVAL; 397 if (!banner_printed) { 398 + pr_info("Uniform CD-ROM driver " REVISION "\n"); 399 banner_printed = 1; 400 cdrom_sysctl_register(); 401 } ··· 546 unsigned char buffer[12]; 547 int ret; 548 549 + pr_info("%sstarting format\n", cont ? "Re" : ""); 550 551 /* 552 * FmtData bit set (bit 4), format type is 1 ··· 576 577 ret = cdi->ops->generic_packet(cdi, &cgc); 578 if (ret) 579 + pr_info("bgformat failed\n"); 580 581 return ret; 582 } ··· 622 623 ret = 0; 624 if (di.mrw_status == CDM_MRW_BGFORMAT_ACTIVE) { 625 + pr_info("issuing MRW background format suspend\n"); 626 ret = cdrom_mrw_bgformat_susp(cdi, 0); 627 } 628 ··· 658 if ((ret = cdrom_mode_select(cdi, &cgc))) 659 return ret; 660 661 + pr_info("%s: mrw address space %s selected\n", 662 + cdi->name, mrw_address_space[space]); 663 return 0; 664 } 665 ··· 762 * always reset to DMA lba space on open 763 */ 764 if (cdrom_mrw_set_lba_space(cdi, MRW_LBA_DMA)) { 765 + pr_err("failed setting lba address space\n"); 766 return 1; 767 } 768 ··· 781 * 3 - MRW formatting complete 782 */ 783 ret = 0; 784 + pr_info("open: mrw_status '%s'\n", mrw_format_status[di.mrw_status]); 785 if (!di.mrw_status) 786 ret = 1; 787 else if (di.mrw_status == CDM_MRW_BGFORMAT_INACTIVE && ··· 932 return; 933 } 934 935 + pr_info("%s: dirty DVD+RW media, \"finalizing\"\n", cdi->name); 936 937 init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE); 938 cgc.cmd[0] = GPCMD_FLUSH_CACHE; ··· 2176 * frame dma, so drop to single frame dma if we need to 2177 */ 2178 if (cdi->cdda_method == CDDA_BPC_FULL && nframes > 1) { 2179 + pr_info("dropping to single frame dma\n"); 2180 cdi->cdda_method = CDDA_BPC_SINGLE; 2181 goto retry; 2182 } ··· 2189 if (cdi->last_sense != 0x04 && cdi->last_sense != 0x0b) 2190 return ret; 2191 2192 + pr_info("dropping to old style cdda (sense=%x)\n", cdi->last_sense); 2193 cdi->cdda_method = CDDA_OLD; 2194 return cdrom_read_cdda_old(cdi, ubuf, lba, nframes); 2195 } ··· 3401 "\t%d", CDROM_CAN(val) != 0); 3402 break; 3403 default: 3404 + pr_info("invalid option%d\n", option); 3405 return 1; 3406 } 3407 if (!ret) ··· 3491 mutex_unlock(&cdrom_mutex); 3492 return proc_dostring(ctl, write, buffer, lenp, ppos); 3493 done: 3494 + pr_info("info buffer too small\n"); 3495 goto doit; 3496 } 3497 ··· 3665 3666 static void __exit cdrom_exit(void) 3667 { 3668 + pr_info("Uniform CD-ROM driver unloaded\n"); 3669 cdrom_sysctl_unregister(); 3670 } 3671
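The cdrom.c conversion leans on pr_fmt(): defining it before the first include makes every pr_info()/pr_err() in the file prepend the KBUILD_MODNAME ("cdrom: ") prefix automatically, which is why the repeated literal prefixes could be dropped from the messages. A tiny userspace analogue of the mechanism, with printf standing in for printk and a hard-coded prefix in place of KBUILD_MODNAME:

    #include <stdio.h>

    /* define the prefix once... */
    #define pr_fmt(fmt) "mydrv: " fmt
    /* ...and let the message macro apply it to every format string */
    #define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)

    int main(void)
    {
            pr_info("Registered with major number %d\n", 254);
            /* prints: mydrv: Registered with major number 254 */
            return 0;
    }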
+30 -18
drivers/cdrom/gdrom.c
··· 19 * 20 */ 21 22 #include <linux/init.h> 23 #include <linux/module.h> 24 #include <linux/fs.h> ··· 34 #include <linux/blkdev.h> 35 #include <linux/interrupt.h> 36 #include <linux/device.h> 37 #include <linux/wait.h> 38 #include <linux/workqueue.h> 39 #include <linux/platform_device.h> ··· 342 tocuse = 0; 343 err = gdrom_readtoc_cmd(gd.toc, 0); 344 if (err) { 345 - printk(KERN_INFO "GDROM: Could not get CD " 346 - "table of contents\n"); 347 return -ENXIO; 348 } 349 } ··· 359 } while (track >= fentry); 360 361 if ((track > 100) || (track < get_entry_track(gd.toc->first))) { 362 - printk(KERN_INFO "GDROM: No data on the last " 363 - "session of the CD\n"); 364 gdrom_getsense(NULL); 365 return -ENXIO; 366 } ··· 452 goto cleanup_sense; 453 insw(GDROM_DATA_REG, &sense, sense_command->buflen/2); 454 if (sense[1] & 40) { 455 - printk(KERN_INFO "GDROM: Drive not ready - command aborted\n"); 456 goto cleanup_sense; 457 } 458 sense_key = sense[1] & 0x0F; 459 if (sense_key < ARRAY_SIZE(sense_texts)) 460 - printk(KERN_INFO "GDROM: %s\n", sense_texts[sense_key].text); 461 else 462 - printk(KERN_ERR "GDROM: Unknown sense key: %d\n", sense_key); 463 if (bufstring) /* return addional sense data */ 464 memcpy(bufstring, &sense[4], 2); 465 if (sense_key < 2) ··· 493 494 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode) 495 { 496 - return cdrom_open(gd.cd_info, bdev, mode); 497 } 498 499 static int gdrom_bdops_release(struct gendisk *disk, fmode_t mode) 500 { 501 cdrom_release(gd.cd_info, mode); 502 return 0; 503 } 504 ··· 516 static int gdrom_bdops_ioctl(struct block_device *bdev, fmode_t mode, 517 unsigned cmd, unsigned long arg) 518 { 519 - return cdrom_ioctl(gd.cd_info, bdev, mode, cmd, arg); 520 } 521 522 static const struct block_device_operations gdrom_bdops = { ··· 530 .open = gdrom_bdops_open, 531 .release = gdrom_bdops_release, 532 .media_changed = gdrom_bdops_mediachanged, 533 - .locked_ioctl = gdrom_bdops_ioctl, 534 }; 535 536 static irqreturn_t gdrom_command_interrupt(int irq, void *dev_id) ··· 656 struct request *req; 657 658 while ((req = blk_fetch_request(rq)) != NULL) { 659 - if (!blk_fs_request(req)) { 660 - printk(KERN_DEBUG "GDROM: Non-fs request ignored\n"); 661 __blk_end_request_all(req, -EIO); 662 continue; 663 } 664 if (rq_data_dir(req) != READ) { 665 - printk(KERN_NOTICE "GDROM: Read only device -"); 666 - printk(" write request ignored\n"); 667 __blk_end_request_all(req, -EIO); 668 continue; 669 } ··· 697 firmw_ver = kstrndup(id->firmver, 16, GFP_KERNEL); 698 if (!firmw_ver) 699 goto free_manuf_name; 700 - printk(KERN_INFO "GDROM: %s from %s with firmware %s\n", 701 model_name, manuf_name, firmw_ver); 702 err = 0; 703 kfree(firmw_ver); ··· 769 int err; 770 /* Start the device */ 771 if (gdrom_execute_diagnostic() != 1) { 772 - printk(KERN_WARNING "GDROM: ATA Probe for GDROM failed.\n"); 773 return -ENODEV; 774 } 775 /* Print out firmware ID */ ··· 779 gdrom_major = register_blkdev(0, GDROM_DEV_NAME); 780 if (gdrom_major <= 0) 781 return gdrom_major; 782 - printk(KERN_INFO "GDROM: Registered with major number %d\n", 783 gdrom_major); 784 /* Specify basic properties of drive */ 785 gd.cd_info = kzalloc(sizeof(struct cdrom_device_info), GFP_KERNEL); ··· 830 unregister_blkdev(gdrom_major, GDROM_DEV_NAME); 831 gdrom_major = 0; 832 probe_fail_no_mem: 833 - printk(KERN_WARNING "GDROM: Probe failed - error is 0x%X\n", err); 834 return err; 835 } 836
··· 19 * 20 */ 21 22 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 23 + 24 #include <linux/init.h> 25 #include <linux/module.h> 26 #include <linux/fs.h> ··· 32 #include <linux/blkdev.h> 33 #include <linux/interrupt.h> 34 #include <linux/device.h> 35 + #include <linux/smp_lock.h> 36 #include <linux/wait.h> 37 #include <linux/workqueue.h> 38 #include <linux/platform_device.h> ··· 339 tocuse = 0; 340 err = gdrom_readtoc_cmd(gd.toc, 0); 341 if (err) { 342 + pr_info("Could not get CD table of contents\n"); 343 return -ENXIO; 344 } 345 } ··· 357 } while (track >= fentry); 358 359 if ((track > 100) || (track < get_entry_track(gd.toc->first))) { 360 + pr_info("No data on the last session of the CD\n"); 361 gdrom_getsense(NULL); 362 return -ENXIO; 363 } ··· 451 goto cleanup_sense; 452 insw(GDROM_DATA_REG, &sense, sense_command->buflen/2); 453 if (sense[1] & 40) { 454 + pr_info("Drive not ready - command aborted\n"); 455 goto cleanup_sense; 456 } 457 sense_key = sense[1] & 0x0F; 458 if (sense_key < ARRAY_SIZE(sense_texts)) 459 + pr_info("%s\n", sense_texts[sense_key].text); 460 else 461 + pr_err("Unknown sense key: %d\n", sense_key); 462 if (bufstring) /* return addional sense data */ 463 memcpy(bufstring, &sense[4], 2); 464 if (sense_key < 2) ··· 492 493 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode) 494 { 495 + int ret; 496 + lock_kernel(); 497 + ret = cdrom_open(gd.cd_info, bdev, mode); 498 + unlock_kernel(); 499 + return ret; 500 } 501 502 static int gdrom_bdops_release(struct gendisk *disk, fmode_t mode) 503 { 504 + lock_kernel(); 505 cdrom_release(gd.cd_info, mode); 506 + unlock_kernel(); 507 return 0; 508 } 509 ··· 509 static int gdrom_bdops_ioctl(struct block_device *bdev, fmode_t mode, 510 unsigned cmd, unsigned long arg) 511 { 512 + int ret; 513 + 514 + lock_kernel(); 515 + ret = cdrom_ioctl(gd.cd_info, bdev, mode, cmd, arg); 516 + unlock_kernel(); 517 + 518 + return ret; 519 } 520 521 static const struct block_device_operations gdrom_bdops = { ··· 517 .open = gdrom_bdops_open, 518 .release = gdrom_bdops_release, 519 .media_changed = gdrom_bdops_mediachanged, 520 + .ioctl = gdrom_bdops_ioctl, 521 }; 522 523 static irqreturn_t gdrom_command_interrupt(int irq, void *dev_id) ··· 643 struct request *req; 644 645 while ((req = blk_fetch_request(rq)) != NULL) { 646 + if (req->cmd_type != REQ_TYPE_FS) { 647 + printk(KERN_DEBUG "gdrom: Non-fs request ignored\n"); 648 __blk_end_request_all(req, -EIO); 649 continue; 650 } 651 if (rq_data_dir(req) != READ) { 652 + pr_notice("Read only device - write request ignored\n"); 653 __blk_end_request_all(req, -EIO); 654 continue; 655 } ··· 685 firmw_ver = kstrndup(id->firmver, 16, GFP_KERNEL); 686 if (!firmw_ver) 687 goto free_manuf_name; 688 + pr_info("%s from %s with firmware %s\n", 689 model_name, manuf_name, firmw_ver); 690 err = 0; 691 kfree(firmw_ver); ··· 757 int err; 758 /* Start the device */ 759 if (gdrom_execute_diagnostic() != 1) { 760 + pr_warning("ATA Probe for GDROM failed\n"); 761 return -ENODEV; 762 } 763 /* Print out firmware ID */ ··· 767 gdrom_major = register_blkdev(0, GDROM_DEV_NAME); 768 if (gdrom_major <= 0) 769 return gdrom_major; 770 + pr_info("Registered with major number %d\n", 771 gdrom_major); 772 /* Specify basic properties of drive */ 773 gd.cd_info = kzalloc(sizeof(struct cdrom_device_info), GFP_KERNEL); ··· 818 unregister_blkdev(gdrom_major, GDROM_DEV_NAME); 819 gdrom_major = 0; 820 probe_fail_no_mem: 821 + pr_warning("Probe failed - error is 0x%X\n", err); 822 return err; 823 } 824
+53 -53
drivers/cdrom/viocd.c
··· 31 * the OS/400 partition. 32 */ 33 34 #include <linux/major.h> 35 #include <linux/blkdev.h> 36 #include <linux/cdrom.h> ··· 42 #include <linux/module.h> 43 #include <linux/completion.h> 44 #include <linux/proc_fs.h> 45 #include <linux/seq_file.h> 46 #include <linux/scatterlist.h> 47 ··· 55 #define VIOCD_DEVICE "iseries/vcd" 56 57 #define VIOCD_VERS "1.06" 58 - 59 - #define VIOCD_KERN_WARNING KERN_WARNING "viocd: " 60 - #define VIOCD_KERN_INFO KERN_INFO "viocd: " 61 62 /* 63 * Should probably make this a module parameter....sigh ··· 154 static int viocd_blk_open(struct block_device *bdev, fmode_t mode) 155 { 156 struct disk_info *di = bdev->bd_disk->private_data; 157 - return cdrom_open(&di->viocd_info, bdev, mode); 158 } 159 160 static int viocd_blk_release(struct gendisk *disk, fmode_t mode) 161 { 162 struct disk_info *di = disk->private_data; 163 cdrom_release(&di->viocd_info, mode); 164 return 0; 165 } 166 ··· 176 unsigned cmd, unsigned long arg) 177 { 178 struct disk_info *di = bdev->bd_disk->private_data; 179 - return cdrom_ioctl(&di->viocd_info, bdev, mode, cmd, arg); 180 } 181 182 static int viocd_blk_media_changed(struct gendisk *disk) ··· 195 .owner = THIS_MODULE, 196 .open = viocd_blk_open, 197 .release = viocd_blk_release, 198 - .locked_ioctl = viocd_blk_ioctl, 199 .media_changed = viocd_blk_media_changed, 200 }; 201 ··· 216 (u64)&we, VIOVERSION << 16, ((u64)device_no << 48), 217 0, 0, 0); 218 if (hvrc != 0) { 219 - printk(VIOCD_KERN_WARNING 220 - "bad rc on HvCallEvent_signalLpEventFast %d\n", 221 - (int)hvrc); 222 return -EIO; 223 } 224 ··· 226 if (we.rc) { 227 const struct vio_error_entry *err = 228 vio_lookup_rc(viocd_err_table, we.sub_result); 229 - printk(VIOCD_KERN_WARNING "bad rc %d:0x%04X on open: %s\n", 230 - we.rc, we.sub_result, err->msg); 231 return -err->errno; 232 } 233 ··· 247 viopath_targetinst(viopath_hostLp), 0, 248 VIOVERSION << 16, ((u64)device_no << 48), 0, 0, 0); 249 if (hvrc != 0) 250 - printk(VIOCD_KERN_WARNING 251 - "bad rc on HvCallEvent_signalLpEventFast %d\n", 252 - (int)hvrc); 253 } 254 255 /* Send a read or write request to OS/400 */ ··· 274 275 sg_init_table(&sg, 1); 276 if (blk_rq_map_sg(req->q, req, &sg) == 0) { 277 - printk(VIOCD_KERN_WARNING 278 - "error setting up scatter/gather list\n"); 279 return -1; 280 } 281 282 if (dma_map_sg(diskinfo->dev, &sg, 1, direction) == 0) { 283 - printk(VIOCD_KERN_WARNING "error allocating sg tce\n"); 284 return -1; 285 } 286 dmaaddr = sg_dma_address(&sg); ··· 295 ((u64)DEVICE_NR(diskinfo) << 48) | dmaaddr, 296 (u64)blk_rq_pos(req) * 512, len, 0); 297 if (hvrc != HvLpEvent_Rc_Good) { 298 - printk(VIOCD_KERN_WARNING "hv error on op %d\n", (int)hvrc); 299 return -1; 300 } 301 ··· 309 struct request *req; 310 311 while ((rwreq == 0) && ((req = blk_fetch_request(q)) != NULL)) { 312 - if (!blk_fs_request(req)) 313 __blk_end_request_all(req, -EIO); 314 else if (send_request(req) < 0) { 315 - printk(VIOCD_KERN_WARNING 316 - "unable to send message to OS/400!"); 317 __blk_end_request_all(req, -EIO); 318 } else 319 rwreq++; ··· 337 (u64)&we, VIOVERSION << 16, ((u64)device_no << 48), 338 0, 0, 0); 339 if (hvrc != 0) { 340 - printk(VIOCD_KERN_WARNING "bad rc on HvCallEvent_signalLpEventFast %d\n", 341 - (int)hvrc); 342 return -EIO; 343 } 344 ··· 348 if (we.rc) { 349 const struct vio_error_entry *err = 350 vio_lookup_rc(viocd_err_table, we.sub_result); 351 - printk(VIOCD_KERN_WARNING 352 - "bad rc %d:0x%04X on check_change: %s; Assuming no change\n", 353 - we.rc, we.sub_result, err->msg); 354 return 0; 355 } 356 ··· 
376 (u64)&we, VIOVERSION << 16, 377 (device_no << 48) | (flags << 32), 0, 0, 0); 378 if (hvrc != 0) { 379 - printk(VIOCD_KERN_WARNING "bad rc on HvCallEvent_signalLpEventFast %d\n", 380 - (int)hvrc); 381 return -EIO; 382 } 383 ··· 464 return; 465 /* First, we should NEVER get an int here...only acks */ 466 if (hvlpevent_is_int(event)) { 467 - printk(VIOCD_KERN_WARNING 468 - "Yikes! got an int in viocd event handler!\n"); 469 if (hvlpevent_need_ack(event)) { 470 event->xRc = HvLpEvent_Rc_InvalidSubtype; 471 HvCallEvent_ackLpEvent(event); ··· 518 const struct vio_error_entry *err = 519 vio_lookup_rc(viocd_err_table, 520 bevent->sub_result); 521 - printk(VIOCD_KERN_WARNING "request %p failed " 522 - "with rc %d:0x%04X: %s\n", 523 - req, event->xRc, 524 - bevent->sub_result, err->msg); 525 __blk_end_request_all(req, -EIO); 526 } else 527 __blk_end_request_all(req, 0); ··· 531 break; 532 533 default: 534 - printk(VIOCD_KERN_WARNING 535 - "message with invalid subtype %0x04X!\n", 536 - event->xSubtype & VIOMINOR_SUBTYPE_MASK); 537 if (hvlpevent_need_ack(event)) { 538 event->xRc = HvLpEvent_Rc_InvalidSubtype; 539 HvCallEvent_ackLpEvent(event); ··· 599 sprintf(c->name, VIOCD_DEVICE "%c", 'a' + deviceno); 600 601 if (register_cdrom(c) != 0) { 602 - printk(VIOCD_KERN_WARNING "Cannot register viocd CD-ROM %s!\n", 603 - c->name); 604 goto out; 605 } 606 - printk(VIOCD_KERN_INFO "cd %s is iSeries resource %10.10s " 607 - "type %4.4s, model %3.3s\n", 608 - c->name, d->rsrcname, d->type, d->model); 609 q = blk_init_queue(do_viocd_request, &viocd_reqlock); 610 if (q == NULL) { 611 - printk(VIOCD_KERN_WARNING "Cannot allocate queue for %s!\n", 612 - c->name); 613 goto out_unregister_cdrom; 614 } 615 gendisk = alloc_disk(1); 616 if (gendisk == NULL) { 617 - printk(VIOCD_KERN_WARNING "Cannot create gendisk for %s!\n", 618 - c->name); 619 goto out_cleanup_queue; 620 } 621 gendisk->major = VIOCD_MAJOR; ··· 684 return -ENODEV; 685 } 686 687 - printk(VIOCD_KERN_INFO "vers " VIOCD_VERS ", hosting partition %d\n", 688 - viopath_hostLp); 689 690 if (register_blkdev(VIOCD_MAJOR, VIOCD_DEVICE) != 0) { 691 - printk(VIOCD_KERN_WARNING "Unable to get major %d for %s\n", 692 - VIOCD_MAJOR, VIOCD_DEVICE); 693 return -EIO; 694 } 695 696 ret = viopath_open(viopath_hostLp, viomajorsubtype_cdio, 697 MAX_CD_REQ + 2); 698 if (ret) { 699 - printk(VIOCD_KERN_WARNING 700 - "error opening path to host partition %d\n", 701 - viopath_hostLp); 702 goto out_unregister; 703 } 704
··· 31 * the OS/400 partition. 32 */ 33 34 + #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 35 + 36 #include <linux/major.h> 37 #include <linux/blkdev.h> 38 #include <linux/cdrom.h> ··· 40 #include <linux/module.h> 41 #include <linux/completion.h> 42 #include <linux/proc_fs.h> 43 + #include <linux/smp_lock.h> 44 #include <linux/seq_file.h> 45 #include <linux/scatterlist.h> 46 ··· 52 #define VIOCD_DEVICE "iseries/vcd" 53 54 #define VIOCD_VERS "1.06" 55 56 /* 57 * Should probably make this a module parameter....sigh ··· 154 static int viocd_blk_open(struct block_device *bdev, fmode_t mode) 155 { 156 struct disk_info *di = bdev->bd_disk->private_data; 157 + int ret; 158 + 159 + lock_kernel(); 160 + ret = cdrom_open(&di->viocd_info, bdev, mode); 161 + unlock_kernel(); 162 + 163 + return ret; 164 } 165 166 static int viocd_blk_release(struct gendisk *disk, fmode_t mode) 167 { 168 struct disk_info *di = disk->private_data; 169 + lock_kernel(); 170 cdrom_release(&di->viocd_info, mode); 171 + unlock_kernel(); 172 return 0; 173 } 174 ··· 168 unsigned cmd, unsigned long arg) 169 { 170 struct disk_info *di = bdev->bd_disk->private_data; 171 + int ret; 172 + 173 + lock_kernel(); 174 + ret = cdrom_ioctl(&di->viocd_info, bdev, mode, cmd, arg); 175 + unlock_kernel(); 176 + 177 + return ret; 178 } 179 180 static int viocd_blk_media_changed(struct gendisk *disk) ··· 181 .owner = THIS_MODULE, 182 .open = viocd_blk_open, 183 .release = viocd_blk_release, 184 + .ioctl = viocd_blk_ioctl, 185 .media_changed = viocd_blk_media_changed, 186 }; 187 ··· 202 (u64)&we, VIOVERSION << 16, ((u64)device_no << 48), 203 0, 0, 0); 204 if (hvrc != 0) { 205 + pr_warning("bad rc on HvCallEvent_signalLpEventFast %d\n", 206 + (int)hvrc); 207 return -EIO; 208 } 209 ··· 213 if (we.rc) { 214 const struct vio_error_entry *err = 215 vio_lookup_rc(viocd_err_table, we.sub_result); 216 + pr_warning("bad rc %d:0x%04X on open: %s\n", 217 + we.rc, we.sub_result, err->msg); 218 return -err->errno; 219 } 220 ··· 234 viopath_targetinst(viopath_hostLp), 0, 235 VIOVERSION << 16, ((u64)device_no << 48), 0, 0, 0); 236 if (hvrc != 0) 237 + pr_warning("bad rc on HvCallEvent_signalLpEventFast %d\n", 238 + (int)hvrc); 239 } 240 241 /* Send a read or write request to OS/400 */ ··· 262 263 sg_init_table(&sg, 1); 264 if (blk_rq_map_sg(req->q, req, &sg) == 0) { 265 + pr_warning("error setting up scatter/gather list\n"); 266 return -1; 267 } 268 269 if (dma_map_sg(diskinfo->dev, &sg, 1, direction) == 0) { 270 + pr_warning("error allocating sg tce\n"); 271 return -1; 272 } 273 dmaaddr = sg_dma_address(&sg); ··· 284 ((u64)DEVICE_NR(diskinfo) << 48) | dmaaddr, 285 (u64)blk_rq_pos(req) * 512, len, 0); 286 if (hvrc != HvLpEvent_Rc_Good) { 287 + pr_warning("hv error on op %d\n", (int)hvrc); 288 return -1; 289 } 290 ··· 298 struct request *req; 299 300 while ((rwreq == 0) && ((req = blk_fetch_request(q)) != NULL)) { 301 + if (req->cmd_type != REQ_TYPE_FS) 302 __blk_end_request_all(req, -EIO); 303 else if (send_request(req) < 0) { 304 + pr_warning("unable to send message to OS/400!\n"); 305 __blk_end_request_all(req, -EIO); 306 } else 307 rwreq++; ··· 327 (u64)&we, VIOVERSION << 16, ((u64)device_no << 48), 328 0, 0, 0); 329 if (hvrc != 0) { 330 + pr_warning("bad rc on HvCallEvent_signalLpEventFast %d\n", 331 + (int)hvrc); 332 return -EIO; 333 } 334 ··· 338 if (we.rc) { 339 const struct vio_error_entry *err = 340 vio_lookup_rc(viocd_err_table, we.sub_result); 341 + pr_warning("bad rc %d:0x%04X on check_change: %s; Assuming no change\n", 342 + we.rc, we.sub_result, 
err->msg); 343 return 0; 344 } 345 ··· 367 (u64)&we, VIOVERSION << 16, 368 (device_no << 48) | (flags << 32), 0, 0, 0); 369 if (hvrc != 0) { 370 + pr_warning("bad rc on HvCallEvent_signalLpEventFast %d\n", 371 + (int)hvrc); 372 return -EIO; 373 } 374 ··· 455 return; 456 /* First, we should NEVER get an int here...only acks */ 457 if (hvlpevent_is_int(event)) { 458 + pr_warning("Yikes! got an int in viocd event handler!\n"); 459 if (hvlpevent_need_ack(event)) { 460 event->xRc = HvLpEvent_Rc_InvalidSubtype; 461 HvCallEvent_ackLpEvent(event); ··· 510 const struct vio_error_entry *err = 511 vio_lookup_rc(viocd_err_table, 512 bevent->sub_result); 513 + pr_warning("request %p failed with rc %d:0x%04X: %s\n", 514 + req, event->xRc, 515 + bevent->sub_result, err->msg); 516 __blk_end_request_all(req, -EIO); 517 } else 518 __blk_end_request_all(req, 0); ··· 524 break; 525 526 default: 527 + pr_warning("message with invalid subtype %0x04X!\n", 528 + event->xSubtype & VIOMINOR_SUBTYPE_MASK); 529 if (hvlpevent_need_ack(event)) { 530 event->xRc = HvLpEvent_Rc_InvalidSubtype; 531 HvCallEvent_ackLpEvent(event); ··· 593 sprintf(c->name, VIOCD_DEVICE "%c", 'a' + deviceno); 594 595 if (register_cdrom(c) != 0) { 596 + pr_warning("Cannot register viocd CD-ROM %s!\n", c->name); 597 goto out; 598 } 599 + pr_info("cd %s is iSeries resource %10.10s type %4.4s, model %3.3s\n", 600 + c->name, d->rsrcname, d->type, d->model); 601 q = blk_init_queue(do_viocd_request, &viocd_reqlock); 602 if (q == NULL) { 603 + pr_warning("Cannot allocate queue for %s!\n", c->name); 604 goto out_unregister_cdrom; 605 } 606 gendisk = alloc_disk(1); 607 if (gendisk == NULL) { 608 + pr_warning("Cannot create gendisk for %s!\n", c->name); 609 goto out_cleanup_queue; 610 } 611 gendisk->major = VIOCD_MAJOR; ··· 682 return -ENODEV; 683 } 684 685 + pr_info("vers " VIOCD_VERS ", hosting partition %d\n", viopath_hostLp); 686 687 if (register_blkdev(VIOCD_MAJOR, VIOCD_DEVICE) != 0) { 688 + pr_warning("Unable to get major %d for %s\n", 689 + VIOCD_MAJOR, VIOCD_DEVICE); 690 return -EIO; 691 } 692 693 ret = viopath_open(viopath_hostLp, viomajorsubtype_cdio, 694 MAX_CD_REQ + 2); 695 if (ret) { 696 + pr_warning("error opening path to host partition %d\n", 697 + viopath_hostLp); 698 goto out_unregister; 699 } 700
+10 -7
drivers/ide/ide-atapi.c
··· 190 191 BUG_ON(sense_len > sizeof(*sense)); 192 193 - if (blk_sense_request(rq) || drive->sense_rq_armed) 194 return; 195 196 memset(sense, 0, sizeof(*sense)); ··· 307 308 int ide_cd_get_xferlen(struct request *rq) 309 { 310 - if (blk_fs_request(rq)) 311 return 32768; 312 - else if (blk_sense_request(rq) || blk_pc_request(rq) || 313 - rq->cmd_type == REQ_TYPE_ATA_PC) 314 return blk_rq_bytes(rq); 315 - else 316 return 0; 317 } 318 EXPORT_SYMBOL_GPL(ide_cd_get_xferlen); 319 ··· 477 if (uptodate == 0) 478 drive->failed_pc = NULL; 479 480 - if (blk_special_request(rq)) { 481 rq->errors = 0; 482 error = 0; 483 } else { 484 485 - if (blk_fs_request(rq) == 0 && uptodate <= 0) { 486 if (rq->errors == 0) 487 rq->errors = -EIO; 488 }
··· 190 191 BUG_ON(sense_len > sizeof(*sense)); 192 193 + if (rq->cmd_type == REQ_TYPE_SENSE || drive->sense_rq_armed) 194 return; 195 196 memset(sense, 0, sizeof(*sense)); ··· 307 308 int ide_cd_get_xferlen(struct request *rq) 309 { 310 + switch (rq->cmd_type) { 311 + case REQ_TYPE_FS: 312 return 32768; 313 + case REQ_TYPE_SENSE: 314 + case REQ_TYPE_BLOCK_PC: 315 + case REQ_TYPE_ATA_PC: 316 return blk_rq_bytes(rq); 317 + default: 318 return 0; 319 + } 320 } 321 EXPORT_SYMBOL_GPL(ide_cd_get_xferlen); 322 ··· 474 if (uptodate == 0) 475 drive->failed_pc = NULL; 476 477 + if (rq->cmd_type == REQ_TYPE_SPECIAL) { 478 rq->errors = 0; 479 error = 0; 480 } else { 481 482 + if (rq->cmd_type != REQ_TYPE_FS && uptodate <= 0) { 483 if (rq->errors == 0) 484 rq->errors = -EIO; 485 }
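The ide-atapi hunks (like the ide-cd, ide-eh and ide-floppy ones below) open-code request-type tests that previously hid behind helper macros such as blk_fs_request() and blk_pc_request(). Those helpers were essentially thin wrappers around rq->cmd_type, so the conversion is mechanical; the self-contained snippet below sketches the equivalence, using illustrative enum values rather than the kernel's own definitions:

    #include <stdio.h>

    /* stand-in types; values are illustrative, not the kernel's */
    enum rq_cmd_type { REQ_TYPE_FS = 1, REQ_TYPE_BLOCK_PC, REQ_TYPE_SENSE, REQ_TYPE_SPECIAL };
    struct request { enum rq_cmd_type cmd_type; };

    /* roughly how the removed helpers were defined */
    #define blk_fs_request(rq)  ((rq)->cmd_type == REQ_TYPE_FS)
    #define blk_pc_request(rq)  ((rq)->cmd_type == REQ_TYPE_BLOCK_PC)

    int main(void)
    {
            struct request rq = { REQ_TYPE_FS };

            /* the macro and the open-coded test agree */
            printf("%d %d\n", blk_fs_request(&rq), rq.cmd_type == REQ_TYPE_FS);
            return 0;
    }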
+61 -37
drivers/ide/ide-cd.c
··· 31 #include <linux/delay.h> 32 #include <linux/timer.h> 33 #include <linux/seq_file.h> 34 #include <linux/slab.h> 35 #include <linux/interrupt.h> 36 #include <linux/errno.h> ··· 177 if (!sense->valid) 178 break; 179 if (failed_command == NULL || 180 - !blk_fs_request(failed_command)) 181 break; 182 sector = (sense->information[0] << 24) | 183 (sense->information[1] << 16) | ··· 293 "stat 0x%x", 294 rq->cmd[0], rq->cmd_type, err, stat); 295 296 - if (blk_sense_request(rq)) { 297 /* 298 * We got an error trying to get sense info from the drive 299 * (probably while trying to recover from a former error). ··· 304 } 305 306 /* if we have an error, pass CHECK_CONDITION as the SCSI status byte */ 307 - if (blk_pc_request(rq) && !rq->errors) 308 rq->errors = SAM_STAT_CHECK_CONDITION; 309 310 if (blk_noretry_request(rq)) ··· 312 313 switch (sense_key) { 314 case NOT_READY: 315 - if (blk_fs_request(rq) && rq_data_dir(rq) == WRITE) { 316 if (ide_cd_breathe(drive, rq)) 317 return 1; 318 } else { 319 cdrom_saw_media_change(drive); 320 321 - if (blk_fs_request(rq) && !blk_rq_quiet(rq)) 322 printk(KERN_ERR PFX "%s: tray open\n", 323 drive->name); 324 } ··· 328 case UNIT_ATTENTION: 329 cdrom_saw_media_change(drive); 330 331 - if (blk_fs_request(rq) == 0) 332 return 0; 333 334 /* ··· 354 * No point in retrying after an illegal request or data 355 * protect error. 356 */ 357 - if (!blk_rq_quiet(rq)) 358 ide_dump_status(drive, "command error", stat); 359 do_end_request = 1; 360 break; ··· 363 * No point in re-trying a zillion times on a bad sector. 364 * If we got here the error is not correctable. 365 */ 366 - if (!blk_rq_quiet(rq)) 367 ide_dump_status(drive, "media error " 368 "(bad sector)", stat); 369 do_end_request = 1; 370 break; 371 case BLANK_CHECK: 372 /* disk appears blank? */ 373 - if (!blk_rq_quiet(rq)) 374 ide_dump_status(drive, "media error (blank)", 375 stat); 376 do_end_request = 1; 377 break; 378 default: 379 - if (blk_fs_request(rq) == 0) 380 break; 381 if (err & ~ATA_ABORTED) { 382 /* go to the default handler for other errors */ ··· 387 do_end_request = 1; 388 } 389 390 - if (blk_fs_request(rq) == 0) { 391 rq->cmd_flags |= REQ_FAILED; 392 do_end_request = 1; 393 } ··· 534 ide_expiry_t *expiry = NULL; 535 int dma_error = 0, dma, thislen, uptodate = 0; 536 int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0; 537 - int sense = blk_sense_request(rq); 538 unsigned int timeout; 539 u16 len; 540 u8 ireason, stat; ··· 577 578 ide_read_bcount_and_ireason(drive, &len, &ireason); 579 580 - thislen = blk_fs_request(rq) ? len : cmd->nleft; 581 if (thislen > len) 582 thislen = len; 583 ··· 586 587 /* If DRQ is clear, the command has completed. */ 588 if ((stat & ATA_DRQ) == 0) { 589 - if (blk_fs_request(rq)) { 590 /* 591 * If we're not done reading/writing, complain. 592 * Otherwise, complete the command normally. ··· 600 rq->cmd_flags |= REQ_FAILED; 601 uptodate = 0; 602 } 603 - } else if (!blk_pc_request(rq)) { 604 ide_cd_request_sense_fixup(drive, cmd); 605 606 uptodate = cmd->nleft ? 
0 : 1; ··· 649 650 /* pad, if necessary */ 651 if (len > 0) { 652 - if (blk_fs_request(rq) == 0 || write == 0) 653 ide_pad_transfer(drive, write, len); 654 else { 655 printk(KERN_ERR PFX "%s: confused, missing data\n", ··· 658 } 659 } 660 661 - if (blk_pc_request(rq)) { 662 timeout = rq->timeout; 663 } else { 664 timeout = ATAPI_WAIT_PC; 665 - if (!blk_fs_request(rq)) 666 expiry = ide_cd_expiry; 667 } 668 ··· 671 return ide_started; 672 673 out_end: 674 - if (blk_pc_request(rq) && rc == 0) { 675 rq->resid_len = 0; 676 blk_end_request_all(rq, 0); 677 hwif->rq = NULL; ··· 679 if (sense && uptodate) 680 ide_cd_complete_failed_rq(drive, rq); 681 682 - if (blk_fs_request(rq)) { 683 if (cmd->nleft == 0) 684 uptodate = 1; 685 } else { ··· 692 return ide_stopped; 693 694 /* make sure it's fully ended */ 695 - if (blk_fs_request(rq) == 0) { 696 rq->resid_len -= cmd->nbytes - cmd->nleft; 697 if (uptodate == 0 && (cmd->tf_flags & IDE_TFLAG_WRITE)) 698 rq->resid_len += cmd->last_xfer_len; ··· 752 ide_debug_log(IDE_DBG_PC, "rq->cmd[0]: 0x%x, rq->cmd_type: 0x%x", 753 rq->cmd[0], rq->cmd_type); 754 755 - if (blk_pc_request(rq)) 756 rq->cmd_flags |= REQ_QUIET; 757 else 758 rq->cmd_flags &= ~REQ_FAILED; ··· 793 if (drive->debug_mask & IDE_DBG_RQ) 794 blk_dump_rq_flags(rq, "ide_cd_do_request"); 795 796 - if (blk_fs_request(rq)) { 797 if (cdrom_start_rw(drive, rq) == ide_stopped) 798 goto out_end; 799 - } else if (blk_sense_request(rq) || blk_pc_request(rq) || 800 - rq->cmd_type == REQ_TYPE_ATA_PC) { 801 if (!rq->timeout) 802 rq->timeout = ATAPI_WAIT_PC; 803 804 cdrom_do_block_pc(drive, rq); 805 - } else if (blk_special_request(rq)) { 806 /* right now this can only be a reset... */ 807 uptodate = 1; 808 goto out_end; 809 - } else 810 BUG(); 811 812 /* prepare sense request for this command */ 813 ide_prep_sense(drive, rq); ··· 824 825 cmd.rq = rq; 826 827 - if (blk_fs_request(rq) || blk_rq_bytes(rq)) { 828 ide_init_sg_cmd(&cmd, blk_rq_bytes(rq)); 829 ide_map_sg(drive, &cmd); 830 } ··· 1380 1381 static int ide_cdrom_prep_fn(struct request_queue *q, struct request *rq) 1382 { 1383 - if (blk_fs_request(rq)) 1384 return ide_cdrom_prep_fs(q, rq); 1385 - else if (blk_pc_request(rq)) 1386 return ide_cdrom_prep_pc(rq); 1387 1388 return 0; ··· 1599 1600 static int idecd_open(struct block_device *bdev, fmode_t mode) 1601 { 1602 - struct cdrom_info *info = ide_cd_get(bdev->bd_disk); 1603 - int rc = -ENOMEM; 1604 1605 if (!info) 1606 - return -ENXIO; 1607 1608 rc = cdrom_open(&info->devinfo, bdev, mode); 1609 - 1610 if (rc < 0) 1611 ide_cd_put(info); 1612 - 1613 return rc; 1614 } 1615 ··· 1619 { 1620 struct cdrom_info *info = ide_drv_g(disk, cdrom_info); 1621 1622 cdrom_release(&info->devinfo, mode); 1623 1624 ide_cd_put(info); 1625 1626 return 0; 1627 } ··· 1667 return 0; 1668 } 1669 1670 - static int idecd_ioctl(struct block_device *bdev, fmode_t mode, 1671 unsigned int cmd, unsigned long arg) 1672 { 1673 struct cdrom_info *info = ide_drv_g(bdev->bd_disk, cdrom_info); ··· 1689 return err; 1690 } 1691 1692 static int idecd_media_changed(struct gendisk *disk) 1693 { 1694 struct cdrom_info *info = ide_drv_g(disk, cdrom_info); ··· 1722 .owner = THIS_MODULE, 1723 .open = idecd_open, 1724 .release = idecd_release, 1725 - .locked_ioctl = idecd_ioctl, 1726 .media_changed = idecd_media_changed, 1727 .revalidate_disk = idecd_revalidate_disk 1728 };
··· 31 #include <linux/delay.h> 32 #include <linux/timer.h> 33 #include <linux/seq_file.h> 34 + #include <linux/smp_lock.h> 35 #include <linux/slab.h> 36 #include <linux/interrupt.h> 37 #include <linux/errno.h> ··· 176 if (!sense->valid) 177 break; 178 if (failed_command == NULL || 179 + failed_command->cmd_type != REQ_TYPE_FS) 180 break; 181 sector = (sense->information[0] << 24) | 182 (sense->information[1] << 16) | ··· 292 "stat 0x%x", 293 rq->cmd[0], rq->cmd_type, err, stat); 294 295 + if (rq->cmd_type == REQ_TYPE_SENSE) { 296 /* 297 * We got an error trying to get sense info from the drive 298 * (probably while trying to recover from a former error). ··· 303 } 304 305 /* if we have an error, pass CHECK_CONDITION as the SCSI status byte */ 306 + if (rq->cmd_type == REQ_TYPE_BLOCK_PC && !rq->errors) 307 rq->errors = SAM_STAT_CHECK_CONDITION; 308 309 if (blk_noretry_request(rq)) ··· 311 312 switch (sense_key) { 313 case NOT_READY: 314 + if (rq->cmd_type == REQ_TYPE_FS && rq_data_dir(rq) == WRITE) { 315 if (ide_cd_breathe(drive, rq)) 316 return 1; 317 } else { 318 cdrom_saw_media_change(drive); 319 320 + if (rq->cmd_type == REQ_TYPE_FS && 321 + !(rq->cmd_flags & REQ_QUIET)) 322 printk(KERN_ERR PFX "%s: tray open\n", 323 drive->name); 324 } ··· 326 case UNIT_ATTENTION: 327 cdrom_saw_media_change(drive); 328 329 + if (rq->cmd_type != REQ_TYPE_FS) 330 return 0; 331 332 /* ··· 352 * No point in retrying after an illegal request or data 353 * protect error. 354 */ 355 + if (!(rq->cmd_flags & REQ_QUIET)) 356 ide_dump_status(drive, "command error", stat); 357 do_end_request = 1; 358 break; ··· 361 * No point in re-trying a zillion times on a bad sector. 362 * If we got here the error is not correctable. 363 */ 364 + if (!(rq->cmd_flags & REQ_QUIET)) 365 ide_dump_status(drive, "media error " 366 "(bad sector)", stat); 367 do_end_request = 1; 368 break; 369 case BLANK_CHECK: 370 /* disk appears blank? */ 371 + if (!(rq->cmd_flags & REQ_QUIET)) 372 ide_dump_status(drive, "media error (blank)", 373 stat); 374 do_end_request = 1; 375 break; 376 default: 377 + if (rq->cmd_type != REQ_TYPE_FS) 378 break; 379 if (err & ~ATA_ABORTED) { 380 /* go to the default handler for other errors */ ··· 385 do_end_request = 1; 386 } 387 388 + if (rq->cmd_type != REQ_TYPE_FS) { 389 rq->cmd_flags |= REQ_FAILED; 390 do_end_request = 1; 391 } ··· 532 ide_expiry_t *expiry = NULL; 533 int dma_error = 0, dma, thislen, uptodate = 0; 534 int write = (rq_data_dir(rq) == WRITE) ? 1 : 0, rc = 0; 535 + int sense = (rq->cmd_type == REQ_TYPE_SENSE); 536 unsigned int timeout; 537 u16 len; 538 u8 ireason, stat; ··· 575 576 ide_read_bcount_and_ireason(drive, &len, &ireason); 577 578 + thislen = (rq->cmd_type == REQ_TYPE_FS) ? len : cmd->nleft; 579 if (thislen > len) 580 thislen = len; 581 ··· 584 585 /* If DRQ is clear, the command has completed. */ 586 if ((stat & ATA_DRQ) == 0) { 587 + if (rq->cmd_type == REQ_TYPE_FS) { 588 /* 589 * If we're not done reading/writing, complain. 590 * Otherwise, complete the command normally. ··· 598 rq->cmd_flags |= REQ_FAILED; 599 uptodate = 0; 600 } 601 + } else if (rq->cmd_type != REQ_TYPE_BLOCK_PC) { 602 ide_cd_request_sense_fixup(drive, cmd); 603 604 uptodate = cmd->nleft ? 
0 : 1; ··· 647 648 /* pad, if necessary */ 649 if (len > 0) { 650 + if (rq->cmd_type != REQ_TYPE_FS || write == 0) 651 ide_pad_transfer(drive, write, len); 652 else { 653 printk(KERN_ERR PFX "%s: confused, missing data\n", ··· 656 } 657 } 658 659 + if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 660 timeout = rq->timeout; 661 } else { 662 timeout = ATAPI_WAIT_PC; 663 + if (rq->cmd_type != REQ_TYPE_FS) 664 expiry = ide_cd_expiry; 665 } 666 ··· 669 return ide_started; 670 671 out_end: 672 + if (rq->cmd_type == REQ_TYPE_BLOCK_PC && rc == 0) { 673 rq->resid_len = 0; 674 blk_end_request_all(rq, 0); 675 hwif->rq = NULL; ··· 677 if (sense && uptodate) 678 ide_cd_complete_failed_rq(drive, rq); 679 680 + if (rq->cmd_type == REQ_TYPE_FS) { 681 if (cmd->nleft == 0) 682 uptodate = 1; 683 } else { ··· 690 return ide_stopped; 691 692 /* make sure it's fully ended */ 693 + if (rq->cmd_type != REQ_TYPE_FS) { 694 rq->resid_len -= cmd->nbytes - cmd->nleft; 695 if (uptodate == 0 && (cmd->tf_flags & IDE_TFLAG_WRITE)) 696 rq->resid_len += cmd->last_xfer_len; ··· 750 ide_debug_log(IDE_DBG_PC, "rq->cmd[0]: 0x%x, rq->cmd_type: 0x%x", 751 rq->cmd[0], rq->cmd_type); 752 753 + if (rq->cmd_type == REQ_TYPE_BLOCK_PC) 754 rq->cmd_flags |= REQ_QUIET; 755 else 756 rq->cmd_flags &= ~REQ_FAILED; ··· 791 if (drive->debug_mask & IDE_DBG_RQ) 792 blk_dump_rq_flags(rq, "ide_cd_do_request"); 793 794 + switch (rq->cmd_type) { 795 + case REQ_TYPE_FS: 796 if (cdrom_start_rw(drive, rq) == ide_stopped) 797 goto out_end; 798 + break; 799 + case REQ_TYPE_SENSE: 800 + case REQ_TYPE_BLOCK_PC: 801 + case REQ_TYPE_ATA_PC: 802 if (!rq->timeout) 803 rq->timeout = ATAPI_WAIT_PC; 804 805 cdrom_do_block_pc(drive, rq); 806 + break; 807 + case REQ_TYPE_SPECIAL: 808 /* right now this can only be a reset... */ 809 uptodate = 1; 810 goto out_end; 811 + default: 812 BUG(); 813 + } 814 815 /* prepare sense request for this command */ 816 ide_prep_sense(drive, rq); ··· 817 818 cmd.rq = rq; 819 820 + if (rq->cmd_type == REQ_TYPE_FS || blk_rq_bytes(rq)) { 821 ide_init_sg_cmd(&cmd, blk_rq_bytes(rq)); 822 ide_map_sg(drive, &cmd); 823 } ··· 1373 1374 static int ide_cdrom_prep_fn(struct request_queue *q, struct request *rq) 1375 { 1376 + if (rq->cmd_type == REQ_TYPE_FS) 1377 return ide_cdrom_prep_fs(q, rq); 1378 + else if (rq->cmd_type == REQ_TYPE_BLOCK_PC) 1379 return ide_cdrom_prep_pc(rq); 1380 1381 return 0; ··· 1592 1593 static int idecd_open(struct block_device *bdev, fmode_t mode) 1594 { 1595 + struct cdrom_info *info; 1596 + int rc = -ENXIO; 1597 1598 + lock_kernel(); 1599 + info = ide_cd_get(bdev->bd_disk); 1600 if (!info) 1601 + goto out; 1602 1603 rc = cdrom_open(&info->devinfo, bdev, mode); 1604 if (rc < 0) 1605 ide_cd_put(info); 1606 + out: 1607 + unlock_kernel(); 1608 return rc; 1609 } 1610 ··· 1610 { 1611 struct cdrom_info *info = ide_drv_g(disk, cdrom_info); 1612 1613 + lock_kernel(); 1614 cdrom_release(&info->devinfo, mode); 1615 1616 ide_cd_put(info); 1617 + unlock_kernel(); 1618 1619 return 0; 1620 } ··· 1656 return 0; 1657 } 1658 1659 + static int idecd_locked_ioctl(struct block_device *bdev, fmode_t mode, 1660 unsigned int cmd, unsigned long arg) 1661 { 1662 struct cdrom_info *info = ide_drv_g(bdev->bd_disk, cdrom_info); ··· 1678 return err; 1679 } 1680 1681 + static int idecd_ioctl(struct block_device *bdev, fmode_t mode, 1682 + unsigned int cmd, unsigned long arg) 1683 + { 1684 + int ret; 1685 + 1686 + lock_kernel(); 1687 + ret = idecd_locked_ioctl(bdev, mode, cmd, arg); 1688 + unlock_kernel(); 1689 + 1690 + return ret; 1691 + } 1692 + 1693 + 
1694 static int idecd_media_changed(struct gendisk *disk) 1695 { 1696 struct cdrom_info *info = ide_drv_g(disk, cdrom_info); ··· 1698 .owner = THIS_MODULE, 1699 .open = idecd_open, 1700 .release = idecd_release, 1701 + .ioctl = idecd_ioctl, 1702 .media_changed = idecd_media_changed, 1703 .revalidate_disk = idecd_revalidate_disk 1704 };
+1 -1
drivers/ide/ide-cd_ioctl.c
··· 454 touch it at all. */ 455 456 if (cgc->data_direction == CGC_DATA_WRITE) 457 - flags |= REQ_RW; 458 459 if (cgc->sense) 460 memset(cgc->sense, 0, sizeof(struct request_sense));
··· 454 touch it at all. */ 455 456 if (cgc->data_direction == CGC_DATA_WRITE) 457 + flags |= REQ_WRITE; 458 459 if (cgc->sense) 460 memset(cgc->sense, 0, sizeof(struct request_sense));
+12 -6
drivers/ide/ide-disk.c
··· 184 ide_hwif_t *hwif = drive->hwif; 185 186 BUG_ON(drive->dev_flags & IDE_DFLAG_BLOCKED); 187 - BUG_ON(!blk_fs_request(rq)); 188 189 ledtrig_ide_activity(); 190 ··· 427 drive->dev_flags |= IDE_DFLAG_NOHPA; /* disable HPA on resume */ 428 } 429 430 - static void idedisk_prepare_flush(struct request_queue *q, struct request *rq) 431 { 432 ide_drive_t *drive = q->queuedata; 433 - struct ide_cmd *cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC); 434 435 /* FIXME: map struct ide_taskfile on rq->cmd[] */ 436 BUG_ON(cmd == NULL); ··· 453 rq->cmd_type = REQ_TYPE_ATA_TASKFILE; 454 rq->special = cmd; 455 cmd->rq = rq; 456 } 457 458 ide_devset_get(multcount, mult_count); ··· 520 { 521 u16 *id = drive->id; 522 unsigned ordered = QUEUE_ORDERED_NONE; 523 - prepare_flush_fn *prep_fn = NULL; 524 525 if (drive->dev_flags & IDE_DFLAG_WCACHE) { 526 unsigned long long capacity; ··· 544 545 if (barrier) { 546 ordered = QUEUE_ORDERED_DRAIN_FLUSH; 547 - prep_fn = idedisk_prepare_flush; 548 } 549 } else 550 ordered = QUEUE_ORDERED_DRAIN; 551 552 - blk_queue_ordered(drive->queue, ordered, prep_fn); 553 } 554 555 ide_devset_get_flag(wcache, IDE_DFLAG_WCACHE);
··· 184 ide_hwif_t *hwif = drive->hwif; 185 186 BUG_ON(drive->dev_flags & IDE_DFLAG_BLOCKED); 187 + BUG_ON(rq->cmd_type != REQ_TYPE_FS); 188 189 ledtrig_ide_activity(); 190 ··· 427 drive->dev_flags |= IDE_DFLAG_NOHPA; /* disable HPA on resume */ 428 } 429 430 + static int idedisk_prep_fn(struct request_queue *q, struct request *rq) 431 { 432 ide_drive_t *drive = q->queuedata; 433 + struct ide_cmd *cmd; 434 + 435 + if (!(rq->cmd_flags & REQ_FLUSH)) 436 + return BLKPREP_OK; 437 + 438 + cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC); 439 440 /* FIXME: map struct ide_taskfile on rq->cmd[] */ 441 BUG_ON(cmd == NULL); ··· 448 rq->cmd_type = REQ_TYPE_ATA_TASKFILE; 449 rq->special = cmd; 450 cmd->rq = rq; 451 + 452 + return BLKPREP_OK; 453 } 454 455 ide_devset_get(multcount, mult_count); ··· 513 { 514 u16 *id = drive->id; 515 unsigned ordered = QUEUE_ORDERED_NONE; 516 517 if (drive->dev_flags & IDE_DFLAG_WCACHE) { 518 unsigned long long capacity; ··· 538 539 if (barrier) { 540 ordered = QUEUE_ORDERED_DRAIN_FLUSH; 541 + blk_queue_prep_rq(drive->queue, idedisk_prep_fn); 542 } 543 } else 544 ordered = QUEUE_ORDERED_DRAIN; 545 546 + blk_queue_ordered(drive->queue, ordered); 547 } 548 549 ide_devset_get_flag(wcache, IDE_DFLAG_WCACHE);
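In ide-disk the flush setup moves out of a dedicated prepare_flush_fn and into the driver's generic request-prepare hook, which now keys off REQ_FLUSH and returns BLKPREP_OK for everything else; blk_queue_ordered() correspondingly loses its callback argument. The standalone sketch below only illustrates that "one prepare hook, act on flagged requests" shape; the flag bit and return value are made-up constants, not the kernel's:

    #include <stdio.h>

    #define REQ_FLUSH   (1u << 0)   /* illustrative flag bit */
    #define BLKPREP_OK  0           /* illustrative return value */

    struct req {
            unsigned int flags;
            const char *cmd;
    };

    static int prep_fn(struct req *rq)
    {
            if (!(rq->flags & REQ_FLUSH))
                    return BLKPREP_OK;      /* ordinary I/O: nothing to build */
            rq->cmd = "FLUSH CACHE";        /* stand-in for the real command setup */
            return BLKPREP_OK;
    }

    int main(void)
    {
            struct req rw = { 0, "READ" }, flush = { REQ_FLUSH, NULL };

            prep_fn(&rw);
            prep_fn(&flush);
            printf("%s / %s\n", rw.cmd, flush.cmd);
            return 0;
    }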
+7 -2
drivers/ide/ide-disk_ioctl.c
··· 1 #include <linux/kernel.h> 2 #include <linux/ide.h> 3 #include <linux/hdreg.h> 4 5 #include "ide-disk.h" 6 ··· 19 { 20 int err; 21 22 err = ide_setting_ioctl(drive, bdev, cmd, arg, ide_disk_ioctl_settings); 23 if (err != -EOPNOTSUPP) 24 - return err; 25 26 - return generic_ide_ioctl(drive, bdev, cmd, arg); 27 }
··· 1 #include <linux/kernel.h> 2 #include <linux/ide.h> 3 #include <linux/hdreg.h> 4 + #include <linux/smp_lock.h> 5 6 #include "ide-disk.h" 7 ··· 18 { 19 int err; 20 21 + lock_kernel(); 22 err = ide_setting_ioctl(drive, bdev, cmd, arg, ide_disk_ioctl_settings); 23 if (err != -EOPNOTSUPP) 24 + goto out; 25 26 + err = generic_ide_ioctl(drive, bdev, cmd, arg); 27 + out: 28 + unlock_kernel(); 29 + return err; 30 }
+3 -2
drivers/ide/ide-eh.c
··· 122 return ide_stopped; 123 124 /* retry only "normal" I/O: */ 125 - if (!blk_fs_request(rq)) { 126 if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { 127 struct ide_cmd *cmd = rq->special; 128 ··· 146 { 147 struct request *rq = drive->hwif->rq; 148 149 - if (rq && blk_special_request(rq) && rq->cmd[0] == REQ_DRIVE_RESET) { 150 if (err <= 0 && rq->errors == 0) 151 rq->errors = -EIO; 152 ide_complete_rq(drive, err ? err : 0, blk_rq_bytes(rq));
··· 122 return ide_stopped; 123 124 /* retry only "normal" I/O: */ 125 + if (rq->cmd_type != REQ_TYPE_FS) { 126 if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { 127 struct ide_cmd *cmd = rq->special; 128 ··· 146 { 147 struct request *rq = drive->hwif->rq; 148 149 + if (rq && rq->cmd_type == REQ_TYPE_SPECIAL && 150 + rq->cmd[0] == REQ_DRIVE_RESET) { 151 if (err <= 0 && rq->errors == 0) 152 rq->errors = -EIO; 153 ide_complete_rq(drive, err ? err : 0, blk_rq_bytes(rq));
+17 -10
drivers/ide/ide-floppy.c
··· 73 drive->failed_pc = NULL; 74 75 if (pc->c[0] == GPCMD_READ_10 || pc->c[0] == GPCMD_WRITE_10 || 76 - (rq && blk_pc_request(rq))) 77 uptodate = 1; /* FIXME */ 78 else if (pc->c[0] == GPCMD_REQUEST_SENSE) { 79 ··· 98 "Aborting request!\n"); 99 } 100 101 - if (blk_special_request(rq)) 102 rq->errors = uptodate ? 0 : IDE_DRV_ERROR_GENERAL; 103 104 return uptodate; ··· 207 memcpy(rq->cmd, pc->c, 12); 208 209 pc->rq = rq; 210 - if (rq->cmd_flags & REQ_RW) 211 pc->flags |= PC_FLAG_WRITING; 212 213 pc->flags |= PC_FLAG_DMA_OK; ··· 247 } else 248 printk(KERN_ERR PFX "%s: I/O error\n", drive->name); 249 250 - if (blk_special_request(rq)) { 251 rq->errors = 0; 252 ide_complete_rq(drive, 0, blk_rq_bytes(rq)); 253 return ide_stopped; 254 } else 255 goto out_end; 256 } 257 - if (blk_fs_request(rq)) { 258 if (((long)blk_rq_pos(rq) % floppy->bs_factor) || 259 (blk_rq_sectors(rq) % floppy->bs_factor)) { 260 printk(KERN_ERR PFX "%s: unsupported r/w rq size\n", ··· 265 } 266 pc = &floppy->queued_pc; 267 idefloppy_create_rw_cmd(drive, pc, rq, (unsigned long)block); 268 - } else if (blk_special_request(rq) || blk_sense_request(rq)) { 269 pc = (struct ide_atapi_pc *)rq->special; 270 - } else if (blk_pc_request(rq)) { 271 pc = &floppy->queued_pc; 272 idefloppy_blockpc_cmd(floppy, pc, rq); 273 - } else 274 BUG(); 275 276 ide_prep_sense(drive, rq); 277 ··· 287 288 cmd.rq = rq; 289 290 - if (blk_fs_request(rq) || blk_rq_bytes(rq)) { 291 ide_init_sg_cmd(&cmd, blk_rq_bytes(rq)); 292 ide_map_sg(drive, &cmd); 293 } ··· 297 return ide_floppy_issue_pc(drive, &cmd, pc); 298 out_end: 299 drive->failed_pc = NULL; 300 - if (blk_fs_request(rq) == 0 && rq->errors == 0) 301 rq->errors = -EIO; 302 ide_complete_rq(drive, -EIO, blk_rq_bytes(rq)); 303 return ide_stopped;
··· 73 drive->failed_pc = NULL; 74 75 if (pc->c[0] == GPCMD_READ_10 || pc->c[0] == GPCMD_WRITE_10 || 76 + (rq && rq->cmd_type == REQ_TYPE_BLOCK_PC)) 77 uptodate = 1; /* FIXME */ 78 else if (pc->c[0] == GPCMD_REQUEST_SENSE) { 79 ··· 98 "Aborting request!\n"); 99 } 100 101 + if (rq->cmd_type == REQ_TYPE_SPECIAL) 102 rq->errors = uptodate ? 0 : IDE_DRV_ERROR_GENERAL; 103 104 return uptodate; ··· 207 memcpy(rq->cmd, pc->c, 12); 208 209 pc->rq = rq; 210 + if (rq->cmd_flags & REQ_WRITE) 211 pc->flags |= PC_FLAG_WRITING; 212 213 pc->flags |= PC_FLAG_DMA_OK; ··· 247 } else 248 printk(KERN_ERR PFX "%s: I/O error\n", drive->name); 249 250 + if (rq->cmd_type == REQ_TYPE_SPECIAL) { 251 rq->errors = 0; 252 ide_complete_rq(drive, 0, blk_rq_bytes(rq)); 253 return ide_stopped; 254 } else 255 goto out_end; 256 } 257 + 258 + switch (rq->cmd_type) { 259 + case REQ_TYPE_FS: 260 if (((long)blk_rq_pos(rq) % floppy->bs_factor) || 261 (blk_rq_sectors(rq) % floppy->bs_factor)) { 262 printk(KERN_ERR PFX "%s: unsupported r/w rq size\n", ··· 263 } 264 pc = &floppy->queued_pc; 265 idefloppy_create_rw_cmd(drive, pc, rq, (unsigned long)block); 266 + break; 267 + case REQ_TYPE_SPECIAL: 268 + case REQ_TYPE_SENSE: 269 pc = (struct ide_atapi_pc *)rq->special; 270 + break; 271 + case REQ_TYPE_BLOCK_PC: 272 pc = &floppy->queued_pc; 273 idefloppy_blockpc_cmd(floppy, pc, rq); 274 + break; 275 + default: 276 BUG(); 277 + } 278 279 ide_prep_sense(drive, rq); 280 ··· 280 281 cmd.rq = rq; 282 283 + if (rq->cmd_type == REQ_TYPE_FS || blk_rq_bytes(rq)) { 284 ide_init_sg_cmd(&cmd, blk_rq_bytes(rq)); 285 ide_map_sg(drive, &cmd); 286 } ··· 290 return ide_floppy_issue_pc(drive, &cmd, pc); 291 out_end: 292 drive->failed_pc = NULL; 293 + if (rq->cmd_type != REQ_TYPE_FS && rq->errors == 0) 294 rq->errors = -EIO; 295 ide_complete_rq(drive, -EIO, blk_rq_bytes(rq)); 296 return ide_stopped;
+9 -3
drivers/ide/ide-floppy_ioctl.c
··· 5 #include <linux/kernel.h> 6 #include <linux/ide.h> 7 #include <linux/cdrom.h> 8 9 #include <asm/unaligned.h> 10 ··· 276 void __user *argp = (void __user *)arg; 277 int err; 278 279 - if (cmd == CDROMEJECT || cmd == CDROM_LOCKDOOR) 280 - return ide_floppy_lockdoor(drive, &pc, arg, cmd); 281 282 err = ide_floppy_format_ioctl(drive, &pc, mode, cmd, argp); 283 if (err != -ENOTTY) 284 - return err; 285 286 /* 287 * skip SCSI_IOCTL_SEND_COMMAND (deprecated) ··· 297 if (err == -ENOTTY) 298 err = generic_ide_ioctl(drive, bdev, cmd, arg); 299 300 return err; 301 }
··· 5 #include <linux/kernel.h> 6 #include <linux/ide.h> 7 #include <linux/cdrom.h> 8 + #include <linux/smp_lock.h> 9 10 #include <asm/unaligned.h> 11 ··· 275 void __user *argp = (void __user *)arg; 276 int err; 277 278 + lock_kernel(); 279 + if (cmd == CDROMEJECT || cmd == CDROM_LOCKDOOR) { 280 + err = ide_floppy_lockdoor(drive, &pc, arg, cmd); 281 + goto out; 282 + } 283 284 err = ide_floppy_format_ioctl(drive, &pc, mode, cmd, argp); 285 if (err != -ENOTTY) 286 + goto out; 287 288 /* 289 * skip SCSI_IOCTL_SEND_COMMAND (deprecated) ··· 293 if (err == -ENOTTY) 294 err = generic_ide_ioctl(drive, bdev, cmd, arg); 295 296 + out: 297 + unlock_kernel(); 298 return err; 299 }
+17 -2
drivers/ide/ide-gd.c
··· 1 #include <linux/module.h> 2 #include <linux/types.h> 3 #include <linux/string.h> ··· 238 return ret; 239 } 240 241 static int ide_gd_release(struct gendisk *disk, fmode_t mode) 242 { 243 struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj); ··· 257 258 ide_debug_log(IDE_DBG_FUNC, "enter"); 259 260 if (idkp->openers == 1) 261 drive->disk_ops->flush(drive); 262 ··· 269 idkp->openers--; 270 271 ide_disk_put(idkp); 272 273 return 0; 274 } ··· 336 337 static const struct block_device_operations ide_gd_ops = { 338 .owner = THIS_MODULE, 339 - .open = ide_gd_open, 340 .release = ide_gd_release, 341 - .locked_ioctl = ide_gd_ioctl, 342 .getgeo = ide_gd_getgeo, 343 .media_changed = ide_gd_media_changed, 344 .unlock_native_capacity = ide_gd_unlock_native_capacity,
··· 1 + #include <linux/smp_lock.h> 2 #include <linux/module.h> 3 #include <linux/types.h> 4 #include <linux/string.h> ··· 237 return ret; 238 } 239 240 + static int ide_gd_unlocked_open(struct block_device *bdev, fmode_t mode) 241 + { 242 + int ret; 243 + 244 + lock_kernel(); 245 + ret = ide_gd_open(bdev, mode); 246 + unlock_kernel(); 247 + 248 + return ret; 249 + } 250 + 251 + 252 static int ide_gd_release(struct gendisk *disk, fmode_t mode) 253 { 254 struct ide_disk_obj *idkp = ide_drv_g(disk, ide_disk_obj); ··· 244 245 ide_debug_log(IDE_DBG_FUNC, "enter"); 246 247 + lock_kernel(); 248 if (idkp->openers == 1) 249 drive->disk_ops->flush(drive); 250 ··· 255 idkp->openers--; 256 257 ide_disk_put(idkp); 258 + unlock_kernel(); 259 260 return 0; 261 } ··· 321 322 static const struct block_device_operations ide_gd_ops = { 323 .owner = THIS_MODULE, 324 + .open = ide_gd_unlocked_open, 325 .release = ide_gd_release, 326 + .ioctl = ide_gd_ioctl, 327 .getgeo = ide_gd_getgeo, 328 .media_changed = ide_gd_media_changed, 329 .unlock_native_capacity = ide_gd_unlock_native_capacity,
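Note: ide-gd shows the BKL pushdown pattern used throughout this series: block_device_operations loses .locked_ioctl, so drivers either wrap the old method (as with ide_gd_unlocked_open above) or take lock_kernel()/unlock_kernel() inside it. A minimal sketch for a hypothetical driver, where my_open() and my_ioctl() are assumed to already exist:

	#include <linux/blkdev.h>
	#include <linux/module.h>
	#include <linux/smp_lock.h>

	/* Sketch: take the BKL explicitly in the driver rather than relying
	 * on the block layer to do it. */
	static int my_unlocked_open(struct block_device *bdev, fmode_t mode)
	{
		int ret;

		lock_kernel();
		ret = my_open(bdev, mode);
		unlock_kernel();

		return ret;
	}

	static const struct block_device_operations my_ops = {
		.owner	= THIS_MODULE,
		.open	= my_unlocked_open,
		.ioctl	= my_ioctl,	/* replaces the old .locked_ioctl */
	};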
+4 -4
drivers/ide/ide-io.c
··· 135 136 void ide_kill_rq(ide_drive_t *drive, struct request *rq) 137 { 138 - u8 drv_req = blk_special_request(rq) && rq->rq_disk; 139 u8 media = drive->media; 140 141 drive->failed_pc = NULL; ··· 145 } else { 146 if (media == ide_tape) 147 rq->errors = IDE_DRV_ERROR_GENERAL; 148 - else if (blk_fs_request(rq) == 0 && rq->errors == 0) 149 rq->errors = -EIO; 150 } 151 ··· 307 { 308 ide_startstop_t startstop; 309 310 - BUG_ON(!blk_rq_started(rq)); 311 312 #ifdef DEBUG 313 printk("%s: start_request: current=0x%08lx\n", ··· 353 pm->pm_step == IDE_PM_COMPLETED) 354 ide_complete_pm_rq(drive, rq); 355 return startstop; 356 - } else if (!rq->rq_disk && blk_special_request(rq)) 357 /* 358 * TODO: Once all ULDs have been modified to 359 * check for specific op codes rather than
··· 135 136 void ide_kill_rq(ide_drive_t *drive, struct request *rq) 137 { 138 + u8 drv_req = (rq->cmd_type == REQ_TYPE_SPECIAL) && rq->rq_disk; 139 u8 media = drive->media; 140 141 drive->failed_pc = NULL; ··· 145 } else { 146 if (media == ide_tape) 147 rq->errors = IDE_DRV_ERROR_GENERAL; 148 + else if (rq->cmd_type != REQ_TYPE_FS && rq->errors == 0) 149 rq->errors = -EIO; 150 } 151 ··· 307 { 308 ide_startstop_t startstop; 309 310 + BUG_ON(!(rq->cmd_flags & REQ_STARTED)); 311 312 #ifdef DEBUG 313 printk("%s: start_request: current=0x%08lx\n", ··· 353 pm->pm_step == IDE_PM_COMPLETED) 354 ide_complete_pm_rq(drive, rq); 355 return startstop; 356 + } else if (!rq->rq_disk && rq->cmd_type == REQ_TYPE_SPECIAL) 357 /* 358 * TODO: Once all ULDs have been modified to 359 * check for specific op codes rather than
+4 -4
drivers/ide/ide-pm.c
··· 191 192 #ifdef DEBUG_PM 193 printk("%s: completing PM request, %s\n", drive->name, 194 - blk_pm_suspend_request(rq) ? "suspend" : "resume"); 195 #endif 196 spin_lock_irqsave(q->queue_lock, flags); 197 - if (blk_pm_suspend_request(rq)) 198 blk_stop_queue(q); 199 else 200 drive->dev_flags &= ~IDE_DFLAG_BLOCKED; ··· 210 { 211 struct request_pm_state *pm = rq->special; 212 213 - if (blk_pm_suspend_request(rq) && 214 pm->pm_step == IDE_PM_START_SUSPEND) 215 /* Mark drive blocked when starting the suspend sequence. */ 216 drive->dev_flags |= IDE_DFLAG_BLOCKED; 217 - else if (blk_pm_resume_request(rq) && 218 pm->pm_step == IDE_PM_START_RESUME) { 219 /* 220 * The first thing we do on wakeup is to wait for BSY bit to
··· 191 192 #ifdef DEBUG_PM 193 printk("%s: completing PM request, %s\n", drive->name, 194 + (rq->cmd_type == REQ_TYPE_PM_SUSPEND) ? "suspend" : "resume"); 195 #endif 196 spin_lock_irqsave(q->queue_lock, flags); 197 + if (rq->cmd_type == REQ_TYPE_PM_SUSPEND) 198 blk_stop_queue(q); 199 else 200 drive->dev_flags &= ~IDE_DFLAG_BLOCKED; ··· 210 { 211 struct request_pm_state *pm = rq->special; 212 213 + if (rq->cmd_type == REQ_TYPE_PM_SUSPEND && 214 pm->pm_step == IDE_PM_START_SUSPEND) 215 /* Mark drive blocked when starting the suspend sequence. */ 216 drive->dev_flags |= IDE_DFLAG_BLOCKED; 217 + else if (rq->cmd_type == REQ_TYPE_PM_RESUME && 218 pm->pm_step == IDE_PM_START_RESUME) { 219 /* 220 * The first thing we do on wakeup is to wait for BSY bit to
+18 -4
drivers/ide/ide-tape.c
··· 32 #include <linux/errno.h> 33 #include <linux/genhd.h> 34 #include <linux/seq_file.h> 35 #include <linux/slab.h> 36 #include <linux/pci.h> 37 #include <linux/ide.h> ··· 578 rq->cmd[0], (unsigned long long)blk_rq_pos(rq), 579 blk_rq_sectors(rq)); 580 581 - BUG_ON(!(blk_special_request(rq) || blk_sense_request(rq))); 582 583 /* Retry a failed packet command */ 584 if (drive->failed_pc && drive->pc->c[0] == REQUEST_SENSE) { ··· 1907 1908 static int idetape_open(struct block_device *bdev, fmode_t mode) 1909 { 1910 - struct ide_tape_obj *tape = ide_tape_get(bdev->bd_disk, false, 0); 1911 1912 if (!tape) 1913 return -ENXIO; ··· 1923 { 1924 struct ide_tape_obj *tape = ide_drv_g(disk, ide_tape_obj); 1925 1926 ide_tape_put(tape); 1927 return 0; 1928 } 1929 ··· 1935 { 1936 struct ide_tape_obj *tape = ide_drv_g(bdev->bd_disk, ide_tape_obj); 1937 ide_drive_t *drive = tape->drive; 1938 - int err = generic_ide_ioctl(drive, bdev, cmd, arg); 1939 if (err == -EINVAL) 1940 err = idetape_blkdev_ioctl(drive, cmd, arg); 1941 return err; 1942 } 1943 ··· 1950 .owner = THIS_MODULE, 1951 .open = idetape_open, 1952 .release = idetape_release, 1953 - .locked_ioctl = idetape_ioctl, 1954 }; 1955 1956 static int ide_tape_probe(ide_drive_t *drive)
··· 32 #include <linux/errno.h> 33 #include <linux/genhd.h> 34 #include <linux/seq_file.h> 35 + #include <linux/smp_lock.h> 36 #include <linux/slab.h> 37 #include <linux/pci.h> 38 #include <linux/ide.h> ··· 577 rq->cmd[0], (unsigned long long)blk_rq_pos(rq), 578 blk_rq_sectors(rq)); 579 580 + BUG_ON(!(rq->cmd_type == REQ_TYPE_SPECIAL || 581 + rq->cmd_type == REQ_TYPE_SENSE)); 582 583 /* Retry a failed packet command */ 584 if (drive->failed_pc && drive->pc->c[0] == REQUEST_SENSE) { ··· 1905 1906 static int idetape_open(struct block_device *bdev, fmode_t mode) 1907 { 1908 + struct ide_tape_obj *tape; 1909 + 1910 + lock_kernel(); 1911 + tape = ide_tape_get(bdev->bd_disk, false, 0); 1912 + unlock_kernel(); 1913 1914 if (!tape) 1915 return -ENXIO; ··· 1917 { 1918 struct ide_tape_obj *tape = ide_drv_g(disk, ide_tape_obj); 1919 1920 + lock_kernel(); 1921 ide_tape_put(tape); 1922 + unlock_kernel(); 1923 + 1924 return 0; 1925 } 1926 ··· 1926 { 1927 struct ide_tape_obj *tape = ide_drv_g(bdev->bd_disk, ide_tape_obj); 1928 ide_drive_t *drive = tape->drive; 1929 + int err; 1930 + 1931 + lock_kernel(); 1932 + err = generic_ide_ioctl(drive, bdev, cmd, arg); 1933 if (err == -EINVAL) 1934 err = idetape_blkdev_ioctl(drive, cmd, arg); 1935 + unlock_kernel(); 1936 + 1937 return err; 1938 } 1939 ··· 1936 .owner = THIS_MODULE, 1937 .open = idetape_open, 1938 .release = idetape_release, 1939 + .ioctl = idetape_ioctl, 1940 }; 1941 1942 static int ide_tape_probe(ide_drive_t *drive)
+6 -6
drivers/md/dm-io.c
··· 356 BUG_ON(num_regions > DM_IO_MAX_REGIONS); 357 358 if (sync) 359 - rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG); 360 361 /* 362 * For multiple regions we need to be careful to rewind ··· 364 */ 365 for (i = 0; i < num_regions; i++) { 366 *dp = old_pages; 367 - if (where[i].count || (rw & (1 << BIO_RW_BARRIER))) 368 do_region(rw, i, where + i, dp, io); 369 } 370 ··· 412 } 413 set_current_state(TASK_RUNNING); 414 415 - if (io->eopnotsupp_bits && (rw & (1 << BIO_RW_BARRIER))) { 416 - rw &= ~(1 << BIO_RW_BARRIER); 417 goto retry; 418 } 419 ··· 479 * New collapsed (a)synchronous interface. 480 * 481 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug 482 - * the queue with blk_unplug() some time later or set the BIO_RW_SYNC bit in 483 - * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to 484 * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c. 485 */ 486 int dm_io(struct dm_io_request *io_req, unsigned num_regions,
··· 356 BUG_ON(num_regions > DM_IO_MAX_REGIONS); 357 358 if (sync) 359 + rw |= REQ_SYNC | REQ_UNPLUG; 360 361 /* 362 * For multiple regions we need to be careful to rewind ··· 364 */ 365 for (i = 0; i < num_regions; i++) { 366 *dp = old_pages; 367 + if (where[i].count || (rw & REQ_HARDBARRIER)) 368 do_region(rw, i, where + i, dp, io); 369 } 370 ··· 412 } 413 set_current_state(TASK_RUNNING); 414 415 + if (io->eopnotsupp_bits && (rw & REQ_HARDBARRIER)) { 416 + rw &= ~REQ_HARDBARRIER; 417 goto retry; 418 } 419 ··· 479 * New collapsed (a)synchronous interface. 480 * 481 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug 482 + * the queue with blk_unplug() some time later or set REQ_SYNC in 483 + io_req->bi_rw. If you fail to do one of these, the IO will be submitted to 484 * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c. 485 */ 486 int dm_io(struct dm_io_request *io_req, unsigned num_regions,
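Note: the md/dm hunks drop the shifted (1 << BIO_RW_*) bit numbers in favour of the unified REQ_* masks from the new blk_types.h, which can be OR-ed into bio->bi_rw or a request's cmd_flags directly. A small sketch of building such a flag word (function name is illustrative only):

	#include <linux/blkdev.h>

	/* Sketch: REQ_* values are plain bit masks, so |, & and ~ work on
	 * them without any shifting. */
	static int build_rw_flags(int sync, int barrier)
	{
		int rw = REQ_WRITE;

		if (sync)
			rw |= REQ_SYNC | REQ_UNPLUG;
		if (barrier)
			rw |= REQ_HARDBARRIER;	/* stripped again on -EOPNOTSUPP retry */

		return rw;
	}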
+1 -1
drivers/md/dm-kcopyd.c
··· 345 { 346 int r; 347 struct dm_io_request io_req = { 348 - .bi_rw = job->rw | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG), 349 .mem.type = DM_IO_PAGE_LIST, 350 .mem.ptr.pl = job->pages, 351 .mem.offset = job->offset,
··· 345 { 346 int r; 347 struct dm_io_request io_req = { 348 + .bi_rw = job->rw | REQ_SYNC | REQ_UNPLUG, 349 .mem.type = DM_IO_PAGE_LIST, 350 .mem.ptr.pl = job->pages, 351 .mem.offset = job->offset,
+1 -1
drivers/md/dm-raid1.c
··· 1211 if (error == -EOPNOTSUPP) 1212 goto out; 1213 1214 - if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD)) 1215 goto out; 1216 1217 if (unlikely(error)) {
··· 1211 if (error == -EOPNOTSUPP) 1212 goto out; 1213 1214 + if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD)) 1215 goto out; 1216 1217 if (unlikely(error)) {
+1 -1
drivers/md/dm-stripe.c
··· 284 if (!error) 285 return 0; /* I/O complete */ 286 287 - if ((error == -EWOULDBLOCK) && bio_rw_flagged(bio, BIO_RW_AHEAD)) 288 return error; 289 290 if (error == -EOPNOTSUPP)
··· 284 if (!error) 285 return 0; /* I/O complete */ 286 287 + if ((error == -EWOULDBLOCK) && (bio->bi_rw & REQ_RAHEAD)) 288 return error; 289 290 if (error == -EOPNOTSUPP)
+21 -26
drivers/md/dm.c
··· 15 #include <linux/blkpg.h> 16 #include <linux/bio.h> 17 #include <linux/buffer_head.h> 18 #include <linux/mempool.h> 19 #include <linux/slab.h> 20 #include <linux/idr.h> ··· 339 { 340 struct mapped_device *md; 341 342 spin_lock(&_minor_lock); 343 344 md = bdev->bd_disk->private_data; ··· 357 358 out: 359 spin_unlock(&_minor_lock); 360 361 return md ? 0 : -ENXIO; 362 } ··· 365 static int dm_blk_close(struct gendisk *disk, fmode_t mode) 366 { 367 struct mapped_device *md = disk->private_data; 368 atomic_dec(&md->open_count); 369 dm_put(md); 370 return 0; 371 } 372 ··· 621 */ 622 spin_lock_irqsave(&md->deferred_lock, flags); 623 if (__noflush_suspending(md)) { 624 - if (!bio_rw_flagged(io->bio, BIO_RW_BARRIER)) 625 bio_list_add_head(&md->deferred, 626 io->bio); 627 } else ··· 633 io_error = io->error; 634 bio = io->bio; 635 636 - if (bio_rw_flagged(bio, BIO_RW_BARRIER)) { 637 /* 638 * There can be just one barrier request so we use 639 * a per-device variable for error reporting. ··· 799 { 800 int rw = rq_data_dir(clone); 801 int run_queue = 1; 802 - bool is_barrier = blk_barrier_rq(clone); 803 struct dm_rq_target_io *tio = clone->end_io_data; 804 struct mapped_device *md = tio->md; 805 struct request *rq = tio->orig; 806 807 - if (blk_pc_request(rq) && !is_barrier) { 808 rq->errors = clone->errors; 809 rq->resid_len = clone->resid_len; 810 ··· 851 struct request_queue *q = rq->q; 852 unsigned long flags; 853 854 - if (unlikely(blk_barrier_rq(clone))) { 855 /* 856 * Barrier clones share an original request. 857 * Leave it to dm_end_request(), which handles this special ··· 950 struct dm_rq_target_io *tio = clone->end_io_data; 951 struct request *rq = tio->orig; 952 953 - if (unlikely(blk_barrier_rq(clone))) { 954 /* 955 * Barrier clones share an original request. So can't use 956 * softirq_done with the original. ··· 979 struct dm_rq_target_io *tio = clone->end_io_data; 980 struct request *rq = tio->orig; 981 982 - if (unlikely(blk_barrier_rq(clone))) { 983 /* 984 * Barrier clones share an original request. 985 * Leave it to dm_end_request(), which handles this special ··· 1113 1114 clone->bi_sector = sector; 1115 clone->bi_bdev = bio->bi_bdev; 1116 - clone->bi_rw = bio->bi_rw & ~(1 << BIO_RW_BARRIER); 1117 clone->bi_vcnt = 1; 1118 clone->bi_size = to_bytes(len); 1119 clone->bi_io_vec->bv_offset = offset; ··· 1140 1141 clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs); 1142 __bio_clone(clone, bio); 1143 - clone->bi_rw &= ~(1 << BIO_RW_BARRIER); 1144 clone->bi_destructor = dm_bio_destructor; 1145 clone->bi_sector = sector; 1146 clone->bi_idx = idx; ··· 1308 1309 ci.map = dm_get_live_table(md); 1310 if (unlikely(!ci.map)) { 1311 - if (!bio_rw_flagged(bio, BIO_RW_BARRIER)) 1312 bio_io_error(bio); 1313 else 1314 if (!md->barrier_error) ··· 1421 * we have to queue this io for later. 1422 */ 1423 if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) || 1424 - unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) { 1425 up_read(&md->io_lock); 1426 1427 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) && ··· 1462 return _dm_request(q, bio); 1463 } 1464 1465 - /* 1466 - * Mark this request as flush request, so that dm_request_fn() can 1467 - * recognize. 
1468 - */ 1469 - static void dm_rq_prepare_flush(struct request_queue *q, struct request *rq) 1470 - { 1471 - rq->cmd_type = REQ_TYPE_LINUX_BLOCK; 1472 - rq->cmd[0] = REQ_LB_OP_FLUSH; 1473 - } 1474 - 1475 static bool dm_rq_is_flush_request(struct request *rq) 1476 { 1477 - if (rq->cmd_type == REQ_TYPE_LINUX_BLOCK && 1478 - rq->cmd[0] == REQ_LB_OP_FLUSH) 1479 return true; 1480 else 1481 return false; ··· 1908 blk_queue_softirq_done(md->queue, dm_softirq_done); 1909 blk_queue_prep_rq(md->queue, dm_prep_fn); 1910 blk_queue_lld_busy(md->queue, dm_lld_busy); 1911 - blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH, 1912 - dm_rq_prepare_flush); 1913 1914 md->disk = alloc_disk(1); 1915 if (!md->disk) ··· 2291 if (dm_request_based(md)) 2292 generic_make_request(c); 2293 else { 2294 - if (bio_rw_flagged(c, BIO_RW_BARRIER)) 2295 process_barrier(md, c); 2296 else 2297 __split_and_process_bio(md, c);
··· 15 #include <linux/blkpg.h> 16 #include <linux/bio.h> 17 #include <linux/buffer_head.h> 18 + #include <linux/smp_lock.h> 19 #include <linux/mempool.h> 20 #include <linux/slab.h> 21 #include <linux/idr.h> ··· 338 { 339 struct mapped_device *md; 340 341 + lock_kernel(); 342 spin_lock(&_minor_lock); 343 344 md = bdev->bd_disk->private_data; ··· 355 356 out: 357 spin_unlock(&_minor_lock); 358 + unlock_kernel(); 359 360 return md ? 0 : -ENXIO; 361 } ··· 362 static int dm_blk_close(struct gendisk *disk, fmode_t mode) 363 { 364 struct mapped_device *md = disk->private_data; 365 + 366 + lock_kernel(); 367 atomic_dec(&md->open_count); 368 dm_put(md); 369 + unlock_kernel(); 370 + 371 return 0; 372 } 373 ··· 614 */ 615 spin_lock_irqsave(&md->deferred_lock, flags); 616 if (__noflush_suspending(md)) { 617 + if (!(io->bio->bi_rw & REQ_HARDBARRIER)) 618 bio_list_add_head(&md->deferred, 619 io->bio); 620 } else ··· 626 io_error = io->error; 627 bio = io->bio; 628 629 + if (bio->bi_rw & REQ_HARDBARRIER) { 630 /* 631 * There can be just one barrier request so we use 632 * a per-device variable for error reporting. ··· 792 { 793 int rw = rq_data_dir(clone); 794 int run_queue = 1; 795 + bool is_barrier = clone->cmd_flags & REQ_HARDBARRIER; 796 struct dm_rq_target_io *tio = clone->end_io_data; 797 struct mapped_device *md = tio->md; 798 struct request *rq = tio->orig; 799 800 + if (rq->cmd_type == REQ_TYPE_BLOCK_PC && !is_barrier) { 801 rq->errors = clone->errors; 802 rq->resid_len = clone->resid_len; 803 ··· 844 struct request_queue *q = rq->q; 845 unsigned long flags; 846 847 + if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) { 848 /* 849 * Barrier clones share an original request. 850 * Leave it to dm_end_request(), which handles this special ··· 943 struct dm_rq_target_io *tio = clone->end_io_data; 944 struct request *rq = tio->orig; 945 946 + if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) { 947 /* 948 * Barrier clones share an original request. So can't use 949 * softirq_done with the original. ··· 972 struct dm_rq_target_io *tio = clone->end_io_data; 973 struct request *rq = tio->orig; 974 975 + if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) { 976 /* 977 * Barrier clones share an original request. 978 * Leave it to dm_end_request(), which handles this special ··· 1106 1107 clone->bi_sector = sector; 1108 clone->bi_bdev = bio->bi_bdev; 1109 + clone->bi_rw = bio->bi_rw & ~REQ_HARDBARRIER; 1110 clone->bi_vcnt = 1; 1111 clone->bi_size = to_bytes(len); 1112 clone->bi_io_vec->bv_offset = offset; ··· 1133 1134 clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs); 1135 __bio_clone(clone, bio); 1136 + clone->bi_rw &= ~REQ_HARDBARRIER; 1137 clone->bi_destructor = dm_bio_destructor; 1138 clone->bi_sector = sector; 1139 clone->bi_idx = idx; ··· 1301 1302 ci.map = dm_get_live_table(md); 1303 if (unlikely(!ci.map)) { 1304 + if (!(bio->bi_rw & REQ_HARDBARRIER)) 1305 bio_io_error(bio); 1306 else 1307 if (!md->barrier_error) ··· 1414 * we have to queue this io for later. 
1415 */ 1416 if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) || 1417 + unlikely(bio->bi_rw & REQ_HARDBARRIER)) { 1418 up_read(&md->io_lock); 1419 1420 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) && ··· 1455 return _dm_request(q, bio); 1456 } 1457 1458 static bool dm_rq_is_flush_request(struct request *rq) 1459 { 1460 + if (rq->cmd_flags & REQ_FLUSH) 1461 return true; 1462 else 1463 return false; ··· 1912 blk_queue_softirq_done(md->queue, dm_softirq_done); 1913 blk_queue_prep_rq(md->queue, dm_prep_fn); 1914 blk_queue_lld_busy(md->queue, dm_lld_busy); 1915 + blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH); 1916 1917 md->disk = alloc_disk(1); 1918 if (!md->disk) ··· 2296 if (dm_request_based(md)) 2297 generic_make_request(c); 2298 else { 2299 + if (c->bi_rw & REQ_HARDBARRIER) 2300 process_barrier(md, c); 2301 else 2302 __split_and_process_bio(md, c);
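Note: in dm.c the request-based path no longer tags flushes with REQ_TYPE_LINUX_BLOCK/REQ_LB_OP_FLUSH; with blk_queue_ordered() taking no prepare callback, a flush is recognized purely by REQ_FLUSH in cmd_flags, and barrier clones by REQ_HARDBARRIER. A sketch of the two tests (helper names are illustrative):

	#include <linux/blkdev.h>

	/* Sketch: flag-based detection replaces the old special cmd_type. */
	static bool my_rq_is_flush(struct request *rq)
	{
		return (rq->cmd_flags & REQ_FLUSH) != 0;
	}

	static bool my_clone_is_barrier(struct request *clone)
	{
		return (clone->cmd_flags & REQ_HARDBARRIER) != 0;
	}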
+1 -1
drivers/md/linear.c
··· 294 dev_info_t *tmp_dev; 295 sector_t start_sector; 296 297 - if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) { 298 md_barrier_request(mddev, bio); 299 return 0; 300 }
··· 294 dev_info_t *tmp_dev; 295 sector_t start_sector; 296 297 + if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) { 298 md_barrier_request(mddev, bio); 299 return 0; 300 }
+11 -5
drivers/md/md.c
··· 36 #include <linux/blkdev.h> 37 #include <linux/sysctl.h> 38 #include <linux/seq_file.h> 39 #include <linux/buffer_head.h> /* for invalidate_bdev */ 40 #include <linux/poll.h> 41 #include <linux/ctype.h> ··· 354 /* an empty barrier - all done */ 355 bio_endio(bio, 0); 356 else { 357 - bio->bi_rw &= ~(1<<BIO_RW_BARRIER); 358 if (mddev->pers->make_request(mddev, bio)) 359 generic_make_request(bio); 360 mddev->barrier = POST_REQUEST_BARRIER; ··· 676 * if zero is reached. 677 * If an error occurred, call md_error 678 * 679 - * As we might need to resubmit the request if BIO_RW_BARRIER 680 * causes ENOTSUPP, we allocate a spare bio... 681 */ 682 struct bio *bio = bio_alloc(GFP_NOIO, 1); 683 - int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG); 684 685 bio->bi_bdev = rdev->bdev; 686 bio->bi_sector = sector; ··· 692 atomic_inc(&mddev->pending_writes); 693 if (!test_bit(BarriersNotsupp, &rdev->flags)) { 694 struct bio *rbio; 695 - rw |= (1<<BIO_RW_BARRIER); 696 rbio = bio_clone(bio, GFP_NOIO); 697 rbio->bi_private = bio; 698 rbio->bi_end_io = super_written_barrier; ··· 737 struct completion event; 738 int ret; 739 740 - rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG); 741 742 bio->bi_bdev = bdev; 743 bio->bi_sector = sector; ··· 5903 mddev_t *mddev = mddev_find(bdev->bd_dev); 5904 int err; 5905 5906 if (mddev->gendisk != bdev->bd_disk) { 5907 /* we are racing with mddev_put which is discarding this 5908 * bd_disk. ··· 5912 /* Wait until bdev->bd_disk is definitely gone */ 5913 flush_scheduled_work(); 5914 /* Then retry the open from the top */ 5915 return -ERESTARTSYS; 5916 } 5917 BUG_ON(mddev != bdev->bd_disk->private_data); ··· 5926 5927 check_disk_size_change(mddev->gendisk, bdev); 5928 out: 5929 return err; 5930 } 5931 ··· 5935 mddev_t *mddev = disk->private_data; 5936 5937 BUG_ON(!mddev); 5938 atomic_dec(&mddev->openers); 5939 mddev_put(mddev); 5940 5941 return 0; 5942 }
··· 36 #include <linux/blkdev.h> 37 #include <linux/sysctl.h> 38 #include <linux/seq_file.h> 39 + #include <linux/smp_lock.h> 40 #include <linux/buffer_head.h> /* for invalidate_bdev */ 41 #include <linux/poll.h> 42 #include <linux/ctype.h> ··· 353 /* an empty barrier - all done */ 354 bio_endio(bio, 0); 355 else { 356 + bio->bi_rw &= ~REQ_HARDBARRIER; 357 if (mddev->pers->make_request(mddev, bio)) 358 generic_make_request(bio); 359 mddev->barrier = POST_REQUEST_BARRIER; ··· 675 * if zero is reached. 676 * If an error occurred, call md_error 677 * 678 + * As we might need to resubmit the request if REQ_HARDBARRIER 679 * causes ENOTSUPP, we allocate a spare bio... 680 */ 681 struct bio *bio = bio_alloc(GFP_NOIO, 1); 682 + int rw = REQ_WRITE | REQ_SYNC | REQ_UNPLUG; 683 684 bio->bi_bdev = rdev->bdev; 685 bio->bi_sector = sector; ··· 691 atomic_inc(&mddev->pending_writes); 692 if (!test_bit(BarriersNotsupp, &rdev->flags)) { 693 struct bio *rbio; 694 + rw |= REQ_HARDBARRIER; 695 rbio = bio_clone(bio, GFP_NOIO); 696 rbio->bi_private = bio; 697 rbio->bi_end_io = super_written_barrier; ··· 736 struct completion event; 737 int ret; 738 739 + rw |= REQ_SYNC | REQ_UNPLUG; 740 741 bio->bi_bdev = bdev; 742 bio->bi_sector = sector; ··· 5902 mddev_t *mddev = mddev_find(bdev->bd_dev); 5903 int err; 5904 5905 + lock_kernel(); 5906 if (mddev->gendisk != bdev->bd_disk) { 5907 /* we are racing with mddev_put which is discarding this 5908 * bd_disk. ··· 5910 /* Wait until bdev->bd_disk is definitely gone */ 5911 flush_scheduled_work(); 5912 /* Then retry the open from the top */ 5913 + unlock_kernel(); 5914 return -ERESTARTSYS; 5915 } 5916 BUG_ON(mddev != bdev->bd_disk->private_data); ··· 5923 5924 check_disk_size_change(mddev->gendisk, bdev); 5925 out: 5926 + unlock_kernel(); 5927 return err; 5928 } 5929 ··· 5931 mddev_t *mddev = disk->private_data; 5932 5933 BUG_ON(!mddev); 5934 + lock_kernel(); 5935 atomic_dec(&mddev->openers); 5936 mddev_put(mddev); 5937 + unlock_kernel(); 5938 5939 return 0; 5940 }
+2 -2
drivers/md/md.h
··· 67 #define Faulty 1 /* device is known to have a fault */ 68 #define In_sync 2 /* device is in_sync with rest of array */ 69 #define WriteMostly 4 /* Avoid reading if at all possible */ 70 - #define BarriersNotsupp 5 /* BIO_RW_BARRIER is not supported */ 71 #define AllReserved 6 /* If whole device is reserved for 72 * one array */ 73 #define AutoDetected 7 /* added by auto-detect */ ··· 254 * fails. Only supported 255 */ 256 struct bio *biolist; /* bios that need to be retried 257 - * because BIO_RW_BARRIER is not supported 258 */ 259 260 atomic_t recovery_active; /* blocks scheduled, but not written */
··· 67 #define Faulty 1 /* device is known to have a fault */ 68 #define In_sync 2 /* device is in_sync with rest of array */ 69 #define WriteMostly 4 /* Avoid reading if at all possible */ 70 + #define BarriersNotsupp 5 /* REQ_HARDBARRIER is not supported */ 71 #define AllReserved 6 /* If whole device is reserved for 72 * one array */ 73 #define AutoDetected 7 /* added by auto-detect */ ··· 254 * fails. Only supported 255 */ 256 struct bio *biolist; /* bios that need to be retried 257 + * because REQ_HARDBARRIER is not supported 258 */ 259 260 atomic_t recovery_active; /* blocks scheduled, but not written */
+4 -4
drivers/md/multipath.c
··· 91 92 if (uptodate) 93 multipath_end_bh_io(mp_bh, 0); 94 - else if (!bio_rw_flagged(bio, BIO_RW_AHEAD)) { 95 /* 96 * oops, IO error: 97 */ ··· 142 struct multipath_bh * mp_bh; 143 struct multipath_info *multipath; 144 145 - if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) { 146 md_barrier_request(mddev, bio); 147 return 0; 148 } ··· 163 mp_bh->bio = *bio; 164 mp_bh->bio.bi_sector += multipath->rdev->data_offset; 165 mp_bh->bio.bi_bdev = multipath->rdev->bdev; 166 - mp_bh->bio.bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT); 167 mp_bh->bio.bi_end_io = multipath_end_request; 168 mp_bh->bio.bi_private = mp_bh; 169 generic_make_request(&mp_bh->bio); ··· 398 *bio = *(mp_bh->master_bio); 399 bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset; 400 bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev; 401 - bio->bi_rw |= (1 << BIO_RW_FAILFAST_TRANSPORT); 402 bio->bi_end_io = multipath_end_request; 403 bio->bi_private = mp_bh; 404 generic_make_request(bio);
··· 91 92 if (uptodate) 93 multipath_end_bh_io(mp_bh, 0); 94 + else if (!(bio->bi_rw & REQ_RAHEAD)) { 95 /* 96 * oops, IO error: 97 */ ··· 142 struct multipath_bh * mp_bh; 143 struct multipath_info *multipath; 144 145 + if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) { 146 md_barrier_request(mddev, bio); 147 return 0; 148 } ··· 163 mp_bh->bio = *bio; 164 mp_bh->bio.bi_sector += multipath->rdev->data_offset; 165 mp_bh->bio.bi_bdev = multipath->rdev->bdev; 166 + mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT; 167 mp_bh->bio.bi_end_io = multipath_end_request; 168 mp_bh->bio.bi_private = mp_bh; 169 generic_make_request(&mp_bh->bio); ··· 398 *bio = *(mp_bh->master_bio); 399 bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset; 400 bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev; 401 + bio->bi_rw |= REQ_FAILFAST_TRANSPORT; 402 bio->bi_end_io = multipath_end_request; 403 bio->bi_private = mp_bh; 404 generic_make_request(bio);
+1 -1
drivers/md/raid0.c
··· 483 struct strip_zone *zone; 484 mdk_rdev_t *tmp_dev; 485 486 - if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) { 487 md_barrier_request(mddev, bio); 488 return 0; 489 }
··· 483 struct strip_zone *zone; 484 mdk_rdev_t *tmp_dev; 485 486 + if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) { 487 md_barrier_request(mddev, bio); 488 return 0; 489 }
+10 -12
drivers/md/raid1.c
··· 787 struct bio_list bl; 788 struct page **behind_pages = NULL; 789 const int rw = bio_data_dir(bio); 790 - const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO); 791 bool do_barriers; 792 mdk_rdev_t *blocked_rdev; 793 ··· 822 finish_wait(&conf->wait_barrier, &w); 823 } 824 if (unlikely(!mddev->barriers_work && 825 - bio_rw_flagged(bio, BIO_RW_BARRIER))) { 826 if (rw == WRITE) 827 md_write_end(mddev); 828 bio_endio(bio, -EOPNOTSUPP); ··· 877 read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset; 878 read_bio->bi_bdev = mirror->rdev->bdev; 879 read_bio->bi_end_io = raid1_end_read_request; 880 - read_bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO); 881 read_bio->bi_private = r1_bio; 882 883 generic_make_request(read_bio); ··· 959 atomic_set(&r1_bio->remaining, 0); 960 atomic_set(&r1_bio->behind_remaining, 0); 961 962 - do_barriers = bio_rw_flagged(bio, BIO_RW_BARRIER); 963 if (do_barriers) 964 set_bit(R1BIO_Barrier, &r1_bio->state); 965 ··· 975 mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset; 976 mbio->bi_bdev = conf->mirrors[i].rdev->bdev; 977 mbio->bi_end_io = raid1_end_write_request; 978 - mbio->bi_rw = WRITE | (do_barriers << BIO_RW_BARRIER) | 979 - (do_sync << BIO_RW_SYNCIO); 980 mbio->bi_private = r1_bio; 981 982 if (behind_pages) { ··· 1632 sync_request_write(mddev, r1_bio); 1633 unplug = 1; 1634 } else if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) { 1635 - /* some requests in the r1bio were BIO_RW_BARRIER 1636 * requests which failed with -EOPNOTSUPP. Hohumm.. 1637 * Better resubmit without the barrier. 1638 * We know which devices to resubmit for, because ··· 1640 * We already have a nr_pending reference on these rdevs. 1641 */ 1642 int i; 1643 - const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO); 1644 clear_bit(R1BIO_BarrierRetry, &r1_bio->state); 1645 clear_bit(R1BIO_Barrier, &r1_bio->state); 1646 for (i=0; i < conf->raid_disks; i++) ··· 1661 conf->mirrors[i].rdev->data_offset; 1662 bio->bi_bdev = conf->mirrors[i].rdev->bdev; 1663 bio->bi_end_io = raid1_end_write_request; 1664 - bio->bi_rw = WRITE | 1665 - (do_sync << BIO_RW_SYNCIO); 1666 bio->bi_private = r1_bio; 1667 r1_bio->bios[i] = bio; 1668 generic_make_request(bio); ··· 1696 (unsigned long long)r1_bio->sector); 1697 raid_end_bio_io(r1_bio); 1698 } else { 1699 - const bool do_sync = bio_rw_flagged(r1_bio->master_bio, BIO_RW_SYNCIO); 1700 r1_bio->bios[r1_bio->read_disk] = 1701 mddev->ro ? IO_BLOCKED : NULL; 1702 r1_bio->read_disk = disk; ··· 1713 bio->bi_sector = r1_bio->sector + rdev->data_offset; 1714 bio->bi_bdev = rdev->bdev; 1715 bio->bi_end_io = raid1_end_read_request; 1716 - bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO); 1717 bio->bi_private = r1_bio; 1718 unplug = 1; 1719 generic_make_request(bio);
··· 787 struct bio_list bl; 788 struct page **behind_pages = NULL; 789 const int rw = bio_data_dir(bio); 790 + const bool do_sync = (bio->bi_rw & REQ_SYNC); 791 bool do_barriers; 792 mdk_rdev_t *blocked_rdev; 793 ··· 822 finish_wait(&conf->wait_barrier, &w); 823 } 824 if (unlikely(!mddev->barriers_work && 825 + (bio->bi_rw & REQ_HARDBARRIER))) { 826 if (rw == WRITE) 827 md_write_end(mddev); 828 bio_endio(bio, -EOPNOTSUPP); ··· 877 read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset; 878 read_bio->bi_bdev = mirror->rdev->bdev; 879 read_bio->bi_end_io = raid1_end_read_request; 880 + read_bio->bi_rw = READ | do_sync; 881 read_bio->bi_private = r1_bio; 882 883 generic_make_request(read_bio); ··· 959 atomic_set(&r1_bio->remaining, 0); 960 atomic_set(&r1_bio->behind_remaining, 0); 961 962 + do_barriers = bio->bi_rw & REQ_HARDBARRIER; 963 if (do_barriers) 964 set_bit(R1BIO_Barrier, &r1_bio->state); 965 ··· 975 mbio->bi_sector = r1_bio->sector + conf->mirrors[i].rdev->data_offset; 976 mbio->bi_bdev = conf->mirrors[i].rdev->bdev; 977 mbio->bi_end_io = raid1_end_write_request; 978 + mbio->bi_rw = WRITE | do_barriers | do_sync; 979 mbio->bi_private = r1_bio; 980 981 if (behind_pages) { ··· 1633 sync_request_write(mddev, r1_bio); 1634 unplug = 1; 1635 } else if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) { 1636 + /* some requests in the r1bio were REQ_HARDBARRIER 1637 * requests which failed with -EOPNOTSUPP. Hohumm.. 1638 * Better resubmit without the barrier. 1639 * We know which devices to resubmit for, because ··· 1641 * We already have a nr_pending reference on these rdevs. 1642 */ 1643 int i; 1644 + const bool do_sync = (r1_bio->master_bio->bi_rw & REQ_SYNC); 1645 clear_bit(R1BIO_BarrierRetry, &r1_bio->state); 1646 clear_bit(R1BIO_Barrier, &r1_bio->state); 1647 for (i=0; i < conf->raid_disks; i++) ··· 1662 conf->mirrors[i].rdev->data_offset; 1663 bio->bi_bdev = conf->mirrors[i].rdev->bdev; 1664 bio->bi_end_io = raid1_end_write_request; 1665 + bio->bi_rw = WRITE | do_sync; 1666 bio->bi_private = r1_bio; 1667 r1_bio->bios[i] = bio; 1668 generic_make_request(bio); ··· 1698 (unsigned long long)r1_bio->sector); 1699 raid_end_bio_io(r1_bio); 1700 } else { 1701 + const bool do_sync = r1_bio->master_bio->bi_rw & REQ_SYNC; 1702 r1_bio->bios[r1_bio->read_disk] = 1703 mddev->ro ? IO_BLOCKED : NULL; 1704 r1_bio->read_disk = disk; ··· 1715 bio->bi_sector = r1_bio->sector + rdev->data_offset; 1716 bio->bi_bdev = rdev->bdev; 1717 bio->bi_end_io = raid1_end_read_request; 1718 + bio->bi_rw = READ | do_sync; 1719 bio->bi_private = r1_bio; 1720 unplug = 1; 1721 generic_make_request(bio);
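Note: raid1 shows another consequence of the flag unification: the sync and barrier hints read from the master bio are already positioned REQ_* bits, so they can be OR-ed straight into the child bios' bi_rw instead of being re-shifted with (do_sync << BIO_RW_SYNCIO). A minimal sketch, keeping the saved bits in an unsigned long so the OR preserves their positions (function name is hypothetical):

	#include <linux/blkdev.h>

	/* Sketch: propagate sync/barrier hints from the master bio to the
	 * per-device read and write bios. */
	static void my_set_child_rw(struct bio *master, struct bio *rbio,
				    struct bio *wbio)
	{
		const unsigned long do_sync     = master->bi_rw & REQ_SYNC;
		const unsigned long do_barriers = master->bi_rw & REQ_HARDBARRIER;

		rbio->bi_rw = READ  | do_sync;
		wbio->bi_rw = WRITE | do_barriers | do_sync;
	}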
+6 -6
drivers/md/raid10.c
··· 799 int i; 800 int chunk_sects = conf->chunk_mask + 1; 801 const int rw = bio_data_dir(bio); 802 - const bool do_sync = bio_rw_flagged(bio, BIO_RW_SYNCIO); 803 struct bio_list bl; 804 unsigned long flags; 805 mdk_rdev_t *blocked_rdev; 806 807 - if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) { 808 md_barrier_request(mddev, bio); 809 return 0; 810 } ··· 879 mirror->rdev->data_offset; 880 read_bio->bi_bdev = mirror->rdev->bdev; 881 read_bio->bi_end_io = raid10_end_read_request; 882 - read_bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO); 883 read_bio->bi_private = r10_bio; 884 885 generic_make_request(read_bio); ··· 947 conf->mirrors[d].rdev->data_offset; 948 mbio->bi_bdev = conf->mirrors[d].rdev->bdev; 949 mbio->bi_end_io = raid10_end_write_request; 950 - mbio->bi_rw = WRITE | (do_sync << BIO_RW_SYNCIO); 951 mbio->bi_private = r10_bio; 952 953 atomic_inc(&r10_bio->remaining); ··· 1716 raid_end_bio_io(r10_bio); 1717 bio_put(bio); 1718 } else { 1719 - const bool do_sync = bio_rw_flagged(r10_bio->master_bio, BIO_RW_SYNCIO); 1720 bio_put(bio); 1721 rdev = conf->mirrors[mirror].rdev; 1722 if (printk_ratelimit()) ··· 1730 bio->bi_sector = r10_bio->devs[r10_bio->read_slot].addr 1731 + rdev->data_offset; 1732 bio->bi_bdev = rdev->bdev; 1733 - bio->bi_rw = READ | (do_sync << BIO_RW_SYNCIO); 1734 bio->bi_private = r10_bio; 1735 bio->bi_end_io = raid10_end_read_request; 1736 unplug = 1;
··· 799 int i; 800 int chunk_sects = conf->chunk_mask + 1; 801 const int rw = bio_data_dir(bio); 802 + const bool do_sync = (bio->bi_rw & REQ_SYNC); 803 struct bio_list bl; 804 unsigned long flags; 805 mdk_rdev_t *blocked_rdev; 806 807 + if (unlikely(bio->bi_rw & REQ_HARDBARRIER)) { 808 md_barrier_request(mddev, bio); 809 return 0; 810 } ··· 879 mirror->rdev->data_offset; 880 read_bio->bi_bdev = mirror->rdev->bdev; 881 read_bio->bi_end_io = raid10_end_read_request; 882 + read_bio->bi_rw = READ | do_sync; 883 read_bio->bi_private = r10_bio; 884 885 generic_make_request(read_bio); ··· 947 conf->mirrors[d].rdev->data_offset; 948 mbio->bi_bdev = conf->mirrors[d].rdev->bdev; 949 mbio->bi_end_io = raid10_end_write_request; 950 + mbio->bi_rw = WRITE | do_sync; 951 mbio->bi_private = r10_bio; 952 953 atomic_inc(&r10_bio->remaining); ··· 1716 raid_end_bio_io(r10_bio); 1717 bio_put(bio); 1718 } else { 1719 + const bool do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC); 1720 bio_put(bio); 1721 rdev = conf->mirrors[mirror].rdev; 1722 if (printk_ratelimit()) ··· 1730 bio->bi_sector = r10_bio->devs[r10_bio->read_slot].addr 1731 + rdev->data_offset; 1732 bio->bi_bdev = rdev->bdev; 1733 + bio->bi_rw = READ | do_sync; 1734 bio->bi_private = r10_bio; 1735 bio->bi_end_io = raid10_end_read_request; 1736 unplug = 1;
+1 -1
drivers/md/raid5.c
··· 3958 const int rw = bio_data_dir(bi); 3959 int remaining; 3960 3961 - if (unlikely(bio_rw_flagged(bi, BIO_RW_BARRIER))) { 3962 /* Drain all pending writes. We only really need 3963 * to ensure they have been submitted, but this is 3964 * easier.
··· 3958 const int rw = bio_data_dir(bi); 3959 int remaining; 3960 3961 + if (unlikely(bi->bi_rw & REQ_HARDBARRIER)) { 3962 /* Drain all pending writes. We only really need 3963 * to ensure they have been submitted, but this is 3964 * easier.
+10 -2
drivers/memstick/core/mspro_block.c
··· 18 #include <linux/kthread.h> 19 #include <linux/delay.h> 20 #include <linux/slab.h> 21 #include <linux/memstick.h> 22 23 #define DRIVER_NAME "mspro_block" ··· 180 struct mspro_block_data *msb = disk->private_data; 181 int rc = -ENXIO; 182 183 mutex_lock(&mspro_block_disk_lock); 184 185 if (msb && msb->card) { ··· 192 } 193 194 mutex_unlock(&mspro_block_disk_lock); 195 196 return rc; 197 } ··· 224 225 static int mspro_block_bd_release(struct gendisk *disk, fmode_t mode) 226 { 227 - return mspro_block_disk_release(disk); 228 } 229 230 static int mspro_block_bd_getgeo(struct block_device *bdev, ··· 812 813 static int mspro_block_prepare_req(struct request_queue *q, struct request *req) 814 { 815 - if (!blk_fs_request(req) && !blk_pc_request(req)) { 816 blk_dump_rq_flags(req, "MSPro unsupported request"); 817 return BLKPREP_KILL; 818 }
··· 18 #include <linux/kthread.h> 19 #include <linux/delay.h> 20 #include <linux/slab.h> 21 + #include <linux/smp_lock.h> 22 #include <linux/memstick.h> 23 24 #define DRIVER_NAME "mspro_block" ··· 179 struct mspro_block_data *msb = disk->private_data; 180 int rc = -ENXIO; 181 182 + lock_kernel(); 183 mutex_lock(&mspro_block_disk_lock); 184 185 if (msb && msb->card) { ··· 190 } 191 192 mutex_unlock(&mspro_block_disk_lock); 193 + unlock_kernel(); 194 195 return rc; 196 } ··· 221 222 static int mspro_block_bd_release(struct gendisk *disk, fmode_t mode) 223 { 224 + int ret; 225 + lock_kernel(); 226 + ret = mspro_block_disk_release(disk); 227 + unlock_kernel(); 228 + return ret; 229 } 230 231 static int mspro_block_bd_getgeo(struct block_device *bdev, ··· 805 806 static int mspro_block_prepare_req(struct request_queue *q, struct request *req) 807 { 808 + if (req->cmd_type != REQ_TYPE_FS && 809 + req->cmd_type != REQ_TYPE_BLOCK_PC) { 810 blk_dump_rq_flags(req, "MSPro unsupported request"); 811 return BLKPREP_KILL; 812 }
+23 -7
drivers/message/i2o/i2o_block.c
··· 53 #include <linux/module.h> 54 #include <linux/slab.h> 55 #include <linux/i2o.h> 56 57 #include <linux/mempool.h> 58 ··· 578 if (!dev->i2o_dev) 579 return -ENODEV; 580 581 if (dev->power > 0x1f) 582 i2o_block_device_power(dev, 0x02); 583 ··· 587 i2o_block_device_lock(dev->i2o_dev, -1); 588 589 osm_debug("Ready.\n"); 590 591 return 0; 592 }; ··· 618 if (!dev->i2o_dev) 619 return 0; 620 621 i2o_block_device_flush(dev->i2o_dev); 622 623 i2o_block_device_unlock(dev->i2o_dev, -1); ··· 629 operation = 0x24; 630 631 i2o_block_device_power(dev, operation); 632 633 return 0; 634 } ··· 657 { 658 struct gendisk *disk = bdev->bd_disk; 659 struct i2o_block_device *dev = disk->private_data; 660 661 /* Anyone capable of this syscall can do *real bad* things */ 662 663 if (!capable(CAP_SYS_ADMIN)) 664 return -EPERM; 665 666 switch (cmd) { 667 case BLKI2OGRSTRAT: 668 - return put_user(dev->rcache, (int __user *)arg); 669 case BLKI2OGWSTRAT: 670 - return put_user(dev->wcache, (int __user *)arg); 671 case BLKI2OSRSTRAT: 672 if (arg < 0 || arg > CACHE_SMARTFETCH) 673 - return -EINVAL; 674 dev->rcache = arg; 675 break; 676 case BLKI2OSWSTRAT: 677 if (arg != 0 678 && (arg < CACHE_WRITETHROUGH || arg > CACHE_SMARTBACK)) 679 - return -EINVAL; 680 dev->wcache = arg; 681 break; 682 } 683 - return -ENOTTY; 684 }; 685 686 /** ··· 898 if (!req) 899 break; 900 901 - if (blk_fs_request(req)) { 902 struct i2o_block_delayed_request *dreq; 903 struct i2o_block_request *ireq = req->special; 904 unsigned int queue_depth; ··· 945 .owner = THIS_MODULE, 946 .open = i2o_block_open, 947 .release = i2o_block_release, 948 - .locked_ioctl = i2o_block_ioctl, 949 .getgeo = i2o_block_getgeo, 950 .media_changed = i2o_block_media_changed 951 };
··· 53 #include <linux/module.h> 54 #include <linux/slab.h> 55 #include <linux/i2o.h> 56 + #include <linux/smp_lock.h> 57 58 #include <linux/mempool.h> 59 ··· 577 if (!dev->i2o_dev) 578 return -ENODEV; 579 580 + lock_kernel(); 581 if (dev->power > 0x1f) 582 i2o_block_device_power(dev, 0x02); 583 ··· 585 i2o_block_device_lock(dev->i2o_dev, -1); 586 587 osm_debug("Ready.\n"); 588 + unlock_kernel(); 589 590 return 0; 591 }; ··· 615 if (!dev->i2o_dev) 616 return 0; 617 618 + lock_kernel(); 619 i2o_block_device_flush(dev->i2o_dev); 620 621 i2o_block_device_unlock(dev->i2o_dev, -1); ··· 625 operation = 0x24; 626 627 i2o_block_device_power(dev, operation); 628 + unlock_kernel(); 629 630 return 0; 631 } ··· 652 { 653 struct gendisk *disk = bdev->bd_disk; 654 struct i2o_block_device *dev = disk->private_data; 655 + int ret = -ENOTTY; 656 657 /* Anyone capable of this syscall can do *real bad* things */ 658 659 if (!capable(CAP_SYS_ADMIN)) 660 return -EPERM; 661 662 + lock_kernel(); 663 switch (cmd) { 664 case BLKI2OGRSTRAT: 665 + ret = put_user(dev->rcache, (int __user *)arg); 666 + break; 667 case BLKI2OGWSTRAT: 668 + ret = put_user(dev->wcache, (int __user *)arg); 669 + break; 670 case BLKI2OSRSTRAT: 671 + ret = -EINVAL; 672 if (arg < 0 || arg > CACHE_SMARTFETCH) 673 + break; 674 dev->rcache = arg; 675 + ret = 0; 676 break; 677 case BLKI2OSWSTRAT: 678 + ret = -EINVAL; 679 if (arg != 0 680 && (arg < CACHE_WRITETHROUGH || arg > CACHE_SMARTBACK)) 681 + break; 682 dev->wcache = arg; 683 + ret = 0; 684 break; 685 } 686 + unlock_kernel(); 687 + 688 + return ret; 689 }; 690 691 /** ··· 883 if (!req) 884 break; 885 886 + if (req->cmd_type == REQ_TYPE_FS) { 887 struct i2o_block_delayed_request *dreq; 888 struct i2o_block_request *ireq = req->special; 889 unsigned int queue_depth; ··· 930 .owner = THIS_MODULE, 931 .open = i2o_block_open, 932 .release = i2o_block_release, 933 + .ioctl = i2o_block_ioctl, 934 + .compat_ioctl = i2o_block_ioctl, 935 .getgeo = i2o_block_getgeo, 936 .media_changed = i2o_block_media_changed 937 };
+5
drivers/mmc/card/block.c
··· 29 #include <linux/kdev_t.h> 30 #include <linux/blkdev.h> 31 #include <linux/mutex.h> 32 #include <linux/scatterlist.h> 33 #include <linux/string_helpers.h> 34 ··· 108 struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk); 109 int ret = -ENXIO; 110 111 if (md) { 112 if (md->usage == 2) 113 check_disk_change(bdev); ··· 119 ret = -EROFS; 120 } 121 } 122 123 return ret; 124 } ··· 128 { 129 struct mmc_blk_data *md = disk->private_data; 130 131 mmc_blk_put(md); 132 return 0; 133 } 134
··· 29 #include <linux/kdev_t.h> 30 #include <linux/blkdev.h> 31 #include <linux/mutex.h> 32 + #include <linux/smp_lock.h> 33 #include <linux/scatterlist.h> 34 #include <linux/string_helpers.h> 35 ··· 107 struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk); 108 int ret = -ENXIO; 109 110 + lock_kernel(); 111 if (md) { 112 if (md->usage == 2) 113 check_disk_change(bdev); ··· 117 ret = -EROFS; 118 } 119 } 120 + unlock_kernel(); 121 122 return ret; 123 } ··· 125 { 126 struct mmc_blk_data *md = disk->private_data; 127 128 + lock_kernel(); 129 mmc_blk_put(md); 130 + unlock_kernel(); 131 return 0; 132 } 133
+2 -2
drivers/mmc/card/queue.c
··· 32 /* 33 * We only like normal block requests. 34 */ 35 - if (!blk_fs_request(req)) { 36 blk_dump_rq_flags(req, "MMC bad request"); 37 return BLKPREP_KILL; 38 } ··· 128 mq->req = NULL; 129 130 blk_queue_prep_rq(mq->queue, mmc_prep_request); 131 - blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN, NULL); 132 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue); 133 134 #ifdef CONFIG_MMC_BLOCK_BOUNCE
··· 32 /* 33 * We only like normal block requests. 34 */ 35 + if (req->cmd_type != REQ_TYPE_FS) { 36 blk_dump_rq_flags(req, "MMC bad request"); 37 return BLKPREP_KILL; 38 } ··· 128 mq->req = NULL; 129 130 blk_queue_prep_rq(mq->queue, mmc_prep_request); 131 + blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN); 132 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue); 133 134 #ifdef CONFIG_MMC_BLOCK_BOUNCE
+11 -4
drivers/mtd/mtd_blkdevs.c
··· 29 #include <linux/blkdev.h> 30 #include <linux/blkpg.h> 31 #include <linux/spinlock.h> 32 #include <linux/hdreg.h> 33 #include <linux/init.h> 34 #include <linux/mutex.h> ··· 88 89 buf = req->buffer; 90 91 - if (!blk_fs_request(req)) 92 return -EIO; 93 94 if (blk_rq_pos(req) + blk_rq_cur_sectors(req) > 95 get_capacity(req->rq_disk)) 96 return -EIO; 97 98 - if (blk_discard_rq(req)) 99 return tr->discard(dev, block, nsect); 100 101 switch(rq_data_dir(req)) { ··· 179 int ret; 180 181 if (!dev) 182 - return -ERESTARTSYS; 183 184 mutex_lock(&dev->lock); 185 186 if (!dev->mtd) { ··· 198 unlock: 199 mutex_unlock(&dev->lock); 200 blktrans_dev_put(dev); 201 return ret; 202 } 203 ··· 210 if (!dev) 211 return ret; 212 213 mutex_lock(&dev->lock); 214 215 /* Release one reference, we sure its not the last one here*/ ··· 223 unlock: 224 mutex_unlock(&dev->lock); 225 blktrans_dev_put(dev); 226 return ret; 227 } 228 ··· 256 if (!dev) 257 return ret; 258 259 mutex_lock(&dev->lock); 260 261 if (!dev->mtd) ··· 271 } 272 unlock: 273 mutex_unlock(&dev->lock); 274 blktrans_dev_put(dev); 275 return ret; 276 } ··· 280 .owner = THIS_MODULE, 281 .open = blktrans_open, 282 .release = blktrans_release, 283 - .locked_ioctl = blktrans_ioctl, 284 .getgeo = blktrans_getgeo, 285 }; 286
··· 29 #include <linux/blkdev.h> 30 #include <linux/blkpg.h> 31 #include <linux/spinlock.h> 32 + #include <linux/smp_lock.h> 33 #include <linux/hdreg.h> 34 #include <linux/init.h> 35 #include <linux/mutex.h> ··· 87 88 buf = req->buffer; 89 90 + if (req->cmd_type != REQ_TYPE_FS) 91 return -EIO; 92 93 if (blk_rq_pos(req) + blk_rq_cur_sectors(req) > 94 get_capacity(req->rq_disk)) 95 return -EIO; 96 97 + if (req->cmd_flags & REQ_DISCARD) 98 return tr->discard(dev, block, nsect); 99 100 switch(rq_data_dir(req)) { ··· 178 int ret; 179 180 if (!dev) 181 + return -ERESTARTSYS; /* FIXME: busy loop! -arnd*/ 182 183 + lock_kernel(); 184 mutex_lock(&dev->lock); 185 186 if (!dev->mtd) { ··· 196 unlock: 197 mutex_unlock(&dev->lock); 198 blktrans_dev_put(dev); 199 + unlock_kernel(); 200 return ret; 201 } 202 ··· 207 if (!dev) 208 return ret; 209 210 + lock_kernel(); 211 mutex_lock(&dev->lock); 212 213 /* Release one reference, we sure its not the last one here*/ ··· 219 unlock: 220 mutex_unlock(&dev->lock); 221 blktrans_dev_put(dev); 222 + unlock_kernel(); 223 return ret; 224 } 225 ··· 251 if (!dev) 252 return ret; 253 254 + lock_kernel(); 255 mutex_lock(&dev->lock); 256 257 if (!dev->mtd) ··· 265 } 266 unlock: 267 mutex_unlock(&dev->lock); 268 + unlock_kernel(); 269 blktrans_dev_put(dev); 270 return ret; 271 } ··· 273 .owner = THIS_MODULE, 274 .open = blktrans_open, 275 .release = blktrans_release, 276 + .ioctl = blktrans_ioctl, 277 .getgeo = blktrans_getgeo, 278 }; 279
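Note: mtd_blkdevs shows the request-side equivalents used by the simpler drivers in this series: filesystem requests are identified by rq->cmd_type == REQ_TYPE_FS, and discards by the REQ_DISCARD flag rather than a dedicated request type. A compact sketch of a transfer dispatch, where my_dev, do_discard() and do_rw() are hypothetical:

	#include <linux/blkdev.h>
	#include <linux/errno.h>

	/* Sketch: classify a request before issuing it to the device. */
	static int my_do_xfer(struct my_dev *dev, struct request *req)
	{
		if (req->cmd_type != REQ_TYPE_FS)
			return -EIO;		/* only plain block I/O here */

		if (req->cmd_flags & REQ_DISCARD)
			return do_discard(dev, blk_rq_pos(req),
					  blk_rq_sectors(req));

		return do_rw(dev, req, rq_data_dir(req));
	}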
+7 -1
drivers/s390/block/dasd.c
··· 21 #include <linux/hdreg.h> 22 #include <linux/async.h> 23 #include <linux/mutex.h> 24 25 #include <asm/ccwdev.h> 26 #include <asm/ebcdic.h> ··· 2197 */ 2198 blk_queue_max_segment_size(block->request_queue, PAGE_SIZE); 2199 blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1); 2200 - blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN, NULL); 2201 } 2202 2203 /* ··· 2236 if (!block) 2237 return -ENODEV; 2238 2239 base = block->base; 2240 atomic_inc(&block->open_count); 2241 if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) { ··· 2271 goto out; 2272 } 2273 2274 return 0; 2275 2276 out: 2277 module_put(base->discipline->owner); 2278 unlock: 2279 atomic_dec(&block->open_count); 2280 return rc; 2281 } 2282 ··· 2286 { 2287 struct dasd_block *block = disk->private_data; 2288 2289 atomic_dec(&block->open_count); 2290 module_put(block->base->discipline->owner); 2291 return 0; 2292 } 2293
··· 21 #include <linux/hdreg.h> 22 #include <linux/async.h> 23 #include <linux/mutex.h> 24 + #include <linux/smp_lock.h> 25 26 #include <asm/ccwdev.h> 27 #include <asm/ebcdic.h> ··· 2196 */ 2197 blk_queue_max_segment_size(block->request_queue, PAGE_SIZE); 2198 blk_queue_segment_boundary(block->request_queue, PAGE_SIZE - 1); 2199 + blk_queue_ordered(block->request_queue, QUEUE_ORDERED_DRAIN); 2200 } 2201 2202 /* ··· 2235 if (!block) 2236 return -ENODEV; 2237 2238 + lock_kernel(); 2239 base = block->base; 2240 atomic_inc(&block->open_count); 2241 if (test_bit(DASD_FLAG_OFFLINE, &base->flags)) { ··· 2269 goto out; 2270 } 2271 2272 + unlock_kernel(); 2273 return 0; 2274 2275 out: 2276 module_put(base->discipline->owner); 2277 unlock: 2278 atomic_dec(&block->open_count); 2279 + unlock_kernel(); 2280 return rc; 2281 } 2282 ··· 2282 { 2283 struct dasd_block *block = disk->private_data; 2284 2285 + lock_kernel(); 2286 atomic_dec(&block->open_count); 2287 module_put(block->base->discipline->owner); 2288 + unlock_kernel(); 2289 return 0; 2290 } 2291
+5
drivers/s390/block/dcssblk.c
··· 14 #include <linux/init.h> 15 #include <linux/slab.h> 16 #include <linux/blkdev.h> 17 #include <linux/completion.h> 18 #include <linux/interrupt.h> 19 #include <linux/platform_device.h> ··· 776 struct dcssblk_dev_info *dev_info; 777 int rc; 778 779 dev_info = bdev->bd_disk->private_data; 780 if (NULL == dev_info) { 781 rc = -ENODEV; ··· 786 bdev->bd_block_size = 4096; 787 rc = 0; 788 out: 789 return rc; 790 } 791 ··· 797 struct segment_info *entry; 798 int rc; 799 800 if (!dev_info) { 801 rc = -ENODEV; 802 goto out; ··· 815 up_write(&dcssblk_devices_sem); 816 rc = 0; 817 out: 818 return rc; 819 } 820
··· 14 #include <linux/init.h> 15 #include <linux/slab.h> 16 #include <linux/blkdev.h> 17 + #include <linux/smp_lock.h> 18 #include <linux/completion.h> 19 #include <linux/interrupt.h> 20 #include <linux/platform_device.h> ··· 775 struct dcssblk_dev_info *dev_info; 776 int rc; 777 778 + lock_kernel(); 779 dev_info = bdev->bd_disk->private_data; 780 if (NULL == dev_info) { 781 rc = -ENODEV; ··· 784 bdev->bd_block_size = 4096; 785 rc = 0; 786 out: 787 + unlock_kernel(); 788 return rc; 789 } 790 ··· 794 struct segment_info *entry; 795 int rc; 796 797 + lock_kernel(); 798 if (!dev_info) { 799 rc = -ENODEV; 800 goto out; ··· 811 up_write(&dcssblk_devices_sem); 812 rc = 0; 813 out: 814 + unlock_kernel(); 815 return rc; 816 } 817
+7 -1
drivers/s390/char/tape_block.c
··· 16 #include <linux/fs.h> 17 #include <linux/module.h> 18 #include <linux/blkdev.h> 19 #include <linux/interrupt.h> 20 #include <linux/buffer_head.h> 21 #include <linux/kernel.h> ··· 362 struct tape_device * device; 363 int rc; 364 365 device = tape_get_device(disk->private_data); 366 367 if (device->required_tapemarks) { ··· 386 * is called. 387 */ 388 tape_state_set(device, TS_BLKUSE); 389 return 0; 390 391 release: 392 tape_release(device); 393 put_device: 394 tape_put_device(device); 395 return rc; 396 } 397 ··· 407 tapeblock_release(struct gendisk *disk, fmode_t mode) 408 { 409 struct tape_device *device = disk->private_data; 410 - 411 tape_state_set(device, TS_IN_USE); 412 tape_release(device); 413 tape_put_device(device); 414 415 return 0; 416 }
··· 16 #include <linux/fs.h> 17 #include <linux/module.h> 18 #include <linux/blkdev.h> 19 + #include <linux/smp_lock.h> 20 #include <linux/interrupt.h> 21 #include <linux/buffer_head.h> 22 #include <linux/kernel.h> ··· 361 struct tape_device * device; 362 int rc; 363 364 + lock_kernel(); 365 device = tape_get_device(disk->private_data); 366 367 if (device->required_tapemarks) { ··· 384 * is called. 385 */ 386 tape_state_set(device, TS_BLKUSE); 387 + unlock_kernel(); 388 return 0; 389 390 release: 391 tape_release(device); 392 put_device: 393 tape_put_device(device); 394 + unlock_kernel(); 395 return rc; 396 } 397 ··· 403 tapeblock_release(struct gendisk *disk, fmode_t mode) 404 { 405 struct tape_device *device = disk->private_data; 406 + 407 + lock_kernel(); 408 tape_state_set(device, TS_IN_USE); 409 tape_release(device); 410 tape_put_device(device); 411 + unlock_kernel(); 412 413 return 0; 414 }
-25
drivers/scsi/aha1542.c
··· 52 #define SCSI_BUF_PA(address) isa_virt_to_bus(address) 53 #define SCSI_SG_PA(sgent) (isa_page_to_bus(sg_page((sgent))) + (sgent)->offset) 54 55 - static void BAD_SG_DMA(Scsi_Cmnd * SCpnt, 56 - struct scatterlist *sgp, 57 - int nseg, 58 - int badseg) 59 - { 60 - printk(KERN_CRIT "sgpnt[%d:%d] page %p/0x%llx length %u\n", 61 - badseg, nseg, sg_virt(sgp), 62 - (unsigned long long)SCSI_SG_PA(sgp), 63 - sgp->length); 64 - 65 - /* 66 - * Not safe to continue. 67 - */ 68 - panic("Buffer at physical address > 16Mb used for aha1542"); 69 - } 70 - 71 #include<linux/stat.h> 72 73 #ifdef DEBUG ··· 675 } 676 scsi_for_each_sg(SCpnt, sg, sg_count, i) { 677 any2scsi(cptr[i].dataptr, SCSI_SG_PA(sg)); 678 - if (SCSI_SG_PA(sg) + sg->length - 1 > ISA_DMA_THRESHOLD) 679 - BAD_SG_DMA(SCpnt, scsi_sglist(SCpnt), sg_count, i); 680 any2scsi(cptr[i].datalen, sg->length); 681 }; 682 any2scsi(ccb[mbo].datalen, sg_count * sizeof(struct chain)); ··· 1115 release_region(bases[indx], 4); 1116 continue; 1117 } 1118 - /* For now we do this - until kmalloc is more intelligent 1119 - we are resigned to stupid hacks like this */ 1120 - if (SCSI_BUF_PA(shpnt) >= ISA_DMA_THRESHOLD) { 1121 - printk(KERN_ERR "Invalid address for shpnt with 1542.\n"); 1122 - goto unregister; 1123 - } 1124 if (!aha1542_test_port(bases[indx], shpnt)) 1125 goto unregister; 1126 - 1127 1128 base_io = bases[indx]; 1129
··· 52 #define SCSI_BUF_PA(address) isa_virt_to_bus(address) 53 #define SCSI_SG_PA(sgent) (isa_page_to_bus(sg_page((sgent))) + (sgent)->offset) 54 55 #include<linux/stat.h> 56 57 #ifdef DEBUG ··· 691 } 692 scsi_for_each_sg(SCpnt, sg, sg_count, i) { 693 any2scsi(cptr[i].dataptr, SCSI_SG_PA(sg)); 694 any2scsi(cptr[i].datalen, sg->length); 695 }; 696 any2scsi(ccb[mbo].datalen, sg_count * sizeof(struct chain)); ··· 1133 release_region(bases[indx], 4); 1134 continue; 1135 } 1136 if (!aha1542_test_port(bases[indx], shpnt)) 1137 goto unregister; 1138 1139 base_io = bases[indx]; 1140
+4 -4
drivers/scsi/osd/osd_initiator.c
··· 716 return PTR_ERR(bio); 717 } 718 719 - bio->bi_rw &= ~(1 << BIO_RW); 720 or->in.bio = bio; 721 or->in.total_bytes = bio->bi_size; 722 return 0; ··· 814 { 815 _osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, len); 816 WARN_ON(or->out.bio || or->out.total_bytes); 817 - WARN_ON(0 == bio_rw_flagged(bio, BIO_RW)); 818 or->out.bio = bio; 819 or->out.total_bytes = len; 820 } ··· 829 if (IS_ERR(bio)) 830 return PTR_ERR(bio); 831 832 - bio->bi_rw |= (1 << BIO_RW); /* FIXME: bio_set_dir() */ 833 osd_req_write(or, obj, offset, bio, len); 834 return 0; 835 } ··· 865 { 866 _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len); 867 WARN_ON(or->in.bio || or->in.total_bytes); 868 - WARN_ON(1 == bio_rw_flagged(bio, BIO_RW)); 869 or->in.bio = bio; 870 or->in.total_bytes = len; 871 }
··· 716 return PTR_ERR(bio); 717 } 718 719 + bio->bi_rw &= ~REQ_WRITE; 720 or->in.bio = bio; 721 or->in.total_bytes = bio->bi_size; 722 return 0; ··· 814 { 815 _osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, len); 816 WARN_ON(or->out.bio || or->out.total_bytes); 817 + WARN_ON(0 == (bio->bi_rw & REQ_WRITE)); 818 or->out.bio = bio; 819 or->out.total_bytes = len; 820 } ··· 829 if (IS_ERR(bio)) 830 return PTR_ERR(bio); 831 832 + bio->bi_rw |= REQ_WRITE; /* FIXME: bio_set_dir() */ 833 osd_req_write(or, obj, offset, bio, len); 834 return 0; 835 } ··· 865 { 866 _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len); 867 WARN_ON(or->in.bio || or->in.total_bytes); 868 + WARN_ON(1 == (bio->bi_rw & REQ_WRITE)); 869 or->in.bio = bio; 870 or->in.total_bytes = len; 871 }
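osd_initiator is one of many call sites in this merge converted from the old (1 << BIO_RW) bit-shift idiom to the unified REQ_* flags that bios and requests now share via blk_types.h. A hedged sketch of the new test/set/clear forms, using illustrative helper names that are not part of the kernel API:

#include <linux/bio.h>
#include <linux/blk_types.h>

/* Illustrative helpers only, not kernel API. */
static inline bool foo_bio_is_write(struct bio *bio)
{
	return bio->bi_rw & REQ_WRITE;	/* old code tested (1 << BIO_RW) */
}

static inline void foo_bio_set_write(struct bio *bio)
{
	bio->bi_rw |= REQ_WRITE;	/* old code OR'ed in (1 << BIO_RW) */
}

static inline void foo_bio_clear_write(struct bio *bio)
{
	bio->bi_rw &= ~REQ_WRITE;
}

The btrfs, fs/bio.c and exofs hunks further down apply exactly this substitution.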
+7 -5
drivers/scsi/scsi_error.c
··· 320 "changed. The Linux SCSI layer does not " 321 "automatically adjust these parameters.\n"); 322 323 - if (blk_barrier_rq(scmd->request)) 324 /* 325 * barrier requests should always retry on UA 326 * otherwise block will get a spurious error ··· 1331 case DID_OK: 1332 break; 1333 case DID_BUS_BUSY: 1334 - return blk_failfast_transport(scmd->request); 1335 case DID_PARITY: 1336 - return blk_failfast_dev(scmd->request); 1337 case DID_ERROR: 1338 if (msg_byte(scmd->result) == COMMAND_COMPLETE && 1339 status_byte(scmd->result) == RESERVATION_CONFLICT) 1340 return 0; 1341 /* fall through */ 1342 case DID_SOFT_ERROR: 1343 - return blk_failfast_driver(scmd->request); 1344 } 1345 1346 switch (status_byte(scmd->result)) { ··· 1349 * assume caller has checked sense and determinted 1350 * the check condition was retryable. 1351 */ 1352 - return blk_failfast_dev(scmd->request); 1353 } 1354 1355 return 0;
··· 320 "changed. The Linux SCSI layer does not " 321 "automatically adjust these parameters.\n"); 322 323 + if (scmd->request->cmd_flags & REQ_HARDBARRIER) 324 /* 325 * barrier requests should always retry on UA 326 * otherwise block will get a spurious error ··· 1331 case DID_OK: 1332 break; 1333 case DID_BUS_BUSY: 1334 + return (scmd->request->cmd_flags & REQ_FAILFAST_TRANSPORT); 1335 case DID_PARITY: 1336 + return (scmd->request->cmd_flags & REQ_FAILFAST_DEV); 1337 case DID_ERROR: 1338 if (msg_byte(scmd->result) == COMMAND_COMPLETE && 1339 status_byte(scmd->result) == RESERVATION_CONFLICT) 1340 return 0; 1341 /* fall through */ 1342 case DID_SOFT_ERROR: 1343 + return (scmd->request->cmd_flags & REQ_FAILFAST_DRIVER); 1344 } 1345 1346 switch (status_byte(scmd->result)) { ··· 1349 * assume caller has checked sense and determinted 1350 * the check condition was retryable. 1351 */ 1352 + if (scmd->request->cmd_flags & REQ_FAILFAST_DEV || 1353 + scmd->request->cmd_type == REQ_TYPE_BLOCK_PC) 1354 + return 1; 1355 } 1356 1357 return 0;
+6 -8
drivers/scsi/scsi_lib.c
··· 85 { 86 struct scsi_cmnd *cmd = req->special; 87 88 - req->cmd_flags &= ~REQ_DONTPREP; 89 req->special = NULL; 90 91 scsi_put_command(cmd); ··· 722 sense_deferred = scsi_sense_is_deferred(&sshdr); 723 } 724 725 - if (blk_pc_request(req)) { /* SG_IO ioctl from block level */ 726 req->errors = result; 727 if (result) { 728 if (sense_valid && req->sense) { ··· 757 } 758 } 759 760 - BUG_ON(blk_bidi_rq(req)); /* bidi not support for !blk_pc_request yet */ 761 762 /* 763 * Next deal with any sectors which we were able to correctly ··· 1011 1012 err_exit: 1013 scsi_release_buffers(cmd); 1014 - if (error == BLKPREP_KILL) 1015 - scsi_put_command(cmd); 1016 - else /* BLKPREP_DEFER */ 1017 - scsi_unprep_request(cmd->request); 1018 - 1019 return error; 1020 } 1021 EXPORT_SYMBOL(scsi_init_io);
··· 85 { 86 struct scsi_cmnd *cmd = req->special; 87 88 + blk_unprep_request(req); 89 req->special = NULL; 90 91 scsi_put_command(cmd); ··· 722 sense_deferred = scsi_sense_is_deferred(&sshdr); 723 } 724 725 + if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */ 726 req->errors = result; 727 if (result) { 728 if (sense_valid && req->sense) { ··· 757 } 758 } 759 760 + /* no bidi support for !REQ_TYPE_BLOCK_PC yet */ 761 + BUG_ON(blk_bidi_rq(req)); 762 763 /* 764 * Next deal with any sectors which we were able to correctly ··· 1010 1011 err_exit: 1012 scsi_release_buffers(cmd); 1013 + scsi_put_command(cmd); 1014 + cmd->request->special = NULL; 1015 return error; 1016 } 1017 EXPORT_SYMBOL(scsi_init_io);
+85 -43
drivers/scsi/sd.c
··· 46 #include <linux/blkdev.h> 47 #include <linux/blkpg.h> 48 #include <linux/delay.h> 49 #include <linux/mutex.h> 50 #include <linux/string_helpers.h> 51 #include <linux/async.h> ··· 412 } 413 414 /** 415 - * sd_prepare_discard - unmap blocks on thinly provisioned device 416 * @rq: Request to prepare 417 * 418 * Will issue either UNMAP or WRITE SAME(16) depending on preference 419 * indicated by target device. 420 **/ 421 - static int sd_prepare_discard(struct request *rq) 422 { 423 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); 424 struct bio *bio = rq->bio; 425 sector_t sector = bio->bi_sector; 426 - unsigned int num = bio_sectors(bio); 427 428 if (sdkp->device->sector_size == 4096) { 429 sector >>= 3; 430 - num >>= 3; 431 } 432 433 - rq->cmd_type = REQ_TYPE_BLOCK_PC; 434 rq->timeout = SD_TIMEOUT; 435 436 memset(rq->cmd, 0, rq->cmd_len); 437 438 - if (sdkp->unmap) { 439 - char *buf = kmap_atomic(bio_page(bio), KM_USER0); 440 441 rq->cmd[0] = UNMAP; 442 rq->cmd[8] = 24; 443 - rq->cmd_len = 10; 444 - 445 - /* Ensure that data length matches payload */ 446 - rq->__data_len = bio->bi_size = bio->bi_io_vec->bv_len = 24; 447 448 put_unaligned_be16(6 + 16, &buf[0]); 449 put_unaligned_be16(16, &buf[2]); 450 put_unaligned_be64(sector, &buf[8]); 451 - put_unaligned_be32(num, &buf[16]); 452 453 - kunmap_atomic(buf, KM_USER0); 454 } else { 455 rq->cmd[0] = WRITE_SAME_16; 456 rq->cmd[1] = 0x8; /* UNMAP */ 457 put_unaligned_be64(sector, &rq->cmd[2]); 458 - put_unaligned_be32(num, &rq->cmd[10]); 459 - rq->cmd_len = 16; 460 } 461 462 - return BLKPREP_OK; 463 } 464 465 /** ··· 517 * Discard request come in as REQ_TYPE_FS but we turn them into 518 * block PC requests to make life easier. 519 */ 520 - if (blk_discard_rq(rq)) 521 - ret = sd_prepare_discard(rq); 522 - 523 - if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 524 ret = scsi_setup_blk_pc_cmnd(sdp, rq); 525 goto out; 526 } else if (rq->cmd_type != REQ_TYPE_FS) { ··· 671 SCpnt->cmnd[0] = VARIABLE_LENGTH_CMD; 672 SCpnt->cmnd[7] = 0x18; 673 SCpnt->cmnd[9] = (rq_data_dir(rq) == READ) ? READ_32 : WRITE_32; 674 - SCpnt->cmnd[10] = protect | (blk_fua_rq(rq) ? 0x8 : 0); 675 676 /* LBA */ 677 SCpnt->cmnd[12] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0; ··· 696 SCpnt->cmnd[31] = (unsigned char) this_count & 0xff; 697 } else if (block > 0xffffffff) { 698 SCpnt->cmnd[0] += READ_16 - READ_6; 699 - SCpnt->cmnd[1] = protect | (blk_fua_rq(rq) ? 0x8 : 0); 700 SCpnt->cmnd[2] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0; 701 SCpnt->cmnd[3] = sizeof(block) > 4 ? (unsigned char) (block >> 48) & 0xff : 0; 702 SCpnt->cmnd[4] = sizeof(block) > 4 ? (unsigned char) (block >> 40) & 0xff : 0; ··· 717 this_count = 0xffff; 718 719 SCpnt->cmnd[0] += READ_10 - READ_6; 720 - SCpnt->cmnd[1] = protect | (blk_fua_rq(rq) ? 0x8 : 0); 721 SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff; 722 SCpnt->cmnd[3] = (unsigned char) (block >> 16) & 0xff; 723 SCpnt->cmnd[4] = (unsigned char) (block >> 8) & 0xff; ··· 726 SCpnt->cmnd[7] = (unsigned char) (this_count >> 8) & 0xff; 727 SCpnt->cmnd[8] = (unsigned char) this_count & 0xff; 728 } else { 729 - if (unlikely(blk_fua_rq(rq))) { 730 /* 731 * This happens only if this drive failed 732 * 10byte rw command with ILLEGAL_REQUEST ··· 780 * or from within the kernel (e.g. as a result of a mount(1) ). 781 * In the latter case @inode and @filp carry an abridged amount 782 * of information as noted above. 
783 **/ 784 static int sd_open(struct block_device *bdev, fmode_t mode) 785 { ··· 836 if (!scsi_device_online(sdev)) 837 goto error_out; 838 839 - if (!sdkp->openers++ && sdev->removable) { 840 if (scsi_block_when_processing_errors(sdev)) 841 scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT); 842 } ··· 860 * 861 * Note: may block (uninterruptible) if error recovery is underway 862 * on this disk. 863 **/ 864 static int sd_release(struct gendisk *disk, fmode_t mode) 865 { ··· 870 871 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n")); 872 873 - if (!--sdkp->openers && sdev->removable) { 874 if (scsi_block_when_processing_errors(sdev)) 875 scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW); 876 } ··· 943 error = scsi_nonblockable_ioctl(sdp, cmd, p, 944 (mode & FMODE_NDELAY) != 0); 945 if (!scsi_block_when_processing_errors(sdp) || !error) 946 - return error; 947 948 /* 949 * Send SCSI addressing ioctls directly to mid level, send other ··· 953 switch (cmd) { 954 case SCSI_IOCTL_GET_IDLUN: 955 case SCSI_IOCTL_GET_BUS_NUMBER: 956 - return scsi_ioctl(sdp, cmd, p); 957 default: 958 error = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, p); 959 if (error != -ENOTTY) 960 - return error; 961 } 962 - return scsi_ioctl(sdp, cmd, p); 963 } 964 965 static void set_media_not_present(struct scsi_disk *sdkp) ··· 1088 return 0; 1089 } 1090 1091 - static void sd_prepare_flush(struct request_queue *q, struct request *rq) 1092 - { 1093 - rq->cmd_type = REQ_TYPE_BLOCK_PC; 1094 - rq->timeout = SD_TIMEOUT; 1095 - rq->retries = SD_MAX_RETRIES; 1096 - rq->cmd[0] = SYNCHRONIZE_CACHE; 1097 - rq->cmd_len = 10; 1098 - } 1099 - 1100 static void sd_rescan(struct device *dev) 1101 { 1102 struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev); ··· 1137 .owner = THIS_MODULE, 1138 .open = sd_open, 1139 .release = sd_release, 1140 - .locked_ioctl = sd_ioctl, 1141 .getgeo = sd_getgeo, 1142 #ifdef CONFIG_COMPAT 1143 .compat_ioctl = sd_compat_ioctl, ··· 1154 u64 bad_lba; 1155 int info_valid; 1156 1157 - if (!blk_fs_request(scmd->request)) 1158 return 0; 1159 1160 info_valid = scsi_get_sense_info_fld(scmd->sense_buffer, ··· 1204 struct scsi_disk *sdkp = scsi_disk(SCpnt->request->rq_disk); 1205 int sense_valid = 0; 1206 int sense_deferred = 0; 1207 1208 if (result) { 1209 sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr); ··· 2161 else 2162 ordered = QUEUE_ORDERED_DRAIN; 2163 2164 - blk_queue_ordered(sdkp->disk->queue, ordered, sd_prepare_flush); 2165 2166 set_capacity(disk, sdkp->capacity); 2167 kfree(buffer); ··· 2274 sd_revalidate_disk(gd); 2275 2276 blk_queue_prep_rq(sdp->request_queue, sd_prep_fn); 2277 2278 gd->driverfs_dev = &sdp->sdev_gendev; 2279 gd->flags = GENHD_FL_EXT_DEVT; ··· 2354 sdkp->driver = &sd_template; 2355 sdkp->disk = gd; 2356 sdkp->index = index; 2357 - sdkp->openers = 0; 2358 sdkp->previous_state = 1; 2359 2360 if (!sdp->request_queue->rq_timeout) { ··· 2413 2414 async_synchronize_full(); 2415 blk_queue_prep_rq(sdkp->device->request_queue, scsi_prep_fn); 2416 device_del(&sdkp->dev); 2417 del_gendisk(sdkp->disk); 2418 sd_shutdown(dev);
··· 46 #include <linux/blkdev.h> 47 #include <linux/blkpg.h> 48 #include <linux/delay.h> 49 + #include <linux/smp_lock.h> 50 #include <linux/mutex.h> 51 #include <linux/string_helpers.h> 52 #include <linux/async.h> ··· 411 } 412 413 /** 414 + * scsi_setup_discard_cmnd - unmap blocks on thinly provisioned device 415 + * @sdp: scsi device to operate one 416 * @rq: Request to prepare 417 * 418 * Will issue either UNMAP or WRITE SAME(16) depending on preference 419 * indicated by target device. 420 **/ 421 + static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq) 422 { 423 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); 424 struct bio *bio = rq->bio; 425 sector_t sector = bio->bi_sector; 426 + unsigned int nr_sectors = bio_sectors(bio); 427 + unsigned int len; 428 + int ret; 429 + struct page *page; 430 431 if (sdkp->device->sector_size == 4096) { 432 sector >>= 3; 433 + nr_sectors >>= 3; 434 } 435 436 rq->timeout = SD_TIMEOUT; 437 438 memset(rq->cmd, 0, rq->cmd_len); 439 440 + page = alloc_page(GFP_ATOMIC | __GFP_ZERO); 441 + if (!page) 442 + return BLKPREP_DEFER; 443 444 + if (sdkp->unmap) { 445 + char *buf = page_address(page); 446 + 447 + rq->cmd_len = 10; 448 rq->cmd[0] = UNMAP; 449 rq->cmd[8] = 24; 450 451 put_unaligned_be16(6 + 16, &buf[0]); 452 put_unaligned_be16(16, &buf[2]); 453 put_unaligned_be64(sector, &buf[8]); 454 + put_unaligned_be32(nr_sectors, &buf[16]); 455 456 + len = 24; 457 } else { 458 + rq->cmd_len = 16; 459 rq->cmd[0] = WRITE_SAME_16; 460 rq->cmd[1] = 0x8; /* UNMAP */ 461 put_unaligned_be64(sector, &rq->cmd[2]); 462 + put_unaligned_be32(nr_sectors, &rq->cmd[10]); 463 + 464 + len = sdkp->device->sector_size; 465 } 466 467 + blk_add_request_payload(rq, page, len); 468 + ret = scsi_setup_blk_pc_cmnd(sdp, rq); 469 + rq->buffer = page_address(page); 470 + if (ret != BLKPREP_OK) { 471 + __free_page(page); 472 + rq->buffer = NULL; 473 + } 474 + return ret; 475 + } 476 + 477 + static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq) 478 + { 479 + rq->timeout = SD_TIMEOUT; 480 + rq->retries = SD_MAX_RETRIES; 481 + rq->cmd[0] = SYNCHRONIZE_CACHE; 482 + rq->cmd_len = 10; 483 + 484 + return scsi_setup_blk_pc_cmnd(sdp, rq); 485 + } 486 + 487 + static void sd_unprep_fn(struct request_queue *q, struct request *rq) 488 + { 489 + if (rq->cmd_flags & REQ_DISCARD) { 490 + free_page((unsigned long)rq->buffer); 491 + rq->buffer = NULL; 492 + } 493 } 494 495 /** ··· 485 * Discard request come in as REQ_TYPE_FS but we turn them into 486 * block PC requests to make life easier. 487 */ 488 + if (rq->cmd_flags & REQ_DISCARD) { 489 + ret = scsi_setup_discard_cmnd(sdp, rq); 490 + goto out; 491 + } else if (rq->cmd_flags & REQ_FLUSH) { 492 + ret = scsi_setup_flush_cmnd(sdp, rq); 493 + goto out; 494 + } else if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 495 ret = scsi_setup_blk_pc_cmnd(sdp, rq); 496 goto out; 497 } else if (rq->cmd_type != REQ_TYPE_FS) { ··· 636 SCpnt->cmnd[0] = VARIABLE_LENGTH_CMD; 637 SCpnt->cmnd[7] = 0x18; 638 SCpnt->cmnd[9] = (rq_data_dir(rq) == READ) ? READ_32 : WRITE_32; 639 + SCpnt->cmnd[10] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0); 640 641 /* LBA */ 642 SCpnt->cmnd[12] = sizeof(block) > 4 ? (unsigned char) (block >> 56) & 0xff : 0; ··· 661 SCpnt->cmnd[31] = (unsigned char) this_count & 0xff; 662 } else if (block > 0xffffffff) { 663 SCpnt->cmnd[0] += READ_16 - READ_6; 664 + SCpnt->cmnd[1] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0); 665 SCpnt->cmnd[2] = sizeof(block) > 4 ? 
(unsigned char) (block >> 56) & 0xff : 0; 666 SCpnt->cmnd[3] = sizeof(block) > 4 ? (unsigned char) (block >> 48) & 0xff : 0; 667 SCpnt->cmnd[4] = sizeof(block) > 4 ? (unsigned char) (block >> 40) & 0xff : 0; ··· 682 this_count = 0xffff; 683 684 SCpnt->cmnd[0] += READ_10 - READ_6; 685 + SCpnt->cmnd[1] = protect | ((rq->cmd_flags & REQ_FUA) ? 0x8 : 0); 686 SCpnt->cmnd[2] = (unsigned char) (block >> 24) & 0xff; 687 SCpnt->cmnd[3] = (unsigned char) (block >> 16) & 0xff; 688 SCpnt->cmnd[4] = (unsigned char) (block >> 8) & 0xff; ··· 691 SCpnt->cmnd[7] = (unsigned char) (this_count >> 8) & 0xff; 692 SCpnt->cmnd[8] = (unsigned char) this_count & 0xff; 693 } else { 694 + if (unlikely(rq->cmd_flags & REQ_FUA)) { 695 /* 696 * This happens only if this drive failed 697 * 10byte rw command with ILLEGAL_REQUEST ··· 745 * or from within the kernel (e.g. as a result of a mount(1) ). 746 * In the latter case @inode and @filp carry an abridged amount 747 * of information as noted above. 748 + * 749 + * Locking: called with bdev->bd_mutex held. 750 **/ 751 static int sd_open(struct block_device *bdev, fmode_t mode) 752 { ··· 799 if (!scsi_device_online(sdev)) 800 goto error_out; 801 802 + if ((atomic_inc_return(&sdkp->openers) == 1) && sdev->removable) { 803 if (scsi_block_when_processing_errors(sdev)) 804 scsi_set_medium_removal(sdev, SCSI_REMOVAL_PREVENT); 805 } ··· 823 * 824 * Note: may block (uninterruptible) if error recovery is underway 825 * on this disk. 826 + * 827 + * Locking: called with bdev->bd_mutex held. 828 **/ 829 static int sd_release(struct gendisk *disk, fmode_t mode) 830 { ··· 831 832 SCSI_LOG_HLQUEUE(3, sd_printk(KERN_INFO, sdkp, "sd_release\n")); 833 834 + if (atomic_dec_return(&sdkp->openers) && sdev->removable) { 835 if (scsi_block_when_processing_errors(sdev)) 836 scsi_set_medium_removal(sdev, SCSI_REMOVAL_ALLOW); 837 } ··· 904 error = scsi_nonblockable_ioctl(sdp, cmd, p, 905 (mode & FMODE_NDELAY) != 0); 906 if (!scsi_block_when_processing_errors(sdp) || !error) 907 + goto out; 908 909 /* 910 * Send SCSI addressing ioctls directly to mid level, send other ··· 914 switch (cmd) { 915 case SCSI_IOCTL_GET_IDLUN: 916 case SCSI_IOCTL_GET_BUS_NUMBER: 917 + error = scsi_ioctl(sdp, cmd, p); 918 + break; 919 default: 920 error = scsi_cmd_ioctl(disk->queue, disk, mode, cmd, p); 921 if (error != -ENOTTY) 922 + break; 923 + error = scsi_ioctl(sdp, cmd, p); 924 + break; 925 } 926 + out: 927 + return error; 928 } 929 930 static void set_media_not_present(struct scsi_disk *sdkp) ··· 1045 return 0; 1046 } 1047 1048 static void sd_rescan(struct device *dev) 1049 { 1050 struct scsi_disk *sdkp = scsi_disk_get_from_dev(dev); ··· 1103 .owner = THIS_MODULE, 1104 .open = sd_open, 1105 .release = sd_release, 1106 + .ioctl = sd_ioctl, 1107 .getgeo = sd_getgeo, 1108 #ifdef CONFIG_COMPAT 1109 .compat_ioctl = sd_compat_ioctl, ··· 1120 u64 bad_lba; 1121 int info_valid; 1122 1123 + if (scmd->request->cmd_type != REQ_TYPE_FS) 1124 return 0; 1125 1126 info_valid = scsi_get_sense_info_fld(scmd->sense_buffer, ··· 1170 struct scsi_disk *sdkp = scsi_disk(SCpnt->request->rq_disk); 1171 int sense_valid = 0; 1172 int sense_deferred = 0; 1173 + 1174 + if (SCpnt->request->cmd_flags & REQ_DISCARD) { 1175 + if (!result) 1176 + scsi_set_resid(SCpnt, 0); 1177 + return good_bytes; 1178 + } 1179 1180 if (result) { 1181 sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr); ··· 2121 else 2122 ordered = QUEUE_ORDERED_DRAIN; 2123 2124 + blk_queue_ordered(sdkp->disk->queue, ordered); 2125 2126 set_capacity(disk, sdkp->capacity); 
2127 kfree(buffer); ··· 2234 sd_revalidate_disk(gd); 2235 2236 blk_queue_prep_rq(sdp->request_queue, sd_prep_fn); 2237 + blk_queue_unprep_rq(sdp->request_queue, sd_unprep_fn); 2238 2239 gd->driverfs_dev = &sdp->sdev_gendev; 2240 gd->flags = GENHD_FL_EXT_DEVT; ··· 2313 sdkp->driver = &sd_template; 2314 sdkp->disk = gd; 2315 sdkp->index = index; 2316 + atomic_set(&sdkp->openers, 0); 2317 sdkp->previous_state = 1; 2318 2319 if (!sdp->request_queue->rq_timeout) { ··· 2372 2373 async_synchronize_full(); 2374 blk_queue_prep_rq(sdkp->device->request_queue, scsi_prep_fn); 2375 + blk_queue_unprep_rq(sdkp->device->request_queue, NULL); 2376 device_del(&sdkp->dev); 2377 del_gendisk(sdkp->disk); 2378 sd_shutdown(dev);
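The sd discard path above now allocates its payload page in the prep hook and frees it in the new ->unprep_rq hook, which the block layer invokes once the request is fully completed. A minimal sketch of that pairing for a hypothetical driver, assuming the blk_add_request_payload()/blk_queue_unprep_rq() interfaces introduced in this series (foo_* names are illustrative):

#include <linux/blkdev.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical prep_rq_fn: attach a zeroed one-page payload to a discard. */
static int foo_prep_fn(struct request_queue *q, struct request *rq)
{
	struct page *page;

	if (!(rq->cmd_flags & REQ_DISCARD))
		return BLKPREP_OK;

	page = alloc_page(GFP_ATOMIC | __GFP_ZERO);
	if (!page)
		return BLKPREP_DEFER;		/* retry the request later */

	blk_add_request_payload(rq, page, PAGE_SIZE);
	rq->buffer = page_address(page);	/* remembered for unprep */
	return BLKPREP_OK;
}

/* Matching unprep_rq_fn: runs after completion, so the page can go away. */
static void foo_unprep_fn(struct request_queue *q, struct request *rq)
{
	if ((rq->cmd_flags & REQ_DISCARD) && rq->buffer) {
		free_page((unsigned long)rq->buffer);
		rq->buffer = NULL;
	}
}

/* Wired up once, typically at probe time. */
static void foo_setup_queue(struct request_queue *q)
{
	blk_queue_prep_rq(q, foo_prep_fn);
	blk_queue_unprep_rq(q, foo_unprep_fn);
}

The division of labour matches sd.c above: allocate in prep, free in unprep, and return BLKPREP_DEFER rather than failing hard when the atomic allocation cannot be satisfied.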
+1 -1
drivers/scsi/sd.h
··· 47 struct scsi_device *device; 48 struct device dev; 49 struct gendisk *disk; 50 - unsigned int openers; /* protected by BKL for now, yuck */ 51 sector_t capacity; /* size in 512-byte sectors */ 52 u32 index; 53 unsigned short hw_sector_size;
··· 47 struct scsi_device *device; 48 struct device dev; 49 struct gendisk *disk; 50 + atomic_t openers; 51 sector_t capacity; /* size in 512-byte sectors */ 52 u32 index; 53 unsigned short hw_sector_size;
+19 -6
drivers/scsi/sr.c
··· 44 #include <linux/init.h> 45 #include <linux/blkdev.h> 46 #include <linux/mutex.h> 47 #include <linux/slab.h> 48 #include <asm/uaccess.h> 49 ··· 467 468 static int sr_block_open(struct block_device *bdev, fmode_t mode) 469 { 470 - struct scsi_cd *cd = scsi_cd_get(bdev->bd_disk); 471 int ret = -ENXIO; 472 473 if (cd) { 474 ret = cdrom_open(&cd->cdi, bdev, mode); 475 if (ret) 476 scsi_cd_put(cd); 477 } 478 return ret; 479 } 480 481 static int sr_block_release(struct gendisk *disk, fmode_t mode) 482 { 483 struct scsi_cd *cd = scsi_cd(disk); 484 cdrom_release(&cd->cdi, mode); 485 scsi_cd_put(cd); 486 return 0; 487 } 488 ··· 499 void __user *argp = (void __user *)arg; 500 int ret; 501 502 /* 503 * Send SCSI addressing ioctls directly to mid level, send other 504 * ioctls to cdrom/block level. ··· 508 switch (cmd) { 509 case SCSI_IOCTL_GET_IDLUN: 510 case SCSI_IOCTL_GET_BUS_NUMBER: 511 - return scsi_ioctl(sdev, cmd, argp); 512 } 513 514 ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg); 515 if (ret != -ENOSYS) 516 - return ret; 517 518 /* 519 * ENODEV means that we didn't recognise the ioctl, or that we ··· 525 ret = scsi_nonblockable_ioctl(sdev, cmd, argp, 526 (mode & FMODE_NDELAY) != 0); 527 if (ret != -ENODEV) 528 - return ret; 529 - return scsi_ioctl(sdev, cmd, argp); 530 } 531 532 static int sr_block_media_changed(struct gendisk *disk) ··· 544 .owner = THIS_MODULE, 545 .open = sr_block_open, 546 .release = sr_block_release, 547 - .locked_ioctl = sr_block_ioctl, 548 .media_changed = sr_block_media_changed, 549 /* 550 * No compat_ioctl for now because sr_block_ioctl never
··· 44 #include <linux/init.h> 45 #include <linux/blkdev.h> 46 #include <linux/mutex.h> 47 + #include <linux/smp_lock.h> 48 #include <linux/slab.h> 49 #include <asm/uaccess.h> 50 ··· 466 467 static int sr_block_open(struct block_device *bdev, fmode_t mode) 468 { 469 + struct scsi_cd *cd; 470 int ret = -ENXIO; 471 472 + lock_kernel(); 473 + cd = scsi_cd_get(bdev->bd_disk); 474 if (cd) { 475 ret = cdrom_open(&cd->cdi, bdev, mode); 476 if (ret) 477 scsi_cd_put(cd); 478 } 479 + unlock_kernel(); 480 return ret; 481 } 482 483 static int sr_block_release(struct gendisk *disk, fmode_t mode) 484 { 485 struct scsi_cd *cd = scsi_cd(disk); 486 + lock_kernel(); 487 cdrom_release(&cd->cdi, mode); 488 scsi_cd_put(cd); 489 + unlock_kernel(); 490 return 0; 491 } 492 ··· 493 void __user *argp = (void __user *)arg; 494 int ret; 495 496 + lock_kernel(); 497 + 498 /* 499 * Send SCSI addressing ioctls directly to mid level, send other 500 * ioctls to cdrom/block level. ··· 500 switch (cmd) { 501 case SCSI_IOCTL_GET_IDLUN: 502 case SCSI_IOCTL_GET_BUS_NUMBER: 503 + ret = scsi_ioctl(sdev, cmd, argp); 504 + goto out; 505 } 506 507 ret = cdrom_ioctl(&cd->cdi, bdev, mode, cmd, arg); 508 if (ret != -ENOSYS) 509 + goto out; 510 511 /* 512 * ENODEV means that we didn't recognise the ioctl, or that we ··· 516 ret = scsi_nonblockable_ioctl(sdev, cmd, argp, 517 (mode & FMODE_NDELAY) != 0); 518 if (ret != -ENODEV) 519 + goto out; 520 + ret = scsi_ioctl(sdev, cmd, argp); 521 + 522 + out: 523 + unlock_kernel(); 524 + return ret; 525 } 526 527 static int sr_block_media_changed(struct gendisk *disk) ··· 531 .owner = THIS_MODULE, 532 .open = sr_block_open, 533 .release = sr_block_release, 534 + .ioctl = sr_block_ioctl, 535 .media_changed = sr_block_media_changed, 536 /* 537 * No compat_ioctl for now because sr_block_ioctl never
+1 -1
drivers/scsi/sun3_NCR5380.c
··· 2022 if((count > SUN3_DMA_MINSIZE) && (sun3_dma_setup_done 2023 != cmd)) 2024 { 2025 - if(blk_fs_request(cmd->request)) { 2026 sun3scsi_dma_setup(d, count, 2027 rq_data_dir(cmd->request)); 2028 sun3_dma_setup_done = cmd;
··· 2022 if((count > SUN3_DMA_MINSIZE) && (sun3_dma_setup_done 2023 != cmd)) 2024 { 2025 + if (cmd->request->cmd_type == REQ_TYPE_FS) { 2026 sun3scsi_dma_setup(d, count, 2027 rq_data_dir(cmd->request)); 2028 sun3_dma_setup_done = cmd;
+1 -1
drivers/scsi/sun3_scsi.c
··· 524 struct scsi_cmnd *cmd, 525 int write_flag) 526 { 527 - if(blk_fs_request(cmd->request)) 528 return wanted; 529 else 530 return 0;
··· 524 struct scsi_cmnd *cmd, 525 int write_flag) 526 { 527 + if (cmd->request->cmd_type == REQ_TYPE_FS) 528 return wanted; 529 else 530 return 0;
+1 -1
drivers/scsi/sun3_scsi_vme.c
··· 458 struct scsi_cmnd *cmd, 459 int write_flag) 460 { 461 - if(blk_fs_request(cmd->request)) 462 return wanted; 463 else 464 return 0;
··· 458 struct scsi_cmnd *cmd, 459 int write_flag) 460 { 461 + if (cmd->request->cmd_type == REQ_TYPE_FS) 462 return wanted; 463 else 464 return 0;
+10 -3
drivers/staging/hv/blkvsc_drv.c
··· 25 #include <linux/major.h> 26 #include <linux/delay.h> 27 #include <linux/hdreg.h> 28 #include <linux/slab.h> 29 #include <scsi/scsi.h> 30 #include <scsi/scsi_cmnd.h> ··· 806 blkvsc_req->cmnd[0] = READ_16; 807 } 808 809 - blkvsc_req->cmnd[1] |= blk_fua_rq(blkvsc_req->req) ? 0x8 : 0; 810 811 *(unsigned long long *)&blkvsc_req->cmnd[2] = 812 cpu_to_be64(blkvsc_req->sector_start); ··· 823 blkvsc_req->cmnd[0] = READ_10; 824 } 825 826 - blkvsc_req->cmnd[1] |= blk_fua_rq(blkvsc_req->req) ? 0x8 : 0; 827 828 *(unsigned int *)&blkvsc_req->cmnd[2] = 829 cpu_to_be32(blkvsc_req->sector_start); ··· 1271 DPRINT_DBG(BLKVSC_DRV, "- req %p\n", req); 1272 1273 blkdev = req->rq_disk->private_data; 1274 - if (blkdev->shutting_down || !blk_fs_request(req) || 1275 blkdev->media_not_present) { 1276 __blk_end_request_cur(req, 0); 1277 continue; ··· 1309 DPRINT_DBG(BLKVSC_DRV, "- users %d disk %s\n", blkdev->users, 1310 blkdev->gd->disk_name); 1311 1312 spin_lock(&blkdev->lock); 1313 1314 if (!blkdev->users && blkdev->device_type == DVD_TYPE) { ··· 1321 blkdev->users++; 1322 1323 spin_unlock(&blkdev->lock); 1324 return 0; 1325 } 1326 ··· 1332 DPRINT_DBG(BLKVSC_DRV, "- users %d disk %s\n", blkdev->users, 1333 blkdev->gd->disk_name); 1334 1335 spin_lock(&blkdev->lock); 1336 if (blkdev->users == 1) { 1337 spin_unlock(&blkdev->lock); ··· 1343 blkdev->users--; 1344 1345 spin_unlock(&blkdev->lock); 1346 return 0; 1347 } 1348
··· 25 #include <linux/major.h> 26 #include <linux/delay.h> 27 #include <linux/hdreg.h> 28 + #include <linux/smp_lock.h> 29 #include <linux/slab.h> 30 #include <scsi/scsi.h> 31 #include <scsi/scsi_cmnd.h> ··· 805 blkvsc_req->cmnd[0] = READ_16; 806 } 807 808 + blkvsc_req->cmnd[1] |= 809 + (blkvsc_req->req->cmd_flags & REQ_FUA) ? 0x8 : 0; 810 811 *(unsigned long long *)&blkvsc_req->cmnd[2] = 812 cpu_to_be64(blkvsc_req->sector_start); ··· 821 blkvsc_req->cmnd[0] = READ_10; 822 } 823 824 + blkvsc_req->cmnd[1] |= 825 + (blkvsc_req->req->cmd_flags & REQ_FUA) ? 0x8 : 0; 826 827 *(unsigned int *)&blkvsc_req->cmnd[2] = 828 cpu_to_be32(blkvsc_req->sector_start); ··· 1268 DPRINT_DBG(BLKVSC_DRV, "- req %p\n", req); 1269 1270 blkdev = req->rq_disk->private_data; 1271 + if (blkdev->shutting_down || req->cmd_type != REQ_TYPE_FS || 1272 blkdev->media_not_present) { 1273 __blk_end_request_cur(req, 0); 1274 continue; ··· 1306 DPRINT_DBG(BLKVSC_DRV, "- users %d disk %s\n", blkdev->users, 1307 blkdev->gd->disk_name); 1308 1309 + lock_kernel(); 1310 spin_lock(&blkdev->lock); 1311 1312 if (!blkdev->users && blkdev->device_type == DVD_TYPE) { ··· 1317 blkdev->users++; 1318 1319 spin_unlock(&blkdev->lock); 1320 + unlock_kernel(); 1321 return 0; 1322 } 1323 ··· 1327 DPRINT_DBG(BLKVSC_DRV, "- users %d disk %s\n", blkdev->users, 1328 blkdev->gd->disk_name); 1329 1330 + lock_kernel(); 1331 spin_lock(&blkdev->lock); 1332 if (blkdev->users == 1) { 1333 spin_unlock(&blkdev->lock); ··· 1337 blkdev->users--; 1338 1339 spin_unlock(&blkdev->lock); 1340 + unlock_kernel(); 1341 return 0; 1342 } 1343
+77 -35
drivers/xen/xenbus/xenbus_client.c
··· 133 } 134 EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt); 135 136 137 /** 138 * xenbus_switch_state ··· 203 */ 204 int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state) 205 { 206 - /* We check whether the state is currently set to the given value, and 207 - if not, then the state is set. We don't want to unconditionally 208 - write the given state, because we don't want to fire watches 209 - unnecessarily. Furthermore, if the node has gone, we don't write 210 - to it, as the device will be tearing down, and we don't want to 211 - resurrect that directory. 212 - 213 - Note that, because of this cached value of our state, this function 214 - will not work inside a Xenstore transaction (something it was 215 - trying to in the past) because dev->state would not get reset if 216 - the transaction was aborted. 217 - 218 - */ 219 - 220 - int current_state; 221 - int err; 222 - 223 - if (state == dev->state) 224 - return 0; 225 - 226 - err = xenbus_scanf(XBT_NIL, dev->nodename, "state", "%d", 227 - &current_state); 228 - if (err != 1) 229 - return 0; 230 - 231 - err = xenbus_printf(XBT_NIL, dev->nodename, "state", "%d", state); 232 - if (err) { 233 - if (state != XenbusStateClosing) /* Avoid looping */ 234 - xenbus_dev_fatal(dev, err, "writing new state"); 235 - return err; 236 - } 237 - 238 - dev->state = state; 239 - 240 - return 0; 241 } 242 EXPORT_SYMBOL_GPL(xenbus_switch_state); 243 244 int xenbus_frontend_closed(struct xenbus_device *dev) ··· 307 xenbus_switch_state(dev, XenbusStateClosing); 308 } 309 EXPORT_SYMBOL_GPL(xenbus_dev_fatal); 310 311 /** 312 * xenbus_grant_ring
··· 133 } 134 EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt); 135 136 + static void xenbus_switch_fatal(struct xenbus_device *, int, int, 137 + const char *, ...); 138 + 139 + static int 140 + __xenbus_switch_state(struct xenbus_device *dev, 141 + enum xenbus_state state, int depth) 142 + { 143 + /* We check whether the state is currently set to the given value, and 144 + if not, then the state is set. We don't want to unconditionally 145 + write the given state, because we don't want to fire watches 146 + unnecessarily. Furthermore, if the node has gone, we don't write 147 + to it, as the device will be tearing down, and we don't want to 148 + resurrect that directory. 149 + 150 + Note that, because of this cached value of our state, this 151 + function will not take a caller's Xenstore transaction 152 + (something it was trying to in the past) because dev->state 153 + would not get reset if the transaction was aborted. 154 + */ 155 + 156 + struct xenbus_transaction xbt; 157 + int current_state; 158 + int err, abort; 159 + 160 + if (state == dev->state) 161 + return 0; 162 + 163 + again: 164 + abort = 1; 165 + 166 + err = xenbus_transaction_start(&xbt); 167 + if (err) { 168 + xenbus_switch_fatal(dev, depth, err, "starting transaction"); 169 + return 0; 170 + } 171 + 172 + err = xenbus_scanf(xbt, dev->nodename, "state", "%d", &current_state); 173 + if (err != 1) 174 + goto abort; 175 + 176 + err = xenbus_printf(xbt, dev->nodename, "state", "%d", state); 177 + if (err) { 178 + xenbus_switch_fatal(dev, depth, err, "writing new state"); 179 + goto abort; 180 + } 181 + 182 + abort = 0; 183 + abort: 184 + err = xenbus_transaction_end(xbt, abort); 185 + if (err) { 186 + if (err == -EAGAIN && !abort) 187 + goto again; 188 + xenbus_switch_fatal(dev, depth, err, "ending transaction"); 189 + } else 190 + dev->state = state; 191 + 192 + return 0; 193 + } 194 195 /** 196 * xenbus_switch_state ··· 145 */ 146 int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state) 147 { 148 + return __xenbus_switch_state(dev, state, 0); 149 } 150 + 151 EXPORT_SYMBOL_GPL(xenbus_switch_state); 152 153 int xenbus_frontend_closed(struct xenbus_device *dev) ··· 282 xenbus_switch_state(dev, XenbusStateClosing); 283 } 284 EXPORT_SYMBOL_GPL(xenbus_dev_fatal); 285 + 286 + /** 287 + * Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but helps 288 + * avoiding recursion within xenbus_switch_state. 289 + */ 290 + static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err, 291 + const char *fmt, ...) 292 + { 293 + va_list ap; 294 + 295 + va_start(ap, fmt); 296 + xenbus_va_dev_error(dev, err, fmt, ap); 297 + va_end(ap); 298 + 299 + if (!depth) 300 + __xenbus_switch_state(dev, XenbusStateClosing, 1); 301 + } 302 303 /** 304 * xenbus_grant_ring
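__xenbus_switch_state above wraps the read-and-write of the state node in a Xenstore transaction and restarts it when the commit fails with -EAGAIN. Reduced to its skeleton (error reporting and the cached-state check elided), the retry idiom looks roughly like this; foo_write_state is an illustrative name:

#include <xen/xenbus.h>

static int foo_write_state(struct xenbus_device *dev, enum xenbus_state state)
{
	struct xenbus_transaction xbt;
	int err;

again:
	err = xenbus_transaction_start(&xbt);
	if (err)
		return err;

	err = xenbus_printf(xbt, dev->nodename, "state", "%d", state);
	if (err) {
		xenbus_transaction_end(xbt, 1);	/* abort */
		return err;
	}

	err = xenbus_transaction_end(xbt, 0);	/* commit */
	if (err == -EAGAIN)
		goto again;			/* somebody raced us, retry */
	return err;
}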
+3 -2
fs/bio.c
··· 843 if (!bio) 844 goto out_bmd; 845 846 - bio->bi_rw |= (!write_to_vm << BIO_RW); 847 848 ret = 0; 849 ··· 1025 * set data direction, and check if mapped pages need bouncing 1026 */ 1027 if (!write_to_vm) 1028 - bio->bi_rw |= (1 << BIO_RW); 1029 1030 bio->bi_bdev = bdev; 1031 bio->bi_flags |= (1 << BIO_USER_MAPPED);
··· 843 if (!bio) 844 goto out_bmd; 845 846 + if (!write_to_vm) 847 + bio->bi_rw |= REQ_WRITE; 848 849 ret = 0; 850 ··· 1024 * set data direction, and check if mapped pages need bouncing 1025 */ 1026 if (!write_to_vm) 1027 + bio->bi_rw |= REQ_WRITE; 1028 1029 bio->bi_bdev = bdev; 1030 bio->bi_flags |= (1 << BIO_USER_MAPPED);
+2 -8
fs/block_dev.c
··· 1346 return ret; 1347 } 1348 1349 - lock_kernel(); 1350 restart: 1351 1352 ret = -ENXIO; 1353 disk = get_gendisk(bdev->bd_dev, &partno); 1354 if (!disk) 1355 - goto out_unlock_kernel; 1356 1357 mutex_lock_nested(&bdev->bd_mutex, for_part); 1358 if (!bdev->bd_openers) { ··· 1431 if (for_part) 1432 bdev->bd_part_count++; 1433 mutex_unlock(&bdev->bd_mutex); 1434 - unlock_kernel(); 1435 return 0; 1436 1437 out_clear: ··· 1443 bdev->bd_contains = NULL; 1444 out_unlock_bdev: 1445 mutex_unlock(&bdev->bd_mutex); 1446 - out_unlock_kernel: 1447 - unlock_kernel(); 1448 - 1449 if (disk) 1450 module_put(disk->fops->owner); 1451 put_disk(disk); ··· 1512 struct block_device *victim = NULL; 1513 1514 mutex_lock_nested(&bdev->bd_mutex, for_part); 1515 - lock_kernel(); 1516 if (for_part) 1517 bdev->bd_part_count--; 1518 ··· 1536 victim = bdev->bd_contains; 1537 bdev->bd_contains = NULL; 1538 } 1539 - unlock_kernel(); 1540 mutex_unlock(&bdev->bd_mutex); 1541 bdput(bdev); 1542 if (victim)
··· 1346 return ret; 1347 } 1348 1349 restart: 1350 1351 ret = -ENXIO; 1352 disk = get_gendisk(bdev->bd_dev, &partno); 1353 if (!disk) 1354 + goto out; 1355 1356 mutex_lock_nested(&bdev->bd_mutex, for_part); 1357 if (!bdev->bd_openers) { ··· 1432 if (for_part) 1433 bdev->bd_part_count++; 1434 mutex_unlock(&bdev->bd_mutex); 1435 return 0; 1436 1437 out_clear: ··· 1445 bdev->bd_contains = NULL; 1446 out_unlock_bdev: 1447 mutex_unlock(&bdev->bd_mutex); 1448 + out: 1449 if (disk) 1450 module_put(disk->fops->owner); 1451 put_disk(disk); ··· 1516 struct block_device *victim = NULL; 1517 1518 mutex_lock_nested(&bdev->bd_mutex, for_part); 1519 if (for_part) 1520 bdev->bd_part_count--; 1521 ··· 1541 victim = bdev->bd_contains; 1542 bdev->bd_contains = NULL; 1543 } 1544 mutex_unlock(&bdev->bd_mutex); 1545 bdput(bdev); 1546 if (victim)
+4 -4
fs/btrfs/disk-io.c
··· 480 end_io_wq->work.func = end_workqueue_fn; 481 end_io_wq->work.flags = 0; 482 483 - if (bio->bi_rw & (1 << BIO_RW)) { 484 if (end_io_wq->metadata) 485 btrfs_queue_worker(&fs_info->endio_meta_write_workers, 486 &end_io_wq->work); ··· 604 605 atomic_inc(&fs_info->nr_async_submits); 606 607 - if (rw & (1 << BIO_RW_SYNCIO)) 608 btrfs_set_work_high_prio(&async->work); 609 610 btrfs_queue_worker(&fs_info->workers, &async->work); ··· 668 bio, 1); 669 BUG_ON(ret); 670 671 - if (!(rw & (1 << BIO_RW))) { 672 /* 673 * called for a read, do the setup so that checksum validation 674 * can happen in the async kernel threads ··· 1427 * ram and up to date before trying to verify things. For 1428 * blocksize <= pagesize, it is basically a noop 1429 */ 1430 - if (!(bio->bi_rw & (1 << BIO_RW)) && end_io_wq->metadata && 1431 !bio_ready_for_csum(bio)) { 1432 btrfs_queue_worker(&fs_info->endio_meta_workers, 1433 &end_io_wq->work);
··· 480 end_io_wq->work.func = end_workqueue_fn; 481 end_io_wq->work.flags = 0; 482 483 + if (bio->bi_rw & REQ_WRITE) { 484 if (end_io_wq->metadata) 485 btrfs_queue_worker(&fs_info->endio_meta_write_workers, 486 &end_io_wq->work); ··· 604 605 atomic_inc(&fs_info->nr_async_submits); 606 607 + if (rw & REQ_SYNC) 608 btrfs_set_work_high_prio(&async->work); 609 610 btrfs_queue_worker(&fs_info->workers, &async->work); ··· 668 bio, 1); 669 BUG_ON(ret); 670 671 + if (!(rw & REQ_WRITE)) { 672 /* 673 * called for a read, do the setup so that checksum validation 674 * can happen in the async kernel threads ··· 1427 * ram and up to date before trying to verify things. For 1428 * blocksize <= pagesize, it is basically a noop 1429 */ 1430 + if (!(bio->bi_rw & REQ_WRITE) && end_io_wq->metadata && 1431 !bio_ready_for_csum(bio)) { 1432 btrfs_queue_worker(&fs_info->endio_meta_workers, 1433 &end_io_wq->work);
+3 -3
fs/btrfs/inode.c
··· 1429 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); 1430 BUG_ON(ret); 1431 1432 - if (!(rw & (1 << BIO_RW))) { 1433 if (bio_flags & EXTENT_BIO_COMPRESSED) { 1434 return btrfs_submit_compressed_read(inode, bio, 1435 mirror_num, bio_flags); ··· 1841 bio->bi_size = 0; 1842 1843 bio_add_page(bio, page, failrec->len, start - page_offset(page)); 1844 - if (failed_bio->bi_rw & (1 << BIO_RW)) 1845 rw = WRITE; 1846 else 1847 rw = READ; ··· 5647 struct bio_vec *bvec = bio->bi_io_vec; 5648 u64 start; 5649 int skip_sum; 5650 - int write = rw & (1 << BIO_RW); 5651 int ret = 0; 5652 5653 skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
··· 1429 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); 1430 BUG_ON(ret); 1431 1432 + if (!(rw & REQ_WRITE)) { 1433 if (bio_flags & EXTENT_BIO_COMPRESSED) { 1434 return btrfs_submit_compressed_read(inode, bio, 1435 mirror_num, bio_flags); ··· 1841 bio->bi_size = 0; 1842 1843 bio_add_page(bio, page, failrec->len, start - page_offset(page)); 1844 + if (failed_bio->bi_rw & REQ_WRITE) 1845 rw = WRITE; 1846 else 1847 rw = READ; ··· 5647 struct bio_vec *bvec = bio->bi_io_vec; 5648 u64 start; 5649 int skip_sum; 5650 + int write = rw & REQ_WRITE; 5651 int ret = 0; 5652 5653 skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
+9 -9
fs/btrfs/volumes.c
··· 258 259 BUG_ON(atomic_read(&cur->bi_cnt) == 0); 260 261 - if (bio_rw_flagged(cur, BIO_RW_SYNCIO)) 262 num_sync_run++; 263 264 submit_bio(cur->bi_rw, cur); ··· 2651 int max_errors = 0; 2652 struct btrfs_multi_bio *multi = NULL; 2653 2654 - if (multi_ret && !(rw & (1 << BIO_RW))) 2655 stripes_allocated = 1; 2656 again: 2657 if (multi_ret) { ··· 2687 mirror_num = 0; 2688 2689 /* if our multi bio struct is too small, back off and try again */ 2690 - if (rw & (1 << BIO_RW)) { 2691 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 | 2692 BTRFS_BLOCK_GROUP_DUP)) { 2693 stripes_required = map->num_stripes; ··· 2697 max_errors = 1; 2698 } 2699 } 2700 - if (multi_ret && (rw & (1 << BIO_RW)) && 2701 stripes_allocated < stripes_required) { 2702 stripes_allocated = map->num_stripes; 2703 free_extent_map(em); ··· 2733 num_stripes = 1; 2734 stripe_index = 0; 2735 if (map->type & BTRFS_BLOCK_GROUP_RAID1) { 2736 - if (unplug_page || (rw & (1 << BIO_RW))) 2737 num_stripes = map->num_stripes; 2738 else if (mirror_num) 2739 stripe_index = mirror_num - 1; ··· 2744 } 2745 2746 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { 2747 - if (rw & (1 << BIO_RW)) 2748 num_stripes = map->num_stripes; 2749 else if (mirror_num) 2750 stripe_index = mirror_num - 1; ··· 2755 stripe_index = do_div(stripe_nr, factor); 2756 stripe_index *= map->sub_stripes; 2757 2758 - if (unplug_page || (rw & (1 << BIO_RW))) 2759 num_stripes = map->sub_stripes; 2760 else if (mirror_num) 2761 stripe_index += mirror_num - 1; ··· 2945 struct btrfs_pending_bios *pending_bios; 2946 2947 /* don't bother with additional async steps for reads, right now */ 2948 - if (!(rw & (1 << BIO_RW))) { 2949 bio_get(bio); 2950 submit_bio(rw, bio); 2951 bio_put(bio); ··· 2964 bio->bi_rw |= rw; 2965 2966 spin_lock(&device->io_lock); 2967 - if (bio_rw_flagged(bio, BIO_RW_SYNCIO)) 2968 pending_bios = &device->pending_sync_bios; 2969 else 2970 pending_bios = &device->pending_bios;
··· 258 259 BUG_ON(atomic_read(&cur->bi_cnt) == 0); 260 261 + if (cur->bi_rw & REQ_SYNC) 262 num_sync_run++; 263 264 submit_bio(cur->bi_rw, cur); ··· 2651 int max_errors = 0; 2652 struct btrfs_multi_bio *multi = NULL; 2653 2654 + if (multi_ret && !(rw & REQ_WRITE)) 2655 stripes_allocated = 1; 2656 again: 2657 if (multi_ret) { ··· 2687 mirror_num = 0; 2688 2689 /* if our multi bio struct is too small, back off and try again */ 2690 + if (rw & REQ_WRITE) { 2691 if (map->type & (BTRFS_BLOCK_GROUP_RAID1 | 2692 BTRFS_BLOCK_GROUP_DUP)) { 2693 stripes_required = map->num_stripes; ··· 2697 max_errors = 1; 2698 } 2699 } 2700 + if (multi_ret && (rw & REQ_WRITE) && 2701 stripes_allocated < stripes_required) { 2702 stripes_allocated = map->num_stripes; 2703 free_extent_map(em); ··· 2733 num_stripes = 1; 2734 stripe_index = 0; 2735 if (map->type & BTRFS_BLOCK_GROUP_RAID1) { 2736 + if (unplug_page || (rw & REQ_WRITE)) 2737 num_stripes = map->num_stripes; 2738 else if (mirror_num) 2739 stripe_index = mirror_num - 1; ··· 2744 } 2745 2746 } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { 2747 + if (rw & REQ_WRITE) 2748 num_stripes = map->num_stripes; 2749 else if (mirror_num) 2750 stripe_index = mirror_num - 1; ··· 2755 stripe_index = do_div(stripe_nr, factor); 2756 stripe_index *= map->sub_stripes; 2757 2758 + if (unplug_page || (rw & REQ_WRITE)) 2759 num_stripes = map->sub_stripes; 2760 else if (mirror_num) 2761 stripe_index += mirror_num - 1; ··· 2945 struct btrfs_pending_bios *pending_bios; 2946 2947 /* don't bother with additional async steps for reads, right now */ 2948 + if (!(rw & REQ_WRITE)) { 2949 bio_get(bio); 2950 submit_bio(rw, bio); 2951 bio_put(bio); ··· 2964 bio->bi_rw |= rw; 2965 2966 spin_lock(&device->io_lock); 2967 + if (bio->bi_rw & REQ_SYNC) 2968 pending_bios = &device->pending_sync_bios; 2969 else 2970 pending_bios = &device->pending_bios;
+6 -6
fs/coda/psdev.c
··· 177 nbytes = req->uc_outSize; /* don't have more space! */ 178 } 179 if (copy_from_user(req->uc_data, buf, nbytes)) { 180 - req->uc_flags |= REQ_ABORT; 181 wake_up(&req->uc_sleep); 182 retval = -EFAULT; 183 goto out; ··· 254 retval = -EFAULT; 255 256 /* If request was not a signal, enqueue and don't free */ 257 - if (!(req->uc_flags & REQ_ASYNC)) { 258 - req->uc_flags |= REQ_READ; 259 list_add_tail(&(req->uc_chain), &vcp->vc_processing); 260 goto out; 261 } ··· 315 list_del(&req->uc_chain); 316 317 /* Async requests need to be freed here */ 318 - if (req->uc_flags & REQ_ASYNC) { 319 CODA_FREE(req->uc_data, sizeof(struct coda_in_hdr)); 320 kfree(req); 321 continue; 322 } 323 - req->uc_flags |= REQ_ABORT; 324 wake_up(&req->uc_sleep); 325 } 326 327 list_for_each_entry_safe(req, tmp, &vcp->vc_processing, uc_chain) { 328 list_del(&req->uc_chain); 329 330 - req->uc_flags |= REQ_ABORT; 331 wake_up(&req->uc_sleep); 332 } 333
··· 177 nbytes = req->uc_outSize; /* don't have more space! */ 178 } 179 if (copy_from_user(req->uc_data, buf, nbytes)) { 180 + req->uc_flags |= CODA_REQ_ABORT; 181 wake_up(&req->uc_sleep); 182 retval = -EFAULT; 183 goto out; ··· 254 retval = -EFAULT; 255 256 /* If request was not a signal, enqueue and don't free */ 257 + if (!(req->uc_flags & CODA_REQ_ASYNC)) { 258 + req->uc_flags |= CODA_REQ_READ; 259 list_add_tail(&(req->uc_chain), &vcp->vc_processing); 260 goto out; 261 } ··· 315 list_del(&req->uc_chain); 316 317 /* Async requests need to be freed here */ 318 + if (req->uc_flags & CODA_REQ_ASYNC) { 319 CODA_FREE(req->uc_data, sizeof(struct coda_in_hdr)); 320 kfree(req); 321 continue; 322 } 323 + req->uc_flags |= CODA_REQ_ABORT; 324 wake_up(&req->uc_sleep); 325 } 326 327 list_for_each_entry_safe(req, tmp, &vcp->vc_processing, uc_chain) { 328 list_del(&req->uc_chain); 329 330 + req->uc_flags |= CODA_REQ_ABORT; 331 wake_up(&req->uc_sleep); 332 } 333
+6 -6
fs/coda/upcall.c
··· 604 (((r)->uc_opcode != CODA_CLOSE && \ 605 (r)->uc_opcode != CODA_STORE && \ 606 (r)->uc_opcode != CODA_RELEASE) || \ 607 - (r)->uc_flags & REQ_READ)) 608 609 static inline void coda_waitfor_upcall(struct upc_req *req) 610 { ··· 624 set_current_state(TASK_UNINTERRUPTIBLE); 625 626 /* got a reply */ 627 - if (req->uc_flags & (REQ_WRITE | REQ_ABORT)) 628 break; 629 630 if (blocked && time_after(jiffies, timeout) && ··· 708 coda_waitfor_upcall(req); 709 710 /* Op went through, interrupt or not... */ 711 - if (req->uc_flags & REQ_WRITE) { 712 out = (union outputArgs *)req->uc_data; 713 /* here we map positive Venus errors to kernel errors */ 714 error = -out->oh.result; ··· 717 } 718 719 error = -EINTR; 720 - if ((req->uc_flags & REQ_ABORT) || !signal_pending(current)) { 721 printk(KERN_WARNING "coda: Unexpected interruption.\n"); 722 goto exit; 723 } 724 725 /* Interrupted before venus read it. */ 726 - if (!(req->uc_flags & REQ_READ)) 727 goto exit; 728 729 /* Venus saw the upcall, make sure we can send interrupt signal */ ··· 747 sig_inputArgs->ih.opcode = CODA_SIGNAL; 748 sig_inputArgs->ih.unique = req->uc_unique; 749 750 - sig_req->uc_flags = REQ_ASYNC; 751 sig_req->uc_opcode = sig_inputArgs->ih.opcode; 752 sig_req->uc_unique = sig_inputArgs->ih.unique; 753 sig_req->uc_inSize = sizeof(struct coda_in_hdr);
··· 604 (((r)->uc_opcode != CODA_CLOSE && \ 605 (r)->uc_opcode != CODA_STORE && \ 606 (r)->uc_opcode != CODA_RELEASE) || \ 607 + (r)->uc_flags & CODA_REQ_READ)) 608 609 static inline void coda_waitfor_upcall(struct upc_req *req) 610 { ··· 624 set_current_state(TASK_UNINTERRUPTIBLE); 625 626 /* got a reply */ 627 + if (req->uc_flags & (CODA_REQ_WRITE | CODA_REQ_ABORT)) 628 break; 629 630 if (blocked && time_after(jiffies, timeout) && ··· 708 coda_waitfor_upcall(req); 709 710 /* Op went through, interrupt or not... */ 711 + if (req->uc_flags & CODA_REQ_WRITE) { 712 out = (union outputArgs *)req->uc_data; 713 /* here we map positive Venus errors to kernel errors */ 714 error = -out->oh.result; ··· 717 } 718 719 error = -EINTR; 720 + if ((req->uc_flags & CODA_REQ_ABORT) || !signal_pending(current)) { 721 printk(KERN_WARNING "coda: Unexpected interruption.\n"); 722 goto exit; 723 } 724 725 /* Interrupted before venus read it. */ 726 + if (!(req->uc_flags & CODA_REQ_READ)) 727 goto exit; 728 729 /* Venus saw the upcall, make sure we can send interrupt signal */ ··· 747 sig_inputArgs->ih.opcode = CODA_SIGNAL; 748 sig_inputArgs->ih.unique = req->uc_unique; 749 750 + sig_req->uc_flags = CODA_REQ_ASYNC; 751 sig_req->uc_opcode = sig_inputArgs->ih.opcode; 752 sig_req->uc_unique = sig_inputArgs->ih.unique; 753 sig_req->uc_inSize = sizeof(struct coda_in_hdr);
+1 -1
fs/exofs/ios.c
··· 599 } else { 600 bio = master_dev->bio; 601 /* FIXME: bio_set_dir() */ 602 - bio->bi_rw |= (1 << BIO_RW); 603 } 604 605 osd_req_write(or, &ios->obj, per_dev->offset, bio,
··· 599 } else { 600 bio = master_dev->bio; 601 /* FIXME: bio_set_dir() */ 602 + bio->bi_rw |= REQ_WRITE; 603 } 604 605 osd_req_write(or, &ios->obj, per_dev->offset, bio,
+104 -57
fs/fs-writeback.c
··· 26 #include <linux/blkdev.h> 27 #include <linux/backing-dev.h> 28 #include <linux/buffer_head.h> 29 #include "internal.h" 30 - 31 - #define inode_to_bdi(inode) ((inode)->i_mapping->backing_dev_info) 32 - 33 - /* 34 - * We don't actually have pdflush, but this one is exported though /proc... 35 - */ 36 - int nr_pdflush_threads; 37 38 /* 39 * Passed into wb_writeback(), essentially a subset of writeback_control ··· 44 struct completion *done; /* set if the caller waits */ 45 }; 46 47 /** 48 * writeback_in_progress - determine whether there is writeback in progress 49 * @bdi: the device's backing_dev_info structure. ··· 74 static void bdi_queue_work(struct backing_dev_info *bdi, 75 struct wb_writeback_work *work) 76 { 77 - spin_lock(&bdi->wb_lock); 78 list_add_tail(&work->list, &bdi->work_list); 79 - spin_unlock(&bdi->wb_lock); 80 - 81 - /* 82 - * If the default thread isn't there, make sure we add it. When 83 - * it gets created and wakes up, we'll run this work. 84 - */ 85 - if (unlikely(list_empty_careful(&bdi->wb_list))) 86 wake_up_process(default_backing_dev_info.wb.task); 87 - else { 88 - struct bdi_writeback *wb = &bdi->wb; 89 - 90 - if (wb->task) 91 - wake_up_process(wb->task); 92 } 93 } 94 95 static void ··· 103 */ 104 work = kzalloc(sizeof(*work), GFP_ATOMIC); 105 if (!work) { 106 - if (bdi->wb.task) 107 wake_up_process(bdi->wb.task); 108 return; 109 } 110 ··· 653 wbc.more_io = 0; 654 wbc.nr_to_write = MAX_WRITEBACK_PAGES; 655 wbc.pages_skipped = 0; 656 if (work->sb) 657 __writeback_inodes_sb(work->sb, wb, &wbc); 658 else 659 writeback_inodes_wb(wb, &wbc); 660 work->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write; 661 wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write; 662 ··· 688 if (!list_empty(&wb->b_more_io)) { 689 inode = list_entry(wb->b_more_io.prev, 690 struct inode, i_list); 691 inode_wait_for_writeback(inode); 692 } 693 spin_unlock(&inode_lock); ··· 701 * Return the next wb_writeback_work struct that hasn't been processed yet. 702 */ 703 static struct wb_writeback_work * 704 - get_next_work_item(struct backing_dev_info *bdi, struct bdi_writeback *wb) 705 { 706 struct wb_writeback_work *work = NULL; 707 708 - spin_lock(&bdi->wb_lock); 709 if (!list_empty(&bdi->work_list)) { 710 work = list_entry(bdi->work_list.next, 711 struct wb_writeback_work, list); 712 list_del_init(&work->list); 713 } 714 - spin_unlock(&bdi->wb_lock); 715 return work; 716 } 717 ··· 759 struct wb_writeback_work *work; 760 long wrote = 0; 761 762 - while ((work = get_next_work_item(bdi, wb)) != NULL) { 763 /* 764 * Override sync mode, in case we must wait for completion 765 * because this thread is exiting now. 766 */ 767 if (force_wait) 768 work->sync_mode = WB_SYNC_ALL; 769 770 wrote += wb_writeback(wb, work); 771 ··· 793 * Handle writeback of dirty data for the device backed by this bdi. Also 794 * wakes up periodically and does kupdated style flushing. 795 */ 796 - int bdi_writeback_task(struct bdi_writeback *wb) 797 { 798 - unsigned long last_active = jiffies; 799 - unsigned long wait_jiffies = -1UL; 800 long pages_written; 801 802 while (!kthread_should_stop()) { 803 pages_written = wb_do_writeback(wb, 0); 804 805 - if (pages_written) 806 - last_active = jiffies; 807 - else if (wait_jiffies != -1UL) { 808 - unsigned long max_idle; 809 810 - /* 811 - * Longest period of inactivity that we tolerate. If we 812 - * see dirty data again later, the task will get 813 - * recreated automatically. 
814 - */ 815 - max_idle = max(5UL * 60 * HZ, wait_jiffies); 816 - if (time_after(jiffies, max_idle + last_active)) 817 - break; 818 } 819 820 - if (dirty_writeback_interval) { 821 - wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10); 822 - schedule_timeout_interruptible(wait_jiffies); 823 - } else { 824 - set_current_state(TASK_INTERRUPTIBLE); 825 - if (list_empty_careful(&wb->bdi->work_list) && 826 - !kthread_should_stop()) 827 - schedule(); 828 - __set_current_state(TASK_RUNNING); 829 } 830 831 try_to_freeze(); 832 } 833 834 return 0; 835 } 836 837 /* 838 * Start writeback of `nr_pages' pages. If `nr_pages' is zero, write back ··· 927 void __mark_inode_dirty(struct inode *inode, int flags) 928 { 929 struct super_block *sb = inode->i_sb; 930 931 /* 932 * Don't do this for I_DIRTY_PAGES - that doesn't actually ··· 982 * reposition it (that would break b_dirty time-ordering). 983 */ 984 if (!was_dirty) { 985 - struct bdi_writeback *wb = &inode_to_bdi(inode)->wb; 986 - struct backing_dev_info *bdi = wb->bdi; 987 988 - if (bdi_cap_writeback_dirty(bdi) && 989 - !test_bit(BDI_registered, &bdi->state)) { 990 - WARN_ON(1); 991 - printk(KERN_ERR "bdi-%s not registered\n", 992 - bdi->name); 993 } 994 995 inode->dirtied_when = jiffies; 996 - list_move(&inode->i_list, &wb->b_dirty); 997 } 998 } 999 out: 1000 spin_unlock(&inode_lock); 1001 } 1002 EXPORT_SYMBOL(__mark_inode_dirty); 1003
··· 26 #include <linux/blkdev.h> 27 #include <linux/backing-dev.h> 28 #include <linux/buffer_head.h> 29 + #include <linux/tracepoint.h> 30 #include "internal.h" 31 32 /* 33 * Passed into wb_writeback(), essentially a subset of writeback_control ··· 50 struct completion *done; /* set if the caller waits */ 51 }; 52 53 + /* 54 + * Include the creation of the trace points after defining the 55 + * wb_writeback_work structure so that the definition remains local to this 56 + * file. 57 + */ 58 + #define CREATE_TRACE_POINTS 59 + #include <trace/events/writeback.h> 60 + 61 + #define inode_to_bdi(inode) ((inode)->i_mapping->backing_dev_info) 62 + 63 + /* 64 + * We don't actually have pdflush, but this one is exported though /proc... 65 + */ 66 + int nr_pdflush_threads; 67 + 68 /** 69 * writeback_in_progress - determine whether there is writeback in progress 70 * @bdi: the device's backing_dev_info structure. ··· 65 static void bdi_queue_work(struct backing_dev_info *bdi, 66 struct wb_writeback_work *work) 67 { 68 + trace_writeback_queue(bdi, work); 69 + 70 + spin_lock_bh(&bdi->wb_lock); 71 list_add_tail(&work->list, &bdi->work_list); 72 + if (bdi->wb.task) { 73 + wake_up_process(bdi->wb.task); 74 + } else { 75 + /* 76 + * The bdi thread isn't there, wake up the forker thread which 77 + * will create and run it. 78 + */ 79 + trace_writeback_nothread(bdi, work); 80 wake_up_process(default_backing_dev_info.wb.task); 81 } 82 + spin_unlock_bh(&bdi->wb_lock); 83 } 84 85 static void ··· 95 */ 96 work = kzalloc(sizeof(*work), GFP_ATOMIC); 97 if (!work) { 98 + if (bdi->wb.task) { 99 + trace_writeback_nowork(bdi); 100 wake_up_process(bdi->wb.task); 101 + } 102 return; 103 } 104 ··· 643 wbc.more_io = 0; 644 wbc.nr_to_write = MAX_WRITEBACK_PAGES; 645 wbc.pages_skipped = 0; 646 + 647 + trace_wbc_writeback_start(&wbc, wb->bdi); 648 if (work->sb) 649 __writeback_inodes_sb(work->sb, wb, &wbc); 650 else 651 writeback_inodes_wb(wb, &wbc); 652 + trace_wbc_writeback_written(&wbc, wb->bdi); 653 + 654 work->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write; 655 wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write; 656 ··· 674 if (!list_empty(&wb->b_more_io)) { 675 inode = list_entry(wb->b_more_io.prev, 676 struct inode, i_list); 677 + trace_wbc_writeback_wait(&wbc, wb->bdi); 678 inode_wait_for_writeback(inode); 679 } 680 spin_unlock(&inode_lock); ··· 686 * Return the next wb_writeback_work struct that hasn't been processed yet. 687 */ 688 static struct wb_writeback_work * 689 + get_next_work_item(struct backing_dev_info *bdi) 690 { 691 struct wb_writeback_work *work = NULL; 692 693 + spin_lock_bh(&bdi->wb_lock); 694 if (!list_empty(&bdi->work_list)) { 695 work = list_entry(bdi->work_list.next, 696 struct wb_writeback_work, list); 697 list_del_init(&work->list); 698 } 699 + spin_unlock_bh(&bdi->wb_lock); 700 return work; 701 } 702 ··· 744 struct wb_writeback_work *work; 745 long wrote = 0; 746 747 + while ((work = get_next_work_item(bdi)) != NULL) { 748 /* 749 * Override sync mode, in case we must wait for completion 750 * because this thread is exiting now. 751 */ 752 if (force_wait) 753 work->sync_mode = WB_SYNC_ALL; 754 + 755 + trace_writeback_exec(bdi, work); 756 757 wrote += wb_writeback(wb, work); 758 ··· 776 * Handle writeback of dirty data for the device backed by this bdi. Also 777 * wakes up periodically and does kupdated style flushing. 
778 */ 779 + int bdi_writeback_thread(void *data) 780 { 781 + struct bdi_writeback *wb = data; 782 + struct backing_dev_info *bdi = wb->bdi; 783 long pages_written; 784 785 + current->flags |= PF_FLUSHER | PF_SWAPWRITE; 786 + set_freezable(); 787 + wb->last_active = jiffies; 788 + 789 + /* 790 + * Our parent may run at a different priority, just set us to normal 791 + */ 792 + set_user_nice(current, 0); 793 + 794 + trace_writeback_thread_start(bdi); 795 + 796 while (!kthread_should_stop()) { 797 + /* 798 + * Remove own delayed wake-up timer, since we are already awake 799 + * and we'll take care of the preriodic write-back. 800 + */ 801 + del_timer(&wb->wakeup_timer); 802 + 803 pages_written = wb_do_writeback(wb, 0); 804 805 + trace_writeback_pages_written(pages_written); 806 807 + if (pages_written) 808 + wb->last_active = jiffies; 809 + 810 + set_current_state(TASK_INTERRUPTIBLE); 811 + if (!list_empty(&bdi->work_list)) { 812 + __set_current_state(TASK_RUNNING); 813 + continue; 814 } 815 816 + if (wb_has_dirty_io(wb) && dirty_writeback_interval) 817 + schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10)); 818 + else { 819 + /* 820 + * We have nothing to do, so can go sleep without any 821 + * timeout and save power. When a work is queued or 822 + * something is made dirty - we will be woken up. 823 + */ 824 + schedule(); 825 } 826 827 try_to_freeze(); 828 } 829 830 + /* Flush any work that raced with us exiting */ 831 + if (!list_empty(&bdi->work_list)) 832 + wb_do_writeback(wb, 1); 833 + 834 + trace_writeback_thread_stop(bdi); 835 return 0; 836 } 837 + 838 839 /* 840 * Start writeback of `nr_pages' pages. If `nr_pages' is zero, write back ··· 891 void __mark_inode_dirty(struct inode *inode, int flags) 892 { 893 struct super_block *sb = inode->i_sb; 894 + struct backing_dev_info *bdi = NULL; 895 + bool wakeup_bdi = false; 896 897 /* 898 * Don't do this for I_DIRTY_PAGES - that doesn't actually ··· 944 * reposition it (that would break b_dirty time-ordering). 945 */ 946 if (!was_dirty) { 947 + bdi = inode_to_bdi(inode); 948 949 + if (bdi_cap_writeback_dirty(bdi)) { 950 + WARN(!test_bit(BDI_registered, &bdi->state), 951 + "bdi-%s not registered\n", bdi->name); 952 + 953 + /* 954 + * If this is the first dirty inode for this 955 + * bdi, we have to wake-up the corresponding 956 + * bdi thread to make sure background 957 + * write-back happens later. 958 + */ 959 + if (!wb_has_dirty_io(&bdi->wb)) 960 + wakeup_bdi = true; 961 } 962 963 inode->dirtied_when = jiffies; 964 + list_move(&inode->i_list, &bdi->wb.b_dirty); 965 } 966 } 967 out: 968 spin_unlock(&inode_lock); 969 + 970 + if (wakeup_bdi) 971 + bdi_wakeup_thread_delayed(bdi); 972 } 973 EXPORT_SYMBOL(__mark_inode_dirty); 974
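The rewritten bdi_writeback_thread() above boils down to a three-way choice on every pass of its loop: keep working if the bdi work_list is non-empty, sleep with a timeout while dirty inodes remain and a writeback interval is configured, or sleep indefinitely until something wakes it. The standalone userspace sketch below mirrors only that decision; the function and enum names are invented for illustration and none of this is kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative only: mirrors the sleep decision in bdi_writeback_thread(). */
enum wb_action { WB_KEEP_WORKING, WB_TIMED_SLEEP, WB_SLEEP_UNTIL_WOKEN };

static enum wb_action wb_next_action(bool work_queued, bool has_dirty_io,
				     unsigned int dirty_writeback_interval)
{
	if (work_queued)
		return WB_KEEP_WORKING;		/* loop again immediately */
	if (has_dirty_io && dirty_writeback_interval)
		return WB_TIMED_SLEEP;		/* periodic kupdate-style flush */
	return WB_SLEEP_UNTIL_WOKEN;		/* nothing to do, save power */
}

int main(void)
{
	printf("%d %d %d\n",
	       wb_next_action(true,  true,  500),
	       wb_next_action(false, true,  500),
	       wb_next_action(false, false, 500));
	return 0;
}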
+2 -2
fs/gfs2/log.c
··· 595 if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) 596 goto skip_barrier; 597 get_bh(bh); 598 - submit_bh(WRITE_SYNC | (1 << BIO_RW_BARRIER) | (1 << BIO_RW_META), bh); 599 wait_on_buffer(bh); 600 if (buffer_eopnotsupp(bh)) { 601 clear_buffer_eopnotsupp(bh); ··· 605 lock_buffer(bh); 606 skip_barrier: 607 get_bh(bh); 608 - submit_bh(WRITE_SYNC | (1 << BIO_RW_META), bh); 609 wait_on_buffer(bh); 610 } 611 if (!buffer_uptodate(bh))
··· 595 if (test_bit(SDF_NOBARRIERS, &sdp->sd_flags)) 596 goto skip_barrier; 597 get_bh(bh); 598 + submit_bh(WRITE_BARRIER | REQ_META, bh); 599 wait_on_buffer(bh); 600 if (buffer_eopnotsupp(bh)) { 601 clear_buffer_eopnotsupp(bh); ··· 605 lock_buffer(bh); 606 skip_barrier: 607 get_bh(bh); 608 + submit_bh(WRITE_SYNC | REQ_META, bh); 609 wait_on_buffer(bh); 610 } 611 if (!buffer_uptodate(bh))
+4 -4
fs/gfs2/meta_io.c
··· 36 { 37 struct buffer_head *bh, *head; 38 int nr_underway = 0; 39 - int write_op = (1 << BIO_RW_META) | ((wbc->sync_mode == WB_SYNC_ALL ? 40 - WRITE_SYNC_PLUG : WRITE)); 41 42 BUG_ON(!PageLocked(page)); 43 BUG_ON(!page_has_buffers(page)); ··· 225 } 226 bh->b_end_io = end_buffer_read_sync; 227 get_bh(bh); 228 - submit_bh(READ_SYNC | (1 << BIO_RW_META), bh); 229 if (!(flags & DIO_WAIT)) 230 return 0; 231 ··· 432 if (buffer_uptodate(first_bh)) 433 goto out; 434 if (!buffer_locked(first_bh)) 435 - ll_rw_block(READ_SYNC | (1 << BIO_RW_META), 1, &first_bh); 436 437 dblock++; 438 extlen--;
··· 36 { 37 struct buffer_head *bh, *head; 38 int nr_underway = 0; 39 + int write_op = REQ_META | 40 + (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC_PLUG : WRITE); 41 42 BUG_ON(!PageLocked(page)); 43 BUG_ON(!page_has_buffers(page)); ··· 225 } 226 bh->b_end_io = end_buffer_read_sync; 227 get_bh(bh); 228 + submit_bh(READ_SYNC | REQ_META, bh); 229 if (!(flags & DIO_WAIT)) 230 return 0; 231 ··· 432 if (buffer_uptodate(first_bh)) 433 goto out; 434 if (!buffer_locked(first_bh)) 435 + ll_rw_block(READ_SYNC | REQ_META, 1, &first_bh); 436 437 dblock++; 438 extlen--;
+1 -1
fs/gfs2/ops_fstype.c
··· 274 275 bio->bi_end_io = end_bio_io_page; 276 bio->bi_private = page; 277 - submit_bio(READ_SYNC | (1 << BIO_RW_META), bio); 278 wait_on_page_locked(page); 279 bio_put(bio); 280 if (!PageUptodate(page)) {
··· 274 275 bio->bi_end_io = end_bio_io_page; 276 bio->bi_private = page; 277 + submit_bio(READ_SYNC | REQ_META, bio); 278 wait_on_page_locked(page); 279 bio_put(bio); 280 if (!PageUptodate(page)) {
+1 -1
fs/nilfs2/segbuf.c
··· 508 * Last BIO is always sent through the following 509 * submission. 510 */ 511 - rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG); 512 res = nilfs_segbuf_submit_bio(segbuf, &wi, rw); 513 } 514
··· 508 * Last BIO is always sent through the following 509 * submission. 510 */ 511 + rw |= REQ_SYNC | REQ_UNPLUG; 512 res = nilfs_segbuf_submit_bio(segbuf, &wi, rw); 513 } 514
+1 -13
fs/splice.c
··· 399 * If the page isn't uptodate, we may need to start io on it 400 */ 401 if (!PageUptodate(page)) { 402 - /* 403 - * If in nonblock mode then dont block on waiting 404 - * for an in-flight io page 405 - */ 406 - if (flags & SPLICE_F_NONBLOCK) { 407 - if (!trylock_page(page)) { 408 - error = -EAGAIN; 409 - break; 410 - } 411 - } else 412 - lock_page(page); 413 414 /* 415 * Page was truncated, or invalidated by the ··· 587 struct page *pages[PIPE_DEF_BUFFERS]; 588 struct partial_page partial[PIPE_DEF_BUFFERS]; 589 struct iovec *vec, __vec[PIPE_DEF_BUFFERS]; 590 - pgoff_t index; 591 ssize_t res; 592 size_t this_len; 593 int error; ··· 610 goto shrink_ret; 611 } 612 613 - index = *ppos >> PAGE_CACHE_SHIFT; 614 offset = *ppos & ~PAGE_CACHE_MASK; 615 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 616
··· 399 * If the page isn't uptodate, we may need to start io on it 400 */ 401 if (!PageUptodate(page)) { 402 + lock_page(page); 403 404 /* 405 * Page was truncated, or invalidated by the ··· 597 struct page *pages[PIPE_DEF_BUFFERS]; 598 struct partial_page partial[PIPE_DEF_BUFFERS]; 599 struct iovec *vec, __vec[PIPE_DEF_BUFFERS]; 600 ssize_t res; 601 size_t this_len; 602 int error; ··· 621 goto shrink_ret; 622 } 623 624 offset = *ppos & ~PAGE_CACHE_MASK; 625 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 626
+1
include/linux/Kbuild
··· 39 header-y += b1lli.h 40 header-y += baycom.h 41 header-y += bfs_fs.h 42 header-y += blkpg.h 43 header-y += bpqether.h 44 header-y += bsg.h
··· 39 header-y += b1lli.h 40 header-y += baycom.h 41 header-y += bfs_fs.h 42 + header-y += blk_types.h 43 header-y += blkpg.h 44 header-y += bpqether.h 45 header-y += bsg.h
+1 -1
include/linux/audit.h
··· 544 #define audit_putname(n) do { ; } while (0) 545 #define __audit_inode(n,d) do { ; } while (0) 546 #define __audit_inode_child(i,p) do { ; } while (0) 547 - #define audit_inode(n,d) do { ; } while (0) 548 #define audit_inode_child(i,p) do { ; } while (0) 549 #define audit_core_dumps(i) do { ; } while (0) 550 #define auditsc_get_stamp(c,t,s) (0)
··· 544 #define audit_putname(n) do { ; } while (0) 545 #define __audit_inode(n,d) do { ; } while (0) 546 #define __audit_inode_child(i,p) do { ; } while (0) 547 + #define audit_inode(n,d) do { (void)(d); } while (0) 548 #define audit_inode_child(i,p) do { ; } while (0) 549 #define audit_core_dumps(i) do { ; } while (0) 550 #define auditsc_get_stamp(c,t,s) (0)
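The only change to audit.h is in the no-op stub: audit_inode(n,d) now evaluates its second argument as (void)(d), which is the usual way to make a variable that is only ever passed to a compiled-out macro still count as used (that rationale is an inference from the change, not stated in the patch). A tiny standalone illustration of the idiom, with a hypothetical caller:

#include <stdio.h>

/* Stub in the style of the new audit_inode() when auditing is compiled out. */
#define audit_inode(n, d)	do { (void)(d); } while (0)

static const char *lookup(const char *name)
{
	const char *dentry = name;	/* hypothetical local, only fed to the stub */

	/*
	 * Because the stub evaluates (void)(d), 'dentry' is still read here,
	 * so unused-variable warnings do not fire even though the real audit
	 * call is compiled out.
	 */
	audit_inode(name, dentry);

	return name;
}

int main(void)
{
	printf("%s\n", lookup("/etc/hostname"));
	return 0;
}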
+11 -12
include/linux/backing-dev.h
··· 45 #define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids))) 46 47 struct bdi_writeback { 48 - struct list_head list; /* hangs off the bdi */ 49 - 50 - struct backing_dev_info *bdi; /* our parent bdi */ 51 unsigned int nr; 52 53 - unsigned long last_old_flush; /* last old data flush */ 54 55 - struct task_struct *task; /* writeback task */ 56 - struct list_head b_dirty; /* dirty inodes */ 57 - struct list_head b_io; /* parked for writeback */ 58 - struct list_head b_more_io; /* parked for more writeback */ 59 }; 60 61 struct backing_dev_info { 62 struct list_head bdi_list; 63 - struct rcu_head rcu_head; 64 unsigned long ra_pages; /* max readahead in PAGE_CACHE_SIZE units */ 65 unsigned long state; /* Always use atomic bitops on this */ 66 unsigned int capabilities; /* Device capabilities */ ··· 79 unsigned int max_ratio, max_prop_frac; 80 81 struct bdi_writeback wb; /* default writeback info for this bdi */ 82 - spinlock_t wb_lock; /* protects update side of wb_list */ 83 - struct list_head wb_list; /* the flusher threads hanging off this bdi */ 84 85 struct list_head work_list; 86 ··· 103 int bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int); 104 void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages); 105 void bdi_start_background_writeback(struct backing_dev_info *bdi); 106 - int bdi_writeback_task(struct bdi_writeback *wb); 107 int bdi_has_dirty_io(struct backing_dev_info *bdi); 108 void bdi_arm_supers_timer(void); 109 110 extern spinlock_t bdi_lock; 111 extern struct list_head bdi_list;
··· 45 #define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids))) 46 47 struct bdi_writeback { 48 + struct backing_dev_info *bdi; /* our parent bdi */ 49 unsigned int nr; 50 51 + unsigned long last_old_flush; /* last old data flush */ 52 + unsigned long last_active; /* last time bdi thread was active */ 53 54 + struct task_struct *task; /* writeback thread */ 55 + struct timer_list wakeup_timer; /* used for delayed bdi thread wakeup */ 56 + struct list_head b_dirty; /* dirty inodes */ 57 + struct list_head b_io; /* parked for writeback */ 58 + struct list_head b_more_io; /* parked for more writeback */ 59 }; 60 61 struct backing_dev_info { 62 struct list_head bdi_list; 63 unsigned long ra_pages; /* max readahead in PAGE_CACHE_SIZE units */ 64 unsigned long state; /* Always use atomic bitops on this */ 65 unsigned int capabilities; /* Device capabilities */ ··· 80 unsigned int max_ratio, max_prop_frac; 81 82 struct bdi_writeback wb; /* default writeback info for this bdi */ 83 + spinlock_t wb_lock; /* protects work_list */ 84 85 struct list_head work_list; 86 ··· 105 int bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int); 106 void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages); 107 void bdi_start_background_writeback(struct backing_dev_info *bdi); 108 + int bdi_writeback_thread(void *data); 109 int bdi_has_dirty_io(struct backing_dev_info *bdi); 110 void bdi_arm_supers_timer(void); 111 + void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi); 112 113 extern spinlock_t bdi_lock; 114 extern struct list_head bdi_list;
+8 -150
include/linux/bio.h
··· 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 - 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * ··· 28 29 #include <asm/io.h> 30 31 #define BIO_DEBUG 32 33 #ifdef BIO_DEBUG ··· 42 #define BIO_MAX_PAGES 256 43 #define BIO_MAX_SIZE (BIO_MAX_PAGES << PAGE_CACHE_SHIFT) 44 #define BIO_MAX_SECTORS (BIO_MAX_SIZE >> 9) 45 - 46 - /* 47 - * was unsigned short, but we might as well be ready for > 64kB I/O pages 48 - */ 49 - struct bio_vec { 50 - struct page *bv_page; 51 - unsigned int bv_len; 52 - unsigned int bv_offset; 53 - }; 54 - 55 - struct bio_set; 56 - struct bio; 57 - struct bio_integrity_payload; 58 - typedef void (bio_end_io_t) (struct bio *, int); 59 - typedef void (bio_destructor_t) (struct bio *); 60 - 61 - /* 62 - * main unit of I/O for the block layer and lower layers (ie drivers and 63 - * stacking drivers) 64 - */ 65 - struct bio { 66 - sector_t bi_sector; /* device address in 512 byte 67 - sectors */ 68 - struct bio *bi_next; /* request queue link */ 69 - struct block_device *bi_bdev; 70 - unsigned long bi_flags; /* status, command, etc */ 71 - unsigned long bi_rw; /* bottom bits READ/WRITE, 72 - * top bits priority 73 - */ 74 - 75 - unsigned short bi_vcnt; /* how many bio_vec's */ 76 - unsigned short bi_idx; /* current index into bvl_vec */ 77 - 78 - /* Number of segments in this BIO after 79 - * physical address coalescing is performed. 80 - */ 81 - unsigned int bi_phys_segments; 82 - 83 - unsigned int bi_size; /* residual I/O count */ 84 - 85 - /* 86 - * To keep track of the max segment size, we account for the 87 - * sizes of the first and last mergeable segments in this bio. 88 - */ 89 - unsigned int bi_seg_front_size; 90 - unsigned int bi_seg_back_size; 91 - 92 - unsigned int bi_max_vecs; /* max bvl_vecs we can hold */ 93 - 94 - unsigned int bi_comp_cpu; /* completion CPU */ 95 - 96 - atomic_t bi_cnt; /* pin count */ 97 - 98 - struct bio_vec *bi_io_vec; /* the actual vec list */ 99 - 100 - bio_end_io_t *bi_end_io; 101 - 102 - void *bi_private; 103 - #if defined(CONFIG_BLK_DEV_INTEGRITY) 104 - struct bio_integrity_payload *bi_integrity; /* data integrity */ 105 - #endif 106 - 107 - bio_destructor_t *bi_destructor; /* destructor */ 108 - 109 - /* 110 - * We can inline a number of vecs at the end of the bio, to avoid 111 - * double allocations for a small number of bio_vecs. This member 112 - * MUST obviously be kept at the very end of the bio. 
113 - */ 114 - struct bio_vec bi_inline_vecs[0]; 115 - }; 116 - 117 - /* 118 - * bio flags 119 - */ 120 - #define BIO_UPTODATE 0 /* ok after I/O completion */ 121 - #define BIO_RW_BLOCK 1 /* RW_AHEAD set, and read/write would block */ 122 - #define BIO_EOF 2 /* out-out-bounds error */ 123 - #define BIO_SEG_VALID 3 /* bi_phys_segments valid */ 124 - #define BIO_CLONED 4 /* doesn't own data */ 125 - #define BIO_BOUNCED 5 /* bio is a bounce bio */ 126 - #define BIO_USER_MAPPED 6 /* contains user pages */ 127 - #define BIO_EOPNOTSUPP 7 /* not supported */ 128 - #define BIO_CPU_AFFINE 8 /* complete bio on same CPU as submitted */ 129 - #define BIO_NULL_MAPPED 9 /* contains invalid user pages */ 130 - #define BIO_FS_INTEGRITY 10 /* fs owns integrity data, not block layer */ 131 - #define BIO_QUIET 11 /* Make BIO Quiet */ 132 - #define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag))) 133 - 134 - /* 135 - * top 4 bits of bio flags indicate the pool this bio came from 136 - */ 137 - #define BIO_POOL_BITS (4) 138 - #define BIO_POOL_NONE ((1UL << BIO_POOL_BITS) - 1) 139 - #define BIO_POOL_OFFSET (BITS_PER_LONG - BIO_POOL_BITS) 140 - #define BIO_POOL_MASK (1UL << BIO_POOL_OFFSET) 141 - #define BIO_POOL_IDX(bio) ((bio)->bi_flags >> BIO_POOL_OFFSET) 142 - 143 - /* 144 - * bio bi_rw flags 145 - * 146 - * bit 0 -- data direction 147 - * If not set, bio is a read from device. If set, it's a write to device. 148 - * bit 1 -- fail fast device errors 149 - * bit 2 -- fail fast transport errors 150 - * bit 3 -- fail fast driver errors 151 - * bit 4 -- rw-ahead when set 152 - * bit 5 -- barrier 153 - * Insert a serialization point in the IO queue, forcing previously 154 - * submitted IO to be completed before this one is issued. 155 - * bit 6 -- synchronous I/O hint. 156 - * bit 7 -- Unplug the device immediately after submitting this bio. 157 - * bit 8 -- metadata request 158 - * Used for tracing to differentiate metadata and data IO. May also 159 - * get some preferential treatment in the IO scheduler 160 - * bit 9 -- discard sectors 161 - * Informs the lower level device that this range of sectors is no longer 162 - * used by the file system and may thus be freed by the device. Used 163 - * for flash based storage. 164 - * Don't want driver retries for any fast fail whatever the reason. 165 - * bit 10 -- Tell the IO scheduler not to wait for more requests after this 166 - one has been submitted, even if it is a SYNC request. 167 - */ 168 - enum bio_rw_flags { 169 - BIO_RW, 170 - BIO_RW_FAILFAST_DEV, 171 - BIO_RW_FAILFAST_TRANSPORT, 172 - BIO_RW_FAILFAST_DRIVER, 173 - /* above flags must match REQ_* */ 174 - BIO_RW_AHEAD, 175 - BIO_RW_BARRIER, 176 - BIO_RW_SYNCIO, 177 - BIO_RW_UNPLUG, 178 - BIO_RW_META, 179 - BIO_RW_DISCARD, 180 - BIO_RW_NOIDLE, 181 - }; 182 - 183 - /* 184 - * First four bits must match between bio->bi_rw and rq->cmd_flags, make 185 - * that explicit here. 186 - */ 187 - #define BIO_RW_RQ_MASK 0xf 188 - 189 - static inline bool bio_rw_flagged(struct bio *bio, enum bio_rw_flags flag) 190 - { 191 - return (bio->bi_rw & (1 << flag)) != 0; 192 - } 193 194 /* 195 * upper 16 bits of bi_rw define the io priority of this bio ··· 66 #define bio_offset(bio) bio_iovec((bio))->bv_offset 67 #define bio_segments(bio) ((bio)->bi_vcnt - (bio)->bi_idx) 68 #define bio_sectors(bio) ((bio)->bi_size >> 9) 69 - #define bio_empty_barrier(bio) (bio_rw_flagged(bio, BIO_RW_BARRIER) && !bio_has_data(bio) && !bio_rw_flagged(bio, BIO_RW_DISCARD)) 70 71 static inline unsigned int bio_cur_bytes(struct bio *bio) 72 {
··· 9 * 10 * This program is distributed in the hope that it will be useful, 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 + * 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * ··· 28 29 #include <asm/io.h> 30 31 + /* struct bio, bio_vec and BIO_* flags are defined in blk_types.h */ 32 + #include <linux/blk_types.h> 33 + 34 #define BIO_DEBUG 35 36 #ifdef BIO_DEBUG ··· 39 #define BIO_MAX_PAGES 256 40 #define BIO_MAX_SIZE (BIO_MAX_PAGES << PAGE_CACHE_SHIFT) 41 #define BIO_MAX_SECTORS (BIO_MAX_SIZE >> 9) 42 43 /* 44 * upper 16 bits of bi_rw define the io priority of this bio ··· 211 #define bio_offset(bio) bio_iovec((bio))->bv_offset 212 #define bio_segments(bio) ((bio)->bi_vcnt - (bio)->bi_idx) 213 #define bio_sectors(bio) ((bio)->bi_size >> 9) 214 + #define bio_empty_barrier(bio) \ 215 + ((bio->bi_rw & REQ_HARDBARRIER) && \ 216 + !bio_has_data(bio) && \ 217 + !(bio->bi_rw & REQ_DISCARD)) 218 219 static inline unsigned int bio_cur_bytes(struct bio *bio) 220 {
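Apart from the struct and flag definitions moving out to blk_types.h, the visible rewrite in bio.h is bio_empty_barrier(), which now tests REQ_HARDBARRIER and REQ_DISCARD directly on bi_rw. Below is a toy userspace sketch of that test; the struct bio, the bit values and the bio_has_data() helper are simplified stand-ins, not the kernel definitions.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in bits following the enum rq_flag_bits ordering (illustration only). */
#define REQ_HARDBARRIER	(1 << 4)
#define REQ_DISCARD	(1 << 7)

struct bio {			/* toy: only what the macro below looks at */
	unsigned long bi_rw;
	unsigned int bi_size;
};

/* Hypothetical stand-in for the kernel's bio_has_data() helper. */
static bool bio_has_data(const struct bio *bio)
{
	return bio->bi_size != 0;
}

/* Same shape as the rewritten bio_empty_barrier() macro. */
#define bio_empty_barrier(bio) \
	(((bio)->bi_rw & REQ_HARDBARRIER) && \
	 !bio_has_data(bio) && \
	 !((bio)->bi_rw & REQ_DISCARD))

int main(void)
{
	struct bio flush = { .bi_rw = REQ_HARDBARRIER, .bi_size = 0 };
	struct bio write = { .bi_rw = REQ_HARDBARRIER, .bi_size = 4096 };

	printf("empty barrier: %d, barrier write: %d\n",
	       bio_empty_barrier(&flush), bio_empty_barrier(&write));
	return 0;
}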
+194
include/linux/blk_types.h
···
··· 1 + /* 2 + * Block data types and constants. Directly include this file only to 3 + * break include dependency loop. 4 + */ 5 + #ifndef __LINUX_BLK_TYPES_H 6 + #define __LINUX_BLK_TYPES_H 7 + 8 + #ifdef CONFIG_BLOCK 9 + 10 + #include <linux/types.h> 11 + 12 + struct bio_set; 13 + struct bio; 14 + struct bio_integrity_payload; 15 + struct page; 16 + struct block_device; 17 + typedef void (bio_end_io_t) (struct bio *, int); 18 + typedef void (bio_destructor_t) (struct bio *); 19 + 20 + /* 21 + * was unsigned short, but we might as well be ready for > 64kB I/O pages 22 + */ 23 + struct bio_vec { 24 + struct page *bv_page; 25 + unsigned int bv_len; 26 + unsigned int bv_offset; 27 + }; 28 + 29 + /* 30 + * main unit of I/O for the block layer and lower layers (ie drivers and 31 + * stacking drivers) 32 + */ 33 + struct bio { 34 + sector_t bi_sector; /* device address in 512 byte 35 + sectors */ 36 + struct bio *bi_next; /* request queue link */ 37 + struct block_device *bi_bdev; 38 + unsigned long bi_flags; /* status, command, etc */ 39 + unsigned long bi_rw; /* bottom bits READ/WRITE, 40 + * top bits priority 41 + */ 42 + 43 + unsigned short bi_vcnt; /* how many bio_vec's */ 44 + unsigned short bi_idx; /* current index into bvl_vec */ 45 + 46 + /* Number of segments in this BIO after 47 + * physical address coalescing is performed. 48 + */ 49 + unsigned int bi_phys_segments; 50 + 51 + unsigned int bi_size; /* residual I/O count */ 52 + 53 + /* 54 + * To keep track of the max segment size, we account for the 55 + * sizes of the first and last mergeable segments in this bio. 56 + */ 57 + unsigned int bi_seg_front_size; 58 + unsigned int bi_seg_back_size; 59 + 60 + unsigned int bi_max_vecs; /* max bvl_vecs we can hold */ 61 + 62 + unsigned int bi_comp_cpu; /* completion CPU */ 63 + 64 + atomic_t bi_cnt; /* pin count */ 65 + 66 + struct bio_vec *bi_io_vec; /* the actual vec list */ 67 + 68 + bio_end_io_t *bi_end_io; 69 + 70 + void *bi_private; 71 + #if defined(CONFIG_BLK_DEV_INTEGRITY) 72 + struct bio_integrity_payload *bi_integrity; /* data integrity */ 73 + #endif 74 + 75 + bio_destructor_t *bi_destructor; /* destructor */ 76 + 77 + /* 78 + * We can inline a number of vecs at the end of the bio, to avoid 79 + * double allocations for a small number of bio_vecs. This member 80 + * MUST obviously be kept at the very end of the bio. 
81 + */ 82 + struct bio_vec bi_inline_vecs[0]; 83 + }; 84 + 85 + /* 86 + * bio flags 87 + */ 88 + #define BIO_UPTODATE 0 /* ok after I/O completion */ 89 + #define BIO_RW_BLOCK 1 /* RW_AHEAD set, and read/write would block */ 90 + #define BIO_EOF 2 /* out-out-bounds error */ 91 + #define BIO_SEG_VALID 3 /* bi_phys_segments valid */ 92 + #define BIO_CLONED 4 /* doesn't own data */ 93 + #define BIO_BOUNCED 5 /* bio is a bounce bio */ 94 + #define BIO_USER_MAPPED 6 /* contains user pages */ 95 + #define BIO_EOPNOTSUPP 7 /* not supported */ 96 + #define BIO_CPU_AFFINE 8 /* complete bio on same CPU as submitted */ 97 + #define BIO_NULL_MAPPED 9 /* contains invalid user pages */ 98 + #define BIO_FS_INTEGRITY 10 /* fs owns integrity data, not block layer */ 99 + #define BIO_QUIET 11 /* Make BIO Quiet */ 100 + #define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag))) 101 + 102 + /* 103 + * top 4 bits of bio flags indicate the pool this bio came from 104 + */ 105 + #define BIO_POOL_BITS (4) 106 + #define BIO_POOL_NONE ((1UL << BIO_POOL_BITS) - 1) 107 + #define BIO_POOL_OFFSET (BITS_PER_LONG - BIO_POOL_BITS) 108 + #define BIO_POOL_MASK (1UL << BIO_POOL_OFFSET) 109 + #define BIO_POOL_IDX(bio) ((bio)->bi_flags >> BIO_POOL_OFFSET) 110 + 111 + #endif /* CONFIG_BLOCK */ 112 + 113 + /* 114 + * Request flags. For use in the cmd_flags field of struct request, and in 115 + * bi_rw of struct bio. Note that some flags are only valid in either one. 116 + */ 117 + enum rq_flag_bits { 118 + /* common flags */ 119 + __REQ_WRITE, /* not set, read. set, write */ 120 + __REQ_FAILFAST_DEV, /* no driver retries of device errors */ 121 + __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */ 122 + __REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */ 123 + 124 + __REQ_HARDBARRIER, /* may not be passed by drive either */ 125 + __REQ_SYNC, /* request is sync (sync write or read) */ 126 + __REQ_META, /* metadata io request */ 127 + __REQ_DISCARD, /* request to discard sectors */ 128 + __REQ_NOIDLE, /* don't anticipate more IO after this one */ 129 + 130 + /* bio only flags */ 131 + __REQ_UNPLUG, /* unplug the immediately after submission */ 132 + __REQ_RAHEAD, /* read ahead, can fail anytime */ 133 + 134 + /* request only flags */ 135 + __REQ_SORTED, /* elevator knows about this request */ 136 + __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */ 137 + __REQ_FUA, /* forced unit access */ 138 + __REQ_NOMERGE, /* don't touch this for merging */ 139 + __REQ_STARTED, /* drive already may have started this one */ 140 + __REQ_DONTPREP, /* don't call prep for this one */ 141 + __REQ_QUEUED, /* uses queueing */ 142 + __REQ_ELVPRIV, /* elevator private data attached */ 143 + __REQ_FAILED, /* set if the request failed */ 144 + __REQ_QUIET, /* don't worry about errors */ 145 + __REQ_PREEMPT, /* set for "ide_preempt" requests */ 146 + __REQ_ORDERED_COLOR, /* is before or after barrier */ 147 + __REQ_ALLOCED, /* request came from our alloc pool */ 148 + __REQ_COPY_USER, /* contains copies of user pages */ 149 + __REQ_INTEGRITY, /* integrity metadata has been remapped */ 150 + __REQ_FLUSH, /* request for cache flush */ 151 + __REQ_IO_STAT, /* account I/O stat */ 152 + __REQ_MIXED_MERGE, /* merge of different types, fail separately */ 153 + __REQ_NR_BITS, /* stops here */ 154 + }; 155 + 156 + #define REQ_WRITE (1 << __REQ_WRITE) 157 + #define REQ_FAILFAST_DEV (1 << __REQ_FAILFAST_DEV) 158 + #define REQ_FAILFAST_TRANSPORT (1 << __REQ_FAILFAST_TRANSPORT) 159 + #define REQ_FAILFAST_DRIVER (1 << 
__REQ_FAILFAST_DRIVER) 160 + #define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER) 161 + #define REQ_SYNC (1 << __REQ_SYNC) 162 + #define REQ_META (1 << __REQ_META) 163 + #define REQ_DISCARD (1 << __REQ_DISCARD) 164 + #define REQ_NOIDLE (1 << __REQ_NOIDLE) 165 + 166 + #define REQ_FAILFAST_MASK \ 167 + (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) 168 + #define REQ_COMMON_MASK \ 169 + (REQ_WRITE | REQ_FAILFAST_MASK | REQ_HARDBARRIER | REQ_SYNC | \ 170 + REQ_META| REQ_DISCARD | REQ_NOIDLE) 171 + 172 + #define REQ_UNPLUG (1 << __REQ_UNPLUG) 173 + #define REQ_RAHEAD (1 << __REQ_RAHEAD) 174 + 175 + #define REQ_SORTED (1 << __REQ_SORTED) 176 + #define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER) 177 + #define REQ_FUA (1 << __REQ_FUA) 178 + #define REQ_NOMERGE (1 << __REQ_NOMERGE) 179 + #define REQ_STARTED (1 << __REQ_STARTED) 180 + #define REQ_DONTPREP (1 << __REQ_DONTPREP) 181 + #define REQ_QUEUED (1 << __REQ_QUEUED) 182 + #define REQ_ELVPRIV (1 << __REQ_ELVPRIV) 183 + #define REQ_FAILED (1 << __REQ_FAILED) 184 + #define REQ_QUIET (1 << __REQ_QUIET) 185 + #define REQ_PREEMPT (1 << __REQ_PREEMPT) 186 + #define REQ_ORDERED_COLOR (1 << __REQ_ORDERED_COLOR) 187 + #define REQ_ALLOCED (1 << __REQ_ALLOCED) 188 + #define REQ_COPY_USER (1 << __REQ_COPY_USER) 189 + #define REQ_INTEGRITY (1 << __REQ_INTEGRITY) 190 + #define REQ_FLUSH (1 << __REQ_FLUSH) 191 + #define REQ_IO_STAT (1 << __REQ_IO_STAT) 192 + #define REQ_MIXED_MERGE (1 << __REQ_MIXED_MERGE) 193 + 194 + #endif /* __LINUX_BLK_TYPES_H */
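The rq_flag_bits enum above is the core of the series: one bit namespace shared by bio->bi_rw and request->cmd_flags, with each mask generated by shifting a single enum position. As a quick illustration of how the composite masks are built from those bits, here is a small standalone C program that re-declares a subset of the definitions; it only demonstrates the bit arithmetic and is not kernel code.

#include <stdio.h>

/* A subset of enum rq_flag_bits, re-declared for the demo. */
enum rq_flag_bits {
	__REQ_WRITE,
	__REQ_FAILFAST_DEV,
	__REQ_FAILFAST_TRANSPORT,
	__REQ_FAILFAST_DRIVER,
	__REQ_HARDBARRIER,
	__REQ_SYNC,
	__REQ_META,
	__REQ_DISCARD,
	__REQ_NOIDLE,
};

#define REQ_WRITE		(1 << __REQ_WRITE)
#define REQ_FAILFAST_DEV	(1 << __REQ_FAILFAST_DEV)
#define REQ_FAILFAST_TRANSPORT	(1 << __REQ_FAILFAST_TRANSPORT)
#define REQ_FAILFAST_DRIVER	(1 << __REQ_FAILFAST_DRIVER)
#define REQ_HARDBARRIER		(1 << __REQ_HARDBARRIER)
#define REQ_SYNC		(1 << __REQ_SYNC)
#define REQ_META		(1 << __REQ_META)
#define REQ_DISCARD		(1 << __REQ_DISCARD)
#define REQ_NOIDLE		(1 << __REQ_NOIDLE)

#define REQ_FAILFAST_MASK \
	(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
#define REQ_COMMON_MASK \
	(REQ_WRITE | REQ_FAILFAST_MASK | REQ_HARDBARRIER | REQ_SYNC | \
	 REQ_META | REQ_DISCARD | REQ_NOIDLE)

int main(void)
{
	unsigned long rw = REQ_WRITE | REQ_SYNC | REQ_META;	/* sync metadata write */

	printf("failfast mask = %#x, common mask = %#x\n",
	       REQ_FAILFAST_MASK, REQ_COMMON_MASK);
	printf("rw = %#lx  write:%d meta:%d failfast:%d\n", rw,
	       !!(rw & REQ_WRITE), !!(rw & REQ_META),
	       !!(rw & REQ_FAILFAST_MASK));
	return 0;
}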
+25 -113
include/linux/blkdev.h
··· 60 REQ_TYPE_PM_RESUME, /* resume request */ 61 REQ_TYPE_PM_SHUTDOWN, /* shutdown request */ 62 REQ_TYPE_SPECIAL, /* driver defined type */ 63 - REQ_TYPE_LINUX_BLOCK, /* generic block layer message */ 64 /* 65 * for ATA/ATAPI devices. this really doesn't belong here, ide should 66 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver ··· 68 REQ_TYPE_ATA_TASKFILE, 69 REQ_TYPE_ATA_PC, 70 }; 71 - 72 - /* 73 - * For request of type REQ_TYPE_LINUX_BLOCK, rq->cmd[0] is the opcode being 74 - * sent down (similar to how REQ_TYPE_BLOCK_PC means that ->cmd[] holds a 75 - * SCSI cdb. 76 - * 77 - * 0x00 -> 0x3f are driver private, to be used for whatever purpose they need, 78 - * typically to differentiate REQ_TYPE_SPECIAL requests. 79 - * 80 - */ 81 - enum { 82 - REQ_LB_OP_EJECT = 0x40, /* eject request */ 83 - REQ_LB_OP_FLUSH = 0x41, /* flush request */ 84 - }; 85 - 86 - /* 87 - * request type modified bits. first four bits match BIO_RW* bits, important 88 - */ 89 - enum rq_flag_bits { 90 - __REQ_RW, /* not set, read. set, write */ 91 - __REQ_FAILFAST_DEV, /* no driver retries of device errors */ 92 - __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */ 93 - __REQ_FAILFAST_DRIVER, /* no driver retries of driver errors */ 94 - /* above flags must match BIO_RW_* */ 95 - __REQ_DISCARD, /* request to discard sectors */ 96 - __REQ_SORTED, /* elevator knows about this request */ 97 - __REQ_SOFTBARRIER, /* may not be passed by ioscheduler */ 98 - __REQ_HARDBARRIER, /* may not be passed by drive either */ 99 - __REQ_FUA, /* forced unit access */ 100 - __REQ_NOMERGE, /* don't touch this for merging */ 101 - __REQ_STARTED, /* drive already may have started this one */ 102 - __REQ_DONTPREP, /* don't call prep for this one */ 103 - __REQ_QUEUED, /* uses queueing */ 104 - __REQ_ELVPRIV, /* elevator private data attached */ 105 - __REQ_FAILED, /* set if the request failed */ 106 - __REQ_QUIET, /* don't worry about errors */ 107 - __REQ_PREEMPT, /* set for "ide_preempt" requests */ 108 - __REQ_ORDERED_COLOR, /* is before or after barrier */ 109 - __REQ_RW_SYNC, /* request is sync (sync write or read) */ 110 - __REQ_ALLOCED, /* request came from our alloc pool */ 111 - __REQ_RW_META, /* metadata io request */ 112 - __REQ_COPY_USER, /* contains copies of user pages */ 113 - __REQ_INTEGRITY, /* integrity metadata has been remapped */ 114 - __REQ_NOIDLE, /* Don't anticipate more IO after this one */ 115 - __REQ_IO_STAT, /* account I/O stat */ 116 - __REQ_MIXED_MERGE, /* merge of different types, fail separately */ 117 - __REQ_NR_BITS, /* stops here */ 118 - }; 119 - 120 - #define REQ_RW (1 << __REQ_RW) 121 - #define REQ_FAILFAST_DEV (1 << __REQ_FAILFAST_DEV) 122 - #define REQ_FAILFAST_TRANSPORT (1 << __REQ_FAILFAST_TRANSPORT) 123 - #define REQ_FAILFAST_DRIVER (1 << __REQ_FAILFAST_DRIVER) 124 - #define REQ_DISCARD (1 << __REQ_DISCARD) 125 - #define REQ_SORTED (1 << __REQ_SORTED) 126 - #define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER) 127 - #define REQ_HARDBARRIER (1 << __REQ_HARDBARRIER) 128 - #define REQ_FUA (1 << __REQ_FUA) 129 - #define REQ_NOMERGE (1 << __REQ_NOMERGE) 130 - #define REQ_STARTED (1 << __REQ_STARTED) 131 - #define REQ_DONTPREP (1 << __REQ_DONTPREP) 132 - #define REQ_QUEUED (1 << __REQ_QUEUED) 133 - #define REQ_ELVPRIV (1 << __REQ_ELVPRIV) 134 - #define REQ_FAILED (1 << __REQ_FAILED) 135 - #define REQ_QUIET (1 << __REQ_QUIET) 136 - #define REQ_PREEMPT (1 << __REQ_PREEMPT) 137 - #define REQ_ORDERED_COLOR (1 << __REQ_ORDERED_COLOR) 138 - #define REQ_RW_SYNC (1 << 
__REQ_RW_SYNC) 139 - #define REQ_ALLOCED (1 << __REQ_ALLOCED) 140 - #define REQ_RW_META (1 << __REQ_RW_META) 141 - #define REQ_COPY_USER (1 << __REQ_COPY_USER) 142 - #define REQ_INTEGRITY (1 << __REQ_INTEGRITY) 143 - #define REQ_NOIDLE (1 << __REQ_NOIDLE) 144 - #define REQ_IO_STAT (1 << __REQ_IO_STAT) 145 - #define REQ_MIXED_MERGE (1 << __REQ_MIXED_MERGE) 146 - 147 - #define REQ_FAILFAST_MASK (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | \ 148 - REQ_FAILFAST_DRIVER) 149 150 #define BLK_MAX_CDB 16 151 ··· 185 typedef void (request_fn_proc) (struct request_queue *q); 186 typedef int (make_request_fn) (struct request_queue *q, struct bio *bio); 187 typedef int (prep_rq_fn) (struct request_queue *, struct request *); 188 typedef void (unplug_fn) (struct request_queue *); 189 190 struct bio_vec; ··· 197 }; 198 typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *, 199 struct bio_vec *); 200 - typedef void (prepare_flush_fn) (struct request_queue *, struct request *); 201 typedef void (softirq_done_fn)(struct request *); 202 typedef int (dma_drain_needed_fn)(struct request *); 203 typedef int (lld_busy_fn) (struct request_queue *q); ··· 267 request_fn_proc *request_fn; 268 make_request_fn *make_request_fn; 269 prep_rq_fn *prep_rq_fn; 270 unplug_fn *unplug_fn; 271 merge_bvec_fn *merge_bvec_fn; 272 - prepare_flush_fn *prepare_flush_fn; 273 softirq_done_fn *softirq_done_fn; 274 rq_timed_out_fn *rq_timed_out_fn; 275 dma_drain_needed_fn *dma_drain_needed; ··· 388 #define QUEUE_FLAG_IO_STAT 15 /* do IO stats */ 389 #define QUEUE_FLAG_DISCARD 16 /* supports DISCARD */ 390 #define QUEUE_FLAG_NOXMERGES 17 /* No extended merges */ 391 392 #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ 393 (1 << QUEUE_FLAG_CLUSTER) | \ 394 (1 << QUEUE_FLAG_STACKABLE) | \ 395 - (1 << QUEUE_FLAG_SAME_COMP)) 396 397 static inline int queue_is_locked(struct request_queue *q) 398 { ··· 519 test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags) 520 #define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags) 521 #define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags) 522 #define blk_queue_flushing(q) ((q)->ordseq) 523 #define blk_queue_stackable(q) \ 524 test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags) 525 #define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags) 526 527 - #define blk_fs_request(rq) ((rq)->cmd_type == REQ_TYPE_FS) 528 - #define blk_pc_request(rq) ((rq)->cmd_type == REQ_TYPE_BLOCK_PC) 529 - #define blk_special_request(rq) ((rq)->cmd_type == REQ_TYPE_SPECIAL) 530 - #define blk_sense_request(rq) ((rq)->cmd_type == REQ_TYPE_SENSE) 531 532 - #define blk_failfast_dev(rq) ((rq)->cmd_flags & REQ_FAILFAST_DEV) 533 - #define blk_failfast_transport(rq) ((rq)->cmd_flags & REQ_FAILFAST_TRANSPORT) 534 - #define blk_failfast_driver(rq) ((rq)->cmd_flags & REQ_FAILFAST_DRIVER) 535 - #define blk_noretry_request(rq) (blk_failfast_dev(rq) || \ 536 - blk_failfast_transport(rq) || \ 537 - blk_failfast_driver(rq)) 538 - #define blk_rq_started(rq) ((rq)->cmd_flags & REQ_STARTED) 539 - #define blk_rq_io_stat(rq) ((rq)->cmd_flags & REQ_IO_STAT) 540 - #define blk_rq_quiet(rq) ((rq)->cmd_flags & REQ_QUIET) 541 542 - #define blk_account_rq(rq) (blk_rq_started(rq) && (blk_fs_request(rq) || blk_discard_rq(rq))) 543 - 544 - #define blk_pm_suspend_request(rq) ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND) 545 - #define blk_pm_resume_request(rq) ((rq)->cmd_type == REQ_TYPE_PM_RESUME) 546 #define blk_pm_request(rq) \ 547 - (blk_pm_suspend_request(rq) || 
blk_pm_resume_request(rq)) 548 549 #define blk_rq_cpu_valid(rq) ((rq)->cpu != -1) 550 - #define blk_sorted_rq(rq) ((rq)->cmd_flags & REQ_SORTED) 551 - #define blk_barrier_rq(rq) ((rq)->cmd_flags & REQ_HARDBARRIER) 552 - #define blk_fua_rq(rq) ((rq)->cmd_flags & REQ_FUA) 553 - #define blk_discard_rq(rq) ((rq)->cmd_flags & REQ_DISCARD) 554 #define blk_bidi_rq(rq) ((rq)->next_rq != NULL) 555 /* rq->queuelist of dequeued request must be list_empty() */ 556 #define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist)) ··· 552 */ 553 static inline bool rw_is_sync(unsigned int rw_flags) 554 { 555 - return !(rw_flags & REQ_RW) || (rw_flags & REQ_RW_SYNC); 556 } 557 558 static inline bool rq_is_sync(struct request *rq) 559 { 560 return rw_is_sync(rq->cmd_flags); 561 } 562 - 563 - #define rq_is_meta(rq) ((rq)->cmd_flags & REQ_RW_META) 564 - #define rq_noidle(rq) ((rq)->cmd_flags & REQ_NOIDLE) 565 566 static inline int blk_queue_full(struct request_queue *q, int sync) 567 { ··· 592 (REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER) 593 #define rq_mergeable(rq) \ 594 (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \ 595 - (blk_discard_rq(rq) || blk_fs_request((rq)))) 596 597 /* 598 * q->prep_rq_fn return values ··· 618 #define BLK_BOUNCE_HIGH -1ULL 619 #endif 620 #define BLK_BOUNCE_ANY (-1ULL) 621 - #define BLK_BOUNCE_ISA (ISA_DMA_THRESHOLD) 622 623 /* 624 * default timeout for SG_IO if none specified ··· 690 gfp_t); 691 extern void blk_insert_request(struct request_queue *, struct request *, int, void *); 692 extern void blk_requeue_request(struct request_queue *, struct request *); 693 extern int blk_rq_check_limits(struct request_queue *q, struct request *rq); 694 extern int blk_lld_busy(struct request_queue *q); 695 extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, ··· 826 extern void __blk_complete_request(struct request *); 827 extern void blk_abort_request(struct request *); 828 extern void blk_abort_queue(struct request_queue *); 829 830 /* 831 * Access functions for manipulating queue properties ··· 871 extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn); 872 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long); 873 extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn); 874 extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *); 875 extern void blk_queue_dma_alignment(struct request_queue *, int); 876 extern void blk_queue_update_dma_alignment(struct request_queue *, int); ··· 879 extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); 880 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); 881 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); 882 - extern int blk_queue_ordered(struct request_queue *, unsigned, prepare_flush_fn *); 883 extern bool blk_do_ordered(struct request_queue *, struct request **); 884 extern unsigned blk_ordered_cur_seq(struct request_queue *); 885 extern unsigned blk_ordered_req_seq(struct request *); ··· 933 { 934 block <<= (sb->s_blocksize_bits - 9); 935 nr_blocks <<= (sb->s_blocksize_bits - 9); 936 - return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_KERNEL, 937 BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER); 938 } 939 ··· 1246 struct block_device_operations { 1247 int (*open) (struct block_device *, fmode_t); 1248 int (*release) (struct gendisk *, fmode_t); 1249 - int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); 1250 int (*ioctl) (struct 
block_device *, fmode_t, unsigned, unsigned long); 1251 int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); 1252 int (*direct_access) (struct block_device *, sector_t,
··· 60 REQ_TYPE_PM_RESUME, /* resume request */ 61 REQ_TYPE_PM_SHUTDOWN, /* shutdown request */ 62 REQ_TYPE_SPECIAL, /* driver defined type */ 63 /* 64 * for ATA/ATAPI devices. this really doesn't belong here, ide should 65 * use REQ_TYPE_SPECIAL and use rq->cmd[0] with the range of driver ··· 69 REQ_TYPE_ATA_TASKFILE, 70 REQ_TYPE_ATA_PC, 71 }; 72 73 #define BLK_MAX_CDB 16 74 ··· 264 typedef void (request_fn_proc) (struct request_queue *q); 265 typedef int (make_request_fn) (struct request_queue *q, struct bio *bio); 266 typedef int (prep_rq_fn) (struct request_queue *, struct request *); 267 + typedef void (unprep_rq_fn) (struct request_queue *, struct request *); 268 typedef void (unplug_fn) (struct request_queue *); 269 270 struct bio_vec; ··· 275 }; 276 typedef int (merge_bvec_fn) (struct request_queue *, struct bvec_merge_data *, 277 struct bio_vec *); 278 typedef void (softirq_done_fn)(struct request *); 279 typedef int (dma_drain_needed_fn)(struct request *); 280 typedef int (lld_busy_fn) (struct request_queue *q); ··· 346 request_fn_proc *request_fn; 347 make_request_fn *make_request_fn; 348 prep_rq_fn *prep_rq_fn; 349 + unprep_rq_fn *unprep_rq_fn; 350 unplug_fn *unplug_fn; 351 merge_bvec_fn *merge_bvec_fn; 352 softirq_done_fn *softirq_done_fn; 353 rq_timed_out_fn *rq_timed_out_fn; 354 dma_drain_needed_fn *dma_drain_needed; ··· 467 #define QUEUE_FLAG_IO_STAT 15 /* do IO stats */ 468 #define QUEUE_FLAG_DISCARD 16 /* supports DISCARD */ 469 #define QUEUE_FLAG_NOXMERGES 17 /* No extended merges */ 470 + #define QUEUE_FLAG_ADD_RANDOM 18 /* Contributes to random pool */ 471 472 #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ 473 (1 << QUEUE_FLAG_CLUSTER) | \ 474 (1 << QUEUE_FLAG_STACKABLE) | \ 475 + (1 << QUEUE_FLAG_SAME_COMP) | \ 476 + (1 << QUEUE_FLAG_ADD_RANDOM)) 477 478 static inline int queue_is_locked(struct request_queue *q) 479 { ··· 596 test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags) 597 #define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags) 598 #define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags) 599 + #define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags) 600 #define blk_queue_flushing(q) ((q)->ordseq) 601 #define blk_queue_stackable(q) \ 602 test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags) 603 #define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags) 604 605 + #define blk_noretry_request(rq) \ 606 + ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \ 607 + REQ_FAILFAST_DRIVER)) 608 609 + #define blk_account_rq(rq) \ 610 + (((rq)->cmd_flags & REQ_STARTED) && \ 611 + ((rq)->cmd_type == REQ_TYPE_FS || \ 612 + ((rq)->cmd_flags & REQ_DISCARD))) 613 614 #define blk_pm_request(rq) \ 615 + ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \ 616 + (rq)->cmd_type == REQ_TYPE_PM_RESUME) 617 618 #define blk_rq_cpu_valid(rq) ((rq)->cpu != -1) 619 #define blk_bidi_rq(rq) ((rq)->next_rq != NULL) 620 /* rq->queuelist of dequeued request must be list_empty() */ 621 #define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist)) ··· 641 */ 642 static inline bool rw_is_sync(unsigned int rw_flags) 643 { 644 + return !(rw_flags & REQ_WRITE) || (rw_flags & REQ_SYNC); 645 } 646 647 static inline bool rq_is_sync(struct request *rq) 648 { 649 return rw_is_sync(rq->cmd_flags); 650 } 651 652 static inline int blk_queue_full(struct request_queue *q, int sync) 653 { ··· 684 (REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER) 685 #define rq_mergeable(rq) \ 686 (!((rq)->cmd_flags & 
RQ_NOMERGE_FLAGS) && \ 687 + (((rq)->cmd_flags & REQ_DISCARD) || \ 688 + (rq)->cmd_type == REQ_TYPE_FS)) 689 690 /* 691 * q->prep_rq_fn return values ··· 709 #define BLK_BOUNCE_HIGH -1ULL 710 #endif 711 #define BLK_BOUNCE_ANY (-1ULL) 712 + #define BLK_BOUNCE_ISA (DMA_BIT_MASK(24)) 713 714 /* 715 * default timeout for SG_IO if none specified ··· 781 gfp_t); 782 extern void blk_insert_request(struct request_queue *, struct request *, int, void *); 783 extern void blk_requeue_request(struct request_queue *, struct request *); 784 + extern void blk_add_request_payload(struct request *rq, struct page *page, 785 + unsigned int len); 786 extern int blk_rq_check_limits(struct request_queue *q, struct request *rq); 787 extern int blk_lld_busy(struct request_queue *q); 788 extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src, ··· 915 extern void __blk_complete_request(struct request *); 916 extern void blk_abort_request(struct request *); 917 extern void blk_abort_queue(struct request_queue *); 918 + extern void blk_unprep_request(struct request *); 919 920 /* 921 * Access functions for manipulating queue properties ··· 959 extern void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn); 960 extern void blk_queue_segment_boundary(struct request_queue *, unsigned long); 961 extern void blk_queue_prep_rq(struct request_queue *, prep_rq_fn *pfn); 962 + extern void blk_queue_unprep_rq(struct request_queue *, unprep_rq_fn *ufn); 963 extern void blk_queue_merge_bvec(struct request_queue *, merge_bvec_fn *); 964 extern void blk_queue_dma_alignment(struct request_queue *, int); 965 extern void blk_queue_update_dma_alignment(struct request_queue *, int); ··· 966 extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); 967 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); 968 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); 969 + extern int blk_queue_ordered(struct request_queue *, unsigned); 970 extern bool blk_do_ordered(struct request_queue *, struct request **); 971 extern unsigned blk_ordered_cur_seq(struct request_queue *); 972 extern unsigned blk_ordered_req_seq(struct request *); ··· 1020 { 1021 block <<= (sb->s_blocksize_bits - 9); 1022 nr_blocks <<= (sb->s_blocksize_bits - 9); 1023 + return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_NOFS, 1024 BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER); 1025 } 1026 ··· 1333 struct block_device_operations { 1334 int (*open) (struct block_device *, fmode_t); 1335 int (*release) (struct gendisk *, fmode_t); 1336 int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); 1337 int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long); 1338 int (*direct_access) (struct block_device *, sector_t,
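The request-classification helpers rewritten above (blk_noretry_request(), blk_account_rq(), blk_pm_request(), rq_mergeable()) now test cmd_type and cmd_flags directly instead of going through the removed blk_*_request()/blk_*_rq() wrappers. Below is a toy userspace sketch of two of them; the struct request here is a stand-in and the bit values are illustrative, not the real definitions.

#include <stdio.h>

/* Toy stand-ins; the real flags and struct request live in blk_types.h/blkdev.h. */
#define REQ_FAILFAST_DEV	(1 << 1)
#define REQ_FAILFAST_TRANSPORT	(1 << 2)
#define REQ_FAILFAST_DRIVER	(1 << 3)
#define REQ_DISCARD		(1 << 7)
#define REQ_STARTED		(1 << 15)
enum { REQ_TYPE_FS = 1, REQ_TYPE_BLOCK_PC = 2 };

struct request {		/* toy struct, nothing like the real one */
	unsigned int cmd_type;
	unsigned int cmd_flags;
};

/* Same shape as the rewritten helpers in blkdev.h. */
#define blk_noretry_request(rq) \
	((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
			    REQ_FAILFAST_DRIVER))
#define blk_account_rq(rq) \
	(((rq)->cmd_flags & REQ_STARTED) && \
	 ((rq)->cmd_type == REQ_TYPE_FS || \
	  ((rq)->cmd_flags & REQ_DISCARD)))

int main(void)
{
	struct request fs_rq = { REQ_TYPE_FS, REQ_STARTED };
	struct request pc_rq = { REQ_TYPE_BLOCK_PC,
				 REQ_STARTED | REQ_FAILFAST_DEV };

	printf("fs: account=%d noretry=%d\n",
	       !!blk_account_rq(&fs_rq), !!blk_noretry_request(&fs_rq));
	printf("pc: account=%d noretry=%d\n",
	       !!blk_account_rq(&pc_rq), !!blk_noretry_request(&pc_rq));
	return 0;
}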
+17 -1
include/linux/blktrace_api.h
··· 5 #ifdef __KERNEL__ 6 #include <linux/blkdev.h> 7 #include <linux/relay.h> 8 #endif 9 10 /* ··· 221 222 #endif /* CONFIG_BLK_DEV_IO_TRACE */ 223 224 #if defined(CONFIG_EVENT_TRACING) && defined(CONFIG_BLOCK) 225 226 static inline int blk_cmd_buf_len(struct request *rq) 227 { 228 - return blk_pc_request(rq) ? rq->cmd_len * 3 : 1; 229 } 230 231 extern void blk_dump_cmd(char *buf, struct request *rq);
··· 5 #ifdef __KERNEL__ 6 #include <linux/blkdev.h> 7 #include <linux/relay.h> 8 + #include <linux/compat.h> 9 #endif 10 11 /* ··· 220 221 #endif /* CONFIG_BLK_DEV_IO_TRACE */ 222 223 + #ifdef CONFIG_COMPAT 224 + 225 + struct compat_blk_user_trace_setup { 226 + char name[32]; 227 + u16 act_mask; 228 + u32 buf_size; 229 + u32 buf_nr; 230 + compat_u64 start_lba; 231 + compat_u64 end_lba; 232 + u32 pid; 233 + }; 234 + #define BLKTRACESETUP32 _IOWR(0x12, 115, struct compat_blk_user_trace_setup) 235 + 236 + #endif 237 + 238 #if defined(CONFIG_EVENT_TRACING) && defined(CONFIG_BLOCK) 239 240 static inline int blk_cmd_buf_len(struct request *rq) 241 { 242 + return (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? rq->cmd_len * 3 : 1; 243 } 244 245 extern void blk_dump_cmd(char *buf, struct request *rq);
+4 -4
include/linux/coda_psdev.h
··· 86 wait_queue_head_t uc_sleep; /* process' wait queue */ 87 }; 88 89 - #define REQ_ASYNC 0x1 90 - #define REQ_READ 0x2 91 - #define REQ_WRITE 0x4 92 - #define REQ_ABORT 0x8 93 94 #endif
··· 86 wait_queue_head_t uc_sleep; /* process' wait queue */ 87 }; 88 89 + #define CODA_REQ_ASYNC 0x1 90 + #define CODA_REQ_READ 0x2 91 + #define CODA_REQ_WRITE 0x4 92 + #define CODA_REQ_ABORT 0x8 93 94 #endif
+1 -1
include/linux/drbd.h
··· 53 54 55 extern const char *drbd_buildtag(void); 56 - #define REL_VERSION "8.3.8" 57 #define API_VERSION 88 58 #define PRO_VERSION_MIN 86 59 #define PRO_VERSION_MAX 94
··· 53 54 55 extern const char *drbd_buildtag(void); 56 + #define REL_VERSION "8.3.8.1" 57 #define API_VERSION 88 58 #define PRO_VERSION_MIN 86 59 #define PRO_VERSION_MAX 94
+5 -4
include/linux/drbd_nl.h
··· 78 NL_INTEGER( 30, T_MAY_IGNORE, rate) 79 NL_INTEGER( 31, T_MAY_IGNORE, after) 80 NL_INTEGER( 32, T_MAY_IGNORE, al_extents) 81 - NL_INTEGER( 71, T_MAY_IGNORE, dp_volume) 82 - NL_INTEGER( 72, T_MAY_IGNORE, dp_interval) 83 - NL_INTEGER( 73, T_MAY_IGNORE, throttle_th) 84 - NL_INTEGER( 74, T_MAY_IGNORE, hold_off_th) 85 NL_STRING( 52, T_MAY_IGNORE, verify_alg, SHARED_SECRET_MAX) 86 NL_STRING( 51, T_MAY_IGNORE, cpu_mask, 32) 87 NL_STRING( 64, T_MAY_IGNORE, csums_alg, SHARED_SECRET_MAX)
··· 78 NL_INTEGER( 30, T_MAY_IGNORE, rate) 79 NL_INTEGER( 31, T_MAY_IGNORE, after) 80 NL_INTEGER( 32, T_MAY_IGNORE, al_extents) 81 + /* NL_INTEGER( 71, T_MAY_IGNORE, dp_volume) 82 + * NL_INTEGER( 72, T_MAY_IGNORE, dp_interval) 83 + * NL_INTEGER( 73, T_MAY_IGNORE, throttle_th) 84 + * NL_INTEGER( 74, T_MAY_IGNORE, hold_off_th) 85 + * feature will be reimplemented differently with 8.3.9 */ 86 NL_STRING( 52, T_MAY_IGNORE, verify_alg, SHARED_SECRET_MAX) 87 NL_STRING( 51, T_MAY_IGNORE, cpu_mask, 32) 88 NL_STRING( 64, T_MAY_IGNORE, csums_alg, SHARED_SECRET_MAX)
+23 -22
include/linux/fs.h
··· 8 9 #include <linux/limits.h> 10 #include <linux/ioctl.h> 11 12 /* 13 * It's silly to have NR_OPEN bigger than NR_FILE, but you can change ··· 122 * immediately wait on this read without caring about 123 * unplugging. 124 * READA Used for read-ahead operations. Lower priority, and the 125 - * block layer could (in theory) choose to ignore this 126 * request if it runs into resource problems. 127 * WRITE A normal async write. Device will be plugged. 128 * SWRITE Like WRITE, but a special case for ll_rw_block() that ··· 141 * SWRITE_SYNC 142 * SWRITE_SYNC_PLUG Like WRITE_SYNC/WRITE_SYNC_PLUG, but locks the buffer. 143 * See SWRITE. 144 - * WRITE_BARRIER Like WRITE, but tells the block layer that all 145 * previously submitted writes must be safely on storage 146 * before this one is started. Also guarantees that when 147 * this write is complete, it itself is also safely on ··· 149 * of this IO. 150 * 151 */ 152 - #define RW_MASK 1 153 - #define RWA_MASK 2 154 - #define READ 0 155 - #define WRITE 1 156 - #define READA 2 /* read-ahead - don't block if no resources */ 157 - #define SWRITE 3 /* for ll_rw_block() - wait for buffer lock */ 158 - #define READ_SYNC (READ | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG)) 159 - #define READ_META (READ | (1 << BIO_RW_META)) 160 - #define WRITE_SYNC_PLUG (WRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE)) 161 - #define WRITE_SYNC (WRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG)) 162 - #define WRITE_ODIRECT_PLUG (WRITE | (1 << BIO_RW_SYNCIO)) 163 - #define WRITE_META (WRITE | (1 << BIO_RW_META)) 164 - #define SWRITE_SYNC_PLUG \ 165 - (SWRITE | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_NOIDLE)) 166 - #define SWRITE_SYNC (SWRITE_SYNC_PLUG | (1 << BIO_RW_UNPLUG)) 167 - #define WRITE_BARRIER (WRITE | (1 << BIO_RW_BARRIER)) 168 169 /* 170 * These aren't really reads or writes, they pass down information about 171 * parts of device that are now unused by the file system. 172 */ 173 - #define DISCARD_NOBARRIER (WRITE | (1 << BIO_RW_DISCARD)) 174 - #define DISCARD_BARRIER (DISCARD_NOBARRIER | (1 << BIO_RW_BARRIER)) 175 176 #define SEL_IN 1 177 #define SEL_OUT 2 ··· 2199 extern void file_move(struct file *f, struct list_head *list); 2200 extern void file_kill(struct file *f); 2201 #ifdef CONFIG_BLOCK 2202 - struct bio; 2203 extern void submit_bio(int, struct bio *); 2204 extern int bdev_read_only(struct block_device *); 2205 #endif ··· 2265 #endif 2266 2267 #ifdef CONFIG_BLOCK 2268 - struct bio; 2269 typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode, 2270 loff_t file_offset); 2271
··· 8 9 #include <linux/limits.h> 10 #include <linux/ioctl.h> 11 + #include <linux/blk_types.h> 12 13 /* 14 * It's silly to have NR_OPEN bigger than NR_FILE, but you can change ··· 121 * immediately wait on this read without caring about 122 * unplugging. 123 * READA Used for read-ahead operations. Lower priority, and the 124 + * block layer could (in theory) choose to ignore this 125 * request if it runs into resource problems. 126 * WRITE A normal async write. Device will be plugged. 127 * SWRITE Like WRITE, but a special case for ll_rw_block() that ··· 140 * SWRITE_SYNC 141 * SWRITE_SYNC_PLUG Like WRITE_SYNC/WRITE_SYNC_PLUG, but locks the buffer. 142 * See SWRITE. 143 + * WRITE_BARRIER Like WRITE_SYNC, but tells the block layer that all 144 * previously submitted writes must be safely on storage 145 * before this one is started. Also guarantees that when 146 * this write is complete, it itself is also safely on ··· 148 * of this IO. 149 * 150 */ 151 + #define RW_MASK REQ_WRITE 152 + #define RWA_MASK REQ_RAHEAD 153 + 154 + #define READ 0 155 + #define WRITE RW_MASK 156 + #define READA RWA_MASK 157 + #define SWRITE (WRITE | READA) 158 + 159 + #define READ_SYNC (READ | REQ_SYNC | REQ_UNPLUG) 160 + #define READ_META (READ | REQ_META) 161 + #define WRITE_SYNC_PLUG (WRITE | REQ_SYNC | REQ_NOIDLE) 162 + #define WRITE_SYNC (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG) 163 + #define WRITE_ODIRECT_PLUG (WRITE | REQ_SYNC) 164 + #define WRITE_META (WRITE | REQ_META) 165 + #define WRITE_BARRIER (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \ 166 + REQ_HARDBARRIER) 167 + #define SWRITE_SYNC_PLUG (SWRITE | REQ_SYNC | REQ_NOIDLE) 168 + #define SWRITE_SYNC (SWRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG) 169 170 /* 171 * These aren't really reads or writes, they pass down information about 172 * parts of device that are now unused by the file system. 173 */ 174 + #define DISCARD_NOBARRIER (WRITE | REQ_DISCARD) 175 + #define DISCARD_BARRIER (WRITE | REQ_DISCARD | REQ_HARDBARRIER) 176 177 #define SEL_IN 1 178 #define SEL_OUT 2 ··· 2196 extern void file_move(struct file *f, struct list_head *list); 2197 extern void file_kill(struct file *f); 2198 #ifdef CONFIG_BLOCK 2199 extern void submit_bio(int, struct bio *); 2200 extern int bdev_read_only(struct block_device *); 2201 #endif ··· 2263 #endif 2264 2265 #ifdef CONFIG_BLOCK 2266 typedef void (dio_submit_t)(int rw, struct bio *bio, struct inode *inode, 2267 loff_t file_offset); 2268
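With READ/WRITE/READA/SWRITE now spelled directly in terms of the REQ_* bits, the long-standing "rw & RW_MASK" style tests keep working and the composite READ_SYNC/WRITE_SYNC helpers are plain OR-combinations of the same namespace. The sketch below re-declares a handful of these definitions with stand-in bit values so the resulting masks can be printed; it is only an illustration of the arithmetic, not the kernel headers.

#include <stdio.h>

/* Stand-in bit values following the enum rq_flag_bits ordering (illustrative). */
#define REQ_WRITE	(1 << 0)
#define REQ_SYNC	(1 << 5)
#define REQ_NOIDLE	(1 << 8)
#define REQ_UNPLUG	(1 << 9)
#define REQ_RAHEAD	(1 << 10)

/* The new definitions from fs.h, reproduced for the demo. */
#define RW_MASK		REQ_WRITE
#define RWA_MASK	REQ_RAHEAD
#define READ		0
#define WRITE		RW_MASK
#define READA		RWA_MASK
#define SWRITE		(WRITE | READA)
#define READ_SYNC	(READ | REQ_SYNC | REQ_UNPLUG)
#define WRITE_SYNC	(WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG)

int main(void)
{
	int ops[]          = { READ, WRITE, READA, SWRITE, READ_SYNC, WRITE_SYNC };
	const char *name[] = { "READ", "WRITE", "READA", "SWRITE",
			       "READ_SYNC", "WRITE_SYNC" };

	for (int i = 0; i < 6; i++)
		printf("%-10s = %#06x  write:%d readahead:%d sync:%d\n",
		       name[i], ops[i], !!(ops[i] & RW_MASK),
		       !!(ops[i] & RWA_MASK), !!(ops[i] & REQ_SYNC));
	return 0;
}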
+10 -5
include/trace/events/block.h
··· 25 26 TP_fast_assign( 27 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; 28 - __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq); 29 - __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq); 30 __entry->errors = rq->errors; 31 32 blk_fill_rwbs_rq(__entry->rwbs, rq); ··· 111 112 TP_fast_assign( 113 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; 114 - __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq); 115 - __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq); 116 - __entry->bytes = blk_pc_request(rq) ? blk_rq_bytes(rq) : 0; 117 118 blk_fill_rwbs_rq(__entry->rwbs, rq); 119 blk_dump_cmd(__get_str(cmd), rq);
··· 25 26 TP_fast_assign( 27 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; 28 + __entry->sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 29 + 0 : blk_rq_pos(rq); 30 + __entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 31 + 0 : blk_rq_sectors(rq); 32 __entry->errors = rq->errors; 33 34 blk_fill_rwbs_rq(__entry->rwbs, rq); ··· 109 110 TP_fast_assign( 111 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; 112 + __entry->sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 113 + 0 : blk_rq_pos(rq); 114 + __entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 115 + 0 : blk_rq_sectors(rq); 116 + __entry->bytes = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 117 + blk_rq_bytes(rq) : 0; 118 119 blk_fill_rwbs_rq(__entry->rwbs, rq); 120 blk_dump_cmd(__get_str(cmd), rq);
+159
include/trace/events/writeback.h
···
··· 1 + #undef TRACE_SYSTEM 2 + #define TRACE_SYSTEM writeback 3 + 4 + #if !defined(_TRACE_WRITEBACK_H) || defined(TRACE_HEADER_MULTI_READ) 5 + #define _TRACE_WRITEBACK_H 6 + 7 + #include <linux/backing-dev.h> 8 + #include <linux/device.h> 9 + #include <linux/writeback.h> 10 + 11 + struct wb_writeback_work; 12 + 13 + DECLARE_EVENT_CLASS(writeback_work_class, 14 + TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work), 15 + TP_ARGS(bdi, work), 16 + TP_STRUCT__entry( 17 + __array(char, name, 32) 18 + __field(long, nr_pages) 19 + __field(dev_t, sb_dev) 20 + __field(int, sync_mode) 21 + __field(int, for_kupdate) 22 + __field(int, range_cyclic) 23 + __field(int, for_background) 24 + ), 25 + TP_fast_assign( 26 + strncpy(__entry->name, dev_name(bdi->dev), 32); 27 + __entry->nr_pages = work->nr_pages; 28 + __entry->sb_dev = work->sb ? work->sb->s_dev : 0; 29 + __entry->sync_mode = work->sync_mode; 30 + __entry->for_kupdate = work->for_kupdate; 31 + __entry->range_cyclic = work->range_cyclic; 32 + __entry->for_background = work->for_background; 33 + ), 34 + TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d " 35 + "kupdate=%d range_cyclic=%d background=%d", 36 + __entry->name, 37 + MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev), 38 + __entry->nr_pages, 39 + __entry->sync_mode, 40 + __entry->for_kupdate, 41 + __entry->range_cyclic, 42 + __entry->for_background 43 + ) 44 + ); 45 + #define DEFINE_WRITEBACK_WORK_EVENT(name) \ 46 + DEFINE_EVENT(writeback_work_class, name, \ 47 + TP_PROTO(struct backing_dev_info *bdi, struct wb_writeback_work *work), \ 48 + TP_ARGS(bdi, work)) 49 + DEFINE_WRITEBACK_WORK_EVENT(writeback_nothread); 50 + DEFINE_WRITEBACK_WORK_EVENT(writeback_queue); 51 + DEFINE_WRITEBACK_WORK_EVENT(writeback_exec); 52 + 53 + TRACE_EVENT(writeback_pages_written, 54 + TP_PROTO(long pages_written), 55 + TP_ARGS(pages_written), 56 + TP_STRUCT__entry( 57 + __field(long, pages) 58 + ), 59 + TP_fast_assign( 60 + __entry->pages = pages_written; 61 + ), 62 + TP_printk("%ld", __entry->pages) 63 + ); 64 + 65 + DECLARE_EVENT_CLASS(writeback_class, 66 + TP_PROTO(struct backing_dev_info *bdi), 67 + TP_ARGS(bdi), 68 + TP_STRUCT__entry( 69 + __array(char, name, 32) 70 + ), 71 + TP_fast_assign( 72 + strncpy(__entry->name, dev_name(bdi->dev), 32); 73 + ), 74 + TP_printk("bdi %s", 75 + __entry->name 76 + ) 77 + ); 78 + #define DEFINE_WRITEBACK_EVENT(name) \ 79 + DEFINE_EVENT(writeback_class, name, \ 80 + TP_PROTO(struct backing_dev_info *bdi), \ 81 + TP_ARGS(bdi)) 82 + 83 + DEFINE_WRITEBACK_EVENT(writeback_nowork); 84 + DEFINE_WRITEBACK_EVENT(writeback_wake_thread); 85 + DEFINE_WRITEBACK_EVENT(writeback_wake_forker_thread); 86 + DEFINE_WRITEBACK_EVENT(writeback_bdi_register); 87 + DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister); 88 + DEFINE_WRITEBACK_EVENT(writeback_thread_start); 89 + DEFINE_WRITEBACK_EVENT(writeback_thread_stop); 90 + 91 + DECLARE_EVENT_CLASS(wbc_class, 92 + TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), 93 + TP_ARGS(wbc, bdi), 94 + TP_STRUCT__entry( 95 + __array(char, name, 32) 96 + __field(long, nr_to_write) 97 + __field(long, pages_skipped) 98 + __field(int, sync_mode) 99 + __field(int, nonblocking) 100 + __field(int, encountered_congestion) 101 + __field(int, for_kupdate) 102 + __field(int, for_background) 103 + __field(int, for_reclaim) 104 + __field(int, range_cyclic) 105 + __field(int, more_io) 106 + __field(unsigned long, older_than_this) 107 + __field(long, range_start) 108 + __field(long, range_end) 109 + ), 110 + 111 + 
TP_fast_assign( 112 + strncpy(__entry->name, dev_name(bdi->dev), 32); 113 + __entry->nr_to_write = wbc->nr_to_write; 114 + __entry->pages_skipped = wbc->pages_skipped; 115 + __entry->sync_mode = wbc->sync_mode; 116 + __entry->for_kupdate = wbc->for_kupdate; 117 + __entry->for_background = wbc->for_background; 118 + __entry->for_reclaim = wbc->for_reclaim; 119 + __entry->range_cyclic = wbc->range_cyclic; 120 + __entry->more_io = wbc->more_io; 121 + __entry->older_than_this = wbc->older_than_this ? 122 + *wbc->older_than_this : 0; 123 + __entry->range_start = (long)wbc->range_start; 124 + __entry->range_end = (long)wbc->range_end; 125 + ), 126 + 127 + TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d " 128 + "bgrd=%d reclm=%d cyclic=%d more=%d older=0x%lx " 129 + "start=0x%lx end=0x%lx", 130 + __entry->name, 131 + __entry->nr_to_write, 132 + __entry->pages_skipped, 133 + __entry->sync_mode, 134 + __entry->for_kupdate, 135 + __entry->for_background, 136 + __entry->for_reclaim, 137 + __entry->range_cyclic, 138 + __entry->more_io, 139 + __entry->older_than_this, 140 + __entry->range_start, 141 + __entry->range_end) 142 + ) 143 + 144 + #define DEFINE_WBC_EVENT(name) \ 145 + DEFINE_EVENT(wbc_class, name, \ 146 + TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi), \ 147 + TP_ARGS(wbc, bdi)) 148 + DEFINE_WBC_EVENT(wbc_writeback_start); 149 + DEFINE_WBC_EVENT(wbc_writeback_written); 150 + DEFINE_WBC_EVENT(wbc_writeback_wait); 151 + DEFINE_WBC_EVENT(wbc_balance_dirty_start); 152 + DEFINE_WBC_EVENT(wbc_balance_dirty_written); 153 + DEFINE_WBC_EVENT(wbc_balance_dirty_wait); 154 + DEFINE_WBC_EVENT(wbc_writepage); 155 + 156 + #endif /* _TRACE_WRITEBACK_H */ 157 + 158 + /* This part must be outside protection */ 159 + #include <trace/define_trace.h>
+1 -1
kernel/power/block_io.c
··· 28 static int submit(int rw, struct block_device *bdev, sector_t sector, 29 struct page *page, struct bio **bio_chain) 30 { 31 - const int bio_rw = rw | (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG); 32 struct bio *bio; 33 34 bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
··· 28 static int submit(int rw, struct block_device *bdev, sector_t sector, 29 struct page *page, struct bio **bio_chain) 30 { 31 + const int bio_rw = rw | REQ_SYNC | REQ_UNPLUG; 32 struct bio *bio; 33 34 bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
+63 -17
kernel/trace/blktrace.c
··· 169 static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ), 170 BLK_TC_ACT(BLK_TC_WRITE) }; 171 172 /* The ilog2() calls fall out because they're constant */ 173 - #define MASK_TC_BIT(rw, __name) ((rw & (1 << BIO_RW_ ## __name)) << \ 174 - (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - BIO_RW_ ## __name)) 175 176 /* 177 * The worker for the various blk_add_trace*() types. Fills out a ··· 197 return; 198 199 what |= ddir_act[rw & WRITE]; 200 - what |= MASK_TC_BIT(rw, BARRIER); 201 - what |= MASK_TC_BIT(rw, SYNCIO); 202 - what |= MASK_TC_BIT(rw, AHEAD); 203 what |= MASK_TC_BIT(rw, META); 204 what |= MASK_TC_BIT(rw, DISCARD); 205 ··· 552 } 553 EXPORT_SYMBOL_GPL(blk_trace_setup); 554 555 int blk_trace_startstop(struct request_queue *q, int start) 556 { 557 int ret; ··· 639 if (!q) 640 return -ENXIO; 641 642 mutex_lock(&bdev->bd_mutex); 643 644 switch (cmd) { ··· 647 bdevname(bdev, b); 648 ret = blk_trace_setup(q, b, bdev->bd_dev, bdev, arg); 649 break; 650 case BLKTRACESTART: 651 start = 1; 652 case BLKTRACESTOP: ··· 667 } 668 669 mutex_unlock(&bdev->bd_mutex); 670 return ret; 671 } 672 ··· 707 if (likely(!bt)) 708 return; 709 710 - if (blk_discard_rq(rq)) 711 - rw |= (1 << BIO_RW_DISCARD); 712 713 - if (blk_pc_request(rq)) { 714 what |= BLK_TC_ACT(BLK_TC_PC); 715 __blk_add_trace(bt, 0, blk_rq_bytes(rq), rw, 716 what, rq->errors, rq->cmd_len, rq->cmd); ··· 971 if (likely(!bt)) 972 return; 973 974 - if (blk_pc_request(rq)) 975 __blk_add_trace(bt, 0, blk_rq_bytes(rq), 0, 976 BLK_TA_DRV_DATA, rq->errors, len, data); 977 else ··· 1776 int len = rq->cmd_len; 1777 unsigned char *cmd = rq->cmd; 1778 1779 - if (!blk_pc_request(rq)) { 1780 buf[0] = '\0'; 1781 return; 1782 } ··· 1801 1802 if (rw & WRITE) 1803 rwbs[i++] = 'W'; 1804 - else if (rw & 1 << BIO_RW_DISCARD) 1805 rwbs[i++] = 'D'; 1806 else if (bytes) 1807 rwbs[i++] = 'R'; 1808 else 1809 rwbs[i++] = 'N'; 1810 1811 - if (rw & 1 << BIO_RW_AHEAD) 1812 rwbs[i++] = 'A'; 1813 - if (rw & 1 << BIO_RW_BARRIER) 1814 rwbs[i++] = 'B'; 1815 - if (rw & 1 << BIO_RW_SYNCIO) 1816 rwbs[i++] = 'S'; 1817 - if (rw & 1 << BIO_RW_META) 1818 rwbs[i++] = 'M'; 1819 1820 rwbs[i] = '\0'; ··· 1825 int rw = rq->cmd_flags & 0x03; 1826 int bytes; 1827 1828 - if (blk_discard_rq(rq)) 1829 - rw |= (1 << BIO_RW_DISCARD); 1830 1831 bytes = blk_rq_bytes(rq); 1832
··· 169 static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ), 170 BLK_TC_ACT(BLK_TC_WRITE) }; 171 172 + #define BLK_TC_HARDBARRIER BLK_TC_BARRIER 173 + #define BLK_TC_RAHEAD BLK_TC_AHEAD 174 + 175 /* The ilog2() calls fall out because they're constant */ 176 + #define MASK_TC_BIT(rw, __name) ((rw & REQ_ ## __name) << \ 177 + (ilog2(BLK_TC_ ## __name) + BLK_TC_SHIFT - __REQ_ ## __name)) 178 179 /* 180 * The worker for the various blk_add_trace*() types. Fills out a ··· 194 return; 195 196 what |= ddir_act[rw & WRITE]; 197 + what |= MASK_TC_BIT(rw, HARDBARRIER); 198 + what |= MASK_TC_BIT(rw, SYNC); 199 + what |= MASK_TC_BIT(rw, RAHEAD); 200 what |= MASK_TC_BIT(rw, META); 201 what |= MASK_TC_BIT(rw, DISCARD); 202 ··· 549 } 550 EXPORT_SYMBOL_GPL(blk_trace_setup); 551 552 + #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) 553 + static int compat_blk_trace_setup(struct request_queue *q, char *name, 554 + dev_t dev, struct block_device *bdev, 555 + char __user *arg) 556 + { 557 + struct blk_user_trace_setup buts; 558 + struct compat_blk_user_trace_setup cbuts; 559 + int ret; 560 + 561 + if (copy_from_user(&cbuts, arg, sizeof(cbuts))) 562 + return -EFAULT; 563 + 564 + buts = (struct blk_user_trace_setup) { 565 + .act_mask = cbuts.act_mask, 566 + .buf_size = cbuts.buf_size, 567 + .buf_nr = cbuts.buf_nr, 568 + .start_lba = cbuts.start_lba, 569 + .end_lba = cbuts.end_lba, 570 + .pid = cbuts.pid, 571 + }; 572 + memcpy(&buts.name, &cbuts.name, 32); 573 + 574 + ret = do_blk_trace_setup(q, name, dev, bdev, &buts); 575 + if (ret) 576 + return ret; 577 + 578 + if (copy_to_user(arg, &buts.name, 32)) { 579 + blk_trace_remove(q); 580 + return -EFAULT; 581 + } 582 + 583 + return 0; 584 + } 585 + #endif 586 + 587 int blk_trace_startstop(struct request_queue *q, int start) 588 { 589 int ret; ··· 601 if (!q) 602 return -ENXIO; 603 604 + lock_kernel(); 605 mutex_lock(&bdev->bd_mutex); 606 607 switch (cmd) { ··· 608 bdevname(bdev, b); 609 ret = blk_trace_setup(q, b, bdev->bd_dev, bdev, arg); 610 break; 611 + #if defined(CONFIG_COMPAT) && defined(CONFIG_X86_64) 612 + case BLKTRACESETUP32: 613 + bdevname(bdev, b); 614 + ret = compat_blk_trace_setup(q, b, bdev->bd_dev, bdev, arg); 615 + break; 616 + #endif 617 case BLKTRACESTART: 618 start = 1; 619 case BLKTRACESTOP: ··· 622 } 623 624 mutex_unlock(&bdev->bd_mutex); 625 + unlock_kernel(); 626 return ret; 627 } 628 ··· 661 if (likely(!bt)) 662 return; 663 664 + if (rq->cmd_flags & REQ_DISCARD) 665 + rw |= REQ_DISCARD; 666 667 + if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { 668 what |= BLK_TC_ACT(BLK_TC_PC); 669 __blk_add_trace(bt, 0, blk_rq_bytes(rq), rw, 670 what, rq->errors, rq->cmd_len, rq->cmd); ··· 925 if (likely(!bt)) 926 return; 927 928 + if (rq->cmd_type == REQ_TYPE_BLOCK_PC) 929 __blk_add_trace(bt, 0, blk_rq_bytes(rq), 0, 930 BLK_TA_DRV_DATA, rq->errors, len, data); 931 else ··· 1730 int len = rq->cmd_len; 1731 unsigned char *cmd = rq->cmd; 1732 1733 + if (rq->cmd_type != REQ_TYPE_BLOCK_PC) { 1734 buf[0] = '\0'; 1735 return; 1736 } ··· 1755 1756 if (rw & WRITE) 1757 rwbs[i++] = 'W'; 1758 + else if (rw & REQ_DISCARD) 1759 rwbs[i++] = 'D'; 1760 else if (bytes) 1761 rwbs[i++] = 'R'; 1762 else 1763 rwbs[i++] = 'N'; 1764 1765 + if (rw & REQ_RAHEAD) 1766 rwbs[i++] = 'A'; 1767 + if (rw & REQ_HARDBARRIER) 1768 rwbs[i++] = 'B'; 1769 + if (rw & REQ_SYNC) 1770 rwbs[i++] = 'S'; 1771 + if (rw & REQ_META) 1772 rwbs[i++] = 'M'; 1773 1774 rwbs[i] = '\0'; ··· 1779 int rw = rq->cmd_flags & 0x03; 1780 int bytes; 1781 1782 + if (rq->cmd_flags & REQ_DISCARD) 1783 + rw |= 
REQ_DISCARD; 1784 1785 bytes = blk_rq_bytes(rq); 1786
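The BLK_TC_HARDBARRIER and BLK_TC_RAHEAD defines exist only so the token pasting in the reworked MASK_TC_BIT() lines up with the REQ_* flag names. Expanded by hand (illustrative, identifiers as used in the hunk above), MASK_TC_BIT(rw, RAHEAD) becomes

    (rw & REQ_RAHEAD) << (ilog2(BLK_TC_RAHEAD) + BLK_TC_SHIFT - __REQ_RAHEAD)

so the REQ_RAHEAD bit is shifted from position __REQ_RAHEAD into the BLK_TC_AHEAD slot of the trace action mask; both operands are compile-time constants, which is why, as the comment above the macro says, the ilog2() calls fall out.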
+213 -238
mm/backing-dev.c
··· 10 #include <linux/module.h> 11 #include <linux/writeback.h> 12 #include <linux/device.h> 13 14 static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0); 15 ··· 50 static int bdi_sync_supers(void *); 51 static void sync_supers_timer_fn(unsigned long); 52 53 - static void bdi_add_default_flusher_task(struct backing_dev_info *bdi); 54 - 55 #ifdef CONFIG_DEBUG_FS 56 #include <linux/debugfs.h> 57 #include <linux/seq_file.h> ··· 64 static int bdi_debug_stats_show(struct seq_file *m, void *v) 65 { 66 struct backing_dev_info *bdi = m->private; 67 - struct bdi_writeback *wb; 68 unsigned long background_thresh; 69 unsigned long dirty_thresh; 70 unsigned long bdi_thresh; 71 unsigned long nr_dirty, nr_io, nr_more_io, nr_wb; 72 struct inode *inode; 73 74 - /* 75 - * inode lock is enough here, the bdi->wb_list is protected by 76 - * RCU on the reader side 77 - */ 78 nr_wb = nr_dirty = nr_io = nr_more_io = 0; 79 spin_lock(&inode_lock); 80 - list_for_each_entry(wb, &bdi->wb_list, list) { 81 - nr_wb++; 82 - list_for_each_entry(inode, &wb->b_dirty, i_list) 83 - nr_dirty++; 84 - list_for_each_entry(inode, &wb->b_io, i_list) 85 - nr_io++; 86 - list_for_each_entry(inode, &wb->b_more_io, i_list) 87 - nr_more_io++; 88 - } 89 spin_unlock(&inode_lock); 90 91 get_dirty_limits(&background_thresh, &dirty_thresh, &bdi_thresh, bdi); ··· 90 "BdiDirtyThresh: %8lu kB\n" 91 "DirtyThresh: %8lu kB\n" 92 "BackgroundThresh: %8lu kB\n" 93 - "WritebackThreads: %8lu\n" 94 "b_dirty: %8lu\n" 95 "b_io: %8lu\n" 96 "b_more_io: %8lu\n" 97 "bdi_list: %8u\n" 98 - "state: %8lx\n" 99 - "wb_list: %8u\n", 100 (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)), 101 (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)), 102 K(bdi_thresh), K(dirty_thresh), 103 - K(background_thresh), nr_wb, nr_dirty, nr_io, nr_more_io, 104 - !list_empty(&bdi->bdi_list), bdi->state, 105 - !list_empty(&bdi->wb_list)); 106 #undef K 107 108 return 0; ··· 236 sync_supers_tsk = kthread_run(bdi_sync_supers, NULL, "sync_supers"); 237 BUG_ON(IS_ERR(sync_supers_tsk)); 238 239 - init_timer(&sync_supers_timer); 240 setup_timer(&sync_supers_timer, sync_supers_timer_fn, 0); 241 bdi_arm_supers_timer(); 242 ··· 246 return err; 247 } 248 subsys_initcall(default_bdi_init); 249 - 250 - static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi) 251 - { 252 - memset(wb, 0, sizeof(*wb)); 253 - 254 - wb->bdi = bdi; 255 - wb->last_old_flush = jiffies; 256 - INIT_LIST_HEAD(&wb->b_dirty); 257 - INIT_LIST_HEAD(&wb->b_io); 258 - INIT_LIST_HEAD(&wb->b_more_io); 259 - } 260 - 261 - static void bdi_task_init(struct backing_dev_info *bdi, 262 - struct bdi_writeback *wb) 263 - { 264 - struct task_struct *tsk = current; 265 - 266 - spin_lock(&bdi->wb_lock); 267 - list_add_tail_rcu(&wb->list, &bdi->wb_list); 268 - spin_unlock(&bdi->wb_lock); 269 - 270 - tsk->flags |= PF_FLUSHER | PF_SWAPWRITE; 271 - set_freezable(); 272 - 273 - /* 274 - * Our parent may run at a different priority, just set us to normal 275 - */ 276 - set_user_nice(tsk, 0); 277 - } 278 - 279 - static int bdi_start_fn(void *ptr) 280 - { 281 - struct bdi_writeback *wb = ptr; 282 - struct backing_dev_info *bdi = wb->bdi; 283 - int ret; 284 - 285 - /* 286 - * Add us to the active bdi_list 287 - */ 288 - spin_lock_bh(&bdi_lock); 289 - list_add_rcu(&bdi->bdi_list, &bdi_list); 290 - spin_unlock_bh(&bdi_lock); 291 - 292 - bdi_task_init(bdi, wb); 293 - 294 - /* 295 - * Clear pending bit and wakeup anybody waiting to tear us down 296 - */ 297 - clear_bit(BDI_pending, &bdi->state); 298 - smp_mb__after_clear_bit(); 299 - 
wake_up_bit(&bdi->state, BDI_pending); 300 - 301 - ret = bdi_writeback_task(wb); 302 - 303 - /* 304 - * Remove us from the list 305 - */ 306 - spin_lock(&bdi->wb_lock); 307 - list_del_rcu(&wb->list); 308 - spin_unlock(&bdi->wb_lock); 309 - 310 - /* 311 - * Flush any work that raced with us exiting. No new work 312 - * will be added, since this bdi isn't discoverable anymore. 313 - */ 314 - if (!list_empty(&bdi->work_list)) 315 - wb_do_writeback(wb, 1); 316 - 317 - wb->task = NULL; 318 - return ret; 319 - } 320 321 int bdi_has_dirty_io(struct backing_dev_info *bdi) 322 { ··· 265 } 266 267 /* 268 - * kupdated() used to do this. We cannot do it from the bdi_forker_task() 269 * or we risk deadlocking on ->s_umount. The longer term solution would be 270 * to implement sync_supers_bdi() or similar and simply do it from the 271 - * bdi writeback tasks individually. 272 */ 273 static int bdi_sync_supers(void *unused) 274 { ··· 304 bdi_arm_supers_timer(); 305 } 306 307 - static int bdi_forker_task(void *ptr) 308 { 309 struct bdi_writeback *me = ptr; 310 311 - bdi_task_init(me->bdi, me); 312 313 for (;;) { 314 - struct backing_dev_info *bdi, *tmp; 315 - struct bdi_writeback *wb; 316 317 /* 318 * Temporary measure, we want to make sure we don't see 319 * dirty data on the default backing_dev_info 320 */ 321 - if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list)) 322 wb_do_writeback(me, 0); 323 - 324 - spin_lock_bh(&bdi_lock); 325 - 326 - /* 327 - * Check if any existing bdi's have dirty data without 328 - * a thread registered. If so, set that up. 329 - */ 330 - list_for_each_entry_safe(bdi, tmp, &bdi_list, bdi_list) { 331 - if (bdi->wb.task) 332 - continue; 333 - if (list_empty(&bdi->work_list) && 334 - !bdi_has_dirty_io(bdi)) 335 - continue; 336 - 337 - bdi_add_default_flusher_task(bdi); 338 } 339 340 set_current_state(TASK_INTERRUPTIBLE); 341 342 - if (list_empty(&bdi_pending_list)) { 343 - unsigned long wait; 344 345 - spin_unlock_bh(&bdi_lock); 346 - wait = msecs_to_jiffies(dirty_writeback_interval * 10); 347 - if (wait) 348 - schedule_timeout(wait); 349 else 350 - schedule(); 351 try_to_freeze(); 352 continue; 353 } 354 355 - __set_current_state(TASK_RUNNING); 356 - 357 /* 358 - * This is our real job - check for pending entries in 359 - * bdi_pending_list, and create the tasks that got added 360 */ 361 - bdi = list_entry(bdi_pending_list.next, struct backing_dev_info, 362 - bdi_list); 363 - list_del_init(&bdi->bdi_list); 364 - spin_unlock_bh(&bdi_lock); 365 - 366 - wb = &bdi->wb; 367 - wb->task = kthread_run(bdi_start_fn, wb, "flush-%s", 368 - dev_name(bdi->dev)); 369 - /* 370 - * If task creation fails, then readd the bdi to 371 - * the pending list and force writeout of the bdi 372 - * from this forker thread. That will free some memory 373 - * and we can try again. 374 - */ 375 - if (IS_ERR(wb->task)) { 376 - wb->task = NULL; 377 - 378 - /* 379 - * Add this 'bdi' to the back, so we get 380 - * a chance to flush other bdi's to free 381 - * memory. 
382 - */ 383 - spin_lock_bh(&bdi_lock); 384 - list_add_tail(&bdi->bdi_list, &bdi_pending_list); 385 - spin_unlock_bh(&bdi_lock); 386 - 387 - bdi_flush_io(bdi); 388 - } 389 } 390 391 return 0; 392 - } 393 - 394 - static void bdi_add_to_pending(struct rcu_head *head) 395 - { 396 - struct backing_dev_info *bdi; 397 - 398 - bdi = container_of(head, struct backing_dev_info, rcu_head); 399 - INIT_LIST_HEAD(&bdi->bdi_list); 400 - 401 - spin_lock(&bdi_lock); 402 - list_add_tail(&bdi->bdi_list, &bdi_pending_list); 403 - spin_unlock(&bdi_lock); 404 - 405 - /* 406 - * We are now on the pending list, wake up bdi_forker_task() 407 - * to finish the job and add us back to the active bdi_list 408 - */ 409 - wake_up_process(default_backing_dev_info.wb.task); 410 - } 411 - 412 - /* 413 - * Add the default flusher task that gets created for any bdi 414 - * that has dirty data pending writeout 415 - */ 416 - void static bdi_add_default_flusher_task(struct backing_dev_info *bdi) 417 - { 418 - if (!bdi_cap_writeback_dirty(bdi)) 419 - return; 420 - 421 - if (WARN_ON(!test_bit(BDI_registered, &bdi->state))) { 422 - printk(KERN_ERR "bdi %p/%s is not registered!\n", 423 - bdi, bdi->name); 424 - return; 425 - } 426 - 427 - /* 428 - * Check with the helper whether to proceed adding a task. Will only 429 - * abort if we two or more simultanous calls to 430 - * bdi_add_default_flusher_task() occured, further additions will block 431 - * waiting for previous additions to finish. 432 - */ 433 - if (!test_and_set_bit(BDI_pending, &bdi->state)) { 434 - list_del_rcu(&bdi->bdi_list); 435 - 436 - /* 437 - * We must wait for the current RCU period to end before 438 - * moving to the pending list. So schedule that operation 439 - * from an RCU callback. 440 - */ 441 - call_rcu(&bdi->rcu_head, bdi_add_to_pending); 442 - } 443 } 444 445 /* ··· 512 const char *fmt, ...) 513 { 514 va_list args; 515 - int ret = 0; 516 struct device *dev; 517 518 if (bdi->dev) /* The driver needs to use separate queues per device */ 519 - goto exit; 520 521 va_start(args, fmt); 522 dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args); 523 va_end(args); 524 - if (IS_ERR(dev)) { 525 - ret = PTR_ERR(dev); 526 - goto exit; 527 - } 528 - 529 - spin_lock_bh(&bdi_lock); 530 - list_add_tail_rcu(&bdi->bdi_list, &bdi_list); 531 - spin_unlock_bh(&bdi_lock); 532 533 bdi->dev = dev; 534 ··· 533 if (bdi_cap_flush_forker(bdi)) { 534 struct bdi_writeback *wb = &bdi->wb; 535 536 - wb->task = kthread_run(bdi_forker_task, wb, "bdi-%s", 537 dev_name(dev)); 538 - if (IS_ERR(wb->task)) { 539 - wb->task = NULL; 540 - ret = -ENOMEM; 541 - 542 - bdi_remove_from_list(bdi); 543 - goto exit; 544 - } 545 } 546 547 bdi_debug_register(bdi, dev_name(dev)); 548 set_bit(BDI_registered, &bdi->state); 549 - exit: 550 - return ret; 551 } 552 EXPORT_SYMBOL(bdi_register); 553 ··· 562 */ 563 static void bdi_wb_shutdown(struct backing_dev_info *bdi) 564 { 565 - struct bdi_writeback *wb; 566 - 567 if (!bdi_cap_writeback_dirty(bdi)) 568 return; 569 570 /* 571 * If setup is pending, wait for that to complete first ··· 577 TASK_UNINTERRUPTIBLE); 578 579 /* 580 - * Make sure nobody finds us on the bdi_list anymore 581 - */ 582 - bdi_remove_from_list(bdi); 583 - 584 - /* 585 - * Finally, kill the kernel threads. We don't need to be RCU 586 * safe anymore, since the bdi is gone from visibility. Force 587 * unfreeze of the thread before calling kthread_stop(), otherwise 588 * it would never exet if it is currently stuck in the refrigerator. 
589 */ 590 - list_for_each_entry(wb, &bdi->wb_list, list) { 591 - thaw_process(wb->task); 592 - kthread_stop(wb->task); 593 } 594 } 595 ··· 606 void bdi_unregister(struct backing_dev_info *bdi) 607 { 608 if (bdi->dev) { 609 bdi_prune_sb(bdi); 610 611 if (!bdi_cap_flush_forker(bdi)) 612 bdi_wb_shutdown(bdi); ··· 618 } 619 } 620 EXPORT_SYMBOL(bdi_unregister); 621 622 int bdi_init(struct backing_dev_info *bdi) 623 { ··· 642 bdi->max_prop_frac = PROP_FRAC_BASE; 643 spin_lock_init(&bdi->wb_lock); 644 INIT_LIST_HEAD(&bdi->bdi_list); 645 - INIT_LIST_HEAD(&bdi->wb_list); 646 INIT_LIST_HEAD(&bdi->work_list); 647 648 bdi_wb_init(&bdi->wb, bdi);
··· 10 #include <linux/module.h> 11 #include <linux/writeback.h> 12 #include <linux/device.h> 13 + #include <trace/events/writeback.h> 14 15 static atomic_long_t bdi_seq = ATOMIC_LONG_INIT(0); 16 ··· 49 static int bdi_sync_supers(void *); 50 static void sync_supers_timer_fn(unsigned long); 51 52 #ifdef CONFIG_DEBUG_FS 53 #include <linux/debugfs.h> 54 #include <linux/seq_file.h> ··· 65 static int bdi_debug_stats_show(struct seq_file *m, void *v) 66 { 67 struct backing_dev_info *bdi = m->private; 68 + struct bdi_writeback *wb = &bdi->wb; 69 unsigned long background_thresh; 70 unsigned long dirty_thresh; 71 unsigned long bdi_thresh; 72 unsigned long nr_dirty, nr_io, nr_more_io, nr_wb; 73 struct inode *inode; 74 75 nr_wb = nr_dirty = nr_io = nr_more_io = 0; 76 spin_lock(&inode_lock); 77 + list_for_each_entry(inode, &wb->b_dirty, i_list) 78 + nr_dirty++; 79 + list_for_each_entry(inode, &wb->b_io, i_list) 80 + nr_io++; 81 + list_for_each_entry(inode, &wb->b_more_io, i_list) 82 + nr_more_io++; 83 spin_unlock(&inode_lock); 84 85 get_dirty_limits(&background_thresh, &dirty_thresh, &bdi_thresh, bdi); ··· 98 "BdiDirtyThresh: %8lu kB\n" 99 "DirtyThresh: %8lu kB\n" 100 "BackgroundThresh: %8lu kB\n" 101 "b_dirty: %8lu\n" 102 "b_io: %8lu\n" 103 "b_more_io: %8lu\n" 104 "bdi_list: %8u\n" 105 + "state: %8lx\n", 106 (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)), 107 (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)), 108 K(bdi_thresh), K(dirty_thresh), 109 + K(background_thresh), nr_dirty, nr_io, nr_more_io, 110 + !list_empty(&bdi->bdi_list), bdi->state); 111 #undef K 112 113 return 0; ··· 247 sync_supers_tsk = kthread_run(bdi_sync_supers, NULL, "sync_supers"); 248 BUG_ON(IS_ERR(sync_supers_tsk)); 249 250 setup_timer(&sync_supers_timer, sync_supers_timer_fn, 0); 251 bdi_arm_supers_timer(); 252 ··· 258 return err; 259 } 260 subsys_initcall(default_bdi_init); 261 262 int bdi_has_dirty_io(struct backing_dev_info *bdi) 263 { ··· 348 } 349 350 /* 351 + * kupdated() used to do this. We cannot do it from the bdi_forker_thread() 352 * or we risk deadlocking on ->s_umount. The longer term solution would be 353 * to implement sync_supers_bdi() or similar and simply do it from the 354 + * bdi writeback thread individually. 355 */ 356 static int bdi_sync_supers(void *unused) 357 { ··· 387 bdi_arm_supers_timer(); 388 } 389 390 + static void wakeup_timer_fn(unsigned long data) 391 + { 392 + struct backing_dev_info *bdi = (struct backing_dev_info *)data; 393 + 394 + spin_lock_bh(&bdi->wb_lock); 395 + if (bdi->wb.task) { 396 + trace_writeback_wake_thread(bdi); 397 + wake_up_process(bdi->wb.task); 398 + } else { 399 + /* 400 + * When bdi tasks are inactive for long time, they are killed. 401 + * In this case we have to wake-up the forker thread which 402 + * should create and run the bdi thread. 403 + */ 404 + trace_writeback_wake_forker_thread(bdi); 405 + wake_up_process(default_backing_dev_info.wb.task); 406 + } 407 + spin_unlock_bh(&bdi->wb_lock); 408 + } 409 + 410 + /* 411 + * This function is used when the first inode for this bdi is marked dirty. It 412 + * wakes-up the corresponding bdi thread which should then take care of the 413 + * periodic background write-out of dirty inodes. Since the write-out would 414 + * starts only 'dirty_writeback_interval' centisecs from now anyway, we just 415 + * set up a timer which wakes the bdi thread up later. 
416 + * 417 + * Note, we wouldn't bother setting up the timer, but this function is on the 418 + * fast-path (used by '__mark_inode_dirty()'), so we save few context switches 419 + * by delaying the wake-up. 420 + */ 421 + void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi) 422 + { 423 + unsigned long timeout; 424 + 425 + timeout = msecs_to_jiffies(dirty_writeback_interval * 10); 426 + mod_timer(&bdi->wb.wakeup_timer, jiffies + timeout); 427 + } 428 + 429 + /* 430 + * Calculate the longest interval (jiffies) bdi threads are allowed to be 431 + * inactive. 432 + */ 433 + static unsigned long bdi_longest_inactive(void) 434 + { 435 + unsigned long interval; 436 + 437 + interval = msecs_to_jiffies(dirty_writeback_interval * 10); 438 + return max(5UL * 60 * HZ, interval); 439 + } 440 + 441 + static int bdi_forker_thread(void *ptr) 442 { 443 struct bdi_writeback *me = ptr; 444 445 + current->flags |= PF_FLUSHER | PF_SWAPWRITE; 446 + set_freezable(); 447 + 448 + /* 449 + * Our parent may run at a different priority, just set us to normal 450 + */ 451 + set_user_nice(current, 0); 452 453 for (;;) { 454 + struct task_struct *task = NULL; 455 + struct backing_dev_info *bdi; 456 + enum { 457 + NO_ACTION, /* Nothing to do */ 458 + FORK_THREAD, /* Fork bdi thread */ 459 + KILL_THREAD, /* Kill inactive bdi thread */ 460 + } action = NO_ACTION; 461 462 /* 463 * Temporary measure, we want to make sure we don't see 464 * dirty data on the default backing_dev_info 465 */ 466 + if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list)) { 467 + del_timer(&me->wakeup_timer); 468 wb_do_writeback(me, 0); 469 } 470 471 + spin_lock_bh(&bdi_lock); 472 set_current_state(TASK_INTERRUPTIBLE); 473 474 + list_for_each_entry(bdi, &bdi_list, bdi_list) { 475 + bool have_dirty_io; 476 477 + if (!bdi_cap_writeback_dirty(bdi) || 478 + bdi_cap_flush_forker(bdi)) 479 + continue; 480 + 481 + WARN(!test_bit(BDI_registered, &bdi->state), 482 + "bdi %p/%s is not registered!\n", bdi, bdi->name); 483 + 484 + have_dirty_io = !list_empty(&bdi->work_list) || 485 + wb_has_dirty_io(&bdi->wb); 486 + 487 + /* 488 + * If the bdi has work to do, but the thread does not 489 + * exist - create it. 490 + */ 491 + if (!bdi->wb.task && have_dirty_io) { 492 + /* 493 + * Set the pending bit - if someone will try to 494 + * unregister this bdi - it'll wait on this bit. 495 + */ 496 + set_bit(BDI_pending, &bdi->state); 497 + action = FORK_THREAD; 498 + break; 499 + } 500 + 501 + spin_lock(&bdi->wb_lock); 502 + 503 + /* 504 + * If there is no work to do and the bdi thread was 505 + * inactive long enough - kill it. The wb_lock is taken 506 + * to make sure no-one adds more work to this bdi and 507 + * wakes the bdi thread up. 
508 + */ 509 + if (bdi->wb.task && !have_dirty_io && 510 + time_after(jiffies, bdi->wb.last_active + 511 + bdi_longest_inactive())) { 512 + task = bdi->wb.task; 513 + bdi->wb.task = NULL; 514 + spin_unlock(&bdi->wb_lock); 515 + set_bit(BDI_pending, &bdi->state); 516 + action = KILL_THREAD; 517 + break; 518 + } 519 + spin_unlock(&bdi->wb_lock); 520 + } 521 + spin_unlock_bh(&bdi_lock); 522 + 523 + /* Keep working if default bdi still has things to do */ 524 + if (!list_empty(&me->bdi->work_list)) 525 + __set_current_state(TASK_RUNNING); 526 + 527 + switch (action) { 528 + case FORK_THREAD: 529 + __set_current_state(TASK_RUNNING); 530 + task = kthread_run(bdi_writeback_thread, &bdi->wb, "flush-%s", 531 + dev_name(bdi->dev)); 532 + if (IS_ERR(task)) { 533 + /* 534 + * If thread creation fails, force writeout of 535 + * the bdi from the thread. 536 + */ 537 + bdi_flush_io(bdi); 538 + } else { 539 + /* 540 + * The spinlock makes sure we do not lose 541 + * wake-ups when racing with 'bdi_queue_work()'. 542 + */ 543 + spin_lock_bh(&bdi->wb_lock); 544 + bdi->wb.task = task; 545 + spin_unlock_bh(&bdi->wb_lock); 546 + } 547 + break; 548 + 549 + case KILL_THREAD: 550 + __set_current_state(TASK_RUNNING); 551 + kthread_stop(task); 552 + break; 553 + 554 + case NO_ACTION: 555 + if (!wb_has_dirty_io(me) || !dirty_writeback_interval) 556 + /* 557 + * There are no dirty data. The only thing we 558 + * should now care about is checking for 559 + * inactive bdi threads and killing them. Thus, 560 + * let's sleep for longer time, save energy and 561 + * be friendly for battery-driven devices. 562 + */ 563 + schedule_timeout(bdi_longest_inactive()); 564 else 565 + schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10)); 566 try_to_freeze(); 567 + /* Back to the main loop */ 568 continue; 569 } 570 571 /* 572 + * Clear pending bit and wakeup anybody waiting to tear us down. 573 */ 574 + clear_bit(BDI_pending, &bdi->state); 575 + smp_mb__after_clear_bit(); 576 + wake_up_bit(&bdi->state, BDI_pending); 577 } 578 579 return 0; 580 } 581 582 /* ··· 541 const char *fmt, ...) 542 { 543 va_list args; 544 struct device *dev; 545 546 if (bdi->dev) /* The driver needs to use separate queues per device */ 547 + return 0; 548 549 va_start(args, fmt); 550 dev = device_create_vargs(bdi_class, parent, MKDEV(0, 0), bdi, fmt, args); 551 va_end(args); 552 + if (IS_ERR(dev)) 553 + return PTR_ERR(dev); 554 555 bdi->dev = dev; 556 ··· 569 if (bdi_cap_flush_forker(bdi)) { 570 struct bdi_writeback *wb = &bdi->wb; 571 572 + wb->task = kthread_run(bdi_forker_thread, wb, "bdi-%s", 573 dev_name(dev)); 574 + if (IS_ERR(wb->task)) 575 + return PTR_ERR(wb->task); 576 } 577 578 bdi_debug_register(bdi, dev_name(dev)); 579 set_bit(BDI_registered, &bdi->state); 580 + 581 + spin_lock_bh(&bdi_lock); 582 + list_add_tail_rcu(&bdi->bdi_list, &bdi_list); 583 + spin_unlock_bh(&bdi_lock); 584 + 585 + trace_writeback_bdi_register(bdi); 586 + return 0; 587 } 588 EXPORT_SYMBOL(bdi_register); 589 ··· 598 */ 599 static void bdi_wb_shutdown(struct backing_dev_info *bdi) 600 { 601 if (!bdi_cap_writeback_dirty(bdi)) 602 return; 603 + 604 + /* 605 + * Make sure nobody finds us on the bdi_list anymore 606 + */ 607 + bdi_remove_from_list(bdi); 608 609 /* 610 * If setup is pending, wait for that to complete first ··· 610 TASK_UNINTERRUPTIBLE); 611 612 /* 613 + * Finally, kill the kernel thread. We don't need to be RCU 614 * safe anymore, since the bdi is gone from visibility. 
Force 615 * unfreeze of the thread before calling kthread_stop(), otherwise 616 * it would never exet if it is currently stuck in the refrigerator. 617 */ 618 + if (bdi->wb.task) { 619 + thaw_process(bdi->wb.task); 620 + kthread_stop(bdi->wb.task); 621 } 622 } 623 ··· 644 void bdi_unregister(struct backing_dev_info *bdi) 645 { 646 if (bdi->dev) { 647 + trace_writeback_bdi_unregister(bdi); 648 bdi_prune_sb(bdi); 649 + del_timer_sync(&bdi->wb.wakeup_timer); 650 651 if (!bdi_cap_flush_forker(bdi)) 652 bdi_wb_shutdown(bdi); ··· 654 } 655 } 656 EXPORT_SYMBOL(bdi_unregister); 657 + 658 + static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi) 659 + { 660 + memset(wb, 0, sizeof(*wb)); 661 + 662 + wb->bdi = bdi; 663 + wb->last_old_flush = jiffies; 664 + INIT_LIST_HEAD(&wb->b_dirty); 665 + INIT_LIST_HEAD(&wb->b_io); 666 + INIT_LIST_HEAD(&wb->b_more_io); 667 + setup_timer(&wb->wakeup_timer, wakeup_timer_fn, (unsigned long)bdi); 668 + } 669 670 int bdi_init(struct backing_dev_info *bdi) 671 { ··· 666 bdi->max_prop_frac = PROP_FRAC_BASE; 667 spin_lock_init(&bdi->wb_lock); 668 INIT_LIST_HEAD(&bdi->bdi_list); 669 INIT_LIST_HEAD(&bdi->work_list); 670 671 bdi_wb_init(&bdi->wb, bdi);
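The comment on bdi_wakeup_thread_delayed() names '__mark_inode_dirty()' as the fast-path caller; that file is not part of this excerpt, so the following is only a sketch of the expected usage, with the local variable name purely hypothetical:

    /*
     * Sketch only: when dirtying the first inode on an otherwise clean bdi,
     * arm the delayed wake-up timer instead of waking the flusher thread
     * (or the forker thread) directly.
     */
    if (wakeup_bdi)
            bdi_wakeup_thread_delayed(bdi);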
+5
mm/page-writeback.c
··· 34 #include <linux/syscalls.h> 35 #include <linux/buffer_head.h> 36 #include <linux/pagevec.h> 37 38 /* 39 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited ··· 536 * threshold otherwise wait until the disk writes catch 537 * up. 538 */ 539 if (bdi_nr_reclaimable > bdi_thresh) { 540 writeback_inodes_wb(&bdi->wb, &wbc); 541 pages_written += write_chunk - wbc.nr_to_write; 542 get_dirty_limits(&background_thresh, &dirty_thresh, 543 &bdi_thresh, bdi); 544 } 545 546 /* ··· 568 if (pages_written >= write_chunk) 569 break; /* We've done our duty */ 570 571 __set_current_state(TASK_INTERRUPTIBLE); 572 io_schedule_timeout(pause); 573 ··· 966 if (!clear_page_dirty_for_io(page)) 967 goto continue_unlock; 968 969 ret = (*writepage)(page, wbc, data); 970 if (unlikely(ret)) { 971 if (ret == AOP_WRITEPAGE_ACTIVATE) {
··· 34 #include <linux/syscalls.h> 35 #include <linux/buffer_head.h> 36 #include <linux/pagevec.h> 37 + #include <trace/events/writeback.h> 38 39 /* 40 * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited ··· 535 * threshold otherwise wait until the disk writes catch 536 * up. 537 */ 538 + trace_wbc_balance_dirty_start(&wbc, bdi); 539 if (bdi_nr_reclaimable > bdi_thresh) { 540 writeback_inodes_wb(&bdi->wb, &wbc); 541 pages_written += write_chunk - wbc.nr_to_write; 542 get_dirty_limits(&background_thresh, &dirty_thresh, 543 &bdi_thresh, bdi); 544 + trace_wbc_balance_dirty_written(&wbc, bdi); 545 } 546 547 /* ··· 565 if (pages_written >= write_chunk) 566 break; /* We've done our duty */ 567 568 + trace_wbc_balance_dirty_wait(&wbc, bdi); 569 __set_current_state(TASK_INTERRUPTIBLE); 570 io_schedule_timeout(pause); 571 ··· 962 if (!clear_page_dirty_for_io(page)) 963 goto continue_unlock; 964 965 + trace_wbc_writepage(wbc, mapping->backing_dev_info); 966 ret = (*writepage)(page, wbc, data); 967 if (unlikely(ret)) { 968 if (ret == AOP_WRITEPAGE_ACTIVATE) {
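Once enabled, the three balance_dirty hooks and the wbc_writepage hook above all render through the wbc_class TP_printk() format added earlier in this series; with made-up values, a single event line would look roughly like

    bdi 8:16: towrt=1536 skip=0 mode=0 kupd=0 bgrd=0 reclm=0 cyclic=1 more=0 older=0x0 start=0x0 end=0x0

where the bdi name comes from dev_name(bdi->dev) and every other field is copied out of the writeback_control at the time of the call.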
+1 -1
mm/page_io.c
··· 106 goto out; 107 } 108 if (wbc->sync_mode == WB_SYNC_ALL) 109 - rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG); 110 count_vm_event(PSWPOUT); 111 set_page_writeback(page); 112 unlock_page(page);
··· 106 goto out; 107 } 108 if (wbc->sync_mode == WB_SYNC_ALL) 109 + rw |= REQ_SYNC | REQ_UNPLUG; 110 count_vm_event(PSWPOUT); 111 set_page_writeback(page); 112 unlock_page(page);