Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block

* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
block: fix blkdev_issue_flush() not detecting and passing EOPNOTSUPP back
block: fix shadowed variable warning in blk-map.c
block: remove extern on function definition
cciss: remove READ_AHEAD define and use block layer defaults
make cdrom.c:check_for_audio_disc() static
block/genhd.c: proper externs
unexport blk_rq_map_user_iov
unexport blk_{get,put}_queue
block/genhd.c: cleanups
proper prototype for blk_dev_init()
block/blk-tag.c should #include "blk.h"
Fix DMA access of block device in 64-bit kernel on some non-x86 systems with 4GB or upper 4GB memory
block: separate out padding from alignment
block: restore the meaning of rq->data_len to the true data length
resubmit: cciss: procfs updates to display info about many volumes
splice: only return -EAGAIN if there's hope of more data
block: fix kernel-docbook parameters and files

+247 -155
+2
Documentation/DocBook/kernel-api.tmpl
··· 361 361 <chapter id="blkdev"> 362 362 <title>Block Devices</title> 363 363 !Eblock/blk-core.c 364 + !Iblock/blk-core.c 364 365 !Eblock/blk-map.c 365 366 !Iblock/blk-sysfs.c 366 367 !Eblock/blk-settings.c 367 368 !Eblock/blk-exec.c 368 369 !Eblock/blk-barrier.c 369 370 !Eblock/blk-tag.c 371 + !Iblock/blk-tag.c 370 372 </chapter> 371 373 372 374 <chapter id="chrdev">
+7 -2
block/blk-barrier.c
··· 259 259 260 260 static void bio_end_empty_barrier(struct bio *bio, int err) 261 261 { 262 - if (err) 262 + if (err) { 263 + if (err == -EOPNOTSUPP) 264 + set_bit(BIO_EOPNOTSUPP, &bio->bi_flags); 263 265 clear_bit(BIO_UPTODATE, &bio->bi_flags); 266 + } 264 267 265 268 complete(bio->bi_private); 266 269 } ··· 312 309 *error_sector = bio->bi_sector; 313 310 314 311 ret = 0; 315 - if (!bio_flagged(bio, BIO_UPTODATE)) 312 + if (bio_flagged(bio, BIO_EOPNOTSUPP)) 313 + ret = -EOPNOTSUPP; 314 + else if (!bio_flagged(bio, BIO_UPTODATE)) 316 315 ret = -EIO; 317 316 318 317 bio_put(bio);
+3 -4
block/blk-core.c
··· 127 127 rq->nr_hw_segments = 0; 128 128 rq->ioprio = 0; 129 129 rq->special = NULL; 130 - rq->raw_data_len = 0; 131 130 rq->buffer = NULL; 132 131 rq->tag = -1; 133 132 rq->errors = 0; ··· 134 135 rq->cmd_len = 0; 135 136 memset(rq->cmd, 0, sizeof(rq->cmd)); 136 137 rq->data_len = 0; 138 + rq->extra_len = 0; 137 139 rq->sense_len = 0; 138 140 rq->data = NULL; 139 141 rq->sense = NULL; ··· 424 424 { 425 425 kobject_put(&q->kobj); 426 426 } 427 - EXPORT_SYMBOL(blk_put_queue); 428 427 429 428 void blk_cleanup_queue(struct request_queue *q) 430 429 { ··· 591 592 592 593 return 1; 593 594 } 594 - EXPORT_SYMBOL(blk_get_queue); 595 595 596 596 static inline void blk_free_request(struct request_queue *q, struct request *rq) 597 597 { ··· 1766 1768 1767 1769 /** 1768 1770 * blk_rq_bytes - Returns bytes left to complete in the entire request 1771 + * @rq: the request being processed 1769 1772 **/ 1770 1773 unsigned int blk_rq_bytes(struct request *rq) 1771 1774 { ··· 1779 1780 1780 1781 /** 1781 1782 * blk_rq_cur_bytes - Returns bytes left to complete in the current segment 1783 + * @rq: the request being processed 1782 1784 **/ 1783 1785 unsigned int blk_rq_cur_bytes(struct request *rq) 1784 1786 { ··· 2016 2016 rq->hard_cur_sectors = rq->current_nr_sectors; 2017 2017 rq->hard_nr_sectors = rq->nr_sectors = bio_sectors(bio); 2018 2018 rq->buffer = bio_data(bio); 2019 - rq->raw_data_len = bio->bi_size; 2020 2019 rq->data_len = bio->bi_size; 2021 2020 2022 2021 rq->bio = rq->biotail = bio;
+14 -13
block/blk-map.c
··· 19 19 rq->biotail->bi_next = bio; 20 20 rq->biotail = bio; 21 21 22 - rq->raw_data_len += bio->bi_size; 23 22 rq->data_len += bio->bi_size; 24 23 } 25 24 return 0; ··· 43 44 void __user *ubuf, unsigned int len) 44 45 { 45 46 unsigned long uaddr; 47 + unsigned int alignment; 46 48 struct bio *bio, *orig_bio; 47 49 int reading, ret; 48 50 ··· 54 54 * direct dma. else, set up kernel bounce buffers 55 55 */ 56 56 uaddr = (unsigned long) ubuf; 57 - if (!(uaddr & queue_dma_alignment(q)) && 58 - !(len & queue_dma_alignment(q)) 57 + alignment = queue_dma_alignment(q) | q->dma_pad_mask; 58 + if (!(uaddr & alignment) && !(len & alignment)) 59 59 bio = bio_map_user(q, NULL, uaddr, len, reading); 60 60 else 61 61 bio = bio_copy_user(q, uaddr, len, reading); ··· 142 142 143 143 /* 144 144 * __blk_rq_map_user() copies the buffers if starting address 145 - * or length isn't aligned. As the copied buffer is always 146 - * page aligned, we know that there's enough room for padding. 147 - * Extend the last bio and update rq->data_len accordingly. 145 + * or length isn't aligned to dma_pad_mask. As the copied 146 + * buffer is always page aligned, we know that there's enough 147 + * room for padding. Extend the last bio and update 148 + * rq->data_len accordingly. 148 149 * 149 150 * On unmap, bio_uncopy_user() will use unmodified 150 151 * bio_map_data pointed to by bio->bi_private. 151 152 */ 152 - if (len & queue_dma_alignment(q)) { 153 - unsigned int pad_len = (queue_dma_alignment(q) & ~len) + 1; 154 - struct bio *bio = rq->biotail; 153 + if (len & q->dma_pad_mask) { 154 + unsigned int pad_len = (q->dma_pad_mask & ~len) + 1; 155 + struct bio *tail = rq->biotail; 155 156 156 - bio->bi_io_vec[bio->bi_vcnt - 1].bv_len += pad_len; 157 - bio->bi_size += pad_len; 158 - rq->data_len += pad_len; 157 + tail->bi_io_vec[tail->bi_vcnt - 1].bv_len += pad_len; 158 + tail->bi_size += pad_len; 159 + 160 + rq->extra_len += pad_len; 159 161 } 160 162 161 163 rq->buffer = rq->data = NULL; ··· 217 215 rq->buffer = rq->data = NULL; 218 216 return 0; 219 217 } 220 - EXPORT_SYMBOL(blk_rq_map_user_iov); 221 218 222 219 /** 223 220 * blk_rq_unmap_user - unmap a request with user data
+1 -1
block/blk-merge.c
··· 231 231 ((unsigned long)q->dma_drain_buffer) & 232 232 (PAGE_SIZE - 1)); 233 233 nsegs++; 234 - rq->data_len += q->dma_drain_size; 234 + rq->extra_len += q->dma_drain_size; 235 235 } 236 236 237 237 if (sg)
+19 -3
block/blk-settings.c
··· 140 140 /* Assume anything <= 4GB can be handled by IOMMU. 141 141 Actually some IOMMUs can handle everything, but I don't 142 142 know of a way to test this here. */ 143 - if (b_pfn < (min_t(u64, 0xffffffff, BLK_BOUNCE_HIGH) >> PAGE_SHIFT)) 143 + if (b_pfn <= (min_t(u64, 0xffffffff, BLK_BOUNCE_HIGH) >> PAGE_SHIFT)) 144 144 dma = 1; 145 145 q->bounce_pfn = max_low_pfn; 146 146 #else ··· 293 293 EXPORT_SYMBOL(blk_queue_stack_limits); 294 294 295 295 /** 296 - * blk_queue_dma_drain - Set up a drain buffer for excess dma. 296 + * blk_queue_dma_pad - set pad mask 297 + * @q: the request queue for the device 298 + * @mask: pad mask 297 299 * 300 + * Set pad mask. Direct IO requests are padded to the mask specified. 301 + * 302 + * Appending pad buffer to a request modifies ->data_len such that it 303 + * includes the pad buffer. The original requested data length can be 304 + * obtained using blk_rq_raw_data_len(). 305 + **/ 306 + void blk_queue_dma_pad(struct request_queue *q, unsigned int mask) 307 + { 308 + q->dma_pad_mask = mask; 309 + } 310 + EXPORT_SYMBOL(blk_queue_dma_pad); 311 + 312 + /** 313 + * blk_queue_dma_drain - Set up a drain buffer for excess dma. 298 314 * @q: the request queue for the device 299 315 * @dma_drain_needed: fn which returns non-zero if drain is necessary 300 316 * @buf: physically contiguous buffer ··· 332 316 * device can support otherwise there won't be room for the drain 333 317 * buffer. 334 318 */ 335 - extern int blk_queue_dma_drain(struct request_queue *q, 319 + int blk_queue_dma_drain(struct request_queue *q, 336 320 dma_drain_needed_fn *dma_drain_needed, 337 321 void *buf, unsigned int size) 338 322 {
+2
block/blk-tag.c
··· 6 6 #include <linux/bio.h> 7 7 #include <linux/blkdev.h> 8 8 9 + #include "blk.h" 10 + 9 11 /** 10 12 * blk_queue_find_tag - find a request by its tag and queue 11 13 * @q: The request queue for the device
+2
block/blk.h
··· 32 32 33 33 void blk_queue_congestion_threshold(struct request_queue *q); 34 34 35 + int blk_dev_init(void); 36 + 35 37 /* 36 38 * Return the threshold (number of used requests) at which the queue is 37 39 * considered to be congested. It include a little hysteresis to keep the
+4 -4
block/bsg.c
··· 437 437 } 438 438 439 439 if (rq->next_rq) { 440 - hdr->dout_resid = rq->raw_data_len; 441 - hdr->din_resid = rq->next_rq->raw_data_len; 440 + hdr->dout_resid = rq->data_len; 441 + hdr->din_resid = rq->next_rq->data_len; 442 442 blk_rq_unmap_user(bidi_bio); 443 443 blk_put_request(rq->next_rq); 444 444 } else if (rq_data_dir(rq) == READ) 445 - hdr->din_resid = rq->raw_data_len; 445 + hdr->din_resid = rq->data_len; 446 446 else 447 - hdr->dout_resid = rq->raw_data_len; 447 + hdr->dout_resid = rq->data_len; 448 448 449 449 /* 450 450 * If the request generated a negative error number, return it
+7 -3
block/genhd.c
··· 17 17 #include <linux/buffer_head.h> 18 18 #include <linux/mutex.h> 19 19 20 + #include "blk.h" 21 + 20 22 static DEFINE_MUTEX(block_class_lock); 21 23 #ifndef CONFIG_SYSFS_DEPRECATED 22 24 struct kobject *block_depr; 23 25 #endif 26 + 27 + static struct device_type disk_type; 24 28 25 29 /* 26 30 * Can be deleted altogether. Later. ··· 350 346 #endif 351 347 352 348 353 - extern int blk_dev_init(void); 354 - 355 349 static struct kobject *base_probe(dev_t devt, int *part, void *data) 356 350 { 357 351 if (request_module("block-major-%d-%d", MAJOR(devt), MINOR(devt)) > 0) ··· 504 502 .name = "block", 505 503 }; 506 504 507 - struct device_type disk_type = { 505 + static struct device_type disk_type = { 508 506 .name = "disk", 509 507 .groups = disk_attr_groups, 510 508 .release = disk_release, ··· 634 632 put_device(gd->driverfs_dev); 635 633 } 636 634 635 + #if 0 637 636 void genhd_media_change_notify(struct gendisk *disk) 638 637 { 639 638 get_device(disk->driverfs_dev); 640 639 schedule_work(&disk->async_notify); 641 640 } 642 641 EXPORT_SYMBOL_GPL(genhd_media_change_notify); 642 + #endif /* 0 */ 643 643 644 644 dev_t blk_lookup_devt(const char *name) 645 645 {
+2 -2
block/scsi_ioctl.c
··· 266 266 hdr->info = 0; 267 267 if (hdr->masked_status || hdr->host_status || hdr->driver_status) 268 268 hdr->info |= SG_INFO_CHECK; 269 - hdr->resid = rq->raw_data_len; 269 + hdr->resid = rq->data_len; 270 270 hdr->sb_len_wr = 0; 271 271 272 272 if (rq->sense_len && hdr->sbp) { ··· 528 528 rq = blk_get_request(q, WRITE, __GFP_WAIT); 529 529 rq->cmd_type = REQ_TYPE_BLOCK_PC; 530 530 rq->data = NULL; 531 - rq->raw_data_len = 0; 532 531 rq->data_len = 0; 532 + rq->extra_len = 0; 533 533 rq->timeout = BLK_DEFAULT_SG_TIMEOUT; 534 534 memset(rq->cmd, 0, sizeof(rq->cmd)); 535 535 rq->cmd[0] = cmd;
+5 -4
drivers/ata/libata-scsi.c
··· 862 862 struct request_queue *q = sdev->request_queue; 863 863 void *buf; 864 864 865 - /* set the min alignment */ 865 + /* set the min alignment and padding */ 866 866 blk_queue_update_dma_alignment(sdev->request_queue, 867 867 ATA_DMA_PAD_SZ - 1); 868 + blk_queue_dma_pad(sdev->request_queue, ATA_DMA_PAD_SZ - 1); 868 869 869 870 /* configure draining */ 870 871 buf = kmalloc(ATAPI_MAX_DRAIN, q->bounce_gfp | GFP_KERNEL); ··· 2539 2538 } 2540 2539 2541 2540 qc->tf.command = ATA_CMD_PACKET; 2542 - qc->nbytes = scsi_bufflen(scmd); 2541 + qc->nbytes = scsi_bufflen(scmd) + scmd->request->extra_len; 2543 2542 2544 2543 /* check whether ATAPI DMA is safe */ 2545 2544 if (!using_pio && ata_check_atapi_dma(qc)) ··· 2550 2549 * want to set it properly, and for DMA where it is 2551 2550 * effectively meaningless. 2552 2551 */ 2553 - nbytes = min(scmd->request->raw_data_len, (unsigned int)63 * 1024); 2552 + nbytes = min(scmd->request->data_len, (unsigned int)63 * 1024); 2554 2553 2555 2554 /* Most ATAPI devices which honor transfer chunk size don't 2556 2555 * behave according to the spec when odd chunk size which ··· 2876 2875 * TODO: find out if we need to do more here to 2877 2876 * cover scatter/gather case. 2878 2877 */ 2879 - qc->nbytes = scsi_bufflen(scmd); 2878 + qc->nbytes = scsi_bufflen(scmd) + scmd->request->extra_len; 2880 2879 2881 2880 /* request result TF and be quiet about device error */ 2882 2881 qc->flags |= ATA_QCFLAG_RESULT_TF | ATA_QCFLAG_QUIET;
+159 -101
drivers/block/cciss.c
··· 33 33 #include <linux/blkpg.h> 34 34 #include <linux/timer.h> 35 35 #include <linux/proc_fs.h> 36 + #include <linux/seq_file.h> 36 37 #include <linux/init.h> 37 38 #include <linux/hdreg.h> 38 39 #include <linux/spinlock.h> ··· 132 131 /*define how many times we will try a command because of bus resets */ 133 132 #define MAX_CMD_RETRIES 3 134 133 135 - #define READ_AHEAD 1024 136 134 #define MAX_CTLR 32 137 135 138 136 /* Originally cciss driver only supports 8 major numbers */ ··· 174 174 static void fail_all_cmds(unsigned long ctlr); 175 175 176 176 #ifdef CONFIG_PROC_FS 177 - static int cciss_proc_get_info(char *buffer, char **start, off_t offset, 178 - int length, int *eof, void *data); 179 177 static void cciss_procinit(int i); 180 178 #else 181 179 static void cciss_procinit(int i) ··· 238 240 */ 239 241 #define ENG_GIG 1000000000 240 242 #define ENG_GIG_FACTOR (ENG_GIG/512) 243 + #define ENGAGE_SCSI "engage scsi" 241 244 static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", 242 245 "UNKNOWN" 243 246 }; 244 247 245 248 static struct proc_dir_entry *proc_cciss; 246 249 247 - static int cciss_proc_get_info(char *buffer, char **start, off_t offset, 248 - int length, int *eof, void *data) 250 + static void cciss_seq_show_header(struct seq_file *seq) 249 251 { 250 - off_t pos = 0; 251 - off_t len = 0; 252 - int size, i, ctlr; 253 - ctlr_info_t *h = (ctlr_info_t *) data; 254 - drive_info_struct *drv; 255 - unsigned long flags; 256 - sector_t vol_sz, vol_sz_frac; 252 + ctlr_info_t *h = seq->private; 257 253 258 - ctlr = h->ctlr; 254 + seq_printf(seq, "%s: HP %s Controller\n" 255 + "Board ID: 0x%08lx\n" 256 + "Firmware Version: %c%c%c%c\n" 257 + "IRQ: %d\n" 258 + "Logical drives: %d\n" 259 + "Current Q depth: %d\n" 260 + "Current # commands on controller: %d\n" 261 + "Max Q depth since init: %d\n" 262 + "Max # commands on controller since init: %d\n" 263 + "Max SG entries since init: %d\n", 264 + h->devname, 265 + h->product_name, 266 + (unsigned long)h->board_id, 267 + h->firm_ver[0], h->firm_ver[1], h->firm_ver[2], 268 + h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT], 269 + h->num_luns, 270 + h->Qdepth, h->commands_outstanding, 271 + h->maxQsinceinit, h->max_outstanding, h->maxSG); 272 + 273 + #ifdef CONFIG_CISS_SCSI_TAPE 274 + cciss_seq_tape_report(seq, h->ctlr); 275 + #endif /* CONFIG_CISS_SCSI_TAPE */ 276 + } 277 + 278 + static void *cciss_seq_start(struct seq_file *seq, loff_t *pos) 279 + { 280 + ctlr_info_t *h = seq->private; 281 + unsigned ctlr = h->ctlr; 282 + unsigned long flags; 259 283 260 284 /* prevent displaying bogus info during configuration 261 285 * or deconfiguration of a logical volume ··· 285 265 spin_lock_irqsave(CCISS_LOCK(ctlr), flags); 286 266 if (h->busy_configuring) { 287 267 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); 288 - return -EBUSY; 268 + return ERR_PTR(-EBUSY); 289 269 } 290 270 h->busy_configuring = 1; 291 271 spin_unlock_irqrestore(CCISS_LOCK(ctlr), flags); 292 272 293 - size = sprintf(buffer, "%s: HP %s Controller\n" 294 - "Board ID: 0x%08lx\n" 295 - "Firmware Version: %c%c%c%c\n" 296 - "IRQ: %d\n" 297 - "Logical drives: %d\n" 298 - "Max sectors: %d\n" 299 - "Current Q depth: %d\n" 300 - "Current # commands on controller: %d\n" 301 - "Max Q depth since init: %d\n" 302 - "Max # commands on controller since init: %d\n" 303 - "Max SG entries since init: %d\n\n", 304 - h->devname, 305 - h->product_name, 306 - (unsigned long)h->board_id, 307 - h->firm_ver[0], h->firm_ver[1], h->firm_ver[2], 308 - h->firm_ver[3], (unsigned int)h->intr[SIMPLE_MODE_INT], 309 - h->num_luns, 310 - h->cciss_max_sectors, 311 - h->Qdepth, h->commands_outstanding, 312 - h->maxQsinceinit, h->max_outstanding, h->maxSG); 273 + if (*pos == 0) 274 + cciss_seq_show_header(seq); 313 275 314 - pos += size; 315 - len += size; 316 - cciss_proc_tape_report(ctlr, buffer, &pos, &len); 317 - for (i = 0; i <= h->highest_lun; i++) { 318 - 319 - drv = &h->drv[i]; 320 - if (drv->heads == 0) 321 - continue; 322 - 323 - vol_sz = drv->nr_blocks; 324 - vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR); 325 - vol_sz_frac *= 100; 326 - sector_div(vol_sz_frac, ENG_GIG_FACTOR); 327 - 328 - if (drv->raid_level > 5) 329 - drv->raid_level = RAID_UNKNOWN; 330 - size = sprintf(buffer + len, "cciss/c%dd%d:" 331 - "\t%4u.%02uGB\tRAID %s\n", 332 - ctlr, i, (int)vol_sz, (int)vol_sz_frac, 333 - raid_label[drv->raid_level]); 334 - pos += size; 335 - len += size; 336 - } 337 - 338 - *eof = 1; 339 - *start = buffer + offset; 340 - len -= offset; 341 - if (len > length) 342 - len = length; 343 - h->busy_configuring = 0; 344 - return len; 276 + return pos; 345 277 } 346 278 347 - static int 348 - cciss_proc_write(struct file *file, const char __user *buffer, 349 - unsigned long count, void *data) 279 + static int cciss_seq_show(struct seq_file *seq, void *v) 350 280 { 351 - unsigned char cmd[80]; 352 - int len; 353 - #ifdef CONFIG_CISS_SCSI_TAPE 354 - ctlr_info_t *h = (ctlr_info_t *) data; 355 - int rc; 281 + sector_t vol_sz, vol_sz_frac; 282 + ctlr_info_t *h = seq->private; 283 + unsigned ctlr = h->ctlr; 284 + loff_t *pos = v; 285 + drive_info_struct *drv = &h->drv[*pos]; 286 + 287 + if (*pos > h->highest_lun) 288 + return 0; 289 + 290 + if (drv->heads == 0) 291 + return 0; 292 + 293 + vol_sz = drv->nr_blocks; 294 + vol_sz_frac = sector_div(vol_sz, ENG_GIG_FACTOR); 295 + vol_sz_frac *= 100; 296 + sector_div(vol_sz_frac, ENG_GIG_FACTOR); 297 + 298 + if (drv->raid_level > 5) 299 + drv->raid_level = RAID_UNKNOWN; 300 + seq_printf(seq, "cciss/c%dd%d:" 301 + "\t%4u.%02uGB\tRAID %s\n", 302 + ctlr, (int) *pos, (int)vol_sz, (int)vol_sz_frac, 303 + raid_label[drv->raid_level]); 304 + return 0; 305 + } 306 + 307 + static void *cciss_seq_next(struct seq_file *seq, void *v, loff_t *pos) 308 + { 309 + ctlr_info_t *h = seq->private; 310 + 311 + if (*pos > h->highest_lun) 312 + return NULL; 313 + *pos += 1; 314 + 315 + return pos; 316 + } 317 + 318 + static void cciss_seq_stop(struct seq_file *seq, void *v) 319 + { 320 + ctlr_info_t *h = seq->private; 321 + 322 + /* Only reset h->busy_configuring if we succeeded in setting 323 + * it during cciss_seq_start. */ 324 + if (v == ERR_PTR(-EBUSY)) 325 + return; 326 + 327 + h->busy_configuring = 0; 328 + } 329 + 330 + static struct seq_operations cciss_seq_ops = { 331 + .start = cciss_seq_start, 332 + .show = cciss_seq_show, 333 + .next = cciss_seq_next, 334 + .stop = cciss_seq_stop, 335 + }; 336 + 337 + static int cciss_seq_open(struct inode *inode, struct file *file) 338 + { 339 + int ret = seq_open(file, &cciss_seq_ops); 340 + struct seq_file *seq = file->private_data; 341 + 342 + if (!ret) 343 + seq->private = PDE(inode)->data; 344 + 345 + return ret; 346 + } 347 + 348 + static ssize_t 349 + cciss_proc_write(struct file *file, const char __user *buf, 350 + size_t length, loff_t *ppos) 351 + { 352 + int err; 353 + char *buffer; 354 + 355 + #ifndef CONFIG_CISS_SCSI_TAPE 356 + return -EINVAL; 356 357 #endif 357 358 358 - if (count > sizeof(cmd) - 1) 359 + if (!buf || length > PAGE_SIZE - 1) 359 360 return -EINVAL; 360 - if (copy_from_user(cmd, buffer, count)) 361 - return -EFAULT; 362 - cmd[count] = '\0'; 363 - len = strlen(cmd); // above 3 lines ensure safety 364 - if (len && cmd[len - 1] == '\n') 365 - cmd[--len] = '\0'; 366 - # ifdef CONFIG_CISS_SCSI_TAPE 367 - if (strcmp("engage scsi", cmd) == 0) { 361 + 362 + buffer = (char *)__get_free_page(GFP_KERNEL); 363 + if (!buffer) 364 + return -ENOMEM; 365 + 366 + err = -EFAULT; 367 + if (copy_from_user(buffer, buf, length)) 368 + goto out; 369 + buffer[length] = '\0'; 370 + 371 + #ifdef CONFIG_CISS_SCSI_TAPE 372 + if (strncmp(ENGAGE_SCSI, buffer, sizeof ENGAGE_SCSI - 1) == 0) { 373 + struct seq_file *seq = file->private_data; 374 + ctlr_info_t *h = seq->private; 375 + int rc; 376 + 368 377 rc = cciss_engage_scsi(h->ctlr); 369 378 if (rc != 0) 370 - return -rc; 371 - return count; 372 - } 379 + err = -rc; 380 + else 381 + err = length; 382 + } else 383 + #endif /* CONFIG_CISS_SCSI_TAPE */ 384 + err = -EINVAL; 373 385 /* might be nice to have "disengage" too, but it's not 374 386 safely possible. (only 1 module use count, lock issues.) */ 375 - # endif 376 - return -EINVAL; 387 + 388 + out: 389 + free_page((unsigned long)buffer); 390 + return err; 377 391 } 378 392 379 - /* 380 - * Get us a file in /proc/cciss that says something about each controller. 381 - * Create /proc/cciss if it doesn't exist yet. 382 - */ 393 + static struct file_operations cciss_proc_fops = { 394 + .owner = THIS_MODULE, 395 + .open = cciss_seq_open, 396 + .read = seq_read, 397 + .llseek = seq_lseek, 398 + .release = seq_release, 399 + .write = cciss_proc_write, 400 + }; 401 + 383 402 static void __devinit cciss_procinit(int i) 384 403 { 385 404 struct proc_dir_entry *pde; 386 405 387 - if (proc_cciss == NULL) { 406 + if (proc_cciss == NULL) 388 407 proc_cciss = proc_mkdir("cciss", proc_root_driver); 389 - if (!proc_cciss) 390 - return; 391 - } 408 + if (!proc_cciss) 409 + return; 410 + pde = proc_create(hba[i]->devname, S_IWUSR | S_IRUSR | S_IRGRP | 411 + S_IROTH, proc_cciss, 412 + &cciss_proc_fops); 413 + if (!pde) 414 + return; 392 415 393 - pde = create_proc_read_entry(hba[i]->devname, 394 - S_IWUSR | S_IRUSR | S_IRGRP | S_IROTH, 395 - proc_cciss, cciss_proc_get_info, hba[i]); 396 - pde->write_proc = cciss_proc_write; 416 + pde->data = hba[i]; 397 417 } 398 418 #endif /* CONFIG_PROC_FS */ 399 419 ··· 1401 1341 disk->private_data = &h->drv[drv_index]; 1402 1342 1403 1343 /* Set up queue information */ 1404 - disk->queue->backing_dev_info.ra_pages = READ_AHEAD; 1405 1344 blk_queue_bounce_limit(disk->queue, hba[ctlr]->pdev->dma_mask); 1406 1345 1407 1346 /* This is a hardware imposed limit. */ ··· 3493 3434 } 3494 3435 drv->queue = q; 3495 3436 3496 - q->backing_dev_info.ra_pages = READ_AHEAD; 3497 3437 blk_queue_bounce_limit(q, hba[i]->pdev->dma_mask); 3498 3438 3499 3439 /* This is a hardware imposed limit. */
+3 -7
drivers/block/cciss_scsi.c
··· 1404 1404 } 1405 1405 1406 1406 static void 1407 - cciss_proc_tape_report(int ctlr, unsigned char *buffer, off_t *pos, off_t *len) 1407 + cciss_seq_tape_report(struct seq_file *seq, int ctlr) 1408 1408 { 1409 1409 unsigned long flags; 1410 - int size; 1411 - 1412 - *pos = *pos -1; *len = *len - 1; // cut off the last trailing newline 1413 1410 1414 1411 CPQ_TAPE_LOCK(ctlr, flags); 1415 - size = sprintf(buffer + *len, 1412 + seq_printf(seq, 1416 1413 "Sequential access devices: %d\n\n", 1417 1414 ccissscsi[ctlr].ndevices); 1418 1415 CPQ_TAPE_UNLOCK(ctlr, flags); 1419 - *pos += size; *len += size; 1420 1416 } 1417 + 1421 1418 1422 1419 /* Need at least one of these error handlers to keep ../scsi/hosts.c from 1423 1420 * complaining. Doing a host- or bus-reset can't do anything good here. ··· 1495 1498 #define cciss_scsi_setup(cntl_num) 1496 1499 #define cciss_unregister_scsi(ctlr) 1497 1500 #define cciss_register_scsi(ctlr) 1498 - #define cciss_proc_tape_report(ctlr, buffer, pos, len) 1499 1501 1500 1502 #endif /* CONFIG_CISS_SCSI_TAPE */
+2 -2
drivers/cdrom/cdrom.c
··· 1152 1152 /* This code is similar to that in open_for_data. The routine is called 1153 1153 whenever an audio play operation is requested. 1154 1154 */ 1155 - int check_for_audio_disc(struct cdrom_device_info * cdi, 1156 - struct cdrom_device_ops * cdo) 1155 + static int check_for_audio_disc(struct cdrom_device_info * cdi, 1156 + struct cdrom_device_ops * cdo) 1157 1157 { 1158 1158 int ret; 1159 1159 tracktype tracks;
+1 -2
fs/proc/proc_misc.c
··· 32 32 #include <linux/interrupt.h> 33 33 #include <linux/swap.h> 34 34 #include <linux/slab.h> 35 + #include <linux/genhd.h> 35 36 #include <linux/smp.h> 36 37 #include <linux/signal.h> 37 38 #include <linux/module.h> ··· 378 377 #endif 379 378 380 379 #ifdef CONFIG_BLOCK 381 - extern const struct seq_operations partitions_op; 382 380 static int partitions_open(struct inode *inode, struct file *file) 383 381 { 384 382 return seq_open(file, &partitions_op); ··· 389 389 .release = seq_release, 390 390 }; 391 391 392 - extern const struct seq_operations diskstats_op; 393 392 static int diskstats_open(struct inode *inode, struct file *file) 394 393 { 395 394 return seq_open(file, &diskstats_op);
+8 -4
fs/splice.c
··· 1669 1669 i++; 1670 1670 } while (len); 1671 1671 1672 + /* 1673 + * return EAGAIN if we have the potential of some data in the 1674 + * future, otherwise just return 0 1675 + */ 1676 + if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK)) 1677 + ret = -EAGAIN; 1678 + 1672 1679 inode_double_unlock(ipipe->inode, opipe->inode); 1673 1680 1674 1681 /* ··· 1716 1709 ret = link_ipipe_prep(ipipe, flags); 1717 1710 if (!ret) { 1718 1711 ret = link_opipe_prep(opipe, flags); 1719 - if (!ret) { 1712 + if (!ret) 1720 1713 ret = link_pipe(ipipe, opipe, len, flags); 1721 - if (!ret && (flags & SPLICE_F_NONBLOCK)) 1722 - ret = -EAGAIN; 1723 - } 1724 1714 } 1725 1715 } 1726 1716
+3 -1
include/linux/blkdev.h
··· 216 216 unsigned int cmd_len; 217 217 unsigned char cmd[BLK_MAX_CDB]; 218 218 219 - unsigned int raw_data_len; 220 219 unsigned int data_len; 220 + unsigned int extra_len; /* length of alignment and padding */ 221 221 unsigned int sense_len; 222 222 void *data; 223 223 void *sense; ··· 362 362 unsigned long seg_boundary_mask; 363 363 void *dma_drain_buffer; 364 364 unsigned int dma_drain_size; 365 + unsigned int dma_pad_mask; 365 366 unsigned int dma_alignment; 366 367 367 368 struct blk_queue_tag *queue_tags; ··· 702 701 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); 703 702 extern void blk_queue_hardsect_size(struct request_queue *, unsigned short); 704 703 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b); 704 + extern void blk_queue_dma_pad(struct request_queue *, unsigned int); 705 705 extern int blk_queue_dma_drain(struct request_queue *q, 706 706 dma_drain_needed_fn *dma_drain_needed, 707 707 void *buf, unsigned int size);
+3 -2
include/linux/genhd.h
··· 18 18 #define dev_to_disk(device) container_of(device, struct gendisk, dev) 19 19 #define dev_to_part(device) container_of(device, struct hd_struct, dev) 20 20 21 - extern struct device_type disk_type; 22 21 extern struct device_type part_type; 23 22 extern struct kobject *block_depr; 24 23 extern struct class block_class; 24 + 25 + extern const struct seq_operations partitions_op; 26 + extern const struct seq_operations diskstats_op; 25 27 26 28 enum { 27 29 /* These three have identical behaviour; use the second one if DOS FDISK gets ··· 558 556 extern struct gendisk *alloc_disk(int minors); 559 557 extern struct kobject *get_disk(struct gendisk *disk); 560 558 extern void put_disk(struct gendisk *disk); 561 - extern void genhd_media_change_notify(struct gendisk *disk); 562 559 extern void blk_register_region(dev_t devt, unsigned long range, 563 560 struct module *module, 564 561 struct kobject *(*probe)(dev_t, int *, void *),