Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block

* 'for-linus' of git://git.kernel.dk/linux-2.6-block: (41 commits)
Revert "Seperate read and write statistics of in_flight requests"
cfq-iosched: don't delay async queue if it hasn't dispatched at all
block: Topology ioctls
cfq-iosched: use assigned slice sync value, not default
cfq-iosched: rename 'desktop' sysfs entry to 'low_latency'
cfq-iosched: implement slower async initiate and queue ramp up
cfq-iosched: delay async IO dispatch, if sync IO was just done
cfq-iosched: add a knob for desktop interactiveness
Add a tracepoint for block request remapping
block: allow large discard requests
block: use normal I/O path for discard requests
swapfile: avoid NULL pointer dereference in swapon when s_bdev is NULL
fs/bio.c: move EXPORT* macros to line after function
Add missing blk_trace_remove_sysfs to be in pair with blk_trace_init_sysfs
cciss: fix build when !PROC_FS
block: Do not clamp max_hw_sectors for stacking devices
block: Set max_sectors correctly for stacking devices
cciss: cciss_host_attr_groups should be const
cciss: Dynamically allocate the drive_info_struct for each logical drive.
cciss: Add usage_count attribute to each logical drive in /sys
...

+1002 -483
+28
Documentation/ABI/testing/sysfs-bus-pci-devices-cciss
··· 31 31 Kernel Version: 2.6.30 32 32 Contact: iss_storagedev@hp.com 33 33 Description: A symbolic link to /sys/block/cciss!cXdY 34 + 35 + Where: /sys/bus/pci/devices/<dev>/ccissX/rescan 36 + Date: August 2009 37 + Kernel Version: 2.6.31 38 + Contact: iss_storagedev@hp.com 39 + Description: Kicks of a rescan of the controller to discover logical 40 + drive topology changes. 41 + 42 + Where: /sys/bus/pci/devices/<dev>/ccissX/cXdY/lunid 43 + Date: August 2009 44 + Kernel Version: 2.6.31 45 + Contact: iss_storagedev@hp.com 46 + Description: Displays the 8-byte LUN ID used to address logical 47 + drive Y of controller X. 48 + 49 + Where: /sys/bus/pci/devices/<dev>/ccissX/cXdY/raid_level 50 + Date: August 2009 51 + Kernel Version: 2.6.31 52 + Contact: iss_storagedev@hp.com 53 + Description: Displays the RAID level of logical drive Y of 54 + controller X. 55 + 56 + Where: /sys/bus/pci/devices/<dev>/ccissX/cXdY/usage_count 57 + Date: August 2009 58 + Kernel Version: 2.6.31 59 + Contact: iss_storagedev@hp.com 60 + Description: Displays the usage count (number of opens) of logical drive Y 61 + of controller X.
+36 -9
block/blk-barrier.c
··· 350 350 351 351 if (bio->bi_private) 352 352 complete(bio->bi_private); 353 + __free_page(bio_page(bio)); 353 354 354 355 bio_put(bio); 355 356 } ··· 373 372 struct request_queue *q = bdev_get_queue(bdev); 374 373 int type = flags & DISCARD_FL_BARRIER ? 375 374 DISCARD_BARRIER : DISCARD_NOBARRIER; 375 + struct bio *bio; 376 + struct page *page; 376 377 int ret = 0; 377 378 378 379 if (!q) 379 380 return -ENXIO; 380 381 381 - if (!q->prepare_discard_fn) 382 + if (!blk_queue_discard(q)) 382 383 return -EOPNOTSUPP; 383 384 384 385 while (nr_sects && !ret) { 385 - struct bio *bio = bio_alloc(gfp_mask, 0); 386 - if (!bio) 387 - return -ENOMEM; 386 + unsigned int sector_size = q->limits.logical_block_size; 387 + unsigned int max_discard_sectors = 388 + min(q->limits.max_discard_sectors, UINT_MAX >> 9); 388 389 390 + bio = bio_alloc(gfp_mask, 1); 391 + if (!bio) 392 + goto out; 393 + bio->bi_sector = sector; 389 394 bio->bi_end_io = blkdev_discard_end_io; 390 395 bio->bi_bdev = bdev; 391 396 if (flags & DISCARD_FL_WAIT) 392 397 bio->bi_private = &wait; 393 398 394 - bio->bi_sector = sector; 399 + /* 400 + * Add a zeroed one-sector payload as that's what 401 + * our current implementations need. If we'll ever need 402 + * more the interface will need revisiting. 403 + */ 404 + page = alloc_page(GFP_KERNEL | __GFP_ZERO); 405 + if (!page) 406 + goto out_free_bio; 407 + if (bio_add_pc_page(q, bio, page, sector_size, 0) < sector_size) 408 + goto out_free_page; 395 409 396 - if (nr_sects > queue_max_hw_sectors(q)) { 397 - bio->bi_size = queue_max_hw_sectors(q) << 9; 398 - nr_sects -= queue_max_hw_sectors(q); 399 - sector += queue_max_hw_sectors(q); 410 + /* 411 + * And override the bio size - the way discard works we 412 + * touch many more blocks on disk than the actual payload 413 + * length. 
414 + */ 415 + if (nr_sects > max_discard_sectors) { 416 + bio->bi_size = max_discard_sectors << 9; 417 + nr_sects -= max_discard_sectors; 418 + sector += max_discard_sectors; 400 419 } else { 401 420 bio->bi_size = nr_sects << 9; 402 421 nr_sects = 0; ··· 435 414 bio_put(bio); 436 415 } 437 416 return ret; 417 + out_free_page: 418 + __free_page(page); 419 + out_free_bio: 420 + bio_put(bio); 421 + out: 422 + return -ENOMEM; 438 423 } 439 424 EXPORT_SYMBOL(blkdev_issue_discard);
+15 -6
block/blk-core.c
··· 34 34 #include "blk.h" 35 35 36 36 EXPORT_TRACEPOINT_SYMBOL_GPL(block_remap); 37 + EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap); 37 38 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete); 38 39 39 40 static int __make_request(struct request_queue *q, struct bio *bio); ··· 70 69 part_stat_inc(cpu, part, merges[rw]); 71 70 else { 72 71 part_round_stats(cpu, part); 73 - part_inc_in_flight(part, rw); 72 + part_inc_in_flight(part); 74 73 } 75 74 76 75 part_stat_unlock(); ··· 1032 1031 1033 1032 if (part->in_flight) { 1034 1033 __part_stat_add(cpu, part, time_in_queue, 1035 - part_in_flight(part) * (now - part->stamp)); 1034 + part->in_flight * (now - part->stamp)); 1036 1035 __part_stat_add(cpu, part, io_ticks, (now - part->stamp)); 1037 1036 } 1038 1037 part->stamp = now; ··· 1125 1124 req->cmd_flags |= REQ_DISCARD; 1126 1125 if (bio_rw_flagged(bio, BIO_RW_BARRIER)) 1127 1126 req->cmd_flags |= REQ_SOFTBARRIER; 1128 - req->q->prepare_discard_fn(req->q, req); 1129 1127 } else if (unlikely(bio_rw_flagged(bio, BIO_RW_BARRIER))) 1130 1128 req->cmd_flags |= REQ_HARDBARRIER; 1131 1129 ··· 1437 1437 goto end_io; 1438 1438 } 1439 1439 1440 - if (unlikely(nr_sectors > queue_max_hw_sectors(q))) { 1440 + if (unlikely(!bio_rw_flagged(bio, BIO_RW_DISCARD) && 1441 + nr_sectors > queue_max_hw_sectors(q))) { 1441 1442 printk(KERN_ERR "bio too big device %s (%u > %u)\n", 1442 1443 bdevname(bio->bi_bdev, b), 1443 1444 bio_sectors(bio), ··· 1471 1470 goto end_io; 1472 1471 1473 1472 if (bio_rw_flagged(bio, BIO_RW_DISCARD) && 1474 - !q->prepare_discard_fn) { 1473 + !blk_queue_discard(q)) { 1475 1474 err = -EOPNOTSUPP; 1476 1475 goto end_io; 1477 1476 } ··· 1739 1738 part_stat_inc(cpu, part, ios[rw]); 1740 1739 part_stat_add(cpu, part, ticks[rw], duration); 1741 1740 part_round_stats(cpu, part); 1742 - part_dec_in_flight(part, rw); 1741 + part_dec_in_flight(part); 1743 1742 1744 1743 part_stat_unlock(); 1745 1744 } ··· 2491 2490 return queue_work(kblockd_workqueue, work); 2492 2491 } 2493 
2492 EXPORT_SYMBOL(kblockd_schedule_work); 2493 + 2494 + int kblockd_schedule_delayed_work(struct request_queue *q, 2495 + struct delayed_work *work, 2496 + unsigned long delay) 2497 + { 2498 + return queue_delayed_work(kblockd_workqueue, work, delay); 2499 + } 2500 + EXPORT_SYMBOL(kblockd_schedule_delayed_work); 2494 2501 2495 2502 int __init blk_dev_init(void) 2496 2503 {
+1 -1
block/blk-merge.c
··· 351 351 part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req)); 352 352 353 353 part_round_stats(cpu, part); 354 - part_dec_in_flight(part, rq_data_dir(req)); 354 + part_dec_in_flight(part); 355 355 356 356 part_stat_unlock(); 357 357 }
+16 -18
block/blk-settings.c
··· 34 34 EXPORT_SYMBOL(blk_queue_prep_rq); 35 35 36 36 /** 37 - * blk_queue_set_discard - set a discard_sectors function for queue 38 - * @q: queue 39 - * @dfn: prepare_discard function 40 - * 41 - * It's possible for a queue to register a discard callback which is used 42 - * to transform a discard request into the appropriate type for the 43 - * hardware. If none is registered, then discard requests are failed 44 - * with %EOPNOTSUPP. 45 - * 46 - */ 47 - void blk_queue_set_discard(struct request_queue *q, prepare_discard_fn *dfn) 48 - { 49 - q->prepare_discard_fn = dfn; 50 - } 51 - EXPORT_SYMBOL(blk_queue_set_discard); 52 - 53 - /** 54 37 * blk_queue_merge_bvec - set a merge_bvec function for queue 55 38 * @q: queue 56 39 * @mbfn: merge_bvec_fn ··· 94 111 lim->max_hw_segments = MAX_HW_SEGMENTS; 95 112 lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK; 96 113 lim->max_segment_size = MAX_SEGMENT_SIZE; 97 - lim->max_sectors = lim->max_hw_sectors = SAFE_MAX_SECTORS; 114 + lim->max_sectors = BLK_DEF_MAX_SECTORS; 115 + lim->max_hw_sectors = INT_MAX; 116 + lim->max_discard_sectors = SAFE_MAX_SECTORS; 98 117 lim->logical_block_size = lim->physical_block_size = lim->io_min = 512; 99 118 lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT); 100 119 lim->alignment_offset = 0; ··· 149 164 q->unplug_timer.data = (unsigned long)q; 150 165 151 166 blk_set_default_limits(&q->limits); 167 + blk_queue_max_sectors(q, SAFE_MAX_SECTORS); 152 168 153 169 /* 154 170 * If the caller didn't supply a lock, fall back to our embedded ··· 238 252 q->limits.max_hw_sectors = max_sectors; 239 253 } 240 254 EXPORT_SYMBOL(blk_queue_max_hw_sectors); 255 + 256 + /** 257 + * blk_queue_max_discard_sectors - set max sectors for a single discard 258 + * @q: the request queue for the device 259 + * @max_discard: maximum number of sectors to discard 260 + **/ 261 + void blk_queue_max_discard_sectors(struct request_queue *q, 262 + unsigned int max_discard_sectors) 263 + { 264 + 
q->limits.max_discard_sectors = max_discard_sectors; 265 + } 266 + EXPORT_SYMBOL(blk_queue_max_discard_sectors); 241 267 242 268 /** 243 269 * blk_queue_max_phys_segments - set max phys segments for a request for this queue
+6 -5
block/blk-sysfs.c
··· 452 452 if (ret) { 453 453 kobject_uevent(&q->kobj, KOBJ_REMOVE); 454 454 kobject_del(&q->kobj); 455 + blk_trace_remove_sysfs(disk_to_dev(disk)); 455 456 return ret; 456 457 } 457 458 ··· 466 465 if (WARN_ON(!q)) 467 466 return; 468 467 469 - if (q->request_fn) { 468 + if (q->request_fn) 470 469 elv_unregister_queue(q); 471 470 472 - kobject_uevent(&q->kobj, KOBJ_REMOVE); 473 - kobject_del(&q->kobj); 474 - kobject_put(&disk_to_dev(disk)->kobj); 475 - } 471 + kobject_uevent(&q->kobj, KOBJ_REMOVE); 472 + kobject_del(&q->kobj); 473 + blk_trace_remove_sysfs(disk_to_dev(disk)); 474 + kobject_put(&disk_to_dev(disk)->kobj); 476 475 }
+46 -17
block/cfq-iosched.c
··· 150 150 * idle window management 151 151 */ 152 152 struct timer_list idle_slice_timer; 153 - struct work_struct unplug_work; 153 + struct delayed_work unplug_work; 154 154 155 155 struct cfq_queue *active_queue; 156 156 struct cfq_io_context *active_cic; ··· 173 173 unsigned int cfq_slice[2]; 174 174 unsigned int cfq_slice_async_rq; 175 175 unsigned int cfq_slice_idle; 176 + unsigned int cfq_latency; 176 177 177 178 struct list_head cic_list; 178 179 ··· 181 180 * Fallback dummy cfqq for extreme OOM conditions 182 181 */ 183 182 struct cfq_queue oom_cfqq; 183 + 184 + unsigned long last_end_sync_rq; 184 185 }; 185 186 186 187 enum cfqq_state_flags { ··· 268 265 * scheduler run of queue, if there are requests pending and no one in the 269 266 * driver that will restart queueing 270 267 */ 271 - static inline void cfq_schedule_dispatch(struct cfq_data *cfqd) 268 + static inline void cfq_schedule_dispatch(struct cfq_data *cfqd, 269 + unsigned long delay) 272 270 { 273 271 if (cfqd->busy_queues) { 274 272 cfq_log(cfqd, "schedule dispatch"); 275 - kblockd_schedule_work(cfqd->queue, &cfqd->unplug_work); 273 + kblockd_schedule_delayed_work(cfqd->queue, &cfqd->unplug_work, 274 + delay); 276 275 } 277 276 } 278 277 ··· 1331 1326 return 0; 1332 1327 1333 1328 /* 1334 - * we are the only queue, allow up to 4 times of 'quantum' 1329 + * Sole queue user, allow bigger slice 1335 1330 */ 1336 - if (cfqq->dispatched >= 4 * max_dispatch) 1337 - return 0; 1331 + max_dispatch *= 4; 1338 1332 } 1333 + 1334 + /* 1335 + * Async queues must wait a bit before being allowed dispatch. 
1336 + * We also ramp up the dispatch depth gradually for async IO, 1337 + * based on the last sync IO we serviced 1338 + */ 1339 + if (!cfq_cfqq_sync(cfqq) && cfqd->cfq_latency) { 1340 + unsigned long last_sync = jiffies - cfqd->last_end_sync_rq; 1341 + unsigned int depth; 1342 + 1343 + depth = last_sync / cfqd->cfq_slice[1]; 1344 + if (!depth && !cfqq->dispatched) 1345 + depth = 1; 1346 + if (depth < max_dispatch) 1347 + max_dispatch = depth; 1348 + } 1349 + 1350 + if (cfqq->dispatched >= max_dispatch) 1351 + return 0; 1339 1352 1340 1353 /* 1341 1354 * Dispatch a request from this cfqq ··· 1399 1376 1400 1377 if (unlikely(cfqd->active_queue == cfqq)) { 1401 1378 __cfq_slice_expired(cfqd, cfqq, 0); 1402 - cfq_schedule_dispatch(cfqd); 1379 + cfq_schedule_dispatch(cfqd, 0); 1403 1380 } 1404 1381 1405 1382 kmem_cache_free(cfq_pool, cfqq); ··· 1494 1471 { 1495 1472 if (unlikely(cfqq == cfqd->active_queue)) { 1496 1473 __cfq_slice_expired(cfqd, cfqq, 0); 1497 - cfq_schedule_dispatch(cfqd); 1474 + cfq_schedule_dispatch(cfqd, 0); 1498 1475 } 1499 1476 1500 1477 cfq_put_queue(cfqq); ··· 1974 1951 enable_idle = old_idle = cfq_cfqq_idle_window(cfqq); 1975 1952 1976 1953 if (!atomic_read(&cic->ioc->nr_tasks) || !cfqd->cfq_slice_idle || 1977 - (cfqd->hw_tag && CIC_SEEKY(cic))) 1954 + (!cfqd->cfq_latency && cfqd->hw_tag && CIC_SEEKY(cic))) 1978 1955 enable_idle = 0; 1979 1956 else if (sample_valid(cic->ttime_samples)) { 1980 1957 if (cic->ttime_mean > cfqd->cfq_slice_idle) ··· 2180 2157 if (cfq_cfqq_sync(cfqq)) 2181 2158 cfqd->sync_flight--; 2182 2159 2183 - if (sync) 2160 + if (sync) { 2184 2161 RQ_CIC(rq)->last_end_request = now; 2162 + cfqd->last_end_sync_rq = now; 2163 + } 2185 2164 2186 2165 /* 2187 2166 * If this is the active queue, check if it needs to be expired, ··· 2211 2186 } 2212 2187 2213 2188 if (!rq_in_driver(cfqd)) 2214 - cfq_schedule_dispatch(cfqd); 2189 + cfq_schedule_dispatch(cfqd, 0); 2215 2190 } 2216 2191 2217 2192 /* ··· 2341 2316 if (cic) 2342 2317 
put_io_context(cic->ioc); 2343 2318 2344 - cfq_schedule_dispatch(cfqd); 2319 + cfq_schedule_dispatch(cfqd, 0); 2345 2320 spin_unlock_irqrestore(q->queue_lock, flags); 2346 2321 cfq_log(cfqd, "set_request fail"); 2347 2322 return 1; ··· 2350 2325 static void cfq_kick_queue(struct work_struct *work) 2351 2326 { 2352 2327 struct cfq_data *cfqd = 2353 - container_of(work, struct cfq_data, unplug_work); 2328 + container_of(work, struct cfq_data, unplug_work.work); 2354 2329 struct request_queue *q = cfqd->queue; 2355 2330 2356 2331 spin_lock_irq(q->queue_lock); ··· 2404 2379 expire: 2405 2380 cfq_slice_expired(cfqd, timed_out); 2406 2381 out_kick: 2407 - cfq_schedule_dispatch(cfqd); 2382 + cfq_schedule_dispatch(cfqd, 0); 2408 2383 out_cont: 2409 2384 spin_unlock_irqrestore(cfqd->queue->queue_lock, flags); 2410 2385 } ··· 2412 2387 static void cfq_shutdown_timer_wq(struct cfq_data *cfqd) 2413 2388 { 2414 2389 del_timer_sync(&cfqd->idle_slice_timer); 2415 - cancel_work_sync(&cfqd->unplug_work); 2390 + cancel_delayed_work_sync(&cfqd->unplug_work); 2416 2391 } 2417 2392 2418 2393 static void cfq_put_async_queues(struct cfq_data *cfqd) ··· 2494 2469 cfqd->idle_slice_timer.function = cfq_idle_slice_timer; 2495 2470 cfqd->idle_slice_timer.data = (unsigned long) cfqd; 2496 2471 2497 - INIT_WORK(&cfqd->unplug_work, cfq_kick_queue); 2472 + INIT_DELAYED_WORK(&cfqd->unplug_work, cfq_kick_queue); 2498 2473 2499 2474 cfqd->cfq_quantum = cfq_quantum; 2500 2475 cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0]; ··· 2505 2480 cfqd->cfq_slice[1] = cfq_slice_sync; 2506 2481 cfqd->cfq_slice_async_rq = cfq_slice_async_rq; 2507 2482 cfqd->cfq_slice_idle = cfq_slice_idle; 2483 + cfqd->cfq_latency = 1; 2508 2484 cfqd->hw_tag = 1; 2509 - 2485 + cfqd->last_end_sync_rq = jiffies; 2510 2486 return cfqd; 2511 2487 } 2512 2488 ··· 2575 2549 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1); 2576 2550 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1); 2577 2551 
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0); 2552 + SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0); 2578 2553 #undef SHOW_FUNCTION 2579 2554 2580 2555 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ ··· 2607 2580 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1); 2608 2581 STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, 2609 2582 UINT_MAX, 0); 2583 + STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0); 2610 2584 #undef STORE_FUNCTION 2611 2585 2612 2586 #define CFQ_ATTR(name) \ ··· 2623 2595 CFQ_ATTR(slice_async), 2624 2596 CFQ_ATTR(slice_async_rq), 2625 2597 CFQ_ATTR(slice_idle), 2598 + CFQ_ATTR(low_latency), 2626 2599 __ATTR_NULL 2627 2600 }; 2628 2601
+13
block/compat_ioctl.c
··· 21 21 return put_user(val, (compat_int_t __user *)compat_ptr(arg)); 22 22 } 23 23 24 + static int compat_put_uint(unsigned long arg, unsigned int val) 25 + { 26 + return put_user(val, (compat_uint_t __user *)compat_ptr(arg)); 27 + } 28 + 24 29 static int compat_put_long(unsigned long arg, long val) 25 30 { 26 31 return put_user(val, (compat_long_t __user *)compat_ptr(arg)); ··· 739 734 switch (cmd) { 740 735 case HDIO_GETGEO: 741 736 return compat_hdio_getgeo(disk, bdev, compat_ptr(arg)); 737 + case BLKPBSZGET: 738 + return compat_put_uint(arg, bdev_physical_block_size(bdev)); 739 + case BLKIOMIN: 740 + return compat_put_uint(arg, bdev_io_min(bdev)); 741 + case BLKIOOPT: 742 + return compat_put_uint(arg, bdev_io_opt(bdev)); 743 + case BLKALIGNOFF: 744 + return compat_put_int(arg, bdev_alignment_offset(bdev)); 742 745 case BLKFLSBUF: 743 746 case BLKROSET: 744 747 case BLKDISCARD:
+1 -3
block/genhd.c
··· 869 869 static DEVICE_ATTR(alignment_offset, S_IRUGO, disk_alignment_offset_show, NULL); 870 870 static DEVICE_ATTR(capability, S_IRUGO, disk_capability_show, NULL); 871 871 static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL); 872 - static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL); 873 872 #ifdef CONFIG_FAIL_MAKE_REQUEST 874 873 static struct device_attribute dev_attr_fail = 875 874 __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store); ··· 888 889 &dev_attr_alignment_offset.attr, 889 890 &dev_attr_capability.attr, 890 891 &dev_attr_stat.attr, 891 - &dev_attr_inflight.attr, 892 892 #ifdef CONFIG_FAIL_MAKE_REQUEST 893 893 &dev_attr_fail.attr, 894 894 #endif ··· 1053 1055 part_stat_read(hd, merges[1]), 1054 1056 (unsigned long long)part_stat_read(hd, sectors[1]), 1055 1057 jiffies_to_msecs(part_stat_read(hd, ticks[1])), 1056 - part_in_flight(hd), 1058 + hd->in_flight, 1057 1059 jiffies_to_msecs(part_stat_read(hd, io_ticks)), 1058 1060 jiffies_to_msecs(part_stat_read(hd, time_in_queue)) 1059 1061 );
+15 -2
block/ioctl.c
··· 138 138 return put_user(val, (int __user *)arg); 139 139 } 140 140 141 + static int put_uint(unsigned long arg, unsigned int val) 142 + { 143 + return put_user(val, (unsigned int __user *)arg); 144 + } 145 + 141 146 static int put_long(unsigned long arg, long val) 142 147 { 143 148 return put_user(val, (long __user *)arg); ··· 268 263 return put_long(arg, (bdi->ra_pages * PAGE_CACHE_SIZE) / 512); 269 264 case BLKROGET: 270 265 return put_int(arg, bdev_read_only(bdev) != 0); 271 - case BLKBSZGET: /* get the logical block size (cf. BLKSSZGET) */ 266 + case BLKBSZGET: /* get block device soft block size (cf. BLKSSZGET) */ 272 267 return put_int(arg, block_size(bdev)); 273 - case BLKSSZGET: /* get block device hardware sector size */ 268 + case BLKSSZGET: /* get block device logical block size */ 274 269 return put_int(arg, bdev_logical_block_size(bdev)); 270 + case BLKPBSZGET: /* get block device physical block size */ 271 + return put_uint(arg, bdev_physical_block_size(bdev)); 272 + case BLKIOMIN: 273 + return put_uint(arg, bdev_io_min(bdev)); 274 + case BLKIOOPT: 275 + return put_uint(arg, bdev_io_opt(bdev)); 276 + case BLKALIGNOFF: 277 + return put_int(arg, bdev_alignment_offset(bdev)); 275 278 case BLKSECTGET: 276 279 return put_ushort(arg, queue_max_sectors(bdev_get_queue(bdev))); 277 280 case BLKRASET:
+75 -89
drivers/block/DAC960.c
··· 38 38 #include <linux/slab.h> 39 39 #include <linux/smp_lock.h> 40 40 #include <linux/proc_fs.h> 41 + #include <linux/seq_file.h> 41 42 #include <linux/reboot.h> 42 43 #include <linux/spinlock.h> 43 44 #include <linux/timer.h> ··· 6423 6422 return true; 6424 6423 } 6425 6424 6426 - 6427 - /* 6428 - DAC960_ProcReadStatus implements reading /proc/rd/status. 6429 - */ 6430 - 6431 - static int DAC960_ProcReadStatus(char *Page, char **Start, off_t Offset, 6432 - int Count, int *EOF, void *Data) 6425 + static int dac960_proc_show(struct seq_file *m, void *v) 6433 6426 { 6434 6427 unsigned char *StatusMessage = "OK\n"; 6435 - int ControllerNumber, BytesAvailable; 6428 + int ControllerNumber; 6436 6429 for (ControllerNumber = 0; 6437 6430 ControllerNumber < DAC960_ControllerCount; 6438 6431 ControllerNumber++) ··· 6439 6444 break; 6440 6445 } 6441 6446 } 6442 - BytesAvailable = strlen(StatusMessage) - Offset; 6443 - if (Count >= BytesAvailable) 6444 - { 6445 - Count = BytesAvailable; 6446 - *EOF = true; 6447 - } 6448 - if (Count <= 0) return 0; 6449 - *Start = Page; 6450 - memcpy(Page, &StatusMessage[Offset], Count); 6451 - return Count; 6447 + seq_puts(m, StatusMessage); 6448 + return 0; 6452 6449 } 6453 6450 6454 - 6455 - /* 6456 - DAC960_ProcReadInitialStatus implements reading /proc/rd/cN/initial_status. 
6457 - */ 6458 - 6459 - static int DAC960_ProcReadInitialStatus(char *Page, char **Start, off_t Offset, 6460 - int Count, int *EOF, void *Data) 6451 + static int dac960_proc_open(struct inode *inode, struct file *file) 6461 6452 { 6462 - DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data; 6463 - int BytesAvailable = Controller->InitialStatusLength - Offset; 6464 - if (Count >= BytesAvailable) 6465 - { 6466 - Count = BytesAvailable; 6467 - *EOF = true; 6468 - } 6469 - if (Count <= 0) return 0; 6470 - *Start = Page; 6471 - memcpy(Page, &Controller->CombinedStatusBuffer[Offset], Count); 6472 - return Count; 6453 + return single_open(file, dac960_proc_show, NULL); 6473 6454 } 6474 6455 6456 + static const struct file_operations dac960_proc_fops = { 6457 + .owner = THIS_MODULE, 6458 + .open = dac960_proc_open, 6459 + .read = seq_read, 6460 + .llseek = seq_lseek, 6461 + .release = single_release, 6462 + }; 6475 6463 6476 - /* 6477 - DAC960_ProcReadCurrentStatus implements reading /proc/rd/cN/current_status. 
6478 - */ 6479 - 6480 - static int DAC960_ProcReadCurrentStatus(char *Page, char **Start, off_t Offset, 6481 - int Count, int *EOF, void *Data) 6464 + static int dac960_initial_status_proc_show(struct seq_file *m, void *v) 6482 6465 { 6483 - DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data; 6466 + DAC960_Controller_T *Controller = (DAC960_Controller_T *)m->private; 6467 + seq_printf(m, "%.*s", Controller->InitialStatusLength, Controller->CombinedStatusBuffer); 6468 + return 0; 6469 + } 6470 + 6471 + static int dac960_initial_status_proc_open(struct inode *inode, struct file *file) 6472 + { 6473 + return single_open(file, dac960_initial_status_proc_show, PDE(inode)->data); 6474 + } 6475 + 6476 + static const struct file_operations dac960_initial_status_proc_fops = { 6477 + .owner = THIS_MODULE, 6478 + .open = dac960_initial_status_proc_open, 6479 + .read = seq_read, 6480 + .llseek = seq_lseek, 6481 + .release = single_release, 6482 + }; 6483 + 6484 + static int dac960_current_status_proc_show(struct seq_file *m, void *v) 6485 + { 6486 + DAC960_Controller_T *Controller = (DAC960_Controller_T *) m->private; 6484 6487 unsigned char *StatusMessage = 6485 6488 "No Rebuild or Consistency Check in Progress\n"; 6486 6489 int ProgressMessageLength = strlen(StatusMessage); 6487 - int BytesAvailable; 6488 6490 if (jiffies != Controller->LastCurrentStatusTime) 6489 6491 { 6490 6492 Controller->CurrentStatusLength = 0; ··· 6505 6513 } 6506 6514 Controller->LastCurrentStatusTime = jiffies; 6507 6515 } 6508 - BytesAvailable = Controller->CurrentStatusLength - Offset; 6509 - if (Count >= BytesAvailable) 6510 - { 6511 - Count = BytesAvailable; 6512 - *EOF = true; 6513 - } 6514 - if (Count <= 0) return 0; 6515 - *Start = Page; 6516 - memcpy(Page, &Controller->CurrentStatusBuffer[Offset], Count); 6517 - return Count; 6516 + seq_printf(m, "%.*s", Controller->CurrentStatusLength, Controller->CurrentStatusBuffer); 6517 + return 0; 6518 6518 } 6519 6519 6520 - 6521 - /* 6522 
- DAC960_ProcReadUserCommand implements reading /proc/rd/cN/user_command. 6523 - */ 6524 - 6525 - static int DAC960_ProcReadUserCommand(char *Page, char **Start, off_t Offset, 6526 - int Count, int *EOF, void *Data) 6520 + static int dac960_current_status_proc_open(struct inode *inode, struct file *file) 6527 6521 { 6528 - DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data; 6529 - int BytesAvailable = Controller->UserStatusLength - Offset; 6530 - if (Count >= BytesAvailable) 6531 - { 6532 - Count = BytesAvailable; 6533 - *EOF = true; 6534 - } 6535 - if (Count <= 0) return 0; 6536 - *Start = Page; 6537 - memcpy(Page, &Controller->UserStatusBuffer[Offset], Count); 6538 - return Count; 6522 + return single_open(file, dac960_current_status_proc_show, PDE(inode)->data); 6539 6523 } 6540 6524 6525 + static const struct file_operations dac960_current_status_proc_fops = { 6526 + .owner = THIS_MODULE, 6527 + .open = dac960_current_status_proc_open, 6528 + .read = seq_read, 6529 + .llseek = seq_lseek, 6530 + .release = single_release, 6531 + }; 6541 6532 6542 - /* 6543 - DAC960_ProcWriteUserCommand implements writing /proc/rd/cN/user_command. 
6544 - */ 6533 + static int dac960_user_command_proc_show(struct seq_file *m, void *v) 6534 + { 6535 + DAC960_Controller_T *Controller = (DAC960_Controller_T *)m->private; 6545 6536 6546 - static int DAC960_ProcWriteUserCommand(struct file *file, 6537 + seq_printf(m, "%.*s", Controller->UserStatusLength, Controller->UserStatusBuffer); 6538 + return 0; 6539 + } 6540 + 6541 + static int dac960_user_command_proc_open(struct inode *inode, struct file *file) 6542 + { 6543 + return single_open(file, dac960_user_command_proc_show, PDE(inode)->data); 6544 + } 6545 + 6546 + static ssize_t dac960_user_command_proc_write(struct file *file, 6547 6547 const char __user *Buffer, 6548 - unsigned long Count, void *Data) 6548 + size_t Count, loff_t *pos) 6549 6549 { 6550 - DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data; 6550 + DAC960_Controller_T *Controller = (DAC960_Controller_T *) PDE(file->f_path.dentry->d_inode)->data; 6551 6551 unsigned char CommandBuffer[80]; 6552 6552 int Length; 6553 6553 if (Count > sizeof(CommandBuffer)-1) return -EINVAL; ··· 6556 6572 ? Count : -EBUSY); 6557 6573 } 6558 6574 6575 + static const struct file_operations dac960_user_command_proc_fops = { 6576 + .owner = THIS_MODULE, 6577 + .open = dac960_user_command_proc_open, 6578 + .read = seq_read, 6579 + .llseek = seq_lseek, 6580 + .release = single_release, 6581 + .write = dac960_user_command_proc_write, 6582 + }; 6559 6583 6560 6584 /* 6561 6585 DAC960_CreateProcEntries creates the /proc/rd/... 
entries for the ··· 6578 6586 6579 6587 if (DAC960_ProcDirectoryEntry == NULL) { 6580 6588 DAC960_ProcDirectoryEntry = proc_mkdir("rd", NULL); 6581 - StatusProcEntry = create_proc_read_entry("status", 0, 6589 + StatusProcEntry = proc_create("status", 0, 6582 6590 DAC960_ProcDirectoryEntry, 6583 - DAC960_ProcReadStatus, NULL); 6591 + &dac960_proc_fops); 6584 6592 } 6585 6593 6586 6594 sprintf(Controller->ControllerName, "c%d", Controller->ControllerNumber); 6587 6595 ControllerProcEntry = proc_mkdir(Controller->ControllerName, 6588 6596 DAC960_ProcDirectoryEntry); 6589 - create_proc_read_entry("initial_status", 0, ControllerProcEntry, 6590 - DAC960_ProcReadInitialStatus, Controller); 6591 - create_proc_read_entry("current_status", 0, ControllerProcEntry, 6592 - DAC960_ProcReadCurrentStatus, Controller); 6593 - UserCommandProcEntry = 6594 - create_proc_read_entry("user_command", S_IWUSR | S_IRUSR, 6595 - ControllerProcEntry, DAC960_ProcReadUserCommand, 6596 - Controller); 6597 - UserCommandProcEntry->write_proc = DAC960_ProcWriteUserCommand; 6597 + proc_create_data("initial_status", 0, ControllerProcEntry, &dac960_initial_status_proc_fops, Controller); 6598 + proc_create_data("current_status", 0, ControllerProcEntry, &dac960_current_status_proc_fops, Controller); 6599 + UserCommandProcEntry = proc_create_data("user_command", S_IWUSR | S_IRUSR, ControllerProcEntry, &dac960_user_command_proc_fops, Controller); 6598 6600 Controller->ControllerProcEntry = ControllerProcEntry; 6599 6601 } 6600 6602
+545 -206
drivers/block/cciss.c
··· 36 36 #include <linux/proc_fs.h> 37 37 #include <linux/seq_file.h> 38 38 #include <linux/init.h> 39 + #include <linux/jiffies.h> 39 40 #include <linux/hdreg.h> 40 41 #include <linux/spinlock.h> 41 42 #include <linux/compat.h> 43 + #include <linux/mutex.h> 42 44 #include <asm/uaccess.h> 43 45 #include <asm/io.h> 44 46 ··· 157 155 158 156 static ctlr_info_t *hba[MAX_CTLR]; 159 157 158 + static struct task_struct *cciss_scan_thread; 159 + static DEFINE_MUTEX(scan_mutex); 160 + static LIST_HEAD(scan_q); 161 + 160 162 static void do_cciss_request(struct request_queue *q); 161 163 static irqreturn_t do_cciss_intr(int irq, void *dev_id); 162 164 static int cciss_open(struct block_device *bdev, fmode_t mode); ··· 170 164 static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo); 171 165 172 166 static int cciss_revalidate(struct gendisk *disk); 173 - static int rebuild_lun_table(ctlr_info_t *h, int first_time); 167 + static int rebuild_lun_table(ctlr_info_t *h, int first_time, int via_ioctl); 174 168 static int deregister_disk(ctlr_info_t *h, int drv_index, 175 - int clear_all); 169 + int clear_all, int via_ioctl); 176 170 177 171 static void cciss_read_capacity(int ctlr, int logvol, int withirq, 178 172 sector_t *total_size, unsigned int *block_size); ··· 195 189 static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c); 196 190 197 191 static void fail_all_cmds(unsigned long ctlr); 192 + static int add_to_scan_list(struct ctlr_info *h); 198 193 static int scan_thread(void *data); 199 194 static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c); 195 + static void cciss_hba_release(struct device *dev); 196 + static void cciss_device_release(struct device *dev); 197 + static void cciss_free_gendisk(ctlr_info_t *h, int drv_index); 198 + static void cciss_free_drive_info(ctlr_info_t *h, int drv_index); 200 199 201 200 #ifdef CONFIG_PROC_FS 202 201 static void cciss_procinit(int i); ··· 256 245 257 246 #include 
"cciss_scsi.c" /* For SCSI tape support */ 258 247 259 - #define RAID_UNKNOWN 6 248 + static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", 249 + "UNKNOWN" 250 + }; 251 + #define RAID_UNKNOWN (sizeof(raid_label) / sizeof(raid_label[0])-1) 260 252 261 253 #ifdef CONFIG_PROC_FS 262 254 ··· 269 255 #define ENG_GIG 1000000000 270 256 #define ENG_GIG_FACTOR (ENG_GIG/512) 271 257 #define ENGAGE_SCSI "engage scsi" 272 - static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", 273 - "UNKNOWN" 274 - }; 275 258 276 259 static struct proc_dir_entry *proc_cciss; 277 260 ··· 329 318 ctlr_info_t *h = seq->private; 330 319 unsigned ctlr = h->ctlr; 331 320 loff_t *pos = v; 332 - drive_info_struct *drv = &h->drv[*pos]; 321 + drive_info_struct *drv = h->drv[*pos]; 333 322 334 323 if (*pos > h->highest_lun) 335 324 return 0; ··· 342 331 vol_sz_frac *= 100; 343 332 sector_div(vol_sz_frac, ENG_GIG_FACTOR); 344 333 345 - if (drv->raid_level > 5) 334 + if (drv->raid_level < 0 || drv->raid_level > RAID_UNKNOWN) 346 335 drv->raid_level = RAID_UNKNOWN; 347 336 seq_printf(seq, "cciss/c%dd%d:" 348 337 "\t%4u.%02uGB\tRAID %s\n", ··· 465 454 #define to_hba(n) container_of(n, struct ctlr_info, dev) 466 455 #define to_drv(n) container_of(n, drive_info_struct, dev) 467 456 468 - static struct device_type cciss_host_type = { 469 - .name = "cciss_host", 470 - }; 457 + static ssize_t host_store_rescan(struct device *dev, 458 + struct device_attribute *attr, 459 + const char *buf, size_t count) 460 + { 461 + struct ctlr_info *h = to_hba(dev); 462 + 463 + add_to_scan_list(h); 464 + wake_up_process(cciss_scan_thread); 465 + wait_for_completion_interruptible(&h->scan_wait); 466 + 467 + return count; 468 + } 469 + DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan); 471 470 472 471 static ssize_t dev_show_unique_id(struct device *dev, 473 472 struct device_attribute *attr, ··· 581 560 } 582 561 DEVICE_ATTR(rev, S_IRUGO, dev_show_rev, NULL); 583 562 563 + static 
ssize_t cciss_show_lunid(struct device *dev, 564 + struct device_attribute *attr, char *buf) 565 + { 566 + drive_info_struct *drv = to_drv(dev); 567 + struct ctlr_info *h = to_hba(drv->dev.parent); 568 + unsigned long flags; 569 + unsigned char lunid[8]; 570 + 571 + spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); 572 + if (h->busy_configuring) { 573 + spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 574 + return -EBUSY; 575 + } 576 + if (!drv->heads) { 577 + spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 578 + return -ENOTTY; 579 + } 580 + memcpy(lunid, drv->LunID, sizeof(lunid)); 581 + spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 582 + return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", 583 + lunid[0], lunid[1], lunid[2], lunid[3], 584 + lunid[4], lunid[5], lunid[6], lunid[7]); 585 + } 586 + DEVICE_ATTR(lunid, S_IRUGO, cciss_show_lunid, NULL); 587 + 588 + static ssize_t cciss_show_raid_level(struct device *dev, 589 + struct device_attribute *attr, char *buf) 590 + { 591 + drive_info_struct *drv = to_drv(dev); 592 + struct ctlr_info *h = to_hba(drv->dev.parent); 593 + int raid; 594 + unsigned long flags; 595 + 596 + spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); 597 + if (h->busy_configuring) { 598 + spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 599 + return -EBUSY; 600 + } 601 + raid = drv->raid_level; 602 + spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 603 + if (raid < 0 || raid > RAID_UNKNOWN) 604 + raid = RAID_UNKNOWN; 605 + 606 + return snprintf(buf, strlen(raid_label[raid]) + 7, "RAID %s\n", 607 + raid_label[raid]); 608 + } 609 + DEVICE_ATTR(raid_level, S_IRUGO, cciss_show_raid_level, NULL); 610 + 611 + static ssize_t cciss_show_usage_count(struct device *dev, 612 + struct device_attribute *attr, char *buf) 613 + { 614 + drive_info_struct *drv = to_drv(dev); 615 + struct ctlr_info *h = to_hba(drv->dev.parent); 616 + unsigned long flags; 617 + int count; 618 + 619 + spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); 620 + if 
(h->busy_configuring) { 621 + spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 622 + return -EBUSY; 623 + } 624 + count = drv->usage_count; 625 + spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 626 + return snprintf(buf, 20, "%d\n", count); 627 + } 628 + DEVICE_ATTR(usage_count, S_IRUGO, cciss_show_usage_count, NULL); 629 + 630 + static struct attribute *cciss_host_attrs[] = { 631 + &dev_attr_rescan.attr, 632 + NULL 633 + }; 634 + 635 + static struct attribute_group cciss_host_attr_group = { 636 + .attrs = cciss_host_attrs, 637 + }; 638 + 639 + static const struct attribute_group *cciss_host_attr_groups[] = { 640 + &cciss_host_attr_group, 641 + NULL 642 + }; 643 + 644 + static struct device_type cciss_host_type = { 645 + .name = "cciss_host", 646 + .groups = cciss_host_attr_groups, 647 + .release = cciss_hba_release, 648 + }; 649 + 584 650 static struct attribute *cciss_dev_attrs[] = { 585 651 &dev_attr_unique_id.attr, 586 652 &dev_attr_model.attr, 587 653 &dev_attr_vendor.attr, 588 654 &dev_attr_rev.attr, 655 + &dev_attr_lunid.attr, 656 + &dev_attr_raid_level.attr, 657 + &dev_attr_usage_count.attr, 589 658 NULL 590 659 }; 591 660 ··· 691 580 static struct device_type cciss_dev_type = { 692 581 .name = "cciss_device", 693 582 .groups = cciss_dev_attr_groups, 583 + .release = cciss_device_release, 694 584 }; 695 585 696 586 static struct bus_type cciss_bus_type = { 697 587 .name = "cciss", 698 588 }; 699 589 590 + /* 591 + * cciss_hba_release is called when the reference count 592 + * of h->dev goes to zero. 593 + */ 594 + static void cciss_hba_release(struct device *dev) 595 + { 596 + /* 597 + * nothing to do, but need this to avoid a warning 598 + * about not having a release handler from lib/kref.c. 599 + */ 600 + } 700 601 701 602 /* 702 603 * Initialize sysfs entry for each controller. 
This sets up and registers ··· 732 609 static void cciss_destroy_hba_sysfs_entry(struct ctlr_info *h) 733 610 { 734 611 device_del(&h->dev); 612 + put_device(&h->dev); /* final put. */ 613 + } 614 + 615 + /* cciss_device_release is called when the reference count 616 + * of h->drv[x]dev goes to zero. 617 + */ 618 + static void cciss_device_release(struct device *dev) 619 + { 620 + drive_info_struct *drv = to_drv(dev); 621 + kfree(drv); 735 622 } 736 623 737 624 /* ··· 750 617 * /sys/bus/pci/devices/<dev/ccis#/. We also create a link from 751 618 * /sys/block/cciss!c#d# to this entry. 752 619 */ 753 - static int cciss_create_ld_sysfs_entry(struct ctlr_info *h, 754 - drive_info_struct *drv, 620 + static long cciss_create_ld_sysfs_entry(struct ctlr_info *h, 755 621 int drv_index) 756 622 { 757 - device_initialize(&drv->dev); 758 - drv->dev.type = &cciss_dev_type; 759 - drv->dev.bus = &cciss_bus_type; 760 - dev_set_name(&drv->dev, "c%dd%d", h->ctlr, drv_index); 761 - drv->dev.parent = &h->dev; 762 - return device_add(&drv->dev); 623 + struct device *dev; 624 + 625 + if (h->drv[drv_index]->device_initialized) 626 + return 0; 627 + 628 + dev = &h->drv[drv_index]->dev; 629 + device_initialize(dev); 630 + dev->type = &cciss_dev_type; 631 + dev->bus = &cciss_bus_type; 632 + dev_set_name(dev, "c%dd%d", h->ctlr, drv_index); 633 + dev->parent = &h->dev; 634 + h->drv[drv_index]->device_initialized = 1; 635 + return device_add(dev); 763 636 } 764 637 765 638 /* 766 639 * Remove sysfs entries for a logical drive. 
767 640 */ 768 - static void cciss_destroy_ld_sysfs_entry(drive_info_struct *drv) 641 + static void cciss_destroy_ld_sysfs_entry(struct ctlr_info *h, int drv_index, 642 + int ctlr_exiting) 769 643 { 770 - device_del(&drv->dev); 644 + struct device *dev = &h->drv[drv_index]->dev; 645 + 646 + /* special case for c*d0, we only destroy it on controller exit */ 647 + if (drv_index == 0 && !ctlr_exiting) 648 + return; 649 + 650 + device_del(dev); 651 + put_device(dev); /* the "final" put. */ 652 + h->drv[drv_index] = NULL; 771 653 } 772 654 773 655 /* ··· 899 751 printk(KERN_DEBUG "cciss_open %s\n", bdev->bd_disk->disk_name); 900 752 #endif /* CCISS_DEBUG */ 901 753 902 - if (host->busy_initializing || drv->busy_configuring) 754 + if (drv->busy_configuring) 903 755 return -EBUSY; 904 756 /* 905 757 * Root is allowed to open raw volume zero even if it's not configured ··· 915 767 if (MINOR(bdev->bd_dev) & 0x0f) { 916 768 return -ENXIO; 917 769 /* if it is, make sure we have a LUN ID */ 918 - } else if (drv->LunID == 0) { 770 + } else if (memcmp(drv->LunID, CTLR_LUNID, 771 + sizeof(drv->LunID))) { 919 772 return -ENXIO; 920 773 } 921 774 } ··· 1281 1132 case CCISS_DEREGDISK: 1282 1133 case CCISS_REGNEWD: 1283 1134 case CCISS_REVALIDVOLS: 1284 - return rebuild_lun_table(host, 0); 1135 + return rebuild_lun_table(host, 0, 1); 1285 1136 1286 1137 case CCISS_GETLUNINFO:{ 1287 1138 LogvolInfo_struct luninfo; 1288 1139 1289 - luninfo.LunID = drv->LunID; 1140 + memcpy(&luninfo.LunID, drv->LunID, 1141 + sizeof(luninfo.LunID)); 1290 1142 luninfo.num_opens = drv->usage_count; 1291 1143 luninfo.num_parts = 0; 1292 1144 if (copy_to_user(argp, &luninfo, ··· 1625 1475 /* make sure the disk has been added and the drive is real 1626 1476 * because this can be called from the middle of init_one. 
1627 1477 */ 1628 - if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads)) 1478 + if (!h->drv[curr_queue]) 1479 + continue; 1480 + if (!(h->drv[curr_queue]->queue) || 1481 + !(h->drv[curr_queue]->heads)) 1629 1482 continue; 1630 1483 blk_start_queue(h->gendisk[curr_queue]->queue); 1631 1484 ··· 1685 1532 spin_unlock_irqrestore(&h->lock, flags); 1686 1533 } 1687 1534 1688 - static void log_unit_to_scsi3addr(ctlr_info_t *h, unsigned char scsi3addr[], 1689 - uint32_t log_unit) 1535 + static inline void log_unit_to_scsi3addr(ctlr_info_t *h, 1536 + unsigned char scsi3addr[], uint32_t log_unit) 1690 1537 { 1691 - log_unit = h->drv[log_unit].LunID & 0x03fff; 1692 - memset(&scsi3addr[4], 0, 4); 1693 - memcpy(&scsi3addr[0], &log_unit, 4); 1694 - scsi3addr[3] |= 0x40; 1538 + memcpy(scsi3addr, h->drv[log_unit]->LunID, 1539 + sizeof(h->drv[log_unit]->LunID)); 1695 1540 } 1696 1541 1697 1542 /* This function gets the SCSI vendor, model, and revision of a logical drive ··· 1766 1615 return; 1767 1616 } 1768 1617 1769 - static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk, 1618 + /* 1619 + * cciss_add_disk sets up the block device queue for a logical drive 1620 + */ 1621 + static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk, 1770 1622 int drv_index) 1771 1623 { 1772 1624 disk->queue = blk_init_queue(do_cciss_request, &h->lock); 1625 + if (!disk->queue) 1626 + goto init_queue_failure; 1773 1627 sprintf(disk->disk_name, "cciss/c%dd%d", h->ctlr, drv_index); 1774 1628 disk->major = h->major; 1775 1629 disk->first_minor = drv_index << NWD_SHIFT; 1776 1630 disk->fops = &cciss_fops; 1777 - disk->private_data = &h->drv[drv_index]; 1778 - disk->driverfs_dev = &h->drv[drv_index].dev; 1631 + if (cciss_create_ld_sysfs_entry(h, drv_index)) 1632 + goto cleanup_queue; 1633 + disk->private_data = h->drv[drv_index]; 1634 + disk->driverfs_dev = &h->drv[drv_index]->dev; 1779 1635 1780 1636 /* Set up queue information */ 1781 1637 blk_queue_bounce_limit(disk->queue, 
h->pdev->dma_mask); ··· 1800 1642 disk->queue->queuedata = h; 1801 1643 1802 1644 blk_queue_logical_block_size(disk->queue, 1803 - h->drv[drv_index].block_size); 1645 + h->drv[drv_index]->block_size); 1804 1646 1805 1647 /* Make sure all queue data is written out before */ 1806 - /* setting h->drv[drv_index].queue, as setting this */ 1648 + /* setting h->drv[drv_index]->queue, as setting this */ 1807 1649 /* allows the interrupt handler to start the queue */ 1808 1650 wmb(); 1809 - h->drv[drv_index].queue = disk->queue; 1651 + h->drv[drv_index]->queue = disk->queue; 1810 1652 add_disk(disk); 1653 + return 0; 1654 + 1655 + cleanup_queue: 1656 + blk_cleanup_queue(disk->queue); 1657 + disk->queue = NULL; 1658 + init_queue_failure: 1659 + return -1; 1811 1660 } 1812 1661 1813 1662 /* This function will check the usage_count of the drive to be updated/added. ··· 1827 1662 * is also the controller node. Any changes to disk 0 will show up on 1828 1663 * the next reboot. 1829 1664 */ 1830 - static void cciss_update_drive_info(int ctlr, int drv_index, int first_time) 1665 + static void cciss_update_drive_info(int ctlr, int drv_index, int first_time, 1666 + int via_ioctl) 1831 1667 { 1832 1668 ctlr_info_t *h = hba[ctlr]; 1833 1669 struct gendisk *disk; ··· 1838 1672 unsigned long flags = 0; 1839 1673 int ret = 0; 1840 1674 drive_info_struct *drvinfo; 1841 - int was_only_controller_node; 1842 1675 1843 1676 /* Get information about the disk and modify the driver structure */ 1844 1677 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL); 1845 - drvinfo = kmalloc(sizeof(*drvinfo), GFP_KERNEL); 1678 + drvinfo = kzalloc(sizeof(*drvinfo), GFP_KERNEL); 1846 1679 if (inq_buff == NULL || drvinfo == NULL) 1847 1680 goto mem_msg; 1848 - 1849 - /* See if we're trying to update the "controller node" 1850 - * this will happen the when the first logical drive gets 1851 - * created by ACU. 
1852 - */ 1853 - was_only_controller_node = (drv_index == 0 && 1854 - h->drv[0].raid_level == -1); 1855 1681 1856 1682 /* testing to see if 16-byte CDBs are already being used */ 1857 1683 if (h->cciss_read == CCISS_READ_16) { ··· 1877 1719 drvinfo->model, drvinfo->rev); 1878 1720 cciss_get_serial_no(ctlr, drv_index, 1, drvinfo->serial_no, 1879 1721 sizeof(drvinfo->serial_no)); 1722 + /* Save the lunid in case we deregister the disk, below. */ 1723 + memcpy(drvinfo->LunID, h->drv[drv_index]->LunID, 1724 + sizeof(drvinfo->LunID)); 1880 1725 1881 1726 /* Is it the same disk we already know, and nothing's changed? */ 1882 - if (h->drv[drv_index].raid_level != -1 && 1727 + if (h->drv[drv_index]->raid_level != -1 && 1883 1728 ((memcmp(drvinfo->serial_no, 1884 - h->drv[drv_index].serial_no, 16) == 0) && 1885 - drvinfo->block_size == h->drv[drv_index].block_size && 1886 - drvinfo->nr_blocks == h->drv[drv_index].nr_blocks && 1887 - drvinfo->heads == h->drv[drv_index].heads && 1888 - drvinfo->sectors == h->drv[drv_index].sectors && 1889 - drvinfo->cylinders == h->drv[drv_index].cylinders)) 1729 + h->drv[drv_index]->serial_no, 16) == 0) && 1730 + drvinfo->block_size == h->drv[drv_index]->block_size && 1731 + drvinfo->nr_blocks == h->drv[drv_index]->nr_blocks && 1732 + drvinfo->heads == h->drv[drv_index]->heads && 1733 + drvinfo->sectors == h->drv[drv_index]->sectors && 1734 + drvinfo->cylinders == h->drv[drv_index]->cylinders)) 1890 1735 /* The disk is unchanged, nothing to update */ 1891 1736 goto freeret; 1892 1737 ··· 1899 1738 * If the disk already exists then deregister it before proceeding 1900 1739 * (unless it's the first disk (for the controller node). 
1901 1740 */ 1902 - if (h->drv[drv_index].raid_level != -1 && drv_index != 0) { 1741 + if (h->drv[drv_index]->raid_level != -1 && drv_index != 0) { 1903 1742 printk(KERN_WARNING "disk %d has changed.\n", drv_index); 1904 1743 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); 1905 - h->drv[drv_index].busy_configuring = 1; 1744 + h->drv[drv_index]->busy_configuring = 1; 1906 1745 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 1907 1746 1908 - /* deregister_disk sets h->drv[drv_index].queue = NULL 1747 + /* deregister_disk sets h->drv[drv_index]->queue = NULL 1909 1748 * which keeps the interrupt handler from starting 1910 1749 * the queue. 1911 1750 */ 1912 - ret = deregister_disk(h, drv_index, 0); 1913 - h->drv[drv_index].busy_configuring = 0; 1751 + ret = deregister_disk(h, drv_index, 0, via_ioctl); 1914 1752 } 1915 1753 1916 1754 /* If the disk is in use return */ ··· 1917 1757 goto freeret; 1918 1758 1919 1759 /* Save the new information from cciss_geometry_inquiry 1920 - * and serial number inquiry. 1760 + * and serial number inquiry. If the disk was deregistered 1761 + * above, then h->drv[drv_index] will be NULL. 1921 1762 */ 1922 - h->drv[drv_index].block_size = drvinfo->block_size; 1923 - h->drv[drv_index].nr_blocks = drvinfo->nr_blocks; 1924 - h->drv[drv_index].heads = drvinfo->heads; 1925 - h->drv[drv_index].sectors = drvinfo->sectors; 1926 - h->drv[drv_index].cylinders = drvinfo->cylinders; 1927 - h->drv[drv_index].raid_level = drvinfo->raid_level; 1928 - memcpy(h->drv[drv_index].serial_no, drvinfo->serial_no, 16); 1929 - memcpy(h->drv[drv_index].vendor, drvinfo->vendor, VENDOR_LEN + 1); 1930 - memcpy(h->drv[drv_index].model, drvinfo->model, MODEL_LEN + 1); 1931 - memcpy(h->drv[drv_index].rev, drvinfo->rev, REV_LEN + 1); 1763 + if (h->drv[drv_index] == NULL) { 1764 + drvinfo->device_initialized = 0; 1765 + h->drv[drv_index] = drvinfo; 1766 + drvinfo = NULL; /* so it won't be freed below. 
*/ 1767 + } else { 1768 + /* special case for cxd0 */ 1769 + h->drv[drv_index]->block_size = drvinfo->block_size; 1770 + h->drv[drv_index]->nr_blocks = drvinfo->nr_blocks; 1771 + h->drv[drv_index]->heads = drvinfo->heads; 1772 + h->drv[drv_index]->sectors = drvinfo->sectors; 1773 + h->drv[drv_index]->cylinders = drvinfo->cylinders; 1774 + h->drv[drv_index]->raid_level = drvinfo->raid_level; 1775 + memcpy(h->drv[drv_index]->serial_no, drvinfo->serial_no, 16); 1776 + memcpy(h->drv[drv_index]->vendor, drvinfo->vendor, 1777 + VENDOR_LEN + 1); 1778 + memcpy(h->drv[drv_index]->model, drvinfo->model, MODEL_LEN + 1); 1779 + memcpy(h->drv[drv_index]->rev, drvinfo->rev, REV_LEN + 1); 1780 + } 1932 1781 1933 1782 ++h->num_luns; 1934 1783 disk = h->gendisk[drv_index]; 1935 - set_capacity(disk, h->drv[drv_index].nr_blocks); 1784 + set_capacity(disk, h->drv[drv_index]->nr_blocks); 1936 1785 1937 1786 /* If it's not disk 0 (drv_index != 0) 1938 1787 * or if it was disk 0, but there was previously ··· 1949 1780 * (raid_leve == -1) then we want to update the 1950 1781 * logical drive's information. 1951 1782 */ 1952 - if (drv_index || first_time) 1953 - cciss_add_disk(h, disk, drv_index); 1783 + if (drv_index || first_time) { 1784 + if (cciss_add_disk(h, disk, drv_index) != 0) { 1785 + cciss_free_gendisk(h, drv_index); 1786 + cciss_free_drive_info(h, drv_index); 1787 + printk(KERN_WARNING "cciss:%d could not update " 1788 + "disk %d\n", h->ctlr, drv_index); 1789 + --h->num_luns; 1790 + } 1791 + } 1954 1792 1955 1793 freeret: 1956 1794 kfree(inq_buff); ··· 1969 1793 } 1970 1794 1971 1795 /* This function will find the first index of the controllers drive array 1972 - * that has a -1 for the raid_level and will return that index. This is 1973 - * where new drives will be added. If the index to be returned is greater 1974 - * than the highest_lun index for the controller then highest_lun is set 1975 - * to this new index. If there are no available indexes then -1 is returned. 
1976 - * "controller_node" is used to know if this is a real logical drive, or just 1977 - * the controller node, which determines if this counts towards highest_lun. 1796 + * that has a null drv pointer and allocate the drive info struct and 1797 + * will return that index This is where new drives will be added. 1798 + * If the index to be returned is greater than the highest_lun index for 1799 + * the controller then highest_lun is set * to this new index. 1800 + * If there are no available indexes or if tha allocation fails, then -1 1801 + * is returned. * "controller_node" is used to know if this is a real 1802 + * logical drive, or just the controller node, which determines if this 1803 + * counts towards highest_lun. 1978 1804 */ 1979 - static int cciss_find_free_drive_index(int ctlr, int controller_node) 1805 + static int cciss_alloc_drive_info(ctlr_info_t *h, int controller_node) 1980 1806 { 1981 1807 int i; 1808 + drive_info_struct *drv; 1982 1809 1810 + /* Search for an empty slot for our drive info */ 1983 1811 for (i = 0; i < CISS_MAX_LUN; i++) { 1984 - if (hba[ctlr]->drv[i].raid_level == -1) { 1985 - if (i > hba[ctlr]->highest_lun) 1986 - if (!controller_node) 1987 - hba[ctlr]->highest_lun = i; 1812 + 1813 + /* if not cxd0 case, and it's occupied, skip it. */ 1814 + if (h->drv[i] && i != 0) 1815 + continue; 1816 + /* 1817 + * If it's cxd0 case, and drv is alloc'ed already, and a 1818 + * disk is configured there, skip it. 1819 + */ 1820 + if (i == 0 && h->drv[i] && h->drv[i]->raid_level != -1) 1821 + continue; 1822 + 1823 + /* 1824 + * We've found an empty slot. Update highest_lun 1825 + * provided this isn't just the fake cxd0 controller node. 1826 + */ 1827 + if (i > h->highest_lun && !controller_node) 1828 + h->highest_lun = i; 1829 + 1830 + /* If adding a real disk at cxd0, and it's already alloc'ed */ 1831 + if (i == 0 && h->drv[i] != NULL) 1988 1832 return i; 1989 - } 1833 + 1834 + /* 1835 + * Found an empty slot, not already alloc'ed. 
Allocate it. 1836 + * Mark it with raid_level == -1, so we know it's new later on. 1837 + */ 1838 + drv = kzalloc(sizeof(*drv), GFP_KERNEL); 1839 + if (!drv) 1840 + return -1; 1841 + drv->raid_level = -1; /* so we know it's new */ 1842 + h->drv[i] = drv; 1843 + return i; 1990 1844 } 1991 1845 return -1; 1846 + } 1847 + 1848 + static void cciss_free_drive_info(ctlr_info_t *h, int drv_index) 1849 + { 1850 + kfree(h->drv[drv_index]); 1851 + h->drv[drv_index] = NULL; 1852 + } 1853 + 1854 + static void cciss_free_gendisk(ctlr_info_t *h, int drv_index) 1855 + { 1856 + put_disk(h->gendisk[drv_index]); 1857 + h->gendisk[drv_index] = NULL; 1992 1858 } 1993 1859 1994 1860 /* cciss_add_gendisk finds a free hba[]->drv structure ··· 2042 1824 * a means to talk to the controller in case no logical 2043 1825 * drives have yet been configured. 2044 1826 */ 2045 - static int cciss_add_gendisk(ctlr_info_t *h, __u32 lunid, int controller_node) 1827 + static int cciss_add_gendisk(ctlr_info_t *h, unsigned char lunid[], 1828 + int controller_node) 2046 1829 { 2047 1830 int drv_index; 2048 1831 2049 - drv_index = cciss_find_free_drive_index(h->ctlr, controller_node); 1832 + drv_index = cciss_alloc_drive_info(h, controller_node); 2050 1833 if (drv_index == -1) 2051 1834 return -1; 1835 + 2052 1836 /*Check if the gendisk needs to be allocated */ 2053 1837 if (!h->gendisk[drv_index]) { 2054 1838 h->gendisk[drv_index] = ··· 2059 1839 printk(KERN_ERR "cciss%d: could not " 2060 1840 "allocate a new disk %d\n", 2061 1841 h->ctlr, drv_index); 2062 - return -1; 1842 + goto err_free_drive_info; 2063 1843 } 2064 1844 } 2065 - h->drv[drv_index].LunID = lunid; 2066 - if (cciss_create_ld_sysfs_entry(h, &h->drv[drv_index], drv_index)) 1845 + memcpy(h->drv[drv_index]->LunID, lunid, 1846 + sizeof(h->drv[drv_index]->LunID)); 1847 + if (cciss_create_ld_sysfs_entry(h, drv_index)) 2067 1848 goto err_free_disk; 2068 - 2069 1849 /* Don't need to mark this busy because nobody */ 2070 1850 /* else knows about 
this disk yet to contend */ 2071 1851 /* for access to it. */ 2072 - h->drv[drv_index].busy_configuring = 0; 1852 + h->drv[drv_index]->busy_configuring = 0; 2073 1853 wmb(); 2074 1854 return drv_index; 2075 1855 2076 1856 err_free_disk: 2077 - put_disk(h->gendisk[drv_index]); 2078 - h->gendisk[drv_index] = NULL; 1857 + cciss_free_gendisk(h, drv_index); 1858 + err_free_drive_info: 1859 + cciss_free_drive_info(h, drv_index); 2079 1860 return -1; 2080 1861 } 2081 1862 ··· 2093 1872 if (h->gendisk[0] != NULL) /* already did this? Then bail. */ 2094 1873 return; 2095 1874 2096 - drv_index = cciss_add_gendisk(h, 0, 1); 2097 - if (drv_index == -1) { 2098 - printk(KERN_WARNING "cciss%d: could not " 2099 - "add disk 0.\n", h->ctlr); 2100 - return; 2101 - } 2102 - h->drv[drv_index].block_size = 512; 2103 - h->drv[drv_index].nr_blocks = 0; 2104 - h->drv[drv_index].heads = 0; 2105 - h->drv[drv_index].sectors = 0; 2106 - h->drv[drv_index].cylinders = 0; 2107 - h->drv[drv_index].raid_level = -1; 2108 - memset(h->drv[drv_index].serial_no, 0, 16); 1875 + drv_index = cciss_add_gendisk(h, CTLR_LUNID, 1); 1876 + if (drv_index == -1) 1877 + goto error; 1878 + h->drv[drv_index]->block_size = 512; 1879 + h->drv[drv_index]->nr_blocks = 0; 1880 + h->drv[drv_index]->heads = 0; 1881 + h->drv[drv_index]->sectors = 0; 1882 + h->drv[drv_index]->cylinders = 0; 1883 + h->drv[drv_index]->raid_level = -1; 1884 + memset(h->drv[drv_index]->serial_no, 0, 16); 2109 1885 disk = h->gendisk[drv_index]; 2110 - cciss_add_disk(h, disk, drv_index); 1886 + if (cciss_add_disk(h, disk, drv_index) == 0) 1887 + return; 1888 + cciss_free_gendisk(h, drv_index); 1889 + cciss_free_drive_info(h, drv_index); 1890 + error: 1891 + printk(KERN_WARNING "cciss%d: could not " 1892 + "add disk 0.\n", h->ctlr); 1893 + return; 2111 1894 } 2112 1895 2113 1896 /* This function will add and remove logical drives from the Logical ··· 2122 1897 * INPUT 2123 1898 * h = The controller to perform the operations on 2124 1899 */ 2125 - 
static int rebuild_lun_table(ctlr_info_t *h, int first_time) 1900 + static int rebuild_lun_table(ctlr_info_t *h, int first_time, 1901 + int via_ioctl) 2126 1902 { 2127 1903 int ctlr = h->ctlr; 2128 1904 int num_luns; ··· 2133 1907 int i; 2134 1908 int drv_found; 2135 1909 int drv_index = 0; 2136 - __u32 lunid = 0; 1910 + unsigned char lunid[8] = CTLR_LUNID; 2137 1911 unsigned long flags; 2138 1912 2139 1913 if (!capable(CAP_SYS_RAWIO)) ··· 2186 1960 drv_found = 0; 2187 1961 2188 1962 /* skip holes in the array from already deleted drives */ 2189 - if (h->drv[i].raid_level == -1) 1963 + if (h->drv[i] == NULL) 2190 1964 continue; 2191 1965 2192 1966 for (j = 0; j < num_luns; j++) { 2193 - memcpy(&lunid, &ld_buff->LUN[j][0], 4); 2194 - lunid = le32_to_cpu(lunid); 2195 - if (h->drv[i].LunID == lunid) { 1967 + memcpy(lunid, &ld_buff->LUN[j][0], sizeof(lunid)); 1968 + if (memcmp(h->drv[i]->LunID, lunid, 1969 + sizeof(lunid)) == 0) { 2196 1970 drv_found = 1; 2197 1971 break; 2198 1972 } ··· 2200 1974 if (!drv_found) { 2201 1975 /* Deregister it from the OS, it's gone. */ 2202 1976 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); 2203 - h->drv[i].busy_configuring = 1; 1977 + h->drv[i]->busy_configuring = 1; 2204 1978 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 2205 - return_code = deregister_disk(h, i, 1); 2206 - cciss_destroy_ld_sysfs_entry(&h->drv[i]); 2207 - h->drv[i].busy_configuring = 0; 1979 + return_code = deregister_disk(h, i, 1, via_ioctl); 1980 + if (h->drv[i] != NULL) 1981 + h->drv[i]->busy_configuring = 0; 2208 1982 } 2209 1983 } 2210 1984 ··· 2218 1992 2219 1993 drv_found = 0; 2220 1994 2221 - memcpy(&lunid, &ld_buff->LUN[i][0], 4); 2222 - lunid = le32_to_cpu(lunid); 2223 - 1995 + memcpy(lunid, &ld_buff->LUN[i][0], sizeof(lunid)); 2224 1996 /* Find if the LUN is already in the drive array 2225 1997 * of the driver. If so then update its info 2226 1998 * if not in use. If it does not exist then find 2227 1999 * the first free index and add it. 
2228 2000 */ 2229 2001 for (j = 0; j <= h->highest_lun; j++) { 2230 - if (h->drv[j].raid_level != -1 && 2231 - h->drv[j].LunID == lunid) { 2002 + if (h->drv[j] != NULL && 2003 + memcmp(h->drv[j]->LunID, lunid, 2004 + sizeof(h->drv[j]->LunID)) == 0) { 2232 2005 drv_index = j; 2233 2006 drv_found = 1; 2234 2007 break; ··· 2240 2015 if (drv_index == -1) 2241 2016 goto freeret; 2242 2017 } 2243 - cciss_update_drive_info(ctlr, drv_index, first_time); 2018 + cciss_update_drive_info(ctlr, drv_index, first_time, 2019 + via_ioctl); 2244 2020 } /* end for */ 2245 2021 2246 2022 freeret: ··· 2258 2032 goto freeret; 2259 2033 } 2260 2034 2035 + static void cciss_clear_drive_info(drive_info_struct *drive_info) 2036 + { 2037 + /* zero out the disk size info */ 2038 + drive_info->nr_blocks = 0; 2039 + drive_info->block_size = 0; 2040 + drive_info->heads = 0; 2041 + drive_info->sectors = 0; 2042 + drive_info->cylinders = 0; 2043 + drive_info->raid_level = -1; 2044 + memset(drive_info->serial_no, 0, sizeof(drive_info->serial_no)); 2045 + memset(drive_info->model, 0, sizeof(drive_info->model)); 2046 + memset(drive_info->rev, 0, sizeof(drive_info->rev)); 2047 + memset(drive_info->vendor, 0, sizeof(drive_info->vendor)); 2048 + /* 2049 + * don't clear the LUNID though, we need to remember which 2050 + * one this one is. 2051 + */ 2052 + } 2053 + 2261 2054 /* This function will deregister the disk and it's queue from the 2262 2055 * kernel. It must be called with the controller lock held and the 2263 2056 * drv structures busy_configuring flag set. It's parameters are: ··· 2291 2046 * the disk in preparation for re-adding it. In this case 2292 2047 * the highest_lun should be left unchanged and the LunID 2293 2048 * should not be cleared. 2049 + * via_ioctl 2050 + * This indicates whether we've reached this path via ioctl. 2051 + * This affects the maximum usage count allowed for c0d0 to be messed with. 
2052 + * If this path is reached via ioctl(), then the max_usage_count will 2053 + * be 1, as the process calling ioctl() has got to have the device open. 2054 + * If we get here via sysfs, then the max usage count will be zero. 2294 2055 */ 2295 2056 static int deregister_disk(ctlr_info_t *h, int drv_index, 2296 - int clear_all) 2057 + int clear_all, int via_ioctl) 2297 2058 { 2298 2059 int i; 2299 2060 struct gendisk *disk; 2300 2061 drive_info_struct *drv; 2062 + int recalculate_highest_lun; 2301 2063 2302 2064 if (!capable(CAP_SYS_RAWIO)) 2303 2065 return -EPERM; 2304 2066 2305 - drv = &h->drv[drv_index]; 2067 + drv = h->drv[drv_index]; 2306 2068 disk = h->gendisk[drv_index]; 2307 2069 2308 2070 /* make sure logical volume is NOT is use */ 2309 2071 if (clear_all || (h->gendisk[0] == disk)) { 2310 - if (drv->usage_count > 1) 2072 + if (drv->usage_count > via_ioctl) 2311 2073 return -EBUSY; 2312 2074 } else if (drv->usage_count > 0) 2313 2075 return -EBUSY; 2076 + 2077 + recalculate_highest_lun = (drv == h->drv[h->highest_lun]); 2314 2078 2315 2079 /* invalidate the devices and deregister the disk. If it is disk 2316 2080 * zero do not deregister it but just zero out it's values. This ··· 2327 2073 */ 2328 2074 if (h->gendisk[0] != disk) { 2329 2075 struct request_queue *q = disk->queue; 2330 - if (disk->flags & GENHD_FL_UP) 2076 + if (disk->flags & GENHD_FL_UP) { 2077 + cciss_destroy_ld_sysfs_entry(h, drv_index, 0); 2331 2078 del_gendisk(disk); 2332 - if (q) { 2333 - blk_cleanup_queue(q); 2334 - /* Set drv->queue to NULL so that we do not try 2335 - * to call blk_start_queue on this queue in the 2336 - * interrupt handler 2337 - */ 2338 - drv->queue = NULL; 2339 2079 } 2080 + if (q) 2081 + blk_cleanup_queue(q); 2340 2082 /* If clear_all is set then we are deleting the logical 2341 2083 * drive, not just refreshing its info. For drives 2342 2084 * other than disk 0 we will call put_disk. 
We do not ··· 2355 2105 } 2356 2106 } else { 2357 2107 set_capacity(disk, 0); 2108 + cciss_clear_drive_info(drv); 2358 2109 } 2359 2110 2360 2111 --h->num_luns; 2361 - /* zero out the disk size info */ 2362 - drv->nr_blocks = 0; 2363 - drv->block_size = 0; 2364 - drv->heads = 0; 2365 - drv->sectors = 0; 2366 - drv->cylinders = 0; 2367 - drv->raid_level = -1; /* This can be used as a flag variable to 2368 - * indicate that this element of the drive 2369 - * array is free. 2370 - */ 2371 2112 2372 - if (clear_all) { 2373 - /* check to see if it was the last disk */ 2374 - if (drv == h->drv + h->highest_lun) { 2375 - /* if so, find the new hightest lun */ 2376 - int i, newhighest = -1; 2377 - for (i = 0; i <= h->highest_lun; i++) { 2378 - /* if the disk has size > 0, it is available */ 2379 - if (h->drv[i].heads) 2380 - newhighest = i; 2381 - } 2382 - h->highest_lun = newhighest; 2113 + /* if it was the last disk, find the new hightest lun */ 2114 + if (clear_all && recalculate_highest_lun) { 2115 + int i, newhighest = -1; 2116 + for (i = 0; i <= h->highest_lun; i++) { 2117 + /* if the disk has size > 0, it is available */ 2118 + if (h->drv[i] && h->drv[i]->heads) 2119 + newhighest = i; 2383 2120 } 2384 - 2385 - drv->LunID = 0; 2121 + h->highest_lun = newhighest; 2386 2122 } 2387 2123 return 0; 2388 2124 } ··· 2715 2479 } else { /* Get geometry failed */ 2716 2480 printk(KERN_WARNING "cciss: reading geometry failed\n"); 2717 2481 } 2718 - printk(KERN_INFO " heads=%d, sectors=%d, cylinders=%d\n\n", 2719 - drv->heads, drv->sectors, drv->cylinders); 2720 2482 } 2721 2483 2722 2484 static void ··· 2748 2514 *total_size = 0; 2749 2515 *block_size = BLOCK_SIZE; 2750 2516 } 2751 - if (*total_size != 0) 2752 - printk(KERN_INFO " blocks= %llu block_size= %d\n", 2753 - (unsigned long long)*total_size+1, *block_size); 2754 2517 kfree(buf); 2755 2518 } 2756 2519 ··· 2799 2568 InquiryData_struct *inq_buff = NULL; 2800 2569 2801 2570 for (logvol = 0; logvol < CISS_MAX_LUN; 
logvol++) { 2802 - if (h->drv[logvol].LunID == drv->LunID) { 2571 + if (memcmp(h->drv[logvol]->LunID, drv->LunID, 2572 + sizeof(drv->LunID)) == 0) { 2803 2573 FOUND = 1; 2804 2574 break; 2805 2575 } ··· 3285 3053 /* The first 2 bits are reserved for controller error reporting. */ 3286 3054 c->Header.Tag.lower = (c->cmdindex << 3); 3287 3055 c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */ 3288 - c->Header.LUN.LogDev.VolId = drv->LunID; 3289 - c->Header.LUN.LogDev.Mode = 1; 3056 + memcpy(&c->Header.LUN, drv->LunID, sizeof(drv->LunID)); 3290 3057 c->Request.CDBLen = 10; // 12 byte commands not in FW yet; 3291 3058 c->Request.Type.Type = TYPE_CMD; // It is a command. 3292 3059 c->Request.Type.Attribute = ATTR_SIMPLE; ··· 3463 3232 return IRQ_HANDLED; 3464 3233 } 3465 3234 3235 + /** 3236 + * add_to_scan_list() - add controller to rescan queue 3237 + * @h: Pointer to the controller. 3238 + * 3239 + * Adds the controller to the rescan queue if not already on the queue. 3240 + * 3241 + * returns 1 if added to the queue, 0 if skipped (could be on the 3242 + * queue already, or the controller could be initializing or shutting 3243 + * down). 
3244 + **/ 3245 + static int add_to_scan_list(struct ctlr_info *h) 3246 + { 3247 + struct ctlr_info *test_h; 3248 + int found = 0; 3249 + int ret = 0; 3250 + 3251 + if (h->busy_initializing) 3252 + return 0; 3253 + 3254 + if (!mutex_trylock(&h->busy_shutting_down)) 3255 + return 0; 3256 + 3257 + mutex_lock(&scan_mutex); 3258 + list_for_each_entry(test_h, &scan_q, scan_list) { 3259 + if (test_h == h) { 3260 + found = 1; 3261 + break; 3262 + } 3263 + } 3264 + if (!found && !h->busy_scanning) { 3265 + INIT_COMPLETION(h->scan_wait); 3266 + list_add_tail(&h->scan_list, &scan_q); 3267 + ret = 1; 3268 + } 3269 + mutex_unlock(&scan_mutex); 3270 + mutex_unlock(&h->busy_shutting_down); 3271 + 3272 + return ret; 3273 + } 3274 + 3275 + /** 3276 + * remove_from_scan_list() - remove controller from rescan queue 3277 + * @h: Pointer to the controller. 3278 + * 3279 + * Removes the controller from the rescan queue if present. Blocks if 3280 + * the controller is currently conducting a rescan. 3281 + **/ 3282 + static void remove_from_scan_list(struct ctlr_info *h) 3283 + { 3284 + struct ctlr_info *test_h, *tmp_h; 3285 + int scanning = 0; 3286 + 3287 + mutex_lock(&scan_mutex); 3288 + list_for_each_entry_safe(test_h, tmp_h, &scan_q, scan_list) { 3289 + if (test_h == h) { 3290 + list_del(&h->scan_list); 3291 + complete_all(&h->scan_wait); 3292 + mutex_unlock(&scan_mutex); 3293 + return; 3294 + } 3295 + } 3296 + if (&h->busy_scanning) 3297 + scanning = 0; 3298 + mutex_unlock(&scan_mutex); 3299 + 3300 + if (scanning) 3301 + wait_for_completion(&h->scan_wait); 3302 + } 3303 + 3304 + /** 3305 + * scan_thread() - kernel thread used to rescan controllers 3306 + * @data: Ignored. 3307 + * 3308 + * A kernel thread used scan for drive topology changes on 3309 + * controllers. The thread processes only one controller at a time 3310 + * using a queue. 
Controllers are added to the queue using 3311 + * add_to_scan_list() and removed from the queue either after done 3312 + * processing or using remove_from_scan_list(). 3313 + * 3314 + * returns 0. 3315 + **/ 3466 3316 static int scan_thread(void *data) 3467 3317 { 3468 - ctlr_info_t *h = data; 3469 - int rc; 3470 - DECLARE_COMPLETION_ONSTACK(wait); 3471 - h->rescan_wait = &wait; 3318 + struct ctlr_info *h; 3472 3319 3473 - for (;;) { 3474 - rc = wait_for_completion_interruptible(&wait); 3320 + while (1) { 3321 + set_current_state(TASK_INTERRUPTIBLE); 3322 + schedule(); 3475 3323 if (kthread_should_stop()) 3476 3324 break; 3477 - if (!rc) 3478 - rebuild_lun_table(h, 0); 3325 + 3326 + while (1) { 3327 + mutex_lock(&scan_mutex); 3328 + if (list_empty(&scan_q)) { 3329 + mutex_unlock(&scan_mutex); 3330 + break; 3331 + } 3332 + 3333 + h = list_entry(scan_q.next, 3334 + struct ctlr_info, 3335 + scan_list); 3336 + list_del(&h->scan_list); 3337 + h->busy_scanning = 1; 3338 + mutex_unlock(&scan_mutex); 3339 + 3340 + if (h) { 3341 + rebuild_lun_table(h, 0, 0); 3342 + complete_all(&h->scan_wait); 3343 + mutex_lock(&scan_mutex); 3344 + h->busy_scanning = 0; 3345 + mutex_unlock(&scan_mutex); 3346 + } 3347 + } 3479 3348 } 3349 + 3480 3350 return 0; 3481 3351 } 3482 3352 ··· 3600 3268 case REPORT_LUNS_CHANGED: 3601 3269 printk(KERN_WARNING "cciss%d: report LUN data " 3602 3270 "changed\n", h->ctlr); 3603 - if (h->rescan_wait) 3604 - complete(h->rescan_wait); 3271 + add_to_scan_list(h); 3272 + wake_up_process(cciss_scan_thread); 3605 3273 return 1; 3606 3274 break; 3607 3275 case POWER_OR_RESET: ··· 3821 3489 if (scratchpad == CCISS_FIRMWARE_READY) 3822 3490 break; 3823 3491 set_current_state(TASK_INTERRUPTIBLE); 3824 - schedule_timeout(HZ / 10); /* wait 100ms */ 3492 + schedule_timeout(msecs_to_jiffies(100)); /* wait 100ms */ 3825 3493 } 3826 3494 if (scratchpad != CCISS_FIRMWARE_READY) { 3827 3495 printk(KERN_WARNING "cciss: Board not ready. 
Timed out.\n"); ··· 3947 3615 break; 3948 3616 /* delay and try again */ 3949 3617 set_current_state(TASK_INTERRUPTIBLE); 3950 - schedule_timeout(10); 3618 + schedule_timeout(msecs_to_jiffies(1)); 3951 3619 } 3952 3620 3953 3621 #ifdef CCISS_DEBUG ··· 4001 3669 return -1; 4002 3670 } 4003 3671 4004 - static void free_hba(int i) 3672 + static void free_hba(int n) 4005 3673 { 4006 - ctlr_info_t *p = hba[i]; 4007 - int n; 3674 + ctlr_info_t *h = hba[n]; 3675 + int i; 4008 3676 4009 - hba[i] = NULL; 4010 - for (n = 0; n < CISS_MAX_LUN; n++) 4011 - put_disk(p->gendisk[n]); 4012 - kfree(p); 3677 + hba[n] = NULL; 3678 + for (i = 0; i < h->highest_lun + 1; i++) 3679 + if (h->gendisk[i] != NULL) 3680 + put_disk(h->gendisk[i]); 3681 + kfree(h); 4013 3682 } 4014 3683 4015 3684 /* Send a message CDB to the firmware. */ ··· 4251 3918 hba[i]->busy_initializing = 1; 4252 3919 INIT_HLIST_HEAD(&hba[i]->cmpQ); 4253 3920 INIT_HLIST_HEAD(&hba[i]->reqQ); 3921 + mutex_init(&hba[i]->busy_shutting_down); 4254 3922 4255 3923 if (cciss_pci_init(hba[i], pdev) != 0) 4256 3924 goto clean0; ··· 4259 3925 sprintf(hba[i]->devname, "cciss%d", i); 4260 3926 hba[i]->ctlr = i; 4261 3927 hba[i]->pdev = pdev; 3928 + 3929 + init_completion(&hba[i]->scan_wait); 4262 3930 4263 3931 if (cciss_create_hba_sysfs_entry(hba[i])) 4264 3932 goto clean0; ··· 4337 4001 hba[i]->num_luns = 0; 4338 4002 hba[i]->highest_lun = -1; 4339 4003 for (j = 0; j < CISS_MAX_LUN; j++) { 4340 - hba[i]->drv[j].raid_level = -1; 4341 - hba[i]->drv[j].queue = NULL; 4004 + hba[i]->drv[j] = NULL; 4342 4005 hba[i]->gendisk[j] = NULL; 4343 4006 } 4344 4007 ··· 4370 4035 4371 4036 hba[i]->cciss_max_sectors = 2048; 4372 4037 4038 + rebuild_lun_table(hba[i], 1, 0); 4373 4039 hba[i]->busy_initializing = 0; 4374 - 4375 - rebuild_lun_table(hba[i], 1); 4376 - hba[i]->cciss_scan_thread = kthread_run(scan_thread, hba[i], 4377 - "cciss_scan%02d", i); 4378 - if (IS_ERR(hba[i]->cciss_scan_thread)) 4379 - return PTR_ERR(hba[i]->cciss_scan_thread); 
4380 - 4381 4040 return 1; 4382 4041 4383 4042 clean4: ··· 4392 4063 cciss_destroy_hba_sysfs_entry(hba[i]); 4393 4064 clean0: 4394 4065 hba[i]->busy_initializing = 0; 4395 - /* cleanup any queues that may have been initialized */ 4396 - for (j=0; j <= hba[i]->highest_lun; j++){ 4397 - drive_info_struct *drv = &(hba[i]->drv[j]); 4398 - if (drv->queue) 4399 - blk_cleanup_queue(drv->queue); 4400 - } 4066 + 4401 4067 /* 4402 4068 * Deliberately omit pci_disable_device(): it does something nasty to 4403 4069 * Smart Array controllers that pci_enable_device does not undo ··· 4449 4125 return; 4450 4126 } 4451 4127 4452 - kthread_stop(hba[i]->cciss_scan_thread); 4128 + mutex_lock(&hba[i]->busy_shutting_down); 4453 4129 4130 + remove_from_scan_list(hba[i]); 4454 4131 remove_proc_entry(hba[i]->devname, proc_cciss); 4455 4132 unregister_blkdev(hba[i]->major, hba[i]->devname); 4456 4133 ··· 4461 4136 if (disk) { 4462 4137 struct request_queue *q = disk->queue; 4463 4138 4464 - if (disk->flags & GENHD_FL_UP) 4139 + if (disk->flags & GENHD_FL_UP) { 4140 + cciss_destroy_ld_sysfs_entry(hba[i], j, 1); 4465 4141 del_gendisk(disk); 4142 + } 4466 4143 if (q) 4467 4144 blk_cleanup_queue(q); 4468 4145 } ··· 4497 4170 pci_release_regions(pdev); 4498 4171 pci_set_drvdata(pdev, NULL); 4499 4172 cciss_destroy_hba_sysfs_entry(hba[i]); 4173 + mutex_unlock(&hba[i]->busy_shutting_down); 4500 4174 free_hba(i); 4501 4175 } 4502 4176 ··· 4530 4202 if (err) 4531 4203 return err; 4532 4204 4205 + /* Start the scan thread */ 4206 + cciss_scan_thread = kthread_run(scan_thread, NULL, "cciss_scan"); 4207 + if (IS_ERR(cciss_scan_thread)) { 4208 + err = PTR_ERR(cciss_scan_thread); 4209 + goto err_bus_unregister; 4210 + } 4211 + 4533 4212 /* Register for our PCI devices */ 4534 4213 err = pci_register_driver(&cciss_pci_driver); 4535 4214 if (err) 4536 - goto err_bus_register; 4215 + goto err_thread_stop; 4537 4216 4538 - return 0; 4217 + return err; 4539 4218 4540 - err_bus_register: 4219 + 
err_thread_stop: 4220 + kthread_stop(cciss_scan_thread); 4221 + err_bus_unregister: 4541 4222 bus_unregister(&cciss_bus_type); 4223 + 4542 4224 return err; 4543 4225 } 4544 4226 ··· 4565 4227 cciss_remove_one(hba[i]->pdev); 4566 4228 } 4567 4229 } 4230 + kthread_stop(cciss_scan_thread); 4568 4231 remove_proc_entry("driver/cciss", NULL); 4569 4232 bus_unregister(&cciss_bus_type); 4570 4233 }
+8 -4
drivers/block/cciss.h
··· 2 2 #define CCISS_H 3 3 4 4 #include <linux/genhd.h> 5 + #include <linux/mutex.h> 5 6 6 7 #include "cciss_cmd.h" 7 8 ··· 30 29 }; 31 30 typedef struct _drive_info_struct 32 31 { 33 - __u32 LunID; 32 + unsigned char LunID[8]; 34 33 int usage_count; 35 34 struct request_queue *queue; 36 35 sector_t nr_blocks; ··· 52 51 char vendor[VENDOR_LEN + 1]; /* SCSI vendor string */ 53 52 char model[MODEL_LEN + 1]; /* SCSI model string */ 54 53 char rev[REV_LEN + 1]; /* SCSI revision string */ 54 + char device_initialized; /* indicates whether dev is initialized */ 55 55 } drive_info_struct; 56 56 57 57 struct ctlr_info ··· 88 86 BYTE cciss_read_capacity; 89 87 90 88 // information about each logical volume 91 - drive_info_struct drv[CISS_MAX_LUN]; 89 + drive_info_struct *drv[CISS_MAX_LUN]; 92 90 93 91 struct access_method access; 94 92 ··· 110 108 int nr_frees; 111 109 int busy_configuring; 112 110 int busy_initializing; 111 + int busy_scanning; 112 + struct mutex busy_shutting_down; 113 113 114 114 /* This element holds the zero based queue number of the last 115 115 * queue to be started. It is used for fairness. ··· 126 122 /* and saved for later processing */ 127 123 #endif 128 124 unsigned char alive; 129 - struct completion *rescan_wait; 130 - struct task_struct *cciss_scan_thread; 125 + struct list_head scan_list; 126 + struct completion scan_wait; 131 127 struct device dev; 132 128 }; 133 129
+30 -33
drivers/block/cpqarray.c
··· 32 32 #include <linux/blkpg.h> 33 33 #include <linux/timer.h> 34 34 #include <linux/proc_fs.h> 35 + #include <linux/seq_file.h> 35 36 #include <linux/init.h> 36 37 #include <linux/hdreg.h> 37 38 #include <linux/spinlock.h> ··· 178 177 179 178 #ifdef CONFIG_PROC_FS 180 179 static void ida_procinit(int i); 181 - static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data); 182 180 #else 183 181 static void ida_procinit(int i) {} 184 182 #endif ··· 206 206 #ifdef CONFIG_PROC_FS 207 207 208 208 static struct proc_dir_entry *proc_array; 209 + static const struct file_operations ida_proc_fops; 209 210 210 211 /* 211 212 * Get us a file in /proc/array that says something about each controller. ··· 219 218 if (!proc_array) return; 220 219 } 221 220 222 - create_proc_read_entry(hba[i]->devname, 0, proc_array, 223 - ida_proc_get_info, hba[i]); 221 + proc_create_data(hba[i]->devname, 0, proc_array, &ida_proc_fops, hba[i]); 224 222 } 225 223 226 224 /* 227 225 * Report information about this controller. 
228 226 */ 229 - static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data) 227 + static int ida_proc_show(struct seq_file *m, void *v) 230 228 { 231 - off_t pos = 0; 232 - off_t len = 0; 233 - int size, i, ctlr; 234 - ctlr_info_t *h = (ctlr_info_t*)data; 229 + int i, ctlr; 230 + ctlr_info_t *h = (ctlr_info_t*)m->private; 235 231 drv_info_t *drv; 236 232 #ifdef CPQ_PROC_PRINT_QUEUES 237 233 cmdlist_t *c; ··· 236 238 #endif 237 239 238 240 ctlr = h->ctlr; 239 - size = sprintf(buffer, "%s: Compaq %s Controller\n" 241 + seq_printf(m, "%s: Compaq %s Controller\n" 240 242 " Board ID: 0x%08lx\n" 241 243 " Firmware Revision: %c%c%c%c\n" 242 244 " Controller Sig: 0x%08lx\n" ··· 256 258 h->log_drives, h->phys_drives, 257 259 h->Qdepth, h->maxQsinceinit); 258 260 259 - pos += size; len += size; 260 - 261 - size = sprintf(buffer+len, "Logical Drive Info:\n"); 262 - pos += size; len += size; 261 + seq_puts(m, "Logical Drive Info:\n"); 263 262 264 263 for(i=0; i<h->log_drives; i++) { 265 264 drv = &h->drv[i]; 266 - size = sprintf(buffer+len, "ida/c%dd%d: blksz=%d nr_blks=%d\n", 265 + seq_printf(m, "ida/c%dd%d: blksz=%d nr_blks=%d\n", 267 266 ctlr, i, drv->blk_size, drv->nr_blks); 268 - pos += size; len += size; 269 267 } 270 268 271 269 #ifdef CPQ_PROC_PRINT_QUEUES 272 270 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags); 273 - size = sprintf(buffer+len, "\nCurrent Queues:\n"); 274 - pos += size; len += size; 271 + seq_puts(m, "\nCurrent Queues:\n"); 275 272 276 273 c = h->reqQ; 277 - size = sprintf(buffer+len, "reqQ = %p", c); pos += size; len += size; 274 + seq_printf(m, "reqQ = %p", c); 278 275 if (c) c=c->next; 279 276 while(c && c != h->reqQ) { 280 - size = sprintf(buffer+len, "->%p", c); 281 - pos += size; len += size; 277 + seq_printf(m, "->%p", c); 282 278 c=c->next; 283 279 } 284 280 285 281 c = h->cmpQ; 286 - size = sprintf(buffer+len, "\ncmpQ = %p", c); pos += size; len += size; 282 + seq_printf(m, "\ncmpQ = %p", c); 287 283 
if (c) c=c->next; 288 284 while(c && c != h->cmpQ) { 289 - size = sprintf(buffer+len, "->%p", c); 290 - pos += size; len += size; 285 + seq_printf(m, "->%p", c); 291 286 c=c->next; 292 287 } 293 288 294 - size = sprintf(buffer+len, "\n"); pos += size; len += size; 289 + seq_putc(m, '\n'); 295 290 spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags); 296 291 #endif 297 - size = sprintf(buffer+len, "nr_allocs = %d\nnr_frees = %d\n", 292 + seq_printf(m, "nr_allocs = %d\nnr_frees = %d\n", 298 293 h->nr_allocs, h->nr_frees); 299 - pos += size; len += size; 300 - 301 - *eof = 1; 302 - *start = buffer+offset; 303 - len -= offset; 304 - if (len>length) 305 - len = length; 306 - return len; 294 + return 0; 307 295 } 296 + 297 + static int ida_proc_open(struct inode *inode, struct file *file) 298 + { 299 + return single_open(file, ida_proc_show, PDE(inode)->data); 300 + } 301 + 302 + static const struct file_operations ida_proc_fops = { 303 + .owner = THIS_MODULE, 304 + .open = ida_proc_open, 305 + .read = seq_read, 306 + .llseek = seq_lseek, 307 + .release = single_release, 308 + }; 308 309 #endif /* CONFIG_PROC_FS */ 309 310 310 311 module_param_array(eisa, int, NULL, 0);
+6 -10
drivers/md/dm.c
··· 130 130 /* 131 131 * A list of ios that arrived while we were suspended. 132 132 */ 133 - atomic_t pending[2]; 133 + atomic_t pending; 134 134 wait_queue_head_t wait; 135 135 struct work_struct work; 136 136 struct bio_list deferred; ··· 453 453 { 454 454 struct mapped_device *md = io->md; 455 455 int cpu; 456 - int rw = bio_data_dir(io->bio); 457 456 458 457 io->start_time = jiffies; 459 458 460 459 cpu = part_stat_lock(); 461 460 part_round_stats(cpu, &dm_disk(md)->part0); 462 461 part_stat_unlock(); 463 - dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]); 462 + dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending); 464 463 } 465 464 466 465 static void end_io_acct(struct dm_io *io) ··· 479 480 * After this is decremented the bio must not be touched if it is 480 481 * a barrier. 481 482 */ 482 - dm_disk(md)->part0.in_flight[rw] = pending = 483 - atomic_dec_return(&md->pending[rw]); 484 - pending += atomic_read(&md->pending[rw^0x1]); 483 + dm_disk(md)->part0.in_flight = pending = 484 + atomic_dec_return(&md->pending); 485 485 486 486 /* nudge anyone waiting on suspend queue */ 487 487 if (!pending) ··· 1785 1787 if (!md->disk) 1786 1788 goto bad_disk; 1787 1789 1788 - atomic_set(&md->pending[0], 0); 1789 - atomic_set(&md->pending[1], 0); 1790 + atomic_set(&md->pending, 0); 1790 1791 init_waitqueue_head(&md->wait); 1791 1792 INIT_WORK(&md->work, dm_wq_work); 1792 1793 init_waitqueue_head(&md->eventq); ··· 2088 2091 break; 2089 2092 } 2090 2093 spin_unlock_irqrestore(q->queue_lock, flags); 2091 - } else if (!atomic_read(&md->pending[0]) && 2092 - !atomic_read(&md->pending[1])) 2094 + } else if (!atomic_read(&md->pending)) 2093 2095 break; 2094 2096 2095 2097 if (interruptible == TASK_INTERRUPTIBLE &&
+5 -14
drivers/mtd/mtd_blkdevs.c
··· 32 32 spinlock_t queue_lock; 33 33 }; 34 34 35 - static int blktrans_discard_request(struct request_queue *q, 36 - struct request *req) 37 - { 38 - req->cmd_type = REQ_TYPE_LINUX_BLOCK; 39 - req->cmd[0] = REQ_LB_OP_DISCARD; 40 - return 0; 41 - } 42 - 43 35 static int do_blktrans_request(struct mtd_blktrans_ops *tr, 44 36 struct mtd_blktrans_dev *dev, 45 37 struct request *req) ··· 44 52 45 53 buf = req->buffer; 46 54 47 - if (req->cmd_type == REQ_TYPE_LINUX_BLOCK && 48 - req->cmd[0] == REQ_LB_OP_DISCARD) 49 - return tr->discard(dev, block, nsect); 50 - 51 55 if (!blk_fs_request(req)) 52 56 return -EIO; 53 57 54 58 if (blk_rq_pos(req) + blk_rq_cur_sectors(req) > 55 59 get_capacity(req->rq_disk)) 56 60 return -EIO; 61 + 62 + if (blk_discard_rq(req)) 63 + return tr->discard(dev, block, nsect); 57 64 58 65 switch(rq_data_dir(req)) { 59 66 case READ: ··· 371 380 tr->blkcore_priv->rq->queuedata = tr; 372 381 blk_queue_logical_block_size(tr->blkcore_priv->rq, tr->blksize); 373 382 if (tr->discard) 374 - blk_queue_set_discard(tr->blkcore_priv->rq, 375 - blktrans_discard_request); 383 + queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, 384 + tr->blkcore_priv->rq); 376 385 377 386 tr->blkshift = ffs(tr->blksize) - 1; 378 387
+1 -1
drivers/staging/dst/dcore.c
··· 102 102 struct dst_node *n = q->queuedata; 103 103 int err = -EIO; 104 104 105 - if (bio_empty_barrier(bio) && !q->prepare_discard_fn) { 105 + if (bio_empty_barrier(bio) && !blk_queue_discard(q)) { 106 106 /* 107 107 * This is a dirty^Wnice hack, but if we complete this 108 108 * operation with -EOPNOTSUPP like intended, XFS
+23 -26
fs/bio.c
··· 249 249 250 250 mempool_free(p, bs->bio_pool); 251 251 } 252 + EXPORT_SYMBOL(bio_free); 252 253 253 254 void bio_init(struct bio *bio) 254 255 { ··· 258 257 bio->bi_comp_cpu = -1; 259 258 atomic_set(&bio->bi_cnt, 1); 260 259 } 260 + EXPORT_SYMBOL(bio_init); 261 261 262 262 /** 263 263 * bio_alloc_bioset - allocate a bio for I/O ··· 313 311 mempool_free(p, bs->bio_pool); 314 312 return NULL; 315 313 } 314 + EXPORT_SYMBOL(bio_alloc_bioset); 316 315 317 316 static void bio_fs_destructor(struct bio *bio) 318 317 { ··· 340 337 341 338 return bio; 342 339 } 340 + EXPORT_SYMBOL(bio_alloc); 343 341 344 342 static void bio_kmalloc_destructor(struct bio *bio) 345 343 { ··· 384 380 385 381 return bio; 386 382 } 383 + EXPORT_SYMBOL(bio_kmalloc); 387 384 388 385 void zero_fill_bio(struct bio *bio) 389 386 { ··· 421 416 bio->bi_destructor(bio); 422 417 } 423 418 } 419 + EXPORT_SYMBOL(bio_put); 424 420 425 421 inline int bio_phys_segments(struct request_queue *q, struct bio *bio) 426 422 { ··· 430 424 431 425 return bio->bi_phys_segments; 432 426 } 427 + EXPORT_SYMBOL(bio_phys_segments); 433 428 434 429 /** 435 430 * __bio_clone - clone a bio ··· 458 451 bio->bi_size = bio_src->bi_size; 459 452 bio->bi_idx = bio_src->bi_idx; 460 453 } 454 + EXPORT_SYMBOL(__bio_clone); 461 455 462 456 /** 463 457 * bio_clone - clone a bio ··· 490 482 491 483 return b; 492 484 } 485 + EXPORT_SYMBOL(bio_clone); 493 486 494 487 /** 495 488 * bio_get_nr_vecs - return approx number of vecs ··· 514 505 515 506 return nr_pages; 516 507 } 508 + EXPORT_SYMBOL(bio_get_nr_vecs); 517 509 518 510 static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page 519 511 *page, unsigned int len, unsigned int offset, ··· 645 635 return __bio_add_page(q, bio, page, len, offset, 646 636 queue_max_hw_sectors(q)); 647 637 } 638 + EXPORT_SYMBOL(bio_add_pc_page); 648 639 649 640 /** 650 641 * bio_add_page - attempt to add page to bio ··· 666 655 struct request_queue *q = bdev_get_queue(bio->bi_bdev); 
667 656 return __bio_add_page(q, bio, page, len, offset, queue_max_sectors(q)); 668 657 } 658 + EXPORT_SYMBOL(bio_add_page); 669 659 670 660 struct bio_map_data { 671 661 struct bio_vec *iovecs; ··· 788 776 bio_put(bio); 789 777 return ret; 790 778 } 779 + EXPORT_SYMBOL(bio_uncopy_user); 791 780 792 781 /** 793 782 * bio_copy_user_iov - copy user data to bio ··· 933 920 934 921 return bio_copy_user_iov(q, map_data, &iov, 1, write_to_vm, gfp_mask); 935 922 } 923 + EXPORT_SYMBOL(bio_copy_user); 936 924 937 925 static struct bio *__bio_map_user_iov(struct request_queue *q, 938 926 struct block_device *bdev, ··· 1064 1050 1065 1051 return bio_map_user_iov(q, bdev, &iov, 1, write_to_vm, gfp_mask); 1066 1052 } 1053 + EXPORT_SYMBOL(bio_map_user); 1067 1054 1068 1055 /** 1069 1056 * bio_map_user_iov - map user sg_iovec table into bio ··· 1132 1117 __bio_unmap_user(bio); 1133 1118 bio_put(bio); 1134 1119 } 1120 + EXPORT_SYMBOL(bio_unmap_user); 1135 1121 1136 1122 static void bio_map_kern_endio(struct bio *bio, int err) 1137 1123 { 1138 1124 bio_put(bio); 1139 1125 } 1140 - 1141 1126 1142 1127 static struct bio *__bio_map_kern(struct request_queue *q, void *data, 1143 1128 unsigned int len, gfp_t gfp_mask) ··· 1204 1189 bio_put(bio); 1205 1190 return ERR_PTR(-EINVAL); 1206 1191 } 1192 + EXPORT_SYMBOL(bio_map_kern); 1207 1193 1208 1194 static void bio_copy_kern_endio(struct bio *bio, int err) 1209 1195 { ··· 1266 1250 1267 1251 return bio; 1268 1252 } 1253 + EXPORT_SYMBOL(bio_copy_kern); 1269 1254 1270 1255 /* 1271 1256 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions ··· 1417 1400 if (bio->bi_end_io) 1418 1401 bio->bi_end_io(bio, error); 1419 1402 } 1403 + EXPORT_SYMBOL(bio_endio); 1420 1404 1421 1405 void bio_pair_release(struct bio_pair *bp) 1422 1406 { ··· 1428 1410 mempool_free(bp, bp->bio2.bi_private); 1429 1411 } 1430 1412 } 1413 + EXPORT_SYMBOL(bio_pair_release); 1431 1414 1432 1415 static void bio_pair_end_1(struct bio *bi, int err) 1433 1416 
{ ··· 1496 1477 1497 1478 return bp; 1498 1479 } 1480 + EXPORT_SYMBOL(bio_split); 1499 1481 1500 1482 /** 1501 1483 * bio_sector_offset - Find hardware sector offset in bio ··· 1567 1547 1568 1548 kfree(bs); 1569 1549 } 1550 + EXPORT_SYMBOL(bioset_free); 1570 1551 1571 1552 /** 1572 1553 * bioset_create - Create a bio_set ··· 1613 1592 bioset_free(bs); 1614 1593 return NULL; 1615 1594 } 1595 + EXPORT_SYMBOL(bioset_create); 1616 1596 1617 1597 static void __init biovec_init_slabs(void) 1618 1598 { ··· 1658 1636 1659 1637 return 0; 1660 1638 } 1661 - 1662 1639 subsys_initcall(init_bio); 1663 - 1664 - EXPORT_SYMBOL(bio_alloc); 1665 - EXPORT_SYMBOL(bio_kmalloc); 1666 - EXPORT_SYMBOL(bio_put); 1667 - EXPORT_SYMBOL(bio_free); 1668 - EXPORT_SYMBOL(bio_endio); 1669 - EXPORT_SYMBOL(bio_init); 1670 - EXPORT_SYMBOL(__bio_clone); 1671 - EXPORT_SYMBOL(bio_clone); 1672 - EXPORT_SYMBOL(bio_phys_segments); 1673 - EXPORT_SYMBOL(bio_add_page); 1674 - EXPORT_SYMBOL(bio_add_pc_page); 1675 - EXPORT_SYMBOL(bio_get_nr_vecs); 1676 - EXPORT_SYMBOL(bio_map_user); 1677 - EXPORT_SYMBOL(bio_unmap_user); 1678 - EXPORT_SYMBOL(bio_map_kern); 1679 - EXPORT_SYMBOL(bio_copy_kern); 1680 - EXPORT_SYMBOL(bio_pair_release); 1681 - EXPORT_SYMBOL(bio_split); 1682 - EXPORT_SYMBOL(bio_copy_user); 1683 - EXPORT_SYMBOL(bio_uncopy_user); 1684 - EXPORT_SYMBOL(bioset_create); 1685 - EXPORT_SYMBOL(bioset_free); 1686 - EXPORT_SYMBOL(bio_alloc_bioset);
+1 -11
fs/partitions/check.c
··· 248 248 part_stat_read(p, merges[WRITE]), 249 249 (unsigned long long)part_stat_read(p, sectors[WRITE]), 250 250 jiffies_to_msecs(part_stat_read(p, ticks[WRITE])), 251 - part_in_flight(p), 251 + p->in_flight, 252 252 jiffies_to_msecs(part_stat_read(p, io_ticks)), 253 253 jiffies_to_msecs(part_stat_read(p, time_in_queue))); 254 - } 255 - 256 - ssize_t part_inflight_show(struct device *dev, 257 - struct device_attribute *attr, char *buf) 258 - { 259 - struct hd_struct *p = dev_to_part(dev); 260 - 261 - return sprintf(buf, "%8u %8u\n", p->in_flight[0], p->in_flight[1]); 262 254 } 263 255 264 256 #ifdef CONFIG_FAIL_MAKE_REQUEST ··· 281 289 static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL); 282 290 static DEVICE_ATTR(alignment_offset, S_IRUGO, part_alignment_offset_show, NULL); 283 291 static DEVICE_ATTR(stat, S_IRUGO, part_stat_show, NULL); 284 - static DEVICE_ATTR(inflight, S_IRUGO, part_inflight_show, NULL); 285 292 #ifdef CONFIG_FAIL_MAKE_REQUEST 286 293 static struct device_attribute dev_attr_fail = 287 294 __ATTR(make-it-fail, S_IRUGO|S_IWUSR, part_fail_show, part_fail_store); ··· 292 301 &dev_attr_size.attr, 293 302 &dev_attr_alignment_offset.attr, 294 303 &dev_attr_stat.attr, 295 - &dev_attr_inflight.attr, 296 304 #ifdef CONFIG_FAIL_MAKE_REQUEST 297 305 &dev_attr_fail.attr, 298 306 #endif
+39 -9
include/linux/blkdev.h
··· 82 82 enum {
83 83 REQ_LB_OP_EJECT = 0x40, /* eject request */
84 84 REQ_LB_OP_FLUSH = 0x41, /* flush request */
85 - REQ_LB_OP_DISCARD = 0x42, /* discard sectors */
86 85 };
87 86 
88 87 /*
··· 260 261 typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
261 262 typedef int (prep_rq_fn) (struct request_queue *, struct request *);
262 263 typedef void (unplug_fn) (struct request_queue *);
263 - typedef int (prepare_discard_fn) (struct request_queue *, struct request *);
264 264 
265 265 struct bio_vec;
266 266 struct bvec_merge_data {
··· 311 313 unsigned int alignment_offset;
312 314 unsigned int io_min;
313 315 unsigned int io_opt;
316 + unsigned int max_discard_sectors;
314 317 
315 318 unsigned short logical_block_size;
316 319 unsigned short max_hw_segments;
··· 339 340 make_request_fn *make_request_fn;
340 341 prep_rq_fn *prep_rq_fn;
341 342 unplug_fn *unplug_fn;
342 - prepare_discard_fn *prepare_discard_fn;
343 343 merge_bvec_fn *merge_bvec_fn;
344 344 prepare_flush_fn *prepare_flush_fn;
345 345 softirq_done_fn *softirq_done_fn;
··· 458 460 #define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */
459 461 #define QUEUE_FLAG_IO_STAT 15 /* do IO stats */
460 462 #define QUEUE_FLAG_CQ 16 /* hardware does queuing */
463 + #define QUEUE_FLAG_DISCARD 17 /* supports DISCARD */
461 464 
462 465 #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
463 466 (1 << QUEUE_FLAG_CLUSTER) | \
··· 590 591 #define blk_queue_flushing(q) ((q)->ordseq)
591 592 #define blk_queue_stackable(q) \
592 593 test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
594 + #define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
593 595 
594 596 #define blk_fs_request(rq) ((rq)->cmd_type == REQ_TYPE_FS)
595 597 #define blk_pc_request(rq) ((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
··· 929 929 extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
930 930 extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
931 931 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
932 + extern void blk_queue_max_discard_sectors(struct request_queue *q,
933 + unsigned int max_discard_sectors);
932 934 extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
933 935 extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
934 936 extern void blk_queue_alignment_offset(struct request_queue *q,
··· 957 955 extern void blk_queue_dma_alignment(struct request_queue *, int);
958 956 extern void blk_queue_update_dma_alignment(struct request_queue *, int);
959 957 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
960 - extern void blk_queue_set_discard(struct request_queue *, prepare_discard_fn *);
961 958 extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
962 959 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
963 960 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
··· 1081 1080 return q->limits.physical_block_size;
1082 1081 }
1083 1082 
1083 + static inline int bdev_physical_block_size(struct block_device *bdev)
1084 + {
1085 + return queue_physical_block_size(bdev_get_queue(bdev));
1086 + }
1087 + 
1084 1088 static inline unsigned int queue_io_min(struct request_queue *q)
1085 1089 {
1086 1090 return q->limits.io_min;
1091 + }
1092 + 
1093 + static inline int bdev_io_min(struct block_device *bdev)
1094 + {
1095 + return queue_io_min(bdev_get_queue(bdev));
1087 1096 }
1088 1097 
1089 1098 static inline unsigned int queue_io_opt(struct request_queue *q)
··· 1101 1090 return q->limits.io_opt;
1102 1091 }
1103 1092 
1093 + static inline int bdev_io_opt(struct block_device *bdev)
1094 + {
1095 + return queue_io_opt(bdev_get_queue(bdev));
1096 + }
1097 + 
1104 1098 static inline int queue_alignment_offset(struct request_queue *q)
1105 1099 {
1106 - if (q && q->limits.misaligned)
1100 + if (q->limits.misaligned)
1107 1101 return -1;
1108 1102 
1109 - if (q && q->limits.alignment_offset)
1110 - return q->limits.alignment_offset;
1111 - 
1112 - return 0;
1103 + return q->limits.alignment_offset;
1113 1104 }
1114 1105 
1115 1106 static inline int queue_sector_alignment_offset(struct request_queue *q,
··· 1119 1106 {
1120 1107 return ((sector << 9) - q->limits.alignment_offset)
1121 1108 & (q->limits.io_min - 1);
1109 + }
1110 + 
1111 + static inline int bdev_alignment_offset(struct block_device *bdev)
1112 + {
1113 + struct request_queue *q = bdev_get_queue(bdev);
1114 + 
1115 + if (q->limits.misaligned)
1116 + return -1;
1117 + 
1118 + if (bdev != bdev->bd_contains)
1119 + return bdev->bd_part->alignment_offset;
1120 + 
1121 + return q->limits.alignment_offset;
1122 1122 }
1123 1123 
1124 1124 static inline int queue_dma_alignment(struct request_queue *q)
··· 1172 1146 }
1173 1147 
1174 1148 struct work_struct;
1149 + struct delayed_work;
1175 1150 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
1151 + int kblockd_schedule_delayed_work(struct request_queue *q,
1152 + struct delayed_work *work,
1153 + unsigned long delay);
1176 1154 
1177 1155 #define MODULE_ALIAS_BLOCKDEV(major,minor) \
1178 1156 MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
+2
include/linux/blktrace_api.h
··· 198 198 char __user *arg); 199 199 extern int blk_trace_startstop(struct request_queue *q, int start); 200 200 extern int blk_trace_remove(struct request_queue *q); 201 + extern void blk_trace_remove_sysfs(struct device *dev); 201 202 extern int blk_trace_init_sysfs(struct device *dev); 202 203 203 204 extern struct attribute_group blk_trace_attr_group; ··· 212 211 # define blk_trace_startstop(q, start) (-ENOTTY) 213 212 # define blk_trace_remove(q) (-ENOTTY) 214 213 # define blk_add_trace_msg(q, fmt, ...) do { } while (0) 214 + # define blk_trace_remove_sysfs(dev) do { } while (0) 215 215 static inline int blk_trace_init_sysfs(struct device *dev) 216 216 { 217 217 return 0;
+4
include/linux/fs.h
··· 300 300 #define BLKTRACESTOP _IO(0x12,117) 301 301 #define BLKTRACETEARDOWN _IO(0x12,118) 302 302 #define BLKDISCARD _IO(0x12,119) 303 + #define BLKIOMIN _IO(0x12,120) 304 + #define BLKIOOPT _IO(0x12,121) 305 + #define BLKALIGNOFF _IO(0x12,122) 306 + #define BLKPBSZGET _IO(0x12,123) 303 307 304 308 #define BMAP_IOCTL 1 /* obsolete - kept for compatibility */ 305 309 #define FIBMAP _IO(0x00,1) /* bmap access */
+7 -14
include/linux/genhd.h
··· 98 98 int make_it_fail; 99 99 #endif 100 100 unsigned long stamp; 101 - int in_flight[2]; 101 + int in_flight; 102 102 #ifdef CONFIG_SMP 103 103 struct disk_stats *dkstats; 104 104 #else ··· 322 322 #define part_stat_sub(cpu, gendiskp, field, subnd) \ 323 323 part_stat_add(cpu, gendiskp, field, -subnd) 324 324 325 - static inline void part_inc_in_flight(struct hd_struct *part, int rw) 325 + static inline void part_inc_in_flight(struct hd_struct *part) 326 326 { 327 - part->in_flight[rw]++; 327 + part->in_flight++; 328 328 if (part->partno) 329 - part_to_disk(part)->part0.in_flight[rw]++; 329 + part_to_disk(part)->part0.in_flight++; 330 330 } 331 331 332 - static inline void part_dec_in_flight(struct hd_struct *part, int rw) 332 + static inline void part_dec_in_flight(struct hd_struct *part) 333 333 { 334 - part->in_flight[rw]--; 334 + part->in_flight--; 335 335 if (part->partno) 336 - part_to_disk(part)->part0.in_flight[rw]--; 337 - } 338 - 339 - static inline int part_in_flight(struct hd_struct *part) 340 - { 341 - return part->in_flight[0] + part->in_flight[1]; 336 + part_to_disk(part)->part0.in_flight--; 342 337 } 343 338 344 339 /* block/blk-core.c */ ··· 545 550 extern ssize_t part_size_show(struct device *dev, 546 551 struct device_attribute *attr, char *buf); 547 552 extern ssize_t part_stat_show(struct device *dev, 548 - struct device_attribute *attr, char *buf); 549 - extern ssize_t part_inflight_show(struct device *dev, 550 553 struct device_attribute *attr, char *buf); 551 554 #ifdef CONFIG_FAIL_MAKE_REQUEST 552 555 extern ssize_t part_fail_show(struct device *dev,
+33
include/trace/events/block.h
··· 488 488 (unsigned long long)__entry->old_sector) 489 489 ); 490 490 491 + TRACE_EVENT(block_rq_remap, 492 + 493 + TP_PROTO(struct request_queue *q, struct request *rq, dev_t dev, 494 + sector_t from), 495 + 496 + TP_ARGS(q, rq, dev, from), 497 + 498 + TP_STRUCT__entry( 499 + __field( dev_t, dev ) 500 + __field( sector_t, sector ) 501 + __field( unsigned int, nr_sector ) 502 + __field( dev_t, old_dev ) 503 + __field( sector_t, old_sector ) 504 + __array( char, rwbs, 6 ) 505 + ), 506 + 507 + TP_fast_assign( 508 + __entry->dev = disk_devt(rq->rq_disk); 509 + __entry->sector = blk_rq_pos(rq); 510 + __entry->nr_sector = blk_rq_sectors(rq); 511 + __entry->old_dev = dev; 512 + __entry->old_sector = from; 513 + blk_fill_rwbs_rq(__entry->rwbs, rq); 514 + ), 515 + 516 + TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu", 517 + MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs, 518 + (unsigned long long)__entry->sector, 519 + __entry->nr_sector, 520 + MAJOR(__entry->old_dev), MINOR(__entry->old_dev), 521 + (unsigned long long)__entry->old_sector) 522 + ); 523 + 491 524 #endif /* _TRACE_BLOCK_H */ 492 525 493 526 /* This part must be outside protection */
+39
kernel/trace/blktrace.c
··· 856 856 } 857 857 858 858 /** 859 + * blk_add_trace_rq_remap - Add a trace for a request-remap operation 860 + * @q: queue the io is for 861 + * @rq: the source request 862 + * @dev: target device 863 + * @from: source sector 864 + * 865 + * Description: 866 + * Device mapper remaps request to other devices. 867 + * Add a trace for that action. 868 + * 869 + **/ 870 + static void blk_add_trace_rq_remap(struct request_queue *q, 871 + struct request *rq, dev_t dev, 872 + sector_t from) 873 + { 874 + struct blk_trace *bt = q->blk_trace; 875 + struct blk_io_trace_remap r; 876 + 877 + if (likely(!bt)) 878 + return; 879 + 880 + r.device_from = cpu_to_be32(dev); 881 + r.device_to = cpu_to_be32(disk_devt(rq->rq_disk)); 882 + r.sector_from = cpu_to_be64(from); 883 + 884 + __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), 885 + rq_data_dir(rq), BLK_TA_REMAP, !!rq->errors, 886 + sizeof(r), &r); 887 + } 888 + 889 + /** 859 890 * blk_add_driver_data - Add binary message with driver-specific data 860 891 * @q: queue the io is for 861 892 * @rq: io request ··· 953 922 WARN_ON(ret); 954 923 ret = register_trace_block_remap(blk_add_trace_remap); 955 924 WARN_ON(ret); 925 + ret = register_trace_block_rq_remap(blk_add_trace_rq_remap); 926 + WARN_ON(ret); 956 927 } 957 928 958 929 static void blk_unregister_tracepoints(void) 959 930 { 931 + unregister_trace_block_rq_remap(blk_add_trace_rq_remap); 960 932 unregister_trace_block_remap(blk_add_trace_remap); 961 933 unregister_trace_block_split(blk_add_trace_split); 962 934 unregister_trace_block_unplug_io(blk_add_trace_unplug_io); ··· 1689 1655 int blk_trace_init_sysfs(struct device *dev) 1690 1656 { 1691 1657 return sysfs_create_group(&dev->kobj, &blk_trace_attr_group); 1658 + } 1659 + 1660 + void blk_trace_remove_sysfs(struct device *dev) 1661 + { 1662 + sysfs_remove_group(&dev->kobj, &blk_trace_attr_group); 1692 1663 } 1693 1664 1694 1665 #endif /* CONFIG_BLK_DEV_IO_TRACE */
+7 -5
mm/swapfile.c
··· 1974 1974 goto bad_swap; 1975 1975 } 1976 1976 1977 - if (blk_queue_nonrot(bdev_get_queue(p->bdev))) { 1978 - p->flags |= SWP_SOLIDSTATE; 1979 - p->cluster_next = 1 + (random32() % p->highest_bit); 1977 + if (p->bdev) { 1978 + if (blk_queue_nonrot(bdev_get_queue(p->bdev))) { 1979 + p->flags |= SWP_SOLIDSTATE; 1980 + p->cluster_next = 1 + (random32() % p->highest_bit); 1981 + } 1982 + if (discard_swap(p) == 0) 1983 + p->flags |= SWP_DISCARDABLE; 1980 1984 } 1981 - if (discard_swap(p) == 0) 1982 - p->flags |= SWP_DISCARDABLE; 1983 1985 1984 1986 mutex_lock(&swapon_mutex); 1985 1987 spin_lock(&swap_lock);