Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v2.6.24-rc3 (1224 lines, 32 kB)
/*
 * Block OSM
 *
 * Copyright (C) 1999-2002 Red Hat Software
 *
 * Written by Alan Cox, Building Number Three Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * For the purpose of avoiding doubt the preferred form of the work
 * for making modifications shall be a standards compliant form such
 * as gzipped tar and not one requiring a proprietary or patent encumbered
 * tool to unpack.
 *
 * Fixes/additions:
 *	Steve Ralston:
 *		Multiple device handling error fixes,
 *		Added a queue depth.
 *	Alan Cox:
 *		FC920 has an rmw bug. Don't OR in the end marker.
 *		Removed queue walk, fixed for 64bitness.
 *		Rewrote much of the code over time
 *		Added indirect block lists
 *		Handle 64K limits on many controllers
 *		Don't use indirects on the Promise (breaks)
 *		Heavily chop down the queue depths
 *	Deepak Saxena:
 *		Independent queues per IOP
 *		Support for dynamic device creation/deletion
 *		Code cleanup
 *		Support for larger I/Os through merge* functions
 *		(taken from DAC960 driver)
 *	Boji T Kannanthanam:
 *		Set the I2O Block devices to be detected in increasing
 *		order of TIDs during boot.
 *		Search and set the I2O block device that we boot off
 *		from as the first device to be claimed (as /dev/i2o/hda)
 *		Properly attach/detach I2O gendisk structure from the
 *		system gendisk list. The I2O block devices now appear in
 *		/proc/partitions.
 *	Markus Lidel <Markus.Lidel@shadowconnect.com>:
 *		Minor bugfixes for 2.6.
 */

#include <linux/module.h>
#include <linux/i2o.h>

#include <linux/mempool.h>

#include <linux/genhd.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>

#include <scsi/scsi.h>

#include "i2o_block.h"

#define OSM_NAME	"block-osm"
#define OSM_VERSION	"1.325"
#define OSM_DESCRIPTION	"I2O Block Device OSM"

static struct i2o_driver i2o_block_driver;

/* global Block OSM request mempool */
static struct i2o_block_mempool i2o_blk_req_pool;

/* Block OSM class handling definition */
static struct i2o_class_id i2o_block_class_id[] = {
	{I2O_CLASS_RANDOM_BLOCK_STORAGE},
	{I2O_CLASS_END}
};

/**
 * i2o_block_device_free - free the memory of the I2O Block device
 * @dev: I2O Block device, which should be cleaned up
 *
 * Frees the request queue, gendisk and the i2o_block_device structure.
 */
static void i2o_block_device_free(struct i2o_block_device *dev)
{
	blk_cleanup_queue(dev->gd->queue);

	put_disk(dev->gd);

	kfree(dev);
};

/**
 * i2o_block_remove - remove the I2O Block device from the system again
 * @dev: I2O Block device which should be removed
 *
 * Remove gendisk from system and free all allocated memory.
 *
 * Always returns 0.
 */
static int i2o_block_remove(struct device *dev)
{
	struct i2o_device *i2o_dev = to_i2o_device(dev);
	struct i2o_block_device *i2o_blk_dev = dev_get_drvdata(dev);

	osm_info("device removed (TID: %03x): %s\n", i2o_dev->lct_data.tid,
		 i2o_blk_dev->gd->disk_name);

	i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0);

	del_gendisk(i2o_blk_dev->gd);

	dev_set_drvdata(dev, NULL);

	i2o_device_claim_release(i2o_dev);

	i2o_block_device_free(i2o_blk_dev);

	return 0;
};

/**
 * i2o_block_device_flush - Flush all dirty data of I2O device dev
 * @dev: I2O device which should be flushed
 *
 * Flushes all dirty data on device dev.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_flush(struct i2o_device *dev)
{
	struct i2o_message *msg;

	msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
	msg->u.head[1] =
	    cpu_to_le32(I2O_CMD_BLOCK_CFLUSH << 24 | HOST_TID << 12 | dev->
			lct_data.tid);
	msg->body[0] = cpu_to_le32(60 << 16);
	osm_debug("Flushing...\n");

	return i2o_msg_post_wait(dev->iop, msg, 60);
};

/**
 * i2o_block_device_mount - Mount (load) the media of device dev
 * @dev: I2O device which should receive the mount request
 * @media_id: Media Identifier
 *
 * Load a media into drive. Identifier should be set to -1, because the
 * spec does not support any other value.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id)
{
	struct i2o_message *msg;

	msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
	msg->u.head[1] =
	    cpu_to_le32(I2O_CMD_BLOCK_MMOUNT << 24 | HOST_TID << 12 | dev->
			lct_data.tid);
	msg->body[0] = cpu_to_le32(-1);
	msg->body[1] = cpu_to_le32(0x00000000);
	osm_debug("Mounting...\n");

	return i2o_msg_post_wait(dev->iop, msg, 2);
};

/**
 * i2o_block_device_lock - Locks the media of device dev
 * @dev: I2O device which should receive the lock request
 * @media_id: Media Identifier
 *
 * Lock media of device dev to prevent removal. The media identifier
 * should be set to -1, because the spec does not support any other value.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id)
{
	struct i2o_message *msg;

	msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
	msg->u.head[1] =
	    cpu_to_le32(I2O_CMD_BLOCK_MLOCK << 24 | HOST_TID << 12 | dev->
			lct_data.tid);
	msg->body[0] = cpu_to_le32(-1);
	osm_debug("Locking...\n");

	return i2o_msg_post_wait(dev->iop, msg, 2);
};

/**
 * i2o_block_device_unlock - Unlocks the media of device dev
 * @dev: I2O device which should receive the unlock request
 * @media_id: Media Identifier
 *
 * Unlocks the media in device dev. The media identifier should be set to
 * -1, because the spec does not support any other value.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_unlock(struct i2o_device *dev, u32 media_id)
{
	struct i2o_message *msg;

	msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
	msg->u.head[1] =
	    cpu_to_le32(I2O_CMD_BLOCK_MUNLOCK << 24 | HOST_TID << 12 | dev->
			lct_data.tid);
	msg->body[0] = cpu_to_le32(media_id);
	osm_debug("Unlocking...\n");

	return i2o_msg_post_wait(dev->iop, msg, 2);
};

/**
 * i2o_block_device_power - Power management for device dev
 * @dev: I2O device which should receive the power management request
 * @op: Operation to send
 *
 * Send a power management request to the device dev.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_power(struct i2o_block_device *dev, u8 op)
{
	struct i2o_device *i2o_dev = dev->i2o_dev;
	struct i2o_controller *c = i2o_dev->iop;
	struct i2o_message *msg;
	int rc;

	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
	msg->u.head[1] =
	    cpu_to_le32(I2O_CMD_BLOCK_POWER << 24 | HOST_TID << 12 | i2o_dev->
			lct_data.tid);
	msg->body[0] = cpu_to_le32(op << 24);
	osm_debug("Power...\n");

	rc = i2o_msg_post_wait(c, msg, 60);
	if (!rc)
		dev->power = op;

	return rc;
};

/**
 * i2o_block_request_alloc - Allocate an I2O block request struct
 *
 * Allocates an I2O block request struct and initializes the list.
 *
 * Returns an i2o_block_request pointer on success or negative error code
 * on failure.
 */
static inline struct i2o_block_request *i2o_block_request_alloc(void)
{
	struct i2o_block_request *ireq;

	ireq = mempool_alloc(i2o_blk_req_pool.pool, GFP_ATOMIC);
	if (!ireq)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ireq->queue);
	sg_init_table(ireq->sg_table, I2O_MAX_PHYS_SEGMENTS);

	return ireq;
};

/**
 * i2o_block_request_free - Frees an I2O block request
 * @ireq: I2O block request which should be freed
 *
 * Frees the allocated memory (gives it back to the request mempool).
 */
static inline void i2o_block_request_free(struct i2o_block_request *ireq)
{
	mempool_free(ireq, i2o_blk_req_pool.pool);
};

/**
 * i2o_block_sglist_alloc - Allocate the SG list and map it
 * @c: I2O controller to which the request belongs
 * @ireq: I2O block request
 * @mptr: message body pointer
 *
 * Builds the SG list and maps it to be accessible by the controller.
 *
 * Returns 0 on failure or 1 on success.
 */
static inline int i2o_block_sglist_alloc(struct i2o_controller *c,
					 struct i2o_block_request *ireq,
					 u32 ** mptr)
{
	int nents;
	enum dma_data_direction direction;

	ireq->dev = &c->pdev->dev;
	nents = blk_rq_map_sg(ireq->req->q, ireq->req, ireq->sg_table);

	if (rq_data_dir(ireq->req) == READ)
		direction = PCI_DMA_FROMDEVICE;
	else
		direction = PCI_DMA_TODEVICE;

	ireq->sg_nents = nents;

	return i2o_dma_map_sg(c, ireq->sg_table, nents, direction, mptr);
};

/**
 * i2o_block_sglist_free - Frees the SG list
 * @ireq: I2O block request from which the SG should be freed
 *
 * Frees the SG list from the I2O block request.
 */
static inline void i2o_block_sglist_free(struct i2o_block_request *ireq)
{
	enum dma_data_direction direction;

	if (rq_data_dir(ireq->req) == READ)
		direction = PCI_DMA_FROMDEVICE;
	else
		direction = PCI_DMA_TODEVICE;

	dma_unmap_sg(ireq->dev, ireq->sg_table, ireq->sg_nents, direction);
};

/**
 * i2o_block_prep_req_fn - Allocates I2O block device specific struct
 * @q: request queue for the request
 * @req: the request to prepare
 *
 * Allocate the necessary i2o_block_request struct and connect it to
 * the request. This is needed so that we do not lose the SG list later on.
 *
 * Returns BLKPREP_OK on success or BLKPREP_DEFER on failure.
 */
static int i2o_block_prep_req_fn(struct request_queue *q, struct request *req)
{
	struct i2o_block_device *i2o_blk_dev = q->queuedata;
	struct i2o_block_request *ireq;

	if (unlikely(!i2o_blk_dev)) {
		osm_err("block device already removed\n");
		return BLKPREP_KILL;
	}

	/* connect the i2o_block_request to the request */
	if (!req->special) {
		ireq = i2o_block_request_alloc();
		if (unlikely(IS_ERR(ireq))) {
			osm_debug("unable to allocate i2o_block_request!\n");
			return BLKPREP_DEFER;
		}

		ireq->i2o_blk_dev = i2o_blk_dev;
		req->special = ireq;
		ireq->req = req;
	}
	/* do not come back here */
	req->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
};

/**
 * i2o_block_delayed_request_fn - delayed request queue function
 * @work: the delayed request with the queue to start
 *
 * If the request queue is stopped for a disk, and there is no open
 * request, a new event is created, which calls this function to start
 * the queue after I2O_BLOCK_REQUEST_TIME. Otherwise the queue will never
 * be started again.
 */
static void i2o_block_delayed_request_fn(struct work_struct *work)
{
	struct i2o_block_delayed_request *dreq =
	    container_of(work, struct i2o_block_delayed_request,
			 work.work);
	struct request_queue *q = dreq->queue;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
	kfree(dreq);
};

/**
 * i2o_block_end_request - Post-processing of completed commands
 * @req: request which should be completed
 * @uptodate: 1 for success, 0 for I/O error, < 0 for specific error
 * @nr_bytes: number of bytes to complete
 *
 * Mark the request as complete. The lock must not be held when entering.
 *
 */
static void i2o_block_end_request(struct request *req, int uptodate,
				  int nr_bytes)
{
	struct i2o_block_request *ireq = req->special;
	struct i2o_block_device *dev = ireq->i2o_blk_dev;
	struct request_queue *q = req->q;
	unsigned long flags;

	if (end_that_request_chunk(req, uptodate, nr_bytes)) {
		int leftover = (req->hard_nr_sectors << KERNEL_SECTOR_SHIFT);

		if (blk_pc_request(req))
			leftover = req->data_len;

		if (end_io_error(uptodate))
			end_that_request_chunk(req, 0, leftover);
	}

	add_disk_randomness(req->rq_disk);

	spin_lock_irqsave(q->queue_lock, flags);

	end_that_request_last(req, uptodate);

	if (likely(dev)) {
		dev->open_queue_depth--;
		list_del(&ireq->queue);
	}

	blk_start_queue(q);

	spin_unlock_irqrestore(q->queue_lock, flags);

	i2o_block_sglist_free(ireq);
	i2o_block_request_free(ireq);
};

/**
 * i2o_block_reply - Block OSM reply handler.
 * @c: I2O controller from which the message arrives
 * @m: message id of reply
 * @msg: the actual I2O message reply
 *
 * This function gets all the message replies.
 *
 */
static int i2o_block_reply(struct i2o_controller *c, u32 m,
			   struct i2o_message *msg)
{
	struct request *req;
	int uptodate = 1;

	req = i2o_cntxt_list_get(c, le32_to_cpu(msg->u.s.tcntxt));
	if (unlikely(!req)) {
		osm_err("NULL reply received!\n");
		return -1;
	}

	/*
	 * Let's see what is cooking. We stuffed the
	 * request in the context.
	 */

	if ((le32_to_cpu(msg->body[0]) >> 24) != 0) {
		u32 status = le32_to_cpu(msg->body[0]);
		/*
		 * Device not ready means two things. One is that the
		 * thing went offline (but not a removable media).
		 *
		 * The second is that you have a SuperTrak 100 and the
		 * firmware got constipated. Unlike standard i2o card
		 * setups the supertrak returns an error rather than
		 * blocking for the timeout in these cases.
		 *
		 * Don't stick a supertrak100 into cache aggressive modes
		 */

		osm_err("TID %03x error status: 0x%02x, detailed status: "
			"0x%04x\n", (le32_to_cpu(msg->u.head[1]) >> 12 & 0xfff),
			status >> 24, status & 0xffff);

		req->errors++;

		uptodate = 0;
	}

	i2o_block_end_request(req, uptodate, le32_to_cpu(msg->body[1]));

	return 1;
};

static void i2o_block_event(struct work_struct *work)
{
	struct i2o_event *evt = container_of(work, struct i2o_event, work);
	osm_debug("event received\n");
	kfree(evt);
};

/*
 * SCSI-CAM for ioctl geometry mapping
 * Duplicated with SCSI - this should be moved into somewhere common
 * perhaps genhd ?
 *
 * LBA -> CHS mapping table taken from:
 *
 * "Incorporating the I2O Architecture into BIOS for Intel Architecture
 *  Platforms"
 *
 * This is an I2O document that is only available to I2O members,
 * not developers.
 *
 * From my understanding, this is how all the I2O cards do this
 *
 * Disk Size      | Sectors | Heads | Cylinders
 * ---------------+---------+-------+-------------------
 * 1 < X <= 528M  |    63   |   16  | X/(63 * 16 * 512)
 * 528M < X <= 1G |    63   |   32  | X/(63 * 32 * 512)
 * 1G < X <= 21G  |    63   |   64  | X/(63 * 64 * 512)
 * 21G < X <= 42G |    63   |  128  | X/(63 * 128 * 512)
 * 42G < X        |    63   |  255  | X/(63 * 255 * 512)
 *
 */
#define BLOCK_SIZE_528M		1081344
#define BLOCK_SIZE_1G		2097152
#define BLOCK_SIZE_21G		4403200
#define BLOCK_SIZE_42G		8806400
#define BLOCK_SIZE_84G		17612800

static void i2o_block_biosparam(unsigned long capacity, unsigned short *cyls,
				unsigned char *hds, unsigned char *secs)
{
	unsigned long heads, sectors, cylinders;

	sectors = 63L;		/* Maximize sectors per track */
	if (capacity <= BLOCK_SIZE_528M)
		heads = 16;
	else if (capacity <= BLOCK_SIZE_1G)
		heads = 32;
	else if (capacity <= BLOCK_SIZE_21G)
		heads = 64;
	else if (capacity <= BLOCK_SIZE_42G)
		heads = 128;
	else
		heads = 255;

	cylinders = (unsigned long)capacity / (heads * sectors);

	*cyls = (unsigned short)cylinders;	/* Stuff return values */
	*secs = (unsigned char)sectors;
	*hds = (unsigned char)heads;
}

/**
 * i2o_block_open - Open the block device
 * @inode: inode for block device being opened
 * @file: file to open
 *
 * Power up the device, mount and lock the media. This function is called
 * if the block device is opened for access.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_open(struct inode *inode, struct file *file)
{
	struct i2o_block_device *dev = inode->i_bdev->bd_disk->private_data;

	if (!dev->i2o_dev)
		return -ENODEV;

	if (dev->power > 0x1f)
		i2o_block_device_power(dev, 0x02);

	i2o_block_device_mount(dev->i2o_dev, -1);

	i2o_block_device_lock(dev->i2o_dev, -1);

	osm_debug("Ready.\n");

	return 0;
};

/**
 * i2o_block_release - Release the I2O block device
 * @inode: inode for block device being released
 * @file: file to close
 *
 * Unlock and unmount the media, and power down the device. Gets called if
 * the block device is closed.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_release(struct inode *inode, struct file *file)
{
	struct gendisk *disk = inode->i_bdev->bd_disk;
	struct i2o_block_device *dev = disk->private_data;
	u8 operation;

	/*
	 * This is to deal with the case of an application
	 * opening a device and then the device disappears while
	 * it's in use, and then the application tries to release
	 * it. ex: Unmounting a deleted RAID volume at reboot.
	 * If we send messages, it will just cause FAILs since
	 * the TID no longer exists.
	 */
	if (!dev->i2o_dev)
		return 0;

	i2o_block_device_flush(dev->i2o_dev);

	i2o_block_device_unlock(dev->i2o_dev, -1);

	if (dev->flags & (1 << 3 | 1 << 4))	/* Removable */
		operation = 0x21;
	else
		operation = 0x24;

	i2o_block_device_power(dev, operation);

	return 0;
}

static int i2o_block_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	i2o_block_biosparam(get_capacity(bdev->bd_disk),
			    &geo->cylinders, &geo->heads, &geo->sectors);
	return 0;
}

/**
 * i2o_block_ioctl - Issue device specific ioctl calls.
 * @inode: inode for block device ioctl
 * @file: file for ioctl
 * @cmd: ioctl command
 * @arg: arg
 *
 * Handles ioctl request for the block device.
 *
 * Return 0 on success or negative error on failure.
 */
static int i2o_block_ioctl(struct inode *inode, struct file *file,
			   unsigned int cmd, unsigned long arg)
{
	struct gendisk *disk = inode->i_bdev->bd_disk;
	struct i2o_block_device *dev = disk->private_data;

	/* Anyone capable of this syscall can do *real bad* things */

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (cmd) {
	case BLKI2OGRSTRAT:
		return put_user(dev->rcache, (int __user *)arg);
	case BLKI2OGWSTRAT:
		return put_user(dev->wcache, (int __user *)arg);
	case BLKI2OSRSTRAT:
		if (arg < 0 || arg > CACHE_SMARTFETCH)
			return -EINVAL;
		dev->rcache = arg;
		break;
	case BLKI2OSWSTRAT:
		if (arg != 0
		    && (arg < CACHE_WRITETHROUGH || arg > CACHE_SMARTBACK))
			return -EINVAL;
		dev->wcache = arg;
		break;
	}
	return -ENOTTY;
};

/**
 * i2o_block_media_changed - Have we seen a media change?
 * @disk: gendisk which should be verified
 *
 * Verifies if the media has changed.
 *
 * Returns 1 if the media was changed or 0 otherwise.
 */
static int i2o_block_media_changed(struct gendisk *disk)
{
	struct i2o_block_device *p = disk->private_data;

	if (p->media_change_flag) {
		p->media_change_flag = 0;
		return 1;
	}
	return 0;
}

/**
 * i2o_block_transfer - Transfer a request to/from the I2O controller
 * @req: the request which should be transferred
 *
 * This function converts the request into an I2O message. The necessary
 * DMA buffers are allocated and, after everything is set up, the message
 * is posted to the I2O controller. No cleanup is done by this function;
 * it is done on the interrupt side when the reply arrives.
 *
 * Return 0 on success or negative error code on failure.
 */
static int i2o_block_transfer(struct request *req)
{
	struct i2o_block_device *dev = req->rq_disk->private_data;
	struct i2o_controller *c;
	u32 tid = dev->i2o_dev->lct_data.tid;
	struct i2o_message *msg;
	u32 *mptr;
	struct i2o_block_request *ireq = req->special;
	u32 tcntxt;
	u32 sgl_offset = SGL_OFFSET_8;
	u32 ctl_flags = 0x00000000;
	int rc;
	u32 cmd;

	if (unlikely(!dev->i2o_dev)) {
		osm_err("transfer to removed drive\n");
		rc = -ENODEV;
		goto exit;
	}

	c = dev->i2o_dev->iop;

	msg = i2o_msg_get(c);
	if (IS_ERR(msg)) {
		rc = PTR_ERR(msg);
		goto exit;
	}

	tcntxt = i2o_cntxt_list_add(c, req);
	if (!tcntxt) {
		rc = -ENOMEM;
		goto nop_msg;
	}

	msg->u.s.icntxt = cpu_to_le32(i2o_block_driver.context);
	msg->u.s.tcntxt = cpu_to_le32(tcntxt);

	mptr = &msg->body[0];

	if (rq_data_dir(req) == READ) {
		cmd = I2O_CMD_BLOCK_READ << 24;

		switch (dev->rcache) {
		case CACHE_PREFETCH:
			ctl_flags = 0x201F0008;
			break;

		case CACHE_SMARTFETCH:
			if (req->nr_sectors > 16)
				ctl_flags = 0x201F0008;
			else
				ctl_flags = 0x001F0000;
			break;

		default:
			break;
		}
	} else {
		cmd = I2O_CMD_BLOCK_WRITE << 24;

		switch (dev->wcache) {
		case CACHE_WRITETHROUGH:
			ctl_flags = 0x001F0008;
			break;
		case CACHE_WRITEBACK:
			ctl_flags = 0x001F0010;
			break;
		case CACHE_SMARTBACK:
			if (req->nr_sectors > 16)
				ctl_flags = 0x001F0004;
			else
				ctl_flags = 0x001F0010;
			break;
		case CACHE_SMARTTHROUGH:
			if (req->nr_sectors > 16)
				ctl_flags = 0x001F0004;
			else
				ctl_flags = 0x001F0010;
		default:
			break;
		}
	}

#ifdef CONFIG_I2O_EXT_ADAPTEC
	if (c->adaptec) {
		u8 cmd[10];
		u32 scsi_flags;
		u16 hwsec = queue_hardsect_size(req->q) >> KERNEL_SECTOR_SHIFT;

		memset(cmd, 0, 10);

		sgl_offset = SGL_OFFSET_12;

		msg->u.head[1] =
		    cpu_to_le32(I2O_CMD_PRIVATE << 24 | HOST_TID << 12 | tid);

		*mptr++ = cpu_to_le32(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC);
		*mptr++ = cpu_to_le32(tid);

		/*
		 * ENABLE_DISCONNECT
		 * SIMPLE_TAG
		 * RETURN_SENSE_DATA_IN_REPLY_MESSAGE_FRAME
		 */
		if (rq_data_dir(req) == READ) {
			cmd[0] = READ_10;
			scsi_flags = 0x60a0000a;
		} else {
			cmd[0] = WRITE_10;
			scsi_flags = 0xa0a0000a;
		}

		*mptr++ = cpu_to_le32(scsi_flags);

		*((u32 *) & cmd[2]) = cpu_to_be32(req->sector * hwsec);
		*((u16 *) & cmd[7]) = cpu_to_be16(req->nr_sectors * hwsec);

		memcpy(mptr, cmd, 10);
		mptr += 4;
		*mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT);
	} else
#endif
	{
		msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid);
		*mptr++ = cpu_to_le32(ctl_flags);
		*mptr++ = cpu_to_le32(req->nr_sectors << KERNEL_SECTOR_SHIFT);
		*mptr++ =
		    cpu_to_le32((u32) (req->sector << KERNEL_SECTOR_SHIFT));
		*mptr++ =
		    cpu_to_le32(req->sector >> (32 - KERNEL_SECTOR_SHIFT));
	}

	if (!i2o_block_sglist_alloc(c, ireq, &mptr)) {
		rc = -ENOMEM;
		goto context_remove;
	}

	msg->u.head[0] =
	    cpu_to_le32(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset);

	list_add_tail(&ireq->queue, &dev->open_queue);
	dev->open_queue_depth++;

	i2o_msg_post(c, msg);

	return 0;

      context_remove:
	i2o_cntxt_list_remove(c, req);

      nop_msg:
	i2o_msg_nop(c, msg);

      exit:
	return rc;
};

/**
 * i2o_block_request_fn - request queue handling function
 * @q: request queue from which the request could be fetched
 *
 * Takes the next request from the queue, transfers it and, if no error
 * occurs, dequeues it from the queue. On arrival of the reply the message
 * will be processed further. If an error occurs the request is requeued.
 */
static void i2o_block_request_fn(struct request_queue *q)
{
	struct request *req;

	while (!blk_queue_plugged(q)) {
		req = elv_next_request(q);
		if (!req)
			break;

		if (blk_fs_request(req)) {
			struct i2o_block_delayed_request *dreq;
			struct i2o_block_request *ireq = req->special;
			unsigned int queue_depth;

			queue_depth = ireq->i2o_blk_dev->open_queue_depth;

			if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) {
				if (!i2o_block_transfer(req)) {
					blkdev_dequeue_request(req);
					continue;
				} else
					osm_info("transfer error\n");
			}

			if (queue_depth)
				break;

			/* stop the queue and retry later */
			dreq = kmalloc(sizeof(*dreq), GFP_ATOMIC);
			if (!dreq)
				continue;

			dreq->queue = q;
			INIT_DELAYED_WORK(&dreq->work,
					  i2o_block_delayed_request_fn);

			if (!queue_delayed_work(i2o_block_driver.event_queue,
						&dreq->work,
						I2O_BLOCK_RETRY_TIME))
				kfree(dreq);
			else {
				blk_stop_queue(q);
				break;
			}
		} else
			end_request(req, 0);
	}
};

/* I2O Block device operations definition */
static struct block_device_operations i2o_block_fops = {
	.owner = THIS_MODULE,
	.open = i2o_block_open,
	.release = i2o_block_release,
	.ioctl = i2o_block_ioctl,
	.getgeo = i2o_block_getgeo,
	.media_changed = i2o_block_media_changed
};

/**
 * i2o_block_device_alloc - Allocate memory for an I2O Block device
 *
 * Allocate memory for the i2o_block_device struct, gendisk and request
 * queue and initialize them as far as no additional information is needed.
 *
 * Returns a pointer to the allocated I2O Block device on success or a
 * negative error code on failure.
 */
static struct i2o_block_device *i2o_block_device_alloc(void)
{
	struct i2o_block_device *dev;
	struct gendisk *gd;
	struct request_queue *queue;
	int rc;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		osm_err("Insufficient memory to allocate I2O Block disk.\n");
		rc = -ENOMEM;
		goto exit;
	}

	INIT_LIST_HEAD(&dev->open_queue);
	spin_lock_init(&dev->lock);
	dev->rcache = CACHE_PREFETCH;
	dev->wcache = CACHE_WRITEBACK;

	/* allocate a gendisk with 16 partitions */
	gd = alloc_disk(16);
	if (!gd) {
		osm_err("Insufficient memory to allocate gendisk.\n");
		rc = -ENOMEM;
		goto cleanup_dev;
	}

	/* initialize the request queue */
	queue = blk_init_queue(i2o_block_request_fn, &dev->lock);
	if (!queue) {
		osm_err("Insufficient memory to allocate request queue.\n");
		rc = -ENOMEM;
		goto cleanup_queue;
	}

	blk_queue_prep_rq(queue, i2o_block_prep_req_fn);

	gd->major = I2O_MAJOR;
	gd->queue = queue;
	gd->fops = &i2o_block_fops;
	gd->private_data = dev;

	dev->gd = gd;

	return dev;

      cleanup_queue:
	put_disk(gd);

      cleanup_dev:
	kfree(dev);

      exit:
	return ERR_PTR(rc);
};

/**
 * i2o_block_probe - verify if dev is a I2O Block device and install it
 * @dev: device to verify if it is a I2O Block device
 *
 * We only verify if the user_tid of the device is 0xfff and then install
 * the device. Otherwise it is used by some other device (e. g. RAID).
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_probe(struct device *dev)
{
	struct i2o_device *i2o_dev = to_i2o_device(dev);
	struct i2o_controller *c = i2o_dev->iop;
	struct i2o_block_device *i2o_blk_dev;
	struct gendisk *gd;
	struct request_queue *queue;
	static int unit = 0;
	int rc;
	u64 size;
	u32 blocksize;
	u16 body_size = 4;
	u16 power;
	unsigned short max_sectors;

#ifdef CONFIG_I2O_EXT_ADAPTEC
	if (c->adaptec)
		body_size = 8;
#endif

	if (c->limit_sectors)
		max_sectors = I2O_MAX_SECTORS_LIMITED;
	else
		max_sectors = I2O_MAX_SECTORS;

	/* skip devices which are used by IOP */
	if (i2o_dev->lct_data.user_tid != 0xfff) {
		osm_debug("skipping used device %03x\n", i2o_dev->lct_data.tid);
		return -ENODEV;
	}

	if (i2o_device_claim(i2o_dev)) {
		osm_warn("Unable to claim device. Installation aborted\n");
		rc = -EFAULT;
		goto exit;
	}

	i2o_blk_dev = i2o_block_device_alloc();
	if (IS_ERR(i2o_blk_dev)) {
		osm_err("could not alloc a new I2O block device");
		rc = PTR_ERR(i2o_blk_dev);
		goto claim_release;
	}

	i2o_blk_dev->i2o_dev = i2o_dev;
	dev_set_drvdata(dev, i2o_blk_dev);

	/* setup gendisk */
	gd = i2o_blk_dev->gd;
	gd->first_minor = unit << 4;
	sprintf(gd->disk_name, "i2o/hd%c", 'a' + unit);
	gd->driverfs_dev = &i2o_dev->device;

	/* setup request queue */
	queue = gd->queue;
	queue->queuedata = i2o_blk_dev;

	blk_queue_max_phys_segments(queue, I2O_MAX_PHYS_SEGMENTS);
	blk_queue_max_sectors(queue, max_sectors);
	blk_queue_max_hw_segments(queue, i2o_sg_tablesize(c, body_size));

	osm_debug("max sectors = %d\n", queue->max_sectors);
	osm_debug("phys segments = %d\n", queue->max_phys_segments);
	osm_debug("max hw segments = %d\n", queue->max_hw_segments);

	/*
	 * Ask for the current media data. If that isn't supported
	 * then we ask for the device capacity data
	 */
	if (!i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) ||
	    !i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) {
		blk_queue_hardsect_size(queue, le32_to_cpu(blocksize));
	} else
		osm_warn("unable to get blocksize of %s\n", gd->disk_name);

	if (!i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) ||
	    !i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8)) {
		set_capacity(gd, le64_to_cpu(size) >> KERNEL_SECTOR_SHIFT);
	} else
		osm_warn("could not get size of %s\n", gd->disk_name);

	if (!i2o_parm_field_get(i2o_dev, 0x0000, 2, &power, 2))
		i2o_blk_dev->power = power;

	i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0xffffffff);

	add_disk(gd);

	unit++;

	osm_info("device added (TID: %03x): %s\n", i2o_dev->lct_data.tid,
		 i2o_blk_dev->gd->disk_name);

	return 0;

      claim_release:
	i2o_device_claim_release(i2o_dev);

      exit:
	return rc;
};

/* Block OSM driver struct */
static struct i2o_driver i2o_block_driver = {
	.name = OSM_NAME,
	.event = i2o_block_event,
	.reply = i2o_block_reply,
	.classes = i2o_block_class_id,
	.driver = {
		   .probe = i2o_block_probe,
		   .remove = i2o_block_remove,
		   },
};

/**
 * i2o_block_init - Block OSM initialization function
 *
 * Allocate the slab and mempool for request structs, registers i2o_block
 * block device and finally register the Block OSM in the I2O core.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int __init i2o_block_init(void)
{
	int rc;
	int size;

	printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");

	/* Allocate request mempool and slab */
	size = sizeof(struct i2o_block_request);
	i2o_blk_req_pool.slab = kmem_cache_create("i2o_block_req", size, 0,
						  SLAB_HWCACHE_ALIGN, NULL);
	if (!i2o_blk_req_pool.slab) {
		osm_err("can't init request slab\n");
		rc = -ENOMEM;
		goto exit;
	}

	i2o_blk_req_pool.pool =
		mempool_create_slab_pool(I2O_BLOCK_REQ_MEMPOOL_SIZE,
					 i2o_blk_req_pool.slab);
	if (!i2o_blk_req_pool.pool) {
		osm_err("can't init request mempool\n");
		rc = -ENOMEM;
		goto free_slab;
	}

	/* Register the block device interfaces */
	rc = register_blkdev(I2O_MAJOR, "i2o_block");
	if (rc) {
		osm_err("unable to register block device\n");
		goto free_mempool;
	}
#ifdef MODULE
	osm_info("registered device at major %d\n", I2O_MAJOR);
#endif

	/* Register Block OSM into I2O core */
	rc = i2o_driver_register(&i2o_block_driver);
	if (rc) {
		osm_err("Could not register Block driver\n");
		goto unregister_blkdev;
	}

	return 0;

      unregister_blkdev:
	unregister_blkdev(I2O_MAJOR, "i2o_block");

      free_mempool:
	mempool_destroy(i2o_blk_req_pool.pool);

      free_slab:
	kmem_cache_destroy(i2o_blk_req_pool.slab);

      exit:
	return rc;
};

/**
 * i2o_block_exit - Block OSM exit function
 *
 * Unregisters Block OSM from I2O core, unregisters i2o_block block device
 * and frees the mempool and slab.
 */
static void __exit i2o_block_exit(void)
{
	/* Unregister I2O Block OSM from I2O core */
	i2o_driver_unregister(&i2o_block_driver);

	/* Unregister block device */
	unregister_blkdev(I2O_MAJOR, "i2o_block");

	/* Free request mempool and slab */
	mempool_destroy(i2o_blk_req_pool.pool);
	kmem_cache_destroy(i2o_blk_req_pool.slab);
};

MODULE_AUTHOR("Red Hat");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(OSM_DESCRIPTION);
MODULE_VERSION(OSM_VERSION);

module_init(i2o_block_init);
module_exit(i2o_block_exit);
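
Editor's note: the i2o_block_ioctl() handler above exposes per-disk cache strategy controls through the BLKI2OGRSTRAT/BLKI2OGWSTRAT (get) and BLKI2OSRSTRAT/BLKI2OSWSTRAT (set) commands. The following is a minimal user-space sketch of how those calls would be issued, not part of the driver. It assumes the ioctl numbers are exported to user space via <linux/i2o-dev.h> and that the caller has CAP_SYS_ADMIN; the device path /dev/i2o/hda follows the disk name created in i2o_block_probe(). Note that in this driver version the "set" cases fall through to return -ENOTTY even after applying the new value, so the return code of the set call is not a reliable success indicator.

	/* sketch: query and set the read-cache strategy of an I2O block disk */
	#include <stdio.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/i2o-dev.h>	/* assumed to define the BLKI2O* ioctl numbers */

	int main(void)
	{
		int strategy;
		int fd = open("/dev/i2o/hda", O_RDONLY);

		if (fd < 0)
			return 1;

		/* BLKI2OGRSTRAT writes dev->rcache back through the int pointer */
		if (ioctl(fd, BLKI2OGRSTRAT, &strategy) == 0)
			printf("current read-cache strategy: %d\n", strategy);

		/*
		 * BLKI2OSRSTRAT takes the new strategy as the argument value
		 * itself; valid values are 0..CACHE_SMARTFETCH as checked by
		 * the driver. 0 is always accepted by that range check.
		 */
		ioctl(fd, BLKI2OSRSTRAT, 0);

		close(fd);
		return 0;
	}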