Linux kernel mirror (git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git): Block OSM driver source at tag v2.6.15-rc4 (1251 lines, 32 kB).
/*
 * Block OSM
 *
 * Copyright (C) 1999-2002 Red Hat Software
 *
 * Written by Alan Cox, Building Number Three Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * For the purpose of avoiding doubt the preferred form of the work
 * for making modifications shall be a standards compliant form such
 * as gzipped tar and not one requiring a proprietary or patent
 * encumbered tool to unpack.
 *
 * Fixes/additions:
 *	Steve Ralston:
 *		Multiple device handling error fixes,
 *		Added a queue depth.
 *	Alan Cox:
 *		FC920 has an rmw bug. Don't OR in the end marker.
 *		Removed queue walk, fixed for 64bitness.
 *		Rewrote much of the code over time
 *		Added indirect block lists
 *		Handle 64K limits on many controllers
 *		Don't use indirects on the Promise (breaks)
 *		Heavily chop down the queue depths
 *	Deepak Saxena:
 *		Independent queues per IOP
 *		Support for dynamic device creation/deletion
 *		Code cleanup
 *		Support for larger I/Os through merge* functions
 *		(taken from DAC960 driver)
 *	Boji T Kannanthanam:
 *		Set the I2O Block devices to be detected in increasing
 *		order of TIDs during boot.
 *		Search and set the I2O block device that we boot off
 *		from as the first device to be claimed (as /dev/i2o/hda)
 *		Properly attach/detach I2O gendisk structure from the
 *		system gendisk list. The I2O block devices now appear in
 *		/proc/partitions.
 *	Markus Lidel <Markus.Lidel@shadowconnect.com>:
 *		Minor bugfixes for 2.6.
 */

#include <linux/module.h>
#include <linux/i2o.h>

#include <linux/mempool.h>

#include <linux/genhd.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>

#include "i2o_block.h"

#define OSM_NAME	"block-osm"
#define OSM_VERSION	"1.287"
#define OSM_DESCRIPTION	"I2O Block Device OSM"

static struct i2o_driver i2o_block_driver;

/* global Block OSM request mempool */
static struct i2o_block_mempool i2o_blk_req_pool;

/* Block OSM class handling definition */
static struct i2o_class_id i2o_block_class_id[] = {
	{I2O_CLASS_RANDOM_BLOCK_STORAGE},
	{I2O_CLASS_END}
};

/**
 * i2o_block_device_free - free the memory of the I2O Block device
 * @dev: I2O Block device, which should be cleaned up
 *
 * Frees the request queue, gendisk and the i2o_block_device structure.
 */
static void i2o_block_device_free(struct i2o_block_device *dev)
{
	blk_cleanup_queue(dev->gd->queue);

	put_disk(dev->gd);

	kfree(dev);
};
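/*
 * Reader's note on the message frames built throughout this file (a
 * summary of the code below, not of the I2O spec text): u.head[0]
 * carries the frame size and SGL offset (e.g. FIVE_WORD_MSG_SIZE |
 * SGL_OFFSET_0), while u.head[1] packs the I2O function code into bits
 * 31-24, the initiator TID (HOST_TID) into bits 23-12 and the target TID
 * into bits 11-0. The body words which follow are function specific.
 */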
/**
 * i2o_block_remove - remove the I2O Block device from the system again
 * @dev: I2O Block device which should be removed
 *
 * Remove gendisk from system and free all allocated memory.
 *
 * Always returns 0.
 */
static int i2o_block_remove(struct device *dev)
{
	struct i2o_device *i2o_dev = to_i2o_device(dev);
	struct i2o_block_device *i2o_blk_dev = dev_get_drvdata(dev);

	osm_info("device removed (TID: %03x): %s\n", i2o_dev->lct_data.tid,
		 i2o_blk_dev->gd->disk_name);

	i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0);

	del_gendisk(i2o_blk_dev->gd);

	dev_set_drvdata(dev, NULL);

	i2o_device_claim_release(i2o_dev);

	i2o_block_device_free(i2o_blk_dev);

	return 0;
};

/**
 * i2o_block_device_flush - Flush all dirty data of I2O device dev
 * @dev: I2O device which should be flushed
 *
 * Flushes all dirty data on device dev.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_flush(struct i2o_device *dev)
{
	struct i2o_message __iomem *msg;
	u32 m;

	m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
	if (m == I2O_QUEUE_EMPTY)
		return -ETIMEDOUT;

	writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
	writel(I2O_CMD_BLOCK_CFLUSH << 24 | HOST_TID << 12 | dev->lct_data.tid,
	       &msg->u.head[1]);
	writel(60 << 16, &msg->body[0]);
	osm_debug("Flushing...\n");

	return i2o_msg_post_wait(dev->iop, m, 60);
};

/**
 * i2o_block_issue_flush - device-flush interface for block-layer
 * @queue: the request queue of the device which should be flushed
 * @disk: gendisk
 * @error_sector: error offset
 *
 * Helper function to provide flush functionality to block-layer.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_issue_flush(request_queue_t * queue, struct gendisk *disk,
				 sector_t * error_sector)
{
	struct i2o_block_device *i2o_blk_dev = queue->queuedata;
	int rc = -ENODEV;

	if (likely(i2o_blk_dev))
		rc = i2o_block_device_flush(i2o_blk_dev->i2o_dev);

	return rc;
}

/**
 * i2o_block_device_mount - Mount (load) the media of device dev
 * @dev: I2O device which should receive the mount request
 * @media_id: Media Identifier
 *
 * Load media into the drive. The identifier should be set to -1, because
 * the spec does not support any other value.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id)
{
	struct i2o_message __iomem *msg;
	u32 m;

	m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
	if (m == I2O_QUEUE_EMPTY)
		return -ETIMEDOUT;

	writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
	writel(I2O_CMD_BLOCK_MMOUNT << 24 | HOST_TID << 12 | dev->lct_data.tid,
	       &msg->u.head[1]);
	writel(-1, &msg->body[0]);
	writel(0, &msg->body[1]);
	osm_debug("Mounting...\n");

	return i2o_msg_post_wait(dev->iop, m, 2);
};
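/*
 * Reader's note (an assumption drawn from the call sites, not verified
 * against the I2O core): the last argument of i2o_msg_post_wait()
 * appears to be a timeout in seconds. The cache flush above waits up to
 * 60 seconds, matching the 60 << 16 timeout field in its message body,
 * while the media mount/lock/unlock requests wait only 2 seconds.
 */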
/**
 * i2o_block_device_lock - Locks the media of device dev
 * @dev: I2O device which should receive the lock request
 * @media_id: Media Identifier
 *
 * Lock media of device dev to prevent removal. The media identifier
 * should be set to -1, because the spec does not support any other value.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id)
{
	struct i2o_message __iomem *msg;
	u32 m;

	m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
	if (m == I2O_QUEUE_EMPTY)
		return -ETIMEDOUT;

	writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
	writel(I2O_CMD_BLOCK_MLOCK << 24 | HOST_TID << 12 | dev->lct_data.tid,
	       &msg->u.head[1]);
	writel(-1, &msg->body[0]);
	osm_debug("Locking...\n");

	return i2o_msg_post_wait(dev->iop, m, 2);
};

/**
 * i2o_block_device_unlock - Unlocks the media of device dev
 * @dev: I2O device which should receive the unlock request
 * @media_id: Media Identifier
 *
 * Unlocks the media in device dev. The media identifier should be set to
 * -1, because the spec does not support any other value.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_unlock(struct i2o_device *dev, u32 media_id)
{
	struct i2o_message __iomem *msg;
	u32 m;

	m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
	if (m == I2O_QUEUE_EMPTY)
		return -ETIMEDOUT;

	writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
	writel(I2O_CMD_BLOCK_MUNLOCK << 24 | HOST_TID << 12 | dev->lct_data.tid,
	       &msg->u.head[1]);
	writel(media_id, &msg->body[0]);
	osm_debug("Unlocking...\n");

	return i2o_msg_post_wait(dev->iop, m, 2);
};

/**
 * i2o_block_device_power - Power management for device dev
 * @dev: I2O device which should receive the power management request
 * @op: Operation which should be sent
 *
 * Send a power management request to the device dev.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_power(struct i2o_block_device *dev, u8 op)
{
	struct i2o_device *i2o_dev = dev->i2o_dev;
	struct i2o_controller *c = i2o_dev->iop;
	struct i2o_message __iomem *msg;
	u32 m;
	int rc;

	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
	if (m == I2O_QUEUE_EMPTY)
		return -ETIMEDOUT;

	writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
	writel(I2O_CMD_BLOCK_POWER << 24 | HOST_TID << 12 |
	       i2o_dev->lct_data.tid, &msg->u.head[1]);
	writel(op << 24, &msg->body[0]);
	osm_debug("Power...\n");

	rc = i2o_msg_post_wait(c, m, 60);
	if (!rc)
		dev->power = op;

	return rc;
};

/**
 * i2o_block_request_alloc - Allocate an I2O block request struct
 *
 * Allocates an I2O block request struct and initializes the list.
 *
 * Returns an i2o_block_request pointer on success or negative error code
 * on failure.
 */
static inline struct i2o_block_request *i2o_block_request_alloc(void)
{
	struct i2o_block_request *ireq;

	ireq = mempool_alloc(i2o_blk_req_pool.pool, GFP_ATOMIC);
	if (!ireq)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ireq->queue);

	return ireq;
};
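/*
 * Reader's note: block requests are drawn from a dedicated mempool with
 * GFP_ATOMIC because i2o_block_prep_req_fn() below runs in atomic
 * context with the queue lock held; the mempool's pre-allocated reserve
 * lets allocation succeed under memory pressure where a plain slab
 * allocation would fail.
 */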
/**
 * i2o_block_request_free - Frees an I2O block request
 * @ireq: I2O block request which should be freed
 *
 * Frees the allocated memory (gives it back to the request mempool).
 */
static inline void i2o_block_request_free(struct i2o_block_request *ireq)
{
	mempool_free(ireq, i2o_blk_req_pool.pool);
};

/**
 * i2o_block_sglist_alloc - Allocate the SG list and map it
 * @c: I2O controller to which the request belongs
 * @ireq: I2O block request
 *
 * Builds the SG list and maps it to be accessible by the controller.
 *
 * Returns 0 on failure or 1 on success.
 */
static inline int i2o_block_sglist_alloc(struct i2o_controller *c,
					 struct i2o_block_request *ireq,
					 u32 __iomem ** mptr)
{
	int nents;
	enum dma_data_direction direction;

	ireq->dev = &c->pdev->dev;
	nents = blk_rq_map_sg(ireq->req->q, ireq->req, ireq->sg_table);

	if (rq_data_dir(ireq->req) == READ)
		direction = PCI_DMA_FROMDEVICE;
	else
		direction = PCI_DMA_TODEVICE;

	ireq->sg_nents = nents;

	return i2o_dma_map_sg(c, ireq->sg_table, nents, direction, mptr);
};

/**
 * i2o_block_sglist_free - Frees the SG list
 * @ireq: I2O block request from which the SG list should be freed
 *
 * Frees the SG list from the I2O block request.
 */
static inline void i2o_block_sglist_free(struct i2o_block_request *ireq)
{
	enum dma_data_direction direction;

	if (rq_data_dir(ireq->req) == READ)
		direction = PCI_DMA_FROMDEVICE;
	else
		direction = PCI_DMA_TODEVICE;

	dma_unmap_sg(ireq->dev, ireq->sg_table, ireq->sg_nents, direction);
};
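/*
 * Reader's note (checked against the 2.6-era headers): the helpers above
 * store PCI_DMA_FROMDEVICE/PCI_DMA_TODEVICE in an enum dma_data_direction.
 * The numeric values coincide (PCI_DMA_TODEVICE == DMA_TO_DEVICE == 1,
 * PCI_DMA_FROMDEVICE == DMA_FROM_DEVICE == 2), so the mixed use is safe.
 */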
/**
 * i2o_block_prep_req_fn - Allocates I2O block device specific struct
 * @q: request queue for the request
 * @req: the request to prepare
 *
 * Allocate the necessary i2o_block_request struct and connect it to
 * the request. This is needed so that we do not lose the SG list later on.
 *
 * Returns BLKPREP_OK on success, BLKPREP_DEFER if no memory is available,
 * or BLKPREP_KILL if the device was already removed.
 */
static int i2o_block_prep_req_fn(struct request_queue *q, struct request *req)
{
	struct i2o_block_device *i2o_blk_dev = q->queuedata;
	struct i2o_block_request *ireq;

	if (unlikely(!i2o_blk_dev)) {
		osm_err("block device already removed\n");
		return BLKPREP_KILL;
	}

	/* request is already processed by us, so return */
	if (req->flags & REQ_SPECIAL) {
		osm_debug("REQ_SPECIAL already set!\n");
		req->flags |= REQ_DONTPREP;
		return BLKPREP_OK;
	}

	/* connect the i2o_block_request to the request */
	if (!req->special) {
		ireq = i2o_block_request_alloc();
		if (unlikely(IS_ERR(ireq))) {
			osm_debug("unable to allocate i2o_block_request!\n");
			return BLKPREP_DEFER;
		}

		ireq->i2o_blk_dev = i2o_blk_dev;
		req->special = ireq;
		ireq->req = req;
	} else
		ireq = req->special;

	/* do not come back here */
	req->flags |= REQ_DONTPREP | REQ_SPECIAL;

	return BLKPREP_OK;
};

/**
 * i2o_block_delayed_request_fn - delayed request queue function
 * @delayed_request: the delayed request with the queue to start
 *
 * If the request queue is stopped for a disk, and there is no open
 * request, a delayed work item is queued which calls this function to
 * start the queue after I2O_BLOCK_RETRY_TIME. Otherwise the queue would
 * never be started again.
 */
static void i2o_block_delayed_request_fn(void *delayed_request)
{
	struct i2o_block_delayed_request *dreq = delayed_request;
	struct request_queue *q = dreq->queue;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
	kfree(dreq);
};

/**
 * i2o_block_end_request - Post-processing of completed commands
 * @req: request which should be completed
 * @uptodate: 1 for success, 0 for I/O error, < 0 for specific error
 * @nr_bytes: number of bytes to complete
 *
 * Mark the request as complete. The lock must not be held when entering.
 */
static void i2o_block_end_request(struct request *req, int uptodate,
				  int nr_bytes)
{
	struct i2o_block_request *ireq = req->special;
	struct i2o_block_device *dev = ireq->i2o_blk_dev;
	request_queue_t *q = req->q;
	unsigned long flags;

	if (end_that_request_chunk(req, uptodate, nr_bytes)) {
		int leftover = (req->hard_nr_sectors << KERNEL_SECTOR_SHIFT);

		if (blk_pc_request(req))
			leftover = req->data_len;

		if (end_io_error(uptodate))
			end_that_request_chunk(req, 0, leftover);
	}

	add_disk_randomness(req->rq_disk);

	spin_lock_irqsave(q->queue_lock, flags);

	end_that_request_last(req);

	if (likely(dev)) {
		dev->open_queue_depth--;
		list_del(&ireq->queue);
	}

	blk_start_queue(q);

	spin_unlock_irqrestore(q->queue_lock, flags);

	i2o_block_sglist_free(ireq);
	i2o_block_request_free(ireq);
};

/**
 * i2o_block_reply - Block OSM reply handler.
 * @c: I2O controller from which the message arrives
 * @m: message id of reply
 * @msg: the actual I2O message reply
 *
 * This function gets all the message replies.
 */
static int i2o_block_reply(struct i2o_controller *c, u32 m,
			   struct i2o_message *msg)
{
	struct request *req;
	int uptodate = 1;

	req = i2o_cntxt_list_get(c, le32_to_cpu(msg->u.s.tcntxt));
	if (unlikely(!req)) {
		osm_err("NULL reply received!\n");
		return -1;
	}

	/*
	 * Let's see what is cooking. We stuffed the
	 * request in the context.
	 */

	if ((le32_to_cpu(msg->body[0]) >> 24) != 0) {
		u32 status = le32_to_cpu(msg->body[0]);
		/*
		 * Device not ready means two things. One is that the
		 * thing went offline (but not removable media).
		 *
		 * The second is that you have a SuperTrak 100 and the
		 * firmware got constipated. Unlike standard i2o card
		 * setups the supertrak returns an error rather than
		 * blocking for the timeout in these cases.
		 *
		 * Don't stick a supertrak100 into cache aggressive modes
		 */

		osm_err("TID %03x error status: 0x%02x, detailed status: "
			"0x%04x\n", (le32_to_cpu(msg->u.head[1]) >> 12 & 0xfff),
			status >> 24, status & 0xffff);

		req->errors++;

		uptodate = 0;
	}

	i2o_block_end_request(req, uptodate, le32_to_cpu(msg->body[1]));

	return 1;
};

static void i2o_block_event(struct i2o_event *evt)
{
	osm_debug("event received\n");
	kfree(evt);
};
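/*
 * Completion flow, as a reader's summary of the functions above:
 * i2o_block_transfer() further below stores the struct request in the
 * controller's context list and sends the resulting tcntxt handle in the
 * message; i2o_block_reply() looks the request back up with
 * i2o_cntxt_list_get() and finishes it through i2o_block_end_request().
 */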
548 * 549 * LBA -> CHS mapping table taken from: 550 * 551 * "Incorporating the I2O Architecture into BIOS for Intel Architecture 552 * Platforms" 553 * 554 * This is an I2O document that is only available to I2O members, 555 * not developers. 556 * 557 * From my understanding, this is how all the I2O cards do this 558 * 559 * Disk Size | Sectors | Heads | Cylinders 560 * ---------------+---------+-------+------------------- 561 * 1 < X <= 528M | 63 | 16 | X/(63 * 16 * 512) 562 * 528M < X <= 1G | 63 | 32 | X/(63 * 32 * 512) 563 * 1 < X <528M | 63 | 16 | X/(63 * 16 * 512) 564 * 1 < X <528M | 63 | 16 | X/(63 * 16 * 512) 565 * 566 */ 567#define BLOCK_SIZE_528M 1081344 568#define BLOCK_SIZE_1G 2097152 569#define BLOCK_SIZE_21G 4403200 570#define BLOCK_SIZE_42G 8806400 571#define BLOCK_SIZE_84G 17612800 572 573static void i2o_block_biosparam(unsigned long capacity, unsigned short *cyls, 574 unsigned char *hds, unsigned char *secs) 575{ 576 unsigned long heads, sectors, cylinders; 577 578 sectors = 63L; /* Maximize sectors per track */ 579 if (capacity <= BLOCK_SIZE_528M) 580 heads = 16; 581 else if (capacity <= BLOCK_SIZE_1G) 582 heads = 32; 583 else if (capacity <= BLOCK_SIZE_21G) 584 heads = 64; 585 else if (capacity <= BLOCK_SIZE_42G) 586 heads = 128; 587 else 588 heads = 255; 589 590 cylinders = (unsigned long)capacity / (heads * sectors); 591 592 *cyls = (unsigned short)cylinders; /* Stuff return values */ 593 *secs = (unsigned char)sectors; 594 *hds = (unsigned char)heads; 595} 596 597/** 598 * i2o_block_open - Open the block device 599 * 600 * Power up the device, mount and lock the media. This function is called, 601 * if the block device is opened for access. 602 * 603 * Returns 0 on success or negative error code on failure. 604 */ 605static int i2o_block_open(struct inode *inode, struct file *file) 606{ 607 struct i2o_block_device *dev = inode->i_bdev->bd_disk->private_data; 608 609 if (!dev->i2o_dev) 610 return -ENODEV; 611 612 if (dev->power > 0x1f) 613 i2o_block_device_power(dev, 0x02); 614 615 i2o_block_device_mount(dev->i2o_dev, -1); 616 617 i2o_block_device_lock(dev->i2o_dev, -1); 618 619 osm_debug("Ready.\n"); 620 621 return 0; 622}; 623 624/** 625 * i2o_block_release - Release the I2O block device 626 * 627 * Unlock and unmount the media, and power down the device. Gets called if 628 * the block device is closed. 629 * 630 * Returns 0 on success or negative error code on failure. 631 */ 632static int i2o_block_release(struct inode *inode, struct file *file) 633{ 634 struct gendisk *disk = inode->i_bdev->bd_disk; 635 struct i2o_block_device *dev = disk->private_data; 636 u8 operation; 637 638 /* 639 * This is to deail with the case of an application 640 * opening a device and then the device dissapears while 641 * it's in use, and then the application tries to release 642 * it. ex: Unmounting a deleted RAID volume at reboot. 643 * If we send messages, it will just cause FAILs since 644 * the TID no longer exists. 645 */ 646 if (!dev->i2o_dev) 647 return 0; 648 649 i2o_block_device_flush(dev->i2o_dev); 650 651 i2o_block_device_unlock(dev->i2o_dev, -1); 652 653 if (dev->flags & (1 << 3 | 1 << 4)) /* Removable */ 654 operation = 0x21; 655 else 656 operation = 0x24; 657 658 i2o_block_device_power(dev, operation); 659 660 return 0; 661} 662 663/** 664 * i2o_block_ioctl - Issue device specific ioctl calls. 665 * @cmd: ioctl command 666 * @arg: arg 667 * 668 * Handles ioctl request for the block device. 669 * 670 * Return 0 on success or negative error on failure. 
671 */ 672static int i2o_block_ioctl(struct inode *inode, struct file *file, 673 unsigned int cmd, unsigned long arg) 674{ 675 struct gendisk *disk = inode->i_bdev->bd_disk; 676 struct i2o_block_device *dev = disk->private_data; 677 void __user *argp = (void __user *)arg; 678 679 /* Anyone capable of this syscall can do *real bad* things */ 680 681 if (!capable(CAP_SYS_ADMIN)) 682 return -EPERM; 683 684 switch (cmd) { 685 case HDIO_GETGEO: 686 { 687 struct hd_geometry g; 688 i2o_block_biosparam(get_capacity(disk), 689 &g.cylinders, &g.heads, &g.sectors); 690 g.start = get_start_sect(inode->i_bdev); 691 return copy_to_user(argp, &g, sizeof(g)) ? -EFAULT : 0; 692 } 693 694 case BLKI2OGRSTRAT: 695 return put_user(dev->rcache, (int __user *)arg); 696 case BLKI2OGWSTRAT: 697 return put_user(dev->wcache, (int __user *)arg); 698 case BLKI2OSRSTRAT: 699 if (arg < 0 || arg > CACHE_SMARTFETCH) 700 return -EINVAL; 701 dev->rcache = arg; 702 break; 703 case BLKI2OSWSTRAT: 704 if (arg != 0 705 && (arg < CACHE_WRITETHROUGH || arg > CACHE_SMARTBACK)) 706 return -EINVAL; 707 dev->wcache = arg; 708 break; 709 } 710 return -ENOTTY; 711}; 712 713/** 714 * i2o_block_media_changed - Have we seen a media change? 715 * @disk: gendisk which should be verified 716 * 717 * Verifies if the media has changed. 718 * 719 * Returns 1 if the media was changed or 0 otherwise. 720 */ 721static int i2o_block_media_changed(struct gendisk *disk) 722{ 723 struct i2o_block_device *p = disk->private_data; 724 725 if (p->media_change_flag) { 726 p->media_change_flag = 0; 727 return 1; 728 } 729 return 0; 730} 731 732/** 733 * i2o_block_transfer - Transfer a request to/from the I2O controller 734 * @req: the request which should be transfered 735 * 736 * This function converts the request into a I2O message. The necessary 737 * DMA buffers are allocated and after everything is setup post the message 738 * to the I2O controller. No cleanup is done by this function. It is done 739 * on the interrupt side when the reply arrives. 740 * 741 * Return 0 on success or negative error code on failure. 
742 */ 743static int i2o_block_transfer(struct request *req) 744{ 745 struct i2o_block_device *dev = req->rq_disk->private_data; 746 struct i2o_controller *c; 747 int tid = dev->i2o_dev->lct_data.tid; 748 struct i2o_message __iomem *msg; 749 u32 __iomem *mptr; 750 struct i2o_block_request *ireq = req->special; 751 u32 m; 752 u32 tcntxt; 753 u32 sgl_offset = SGL_OFFSET_8; 754 u32 ctl_flags = 0x00000000; 755 int rc; 756 u32 cmd; 757 758 if (unlikely(!dev->i2o_dev)) { 759 osm_err("transfer to removed drive\n"); 760 rc = -ENODEV; 761 goto exit; 762 } 763 764 c = dev->i2o_dev->iop; 765 766 m = i2o_msg_get(c, &msg); 767 if (m == I2O_QUEUE_EMPTY) { 768 rc = -EBUSY; 769 goto exit; 770 } 771 772 tcntxt = i2o_cntxt_list_add(c, req); 773 if (!tcntxt) { 774 rc = -ENOMEM; 775 goto nop_msg; 776 } 777 778 writel(i2o_block_driver.context, &msg->u.s.icntxt); 779 writel(tcntxt, &msg->u.s.tcntxt); 780 781 mptr = &msg->body[0]; 782 783 if (rq_data_dir(req) == READ) { 784 cmd = I2O_CMD_BLOCK_READ << 24; 785 786 switch (dev->rcache) { 787 case CACHE_PREFETCH: 788 ctl_flags = 0x201F0008; 789 break; 790 791 case CACHE_SMARTFETCH: 792 if (req->nr_sectors > 16) 793 ctl_flags = 0x201F0008; 794 else 795 ctl_flags = 0x001F0000; 796 break; 797 798 default: 799 break; 800 } 801 } else { 802 cmd = I2O_CMD_BLOCK_WRITE << 24; 803 804 switch (dev->wcache) { 805 case CACHE_WRITETHROUGH: 806 ctl_flags = 0x001F0008; 807 break; 808 case CACHE_WRITEBACK: 809 ctl_flags = 0x001F0010; 810 break; 811 case CACHE_SMARTBACK: 812 if (req->nr_sectors > 16) 813 ctl_flags = 0x001F0004; 814 else 815 ctl_flags = 0x001F0010; 816 break; 817 case CACHE_SMARTTHROUGH: 818 if (req->nr_sectors > 16) 819 ctl_flags = 0x001F0004; 820 else 821 ctl_flags = 0x001F0010; 822 default: 823 break; 824 } 825 } 826 827#ifdef CONFIG_I2O_EXT_ADAPTEC 828 if (c->adaptec) { 829 u8 cmd[10]; 830 u32 scsi_flags; 831 u16 hwsec = queue_hardsect_size(req->q) >> KERNEL_SECTOR_SHIFT; 832 833 memset(cmd, 0, 10); 834 835 sgl_offset = SGL_OFFSET_12; 836 837 writel(I2O_CMD_PRIVATE << 24 | HOST_TID << 12 | tid, 838 &msg->u.head[1]); 839 840 writel(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC, mptr++); 841 writel(tid, mptr++); 842 843 /* 844 * ENABLE_DISCONNECT 845 * SIMPLE_TAG 846 * RETURN_SENSE_DATA_IN_REPLY_MESSAGE_FRAME 847 */ 848 if (rq_data_dir(req) == READ) { 849 cmd[0] = 0x28; 850 scsi_flags = 0x60a0000a; 851 } else { 852 cmd[0] = 0x2A; 853 scsi_flags = 0xa0a0000a; 854 } 855 856 writel(scsi_flags, mptr++); 857 858 *((u32 *) & cmd[2]) = cpu_to_be32(req->sector * hwsec); 859 *((u16 *) & cmd[7]) = cpu_to_be16(req->nr_sectors * hwsec); 860 861 memcpy_toio(mptr, cmd, 10); 862 mptr += 4; 863 writel(req->nr_sectors << KERNEL_SECTOR_SHIFT, mptr++); 864 } else 865#endif 866 { 867 writel(cmd | HOST_TID << 12 | tid, &msg->u.head[1]); 868 writel(ctl_flags, mptr++); 869 writel(req->nr_sectors << KERNEL_SECTOR_SHIFT, mptr++); 870 writel((u32) (req->sector << KERNEL_SECTOR_SHIFT), mptr++); 871 writel(req->sector >> (32 - KERNEL_SECTOR_SHIFT), mptr++); 872 } 873 874 if (!i2o_block_sglist_alloc(c, ireq, &mptr)) { 875 rc = -ENOMEM; 876 goto context_remove; 877 } 878 879 writel(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | 880 sgl_offset, &msg->u.head[0]); 881 882 list_add_tail(&ireq->queue, &dev->open_queue); 883 dev->open_queue_depth++; 884 885 i2o_msg_post(c, m); 886 887 return 0; 888 889 context_remove: 890 i2o_cntxt_list_remove(c, req); 891 892 nop_msg: 893 i2o_msg_nop(c, m); 894 895 exit: 896 return rc; 897}; 898 899/** 900 * i2o_block_request_fn - request queue handling function 901 * q: 
/**
 * i2o_block_request_fn - request queue handling function
 * @q: request queue from which the request could be fetched
 *
 * Takes the next request from the queue, transfers it and if no error
 * occurs dequeues it from the queue. On arrival of the reply the message
 * will be processed further. If an error occurs the request is requeued.
 */
static void i2o_block_request_fn(struct request_queue *q)
{
	struct request *req;

	while (!blk_queue_plugged(q)) {
		req = elv_next_request(q);
		if (!req)
			break;

		if (blk_fs_request(req)) {
			struct i2o_block_delayed_request *dreq;
			struct i2o_block_request *ireq = req->special;
			unsigned int queue_depth;

			queue_depth = ireq->i2o_blk_dev->open_queue_depth;

			if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) {
				if (!i2o_block_transfer(req)) {
					blkdev_dequeue_request(req);
					continue;
				} else
					osm_info("transfer error\n");
			}

			if (queue_depth)
				break;

			/* stop the queue and retry later */
			dreq = kmalloc(sizeof(*dreq), GFP_ATOMIC);
			if (!dreq)
				continue;

			dreq->queue = q;
			INIT_WORK(&dreq->work, i2o_block_delayed_request_fn,
				  dreq);

			if (!queue_delayed_work(i2o_block_driver.event_queue,
						&dreq->work,
						I2O_BLOCK_RETRY_TIME))
				kfree(dreq);
			else {
				blk_stop_queue(q);
				break;
			}
		} else
			end_request(req, 0);
	}
};

/* I2O Block device operations definition */
static struct block_device_operations i2o_block_fops = {
	.owner = THIS_MODULE,
	.open = i2o_block_open,
	.release = i2o_block_release,
	.ioctl = i2o_block_ioctl,
	.media_changed = i2o_block_media_changed
};
973 */ 974static struct i2o_block_device *i2o_block_device_alloc(void) 975{ 976 struct i2o_block_device *dev; 977 struct gendisk *gd; 978 struct request_queue *queue; 979 int rc; 980 981 dev = kmalloc(sizeof(*dev), GFP_KERNEL); 982 if (!dev) { 983 osm_err("Insufficient memory to allocate I2O Block disk.\n"); 984 rc = -ENOMEM; 985 goto exit; 986 } 987 memset(dev, 0, sizeof(*dev)); 988 989 INIT_LIST_HEAD(&dev->open_queue); 990 spin_lock_init(&dev->lock); 991 dev->rcache = CACHE_PREFETCH; 992 dev->wcache = CACHE_WRITEBACK; 993 994 /* allocate a gendisk with 16 partitions */ 995 gd = alloc_disk(16); 996 if (!gd) { 997 osm_err("Insufficient memory to allocate gendisk.\n"); 998 rc = -ENOMEM; 999 goto cleanup_dev; 1000 } 1001 1002 /* initialize the request queue */ 1003 queue = blk_init_queue(i2o_block_request_fn, &dev->lock); 1004 if (!queue) { 1005 osm_err("Insufficient memory to allocate request queue.\n"); 1006 rc = -ENOMEM; 1007 goto cleanup_queue; 1008 } 1009 1010 blk_queue_prep_rq(queue, i2o_block_prep_req_fn); 1011 blk_queue_issue_flush_fn(queue, i2o_block_issue_flush); 1012 1013 gd->major = I2O_MAJOR; 1014 gd->queue = queue; 1015 gd->fops = &i2o_block_fops; 1016 gd->private_data = dev; 1017 1018 dev->gd = gd; 1019 1020 return dev; 1021 1022 cleanup_queue: 1023 put_disk(gd); 1024 1025 cleanup_dev: 1026 kfree(dev); 1027 1028 exit: 1029 return ERR_PTR(rc); 1030}; 1031 1032/** 1033 * i2o_block_probe - verify if dev is a I2O Block device and install it 1034 * @dev: device to verify if it is a I2O Block device 1035 * 1036 * We only verify if the user_tid of the device is 0xfff and then install 1037 * the device. Otherwise it is used by some other device (e. g. RAID). 1038 * 1039 * Returns 0 on success or negative error code on failure. 1040 */ 1041static int i2o_block_probe(struct device *dev) 1042{ 1043 struct i2o_device *i2o_dev = to_i2o_device(dev); 1044 struct i2o_controller *c = i2o_dev->iop; 1045 struct i2o_block_device *i2o_blk_dev; 1046 struct gendisk *gd; 1047 struct request_queue *queue; 1048 static int unit = 0; 1049 int rc; 1050 u64 size; 1051 u32 blocksize; 1052 u32 flags, status; 1053 u16 body_size = 4; 1054 unsigned short max_sectors; 1055 1056#ifdef CONFIG_I2O_EXT_ADAPTEC 1057 if (c->adaptec) 1058 body_size = 8; 1059#endif 1060 1061 if (c->limit_sectors) 1062 max_sectors = I2O_MAX_SECTORS_LIMITED; 1063 else 1064 max_sectors = I2O_MAX_SECTORS; 1065 1066 /* skip devices which are used by IOP */ 1067 if (i2o_dev->lct_data.user_tid != 0xfff) { 1068 osm_debug("skipping used device %03x\n", i2o_dev->lct_data.tid); 1069 return -ENODEV; 1070 } 1071 1072 if (i2o_device_claim(i2o_dev)) { 1073 osm_warn("Unable to claim device. 
/**
 * i2o_block_probe - verify if dev is an I2O Block device and install it
 * @dev: device to verify if it is an I2O Block device
 *
 * We only verify if the user_tid of the device is 0xfff and then install
 * the device. Otherwise it is used by some other device (e.g. RAID).
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_probe(struct device *dev)
{
	struct i2o_device *i2o_dev = to_i2o_device(dev);
	struct i2o_controller *c = i2o_dev->iop;
	struct i2o_block_device *i2o_blk_dev;
	struct gendisk *gd;
	struct request_queue *queue;
	static int unit = 0;
	int rc;
	u64 size;
	u32 blocksize;
	u32 flags, status;
	u16 body_size = 4;
	unsigned short max_sectors;

#ifdef CONFIG_I2O_EXT_ADAPTEC
	if (c->adaptec)
		body_size = 8;
#endif

	if (c->limit_sectors)
		max_sectors = I2O_MAX_SECTORS_LIMITED;
	else
		max_sectors = I2O_MAX_SECTORS;

	/* skip devices which are used by IOP */
	if (i2o_dev->lct_data.user_tid != 0xfff) {
		osm_debug("skipping used device %03x\n", i2o_dev->lct_data.tid);
		return -ENODEV;
	}

	if (i2o_device_claim(i2o_dev)) {
		osm_warn("Unable to claim device. Installation aborted\n");
		rc = -EFAULT;
		goto exit;
	}

	i2o_blk_dev = i2o_block_device_alloc();
	if (IS_ERR(i2o_blk_dev)) {
		osm_err("could not alloc a new I2O block device\n");
		rc = PTR_ERR(i2o_blk_dev);
		goto claim_release;
	}

	i2o_blk_dev->i2o_dev = i2o_dev;
	dev_set_drvdata(dev, i2o_blk_dev);

	/* setup gendisk */
	gd = i2o_blk_dev->gd;
	gd->first_minor = unit << 4;
	sprintf(gd->disk_name, "i2o/hd%c", 'a' + unit);
	sprintf(gd->devfs_name, "i2o/hd%c", 'a' + unit);
	gd->driverfs_dev = &i2o_dev->device;

	/* setup request queue */
	queue = gd->queue;
	queue->queuedata = i2o_blk_dev;

	blk_queue_max_phys_segments(queue, I2O_MAX_PHYS_SEGMENTS);
	blk_queue_max_sectors(queue, max_sectors);
	blk_queue_max_hw_segments(queue, i2o_sg_tablesize(c, body_size));

	osm_debug("max sectors = %d\n", queue->max_sectors);
	osm_debug("phys segments = %d\n", queue->max_phys_segments);
	osm_debug("max hw segments = %d\n", queue->max_hw_segments);

	/*
	 * Ask for the current media data. If that isn't supported
	 * then we ask for the device capacity data
	 */
	if (i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) ||
	    i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) {
		blk_queue_hardsect_size(queue, blocksize);
	} else
		osm_warn("unable to get blocksize of %s\n", gd->disk_name);

	if (i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) ||
	    i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8)) {
		set_capacity(gd, size >> KERNEL_SECTOR_SHIFT);
	} else
		osm_warn("could not get size of %s\n", gd->disk_name);

	if (!i2o_parm_field_get(i2o_dev, 0x0000, 2, &i2o_blk_dev->power, 2))
		i2o_blk_dev->power = 0;
	i2o_parm_field_get(i2o_dev, 0x0000, 5, &flags, 4);
	i2o_parm_field_get(i2o_dev, 0x0000, 6, &status, 4);

	i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0xffffffff);

	add_disk(gd);

	unit++;

	osm_info("device added (TID: %03x): %s\n", i2o_dev->lct_data.tid,
		 i2o_blk_dev->gd->disk_name);

	return 0;

      claim_release:
	i2o_device_claim_release(i2o_dev);

      exit:
	return rc;
};

/* Block OSM driver struct */
static struct i2o_driver i2o_block_driver = {
	.name = OSM_NAME,
	.event = i2o_block_event,
	.reply = i2o_block_reply,
	.classes = i2o_block_class_id,
	.driver = {
		   .probe = i2o_block_probe,
		   .remove = i2o_block_remove,
		   },
};
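/*
 * Reader's note on bring-up order, mirroring i2o_block_init() below: the
 * request slab and mempool must exist before the block device is
 * registered, and the block device before the OSM is registered with the
 * I2O core; the error paths unwind in exactly the reverse order.
 */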
1165 */ 1166static int __init i2o_block_init(void) 1167{ 1168 int rc; 1169 int size; 1170 1171 printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n"); 1172 1173 /* Allocate request mempool and slab */ 1174 size = sizeof(struct i2o_block_request); 1175 i2o_blk_req_pool.slab = kmem_cache_create("i2o_block_req", size, 0, 1176 SLAB_HWCACHE_ALIGN, NULL, 1177 NULL); 1178 if (!i2o_blk_req_pool.slab) { 1179 osm_err("can't init request slab\n"); 1180 rc = -ENOMEM; 1181 goto exit; 1182 } 1183 1184 i2o_blk_req_pool.pool = mempool_create(I2O_BLOCK_REQ_MEMPOOL_SIZE, 1185 mempool_alloc_slab, 1186 mempool_free_slab, 1187 i2o_blk_req_pool.slab); 1188 if (!i2o_blk_req_pool.pool) { 1189 osm_err("can't init request mempool\n"); 1190 rc = -ENOMEM; 1191 goto free_slab; 1192 } 1193 1194 /* Register the block device interfaces */ 1195 rc = register_blkdev(I2O_MAJOR, "i2o_block"); 1196 if (rc) { 1197 osm_err("unable to register block device\n"); 1198 goto free_mempool; 1199 } 1200#ifdef MODULE 1201 osm_info("registered device at major %d\n", I2O_MAJOR); 1202#endif 1203 1204 /* Register Block OSM into I2O core */ 1205 rc = i2o_driver_register(&i2o_block_driver); 1206 if (rc) { 1207 osm_err("Could not register Block driver\n"); 1208 goto unregister_blkdev; 1209 } 1210 1211 return 0; 1212 1213 unregister_blkdev: 1214 unregister_blkdev(I2O_MAJOR, "i2o_block"); 1215 1216 free_mempool: 1217 mempool_destroy(i2o_blk_req_pool.pool); 1218 1219 free_slab: 1220 kmem_cache_destroy(i2o_blk_req_pool.slab); 1221 1222 exit: 1223 return rc; 1224}; 1225 1226/** 1227 * i2o_block_exit - Block OSM exit function 1228 * 1229 * Unregisters Block OSM from I2O core, unregisters i2o_block block device 1230 * and frees the mempool and slab. 1231 */ 1232static void __exit i2o_block_exit(void) 1233{ 1234 /* Unregister I2O Block OSM from I2O core */ 1235 i2o_driver_unregister(&i2o_block_driver); 1236 1237 /* Unregister block device */ 1238 unregister_blkdev(I2O_MAJOR, "i2o_block"); 1239 1240 /* Free request mempool and slab */ 1241 mempool_destroy(i2o_blk_req_pool.pool); 1242 kmem_cache_destroy(i2o_blk_req_pool.slab); 1243}; 1244 1245MODULE_AUTHOR("Red Hat"); 1246MODULE_LICENSE("GPL"); 1247MODULE_DESCRIPTION(OSM_DESCRIPTION); 1248MODULE_VERSION(OSM_VERSION); 1249 1250module_init(i2o_block_init); 1251module_exit(i2o_block_exit);