Linux kernel mirror: git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
i2o_block.c (Block OSM) at tag v2.6.31-rc7, 1216 lines, 31 kB
/*
 * Block OSM
 *
 * Copyright (C) 1999-2002 Red Hat Software
 *
 * Written by Alan Cox, Building Number Three Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * For the purpose of avoiding doubt the preferred form of the work
 * for making modifications shall be a standards compliant form such as
 * gzipped tar and not one requiring a proprietary or patent encumbered
 * tool to unpack.
 *
 * Fixes/additions:
 *      Steve Ralston:
 *              Multiple device handling error fixes,
 *              Added a queue depth.
 *      Alan Cox:
 *              FC920 has an rmw bug. Don't OR in the end marker.
 *              Removed queue walk, fixed for 64bitness.
 *              Rewrote much of the code over time
 *              Added indirect block lists
 *              Handle 64K limits on many controllers
 *              Don't use indirects on the Promise (breaks)
 *              Heavily chop down the queue depths
 *      Deepak Saxena:
 *              Independent queues per IOP
 *              Support for dynamic device creation/deletion
 *              Code cleanup
 *              Support for larger I/Os through merge* functions
 *              (taken from DAC960 driver)
 *      Boji T Kannanthanam:
 *              Set the I2O Block devices to be detected in increasing
 *              order of TIDs during boot.
 *              Search and set the I2O block device that we boot off
 *              from as the first device to be claimed (as /dev/i2o/hda)
 *              Properly attach/detach I2O gendisk structure from the
 *              system gendisk list. The I2O block devices now appear in
 *              /proc/partitions.
 *      Markus Lidel <Markus.Lidel@shadowconnect.com>:
 *              Minor bugfixes for 2.6.
 */

#include <linux/module.h>
#include <linux/i2o.h>

#include <linux/mempool.h>

#include <linux/genhd.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>

#include <scsi/scsi.h>

#include "i2o_block.h"

#define OSM_NAME        "block-osm"
#define OSM_VERSION     "1.325"
#define OSM_DESCRIPTION "I2O Block Device OSM"

static struct i2o_driver i2o_block_driver;

/* global Block OSM request mempool */
static struct i2o_block_mempool i2o_blk_req_pool;

/* Block OSM class handling definition */
static struct i2o_class_id i2o_block_class_id[] = {
        {I2O_CLASS_RANDOM_BLOCK_STORAGE},
        {I2O_CLASS_END}
};

/**
 * i2o_block_device_free - free the memory of the I2O Block device
 * @dev: I2O Block device, which should be cleaned up
 *
 * Frees the request queue, gendisk and the i2o_block_device structure.
 */
static void i2o_block_device_free(struct i2o_block_device *dev)
{
        blk_cleanup_queue(dev->gd->queue);

        put_disk(dev->gd);

        kfree(dev);
};
/**
 * i2o_block_remove - remove the I2O Block device from the system again
 * @dev: I2O Block device which should be removed
 *
 * Remove gendisk from system and free all allocated memory.
 *
 * Always returns 0.
 */
static int i2o_block_remove(struct device *dev)
{
        struct i2o_device *i2o_dev = to_i2o_device(dev);
        struct i2o_block_device *i2o_blk_dev = dev_get_drvdata(dev);

        osm_info("device removed (TID: %03x): %s\n", i2o_dev->lct_data.tid,
                 i2o_blk_dev->gd->disk_name);

        i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0);

        del_gendisk(i2o_blk_dev->gd);

        dev_set_drvdata(dev, NULL);

        i2o_device_claim_release(i2o_dev);

        i2o_block_device_free(i2o_blk_dev);

        return 0;
};

/**
 * i2o_block_device_flush - Flush all dirty data of I2O device dev
 * @dev: I2O device which should be flushed
 *
 * Flushes all dirty data on device dev.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_flush(struct i2o_device *dev)
{
        struct i2o_message *msg;

        msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
        if (IS_ERR(msg))
                return PTR_ERR(msg);

        msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
        msg->u.head[1] =
                cpu_to_le32(I2O_CMD_BLOCK_CFLUSH << 24 | HOST_TID << 12 |
                            dev->lct_data.tid);
        msg->body[0] = cpu_to_le32(60 << 16);
        osm_debug("Flushing...\n");

        return i2o_msg_post_wait(dev->iop, msg, 60);
};
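/*
 * The media helpers below (mount, lock, unlock and power) follow the same
 * pattern as i2o_block_device_flush() above: get a message frame with
 * i2o_msg_get_wait(), encode the request in head[1] as
 * command << 24 | HOST_TID << 12 | target TID, place the parameters in
 * body[], and post it synchronously with i2o_msg_post_wait(), whose last
 * argument appears to be the timeout in seconds.
 */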
/**
 * i2o_block_device_mount - Mount (load) the media of device dev
 * @dev: I2O device which should receive the mount request
 * @media_id: Media Identifier
 *
 * Load a media into drive. Identifier should be set to -1, because the
 * spec does not support any other value.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id)
{
        struct i2o_message *msg;

        msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
        if (IS_ERR(msg))
                return PTR_ERR(msg);

        msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
        msg->u.head[1] =
                cpu_to_le32(I2O_CMD_BLOCK_MMOUNT << 24 | HOST_TID << 12 |
                            dev->lct_data.tid);
        msg->body[0] = cpu_to_le32(-1);
        msg->body[1] = cpu_to_le32(0x00000000);
        osm_debug("Mounting...\n");

        return i2o_msg_post_wait(dev->iop, msg, 2);
};

/**
 * i2o_block_device_lock - Locks the media of device dev
 * @dev: I2O device which should receive the lock request
 * @media_id: Media Identifier
 *
 * Lock media of device dev to prevent removal. The media identifier
 * should be set to -1, because the spec does not support any other value.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id)
{
        struct i2o_message *msg;

        msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
        if (IS_ERR(msg))
                return PTR_ERR(msg);

        msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
        msg->u.head[1] =
                cpu_to_le32(I2O_CMD_BLOCK_MLOCK << 24 | HOST_TID << 12 |
                            dev->lct_data.tid);
        msg->body[0] = cpu_to_le32(-1);
        osm_debug("Locking...\n");

        return i2o_msg_post_wait(dev->iop, msg, 2);
};

/**
 * i2o_block_device_unlock - Unlocks the media of device dev
 * @dev: I2O device which should receive the unlock request
 * @media_id: Media Identifier
 *
 * Unlocks the media in device dev. The media identifier should be set to
 * -1, because the spec does not support any other value.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_unlock(struct i2o_device *dev, u32 media_id)
{
        struct i2o_message *msg;

        msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
        if (IS_ERR(msg))
                return PTR_ERR(msg);

        msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
        msg->u.head[1] =
                cpu_to_le32(I2O_CMD_BLOCK_MUNLOCK << 24 | HOST_TID << 12 |
                            dev->lct_data.tid);
        msg->body[0] = cpu_to_le32(media_id);
        osm_debug("Unlocking...\n");

        return i2o_msg_post_wait(dev->iop, msg, 2);
};

/**
 * i2o_block_device_power - Power management for device dev
 * @dev: I2O device which should receive the power management request
 * @op: Operation to send
 *
 * Send a power management request to the device dev.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_device_power(struct i2o_block_device *dev, u8 op)
{
        struct i2o_device *i2o_dev = dev->i2o_dev;
        struct i2o_controller *c = i2o_dev->iop;
        struct i2o_message *msg;
        int rc;

        msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
        if (IS_ERR(msg))
                return PTR_ERR(msg);

        msg->u.head[0] = cpu_to_le32(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0);
        msg->u.head[1] =
                cpu_to_le32(I2O_CMD_BLOCK_POWER << 24 | HOST_TID << 12 |
                            i2o_dev->lct_data.tid);
        msg->body[0] = cpu_to_le32(op << 24);
        osm_debug("Power...\n");

        rc = i2o_msg_post_wait(c, msg, 60);
        if (!rc)
                dev->power = op;

        return rc;
};

/**
 * i2o_block_request_alloc - Allocate an I2O block request struct
 *
 * Allocates an I2O block request struct and initializes the list.
 *
 * Returns an i2o_block_request pointer on success or negative error code
 * on failure.
 */
static inline struct i2o_block_request *i2o_block_request_alloc(void)
{
        struct i2o_block_request *ireq;

        ireq = mempool_alloc(i2o_blk_req_pool.pool, GFP_ATOMIC);
        if (!ireq)
                return ERR_PTR(-ENOMEM);

        INIT_LIST_HEAD(&ireq->queue);
        sg_init_table(ireq->sg_table, I2O_MAX_PHYS_SEGMENTS);

        return ireq;
};

/**
 * i2o_block_request_free - Frees an I2O block request
 * @ireq: I2O block request which should be freed
 *
 * Frees the allocated memory (gives it back to the request mempool).
 */
static inline void i2o_block_request_free(struct i2o_block_request *ireq)
{
        mempool_free(ireq, i2o_blk_req_pool.pool);
};
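/*
 * i2o_block_request_alloc() uses GFP_ATOMIC because it is called from the
 * block layer's request-preparation path with the queue lock held; the
 * dedicated mempool keeps allocations from failing under memory pressure,
 * and a momentarily empty pool is handled by returning BLKPREP_DEFER from
 * the prep function below so the request is simply retried later.
 */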
/**
 * i2o_block_sglist_alloc - Allocate the SG list and map it
 * @c: I2O controller to which the request belongs
 * @ireq: I2O block request
 * @mptr: message body pointer
 *
 * Builds the SG list and maps it so that it is accessible by the
 * controller.
 *
 * Returns 0 on failure or 1 on success.
 */
static inline int i2o_block_sglist_alloc(struct i2o_controller *c,
                                         struct i2o_block_request *ireq,
                                         u32 **mptr)
{
        int nents;
        enum dma_data_direction direction;

        ireq->dev = &c->pdev->dev;
        nents = blk_rq_map_sg(ireq->req->q, ireq->req, ireq->sg_table);

        if (rq_data_dir(ireq->req) == READ)
                direction = PCI_DMA_FROMDEVICE;
        else
                direction = PCI_DMA_TODEVICE;

        ireq->sg_nents = nents;

        return i2o_dma_map_sg(c, ireq->sg_table, nents, direction, mptr);
};

/**
 * i2o_block_sglist_free - Frees the SG list
 * @ireq: I2O block request from which the SG should be freed
 *
 * Frees the SG list from the I2O block request.
 */
static inline void i2o_block_sglist_free(struct i2o_block_request *ireq)
{
        enum dma_data_direction direction;

        if (rq_data_dir(ireq->req) == READ)
                direction = PCI_DMA_FROMDEVICE;
        else
                direction = PCI_DMA_TODEVICE;

        dma_unmap_sg(ireq->dev, ireq->sg_table, ireq->sg_nents, direction);
};

/**
 * i2o_block_prep_req_fn - Allocates I2O block device specific struct
 * @q: request queue for the request
 * @req: the request to prepare
 *
 * Allocate the necessary i2o_block_request struct and connect it to
 * the request. This is needed so that we do not lose the SG list later on.
 *
 * Returns BLKPREP_OK on success or BLKPREP_DEFER on failure.
 */
static int i2o_block_prep_req_fn(struct request_queue *q, struct request *req)
{
        struct i2o_block_device *i2o_blk_dev = q->queuedata;
        struct i2o_block_request *ireq;

        if (unlikely(!i2o_blk_dev)) {
                osm_err("block device already removed\n");
                return BLKPREP_KILL;
        }

        /* connect the i2o_block_request to the request */
        if (!req->special) {
                ireq = i2o_block_request_alloc();
                if (IS_ERR(ireq)) {
                        osm_debug("unable to allocate i2o_block_request!\n");
                        return BLKPREP_DEFER;
                }

                ireq->i2o_blk_dev = i2o_blk_dev;
                req->special = ireq;
                ireq->req = req;
        }
        /* do not come back here */
        req->cmd_flags |= REQ_DONTPREP;

        return BLKPREP_OK;
};

/**
 * i2o_block_delayed_request_fn - delayed request queue function
 * @work: the delayed request with the queue to start
 *
 * If the request queue is stopped for a disk, and there is no open
 * request, a new event is created, which calls this function to start
 * the queue after I2O_BLOCK_REQUEST_TIME. Otherwise the queue will never
 * be started again.
 */
static void i2o_block_delayed_request_fn(struct work_struct *work)
{
        struct i2o_block_delayed_request *dreq =
                container_of(work, struct i2o_block_delayed_request,
                             work.work);
        struct request_queue *q = dreq->queue;
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        blk_start_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
        kfree(dreq);
};
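/*
 * Completion is split across the two functions below: i2o_block_reply()
 * runs when the controller's reply arrives, looks the original request up
 * through its transaction context, and calls i2o_block_end_request() to
 * finish the request, drop it from the device's open queue, unmap the
 * scatter/gather list and return the i2o_block_request to the mempool.
 */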
/**
 * i2o_block_end_request - Post-processing of completed commands
 * @req: request which should be completed
 * @error: 0 for success, < 0 for error
 * @nr_bytes: number of bytes to complete
 *
 * Mark the request as complete. The lock must not be held when entering.
 *
 */
static void i2o_block_end_request(struct request *req, int error,
                                  int nr_bytes)
{
        struct i2o_block_request *ireq = req->special;
        struct i2o_block_device *dev = ireq->i2o_blk_dev;
        struct request_queue *q = req->q;
        unsigned long flags;

        if (blk_end_request(req, error, nr_bytes))
                if (error)
                        blk_end_request_all(req, -EIO);

        spin_lock_irqsave(q->queue_lock, flags);

        if (likely(dev)) {
                dev->open_queue_depth--;
                list_del(&ireq->queue);
        }

        blk_start_queue(q);

        spin_unlock_irqrestore(q->queue_lock, flags);

        i2o_block_sglist_free(ireq);
        i2o_block_request_free(ireq);
};

/**
 * i2o_block_reply - Block OSM reply handler.
 * @c: I2O controller from which the message arrives
 * @m: message id of reply
 * @msg: the actual I2O message reply
 *
 * This function gets all the message replies.
 *
 */
static int i2o_block_reply(struct i2o_controller *c, u32 m,
                           struct i2o_message *msg)
{
        struct request *req;
        int error = 0;

        req = i2o_cntxt_list_get(c, le32_to_cpu(msg->u.s.tcntxt));
        if (unlikely(!req)) {
                osm_err("NULL reply received!\n");
                return -1;
        }

        /*
         * Lets see what is cooking. We stuffed the
         * request in the context.
         */

        if ((le32_to_cpu(msg->body[0]) >> 24) != 0) {
                u32 status = le32_to_cpu(msg->body[0]);
                /*
                 * Device not ready means two things. One is that the
                 * thing went offline (but not removable media)
                 *
                 * The second is that you have a SuperTrak 100 and the
                 * firmware got constipated. Unlike standard i2o card
                 * setups the supertrak returns an error rather than
                 * blocking for the timeout in these cases.
                 *
                 * Don't stick a supertrak100 into cache aggressive modes
                 */

                osm_err("TID %03x error status: 0x%02x, detailed status: "
                        "0x%04x\n",
                        (le32_to_cpu(msg->u.head[1]) >> 12 & 0xfff),
                        status >> 24, status & 0xffff);

                req->errors++;

                error = -EIO;
        }

        i2o_block_end_request(req, error, le32_to_cpu(msg->body[1]));

        return 1;
};

static void i2o_block_event(struct work_struct *work)
{
        struct i2o_event *evt = container_of(work, struct i2o_event, work);
        osm_debug("event received\n");
        kfree(evt);
};
/*
 * SCSI-CAM for ioctl geometry mapping
 * Duplicated with SCSI - this should be moved into somewhere common
 * perhaps genhd ?
 *
 * LBA -> CHS mapping table taken from:
 *
 * "Incorporating the I2O Architecture into BIOS for Intel Architecture
 * Platforms"
 *
 * This is an I2O document that is only available to I2O members,
 * not developers.
 *
 * From my understanding, this is how all the I2O cards do this
 *
 * Disk Size            | Sectors | Heads | Cylinders
 * ---------------------+---------+-------+--------------------
 * 1 < X <= 528M        | 63      | 16    | X/(63 * 16 * 512)
 * 528M < X <= 1G       | 63      | 32    | X/(63 * 32 * 512)
 * 1G < X <= 21G        | 63      | 64    | X/(63 * 64 * 512)
 * 21G < X <= 42G       | 63      | 128   | X/(63 * 128 * 512)
 * X > 42G              | 63      | 255   | X/(63 * 255 * 512)
 *
 */
#define BLOCK_SIZE_528M         1081344
#define BLOCK_SIZE_1G           2097152
#define BLOCK_SIZE_21G          4403200
#define BLOCK_SIZE_42G          8806400
#define BLOCK_SIZE_84G          17612800

static void i2o_block_biosparam(unsigned long capacity, unsigned short *cyls,
                                unsigned char *hds, unsigned char *secs)
{
        unsigned long heads, sectors, cylinders;

        sectors = 63L;                  /* Maximize sectors per track */
        if (capacity <= BLOCK_SIZE_528M)
                heads = 16;
        else if (capacity <= BLOCK_SIZE_1G)
                heads = 32;
        else if (capacity <= BLOCK_SIZE_21G)
                heads = 64;
        else if (capacity <= BLOCK_SIZE_42G)
                heads = 128;
        else
                heads = 255;

        cylinders = (unsigned long)capacity / (heads * sectors);

        *cyls = (unsigned short)cylinders;      /* Stuff return values */
        *secs = (unsigned char)sectors;
        *hds = (unsigned char)heads;
}
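/*
 * For example, a disk reported with a capacity of 2097152 sectors
 * (1 GiB with 512-byte sectors) falls into the BLOCK_SIZE_1G tier, so
 * i2o_block_biosparam() hands back 63 sectors per track, 32 heads and
 * 2097152 / (32 * 63) = 1040 cylinders (integer division).
 */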
/**
 * i2o_block_open - Open the block device
 * @bdev: block device being opened
 * @mode: file open mode
 *
 * Power up the device, mount and lock the media. This function is called
 * if the block device is opened for access.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_open(struct block_device *bdev, fmode_t mode)
{
        struct i2o_block_device *dev = bdev->bd_disk->private_data;

        if (!dev->i2o_dev)
                return -ENODEV;

        if (dev->power > 0x1f)
                i2o_block_device_power(dev, 0x02);

        i2o_block_device_mount(dev->i2o_dev, -1);

        i2o_block_device_lock(dev->i2o_dev, -1);

        osm_debug("Ready.\n");

        return 0;
};

/**
 * i2o_block_release - Release the I2O block device
 * @disk: gendisk device being released
 * @mode: file open mode
 *
 * Unlock and unmount the media, and power down the device. Gets called if
 * the block device is closed.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_release(struct gendisk *disk, fmode_t mode)
{
        struct i2o_block_device *dev = disk->private_data;
        u8 operation;

        /*
         * This is to deal with the case of an application
         * opening a device and then the device disappears while
         * it's in use, and then the application tries to release
         * it. ex: Unmounting a deleted RAID volume at reboot.
         * If we send messages, it will just cause FAILs since
         * the TID no longer exists.
         */
        if (!dev->i2o_dev)
                return 0;

        i2o_block_device_flush(dev->i2o_dev);

        i2o_block_device_unlock(dev->i2o_dev, -1);

        if (dev->flags & (1 << 3 | 1 << 4))     /* Removable */
                operation = 0x21;
        else
                operation = 0x24;

        i2o_block_device_power(dev, operation);

        return 0;
}

static int i2o_block_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        i2o_block_biosparam(get_capacity(bdev->bd_disk),
                            &geo->cylinders, &geo->heads, &geo->sectors);
        return 0;
}

/**
 * i2o_block_ioctl - Issue device specific ioctl calls.
 * @bdev: block device being opened
 * @mode: file open mode
 * @cmd: ioctl command
 * @arg: arg
 *
 * Handles ioctl request for the block device.
 *
 * Return 0 on success or negative error on failure.
 */
static int i2o_block_ioctl(struct block_device *bdev, fmode_t mode,
                           unsigned int cmd, unsigned long arg)
{
        struct gendisk *disk = bdev->bd_disk;
        struct i2o_block_device *dev = disk->private_data;

        /* Anyone capable of this syscall can do *real bad* things */

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        switch (cmd) {
        case BLKI2OGRSTRAT:
                return put_user(dev->rcache, (int __user *)arg);
        case BLKI2OGWSTRAT:
                return put_user(dev->wcache, (int __user *)arg);
        case BLKI2OSRSTRAT:
                if (arg < 0 || arg > CACHE_SMARTFETCH)
                        return -EINVAL;
                dev->rcache = arg;
                break;
        case BLKI2OSWSTRAT:
                if (arg != 0
                    && (arg < CACHE_WRITETHROUGH || arg > CACHE_SMARTBACK))
                        return -EINVAL;
                dev->wcache = arg;
                break;
        }
        return -ENOTTY;
};

/**
 * i2o_block_media_changed - Have we seen a media change?
 * @disk: gendisk which should be verified
 *
 * Verifies if the media has changed.
 *
 * Returns 1 if the media was changed or 0 otherwise.
 */
static int i2o_block_media_changed(struct gendisk *disk)
{
        struct i2o_block_device *p = disk->private_data;

        if (p->media_change_flag) {
                p->media_change_flag = 0;
                return 1;
        }
        return 0;
}
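/*
 * The BLKI2OS[RW]STRAT ioctls above only record the chosen read/write
 * cache strategy in dev->rcache and dev->wcache; the values are acted on
 * in i2o_block_transfer() below, where they select the control flags that
 * are put into the outgoing I2O block read/write message.
 */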
/**
 * i2o_block_transfer - Transfer a request to/from the I2O controller
 * @req: the request which should be transferred
 *
 * This function converts the request into an I2O message. The necessary
 * DMA buffers are allocated and, after everything is set up, the message
 * is posted to the I2O controller. No cleanup is done by this function.
 * It is done on the interrupt side when the reply arrives.
 *
 * Return 0 on success or negative error code on failure.
 */
static int i2o_block_transfer(struct request *req)
{
        struct i2o_block_device *dev = req->rq_disk->private_data;
        struct i2o_controller *c;
        u32 tid = dev->i2o_dev->lct_data.tid;
        struct i2o_message *msg;
        u32 *mptr;
        struct i2o_block_request *ireq = req->special;
        u32 tcntxt;
        u32 sgl_offset = SGL_OFFSET_8;
        u32 ctl_flags = 0x00000000;
        int rc;
        u32 cmd;

        if (unlikely(!dev->i2o_dev)) {
                osm_err("transfer to removed drive\n");
                rc = -ENODEV;
                goto exit;
        }

        c = dev->i2o_dev->iop;

        msg = i2o_msg_get(c);
        if (IS_ERR(msg)) {
                rc = PTR_ERR(msg);
                goto exit;
        }

        tcntxt = i2o_cntxt_list_add(c, req);
        if (!tcntxt) {
                rc = -ENOMEM;
                goto nop_msg;
        }

        msg->u.s.icntxt = cpu_to_le32(i2o_block_driver.context);
        msg->u.s.tcntxt = cpu_to_le32(tcntxt);

        mptr = &msg->body[0];

        if (rq_data_dir(req) == READ) {
                cmd = I2O_CMD_BLOCK_READ << 24;

                switch (dev->rcache) {
                case CACHE_PREFETCH:
                        ctl_flags = 0x201F0008;
                        break;

                case CACHE_SMARTFETCH:
                        if (blk_rq_sectors(req) > 16)
                                ctl_flags = 0x201F0008;
                        else
                                ctl_flags = 0x001F0000;
                        break;

                default:
                        break;
                }
        } else {
                cmd = I2O_CMD_BLOCK_WRITE << 24;

                switch (dev->wcache) {
                case CACHE_WRITETHROUGH:
                        ctl_flags = 0x001F0008;
                        break;
                case CACHE_WRITEBACK:
                        ctl_flags = 0x001F0010;
                        break;
                case CACHE_SMARTBACK:
                        if (blk_rq_sectors(req) > 16)
                                ctl_flags = 0x001F0004;
                        else
                                ctl_flags = 0x001F0010;
                        break;
                case CACHE_SMARTTHROUGH:
                        if (blk_rq_sectors(req) > 16)
                                ctl_flags = 0x001F0004;
                        else
                                ctl_flags = 0x001F0010;
                        /* fall through */
                default:
                        break;
                }
        }

#ifdef CONFIG_I2O_EXT_ADAPTEC
        if (c->adaptec) {
                u8 cmd[10];
                u32 scsi_flags;
                u16 hwsec;

                hwsec = queue_logical_block_size(req->q) >> KERNEL_SECTOR_SHIFT;
                memset(cmd, 0, 10);

                sgl_offset = SGL_OFFSET_12;

                msg->u.head[1] =
                        cpu_to_le32(I2O_CMD_PRIVATE << 24 | HOST_TID << 12 | tid);

                *mptr++ = cpu_to_le32(I2O_VENDOR_DPT << 16 | I2O_CMD_SCSI_EXEC);
                *mptr++ = cpu_to_le32(tid);

                /*
                 * ENABLE_DISCONNECT
                 * SIMPLE_TAG
                 * RETURN_SENSE_DATA_IN_REPLY_MESSAGE_FRAME
                 */
                if (rq_data_dir(req) == READ) {
                        cmd[0] = READ_10;
                        scsi_flags = 0x60a0000a;
                } else {
                        cmd[0] = WRITE_10;
                        scsi_flags = 0xa0a0000a;
                }

                *mptr++ = cpu_to_le32(scsi_flags);

                *((u32 *) &cmd[2]) = cpu_to_be32(blk_rq_pos(req) * hwsec);
                *((u16 *) &cmd[7]) = cpu_to_be16(blk_rq_sectors(req) * hwsec);

                memcpy(mptr, cmd, 10);
                mptr += 4;
                *mptr++ = cpu_to_le32(blk_rq_bytes(req));
        } else
#endif
        {
                msg->u.head[1] = cpu_to_le32(cmd | HOST_TID << 12 | tid);
                *mptr++ = cpu_to_le32(ctl_flags);
                *mptr++ = cpu_to_le32(blk_rq_bytes(req));
                *mptr++ =
                        cpu_to_le32((u32) (blk_rq_pos(req) << KERNEL_SECTOR_SHIFT));
                *mptr++ =
                        cpu_to_le32(blk_rq_pos(req) >> (32 - KERNEL_SECTOR_SHIFT));
        }

        if (!i2o_block_sglist_alloc(c, ireq, &mptr)) {
                rc = -ENOMEM;
                goto context_remove;
        }

        msg->u.head[0] =
                cpu_to_le32(I2O_MESSAGE_SIZE(mptr - &msg->u.head[0]) | sgl_offset);

        list_add_tail(&ireq->queue, &dev->open_queue);
        dev->open_queue_depth++;

        i2o_msg_post(c, msg);

        return 0;

      context_remove:
        i2o_cntxt_list_remove(c, req);

      nop_msg:
        i2o_msg_nop(c, msg);

      exit:
        return rc;
};
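/*
 * In the generic (non-Adaptec) branch above, the outgoing message ends up
 * laid out as: head[1] = command << 24 | HOST_TID << 12 | TID, followed in
 * the body by the cache control flags, the transfer length in bytes, and
 * the 64-bit byte offset of the request
 * (blk_rq_pos(req) << KERNEL_SECTOR_SHIFT, split into low and high 32-bit
 * words); i2o_block_sglist_alloc() then appends the SG elements and
 * head[0] is finally filled in with the resulting message size and SGL
 * offset.
 */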
/**
 * i2o_block_request_fn - request queue handling function
 * @q: request queue from which the request could be fetched
 *
 * Takes the next request from the queue, transfers it and, if no error
 * occurs, dequeues it from the queue. On arrival of the reply the message
 * will be processed further. If an error occurs the request is requeued.
 */
static void i2o_block_request_fn(struct request_queue *q)
{
        struct request *req;

        while (!blk_queue_plugged(q)) {
                req = blk_peek_request(q);
                if (!req)
                        break;

                if (blk_fs_request(req)) {
                        struct i2o_block_delayed_request *dreq;
                        struct i2o_block_request *ireq = req->special;
                        unsigned int queue_depth;

                        queue_depth = ireq->i2o_blk_dev->open_queue_depth;

                        if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS) {
                                if (!i2o_block_transfer(req)) {
                                        blk_start_request(req);
                                        continue;
                                } else
                                        osm_info("transfer error\n");
                        }

                        if (queue_depth)
                                break;

                        /* stop the queue and retry later */
                        dreq = kmalloc(sizeof(*dreq), GFP_ATOMIC);
                        if (!dreq)
                                continue;

                        dreq->queue = q;
                        INIT_DELAYED_WORK(&dreq->work,
                                          i2o_block_delayed_request_fn);

                        if (!queue_delayed_work(i2o_block_driver.event_queue,
                                                &dreq->work,
                                                I2O_BLOCK_RETRY_TIME))
                                kfree(dreq);
                        else {
                                blk_stop_queue(q);
                                break;
                        }
                } else {
                        blk_start_request(req);
                        __blk_end_request_all(req, -EIO);
                }
        }
};
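/*
 * The request function above throttles the device: at most
 * I2O_BLOCK_MAX_OPEN_REQUESTS requests are outstanding per device. If a
 * transfer cannot be started while nothing is outstanding, the queue is
 * stopped and a delayed work item on i2o_block_driver.event_queue
 * restarts it after I2O_BLOCK_RETRY_TIME via
 * i2o_block_delayed_request_fn().
 */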
/* I2O Block device operations definition */
static struct block_device_operations i2o_block_fops = {
        .owner = THIS_MODULE,
        .open = i2o_block_open,
        .release = i2o_block_release,
        .locked_ioctl = i2o_block_ioctl,
        .getgeo = i2o_block_getgeo,
        .media_changed = i2o_block_media_changed
};

/**
 * i2o_block_device_alloc - Allocate memory for an I2O Block device
 *
 * Allocate memory for the i2o_block_device struct, gendisk and request
 * queue and initialize them as far as no additional information is needed.
 *
 * Returns a pointer to the allocated I2O Block device on success or a
 * negative error code on failure.
 */
static struct i2o_block_device *i2o_block_device_alloc(void)
{
        struct i2o_block_device *dev;
        struct gendisk *gd;
        struct request_queue *queue;
        int rc;

        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev) {
                osm_err("Insufficient memory to allocate I2O Block disk.\n");
                rc = -ENOMEM;
                goto exit;
        }

        INIT_LIST_HEAD(&dev->open_queue);
        spin_lock_init(&dev->lock);
        dev->rcache = CACHE_PREFETCH;
        dev->wcache = CACHE_WRITEBACK;

        /* allocate a gendisk with 16 partitions */
        gd = alloc_disk(16);
        if (!gd) {
                osm_err("Insufficient memory to allocate gendisk.\n");
                rc = -ENOMEM;
                goto cleanup_dev;
        }

        /* initialize the request queue */
        queue = blk_init_queue(i2o_block_request_fn, &dev->lock);
        if (!queue) {
                osm_err("Insufficient memory to allocate request queue.\n");
                rc = -ENOMEM;
                goto cleanup_queue;
        }

        blk_queue_prep_rq(queue, i2o_block_prep_req_fn);

        gd->major = I2O_MAJOR;
        gd->queue = queue;
        gd->fops = &i2o_block_fops;
        gd->private_data = dev;

        dev->gd = gd;

        return dev;

      cleanup_queue:
        put_disk(gd);

      cleanup_dev:
        kfree(dev);

      exit:
        return ERR_PTR(rc);
};

/**
 * i2o_block_probe - verify if dev is an I2O Block device and install it
 * @dev: device to verify if it is an I2O Block device
 *
 * We only verify if the user_tid of the device is 0xfff and then install
 * the device. Otherwise it is used by some other device (e.g. RAID).
 *
 * Returns 0 on success or negative error code on failure.
 */
static int i2o_block_probe(struct device *dev)
{
        struct i2o_device *i2o_dev = to_i2o_device(dev);
        struct i2o_controller *c = i2o_dev->iop;
        struct i2o_block_device *i2o_blk_dev;
        struct gendisk *gd;
        struct request_queue *queue;
        static int unit = 0;
        int rc;
        u64 size;
        u32 blocksize;
        u16 body_size = 4;
        u16 power;
        unsigned short max_sectors;

#ifdef CONFIG_I2O_EXT_ADAPTEC
        if (c->adaptec)
                body_size = 8;
#endif

        if (c->limit_sectors)
                max_sectors = I2O_MAX_SECTORS_LIMITED;
        else
                max_sectors = I2O_MAX_SECTORS;

        /* skip devices which are used by IOP */
        if (i2o_dev->lct_data.user_tid != 0xfff) {
                osm_debug("skipping used device %03x\n", i2o_dev->lct_data.tid);
                return -ENODEV;
        }

        if (i2o_device_claim(i2o_dev)) {
                osm_warn("Unable to claim device. Installation aborted\n");
                rc = -EFAULT;
                goto exit;
        }

        i2o_blk_dev = i2o_block_device_alloc();
        if (IS_ERR(i2o_blk_dev)) {
                osm_err("could not alloc a new I2O block device");
                rc = PTR_ERR(i2o_blk_dev);
                goto claim_release;
        }

        i2o_blk_dev->i2o_dev = i2o_dev;
        dev_set_drvdata(dev, i2o_blk_dev);

        /* setup gendisk */
        gd = i2o_blk_dev->gd;
        gd->first_minor = unit << 4;
        sprintf(gd->disk_name, "i2o/hd%c", 'a' + unit);
        gd->driverfs_dev = &i2o_dev->device;

        /* setup request queue */
        queue = gd->queue;
        queue->queuedata = i2o_blk_dev;

        blk_queue_max_phys_segments(queue, I2O_MAX_PHYS_SEGMENTS);
        blk_queue_max_sectors(queue, max_sectors);
        blk_queue_max_hw_segments(queue, i2o_sg_tablesize(c, body_size));

        osm_debug("max sectors = %d\n", queue->max_sectors);
        osm_debug("phys segments = %d\n", queue->max_phys_segments);
        osm_debug("max hw segments = %d\n", queue->max_hw_segments);

        /*
         * Ask for the current media data. If that isn't supported
         * then we ask for the device capacity data
         */
        if (!i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) ||
            !i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4)) {
                blk_queue_logical_block_size(queue, le32_to_cpu(blocksize));
        } else
                osm_warn("unable to get blocksize of %s\n", gd->disk_name);

        if (!i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) ||
            !i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8)) {
                set_capacity(gd, le64_to_cpu(size) >> KERNEL_SECTOR_SHIFT);
        } else
                osm_warn("could not get size of %s\n", gd->disk_name);

        if (!i2o_parm_field_get(i2o_dev, 0x0000, 2, &power, 2))
                i2o_blk_dev->power = power;

        i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0xffffffff);

        add_disk(gd);

        unit++;

        osm_info("device added (TID: %03x): %s\n", i2o_dev->lct_data.tid,
                 i2o_blk_dev->gd->disk_name);

        return 0;

      claim_release:
        i2o_device_claim_release(i2o_dev);

      exit:
        return rc;
};
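/*
 * The i2o_parm_field_get() calls in i2o_block_probe() read the block size,
 * capacity and power state from the device's parameter groups: group
 * 0x0004 (the "current media data" mentioned in the comment above) is
 * tried first and group 0x0000 (device data) is used as a fallback; if
 * neither query succeeds only a warning is logged and the defaults are
 * kept.
 */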
/* Block OSM driver struct */
static struct i2o_driver i2o_block_driver = {
        .name = OSM_NAME,
        .event = i2o_block_event,
        .reply = i2o_block_reply,
        .classes = i2o_block_class_id,
        .driver = {
                .probe = i2o_block_probe,
                .remove = i2o_block_remove,
        },
};

/**
 * i2o_block_init - Block OSM initialization function
 *
 * Allocates the slab and mempool for request structs, registers the
 * i2o_block block device and finally registers the Block OSM with the
 * I2O core.
 *
 * Returns 0 on success or negative error code on failure.
 */
static int __init i2o_block_init(void)
{
        int rc;
        int size;

        printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");

        /* Allocate request mempool and slab */
        size = sizeof(struct i2o_block_request);
        i2o_blk_req_pool.slab = kmem_cache_create("i2o_block_req", size, 0,
                                                  SLAB_HWCACHE_ALIGN, NULL);
        if (!i2o_blk_req_pool.slab) {
                osm_err("can't init request slab\n");
                rc = -ENOMEM;
                goto exit;
        }

        i2o_blk_req_pool.pool =
                mempool_create_slab_pool(I2O_BLOCK_REQ_MEMPOOL_SIZE,
                                         i2o_blk_req_pool.slab);
        if (!i2o_blk_req_pool.pool) {
                osm_err("can't init request mempool\n");
                rc = -ENOMEM;
                goto free_slab;
        }

        /* Register the block device interfaces */
        rc = register_blkdev(I2O_MAJOR, "i2o_block");
        if (rc) {
                osm_err("unable to register block device\n");
                goto free_mempool;
        }
#ifdef MODULE
        osm_info("registered device at major %d\n", I2O_MAJOR);
#endif

        /* Register Block OSM into I2O core */
        rc = i2o_driver_register(&i2o_block_driver);
        if (rc) {
                osm_err("Could not register Block driver\n");
                goto unregister_blkdev;
        }

        return 0;

      unregister_blkdev:
        unregister_blkdev(I2O_MAJOR, "i2o_block");

      free_mempool:
        mempool_destroy(i2o_blk_req_pool.pool);

      free_slab:
        kmem_cache_destroy(i2o_blk_req_pool.slab);

      exit:
        return rc;
};

/**
 * i2o_block_exit - Block OSM exit function
 *
 * Unregisters Block OSM from I2O core, unregisters i2o_block block device
 * and frees the mempool and slab.
 */
static void __exit i2o_block_exit(void)
{
        /* Unregister I2O Block OSM from I2O core */
        i2o_driver_unregister(&i2o_block_driver);

        /* Unregister block device */
        unregister_blkdev(I2O_MAJOR, "i2o_block");

        /* Free request mempool and slab */
        mempool_destroy(i2o_blk_req_pool.pool);
        kmem_cache_destroy(i2o_blk_req_pool.slab);
};

MODULE_AUTHOR("Red Hat");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(OSM_DESCRIPTION);
MODULE_VERSION(OSM_VERSION);

module_init(i2o_block_init);
module_exit(i2o_block_exit);
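/*
 * Note on teardown order: i2o_block_exit() undoes the setup done in
 * i2o_block_init() in reverse - the OSM is unregistered from the I2O core
 * first, then the block major is released, and only then are the mempool
 * and slab destroyed, mirroring the error-unwind labels in
 * i2o_block_init().
 */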