/*
 * Disk Array driver for Compaq SMART2 Controllers
 * Copyright 1998 Compaq Computer Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Questions/Comments/Bugfixes to iss_storagedev@hp.com
 *
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/bio.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/major.h>
#include <linux/fs.h>
#include <linux/blkpg.h>
#include <linux/timer.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/hdreg.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/scatterlist.h>
#include <asm/uaccess.h>
#include <asm/io.h>


#define SMART2_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))

#define DRIVER_NAME "Compaq SMART2 Driver (v 2.6.0)"
#define DRIVER_VERSION SMART2_DRIVER_VERSION(2,6,0)

/* Embedded module documentation macros - see modules.h */
/* Original author Chris Frantz - Compaq Computer Corporation */
MODULE_AUTHOR("Compaq Computer Corporation");
MODULE_DESCRIPTION("Driver for Compaq Smart2 Array Controllers version 2.6.0");
MODULE_LICENSE("GPL");

#include "cpqarray.h"
#include "ida_cmd.h"
#include "smart1,2.h"
#include "ida_ioctl.h"

#define READ_AHEAD	128
#define NR_CMDS		128	/* This could probably go as high as ~400 */

#define MAX_CTLR	8
#define CTLR_SHIFT	8

#define CPQARRAY_DMA_MASK	0xFFFFFFFF	/* 32 bit DMA */

static int nr_ctlr;
static ctlr_info_t *hba[MAX_CTLR];

static int eisa[8];

#define NR_PRODUCTS ARRAY_SIZE(products)

/*  board_id = Subsystem Device ID & Vendor ID
 *  product = Marketing Name for the board
 *  access = Address of the struct of function pointers
 */
static struct board_type products[] = {
	{ 0x0040110E, "IDA",			&smart1_access },
	{ 0x0140110E, "IDA-2",			&smart1_access },
	{ 0x1040110E, "IAES",			&smart1_access },
	{ 0x2040110E, "SMART",			&smart1_access },
	{ 0x3040110E, "SMART-2/E",		&smart2e_access },
	{ 0x40300E11, "SMART-2/P",		&smart2_access },
	{ 0x40310E11, "SMART-2SL",		&smart2_access },
	{ 0x40320E11, "Smart Array 3200",	&smart2_access },
	{ 0x40330E11, "Smart Array 3100ES",	&smart2_access },
	{ 0x40340E11, "Smart Array 221",	&smart2_access },
	{ 0x40400E11, "Integrated Array",	&smart4_access },
	{ 0x40480E11, "Compaq Raid LC2",	&smart4_access },
	{ 0x40500E11, "Smart Array 4200",	&smart4_access },
	{ 0x40510E11, "Smart Array 4250ES",	&smart4_access },
	{ 0x40580E11, "Smart Array 431",	&smart4_access },
};

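/*
 * How a board_id is formed (as used by the table above): for PCI boards it
 * is the 32-bit dword read from config space offset 0x2c, i.e. the
 * subsystem vendor ID in the low 16 bits and the subsystem device ID in
 * the high 16 bits.  For example 0x40300E11 is subsystem device 0x4030
 * with subsystem vendor 0x0E11 (Compaq), which is a SMART-2/P.  EISA
 * boards instead read their ID register at I/O offset 0xC80 (see
 * cpqarray_eisa_detect() below).
 */
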
/* define the PCI info for the PCI cards this driver can control */
static const struct pci_device_id cpqarray_pci_device_id[] =
{
	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
		0x0E11, 0x4058, 0, 0, 0},	/* SA431 */
	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
		0x0E11, 0x4051, 0, 0, 0},	/* SA4250ES */
	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
		0x0E11, 0x4050, 0, 0, 0},	/* SA4200 */
	{ PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C1510,
		0x0E11, 0x4048, 0, 0, 0},	/* LC2 */
	{ PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C1510,
		0x0E11, 0x4040, 0, 0, 0},	/* Integrated Array */
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
		0x0E11, 0x4034, 0, 0, 0},	/* SA 221 */
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
		0x0E11, 0x4033, 0, 0, 0},	/* SA 3100ES */
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
		0x0E11, 0x4032, 0, 0, 0},	/* SA 3200 */
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
		0x0E11, 0x4031, 0, 0, 0},	/* SA 2SL */
	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
		0x0E11, 0x4030, 0, 0, 0},	/* SA 2P */
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, cpqarray_pci_device_id);

static struct gendisk *ida_gendisk[MAX_CTLR][NWD];

/* Debug... */
#define DBG(s)	do { s } while(0)
/* Debug (general info)... */
#define DBGINFO(s) do { } while(0)
/* Debug Paranoid... */
#define DBGP(s)  do { } while(0)
/* Debug Extra Paranoid... */
#define DBGPX(s) do { } while(0)

static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev);
static void __iomem *remap_pci_mem(ulong base, ulong size);
static int cpqarray_eisa_detect(void);
static int pollcomplete(int ctlr);
static void getgeometry(int ctlr);
static void start_fwbk(int ctlr);

static cmdlist_t * cmd_alloc(ctlr_info_t *h, int get_from_pool);
static void cmd_free(ctlr_info_t *h, cmdlist_t *c, int got_from_pool);

static void free_hba(int i);
static int alloc_cpqarray_hba(void);

static int sendcmd(
	__u8	cmd,
	int	ctlr,
	void	*buff,
	size_t	size,
	unsigned int blk,
	unsigned int blkcnt,
	unsigned int log_unit );

static int ida_open(struct block_device *bdev, fmode_t mode);
static int ida_release(struct gendisk *disk, fmode_t mode);
static int ida_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg);
static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo);
static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io);

static void do_ida_request(struct request_queue *q);
static void start_io(ctlr_info_t *h);

static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c);
static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c);
static inline void complete_command(cmdlist_t *cmd, int timeout);

static irqreturn_t do_ida_intr(int irq, void *dev_id);
static void ida_timer(unsigned long tdata);
static int ida_revalidate(struct gendisk *disk);
static int revalidate_allvol(ctlr_info_t *host);
static int cpqarray_register_ctlr(int ctlr, struct pci_dev *pdev);

#ifdef CONFIG_PROC_FS
static void ida_procinit(int i);
static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data);
#else
static void ida_procinit(int i) {}
#endif

static inline drv_info_t *get_drv(struct gendisk *disk)
{
	return disk->private_data;
}

static inline ctlr_info_t *get_host(struct gendisk *disk)
{
	return disk->queue->queuedata;
}

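/*
 * The two helpers above rely on the wiring done in cpqarray_register_ctlr():
 * each gendisk's ->private_data points at its drv_info_t inside hba[i]->drv[],
 * and the shared request queue's ->queuedata points back at the ctlr_info_t,
 * so a struct gendisk is enough to find both the logical drive and its host.
 */
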
static struct block_device_operations ida_fops  = {
	.owner		= THIS_MODULE,
	.open		= ida_open,
	.release	= ida_release,
	.locked_ioctl	= ida_ioctl,
	.getgeo		= ida_getgeo,
	.revalidate_disk= ida_revalidate,
};


#ifdef CONFIG_PROC_FS

static struct proc_dir_entry *proc_array;

/*
 * Get us a file in /proc/driver/cpqarray that says something about each controller.
 * Create /proc/driver/cpqarray if it doesn't exist yet.
 */
static void __init ida_procinit(int i)
{
	if (proc_array == NULL) {
		proc_array = proc_mkdir("driver/cpqarray", NULL);
		if (!proc_array) return;
	}

	create_proc_read_entry(hba[i]->devname, 0, proc_array,
			       ida_proc_get_info, hba[i]);
}

/*
 * Report information about this controller.
 */
static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data)
{
	off_t pos = 0;
	off_t len = 0;
	int size, i, ctlr;
	ctlr_info_t *h = (ctlr_info_t*)data;
	drv_info_t *drv;
#ifdef CPQ_PROC_PRINT_QUEUES
	cmdlist_t *c;
	unsigned long flags;
#endif

	ctlr = h->ctlr;
	size = sprintf(buffer, "%s: Compaq %s Controller\n"
		" Board ID: 0x%08lx\n"
		" Firmware Revision: %c%c%c%c\n"
		" Controller Sig: 0x%08lx\n"
		" Memory Address: 0x%08lx\n"
		" I/O Port: 0x%04x\n"
		" IRQ: %d\n"
		" Logical drives: %d\n"
		" Physical drives: %d\n\n"
		" Current Q depth: %d\n"
		" Max Q depth since init: %d\n\n",
		h->devname,
		h->product_name,
		(unsigned long)h->board_id,
		h->firm_rev[0], h->firm_rev[1], h->firm_rev[2], h->firm_rev[3],
		(unsigned long)h->ctlr_sig, (unsigned long)h->vaddr,
		(unsigned int) h->io_mem_addr, (unsigned int)h->intr,
		h->log_drives, h->phys_drives,
		h->Qdepth, h->maxQsinceinit);

	pos += size; len += size;

	size = sprintf(buffer+len, "Logical Drive Info:\n");
	pos += size; len += size;

	for(i=0; i<h->log_drives; i++) {
		drv = &h->drv[i];
		size = sprintf(buffer+len, "ida/c%dd%d: blksz=%d nr_blks=%d\n",
				ctlr, i, drv->blk_size, drv->nr_blks);
		pos += size; len += size;
	}

#ifdef CPQ_PROC_PRINT_QUEUES
	spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
	size = sprintf(buffer+len, "\nCurrent Queues:\n");
	pos += size; len += size;

	c = h->reqQ;
	size = sprintf(buffer+len, "reqQ = %p", c); pos += size; len += size;
	if (c) c=c->next;
	while(c && c != h->reqQ) {
		size = sprintf(buffer+len, "->%p", c);
		pos += size; len += size;
		c=c->next;
	}

	c = h->cmpQ;
	size = sprintf(buffer+len, "\ncmpQ = %p", c); pos += size; len += size;
	if (c) c=c->next;
	while(c && c != h->cmpQ) {
		size = sprintf(buffer+len, "->%p", c);
		pos += size; len += size;
		c=c->next;
	}

	size = sprintf(buffer+len, "\n"); pos += size; len += size;
	spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags);
#endif
	size = sprintf(buffer+len, "nr_allocs = %d\nnr_frees = %d\n",
			h->nr_allocs, h->nr_frees);
	pos += size; len += size;

	*eof = 1;
	*start = buffer+offset;
	len -= offset;
	if (len>length)
		len = length;
	return len;
}
#endif /* CONFIG_PROC_FS */

module_param_array(eisa, int, NULL, 0);

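/*
 * EISA controllers cannot be probed automatically here, so their I/O
 * addresses are supplied by the user: either as a module parameter
 * (e.g. "modprobe cpqarray eisa=0x4000,0x5000") or, for a built-in
 * driver, via the "smart2=" boot option handled by cpqarray_setup()
 * further down.  Up to eight addresses are honoured.
 */
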
static void release_io_mem(ctlr_info_t *c)
{
	/* if the I/O region was never reserved, do nothing */
	if( c->io_mem_addr == 0)
		return;
	release_region(c->io_mem_addr, c->io_mem_length);
	c->io_mem_addr = 0;
	c->io_mem_length = 0;
}

static void __devexit cpqarray_remove_one(int i)
{
	int j;
	char buff[4];

	/* sendcmd will turn off interrupts and send the flush, which
	 * writes all data in the battery backed cache to disk.  The
	 * command returns no data, but we don't want to pass sendcmd
	 * a NULL buffer. */
	if( sendcmd(FLUSH_CACHE, i, buff, 4, 0, 0, 0))
	{
		printk(KERN_WARNING "Unable to flush cache on controller %d\n",
				i);
	}
	free_irq(hba[i]->intr, hba[i]);
	iounmap(hba[i]->vaddr);
	unregister_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname);
	del_timer(&hba[i]->timer);
	remove_proc_entry(hba[i]->devname, proc_array);
	pci_free_consistent(hba[i]->pci_dev,
			NR_CMDS * sizeof(cmdlist_t), (hba[i]->cmd_pool),
			hba[i]->cmd_pool_dhandle);
	kfree(hba[i]->cmd_pool_bits);
	for(j = 0; j < NWD; j++) {
		if (ida_gendisk[i][j]->flags & GENHD_FL_UP)
			del_gendisk(ida_gendisk[i][j]);
		put_disk(ida_gendisk[i][j]);
	}
	blk_cleanup_queue(hba[i]->queue);
	release_io_mem(hba[i]);
	free_hba(i);
}

static void __devexit cpqarray_remove_one_pci (struct pci_dev *pdev)
{
	int i;
	ctlr_info_t *tmp_ptr;

	if (pci_get_drvdata(pdev) == NULL) {
		printk( KERN_ERR "cpqarray: Unable to remove device\n");
		return;
	}

	tmp_ptr = pci_get_drvdata(pdev);
	i = tmp_ptr->ctlr;
	if (hba[i] == NULL) {
		printk(KERN_ERR "cpqarray: controller %d appears to have "
			"already been removed\n", i);
		return;
	}
	pci_set_drvdata(pdev, NULL);

	cpqarray_remove_one(i);
}

/* removing an instance that was not removed automatically..
 * must be an eisa card.
 */
static void __devexit cpqarray_remove_one_eisa (int i)
{
	if (hba[i] == NULL) {
		printk(KERN_ERR "cpqarray: controller %d appears to have "
			"already been removed\n", i);
		return;
	}
	cpqarray_remove_one(i);
}

/* pdev is NULL for eisa */
static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
{
	struct request_queue *q;
	int j;

	/*
	 * register block devices
	 * Find disks and fill in structs
	 * Get an interrupt, set the Q depth and get into /proc
	 */

	/* If this is successful it should ensure that we are the only */
	/* instance of the driver using this major number */
	if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
		goto Enomem4;
	}
	hba[i]->access.set_intr_mask(hba[i], 0);
	if (request_irq(hba[i]->intr, do_ida_intr,
		IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
	{
		printk(KERN_ERR "cpqarray: Unable to get irq %d for %s\n",
				hba[i]->intr, hba[i]->devname);
		goto Enomem3;
	}

	for (j=0; j<NWD; j++) {
		ida_gendisk[i][j] = alloc_disk(1 << NWD_SHIFT);
		if (!ida_gendisk[i][j])
			goto Enomem2;
	}

	hba[i]->cmd_pool = pci_alloc_consistent(
		hba[i]->pci_dev, NR_CMDS * sizeof(cmdlist_t),
		&(hba[i]->cmd_pool_dhandle));
	hba[i]->cmd_pool_bits = kcalloc(
		DIV_ROUND_UP(NR_CMDS, BITS_PER_LONG), sizeof(unsigned long),
		GFP_KERNEL);

	if (!hba[i]->cmd_pool_bits || !hba[i]->cmd_pool)
		goto Enomem1;

	memset(hba[i]->cmd_pool, 0, NR_CMDS * sizeof(cmdlist_t));
	printk(KERN_INFO "cpqarray: Finding drives on %s",
		hba[i]->devname);

	spin_lock_init(&hba[i]->lock);
	q = blk_init_queue(do_ida_request, &hba[i]->lock);
	if (!q)
		goto Enomem1;

	hba[i]->queue = q;
	q->queuedata = hba[i];

	getgeometry(i);
	start_fwbk(i);

	ida_procinit(i);

	if (pdev)
		blk_queue_bounce_limit(q, hba[i]->pci_dev->dma_mask);

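	/* EISA controllers come through here with pdev == NULL; with no
	 * DMA mask to honour they simply keep the block layer's default
	 * bounce limit. */
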
	/* This is a hardware imposed limit. */
	blk_queue_max_hw_segments(q, SG_MAX);

	/* This is a driver limit and could be eliminated. */
	blk_queue_max_phys_segments(q, SG_MAX);

	init_timer(&hba[i]->timer);
	hba[i]->timer.expires = jiffies + IDA_TIMER;
	hba[i]->timer.data = (unsigned long)hba[i];
	hba[i]->timer.function = ida_timer;
	add_timer(&hba[i]->timer);

	/* Enable IRQ now that spinlock and rate limit timer are set up */
	hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);

	for(j=0; j<NWD; j++) {
		struct gendisk *disk = ida_gendisk[i][j];
		drv_info_t *drv = &hba[i]->drv[j];
		sprintf(disk->disk_name, "ida/c%dd%d", i, j);
		disk->major = COMPAQ_SMART2_MAJOR + i;
		disk->first_minor = j<<NWD_SHIFT;
		disk->fops = &ida_fops;
		if (j && !drv->nr_blks)
			continue;
		blk_queue_hardsect_size(hba[i]->queue, drv->blk_size);
		set_capacity(disk, drv->nr_blks);
		disk->queue = hba[i]->queue;
		disk->private_data = drv;
		add_disk(disk);
	}

	/* done ! */
	return(i);

Enomem1:
	nr_ctlr = i;
	kfree(hba[i]->cmd_pool_bits);
	if (hba[i]->cmd_pool)
		pci_free_consistent(hba[i]->pci_dev, NR_CMDS*sizeof(cmdlist_t),
				hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
Enomem2:
	while (j--) {
		put_disk(ida_gendisk[i][j]);
		ida_gendisk[i][j] = NULL;
	}
	free_irq(hba[i]->intr, hba[i]);
Enomem3:
	unregister_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname);
Enomem4:
	if (pdev)
		pci_set_drvdata(pdev, NULL);
	release_io_mem(hba[i]);
	free_hba(i);

	printk(KERN_ERR "cpqarray: out of memory\n");

	return -1;
}

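/*
 * To summarize, cpqarray_register_ctlr() brings one controller fully
 * online: it claims the block major, hooks up the interrupt handler,
 * allocates the DMA-coherent command pool and its allocation bitmap,
 * creates the request queue, reads the drive geometry, and finally
 * registers a gendisk for every configured logical volume.  The Enomem
 * labels unwind those steps in reverse order on failure.
 */
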
static int __init cpqarray_init_one( struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	int i;

	printk(KERN_DEBUG "cpqarray: Device 0x%x has been found at"
			" bus %d dev %d func %d\n",
			pdev->device, pdev->bus->number, PCI_SLOT(pdev->devfn),
			PCI_FUNC(pdev->devfn));
	i = alloc_cpqarray_hba();
	if( i < 0 )
		return (-1);
	memset(hba[i], 0, sizeof(ctlr_info_t));
	sprintf(hba[i]->devname, "ida%d", i);
	hba[i]->ctlr = i;
	/* Initialize the pdev driver private data */
	pci_set_drvdata(pdev, hba[i]);

	if (cpqarray_pci_init(hba[i], pdev) != 0) {
		pci_set_drvdata(pdev, NULL);
		release_io_mem(hba[i]);
		free_hba(i);
		return -1;
	}

	return (cpqarray_register_ctlr(i, pdev));
}

static struct pci_driver cpqarray_pci_driver = {
	.name = "cpqarray",
	.probe = cpqarray_init_one,
	.remove = __devexit_p(cpqarray_remove_one_pci),
	.id_table = cpqarray_pci_device_id,
};

/*
 * This is it.  Find all the controllers and register them.
 * Returns 0 if at least one controller was registered, -ENODEV otherwise.
 */
static int __init cpqarray_init(void)
{
	int num_cntlrs_reg = 0;
	int i;
	int rc = 0;

	/* detect controllers */
	printk(DRIVER_NAME "\n");

	rc = pci_register_driver(&cpqarray_pci_driver);
	if (rc)
		return rc;
	cpqarray_eisa_detect();

	for (i=0; i < MAX_CTLR; i++) {
		if (hba[i] != NULL)
			num_cntlrs_reg++;
	}

	if (num_cntlrs_reg)
		return 0;
	else {
		pci_unregister_driver(&cpqarray_pci_driver);
		return -ENODEV;
	}
}

/* Function to find the first free pointer into our hba[] array */
/* Returns -1 if no free entries are left. */
static int alloc_cpqarray_hba(void)
{
	int i;

	for(i=0; i< MAX_CTLR; i++) {
		if (hba[i] == NULL) {
			hba[i] = kmalloc(sizeof(ctlr_info_t), GFP_KERNEL);
			if(hba[i]==NULL) {
				printk(KERN_ERR "cpqarray: out of memory.\n");
				return (-1);
			}
			return (i);
		}
	}
	printk(KERN_WARNING "cpqarray: This driver supports a maximum"
		" of 8 controllers.\n");
	return(-1);
}

static void free_hba(int i)
{
	kfree(hba[i]);
	hba[i]=NULL;
}

/*
 * Find the IO address of the controller, its IRQ and so forth.  Fill
 * in some basic stuff into the ctlr_info_t structure.
 */
static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
{
	ushort vendor_id, device_id, command;
	unchar cache_line_size, latency_timer;
	unchar irq, revision;
	unsigned long addr[6];
	__u32 board_id;

	int i;

	c->pci_dev = pdev;
	pci_set_master(pdev);
	if (pci_enable_device(pdev)) {
		printk(KERN_ERR "cpqarray: Unable to Enable PCI device\n");
		return -1;
	}
	vendor_id = pdev->vendor;
	device_id = pdev->device;
	irq = pdev->irq;

	for(i=0; i<6; i++)
		addr[i] = pci_resource_start(pdev, i);

	if (pci_set_dma_mask(pdev, CPQARRAY_DMA_MASK) != 0)
	{
		printk(KERN_ERR "cpqarray: Unable to set DMA mask\n");
		return -1;
	}

	pci_read_config_word(pdev, PCI_COMMAND, &command);
	pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line_size);
	pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency_timer);

	pci_read_config_dword(pdev, 0x2c, &board_id);

	/* check to see if controller has been disabled */
	if(!(command & 0x02)) {
		printk(KERN_WARNING
			"cpqarray: controller appears to be disabled\n");
		return(-1);
	}

DBGINFO(
	printk("vendor_id = %x\n", vendor_id);
	printk("device_id = %x\n", device_id);
	printk("command = %x\n", command);
	for(i=0; i<6; i++)
		printk("addr[%d] = %lx\n", i, addr[i]);
	printk("revision = %x\n", revision);
	printk("irq = %x\n", irq);
	printk("cache_line_size = %x\n", cache_line_size);
	printk("latency_timer = %x\n", latency_timer);
	printk("board_id = %x\n", board_id);
);

	c->intr = irq;

	for(i=0; i<6; i++) {
		if (pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO)
		{ /* IO space */
			c->io_mem_addr = addr[i];
			c->io_mem_length = pci_resource_end(pdev, i)
				- pci_resource_start(pdev, i) + 1;
			if(!request_region( c->io_mem_addr, c->io_mem_length,
				"cpqarray"))
			{
				printk( KERN_WARNING "cpqarray: I/O memory range already in use addr %lx length = %ld\n", c->io_mem_addr, c->io_mem_length);
				c->io_mem_addr = 0;
				c->io_mem_length = 0;
			}
			break;
		}
	}

	c->paddr = 0;
	for(i=0; i<6; i++)
		if (!(pci_resource_flags(pdev, i) &
				PCI_BASE_ADDRESS_SPACE_IO)) {
			c->paddr = pci_resource_start (pdev, i);
			break;
		}
	if (!c->paddr)
		return -1;
	c->vaddr = remap_pci_mem(c->paddr, 128);
	if (!c->vaddr)
		return -1;
	c->board_id = board_id;

	for(i=0; i<NR_PRODUCTS; i++) {
		if (board_id == products[i].board_id) {
			c->product_name = products[i].product_name;
			c->access = *(products[i].access);
			break;
		}
	}
	if (i == NR_PRODUCTS) {
		printk(KERN_WARNING "cpqarray: Sorry, I don't know how"
			" to access the SMART Array controller %08lx\n",
				(unsigned long)board_id);
		return -1;
	}

	return 0;
}

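/*
 * Note on the config-space read at offset 0x2c above: that dword is the
 * PCI subsystem vendor ID (low word) plus subsystem device ID (high word),
 * which is exactly the board_id format used by the products[] table, so a
 * straight 32-bit compare is enough to pick the right access methods.
 */
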
/*
 * Map (physical) PCI mem into (virtual) kernel space
 */
static void __iomem *remap_pci_mem(ulong base, ulong size)
{
	ulong page_base = ((ulong) base) & PAGE_MASK;
	ulong page_offs = ((ulong) base) - page_base;
	void __iomem *page_remapped = ioremap(page_base, page_offs+size);

	return (page_remapped ? (page_remapped + page_offs) : NULL);
}

#ifndef MODULE
/*
 * Config string is a comma separated set of i/o addresses of EISA cards.
 */
static int cpqarray_setup(char *str)
{
	int i, ints[9];

	(void)get_options(str, ARRAY_SIZE(ints), ints);

	for(i=0; i<ints[0] && i<8; i++)
		eisa[i] = ints[i+1];
	return 1;
}

__setup("smart2=", cpqarray_setup);

#endif

/*
 * Find an EISA controller's signature.  Set up an hba if we find it.
 */
static int __init cpqarray_eisa_detect(void)
{
	int i=0, j;
	__u32 board_id;
	int intr;
	int ctlr;
	int num_ctlr = 0;

	while(i<8 && eisa[i]) {
		ctlr = alloc_cpqarray_hba();
		if(ctlr == -1)
			break;
		board_id = inl(eisa[i]+0xC80);
		for(j=0; j < NR_PRODUCTS; j++)
			if (board_id == products[j].board_id)
				break;

		if (j == NR_PRODUCTS) {
			printk(KERN_WARNING "cpqarray: Sorry, I don't know how"
				" to access the SMART Array controller %08lx\n", (unsigned long)board_id);
			continue;
		}

		memset(hba[ctlr], 0, sizeof(ctlr_info_t));
		hba[ctlr]->io_mem_addr = eisa[i];
		hba[ctlr]->io_mem_length = 0x7FF;
		if(!request_region(hba[ctlr]->io_mem_addr,
				hba[ctlr]->io_mem_length,
				"cpqarray"))
		{
			printk(KERN_WARNING "cpqarray: I/O range already in "
					"use addr = %lx length = %ld\n",
					hba[ctlr]->io_mem_addr,
					hba[ctlr]->io_mem_length);
			free_hba(ctlr);
			continue;
		}

		/*
		 * Read the config register to find our interrupt
		 */
		intr = inb(eisa[i]+0xCC0) >> 4;
		if (intr & 1) intr = 11;
		else if (intr & 2) intr = 10;
		else if (intr & 4) intr = 14;
		else if (intr & 8) intr = 15;

		hba[ctlr]->intr = intr;
		sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
		hba[ctlr]->product_name = products[j].product_name;
		hba[ctlr]->access = *(products[j].access);
		hba[ctlr]->ctlr = ctlr;
		hba[ctlr]->board_id = board_id;
		hba[ctlr]->pci_dev = NULL; /* not PCI */

DBGINFO(
	printk("i = %d, j = %d\n", i, j);
	printk("irq = %x\n", intr);
	printk("product name = %s\n", products[j].product_name);
	printk("board_id = %x\n", board_id);
);

		num_ctlr++;
		i++;

		if (cpqarray_register_ctlr(ctlr, NULL) == -1)
			printk(KERN_WARNING
				"cpqarray: Can't register EISA controller %d\n",
				ctlr);

	}

	return num_ctlr;
}

/*
 * Open.  Make sure the device is really there.
 */
static int ida_open(struct block_device *bdev, fmode_t mode)
{
	drv_info_t *drv = get_drv(bdev->bd_disk);
	ctlr_info_t *host = get_host(bdev->bd_disk);

	DBGINFO(printk("ida_open %s\n", bdev->bd_disk->disk_name));
	/*
	 * Root is allowed to open raw volume zero even if it's not configured
	 * so array config can still work.  I don't think I really like this,
	 * but I'm already using way too many device nodes to claim another one
	 * for "raw controller".
	 */
	if (!drv->nr_blks) {
		if (!capable(CAP_SYS_RAWIO))
			return -ENXIO;
		if (!capable(CAP_SYS_ADMIN) && drv != host->drv)
			return -ENXIO;
	}
	host->usage_count++;
	return 0;
}

/*
 * Close.  Sync first.
 */
static int ida_release(struct gendisk *disk, fmode_t mode)
{
	ctlr_info_t *host = get_host(disk);
	host->usage_count--;
	return 0;
}

/*
 * Enqueuing and dequeuing functions for cmdlists.
 * The queues are circular doubly-linked lists: an empty queue is a NULL
 * head pointer, and addQ() appends at the tail (just before *Qptr).
 */
static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c)
{
	if (*Qptr == NULL) {
		*Qptr = c;
		c->next = c->prev = c;
	} else {
		c->prev = (*Qptr)->prev;
		c->next = (*Qptr);
		(*Qptr)->prev->next = c;
		(*Qptr)->prev = c;
	}
}

static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c)
{
	if (c && c->next != c) {
		if (*Qptr == c) *Qptr = c->next;
		c->prev->next = c->next;
		c->next->prev = c->prev;
	} else {
		*Qptr = NULL;
	}
	return c;
}

/*
 * Get a request and submit it to the controller.
 * This routine needs to grab all the requests it possibly can from the
 * req Q and submit them.  Interrupts are off (and need to be off) when you
 * are in here (either via the dummy do_ida_request functions or by being
 * called from the interrupt handler).
 */
static void do_ida_request(struct request_queue *q)
{
	ctlr_info_t *h = q->queuedata;
	cmdlist_t *c;
	struct request *creq;
	struct scatterlist tmp_sg[SG_MAX];
	int i, dir, seg;

	if (blk_queue_plugged(q))
		goto startio;

queue_next:
	creq = elv_next_request(q);
	if (!creq)
		goto startio;

	BUG_ON(creq->nr_phys_segments > SG_MAX);

	if ((c = cmd_alloc(h,1)) == NULL)
		goto startio;

	blkdev_dequeue_request(creq);

	c->ctlr = h->ctlr;
	c->hdr.unit = (drv_info_t *)(creq->rq_disk->private_data) - h->drv;
	c->hdr.size = sizeof(rblk_t) >> 2;
	c->size += sizeof(rblk_t);

	c->req.hdr.blk = creq->sector;
	c->rq = creq;
DBGPX(
	printk("sector=%d, nr_sectors=%d\n", creq->sector, creq->nr_sectors);
);
	sg_init_table(tmp_sg, SG_MAX);
	seg = blk_rq_map_sg(q, creq, tmp_sg);

	/* Now do all the DMA Mappings */
	if (rq_data_dir(creq) == READ)
		dir = PCI_DMA_FROMDEVICE;
	else
		dir = PCI_DMA_TODEVICE;
	for( i=0; i < seg; i++)
	{
		c->req.sg[i].size = tmp_sg[i].length;
		c->req.sg[i].addr = (__u32) pci_map_page(h->pci_dev,
						 sg_page(&tmp_sg[i]),
						 tmp_sg[i].offset,
						 tmp_sg[i].length, dir);
	}
DBGPX(	printk("Submitting %d sectors in %d segments\n", creq->nr_sectors, seg); );
	c->req.hdr.sg_cnt = seg;
	c->req.hdr.blk_cnt = creq->nr_sectors;
	c->req.hdr.cmd = (rq_data_dir(creq) == READ) ? IDA_READ : IDA_WRITE;
	c->type = CMD_RWREQ;

	/* Put the request on the tail of the request queue */
	addQ(&h->reqQ, c);
	h->Qdepth++;
	if (h->Qdepth > h->maxQsinceinit)
		h->maxQsinceinit = h->Qdepth;

	goto queue_next;

startio:
	start_io(h);
}

/*
 * start_io submits everything on a controller's request queue
 * and moves it to the completion queue.
 *
 * Interrupts had better be off if you're in here
 */
static void start_io(ctlr_info_t *h)
{
	cmdlist_t *c;

	while((c = h->reqQ) != NULL) {
		/* Can't do anything if we're busy */
		if (h->access.fifo_full(h) == 0)
			return;

		/* Get the first entry from the request Q */
		removeQ(&h->reqQ, c);
		h->Qdepth--;

		/* Tell the controller to do our bidding */
		h->access.submit_command(h, c);

		/* Get onto the completion Q */
		addQ(&h->cmpQ, c);
	}
}

/*
 * Mark all buffers that cmd was responsible for as completed.
 */
static inline void complete_command(cmdlist_t *cmd, int timeout)
{
	struct request *rq = cmd->rq;
	int error = 0;
	int i, ddir;

	if (cmd->req.hdr.rcode & RCODE_NONFATAL &&
	   (hba[cmd->ctlr]->misc_tflags & MISC_NONFATAL_WARN) == 0) {
		printk(KERN_NOTICE "Non Fatal error on ida/c%dd%d\n",
				cmd->ctlr, cmd->hdr.unit);
		hba[cmd->ctlr]->misc_tflags |= MISC_NONFATAL_WARN;
	}
	if (cmd->req.hdr.rcode & RCODE_FATAL) {
		printk(KERN_WARNING "Fatal error on ida/c%dd%d\n",
				cmd->ctlr, cmd->hdr.unit);
		error = -EIO;
	}
	if (cmd->req.hdr.rcode & RCODE_INVREQ) {
		printk(KERN_WARNING "Invalid request on ida/c%dd%d = (cmd=%x sect=%d cnt=%d sg=%d ret=%x)\n",
				cmd->ctlr, cmd->hdr.unit, cmd->req.hdr.cmd,
				cmd->req.hdr.blk, cmd->req.hdr.blk_cnt,
				cmd->req.hdr.sg_cnt, cmd->req.hdr.rcode);
		error = -EIO;
	}
	if (timeout)
		error = -EIO;
	/* unmap the DMA mapping for all the scatter gather elements */
	if (cmd->req.hdr.cmd == IDA_READ)
		ddir = PCI_DMA_FROMDEVICE;
	else
		ddir = PCI_DMA_TODEVICE;
	for(i=0; i<cmd->req.hdr.sg_cnt; i++)
		pci_unmap_page(hba[cmd->ctlr]->pci_dev, cmd->req.sg[i].addr,
				cmd->req.sg[i].size, ddir);

	DBGPX(printk("Done with %p\n", rq););
	if (__blk_end_request(rq, error, blk_rq_bytes(rq)))
		BUG();
}

/*
 * The controller will interrupt us upon completion of commands.
 * Find the command on the completion queue, remove it, tell the OS and
 * try to queue up more IO.
 */
static irqreturn_t do_ida_intr(int irq, void *dev_id)
{
	ctlr_info_t *h = dev_id;
	cmdlist_t *c;
	unsigned long istat;
	unsigned long flags;
	__u32 a,a1;

	istat = h->access.intr_pending(h);
	/* Is this interrupt for us? */
	if (istat == 0)
		return IRQ_NONE;

	/*
	 * If there are completed commands in the completion queue,
	 * we had better do something about it.
	 */
	spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
	if (istat & FIFO_NOT_EMPTY) {
		while((a = h->access.command_completed(h))) {
			a1 = a; a &= ~3;
			if ((c = h->cmpQ) == NULL)
			{
				printk(KERN_WARNING "cpqarray: Completion of %08lx ignored\n", (unsigned long)a1);
				continue;
			}
			while(c->busaddr != a) {
				c = c->next;
				if (c == h->cmpQ)
					break;
			}
			/*
			 * If we've found the command, take it off the
			 * completion Q and free it
			 */
			if (c->busaddr == a) {
				removeQ(&h->cmpQ, c);
				/* Check for invalid command.
				 * The controller returns a command error,
				 * but rcode = 0.
				 */
				if((a1 & 0x03) && (c->req.hdr.rcode == 0))
				{
					c->req.hdr.rcode = RCODE_INVREQ;
				}
				if (c->type == CMD_RWREQ) {
					complete_command(c, 0);
					cmd_free(h, c, 1);
				} else if (c->type == CMD_IOCTL_PEND) {
					c->type = CMD_IOCTL_DONE;
				}
				continue;
			}
		}
	}

	/*
	 * See if we can queue up some more IO
	 */
	do_ida_request(h->queue);
	spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags);
	return IRQ_HANDLED;
}

/*
 * This timer was for timing out requests that haven't happened after
 * IDA_TIMEOUT.  That wasn't such a good idea.  This timer is used to
 * reset a flags structure so we don't flood the user with
 * "Non-Fatal error" messages.
 */
static void ida_timer(unsigned long tdata)
{
	ctlr_info_t *h = (ctlr_info_t*)tdata;

	h->timer.expires = jiffies + IDA_TIMER;
	add_timer(&h->timer);
	h->misc_tflags = 0;
}

static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	drv_info_t *drv = get_drv(bdev->bd_disk);

	if (drv->cylinders) {
		geo->heads = drv->heads;
		geo->sectors = drv->sectors;
		geo->cylinders = drv->cylinders;
	} else {
		geo->heads = 0xff;
		geo->sectors = 0x3f;
		geo->cylinders = drv->nr_blks / (0xff*0x3f);
	}

	return 0;
}

/*
 * ida_ioctl does some miscellaneous stuff like reporting drive geometry,
 * setting readahead and submitting commands from userspace to the controller.
 */
static int ida_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
{
	drv_info_t *drv = get_drv(bdev->bd_disk);
	ctlr_info_t *host = get_host(bdev->bd_disk);
	int error;
	ida_ioctl_t __user *io = (ida_ioctl_t __user *)arg;
	ida_ioctl_t *my_io;

	switch(cmd) {
	case IDAGETDRVINFO:
		if (copy_to_user(&io->c.drv, drv, sizeof(drv_info_t)))
			return -EFAULT;
		return 0;
	case IDAPASSTHRU:
		if (!capable(CAP_SYS_RAWIO))
			return -EPERM;
		my_io = kmalloc(sizeof(ida_ioctl_t), GFP_KERNEL);
		if (!my_io)
			return -ENOMEM;
		error = -EFAULT;
		if (copy_from_user(my_io, io, sizeof(*my_io)))
			goto out_passthru;
		error = ida_ctlr_ioctl(host, drv - host->drv, my_io);
		if (error)
			goto out_passthru;
		error = -EFAULT;
		if (copy_to_user(io, my_io, sizeof(*my_io)))
			goto out_passthru;
		error = 0;
out_passthru:
		kfree(my_io);
		return error;
	case IDAGETCTLRSIG:
		if (!arg) return -EINVAL;
		put_user(host->ctlr_sig, (int __user *)arg);
		return 0;
	case IDAREVALIDATEVOLS:
		if (MINOR(bdev->bd_dev) != 0)
			return -ENXIO;
		return revalidate_allvol(host);
	case IDADRIVERVERSION:
		if (!arg) return -EINVAL;
		put_user(DRIVER_VERSION, (unsigned long __user *)arg);
		return 0;
	case IDAGETPCIINFO:
	{

		ida_pci_info_struct pciinfo;

		if (!arg) return -EINVAL;
		pciinfo.bus = host->pci_dev->bus->number;
		pciinfo.dev_fn = host->pci_dev->devfn;
		pciinfo.board_id = host->board_id;
		if(copy_to_user((void __user *) arg, &pciinfo,
			sizeof( ida_pci_info_struct)))
				return -EFAULT;
		return(0);
	}

	default:
		return -EINVAL;
	}

}
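
/*
 * For reference, a userspace IDAPASSTHRU caller is expected to fill in an
 * ida_ioctl_t roughly like this (sketch only, not part of the driver):
 *
 *	ida_ioctl_t io = {0};
 *	io.cmd        = READ_FLASH_ROM;   // controller command to issue
 *	io.unit       = 0 | UNITVALID;    // logical unit, if the command needs one
 *	io.sg[0].size = buf_size;         // a single data buffer only
 *	io.sg[0].addr = buf;              // user pointer; the driver bounces it
 *	ioctl(fd, IDAPASSTHRU, &io);      // fd: an open ida device node
 *	// on return, io.rcode holds the controller's return code
 *
 * ida_ioctl() above copies the whole struct in and back out;
 * ida_ctlr_ioctl() below allocates a kernel bounce buffer for sg[0] and
 * does the real work.
 */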
/*
 * ida_ctlr_ioctl is for passing commands to the controller from userspace.
 * The command block (io) has already been copied to kernel space for us,
 * however, any elements in the sglist need to be copied to kernel space
 * or copied back to userspace.
 *
 * Only root may perform a controller passthru command, however I'm not doing
 * any serious sanity checking on the arguments.  Doing an IDA_WRITE_MEDIA and
 * putting a 64M buffer in the sglist is probably a *bad* idea.
 */
static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io)
{
	int ctlr = h->ctlr;
	cmdlist_t *c;
	void *p = NULL;
	unsigned long flags;
	int error;

	if ((c = cmd_alloc(h, 0)) == NULL)
		return -ENOMEM;
	c->ctlr = ctlr;
	c->hdr.unit = (io->unit & UNITVALID) ? (io->unit & ~UNITVALID) : dsk;
	c->hdr.size = sizeof(rblk_t) >> 2;
	c->size += sizeof(rblk_t);

	c->req.hdr.cmd = io->cmd;
	c->req.hdr.blk = io->blk;
	c->req.hdr.blk_cnt = io->blk_cnt;
	c->type = CMD_IOCTL_PEND;

	/* Pre submit processing */
	switch(io->cmd) {
	case PASSTHRU_A:
		p = kmalloc(io->sg[0].size, GFP_KERNEL);
		if (!p)
		{
			error = -ENOMEM;
			cmd_free(h, c, 0);
			return(error);
		}
		if (copy_from_user(p, io->sg[0].addr, io->sg[0].size)) {
			kfree(p);
			cmd_free(h, c, 0);
			return -EFAULT;
		}
		c->req.hdr.blk = pci_map_single(h->pci_dev, &(io->c),
				sizeof(ida_ioctl_t),
				PCI_DMA_BIDIRECTIONAL);
		c->req.sg[0].size = io->sg[0].size;
		c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
			c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
		c->req.hdr.sg_cnt = 1;
		break;
	case IDA_READ:
	case READ_FLASH_ROM:
	case SENSE_CONTROLLER_PERFORMANCE:
		p = kmalloc(io->sg[0].size, GFP_KERNEL);
		if (!p)
		{
			error = -ENOMEM;
			cmd_free(h, c, 0);
			return(error);
		}

		c->req.sg[0].size = io->sg[0].size;
		c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
			c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
		c->req.hdr.sg_cnt = 1;
		break;
	case IDA_WRITE:
	case IDA_WRITE_MEDIA:
	case DIAG_PASS_THRU:
	case COLLECT_BUFFER:
	case WRITE_FLASH_ROM:
		p = kmalloc(io->sg[0].size, GFP_KERNEL);
		if (!p)
		{
			error = -ENOMEM;
			cmd_free(h, c, 0);
			return(error);
		}
		if (copy_from_user(p, io->sg[0].addr, io->sg[0].size)) {
			kfree(p);
			cmd_free(h, c, 0);
			return -EFAULT;
		}
		c->req.sg[0].size = io->sg[0].size;
		c->req.sg[0].addr = pci_map_single(h->pci_dev, p,
			c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
		c->req.hdr.sg_cnt = 1;
		break;
	default:
		c->req.sg[0].size = sizeof(io->c);
		c->req.sg[0].addr = pci_map_single(h->pci_dev,&io->c,
			c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
		c->req.hdr.sg_cnt = 1;
	}

	/* Put the request on the tail of the request queue */
	spin_lock_irqsave(IDA_LOCK(ctlr), flags);
	addQ(&h->reqQ, c);
	h->Qdepth++;
	start_io(h);
	spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);

	/* Wait for completion */
	while(c->type != CMD_IOCTL_DONE)
		schedule();

	/* Unmap the DMA */
	pci_unmap_single(h->pci_dev, c->req.sg[0].addr, c->req.sg[0].size,
		PCI_DMA_BIDIRECTIONAL);
	/* Post submit processing */
	switch(io->cmd) {
	case PASSTHRU_A:
		pci_unmap_single(h->pci_dev, c->req.hdr.blk,
				sizeof(ida_ioctl_t),
				PCI_DMA_BIDIRECTIONAL);
		/* fall through */
	case IDA_READ:
	case DIAG_PASS_THRU:
	case SENSE_CONTROLLER_PERFORMANCE:
	case READ_FLASH_ROM:
		if (copy_to_user(io->sg[0].addr, p, io->sg[0].size)) {
			kfree(p);
			return -EFAULT;
		}
		/* fall through and free p */
	case IDA_WRITE:
	case IDA_WRITE_MEDIA:
	case COLLECT_BUFFER:
	case WRITE_FLASH_ROM:
		kfree(p);
		break;
	default:;
		/* Nothing to do */
	}

	io->rcode = c->req.hdr.rcode;
	cmd_free(h, c, 0);
	return(0);
}

/*
 * Commands are pre-allocated in a large block.  Here we use a simple bitmap
 * scheme to suballocate them to the driver.  Operations that are not time
 * critical (and can wait for kmalloc and possibly sleep) can pass 0 for
 * get_from_pool to get a freshly allocated command instead.
 */
static cmdlist_t * cmd_alloc(ctlr_info_t *h, int get_from_pool)
{
	cmdlist_t * c;
	int i;
	dma_addr_t cmd_dhandle;

	if (!get_from_pool) {
		c = (cmdlist_t*)pci_alloc_consistent(h->pci_dev,
			sizeof(cmdlist_t), &cmd_dhandle);
		if(c==NULL)
			return NULL;
	} else {
		do {
			i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
			if (i == NR_CMDS)
				return NULL;
		} while(test_and_set_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG)) != 0);
		c = h->cmd_pool + i;
		cmd_dhandle = h->cmd_pool_dhandle + i*sizeof(cmdlist_t);
		h->nr_allocs++;
	}

	memset(c, 0, sizeof(cmdlist_t));
	c->busaddr = cmd_dhandle;
	return c;
}

static void cmd_free(ctlr_info_t *h, cmdlist_t *c, int got_from_pool)
{
	int i;

	if (!got_from_pool) {
		pci_free_consistent(h->pci_dev, sizeof(cmdlist_t), c,
			c->busaddr);
	} else {
		i = c - h->cmd_pool;
		clear_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG));
		h->nr_frees++;
	}
}

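/*
 * In other words, there are two allocation paths: the I/O path (and the
 * init-time sendcmd()) uses get_from_pool=1 and grabs a slot out of the
 * DMA-coherent pool set up in cpqarray_register_ctlr(), which never sleeps;
 * the ioctl passthru path uses get_from_pool=0 and pays for a
 * pci_alloc_consistent() call per command instead.  busaddr is the tag the
 * controller hands back at completion time, so do_ida_intr() can match it
 * against the completion Q.
 */
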
/***********************************************************************
    name: sendcmd
    Send a command to an IDA using the memory mapped FIFO interface
    and wait for it to complete.
    This routine should only be called at init time.
***********************************************************************/
static int sendcmd(
	__u8	cmd,
	int	ctlr,
	void	*buff,
	size_t	size,
	unsigned int blk,
	unsigned int blkcnt,
	unsigned int log_unit )
{
	cmdlist_t *c;
	int complete;
	unsigned long temp;
	unsigned long i;
	ctlr_info_t *info_p = hba[ctlr];

	c = cmd_alloc(info_p, 1);
	if(!c)
		return IO_ERROR;
	c->ctlr = ctlr;
	c->hdr.unit = log_unit;
	c->hdr.prio = 0;
	c->hdr.size = sizeof(rblk_t) >> 2;
	c->size += sizeof(rblk_t);

	/* The request information. */
	c->req.hdr.next = 0;
	c->req.hdr.rcode = 0;
	c->req.bp = 0;
	c->req.hdr.sg_cnt = 1;
	c->req.hdr.reserved = 0;

	if (size == 0)
		c->req.sg[0].size = 512;
	else
		c->req.sg[0].size = size;

	c->req.hdr.blk = blk;
	c->req.hdr.blk_cnt = blkcnt;
	c->req.hdr.cmd = (unsigned char) cmd;
	c->req.sg[0].addr = (__u32) pci_map_single(info_p->pci_dev,
		buff, c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
	/*
	 * Disable interrupt
	 */
	info_p->access.set_intr_mask(info_p, 0);
	/* Make sure there is room in the command FIFO */
	/* Actually it should be completely empty at this time. */
	for (i = 200000; i > 0; i--) {
		temp = info_p->access.fifo_full(info_p);
		if (temp != 0) {
			break;
		}
		udelay(10);
DBG(
		printk(KERN_WARNING "cpqarray ida%d: idaSendPciCmd FIFO full,"
			" waiting!\n", ctlr);
);
	}
	/*
	 * Send the cmd
	 */
	info_p->access.submit_command(info_p, c);
	complete = pollcomplete(ctlr);

	pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
		c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
	if (complete != 1) {
		if (complete != c->busaddr) {
			printk( KERN_WARNING
			"cpqarray ida%d: idaSendPciCmd "
			"Invalid command list address returned! (%08lx)\n",
				ctlr, (unsigned long)complete);
			cmd_free(info_p, c, 1);
			return (IO_ERROR);
		}
	} else {
		printk( KERN_WARNING
			"cpqarray ida%d: idaSendPciCmd timed out, "
			"no command list address returned!\n",
			ctlr);
		cmd_free(info_p, c, 1);
		return (IO_ERROR);
	}

	if (c->req.hdr.rcode & 0x00FE) {
		if (!(c->req.hdr.rcode & BIG_PROBLEM)) {
			printk( KERN_WARNING
				"cpqarray ida%d: idaSendPciCmd, error: "
				"Controller failed at init time "
				"cmd: 0x%x, return code = 0x%x\n",
				ctlr, c->req.hdr.cmd, c->req.hdr.rcode);

			cmd_free(info_p, c, 1);
			return (IO_ERROR);
		}
	}
	cmd_free(info_p, c, 1);
	return (IO_OK);
}

/*
 * revalidate_allvol is for online array config utilities.  After a
 * utility reconfigures the drives in the array, it can use this function
 * (through an ioctl) to make the driver zap any previous disk structs for
 * that controller and get new ones.
 *
 * Right now I'm using the getgeometry() function to do this, but this
 * function should probably be finer grained and allow you to revalidate one
 * particular logical volume (instead of all of them on a particular
 * controller).
 */
static int revalidate_allvol(ctlr_info_t *host)
{
	int ctlr = host->ctlr;
	int i;
	unsigned long flags;

	spin_lock_irqsave(IDA_LOCK(ctlr), flags);
	if (host->usage_count > 1) {
		spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
		printk(KERN_WARNING "cpqarray: Device busy for volume"
			" revalidation (usage=%d)\n", host->usage_count);
		return -EBUSY;
	}
	host->usage_count++;
	spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);

	/*
	 * Set the partition and block size structures for all volumes
	 * on this controller to zero.  We will reread all of this data
	 */
	set_capacity(ida_gendisk[ctlr][0], 0);
	for (i = 1; i < NWD; i++) {
		struct gendisk *disk = ida_gendisk[ctlr][i];
		if (disk->flags & GENHD_FL_UP)
			del_gendisk(disk);
	}
	memset(host->drv, 0, sizeof(drv_info_t)*NWD);

	/*
	 * Tell the array controller not to give us any interrupts while
	 * we check the new geometry.  Then turn interrupts back on when
	 * we're done.
	 */
	host->access.set_intr_mask(host, 0);
	getgeometry(ctlr);
	host->access.set_intr_mask(host, FIFO_NOT_EMPTY);

	for(i=0; i<NWD; i++) {
		struct gendisk *disk = ida_gendisk[ctlr][i];
		drv_info_t *drv = &host->drv[i];
		if (i && !drv->nr_blks)
			continue;
		blk_queue_hardsect_size(host->queue, drv->blk_size);
		set_capacity(disk, drv->nr_blks);
		disk->queue = host->queue;
		disk->private_data = drv;
		if (i)
			add_disk(disk);
	}

	host->usage_count--;
	return 0;
}

static int ida_revalidate(struct gendisk *disk)
{
	drv_info_t *drv = disk->private_data;
	set_capacity(disk, drv->nr_blks);
	return 0;
}

/********************************************************************
    name: pollcomplete
    Wait polling for a command to complete.
    The memory mapped FIFO is polled for the completion.
    Used only at init time, interrupts disabled.
 ********************************************************************/
static int pollcomplete(int ctlr)
{
	int done;
	int i;

	/* Wait (up to 2 seconds) for a command to complete */

	for (i = 200000; i > 0; i--) {
		done = hba[ctlr]->access.command_completed(hba[ctlr]);
		if (done == 0) {
			udelay(10);	/* a short fixed delay */
		} else
			return (done);
	}
	/* 1 is never a valid command list address; it tells the caller
	 * that we ran out of time */
	return 1;
}
/*****************************************************************
    start_fwbk
    Starts the controller firmware's background processing.
    Currently only the Integrated Raid controller needs this done.
    If the PCI mem address registers are written to after this,
    data corruption may occur.
*****************************************************************/
static void start_fwbk(int ctlr)
{
	id_ctlr_t *id_ctlr_buf;
	int ret_code;

	if(	(hba[ctlr]->board_id != 0x40400E11)
		&& (hba[ctlr]->board_id != 0x40480E11) )

	/* Not an Integrated Raid controller, so there is nothing for us to do */
		return;
	printk(KERN_DEBUG "cpqarray: Starting firmware's background"
		" processing\n");
	/* The command does not return anything, but sendcmd still needs a
	   buffer */
	id_ctlr_buf = kmalloc(sizeof(id_ctlr_t), GFP_KERNEL);
	if(id_ctlr_buf==NULL)
	{
		printk(KERN_WARNING "cpqarray: Out of memory. "
			"Unable to start background processing.\n");
		return;
	}
	ret_code = sendcmd(RESUME_BACKGROUND_ACTIVITY, ctlr,
		id_ctlr_buf, 0, 0, 0, 0);
	if(ret_code != IO_OK)
		printk(KERN_WARNING "cpqarray: Unable to start"
			" background processing\n");

	kfree(id_ctlr_buf);
}
/*****************************************************************
    getgeometry
    Get ida logical volume geometry from the controller.
    This is a large bit of code which once existed in two flavors;
    it is used only at init time.
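    On any error it leaves info_p->log_drv_map zeroed, so opens of the
    affected logical drives will fail.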
1632*****************************************************************/ 1633static void getgeometry(int ctlr) 1634{ 1635 id_log_drv_t *id_ldrive; 1636 id_ctlr_t *id_ctlr_buf; 1637 sense_log_drv_stat_t *id_lstatus_buf; 1638 config_t *sense_config_buf; 1639 unsigned int log_unit, log_index; 1640 int ret_code, size; 1641 drv_info_t *drv; 1642 ctlr_info_t *info_p = hba[ctlr]; 1643 int i; 1644 1645 info_p->log_drv_map = 0; 1646 1647 id_ldrive = kzalloc(sizeof(id_log_drv_t), GFP_KERNEL); 1648 if (!id_ldrive) { 1649 printk( KERN_ERR "cpqarray: out of memory.\n"); 1650 goto err_0; 1651 } 1652 1653 id_ctlr_buf = kzalloc(sizeof(id_ctlr_t), GFP_KERNEL); 1654 if (!id_ctlr_buf) { 1655 printk( KERN_ERR "cpqarray: out of memory.\n"); 1656 goto err_1; 1657 } 1658 1659 id_lstatus_buf = kzalloc(sizeof(sense_log_drv_stat_t), GFP_KERNEL); 1660 if (!id_lstatus_buf) { 1661 printk( KERN_ERR "cpqarray: out of memory.\n"); 1662 goto err_2; 1663 } 1664 1665 sense_config_buf = kzalloc(sizeof(config_t), GFP_KERNEL); 1666 if (!sense_config_buf) { 1667 printk( KERN_ERR "cpqarray: out of memory.\n"); 1668 goto err_3; 1669 } 1670 1671 info_p->phys_drives = 0; 1672 info_p->log_drv_map = 0; 1673 info_p->drv_assign_map = 0; 1674 info_p->drv_spare_map = 0; 1675 info_p->mp_failed_drv_map = 0; /* only initialized here */ 1676 /* Get controllers info for this logical drive */ 1677 ret_code = sendcmd(ID_CTLR, ctlr, id_ctlr_buf, 0, 0, 0, 0); 1678 if (ret_code == IO_ERROR) { 1679 /* 1680 * If can't get controller info, set the logical drive map to 0, 1681 * so the idastubopen will fail on all logical drives 1682 * on the controller. 1683 */ 1684 printk(KERN_ERR "cpqarray: error sending ID controller\n"); 1685 goto err_4; 1686 } 1687 1688 info_p->log_drives = id_ctlr_buf->nr_drvs; 1689 for(i=0;i<4;i++) 1690 info_p->firm_rev[i] = id_ctlr_buf->firm_rev[i]; 1691 info_p->ctlr_sig = id_ctlr_buf->cfg_sig; 1692 1693 printk(" (%s)\n", info_p->product_name); 1694 /* 1695 * Initialize logical drive map to zero 1696 */ 1697 log_index = 0; 1698 /* 1699 * Get drive geometry for all logical drives 1700 */ 1701 if (id_ctlr_buf->nr_drvs > 16) 1702 printk(KERN_WARNING "cpqarray ida%d: This driver supports " 1703 "16 logical drives per controller.\n. " 1704 " Additional drives will not be " 1705 "detected\n", ctlr); 1706 1707 for (log_unit = 0; 1708 (log_index < id_ctlr_buf->nr_drvs) 1709 && (log_unit < NWD); 1710 log_unit++) { 1711 size = sizeof(sense_log_drv_stat_t); 1712 1713 /* 1714 Send "Identify logical drive status" cmd 1715 */ 1716 ret_code = sendcmd(SENSE_LOG_DRV_STAT, 1717 ctlr, id_lstatus_buf, size, 0, 0, log_unit); 1718 if (ret_code == IO_ERROR) { 1719 /* 1720 If can't get logical drive status, set 1721 the logical drive map to 0, so the 1722 idastubopen will fail for all logical drives 1723 on the controller. 1724 */ 1725 info_p->log_drv_map = 0; 1726 printk( KERN_WARNING 1727 "cpqarray ida%d: idaGetGeometry - Controller" 1728 " failed to report status of logical drive %d\n" 1729 "Access to this controller has been disabled\n", 1730 ctlr, log_unit); 1731 goto err_4; 1732 } 1733 /* 1734 Make sure the logical drive is configured 1735 */ 1736 if (id_lstatus_buf->status != LOG_NOT_CONF) { 1737 ret_code = sendcmd(ID_LOG_DRV, ctlr, id_ldrive, 1738 sizeof(id_log_drv_t), 0, 0, log_unit); 1739 /* 1740 If error, the bit for this 1741 logical drive won't be set and 1742 idastubopen will return error. 
1743 */ 1744 if (ret_code != IO_ERROR) { 1745 drv = &info_p->drv[log_unit]; 1746 drv->blk_size = id_ldrive->blk_size; 1747 drv->nr_blks = id_ldrive->nr_blks; 1748 drv->cylinders = id_ldrive->drv.cyl; 1749 drv->heads = id_ldrive->drv.heads; 1750 drv->sectors = id_ldrive->drv.sect_per_track; 1751 info_p->log_drv_map |= (1 << log_unit); 1752 1753 printk(KERN_INFO "cpqarray ida/c%dd%d: blksz=%d nr_blks=%d\n", 1754 ctlr, log_unit, drv->blk_size, drv->nr_blks); 1755 ret_code = sendcmd(SENSE_CONFIG, 1756 ctlr, sense_config_buf, 1757 sizeof(config_t), 0, 0, log_unit); 1758 if (ret_code == IO_ERROR) { 1759 info_p->log_drv_map = 0; 1760 printk(KERN_ERR "cpqarray: error sending sense config\n"); 1761 goto err_4; 1762 } 1763 1764 info_p->phys_drives = 1765 sense_config_buf->ctlr_phys_drv; 1766 info_p->drv_assign_map 1767 |= sense_config_buf->drv_asgn_map; 1768 info_p->drv_assign_map 1769 |= sense_config_buf->spare_asgn_map; 1770 info_p->drv_spare_map 1771 |= sense_config_buf->spare_asgn_map; 1772 } /* end of if no error on id_ldrive */ 1773 log_index = log_index + 1; 1774 } /* end of if logical drive configured */ 1775 } /* end of for log_unit */ 1776 1777 /* Free all the buffers and return */ 1778err_4: 1779 kfree(sense_config_buf); 1780err_3: 1781 kfree(id_lstatus_buf); 1782err_2: 1783 kfree(id_ctlr_buf); 1784err_1: 1785 kfree(id_ldrive); 1786err_0: 1787 return; 1788} 1789 1790static void __exit cpqarray_exit(void) 1791{ 1792 int i; 1793 1794 pci_unregister_driver(&cpqarray_pci_driver); 1795 1796 /* Double check that all controller entries have been removed */ 1797 for(i=0; i<MAX_CTLR; i++) { 1798 if (hba[i] != NULL) { 1799 printk(KERN_WARNING "cpqarray: Removing EISA " 1800 "controller %d\n", i); 1801 cpqarray_remove_one_eisa(i); 1802 } 1803 } 1804 1805 remove_proc_entry("driver/cpqarray", NULL); 1806} 1807 1808module_init(cpqarray_init) 1809module_exit(cpqarray_exit)