/*
 * linux/drivers/ide/pci/serverworks.c		Version 0.8	25 Ebr 2003
 *
 * Copyright (C) 1998-2000 Michel Aubry
 * Copyright (C) 1998-2000 Andrzej Krzysztofowicz
 * Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
 * Portions copyright (c) 2001 Sun Microsystems
 *
 *
 * RCC/ServerWorks IDE driver for Linux
 *
 *   OSB4: `Open South Bridge' IDE Interface (fn 1)
 *         supports UDMA mode 2 (33 MB/s)
 *
 *   CSB5: `Champion South Bridge' IDE Interface (fn 1)
 *         all revisions support UDMA mode 4 (66 MB/s)
 *         revision A2.0 and up support UDMA mode 5 (100 MB/s)
 *
 *         *** The CSB5 does not provide ANY register ***
 *         *** to detect 80-conductor cable presence. ***
 *
 *   CSB6: `Champion South Bridge' IDE Interface (optional: third channel)
 *
 *   HT1000: AKA BCM5785 - Hypertransport Southbridge for Opteron systems. IDE
 *           controller same as the CSB6. Single channel ATA100 only.
 *
 * Documentation:
 *	Available under NDA only. Errata info very hard to get.
 *
 */

#include <linux/config.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/hdreg.h>
#include <linux/ide.h>
#include <linux/init.h>
#include <linux/delay.h>

#include <asm/io.h>

#define SVWKS_CSB5_REVISION_NEW	0x92 /* min PCI_REVISION_ID for UDMA5 (A2.0) */
#define SVWKS_CSB6_REVISION	0xa0 /* min PCI_REVISION_ID for UDMA4 (A1.0) */

/* Seagate Barracuda ATA IV Family drives in UDMA mode 5
 * can overrun their FIFOs when used with the CSB5 */
static const char *svwks_bad_ata100[] = {
	"ST320011A",
	"ST340016A",
	"ST360021A",
	"ST380021A",
	NULL
};

static u8 svwks_revision = 0;
static struct pci_dev *isa_dev;

static int check_in_drive_lists (ide_drive_t *drive, const char **list)
{
	while (*list)
		if (!strcmp(*list++, drive->id->model))
			return 1;
	return 0;
}

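/*
 * svwks_ratemask() returns the highest transfer rate class the chip
 * allows for this drive; callers feed it to ide_rate_filter() and
 * ide_dma_speed().  OSB4 gets UDMA only for non-disk devices and only
 * when the DMA33 enable bit in the ISA bridge is set; CSB5 revisions
 * before A2.0 are held to a fixed lower class; newer CSB5/CSB6 parts
 * read the UDMA control register at 0x5A, respect the 40/80-wire cable
 * check and cap the blacklisted Barracuda ATA IV drives.
 */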
static u8 svwks_ratemask (ide_drive_t *drive)
{
	struct pci_dev *dev	= HWIF(drive)->pci_dev;
	u8 mode;

	if (!svwks_revision)
		pci_read_config_byte(dev, PCI_REVISION_ID, &svwks_revision);

	if (dev->device == PCI_DEVICE_ID_SERVERWORKS_HT1000IDE)
		return 2;
	if (dev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) {
		u32 reg = 0;
		if (isa_dev)
			pci_read_config_dword(isa_dev, 0x64, &reg);

		/*
		 *	Don't enable UDMA on disk devices for the moment
		 */
		if (drive->media == ide_disk)
			return 0;
		/* Check the OSB4 DMA33 enable bit */
		return ((reg & 0x00004000) == 0x00004000) ? 1 : 0;
	} else if (svwks_revision < SVWKS_CSB5_REVISION_NEW) {
		return 1;
	} else if (svwks_revision >= SVWKS_CSB5_REVISION_NEW) {
		u8 btr = 0;
		pci_read_config_byte(dev, 0x5A, &btr);
		mode = btr & 0x3;
		if (!eighty_ninty_three(drive))
			mode = min(mode, (u8)1);
		/* If someone decides to do UDMA133 on CSB5 the same
		   issue will bite so be inclusive */
		if (mode > 2 && check_in_drive_lists(drive, svwks_bad_ata100))
			mode = 2;
	}
	if (((dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE) ||
	     (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)) &&
	    (!(PCI_FUNC(dev->devfn) & 1)))
		mode = 2;
	return mode;
}

static u8 svwks_csb_check (struct pci_dev *dev)
{
	switch (dev->device) {
		case PCI_DEVICE_ID_SERVERWORKS_CSB5IDE:
		case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE:
		case PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2:
		case PCI_DEVICE_ID_SERVERWORKS_HT1000IDE:
			return 1;
		default:
			break;
	}
	return 0;
}

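/*
 * svwks_tune_chipset() programs the per-drive PIO, MWDMA and UDMA
 * timing registers.  xferspeed is an XFER_* mode, or 255 to request
 * PIO auto-tuning.  On CSB6 parts the routine first tries to report
 * whatever mode the firmware already programmed before falling back
 * to computing fresh register values.
 */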
static int svwks_tune_chipset (ide_drive_t *drive, u8 xferspeed)
{
	u8 udma_modes[]		= { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05 };
	u8 dma_modes[]		= { 0x77, 0x21, 0x20 };
	u8 pio_modes[]		= { 0x5d, 0x47, 0x34, 0x22, 0x20 };
	u8 drive_pci[]		= { 0x41, 0x40, 0x43, 0x42 };
	u8 drive_pci2[]		= { 0x45, 0x44, 0x47, 0x46 };

	ide_hwif_t *hwif	= HWIF(drive);
	struct pci_dev *dev	= hwif->pci_dev;
	u8 speed;
	u8 pio			= ide_get_best_pio_mode(drive, 255, 5, NULL);
	u8 unit			= (drive->select.b.unit & 0x01);
	u8 csb5			= svwks_csb_check(dev);
	u8 ultra_enable		= 0, ultra_timing = 0;
	u8 dma_timing		= 0, pio_timing = 0;
	u16 csb5_pio		= 0;

	if (xferspeed == 255)	/* PIO auto-tuning */
		speed = XFER_PIO_0 + pio;
	else
		speed = ide_rate_filter(svwks_ratemask(drive), xferspeed);

	/* If we are about to put a disk into UDMA mode we screwed up.
	   Our code assumes we never _ever_ do this on an OSB4 */

	if (dev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4 &&
	    drive->media == ide_disk && speed >= XFER_UDMA_0)
		BUG();

	pci_read_config_byte(dev, drive_pci[drive->dn], &pio_timing);
	pci_read_config_byte(dev, drive_pci2[drive->dn], &dma_timing);
	pci_read_config_byte(dev, (0x56|hwif->channel), &ultra_timing);
	pci_read_config_word(dev, 0x4A, &csb5_pio);
	pci_read_config_byte(dev, 0x54, &ultra_enable);

	/* Per Specified Design by OEM, and ASIC Architect */
	if ((dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE) ||
	    (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)) {
		if (!drive->init_speed) {
			u8 dma_stat = hwif->INB(hwif->dma_status);

dma_pio:
			if (((ultra_enable << (7-drive->dn) & 0x80) == 0x80) &&
			    ((dma_stat & (1<<(5+unit))) == (1<<(5+unit)))) {
				drive->current_speed = drive->init_speed = XFER_UDMA_0 + udma_modes[(ultra_timing >> (4*unit)) & ~(0xF0)];
				return 0;
			} else if ((dma_timing) &&
				   ((dma_stat&(1<<(5+unit)))==(1<<(5+unit)))) {
				u8 dmaspeed = dma_timing;

				dma_timing &= ~0xFF;
				if ((dmaspeed & 0x20) == 0x20)
					dmaspeed = XFER_MW_DMA_2;
				else if ((dmaspeed & 0x21) == 0x21)
					dmaspeed = XFER_MW_DMA_1;
				else if ((dmaspeed & 0x77) == 0x77)
					dmaspeed = XFER_MW_DMA_0;
				else
					goto dma_pio;
				drive->current_speed = drive->init_speed = dmaspeed;
				return 0;
			} else if (pio_timing) {
				u8 piospeed = pio_timing;

				pio_timing &= ~0xFF;
				if ((piospeed & 0x20) == 0x20)
					piospeed = XFER_PIO_4;
				else if ((piospeed & 0x22) == 0x22)
					piospeed = XFER_PIO_3;
				else if ((piospeed & 0x34) == 0x34)
					piospeed = XFER_PIO_2;
				else if ((piospeed & 0x47) == 0x47)
					piospeed = XFER_PIO_1;
				else if ((piospeed & 0x5d) == 0x5d)
					piospeed = XFER_PIO_0;
				else
					goto oem_setup_failed;
				drive->current_speed = drive->init_speed = piospeed;
				return 0;
			}
		}
	}

oem_setup_failed:

	pio_timing &= ~0xFF;
	dma_timing &= ~0xFF;
	ultra_timing &= ~(0x0F << (4*unit));
	ultra_enable &= ~(0x01 << drive->dn);
	csb5_pio &= ~(0x0F << (4*drive->dn));

	switch(speed) {
		case XFER_PIO_4:
		case XFER_PIO_3:
		case XFER_PIO_2:
		case XFER_PIO_1:
		case XFER_PIO_0:
			pio_timing |= pio_modes[speed - XFER_PIO_0];
			csb5_pio   |= ((speed - XFER_PIO_0) << (4*drive->dn));
			break;

		case XFER_MW_DMA_2:
		case XFER_MW_DMA_1:
		case XFER_MW_DMA_0:
			pio_timing |= pio_modes[pio];
			csb5_pio   |= (pio << (4*drive->dn));
			dma_timing |= dma_modes[speed - XFER_MW_DMA_0];
			break;

		case XFER_UDMA_5:
		case XFER_UDMA_4:
		case XFER_UDMA_3:
		case XFER_UDMA_2:
		case XFER_UDMA_1:
		case XFER_UDMA_0:
			pio_timing   |= pio_modes[pio];
			csb5_pio     |= (pio << (4*drive->dn));
			dma_timing   |= dma_modes[2];
			ultra_timing |= ((udma_modes[speed - XFER_UDMA_0]) << (4*unit));
			ultra_enable |= (0x01 << drive->dn);
		default:
			break;
	}

	pci_write_config_byte(dev, drive_pci[drive->dn], pio_timing);
	if (csb5)
		pci_write_config_word(dev, 0x4A, csb5_pio);

	pci_write_config_byte(dev, drive_pci2[drive->dn], dma_timing);
	pci_write_config_byte(dev, (0x56|hwif->channel), ultra_timing);
	pci_write_config_byte(dev, 0x54, ultra_enable);

	return (ide_config_drive_speed(drive, speed));
}

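/*
 * config_chipset_for_pio() picks a PIO mode from the drive's reported
 * capabilities (eide_pio_modes, IORDY timing, tPIO) and programs it
 * via svwks_tune_chipset().
 */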
static void config_chipset_for_pio (ide_drive_t *drive)
{
	u16 eide_pio_timing[6] = {960, 480, 240, 180, 120, 90};
	u16 xfer_pio = drive->id->eide_pio_modes;
	u8 timing, speed, pio;

	pio = ide_get_best_pio_mode(drive, 255, 5, NULL);

	if (xfer_pio > 4)
		xfer_pio = 0;

	if (drive->id->eide_pio_iordy > 0)
		for (xfer_pio = 5;
			xfer_pio > 0 &&
			drive->id->eide_pio_iordy > eide_pio_timing[xfer_pio];
			xfer_pio--);
	else
		xfer_pio = (drive->id->eide_pio_modes & 4) ? 0x05 :
			   (drive->id->eide_pio_modes & 2) ? 0x04 :
			   (drive->id->eide_pio_modes & 1) ? 0x03 :
			   (drive->id->tPIO & 2) ? 0x02 :
			   (drive->id->tPIO & 1) ? 0x01 : xfer_pio;

	timing = (xfer_pio >= pio) ? xfer_pio : pio;

	switch(timing) {
		case 4: speed = XFER_PIO_4; break;
		case 3: speed = XFER_PIO_3; break;
		case 2: speed = XFER_PIO_2; break;
		case 1: speed = XFER_PIO_1; break;
		default:
			speed = (!drive->id->tPIO) ? XFER_PIO_0 : XFER_PIO_SLOW;
			break;
	}
	(void) svwks_tune_chipset(drive, speed);
	drive->current_speed = speed;
}

static void svwks_tune_drive (ide_drive_t *drive, u8 pio)
{
	if (pio == 255)
		(void) svwks_tune_chipset(drive, 255);
	else
		(void) svwks_tune_chipset(drive, (XFER_PIO_0 + pio));
}

static int config_chipset_for_dma (ide_drive_t *drive)
{
	u8 speed = ide_dma_speed(drive, svwks_ratemask(drive));

	if (!(speed))
		speed = XFER_PIO_0 + ide_get_best_pio_mode(drive, 255, 5, NULL);

	(void) svwks_tune_chipset(drive, speed);
	return ide_dma_enable(drive);
}

static int svwks_config_drive_xfer_rate (ide_drive_t *drive)
{
	ide_hwif_t *hwif	= HWIF(drive);
	struct hd_driveid *id	= drive->id;

	drive->init_speed = 0;

	if ((id->capability & 1) && drive->autodma) {

		if (ide_use_dma(drive)) {
			if (config_chipset_for_dma(drive))
				return hwif->ide_dma_on(drive);
		}

		goto fast_ata_pio;

	} else if ((id->capability & 8) || (id->field_valid & 2)) {
fast_ata_pio:
		config_chipset_for_pio(drive);
		// hwif->tuneproc(drive, 5);
		return hwif->ide_dma_off_quietly(drive);
	}
	/* IORDY not supported */
	return 0;
}

/* This can go soon */

static int svwks_ide_dma_end (ide_drive_t *drive)
{
	return __ide_dma_end(drive);
}

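/*
 * init_chipset_svwks() does the one-time, chip-wide setup: cache the
 * PCI revision, force the latency timer, enable UDMA/33 in the OSB4
 * ISA bridge, sort out IRQ routing for CSB5/CSB6 (including the
 * optional third channel) and program the UDMA control register
 * (0x5A) on CSB5/CSB6/HT1000.
 */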
static unsigned int __devinit init_chipset_svwks (struct pci_dev *dev, const char *name)
{
	unsigned int reg;
	u8 btr;

	/* save revision id to determine DMA capability */
	pci_read_config_byte(dev, PCI_REVISION_ID, &svwks_revision);

	/* force Master Latency Timer value to 64 PCICLKs */
	pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x40);

	/* OSB4 : South Bridge and IDE */
	if (dev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4IDE) {
		isa_dev = pci_find_device(PCI_VENDOR_ID_SERVERWORKS,
			  PCI_DEVICE_ID_SERVERWORKS_OSB4, NULL);
		if (isa_dev) {
			pci_read_config_dword(isa_dev, 0x64, &reg);
			reg &= ~0x00002000; /* disable 600ns interrupt mask */
			if (!(reg & 0x00004000))
				printk(KERN_DEBUG "%s: UDMA not BIOS enabled.\n", name);
			reg |= 0x00004000; /* enable UDMA/33 support */
			pci_write_config_dword(isa_dev, 0x64, reg);
		}
	}

	/* setup CSB5/CSB6 : South Bridge and IDE option RAID */
	else if ((dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE) ||
		 (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE) ||
		 (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)) {

		/* Third Channel Test */
		if (!(PCI_FUNC(dev->devfn) & 1)) {
			struct pci_dev * findev = NULL;
			u32 reg4c = 0;
			findev = pci_find_device(PCI_VENDOR_ID_SERVERWORKS,
				PCI_DEVICE_ID_SERVERWORKS_CSB5, NULL);
			if (findev) {
				pci_read_config_dword(findev, 0x4C, &reg4c);
				reg4c &= ~0x000007FF;
				reg4c |=  0x00000040;
				reg4c |=  0x00000020;
				pci_write_config_dword(findev, 0x4C, reg4c);
			}
			outb_p(0x06, 0x0c00);
			dev->irq = inb_p(0x0c01);
#if 0
			printk("%s: device class (0x%04x)\n",
				name, dev->class);
			if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE) {
				dev->class &= ~0x000F0F00;
			//	dev->class |= ~0x00000400;
				dev->class |= ~0x00010100;
				/**/
			}
#endif
		} else {
			struct pci_dev * findev = NULL;
			u8 reg41 = 0;

			findev = pci_find_device(PCI_VENDOR_ID_SERVERWORKS,
				PCI_DEVICE_ID_SERVERWORKS_CSB6, NULL);
			if (findev) {
				pci_read_config_byte(findev, 0x41, &reg41);
				reg41 &= ~0x40;
				pci_write_config_byte(findev, 0x41, reg41);
			}
			/*
			 * This is a device pin issue on CSB6.
			 * Since there will be a future raid mode,
			 * early versions of the chipset require the
			 * interrupt pin to be set, and it is a compatibility
			 * mode issue.
			 */
			if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE)
				dev->irq = 0;
		}
//		pci_read_config_dword(dev, 0x40, &pioreg)
//		pci_write_config_dword(dev, 0x40, 0x99999999);
//		pci_read_config_dword(dev, 0x44, &dmareg);
//		pci_write_config_dword(dev, 0x44, 0xFFFFFFFF);
		/* setup the UDMA Control register
		 *
		 * 1. clear bit 6 to enable DMA
		 * 2. enable DMA modes with bits 0-1
		 * 	00 : legacy
		 * 	01 : udma2
		 * 	10 : udma2/udma4
		 * 	11 : udma2/udma4/udma5
		 */
		pci_read_config_byte(dev, 0x5A, &btr);
		btr &= ~0x40;
		if (!(PCI_FUNC(dev->devfn) & 1))
			btr |= 0x2;
		else
			btr |= (svwks_revision >= SVWKS_CSB5_REVISION_NEW) ? 0x3 : 0x2;
		pci_write_config_byte(dev, 0x5A, btr);
	}
	/* Setup HT1000 SouthBridge Controller - Single Channel Only */
	else if (dev->device == PCI_DEVICE_ID_SERVERWORKS_HT1000IDE) {
		pci_read_config_byte(dev, 0x5A, &btr);
		btr &= ~0x40;
		btr |= 0x3;
		pci_write_config_byte(dev, 0x5A, btr);
	}

	return (dev->irq) ? dev->irq : 0;
}

static unsigned int __devinit ata66_svwks_svwks (ide_hwif_t *hwif)
{
	return 1;
}

/* On Dell PowerEdge servers with a CSB5/CSB6, the top two bits
 * of the subsystem device ID indicate presence of an 80-pin cable.
 * Bit 15 clear = secondary IDE channel does not have 80-pin cable.
 * Bit 15 set   = secondary IDE channel has 80-pin cable.
 * Bit 14 clear = primary IDE channel does not have 80-pin cable.
 * Bit 14 set   = primary IDE channel has 80-pin cable.
 */
static unsigned int __devinit ata66_svwks_dell (ide_hwif_t *hwif)
{
	struct pci_dev *dev = hwif->pci_dev;
	if (dev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
	    dev->vendor	== PCI_VENDOR_ID_SERVERWORKS &&
	    (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE ||
	     dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE))
		return ((1 << (hwif->channel + 14)) &
			dev->subsystem_device) ? 1 : 0;
	return 0;
}

/* Sun Cobalt Alpine hardware avoids the 80-pin cable
 * detect issue by attaching the drives directly to the board.
 * This check follows the Dell precedent (how scary is that?!)
 *
 * WARNING: this only works on Alpine hardware!
 */
static unsigned int __devinit ata66_svwks_cobalt (ide_hwif_t *hwif)
{
	struct pci_dev *dev = hwif->pci_dev;
	if (dev->subsystem_vendor == PCI_VENDOR_ID_SUN &&
	    dev->vendor	== PCI_VENDOR_ID_SERVERWORKS &&
	    dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5IDE)
		return ((1 << (hwif->channel + 14)) &
			dev->subsystem_device) ? 1 : 0;
	return 0;
}

static unsigned int __devinit ata66_svwks (ide_hwif_t *hwif)
{
	struct pci_dev *dev = hwif->pci_dev;

	/* Per Specified Design by OEM, and ASIC Architect */
	if ((dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE) ||
	    (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2))
		return 1;

	/* Server Works */
	if (dev->subsystem_vendor == PCI_VENDOR_ID_SERVERWORKS)
		return ata66_svwks_svwks (hwif);

	/* Dell PowerEdge */
	if (dev->subsystem_vendor == PCI_VENDOR_ID_DELL)
		return ata66_svwks_dell (hwif);

	/* Cobalt Alpine */
	if (dev->subsystem_vendor == PCI_VENDOR_ID_SUN)
		return ata66_svwks_cobalt (hwif);

	return 0;
}

#undef CAN_SW_DMA
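/*
 * init_hwif_svwks() wires up the per-interface hooks (tuneproc,
 * speedproc, ide_dma_check), sets the supported DMA masks and, for
 * non-OSB4 chips, runs the 80-wire cable detection before deciding
 * whether each drive defaults to DMA or to PIO auto-tuning.
 */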
static void __devinit init_hwif_svwks (ide_hwif_t *hwif)
{
	u8 dma_stat = 0;

	if (!hwif->irq)
		hwif->irq = hwif->channel ? 15 : 14;

	hwif->tuneproc = &svwks_tune_drive;
	hwif->speedproc = &svwks_tune_chipset;

	hwif->atapi_dma = 1;

	if (hwif->pci_dev->device != PCI_DEVICE_ID_SERVERWORKS_OSB4IDE)
		hwif->ultra_mask = 0x3f;

	hwif->mwdma_mask = 0x07;
#ifdef CAN_SW_DMA
	hwif->swdma_mask = 0x07;
#endif /* CAN_SW_DMA */

	hwif->autodma = 0;

	if (!hwif->dma_base) {
		hwif->drives[0].autotune = 1;
		hwif->drives[1].autotune = 1;
		return;
	}

	hwif->ide_dma_check = &svwks_config_drive_xfer_rate;
	if (hwif->pci_dev->device == PCI_DEVICE_ID_SERVERWORKS_OSB4IDE)
		hwif->ide_dma_end = &svwks_ide_dma_end;
	else if (!(hwif->udma_four))
		hwif->udma_four = ata66_svwks(hwif);
	if (!noautodma)
		hwif->autodma = 1;

	dma_stat = hwif->INB(hwif->dma_status);
	hwif->drives[0].autodma = (dma_stat & 0x20);
	hwif->drives[1].autodma = (dma_stat & 0x40);
	hwif->drives[0].autotune = (!(dma_stat & 0x20));
	hwif->drives[1].autotune = (!(dma_stat & 0x40));
//	hwif->drives[0].autodma = hwif->autodma;
//	hwif->drives[1].autodma = hwif->autodma;
}

/*
 * We allow the BM-DMA driver to only work on enabled interfaces.
 */
static void __devinit init_dma_svwks (ide_hwif_t *hwif, unsigned long dmabase)
{
	struct pci_dev *dev = hwif->pci_dev;

	if (((dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE) ||
	     (dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2)) &&
	    (!(PCI_FUNC(dev->devfn) & 1)) && (hwif->channel))
		return;

	ide_setup_dma(hwif, dmabase, 8);
}

static int __devinit init_setup_svwks (struct pci_dev *dev, ide_pci_device_t *d)
{
	return ide_setup_pci_device(dev, d);
}

static int __devinit init_setup_csb6 (struct pci_dev *dev, ide_pci_device_t *d)
{
	if (!(PCI_FUNC(dev->devfn) & 1)) {
		d->bootable = NEVER_BOARD;
		if (dev->resource[0].start == 0x01f1)
			d->bootable = ON_BOARD;
	}
#if 0
	if ((IDE_PCI_DEVID_EQ(d->devid, DEVID_CSB6) &&
	    (!(PCI_FUNC(dev->devfn) & 1)))
		d->autodma = AUTODMA;
#endif

	d->channels = ((dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE ||
			dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2) &&
		       (!(PCI_FUNC(dev->devfn) & 1))) ? 1 : 2;

	return ide_setup_pci_device(dev, d);
}

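/*
 * The index of each entry below must match the driver_data value in
 * the corresponding svwks_pci_tbl[] line further down; svwks_init_one()
 * uses that value to look the entry up.
 */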
static ide_pci_device_t serverworks_chipsets[] __devinitdata = {
	{	/* 0 */
		.name		= "SvrWks OSB4",
		.init_setup	= init_setup_svwks,
		.init_chipset	= init_chipset_svwks,
		.init_hwif	= init_hwif_svwks,
		.channels	= 2,
		.autodma	= AUTODMA,
		.bootable	= ON_BOARD,
	},{	/* 1 */
		.name		= "SvrWks CSB5",
		.init_setup	= init_setup_svwks,
		.init_chipset	= init_chipset_svwks,
		.init_hwif	= init_hwif_svwks,
		.init_dma	= init_dma_svwks,
		.channels	= 2,
		.autodma	= AUTODMA,
		.bootable	= ON_BOARD,
	},{	/* 2 */
		.name		= "SvrWks CSB6",
		.init_setup	= init_setup_csb6,
		.init_chipset	= init_chipset_svwks,
		.init_hwif	= init_hwif_svwks,
		.init_dma	= init_dma_svwks,
		.channels	= 2,
		.autodma	= AUTODMA,
		.bootable	= ON_BOARD,
	},{	/* 3 */
		.name		= "SvrWks CSB6",
		.init_setup	= init_setup_csb6,
		.init_chipset	= init_chipset_svwks,
		.init_hwif	= init_hwif_svwks,
		.init_dma	= init_dma_svwks,
		.channels	= 1,	/* 2 */
		.autodma	= AUTODMA,
		.bootable	= ON_BOARD,
	},{	/* 4 */
		.name		= "SvrWks HT1000",
		.init_setup	= init_setup_svwks,
		.init_chipset	= init_chipset_svwks,
		.init_hwif	= init_hwif_svwks,
		.init_dma	= init_dma_svwks,
		.channels	= 1,	/* 2 */
		.autodma	= AUTODMA,
		.bootable	= ON_BOARD,
	}
};

/**
 *	svwks_init_one	-	called when an OSB/CSB is found
 *	@dev: the svwks device
 *	@id: the matching pci id
 *
 *	Called when the PCI registration layer (or the IDE initialization)
 *	finds a device matching our IDE device tables.
 */

static int __devinit svwks_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
	ide_pci_device_t *d = &serverworks_chipsets[id->driver_data];

	return d->init_setup(dev, d);
}

static struct pci_device_id svwks_pci_tbl[] = {
	{ PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_OSB4IDE,   PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{ PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB5IDE,   PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1},
	{ PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE,   PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2},
	{ PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_CSB6IDE2,  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3},
	{ PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_HT1000IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4},
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, svwks_pci_tbl);

static struct pci_driver driver = {
	.name		= "Serverworks_IDE",
	.id_table	= svwks_pci_tbl,
	.probe		= svwks_init_one,
};

static int svwks_ide_init(void)
{
	return ide_pci_register_driver(&driver);
}

module_init(svwks_ide_init);

MODULE_AUTHOR("Michel Aubry, Andrzej Krzysztofowicz, Andre Hedrick");
MODULE_DESCRIPTION("PCI driver module for Serverworks OSB4/CSB5/CSB6 IDE");
MODULE_LICENSE("GPL");