Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git


libata-core.c at v2.6.15-rc7 (4966 lines, 123 kB)
/*
 * libata-core.c - helper library for ATA
 *
 * Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *                 Please ALWAYS copy linux-ide@vger.kernel.org
 *                 on emails.
 *
 * Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 * Copyright 2003-2004 Jeff Garzik
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * Hardware documentation available from http://www.t13.org/ and
 * http://www.sata-io.org/
 *
 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include "scsi_priv.h"
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>

#include "libata.h"

static unsigned int ata_busy_sleep (struct ata_port *ap,
				    unsigned long tmout_pat,
				    unsigned long tmout);
static void ata_dev_reread_id(struct ata_port *ap, struct ata_device *dev);
static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev);
static void ata_set_mode(struct ata_port *ap);
static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift);
static int fgb(u32 bitmap);
static int ata_choose_xfer_mode(const struct ata_port *ap,
				u8 *xfer_mode_out,
				unsigned int *xfer_shift_out);
static void __ata_qc_complete(struct ata_queued_cmd *qc);

static unsigned int ata_unique_id = 1;
static struct workqueue_struct *ata_wq;

int atapi_enabled = 0;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 *	ata_tf_load_pio - send taskfile registers to host controller
 *	@ap: Port to which output is sent
 *	@tf: ATA taskfile register set
 *
 *	Outputs ATA taskfile to standard ATA host controller.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_tf_load_pio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	if (tf->ctl != ap->last_ctl) {
		outb(tf->ctl, ioaddr->ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		outb(tf->hob_feature, ioaddr->feature_addr);
		outb(tf->hob_nsect, ioaddr->nsect_addr);
		outb(tf->hob_lbal, ioaddr->lbal_addr);
		outb(tf->hob_lbam, ioaddr->lbam_addr);
		outb(tf->hob_lbah, ioaddr->lbah_addr);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->hob_feature,
			tf->hob_nsect,
			tf->hob_lbal,
			tf->hob_lbam,
			tf->hob_lbah);
	}

	if (is_addr) {
		outb(tf->feature, ioaddr->feature_addr);
		outb(tf->nsect, ioaddr->nsect_addr);
		outb(tf->lbal, ioaddr->lbal_addr);
		outb(tf->lbam, ioaddr->lbam_addr);
		outb(tf->lbah, ioaddr->lbah_addr);
		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE) {
		outb(tf->device, ioaddr->device_addr);
		VPRINTK("device 0x%X\n", tf->device);
	}

	ata_wait_idle(ap);
}

/**
 *	ata_tf_load_mmio - send taskfile registers to host controller
 *	@ap: Port to which output is sent
 *	@tf: ATA taskfile register set
 *
 *	Outputs ATA taskfile to standard ATA host controller using MMIO.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	if (tf->ctl != ap->last_ctl) {
		writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
		writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
		writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
		writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
		writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->hob_feature,
			tf->hob_nsect,
			tf->hob_lbal,
			tf->hob_lbam,
			tf->hob_lbah);
	}

	if (is_addr) {
		writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
		writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
		writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
		writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
		writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE) {
		writeb(tf->device, (void __iomem *) ioaddr->device_addr);
		VPRINTK("device 0x%X\n", tf->device);
	}

	ata_wait_idle(ap);
}


/**
 *	ata_tf_load - send taskfile registers to host controller
 *	@ap: Port to which output is sent
 *	@tf: ATA taskfile register set
 *
 *	Outputs ATA taskfile to standard ATA host controller using MMIO
 *	or PIO as indicated by the ATA_FLAG_MMIO flag.
 *	Writes the control, feature, nsect, lbal, lbam, and lbah registers.
 *	Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
 *	hob_lbal, hob_lbam, and hob_lbah.
 *
 *	This function waits for idle (!BUSY and !DRQ) after writing
 *	registers.  If the control register has a new value, this
 *	function also waits for idle after writing control and before
 *	writing the remaining registers.
 *
 *	May be used as the tf_load() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
	if (ap->flags & ATA_FLAG_MMIO)
		ata_tf_load_mmio(ap, tf);
	else
		ata_tf_load_pio(ap, tf);
}
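
/*
 * Illustration: as the kernel-doc notes above suggest, a low-level
 * driver typically plugs these generic helpers straight into its
 * ata_port_operations. The driver name and the field subset shown
 * here are hypothetical:
 *
 *	static struct ata_port_operations foo_ops = {
 *		.tf_load	= ata_tf_load,
 *		.tf_read	= ata_tf_read,
 *		.check_status	= ata_check_status,
 *		.exec_command	= ata_exec_command,
 *		.dev_select	= ata_std_dev_select,
 *	};
 *
 * Each helper then dispatches on ATA_FLAG_MMIO at run time, so one
 * ops table serves both port-mapped and memory-mapped controllers.
 */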
/**
 *	ata_exec_command_pio - issue ATA command to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues PIO write to ATA command register, with proper
 *	synchronization with interrupt handler / other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);

	outb(tf->command, ap->ioaddr.command_addr);
	ata_pause(ap);
}


/**
 *	ata_exec_command_mmio - issue ATA command to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues MMIO write to ATA command register, with proper
 *	synchronization with interrupt handler / other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);

	writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
	ata_pause(ap);
}


/**
 *	ata_exec_command - issue ATA command to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues PIO/MMIO write to ATA command register, with proper
 *	synchronization with interrupt handler / other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
{
	if (ap->flags & ATA_FLAG_MMIO)
		ata_exec_command_mmio(ap, tf);
	else
		ata_exec_command_pio(ap, tf);
}

/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}

/**
 *	ata_tf_read_pio - input device's ATA taskfile shadow registers
 *	@ap: Port from which input is read
 *	@tf: ATA taskfile register set for storing input
 *
 *	Reads ATA taskfile registers for currently-selected device
 *	into @tf.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	tf->command = ata_check_status(ap);
	tf->feature = inb(ioaddr->error_addr);
	tf->nsect = inb(ioaddr->nsect_addr);
	tf->lbal = inb(ioaddr->lbal_addr);
	tf->lbam = inb(ioaddr->lbam_addr);
	tf->lbah = inb(ioaddr->lbah_addr);
	tf->device = inb(ioaddr->device_addr);

	if (tf->flags & ATA_TFLAG_LBA48) {
		outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
		tf->hob_feature = inb(ioaddr->error_addr);
		tf->hob_nsect = inb(ioaddr->nsect_addr);
		tf->hob_lbal = inb(ioaddr->lbal_addr);
		tf->hob_lbam = inb(ioaddr->lbam_addr);
		tf->hob_lbah = inb(ioaddr->lbah_addr);
	}
}

/**
 *	ata_tf_read_mmio - input device's ATA taskfile shadow registers
 *	@ap: Port from which input is read
 *	@tf: ATA taskfile register set for storing input
 *
 *	Reads ATA taskfile registers for currently-selected device
 *	into @tf via MMIO.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	tf->command = ata_check_status(ap);
	tf->feature = readb((void __iomem *)ioaddr->error_addr);
	tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
	tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
	tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
	tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
	tf->device = readb((void __iomem *)ioaddr->device_addr);

	if (tf->flags & ATA_TFLAG_LBA48) {
		writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
		tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
		tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
		tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
		tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
		tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
	}
}


/**
 *	ata_tf_read - input device's ATA taskfile shadow registers
 *	@ap: Port from which input is read
 *	@tf: ATA taskfile register set for storing input
 *
 *	Reads ATA taskfile registers for currently-selected device
 *	into @tf.
 *
 *	Reads nsect, lbal, lbam, lbah, and device.  If ATA_TFLAG_LBA48
 *	is set, also reads the hob registers.
 *
 *	May be used as the tf_read() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	if (ap->flags & ATA_FLAG_MMIO)
		ata_tf_read_mmio(ap, tf);
	else
		ata_tf_read_pio(ap, tf);
}

/**
 *	ata_check_status_pio - Read device status reg & clear interrupt
 *	@ap: port where the device is
 *
 *	Reads ATA taskfile status register for currently-selected device
 *	and return its value. This also clears pending interrupts
 *	from this device
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static u8 ata_check_status_pio(struct ata_port *ap)
{
	return inb(ap->ioaddr.status_addr);
}

/**
 *	ata_check_status_mmio - Read device status reg & clear interrupt
 *	@ap: port where the device is
 *
 *	Reads ATA taskfile status register for currently-selected device
 *	via MMIO and return its value. This also clears pending interrupts
 *	from this device
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static u8 ata_check_status_mmio(struct ata_port *ap)
{
	return readb((void __iomem *) ap->ioaddr.status_addr);
}


/**
 *	ata_check_status - Read device status reg & clear interrupt
 *	@ap: port where the device is
 *
 *	Reads ATA taskfile status register for currently-selected device
 *	and return its value. This also clears pending interrupts
 *	from this device
 *
 *	May be used as the check_status() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
u8 ata_check_status(struct ata_port *ap)
{
	if (ap->flags & ATA_FLAG_MMIO)
		return ata_check_status_mmio(ap);
	return ata_check_status_pio(ap);
}


/**
 *	ata_altstatus - Read device alternate status reg
 *	@ap: port where the device is
 *
 *	Reads ATA taskfile alternate status register for
 *	currently-selected device and return its value.
 *
 *	Note: may NOT be used as the check_altstatus() entry in
 *	ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
u8 ata_altstatus(struct ata_port *ap)
{
	if (ap->ops->check_altstatus)
		return ap->ops->check_altstatus(ap);

	if (ap->flags & ATA_FLAG_MMIO)
		return readb((void __iomem *)ap->ioaddr.altstatus_addr);
	return inb(ap->ioaddr.altstatus_addr);
}


/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@fis: Buffer into which data will output
 *	@pmp: Port multiplier port
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	FIS structure (Register - Host to Device).
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
{
	fis[0] = 0x27;	/* Register - Host to Device FIS */
	fis[1] = (pmp & 0xf) | (1 << 7);	/* Port multiplier number,
						   bit 7 indicates Command FIS */
	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}

/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}
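
/*
 * Illustration: serializing a hypothetical READ DMA EXT (command 0x25)
 * taskfile for LBA 0x12345678, nsect 16, port multiplier port 0 with
 * ata_tf_to_fis() above gives:
 *
 *	fis[0]     = 0x27		(Register - Host to Device)
 *	fis[1]     = 0x80		(Command FIS bit set, PM port 0)
 *	fis[2]     = 0x25		(command)
 *	fis[4..6]  = 0x78 0x56 0x34	(lbal, lbam, lbah)
 *	fis[8..10] = 0x12 0x00 0x00	(hob_lbal, hob_lbam, hob_lbah)
 *	fis[12]    = 0x10		(nsect)
 *
 * ata_tf_from_fis() performs the inverse mapping for received FISes,
 * where bytes 2 and 3 carry the status and error registers instead.
 */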
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT
};

/**
 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
 *	@qc: command to examine and configure
 *
 *	Examine the device configuration and tf->flags to calculate
 *	the proper read/write commands and protocol to use.
 *
 *	LOCKING:
 *	caller.
 */
void ata_rwcmd_protocol(struct ata_queued_cmd *qc)
{
	struct ata_taskfile *tf = &qc->tf;
	struct ata_device *dev = qc->dev;

	int index, lba48, write;

	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 4;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 8;
	}

	tf->command = ata_rw_cmds[index + lba48 + write];
}

static const char * xfer_mode_str[] = {
	"UDMA/16",
	"UDMA/25",
	"UDMA/33",
	"UDMA/44",
	"UDMA/66",
	"UDMA/100",
	"UDMA/133",
	"UDMA7",
	"MWDMA0",
	"MWDMA1",
	"MWDMA2",
	"PIO0",
	"PIO1",
	"PIO2",
	"PIO3",
	"PIO4",
};

/**
 *	ata_mode_string - convert transfer mode bit offset to string
 *	@mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@mask, or the constant C string "<n/a>".
 */

static const char *ata_mode_string(unsigned int mask)
{
	int i;

	for (i = 7; i >= 0; i--)
		if (mask & (1 << i))
			goto out;
	for (i = ATA_SHIFT_MWDMA + 2; i >= ATA_SHIFT_MWDMA; i--)
		if (mask & (1 << i))
			goto out;
	for (i = ATA_SHIFT_PIO + 4; i >= ATA_SHIFT_PIO; i--)
		if (mask & (1 << i))
			goto out;

	return "<n/a>";

out:
	return xfer_mode_str[i];
}
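
/*
 * Illustration: for mask 0x3f (UDMA modes 0-5 supported), the first
 * loop above stops at bit 5, so ata_mode_string() returns
 * xfer_mode_str[5], i.e. "UDMA/100".
 */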
/**
 *	ata_pio_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */

static unsigned int ata_pio_devchk(struct ata_port *ap,
				   unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	outb(0x55, ioaddr->nsect_addr);
	outb(0xaa, ioaddr->lbal_addr);

	outb(0xaa, ioaddr->nsect_addr);
	outb(0x55, ioaddr->lbal_addr);

	outb(0x55, ioaddr->nsect_addr);
	outb(0xaa, ioaddr->lbal_addr);

	nsect = inb(ioaddr->nsect_addr);
	lbal = inb(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}

/**
 *	ata_mmio_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */

static unsigned int ata_mmio_devchk(struct ata_port *ap,
				    unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

	writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
	writeb(0x55, (void __iomem *) ioaddr->lbal_addr);

	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

	nsect = readb((void __iomem *) ioaddr->nsect_addr);
	lbal = readb((void __iomem *) ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}

/**
 *	ata_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	Dispatch ATA device presence detection, depending
 *	on whether we are using PIO or MMIO to talk to the
 *	ATA shadow registers.
 *
 *	LOCKING:
 *	caller.
 */

static unsigned int ata_devchk(struct ata_port *ap,
			       unsigned int device)
{
	if (ap->flags & ATA_FLAG_MMIO)
		return ata_mmio_devchk(ap, device);
	return ata_pio_devchk(ap, device);
}

/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section
 *	of ATA/PI spec (volume 1, sect 5.14).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
 *	in the event of failure.
 */

unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * So, we only check those.  It's sufficient for uniqueness.
	 */

	if (((tf->lbam == 0) && (tf->lbah == 0)) ||
	    ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
	    ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}
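
/*
 * Signature values checked above, tabulated for reference:
 *
 *	lbam	lbah	class
 *	0x00	0x00	ATA_DEV_ATA
 *	0x3c	0xc3	ATA_DEV_ATA
 *	0x14	0xeb	ATA_DEV_ATAPI
 *	0x69	0x96	ATA_DEV_ATAPI
 */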
/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 */

static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
{
	struct ata_device *dev = &ap->device[device];
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, device);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	err = tf.feature;

	dev->class = ATA_DEV_NONE;

	/* see if device passed diags */
	if (err == 1)
		/* do nothing */ ;
	else if ((device == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return err;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);
	if (class == ATA_DEV_UNKNOWN)
		return err;
	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		return err;

	dev->class = class;

	return err;
}

/**
 *	ata_dev_id_string - Convert IDENTIFY DEVICE page into string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an even number.
 *
 *	The strings in the IDENTIFY DEVICE page are broken up into
 *	16-bit chunks.  Run through the string, and output each
 *	8-bit chunk linearly, regardless of platform.
 *
 *	LOCKING:
 *	caller.
 */

void ata_dev_id_string(const u16 *id, unsigned char *s,
		       unsigned int ofs, unsigned int len)
{
	unsigned int c;

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}
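
/*
 * Illustration: IDENTIFY strings are stored big-endian within each
 * 16-bit word, so an id word of 0x4142 emits 'A' then 'B' from
 * ata_dev_id_string() regardless of host endianness.
 */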

/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
}


/**
 *	ata_std_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.  Works with both PIO and MMIO.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */

void ata_std_dev_select (struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	if (ap->flags & ATA_FLAG_MMIO) {
		writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
	} else {
		outb(tmp, ap->ioaddr.device_addr);
	}
	ata_pause(ap);	/* needed; also flushes, for mmio */
}

/**
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	This is a high-level version of ata_std_dev_select(),
 *	which additionally provides the services of inserting
 *	the proper pauses and status polling, where needed.
 *
 *	LOCKING:
 *	caller.
 */

void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
{
	VPRINTK("ENTER, ata%u: device %u, wait %u\n",
		ap->id, device, wait);

	if (wait)
		ata_wait_idle(ap);

	ap->ops->dev_select(ap, device);

	if (wait) {
		if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}

/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@dev: Device whose IDENTIFY DEVICE page we will dump
 *
 *	Dump selected 16-bit words from a detected device's
 *	IDENTIFY DEVICE page.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const struct ata_device *dev)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		dev->id[49],
		dev->id[53],
		dev->id[63],
		dev->id[64],
		dev->id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		dev->id[80],
		dev->id[81],
		dev->id[82],
		dev->id[83],
		dev->id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		dev->id[88],
		dev->id[93]);
}

/*
 * Compute the PIO modes available for this device. This is not as
 * trivial as it seems if we must consider early devices correctly.
 *
 * FIXME: pre IDE drive timing (do we care ?).
 */

static unsigned int ata_pio_modes(const struct ata_device *adev)
{
	u16 modes;

	/* Usual case. Word 53 indicates word 88 is valid */
	if (adev->id[ATA_ID_FIELD_VALID] & (1 << 2)) {
		modes = adev->id[ATA_ID_PIO_MODES] & 0x03;
		modes <<= 3;
		modes |= 0x7;
		return modes;
	}

	/* If word 88 isn't valid then Word 51 holds the PIO timing number
	   for the maximum. Turn it into a mask and return it */
	modes = (2 << (adev->id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1;
	return modes;
}
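
/*
 * Worked example for ata_pio_modes(): a drive with word 53 valid and
 * id[ATA_ID_PIO_MODES] == 0x0003 yields ((3 << 3) | 0x7) == 0x1f,
 * i.e. PIO modes 0-4. An older drive reporting 2 in the low byte of
 * word 51 instead yields (2 << 2) - 1 == 0x07, i.e. PIO modes 0-2.
 */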

static int ata_qc_wait_err(struct ata_queued_cmd *qc,
			   struct completion *wait)
{
	int rc = 0;

	if (wait_for_completion_timeout(wait, 30 * HZ) < 1) {
		/* timeout handling */
		unsigned int err_mask = ac_err_mask(ata_chk_status(qc->ap));

		if (!err_mask) {
			printk(KERN_WARNING "ata%u: slow completion (cmd %x)\n",
			       qc->ap->id, qc->tf.command);
		} else {
			printk(KERN_WARNING "ata%u: qc timeout (cmd %x)\n",
			       qc->ap->id, qc->tf.command);
			rc = -EIO;
		}

		ata_qc_complete(qc, err_mask);
	}

	return rc;
}

/**
 *	ata_dev_identify - obtain IDENTIFY x DEVICE page
 *	@ap: port on which device we wish to probe resides
 *	@device: device bus address, starting at zero
 *
 *	Following bus reset, we issue the IDENTIFY [PACKET] DEVICE
 *	command, and read back the 512-byte device information page.
 *	The device information page is fed to us via the standard
 *	PIO-IN protocol, but we hand-code it here. (TODO: investigate
 *	using standard PIO-IN paths)
 *
 *	After reading the device information page, we use several
 *	bits of information from it to initialize data structures
 *	that will be used during the lifetime of the ata_device.
 *	Other data from the info page is used to disqualify certain
 *	older ATA devices we do not wish to support.
 *
 *	LOCKING:
 *	Inherited from caller.  Some functions called by this function
 *	obtain the host_set lock.
 */

static void ata_dev_identify(struct ata_port *ap, unsigned int device)
{
	struct ata_device *dev = &ap->device[device];
	unsigned int major_version;
	u16 tmp;
	unsigned long xfer_modes;
	unsigned int using_edd;
	DECLARE_COMPLETION(wait);
	struct ata_queued_cmd *qc;
	unsigned long flags;
	int rc;

	if (!ata_dev_present(dev)) {
		DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
			ap->id, device);
		return;
	}

	if (ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))
		using_edd = 0;
	else
		using_edd = 1;

	DPRINTK("ENTER, host %u, dev %u\n", ap->id, device);

	assert (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ATAPI ||
		dev->class == ATA_DEV_NONE);

	ata_dev_select(ap, device, 1, 1); /* select device 0/1 */

	qc = ata_qc_new_init(ap, dev);
	BUG_ON(qc == NULL);

	ata_sg_init_one(qc, dev->id, sizeof(dev->id));
	qc->dma_dir = DMA_FROM_DEVICE;
	qc->tf.protocol = ATA_PROT_PIO;
	qc->nsect = 1;

retry:
	if (dev->class == ATA_DEV_ATA) {
		qc->tf.command = ATA_CMD_ID_ATA;
		DPRINTK("do ATA identify\n");
	} else {
		qc->tf.command = ATA_CMD_ID_ATAPI;
		DPRINTK("do ATAPI identify\n");
	}

	qc->waiting = &wait;
	qc->complete_fn = ata_qc_complete_noop;

	spin_lock_irqsave(&ap->host_set->lock, flags);
	rc = ata_qc_issue(qc);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	if (rc)
		goto err_out;
	else
		ata_qc_wait_err(qc, &wait);

	spin_lock_irqsave(&ap->host_set->lock, flags);
	ap->ops->tf_read(ap, &qc->tf);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	if (qc->tf.command & ATA_ERR) {
		/*
		 * arg!  EDD works for all test cases, but seems to return
		 * the ATA signature for some ATAPI devices.  Until the
		 * reason for this is found and fixed, we fix up the mess
		 * here.  If IDENTIFY DEVICE returns command aborted
		 * (as ATAPI devices do), then we issue an
		 * IDENTIFY PACKET DEVICE.
		 *
		 * ATA software reset (SRST, the default) does not appear
		 * to have this problem.
		 */
		if ((using_edd) && (dev->class == ATA_DEV_ATA)) {
			u8 err = qc->tf.feature;
			if (err & ATA_ABORTED) {
				dev->class = ATA_DEV_ATAPI;
				qc->cursg = 0;
				qc->cursg_ofs = 0;
				qc->cursect = 0;
				qc->nsect = 1;
				goto retry;
			}
		}
		goto err_out;
	}

	swap_buf_le16(dev->id, ATA_ID_WORDS);

	/* print device capabilities */
	printk(KERN_DEBUG "ata%u: dev %u cfg "
	       "49:%04x 82:%04x 83:%04x 84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
	       ap->id, device, dev->id[49],
	       dev->id[82], dev->id[83], dev->id[84],
	       dev->id[85], dev->id[86], dev->id[87],
	       dev->id[88]);

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* we require DMA support (bit 8 of word 49) */
	if (!ata_id_has_dma(dev->id)) {
		printk(KERN_DEBUG "ata%u: no dma\n", ap->id);
		goto err_out_nosup;
	}

	/* quick-n-dirty find max transfer mode; for printk only */
	xfer_modes = dev->id[ATA_ID_UDMA_MODES];
	if (!xfer_modes)
		xfer_modes = (dev->id[ATA_ID_MWDMA_MODES]) << ATA_SHIFT_MWDMA;
	if (!xfer_modes)
		xfer_modes = ata_pio_modes(dev);

	ata_dump_id(dev);

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(dev->id))	/* sanity check */
			goto err_out_nosup;

		/* get major version */
		tmp = dev->id[ATA_ID_MAJOR_VER];
		for (major_version = 14; major_version >= 1; major_version--)
			if (tmp & (1 << major_version))
				break;

		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY
		 * INITIALIZE DEVICE PARAMETERS
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 */
		if (major_version < 4 || (!ata_id_has_lba(dev->id))) {
			ata_dev_init_params(ap, dev);

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			ata_dev_reread_id(ap, dev);
		}

		if (ata_id_has_lba(dev->id)) {
			dev->flags |= ATA_DFLAG_LBA;

			if (ata_id_has_lba48(dev->id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				dev->n_sectors = ata_id_u64(dev->id, 100);
			} else {
				dev->n_sectors = ata_id_u32(dev->id, 60);
			}

			/* print device info to dmesg */
			printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors:%s\n",
			       ap->id, device,
			       major_version,
			       ata_mode_string(xfer_modes),
			       (unsigned long long)dev->n_sectors,
			       dev->flags & ATA_DFLAG_LBA48 ? " LBA48" : " LBA");
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= dev->id[1];
			dev->heads	= dev->id[3];
			dev->sectors	= dev->id[6];
			dev->n_sectors	= dev->cylinders * dev->heads * dev->sectors;

			if (ata_id_current_chs_valid(dev->id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = dev->id[54];
				dev->heads     = dev->id[55];
				dev->sectors   = dev->id[56];

				dev->n_sectors = ata_id_u32(dev->id, 57);
			}

			/* print device info to dmesg */
			printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors: CHS %d/%d/%d\n",
			       ap->id, device,
			       major_version,
			       ata_mode_string(xfer_modes),
			       (unsigned long long)dev->n_sectors,
			       (int)dev->cylinders, (int)dev->heads, (int)dev->sectors);

		}

		ap->host->max_cmd_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		if (ata_id_is_ata(dev->id))		/* sanity check */
			goto err_out_nosup;

		rc = atapi_cdb_len(dev->id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
			goto err_out_nosup;
		}
		ap->cdb_len = (unsigned int) rc;
		ap->host->max_cmd_len = (unsigned char) ap->cdb_len;

		/* print device info to dmesg */
		printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
		       ap->id, device,
		       ata_mode_string(xfer_modes));
	}

	DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
	return;

err_out_nosup:
	printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n",
	       ap->id, device);
err_out:
	dev->class++;	/* converts ATA_DEV_xxx into ATA_DEV_xxx_UNSUP */
	DPRINTK("EXIT, err\n");
}


static inline u8 ata_dev_knobble(const struct ata_port *ap)
{
	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(ap->device->id)));
}

/**
 *	ata_dev_config - Run device specific handlers and check for
 *			 SATA->PATA bridges
 *	@ap: Bus
 *	@i:  Device
 *
 *	LOCKING:
 */

void ata_dev_config(struct ata_port *ap, unsigned int i)
{
	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(ap)) {
		printk(KERN_INFO "ata%u(%u): applying bridge limits\n",
		       ap->id, ap->device->devno);
		ap->udma_mask &= ATA_UDMA5;
		ap->host->max_sectors = ATA_MAX_SECTORS;
		ap->host->hostt->max_sectors = ATA_MAX_SECTORS;
		ap->device->flags |= ATA_DFLAG_LOCK_SECTORS;
	}

	if (ap->ops->dev_config)
		ap->ops->dev_config(ap, &ap->device[i]);
}

/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, non-zero on error.
 */

static int ata_bus_probe(struct ata_port *ap)
{
	unsigned int i, found = 0;

	ap->ops->phy_reset(ap);
	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		goto err_out;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		ata_dev_identify(ap, i);
		if (ata_dev_present(&ap->device[i])) {
			found = 1;
			ata_dev_config(ap, i);
		}
	}

	if ((!found) || (ap->flags & ATA_FLAG_PORT_DISABLED))
		goto err_out_disable;

	ata_set_mode(ap);
	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		goto err_out_disable;

	return 0;

err_out_disable:
	ap->ops->port_disable(ap);
err_out:
	return -1;
}

/**
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled.
 *
 *	LOCKING: host_set lock, or some other form of
 *	serialization.
 */

void ata_port_probe(struct ata_port *ap)
{
	ap->flags &= ~ATA_FLAG_PORT_DISABLED;
}

/**
 *	__sata_phy_reset - Wake/reset a low-level SATA PHY
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function issues commands to standard SATA Sxxx
 *	PHY registers, to wake up the phy (and device), and
 *	clear any reset condition.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
void __sata_phy_reset(struct ata_port *ap)
{
	u32 sstatus;
	unsigned long timeout = jiffies + (HZ * 5);

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset */
		scr_write_flush(ap, SCR_CONTROL, 0x301);
		/* Couldn't find anything in SATA I/II specs, but
		 * AHCI-1.1 10.4.2 says at least 1 ms. */
		mdelay(1);
	}
	scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */

	/* wait for phy to become ready, if necessary */
	do {
		msleep(200);
		sstatus = scr_read(ap, SCR_STATUS);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* TODO: phy layer with polling, timeouts, etc. */
	if (sata_dev_present(ap))
		ata_port_probe(ap);
	else {
		sstatus = scr_read(ap, SCR_STATUS);
		printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n",
		       ap->id, sstatus);
		ata_port_disable(ap);
	}

	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		return;

	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}

/**
 *	sata_phy_reset - Reset SATA bus.
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function resets the SATA bus, and then probes
 *	the bus for devices.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
void sata_phy_reset(struct ata_port *ap)
{
	__sata_phy_reset(ap);
	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		return;
	ata_bus_reset(ap);
}
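
/*
 * Note on the SCR values above, following the SATA spec's SControl and
 * SStatus register layouts: writing 0x301 to SCR_CONTROL sets DET = 1
 * (start interface initialization, i.e. COMRESET) and IPM = 3 (disable
 * partial/slumber power-state transitions); writing 0x300 sets DET = 0,
 * releasing the reset. In SCR_STATUS the low nibble is again DET:
 * 1 means a device was sensed but no PHY communication established,
 * 3 means PHY communication is up, hence the wait loop above exits
 * once (sstatus & 0xf) != 1.
 */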
/**
 *	ata_port_disable - Disable port.
 *	@ap: Port to be disabled.
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is disabled, and should
 *	never attempt to probe or communicate with devices
 *	on this port.
 *
 *	LOCKING: host_set lock, or some other form of
 *	serialization.
 */

void ata_port_disable(struct ata_port *ap)
{
	ap->device[0].class = ATA_DEV_NONE;
	ap->device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_PORT_DISABLED;
}

/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for PIO 5, which is a nonstandard extension and UDMA6, which
 * is currently supported only by Maxtor drives.
 */

static const struct ata_timing ata_timing[] = {

	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },

	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */

	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },

	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },

/*	{ XFER_PIO_5,     20,  50,  30, 100,  50,  30, 100,   0 }, */
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },

	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */

	{ 0xFF }
};

#define ENOUGH(v,unit)		(((v)-1)/(unit)+1)
#define EZ(v,unit)		((v)?ENOUGH(v,unit):0)

static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup   = EZ(t->setup   * 1000,  T);
	q->act8b   = EZ(t->act8b   * 1000,  T);
	q->rec8b   = EZ(t->rec8b   * 1000,  T);
	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
	q->active  = EZ(t->active  * 1000,  T);
	q->recover = EZ(t->recover * 1000,  T);
	q->cycle   = EZ(t->cycle   * 1000,  T);
	q->udma    = EZ(t->udma    * 1000, UT);
}

void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
}

static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
{
	const struct ata_timing *t;

	for (t = ata_timing; t->mode != speed; t++)
		if (t->mode == 0xFF)
			return NULL;
	return t;
}

int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
			else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, S.M.A.R.T
	 * and some other commands. We have to ensure that the DMA cycle timing is
	 * slower/equal than the fastest PIO timing.
	 */

	if (speed > XFER_PIO_4) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	return 0;
}
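
/*
 * Quantization example: the ata_timing table stores nanoseconds, so
 * callers pass T and UT as clock periods in picoseconds (hence the
 * "* 1000" in ata_timing_quantize(), assuming that convention). On a
 * 33 MHz bus, T == 30000, and a 70 ns setup requirement rounds up to
 * EZ(70 * 1000, 30000) == 3 clocks; a zero table entry means
 * "unspecified" and stays zero.
 */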
static const struct {
	unsigned int shift;
	u8 base;
} xfer_mode_classes[] = {
	{ ATA_SHIFT_UDMA,	XFER_UDMA_0 },
	{ ATA_SHIFT_MWDMA,	XFER_MW_DMA_0 },
	{ ATA_SHIFT_PIO,	XFER_PIO_0 },
};

static inline u8 base_from_shift(unsigned int shift)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++)
		if (xfer_mode_classes[i].shift == shift)
			return xfer_mode_classes[i].base;

	return 0xff;
}

static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
{
	int ofs, idx;
	u8 base;

	if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED))
		return;

	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	ata_dev_set_xfermode(ap, dev);

	base = base_from_shift(dev->xfer_shift);
	ofs = dev->xfer_mode - base;
	idx = ofs + dev->xfer_shift;
	WARN_ON(idx >= ARRAY_SIZE(xfer_mode_str));

	DPRINTK("idx=%d xfer_shift=%u, xfer_mode=0x%x, base=0x%x, offset=%d\n",
		idx, dev->xfer_shift, (int)dev->xfer_mode, (int)base, ofs);

	printk(KERN_INFO "ata%u: dev %u configured for %s\n",
	       ap->id, dev->devno, xfer_mode_str[idx]);
}

static int ata_host_set_pio(struct ata_port *ap)
{
	unsigned int mask;
	int x, i;
	u8 base, xfer_mode;

	mask = ata_get_mode_mask(ap, ATA_SHIFT_PIO);
	x = fgb(mask);
	if (x < 0) {
		printk(KERN_WARNING "ata%u: no PIO support\n", ap->id);
		return -1;
	}

	base = base_from_shift(ATA_SHIFT_PIO);
	xfer_mode = base + x;

	DPRINTK("base 0x%x xfer_mode 0x%x mask 0x%x x %d\n",
		(int)base, (int)xfer_mode, mask, x);

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		if (ata_dev_present(dev)) {
			dev->pio_mode = xfer_mode;
			dev->xfer_mode = xfer_mode;
			dev->xfer_shift = ATA_SHIFT_PIO;
			if (ap->ops->set_piomode)
				ap->ops->set_piomode(ap, dev);
		}
	}

	return 0;
}

static void ata_host_set_dma(struct ata_port *ap, u8 xfer_mode,
			     unsigned int xfer_shift)
{
	int i;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		if (ata_dev_present(dev)) {
			dev->dma_mode = xfer_mode;
			dev->xfer_mode = xfer_mode;
			dev->xfer_shift = xfer_shift;
			if (ap->ops->set_dmamode)
				ap->ops->set_dmamode(ap, dev);
		}
	}
}

/**
 *	ata_set_mode - Program timings and issue SET FEATURES - XFER
 *	@ap: port on which timings will be programmed
 *
 *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
static void ata_set_mode(struct ata_port *ap)
{
	unsigned int xfer_shift;
	u8 xfer_mode;
	int rc;

	/* step 1: always set host PIO timings */
	rc = ata_host_set_pio(ap);
	if (rc)
		goto err_out;

	/* step 2: choose the best data xfer mode */
	xfer_mode = xfer_shift = 0;
	rc = ata_choose_xfer_mode(ap, &xfer_mode, &xfer_shift);
	if (rc)
		goto err_out;

	/* step 3: if that xfer mode isn't PIO, set host DMA timings */
	if (xfer_shift != ATA_SHIFT_PIO)
		ata_host_set_dma(ap, xfer_mode, xfer_shift);

	/* step 4: update devices' xfer mode */
	ata_dev_set_mode(ap, &ap->device[0]);
	ata_dev_set_mode(ap, &ap->device[1]);

	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		return;

	if (ap->ops->post_set_mode)
		ap->ops->post_set_mode(ap);

	return;

err_out:
	ata_port_disable(ap);
}
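
/*
 * Illustration of the negotiation above: with ap->pio_mask == 0x1f and
 * both devices reporting full PIO support, ata_get_mode_mask(ap,
 * ATA_SHIFT_PIO) returns 0x1f, fgb() picks bit 4, and each present
 * device is programmed to XFER_PIO_0 + 4 == XFER_PIO_4.
 */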
/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout
 *	@tmout: overall timeout
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.
 *
 *	LOCKING: None.
 *
 */

static unsigned int ata_busy_sleep (struct ata_port *ap,
				    unsigned long tmout_pat,
				    unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status & ATA_BUSY)
		printk(KERN_WARNING "ata%u is slow to respond, "
		       "please be patient\n", ap->id);

	timeout = timer_start + tmout;
	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status & ATA_BUSY) {
		printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
		       ap->id, tmout / HZ);
		return 1;
	}

	return 0;
}

static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	unsigned long timeout;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* if device 1 was found in ata_devchk, wait for
	 * register access, then wait for BSY to clear
	 */
	timeout = jiffies + ATA_TMOUT_BOOT;
	while (dev1) {
		u8 nsect, lbal;

		ap->ops->dev_select(ap, 1);
		if (ap->flags & ATA_FLAG_MMIO) {
			nsect = readb((void __iomem *) ioaddr->nsect_addr);
			lbal = readb((void __iomem *) ioaddr->lbal_addr);
		} else {
			nsect = inb(ioaddr->nsect_addr);
			lbal = inb(ioaddr->lbal_addr);
		}
		if ((nsect == 1) && (lbal == 1))
			break;
		if (time_after(jiffies, timeout)) {
			dev1 = 0;
			break;
		}
		msleep(50);	/* give drive a breather */
	}
	if (dev1)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);
}

/**
 *	ata_bus_edd - Issue EXECUTE DEVICE DIAGNOSTIC command.
 *	@ap: Port to reset and probe
 *
 *	Use the EXECUTE DEVICE DIAGNOSTIC command to reset and
 *	probe the bus.  Not often used these days.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host_set lock.
 *
 */

static unsigned int ata_bus_edd(struct ata_port *ap)
{
	struct ata_taskfile tf;
	unsigned long flags;

	/* set up execute-device-diag (bus reset) taskfile */
	/* also, take interrupts to a known state (disabled) */
	DPRINTK("execute-device-diag\n");
	ata_tf_init(ap, &tf, 0);
	tf.ctl |= ATA_NIEN;
	tf.command = ATA_CMD_EDD;
	tf.protocol = ATA_PROT_NODATA;

	/* do bus reset */
	spin_lock_irqsave(&ap->host_set->lock, flags);
	ata_tf_to_host(ap, &tf);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	/* spec says at least 2ms.  but who knows with those
	 * crazy ATAPI devices...
	 */
	msleep(150);

	return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
}

static unsigned int ata_bus_softreset(struct ata_port *ap,
				      unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->id);

	/* software reset.  causes dev0 to be selected */
	if (ap->flags & ATA_FLAG_MMIO) {
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
	} else {
		outb(ap->ctl, ioaddr->ctl_addr);
		udelay(10);
		outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
		udelay(10);
		outb(ap->ctl, ioaddr->ctl_addr);
	}

	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 */
	msleep(150);

	ata_bus_post_reset(ap, devmask);

	return 0;
}

/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host_set lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
 */

void ata_bus_reset(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, rc = 0, devmask = 0;

	DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);

	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	if (ap->flags & ATA_FLAG_SRST)
		rc = ata_bus_softreset(ap, devmask);
	else if ((ap->flags & ATA_FLAG_SATA_RESET) == 0) {
		/* set up device control */
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		else
			outb(ap->ctl, ioaddr->ctl_addr);
		rc = ata_bus_edd(ap);
	}

	if (rc)
		goto err_out;

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	err = ata_dev_try_classify(ap, 0);
	if ((slave_possible) && (err != 0x81))
		ata_dev_try_classify(ap, 1);

	/* re-enable interrupts */
	if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */
		ata_irq_on(ap);

	/* is double-select really necessary? */
*/ 2039 if (ap->device[1].class != ATA_DEV_NONE) 2040 ap->ops->dev_select(ap, 1); 2041 if (ap->device[0].class != ATA_DEV_NONE) 2042 ap->ops->dev_select(ap, 0); 2043 2044 /* if no devices were detected, disable this port */ 2045 if ((ap->device[0].class == ATA_DEV_NONE) && 2046 (ap->device[1].class == ATA_DEV_NONE)) 2047 goto err_out; 2048 2049 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) { 2050 /* set up device control for ATA_FLAG_SATA_RESET */ 2051 if (ap->flags & ATA_FLAG_MMIO) 2052 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr); 2053 else 2054 outb(ap->ctl, ioaddr->ctl_addr); 2055 } 2056 2057 DPRINTK("EXIT\n"); 2058 return; 2059 2060err_out: 2061 printk(KERN_ERR "ata%u: disabling port\n", ap->id); 2062 ap->ops->port_disable(ap); 2063 2064 DPRINTK("EXIT\n"); 2065} 2066 2067static void ata_pr_blacklisted(const struct ata_port *ap, 2068 const struct ata_device *dev) 2069{ 2070 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, disabling DMA\n", 2071 ap->id, dev->devno); 2072} 2073 2074static const char * ata_dma_blacklist [] = { 2075 "WDC AC11000H", 2076 "WDC AC22100H", 2077 "WDC AC32500H", 2078 "WDC AC33100H", 2079 "WDC AC31600H", 2080 "WDC AC32100H", 2081 "WDC AC23200L", 2082 "Compaq CRD-8241B", 2083 "CRD-8400B", 2084 "CRD-8480B", 2085 "CRD-8482B", 2086 "CRD-84", 2087 "SanDisk SDP3B", 2088 "SanDisk SDP3B-64", 2089 "SANYO CD-ROM CRD", 2090 "HITACHI CDR-8", 2091 "HITACHI CDR-8335", 2092 "HITACHI CDR-8435", 2093 "Toshiba CD-ROM XM-6202B", 2094 "TOSHIBA CD-ROM XM-1702BC", 2095 "CD-532E-A", 2096 "E-IDE CD-ROM CR-840", 2097 "CD-ROM Drive/F5A", 2098 "WPI CDD-820", 2099 "SAMSUNG CD-ROM SC-148C", 2100 "SAMSUNG CD-ROM SC", 2101 "SanDisk SDP3B-64", 2102 "ATAPI CD-ROM DRIVE 40X MAXIMUM", 2103 "_NEC DV5800A", 2104}; 2105 2106static int ata_dma_blacklisted(const struct ata_device *dev) 2107{ 2108 unsigned char model_num[40]; 2109 char *s; 2110 unsigned int len; 2111 int i; 2112 2113 ata_dev_id_string(dev->id, model_num, ATA_ID_PROD_OFS, 2114 sizeof(model_num)); 2115 s = &model_num[0]; 2116 len = strnlen(s, sizeof(model_num)); 2117 2118 /* ATAPI specifies that empty space is blank-filled; remove blanks */ 2119 while ((len > 0) && (s[len - 1] == ' ')) { 2120 len--; 2121 s[len] = 0; 2122 } 2123 2124 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i++) 2125 if (!strncmp(ata_dma_blacklist[i], s, len)) 2126 return 1; 2127 2128 return 0; 2129} 2130 2131static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift) 2132{ 2133 const struct ata_device *master, *slave; 2134 unsigned int mask; 2135 2136 master = &ap->device[0]; 2137 slave = &ap->device[1]; 2138 2139 assert (ata_dev_present(master) || ata_dev_present(slave)); 2140 2141 if (shift == ATA_SHIFT_UDMA) { 2142 mask = ap->udma_mask; 2143 if (ata_dev_present(master)) { 2144 mask &= (master->id[ATA_ID_UDMA_MODES] & 0xff); 2145 if (ata_dma_blacklisted(master)) { 2146 mask = 0; 2147 ata_pr_blacklisted(ap, master); 2148 } 2149 } 2150 if (ata_dev_present(slave)) { 2151 mask &= (slave->id[ATA_ID_UDMA_MODES] & 0xff); 2152 if (ata_dma_blacklisted(slave)) { 2153 mask = 0; 2154 ata_pr_blacklisted(ap, slave); 2155 } 2156 } 2157 } 2158 else if (shift == ATA_SHIFT_MWDMA) { 2159 mask = ap->mwdma_mask; 2160 if (ata_dev_present(master)) { 2161 mask &= (master->id[ATA_ID_MWDMA_MODES] & 0x07); 2162 if (ata_dma_blacklisted(master)) { 2163 mask = 0; 2164 ata_pr_blacklisted(ap, master); 2165 } 2166 } 2167 if (ata_dev_present(slave)) { 2168 mask &= (slave->id[ATA_ID_MWDMA_MODES] & 0x07); 2169 if (ata_dma_blacklisted(slave)) { 2170 mask = 0; 
2171 ata_pr_blacklisted(ap, slave); 2172 } 2173 } 2174 } 2175 else if (shift == ATA_SHIFT_PIO) { 2176 mask = ap->pio_mask; 2177 if (ata_dev_present(master)) { 2178 /* spec doesn't return explicit support for 2179 * PIO0-2, so we fake it 2180 */ 2181 u16 tmp_mode = master->id[ATA_ID_PIO_MODES] & 0x03; 2182 tmp_mode <<= 3; 2183 tmp_mode |= 0x7; 2184 mask &= tmp_mode; 2185 } 2186 if (ata_dev_present(slave)) { 2187 /* spec doesn't return explicit support for 2188 * PIO0-2, so we fake it 2189 */ 2190 u16 tmp_mode = slave->id[ATA_ID_PIO_MODES] & 0x03; 2191 tmp_mode <<= 3; 2192 tmp_mode |= 0x7; 2193 mask &= tmp_mode; 2194 } 2195 } 2196 else { 2197 mask = 0xffffffff; /* shut up compiler warning */ 2198 BUG(); 2199 } 2200 2201 return mask; 2202} 2203 2204/* find greatest bit */ 2205static int fgb(u32 bitmap) 2206{ 2207 unsigned int i; 2208 int x = -1; 2209 2210 for (i = 0; i < 32; i++) 2211 if (bitmap & (1 << i)) 2212 x = i; 2213 2214 return x; 2215} 2216 2217/** 2218 * ata_choose_xfer_mode - attempt to find best transfer mode 2219 * @ap: Port for which an xfer mode will be selected 2220 * @xfer_mode_out: (output) SET FEATURES - XFER MODE code 2221 * @xfer_shift_out: (output) bit shift that selects this mode 2222 * 2223 * Based on host and device capabilities, determine the 2224 * maximum transfer mode that is amenable to all. 2225 * 2226 * LOCKING: 2227 * PCI/etc. bus probe sem. 2228 * 2229 * RETURNS: 2230 * Zero on success, negative on error. 2231 */ 2232 2233static int ata_choose_xfer_mode(const struct ata_port *ap, 2234 u8 *xfer_mode_out, 2235 unsigned int *xfer_shift_out) 2236{ 2237 unsigned int mask, shift; 2238 int x, i; 2239 2240 for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++) { 2241 shift = xfer_mode_classes[i].shift; 2242 mask = ata_get_mode_mask(ap, shift); 2243 2244 x = fgb(mask); 2245 if (x >= 0) { 2246 *xfer_mode_out = xfer_mode_classes[i].base + x; 2247 *xfer_shift_out = shift; 2248 return 0; 2249 } 2250 } 2251 2252 return -1; 2253} 2254 2255/** 2256 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command 2257 * @ap: Port associated with device @dev 2258 * @dev: Device to which command will be sent 2259 * 2260 * Issue SET FEATURES - XFER MODE command to device @dev 2261 * on port @ap. 2262 * 2263 * LOCKING: 2264 * PCI/etc. bus probe sem. 
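 *
 * Worked example (illustrative only): if ata_choose_xfer_mode()
 * above settled on UDMA mode 2, dev->xfer_mode holds XFER_UDMA_2
 * (0x42, i.e. the class base 0x40 plus the highest mode bit found
 * by fgb()), and that byte is placed in the sector count register
 * of the SET FEATURES taskfile below.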
2265 */ 2266 2267static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev) 2268{ 2269 DECLARE_COMPLETION(wait); 2270 struct ata_queued_cmd *qc; 2271 int rc; 2272 unsigned long flags; 2273 2274 /* set up set-features taskfile */ 2275 DPRINTK("set features - xfer mode\n"); 2276 2277 qc = ata_qc_new_init(ap, dev); 2278 BUG_ON(qc == NULL); 2279 2280 qc->tf.command = ATA_CMD_SET_FEATURES; 2281 qc->tf.feature = SETFEATURES_XFER; 2282 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 2283 qc->tf.protocol = ATA_PROT_NODATA; 2284 qc->tf.nsect = dev->xfer_mode; 2285 2286 qc->waiting = &wait; 2287 qc->complete_fn = ata_qc_complete_noop; 2288 2289 spin_lock_irqsave(&ap->host_set->lock, flags); 2290 rc = ata_qc_issue(qc); 2291 spin_unlock_irqrestore(&ap->host_set->lock, flags); 2292 2293 if (rc) 2294 ata_port_disable(ap); 2295 else 2296 ata_qc_wait_err(qc, &wait); 2297 2298 DPRINTK("EXIT\n"); 2299} 2300 2301/** 2302 * ata_dev_reread_id - Reread the device identify device info 2303 * @ap: port where the device is 2304 * @dev: device to reread the identify device info 2305 * 2306 * LOCKING: 2307 */ 2308 2309static void ata_dev_reread_id(struct ata_port *ap, struct ata_device *dev) 2310{ 2311 DECLARE_COMPLETION(wait); 2312 struct ata_queued_cmd *qc; 2313 unsigned long flags; 2314 int rc; 2315 2316 qc = ata_qc_new_init(ap, dev); 2317 BUG_ON(qc == NULL); 2318 2319 ata_sg_init_one(qc, dev->id, sizeof(dev->id)); 2320 qc->dma_dir = DMA_FROM_DEVICE; 2321 2322 if (dev->class == ATA_DEV_ATA) { 2323 qc->tf.command = ATA_CMD_ID_ATA; 2324 DPRINTK("do ATA identify\n"); 2325 } else { 2326 qc->tf.command = ATA_CMD_ID_ATAPI; 2327 DPRINTK("do ATAPI identify\n"); 2328 } 2329 2330 qc->tf.flags |= ATA_TFLAG_DEVICE; 2331 qc->tf.protocol = ATA_PROT_PIO; 2332 qc->nsect = 1; 2333 2334 qc->waiting = &wait; 2335 qc->complete_fn = ata_qc_complete_noop; 2336 2337 spin_lock_irqsave(&ap->host_set->lock, flags); 2338 rc = ata_qc_issue(qc); 2339 spin_unlock_irqrestore(&ap->host_set->lock, flags); 2340 2341 if (rc) 2342 goto err_out; 2343 2344 ata_qc_wait_err(qc, &wait); 2345 2346 swap_buf_le16(dev->id, ATA_ID_WORDS); 2347 2348 ata_dump_id(dev); 2349 2350 DPRINTK("EXIT\n"); 2351 2352 return; 2353err_out: 2354 ata_port_disable(ap); 2355} 2356 2357/** 2358 * ata_dev_init_params - Issue INIT DEV PARAMS command 2359 * @ap: Port associated with device @dev 2360 * @dev: Device to which command will be sent 2361 * 2362 * LOCKING: 2363 */ 2364 2365static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev) 2366{ 2367 DECLARE_COMPLETION(wait); 2368 struct ata_queued_cmd *qc; 2369 int rc; 2370 unsigned long flags; 2371 u16 sectors = dev->id[6]; 2372 u16 heads = dev->id[3]; 2373 2374 /* Number of sectors per track 1-255. Number of heads 1-16 */ 2375 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16) 2376 return; 2377 2378 /* set up init dev params taskfile */ 2379 DPRINTK("init dev params \n"); 2380 2381 qc = ata_qc_new_init(ap, dev); 2382 BUG_ON(qc == NULL); 2383 2384 qc->tf.command = ATA_CMD_INIT_DEV_PARAMS; 2385 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 2386 qc->tf.protocol = ATA_PROT_NODATA; 2387 qc->tf.nsect = sectors; 2388 qc->tf.device |= (heads - 1) & 0x0f; /* max head = num. 
of heads - 1 */ 2389 2390 qc->waiting = &wait; 2391 qc->complete_fn = ata_qc_complete_noop; 2392 2393 spin_lock_irqsave(&ap->host_set->lock, flags); 2394 rc = ata_qc_issue(qc); 2395 spin_unlock_irqrestore(&ap->host_set->lock, flags); 2396 2397 if (rc) 2398 ata_port_disable(ap); 2399 else 2400 ata_qc_wait_err(qc, &wait); 2401 2402 DPRINTK("EXIT\n"); 2403} 2404 2405/** 2406 * ata_sg_clean - Unmap DMA memory associated with command 2407 * @qc: Command containing DMA memory to be released 2408 * 2409 * Unmap all mapped DMA memory associated with this command. 2410 * 2411 * LOCKING: 2412 * spin_lock_irqsave(host_set lock) 2413 */ 2414 2415static void ata_sg_clean(struct ata_queued_cmd *qc) 2416{ 2417 struct ata_port *ap = qc->ap; 2418 struct scatterlist *sg = qc->__sg; 2419 int dir = qc->dma_dir; 2420 void *pad_buf = NULL; 2421 2422 assert(qc->flags & ATA_QCFLAG_DMAMAP); 2423 assert(sg != NULL); 2424 2425 if (qc->flags & ATA_QCFLAG_SINGLE) 2426 assert(qc->n_elem == 1); 2427 2428 VPRINTK("unmapping %u sg elements\n", qc->n_elem); 2429 2430 /* if we padded the buffer out to 32-bit bound, and data 2431 * xfer direction is from-device, we must copy from the 2432 * pad buffer back into the supplied buffer 2433 */ 2434 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE)) 2435 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ); 2436 2437 if (qc->flags & ATA_QCFLAG_SG) { 2438 if (qc->n_elem) 2439 dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir); 2440 /* restore last sg */ 2441 sg[qc->orig_n_elem - 1].length += qc->pad_len; 2442 if (pad_buf) { 2443 struct scatterlist *psg = &qc->pad_sgent; 2444 void *addr = kmap_atomic(psg->page, KM_IRQ0); 2445 memcpy(addr + psg->offset, pad_buf, qc->pad_len); 2446 kunmap_atomic(addr, KM_IRQ0); 2447 } 2448 } else { 2449 if (sg_dma_len(&sg[0]) > 0) 2450 dma_unmap_single(ap->host_set->dev, 2451 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]), 2452 dir); 2453 /* restore sg */ 2454 sg->length += qc->pad_len; 2455 if (pad_buf) 2456 memcpy(qc->buf_virt + sg->length - qc->pad_len, 2457 pad_buf, qc->pad_len); 2458 } 2459 2460 qc->flags &= ~ATA_QCFLAG_DMAMAP; 2461 qc->__sg = NULL; 2462} 2463 2464/** 2465 * ata_fill_sg - Fill PCI IDE PRD table 2466 * @qc: Metadata associated with taskfile to be transferred 2467 * 2468 * Fill PCI IDE PRD (scatter-gather) table with segments 2469 * associated with the current disk command. 2470 * 2471 * LOCKING: 2472 * spin_lock_irqsave(host_set lock) 2473 * 2474 */ 2475static void ata_fill_sg(struct ata_queued_cmd *qc) 2476{ 2477 struct ata_port *ap = qc->ap; 2478 struct scatterlist *sg; 2479 unsigned int idx; 2480 2481 assert(qc->__sg != NULL); 2482 assert(qc->n_elem > 0); 2483 2484 idx = 0; 2485 ata_for_each_sg(sg, qc) { 2486 u32 addr, offset; 2487 u32 sg_len, len; 2488 2489 /* determine if physical DMA addr spans 64K boundary. 2490 * Note h/w doesn't support 64-bit, so we unconditionally 2491 * truncate dma_addr_t to u32. 
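 * Worked example (illustrative numbers): a 1 KiB segment at bus
 * address 0x0001ff00 sits at offset 0xff00 within its 64K region;
 * 0xff00 + 0x400 crosses 0x10000, so it is emitted as two PRD
 * entries -- 0x100 bytes at 0x0001ff00, then the remaining 0x300
 * bytes at 0x00020000.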
2492 */ 2493 addr = (u32) sg_dma_address(sg); 2494 sg_len = sg_dma_len(sg); 2495 2496 while (sg_len) { 2497 offset = addr & 0xffff; 2498 len = sg_len; 2499 if ((offset + sg_len) > 0x10000) 2500 len = 0x10000 - offset; 2501 2502 ap->prd[idx].addr = cpu_to_le32(addr); 2503 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff); 2504 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len); 2505 2506 idx++; 2507 sg_len -= len; 2508 addr += len; 2509 } 2510 } 2511 2512 if (idx) 2513 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); 2514} 2515/** 2516 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported 2517 * @qc: Metadata associated with taskfile to check 2518 * 2519 * Allow low-level driver to filter ATA PACKET commands, returning 2520 * a status indicating whether or not it is OK to use DMA for the 2521 * supplied PACKET command. 2522 * 2523 * LOCKING: 2524 * spin_lock_irqsave(host_set lock) 2525 * 2526 * RETURNS: 0 when ATAPI DMA can be used 2527 * nonzero otherwise 2528 */ 2529int ata_check_atapi_dma(struct ata_queued_cmd *qc) 2530{ 2531 struct ata_port *ap = qc->ap; 2532 int rc = 0; /* Assume ATAPI DMA is OK by default */ 2533 2534 if (ap->ops->check_atapi_dma) 2535 rc = ap->ops->check_atapi_dma(qc); 2536 2537 return rc; 2538} 2539/** 2540 * ata_qc_prep - Prepare taskfile for submission 2541 * @qc: Metadata associated with taskfile to be prepared 2542 * 2543 * Prepare ATA taskfile for submission. 2544 * 2545 * LOCKING: 2546 * spin_lock_irqsave(host_set lock) 2547 */ 2548void ata_qc_prep(struct ata_queued_cmd *qc) 2549{ 2550 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) 2551 return; 2552 2553 ata_fill_sg(qc); 2554} 2555 2556/** 2557 * ata_sg_init_one - Associate command with memory buffer 2558 * @qc: Command to be associated 2559 * @buf: Memory buffer 2560 * @buflen: Length of memory buffer, in bytes. 2561 * 2562 * Initialize the data-related elements of queued_cmd @qc 2563 * to point to a single memory buffer, @buf of byte length @buflen. 2564 * 2565 * LOCKING: 2566 * spin_lock_irqsave(host_set lock) 2567 */ 2568 2569void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen) 2570{ 2571 struct scatterlist *sg; 2572 2573 qc->flags |= ATA_QCFLAG_SINGLE; 2574 2575 memset(&qc->sgent, 0, sizeof(qc->sgent)); 2576 qc->__sg = &qc->sgent; 2577 qc->n_elem = 1; 2578 qc->orig_n_elem = 1; 2579 qc->buf_virt = buf; 2580 2581 sg = qc->__sg; 2582 sg_init_one(sg, buf, buflen); 2583} 2584 2585/** 2586 * ata_sg_init - Associate command with scatter-gather table. 2587 * @qc: Command to be associated 2588 * @sg: Scatter-gather table. 2589 * @n_elem: Number of elements in s/g table. 2590 * 2591 * Initialize the data-related elements of queued_cmd @qc 2592 * to point to a scatter-gather table @sg, containing @n_elem 2593 * elements. 2594 * 2595 * LOCKING: 2596 * spin_lock_irqsave(host_set lock) 2597 */ 2598 2599void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, 2600 unsigned int n_elem) 2601{ 2602 qc->flags |= ATA_QCFLAG_SG; 2603 qc->__sg = sg; 2604 qc->n_elem = n_elem; 2605 qc->orig_n_elem = n_elem; 2606} 2607 2608/** 2609 * ata_sg_setup_one - DMA-map the memory buffer associated with a command. 2610 * @qc: Command with memory buffer to be mapped. 2611 * 2612 * DMA-map the memory buffer associated with queued_cmd @qc. 2613 * 2614 * LOCKING: 2615 * spin_lock_irqsave(host_set lock) 2616 * 2617 * RETURNS: 2618 * Zero on success, negative on error. 
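 *
 * Worked example (illustrative): an ATAPI transfer of 510 bytes
 * gives pad_len = 510 & 3 = 2, so sg->length is trimmed to 508
 * and the final two bytes travel via the per-tag pad buffer at
 * ap->pad + qc->tag * ATA_DMA_PAD_SZ instead, keeping the DMA
 * request a whole number of 32-bit words.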
2619 */ 2620 2621static int ata_sg_setup_one(struct ata_queued_cmd *qc) 2622{ 2623 struct ata_port *ap = qc->ap; 2624 int dir = qc->dma_dir; 2625 struct scatterlist *sg = qc->__sg; 2626 dma_addr_t dma_address; 2627 2628 /* we must lengthen transfers to end on a 32-bit boundary */ 2629 qc->pad_len = sg->length & 3; 2630 if (qc->pad_len) { 2631 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ); 2632 struct scatterlist *psg = &qc->pad_sgent; 2633 2634 assert(qc->dev->class == ATA_DEV_ATAPI); 2635 2636 memset(pad_buf, 0, ATA_DMA_PAD_SZ); 2637 2638 if (qc->tf.flags & ATA_TFLAG_WRITE) 2639 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len, 2640 qc->pad_len); 2641 2642 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ); 2643 sg_dma_len(psg) = ATA_DMA_PAD_SZ; 2644 /* trim sg */ 2645 sg->length -= qc->pad_len; 2646 2647 DPRINTK("padding done, sg->length=%u pad_len=%u\n", 2648 sg->length, qc->pad_len); 2649 } 2650 2651 if (!sg->length) { 2652 sg_dma_address(sg) = 0; 2653 goto skip_map; 2654 } 2655 2656 dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt, 2657 sg->length, dir); 2658 if (dma_mapping_error(dma_address)) { 2659 /* restore sg */ 2660 sg->length += qc->pad_len; 2661 return -1; 2662 } 2663 2664 sg_dma_address(sg) = dma_address; 2665skip_map: 2666 sg_dma_len(sg) = sg->length; 2667 2668 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg), 2669 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); 2670 2671 return 0; 2672} 2673 2674/** 2675 * ata_sg_setup - DMA-map the scatter-gather table associated with a command. 2676 * @qc: Command with scatter-gather table to be mapped. 2677 * 2678 * DMA-map the scatter-gather table associated with queued_cmd @qc. 2679 * 2680 * LOCKING: 2681 * spin_lock_irqsave(host_set lock) 2682 * 2683 * RETURNS: 2684 * Zero on success, negative on error. 2685 * 2686 */ 2687 2688static int ata_sg_setup(struct ata_queued_cmd *qc) 2689{ 2690 struct ata_port *ap = qc->ap; 2691 struct scatterlist *sg = qc->__sg; 2692 struct scatterlist *lsg = &sg[qc->n_elem - 1]; 2693 int n_elem, pre_n_elem, dir, trim_sg = 0; 2694 2695 VPRINTK("ENTER, ata%u\n", ap->id); 2696 assert(qc->flags & ATA_QCFLAG_SG); 2697 2698 /* we must lengthen transfers to end on a 32-bit boundary */ 2699 qc->pad_len = lsg->length & 3; 2700 if (qc->pad_len) { 2701 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ); 2702 struct scatterlist *psg = &qc->pad_sgent; 2703 unsigned int offset; 2704 2705 assert(qc->dev->class == ATA_DEV_ATAPI); 2706 2707 memset(pad_buf, 0, ATA_DMA_PAD_SZ); 2708 2709 /* 2710 * psg->page/offset are used to copy to-be-written 2711 * data in this function or read data in ata_sg_clean. 
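 * Worked example (illustrative, assuming 4 KiB pages): with
 * lsg->offset = 4090, lsg->length = 10 and pad_len = 2, offset
 * below works out to 4098, so psg->page = nth_page(lsg->page, 1)
 * and psg->offset = 2 -- the pad bytes live on the following page.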
2712 */ 2713 offset = lsg->offset + lsg->length - qc->pad_len; 2714 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT); 2715 psg->offset = offset_in_page(offset); 2716 2717 if (qc->tf.flags & ATA_TFLAG_WRITE) { 2718 void *addr = kmap_atomic(psg->page, KM_IRQ0); 2719 memcpy(pad_buf, addr + psg->offset, qc->pad_len); 2720 kunmap_atomic(addr, KM_IRQ0); 2721 } 2722 2723 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ); 2724 sg_dma_len(psg) = ATA_DMA_PAD_SZ; 2725 /* trim last sg */ 2726 lsg->length -= qc->pad_len; 2727 if (lsg->length == 0) 2728 trim_sg = 1; 2729 2730 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n", 2731 qc->n_elem - 1, lsg->length, qc->pad_len); 2732 } 2733 2734 pre_n_elem = qc->n_elem; 2735 if (trim_sg && pre_n_elem) 2736 pre_n_elem--; 2737 2738 if (!pre_n_elem) { 2739 n_elem = 0; 2740 goto skip_map; 2741 } 2742 2743 dir = qc->dma_dir; 2744 n_elem = dma_map_sg(ap->host_set->dev, sg, pre_n_elem, dir); 2745 if (n_elem < 1) { 2746 /* restore last sg */ 2747 lsg->length += qc->pad_len; 2748 return -1; 2749 } 2750 2751 DPRINTK("%d sg elements mapped\n", n_elem); 2752 2753skip_map: 2754 qc->n_elem = n_elem; 2755 2756 return 0; 2757} 2758 2759/** 2760 * ata_poll_qc_complete - turn irq back on and finish qc 2761 * @qc: Command to complete 2762 * @err_mask: ATA status register content 2763 * 2764 * LOCKING: 2765 * None. (grabs host lock) 2766 */ 2767 2768void ata_poll_qc_complete(struct ata_queued_cmd *qc, unsigned int err_mask) 2769{ 2770 struct ata_port *ap = qc->ap; 2771 unsigned long flags; 2772 2773 spin_lock_irqsave(&ap->host_set->lock, flags); 2774 ap->flags &= ~ATA_FLAG_NOINTR; 2775 ata_irq_on(ap); 2776 ata_qc_complete(qc, err_mask); 2777 spin_unlock_irqrestore(&ap->host_set->lock, flags); 2778} 2779 2780/** 2781 * ata_pio_poll - 2782 * @ap: the target ata_port 2783 * 2784 * LOCKING: 2785 * None. (executing in kernel thread context) 2786 * 2787 * RETURNS: 2788 * timeout value to use 2789 */ 2790 2791static unsigned long ata_pio_poll(struct ata_port *ap) 2792{ 2793 u8 status; 2794 unsigned int poll_state = HSM_ST_UNKNOWN; 2795 unsigned int reg_state = HSM_ST_UNKNOWN; 2796 2797 switch (ap->hsm_task_state) { 2798 case HSM_ST: 2799 case HSM_ST_POLL: 2800 poll_state = HSM_ST_POLL; 2801 reg_state = HSM_ST; 2802 break; 2803 case HSM_ST_LAST: 2804 case HSM_ST_LAST_POLL: 2805 poll_state = HSM_ST_LAST_POLL; 2806 reg_state = HSM_ST_LAST; 2807 break; 2808 default: 2809 BUG(); 2810 break; 2811 } 2812 2813 status = ata_chk_status(ap); 2814 if (status & ATA_BUSY) { 2815 if (time_after(jiffies, ap->pio_task_timeout)) { 2816 ap->hsm_task_state = HSM_ST_TMOUT; 2817 return 0; 2818 } 2819 ap->hsm_task_state = poll_state; 2820 return ATA_SHORT_PAUSE; 2821 } 2822 2823 ap->hsm_task_state = reg_state; 2824 return 0; 2825} 2826 2827/** 2828 * ata_pio_complete - check if drive is busy or idle 2829 * @ap: the target ata_port 2830 * 2831 * LOCKING: 2832 * None. (executing in kernel thread context) 2833 * 2834 * RETURNS: 2835 * Non-zero if qc completed, zero otherwise. 2836 */ 2837 2838static int ata_pio_complete (struct ata_port *ap) 2839{ 2840 struct ata_queued_cmd *qc; 2841 u8 drv_stat; 2842 2843 /* 2844 * This is purely heuristic. This is a fast path. Sometimes when 2845 * we enter, BSY will be cleared in a chk-status or two. If not, 2846 * the drive is probably seeking or something. Snooze for a couple 2847 * msecs, then chk-status again. If still busy, fall back to 2848 * HSM_ST_POLL state. 
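 * (Rough timings, based on the udelay(10) poll loop inside
 * ata_busy_wait(): each call below gives the drive on the order
 * of 100 microseconds, separated by one 2 ms sleep, before we
 * hand off to timer-driven polling.)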
2849 */ 2850 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10); 2851 if (drv_stat & (ATA_BUSY | ATA_DRQ)) { 2852 msleep(2); 2853 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10); 2854 if (drv_stat & (ATA_BUSY | ATA_DRQ)) { 2855 ap->hsm_task_state = HSM_ST_LAST_POLL; 2856 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO; 2857 return 0; 2858 } 2859 } 2860 2861 drv_stat = ata_wait_idle(ap); 2862 if (!ata_ok(drv_stat)) { 2863 ap->hsm_task_state = HSM_ST_ERR; 2864 return 0; 2865 } 2866 2867 qc = ata_qc_from_tag(ap, ap->active_tag); 2868 assert(qc != NULL); 2869 2870 ap->hsm_task_state = HSM_ST_IDLE; 2871 2872 ata_poll_qc_complete(qc, 0); 2873 2874 /* another command may start at this point */ 2875 2876 return 1; 2877} 2878 2879 2880/** 2881 * swap_buf_le16 - swap halves of 16-words in place 2882 * @buf: Buffer to swap 2883 * @buf_words: Number of 16-bit words in buffer. 2884 * 2885 * Swap halves of 16-bit words if needed to convert from 2886 * little-endian byte order to native cpu byte order, or 2887 * vice-versa. 2888 * 2889 * LOCKING: 2890 * Inherited from caller. 2891 */ 2892void swap_buf_le16(u16 *buf, unsigned int buf_words) 2893{ 2894#ifdef __BIG_ENDIAN 2895 unsigned int i; 2896 2897 for (i = 0; i < buf_words; i++) 2898 buf[i] = le16_to_cpu(buf[i]); 2899#endif /* __BIG_ENDIAN */ 2900} 2901 2902/** 2903 * ata_mmio_data_xfer - Transfer data by MMIO 2904 * @ap: port to read/write 2905 * @buf: data buffer 2906 * @buflen: buffer length 2907 * @write_data: read/write 2908 * 2909 * Transfer data from/to the device data register by MMIO. 2910 * 2911 * LOCKING: 2912 * Inherited from caller. 2913 */ 2914 2915static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf, 2916 unsigned int buflen, int write_data) 2917{ 2918 unsigned int i; 2919 unsigned int words = buflen >> 1; 2920 u16 *buf16 = (u16 *) buf; 2921 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr; 2922 2923 /* Transfer multiple of 2 bytes */ 2924 if (write_data) { 2925 for (i = 0; i < words; i++) 2926 writew(le16_to_cpu(buf16[i]), mmio); 2927 } else { 2928 for (i = 0; i < words; i++) 2929 buf16[i] = cpu_to_le16(readw(mmio)); 2930 } 2931 2932 /* Transfer trailing 1 byte, if any. */ 2933 if (unlikely(buflen & 0x01)) { 2934 u16 align_buf[1] = { 0 }; 2935 unsigned char *trailing_buf = buf + buflen - 1; 2936 2937 if (write_data) { 2938 memcpy(align_buf, trailing_buf, 1); 2939 writew(le16_to_cpu(align_buf[0]), mmio); 2940 } else { 2941 align_buf[0] = cpu_to_le16(readw(mmio)); 2942 memcpy(trailing_buf, align_buf, 1); 2943 } 2944 } 2945} 2946 2947/** 2948 * ata_pio_data_xfer - Transfer data by PIO 2949 * @ap: port to read/write 2950 * @buf: data buffer 2951 * @buflen: buffer length 2952 * @write_data: read/write 2953 * 2954 * Transfer data from/to the device data register by PIO. 2955 * 2956 * LOCKING: 2957 * Inherited from caller. 2958 */ 2959 2960static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf, 2961 unsigned int buflen, int write_data) 2962{ 2963 unsigned int words = buflen >> 1; 2964 2965 /* Transfer multiple of 2 bytes */ 2966 if (write_data) 2967 outsw(ap->ioaddr.data_addr, buf, words); 2968 else 2969 insw(ap->ioaddr.data_addr, buf, words); 2970 2971 /* Transfer trailing 1 byte, if any. 
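 * E.g. an 11-byte buffer moves 5 full words through
 * outsw()/insw() above, and the final byte is bounced through
 * align_buf below so the device only ever sees 16-bit accesses.
 * (Illustrative example.)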
 */
	if (unlikely(buflen & 0x01)) {
		u16 align_buf[1] = { 0 };
		unsigned char *trailing_buf = buf + buflen - 1;

		if (write_data) {
			memcpy(align_buf, trailing_buf, 1);
			outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
		} else {
			align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
			memcpy(trailing_buf, align_buf, 1);
		}
	}
}

/**
 * ata_data_xfer - Transfer data from/to the data register.
 * @ap: port to read/write
 * @buf: data buffer
 * @buflen: buffer length
 * @do_write: read/write
 *
 * Transfer data from/to the device data register.
 *
 * LOCKING:
 * Inherited from caller.
 */

static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
			  unsigned int buflen, int do_write)
{
	if (ap->flags & ATA_FLAG_MMIO)
		ata_mmio_data_xfer(ap, buf, buflen, do_write);
	else
		ata_pio_data_xfer(ap, buf, buflen, do_write);
}

/**
 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
 * @qc: Command in progress
 *
 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
 *
 * LOCKING:
 * Inherited from caller.
 */

static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	if (qc->cursect == (qc->nsect - 1))
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg[qc->cursg].page;
	offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	buf = kmap(page) + offset;

	qc->cursect++;
	qc->cursg_ofs++;

	if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	/* do the actual data transfer */
	do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);

	kunmap(page);
}

/**
 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
 * @qc: Command in progress
 * @bytes: number of bytes
 *
 * Transfer data from/to the ATAPI device.
 *
 * LOCKING:
 * Inherited from caller.
 *
 */

static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count;

	if (qc->curbytes + bytes >= qc->nbytes)
		ap->hsm_task_state = HSM_ST_LAST;

next_sg:
	if (unlikely(qc->cursg >= qc->n_elem)) {
		/*
		 * The end of qc->sg is reached and the device expects
		 * more data to transfer.
		 * In order not to overrun qc->sg and fulfill the length
		 * specified in the byte count register,
		 * - for the read case, discard trailing data from the device
		 * - for the write case, pad zero data out to the device
		 */
		u16 pad_buf[1] = { 0 };
		unsigned int words = bytes >> 1;
		unsigned int i;

		if (words) /* warning if bytes > 1 */
			printk(KERN_WARNING "ata%u: %u bytes trailing data\n",
			       ap->id, bytes);

		for (i = 0; i < words; i++)
			ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write);

		ap->hsm_task_state = HSM_ST_LAST;
		return;
	}

	sg = &qc->__sg[qc->cursg];

	page = sg->page;
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	buf = kmap(page) + offset;

	bytes -= count;
	qc->curbytes += count;
	qc->cursg_ofs += count;

	if (qc->cursg_ofs == sg->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	/* do the actual data transfer */
	ata_data_xfer(ap, buf, count, do_write);

	kunmap(page);

	if (bytes)
		goto next_sg;
}

/**
 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
 * @qc: Command in progress
 *
 * Transfer data from/to the ATAPI device.
 *
 * LOCKING:
 * Inherited from caller.
 */

static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	ap->ops->tf_read(ap, &qc->tf);
	ireason = qc->tf.nsect;
	bc_lo = qc->tf.lbam;
	bc_hi = qc->tf.lbah;
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (ireason & (1 << 0))
		goto err_out;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (do_write != i_write)
		goto err_out;

	__atapi_pio_bytes(qc, bytes);

	return;

err_out:
	printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
	       ap->id, dev->devno);
	ap->hsm_task_state = HSM_ST_ERR;
}

/**
 * ata_pio_block - start PIO on a block
 * @ap: the target ata_port
 *
 * LOCKING:
 * None.  (executing in kernel thread context)
 */

static void ata_pio_block(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	u8 status;

	/*
	 * This is purely heuristic.  This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two.  If not, the drive is probably seeking
	 * or something.  Snooze for a couple msecs, then
	 * chk-status again.  If still busy, fall back to
	 * HSM_ST_POLL state.
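	 * (From here the HSM proceeds HSM_ST -> HSM_ST_LAST ->
	 * HSM_ST_IDLE on success, detours through HSM_ST_POLL or
	 * HSM_ST_LAST_POLL while the drive stays busy, and ends in
	 * HSM_ST_TMOUT or HSM_ST_ERR on failure; ata_pio_task() below
	 * is the dispatch loop.)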
3203 */ 3204 status = ata_busy_wait(ap, ATA_BUSY, 5); 3205 if (status & ATA_BUSY) { 3206 msleep(2); 3207 status = ata_busy_wait(ap, ATA_BUSY, 10); 3208 if (status & ATA_BUSY) { 3209 ap->hsm_task_state = HSM_ST_POLL; 3210 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO; 3211 return; 3212 } 3213 } 3214 3215 qc = ata_qc_from_tag(ap, ap->active_tag); 3216 assert(qc != NULL); 3217 3218 if (is_atapi_taskfile(&qc->tf)) { 3219 /* no more data to transfer or unsupported ATAPI command */ 3220 if ((status & ATA_DRQ) == 0) { 3221 ap->hsm_task_state = HSM_ST_LAST; 3222 return; 3223 } 3224 3225 atapi_pio_bytes(qc); 3226 } else { 3227 /* handle BSY=0, DRQ=0 as error */ 3228 if ((status & ATA_DRQ) == 0) { 3229 ap->hsm_task_state = HSM_ST_ERR; 3230 return; 3231 } 3232 3233 ata_pio_sector(qc); 3234 } 3235} 3236 3237static void ata_pio_error(struct ata_port *ap) 3238{ 3239 struct ata_queued_cmd *qc; 3240 3241 printk(KERN_WARNING "ata%u: PIO error\n", ap->id); 3242 3243 qc = ata_qc_from_tag(ap, ap->active_tag); 3244 assert(qc != NULL); 3245 3246 ap->hsm_task_state = HSM_ST_IDLE; 3247 3248 ata_poll_qc_complete(qc, AC_ERR_ATA_BUS); 3249} 3250 3251static void ata_pio_task(void *_data) 3252{ 3253 struct ata_port *ap = _data; 3254 unsigned long timeout; 3255 int qc_completed; 3256 3257fsm_start: 3258 timeout = 0; 3259 qc_completed = 0; 3260 3261 switch (ap->hsm_task_state) { 3262 case HSM_ST_IDLE: 3263 return; 3264 3265 case HSM_ST: 3266 ata_pio_block(ap); 3267 break; 3268 3269 case HSM_ST_LAST: 3270 qc_completed = ata_pio_complete(ap); 3271 break; 3272 3273 case HSM_ST_POLL: 3274 case HSM_ST_LAST_POLL: 3275 timeout = ata_pio_poll(ap); 3276 break; 3277 3278 case HSM_ST_TMOUT: 3279 case HSM_ST_ERR: 3280 ata_pio_error(ap); 3281 return; 3282 } 3283 3284 if (timeout) 3285 queue_delayed_work(ata_wq, &ap->pio_task, timeout); 3286 else if (!qc_completed) 3287 goto fsm_start; 3288} 3289 3290/** 3291 * ata_qc_timeout - Handle timeout of queued command 3292 * @qc: Command that timed out 3293 * 3294 * Some part of the kernel (currently, only the SCSI layer) 3295 * has noticed that the active command on port @ap has not 3296 * completed after a specified length of time. Handle this 3297 * condition by disabling DMA (if necessary) and completing 3298 * transactions, with error if necessary. 3299 * 3300 * This also handles the case of the "lost interrupt", where 3301 * for some reason (possibly hardware bug, possibly driver bug) 3302 * an interrupt was not delivered to the driver, even though the 3303 * transaction completed successfully. 3304 * 3305 * LOCKING: 3306 * Inherited from SCSI layer (none, can sleep) 3307 */ 3308 3309static void ata_qc_timeout(struct ata_queued_cmd *qc) 3310{ 3311 struct ata_port *ap = qc->ap; 3312 struct ata_host_set *host_set = ap->host_set; 3313 u8 host_stat = 0, drv_stat; 3314 unsigned long flags; 3315 3316 DPRINTK("ENTER\n"); 3317 3318 spin_lock_irqsave(&host_set->lock, flags); 3319 3320 /* hack alert! We cannot use the supplied completion 3321 * function from inside the ->eh_strategy_handler() thread. 3322 * libata is the only user of ->eh_strategy_handler() in 3323 * any kernel, so the default scsi_done() assumes it is 3324 * not being called from the SCSI EH. 
3325 */ 3326 qc->scsidone = scsi_finish_command; 3327 3328 switch (qc->tf.protocol) { 3329 3330 case ATA_PROT_DMA: 3331 case ATA_PROT_ATAPI_DMA: 3332 host_stat = ap->ops->bmdma_status(ap); 3333 3334 /* before we do anything else, clear DMA-Start bit */ 3335 ap->ops->bmdma_stop(qc); 3336 3337 /* fall through */ 3338 3339 default: 3340 ata_altstatus(ap); 3341 drv_stat = ata_chk_status(ap); 3342 3343 /* ack bmdma irq events */ 3344 ap->ops->irq_clear(ap); 3345 3346 printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n", 3347 ap->id, qc->tf.command, drv_stat, host_stat); 3348 3349 /* complete taskfile transaction */ 3350 ata_qc_complete(qc, ac_err_mask(drv_stat)); 3351 break; 3352 } 3353 3354 spin_unlock_irqrestore(&host_set->lock, flags); 3355 3356 DPRINTK("EXIT\n"); 3357} 3358 3359/** 3360 * ata_eng_timeout - Handle timeout of queued command 3361 * @ap: Port on which timed-out command is active 3362 * 3363 * Some part of the kernel (currently, only the SCSI layer) 3364 * has noticed that the active command on port @ap has not 3365 * completed after a specified length of time. Handle this 3366 * condition by disabling DMA (if necessary) and completing 3367 * transactions, with error if necessary. 3368 * 3369 * This also handles the case of the "lost interrupt", where 3370 * for some reason (possibly hardware bug, possibly driver bug) 3371 * an interrupt was not delivered to the driver, even though the 3372 * transaction completed successfully. 3373 * 3374 * LOCKING: 3375 * Inherited from SCSI layer (none, can sleep) 3376 */ 3377 3378void ata_eng_timeout(struct ata_port *ap) 3379{ 3380 struct ata_queued_cmd *qc; 3381 3382 DPRINTK("ENTER\n"); 3383 3384 qc = ata_qc_from_tag(ap, ap->active_tag); 3385 if (qc) 3386 ata_qc_timeout(qc); 3387 else { 3388 printk(KERN_ERR "ata%u: BUG: timeout without command\n", 3389 ap->id); 3390 goto out; 3391 } 3392 3393out: 3394 DPRINTK("EXIT\n"); 3395} 3396 3397/** 3398 * ata_qc_new - Request an available ATA command, for queueing 3399 * @ap: Port associated with device @dev 3400 * @dev: Device from whom we request an available command structure 3401 * 3402 * LOCKING: 3403 * None. 3404 */ 3405 3406static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap) 3407{ 3408 struct ata_queued_cmd *qc = NULL; 3409 unsigned int i; 3410 3411 for (i = 0; i < ATA_MAX_QUEUE; i++) 3412 if (!test_and_set_bit(i, &ap->qactive)) { 3413 qc = ata_qc_from_tag(ap, i); 3414 break; 3415 } 3416 3417 if (qc) 3418 qc->tag = i; 3419 3420 return qc; 3421} 3422 3423/** 3424 * ata_qc_new_init - Request an available ATA command, and initialize it 3425 * @ap: Port associated with device @dev 3426 * @dev: Device from whom we request an available command structure 3427 * 3428 * LOCKING: 3429 * None. 
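 *
 * Typical internal calling sequence, as in ata_dev_set_xfermode()
 * above (sketch only -- locking and error handling elided):
 *
 *     qc = ata_qc_new_init(ap, dev);
 *     qc->tf.command = ATA_CMD_SET_FEATURES;  (fill in taskfile)
 *     qc->waiting = &wait;
 *     qc->complete_fn = ata_qc_complete_noop;
 *     rc = ata_qc_issue(qc);                  (host_set lock held)
 *     ata_qc_wait_err(qc, &wait);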
3430 */ 3431 3432struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, 3433 struct ata_device *dev) 3434{ 3435 struct ata_queued_cmd *qc; 3436 3437 qc = ata_qc_new(ap); 3438 if (qc) { 3439 qc->scsicmd = NULL; 3440 qc->ap = ap; 3441 qc->dev = dev; 3442 3443 ata_qc_reinit(qc); 3444 } 3445 3446 return qc; 3447} 3448 3449int ata_qc_complete_noop(struct ata_queued_cmd *qc, unsigned int err_mask) 3450{ 3451 return 0; 3452} 3453 3454static void __ata_qc_complete(struct ata_queued_cmd *qc) 3455{ 3456 struct ata_port *ap = qc->ap; 3457 unsigned int tag, do_clear = 0; 3458 3459 qc->flags = 0; 3460 tag = qc->tag; 3461 if (likely(ata_tag_valid(tag))) { 3462 if (tag == ap->active_tag) 3463 ap->active_tag = ATA_TAG_POISON; 3464 qc->tag = ATA_TAG_POISON; 3465 do_clear = 1; 3466 } 3467 3468 if (qc->waiting) { 3469 struct completion *waiting = qc->waiting; 3470 qc->waiting = NULL; 3471 complete(waiting); 3472 } 3473 3474 if (likely(do_clear)) 3475 clear_bit(tag, &ap->qactive); 3476} 3477 3478/** 3479 * ata_qc_free - free unused ata_queued_cmd 3480 * @qc: Command to complete 3481 * 3482 * Designed to free unused ata_queued_cmd object 3483 * in case something prevents using it. 3484 * 3485 * LOCKING: 3486 * spin_lock_irqsave(host_set lock) 3487 */ 3488void ata_qc_free(struct ata_queued_cmd *qc) 3489{ 3490 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */ 3491 assert(qc->waiting == NULL); /* nothing should be waiting */ 3492 3493 __ata_qc_complete(qc); 3494} 3495 3496/** 3497 * ata_qc_complete - Complete an active ATA command 3498 * @qc: Command to complete 3499 * @err_mask: ATA Status register contents 3500 * 3501 * Indicate to the mid and upper layers that an ATA 3502 * command has completed, with either an ok or not-ok status. 3503 * 3504 * LOCKING: 3505 * spin_lock_irqsave(host_set lock) 3506 */ 3507 3508void ata_qc_complete(struct ata_queued_cmd *qc, unsigned int err_mask) 3509{ 3510 int rc; 3511 3512 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */ 3513 assert(qc->flags & ATA_QCFLAG_ACTIVE); 3514 3515 if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) 3516 ata_sg_clean(qc); 3517 3518 /* atapi: mark qc as inactive to prevent the interrupt handler 3519 * from completing the command twice later, before the error handler 3520 * is called. (when rc != 0 and atapi request sense is needed) 3521 */ 3522 qc->flags &= ~ATA_QCFLAG_ACTIVE; 3523 3524 /* call completion callback */ 3525 rc = qc->complete_fn(qc, err_mask); 3526 3527 /* if callback indicates not to complete command (non-zero), 3528 * return immediately 3529 */ 3530 if (rc != 0) 3531 return; 3532 3533 __ata_qc_complete(qc); 3534 3535 VPRINTK("EXIT\n"); 3536} 3537 3538static inline int ata_should_dma_map(struct ata_queued_cmd *qc) 3539{ 3540 struct ata_port *ap = qc->ap; 3541 3542 switch (qc->tf.protocol) { 3543 case ATA_PROT_DMA: 3544 case ATA_PROT_ATAPI_DMA: 3545 return 1; 3546 3547 case ATA_PROT_ATAPI: 3548 case ATA_PROT_PIO: 3549 case ATA_PROT_PIO_MULT: 3550 if (ap->flags & ATA_FLAG_PIO_DMA) 3551 return 1; 3552 3553 /* fall through */ 3554 3555 default: 3556 return 0; 3557 } 3558 3559 /* never reached */ 3560} 3561 3562/** 3563 * ata_qc_issue - issue taskfile to device 3564 * @qc: command to issue to device 3565 * 3566 * Prepare an ATA command to submission to device. 3567 * This includes mapping the data into a DMA-able 3568 * area, filling in the S/G table, and finally 3569 * writing the taskfile to hardware, starting the command. 
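 *
 * Whether any mapping happens at all is decided by
 * ata_should_dma_map() below: the DMA protocols always map, the
 * PIO/ATAPI protocols map only if the port advertises
 * ATA_FLAG_PIO_DMA, and everything else runs with
 * ATA_QCFLAG_DMAMAP cleared.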
3570 * 3571 * LOCKING: 3572 * spin_lock_irqsave(host_set lock) 3573 * 3574 * RETURNS: 3575 * Zero on success, negative on error. 3576 */ 3577 3578int ata_qc_issue(struct ata_queued_cmd *qc) 3579{ 3580 struct ata_port *ap = qc->ap; 3581 3582 if (ata_should_dma_map(qc)) { 3583 if (qc->flags & ATA_QCFLAG_SG) { 3584 if (ata_sg_setup(qc)) 3585 goto err_out; 3586 } else if (qc->flags & ATA_QCFLAG_SINGLE) { 3587 if (ata_sg_setup_one(qc)) 3588 goto err_out; 3589 } 3590 } else { 3591 qc->flags &= ~ATA_QCFLAG_DMAMAP; 3592 } 3593 3594 ap->ops->qc_prep(qc); 3595 3596 qc->ap->active_tag = qc->tag; 3597 qc->flags |= ATA_QCFLAG_ACTIVE; 3598 3599 return ap->ops->qc_issue(qc); 3600 3601err_out: 3602 return -1; 3603} 3604 3605 3606/** 3607 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner 3608 * @qc: command to issue to device 3609 * 3610 * Using various libata functions and hooks, this function 3611 * starts an ATA command. ATA commands are grouped into 3612 * classes called "protocols", and issuing each type of protocol 3613 * is slightly different. 3614 * 3615 * May be used as the qc_issue() entry in ata_port_operations. 3616 * 3617 * LOCKING: 3618 * spin_lock_irqsave(host_set lock) 3619 * 3620 * RETURNS: 3621 * Zero on success, negative on error. 3622 */ 3623 3624int ata_qc_issue_prot(struct ata_queued_cmd *qc) 3625{ 3626 struct ata_port *ap = qc->ap; 3627 3628 ata_dev_select(ap, qc->dev->devno, 1, 0); 3629 3630 switch (qc->tf.protocol) { 3631 case ATA_PROT_NODATA: 3632 ata_tf_to_host(ap, &qc->tf); 3633 break; 3634 3635 case ATA_PROT_DMA: 3636 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ 3637 ap->ops->bmdma_setup(qc); /* set up bmdma */ 3638 ap->ops->bmdma_start(qc); /* initiate bmdma */ 3639 break; 3640 3641 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */ 3642 ata_qc_set_polling(qc); 3643 ata_tf_to_host(ap, &qc->tf); 3644 ap->hsm_task_state = HSM_ST; 3645 queue_work(ata_wq, &ap->pio_task); 3646 break; 3647 3648 case ATA_PROT_ATAPI: 3649 ata_qc_set_polling(qc); 3650 ata_tf_to_host(ap, &qc->tf); 3651 queue_work(ata_wq, &ap->packet_task); 3652 break; 3653 3654 case ATA_PROT_ATAPI_NODATA: 3655 ap->flags |= ATA_FLAG_NOINTR; 3656 ata_tf_to_host(ap, &qc->tf); 3657 queue_work(ata_wq, &ap->packet_task); 3658 break; 3659 3660 case ATA_PROT_ATAPI_DMA: 3661 ap->flags |= ATA_FLAG_NOINTR; 3662 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ 3663 ap->ops->bmdma_setup(qc); /* set up bmdma */ 3664 queue_work(ata_wq, &ap->packet_task); 3665 break; 3666 3667 default: 3668 WARN_ON(1); 3669 return -1; 3670 } 3671 3672 return 0; 3673} 3674 3675/** 3676 * ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction 3677 * @qc: Info associated with this ATA transaction. 3678 * 3679 * LOCKING: 3680 * spin_lock_irqsave(host_set lock) 3681 */ 3682 3683static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc) 3684{ 3685 struct ata_port *ap = qc->ap; 3686 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); 3687 u8 dmactl; 3688 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr; 3689 3690 /* load PRD table addr. 
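 * Per the SFF-8038i bus-master layout this code assumes:
 * command register at +0 (ATA_DMA_CMD), status at +2
 * (ATA_DMA_STATUS), and the 32-bit PRD table pointer at +4
 * (ATA_DMA_TABLE_OFS), which the writel() below programs.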
 */
	mb();	/* make sure PRD table writes are visible to controller */
	writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = readb(mmio + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	if (!rw)
		dmactl |= ATA_DMA_WR;
	writeb(dmactl, mmio + ATA_DMA_CMD);

	/* issue r/w command */
	ap->ops->exec_command(ap, &qc->tf);
}

/**
 * ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction
 * @qc: Info associated with this ATA transaction.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */

static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
	u8 dmactl;

	/* start host DMA transaction */
	dmactl = readb(mmio + ATA_DMA_CMD);
	writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);

	/* Strictly, one may wish to issue a readb() here, to
	 * flush the mmio write.  However, control also passes
	 * to the hardware at this point, and it will interrupt
	 * us when we are to resume control.  So, in effect,
	 * we don't care when the mmio write flushes.
	 * Further, a read of the DMA status register _immediately_
	 * following the write may not be what certain flaky hardware
	 * expects, so it is best not to add a readb() without first
	 * testing all the MMIO ATA cards/mobos.
	 * Or maybe I'm just being paranoid.
	 */
}

/**
 * ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
 * @qc: Info associated with this ATA transaction.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */

static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u8 dmactl;

	/* load PRD table addr. */
	outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	if (!rw)
		dmactl |= ATA_DMA_WR;
	outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* issue r/w command */
	ap->ops->exec_command(ap, &qc->tf);
}

/**
 * ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
 * @qc: Info associated with this ATA transaction.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */

static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	u8 dmactl;

	/* start host DMA transaction */
	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	outb(dmactl | ATA_DMA_START,
	     ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}


/**
 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
 * @qc: Info associated with this ATA transaction.
 *
 * Writes the ATA_DMA_START flag to the DMA command register.
 *
 * May be used as the bmdma_start() entry in ata_port_operations.
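 *
 * A hypothetical low-level driver built purely on these helpers
 * would wire them up roughly like this (sketch only; real drivers
 * set many more ata_port_operations entries):
 *
 *     static const struct ata_port_operations foo_ops = {
 *             .bmdma_setup    = ata_bmdma_setup,
 *             .bmdma_start    = ata_bmdma_start,
 *             .bmdma_stop     = ata_bmdma_stop,
 *             .bmdma_status   = ata_bmdma_status,
 *             .irq_clear      = ata_bmdma_irq_clear,
 *             .irq_handler    = ata_interrupt,
 *             .qc_prep        = ata_qc_prep,
 *             .qc_issue       = ata_qc_issue_prot,
 *             .port_start     = ata_port_start,
 *             .port_stop      = ata_port_stop,
 *     };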
3791 * 3792 * LOCKING: 3793 * spin_lock_irqsave(host_set lock) 3794 */ 3795void ata_bmdma_start(struct ata_queued_cmd *qc) 3796{ 3797 if (qc->ap->flags & ATA_FLAG_MMIO) 3798 ata_bmdma_start_mmio(qc); 3799 else 3800 ata_bmdma_start_pio(qc); 3801} 3802 3803 3804/** 3805 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction 3806 * @qc: Info associated with this ATA transaction. 3807 * 3808 * Writes address of PRD table to device's PRD Table Address 3809 * register, sets the DMA control register, and calls 3810 * ops->exec_command() to start the transfer. 3811 * 3812 * May be used as the bmdma_setup() entry in ata_port_operations. 3813 * 3814 * LOCKING: 3815 * spin_lock_irqsave(host_set lock) 3816 */ 3817void ata_bmdma_setup(struct ata_queued_cmd *qc) 3818{ 3819 if (qc->ap->flags & ATA_FLAG_MMIO) 3820 ata_bmdma_setup_mmio(qc); 3821 else 3822 ata_bmdma_setup_pio(qc); 3823} 3824 3825 3826/** 3827 * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt. 3828 * @ap: Port associated with this ATA transaction. 3829 * 3830 * Clear interrupt and error flags in DMA status register. 3831 * 3832 * May be used as the irq_clear() entry in ata_port_operations. 3833 * 3834 * LOCKING: 3835 * spin_lock_irqsave(host_set lock) 3836 */ 3837 3838void ata_bmdma_irq_clear(struct ata_port *ap) 3839{ 3840 if (ap->flags & ATA_FLAG_MMIO) { 3841 void __iomem *mmio = ((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS; 3842 writeb(readb(mmio), mmio); 3843 } else { 3844 unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS; 3845 outb(inb(addr), addr); 3846 } 3847 3848} 3849 3850 3851/** 3852 * ata_bmdma_status - Read PCI IDE BMDMA status 3853 * @ap: Port associated with this ATA transaction. 3854 * 3855 * Read and return BMDMA status register. 3856 * 3857 * May be used as the bmdma_status() entry in ata_port_operations. 3858 * 3859 * LOCKING: 3860 * spin_lock_irqsave(host_set lock) 3861 */ 3862 3863u8 ata_bmdma_status(struct ata_port *ap) 3864{ 3865 u8 host_stat; 3866 if (ap->flags & ATA_FLAG_MMIO) { 3867 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr; 3868 host_stat = readb(mmio + ATA_DMA_STATUS); 3869 } else 3870 host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); 3871 return host_stat; 3872} 3873 3874 3875/** 3876 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer 3877 * @qc: Command we are ending DMA for 3878 * 3879 * Clears the ATA_DMA_START flag in the dma control register 3880 * 3881 * May be used as the bmdma_stop() entry in ata_port_operations. 3882 * 3883 * LOCKING: 3884 * spin_lock_irqsave(host_set lock) 3885 */ 3886 3887void ata_bmdma_stop(struct ata_queued_cmd *qc) 3888{ 3889 struct ata_port *ap = qc->ap; 3890 if (ap->flags & ATA_FLAG_MMIO) { 3891 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr; 3892 3893 /* clear start/stop bit */ 3894 writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START, 3895 mmio + ATA_DMA_CMD); 3896 } else { 3897 /* clear start/stop bit */ 3898 outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START, 3899 ap->ioaddr.bmdma_addr + ATA_DMA_CMD); 3900 } 3901 3902 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ 3903 ata_altstatus(ap); /* dummy read */ 3904} 3905 3906/** 3907 * ata_host_intr - Handle host interrupt for given (port, task) 3908 * @ap: Port on which interrupt arrived (possibly...) 3909 * @qc: Taskfile currently active in engine 3910 * 3911 * Handle host interrupt for given queued command. Currently, 3912 * only DMA interrupts are handled. 
All other commands are 3913 * handled via polling with interrupts disabled (nIEN bit). 3914 * 3915 * LOCKING: 3916 * spin_lock_irqsave(host_set lock) 3917 * 3918 * RETURNS: 3919 * One if interrupt was handled, zero if not (shared irq). 3920 */ 3921 3922inline unsigned int ata_host_intr (struct ata_port *ap, 3923 struct ata_queued_cmd *qc) 3924{ 3925 u8 status, host_stat; 3926 3927 switch (qc->tf.protocol) { 3928 3929 case ATA_PROT_DMA: 3930 case ATA_PROT_ATAPI_DMA: 3931 case ATA_PROT_ATAPI: 3932 /* check status of DMA engine */ 3933 host_stat = ap->ops->bmdma_status(ap); 3934 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat); 3935 3936 /* if it's not our irq... */ 3937 if (!(host_stat & ATA_DMA_INTR)) 3938 goto idle_irq; 3939 3940 /* before we do anything else, clear DMA-Start bit */ 3941 ap->ops->bmdma_stop(qc); 3942 3943 /* fall through */ 3944 3945 case ATA_PROT_ATAPI_NODATA: 3946 case ATA_PROT_NODATA: 3947 /* check altstatus */ 3948 status = ata_altstatus(ap); 3949 if (status & ATA_BUSY) 3950 goto idle_irq; 3951 3952 /* check main status, clearing INTRQ */ 3953 status = ata_chk_status(ap); 3954 if (unlikely(status & ATA_BUSY)) 3955 goto idle_irq; 3956 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n", 3957 ap->id, qc->tf.protocol, status); 3958 3959 /* ack bmdma irq events */ 3960 ap->ops->irq_clear(ap); 3961 3962 /* complete taskfile transaction */ 3963 ata_qc_complete(qc, ac_err_mask(status)); 3964 break; 3965 3966 default: 3967 goto idle_irq; 3968 } 3969 3970 return 1; /* irq handled */ 3971 3972idle_irq: 3973 ap->stats.idle_irq++; 3974 3975#ifdef ATA_IRQ_TRAP 3976 if ((ap->stats.idle_irq % 1000) == 0) { 3977 handled = 1; 3978 ata_irq_ack(ap, 0); /* debug trap */ 3979 printk(KERN_WARNING "ata%d: irq trap\n", ap->id); 3980 } 3981#endif 3982 return 0; /* irq not handled */ 3983} 3984 3985/** 3986 * ata_interrupt - Default ATA host interrupt handler 3987 * @irq: irq line (unused) 3988 * @dev_instance: pointer to our ata_host_set information structure 3989 * @regs: unused 3990 * 3991 * Default interrupt handler for PCI IDE devices. Calls 3992 * ata_host_intr() for each port that is not disabled. 3993 * 3994 * LOCKING: 3995 * Obtains host_set lock during operation. 3996 * 3997 * RETURNS: 3998 * IRQ_NONE or IRQ_HANDLED. 3999 */ 4000 4001irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs) 4002{ 4003 struct ata_host_set *host_set = dev_instance; 4004 unsigned int i; 4005 unsigned int handled = 0; 4006 unsigned long flags; 4007 4008 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */ 4009 spin_lock_irqsave(&host_set->lock, flags); 4010 4011 for (i = 0; i < host_set->n_ports; i++) { 4012 struct ata_port *ap; 4013 4014 ap = host_set->ports[i]; 4015 if (ap && 4016 !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) { 4017 struct ata_queued_cmd *qc; 4018 4019 qc = ata_qc_from_tag(ap, ap->active_tag); 4020 if (qc && (!(qc->tf.ctl & ATA_NIEN)) && 4021 (qc->flags & ATA_QCFLAG_ACTIVE)) 4022 handled |= ata_host_intr(ap, qc); 4023 } 4024 } 4025 4026 spin_unlock_irqrestore(&host_set->lock, flags); 4027 4028 return IRQ_RETVAL(handled); 4029} 4030 4031/** 4032 * atapi_packet_task - Write CDB bytes to hardware 4033 * @_data: Port to which ATAPI device is attached. 4034 * 4035 * When device has indicated its readiness to accept 4036 * a CDB, this function is called. Send the CDB. 4037 * If DMA is to be performed, exit immediately. 4038 * Otherwise, we are in polling mode, so poll 4039 * status under operation succeeds or fails. 
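 *
 * In short: sleep-wait for !BSY via ata_busy_sleep(), verify DRQ
 * is set, push the CDB out with ata_data_xfer(), then either kick
 * bmdma and let the irq handler finish (DMA protocols) or fall
 * into the HSM_ST polling path on ata_wq (PIO).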
4040 * 4041 * LOCKING: 4042 * Kernel thread context (may sleep) 4043 */ 4044 4045static void atapi_packet_task(void *_data) 4046{ 4047 struct ata_port *ap = _data; 4048 struct ata_queued_cmd *qc; 4049 u8 status; 4050 4051 qc = ata_qc_from_tag(ap, ap->active_tag); 4052 assert(qc != NULL); 4053 assert(qc->flags & ATA_QCFLAG_ACTIVE); 4054 4055 /* sleep-wait for BSY to clear */ 4056 DPRINTK("busy wait\n"); 4057 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) 4058 goto err_out_status; 4059 4060 /* make sure DRQ is set */ 4061 status = ata_chk_status(ap); 4062 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ) 4063 goto err_out; 4064 4065 /* send SCSI cdb */ 4066 DPRINTK("send cdb\n"); 4067 assert(ap->cdb_len >= 12); 4068 4069 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA || 4070 qc->tf.protocol == ATA_PROT_ATAPI_NODATA) { 4071 unsigned long flags; 4072 4073 /* Once we're done issuing command and kicking bmdma, 4074 * irq handler takes over. To not lose irq, we need 4075 * to clear NOINTR flag before sending cdb, but 4076 * interrupt handler shouldn't be invoked before we're 4077 * finished. Hence, the following locking. 4078 */ 4079 spin_lock_irqsave(&ap->host_set->lock, flags); 4080 ap->flags &= ~ATA_FLAG_NOINTR; 4081 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1); 4082 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA) 4083 ap->ops->bmdma_start(qc); /* initiate bmdma */ 4084 spin_unlock_irqrestore(&ap->host_set->lock, flags); 4085 } else { 4086 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1); 4087 4088 /* PIO commands are handled by polling */ 4089 ap->hsm_task_state = HSM_ST; 4090 queue_work(ata_wq, &ap->pio_task); 4091 } 4092 4093 return; 4094 4095err_out_status: 4096 status = ata_chk_status(ap); 4097err_out: 4098 ata_poll_qc_complete(qc, __ac_err_mask(status)); 4099} 4100 4101 4102/** 4103 * ata_port_start - Set port up for dma. 4104 * @ap: Port to initialize 4105 * 4106 * Called just after data structures for each port are 4107 * initialized. Allocates space for PRD table. 4108 * 4109 * May be used as the port_start() entry in ata_port_operations. 4110 * 4111 * LOCKING: 4112 * Inherited from caller. 4113 */ 4114 4115int ata_port_start (struct ata_port *ap) 4116{ 4117 struct device *dev = ap->host_set->dev; 4118 int rc; 4119 4120 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL); 4121 if (!ap->prd) 4122 return -ENOMEM; 4123 4124 rc = ata_pad_alloc(ap, dev); 4125 if (rc) { 4126 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma); 4127 return rc; 4128 } 4129 4130 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma); 4131 4132 return 0; 4133} 4134 4135 4136/** 4137 * ata_port_stop - Undo ata_port_start() 4138 * @ap: Port to shut down 4139 * 4140 * Frees the PRD table. 4141 * 4142 * May be used as the port_stop() entry in ata_port_operations. 4143 * 4144 * LOCKING: 4145 * Inherited from caller. 4146 */ 4147 4148void ata_port_stop (struct ata_port *ap) 4149{ 4150 struct device *dev = ap->host_set->dev; 4151 4152 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma); 4153 ata_pad_free(ap, dev); 4154} 4155 4156void ata_host_stop (struct ata_host_set *host_set) 4157{ 4158 if (host_set->mmio_base) 4159 iounmap(host_set->mmio_base); 4160} 4161 4162 4163/** 4164 * ata_host_remove - Unregister SCSI host structure with upper layers 4165 * @ap: Port to unregister 4166 * @do_unregister: 1 if we fully unregister, 0 to just stop the port 4167 * 4168 * LOCKING: 4169 * Inherited from caller. 
/**
 * ata_host_remove - Unregister SCSI host structure with upper layers
 * @ap: Port to unregister
 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
 *
 * LOCKING:
 * Inherited from caller.
 */

static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
{
        struct Scsi_Host *sh = ap->host;

        DPRINTK("ENTER\n");

        if (do_unregister)
                scsi_remove_host(sh);

        ap->ops->port_stop(ap);
}

/**
 * ata_host_init - Initialize an ata_port structure
 * @ap: Structure to initialize
 * @host: associated SCSI mid-layer structure
 * @host_set: Collection of hosts to which @ap belongs
 * @ent: Probe information provided by low-level driver
 * @port_no: Port number associated with this ata_port
 *
 * Initialize a new ata_port structure, and its associated
 * scsi_host.
 *
 * LOCKING:
 * Inherited from caller.
 */

static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
                          struct ata_host_set *host_set,
                          const struct ata_probe_ent *ent, unsigned int port_no)
{
        unsigned int i;

        host->max_id = 16;
        host->max_lun = 1;
        host->max_channel = 1;
        host->unique_id = ata_unique_id++;
        host->max_cmd_len = 12;

        ap->flags = ATA_FLAG_PORT_DISABLED;
        ap->id = host->unique_id;
        ap->host = host;
        ap->ctl = ATA_DEVCTL_OBS;
        ap->host_set = host_set;
        ap->port_no = port_no;
        ap->hard_port_no =
                ent->legacy_mode ? ent->hard_port_no : port_no;
        ap->pio_mask = ent->pio_mask;
        ap->mwdma_mask = ent->mwdma_mask;
        ap->udma_mask = ent->udma_mask;
        ap->flags |= ent->host_flags;
        ap->ops = ent->port_ops;
        ap->cbl = ATA_CBL_NONE;
        ap->active_tag = ATA_TAG_POISON;
        ap->last_ctl = 0xFF;

        INIT_WORK(&ap->packet_task, atapi_packet_task, ap);
        INIT_WORK(&ap->pio_task, ata_pio_task, ap);

        for (i = 0; i < ATA_MAX_DEVICES; i++)
                ap->device[i].devno = i;

#ifdef ATA_IRQ_TRAP
        ap->stats.unhandled_irq = 1;
        ap->stats.idle_irq = 1;
#endif

        memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
}

/**
 * ata_host_add - Attach low-level ATA driver to system
 * @ent: Information provided by low-level driver
 * @host_set: Collection of ports to which we add
 * @port_no: Port number associated with this host
 *
 * Attach low-level ATA driver to system.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 * RETURNS:
 * New ata_port on success, or NULL on error.
 */

static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
                                      struct ata_host_set *host_set,
                                      unsigned int port_no)
{
        struct Scsi_Host *host;
        struct ata_port *ap;
        int rc;

        DPRINTK("ENTER\n");
        host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
        if (!host)
                return NULL;

        ap = (struct ata_port *) &host->hostdata[0];

        ata_host_init(ap, host, host_set, ent, port_no);

        rc = ap->ops->port_start(ap);
        if (rc)
                goto err_out;

        return ap;

err_out:
        scsi_host_put(host);
        return NULL;
}
/**
 * ata_device_add - Register hardware device with ATA and SCSI layers
 * @ent: Probe information describing hardware device to be registered
 *
 * This function processes the information provided in the probe
 * information struct @ent, allocates the necessary ATA and SCSI
 * host information structures, initializes them, and registers
 * everything with requisite kernel subsystems.
 *
 * This function requests irqs, probes the ATA bus, and probes
 * the SCSI bus.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 * RETURNS:
 * Number of ports registered.  Zero on error (no ports registered).
 */

int ata_device_add(const struct ata_probe_ent *ent)
{
        unsigned int count = 0, i;
        struct device *dev = ent->dev;
        struct ata_host_set *host_set;

        DPRINTK("ENTER\n");
        /* alloc a container for our list of ATA ports (buses) */
        host_set = kzalloc(sizeof(struct ata_host_set) +
                           (ent->n_ports * sizeof(void *)), GFP_KERNEL);
        if (!host_set)
                return 0;
        spin_lock_init(&host_set->lock);

        host_set->dev = dev;
        host_set->n_ports = ent->n_ports;
        host_set->irq = ent->irq;
        host_set->mmio_base = ent->mmio_base;
        host_set->private_data = ent->private_data;
        host_set->ops = ent->port_ops;

        /* register each port bound to this device */
        for (i = 0; i < ent->n_ports; i++) {
                struct ata_port *ap;
                unsigned long xfer_mode_mask;

                ap = ata_host_add(ent, host_set, i);
                if (!ap)
                        goto err_out;

                host_set->ports[i] = ap;
                xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
                                 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
                                 (ap->pio_mask << ATA_SHIFT_PIO);

                /* print per-port info to dmesg */
                printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
                                 "bmdma 0x%lX irq %lu\n",
                        ap->id,
                        ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
                        ata_mode_string(xfer_mode_mask),
                        ap->ioaddr.cmd_addr,
                        ap->ioaddr.ctl_addr,
                        ap->ioaddr.bmdma_addr,
                        ent->irq);

                ata_chk_status(ap);
                host_set->ops->irq_clear(ap);
                count++;
        }

        if (!count)
                goto err_free_ret;

        /* obtain the irq, which is shared between channels */
        if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
                        DRV_NAME, host_set))
                goto err_out;

        /* perform each probe synchronously */
        DPRINTK("probe begin\n");
        for (i = 0; i < count; i++) {
                struct ata_port *ap;
                int rc;

                ap = host_set->ports[i];

                DPRINTK("ata%u: probe begin\n", ap->id);
                rc = ata_bus_probe(ap);
                DPRINTK("ata%u: probe end\n", ap->id);

                if (rc) {
                        /* FIXME: do something useful here?
                         * Current libata behavior will
                         * tear down everything when
                         * the module is removed
                         * or the h/w is unplugged.
                         */
                }

                rc = scsi_add_host(ap->host, dev);
                if (rc) {
                        printk(KERN_ERR "ata%u: scsi_add_host failed\n",
                               ap->id);
                        /* FIXME: do something useful here */
                        /* FIXME: handle unconditional calls to
                         * scsi_scan_host and ata_host_remove, below,
                         * at the very least
                         */
                }
        }

        /* probes are done, now scan each port's disk(s) */
        DPRINTK("scan begin\n");
        for (i = 0; i < count; i++) {
                struct ata_port *ap = host_set->ports[i];

                ata_scsi_scan_host(ap);
        }

        dev_set_drvdata(dev, host_set);

        VPRINTK("EXIT, returning %u\n", ent->n_ports);
        return ent->n_ports;    /* success */

err_out:
        for (i = 0; i < count; i++) {
                ata_host_remove(host_set->ports[i], 1);
                scsi_host_put(host_set->ports[i]->host);
        }
err_free_ret:
        kfree(host_set);
        VPRINTK("EXIT, returning 0\n");
        return 0;
}
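/*
 * Illustrative sketch (not part of libata itself): a non-PCI bus glue
 * layer fills in an ata_probe_ent by hand and passes it to
 * ata_device_add(), freeing it afterwards.  'example_sht',
 * 'example_port_ops' and all values below are placeholders.
 */
#if 0   /* example only */
static int example_probe(struct device *dev, unsigned long cmd_addr, int irq)
{
        struct ata_probe_ent *probe_ent;

        probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
        if (!probe_ent)
                return -ENOMEM;

        INIT_LIST_HEAD(&probe_ent->node);
        probe_ent->dev          = dev;
        probe_ent->sht          = &example_sht;         /* hypothetical */
        probe_ent->port_ops     = &example_port_ops;    /* hypothetical */
        probe_ent->n_ports      = 1;
        probe_ent->irq          = irq;
        probe_ent->irq_flags    = SA_SHIRQ;
        probe_ent->pio_mask     = 0x1f;                 /* PIO0-4 */
        probe_ent->port[0].cmd_addr = cmd_addr;
        ata_std_ports(&probe_ent->port[0]);
        /* ctl_addr/altstatus_addr must still be set by the caller */

        if (!ata_device_add(probe_ent)) {       /* 0 == no ports registered */
                kfree(probe_ent);
                return -ENODEV;
        }
        kfree(probe_ent);
        return 0;
}
#endif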
/**
 * ata_host_set_remove - PCI layer callback for device removal
 * @host_set: ATA host set that was removed
 *
 * Unregister all objects associated with this host set.  Free those
 * objects.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 */

void ata_host_set_remove(struct ata_host_set *host_set)
{
        struct ata_port *ap;
        unsigned int i;

        for (i = 0; i < host_set->n_ports; i++) {
                ap = host_set->ports[i];
                scsi_remove_host(ap->host);
        }

        free_irq(host_set->irq, host_set);

        for (i = 0; i < host_set->n_ports; i++) {
                ap = host_set->ports[i];

                ata_scsi_release(ap->host);

                if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
                        struct ata_ioports *ioaddr = &ap->ioaddr;

                        if (ioaddr->cmd_addr == 0x1f0)
                                release_region(0x1f0, 8);
                        else if (ioaddr->cmd_addr == 0x170)
                                release_region(0x170, 8);
                }

                scsi_host_put(ap->host);
        }

        if (host_set->ops->host_stop)
                host_set->ops->host_stop(host_set);

        kfree(host_set);
}
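/*
 * Illustrative sketch (not part of libata itself): a bus glue layer's
 * remove hook mirrors ata_pci_remove_one() further below — fetch the
 * host_set that ata_device_add() stored in drvdata, then tear it down.
 * The function name and device-based signature are placeholders.
 */
#if 0   /* example only */
static int example_remove(struct device *dev)
{
        struct ata_host_set *host_set = dev_get_drvdata(dev);

        ata_host_set_remove(host_set);
        dev_set_drvdata(dev, NULL);
        return 0;
}
#endif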
/**
 * ata_scsi_release - SCSI layer callback hook for host unload
 * @host: libata host to be unloaded
 *
 * Performs all duties necessary to shut down a libata port:
 * kill the port kthread, disable the port, and release resources.
 *
 * LOCKING:
 * Inherited from SCSI layer.
 *
 * RETURNS:
 * One.
 */

int ata_scsi_release(struct Scsi_Host *host)
{
        struct ata_port *ap = (struct ata_port *) &host->hostdata[0];

        DPRINTK("ENTER\n");

        ap->ops->port_disable(ap);
        ata_host_remove(ap, 0);

        DPRINTK("EXIT\n");
        return 1;
}

/**
 * ata_std_ports - initialize ioaddr with standard port offsets.
 * @ioaddr: IO address structure to be initialized
 *
 * Utility function which initializes data_addr, error_addr,
 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
 * device_addr, status_addr, and command_addr to standard offsets
 * relative to cmd_addr.
 *
 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
 */

void ata_std_ports(struct ata_ioports *ioaddr)
{
        ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
        ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
        ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
        ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
        ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
        ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
        ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
        ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
        ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
        ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}
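/*
 * Illustrative sketch (not part of libata itself): given just the base
 * command block address, ata_std_ports() derives the remaining taskfile
 * register addresses.  The legacy primary-channel values below are the
 * standard ISA addresses; the function name is a placeholder.
 */
#if 0   /* example only */
static void example_fill_ioaddr(struct ata_ioports *ioaddr)
{
        ioaddr->cmd_addr = 0x1f0;               /* primary command block */
        ioaddr->altstatus_addr =
        ioaddr->ctl_addr = 0x3f6;               /* must be set separately */
        ata_std_ports(ioaddr);                  /* data..command from cmd_addr */
}
#endif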
static struct ata_probe_ent *
ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
{
        struct ata_probe_ent *probe_ent;

        probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
        if (!probe_ent) {
                printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
                       kobject_name(&(dev->kobj)));
                return NULL;
        }

        INIT_LIST_HEAD(&probe_ent->node);
        probe_ent->dev = dev;

        probe_ent->sht = port->sht;
        probe_ent->host_flags = port->host_flags;
        probe_ent->pio_mask = port->pio_mask;
        probe_ent->mwdma_mask = port->mwdma_mask;
        probe_ent->udma_mask = port->udma_mask;
        probe_ent->port_ops = port->port_ops;

        return probe_ent;
}


#ifdef CONFIG_PCI

void ata_pci_host_stop (struct ata_host_set *host_set)
{
        struct pci_dev *pdev = to_pci_dev(host_set->dev);

        pci_iounmap(pdev, host_set->mmio_base);
}

/**
 * ata_pci_init_native_mode - Initialize native-mode driver
 * @pdev: pci device to be initialized
 * @port: array[2] of pointers to port info structures.
 * @ports: bitmap of ports present
 *
 * Utility function which allocates and initializes an
 * ata_probe_ent structure for a standard dual-port
 * PIO-based IDE controller.  The returned ata_probe_ent
 * structure can be passed to ata_device_add(), and should
 * afterwards be freed with kfree().
 *
 * The caller need only pass the address of the primary port; the
 * secondary will be deduced automatically.  If the device has
 * non-standard secondary port mappings this function can be called
 * twice, once for each interface.
 */

struct ata_probe_ent *
ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports)
{
        struct ata_probe_ent *probe_ent =
                ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
        int p = 0;

        if (!probe_ent)
                return NULL;

        probe_ent->irq = pdev->irq;
        probe_ent->irq_flags = SA_SHIRQ;
        probe_ent->private_data = port[0]->private_data;

        if (ports & ATA_PORT_PRIMARY) {
                probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0);
                probe_ent->port[p].altstatus_addr =
                probe_ent->port[p].ctl_addr =
                        pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
                probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4);
                ata_std_ports(&probe_ent->port[p]);
                p++;
        }

        if (ports & ATA_PORT_SECONDARY) {
                probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2);
                probe_ent->port[p].altstatus_addr =
                probe_ent->port[p].ctl_addr =
                        pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
                probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4) + 8;
                ata_std_ports(&probe_ent->port[p]);
                p++;
        }

        probe_ent->n_ports = p;
        return probe_ent;
}
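/*
 * Illustrative sketch (not part of libata itself): a native-mode PCI
 * driver typically builds its probe_ent like this from its own
 * xxx_init_one().  'example_port_info' is hypothetical and error
 * handling is abbreviated.
 */
#if 0   /* example only */
static int example_init_native(struct pci_dev *pdev)
{
        struct ata_port_info *ppi[2] = { &example_port_info, &example_port_info };
        struct ata_probe_ent *probe_ent;

        probe_ent = ata_pci_init_native_mode(pdev, ppi,
                                ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
        if (!probe_ent)
                return -ENOMEM;

        /* fix up probe_ent->port[] for controller quirks here, if any */

        ata_device_add(probe_ent);      /* returns number of ports added */
        kfree(probe_ent);
        return 0;
}
#endif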
static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev, struct ata_port_info *port, int port_num)
{
        struct ata_probe_ent *probe_ent;

        probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port);
        if (!probe_ent)
                return NULL;

        probe_ent->legacy_mode = 1;
        probe_ent->n_ports = 1;
        probe_ent->hard_port_no = port_num;
        probe_ent->private_data = port->private_data;

        switch (port_num) {
        case 0:
                probe_ent->irq = 14;
                probe_ent->port[0].cmd_addr = 0x1f0;
                probe_ent->port[0].altstatus_addr =
                probe_ent->port[0].ctl_addr = 0x3f6;
                break;
        case 1:
                probe_ent->irq = 15;
                probe_ent->port[0].cmd_addr = 0x170;
                probe_ent->port[0].altstatus_addr =
                probe_ent->port[0].ctl_addr = 0x376;
                break;
        }
        probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4) + 8 * port_num;
        ata_std_ports(&probe_ent->port[0]);
        return probe_ent;
}

/**
 * ata_pci_init_one - Initialize/register PCI IDE host controller
 * @pdev: Controller to be initialized
 * @port_info: Information from low-level host driver
 * @n_ports: Number of ports attached to host controller
 *
 * This is a helper function which can be called from a driver's
 * xxx_init_one() probe function if the hardware uses traditional
 * IDE taskfile registers.
 *
 * This function calls pci_enable_device(), reserves its register
 * regions, sets the dma mask, enables bus master mode, and calls
 * ata_device_add().
 *
 * LOCKING:
 * Inherited from PCI layer (may sleep).
 *
 * RETURNS:
 * Zero on success, negative errno-based value on error.
 */

int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
                      unsigned int n_ports)
{
        struct ata_probe_ent *probe_ent = NULL, *probe_ent2 = NULL;
        struct ata_port_info *port[2];
        u8 tmp8, mask;
        unsigned int legacy_mode = 0;
        int disable_dev_on_err = 1;
        int rc;

        DPRINTK("ENTER\n");

        port[0] = port_info[0];
        if (n_ports > 1)
                port[1] = port_info[1];
        else
                port[1] = port[0];

        if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
            && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
                /* TODO: What if one channel is in native mode ... */
                pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
                mask = (1 << 2) | (1 << 0);
                if ((tmp8 & mask) != mask)
                        legacy_mode = (1 << 3);
        }

        /* FIXME... */
        if ((!legacy_mode) && (n_ports > 2)) {
                printk(KERN_ERR "ata: BUG: native mode, n_ports > 2\n");
                n_ports = 2;
                /* For now */
        }

        /* FIXME: Really for ATA it isn't safe because the device may be
           multi-purpose and we want to leave it alone if it was already
           enabled.  Secondly for shared use as Arjan says we want refcounting

           Checking dev->is_enabled is insufficient as this is not set at
           boot for the primary video which is BIOS enabled
         */

        rc = pci_enable_device(pdev);
        if (rc)
                return rc;

        rc = pci_request_regions(pdev, DRV_NAME);
        if (rc) {
                disable_dev_on_err = 0;
                goto err_out;
        }

        /* FIXME: Should use platform specific mappers for legacy port ranges */
        if (legacy_mode) {
                if (!request_region(0x1f0, 8, "libata")) {
                        struct resource *conflict, res;
                        res.start = 0x1f0;
                        res.end = 0x1f0 + 8 - 1;
                        conflict = ____request_resource(&ioport_resource, &res);
                        if (!strcmp(conflict->name, "libata"))
                                legacy_mode |= (1 << 0);
                        else {
                                disable_dev_on_err = 0;
                                printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
                        }
                } else
                        legacy_mode |= (1 << 0);

                if (!request_region(0x170, 8, "libata")) {
                        struct resource *conflict, res;
                        res.start = 0x170;
                        res.end = 0x170 + 8 - 1;
                        conflict = ____request_resource(&ioport_resource, &res);
                        if (!strcmp(conflict->name, "libata"))
                                legacy_mode |= (1 << 1);
                        else {
                                disable_dev_on_err = 0;
                                printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
                        }
                } else
                        legacy_mode |= (1 << 1);
        }

        /* we have legacy mode, but all ports are unavailable */
        if (legacy_mode == (1 << 3)) {
                rc = -EBUSY;
                goto err_out_regions;
        }

        rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
        if (rc)
                goto err_out_regions;
        rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
        if (rc)
                goto err_out_regions;

        if (legacy_mode) {
                if (legacy_mode & (1 << 0))
                        probe_ent = ata_pci_init_legacy_port(pdev, port[0], 0);
                if (legacy_mode & (1 << 1))
                        probe_ent2 = ata_pci_init_legacy_port(pdev, port[1], 1);
        } else {
                if (n_ports == 2)
                        probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
                else
                        probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
        }
        if (!probe_ent && !probe_ent2) {
                rc = -ENOMEM;
                goto err_out_regions;
        }

        pci_set_master(pdev);

        /* FIXME: check ata_device_add return */
        if (legacy_mode) {
                if (legacy_mode & (1 << 0))
                        ata_device_add(probe_ent);
                if (legacy_mode & (1 << 1))
                        ata_device_add(probe_ent2);
        } else
                ata_device_add(probe_ent);

        kfree(probe_ent);
        kfree(probe_ent2);

        return 0;

err_out_regions:
        if (legacy_mode & (1 << 0))
                release_region(0x1f0, 8);
        if (legacy_mode & (1 << 1))
                release_region(0x170, 8);
        pci_release_regions(pdev);
err_out:
        if (disable_dev_on_err)
                pci_disable_device(pdev);
        return rc;
}
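/*
 * Illustrative sketch (not part of libata itself): for a plain
 * taskfile-style controller the whole PCI probe can delegate to
 * ata_pci_init_one().  'example_port_info', 'example_pci_tbl' and the
 * driver name are placeholders.
 */
#if 0   /* example only */
static int example_init_one(struct pci_dev *pdev,
                            const struct pci_device_id *ent)
{
        static struct ata_port_info *port_info[2] =
                { &example_port_info, &example_port_info };

        return ata_pci_init_one(pdev, port_info, 2);
}

static struct pci_driver example_pci_driver = {
        .name           = "example",
        .id_table       = example_pci_tbl,      /* hypothetical */
        .probe          = example_init_one,
        .remove         = ata_pci_remove_one,   /* see below */
};
#endif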
/**
 * ata_pci_remove_one - PCI layer callback for device removal
 * @pdev: PCI device that was removed
 *
 * PCI layer indicates to libata via this hook that a
 * hot-unplug or module unload event has occurred.
 * Handle this by unregistering all objects associated
 * with this PCI device.  Free those objects.  Then finally
 * release PCI resources and disable device.
 *
 * LOCKING:
 * Inherited from PCI layer (may sleep).
 */

void ata_pci_remove_one (struct pci_dev *pdev)
{
        struct device *dev = pci_dev_to_dev(pdev);
        struct ata_host_set *host_set = dev_get_drvdata(dev);

        ata_host_set_remove(host_set);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        dev_set_drvdata(dev, NULL);
}

/* move to PCI subsystem */
int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
{
        unsigned long tmp = 0;

        switch (bits->width) {
        case 1: {
                u8 tmp8 = 0;
                pci_read_config_byte(pdev, bits->reg, &tmp8);
                tmp = tmp8;
                break;
        }
        case 2: {
                u16 tmp16 = 0;
                pci_read_config_word(pdev, bits->reg, &tmp16);
                tmp = tmp16;
                break;
        }
        case 4: {
                u32 tmp32 = 0;
                pci_read_config_dword(pdev, bits->reg, &tmp32);
                tmp = tmp32;
                break;
        }

        default:
                return -EINVAL;
        }

        tmp &= bits->mask;

        return (tmp == bits->val) ? 1 : 0;
}
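/*
 * Illustrative sketch (not part of libata itself): pci_test_config_bits()
 * suits "is this channel enabled in config space?" checks.  The offset,
 * mask and value below are placeholders, not any real controller layout.
 */
#if 0   /* example only */
static const struct pci_bits example_enable_bits = {
        0x41,   /* reg: config space offset */
        1,      /* width: one byte */
        0x80,   /* mask */
        0x80,   /* val: channel-enable bit must be set */
};

static int example_check_enabled(struct pci_dev *pdev)
{
        if (!pci_test_config_bits(pdev, &example_enable_bits))
                return -ENOENT; /* channel disabled by firmware */
        return 0;
}
#endif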
#endif /* CONFIG_PCI */


static int __init ata_init(void)
{
        ata_wq = create_workqueue("ata");
        if (!ata_wq)
                return -ENOMEM;

        printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
        return 0;
}

static void __exit ata_exit(void)
{
        destroy_workqueue(ata_wq);
}

module_init(ata_init);
module_exit(ata_exit);

static unsigned long ratelimit_time;
static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED;

int ata_ratelimit(void)
{
        int rc;
        unsigned long flags;

        spin_lock_irqsave(&ata_ratelimit_lock, flags);

        if (time_after(jiffies, ratelimit_time)) {
                rc = 1;
                ratelimit_time = jiffies + (HZ/5);
        } else
                rc = 0;

        spin_unlock_irqrestore(&ata_ratelimit_lock, flags);

        return rc;
}

/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */

EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_device_add);
EXPORT_SYMBOL_GPL(ata_host_set_remove);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_port_stop);
EXPORT_SYMBOL_GPL(ata_host_stop);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_error);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_release);
EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_id_string);
EXPORT_SYMBOL_GPL(ata_dev_config);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);

#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_host_stop);
EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#endif /* CONFIG_PCI */