Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
at v2.6.15-rc1 4947 lines 122 kB view raw
1/* 2 * libata-core.c - helper library for ATA 3 * 4 * Maintained by: Jeff Garzik <jgarzik@pobox.com> 5 * Please ALWAYS copy linux-ide@vger.kernel.org 6 * on emails. 7 * 8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved. 9 * Copyright 2003-2004 Jeff Garzik 10 * 11 * 12 * This program is free software; you can redistribute it and/or modify 13 * it under the terms of the GNU General Public License as published by 14 * the Free Software Foundation; either version 2, or (at your option) 15 * any later version. 16 * 17 * This program is distributed in the hope that it will be useful, 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 * GNU General Public License for more details. 21 * 22 * You should have received a copy of the GNU General Public License 23 * along with this program; see the file COPYING. If not, write to 24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 25 * 26 * 27 * libata documentation is available via 'make {ps|pdf}docs', 28 * as Documentation/DocBook/libata.* 29 * 30 * Hardware documentation available from http://www.t13.org/ and 31 * http://www.sata-io.org/ 32 * 33 */ 34 35#include <linux/config.h> 36#include <linux/kernel.h> 37#include <linux/module.h> 38#include <linux/pci.h> 39#include <linux/init.h> 40#include <linux/list.h> 41#include <linux/mm.h> 42#include <linux/highmem.h> 43#include <linux/spinlock.h> 44#include <linux/blkdev.h> 45#include <linux/delay.h> 46#include <linux/timer.h> 47#include <linux/interrupt.h> 48#include <linux/completion.h> 49#include <linux/suspend.h> 50#include <linux/workqueue.h> 51#include <linux/jiffies.h> 52#include <linux/scatterlist.h> 53#include <scsi/scsi.h> 54#include "scsi_priv.h" 55#include <scsi/scsi_cmnd.h> 56#include <scsi/scsi_host.h> 57#include <linux/libata.h> 58#include <asm/io.h> 59#include <asm/semaphore.h> 60#include <asm/byteorder.h> 61 62#include "libata.h" 63 64static 
unsigned int ata_busy_sleep (struct ata_port *ap, 65 unsigned long tmout_pat, 66 unsigned long tmout); 67static void ata_dev_reread_id(struct ata_port *ap, struct ata_device *dev); 68static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev); 69static void ata_set_mode(struct ata_port *ap); 70static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev); 71static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift); 72static int fgb(u32 bitmap); 73static int ata_choose_xfer_mode(const struct ata_port *ap, 74 u8 *xfer_mode_out, 75 unsigned int *xfer_shift_out); 76static void __ata_qc_complete(struct ata_queued_cmd *qc); 77 78static unsigned int ata_unique_id = 1; 79static struct workqueue_struct *ata_wq; 80 81int atapi_enabled = 0; 82module_param(atapi_enabled, int, 0444); 83MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)"); 84 85MODULE_AUTHOR("Jeff Garzik"); 86MODULE_DESCRIPTION("Library module for ATA devices"); 87MODULE_LICENSE("GPL"); 88MODULE_VERSION(DRV_VERSION); 89 90/** 91 * ata_tf_load_pio - send taskfile registers to host controller 92 * @ap: Port to which output is sent 93 * @tf: ATA taskfile register set 94 * 95 * Outputs ATA taskfile to standard ATA host controller. 96 * 97 * LOCKING: 98 * Inherited from caller. 
 */

static void ata_tf_load_pio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	/* Write a changed device control value first, then wait for the
	 * device to go idle before touching the command block registers.
	 */
	if (tf->ctl != ap->last_ctl) {
		outb(tf->ctl, ioaddr->ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	/* For LBA48, the high-order bytes must be written into the
	 * shadow registers before the low-order bytes below.
	 */
	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		outb(tf->hob_feature, ioaddr->feature_addr);
		outb(tf->hob_nsect, ioaddr->nsect_addr);
		outb(tf->hob_lbal, ioaddr->lbal_addr);
		outb(tf->hob_lbam, ioaddr->lbam_addr);
		outb(tf->hob_lbah, ioaddr->lbah_addr);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->hob_feature,
			tf->hob_nsect,
			tf->hob_lbal,
			tf->hob_lbam,
			tf->hob_lbah);
	}

	if (is_addr) {
		outb(tf->feature, ioaddr->feature_addr);
		outb(tf->nsect, ioaddr->nsect_addr);
		outb(tf->lbal, ioaddr->lbal_addr);
		outb(tf->lbam, ioaddr->lbam_addr);
		outb(tf->lbah, ioaddr->lbah_addr);
		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE) {
		outb(tf->device, ioaddr->device_addr);
		VPRINTK("device 0x%X\n", tf->device);
	}

	/* wait for idle (!BUSY and !DRQ) before returning to the caller */
	ata_wait_idle(ap);
}

/**
 *	ata_tf_load_mmio - send taskfile registers to host controller
 *	@ap: Port to which output is sent
 *	@tf: ATA taskfile register set
 *
 *	Outputs ATA taskfile to standard ATA host controller using MMIO.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	/* Write a changed device control value first, then wait for the
	 * device to go idle before touching the command block registers.
	 */
	if (tf->ctl != ap->last_ctl) {
		writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	/* For LBA48, the high-order bytes must be written into the
	 * shadow registers before the low-order bytes below.
	 */
	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
		writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
		writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
		writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
		writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->hob_feature,
			tf->hob_nsect,
			tf->hob_lbal,
			tf->hob_lbam,
			tf->hob_lbah);
	}

	if (is_addr) {
		writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
		writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
		writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
		writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
		writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE) {
		writeb(tf->device, (void __iomem *) ioaddr->device_addr);
		VPRINTK("device 0x%X\n", tf->device);
	}

	/* wait for idle (!BUSY and !DRQ) before returning to the caller */
	ata_wait_idle(ap);
}


/**
 *	ata_tf_load - send taskfile registers to host controller
 *	@ap: Port to which output is sent
 *	@tf: ATA taskfile register set
 *
 *	Outputs ATA taskfile to standard ATA host controller using MMIO
 *	or PIO as indicated by the ATA_FLAG_MMIO flag.
 *	Writes the control, feature, nsect, lbal, lbam, and lbah registers.
 *	Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
 *	hob_lbal, hob_lbam, and hob_lbah.
 *
 *	This function waits for idle (!BUSY and !DRQ) after writing
 *	registers.  If the control register has a new value, this
 *	function also waits for idle after writing control and before
 *	writing the remaining registers.
 *
 *	May be used as the tf_load() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
	/* dispatch to the MMIO or PIO implementation */
	if (ap->flags & ATA_FLAG_MMIO)
		ata_tf_load_mmio(ap, tf);
	else
		ata_tf_load_pio(ap, tf);
}

/**
 *	ata_exec_command_pio - issue ATA command to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues PIO write to ATA command register, with proper
 *	synchronization with interrupt handler / other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);

	/* writing the command register starts command execution */
	outb(tf->command, ap->ioaddr.command_addr);
	ata_pause(ap);
}


/**
 *	ata_exec_command_mmio - issue ATA command to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues MMIO write to ATA command register, with proper
 *	synchronization with interrupt handler / other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
	DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);

	/* writing the command register starts command execution */
	writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
	ata_pause(ap);
}


/**
 *	ata_exec_command - issue ATA command to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues PIO/MMIO write to ATA command register, with proper
 *	synchronization with interrupt handler / other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
{
	/* dispatch to the MMIO or PIO implementation */
	if (ap->flags & ATA_FLAG_MMIO)
		ata_exec_command_mmio(ap, tf);
	else
		ata_exec_command_pio(ap, tf);
}

/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	/* load the taskfile registers, then fire the command */
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}

/**
 *	ata_tf_read_pio - input device's ATA taskfile shadow registers
 *	@ap: Port from which input is read
 *	@tf: ATA taskfile register set for storing input
 *
 *	Reads ATA taskfile registers for currently-selected device
 *	into @tf.
 *
 *	LOCKING:
 *	Inherited from caller.
327 */ 328 329static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf) 330{ 331 struct ata_ioports *ioaddr = &ap->ioaddr; 332 333 tf->command = ata_check_status(ap); 334 tf->feature = inb(ioaddr->error_addr); 335 tf->nsect = inb(ioaddr->nsect_addr); 336 tf->lbal = inb(ioaddr->lbal_addr); 337 tf->lbam = inb(ioaddr->lbam_addr); 338 tf->lbah = inb(ioaddr->lbah_addr); 339 tf->device = inb(ioaddr->device_addr); 340 341 if (tf->flags & ATA_TFLAG_LBA48) { 342 outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr); 343 tf->hob_feature = inb(ioaddr->error_addr); 344 tf->hob_nsect = inb(ioaddr->nsect_addr); 345 tf->hob_lbal = inb(ioaddr->lbal_addr); 346 tf->hob_lbam = inb(ioaddr->lbam_addr); 347 tf->hob_lbah = inb(ioaddr->lbah_addr); 348 } 349} 350 351/** 352 * ata_tf_read_mmio - input device's ATA taskfile shadow registers 353 * @ap: Port from which input is read 354 * @tf: ATA taskfile register set for storing input 355 * 356 * Reads ATA taskfile registers for currently-selected device 357 * into @tf via MMIO. 358 * 359 * LOCKING: 360 * Inherited from caller. 
361 */ 362 363static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf) 364{ 365 struct ata_ioports *ioaddr = &ap->ioaddr; 366 367 tf->command = ata_check_status(ap); 368 tf->feature = readb((void __iomem *)ioaddr->error_addr); 369 tf->nsect = readb((void __iomem *)ioaddr->nsect_addr); 370 tf->lbal = readb((void __iomem *)ioaddr->lbal_addr); 371 tf->lbam = readb((void __iomem *)ioaddr->lbam_addr); 372 tf->lbah = readb((void __iomem *)ioaddr->lbah_addr); 373 tf->device = readb((void __iomem *)ioaddr->device_addr); 374 375 if (tf->flags & ATA_TFLAG_LBA48) { 376 writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr); 377 tf->hob_feature = readb((void __iomem *)ioaddr->error_addr); 378 tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr); 379 tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr); 380 tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr); 381 tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr); 382 } 383} 384 385 386/** 387 * ata_tf_read - input device's ATA taskfile shadow registers 388 * @ap: Port from which input is read 389 * @tf: ATA taskfile register set for storing input 390 * 391 * Reads ATA taskfile registers for currently-selected device 392 * into @tf. 393 * 394 * Reads nsect, lbal, lbam, lbah, and device. If ATA_TFLAG_LBA48 395 * is set, also reads the hob registers. 396 * 397 * May be used as the tf_read() entry in ata_port_operations. 398 * 399 * LOCKING: 400 * Inherited from caller. 401 */ 402void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf) 403{ 404 if (ap->flags & ATA_FLAG_MMIO) 405 ata_tf_read_mmio(ap, tf); 406 else 407 ata_tf_read_pio(ap, tf); 408} 409 410/** 411 * ata_check_status_pio - Read device status reg & clear interrupt 412 * @ap: port where the device is 413 * 414 * Reads ATA taskfile status register for currently-selected device 415 * and return its value. This also clears pending interrupts 416 * from this device 417 * 418 * LOCKING: 419 * Inherited from caller. 
 */
static u8 ata_check_status_pio(struct ata_port *ap)
{
	/* reading the Status register clears the device interrupt */
	return inb(ap->ioaddr.status_addr);
}

/**
 *	ata_check_status_mmio - Read device status reg & clear interrupt
 *	@ap: port where the device is
 *
 *	Reads ATA taskfile status register for currently-selected device
 *	via MMIO and return its value. This also clears pending interrupts
 *      from this device
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static u8 ata_check_status_mmio(struct ata_port *ap)
{
	/* reading the Status register clears the device interrupt */
	return readb((void __iomem *) ap->ioaddr.status_addr);
}


/**
 *	ata_check_status - Read device status reg & clear interrupt
 *	@ap: port where the device is
 *
 *	Reads ATA taskfile status register for currently-selected device
 *	and return its value. This also clears pending interrupts
 *      from this device
 *
 *	May be used as the check_status() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
u8 ata_check_status(struct ata_port *ap)
{
	/* dispatch to the MMIO or PIO implementation */
	if (ap->flags & ATA_FLAG_MMIO)
		return ata_check_status_mmio(ap);
	return ata_check_status_pio(ap);
}


/**
 *	ata_altstatus - Read device alternate status reg
 *	@ap: port where the device is
 *
 *	Reads ATA taskfile alternate status register for
 *	currently-selected device and return its value.
 *
 *	Note: may NOT be used as the check_altstatus() entry in
 *	ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
u8 ata_altstatus(struct ata_port *ap)
{
	/* a driver-provided hook takes precedence over the
	 * standard PIO/MMIO register read below
	 */
	if (ap->ops->check_altstatus)
		return ap->ops->check_altstatus(ap);

	if (ap->flags & ATA_FLAG_MMIO)
		return readb((void __iomem *)ap->ioaddr.altstatus_addr);
	return inb(ap->ioaddr.altstatus_addr);
}


/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@fis: Buffer into which data will output
 *	@pmp: Port multiplier port
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	FIS structure (Register - Host to Device).
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
					    bit 7 indicates Command FIS */
	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	/* reserved words of the FIS are zeroed */
	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}

/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a Serial ATA FIS structure to a standard ATA taskfile.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}

/* Read/write opcodes, indexed as (protocol base) + (lba48 ? 2 : 0) + (write ? 1 : 0).
 * Protocol bases: 0 = PIO multi, 4 = PIO, 8 = DMA (see ata_rwcmd_protocol()).
 */
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT
};

/**
 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
 *	@qc: command to examine and configure
 *
 *	Examine the device configuration and tf->flags to calculate
 *	the proper read/write commands and protocol to use.
 *
 *	LOCKING:
 *	caller.
 */
void ata_rwcmd_protocol(struct ata_queued_cmd *qc)
{
	struct ata_taskfile *tf = &qc->tf;
	struct ata_device *dev = qc->dev;

	int index, lba48, write;

	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		/* multi-sector-capable devices use the READ/WRITE MULTI
		 * opcodes (index 0), plain PIO otherwise (index 4)
		 */
		index = dev->multi_count ? 0 : 4;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 8;
	}

	tf->command = ata_rw_cmds[index + lba48 + write];
}

/* transfer mode names, indexed by xfer mode bit number (UDMA 0-7,
 * MWDMA at ATA_SHIFT_MWDMA, PIO at ATA_SHIFT_PIO)
 */
static const char * xfer_mode_str[] = {
	"UDMA/16",
	"UDMA/25",
	"UDMA/33",
	"UDMA/44",
	"UDMA/66",
	"UDMA/100",
	"UDMA/133",
	"UDMA7",
	"MWDMA0",
	"MWDMA1",
	"MWDMA2",
	"PIO0",
	"PIO1",
	"PIO2",
	"PIO3",
	"PIO4",
};

/**
 *	ata_mode_string - convert xfer_mask to string
 *	@mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@mask, or the constant C string "<n/a>".
 */

static const char *ata_mode_string(unsigned int mask)
{
	int i;

	/* scan each mode class from fastest to slowest; the first
	 * set bit found names the mode
	 */
	for (i = 7; i >= 0; i--)
		if (mask & (1 << i))
			goto out;
	for (i = ATA_SHIFT_MWDMA + 2; i >= ATA_SHIFT_MWDMA; i--)
		if (mask & (1 << i))
			goto out;
	for (i = ATA_SHIFT_PIO + 4; i >= ATA_SHIFT_PIO; i--)
		if (mask & (1 << i))
			goto out;

	return "<n/a>";

out:
	return xfer_mode_str[i];
}

/**
 *	ata_pio_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */

static unsigned int ata_pio_devchk(struct ata_port *ap,
				   unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	/* write the test pattern several times; only the final pair
	 * of values is read back and checked
	 */
	outb(0x55, ioaddr->nsect_addr);
	outb(0xaa, ioaddr->lbal_addr);

	outb(0xaa, ioaddr->nsect_addr);
	outb(0x55, ioaddr->lbal_addr);

	outb(0x55, ioaddr->nsect_addr);
	outb(0xaa, ioaddr->lbal_addr);

	nsect = inb(ioaddr->nsect_addr);
	lbal = inb(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}

/**
 *	ata_mmio_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */

static unsigned int ata_mmio_devchk(struct ata_port *ap,
				    unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	/* write the test pattern several times; only the final pair
	 * of values is read back and checked
	 */
	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

	writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
	writeb(0x55, (void __iomem *) ioaddr->lbal_addr);

	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

	nsect = readb((void __iomem *) ioaddr->nsect_addr);
	lbal = readb((void __iomem *) ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}

/**
 *	ata_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	Dispatch ATA device presence detection, depending
 *	on whether we are using PIO or MMIO to talk to the
 *	ATA shadow registers.
 *
 *	LOCKING:
 *	caller.
 */

static unsigned int ata_devchk(struct ata_port *ap,
			       unsigned int device)
{
	if (ap->flags & ATA_FLAG_MMIO)
		return ata_mmio_devchk(ap, device);
	return ata_pio_devchk(ap, device);
}

/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section
 *	of ATA/PI spec (volume 1, sect 5.14).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
 *	the event of failure.
786 */ 787 788unsigned int ata_dev_classify(const struct ata_taskfile *tf) 789{ 790 /* Apple's open source Darwin code hints that some devices only 791 * put a proper signature into the LBA mid/high registers, 792 * So, we only check those. It's sufficient for uniqueness. 793 */ 794 795 if (((tf->lbam == 0) && (tf->lbah == 0)) || 796 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) { 797 DPRINTK("found ATA device by sig\n"); 798 return ATA_DEV_ATA; 799 } 800 801 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) || 802 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) { 803 DPRINTK("found ATAPI device by sig\n"); 804 return ATA_DEV_ATAPI; 805 } 806 807 DPRINTK("unknown device\n"); 808 return ATA_DEV_UNKNOWN; 809} 810 811/** 812 * ata_dev_try_classify - Parse returned ATA device signature 813 * @ap: ATA channel to examine 814 * @device: Device to examine (starting at zero) 815 * 816 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs, 817 * an ATA/ATAPI-defined set of values is placed in the ATA 818 * shadow registers, indicating the results of device detection 819 * and diagnostics. 820 * 821 * Select the ATA device, and read the values from the ATA shadow 822 * registers. Then parse according to the Error register value, 823 * and the spec-defined values examined by ata_dev_classify(). 824 * 825 * LOCKING: 826 * caller. 
 */

static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
{
	struct ata_device *dev = &ap->device[device];
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, device);

	memset(&tf, 0, sizeof(tf));

	/* read back the post-reset signature from the shadow registers */
	ap->ops->tf_read(ap, &tf);
	err = tf.feature;	/* feature reads back as the Error register */

	dev->class = ATA_DEV_NONE;

	/* see if device passed diags */
	if (err == 1)
		/* do nothing */ ;
	else if ((device == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return err;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);
	if (class == ATA_DEV_UNKNOWN)
		return err;
	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		return err;

	dev->class = class;

	return err;
}

/**
 *	ata_dev_id_string - Convert IDENTIFY DEVICE page into string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an even number.
 *
 *	The strings in the IDENTIFY DEVICE page are broken up into
 *	16-bit chunks.  Run through the string, and output each
 *	8-bit chunk linearly, regardless of platform.
 *
 *	LOCKING:
 *	caller.
 */

void ata_dev_id_string(const u16 *id, unsigned char *s,
		       unsigned int ofs, unsigned int len)
{
	unsigned int c;

	/* each 16-bit word holds two characters, high byte first */
	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}


/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
}


/**
 *	ata_std_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.  Works with both PIO and MMIO.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */

void ata_std_dev_select (struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	/* build the Device register value: obsolete bits plus the
	 * DEV1 select bit for device 1
	 */
	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	if (ap->flags & ATA_FLAG_MMIO) {
		writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
	} else {
		outb(tmp, ap->ioaddr.device_addr);
	}
	ata_pause(ap);		/* needed; also flushes, for mmio */
}

/**
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	This is a high-level version of ata_std_dev_select(),
 *	which additionally provides the services of inserting
 *	the proper pauses and status polling, where needed.
 *
 *	LOCKING:
 *	caller.
 */

void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
{
	VPRINTK("ENTER, ata%u: device %u, wait %u\n",
		ap->id, device, wait);

	/* wait for the channel to go idle both before and after the
	 * actual device selection
	 */
	if (wait)
		ata_wait_idle(ap);

	ap->ops->dev_select(ap, device);

	if (wait) {
		/* give ATAPI devices extra settle time after selection */
		if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}

/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@dev: Device whose IDENTIFY DEVICE page we will dump
 *
 *	Dump selected 16-bit words from a detected device's
 *	IDENTIFY PAGE page.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const struct ata_device *dev)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		dev->id[49],
		dev->id[53],
		dev->id[63],
		dev->id[64],
		dev->id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		dev->id[80],
		dev->id[81],
		dev->id[82],
		dev->id[83],
		dev->id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		dev->id[88],
		dev->id[93]);
}

/*
 *	Compute the PIO modes available for this device. This is not as
 *	trivial as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 */

static unsigned int ata_pio_modes(const struct ata_device *adev)
{
	u16 modes;

	/* Usual case. Word 53 indicates word 88 is valid */
	if (adev->id[ATA_ID_FIELD_VALID] & (1 << 2)) {
		/* keep the two advanced-PIO bits, shift them above the
		 * basic modes, and mark PIO modes 0-2 always supported
		 */
		modes = adev->id[ATA_ID_PIO_MODES] & 0x03;
		modes <<= 3;
		modes |= 0x7;
		return modes;
	}

	/* If word 88 isn't valid then Word 51 holds the PIO timing number
	   for the maximum. Turn it into a mask and return it */
	modes = (2 << (adev->id[ATA_ID_OLD_PIO_MODES] & 0xFF)) - 1 ;
	return modes;
}

/**
 *	ata_dev_identify - obtain IDENTIFY x DEVICE page
 *	@ap: port on which device we wish to probe resides
 *	@device: device bus address, starting at zero
 *
 *	Following bus reset, we issue the IDENTIFY [PACKET] DEVICE
 *	command, and read back the 512-byte device information page.
 *	The device information page is fed to us via the standard
 *	PIO-IN protocol, but we hand-code it here. (TODO: investigate
 *	using standard PIO-IN paths)
 *
 *	After reading the device information page, we use several
 *	bits of information from it to initialize data structures
 *	that will be used during the lifetime of the ata_device.
 *	Other data from the info page is used to disqualify certain
 *	older ATA devices we do not wish to support.
 *
 *	LOCKING:
 *	Inherited from caller.  Some functions called by this function
 *	obtain the host_set lock.
1070 */ 1071 1072static void ata_dev_identify(struct ata_port *ap, unsigned int device) 1073{ 1074 struct ata_device *dev = &ap->device[device]; 1075 unsigned int major_version; 1076 u16 tmp; 1077 unsigned long xfer_modes; 1078 unsigned int using_edd; 1079 DECLARE_COMPLETION(wait); 1080 struct ata_queued_cmd *qc; 1081 unsigned long flags; 1082 int rc; 1083 1084 if (!ata_dev_present(dev)) { 1085 DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n", 1086 ap->id, device); 1087 return; 1088 } 1089 1090 if (ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET)) 1091 using_edd = 0; 1092 else 1093 using_edd = 1; 1094 1095 DPRINTK("ENTER, host %u, dev %u\n", ap->id, device); 1096 1097 assert (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ATAPI || 1098 dev->class == ATA_DEV_NONE); 1099 1100 ata_dev_select(ap, device, 1, 1); /* select device 0/1 */ 1101 1102 qc = ata_qc_new_init(ap, dev); 1103 BUG_ON(qc == NULL); 1104 1105 ata_sg_init_one(qc, dev->id, sizeof(dev->id)); 1106 qc->dma_dir = DMA_FROM_DEVICE; 1107 qc->tf.protocol = ATA_PROT_PIO; 1108 qc->nsect = 1; 1109 1110retry: 1111 if (dev->class == ATA_DEV_ATA) { 1112 qc->tf.command = ATA_CMD_ID_ATA; 1113 DPRINTK("do ATA identify\n"); 1114 } else { 1115 qc->tf.command = ATA_CMD_ID_ATAPI; 1116 DPRINTK("do ATAPI identify\n"); 1117 } 1118 1119 qc->waiting = &wait; 1120 qc->complete_fn = ata_qc_complete_noop; 1121 1122 spin_lock_irqsave(&ap->host_set->lock, flags); 1123 rc = ata_qc_issue(qc); 1124 spin_unlock_irqrestore(&ap->host_set->lock, flags); 1125 1126 if (rc) 1127 goto err_out; 1128 else 1129 wait_for_completion(&wait); 1130 1131 spin_lock_irqsave(&ap->host_set->lock, flags); 1132 ap->ops->tf_read(ap, &qc->tf); 1133 spin_unlock_irqrestore(&ap->host_set->lock, flags); 1134 1135 if (qc->tf.command & ATA_ERR) { 1136 /* 1137 * arg! EDD works for all test cases, but seems to return 1138 * the ATA signature for some ATAPI devices. Until the 1139 * reason for this is found and fixed, we fix up the mess 1140 * here. 
If IDENTIFY DEVICE returns command aborted 1141 * (as ATAPI devices do), then we issue an 1142 * IDENTIFY PACKET DEVICE. 1143 * 1144 * ATA software reset (SRST, the default) does not appear 1145 * to have this problem. 1146 */ 1147 if ((using_edd) && (dev->class == ATA_DEV_ATA)) { 1148 u8 err = qc->tf.feature; 1149 if (err & ATA_ABORTED) { 1150 dev->class = ATA_DEV_ATAPI; 1151 qc->cursg = 0; 1152 qc->cursg_ofs = 0; 1153 qc->cursect = 0; 1154 qc->nsect = 1; 1155 goto retry; 1156 } 1157 } 1158 goto err_out; 1159 } 1160 1161 swap_buf_le16(dev->id, ATA_ID_WORDS); 1162 1163 /* print device capabilities */ 1164 printk(KERN_DEBUG "ata%u: dev %u cfg " 1165 "49:%04x 82:%04x 83:%04x 84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n", 1166 ap->id, device, dev->id[49], 1167 dev->id[82], dev->id[83], dev->id[84], 1168 dev->id[85], dev->id[86], dev->id[87], 1169 dev->id[88]); 1170 1171 /* 1172 * common ATA, ATAPI feature tests 1173 */ 1174 1175 /* we require DMA support (bits 8 of word 49) */ 1176 if (!ata_id_has_dma(dev->id)) { 1177 printk(KERN_DEBUG "ata%u: no dma\n", ap->id); 1178 goto err_out_nosup; 1179 } 1180 1181 /* quick-n-dirty find max transfer mode; for printk only */ 1182 xfer_modes = dev->id[ATA_ID_UDMA_MODES]; 1183 if (!xfer_modes) 1184 xfer_modes = (dev->id[ATA_ID_MWDMA_MODES]) << ATA_SHIFT_MWDMA; 1185 if (!xfer_modes) 1186 xfer_modes = ata_pio_modes(dev); 1187 1188 ata_dump_id(dev); 1189 1190 /* ATA-specific feature tests */ 1191 if (dev->class == ATA_DEV_ATA) { 1192 if (!ata_id_is_ata(dev->id)) /* sanity check */ 1193 goto err_out_nosup; 1194 1195 /* get major version */ 1196 tmp = dev->id[ATA_ID_MAJOR_VER]; 1197 for (major_version = 14; major_version >= 1; major_version--) 1198 if (tmp & (1 << major_version)) 1199 break; 1200 1201 /* 1202 * The exact sequence expected by certain pre-ATA4 drives is: 1203 * SRST RESET 1204 * IDENTIFY 1205 * INITIALIZE DEVICE PARAMETERS 1206 * anything else.. 1207 * Some drives were very specific about that exact sequence. 
1208 */ 1209 if (major_version < 4 || (!ata_id_has_lba(dev->id))) { 1210 ata_dev_init_params(ap, dev); 1211 1212 /* current CHS translation info (id[53-58]) might be 1213 * changed. reread the identify device info. 1214 */ 1215 ata_dev_reread_id(ap, dev); 1216 } 1217 1218 if (ata_id_has_lba(dev->id)) { 1219 dev->flags |= ATA_DFLAG_LBA; 1220 1221 if (ata_id_has_lba48(dev->id)) { 1222 dev->flags |= ATA_DFLAG_LBA48; 1223 dev->n_sectors = ata_id_u64(dev->id, 100); 1224 } else { 1225 dev->n_sectors = ata_id_u32(dev->id, 60); 1226 } 1227 1228 /* print device info to dmesg */ 1229 printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors:%s\n", 1230 ap->id, device, 1231 major_version, 1232 ata_mode_string(xfer_modes), 1233 (unsigned long long)dev->n_sectors, 1234 dev->flags & ATA_DFLAG_LBA48 ? " LBA48" : " LBA"); 1235 } else { 1236 /* CHS */ 1237 1238 /* Default translation */ 1239 dev->cylinders = dev->id[1]; 1240 dev->heads = dev->id[3]; 1241 dev->sectors = dev->id[6]; 1242 dev->n_sectors = dev->cylinders * dev->heads * dev->sectors; 1243 1244 if (ata_id_current_chs_valid(dev->id)) { 1245 /* Current CHS translation is valid. 
*/ 1246 dev->cylinders = dev->id[54]; 1247 dev->heads = dev->id[55]; 1248 dev->sectors = dev->id[56]; 1249 1250 dev->n_sectors = ata_id_u32(dev->id, 57); 1251 } 1252 1253 /* print device info to dmesg */ 1254 printk(KERN_INFO "ata%u: dev %u ATA-%d, max %s, %Lu sectors: CHS %d/%d/%d\n", 1255 ap->id, device, 1256 major_version, 1257 ata_mode_string(xfer_modes), 1258 (unsigned long long)dev->n_sectors, 1259 (int)dev->cylinders, (int)dev->heads, (int)dev->sectors); 1260 1261 } 1262 1263 ap->host->max_cmd_len = 16; 1264 } 1265 1266 /* ATAPI-specific feature tests */ 1267 else { 1268 if (ata_id_is_ata(dev->id)) /* sanity check */ 1269 goto err_out_nosup; 1270 1271 rc = atapi_cdb_len(dev->id); 1272 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) { 1273 printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id); 1274 goto err_out_nosup; 1275 } 1276 ap->cdb_len = (unsigned int) rc; 1277 ap->host->max_cmd_len = (unsigned char) ap->cdb_len; 1278 1279 /* print device info to dmesg */ 1280 printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n", 1281 ap->id, device, 1282 ata_mode_string(xfer_modes)); 1283 } 1284 1285 DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap)); 1286 return; 1287 1288err_out_nosup: 1289 printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n", 1290 ap->id, device); 1291err_out: 1292 dev->class++; /* converts ATA_DEV_xxx into ATA_DEV_xxx_UNSUP */ 1293 DPRINTK("EXIT, err\n"); 1294} 1295 1296 1297static inline u8 ata_dev_knobble(const struct ata_port *ap) 1298{ 1299 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(ap->device->id))); 1300} 1301 1302/** 1303 * ata_dev_config - Run device specific handlers and check for 1304 * SATA->PATA bridges 1305 * @ap: Bus 1306 * @i: Device 1307 * 1308 * LOCKING: 1309 */ 1310 1311void ata_dev_config(struct ata_port *ap, unsigned int i) 1312{ 1313 /* limit bridge transfers to udma5, 200 sectors */ 1314 if (ata_dev_knobble(ap)) { 1315 printk(KERN_INFO "ata%u(%u): applying bridge limits\n", 1316 ap->id, ap->device->devno); 
1317 ap->udma_mask &= ATA_UDMA5; 1318 ap->host->max_sectors = ATA_MAX_SECTORS; 1319 ap->host->hostt->max_sectors = ATA_MAX_SECTORS; 1320 ap->device->flags |= ATA_DFLAG_LOCK_SECTORS; 1321 } 1322 1323 if (ap->ops->dev_config) 1324 ap->ops->dev_config(ap, &ap->device[i]); 1325} 1326 1327/** 1328 * ata_bus_probe - Reset and probe ATA bus 1329 * @ap: Bus to probe 1330 * 1331 * Master ATA bus probing function. Initiates a hardware-dependent 1332 * bus reset, then attempts to identify any devices found on 1333 * the bus. 1334 * 1335 * LOCKING: 1336 * PCI/etc. bus probe sem. 1337 * 1338 * RETURNS: 1339 * Zero on success, non-zero on error. 1340 */ 1341 1342static int ata_bus_probe(struct ata_port *ap) 1343{ 1344 unsigned int i, found = 0; 1345 1346 ap->ops->phy_reset(ap); 1347 if (ap->flags & ATA_FLAG_PORT_DISABLED) 1348 goto err_out; 1349 1350 for (i = 0; i < ATA_MAX_DEVICES; i++) { 1351 ata_dev_identify(ap, i); 1352 if (ata_dev_present(&ap->device[i])) { 1353 found = 1; 1354 ata_dev_config(ap,i); 1355 } 1356 } 1357 1358 if ((!found) || (ap->flags & ATA_FLAG_PORT_DISABLED)) 1359 goto err_out_disable; 1360 1361 ata_set_mode(ap); 1362 if (ap->flags & ATA_FLAG_PORT_DISABLED) 1363 goto err_out_disable; 1364 1365 return 0; 1366 1367err_out_disable: 1368 ap->ops->port_disable(ap); 1369err_out: 1370 return -1; 1371} 1372 1373/** 1374 * ata_port_probe - Mark port as enabled 1375 * @ap: Port for which we indicate enablement 1376 * 1377 * Modify @ap data structure such that the system 1378 * thinks that the entire port is enabled. 1379 * 1380 * LOCKING: host_set lock, or some other form of 1381 * serialization. 1382 */ 1383 1384void ata_port_probe(struct ata_port *ap) 1385{ 1386 ap->flags &= ~ATA_FLAG_PORT_DISABLED; 1387} 1388 1389/** 1390 * __sata_phy_reset - Wake/reset a low-level SATA PHY 1391 * @ap: SATA port associated with target SATA PHY. 
1392 * 1393 * This function issues commands to standard SATA Sxxx 1394 * PHY registers, to wake up the phy (and device), and 1395 * clear any reset condition. 1396 * 1397 * LOCKING: 1398 * PCI/etc. bus probe sem. 1399 * 1400 */ 1401void __sata_phy_reset(struct ata_port *ap) 1402{ 1403 u32 sstatus; 1404 unsigned long timeout = jiffies + (HZ * 5); 1405 1406 if (ap->flags & ATA_FLAG_SATA_RESET) { 1407 /* issue phy wake/reset */ 1408 scr_write_flush(ap, SCR_CONTROL, 0x301); 1409 /* Couldn't find anything in SATA I/II specs, but 1410 * AHCI-1.1 10.4.2 says at least 1 ms. */ 1411 mdelay(1); 1412 } 1413 scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */ 1414 1415 /* wait for phy to become ready, if necessary */ 1416 do { 1417 msleep(200); 1418 sstatus = scr_read(ap, SCR_STATUS); 1419 if ((sstatus & 0xf) != 1) 1420 break; 1421 } while (time_before(jiffies, timeout)); 1422 1423 /* TODO: phy layer with polling, timeouts, etc. */ 1424 if (sata_dev_present(ap)) 1425 ata_port_probe(ap); 1426 else { 1427 sstatus = scr_read(ap, SCR_STATUS); 1428 printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n", 1429 ap->id, sstatus); 1430 ata_port_disable(ap); 1431 } 1432 1433 if (ap->flags & ATA_FLAG_PORT_DISABLED) 1434 return; 1435 1436 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) { 1437 ata_port_disable(ap); 1438 return; 1439 } 1440 1441 ap->cbl = ATA_CBL_SATA; 1442} 1443 1444/** 1445 * sata_phy_reset - Reset SATA bus. 1446 * @ap: SATA port associated with target SATA PHY. 1447 * 1448 * This function resets the SATA bus, and then probes 1449 * the bus for devices. 1450 * 1451 * LOCKING: 1452 * PCI/etc. bus probe sem. 1453 * 1454 */ 1455void sata_phy_reset(struct ata_port *ap) 1456{ 1457 __sata_phy_reset(ap); 1458 if (ap->flags & ATA_FLAG_PORT_DISABLED) 1459 return; 1460 ata_bus_reset(ap); 1461} 1462 1463/** 1464 * ata_port_disable - Disable port. 1465 * @ap: Port to be disabled. 
1466 * 1467 * Modify @ap data structure such that the system 1468 * thinks that the entire port is disabled, and should 1469 * never attempt to probe or communicate with devices 1470 * on this port. 1471 * 1472 * LOCKING: host_set lock, or some other form of 1473 * serialization. 1474 */ 1475 1476void ata_port_disable(struct ata_port *ap) 1477{ 1478 ap->device[0].class = ATA_DEV_NONE; 1479 ap->device[1].class = ATA_DEV_NONE; 1480 ap->flags |= ATA_FLAG_PORT_DISABLED; 1481} 1482 1483/* 1484 * This mode timing computation functionality is ported over from 1485 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik 1486 */ 1487/* 1488 * PIO 0-5, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds). 1489 * These were taken from ATA/ATAPI-6 standard, rev 0a, except 1490 * for PIO 5, which is a nonstandard extension and UDMA6, which 1491 * is currently supported only by Maxtor drives. 1492 */ 1493 1494static const struct ata_timing ata_timing[] = { 1495 1496 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 }, 1497 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 }, 1498 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 }, 1499 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 }, 1500 1501 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 }, 1502 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 }, 1503 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 }, 1504 1505/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */ 1506 1507 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 }, 1508 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 }, 1509 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 }, 1510 1511 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 }, 1512 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 }, 1513 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 }, 1514 1515/* { XFER_PIO_5, 20, 50, 30, 100, 50, 30, 100, 0 }, */ 1516 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 }, 1517 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 }, 1518 1519 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 }, 1520 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 }, 
1521 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 }, 1522 1523/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */ 1524 1525 { 0xFF } 1526}; 1527 1528#define ENOUGH(v,unit) (((v)-1)/(unit)+1) 1529#define EZ(v,unit) ((v)?ENOUGH(v,unit):0) 1530 1531static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT) 1532{ 1533 q->setup = EZ(t->setup * 1000, T); 1534 q->act8b = EZ(t->act8b * 1000, T); 1535 q->rec8b = EZ(t->rec8b * 1000, T); 1536 q->cyc8b = EZ(t->cyc8b * 1000, T); 1537 q->active = EZ(t->active * 1000, T); 1538 q->recover = EZ(t->recover * 1000, T); 1539 q->cycle = EZ(t->cycle * 1000, T); 1540 q->udma = EZ(t->udma * 1000, UT); 1541} 1542 1543void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b, 1544 struct ata_timing *m, unsigned int what) 1545{ 1546 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup); 1547 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b); 1548 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b); 1549 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b); 1550 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active); 1551 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover); 1552 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle); 1553 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma); 1554} 1555 1556static const struct ata_timing* ata_timing_find_mode(unsigned short speed) 1557{ 1558 const struct ata_timing *t; 1559 1560 for (t = ata_timing; t->mode != speed; t++) 1561 if (t->mode == 0xFF) 1562 return NULL; 1563 return t; 1564} 1565 1566int ata_timing_compute(struct ata_device *adev, unsigned short speed, 1567 struct ata_timing *t, int T, int UT) 1568{ 1569 const struct ata_timing *s; 1570 struct ata_timing p; 1571 1572 /* 1573 * Find the mode. 
1574 */ 1575 1576 if (!(s = ata_timing_find_mode(speed))) 1577 return -EINVAL; 1578 1579 /* 1580 * If the drive is an EIDE drive, it can tell us it needs extended 1581 * PIO/MW_DMA cycle timing. 1582 */ 1583 1584 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */ 1585 memset(&p, 0, sizeof(p)); 1586 if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) { 1587 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO]; 1588 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY]; 1589 } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) { 1590 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN]; 1591 } 1592 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B); 1593 } 1594 1595 /* 1596 * Convert the timing to bus clock counts. 1597 */ 1598 1599 ata_timing_quantize(s, t, T, UT); 1600 1601 /* 1602 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, S.M.A.R.T 1603 * and some other commands. We have to ensure that the DMA cycle timing is 1604 * slower/equal than the fastest PIO timing. 1605 */ 1606 1607 if (speed > XFER_PIO_4) { 1608 ata_timing_compute(adev, adev->pio_mode, &p, T, UT); 1609 ata_timing_merge(&p, t, t, ATA_TIMING_ALL); 1610 } 1611 1612 /* 1613 * Lenghten active & recovery time so that cycle time is correct. 
1614 */ 1615 1616 if (t->act8b + t->rec8b < t->cyc8b) { 1617 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2; 1618 t->rec8b = t->cyc8b - t->act8b; 1619 } 1620 1621 if (t->active + t->recover < t->cycle) { 1622 t->active += (t->cycle - (t->active + t->recover)) / 2; 1623 t->recover = t->cycle - t->active; 1624 } 1625 1626 return 0; 1627} 1628 1629static const struct { 1630 unsigned int shift; 1631 u8 base; 1632} xfer_mode_classes[] = { 1633 { ATA_SHIFT_UDMA, XFER_UDMA_0 }, 1634 { ATA_SHIFT_MWDMA, XFER_MW_DMA_0 }, 1635 { ATA_SHIFT_PIO, XFER_PIO_0 }, 1636}; 1637 1638static inline u8 base_from_shift(unsigned int shift) 1639{ 1640 int i; 1641 1642 for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++) 1643 if (xfer_mode_classes[i].shift == shift) 1644 return xfer_mode_classes[i].base; 1645 1646 return 0xff; 1647} 1648 1649static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev) 1650{ 1651 int ofs, idx; 1652 u8 base; 1653 1654 if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED)) 1655 return; 1656 1657 if (dev->xfer_shift == ATA_SHIFT_PIO) 1658 dev->flags |= ATA_DFLAG_PIO; 1659 1660 ata_dev_set_xfermode(ap, dev); 1661 1662 base = base_from_shift(dev->xfer_shift); 1663 ofs = dev->xfer_mode - base; 1664 idx = ofs + dev->xfer_shift; 1665 WARN_ON(idx >= ARRAY_SIZE(xfer_mode_str)); 1666 1667 DPRINTK("idx=%d xfer_shift=%u, xfer_mode=0x%x, base=0x%x, offset=%d\n", 1668 idx, dev->xfer_shift, (int)dev->xfer_mode, (int)base, ofs); 1669 1670 printk(KERN_INFO "ata%u: dev %u configured for %s\n", 1671 ap->id, dev->devno, xfer_mode_str[idx]); 1672} 1673 1674static int ata_host_set_pio(struct ata_port *ap) 1675{ 1676 unsigned int mask; 1677 int x, i; 1678 u8 base, xfer_mode; 1679 1680 mask = ata_get_mode_mask(ap, ATA_SHIFT_PIO); 1681 x = fgb(mask); 1682 if (x < 0) { 1683 printk(KERN_WARNING "ata%u: no PIO support\n", ap->id); 1684 return -1; 1685 } 1686 1687 base = base_from_shift(ATA_SHIFT_PIO); 1688 xfer_mode = base + x; 1689 1690 DPRINTK("base 0x%x 
xfer_mode 0x%x mask 0x%x x %d\n", 1691 (int)base, (int)xfer_mode, mask, x); 1692 1693 for (i = 0; i < ATA_MAX_DEVICES; i++) { 1694 struct ata_device *dev = &ap->device[i]; 1695 if (ata_dev_present(dev)) { 1696 dev->pio_mode = xfer_mode; 1697 dev->xfer_mode = xfer_mode; 1698 dev->xfer_shift = ATA_SHIFT_PIO; 1699 if (ap->ops->set_piomode) 1700 ap->ops->set_piomode(ap, dev); 1701 } 1702 } 1703 1704 return 0; 1705} 1706 1707static void ata_host_set_dma(struct ata_port *ap, u8 xfer_mode, 1708 unsigned int xfer_shift) 1709{ 1710 int i; 1711 1712 for (i = 0; i < ATA_MAX_DEVICES; i++) { 1713 struct ata_device *dev = &ap->device[i]; 1714 if (ata_dev_present(dev)) { 1715 dev->dma_mode = xfer_mode; 1716 dev->xfer_mode = xfer_mode; 1717 dev->xfer_shift = xfer_shift; 1718 if (ap->ops->set_dmamode) 1719 ap->ops->set_dmamode(ap, dev); 1720 } 1721 } 1722} 1723 1724/** 1725 * ata_set_mode - Program timings and issue SET FEATURES - XFER 1726 * @ap: port on which timings will be programmed 1727 * 1728 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). 1729 * 1730 * LOCKING: 1731 * PCI/etc. bus probe sem. 
1732 * 1733 */ 1734static void ata_set_mode(struct ata_port *ap) 1735{ 1736 unsigned int xfer_shift; 1737 u8 xfer_mode; 1738 int rc; 1739 1740 /* step 1: always set host PIO timings */ 1741 rc = ata_host_set_pio(ap); 1742 if (rc) 1743 goto err_out; 1744 1745 /* step 2: choose the best data xfer mode */ 1746 xfer_mode = xfer_shift = 0; 1747 rc = ata_choose_xfer_mode(ap, &xfer_mode, &xfer_shift); 1748 if (rc) 1749 goto err_out; 1750 1751 /* step 3: if that xfer mode isn't PIO, set host DMA timings */ 1752 if (xfer_shift != ATA_SHIFT_PIO) 1753 ata_host_set_dma(ap, xfer_mode, xfer_shift); 1754 1755 /* step 4: update devices' xfer mode */ 1756 ata_dev_set_mode(ap, &ap->device[0]); 1757 ata_dev_set_mode(ap, &ap->device[1]); 1758 1759 if (ap->flags & ATA_FLAG_PORT_DISABLED) 1760 return; 1761 1762 if (ap->ops->post_set_mode) 1763 ap->ops->post_set_mode(ap); 1764 1765 return; 1766 1767err_out: 1768 ata_port_disable(ap); 1769} 1770 1771/** 1772 * ata_busy_sleep - sleep until BSY clears, or timeout 1773 * @ap: port containing status register to be polled 1774 * @tmout_pat: impatience timeout 1775 * @tmout: overall timeout 1776 * 1777 * Sleep until ATA Status register bit BSY clears, 1778 * or a timeout occurs. 1779 * 1780 * LOCKING: None. 
1781 * 1782 */ 1783 1784static unsigned int ata_busy_sleep (struct ata_port *ap, 1785 unsigned long tmout_pat, 1786 unsigned long tmout) 1787{ 1788 unsigned long timer_start, timeout; 1789 u8 status; 1790 1791 status = ata_busy_wait(ap, ATA_BUSY, 300); 1792 timer_start = jiffies; 1793 timeout = timer_start + tmout_pat; 1794 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) { 1795 msleep(50); 1796 status = ata_busy_wait(ap, ATA_BUSY, 3); 1797 } 1798 1799 if (status & ATA_BUSY) 1800 printk(KERN_WARNING "ata%u is slow to respond, " 1801 "please be patient\n", ap->id); 1802 1803 timeout = timer_start + tmout; 1804 while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) { 1805 msleep(50); 1806 status = ata_chk_status(ap); 1807 } 1808 1809 if (status & ATA_BUSY) { 1810 printk(KERN_ERR "ata%u failed to respond (%lu secs)\n", 1811 ap->id, tmout / HZ); 1812 return 1; 1813 } 1814 1815 return 0; 1816} 1817 1818static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask) 1819{ 1820 struct ata_ioports *ioaddr = &ap->ioaddr; 1821 unsigned int dev0 = devmask & (1 << 0); 1822 unsigned int dev1 = devmask & (1 << 1); 1823 unsigned long timeout; 1824 1825 /* if device 0 was found in ata_devchk, wait for its 1826 * BSY bit to clear 1827 */ 1828 if (dev0) 1829 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); 1830 1831 /* if device 1 was found in ata_devchk, wait for 1832 * register access, then wait for BSY to clear 1833 */ 1834 timeout = jiffies + ATA_TMOUT_BOOT; 1835 while (dev1) { 1836 u8 nsect, lbal; 1837 1838 ap->ops->dev_select(ap, 1); 1839 if (ap->flags & ATA_FLAG_MMIO) { 1840 nsect = readb((void __iomem *) ioaddr->nsect_addr); 1841 lbal = readb((void __iomem *) ioaddr->lbal_addr); 1842 } else { 1843 nsect = inb(ioaddr->nsect_addr); 1844 lbal = inb(ioaddr->lbal_addr); 1845 } 1846 if ((nsect == 1) && (lbal == 1)) 1847 break; 1848 if (time_after(jiffies, timeout)) { 1849 dev1 = 0; 1850 break; 1851 } 1852 msleep(50); /* give drive a 
breather */ 1853 } 1854 if (dev1) 1855 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); 1856 1857 /* is all this really necessary? */ 1858 ap->ops->dev_select(ap, 0); 1859 if (dev1) 1860 ap->ops->dev_select(ap, 1); 1861 if (dev0) 1862 ap->ops->dev_select(ap, 0); 1863} 1864 1865/** 1866 * ata_bus_edd - Issue EXECUTE DEVICE DIAGNOSTIC command. 1867 * @ap: Port to reset and probe 1868 * 1869 * Use the EXECUTE DEVICE DIAGNOSTIC command to reset and 1870 * probe the bus. Not often used these days. 1871 * 1872 * LOCKING: 1873 * PCI/etc. bus probe sem. 1874 * Obtains host_set lock. 1875 * 1876 */ 1877 1878static unsigned int ata_bus_edd(struct ata_port *ap) 1879{ 1880 struct ata_taskfile tf; 1881 unsigned long flags; 1882 1883 /* set up execute-device-diag (bus reset) taskfile */ 1884 /* also, take interrupts to a known state (disabled) */ 1885 DPRINTK("execute-device-diag\n"); 1886 ata_tf_init(ap, &tf, 0); 1887 tf.ctl |= ATA_NIEN; 1888 tf.command = ATA_CMD_EDD; 1889 tf.protocol = ATA_PROT_NODATA; 1890 1891 /* do bus reset */ 1892 spin_lock_irqsave(&ap->host_set->lock, flags); 1893 ata_tf_to_host(ap, &tf); 1894 spin_unlock_irqrestore(&ap->host_set->lock, flags); 1895 1896 /* spec says at least 2ms. but who knows with those 1897 * crazy ATAPI devices... 1898 */ 1899 msleep(150); 1900 1901 return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); 1902} 1903 1904static unsigned int ata_bus_softreset(struct ata_port *ap, 1905 unsigned int devmask) 1906{ 1907 struct ata_ioports *ioaddr = &ap->ioaddr; 1908 1909 DPRINTK("ata%u: bus reset via SRST\n", ap->id); 1910 1911 /* software reset. 
causes dev0 to be selected */ 1912 if (ap->flags & ATA_FLAG_MMIO) { 1913 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr); 1914 udelay(20); /* FIXME: flush */ 1915 writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr); 1916 udelay(20); /* FIXME: flush */ 1917 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr); 1918 } else { 1919 outb(ap->ctl, ioaddr->ctl_addr); 1920 udelay(10); 1921 outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr); 1922 udelay(10); 1923 outb(ap->ctl, ioaddr->ctl_addr); 1924 } 1925 1926 /* spec mandates ">= 2ms" before checking status. 1927 * We wait 150ms, because that was the magic delay used for 1928 * ATAPI devices in Hale Landis's ATADRVR, for the period of time 1929 * between when the ATA command register is written, and then 1930 * status is checked. Because waiting for "a while" before 1931 * checking status is fine, post SRST, we perform this magic 1932 * delay here as well. 1933 */ 1934 msleep(150); 1935 1936 ata_bus_post_reset(ap, devmask); 1937 1938 return 0; 1939} 1940 1941/** 1942 * ata_bus_reset - reset host port and associated ATA channel 1943 * @ap: port to reset 1944 * 1945 * This is typically the first time we actually start issuing 1946 * commands to the ATA channel. We wait for BSY to clear, then 1947 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its 1948 * result. Determine what devices, if any, are on the channel 1949 * by looking at the device 0/1 error register. Look at the signature 1950 * stored in each device's taskfile registers, to determine if 1951 * the device is ATA or ATAPI. 1952 * 1953 * LOCKING: 1954 * PCI/etc. bus probe sem. 1955 * Obtains host_set lock. 1956 * 1957 * SIDE EFFECTS: 1958 * Sets ATA_FLAG_PORT_DISABLED if bus reset fails. 
1959 */ 1960 1961void ata_bus_reset(struct ata_port *ap) 1962{ 1963 struct ata_ioports *ioaddr = &ap->ioaddr; 1964 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; 1965 u8 err; 1966 unsigned int dev0, dev1 = 0, rc = 0, devmask = 0; 1967 1968 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no); 1969 1970 /* determine if device 0/1 are present */ 1971 if (ap->flags & ATA_FLAG_SATA_RESET) 1972 dev0 = 1; 1973 else { 1974 dev0 = ata_devchk(ap, 0); 1975 if (slave_possible) 1976 dev1 = ata_devchk(ap, 1); 1977 } 1978 1979 if (dev0) 1980 devmask |= (1 << 0); 1981 if (dev1) 1982 devmask |= (1 << 1); 1983 1984 /* select device 0 again */ 1985 ap->ops->dev_select(ap, 0); 1986 1987 /* issue bus reset */ 1988 if (ap->flags & ATA_FLAG_SRST) 1989 rc = ata_bus_softreset(ap, devmask); 1990 else if ((ap->flags & ATA_FLAG_SATA_RESET) == 0) { 1991 /* set up device control */ 1992 if (ap->flags & ATA_FLAG_MMIO) 1993 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr); 1994 else 1995 outb(ap->ctl, ioaddr->ctl_addr); 1996 rc = ata_bus_edd(ap); 1997 } 1998 1999 if (rc) 2000 goto err_out; 2001 2002 /* 2003 * determine by signature whether we have ATA or ATAPI devices 2004 */ 2005 err = ata_dev_try_classify(ap, 0); 2006 if ((slave_possible) && (err != 0x81)) 2007 ata_dev_try_classify(ap, 1); 2008 2009 /* re-enable interrupts */ 2010 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */ 2011 ata_irq_on(ap); 2012 2013 /* is double-select really necessary? 
*/ 2014 if (ap->device[1].class != ATA_DEV_NONE) 2015 ap->ops->dev_select(ap, 1); 2016 if (ap->device[0].class != ATA_DEV_NONE) 2017 ap->ops->dev_select(ap, 0); 2018 2019 /* if no devices were detected, disable this port */ 2020 if ((ap->device[0].class == ATA_DEV_NONE) && 2021 (ap->device[1].class == ATA_DEV_NONE)) 2022 goto err_out; 2023 2024 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) { 2025 /* set up device control for ATA_FLAG_SATA_RESET */ 2026 if (ap->flags & ATA_FLAG_MMIO) 2027 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr); 2028 else 2029 outb(ap->ctl, ioaddr->ctl_addr); 2030 } 2031 2032 DPRINTK("EXIT\n"); 2033 return; 2034 2035err_out: 2036 printk(KERN_ERR "ata%u: disabling port\n", ap->id); 2037 ap->ops->port_disable(ap); 2038 2039 DPRINTK("EXIT\n"); 2040} 2041 2042static void ata_pr_blacklisted(const struct ata_port *ap, 2043 const struct ata_device *dev) 2044{ 2045 printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, disabling DMA\n", 2046 ap->id, dev->devno); 2047} 2048 2049static const char * ata_dma_blacklist [] = { 2050 "WDC AC11000H", 2051 "WDC AC22100H", 2052 "WDC AC32500H", 2053 "WDC AC33100H", 2054 "WDC AC31600H", 2055 "WDC AC32100H", 2056 "WDC AC23200L", 2057 "Compaq CRD-8241B", 2058 "CRD-8400B", 2059 "CRD-8480B", 2060 "CRD-8482B", 2061 "CRD-84", 2062 "SanDisk SDP3B", 2063 "SanDisk SDP3B-64", 2064 "SANYO CD-ROM CRD", 2065 "HITACHI CDR-8", 2066 "HITACHI CDR-8335", 2067 "HITACHI CDR-8435", 2068 "Toshiba CD-ROM XM-6202B", 2069 "TOSHIBA CD-ROM XM-1702BC", 2070 "CD-532E-A", 2071 "E-IDE CD-ROM CR-840", 2072 "CD-ROM Drive/F5A", 2073 "WPI CDD-820", 2074 "SAMSUNG CD-ROM SC-148C", 2075 "SAMSUNG CD-ROM SC", 2076 "SanDisk SDP3B-64", 2077 "ATAPI CD-ROM DRIVE 40X MAXIMUM", 2078 "_NEC DV5800A", 2079}; 2080 2081static int ata_dma_blacklisted(const struct ata_device *dev) 2082{ 2083 unsigned char model_num[40]; 2084 char *s; 2085 unsigned int len; 2086 int i; 2087 2088 ata_dev_id_string(dev->id, model_num, ATA_ID_PROD_OFS, 2089 
sizeof(model_num)); 2090 s = &model_num[0]; 2091 len = strnlen(s, sizeof(model_num)); 2092 2093 /* ATAPI specifies that empty space is blank-filled; remove blanks */ 2094 while ((len > 0) && (s[len - 1] == ' ')) { 2095 len--; 2096 s[len] = 0; 2097 } 2098 2099 for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i++) 2100 if (!strncmp(ata_dma_blacklist[i], s, len)) 2101 return 1; 2102 2103 return 0; 2104} 2105 2106static unsigned int ata_get_mode_mask(const struct ata_port *ap, int shift) 2107{ 2108 const struct ata_device *master, *slave; 2109 unsigned int mask; 2110 2111 master = &ap->device[0]; 2112 slave = &ap->device[1]; 2113 2114 assert (ata_dev_present(master) || ata_dev_present(slave)); 2115 2116 if (shift == ATA_SHIFT_UDMA) { 2117 mask = ap->udma_mask; 2118 if (ata_dev_present(master)) { 2119 mask &= (master->id[ATA_ID_UDMA_MODES] & 0xff); 2120 if (ata_dma_blacklisted(master)) { 2121 mask = 0; 2122 ata_pr_blacklisted(ap, master); 2123 } 2124 } 2125 if (ata_dev_present(slave)) { 2126 mask &= (slave->id[ATA_ID_UDMA_MODES] & 0xff); 2127 if (ata_dma_blacklisted(slave)) { 2128 mask = 0; 2129 ata_pr_blacklisted(ap, slave); 2130 } 2131 } 2132 } 2133 else if (shift == ATA_SHIFT_MWDMA) { 2134 mask = ap->mwdma_mask; 2135 if (ata_dev_present(master)) { 2136 mask &= (master->id[ATA_ID_MWDMA_MODES] & 0x07); 2137 if (ata_dma_blacklisted(master)) { 2138 mask = 0; 2139 ata_pr_blacklisted(ap, master); 2140 } 2141 } 2142 if (ata_dev_present(slave)) { 2143 mask &= (slave->id[ATA_ID_MWDMA_MODES] & 0x07); 2144 if (ata_dma_blacklisted(slave)) { 2145 mask = 0; 2146 ata_pr_blacklisted(ap, slave); 2147 } 2148 } 2149 } 2150 else if (shift == ATA_SHIFT_PIO) { 2151 mask = ap->pio_mask; 2152 if (ata_dev_present(master)) { 2153 /* spec doesn't return explicit support for 2154 * PIO0-2, so we fake it 2155 */ 2156 u16 tmp_mode = master->id[ATA_ID_PIO_MODES] & 0x03; 2157 tmp_mode <<= 3; 2158 tmp_mode |= 0x7; 2159 mask &= tmp_mode; 2160 } 2161 if (ata_dev_present(slave)) { 2162 /* spec doesn't 
return explicit support for 2163 * PIO0-2, so we fake it 2164 */ 2165 u16 tmp_mode = slave->id[ATA_ID_PIO_MODES] & 0x03; 2166 tmp_mode <<= 3; 2167 tmp_mode |= 0x7; 2168 mask &= tmp_mode; 2169 } 2170 } 2171 else { 2172 mask = 0xffffffff; /* shut up compiler warning */ 2173 BUG(); 2174 } 2175 2176 return mask; 2177} 2178 2179/* find greatest bit */ 2180static int fgb(u32 bitmap) 2181{ 2182 unsigned int i; 2183 int x = -1; 2184 2185 for (i = 0; i < 32; i++) 2186 if (bitmap & (1 << i)) 2187 x = i; 2188 2189 return x; 2190} 2191 2192/** 2193 * ata_choose_xfer_mode - attempt to find best transfer mode 2194 * @ap: Port for which an xfer mode will be selected 2195 * @xfer_mode_out: (output) SET FEATURES - XFER MODE code 2196 * @xfer_shift_out: (output) bit shift that selects this mode 2197 * 2198 * Based on host and device capabilities, determine the 2199 * maximum transfer mode that is amenable to all. 2200 * 2201 * LOCKING: 2202 * PCI/etc. bus probe sem. 2203 * 2204 * RETURNS: 2205 * Zero on success, negative on error. 2206 */ 2207 2208static int ata_choose_xfer_mode(const struct ata_port *ap, 2209 u8 *xfer_mode_out, 2210 unsigned int *xfer_shift_out) 2211{ 2212 unsigned int mask, shift; 2213 int x, i; 2214 2215 for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++) { 2216 shift = xfer_mode_classes[i].shift; 2217 mask = ata_get_mode_mask(ap, shift); 2218 2219 x = fgb(mask); 2220 if (x >= 0) { 2221 *xfer_mode_out = xfer_mode_classes[i].base + x; 2222 *xfer_shift_out = shift; 2223 return 0; 2224 } 2225 } 2226 2227 return -1; 2228} 2229 2230/** 2231 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command 2232 * @ap: Port associated with device @dev 2233 * @dev: Device to which command will be sent 2234 * 2235 * Issue SET FEATURES - XFER MODE command to device @dev 2236 * on port @ap. 2237 * 2238 * LOCKING: 2239 * PCI/etc. bus probe sem. 
2240 */ 2241 2242static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev) 2243{ 2244 DECLARE_COMPLETION(wait); 2245 struct ata_queued_cmd *qc; 2246 int rc; 2247 unsigned long flags; 2248 2249 /* set up set-features taskfile */ 2250 DPRINTK("set features - xfer mode\n"); 2251 2252 qc = ata_qc_new_init(ap, dev); 2253 BUG_ON(qc == NULL); 2254 2255 qc->tf.command = ATA_CMD_SET_FEATURES; 2256 qc->tf.feature = SETFEATURES_XFER; 2257 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 2258 qc->tf.protocol = ATA_PROT_NODATA; 2259 qc->tf.nsect = dev->xfer_mode; 2260 2261 qc->waiting = &wait; 2262 qc->complete_fn = ata_qc_complete_noop; 2263 2264 spin_lock_irqsave(&ap->host_set->lock, flags); 2265 rc = ata_qc_issue(qc); 2266 spin_unlock_irqrestore(&ap->host_set->lock, flags); 2267 2268 if (rc) 2269 ata_port_disable(ap); 2270 else 2271 wait_for_completion(&wait); 2272 2273 DPRINTK("EXIT\n"); 2274} 2275 2276/** 2277 * ata_dev_reread_id - Reread the device identify device info 2278 * @ap: port where the device is 2279 * @dev: device to reread the identify device info 2280 * 2281 * LOCKING: 2282 */ 2283 2284static void ata_dev_reread_id(struct ata_port *ap, struct ata_device *dev) 2285{ 2286 DECLARE_COMPLETION(wait); 2287 struct ata_queued_cmd *qc; 2288 unsigned long flags; 2289 int rc; 2290 2291 qc = ata_qc_new_init(ap, dev); 2292 BUG_ON(qc == NULL); 2293 2294 ata_sg_init_one(qc, dev->id, sizeof(dev->id)); 2295 qc->dma_dir = DMA_FROM_DEVICE; 2296 2297 if (dev->class == ATA_DEV_ATA) { 2298 qc->tf.command = ATA_CMD_ID_ATA; 2299 DPRINTK("do ATA identify\n"); 2300 } else { 2301 qc->tf.command = ATA_CMD_ID_ATAPI; 2302 DPRINTK("do ATAPI identify\n"); 2303 } 2304 2305 qc->tf.flags |= ATA_TFLAG_DEVICE; 2306 qc->tf.protocol = ATA_PROT_PIO; 2307 qc->nsect = 1; 2308 2309 qc->waiting = &wait; 2310 qc->complete_fn = ata_qc_complete_noop; 2311 2312 spin_lock_irqsave(&ap->host_set->lock, flags); 2313 rc = ata_qc_issue(qc); 2314 spin_unlock_irqrestore(&ap->host_set->lock, 
flags); 2315 2316 if (rc) 2317 goto err_out; 2318 2319 wait_for_completion(&wait); 2320 2321 swap_buf_le16(dev->id, ATA_ID_WORDS); 2322 2323 ata_dump_id(dev); 2324 2325 DPRINTK("EXIT\n"); 2326 2327 return; 2328err_out: 2329 ata_port_disable(ap); 2330} 2331 2332/** 2333 * ata_dev_init_params - Issue INIT DEV PARAMS command 2334 * @ap: Port associated with device @dev 2335 * @dev: Device to which command will be sent 2336 * 2337 * LOCKING: 2338 */ 2339 2340static void ata_dev_init_params(struct ata_port *ap, struct ata_device *dev) 2341{ 2342 DECLARE_COMPLETION(wait); 2343 struct ata_queued_cmd *qc; 2344 int rc; 2345 unsigned long flags; 2346 u16 sectors = dev->id[6]; 2347 u16 heads = dev->id[3]; 2348 2349 /* Number of sectors per track 1-255. Number of heads 1-16 */ 2350 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16) 2351 return; 2352 2353 /* set up init dev params taskfile */ 2354 DPRINTK("init dev params \n"); 2355 2356 qc = ata_qc_new_init(ap, dev); 2357 BUG_ON(qc == NULL); 2358 2359 qc->tf.command = ATA_CMD_INIT_DEV_PARAMS; 2360 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 2361 qc->tf.protocol = ATA_PROT_NODATA; 2362 qc->tf.nsect = sectors; 2363 qc->tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */ 2364 2365 qc->waiting = &wait; 2366 qc->complete_fn = ata_qc_complete_noop; 2367 2368 spin_lock_irqsave(&ap->host_set->lock, flags); 2369 rc = ata_qc_issue(qc); 2370 spin_unlock_irqrestore(&ap->host_set->lock, flags); 2371 2372 if (rc) 2373 ata_port_disable(ap); 2374 else 2375 wait_for_completion(&wait); 2376 2377 DPRINTK("EXIT\n"); 2378} 2379 2380/** 2381 * ata_sg_clean - Unmap DMA memory associated with command 2382 * @qc: Command containing DMA memory to be released 2383 * 2384 * Unmap all mapped DMA memory associated with this command. 
2385 * 2386 * LOCKING: 2387 * spin_lock_irqsave(host_set lock) 2388 */ 2389 2390static void ata_sg_clean(struct ata_queued_cmd *qc) 2391{ 2392 struct ata_port *ap = qc->ap; 2393 struct scatterlist *sg = qc->__sg; 2394 int dir = qc->dma_dir; 2395 void *pad_buf = NULL; 2396 2397 assert(qc->flags & ATA_QCFLAG_DMAMAP); 2398 assert(sg != NULL); 2399 2400 if (qc->flags & ATA_QCFLAG_SINGLE) 2401 assert(qc->n_elem == 1); 2402 2403 DPRINTK("unmapping %u sg elements\n", qc->n_elem); 2404 2405 /* if we padded the buffer out to 32-bit bound, and data 2406 * xfer direction is from-device, we must copy from the 2407 * pad buffer back into the supplied buffer 2408 */ 2409 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE)) 2410 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ); 2411 2412 if (qc->flags & ATA_QCFLAG_SG) { 2413 dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir); 2414 /* restore last sg */ 2415 sg[qc->orig_n_elem - 1].length += qc->pad_len; 2416 if (pad_buf) { 2417 struct scatterlist *psg = &qc->pad_sgent; 2418 void *addr = kmap_atomic(psg->page, KM_IRQ0); 2419 memcpy(addr + psg->offset, pad_buf, qc->pad_len); 2420 kunmap_atomic(psg->page, KM_IRQ0); 2421 } 2422 } else { 2423 dma_unmap_single(ap->host_set->dev, sg_dma_address(&sg[0]), 2424 sg_dma_len(&sg[0]), dir); 2425 /* restore sg */ 2426 sg->length += qc->pad_len; 2427 if (pad_buf) 2428 memcpy(qc->buf_virt + sg->length - qc->pad_len, 2429 pad_buf, qc->pad_len); 2430 } 2431 2432 qc->flags &= ~ATA_QCFLAG_DMAMAP; 2433 qc->__sg = NULL; 2434} 2435 2436/** 2437 * ata_fill_sg - Fill PCI IDE PRD table 2438 * @qc: Metadata associated with taskfile to be transferred 2439 * 2440 * Fill PCI IDE PRD (scatter-gather) table with segments 2441 * associated with the current disk command. 
2442 * 2443 * LOCKING: 2444 * spin_lock_irqsave(host_set lock) 2445 * 2446 */ 2447static void ata_fill_sg(struct ata_queued_cmd *qc) 2448{ 2449 struct ata_port *ap = qc->ap; 2450 struct scatterlist *sg; 2451 unsigned int idx; 2452 2453 assert(qc->__sg != NULL); 2454 assert(qc->n_elem > 0); 2455 2456 idx = 0; 2457 ata_for_each_sg(sg, qc) { 2458 u32 addr, offset; 2459 u32 sg_len, len; 2460 2461 /* determine if physical DMA addr spans 64K boundary. 2462 * Note h/w doesn't support 64-bit, so we unconditionally 2463 * truncate dma_addr_t to u32. 2464 */ 2465 addr = (u32) sg_dma_address(sg); 2466 sg_len = sg_dma_len(sg); 2467 2468 while (sg_len) { 2469 offset = addr & 0xffff; 2470 len = sg_len; 2471 if ((offset + sg_len) > 0x10000) 2472 len = 0x10000 - offset; 2473 2474 ap->prd[idx].addr = cpu_to_le32(addr); 2475 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff); 2476 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len); 2477 2478 idx++; 2479 sg_len -= len; 2480 addr += len; 2481 } 2482 } 2483 2484 if (idx) 2485 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); 2486} 2487/** 2488 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported 2489 * @qc: Metadata associated with taskfile to check 2490 * 2491 * Allow low-level driver to filter ATA PACKET commands, returning 2492 * a status indicating whether or not it is OK to use DMA for the 2493 * supplied PACKET command. 2494 * 2495 * LOCKING: 2496 * spin_lock_irqsave(host_set lock) 2497 * 2498 * RETURNS: 0 when ATAPI DMA can be used 2499 * nonzero otherwise 2500 */ 2501int ata_check_atapi_dma(struct ata_queued_cmd *qc) 2502{ 2503 struct ata_port *ap = qc->ap; 2504 int rc = 0; /* Assume ATAPI DMA is OK by default */ 2505 2506 if (ap->ops->check_atapi_dma) 2507 rc = ap->ops->check_atapi_dma(qc); 2508 2509 return rc; 2510} 2511/** 2512 * ata_qc_prep - Prepare taskfile for submission 2513 * @qc: Metadata associated with taskfile to be prepared 2514 * 2515 * Prepare ATA taskfile for submission. 
2516 * 2517 * LOCKING: 2518 * spin_lock_irqsave(host_set lock) 2519 */ 2520void ata_qc_prep(struct ata_queued_cmd *qc) 2521{ 2522 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) 2523 return; 2524 2525 ata_fill_sg(qc); 2526} 2527 2528/** 2529 * ata_sg_init_one - Associate command with memory buffer 2530 * @qc: Command to be associated 2531 * @buf: Memory buffer 2532 * @buflen: Length of memory buffer, in bytes. 2533 * 2534 * Initialize the data-related elements of queued_cmd @qc 2535 * to point to a single memory buffer, @buf of byte length @buflen. 2536 * 2537 * LOCKING: 2538 * spin_lock_irqsave(host_set lock) 2539 */ 2540 2541void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen) 2542{ 2543 struct scatterlist *sg; 2544 2545 qc->flags |= ATA_QCFLAG_SINGLE; 2546 2547 memset(&qc->sgent, 0, sizeof(qc->sgent)); 2548 qc->__sg = &qc->sgent; 2549 qc->n_elem = 1; 2550 qc->orig_n_elem = 1; 2551 qc->buf_virt = buf; 2552 2553 sg = qc->__sg; 2554 sg_init_one(sg, buf, buflen); 2555} 2556 2557/** 2558 * ata_sg_init - Associate command with scatter-gather table. 2559 * @qc: Command to be associated 2560 * @sg: Scatter-gather table. 2561 * @n_elem: Number of elements in s/g table. 2562 * 2563 * Initialize the data-related elements of queued_cmd @qc 2564 * to point to a scatter-gather table @sg, containing @n_elem 2565 * elements. 2566 * 2567 * LOCKING: 2568 * spin_lock_irqsave(host_set lock) 2569 */ 2570 2571void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, 2572 unsigned int n_elem) 2573{ 2574 qc->flags |= ATA_QCFLAG_SG; 2575 qc->__sg = sg; 2576 qc->n_elem = n_elem; 2577 qc->orig_n_elem = n_elem; 2578} 2579 2580/** 2581 * ata_sg_setup_one - DMA-map the memory buffer associated with a command. 2582 * @qc: Command with memory buffer to be mapped. 2583 * 2584 * DMA-map the memory buffer associated with queued_cmd @qc. 2585 * 2586 * LOCKING: 2587 * spin_lock_irqsave(host_set lock) 2588 * 2589 * RETURNS: 2590 * Zero on success, negative on error. 
2591 */ 2592 2593static int ata_sg_setup_one(struct ata_queued_cmd *qc) 2594{ 2595 struct ata_port *ap = qc->ap; 2596 int dir = qc->dma_dir; 2597 struct scatterlist *sg = qc->__sg; 2598 dma_addr_t dma_address; 2599 2600 /* we must lengthen transfers to end on a 32-bit boundary */ 2601 qc->pad_len = sg->length & 3; 2602 if (qc->pad_len) { 2603 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ); 2604 struct scatterlist *psg = &qc->pad_sgent; 2605 2606 assert(qc->dev->class == ATA_DEV_ATAPI); 2607 2608 memset(pad_buf, 0, ATA_DMA_PAD_SZ); 2609 2610 if (qc->tf.flags & ATA_TFLAG_WRITE) 2611 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len, 2612 qc->pad_len); 2613 2614 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ); 2615 sg_dma_len(psg) = ATA_DMA_PAD_SZ; 2616 /* trim sg */ 2617 sg->length -= qc->pad_len; 2618 2619 DPRINTK("padding done, sg->length=%u pad_len=%u\n", 2620 sg->length, qc->pad_len); 2621 } 2622 2623 dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt, 2624 sg->length, dir); 2625 if (dma_mapping_error(dma_address)) { 2626 /* restore sg */ 2627 sg->length += qc->pad_len; 2628 return -1; 2629 } 2630 2631 sg_dma_address(sg) = dma_address; 2632 sg_dma_len(sg) = sg->length; 2633 2634 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg), 2635 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); 2636 2637 return 0; 2638} 2639 2640/** 2641 * ata_sg_setup - DMA-map the scatter-gather table associated with a command. 2642 * @qc: Command with scatter-gather table to be mapped. 2643 * 2644 * DMA-map the scatter-gather table associated with queued_cmd @qc. 2645 * 2646 * LOCKING: 2647 * spin_lock_irqsave(host_set lock) 2648 * 2649 * RETURNS: 2650 * Zero on success, negative on error. 
2651 * 2652 */ 2653 2654static int ata_sg_setup(struct ata_queued_cmd *qc) 2655{ 2656 struct ata_port *ap = qc->ap; 2657 struct scatterlist *sg = qc->__sg; 2658 struct scatterlist *lsg = &sg[qc->n_elem - 1]; 2659 int n_elem, dir; 2660 2661 VPRINTK("ENTER, ata%u\n", ap->id); 2662 assert(qc->flags & ATA_QCFLAG_SG); 2663 2664 /* we must lengthen transfers to end on a 32-bit boundary */ 2665 qc->pad_len = lsg->length & 3; 2666 if (qc->pad_len) { 2667 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ); 2668 struct scatterlist *psg = &qc->pad_sgent; 2669 unsigned int offset; 2670 2671 assert(qc->dev->class == ATA_DEV_ATAPI); 2672 2673 memset(pad_buf, 0, ATA_DMA_PAD_SZ); 2674 2675 /* 2676 * psg->page/offset are used to copy to-be-written 2677 * data in this function or read data in ata_sg_clean. 2678 */ 2679 offset = lsg->offset + lsg->length - qc->pad_len; 2680 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT); 2681 psg->offset = offset_in_page(offset); 2682 2683 if (qc->tf.flags & ATA_TFLAG_WRITE) { 2684 void *addr = kmap_atomic(psg->page, KM_IRQ0); 2685 memcpy(pad_buf, addr + psg->offset, qc->pad_len); 2686 kunmap_atomic(psg->page, KM_IRQ0); 2687 } 2688 2689 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ); 2690 sg_dma_len(psg) = ATA_DMA_PAD_SZ; 2691 /* trim last sg */ 2692 lsg->length -= qc->pad_len; 2693 2694 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n", 2695 qc->n_elem - 1, lsg->length, qc->pad_len); 2696 } 2697 2698 dir = qc->dma_dir; 2699 n_elem = dma_map_sg(ap->host_set->dev, sg, qc->n_elem, dir); 2700 if (n_elem < 1) { 2701 /* restore last sg */ 2702 lsg->length += qc->pad_len; 2703 return -1; 2704 } 2705 2706 DPRINTK("%d sg elements mapped\n", n_elem); 2707 2708 qc->n_elem = n_elem; 2709 2710 return 0; 2711} 2712 2713/** 2714 * ata_poll_qc_complete - turn irq back on and finish qc 2715 * @qc: Command to complete 2716 * @err_mask: ATA status register content 2717 * 2718 * LOCKING: 2719 * None. 
(grabs host lock) 2720 */ 2721 2722void ata_poll_qc_complete(struct ata_queued_cmd *qc, unsigned int err_mask) 2723{ 2724 struct ata_port *ap = qc->ap; 2725 unsigned long flags; 2726 2727 spin_lock_irqsave(&ap->host_set->lock, flags); 2728 ap->flags &= ~ATA_FLAG_NOINTR; 2729 ata_irq_on(ap); 2730 ata_qc_complete(qc, err_mask); 2731 spin_unlock_irqrestore(&ap->host_set->lock, flags); 2732} 2733 2734/** 2735 * ata_pio_poll - 2736 * @ap: the target ata_port 2737 * 2738 * LOCKING: 2739 * None. (executing in kernel thread context) 2740 * 2741 * RETURNS: 2742 * timeout value to use 2743 */ 2744 2745static unsigned long ata_pio_poll(struct ata_port *ap) 2746{ 2747 u8 status; 2748 unsigned int poll_state = HSM_ST_UNKNOWN; 2749 unsigned int reg_state = HSM_ST_UNKNOWN; 2750 2751 switch (ap->hsm_task_state) { 2752 case HSM_ST: 2753 case HSM_ST_POLL: 2754 poll_state = HSM_ST_POLL; 2755 reg_state = HSM_ST; 2756 break; 2757 case HSM_ST_LAST: 2758 case HSM_ST_LAST_POLL: 2759 poll_state = HSM_ST_LAST_POLL; 2760 reg_state = HSM_ST_LAST; 2761 break; 2762 default: 2763 BUG(); 2764 break; 2765 } 2766 2767 status = ata_chk_status(ap); 2768 if (status & ATA_BUSY) { 2769 if (time_after(jiffies, ap->pio_task_timeout)) { 2770 ap->hsm_task_state = HSM_ST_TMOUT; 2771 return 0; 2772 } 2773 ap->hsm_task_state = poll_state; 2774 return ATA_SHORT_PAUSE; 2775 } 2776 2777 ap->hsm_task_state = reg_state; 2778 return 0; 2779} 2780 2781/** 2782 * ata_pio_complete - check if drive is busy or idle 2783 * @ap: the target ata_port 2784 * 2785 * LOCKING: 2786 * None. (executing in kernel thread context) 2787 * 2788 * RETURNS: 2789 * Non-zero if qc completed, zero otherwise. 2790 */ 2791 2792static int ata_pio_complete (struct ata_port *ap) 2793{ 2794 struct ata_queued_cmd *qc; 2795 u8 drv_stat; 2796 2797 /* 2798 * This is purely heuristic. This is a fast path. Sometimes when 2799 * we enter, BSY will be cleared in a chk-status or two. If not, 2800 * the drive is probably seeking or something. 
Snooze for a couple 2801 * msecs, then chk-status again. If still busy, fall back to 2802 * HSM_ST_POLL state. 2803 */ 2804 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10); 2805 if (drv_stat & (ATA_BUSY | ATA_DRQ)) { 2806 msleep(2); 2807 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10); 2808 if (drv_stat & (ATA_BUSY | ATA_DRQ)) { 2809 ap->hsm_task_state = HSM_ST_LAST_POLL; 2810 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO; 2811 return 0; 2812 } 2813 } 2814 2815 drv_stat = ata_wait_idle(ap); 2816 if (!ata_ok(drv_stat)) { 2817 ap->hsm_task_state = HSM_ST_ERR; 2818 return 0; 2819 } 2820 2821 qc = ata_qc_from_tag(ap, ap->active_tag); 2822 assert(qc != NULL); 2823 2824 ap->hsm_task_state = HSM_ST_IDLE; 2825 2826 ata_poll_qc_complete(qc, 0); 2827 2828 /* another command may start at this point */ 2829 2830 return 1; 2831} 2832 2833 2834/** 2835 * swap_buf_le16 - swap halves of 16-words in place 2836 * @buf: Buffer to swap 2837 * @buf_words: Number of 16-bit words in buffer. 2838 * 2839 * Swap halves of 16-bit words if needed to convert from 2840 * little-endian byte order to native cpu byte order, or 2841 * vice-versa. 2842 * 2843 * LOCKING: 2844 * Inherited from caller. 2845 */ 2846void swap_buf_le16(u16 *buf, unsigned int buf_words) 2847{ 2848#ifdef __BIG_ENDIAN 2849 unsigned int i; 2850 2851 for (i = 0; i < buf_words; i++) 2852 buf[i] = le16_to_cpu(buf[i]); 2853#endif /* __BIG_ENDIAN */ 2854} 2855 2856/** 2857 * ata_mmio_data_xfer - Transfer data by MMIO 2858 * @ap: port to read/write 2859 * @buf: data buffer 2860 * @buflen: buffer length 2861 * @write_data: read/write 2862 * 2863 * Transfer data from/to the device data register by MMIO. 2864 * 2865 * LOCKING: 2866 * Inherited from caller. 
2867 */ 2868 2869static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf, 2870 unsigned int buflen, int write_data) 2871{ 2872 unsigned int i; 2873 unsigned int words = buflen >> 1; 2874 u16 *buf16 = (u16 *) buf; 2875 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr; 2876 2877 /* Transfer multiple of 2 bytes */ 2878 if (write_data) { 2879 for (i = 0; i < words; i++) 2880 writew(le16_to_cpu(buf16[i]), mmio); 2881 } else { 2882 for (i = 0; i < words; i++) 2883 buf16[i] = cpu_to_le16(readw(mmio)); 2884 } 2885 2886 /* Transfer trailing 1 byte, if any. */ 2887 if (unlikely(buflen & 0x01)) { 2888 u16 align_buf[1] = { 0 }; 2889 unsigned char *trailing_buf = buf + buflen - 1; 2890 2891 if (write_data) { 2892 memcpy(align_buf, trailing_buf, 1); 2893 writew(le16_to_cpu(align_buf[0]), mmio); 2894 } else { 2895 align_buf[0] = cpu_to_le16(readw(mmio)); 2896 memcpy(trailing_buf, align_buf, 1); 2897 } 2898 } 2899} 2900 2901/** 2902 * ata_pio_data_xfer - Transfer data by PIO 2903 * @ap: port to read/write 2904 * @buf: data buffer 2905 * @buflen: buffer length 2906 * @write_data: read/write 2907 * 2908 * Transfer data from/to the device data register by PIO. 2909 * 2910 * LOCKING: 2911 * Inherited from caller. 2912 */ 2913 2914static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf, 2915 unsigned int buflen, int write_data) 2916{ 2917 unsigned int words = buflen >> 1; 2918 2919 /* Transfer multiple of 2 bytes */ 2920 if (write_data) 2921 outsw(ap->ioaddr.data_addr, buf, words); 2922 else 2923 insw(ap->ioaddr.data_addr, buf, words); 2924 2925 /* Transfer trailing 1 byte, if any. 
*/ 2926 if (unlikely(buflen & 0x01)) { 2927 u16 align_buf[1] = { 0 }; 2928 unsigned char *trailing_buf = buf + buflen - 1; 2929 2930 if (write_data) { 2931 memcpy(align_buf, trailing_buf, 1); 2932 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr); 2933 } else { 2934 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr)); 2935 memcpy(trailing_buf, align_buf, 1); 2936 } 2937 } 2938} 2939 2940/** 2941 * ata_data_xfer - Transfer data from/to the data register. 2942 * @ap: port to read/write 2943 * @buf: data buffer 2944 * @buflen: buffer length 2945 * @do_write: read/write 2946 * 2947 * Transfer data from/to the device data register. 2948 * 2949 * LOCKING: 2950 * Inherited from caller. 2951 */ 2952 2953static void ata_data_xfer(struct ata_port *ap, unsigned char *buf, 2954 unsigned int buflen, int do_write) 2955{ 2956 if (ap->flags & ATA_FLAG_MMIO) 2957 ata_mmio_data_xfer(ap, buf, buflen, do_write); 2958 else 2959 ata_pio_data_xfer(ap, buf, buflen, do_write); 2960} 2961 2962/** 2963 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data. 2964 * @qc: Command on going 2965 * 2966 * Transfer ATA_SECT_SIZE of data from/to the ATA device. 2967 * 2968 * LOCKING: 2969 * Inherited from caller. 
2970 */ 2971 2972static void ata_pio_sector(struct ata_queued_cmd *qc) 2973{ 2974 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE); 2975 struct scatterlist *sg = qc->__sg; 2976 struct ata_port *ap = qc->ap; 2977 struct page *page; 2978 unsigned int offset; 2979 unsigned char *buf; 2980 2981 if (qc->cursect == (qc->nsect - 1)) 2982 ap->hsm_task_state = HSM_ST_LAST; 2983 2984 page = sg[qc->cursg].page; 2985 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE; 2986 2987 /* get the current page and offset */ 2988 page = nth_page(page, (offset >> PAGE_SHIFT)); 2989 offset %= PAGE_SIZE; 2990 2991 buf = kmap(page) + offset; 2992 2993 qc->cursect++; 2994 qc->cursg_ofs++; 2995 2996 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) { 2997 qc->cursg++; 2998 qc->cursg_ofs = 0; 2999 } 3000 3001 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); 3002 3003 /* do the actual data transfer */ 3004 do_write = (qc->tf.flags & ATA_TFLAG_WRITE); 3005 ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write); 3006 3007 kunmap(page); 3008} 3009 3010/** 3011 * __atapi_pio_bytes - Transfer data from/to the ATAPI device. 3012 * @qc: Command on going 3013 * @bytes: number of bytes 3014 * 3015 * Transfer Transfer data from/to the ATAPI device. 3016 * 3017 * LOCKING: 3018 * Inherited from caller. 3019 * 3020 */ 3021 3022static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes) 3023{ 3024 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE); 3025 struct scatterlist *sg = qc->__sg; 3026 struct ata_port *ap = qc->ap; 3027 struct page *page; 3028 unsigned char *buf; 3029 unsigned int offset, count; 3030 3031 if (qc->curbytes + bytes >= qc->nbytes) 3032 ap->hsm_task_state = HSM_ST_LAST; 3033 3034next_sg: 3035 if (unlikely(qc->cursg >= qc->n_elem)) { 3036 /* 3037 * The end of qc->sg is reached and the device expects 3038 * more data to transfer. 
In order not to overrun qc->sg 3039 * and fulfill length specified in the byte count register, 3040 * - for read case, discard trailing data from the device 3041 * - for write case, padding zero data to the device 3042 */ 3043 u16 pad_buf[1] = { 0 }; 3044 unsigned int words = bytes >> 1; 3045 unsigned int i; 3046 3047 if (words) /* warning if bytes > 1 */ 3048 printk(KERN_WARNING "ata%u: %u bytes trailing data\n", 3049 ap->id, bytes); 3050 3051 for (i = 0; i < words; i++) 3052 ata_data_xfer(ap, (unsigned char*)pad_buf, 2, do_write); 3053 3054 ap->hsm_task_state = HSM_ST_LAST; 3055 return; 3056 } 3057 3058 sg = &qc->__sg[qc->cursg]; 3059 3060 page = sg->page; 3061 offset = sg->offset + qc->cursg_ofs; 3062 3063 /* get the current page and offset */ 3064 page = nth_page(page, (offset >> PAGE_SHIFT)); 3065 offset %= PAGE_SIZE; 3066 3067 /* don't overrun current sg */ 3068 count = min(sg->length - qc->cursg_ofs, bytes); 3069 3070 /* don't cross page boundaries */ 3071 count = min(count, (unsigned int)PAGE_SIZE - offset); 3072 3073 buf = kmap(page) + offset; 3074 3075 bytes -= count; 3076 qc->curbytes += count; 3077 qc->cursg_ofs += count; 3078 3079 if (qc->cursg_ofs == sg->length) { 3080 qc->cursg++; 3081 qc->cursg_ofs = 0; 3082 } 3083 3084 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); 3085 3086 /* do the actual data transfer */ 3087 ata_data_xfer(ap, buf, count, do_write); 3088 3089 kunmap(page); 3090 3091 if (bytes) 3092 goto next_sg; 3093} 3094 3095/** 3096 * atapi_pio_bytes - Transfer data from/to the ATAPI device. 3097 * @qc: Command on going 3098 * 3099 * Transfer Transfer data from/to the ATAPI device. 3100 * 3101 * LOCKING: 3102 * Inherited from caller. 3103 */ 3104 3105static void atapi_pio_bytes(struct ata_queued_cmd *qc) 3106{ 3107 struct ata_port *ap = qc->ap; 3108 struct ata_device *dev = qc->dev; 3109 unsigned int ireason, bc_lo, bc_hi, bytes; 3110 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 
1 : 0; 3111 3112 ap->ops->tf_read(ap, &qc->tf); 3113 ireason = qc->tf.nsect; 3114 bc_lo = qc->tf.lbam; 3115 bc_hi = qc->tf.lbah; 3116 bytes = (bc_hi << 8) | bc_lo; 3117 3118 /* shall be cleared to zero, indicating xfer of data */ 3119 if (ireason & (1 << 0)) 3120 goto err_out; 3121 3122 /* make sure transfer direction matches expected */ 3123 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0; 3124 if (do_write != i_write) 3125 goto err_out; 3126 3127 __atapi_pio_bytes(qc, bytes); 3128 3129 return; 3130 3131err_out: 3132 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n", 3133 ap->id, dev->devno); 3134 ap->hsm_task_state = HSM_ST_ERR; 3135} 3136 3137/** 3138 * ata_pio_block - start PIO on a block 3139 * @ap: the target ata_port 3140 * 3141 * LOCKING: 3142 * None. (executing in kernel thread context) 3143 */ 3144 3145static void ata_pio_block(struct ata_port *ap) 3146{ 3147 struct ata_queued_cmd *qc; 3148 u8 status; 3149 3150 /* 3151 * This is purely heuristic. This is a fast path. 3152 * Sometimes when we enter, BSY will be cleared in 3153 * a chk-status or two. If not, the drive is probably seeking 3154 * or something. Snooze for a couple msecs, then 3155 * chk-status again. If still busy, fall back to 3156 * HSM_ST_POLL state. 
3157 */ 3158 status = ata_busy_wait(ap, ATA_BUSY, 5); 3159 if (status & ATA_BUSY) { 3160 msleep(2); 3161 status = ata_busy_wait(ap, ATA_BUSY, 10); 3162 if (status & ATA_BUSY) { 3163 ap->hsm_task_state = HSM_ST_POLL; 3164 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO; 3165 return; 3166 } 3167 } 3168 3169 qc = ata_qc_from_tag(ap, ap->active_tag); 3170 assert(qc != NULL); 3171 3172 if (is_atapi_taskfile(&qc->tf)) { 3173 /* no more data to transfer or unsupported ATAPI command */ 3174 if ((status & ATA_DRQ) == 0) { 3175 ap->hsm_task_state = HSM_ST_LAST; 3176 return; 3177 } 3178 3179 atapi_pio_bytes(qc); 3180 } else { 3181 /* handle BSY=0, DRQ=0 as error */ 3182 if ((status & ATA_DRQ) == 0) { 3183 ap->hsm_task_state = HSM_ST_ERR; 3184 return; 3185 } 3186 3187 ata_pio_sector(qc); 3188 } 3189} 3190 3191static void ata_pio_error(struct ata_port *ap) 3192{ 3193 struct ata_queued_cmd *qc; 3194 3195 printk(KERN_WARNING "ata%u: PIO error\n", ap->id); 3196 3197 qc = ata_qc_from_tag(ap, ap->active_tag); 3198 assert(qc != NULL); 3199 3200 ap->hsm_task_state = HSM_ST_IDLE; 3201 3202 ata_poll_qc_complete(qc, AC_ERR_ATA_BUS); 3203} 3204 3205static void ata_pio_task(void *_data) 3206{ 3207 struct ata_port *ap = _data; 3208 unsigned long timeout; 3209 int qc_completed; 3210 3211fsm_start: 3212 timeout = 0; 3213 qc_completed = 0; 3214 3215 switch (ap->hsm_task_state) { 3216 case HSM_ST_IDLE: 3217 return; 3218 3219 case HSM_ST: 3220 ata_pio_block(ap); 3221 break; 3222 3223 case HSM_ST_LAST: 3224 qc_completed = ata_pio_complete(ap); 3225 break; 3226 3227 case HSM_ST_POLL: 3228 case HSM_ST_LAST_POLL: 3229 timeout = ata_pio_poll(ap); 3230 break; 3231 3232 case HSM_ST_TMOUT: 3233 case HSM_ST_ERR: 3234 ata_pio_error(ap); 3235 return; 3236 } 3237 3238 if (timeout) 3239 queue_delayed_work(ata_wq, &ap->pio_task, timeout); 3240 else if (!qc_completed) 3241 goto fsm_start; 3242} 3243 3244/** 3245 * ata_qc_timeout - Handle timeout of queued command 3246 * @qc: Command that timed out 3247 * 3248 * 
Some part of the kernel (currently, only the SCSI layer) 3249 * has noticed that the active command on port @ap has not 3250 * completed after a specified length of time. Handle this 3251 * condition by disabling DMA (if necessary) and completing 3252 * transactions, with error if necessary. 3253 * 3254 * This also handles the case of the "lost interrupt", where 3255 * for some reason (possibly hardware bug, possibly driver bug) 3256 * an interrupt was not delivered to the driver, even though the 3257 * transaction completed successfully. 3258 * 3259 * LOCKING: 3260 * Inherited from SCSI layer (none, can sleep) 3261 */ 3262 3263static void ata_qc_timeout(struct ata_queued_cmd *qc) 3264{ 3265 struct ata_port *ap = qc->ap; 3266 struct ata_host_set *host_set = ap->host_set; 3267 struct ata_device *dev = qc->dev; 3268 u8 host_stat = 0, drv_stat; 3269 unsigned long flags; 3270 3271 DPRINTK("ENTER\n"); 3272 3273 /* FIXME: doesn't this conflict with timeout handling? */ 3274 if (qc->dev->class == ATA_DEV_ATAPI && qc->scsicmd) { 3275 struct scsi_cmnd *cmd = qc->scsicmd; 3276 3277 if (!(cmd->eh_eflags & SCSI_EH_CANCEL_CMD)) { 3278 3279 /* finish completing original command */ 3280 spin_lock_irqsave(&host_set->lock, flags); 3281 __ata_qc_complete(qc); 3282 spin_unlock_irqrestore(&host_set->lock, flags); 3283 3284 atapi_request_sense(ap, dev, cmd); 3285 3286 cmd->result = (CHECK_CONDITION << 1) | (DID_OK << 16); 3287 scsi_finish_command(cmd); 3288 3289 goto out; 3290 } 3291 } 3292 3293 spin_lock_irqsave(&host_set->lock, flags); 3294 3295 /* hack alert! We cannot use the supplied completion 3296 * function from inside the ->eh_strategy_handler() thread. 3297 * libata is the only user of ->eh_strategy_handler() in 3298 * any kernel, so the default scsi_done() assumes it is 3299 * not being called from the SCSI EH. 
3300 */ 3301 qc->scsidone = scsi_finish_command; 3302 3303 switch (qc->tf.protocol) { 3304 3305 case ATA_PROT_DMA: 3306 case ATA_PROT_ATAPI_DMA: 3307 host_stat = ap->ops->bmdma_status(ap); 3308 3309 /* before we do anything else, clear DMA-Start bit */ 3310 ap->ops->bmdma_stop(qc); 3311 3312 /* fall through */ 3313 3314 default: 3315 ata_altstatus(ap); 3316 drv_stat = ata_chk_status(ap); 3317 3318 /* ack bmdma irq events */ 3319 ap->ops->irq_clear(ap); 3320 3321 printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n", 3322 ap->id, qc->tf.command, drv_stat, host_stat); 3323 3324 /* complete taskfile transaction */ 3325 ata_qc_complete(qc, ac_err_mask(drv_stat)); 3326 break; 3327 } 3328 3329 spin_unlock_irqrestore(&host_set->lock, flags); 3330 3331out: 3332 DPRINTK("EXIT\n"); 3333} 3334 3335/** 3336 * ata_eng_timeout - Handle timeout of queued command 3337 * @ap: Port on which timed-out command is active 3338 * 3339 * Some part of the kernel (currently, only the SCSI layer) 3340 * has noticed that the active command on port @ap has not 3341 * completed after a specified length of time. Handle this 3342 * condition by disabling DMA (if necessary) and completing 3343 * transactions, with error if necessary. 3344 * 3345 * This also handles the case of the "lost interrupt", where 3346 * for some reason (possibly hardware bug, possibly driver bug) 3347 * an interrupt was not delivered to the driver, even though the 3348 * transaction completed successfully. 
3349 * 3350 * LOCKING: 3351 * Inherited from SCSI layer (none, can sleep) 3352 */ 3353 3354void ata_eng_timeout(struct ata_port *ap) 3355{ 3356 struct ata_queued_cmd *qc; 3357 3358 DPRINTK("ENTER\n"); 3359 3360 qc = ata_qc_from_tag(ap, ap->active_tag); 3361 if (qc) 3362 ata_qc_timeout(qc); 3363 else { 3364 printk(KERN_ERR "ata%u: BUG: timeout without command\n", 3365 ap->id); 3366 goto out; 3367 } 3368 3369out: 3370 DPRINTK("EXIT\n"); 3371} 3372 3373/** 3374 * ata_qc_new - Request an available ATA command, for queueing 3375 * @ap: Port associated with device @dev 3376 * @dev: Device from whom we request an available command structure 3377 * 3378 * LOCKING: 3379 * None. 3380 */ 3381 3382static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap) 3383{ 3384 struct ata_queued_cmd *qc = NULL; 3385 unsigned int i; 3386 3387 for (i = 0; i < ATA_MAX_QUEUE; i++) 3388 if (!test_and_set_bit(i, &ap->qactive)) { 3389 qc = ata_qc_from_tag(ap, i); 3390 break; 3391 } 3392 3393 if (qc) 3394 qc->tag = i; 3395 3396 return qc; 3397} 3398 3399/** 3400 * ata_qc_new_init - Request an available ATA command, and initialize it 3401 * @ap: Port associated with device @dev 3402 * @dev: Device from whom we request an available command structure 3403 * 3404 * LOCKING: 3405 * None. 
3406 */ 3407 3408struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, 3409 struct ata_device *dev) 3410{ 3411 struct ata_queued_cmd *qc; 3412 3413 qc = ata_qc_new(ap); 3414 if (qc) { 3415 qc->__sg = NULL; 3416 qc->flags = 0; 3417 qc->scsicmd = NULL; 3418 qc->ap = ap; 3419 qc->dev = dev; 3420 qc->cursect = qc->cursg = qc->cursg_ofs = 0; 3421 qc->nsect = 0; 3422 qc->nbytes = qc->curbytes = 0; 3423 3424 ata_tf_init(ap, &qc->tf, dev->devno); 3425 } 3426 3427 return qc; 3428} 3429 3430int ata_qc_complete_noop(struct ata_queued_cmd *qc, unsigned int err_mask) 3431{ 3432 return 0; 3433} 3434 3435static void __ata_qc_complete(struct ata_queued_cmd *qc) 3436{ 3437 struct ata_port *ap = qc->ap; 3438 unsigned int tag, do_clear = 0; 3439 3440 qc->flags = 0; 3441 tag = qc->tag; 3442 if (likely(ata_tag_valid(tag))) { 3443 if (tag == ap->active_tag) 3444 ap->active_tag = ATA_TAG_POISON; 3445 qc->tag = ATA_TAG_POISON; 3446 do_clear = 1; 3447 } 3448 3449 if (qc->waiting) { 3450 struct completion *waiting = qc->waiting; 3451 qc->waiting = NULL; 3452 complete(waiting); 3453 } 3454 3455 if (likely(do_clear)) 3456 clear_bit(tag, &ap->qactive); 3457} 3458 3459/** 3460 * ata_qc_free - free unused ata_queued_cmd 3461 * @qc: Command to complete 3462 * 3463 * Designed to free unused ata_queued_cmd object 3464 * in case something prevents using it. 3465 * 3466 * LOCKING: 3467 * spin_lock_irqsave(host_set lock) 3468 */ 3469void ata_qc_free(struct ata_queued_cmd *qc) 3470{ 3471 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */ 3472 assert(qc->waiting == NULL); /* nothing should be waiting */ 3473 3474 __ata_qc_complete(qc); 3475} 3476 3477/** 3478 * ata_qc_complete - Complete an active ATA command 3479 * @qc: Command to complete 3480 * @err_mask: ATA Status register contents 3481 * 3482 * Indicate to the mid and upper layers that an ATA 3483 * command has completed, with either an ok or not-ok status. 
3484 * 3485 * LOCKING: 3486 * spin_lock_irqsave(host_set lock) 3487 */ 3488 3489void ata_qc_complete(struct ata_queued_cmd *qc, unsigned int err_mask) 3490{ 3491 int rc; 3492 3493 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */ 3494 assert(qc->flags & ATA_QCFLAG_ACTIVE); 3495 3496 if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) 3497 ata_sg_clean(qc); 3498 3499 /* atapi: mark qc as inactive to prevent the interrupt handler 3500 * from completing the command twice later, before the error handler 3501 * is called. (when rc != 0 and atapi request sense is needed) 3502 */ 3503 qc->flags &= ~ATA_QCFLAG_ACTIVE; 3504 3505 /* call completion callback */ 3506 rc = qc->complete_fn(qc, err_mask); 3507 3508 /* if callback indicates not to complete command (non-zero), 3509 * return immediately 3510 */ 3511 if (rc != 0) 3512 return; 3513 3514 __ata_qc_complete(qc); 3515 3516 VPRINTK("EXIT\n"); 3517} 3518 3519static inline int ata_should_dma_map(struct ata_queued_cmd *qc) 3520{ 3521 struct ata_port *ap = qc->ap; 3522 3523 switch (qc->tf.protocol) { 3524 case ATA_PROT_DMA: 3525 case ATA_PROT_ATAPI_DMA: 3526 return 1; 3527 3528 case ATA_PROT_ATAPI: 3529 case ATA_PROT_PIO: 3530 case ATA_PROT_PIO_MULT: 3531 if (ap->flags & ATA_FLAG_PIO_DMA) 3532 return 1; 3533 3534 /* fall through */ 3535 3536 default: 3537 return 0; 3538 } 3539 3540 /* never reached */ 3541} 3542 3543/** 3544 * ata_qc_issue - issue taskfile to device 3545 * @qc: command to issue to device 3546 * 3547 * Prepare an ATA command to submission to device. 3548 * This includes mapping the data into a DMA-able 3549 * area, filling in the S/G table, and finally 3550 * writing the taskfile to hardware, starting the command. 3551 * 3552 * LOCKING: 3553 * spin_lock_irqsave(host_set lock) 3554 * 3555 * RETURNS: 3556 * Zero on success, negative on error. 
 */

int ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	if (ata_should_dma_map(qc)) {
		/* map either a scatter/gather list or a single buffer,
		 * depending on how the command was set up */
		if (qc->flags & ATA_QCFLAG_SG) {
			if (ata_sg_setup(qc))
				goto err_out;
		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
			if (ata_sg_setup_one(qc))
				goto err_out;
		}
	} else {
		qc->flags &= ~ATA_QCFLAG_DMAMAP;
	}

	ap->ops->qc_prep(qc);

	/* mark the command active before handing it to the LLDD */
	qc->ap->active_tag = qc->tag;
	qc->flags |= ATA_QCFLAG_ACTIVE;

	return ap->ops->qc_issue(qc);

err_out:
	return -1;
}


/**
 *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
 *	@qc: command to issue to device
 *
 *	Using various libata functions and hooks, this function
 *	starts an ATA command.  ATA commands are grouped into
 *	classes called "protocols", and issuing each type of protocol
 *	is slightly different.
 *
 *	May be used as the qc_issue() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */

int ata_qc_issue_prot(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	ata_dev_select(ap, qc->dev->devno, 1, 0);

	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		ata_tf_to_host(ap, &qc->tf);
		break;

	case ATA_PROT_DMA:
		ap->ops->tf_load(ap, &qc->tf);	/* load tf registers */
		ap->ops->bmdma_setup(qc);	/* set up bmdma */
		ap->ops->bmdma_start(qc);	/* initiate bmdma */
		break;

	case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
		ata_qc_set_polling(qc);
		ata_tf_to_host(ap, &qc->tf);
		ap->hsm_task_state = HSM_ST;
		queue_work(ata_wq, &ap->pio_task);
		break;

	case ATA_PROT_ATAPI:
		/* polled: packet_task sends the CDB and polls status */
		ata_qc_set_polling(qc);
		ata_tf_to_host(ap, &qc->tf);
		queue_work(ata_wq, &ap->packet_task);
		break;

	case ATA_PROT_ATAPI_NODATA:
		/* irq-driven: NOINTR masks irqs until the CDB is sent */
		ap->flags |= ATA_FLAG_NOINTR;
		ata_tf_to_host(ap, &qc->tf);
		queue_work(ata_wq, &ap->packet_task);
		break;

	case ATA_PROT_ATAPI_DMA:
		ap->flags |= ATA_FLAG_NOINTR;
		ap->ops->tf_load(ap, &qc->tf);	/* load tf registers */
		ap->ops->bmdma_setup(qc);	/* set up bmdma */
		queue_work(ata_wq, &ap->packet_task);
		break;

	default:
		WARN_ON(1);
		return -1;
	}

	return 0;
}

/**
 *	ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction
 *	@qc: Info associated with this ATA transaction.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u8 dmactl;
	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;

	/* load PRD table addr.
 */
	mb();	/* make sure PRD table writes are visible to controller */
	writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = readb(mmio + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	if (!rw)
		dmactl |= ATA_DMA_WR;
	writeb(dmactl, mmio + ATA_DMA_CMD);

	/* issue r/w command */
	ap->ops->exec_command(ap, &qc->tf);
}

/**
 *	ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction
 *	@qc: Info associated with this ATA transaction.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
	u8 dmactl;

	/* start host DMA transaction */
	dmactl = readb(mmio + ATA_DMA_CMD);
	writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);

	/* Strictly, one may wish to issue a readb() here, to
	 * flush the mmio write.  However, control also passes
	 * to the hardware at this point, and it will interrupt
	 * us when we are to resume control.  So, in effect,
	 * we don't care when the mmio write flushes.
	 * Further, a read of the DMA status register _immediately_
	 * following the write may not be what certain flaky hardware
	 * expects, so I think it is best to not add a readb()
	 * without first testing all the MMIO ATA cards/mobos.
	 * Or maybe I'm just being paranoid.
	 */
}

/**
 *	ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
 *	@qc: Info associated with this ATA transaction.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
	u8 dmactl;

	/* load PRD table addr.
 */
	outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

	/* specify data direction, triple-check start bit is clear */
	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
	if (!rw)
		dmactl |= ATA_DMA_WR;
	outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

	/* issue r/w command */
	ap->ops->exec_command(ap, &qc->tf);
}

/**
 *	ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
 *	@qc: Info associated with this ATA transaction.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	u8 dmactl;

	/* start host DMA transaction */
	dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
	outb(dmactl | ATA_DMA_START,
	     ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}


/**
 *	ata_bmdma_start - Start a PCI IDE BMDMA transaction
 *	@qc: Info associated with this ATA transaction.
 *
 *	Writes the ATA_DMA_START flag to the DMA command register.
 *
 *	May be used as the bmdma_start() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
void ata_bmdma_start(struct ata_queued_cmd *qc)
{
	/* dispatch to the MMIO or port-I/O implementation */
	if (qc->ap->flags & ATA_FLAG_MMIO)
		ata_bmdma_start_mmio(qc);
	else
		ata_bmdma_start_pio(qc);
}


/**
 *	ata_bmdma_setup - Set up PCI IDE BMDMA transaction
 *	@qc: Info associated with this ATA transaction.
 *
 *	Writes address of PRD table to device's PRD Table Address
 *	register, sets the DMA control register, and calls
 *	ops->exec_command() to start the transfer.
 *
 *	May be used as the bmdma_setup() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */
void ata_bmdma_setup(struct ata_queued_cmd *qc)
{
	/* dispatch to the MMIO or port-I/O implementation */
	if (qc->ap->flags & ATA_FLAG_MMIO)
		ata_bmdma_setup_mmio(qc);
	else
		ata_bmdma_setup_pio(qc);
}


/**
 *	ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
 *	@ap: Port associated with this ATA transaction.
 *
 *	Clear interrupt and error flags in DMA status register.
 *
 *	May be used as the irq_clear() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

void ata_bmdma_irq_clear(struct ata_port *ap)
{
	if (ap->flags & ATA_FLAG_MMIO) {
		void __iomem *mmio = ((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS;
		/* writing the value just read back clears the
		 * write-one-to-clear irq/error bits (per SFF-8038i) */
		writeb(readb(mmio), mmio);
	} else {
		unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
		outb(inb(addr), addr);
	}

}


/**
 *	ata_bmdma_status - Read PCI IDE BMDMA status
 *	@ap: Port associated with this ATA transaction.
 *
 *	Read and return BMDMA status register.
 *
 *	May be used as the bmdma_status() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host_set lock)
 */

u8 ata_bmdma_status(struct ata_port *ap)
{
	u8 host_stat;
	if (ap->flags & ATA_FLAG_MMIO) {
		void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
		host_stat = readb(mmio + ATA_DMA_STATUS);
	} else
		host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
	return host_stat;
}


/**
 *	ata_bmdma_stop - Stop PCI IDE BMDMA transfer
 *	@qc: Command we are ending DMA for
 *
 *	Clears the ATA_DMA_START flag in the dma control register
 *
 *	May be used as the bmdma_stop() entry in ata_port_operations.
3863 * 3864 * LOCKING: 3865 * spin_lock_irqsave(host_set lock) 3866 */ 3867 3868void ata_bmdma_stop(struct ata_queued_cmd *qc) 3869{ 3870 struct ata_port *ap = qc->ap; 3871 if (ap->flags & ATA_FLAG_MMIO) { 3872 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr; 3873 3874 /* clear start/stop bit */ 3875 writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START, 3876 mmio + ATA_DMA_CMD); 3877 } else { 3878 /* clear start/stop bit */ 3879 outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START, 3880 ap->ioaddr.bmdma_addr + ATA_DMA_CMD); 3881 } 3882 3883 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ 3884 ata_altstatus(ap); /* dummy read */ 3885} 3886 3887/** 3888 * ata_host_intr - Handle host interrupt for given (port, task) 3889 * @ap: Port on which interrupt arrived (possibly...) 3890 * @qc: Taskfile currently active in engine 3891 * 3892 * Handle host interrupt for given queued command. Currently, 3893 * only DMA interrupts are handled. All other commands are 3894 * handled via polling with interrupts disabled (nIEN bit). 3895 * 3896 * LOCKING: 3897 * spin_lock_irqsave(host_set lock) 3898 * 3899 * RETURNS: 3900 * One if interrupt was handled, zero if not (shared irq). 3901 */ 3902 3903inline unsigned int ata_host_intr (struct ata_port *ap, 3904 struct ata_queued_cmd *qc) 3905{ 3906 u8 status, host_stat; 3907 3908 switch (qc->tf.protocol) { 3909 3910 case ATA_PROT_DMA: 3911 case ATA_PROT_ATAPI_DMA: 3912 case ATA_PROT_ATAPI: 3913 /* check status of DMA engine */ 3914 host_stat = ap->ops->bmdma_status(ap); 3915 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat); 3916 3917 /* if it's not our irq... 
*/ 3918 if (!(host_stat & ATA_DMA_INTR)) 3919 goto idle_irq; 3920 3921 /* before we do anything else, clear DMA-Start bit */ 3922 ap->ops->bmdma_stop(qc); 3923 3924 /* fall through */ 3925 3926 case ATA_PROT_ATAPI_NODATA: 3927 case ATA_PROT_NODATA: 3928 /* check altstatus */ 3929 status = ata_altstatus(ap); 3930 if (status & ATA_BUSY) 3931 goto idle_irq; 3932 3933 /* check main status, clearing INTRQ */ 3934 status = ata_chk_status(ap); 3935 if (unlikely(status & ATA_BUSY)) 3936 goto idle_irq; 3937 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n", 3938 ap->id, qc->tf.protocol, status); 3939 3940 /* ack bmdma irq events */ 3941 ap->ops->irq_clear(ap); 3942 3943 /* complete taskfile transaction */ 3944 ata_qc_complete(qc, ac_err_mask(status)); 3945 break; 3946 3947 default: 3948 goto idle_irq; 3949 } 3950 3951 return 1; /* irq handled */ 3952 3953idle_irq: 3954 ap->stats.idle_irq++; 3955 3956#ifdef ATA_IRQ_TRAP 3957 if ((ap->stats.idle_irq % 1000) == 0) { 3958 handled = 1; 3959 ata_irq_ack(ap, 0); /* debug trap */ 3960 printk(KERN_WARNING "ata%d: irq trap\n", ap->id); 3961 } 3962#endif 3963 return 0; /* irq not handled */ 3964} 3965 3966/** 3967 * ata_interrupt - Default ATA host interrupt handler 3968 * @irq: irq line (unused) 3969 * @dev_instance: pointer to our ata_host_set information structure 3970 * @regs: unused 3971 * 3972 * Default interrupt handler for PCI IDE devices. Calls 3973 * ata_host_intr() for each port that is not disabled. 3974 * 3975 * LOCKING: 3976 * Obtains host_set lock during operation. 3977 * 3978 * RETURNS: 3979 * IRQ_NONE or IRQ_HANDLED. 
 */

irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
{
	struct ata_host_set *host_set = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
	spin_lock_irqsave(&host_set->lock, flags);

	for (i = 0; i < host_set->n_ports; i++) {
		struct ata_port *ap;

		ap = host_set->ports[i];
		if (ap &&
		    !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			/* only dispatch when a command is active and its
			 * taskfile did not disable interrupts (nIEN) */
			if (qc && (!(qc->tf.ctl & ATA_NIEN)) &&
			    (qc->flags & ATA_QCFLAG_ACTIVE))
				handled |= ata_host_intr(ap, qc);
		}
	}

	spin_unlock_irqrestore(&host_set->lock, flags);

	return IRQ_RETVAL(handled);
}

/**
 *	atapi_packet_task - Write CDB bytes to hardware
 *	@_data: Port to which ATAPI device is attached.
 *
 *	When device has indicated its readiness to accept
 *	a CDB, this function is called.  Send the CDB.
 *	If DMA is to be performed, exit immediately.
 *	Otherwise, we are in polling mode, so poll
 *	status until operation succeeds or fails.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */

static void atapi_packet_task(void *_data)
{
	struct ata_port *ap = _data;
	struct ata_queued_cmd *qc;
	u8 status;

	qc = ata_qc_from_tag(ap, ap->active_tag);
	assert(qc != NULL);
	assert(qc->flags & ATA_QCFLAG_ACTIVE);

	/* sleep-wait for BSY to clear */
	DPRINTK("busy wait\n");
	if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB))
		goto err_out_status;

	/* make sure DRQ is set */
	status = ata_chk_status(ap);
	if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)
		goto err_out;

	/* send SCSI cdb */
	DPRINTK("send cdb\n");
	assert(ap->cdb_len >= 12);

	if (qc->tf.protocol == ATA_PROT_ATAPI_DMA ||
	    qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
		unsigned long flags;

		/* Once we're done issuing command and kicking bmdma,
		 * irq handler takes over.  To not lose irq, we need
		 * to clear NOINTR flag before sending cdb, but
		 * interrupt handler shouldn't be invoked before we're
		 * finished.  Hence, the following locking.
		 */
		spin_lock_irqsave(&ap->host_set->lock, flags);
		ap->flags &= ~ATA_FLAG_NOINTR;
		ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
		if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
			ap->ops->bmdma_start(qc);	/* initiate bmdma */
		spin_unlock_irqrestore(&ap->host_set->lock, flags);
	} else {
		ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);

		/* PIO commands are handled by polling */
		ap->hsm_task_state = HSM_ST;
		queue_work(ata_wq, &ap->pio_task);
	}

	return;

err_out_status:
	status = ata_chk_status(ap);
err_out:
	ata_poll_qc_complete(qc, __ac_err_mask(status));
}


/**
 *	ata_port_start - Set port up for dma.
 *	@ap: Port to initialize
 *
 *	Called just after data structures for each port are
 *	initialized.  Allocates space for PRD table.
 *
 *	May be used as the port_start() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

int ata_port_start (struct ata_port *ap)
{
	struct device *dev = ap->host_set->dev;
	int rc;

	ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
	if (!ap->prd)
		return -ENOMEM;

	rc = ata_pad_alloc(ap, dev);
	if (rc) {
		/* pad allocation failed: undo the PRD allocation */
		dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
		return rc;
	}

	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);

	return 0;
}


/**
 *	ata_port_stop - Undo ata_port_start()
 *	@ap: Port to shut down
 *
 *	Frees the PRD table.
 *
 *	May be used as the port_stop() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_port_stop (struct ata_port *ap)
{
	struct device *dev = ap->host_set->dev;

	dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
	ata_pad_free(ap, dev);
}

/* Unmap the host set's MMIO region, if one was mapped.  May be used
 * as the host_stop() entry in ata_port_operations.
 */
void ata_host_stop (struct ata_host_set *host_set)
{
	if (host_set->mmio_base)
		iounmap(host_set->mmio_base);
}


/**
 *	ata_host_remove - Unregister SCSI host structure with upper layers
 *	@ap: Port to unregister
 *	@do_unregister: 1 if we fully unregister, 0 to just stop the port
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
{
	struct Scsi_Host *sh = ap->host;

	DPRINTK("ENTER\n");

	if (do_unregister)
		scsi_remove_host(sh);

	ap->ops->port_stop(ap);
}

/**
 *	ata_host_init - Initialize an ata_port structure
 *	@ap: Structure to initialize
 *	@host: associated SCSI mid-layer structure
 *	@host_set: Collection of hosts to which @ap belongs
 *	@ent: Probe information provided by low-level driver
 *	@port_no: Port number associated with this ata_port
 *
 *	Initialize a new ata_port structure, and its associated
 *	scsi_host.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
			  struct ata_host_set *host_set,
			  const struct ata_probe_ent *ent, unsigned int port_no)
{
	unsigned int i;

	host->max_id = 16;
	host->max_lun = 1;
	host->max_channel = 1;
	host->unique_id = ata_unique_id++;
	host->max_cmd_len = 12;

	ap->flags = ATA_FLAG_PORT_DISABLED;
	ap->id = host->unique_id;
	ap->host = host;
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host_set = host_set;
	ap->port_no = port_no;
	/* in legacy mode the hardware port number comes from the probe
	 * entry; otherwise it equals the logical port number */
	ap->hard_port_no =
		ent->legacy_mode ? ent->hard_port_no : port_no;
	ap->pio_mask = ent->pio_mask;
	ap->mwdma_mask = ent->mwdma_mask;
	ap->udma_mask = ent->udma_mask;
	ap->flags |= ent->host_flags;
	ap->ops = ent->port_ops;
	ap->cbl = ATA_CBL_NONE;
	ap->active_tag = ATA_TAG_POISON;
	ap->last_ctl = 0xFF;

	INIT_WORK(&ap->packet_task, atapi_packet_task, ap);
	INIT_WORK(&ap->pio_task, ata_pio_task, ap);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ap->device[i].devno = i;

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif

	memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
}

/**
 *	ata_host_add - Attach low-level ATA driver to system
 *	@ent: Information provided by low-level driver
 *	@host_set: Collections of ports to which we add
 *	@port_no: Port number associated with this host
 *
 *	Attach low-level ATA driver to system.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	New ata_port on success, for NULL on error.
 */

static struct ata_port * ata_host_add(const struct ata_probe_ent *ent,
				      struct ata_host_set *host_set,
				      unsigned int port_no)
{
	struct Scsi_Host *host;
	struct ata_port *ap;
	int rc;

	DPRINTK("ENTER\n");
	host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
	if (!host)
		return NULL;

	/* the ata_port lives in the scsi_host's hostdata area */
	ap = (struct ata_port *) &host->hostdata[0];

	ata_host_init(ap, host, host_set, ent, port_no);

	rc = ap->ops->port_start(ap);
	if (rc)
		goto err_out;

	return ap;

err_out:
	scsi_host_put(host);
	return NULL;
}

/**
 *	ata_device_add - Register hardware device with ATA and SCSI layers
 *	@ent: Probe information describing hardware device to be registered
 *
 *	This function processes the information provided in the probe
 *	information struct @ent, allocates the necessary ATA and SCSI
 *	host information structures, initializes them, and registers
 *	everything with requisite kernel subsystems.
 *
 *	This function requests irqs, probes the ATA bus, and probes
 *	the SCSI bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Number of ports registered.  Zero on error (no ports registered).
 */

int ata_device_add(const struct ata_probe_ent *ent)
{
	unsigned int count = 0, i;
	struct device *dev = ent->dev;
	struct ata_host_set *host_set;

	DPRINTK("ENTER\n");
	/* alloc a container for our list of ATA ports (buses) */
	host_set = kzalloc(sizeof(struct ata_host_set) +
			   (ent->n_ports * sizeof(void *)), GFP_KERNEL);
	if (!host_set)
		return 0;
	spin_lock_init(&host_set->lock);

	host_set->dev = dev;
	host_set->n_ports = ent->n_ports;
	host_set->irq = ent->irq;
	host_set->mmio_base = ent->mmio_base;
	host_set->private_data = ent->private_data;
	host_set->ops = ent->port_ops;

	/* register each port bound to this device */
	for (i = 0; i < ent->n_ports; i++) {
		struct ata_port *ap;
		unsigned long xfer_mode_mask;

		ap = ata_host_add(ent, host_set, i);
		if (!ap)
			goto err_out;

		host_set->ports[i] = ap;
		xfer_mode_mask =(ap->udma_mask << ATA_SHIFT_UDMA) |
				(ap->mwdma_mask << ATA_SHIFT_MWDMA) |
				(ap->pio_mask << ATA_SHIFT_PIO);

		/* print per-port info to dmesg */
		/* NOTE(review): "%lu" assumes ent->irq is unsigned long —
		 * confirm against struct ata_probe_ent's declaration */
		printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
				 "bmdma 0x%lX irq %lu\n",
			ap->id,
			ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
			ata_mode_string(xfer_mode_mask),
			ap->ioaddr.cmd_addr,
			ap->ioaddr.ctl_addr,
			ap->ioaddr.bmdma_addr,
			ent->irq);

		/* read status and ack to clear any stale irq state */
		ata_chk_status(ap);
		host_set->ops->irq_clear(ap);
		count++;
	}

	if (!count)
		goto err_free_ret;

	/* obtain irq, that is shared between channels */
	if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
			DRV_NAME, host_set))
		goto err_out;

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < count; i++) {
		struct ata_port *ap;
		int rc;

		ap = host_set->ports[i];

		DPRINTK("ata%u: probe begin\n", ap->id);
		rc = ata_bus_probe(ap);
		DPRINTK("ata%u: probe end\n", ap->id);

		if (rc) {
			/* FIXME: do something useful here?
			 * Current libata behavior will
			 * tear down everything when
			 * the module is removed
			 * or the h/w is unplugged.
			 */
		}

		rc = scsi_add_host(ap->host, dev);
		if (rc) {
			printk(KERN_ERR "ata%u: scsi_add_host failed\n",
			       ap->id);
			/* FIXME: do something useful here */
			/* FIXME: handle unconditional calls to
			 * scsi_scan_host and ata_host_remove, below,
			 * at the very least
			 */
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("probe begin\n");
	for (i = 0; i < count; i++) {
		struct ata_port *ap = host_set->ports[i];

		ata_scsi_scan_host(ap);
	}

	dev_set_drvdata(dev, host_set);

	VPRINTK("EXIT, returning %u\n", ent->n_ports);
	return ent->n_ports;	/* success */

err_out:
	for (i = 0; i < count; i++) {
		ata_host_remove(host_set->ports[i], 1);
		scsi_host_put(host_set->ports[i]->host);
	}
err_free_ret:
	kfree(host_set);
	VPRINTK("EXIT, returning 0\n");
	return 0;
}

/**
 *	ata_host_set_remove - PCI layer callback for device removal
 *	@host_set: ATA host
 set that was removed
 *
 *	Unregister all objects associated with this host set. Free those
 *	objects.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */

void ata_host_set_remove(struct ata_host_set *host_set)
{
	struct ata_port *ap;
	unsigned int i;

	/* first detach every port from the SCSI midlayer... */
	for (i = 0; i < host_set->n_ports; i++) {
		ap = host_set->ports[i];
		scsi_remove_host(ap->host);
	}

	/* ...then release the irq shared by all channels... */
	free_irq(host_set->irq, host_set);

	/* ...and finally tear down per-port resources */
	for (i = 0; i < host_set->n_ports; i++) {
		ap = host_set->ports[i];

		ata_scsi_release(ap->host);

		if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
			struct ata_ioports *ioaddr = &ap->ioaddr;

			/* give back the legacy ISA port ranges claimed
			 * with request_region() during init */
			if (ioaddr->cmd_addr == 0x1f0)
				release_region(0x1f0, 8);
			else if (ioaddr->cmd_addr == 0x170)
				release_region(0x170, 8);
		}

		scsi_host_put(ap->host);
	}

	if (host_set->ops->host_stop)
		host_set->ops->host_stop(host_set);

	kfree(host_set);
}

/**
 *	ata_scsi_release - SCSI layer callback hook for host unload
 *	@host: libata host to be unloaded
 *
 *	Performs all duties necessary to shut down a libata port...
 *	Kill port kthread, disable port, and release resources.
 *
 *	LOCKING:
 *	Inherited from SCSI layer.
 *
 *	RETURNS:
 *	One.
 */

int ata_scsi_release(struct Scsi_Host *host)
{
	struct ata_port *ap = (struct ata_port *) &host->hostdata[0];

	DPRINTK("ENTER\n");

	ap->ops->port_disable(ap);
	ata_host_remove(ap, 0);

	DPRINTK("EXIT\n");
	return 1;
}

/**
 *	ata_std_ports - initialize ioaddr with standard port offsets.
 *	@ioaddr: IO address structure to be initialized
 *
 *	Utility function which initializes data_addr, error_addr,
 *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
 *	device_addr, status_addr, and command_addr to standard offsets
 *	relative to cmd_addr.
 *
 *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
 */

void ata_std_ports(struct ata_ioports *ioaddr)
{
	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}

/* Allocate a zeroed ata_probe_ent and copy the template fields from a
 * port_info.  Returns NULL (after logging) on allocation failure; the
 * caller owns the returned memory and must kfree() it.
 */
static struct ata_probe_ent *
ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
{
	struct ata_probe_ent *probe_ent;

	probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
	if (!probe_ent) {
		printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
		       kobject_name(&(dev->kobj)));
		return NULL;
	}

	INIT_LIST_HEAD(&probe_ent->node);
	probe_ent->dev = dev;

	probe_ent->sht = port->sht;
	probe_ent->host_flags = port->host_flags;
	probe_ent->pio_mask = port->pio_mask;
	probe_ent->mwdma_mask = port->mwdma_mask;
	probe_ent->udma_mask = port->udma_mask;
	probe_ent->port_ops = port->port_ops;

	return probe_ent;
}



#ifdef CONFIG_PCI

/* PCI flavour of host_stop: unmap the MMIO BAR via the PCI layer. */
void ata_pci_host_stop (struct ata_host_set *host_set)
{
	struct pci_dev *pdev = to_pci_dev(host_set->dev);

	pci_iounmap(pdev, host_set->mmio_base);
}

4536/** 4537 * ata_pci_init_native_mode - Initialize native-mode driver 4538 * @pdev: pci device to be initialized 4539 * @port: array[2] of pointers to port info structures. 4540 * @ports: bitmap of ports present 4541 * 4542 * Utility function which allocates and initializes an 4543 * ata_probe_ent structure for a standard dual-port 4544 * PIO-based IDE controller. The returned ata_probe_ent 4545 * structure can be passed to ata_device_add(). The returned 4546 * ata_probe_ent structure should then be freed with kfree(). 4547 * 4548 * The caller need only pass the address of the primary port, the 4549 * secondary will be deduced automatically. If the device has non 4550 * standard secondary port mappings this function can be called twice, 4551 * once for each interface. 4552 */ 4553 4554struct ata_probe_ent * 4555ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports) 4556{ 4557 struct ata_probe_ent *probe_ent = 4558 ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]); 4559 int p = 0; 4560 4561 if (!probe_ent) 4562 return NULL; 4563 4564 probe_ent->irq = pdev->irq; 4565 probe_ent->irq_flags = SA_SHIRQ; 4566 probe_ent->private_data = port[0]->private_data; 4567 4568 if (ports & ATA_PORT_PRIMARY) { 4569 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0); 4570 probe_ent->port[p].altstatus_addr = 4571 probe_ent->port[p].ctl_addr = 4572 pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS; 4573 probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4); 4574 ata_std_ports(&probe_ent->port[p]); 4575 p++; 4576 } 4577 4578 if (ports & ATA_PORT_SECONDARY) { 4579 probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2); 4580 probe_ent->port[p].altstatus_addr = 4581 probe_ent->port[p].ctl_addr = 4582 pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS; 4583 probe_ent->port[p].bmdma_addr = pci_resource_start(pdev, 4) + 8; 4584 ata_std_ports(&probe_ent->port[p]); 4585 p++; 4586 } 4587 4588 probe_ent->n_ports = p; 4589 return probe_ent; 4590} 4591 
static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev, struct ata_port_info *port, int port_num)
{
	struct ata_probe_ent *probe_ent;

	probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port);
	if (!probe_ent)
		return NULL;

	probe_ent->legacy_mode = 1;
	probe_ent->n_ports = 1;
	probe_ent->hard_port_no = port_num;
	probe_ent->private_data = port->private_data;

	/* fixed legacy ISA resources: channel 0 is 0x1f0/0x3f6 irq 14,
	 * channel 1 is 0x170/0x376 irq 15 */
	switch(port_num)
	{
		case 0:
			probe_ent->irq = 14;
			probe_ent->port[0].cmd_addr = 0x1f0;
			probe_ent->port[0].altstatus_addr =
			probe_ent->port[0].ctl_addr = 0x3f6;
			break;
		case 1:
			probe_ent->irq = 15;
			probe_ent->port[0].cmd_addr = 0x170;
			probe_ent->port[0].altstatus_addr =
			probe_ent->port[0].ctl_addr = 0x376;
			break;
	}
	/* bmdma still comes from BAR 4, 8 bytes per channel */
	probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4) + 8 * port_num;
	ata_std_ports(&probe_ent->port[0]);
	return probe_ent;
}

/**
 *	ata_pci_init_one - Initialize/register PCI IDE host controller
 *	@pdev: Controller to be initialized
 *	@port_info: Information from low-level host driver
 *	@n_ports: Number of ports attached to host controller
 *
 *	This is a helper function which can be called from a driver's
 *	xxx_init_one() probe function if the hardware uses traditional
 *	IDE taskfile registers.
 *
 *	This function calls pci_enable_device(), reserves its register
 *	regions, sets the dma mask, enables bus master mode, and calls
 *	ata_device_add()
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 *
 *	RETURNS:
 *	Zero on success, negative on errno-based value on error.
 */

int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
		      unsigned int n_ports)
{
	struct ata_probe_ent *probe_ent = NULL, *probe_ent2 = NULL;
	struct ata_port_info *port[2];
	u8 tmp8, mask;
	/* legacy_mode bit meanings: (1 << 3) = controller is in legacy
	 * (compatibility) mode; (1 << 0)/(1 << 1) = primary/secondary
	 * legacy I/O range successfully claimed below. */
	unsigned int legacy_mode = 0;
	int disable_dev_on_err = 1;
	int rc;

	DPRINTK("ENTER\n");

	/* With a single port_info, both channels share the same template. */
	port[0] = port_info[0];
	if (n_ports > 1)
		port[1] = port_info[1];
	else
		port[1] = port[0];

	/* An IDE-class PCI device whose programming-interface byte does not
	 * have both per-channel "native mode" bits set must be driven via
	 * the fixed legacy I/O ports rather than its BARs. */
	if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
	    && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
		/* TODO: What if one channel is in native mode ... */
		pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
		mask = (1 << 2) | (1 << 0);	/* native-mode bits, both channels */
		if ((tmp8 & mask) != mask)
			legacy_mode = (1 << 3);
	}

	/* FIXME... */
	if ((!legacy_mode) && (n_ports > 2)) {
		printk(KERN_ERR "ata: BUG: native mode, n_ports > 2\n");
		n_ports = 2;
		/* For now */
	}

	/* FIXME: Really for ATA it isn't safe because the device may be
	   multi-purpose and we want to leave it alone if it was already
	   enabled. Secondly for shared use as Arjan says we want refcounting

	   Checking dev->is_enabled is insufficient as this is not set at
	   boot for the primary video which is BIOS enabled
	 */

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, DRV_NAME);
	if (rc) {
		/* Regions held by another driver: leave the device enabled. */
		disable_dev_on_err = 0;
		goto err_out;
	}

	/* FIXME: Should use platform specific mappers for legacy port ranges */
	if (legacy_mode) {
		if (!request_region(0x1f0, 8, "libata")) {
			struct resource *conflict, res;
			res.start = 0x1f0;
			res.end = 0x1f0 + 8 - 1;
			/* Find out who owns the range: an earlier "libata"
			 * reservation may be shared; any other owner means
			 * the primary channel is unavailable to us. */
			conflict = ____request_resource(&ioport_resource, &res);
			if (!strcmp(conflict->name, "libata"))
				legacy_mode |= (1 << 0);
			else {
				disable_dev_on_err = 0;
				printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
			}
		} else
			legacy_mode |= (1 << 0);

		if (!request_region(0x170, 8, "libata")) {
			struct resource *conflict, res;
			res.start = 0x170;
			res.end = 0x170 + 8 - 1;
			/* Same sharing test as above, secondary channel. */
			conflict = ____request_resource(&ioport_resource, &res);
			if (!strcmp(conflict->name, "libata"))
				legacy_mode |= (1 << 1);
			else {
				disable_dev_on_err = 0;
				printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
			}
		} else
			legacy_mode |= (1 << 1);
	}

	/* we have legacy mode, but all ports are unavailable */
	if (legacy_mode == (1 << 3)) {
		rc = -EBUSY;
		goto err_out_regions;
	}

	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		goto err_out_regions;
	rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		goto err_out_regions;

	if (legacy_mode) {
		/* One probe entry per successfully claimed legacy channel. */
		if (legacy_mode & (1 << 0))
			probe_ent = ata_pci_init_legacy_port(pdev, port[0], 0);
		if (legacy_mode & (1 << 1))
			probe_ent2 = ata_pci_init_legacy_port(pdev, port[1], 1);
	} else {
		if (n_ports == 2)
			probe_ent = ata_pci_init_native_mode(pdev,
					port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
		else
			probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
	}
	if (!probe_ent && !probe_ent2) {
		rc = -ENOMEM;
		goto err_out_regions;
	}

	pci_set_master(pdev);

	/* FIXME: check ata_device_add return */
	if (legacy_mode) {
		if (legacy_mode & (1 << 0))
			ata_device_add(probe_ent);
		if (legacy_mode & (1 << 1))
			ata_device_add(probe_ent2);
	} else
		ata_device_add(probe_ent);

	/* The probe entries are only registration input; free them here.
	 * kfree(NULL) is a no-op, so unclaimed entries are harmless. */
	kfree(probe_ent);
	kfree(probe_ent2);

	return 0;

err_out_regions:
	/* Release only the legacy ranges this call actually claimed. */
	if (legacy_mode & (1 << 0))
		release_region(0x1f0, 8);
	if (legacy_mode & (1 << 1))
		release_region(0x170, 8);
	pci_release_regions(pdev);
err_out:
	if (disable_dev_on_err)
		pci_disable_device(pdev);
	return rc;
}

/**
 * ata_pci_remove_one - PCI layer callback for device removal
 * @pdev: PCI device that was removed
 *
 * PCI layer indicates to libata via this hook that
 * hot-unplug or module unload event has occurred.
 * Handle this by unregistering all objects associated
 * with this PCI device. Free those objects. Then finally
 * release PCI resources and disable device.
 *
 * LOCKING:
 * Inherited from PCI layer (may sleep).
4798 */ 4799 4800void ata_pci_remove_one (struct pci_dev *pdev) 4801{ 4802 struct device *dev = pci_dev_to_dev(pdev); 4803 struct ata_host_set *host_set = dev_get_drvdata(dev); 4804 4805 ata_host_set_remove(host_set); 4806 pci_release_regions(pdev); 4807 pci_disable_device(pdev); 4808 dev_set_drvdata(dev, NULL); 4809} 4810 4811/* move to PCI subsystem */ 4812int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits) 4813{ 4814 unsigned long tmp = 0; 4815 4816 switch (bits->width) { 4817 case 1: { 4818 u8 tmp8 = 0; 4819 pci_read_config_byte(pdev, bits->reg, &tmp8); 4820 tmp = tmp8; 4821 break; 4822 } 4823 case 2: { 4824 u16 tmp16 = 0; 4825 pci_read_config_word(pdev, bits->reg, &tmp16); 4826 tmp = tmp16; 4827 break; 4828 } 4829 case 4: { 4830 u32 tmp32 = 0; 4831 pci_read_config_dword(pdev, bits->reg, &tmp32); 4832 tmp = tmp32; 4833 break; 4834 } 4835 4836 default: 4837 return -EINVAL; 4838 } 4839 4840 tmp &= bits->mask; 4841 4842 return (tmp == bits->val) ? 1 : 0; 4843} 4844#endif /* CONFIG_PCI */ 4845 4846 4847static int __init ata_init(void) 4848{ 4849 ata_wq = create_workqueue("ata"); 4850 if (!ata_wq) 4851 return -ENOMEM; 4852 4853 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n"); 4854 return 0; 4855} 4856 4857static void __exit ata_exit(void) 4858{ 4859 destroy_workqueue(ata_wq); 4860} 4861 4862module_init(ata_init); 4863module_exit(ata_exit); 4864 4865static unsigned long ratelimit_time; 4866static spinlock_t ata_ratelimit_lock = SPIN_LOCK_UNLOCKED; 4867 4868int ata_ratelimit(void) 4869{ 4870 int rc; 4871 unsigned long flags; 4872 4873 spin_lock_irqsave(&ata_ratelimit_lock, flags); 4874 4875 if (time_after(jiffies, ratelimit_time)) { 4876 rc = 1; 4877 ratelimit_time = jiffies + (HZ/5); 4878 } else 4879 rc = 0; 4880 4881 spin_unlock_irqrestore(&ata_ratelimit_lock, flags); 4882 4883 return rc; 4884} 4885 4886/* 4887 * libata is essentially a library of internal helper functions for 4888 * low-level ATA host controller drivers. 
 As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */

/* Exports are GPL-only: per the note above, this interface is internal
 * to libata drivers and carries no stability guarantee. */
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_device_add);
EXPORT_SYMBOL_GPL(ata_host_set_remove);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_port_stop);
EXPORT_SYMBOL_GPL(ata_host_stop);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_error);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_release);
EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_id_string);
EXPORT_SYMBOL_GPL(ata_dev_config);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);

#ifdef CONFIG_PCI
/* PCI bus helpers; compiled (and hence exported) only with CONFIG_PCI. */
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_host_stop);
EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#endif /* CONFIG_PCI */