Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux
1
fork

Configure Feed

Select the types of activity you want to include in your feed.

at v2.6.25-rc3 7888 lines 202 kB view raw
1/* 2 * libata-core.c - helper library for ATA 3 * 4 * Maintained by: Jeff Garzik <jgarzik@pobox.com> 5 * Please ALWAYS copy linux-ide@vger.kernel.org 6 * on emails. 7 * 8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved. 9 * Copyright 2003-2004 Jeff Garzik 10 * 11 * 12 * This program is free software; you can redistribute it and/or modify 13 * it under the terms of the GNU General Public License as published by 14 * the Free Software Foundation; either version 2, or (at your option) 15 * any later version. 16 * 17 * This program is distributed in the hope that it will be useful, 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 * GNU General Public License for more details. 21 * 22 * You should have received a copy of the GNU General Public License 23 * along with this program; see the file COPYING. If not, write to 24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 25 * 26 * 27 * libata documentation is available via 'make {ps|pdf}docs', 28 * as Documentation/DocBook/libata.* 29 * 30 * Hardware documentation available from http://www.t13.org/ and 31 * http://www.sata-io.org/ 32 * 33 * Standards documents from: 34 * http://www.t13.org (ATA standards, PCI DMA IDE spec) 35 * http://www.t10.org (SCSI MMC - for ATAPI MMC) 36 * http://www.sata-io.org (SATA) 37 * http://www.compactflash.org (CF) 38 * http://www.qic.org (QIC157 - Tape and DSC) 39 * http://www.ce-ata.org (CE-ATA: not supported) 40 * 41 */ 42 43#include <linux/kernel.h> 44#include <linux/module.h> 45#include <linux/pci.h> 46#include <linux/init.h> 47#include <linux/list.h> 48#include <linux/mm.h> 49#include <linux/highmem.h> 50#include <linux/spinlock.h> 51#include <linux/blkdev.h> 52#include <linux/delay.h> 53#include <linux/timer.h> 54#include <linux/interrupt.h> 55#include <linux/completion.h> 56#include <linux/suspend.h> 57#include <linux/workqueue.h> 58#include <linux/jiffies.h> 
59#include <linux/scatterlist.h> 60#include <linux/io.h> 61#include <scsi/scsi.h> 62#include <scsi/scsi_cmnd.h> 63#include <scsi/scsi_host.h> 64#include <linux/libata.h> 65#include <asm/semaphore.h> 66#include <asm/byteorder.h> 67#include <linux/cdrom.h> 68 69#include "libata.h" 70 71 72/* debounce timing parameters in msecs { interval, duration, timeout } */ 73const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 }; 74const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 }; 75const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 }; 76 77static unsigned int ata_dev_init_params(struct ata_device *dev, 78 u16 heads, u16 sectors); 79static unsigned int ata_dev_set_xfermode(struct ata_device *dev); 80static unsigned int ata_dev_set_feature(struct ata_device *dev, 81 u8 enable, u8 feature); 82static void ata_dev_xfermask(struct ata_device *dev); 83static unsigned long ata_dev_blacklisted(const struct ata_device *dev); 84 85unsigned int ata_print_id = 1; 86static struct workqueue_struct *ata_wq; 87 88struct workqueue_struct *ata_aux_wq; 89 90struct ata_force_param { 91 const char *name; 92 unsigned int cbl; 93 int spd_limit; 94 unsigned long xfer_mask; 95 unsigned int horkage_on; 96 unsigned int horkage_off; 97}; 98 99struct ata_force_ent { 100 int port; 101 int device; 102 struct ata_force_param param; 103}; 104 105static struct ata_force_ent *ata_force_tbl; 106static int ata_force_tbl_size; 107 108static char ata_force_param_buf[PAGE_SIZE] __initdata; 109module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0444); 110MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)"); 111 112int atapi_enabled = 1; 113module_param(atapi_enabled, int, 0444); 114MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)"); 115 116int atapi_dmadir = 0; 117module_param(atapi_dmadir, int, 0444); 
118MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)"); 119 120int atapi_passthru16 = 1; 121module_param(atapi_passthru16, int, 0444); 122MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)"); 123 124int libata_fua = 0; 125module_param_named(fua, libata_fua, int, 0444); 126MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)"); 127 128static int ata_ignore_hpa; 129module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644); 130MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)"); 131 132static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA; 133module_param_named(dma, libata_dma_mask, int, 0444); 134MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)"); 135 136static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ; 137module_param(ata_probe_timeout, int, 0444); 138MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)"); 139 140int libata_noacpi = 0; 141module_param_named(noacpi, libata_noacpi, int, 0444); 142MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set"); 143 144int libata_allow_tpm = 0; 145module_param_named(allow_tpm, libata_allow_tpm, int, 0444); 146MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands"); 147 148MODULE_AUTHOR("Jeff Garzik"); 149MODULE_DESCRIPTION("Library module for ATA devices"); 150MODULE_LICENSE("GPL"); 151MODULE_VERSION(DRV_VERSION); 152 153 154/** 155 * ata_force_cbl - force cable type according to libata.force 156 * @ap: ATA port of interest 157 * 158 * Force cable type according to libata.force and whine about it. 159 * The last entry which has matching port number is used, so it 160 * can be specified as part of device force parameters. For 161 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the 162 * same effect. 163 * 164 * LOCKING: 165 * EH context. 
166 */ 167void ata_force_cbl(struct ata_port *ap) 168{ 169 int i; 170 171 for (i = ata_force_tbl_size - 1; i >= 0; i--) { 172 const struct ata_force_ent *fe = &ata_force_tbl[i]; 173 174 if (fe->port != -1 && fe->port != ap->print_id) 175 continue; 176 177 if (fe->param.cbl == ATA_CBL_NONE) 178 continue; 179 180 ap->cbl = fe->param.cbl; 181 ata_port_printk(ap, KERN_NOTICE, 182 "FORCE: cable set to %s\n", fe->param.name); 183 return; 184 } 185} 186 187/** 188 * ata_force_spd_limit - force SATA spd limit according to libata.force 189 * @link: ATA link of interest 190 * 191 * Force SATA spd limit according to libata.force and whine about 192 * it. When only the port part is specified (e.g. 1:), the limit 193 * applies to all links connected to both the host link and all 194 * fan-out ports connected via PMP. If the device part is 195 * specified as 0 (e.g. 1.00:), it specifies the first fan-out 196 * link not the host link. Device number 15 always points to the 197 * host link whether PMP is attached or not. 198 * 199 * LOCKING: 200 * EH context. 201 */ 202static void ata_force_spd_limit(struct ata_link *link) 203{ 204 int linkno, i; 205 206 if (ata_is_host_link(link)) 207 linkno = 15; 208 else 209 linkno = link->pmp; 210 211 for (i = ata_force_tbl_size - 1; i >= 0; i--) { 212 const struct ata_force_ent *fe = &ata_force_tbl[i]; 213 214 if (fe->port != -1 && fe->port != link->ap->print_id) 215 continue; 216 217 if (fe->device != -1 && fe->device != linkno) 218 continue; 219 220 if (!fe->param.spd_limit) 221 continue; 222 223 link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1; 224 ata_link_printk(link, KERN_NOTICE, 225 "FORCE: PHY spd limit set to %s\n", fe->param.name); 226 return; 227 } 228} 229 230/** 231 * ata_force_xfermask - force xfermask according to libata.force 232 * @dev: ATA device of interest 233 * 234 * Force xfer_mask according to libata.force and whine about it. 
235 * For consistency with link selection, device number 15 selects 236 * the first device connected to the host link. 237 * 238 * LOCKING: 239 * EH context. 240 */ 241static void ata_force_xfermask(struct ata_device *dev) 242{ 243 int devno = dev->link->pmp + dev->devno; 244 int alt_devno = devno; 245 int i; 246 247 /* allow n.15 for the first device attached to host port */ 248 if (ata_is_host_link(dev->link) && devno == 0) 249 alt_devno = 15; 250 251 for (i = ata_force_tbl_size - 1; i >= 0; i--) { 252 const struct ata_force_ent *fe = &ata_force_tbl[i]; 253 unsigned long pio_mask, mwdma_mask, udma_mask; 254 255 if (fe->port != -1 && fe->port != dev->link->ap->print_id) 256 continue; 257 258 if (fe->device != -1 && fe->device != devno && 259 fe->device != alt_devno) 260 continue; 261 262 if (!fe->param.xfer_mask) 263 continue; 264 265 ata_unpack_xfermask(fe->param.xfer_mask, 266 &pio_mask, &mwdma_mask, &udma_mask); 267 if (udma_mask) 268 dev->udma_mask = udma_mask; 269 else if (mwdma_mask) { 270 dev->udma_mask = 0; 271 dev->mwdma_mask = mwdma_mask; 272 } else { 273 dev->udma_mask = 0; 274 dev->mwdma_mask = 0; 275 dev->pio_mask = pio_mask; 276 } 277 278 ata_dev_printk(dev, KERN_NOTICE, 279 "FORCE: xfer_mask set to %s\n", fe->param.name); 280 return; 281 } 282} 283 284/** 285 * ata_force_horkage - force horkage according to libata.force 286 * @dev: ATA device of interest 287 * 288 * Force horkage according to libata.force and whine about it. 289 * For consistency with link selection, device number 15 selects 290 * the first device connected to the host link. 291 * 292 * LOCKING: 293 * EH context. 
294 */ 295static void ata_force_horkage(struct ata_device *dev) 296{ 297 int devno = dev->link->pmp + dev->devno; 298 int alt_devno = devno; 299 int i; 300 301 /* allow n.15 for the first device attached to host port */ 302 if (ata_is_host_link(dev->link) && devno == 0) 303 alt_devno = 15; 304 305 for (i = 0; i < ata_force_tbl_size; i++) { 306 const struct ata_force_ent *fe = &ata_force_tbl[i]; 307 308 if (fe->port != -1 && fe->port != dev->link->ap->print_id) 309 continue; 310 311 if (fe->device != -1 && fe->device != devno && 312 fe->device != alt_devno) 313 continue; 314 315 if (!(~dev->horkage & fe->param.horkage_on) && 316 !(dev->horkage & fe->param.horkage_off)) 317 continue; 318 319 dev->horkage |= fe->param.horkage_on; 320 dev->horkage &= ~fe->param.horkage_off; 321 322 ata_dev_printk(dev, KERN_NOTICE, 323 "FORCE: horkage modified (%s)\n", fe->param.name); 324 } 325} 326 327/** 328 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure 329 * @tf: Taskfile to convert 330 * @pmp: Port multiplier port 331 * @is_cmd: This FIS is for command 332 * @fis: Buffer into which data will output 333 * 334 * Converts a standard ATA taskfile to a Serial ATA 335 * FIS structure (Register - Host to Device). 336 * 337 * LOCKING: 338 * Inherited from caller. 
339 */ 340void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis) 341{ 342 fis[0] = 0x27; /* Register - Host to Device FIS */ 343 fis[1] = pmp & 0xf; /* Port multiplier number*/ 344 if (is_cmd) 345 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */ 346 347 fis[2] = tf->command; 348 fis[3] = tf->feature; 349 350 fis[4] = tf->lbal; 351 fis[5] = tf->lbam; 352 fis[6] = tf->lbah; 353 fis[7] = tf->device; 354 355 fis[8] = tf->hob_lbal; 356 fis[9] = tf->hob_lbam; 357 fis[10] = tf->hob_lbah; 358 fis[11] = tf->hob_feature; 359 360 fis[12] = tf->nsect; 361 fis[13] = tf->hob_nsect; 362 fis[14] = 0; 363 fis[15] = tf->ctl; 364 365 fis[16] = 0; 366 fis[17] = 0; 367 fis[18] = 0; 368 fis[19] = 0; 369} 370 371/** 372 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile 373 * @fis: Buffer from which data will be input 374 * @tf: Taskfile to output 375 * 376 * Converts a serial ATA FIS structure to a standard ATA taskfile. 377 * 378 * LOCKING: 379 * Inherited from caller. 380 */ 381 382void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf) 383{ 384 tf->command = fis[2]; /* status */ 385 tf->feature = fis[3]; /* error */ 386 387 tf->lbal = fis[4]; 388 tf->lbam = fis[5]; 389 tf->lbah = fis[6]; 390 tf->device = fis[7]; 391 392 tf->hob_lbal = fis[8]; 393 tf->hob_lbam = fis[9]; 394 tf->hob_lbah = fis[10]; 395 396 tf->nsect = fis[12]; 397 tf->hob_nsect = fis[13]; 398} 399 400static const u8 ata_rw_cmds[] = { 401 /* pio multi */ 402 ATA_CMD_READ_MULTI, 403 ATA_CMD_WRITE_MULTI, 404 ATA_CMD_READ_MULTI_EXT, 405 ATA_CMD_WRITE_MULTI_EXT, 406 0, 407 0, 408 0, 409 ATA_CMD_WRITE_MULTI_FUA_EXT, 410 /* pio */ 411 ATA_CMD_PIO_READ, 412 ATA_CMD_PIO_WRITE, 413 ATA_CMD_PIO_READ_EXT, 414 ATA_CMD_PIO_WRITE_EXT, 415 0, 416 0, 417 0, 418 0, 419 /* dma */ 420 ATA_CMD_READ, 421 ATA_CMD_WRITE, 422 ATA_CMD_READ_EXT, 423 ATA_CMD_WRITE_EXT, 424 0, 425 0, 426 0, 427 ATA_CMD_WRITE_FUA_EXT 428}; 429 430/** 431 * ata_rwcmd_protocol - set taskfile r/w commands and protocol 432 * @tf: 
command to examine and configure 433 * @dev: device tf belongs to 434 * 435 * Examine the device configuration and tf->flags to calculate 436 * the proper read/write commands and protocol to use. 437 * 438 * LOCKING: 439 * caller. 440 */ 441static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev) 442{ 443 u8 cmd; 444 445 int index, fua, lba48, write; 446 447 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0; 448 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0; 449 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0; 450 451 if (dev->flags & ATA_DFLAG_PIO) { 452 tf->protocol = ATA_PROT_PIO; 453 index = dev->multi_count ? 0 : 8; 454 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) { 455 /* Unable to use DMA due to host limitation */ 456 tf->protocol = ATA_PROT_PIO; 457 index = dev->multi_count ? 0 : 8; 458 } else { 459 tf->protocol = ATA_PROT_DMA; 460 index = 16; 461 } 462 463 cmd = ata_rw_cmds[index + fua + lba48 + write]; 464 if (cmd) { 465 tf->command = cmd; 466 return 0; 467 } 468 return -1; 469} 470 471/** 472 * ata_tf_read_block - Read block address from ATA taskfile 473 * @tf: ATA taskfile of interest 474 * @dev: ATA device @tf belongs to 475 * 476 * LOCKING: 477 * None. 478 * 479 * Read block address from @tf. This function can handle all 480 * three address formats - LBA, LBA48 and CHS. tf->protocol and 481 * flags select the address format to use. 482 * 483 * RETURNS: 484 * Block address read from @tf. 
485 */ 486u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev) 487{ 488 u64 block = 0; 489 490 if (tf->flags & ATA_TFLAG_LBA) { 491 if (tf->flags & ATA_TFLAG_LBA48) { 492 block |= (u64)tf->hob_lbah << 40; 493 block |= (u64)tf->hob_lbam << 32; 494 block |= tf->hob_lbal << 24; 495 } else 496 block |= (tf->device & 0xf) << 24; 497 498 block |= tf->lbah << 16; 499 block |= tf->lbam << 8; 500 block |= tf->lbal; 501 } else { 502 u32 cyl, head, sect; 503 504 cyl = tf->lbam | (tf->lbah << 8); 505 head = tf->device & 0xf; 506 sect = tf->lbal; 507 508 block = (cyl * dev->heads + head) * dev->sectors + sect; 509 } 510 511 return block; 512} 513 514/** 515 * ata_build_rw_tf - Build ATA taskfile for given read/write request 516 * @tf: Target ATA taskfile 517 * @dev: ATA device @tf belongs to 518 * @block: Block address 519 * @n_block: Number of blocks 520 * @tf_flags: RW/FUA etc... 521 * @tag: tag 522 * 523 * LOCKING: 524 * None. 525 * 526 * Build ATA taskfile @tf for read/write request described by 527 * @block, @n_block, @tf_flags and @tag on @dev. 528 * 529 * RETURNS: 530 * 531 * 0 on success, -ERANGE if the request is too large for @dev, 532 * -EINVAL if the request is invalid. 
533 */ 534int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev, 535 u64 block, u32 n_block, unsigned int tf_flags, 536 unsigned int tag) 537{ 538 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 539 tf->flags |= tf_flags; 540 541 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) { 542 /* yay, NCQ */ 543 if (!lba_48_ok(block, n_block)) 544 return -ERANGE; 545 546 tf->protocol = ATA_PROT_NCQ; 547 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48; 548 549 if (tf->flags & ATA_TFLAG_WRITE) 550 tf->command = ATA_CMD_FPDMA_WRITE; 551 else 552 tf->command = ATA_CMD_FPDMA_READ; 553 554 tf->nsect = tag << 3; 555 tf->hob_feature = (n_block >> 8) & 0xff; 556 tf->feature = n_block & 0xff; 557 558 tf->hob_lbah = (block >> 40) & 0xff; 559 tf->hob_lbam = (block >> 32) & 0xff; 560 tf->hob_lbal = (block >> 24) & 0xff; 561 tf->lbah = (block >> 16) & 0xff; 562 tf->lbam = (block >> 8) & 0xff; 563 tf->lbal = block & 0xff; 564 565 tf->device = 1 << 6; 566 if (tf->flags & ATA_TFLAG_FUA) 567 tf->device |= 1 << 7; 568 } else if (dev->flags & ATA_DFLAG_LBA) { 569 tf->flags |= ATA_TFLAG_LBA; 570 571 if (lba_28_ok(block, n_block)) { 572 /* use LBA28 */ 573 tf->device |= (block >> 24) & 0xf; 574 } else if (lba_48_ok(block, n_block)) { 575 if (!(dev->flags & ATA_DFLAG_LBA48)) 576 return -ERANGE; 577 578 /* use LBA48 */ 579 tf->flags |= ATA_TFLAG_LBA48; 580 581 tf->hob_nsect = (n_block >> 8) & 0xff; 582 583 tf->hob_lbah = (block >> 40) & 0xff; 584 tf->hob_lbam = (block >> 32) & 0xff; 585 tf->hob_lbal = (block >> 24) & 0xff; 586 } else 587 /* request too large even for LBA48 */ 588 return -ERANGE; 589 590 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0)) 591 return -EINVAL; 592 593 tf->nsect = n_block & 0xff; 594 595 tf->lbah = (block >> 16) & 0xff; 596 tf->lbam = (block >> 8) & 0xff; 597 tf->lbal = block & 0xff; 598 599 tf->device |= ATA_LBA; 600 } else { 601 /* CHS */ 602 u32 sect, head, cyl, track; 603 604 /* The request -may- be too large for CHS addressing. 
*/ 605 if (!lba_28_ok(block, n_block)) 606 return -ERANGE; 607 608 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0)) 609 return -EINVAL; 610 611 /* Convert LBA to CHS */ 612 track = (u32)block / dev->sectors; 613 cyl = track / dev->heads; 614 head = track % dev->heads; 615 sect = (u32)block % dev->sectors + 1; 616 617 DPRINTK("block %u track %u cyl %u head %u sect %u\n", 618 (u32)block, track, cyl, head, sect); 619 620 /* Check whether the converted CHS can fit. 621 Cylinder: 0-65535 622 Head: 0-15 623 Sector: 1-255*/ 624 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect)) 625 return -ERANGE; 626 627 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */ 628 tf->lbal = sect; 629 tf->lbam = cyl; 630 tf->lbah = cyl >> 8; 631 tf->device |= head; 632 } 633 634 return 0; 635} 636 637/** 638 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask 639 * @pio_mask: pio_mask 640 * @mwdma_mask: mwdma_mask 641 * @udma_mask: udma_mask 642 * 643 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single 644 * unsigned int xfer_mask. 645 * 646 * LOCKING: 647 * None. 648 * 649 * RETURNS: 650 * Packed xfer_mask. 651 */ 652unsigned long ata_pack_xfermask(unsigned long pio_mask, 653 unsigned long mwdma_mask, 654 unsigned long udma_mask) 655{ 656 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) | 657 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) | 658 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA); 659} 660 661/** 662 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks 663 * @xfer_mask: xfer_mask to unpack 664 * @pio_mask: resulting pio_mask 665 * @mwdma_mask: resulting mwdma_mask 666 * @udma_mask: resulting udma_mask 667 * 668 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask. 669 * Any NULL distination masks will be ignored. 
670 */ 671void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask, 672 unsigned long *mwdma_mask, unsigned long *udma_mask) 673{ 674 if (pio_mask) 675 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO; 676 if (mwdma_mask) 677 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA; 678 if (udma_mask) 679 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA; 680} 681 682static const struct ata_xfer_ent { 683 int shift, bits; 684 u8 base; 685} ata_xfer_tbl[] = { 686 { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 }, 687 { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 }, 688 { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 }, 689 { -1, }, 690}; 691 692/** 693 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask 694 * @xfer_mask: xfer_mask of interest 695 * 696 * Return matching XFER_* value for @xfer_mask. Only the highest 697 * bit of @xfer_mask is considered. 698 * 699 * LOCKING: 700 * None. 701 * 702 * RETURNS: 703 * Matching XFER_* value, 0xff if no match found. 704 */ 705u8 ata_xfer_mask2mode(unsigned long xfer_mask) 706{ 707 int highbit = fls(xfer_mask) - 1; 708 const struct ata_xfer_ent *ent; 709 710 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) 711 if (highbit >= ent->shift && highbit < ent->shift + ent->bits) 712 return ent->base + highbit - ent->shift; 713 return 0xff; 714} 715 716/** 717 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_* 718 * @xfer_mode: XFER_* of interest 719 * 720 * Return matching xfer_mask for @xfer_mode. 721 * 722 * LOCKING: 723 * None. 724 * 725 * RETURNS: 726 * Matching xfer_mask, 0 if no match found. 
727 */ 728unsigned long ata_xfer_mode2mask(u8 xfer_mode) 729{ 730 const struct ata_xfer_ent *ent; 731 732 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) 733 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits) 734 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1) 735 & ~((1 << ent->shift) - 1); 736 return 0; 737} 738 739/** 740 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_* 741 * @xfer_mode: XFER_* of interest 742 * 743 * Return matching xfer_shift for @xfer_mode. 744 * 745 * LOCKING: 746 * None. 747 * 748 * RETURNS: 749 * Matching xfer_shift, -1 if no match found. 750 */ 751int ata_xfer_mode2shift(unsigned long xfer_mode) 752{ 753 const struct ata_xfer_ent *ent; 754 755 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) 756 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits) 757 return ent->shift; 758 return -1; 759} 760 761/** 762 * ata_mode_string - convert xfer_mask to string 763 * @xfer_mask: mask of bits supported; only highest bit counts. 764 * 765 * Determine string which represents the highest speed 766 * (highest bit in @modemask). 767 * 768 * LOCKING: 769 * None. 770 * 771 * RETURNS: 772 * Constant C string representing highest speed listed in 773 * @mode_mask, or the constant C string "<n/a>". 
774 */ 775const char *ata_mode_string(unsigned long xfer_mask) 776{ 777 static const char * const xfer_mode_str[] = { 778 "PIO0", 779 "PIO1", 780 "PIO2", 781 "PIO3", 782 "PIO4", 783 "PIO5", 784 "PIO6", 785 "MWDMA0", 786 "MWDMA1", 787 "MWDMA2", 788 "MWDMA3", 789 "MWDMA4", 790 "UDMA/16", 791 "UDMA/25", 792 "UDMA/33", 793 "UDMA/44", 794 "UDMA/66", 795 "UDMA/100", 796 "UDMA/133", 797 "UDMA7", 798 }; 799 int highbit; 800 801 highbit = fls(xfer_mask) - 1; 802 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str)) 803 return xfer_mode_str[highbit]; 804 return "<n/a>"; 805} 806 807static const char *sata_spd_string(unsigned int spd) 808{ 809 static const char * const spd_str[] = { 810 "1.5 Gbps", 811 "3.0 Gbps", 812 }; 813 814 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str)) 815 return "<unknown>"; 816 return spd_str[spd - 1]; 817} 818 819void ata_dev_disable(struct ata_device *dev) 820{ 821 if (ata_dev_enabled(dev)) { 822 if (ata_msg_drv(dev->link->ap)) 823 ata_dev_printk(dev, KERN_WARNING, "disabled\n"); 824 ata_acpi_on_disable(dev); 825 ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | 826 ATA_DNXFER_QUIET); 827 dev->class++; 828 } 829} 830 831static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy) 832{ 833 struct ata_link *link = dev->link; 834 struct ata_port *ap = link->ap; 835 u32 scontrol; 836 unsigned int err_mask; 837 int rc; 838 839 /* 840 * disallow DIPM for drivers which haven't set 841 * ATA_FLAG_IPM. This is because when DIPM is enabled, 842 * phy ready will be set in the interrupt status on 843 * state changes, which will cause some drivers to 844 * think there are errors - additionally drivers will 845 * need to disable hot plug. 846 */ 847 if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) { 848 ap->pm_policy = NOT_AVAILABLE; 849 return -EINVAL; 850 } 851 852 /* 853 * For DIPM, we will only enable it for the 854 * min_power setting. 855 * 856 * Why? 
Because Disks are too stupid to know that 857 * If the host rejects a request to go to SLUMBER 858 * they should retry at PARTIAL, and instead it 859 * just would give up. So, for medium_power to 860 * work at all, we need to only allow HIPM. 861 */ 862 rc = sata_scr_read(link, SCR_CONTROL, &scontrol); 863 if (rc) 864 return rc; 865 866 switch (policy) { 867 case MIN_POWER: 868 /* no restrictions on IPM transitions */ 869 scontrol &= ~(0x3 << 8); 870 rc = sata_scr_write(link, SCR_CONTROL, scontrol); 871 if (rc) 872 return rc; 873 874 /* enable DIPM */ 875 if (dev->flags & ATA_DFLAG_DIPM) 876 err_mask = ata_dev_set_feature(dev, 877 SETFEATURES_SATA_ENABLE, SATA_DIPM); 878 break; 879 case MEDIUM_POWER: 880 /* allow IPM to PARTIAL */ 881 scontrol &= ~(0x1 << 8); 882 scontrol |= (0x2 << 8); 883 rc = sata_scr_write(link, SCR_CONTROL, scontrol); 884 if (rc) 885 return rc; 886 887 /* 888 * we don't have to disable DIPM since IPM flags 889 * disallow transitions to SLUMBER, which effectively 890 * disable DIPM if it does not support PARTIAL 891 */ 892 break; 893 case NOT_AVAILABLE: 894 case MAX_PERFORMANCE: 895 /* disable all IPM transitions */ 896 scontrol |= (0x3 << 8); 897 rc = sata_scr_write(link, SCR_CONTROL, scontrol); 898 if (rc) 899 return rc; 900 901 /* 902 * we don't have to disable DIPM since IPM flags 903 * disallow all transitions which effectively 904 * disable DIPM anyway. 905 */ 906 break; 907 } 908 909 /* FIXME: handle SET FEATURES failure */ 910 (void) err_mask; 911 912 return 0; 913} 914 915/** 916 * ata_dev_enable_pm - enable SATA interface power management 917 * @dev: device to enable power management 918 * @policy: the link power management policy 919 * 920 * Enable SATA Interface power management. This will enable 921 * Device Interface Power Management (DIPM) for min_power 922 * policy, and then call driver specific callbacks for 923 * enabling Host Initiated Power management. 924 * 925 * Locking: Caller. 
926 * Returns: -EINVAL if IPM is not supported, 0 otherwise. 927 */ 928void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy) 929{ 930 int rc = 0; 931 struct ata_port *ap = dev->link->ap; 932 933 /* set HIPM first, then DIPM */ 934 if (ap->ops->enable_pm) 935 rc = ap->ops->enable_pm(ap, policy); 936 if (rc) 937 goto enable_pm_out; 938 rc = ata_dev_set_dipm(dev, policy); 939 940enable_pm_out: 941 if (rc) 942 ap->pm_policy = MAX_PERFORMANCE; 943 else 944 ap->pm_policy = policy; 945 return /* rc */; /* hopefully we can use 'rc' eventually */ 946} 947 948#ifdef CONFIG_PM 949/** 950 * ata_dev_disable_pm - disable SATA interface power management 951 * @dev: device to disable power management 952 * 953 * Disable SATA Interface power management. This will disable 954 * Device Interface Power Management (DIPM) without changing 955 * policy, call driver specific callbacks for disabling Host 956 * Initiated Power management. 957 * 958 * Locking: Caller. 959 * Returns: void 960 */ 961static void ata_dev_disable_pm(struct ata_device *dev) 962{ 963 struct ata_port *ap = dev->link->ap; 964 965 ata_dev_set_dipm(dev, MAX_PERFORMANCE); 966 if (ap->ops->disable_pm) 967 ap->ops->disable_pm(ap); 968} 969#endif /* CONFIG_PM */ 970 971void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy) 972{ 973 ap->pm_policy = policy; 974 ap->link.eh_info.action |= ATA_EHI_LPM; 975 ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY; 976 ata_port_schedule_eh(ap); 977} 978 979#ifdef CONFIG_PM 980static void ata_lpm_enable(struct ata_host *host) 981{ 982 struct ata_link *link; 983 struct ata_port *ap; 984 struct ata_device *dev; 985 int i; 986 987 for (i = 0; i < host->n_ports; i++) { 988 ap = host->ports[i]; 989 ata_port_for_each_link(link, ap) { 990 ata_link_for_each_dev(dev, link) 991 ata_dev_disable_pm(dev); 992 } 993 } 994} 995 996static void ata_lpm_disable(struct ata_host *host) 997{ 998 int i; 999 1000 for (i = 0; i < host->n_ports; i++) { 1001 struct ata_port *ap = 
host->ports[i]; 1002 ata_lpm_schedule(ap, ap->pm_policy); 1003 } 1004} 1005#endif /* CONFIG_PM */ 1006 1007 1008/** 1009 * ata_devchk - PATA device presence detection 1010 * @ap: ATA channel to examine 1011 * @device: Device to examine (starting at zero) 1012 * 1013 * This technique was originally described in 1014 * Hale Landis's ATADRVR (www.ata-atapi.com), and 1015 * later found its way into the ATA/ATAPI spec. 1016 * 1017 * Write a pattern to the ATA shadow registers, 1018 * and if a device is present, it will respond by 1019 * correctly storing and echoing back the 1020 * ATA shadow register contents. 1021 * 1022 * LOCKING: 1023 * caller. 1024 */ 1025 1026static unsigned int ata_devchk(struct ata_port *ap, unsigned int device) 1027{ 1028 struct ata_ioports *ioaddr = &ap->ioaddr; 1029 u8 nsect, lbal; 1030 1031 ap->ops->dev_select(ap, device); 1032 1033 iowrite8(0x55, ioaddr->nsect_addr); 1034 iowrite8(0xaa, ioaddr->lbal_addr); 1035 1036 iowrite8(0xaa, ioaddr->nsect_addr); 1037 iowrite8(0x55, ioaddr->lbal_addr); 1038 1039 iowrite8(0x55, ioaddr->nsect_addr); 1040 iowrite8(0xaa, ioaddr->lbal_addr); 1041 1042 nsect = ioread8(ioaddr->nsect_addr); 1043 lbal = ioread8(ioaddr->lbal_addr); 1044 1045 if ((nsect == 0x55) && (lbal == 0xaa)) 1046 return 1; /* we found a device */ 1047 1048 return 0; /* nothing found */ 1049} 1050 1051/** 1052 * ata_dev_classify - determine device type based on ATA-spec signature 1053 * @tf: ATA taskfile register set for device to be identified 1054 * 1055 * Determine from taskfile register contents whether a device is 1056 * ATA or ATAPI, as per "Signature and persistence" section 1057 * of ATA/PI spec (volume 1, sect 5.14). 1058 * 1059 * LOCKING: 1060 * None. 1061 * 1062 * RETURNS: 1063 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or 1064 * %ATA_DEV_UNKNOWN the event of failure. 
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * So, we only check those.  It's sufficient for uniqueness.
	 *
	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
	 * signatures for ATA and ATAPI devices attached on SerialATA,
	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, SerialATA
	 * spec has never mentioned about using different signatures
	 * for ATA/ATAPI devices.  Then, Serial ATA II: Port
	 * Multiplier specification began to use 0x69/0x96 to identify
	 * port multipliers and 0x3c/0xc3 to identify SEMB device.
	 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
	 * 0x69/0x96 shortly and described them as reserved for
	 * SerialATA.
	 *
	 * We follow the current spec and consider that 0x69/0x96
	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
	 */
	if ((tf->lbam == 0) && (tf->lbah == 0)) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
		DPRINTK("found PMP device by sig\n");
		return ATA_DEV_PMP;
	}

	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
		printk(KERN_INFO "ata: SEMB device ignored\n");
		return ATA_DEV_SEMB_UNSUP; /* not yet */
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}

/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@dev: ATA device to classify (starting at zero)
 *	@present: device seems present
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */
unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
				  u8 *r_err)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	/* the device must be selected before its shadow registers are read */
	ap->ops->dev_select(ap, dev->devno);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	err = tf.feature;	/* feature reads back as the Error register */
	if (r_err)
		*r_err = err;

	/* see if device passed diags: continue and warn later.
	 * Per the ATA diagnostic codes, 0x01 means the device passed;
	 * 0x81 seen on device 0 means device 1 failed while device 0
	 * itself is OK.  A raw 0x00 is treated as a diagnostic failure
	 * but classification still proceeds.
	 */
	if (err == 0)
		/* diagnostic fail : do nothing _YET_ */
		dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if (err == 1)
		/* do nothing */ ;
	else if ((dev->devno == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN) {
		/* If the device failed diagnostic, it's likely to
		 * have reported incorrect device signature too.
		 * Assume ATA device if the device seems present but
		 * device signature is invalid with diagnostic
		 * failure.
		 */
		if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
			class = ATA_DEV_ATA;
		else
			class = ATA_DEV_NONE;
	} else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		/* all-zero status with an ATA signature: nothing there */
		class = ATA_DEV_NONE;

	return class;
}

/**
 *	ata_id_string - Convert IDENTIFY DEVICE page into string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an even number.
 *
 *	The strings in the IDENTIFY DEVICE page are broken up into
 *	16-bit chunks.  Run through the string, and output each
 *	8-bit chunk linearly, regardless of platform.
 *
 *	LOCKING:
 *	caller.
 */

void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	/* each IDENTIFY word holds two characters, high byte first */
	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}

/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 *
 *	LOCKING:
 *	caller.
1226 */ 1227void ata_id_c_string(const u16 *id, unsigned char *s, 1228 unsigned int ofs, unsigned int len) 1229{ 1230 unsigned char *p; 1231 1232 WARN_ON(!(len & 1)); 1233 1234 ata_id_string(id, s, ofs, len - 1); 1235 1236 p = s + strnlen(s, len - 1); 1237 while (p > s && p[-1] == ' ') 1238 p--; 1239 *p = '\0'; 1240} 1241 1242static u64 ata_id_n_sectors(const u16 *id) 1243{ 1244 if (ata_id_has_lba(id)) { 1245 if (ata_id_has_lba48(id)) 1246 return ata_id_u64(id, 100); 1247 else 1248 return ata_id_u32(id, 60); 1249 } else { 1250 if (ata_id_current_chs_valid(id)) 1251 return ata_id_u32(id, 57); 1252 else 1253 return id[1] * id[3] * id[6]; 1254 } 1255} 1256 1257static u64 ata_tf_to_lba48(struct ata_taskfile *tf) 1258{ 1259 u64 sectors = 0; 1260 1261 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40; 1262 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32; 1263 sectors |= (tf->hob_lbal & 0xff) << 24; 1264 sectors |= (tf->lbah & 0xff) << 16; 1265 sectors |= (tf->lbam & 0xff) << 8; 1266 sectors |= (tf->lbal & 0xff); 1267 1268 return ++sectors; 1269} 1270 1271static u64 ata_tf_to_lba(struct ata_taskfile *tf) 1272{ 1273 u64 sectors = 0; 1274 1275 sectors |= (tf->device & 0x0f) << 24; 1276 sectors |= (tf->lbah & 0xff) << 16; 1277 sectors |= (tf->lbam & 0xff) << 8; 1278 sectors |= (tf->lbal & 0xff); 1279 1280 return ++sectors; 1281} 1282 1283/** 1284 * ata_read_native_max_address - Read native max address 1285 * @dev: target device 1286 * @max_sectors: out parameter for the result native max address 1287 * 1288 * Perform an LBA48 or LBA28 native size query upon the device in 1289 * question. 1290 * 1291 * RETURNS: 1292 * 0 on success, -EACCES if command is aborted by the drive. 1293 * -EIO on other errors. 
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to read native "
			       "max address (err_mask=0x%x)\n", err_mask);
		/* ATA_ABORTED in the result error register means the
		 * device itself rejected the command */
		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	/* result address comes back in the taskfile registers */
	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf);
	else
		*max_sectors = ata_tf_to_lba(&tf);
	/* adjust for devices known to report one sector too many */
	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
		(*max_sectors)--;
	return 0;
}

/**
 *	ata_set_max_sectors - Set max sectors
 *	@dev: target device
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Set max sectors of @dev to @new_sectors.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted or denied (due to
 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
 *	errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	/* the taskfile carries the last addressable LBA, not the count */
	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		/* LBA28: bits 27:24 go into the device register */
		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to set "
			       "max address (err_mask=0x%x)\n", err_mask);
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}

/**
 *	ata_hpa_resize - Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
 *	it if required to the full size of the media. The caller must check
 *	the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if (dev->class != ATA_DEV_ATA ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If HPA isn't going to be unlocked, skip HPA
		 * resizing from the next try.
		 */
		if (!ata_ignore_hpa) {
			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
				       "broken, will skip HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}

	/* nothing to do? */
	if (native_sectors <= sectors || !ata_ignore_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_printk(dev, KERN_INFO,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_printk(dev, KERN_WARNING,
				"native sectors (%llu) is smaller than "
				"sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
			       "(%llu -> %llu), skipping HPA handling\n",
			       (unsigned long long)sectors,
			       (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
			       "data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_printk(dev, KERN_INFO,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}

/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
{
}


/**
 *	ata_std_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.  Works with both PIO and MMIO.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */

void ata_std_dev_select(struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	iowrite8(tmp, ap->ioaddr.device_addr);
	ata_pause(ap);	/* needed; also flushes, for mmio */
}

/**
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	This is a high-level version of ata_std_dev_select(),
 *	which additionally provides the services of inserting
 *	the proper pauses and status polling, where needed.
 *
 *	LOCKING:
 *	caller.
 */

void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
{
	if (ata_msg_probe(ap))
		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
				"device %u, wait %u\n", device, wait);

	if (wait)
		ata_wait_idle(ap);

	ap->ops->dev_select(ap, device);

	if (wait) {
		/* give ATAPI devices extra settle time after selection */
		if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}

/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}

/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device. This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
unsigned long ata_id_xfermask(const u16 *id)
{
	unsigned long pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;	/* PIO0-2 are always supported */
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However it's the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 *	Process compact flash extended modes
		 */
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}

/**
 *	ata_pio_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@fn: workqueue function to be scheduled
 *	@data: data for @fn to use
 *	@delay: delay time for workqueue function
 *
 *	Schedule @fn(@data) for execution after @delay jiffies using
 *	port_task.  There is one port_task per port and it's the
 *	user(low level driver)'s responsibility to make sure that only
 *	one task is active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_pio_queue_task() may be ignored for EH
 *	synchronization.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void ata_pio_queue_task(struct ata_port *ap, void *data,
			       unsigned long delay)
{
	ap->port_task_data = data;

	/* may fail if ata_port_flush_task() in progress */
	queue_delayed_work(ata_wq, &ap->port_task, delay);
}

/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guaranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	DPRINTK("ENTER\n");

	cancel_rearming_delayed_work(&ap->port_task);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}

/* qc completion callback for internal commands: wakes the waiter
 * sleeping in ata_exec_internal_sg() */
static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}

/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sgl: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* save in-flight command bookkeeping so it can be restored
	 * after the internal command completes */
	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!timeout)
		timeout = ata_probe_timeout * 1000 / HZ;

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "qc timeout (cmd 0x%x)\n",
					       command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		/* AC_ERR_OTHER is a catch-all; drop it once a more
		 * specific cause is known */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	/* free the qc and restore the preempted command bookkeeping */
	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}

/**
 *	ata_exec_internal - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Wrapper around ata_exec_internal_sg() which takes simple
 *	buffer instead of sg list.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen,
			   unsigned long timeout)
{
	struct scatterlist *psg = NULL, sg;
	unsigned int n_elem = 0;

	if (dma_dir != DMA_NONE) {
		WARN_ON(!buf);
		/* wrap the flat buffer in a single-entry sg list */
		sg_init_one(&sg, buf, buflen);
		psg = &sg;
		n_elem++;
	}

	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
				    timeout);
}

/**
 *	ata_do_simple_cmd - execute simple internal command
 *	@dev: Device to which the command is sent
 *	@cmd: Opcode to execute
 *
 *	Execute a 'simple' command, that only consists of the opcode
 *	'cmd' itself, without filling any other registers
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = cmd;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
}

/**
 *	ata_pio_need_iordy	-	check if iordy needed
 *	@adev: ATA device
 *
 *	Check if the current speed of the device requires IORDY. Used
 *	by various controllers for chip configuration.
 */

unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	/* Controller doesn't support  IORDY. Probably a pointless check
	   as the caller should know this */
	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
		return 0;
	/* PIO3 and higher it is mandatory */
	if (adev->pio_mode > XFER_PIO_2)
		return 1;
	/* We turn it on when possible */
	if (ata_id_has_iordy(adev->id))
		return 1;
	return 0;
}

/**
 *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
 *	@adev: ATA device
 *
 *	Compute the highest mode possible if we are not using iordy. Return
 *	-1 if no iordy mode is available.
 */

static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
{
	/* If we have no drive specific rule, then PIO 2 is non IORDY */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		u16 pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 3 << ATA_SHIFT_PIO;
			return 7 << ATA_SHIFT_PIO;
		}
	}
	return 3 << ATA_SHIFT_PIO;
}

/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
 *	now we abort if we hit that case.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
 retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			DPRINTK("ata%u.%d: NODEV after polling detection\n",
				ap->print_id, dev->devno);
			return -ENOENT;
		}

		/* Device or controller might have reported the wrong
		 * device class.  Give a shot at the other IDENTIFY if
		 * the current one is aborted by the device.
		 */
		if (may_fallback &&
		    (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			may_fallback = 0;

			if (class == ATA_DEV_ATA)
				class = ATA_DEV_ATAPI;
			else
				class = ATA_DEV_ATA;
			goto retry;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * should never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}

/* non-zero if @dev sits behind a SATA cable but doesn't claim SATA in
 * its IDENTIFY data, i.e. it is likely behind a SATA<->PATA bridge */
static inline u8 ata_dev_knobble(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}

/* Enable NCQ if both host and device support it and format a
 * human-readable description of the result into @desc. */
static void ata_dev_config_ncq(struct ata_device *dev,
			       char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->link->ap;
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);

	if (!ata_id_has_ncq(dev->id)) {
		desc[0] = '\0';
		return;
	}
	if (dev->horkage & ATA_HORKAGE_NONCQ) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return;
	}
	if (ap->flags & ATA_FLAG_NCQ) {
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
}

/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
2235 * 2236 * LOCKING: 2237 * Kernel thread context (may sleep) 2238 * 2239 * RETURNS: 2240 * 0 on success, -errno otherwise 2241 */ 2242int ata_dev_configure(struct ata_device *dev) 2243{ 2244 struct ata_port *ap = dev->link->ap; 2245 struct ata_eh_context *ehc = &dev->link->eh_context; 2246 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO; 2247 const u16 *id = dev->id; 2248 unsigned long xfer_mask; 2249 char revbuf[7]; /* XYZ-99\0 */ 2250 char fwrevbuf[ATA_ID_FW_REV_LEN+1]; 2251 char modelbuf[ATA_ID_PROD_LEN+1]; 2252 int rc; 2253 2254 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) { 2255 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n", 2256 __FUNCTION__); 2257 return 0; 2258 } 2259 2260 if (ata_msg_probe(ap)) 2261 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__); 2262 2263 /* set horkage */ 2264 dev->horkage |= ata_dev_blacklisted(dev); 2265 ata_force_horkage(dev); 2266 2267 /* let ACPI work its magic */ 2268 rc = ata_acpi_on_devcfg(dev); 2269 if (rc) 2270 return rc; 2271 2272 /* massage HPA, do it early as it might change IDENTIFY data */ 2273 rc = ata_hpa_resize(dev); 2274 if (rc) 2275 return rc; 2276 2277 /* print device capabilities */ 2278 if (ata_msg_probe(ap)) 2279 ata_dev_printk(dev, KERN_DEBUG, 2280 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x " 2281 "85:%04x 86:%04x 87:%04x 88:%04x\n", 2282 __FUNCTION__, 2283 id[49], id[82], id[83], id[84], 2284 id[85], id[86], id[87], id[88]); 2285 2286 /* initialize to-be-configured parameters */ 2287 dev->flags &= ~ATA_DFLAG_CFG_MASK; 2288 dev->max_sectors = 0; 2289 dev->cdb_len = 0; 2290 dev->n_sectors = 0; 2291 dev->cylinders = 0; 2292 dev->heads = 0; 2293 dev->sectors = 0; 2294 2295 /* 2296 * common ATA, ATAPI feature tests 2297 */ 2298 2299 /* find max transfer mode; for printk only */ 2300 xfer_mask = ata_id_xfermask(id); 2301 2302 if (ata_msg_probe(ap)) 2303 ata_dump_id(id); 2304 2305 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */ 2306 ata_id_c_string(dev->id, fwrevbuf, 
ATA_ID_FW_REV, 2307 sizeof(fwrevbuf)); 2308 2309 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD, 2310 sizeof(modelbuf)); 2311 2312 /* ATA-specific feature tests */ 2313 if (dev->class == ATA_DEV_ATA) { 2314 if (ata_id_is_cfa(id)) { 2315 if (id[162] & 1) /* CPRM may make this media unusable */ 2316 ata_dev_printk(dev, KERN_WARNING, 2317 "supports DRM functions and may " 2318 "not be fully accessable.\n"); 2319 snprintf(revbuf, 7, "CFA"); 2320 } else { 2321 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id)); 2322 /* Warn the user if the device has TPM extensions */ 2323 if (ata_id_has_tpm(id)) 2324 ata_dev_printk(dev, KERN_WARNING, 2325 "supports DRM functions and may " 2326 "not be fully accessable.\n"); 2327 } 2328 2329 dev->n_sectors = ata_id_n_sectors(id); 2330 2331 if (dev->id[59] & 0x100) 2332 dev->multi_count = dev->id[59] & 0xff; 2333 2334 if (ata_id_has_lba(id)) { 2335 const char *lba_desc; 2336 char ncq_desc[20]; 2337 2338 lba_desc = "LBA"; 2339 dev->flags |= ATA_DFLAG_LBA; 2340 if (ata_id_has_lba48(id)) { 2341 dev->flags |= ATA_DFLAG_LBA48; 2342 lba_desc = "LBA48"; 2343 2344 if (dev->n_sectors >= (1UL << 28) && 2345 ata_id_has_flush_ext(id)) 2346 dev->flags |= ATA_DFLAG_FLUSH_EXT; 2347 } 2348 2349 /* config NCQ */ 2350 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc)); 2351 2352 /* print device info to dmesg */ 2353 if (ata_msg_drv(ap) && print_info) { 2354 ata_dev_printk(dev, KERN_INFO, 2355 "%s: %s, %s, max %s\n", 2356 revbuf, modelbuf, fwrevbuf, 2357 ata_mode_string(xfer_mask)); 2358 ata_dev_printk(dev, KERN_INFO, 2359 "%Lu sectors, multi %u: %s %s\n", 2360 (unsigned long long)dev->n_sectors, 2361 dev->multi_count, lba_desc, ncq_desc); 2362 } 2363 } else { 2364 /* CHS */ 2365 2366 /* Default translation */ 2367 dev->cylinders = id[1]; 2368 dev->heads = id[3]; 2369 dev->sectors = id[6]; 2370 2371 if (ata_id_current_chs_valid(id)) { 2372 /* Current CHS translation is valid. 
*/ 2373 dev->cylinders = id[54]; 2374 dev->heads = id[55]; 2375 dev->sectors = id[56]; 2376 } 2377 2378 /* print device info to dmesg */ 2379 if (ata_msg_drv(ap) && print_info) { 2380 ata_dev_printk(dev, KERN_INFO, 2381 "%s: %s, %s, max %s\n", 2382 revbuf, modelbuf, fwrevbuf, 2383 ata_mode_string(xfer_mask)); 2384 ata_dev_printk(dev, KERN_INFO, 2385 "%Lu sectors, multi %u, CHS %u/%u/%u\n", 2386 (unsigned long long)dev->n_sectors, 2387 dev->multi_count, dev->cylinders, 2388 dev->heads, dev->sectors); 2389 } 2390 } 2391 2392 dev->cdb_len = 16; 2393 } 2394 2395 /* ATAPI-specific feature tests */ 2396 else if (dev->class == ATA_DEV_ATAPI) { 2397 const char *cdb_intr_string = ""; 2398 const char *atapi_an_string = ""; 2399 const char *dma_dir_string = ""; 2400 u32 sntf; 2401 2402 rc = atapi_cdb_len(id); 2403 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) { 2404 if (ata_msg_warn(ap)) 2405 ata_dev_printk(dev, KERN_WARNING, 2406 "unsupported CDB len\n"); 2407 rc = -EINVAL; 2408 goto err_out_nosup; 2409 } 2410 dev->cdb_len = (unsigned int) rc; 2411 2412 /* Enable ATAPI AN if both the host and device have 2413 * the support. If PMP is attached, SNTF is required 2414 * to enable ATAPI AN to discern between PHY status 2415 * changed notifications and ATAPI ANs. 
2416 */ 2417 if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) && 2418 (!ap->nr_pmp_links || 2419 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) { 2420 unsigned int err_mask; 2421 2422 /* issue SET feature command to turn this on */ 2423 err_mask = ata_dev_set_feature(dev, 2424 SETFEATURES_SATA_ENABLE, SATA_AN); 2425 if (err_mask) 2426 ata_dev_printk(dev, KERN_ERR, 2427 "failed to enable ATAPI AN " 2428 "(err_mask=0x%x)\n", err_mask); 2429 else { 2430 dev->flags |= ATA_DFLAG_AN; 2431 atapi_an_string = ", ATAPI AN"; 2432 } 2433 } 2434 2435 if (ata_id_cdb_intr(dev->id)) { 2436 dev->flags |= ATA_DFLAG_CDB_INTR; 2437 cdb_intr_string = ", CDB intr"; 2438 } 2439 2440 if (atapi_dmadir || atapi_id_dmadir(dev->id)) { 2441 dev->flags |= ATA_DFLAG_DMADIR; 2442 dma_dir_string = ", DMADIR"; 2443 } 2444 2445 /* print device info to dmesg */ 2446 if (ata_msg_drv(ap) && print_info) 2447 ata_dev_printk(dev, KERN_INFO, 2448 "ATAPI: %s, %s, max %s%s%s%s\n", 2449 modelbuf, fwrevbuf, 2450 ata_mode_string(xfer_mask), 2451 cdb_intr_string, atapi_an_string, 2452 dma_dir_string); 2453 } 2454 2455 /* determine max_sectors */ 2456 dev->max_sectors = ATA_MAX_SECTORS; 2457 if (dev->flags & ATA_DFLAG_LBA48) 2458 dev->max_sectors = ATA_MAX_SECTORS_LBA48; 2459 2460 if (!(dev->horkage & ATA_HORKAGE_IPM)) { 2461 if (ata_id_has_hipm(dev->id)) 2462 dev->flags |= ATA_DFLAG_HIPM; 2463 if (ata_id_has_dipm(dev->id)) 2464 dev->flags |= ATA_DFLAG_DIPM; 2465 } 2466 2467 /* Limit PATA drive on SATA cable bridge transfers to udma5, 2468 200 sectors */ 2469 if (ata_dev_knobble(dev)) { 2470 if (ata_msg_drv(ap) && print_info) 2471 ata_dev_printk(dev, KERN_INFO, 2472 "applying bridge limits\n"); 2473 dev->udma_mask &= ATA_UDMA5; 2474 dev->max_sectors = ATA_MAX_SECTORS; 2475 } 2476 2477 if ((dev->class == ATA_DEV_ATAPI) && 2478 (atapi_command_packet_set(id) == TYPE_TAPE)) { 2479 dev->max_sectors = ATA_MAX_SECTORS_TAPE; 2480 dev->horkage |= ATA_HORKAGE_STUCK_ERR; 2481 } 2482 2483 if (dev->horkage & 
ATA_HORKAGE_MAX_SEC_128) 2484 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, 2485 dev->max_sectors); 2486 2487 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) { 2488 dev->horkage |= ATA_HORKAGE_IPM; 2489 2490 /* reset link pm_policy for this port to no pm */ 2491 ap->pm_policy = MAX_PERFORMANCE; 2492 } 2493 2494 if (ap->ops->dev_config) 2495 ap->ops->dev_config(dev); 2496 2497 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) { 2498 /* Let the user know. We don't want to disallow opens for 2499 rescue purposes, or in case the vendor is just a blithering 2500 idiot. Do this after the dev_config call as some controllers 2501 with buggy firmware may want to avoid reporting false device 2502 bugs */ 2503 2504 if (print_info) { 2505 ata_dev_printk(dev, KERN_WARNING, 2506"Drive reports diagnostics failure. This may indicate a drive\n"); 2507 ata_dev_printk(dev, KERN_WARNING, 2508"fault or invalid emulation. Contact drive vendor for information.\n"); 2509 } 2510 } 2511 2512 if (ata_msg_probe(ap)) 2513 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n", 2514 __FUNCTION__, ata_chk_status(ap)); 2515 return 0; 2516 2517err_out_nosup: 2518 if (ata_msg_probe(ap)) 2519 ata_dev_printk(dev, KERN_DEBUG, 2520 "%s: EXIT, err\n", __FUNCTION__); 2521 return rc; 2522} 2523 2524/** 2525 * ata_cable_40wire - return 40 wire cable type 2526 * @ap: port 2527 * 2528 * Helper method for drivers which want to hardwire 40 wire cable 2529 * detection. 2530 */ 2531 2532int ata_cable_40wire(struct ata_port *ap) 2533{ 2534 return ATA_CBL_PATA40; 2535} 2536 2537/** 2538 * ata_cable_80wire - return 80 wire cable type 2539 * @ap: port 2540 * 2541 * Helper method for drivers which want to hardwire 80 wire cable 2542 * detection. 2543 */ 2544 2545int ata_cable_80wire(struct ata_port *ap) 2546{ 2547 return ATA_CBL_PATA80; 2548} 2549 2550/** 2551 * ata_cable_unknown - return unknown PATA cable. 2552 * @ap: port 2553 * 2554 * Helper method for drivers which have no PATA cable detection. 
 */

int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}

/**
 *	ata_cable_ignore	-	return ignored PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which don't use cable type to limit
 *	transfer mode.
 */
int ata_cable_ignore(struct ata_port *ap)
{
	return ATA_CBL_PATA_IGN;
}

/**
 *	ata_cable_sata	-	return SATA cable type
 *	@ap: port
 *
 *	Helper method for drivers which have SATA cables
 */

int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}

/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */

int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int rc;
	struct ata_device *dev;

	/* mark port enabled before touching the bus */
	ata_port_probe(ap);

	/* each device gets ATA_PROBE_MAX_TRIES attempts; decremented
	 * in the fail path below until the device is given up on */
	ata_link_for_each_dev(dev, &ap->link)
		tries[dev->devno] = ATA_PROBE_MAX_TRIES;

 retry:
	ata_link_for_each_dev(dev, &ap->link) {
		/* If we issue an SRST then an ATA drive (not ATAPI)
		 * may change configuration and be in PIO0 timing. If
		 * we do a hard reset (or are coming from power on)
		 * this is true for ATA or ATAPI. Until we've set a
		 * suitable controller mode we should not touch the
		 * bus as we may be talking too fast.
		 */
		dev->pio_mode = XFER_PIO_0;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights. Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
2628 */ 2629 if (ap->ops->set_piomode) 2630 ap->ops->set_piomode(ap, dev); 2631 } 2632 2633 /* reset and determine device classes */ 2634 ap->ops->phy_reset(ap); 2635 2636 ata_link_for_each_dev(dev, &ap->link) { 2637 if (!(ap->flags & ATA_FLAG_DISABLED) && 2638 dev->class != ATA_DEV_UNKNOWN) 2639 classes[dev->devno] = dev->class; 2640 else 2641 classes[dev->devno] = ATA_DEV_NONE; 2642 2643 dev->class = ATA_DEV_UNKNOWN; 2644 } 2645 2646 ata_port_probe(ap); 2647 2648 /* read IDENTIFY page and configure devices. We have to do the identify 2649 specific sequence bass-ackwards so that PDIAG- is released by 2650 the slave device */ 2651 2652 ata_link_for_each_dev(dev, &ap->link) { 2653 if (tries[dev->devno]) 2654 dev->class = classes[dev->devno]; 2655 2656 if (!ata_dev_enabled(dev)) 2657 continue; 2658 2659 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET, 2660 dev->id); 2661 if (rc) 2662 goto fail; 2663 } 2664 2665 /* Now ask for the cable type as PDIAG- should have been released */ 2666 if (ap->ops->cable_detect) 2667 ap->cbl = ap->ops->cable_detect(ap); 2668 2669 /* We may have SATA bridge glue hiding here irrespective of the 2670 reported cable types and sensed types */ 2671 ata_link_for_each_dev(dev, &ap->link) { 2672 if (!ata_dev_enabled(dev)) 2673 continue; 2674 /* SATA drives indicate we have a bridge. We don't know which 2675 end of the link the bridge is which is a problem */ 2676 if (ata_id_is_sata(dev->id)) 2677 ap->cbl = ATA_CBL_SATA; 2678 } 2679 2680 /* After the identify sequence we can now set up the devices. 
We do 2681 this in the normal order so that the user doesn't get confused */ 2682 2683 ata_link_for_each_dev(dev, &ap->link) { 2684 if (!ata_dev_enabled(dev)) 2685 continue; 2686 2687 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO; 2688 rc = ata_dev_configure(dev); 2689 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO; 2690 if (rc) 2691 goto fail; 2692 } 2693 2694 /* configure transfer mode */ 2695 rc = ata_set_mode(&ap->link, &dev); 2696 if (rc) 2697 goto fail; 2698 2699 ata_link_for_each_dev(dev, &ap->link) 2700 if (ata_dev_enabled(dev)) 2701 return 0; 2702 2703 /* no device present, disable port */ 2704 ata_port_disable(ap); 2705 return -ENODEV; 2706 2707 fail: 2708 tries[dev->devno]--; 2709 2710 switch (rc) { 2711 case -EINVAL: 2712 /* eeek, something went very wrong, give up */ 2713 tries[dev->devno] = 0; 2714 break; 2715 2716 case -ENODEV: 2717 /* give it just one more chance */ 2718 tries[dev->devno] = min(tries[dev->devno], 1); 2719 case -EIO: 2720 if (tries[dev->devno] == 1) { 2721 /* This is the last chance, better to slow 2722 * down than lose it. 2723 */ 2724 sata_down_spd_limit(&ap->link); 2725 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); 2726 } 2727 } 2728 2729 if (!tries[dev->devno]) 2730 ata_dev_disable(dev); 2731 2732 goto retry; 2733} 2734 2735/** 2736 * ata_port_probe - Mark port as enabled 2737 * @ap: Port for which we indicate enablement 2738 * 2739 * Modify @ap data structure such that the system 2740 * thinks that the entire port is enabled. 2741 * 2742 * LOCKING: host lock, or some other form of 2743 * serialization. 2744 */ 2745 2746void ata_port_probe(struct ata_port *ap) 2747{ 2748 ap->flags &= ~ATA_FLAG_DISABLED; 2749} 2750 2751/** 2752 * sata_print_link_status - Print SATA link status 2753 * @link: SATA link to printk link status about 2754 * 2755 * This function prints link speed and status of a SATA link. 2756 * 2757 * LOCKING: 2758 * None. 
 */
void sata_print_link_status(struct ata_link *link)
{
	u32 sstatus, scontrol, tmp;

	/* nothing to report if SStatus can't be read */
	if (sata_scr_read(link, SCR_STATUS, &sstatus))
		return;
	sata_scr_read(link, SCR_CONTROL, &scontrol);

	if (ata_link_online(link)) {
		/* SStatus bits 7:4 carry the current negotiated speed */
		tmp = (sstatus >> 4) & 0xf;
		ata_link_printk(link, KERN_INFO,
				"SATA link up %s (SStatus %X SControl %X)\n",
				sata_spd_string(tmp), sstatus, scontrol);
	} else {
		ata_link_printk(link, KERN_INFO,
				"SATA link down (SStatus %X SControl %X)\n",
				sstatus, scontrol);
	}
}

/**
 *	ata_dev_pair		-	return other device on cable
 *	@adev: device
 *
 *	Obtain the other device on the same cable, or if none is
 *	present NULL is returned
 */

struct ata_device *ata_dev_pair(struct ata_device *adev)
{
	struct ata_link *link = adev->link;
	/* devno is 0 or 1; 1 - devno selects the companion device */
	struct ata_device *pair = &link->device[1 - adev->devno];
	if (!ata_dev_enabled(pair))
		return NULL;
	return pair;
}

/**
 *	ata_port_disable - Disable port.
 *	@ap: Port to be disabled.
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is disabled, and should
 *	never attempt to probe or communicate with devices
 *	on this port.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */

void ata_port_disable(struct ata_port *ap)
{
	/* forget both possible devices and flag the whole port off */
	ap->link.device[0].class = ATA_DEV_NONE;
	ap->link.device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_DISABLED;
}

/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@link: Link to adjust SATA spd limit for
 *
 *	Adjust SATA spd limit of @link downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_link *link)
{
	u32 sstatus, spd, mask;
	int rc, highbit;

	if (!sata_scr_valid(link))
		return -EOPNOTSUPP;

	/* If SCR can be read, use it to determine the current SPD.
	 * If not, use cached value in link->sata_spd.
	 */
	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
	if (rc == 0)
		spd = (sstatus >> 4) & 0xf;
	else
		spd = link->sata_spd;

	mask = link->sata_spd_limit;
	/* mask <= 1 means only the lowest speed is allowed already */
	if (mask <= 1)
		return -EINVAL;

	/* unconditionally mask off the highest bit */
	highbit = fls(mask) - 1;
	mask &= ~(1 << highbit);

	/* Mask off all speeds higher than or equal to the current
	 * one.  Force 1.5Gbps if current SPD is not available.
	 */
	if (spd > 1)
		mask &= (1 << (spd - 1)) - 1;
	else
		mask &= 1;

	/* were we already at the bottom? */
	if (!mask)
		return -EINVAL;

	link->sata_spd_limit = mask;

	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
			sata_spd_string(fls(mask)));

	return 0;
}

/* Compute the SControl SPD field for @link's limit; returns non-zero
 * if the field needs to change (i.e. a hardreset would be required).
 * @scontrol is updated in place with the target SPD bits. */
static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
{
	struct ata_link *host_link = &link->ap->link;
	u32 limit, target, spd;

	limit = link->sata_spd_limit;

	/* Don't configure downstream link faster than upstream link.
	 *  It doesn't speed up anything and some PMPs choke on such
	 *  configuration.
2886 */ 2887 if (!ata_is_host_link(link) && host_link->sata_spd) 2888 limit &= (1 << host_link->sata_spd) - 1; 2889 2890 if (limit == UINT_MAX) 2891 target = 0; 2892 else 2893 target = fls(limit); 2894 2895 spd = (*scontrol >> 4) & 0xf; 2896 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4); 2897 2898 return spd != target; 2899} 2900 2901/** 2902 * sata_set_spd_needed - is SATA spd configuration needed 2903 * @link: Link in question 2904 * 2905 * Test whether the spd limit in SControl matches 2906 * @link->sata_spd_limit. This function is used to determine 2907 * whether hardreset is necessary to apply SATA spd 2908 * configuration. 2909 * 2910 * LOCKING: 2911 * Inherited from caller. 2912 * 2913 * RETURNS: 2914 * 1 if SATA spd configuration is needed, 0 otherwise. 2915 */ 2916int sata_set_spd_needed(struct ata_link *link) 2917{ 2918 u32 scontrol; 2919 2920 if (sata_scr_read(link, SCR_CONTROL, &scontrol)) 2921 return 1; 2922 2923 return __sata_set_spd_needed(link, &scontrol); 2924} 2925 2926/** 2927 * sata_set_spd - set SATA spd according to spd limit 2928 * @link: Link to set SATA spd for 2929 * 2930 * Set SATA spd of @link according to sata_spd_limit. 2931 * 2932 * LOCKING: 2933 * Inherited from caller. 2934 * 2935 * RETURNS: 2936 * 0 if spd doesn't need to be changed, 1 if spd has been 2937 * changed. Negative errno if SCR registers are inaccessible. 2938 */ 2939int sata_set_spd(struct ata_link *link) 2940{ 2941 u32 scontrol; 2942 int rc; 2943 2944 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 2945 return rc; 2946 2947 if (!__sata_set_spd_needed(link, &scontrol)) 2948 return 0; 2949 2950 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 2951 return rc; 2952 2953 return 1; 2954} 2955 2956/* 2957 * This mode timing computation functionality is ported over from 2958 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik 2959 */ 2960/* 2961 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds). 
2962 * These were taken from ATA/ATAPI-6 standard, rev 0a, except 2963 * for UDMA6, which is currently supported only by Maxtor drives. 2964 * 2965 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0. 2966 */ 2967 2968static const struct ata_timing ata_timing[] = { 2969/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */ 2970 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 }, 2971 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 }, 2972 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 }, 2973 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 }, 2974 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 }, 2975 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 }, 2976 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 }, 2977 2978 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 }, 2979 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 }, 2980 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 }, 2981 2982 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 }, 2983 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 }, 2984 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 }, 2985 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 }, 2986 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 }, 2987 2988/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */ 2989 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 }, 2990 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 }, 2991 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 }, 2992 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 }, 2993 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 }, 2994 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 }, 2995 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 }, 2996 2997 { 0xFF } 2998}; 2999 3000#define ENOUGH(v, unit) (((v)-1)/(unit)+1) 3001#define EZ(v, unit) ((v)?ENOUGH(v, unit):0) 3002 3003static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT) 3004{ 3005 q->setup = EZ(t->setup * 1000, T); 3006 q->act8b = EZ(t->act8b * 1000, T); 3007 q->rec8b = EZ(t->rec8b * 1000, T); 3008 q->cyc8b = EZ(t->cyc8b * 1000, T); 3009 q->active = EZ(t->active * 1000, T); 3010 
q->recover = EZ(t->recover * 1000, T); 3011 q->cycle = EZ(t->cycle * 1000, T); 3012 q->udma = EZ(t->udma * 1000, UT); 3013} 3014 3015void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b, 3016 struct ata_timing *m, unsigned int what) 3017{ 3018 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup); 3019 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b); 3020 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b); 3021 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b); 3022 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active); 3023 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover); 3024 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle); 3025 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma); 3026} 3027 3028const struct ata_timing *ata_timing_find_mode(u8 xfer_mode) 3029{ 3030 const struct ata_timing *t = ata_timing; 3031 3032 while (xfer_mode > t->mode) 3033 t++; 3034 3035 if (xfer_mode == t->mode) 3036 return t; 3037 return NULL; 3038} 3039 3040int ata_timing_compute(struct ata_device *adev, unsigned short speed, 3041 struct ata_timing *t, int T, int UT) 3042{ 3043 const struct ata_timing *s; 3044 struct ata_timing p; 3045 3046 /* 3047 * Find the mode. 3048 */ 3049 3050 if (!(s = ata_timing_find_mode(speed))) 3051 return -EINVAL; 3052 3053 memcpy(t, s, sizeof(*s)); 3054 3055 /* 3056 * If the drive is an EIDE drive, it can tell us it needs extended 3057 * PIO/MW_DMA cycle timing. 
3058 */ 3059 3060 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */ 3061 memset(&p, 0, sizeof(p)); 3062 if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) { 3063 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO]; 3064 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY]; 3065 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) { 3066 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN]; 3067 } 3068 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B); 3069 } 3070 3071 /* 3072 * Convert the timing to bus clock counts. 3073 */ 3074 3075 ata_timing_quantize(t, t, T, UT); 3076 3077 /* 3078 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, 3079 * S.M.A.R.T * and some other commands. We have to ensure that the 3080 * DMA cycle timing is slower/equal than the fastest PIO timing. 3081 */ 3082 3083 if (speed > XFER_PIO_6) { 3084 ata_timing_compute(adev, adev->pio_mode, &p, T, UT); 3085 ata_timing_merge(&p, t, t, ATA_TIMING_ALL); 3086 } 3087 3088 /* 3089 * Lengthen active & recovery time so that cycle time is correct. 3090 */ 3091 3092 if (t->act8b + t->rec8b < t->cyc8b) { 3093 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2; 3094 t->rec8b = t->cyc8b - t->act8b; 3095 } 3096 3097 if (t->active + t->recover < t->cycle) { 3098 t->active += (t->cycle - (t->active + t->recover)) / 2; 3099 t->recover = t->cycle - t->active; 3100 } 3101 3102 /* In a few cases quantisation may produce enough errors to 3103 leave t->cycle too low for the sum of active and recovery 3104 if so we must correct this */ 3105 if (t->active + t->recover > t->cycle) 3106 t->cycle = t->active + t->recover; 3107 3108 return 0; 3109} 3110 3111/** 3112 * ata_timing_cycle2mode - find xfer mode for the specified cycle duration 3113 * @xfer_shift: ATA_SHIFT_* value for transfer type to examine. 3114 * @cycle: cycle duration in ns 3115 * 3116 * Return matching xfer mode for @cycle. The returned mode is of 3117 * the transfer type specified by @xfer_shift. 
If @cycle is too 3118 * slow for @xfer_shift, 0xff is returned. If @cycle is faster 3119 * than the fastest known mode, the fasted mode is returned. 3120 * 3121 * LOCKING: 3122 * None. 3123 * 3124 * RETURNS: 3125 * Matching xfer_mode, 0xff if no match found. 3126 */ 3127u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle) 3128{ 3129 u8 base_mode = 0xff, last_mode = 0xff; 3130 const struct ata_xfer_ent *ent; 3131 const struct ata_timing *t; 3132 3133 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) 3134 if (ent->shift == xfer_shift) 3135 base_mode = ent->base; 3136 3137 for (t = ata_timing_find_mode(base_mode); 3138 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) { 3139 unsigned short this_cycle; 3140 3141 switch (xfer_shift) { 3142 case ATA_SHIFT_PIO: 3143 case ATA_SHIFT_MWDMA: 3144 this_cycle = t->cycle; 3145 break; 3146 case ATA_SHIFT_UDMA: 3147 this_cycle = t->udma; 3148 break; 3149 default: 3150 return 0xff; 3151 } 3152 3153 if (cycle > this_cycle) 3154 break; 3155 3156 last_mode = t->mode; 3157 } 3158 3159 return last_mode; 3160} 3161 3162/** 3163 * ata_down_xfermask_limit - adjust dev xfer masks downward 3164 * @dev: Device to adjust xfer masks 3165 * @sel: ATA_DNXFER_* selector 3166 * 3167 * Adjust xfer masks of @dev downward. Note that this function 3168 * does not apply the change. Invoking ata_set_mode() afterwards 3169 * will apply the limit. 3170 * 3171 * LOCKING: 3172 * Inherited from caller. 
3173 * 3174 * RETURNS: 3175 * 0 on success, negative errno on failure 3176 */ 3177int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel) 3178{ 3179 char buf[32]; 3180 unsigned long orig_mask, xfer_mask; 3181 unsigned long pio_mask, mwdma_mask, udma_mask; 3182 int quiet, highbit; 3183 3184 quiet = !!(sel & ATA_DNXFER_QUIET); 3185 sel &= ~ATA_DNXFER_QUIET; 3186 3187 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask, 3188 dev->mwdma_mask, 3189 dev->udma_mask); 3190 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask); 3191 3192 switch (sel) { 3193 case ATA_DNXFER_PIO: 3194 highbit = fls(pio_mask) - 1; 3195 pio_mask &= ~(1 << highbit); 3196 break; 3197 3198 case ATA_DNXFER_DMA: 3199 if (udma_mask) { 3200 highbit = fls(udma_mask) - 1; 3201 udma_mask &= ~(1 << highbit); 3202 if (!udma_mask) 3203 return -ENOENT; 3204 } else if (mwdma_mask) { 3205 highbit = fls(mwdma_mask) - 1; 3206 mwdma_mask &= ~(1 << highbit); 3207 if (!mwdma_mask) 3208 return -ENOENT; 3209 } 3210 break; 3211 3212 case ATA_DNXFER_40C: 3213 udma_mask &= ATA_UDMA_MASK_40C; 3214 break; 3215 3216 case ATA_DNXFER_FORCE_PIO0: 3217 pio_mask &= 1; 3218 case ATA_DNXFER_FORCE_PIO: 3219 mwdma_mask = 0; 3220 udma_mask = 0; 3221 break; 3222 3223 default: 3224 BUG(); 3225 } 3226 3227 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask); 3228 3229 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask) 3230 return -ENOENT; 3231 3232 if (!quiet) { 3233 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA)) 3234 snprintf(buf, sizeof(buf), "%s:%s", 3235 ata_mode_string(xfer_mask), 3236 ata_mode_string(xfer_mask & ATA_MASK_PIO)); 3237 else 3238 snprintf(buf, sizeof(buf), "%s", 3239 ata_mode_string(xfer_mask)); 3240 3241 ata_dev_printk(dev, KERN_WARNING, 3242 "limiting speed to %s\n", buf); 3243 } 3244 3245 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask, 3246 &dev->udma_mask); 3247 3248 return 0; 3249} 3250 3251static int ata_dev_set_mode(struct ata_device *dev) 
3252{ 3253 struct ata_eh_context *ehc = &dev->link->eh_context; 3254 const char *dev_err_whine = ""; 3255 int ign_dev_err = 0; 3256 unsigned int err_mask; 3257 int rc; 3258 3259 dev->flags &= ~ATA_DFLAG_PIO; 3260 if (dev->xfer_shift == ATA_SHIFT_PIO) 3261 dev->flags |= ATA_DFLAG_PIO; 3262 3263 err_mask = ata_dev_set_xfermode(dev); 3264 3265 if (err_mask & ~AC_ERR_DEV) 3266 goto fail; 3267 3268 /* revalidate */ 3269 ehc->i.flags |= ATA_EHI_POST_SETMODE; 3270 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0); 3271 ehc->i.flags &= ~ATA_EHI_POST_SETMODE; 3272 if (rc) 3273 return rc; 3274 3275 /* Old CFA may refuse this command, which is just fine */ 3276 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id)) 3277 ign_dev_err = 1; 3278 3279 /* Some very old devices and some bad newer ones fail any kind of 3280 SET_XFERMODE request but support PIO0-2 timings and no IORDY */ 3281 if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) && 3282 dev->pio_mode <= XFER_PIO_2) 3283 ign_dev_err = 1; 3284 3285 /* Early MWDMA devices do DMA but don't allow DMA mode setting. 
3286 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */ 3287 if (dev->xfer_shift == ATA_SHIFT_MWDMA && 3288 dev->dma_mode == XFER_MW_DMA_0 && 3289 (dev->id[63] >> 8) & 1) 3290 ign_dev_err = 1; 3291 3292 /* if the device is actually configured correctly, ignore dev err */ 3293 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id))) 3294 ign_dev_err = 1; 3295 3296 if (err_mask & AC_ERR_DEV) { 3297 if (!ign_dev_err) 3298 goto fail; 3299 else 3300 dev_err_whine = " (device error ignored)"; 3301 } 3302 3303 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n", 3304 dev->xfer_shift, (int)dev->xfer_mode); 3305 3306 ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n", 3307 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)), 3308 dev_err_whine); 3309 3310 return 0; 3311 3312 fail: 3313 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode " 3314 "(err_mask=0x%x)\n", err_mask); 3315 return -EIO; 3316} 3317 3318/** 3319 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER 3320 * @link: link on which timings will be programmed 3321 * @r_failed_dev: out parameter for failed device 3322 * 3323 * Standard implementation of the function used to tune and set 3324 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If 3325 * ata_dev_set_mode() fails, pointer to the failing device is 3326 * returned in @r_failed_dev. 3327 * 3328 * LOCKING: 3329 * PCI/etc. bus probe sem. 
3330 * 3331 * RETURNS: 3332 * 0 on success, negative errno otherwise 3333 */ 3334 3335int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) 3336{ 3337 struct ata_port *ap = link->ap; 3338 struct ata_device *dev; 3339 int rc = 0, used_dma = 0, found = 0; 3340 3341 /* step 1: calculate xfer_mask */ 3342 ata_link_for_each_dev(dev, link) { 3343 unsigned long pio_mask, dma_mask; 3344 unsigned int mode_mask; 3345 3346 if (!ata_dev_enabled(dev)) 3347 continue; 3348 3349 mode_mask = ATA_DMA_MASK_ATA; 3350 if (dev->class == ATA_DEV_ATAPI) 3351 mode_mask = ATA_DMA_MASK_ATAPI; 3352 else if (ata_id_is_cfa(dev->id)) 3353 mode_mask = ATA_DMA_MASK_CFA; 3354 3355 ata_dev_xfermask(dev); 3356 ata_force_xfermask(dev); 3357 3358 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0); 3359 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask); 3360 3361 if (libata_dma_mask & mode_mask) 3362 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask); 3363 else 3364 dma_mask = 0; 3365 3366 dev->pio_mode = ata_xfer_mask2mode(pio_mask); 3367 dev->dma_mode = ata_xfer_mask2mode(dma_mask); 3368 3369 found = 1; 3370 if (dev->dma_mode != 0xff) 3371 used_dma = 1; 3372 } 3373 if (!found) 3374 goto out; 3375 3376 /* step 2: always set host PIO timings */ 3377 ata_link_for_each_dev(dev, link) { 3378 if (!ata_dev_enabled(dev)) 3379 continue; 3380 3381 if (dev->pio_mode == 0xff) { 3382 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n"); 3383 rc = -EINVAL; 3384 goto out; 3385 } 3386 3387 dev->xfer_mode = dev->pio_mode; 3388 dev->xfer_shift = ATA_SHIFT_PIO; 3389 if (ap->ops->set_piomode) 3390 ap->ops->set_piomode(ap, dev); 3391 } 3392 3393 /* step 3: set host DMA timings */ 3394 ata_link_for_each_dev(dev, link) { 3395 if (!ata_dev_enabled(dev) || dev->dma_mode == 0xff) 3396 continue; 3397 3398 dev->xfer_mode = dev->dma_mode; 3399 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode); 3400 if (ap->ops->set_dmamode) 3401 ap->ops->set_dmamode(ap, dev); 3402 } 3403 3404 
/* step 4: update devices' xfer mode */ 3405 ata_link_for_each_dev(dev, link) { 3406 /* don't update suspended devices' xfer mode */ 3407 if (!ata_dev_enabled(dev)) 3408 continue; 3409 3410 rc = ata_dev_set_mode(dev); 3411 if (rc) 3412 goto out; 3413 } 3414 3415 /* Record simplex status. If we selected DMA then the other 3416 * host channels are not permitted to do so. 3417 */ 3418 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX)) 3419 ap->host->simplex_claimed = ap; 3420 3421 out: 3422 if (rc) 3423 *r_failed_dev = dev; 3424 return rc; 3425} 3426 3427/** 3428 * ata_tf_to_host - issue ATA taskfile to host controller 3429 * @ap: port to which command is being issued 3430 * @tf: ATA taskfile register set 3431 * 3432 * Issues ATA taskfile register set to ATA host controller, 3433 * with proper synchronization with interrupt handler and 3434 * other threads. 3435 * 3436 * LOCKING: 3437 * spin_lock_irqsave(host lock) 3438 */ 3439 3440static inline void ata_tf_to_host(struct ata_port *ap, 3441 const struct ata_taskfile *tf) 3442{ 3443 ap->ops->tf_load(ap, tf); 3444 ap->ops->exec_command(ap, tf); 3445} 3446 3447/** 3448 * ata_busy_sleep - sleep until BSY clears, or timeout 3449 * @ap: port containing status register to be polled 3450 * @tmout_pat: impatience timeout 3451 * @tmout: overall timeout 3452 * 3453 * Sleep until ATA Status register bit BSY clears, 3454 * or a timeout occurs. 3455 * 3456 * LOCKING: 3457 * Kernel thread context (may sleep). 3458 * 3459 * RETURNS: 3460 * 0 on success, -errno otherwise. 
3461 */ 3462int ata_busy_sleep(struct ata_port *ap, 3463 unsigned long tmout_pat, unsigned long tmout) 3464{ 3465 unsigned long timer_start, timeout; 3466 u8 status; 3467 3468 status = ata_busy_wait(ap, ATA_BUSY, 300); 3469 timer_start = jiffies; 3470 timeout = timer_start + tmout_pat; 3471 while (status != 0xff && (status & ATA_BUSY) && 3472 time_before(jiffies, timeout)) { 3473 msleep(50); 3474 status = ata_busy_wait(ap, ATA_BUSY, 3); 3475 } 3476 3477 if (status != 0xff && (status & ATA_BUSY)) 3478 ata_port_printk(ap, KERN_WARNING, 3479 "port is slow to respond, please be patient " 3480 "(Status 0x%x)\n", status); 3481 3482 timeout = timer_start + tmout; 3483 while (status != 0xff && (status & ATA_BUSY) && 3484 time_before(jiffies, timeout)) { 3485 msleep(50); 3486 status = ata_chk_status(ap); 3487 } 3488 3489 if (status == 0xff) 3490 return -ENODEV; 3491 3492 if (status & ATA_BUSY) { 3493 ata_port_printk(ap, KERN_ERR, "port failed to respond " 3494 "(%lu secs, Status 0x%x)\n", 3495 tmout / HZ, status); 3496 return -EBUSY; 3497 } 3498 3499 return 0; 3500} 3501 3502/** 3503 * ata_wait_after_reset - wait before checking status after reset 3504 * @ap: port containing status register to be polled 3505 * @deadline: deadline jiffies for the operation 3506 * 3507 * After reset, we need to pause a while before reading status. 3508 * Also, certain combination of controller and device report 0xff 3509 * for some duration (e.g. until SATA PHY is up and running) 3510 * which is interpreted as empty port in ATA world. This 3511 * function also waits for such devices to get out of 0xff 3512 * status. 3513 * 3514 * LOCKING: 3515 * Kernel thread context (may sleep). 3516 */ 3517void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline) 3518{ 3519 unsigned long until = jiffies + ATA_TMOUT_FF_WAIT; 3520 3521 if (time_before(until, deadline)) 3522 deadline = until; 3523 3524 /* Spec mandates ">= 2ms" before checking status. 
We wait 3525 * 150ms, because that was the magic delay used for ATAPI 3526 * devices in Hale Landis's ATADRVR, for the period of time 3527 * between when the ATA command register is written, and then 3528 * status is checked. Because waiting for "a while" before 3529 * checking status is fine, post SRST, we perform this magic 3530 * delay here as well. 3531 * 3532 * Old drivers/ide uses the 2mS rule and then waits for ready. 3533 */ 3534 msleep(150); 3535 3536 /* Wait for 0xff to clear. Some SATA devices take a long time 3537 * to clear 0xff after reset. For example, HHD424020F7SV00 3538 * iVDR needs >= 800ms while. Quantum GoVault needs even more 3539 * than that. 3540 * 3541 * Note that some PATA controllers (pata_ali) explode if 3542 * status register is read more than once when there's no 3543 * device attached. 3544 */ 3545 if (ap->flags & ATA_FLAG_SATA) { 3546 while (1) { 3547 u8 status = ata_chk_status(ap); 3548 3549 if (status != 0xff || time_after(jiffies, deadline)) 3550 return; 3551 3552 msleep(50); 3553 } 3554 } 3555} 3556 3557/** 3558 * ata_wait_ready - sleep until BSY clears, or timeout 3559 * @ap: port containing status register to be polled 3560 * @deadline: deadline jiffies for the operation 3561 * 3562 * Sleep until ATA Status register bit BSY clears, or timeout 3563 * occurs. 3564 * 3565 * LOCKING: 3566 * Kernel thread context (may sleep). 3567 * 3568 * RETURNS: 3569 * 0 on success, -errno otherwise. 
3570 */ 3571int ata_wait_ready(struct ata_port *ap, unsigned long deadline) 3572{ 3573 unsigned long start = jiffies; 3574 int warned = 0; 3575 3576 while (1) { 3577 u8 status = ata_chk_status(ap); 3578 unsigned long now = jiffies; 3579 3580 if (!(status & ATA_BUSY)) 3581 return 0; 3582 if (!ata_link_online(&ap->link) && status == 0xff) 3583 return -ENODEV; 3584 if (time_after(now, deadline)) 3585 return -EBUSY; 3586 3587 if (!warned && time_after(now, start + 5 * HZ) && 3588 (deadline - now > 3 * HZ)) { 3589 ata_port_printk(ap, KERN_WARNING, 3590 "port is slow to respond, please be patient " 3591 "(Status 0x%x)\n", status); 3592 warned = 1; 3593 } 3594 3595 msleep(50); 3596 } 3597} 3598 3599static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask, 3600 unsigned long deadline) 3601{ 3602 struct ata_ioports *ioaddr = &ap->ioaddr; 3603 unsigned int dev0 = devmask & (1 << 0); 3604 unsigned int dev1 = devmask & (1 << 1); 3605 int rc, ret = 0; 3606 3607 /* if device 0 was found in ata_devchk, wait for its 3608 * BSY bit to clear 3609 */ 3610 if (dev0) { 3611 rc = ata_wait_ready(ap, deadline); 3612 if (rc) { 3613 if (rc != -ENODEV) 3614 return rc; 3615 ret = rc; 3616 } 3617 } 3618 3619 /* if device 1 was found in ata_devchk, wait for register 3620 * access briefly, then wait for BSY to clear. 3621 */ 3622 if (dev1) { 3623 int i; 3624 3625 ap->ops->dev_select(ap, 1); 3626 3627 /* Wait for register access. Some ATAPI devices fail 3628 * to set nsect/lbal after reset, so don't waste too 3629 * much time on it. We're gonna wait for !BSY anyway. 3630 */ 3631 for (i = 0; i < 2; i++) { 3632 u8 nsect, lbal; 3633 3634 nsect = ioread8(ioaddr->nsect_addr); 3635 lbal = ioread8(ioaddr->lbal_addr); 3636 if ((nsect == 1) && (lbal == 1)) 3637 break; 3638 msleep(50); /* give drive a breather */ 3639 } 3640 3641 rc = ata_wait_ready(ap, deadline); 3642 if (rc) { 3643 if (rc != -ENODEV) 3644 return rc; 3645 ret = rc; 3646 } 3647 } 3648 3649 /* is all this really necessary? 
*/ 3650 ap->ops->dev_select(ap, 0); 3651 if (dev1) 3652 ap->ops->dev_select(ap, 1); 3653 if (dev0) 3654 ap->ops->dev_select(ap, 0); 3655 3656 return ret; 3657} 3658 3659static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask, 3660 unsigned long deadline) 3661{ 3662 struct ata_ioports *ioaddr = &ap->ioaddr; 3663 3664 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id); 3665 3666 /* software reset. causes dev0 to be selected */ 3667 iowrite8(ap->ctl, ioaddr->ctl_addr); 3668 udelay(20); /* FIXME: flush */ 3669 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr); 3670 udelay(20); /* FIXME: flush */ 3671 iowrite8(ap->ctl, ioaddr->ctl_addr); 3672 3673 /* wait a while before checking status */ 3674 ata_wait_after_reset(ap, deadline); 3675 3676 /* Before we perform post reset processing we want to see if 3677 * the bus shows 0xFF because the odd clown forgets the D7 3678 * pulldown resistor. 3679 */ 3680 if (ata_chk_status(ap) == 0xFF) 3681 return -ENODEV; 3682 3683 return ata_bus_post_reset(ap, devmask, deadline); 3684} 3685 3686/** 3687 * ata_bus_reset - reset host port and associated ATA channel 3688 * @ap: port to reset 3689 * 3690 * This is typically the first time we actually start issuing 3691 * commands to the ATA channel. We wait for BSY to clear, then 3692 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its 3693 * result. Determine what devices, if any, are on the channel 3694 * by looking at the device 0/1 error register. Look at the signature 3695 * stored in each device's taskfile registers, to determine if 3696 * the device is ATA or ATAPI. 3697 * 3698 * LOCKING: 3699 * PCI/etc. bus probe sem. 3700 * Obtains host lock. 3701 * 3702 * SIDE EFFECTS: 3703 * Sets ATA_FLAG_DISABLED if bus reset fails. 
3704 */ 3705 3706void ata_bus_reset(struct ata_port *ap) 3707{ 3708 struct ata_device *device = ap->link.device; 3709 struct ata_ioports *ioaddr = &ap->ioaddr; 3710 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; 3711 u8 err; 3712 unsigned int dev0, dev1 = 0, devmask = 0; 3713 int rc; 3714 3715 DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no); 3716 3717 /* determine if device 0/1 are present */ 3718 if (ap->flags & ATA_FLAG_SATA_RESET) 3719 dev0 = 1; 3720 else { 3721 dev0 = ata_devchk(ap, 0); 3722 if (slave_possible) 3723 dev1 = ata_devchk(ap, 1); 3724 } 3725 3726 if (dev0) 3727 devmask |= (1 << 0); 3728 if (dev1) 3729 devmask |= (1 << 1); 3730 3731 /* select device 0 again */ 3732 ap->ops->dev_select(ap, 0); 3733 3734 /* issue bus reset */ 3735 if (ap->flags & ATA_FLAG_SRST) { 3736 rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ); 3737 if (rc && rc != -ENODEV) 3738 goto err_out; 3739 } 3740 3741 /* 3742 * determine by signature whether we have ATA or ATAPI devices 3743 */ 3744 device[0].class = ata_dev_try_classify(&device[0], dev0, &err); 3745 if ((slave_possible) && (err != 0x81)) 3746 device[1].class = ata_dev_try_classify(&device[1], dev1, &err); 3747 3748 /* is double-select really necessary? 
*/ 3749 if (device[1].class != ATA_DEV_NONE) 3750 ap->ops->dev_select(ap, 1); 3751 if (device[0].class != ATA_DEV_NONE) 3752 ap->ops->dev_select(ap, 0); 3753 3754 /* if no devices were detected, disable this port */ 3755 if ((device[0].class == ATA_DEV_NONE) && 3756 (device[1].class == ATA_DEV_NONE)) 3757 goto err_out; 3758 3759 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) { 3760 /* set up device control for ATA_FLAG_SATA_RESET */ 3761 iowrite8(ap->ctl, ioaddr->ctl_addr); 3762 } 3763 3764 DPRINTK("EXIT\n"); 3765 return; 3766 3767err_out: 3768 ata_port_printk(ap, KERN_ERR, "disabling port\n"); 3769 ata_port_disable(ap); 3770 3771 DPRINTK("EXIT\n"); 3772} 3773 3774/** 3775 * sata_link_debounce - debounce SATA phy status 3776 * @link: ATA link to debounce SATA phy status for 3777 * @params: timing parameters { interval, duratinon, timeout } in msec 3778 * @deadline: deadline jiffies for the operation 3779 * 3780* Make sure SStatus of @link reaches stable state, determined by 3781 * holding the same value where DET is not 1 for @duration polled 3782 * every @interval, before @timeout. Timeout constraints the 3783 * beginning of the stable state. Because DET gets stuck at 1 on 3784 * some controllers after hot unplugging, this functions waits 3785 * until timeout then returns 0 if DET is stable at 1. 3786 * 3787 * @timeout is further limited by @deadline. The sooner of the 3788 * two is used. 3789 * 3790 * LOCKING: 3791 * Kernel thread context (may sleep) 3792 * 3793 * RETURNS: 3794 * 0 on success, -errno on failure. 
3795 */ 3796int sata_link_debounce(struct ata_link *link, const unsigned long *params, 3797 unsigned long deadline) 3798{ 3799 unsigned long interval_msec = params[0]; 3800 unsigned long duration = msecs_to_jiffies(params[1]); 3801 unsigned long last_jiffies, t; 3802 u32 last, cur; 3803 int rc; 3804 3805 t = jiffies + msecs_to_jiffies(params[2]); 3806 if (time_before(t, deadline)) 3807 deadline = t; 3808 3809 if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) 3810 return rc; 3811 cur &= 0xf; 3812 3813 last = cur; 3814 last_jiffies = jiffies; 3815 3816 while (1) { 3817 msleep(interval_msec); 3818 if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) 3819 return rc; 3820 cur &= 0xf; 3821 3822 /* DET stable? */ 3823 if (cur == last) { 3824 if (cur == 1 && time_before(jiffies, deadline)) 3825 continue; 3826 if (time_after(jiffies, last_jiffies + duration)) 3827 return 0; 3828 continue; 3829 } 3830 3831 /* unstable, start over */ 3832 last = cur; 3833 last_jiffies = jiffies; 3834 3835 /* Check deadline. If debouncing failed, return 3836 * -EPIPE to tell upper layer to lower link speed. 3837 */ 3838 if (time_after(jiffies, deadline)) 3839 return -EPIPE; 3840 } 3841} 3842 3843/** 3844 * sata_link_resume - resume SATA link 3845 * @link: ATA link to resume SATA 3846 * @params: timing parameters { interval, duratinon, timeout } in msec 3847 * @deadline: deadline jiffies for the operation 3848 * 3849 * Resume SATA phy @link and debounce it. 3850 * 3851 * LOCKING: 3852 * Kernel thread context (may sleep) 3853 * 3854 * RETURNS: 3855 * 0 on success, -errno on failure. 
3856 */ 3857int sata_link_resume(struct ata_link *link, const unsigned long *params, 3858 unsigned long deadline) 3859{ 3860 u32 scontrol; 3861 int rc; 3862 3863 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3864 return rc; 3865 3866 scontrol = (scontrol & 0x0f0) | 0x300; 3867 3868 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 3869 return rc; 3870 3871 /* Some PHYs react badly if SStatus is pounded immediately 3872 * after resuming. Delay 200ms before debouncing. 3873 */ 3874 msleep(200); 3875 3876 return sata_link_debounce(link, params, deadline); 3877} 3878 3879/** 3880 * ata_std_prereset - prepare for reset 3881 * @link: ATA link to be reset 3882 * @deadline: deadline jiffies for the operation 3883 * 3884 * @link is about to be reset. Initialize it. Failure from 3885 * prereset makes libata abort whole reset sequence and give up 3886 * that port, so prereset should be best-effort. It does its 3887 * best to prepare for reset sequence but if things go wrong, it 3888 * should just whine, not fail. 3889 * 3890 * LOCKING: 3891 * Kernel thread context (may sleep) 3892 * 3893 * RETURNS: 3894 * 0 on success, -errno otherwise. 3895 */ 3896int ata_std_prereset(struct ata_link *link, unsigned long deadline) 3897{ 3898 struct ata_port *ap = link->ap; 3899 struct ata_eh_context *ehc = &link->eh_context; 3900 const unsigned long *timing = sata_ehc_deb_timing(ehc); 3901 int rc; 3902 3903 /* handle link resume */ 3904 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) && 3905 (link->flags & ATA_LFLAG_HRST_TO_RESUME)) 3906 ehc->i.action |= ATA_EH_HARDRESET; 3907 3908 /* Some PMPs don't work with only SRST, force hardreset if PMP 3909 * is supported. 
3910 */ 3911 if (ap->flags & ATA_FLAG_PMP) 3912 ehc->i.action |= ATA_EH_HARDRESET; 3913 3914 /* if we're about to do hardreset, nothing more to do */ 3915 if (ehc->i.action & ATA_EH_HARDRESET) 3916 return 0; 3917 3918 /* if SATA, resume link */ 3919 if (ap->flags & ATA_FLAG_SATA) { 3920 rc = sata_link_resume(link, timing, deadline); 3921 /* whine about phy resume failure but proceed */ 3922 if (rc && rc != -EOPNOTSUPP) 3923 ata_link_printk(link, KERN_WARNING, "failed to resume " 3924 "link for reset (errno=%d)\n", rc); 3925 } 3926 3927 /* Wait for !BSY if the controller can wait for the first D2H 3928 * Reg FIS and we don't know that no device is attached. 3929 */ 3930 if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) { 3931 rc = ata_wait_ready(ap, deadline); 3932 if (rc && rc != -ENODEV) { 3933 ata_link_printk(link, KERN_WARNING, "device not ready " 3934 "(errno=%d), forcing hardreset\n", rc); 3935 ehc->i.action |= ATA_EH_HARDRESET; 3936 } 3937 } 3938 3939 return 0; 3940} 3941 3942/** 3943 * ata_std_softreset - reset host port via ATA SRST 3944 * @link: ATA link to reset 3945 * @classes: resulting classes of attached devices 3946 * @deadline: deadline jiffies for the operation 3947 * 3948 * Reset host port using ATA SRST. 3949 * 3950 * LOCKING: 3951 * Kernel thread context (may sleep) 3952 * 3953 * RETURNS: 3954 * 0 on success, -errno otherwise. 
3955 */ 3956int ata_std_softreset(struct ata_link *link, unsigned int *classes, 3957 unsigned long deadline) 3958{ 3959 struct ata_port *ap = link->ap; 3960 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; 3961 unsigned int devmask = 0; 3962 int rc; 3963 u8 err; 3964 3965 DPRINTK("ENTER\n"); 3966 3967 if (ata_link_offline(link)) { 3968 classes[0] = ATA_DEV_NONE; 3969 goto out; 3970 } 3971 3972 /* determine if device 0/1 are present */ 3973 if (ata_devchk(ap, 0)) 3974 devmask |= (1 << 0); 3975 if (slave_possible && ata_devchk(ap, 1)) 3976 devmask |= (1 << 1); 3977 3978 /* select device 0 again */ 3979 ap->ops->dev_select(ap, 0); 3980 3981 /* issue bus reset */ 3982 DPRINTK("about to softreset, devmask=%x\n", devmask); 3983 rc = ata_bus_softreset(ap, devmask, deadline); 3984 /* if link is occupied, -ENODEV too is an error */ 3985 if (rc && (rc != -ENODEV || sata_scr_valid(link))) { 3986 ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc); 3987 return rc; 3988 } 3989 3990 /* determine by signature whether we have ATA or ATAPI devices */ 3991 classes[0] = ata_dev_try_classify(&link->device[0], 3992 devmask & (1 << 0), &err); 3993 if (slave_possible && err != 0x81) 3994 classes[1] = ata_dev_try_classify(&link->device[1], 3995 devmask & (1 << 1), &err); 3996 3997 out: 3998 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]); 3999 return 0; 4000} 4001 4002/** 4003 * sata_link_hardreset - reset link via SATA phy reset 4004 * @link: link to reset 4005 * @timing: timing parameters { interval, duratinon, timeout } in msec 4006 * @deadline: deadline jiffies for the operation 4007 * 4008 * SATA phy-reset @link using DET bits of SControl register. 4009 * 4010 * LOCKING: 4011 * Kernel thread context (may sleep) 4012 * 4013 * RETURNS: 4014 * 0 on success, -errno otherwise. 
4015 */ 4016int sata_link_hardreset(struct ata_link *link, const unsigned long *timing, 4017 unsigned long deadline) 4018{ 4019 u32 scontrol; 4020 int rc; 4021 4022 DPRINTK("ENTER\n"); 4023 4024 if (sata_set_spd_needed(link)) { 4025 /* SATA spec says nothing about how to reconfigure 4026 * spd. To be on the safe side, turn off phy during 4027 * reconfiguration. This works for at least ICH7 AHCI 4028 * and Sil3124. 4029 */ 4030 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 4031 goto out; 4032 4033 scontrol = (scontrol & 0x0f0) | 0x304; 4034 4035 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 4036 goto out; 4037 4038 sata_set_spd(link); 4039 } 4040 4041 /* issue phy wake/reset */ 4042 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 4043 goto out; 4044 4045 scontrol = (scontrol & 0x0f0) | 0x301; 4046 4047 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol))) 4048 goto out; 4049 4050 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1 4051 * 10.4.2 says at least 1 ms. 4052 */ 4053 msleep(1); 4054 4055 /* bring link back */ 4056 rc = sata_link_resume(link, timing, deadline); 4057 out: 4058 DPRINTK("EXIT, rc=%d\n", rc); 4059 return rc; 4060} 4061 4062/** 4063 * sata_std_hardreset - reset host port via SATA phy reset 4064 * @link: link to reset 4065 * @class: resulting class of attached device 4066 * @deadline: deadline jiffies for the operation 4067 * 4068 * SATA phy-reset host port using DET bits of SControl register, 4069 * wait for !BSY and classify the attached device. 4070 * 4071 * LOCKING: 4072 * Kernel thread context (may sleep) 4073 * 4074 * RETURNS: 4075 * 0 on success, -errno otherwise. 
4076 */ 4077int sata_std_hardreset(struct ata_link *link, unsigned int *class, 4078 unsigned long deadline) 4079{ 4080 struct ata_port *ap = link->ap; 4081 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); 4082 int rc; 4083 4084 DPRINTK("ENTER\n"); 4085 4086 /* do hardreset */ 4087 rc = sata_link_hardreset(link, timing, deadline); 4088 if (rc) { 4089 ata_link_printk(link, KERN_ERR, 4090 "COMRESET failed (errno=%d)\n", rc); 4091 return rc; 4092 } 4093 4094 /* TODO: phy layer with polling, timeouts, etc. */ 4095 if (ata_link_offline(link)) { 4096 *class = ATA_DEV_NONE; 4097 DPRINTK("EXIT, link offline\n"); 4098 return 0; 4099 } 4100 4101 /* wait a while before checking status */ 4102 ata_wait_after_reset(ap, deadline); 4103 4104 /* If PMP is supported, we have to do follow-up SRST. Note 4105 * that some PMPs don't send D2H Reg FIS after hardreset at 4106 * all if the first port is empty. Wait for it just for a 4107 * second and request follow-up SRST. 4108 */ 4109 if (ap->flags & ATA_FLAG_PMP) { 4110 ata_wait_ready(ap, jiffies + HZ); 4111 return -EAGAIN; 4112 } 4113 4114 rc = ata_wait_ready(ap, deadline); 4115 /* link occupied, -ENODEV too is an error */ 4116 if (rc) { 4117 ata_link_printk(link, KERN_ERR, 4118 "COMRESET failed (errno=%d)\n", rc); 4119 return rc; 4120 } 4121 4122 ap->ops->dev_select(ap, 0); /* probably unnecessary */ 4123 4124 *class = ata_dev_try_classify(link->device, 1, NULL); 4125 4126 DPRINTK("EXIT, class=%u\n", *class); 4127 return 0; 4128} 4129 4130/** 4131 * ata_std_postreset - standard postreset callback 4132 * @link: the target ata_link 4133 * @classes: classes of attached devices 4134 * 4135 * This function is invoked after a successful reset. Note that 4136 * the device might have been reset more than once using 4137 * different reset methods before postreset is invoked. 
4138 * 4139 * LOCKING: 4140 * Kernel thread context (may sleep) 4141 */ 4142void ata_std_postreset(struct ata_link *link, unsigned int *classes) 4143{ 4144 struct ata_port *ap = link->ap; 4145 u32 serror; 4146 4147 DPRINTK("ENTER\n"); 4148 4149 /* print link status */ 4150 sata_print_link_status(link); 4151 4152 /* clear SError */ 4153 if (sata_scr_read(link, SCR_ERROR, &serror) == 0) 4154 sata_scr_write(link, SCR_ERROR, serror); 4155 link->eh_info.serror = 0; 4156 4157 /* is double-select really necessary? */ 4158 if (classes[0] != ATA_DEV_NONE) 4159 ap->ops->dev_select(ap, 1); 4160 if (classes[1] != ATA_DEV_NONE) 4161 ap->ops->dev_select(ap, 0); 4162 4163 /* bail out if no device is present */ 4164 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) { 4165 DPRINTK("EXIT, no device\n"); 4166 return; 4167 } 4168 4169 /* set up device control */ 4170 if (ap->ioaddr.ctl_addr) 4171 iowrite8(ap->ctl, ap->ioaddr.ctl_addr); 4172 4173 DPRINTK("EXIT\n"); 4174} 4175 4176/** 4177 * ata_dev_same_device - Determine whether new ID matches configured device 4178 * @dev: device to compare against 4179 * @new_class: class of the new device 4180 * @new_id: IDENTIFY page of the new device 4181 * 4182 * Compare @new_class and @new_id against @dev and determine 4183 * whether @dev is the device indicated by @new_class and 4184 * @new_id. 4185 * 4186 * LOCKING: 4187 * None. 4188 * 4189 * RETURNS: 4190 * 1 if @dev matches @new_class and @new_id, 0 otherwise. 
4191 */ 4192static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class, 4193 const u16 *new_id) 4194{ 4195 const u16 *old_id = dev->id; 4196 unsigned char model[2][ATA_ID_PROD_LEN + 1]; 4197 unsigned char serial[2][ATA_ID_SERNO_LEN + 1]; 4198 4199 if (dev->class != new_class) { 4200 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n", 4201 dev->class, new_class); 4202 return 0; 4203 } 4204 4205 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0])); 4206 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1])); 4207 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0])); 4208 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1])); 4209 4210 if (strcmp(model[0], model[1])) { 4211 ata_dev_printk(dev, KERN_INFO, "model number mismatch " 4212 "'%s' != '%s'\n", model[0], model[1]); 4213 return 0; 4214 } 4215 4216 if (strcmp(serial[0], serial[1])) { 4217 ata_dev_printk(dev, KERN_INFO, "serial number mismatch " 4218 "'%s' != '%s'\n", serial[0], serial[1]); 4219 return 0; 4220 } 4221 4222 return 1; 4223} 4224 4225/** 4226 * ata_dev_reread_id - Re-read IDENTIFY data 4227 * @dev: target ATA device 4228 * @readid_flags: read ID flags 4229 * 4230 * Re-read IDENTIFY page and make sure @dev is still attached to 4231 * the port. 4232 * 4233 * LOCKING: 4234 * Kernel thread context (may sleep) 4235 * 4236 * RETURNS: 4237 * 0 on success, negative errno otherwise 4238 */ 4239int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags) 4240{ 4241 unsigned int class = dev->class; 4242 u16 *id = (void *)dev->link->ap->sector_buf; 4243 int rc; 4244 4245 /* read ID data */ 4246 rc = ata_dev_read_id(dev, &class, readid_flags, id); 4247 if (rc) 4248 return rc; 4249 4250 /* is the device still there? 
 */
	if (!ata_dev_same_device(dev, class, id))
		return -ENODEV;

	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
	return 0;
}

/**
 *	ata_dev_revalidate - Revalidate ATA device
 *	@dev: device to revalidate
 *	@new_class: new class code
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page, make sure @dev is still attached to the
 *	port and reconfigure it according to the new IDENTIFY page.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
		       unsigned int readid_flags)
{
	u64 n_sectors = dev->n_sectors;
	int rc;

	if (!ata_dev_enabled(dev))
		return -ENODEV;

	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
	if (ata_class_enabled(new_class) &&
	    new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
		ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
			       dev->class, new_class);
		rc = -ENODEV;
		goto fail;
	}

	/* re-read ID */
	rc = ata_dev_reread_id(dev, readid_flags);
	if (rc)
		goto fail;

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc)
		goto fail;

	/* verify n_sectors hasn't changed: a capacity change after
	 * revalidation is treated as a different device having been
	 * attached behind our back, hence -ENODEV below.
	 */
	if (dev->class == ATA_DEV_ATA && n_sectors &&
	    dev->n_sectors != n_sectors) {
		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
			       "%llu != %llu\n",
			       (unsigned long long)n_sectors,
			       (unsigned long long)dev->n_sectors);

		/* restore original n_sectors */
		dev->n_sectors = n_sectors;

		rc = -ENODEV;
		goto fail;
	}

	return 0;

 fail:
	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
	return rc;
}

/* One quirk-table entry: product string, optional firmware revision
 * (NULL matches any revision), and the ATA_HORKAGE_* flags to apply.
 * Both strings may use a trailing '*' wildcard (see strn_pattern_cmp).
 */
struct ata_blacklist_entry {
	const char *model_num;
	const char *model_rev;
	unsigned long horkage;
};

static const struct ata_blacklist_entry ata_device_blacklist [] = {
	/* Devices with DMA related problems under Linux */
	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
	{ "Compaq CRD-8241B",	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8400B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
	{ "CD-532E-A",		NULL,		ATA_HORKAGE_NODMA },
	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
	{ "WPI CDD-820",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
	{ "_NEC DV5800A",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
	/* Odd clown on sil3726/4726 PMPs */
	{ "Config Disk",	NULL,		ATA_HORKAGE_NODMA |
						ATA_HORKAGE_SKIP_PM },

	/* Weird ATAPI devices */
	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },

	/* Devices we expect to fail diagnostics */

	/* Devices where NCQ should be avoided */
	/* NCQ is slow */
	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
	/* http://thread.gmane.org/gmane.linux.ide/14907 */
	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
	/* NCQ is broken */
	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },

	/* Blacklist entries taken from Silicon Image 3124/3132
	   Windows driver .inf file - also several Linux problem reports */
	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },

	/* devices which puke on READ_NATIVE_MAX */
	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },

	/* Devices which report 1 sector over size HPA */
	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE, },

	/* Devices which get the IVB wrong */
	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
	{ "TSSTcorp CDDVDW SH-S202J", "SB00",	  ATA_HORKAGE_IVB, },
	{ "TSSTcorp CDDVDW SH-S202J", "SB01",	  ATA_HORKAGE_IVB, },
	{ "TSSTcorp CDDVDW SH-S202N", "SB00",	  ATA_HORKAGE_IVB, },
	{ "TSSTcorp CDDVDW SH-S202N", "SB01",	  ATA_HORKAGE_IVB, },

	/* End Marker */
	{ }
};

/* Compare @name against pattern @patt.  A trailing @wildchar in @patt
 * makes it a prefix pattern; otherwise at most strlen(@name) characters
 * are compared.
 *
 * NOTE(review): in the non-wildcard case only strlen(@name) characters
 * are compared, so a pattern strictly longer than @name still matches
 * when @name is a prefix of it — confirm this is the intended blacklist
 * matching semantics.
 */
static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
{
	const char *p;
	int len;

	/*
	 * check for trailing wildcard: *\0
	 */
	p = strchr(patt, wildchar);
	if (p && ((*(p + 1)) == 0))
		len = p - patt;
	else {
		len = strlen(name);
		if (!len) {
			/* empty name matches only an empty pattern */
			if (!*patt)
				return 0;
			return -1;
		}
	}

	return strncmp(patt, name, len);
}

/* Look @dev up in ata_device_blacklist[] by product string and firmware
 * revision.  Returns the entry's horkage flags, or 0 when not listed.
 */
static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
{
	unsigned char model_num[ATA_ID_PROD_LEN + 1];
	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
	const struct ata_blacklist_entry *ad = ata_device_blacklist;

	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));

	while (ad->model_num) {
		if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
			/* NULL model_rev matches any firmware revision */
			if (ad->model_rev == NULL)
				return ad->horkage;
			if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
				return ad->horkage;
		}
		ad++;
	}
	return 0;
}

static int ata_dma_blacklisted(const struct ata_device *dev)
{
	/* We don't support polling DMA.
	 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
	 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
	 */
	if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
	    (dev->flags & ATA_DFLAG_CDB_INTR))
		return 1;
	return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
}

/**
 *	ata_is_40wire	-	check drive side detection
 *	@dev: device
 *
 *	Perform drive side detection decoding, allowing for device vendors
 *	who can't follow the documentation.
 */

static int ata_is_40wire(struct ata_device *dev)
{
	/* devices flagged ATA_HORKAGE_IVB fill in the identify-valid bits
	 * incorrectly, so use the relaxed cable-detection decode for them */
	if (dev->horkage & ATA_HORKAGE_IVB)
		return ata_drive_40wire_relaxed(dev->id);
	return ata_drive_40wire(dev->id);
}

/**
 *	ata_dev_xfermask - Compute supported xfermask of the given device
 *	@dev: Device to compute xfermask for
 *
 *	Compute supported xfermask of @dev and store it in
 *	dev->*_mask.  This function is responsible for applying all
 *	known limits including host controller limits, device
 *	blacklist, etc...
 *
 *	LOCKING:
 *	None.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* drive modes available */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 *	CFA Advanced TrueIDE timings are not allowed on a shared
	 *	cable
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA 4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING,
			       "device is on DMA blacklist, disabling DMA\n");
	}

	/* on a simplex host only one port may use DMA at a time */
	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    host->simplex_claimed && host->simplex_claimed != ap) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
			       "other device, disabling DMA\n");
	}

	if (ap->flags & ATA_FLAG_NO_IORDY)
		xfer_mask &= ata_pio_mask_no_iordy(dev);

	/* give the LLDD a chance to veto modes */
	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * we handle hot plug the cable type can itself change.
	 * Check this last so that we know if the transfer rate was
	 * solely limited by the cable.
	 * Unknown or 80 wire cables reported host side are checked
	 * drive side as well. Cases where we know a 40wire cable
	 * is used safely for 80 are not checked here.
	 */
	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
		/* UDMA/44 or higher would be available */
		if ((ap->cbl == ATA_CBL_PATA40) ||
		    (ata_is_40wire(dev) &&
		     (ap->cbl == ATA_CBL_PATA_UNK ||
		      ap->cbl == ATA_CBL_PATA80))) {
			ata_dev_printk(dev, KERN_WARNING,
				"limited to UDMA/33 due to 40-wire cable\n");
			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
		}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}

/**
 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
 *	@dev: Device to which command will be sent
 *
 *	Issue SET FEATURES - XFER MODE command to device @dev
 *	on port @ap.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */

static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	/* Some controllers and ATAPI devices show flaky interrupt
	 * behavior after setting xfer mode.  Use polling instead.
	 */
	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
	tf.protocol = ATA_PROT_NODATA;
	/* If we are using IORDY we must send the mode setting command */
	if (ata_pio_need_iordy(dev))
		tf.nsect = dev->xfer_mode;
	/* If the device has IORDY and the controller does not - turn it off */
	else if (ata_id_has_iordy(dev->id))
		tf.nsect = 0x01;
	else /* In the ancient relic department - skip all of this */
		return 0;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
/**
 *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
 *	@dev: Device to which command will be sent
 *	@enable: Whether to enable or disable the feature
 *	@feature: The sector count represents the feature to set
 *
 *	Issue SET FEATURES - SATA FEATURES command to device @dev
 *	on port @ap with sector count
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
					u8 feature)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - SATA features\n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = enable;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	/* the feature code travels in the sector count register */
	tf.nsect = feature;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}

/**
 *	ata_dev_init_params - Issue INIT DEV PARAMS command
 *	@dev: Device to which command will be sent
 *	@heads: Number of heads (taskfile parameter)
 *	@sectors: Number of sectors (taskfile parameter)
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* Number of sectors per track 1-255. Number of heads 1-16 */
	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
		return AC_ERR_INVALID;

	/* set up init dev params taskfile */
	DPRINTK("init dev params \n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_INIT_DEV_PARAMS;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = sectors;
	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	/* A clean abort indicates an original or just out of spec drive
	   and we should continue as we issue the setup based on the
	   drive reported working geometry */
	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
		err_mask = 0;

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}

/**
 *	ata_sg_clean - Unmap DMA memory associated with command
 *	@qc: Command containing DMA memory to be released
 *
 *	Unmap all mapped DMA memory associated with this command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->sg;
	int dir = qc->dma_dir;

	WARN_ON(sg == NULL);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	if (qc->n_elem)
		dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->sg = NULL;
}

/**
 *	ata_fill_sg - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command.  A PRD entry must
 *	not cross a 64K boundary, so over-long segments are split.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 */
static void ata_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int si, pi;

	pi = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			/* a length field of 0 means 64K per the PRD spec */
			ap->prd[pi].addr = cpu_to_le32(addr);
			ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);

			pi++;
			sg_len -= len;
			addr += len;
		}
	}

	/* mark the last entry as end-of-table */
	ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}

/**
 *	ata_fill_sg_dumb - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command. Perform the fill
 *	so that we avoid writing any length 64K records for
 *	controllers that don't follow the spec.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 */
static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int si, pi;

	pi = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len, blen;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			blen = len & 0xffff;
			ap->prd[pi].addr = cpu_to_le32(addr);
			if (blen == 0) {
				/* Some PATA chipsets like the CS5530 can't
				   cope with 0x0000 meaning 64K as the spec says;
				   emit two 32K entries instead */
				ap->prd[pi].flags_len = cpu_to_le32(0x8000);
				blen = 0x8000;
				ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000);
			}
			ap->prd[pi].flags_len = cpu_to_le32(blen);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);

			pi++;
			sg_len -= len;
			addr += len;
		}
	}

	ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}

/**
 *	ata_check_atapi_dma - Check whether ATAPI DMA can be supported
 *	@qc: Metadata associated with taskfile to check
 *
 *	Allow low-level driver to filter ATA PACKET commands, returning
 *	a status indicating whether or not it is OK to use DMA for the
 *	supplied PACKET command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS: 0 when ATAPI DMA can be used
 *		 nonzero otherwise
 */
int ata_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Don't allow DMA if it isn't multiple of 16 bytes.  Quite a
	 * few ATAPI devices choke on such DMA requests.
	 */
	if (unlikely(qc->nbytes & 15))
		return 1;

	if (ap->ops->check_atapi_dma)
		return ap->ops->check_atapi_dma(qc);

	return 0;
}

/**
 *	ata_std_qc_defer - Check whether a qc needs to be deferred
 *	@qc: ATA command in question
 *
 *	Non-NCQ commands cannot run with any other command, NCQ or
 *	not.  As upper layer only knows the queue depth, we are
 *	responsible for maintaining exclusion.
This function checks
 *	whether a new command @qc can be issued.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	ATA_DEFER_* if deferring is needed, 0 otherwise.
 */
int ata_std_qc_defer(struct ata_queued_cmd *qc)
{
	struct ata_link *link = qc->dev->link;

	if (qc->tf.protocol == ATA_PROT_NCQ) {
		/* an NCQ command may run as long as no non-NCQ command
		 * is active (active_tag tracks the non-NCQ command) */
		if (!ata_tag_valid(link->active_tag))
			return 0;
	} else {
		/* a non-NCQ command needs the link completely idle */
		if (!ata_tag_valid(link->active_tag) && !link->sactive)
			return 0;
	}

	return ATA_DEFER_LINK;
}

/**
 *	ata_qc_prep - Prepare taskfile for submission
 *	@qc: Metadata associated with taskfile to be prepared
 *
 *	Prepare ATA taskfile for submission.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_prep(struct ata_queued_cmd *qc)
{
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	ata_fill_sg(qc);
}

/**
 *	ata_dumb_qc_prep - Prepare taskfile for submission
 *	@qc: Metadata associated with taskfile to be prepared
 *
 *	Prepare ATA taskfile for submission.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
{
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	ata_fill_sg_dumb(qc);
}

/* no-op ->qc_prep for drivers that don't need PRD setup */
void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }

/**
 *	ata_sg_init - Associate command with scatter-gather table.
 *	@qc: Command to be associated
 *	@sg: Scatter-gather table.
 *	@n_elem: Number of elements in s/g table.
 *
 *	Initialize the data-related elements of queued_cmd @qc
 *	to point to a scatter-gather table @sg, containing @n_elem
 *	elements.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
		 unsigned int n_elem)
{
	qc->sg = sg;
	qc->n_elem = n_elem;
	qc->cursg = qc->sg;
}

/**
 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
 *	@qc: Command with scatter-gather table to be mapped.
 *
 *	DMA-map the scatter-gather table associated with queued_cmd @qc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 *
 */
static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int n_elem;

	VPRINTK("ENTER, ata%u\n", ap->print_id);

	/* dma_map_sg() returns 0 on failure; n_elem is unsigned so
	 * "< 1" is equivalent to "== 0" here */
	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
	if (n_elem < 1)
		return -1;

	DPRINTK("%d sg elements mapped\n", n_elem);

	qc->n_elem = n_elem;
	qc->flags |= ATA_QCFLAG_DMAMAP;

	return 0;
}

/**
 *	swap_buf_le16 - swap halves of 16-bit words in place
 *	@buf:  Buffer to swap
 *	@buf_words:  Number of 16-bit words in buffer.
 *
 *	Swap halves of 16-bit words if needed to convert from
 *	little-endian byte order to native cpu byte order, or
 *	vice-versa.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void swap_buf_le16(u16 *buf, unsigned int buf_words)
{
#ifdef __BIG_ENDIAN
	unsigned int i;

	for (i = 0; i < buf_words; i++)
		buf[i] = le16_to_cpu(buf[i]);
#endif /* __BIG_ENDIAN */
}

/**
 *	ata_data_xfer - Transfer data by PIO
 *	@dev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@rw: read/write
 *
 *	Transfer data from/to the device data register by PIO.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	Bytes consumed.
 */
unsigned int ata_data_xfer(struct ata_device *dev, unsigned char *buf,
			   unsigned int buflen, int rw)
{
	struct ata_port *ap = dev->link->ap;
	void __iomem *data_addr = ap->ioaddr.data_addr;
	unsigned int words = buflen >> 1;

	/* Transfer multiple of 2 bytes */
	if (rw == READ)
		ioread16_rep(data_addr, buf, words);
	else
		iowrite16_rep(data_addr, buf, words);

	/* Transfer trailing 1 byte, if any. */
	if (unlikely(buflen & 0x01)) {
		__le16 align_buf[1] = { 0 };
		unsigned char *trailing_buf = buf + buflen - 1;

		if (rw == READ) {
			/* a full 16-bit word is read from the device but
			 * only its first byte is kept */
			align_buf[0] = cpu_to_le16(ioread16(data_addr));
			memcpy(trailing_buf, align_buf, 1);
		} else {
			memcpy(align_buf, trailing_buf, 1);
			iowrite16(le16_to_cpu(align_buf[0]), data_addr);
		}
		words++;
	}

	/* rounded up to the even number of bytes actually transferred */
	return words << 1;
}

/**
 *	ata_data_xfer_noirq - Transfer data by PIO
 *	@dev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@rw: read/write
 *
 *	Transfer data from/to the device data register by PIO. Do the
 *	transfer with interrupts disabled.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	Bytes consumed.
 */
unsigned int ata_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
				 unsigned int buflen, int rw)
{
	unsigned long flags;
	unsigned int consumed;

	local_irq_save(flags);
	consumed = ata_data_xfer(dev, buf, buflen, rw);
	local_irq_restore(flags);

	return consumed;
}


/**
 *	ata_pio_sector - Transfer a sector of data.
 *	@qc: Command on going
 *
 *	Transfer qc->sect_size bytes of data from/to the ATA device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	/* last sector of the command -> final HSM state after this xfer */
	if (qc->curbytes == qc->nbytes - qc->sect_size)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg_page(qc->cursg);
	offset = qc->cursg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use a bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
	}

	qc->curbytes += qc->sect_size;
	qc->cursg_ofs += qc->sect_size;

	/* current sg entry exhausted - advance to the next one */
	if (qc->cursg_ofs == qc->cursg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}
}

/**
 *	ata_pio_sectors - Transfer one or many sectors.
 *	@qc: Command on going
 *
 *	Transfer one or many sectors of data from/to the
 *	ATA device for the DRQ request.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_pio_sectors(struct ata_queued_cmd *qc)
{
	if (is_multi_taskfile(&qc->tf)) {
		/* READ/WRITE MULTIPLE */
		unsigned int nsect;

		WARN_ON(qc->dev->multi_count == 0);

		/* transfer up to multi_count sectors in one DRQ block */
		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
			    qc->dev->multi_count);
		while (nsect--)
			ata_pio_sector(qc);
	} else
		ata_pio_sector(qc);

	ata_altstatus(qc->ap); /* flush */
}

/**
 *	atapi_send_cdb - Write CDB bytes to hardware
 *	@ap: Port to which ATAPI device is attached.
 *	@qc: Taskfile currently active
 *
 *	When device has indicated its readiness to accept
 *	a CDB, this function is called.  Send the CDB.
 *
 *	LOCKING:
 *	caller.
 */

static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	/* send SCSI cdb */
	DPRINTK("send cdb\n");
	WARN_ON(qc->dev->cdb_len < 12);

	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
	ata_altstatus(ap); /* flush */

	/* next HSM state depends on the ATAPI protocol variant */
	switch (qc->tf.protocol) {
	case ATAPI_PROT_PIO:
		ap->hsm_task_state = HSM_ST;
		break;
	case ATAPI_PROT_NODATA:
		ap->hsm_task_state = HSM_ST_LAST;
		break;
	case ATAPI_PROT_DMA:
		ap->hsm_task_state = HSM_ST_LAST;
		/* initiate bmdma */
		ap->ops->bmdma_start(qc);
		break;
	}
}

/**
 *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *	@bytes: number of bytes
 *
 *	Transfer data from/to the ATAPI device.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 */
static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	struct ata_eh_info *ehi = &dev->link->eh_info;
	struct scatterlist *sg;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count, consumed;

next_sg:
	sg = qc->cursg;
	if (unlikely(!sg)) {
		/* device wants to transfer more than the sg table holds */
		ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
				  "buf=%u cur=%u bytes=%u",
				  qc->nbytes, qc->curbytes, bytes);
		return -1;
	}

	page = sg_page(sg);
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		consumed = ap->ops->data_xfer(dev, buf + offset, count, rw);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		consumed = ap->ops->data_xfer(dev, buf + offset, count, rw);
	}

	bytes -= min(bytes, consumed);
	qc->curbytes += count;
	qc->cursg_ofs += count;

	if (qc->cursg_ofs == sg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}

	/* consumed can be larger than count only for the last transfer */
	WARN_ON(qc->cursg && count != consumed);

	if (bytes)
		goto next_sg;
	return 0;
}

/**
 *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *
 *	Transfer data from/to the ATAPI device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	struct ata_eh_info *ehi = &dev->link->eh_info;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant. For
	 * error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->tf_read(ap, &qc->result_tf);
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	/* byte count the device wants to transfer in this DRQ */
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (unlikely(ireason & (1 << 0)))
		goto atapi_check;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (unlikely(do_write != i_write))
		goto atapi_check;

	if (unlikely(!bytes))
		goto atapi_check;

	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);

	if (unlikely(__atapi_pio_bytes(qc, bytes)))
		goto err_out;
	ata_altstatus(ap); /* flush */

	return;

 atapi_check:
	ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
			  ireason, bytes);
 err_out:
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}

/**
 *	ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *
 *	RETURNS:
 *	1 if ok in workqueue, 0 otherwise.
 */

static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	if (qc->tf.flags & ATA_TFLAG_POLLING)
		return 1;

	if (ap->hsm_task_state == HSM_ST_FIRST) {
		/* PIO data-out: the first data block is sent from the wq */
		if (qc->tf.protocol == ATA_PROT_PIO &&
		    (qc->tf.flags & ATA_TFLAG_WRITE))
			return 1;

		/* ATAPI without CDB-intr: the CDB is sent from the wq */
		if (ata_is_atapi(qc->tf.protocol) &&
		    !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			return 1;
	}

	return 0;
}

/**
 *	ata_hsm_qc_complete - finish a qc running on standard HSM
 *	@qc: Command to complete
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Finish @qc which is running on standard HSM.
 *
 *	LOCKING:
 *	If @in_wq is zero, spin_lock_irqsave(host lock).
 *	Otherwise, none on entry and grabs host lock.
 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ap->ops->error_handler) {
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);

			/* EH might have kicked in while host lock is
			 * released.
			 */
			qc = ata_qc_from_tag(ap, qc->tag);
			if (qc) {
				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
					ap->ops->irq_on(ap);
					ata_qc_complete(qc);
				} else
					ata_port_freeze(ap);
			}

			spin_unlock_irqrestore(ap->lock, flags);
		} else {
			if (likely(!(qc->err_mask & AC_ERR_HSM)))
				ata_qc_complete(qc);
			else
				ata_port_freeze(ap);
		}
	} else {
		/* no EH: old-style completion */
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);
			ap->ops->irq_on(ap);
			ata_qc_complete(qc);
			spin_unlock_irqrestore(ap->lock, flags);
		} else
			ata_qc_complete(qc);
	}
}

/**
 *	ata_hsm_move - move the HSM to the next state.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *	@status: current device status
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	LOCKING:
 *	None on entry.  Grabs host lock internally while sending the
 *	first data block / CDB when called from the workqueue
 *	(@in_wq == 1).
 *
 *	RETURNS:
 *	1 when poll next status needed, 0 otherwise.
 */
int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		 u8 status, int in_wq)
{
	unsigned long flags = 0;
	int poll_next;

	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_qc_issue_prot() does not throw things
	 * like DMA polling into the workqueue. Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));

fsm_start:
	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data. Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else
				/* HSM violation. Let EH handle this */
				qc->err_mask |= AC_ERR_HSM;

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			/* Some ATAPI tape drives forget to clear the ERR bit
			 * when doing the next command (mostly request sense).
			 * We ignore ERR here to workaround and proceed sending
			 * the CDB.
			 */
			if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
				ata_port_printk(ap, KERN_WARNING,
						"DRQ=1 with device error, "
						"dev_stat 0x%X\n", status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}
		}

		/* Send the CDB (atapi) or the first data block (ata pio out).
		 * During the state transition, interrupt handler shouldn't
		 * be invoked before the data transfer is complete and
		 * hsm_task_state is changed. Hence, the following locking.
		 */
		if (in_wq)
			spin_lock_irqsave(ap->lock, flags);

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		if (in_wq)
			spin_unlock_irqrestore(ap->lock, flags);

		/* if polling, ata_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATAPI_PROT_PIO) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
						"device error, dev_stat 0x%X\n",
						status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF)))
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;
				else
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition.  Mark hint.
					 */
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) alone with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrupted */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					ata_pio_sectors(qc);
					status = ata_wait_idle(ap);
				}

				if (status & (ATA_BUSY | ATA_DRQ))
					qc->err_mask |= AC_ERR_HSM;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST. so, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
			ap->print_id, qc->dev->devno, status);

		WARN_ON(qc->err_mask);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		/* make sure qc->err_mask is available to
		 * know what's wrong and recover
		 */
		WARN_ON(qc->err_mask == 0);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;
	default:
		poll_next = 0;
		BUG();
	}

	return poll_next;
}

/* PIO/ATAPI workqueue worker: waits for BSY to clear (re-queueing itself
 * with a short pause if the device stays busy) and then steps the HSM,
 * looping while ata_hsm_move() asks for further polling.
 */
static void ata_pio_task(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, port_task.work);
	struct ata_queued_cmd *qc = ap->port_task_data;
	u8 status;
	int poll_next;

fsm_start:
	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);

	/*
	 * This is purely heuristic.  This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two.  If not, the drive is probably seeking
	 * or something.  Snooze for a couple msecs, then
	 * chk-status again.  If still busy, queue delayed work.
	 */
	status = ata_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		msleep(2);
		status = ata_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			ata_pio_queue_task(ap, qc, ATA_SHORT_PAUSE);
			return;
		}
	}

	/* move the HSM */
	poll_next = ata_hsm_move(ap, qc, status, 1);

	/* another command or interrupt handler
	 * may be running at this point.
	 */
	if (poll_next)
		goto fsm_start;
}

/**
 *	ata_qc_new - Request an available ATA command, for queueing
 *	@ap: Port to allocate the qc from
 *
 *	RETURNS:
 *	Pointer to a free qc with qc->tag set, or %NULL if the port is
 *	frozen or all regular tags are in use.
 *
 *	LOCKING:
 *	None.
 */

static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
{
	struct ata_queued_cmd *qc = NULL;
	unsigned int i;

	/* no command while frozen */
	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
		return NULL;

	/* the last tag is reserved for internal command. */
	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
		if (!test_and_set_bit(i, &ap->qc_allocated)) {
			qc = __ata_qc_from_tag(ap, i);
			break;
		}

	if (qc)
		qc->tag = i;

	return qc;
}

/**
 *	ata_qc_new_init - Request an available ATA command, and initialize it
 *	@dev: Device from whom we request an available command structure
 *
 *	RETURNS:
 *	Initialized qc bound to @dev, or %NULL if none is available.
 *
 *	LOCKING:
 *	None.
 */

struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_queued_cmd *qc;

	qc = ata_qc_new(ap);
	if (qc) {
		qc->scsicmd = NULL;
		qc->ap = ap;
		qc->dev = dev;

		ata_qc_reinit(qc);
	}

	return qc;
}

/**
 *	ata_qc_free - free unused ata_queued_cmd
 *	@qc: Command to complete
 *
 *	Designed to free unused ata_queued_cmd object
 *	in case something prevents using it.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_free(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int tag;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */

	qc->flags = 0;
	tag = qc->tag;
	if (likely(ata_tag_valid(tag))) {
		/* poison the tag so stale users trip, then release it */
		qc->tag = ATA_TAG_POISON;
		clear_bit(tag, &ap->qc_allocated);
	}
}

/**
 *	__ata_qc_complete - finish qc bookkeeping and run its callback
 *	@qc: Command to complete
 *
 *	Unmaps DMA if needed, clears the active tag / sactive and
 *	qc_active bookkeeping, drops any exclusive-link claim and then
 *	invokes @qc->complete_fn.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		link->sactive &= ~(1 << qc->tag);
		if (!link->sactive)
			ap->nr_active_links--;
	} else {
		link->active_tag = ATA_TAG_POISON;
		ap->nr_active_links--;
	}

	/* clear exclusive status */
	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
		     ap->excl_link == link))
		ap->excl_link = NULL;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}

/* Snapshot the device taskfile registers into qc->result_tf. */
static void fill_result_tf(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	qc->result_tf.flags = qc->tf.flags;
	ap->ops->tf_read(ap, &qc->result_tf);
}

/* A data command completed successfully after a transfer mode change:
 * clear ATA_DFLAG_DUBIOUS_XFER, unless the command was internal, carried
 * no data, or went out via PIO while DMA modes are configured.
 */
static void ata_verify_xfer(struct ata_queued_cmd *qc)
{
	struct ata_device *dev = qc->dev;

	if (ata_tag_internal(qc->tag))
		return;

	if (ata_is_nodata(qc->tf.protocol))
		return;

	if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
		return;

	dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
}

/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA
 *	command has completed, with either an ok or not-ok status.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
	 * Normal execution path is responsible for not accessing a
	 * failed qc.  libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for failed qcs.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
	 * not synchronize with interrupt handler.  Only PIO task is
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		struct ata_device *dev = qc->dev;
		struct ata_eh_info *ehi = &dev->link->eh_info;

		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			if (!ata_tag_internal(qc->tag)) {
				/* always fill result TF for failed qc */
				fill_result_tf(qc);
				ata_qc_schedule_eh(qc);
				return;
			}
		}

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		/* Some commands need post-processing after successful
		 * completion.
		 */
		switch (qc->tf.command) {
		case ATA_CMD_SET_FEATURES:
			if (qc->tf.feature != SETFEATURES_WC_ON &&
			    qc->tf.feature != SETFEATURES_WC_OFF)
				break;
			/* fall through */
		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
		case ATA_CMD_SET_MULTI: /* multi_count changed */
			/* revalidate device */
			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
			ata_port_schedule_eh(ap);
			break;

		case ATA_CMD_SLEEP:
			dev->flags |= ATA_DFLAG_SLEEPING;
			break;
		}

		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
			ata_verify_xfer(qc);

		__ata_qc_complete(qc);
	} else {
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}

/**
 *	ata_qc_complete_multiple - Complete multiple qcs successfully
 *	@ap: port in question
 *	@qc_active: new qc_active mask
 *	@finish_qc: LLDD callback invoked before completing a qc
 *
 *	Complete in-flight commands.  This function is meant to be
 *	called from low-level driver's interrupt routine to complete
 *	requests normally.
 *	ap->qc_active and @qc_active is compared and commands are
 *	completed accordingly.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of completed commands on success, -errno otherwise.
 */
int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
			     void (*finish_qc)(struct ata_queued_cmd *))
{
	int nr_done = 0;
	u32 done_mask;
	int i;

	/* bits that flipped between our bookkeeping and the new mask */
	done_mask = ap->qc_active ^ qc_active;

	/* a bit may only clear (command done), never newly appear */
	if (unlikely(done_mask & qc_active)) {
		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
				"(%08x->%08x)\n", ap->qc_active, qc_active);
		return -EINVAL;
	}

	for (i = 0; i < ATA_MAX_QUEUE; i++) {
		struct ata_queued_cmd *qc;

		if (!(done_mask & (1 << i)))
			continue;

		if ((qc = ata_qc_from_tag(ap, i))) {
			if (finish_qc)
				finish_qc(qc);
			ata_qc_complete(qc);
			nr_done++;
		}
	}

	return nr_done;
}

/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Prepare an ATA command to submission to device.
 *	This includes mapping the data into a DMA-able
 *	area, filling in the S/G table, and finally
 *	writing the taskfile to hardware, starting the command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;
	u8 prot = qc->tf.protocol;

	/* Make sure only one non-NCQ command is outstanding.  The
	 * check is skipped for old EH because it reuses active qc to
	 * request ATAPI sense.
	 */
	WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));

	if (ata_is_ncq(prot)) {
		WARN_ON(link->sactive & (1 << qc->tag));

		if (!link->sactive)
			ap->nr_active_links++;
		link->sactive |= 1 << qc->tag;
	} else {
		WARN_ON(link->sactive);

		ap->nr_active_links++;
		link->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1 << qc->tag;

	/* We guarantee to LLDs that they will have at least one
	 * non-zero sg if the command is a data command.
	 */
	BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));

	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
				 (ap->flags & ATA_FLAG_PIO_DMA)))
		if (ata_sg_setup(qc))
			goto sg_err;

	/* if device is sleeping, schedule softreset and abort the link */
	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
		link->eh_info.action |= ATA_EH_SOFTRESET;
		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
		ata_link_abort(link);
		return;
	}

	ap->ops->qc_prep(qc);

	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sg_err:
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	ata_qc_complete(qc);
}

/**
 *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
 *	@qc: command to issue to device
 *
 *	Using various libata functions and hooks, this function
 *	starts an ATA command.  ATA commands are grouped into
 *	classes called "protocols", and issuing each type of protocol
 *	is slightly different.
 *
 *	May be used as the qc_issue() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */

unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Use polling pio if the LLD doesn't handle
	 * interrupt driven pio and atapi CDB interrupt.
	 */
	if (ap->flags & ATA_FLAG_PIO_POLLING) {
		switch (qc->tf.protocol) {
		case ATA_PROT_PIO:
		case ATA_PROT_NODATA:
		case ATAPI_PROT_PIO:
		case ATAPI_PROT_NODATA:
			qc->tf.flags |= ATA_TFLAG_POLLING;
			break;
		case ATAPI_PROT_DMA:
			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
				/* see ata_dma_blacklisted() */
				BUG();
			break;
		default:
			break;
		}
	}

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command */
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);
		ap->hsm_task_state = HSM_ST_LAST;

		/* polled: the workqueue collects command status */
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_pio_queue_task(ap, qc, 0);

		break;

	case ATA_PROT_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	 /* set up bmdma */
		ap->ops->bmdma_start(qc);	 /* initiate bmdma */
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	case ATA_PROT_PIO:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* PIO data out protocol */
			ap->hsm_task_state = HSM_ST_FIRST;
			ata_pio_queue_task(ap, qc, 0);

			/* always send first data block using
			 * the ata_pio_task() codepath.
			 */
		} else {
			/* PIO data in protocol */
			ap->hsm_task_state = HSM_ST;

			if (qc->tf.flags & ATA_TFLAG_POLLING)
				ata_pio_queue_task(ap, qc, 0);

			/* if polling, ata_pio_task() handles the rest.
			 * otherwise, interrupt handler takes over from here.
			 */
		}

		break;

	case ATAPI_PROT_PIO:
	case ATAPI_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
		    (qc->tf.flags & ATA_TFLAG_POLLING))
			ata_pio_queue_task(ap, qc, 0);
		break;

	case ATAPI_PROT_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	 /* set up bmdma */
		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			ata_pio_queue_task(ap, qc, 0);
		break;

	default:
		WARN_ON(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}

/**
 *	ata_host_intr - Handle host interrupt for given (port, task)
 *	@ap: Port on which interrupt arrived (possibly...)
 *	@qc: Taskfile currently active in engine
 *
 *	Handle host interrupt for given queued command.  Currently,
 *	only DMA interrupts are handled.  All other commands are
 *	handled via polling with interrupts disabled (nIEN bit).
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	One if interrupt was handled, zero if not (shared irq).
 */

inline unsigned int ata_host_intr(struct ata_port *ap,
				  struct ata_queued_cmd *qc)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u8 status, host_stat = 0;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state);

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices.  No
		 * need to check ata_is_atapi(qc->tf.protocol) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto idle_irq;
		break;
	case HSM_ST_LAST:
		if (qc->tf.protocol == ATA_PROT_DMA ||
		    qc->tf.protocol == ATAPI_PROT_DMA) {
			/* check status of DMA engine */
			host_stat = ap->ops->bmdma_status(ap);
			VPRINTK("ata%u: host_stat 0x%X\n",
				ap->print_id, host_stat);

			/* if it's not our irq... */
			if (!(host_stat & ATA_DMA_INTR))
				goto idle_irq;

			/* before we do anything else, clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			if (unlikely(host_stat & ATA_DMA_ERR)) {
				/* error when transfering data to/from memory */
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		break;
	default:
		goto idle_irq;
	}

	/* check altstatus (does not clear INTRQ) first */
	status = ata_altstatus(ap);
	if (status & ATA_BUSY)
		goto idle_irq;

	/* check main status, clearing INTRQ */
	status = ata_chk_status(ap);
	if (unlikely(status & ATA_BUSY))
		goto idle_irq;

	/* ack bmdma irq events */
	ap->ops->irq_clear(ap);

	ata_hsm_move(ap, qc, status, 0);

	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
				       qc->tf.protocol == ATAPI_PROT_DMA))
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);

	return 1;	/* irq handled */

idle_irq:
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	/* with the trap enabled, periodically ack spurious interrupts so a
	 * stuck line doesn't wedge the system
	 */
	if ((ap->stats.idle_irq % 1000) == 0) {
		ata_chk_status(ap);
		ap->ops->irq_clear(ap);
		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}

/**
 *	ata_interrupt - Default ATA host interrupt handler
 *	@irq: irq line (unused)
 *	@dev_instance: pointer to our ata_host information structure
 *
 *	Default interrupt handler for PCI IDE devices.  Calls
 *	ata_host_intr() for each port that is not disabled.
 *
 *	LOCKING:
 *	Obtains host lock during operation.
 *
 *	RETURNS:
 *	IRQ_NONE or IRQ_HANDLED.
6277 */ 6278 6279irqreturn_t ata_interrupt(int irq, void *dev_instance) 6280{ 6281 struct ata_host *host = dev_instance; 6282 unsigned int i; 6283 unsigned int handled = 0; 6284 unsigned long flags; 6285 6286 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */ 6287 spin_lock_irqsave(&host->lock, flags); 6288 6289 for (i = 0; i < host->n_ports; i++) { 6290 struct ata_port *ap; 6291 6292 ap = host->ports[i]; 6293 if (ap && 6294 !(ap->flags & ATA_FLAG_DISABLED)) { 6295 struct ata_queued_cmd *qc; 6296 6297 qc = ata_qc_from_tag(ap, ap->link.active_tag); 6298 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) && 6299 (qc->flags & ATA_QCFLAG_ACTIVE)) 6300 handled |= ata_host_intr(ap, qc); 6301 } 6302 } 6303 6304 spin_unlock_irqrestore(&host->lock, flags); 6305 6306 return IRQ_RETVAL(handled); 6307} 6308 6309/** 6310 * sata_scr_valid - test whether SCRs are accessible 6311 * @link: ATA link to test SCR accessibility for 6312 * 6313 * Test whether SCRs are accessible for @link. 6314 * 6315 * LOCKING: 6316 * None. 6317 * 6318 * RETURNS: 6319 * 1 if SCRs are accessible, 0 otherwise. 6320 */ 6321int sata_scr_valid(struct ata_link *link) 6322{ 6323 struct ata_port *ap = link->ap; 6324 6325 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read; 6326} 6327 6328/** 6329 * sata_scr_read - read SCR register of the specified port 6330 * @link: ATA link to read SCR for 6331 * @reg: SCR to read 6332 * @val: Place to store read value 6333 * 6334 * Read SCR register @reg of @link into *@val. This function is 6335 * guaranteed to succeed if @link is ap->link, the cable type of 6336 * the port is SATA and the port implements ->scr_read. 6337 * 6338 * LOCKING: 6339 * None if @link is ap->link. Kernel thread context otherwise. 6340 * 6341 * RETURNS: 6342 * 0 on success, negative errno on failure. 
6343 */ 6344int sata_scr_read(struct ata_link *link, int reg, u32 *val) 6345{ 6346 if (ata_is_host_link(link)) { 6347 struct ata_port *ap = link->ap; 6348 6349 if (sata_scr_valid(link)) 6350 return ap->ops->scr_read(ap, reg, val); 6351 return -EOPNOTSUPP; 6352 } 6353 6354 return sata_pmp_scr_read(link, reg, val); 6355} 6356 6357/** 6358 * sata_scr_write - write SCR register of the specified port 6359 * @link: ATA link to write SCR for 6360 * @reg: SCR to write 6361 * @val: value to write 6362 * 6363 * Write @val to SCR register @reg of @link. This function is 6364 * guaranteed to succeed if @link is ap->link, the cable type of 6365 * the port is SATA and the port implements ->scr_read. 6366 * 6367 * LOCKING: 6368 * None if @link is ap->link. Kernel thread context otherwise. 6369 * 6370 * RETURNS: 6371 * 0 on success, negative errno on failure. 6372 */ 6373int sata_scr_write(struct ata_link *link, int reg, u32 val) 6374{ 6375 if (ata_is_host_link(link)) { 6376 struct ata_port *ap = link->ap; 6377 6378 if (sata_scr_valid(link)) 6379 return ap->ops->scr_write(ap, reg, val); 6380 return -EOPNOTSUPP; 6381 } 6382 6383 return sata_pmp_scr_write(link, reg, val); 6384} 6385 6386/** 6387 * sata_scr_write_flush - write SCR register of the specified port and flush 6388 * @link: ATA link to write SCR for 6389 * @reg: SCR to write 6390 * @val: value to write 6391 * 6392 * This function is identical to sata_scr_write() except that this 6393 * function performs flush after writing to the register. 6394 * 6395 * LOCKING: 6396 * None if @link is ap->link. Kernel thread context otherwise. 6397 * 6398 * RETURNS: 6399 * 0 on success, negative errno on failure. 
 */
int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
{
	if (ata_is_host_link(link)) {
		struct ata_port *ap = link->ap;
		int rc;

		if (sata_scr_valid(link)) {
			rc = ap->ops->scr_write(ap, reg, val);
			if (rc == 0)
				/* read back to flush the posted write */
				rc = ap->ops->scr_read(ap, reg, &val);
			return rc;
		}
		return -EOPNOTSUPP;
	}

	return sata_pmp_scr_write(link, reg, val);
}

/**
 *	ata_link_online - test whether the given link is online
 *	@link: ATA link to test
 *
 *	Test whether @link is online.  Note that this function returns
 *	0 if online status of @link cannot be obtained, so
 *	ata_link_online(link) != !ata_link_offline(link).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if the port online status is available and online.
 */
int ata_link_online(struct ata_link *link)
{
	u32 sstatus;

	/* DET field (low nibble) == 3 means device present, PHY up */
	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
	    (sstatus & 0xf) == 0x3)
		return 1;
	return 0;
}

/**
 *	ata_link_offline - test whether the given link is offline
 *	@link: ATA link to test
 *
 *	Test whether @link is offline.  Note that this function
 *	returns 0 if offline status of @link cannot be obtained, so
 *	ata_link_online(link) != !ata_link_offline(link).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if the port offline status is available and offline.
 */
int ata_link_offline(struct ata_link *link)
{
	u32 sstatus;

	if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
	    (sstatus & 0xf) != 0x3)
		return 1;
	return 0;
}

/**
 *	ata_flush_cache - issue FLUSH CACHE [EXT] to a device
 *	@dev: target device
 *
 *	No-op if @dev doesn't need/support cache flushing.  Otherwise
 *	issues FLUSH CACHE EXT or FLUSH CACHE depending on
 *	ATA_DFLAG_FLUSH_EXT.
 *
 *	RETURNS:
 *	0 on success, -EIO if the flush command failed.
 */
int ata_flush_cache(struct ata_device *dev)
{
	unsigned int err_mask;
	u8 cmd;

	if (!ata_try_flush_cache(dev))
		return 0;

	if (dev->flags & ATA_DFLAG_FLUSH_EXT)
		cmd = ATA_CMD_FLUSH_EXT;
	else
		cmd = ATA_CMD_FLUSH;

	/* This is wrong. On a failed flush we get back the LBA of the lost
	   sector and we should (assuming it wasn't aborted as unknown) issue
	   a further flush command to continue the writeback until it
	   does not error */
	err_mask = ata_do_simple_cmd(dev, cmd);
	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
		return -EIO;
	}

	return 0;
}

#ifdef CONFIG_PM
/**
 *	ata_host_request_pm - ask EH to perform a PM transition on each port
 *	@host: host to operate on
 *	@mesg: PM message to set on each port
 *	@action: EH action to request on every link of each port
 *	@ehi_flags: EH info flags to set on every link of each port
 *	@wait: if non-zero, wait for EH to finish on each port and
 *	       return the first non-zero result
 *
 *	Schedules port EH to carry out the suspend/resume request,
 *	port by port, optionally waiting for each to complete.
 */
static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
			       unsigned int action, unsigned int ehi_flags,
			       int wait)
{
	unsigned long flags;
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_link *link;

		/* Previous resume operation might still be in
		 * progress.  Wait for PM_PENDING to clear.
		 */
		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
		}

		/* request PM ops to EH */
		spin_lock_irqsave(ap->lock, flags);

		ap->pm_mesg = mesg;
		if (wait) {
			rc = 0;
			/* EH writes its result through ap->pm_result */
			ap->pm_result = &rc;
		}

		ap->pflags |= ATA_PFLAG_PM_PENDING;
		__ata_port_for_each_link(link, ap) {
			link->eh_info.action |= action;
			link->eh_info.flags |= ehi_flags;
		}

		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait and check result */
		if (wait) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
			if (rc)
				return rc;
		}
	}

	return 0;
}

/**
 *	ata_host_suspend - suspend host
 *	@host: host to suspend
 *	@mesg: PM message
 *
 *	Suspend @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and waits for EH
 *	to finish.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	int rc;

	/*
	 * disable link pm on all ports before requesting
	 * any pm activity
	 */
	ata_lpm_enable(host);

	/* wait == 1: block until every port's EH has finished */
	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
	return rc;
}

/**
 *	ata_host_resume - resume host
 *	@host: host to resume
 *
 *	Resume @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and returns.
 *	Note that all resume operations are performed parallely.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_resume(struct ata_host *host)
{
	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);

	/* reenable link pm */
	ata_lpm_disable(host);
}
#endif

/**
 *	ata_port_start - Set port up for dma.
 *	@ap: Port to initialize
 *
 *	Called just after data structures for each port are
 *	initialized.  Allocates space for PRD table (managed, freed
 *	automatically on device teardown).
 *
 *	May be used as the port_start() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, -ENOMEM if the PRD table cannot be allocated.
 */
int ata_port_start(struct ata_port *ap)
{
	struct device *dev = ap->dev;

	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
				      GFP_KERNEL);
	if (!ap->prd)
		return -ENOMEM;

	return 0;
}

/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the first device */
	link->sata_spd_limit = link->hw_sata_spd_limit;
	link->sata_spd = 0;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	dev->horkage = 0;
	spin_unlock_irqrestore(ap->lock, flags);

	/* wipe everything past the persistent prefix of the structure */
	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
	/* start with all transfer modes allowed; probing narrows them */
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}

/**
 *	ata_link_init - Initialize an ata_link structure
 *	@ap: ATA port link is attached to
 *	@link: Link structure to initialize
 *	@pmp: Port multiplier port number
 *
 *	Initialize @link.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
{
	int i;

	/* clear everything except for devices */
	memset(link, 0, offsetof(struct ata_link, device[0]));

	link->ap = ap;
	link->pmp = pmp;
	link->active_tag = ATA_TAG_POISON;
	link->hw_sata_spd_limit = UINT_MAX;

	/* can't use iterator, ap isn't initialized yet */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &link->device[i];

		dev->link = link;
		dev->devno = dev - link->device;
		ata_dev_init(dev);
	}
}

/**
 *	sata_link_init_spd - Initialize link->sata_spd_limit
 *	@link: Link to configure sata_spd_limit for
 *
 *	Initialize @link->[hw_]sata_spd_limit to the currently
 *	configured value.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
6695 * 6696 * RETURNS: 6697 * 0 on success, -errno on failure. 6698 */ 6699int sata_link_init_spd(struct ata_link *link) 6700{ 6701 u32 scontrol; 6702 u8 spd; 6703 int rc; 6704 6705 rc = sata_scr_read(link, SCR_CONTROL, &scontrol); 6706 if (rc) 6707 return rc; 6708 6709 spd = (scontrol >> 4) & 0xf; 6710 if (spd) 6711 link->hw_sata_spd_limit &= (1 << spd) - 1; 6712 6713 ata_force_spd_limit(link); 6714 6715 link->sata_spd_limit = link->hw_sata_spd_limit; 6716 6717 return 0; 6718} 6719 6720/** 6721 * ata_port_alloc - allocate and initialize basic ATA port resources 6722 * @host: ATA host this allocated port belongs to 6723 * 6724 * Allocate and initialize basic ATA port resources. 6725 * 6726 * RETURNS: 6727 * Allocate ATA port on success, NULL on failure. 6728 * 6729 * LOCKING: 6730 * Inherited from calling layer (may sleep). 6731 */ 6732struct ata_port *ata_port_alloc(struct ata_host *host) 6733{ 6734 struct ata_port *ap; 6735 6736 DPRINTK("ENTER\n"); 6737 6738 ap = kzalloc(sizeof(*ap), GFP_KERNEL); 6739 if (!ap) 6740 return NULL; 6741 6742 ap->pflags |= ATA_PFLAG_INITIALIZING; 6743 ap->lock = &host->lock; 6744 ap->flags = ATA_FLAG_DISABLED; 6745 ap->print_id = -1; 6746 ap->ctl = ATA_DEVCTL_OBS; 6747 ap->host = host; 6748 ap->dev = host->dev; 6749 ap->last_ctl = 0xFF; 6750 6751#if defined(ATA_VERBOSE_DEBUG) 6752 /* turn on all debugging levels */ 6753 ap->msg_enable = 0x00FF; 6754#elif defined(ATA_DEBUG) 6755 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR; 6756#else 6757 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN; 6758#endif 6759 6760 INIT_DELAYED_WORK(&ap->port_task, ata_pio_task); 6761 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug); 6762 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); 6763 INIT_LIST_HEAD(&ap->eh_done_q); 6764 init_waitqueue_head(&ap->eh_wait_q); 6765 init_timer_deferrable(&ap->fastdrain_timer); 6766 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn; 6767 
ap->fastdrain_timer.data = (unsigned long)ap; 6768 6769 ap->cbl = ATA_CBL_NONE; 6770 6771 ata_link_init(ap, &ap->link, 0); 6772 6773#ifdef ATA_IRQ_TRAP 6774 ap->stats.unhandled_irq = 1; 6775 ap->stats.idle_irq = 1; 6776#endif 6777 return ap; 6778} 6779 6780static void ata_host_release(struct device *gendev, void *res) 6781{ 6782 struct ata_host *host = dev_get_drvdata(gendev); 6783 int i; 6784 6785 for (i = 0; i < host->n_ports; i++) { 6786 struct ata_port *ap = host->ports[i]; 6787 6788 if (!ap) 6789 continue; 6790 6791 if (ap->scsi_host) 6792 scsi_host_put(ap->scsi_host); 6793 6794 kfree(ap->pmp_link); 6795 kfree(ap); 6796 host->ports[i] = NULL; 6797 } 6798 6799 dev_set_drvdata(gendev, NULL); 6800} 6801 6802/** 6803 * ata_host_alloc - allocate and init basic ATA host resources 6804 * @dev: generic device this host is associated with 6805 * @max_ports: maximum number of ATA ports associated with this host 6806 * 6807 * Allocate and initialize basic ATA host resources. LLD calls 6808 * this function to allocate a host, initializes it fully and 6809 * attaches it using ata_host_register(). 6810 * 6811 * @max_ports ports are allocated and host->n_ports is 6812 * initialized to @max_ports. The caller is allowed to decrease 6813 * host->n_ports before calling ata_host_register(). The unused 6814 * ports will be automatically freed on registration. 6815 * 6816 * RETURNS: 6817 * Allocate ATA host on success, NULL on failure. 6818 * 6819 * LOCKING: 6820 * Inherited from calling layer (may sleep). 
6821 */ 6822struct ata_host *ata_host_alloc(struct device *dev, int max_ports) 6823{ 6824 struct ata_host *host; 6825 size_t sz; 6826 int i; 6827 6828 DPRINTK("ENTER\n"); 6829 6830 if (!devres_open_group(dev, NULL, GFP_KERNEL)) 6831 return NULL; 6832 6833 /* alloc a container for our list of ATA ports (buses) */ 6834 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *); 6835 /* alloc a container for our list of ATA ports (buses) */ 6836 host = devres_alloc(ata_host_release, sz, GFP_KERNEL); 6837 if (!host) 6838 goto err_out; 6839 6840 devres_add(dev, host); 6841 dev_set_drvdata(dev, host); 6842 6843 spin_lock_init(&host->lock); 6844 host->dev = dev; 6845 host->n_ports = max_ports; 6846 6847 /* allocate ports bound to this host */ 6848 for (i = 0; i < max_ports; i++) { 6849 struct ata_port *ap; 6850 6851 ap = ata_port_alloc(host); 6852 if (!ap) 6853 goto err_out; 6854 6855 ap->port_no = i; 6856 host->ports[i] = ap; 6857 } 6858 6859 devres_remove_group(dev, NULL); 6860 return host; 6861 6862 err_out: 6863 devres_release_group(dev, NULL); 6864 return NULL; 6865} 6866 6867/** 6868 * ata_host_alloc_pinfo - alloc host and init with port_info array 6869 * @dev: generic device this host is associated with 6870 * @ppi: array of ATA port_info to initialize host with 6871 * @n_ports: number of ATA ports attached to this host 6872 * 6873 * Allocate ATA host and initialize with info from @ppi. If NULL 6874 * terminated, @ppi may contain fewer entries than @n_ports. The 6875 * last entry will be used for the remaining ports. 6876 * 6877 * RETURNS: 6878 * Allocate ATA host on success, NULL on failure. 6879 * 6880 * LOCKING: 6881 * Inherited from calling layer (may sleep). 
6882 */ 6883struct ata_host *ata_host_alloc_pinfo(struct device *dev, 6884 const struct ata_port_info * const * ppi, 6885 int n_ports) 6886{ 6887 const struct ata_port_info *pi; 6888 struct ata_host *host; 6889 int i, j; 6890 6891 host = ata_host_alloc(dev, n_ports); 6892 if (!host) 6893 return NULL; 6894 6895 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) { 6896 struct ata_port *ap = host->ports[i]; 6897 6898 if (ppi[j]) 6899 pi = ppi[j++]; 6900 6901 ap->pio_mask = pi->pio_mask; 6902 ap->mwdma_mask = pi->mwdma_mask; 6903 ap->udma_mask = pi->udma_mask; 6904 ap->flags |= pi->flags; 6905 ap->link.flags |= pi->link_flags; 6906 ap->ops = pi->port_ops; 6907 6908 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops)) 6909 host->ops = pi->port_ops; 6910 if (!host->private_data && pi->private_data) 6911 host->private_data = pi->private_data; 6912 } 6913 6914 return host; 6915} 6916 6917static void ata_host_stop(struct device *gendev, void *res) 6918{ 6919 struct ata_host *host = dev_get_drvdata(gendev); 6920 int i; 6921 6922 WARN_ON(!(host->flags & ATA_HOST_STARTED)); 6923 6924 for (i = 0; i < host->n_ports; i++) { 6925 struct ata_port *ap = host->ports[i]; 6926 6927 if (ap->ops->port_stop) 6928 ap->ops->port_stop(ap); 6929 } 6930 6931 if (host->ops->host_stop) 6932 host->ops->host_stop(host); 6933} 6934 6935/** 6936 * ata_host_start - start and freeze ports of an ATA host 6937 * @host: ATA host to start ports for 6938 * 6939 * Start and then freeze ports of @host. Started status is 6940 * recorded in host->flags, so this function can be called 6941 * multiple times. Ports are guaranteed to get started only 6942 * once. If host->ops isn't initialized yet, its set to the 6943 * first non-dummy port ops. 6944 * 6945 * LOCKING: 6946 * Inherited from calling layer (may sleep). 6947 * 6948 * RETURNS: 6949 * 0 if all ports are started successfully, -errno otherwise. 
6950 */ 6951int ata_host_start(struct ata_host *host) 6952{ 6953 int have_stop = 0; 6954 void *start_dr = NULL; 6955 int i, rc; 6956 6957 if (host->flags & ATA_HOST_STARTED) 6958 return 0; 6959 6960 for (i = 0; i < host->n_ports; i++) { 6961 struct ata_port *ap = host->ports[i]; 6962 6963 if (!host->ops && !ata_port_is_dummy(ap)) 6964 host->ops = ap->ops; 6965 6966 if (ap->ops->port_stop) 6967 have_stop = 1; 6968 } 6969 6970 if (host->ops->host_stop) 6971 have_stop = 1; 6972 6973 if (have_stop) { 6974 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL); 6975 if (!start_dr) 6976 return -ENOMEM; 6977 } 6978 6979 for (i = 0; i < host->n_ports; i++) { 6980 struct ata_port *ap = host->ports[i]; 6981 6982 if (ap->ops->port_start) { 6983 rc = ap->ops->port_start(ap); 6984 if (rc) { 6985 if (rc != -ENODEV) 6986 dev_printk(KERN_ERR, host->dev, 6987 "failed to start port %d " 6988 "(errno=%d)\n", i, rc); 6989 goto err_out; 6990 } 6991 } 6992 ata_eh_freeze_port(ap); 6993 } 6994 6995 if (start_dr) 6996 devres_add(host->dev, start_dr); 6997 host->flags |= ATA_HOST_STARTED; 6998 return 0; 6999 7000 err_out: 7001 while (--i >= 0) { 7002 struct ata_port *ap = host->ports[i]; 7003 7004 if (ap->ops->port_stop) 7005 ap->ops->port_stop(ap); 7006 } 7007 devres_free(start_dr); 7008 return rc; 7009} 7010 7011/** 7012 * ata_sas_host_init - Initialize a host struct 7013 * @host: host to initialize 7014 * @dev: device host is attached to 7015 * @flags: host flags 7016 * @ops: port_ops 7017 * 7018 * LOCKING: 7019 * PCI/etc. bus probe sem. 
7020 * 7021 */ 7022/* KILLME - the only user left is ipr */ 7023void ata_host_init(struct ata_host *host, struct device *dev, 7024 unsigned long flags, const struct ata_port_operations *ops) 7025{ 7026 spin_lock_init(&host->lock); 7027 host->dev = dev; 7028 host->flags = flags; 7029 host->ops = ops; 7030} 7031 7032/** 7033 * ata_host_register - register initialized ATA host 7034 * @host: ATA host to register 7035 * @sht: template for SCSI host 7036 * 7037 * Register initialized ATA host. @host is allocated using 7038 * ata_host_alloc() and fully initialized by LLD. This function 7039 * starts ports, registers @host with ATA and SCSI layers and 7040 * probe registered devices. 7041 * 7042 * LOCKING: 7043 * Inherited from calling layer (may sleep). 7044 * 7045 * RETURNS: 7046 * 0 on success, -errno otherwise. 7047 */ 7048int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) 7049{ 7050 int i, rc; 7051 7052 /* host must have been started */ 7053 if (!(host->flags & ATA_HOST_STARTED)) { 7054 dev_printk(KERN_ERR, host->dev, 7055 "BUG: trying to register unstarted host\n"); 7056 WARN_ON(1); 7057 return -EINVAL; 7058 } 7059 7060 /* Blow away unused ports. This happens when LLD can't 7061 * determine the exact number of ports to allocate at 7062 * allocation time. 
7063 */ 7064 for (i = host->n_ports; host->ports[i]; i++) 7065 kfree(host->ports[i]); 7066 7067 /* give ports names and add SCSI hosts */ 7068 for (i = 0; i < host->n_ports; i++) 7069 host->ports[i]->print_id = ata_print_id++; 7070 7071 rc = ata_scsi_add_hosts(host, sht); 7072 if (rc) 7073 return rc; 7074 7075 /* associate with ACPI nodes */ 7076 ata_acpi_associate(host); 7077 7078 /* set cable, sata_spd_limit and report */ 7079 for (i = 0; i < host->n_ports; i++) { 7080 struct ata_port *ap = host->ports[i]; 7081 unsigned long xfer_mask; 7082 7083 /* set SATA cable type if still unset */ 7084 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA)) 7085 ap->cbl = ATA_CBL_SATA; 7086 7087 /* init sata_spd_limit to the current value */ 7088 sata_link_init_spd(&ap->link); 7089 7090 /* print per-port info to dmesg */ 7091 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask, 7092 ap->udma_mask); 7093 7094 if (!ata_port_is_dummy(ap)) { 7095 ata_port_printk(ap, KERN_INFO, 7096 "%cATA max %s %s\n", 7097 (ap->flags & ATA_FLAG_SATA) ? 
'S' : 'P', 7098 ata_mode_string(xfer_mask), 7099 ap->link.eh_info.desc); 7100 ata_ehi_clear_desc(&ap->link.eh_info); 7101 } else 7102 ata_port_printk(ap, KERN_INFO, "DUMMY\n"); 7103 } 7104 7105 /* perform each probe synchronously */ 7106 DPRINTK("probe begin\n"); 7107 for (i = 0; i < host->n_ports; i++) { 7108 struct ata_port *ap = host->ports[i]; 7109 7110 /* probe */ 7111 if (ap->ops->error_handler) { 7112 struct ata_eh_info *ehi = &ap->link.eh_info; 7113 unsigned long flags; 7114 7115 ata_port_probe(ap); 7116 7117 /* kick EH for boot probing */ 7118 spin_lock_irqsave(ap->lock, flags); 7119 7120 ehi->probe_mask = 7121 (1 << ata_link_max_devices(&ap->link)) - 1; 7122 ehi->action |= ATA_EH_SOFTRESET; 7123 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET; 7124 7125 ap->pflags &= ~ATA_PFLAG_INITIALIZING; 7126 ap->pflags |= ATA_PFLAG_LOADING; 7127 ata_port_schedule_eh(ap); 7128 7129 spin_unlock_irqrestore(ap->lock, flags); 7130 7131 /* wait for EH to finish */ 7132 ata_port_wait_eh(ap); 7133 } else { 7134 DPRINTK("ata%u: bus probe begin\n", ap->print_id); 7135 rc = ata_bus_probe(ap); 7136 DPRINTK("ata%u: bus probe end\n", ap->print_id); 7137 7138 if (rc) { 7139 /* FIXME: do something useful here? 7140 * Current libata behavior will 7141 * tear down everything when 7142 * the module is removed 7143 * or the h/w is unplugged. 
7144 */ 7145 } 7146 } 7147 } 7148 7149 /* probes are done, now scan each port's disk(s) */ 7150 DPRINTK("host probe begin\n"); 7151 for (i = 0; i < host->n_ports; i++) { 7152 struct ata_port *ap = host->ports[i]; 7153 7154 ata_scsi_scan_host(ap, 1); 7155 ata_lpm_schedule(ap, ap->pm_policy); 7156 } 7157 7158 return 0; 7159} 7160 7161/** 7162 * ata_host_activate - start host, request IRQ and register it 7163 * @host: target ATA host 7164 * @irq: IRQ to request 7165 * @irq_handler: irq_handler used when requesting IRQ 7166 * @irq_flags: irq_flags used when requesting IRQ 7167 * @sht: scsi_host_template to use when registering the host 7168 * 7169 * After allocating an ATA host and initializing it, most libata 7170 * LLDs perform three steps to activate the host - start host, 7171 * request IRQ and register it. This helper takes necessasry 7172 * arguments and performs the three steps in one go. 7173 * 7174 * An invalid IRQ skips the IRQ registration and expects the host to 7175 * have set polling mode on the port. In this case, @irq_handler 7176 * should be NULL. 7177 * 7178 * LOCKING: 7179 * Inherited from calling layer (may sleep). 7180 * 7181 * RETURNS: 7182 * 0 on success, -errno otherwise. 
7183 */ 7184int ata_host_activate(struct ata_host *host, int irq, 7185 irq_handler_t irq_handler, unsigned long irq_flags, 7186 struct scsi_host_template *sht) 7187{ 7188 int i, rc; 7189 7190 rc = ata_host_start(host); 7191 if (rc) 7192 return rc; 7193 7194 /* Special case for polling mode */ 7195 if (!irq) { 7196 WARN_ON(irq_handler); 7197 return ata_host_register(host, sht); 7198 } 7199 7200 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags, 7201 dev_driver_string(host->dev), host); 7202 if (rc) 7203 return rc; 7204 7205 for (i = 0; i < host->n_ports; i++) 7206 ata_port_desc(host->ports[i], "irq %d", irq); 7207 7208 rc = ata_host_register(host, sht); 7209 /* if failed, just free the IRQ and leave ports alone */ 7210 if (rc) 7211 devm_free_irq(host->dev, irq, host); 7212 7213 return rc; 7214} 7215 7216/** 7217 * ata_port_detach - Detach ATA port in prepration of device removal 7218 * @ap: ATA port to be detached 7219 * 7220 * Detach all ATA devices and the associated SCSI devices of @ap; 7221 * then, remove the associated SCSI host. @ap is guaranteed to 7222 * be quiescent on return from this function. 7223 * 7224 * LOCKING: 7225 * Kernel thread context (may sleep). 7226 */ 7227static void ata_port_detach(struct ata_port *ap) 7228{ 7229 unsigned long flags; 7230 struct ata_link *link; 7231 struct ata_device *dev; 7232 7233 if (!ap->ops->error_handler) 7234 goto skip_eh; 7235 7236 /* tell EH we're leaving & flush EH */ 7237 spin_lock_irqsave(ap->lock, flags); 7238 ap->pflags |= ATA_PFLAG_UNLOADING; 7239 spin_unlock_irqrestore(ap->lock, flags); 7240 7241 ata_port_wait_eh(ap); 7242 7243 /* EH is now guaranteed to see UNLOADING - EH context belongs 7244 * to us. Disable all existing devices. 7245 */ 7246 ata_port_for_each_link(link, ap) { 7247 ata_link_for_each_dev(dev, link) 7248 ata_dev_disable(dev); 7249 } 7250 7251 /* Final freeze & EH. All in-flight commands are aborted. EH 7252 * will be skipped and retrials will be terminated with bad 7253 * target. 
7254 */ 7255 spin_lock_irqsave(ap->lock, flags); 7256 ata_port_freeze(ap); /* won't be thawed */ 7257 spin_unlock_irqrestore(ap->lock, flags); 7258 7259 ata_port_wait_eh(ap); 7260 cancel_rearming_delayed_work(&ap->hotplug_task); 7261 7262 skip_eh: 7263 /* remove the associated SCSI host */ 7264 scsi_remove_host(ap->scsi_host); 7265} 7266 7267/** 7268 * ata_host_detach - Detach all ports of an ATA host 7269 * @host: Host to detach 7270 * 7271 * Detach all ports of @host. 7272 * 7273 * LOCKING: 7274 * Kernel thread context (may sleep). 7275 */ 7276void ata_host_detach(struct ata_host *host) 7277{ 7278 int i; 7279 7280 for (i = 0; i < host->n_ports; i++) 7281 ata_port_detach(host->ports[i]); 7282 7283 /* the host is dead now, dissociate ACPI */ 7284 ata_acpi_dissociate(host); 7285} 7286 7287/** 7288 * ata_std_ports - initialize ioaddr with standard port offsets. 7289 * @ioaddr: IO address structure to be initialized 7290 * 7291 * Utility function which initializes data_addr, error_addr, 7292 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr, 7293 * device_addr, status_addr, and command_addr to standard offsets 7294 * relative to cmd_addr. 7295 * 7296 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr. 
7297 */ 7298 7299void ata_std_ports(struct ata_ioports *ioaddr) 7300{ 7301 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA; 7302 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR; 7303 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE; 7304 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT; 7305 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL; 7306 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM; 7307 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH; 7308 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE; 7309 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS; 7310 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD; 7311} 7312 7313 7314#ifdef CONFIG_PCI 7315 7316/** 7317 * ata_pci_remove_one - PCI layer callback for device removal 7318 * @pdev: PCI device that was removed 7319 * 7320 * PCI layer indicates to libata via this hook that hot-unplug or 7321 * module unload event has occurred. Detach all ports. Resource 7322 * release is handled via devres. 7323 * 7324 * LOCKING: 7325 * Inherited from PCI layer (may sleep). 7326 */ 7327void ata_pci_remove_one(struct pci_dev *pdev) 7328{ 7329 struct device *dev = &pdev->dev; 7330 struct ata_host *host = dev_get_drvdata(dev); 7331 7332 ata_host_detach(host); 7333} 7334 7335/* move to PCI subsystem */ 7336int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits) 7337{ 7338 unsigned long tmp = 0; 7339 7340 switch (bits->width) { 7341 case 1: { 7342 u8 tmp8 = 0; 7343 pci_read_config_byte(pdev, bits->reg, &tmp8); 7344 tmp = tmp8; 7345 break; 7346 } 7347 case 2: { 7348 u16 tmp16 = 0; 7349 pci_read_config_word(pdev, bits->reg, &tmp16); 7350 tmp = tmp16; 7351 break; 7352 } 7353 case 4: { 7354 u32 tmp32 = 0; 7355 pci_read_config_dword(pdev, bits->reg, &tmp32); 7356 tmp = tmp32; 7357 break; 7358 } 7359 7360 default: 7361 return -EINVAL; 7362 } 7363 7364 tmp &= bits->mask; 7365 7366 return (tmp == bits->val) ? 
1 : 0; 7367} 7368 7369#ifdef CONFIG_PM 7370void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg) 7371{ 7372 pci_save_state(pdev); 7373 pci_disable_device(pdev); 7374 7375 if (mesg.event & PM_EVENT_SLEEP) 7376 pci_set_power_state(pdev, PCI_D3hot); 7377} 7378 7379int ata_pci_device_do_resume(struct pci_dev *pdev) 7380{ 7381 int rc; 7382 7383 pci_set_power_state(pdev, PCI_D0); 7384 pci_restore_state(pdev); 7385 7386 rc = pcim_enable_device(pdev); 7387 if (rc) { 7388 dev_printk(KERN_ERR, &pdev->dev, 7389 "failed to enable device after resume (%d)\n", rc); 7390 return rc; 7391 } 7392 7393 pci_set_master(pdev); 7394 return 0; 7395} 7396 7397int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) 7398{ 7399 struct ata_host *host = dev_get_drvdata(&pdev->dev); 7400 int rc = 0; 7401 7402 rc = ata_host_suspend(host, mesg); 7403 if (rc) 7404 return rc; 7405 7406 ata_pci_device_do_suspend(pdev, mesg); 7407 7408 return 0; 7409} 7410 7411int ata_pci_device_resume(struct pci_dev *pdev) 7412{ 7413 struct ata_host *host = dev_get_drvdata(&pdev->dev); 7414 int rc; 7415 7416 rc = ata_pci_device_do_resume(pdev); 7417 if (rc == 0) 7418 ata_host_resume(host); 7419 return rc; 7420} 7421#endif /* CONFIG_PM */ 7422 7423#endif /* CONFIG_PCI */ 7424 7425static int __init ata_parse_force_one(char **cur, 7426 struct ata_force_ent *force_ent, 7427 const char **reason) 7428{ 7429 /* FIXME: Currently, there's no way to tag init const data and 7430 * using __initdata causes build failure on some versions of 7431 * gcc. Once __initdataconst is implemented, add const to the 7432 * following structure. 
7433 */ 7434 static struct ata_force_param force_tbl[] __initdata = { 7435 { "40c", .cbl = ATA_CBL_PATA40 }, 7436 { "80c", .cbl = ATA_CBL_PATA80 }, 7437 { "short40c", .cbl = ATA_CBL_PATA40_SHORT }, 7438 { "unk", .cbl = ATA_CBL_PATA_UNK }, 7439 { "ign", .cbl = ATA_CBL_PATA_IGN }, 7440 { "sata", .cbl = ATA_CBL_SATA }, 7441 { "1.5Gbps", .spd_limit = 1 }, 7442 { "3.0Gbps", .spd_limit = 2 }, 7443 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ }, 7444 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ }, 7445 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) }, 7446 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) }, 7447 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) }, 7448 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) }, 7449 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) }, 7450 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) }, 7451 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) }, 7452 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) }, 7453 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) }, 7454 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) }, 7455 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) }, 7456 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) }, 7457 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, 7458 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, 7459 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, 7460 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) }, 7461 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) }, 7462 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) }, 7463 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) }, 7464 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) }, 7465 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) }, 7466 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) }, 7467 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) }, 7468 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) }, 7469 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) }, 7470 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA 
+ 4) }, 7471 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) }, 7472 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) }, 7473 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) }, 7474 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) }, 7475 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, 7476 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, 7477 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, 7478 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) }, 7479 }; 7480 char *start = *cur, *p = *cur; 7481 char *id, *val, *endp; 7482 const struct ata_force_param *match_fp = NULL; 7483 int nr_matches = 0, i; 7484 7485 /* find where this param ends and update *cur */ 7486 while (*p != '\0' && *p != ',') 7487 p++; 7488 7489 if (*p == '\0') 7490 *cur = p; 7491 else 7492 *cur = p + 1; 7493 7494 *p = '\0'; 7495 7496 /* parse */ 7497 p = strchr(start, ':'); 7498 if (!p) { 7499 val = strstrip(start); 7500 goto parse_val; 7501 } 7502 *p = '\0'; 7503 7504 id = strstrip(start); 7505 val = strstrip(p + 1); 7506 7507 /* parse id */ 7508 p = strchr(id, '.'); 7509 if (p) { 7510 *p++ = '\0'; 7511 force_ent->device = simple_strtoul(p, &endp, 10); 7512 if (p == endp || *endp != '\0') { 7513 *reason = "invalid device"; 7514 return -EINVAL; 7515 } 7516 } 7517 7518 force_ent->port = simple_strtoul(id, &endp, 10); 7519 if (p == endp || *endp != '\0') { 7520 *reason = "invalid port/link"; 7521 return -EINVAL; 7522 } 7523 7524 parse_val: 7525 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */ 7526 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) { 7527 const struct ata_force_param *fp = &force_tbl[i]; 7528 7529 if (strncasecmp(val, fp->name, strlen(val))) 7530 continue; 7531 7532 nr_matches++; 7533 match_fp = fp; 7534 7535 if (strcasecmp(val, fp->name) == 0) { 7536 nr_matches = 1; 7537 break; 7538 } 7539 } 7540 7541 if (!nr_matches) { 7542 *reason = "unknown value"; 7543 return -EINVAL; 7544 } 7545 if (nr_matches > 1) { 7546 *reason = "ambigious value"; 
7547 return -EINVAL; 7548 } 7549 7550 force_ent->param = *match_fp; 7551 7552 return 0; 7553} 7554 7555static void __init ata_parse_force_param(void) 7556{ 7557 int idx = 0, size = 1; 7558 int last_port = -1, last_device = -1; 7559 char *p, *cur, *next; 7560 7561 /* calculate maximum number of params and allocate force_tbl */ 7562 for (p = ata_force_param_buf; *p; p++) 7563 if (*p == ',') 7564 size++; 7565 7566 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL); 7567 if (!ata_force_tbl) { 7568 printk(KERN_WARNING "ata: failed to extend force table, " 7569 "libata.force ignored\n"); 7570 return; 7571 } 7572 7573 /* parse and populate the table */ 7574 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) { 7575 const char *reason = ""; 7576 struct ata_force_ent te = { .port = -1, .device = -1 }; 7577 7578 next = cur; 7579 if (ata_parse_force_one(&next, &te, &reason)) { 7580 printk(KERN_WARNING "ata: failed to parse force " 7581 "parameter \"%s\" (%s)\n", 7582 cur, reason); 7583 continue; 7584 } 7585 7586 if (te.port == -1) { 7587 te.port = last_port; 7588 te.device = last_device; 7589 } 7590 7591 ata_force_tbl[idx++] = te; 7592 7593 last_port = te.port; 7594 last_device = te.device; 7595 } 7596 7597 ata_force_tbl_size = idx; 7598} 7599 7600static int __init ata_init(void) 7601{ 7602 ata_probe_timeout *= HZ; 7603 7604 ata_parse_force_param(); 7605 7606 ata_wq = create_workqueue("ata"); 7607 if (!ata_wq) 7608 return -ENOMEM; 7609 7610 ata_aux_wq = create_singlethread_workqueue("ata_aux"); 7611 if (!ata_aux_wq) { 7612 destroy_workqueue(ata_wq); 7613 return -ENOMEM; 7614 } 7615 7616 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n"); 7617 return 0; 7618} 7619 7620static void __exit ata_exit(void) 7621{ 7622 kfree(ata_force_tbl); 7623 destroy_workqueue(ata_wq); 7624 destroy_workqueue(ata_aux_wq); 7625} 7626 7627subsys_initcall(ata_init); 7628module_exit(ata_exit); 7629 7630static unsigned long ratelimit_time; 7631static 
DEFINE_SPINLOCK(ata_ratelimit_lock); 7632 7633int ata_ratelimit(void) 7634{ 7635 int rc; 7636 unsigned long flags; 7637 7638 spin_lock_irqsave(&ata_ratelimit_lock, flags); 7639 7640 if (time_after(jiffies, ratelimit_time)) { 7641 rc = 1; 7642 ratelimit_time = jiffies + (HZ/5); 7643 } else 7644 rc = 0; 7645 7646 spin_unlock_irqrestore(&ata_ratelimit_lock, flags); 7647 7648 return rc; 7649} 7650 7651/** 7652 * ata_wait_register - wait until register value changes 7653 * @reg: IO-mapped register 7654 * @mask: Mask to apply to read register value 7655 * @val: Wait condition 7656 * @interval_msec: polling interval in milliseconds 7657 * @timeout_msec: timeout in milliseconds 7658 * 7659 * Waiting for some bits of register to change is a common 7660 * operation for ATA controllers. This function reads 32bit LE 7661 * IO-mapped register @reg and tests for the following condition. 7662 * 7663 * (*@reg & mask) != val 7664 * 7665 * If the condition is met, it returns; otherwise, the process is 7666 * repeated after @interval_msec until timeout. 7667 * 7668 * LOCKING: 7669 * Kernel thread context (may sleep) 7670 * 7671 * RETURNS: 7672 * The final register value. 7673 */ 7674u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val, 7675 unsigned long interval_msec, 7676 unsigned long timeout_msec) 7677{ 7678 unsigned long timeout; 7679 u32 tmp; 7680 7681 tmp = ioread32(reg); 7682 7683 /* Calculate timeout _after_ the first read to make sure 7684 * preceding writes reach the controller before starting to 7685 * eat away the timeout. 
7686 */ 7687 timeout = jiffies + (timeout_msec * HZ) / 1000; 7688 7689 while ((tmp & mask) == val && time_before(jiffies, timeout)) { 7690 msleep(interval_msec); 7691 tmp = ioread32(reg); 7692 } 7693 7694 return tmp; 7695} 7696 7697/* 7698 * Dummy port_ops 7699 */ 7700static void ata_dummy_noret(struct ata_port *ap) { } 7701static int ata_dummy_ret0(struct ata_port *ap) { return 0; } 7702static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { } 7703 7704static u8 ata_dummy_check_status(struct ata_port *ap) 7705{ 7706 return ATA_DRDY; 7707} 7708 7709static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc) 7710{ 7711 return AC_ERR_SYSTEM; 7712} 7713 7714const struct ata_port_operations ata_dummy_port_ops = { 7715 .check_status = ata_dummy_check_status, 7716 .check_altstatus = ata_dummy_check_status, 7717 .dev_select = ata_noop_dev_select, 7718 .qc_prep = ata_noop_qc_prep, 7719 .qc_issue = ata_dummy_qc_issue, 7720 .freeze = ata_dummy_noret, 7721 .thaw = ata_dummy_noret, 7722 .error_handler = ata_dummy_noret, 7723 .post_internal_cmd = ata_dummy_qc_noret, 7724 .irq_clear = ata_dummy_noret, 7725 .port_start = ata_dummy_ret0, 7726 .port_stop = ata_dummy_noret, 7727}; 7728 7729const struct ata_port_info ata_dummy_port_info = { 7730 .port_ops = &ata_dummy_port_ops, 7731}; 7732 7733/* 7734 * libata is essentially a library of internal helper functions for 7735 * low-level ATA host controller drivers. As such, the API/ABI is 7736 * likely to change as new drivers are added and updated. 7737 * Do not depend on ABI/API stability. 
7738 */ 7739EXPORT_SYMBOL_GPL(sata_deb_timing_normal); 7740EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug); 7741EXPORT_SYMBOL_GPL(sata_deb_timing_long); 7742EXPORT_SYMBOL_GPL(ata_dummy_port_ops); 7743EXPORT_SYMBOL_GPL(ata_dummy_port_info); 7744EXPORT_SYMBOL_GPL(ata_std_bios_param); 7745EXPORT_SYMBOL_GPL(ata_std_ports); 7746EXPORT_SYMBOL_GPL(ata_host_init); 7747EXPORT_SYMBOL_GPL(ata_host_alloc); 7748EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo); 7749EXPORT_SYMBOL_GPL(ata_host_start); 7750EXPORT_SYMBOL_GPL(ata_host_register); 7751EXPORT_SYMBOL_GPL(ata_host_activate); 7752EXPORT_SYMBOL_GPL(ata_host_detach); 7753EXPORT_SYMBOL_GPL(ata_sg_init); 7754EXPORT_SYMBOL_GPL(ata_hsm_move); 7755EXPORT_SYMBOL_GPL(ata_qc_complete); 7756EXPORT_SYMBOL_GPL(ata_qc_complete_multiple); 7757EXPORT_SYMBOL_GPL(ata_qc_issue_prot); 7758EXPORT_SYMBOL_GPL(ata_tf_load); 7759EXPORT_SYMBOL_GPL(ata_tf_read); 7760EXPORT_SYMBOL_GPL(ata_noop_dev_select); 7761EXPORT_SYMBOL_GPL(ata_std_dev_select); 7762EXPORT_SYMBOL_GPL(sata_print_link_status); 7763EXPORT_SYMBOL_GPL(ata_tf_to_fis); 7764EXPORT_SYMBOL_GPL(ata_tf_from_fis); 7765EXPORT_SYMBOL_GPL(ata_pack_xfermask); 7766EXPORT_SYMBOL_GPL(ata_unpack_xfermask); 7767EXPORT_SYMBOL_GPL(ata_xfer_mask2mode); 7768EXPORT_SYMBOL_GPL(ata_xfer_mode2mask); 7769EXPORT_SYMBOL_GPL(ata_xfer_mode2shift); 7770EXPORT_SYMBOL_GPL(ata_mode_string); 7771EXPORT_SYMBOL_GPL(ata_id_xfermask); 7772EXPORT_SYMBOL_GPL(ata_check_status); 7773EXPORT_SYMBOL_GPL(ata_altstatus); 7774EXPORT_SYMBOL_GPL(ata_exec_command); 7775EXPORT_SYMBOL_GPL(ata_port_start); 7776EXPORT_SYMBOL_GPL(ata_sff_port_start); 7777EXPORT_SYMBOL_GPL(ata_interrupt); 7778EXPORT_SYMBOL_GPL(ata_do_set_mode); 7779EXPORT_SYMBOL_GPL(ata_data_xfer); 7780EXPORT_SYMBOL_GPL(ata_data_xfer_noirq); 7781EXPORT_SYMBOL_GPL(ata_std_qc_defer); 7782EXPORT_SYMBOL_GPL(ata_qc_prep); 7783EXPORT_SYMBOL_GPL(ata_dumb_qc_prep); 7784EXPORT_SYMBOL_GPL(ata_noop_qc_prep); 7785EXPORT_SYMBOL_GPL(ata_bmdma_setup); 7786EXPORT_SYMBOL_GPL(ata_bmdma_start); 
7787EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear); 7788EXPORT_SYMBOL_GPL(ata_bmdma_status); 7789EXPORT_SYMBOL_GPL(ata_bmdma_stop); 7790EXPORT_SYMBOL_GPL(ata_bmdma_freeze); 7791EXPORT_SYMBOL_GPL(ata_bmdma_thaw); 7792EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh); 7793EXPORT_SYMBOL_GPL(ata_bmdma_error_handler); 7794EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd); 7795EXPORT_SYMBOL_GPL(ata_port_probe); 7796EXPORT_SYMBOL_GPL(ata_dev_disable); 7797EXPORT_SYMBOL_GPL(sata_set_spd); 7798EXPORT_SYMBOL_GPL(sata_link_debounce); 7799EXPORT_SYMBOL_GPL(sata_link_resume); 7800EXPORT_SYMBOL_GPL(ata_bus_reset); 7801EXPORT_SYMBOL_GPL(ata_std_prereset); 7802EXPORT_SYMBOL_GPL(ata_std_softreset); 7803EXPORT_SYMBOL_GPL(sata_link_hardreset); 7804EXPORT_SYMBOL_GPL(sata_std_hardreset); 7805EXPORT_SYMBOL_GPL(ata_std_postreset); 7806EXPORT_SYMBOL_GPL(ata_dev_classify); 7807EXPORT_SYMBOL_GPL(ata_dev_pair); 7808EXPORT_SYMBOL_GPL(ata_port_disable); 7809EXPORT_SYMBOL_GPL(ata_ratelimit); 7810EXPORT_SYMBOL_GPL(ata_wait_register); 7811EXPORT_SYMBOL_GPL(ata_busy_sleep); 7812EXPORT_SYMBOL_GPL(ata_wait_after_reset); 7813EXPORT_SYMBOL_GPL(ata_wait_ready); 7814EXPORT_SYMBOL_GPL(ata_scsi_ioctl); 7815EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); 7816EXPORT_SYMBOL_GPL(ata_scsi_slave_config); 7817EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy); 7818EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth); 7819EXPORT_SYMBOL_GPL(ata_host_intr); 7820EXPORT_SYMBOL_GPL(sata_scr_valid); 7821EXPORT_SYMBOL_GPL(sata_scr_read); 7822EXPORT_SYMBOL_GPL(sata_scr_write); 7823EXPORT_SYMBOL_GPL(sata_scr_write_flush); 7824EXPORT_SYMBOL_GPL(ata_link_online); 7825EXPORT_SYMBOL_GPL(ata_link_offline); 7826#ifdef CONFIG_PM 7827EXPORT_SYMBOL_GPL(ata_host_suspend); 7828EXPORT_SYMBOL_GPL(ata_host_resume); 7829#endif /* CONFIG_PM */ 7830EXPORT_SYMBOL_GPL(ata_id_string); 7831EXPORT_SYMBOL_GPL(ata_id_c_string); 7832EXPORT_SYMBOL_GPL(ata_scsi_simulate); 7833 7834EXPORT_SYMBOL_GPL(ata_pio_need_iordy); 7835EXPORT_SYMBOL_GPL(ata_timing_find_mode); 
7836EXPORT_SYMBOL_GPL(ata_timing_compute); 7837EXPORT_SYMBOL_GPL(ata_timing_merge); 7838EXPORT_SYMBOL_GPL(ata_timing_cycle2mode); 7839 7840#ifdef CONFIG_PCI 7841EXPORT_SYMBOL_GPL(pci_test_config_bits); 7842EXPORT_SYMBOL_GPL(ata_pci_init_sff_host); 7843EXPORT_SYMBOL_GPL(ata_pci_init_bmdma); 7844EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host); 7845EXPORT_SYMBOL_GPL(ata_pci_activate_sff_host); 7846EXPORT_SYMBOL_GPL(ata_pci_init_one); 7847EXPORT_SYMBOL_GPL(ata_pci_remove_one); 7848#ifdef CONFIG_PM 7849EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend); 7850EXPORT_SYMBOL_GPL(ata_pci_device_do_resume); 7851EXPORT_SYMBOL_GPL(ata_pci_device_suspend); 7852EXPORT_SYMBOL_GPL(ata_pci_device_resume); 7853#endif /* CONFIG_PM */ 7854EXPORT_SYMBOL_GPL(ata_pci_default_filter); 7855EXPORT_SYMBOL_GPL(ata_pci_clear_simplex); 7856#endif /* CONFIG_PCI */ 7857 7858EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch); 7859EXPORT_SYMBOL_GPL(sata_pmp_std_prereset); 7860EXPORT_SYMBOL_GPL(sata_pmp_std_hardreset); 7861EXPORT_SYMBOL_GPL(sata_pmp_std_postreset); 7862EXPORT_SYMBOL_GPL(sata_pmp_do_eh); 7863 7864EXPORT_SYMBOL_GPL(__ata_ehi_push_desc); 7865EXPORT_SYMBOL_GPL(ata_ehi_push_desc); 7866EXPORT_SYMBOL_GPL(ata_ehi_clear_desc); 7867EXPORT_SYMBOL_GPL(ata_port_desc); 7868#ifdef CONFIG_PCI 7869EXPORT_SYMBOL_GPL(ata_port_pbar_desc); 7870#endif /* CONFIG_PCI */ 7871EXPORT_SYMBOL_GPL(ata_port_schedule_eh); 7872EXPORT_SYMBOL_GPL(ata_link_abort); 7873EXPORT_SYMBOL_GPL(ata_port_abort); 7874EXPORT_SYMBOL_GPL(ata_port_freeze); 7875EXPORT_SYMBOL_GPL(sata_async_notification); 7876EXPORT_SYMBOL_GPL(ata_eh_freeze_port); 7877EXPORT_SYMBOL_GPL(ata_eh_thaw_port); 7878EXPORT_SYMBOL_GPL(ata_eh_qc_complete); 7879EXPORT_SYMBOL_GPL(ata_eh_qc_retry); 7880EXPORT_SYMBOL_GPL(ata_do_eh); 7881EXPORT_SYMBOL_GPL(ata_irq_on); 7882EXPORT_SYMBOL_GPL(ata_dev_try_classify); 7883 7884EXPORT_SYMBOL_GPL(ata_cable_40wire); 7885EXPORT_SYMBOL_GPL(ata_cable_80wire); 7886EXPORT_SYMBOL_GPL(ata_cable_unknown); 7887EXPORT_SYMBOL_GPL(ata_cable_ignore); 
7888EXPORT_SYMBOL_GPL(ata_cable_sata);