/* include/linux/libata.h — as of Linux v2.6.18 */
1/* 2 * Copyright 2003-2005 Red Hat, Inc. All rights reserved. 3 * Copyright 2003-2005 Jeff Garzik 4 * 5 * 6 * This program is free software; you can redistribute it and/or modify 7 * it under the terms of the GNU General Public License as published by 8 * the Free Software Foundation; either version 2, or (at your option) 9 * any later version. 10 * 11 * This program is distributed in the hope that it will be useful, 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * GNU General Public License for more details. 15 * 16 * You should have received a copy of the GNU General Public License 17 * along with this program; see the file COPYING. If not, write to 18 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 19 * 20 * 21 * libata documentation is available via 'make {ps|pdf}docs', 22 * as Documentation/DocBook/libata.* 23 * 24 */ 25 26#ifndef __LINUX_LIBATA_H__ 27#define __LINUX_LIBATA_H__ 28 29#include <linux/delay.h> 30#include <linux/interrupt.h> 31#include <linux/pci.h> 32#include <linux/dma-mapping.h> 33#include <asm/scatterlist.h> 34#include <asm/io.h> 35#include <linux/ata.h> 36#include <linux/workqueue.h> 37#include <scsi/scsi_host.h> 38 39/* 40 * compile-time options: to be removed as soon as all the drivers are 41 * converted to the new debugging mechanism 42 */ 43#undef ATA_DEBUG /* debugging output */ 44#undef ATA_VERBOSE_DEBUG /* yet more debugging output */ 45#undef ATA_IRQ_TRAP /* define to ack screaming irqs */ 46#undef ATA_NDEBUG /* define to disable quick runtime checks */ 47#undef ATA_ENABLE_PATA /* define to enable PATA support in some 48 * low-level drivers */ 49 50 51/* note: prints function name for you */ 52#ifdef ATA_DEBUG 53#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args) 54#ifdef ATA_VERBOSE_DEBUG 55#define VPRINTK(fmt, args...) 
printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args) 56#else 57#define VPRINTK(fmt, args...) 58#endif /* ATA_VERBOSE_DEBUG */ 59#else 60#define DPRINTK(fmt, args...) 61#define VPRINTK(fmt, args...) 62#endif /* ATA_DEBUG */ 63 64#define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args) 65 66/* NEW: debug levels */ 67#define HAVE_LIBATA_MSG 1 68 69enum { 70 ATA_MSG_DRV = 0x0001, 71 ATA_MSG_INFO = 0x0002, 72 ATA_MSG_PROBE = 0x0004, 73 ATA_MSG_WARN = 0x0008, 74 ATA_MSG_MALLOC = 0x0010, 75 ATA_MSG_CTL = 0x0020, 76 ATA_MSG_INTR = 0x0040, 77 ATA_MSG_ERR = 0x0080, 78}; 79 80#define ata_msg_drv(p) ((p)->msg_enable & ATA_MSG_DRV) 81#define ata_msg_info(p) ((p)->msg_enable & ATA_MSG_INFO) 82#define ata_msg_probe(p) ((p)->msg_enable & ATA_MSG_PROBE) 83#define ata_msg_warn(p) ((p)->msg_enable & ATA_MSG_WARN) 84#define ata_msg_malloc(p) ((p)->msg_enable & ATA_MSG_MALLOC) 85#define ata_msg_ctl(p) ((p)->msg_enable & ATA_MSG_CTL) 86#define ata_msg_intr(p) ((p)->msg_enable & ATA_MSG_INTR) 87#define ata_msg_err(p) ((p)->msg_enable & ATA_MSG_ERR) 88 89static inline u32 ata_msg_init(int dval, int default_msg_enable_bits) 90{ 91 if (dval < 0 || dval >= (sizeof(u32) * 8)) 92 return default_msg_enable_bits; /* should be 0x1 - only driver info msgs */ 93 if (!dval) 94 return 0; 95 return (1 << dval) - 1; 96} 97 98/* defines only for the constants which don't work well as enums */ 99#define ATA_TAG_POISON 0xfafbfcfdU 100 101/* move to PCI layer? 
*/ 102static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) 103{ 104 return &pdev->dev; 105} 106 107enum { 108 /* various global constants */ 109 LIBATA_MAX_PRD = ATA_MAX_PRD / 2, 110 ATA_MAX_PORTS = 8, 111 ATA_DEF_QUEUE = 1, 112 /* tag ATA_MAX_QUEUE - 1 is reserved for internal commands */ 113 ATA_MAX_QUEUE = 32, 114 ATA_TAG_INTERNAL = ATA_MAX_QUEUE - 1, 115 ATA_MAX_SECTORS = 200, /* FIXME */ 116 ATA_MAX_SECTORS_LBA48 = 65535, 117 ATA_MAX_BUS = 2, 118 ATA_DEF_BUSY_WAIT = 10000, 119 ATA_SHORT_PAUSE = (HZ >> 6) + 1, 120 121 ATA_SHT_EMULATED = 1, 122 ATA_SHT_CMD_PER_LUN = 1, 123 ATA_SHT_THIS_ID = -1, 124 ATA_SHT_USE_CLUSTERING = 1, 125 126 /* struct ata_device stuff */ 127 ATA_DFLAG_LBA = (1 << 0), /* device supports LBA */ 128 ATA_DFLAG_LBA48 = (1 << 1), /* device supports LBA48 */ 129 ATA_DFLAG_CDB_INTR = (1 << 2), /* device asserts INTRQ when ready for CDB */ 130 ATA_DFLAG_NCQ = (1 << 3), /* device supports NCQ */ 131 ATA_DFLAG_CFG_MASK = (1 << 8) - 1, 132 133 ATA_DFLAG_PIO = (1 << 8), /* device currently in PIO mode */ 134 ATA_DFLAG_SUSPENDED = (1 << 9), /* device suspended */ 135 ATA_DFLAG_INIT_MASK = (1 << 16) - 1, 136 137 ATA_DFLAG_DETACH = (1 << 16), 138 ATA_DFLAG_DETACHED = (1 << 17), 139 140 ATA_DEV_UNKNOWN = 0, /* unknown device */ 141 ATA_DEV_ATA = 1, /* ATA device */ 142 ATA_DEV_ATA_UNSUP = 2, /* ATA device (unsupported) */ 143 ATA_DEV_ATAPI = 3, /* ATAPI device */ 144 ATA_DEV_ATAPI_UNSUP = 4, /* ATAPI device (unsupported) */ 145 ATA_DEV_NONE = 5, /* no device */ 146 147 /* struct ata_port flags */ 148 ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */ 149 /* (doesn't imply presence) */ 150 ATA_FLAG_SATA = (1 << 1), 151 ATA_FLAG_NO_LEGACY = (1 << 2), /* no legacy mode check */ 152 ATA_FLAG_MMIO = (1 << 3), /* use MMIO, not PIO */ 153 ATA_FLAG_SRST = (1 << 4), /* (obsolete) use ATA SRST, not E.D.D. 
*/ 154 ATA_FLAG_SATA_RESET = (1 << 5), /* (obsolete) use COMRESET */ 155 ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */ 156 ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */ 157 ATA_FLAG_PIO_LBA48 = (1 << 8), /* Host DMA engine is LBA28 only */ 158 ATA_FLAG_PIO_POLLING = (1 << 9), /* use polling PIO if LLD 159 * doesn't handle PIO interrupts */ 160 ATA_FLAG_NCQ = (1 << 10), /* host supports NCQ */ 161 ATA_FLAG_HRST_TO_RESUME = (1 << 11), /* hardreset to resume phy */ 162 ATA_FLAG_SKIP_D2H_BSY = (1 << 12), /* can't wait for the first D2H 163 * Register FIS clearing BSY */ 164 ATA_FLAG_DEBUGMSG = (1 << 13), 165 166 /* The following flag belongs to ap->pflags but is kept in 167 * ap->flags because it's referenced in many LLDs and will be 168 * removed in not-too-distant future. 169 */ 170 ATA_FLAG_DISABLED = (1 << 23), /* port is disabled, ignore it */ 171 172 /* bits 24:31 of ap->flags are reserved for LLD specific flags */ 173 174 /* struct ata_port pflags */ 175 ATA_PFLAG_EH_PENDING = (1 << 0), /* EH pending */ 176 ATA_PFLAG_EH_IN_PROGRESS = (1 << 1), /* EH in progress */ 177 ATA_PFLAG_FROZEN = (1 << 2), /* port is frozen */ 178 ATA_PFLAG_RECOVERED = (1 << 3), /* recovery action performed */ 179 ATA_PFLAG_LOADING = (1 << 4), /* boot/loading probe */ 180 ATA_PFLAG_UNLOADING = (1 << 5), /* module is unloading */ 181 ATA_PFLAG_SCSI_HOTPLUG = (1 << 6), /* SCSI hotplug scheduled */ 182 183 ATA_PFLAG_FLUSH_PORT_TASK = (1 << 16), /* flush port task */ 184 ATA_PFLAG_SUSPENDED = (1 << 17), /* port is suspended (power) */ 185 ATA_PFLAG_PM_PENDING = (1 << 18), /* PM operation pending */ 186 187 /* struct ata_queued_cmd flags */ 188 ATA_QCFLAG_ACTIVE = (1 << 0), /* cmd not yet ack'd to scsi lyer */ 189 ATA_QCFLAG_SG = (1 << 1), /* have s/g table? 
*/ 190 ATA_QCFLAG_SINGLE = (1 << 2), /* no s/g, just a single buffer */ 191 ATA_QCFLAG_DMAMAP = ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE, 192 ATA_QCFLAG_IO = (1 << 3), /* standard IO command */ 193 ATA_QCFLAG_RESULT_TF = (1 << 4), /* result TF requested */ 194 195 ATA_QCFLAG_FAILED = (1 << 16), /* cmd failed and is owned by EH */ 196 ATA_QCFLAG_SENSE_VALID = (1 << 17), /* sense data valid */ 197 ATA_QCFLAG_EH_SCHEDULED = (1 << 18), /* EH scheduled (obsolete) */ 198 199 /* host set flags */ 200 ATA_HOST_SIMPLEX = (1 << 0), /* Host is simplex, one DMA channel per host_set only */ 201 202 /* various lengths of time */ 203 ATA_TMOUT_BOOT = 30 * HZ, /* heuristic */ 204 ATA_TMOUT_BOOT_QUICK = 7 * HZ, /* heuristic */ 205 ATA_TMOUT_INTERNAL = 30 * HZ, 206 ATA_TMOUT_INTERNAL_QUICK = 5 * HZ, 207 208 /* ATA bus states */ 209 BUS_UNKNOWN = 0, 210 BUS_DMA = 1, 211 BUS_IDLE = 2, 212 BUS_NOINTR = 3, 213 BUS_NODATA = 4, 214 BUS_TIMER = 5, 215 BUS_PIO = 6, 216 BUS_EDD = 7, 217 BUS_IDENTIFY = 8, 218 BUS_PACKET = 9, 219 220 /* SATA port states */ 221 PORT_UNKNOWN = 0, 222 PORT_ENABLED = 1, 223 PORT_DISABLED = 2, 224 225 /* encoding various smaller bitmaps into a single 226 * unsigned int bitmap 227 */ 228 ATA_BITS_PIO = 5, 229 ATA_BITS_MWDMA = 3, 230 ATA_BITS_UDMA = 8, 231 232 ATA_SHIFT_PIO = 0, 233 ATA_SHIFT_MWDMA = ATA_SHIFT_PIO + ATA_BITS_PIO, 234 ATA_SHIFT_UDMA = ATA_SHIFT_MWDMA + ATA_BITS_MWDMA, 235 236 ATA_MASK_PIO = ((1 << ATA_BITS_PIO) - 1) << ATA_SHIFT_PIO, 237 ATA_MASK_MWDMA = ((1 << ATA_BITS_MWDMA) - 1) << ATA_SHIFT_MWDMA, 238 ATA_MASK_UDMA = ((1 << ATA_BITS_UDMA) - 1) << ATA_SHIFT_UDMA, 239 240 /* size of buffer to pad xfers ending on unaligned boundaries */ 241 ATA_DMA_PAD_SZ = 4, 242 ATA_DMA_PAD_BUF_SZ = ATA_DMA_PAD_SZ * ATA_MAX_QUEUE, 243 244 /* masks for port functions */ 245 ATA_PORT_PRIMARY = (1 << 0), 246 ATA_PORT_SECONDARY = (1 << 1), 247 248 /* ering size */ 249 ATA_ERING_SIZE = 32, 250 251 /* desc_len for ata_eh_info and context */ 252 ATA_EH_DESC_LEN = 80, 253 254 /* 
reset / recovery action types */ 255 ATA_EH_REVALIDATE = (1 << 0), 256 ATA_EH_SOFTRESET = (1 << 1), 257 ATA_EH_HARDRESET = (1 << 2), 258 ATA_EH_SUSPEND = (1 << 3), 259 ATA_EH_RESUME = (1 << 4), 260 ATA_EH_PM_FREEZE = (1 << 5), 261 262 ATA_EH_RESET_MASK = ATA_EH_SOFTRESET | ATA_EH_HARDRESET, 263 ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE | ATA_EH_SUSPEND | 264 ATA_EH_RESUME | ATA_EH_PM_FREEZE, 265 266 /* ata_eh_info->flags */ 267 ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */ 268 ATA_EHI_RESUME_LINK = (1 << 1), /* resume link (reset modifier) */ 269 ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */ 270 ATA_EHI_QUIET = (1 << 3), /* be quiet */ 271 272 ATA_EHI_DID_RESET = (1 << 16), /* already reset this port */ 273 274 ATA_EHI_RESET_MODIFIER_MASK = ATA_EHI_RESUME_LINK, 275 276 /* max repeat if error condition is still set after ->error_handler */ 277 ATA_EH_MAX_REPEAT = 5, 278 279 /* how hard are we gonna try to probe/recover devices */ 280 ATA_PROBE_MAX_TRIES = 3, 281 ATA_EH_RESET_TRIES = 3, 282 ATA_EH_DEV_TRIES = 3, 283 284 /* Drive spinup time (time from power-on to the first D2H FIS) 285 * in msecs - 8s currently. Failing to get ready in this time 286 * isn't critical. It will result in reset failure for 287 * controllers which can't wait for the first D2H FIS. libata 288 * will retry, so it just has to be long enough to spin up 289 * most devices. 
290 */ 291 ATA_SPINUP_WAIT = 8000, 292}; 293 294enum hsm_task_states { 295 HSM_ST_UNKNOWN, /* state unknown */ 296 HSM_ST_IDLE, /* no command on going */ 297 HSM_ST, /* (waiting the device to) transfer data */ 298 HSM_ST_LAST, /* (waiting the device to) complete command */ 299 HSM_ST_ERR, /* error */ 300 HSM_ST_FIRST, /* (waiting the device to) 301 write CDB or first data block */ 302}; 303 304enum ata_completion_errors { 305 AC_ERR_DEV = (1 << 0), /* device reported error */ 306 AC_ERR_HSM = (1 << 1), /* host state machine violation */ 307 AC_ERR_TIMEOUT = (1 << 2), /* timeout */ 308 AC_ERR_MEDIA = (1 << 3), /* media error */ 309 AC_ERR_ATA_BUS = (1 << 4), /* ATA bus error */ 310 AC_ERR_HOST_BUS = (1 << 5), /* host bus error */ 311 AC_ERR_SYSTEM = (1 << 6), /* system error */ 312 AC_ERR_INVALID = (1 << 7), /* invalid argument */ 313 AC_ERR_OTHER = (1 << 8), /* unknown */ 314}; 315 316/* forward declarations */ 317struct scsi_device; 318struct ata_port_operations; 319struct ata_port; 320struct ata_queued_cmd; 321 322/* typedefs */ 323typedef void (*ata_qc_cb_t) (struct ata_queued_cmd *qc); 324typedef int (*ata_prereset_fn_t)(struct ata_port *ap); 325typedef int (*ata_reset_fn_t)(struct ata_port *ap, unsigned int *classes); 326typedef void (*ata_postreset_fn_t)(struct ata_port *ap, unsigned int *classes); 327 328struct ata_ioports { 329 unsigned long cmd_addr; 330 unsigned long data_addr; 331 unsigned long error_addr; 332 unsigned long feature_addr; 333 unsigned long nsect_addr; 334 unsigned long lbal_addr; 335 unsigned long lbam_addr; 336 unsigned long lbah_addr; 337 unsigned long device_addr; 338 unsigned long status_addr; 339 unsigned long command_addr; 340 unsigned long altstatus_addr; 341 unsigned long ctl_addr; 342 unsigned long bmdma_addr; 343 unsigned long scr_addr; 344}; 345 346struct ata_probe_ent { 347 struct list_head node; 348 struct device *dev; 349 const struct ata_port_operations *port_ops; 350 struct scsi_host_template *sht; 351 struct ata_ioports 
port[ATA_MAX_PORTS]; 352 unsigned int n_ports; 353 unsigned int hard_port_no; 354 unsigned int pio_mask; 355 unsigned int mwdma_mask; 356 unsigned int udma_mask; 357 unsigned int legacy_mode; 358 unsigned long irq; 359 unsigned int irq_flags; 360 unsigned long host_flags; 361 unsigned long host_set_flags; 362 void __iomem *mmio_base; 363 void *private_data; 364}; 365 366struct ata_host_set { 367 spinlock_t lock; 368 struct device *dev; 369 unsigned long irq; 370 void __iomem *mmio_base; 371 unsigned int n_ports; 372 void *private_data; 373 const struct ata_port_operations *ops; 374 unsigned long flags; 375 int simplex_claimed; /* Keep seperate in case we 376 ever need to do this locked */ 377 struct ata_host_set *next; /* for legacy mode */ 378 struct ata_port *ports[0]; 379}; 380 381struct ata_queued_cmd { 382 struct ata_port *ap; 383 struct ata_device *dev; 384 385 struct scsi_cmnd *scsicmd; 386 void (*scsidone)(struct scsi_cmnd *); 387 388 struct ata_taskfile tf; 389 u8 cdb[ATAPI_CDB_LEN]; 390 391 unsigned long flags; /* ATA_QCFLAG_xxx */ 392 unsigned int tag; 393 unsigned int n_elem; 394 unsigned int orig_n_elem; 395 396 int dma_dir; 397 398 unsigned int pad_len; 399 400 unsigned int nsect; 401 unsigned int cursect; 402 403 unsigned int nbytes; 404 unsigned int curbytes; 405 406 unsigned int cursg; 407 unsigned int cursg_ofs; 408 409 struct scatterlist sgent; 410 struct scatterlist pad_sgent; 411 void *buf_virt; 412 413 /* DO NOT iterate over __sg manually, use ata_for_each_sg() */ 414 struct scatterlist *__sg; 415 416 unsigned int err_mask; 417 struct ata_taskfile result_tf; 418 ata_qc_cb_t complete_fn; 419 420 void *private_data; 421}; 422 423struct ata_host_stats { 424 unsigned long unhandled_irq; 425 unsigned long idle_irq; 426 unsigned long rw_reqbuf; 427}; 428 429struct ata_ering_entry { 430 int is_io; 431 unsigned int err_mask; 432 u64 timestamp; 433}; 434 435struct ata_ering { 436 int cursor; 437 struct ata_ering_entry ring[ATA_ERING_SIZE]; 438}; 439 
/* Per-device state.  One instance per possible device on a port. */
struct ata_device {
	struct ata_port		*ap;		/* port this device hangs off */
	unsigned int		devno;		/* 0 or 1 */
	unsigned long		flags;		/* ATA_DFLAG_xxx */
	struct scsi_device	*sdev;		/* attached SCSI device */
	/* n_sector is used as CLEAR_OFFSET, read comment above CLEAR_OFFSET */
	u64			n_sectors;	/* size of device, if ATA */
	unsigned int		class;		/* ATA_DEV_xxx */
	u16			id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */
	u8			pio_mode;	/* current PIO transfer mode */
	u8			dma_mode;	/* current DMA transfer mode */
	u8			xfer_mode;	/* mode actually in use */
	unsigned int		xfer_shift;	/* ATA_SHIFT_xxx */

	unsigned int		multi_count;	/* sectors count for
						   READ/WRITE MULTIPLE */
	unsigned int		max_sectors;	/* per-device max sectors */
	unsigned int		cdb_len;	/* ATAPI CDB length, 12 or 16 */

	/* per-dev xfer mask */
	unsigned int		pio_mask;
	unsigned int		mwdma_mask;
	unsigned int		udma_mask;

	/* for CHS addressing */
	u16			cylinders;	/* Number of cylinders */
	u16			heads;		/* Number of heads */
	u16			sectors;	/* Number of sectors per track */

	/* error history */
	struct ata_ering	ering;
};

/* Offset into struct ata_device.  Fields above it are maintained
 * across device init.  Fields below are zeroed.
 */
#define ATA_DEVICE_CLEAR_OFFSET		offsetof(struct ata_device, n_sectors)

/* Error information filled in at interrupt/completion time; consumed
 * by the error handler.  Protected by the host_set lock.
 */
struct ata_eh_info {
	struct ata_device	*dev;		/* offending device */
	u32			serror;		/* SError from LLDD */
	unsigned int		err_mask;	/* port-wide err_mask */
	unsigned int		action;		/* ATA_EH_* action mask */
	unsigned int		dev_action[ATA_MAX_DEVICES]; /* dev EH action */
	unsigned int		flags;		/* ATA_EHI_* flags */

	unsigned long		hotplug_timestamp;
	unsigned int		probe_mask;

	char			desc[ATA_EH_DESC_LEN];
	int			desc_len;
};

/* EH-private working state; owned exclusively by the error handler */
struct ata_eh_context {
	struct ata_eh_info	i;
	int			tries[ATA_MAX_DEVICES];
	unsigned int		classes[ATA_MAX_DEVICES];
	unsigned int		did_probe_mask;
};

struct ata_port {
	struct Scsi_Host	*host;	/* our co-allocated scsi host */
	const struct ata_port_operations *ops;
	spinlock_t		*lock;
	unsigned long		flags;	/* ATA_FLAG_xxx */
	unsigned int		pflags;	/* ATA_PFLAG_xxx */
	unsigned int		id;	/* unique id req'd by scsi midlyr */
	unsigned int		port_no; /* unique port #; from zero */
	unsigned int		hard_port_no;	/* hardware port #; from zero */

	struct ata_prd		*prd;	 /* our SG list */
	dma_addr_t		prd_dma; /* and its DMA mapping */

	void			*pad;	/* array of DMA pad buffers */
	dma_addr_t		pad_dma;

	struct ata_ioports	ioaddr;	/* ATA cmd/ctl/dma register blocks */

	u8			ctl;	/* cache of ATA control register */
	u8			last_ctl;	/* Cache last written value */
	unsigned int		pio_mask;
	unsigned int		mwdma_mask;
	unsigned int		udma_mask;
	unsigned int		cbl;	/* cable type; ATA_CBL_xxx */
	unsigned int		hw_sata_spd_limit;
	unsigned int		sata_spd_limit;	/* SATA PHY speed limit */

	/* record runtime error info, protected by host_set lock */
	struct ata_eh_info	eh_info;
	/* EH context owned by EH */
	struct ata_eh_context	eh_context;

	struct ata_device	device[ATA_MAX_DEVICES];

	struct ata_queued_cmd	qcmd[ATA_MAX_QUEUE];
	unsigned
long qc_allocated; 536 unsigned int qc_active; 537 538 unsigned int active_tag; 539 u32 sactive; 540 541 struct ata_host_stats stats; 542 struct ata_host_set *host_set; 543 struct device *dev; 544 545 struct work_struct port_task; 546 struct work_struct hotplug_task; 547 struct work_struct scsi_rescan_task; 548 549 unsigned int hsm_task_state; 550 551 u32 msg_enable; 552 struct list_head eh_done_q; 553 wait_queue_head_t eh_wait_q; 554 555 pm_message_t pm_mesg; 556 int *pm_result; 557 558 void *private_data; 559 560 u8 sector_buf[ATA_SECT_SIZE]; /* owned by EH */ 561}; 562 563struct ata_port_operations { 564 void (*port_disable) (struct ata_port *); 565 566 void (*dev_config) (struct ata_port *, struct ata_device *); 567 568 void (*set_piomode) (struct ata_port *, struct ata_device *); 569 void (*set_dmamode) (struct ata_port *, struct ata_device *); 570 unsigned long (*mode_filter) (const struct ata_port *, struct ata_device *, unsigned long); 571 572 void (*tf_load) (struct ata_port *ap, const struct ata_taskfile *tf); 573 void (*tf_read) (struct ata_port *ap, struct ata_taskfile *tf); 574 575 void (*exec_command)(struct ata_port *ap, const struct ata_taskfile *tf); 576 u8 (*check_status)(struct ata_port *ap); 577 u8 (*check_altstatus)(struct ata_port *ap); 578 void (*dev_select)(struct ata_port *ap, unsigned int device); 579 580 void (*phy_reset) (struct ata_port *ap); /* obsolete */ 581 void (*set_mode) (struct ata_port *ap); 582 583 void (*post_set_mode) (struct ata_port *ap); 584 585 int (*check_atapi_dma) (struct ata_queued_cmd *qc); 586 587 void (*bmdma_setup) (struct ata_queued_cmd *qc); 588 void (*bmdma_start) (struct ata_queued_cmd *qc); 589 590 void (*data_xfer) (struct ata_device *, unsigned char *, unsigned int, int); 591 592 void (*qc_prep) (struct ata_queued_cmd *qc); 593 unsigned int (*qc_issue) (struct ata_queued_cmd *qc); 594 595 /* Error handlers. ->error_handler overrides ->eng_timeout and 596 * indicates that new-style EH is in place. 
597 */ 598 void (*eng_timeout) (struct ata_port *ap); /* obsolete */ 599 600 void (*freeze) (struct ata_port *ap); 601 void (*thaw) (struct ata_port *ap); 602 void (*error_handler) (struct ata_port *ap); 603 void (*post_internal_cmd) (struct ata_queued_cmd *qc); 604 605 irqreturn_t (*irq_handler)(int, void *, struct pt_regs *); 606 void (*irq_clear) (struct ata_port *); 607 608 u32 (*scr_read) (struct ata_port *ap, unsigned int sc_reg); 609 void (*scr_write) (struct ata_port *ap, unsigned int sc_reg, 610 u32 val); 611 612 int (*port_suspend) (struct ata_port *ap, pm_message_t mesg); 613 int (*port_resume) (struct ata_port *ap); 614 615 int (*port_start) (struct ata_port *ap); 616 void (*port_stop) (struct ata_port *ap); 617 618 void (*host_stop) (struct ata_host_set *host_set); 619 620 void (*bmdma_stop) (struct ata_queued_cmd *qc); 621 u8 (*bmdma_status) (struct ata_port *ap); 622}; 623 624struct ata_port_info { 625 struct scsi_host_template *sht; 626 unsigned long host_flags; 627 unsigned long pio_mask; 628 unsigned long mwdma_mask; 629 unsigned long udma_mask; 630 const struct ata_port_operations *port_ops; 631 void *private_data; 632}; 633 634struct ata_timing { 635 unsigned short mode; /* ATA mode */ 636 unsigned short setup; /* t1 */ 637 unsigned short act8b; /* t2 for 8-bit I/O */ 638 unsigned short rec8b; /* t2i for 8-bit I/O */ 639 unsigned short cyc8b; /* t0 for 8-bit I/O */ 640 unsigned short active; /* t2 or tD */ 641 unsigned short recover; /* t2i or tK */ 642 unsigned short cycle; /* t0 */ 643 unsigned short udma; /* t2CYCTYP/2 */ 644}; 645 646#define FIT(v,vmin,vmax) max_t(short,min_t(short,v,vmax),vmin) 647 648extern const unsigned long sata_deb_timing_normal[]; 649extern const unsigned long sata_deb_timing_hotplug[]; 650extern const unsigned long sata_deb_timing_long[]; 651 652static inline const unsigned long * 653sata_ehc_deb_timing(struct ata_eh_context *ehc) 654{ 655 if (ehc->i.flags & ATA_EHI_HOTPLUGGED) 656 return sata_deb_timing_hotplug; 657 
else 658 return sata_deb_timing_normal; 659} 660 661extern void ata_port_probe(struct ata_port *); 662extern void __sata_phy_reset(struct ata_port *ap); 663extern void sata_phy_reset(struct ata_port *ap); 664extern void ata_bus_reset(struct ata_port *ap); 665extern int sata_set_spd(struct ata_port *ap); 666extern int sata_phy_debounce(struct ata_port *ap, const unsigned long *param); 667extern int sata_phy_resume(struct ata_port *ap, const unsigned long *param); 668extern int ata_std_prereset(struct ata_port *ap); 669extern int ata_std_softreset(struct ata_port *ap, unsigned int *classes); 670extern int sata_std_hardreset(struct ata_port *ap, unsigned int *class); 671extern void ata_std_postreset(struct ata_port *ap, unsigned int *classes); 672extern int ata_dev_revalidate(struct ata_device *dev, int post_reset); 673extern void ata_port_disable(struct ata_port *); 674extern void ata_std_ports(struct ata_ioports *ioaddr); 675#ifdef CONFIG_PCI 676extern int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info, 677 unsigned int n_ports); 678extern void ata_pci_remove_one (struct pci_dev *pdev); 679extern void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t state); 680extern void ata_pci_device_do_resume(struct pci_dev *pdev); 681extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state); 682extern int ata_pci_device_resume(struct pci_dev *pdev); 683extern int ata_pci_clear_simplex(struct pci_dev *pdev); 684#endif /* CONFIG_PCI */ 685extern int ata_device_add(const struct ata_probe_ent *ent); 686extern void ata_port_detach(struct ata_port *ap); 687extern void ata_host_set_remove(struct ata_host_set *host_set); 688extern int ata_scsi_detect(struct scsi_host_template *sht); 689extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg); 690extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)); 691extern int ata_scsi_release(struct Scsi_Host *host); 692extern unsigned int 
ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc); 693extern int sata_scr_valid(struct ata_port *ap); 694extern int sata_scr_read(struct ata_port *ap, int reg, u32 *val); 695extern int sata_scr_write(struct ata_port *ap, int reg, u32 val); 696extern int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val); 697extern int ata_port_online(struct ata_port *ap); 698extern int ata_port_offline(struct ata_port *ap); 699extern int ata_scsi_device_resume(struct scsi_device *); 700extern int ata_scsi_device_suspend(struct scsi_device *, pm_message_t state); 701extern int ata_host_set_suspend(struct ata_host_set *host_set, 702 pm_message_t mesg); 703extern void ata_host_set_resume(struct ata_host_set *host_set); 704extern int ata_ratelimit(void); 705extern unsigned int ata_busy_sleep(struct ata_port *ap, 706 unsigned long timeout_pat, 707 unsigned long timeout); 708extern void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), 709 void *data, unsigned long delay); 710extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val, 711 unsigned long interval_msec, 712 unsigned long timeout_msec); 713 714/* 715 * Default driver ops implementations 716 */ 717extern void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf); 718extern void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf); 719extern void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp); 720extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf); 721extern void ata_noop_dev_select (struct ata_port *ap, unsigned int device); 722extern void ata_std_dev_select (struct ata_port *ap, unsigned int device); 723extern u8 ata_check_status(struct ata_port *ap); 724extern u8 ata_altstatus(struct ata_port *ap); 725extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf); 726extern int ata_port_start (struct ata_port *ap); 727extern void ata_port_stop (struct ata_port *ap); 728extern void ata_host_stop (struct ata_host_set 
*host_set); 729extern irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs); 730extern void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf, 731 unsigned int buflen, int write_data); 732extern void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf, 733 unsigned int buflen, int write_data); 734extern void ata_pio_data_xfer_noirq(struct ata_device *adev, unsigned char *buf, 735 unsigned int buflen, int write_data); 736extern void ata_qc_prep(struct ata_queued_cmd *qc); 737extern void ata_noop_qc_prep(struct ata_queued_cmd *qc); 738extern unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc); 739extern void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, 740 unsigned int buflen); 741extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, 742 unsigned int n_elem); 743extern unsigned int ata_dev_classify(const struct ata_taskfile *tf); 744extern void ata_id_string(const u16 *id, unsigned char *s, 745 unsigned int ofs, unsigned int len); 746extern void ata_id_c_string(const u16 *id, unsigned char *s, 747 unsigned int ofs, unsigned int len); 748extern void ata_bmdma_setup (struct ata_queued_cmd *qc); 749extern void ata_bmdma_start (struct ata_queued_cmd *qc); 750extern void ata_bmdma_stop(struct ata_queued_cmd *qc); 751extern u8 ata_bmdma_status(struct ata_port *ap); 752extern void ata_bmdma_irq_clear(struct ata_port *ap); 753extern void ata_bmdma_freeze(struct ata_port *ap); 754extern void ata_bmdma_thaw(struct ata_port *ap); 755extern void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset, 756 ata_reset_fn_t softreset, 757 ata_reset_fn_t hardreset, 758 ata_postreset_fn_t postreset); 759extern void ata_bmdma_error_handler(struct ata_port *ap); 760extern void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc); 761extern int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, 762 u8 status, int in_wq); 763extern void ata_qc_complete(struct ata_queued_cmd *qc); 
764extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active, 765 void (*finish_qc)(struct ata_queued_cmd *)); 766extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd, 767 void (*done)(struct scsi_cmnd *)); 768extern int ata_std_bios_param(struct scsi_device *sdev, 769 struct block_device *bdev, 770 sector_t capacity, int geom[]); 771extern int ata_scsi_slave_config(struct scsi_device *sdev); 772extern void ata_scsi_slave_destroy(struct scsi_device *sdev); 773extern int ata_scsi_change_queue_depth(struct scsi_device *sdev, 774 int queue_depth); 775extern struct ata_device *ata_dev_pair(struct ata_device *adev); 776 777/* 778 * Timing helpers 779 */ 780 781extern unsigned int ata_pio_need_iordy(const struct ata_device *); 782extern int ata_timing_compute(struct ata_device *, unsigned short, 783 struct ata_timing *, int, int); 784extern void ata_timing_merge(const struct ata_timing *, 785 const struct ata_timing *, struct ata_timing *, 786 unsigned int); 787 788enum { 789 ATA_TIMING_SETUP = (1 << 0), 790 ATA_TIMING_ACT8B = (1 << 1), 791 ATA_TIMING_REC8B = (1 << 2), 792 ATA_TIMING_CYC8B = (1 << 3), 793 ATA_TIMING_8BIT = ATA_TIMING_ACT8B | ATA_TIMING_REC8B | 794 ATA_TIMING_CYC8B, 795 ATA_TIMING_ACTIVE = (1 << 4), 796 ATA_TIMING_RECOVER = (1 << 5), 797 ATA_TIMING_CYCLE = (1 << 6), 798 ATA_TIMING_UDMA = (1 << 7), 799 ATA_TIMING_ALL = ATA_TIMING_SETUP | ATA_TIMING_ACT8B | 800 ATA_TIMING_REC8B | ATA_TIMING_CYC8B | 801 ATA_TIMING_ACTIVE | ATA_TIMING_RECOVER | 802 ATA_TIMING_CYCLE | ATA_TIMING_UDMA, 803}; 804 805 806#ifdef CONFIG_PCI 807struct pci_bits { 808 unsigned int reg; /* PCI config register to read */ 809 unsigned int width; /* 1 (8 bit), 2 (16 bit), 4 (32 bit) */ 810 unsigned long mask; 811 unsigned long val; 812}; 813 814extern void ata_pci_host_stop (struct ata_host_set *host_set); 815extern struct ata_probe_ent * 816ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int portmask); 817extern int 
pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits); 818extern unsigned long ata_pci_default_filter(const struct ata_port *, struct ata_device *, unsigned long); 819#endif /* CONFIG_PCI */ 820 821/* 822 * EH 823 */ 824extern void ata_eng_timeout(struct ata_port *ap); 825 826extern void ata_port_schedule_eh(struct ata_port *ap); 827extern int ata_port_abort(struct ata_port *ap); 828extern int ata_port_freeze(struct ata_port *ap); 829 830extern void ata_eh_freeze_port(struct ata_port *ap); 831extern void ata_eh_thaw_port(struct ata_port *ap); 832 833extern void ata_eh_qc_complete(struct ata_queued_cmd *qc); 834extern void ata_eh_qc_retry(struct ata_queued_cmd *qc); 835 836extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset, 837 ata_reset_fn_t softreset, ata_reset_fn_t hardreset, 838 ata_postreset_fn_t postreset); 839 840/* 841 * printk helpers 842 */ 843#define ata_port_printk(ap, lv, fmt, args...) \ 844 printk(lv"ata%u: "fmt, (ap)->id , ##args) 845 846#define ata_dev_printk(dev, lv, fmt, args...) \ 847 printk(lv"ata%u.%02u: "fmt, (dev)->ap->id, (dev)->devno , ##args) 848 849/* 850 * ata_eh_info helpers 851 */ 852#define ata_ehi_push_desc(ehi, fmt, args...) 
do { \
	(ehi)->desc_len += scnprintf((ehi)->desc + (ehi)->desc_len, \
				     ATA_EH_DESC_LEN - (ehi)->desc_len, \
				     fmt , ##args); \
} while (0)

#define ata_ehi_clear_desc(ehi) do { \
	(ehi)->desc[0] = '\0'; \
	(ehi)->desc_len = 0; \
} while (0)

/* Mark the port hotplugged: schedule softreset with link resume and
 * arrange for all devices to be probed.  Idempotent.
 */
static inline void __ata_ehi_hotplugged(struct ata_eh_info *ehi)
{
	if (ehi->flags & ATA_EHI_HOTPLUGGED)
		return;

	ehi->flags |= ATA_EHI_HOTPLUGGED | ATA_EHI_RESUME_LINK;
	ehi->hotplug_timestamp = jiffies;

	ehi->action |= ATA_EH_SOFTRESET;
	ehi->probe_mask |= (1 << ATA_MAX_DEVICES) - 1;
}

/* As above, but also record an ATA bus error for the event */
static inline void ata_ehi_hotplugged(struct ata_eh_info *ehi)
{
	__ata_ehi_hotplugged(ehi);
	ehi->err_mask |= AC_ERR_ATA_BUS;
}

/*
 * qc helpers
 */
static inline int
ata_sg_is_last(struct scatterlist *sg, struct ata_queued_cmd *qc)
{
	if (sg == &qc->pad_sgent)
		return 1;
	if (qc->pad_len)
		return 0;	/* a pad entry still follows the main table */
	if (((sg - qc->__sg) + 1) == qc->n_elem)
		return 1;
	return 0;
}

static inline struct scatterlist *
ata_qc_first_sg(struct ata_queued_cmd *qc)
{
	if (qc->n_elem)
		return qc->__sg;
	if (qc->pad_len)
		return &qc->pad_sgent;
	return NULL;
}

static inline struct scatterlist *
ata_qc_next_sg(struct scatterlist *sg, struct ata_queued_cmd *qc)
{
	if (sg == &qc->pad_sgent)
		return NULL;	/* pad entry is always last */
	if (++sg - qc->__sg < qc->n_elem)
		return sg;
	if (qc->pad_len)
		return &qc->pad_sgent;
	return NULL;
}

#define ata_for_each_sg(sg, qc) \
	for (sg = ata_qc_first_sg(qc); sg; sg = ata_qc_next_sg(sg, qc))

static inline unsigned int ata_tag_valid(unsigned int tag)
{
	return (tag < ATA_MAX_QUEUE) ? 1 : 0;
}

static inline unsigned int ata_tag_internal(unsigned int tag)
{
	return tag == ATA_MAX_QUEUE - 1;
}

/*
 * device helpers
 */
static inline unsigned int ata_class_enabled(unsigned int class)
{
	return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI;
}

static inline unsigned int ata_class_disabled(unsigned int class)
{
	return class == ATA_DEV_ATA_UNSUP || class == ATA_DEV_ATAPI_UNSUP;
}

static inline unsigned int ata_class_absent(unsigned int class)
{
	return !ata_class_enabled(class) && !ata_class_disabled(class);
}

static inline unsigned int ata_dev_enabled(const struct ata_device *dev)
{
	return ata_class_enabled(dev->class);
}

static inline unsigned int ata_dev_disabled(const struct ata_device *dev)
{
	return ata_class_disabled(dev->class);
}

static inline unsigned int ata_dev_absent(const struct ata_device *dev)
{
	return ata_class_absent(dev->class);
}

static inline unsigned int ata_dev_ready(const struct ata_device *dev)
{
	return ata_dev_enabled(dev) && !(dev->flags & ATA_DFLAG_SUSPENDED);
}

/*
 * port helpers
 */
static inline int ata_port_max_devices(const struct ata_port *ap)
{
	if (ap->flags & ATA_FLAG_SLAVE_POSS)
		return 2;
	return 1;
}


static inline u8 ata_chk_status(struct ata_port *ap)
{
	return ap->ops->check_status(ap);
}


/**
 *	ata_pause - Flush writes and pause 400 nanoseconds.
 *	@ap: Port to wait for.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static inline void ata_pause(struct ata_port *ap)
{
	ata_altstatus(ap);
	ndelay(400);
}


/**
 *	ata_busy_wait - Wait for a port status register
 *	@ap: Port to wait for.
 *	@bits: bits that must be clear
 *	@max: number of 10uS waits to perform
 *
 *	Waits up to max*10 microseconds for the selected bits in the port's
 *	status register to be cleared.
 *	Returns final value of status register.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static inline u8 ata_busy_wait(struct ata_port *ap, unsigned int bits,
			       unsigned int max)
{
	u8 status;

	do {
		udelay(10);
		status = ata_chk_status(ap);
		max--;
	} while ((status & bits) && (max > 0));

	return status;
}


/**
 *	ata_wait_idle - Wait for a port to be idle.
 *	@ap: Port to wait for.
 *
 *	Waits up to 10ms for port's BUSY and DRQ signals to clear.
 *	Returns final value of status register.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static inline u8 ata_wait_idle(struct ata_port *ap)
{
	u8 status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);

	if (status & (ATA_BUSY | ATA_DRQ)) {
		unsigned long l = ap->ioaddr.status_addr;
		if (ata_msg_warn(ap))
			printk(KERN_WARNING "ATA: abnormal status 0x%X on port 0x%lX\n",
				status, l);
	}

	return status;
}

/* Request polled (interrupt-disabled) execution of a command */
static inline void ata_qc_set_polling(struct ata_queued_cmd *qc)
{
	qc->tf.ctl |= ATA_NIEN;
}

/* Raw tag -> qc lookup; no liveness check */
static inline struct ata_queued_cmd *__ata_qc_from_tag(struct ata_port *ap,
						       unsigned int tag)
{
	if (likely(ata_tag_valid(tag)))
		return &ap->qcmd[tag];
	return NULL;
}

/* Tag -> qc lookup which, under new-style EH, hides commands that are
 * not active or have been claimed by the error handler.
 */
static inline struct ata_queued_cmd *ata_qc_from_tag(struct ata_port *ap,
						     unsigned int tag)
{
	struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

	if (unlikely(!qc) || !ap->ops->error_handler)
		return qc;

	if ((qc->flags & (ATA_QCFLAG_ACTIVE |
			  ATA_QCFLAG_FAILED)) == ATA_QCFLAG_ACTIVE)
		return qc;

	return NULL;
}

/* Initialize a taskfile for @dev: zeroed, with cached device control
 * and the obsolete device/head select bits set for the right devno.
 */
static inline void ata_tf_init(struct ata_device *dev, struct ata_taskfile *tf)
{
	memset(tf, 0, sizeof(*tf));

	tf->ctl = dev->ap->ctl;
	if (dev->devno == 0)
		tf->device = ATA_DEVICE_OBS;
	else
		tf->device = ATA_DEVICE_OBS | ATA_DEV1;
}

static
inline void ata_qc_reinit(struct ata_queued_cmd *qc) 1093{ 1094 qc->__sg = NULL; 1095 qc->flags = 0; 1096 qc->cursect = qc->cursg = qc->cursg_ofs = 0; 1097 qc->nsect = 0; 1098 qc->nbytes = qc->curbytes = 0; 1099 qc->err_mask = 0; 1100 1101 ata_tf_init(qc->dev, &qc->tf); 1102 1103 /* init result_tf such that it indicates normal completion */ 1104 qc->result_tf.command = ATA_DRDY; 1105 qc->result_tf.feature = 0; 1106} 1107 1108/** 1109 * ata_irq_on - Enable interrupts on a port. 1110 * @ap: Port on which interrupts are enabled. 1111 * 1112 * Enable interrupts on a legacy IDE device using MMIO or PIO, 1113 * wait for idle, clear any pending interrupts. 1114 * 1115 * LOCKING: 1116 * Inherited from caller. 1117 */ 1118 1119static inline u8 ata_irq_on(struct ata_port *ap) 1120{ 1121 struct ata_ioports *ioaddr = &ap->ioaddr; 1122 u8 tmp; 1123 1124 ap->ctl &= ~ATA_NIEN; 1125 ap->last_ctl = ap->ctl; 1126 1127 if (ap->flags & ATA_FLAG_MMIO) 1128 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr); 1129 else 1130 outb(ap->ctl, ioaddr->ctl_addr); 1131 tmp = ata_wait_idle(ap); 1132 1133 ap->ops->irq_clear(ap); 1134 1135 return tmp; 1136} 1137 1138 1139/** 1140 * ata_irq_ack - Acknowledge a device interrupt. 1141 * @ap: Port on which interrupts are enabled. 1142 * 1143 * Wait up to 10 ms for legacy IDE device to become idle (BUSY 1144 * or BUSY+DRQ clear). Obtain dma status and port status from 1145 * device. Clear the interrupt. Return port status. 1146 * 1147 * LOCKING: 1148 */ 1149 1150static inline u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq) 1151{ 1152 unsigned int bits = chk_drq ? 
ATA_BUSY | ATA_DRQ : ATA_BUSY; 1153 u8 host_stat, post_stat, status; 1154 1155 status = ata_busy_wait(ap, bits, 1000); 1156 if (status & bits) 1157 if (ata_msg_err(ap)) 1158 printk(KERN_ERR "abnormal status 0x%X\n", status); 1159 1160 /* get controller status; clear intr, err bits */ 1161 if (ap->flags & ATA_FLAG_MMIO) { 1162 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr; 1163 host_stat = readb(mmio + ATA_DMA_STATUS); 1164 writeb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR, 1165 mmio + ATA_DMA_STATUS); 1166 1167 post_stat = readb(mmio + ATA_DMA_STATUS); 1168 } else { 1169 host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); 1170 outb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR, 1171 ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); 1172 1173 post_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); 1174 } 1175 1176 if (ata_msg_intr(ap)) 1177 printk(KERN_INFO "%s: irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n", 1178 __FUNCTION__, 1179 host_stat, post_stat, status); 1180 1181 return status; 1182} 1183 1184static inline int ata_try_flush_cache(const struct ata_device *dev) 1185{ 1186 return ata_id_wcache_enabled(dev->id) || 1187 ata_id_has_flush(dev->id) || 1188 ata_id_has_flush_ext(dev->id); 1189} 1190 1191static inline unsigned int ac_err_mask(u8 status) 1192{ 1193 if (status & (ATA_BUSY | ATA_DRQ)) 1194 return AC_ERR_HSM; 1195 if (status & (ATA_ERR | ATA_DF)) 1196 return AC_ERR_DEV; 1197 return 0; 1198} 1199 1200static inline unsigned int __ac_err_mask(u8 status) 1201{ 1202 unsigned int mask = ac_err_mask(status); 1203 if (mask == 0) 1204 return AC_ERR_OTHER; 1205 return mask; 1206} 1207 1208static inline int ata_pad_alloc(struct ata_port *ap, struct device *dev) 1209{ 1210 ap->pad_dma = 0; 1211 ap->pad = dma_alloc_coherent(dev, ATA_DMA_PAD_BUF_SZ, 1212 &ap->pad_dma, GFP_KERNEL); 1213 return (ap->pad == NULL) ? 
-ENOMEM : 0; 1214} 1215 1216static inline void ata_pad_free(struct ata_port *ap, struct device *dev) 1217{ 1218 dma_free_coherent(dev, ATA_DMA_PAD_BUF_SZ, ap->pad, ap->pad_dma); 1219} 1220 1221static inline struct ata_port *ata_shost_to_port(struct Scsi_Host *host) 1222{ 1223 return (struct ata_port *) &host->hostdata[0]; 1224} 1225 1226#endif /* __LINUX_LIBATA_H__ */