/*
 *  Copyright 2003-2005 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2005 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 */

#ifndef __LINUX_LIBATA_H__
#define __LINUX_LIBATA_H__

#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <asm/scatterlist.h>
#include <asm/io.h>
#include <linux/ata.h>
#include <linux/workqueue.h>
#include <scsi/scsi_host.h>

/*
 * compile-time options: to be removed as soon as all the drivers are
 * converted to the new debugging mechanism
 */
#undef ATA_DEBUG		/* debugging output */
#undef ATA_VERBOSE_DEBUG	/* yet more debugging output */
#undef ATA_IRQ_TRAP		/* define to ack screaming irqs */
#undef ATA_NDEBUG		/* define to disable quick runtime checks */
#undef ATA_ENABLE_PATA		/* define to enable PATA support in some
				 * low-level drivers */


/* note: prints function name for you */
#ifdef ATA_DEBUG
#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
#ifdef ATA_VERBOSE_DEBUG
#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)
#else
#define VPRINTK(fmt, args...)
#endif	/* ATA_VERBOSE_DEBUG */
#else
#define DPRINTK(fmt, args...)
#define VPRINTK(fmt, args...)
#endif	/* ATA_DEBUG */

#define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args)

/* NEW: debug levels */
#define HAVE_LIBATA_MSG 1

enum {
	ATA_MSG_DRV	= 0x0001,
	ATA_MSG_INFO	= 0x0002,
	ATA_MSG_PROBE	= 0x0004,
	ATA_MSG_WARN	= 0x0008,
	ATA_MSG_MALLOC	= 0x0010,
	ATA_MSG_CTL	= 0x0020,
	ATA_MSG_INTR	= 0x0040,
	ATA_MSG_ERR	= 0x0080,
};

#define ata_msg_drv(p)    ((p)->msg_enable & ATA_MSG_DRV)
#define ata_msg_info(p)   ((p)->msg_enable & ATA_MSG_INFO)
#define ata_msg_probe(p)  ((p)->msg_enable & ATA_MSG_PROBE)
#define ata_msg_warn(p)   ((p)->msg_enable & ATA_MSG_WARN)
#define ata_msg_malloc(p) ((p)->msg_enable & ATA_MSG_MALLOC)
#define ata_msg_ctl(p)    ((p)->msg_enable & ATA_MSG_CTL)
#define ata_msg_intr(p)   ((p)->msg_enable & ATA_MSG_INTR)
#define ata_msg_err(p)    ((p)->msg_enable & ATA_MSG_ERR)

static inline u32 ata_msg_init(int dval, int default_msg_enable_bits)
{
	if (dval < 0 || dval >= (sizeof(u32) * 8))
		return default_msg_enable_bits; /* should be 0x1 - only driver info msgs */
	if (!dval)
		return 0;
	return (1 << dval) - 1;
}
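
/*
 * Illustrative sketch (hypothetical "foo" LLD, not a real driver): how the
 * ATA_MSG_* bits and ata_msg_init() above are meant to be used.  A numeric
 * module parameter is converted into ap->msg_enable, and the ata_msg_*()
 * predicates then gate optional printk output.
 */
#if 0
static int foo_msg_level;	/* hypothetical module parameter */
module_param(foo_msg_level, int, 0644);

static void foo_port_init_dbg(struct ata_port *ap)
{
	/* 0 silences everything, otherwise the lowest N levels are enabled;
	 * out-of-range values fall back to the default mask given here.
	 */
	ap->msg_enable = ata_msg_init(foo_msg_level,
				      ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN);

	if (ata_msg_probe(ap))
		printk(KERN_INFO "foo: probe messages enabled on ata%u\n",
		       ap->id);
}
#endif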

/* defines only for the constants which don't work well as enums */
#define ATA_TAG_POISON		0xfafbfcfdU

/* move to PCI layer? */
static inline struct device *pci_dev_to_dev(struct pci_dev *pdev)
{
	return &pdev->dev;
}

enum {
	/* various global constants */
	LIBATA_MAX_PRD		= ATA_MAX_PRD / 2,
	ATA_MAX_PORTS		= 8,
	ATA_DEF_QUEUE		= 1,
	/* tag ATA_MAX_QUEUE - 1 is reserved for internal commands */
	ATA_MAX_QUEUE		= 32,
	ATA_TAG_INTERNAL	= ATA_MAX_QUEUE - 1,
	ATA_MAX_SECTORS		= 200,	/* FIXME */
	ATA_MAX_SECTORS_LBA48	= 65535,
	ATA_MAX_BUS		= 2,
	ATA_DEF_BUSY_WAIT	= 10000,
	ATA_SHORT_PAUSE		= (HZ >> 6) + 1,

	ATA_SHT_EMULATED	= 1,
	ATA_SHT_CMD_PER_LUN	= 1,
	ATA_SHT_THIS_ID		= -1,
	ATA_SHT_USE_CLUSTERING	= 1,

	/* struct ata_device stuff */
	ATA_DFLAG_LBA		= (1 << 0), /* device supports LBA */
	ATA_DFLAG_LBA48		= (1 << 1), /* device supports LBA48 */
	ATA_DFLAG_CDB_INTR	= (1 << 2), /* device asserts INTRQ when ready for CDB */
	ATA_DFLAG_NCQ		= (1 << 3), /* device supports NCQ */
	ATA_DFLAG_CFG_MASK	= (1 << 8) - 1,

	ATA_DFLAG_PIO		= (1 << 8), /* device currently in PIO mode */
	ATA_DFLAG_SUSPENDED	= (1 << 9), /* device suspended */
	ATA_DFLAG_INIT_MASK	= (1 << 16) - 1,

	ATA_DFLAG_DETACH	= (1 << 16),
	ATA_DFLAG_DETACHED	= (1 << 17),

	ATA_DEV_UNKNOWN		= 0,	/* unknown device */
	ATA_DEV_ATA		= 1,	/* ATA device */
	ATA_DEV_ATA_UNSUP	= 2,	/* ATA device (unsupported) */
	ATA_DEV_ATAPI		= 3,	/* ATAPI device */
	ATA_DEV_ATAPI_UNSUP	= 4,	/* ATAPI device (unsupported) */
	ATA_DEV_NONE		= 5,	/* no device */

	/* struct ata_port flags */
	ATA_FLAG_SLAVE_POSS	= (1 << 0), /* host supports slave dev */
					    /* (doesn't imply presence) */
	ATA_FLAG_SATA		= (1 << 1),
	ATA_FLAG_NO_LEGACY	= (1 << 2), /* no legacy mode check */
	ATA_FLAG_MMIO		= (1 << 3), /* use MMIO, not PIO */
	ATA_FLAG_SRST		= (1 << 4), /* (obsolete) use ATA SRST, not E.D.D. */
	ATA_FLAG_SATA_RESET	= (1 << 5), /* (obsolete) use COMRESET */
	ATA_FLAG_NO_ATAPI	= (1 << 6), /* No ATAPI support */
	ATA_FLAG_PIO_DMA	= (1 << 7), /* PIO cmds via DMA */
	ATA_FLAG_PIO_LBA48	= (1 << 8), /* Host DMA engine is LBA28 only */
	ATA_FLAG_PIO_POLLING	= (1 << 9), /* use polling PIO if LLD
					     * doesn't handle PIO interrupts */
	ATA_FLAG_NCQ		= (1 << 10), /* host supports NCQ */
	ATA_FLAG_HRST_TO_RESUME	= (1 << 11), /* hardreset to resume phy */
	ATA_FLAG_SKIP_D2H_BSY	= (1 << 12), /* can't wait for the first D2H
					      * Register FIS clearing BSY */
	ATA_FLAG_DEBUGMSG	= (1 << 13),

	/* The following flag belongs to ap->pflags but is kept in
	 * ap->flags because it's referenced in many LLDs and will be
	 * removed in not-too-distant future.
	 */
	ATA_FLAG_DISABLED	= (1 << 23), /* port is disabled, ignore it */

	/* bits 24:31 of ap->flags are reserved for LLD specific flags */

	/* struct ata_port pflags */
	ATA_PFLAG_EH_PENDING	= (1 << 0), /* EH pending */
	ATA_PFLAG_EH_IN_PROGRESS = (1 << 1), /* EH in progress */
	ATA_PFLAG_FROZEN	= (1 << 2), /* port is frozen */
	ATA_PFLAG_RECOVERED	= (1 << 3), /* recovery action performed */
	ATA_PFLAG_LOADING	= (1 << 4), /* boot/loading probe */
	ATA_PFLAG_UNLOADING	= (1 << 5), /* module is unloading */
	ATA_PFLAG_SCSI_HOTPLUG	= (1 << 6), /* SCSI hotplug scheduled */

	ATA_PFLAG_FLUSH_PORT_TASK = (1 << 16), /* flush port task */
	ATA_PFLAG_SUSPENDED	= (1 << 17), /* port is suspended (power) */
	ATA_PFLAG_PM_PENDING	= (1 << 18), /* PM operation pending */

	/* struct ata_queued_cmd flags */
	ATA_QCFLAG_ACTIVE	= (1 << 0), /* cmd not yet ack'd to scsi layer */
	ATA_QCFLAG_SG		= (1 << 1), /* have s/g table? */
	ATA_QCFLAG_SINGLE	= (1 << 2), /* no s/g, just a single buffer */
	ATA_QCFLAG_DMAMAP	= ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE,
	ATA_QCFLAG_IO		= (1 << 3), /* standard IO command */
	ATA_QCFLAG_RESULT_TF	= (1 << 4), /* result TF requested */

	ATA_QCFLAG_FAILED	= (1 << 16), /* cmd failed and is owned by EH */
	ATA_QCFLAG_SENSE_VALID	= (1 << 17), /* sense data valid */
	ATA_QCFLAG_EH_SCHEDULED = (1 << 18), /* EH scheduled (obsolete) */

	/* host set flags */
	ATA_HOST_SIMPLEX	= (1 << 0),	/* Host is simplex, one DMA channel per host_set only */

	/* various lengths of time */
	ATA_TMOUT_BOOT		= 30 * HZ,	/* heuristic */
	ATA_TMOUT_BOOT_QUICK	= 7 * HZ,	/* heuristic */
	ATA_TMOUT_INTERNAL	= 30 * HZ,
	ATA_TMOUT_INTERNAL_QUICK = 5 * HZ,

	/* ATA bus states */
	BUS_UNKNOWN		= 0,
	BUS_DMA			= 1,
	BUS_IDLE		= 2,
	BUS_NOINTR		= 3,
	BUS_NODATA		= 4,
	BUS_TIMER		= 5,
	BUS_PIO			= 6,
	BUS_EDD			= 7,
	BUS_IDENTIFY		= 8,
	BUS_PACKET		= 9,

	/* SATA port states */
	PORT_UNKNOWN		= 0,
	PORT_ENABLED		= 1,
	PORT_DISABLED		= 2,

	/* encoding various smaller bitmaps into a single
	 * unsigned int bitmap
	 */
	ATA_BITS_PIO		= 5,
	ATA_BITS_MWDMA		= 3,
	ATA_BITS_UDMA		= 8,

	ATA_SHIFT_PIO		= 0,
	ATA_SHIFT_MWDMA		= ATA_SHIFT_PIO + ATA_BITS_PIO,
	ATA_SHIFT_UDMA		= ATA_SHIFT_MWDMA + ATA_BITS_MWDMA,

	ATA_MASK_PIO		= ((1 << ATA_BITS_PIO) - 1) << ATA_SHIFT_PIO,
	ATA_MASK_MWDMA		= ((1 << ATA_BITS_MWDMA) - 1) << ATA_SHIFT_MWDMA,
	ATA_MASK_UDMA		= ((1 << ATA_BITS_UDMA) - 1) << ATA_SHIFT_UDMA,

	/* size of buffer to pad xfers ending on unaligned boundaries */
	ATA_DMA_PAD_SZ		= 4,
	ATA_DMA_PAD_BUF_SZ	= ATA_DMA_PAD_SZ * ATA_MAX_QUEUE,

	/* masks for port functions */
	ATA_PORT_PRIMARY	= (1 << 0),
	ATA_PORT_SECONDARY	= (1 << 1),

	/* ering size */
	ATA_ERING_SIZE		= 32,

	/* desc_len for ata_eh_info and context */
	ATA_EH_DESC_LEN		= 80,

	/* reset / recovery action types */
	ATA_EH_REVALIDATE	= (1 << 0),
	ATA_EH_SOFTRESET	= (1 << 1),
	ATA_EH_HARDRESET	= (1 << 2),
	ATA_EH_SUSPEND		= (1 << 3),
	ATA_EH_RESUME		= (1 << 4),
	ATA_EH_PM_FREEZE	= (1 << 5),

	ATA_EH_RESET_MASK	= ATA_EH_SOFTRESET | ATA_EH_HARDRESET,
	ATA_EH_PERDEV_MASK	= ATA_EH_REVALIDATE | ATA_EH_SUSPEND |
				  ATA_EH_RESUME | ATA_EH_PM_FREEZE,

	/* ata_eh_info->flags */
	ATA_EHI_HOTPLUGGED	= (1 << 0),  /* could have been hotplugged */
	ATA_EHI_RESUME_LINK	= (1 << 1),  /* need to resume link */
	ATA_EHI_NO_AUTOPSY	= (1 << 2),  /* no autopsy */
	ATA_EHI_QUIET		= (1 << 3),  /* be quiet */

	ATA_EHI_DID_RESET	= (1 << 16), /* already reset this port */

	/* max repeat if error condition is still set after ->error_handler */
	ATA_EH_MAX_REPEAT	= 5,

	/* how hard are we gonna try to probe/recover devices */
	ATA_PROBE_MAX_TRIES	= 3,
	ATA_EH_RESET_TRIES	= 3,
	ATA_EH_DEV_TRIES	= 3,

	/* Drive spinup time (time from power-on to the first D2H FIS)
	 * in msecs - 8s currently.  Failing to get ready in this time
	 * isn't critical.  It will result in reset failure for
	 * controllers which can't wait for the first D2H FIS.  libata
	 * will retry, so it just has to be long enough to spin up
	 * most devices.
	 */
	ATA_SPINUP_WAIT		= 8000,
};
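
/*
 * Illustrative sketch: how the ATA_SHIFT_*/ATA_MASK_* constants above pack
 * the three per-type masks into a single xfer mask and back again.  The
 * helper names are hypothetical; libata-core has its own equivalents.
 */
#if 0
static unsigned int foo_pack_xfermask(unsigned int pio_mask,
				      unsigned int mwdma_mask,
				      unsigned int udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
	       ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
	       ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

static void foo_unpack_xfermask(unsigned int xfer_mask,
				unsigned int *pio_mask,
				unsigned int *mwdma_mask,
				unsigned int *udma_mask)
{
	*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}
#endif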

enum hsm_task_states {
	HSM_ST_UNKNOWN,		/* state unknown */
	HSM_ST_IDLE,		/* no command ongoing */
	HSM_ST,			/* (waiting the device to) transfer data */
	HSM_ST_LAST,		/* (waiting the device to) complete command */
	HSM_ST_ERR,		/* error */
	HSM_ST_FIRST,		/* (waiting the device to)
				   write CDB or first data block */
};

enum ata_completion_errors {
	AC_ERR_DEV		= (1 << 0), /* device reported error */
	AC_ERR_HSM		= (1 << 1), /* host state machine violation */
	AC_ERR_TIMEOUT		= (1 << 2), /* timeout */
	AC_ERR_MEDIA		= (1 << 3), /* media error */
	AC_ERR_ATA_BUS		= (1 << 4), /* ATA bus error */
	AC_ERR_HOST_BUS		= (1 << 5), /* host bus error */
	AC_ERR_SYSTEM		= (1 << 6), /* system error */
	AC_ERR_INVALID		= (1 << 7), /* invalid argument */
	AC_ERR_OTHER		= (1 << 8), /* unknown */
};

/* forward declarations */
struct scsi_device;
struct ata_port_operations;
struct ata_port;
struct ata_queued_cmd;

/* typedefs */
typedef void (*ata_qc_cb_t) (struct ata_queued_cmd *qc);
typedef int (*ata_prereset_fn_t)(struct ata_port *ap);
typedef int (*ata_reset_fn_t)(struct ata_port *ap, unsigned int *classes);
typedef void (*ata_postreset_fn_t)(struct ata_port *ap, unsigned int *classes);

struct ata_ioports {
	unsigned long		cmd_addr;
	unsigned long		data_addr;
	unsigned long		error_addr;
	unsigned long		feature_addr;
	unsigned long		nsect_addr;
	unsigned long		lbal_addr;
	unsigned long		lbam_addr;
	unsigned long		lbah_addr;
	unsigned long		device_addr;
	unsigned long		status_addr;
	unsigned long		command_addr;
	unsigned long		altstatus_addr;
	unsigned long		ctl_addr;
	unsigned long		bmdma_addr;
	unsigned long		scr_addr;
};

struct ata_probe_ent {
	struct list_head	node;
	struct device		*dev;
	const struct ata_port_operations *port_ops;
	struct scsi_host_template *sht;
	struct ata_ioports	port[ATA_MAX_PORTS];
	unsigned int		n_ports;
	unsigned int		hard_port_no;
	unsigned int		pio_mask;
	unsigned int		mwdma_mask;
	unsigned int		udma_mask;
	unsigned int		legacy_mode;
	unsigned long		irq;
	unsigned int		irq_flags;
	unsigned long		host_flags;
	unsigned long		host_set_flags;
	void __iomem		*mmio_base;
	void			*private_data;
};

struct ata_host_set {
	spinlock_t		lock;
	struct device		*dev;
	unsigned long		irq;
	void __iomem		*mmio_base;
	unsigned int		n_ports;
	void			*private_data;
	const struct ata_port_operations *ops;
	unsigned long		flags;
	int			simplex_claimed;	/* Keep separate in case we
							   ever need to do this locked */
	struct ata_host_set	*next;		/* for legacy mode */
	struct ata_port		*ports[0];
};
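
/*
 * Illustrative sketch (hypothetical single-port MMIO SATA LLD): how a
 * low-level driver typically fills struct ata_probe_ent before handing it
 * to ata_device_add() (declared further below).  The "foo_*" names, mask
 * values and BAR offsets are all assumptions made up for the example.
 */
#if 0
static int foo_fill_probe_ent(struct ata_probe_ent *probe_ent,
			      struct pci_dev *pdev, void __iomem *mmio)
{
	INIT_LIST_HEAD(&probe_ent->node);
	probe_ent->dev = pci_dev_to_dev(pdev);
	probe_ent->sht = &foo_sht;		/* hypothetical SHT */
	probe_ent->port_ops = &foo_port_ops;	/* hypothetical ops table */
	probe_ent->n_ports = 1;
	probe_ent->pio_mask = 0x1f;		/* PIO 0-4 */
	probe_ent->mwdma_mask = 0x07;		/* MWDMA 0-2 */
	probe_ent->udma_mask = 0x7f;		/* UDMA 0-6 */
	probe_ent->irq = pdev->irq;
	probe_ent->irq_flags = SA_SHIRQ;
	probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				ATA_FLAG_MMIO;
	probe_ent->mmio_base = mmio;

	probe_ent->port[0].cmd_addr = (unsigned long)mmio;		/* assumed layout */
	probe_ent->port[0].ctl_addr = (unsigned long)mmio + 0x20;	/* assumed */
	probe_ent->port[0].scr_addr = (unsigned long)mmio + 0x40;	/* assumed */
	ata_std_ports(&probe_ent->port[0]);

	/* ata_device_add() returns the number of ports attached */
	return ata_device_add(probe_ent) ? 0 : -ENODEV;
}
#endif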

struct ata_queued_cmd {
	struct ata_port		*ap;
	struct ata_device	*dev;

	struct scsi_cmnd	*scsicmd;
	void			(*scsidone)(struct scsi_cmnd *);

	struct ata_taskfile	tf;
	u8			cdb[ATAPI_CDB_LEN];

	unsigned long		flags;		/* ATA_QCFLAG_xxx */
	unsigned int		tag;
	unsigned int		n_elem;
	unsigned int		orig_n_elem;

	int			dma_dir;

	unsigned int		pad_len;

	unsigned int		nsect;
	unsigned int		cursect;

	unsigned int		nbytes;
	unsigned int		curbytes;

	unsigned int		cursg;
	unsigned int		cursg_ofs;

	struct scatterlist	sgent;
	struct scatterlist	pad_sgent;
	void			*buf_virt;

	/* DO NOT iterate over __sg manually, use ata_for_each_sg() */
	struct scatterlist	*__sg;

	unsigned int		err_mask;
	struct ata_taskfile	result_tf;
	ata_qc_cb_t		complete_fn;

	void			*private_data;
};

struct ata_host_stats {
	unsigned long		unhandled_irq;
	unsigned long		idle_irq;
	unsigned long		rw_reqbuf;
};

struct ata_ering_entry {
	int			is_io;
	unsigned int		err_mask;
	u64			timestamp;
};

struct ata_ering {
	int			cursor;
	struct ata_ering_entry	ring[ATA_ERING_SIZE];
};

struct ata_device {
	struct ata_port		*ap;
	unsigned int		devno;		/* 0 or 1 */
	unsigned long		flags;		/* ATA_DFLAG_xxx */
	struct scsi_device	*sdev;		/* attached SCSI device */
	/* n_sector is used as CLEAR_OFFSET, read comment above CLEAR_OFFSET */
	u64			n_sectors;	/* size of device, if ATA */
	unsigned int		class;		/* ATA_DEV_xxx */
	u16			id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */
	u8			pio_mode;
	u8			dma_mode;
	u8			xfer_mode;
	unsigned int		xfer_shift;	/* ATA_SHIFT_xxx */

	unsigned int		multi_count;	/* sectors count for
						   READ/WRITE MULTIPLE */
	unsigned int		max_sectors;	/* per-device max sectors */
	unsigned int		cdb_len;

	/* per-dev xfer mask */
	unsigned int		pio_mask;
	unsigned int		mwdma_mask;
	unsigned int		udma_mask;

	/* for CHS addressing */
	u16			cylinders;	/* Number of cylinders */
	u16			heads;		/* Number of heads */
	u16			sectors;	/* Number of sectors per track */

	/* error history */
	struct ata_ering	ering;
};

/* Offset into struct ata_device.  Fields above it are maintained
 * across device init.  Fields below are zeroed.
 */
#define ATA_DEVICE_CLEAR_OFFSET		offsetof(struct ata_device, n_sectors)
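
/*
 * Illustrative sketch: the split described above lets a device (re-)init
 * wipe only the probe-time state from n_sectors onwards while preserving
 * the fields in front of it (ap, devno, flags, sdev).  Roughly:
 */
#if 0
static void foo_dev_clear(struct ata_device *dev)
{
	/* everything from n_sectors to the end of the struct is zeroed */
	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
}
#endif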

struct ata_eh_info {
	struct ata_device	*dev;		/* offending device */
	u32			serror;		/* SError from LLDD */
	unsigned int		err_mask;	/* port-wide err_mask */
	unsigned int		action;		/* ATA_EH_* action mask */
	unsigned int		dev_action[ATA_MAX_DEVICES]; /* dev EH action */
	unsigned int		flags;		/* ATA_EHI_* flags */

	unsigned long		hotplug_timestamp;
	unsigned int		probe_mask;

	char			desc[ATA_EH_DESC_LEN];
	int			desc_len;
};

struct ata_eh_context {
	struct ata_eh_info	i;
	int			tries[ATA_MAX_DEVICES];
	unsigned int		classes[ATA_MAX_DEVICES];
	unsigned int		did_probe_mask;
};

struct ata_port {
	struct Scsi_Host	*host;	/* our co-allocated scsi host */
	const struct ata_port_operations *ops;
	spinlock_t		*lock;
	unsigned long		flags;	/* ATA_FLAG_xxx */
	unsigned int		pflags; /* ATA_PFLAG_xxx */
	unsigned int		id;	/* unique id req'd by scsi midlyr */
	unsigned int		port_no; /* unique port #; from zero */
	unsigned int		hard_port_no;	/* hardware port #; from zero */

	struct ata_prd		*prd;	 /* our SG list */
	dma_addr_t		prd_dma; /* and its DMA mapping */

	void			*pad;	/* array of DMA pad buffers */
	dma_addr_t		pad_dma;

	struct ata_ioports	ioaddr;	/* ATA cmd/ctl/dma register blocks */

	u8			ctl;	/* cache of ATA control register */
	u8			last_ctl;	/* Cache last written value */
	unsigned int		pio_mask;
	unsigned int		mwdma_mask;
	unsigned int		udma_mask;
	unsigned int		cbl;	/* cable type; ATA_CBL_xxx */
	unsigned int		hw_sata_spd_limit;
	unsigned int		sata_spd_limit;	/* SATA PHY speed limit */

	/* record runtime error info, protected by host_set lock */
	struct ata_eh_info	eh_info;
	/* EH context owned by EH */
	struct ata_eh_context	eh_context;

	struct ata_device	device[ATA_MAX_DEVICES];

	struct ata_queued_cmd	qcmd[ATA_MAX_QUEUE];
	unsigned long		qc_allocated;
	unsigned int		qc_active;

	unsigned int		active_tag;
	u32			sactive;

	struct ata_host_stats	stats;
	struct ata_host_set	*host_set;
	struct device 		*dev;

	struct work_struct	port_task;
	struct work_struct	hotplug_task;
	struct work_struct	scsi_rescan_task;

	unsigned int		hsm_task_state;

	u32			msg_enable;
	struct list_head	eh_done_q;
	wait_queue_head_t	eh_wait_q;

	pm_message_t		pm_mesg;
	int			*pm_result;

	void			*private_data;

	u8			sector_buf[ATA_SECT_SIZE]; /* owned by EH */
};

struct ata_port_operations {
	void (*port_disable) (struct ata_port *);

	void (*dev_config) (struct ata_port *, struct ata_device *);

	void (*set_piomode) (struct ata_port *, struct ata_device *);
	void (*set_dmamode) (struct ata_port *, struct ata_device *);
	unsigned long (*mode_filter) (const struct ata_port *, struct ata_device *, unsigned long);

	void (*tf_load) (struct ata_port *ap, const struct ata_taskfile *tf);
	void (*tf_read) (struct ata_port *ap, struct ata_taskfile *tf);

	void (*exec_command)(struct ata_port *ap, const struct ata_taskfile *tf);
	u8   (*check_status)(struct ata_port *ap);
	u8   (*check_altstatus)(struct ata_port *ap);
	void (*dev_select)(struct ata_port *ap, unsigned int device);

	void (*phy_reset) (struct ata_port *ap); /* obsolete */
	void (*set_mode) (struct ata_port *ap);

	void (*post_set_mode) (struct ata_port *ap);

	int (*check_atapi_dma) (struct ata_queued_cmd *qc);

	void (*bmdma_setup) (struct ata_queued_cmd *qc);
	void (*bmdma_start) (struct ata_queued_cmd *qc);

	void (*data_xfer) (struct ata_device *, unsigned char *, unsigned int, int);

	void (*qc_prep) (struct ata_queued_cmd *qc);
	unsigned int (*qc_issue) (struct ata_queued_cmd *qc);

	/* Error handlers.  ->error_handler overrides ->eng_timeout and
	 * indicates that new-style EH is in place.
	 */
	void (*eng_timeout) (struct ata_port *ap); /* obsolete */

	void (*freeze) (struct ata_port *ap);
	void (*thaw) (struct ata_port *ap);
	void (*error_handler) (struct ata_port *ap);
	void (*post_internal_cmd) (struct ata_queued_cmd *qc);

	irqreturn_t (*irq_handler)(int, void *, struct pt_regs *);
	void (*irq_clear) (struct ata_port *);

	u32 (*scr_read) (struct ata_port *ap, unsigned int sc_reg);
	void (*scr_write) (struct ata_port *ap, unsigned int sc_reg,
			   u32 val);

	int (*port_suspend) (struct ata_port *ap, pm_message_t mesg);
	int (*port_resume) (struct ata_port *ap);

	int (*port_start) (struct ata_port *ap);
	void (*port_stop) (struct ata_port *ap);

	void (*host_stop) (struct ata_host_set *host_set);

	void (*bmdma_stop) (struct ata_queued_cmd *qc);
	u8   (*bmdma_status) (struct ata_port *ap);
};

struct ata_port_info {
	struct scsi_host_template	*sht;
	unsigned long		host_flags;
	unsigned long		pio_mask;
	unsigned long		mwdma_mask;
	unsigned long		udma_mask;
	const struct ata_port_operations *port_ops;
	void			*private_data;
};

struct ata_timing {
	unsigned short mode;		/* ATA mode */
	unsigned short setup;		/* t1 */
	unsigned short act8b;		/* t2 for 8-bit I/O */
	unsigned short rec8b;		/* t2i for 8-bit I/O */
	unsigned short cyc8b;		/* t0 for 8-bit I/O */
	unsigned short active;		/* t2 or tD */
	unsigned short recover;		/* t2i or tK */
	unsigned short cycle;		/* t0 */
	unsigned short udma;		/* t2CYCTYP/2 */
};

#define FIT(v,vmin,vmax)	max_t(short,min_t(short,v,vmax),vmin)

extern const unsigned long sata_deb_timing_normal[];
extern const unsigned long sata_deb_timing_hotplug[];
extern const unsigned long sata_deb_timing_long[];

static inline const unsigned long *
sata_ehc_deb_timing(struct ata_eh_context *ehc)
{
	if (ehc->i.flags & ATA_EHI_HOTPLUGGED)
		return sata_deb_timing_hotplug;
	else
		return sata_deb_timing_normal;
}

extern void ata_port_probe(struct ata_port *);
extern void __sata_phy_reset(struct ata_port *ap);
extern void sata_phy_reset(struct ata_port *ap);
extern void ata_bus_reset(struct ata_port *ap);
extern int sata_set_spd(struct ata_port *ap);
extern int sata_phy_debounce(struct ata_port *ap, const unsigned long *param);
extern int sata_phy_resume(struct ata_port *ap, const unsigned long *param);
extern int ata_std_prereset(struct ata_port *ap);
extern int ata_std_softreset(struct ata_port *ap, unsigned int *classes);
extern int sata_std_hardreset(struct ata_port *ap, unsigned int *class);
extern void ata_std_postreset(struct ata_port *ap, unsigned int *classes);
extern int ata_dev_revalidate(struct ata_device *dev, int post_reset);
extern void ata_port_disable(struct ata_port *);
extern void ata_std_ports(struct ata_ioports *ioaddr);
#ifdef CONFIG_PCI
extern int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
			     unsigned int n_ports);
extern void ata_pci_remove_one (struct pci_dev *pdev);
extern void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t state);
extern void ata_pci_device_do_resume(struct pci_dev *pdev);
extern int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t state);
extern int ata_pci_device_resume(struct pci_dev *pdev);
extern int ata_pci_clear_simplex(struct pci_dev *pdev);
#endif /* CONFIG_PCI */
extern int ata_device_add(const struct ata_probe_ent *ent);
extern void ata_port_detach(struct ata_port *ap);
extern void ata_host_set_remove(struct ata_host_set *host_set);
extern int ata_scsi_detect(struct scsi_host_template *sht);
extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
extern int ata_scsi_release(struct Scsi_Host *host);
extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
extern int sata_scr_valid(struct ata_port *ap);
extern int sata_scr_read(struct ata_port *ap, int reg, u32 *val);
extern int sata_scr_write(struct ata_port *ap, int reg, u32 val);
extern int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val);
extern int ata_port_online(struct ata_port *ap);
extern int ata_port_offline(struct ata_port *ap);
extern int ata_scsi_device_resume(struct scsi_device *);
extern int ata_scsi_device_suspend(struct scsi_device *, pm_message_t state);
extern int ata_host_set_suspend(struct ata_host_set *host_set,
				pm_message_t mesg);
extern void ata_host_set_resume(struct ata_host_set *host_set);
extern int ata_ratelimit(void);
extern unsigned int ata_busy_sleep(struct ata_port *ap,
				   unsigned long timeout_pat,
				   unsigned long timeout);
extern void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *),
				void *data, unsigned long delay);
extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
			     unsigned long interval_msec,
			     unsigned long timeout_msec);
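
/*
 * Illustrative sketch: typical use of the sata_scr_*() accessors above from
 * a hypothetical LLD.  SCR_STATUS/SCR_ERROR come from <linux/ata.h>; the
 * accessors return an error when the port has no SCR access, which
 * sata_scr_valid() lets the caller check up front.
 */
#if 0
static void foo_dump_link_state(struct ata_port *ap)
{
	u32 sstatus, serror;

	if (!sata_scr_valid(ap))
		return;		/* no SCR access on this port */

	sata_scr_read(ap, SCR_STATUS, &sstatus);
	sata_scr_read(ap, SCR_ERROR, &serror);

	printk(KERN_INFO "ata%u: SStatus 0x%x SError 0x%x (link %s)\n",
	       ap->id, sstatus, serror,
	       ata_port_online(ap) ? "up" : "down");

	/* write the bits back to clear the sticky SError conditions */
	sata_scr_write(ap, SCR_ERROR, serror);
}
#endif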

/*
 * Default driver ops implementations
 */
extern void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf);
extern void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf);
extern void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp);
extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf);
extern void ata_noop_dev_select (struct ata_port *ap, unsigned int device);
extern void ata_std_dev_select (struct ata_port *ap, unsigned int device);
extern u8 ata_check_status(struct ata_port *ap);
extern u8 ata_altstatus(struct ata_port *ap);
extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf);
extern int ata_port_start (struct ata_port *ap);
extern void ata_port_stop (struct ata_port *ap);
extern void ata_host_stop (struct ata_host_set *host_set);
extern irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs);
extern void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
			       unsigned int buflen, int write_data);
extern void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
			      unsigned int buflen, int write_data);
extern void ata_pio_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
				    unsigned int buflen, int write_data);
extern void ata_qc_prep(struct ata_queued_cmd *qc);
extern void ata_noop_qc_prep(struct ata_queued_cmd *qc);
extern unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc);
extern void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf,
			    unsigned int buflen);
extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
			unsigned int n_elem);
extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
extern void ata_id_string(const u16 *id, unsigned char *s,
			  unsigned int ofs, unsigned int len);
extern void ata_id_c_string(const u16 *id, unsigned char *s,
			    unsigned int ofs, unsigned int len);
extern void ata_bmdma_setup (struct ata_queued_cmd *qc);
extern void ata_bmdma_start (struct ata_queued_cmd *qc);
extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
extern u8   ata_bmdma_status(struct ata_port *ap);
extern void ata_bmdma_irq_clear(struct ata_port *ap);
extern void ata_bmdma_freeze(struct ata_port *ap);
extern void ata_bmdma_thaw(struct ata_port *ap);
extern void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
			       ata_reset_fn_t softreset,
			       ata_reset_fn_t hardreset,
			       ata_postreset_fn_t postreset);
extern void ata_bmdma_error_handler(struct ata_port *ap);
extern void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc);
extern int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
			u8 status, int in_wq);
extern void ata_qc_complete(struct ata_queued_cmd *qc);
extern int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
				    void (*finish_qc)(struct ata_queued_cmd *));
extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd,
			      void (*done)(struct scsi_cmnd *));
extern int ata_std_bios_param(struct scsi_device *sdev,
			      struct block_device *bdev,
			      sector_t capacity, int geom[]);
extern int ata_scsi_slave_config(struct scsi_device *sdev);
extern void ata_scsi_slave_destroy(struct scsi_device *sdev);
extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
				       int queue_depth);
extern struct ata_device *ata_dev_pair(struct ata_device *adev);

/*
 * Timing helpers
 */

extern unsigned int ata_pio_need_iordy(const struct ata_device *);
extern int ata_timing_compute(struct ata_device *, unsigned short,
			      struct ata_timing *, int, int);
extern void ata_timing_merge(const struct ata_timing *,
			     const struct ata_timing *, struct ata_timing *,
			     unsigned int);

enum {
	ATA_TIMING_SETUP	= (1 << 0),
	ATA_TIMING_ACT8B	= (1 << 1),
	ATA_TIMING_REC8B	= (1 << 2),
	ATA_TIMING_CYC8B	= (1 << 3),
	ATA_TIMING_8BIT		= ATA_TIMING_ACT8B | ATA_TIMING_REC8B |
				  ATA_TIMING_CYC8B,
	ATA_TIMING_ACTIVE	= (1 << 4),
	ATA_TIMING_RECOVER	= (1 << 5),
	ATA_TIMING_CYCLE	= (1 << 6),
	ATA_TIMING_UDMA		= (1 << 7),
	ATA_TIMING_ALL		= ATA_TIMING_SETUP | ATA_TIMING_ACT8B |
				  ATA_TIMING_REC8B | ATA_TIMING_CYC8B |
				  ATA_TIMING_ACTIVE | ATA_TIMING_RECOVER |
				  ATA_TIMING_CYCLE | ATA_TIMING_UDMA,
};


#ifdef CONFIG_PCI
struct pci_bits {
	unsigned int		reg;	/* PCI config register to read */
	unsigned int		width;	/* 1 (8 bit), 2 (16 bit), 4 (32 bit) */
	unsigned long		mask;
	unsigned long		val;
};

extern void ata_pci_host_stop (struct ata_host_set *host_set);
extern struct ata_probe_ent *
ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int portmask);
extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);
extern unsigned long ata_pci_default_filter(const struct ata_port *, struct ata_device *, unsigned long);
#endif /* CONFIG_PCI */
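
/*
 * Illustrative sketch (hypothetical "foo" PCI BMDMA driver): a minimal
 * ata_port_operations table wired to the default helpers declared above.
 * Real drivers override whichever hooks their hardware needs; this
 * selection is only an example, not a required set.
 */
#if 0
static const struct ata_port_operations foo_port_ops = {
	.port_disable		= ata_port_disable,
	.tf_load		= ata_tf_load,
	.tf_read		= ata_tf_read,
	.check_status		= ata_check_status,
	.exec_command		= ata_exec_command,
	.dev_select		= ata_std_dev_select,
	.bmdma_setup		= ata_bmdma_setup,
	.bmdma_start		= ata_bmdma_start,
	.bmdma_stop		= ata_bmdma_stop,
	.bmdma_status		= ata_bmdma_status,
	.qc_prep		= ata_qc_prep,
	.qc_issue		= ata_qc_issue_prot,
	.data_xfer		= ata_pio_data_xfer,
	.freeze			= ata_bmdma_freeze,
	.thaw			= ata_bmdma_thaw,
	.error_handler		= ata_bmdma_error_handler,
	.post_internal_cmd	= ata_bmdma_post_internal_cmd,
	.irq_handler		= ata_interrupt,
	.irq_clear		= ata_bmdma_irq_clear,
	.port_start		= ata_port_start,
	.port_stop		= ata_port_stop,
	.host_stop		= ata_host_stop,
};
#endif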

/*
 * EH
 */
extern void ata_eng_timeout(struct ata_port *ap);

extern void ata_port_schedule_eh(struct ata_port *ap);
extern int ata_port_abort(struct ata_port *ap);
extern int ata_port_freeze(struct ata_port *ap);

extern void ata_eh_freeze_port(struct ata_port *ap);
extern void ata_eh_thaw_port(struct ata_port *ap);

extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);

extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
		      ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
		      ata_postreset_fn_t postreset);

/*
 * printk helpers
 */
#define ata_port_printk(ap, lv, fmt, args...) \
	printk(lv"ata%u: "fmt, (ap)->id , ##args)

#define ata_dev_printk(dev, lv, fmt, args...) \
	printk(lv"ata%u.%02u: "fmt, (dev)->ap->id, (dev)->devno , ##args)

/*
 * ata_eh_info helpers
 */
#define ata_ehi_push_desc(ehi, fmt, args...) do { \
	(ehi)->desc_len += scnprintf((ehi)->desc + (ehi)->desc_len, \
				     ATA_EH_DESC_LEN - (ehi)->desc_len, \
				     fmt , ##args); \
} while (0)

#define ata_ehi_clear_desc(ehi) do { \
	(ehi)->desc[0] = '\0'; \
	(ehi)->desc_len = 0; \
} while (0)

static inline void __ata_ehi_hotplugged(struct ata_eh_info *ehi)
{
	if (ehi->flags & ATA_EHI_HOTPLUGGED)
		return;

	ehi->flags |= ATA_EHI_HOTPLUGGED | ATA_EHI_RESUME_LINK;
	ehi->hotplug_timestamp = jiffies;

	ehi->action |= ATA_EH_SOFTRESET;
	ehi->probe_mask |= (1 << ATA_MAX_DEVICES) - 1;
}

static inline void ata_ehi_hotplugged(struct ata_eh_info *ehi)
{
	__ata_ehi_hotplugged(ehi);
	ehi->err_mask |= AC_ERR_ATA_BUS;
}

/*
 * qc helpers
 */
static inline int
ata_sg_is_last(struct scatterlist *sg, struct ata_queued_cmd *qc)
{
	if (sg == &qc->pad_sgent)
		return 1;
	if (qc->pad_len)
		return 0;
	if (((sg - qc->__sg) + 1) == qc->n_elem)
		return 1;
	return 0;
}

static inline struct scatterlist *
ata_qc_first_sg(struct ata_queued_cmd *qc)
{
	if (qc->n_elem)
		return qc->__sg;
	if (qc->pad_len)
		return &qc->pad_sgent;
	return NULL;
}

static inline struct scatterlist *
ata_qc_next_sg(struct scatterlist *sg, struct ata_queued_cmd *qc)
{
	if (sg == &qc->pad_sgent)
		return NULL;
	if (++sg - qc->__sg < qc->n_elem)
		return sg;
	if (qc->pad_len)
		return &qc->pad_sgent;
	return NULL;
}

#define ata_for_each_sg(sg, qc) \
	for (sg = ata_qc_first_sg(qc); sg; sg = ata_qc_next_sg(sg, qc))
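
/*
 * Illustrative sketch: the intended way to walk a qc's scatterlist with
 * ata_for_each_sg() from a hypothetical ->qc_prep hook.  It also visits the
 * DMA pad entry when one is in use, which is why iterating over qc->__sg by
 * hand is discouraged above.  foo_write_prd() is a made-up hardware hook.
 */
#if 0
static void foo_qc_prep(struct ata_queued_cmd *qc)
{
	struct scatterlist *sg;
	unsigned int idx = 0;

	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	ata_for_each_sg(sg, qc) {
		dma_addr_t addr = sg_dma_address(sg);
		u32 len = sg_dma_len(sg);

		foo_write_prd(qc->ap, idx++, addr, len);
	}
}
#endif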

static inline unsigned int ata_tag_valid(unsigned int tag)
{
	return (tag < ATA_MAX_QUEUE) ? 1 : 0;
}

static inline unsigned int ata_tag_internal(unsigned int tag)
{
	return tag == ATA_MAX_QUEUE - 1;
}

/*
 * device helpers
 */
static inline unsigned int ata_class_enabled(unsigned int class)
{
	return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI;
}

static inline unsigned int ata_class_disabled(unsigned int class)
{
	return class == ATA_DEV_ATA_UNSUP || class == ATA_DEV_ATAPI_UNSUP;
}

static inline unsigned int ata_class_absent(unsigned int class)
{
	return !ata_class_enabled(class) && !ata_class_disabled(class);
}

static inline unsigned int ata_dev_enabled(const struct ata_device *dev)
{
	return ata_class_enabled(dev->class);
}

static inline unsigned int ata_dev_disabled(const struct ata_device *dev)
{
	return ata_class_disabled(dev->class);
}

static inline unsigned int ata_dev_absent(const struct ata_device *dev)
{
	return ata_class_absent(dev->class);
}

static inline unsigned int ata_dev_ready(const struct ata_device *dev)
{
	return ata_dev_enabled(dev) && !(dev->flags & ATA_DFLAG_SUSPENDED);
}

/*
 * port helpers
 */
static inline int ata_port_max_devices(const struct ata_port *ap)
{
	if (ap->flags & ATA_FLAG_SLAVE_POSS)
		return 2;
	return 1;
}


static inline u8 ata_chk_status(struct ata_port *ap)
{
	return ap->ops->check_status(ap);
}


/**
 *	ata_pause - Flush writes and pause 400 nanoseconds.
 *	@ap: Port to wait for.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static inline void ata_pause(struct ata_port *ap)
{
	ata_altstatus(ap);
	ndelay(400);
}


/**
 *	ata_busy_wait - Wait for a port status register
 *	@ap: Port to wait for.
 *
 *	Waits up to max*10 microseconds for the selected bits in the port's
 *	status register to be cleared.
 *	Returns final value of status register.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static inline u8 ata_busy_wait(struct ata_port *ap, unsigned int bits,
			       unsigned int max)
{
	u8 status;

	do {
		udelay(10);
		status = ata_chk_status(ap);
		max--;
	} while ((status & bits) && (max > 0));

	return status;
}


/**
 *	ata_wait_idle - Wait for a port to be idle.
 *	@ap: Port to wait for.
 *
 *	Waits up to 10ms for port's BUSY and DRQ signals to clear.
 *	Returns final value of status register.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static inline u8 ata_wait_idle(struct ata_port *ap)
{
	u8 status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);

	if (status & (ATA_BUSY | ATA_DRQ)) {
		unsigned long l = ap->ioaddr.status_addr;
		if (ata_msg_warn(ap))
			printk(KERN_WARNING "ATA: abnormal status 0x%X on port 0x%lX\n",
			       status, l);
	}

	return status;
}

static inline void ata_qc_set_polling(struct ata_queued_cmd *qc)
{
	qc->tf.ctl |= ATA_NIEN;
}

static inline struct ata_queued_cmd *__ata_qc_from_tag(struct ata_port *ap,
						       unsigned int tag)
{
	if (likely(ata_tag_valid(tag)))
		return &ap->qcmd[tag];
	return NULL;
}

static inline struct ata_queued_cmd *ata_qc_from_tag(struct ata_port *ap,
						     unsigned int tag)
{
	struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

	if (unlikely(!qc) || !ap->ops->error_handler)
		return qc;

	if ((qc->flags & (ATA_QCFLAG_ACTIVE |
			  ATA_QCFLAG_FAILED)) == ATA_QCFLAG_ACTIVE)
		return qc;

	return NULL;
}

static inline void ata_tf_init(struct ata_device *dev, struct ata_taskfile *tf)
{
	memset(tf, 0, sizeof(*tf));

	tf->ctl = dev->ap->ctl;
	if (dev->devno == 0)
		tf->device = ATA_DEVICE_OBS;
	else
		tf->device = ATA_DEVICE_OBS | ATA_DEV1;
}

static inline void ata_qc_reinit(struct ata_queued_cmd *qc)
{
	qc->__sg = NULL;
	qc->flags = 0;
	qc->cursect = qc->cursg = qc->cursg_ofs = 0;
	qc->nsect = 0;
	qc->nbytes = qc->curbytes = 0;
	qc->err_mask = 0;

	ata_tf_init(qc->dev, &qc->tf);

	/* init result_tf such that it indicates normal completion */
	qc->result_tf.command = ATA_DRDY;
	qc->result_tf.feature = 0;
}
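
/*
 * Illustrative sketch (hypothetical "foo" LLD interrupt handler): the usual
 * pattern of looking up the active command with ata_qc_from_tag() - which
 * filters out commands already owned by EH - and handing it to
 * ata_host_intr().  Locking and port iteration roughly mirror what
 * ata_interrupt() does; details vary per driver.
 */
#if 0
static irqreturn_t foo_interrupt(int irq, void *dev_instance,
				 struct pt_regs *regs)
{
	struct ata_host_set *host_set = dev_instance;
	unsigned int i, handled = 0;
	unsigned long flags;

	spin_lock_irqsave(&host_set->lock, flags);

	for (i = 0; i < host_set->n_ports; i++) {
		struct ata_port *ap = host_set->ports[i];
		struct ata_queued_cmd *qc;

		if (!ap || (ap->flags & ATA_FLAG_DISABLED))
			continue;

		qc = ata_qc_from_tag(ap, ap->active_tag);
		if (qc)
			handled |= ata_host_intr(ap, qc);
	}

	spin_unlock_irqrestore(&host_set->lock, flags);

	return IRQ_RETVAL(handled);
}
#endif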

/**
 *	ata_irq_on - Enable interrupts on a port.
 *	@ap: Port on which interrupts are enabled.
 *
 *	Enable interrupts on a legacy IDE device using MMIO or PIO,
 *	wait for idle, clear any pending interrupts.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static inline u8 ata_irq_on(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 tmp;

	ap->ctl &= ~ATA_NIEN;
	ap->last_ctl = ap->ctl;

	if (ap->flags & ATA_FLAG_MMIO)
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
	else
		outb(ap->ctl, ioaddr->ctl_addr);
	tmp = ata_wait_idle(ap);

	ap->ops->irq_clear(ap);

	return tmp;
}


/**
 *	ata_irq_ack - Acknowledge a device interrupt.
 *	@ap: Port on which interrupts are enabled.
 *
 *	Wait up to 10 ms for legacy IDE device to become idle (BUSY
 *	or BUSY+DRQ clear).  Obtain dma status and port status from
 *	device.  Clear the interrupt.  Return port status.
 *
 *	LOCKING:
 */

static inline u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq)
{
	unsigned int bits = chk_drq ? ATA_BUSY | ATA_DRQ : ATA_BUSY;
	u8 host_stat, post_stat, status;

	status = ata_busy_wait(ap, bits, 1000);
	if (status & bits)
		if (ata_msg_err(ap))
			printk(KERN_ERR "abnormal status 0x%X\n", status);

	/* get controller status; clear intr, err bits */
	if (ap->flags & ATA_FLAG_MMIO) {
		void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
		host_stat = readb(mmio + ATA_DMA_STATUS);
		writeb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
		       mmio + ATA_DMA_STATUS);

		post_stat = readb(mmio + ATA_DMA_STATUS);
	} else {
		host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
		outb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
		     ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);

		post_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
	}

	if (ata_msg_intr(ap))
		printk(KERN_INFO "%s: irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n",
		       __FUNCTION__,
		       host_stat, post_stat, status);

	return status;
}

static inline int ata_try_flush_cache(const struct ata_device *dev)
{
	return ata_id_wcache_enabled(dev->id) ||
	       ata_id_has_flush(dev->id) ||
	       ata_id_has_flush_ext(dev->id);
}

static inline unsigned int ac_err_mask(u8 status)
{
	if (status & (ATA_BUSY | ATA_DRQ))
		return AC_ERR_HSM;
	if (status & (ATA_ERR | ATA_DF))
		return AC_ERR_DEV;
	return 0;
}

static inline unsigned int __ac_err_mask(u8 status)
{
	unsigned int mask = ac_err_mask(status);
	if (mask == 0)
		return AC_ERR_OTHER;
	return mask;
}

static inline int ata_pad_alloc(struct ata_port *ap, struct device *dev)
{
	ap->pad_dma = 0;
	ap->pad = dma_alloc_coherent(dev, ATA_DMA_PAD_BUF_SZ,
				     &ap->pad_dma, GFP_KERNEL);
	return (ap->pad == NULL) ? -ENOMEM : 0;
}

static inline void ata_pad_free(struct ata_port *ap, struct device *dev)
{
	dma_free_coherent(dev, ATA_DMA_PAD_BUF_SZ, ap->pad, ap->pad_dma);
}

static inline struct ata_port *ata_shost_to_port(struct Scsi_Host *host)
{
	return (struct ata_port *) &host->hostdata[0];
}

#endif /* __LINUX_LIBATA_H__ */