libata-sff: Fix oops reported in kerneloops.org for pnp devices with no ctl

- Make ata_sff_altstatus private so nobody uses it by mistake
- Drop the 400ns delay from it

Add

ata_sff_irq_status - encapsulates the IRQ check logic

This function keeps the existing behaviour for altstatus-using devices. I
actually suspect the logic was wrong before the changes, but -rc isn't the
time to play with that.

ata_sff_sync - ensure writes hit the device

Really we want an io* operation for 'is posted', e.g. ioisposted(ioaddr), so
that we can fix the nasty delay this causes on most systems.
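
If such a helper existed, ata_sff_sync could skip the flush read entirely on
buses that don't post writes. Roughly the idea (ioisposted() is hypothetical
and does not exist today, this is only a sketch):

	static void ata_sff_sync(struct ata_port *ap)
	{
		if (ap->ops->sff_check_altstatus)
			ap->ops->sff_check_altstatus(ap);
		else if (ap->ioaddr.altstatus_addr &&
			 ioisposted(ap->ioaddr.altstatus_addr))
			/* read back only if the write may still be posted */
			ioread8(ap->ioaddr.altstatus_addr);
	}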

ata_sff_pause - 400ns delay

Ensure the command has hit the device and delay 400ns.
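
For context, a typical caller pairs the pause with a taskfile register write,
roughly as ata_sff_dev_select does (illustrative only, not part of this
patch):

	/* select device 0 or 1, then let things settle before the
	   next register access */
	iowrite8(device ? ATA_DEVICE_OBS | ATA_DEV1 : ATA_DEVICE_OBS,
		 ap->ioaddr.device_addr);
	ata_sff_pause(ap);	/* flush the write and wait 400ns */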

ata_sff_dma_pause

Ensure the I/O has hit the device and enforce the HDMA1:0 transition delay.
This requires that the altstatus register exists; BUG if not, so we don't
risk corruption in MWDMA modes. (With UDMA the checksum will save your
backside, in theory.)

The only other complication then is devices with their own handlers.
rb532 can use dma_pause, but scc needs to access its own altstatus
register for internal errata workarounds, so it directly calls the
driver's own altstatus function.

Signed-off-by: Alan Cox <alan@redhat.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>


---
 drivers/ata/libata-sff.c    | +100 -15
 drivers/ata/pata_icside.c   |   +1  -1
 drivers/ata/pata_rb532_cf.c |   +3  -1
 drivers/ata/pata_scc.c      |   +3  -2
 include/linux/libata.h      |   +2 -14
 5 files changed, 109 insertions(+), 33 deletions(-)

--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -247,12 +247,99 @@
  *	LOCKING:
  *	Inherited from caller.
  */
-u8 ata_sff_altstatus(struct ata_port *ap)
+static u8 ata_sff_altstatus(struct ata_port *ap)
 {
 	if (ap->ops->sff_check_altstatus)
 		return ap->ops->sff_check_altstatus(ap);
 
 	return ioread8(ap->ioaddr.altstatus_addr);
+}
+
+/**
+ *	ata_sff_irq_status - Check if the device is busy
+ *	@ap: port where the device is
+ *
+ *	Determine if the port is currently busy. Uses altstatus
+ *	if available in order to avoid clearing shared IRQ status
+ *	when finding an IRQ source. Non ctl capable devices don't
+ *	share interrupt lines fortunately for us.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+static u8 ata_sff_irq_status(struct ata_port *ap)
+{
+	u8 status;
+
+	if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
+		status = ata_sff_altstatus(ap);
+		/* Not us: We are busy */
+		if (status & ATA_BUSY)
+			return status;
+	}
+	/* Clear INTRQ latch */
+	status = ata_sff_check_status(ap);
+	return status;
+}
+
+/**
+ *	ata_sff_sync - Flush writes
+ *	@ap: Port to wait for.
+ *
+ *	CAUTION:
+ *	If we have an mmio device with no ctl and no altstatus
+ *	method this will fail. No such devices are known to exist.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+static void ata_sff_sync(struct ata_port *ap)
+{
+	if (ap->ops->sff_check_altstatus)
+		ap->ops->sff_check_altstatus(ap);
+	else if (ap->ioaddr.altstatus_addr)
+		ioread8(ap->ioaddr.altstatus_addr);
+}
+
+/**
+ *	ata_sff_pause - Flush writes and wait 400nS
+ *	@ap: Port to pause for.
+ *
+ *	CAUTION:
+ *	If we have an mmio device with no ctl and no altstatus
+ *	method this will fail. No such devices are known to exist.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+void ata_sff_pause(struct ata_port *ap)
+{
+	ata_sff_sync(ap);
+	ndelay(400);
+}
+
+/**
+ *	ata_sff_dma_pause - Pause before commencing DMA
+ *	@ap: Port to pause for.
+ *
+ *	Perform I/O fencing and ensure sufficient cycle delays occur
+ *	for the HDMA1:0 transition
+ */
+
+void ata_sff_dma_pause(struct ata_port *ap)
+{
+	if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
+		/* An altstatus read will cause the needed delay without
+		   messing up the IRQ status */
+		ata_sff_altstatus(ap);
+		return;
+	}
+	/* There are no DMA controllers without ctl. BUG here to ensure
+	   we never violate the HDMA1:0 transition timing and risk
+	   corruption. */
+	BUG();
 }
 
 /**
@@ -829,7 +742,7 @@
 	} else
 		ata_pio_sector(qc);
 
-	ata_sff_altstatus(qc->ap); /* flush */
+	ata_sff_sync(qc->ap); /* flush */
 }
 
 /**
@@ -850,8 +763,9 @@
 	WARN_ON(qc->dev->cdb_len < 12);
 
 	ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
-	ata_sff_altstatus(ap); /* flush */
-
+	ata_sff_sync(ap);
+	/* FIXME: If the CDB is for DMA do we need to do the transition delay
+	   or is bmdma_start guaranteed to do it ? */
 	switch (qc->tf.protocol) {
 	case ATAPI_PROT_PIO:
 		ap->hsm_task_state = HSM_ST;
@@ -993,7 +905,7 @@
 
 	if (unlikely(__atapi_pio_bytes(qc, bytes)))
 		goto err_out;
-	ata_sff_altstatus(ap); /* flush */
+	ata_sff_sync(ap); /* flush */
 
 	return;
 
@@ -1577,14 +1489,10 @@
 		goto idle_irq;
 	}
 
-	/* check altstatus */
-	status = ata_sff_altstatus(ap);
-	if (status & ATA_BUSY)
-		goto idle_irq;
 
-	/* check main status, clearing INTRQ */
-	status = ap->ops->sff_check_status(ap);
-	if (unlikely(status & ATA_BUSY))
+	/* check main status, clearing INTRQ if needed */
+	status = ata_sff_irq_status(ap);
+	if (status & ATA_BUSY)
 		goto idle_irq;
 
 	/* ack bmdma irq events */
@@ -2114,7 +2030,7 @@
 		ap->ops->bmdma_stop(qc);
 	}
 
-	ata_sff_altstatus(ap);
+	ata_sff_sync(ap);		/* FIXME: We don't need this */
 	ap->ops->sff_check_status(ap);
 	ap->ops->sff_irq_clear(ap);
 
@@ -2287,7 +2203,7 @@
 		     mmio + ATA_DMA_CMD);
 
 	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
-	ata_sff_altstatus(ap);	/* dummy read */
+	ata_sff_dma_pause(ap);
 }
 
 /**
@@ -2806,7 +2722,8 @@
 EXPORT_SYMBOL_GPL(ata_sff_dumb_qc_prep);
 EXPORT_SYMBOL_GPL(ata_sff_dev_select);
 EXPORT_SYMBOL_GPL(ata_sff_check_status);
-EXPORT_SYMBOL_GPL(ata_sff_altstatus);
+EXPORT_SYMBOL_GPL(ata_sff_dma_pause);
+EXPORT_SYMBOL_GPL(ata_sff_pause);
 EXPORT_SYMBOL_GPL(ata_sff_busy_sleep);
 EXPORT_SYMBOL_GPL(ata_sff_wait_ready);
 EXPORT_SYMBOL_GPL(ata_sff_tf_load);
--- a/drivers/ata/pata_icside.c
+++ b/drivers/ata/pata_icside.c
@@ -270,7 +270,7 @@
 	disable_dma(state->dma);
 
 	/* see ata_bmdma_stop */
-	ata_sff_altstatus(ap);
+	ata_sff_dma_pause(ap);
 }
 
 static u8 pata_icside_bmdma_status(struct ata_port *ap)
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -57,7 +57,9 @@
 	struct ata_host *ah = ap->host;
 	struct rb532_cf_info *info = ah->private_data;
 
-	ata_sff_altstatus(ap);
+	/* FIXME: Keep previous delay. If this is merely a fence then
+	   ata_sff_sync might be sufficient. */
+	ata_sff_dma_pause(ap);
 	ndelay(RB500_CF_IO_DELAY);
 
 	set_irq_type(info->irq, IRQ_TYPE_LEVEL_HIGH);
--- a/drivers/ata/pata_scc.c
+++ b/drivers/ata/pata_scc.c
@@ -726,7 +726,7 @@
 		 in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
 
 	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
-	ata_sff_altstatus(ap);	/* dummy read */
+	ata_sff_dma_pause(ap);	/* dummy read */
 }
 
 /**
@@ -747,7 +747,8 @@
 		return host_stat;
 
 	/* errata A252,A308 workaround: Step4 */
-	if ((ata_sff_altstatus(ap) & ATA_ERR) && (int_status & INTSTS_INTRQ))
+	if ((scc_check_altstatus(ap) & ATA_ERR)
+					&& (int_status & INTSTS_INTRQ))
 		return (host_stat | ATA_DMA_INTR);
 
 	/* errata A308 workaround Step5 */
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -1432,7 +1432,8 @@
 extern void ata_sff_dumb_qc_prep(struct ata_queued_cmd *qc);
 extern void ata_sff_dev_select(struct ata_port *ap, unsigned int device);
 extern u8 ata_sff_check_status(struct ata_port *ap);
-extern u8 ata_sff_altstatus(struct ata_port *ap);
+extern void ata_sff_pause(struct ata_port *ap);
+extern void ata_sff_dma_pause(struct ata_port *ap);
 extern int ata_sff_busy_sleep(struct ata_port *ap,
 			      unsigned long timeout_pat, unsigned long timeout);
 extern int ata_sff_wait_ready(struct ata_link *link, unsigned long deadline);
@@ -1492,19 +1491,6 @@
 				      const struct ata_port_info * const * ppi,
 				      struct scsi_host_template *sht, void *host_priv);
 #endif /* CONFIG_PCI */
-
-/**
- *	ata_sff_pause - Flush writes and pause 400 nanoseconds.
- *	@ap: Port to wait for.
- *
- *	LOCKING:
- *	Inherited from caller.
- */
-static inline void ata_sff_pause(struct ata_port *ap)
-{
-	ata_sff_altstatus(ap);
-	ndelay(400);
-}
 
 /**
  *	ata_sff_busy_wait - Wait for a port status register