Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ata: make qc_prep return ata_completion_errors

In case a driver wants to return an error from qc_prep, return enum
ata_completion_errors. sata_mv is one of those drivers -- see the next
patch. Other drivers return the newly defined AC_ERR_OK.

[v2] use enum ata_completion_errors and AC_ERR_OK.

Signed-off-by: Jiri Slaby <jslaby@suse.cz>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: linux-ide@vger.kernel.org
Signed-off-by: Jens Axboe <axboe@kernel.dk>

Authored by Jiri Slaby and committed by Jens Axboe
95364f36 25937580

+101 -59
+1 -1
Documentation/driver-api/libata.rst
··· 250 250 251 251 :: 252 252 253 - void (*qc_prep) (struct ata_queued_cmd *qc); 253 + enum ata_completion_errors (*qc_prep) (struct ata_queued_cmd *qc); 254 254 int (*qc_issue) (struct ata_queued_cmd *qc); 255 255 256 256
+4 -2
drivers/ata/acard-ahci.c
··· 56 56 __le32 size; /* bit 31 (EOT) max==0x10000 (64k) */ 57 57 }; 58 58 59 - static void acard_ahci_qc_prep(struct ata_queued_cmd *qc); 59 + static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc); 60 60 static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc); 61 61 static int acard_ahci_port_start(struct ata_port *ap); 62 62 static int acard_ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); ··· 210 210 return si; 211 211 } 212 212 213 - static void acard_ahci_qc_prep(struct ata_queued_cmd *qc) 213 + static enum ata_completion_errors acard_ahci_qc_prep(struct ata_queued_cmd *qc) 214 214 { 215 215 struct ata_port *ap = qc->ap; 216 216 struct ahci_port_priv *pp = ap->private_data; ··· 248 248 opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH; 249 249 250 250 ahci_fill_cmd_slot(pp, qc->hw_tag, opts); 251 + 252 + return AC_ERR_OK; 251 253 } 252 254 253 255 static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
+4 -2
drivers/ata/libahci.c
··· 57 57 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc); 58 58 static int ahci_port_start(struct ata_port *ap); 59 59 static void ahci_port_stop(struct ata_port *ap); 60 - static void ahci_qc_prep(struct ata_queued_cmd *qc); 60 + static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc); 61 61 static int ahci_pmp_qc_defer(struct ata_queued_cmd *qc); 62 62 static void ahci_freeze(struct ata_port *ap); 63 63 static void ahci_thaw(struct ata_port *ap); ··· 1624 1624 return sata_pmp_qc_defer_cmd_switch(qc); 1625 1625 } 1626 1626 1627 - static void ahci_qc_prep(struct ata_queued_cmd *qc) 1627 + static enum ata_completion_errors ahci_qc_prep(struct ata_queued_cmd *qc) 1628 1628 { 1629 1629 struct ata_port *ap = qc->ap; 1630 1630 struct ahci_port_priv *pp = ap->private_data; ··· 1660 1660 opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH; 1661 1661 1662 1662 ahci_fill_cmd_slot(pp, qc->hw_tag, opts); 1663 + 1664 + return AC_ERR_OK; 1663 1665 } 1664 1666 1665 1667 static void ahci_fbs_dec_intr(struct ata_port *ap)
+7 -2
drivers/ata/libata-core.c
··· 4980 4980 return ATA_DEFER_LINK; 4981 4981 } 4982 4982 4983 - void ata_noop_qc_prep(struct ata_queued_cmd *qc) { } 4983 + enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc) 4984 + { 4985 + return AC_ERR_OK; 4986 + } 4984 4987 4985 4988 /** 4986 4989 * ata_sg_init - Associate command with scatter-gather table. ··· 5446 5443 return; 5447 5444 } 5448 5445 5449 - ap->ops->qc_prep(qc); 5446 + qc->err_mask |= ap->ops->qc_prep(qc); 5447 + if (unlikely(qc->err_mask)) 5448 + goto err; 5450 5449 trace_ata_qc_issue(qc); 5451 5450 qc->err_mask |= ap->ops->qc_issue(qc); 5452 5451 if (unlikely(qc->err_mask))
+8 -4
drivers/ata/libata-sff.c
··· 2679 2679 * LOCKING: 2680 2680 * spin_lock_irqsave(host lock) 2681 2681 */ 2682 - void ata_bmdma_qc_prep(struct ata_queued_cmd *qc) 2682 + enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc) 2683 2683 { 2684 2684 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) 2685 - return; 2685 + return AC_ERR_OK; 2686 2686 2687 2687 ata_bmdma_fill_sg(qc); 2688 + 2689 + return AC_ERR_OK; 2688 2690 } 2689 2691 EXPORT_SYMBOL_GPL(ata_bmdma_qc_prep); 2690 2692 ··· 2699 2697 * LOCKING: 2700 2698 * spin_lock_irqsave(host lock) 2701 2699 */ 2702 - void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc) 2700 + enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc) 2703 2701 { 2704 2702 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) 2705 - return; 2703 + return AC_ERR_OK; 2706 2704 2707 2705 ata_bmdma_fill_sg_dumb(qc); 2706 + 2707 + return AC_ERR_OK; 2708 2708 } 2709 2709 EXPORT_SYMBOL_GPL(ata_bmdma_dumb_qc_prep); 2710 2710
+4 -2
drivers/ata/pata_macio.c
··· 510 510 return ATA_CBL_PATA40; 511 511 } 512 512 513 - static void pata_macio_qc_prep(struct ata_queued_cmd *qc) 513 + static enum ata_completion_errors pata_macio_qc_prep(struct ata_queued_cmd *qc) 514 514 { 515 515 unsigned int write = (qc->tf.flags & ATA_TFLAG_WRITE); 516 516 struct ata_port *ap = qc->ap; ··· 523 523 __func__, qc, qc->flags, write, qc->dev->devno); 524 524 525 525 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) 526 - return; 526 + return AC_ERR_OK; 527 527 528 528 table = (struct dbdma_cmd *) priv->dma_table_cpu; 529 529 ··· 568 568 table->command = cpu_to_le16(DBDMA_STOP); 569 569 570 570 dev_dbgdma(priv->dev, "%s: %d DMA list entries\n", __func__, pi); 571 + 572 + return AC_ERR_OK; 571 573 } 572 574 573 575
+5 -3
drivers/ata/pata_pxa.c
··· 44 44 /* 45 45 * Prepare taskfile for submission. 46 46 */ 47 - static void pxa_qc_prep(struct ata_queued_cmd *qc) 47 + static enum ata_completion_errors pxa_qc_prep(struct ata_queued_cmd *qc) 48 48 { 49 49 struct pata_pxa_data *pd = qc->ap->private_data; 50 50 struct dma_async_tx_descriptor *tx; 51 51 enum dma_transfer_direction dir; 52 52 53 53 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) 54 - return; 54 + return AC_ERR_OK; 55 55 56 56 dir = (qc->dma_dir == DMA_TO_DEVICE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM); 57 57 tx = dmaengine_prep_slave_sg(pd->dma_chan, qc->sg, qc->n_elem, dir, 58 58 DMA_PREP_INTERRUPT); 59 59 if (!tx) { 60 60 ata_dev_err(qc->dev, "prep_slave_sg() failed\n"); 61 - return; 61 + return AC_ERR_OK; 62 62 } 63 63 tx->callback = pxa_ata_dma_irq; 64 64 tx->callback_param = pd; 65 65 pd->dma_cookie = dmaengine_submit(tx); 66 + 67 + return AC_ERR_OK; 66 68 } 67 69 68 70 /*
+4 -3
drivers/ata/pdc_adma.c
··· 116 116 const struct pci_device_id *ent); 117 117 static int adma_port_start(struct ata_port *ap); 118 118 static void adma_port_stop(struct ata_port *ap); 119 - static void adma_qc_prep(struct ata_queued_cmd *qc); 119 + static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc); 120 120 static unsigned int adma_qc_issue(struct ata_queued_cmd *qc); 121 121 static int adma_check_atapi_dma(struct ata_queued_cmd *qc); 122 122 static void adma_freeze(struct ata_port *ap); ··· 295 295 return i; 296 296 } 297 297 298 - static void adma_qc_prep(struct ata_queued_cmd *qc) 298 + static enum ata_completion_errors adma_qc_prep(struct ata_queued_cmd *qc) 299 299 { 300 300 struct adma_port_priv *pp = qc->ap->private_data; 301 301 u8 *buf = pp->pkt; ··· 306 306 307 307 adma_enter_reg_mode(qc->ap); 308 308 if (qc->tf.protocol != ATA_PROT_DMA) 309 - return; 309 + return AC_ERR_OK; 310 310 311 311 buf[i++] = 0; /* Response flags */ 312 312 buf[i++] = 0; /* reserved */ ··· 371 371 printk("%s\n", obuf); 372 372 } 373 373 #endif 374 + return AC_ERR_OK; 374 375 } 375 376 376 377 static inline void adma_packet_start(struct ata_queued_cmd *qc)
+3 -1
drivers/ata/sata_fsl.c
··· 502 502 return num_prde; 503 503 } 504 504 505 - static void sata_fsl_qc_prep(struct ata_queued_cmd *qc) 505 + static enum ata_completion_errors sata_fsl_qc_prep(struct ata_queued_cmd *qc) 506 506 { 507 507 struct ata_port *ap = qc->ap; 508 508 struct sata_fsl_port_priv *pp = ap->private_data; ··· 548 548 549 549 VPRINTK("SATA FSL : xx_qc_prep, di = 0x%x, ttl = %d, num_prde = %d\n", 550 550 desc_info, ttl_dwords, num_prde); 551 + 552 + return AC_ERR_OK; 551 553 } 552 554 553 555 static unsigned int sata_fsl_qc_issue(struct ata_queued_cmd *qc)
+3 -1
drivers/ata/sata_inic162x.c
··· 478 478 prd[-1].flags |= PRD_END; 479 479 } 480 480 481 - static void inic_qc_prep(struct ata_queued_cmd *qc) 481 + static enum ata_completion_errors inic_qc_prep(struct ata_queued_cmd *qc) 482 482 { 483 483 struct inic_port_priv *pp = qc->ap->private_data; 484 484 struct inic_pkt *pkt = pp->pkt; ··· 538 538 inic_fill_sg(prd, qc); 539 539 540 540 pp->cpb_tbl[0] = pp->pkt_dma; 541 + 542 + return AC_ERR_OK; 541 543 } 542 544 543 545 static unsigned int inic_qc_issue(struct ata_queued_cmd *qc)
+15 -11
drivers/ata/sata_mv.c
··· 592 592 static int mv_port_start(struct ata_port *ap); 593 593 static void mv_port_stop(struct ata_port *ap); 594 594 static int mv_qc_defer(struct ata_queued_cmd *qc); 595 - static void mv_qc_prep(struct ata_queued_cmd *qc); 596 - static void mv_qc_prep_iie(struct ata_queued_cmd *qc); 595 + static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc); 596 + static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc); 597 597 static unsigned int mv_qc_issue(struct ata_queued_cmd *qc); 598 598 static int mv_hardreset(struct ata_link *link, unsigned int *class, 599 599 unsigned long deadline); ··· 2031 2031 * LOCKING: 2032 2032 * Inherited from caller. 2033 2033 */ 2034 - static void mv_qc_prep(struct ata_queued_cmd *qc) 2034 + static enum ata_completion_errors mv_qc_prep(struct ata_queued_cmd *qc) 2035 2035 { 2036 2036 struct ata_port *ap = qc->ap; 2037 2037 struct mv_port_priv *pp = ap->private_data; ··· 2043 2043 switch (tf->protocol) { 2044 2044 case ATA_PROT_DMA: 2045 2045 if (tf->command == ATA_CMD_DSM) 2046 - return; 2046 + return AC_ERR_OK; 2047 2047 /* fall-thru */ 2048 2048 case ATA_PROT_NCQ: 2049 2049 break; /* continue below */ 2050 2050 case ATA_PROT_PIO: 2051 2051 mv_rw_multi_errata_sata24(qc); 2052 - return; 2052 + return AC_ERR_OK; 2053 2053 default: 2054 - return; 2054 + return AC_ERR_OK; 2055 2055 } 2056 2056 2057 2057 /* Fill in command request block ··· 2116 2116 mv_crqb_pack_cmd(cw++, tf->command, ATA_REG_CMD, 1); /* last */ 2117 2117 2118 2118 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) 2119 - return; 2119 + return AC_ERR_OK; 2120 2120 mv_fill_sg(qc); 2121 + 2122 + return AC_ERR_OK; 2121 2123 } 2122 2124 2123 2125 /** ··· 2134 2132 * LOCKING: 2135 2133 * Inherited from caller. 
2136 2134 */ 2137 - static void mv_qc_prep_iie(struct ata_queued_cmd *qc) 2135 + static enum ata_completion_errors mv_qc_prep_iie(struct ata_queued_cmd *qc) 2138 2136 { 2139 2137 struct ata_port *ap = qc->ap; 2140 2138 struct mv_port_priv *pp = ap->private_data; ··· 2145 2143 2146 2144 if ((tf->protocol != ATA_PROT_DMA) && 2147 2145 (tf->protocol != ATA_PROT_NCQ)) 2148 - return; 2146 + return AC_ERR_OK; 2149 2147 if (tf->command == ATA_CMD_DSM) 2150 - return; /* use bmdma for this */ 2148 + return AC_ERR_OK; /* use bmdma for this */ 2151 2149 2152 2150 /* Fill in Gen IIE command request block */ 2153 2151 if (!(tf->flags & ATA_TFLAG_WRITE)) ··· 2188 2186 ); 2189 2187 2190 2188 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) 2191 - return; 2189 + return AC_ERR_OK; 2192 2190 mv_fill_sg(qc); 2191 + 2192 + return AC_ERR_OK; 2193 2193 } 2194 2194 2195 2195 /**
+11 -7
drivers/ata/sata_nv.c
··· 297 297 static void nv_ck804_thaw(struct ata_port *ap); 298 298 static int nv_adma_slave_config(struct scsi_device *sdev); 299 299 static int nv_adma_check_atapi_dma(struct ata_queued_cmd *qc); 300 - static void nv_adma_qc_prep(struct ata_queued_cmd *qc); 300 + static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc); 301 301 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc); 302 302 static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance); 303 303 static void nv_adma_irq_clear(struct ata_port *ap); ··· 319 319 static void nv_swncq_error_handler(struct ata_port *ap); 320 320 static int nv_swncq_slave_config(struct scsi_device *sdev); 321 321 static int nv_swncq_port_start(struct ata_port *ap); 322 - static void nv_swncq_qc_prep(struct ata_queued_cmd *qc); 322 + static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc); 323 323 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc); 324 324 static unsigned int nv_swncq_qc_issue(struct ata_queued_cmd *qc); 325 325 static void nv_swncq_irq_clear(struct ata_port *ap, u16 fis); ··· 1344 1344 return 1; 1345 1345 } 1346 1346 1347 - static void nv_adma_qc_prep(struct ata_queued_cmd *qc) 1347 + static enum ata_completion_errors nv_adma_qc_prep(struct ata_queued_cmd *qc) 1348 1348 { 1349 1349 struct nv_adma_port_priv *pp = qc->ap->private_data; 1350 1350 struct nv_adma_cpb *cpb = &pp->cpb[qc->hw_tag]; ··· 1356 1356 (qc->flags & ATA_QCFLAG_DMAMAP)); 1357 1357 nv_adma_register_mode(qc->ap); 1358 1358 ata_bmdma_qc_prep(qc); 1359 - return; 1359 + return AC_ERR_OK; 1360 1360 } 1361 1361 1362 1362 cpb->resp_flags = NV_CPB_RESP_DONE; ··· 1388 1388 cpb->ctl_flags = ctl_flags; 1389 1389 wmb(); 1390 1390 cpb->resp_flags = 0; 1391 + 1392 + return AC_ERR_OK; 1391 1393 } 1392 1394 1393 1395 static unsigned int nv_adma_qc_issue(struct ata_queued_cmd *qc) ··· 1952 1950 return 0; 1953 1951 } 1954 1952 1955 - static void nv_swncq_qc_prep(struct ata_queued_cmd *qc) 1953 + 
static enum ata_completion_errors nv_swncq_qc_prep(struct ata_queued_cmd *qc) 1956 1954 { 1957 1955 if (qc->tf.protocol != ATA_PROT_NCQ) { 1958 1956 ata_bmdma_qc_prep(qc); 1959 - return; 1957 + return AC_ERR_OK; 1960 1958 } 1961 1959 1962 1960 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) 1963 - return; 1961 + return AC_ERR_OK; 1964 1962 1965 1963 nv_swncq_fill_sg(qc); 1964 + 1965 + return AC_ERR_OK; 1966 1966 } 1967 1967 1968 1968 static void nv_swncq_fill_sg(struct ata_queued_cmd *qc)
+4 -2
drivers/ata/sata_promise.c
··· 139 139 static int pdc_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 140 140 static int pdc_common_port_start(struct ata_port *ap); 141 141 static int pdc_sata_port_start(struct ata_port *ap); 142 - static void pdc_qc_prep(struct ata_queued_cmd *qc); 142 + static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc); 143 143 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf); 144 144 static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf); 145 145 static int pdc_check_atapi_dma(struct ata_queued_cmd *qc); ··· 633 633 prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); 634 634 } 635 635 636 - static void pdc_qc_prep(struct ata_queued_cmd *qc) 636 + static enum ata_completion_errors pdc_qc_prep(struct ata_queued_cmd *qc) 637 637 { 638 638 struct pdc_port_priv *pp = qc->ap->private_data; 639 639 unsigned int i; ··· 665 665 default: 666 666 break; 667 667 } 668 + 669 + return AC_ERR_OK; 668 670 } 669 671 670 672 static int pdc_is_sataii_tx4(unsigned long flags)
+5 -3
drivers/ata/sata_qstor.c
··· 100 100 static int qs_ata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 101 101 static int qs_port_start(struct ata_port *ap); 102 102 static void qs_host_stop(struct ata_host *host); 103 - static void qs_qc_prep(struct ata_queued_cmd *qc); 103 + static enum ata_completion_errors qs_qc_prep(struct ata_queued_cmd *qc); 104 104 static unsigned int qs_qc_issue(struct ata_queued_cmd *qc); 105 105 static int qs_check_atapi_dma(struct ata_queued_cmd *qc); 106 106 static void qs_freeze(struct ata_port *ap); ··· 260 260 return si; 261 261 } 262 262 263 - static void qs_qc_prep(struct ata_queued_cmd *qc) 263 + static enum ata_completion_errors qs_qc_prep(struct ata_queued_cmd *qc) 264 264 { 265 265 struct qs_port_priv *pp = qc->ap->private_data; 266 266 u8 dflags = QS_DF_PORD, *buf = pp->pkt; ··· 272 272 273 273 qs_enter_reg_mode(qc->ap); 274 274 if (qc->tf.protocol != ATA_PROT_DMA) 275 - return; 275 + return AC_ERR_OK; 276 276 277 277 nelem = qs_fill_sg(qc); 278 278 ··· 295 295 296 296 /* frame information structure (FIS) */ 297 297 ata_tf_to_fis(&qc->tf, 0, 1, &buf[32]); 298 + 299 + return AC_ERR_OK; 298 300 } 299 301 300 302 static inline void qs_packet_start(struct ata_queued_cmd *qc)
+4 -2
drivers/ata/sata_rcar.c
··· 550 550 prd[si - 1].addr |= cpu_to_le32(SATA_RCAR_DTEND); 551 551 } 552 552 553 - static void sata_rcar_qc_prep(struct ata_queued_cmd *qc) 553 + static enum ata_completion_errors sata_rcar_qc_prep(struct ata_queued_cmd *qc) 554 554 { 555 555 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) 556 - return; 556 + return AC_ERR_OK; 557 557 558 558 sata_rcar_bmdma_fill_sg(qc); 559 + 560 + return AC_ERR_OK; 559 561 } 560 562 561 563 static void sata_rcar_bmdma_setup(struct ata_queued_cmd *qc)
+5 -3
drivers/ata/sata_sil.c
··· 103 103 static int sil_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val); 104 104 static int sil_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); 105 105 static int sil_set_mode(struct ata_link *link, struct ata_device **r_failed); 106 - static void sil_qc_prep(struct ata_queued_cmd *qc); 106 + static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc); 107 107 static void sil_bmdma_setup(struct ata_queued_cmd *qc); 108 108 static void sil_bmdma_start(struct ata_queued_cmd *qc); 109 109 static void sil_bmdma_stop(struct ata_queued_cmd *qc); ··· 317 317 last_prd->flags_len |= cpu_to_le32(ATA_PRD_EOT); 318 318 } 319 319 320 - static void sil_qc_prep(struct ata_queued_cmd *qc) 320 + static enum ata_completion_errors sil_qc_prep(struct ata_queued_cmd *qc) 321 321 { 322 322 if (!(qc->flags & ATA_QCFLAG_DMAMAP)) 323 - return; 323 + return AC_ERR_OK; 324 324 325 325 sil_fill_sg(qc); 326 + 327 + return AC_ERR_OK; 326 328 } 327 329 328 330 static unsigned char sil_get_device_cache_line(struct pci_dev *pdev)
+4 -2
drivers/ata/sata_sil24.c
··· 326 326 static int sil24_scr_read(struct ata_link *link, unsigned sc_reg, u32 *val); 327 327 static int sil24_scr_write(struct ata_link *link, unsigned sc_reg, u32 val); 328 328 static int sil24_qc_defer(struct ata_queued_cmd *qc); 329 - static void sil24_qc_prep(struct ata_queued_cmd *qc); 329 + static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc); 330 330 static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc); 331 331 static bool sil24_qc_fill_rtf(struct ata_queued_cmd *qc); 332 332 static void sil24_pmp_attach(struct ata_port *ap); ··· 830 830 return ata_std_qc_defer(qc); 831 831 } 832 832 833 - static void sil24_qc_prep(struct ata_queued_cmd *qc) 833 + static enum ata_completion_errors sil24_qc_prep(struct ata_queued_cmd *qc) 834 834 { 835 835 struct ata_port *ap = qc->ap; 836 836 struct sil24_port_priv *pp = ap->private_data; ··· 874 874 875 875 if (qc->flags & ATA_QCFLAG_DMAMAP) 876 876 sil24_fill_sg(qc, sge); 877 + 878 + return AC_ERR_OK; 877 879 } 878 880 879 881 static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
+4 -2
drivers/ata/sata_sx4.c
··· 202 202 static void pdc_freeze(struct ata_port *ap); 203 203 static void pdc_thaw(struct ata_port *ap); 204 204 static int pdc_port_start(struct ata_port *ap); 205 - static void pdc20621_qc_prep(struct ata_queued_cmd *qc); 205 + static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc); 206 206 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf); 207 207 static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf); 208 208 static unsigned int pdc20621_dimm_init(struct ata_host *host); ··· 530 530 VPRINTK("ata pkt buf ofs %u, mmio copied\n", i); 531 531 } 532 532 533 - static void pdc20621_qc_prep(struct ata_queued_cmd *qc) 533 + static enum ata_completion_errors pdc20621_qc_prep(struct ata_queued_cmd *qc) 534 534 { 535 535 switch (qc->tf.protocol) { 536 536 case ATA_PROT_DMA: ··· 542 542 default: 543 543 break; 544 544 } 545 + 546 + return AC_ERR_OK; 545 547 } 546 548 547 549 static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
+6 -6
include/linux/libata.h
··· 892 892 /* 893 893 * Command execution 894 894 */ 895 - int (*qc_defer)(struct ata_queued_cmd *qc); 896 - int (*check_atapi_dma)(struct ata_queued_cmd *qc); 897 - void (*qc_prep)(struct ata_queued_cmd *qc); 895 + int (*qc_defer)(struct ata_queued_cmd *qc); 896 + int (*check_atapi_dma)(struct ata_queued_cmd *qc); 897 + enum ata_completion_errors (*qc_prep)(struct ata_queued_cmd *qc); 898 898 unsigned int (*qc_issue)(struct ata_queued_cmd *qc); 899 899 bool (*qc_fill_rtf)(struct ata_queued_cmd *qc); 900 900 ··· 1162 1162 extern const char *ata_mode_string(unsigned long xfer_mask); 1163 1163 extern unsigned long ata_id_xfermask(const u16 *id); 1164 1164 extern int ata_std_qc_defer(struct ata_queued_cmd *qc); 1165 - extern void ata_noop_qc_prep(struct ata_queued_cmd *qc); 1165 + extern enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc); 1166 1166 extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, 1167 1167 unsigned int n_elem); 1168 1168 extern unsigned int ata_dev_classify(const struct ata_taskfile *tf); ··· 1894 1894 .sg_tablesize = LIBATA_MAX_PRD, \ 1895 1895 .dma_boundary = ATA_DMA_BOUNDARY 1896 1896 1897 - extern void ata_bmdma_qc_prep(struct ata_queued_cmd *qc); 1897 + extern enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc); 1898 1898 extern unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc); 1899 - extern void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc); 1899 + extern enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc); 1900 1900 extern unsigned int ata_bmdma_port_intr(struct ata_port *ap, 1901 1901 struct ata_queued_cmd *qc); 1902 1902 extern irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance);