[libata] checkpatch-inspired cleanups

Tackle the relatively sane complaints of checkpatch --file.

The vast majority is indentation and whitespace changes, the rest are

* #include fixes
* printk KERN_xxx prefix addition
* BSS/initializer cleanups

Signed-off-by: Jeff Garzik <jgarzik@redhat.com>

+266 -268
+13 -13
drivers/ata/ahci.c
··· 227 228 static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); 229 static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); 230 - static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); 231 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc); 232 static void ahci_irq_clear(struct ata_port *ap); 233 static int ahci_port_start(struct ata_port *ap); ··· 729 730 /* wait for engine to stop. This could be as long as 500 msec */ 731 tmp = ata_wait_register(port_mmio + PORT_CMD, 732 - PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500); 733 if (tmp & PORT_CMD_LIST_ON) 734 return -EIO; 735 ··· 1564 if (!irq_stat) 1565 return IRQ_NONE; 1566 1567 - spin_lock(&host->lock); 1568 1569 - for (i = 0; i < host->n_ports; i++) { 1570 struct ata_port *ap; 1571 1572 if (!(irq_stat & (1 << i))) ··· 1829 pp->cmd_tbl_dma = mem_dma; 1830 1831 /* 1832 - * Save off initial list of interrupts to be enabled. 1833 - * This could be changed later 1834 - */ 1835 pp->intr_mask = DEF_PORT_IRQ; 1836 1837 ap->private_data = pp; ··· 1918 dev_printk(KERN_INFO, &pdev->dev, 1919 "AHCI %02x%02x.%02x%02x " 1920 "%u slots %u ports %s Gbps 0x%x impl %s mode\n" 1921 - , 1922 1923 - (vers >> 24) & 0xff, 1924 - (vers >> 16) & 0xff, 1925 - (vers >> 8) & 0xff, 1926 - vers & 0xff, 1927 1928 ((cap >> 8) & 0x1f) + 1, 1929 (cap & 0x1f) + 1, ··· 1935 "flags: " 1936 "%s%s%s%s%s%s%s" 1937 "%s%s%s%s%s%s%s\n" 1938 - , 1939 1940 cap & (1 << 31) ? "64bit " : "", 1941 cap & (1 << 30) ? "ncq " : "",
··· 227 228 static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); 229 static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); 230 + static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 231 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc); 232 static void ahci_irq_clear(struct ata_port *ap); 233 static int ahci_port_start(struct ata_port *ap); ··· 729 730 /* wait for engine to stop. This could be as long as 500 msec */ 731 tmp = ata_wait_register(port_mmio + PORT_CMD, 732 + PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500); 733 if (tmp & PORT_CMD_LIST_ON) 734 return -EIO; 735 ··· 1564 if (!irq_stat) 1565 return IRQ_NONE; 1566 1567 + spin_lock(&host->lock); 1568 1569 + for (i = 0; i < host->n_ports; i++) { 1570 struct ata_port *ap; 1571 1572 if (!(irq_stat & (1 << i))) ··· 1829 pp->cmd_tbl_dma = mem_dma; 1830 1831 /* 1832 + * Save off initial list of interrupts to be enabled. 1833 + * This could be changed later 1834 + */ 1835 pp->intr_mask = DEF_PORT_IRQ; 1836 1837 ap->private_data = pp; ··· 1918 dev_printk(KERN_INFO, &pdev->dev, 1919 "AHCI %02x%02x.%02x%02x " 1920 "%u slots %u ports %s Gbps 0x%x impl %s mode\n" 1921 + , 1922 1923 + (vers >> 24) & 0xff, 1924 + (vers >> 16) & 0xff, 1925 + (vers >> 8) & 0xff, 1926 + vers & 0xff, 1927 1928 ((cap >> 8) & 0x1f) + 1, 1929 (cap & 0x1f) + 1, ··· 1935 "flags: " 1936 "%s%s%s%s%s%s%s" 1937 "%s%s%s%s%s%s%s\n" 1938 + , 1939 1940 cap & (1 << 31) ? "64bit " : "", 1941 cap & (1 << 30) ? "ncq " : "",
+14 -15
drivers/ata/ata_piix.c
··· 157 const int *map; 158 }; 159 160 - static int piix_init_one (struct pci_dev *pdev, 161 - const struct pci_device_id *ent); 162 static void piix_pata_error_handler(struct ata_port *ap); 163 - static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev); 164 - static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev); 165 - static void ich_set_dmamode (struct ata_port *ap, struct ata_device *adev); 166 static int ich_pata_cable_detect(struct ata_port *ap); 167 #ifdef CONFIG_PM 168 static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg); ··· 650 while (lap->device) { 651 if (lap->device == pdev->device && 652 lap->subvendor == pdev->subsystem_vendor && 653 - lap->subdevice == pdev->subsystem_device) { 654 return ATA_CBL_PATA40_SHORT; 655 - } 656 lap++; 657 } 658 ··· 699 * None (inherited from caller). 700 */ 701 702 - static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev) 703 { 704 unsigned int pio = adev->pio_mode - XFER_PIO_0; 705 struct pci_dev *dev = to_pci_dev(ap->host->dev); ··· 786 * None (inherited from caller). 787 */ 788 789 - static void do_pata_set_dmamode (struct ata_port *ap, struct ata_device *adev, int isich) 790 { 791 struct pci_dev *dev = to_pci_dev(ap->host->dev); 792 u8 master_port = ap->port_no ? 0x42 : 0x40; ··· 813 int u_clock, u_speed; 814 815 /* 816 - * UDMA is handled by a combination of clock switching and 817 * selection of dividers 818 * 819 * Handy rule: Odd modes are UDMATIMx 01, even are 02 ··· 905 * None (inherited from caller). 906 */ 907 908 - static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev) 909 { 910 do_pata_set_dmamode(ap, adev, 0); 911 } ··· 921 * None (inherited from caller). 
922 */ 923 924 - static void ich_set_dmamode (struct ata_port *ap, struct ata_device *adev) 925 { 926 do_pata_set_dmamode(ap, adev, 1); 927 } ··· 1106 u16 cfg; 1107 int no_piix_dma = 0; 1108 1109 - while((pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, pdev)) != NULL) 1110 - { 1111 /* Look for 450NX PXB. Check for problem configurations 1112 A PCI quirk checks bit 6 already */ 1113 pci_read_config_word(pdev, 0x41, &cfg); ··· 1240 * Zero on success, or -ERRNO value. 1241 */ 1242 1243 - static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) 1244 { 1245 static int printed_version; 1246 struct device *dev = &pdev->dev;
··· 157 const int *map; 158 }; 159 160 + static int piix_init_one(struct pci_dev *pdev, 161 + const struct pci_device_id *ent); 162 static void piix_pata_error_handler(struct ata_port *ap); 163 + static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev); 164 + static void piix_set_dmamode(struct ata_port *ap, struct ata_device *adev); 165 + static void ich_set_dmamode(struct ata_port *ap, struct ata_device *adev); 166 static int ich_pata_cable_detect(struct ata_port *ap); 167 #ifdef CONFIG_PM 168 static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg); ··· 650 while (lap->device) { 651 if (lap->device == pdev->device && 652 lap->subvendor == pdev->subsystem_vendor && 653 + lap->subdevice == pdev->subsystem_device) 654 return ATA_CBL_PATA40_SHORT; 655 + 656 lap++; 657 } 658 ··· 699 * None (inherited from caller). 700 */ 701 702 + static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev) 703 { 704 unsigned int pio = adev->pio_mode - XFER_PIO_0; 705 struct pci_dev *dev = to_pci_dev(ap->host->dev); ··· 786 * None (inherited from caller). 787 */ 788 789 + static void do_pata_set_dmamode(struct ata_port *ap, struct ata_device *adev, int isich) 790 { 791 struct pci_dev *dev = to_pci_dev(ap->host->dev); 792 u8 master_port = ap->port_no ? 0x42 : 0x40; ··· 813 int u_clock, u_speed; 814 815 /* 816 + * UDMA is handled by a combination of clock switching and 817 * selection of dividers 818 * 819 * Handy rule: Odd modes are UDMATIMx 01, even are 02 ··· 905 * None (inherited from caller). 906 */ 907 908 + static void piix_set_dmamode(struct ata_port *ap, struct ata_device *adev) 909 { 910 do_pata_set_dmamode(ap, adev, 0); 911 } ··· 921 * None (inherited from caller). 
922 */ 923 924 + static void ich_set_dmamode(struct ata_port *ap, struct ata_device *adev) 925 { 926 do_pata_set_dmamode(ap, adev, 1); 927 } ··· 1106 u16 cfg; 1107 int no_piix_dma = 0; 1108 1109 + while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, pdev)) != NULL) { 1110 /* Look for 450NX PXB. Check for problem configurations 1111 A PCI quirk checks bit 6 already */ 1112 pci_read_config_word(pdev, 0x41, &cfg); ··· 1241 * Zero on success, or -ERRNO value. 1242 */ 1243 1244 + static int piix_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 1245 { 1246 static int printed_version; 1247 struct device *dev = &pdev->dev;
+8 -8
drivers/ata/libata-acpi.c
··· 26 #include <acpi/actypes.h> 27 28 #define NO_PORT_MULT 0xffff 29 - #define SATA_ADR(root,pmp) (((root) << 16) | (pmp)) 30 31 #define REGS_PER_GTF 7 32 struct ata_acpi_gtf { ··· 96 } 97 } 98 99 - static void ata_acpi_handle_hotplug (struct ata_port *ap, struct kobject *kobj, 100 - u32 event) 101 { 102 char event_string[12]; 103 char *envp[] = { event_string, NULL }; ··· 114 } 115 116 if (kobj) { 117 - sprintf(event_string, "BAY_EVENT=%d", event); 118 kobject_uevent_env(kobj, KOBJ_CHANGE, envp); 119 } 120 } ··· 127 if (dev->sdev) 128 kobj = &dev->sdev->sdev_gendev.kobj; 129 130 - ata_acpi_handle_hotplug (dev->link->ap, kobj, event); 131 } 132 133 static void ata_acpi_ap_notify(acpi_handle handle, u32 event, void *data) 134 { 135 struct ata_port *ap = data; 136 137 - ata_acpi_handle_hotplug (ap, &ap->dev->kobj, event); 138 } 139 140 /** ··· 398 { 399 struct ata_acpi_gtm gtm; 400 int valid = 0; 401 - 402 /* No _GTM data, no information */ 403 if (ata_acpi_gtm(ap, &gtm) < 0) 404 return 0; 405 - 406 /* Split timing, DMA enabled */ 407 if ((gtm.flags & 0x11) == 0x11 && gtm.drive[0].dma < 55) 408 valid |= 1;
··· 26 #include <acpi/actypes.h> 27 28 #define NO_PORT_MULT 0xffff 29 + #define SATA_ADR(root, pmp) (((root) << 16) | (pmp)) 30 31 #define REGS_PER_GTF 7 32 struct ata_acpi_gtf { ··· 96 } 97 } 98 99 + static void ata_acpi_handle_hotplug(struct ata_port *ap, struct kobject *kobj, 100 + u32 event) 101 { 102 char event_string[12]; 103 char *envp[] = { event_string, NULL }; ··· 114 } 115 116 if (kobj) { 117 + sprintf(event_string, "BAY_EVENT=%d", event); 118 kobject_uevent_env(kobj, KOBJ_CHANGE, envp); 119 } 120 } ··· 127 if (dev->sdev) 128 kobj = &dev->sdev->sdev_gendev.kobj; 129 130 + ata_acpi_handle_hotplug(dev->link->ap, kobj, event); 131 } 132 133 static void ata_acpi_ap_notify(acpi_handle handle, u32 event, void *data) 134 { 135 struct ata_port *ap = data; 136 137 + ata_acpi_handle_hotplug(ap, &ap->dev->kobj, event); 138 } 139 140 /** ··· 398 { 399 struct ata_acpi_gtm gtm; 400 int valid = 0; 401 + 402 /* No _GTM data, no information */ 403 if (ata_acpi_gtm(ap, &gtm) < 0) 404 return 0; 405 + 406 /* Split timing, DMA enabled */ 407 if ((gtm.flags & 0x11) == 0x11 && gtm.drive[0].dma < 55) 408 valid |= 1;
+34 -32
drivers/ata/libata-core.c
··· 49 #include <linux/workqueue.h> 50 #include <linux/jiffies.h> 51 #include <linux/scatterlist.h> 52 #include <scsi/scsi.h> 53 #include <scsi/scsi_cmnd.h> 54 #include <scsi/scsi_host.h> 55 #include <linux/libata.h> 56 - #include <asm/io.h> 57 #include <asm/semaphore.h> 58 #include <asm/byteorder.h> 59 ··· 93 module_param_named(fua, libata_fua, int, 0444); 94 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)"); 95 96 - static int ata_ignore_hpa = 0; 97 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644); 98 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)"); 99 ··· 713 } 714 715 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) { 716 - printk("ata: SEMB device ignored\n"); 717 return ATA_DEV_SEMB_UNSUP; /* not yet */ 718 } 719 ··· 939 *max_sectors = ata_tf_to_lba48(&tf); 940 else 941 *max_sectors = ata_tf_to_lba(&tf); 942 - if (dev->horkage & ATA_HORKAGE_HPA_SIZE) 943 (*max_sectors)--; 944 return 0; 945 } ··· 1151 * LOCKING: 1152 * caller. 1153 */ 1154 - void ata_noop_dev_select (struct ata_port *ap, unsigned int device) 1155 { 1156 } 1157 ··· 1171 * caller. 1172 */ 1173 1174 - void ata_std_dev_select (struct ata_port *ap, unsigned int device) 1175 { 1176 u8 tmp; 1177 ··· 1292 */ 1293 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF; 1294 if (mode < 5) /* Valid PIO range */ 1295 - pio_mask = (2 << mode) - 1; 1296 else 1297 pio_mask = 1; 1298 ··· 1693 * for pre-ATA4 drives. 1694 * 1695 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right 1696 - * now we abort if we hit that case. 1697 * 1698 * LOCKING: 1699 * Kernel thread context (may sleep) ··· 1979 "supports DRM functions and may " 1980 "not be fully accessable.\n"); 1981 snprintf(revbuf, 7, "CFA"); 1982 - } 1983 - else 1984 - snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id)); 1985 1986 dev->n_sectors = ata_id_n_sectors(id); 1987 ··· 2109 /* Let the user know. 
We don't want to disallow opens for 2110 rescue purposes, or in case the vendor is just a blithering 2111 idiot */ 2112 - if (print_info) { 2113 ata_dev_printk(dev, KERN_WARNING, 2114 "Drive reports diagnostics failure. This may indicate a drive\n"); 2115 ata_dev_printk(dev, KERN_WARNING, ··· 2666 { 0xFF } 2667 }; 2668 2669 - #define ENOUGH(v,unit) (((v)-1)/(unit)+1) 2670 - #define EZ(v,unit) ((v)?ENOUGH(v,unit):0) 2671 2672 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT) 2673 { ··· 2694 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma); 2695 } 2696 2697 - static const struct ata_timing* ata_timing_find_mode(unsigned short speed) 2698 { 2699 const struct ata_timing *t; 2700 ··· 2726 2727 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */ 2728 memset(&p, 0, sizeof(p)); 2729 - if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) { 2730 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO]; 2731 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY]; 2732 - } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) { 2733 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN]; 2734 } 2735 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B); ··· 2875 dev->flags |= ATA_DFLAG_PIO; 2876 2877 err_mask = ata_dev_set_xfermode(dev); 2878 /* Old CFA may refuse this command, which is just fine */ 2879 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id)) 2880 - err_mask &= ~AC_ERR_DEV; 2881 /* Some very old devices and some bad newer ones fail any kind of 2882 SET_XFERMODE request but support PIO0-2 timings and no IORDY */ 2883 if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) && 2884 dev->pio_mode <= XFER_PIO_2) 2885 err_mask &= ~AC_ERR_DEV; 2886 if (err_mask) { 2887 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode " 2888 "(err_mask=0x%x)\n", err_mask); ··· 3945 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA }, 3946 { "ATAPI CD-ROM DRIVE 40X 
MAXIMUM",NULL,ATA_HORKAGE_NODMA }, 3947 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA }, 3948 - { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA }, 3949 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA }, 3950 { "IOMEGA ZIP 250 ATAPI", NULL, ATA_HORKAGE_NODMA }, /* temporary fix */ 3951 { "IOMEGA ZIP 250 ATAPI Floppy", ··· 3961 3962 /* Devices where NCQ should be avoided */ 3963 /* NCQ is slow */ 3964 - { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ }, 3965 /* http://thread.gmane.org/gmane.linux.ide/14907 */ 3966 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ }, 3967 /* NCQ is broken */ ··· 4108 } 4109 4110 if ((host->flags & ATA_HOST_SIMPLEX) && 4111 - host->simplex_claimed && host->simplex_claimed != ap) { 4112 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 4113 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by " 4114 "other device, disabling DMA\n"); ··· 4130 */ 4131 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA)) 4132 /* UDMA/44 or higher would be available */ 4133 - if((ap->cbl == ATA_CBL_PATA40) || 4134 - (ata_drive_40wire(dev->id) && 4135 - (ap->cbl == ATA_CBL_PATA_UNK || 4136 - ap->cbl == ATA_CBL_PATA80))) { 4137 - ata_dev_printk(dev, KERN_WARNING, 4138 "limited to UDMA/33 due to 40-wire cable\n"); 4139 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA); 4140 } ··· 4397 u32 addr, offset; 4398 u32 sg_len, len, blen; 4399 4400 - /* determine if physical DMA addr spans 64K boundary. 4401 * Note h/w doesn't support 64-bit, so we unconditionally 4402 * truncate dma_addr_t to u32. 4403 */ ··· 4982 "%u bytes trailing data\n", bytes); 4983 4984 for (i = 0; i < words; i++) 4985 - ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write); 4986 4987 ap->hsm_task_state = HSM_ST_LAST; 4988 return; ··· 5910 * One if interrupt was handled, zero if not (shared irq). 
5911 */ 5912 5913 - inline unsigned int ata_host_intr (struct ata_port *ap, 5914 - struct ata_queued_cmd *qc) 5915 { 5916 struct ata_eh_info *ehi = &ap->link.eh_info; 5917 u8 status, host_stat = 0; ··· 6011 * IRQ_NONE or IRQ_HANDLED. 6012 */ 6013 6014 - irqreturn_t ata_interrupt (int irq, void *dev_instance) 6015 { 6016 struct ata_host *host = dev_instance; 6017 unsigned int i; ··· 6214 6215 /* This is wrong. On a failed flush we get back the LBA of the lost 6216 sector and we should (assuming it wasn't aborted as unknown) issue 6217 - a further flush command to continue the writeback until it 6218 does not error */ 6219 err_mask = ata_do_simple_cmd(dev, cmd); 6220 if (err_mask) {
··· 49 #include <linux/workqueue.h> 50 #include <linux/jiffies.h> 51 #include <linux/scatterlist.h> 52 + #include <linux/io.h> 53 #include <scsi/scsi.h> 54 #include <scsi/scsi_cmnd.h> 55 #include <scsi/scsi_host.h> 56 #include <linux/libata.h> 57 #include <asm/semaphore.h> 58 #include <asm/byteorder.h> 59 ··· 93 module_param_named(fua, libata_fua, int, 0444); 94 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)"); 95 96 + static int ata_ignore_hpa; 97 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644); 98 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)"); 99 ··· 713 } 714 715 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) { 716 + printk(KERN_INFO "ata: SEMB device ignored\n"); 717 return ATA_DEV_SEMB_UNSUP; /* not yet */ 718 } 719 ··· 939 *max_sectors = ata_tf_to_lba48(&tf); 940 else 941 *max_sectors = ata_tf_to_lba(&tf); 942 + if (dev->horkage & ATA_HORKAGE_HPA_SIZE) 943 (*max_sectors)--; 944 return 0; 945 } ··· 1151 * LOCKING: 1152 * caller. 1153 */ 1154 + void ata_noop_dev_select(struct ata_port *ap, unsigned int device) 1155 { 1156 } 1157 ··· 1171 * caller. 1172 */ 1173 1174 + void ata_std_dev_select(struct ata_port *ap, unsigned int device) 1175 { 1176 u8 tmp; 1177 ··· 1292 */ 1293 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF; 1294 if (mode < 5) /* Valid PIO range */ 1295 + pio_mask = (2 << mode) - 1; 1296 else 1297 pio_mask = 1; 1298 ··· 1693 * for pre-ATA4 drives. 1694 * 1695 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right 1696 + * now we abort if we hit that case. 1697 * 1698 * LOCKING: 1699 * Kernel thread context (may sleep) ··· 1979 "supports DRM functions and may " 1980 "not be fully accessable.\n"); 1981 snprintf(revbuf, 7, "CFA"); 1982 + } else 1983 + snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id)); 1984 1985 dev->n_sectors = ata_id_n_sectors(id); 1986 ··· 2110 /* Let the user know. 
We don't want to disallow opens for 2111 rescue purposes, or in case the vendor is just a blithering 2112 idiot */ 2113 + if (print_info) { 2114 ata_dev_printk(dev, KERN_WARNING, 2115 "Drive reports diagnostics failure. This may indicate a drive\n"); 2116 ata_dev_printk(dev, KERN_WARNING, ··· 2667 { 0xFF } 2668 }; 2669 2670 + #define ENOUGH(v, unit) (((v)-1)/(unit)+1) 2671 + #define EZ(v, unit) ((v)?ENOUGH(v, unit):0) 2672 2673 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT) 2674 { ··· 2695 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma); 2696 } 2697 2698 + static const struct ata_timing *ata_timing_find_mode(unsigned short speed) 2699 { 2700 const struct ata_timing *t; 2701 ··· 2727 2728 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */ 2729 memset(&p, 0, sizeof(p)); 2730 + if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) { 2731 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO]; 2732 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY]; 2733 + } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) { 2734 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN]; 2735 } 2736 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B); ··· 2876 dev->flags |= ATA_DFLAG_PIO; 2877 2878 err_mask = ata_dev_set_xfermode(dev); 2879 + 2880 /* Old CFA may refuse this command, which is just fine */ 2881 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id)) 2882 + err_mask &= ~AC_ERR_DEV; 2883 + 2884 /* Some very old devices and some bad newer ones fail any kind of 2885 SET_XFERMODE request but support PIO0-2 timings and no IORDY */ 2886 if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) && 2887 dev->pio_mode <= XFER_PIO_2) 2888 err_mask &= ~AC_ERR_DEV; 2889 + 2890 if (err_mask) { 2891 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode " 2892 "(err_mask=0x%x)\n", err_mask); ··· 3943 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA }, 3944 { "ATAPI CD-ROM DRIVE 40X 
MAXIMUM",NULL,ATA_HORKAGE_NODMA }, 3945 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA }, 3946 + { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA }, 3947 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA }, 3948 { "IOMEGA ZIP 250 ATAPI", NULL, ATA_HORKAGE_NODMA }, /* temporary fix */ 3949 { "IOMEGA ZIP 250 ATAPI Floppy", ··· 3959 3960 /* Devices where NCQ should be avoided */ 3961 /* NCQ is slow */ 3962 + { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ }, 3963 /* http://thread.gmane.org/gmane.linux.ide/14907 */ 3964 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ }, 3965 /* NCQ is broken */ ··· 4106 } 4107 4108 if ((host->flags & ATA_HOST_SIMPLEX) && 4109 + host->simplex_claimed && host->simplex_claimed != ap) { 4110 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 4111 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by " 4112 "other device, disabling DMA\n"); ··· 4128 */ 4129 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA)) 4130 /* UDMA/44 or higher would be available */ 4131 + if ((ap->cbl == ATA_CBL_PATA40) || 4132 + (ata_drive_40wire(dev->id) && 4133 + (ap->cbl == ATA_CBL_PATA_UNK || 4134 + ap->cbl == ATA_CBL_PATA80))) { 4135 + ata_dev_printk(dev, KERN_WARNING, 4136 "limited to UDMA/33 due to 40-wire cable\n"); 4137 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA); 4138 } ··· 4395 u32 addr, offset; 4396 u32 sg_len, len, blen; 4397 4398 + /* determine if physical DMA addr spans 64K boundary. 4399 * Note h/w doesn't support 64-bit, so we unconditionally 4400 * truncate dma_addr_t to u32. 4401 */ ··· 4980 "%u bytes trailing data\n", bytes); 4981 4982 for (i = 0; i < words; i++) 4983 + ap->ops->data_xfer(qc->dev, (unsigned char *)pad_buf, 2, do_write); 4984 4985 ap->hsm_task_state = HSM_ST_LAST; 4986 return; ··· 5908 * One if interrupt was handled, zero if not (shared irq). 
5909 */ 5910 5911 + inline unsigned int ata_host_intr(struct ata_port *ap, 5912 + struct ata_queued_cmd *qc) 5913 { 5914 struct ata_eh_info *ehi = &ap->link.eh_info; 5915 u8 status, host_stat = 0; ··· 6009 * IRQ_NONE or IRQ_HANDLED. 6010 */ 6011 6012 + irqreturn_t ata_interrupt(int irq, void *dev_instance) 6013 { 6014 struct ata_host *host = dev_instance; 6015 unsigned int i; ··· 6212 6213 /* This is wrong. On a failed flush we get back the LBA of the lost 6214 sector and we should (assuming it wasn't aborted as unknown) issue 6215 + a further flush command to continue the writeback until it 6216 does not error */ 6217 err_mask = ata_do_simple_cmd(dev, cmd); 6218 if (err_mask) {
+7 -7
drivers/ata/libata-eh.c
··· 1197 * RETURNS: 1198 * Descriptive string for @err_mask 1199 */ 1200 - static const char * ata_err_string(unsigned int err_mask) 1201 { 1202 if (err_mask & AC_ERR_HOST_BUS) 1203 return "host bus error"; ··· 1934 ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "", 1935 ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "", 1936 ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "", 1937 - ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "" ); 1938 1939 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 1940 static const char *dma_str[] = { ··· 1969 qc->err_mask & AC_ERR_NCQ ? " <F>" : ""); 1970 1971 if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | 1972 - ATA_ERR) ) { 1973 if (res->command & ATA_BUSY) 1974 ata_dev_printk(qc->dev, KERN_ERR, 1975 - "status: { Busy }\n" ); 1976 else 1977 ata_dev_printk(qc->dev, KERN_ERR, 1978 "status: { %s%s%s%s}\n", 1979 res->command & ATA_DRDY ? "DRDY " : "", 1980 res->command & ATA_DF ? "DF " : "", 1981 res->command & ATA_DRQ ? "DRQ " : "", 1982 - res->command & ATA_ERR ? "ERR " : "" ); 1983 } 1984 1985 if (cmd->command != ATA_CMD_PACKET && ··· 1990 res->feature & ATA_ICRC ? "ICRC " : "", 1991 res->feature & ATA_UNC ? "UNC " : "", 1992 res->feature & ATA_IDNF ? "IDNF " : "", 1993 - res->feature & ATA_ABORTED ? "ABRT " : "" ); 1994 } 1995 } 1996 ··· 2611 ehc->i.flags = 0; 2612 continue; 2613 2614 - dev_fail: 2615 nr_failed_devs++; 2616 if (ata_eh_handle_dev_fail(dev, rc)) 2617 nr_disabled_devs++;
··· 1197 * RETURNS: 1198 * Descriptive string for @err_mask 1199 */ 1200 + static const char *ata_err_string(unsigned int err_mask) 1201 { 1202 if (err_mask & AC_ERR_HOST_BUS) 1203 return "host bus error"; ··· 1934 ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "", 1935 ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "", 1936 ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "", 1937 + ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : ""); 1938 1939 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 1940 static const char *dma_str[] = { ··· 1969 qc->err_mask & AC_ERR_NCQ ? " <F>" : ""); 1970 1971 if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | 1972 + ATA_ERR)) { 1973 if (res->command & ATA_BUSY) 1974 ata_dev_printk(qc->dev, KERN_ERR, 1975 + "status: { Busy }\n"); 1976 else 1977 ata_dev_printk(qc->dev, KERN_ERR, 1978 "status: { %s%s%s%s}\n", 1979 res->command & ATA_DRDY ? "DRDY " : "", 1980 res->command & ATA_DF ? "DF " : "", 1981 res->command & ATA_DRQ ? "DRQ " : "", 1982 + res->command & ATA_ERR ? "ERR " : ""); 1983 } 1984 1985 if (cmd->command != ATA_CMD_PACKET && ··· 1990 res->feature & ATA_ICRC ? "ICRC " : "", 1991 res->feature & ATA_UNC ? "UNC " : "", 1992 res->feature & ATA_IDNF ? "IDNF " : "", 1993 + res->feature & ATA_ABORTED ? "ABRT " : ""); 1994 } 1995 } 1996 ··· 2611 ehc->i.flags = 0; 2612 continue; 2613 2614 + dev_fail: 2615 nr_failed_devs++; 2616 if (ata_eh_handle_dev_fail(dev, rc)) 2617 nr_disabled_devs++;
+124 -124
drivers/ata/libata-scsi.c
··· 45 #include <scsi/scsi_transport.h> 46 #include <linux/libata.h> 47 #include <linux/hdreg.h> 48 - #include <asm/uaccess.h> 49 50 #include "libata.h" 51 ··· 53 54 typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc); 55 56 - static struct ata_device * __ata_scsi_find_dev(struct ata_port *ap, 57 const struct scsi_device *scsidev); 58 - static struct ata_device * ata_scsi_find_dev(struct ata_port *ap, 59 const struct scsi_device *scsidev); 60 static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, 61 unsigned int id, unsigned int lun); ··· 228 229 scsi_cmd[1] = (4 << 1); /* PIO Data-in */ 230 scsi_cmd[2] = 0x0e; /* no off.line or cc, read from dev, 231 - block count in sector count field */ 232 data_dir = DMA_FROM_DEVICE; 233 } else { 234 scsi_cmd[1] = (3 << 1); /* Non-data */ ··· 252 /* Good values for timeout and retries? Values below 253 from scsi_ioctl_send_command() for default case... */ 254 cmd_result = scsi_execute(scsidev, scsi_cmd, data_dir, argbuf, argsize, 255 - sensebuf, (10*HZ), 5, 0); 256 257 if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */ 258 u8 *desc = sensebuf + 8; ··· 263 if (cmd_result & SAM_STAT_CHECK_CONDITION) { 264 struct scsi_sense_hdr sshdr; 265 scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, 266 - &sshdr); 267 - if (sshdr.sense_key==0 && 268 - sshdr.asc==0 && sshdr.ascq==0) 269 cmd_result &= ~SAM_STAT_CHECK_CONDITION; 270 } 271 272 /* Send userspace a few ATA registers (same as drivers/ide) */ 273 - if (sensebuf[0] == 0x72 && /* format is "descriptor" */ 274 - desc[0] == 0x09 ) { /* code is "ATA Descriptor" */ 275 - args[0] = desc[13]; /* status */ 276 - args[1] = desc[3]; /* error */ 277 - args[2] = desc[5]; /* sector count (0:7) */ 278 if (copy_to_user(arg, args, sizeof(args))) 279 rc = -EFAULT; 280 } ··· 350 struct scsi_sense_hdr sshdr; 351 scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, 352 &sshdr); 353 - if (sshdr.sense_key==0 && 354 - sshdr.asc==0 && 
sshdr.ascq==0) 355 cmd_result &= ~SAM_STAT_CHECK_CONDITION; 356 } 357 ··· 975 if ((qc->dev->flags & ATA_DFLAG_SPUNDOWN) && 976 (system_state == SYSTEM_HALT || 977 system_state == SYSTEM_POWER_OFF)) { 978 - static unsigned long warned = 0; 979 980 if (!test_and_set_bit(0, &warned)) { 981 ata_dev_printk(qc->dev, KERN_WARNING, ··· 1364 struct ata_eh_info *ehi = &qc->dev->link->eh_info; 1365 struct scsi_cmnd *cmd = qc->scsicmd; 1366 u8 *cdb = cmd->cmnd; 1367 - int need_sense = (qc->err_mask != 0); 1368 1369 /* We snoop the SET_FEATURES - Write Cache ON/OFF command, and 1370 * schedule EH_REVALIDATE operation to update the IDENTIFY DEVICE ··· 1396 * was no error, SK, ASC and ASCQ will all be zero. 1397 */ 1398 if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) && 1399 - ((cdb[2] & 0x20) || need_sense)) { 1400 ata_gen_passthru_sense(qc); 1401 } else { 1402 if (!need_sense) { ··· 1500 return 0; 1501 1502 early_finish: 1503 - ata_qc_free(qc); 1504 qc->scsidone(cmd); 1505 DPRINTK("EXIT - early finish (good or error)\n"); 1506 return 0; ··· 1590 */ 1591 1592 void ata_scsi_rbuf_fill(struct ata_scsi_args *args, 1593 - unsigned int (*actor) (struct ata_scsi_args *args, 1594 - u8 *rbuf, unsigned int buflen)) 1595 { 1596 u8 *rbuf; 1597 unsigned int buflen, rc; ··· 2140 * None. 
2141 */ 2142 unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf, 2143 - unsigned int buflen) 2144 { 2145 u64 last_lba = args->dev->n_sectors - 1; /* LBA of the last block */ 2146 ··· 2464 return 0; 2465 } 2466 2467 - static struct ata_device * ata_find_dev(struct ata_port *ap, int devno) 2468 { 2469 if (ap->nr_pmp_links == 0) { 2470 if (likely(devno < ata_link_max_devices(&ap->link))) ··· 2477 return NULL; 2478 } 2479 2480 - static struct ata_device * __ata_scsi_find_dev(struct ata_port *ap, 2481 - const struct scsi_device *scsidev) 2482 { 2483 int devno; 2484 ··· 2564 ata_scsi_map_proto(u8 byte1) 2565 { 2566 switch((byte1 & 0x1e) >> 1) { 2567 - case 3: /* Non-data */ 2568 - return ATA_PROT_NODATA; 2569 2570 - case 6: /* DMA */ 2571 - case 10: /* UDMA Data-in */ 2572 - case 11: /* UDMA Data-Out */ 2573 - return ATA_PROT_DMA; 2574 2575 - case 4: /* PIO Data-in */ 2576 - case 5: /* PIO Data-out */ 2577 - return ATA_PROT_PIO; 2578 2579 - case 0: /* Hard Reset */ 2580 - case 1: /* SRST */ 2581 - case 8: /* Device Diagnostic */ 2582 - case 9: /* Device Reset */ 2583 - case 7: /* DMA Queued */ 2584 - case 12: /* FPDMA */ 2585 - case 15: /* Return Response Info */ 2586 - default: /* Reserved */ 2587 - break; 2588 } 2589 2590 return ATA_PROT_UNKNOWN; ··· 2919 args.done = done; 2920 2921 switch(scsicmd[0]) { 2922 - /* TODO: worth improving? */ 2923 - case FORMAT_UNIT: 2924 ata_scsi_invalid_field(cmd, done); 2925 break; 2926 - 2927 - case INQUIRY: 2928 - if (scsicmd[1] & 2) /* is CmdDt set? */ 2929 - ata_scsi_invalid_field(cmd, done); 2930 - else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? 
*/ 2931 - ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std); 2932 - else switch (scsicmd[2]) { 2933 - case 0x00: 2934 - ata_scsi_rbuf_fill(&args, ata_scsiop_inq_00); 2935 - break; 2936 - case 0x80: 2937 - ata_scsi_rbuf_fill(&args, ata_scsiop_inq_80); 2938 - break; 2939 - case 0x83: 2940 - ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83); 2941 - break; 2942 - case 0x89: 2943 - ata_scsi_rbuf_fill(&args, ata_scsiop_inq_89); 2944 - break; 2945 - default: 2946 - ata_scsi_invalid_field(cmd, done); 2947 - break; 2948 - } 2949 break; 2950 - 2951 - case MODE_SENSE: 2952 - case MODE_SENSE_10: 2953 - ata_scsi_rbuf_fill(&args, ata_scsiop_mode_sense); 2954 break; 2955 - 2956 - case MODE_SELECT: /* unconditionally return */ 2957 - case MODE_SELECT_10: /* bad-field-in-cdb */ 2958 - ata_scsi_invalid_field(cmd, done); 2959 break; 2960 - 2961 - case READ_CAPACITY: 2962 - ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap); 2963 - break; 2964 - 2965 - case SERVICE_ACTION_IN: 2966 - if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16) 2967 - ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap); 2968 - else 2969 - ata_scsi_invalid_field(cmd, done); 2970 - break; 2971 - 2972 - case REPORT_LUNS: 2973 - ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns); 2974 - break; 2975 - 2976 - case REQUEST_SENSE: 2977 - ata_scsi_set_sense(cmd, 0, 0, 0); 2978 - cmd->result = (DRIVER_SENSE << 24); 2979 - done(cmd); 2980 - break; 2981 - 2982 - /* if we reach this, then writeback caching is disabled, 2983 - * turning this into a no-op. 
2984 - */ 2985 - case SYNCHRONIZE_CACHE: 2986 - /* fall through */ 2987 - 2988 - /* no-op's, complete with success */ 2989 - case REZERO_UNIT: 2990 - case SEEK_6: 2991 - case SEEK_10: 2992 - case TEST_UNIT_READY: 2993 - ata_scsi_rbuf_fill(&args, ata_scsiop_noop); 2994 - break; 2995 - 2996 - case SEND_DIAGNOSTIC: 2997 - tmp8 = scsicmd[1] & ~(1 << 3); 2998 - if ((tmp8 == 0x4) && (!scsicmd[3]) && (!scsicmd[4])) 2999 - ata_scsi_rbuf_fill(&args, ata_scsiop_noop); 3000 - else 3001 - ata_scsi_invalid_field(cmd, done); 3002 - break; 3003 - 3004 - /* all other commands */ 3005 default: 3006 - ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x20, 0x0); 3007 - /* "Invalid command operation code" */ 3008 - done(cmd); 3009 break; 3010 } 3011 } 3012
··· 45 #include <scsi/scsi_transport.h> 46 #include <linux/libata.h> 47 #include <linux/hdreg.h> 48 + #include <linux/uaccess.h> 49 50 #include "libata.h" 51 ··· 53 54 typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc); 55 56 + static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap, 57 const struct scsi_device *scsidev); 58 + static struct ata_device *ata_scsi_find_dev(struct ata_port *ap, 59 const struct scsi_device *scsidev); 60 static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, 61 unsigned int id, unsigned int lun); ··· 228 229 scsi_cmd[1] = (4 << 1); /* PIO Data-in */ 230 scsi_cmd[2] = 0x0e; /* no off.line or cc, read from dev, 231 + block count in sector count field */ 232 data_dir = DMA_FROM_DEVICE; 233 } else { 234 scsi_cmd[1] = (3 << 1); /* Non-data */ ··· 252 /* Good values for timeout and retries? Values below 253 from scsi_ioctl_send_command() for default case... */ 254 cmd_result = scsi_execute(scsidev, scsi_cmd, data_dir, argbuf, argsize, 255 + sensebuf, (10*HZ), 5, 0); 256 257 if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */ 258 u8 *desc = sensebuf + 8; ··· 263 if (cmd_result & SAM_STAT_CHECK_CONDITION) { 264 struct scsi_sense_hdr sshdr; 265 scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, 266 + &sshdr); 267 + if (sshdr.sense_key == 0 && 268 + sshdr.asc == 0 && sshdr.ascq == 0) 269 cmd_result &= ~SAM_STAT_CHECK_CONDITION; 270 } 271 272 /* Send userspace a few ATA registers (same as drivers/ide) */ 273 + if (sensebuf[0] == 0x72 && /* format is "descriptor" */ 274 + desc[0] == 0x09) { /* code is "ATA Descriptor" */ 275 + args[0] = desc[13]; /* status */ 276 + args[1] = desc[3]; /* error */ 277 + args[2] = desc[5]; /* sector count (0:7) */ 278 if (copy_to_user(arg, args, sizeof(args))) 279 rc = -EFAULT; 280 } ··· 350 struct scsi_sense_hdr sshdr; 351 scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, 352 &sshdr); 353 + if (sshdr.sense_key == 0 && 354 + sshdr.asc == 0 && 
sshdr.ascq == 0) 355 cmd_result &= ~SAM_STAT_CHECK_CONDITION; 356 } 357 ··· 975 if ((qc->dev->flags & ATA_DFLAG_SPUNDOWN) && 976 (system_state == SYSTEM_HALT || 977 system_state == SYSTEM_POWER_OFF)) { 978 + static unsigned long warned; 979 980 if (!test_and_set_bit(0, &warned)) { 981 ata_dev_printk(qc->dev, KERN_WARNING, ··· 1364 struct ata_eh_info *ehi = &qc->dev->link->eh_info; 1365 struct scsi_cmnd *cmd = qc->scsicmd; 1366 u8 *cdb = cmd->cmnd; 1367 + int need_sense = (qc->err_mask != 0); 1368 1369 /* We snoop the SET_FEATURES - Write Cache ON/OFF command, and 1370 * schedule EH_REVALIDATE operation to update the IDENTIFY DEVICE ··· 1396 * was no error, SK, ASC and ASCQ will all be zero. 1397 */ 1398 if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) && 1399 + ((cdb[2] & 0x20) || need_sense)) { 1400 ata_gen_passthru_sense(qc); 1401 } else { 1402 if (!need_sense) { ··· 1500 return 0; 1501 1502 early_finish: 1503 + ata_qc_free(qc); 1504 qc->scsidone(cmd); 1505 DPRINTK("EXIT - early finish (good or error)\n"); 1506 return 0; ··· 1590 */ 1591 1592 void ata_scsi_rbuf_fill(struct ata_scsi_args *args, 1593 + unsigned int (*actor) (struct ata_scsi_args *args, 1594 + u8 *rbuf, unsigned int buflen)) 1595 { 1596 u8 *rbuf; 1597 unsigned int buflen, rc; ··· 2140 * None. 
2141 */ 2142 unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf, 2143 + unsigned int buflen) 2144 { 2145 u64 last_lba = args->dev->n_sectors - 1; /* LBA of the last block */ 2146 ··· 2464 return 0; 2465 } 2466 2467 + static struct ata_device *ata_find_dev(struct ata_port *ap, int devno) 2468 { 2469 if (ap->nr_pmp_links == 0) { 2470 if (likely(devno < ata_link_max_devices(&ap->link))) ··· 2477 return NULL; 2478 } 2479 2480 + static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap, 2481 + const struct scsi_device *scsidev) 2482 { 2483 int devno; 2484 ··· 2564 ata_scsi_map_proto(u8 byte1) 2565 { 2566 switch((byte1 & 0x1e) >> 1) { 2567 + case 3: /* Non-data */ 2568 + return ATA_PROT_NODATA; 2569 2570 + case 6: /* DMA */ 2571 + case 10: /* UDMA Data-in */ 2572 + case 11: /* UDMA Data-Out */ 2573 + return ATA_PROT_DMA; 2574 2575 + case 4: /* PIO Data-in */ 2576 + case 5: /* PIO Data-out */ 2577 + return ATA_PROT_PIO; 2578 2579 + case 0: /* Hard Reset */ 2580 + case 1: /* SRST */ 2581 + case 8: /* Device Diagnostic */ 2582 + case 9: /* Device Reset */ 2583 + case 7: /* DMA Queued */ 2584 + case 12: /* FPDMA */ 2585 + case 15: /* Return Response Info */ 2586 + default: /* Reserved */ 2587 + break; 2588 } 2589 2590 return ATA_PROT_UNKNOWN; ··· 2919 args.done = done; 2920 2921 switch(scsicmd[0]) { 2922 + /* TODO: worth improving? */ 2923 + case FORMAT_UNIT: 2924 + ata_scsi_invalid_field(cmd, done); 2925 + break; 2926 + 2927 + case INQUIRY: 2928 + if (scsicmd[1] & 2) /* is CmdDt set? */ 2929 ata_scsi_invalid_field(cmd, done); 2930 + else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? 
*/ 2931 + ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std); 2932 + else switch (scsicmd[2]) { 2933 + case 0x00: 2934 + ata_scsi_rbuf_fill(&args, ata_scsiop_inq_00); 2935 break; 2936 + case 0x80: 2937 + ata_scsi_rbuf_fill(&args, ata_scsiop_inq_80); 2938 break; 2939 + case 0x83: 2940 + ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83); 2941 break; 2942 + case 0x89: 2943 + ata_scsi_rbuf_fill(&args, ata_scsiop_inq_89); 2944 break; 2945 default: 2946 + ata_scsi_invalid_field(cmd, done); 2947 break; 2948 + } 2949 + break; 2950 + 2951 + case MODE_SENSE: 2952 + case MODE_SENSE_10: 2953 + ata_scsi_rbuf_fill(&args, ata_scsiop_mode_sense); 2954 + break; 2955 + 2956 + case MODE_SELECT: /* unconditionally return */ 2957 + case MODE_SELECT_10: /* bad-field-in-cdb */ 2958 + ata_scsi_invalid_field(cmd, done); 2959 + break; 2960 + 2961 + case READ_CAPACITY: 2962 + ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap); 2963 + break; 2964 + 2965 + case SERVICE_ACTION_IN: 2966 + if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16) 2967 + ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap); 2968 + else 2969 + ata_scsi_invalid_field(cmd, done); 2970 + break; 2971 + 2972 + case REPORT_LUNS: 2973 + ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns); 2974 + break; 2975 + 2976 + case REQUEST_SENSE: 2977 + ata_scsi_set_sense(cmd, 0, 0, 0); 2978 + cmd->result = (DRIVER_SENSE << 24); 2979 + done(cmd); 2980 + break; 2981 + 2982 + /* if we reach this, then writeback caching is disabled, 2983 + * turning this into a no-op. 
2984 + */ 2985 + case SYNCHRONIZE_CACHE: 2986 + /* fall through */ 2987 + 2988 + /* no-op's, complete with success */ 2989 + case REZERO_UNIT: 2990 + case SEEK_6: 2991 + case SEEK_10: 2992 + case TEST_UNIT_READY: 2993 + ata_scsi_rbuf_fill(&args, ata_scsiop_noop); 2994 + break; 2995 + 2996 + case SEND_DIAGNOSTIC: 2997 + tmp8 = scsicmd[1] & ~(1 << 3); 2998 + if ((tmp8 == 0x4) && (!scsicmd[3]) && (!scsicmd[4])) 2999 + ata_scsi_rbuf_fill(&args, ata_scsiop_noop); 3000 + else 3001 + ata_scsi_invalid_field(cmd, done); 3002 + break; 3003 + 3004 + /* all other commands */ 3005 + default: 3006 + ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x20, 0x0); 3007 + /* "Invalid command operation code" */ 3008 + done(cmd); 3009 + break; 3010 } 3011 } 3012
+1 -1
drivers/ata/libata-sff.c
··· 248 * LOCKING: 249 * spin_lock_irqsave(host lock) 250 */ 251 - void ata_bmdma_start (struct ata_queued_cmd *qc) 252 { 253 struct ata_port *ap = qc->ap; 254 u8 dmactl;
··· 248 * LOCKING: 249 * spin_lock_irqsave(host lock) 250 */ 251 + void ata_bmdma_start(struct ata_queued_cmd *qc) 252 { 253 struct ata_port *ap = qc->ap; 254 u8 dmactl;
+5 -5
drivers/ata/pata_ns87415.c
··· 17 * TODO: 18 * Test PARISC SuperIO 19 * Get someone to test on SPARC 20 - * Implement lazy pio/dma switching for better performance 21 * 8bit shared timing. 22 * See if we need to kill the FIFO for ATAPI 23 */ ··· 60 u16 clocking; 61 u8 iordy; 62 u8 status; 63 - 64 /* Timing register format is 17 - low nybble read timing with 65 the high nybble being 16 - x for recovery time in PCI clocks */ 66 - 67 ata_timing_compute(adev, adev->pio_mode, &t, T, 0); 68 69 clocking = 17 - FIT(t.active, 2, 17); ··· 71 /* Use the same timing for read and write bytes */ 72 clocking |= (clocking << 8); 73 pci_write_config_word(dev, timing, clocking); 74 - 75 /* Set the IORDY enable versus DMA enable on or off properly */ 76 pci_read_config_byte(dev, 0x42, &iordy); 77 iordy &= ~(1 << (4 + unit)); ··· 185 186 if (!mmio) 187 return; 188 - iowrite8((ioread8(mmio + ATA_DMA_CMD) | ATA_DMA_INTR | ATA_DMA_ERR), 189 mmio + ATA_DMA_CMD); 190 } 191
··· 17 * TODO: 18 * Test PARISC SuperIO 19 * Get someone to test on SPARC 20 + * Implement lazy pio/dma switching for better performance 21 * 8bit shared timing. 22 * See if we need to kill the FIFO for ATAPI 23 */ ··· 60 u16 clocking; 61 u8 iordy; 62 u8 status; 63 + 64 /* Timing register format is 17 - low nybble read timing with 65 the high nybble being 16 - x for recovery time in PCI clocks */ 66 + 67 ata_timing_compute(adev, adev->pio_mode, &t, T, 0); 68 69 clocking = 17 - FIT(t.active, 2, 17); ··· 71 /* Use the same timing for read and write bytes */ 72 clocking |= (clocking << 8); 73 pci_write_config_word(dev, timing, clocking); 74 + 75 /* Set the IORDY enable versus DMA enable on or off properly */ 76 pci_read_config_byte(dev, 0x42, &iordy); 77 iordy &= ~(1 << (4 + unit)); ··· 185 186 if (!mmio) 187 return; 188 + iowrite8((ioread8(mmio + ATA_DMA_CMD) | ATA_DMA_INTR | ATA_DMA_ERR), 189 mmio + ATA_DMA_CMD); 190 } 191
+11 -12
drivers/ata/sata_mv.c
··· 845 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; 846 } else { 847 WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)); 848 - } 849 850 /* now properly wait for the eDMA to stop */ 851 for (i = 1000; i > 0; i--) { ··· 883 for (b = 0; b < bytes; ) { 884 DPRINTK("%p: ", start + b); 885 for (w = 0; b < bytes && w < 4; w++) { 886 - printk("%08x ",readl(start + b)); 887 b += sizeof(u32); 888 } 889 printk("\n"); ··· 899 for (b = 0; b < bytes; ) { 900 DPRINTK("%02x: ", b); 901 for (w = 0; b < bytes && w < 4; w++) { 902 - (void) pci_read_config_dword(pdev,b,&dw); 903 - printk("%08x ",dw); 904 b += sizeof(u32); 905 } 906 printk("\n"); ··· 944 } 945 for (p = start_port; p < start_port + num_ports; p++) { 946 port_base = mv_port_base(mmio_base, p); 947 - DPRINTK("EDMA regs (port %i):\n",p); 948 mv_dump_mem(port_base, 0x54); 949 - DPRINTK("SATA regs (port %i):\n",p); 950 mv_dump_mem(port_base+0x300, 0x60); 951 } 952 #endif ··· 1184 u16 flags = 0; 1185 unsigned in_index; 1186 1187 - if (qc->tf.protocol != ATA_PROT_DMA) 1188 return; 1189 1190 /* Fill in command request block ··· 1276 unsigned in_index; 1277 u32 flags = 0; 1278 1279 - if (qc->tf.protocol != ATA_PROT_DMA) 1280 return; 1281 1282 /* Fill in Gen IIE command request block ··· 1606 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS); 1607 1608 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n", 1609 - hc,relevant,hc_irq_cause); 1610 1611 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) { 1612 struct ata_port *ap = host->ports[port]; ··· 1983 for (i = 0; i < 1000; i++) { 1984 udelay(1); 1985 t = readl(reg); 1986 - if (PCI_MASTER_EMPTY & t) { 1987 break; 1988 - } 1989 } 1990 if (!(PCI_MASTER_EMPTY & t)) { 1991 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n"); ··· 2667 */ 2668 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 2669 { 2670 - static int printed_version = 0; 2671 unsigned int board_idx = (unsigned int)ent->driver_data; 2672 const struct ata_port_info *ppi[] = { 
&mv_port_info[board_idx], NULL }; 2673 struct ata_host *host;
··· 845 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; 846 } else { 847 WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)); 848 + } 849 850 /* now properly wait for the eDMA to stop */ 851 for (i = 1000; i > 0; i--) { ··· 883 for (b = 0; b < bytes; ) { 884 DPRINTK("%p: ", start + b); 885 for (w = 0; b < bytes && w < 4; w++) { 886 + printk("%08x ", readl(start + b)); 887 b += sizeof(u32); 888 } 889 printk("\n"); ··· 899 for (b = 0; b < bytes; ) { 900 DPRINTK("%02x: ", b); 901 for (w = 0; b < bytes && w < 4; w++) { 902 + (void) pci_read_config_dword(pdev, b, &dw); 903 + printk("%08x ", dw); 904 b += sizeof(u32); 905 } 906 printk("\n"); ··· 944 } 945 for (p = start_port; p < start_port + num_ports; p++) { 946 port_base = mv_port_base(mmio_base, p); 947 + DPRINTK("EDMA regs (port %i):\n", p); 948 mv_dump_mem(port_base, 0x54); 949 + DPRINTK("SATA regs (port %i):\n", p); 950 mv_dump_mem(port_base+0x300, 0x60); 951 } 952 #endif ··· 1184 u16 flags = 0; 1185 unsigned in_index; 1186 1187 + if (qc->tf.protocol != ATA_PROT_DMA) 1188 return; 1189 1190 /* Fill in command request block ··· 1276 unsigned in_index; 1277 u32 flags = 0; 1278 1279 + if (qc->tf.protocol != ATA_PROT_DMA) 1280 return; 1281 1282 /* Fill in Gen IIE command request block ··· 1606 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS); 1607 1608 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n", 1609 + hc, relevant, hc_irq_cause); 1610 1611 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) { 1612 struct ata_port *ap = host->ports[port]; ··· 1983 for (i = 0; i < 1000; i++) { 1984 udelay(1); 1985 t = readl(reg); 1986 + if (PCI_MASTER_EMPTY & t) 1987 break; 1988 } 1989 if (!(PCI_MASTER_EMPTY & t)) { 1990 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n"); ··· 2668 */ 2669 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 2670 { 2671 + static int printed_version; 2672 unsigned int board_idx = (unsigned int)ent->driver_data; 2673 const struct ata_port_info *ppi[] = { 
&mv_port_info[board_idx], NULL }; 2674 struct ata_host *host;
+33 -35
drivers/ata/sata_nv.c
··· 163 NV_ADMA_STAT_STOPPED = (1 << 10), 164 NV_ADMA_STAT_DONE = (1 << 12), 165 NV_ADMA_STAT_ERR = NV_ADMA_STAT_CPBERR | 166 - NV_ADMA_STAT_TIMEOUT, 167 168 /* port flags */ 169 NV_ADMA_PORT_REGISTER_MODE = (1 << 0), ··· 228 u8 reserved1; /* 1 */ 229 u8 ctl_flags; /* 2 */ 230 /* len is length of taskfile in 64 bit words */ 231 - u8 len; /* 3 */ 232 u8 tag; /* 4 */ 233 u8 next_cpb_idx; /* 5 */ 234 __le16 reserved2; /* 6-7 */ ··· 244 dma_addr_t cpb_dma; 245 struct nv_adma_prd *aprd; 246 dma_addr_t aprd_dma; 247 - void __iomem * ctl_block; 248 - void __iomem * gen_block; 249 - void __iomem * notifier_clear_block; 250 u8 flags; 251 int last_issue_ncq; 252 }; ··· 293 294 #define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & ( 1 << (19 + (12 * (PORT))))) 295 296 - static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); 297 #ifdef CONFIG_PM 298 static int nv_pci_device_resume(struct pci_dev *pdev); 299 #endif ··· 301 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance); 302 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance); 303 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance); 304 - static int nv_scr_read (struct ata_port *ap, unsigned int sc_reg, u32 *val); 305 - static int nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); 306 307 static void nv_nf2_freeze(struct ata_port *ap); 308 static void nv_nf2_thaw(struct ata_port *ap); ··· 653 return; 654 655 status = readw(mmio + NV_ADMA_STAT); 656 - while(!(status & NV_ADMA_STAT_IDLE) && count < 20) { 657 ndelay(50); 658 status = readw(mmio + NV_ADMA_STAT); 659 count++; 660 } 661 - if(count == 20) 662 ata_port_printk(ap, KERN_WARNING, 663 "timeout waiting for ADMA IDLE, stat=0x%hx\n", 664 status); ··· 668 669 count = 0; 670 status = readw(mmio + NV_ADMA_STAT); 671 - while(!(status & NV_ADMA_STAT_LEGACY) && count < 20) { 672 ndelay(50); 673 status = readw(mmio + NV_ADMA_STAT); 674 count++; 675 } 676 - if(count == 20) 677 ata_port_printk(ap, 
KERN_WARNING, 678 "timeout waiting for ADMA LEGACY, stat=0x%hx\n", 679 status); ··· 697 writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL); 698 699 status = readw(mmio + NV_ADMA_STAT); 700 - while(((status & NV_ADMA_STAT_LEGACY) || 701 !(status & NV_ADMA_STAT_IDLE)) && count < 20) { 702 ndelay(50); 703 status = readw(mmio + NV_ADMA_STAT); 704 count++; 705 } 706 - if(count == 20) 707 ata_port_printk(ap, KERN_WARNING, 708 "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n", 709 status); ··· 747 on the port. */ 748 adma_enable = 0; 749 nv_adma_register_mode(ap); 750 - } 751 - else { 752 bounce_limit = *ap->dev->dma_mask; 753 segment_boundary = NV_ADMA_DMA_BOUNDARY; 754 sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN; ··· 756 757 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg); 758 759 - if(ap->port_no == 1) 760 config_mask = NV_MCP_SATA_CFG_20_PORT1_EN | 761 NV_MCP_SATA_CFG_20_PORT1_PWB_EN; 762 else 763 config_mask = NV_MCP_SATA_CFG_20_PORT0_EN | 764 NV_MCP_SATA_CFG_20_PORT0_PWB_EN; 765 766 - if(adma_enable) { 767 new_reg = current_reg | config_mask; 768 pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE; 769 - } 770 - else { 771 new_reg = current_reg & ~config_mask; 772 pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE; 773 } 774 775 - if(current_reg != new_reg) 776 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg); 777 778 blk_queue_bounce_limit(sdev->request_queue, bounce_limit); ··· 805 { 806 unsigned int idx = 0; 807 808 - if(tf->flags & ATA_TFLAG_ISADDR) { 809 if (tf->flags & ATA_TFLAG_LBA48) { 810 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB); 811 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect); ··· 822 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah); 823 } 824 825 - if(tf->flags & ATA_TFLAG_DEVICE) 826 cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device); 827 828 cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND); 829 830 - while(idx < 12) 831 cpb[idx++] = cpu_to_le16(IGN); 832 833 
return idx; ··· 848 int freeze = 0; 849 850 ata_ehi_clear_desc(ehi); 851 - __ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags ); 852 if (flags & NV_CPB_RESP_ATA_ERR) { 853 ata_ehi_push_desc(ehi, "ATA error"); 854 ehi->err_mask |= AC_ERR_DEV; ··· 877 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num); 878 VPRINTK("CPB flags done, flags=0x%x\n", flags); 879 if (likely(qc)) { 880 - DPRINTK("Completing qc from tag %d\n",cpb_num); 881 ata_qc_complete(qc); 882 } else { 883 struct ata_eh_info *ehi = &ap->link.eh_info; ··· 950 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) { 951 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804) 952 >> (NV_INT_PORT_SHIFT * i); 953 - if(ata_tag_valid(ap->link.active_tag)) 954 /** NV_INT_DEV indication seems unreliable at times 955 at least in ADMA mode. Force it on always when a 956 command is active, to prevent losing interrupts. */ ··· 964 965 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL); 966 967 - if( !NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier && 968 !notifier_error) 969 /* Nothing to do */ 970 continue; ··· 988 struct ata_eh_info *ehi = &ap->link.eh_info; 989 990 ata_ehi_clear_desc(ehi); 991 - __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status ); 992 if (status & NV_ADMA_STAT_TIMEOUT) { 993 ehi->err_mask |= AC_ERR_SYSTEM; 994 ata_ehi_push_desc(ehi, "timeout"); ··· 1054 return; 1055 1056 /* clear any outstanding CK804 notifications */ 1057 - writeb( NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT), 1058 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804); 1059 1060 /* Disable interrupt */ 1061 tmp = readw(mmio + NV_ADMA_CTL); 1062 - writew( tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN), 1063 mmio + NV_ADMA_CTL); 1064 - readw( mmio + NV_ADMA_CTL ); /* flush posted write */ 1065 } 1066 1067 static void nv_adma_thaw(struct ata_port *ap) ··· 1077 1078 /* Enable interrupt */ 1079 tmp = readw(mmio + NV_ADMA_CTL); 1080 - writew( tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN), 1081 mmio + 
NV_ADMA_CTL); 1082 - readw( mmio + NV_ADMA_CTL ); /* flush posted write */ 1083 } 1084 1085 static void nv_adma_irq_clear(struct ata_port *ap) ··· 1094 } 1095 1096 /* clear any outstanding CK804 notifications */ 1097 - writeb( NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT), 1098 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804); 1099 1100 /* clear ADMA status */
··· 163 NV_ADMA_STAT_STOPPED = (1 << 10), 164 NV_ADMA_STAT_DONE = (1 << 12), 165 NV_ADMA_STAT_ERR = NV_ADMA_STAT_CPBERR | 166 + NV_ADMA_STAT_TIMEOUT, 167 168 /* port flags */ 169 NV_ADMA_PORT_REGISTER_MODE = (1 << 0), ··· 228 u8 reserved1; /* 1 */ 229 u8 ctl_flags; /* 2 */ 230 /* len is length of taskfile in 64 bit words */ 231 + u8 len; /* 3 */ 232 u8 tag; /* 4 */ 233 u8 next_cpb_idx; /* 5 */ 234 __le16 reserved2; /* 6-7 */ ··· 244 dma_addr_t cpb_dma; 245 struct nv_adma_prd *aprd; 246 dma_addr_t aprd_dma; 247 + void __iomem *ctl_block; 248 + void __iomem *gen_block; 249 + void __iomem *notifier_clear_block; 250 u8 flags; 251 int last_issue_ncq; 252 }; ··· 293 294 #define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & ( 1 << (19 + (12 * (PORT))))) 295 296 + static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 297 #ifdef CONFIG_PM 298 static int nv_pci_device_resume(struct pci_dev *pdev); 299 #endif ··· 301 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance); 302 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance); 303 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance); 304 + static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); 305 + static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); 306 307 static void nv_nf2_freeze(struct ata_port *ap); 308 static void nv_nf2_thaw(struct ata_port *ap); ··· 653 return; 654 655 status = readw(mmio + NV_ADMA_STAT); 656 + while (!(status & NV_ADMA_STAT_IDLE) && count < 20) { 657 ndelay(50); 658 status = readw(mmio + NV_ADMA_STAT); 659 count++; 660 } 661 + if (count == 20) 662 ata_port_printk(ap, KERN_WARNING, 663 "timeout waiting for ADMA IDLE, stat=0x%hx\n", 664 status); ··· 668 669 count = 0; 670 status = readw(mmio + NV_ADMA_STAT); 671 + while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) { 672 ndelay(50); 673 status = readw(mmio + NV_ADMA_STAT); 674 count++; 675 } 676 + if (count == 20) 677 ata_port_printk(ap, 
KERN_WARNING, 678 "timeout waiting for ADMA LEGACY, stat=0x%hx\n", 679 status); ··· 697 writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL); 698 699 status = readw(mmio + NV_ADMA_STAT); 700 + while (((status & NV_ADMA_STAT_LEGACY) || 701 !(status & NV_ADMA_STAT_IDLE)) && count < 20) { 702 ndelay(50); 703 status = readw(mmio + NV_ADMA_STAT); 704 count++; 705 } 706 + if (count == 20) 707 ata_port_printk(ap, KERN_WARNING, 708 "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n", 709 status); ··· 747 on the port. */ 748 adma_enable = 0; 749 nv_adma_register_mode(ap); 750 + } else { 751 bounce_limit = *ap->dev->dma_mask; 752 segment_boundary = NV_ADMA_DMA_BOUNDARY; 753 sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN; ··· 757 758 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg); 759 760 + if (ap->port_no == 1) 761 config_mask = NV_MCP_SATA_CFG_20_PORT1_EN | 762 NV_MCP_SATA_CFG_20_PORT1_PWB_EN; 763 else 764 config_mask = NV_MCP_SATA_CFG_20_PORT0_EN | 765 NV_MCP_SATA_CFG_20_PORT0_PWB_EN; 766 767 + if (adma_enable) { 768 new_reg = current_reg | config_mask; 769 pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE; 770 + } else { 771 new_reg = current_reg & ~config_mask; 772 pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE; 773 } 774 775 + if (current_reg != new_reg) 776 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg); 777 778 blk_queue_bounce_limit(sdev->request_queue, bounce_limit); ··· 807 { 808 unsigned int idx = 0; 809 810 + if (tf->flags & ATA_TFLAG_ISADDR) { 811 if (tf->flags & ATA_TFLAG_LBA48) { 812 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB); 813 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect); ··· 824 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah); 825 } 826 827 + if (tf->flags & ATA_TFLAG_DEVICE) 828 cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device); 829 830 cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND); 831 832 + while (idx < 12) 833 cpb[idx++] = cpu_to_le16(IGN); 834 835 
return idx; ··· 850 int freeze = 0; 851 852 ata_ehi_clear_desc(ehi); 853 + __ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags); 854 if (flags & NV_CPB_RESP_ATA_ERR) { 855 ata_ehi_push_desc(ehi, "ATA error"); 856 ehi->err_mask |= AC_ERR_DEV; ··· 879 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num); 880 VPRINTK("CPB flags done, flags=0x%x\n", flags); 881 if (likely(qc)) { 882 + DPRINTK("Completing qc from tag %d\n", cpb_num); 883 ata_qc_complete(qc); 884 } else { 885 struct ata_eh_info *ehi = &ap->link.eh_info; ··· 952 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) { 953 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804) 954 >> (NV_INT_PORT_SHIFT * i); 955 + if (ata_tag_valid(ap->link.active_tag)) 956 /** NV_INT_DEV indication seems unreliable at times 957 at least in ADMA mode. Force it on always when a 958 command is active, to prevent losing interrupts. */ ··· 966 967 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL); 968 969 + if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier && 970 !notifier_error) 971 /* Nothing to do */ 972 continue; ··· 990 struct ata_eh_info *ehi = &ap->link.eh_info; 991 992 ata_ehi_clear_desc(ehi); 993 + __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status); 994 if (status & NV_ADMA_STAT_TIMEOUT) { 995 ehi->err_mask |= AC_ERR_SYSTEM; 996 ata_ehi_push_desc(ehi, "timeout"); ··· 1056 return; 1057 1058 /* clear any outstanding CK804 notifications */ 1059 + writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT), 1060 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804); 1061 1062 /* Disable interrupt */ 1063 tmp = readw(mmio + NV_ADMA_CTL); 1064 + writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN), 1065 mmio + NV_ADMA_CTL); 1066 + readw(mmio + NV_ADMA_CTL); /* flush posted write */ 1067 } 1068 1069 static void nv_adma_thaw(struct ata_port *ap) ··· 1079 1080 /* Enable interrupt */ 1081 tmp = readw(mmio + NV_ADMA_CTL); 1082 + writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN), 1083 mmio +
NV_ADMA_CTL); 1084 + readw(mmio + NV_ADMA_CTL); /* flush posted write */ 1085 } 1086 1087 static void nv_adma_irq_clear(struct ata_port *ap) ··· 1096 } 1097 1098 /* clear any outstanding CK804 notifications */ 1099 + writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT), 1100 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804); 1101 1102 /* clear ADMA status */
+2 -2
drivers/ata/sata_sx4.c
··· 62 submit ATA packet to hardware 63 hardware executes ATA WRITE command, w/ data in DIMM 64 hardware raises interrupt 65 - 66 and each READ looks like this: 67 68 submit ATA packet to hardware 69 hardware executes ATA READ command, w/ data in DIMM 70 hardware raises interrupt 71 - 72 submit HDMA packet to hardware 73 hardware copies data from DIMM to system memory 74 hardware raises interrupt
··· 62 submit ATA packet to hardware 63 hardware executes ATA WRITE command, w/ data in DIMM 64 hardware raises interrupt 65 + 66 and each READ looks like this: 67 68 submit ATA packet to hardware 69 hardware executes ATA READ command, w/ data in DIMM 70 hardware raises interrupt 71 + 72 submit HDMA packet to hardware 73 hardware copies data from DIMM to system memory 74 hardware raises interrupt
+3 -3
include/linux/ata.h
··· 178 ATA_CMD_PACKET = 0xA0, 179 ATA_CMD_VERIFY = 0x40, 180 ATA_CMD_VERIFY_EXT = 0x42, 181 - ATA_CMD_STANDBYNOW1 = 0xE0, 182 - ATA_CMD_IDLEIMMEDIATE = 0xE1, 183 ATA_CMD_INIT_DEV_PARAMS = 0x91, 184 ATA_CMD_READ_NATIVE_MAX = 0xF8, 185 ATA_CMD_READ_NATIVE_MAX_EXT = 0x27, ··· 458 * ATA-3 introduces word 80 and accurate reporting 459 * 460 * The practical impact of this is that ata_id_major_version cannot 461 - * reliably report on drives below ATA3. 462 */ 463 464 static inline unsigned int ata_id_major_version(const u16 *id)
··· 178 ATA_CMD_PACKET = 0xA0, 179 ATA_CMD_VERIFY = 0x40, 180 ATA_CMD_VERIFY_EXT = 0x42, 181 + ATA_CMD_STANDBYNOW1 = 0xE0, 182 + ATA_CMD_IDLEIMMEDIATE = 0xE1, 183 ATA_CMD_INIT_DEV_PARAMS = 0x91, 184 ATA_CMD_READ_NATIVE_MAX = 0xF8, 185 ATA_CMD_READ_NATIVE_MAX_EXT = 0x27, ··· 458 * ATA-3 introduces word 80 and accurate reporting 459 * 460 * The practical impact of this is that ata_id_major_version cannot 461 + * reliably report on drives below ATA3. 462 */ 463 464 static inline unsigned int ata_id_major_version(const u16 *id)
+11 -11
include/linux/libata.h
··· 326 ATA_HORKAGE_SKIP_PM = (1 << 5), /* Skip PM operations */ 327 ATA_HORKAGE_HPA_SIZE = (1 << 6), /* native size off by one */ 328 329 - /* DMA mask for user DMA control: User visible values; DO NOT 330 renumber */ 331 ATA_DMA_MASK_ATA = (1 << 0), /* DMA on ATA Disk */ 332 ATA_DMA_MASK_ATAPI = (1 << 1), /* DMA on ATAPI */ ··· 717 unsigned short udma; /* t2CYCTYP/2 */ 718 }; 719 720 - #define FIT(v,vmin,vmax) max_t(short,min_t(short,v,vmax),vmin) 721 722 extern const unsigned long sata_deb_timing_normal[]; 723 extern const unsigned long sata_deb_timing_hotplug[]; ··· 816 extern void ata_tf_to_fis(const struct ata_taskfile *tf, 817 u8 pmp, int is_cmd, u8 *fis); 818 extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf); 819 - extern void ata_noop_dev_select (struct ata_port *ap, unsigned int device); 820 - extern void ata_std_dev_select (struct ata_port *ap, unsigned int device); 821 extern u8 ata_check_status(struct ata_port *ap); 822 extern u8 ata_altstatus(struct ata_port *ap); 823 extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf); 824 - extern int ata_port_start (struct ata_port *ap); 825 - extern int ata_sff_port_start (struct ata_port *ap); 826 - extern irqreturn_t ata_interrupt (int irq, void *dev_instance); 827 extern void ata_data_xfer(struct ata_device *adev, unsigned char *buf, 828 unsigned int buflen, int write_data); 829 extern void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf, ··· 844 extern void ata_id_c_string(const u16 *id, unsigned char *s, 845 unsigned int ofs, unsigned int len); 846 extern void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown); 847 - extern void ata_bmdma_setup (struct ata_queued_cmd *qc); 848 - extern void ata_bmdma_start (struct ata_queued_cmd *qc); 849 extern void ata_bmdma_stop(struct ata_queued_cmd *qc); 850 extern u8 ata_bmdma_status(struct ata_port *ap); 851 extern void ata_bmdma_irq_clear(struct ata_port *ap); ··· 920 #ifdef CONFIG_PCI 921 struct 
pci_dev; 922 923 - extern int ata_pci_init_one (struct pci_dev *pdev, 924 const struct ata_port_info * const * ppi); 925 - extern void ata_pci_remove_one (struct pci_dev *pdev); 926 #ifdef CONFIG_PM 927 extern void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg); 928 extern int __must_check ata_pci_device_do_resume(struct pci_dev *pdev);
··· 326 ATA_HORKAGE_SKIP_PM = (1 << 5), /* Skip PM operations */ 327 ATA_HORKAGE_HPA_SIZE = (1 << 6), /* native size off by one */ 328 329 + /* DMA mask for user DMA control: User visible values; DO NOT 330 renumber */ 331 ATA_DMA_MASK_ATA = (1 << 0), /* DMA on ATA Disk */ 332 ATA_DMA_MASK_ATAPI = (1 << 1), /* DMA on ATAPI */ ··· 717 unsigned short udma; /* t2CYCTYP/2 */ 718 }; 719 720 + #define FIT(v, vmin, vmax) max_t(short, min_t(short, v, vmax), vmin) 721 722 extern const unsigned long sata_deb_timing_normal[]; 723 extern const unsigned long sata_deb_timing_hotplug[]; ··· 816 extern void ata_tf_to_fis(const struct ata_taskfile *tf, 817 u8 pmp, int is_cmd, u8 *fis); 818 extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf); 819 + extern void ata_noop_dev_select(struct ata_port *ap, unsigned int device); 820 + extern void ata_std_dev_select(struct ata_port *ap, unsigned int device); 821 extern u8 ata_check_status(struct ata_port *ap); 822 extern u8 ata_altstatus(struct ata_port *ap); 823 extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf); 824 + extern int ata_port_start(struct ata_port *ap); 825 + extern int ata_sff_port_start(struct ata_port *ap); 826 + extern irqreturn_t ata_interrupt(int irq, void *dev_instance); 827 extern void ata_data_xfer(struct ata_device *adev, unsigned char *buf, 828 unsigned int buflen, int write_data); 829 extern void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf, ··· 844 extern void ata_id_c_string(const u16 *id, unsigned char *s, 845 unsigned int ofs, unsigned int len); 846 extern void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown); 847 + extern void ata_bmdma_setup(struct ata_queued_cmd *qc); 848 + extern void ata_bmdma_start(struct ata_queued_cmd *qc); 849 extern void ata_bmdma_stop(struct ata_queued_cmd *qc); 850 extern u8 ata_bmdma_status(struct ata_port *ap); 851 extern void ata_bmdma_irq_clear(struct ata_port *ap); ··· 920 #ifdef CONFIG_PCI 921 struct 
pci_dev; 922 923 + extern int ata_pci_init_one(struct pci_dev *pdev, 924 const struct ata_port_info * const * ppi); 925 + extern void ata_pci_remove_one(struct pci_dev *pdev); 926 #ifdef CONFIG_PM 927 extern void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg); 928 extern int __must_check ata_pci_device_do_resume(struct pci_dev *pdev);