[libata] checkpatch-inspired cleanups

Tackle the relatively sane complaints of checkpatch --file.
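
For reference, that means running something like:

  $ scripts/checkpatch.pl --file drivers/ata/libata-core.c

against each of the files touched below.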

The vast majority are indentation and whitespace changes; the rest are:

* #include fixes
* printk KERN_xxx prefix addition
* BSS/initializer cleanups
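
For example, all three of these show up in the drivers/ata/libata-core.c hunks below:

  -#include <asm/io.h>
  +#include <linux/io.h>          (moved into the <linux/...> include block)

  -	printk("ata: SEMB device ignored\n");
  +	printk(KERN_INFO "ata: SEMB device ignored\n");

  -static int ata_ignore_hpa = 0;
  +static int ata_ignore_hpa;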

Signed-off-by: Jeff Garzik <jgarzik@redhat.com>

13 files changed, 266 insertions(+), 268 deletions(-)
drivers/ata/ahci.c | +13 -13
··· 227 227 228 228 static int ahci_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); 229 229 static int ahci_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); 230 - static int ahci_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); 230 + static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 231 231 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc); 232 232 static void ahci_irq_clear(struct ata_port *ap); 233 233 static int ahci_port_start(struct ata_port *ap); ··· 729 729 730 730 /* wait for engine to stop. This could be as long as 500 msec */ 731 731 tmp = ata_wait_register(port_mmio + PORT_CMD, 732 - PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500); 732 + PORT_CMD_LIST_ON, PORT_CMD_LIST_ON, 1, 500); 733 733 if (tmp & PORT_CMD_LIST_ON) 734 734 return -EIO; 735 735 ··· 1564 1564 if (!irq_stat) 1565 1565 return IRQ_NONE; 1566 1566 1567 - spin_lock(&host->lock); 1567 + spin_lock(&host->lock); 1568 1568 1569 - for (i = 0; i < host->n_ports; i++) { 1569 + for (i = 0; i < host->n_ports; i++) { 1570 1570 struct ata_port *ap; 1571 1571 1572 1572 if (!(irq_stat & (1 << i))) ··· 1829 1829 pp->cmd_tbl_dma = mem_dma; 1830 1830 1831 1831 /* 1832 - * Save off initial list of interrupts to be enabled. 1833 - * This could be changed later 1834 - */ 1832 + * Save off initial list of interrupts to be enabled. 1833 + * This could be changed later 1834 + */ 1835 1835 pp->intr_mask = DEF_PORT_IRQ; 1836 1836 1837 1837 ap->private_data = pp; ··· 1918 1918 dev_printk(KERN_INFO, &pdev->dev, 1919 1919 "AHCI %02x%02x.%02x%02x " 1920 1920 "%u slots %u ports %s Gbps 0x%x impl %s mode\n" 1921 - , 1921 + , 1922 1922 1923 - (vers >> 24) & 0xff, 1924 - (vers >> 16) & 0xff, 1925 - (vers >> 8) & 0xff, 1926 - vers & 0xff, 1923 + (vers >> 24) & 0xff, 1924 + (vers >> 16) & 0xff, 1925 + (vers >> 8) & 0xff, 1926 + vers & 0xff, 1927 1927 1928 1928 ((cap >> 8) & 0x1f) + 1, 1929 1929 (cap & 0x1f) + 1, ··· 1935 1935 "flags: " 1936 1936 "%s%s%s%s%s%s%s" 1937 1937 "%s%s%s%s%s%s%s\n" 1938 - , 1938 + , 1939 1939 1940 1940 cap & (1 << 31) ? "64bit " : "", 1941 1941 cap & (1 << 30) ? "ncq " : "",
drivers/ata/ata_piix.c | +14 -15
··· 157 157 const int *map; 158 158 }; 159 159 160 - static int piix_init_one (struct pci_dev *pdev, 161 - const struct pci_device_id *ent); 160 + static int piix_init_one(struct pci_dev *pdev, 161 + const struct pci_device_id *ent); 162 162 static void piix_pata_error_handler(struct ata_port *ap); 163 - static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev); 164 - static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev); 165 - static void ich_set_dmamode (struct ata_port *ap, struct ata_device *adev); 163 + static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev); 164 + static void piix_set_dmamode(struct ata_port *ap, struct ata_device *adev); 165 + static void ich_set_dmamode(struct ata_port *ap, struct ata_device *adev); 166 166 static int ich_pata_cable_detect(struct ata_port *ap); 167 167 #ifdef CONFIG_PM 168 168 static int piix_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg); ··· 650 650 while (lap->device) { 651 651 if (lap->device == pdev->device && 652 652 lap->subvendor == pdev->subsystem_vendor && 653 - lap->subdevice == pdev->subsystem_device) { 653 + lap->subdevice == pdev->subsystem_device) 654 654 return ATA_CBL_PATA40_SHORT; 655 - } 655 + 656 656 lap++; 657 657 } 658 658 ··· 699 699 * None (inherited from caller). 700 700 */ 701 701 702 - static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev) 702 + static void piix_set_piomode(struct ata_port *ap, struct ata_device *adev) 703 703 { 704 704 unsigned int pio = adev->pio_mode - XFER_PIO_0; 705 705 struct pci_dev *dev = to_pci_dev(ap->host->dev); ··· 786 786 * None (inherited from caller). 787 787 */ 788 788 789 - static void do_pata_set_dmamode (struct ata_port *ap, struct ata_device *adev, int isich) 789 + static void do_pata_set_dmamode(struct ata_port *ap, struct ata_device *adev, int isich) 790 790 { 791 791 struct pci_dev *dev = to_pci_dev(ap->host->dev); 792 792 u8 master_port = ap->port_no ? 0x42 : 0x40; ··· 813 813 int u_clock, u_speed; 814 814 815 815 /* 816 - * UDMA is handled by a combination of clock switching and 816 + * UDMA is handled by a combination of clock switching and 817 817 * selection of dividers 818 818 * 819 819 * Handy rule: Odd modes are UDMATIMx 01, even are 02 ··· 905 905 * None (inherited from caller). 906 906 */ 907 907 908 - static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev) 908 + static void piix_set_dmamode(struct ata_port *ap, struct ata_device *adev) 909 909 { 910 910 do_pata_set_dmamode(ap, adev, 0); 911 911 } ··· 921 921 * None (inherited from caller). 922 922 */ 923 923 924 - static void ich_set_dmamode (struct ata_port *ap, struct ata_device *adev) 924 + static void ich_set_dmamode(struct ata_port *ap, struct ata_device *adev) 925 925 { 926 926 do_pata_set_dmamode(ap, adev, 1); 927 927 } ··· 1106 1106 u16 cfg; 1107 1107 int no_piix_dma = 0; 1108 1108 1109 - while((pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, pdev)) != NULL) 1110 - { 1109 + while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82454NX, pdev)) != NULL) { 1111 1110 /* Look for 450NX PXB. Check for problem configurations 1112 1111 A PCI quirk checks bit 6 already */ 1113 1112 pci_read_config_word(pdev, 0x41, &cfg); ··· 1240 1241 * Zero on success, or -ERRNO value. 
1241 1242 */ 1242 1243 1243 - static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) 1244 + static int piix_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 1244 1245 { 1245 1246 static int printed_version; 1246 1247 struct device *dev = &pdev->dev;
drivers/ata/libata-acpi.c | +8 -8
··· 26 26 #include <acpi/actypes.h> 27 27 28 28 #define NO_PORT_MULT 0xffff 29 - #define SATA_ADR(root,pmp) (((root) << 16) | (pmp)) 29 + #define SATA_ADR(root, pmp) (((root) << 16) | (pmp)) 30 30 31 31 #define REGS_PER_GTF 7 32 32 struct ata_acpi_gtf { ··· 96 96 } 97 97 } 98 98 99 - static void ata_acpi_handle_hotplug (struct ata_port *ap, struct kobject *kobj, 100 - u32 event) 99 + static void ata_acpi_handle_hotplug(struct ata_port *ap, struct kobject *kobj, 100 + u32 event) 101 101 { 102 102 char event_string[12]; 103 103 char *envp[] = { event_string, NULL }; ··· 114 114 } 115 115 116 116 if (kobj) { 117 - sprintf(event_string, "BAY_EVENT=%d", event); 117 + sprintf(event_string, "BAY_EVENT=%d", event); 118 118 kobject_uevent_env(kobj, KOBJ_CHANGE, envp); 119 119 } 120 120 } ··· 127 127 if (dev->sdev) 128 128 kobj = &dev->sdev->sdev_gendev.kobj; 129 129 130 - ata_acpi_handle_hotplug (dev->link->ap, kobj, event); 130 + ata_acpi_handle_hotplug(dev->link->ap, kobj, event); 131 131 } 132 132 133 133 static void ata_acpi_ap_notify(acpi_handle handle, u32 event, void *data) 134 134 { 135 135 struct ata_port *ap = data; 136 136 137 - ata_acpi_handle_hotplug (ap, &ap->dev->kobj, event); 137 + ata_acpi_handle_hotplug(ap, &ap->dev->kobj, event); 138 138 } 139 139 140 140 /** ··· 398 398 { 399 399 struct ata_acpi_gtm gtm; 400 400 int valid = 0; 401 - 401 + 402 402 /* No _GTM data, no information */ 403 403 if (ata_acpi_gtm(ap, &gtm) < 0) 404 404 return 0; 405 - 405 + 406 406 /* Split timing, DMA enabled */ 407 407 if ((gtm.flags & 0x11) == 0x11 && gtm.drive[0].dma < 55) 408 408 valid |= 1;
drivers/ata/libata-core.c | +34 -32
··· 49 49 #include <linux/workqueue.h> 50 50 #include <linux/jiffies.h> 51 51 #include <linux/scatterlist.h> 52 + #include <linux/io.h> 52 53 #include <scsi/scsi.h> 53 54 #include <scsi/scsi_cmnd.h> 54 55 #include <scsi/scsi_host.h> 55 56 #include <linux/libata.h> 56 - #include <asm/io.h> 57 57 #include <asm/semaphore.h> 58 58 #include <asm/byteorder.h> 59 59 ··· 93 93 module_param_named(fua, libata_fua, int, 0444); 94 94 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)"); 95 95 96 - static int ata_ignore_hpa = 0; 96 + static int ata_ignore_hpa; 97 97 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644); 98 98 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)"); 99 99 ··· 713 713 } 714 714 715 715 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) { 716 - printk("ata: SEMB device ignored\n"); 716 + printk(KERN_INFO "ata: SEMB device ignored\n"); 717 717 return ATA_DEV_SEMB_UNSUP; /* not yet */ 718 718 } 719 719 ··· 939 939 *max_sectors = ata_tf_to_lba48(&tf); 940 940 else 941 941 *max_sectors = ata_tf_to_lba(&tf); 942 - if (dev->horkage & ATA_HORKAGE_HPA_SIZE) 942 + if (dev->horkage & ATA_HORKAGE_HPA_SIZE) 943 943 (*max_sectors)--; 944 944 return 0; 945 945 } ··· 1151 1151 * LOCKING: 1152 1152 * caller. 1153 1153 */ 1154 - void ata_noop_dev_select (struct ata_port *ap, unsigned int device) 1154 + void ata_noop_dev_select(struct ata_port *ap, unsigned int device) 1155 1155 { 1156 1156 } 1157 1157 ··· 1171 1171 * caller. 1172 1172 */ 1173 1173 1174 - void ata_std_dev_select (struct ata_port *ap, unsigned int device) 1174 + void ata_std_dev_select(struct ata_port *ap, unsigned int device) 1175 1175 { 1176 1176 u8 tmp; 1177 1177 ··· 1292 1292 */ 1293 1293 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF; 1294 1294 if (mode < 5) /* Valid PIO range */ 1295 - pio_mask = (2 << mode) - 1; 1295 + pio_mask = (2 << mode) - 1; 1296 1296 else 1297 1297 pio_mask = 1; 1298 1298 ··· 1693 1693 * for pre-ATA4 drives. 1694 1694 * 1695 1695 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right 1696 - * now we abort if we hit that case. 1696 + * now we abort if we hit that case. 1697 1697 * 1698 1698 * LOCKING: 1699 1699 * Kernel thread context (may sleep) ··· 1979 1979 "supports DRM functions and may " 1980 1980 "not be fully accessable.\n"); 1981 1981 snprintf(revbuf, 7, "CFA"); 1982 - } 1983 - else 1984 - snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id)); 1982 + } else 1983 + snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id)); 1985 1984 1986 1985 dev->n_sectors = ata_id_n_sectors(id); 1987 1986 ··· 2109 2110 /* Let the user know. We don't want to disallow opens for 2110 2111 rescue purposes, or in case the vendor is just a blithering 2111 2112 idiot */ 2112 - if (print_info) { 2113 + if (print_info) { 2113 2114 ata_dev_printk(dev, KERN_WARNING, 2114 2115 "Drive reports diagnostics failure. 
This may indicate a drive\n"); 2115 2116 ata_dev_printk(dev, KERN_WARNING, ··· 2666 2667 { 0xFF } 2667 2668 }; 2668 2669 2669 - #define ENOUGH(v,unit) (((v)-1)/(unit)+1) 2670 - #define EZ(v,unit) ((v)?ENOUGH(v,unit):0) 2670 + #define ENOUGH(v, unit) (((v)-1)/(unit)+1) 2671 + #define EZ(v, unit) ((v)?ENOUGH(v, unit):0) 2671 2672 2672 2673 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT) 2673 2674 { ··· 2694 2695 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma); 2695 2696 } 2696 2697 2697 - static const struct ata_timing* ata_timing_find_mode(unsigned short speed) 2698 + static const struct ata_timing *ata_timing_find_mode(unsigned short speed) 2698 2699 { 2699 2700 const struct ata_timing *t; 2700 2701 ··· 2726 2727 2727 2728 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */ 2728 2729 memset(&p, 0, sizeof(p)); 2729 - if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) { 2730 + if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) { 2730 2731 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO]; 2731 2732 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY]; 2732 - } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) { 2733 + } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) { 2733 2734 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN]; 2734 2735 } 2735 2736 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B); ··· 2875 2876 dev->flags |= ATA_DFLAG_PIO; 2876 2877 2877 2878 err_mask = ata_dev_set_xfermode(dev); 2879 + 2878 2880 /* Old CFA may refuse this command, which is just fine */ 2879 2881 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id)) 2880 - err_mask &= ~AC_ERR_DEV; 2882 + err_mask &= ~AC_ERR_DEV; 2883 + 2881 2884 /* Some very old devices and some bad newer ones fail any kind of 2882 2885 SET_XFERMODE request but support PIO0-2 timings and no IORDY */ 2883 2886 if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) && 2884 2887 dev->pio_mode <= XFER_PIO_2) 2885 2888 err_mask &= ~AC_ERR_DEV; 2889 + 2886 2890 if (err_mask) { 2887 2891 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode " 2888 2892 "(err_mask=0x%x)\n", err_mask); ··· 3945 3943 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA }, 3946 3944 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA }, 3947 3945 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA }, 3948 - { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA }, 3946 + { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA }, 3949 3947 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA }, 3950 3948 { "IOMEGA ZIP 250 ATAPI", NULL, ATA_HORKAGE_NODMA }, /* temporary fix */ 3951 3949 { "IOMEGA ZIP 250 ATAPI Floppy", ··· 3961 3959 3962 3960 /* Devices where NCQ should be avoided */ 3963 3961 /* NCQ is slow */ 3964 - { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ }, 3962 + { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ }, 3965 3963 /* http://thread.gmane.org/gmane.linux.ide/14907 */ 3966 3964 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ }, 3967 3965 /* NCQ is broken */ ··· 4108 4106 } 4109 4107 4110 4108 if ((host->flags & ATA_HOST_SIMPLEX) && 4111 - host->simplex_claimed && host->simplex_claimed != ap) { 4109 + host->simplex_claimed && host->simplex_claimed != ap) { 4112 4110 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 4113 4111 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by " 4114 4112 "other device, disabling DMA\n"); ··· 4130 4128 */ 4131 4129 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA)) 4132 4130 /* UDMA/44 or higher would be available 
*/ 4133 - if((ap->cbl == ATA_CBL_PATA40) || 4134 - (ata_drive_40wire(dev->id) && 4135 - (ap->cbl == ATA_CBL_PATA_UNK || 4136 - ap->cbl == ATA_CBL_PATA80))) { 4137 - ata_dev_printk(dev, KERN_WARNING, 4131 + if ((ap->cbl == ATA_CBL_PATA40) || 4132 + (ata_drive_40wire(dev->id) && 4133 + (ap->cbl == ATA_CBL_PATA_UNK || 4134 + ap->cbl == ATA_CBL_PATA80))) { 4135 + ata_dev_printk(dev, KERN_WARNING, 4138 4136 "limited to UDMA/33 due to 40-wire cable\n"); 4139 4137 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA); 4140 4138 } ··· 4397 4395 u32 addr, offset; 4398 4396 u32 sg_len, len, blen; 4399 4397 4400 - /* determine if physical DMA addr spans 64K boundary. 4398 + /* determine if physical DMA addr spans 64K boundary. 4401 4399 * Note h/w doesn't support 64-bit, so we unconditionally 4402 4400 * truncate dma_addr_t to u32. 4403 4401 */ ··· 4982 4980 "%u bytes trailing data\n", bytes); 4983 4981 4984 4982 for (i = 0; i < words; i++) 4985 - ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write); 4983 + ap->ops->data_xfer(qc->dev, (unsigned char *)pad_buf, 2, do_write); 4986 4984 4987 4985 ap->hsm_task_state = HSM_ST_LAST; 4988 4986 return; ··· 5910 5908 * One if interrupt was handled, zero if not (shared irq). 5911 5909 */ 5912 5910 5913 - inline unsigned int ata_host_intr (struct ata_port *ap, 5914 - struct ata_queued_cmd *qc) 5911 + inline unsigned int ata_host_intr(struct ata_port *ap, 5912 + struct ata_queued_cmd *qc) 5915 5913 { 5916 5914 struct ata_eh_info *ehi = &ap->link.eh_info; 5917 5915 u8 status, host_stat = 0; ··· 6011 6009 * IRQ_NONE or IRQ_HANDLED. 6012 6010 */ 6013 6011 6014 - irqreturn_t ata_interrupt (int irq, void *dev_instance) 6012 + irqreturn_t ata_interrupt(int irq, void *dev_instance) 6015 6013 { 6016 6014 struct ata_host *host = dev_instance; 6017 6015 unsigned int i; ··· 6214 6212 6215 6213 /* This is wrong. On a failed flush we get back the LBA of the lost 6216 6214 sector and we should (assuming it wasn't aborted as unknown) issue 6217 - a further flush command to continue the writeback until it 6215 + a further flush command to continue the writeback until it 6218 6216 does not error */ 6219 6217 err_mask = ata_do_simple_cmd(dev, cmd); 6220 6218 if (err_mask) {
drivers/ata/libata-eh.c | +7 -7
··· 1197 1197 * RETURNS: 1198 1198 * Descriptive string for @err_mask 1199 1199 */ 1200 - static const char * ata_err_string(unsigned int err_mask) 1200 + static const char *ata_err_string(unsigned int err_mask) 1201 1201 { 1202 1202 if (err_mask & AC_ERR_HOST_BUS) 1203 1203 return "host bus error"; ··· 1934 1934 ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "", 1935 1935 ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "", 1936 1936 ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "", 1937 - ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "" ); 1937 + ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : ""); 1938 1938 1939 1939 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) { 1940 1940 static const char *dma_str[] = { ··· 1969 1969 qc->err_mask & AC_ERR_NCQ ? " <F>" : ""); 1970 1970 1971 1971 if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ | 1972 - ATA_ERR) ) { 1972 + ATA_ERR)) { 1973 1973 if (res->command & ATA_BUSY) 1974 1974 ata_dev_printk(qc->dev, KERN_ERR, 1975 - "status: { Busy }\n" ); 1975 + "status: { Busy }\n"); 1976 1976 else 1977 1977 ata_dev_printk(qc->dev, KERN_ERR, 1978 1978 "status: { %s%s%s%s}\n", 1979 1979 res->command & ATA_DRDY ? "DRDY " : "", 1980 1980 res->command & ATA_DF ? "DF " : "", 1981 1981 res->command & ATA_DRQ ? "DRQ " : "", 1982 - res->command & ATA_ERR ? "ERR " : "" ); 1982 + res->command & ATA_ERR ? "ERR " : ""); 1983 1983 } 1984 1984 1985 1985 if (cmd->command != ATA_CMD_PACKET && ··· 1990 1990 res->feature & ATA_ICRC ? "ICRC " : "", 1991 1991 res->feature & ATA_UNC ? "UNC " : "", 1992 1992 res->feature & ATA_IDNF ? "IDNF " : "", 1993 - res->feature & ATA_ABORTED ? "ABRT " : "" ); 1993 + res->feature & ATA_ABORTED ? "ABRT " : ""); 1994 1994 } 1995 1995 } 1996 1996 ··· 2611 2611 ehc->i.flags = 0; 2612 2612 continue; 2613 2613 2614 - dev_fail: 2614 + dev_fail: 2615 2615 nr_failed_devs++; 2616 2616 if (ata_eh_handle_dev_fail(dev, rc)) 2617 2617 nr_disabled_devs++;
drivers/ata/libata-scsi.c | +124 -124
··· 45 45 #include <scsi/scsi_transport.h> 46 46 #include <linux/libata.h> 47 47 #include <linux/hdreg.h> 48 - #include <asm/uaccess.h> 48 + #include <linux/uaccess.h> 49 49 50 50 #include "libata.h" 51 51 ··· 53 53 54 54 typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc); 55 55 56 - static struct ata_device * __ata_scsi_find_dev(struct ata_port *ap, 56 + static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap, 57 57 const struct scsi_device *scsidev); 58 - static struct ata_device * ata_scsi_find_dev(struct ata_port *ap, 58 + static struct ata_device *ata_scsi_find_dev(struct ata_port *ap, 59 59 const struct scsi_device *scsidev); 60 60 static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, 61 61 unsigned int id, unsigned int lun); ··· 228 228 229 229 scsi_cmd[1] = (4 << 1); /* PIO Data-in */ 230 230 scsi_cmd[2] = 0x0e; /* no off.line or cc, read from dev, 231 - block count in sector count field */ 231 + block count in sector count field */ 232 232 data_dir = DMA_FROM_DEVICE; 233 233 } else { 234 234 scsi_cmd[1] = (3 << 1); /* Non-data */ ··· 252 252 /* Good values for timeout and retries? Values below 253 253 from scsi_ioctl_send_command() for default case... */ 254 254 cmd_result = scsi_execute(scsidev, scsi_cmd, data_dir, argbuf, argsize, 255 - sensebuf, (10*HZ), 5, 0); 255 + sensebuf, (10*HZ), 5, 0); 256 256 257 257 if (driver_byte(cmd_result) == DRIVER_SENSE) {/* sense data available */ 258 258 u8 *desc = sensebuf + 8; ··· 263 263 if (cmd_result & SAM_STAT_CHECK_CONDITION) { 264 264 struct scsi_sense_hdr sshdr; 265 265 scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, 266 - &sshdr); 267 - if (sshdr.sense_key==0 && 268 - sshdr.asc==0 && sshdr.ascq==0) 266 + &sshdr); 267 + if (sshdr.sense_key == 0 && 268 + sshdr.asc == 0 && sshdr.ascq == 0) 269 269 cmd_result &= ~SAM_STAT_CHECK_CONDITION; 270 270 } 271 271 272 272 /* Send userspace a few ATA registers (same as drivers/ide) */ 273 - if (sensebuf[0] == 0x72 && /* format is "descriptor" */ 274 - desc[0] == 0x09 ) { /* code is "ATA Descriptor" */ 275 - args[0] = desc[13]; /* status */ 276 - args[1] = desc[3]; /* error */ 277 - args[2] = desc[5]; /* sector count (0:7) */ 273 + if (sensebuf[0] == 0x72 && /* format is "descriptor" */ 274 + desc[0] == 0x09) { /* code is "ATA Descriptor" */ 275 + args[0] = desc[13]; /* status */ 276 + args[1] = desc[3]; /* error */ 277 + args[2] = desc[5]; /* sector count (0:7) */ 278 278 if (copy_to_user(arg, args, sizeof(args))) 279 279 rc = -EFAULT; 280 280 } ··· 350 350 struct scsi_sense_hdr sshdr; 351 351 scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, 352 352 &sshdr); 353 - if (sshdr.sense_key==0 && 354 - sshdr.asc==0 && sshdr.ascq==0) 353 + if (sshdr.sense_key == 0 && 354 + sshdr.asc == 0 && sshdr.ascq == 0) 355 355 cmd_result &= ~SAM_STAT_CHECK_CONDITION; 356 356 } 357 357 ··· 975 975 if ((qc->dev->flags & ATA_DFLAG_SPUNDOWN) && 976 976 (system_state == SYSTEM_HALT || 977 977 system_state == SYSTEM_POWER_OFF)) { 978 - static unsigned long warned = 0; 978 + static unsigned long warned; 979 979 980 980 if (!test_and_set_bit(0, &warned)) { 981 981 ata_dev_printk(qc->dev, KERN_WARNING, ··· 1364 1364 struct ata_eh_info *ehi = &qc->dev->link->eh_info; 1365 1365 struct scsi_cmnd *cmd = qc->scsicmd; 1366 1366 u8 *cdb = cmd->cmnd; 1367 - int need_sense = (qc->err_mask != 0); 1367 + int need_sense = (qc->err_mask != 0); 1368 1368 1369 1369 /* We snoop the SET_FEATURES - Write Cache ON/OFF command, and 1370 1370 * schedule EH_REVALIDATE operation to update 
the IDENTIFY DEVICE ··· 1396 1396 * was no error, SK, ASC and ASCQ will all be zero. 1397 1397 */ 1398 1398 if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) && 1399 - ((cdb[2] & 0x20) || need_sense)) { 1399 + ((cdb[2] & 0x20) || need_sense)) { 1400 1400 ata_gen_passthru_sense(qc); 1401 1401 } else { 1402 1402 if (!need_sense) { ··· 1500 1500 return 0; 1501 1501 1502 1502 early_finish: 1503 - ata_qc_free(qc); 1503 + ata_qc_free(qc); 1504 1504 qc->scsidone(cmd); 1505 1505 DPRINTK("EXIT - early finish (good or error)\n"); 1506 1506 return 0; ··· 1590 1590 */ 1591 1591 1592 1592 void ata_scsi_rbuf_fill(struct ata_scsi_args *args, 1593 - unsigned int (*actor) (struct ata_scsi_args *args, 1594 - u8 *rbuf, unsigned int buflen)) 1593 + unsigned int (*actor) (struct ata_scsi_args *args, 1594 + u8 *rbuf, unsigned int buflen)) 1595 1595 { 1596 1596 u8 *rbuf; 1597 1597 unsigned int buflen, rc; ··· 2140 2140 * None. 2141 2141 */ 2142 2142 unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf, 2143 - unsigned int buflen) 2143 + unsigned int buflen) 2144 2144 { 2145 2145 u64 last_lba = args->dev->n_sectors - 1; /* LBA of the last block */ 2146 2146 ··· 2464 2464 return 0; 2465 2465 } 2466 2466 2467 - static struct ata_device * ata_find_dev(struct ata_port *ap, int devno) 2467 + static struct ata_device *ata_find_dev(struct ata_port *ap, int devno) 2468 2468 { 2469 2469 if (ap->nr_pmp_links == 0) { 2470 2470 if (likely(devno < ata_link_max_devices(&ap->link))) ··· 2477 2477 return NULL; 2478 2478 } 2479 2479 2480 - static struct ata_device * __ata_scsi_find_dev(struct ata_port *ap, 2481 - const struct scsi_device *scsidev) 2480 + static struct ata_device *__ata_scsi_find_dev(struct ata_port *ap, 2481 + const struct scsi_device *scsidev) 2482 2482 { 2483 2483 int devno; 2484 2484 ··· 2564 2564 ata_scsi_map_proto(u8 byte1) 2565 2565 { 2566 2566 switch((byte1 & 0x1e) >> 1) { 2567 - case 3: /* Non-data */ 2568 - return ATA_PROT_NODATA; 2567 + case 3: /* Non-data */ 2568 + return ATA_PROT_NODATA; 2569 2569 2570 - case 6: /* DMA */ 2571 - case 10: /* UDMA Data-in */ 2572 - case 11: /* UDMA Data-Out */ 2573 - return ATA_PROT_DMA; 2570 + case 6: /* DMA */ 2571 + case 10: /* UDMA Data-in */ 2572 + case 11: /* UDMA Data-Out */ 2573 + return ATA_PROT_DMA; 2574 2574 2575 - case 4: /* PIO Data-in */ 2576 - case 5: /* PIO Data-out */ 2577 - return ATA_PROT_PIO; 2575 + case 4: /* PIO Data-in */ 2576 + case 5: /* PIO Data-out */ 2577 + return ATA_PROT_PIO; 2578 2578 2579 - case 0: /* Hard Reset */ 2580 - case 1: /* SRST */ 2581 - case 8: /* Device Diagnostic */ 2582 - case 9: /* Device Reset */ 2583 - case 7: /* DMA Queued */ 2584 - case 12: /* FPDMA */ 2585 - case 15: /* Return Response Info */ 2586 - default: /* Reserved */ 2587 - break; 2579 + case 0: /* Hard Reset */ 2580 + case 1: /* SRST */ 2581 + case 8: /* Device Diagnostic */ 2582 + case 9: /* Device Reset */ 2583 + case 7: /* DMA Queued */ 2584 + case 12: /* FPDMA */ 2585 + case 15: /* Return Response Info */ 2586 + default: /* Reserved */ 2587 + break; 2588 2588 } 2589 2589 2590 2590 return ATA_PROT_UNKNOWN; ··· 2919 2919 args.done = done; 2920 2920 2921 2921 switch(scsicmd[0]) { 2922 - /* TODO: worth improving? */ 2923 - case FORMAT_UNIT: 2922 + /* TODO: worth improving? */ 2923 + case FORMAT_UNIT: 2924 + ata_scsi_invalid_field(cmd, done); 2925 + break; 2926 + 2927 + case INQUIRY: 2928 + if (scsicmd[1] & 2) /* is CmdDt set? */ 2924 2929 ata_scsi_invalid_field(cmd, done); 2930 + else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? 
*/ 2931 + ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std); 2932 + else switch (scsicmd[2]) { 2933 + case 0x00: 2934 + ata_scsi_rbuf_fill(&args, ata_scsiop_inq_00); 2925 2935 break; 2926 - 2927 - case INQUIRY: 2928 - if (scsicmd[1] & 2) /* is CmdDt set? */ 2929 - ata_scsi_invalid_field(cmd, done); 2930 - else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? */ 2931 - ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std); 2932 - else switch (scsicmd[2]) { 2933 - case 0x00: 2934 - ata_scsi_rbuf_fill(&args, ata_scsiop_inq_00); 2935 - break; 2936 - case 0x80: 2937 - ata_scsi_rbuf_fill(&args, ata_scsiop_inq_80); 2938 - break; 2939 - case 0x83: 2940 - ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83); 2941 - break; 2942 - case 0x89: 2943 - ata_scsi_rbuf_fill(&args, ata_scsiop_inq_89); 2944 - break; 2945 - default: 2946 - ata_scsi_invalid_field(cmd, done); 2947 - break; 2948 - } 2936 + case 0x80: 2937 + ata_scsi_rbuf_fill(&args, ata_scsiop_inq_80); 2949 2938 break; 2950 - 2951 - case MODE_SENSE: 2952 - case MODE_SENSE_10: 2953 - ata_scsi_rbuf_fill(&args, ata_scsiop_mode_sense); 2939 + case 0x83: 2940 + ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83); 2954 2941 break; 2955 - 2956 - case MODE_SELECT: /* unconditionally return */ 2957 - case MODE_SELECT_10: /* bad-field-in-cdb */ 2958 - ata_scsi_invalid_field(cmd, done); 2942 + case 0x89: 2943 + ata_scsi_rbuf_fill(&args, ata_scsiop_inq_89); 2959 2944 break; 2960 - 2961 - case READ_CAPACITY: 2962 - ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap); 2963 - break; 2964 - 2965 - case SERVICE_ACTION_IN: 2966 - if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16) 2967 - ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap); 2968 - else 2969 - ata_scsi_invalid_field(cmd, done); 2970 - break; 2971 - 2972 - case REPORT_LUNS: 2973 - ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns); 2974 - break; 2975 - 2976 - case REQUEST_SENSE: 2977 - ata_scsi_set_sense(cmd, 0, 0, 0); 2978 - cmd->result = (DRIVER_SENSE << 24); 2979 - done(cmd); 2980 - break; 2981 - 2982 - /* if we reach this, then writeback caching is disabled, 2983 - * turning this into a no-op. 
2984 - */ 2985 - case SYNCHRONIZE_CACHE: 2986 - /* fall through */ 2987 - 2988 - /* no-op's, complete with success */ 2989 - case REZERO_UNIT: 2990 - case SEEK_6: 2991 - case SEEK_10: 2992 - case TEST_UNIT_READY: 2993 - ata_scsi_rbuf_fill(&args, ata_scsiop_noop); 2994 - break; 2995 - 2996 - case SEND_DIAGNOSTIC: 2997 - tmp8 = scsicmd[1] & ~(1 << 3); 2998 - if ((tmp8 == 0x4) && (!scsicmd[3]) && (!scsicmd[4])) 2999 - ata_scsi_rbuf_fill(&args, ata_scsiop_noop); 3000 - else 3001 - ata_scsi_invalid_field(cmd, done); 3002 - break; 3003 - 3004 - /* all other commands */ 3005 2945 default: 3006 - ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x20, 0x0); 3007 - /* "Invalid command operation code" */ 3008 - done(cmd); 2946 + ata_scsi_invalid_field(cmd, done); 3009 2947 break; 2948 + } 2949 + break; 2950 + 2951 + case MODE_SENSE: 2952 + case MODE_SENSE_10: 2953 + ata_scsi_rbuf_fill(&args, ata_scsiop_mode_sense); 2954 + break; 2955 + 2956 + case MODE_SELECT: /* unconditionally return */ 2957 + case MODE_SELECT_10: /* bad-field-in-cdb */ 2958 + ata_scsi_invalid_field(cmd, done); 2959 + break; 2960 + 2961 + case READ_CAPACITY: 2962 + ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap); 2963 + break; 2964 + 2965 + case SERVICE_ACTION_IN: 2966 + if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16) 2967 + ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap); 2968 + else 2969 + ata_scsi_invalid_field(cmd, done); 2970 + break; 2971 + 2972 + case REPORT_LUNS: 2973 + ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns); 2974 + break; 2975 + 2976 + case REQUEST_SENSE: 2977 + ata_scsi_set_sense(cmd, 0, 0, 0); 2978 + cmd->result = (DRIVER_SENSE << 24); 2979 + done(cmd); 2980 + break; 2981 + 2982 + /* if we reach this, then writeback caching is disabled, 2983 + * turning this into a no-op. 2984 + */ 2985 + case SYNCHRONIZE_CACHE: 2986 + /* fall through */ 2987 + 2988 + /* no-op's, complete with success */ 2989 + case REZERO_UNIT: 2990 + case SEEK_6: 2991 + case SEEK_10: 2992 + case TEST_UNIT_READY: 2993 + ata_scsi_rbuf_fill(&args, ata_scsiop_noop); 2994 + break; 2995 + 2996 + case SEND_DIAGNOSTIC: 2997 + tmp8 = scsicmd[1] & ~(1 << 3); 2998 + if ((tmp8 == 0x4) && (!scsicmd[3]) && (!scsicmd[4])) 2999 + ata_scsi_rbuf_fill(&args, ata_scsiop_noop); 3000 + else 3001 + ata_scsi_invalid_field(cmd, done); 3002 + break; 3003 + 3004 + /* all other commands */ 3005 + default: 3006 + ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x20, 0x0); 3007 + /* "Invalid command operation code" */ 3008 + done(cmd); 3009 + break; 3010 3010 } 3011 3011 } 3012 3012
drivers/ata/libata-sff.c | +1 -1
··· 248 248 * LOCKING: 249 249 * spin_lock_irqsave(host lock) 250 250 */ 251 - void ata_bmdma_start (struct ata_queued_cmd *qc) 251 + void ata_bmdma_start(struct ata_queued_cmd *qc) 252 252 { 253 253 struct ata_port *ap = qc->ap; 254 254 u8 dmactl;
drivers/ata/pata_ns87415.c | +5 -5
··· 17 17 * TODO: 18 18 * Test PARISC SuperIO 19 19 * Get someone to test on SPARC 20 - * Implement lazy pio/dma switching for better performance 20 + * Implement lazy pio/dma switching for better performance 21 21 * 8bit shared timing. 22 22 * See if we need to kill the FIFO for ATAPI 23 23 */ ··· 60 60 u16 clocking; 61 61 u8 iordy; 62 62 u8 status; 63 - 63 + 64 64 /* Timing register format is 17 - low nybble read timing with 65 65 the high nybble being 16 - x for recovery time in PCI clocks */ 66 - 66 + 67 67 ata_timing_compute(adev, adev->pio_mode, &t, T, 0); 68 68 69 69 clocking = 17 - FIT(t.active, 2, 17); ··· 71 71 /* Use the same timing for read and write bytes */ 72 72 clocking |= (clocking << 8); 73 73 pci_write_config_word(dev, timing, clocking); 74 - 74 + 75 75 /* Set the IORDY enable versus DMA enable on or off properly */ 76 76 pci_read_config_byte(dev, 0x42, &iordy); 77 77 iordy &= ~(1 << (4 + unit)); ··· 185 185 186 186 if (!mmio) 187 187 return; 188 - iowrite8((ioread8(mmio + ATA_DMA_CMD) | ATA_DMA_INTR | ATA_DMA_ERR), 188 + iowrite8((ioread8(mmio + ATA_DMA_CMD) | ATA_DMA_INTR | ATA_DMA_ERR), 189 189 mmio + ATA_DMA_CMD); 190 190 } 191 191
drivers/ata/sata_mv.c | +11 -12
··· 845 845 pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN; 846 846 } else { 847 847 WARN_ON(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)); 848 - } 848 + } 849 849 850 850 /* now properly wait for the eDMA to stop */ 851 851 for (i = 1000; i > 0; i--) { ··· 883 883 for (b = 0; b < bytes; ) { 884 884 DPRINTK("%p: ", start + b); 885 885 for (w = 0; b < bytes && w < 4; w++) { 886 - printk("%08x ",readl(start + b)); 886 + printk("%08x ", readl(start + b)); 887 887 b += sizeof(u32); 888 888 } 889 889 printk("\n"); ··· 899 899 for (b = 0; b < bytes; ) { 900 900 DPRINTK("%02x: ", b); 901 901 for (w = 0; b < bytes && w < 4; w++) { 902 - (void) pci_read_config_dword(pdev,b,&dw); 903 - printk("%08x ",dw); 902 + (void) pci_read_config_dword(pdev, b, &dw); 903 + printk("%08x ", dw); 904 904 b += sizeof(u32); 905 905 } 906 906 printk("\n"); ··· 944 944 } 945 945 for (p = start_port; p < start_port + num_ports; p++) { 946 946 port_base = mv_port_base(mmio_base, p); 947 - DPRINTK("EDMA regs (port %i):\n",p); 947 + DPRINTK("EDMA regs (port %i):\n", p); 948 948 mv_dump_mem(port_base, 0x54); 949 - DPRINTK("SATA regs (port %i):\n",p); 949 + DPRINTK("SATA regs (port %i):\n", p); 950 950 mv_dump_mem(port_base+0x300, 0x60); 951 951 } 952 952 #endif ··· 1184 1184 u16 flags = 0; 1185 1185 unsigned in_index; 1186 1186 1187 - if (qc->tf.protocol != ATA_PROT_DMA) 1187 + if (qc->tf.protocol != ATA_PROT_DMA) 1188 1188 return; 1189 1189 1190 1190 /* Fill in command request block ··· 1276 1276 unsigned in_index; 1277 1277 u32 flags = 0; 1278 1278 1279 - if (qc->tf.protocol != ATA_PROT_DMA) 1279 + if (qc->tf.protocol != ATA_PROT_DMA) 1280 1280 return; 1281 1281 1282 1282 /* Fill in Gen IIE command request block ··· 1606 1606 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS); 1607 1607 1608 1608 VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n", 1609 - hc,relevant,hc_irq_cause); 1609 + hc, relevant, hc_irq_cause); 1610 1610 1611 1611 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) { 1612 1612 struct ata_port *ap = host->ports[port]; ··· 1983 1983 for (i = 0; i < 1000; i++) { 1984 1984 udelay(1); 1985 1985 t = readl(reg); 1986 - if (PCI_MASTER_EMPTY & t) { 1986 + if (PCI_MASTER_EMPTY & t) 1987 1987 break; 1988 - } 1989 1988 } 1990 1989 if (!(PCI_MASTER_EMPTY & t)) { 1991 1990 printk(KERN_ERR DRV_NAME ": PCI master won't flush\n"); ··· 2667 2668 */ 2668 2669 static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 2669 2670 { 2670 - static int printed_version = 0; 2671 + static int printed_version; 2671 2672 unsigned int board_idx = (unsigned int)ent->driver_data; 2672 2673 const struct ata_port_info *ppi[] = { &mv_port_info[board_idx], NULL }; 2673 2674 struct ata_host *host;
drivers/ata/sata_nv.c | +33 -35
··· 163 163 NV_ADMA_STAT_STOPPED = (1 << 10), 164 164 NV_ADMA_STAT_DONE = (1 << 12), 165 165 NV_ADMA_STAT_ERR = NV_ADMA_STAT_CPBERR | 166 - NV_ADMA_STAT_TIMEOUT, 166 + NV_ADMA_STAT_TIMEOUT, 167 167 168 168 /* port flags */ 169 169 NV_ADMA_PORT_REGISTER_MODE = (1 << 0), ··· 228 228 u8 reserved1; /* 1 */ 229 229 u8 ctl_flags; /* 2 */ 230 230 /* len is length of taskfile in 64 bit words */ 231 - u8 len; /* 3 */ 231 + u8 len; /* 3 */ 232 232 u8 tag; /* 4 */ 233 233 u8 next_cpb_idx; /* 5 */ 234 234 __le16 reserved2; /* 6-7 */ ··· 244 244 dma_addr_t cpb_dma; 245 245 struct nv_adma_prd *aprd; 246 246 dma_addr_t aprd_dma; 247 - void __iomem * ctl_block; 248 - void __iomem * gen_block; 249 - void __iomem * notifier_clear_block; 247 + void __iomem *ctl_block; 248 + void __iomem *gen_block; 249 + void __iomem *notifier_clear_block; 250 250 u8 flags; 251 251 int last_issue_ncq; 252 252 }; ··· 293 293 294 294 #define NV_ADMA_CHECK_INTR(GCTL, PORT) ((GCTL) & ( 1 << (19 + (12 * (PORT))))) 295 295 296 - static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); 296 + static int nv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 297 297 #ifdef CONFIG_PM 298 298 static int nv_pci_device_resume(struct pci_dev *pdev); 299 299 #endif ··· 301 301 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance); 302 302 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance); 303 303 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance); 304 - static int nv_scr_read (struct ata_port *ap, unsigned int sc_reg, u32 *val); 305 - static int nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); 304 + static int nv_scr_read(struct ata_port *ap, unsigned int sc_reg, u32 *val); 305 + static int nv_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val); 306 306 307 307 static void nv_nf2_freeze(struct ata_port *ap); 308 308 static void nv_nf2_thaw(struct ata_port *ap); ··· 653 653 return; 654 654 655 655 status = readw(mmio + NV_ADMA_STAT); 656 - while(!(status & NV_ADMA_STAT_IDLE) && count < 20) { 656 + while (!(status & NV_ADMA_STAT_IDLE) && count < 20) { 657 657 ndelay(50); 658 658 status = readw(mmio + NV_ADMA_STAT); 659 659 count++; 660 660 } 661 - if(count == 20) 661 + if (count == 20) 662 662 ata_port_printk(ap, KERN_WARNING, 663 663 "timeout waiting for ADMA IDLE, stat=0x%hx\n", 664 664 status); ··· 668 668 669 669 count = 0; 670 670 status = readw(mmio + NV_ADMA_STAT); 671 - while(!(status & NV_ADMA_STAT_LEGACY) && count < 20) { 671 + while (!(status & NV_ADMA_STAT_LEGACY) && count < 20) { 672 672 ndelay(50); 673 673 status = readw(mmio + NV_ADMA_STAT); 674 674 count++; 675 675 } 676 - if(count == 20) 676 + if (count == 20) 677 677 ata_port_printk(ap, KERN_WARNING, 678 678 "timeout waiting for ADMA LEGACY, stat=0x%hx\n", 679 679 status); ··· 697 697 writew(tmp | NV_ADMA_CTL_GO, mmio + NV_ADMA_CTL); 698 698 699 699 status = readw(mmio + NV_ADMA_STAT); 700 - while(((status & NV_ADMA_STAT_LEGACY) || 700 + while (((status & NV_ADMA_STAT_LEGACY) || 701 701 !(status & NV_ADMA_STAT_IDLE)) && count < 20) { 702 702 ndelay(50); 703 703 status = readw(mmio + NV_ADMA_STAT); 704 704 count++; 705 705 } 706 - if(count == 20) 706 + if (count == 20) 707 707 ata_port_printk(ap, KERN_WARNING, 708 708 "timeout waiting for ADMA LEGACY clear and IDLE, stat=0x%hx\n", 709 709 status); ··· 747 747 on the port. 
*/ 748 748 adma_enable = 0; 749 749 nv_adma_register_mode(ap); 750 - } 751 - else { 750 + } else { 752 751 bounce_limit = *ap->dev->dma_mask; 753 752 segment_boundary = NV_ADMA_DMA_BOUNDARY; 754 753 sg_tablesize = NV_ADMA_SGTBL_TOTAL_LEN; ··· 756 757 757 758 pci_read_config_dword(pdev, NV_MCP_SATA_CFG_20, &current_reg); 758 759 759 - if(ap->port_no == 1) 760 + if (ap->port_no == 1) 760 761 config_mask = NV_MCP_SATA_CFG_20_PORT1_EN | 761 762 NV_MCP_SATA_CFG_20_PORT1_PWB_EN; 762 763 else 763 764 config_mask = NV_MCP_SATA_CFG_20_PORT0_EN | 764 765 NV_MCP_SATA_CFG_20_PORT0_PWB_EN; 765 766 766 - if(adma_enable) { 767 + if (adma_enable) { 767 768 new_reg = current_reg | config_mask; 768 769 pp->flags &= ~NV_ADMA_ATAPI_SETUP_COMPLETE; 769 - } 770 - else { 770 + } else { 771 771 new_reg = current_reg & ~config_mask; 772 772 pp->flags |= NV_ADMA_ATAPI_SETUP_COMPLETE; 773 773 } 774 774 775 - if(current_reg != new_reg) 775 + if (current_reg != new_reg) 776 776 pci_write_config_dword(pdev, NV_MCP_SATA_CFG_20, new_reg); 777 777 778 778 blk_queue_bounce_limit(sdev->request_queue, bounce_limit); ··· 805 807 { 806 808 unsigned int idx = 0; 807 809 808 - if(tf->flags & ATA_TFLAG_ISADDR) { 810 + if (tf->flags & ATA_TFLAG_ISADDR) { 809 811 if (tf->flags & ATA_TFLAG_LBA48) { 810 812 cpb[idx++] = cpu_to_le16((ATA_REG_ERR << 8) | tf->hob_feature | WNB); 811 813 cpb[idx++] = cpu_to_le16((ATA_REG_NSECT << 8) | tf->hob_nsect); ··· 822 824 cpb[idx++] = cpu_to_le16((ATA_REG_LBAH << 8) | tf->lbah); 823 825 } 824 826 825 - if(tf->flags & ATA_TFLAG_DEVICE) 827 + if (tf->flags & ATA_TFLAG_DEVICE) 826 828 cpb[idx++] = cpu_to_le16((ATA_REG_DEVICE << 8) | tf->device); 827 829 828 830 cpb[idx++] = cpu_to_le16((ATA_REG_CMD << 8) | tf->command | CMDEND); 829 831 830 - while(idx < 12) 832 + while (idx < 12) 831 833 cpb[idx++] = cpu_to_le16(IGN); 832 834 833 835 return idx; ··· 848 850 int freeze = 0; 849 851 850 852 ata_ehi_clear_desc(ehi); 851 - __ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags ); 853 + __ata_ehi_push_desc(ehi, "CPB resp_flags 0x%x: ", flags); 852 854 if (flags & NV_CPB_RESP_ATA_ERR) { 853 855 ata_ehi_push_desc(ehi, "ATA error"); 854 856 ehi->err_mask |= AC_ERR_DEV; ··· 877 879 struct ata_queued_cmd *qc = ata_qc_from_tag(ap, cpb_num); 878 880 VPRINTK("CPB flags done, flags=0x%x\n", flags); 879 881 if (likely(qc)) { 880 - DPRINTK("Completing qc from tag %d\n",cpb_num); 882 + DPRINTK("Completing qc from tag %d\n", cpb_num); 881 883 ata_qc_complete(qc); 882 884 } else { 883 885 struct ata_eh_info *ehi = &ap->link.eh_info; ··· 950 952 if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) { 951 953 u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804) 952 954 >> (NV_INT_PORT_SHIFT * i); 953 - if(ata_tag_valid(ap->link.active_tag)) 955 + if (ata_tag_valid(ap->link.active_tag)) 954 956 /** NV_INT_DEV indication seems unreliable at times 955 957 at least in ADMA mode. Force it on always when a 956 958 command is active, to prevent losing interrupts. 
*/ ··· 964 966 965 967 gen_ctl = readl(pp->gen_block + NV_ADMA_GEN_CTL); 966 968 967 - if( !NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier && 969 + if (!NV_ADMA_CHECK_INTR(gen_ctl, ap->port_no) && !notifier && 968 970 !notifier_error) 969 971 /* Nothing to do */ 970 972 continue; ··· 988 990 struct ata_eh_info *ehi = &ap->link.eh_info; 989 991 990 992 ata_ehi_clear_desc(ehi); 991 - __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status ); 993 + __ata_ehi_push_desc(ehi, "ADMA status 0x%08x: ", status); 992 994 if (status & NV_ADMA_STAT_TIMEOUT) { 993 995 ehi->err_mask |= AC_ERR_SYSTEM; 994 996 ata_ehi_push_desc(ehi, "timeout"); ··· 1054 1056 return; 1055 1057 1056 1058 /* clear any outstanding CK804 notifications */ 1057 - writeb( NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT), 1059 + writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT), 1058 1060 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804); 1059 1061 1060 1062 /* Disable interrupt */ 1061 1063 tmp = readw(mmio + NV_ADMA_CTL); 1062 - writew( tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN), 1064 + writew(tmp & ~(NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN), 1063 1065 mmio + NV_ADMA_CTL); 1064 - readw( mmio + NV_ADMA_CTL ); /* flush posted write */ 1066 + readw(mmio + NV_ADMA_CTL ); /* flush posted write */ 1065 1067 } 1066 1068 1067 1069 static void nv_adma_thaw(struct ata_port *ap) ··· 1077 1079 1078 1080 /* Enable interrupt */ 1079 1081 tmp = readw(mmio + NV_ADMA_CTL); 1080 - writew( tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN), 1082 + writew(tmp | (NV_ADMA_CTL_AIEN | NV_ADMA_CTL_HOTPLUG_IEN), 1081 1083 mmio + NV_ADMA_CTL); 1082 - readw( mmio + NV_ADMA_CTL ); /* flush posted write */ 1084 + readw(mmio + NV_ADMA_CTL ); /* flush posted write */ 1083 1085 } 1084 1086 1085 1087 static void nv_adma_irq_clear(struct ata_port *ap) ··· 1094 1096 } 1095 1097 1096 1098 /* clear any outstanding CK804 notifications */ 1097 - writeb( NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT), 1099 + writeb(NV_INT_ALL << (ap->port_no * NV_INT_PORT_SHIFT), 1098 1100 ap->host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804); 1099 1101 1100 1102 /* clear ADMA status */
drivers/ata/sata_sx4.c | +2 -2
··· 62 62 submit ATA packet to hardware 63 63 hardware executes ATA WRITE command, w/ data in DIMM 64 64 hardware raises interrupt 65 - 65 + 66 66 and each READ looks like this: 67 67 68 68 submit ATA packet to hardware 69 69 hardware executes ATA READ command, w/ data in DIMM 70 70 hardware raises interrupt 71 - 71 + 72 72 submit HDMA packet to hardware 73 73 hardware copies data from DIMM to system memory 74 74 hardware raises interrupt
include/linux/ata.h | +3 -3
··· 178 178 ATA_CMD_PACKET = 0xA0, 179 179 ATA_CMD_VERIFY = 0x40, 180 180 ATA_CMD_VERIFY_EXT = 0x42, 181 - ATA_CMD_STANDBYNOW1 = 0xE0, 182 - ATA_CMD_IDLEIMMEDIATE = 0xE1, 181 + ATA_CMD_STANDBYNOW1 = 0xE0, 182 + ATA_CMD_IDLEIMMEDIATE = 0xE1, 183 183 ATA_CMD_INIT_DEV_PARAMS = 0x91, 184 184 ATA_CMD_READ_NATIVE_MAX = 0xF8, 185 185 ATA_CMD_READ_NATIVE_MAX_EXT = 0x27, ··· 458 458 * ATA-3 introduces word 80 and accurate reporting 459 459 * 460 460 * The practical impact of this is that ata_id_major_version cannot 461 - * reliably report on drives below ATA3. 461 + * reliably report on drives below ATA3. 462 462 */ 463 463 464 464 static inline unsigned int ata_id_major_version(const u16 *id)
include/linux/libata.h | +11 -11
··· 326 326 ATA_HORKAGE_SKIP_PM = (1 << 5), /* Skip PM operations */ 327 327 ATA_HORKAGE_HPA_SIZE = (1 << 6), /* native size off by one */ 328 328 329 - /* DMA mask for user DMA control: User visible values; DO NOT 329 + /* DMA mask for user DMA control: User visible values; DO NOT 330 330 renumber */ 331 331 ATA_DMA_MASK_ATA = (1 << 0), /* DMA on ATA Disk */ 332 332 ATA_DMA_MASK_ATAPI = (1 << 1), /* DMA on ATAPI */ ··· 717 717 unsigned short udma; /* t2CYCTYP/2 */ 718 718 }; 719 719 720 - #define FIT(v,vmin,vmax) max_t(short,min_t(short,v,vmax),vmin) 720 + #define FIT(v, vmin, vmax) max_t(short, min_t(short, v, vmax), vmin) 721 721 722 722 extern const unsigned long sata_deb_timing_normal[]; 723 723 extern const unsigned long sata_deb_timing_hotplug[]; ··· 816 816 extern void ata_tf_to_fis(const struct ata_taskfile *tf, 817 817 u8 pmp, int is_cmd, u8 *fis); 818 818 extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf); 819 - extern void ata_noop_dev_select (struct ata_port *ap, unsigned int device); 820 - extern void ata_std_dev_select (struct ata_port *ap, unsigned int device); 819 + extern void ata_noop_dev_select(struct ata_port *ap, unsigned int device); 820 + extern void ata_std_dev_select(struct ata_port *ap, unsigned int device); 821 821 extern u8 ata_check_status(struct ata_port *ap); 822 822 extern u8 ata_altstatus(struct ata_port *ap); 823 823 extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf); 824 - extern int ata_port_start (struct ata_port *ap); 825 - extern int ata_sff_port_start (struct ata_port *ap); 826 - extern irqreturn_t ata_interrupt (int irq, void *dev_instance); 824 + extern int ata_port_start(struct ata_port *ap); 825 + extern int ata_sff_port_start(struct ata_port *ap); 826 + extern irqreturn_t ata_interrupt(int irq, void *dev_instance); 827 827 extern void ata_data_xfer(struct ata_device *adev, unsigned char *buf, 828 828 unsigned int buflen, int write_data); 829 829 extern void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf, ··· 844 844 extern void ata_id_c_string(const u16 *id, unsigned char *s, 845 845 unsigned int ofs, unsigned int len); 846 846 extern void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown); 847 - extern void ata_bmdma_setup (struct ata_queued_cmd *qc); 848 - extern void ata_bmdma_start (struct ata_queued_cmd *qc); 847 + extern void ata_bmdma_setup(struct ata_queued_cmd *qc); 848 + extern void ata_bmdma_start(struct ata_queued_cmd *qc); 849 849 extern void ata_bmdma_stop(struct ata_queued_cmd *qc); 850 850 extern u8 ata_bmdma_status(struct ata_port *ap); 851 851 extern void ata_bmdma_irq_clear(struct ata_port *ap); ··· 920 920 #ifdef CONFIG_PCI 921 921 struct pci_dev; 922 922 923 - extern int ata_pci_init_one (struct pci_dev *pdev, 923 + extern int ata_pci_init_one(struct pci_dev *pdev, 924 924 const struct ata_port_info * const * ppi); 925 - extern void ata_pci_remove_one (struct pci_dev *pdev); 925 + extern void ata_pci_remove_one(struct pci_dev *pdev); 926 926 #ifdef CONFIG_PM 927 927 extern void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg); 928 928 extern int __must_check ata_pci_device_do_resume(struct pci_dev *pdev);