Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

libata: Grand renaming.

The biggest change is that ata_host_set is renamed to ata_host.

* ata_host_set => ata_host
* ata_probe_ent->host_flags => ata_probe_ent->port_flags
* ata_probe_ent->host_set_flags => ata_probe_ent->_host_flags
* ata_host_stats => ata_port_stats
* ata_port->host => ata_port->scsi_host
* ata_port->host_set => ata_port->host
* ata_port_info->host_flags => ata_port_info->flags
* ata_(.*)host_set(.*)\(\) => ata_\1host\2()

The leading underscore in ata_probe_ent->_host_flags is to avoid
reusing ->host_flags for a different purpose. Currently, the only
user of the field is libata-bmdma.c, and probe_ent itself is scheduled
to be removed.

ata_port->host is reused for a different purpose, but this field is
used only inside the libata core proper and is of a different type.

Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>

+568 -573
+49 -49
drivers/ata/ahci.c
··· 277 277 /* board_ahci */ 278 278 { 279 279 .sht = &ahci_sht, 280 - .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 280 + .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 281 281 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA | 282 282 ATA_FLAG_SKIP_D2H_BSY, 283 283 .pio_mask = 0x1f, /* pio0-4 */ ··· 287 287 /* board_ahci_vt8251 */ 288 288 { 289 289 .sht = &ahci_sht, 290 - .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 290 + .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 291 291 ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA | 292 292 ATA_FLAG_SKIP_D2H_BSY | 293 293 AHCI_FLAG_RESET_NEEDS_CLO | AHCI_FLAG_NO_NCQ, ··· 709 709 static int ahci_clo(struct ata_port *ap) 710 710 { 711 711 void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr; 712 - struct ahci_host_priv *hpriv = ap->host_set->private_data; 712 + struct ahci_host_priv *hpriv = ap->host->private_data; 713 713 u32 tmp; 714 714 715 715 if (!(hpriv->cap & HOST_CAP_CLO)) ··· 741 741 static int ahci_softreset(struct ata_port *ap, unsigned int *class) 742 742 { 743 743 struct ahci_port_priv *pp = ap->private_data; 744 - void __iomem *mmio = ap->host_set->mmio_base; 744 + void __iomem *mmio = ap->host->mmio_base; 745 745 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); 746 746 const u32 cmd_fis_len = 5; /* five dwords */ 747 747 const char *reason = NULL; ··· 850 850 struct ahci_port_priv *pp = ap->private_data; 851 851 u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG; 852 852 struct ata_taskfile tf; 853 - void __iomem *mmio = ap->host_set->mmio_base; 853 + void __iomem *mmio = ap->host->mmio_base; 854 854 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); 855 855 int rc; 856 856 ··· 1039 1039 1040 1040 static void ahci_host_intr(struct ata_port *ap) 1041 1041 { 1042 - void __iomem *mmio = ap->host_set->mmio_base; 1042 + void __iomem *mmio = ap->host->mmio_base; 1043 1043 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); 1044 1044 struct ata_eh_info *ehi = &ap->eh_info; 1045 1045 u32 status, qc_active; 
··· 1091 1091 1092 1092 static irqreturn_t ahci_interrupt(int irq, void *dev_instance, struct pt_regs *regs) 1093 1093 { 1094 - struct ata_host_set *host_set = dev_instance; 1094 + struct ata_host *host = dev_instance; 1095 1095 struct ahci_host_priv *hpriv; 1096 1096 unsigned int i, handled = 0; 1097 1097 void __iomem *mmio; ··· 1099 1099 1100 1100 VPRINTK("ENTER\n"); 1101 1101 1102 - hpriv = host_set->private_data; 1103 - mmio = host_set->mmio_base; 1102 + hpriv = host->private_data; 1103 + mmio = host->mmio_base; 1104 1104 1105 1105 /* sigh. 0xffffffff is a valid return from h/w */ 1106 1106 irq_stat = readl(mmio + HOST_IRQ_STAT); ··· 1108 1108 if (!irq_stat) 1109 1109 return IRQ_NONE; 1110 1110 1111 - spin_lock(&host_set->lock); 1111 + spin_lock(&host->lock); 1112 1112 1113 - for (i = 0; i < host_set->n_ports; i++) { 1113 + for (i = 0; i < host->n_ports; i++) { 1114 1114 struct ata_port *ap; 1115 1115 1116 1116 if (!(irq_stat & (1 << i))) 1117 1117 continue; 1118 1118 1119 - ap = host_set->ports[i]; 1119 + ap = host->ports[i]; 1120 1120 if (ap) { 1121 1121 ahci_host_intr(ap); 1122 1122 VPRINTK("port %u\n", i); 1123 1123 } else { 1124 1124 VPRINTK("port %u (no irq)\n", i); 1125 1125 if (ata_ratelimit()) 1126 - dev_printk(KERN_WARNING, host_set->dev, 1126 + dev_printk(KERN_WARNING, host->dev, 1127 1127 "interrupt on disabled port %u\n", i); 1128 1128 } 1129 1129 ··· 1135 1135 handled = 1; 1136 1136 } 1137 1137 1138 - spin_unlock(&host_set->lock); 1138 + spin_unlock(&host->lock); 1139 1139 1140 1140 VPRINTK("EXIT\n"); 1141 1141 ··· 1157 1157 1158 1158 static void ahci_freeze(struct ata_port *ap) 1159 1159 { 1160 - void __iomem *mmio = ap->host_set->mmio_base; 1160 + void __iomem *mmio = ap->host->mmio_base; 1161 1161 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); 1162 1162 1163 1163 /* turn IRQ off */ ··· 1166 1166 1167 1167 static void ahci_thaw(struct ata_port *ap) 1168 1168 { 1169 - void __iomem *mmio = ap->host_set->mmio_base; 1169 + void __iomem 
*mmio = ap->host->mmio_base; 1170 1170 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); 1171 1171 u32 tmp; 1172 1172 ··· 1181 1181 1182 1182 static void ahci_error_handler(struct ata_port *ap) 1183 1183 { 1184 - void __iomem *mmio = ap->host_set->mmio_base; 1184 + void __iomem *mmio = ap->host->mmio_base; 1185 1185 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); 1186 1186 1187 1187 if (!(ap->pflags & ATA_PFLAG_FROZEN)) { ··· 1198 1198 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc) 1199 1199 { 1200 1200 struct ata_port *ap = qc->ap; 1201 - void __iomem *mmio = ap->host_set->mmio_base; 1201 + void __iomem *mmio = ap->host->mmio_base; 1202 1202 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); 1203 1203 1204 1204 if (qc->flags & ATA_QCFLAG_FAILED) ··· 1213 1213 1214 1214 static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg) 1215 1215 { 1216 - struct ahci_host_priv *hpriv = ap->host_set->private_data; 1216 + struct ahci_host_priv *hpriv = ap->host->private_data; 1217 1217 struct ahci_port_priv *pp = ap->private_data; 1218 - void __iomem *mmio = ap->host_set->mmio_base; 1218 + void __iomem *mmio = ap->host->mmio_base; 1219 1219 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); 1220 1220 const char *emsg = NULL; 1221 1221 int rc; ··· 1233 1233 static int ahci_port_resume(struct ata_port *ap) 1234 1234 { 1235 1235 struct ahci_port_priv *pp = ap->private_data; 1236 - struct ahci_host_priv *hpriv = ap->host_set->private_data; 1237 - void __iomem *mmio = ap->host_set->mmio_base; 1236 + struct ahci_host_priv *hpriv = ap->host->private_data; 1237 + void __iomem *mmio = ap->host->mmio_base; 1238 1238 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); 1239 1239 1240 1240 ahci_init_port(port_mmio, hpriv->cap, pp->cmd_slot_dma, pp->rx_fis_dma); ··· 1244 1244 1245 1245 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) 1246 1246 { 1247 - struct ata_host_set *host_set = 
dev_get_drvdata(&pdev->dev); 1248 - void __iomem *mmio = host_set->mmio_base; 1247 + struct ata_host *host = dev_get_drvdata(&pdev->dev); 1248 + void __iomem *mmio = host->mmio_base; 1249 1249 u32 ctl; 1250 1250 1251 1251 if (mesg.event == PM_EVENT_SUSPEND) { ··· 1264 1264 1265 1265 static int ahci_pci_device_resume(struct pci_dev *pdev) 1266 1266 { 1267 - struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev); 1268 - struct ahci_host_priv *hpriv = host_set->private_data; 1269 - void __iomem *mmio = host_set->mmio_base; 1267 + struct ata_host *host = dev_get_drvdata(&pdev->dev); 1268 + struct ahci_host_priv *hpriv = host->private_data; 1269 + void __iomem *mmio = host->mmio_base; 1270 1270 int rc; 1271 1271 1272 1272 ata_pci_device_do_resume(pdev); ··· 1276 1276 if (rc) 1277 1277 return rc; 1278 1278 1279 - ahci_init_controller(mmio, pdev, host_set->n_ports, hpriv->cap); 1279 + ahci_init_controller(mmio, pdev, host->n_ports, hpriv->cap); 1280 1280 } 1281 1281 1282 - ata_host_set_resume(host_set); 1282 + ata_host_resume(host); 1283 1283 1284 1284 return 0; 1285 1285 } 1286 1286 1287 1287 static int ahci_port_start(struct ata_port *ap) 1288 1288 { 1289 - struct device *dev = ap->host_set->dev; 1290 - struct ahci_host_priv *hpriv = ap->host_set->private_data; 1289 + struct device *dev = ap->host->dev; 1290 + struct ahci_host_priv *hpriv = ap->host->private_data; 1291 1291 struct ahci_port_priv *pp; 1292 - void __iomem *mmio = ap->host_set->mmio_base; 1292 + void __iomem *mmio = ap->host->mmio_base; 1293 1293 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); 1294 1294 void *mem; 1295 1295 dma_addr_t mem_dma; ··· 1350 1350 1351 1351 static void ahci_port_stop(struct ata_port *ap) 1352 1352 { 1353 - struct device *dev = ap->host_set->dev; 1354 - struct ahci_host_priv *hpriv = ap->host_set->private_data; 1353 + struct device *dev = ap->host->dev; 1354 + struct ahci_host_priv *hpriv = ap->host->private_data; 1355 1355 struct ahci_port_priv *pp = 
ap->private_data; 1356 - void __iomem *mmio = ap->host_set->mmio_base; 1356 + void __iomem *mmio = ap->host->mmio_base; 1357 1357 void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no); 1358 1358 const char *emsg = NULL; 1359 1359 int rc; ··· 1581 1581 memset(hpriv, 0, sizeof(*hpriv)); 1582 1582 1583 1583 probe_ent->sht = ahci_port_info[board_idx].sht; 1584 - probe_ent->host_flags = ahci_port_info[board_idx].host_flags; 1584 + probe_ent->port_flags = ahci_port_info[board_idx].flags; 1585 1585 probe_ent->pio_mask = ahci_port_info[board_idx].pio_mask; 1586 1586 probe_ent->udma_mask = ahci_port_info[board_idx].udma_mask; 1587 1587 probe_ent->port_ops = ahci_port_info[board_idx].port_ops; ··· 1599 1599 if (rc) 1600 1600 goto err_out_hpriv; 1601 1601 1602 - if (!(probe_ent->host_flags & AHCI_FLAG_NO_NCQ) && 1602 + if (!(probe_ent->port_flags & AHCI_FLAG_NO_NCQ) && 1603 1603 (hpriv->cap & HOST_CAP_NCQ)) 1604 - probe_ent->host_flags |= ATA_FLAG_NCQ; 1604 + probe_ent->port_flags |= ATA_FLAG_NCQ; 1605 1605 1606 1606 ahci_print_info(probe_ent); 1607 1607 ··· 1632 1632 static void ahci_remove_one (struct pci_dev *pdev) 1633 1633 { 1634 1634 struct device *dev = pci_dev_to_dev(pdev); 1635 - struct ata_host_set *host_set = dev_get_drvdata(dev); 1636 - struct ahci_host_priv *hpriv = host_set->private_data; 1635 + struct ata_host *host = dev_get_drvdata(dev); 1636 + struct ahci_host_priv *hpriv = host->private_data; 1637 1637 unsigned int i; 1638 1638 int have_msi; 1639 1639 1640 - for (i = 0; i < host_set->n_ports; i++) 1641 - ata_port_detach(host_set->ports[i]); 1640 + for (i = 0; i < host->n_ports; i++) 1641 + ata_port_detach(host->ports[i]); 1642 1642 1643 1643 have_msi = hpriv->flags & AHCI_FLAG_MSI; 1644 - free_irq(host_set->irq, host_set); 1644 + free_irq(host->irq, host); 1645 1645 1646 - for (i = 0; i < host_set->n_ports; i++) { 1647 - struct ata_port *ap = host_set->ports[i]; 1646 + for (i = 0; i < host->n_ports; i++) { 1647 + struct ata_port *ap = host->ports[i]; 
1648 1648 1649 - ata_scsi_release(ap->host); 1650 - scsi_host_put(ap->host); 1649 + ata_scsi_release(ap->scsi_host); 1650 + scsi_host_put(ap->scsi_host); 1651 1651 } 1652 1652 1653 1653 kfree(hpriv); 1654 - pci_iounmap(pdev, host_set->mmio_base); 1655 - kfree(host_set); 1654 + pci_iounmap(pdev, host->mmio_base); 1655 + kfree(host); 1656 1656 1657 1657 if (have_msi) 1658 1658 pci_disable_msi(pdev);
+28 -28
drivers/ata/ata_piix.c
··· 151 151 152 152 static int piix_init_one (struct pci_dev *pdev, 153 153 const struct pci_device_id *ent); 154 - static void piix_host_stop(struct ata_host_set *host_set); 154 + static void piix_host_stop(struct ata_host *host); 155 155 static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev); 156 156 static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev); 157 157 static void piix_pata_error_handler(struct ata_port *ap); ··· 362 362 /* piix4_pata */ 363 363 { 364 364 .sht = &piix_sht, 365 - .host_flags = ATA_FLAG_SLAVE_POSS, 365 + .flags = ATA_FLAG_SLAVE_POSS, 366 366 .pio_mask = 0x1f, /* pio0-4 */ 367 367 #if 0 368 368 .mwdma_mask = 0x06, /* mwdma1-2 */ ··· 376 376 /* ich5_pata */ 377 377 { 378 378 .sht = &piix_sht, 379 - .host_flags = ATA_FLAG_SLAVE_POSS | PIIX_FLAG_CHECKINTR, 379 + .flags = ATA_FLAG_SLAVE_POSS | PIIX_FLAG_CHECKINTR, 380 380 .pio_mask = 0x1f, /* pio0-4 */ 381 381 #if 0 382 382 .mwdma_mask = 0x06, /* mwdma1-2 */ ··· 390 390 /* ich5_sata */ 391 391 { 392 392 .sht = &piix_sht, 393 - .host_flags = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR | 393 + .flags = ATA_FLAG_SATA | PIIX_FLAG_CHECKINTR | 394 394 PIIX_FLAG_IGNORE_PCS, 395 395 .pio_mask = 0x1f, /* pio0-4 */ 396 396 .mwdma_mask = 0x07, /* mwdma0-2 */ ··· 401 401 /* i6300esb_sata */ 402 402 { 403 403 .sht = &piix_sht, 404 - .host_flags = ATA_FLAG_SATA | 404 + .flags = ATA_FLAG_SATA | 405 405 PIIX_FLAG_CHECKINTR | PIIX_FLAG_IGNORE_PCS, 406 406 .pio_mask = 0x1f, /* pio0-4 */ 407 407 .mwdma_mask = 0x07, /* mwdma0-2 */ ··· 412 412 /* ich6_sata */ 413 413 { 414 414 .sht = &piix_sht, 415 - .host_flags = ATA_FLAG_SATA | 415 + .flags = ATA_FLAG_SATA | 416 416 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR, 417 417 .pio_mask = 0x1f, /* pio0-4 */ 418 418 .mwdma_mask = 0x07, /* mwdma0-2 */ ··· 423 423 /* ich6_sata_ahci */ 424 424 { 425 425 .sht = &piix_sht, 426 - .host_flags = ATA_FLAG_SATA | 426 + .flags = ATA_FLAG_SATA | 427 427 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR | 428 428 
PIIX_FLAG_AHCI, 429 429 .pio_mask = 0x1f, /* pio0-4 */ ··· 435 435 /* ich6m_sata_ahci */ 436 436 { 437 437 .sht = &piix_sht, 438 - .host_flags = ATA_FLAG_SATA | 438 + .flags = ATA_FLAG_SATA | 439 439 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR | 440 440 PIIX_FLAG_AHCI, 441 441 .pio_mask = 0x1f, /* pio0-4 */ ··· 447 447 /* ich8_sata_ahci */ 448 448 { 449 449 .sht = &piix_sht, 450 - .host_flags = ATA_FLAG_SATA | 450 + .flags = ATA_FLAG_SATA | 451 451 PIIX_FLAG_CHECKINTR | PIIX_FLAG_SCR | 452 452 PIIX_FLAG_AHCI, 453 453 .pio_mask = 0x1f, /* pio0-4 */ ··· 485 485 */ 486 486 static void piix_pata_cbl_detect(struct ata_port *ap) 487 487 { 488 - struct pci_dev *pdev = to_pci_dev(ap->host_set->dev); 488 + struct pci_dev *pdev = to_pci_dev(ap->host->dev); 489 489 u8 tmp, mask; 490 490 491 491 /* no 80c support in host controller? */ ··· 517 517 */ 518 518 static int piix_pata_prereset(struct ata_port *ap) 519 519 { 520 - struct pci_dev *pdev = to_pci_dev(ap->host_set->dev); 520 + struct pci_dev *pdev = to_pci_dev(ap->host->dev); 521 521 522 522 if (!pci_test_config_bits(pdev, &piix_enable_bits[ap->port_no])) { 523 523 ata_port_printk(ap, KERN_INFO, "port disabled. 
ignoring.\n"); ··· 551 551 */ 552 552 static unsigned int piix_sata_present_mask(struct ata_port *ap) 553 553 { 554 - struct pci_dev *pdev = to_pci_dev(ap->host_set->dev); 555 - struct piix_host_priv *hpriv = ap->host_set->private_data; 554 + struct pci_dev *pdev = to_pci_dev(ap->host->dev); 555 + struct piix_host_priv *hpriv = ap->host->private_data; 556 556 const unsigned int *map = hpriv->map; 557 557 int base = 2 * ap->port_no; 558 558 unsigned int present_mask = 0; ··· 631 631 static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev) 632 632 { 633 633 unsigned int pio = adev->pio_mode - XFER_PIO_0; 634 - struct pci_dev *dev = to_pci_dev(ap->host_set->dev); 634 + struct pci_dev *dev = to_pci_dev(ap->host->dev); 635 635 unsigned int is_slave = (adev->devno != 0); 636 636 unsigned int master_port= ap->port_no ? 0x42 : 0x40; 637 637 unsigned int slave_port = 0x44; ··· 683 683 static void piix_set_dmamode (struct ata_port *ap, struct ata_device *adev) 684 684 { 685 685 unsigned int udma = adev->dma_mode; /* FIXME: MWDMA too */ 686 - struct pci_dev *dev = to_pci_dev(ap->host_set->dev); 686 + struct pci_dev *dev = to_pci_dev(ap->host->dev); 687 687 u8 maslave = ap->port_no ? 0x42 : 0x40; 688 688 u8 speed = udma; 689 689 unsigned int drive_dn = (ap->port_no ? 
2 : 0) + adev->devno; ··· 835 835 if (force_pcs == 1) { 836 836 dev_printk(KERN_INFO, &pdev->dev, 837 837 "force ignoring PCS (0x%x)\n", new_pcs); 838 - pinfo[0].host_flags |= PIIX_FLAG_IGNORE_PCS; 839 - pinfo[1].host_flags |= PIIX_FLAG_IGNORE_PCS; 838 + pinfo[0].flags |= PIIX_FLAG_IGNORE_PCS; 839 + pinfo[1].flags |= PIIX_FLAG_IGNORE_PCS; 840 840 } else if (force_pcs == 2) { 841 841 dev_printk(KERN_INFO, &pdev->dev, 842 842 "force honoring PCS (0x%x)\n", new_pcs); 843 - pinfo[0].host_flags &= ~PIIX_FLAG_IGNORE_PCS; 844 - pinfo[1].host_flags &= ~PIIX_FLAG_IGNORE_PCS; 843 + pinfo[0].flags &= ~PIIX_FLAG_IGNORE_PCS; 844 + pinfo[1].flags &= ~PIIX_FLAG_IGNORE_PCS; 845 845 } 846 846 } 847 847 ··· 881 881 default: 882 882 printk(" P%d", map[i]); 883 883 if (i & 1) 884 - pinfo[i / 2].host_flags |= ATA_FLAG_SLAVE_POSS; 884 + pinfo[i / 2].flags |= ATA_FLAG_SLAVE_POSS; 885 885 break; 886 886 } 887 887 } ··· 916 916 struct ata_port_info port_info[2]; 917 917 struct ata_port_info *ppinfo[2] = { &port_info[0], &port_info[1] }; 918 918 struct piix_host_priv *hpriv; 919 - unsigned long host_flags; 919 + unsigned long port_flags; 920 920 921 921 if (!printed_version++) 922 922 dev_printk(KERN_DEBUG, &pdev->dev, ··· 935 935 port_info[0].private_data = hpriv; 936 936 port_info[1].private_data = hpriv; 937 937 938 - host_flags = port_info[0].host_flags; 938 + port_flags = port_info[0].flags; 939 939 940 - if (host_flags & PIIX_FLAG_AHCI) { 940 + if (port_flags & PIIX_FLAG_AHCI) { 941 941 u8 tmp; 942 942 pci_read_config_byte(pdev, PIIX_SCC, &tmp); 943 943 if (tmp == PIIX_AHCI_DEVICE) { ··· 948 948 } 949 949 950 950 /* Initialize SATA map */ 951 - if (host_flags & ATA_FLAG_SATA) { 951 + if (port_flags & ATA_FLAG_SATA) { 952 952 piix_init_sata_map(pdev, port_info, 953 953 piix_map_db_table[ent->driver_data]); 954 954 piix_init_pcs(pdev, port_info, ··· 961 961 * MSI is disabled (and it is disabled, as we don't use 962 962 * message-signalled interrupts currently). 
963 963 */ 964 - if (host_flags & PIIX_FLAG_CHECKINTR) 964 + if (port_flags & PIIX_FLAG_CHECKINTR) 965 965 pci_intx(pdev, 1); 966 966 967 967 if (piix_check_450nx_errata(pdev)) { ··· 976 976 return ata_pci_init_one(pdev, ppinfo, 2); 977 977 } 978 978 979 - static void piix_host_stop(struct ata_host_set *host_set) 979 + static void piix_host_stop(struct ata_host *host) 980 980 { 981 - struct piix_host_priv *hpriv = host_set->private_data; 981 + struct piix_host_priv *hpriv = host->private_data; 982 982 983 - ata_host_stop(host_set); 983 + ata_host_stop(host); 984 984 985 985 kfree(hpriv); 986 986 }
+17 -17
drivers/ata/libata-bmdma.c
··· 193 193 * synchronization with interrupt handler / other threads. 194 194 * 195 195 * LOCKING: 196 - * spin_lock_irqsave(host_set lock) 196 + * spin_lock_irqsave(host lock) 197 197 */ 198 198 199 199 static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf) ··· 216 216 * FIXME: missing write posting for 400nS delay enforcement 217 217 * 218 218 * LOCKING: 219 - * spin_lock_irqsave(host_set lock) 219 + * spin_lock_irqsave(host lock) 220 220 */ 221 221 222 222 static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf) ··· 237 237 * synchronization with interrupt handler / other threads. 238 238 * 239 239 * LOCKING: 240 - * spin_lock_irqsave(host_set lock) 240 + * spin_lock_irqsave(host lock) 241 241 */ 242 242 void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf) 243 243 { ··· 422 422 * @qc: Info associated with this ATA transaction. 423 423 * 424 424 * LOCKING: 425 - * spin_lock_irqsave(host_set lock) 425 + * spin_lock_irqsave(host lock) 426 426 */ 427 427 428 428 static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc) ··· 452 452 * @qc: Info associated with this ATA transaction. 453 453 * 454 454 * LOCKING: 455 - * spin_lock_irqsave(host_set lock) 455 + * spin_lock_irqsave(host lock) 456 456 */ 457 457 458 458 static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc) ··· 483 483 * @qc: Info associated with this ATA transaction. 484 484 * 485 485 * LOCKING: 486 - * spin_lock_irqsave(host_set lock) 486 + * spin_lock_irqsave(host lock) 487 487 */ 488 488 489 489 static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc) ··· 511 511 * @qc: Info associated with this ATA transaction. 512 512 * 513 513 * LOCKING: 514 - * spin_lock_irqsave(host_set lock) 514 + * spin_lock_irqsave(host lock) 515 515 */ 516 516 517 517 static void ata_bmdma_start_pio (struct ata_queued_cmd *qc) ··· 535 535 * May be used as the bmdma_start() entry in ata_port_operations. 
536 536 * 537 537 * LOCKING: 538 - * spin_lock_irqsave(host_set lock) 538 + * spin_lock_irqsave(host lock) 539 539 */ 540 540 void ata_bmdma_start(struct ata_queued_cmd *qc) 541 541 { ··· 557 557 * May be used as the bmdma_setup() entry in ata_port_operations. 558 558 * 559 559 * LOCKING: 560 - * spin_lock_irqsave(host_set lock) 560 + * spin_lock_irqsave(host lock) 561 561 */ 562 562 void ata_bmdma_setup(struct ata_queued_cmd *qc) 563 563 { ··· 577 577 * May be used as the irq_clear() entry in ata_port_operations. 578 578 * 579 579 * LOCKING: 580 - * spin_lock_irqsave(host_set lock) 580 + * spin_lock_irqsave(host lock) 581 581 */ 582 582 583 583 void ata_bmdma_irq_clear(struct ata_port *ap) ··· 605 605 * May be used as the bmdma_status() entry in ata_port_operations. 606 606 * 607 607 * LOCKING: 608 - * spin_lock_irqsave(host_set lock) 608 + * spin_lock_irqsave(host lock) 609 609 */ 610 610 611 611 u8 ata_bmdma_status(struct ata_port *ap) ··· 629 629 * May be used as the bmdma_stop() entry in ata_port_operations. 
630 630 * 631 631 * LOCKING: 632 - * spin_lock_irqsave(host_set lock) 632 + * spin_lock_irqsave(host lock) 633 633 */ 634 634 635 635 void ata_bmdma_stop(struct ata_queued_cmd *qc) ··· 838 838 bmdma = pci_resource_start(pdev, 4); 839 839 if (bmdma) { 840 840 if (inb(bmdma + 2) & 0x80) 841 - probe_ent->host_set_flags |= ATA_HOST_SIMPLEX; 841 + probe_ent->_host_flags |= ATA_HOST_SIMPLEX; 842 842 probe_ent->port[p].bmdma_addr = bmdma; 843 843 } 844 844 ata_std_ports(&probe_ent->port[p]); ··· 854 854 if (bmdma) { 855 855 bmdma += 8; 856 856 if(inb(bmdma + 2) & 0x80) 857 - probe_ent->host_set_flags |= ATA_HOST_SIMPLEX; 857 + probe_ent->_host_flags |= ATA_HOST_SIMPLEX; 858 858 probe_ent->port[p].bmdma_addr = bmdma; 859 859 } 860 860 ata_std_ports(&probe_ent->port[p]); ··· 887 887 if (bmdma) { 888 888 probe_ent->port[0].bmdma_addr = bmdma; 889 889 if (inb(bmdma + 2) & 0x80) 890 - probe_ent->host_set_flags |= ATA_HOST_SIMPLEX; 890 + probe_ent->_host_flags |= ATA_HOST_SIMPLEX; 891 891 } 892 892 ata_std_ports(&probe_ent->port[0]); 893 893 } else ··· 904 904 if (bmdma) { 905 905 probe_ent->port[1].bmdma_addr = bmdma + 8; 906 906 if (inb(bmdma + 10) & 0x80) 907 - probe_ent->host_set_flags |= ATA_HOST_SIMPLEX; 907 + probe_ent->_host_flags |= ATA_HOST_SIMPLEX; 908 908 } 909 909 ata_std_ports(&probe_ent->port[1]); 910 910 } else ··· 957 957 else 958 958 port[1] = port[0]; 959 959 960 - if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0 960 + if ((port[0]->flags & ATA_FLAG_NO_LEGACY) == 0 961 961 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) { 962 962 /* TODO: What if one channel is in native mode ... */ 963 963 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
+146 -146
drivers/ata/libata-core.c
··· 1335 1335 } 1336 1336 1337 1337 if (ap->flags & ATA_FLAG_NCQ) { 1338 - hdepth = min(ap->host->can_queue, ATA_MAX_QUEUE - 1); 1338 + hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1); 1339 1339 dev->flags |= ATA_DFLAG_NCQ; 1340 1340 } 1341 1341 ··· 1349 1349 { 1350 1350 int i; 1351 1351 1352 - if (ap->host) { 1353 - ap->host->max_cmd_len = 0; 1352 + if (ap->scsi_host) { 1353 + unsigned int len = 0; 1354 + 1354 1355 for (i = 0; i < ATA_MAX_DEVICES; i++) 1355 - ap->host->max_cmd_len = max_t(unsigned int, 1356 - ap->host->max_cmd_len, 1357 - ap->device[i].cdb_len); 1356 + len = max(len, ap->device[i].cdb_len); 1357 + 1358 + ap->scsi_host->max_cmd_len = len; 1358 1359 } 1359 1360 } 1360 1361 ··· 1663 1662 * Modify @ap data structure such that the system 1664 1663 * thinks that the entire port is enabled. 1665 1664 * 1666 - * LOCKING: host_set lock, or some other form of 1665 + * LOCKING: host lock, or some other form of 1667 1666 * serialization. 1668 1667 */ 1669 1668 ··· 1801 1800 * never attempt to probe or communicate with devices 1802 1801 * on this port. 1803 1802 * 1804 - * LOCKING: host_set lock, or some other form of 1803 + * LOCKING: host lock, or some other form of 1805 1804 * serialization. 1806 1805 */ 1807 1806 ··· 2259 2258 /* Record simplex status. If we selected DMA then the other 2260 2259 * host channels are not permitted to do so. 2261 2260 */ 2262 - if (used_dma && (ap->host_set->flags & ATA_HOST_SIMPLEX)) 2263 - ap->host_set->simplex_claimed = 1; 2261 + if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX)) 2262 + ap->host->simplex_claimed = 1; 2264 2263 2265 2264 /* step5: chip specific finalisation */ 2266 2265 if (ap->ops->post_set_mode) ··· 2282 2281 * other threads. 2283 2282 * 2284 2283 * LOCKING: 2285 - * spin_lock_irqsave(host_set lock) 2284 + * spin_lock_irqsave(host lock) 2286 2285 */ 2287 2286 2288 2287 static inline void ata_tf_to_host(struct ata_port *ap, ··· 2446 2445 * 2447 2446 * LOCKING: 2448 2447 * PCI/etc. 
bus probe sem. 2449 - * Obtains host_set lock. 2448 + * Obtains host lock. 2450 2449 * 2451 2450 * SIDE EFFECTS: 2452 2451 * Sets ATA_FLAG_DISABLED if bus reset fails. ··· 3081 3080 static void ata_dev_xfermask(struct ata_device *dev) 3082 3081 { 3083 3082 struct ata_port *ap = dev->ap; 3084 - struct ata_host_set *hs = ap->host_set; 3083 + struct ata_host *host = ap->host; 3085 3084 unsigned long xfer_mask; 3086 3085 3087 3086 /* controller modes available */ ··· 3115 3114 "device is on DMA blacklist, disabling DMA\n"); 3116 3115 } 3117 3116 3118 - if ((hs->flags & ATA_HOST_SIMPLEX) && hs->simplex_claimed) { 3117 + if ((host->flags & ATA_HOST_SIMPLEX) && host->simplex_claimed) { 3119 3118 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 3120 3119 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by " 3121 3120 "other device, disabling DMA\n"); ··· 3208 3207 * Unmap all mapped DMA memory associated with this command. 3209 3208 * 3210 3209 * LOCKING: 3211 - * spin_lock_irqsave(host_set lock) 3210 + * spin_lock_irqsave(host lock) 3212 3211 */ 3213 3212 3214 3213 static void ata_sg_clean(struct ata_queued_cmd *qc) ··· 3268 3267 * associated with the current disk command. 3269 3268 * 3270 3269 * LOCKING: 3271 - * spin_lock_irqsave(host_set lock) 3270 + * spin_lock_irqsave(host lock) 3272 3271 * 3273 3272 */ 3274 3273 static void ata_fill_sg(struct ata_queued_cmd *qc) ··· 3320 3319 * supplied PACKET command. 3321 3320 * 3322 3321 * LOCKING: 3323 - * spin_lock_irqsave(host_set lock) 3322 + * spin_lock_irqsave(host lock) 3324 3323 * 3325 3324 * RETURNS: 0 when ATAPI DMA can be used 3326 3325 * nonzero otherwise ··· 3342 3341 * Prepare ATA taskfile for submission. 3343 3342 * 3344 3343 * LOCKING: 3345 - * spin_lock_irqsave(host_set lock) 3344 + * spin_lock_irqsave(host lock) 3346 3345 */ 3347 3346 void ata_qc_prep(struct ata_queued_cmd *qc) 3348 3347 { ··· 3364 3363 * to point to a single memory buffer, @buf of byte length @buflen. 
3365 3364 * 3366 3365 * LOCKING: 3367 - * spin_lock_irqsave(host_set lock) 3366 + * spin_lock_irqsave(host lock) 3368 3367 */ 3369 3368 3370 3369 void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen) ··· 3395 3394 * elements. 3396 3395 * 3397 3396 * LOCKING: 3398 - * spin_lock_irqsave(host_set lock) 3397 + * spin_lock_irqsave(host lock) 3399 3398 */ 3400 3399 3401 3400 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, ··· 3414 3413 * DMA-map the memory buffer associated with queued_cmd @qc. 3415 3414 * 3416 3415 * LOCKING: 3417 - * spin_lock_irqsave(host_set lock) 3416 + * spin_lock_irqsave(host lock) 3418 3417 * 3419 3418 * RETURNS: 3420 3419 * Zero on success, negative on error. ··· 3483 3482 * DMA-map the scatter-gather table associated with queued_cmd @qc. 3484 3483 * 3485 3484 * LOCKING: 3486 - * spin_lock_irqsave(host_set lock) 3485 + * spin_lock_irqsave(host lock) 3487 3486 * 3488 3487 * RETURNS: 3489 3488 * Zero on success, negative on error. ··· 3992 3991 * Finish @qc which is running on standard HSM. 3993 3992 * 3994 3993 * LOCKING: 3995 - * If @in_wq is zero, spin_lock_irqsave(host_set lock). 3994 + * If @in_wq is zero, spin_lock_irqsave(host lock). 3996 3995 * Otherwise, none on entry and grabs host lock. 3997 3996 */ 3998 3997 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) ··· 4004 4003 if (in_wq) { 4005 4004 spin_lock_irqsave(ap->lock, flags); 4006 4005 4007 - /* EH might have kicked in while host_set lock 4008 - * is released. 4006 + /* EH might have kicked in while host lock is 4007 + * released. 4009 4008 */ 4010 4009 qc = ata_qc_from_tag(ap, qc->tag); 4011 4010 if (qc) { ··· 4370 4369 * in case something prevents using it. 
4371 4370 * 4372 4371 * LOCKING: 4373 - * spin_lock_irqsave(host_set lock) 4372 + * spin_lock_irqsave(host lock) 4374 4373 */ 4375 4374 void ata_qc_free(struct ata_queued_cmd *qc) 4376 4375 { ··· 4423 4422 * command has completed, with either an ok or not-ok status. 4424 4423 * 4425 4424 * LOCKING: 4426 - * spin_lock_irqsave(host_set lock) 4425 + * spin_lock_irqsave(host lock) 4427 4426 */ 4428 4427 void ata_qc_complete(struct ata_queued_cmd *qc) 4429 4428 { ··· 4486 4485 * and commands are completed accordingly. 4487 4486 * 4488 4487 * LOCKING: 4489 - * spin_lock_irqsave(host_set lock) 4488 + * spin_lock_irqsave(host lock) 4490 4489 * 4491 4490 * RETURNS: 4492 4491 * Number of completed commands on success, -errno otherwise. ··· 4557 4556 * writing the taskfile to hardware, starting the command. 4558 4557 * 4559 4558 * LOCKING: 4560 - * spin_lock_irqsave(host_set lock) 4559 + * spin_lock_irqsave(host lock) 4561 4560 */ 4562 4561 void ata_qc_issue(struct ata_queued_cmd *qc) 4563 4562 { ··· 4618 4617 * May be used as the qc_issue() entry in ata_port_operations. 4619 4618 * 4620 4619 * LOCKING: 4621 - * spin_lock_irqsave(host_set lock) 4620 + * spin_lock_irqsave(host lock) 4622 4621 * 4623 4622 * RETURNS: 4624 4623 * Zero on success, AC_ERR_* mask on failure ··· 4747 4746 * handled via polling with interrupts disabled (nIEN bit). 4748 4747 * 4749 4748 * LOCKING: 4750 - * spin_lock_irqsave(host_set lock) 4749 + * spin_lock_irqsave(host lock) 4751 4750 * 4752 4751 * RETURNS: 4753 4752 * One if interrupt was handled, zero if not (shared irq). ··· 4834 4833 /** 4835 4834 * ata_interrupt - Default ATA host interrupt handler 4836 4835 * @irq: irq line (unused) 4837 - * @dev_instance: pointer to our ata_host_set information structure 4836 + * @dev_instance: pointer to our ata_host information structure 4838 4837 * @regs: unused 4839 4838 * 4840 4839 * Default interrupt handler for PCI IDE devices. Calls 4841 4840 * ata_host_intr() for each port that is not disabled. 
4842 4841 * 4843 4842 * LOCKING: 4844 - * Obtains host_set lock during operation. 4843 + * Obtains host lock during operation. 4845 4844 * 4846 4845 * RETURNS: 4847 4846 * IRQ_NONE or IRQ_HANDLED. ··· 4849 4848 4850 4849 irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs) 4851 4850 { 4852 - struct ata_host_set *host_set = dev_instance; 4851 + struct ata_host *host = dev_instance; 4853 4852 unsigned int i; 4854 4853 unsigned int handled = 0; 4855 4854 unsigned long flags; 4856 4855 4857 4856 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */ 4858 - spin_lock_irqsave(&host_set->lock, flags); 4857 + spin_lock_irqsave(&host->lock, flags); 4859 4858 4860 - for (i = 0; i < host_set->n_ports; i++) { 4859 + for (i = 0; i < host->n_ports; i++) { 4861 4860 struct ata_port *ap; 4862 4861 4863 - ap = host_set->ports[i]; 4862 + ap = host->ports[i]; 4864 4863 if (ap && 4865 4864 !(ap->flags & ATA_FLAG_DISABLED)) { 4866 4865 struct ata_queued_cmd *qc; ··· 4872 4871 } 4873 4872 } 4874 4873 4875 - spin_unlock_irqrestore(&host_set->lock, flags); 4874 + spin_unlock_irqrestore(&host->lock, flags); 4876 4875 4877 4876 return IRQ_RETVAL(handled); 4878 4877 } ··· 5037 5036 return 0; 5038 5037 } 5039 5038 5040 - static int ata_host_set_request_pm(struct ata_host_set *host_set, 5041 - pm_message_t mesg, unsigned int action, 5042 - unsigned int ehi_flags, int wait) 5039 + static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg, 5040 + unsigned int action, unsigned int ehi_flags, 5041 + int wait) 5043 5042 { 5044 5043 unsigned long flags; 5045 5044 int i, rc; 5046 5045 5047 - for (i = 0; i < host_set->n_ports; i++) { 5048 - struct ata_port *ap = host_set->ports[i]; 5046 + for (i = 0; i < host->n_ports; i++) { 5047 + struct ata_port *ap = host->ports[i]; 5049 5048 5050 5049 /* Previous resume operation might still be in 5051 5050 * progress. Wait for PM_PENDING to clear. 
··· 5085 5084 } 5086 5085 5087 5086 /** 5088 - * ata_host_set_suspend - suspend host_set 5089 - * @host_set: host_set to suspend 5087 + * ata_host_suspend - suspend host 5088 + * @host: host to suspend 5090 5089 * @mesg: PM message 5091 5090 * 5092 - * Suspend @host_set. Actual operation is performed by EH. This 5091 + * Suspend @host. Actual operation is performed by EH. This 5093 5092 * function requests EH to perform PM operations and waits for EH 5094 5093 * to finish. 5095 5094 * ··· 5099 5098 * RETURNS: 5100 5099 * 0 on success, -errno on failure. 5101 5100 */ 5102 - int ata_host_set_suspend(struct ata_host_set *host_set, pm_message_t mesg) 5101 + int ata_host_suspend(struct ata_host *host, pm_message_t mesg) 5103 5102 { 5104 5103 int i, j, rc; 5105 5104 5106 - rc = ata_host_set_request_pm(host_set, mesg, 0, ATA_EHI_QUIET, 1); 5105 + rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1); 5107 5106 if (rc) 5108 5107 goto fail; 5109 5108 ··· 5111 5110 * This happens if hotplug occurs between completion of device 5112 5111 * suspension and here. 5113 5112 */ 5114 - for (i = 0; i < host_set->n_ports; i++) { 5115 - struct ata_port *ap = host_set->ports[i]; 5113 + for (i = 0; i < host->n_ports; i++) { 5114 + struct ata_port *ap = host->ports[i]; 5116 5115 5117 5116 for (j = 0; j < ATA_MAX_DEVICES; j++) { 5118 5117 struct ata_device *dev = &ap->device[j]; ··· 5127 5126 } 5128 5127 } 5129 5128 5130 - host_set->dev->power.power_state = mesg; 5129 + host->dev->power.power_state = mesg; 5131 5130 return 0; 5132 5131 5133 5132 fail: 5134 - ata_host_set_resume(host_set); 5133 + ata_host_resume(host); 5135 5134 return rc; 5136 5135 } 5137 5136 5138 5137 /** 5139 - * ata_host_set_resume - resume host_set 5140 - * @host_set: host_set to resume 5138 + * ata_host_resume - resume host 5139 + * @host: host to resume 5141 5140 * 5142 - * Resume @host_set. Actual operation is performed by EH. This 5141 + * Resume @host. Actual operation is performed by EH. 
This 5143 5142 * function requests EH to perform PM operations and returns. 5144 5143 * Note that all resume operations are performed parallely. 5145 5144 * 5146 5145 * LOCKING: 5147 5146 * Kernel thread context (may sleep). 5148 5147 */ 5149 - void ata_host_set_resume(struct ata_host_set *host_set) 5148 + void ata_host_resume(struct ata_host *host) 5150 5149 { 5151 - ata_host_set_request_pm(host_set, PMSG_ON, ATA_EH_SOFTRESET, 5152 - ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0); 5153 - host_set->dev->power.power_state = PMSG_ON; 5150 + ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET, 5151 + ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0); 5152 + host->dev->power.power_state = PMSG_ON; 5154 5153 } 5155 5154 5156 5155 /** ··· 5207 5206 ata_pad_free(ap, dev); 5208 5207 } 5209 5208 5210 - void ata_host_stop (struct ata_host_set *host_set) 5209 + void ata_host_stop (struct ata_host *host) 5211 5210 { 5212 - if (host_set->mmio_base) 5213 - iounmap(host_set->mmio_base); 5211 + if (host->mmio_base) 5212 + iounmap(host->mmio_base); 5214 5213 } 5215 5214 5216 5215 /** ··· 5232 5231 5233 5232 /* High bits of dev->flags are used to record warm plug 5234 5233 * requests which occur asynchronously. Synchronize using 5235 - * host_set lock. 5234 + * host lock. 5236 5235 */ 5237 5236 spin_lock_irqsave(ap->lock, flags); 5238 5237 dev->flags &= ~ATA_DFLAG_INIT_MASK; ··· 5248 5247 /** 5249 5248 * ata_port_init - Initialize an ata_port structure 5250 5249 * @ap: Structure to initialize 5251 - * @host_set: Collection of hosts to which @ap belongs 5250 + * @host: Collection of hosts to which @ap belongs 5252 5251 * @ent: Probe information provided by low-level driver 5253 5252 * @port_no: Port number associated with this ata_port 5254 5253 * ··· 5257 5256 * LOCKING: 5258 5257 * Inherited from caller. 
5259 5258 */ 5260 - void ata_port_init(struct ata_port *ap, struct ata_host_set *host_set, 5259 + void ata_port_init(struct ata_port *ap, struct ata_host *host, 5261 5260 const struct ata_probe_ent *ent, unsigned int port_no) 5262 5261 { 5263 5262 unsigned int i; 5264 5263 5265 - ap->lock = &host_set->lock; 5264 + ap->lock = &host->lock; 5266 5265 ap->flags = ATA_FLAG_DISABLED; 5267 5266 ap->id = ata_unique_id++; 5268 5267 ap->ctl = ATA_DEVCTL_OBS; 5269 - ap->host_set = host_set; 5268 + ap->host = host; 5270 5269 ap->dev = ent->dev; 5271 5270 ap->port_no = port_no; 5272 5271 ap->pio_mask = ent->pio_mask; 5273 5272 ap->mwdma_mask = ent->mwdma_mask; 5274 5273 ap->udma_mask = ent->udma_mask; 5275 - ap->flags |= ent->host_flags; 5274 + ap->flags |= ent->port_flags; 5276 5275 ap->ops = ent->port_ops; 5277 5276 ap->hw_sata_spd_limit = UINT_MAX; 5278 5277 ap->active_tag = ATA_TAG_POISON; ··· 5325 5324 */ 5326 5325 static void ata_port_init_shost(struct ata_port *ap, struct Scsi_Host *shost) 5327 5326 { 5328 - ap->host = shost; 5327 + ap->scsi_host = shost; 5329 5328 5330 5329 shost->unique_id = ap->id; 5331 5330 shost->max_id = 16; ··· 5337 5336 /** 5338 5337 * ata_port_add - Attach low-level ATA driver to system 5339 5338 * @ent: Information provided by low-level driver 5340 - * @host_set: Collections of ports to which we add 5339 + * @host: Collections of ports to which we add 5341 5340 * @port_no: Port number associated with this host 5342 5341 * 5343 5342 * Attach low-level ATA driver to system. ··· 5349 5348 * New ata_port on success, for NULL on error. 
5350 5349 */ 5351 5350 static struct ata_port * ata_port_add(const struct ata_probe_ent *ent, 5352 - struct ata_host_set *host_set, 5351 + struct ata_host *host, 5353 5352 unsigned int port_no) 5354 5353 { 5355 5354 struct Scsi_Host *shost; ··· 5358 5357 DPRINTK("ENTER\n"); 5359 5358 5360 5359 if (!ent->port_ops->error_handler && 5361 - !(ent->host_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) { 5360 + !(ent->port_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) { 5362 5361 printk(KERN_ERR "ata%u: no reset mechanism available\n", 5363 5362 port_no); 5364 5363 return NULL; ··· 5372 5371 5373 5372 ap = ata_shost_to_port(shost); 5374 5373 5375 - ata_port_init(ap, host_set, ent, port_no); 5374 + ata_port_init(ap, host, ent, port_no); 5376 5375 ata_port_init_shost(ap, shost); 5377 5376 5378 5377 return ap; 5379 5378 } 5380 5379 5381 5380 /** 5382 - * ata_sas_host_init - Initialize a host_set struct 5383 - * @host_set: host_set to initialize 5384 - * @dev: device host_set is attached to 5385 - * @flags: host_set flags 5386 - * @ops: port_ops 5381 + * ata_sas_host_init - Initialize a host struct 5382 + * @host: host to initialize 5383 + * @dev: device host is attached to 5384 + * @flags: host flags 5385 + * @ops: port_ops 5387 5386 * 5388 5387 * LOCKING: 5389 5388 * PCI/etc. bus probe sem. 
5390 5389 * 5391 5390 */ 5392 5391 5393 - void ata_host_set_init(struct ata_host_set *host_set, 5394 - struct device *dev, unsigned long flags, 5395 - const struct ata_port_operations *ops) 5392 + void ata_host_init(struct ata_host *host, struct device *dev, 5393 + unsigned long flags, const struct ata_port_operations *ops) 5396 5394 { 5397 - spin_lock_init(&host_set->lock); 5398 - host_set->dev = dev; 5399 - host_set->flags = flags; 5400 - host_set->ops = ops; 5395 + spin_lock_init(&host->lock); 5396 + host->dev = dev; 5397 + host->flags = flags; 5398 + host->ops = ops; 5401 5399 } 5402 5400 5403 5401 /** ··· 5421 5421 { 5422 5422 unsigned int i; 5423 5423 struct device *dev = ent->dev; 5424 - struct ata_host_set *host_set; 5424 + struct ata_host *host; 5425 5425 int rc; 5426 5426 5427 5427 DPRINTK("ENTER\n"); 5428 5428 /* alloc a container for our list of ATA ports (buses) */ 5429 - host_set = kzalloc(sizeof(struct ata_host_set) + 5430 - (ent->n_ports * sizeof(void *)), GFP_KERNEL); 5431 - if (!host_set) 5429 + host = kzalloc(sizeof(struct ata_host) + 5430 + (ent->n_ports * sizeof(void *)), GFP_KERNEL); 5431 + if (!host) 5432 5432 return 0; 5433 5433 5434 - ata_host_set_init(host_set, dev, ent->host_set_flags, ent->port_ops); 5435 - host_set->n_ports = ent->n_ports; 5436 - host_set->irq = ent->irq; 5437 - host_set->irq2 = ent->irq2; 5438 - host_set->mmio_base = ent->mmio_base; 5439 - host_set->private_data = ent->private_data; 5434 + ata_host_init(host, dev, ent->_host_flags, ent->port_ops); 5435 + host->n_ports = ent->n_ports; 5436 + host->irq = ent->irq; 5437 + host->irq2 = ent->irq2; 5438 + host->mmio_base = ent->mmio_base; 5439 + host->private_data = ent->private_data; 5440 5440 5441 5441 /* register each port bound to this device */ 5442 - for (i = 0; i < host_set->n_ports; i++) { 5442 + for (i = 0; i < host->n_ports; i++) { 5443 5443 struct ata_port *ap; 5444 5444 unsigned long xfer_mode_mask; 5445 5445 int irq_line = ent->irq; 5446 5446 5447 - ap = 
ata_port_add(ent, host_set, i); 5447 + ap = ata_port_add(ent, host, i); 5448 5448 if (!ap) 5449 5449 goto err_out; 5450 5450 5451 - host_set->ports[i] = ap; 5451 + host->ports[i] = ap; 5452 5452 5453 5453 /* dummy? */ 5454 5454 if (ent->dummy_port_mask & (1 << i)) { ··· 5460 5460 /* start port */ 5461 5461 rc = ap->ops->port_start(ap); 5462 5462 if (rc) { 5463 - host_set->ports[i] = NULL; 5464 - scsi_host_put(ap->host); 5463 + host->ports[i] = NULL; 5464 + scsi_host_put(ap->scsi_host); 5465 5465 goto err_out; 5466 5466 } 5467 5467 ··· 5484 5484 irq_line); 5485 5485 5486 5486 ata_chk_status(ap); 5487 - host_set->ops->irq_clear(ap); 5487 + host->ops->irq_clear(ap); 5488 5488 ata_eh_freeze_port(ap); /* freeze port before requesting IRQ */ 5489 5489 } 5490 5490 5491 5491 /* obtain irq, that may be shared between channels */ 5492 5492 rc = request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags, 5493 - DRV_NAME, host_set); 5493 + DRV_NAME, host); 5494 5494 if (rc) { 5495 5495 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n", 5496 5496 ent->irq, rc); ··· 5504 5504 BUG_ON(ent->irq == ent->irq2); 5505 5505 5506 5506 rc = request_irq(ent->irq2, ent->port_ops->irq_handler, ent->irq_flags, 5507 - DRV_NAME, host_set); 5507 + DRV_NAME, host); 5508 5508 if (rc) { 5509 5509 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n", 5510 5510 ent->irq2, rc); ··· 5514 5514 5515 5515 /* perform each probe synchronously */ 5516 5516 DPRINTK("probe begin\n"); 5517 - for (i = 0; i < host_set->n_ports; i++) { 5518 - struct ata_port *ap = host_set->ports[i]; 5517 + for (i = 0; i < host->n_ports; i++) { 5518 + struct ata_port *ap = host->ports[i]; 5519 5519 u32 scontrol; 5520 5520 int rc; 5521 5521 ··· 5526 5526 } 5527 5527 ap->sata_spd_limit = ap->hw_sata_spd_limit; 5528 5528 5529 - rc = scsi_add_host(ap->host, dev); 5529 + rc = scsi_add_host(ap->scsi_host, dev); 5530 5530 if (rc) { 5531 5531 ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n"); 5532 5532 /* FIXME: do 
something useful here */ ··· 5574 5574 5575 5575 /* probes are done, now scan each port's disk(s) */ 5576 5576 DPRINTK("host probe begin\n"); 5577 - for (i = 0; i < host_set->n_ports; i++) { 5578 - struct ata_port *ap = host_set->ports[i]; 5577 + for (i = 0; i < host->n_ports; i++) { 5578 + struct ata_port *ap = host->ports[i]; 5579 5579 5580 5580 ata_scsi_scan_host(ap); 5581 5581 } 5582 5582 5583 - dev_set_drvdata(dev, host_set); 5583 + dev_set_drvdata(dev, host); 5584 5584 5585 5585 VPRINTK("EXIT, returning %u\n", ent->n_ports); 5586 5586 return ent->n_ports; /* success */ 5587 5587 5588 5588 err_out_free_irq: 5589 - free_irq(ent->irq, host_set); 5589 + free_irq(ent->irq, host); 5590 5590 err_out: 5591 - for (i = 0; i < host_set->n_ports; i++) { 5592 - struct ata_port *ap = host_set->ports[i]; 5591 + for (i = 0; i < host->n_ports; i++) { 5592 + struct ata_port *ap = host->ports[i]; 5593 5593 if (ap) { 5594 5594 ap->ops->port_stop(ap); 5595 - scsi_host_put(ap->host); 5595 + scsi_host_put(ap->scsi_host); 5596 5596 } 5597 5597 } 5598 5598 5599 - kfree(host_set); 5599 + kfree(host); 5600 5600 VPRINTK("EXIT, returning 0\n"); 5601 5601 return 0; 5602 5602 } ··· 5656 5656 5657 5657 skip_eh: 5658 5658 /* remove the associated SCSI host */ 5659 - scsi_remove_host(ap->host); 5659 + scsi_remove_host(ap->scsi_host); 5660 5660 } 5661 5661 5662 5662 /** 5663 - * ata_host_set_remove - PCI layer callback for device removal 5664 - * @host_set: ATA host set that was removed 5663 + * ata_host_remove - PCI layer callback for device removal 5664 + * @host: ATA host set that was removed 5665 5665 * 5666 5666 * Unregister all objects associated with this host set. Free those 5667 5667 * objects. ··· 5670 5670 * Inherited from calling layer (may sleep). 
5671 5671 */ 5672 5672 5673 - void ata_host_set_remove(struct ata_host_set *host_set) 5673 + void ata_host_remove(struct ata_host *host) 5674 5674 { 5675 5675 unsigned int i; 5676 5676 5677 - for (i = 0; i < host_set->n_ports; i++) 5678 - ata_port_detach(host_set->ports[i]); 5677 + for (i = 0; i < host->n_ports; i++) 5678 + ata_port_detach(host->ports[i]); 5679 5679 5680 - free_irq(host_set->irq, host_set); 5681 - if (host_set->irq2) 5682 - free_irq(host_set->irq2, host_set); 5680 + free_irq(host->irq, host); 5681 + if (host->irq2) 5682 + free_irq(host->irq2, host); 5683 5683 5684 - for (i = 0; i < host_set->n_ports; i++) { 5685 - struct ata_port *ap = host_set->ports[i]; 5684 + for (i = 0; i < host->n_ports; i++) { 5685 + struct ata_port *ap = host->ports[i]; 5686 5686 5687 - ata_scsi_release(ap->host); 5687 + ata_scsi_release(ap->scsi_host); 5688 5688 5689 5689 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) { 5690 5690 struct ata_ioports *ioaddr = &ap->ioaddr; ··· 5696 5696 release_region(ATA_SECONDARY_CMD, 8); 5697 5697 } 5698 5698 5699 - scsi_host_put(ap->host); 5699 + scsi_host_put(ap->scsi_host); 5700 5700 } 5701 5701 5702 - if (host_set->ops->host_stop) 5703 - host_set->ops->host_stop(host_set); 5702 + if (host->ops->host_stop) 5703 + host->ops->host_stop(host); 5704 5704 5705 - kfree(host_set); 5705 + kfree(host); 5706 5706 } 5707 5707 5708 5708 /** ··· 5719 5719 * One. 
5720 5720 */ 5721 5721 5722 - int ata_scsi_release(struct Scsi_Host *host) 5722 + int ata_scsi_release(struct Scsi_Host *shost) 5723 5723 { 5724 - struct ata_port *ap = ata_shost_to_port(host); 5724 + struct ata_port *ap = ata_shost_to_port(shost); 5725 5725 5726 5726 DPRINTK("ENTER\n"); 5727 5727 ··· 5748 5748 probe_ent->dev = dev; 5749 5749 5750 5750 probe_ent->sht = port->sht; 5751 - probe_ent->host_flags = port->host_flags; 5751 + probe_ent->port_flags = port->flags; 5752 5752 probe_ent->pio_mask = port->pio_mask; 5753 5753 probe_ent->mwdma_mask = port->mwdma_mask; 5754 5754 probe_ent->udma_mask = port->udma_mask; ··· 5786 5786 5787 5787 #ifdef CONFIG_PCI 5788 5788 5789 - void ata_pci_host_stop (struct ata_host_set *host_set) 5789 + void ata_pci_host_stop (struct ata_host *host) 5790 5790 { 5791 - struct pci_dev *pdev = to_pci_dev(host_set->dev); 5791 + struct pci_dev *pdev = to_pci_dev(host->dev); 5792 5792 5793 - pci_iounmap(pdev, host_set->mmio_base); 5793 + pci_iounmap(pdev, host->mmio_base); 5794 5794 } 5795 5795 5796 5796 /** ··· 5810 5810 void ata_pci_remove_one (struct pci_dev *pdev) 5811 5811 { 5812 5812 struct device *dev = pci_dev_to_dev(pdev); 5813 - struct ata_host_set *host_set = dev_get_drvdata(dev); 5813 + struct ata_host *host = dev_get_drvdata(dev); 5814 5814 5815 - ata_host_set_remove(host_set); 5815 + ata_host_remove(host); 5816 5816 5817 5817 pci_release_regions(pdev); 5818 5818 pci_disable_device(pdev); ··· 5873 5873 5874 5874 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) 5875 5875 { 5876 - struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev); 5876 + struct ata_host *host = dev_get_drvdata(&pdev->dev); 5877 5877 int rc = 0; 5878 5878 5879 - rc = ata_host_set_suspend(host_set, mesg); 5879 + rc = ata_host_suspend(host, mesg); 5880 5880 if (rc) 5881 5881 return rc; 5882 5882 ··· 5887 5887 5888 5888 int ata_pci_device_resume(struct pci_dev *pdev) 5889 5889 { 5890 - struct ata_host_set *host_set = 
dev_get_drvdata(&pdev->dev); 5890 + struct ata_host *host = dev_get_drvdata(&pdev->dev); 5891 5891 5892 5892 ata_pci_device_do_resume(pdev); 5893 - ata_host_set_resume(host_set); 5893 + ata_host_resume(host); 5894 5894 return 0; 5895 5895 } 5896 5896 #endif /* CONFIG_PCI */ ··· 6035 6035 EXPORT_SYMBOL_GPL(ata_dummy_port_ops); 6036 6036 EXPORT_SYMBOL_GPL(ata_std_bios_param); 6037 6037 EXPORT_SYMBOL_GPL(ata_std_ports); 6038 - EXPORT_SYMBOL_GPL(ata_host_set_init); 6038 + EXPORT_SYMBOL_GPL(ata_host_init); 6039 6039 EXPORT_SYMBOL_GPL(ata_device_add); 6040 6040 EXPORT_SYMBOL_GPL(ata_port_detach); 6041 - EXPORT_SYMBOL_GPL(ata_host_set_remove); 6041 + EXPORT_SYMBOL_GPL(ata_host_remove); 6042 6042 EXPORT_SYMBOL_GPL(ata_sg_init); 6043 6043 EXPORT_SYMBOL_GPL(ata_sg_init_one); 6044 6044 EXPORT_SYMBOL_GPL(ata_hsm_move); ··· 6105 6105 EXPORT_SYMBOL_GPL(sata_scr_write_flush); 6106 6106 EXPORT_SYMBOL_GPL(ata_port_online); 6107 6107 EXPORT_SYMBOL_GPL(ata_port_offline); 6108 - EXPORT_SYMBOL_GPL(ata_host_set_suspend); 6109 - EXPORT_SYMBOL_GPL(ata_host_set_resume); 6108 + EXPORT_SYMBOL_GPL(ata_host_suspend); 6109 + EXPORT_SYMBOL_GPL(ata_host_resume); 6110 6110 EXPORT_SYMBOL_GPL(ata_id_string); 6111 6111 EXPORT_SYMBOL_GPL(ata_id_c_string); 6112 6112 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
+8 -8
drivers/ata/libata-eh.c
··· 200 200 /* synchronize with port task */ 201 201 ata_port_flush_task(ap); 202 202 203 - /* synchronize with host_set lock and sort out timeouts */ 203 + /* synchronize with host lock and sort out timeouts */ 204 204 205 205 /* For new EH, all qcs are finished in one of three ways - 206 206 * normal completion, error completion, and SCSI timeout. ··· 377 377 spin_unlock_irqrestore(ap->lock, flags); 378 378 379 379 /* make sure SCSI EH is complete */ 380 - if (scsi_host_in_recovery(ap->host)) { 380 + if (scsi_host_in_recovery(ap->scsi_host)) { 381 381 msleep(10); 382 382 goto retry; 383 383 } ··· 486 486 * other commands are drained. 487 487 * 488 488 * LOCKING: 489 - * spin_lock_irqsave(host_set lock) 489 + * spin_lock_irqsave(host lock) 490 490 */ 491 491 void ata_qc_schedule_eh(struct ata_queued_cmd *qc) 492 492 { ··· 513 513 * all commands are drained. 514 514 * 515 515 * LOCKING: 516 - * spin_lock_irqsave(host_set lock) 516 + * spin_lock_irqsave(host lock) 517 517 */ 518 518 void ata_port_schedule_eh(struct ata_port *ap) 519 519 { 520 520 WARN_ON(!ap->ops->error_handler); 521 521 522 522 ap->pflags |= ATA_PFLAG_EH_PENDING; 523 - scsi_schedule_eh(ap->host); 523 + scsi_schedule_eh(ap->scsi_host); 524 524 525 525 DPRINTK("port EH scheduled\n"); 526 526 } ··· 532 532 * Abort all active qc's of @ap and schedule EH. 533 533 * 534 534 * LOCKING: 535 - * spin_lock_irqsave(host_set lock) 535 + * spin_lock_irqsave(host lock) 536 536 * 537 537 * RETURNS: 538 538 * Number of aborted qc's. ··· 575 575 * is frozen. 576 576 * 577 577 * LOCKING: 578 - * spin_lock_irqsave(host_set lock) 578 + * spin_lock_irqsave(host lock) 579 579 */ 580 580 static void __ata_port_freeze(struct ata_port *ap) 581 581 { ··· 596 596 * Abort and freeze @ap. 597 597 * 598 598 * LOCKING: 599 - * spin_lock_irqsave(host_set lock) 599 + * spin_lock_irqsave(host lock) 600 600 * 601 601 * RETURNS: 602 602 * Number of aborted commands.
+39 -39
drivers/ata/libata-scsi.c
··· 321 321 * current command. 322 322 * 323 323 * LOCKING: 324 - * spin_lock_irqsave(host_set lock) 324 + * spin_lock_irqsave(host lock) 325 325 * 326 326 * RETURNS: 327 327 * Command allocated, or %NULL if none available. ··· 537 537 * format sense blocks. 538 538 * 539 539 * LOCKING: 540 - * spin_lock_irqsave(host_set lock) 540 + * spin_lock_irqsave(host lock) 541 541 */ 542 542 void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk, u8 *asc, 543 543 u8 *ascq, int verbose) ··· 649 649 * block. Clear sense key, ASC & ASCQ if there is no error. 650 650 * 651 651 * LOCKING: 652 - * spin_lock_irqsave(host_set lock) 652 + * spin_lock_irqsave(host lock) 653 653 */ 654 654 void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc) 655 655 { ··· 918 918 * [See SAT revision 5 at www.t10.org] 919 919 * 920 920 * LOCKING: 921 - * spin_lock_irqsave(host_set lock) 921 + * spin_lock_irqsave(host lock) 922 922 * 923 923 * RETURNS: 924 924 * Zero on success, non-zero on error. ··· 986 986 * FLUSH CACHE EXT. 987 987 * 988 988 * LOCKING: 989 - * spin_lock_irqsave(host_set lock) 989 + * spin_lock_irqsave(host lock) 990 990 * 991 991 * RETURNS: 992 992 * Zero on success, non-zero on error. ··· 1109 1109 * Converts SCSI VERIFY command to an ATA READ VERIFY command. 1110 1110 * 1111 1111 * LOCKING: 1112 - * spin_lock_irqsave(host_set lock) 1112 + * spin_lock_irqsave(host lock) 1113 1113 * 1114 1114 * RETURNS: 1115 1115 * Zero on success, non-zero on error. ··· 1233 1233 * %WRITE_16 are currently supported. 1234 1234 * 1235 1235 * LOCKING: 1236 - * spin_lock_irqsave(host_set lock) 1236 + * spin_lock_irqsave(host lock) 1237 1237 * 1238 1238 * RETURNS: 1239 1239 * Zero on success, non-zero on error. ··· 1467 1467 * issued to @dev. 1468 1468 * 1469 1469 * LOCKING: 1470 - * spin_lock_irqsave(host_set lock) 1470 + * spin_lock_irqsave(host lock) 1471 1471 * 1472 1472 * RETURNS: 1473 1473 * 1 if deferring is needed, 0 otherwise. ··· 1510 1510 * termination. 
1511 1511 * 1512 1512 * LOCKING: 1513 - * spin_lock_irqsave(host_set lock) 1513 + * spin_lock_irqsave(host lock) 1514 1514 * 1515 1515 * RETURNS: 1516 1516 * 0 on success, SCSI_ML_QUEUE_DEVICE_BUSY if the command ··· 1589 1589 * Maps buffer contained within SCSI command @cmd. 1590 1590 * 1591 1591 * LOCKING: 1592 - * spin_lock_irqsave(host_set lock) 1592 + * spin_lock_irqsave(host lock) 1593 1593 * 1594 1594 * RETURNS: 1595 1595 * Length of response buffer. ··· 1623 1623 * Unmaps response buffer contained within @cmd. 1624 1624 * 1625 1625 * LOCKING: 1626 - * spin_lock_irqsave(host_set lock) 1626 + * spin_lock_irqsave(host lock) 1627 1627 */ 1628 1628 1629 1629 static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, u8 *buf) ··· 1649 1649 * and sense buffer are assumed to be set). 1650 1650 * 1651 1651 * LOCKING: 1652 - * spin_lock_irqsave(host_set lock) 1652 + * spin_lock_irqsave(host lock) 1653 1653 */ 1654 1654 1655 1655 void ata_scsi_rbuf_fill(struct ata_scsi_args *args, ··· 1680 1680 * with non-VPD INQUIRY command output. 1681 1681 * 1682 1682 * LOCKING: 1683 - * spin_lock_irqsave(host_set lock) 1683 + * spin_lock_irqsave(host lock) 1684 1684 */ 1685 1685 1686 1686 unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf, ··· 1736 1736 * Returns list of inquiry VPD pages available. 1737 1737 * 1738 1738 * LOCKING: 1739 - * spin_lock_irqsave(host_set lock) 1739 + * spin_lock_irqsave(host lock) 1740 1740 */ 1741 1741 1742 1742 unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf, ··· 1764 1764 * Returns ATA device serial number. 1765 1765 * 1766 1766 * LOCKING: 1767 - * spin_lock_irqsave(host_set lock) 1767 + * spin_lock_irqsave(host lock) 1768 1768 */ 1769 1769 1770 1770 unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf, ··· 1797 1797 * name ("ATA "), model and serial numbers. 
1798 1798 * 1799 1799 * LOCKING: 1800 - * spin_lock_irqsave(host_set lock) 1800 + * spin_lock_irqsave(host lock) 1801 1801 */ 1802 1802 1803 1803 unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf, ··· 1849 1849 * that the caller should successfully complete this SCSI command. 1850 1850 * 1851 1851 * LOCKING: 1852 - * spin_lock_irqsave(host_set lock) 1852 + * spin_lock_irqsave(host lock) 1853 1853 */ 1854 1854 1855 1855 unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf, ··· 1990 1990 * descriptor for other device types. 1991 1991 * 1992 1992 * LOCKING: 1993 - * spin_lock_irqsave(host_set lock) 1993 + * spin_lock_irqsave(host lock) 1994 1994 */ 1995 1995 1996 1996 unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf, ··· 2129 2129 * Simulate READ CAPACITY commands. 2130 2130 * 2131 2131 * LOCKING: 2132 - * spin_lock_irqsave(host_set lock) 2132 + * spin_lock_irqsave(host lock) 2133 2133 */ 2134 2134 2135 2135 unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf, ··· 2204 2204 * Simulate REPORT LUNS command. 2205 2205 * 2206 2206 * LOCKING: 2207 - * spin_lock_irqsave(host_set lock) 2207 + * spin_lock_irqsave(host lock) 2208 2208 */ 2209 2209 2210 2210 unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf, ··· 2256 2256 * and the specified additional sense codes. 2257 2257 * 2258 2258 * LOCKING: 2259 - * spin_lock_irqsave(host_set lock) 2259 + * spin_lock_irqsave(host lock) 2260 2260 */ 2261 2261 2262 2262 void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8 asc, u8 ascq) ··· 2421 2421 * @scsicmd: SCSI CDB associated with this PACKET command 2422 2422 * 2423 2423 * LOCKING: 2424 - * spin_lock_irqsave(host_set lock) 2424 + * spin_lock_irqsave(host lock) 2425 2425 * 2426 2426 * RETURNS: 2427 2427 * Zero on success, non-zero on failure. ··· 2500 2500 * Determine if commands should be sent to the specified device. 
2501 2501 * 2502 2502 * LOCKING: 2503 - * spin_lock_irqsave(host_set lock) 2503 + * spin_lock_irqsave(host lock) 2504 2504 * 2505 2505 * RETURNS: 2506 2506 * 0 if commands are not allowed / 1 if commands are allowed ··· 2534 2534 * SCSI command to be sent. 2535 2535 * 2536 2536 * LOCKING: 2537 - * spin_lock_irqsave(host_set lock) 2537 + * spin_lock_irqsave(host lock) 2538 2538 * 2539 2539 * RETURNS: 2540 2540 * Associated ATA device, or %NULL if not found. ··· 2808 2808 * ATA and ATAPI devices appearing as SCSI devices. 2809 2809 * 2810 2810 * LOCKING: 2811 - * Releases scsi-layer-held lock, and obtains host_set lock. 2811 + * Releases scsi-layer-held lock, and obtains host lock. 2812 2812 * 2813 2813 * RETURNS: 2814 2814 * Return value from __ata_scsi_queuecmd() if @cmd can be queued, ··· 2852 2852 * that can be handled internally. 2853 2853 * 2854 2854 * LOCKING: 2855 - * spin_lock_irqsave(host_set lock) 2855 + * spin_lock_irqsave(host lock) 2856 2856 */ 2857 2857 2858 2858 void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd, ··· 2944 2944 if (!ata_dev_enabled(dev) || dev->sdev) 2945 2945 continue; 2946 2946 2947 - sdev = __scsi_add_device(ap->host, 0, i, 0, NULL); 2947 + sdev = __scsi_add_device(ap->scsi_host, 0, i, 0, NULL); 2948 2948 if (!IS_ERR(sdev)) { 2949 2949 dev->sdev = sdev; 2950 2950 scsi_device_put(sdev); ··· 2958 2958 * 2959 2959 * This function is called from ata_eh_hotplug() and responsible 2960 2960 * for taking the SCSI device attached to @dev offline. This 2961 - * function is called with host_set lock which protects dev->sdev 2961 + * function is called with host lock which protects dev->sdev 2962 2962 * against clearing. 2963 2963 * 2964 2964 * LOCKING: 2965 - * spin_lock_irqsave(host_set lock) 2965 + * spin_lock_irqsave(host lock) 2966 2966 * 2967 2967 * RETURNS: 2968 2968 * 1 if attached SCSI device exists, 0 otherwise. 
··· 2998 2998 * be removed if there is __scsi_device_get() interface which 2999 2999 * increments reference counts regardless of device state. 3000 3000 */ 3001 - mutex_lock(&ap->host->scan_mutex); 3001 + mutex_lock(&ap->scsi_host->scan_mutex); 3002 3002 spin_lock_irqsave(ap->lock, flags); 3003 3003 3004 - /* clearing dev->sdev is protected by host_set lock */ 3004 + /* clearing dev->sdev is protected by host lock */ 3005 3005 sdev = dev->sdev; 3006 3006 dev->sdev = NULL; 3007 3007 3008 3008 if (sdev) { 3009 3009 /* If user initiated unplug races with us, sdev can go 3010 - * away underneath us after the host_set lock and 3010 + * away underneath us after the host lock and 3011 3011 * scan_mutex are released. Hold onto it. 3012 3012 */ 3013 3013 if (scsi_device_get(sdev) == 0) { ··· 3024 3024 } 3025 3025 3026 3026 spin_unlock_irqrestore(ap->lock, flags); 3027 - mutex_unlock(&ap->host->scan_mutex); 3027 + mutex_unlock(&ap->scsi_host->scan_mutex); 3028 3028 3029 3029 if (sdev) { 3030 3030 ata_dev_printk(dev, KERN_INFO, "detaching (SCSI %s)\n", ··· 3176 3176 * ata_sas_port_alloc - Allocate port for a SAS attached SATA device 3177 3177 * @pdev: PCI device that the scsi device is attached to 3178 3178 * @port_info: Information from low-level host driver 3179 - * @host: SCSI host that the scsi device is attached to 3179 + * @shost: SCSI host that the scsi device is attached to 3180 3180 * 3181 3181 * LOCKING: 3182 3182 * PCI/etc. bus probe sem. ··· 3185 3185 * ata_port pointer on success / NULL on failure. 
3186 3186 */ 3187 3187 3188 - struct ata_port *ata_sas_port_alloc(struct ata_host_set *host_set, 3188 + struct ata_port *ata_sas_port_alloc(struct ata_host *host, 3189 3189 struct ata_port_info *port_info, 3190 - struct Scsi_Host *host) 3190 + struct Scsi_Host *shost) 3191 3191 { 3192 3192 struct ata_port *ap = kzalloc(sizeof(*ap), GFP_KERNEL); 3193 3193 struct ata_probe_ent *ent; ··· 3195 3195 if (!ap) 3196 3196 return NULL; 3197 3197 3198 - ent = ata_probe_ent_alloc(host_set->dev, port_info); 3198 + ent = ata_probe_ent_alloc(host->dev, port_info); 3199 3199 if (!ent) { 3200 3200 kfree(ap); 3201 3201 return NULL; 3202 3202 } 3203 3203 3204 - ata_port_init(ap, host_set, ent, 0); 3205 - ap->lock = host->host_lock; 3204 + ata_port_init(ap, host, ent, 0); 3205 + ap->lock = shost->host_lock; 3206 3206 kfree(ent); 3207 3207 return ap; 3208 3208 }
+1 -1
drivers/ata/libata.h
··· 69 69 extern void ata_dev_init(struct ata_device *dev); 70 70 extern int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg); 71 71 extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg); 72 - extern void ata_port_init(struct ata_port *ap, struct ata_host_set *host_set, 72 + extern void ata_port_init(struct ata_port *ap, struct ata_host *host, 73 73 const struct ata_probe_ent *ent, unsigned int port_no); 74 74 extern struct ata_probe_ent *ata_probe_ent_alloc(struct device *dev, 75 75 const struct ata_port_info *port);
+23 -23
drivers/ata/pdc_adma.c
··· 127 127 static irqreturn_t adma_intr (int irq, void *dev_instance, 128 128 struct pt_regs *regs); 129 129 static int adma_port_start(struct ata_port *ap); 130 - static void adma_host_stop(struct ata_host_set *host_set); 130 + static void adma_host_stop(struct ata_host *host); 131 131 static void adma_port_stop(struct ata_port *ap); 132 132 static void adma_phy_reset(struct ata_port *ap); 133 133 static void adma_qc_prep(struct ata_queued_cmd *qc); ··· 182 182 /* board_1841_idx */ 183 183 { 184 184 .sht = &adma_ata_sht, 185 - .host_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | 185 + .flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | 186 186 ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO | 187 187 ATA_FLAG_PIO_POLLING, 188 188 .pio_mask = 0x10, /* pio4 */ ··· 237 237 static void adma_reinit_engine(struct ata_port *ap) 238 238 { 239 239 struct adma_port_priv *pp = ap->private_data; 240 - void __iomem *mmio_base = ap->host_set->mmio_base; 240 + void __iomem *mmio_base = ap->host->mmio_base; 241 241 void __iomem *chan = ADMA_REGS(mmio_base, ap->port_no); 242 242 243 243 /* mask/clear ATA interrupts */ ··· 265 265 266 266 static inline void adma_enter_reg_mode(struct ata_port *ap) 267 267 { 268 - void __iomem *chan = ADMA_REGS(ap->host_set->mmio_base, ap->port_no); 268 + void __iomem *chan = ADMA_REGS(ap->host->mmio_base, ap->port_no); 269 269 270 270 writew(aPIOMD4, chan + ADMA_CONTROL); 271 271 readb(chan + ADMA_STATUS); /* flush */ ··· 412 412 static inline void adma_packet_start(struct ata_queued_cmd *qc) 413 413 { 414 414 struct ata_port *ap = qc->ap; 415 - void __iomem *chan = ADMA_REGS(ap->host_set->mmio_base, ap->port_no); 415 + void __iomem *chan = ADMA_REGS(ap->host->mmio_base, ap->port_no); 416 416 417 417 VPRINTK("ENTER, ap %p\n", ap); 418 418 ··· 442 442 return ata_qc_issue_prot(qc); 443 443 } 444 444 445 - static inline unsigned int adma_intr_pkt(struct ata_host_set *host_set) 445 + static inline unsigned int adma_intr_pkt(struct ata_host *host) 446 446 { 447 447 
unsigned int handled = 0, port_no; 448 - u8 __iomem *mmio_base = host_set->mmio_base; 448 + u8 __iomem *mmio_base = host->mmio_base; 449 449 450 - for (port_no = 0; port_no < host_set->n_ports; ++port_no) { 451 - struct ata_port *ap = host_set->ports[port_no]; 450 + for (port_no = 0; port_no < host->n_ports; ++port_no) { 451 + struct ata_port *ap = host->ports[port_no]; 452 452 struct adma_port_priv *pp; 453 453 struct ata_queued_cmd *qc; 454 454 void __iomem *chan = ADMA_REGS(mmio_base, port_no); ··· 476 476 return handled; 477 477 } 478 478 479 - static inline unsigned int adma_intr_mmio(struct ata_host_set *host_set) 479 + static inline unsigned int adma_intr_mmio(struct ata_host *host) 480 480 { 481 481 unsigned int handled = 0, port_no; 482 482 483 - for (port_no = 0; port_no < host_set->n_ports; ++port_no) { 483 + for (port_no = 0; port_no < host->n_ports; ++port_no) { 484 484 struct ata_port *ap; 485 - ap = host_set->ports[port_no]; 485 + ap = host->ports[port_no]; 486 486 if (ap && (!(ap->flags & ATA_FLAG_DISABLED))) { 487 487 struct ata_queued_cmd *qc; 488 488 struct adma_port_priv *pp = ap->private_data; ··· 511 511 512 512 static irqreturn_t adma_intr(int irq, void *dev_instance, struct pt_regs *regs) 513 513 { 514 - struct ata_host_set *host_set = dev_instance; 514 + struct ata_host *host = dev_instance; 515 515 unsigned int handled = 0; 516 516 517 517 VPRINTK("ENTER\n"); 518 518 519 - spin_lock(&host_set->lock); 520 - handled = adma_intr_pkt(host_set) | adma_intr_mmio(host_set); 521 - spin_unlock(&host_set->lock); 519 + spin_lock(&host->lock); 520 + handled = adma_intr_pkt(host) | adma_intr_mmio(host); 521 + spin_unlock(&host->lock); 522 522 523 523 VPRINTK("EXIT\n"); 524 524 ··· 544 544 545 545 static int adma_port_start(struct ata_port *ap) 546 546 { 547 - struct device *dev = ap->host_set->dev; 547 + struct device *dev = ap->host->dev; 548 548 struct adma_port_priv *pp; 549 549 int rc; 550 550 ··· 582 582 583 583 static void adma_port_stop(struct 
ata_port *ap) 584 584 { 585 - struct device *dev = ap->host_set->dev; 585 + struct device *dev = ap->host->dev; 586 586 struct adma_port_priv *pp = ap->private_data; 587 587 588 - adma_reset_engine(ADMA_REGS(ap->host_set->mmio_base, ap->port_no)); 588 + adma_reset_engine(ADMA_REGS(ap->host->mmio_base, ap->port_no)); 589 589 if (pp != NULL) { 590 590 ap->private_data = NULL; 591 591 if (pp->pkt != NULL) ··· 596 596 ata_port_stop(ap); 597 597 } 598 598 599 - static void adma_host_stop(struct ata_host_set *host_set) 599 + static void adma_host_stop(struct ata_host *host) 600 600 { 601 601 unsigned int port_no; 602 602 603 603 for (port_no = 0; port_no < ADMA_PORTS; ++port_no) 604 - adma_reset_engine(ADMA_REGS(host_set->mmio_base, port_no)); 604 + adma_reset_engine(ADMA_REGS(host->mmio_base, port_no)); 605 605 606 - ata_pci_host_stop(host_set); 606 + ata_pci_host_stop(host); 607 607 } 608 608 609 609 static void adma_host_init(unsigned int chip_id, ··· 684 684 INIT_LIST_HEAD(&probe_ent->node); 685 685 686 686 probe_ent->sht = adma_port_info[board_idx].sht; 687 - probe_ent->host_flags = adma_port_info[board_idx].host_flags; 687 + probe_ent->port_flags = adma_port_info[board_idx].flags; 688 688 probe_ent->pio_mask = adma_port_info[board_idx].pio_mask; 689 689 probe_ent->mwdma_mask = adma_port_info[board_idx].mwdma_mask; 690 690 probe_ent->udma_mask = adma_port_info[board_idx].udma_mask;
+48 -50
drivers/ata/sata_mv.c
··· 342 342 static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val); 343 343 static void mv_phy_reset(struct ata_port *ap); 344 344 static void __mv_phy_reset(struct ata_port *ap, int can_sleep); 345 - static void mv_host_stop(struct ata_host_set *host_set); 345 + static void mv_host_stop(struct ata_host *host); 346 346 static int mv_port_start(struct ata_port *ap); 347 347 static void mv_port_stop(struct ata_port *ap); 348 348 static void mv_qc_prep(struct ata_queued_cmd *qc); ··· 480 480 static const struct ata_port_info mv_port_info[] = { 481 481 { /* chip_504x */ 482 482 .sht = &mv_sht, 483 - .host_flags = MV_COMMON_FLAGS, 483 + .flags = MV_COMMON_FLAGS, 484 484 .pio_mask = 0x1f, /* pio0-4 */ 485 485 .udma_mask = 0x7f, /* udma0-6 */ 486 486 .port_ops = &mv5_ops, 487 487 }, 488 488 { /* chip_508x */ 489 489 .sht = &mv_sht, 490 - .host_flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC), 490 + .flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC), 491 491 .pio_mask = 0x1f, /* pio0-4 */ 492 492 .udma_mask = 0x7f, /* udma0-6 */ 493 493 .port_ops = &mv5_ops, 494 494 }, 495 495 { /* chip_5080 */ 496 496 .sht = &mv_sht, 497 - .host_flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC), 497 + .flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC), 498 498 .pio_mask = 0x1f, /* pio0-4 */ 499 499 .udma_mask = 0x7f, /* udma0-6 */ 500 500 .port_ops = &mv5_ops, 501 501 }, 502 502 { /* chip_604x */ 503 503 .sht = &mv_sht, 504 - .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS), 504 + .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS), 505 505 .pio_mask = 0x1f, /* pio0-4 */ 506 506 .udma_mask = 0x7f, /* udma0-6 */ 507 507 .port_ops = &mv6_ops, 508 508 }, 509 509 { /* chip_608x */ 510 510 .sht = &mv_sht, 511 - .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS | 511 + .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS | 512 512 MV_FLAG_DUAL_HC), 513 513 .pio_mask = 0x1f, /* pio0-4 */ 514 514 .udma_mask = 0x7f, /* udma0-6 */ ··· 516 516 }, 517 517 { /* chip_6042 */ 518 518 .sht = &mv_sht, 519 - .host_flags = 
(MV_COMMON_FLAGS | MV_6XXX_FLAGS), 519 + .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS), 520 520 .pio_mask = 0x1f, /* pio0-4 */ 521 521 .udma_mask = 0x7f, /* udma0-6 */ 522 522 .port_ops = &mv_iie_ops, 523 523 }, 524 524 { /* chip_7042 */ 525 525 .sht = &mv_sht, 526 - .host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS | 526 + .flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS | 527 527 MV_FLAG_DUAL_HC), 528 528 .pio_mask = 0x1f, /* pio0-4 */ 529 529 .udma_mask = 0x7f, /* udma0-6 */ ··· 618 618 619 619 static inline void __iomem *mv_ap_base(struct ata_port *ap) 620 620 { 621 - return mv_port_base(ap->host_set->mmio_base, ap->port_no); 621 + return mv_port_base(ap->host->mmio_base, ap->port_no); 622 622 } 623 623 624 - static inline int mv_get_hc_count(unsigned long host_flags) 624 + static inline int mv_get_hc_count(unsigned long port_flags) 625 625 { 626 - return ((host_flags & MV_FLAG_DUAL_HC) ? 2 : 1); 626 + return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1); 627 627 } 628 628 629 629 static void mv_irq_clear(struct ata_port *ap) ··· 809 809 810 810 /** 811 811 * mv_host_stop - Host specific cleanup/stop routine. 812 - * @host_set: host data structure 812 + * @host: host data structure 813 813 * 814 814 * Disable ints, cleanup host memory, call general purpose 815 815 * host_stop. ··· 817 817 * LOCKING: 818 818 * Inherited from caller. 
819 819 */ 820 - static void mv_host_stop(struct ata_host_set *host_set) 820 + static void mv_host_stop(struct ata_host *host) 821 821 { 822 - struct mv_host_priv *hpriv = host_set->private_data; 823 - struct pci_dev *pdev = to_pci_dev(host_set->dev); 822 + struct mv_host_priv *hpriv = host->private_data; 823 + struct pci_dev *pdev = to_pci_dev(host->dev); 824 824 825 825 if (hpriv->hp_flags & MV_HP_FLAG_MSI) { 826 826 pci_disable_msi(pdev); ··· 828 828 pci_intx(pdev, 0); 829 829 } 830 830 kfree(hpriv); 831 - ata_host_stop(host_set); 831 + ata_host_stop(host); 832 832 } 833 833 834 834 static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev) ··· 875 875 */ 876 876 static int mv_port_start(struct ata_port *ap) 877 877 { 878 - struct device *dev = ap->host_set->dev; 879 - struct mv_host_priv *hpriv = ap->host_set->private_data; 878 + struct device *dev = ap->host->dev; 879 + struct mv_host_priv *hpriv = ap->host->private_data; 880 880 struct mv_port_priv *pp; 881 881 void __iomem *port_mmio = mv_ap_base(ap); 882 882 void *mem; ··· 965 965 * Stop DMA, cleanup port memory. 966 966 * 967 967 * LOCKING: 968 - * This routine uses the host_set lock to protect the DMA stop. 968 + * This routine uses the host lock to protect the DMA stop. 
969 969 */ 970 970 static void mv_port_stop(struct ata_port *ap) 971 971 { 972 - struct device *dev = ap->host_set->dev; 972 + struct device *dev = ap->host->dev; 973 973 struct mv_port_priv *pp = ap->private_data; 974 974 unsigned long flags; 975 975 976 - spin_lock_irqsave(&ap->host_set->lock, flags); 976 + spin_lock_irqsave(&ap->host->lock, flags); 977 977 mv_stop_dma(ap); 978 - spin_unlock_irqrestore(&ap->host_set->lock, flags); 978 + spin_unlock_irqrestore(&ap->host->lock, flags); 979 979 980 980 ap->private_data = NULL; 981 981 ata_pad_free(ap, dev); ··· 1330 1330 1331 1331 /** 1332 1332 * mv_host_intr - Handle all interrupts on the given host controller 1333 - * @host_set: host specific structure 1333 + * @host: host specific structure 1334 1334 * @relevant: port error bits relevant to this host controller 1335 1335 * @hc: which host controller we're to look at 1336 1336 * ··· 1344 1344 * LOCKING: 1345 1345 * Inherited from caller. 1346 1346 */ 1347 - static void mv_host_intr(struct ata_host_set *host_set, u32 relevant, 1348 - unsigned int hc) 1347 + static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc) 1349 1348 { 1350 - void __iomem *mmio = host_set->mmio_base; 1349 + void __iomem *mmio = host->mmio_base; 1351 1350 void __iomem *hc_mmio = mv_hc_base(mmio, hc); 1352 1351 struct ata_queued_cmd *qc; 1353 1352 u32 hc_irq_cause; ··· 1370 1371 1371 1372 for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) { 1372 1373 u8 ata_status = 0; 1373 - struct ata_port *ap = host_set->ports[port]; 1374 + struct ata_port *ap = host->ports[port]; 1374 1375 struct mv_port_priv *pp = ap->private_data; 1375 1376 1376 1377 hard_port = mv_hardport_from_port(port); /* range 0..3 */ ··· 1443 1444 * reported here. 1444 1445 * 1445 1446 * LOCKING: 1446 - * This routine holds the host_set lock while processing pending 1447 + * This routine holds the host lock while processing pending 1447 1448 * interrupts. 
1448 1449 */ 1449 1450 static irqreturn_t mv_interrupt(int irq, void *dev_instance, 1450 1451 struct pt_regs *regs) 1451 1452 { 1452 - struct ata_host_set *host_set = dev_instance; 1453 + struct ata_host *host = dev_instance; 1453 1454 unsigned int hc, handled = 0, n_hcs; 1454 - void __iomem *mmio = host_set->mmio_base; 1455 + void __iomem *mmio = host->mmio_base; 1455 1456 struct mv_host_priv *hpriv; 1456 1457 u32 irq_stat; 1457 1458 ··· 1464 1465 return IRQ_NONE; 1465 1466 } 1466 1467 1467 - n_hcs = mv_get_hc_count(host_set->ports[0]->flags); 1468 - spin_lock(&host_set->lock); 1468 + n_hcs = mv_get_hc_count(host->ports[0]->flags); 1469 + spin_lock(&host->lock); 1469 1470 1470 1471 for (hc = 0; hc < n_hcs; hc++) { 1471 1472 u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT)); 1472 1473 if (relevant) { 1473 - mv_host_intr(host_set, relevant, hc); 1474 + mv_host_intr(host, relevant, hc); 1474 1475 handled++; 1475 1476 } 1476 1477 } 1477 1478 1478 - hpriv = host_set->private_data; 1479 + hpriv = host->private_data; 1479 1480 if (IS_60XX(hpriv)) { 1480 1481 /* deal with the interrupt coalescing bits */ 1481 1482 if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) { ··· 1490 1491 readl(mmio + PCI_IRQ_CAUSE_OFS)); 1491 1492 1492 1493 DPRINTK("All regs @ PCI error\n"); 1493 - mv_dump_all_regs(mmio, -1, to_pci_dev(host_set->dev)); 1494 + mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev)); 1494 1495 1495 1496 writelfl(0, mmio + PCI_IRQ_CAUSE_OFS); 1496 1497 handled++; 1497 1498 } 1498 - spin_unlock(&host_set->lock); 1499 + spin_unlock(&host->lock); 1499 1500 1500 1501 return IRQ_RETVAL(handled); 1501 1502 } ··· 1527 1528 1528 1529 static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in) 1529 1530 { 1530 - void __iomem *mmio = mv5_phy_base(ap->host_set->mmio_base, ap->port_no); 1531 + void __iomem *mmio = mv5_phy_base(ap->host->mmio_base, ap->port_no); 1531 1532 unsigned int ofs = mv5_scr_offset(sc_reg_in); 1532 1533 1533 1534 if (ofs != 
0xffffffffU) ··· 1538 1539 1539 1540 static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val) 1540 1541 { 1541 - void __iomem *mmio = mv5_phy_base(ap->host_set->mmio_base, ap->port_no); 1542 + void __iomem *mmio = mv5_phy_base(ap->host->mmio_base, ap->port_no); 1542 1543 unsigned int ofs = mv5_scr_offset(sc_reg_in); 1543 1544 1544 1545 if (ofs != 0xffffffffU) ··· 1903 1904 1904 1905 static void mv_stop_and_reset(struct ata_port *ap) 1905 1906 { 1906 - struct mv_host_priv *hpriv = ap->host_set->private_data; 1907 - void __iomem *mmio = ap->host_set->mmio_base; 1907 + struct mv_host_priv *hpriv = ap->host->private_data; 1908 + void __iomem *mmio = ap->host->mmio_base; 1908 1909 1909 1910 mv_stop_dma(ap); 1910 1911 ··· 1935 1936 static void __mv_phy_reset(struct ata_port *ap, int can_sleep) 1936 1937 { 1937 1938 struct mv_port_priv *pp = ap->private_data; 1938 - struct mv_host_priv *hpriv = ap->host_set->private_data; 1939 + struct mv_host_priv *hpriv = ap->host->private_data; 1939 1940 void __iomem *port_mmio = mv_ap_base(ap); 1940 1941 struct ata_taskfile tf; 1941 1942 struct ata_device *dev = &ap->device[0]; ··· 2034 2035 * chip/bus, fail the command, and move on. 2035 2036 * 2036 2037 * LOCKING: 2037 - * This routine holds the host_set lock while failing the command. 2038 + * This routine holds the host lock while failing the command. 
2038 2039 */ 2039 2040 static void mv_eng_timeout(struct ata_port *ap) 2040 2041 { ··· 2043 2044 2044 2045 ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n"); 2045 2046 DPRINTK("All regs @ start of eng_timeout\n"); 2046 - mv_dump_all_regs(ap->host_set->mmio_base, ap->port_no, 2047 - to_pci_dev(ap->host_set->dev)); 2047 + mv_dump_all_regs(ap->host->mmio_base, ap->port_no, 2048 + to_pci_dev(ap->host->dev)); 2048 2049 2049 2050 qc = ata_qc_from_tag(ap, ap->active_tag); 2050 2051 printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n", 2051 - ap->host_set->mmio_base, ap, qc, qc->scsicmd, 2052 - &qc->scsicmd->cmnd); 2052 + ap->host->mmio_base, ap, qc, qc->scsicmd, &qc->scsicmd->cmnd); 2053 2053 2054 - spin_lock_irqsave(&ap->host_set->lock, flags); 2054 + spin_lock_irqsave(&ap->host->lock, flags); 2055 2055 mv_err_intr(ap, 0); 2056 2056 mv_stop_and_reset(ap); 2057 - spin_unlock_irqrestore(&ap->host_set->lock, flags); 2057 + spin_unlock_irqrestore(&ap->host->lock, flags); 2058 2058 2059 2059 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE)); 2060 2060 if (qc->flags & ATA_QCFLAG_ACTIVE) { ··· 2234 2236 if (rc) 2235 2237 goto done; 2236 2238 2237 - n_hc = mv_get_hc_count(probe_ent->host_flags); 2239 + n_hc = mv_get_hc_count(probe_ent->port_flags); 2238 2240 probe_ent->n_ports = MV_PORTS_PER_HC * n_hc; 2239 2241 2240 2242 for (port = 0; port < probe_ent->n_ports; port++) ··· 2387 2389 memset(hpriv, 0, sizeof(*hpriv)); 2388 2390 2389 2391 probe_ent->sht = mv_port_info[board_idx].sht; 2390 - probe_ent->host_flags = mv_port_info[board_idx].host_flags; 2392 + probe_ent->port_flags = mv_port_info[board_idx].flags; 2391 2393 probe_ent->pio_mask = mv_port_info[board_idx].pio_mask; 2392 2394 probe_ent->udma_mask = mv_port_info[board_idx].udma_mask; 2393 2395 probe_ent->port_ops = mv_port_info[board_idx].port_ops;
+29 -29
drivers/ata/sata_nv.c
··· 81 81 }; 82 82 83 83 static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); 84 - static void nv_ck804_host_stop(struct ata_host_set *host_set); 84 + static void nv_ck804_host_stop(struct ata_host *host); 85 85 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance, 86 86 struct pt_regs *regs); 87 87 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance, ··· 257 257 /* generic */ 258 258 { 259 259 .sht = &nv_sht, 260 - .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, 260 + .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, 261 261 .pio_mask = NV_PIO_MASK, 262 262 .mwdma_mask = NV_MWDMA_MASK, 263 263 .udma_mask = NV_UDMA_MASK, ··· 266 266 /* nforce2/3 */ 267 267 { 268 268 .sht = &nv_sht, 269 - .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, 269 + .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, 270 270 .pio_mask = NV_PIO_MASK, 271 271 .mwdma_mask = NV_MWDMA_MASK, 272 272 .udma_mask = NV_UDMA_MASK, ··· 275 275 /* ck804 */ 276 276 { 277 277 .sht = &nv_sht, 278 - .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, 278 + .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, 279 279 .pio_mask = NV_PIO_MASK, 280 280 .mwdma_mask = NV_MWDMA_MASK, 281 281 .udma_mask = NV_UDMA_MASK, ··· 292 292 static irqreturn_t nv_generic_interrupt(int irq, void *dev_instance, 293 293 struct pt_regs *regs) 294 294 { 295 - struct ata_host_set *host_set = dev_instance; 295 + struct ata_host *host = dev_instance; 296 296 unsigned int i; 297 297 unsigned int handled = 0; 298 298 unsigned long flags; 299 299 300 - spin_lock_irqsave(&host_set->lock, flags); 300 + spin_lock_irqsave(&host->lock, flags); 301 301 302 - for (i = 0; i < host_set->n_ports; i++) { 302 + for (i = 0; i < host->n_ports; i++) { 303 303 struct ata_port *ap; 304 304 305 - ap = host_set->ports[i]; 305 + ap = host->ports[i]; 306 306 if (ap && 307 307 !(ap->flags & ATA_FLAG_DISABLED)) { 308 308 struct ata_queued_cmd *qc; ··· 318 318 319 319 } 320 320 321 - spin_unlock_irqrestore(&host_set->lock, 
flags); 321 + spin_unlock_irqrestore(&host->lock, flags); 322 322 323 323 return IRQ_RETVAL(handled); 324 324 } ··· 354 354 return 1; 355 355 } 356 356 357 - static irqreturn_t nv_do_interrupt(struct ata_host_set *host_set, u8 irq_stat) 357 + static irqreturn_t nv_do_interrupt(struct ata_host *host, u8 irq_stat) 358 358 { 359 359 int i, handled = 0; 360 360 361 - for (i = 0; i < host_set->n_ports; i++) { 362 - struct ata_port *ap = host_set->ports[i]; 361 + for (i = 0; i < host->n_ports; i++) { 362 + struct ata_port *ap = host->ports[i]; 363 363 364 364 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) 365 365 handled += nv_host_intr(ap, irq_stat); ··· 373 373 static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance, 374 374 struct pt_regs *regs) 375 375 { 376 - struct ata_host_set *host_set = dev_instance; 376 + struct ata_host *host = dev_instance; 377 377 u8 irq_stat; 378 378 irqreturn_t ret; 379 379 380 - spin_lock(&host_set->lock); 381 - irq_stat = inb(host_set->ports[0]->ioaddr.scr_addr + NV_INT_STATUS); 382 - ret = nv_do_interrupt(host_set, irq_stat); 383 - spin_unlock(&host_set->lock); 380 + spin_lock(&host->lock); 381 + irq_stat = inb(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS); 382 + ret = nv_do_interrupt(host, irq_stat); 383 + spin_unlock(&host->lock); 384 384 385 385 return ret; 386 386 } ··· 388 388 static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance, 389 389 struct pt_regs *regs) 390 390 { 391 - struct ata_host_set *host_set = dev_instance; 391 + struct ata_host *host = dev_instance; 392 392 u8 irq_stat; 393 393 irqreturn_t ret; 394 394 395 - spin_lock(&host_set->lock); 396 - irq_stat = readb(host_set->mmio_base + NV_INT_STATUS_CK804); 397 - ret = nv_do_interrupt(host_set, irq_stat); 398 - spin_unlock(&host_set->lock); 395 + spin_lock(&host->lock); 396 + irq_stat = readb(host->mmio_base + NV_INT_STATUS_CK804); 397 + ret = nv_do_interrupt(host, irq_stat); 398 + spin_unlock(&host->lock); 399 399 400 400 return ret; 401 401 } ··· 418 
418 419 419 static void nv_nf2_freeze(struct ata_port *ap) 420 420 { 421 - unsigned long scr_addr = ap->host_set->ports[0]->ioaddr.scr_addr; 421 + unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr; 422 422 int shift = ap->port_no * NV_INT_PORT_SHIFT; 423 423 u8 mask; 424 424 ··· 429 429 430 430 static void nv_nf2_thaw(struct ata_port *ap) 431 431 { 432 - unsigned long scr_addr = ap->host_set->ports[0]->ioaddr.scr_addr; 432 + unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr; 433 433 int shift = ap->port_no * NV_INT_PORT_SHIFT; 434 434 u8 mask; 435 435 ··· 442 442 443 443 static void nv_ck804_freeze(struct ata_port *ap) 444 444 { 445 - void __iomem *mmio_base = ap->host_set->mmio_base; 445 + void __iomem *mmio_base = ap->host->mmio_base; 446 446 int shift = ap->port_no * NV_INT_PORT_SHIFT; 447 447 u8 mask; 448 448 ··· 453 453 454 454 static void nv_ck804_thaw(struct ata_port *ap) 455 455 { 456 - void __iomem *mmio_base = ap->host_set->mmio_base; 456 + void __iomem *mmio_base = ap->host->mmio_base; 457 457 int shift = ap->port_no * NV_INT_PORT_SHIFT; 458 458 u8 mask; 459 459 ··· 568 568 return rc; 569 569 } 570 570 571 - static void nv_ck804_host_stop(struct ata_host_set *host_set) 571 + static void nv_ck804_host_stop(struct ata_host *host) 572 572 { 573 - struct pci_dev *pdev = to_pci_dev(host_set->dev); 573 + struct pci_dev *pdev = to_pci_dev(host->dev); 574 574 u8 regval; 575 575 576 576 /* disable SATA space for CK804 */ ··· 578 578 regval &= ~NV_MCP_SATA_CFG_20_SATA_SPACE_EN; 579 579 pci_write_config_byte(pdev, NV_MCP_SATA_CFG_20, regval); 580 580 581 - ata_pci_host_stop(host_set); 581 + ata_pci_host_stop(host); 582 582 } 583 583 584 584 static int __init nv_init(void)
+27 -27
drivers/ata/sata_promise.c
··· 104 104 static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf); 105 105 static void pdc_irq_clear(struct ata_port *ap); 106 106 static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc); 107 - static void pdc_host_stop(struct ata_host_set *host_set); 107 + static void pdc_host_stop(struct ata_host *host); 108 108 109 109 110 110 static struct scsi_host_template pdc_ata_sht = { ··· 175 175 /* board_2037x */ 176 176 { 177 177 .sht = &pdc_ata_sht, 178 - .host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA, 178 + .flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA, 179 179 .pio_mask = 0x1f, /* pio0-4 */ 180 180 .mwdma_mask = 0x07, /* mwdma0-2 */ 181 181 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ ··· 185 185 /* board_20319 */ 186 186 { 187 187 .sht = &pdc_ata_sht, 188 - .host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA, 188 + .flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA, 189 189 .pio_mask = 0x1f, /* pio0-4 */ 190 190 .mwdma_mask = 0x07, /* mwdma0-2 */ 191 191 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ ··· 195 195 /* board_20619 */ 196 196 { 197 197 .sht = &pdc_ata_sht, 198 - .host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS, 198 + .flags = PDC_COMMON_FLAGS | ATA_FLAG_SLAVE_POSS, 199 199 .pio_mask = 0x1f, /* pio0-4 */ 200 200 .mwdma_mask = 0x07, /* mwdma0-2 */ 201 201 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ ··· 205 205 /* board_20771 */ 206 206 { 207 207 .sht = &pdc_ata_sht, 208 - .host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA, 208 + .flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA, 209 209 .pio_mask = 0x1f, /* pio0-4 */ 210 210 .mwdma_mask = 0x07, /* mwdma0-2 */ 211 211 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ ··· 215 215 /* board_2057x */ 216 216 { 217 217 .sht = &pdc_ata_sht, 218 - .host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA, 218 + .flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA, 219 219 .pio_mask = 0x1f, /* pio0-4 */ 220 220 .mwdma_mask = 0x07, /* mwdma0-2 */ 221 221 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ ··· 225 225 /* board_40518 */ 226 226 { 227 227 
.sht = &pdc_ata_sht, 228 - .host_flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA, 228 + .flags = PDC_COMMON_FLAGS | ATA_FLAG_SATA, 229 229 .pio_mask = 0x1f, /* pio0-4 */ 230 230 .mwdma_mask = 0x07, /* mwdma0-2 */ 231 231 .udma_mask = 0x7f, /* udma0-6 ; FIXME */ ··· 292 292 293 293 static int pdc_port_start(struct ata_port *ap) 294 294 { 295 - struct device *dev = ap->host_set->dev; 295 + struct device *dev = ap->host->dev; 296 296 struct pdc_port_priv *pp; 297 297 int rc; 298 298 ··· 326 326 327 327 static void pdc_port_stop(struct ata_port *ap) 328 328 { 329 - struct device *dev = ap->host_set->dev; 329 + struct device *dev = ap->host->dev; 330 330 struct pdc_port_priv *pp = ap->private_data; 331 331 332 332 ap->private_data = NULL; ··· 336 336 } 337 337 338 338 339 - static void pdc_host_stop(struct ata_host_set *host_set) 339 + static void pdc_host_stop(struct ata_host *host) 340 340 { 341 - struct pdc_host_priv *hp = host_set->private_data; 341 + struct pdc_host_priv *hp = host->private_data; 342 342 343 - ata_pci_host_stop(host_set); 343 + ata_pci_host_stop(host); 344 344 345 345 kfree(hp); 346 346 } ··· 443 443 444 444 static void pdc_eng_timeout(struct ata_port *ap) 445 445 { 446 - struct ata_host_set *host_set = ap->host_set; 446 + struct ata_host *host = ap->host; 447 447 u8 drv_stat; 448 448 struct ata_queued_cmd *qc; 449 449 unsigned long flags; 450 450 451 451 DPRINTK("ENTER\n"); 452 452 453 - spin_lock_irqsave(&host_set->lock, flags); 453 + spin_lock_irqsave(&host->lock, flags); 454 454 455 455 qc = ata_qc_from_tag(ap, ap->active_tag); 456 456 ··· 473 473 break; 474 474 } 475 475 476 - spin_unlock_irqrestore(&host_set->lock, flags); 476 + spin_unlock_irqrestore(&host->lock, flags); 477 477 ata_eh_qc_complete(qc); 478 478 DPRINTK("EXIT\n"); 479 479 } ··· 509 509 510 510 static void pdc_irq_clear(struct ata_port *ap) 511 511 { 512 - struct ata_host_set *host_set = ap->host_set; 513 - void __iomem *mmio = host_set->mmio_base; 512 + struct ata_host *host = 
ap->host; 513 + void __iomem *mmio = host->mmio_base; 514 514 515 515 readl(mmio + PDC_INT_SEQMASK); 516 516 } 517 517 518 518 static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *regs) 519 519 { 520 - struct ata_host_set *host_set = dev_instance; 520 + struct ata_host *host = dev_instance; 521 521 struct ata_port *ap; 522 522 u32 mask = 0; 523 523 unsigned int i, tmp; ··· 526 526 527 527 VPRINTK("ENTER\n"); 528 528 529 - if (!host_set || !host_set->mmio_base) { 529 + if (!host || !host->mmio_base) { 530 530 VPRINTK("QUICK EXIT\n"); 531 531 return IRQ_NONE; 532 532 } 533 533 534 - mmio_base = host_set->mmio_base; 534 + mmio_base = host->mmio_base; 535 535 536 536 /* reading should also clear interrupts */ 537 537 mask = readl(mmio_base + PDC_INT_SEQMASK); ··· 541 541 return IRQ_NONE; 542 542 } 543 543 544 - spin_lock(&host_set->lock); 544 + spin_lock(&host->lock); 545 545 546 546 mask &= 0xffff; /* only 16 tags possible */ 547 547 if (!mask) { ··· 551 551 552 552 writel(mask, mmio_base + PDC_INT_SEQMASK); 553 553 554 - for (i = 0; i < host_set->n_ports; i++) { 554 + for (i = 0; i < host->n_ports; i++) { 555 555 VPRINTK("port %u\n", i); 556 - ap = host_set->ports[i]; 556 + ap = host->ports[i]; 557 557 tmp = mask & (1 << (i + 1)); 558 558 if (tmp && ap && 559 559 !(ap->flags & ATA_FLAG_DISABLED)) { ··· 568 568 VPRINTK("EXIT\n"); 569 569 570 570 done_irq: 571 - spin_unlock(&host_set->lock); 571 + spin_unlock(&host->lock); 572 572 return IRQ_RETVAL(handled); 573 573 } 574 574 ··· 581 581 582 582 VPRINTK("ENTER, ap %p\n", ap); 583 583 584 - writel(0x00000001, ap->host_set->mmio_base + (seq * 4)); 585 - readl(ap->host_set->mmio_base + (seq * 4)); /* flush */ 584 + writel(0x00000001, ap->host->mmio_base + (seq * 4)); 585 + readl(ap->host->mmio_base + (seq * 4)); /* flush */ 586 586 587 587 pp->pkt[2] = seq; 588 588 wmb(); /* flush PRD, pkt writes */ ··· 743 743 probe_ent->private_data = hp; 744 744 745 745 probe_ent->sht = 
pdc_port_info[board_idx].sht; 746 - probe_ent->host_flags = pdc_port_info[board_idx].host_flags; 746 + probe_ent->port_flags = pdc_port_info[board_idx].flags; 747 747 probe_ent->pio_mask = pdc_port_info[board_idx].pio_mask; 748 748 probe_ent->mwdma_mask = pdc_port_info[board_idx].mwdma_mask; 749 749 probe_ent->udma_mask = pdc_port_info[board_idx].udma_mask;
+22 -22
drivers/ata/sata_qstor.c
··· 116 116 static int qs_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); 117 117 static irqreturn_t qs_intr (int irq, void *dev_instance, struct pt_regs *regs); 118 118 static int qs_port_start(struct ata_port *ap); 119 - static void qs_host_stop(struct ata_host_set *host_set); 119 + static void qs_host_stop(struct ata_host *host); 120 120 static void qs_port_stop(struct ata_port *ap); 121 121 static void qs_phy_reset(struct ata_port *ap); 122 122 static void qs_qc_prep(struct ata_queued_cmd *qc); ··· 174 174 /* board_2068_idx */ 175 175 { 176 176 .sht = &qs_ata_sht, 177 - .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 177 + .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 178 178 ATA_FLAG_SATA_RESET | 179 179 //FIXME ATA_FLAG_SRST | 180 180 ATA_FLAG_MMIO | ATA_FLAG_PIO_POLLING, ··· 220 220 221 221 static inline void qs_enter_reg_mode(struct ata_port *ap) 222 222 { 223 - u8 __iomem *chan = ap->host_set->mmio_base + (ap->port_no * 0x4000); 223 + u8 __iomem *chan = ap->host->mmio_base + (ap->port_no * 0x4000); 224 224 225 225 writeb(QS_CTR0_REG, chan + QS_CCT_CTR0); 226 226 readb(chan + QS_CCT_CTR0); /* flush */ ··· 228 228 229 229 static inline void qs_reset_channel_logic(struct ata_port *ap) 230 230 { 231 - u8 __iomem *chan = ap->host_set->mmio_base + (ap->port_no * 0x4000); 231 + u8 __iomem *chan = ap->host->mmio_base + (ap->port_no * 0x4000); 232 232 233 233 writeb(QS_CTR1_RCHN, chan + QS_CCT_CTR1); 234 234 readb(chan + QS_CCT_CTR0); /* flush */ ··· 342 342 static inline void qs_packet_start(struct ata_queued_cmd *qc) 343 343 { 344 344 struct ata_port *ap = qc->ap; 345 - u8 __iomem *chan = ap->host_set->mmio_base + (ap->port_no * 0x4000); 345 + u8 __iomem *chan = ap->host->mmio_base + (ap->port_no * 0x4000); 346 346 347 347 VPRINTK("ENTER, ap %p\n", ap); 348 348 ··· 375 375 return ata_qc_issue_prot(qc); 376 376 } 377 377 378 - static inline unsigned int qs_intr_pkt(struct ata_host_set *host_set) 378 + static inline unsigned int 
qs_intr_pkt(struct ata_host *host) 379 379 { 380 380 unsigned int handled = 0; 381 381 u8 sFFE; 382 - u8 __iomem *mmio_base = host_set->mmio_base; 382 + u8 __iomem *mmio_base = host->mmio_base; 383 383 384 384 do { 385 385 u32 sff0 = readl(mmio_base + QS_HST_SFF); ··· 391 391 u8 sDST = sff0 >> 16; /* dev status */ 392 392 u8 sHST = sff1 & 0x3f; /* host status */ 393 393 unsigned int port_no = (sff1 >> 8) & 0x03; 394 - struct ata_port *ap = host_set->ports[port_no]; 394 + struct ata_port *ap = host->ports[port_no]; 395 395 396 396 DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n", 397 397 sff1, sff0, port_no, sHST, sDST); ··· 421 421 return handled; 422 422 } 423 423 424 - static inline unsigned int qs_intr_mmio(struct ata_host_set *host_set) 424 + static inline unsigned int qs_intr_mmio(struct ata_host *host) 425 425 { 426 426 unsigned int handled = 0, port_no; 427 427 428 - for (port_no = 0; port_no < host_set->n_ports; ++port_no) { 428 + for (port_no = 0; port_no < host->n_ports; ++port_no) { 429 429 struct ata_port *ap; 430 - ap = host_set->ports[port_no]; 430 + ap = host->ports[port_no]; 431 431 if (ap && 432 432 !(ap->flags & ATA_FLAG_DISABLED)) { 433 433 struct ata_queued_cmd *qc; ··· 457 457 458 458 static irqreturn_t qs_intr(int irq, void *dev_instance, struct pt_regs *regs) 459 459 { 460 - struct ata_host_set *host_set = dev_instance; 460 + struct ata_host *host = dev_instance; 461 461 unsigned int handled = 0; 462 462 463 463 VPRINTK("ENTER\n"); 464 464 465 - spin_lock(&host_set->lock); 466 - handled = qs_intr_pkt(host_set) | qs_intr_mmio(host_set); 467 - spin_unlock(&host_set->lock); 465 + spin_lock(&host->lock); 466 + handled = qs_intr_pkt(host) | qs_intr_mmio(host); 467 + spin_unlock(&host->lock); 468 468 469 469 VPRINTK("EXIT\n"); 470 470 ··· 491 491 492 492 static int qs_port_start(struct ata_port *ap) 493 493 { 494 - struct device *dev = ap->host_set->dev; 494 + struct device *dev = ap->host->dev; 495 495 struct qs_port_priv *pp; 496 - void 
__iomem *mmio_base = ap->host_set->mmio_base; 496 + void __iomem *mmio_base = ap->host->mmio_base; 497 497 void __iomem *chan = mmio_base + (ap->port_no * 0x4000); 498 498 u64 addr; 499 499 int rc; ··· 530 530 531 531 static void qs_port_stop(struct ata_port *ap) 532 532 { 533 - struct device *dev = ap->host_set->dev; 533 + struct device *dev = ap->host->dev; 534 534 struct qs_port_priv *pp = ap->private_data; 535 535 536 536 if (pp != NULL) { ··· 543 543 ata_port_stop(ap); 544 544 } 545 545 546 - static void qs_host_stop(struct ata_host_set *host_set) 546 + static void qs_host_stop(struct ata_host *host) 547 547 { 548 - void __iomem *mmio_base = host_set->mmio_base; 549 - struct pci_dev *pdev = to_pci_dev(host_set->dev); 548 + void __iomem *mmio_base = host->mmio_base; 549 + struct pci_dev *pdev = to_pci_dev(host->dev); 550 550 551 551 writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */ 552 552 writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */ ··· 673 673 INIT_LIST_HEAD(&probe_ent->node); 674 674 675 675 probe_ent->sht = qs_port_info[board_idx].sht; 676 - probe_ent->host_flags = qs_port_info[board_idx].host_flags; 676 + probe_ent->port_flags = qs_port_info[board_idx].flags; 677 677 probe_ent->pio_mask = qs_port_info[board_idx].pio_mask; 678 678 probe_ent->mwdma_mask = qs_port_info[board_idx].mwdma_mask; 679 679 probe_ent->udma_mask = qs_port_info[board_idx].udma_mask;
+23 -24
drivers/ata/sata_sil.c
··· 56 56 SIL_FLAG_RERR_ON_DMA_ACT = (1 << 29), 57 57 SIL_FLAG_MOD15WRITE = (1 << 30), 58 58 59 - SIL_DFL_HOST_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 59 + SIL_DFL_PORT_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 60 60 ATA_FLAG_MMIO | ATA_FLAG_HRST_TO_RESUME, 61 61 62 62 /* ··· 218 218 /* sil_3112 */ 219 219 { 220 220 .sht = &sil_sht, 221 - .host_flags = SIL_DFL_HOST_FLAGS | SIL_FLAG_MOD15WRITE, 221 + .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE, 222 222 .pio_mask = 0x1f, /* pio0-4 */ 223 223 .mwdma_mask = 0x07, /* mwdma0-2 */ 224 224 .udma_mask = 0x3f, /* udma0-5 */ ··· 227 227 /* sil_3112_no_sata_irq */ 228 228 { 229 229 .sht = &sil_sht, 230 - .host_flags = SIL_DFL_HOST_FLAGS | SIL_FLAG_MOD15WRITE | 230 + .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_MOD15WRITE | 231 231 SIL_FLAG_NO_SATA_IRQ, 232 232 .pio_mask = 0x1f, /* pio0-4 */ 233 233 .mwdma_mask = 0x07, /* mwdma0-2 */ ··· 237 237 /* sil_3512 */ 238 238 { 239 239 .sht = &sil_sht, 240 - .host_flags = SIL_DFL_HOST_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT, 240 + .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT, 241 241 .pio_mask = 0x1f, /* pio0-4 */ 242 242 .mwdma_mask = 0x07, /* mwdma0-2 */ 243 243 .udma_mask = 0x3f, /* udma0-5 */ ··· 246 246 /* sil_3114 */ 247 247 { 248 248 .sht = &sil_sht, 249 - .host_flags = SIL_DFL_HOST_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT, 249 + .flags = SIL_DFL_PORT_FLAGS | SIL_FLAG_RERR_ON_DMA_ACT, 250 250 .pio_mask = 0x1f, /* pio0-4 */ 251 251 .mwdma_mask = 0x07, /* mwdma0-2 */ 252 252 .udma_mask = 0x3f, /* udma0-5 */ ··· 295 295 296 296 static void sil_post_set_mode (struct ata_port *ap) 297 297 { 298 - struct ata_host_set *host_set = ap->host_set; 298 + struct ata_host *host = ap->host; 299 299 struct ata_device *dev; 300 - void __iomem *addr = 301 - host_set->mmio_base + sil_port[ap->port_no].xfer_mode; 300 + void __iomem *addr = host->mmio_base + sil_port[ap->port_no].xfer_mode; 302 301 u32 tmp, dev_mode[2]; 303 302 unsigned int i; 304 303 ··· 439 440 static irqreturn_t 
sil_interrupt(int irq, void *dev_instance, 440 441 struct pt_regs *regs) 441 442 { 442 - struct ata_host_set *host_set = dev_instance; 443 - void __iomem *mmio_base = host_set->mmio_base; 443 + struct ata_host *host = dev_instance; 444 + void __iomem *mmio_base = host->mmio_base; 444 445 int handled = 0; 445 446 int i; 446 447 447 - spin_lock(&host_set->lock); 448 + spin_lock(&host->lock); 448 449 449 - for (i = 0; i < host_set->n_ports; i++) { 450 - struct ata_port *ap = host_set->ports[i]; 450 + for (i = 0; i < host->n_ports; i++) { 451 + struct ata_port *ap = host->ports[i]; 451 452 u32 bmdma2 = readl(mmio_base + sil_port[ap->port_no].bmdma2); 452 453 453 454 if (unlikely(!ap || ap->flags & ATA_FLAG_DISABLED)) ··· 465 466 handled = 1; 466 467 } 467 468 468 - spin_unlock(&host_set->lock); 469 + spin_unlock(&host->lock); 469 470 470 471 return IRQ_RETVAL(handled); 471 472 } 472 473 473 474 static void sil_freeze(struct ata_port *ap) 474 475 { 475 - void __iomem *mmio_base = ap->host_set->mmio_base; 476 + void __iomem *mmio_base = ap->host->mmio_base; 476 477 u32 tmp; 477 478 478 479 /* global IRQ mask doesn't block SATA IRQ, turn off explicitly */ ··· 487 488 488 489 static void sil_thaw(struct ata_port *ap) 489 490 { 490 - void __iomem *mmio_base = ap->host_set->mmio_base; 491 + void __iomem *mmio_base = ap->host->mmio_base; 491 492 u32 tmp; 492 493 493 494 /* clear IRQ */ ··· 566 567 } 567 568 568 569 static void sil_init_controller(struct pci_dev *pdev, 569 - int n_ports, unsigned long host_flags, 570 + int n_ports, unsigned long port_flags, 570 571 void __iomem *mmio_base) 571 572 { 572 573 u8 cls; ··· 586 587 "cache line size not set. 
Driver may not function\n"); 587 588 588 589 /* Apply R_ERR on DMA activate FIS errata workaround */ 589 - if (host_flags & SIL_FLAG_RERR_ON_DMA_ACT) { 590 + if (port_flags & SIL_FLAG_RERR_ON_DMA_ACT) { 590 591 int cnt; 591 592 592 593 for (i = 0, cnt = 0; i < n_ports; i++) { ··· 657 658 probe_ent->udma_mask = sil_port_info[ent->driver_data].udma_mask; 658 659 probe_ent->irq = pdev->irq; 659 660 probe_ent->irq_flags = IRQF_SHARED; 660 - probe_ent->host_flags = sil_port_info[ent->driver_data].host_flags; 661 + probe_ent->port_flags = sil_port_info[ent->driver_data].flags; 661 662 662 663 mmio_base = pci_iomap(pdev, 5, 0); 663 664 if (mmio_base == NULL) { ··· 678 679 ata_std_ports(&probe_ent->port[i]); 679 680 } 680 681 681 - sil_init_controller(pdev, probe_ent->n_ports, probe_ent->host_flags, 682 + sil_init_controller(pdev, probe_ent->n_ports, probe_ent->port_flags, 682 683 mmio_base); 683 684 684 685 pci_set_master(pdev); ··· 702 703 #ifdef CONFIG_PM 703 704 static int sil_pci_device_resume(struct pci_dev *pdev) 704 705 { 705 - struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev); 706 + struct ata_host *host = dev_get_drvdata(&pdev->dev); 706 707 707 708 ata_pci_device_do_resume(pdev); 708 - sil_init_controller(pdev, host_set->n_ports, host_set->ports[0]->flags, 709 - host_set->mmio_base); 710 - ata_host_set_resume(host_set); 709 + sil_init_controller(pdev, host->n_ports, host->ports[0]->flags, 710 + host->mmio_base); 711 + ata_host_resume(host); 711 712 712 713 return 0; 713 714 }
+29 -30
drivers/ata/sata_sil24.c
··· 316 316 struct ata_taskfile tf; /* Cached taskfile registers */ 317 317 }; 318 318 319 - /* ap->host_set->private_data */ 319 + /* ap->host->private_data */ 320 320 struct sil24_host_priv { 321 321 void __iomem *host_base; /* global controller control (128 bytes @BAR0) */ 322 322 void __iomem *port_base; /* port registers (4 * 8192 bytes @BAR2) */ ··· 337 337 static void sil24_post_internal_cmd(struct ata_queued_cmd *qc); 338 338 static int sil24_port_start(struct ata_port *ap); 339 339 static void sil24_port_stop(struct ata_port *ap); 340 - static void sil24_host_stop(struct ata_host_set *host_set); 340 + static void sil24_host_stop(struct ata_host *host); 341 341 static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent); 342 342 #ifdef CONFIG_PM 343 343 static int sil24_pci_device_resume(struct pci_dev *pdev); ··· 415 415 }; 416 416 417 417 /* 418 - * Use bits 30-31 of host_flags to encode available port numbers. 418 + * Use bits 30-31 of port_flags to encode available port numbers. 419 419 * Current maxium is 4. 
420 420 */ 421 421 #define SIL24_NPORTS2FLAG(nports) ((((unsigned)(nports) - 1) & 0x3) << 30) ··· 425 425 /* sil_3124 */ 426 426 { 427 427 .sht = &sil24_sht, 428 - .host_flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(4) | 428 + .flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(4) | 429 429 SIL24_FLAG_PCIX_IRQ_WOC, 430 430 .pio_mask = 0x1f, /* pio0-4 */ 431 431 .mwdma_mask = 0x07, /* mwdma0-2 */ ··· 435 435 /* sil_3132 */ 436 436 { 437 437 .sht = &sil24_sht, 438 - .host_flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(2), 438 + .flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(2), 439 439 .pio_mask = 0x1f, /* pio0-4 */ 440 440 .mwdma_mask = 0x07, /* mwdma0-2 */ 441 441 .udma_mask = 0x3f, /* udma0-5 */ ··· 444 444 /* sil_3131/sil_3531 */ 445 445 { 446 446 .sht = &sil24_sht, 447 - .host_flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(1), 447 + .flags = SIL24_COMMON_FLAGS | SIL24_NPORTS2FLAG(1), 448 448 .pio_mask = 0x1f, /* pio0-4 */ 449 449 .mwdma_mask = 0x07, /* mwdma0-2 */ 450 450 .udma_mask = 0x3f, /* udma0-5 */ ··· 871 871 872 872 static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *regs) 873 873 { 874 - struct ata_host_set *host_set = dev_instance; 875 - struct sil24_host_priv *hpriv = host_set->private_data; 874 + struct ata_host *host = dev_instance; 875 + struct sil24_host_priv *hpriv = host->private_data; 876 876 unsigned handled = 0; 877 877 u32 status; 878 878 int i; ··· 888 888 if (!(status & IRQ_STAT_4PORTS)) 889 889 goto out; 890 890 891 - spin_lock(&host_set->lock); 891 + spin_lock(&host->lock); 892 892 893 - for (i = 0; i < host_set->n_ports; i++) 893 + for (i = 0; i < host->n_ports; i++) 894 894 if (status & (1 << i)) { 895 - struct ata_port *ap = host_set->ports[i]; 895 + struct ata_port *ap = host->ports[i]; 896 896 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) { 897 - sil24_host_intr(host_set->ports[i]); 897 + sil24_host_intr(host->ports[i]); 898 898 handled++; 899 899 } else 900 900 printk(KERN_ERR DRV_NAME 901 901 ": interrupt 
from disabled port %d\n", i); 902 902 } 903 903 904 - spin_unlock(&host_set->lock); 904 + spin_unlock(&host->lock); 905 905 out: 906 906 return IRQ_RETVAL(handled); 907 907 } ··· 941 941 942 942 static int sil24_port_start(struct ata_port *ap) 943 943 { 944 - struct device *dev = ap->host_set->dev; 944 + struct device *dev = ap->host->dev; 945 945 struct sil24_port_priv *pp; 946 946 union sil24_cmd_block *cb; 947 947 size_t cb_size = sizeof(*cb) * SIL24_MAX_CMDS; ··· 980 980 981 981 static void sil24_port_stop(struct ata_port *ap) 982 982 { 983 - struct device *dev = ap->host_set->dev; 983 + struct device *dev = ap->host->dev; 984 984 struct sil24_port_priv *pp = ap->private_data; 985 985 986 986 sil24_cblk_free(pp, dev); ··· 988 988 kfree(pp); 989 989 } 990 990 991 - static void sil24_host_stop(struct ata_host_set *host_set) 991 + static void sil24_host_stop(struct ata_host *host) 992 992 { 993 - struct sil24_host_priv *hpriv = host_set->private_data; 994 - struct pci_dev *pdev = to_pci_dev(host_set->dev); 993 + struct sil24_host_priv *hpriv = host->private_data; 994 + struct pci_dev *pdev = to_pci_dev(host->dev); 995 995 996 996 pci_iounmap(pdev, hpriv->host_base); 997 997 pci_iounmap(pdev, hpriv->port_base); ··· 999 999 } 1000 1000 1001 1001 static void sil24_init_controller(struct pci_dev *pdev, int n_ports, 1002 - unsigned long host_flags, 1002 + unsigned long port_flags, 1003 1003 void __iomem *host_base, 1004 1004 void __iomem *port_base) 1005 1005 { ··· 1032 1032 } 1033 1033 1034 1034 /* Configure IRQ WoC */ 1035 - if (host_flags & SIL24_FLAG_PCIX_IRQ_WOC) 1035 + if (port_flags & SIL24_FLAG_PCIX_IRQ_WOC) 1036 1036 writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_STAT); 1037 1037 else 1038 1038 writel(PORT_CS_IRQ_WOC, port + PORT_CTRL_CLR); ··· 1101 1101 INIT_LIST_HEAD(&probe_ent->node); 1102 1102 1103 1103 probe_ent->sht = pinfo->sht; 1104 - probe_ent->host_flags = pinfo->host_flags; 1104 + probe_ent->port_flags = pinfo->flags; 1105 1105 probe_ent->pio_mask = 
pinfo->pio_mask; 1106 1106 probe_ent->mwdma_mask = pinfo->mwdma_mask; 1107 1107 probe_ent->udma_mask = pinfo->udma_mask; 1108 1108 probe_ent->port_ops = pinfo->port_ops; 1109 - probe_ent->n_ports = SIL24_FLAG2NPORTS(pinfo->host_flags); 1109 + probe_ent->n_ports = SIL24_FLAG2NPORTS(pinfo->flags); 1110 1110 1111 1111 probe_ent->irq = pdev->irq; 1112 1112 probe_ent->irq_flags = IRQF_SHARED; ··· 1144 1144 } 1145 1145 1146 1146 /* Apply workaround for completion IRQ loss on PCI-X errata */ 1147 - if (probe_ent->host_flags & SIL24_FLAG_PCIX_IRQ_WOC) { 1147 + if (probe_ent->port_flags & SIL24_FLAG_PCIX_IRQ_WOC) { 1148 1148 tmp = readl(host_base + HOST_CTRL); 1149 1149 if (tmp & (HOST_CTRL_TRDY | HOST_CTRL_STOP | HOST_CTRL_DEVSEL)) 1150 1150 dev_printk(KERN_INFO, &pdev->dev, 1151 1151 "Applying completion IRQ loss on PCI-X " 1152 1152 "errata fix\n"); 1153 1153 else 1154 - probe_ent->host_flags &= ~SIL24_FLAG_PCIX_IRQ_WOC; 1154 + probe_ent->port_flags &= ~SIL24_FLAG_PCIX_IRQ_WOC; 1155 1155 } 1156 1156 1157 1157 for (i = 0; i < probe_ent->n_ports; i++) { ··· 1164 1164 ata_std_ports(&probe_ent->port[i]); 1165 1165 } 1166 1166 1167 - sil24_init_controller(pdev, probe_ent->n_ports, probe_ent->host_flags, 1167 + sil24_init_controller(pdev, probe_ent->n_ports, probe_ent->port_flags, 1168 1168 host_base, port_base); 1169 1169 1170 1170 pci_set_master(pdev); ··· 1191 1191 #ifdef CONFIG_PM 1192 1192 static int sil24_pci_device_resume(struct pci_dev *pdev) 1193 1193 { 1194 - struct ata_host_set *host_set = dev_get_drvdata(&pdev->dev); 1195 - struct sil24_host_priv *hpriv = host_set->private_data; 1194 + struct ata_host *host = dev_get_drvdata(&pdev->dev); 1195 + struct sil24_host_priv *hpriv = host->private_data; 1196 1196 1197 1197 ata_pci_device_do_resume(pdev); 1198 1198 1199 1199 if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) 1200 1200 writel(HOST_CTRL_GLOBAL_RST, hpriv->host_base + HOST_CTRL); 1201 1201 1202 - sil24_init_controller(pdev, host_set->n_ports, 1203 - 
host_set->ports[0]->flags, 1202 + sil24_init_controller(pdev, host->n_ports, host->ports[0]->flags, 1204 1203 hpriv->host_base, hpriv->port_base); 1205 1204 1206 - ata_host_set_resume(host_set); 1205 + ata_host_resume(host); 1207 1206 1208 1207 return 0; 1209 1208 }
+9 -9
drivers/ata/sata_sis.c
··· 128 128 129 129 static struct ata_port_info sis_port_info = { 130 130 .sht = &sis_sht, 131 - .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, 131 + .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, 132 132 .pio_mask = 0x1f, 133 133 .mwdma_mask = 0x7, 134 134 .udma_mask = 0x7f, ··· 158 158 159 159 static u32 sis_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg) 160 160 { 161 - struct pci_dev *pdev = to_pci_dev(ap->host_set->dev); 161 + struct pci_dev *pdev = to_pci_dev(ap->host->dev); 162 162 unsigned int cfg_addr = get_scr_cfg_addr(ap->port_no, sc_reg, pdev->device); 163 163 u32 val, val2 = 0; 164 164 u8 pmr; ··· 178 178 179 179 static void sis_scr_cfg_write (struct ata_port *ap, unsigned int scr, u32 val) 180 180 { 181 - struct pci_dev *pdev = to_pci_dev(ap->host_set->dev); 181 + struct pci_dev *pdev = to_pci_dev(ap->host->dev); 182 182 unsigned int cfg_addr = get_scr_cfg_addr(ap->port_no, scr, pdev->device); 183 183 u8 pmr; 184 184 ··· 195 195 196 196 static u32 sis_scr_read (struct ata_port *ap, unsigned int sc_reg) 197 197 { 198 - struct pci_dev *pdev = to_pci_dev(ap->host_set->dev); 198 + struct pci_dev *pdev = to_pci_dev(ap->host->dev); 199 199 u32 val, val2 = 0; 200 200 u8 pmr; 201 201 ··· 217 217 218 218 static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) 219 219 { 220 - struct pci_dev *pdev = to_pci_dev(ap->host_set->dev); 220 + struct pci_dev *pdev = to_pci_dev(ap->host->dev); 221 221 u8 pmr; 222 222 223 223 if (sc_reg > SCR_CONTROL) ··· 275 275 /* check and see if the SCRs are in IO space or PCI cfg space */ 276 276 pci_read_config_dword(pdev, SIS_GENCTL, &genctl); 277 277 if ((genctl & GENCTL_IOMAPPED_SCR) == 0) 278 - probe_ent->host_flags |= SIS_FLAG_CFGSCR; 278 + probe_ent->port_flags |= SIS_FLAG_CFGSCR; 279 279 280 280 /* if hardware thinks SCRs are in IO space, but there are 281 281 * no IO resources assigned, change to PCI cfg space. 
282 282 */ 283 - if ((!(probe_ent->host_flags & SIS_FLAG_CFGSCR)) && 283 + if ((!(probe_ent->port_flags & SIS_FLAG_CFGSCR)) && 284 284 ((pci_resource_start(pdev, SIS_SCR_PCI_BAR) == 0) || 285 285 (pci_resource_len(pdev, SIS_SCR_PCI_BAR) < 128))) { 286 286 genctl &= ~GENCTL_IOMAPPED_SCR; 287 287 pci_write_config_dword(pdev, SIS_GENCTL, genctl); 288 - probe_ent->host_flags |= SIS_FLAG_CFGSCR; 288 + probe_ent->port_flags |= SIS_FLAG_CFGSCR; 289 289 } 290 290 291 291 pci_read_config_byte(pdev, SIS_PMR, &pmr); ··· 306 306 port2_start = 0x20; 307 307 } 308 308 309 - if (!(probe_ent->host_flags & SIS_FLAG_CFGSCR)) { 309 + if (!(probe_ent->port_flags & SIS_FLAG_CFGSCR)) { 310 310 probe_ent->port[0].scr_addr = 311 311 pci_resource_start(pdev, SIS_SCR_PCI_BAR); 312 312 probe_ent->port[1].scr_addr =
+5 -5
drivers/ata/sata_svw.c
··· 169 169 * @qc: Info associated with this ATA transaction. 170 170 * 171 171 * LOCKING: 172 - * spin_lock_irqsave(host_set lock) 172 + * spin_lock_irqsave(host lock) 173 173 */ 174 174 175 175 static void k2_bmdma_setup_mmio (struct ata_queued_cmd *qc) ··· 199 199 * @qc: Info associated with this ATA transaction. 200 200 * 201 201 * LOCKING: 202 - * spin_lock_irqsave(host_set lock) 202 + * spin_lock_irqsave(host lock) 203 203 */ 204 204 205 205 static void k2_bmdma_start_mmio (struct ata_queued_cmd *qc) ··· 261 261 return 0; 262 262 263 263 /* Find the OF node for the PCI device proper */ 264 - np = pci_device_to_OF_node(to_pci_dev(ap->host_set->dev)); 264 + np = pci_device_to_OF_node(to_pci_dev(ap->host->dev)); 265 265 if (np == NULL) 266 266 return 0; 267 267 268 268 /* Match it to a port node */ 269 - index = (ap == ap->host_set->ports[0]) ? 0 : 1; 269 + index = (ap == ap->host->ports[0]) ? 0 : 1; 270 270 for (np = np->child; np != NULL; np = np->sibling) { 271 271 u32 *reg = (u32 *)get_property(np, "reg", NULL); 272 272 if (!reg) ··· 423 423 writel(0x0, mmio_base + K2_SATA_SIM_OFFSET); 424 424 425 425 probe_ent->sht = &k2_sata_sht; 426 - probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 426 + probe_ent->port_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 427 427 ATA_FLAG_MMIO; 428 428 probe_ent->port_ops = &k2_sata_ops; 429 429 probe_ent->n_ports = 4;
+32 -32
drivers/ata/sata_sx4.c
··· 160 160 static void pdc20621_qc_prep(struct ata_queued_cmd *qc); 161 161 static void pdc_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf); 162 162 static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf); 163 - static void pdc20621_host_stop(struct ata_host_set *host_set); 163 + static void pdc20621_host_stop(struct ata_host *host); 164 164 static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe); 165 165 static int pdc20621_detect_dimm(struct ata_probe_ent *pe); 166 166 static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe, ··· 218 218 /* board_20621 */ 219 219 { 220 220 .sht = &pdc_sata_sht, 221 - .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 221 + .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 222 222 ATA_FLAG_SRST | ATA_FLAG_MMIO | 223 223 ATA_FLAG_NO_ATAPI | ATA_FLAG_PIO_POLLING, 224 224 .pio_mask = 0x1f, /* pio0-4 */ ··· 244 244 }; 245 245 246 246 247 - static void pdc20621_host_stop(struct ata_host_set *host_set) 247 + static void pdc20621_host_stop(struct ata_host *host) 248 248 { 249 - struct pci_dev *pdev = to_pci_dev(host_set->dev); 250 - struct pdc_host_priv *hpriv = host_set->private_data; 249 + struct pci_dev *pdev = to_pci_dev(host->dev); 250 + struct pdc_host_priv *hpriv = host->private_data; 251 251 void __iomem *dimm_mmio = hpriv->dimm_mmio; 252 252 253 253 pci_iounmap(pdev, dimm_mmio); 254 254 kfree(hpriv); 255 255 256 - pci_iounmap(pdev, host_set->mmio_base); 256 + pci_iounmap(pdev, host->mmio_base); 257 257 } 258 258 259 259 static int pdc_port_start(struct ata_port *ap) 260 260 { 261 - struct device *dev = ap->host_set->dev; 261 + struct device *dev = ap->host->dev; 262 262 struct pdc_port_priv *pp; 263 263 int rc; 264 264 ··· 293 293 294 294 static void pdc_port_stop(struct ata_port *ap) 295 295 { 296 - struct device *dev = ap->host_set->dev; 296 + struct device *dev = ap->host->dev; 297 297 struct pdc_port_priv *pp = ap->private_data; 298 298 299 299 ap->private_data = 
NULL; ··· 453 453 struct scatterlist *sg; 454 454 struct ata_port *ap = qc->ap; 455 455 struct pdc_port_priv *pp = ap->private_data; 456 - void __iomem *mmio = ap->host_set->mmio_base; 457 - struct pdc_host_priv *hpriv = ap->host_set->private_data; 456 + void __iomem *mmio = ap->host->mmio_base; 457 + struct pdc_host_priv *hpriv = ap->host->private_data; 458 458 void __iomem *dimm_mmio = hpriv->dimm_mmio; 459 459 unsigned int portno = ap->port_no; 460 460 unsigned int i, idx, total_len = 0, sgt_len; ··· 514 514 { 515 515 struct ata_port *ap = qc->ap; 516 516 struct pdc_port_priv *pp = ap->private_data; 517 - void __iomem *mmio = ap->host_set->mmio_base; 518 - struct pdc_host_priv *hpriv = ap->host_set->private_data; 517 + void __iomem *mmio = ap->host->mmio_base; 518 + struct pdc_host_priv *hpriv = ap->host->private_data; 519 519 void __iomem *dimm_mmio = hpriv->dimm_mmio; 520 520 unsigned int portno = ap->port_no; 521 521 unsigned int i; ··· 565 565 u32 pkt_ofs) 566 566 { 567 567 struct ata_port *ap = qc->ap; 568 - struct ata_host_set *host_set = ap->host_set; 569 - void __iomem *mmio = host_set->mmio_base; 568 + struct ata_host *host = ap->host; 569 + void __iomem *mmio = host->mmio_base; 570 570 571 571 /* hard-code chip #0 */ 572 572 mmio += PDC_CHIP0_OFS; ··· 583 583 u32 pkt_ofs) 584 584 { 585 585 struct ata_port *ap = qc->ap; 586 - struct pdc_host_priv *pp = ap->host_set->private_data; 586 + struct pdc_host_priv *pp = ap->host->private_data; 587 587 unsigned int idx = pp->hdma_prod & PDC_HDMA_Q_MASK; 588 588 589 589 if (!pp->doing_hdma) { ··· 601 601 static void pdc20621_pop_hdma(struct ata_queued_cmd *qc) 602 602 { 603 603 struct ata_port *ap = qc->ap; 604 - struct pdc_host_priv *pp = ap->host_set->private_data; 604 + struct pdc_host_priv *pp = ap->host->private_data; 605 605 unsigned int idx = pp->hdma_cons & PDC_HDMA_Q_MASK; 606 606 607 607 /* if nothing on queue, we're done */ ··· 620 620 { 621 621 struct ata_port *ap = qc->ap; 622 622 unsigned int 
port_no = ap->port_no; 623 - struct pdc_host_priv *hpriv = ap->host_set->private_data; 623 + struct pdc_host_priv *hpriv = ap->host->private_data; 624 624 void *dimm_mmio = hpriv->dimm_mmio; 625 625 626 626 dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP); ··· 638 638 static void pdc20621_packet_start(struct ata_queued_cmd *qc) 639 639 { 640 640 struct ata_port *ap = qc->ap; 641 - struct ata_host_set *host_set = ap->host_set; 641 + struct ata_host *host = ap->host; 642 642 unsigned int port_no = ap->port_no; 643 - void __iomem *mmio = host_set->mmio_base; 643 + void __iomem *mmio = host->mmio_base; 644 644 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); 645 645 u8 seq = (u8) (port_no + 1); 646 646 unsigned int port_ofs; ··· 781 781 782 782 static void pdc20621_irq_clear(struct ata_port *ap) 783 783 { 784 - struct ata_host_set *host_set = ap->host_set; 785 - void __iomem *mmio = host_set->mmio_base; 784 + struct ata_host *host = ap->host; 785 + void __iomem *mmio = host->mmio_base; 786 786 787 787 mmio += PDC_CHIP0_OFS; 788 788 ··· 791 791 792 792 static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_regs *regs) 793 793 { 794 - struct ata_host_set *host_set = dev_instance; 794 + struct ata_host *host = dev_instance; 795 795 struct ata_port *ap; 796 796 u32 mask = 0; 797 797 unsigned int i, tmp, port_no; ··· 800 800 801 801 VPRINTK("ENTER\n"); 802 802 803 - if (!host_set || !host_set->mmio_base) { 803 + if (!host || !host->mmio_base) { 804 804 VPRINTK("QUICK EXIT\n"); 805 805 return IRQ_NONE; 806 806 } 807 807 808 - mmio_base = host_set->mmio_base; 808 + mmio_base = host->mmio_base; 809 809 810 810 /* reading should also clear interrupts */ 811 811 mmio_base += PDC_CHIP0_OFS; ··· 822 822 return IRQ_NONE; 823 823 } 824 824 825 - spin_lock(&host_set->lock); 825 + spin_lock(&host->lock); 826 826 827 827 for (i = 1; i < 9; i++) { 828 828 port_no = i - 1; 829 829 if (port_no > 3) 830 830 port_no -= 4; 831 - if (port_no >= host_set->n_ports) 831 + 
if (port_no >= host->n_ports) 832 832 ap = NULL; 833 833 else 834 - ap = host_set->ports[port_no]; 834 + ap = host->ports[port_no]; 835 835 tmp = mask & (1 << i); 836 836 VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp); 837 837 if (tmp && ap && ··· 845 845 } 846 846 } 847 847 848 - spin_unlock(&host_set->lock); 848 + spin_unlock(&host->lock); 849 849 850 850 VPRINTK("mask == 0x%x\n", mask); 851 851 ··· 857 857 static void pdc_eng_timeout(struct ata_port *ap) 858 858 { 859 859 u8 drv_stat; 860 - struct ata_host_set *host_set = ap->host_set; 860 + struct ata_host *host = ap->host; 861 861 struct ata_queued_cmd *qc; 862 862 unsigned long flags; 863 863 864 864 DPRINTK("ENTER\n"); 865 865 866 - spin_lock_irqsave(&host_set->lock, flags); 866 + spin_lock_irqsave(&host->lock, flags); 867 867 868 868 qc = ata_qc_from_tag(ap, ap->active_tag); 869 869 ··· 885 885 break; 886 886 } 887 887 888 - spin_unlock_irqrestore(&host_set->lock, flags); 888 + spin_unlock_irqrestore(&host->lock, flags); 889 889 ata_eh_qc_complete(qc); 890 890 DPRINTK("EXIT\n"); 891 891 } ··· 1429 1429 hpriv->dimm_mmio = dimm_mmio; 1430 1430 1431 1431 probe_ent->sht = pdc_port_info[board_idx].sht; 1432 - probe_ent->host_flags = pdc_port_info[board_idx].host_flags; 1432 + probe_ent->port_flags = pdc_port_info[board_idx].flags; 1433 1433 probe_ent->pio_mask = pdc_port_info[board_idx].pio_mask; 1434 1434 probe_ent->mwdma_mask = pdc_port_info[board_idx].mwdma_mask; 1435 1435 probe_ent->udma_mask = pdc_port_info[board_idx].udma_mask;
+4 -4
drivers/ata/sata_uli.c
··· 128 128 129 129 static struct ata_port_info uli_port_info = { 130 130 .sht = &uli_sht, 131 - .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, 131 + .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, 132 132 .pio_mask = 0x1f, /* pio0-4 */ 133 133 .udma_mask = 0x7f, /* udma0-6 */ 134 134 .port_ops = &uli_ops, ··· 143 143 144 144 static unsigned int get_scr_cfg_addr(struct ata_port *ap, unsigned int sc_reg) 145 145 { 146 - struct uli_priv *hpriv = ap->host_set->private_data; 146 + struct uli_priv *hpriv = ap->host->private_data; 147 147 return hpriv->scr_cfg_addr[ap->port_no] + (4 * sc_reg); 148 148 } 149 149 150 150 static u32 uli_scr_cfg_read (struct ata_port *ap, unsigned int sc_reg) 151 151 { 152 - struct pci_dev *pdev = to_pci_dev(ap->host_set->dev); 152 + struct pci_dev *pdev = to_pci_dev(ap->host->dev); 153 153 unsigned int cfg_addr = get_scr_cfg_addr(ap, sc_reg); 154 154 u32 val; 155 155 ··· 159 159 160 160 static void uli_scr_cfg_write (struct ata_port *ap, unsigned int scr, u32 val) 161 161 { 162 - struct pci_dev *pdev = to_pci_dev(ap->host_set->dev); 162 + struct pci_dev *pdev = to_pci_dev(ap->host->dev); 163 163 unsigned int cfg_addr = get_scr_cfg_addr(ap, scr); 164 164 165 165 pci_write_config_dword(pdev, cfg_addr, val);
+2 -2
drivers/ata/sata_via.c
··· 176 176 177 177 static struct ata_port_info vt6420_port_info = { 178 178 .sht = &svia_sht, 179 - .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, 179 + .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY, 180 180 .pio_mask = 0x1f, 181 181 .mwdma_mask = 0x07, 182 182 .udma_mask = 0x7f, ··· 346 346 INIT_LIST_HEAD(&probe_ent->node); 347 347 348 348 probe_ent->sht = &svia_sht; 349 - probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY; 349 + probe_ent->port_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY; 350 350 probe_ent->port_ops = &vt6421_sata_ops; 351 351 probe_ent->n_ports = N_PORTS; 352 352 probe_ent->irq = pdev->irq;
+8 -8
drivers/ata/sata_vsc.c
··· 123 123 void __iomem *mask_addr; 124 124 u8 mask; 125 125 126 - mask_addr = ap->host_set->mmio_base + 126 + mask_addr = ap->host->mmio_base + 127 127 VSC_SATA_INT_MASK_OFFSET + ap->port_no; 128 128 mask = readb(mask_addr); 129 129 if (ctl & ATA_NIEN) ··· 206 206 static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance, 207 207 struct pt_regs *regs) 208 208 { 209 - struct ata_host_set *host_set = dev_instance; 209 + struct ata_host *host = dev_instance; 210 210 unsigned int i; 211 211 unsigned int handled = 0; 212 212 u32 int_status; 213 213 214 - spin_lock(&host_set->lock); 214 + spin_lock(&host->lock); 215 215 216 - int_status = readl(host_set->mmio_base + VSC_SATA_INT_STAT_OFFSET); 216 + int_status = readl(host->mmio_base + VSC_SATA_INT_STAT_OFFSET); 217 217 218 - for (i = 0; i < host_set->n_ports; i++) { 218 + for (i = 0; i < host->n_ports; i++) { 219 219 if (int_status & ((u32) 0xFF << (8 * i))) { 220 220 struct ata_port *ap; 221 221 222 - ap = host_set->ports[i]; 222 + ap = host->ports[i]; 223 223 224 224 if (is_vsc_sata_int_err(i, int_status)) { 225 225 u32 err_status; ··· 259 259 } 260 260 } 261 261 262 - spin_unlock(&host_set->lock); 262 + spin_unlock(&host->lock); 263 263 264 264 return IRQ_RETVAL(handled); 265 265 } ··· 395 395 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x80); 396 396 397 397 probe_ent->sht = &vsc_sata_sht; 398 - probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 398 + probe_ent->port_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | 399 399 ATA_FLAG_MMIO; 400 400 probe_ent->port_ops = &vsc_sata_ops; 401 401 probe_ent->n_ports = 4;
+19 -20
include/linux/libata.h
··· 197 197 ATA_QCFLAG_EH_SCHEDULED = (1 << 18), /* EH scheduled (obsolete) */ 198 198 199 199 /* host set flags */ 200 - ATA_HOST_SIMPLEX = (1 << 0), /* Host is simplex, one DMA channel per host_set only */ 200 + ATA_HOST_SIMPLEX = (1 << 0), /* Host is simplex, one DMA channel per host only */ 201 201 202 202 /* various lengths of time */ 203 203 ATA_TMOUT_BOOT = 30 * HZ, /* heuristic */ ··· 357 357 unsigned long irq; 358 358 unsigned long irq2; 359 359 unsigned int irq_flags; 360 - unsigned long host_flags; 361 - unsigned long host_set_flags; 360 + unsigned long port_flags; 361 + unsigned long _host_flags; 362 362 void __iomem *mmio_base; 363 363 void *private_data; 364 364 }; 365 365 366 - struct ata_host_set { 366 + struct ata_host { 367 367 spinlock_t lock; 368 368 struct device *dev; 369 369 unsigned long irq; ··· 420 420 void *private_data; 421 421 }; 422 422 423 - struct ata_host_stats { 423 + struct ata_port_stats { 424 424 unsigned long unhandled_irq; 425 425 unsigned long idle_irq; 426 426 unsigned long rw_reqbuf; ··· 498 498 }; 499 499 500 500 struct ata_port { 501 - struct Scsi_Host *host; /* our co-allocated scsi host */ 501 + struct Scsi_Host *scsi_host; /* our co-allocated scsi host */ 502 502 const struct ata_port_operations *ops; 503 503 spinlock_t *lock; 504 504 unsigned long flags; /* ATA_FLAG_xxx */ ··· 523 523 unsigned int hw_sata_spd_limit; 524 524 unsigned int sata_spd_limit; /* SATA PHY speed limit */ 525 525 526 - /* record runtime error info, protected by host_set lock */ 526 + /* record runtime error info, protected by host lock */ 527 527 struct ata_eh_info eh_info; 528 528 /* EH context owned by EH */ 529 529 struct ata_eh_context eh_context; ··· 537 537 unsigned int active_tag; 538 538 u32 sactive; 539 539 540 - struct ata_host_stats stats; 541 - struct ata_host_set *host_set; 540 + struct ata_port_stats stats; 541 + struct ata_host *host; 542 542 struct device *dev; 543 543 544 544 struct work_struct port_task; ··· 614 614 int 
(*port_start) (struct ata_port *ap); 615 615 void (*port_stop) (struct ata_port *ap); 616 616 617 - void (*host_stop) (struct ata_host_set *host_set); 617 + void (*host_stop) (struct ata_host *host); 618 618 619 619 void (*bmdma_stop) (struct ata_queued_cmd *qc); 620 620 u8 (*bmdma_status) (struct ata_port *ap); ··· 622 622 623 623 struct ata_port_info { 624 624 struct scsi_host_template *sht; 625 - unsigned long host_flags; 625 + unsigned long flags; 626 626 unsigned long pio_mask; 627 627 unsigned long mwdma_mask; 628 628 unsigned long udma_mask; ··· 690 690 #endif /* CONFIG_PCI */ 691 691 extern int ata_device_add(const struct ata_probe_ent *ent); 692 692 extern void ata_port_detach(struct ata_port *ap); 693 - extern void ata_host_set_init(struct ata_host_set *, struct device *, 694 - unsigned long, const struct ata_port_operations *); 695 - extern void ata_host_set_remove(struct ata_host_set *host_set); 693 + extern void ata_host_init(struct ata_host *, struct device *, 694 + unsigned long, const struct ata_port_operations *); 695 + extern void ata_host_remove(struct ata_host *host); 696 696 extern int ata_scsi_detect(struct scsi_host_template *sht); 697 697 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg); 698 698 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)); 699 699 extern int ata_scsi_release(struct Scsi_Host *host); 700 700 extern void ata_sas_port_destroy(struct ata_port *); 701 - extern struct ata_port *ata_sas_port_alloc(struct ata_host_set *, 701 + extern struct ata_port *ata_sas_port_alloc(struct ata_host *, 702 702 struct ata_port_info *, struct Scsi_Host *); 703 703 extern int ata_sas_port_init(struct ata_port *); 704 704 extern int ata_sas_port_start(struct ata_port *ap); ··· 715 715 extern int ata_port_offline(struct ata_port *ap); 716 716 extern int ata_scsi_device_resume(struct scsi_device *); 717 717 extern int ata_scsi_device_suspend(struct scsi_device *, pm_message_t mesg); 
718 - extern int ata_host_set_suspend(struct ata_host_set *host_set, 719 - pm_message_t mesg); 720 - extern void ata_host_set_resume(struct ata_host_set *host_set); 718 + extern int ata_host_suspend(struct ata_host *host, pm_message_t mesg); 719 + extern void ata_host_resume(struct ata_host *host); 721 720 extern int ata_ratelimit(void); 722 721 extern unsigned int ata_busy_sleep(struct ata_port *ap, 723 722 unsigned long timeout_pat, ··· 741 742 extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf); 742 743 extern int ata_port_start (struct ata_port *ap); 743 744 extern void ata_port_stop (struct ata_port *ap); 744 - extern void ata_host_stop (struct ata_host_set *host_set); 745 + extern void ata_host_stop (struct ata_host *host); 745 746 extern irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs); 746 747 extern void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf, 747 748 unsigned int buflen, int write_data); ··· 827 828 unsigned long val; 828 829 }; 829 830 830 - extern void ata_pci_host_stop (struct ata_host_set *host_set); 831 + extern void ata_pci_host_stop (struct ata_host *host); 831 832 extern struct ata_probe_ent * 832 833 ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int portmask); 833 834 extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);