Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge git://git.kernel.org/pub/scm/linux/kernel/git/bart/ide-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/bart/ide-2.6: (53 commits)
ide: use try_to_identify() in ide_driveid_update()
ide: clear drive IRQ after re-enabling local IRQs in ide_driveid_update()
ide: sanitize SELECT_MASK() usage in ide_driveid_update()
ide: classify device type in do_probe()
ide: remove broken EXABYTENEST support
ide: shorten timeout value in ide_driveid_update()
ide: propagate AltStatus workarounds to ide_driveid_update()
ide: fix kmalloc() failure handling in ide_driveid_update()
mn10300: remove <asm/ide.h>
frv: remove <asm/ide.h>
ide: remove pciirq argument from ide_pci_setup_ports()
ide: fix ->init_chipset method to return 'int' value
ide: remove try_to_identify() wrapper
ide: remove no longer needed IRQ auto-probing from try_to_identify() (v2)
ide: remove no longer needed IRQ fallback code from hwif_init()
amd74xx: remove no longer needed ->init_hwif method
ide: remove no longer needed IDE_HFLAG[_FORCE]_LEGACY_IRQS
ide: use ide_pci_is_in_compatibility_mode() in ide_pci_init_{one,two}()
ide: use pci_get_legacy_ide_irq() in ide_pci_init_{one,two}()
ide: handle IDE_HFLAG[_FORCE]_LEGACY_IRQS in ide_pci_init_{one,two}()
...

+1573 -1958
+8
drivers/ide/Kconfig
··· 56 56 57 57 comment "Please see Documentation/ide/ide.txt for help/info on IDE drives" 58 58 59 + config IDE_XFER_MODE 60 + bool 61 + 59 62 config IDE_TIMINGS 60 63 bool 64 + select IDE_XFER_MODE 61 65 62 66 config IDE_ATAPI 63 67 bool ··· 702 698 config BLK_DEV_IDE_AU1XXX 703 699 bool "IDE for AMD Alchemy Au1200" 704 700 depends on SOC_AU1200 701 + select IDE_XFER_MODE 705 702 choice 706 703 prompt "IDE Mode for AMD Alchemy Au1200" 707 704 default CONFIG_BLK_DEV_IDE_AU1XXX_PIO_DBDMA ··· 876 871 877 872 config BLK_DEV_DTC2278 878 873 tristate "DTC-2278 support" 874 + select IDE_XFER_MODE 879 875 select IDE_LEGACY 880 876 help 881 877 This driver is enabled at runtime using the "dtc2278.probe" kernel ··· 908 902 909 903 config BLK_DEV_UMC8672 910 904 tristate "UMC-8672 support" 905 + select IDE_XFER_MODE 911 906 select IDE_LEGACY 912 907 help 913 908 This driver is enabled at runtime using the "umc8672.probe" kernel ··· 922 915 config BLK_DEV_IDEDMA 923 916 def_bool BLK_DEV_IDEDMA_SFF || \ 924 917 BLK_DEV_IDEDMA_ICS || BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA 918 + select IDE_XFER_MODE 925 919 926 920 endif # IDE
+3 -1
drivers/ide/Makefile
··· 5 5 EXTRA_CFLAGS += -Idrivers/ide 6 6 7 7 ide-core-y += ide.o ide-ioctls.o ide-io.o ide-iops.o ide-lib.o ide-probe.o \ 8 - ide-taskfile.o ide-pm.o ide-park.o ide-pio-blacklist.o ide-sysfs.o 8 + ide-taskfile.o ide-pm.o ide-park.o ide-sysfs.o ide-devsets.o \ 9 + ide-io-std.o ide-eh.o 9 10 10 11 # core IDE code 12 + ide-core-$(CONFIG_IDE_XFER_MODE) += ide-pio-blacklist.o ide-xfer-mode.o 11 13 ide-core-$(CONFIG_IDE_TIMINGS) += ide-timings.o 12 14 ide-core-$(CONFIG_IDE_ATAPI) += ide-atapi.o 13 15 ide-core-$(CONFIG_BLK_DEV_IDEPCI) += setup-pci.o
+2 -2
drivers/ide/aec62xx.c
··· 139 139 drive->hwif->port_ops->set_dma_mode(drive, pio + XFER_PIO_0); 140 140 } 141 141 142 - static unsigned int init_chipset_aec62xx(struct pci_dev *dev) 142 + static int init_chipset_aec62xx(struct pci_dev *dev) 143 143 { 144 144 /* These are necessary to get AEC6280 Macintosh cards to work */ 145 145 if ((dev->device == PCI_DEVICE_ID_ARTOP_ATP865) || ··· 156 156 pci_write_config_byte(dev, 0x4a, reg4ah | 0x80); 157 157 } 158 158 159 - return dev->irq; 159 + return 0; 160 160 } 161 161 162 162 static u8 atp86x_cable_detect(ide_hwif_t *hwif)
+1 -1
drivers/ide/alim15x3.c
··· 212 212 * appropriate also sets up the 1533 southbridge. 213 213 */ 214 214 215 - static unsigned int init_chipset_ali15x3(struct pci_dev *dev) 215 + static int init_chipset_ali15x3(struct pci_dev *dev) 216 216 { 217 217 unsigned long flags; 218 218 u8 tmpbyte;
+2 -12
drivers/ide/amd74xx.c
··· 140 140 * The initialization callback. Initialize drive independent registers. 141 141 */ 142 142 143 - static unsigned int init_chipset_amd74xx(struct pci_dev *dev) 143 + static int init_chipset_amd74xx(struct pci_dev *dev) 144 144 { 145 145 u8 t = 0, offset = amd_offset(dev); 146 146 ··· 172 172 t |= 0xf0; 173 173 pci_write_config_byte(dev, AMD_IDE_CONFIG + offset, t); 174 174 175 - return dev->irq; 175 + return 0; 176 176 } 177 177 178 178 static u8 amd_cable_detect(ide_hwif_t *hwif) ··· 181 181 return ATA_CBL_PATA80; 182 182 else 183 183 return ATA_CBL_PATA40; 184 - } 185 - 186 - static void __devinit init_hwif_amd74xx(ide_hwif_t *hwif) 187 - { 188 - struct pci_dev *dev = to_pci_dev(hwif->dev); 189 - 190 - if (hwif->irq == 0) /* 0 is bogus but will do for now */ 191 - hwif->irq = pci_get_legacy_ide_irq(dev, hwif->channel); 192 184 } 193 185 194 186 static const struct ide_port_ops amd_port_ops = { ··· 199 207 { \ 200 208 .name = DRV_NAME, \ 201 209 .init_chipset = init_chipset_amd74xx, \ 202 - .init_hwif = init_hwif_amd74xx, \ 203 210 .enablebits = {{0x40,0x02,0x02}, {0x40,0x01,0x01}}, \ 204 211 .port_ops = &amd_port_ops, \ 205 212 .host_flags = IDE_HFLAGS_AMD, \ ··· 212 221 { \ 213 222 .name = DRV_NAME, \ 214 223 .init_chipset = init_chipset_amd74xx, \ 215 - .init_hwif = init_hwif_amd74xx, \ 216 224 .enablebits = {{0x50,0x02,0x02}, {0x50,0x01,0x01}}, \ 217 225 .port_ops = &amd_port_ops, \ 218 226 .host_flags = IDE_HFLAGS_AMD, \
+1 -2
drivers/ide/atiixp.c
··· 142 142 .name = DRV_NAME, 143 143 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}}, 144 144 .port_ops = &atiixp_port_ops, 145 - .host_flags = IDE_HFLAG_LEGACY_IRQS, 146 145 .pio_mask = ATA_PIO4, 147 146 .mwdma_mask = ATA_MWDMA2, 148 147 .udma_mask = ATA_UDMA5, ··· 150 151 .name = DRV_NAME, 151 152 .enablebits = {{0x48,0x01,0x00}, {0x00,0x00,0x00}}, 152 153 .port_ops = &atiixp_port_ops, 153 - .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_LEGACY_IRQS, 154 + .host_flags = IDE_HFLAG_SINGLE, 154 155 .pio_mask = ATA_PIO4, 155 156 .mwdma_mask = ATA_MWDMA2, 156 157 .udma_mask = ATA_UDMA5,
+1 -1
drivers/ide/cmd64x.c
··· 333 333 return (dma_stat & 7) != 4; 334 334 } 335 335 336 - static unsigned int init_chipset_cmd64x(struct pci_dev *dev) 336 + static int init_chipset_cmd64x(struct pci_dev *dev) 337 337 { 338 338 u8 mrdmode = 0; 339 339
+2 -1
drivers/ide/cs5520.c
··· 133 133 * do all the device setup for us 134 134 */ 135 135 136 - ide_pci_setup_ports(dev, d, 14, &hw[0], &hws[0]); 136 + ide_pci_setup_ports(dev, d, &hw[0], &hws[0]); 137 + hw[0].irq = 14; 137 138 138 139 return ide_host_add(d, hws, NULL); 139 140 }
+1 -1
drivers/ide/cs5530.c
··· 135 135 * Initialize the cs5530 bridge for reliable IDE DMA operation. 136 136 */ 137 137 138 - static unsigned int init_chipset_cs5530(struct pci_dev *dev) 138 + static int init_chipset_cs5530(struct pci_dev *dev) 139 139 { 140 140 struct pci_dev *master_0 = NULL, *cs5530_0 = NULL; 141 141
+1 -1
drivers/ide/delkin_cb.c
··· 46 46 .quirkproc = ide_undecoded_slave, 47 47 }; 48 48 49 - static unsigned int delkin_cb_init_chipset(struct pci_dev *dev) 49 + static int delkin_cb_init_chipset(struct pci_dev *dev) 50 50 { 51 51 unsigned long base = pci_resource_start(dev, 0); 52 52 int i;
+2 -2
drivers/ide/hpt366.c
··· 995 995 pci_write_config_byte(dev, mcr_addr + 1, new_mcr); 996 996 } 997 997 998 - static unsigned int init_chipset_hpt366(struct pci_dev *dev) 998 + static int init_chipset_hpt366(struct pci_dev *dev) 999 999 { 1000 1000 unsigned long io_base = pci_resource_start(dev, 4); 1001 1001 struct hpt_info *info = hpt3xx_get_info(&dev->dev); ··· 1237 1237 hpt3xx_disable_fast_irq(dev, 0x50); 1238 1238 hpt3xx_disable_fast_irq(dev, 0x54); 1239 1239 1240 - return dev->irq; 1240 + return 0; 1241 1241 } 1242 1242 1243 1243 static u8 hpt3xx_cable_detect(ide_hwif_t *hwif)
+52 -162
drivers/ide/ide-acpi.c
··· 20 20 #include <acpi/acpi_bus.h> 21 21 22 22 #define REGS_PER_GTF 7 23 - struct taskfile_array { 24 - u8 tfa[REGS_PER_GTF]; /* regs. 0x1f1 - 0x1f7 */ 25 - }; 26 23 27 24 struct GTM_buffer { 28 25 u32 PIO_speed0; ··· 86 89 { } /* terminate list */ 87 90 }; 88 91 89 - static int ide_acpi_blacklist(void) 92 + int ide_acpi_init(void) 90 93 { 91 - static int done; 92 - if (done) 93 - return 0; 94 - done = 1; 95 94 dmi_check_system(ide_acpi_dmi_table); 96 95 return 0; 97 96 } ··· 195 202 } 196 203 197 204 /** 198 - * ide_acpi_drive_get_handle - Get ACPI object handle for a given drive 199 - * @drive: device to locate 200 - * 201 - * Retrieves the object handle of a given drive. According to the ACPI 202 - * spec the drive is a child of the hwif. 203 - * 204 - * Returns handle on success, 0 on error. 205 - */ 206 - static acpi_handle ide_acpi_drive_get_handle(ide_drive_t *drive) 207 - { 208 - ide_hwif_t *hwif = drive->hwif; 209 - int port; 210 - acpi_handle drive_handle; 211 - 212 - if (!hwif->acpidata) 213 - return NULL; 214 - 215 - if (!hwif->acpidata->obj_handle) 216 - return NULL; 217 - 218 - port = hwif->channel ? 
drive->dn - 2: drive->dn; 219 - 220 - DEBPRINT("ENTER: %s at channel#: %d port#: %d\n", 221 - drive->name, hwif->channel, port); 222 - 223 - 224 - /* TBD: could also check ACPI object VALID bits */ 225 - drive_handle = acpi_get_child(hwif->acpidata->obj_handle, port); 226 - DEBPRINT("drive %s handle 0x%p\n", drive->name, drive_handle); 227 - 228 - return drive_handle; 229 - } 230 - 231 - /** 232 205 * do_drive_get_GTF - get the drive bootup default taskfile settings 233 206 * @drive: the drive for which the taskfile settings should be retrieved 234 207 * @gtf_length: number of bytes of _GTF data returned at @gtf_address ··· 216 257 acpi_status status; 217 258 struct acpi_buffer output; 218 259 union acpi_object *out_obj; 219 - ide_hwif_t *hwif = drive->hwif; 220 - struct device *dev = hwif->gendev.parent; 221 260 int err = -ENODEV; 222 - int port; 223 261 224 262 *gtf_length = 0; 225 263 *gtf_address = 0UL; 226 264 *obj_loc = 0UL; 227 265 228 - if (ide_noacpi) 229 - return 0; 230 - 231 - if (!dev) { 232 - DEBPRINT("no PCI device for %s\n", hwif->name); 233 - goto out; 234 - } 235 - 236 - if (!hwif->acpidata) { 237 - DEBPRINT("no ACPI data for %s\n", hwif->name); 238 - goto out; 239 - } 240 - 241 - port = hwif->channel ? drive->dn - 2: drive->dn; 242 - 243 - DEBPRINT("ENTER: %s at %s, port#: %d, hard_port#: %d\n", 244 - hwif->name, dev_name(dev), port, hwif->channel); 245 - 246 - if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0) { 247 - DEBPRINT("%s drive %d:%d not present\n", 248 - hwif->name, hwif->channel, port); 249 - goto out; 250 - } 251 - 252 - /* Get this drive's _ADR info. if not already known. 
*/ 253 266 if (!drive->acpidata->obj_handle) { 254 - drive->acpidata->obj_handle = ide_acpi_drive_get_handle(drive); 255 - if (!drive->acpidata->obj_handle) { 256 - DEBPRINT("No ACPI object found for %s\n", 257 - drive->name); 258 - goto out; 259 - } 267 + DEBPRINT("No ACPI object found for %s\n", drive->name); 268 + goto out; 260 269 } 261 270 262 271 /* Setting up output buffer */ ··· 282 355 } 283 356 284 357 /** 285 - * taskfile_load_raw - send taskfile registers to drive 286 - * @drive: drive to which output is sent 287 - * @gtf: raw ATA taskfile register set (0x1f1 - 0x1f7) 288 - * 289 - * Outputs IDE taskfile to the drive. 290 - */ 291 - static int taskfile_load_raw(ide_drive_t *drive, 292 - const struct taskfile_array *gtf) 293 - { 294 - ide_task_t args; 295 - int err = 0; 296 - 297 - DEBPRINT("(0x1f1-1f7): hex: " 298 - "%02x %02x %02x %02x %02x %02x %02x\n", 299 - gtf->tfa[0], gtf->tfa[1], gtf->tfa[2], 300 - gtf->tfa[3], gtf->tfa[4], gtf->tfa[5], gtf->tfa[6]); 301 - 302 - memset(&args, 0, sizeof(ide_task_t)); 303 - 304 - /* convert gtf to IDE Taskfile */ 305 - memcpy(&args.tf_array[7], &gtf->tfa, 7); 306 - args.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; 307 - 308 - if (!ide_acpigtf) { 309 - DEBPRINT("_GTF execution disabled\n"); 310 - return err; 311 - } 312 - 313 - err = ide_no_data_taskfile(drive, &args); 314 - if (err) 315 - printk(KERN_ERR "%s: ide_no_data_taskfile failed: %u\n", 316 - __func__, err); 317 - 318 - return err; 319 - } 320 - 321 - /** 322 358 * do_drive_set_taskfiles - write the drive taskfile settings from _GTF 323 359 * @drive: the drive to which the taskfile command should be sent 324 360 * @gtf_length: total number of bytes of _GTF taskfiles ··· 294 404 unsigned int gtf_length, 295 405 unsigned long gtf_address) 296 406 { 297 - int rc = -ENODEV, err; 407 + int rc = 0, err; 298 408 int gtf_count = gtf_length / REGS_PER_GTF; 299 409 int ix; 300 - struct taskfile_array *gtf; 301 - 302 - if (ide_noacpi) 303 - return 0; 304 - 305 - 
DEBPRINT("ENTER: %s, hard_port#: %d\n", drive->name, drive->dn); 306 - 307 - if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0) 308 - goto out; 309 - 310 - if (!gtf_count) /* shouldn't be here */ 311 - goto out; 312 410 313 411 DEBPRINT("total GTF bytes=%u (0x%x), gtf_count=%d, addr=0x%lx\n", 314 412 gtf_length, gtf_length, gtf_count, gtf_address); 315 413 316 - if (gtf_length % REGS_PER_GTF) { 317 - printk(KERN_ERR "%s: unexpected GTF length (%d)\n", 318 - __func__, gtf_length); 319 - goto out; 320 - } 321 - 322 - rc = 0; 414 + /* send all taskfile registers (0x1f1-0x1f7) *in*that*order* */ 323 415 for (ix = 0; ix < gtf_count; ix++) { 324 - gtf = (struct taskfile_array *) 325 - (gtf_address + ix * REGS_PER_GTF); 416 + u8 *gtf = (u8 *)(gtf_address + ix * REGS_PER_GTF); 417 + ide_task_t task; 326 418 327 - /* send all TaskFile registers (0x1f1-0x1f7) *in*that*order* */ 328 - err = taskfile_load_raw(drive, gtf); 329 - if (err) 419 + DEBPRINT("(0x1f1-1f7): " 420 + "hex: %02x %02x %02x %02x %02x %02x %02x\n", 421 + gtf[0], gtf[1], gtf[2], 422 + gtf[3], gtf[4], gtf[5], gtf[6]); 423 + 424 + if (!ide_acpigtf) { 425 + DEBPRINT("_GTF execution disabled\n"); 426 + continue; 427 + } 428 + 429 + /* convert GTF to taskfile */ 430 + memset(&task, 0, sizeof(ide_task_t)); 431 + memcpy(&task.tf_array[7], gtf, REGS_PER_GTF); 432 + task.tf_flags = IDE_TFLAG_TF | IDE_TFLAG_DEVICE; 433 + 434 + err = ide_no_data_taskfile(drive, &task); 435 + if (err) { 436 + printk(KERN_ERR "%s: ide_no_data_taskfile failed: %u\n", 437 + __func__, err); 330 438 rc = err; 439 + } 331 440 } 332 441 333 - out: 334 442 return rc; 335 443 } 336 444 ··· 535 647 DEBPRINT("no ACPI data for %s\n", hwif->name); 536 648 return; 537 649 } 650 + 538 651 /* channel first and then drives for power on and verse versa for power off */ 539 652 if (on) 540 653 acpi_bus_set_power(hwif->acpidata->obj_handle, ACPI_STATE_D0); 541 654 542 - ide_port_for_each_dev(i, drive, hwif) { 543 - if (!drive->acpidata->obj_handle) 544 - 
drive->acpidata->obj_handle = ide_acpi_drive_get_handle(drive); 545 - 546 - if (drive->acpidata->obj_handle && 547 - (drive->dev_flags & IDE_DFLAG_PRESENT)) { 655 + ide_port_for_each_present_dev(i, drive, hwif) { 656 + if (drive->acpidata->obj_handle) 548 657 acpi_bus_set_power(drive->acpidata->obj_handle, 549 - on? ACPI_STATE_D0: ACPI_STATE_D3); 550 - } 658 + on ? ACPI_STATE_D0 : ACPI_STATE_D3); 551 659 } 660 + 552 661 if (!on) 553 662 acpi_bus_set_power(hwif->acpidata->obj_handle, ACPI_STATE_D3); 554 663 } 555 664 556 665 /** 557 - * ide_acpi_init - initialize the ACPI link for an IDE interface 666 + * ide_acpi_init_port - initialize the ACPI link for an IDE interface 558 667 * @hwif: target IDE interface (channel) 559 668 * 560 669 * The ACPI spec is not quite clear when the drive identify buffer ··· 561 676 * So we get the information during startup; but this means that 562 677 * any changes during run-time will be lost after resume. 563 678 */ 564 - void ide_acpi_init(ide_hwif_t *hwif) 679 + void ide_acpi_init_port(ide_hwif_t *hwif) 565 680 { 566 - ide_acpi_blacklist(); 567 - 568 681 hwif->acpidata = kzalloc(sizeof(struct ide_acpi_hwif_link), GFP_KERNEL); 569 682 if (!hwif->acpidata) 570 683 return; ··· 591 708 hwif->devices[0]->acpidata = &hwif->acpidata->master; 592 709 hwif->devices[1]->acpidata = &hwif->acpidata->slave; 593 710 594 - /* 595 - * Send IDENTIFY for each drive 596 - */ 597 - ide_port_for_each_dev(i, drive, hwif) { 598 - memset(drive->acpidata, 0, sizeof(*drive->acpidata)); 711 + /* get _ADR info for each device */ 712 + ide_port_for_each_present_dev(i, drive, hwif) { 713 + acpi_handle dev_handle; 599 714 600 - if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0) 601 - continue; 715 + DEBPRINT("ENTER: %s at channel#: %d port#: %d\n", 716 + drive->name, hwif->channel, drive->dn & 1); 602 717 718 + /* TBD: could also check ACPI object VALID bits */ 719 + dev_handle = acpi_get_child(hwif->acpidata->obj_handle, 720 + drive->dn & 1); 721 + 722 + 
DEBPRINT("drive %s handle 0x%p\n", drive->name, dev_handle); 723 + 724 + drive->acpidata->obj_handle = dev_handle; 725 + } 726 + 727 + /* send IDENTIFY for each device */ 728 + ide_port_for_each_present_dev(i, drive, hwif) { 603 729 err = taskfile_lib_get_identify(drive, drive->acpidata->idbuff); 604 730 if (err) 605 731 DEBPRINT("identify device %s failed (%d)\n", ··· 628 736 ide_acpi_get_timing(hwif); 629 737 ide_acpi_push_timing(hwif); 630 738 631 - ide_port_for_each_dev(i, drive, hwif) { 632 - if (drive->dev_flags & IDE_DFLAG_PRESENT) 633 - /* Execute ACPI startup code */ 634 - ide_acpi_exec_tfs(drive); 739 + ide_port_for_each_present_dev(i, drive, hwif) { 740 + ide_acpi_exec_tfs(drive); 635 741 } 636 742 }
+39 -2
drivers/ide/ide-atapi.c
··· 149 149 memcpy(rq->cmd, pc->c, 12); 150 150 if (drive->media == ide_tape) 151 151 rq->cmd[13] = REQ_IDETAPE_PC1; 152 - ide_do_drive_cmd(drive, rq); 152 + 153 + drive->hwif->rq = NULL; 154 + 155 + elv_add_request(drive->queue, rq, ELEVATOR_INSERT_FRONT, 0); 153 156 } 154 157 155 158 /* ··· 299 296 return 0; 300 297 } 301 298 EXPORT_SYMBOL_GPL(ide_cd_get_xferlen); 299 + 300 + void ide_read_bcount_and_ireason(ide_drive_t *drive, u16 *bcount, u8 *ireason) 301 + { 302 + ide_task_t task; 303 + 304 + memset(&task, 0, sizeof(task)); 305 + task.tf_flags = IDE_TFLAG_IN_LBAH | IDE_TFLAG_IN_LBAM | 306 + IDE_TFLAG_IN_NSECT; 307 + 308 + drive->hwif->tp_ops->tf_read(drive, &task); 309 + 310 + *bcount = (task.tf.lbah << 8) | task.tf.lbam; 311 + *ireason = task.tf.nsect & 3; 312 + } 313 + EXPORT_SYMBOL_GPL(ide_read_bcount_and_ireason); 302 314 303 315 /* 304 316 * This is the usual interrupt handler which will be called during a packet ··· 472 454 /* And set the interrupt handler again */ 473 455 ide_set_handler(drive, ide_pc_intr, timeout, NULL); 474 456 return ide_started; 457 + } 458 + 459 + static void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount) 460 + { 461 + ide_hwif_t *hwif = drive->hwif; 462 + ide_task_t task; 463 + u8 dma = drive->dma; 464 + 465 + memset(&task, 0, sizeof(task)); 466 + task.tf_flags = IDE_TFLAG_OUT_LBAH | IDE_TFLAG_OUT_LBAM | 467 + IDE_TFLAG_OUT_FEATURE | tf_flags; 468 + task.tf.feature = dma; /* Use PIO/DMA */ 469 + task.tf.lbam = bcount & 0xff; 470 + task.tf.lbah = (bcount >> 8) & 0xff; 471 + 472 + ide_tf_dump(drive->name, &task.tf); 473 + hwif->tp_ops->set_irq(hwif, 1); 474 + SELECT_MASK(drive, 0); 475 + hwif->tp_ops->tf_load(drive, &task); 475 476 } 476 477 477 478 static u8 ide_read_ireason(ide_drive_t *drive) ··· 666 629 : WAIT_TAPE_CMD; 667 630 } 668 631 669 - ide_pktcmd_tf_load(drive, tf_flags, bcount, drive->dma); 632 + ide_pktcmd_tf_load(drive, tf_flags, bcount); 670 633 671 634 /* Issue the packet command */ 672 635 if 
(drive->atapi_flags & IDE_AFLAG_DRQ_INTERRUPT) {
+3 -1
drivers/ide/ide-cd.c
··· 242 242 ide_debug_log(IDE_DBG_SENSE, "failed_cmd: 0x%x\n", 243 243 failed_command->cmd[0]); 244 244 245 - ide_do_drive_cmd(drive, rq); 245 + drive->hwif->rq = NULL; 246 + 247 + elv_add_request(drive->queue, rq, ELEVATOR_INSERT_FRONT, 0); 246 248 } 247 249 248 250 static void cdrom_end_request(ide_drive_t *drive, int uptodate)
+190
drivers/ide/ide-devsets.c
··· 1 + 2 + #include <linux/kernel.h> 3 + #include <linux/ide.h> 4 + 5 + DEFINE_MUTEX(ide_setting_mtx); 6 + 7 + ide_devset_get(io_32bit, io_32bit); 8 + 9 + static int set_io_32bit(ide_drive_t *drive, int arg) 10 + { 11 + if (drive->dev_flags & IDE_DFLAG_NO_IO_32BIT) 12 + return -EPERM; 13 + 14 + if (arg < 0 || arg > 1 + (SUPPORT_VLB_SYNC << 1)) 15 + return -EINVAL; 16 + 17 + drive->io_32bit = arg; 18 + 19 + return 0; 20 + } 21 + 22 + ide_devset_get_flag(ksettings, IDE_DFLAG_KEEP_SETTINGS); 23 + 24 + static int set_ksettings(ide_drive_t *drive, int arg) 25 + { 26 + if (arg < 0 || arg > 1) 27 + return -EINVAL; 28 + 29 + if (arg) 30 + drive->dev_flags |= IDE_DFLAG_KEEP_SETTINGS; 31 + else 32 + drive->dev_flags &= ~IDE_DFLAG_KEEP_SETTINGS; 33 + 34 + return 0; 35 + } 36 + 37 + ide_devset_get_flag(using_dma, IDE_DFLAG_USING_DMA); 38 + 39 + static int set_using_dma(ide_drive_t *drive, int arg) 40 + { 41 + #ifdef CONFIG_BLK_DEV_IDEDMA 42 + int err = -EPERM; 43 + 44 + if (arg < 0 || arg > 1) 45 + return -EINVAL; 46 + 47 + if (ata_id_has_dma(drive->id) == 0) 48 + goto out; 49 + 50 + if (drive->hwif->dma_ops == NULL) 51 + goto out; 52 + 53 + err = 0; 54 + 55 + if (arg) { 56 + if (ide_set_dma(drive)) 57 + err = -EIO; 58 + } else 59 + ide_dma_off(drive); 60 + 61 + out: 62 + return err; 63 + #else 64 + if (arg < 0 || arg > 1) 65 + return -EINVAL; 66 + 67 + return -EPERM; 68 + #endif 69 + } 70 + 71 + /* 72 + * handle HDIO_SET_PIO_MODE ioctl abusers here, eventually it will go away 73 + */ 74 + static int set_pio_mode_abuse(ide_hwif_t *hwif, u8 req_pio) 75 + { 76 + switch (req_pio) { 77 + case 202: 78 + case 201: 79 + case 200: 80 + case 102: 81 + case 101: 82 + case 100: 83 + return (hwif->host_flags & IDE_HFLAG_ABUSE_DMA_MODES) ? 1 : 0; 84 + case 9: 85 + case 8: 86 + return (hwif->host_flags & IDE_HFLAG_ABUSE_PREFETCH) ? 1 : 0; 87 + case 7: 88 + case 6: 89 + return (hwif->host_flags & IDE_HFLAG_ABUSE_FAST_DEVSEL) ? 
1 : 0; 90 + default: 91 + return 0; 92 + } 93 + } 94 + 95 + static int set_pio_mode(ide_drive_t *drive, int arg) 96 + { 97 + ide_hwif_t *hwif = drive->hwif; 98 + const struct ide_port_ops *port_ops = hwif->port_ops; 99 + 100 + if (arg < 0 || arg > 255) 101 + return -EINVAL; 102 + 103 + if (port_ops == NULL || port_ops->set_pio_mode == NULL || 104 + (hwif->host_flags & IDE_HFLAG_NO_SET_MODE)) 105 + return -ENOSYS; 106 + 107 + if (set_pio_mode_abuse(drive->hwif, arg)) { 108 + if (arg == 8 || arg == 9) { 109 + unsigned long flags; 110 + 111 + /* take lock for IDE_DFLAG_[NO_]UNMASK/[NO_]IO_32BIT */ 112 + spin_lock_irqsave(&hwif->lock, flags); 113 + port_ops->set_pio_mode(drive, arg); 114 + spin_unlock_irqrestore(&hwif->lock, flags); 115 + } else 116 + port_ops->set_pio_mode(drive, arg); 117 + } else { 118 + int keep_dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA); 119 + 120 + ide_set_pio(drive, arg); 121 + 122 + if (hwif->host_flags & IDE_HFLAG_SET_PIO_MODE_KEEP_DMA) { 123 + if (keep_dma) 124 + ide_dma_on(drive); 125 + } 126 + } 127 + 128 + return 0; 129 + } 130 + 131 + ide_devset_get_flag(unmaskirq, IDE_DFLAG_UNMASK); 132 + 133 + static int set_unmaskirq(ide_drive_t *drive, int arg) 134 + { 135 + if (drive->dev_flags & IDE_DFLAG_NO_UNMASK) 136 + return -EPERM; 137 + 138 + if (arg < 0 || arg > 1) 139 + return -EINVAL; 140 + 141 + if (arg) 142 + drive->dev_flags |= IDE_DFLAG_UNMASK; 143 + else 144 + drive->dev_flags &= ~IDE_DFLAG_UNMASK; 145 + 146 + return 0; 147 + } 148 + 149 + ide_ext_devset_rw_sync(io_32bit, io_32bit); 150 + ide_ext_devset_rw_sync(keepsettings, ksettings); 151 + ide_ext_devset_rw_sync(unmaskirq, unmaskirq); 152 + ide_ext_devset_rw_sync(using_dma, using_dma); 153 + __IDE_DEVSET(pio_mode, DS_SYNC, NULL, set_pio_mode); 154 + 155 + int ide_devset_execute(ide_drive_t *drive, const struct ide_devset *setting, 156 + int arg) 157 + { 158 + struct request_queue *q = drive->queue; 159 + struct request *rq; 160 + int ret = 0; 161 + 162 + if (!(setting->flags & 
DS_SYNC)) 163 + return setting->set(drive, arg); 164 + 165 + rq = blk_get_request(q, READ, __GFP_WAIT); 166 + rq->cmd_type = REQ_TYPE_SPECIAL; 167 + rq->cmd_len = 5; 168 + rq->cmd[0] = REQ_DEVSET_EXEC; 169 + *(int *)&rq->cmd[1] = arg; 170 + rq->special = setting->set; 171 + 172 + if (blk_execute_rq(q, NULL, rq, 0)) 173 + ret = rq->errors; 174 + blk_put_request(rq); 175 + 176 + return ret; 177 + } 178 + 179 + ide_startstop_t ide_do_devset(ide_drive_t *drive, struct request *rq) 180 + { 181 + int err, (*setfunc)(ide_drive_t *, int) = rq->special; 182 + 183 + err = setfunc(drive, *(int *)&rq->cmd[1]); 184 + if (err) 185 + rq->errors = err; 186 + else 187 + err = 1; 188 + ide_end_request(drive, err, 0); 189 + return ide_stopped; 190 + }
+57
drivers/ide/ide-dma.c
··· 470 470 } 471 471 EXPORT_SYMBOL_GPL(ide_dma_timeout); 472 472 473 + /* 474 + * un-busy the port etc, and clear any pending DMA status. we want to 475 + * retry the current request in pio mode instead of risking tossing it 476 + * all away 477 + */ 478 + ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error) 479 + { 480 + ide_hwif_t *hwif = drive->hwif; 481 + struct request *rq; 482 + ide_startstop_t ret = ide_stopped; 483 + 484 + /* 485 + * end current dma transaction 486 + */ 487 + 488 + if (error < 0) { 489 + printk(KERN_WARNING "%s: DMA timeout error\n", drive->name); 490 + (void)hwif->dma_ops->dma_end(drive); 491 + ret = ide_error(drive, "dma timeout error", 492 + hwif->tp_ops->read_status(hwif)); 493 + } else { 494 + printk(KERN_WARNING "%s: DMA timeout retry\n", drive->name); 495 + hwif->dma_ops->dma_timeout(drive); 496 + } 497 + 498 + /* 499 + * disable dma for now, but remember that we did so because of 500 + * a timeout -- we'll reenable after we finish this next request 501 + * (or rather the first chunk of it) in pio. 502 + */ 503 + drive->dev_flags |= IDE_DFLAG_DMA_PIO_RETRY; 504 + drive->retry_pio++; 505 + ide_dma_off_quietly(drive); 506 + 507 + /* 508 + * un-busy drive etc and make sure request is sane 509 + */ 510 + 511 + rq = hwif->rq; 512 + if (!rq) 513 + goto out; 514 + 515 + hwif->rq = NULL; 516 + 517 + rq->errors = 0; 518 + 519 + if (!rq->bio) 520 + goto out; 521 + 522 + rq->sector = rq->bio->bi_sector; 523 + rq->current_nr_sectors = bio_iovec(rq->bio)->bv_len >> 9; 524 + rq->hard_cur_sectors = rq->current_nr_sectors; 525 + rq->buffer = bio_data(rq->bio); 526 + out: 527 + return ret; 528 + } 529 + 473 530 void ide_release_dma_engine(ide_hwif_t *hwif) 474 531 { 475 532 if (hwif->dmatable_cpu) {
+428
drivers/ide/ide-eh.c
··· 1 + 2 + #include <linux/kernel.h> 3 + #include <linux/ide.h> 4 + #include <linux/delay.h> 5 + 6 + static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, 7 + u8 stat, u8 err) 8 + { 9 + ide_hwif_t *hwif = drive->hwif; 10 + 11 + if ((stat & ATA_BUSY) || 12 + ((stat & ATA_DF) && (drive->dev_flags & IDE_DFLAG_NOWERR) == 0)) { 13 + /* other bits are useless when BUSY */ 14 + rq->errors |= ERROR_RESET; 15 + } else if (stat & ATA_ERR) { 16 + /* err has different meaning on cdrom and tape */ 17 + if (err == ATA_ABORTED) { 18 + if ((drive->dev_flags & IDE_DFLAG_LBA) && 19 + /* some newer drives don't support ATA_CMD_INIT_DEV_PARAMS */ 20 + hwif->tp_ops->read_status(hwif) == ATA_CMD_INIT_DEV_PARAMS) 21 + return ide_stopped; 22 + } else if ((err & BAD_CRC) == BAD_CRC) { 23 + /* UDMA crc error, just retry the operation */ 24 + drive->crc_count++; 25 + } else if (err & (ATA_BBK | ATA_UNC)) { 26 + /* retries won't help these */ 27 + rq->errors = ERROR_MAX; 28 + } else if (err & ATA_TRK0NF) { 29 + /* help it find track zero */ 30 + rq->errors |= ERROR_RECAL; 31 + } 32 + } 33 + 34 + if ((stat & ATA_DRQ) && rq_data_dir(rq) == READ && 35 + (hwif->host_flags & IDE_HFLAG_ERROR_STOPS_FIFO) == 0) { 36 + int nsect = drive->mult_count ? 
drive->mult_count : 1; 37 + 38 + ide_pad_transfer(drive, READ, nsect * SECTOR_SIZE); 39 + } 40 + 41 + if (rq->errors >= ERROR_MAX || blk_noretry_request(rq)) { 42 + ide_kill_rq(drive, rq); 43 + return ide_stopped; 44 + } 45 + 46 + if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ)) 47 + rq->errors |= ERROR_RESET; 48 + 49 + if ((rq->errors & ERROR_RESET) == ERROR_RESET) { 50 + ++rq->errors; 51 + return ide_do_reset(drive); 52 + } 53 + 54 + if ((rq->errors & ERROR_RECAL) == ERROR_RECAL) 55 + drive->special.b.recalibrate = 1; 56 + 57 + ++rq->errors; 58 + 59 + return ide_stopped; 60 + } 61 + 62 + static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq, 63 + u8 stat, u8 err) 64 + { 65 + ide_hwif_t *hwif = drive->hwif; 66 + 67 + if ((stat & ATA_BUSY) || 68 + ((stat & ATA_DF) && (drive->dev_flags & IDE_DFLAG_NOWERR) == 0)) { 69 + /* other bits are useless when BUSY */ 70 + rq->errors |= ERROR_RESET; 71 + } else { 72 + /* add decoding error stuff */ 73 + } 74 + 75 + if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ)) 76 + /* force an abort */ 77 + hwif->tp_ops->exec_command(hwif, ATA_CMD_IDLEIMMEDIATE); 78 + 79 + if (rq->errors >= ERROR_MAX) { 80 + ide_kill_rq(drive, rq); 81 + } else { 82 + if ((rq->errors & ERROR_RESET) == ERROR_RESET) { 83 + ++rq->errors; 84 + return ide_do_reset(drive); 85 + } 86 + ++rq->errors; 87 + } 88 + 89 + return ide_stopped; 90 + } 91 + 92 + static ide_startstop_t __ide_error(ide_drive_t *drive, struct request *rq, 93 + u8 stat, u8 err) 94 + { 95 + if (drive->media == ide_disk) 96 + return ide_ata_error(drive, rq, stat, err); 97 + return ide_atapi_error(drive, rq, stat, err); 98 + } 99 + 100 + /** 101 + * ide_error - handle an error on the IDE 102 + * @drive: drive the error occurred on 103 + * @msg: message to report 104 + * @stat: status bits 105 + * 106 + * ide_error() takes action based on the error returned by the drive. 107 + * For normal I/O that may well include retries. 
We deal with 108 + * both new-style (taskfile) and old style command handling here. 109 + * In the case of taskfile command handling there is work left to 110 + * do 111 + */ 112 + 113 + ide_startstop_t ide_error(ide_drive_t *drive, const char *msg, u8 stat) 114 + { 115 + struct request *rq; 116 + u8 err; 117 + 118 + err = ide_dump_status(drive, msg, stat); 119 + 120 + rq = drive->hwif->rq; 121 + if (rq == NULL) 122 + return ide_stopped; 123 + 124 + /* retry only "normal" I/O: */ 125 + if (!blk_fs_request(rq)) { 126 + rq->errors = 1; 127 + ide_end_drive_cmd(drive, stat, err); 128 + return ide_stopped; 129 + } 130 + 131 + return __ide_error(drive, rq, stat, err); 132 + } 133 + EXPORT_SYMBOL_GPL(ide_error); 134 + 135 + static inline void ide_complete_drive_reset(ide_drive_t *drive, int err) 136 + { 137 + struct request *rq = drive->hwif->rq; 138 + 139 + if (rq && blk_special_request(rq) && rq->cmd[0] == REQ_DRIVE_RESET) 140 + ide_end_request(drive, err ? err : 1, 0); 141 + } 142 + 143 + /* needed below */ 144 + static ide_startstop_t do_reset1(ide_drive_t *, int); 145 + 146 + /* 147 + * atapi_reset_pollfunc() gets invoked to poll the interface for completion 148 + * every 50ms during an atapi drive reset operation. If the drive has not yet 149 + * responded, and we have not yet hit our maximum waiting time, then the timer 150 + * is restarted for another 50ms. 
151 + */ 152 + static ide_startstop_t atapi_reset_pollfunc(ide_drive_t *drive) 153 + { 154 + ide_hwif_t *hwif = drive->hwif; 155 + u8 stat; 156 + 157 + SELECT_DRIVE(drive); 158 + udelay(10); 159 + stat = hwif->tp_ops->read_status(hwif); 160 + 161 + if (OK_STAT(stat, 0, ATA_BUSY)) 162 + printk(KERN_INFO "%s: ATAPI reset complete\n", drive->name); 163 + else { 164 + if (time_before(jiffies, hwif->poll_timeout)) { 165 + ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20, 166 + NULL); 167 + /* continue polling */ 168 + return ide_started; 169 + } 170 + /* end of polling */ 171 + hwif->polling = 0; 172 + printk(KERN_ERR "%s: ATAPI reset timed-out, status=0x%02x\n", 173 + drive->name, stat); 174 + /* do it the old fashioned way */ 175 + return do_reset1(drive, 1); 176 + } 177 + /* done polling */ 178 + hwif->polling = 0; 179 + ide_complete_drive_reset(drive, 0); 180 + return ide_stopped; 181 + } 182 + 183 + static void ide_reset_report_error(ide_hwif_t *hwif, u8 err) 184 + { 185 + static const char *err_master_vals[] = 186 + { NULL, "passed", "formatter device error", 187 + "sector buffer error", "ECC circuitry error", 188 + "controlling MPU error" }; 189 + 190 + u8 err_master = err & 0x7f; 191 + 192 + printk(KERN_ERR "%s: reset: master: ", hwif->name); 193 + if (err_master && err_master < 6) 194 + printk(KERN_CONT "%s", err_master_vals[err_master]); 195 + else 196 + printk(KERN_CONT "error (0x%02x?)", err); 197 + if (err & 0x80) 198 + printk(KERN_CONT "; slave: failed"); 199 + printk(KERN_CONT "\n"); 200 + } 201 + 202 + /* 203 + * reset_pollfunc() gets invoked to poll the interface for completion every 50ms 204 + * during an ide reset operation. If the drives have not yet responded, 205 + * and we have not yet hit our maximum waiting time, then the timer is restarted 206 + * for another 50ms. 
/*
 * reset_pollfunc() gets invoked to poll the interface for completion every 50ms
 * during an ide reset operation. If the drives have not yet responded,
 * and we have not yet hit our maximum waiting time, then the timer is restarted
 * for another 50ms.
 */
static ide_startstop_t reset_pollfunc(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	const struct ide_port_ops *port_ops = hwif->port_ops;
	u8 tmp;
	int err = 0;

	/* give the host driver a chance to veto / extend the reset */
	if (port_ops && port_ops->reset_poll) {
		err = port_ops->reset_poll(drive);
		if (err) {
			printk(KERN_ERR "%s: host reset_poll failure for %s.\n",
				hwif->name, drive->name);
			goto out;
		}
	}

	tmp = hwif->tp_ops->read_status(hwif);

	if (!OK_STAT(tmp, 0, ATA_BUSY)) {
		/* still BSY: keep polling until the timeout expires */
		if (time_before(jiffies, hwif->poll_timeout)) {
			ide_set_handler(drive, &reset_pollfunc, HZ/20, NULL);
			/* continue polling */
			return ide_started;
		}
		printk(KERN_ERR "%s: reset timed-out, status=0x%02x\n",
			hwif->name, tmp);
		drive->failures++;
		err = -EIO;
	} else {
		/* BSY cleared: the error register holds the diag result */
		tmp = ide_read_error(drive);

		if (tmp == 1) {
			printk(KERN_INFO "%s: reset: success\n", hwif->name);
			drive->failures = 0;
		} else {
			ide_reset_report_error(hwif, tmp);
			drive->failures++;
			err = -EIO;
		}
	}
out:
	hwif->polling = 0;	/* done polling */
	ide_complete_drive_reset(drive, err);
	return ide_stopped;
}

/*
 * Reset the per-drive software state of a disk device before a reset.
 * Devices without the WCACHE/words-85-87 validity bit set (pre-ATA-4
 * style) get geometry/recalibrate commands replayed afterwards.
 */
static void ide_disk_pre_reset(ide_drive_t *drive)
{
	int legacy = (drive->id[ATA_ID_CFS_ENABLE_2] & 0x0400) ? 0 : 1;

	drive->special.all = 0;
	drive->special.b.set_geometry = legacy;
	drive->special.b.recalibrate = legacy;

	drive->mult_count = 0;
	drive->dev_flags &= ~IDE_DFLAG_PARKED;

	if ((drive->dev_flags & IDE_DFLAG_KEEP_SETTINGS) == 0 &&
	    (drive->dev_flags & IDE_DFLAG_USING_DMA) == 0)
		drive->mult_req = 0;

	/* re-issue SET MULTIPLE MODE if the setting was lost */
	if (drive->mult_req != drive->mult_count)
		drive->special.b.set_multmode = 1;
}
/*
 * Prepare a single drive for an (ATAPI or soft) reset: reset per-drive
 * software state, sort out DMA, and let the host driver hook in.
 */
static void pre_reset(ide_drive_t *drive)
{
	const struct ide_port_ops *port_ops = drive->hwif->port_ops;

	if (drive->media == ide_disk)
		ide_disk_pre_reset(drive);
	else
		/* non-disk devices get post-reset handling instead */
		drive->dev_flags |= IDE_DFLAG_POST_RESET;

	if (drive->dev_flags & IDE_DFLAG_USING_DMA) {
		/* repeated CRC errors: let the DMA code retune/downgrade */
		if (drive->crc_count)
			ide_check_dma_crc(drive);
		else
			ide_dma_off(drive);
	}

	if ((drive->dev_flags & IDE_DFLAG_KEEP_SETTINGS) == 0) {
		if ((drive->dev_flags & IDE_DFLAG_USING_DMA) == 0) {
			drive->dev_flags &= ~IDE_DFLAG_UNMASK;
			drive->io_32bit = 0;
		}
		/* settings are not kept: skip host pre_reset/speed restore */
		return;
	}

	if (port_ops && port_ops->pre_reset)
		port_ops->pre_reset(drive);

	/* remember the current speed so it can be restored after reset */
	if (drive->current_speed != 0xff)
		drive->desired_speed = drive->current_speed;
	drive->current_speed = 0xff;
}
/*
 * do_reset1() attempts to recover a confused drive by resetting it.
 * Unfortunately, resetting a disk drive actually resets all devices on
 * the same interface, so it can really be thought of as resetting the
 * interface rather than resetting the drive.
 *
 * ATAPI devices have their own reset mechanism which allows them to be
 * individually reset without clobbering other devices on the same interface.
 *
 * Unfortunately, the IDE interface does not generate an interrupt to let
 * us know when the reset operation has finished, so we must poll for this.
 * Equally poor, though, is the fact that this may take a very long time
 * to complete, (up to 30 seconds worstcase).  So, instead of busy-waiting
 * here for it, we set a timer to poll at 50ms intervals.
 */
static ide_startstop_t do_reset1(ide_drive_t *drive, int do_not_try_atapi)
{
	ide_hwif_t *hwif = drive->hwif;
	struct ide_io_ports *io_ports = &hwif->io_ports;
	const struct ide_tp_ops *tp_ops = hwif->tp_ops;
	const struct ide_port_ops *port_ops;
	ide_drive_t *tdrive;
	unsigned long flags, timeout;
	int i;
	DEFINE_WAIT(wait);

	spin_lock_irqsave(&hwif->lock, flags);

	/* We must not reset with running handlers */
	BUG_ON(hwif->handler != NULL);

	/* For an ATAPI device, first try an ATAPI SRST. */
	if (drive->media != ide_disk && !do_not_try_atapi) {
		pre_reset(drive);
		SELECT_DRIVE(drive);
		udelay(20);
		tp_ops->exec_command(hwif, ATA_CMD_DEV_RESET);
		ndelay(400);
		hwif->poll_timeout = jiffies + WAIT_WORSTCASE;
		hwif->polling = 1;
		__ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20, NULL);
		spin_unlock_irqrestore(&hwif->lock, flags);
		return ide_started;
	}

	/*
	 * We must not disturb devices in the IDE_DFLAG_PARKED state:
	 * sleep (dropping the port lock) until all park timeouts expire.
	 */
	do {
		unsigned long now;

		prepare_to_wait(&ide_park_wq, &wait, TASK_UNINTERRUPTIBLE);
		timeout = jiffies;
		ide_port_for_each_present_dev(i, tdrive, hwif) {
			if ((tdrive->dev_flags & IDE_DFLAG_PARKED) &&
			    time_after(tdrive->sleep, timeout))
				timeout = tdrive->sleep;
		}

		now = jiffies;
		if (time_before_eq(timeout, now))
			break;

		spin_unlock_irqrestore(&hwif->lock, flags);
		timeout = schedule_timeout_uninterruptible(timeout - now);
		spin_lock_irqsave(&hwif->lock, flags);
	} while (timeout);
	finish_wait(&ide_park_wq, &wait);

	/*
	 * First, reset any device state data we were maintaining
	 * for any of the drives on this interface.
	 */
	ide_port_for_each_dev(i, tdrive, hwif)
		pre_reset(tdrive);

	/* no device control register: cannot pulse SRST */
	if (io_ports->ctl_addr == 0) {
		spin_unlock_irqrestore(&hwif->lock, flags);
		ide_complete_drive_reset(drive, -ENXIO);
		return ide_stopped;
	}

	/*
	 * Note that we also set nIEN while resetting the device,
	 * to mask unwanted interrupts from the interface during the reset.
	 * However, due to the design of PC hardware, this will cause an
	 * immediate interrupt due to the edge transition it produces.
	 * This single interrupt gives us a "fast poll" for drives that
	 * recover from reset very quickly, saving us the first 50ms wait time.
	 *
	 * TODO: add ->softreset method and stop abusing ->set_irq
	 */
	/* set SRST and nIEN */
	tp_ops->set_irq(hwif, 4);
	/* more than enough time */
	udelay(10);
	/* clear SRST, leave nIEN (unless device is on the quirk list) */
	tp_ops->set_irq(hwif, drive->quirk_list == 2);
	/* more than enough time */
	udelay(10);
	hwif->poll_timeout = jiffies + WAIT_WORSTCASE;
	hwif->polling = 1;
	__ide_set_handler(drive, &reset_pollfunc, HZ/20, NULL);

	/*
	 * Some weird controllers like resetting themselves to a strange
	 * state when the disks are reset this way. At least, the Winbond
	 * 553 documentation says that
	 */
	port_ops = hwif->port_ops;
	if (port_ops && port_ops->resetproc)
		port_ops->resetproc(drive);

	spin_unlock_irqrestore(&hwif->lock, flags);
	return ide_started;
}

/*
 * ide_do_reset() is the entry point to the drive/interface reset code.
 */

ide_startstop_t ide_do_reset(ide_drive_t *drive)
{
	return do_reset1(drive, 0);
}
EXPORT_SYMBOL(ide_do_reset);
+316
drivers/ide/ide-io-std.c
··· 1 + 2 + #include <linux/kernel.h> 3 + #include <linux/ide.h> 4 + 5 + /* 6 + * Conventional PIO operations for ATA devices 7 + */ 8 + 9 + static u8 ide_inb(unsigned long port) 10 + { 11 + return (u8) inb(port); 12 + } 13 + 14 + static void ide_outb(u8 val, unsigned long port) 15 + { 16 + outb(val, port); 17 + } 18 + 19 + /* 20 + * MMIO operations, typically used for SATA controllers 21 + */ 22 + 23 + static u8 ide_mm_inb(unsigned long port) 24 + { 25 + return (u8) readb((void __iomem *) port); 26 + } 27 + 28 + static void ide_mm_outb(u8 value, unsigned long port) 29 + { 30 + writeb(value, (void __iomem *) port); 31 + } 32 + 33 + void ide_exec_command(ide_hwif_t *hwif, u8 cmd) 34 + { 35 + if (hwif->host_flags & IDE_HFLAG_MMIO) 36 + writeb(cmd, (void __iomem *)hwif->io_ports.command_addr); 37 + else 38 + outb(cmd, hwif->io_ports.command_addr); 39 + } 40 + EXPORT_SYMBOL_GPL(ide_exec_command); 41 + 42 + u8 ide_read_status(ide_hwif_t *hwif) 43 + { 44 + if (hwif->host_flags & IDE_HFLAG_MMIO) 45 + return readb((void __iomem *)hwif->io_ports.status_addr); 46 + else 47 + return inb(hwif->io_ports.status_addr); 48 + } 49 + EXPORT_SYMBOL_GPL(ide_read_status); 50 + 51 + u8 ide_read_altstatus(ide_hwif_t *hwif) 52 + { 53 + if (hwif->host_flags & IDE_HFLAG_MMIO) 54 + return readb((void __iomem *)hwif->io_ports.ctl_addr); 55 + else 56 + return inb(hwif->io_ports.ctl_addr); 57 + } 58 + EXPORT_SYMBOL_GPL(ide_read_altstatus); 59 + 60 + void ide_set_irq(ide_hwif_t *hwif, int on) 61 + { 62 + u8 ctl = ATA_DEVCTL_OBS; 63 + 64 + if (on == 4) { /* hack for SRST */ 65 + ctl |= 4; 66 + on &= ~4; 67 + } 68 + 69 + ctl |= on ? 
/*
 * Write a taskfile to the device registers, honouring the OUT_* flags in
 * task->tf_flags.  HOB (high order byte) registers are written first so
 * that the LBA48 "previous" values land in the FIFO before the current
 * ones.  HIHI masks the device register: 0xEF keeps the CHS/LBA28 head
 * bits, 0xE0 clears them for LBA48, 0xFF passes everything through for
 * FLAGGED (raw) taskfiles.
 */
void ide_tf_load(ide_drive_t *drive, ide_task_t *task)
{
	ide_hwif_t *hwif = drive->hwif;
	struct ide_io_ports *io_ports = &hwif->io_ports;
	struct ide_taskfile *tf = &task->tf;
	void (*tf_outb)(u8 addr, unsigned long port);
	u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;
	u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;

	/* pick the MMIO or port I/O byte writer once, up front */
	if (mmio)
		tf_outb = ide_mm_outb;
	else
		tf_outb = ide_outb;

	if (task->tf_flags & IDE_TFLAG_FLAGGED)
		HIHI = 0xFF;

	if (task->tf_flags & IDE_TFLAG_OUT_DATA) {
		u16 data = (tf->hob_data << 8) | tf->data;

		if (mmio)
			writew(data, (void __iomem *)io_ports->data_addr);
		else
			outw(data, io_ports->data_addr);
	}

	/* HOB registers first (LBA48 "previous" values) */
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
		tf_outb(tf->hob_feature, io_ports->feature_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
		tf_outb(tf->hob_nsect, io_ports->nsect_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
		tf_outb(tf->hob_lbal, io_ports->lbal_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
		tf_outb(tf->hob_lbam, io_ports->lbam_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
		tf_outb(tf->hob_lbah, io_ports->lbah_addr);

	/* then the current values */
	if (task->tf_flags & IDE_TFLAG_OUT_FEATURE)
		tf_outb(tf->feature, io_ports->feature_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_NSECT)
		tf_outb(tf->nsect, io_ports->nsect_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_LBAL)
		tf_outb(tf->lbal, io_ports->lbal_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_LBAM)
		tf_outb(tf->lbam, io_ports->lbam_addr);
	if (task->tf_flags & IDE_TFLAG_OUT_LBAH)
		tf_outb(tf->lbah, io_ports->lbah_addr);

	/* device register last, masked by HIHI and or-ed with drive select */
	if (task->tf_flags & IDE_TFLAG_OUT_DEVICE)
		tf_outb((tf->device & HIHI) | drive->select,
			 io_ports->device_addr);
}
EXPORT_SYMBOL_GPL(ide_tf_load);
/*
 * Read the taskfile registers back from the device, honouring the IN_*
 * flags in task->tf_flags.  For LBA48 the HOB values are fetched by
 * setting the HOB bit (0x80) in the device control register, which makes
 * the shadow registers present their "previous" contents.
 */
void ide_tf_read(ide_drive_t *drive, ide_task_t *task)
{
	ide_hwif_t *hwif = drive->hwif;
	struct ide_io_ports *io_ports = &hwif->io_ports;
	struct ide_taskfile *tf = &task->tf;
	void (*tf_outb)(u8 addr, unsigned long port);
	u8 (*tf_inb)(unsigned long port);
	u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;

	/* pick the MMIO or port I/O accessors once, up front */
	if (mmio) {
		tf_outb = ide_mm_outb;
		tf_inb  = ide_mm_inb;
	} else {
		tf_outb = ide_outb;
		tf_inb  = ide_inb;
	}

	if (task->tf_flags & IDE_TFLAG_IN_DATA) {
		u16 data;

		if (mmio)
			data = readw((void __iomem *)io_ports->data_addr);
		else
			data = inw(io_ports->data_addr);

		tf->data = data & 0xff;
		tf->hob_data = (data >> 8) & 0xff;
	}

	/* be sure we're looking at the low order bits */
	tf_outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr);

	if (task->tf_flags & IDE_TFLAG_IN_FEATURE)
		tf->feature = tf_inb(io_ports->feature_addr);
	if (task->tf_flags & IDE_TFLAG_IN_NSECT)
		tf->nsect  = tf_inb(io_ports->nsect_addr);
	if (task->tf_flags & IDE_TFLAG_IN_LBAL)
		tf->lbal   = tf_inb(io_ports->lbal_addr);
	if (task->tf_flags & IDE_TFLAG_IN_LBAM)
		tf->lbam   = tf_inb(io_ports->lbam_addr);
	if (task->tf_flags & IDE_TFLAG_IN_LBAH)
		tf->lbah   = tf_inb(io_ports->lbah_addr);
	if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
		tf->device = tf_inb(io_ports->device_addr);

	if (task->tf_flags & IDE_TFLAG_LBA48) {
		/* select the HOB (previous) register contents */
		tf_outb(ATA_DEVCTL_OBS | 0x80, io_ports->ctl_addr);

		if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
			tf->hob_feature = tf_inb(io_ports->feature_addr);
		if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
			tf->hob_nsect   = tf_inb(io_ports->nsect_addr);
		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
			tf->hob_lbal    = tf_inb(io_ports->lbal_addr);
		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
			tf->hob_lbam    = tf_inb(io_ports->lbam_addr);
		if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
			tf->hob_lbah    = tf_inb(io_ports->lbah_addr);
	}
}
EXPORT_SYMBOL_GPL(ide_tf_read);
/*
 * Some localbus EIDE interfaces require a special access sequence
 * when using 32-bit I/O instructions to transfer data.  We call this
 * the "vlb_sync" sequence, which consists of three successive reads
 * of the sector count register location, with interrupts disabled
 * to ensure that the reads all happen together.
 */
static void ata_vlb_sync(unsigned long port)
{
	(void)inb(port);
	(void)inb(port);
	(void)inb(port);
}

/*
 * This is used for most PIO data transfers *from* the IDE interface
 *
 * These routines will round up any request for an odd number of bytes,
 * so if an odd len is specified, be sure that there's at least one
 * extra byte allocated for the buffer.
 */
void ide_input_data(ide_drive_t *drive, struct request *rq, void *buf,
		    unsigned int len)
{
	ide_hwif_t *hwif = drive->hwif;
	struct ide_io_ports *io_ports = &hwif->io_ports;
	unsigned long data_addr = io_ports->data_addr;
	u8 io_32bit = drive->io_32bit;
	u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;

	/* round odd lengths up; caller must provide the spare byte */
	len++;

	if (io_32bit) {
		unsigned long uninitialized_var(flags);

		/* bit 1 of io_32bit requests the VLB sync sequence */
		if ((io_32bit & 2) && !mmio) {
			local_irq_save(flags);
			ata_vlb_sync(io_ports->nsect_addr);
		}

		if (mmio)
			__ide_mm_insl((void __iomem *)data_addr, buf, len / 4);
		else
			insl(data_addr, buf, len / 4);

		if ((io_32bit & 2) && !mmio)
			local_irq_restore(flags);

		/* transfer a trailing 16-bit word if len % 4 >= 2 */
		if ((len & 3) >= 2) {
			if (mmio)
				__ide_mm_insw((void __iomem *)data_addr,
						(u8 *)buf + (len & ~3), 1);
			else
				insw(data_addr, (u8 *)buf + (len & ~3), 1);
		}
	} else {
		if (mmio)
			__ide_mm_insw((void __iomem *)data_addr, buf, len / 2);
		else
			insw(data_addr, buf, len / 2);
	}
}
EXPORT_SYMBOL_GPL(ide_input_data);
/*
 * This is used for most PIO data transfers *to* the IDE interface
 * (mirror image of ide_input_data(): same rounding and vlb_sync rules).
 */
void ide_output_data(ide_drive_t *drive, struct request *rq, void *buf,
		     unsigned int len)
{
	ide_hwif_t *hwif = drive->hwif;
	struct ide_io_ports *io_ports = &hwif->io_ports;
	unsigned long data_addr = io_ports->data_addr;
	u8 io_32bit = drive->io_32bit;
	u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0;

	/* round odd lengths up; caller must provide the spare byte */
	len++;

	if (io_32bit) {
		unsigned long uninitialized_var(flags);

		/* bit 1 of io_32bit requests the VLB sync sequence */
		if ((io_32bit & 2) && !mmio) {
			local_irq_save(flags);
			ata_vlb_sync(io_ports->nsect_addr);
		}

		if (mmio)
			__ide_mm_outsl((void __iomem *)data_addr, buf, len / 4);
		else
			outsl(data_addr, buf, len / 4);

		if ((io_32bit & 2) && !mmio)
			local_irq_restore(flags);

		/* transfer a trailing 16-bit word if len % 4 >= 2 */
		if ((len & 3) >= 2) {
			if (mmio)
				__ide_mm_outsw((void __iomem *)data_addr,
						 (u8 *)buf + (len & ~3), 1);
			else
				outsw(data_addr, (u8 *)buf + (len & ~3), 1);
		}
	} else {
		if (mmio)
			__ide_mm_outsw((void __iomem *)data_addr, buf, len / 2);
		else
			outsw(data_addr, buf, len / 2);
	}
}
EXPORT_SYMBOL_GPL(ide_output_data);
/*
 * Default transport (taskfile) operations: plain port I/O / MMIO
 * accessors, used by hosts that do not override ->tp_ops.
 */
const struct ide_tp_ops default_tp_ops = {
	.exec_command		= ide_exec_command,
	.read_status		= ide_read_status,
	.read_altstatus		= ide_read_altstatus,

	.set_irq		= ide_set_irq,

	.tf_load		= ide_tf_load,
	.tf_read		= ide_tf_read,

	.input_data		= ide_input_data,
	.output_data		= ide_output_data,
};
+28 -292
drivers/ide/ide-io.c
··· 196 196 } 197 197 EXPORT_SYMBOL(ide_end_drive_cmd); 198 198 199 - static void ide_kill_rq(ide_drive_t *drive, struct request *rq) 199 + void ide_kill_rq(ide_drive_t *drive, struct request *rq) 200 200 { 201 201 if (rq->rq_disk) { 202 202 struct ide_driver *drv; ··· 206 206 } else 207 207 ide_end_request(drive, 0, 0); 208 208 } 209 - 210 - static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err) 211 - { 212 - ide_hwif_t *hwif = drive->hwif; 213 - 214 - if ((stat & ATA_BUSY) || 215 - ((stat & ATA_DF) && (drive->dev_flags & IDE_DFLAG_NOWERR) == 0)) { 216 - /* other bits are useless when BUSY */ 217 - rq->errors |= ERROR_RESET; 218 - } else if (stat & ATA_ERR) { 219 - /* err has different meaning on cdrom and tape */ 220 - if (err == ATA_ABORTED) { 221 - if ((drive->dev_flags & IDE_DFLAG_LBA) && 222 - /* some newer drives don't support ATA_CMD_INIT_DEV_PARAMS */ 223 - hwif->tp_ops->read_status(hwif) == ATA_CMD_INIT_DEV_PARAMS) 224 - return ide_stopped; 225 - } else if ((err & BAD_CRC) == BAD_CRC) { 226 - /* UDMA crc error, just retry the operation */ 227 - drive->crc_count++; 228 - } else if (err & (ATA_BBK | ATA_UNC)) { 229 - /* retries won't help these */ 230 - rq->errors = ERROR_MAX; 231 - } else if (err & ATA_TRK0NF) { 232 - /* help it find track zero */ 233 - rq->errors |= ERROR_RECAL; 234 - } 235 - } 236 - 237 - if ((stat & ATA_DRQ) && rq_data_dir(rq) == READ && 238 - (hwif->host_flags & IDE_HFLAG_ERROR_STOPS_FIFO) == 0) { 239 - int nsect = drive->mult_count ? 
drive->mult_count : 1; 240 - 241 - ide_pad_transfer(drive, READ, nsect * SECTOR_SIZE); 242 - } 243 - 244 - if (rq->errors >= ERROR_MAX || blk_noretry_request(rq)) { 245 - ide_kill_rq(drive, rq); 246 - return ide_stopped; 247 - } 248 - 249 - if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ)) 250 - rq->errors |= ERROR_RESET; 251 - 252 - if ((rq->errors & ERROR_RESET) == ERROR_RESET) { 253 - ++rq->errors; 254 - return ide_do_reset(drive); 255 - } 256 - 257 - if ((rq->errors & ERROR_RECAL) == ERROR_RECAL) 258 - drive->special.b.recalibrate = 1; 259 - 260 - ++rq->errors; 261 - 262 - return ide_stopped; 263 - } 264 - 265 - static ide_startstop_t ide_atapi_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err) 266 - { 267 - ide_hwif_t *hwif = drive->hwif; 268 - 269 - if ((stat & ATA_BUSY) || 270 - ((stat & ATA_DF) && (drive->dev_flags & IDE_DFLAG_NOWERR) == 0)) { 271 - /* other bits are useless when BUSY */ 272 - rq->errors |= ERROR_RESET; 273 - } else { 274 - /* add decoding error stuff */ 275 - } 276 - 277 - if (hwif->tp_ops->read_status(hwif) & (ATA_BUSY | ATA_DRQ)) 278 - /* force an abort */ 279 - hwif->tp_ops->exec_command(hwif, ATA_CMD_IDLEIMMEDIATE); 280 - 281 - if (rq->errors >= ERROR_MAX) { 282 - ide_kill_rq(drive, rq); 283 - } else { 284 - if ((rq->errors & ERROR_RESET) == ERROR_RESET) { 285 - ++rq->errors; 286 - return ide_do_reset(drive); 287 - } 288 - ++rq->errors; 289 - } 290 - 291 - return ide_stopped; 292 - } 293 - 294 - static ide_startstop_t 295 - __ide_error(ide_drive_t *drive, struct request *rq, u8 stat, u8 err) 296 - { 297 - if (drive->media == ide_disk) 298 - return ide_ata_error(drive, rq, stat, err); 299 - return ide_atapi_error(drive, rq, stat, err); 300 - } 301 - 302 - /** 303 - * ide_error - handle an error on the IDE 304 - * @drive: drive the error occurred on 305 - * @msg: message to report 306 - * @stat: status bits 307 - * 308 - * ide_error() takes action based on the error returned by the drive. 
309 - * For normal I/O that may well include retries. We deal with 310 - * both new-style (taskfile) and old style command handling here. 311 - * In the case of taskfile command handling there is work left to 312 - * do 313 - */ 314 - 315 - ide_startstop_t ide_error (ide_drive_t *drive, const char *msg, u8 stat) 316 - { 317 - struct request *rq; 318 - u8 err; 319 - 320 - err = ide_dump_status(drive, msg, stat); 321 - 322 - rq = drive->hwif->rq; 323 - if (rq == NULL) 324 - return ide_stopped; 325 - 326 - /* retry only "normal" I/O: */ 327 - if (!blk_fs_request(rq)) { 328 - rq->errors = 1; 329 - ide_end_drive_cmd(drive, stat, err); 330 - return ide_stopped; 331 - } 332 - 333 - return __ide_error(drive, rq, stat, err); 334 - } 335 - EXPORT_SYMBOL_GPL(ide_error); 336 209 337 210 static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf) 338 211 { ··· 363 490 return ide_stopped; 364 491 } 365 492 366 - int ide_devset_execute(ide_drive_t *drive, const struct ide_devset *setting, 367 - int arg) 368 - { 369 - struct request_queue *q = drive->queue; 370 - struct request *rq; 371 - int ret = 0; 372 - 373 - if (!(setting->flags & DS_SYNC)) 374 - return setting->set(drive, arg); 375 - 376 - rq = blk_get_request(q, READ, __GFP_WAIT); 377 - rq->cmd_type = REQ_TYPE_SPECIAL; 378 - rq->cmd_len = 5; 379 - rq->cmd[0] = REQ_DEVSET_EXEC; 380 - *(int *)&rq->cmd[1] = arg; 381 - rq->special = setting->set; 382 - 383 - if (blk_execute_rq(q, NULL, rq, 0)) 384 - ret = rq->errors; 385 - blk_put_request(rq); 386 - 387 - return ret; 388 - } 389 - EXPORT_SYMBOL_GPL(ide_devset_execute); 390 - 391 493 static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq) 392 494 { 393 495 u8 cmd = rq->cmd[0]; 394 496 395 - if (cmd == REQ_PARK_HEADS || cmd == REQ_UNPARK_HEADS) { 396 - ide_task_t task; 397 - struct ide_taskfile *tf = &task.tf; 398 - 399 - memset(&task, 0, sizeof(task)); 400 - if (cmd == REQ_PARK_HEADS) { 401 - drive->sleep = *(unsigned long *)rq->special; 
402 - drive->dev_flags |= IDE_DFLAG_SLEEPING; 403 - tf->command = ATA_CMD_IDLEIMMEDIATE; 404 - tf->feature = 0x44; 405 - tf->lbal = 0x4c; 406 - tf->lbam = 0x4e; 407 - tf->lbah = 0x55; 408 - task.tf_flags |= IDE_TFLAG_CUSTOM_HANDLER; 409 - } else /* cmd == REQ_UNPARK_HEADS */ 410 - tf->command = ATA_CMD_CHK_POWER; 411 - 412 - task.tf_flags |= IDE_TFLAG_TF | IDE_TFLAG_DEVICE; 413 - task.rq = rq; 414 - drive->hwif->data_phase = task.data_phase = TASKFILE_NO_DATA; 415 - return do_rw_taskfile(drive, &task); 416 - } 417 - 418 497 switch (cmd) { 498 + case REQ_PARK_HEADS: 499 + case REQ_UNPARK_HEADS: 500 + return ide_do_park_unpark(drive, rq); 419 501 case REQ_DEVSET_EXEC: 420 - { 421 - int err, (*setfunc)(ide_drive_t *, int) = rq->special; 422 - 423 - err = setfunc(drive, *(int *)&rq->cmd[1]); 424 - if (err) 425 - rq->errors = err; 426 - else 427 - err = 1; 428 - ide_end_request(drive, err, 0); 429 - return ide_stopped; 430 - } 502 + return ide_do_devset(drive, rq); 431 503 case REQ_DRIVE_RESET: 432 504 return ide_do_reset(drive); 433 505 default: ··· 638 820 blk_plug_device(q); 639 821 } 640 822 641 - /* 642 - * un-busy the port etc, and clear any pending DMA status. 
/*
 * Cheap check for whether the drive could be the source of a pending
 * interrupt.  Prefers the Alternate Status register, since reading it
 * does not acknowledge the interrupt; returns non-zero when the device
 * is not BSY (i.e. it *might* be interrupting).
 */
static int drive_is_ready(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 stat = 0;

	/* during DMA, ask the DMA engine instead of the status register */
	if (drive->waiting_for_dma)
		return hwif->dma_ops->dma_test_irq(drive);

	if (hwif->io_ports.ctl_addr &&
	    (hwif->host_flags & IDE_HFLAG_BROKEN_ALTSTATUS) == 0)
		stat = hwif->tp_ops->read_altstatus(hwif);
	else
		/* Note: this may clear a pending IRQ!! */
		stat = hwif->tp_ops->read_status(hwif);

	if (stat & ATA_BUSY)
		/* drive busy: definitely not interrupting */
		return 0;

	/* drive ready: *might* be interrupting */
	return 1;
}
964 - */ 965 - 966 - void ide_do_drive_cmd(ide_drive_t *drive, struct request *rq) 967 - { 968 - struct request_queue *q = drive->queue; 969 - unsigned long flags; 970 - 971 - drive->hwif->rq = NULL; 972 - 973 - spin_lock_irqsave(q->queue_lock, flags); 974 - __elv_add_request(q, rq, ELEVATOR_INSERT_FRONT, 0); 975 - spin_unlock_irqrestore(q->queue_lock, flags); 976 - } 977 - EXPORT_SYMBOL(ide_do_drive_cmd); 978 - 979 - void ide_pktcmd_tf_load(ide_drive_t *drive, u32 tf_flags, u16 bcount, u8 dma) 980 - { 981 - ide_hwif_t *hwif = drive->hwif; 982 - ide_task_t task; 983 - 984 - memset(&task, 0, sizeof(task)); 985 - task.tf_flags = IDE_TFLAG_OUT_LBAH | IDE_TFLAG_OUT_LBAM | 986 - IDE_TFLAG_OUT_FEATURE | tf_flags; 987 - task.tf.feature = dma; /* Use PIO/DMA */ 988 - task.tf.lbam = bcount & 0xff; 989 - task.tf.lbah = (bcount >> 8) & 0xff; 990 - 991 - ide_tf_dump(drive->name, &task.tf); 992 - hwif->tp_ops->set_irq(hwif, 1); 993 - SELECT_MASK(drive, 0); 994 - hwif->tp_ops->tf_load(drive, &task); 995 - } 996 - 997 - EXPORT_SYMBOL_GPL(ide_pktcmd_tf_load); 998 1166 999 1167 void ide_pad_transfer(ide_drive_t *drive, int write, int len) 1000 1168 {
+29 -713
drivers/ide/ide-iops.c
··· 27 27 #include <asm/uaccess.h> 28 28 #include <asm/io.h> 29 29 30 - /* 31 - * Conventional PIO operations for ATA devices 32 - */ 33 - 34 - static u8 ide_inb (unsigned long port) 35 - { 36 - return (u8) inb(port); 37 - } 38 - 39 - static void ide_outb (u8 val, unsigned long port) 40 - { 41 - outb(val, port); 42 - } 43 - 44 - /* 45 - * MMIO operations, typically used for SATA controllers 46 - */ 47 - 48 - static u8 ide_mm_inb (unsigned long port) 49 - { 50 - return (u8) readb((void __iomem *) port); 51 - } 52 - 53 - static void ide_mm_outb (u8 value, unsigned long port) 54 - { 55 - writeb(value, (void __iomem *) port); 56 - } 57 - 58 - void SELECT_DRIVE (ide_drive_t *drive) 30 + void SELECT_DRIVE(ide_drive_t *drive) 59 31 { 60 32 ide_hwif_t *hwif = drive->hwif; 61 33 const struct ide_port_ops *port_ops = hwif->port_ops; ··· 50 78 port_ops->maskproc(drive, mask); 51 79 } 52 80 53 - void ide_exec_command(ide_hwif_t *hwif, u8 cmd) 54 - { 55 - if (hwif->host_flags & IDE_HFLAG_MMIO) 56 - writeb(cmd, (void __iomem *)hwif->io_ports.command_addr); 57 - else 58 - outb(cmd, hwif->io_ports.command_addr); 59 - } 60 - EXPORT_SYMBOL_GPL(ide_exec_command); 61 - 62 - u8 ide_read_status(ide_hwif_t *hwif) 63 - { 64 - if (hwif->host_flags & IDE_HFLAG_MMIO) 65 - return readb((void __iomem *)hwif->io_ports.status_addr); 66 - else 67 - return inb(hwif->io_ports.status_addr); 68 - } 69 - EXPORT_SYMBOL_GPL(ide_read_status); 70 - 71 - u8 ide_read_altstatus(ide_hwif_t *hwif) 72 - { 73 - if (hwif->host_flags & IDE_HFLAG_MMIO) 74 - return readb((void __iomem *)hwif->io_ports.ctl_addr); 75 - else 76 - return inb(hwif->io_ports.ctl_addr); 77 - } 78 - EXPORT_SYMBOL_GPL(ide_read_altstatus); 79 - 80 - void ide_set_irq(ide_hwif_t *hwif, int on) 81 - { 82 - u8 ctl = ATA_DEVCTL_OBS; 83 - 84 - if (on == 4) { /* hack for SRST */ 85 - ctl |= 4; 86 - on &= ~4; 87 - } 88 - 89 - ctl |= on ? 
0 : 2; 90 - 91 - if (hwif->host_flags & IDE_HFLAG_MMIO) 92 - writeb(ctl, (void __iomem *)hwif->io_ports.ctl_addr); 93 - else 94 - outb(ctl, hwif->io_ports.ctl_addr); 95 - } 96 - EXPORT_SYMBOL_GPL(ide_set_irq); 97 - 98 - void ide_tf_load(ide_drive_t *drive, ide_task_t *task) 99 - { 100 - ide_hwif_t *hwif = drive->hwif; 101 - struct ide_io_ports *io_ports = &hwif->io_ports; 102 - struct ide_taskfile *tf = &task->tf; 103 - void (*tf_outb)(u8 addr, unsigned long port); 104 - u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0; 105 - u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF; 106 - 107 - if (mmio) 108 - tf_outb = ide_mm_outb; 109 - else 110 - tf_outb = ide_outb; 111 - 112 - if (task->tf_flags & IDE_TFLAG_FLAGGED) 113 - HIHI = 0xFF; 114 - 115 - if (task->tf_flags & IDE_TFLAG_OUT_DATA) { 116 - u16 data = (tf->hob_data << 8) | tf->data; 117 - 118 - if (mmio) 119 - writew(data, (void __iomem *)io_ports->data_addr); 120 - else 121 - outw(data, io_ports->data_addr); 122 - } 123 - 124 - if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE) 125 - tf_outb(tf->hob_feature, io_ports->feature_addr); 126 - if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT) 127 - tf_outb(tf->hob_nsect, io_ports->nsect_addr); 128 - if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL) 129 - tf_outb(tf->hob_lbal, io_ports->lbal_addr); 130 - if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM) 131 - tf_outb(tf->hob_lbam, io_ports->lbam_addr); 132 - if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH) 133 - tf_outb(tf->hob_lbah, io_ports->lbah_addr); 134 - 135 - if (task->tf_flags & IDE_TFLAG_OUT_FEATURE) 136 - tf_outb(tf->feature, io_ports->feature_addr); 137 - if (task->tf_flags & IDE_TFLAG_OUT_NSECT) 138 - tf_outb(tf->nsect, io_ports->nsect_addr); 139 - if (task->tf_flags & IDE_TFLAG_OUT_LBAL) 140 - tf_outb(tf->lbal, io_ports->lbal_addr); 141 - if (task->tf_flags & IDE_TFLAG_OUT_LBAM) 142 - tf_outb(tf->lbam, io_ports->lbam_addr); 143 - if (task->tf_flags & IDE_TFLAG_OUT_LBAH) 144 - tf_outb(tf->lbah, 
io_ports->lbah_addr); 145 - 146 - if (task->tf_flags & IDE_TFLAG_OUT_DEVICE) 147 - tf_outb((tf->device & HIHI) | drive->select, 148 - io_ports->device_addr); 149 - } 150 - EXPORT_SYMBOL_GPL(ide_tf_load); 151 - 152 - void ide_tf_read(ide_drive_t *drive, ide_task_t *task) 153 - { 154 - ide_hwif_t *hwif = drive->hwif; 155 - struct ide_io_ports *io_ports = &hwif->io_ports; 156 - struct ide_taskfile *tf = &task->tf; 157 - void (*tf_outb)(u8 addr, unsigned long port); 158 - u8 (*tf_inb)(unsigned long port); 159 - u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0; 160 - 161 - if (mmio) { 162 - tf_outb = ide_mm_outb; 163 - tf_inb = ide_mm_inb; 164 - } else { 165 - tf_outb = ide_outb; 166 - tf_inb = ide_inb; 167 - } 168 - 169 - if (task->tf_flags & IDE_TFLAG_IN_DATA) { 170 - u16 data; 171 - 172 - if (mmio) 173 - data = readw((void __iomem *)io_ports->data_addr); 174 - else 175 - data = inw(io_ports->data_addr); 176 - 177 - tf->data = data & 0xff; 178 - tf->hob_data = (data >> 8) & 0xff; 179 - } 180 - 181 - /* be sure we're looking at the low order bits */ 182 - tf_outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr); 183 - 184 - if (task->tf_flags & IDE_TFLAG_IN_FEATURE) 185 - tf->feature = tf_inb(io_ports->feature_addr); 186 - if (task->tf_flags & IDE_TFLAG_IN_NSECT) 187 - tf->nsect = tf_inb(io_ports->nsect_addr); 188 - if (task->tf_flags & IDE_TFLAG_IN_LBAL) 189 - tf->lbal = tf_inb(io_ports->lbal_addr); 190 - if (task->tf_flags & IDE_TFLAG_IN_LBAM) 191 - tf->lbam = tf_inb(io_ports->lbam_addr); 192 - if (task->tf_flags & IDE_TFLAG_IN_LBAH) 193 - tf->lbah = tf_inb(io_ports->lbah_addr); 194 - if (task->tf_flags & IDE_TFLAG_IN_DEVICE) 195 - tf->device = tf_inb(io_ports->device_addr); 196 - 197 - if (task->tf_flags & IDE_TFLAG_LBA48) { 198 - tf_outb(ATA_DEVCTL_OBS | 0x80, io_ports->ctl_addr); 199 - 200 - if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE) 201 - tf->hob_feature = tf_inb(io_ports->feature_addr); 202 - if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT) 203 - tf->hob_nsect = 
tf_inb(io_ports->nsect_addr); 204 - if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL) 205 - tf->hob_lbal = tf_inb(io_ports->lbal_addr); 206 - if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM) 207 - tf->hob_lbam = tf_inb(io_ports->lbam_addr); 208 - if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH) 209 - tf->hob_lbah = tf_inb(io_ports->lbah_addr); 210 - } 211 - } 212 - EXPORT_SYMBOL_GPL(ide_tf_read); 213 - 214 - /* 215 - * Some localbus EIDE interfaces require a special access sequence 216 - * when using 32-bit I/O instructions to transfer data. We call this 217 - * the "vlb_sync" sequence, which consists of three successive reads 218 - * of the sector count register location, with interrupts disabled 219 - * to ensure that the reads all happen together. 220 - */ 221 - static void ata_vlb_sync(unsigned long port) 222 - { 223 - (void)inb(port); 224 - (void)inb(port); 225 - (void)inb(port); 226 - } 227 - 228 - /* 229 - * This is used for most PIO data transfers *from* the IDE interface 230 - * 231 - * These routines will round up any request for an odd number of bytes, 232 - * so if an odd len is specified, be sure that there's at least one 233 - * extra byte allocated for the buffer. 234 - */ 235 - void ide_input_data(ide_drive_t *drive, struct request *rq, void *buf, 236 - unsigned int len) 237 - { 238 - ide_hwif_t *hwif = drive->hwif; 239 - struct ide_io_ports *io_ports = &hwif->io_ports; 240 - unsigned long data_addr = io_ports->data_addr; 241 - u8 io_32bit = drive->io_32bit; 242 - u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 
1 : 0; 243 - 244 - len++; 245 - 246 - if (io_32bit) { 247 - unsigned long uninitialized_var(flags); 248 - 249 - if ((io_32bit & 2) && !mmio) { 250 - local_irq_save(flags); 251 - ata_vlb_sync(io_ports->nsect_addr); 252 - } 253 - 254 - if (mmio) 255 - __ide_mm_insl((void __iomem *)data_addr, buf, len / 4); 256 - else 257 - insl(data_addr, buf, len / 4); 258 - 259 - if ((io_32bit & 2) && !mmio) 260 - local_irq_restore(flags); 261 - 262 - if ((len & 3) >= 2) { 263 - if (mmio) 264 - __ide_mm_insw((void __iomem *)data_addr, 265 - (u8 *)buf + (len & ~3), 1); 266 - else 267 - insw(data_addr, (u8 *)buf + (len & ~3), 1); 268 - } 269 - } else { 270 - if (mmio) 271 - __ide_mm_insw((void __iomem *)data_addr, buf, len / 2); 272 - else 273 - insw(data_addr, buf, len / 2); 274 - } 275 - } 276 - EXPORT_SYMBOL_GPL(ide_input_data); 277 - 278 - /* 279 - * This is used for most PIO data transfers *to* the IDE interface 280 - */ 281 - void ide_output_data(ide_drive_t *drive, struct request *rq, void *buf, 282 - unsigned int len) 283 - { 284 - ide_hwif_t *hwif = drive->hwif; 285 - struct ide_io_ports *io_ports = &hwif->io_ports; 286 - unsigned long data_addr = io_ports->data_addr; 287 - u8 io_32bit = drive->io_32bit; 288 - u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 
1 : 0; 289 - 290 - len++; 291 - 292 - if (io_32bit) { 293 - unsigned long uninitialized_var(flags); 294 - 295 - if ((io_32bit & 2) && !mmio) { 296 - local_irq_save(flags); 297 - ata_vlb_sync(io_ports->nsect_addr); 298 - } 299 - 300 - if (mmio) 301 - __ide_mm_outsl((void __iomem *)data_addr, buf, len / 4); 302 - else 303 - outsl(data_addr, buf, len / 4); 304 - 305 - if ((io_32bit & 2) && !mmio) 306 - local_irq_restore(flags); 307 - 308 - if ((len & 3) >= 2) { 309 - if (mmio) 310 - __ide_mm_outsw((void __iomem *)data_addr, 311 - (u8 *)buf + (len & ~3), 1); 312 - else 313 - outsw(data_addr, (u8 *)buf + (len & ~3), 1); 314 - } 315 - } else { 316 - if (mmio) 317 - __ide_mm_outsw((void __iomem *)data_addr, buf, len / 2); 318 - else 319 - outsw(data_addr, buf, len / 2); 320 - } 321 - } 322 - EXPORT_SYMBOL_GPL(ide_output_data); 323 - 324 81 u8 ide_read_error(ide_drive_t *drive) 325 82 { 326 83 ide_task_t task; ··· 62 361 return task.tf.error; 63 362 } 64 363 EXPORT_SYMBOL_GPL(ide_read_error); 65 - 66 - void ide_read_bcount_and_ireason(ide_drive_t *drive, u16 *bcount, u8 *ireason) 67 - { 68 - ide_task_t task; 69 - 70 - memset(&task, 0, sizeof(task)); 71 - task.tf_flags = IDE_TFLAG_IN_LBAH | IDE_TFLAG_IN_LBAM | 72 - IDE_TFLAG_IN_NSECT; 73 - 74 - drive->hwif->tp_ops->tf_read(drive, &task); 75 - 76 - *bcount = (task.tf.lbah << 8) | task.tf.lbam; 77 - *ireason = task.tf.nsect & 3; 78 - } 79 - EXPORT_SYMBOL_GPL(ide_read_bcount_and_ireason); 80 - 81 - const struct ide_tp_ops default_tp_ops = { 82 - .exec_command = ide_exec_command, 83 - .read_status = ide_read_status, 84 - .read_altstatus = ide_read_altstatus, 85 - 86 - .set_irq = ide_set_irq, 87 - 88 - .tf_load = ide_tf_load, 89 - .tf_read = ide_tf_read, 90 - 91 - .input_data = ide_input_data, 92 - .output_data = ide_output_data, 93 - }; 94 364 95 365 void ide_fix_driveid(u16 *id) 96 366 { ··· 84 412 * returned by the ATA_CMD_ID_ATA[PI] commands. 
85 413 */ 86 414 87 - void ide_fixstring (u8 *s, const int bytecount, const int byteswap) 415 + void ide_fixstring(u8 *s, const int bytecount, const int byteswap) 88 416 { 89 417 u8 *p, *end = &s[bytecount & ~1]; /* bytecount must be even */ 90 418 ··· 107 435 while (p != end) 108 436 *p++ = '\0'; 109 437 } 110 - 111 438 EXPORT_SYMBOL(ide_fixstring); 112 - 113 - /* 114 - * Needed for PCI irq sharing 115 - */ 116 - int drive_is_ready (ide_drive_t *drive) 117 - { 118 - ide_hwif_t *hwif = drive->hwif; 119 - u8 stat = 0; 120 - 121 - if (drive->waiting_for_dma) 122 - return hwif->dma_ops->dma_test_irq(drive); 123 - 124 - /* 125 - * We do a passive status test under shared PCI interrupts on 126 - * cards that truly share the ATA side interrupt, but may also share 127 - * an interrupt with another pci card/device. We make no assumptions 128 - * about possible isa-pnp and pci-pnp issues yet. 129 - */ 130 - if (hwif->io_ports.ctl_addr && 131 - (hwif->host_flags & IDE_HFLAG_BROKEN_ALTSTATUS) == 0) 132 - stat = hwif->tp_ops->read_altstatus(hwif); 133 - else 134 - /* Note: this may clear a pending IRQ!! */ 135 - stat = hwif->tp_ops->read_status(hwif); 136 - 137 - if (stat & ATA_BUSY) 138 - /* drive busy: definitely not interrupting */ 139 - return 0; 140 - 141 - /* drive ready: *might* be interrupting */ 142 - return 1; 143 - } 144 - 145 - EXPORT_SYMBOL(drive_is_ready); 146 439 147 440 /* 148 441 * This routine busy-waits for the drive status to be not "busy". ··· 120 483 * setting a timer to wake up at half second intervals thereafter, 121 484 * until timeout is achieved, before timing out. 
122 485 */ 123 - static int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad, unsigned long timeout, u8 *rstat) 486 + static int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad, 487 + unsigned long timeout, u8 *rstat) 124 488 { 125 489 ide_hwif_t *hwif = drive->hwif; 126 490 const struct ide_tp_ops *tp_ops = hwif->tp_ops; ··· 179 541 * The caller should return the updated value of "startstop" in this case, 180 542 * "startstop" is unchanged when the function returns 0. 181 543 */ 182 - int ide_wait_stat(ide_startstop_t *startstop, ide_drive_t *drive, u8 good, u8 bad, unsigned long timeout) 544 + int ide_wait_stat(ide_startstop_t *startstop, ide_drive_t *drive, u8 good, 545 + u8 bad, unsigned long timeout) 183 546 { 184 547 int err; 185 548 u8 stat; ··· 200 561 201 562 return err; 202 563 } 203 - 204 564 EXPORT_SYMBOL(ide_wait_stat); 205 565 206 566 /** ··· 220 582 return 1; 221 583 return 0; 222 584 } 223 - 224 585 EXPORT_SYMBOL_GPL(ide_in_drive_list); 225 586 226 587 /* ··· 244 607 * All hosts that use the 80c ribbon must use! 245 608 * The name is derived from upper byte of word 93 and the 80c ribbon. 
246 609 */ 247 - u8 eighty_ninty_three (ide_drive_t *drive) 610 + u8 eighty_ninty_three(ide_drive_t *drive) 248 611 { 249 612 ide_hwif_t *hwif = drive->hwif; 250 613 u16 *id = drive->id; ··· 289 652 290 653 int ide_driveid_update(ide_drive_t *drive) 291 654 { 292 - ide_hwif_t *hwif = drive->hwif; 293 - const struct ide_tp_ops *tp_ops = hwif->tp_ops; 294 655 u16 *id; 295 - unsigned long flags; 296 - u8 stat; 656 + int rc; 297 657 298 - /* 299 - * Re-read drive->id for possible DMA mode 300 - * change (copied from ide-probe.c) 301 - */ 658 + id = kmalloc(SECTOR_SIZE, GFP_ATOMIC); 659 + if (id == NULL) 660 + return 0; 302 661 303 662 SELECT_MASK(drive, 1); 304 - tp_ops->set_irq(hwif, 0); 305 - msleep(50); 306 - tp_ops->exec_command(hwif, ATA_CMD_ID_ATA); 307 - 308 - if (ide_busy_sleep(hwif, WAIT_WORSTCASE, 1)) { 309 - SELECT_MASK(drive, 0); 310 - return 0; 311 - } 312 - 313 - msleep(50); /* wait for IRQ and ATA_DRQ */ 314 - stat = tp_ops->read_status(hwif); 315 - 316 - if (!OK_STAT(stat, ATA_DRQ, BAD_R_STAT)) { 317 - SELECT_MASK(drive, 0); 318 - printk("%s: CHECK for good STATUS\n", drive->name); 319 - return 0; 320 - } 321 - local_irq_save(flags); 663 + rc = ide_dev_read_id(drive, ATA_CMD_ID_ATA, id); 322 664 SELECT_MASK(drive, 0); 323 - id = kmalloc(SECTOR_SIZE, GFP_ATOMIC); 324 - if (!id) { 325 - local_irq_restore(flags); 326 - return 0; 327 - } 328 - tp_ops->input_data(drive, NULL, id, SECTOR_SIZE); 329 - (void)tp_ops->read_status(hwif); /* clear drive IRQ */ 330 - local_irq_enable(); 331 - local_irq_restore(flags); 332 - ide_fix_driveid(id); 665 + 666 + if (rc) 667 + goto out_err; 333 668 334 669 drive->id[ATA_ID_UDMA_MODES] = id[ATA_ID_UDMA_MODES]; 335 670 drive->id[ATA_ID_MWDMA_MODES] = id[ATA_ID_MWDMA_MODES]; ··· 314 705 ide_dma_off(drive); 315 706 316 707 return 1; 708 + out_err: 709 + SELECT_MASK(drive, 0); 710 + if (rc == 2) 711 + printk(KERN_ERR "%s: %s: bad status\n", drive->name, __func__); 712 + kfree(id); 713 + return 0; 317 714 } 318 715 319 716 int 
ide_config_drive_speed(ide_drive_t *drive, u8 speed) ··· 346 731 * but for some reason these don't work at 347 732 * this point (lost interrupt). 348 733 */ 349 - /* 350 - * Select the drive, and issue the SETFEATURES command 351 - */ 352 - disable_irq_nosync(hwif->irq); 353 - 734 + 354 735 /* 355 736 * FIXME: we race against the running IRQ here if 356 737 * this is called from non IRQ context. If we use 357 738 * disable_irq() we hang on the error path. Work 358 739 * is needed. 359 740 */ 360 - 741 + disable_irq_nosync(hwif->irq); 742 + 361 743 udelay(1); 362 744 SELECT_DRIVE(drive); 363 745 SELECT_MASK(drive, 1); ··· 424 812 * 425 813 * See also ide_execute_command 426 814 */ 427 - static void __ide_set_handler (ide_drive_t *drive, ide_handler_t *handler, 428 - unsigned int timeout, ide_expiry_t *expiry) 815 + void __ide_set_handler(ide_drive_t *drive, ide_handler_t *handler, 816 + unsigned int timeout, ide_expiry_t *expiry) 429 817 { 430 818 ide_hwif_t *hwif = drive->hwif; 431 819 ··· 447 835 __ide_set_handler(drive, handler, timeout, expiry); 448 836 spin_unlock_irqrestore(&hwif->lock, flags); 449 837 } 450 - 451 838 EXPORT_SYMBOL(ide_set_handler); 452 - 839 + 453 840 /** 454 841 * ide_execute_command - execute an IDE command 455 842 * @drive: IDE drive to issue the command against ··· 458 847 * @expiry: handler to run on timeout 459 848 * 460 849 * Helper function to issue an IDE command. This handles the 461 - * atomicity requirements, command timing and ensures that the 850 + * atomicity requirements, command timing and ensures that the 462 851 * handler and IRQ setup do not race. All IDE command kick off 463 852 * should go via this function or do equivalent locking. 
464 853 */ ··· 495 884 } 496 885 EXPORT_SYMBOL_GPL(ide_execute_pkt_cmd); 497 886 498 - static inline void ide_complete_drive_reset(ide_drive_t *drive, int err) 499 - { 500 - struct request *rq = drive->hwif->rq; 501 - 502 - if (rq && blk_special_request(rq) && rq->cmd[0] == REQ_DRIVE_RESET) 503 - ide_end_request(drive, err ? err : 1, 0); 504 - } 505 - 506 - /* needed below */ 507 - static ide_startstop_t do_reset1 (ide_drive_t *, int); 508 - 509 - /* 510 - * atapi_reset_pollfunc() gets invoked to poll the interface for completion every 50ms 511 - * during an atapi drive reset operation. If the drive has not yet responded, 512 - * and we have not yet hit our maximum waiting time, then the timer is restarted 513 - * for another 50ms. 514 - */ 515 - static ide_startstop_t atapi_reset_pollfunc (ide_drive_t *drive) 516 - { 517 - ide_hwif_t *hwif = drive->hwif; 518 - u8 stat; 519 - 520 - SELECT_DRIVE(drive); 521 - udelay (10); 522 - stat = hwif->tp_ops->read_status(hwif); 523 - 524 - if (OK_STAT(stat, 0, ATA_BUSY)) 525 - printk("%s: ATAPI reset complete\n", drive->name); 526 - else { 527 - if (time_before(jiffies, hwif->poll_timeout)) { 528 - ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20, NULL); 529 - /* continue polling */ 530 - return ide_started; 531 - } 532 - /* end of polling */ 533 - hwif->polling = 0; 534 - printk("%s: ATAPI reset timed-out, status=0x%02x\n", 535 - drive->name, stat); 536 - /* do it the old fashioned way */ 537 - return do_reset1(drive, 1); 538 - } 539 - /* done polling */ 540 - hwif->polling = 0; 541 - ide_complete_drive_reset(drive, 0); 542 - return ide_stopped; 543 - } 544 - 545 - static void ide_reset_report_error(ide_hwif_t *hwif, u8 err) 546 - { 547 - static const char *err_master_vals[] = 548 - { NULL, "passed", "formatter device error", 549 - "sector buffer error", "ECC circuitry error", 550 - "controlling MPU error" }; 551 - 552 - u8 err_master = err & 0x7f; 553 - 554 - printk(KERN_ERR "%s: reset: master: ", hwif->name); 555 - if 
(err_master && err_master < 6) 556 - printk(KERN_CONT "%s", err_master_vals[err_master]); 557 - else 558 - printk(KERN_CONT "error (0x%02x?)", err); 559 - if (err & 0x80) 560 - printk(KERN_CONT "; slave: failed"); 561 - printk(KERN_CONT "\n"); 562 - } 563 - 564 - /* 565 - * reset_pollfunc() gets invoked to poll the interface for completion every 50ms 566 - * during an ide reset operation. If the drives have not yet responded, 567 - * and we have not yet hit our maximum waiting time, then the timer is restarted 568 - * for another 50ms. 569 - */ 570 - static ide_startstop_t reset_pollfunc (ide_drive_t *drive) 571 - { 572 - ide_hwif_t *hwif = drive->hwif; 573 - const struct ide_port_ops *port_ops = hwif->port_ops; 574 - u8 tmp; 575 - int err = 0; 576 - 577 - if (port_ops && port_ops->reset_poll) { 578 - err = port_ops->reset_poll(drive); 579 - if (err) { 580 - printk(KERN_ERR "%s: host reset_poll failure for %s.\n", 581 - hwif->name, drive->name); 582 - goto out; 583 - } 584 - } 585 - 586 - tmp = hwif->tp_ops->read_status(hwif); 587 - 588 - if (!OK_STAT(tmp, 0, ATA_BUSY)) { 589 - if (time_before(jiffies, hwif->poll_timeout)) { 590 - ide_set_handler(drive, &reset_pollfunc, HZ/20, NULL); 591 - /* continue polling */ 592 - return ide_started; 593 - } 594 - printk("%s: reset timed-out, status=0x%02x\n", hwif->name, tmp); 595 - drive->failures++; 596 - err = -EIO; 597 - } else { 598 - tmp = ide_read_error(drive); 599 - 600 - if (tmp == 1) { 601 - printk(KERN_INFO "%s: reset: success\n", hwif->name); 602 - drive->failures = 0; 603 - } else { 604 - ide_reset_report_error(hwif, tmp); 605 - drive->failures++; 606 - err = -EIO; 607 - } 608 - } 609 - out: 610 - hwif->polling = 0; /* done polling */ 611 - ide_complete_drive_reset(drive, err); 612 - return ide_stopped; 613 - } 614 - 615 - static void ide_disk_pre_reset(ide_drive_t *drive) 616 - { 617 - int legacy = (drive->id[ATA_ID_CFS_ENABLE_2] & 0x0400) ? 
0 : 1; 618 - 619 - drive->special.all = 0; 620 - drive->special.b.set_geometry = legacy; 621 - drive->special.b.recalibrate = legacy; 622 - 623 - drive->mult_count = 0; 624 - drive->dev_flags &= ~IDE_DFLAG_PARKED; 625 - 626 - if ((drive->dev_flags & IDE_DFLAG_KEEP_SETTINGS) == 0 && 627 - (drive->dev_flags & IDE_DFLAG_USING_DMA) == 0) 628 - drive->mult_req = 0; 629 - 630 - if (drive->mult_req != drive->mult_count) 631 - drive->special.b.set_multmode = 1; 632 - } 633 - 634 - static void pre_reset(ide_drive_t *drive) 635 - { 636 - const struct ide_port_ops *port_ops = drive->hwif->port_ops; 637 - 638 - if (drive->media == ide_disk) 639 - ide_disk_pre_reset(drive); 640 - else 641 - drive->dev_flags |= IDE_DFLAG_POST_RESET; 642 - 643 - if (drive->dev_flags & IDE_DFLAG_USING_DMA) { 644 - if (drive->crc_count) 645 - ide_check_dma_crc(drive); 646 - else 647 - ide_dma_off(drive); 648 - } 649 - 650 - if ((drive->dev_flags & IDE_DFLAG_KEEP_SETTINGS) == 0) { 651 - if ((drive->dev_flags & IDE_DFLAG_USING_DMA) == 0) { 652 - drive->dev_flags &= ~IDE_DFLAG_UNMASK; 653 - drive->io_32bit = 0; 654 - } 655 - return; 656 - } 657 - 658 - if (port_ops && port_ops->pre_reset) 659 - port_ops->pre_reset(drive); 660 - 661 - if (drive->current_speed != 0xff) 662 - drive->desired_speed = drive->current_speed; 663 - drive->current_speed = 0xff; 664 - } 665 - 666 - /* 667 - * do_reset1() attempts to recover a confused drive by resetting it. 668 - * Unfortunately, resetting a disk drive actually resets all devices on 669 - * the same interface, so it can really be thought of as resetting the 670 - * interface rather than resetting the drive. 671 - * 672 - * ATAPI devices have their own reset mechanism which allows them to be 673 - * individually reset without clobbering other devices on the same interface. 674 - * 675 - * Unfortunately, the IDE interface does not generate an interrupt to let 676 - * us know when the reset operation has finished, so we must poll for this. 
677 - * Equally poor, though, is the fact that this may a very long time to complete, 678 - * (up to 30 seconds worstcase). So, instead of busy-waiting here for it, 679 - * we set a timer to poll at 50ms intervals. 680 - */ 681 - static ide_startstop_t do_reset1 (ide_drive_t *drive, int do_not_try_atapi) 682 - { 683 - ide_hwif_t *hwif = drive->hwif; 684 - struct ide_io_ports *io_ports = &hwif->io_ports; 685 - const struct ide_tp_ops *tp_ops = hwif->tp_ops; 686 - const struct ide_port_ops *port_ops; 687 - ide_drive_t *tdrive; 688 - unsigned long flags, timeout; 689 - int i; 690 - DEFINE_WAIT(wait); 691 - 692 - spin_lock_irqsave(&hwif->lock, flags); 693 - 694 - /* We must not reset with running handlers */ 695 - BUG_ON(hwif->handler != NULL); 696 - 697 - /* For an ATAPI device, first try an ATAPI SRST. */ 698 - if (drive->media != ide_disk && !do_not_try_atapi) { 699 - pre_reset(drive); 700 - SELECT_DRIVE(drive); 701 - udelay (20); 702 - tp_ops->exec_command(hwif, ATA_CMD_DEV_RESET); 703 - ndelay(400); 704 - hwif->poll_timeout = jiffies + WAIT_WORSTCASE; 705 - hwif->polling = 1; 706 - __ide_set_handler(drive, &atapi_reset_pollfunc, HZ/20, NULL); 707 - spin_unlock_irqrestore(&hwif->lock, flags); 708 - return ide_started; 709 - } 710 - 711 - /* We must not disturb devices in the IDE_DFLAG_PARKED state. 
*/ 712 - do { 713 - unsigned long now; 714 - 715 - prepare_to_wait(&ide_park_wq, &wait, TASK_UNINTERRUPTIBLE); 716 - timeout = jiffies; 717 - ide_port_for_each_dev(i, tdrive, hwif) { 718 - if (tdrive->dev_flags & IDE_DFLAG_PRESENT && 719 - tdrive->dev_flags & IDE_DFLAG_PARKED && 720 - time_after(tdrive->sleep, timeout)) 721 - timeout = tdrive->sleep; 722 - } 723 - 724 - now = jiffies; 725 - if (time_before_eq(timeout, now)) 726 - break; 727 - 728 - spin_unlock_irqrestore(&hwif->lock, flags); 729 - timeout = schedule_timeout_uninterruptible(timeout - now); 730 - spin_lock_irqsave(&hwif->lock, flags); 731 - } while (timeout); 732 - finish_wait(&ide_park_wq, &wait); 733 - 734 - /* 735 - * First, reset any device state data we were maintaining 736 - * for any of the drives on this interface. 737 - */ 738 - ide_port_for_each_dev(i, tdrive, hwif) 739 - pre_reset(tdrive); 740 - 741 - if (io_ports->ctl_addr == 0) { 742 - spin_unlock_irqrestore(&hwif->lock, flags); 743 - ide_complete_drive_reset(drive, -ENXIO); 744 - return ide_stopped; 745 - } 746 - 747 - /* 748 - * Note that we also set nIEN while resetting the device, 749 - * to mask unwanted interrupts from the interface during the reset. 750 - * However, due to the design of PC hardware, this will cause an 751 - * immediate interrupt due to the edge transition it produces. 752 - * This single interrupt gives us a "fast poll" for drives that 753 - * recover from reset very quickly, saving us the first 50ms wait time. 
754 - * 755 - * TODO: add ->softreset method and stop abusing ->set_irq 756 - */ 757 - /* set SRST and nIEN */ 758 - tp_ops->set_irq(hwif, 4); 759 - /* more than enough time */ 760 - udelay(10); 761 - /* clear SRST, leave nIEN (unless device is on the quirk list) */ 762 - tp_ops->set_irq(hwif, drive->quirk_list == 2); 763 - /* more than enough time */ 764 - udelay(10); 765 - hwif->poll_timeout = jiffies + WAIT_WORSTCASE; 766 - hwif->polling = 1; 767 - __ide_set_handler(drive, &reset_pollfunc, HZ/20, NULL); 768 - 769 - /* 770 - * Some weird controller like resetting themselves to a strange 771 - * state when the disks are reset this way. At least, the Winbond 772 - * 553 documentation says that 773 - */ 774 - port_ops = hwif->port_ops; 775 - if (port_ops && port_ops->resetproc) 776 - port_ops->resetproc(drive); 777 - 778 - spin_unlock_irqrestore(&hwif->lock, flags); 779 - return ide_started; 780 - } 781 - 782 - /* 783 - * ide_do_reset() is the entry point to the drive/interface reset code. 784 - */ 785 - 786 - ide_startstop_t ide_do_reset (ide_drive_t *drive) 787 - { 788 - return do_reset1(drive, 0); 789 - } 790 - 791 - EXPORT_SYMBOL(ide_do_reset); 792 - 793 887 /* 794 888 * ide_wait_not_busy() waits for the currently selected device on the hwif 795 889 * to report a non-busy status, see comments in ide_probe_port(). ··· 503 1187 { 504 1188 u8 stat = 0; 505 1189 506 - while(timeout--) { 1190 + while (timeout--) { 507 1191 /* 508 1192 * Turn this into a schedule() sleep once I'm sure 509 1193 * about locking issues (2.5 work ?).
-240
drivers/ide/ide-lib.c
··· 5 5 #include <linux/ide.h> 6 6 #include <linux/bitops.h> 7 7 8 - static const char *udma_str[] = 9 - { "UDMA/16", "UDMA/25", "UDMA/33", "UDMA/44", 10 - "UDMA/66", "UDMA/100", "UDMA/133", "UDMA7" }; 11 - static const char *mwdma_str[] = 12 - { "MWDMA0", "MWDMA1", "MWDMA2" }; 13 - static const char *swdma_str[] = 14 - { "SWDMA0", "SWDMA1", "SWDMA2" }; 15 - static const char *pio_str[] = 16 - { "PIO0", "PIO1", "PIO2", "PIO3", "PIO4", "PIO5" }; 17 - 18 - /** 19 - * ide_xfer_verbose - return IDE mode names 20 - * @mode: transfer mode 21 - * 22 - * Returns a constant string giving the name of the mode 23 - * requested. 24 - */ 25 - 26 - const char *ide_xfer_verbose(u8 mode) 27 - { 28 - const char *s; 29 - u8 i = mode & 0xf; 30 - 31 - if (mode >= XFER_UDMA_0 && mode <= XFER_UDMA_7) 32 - s = udma_str[i]; 33 - else if (mode >= XFER_MW_DMA_0 && mode <= XFER_MW_DMA_2) 34 - s = mwdma_str[i]; 35 - else if (mode >= XFER_SW_DMA_0 && mode <= XFER_SW_DMA_2) 36 - s = swdma_str[i]; 37 - else if (mode >= XFER_PIO_0 && mode <= XFER_PIO_5) 38 - s = pio_str[i & 0x7]; 39 - else if (mode == XFER_PIO_SLOW) 40 - s = "PIO SLOW"; 41 - else 42 - s = "XFER ERROR"; 43 - 44 - return s; 45 - } 46 - EXPORT_SYMBOL(ide_xfer_verbose); 47 - 48 - /** 49 - * ide_rate_filter - filter transfer mode 50 - * @drive: IDE device 51 - * @speed: desired speed 52 - * 53 - * Given the available transfer modes this function returns 54 - * the best available speed at or below the speed requested. 
55 - * 56 - * TODO: check device PIO capabilities 57 - */ 58 - 59 - static u8 ide_rate_filter(ide_drive_t *drive, u8 speed) 60 - { 61 - ide_hwif_t *hwif = drive->hwif; 62 - u8 mode = ide_find_dma_mode(drive, speed); 63 - 64 - if (mode == 0) { 65 - if (hwif->pio_mask) 66 - mode = fls(hwif->pio_mask) - 1 + XFER_PIO_0; 67 - else 68 - mode = XFER_PIO_4; 69 - } 70 - 71 - /* printk("%s: mode 0x%02x, speed 0x%02x\n", __func__, mode, speed); */ 72 - 73 - return min(speed, mode); 74 - } 75 - 76 - /** 77 - * ide_get_best_pio_mode - get PIO mode from drive 78 - * @drive: drive to consider 79 - * @mode_wanted: preferred mode 80 - * @max_mode: highest allowed mode 81 - * 82 - * This routine returns the recommended PIO settings for a given drive, 83 - * based on the drive->id information and the ide_pio_blacklist[]. 84 - * 85 - * Drive PIO mode is auto-selected if 255 is passed as mode_wanted. 86 - * This is used by most chipset support modules when "auto-tuning". 87 - */ 88 - 89 - u8 ide_get_best_pio_mode(ide_drive_t *drive, u8 mode_wanted, u8 max_mode) 90 - { 91 - u16 *id = drive->id; 92 - int pio_mode = -1, overridden = 0; 93 - 94 - if (mode_wanted != 255) 95 - return min_t(u8, mode_wanted, max_mode); 96 - 97 - if ((drive->hwif->host_flags & IDE_HFLAG_PIO_NO_BLACKLIST) == 0) 98 - pio_mode = ide_scan_pio_blacklist((char *)&id[ATA_ID_PROD]); 99 - 100 - if (pio_mode != -1) { 101 - printk(KERN_INFO "%s: is on PIO blacklist\n", drive->name); 102 - } else { 103 - pio_mode = id[ATA_ID_OLD_PIO_MODES] >> 8; 104 - if (pio_mode > 2) { /* 2 is maximum allowed tPIO value */ 105 - pio_mode = 2; 106 - overridden = 1; 107 - } 108 - 109 - if (id[ATA_ID_FIELD_VALID] & 2) { /* ATA2? 
*/ 110 - if (ata_id_has_iordy(id)) { 111 - if (id[ATA_ID_PIO_MODES] & 7) { 112 - overridden = 0; 113 - if (id[ATA_ID_PIO_MODES] & 4) 114 - pio_mode = 5; 115 - else if (id[ATA_ID_PIO_MODES] & 2) 116 - pio_mode = 4; 117 - else 118 - pio_mode = 3; 119 - } 120 - } 121 - } 122 - 123 - if (overridden) 124 - printk(KERN_INFO "%s: tPIO > 2, assuming tPIO = 2\n", 125 - drive->name); 126 - } 127 - 128 - if (pio_mode > max_mode) 129 - pio_mode = max_mode; 130 - 131 - return pio_mode; 132 - } 133 - EXPORT_SYMBOL_GPL(ide_get_best_pio_mode); 134 - 135 - /* req_pio == "255" for auto-tune */ 136 - void ide_set_pio(ide_drive_t *drive, u8 req_pio) 137 - { 138 - ide_hwif_t *hwif = drive->hwif; 139 - const struct ide_port_ops *port_ops = hwif->port_ops; 140 - u8 host_pio, pio; 141 - 142 - if (port_ops == NULL || port_ops->set_pio_mode == NULL || 143 - (hwif->host_flags & IDE_HFLAG_NO_SET_MODE)) 144 - return; 145 - 146 - BUG_ON(hwif->pio_mask == 0x00); 147 - 148 - host_pio = fls(hwif->pio_mask) - 1; 149 - 150 - pio = ide_get_best_pio_mode(drive, req_pio, host_pio); 151 - 152 - /* 153 - * TODO: 154 - * - report device max PIO mode 155 - * - check req_pio != 255 against device max PIO mode 156 - */ 157 - printk(KERN_DEBUG "%s: host max PIO%d wanted PIO%d%s selected PIO%d\n", 158 - drive->name, host_pio, req_pio, 159 - req_pio == 255 ? 
"(auto-tune)" : "", pio); 160 - 161 - (void)ide_set_pio_mode(drive, XFER_PIO_0 + pio); 162 - } 163 - EXPORT_SYMBOL_GPL(ide_set_pio); 164 - 165 8 /** 166 9 * ide_toggle_bounce - handle bounce buffering 167 10 * @drive: drive to update ··· 29 186 30 187 if (drive->queue) 31 188 blk_queue_bounce_limit(drive->queue, addr); 32 - } 33 - 34 - int ide_set_pio_mode(ide_drive_t *drive, const u8 mode) 35 - { 36 - ide_hwif_t *hwif = drive->hwif; 37 - const struct ide_port_ops *port_ops = hwif->port_ops; 38 - 39 - if (hwif->host_flags & IDE_HFLAG_NO_SET_MODE) 40 - return 0; 41 - 42 - if (port_ops == NULL || port_ops->set_pio_mode == NULL) 43 - return -1; 44 - 45 - /* 46 - * TODO: temporary hack for some legacy host drivers that didn't 47 - * set transfer mode on the device in ->set_pio_mode method... 48 - */ 49 - if (port_ops->set_dma_mode == NULL) { 50 - port_ops->set_pio_mode(drive, mode - XFER_PIO_0); 51 - return 0; 52 - } 53 - 54 - if (hwif->host_flags & IDE_HFLAG_POST_SET_MODE) { 55 - if (ide_config_drive_speed(drive, mode)) 56 - return -1; 57 - port_ops->set_pio_mode(drive, mode - XFER_PIO_0); 58 - return 0; 59 - } else { 60 - port_ops->set_pio_mode(drive, mode - XFER_PIO_0); 61 - return ide_config_drive_speed(drive, mode); 62 - } 63 - } 64 - 65 - int ide_set_dma_mode(ide_drive_t *drive, const u8 mode) 66 - { 67 - ide_hwif_t *hwif = drive->hwif; 68 - const struct ide_port_ops *port_ops = hwif->port_ops; 69 - 70 - if (hwif->host_flags & IDE_HFLAG_NO_SET_MODE) 71 - return 0; 72 - 73 - if (port_ops == NULL || port_ops->set_dma_mode == NULL) 74 - return -1; 75 - 76 - if (hwif->host_flags & IDE_HFLAG_POST_SET_MODE) { 77 - if (ide_config_drive_speed(drive, mode)) 78 - return -1; 79 - port_ops->set_dma_mode(drive, mode); 80 - return 0; 81 - } else { 82 - port_ops->set_dma_mode(drive, mode); 83 - return ide_config_drive_speed(drive, mode); 84 - } 85 - } 86 - EXPORT_SYMBOL_GPL(ide_set_dma_mode); 87 - 88 - /** 89 - * ide_set_xfer_rate - set transfer rate 90 - * @drive: drive to set 
91 - * @rate: speed to attempt to set 92 - * 93 - * General helper for setting the speed of an IDE device. This 94 - * function knows about user enforced limits from the configuration 95 - * which ->set_pio_mode/->set_dma_mode does not. 96 - */ 97 - 98 - int ide_set_xfer_rate(ide_drive_t *drive, u8 rate) 99 - { 100 - ide_hwif_t *hwif = drive->hwif; 101 - const struct ide_port_ops *port_ops = hwif->port_ops; 102 - 103 - if (port_ops == NULL || port_ops->set_dma_mode == NULL || 104 - (hwif->host_flags & IDE_HFLAG_NO_SET_MODE)) 105 - return -1; 106 - 107 - rate = ide_rate_filter(drive, rate); 108 - 109 - BUG_ON(rate < XFER_PIO_0); 110 - 111 - if (rate >= XFER_PIO_0 && rate <= XFER_PIO_5) 112 - return ide_set_pio_mode(drive, rate); 113 - 114 - return ide_set_dma_mode(drive, rate); 115 189 } 116 190 117 191 static void ide_dump_opcode(ide_drive_t *drive)
+25
drivers/ide/ide-park.c
··· 1 1 #include <linux/kernel.h> 2 2 #include <linux/ide.h> 3 + #include <linux/hdreg.h> 3 4 #include <linux/jiffies.h> 4 5 #include <linux/blkdev.h> 5 6 ··· 59 58 60 59 out: 61 60 return; 61 + } 62 + 63 + ide_startstop_t ide_do_park_unpark(ide_drive_t *drive, struct request *rq) 64 + { 65 + ide_task_t task; 66 + struct ide_taskfile *tf = &task.tf; 67 + 68 + memset(&task, 0, sizeof(task)); 69 + if (rq->cmd[0] == REQ_PARK_HEADS) { 70 + drive->sleep = *(unsigned long *)rq->special; 71 + drive->dev_flags |= IDE_DFLAG_SLEEPING; 72 + tf->command = ATA_CMD_IDLEIMMEDIATE; 73 + tf->feature = 0x44; 74 + tf->lbal = 0x4c; 75 + tf->lbam = 0x4e; 76 + tf->lbah = 0x55; 77 + task.tf_flags |= IDE_TFLAG_CUSTOM_HANDLER; 78 + } else /* cmd == REQ_UNPARK_HEADS */ 79 + tf->command = ATA_CMD_CHK_POWER; 80 + 81 + task.tf_flags |= IDE_TFLAG_TF | IDE_TFLAG_DEVICE; 82 + task.rq = rq; 83 + drive->hwif->data_phase = task.data_phase = TASKFILE_NO_DATA; 84 + return do_rw_taskfile(drive, &task); 62 85 } 63 86 64 87 ssize_t ide_park_show(struct device *dev, struct device_attribute *attr,
+1 -3
drivers/ide/ide-pci-generic.c
··· 33 33 module_param_named(all_generic_ide, ide_generic_all, bool, 0444); 34 34 MODULE_PARM_DESC(all_generic_ide, "IDE generic will claim all unknown PCI IDE storage controllers."); 35 35 36 - #define IDE_HFLAGS_UMC (IDE_HFLAG_NO_DMA | IDE_HFLAG_FORCE_LEGACY_IRQS) 37 - 38 36 #define DECLARE_GENERIC_PCI_DEV(extra_flags) \ 39 37 { \ 40 38 .name = DRV_NAME, \ ··· 59 61 /* 2: SAMURAI / HT6565 / HINT_IDE */ 60 62 DECLARE_GENERIC_PCI_DEV(0), 61 63 /* 3: UM8673F / UM8886A / UM8886BF */ 62 - DECLARE_GENERIC_PCI_DEV(IDE_HFLAGS_UMC), 64 + DECLARE_GENERIC_PCI_DEV(IDE_HFLAG_NO_DMA), 63 65 /* 4: VIA_IDE / OPTI621V / Piccolo010{2,3,5} */ 64 66 DECLARE_GENERIC_PCI_DEV(IDE_HFLAG_NO_AUTODMA), 65 67
+58 -199
drivers/ide/ide-probe.c
··· 181 181 * do_identify - identify a drive 182 182 * @drive: drive to identify 183 183 * @cmd: command used 184 + * @id: buffer for IDENTIFY data 184 185 * 185 186 * Called when we have issued a drive identify command to 186 187 * read and parse the results. This function is run with 187 188 * interrupts disabled. 188 189 */ 189 190 190 - static void do_identify(ide_drive_t *drive, u8 cmd) 191 + static void do_identify(ide_drive_t *drive, u8 cmd, u16 *id) 191 192 { 192 193 ide_hwif_t *hwif = drive->hwif; 193 - u16 *id = drive->id; 194 194 char *m = (char *)&id[ATA_ID_PROD]; 195 195 unsigned long flags; 196 196 int bswap = 1; ··· 233 233 drive->dev_flags |= IDE_DFLAG_PRESENT; 234 234 drive->dev_flags &= ~IDE_DFLAG_DEAD; 235 235 236 - /* 237 - * Check for an ATAPI device 238 - */ 239 - if (cmd == ATA_CMD_ID_ATAPI) 240 - ide_classify_atapi_dev(drive); 241 - else 242 - /* 243 - * Not an ATAPI device: looks like a "regular" hard disk 244 - */ 245 - ide_classify_ata_dev(drive); 246 236 return; 247 237 err_misc: 248 238 kfree(id); ··· 240 250 } 241 251 242 252 /** 243 - * actual_try_to_identify - send ata/atapi identify 253 + * ide_dev_read_id - send ATA/ATAPI IDENTIFY command 244 254 * @drive: drive to identify 245 255 * @cmd: command to use 256 + * @id: buffer for IDENTIFY data 246 257 * 247 - * try_to_identify() sends an ATA(PI) IDENTIFY request to a drive 248 - * and waits for a response. It also monitors irqs while this is 249 - * happening, in hope of automatically determining which one is 250 - * being used by the interface. 258 + * Sends an ATA(PI) IDENTIFY request to a drive and waits for a response. 
251 259 * 252 260 * Returns: 0 device was identified 253 261 * 1 device timed-out (no response to identify request) 254 262 * 2 device aborted the command (refused to identify itself) 255 263 */ 256 264 257 - static int actual_try_to_identify (ide_drive_t *drive, u8 cmd) 265 + int ide_dev_read_id(ide_drive_t *drive, u8 cmd, u16 *id) 258 266 { 259 267 ide_hwif_t *hwif = drive->hwif; 260 268 struct ide_io_ports *io_ports = &hwif->io_ports; ··· 260 272 int use_altstatus = 0, rc; 261 273 unsigned long timeout; 262 274 u8 s = 0, a = 0; 275 + 276 + /* 277 + * Disable device IRQ. Otherwise we'll get spurious interrupts 278 + * during the identify phase that the IRQ handler isn't expecting. 279 + */ 280 + if (io_ports->ctl_addr) 281 + tp_ops->set_irq(hwif, 0); 263 282 264 283 /* take a deep breath */ 265 284 msleep(50); ··· 312 317 313 318 if (OK_STAT(s, ATA_DRQ, BAD_R_STAT)) { 314 319 /* drive returned ID */ 315 - do_identify(drive, cmd); 320 + do_identify(drive, cmd, id); 316 321 /* drive responded with ID */ 317 322 rc = 0; 318 323 /* clear drive IRQ */ ··· 322 327 rc = 2; 323 328 } 324 329 return rc; 325 - } 326 - 327 - /** 328 - * try_to_identify - try to identify a drive 329 - * @drive: drive to probe 330 - * @cmd: command to use 331 - * 332 - * Issue the identify command and then do IRQ probing to 333 - * complete the identification when needed by finding the 334 - * IRQ the drive is attached to 335 - */ 336 - 337 - static int try_to_identify (ide_drive_t *drive, u8 cmd) 338 - { 339 - ide_hwif_t *hwif = drive->hwif; 340 - const struct ide_tp_ops *tp_ops = hwif->tp_ops; 341 - int retval; 342 - int autoprobe = 0; 343 - unsigned long cookie = 0; 344 - 345 - /* 346 - * Disable device irq unless we need to 347 - * probe for it. Otherwise we'll get spurious 348 - * interrupts during the identify-phase that 349 - * the irq handler isn't expecting. 
350 - */ 351 - if (hwif->io_ports.ctl_addr) { 352 - if (!hwif->irq) { 353 - autoprobe = 1; 354 - cookie = probe_irq_on(); 355 - } 356 - tp_ops->set_irq(hwif, autoprobe); 357 - } 358 - 359 - retval = actual_try_to_identify(drive, cmd); 360 - 361 - if (autoprobe) { 362 - int irq; 363 - 364 - tp_ops->set_irq(hwif, 0); 365 - /* clear drive IRQ */ 366 - (void)tp_ops->read_status(hwif); 367 - udelay(5); 368 - irq = probe_irq_off(cookie); 369 - if (!hwif->irq) { 370 - if (irq > 0) { 371 - hwif->irq = irq; 372 - } else { 373 - /* Mmmm.. multiple IRQs.. 374 - * don't know which was ours 375 - */ 376 - printk(KERN_ERR "%s: IRQ probe failed (0x%lx)\n", 377 - drive->name, cookie); 378 - } 379 - } 380 - } 381 - return retval; 382 330 } 383 331 384 332 int ide_busy_sleep(ide_hwif_t *hwif, unsigned long timeout, int altstatus) ··· 378 440 { 379 441 ide_hwif_t *hwif = drive->hwif; 380 442 const struct ide_tp_ops *tp_ops = hwif->tp_ops; 443 + u16 *id = drive->id; 381 444 int rc; 382 445 u8 present = !!(drive->dev_flags & IDE_DFLAG_PRESENT), stat; 383 446 ··· 414 475 415 476 if (OK_STAT(stat, ATA_DRDY, ATA_BUSY) || 416 477 present || cmd == ATA_CMD_ID_ATAPI) { 417 - /* send cmd and wait */ 418 - if ((rc = try_to_identify(drive, cmd))) { 478 + rc = ide_dev_read_id(drive, cmd, id); 479 + if (rc) 419 480 /* failed: try again */ 420 - rc = try_to_identify(drive,cmd); 421 - } 481 + rc = ide_dev_read_id(drive, cmd, id); 422 482 423 483 stat = tp_ops->read_status(hwif); 424 484 ··· 432 494 msleep(50); 433 495 tp_ops->exec_command(hwif, ATA_CMD_DEV_RESET); 434 496 (void)ide_busy_sleep(hwif, WAIT_WORSTCASE, 0); 435 - rc = try_to_identify(drive, cmd); 497 + rc = ide_dev_read_id(drive, cmd, id); 436 498 } 437 499 438 500 /* ensure drive IRQ is clear */ ··· 455 517 return rc; 456 518 } 457 519 458 - /* 459 - * 460 - */ 461 - static void enable_nest (ide_drive_t *drive) 462 - { 463 - ide_hwif_t *hwif = drive->hwif; 464 - const struct ide_tp_ops *tp_ops = hwif->tp_ops; 465 - u8 stat; 466 - 467 - 
printk(KERN_INFO "%s: enabling %s -- ", 468 - hwif->name, (char *)&drive->id[ATA_ID_PROD]); 469 - 470 - SELECT_DRIVE(drive); 471 - msleep(50); 472 - tp_ops->exec_command(hwif, ATA_EXABYTE_ENABLE_NEST); 473 - 474 - if (ide_busy_sleep(hwif, WAIT_WORSTCASE, 0)) { 475 - printk(KERN_CONT "failed (timeout)\n"); 476 - return; 477 - } 478 - 479 - msleep(50); 480 - 481 - stat = tp_ops->read_status(hwif); 482 - 483 - if (!OK_STAT(stat, 0, BAD_STAT)) 484 - printk(KERN_CONT "failed (status = 0x%02x)\n", stat); 485 - else 486 - printk(KERN_CONT "success\n"); 487 - } 488 - 489 520 /** 490 521 * probe_for_drives - upper level drive probe 491 522 * @drive: drive to probe for ··· 470 563 static u8 probe_for_drive(ide_drive_t *drive) 471 564 { 472 565 char *m; 566 + int rc; 567 + u8 cmd; 473 568 474 569 /* 475 570 * In order to keep things simple we have an id ··· 495 586 496 587 /* skip probing? */ 497 588 if ((drive->dev_flags & IDE_DFLAG_NOPROBE) == 0) { 498 - retry: 499 589 /* if !(success||timed-out) */ 500 - if (do_probe(drive, ATA_CMD_ID_ATA) >= 2) 590 + cmd = ATA_CMD_ID_ATA; 591 + rc = do_probe(drive, cmd); 592 + if (rc >= 2) { 501 593 /* look for ATAPI device */ 502 - (void)do_probe(drive, ATA_CMD_ID_ATAPI); 594 + cmd = ATA_CMD_ID_ATAPI; 595 + rc = do_probe(drive, cmd); 596 + } 503 597 504 598 if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0) 505 599 /* drive not found */ 506 600 return 0; 507 - 508 - if (strstr(m, "E X A B Y T E N E S T")) { 509 - enable_nest(drive); 510 - goto retry; 511 - } 512 601 513 602 /* identification failed? */ 514 603 if ((drive->dev_flags & IDE_DFLAG_ID_READ) == 0) { ··· 521 614 printk(KERN_WARNING "%s: Unknown device on bus refused identification. 
Ignoring.\n", drive->name); 522 615 drive->dev_flags &= ~IDE_DFLAG_PRESENT; 523 616 } 617 + } else { 618 + if (cmd == ATA_CMD_ID_ATAPI) 619 + ide_classify_atapi_dev(drive); 620 + else 621 + ide_classify_ata_dev(drive); 524 622 } 525 - /* drive was found */ 526 623 } 527 624 528 625 if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0) ··· 690 779 static int ide_probe_port(ide_hwif_t *hwif) 691 780 { 692 781 ide_drive_t *drive; 693 - unsigned long flags; 694 782 unsigned int irqd; 695 783 int i, rc = -ENODEV; 696 784 ··· 707 797 if (irqd) 708 798 disable_irq(hwif->irq); 709 799 710 - local_save_flags(flags); 711 - local_irq_enable_in_hardirq(); 712 - 713 800 if (ide_port_wait_ready(hwif) == -EBUSY) 714 801 printk(KERN_DEBUG "%s: Wait for ready failed before probe !\n", hwif->name); 715 802 ··· 719 812 if (drive->dev_flags & IDE_DFLAG_PRESENT) 720 813 rc = 0; 721 814 } 722 - 723 - local_irq_restore(flags); 724 815 725 816 /* 726 817 * Use cached IRQ number. It might be (and is...) changed by probe ··· 736 831 ide_drive_t *drive; 737 832 int i; 738 833 739 - ide_port_for_each_dev(i, drive, hwif) { 740 - if (drive->dev_flags & IDE_DFLAG_PRESENT) { 741 - if (port_ops && port_ops->quirkproc) 742 - port_ops->quirkproc(drive); 743 - } 834 + ide_port_for_each_present_dev(i, drive, hwif) { 835 + if (port_ops && port_ops->quirkproc) 836 + port_ops->quirkproc(drive); 744 837 } 745 838 746 - ide_port_for_each_dev(i, drive, hwif) { 747 - if (drive->dev_flags & IDE_DFLAG_PRESENT) { 748 - ide_set_max_pio(drive); 839 + ide_port_for_each_present_dev(i, drive, hwif) { 840 + ide_set_max_pio(drive); 749 841 750 - drive->dev_flags |= IDE_DFLAG_NICE1; 842 + drive->dev_flags |= IDE_DFLAG_NICE1; 751 843 752 - if (hwif->dma_ops) 753 - ide_set_dma(drive); 754 - } 755 - } 756 - 757 - ide_port_for_each_dev(i, drive, hwif) { 758 - if (hwif->host_flags & IDE_HFLAG_NO_IO_32BIT) 759 - drive->dev_flags |= IDE_DFLAG_NO_IO_32BIT; 760 - else 761 - drive->dev_flags &= ~IDE_DFLAG_NO_IO_32BIT; 844 + if 
(hwif->dma_ops) 845 + ide_set_dma(drive); 762 846 } 763 847 } 764 848 ··· 818 924 int i, j = 0; 819 925 820 926 mutex_lock(&ide_cfg_mtx); 821 - ide_port_for_each_dev(i, drive, hwif) { 822 - if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0) 823 - continue; 824 - 927 + ide_port_for_each_present_dev(i, drive, hwif) { 825 928 if (ide_init_queue(drive)) { 826 929 printk(KERN_ERR "ide: failed to init %s\n", 827 930 drive->name); ··· 843 952 struct ide_io_ports *io_ports = &hwif->io_ports; 844 953 irq_handler_t irq_handler; 845 954 int sa = 0; 846 - 847 - mutex_lock(&ide_cfg_mtx); 848 - spin_lock_init(&hwif->lock); 849 - 850 - init_timer(&hwif->timer); 851 - hwif->timer.function = &ide_timer_expiry; 852 - hwif->timer.data = (unsigned long)hwif; 853 955 854 956 irq_handler = hwif->host->irq_handler; 855 957 if (irq_handler == NULL) ··· 881 997 printk(KERN_CONT " (serialized)"); 882 998 printk(KERN_CONT "\n"); 883 999 884 - mutex_unlock(&ide_cfg_mtx); 885 1000 return 0; 886 1001 out_up: 887 - mutex_unlock(&ide_cfg_mtx); 888 1002 return 1; 889 1003 } 890 1004 ··· 981 1099 982 1100 static int hwif_init(ide_hwif_t *hwif) 983 1101 { 984 - int old_irq; 985 - 986 1102 if (!hwif->irq) { 987 - hwif->irq = __ide_default_irq(hwif->io_ports.data_addr); 988 - if (!hwif->irq) { 989 - printk(KERN_ERR "%s: disabled, no IRQ\n", hwif->name); 990 - return 0; 991 - } 1103 + printk(KERN_ERR "%s: disabled, no IRQ\n", hwif->name); 1104 + return 0; 992 1105 } 993 1106 994 1107 if (register_blkdev(hwif->major, hwif->name)) ··· 1001 1124 1002 1125 sg_init_table(hwif->sg_table, hwif->sg_max_nents); 1003 1126 1004 - if (init_irq(hwif) == 0) 1005 - goto done; 1006 - 1007 - old_irq = hwif->irq; 1008 - /* 1009 - * It failed to initialise. Find the default IRQ for 1010 - * this port and try that. 
1011 - */ 1012 - hwif->irq = __ide_default_irq(hwif->io_ports.data_addr); 1013 - if (!hwif->irq) { 1014 - printk(KERN_ERR "%s: disabled, unable to get IRQ %d\n", 1015 - hwif->name, old_irq); 1016 - goto out; 1017 - } 1018 1127 if (init_irq(hwif)) { 1019 - printk(KERN_ERR "%s: probed IRQ %d and default IRQ %d failed\n", 1020 - hwif->name, old_irq, hwif->irq); 1128 + printk(KERN_ERR "%s: disabled, unable to get IRQ %d\n", 1129 + hwif->name, hwif->irq); 1021 1130 goto out; 1022 1131 } 1023 - printk(KERN_WARNING "%s: probed IRQ %d failed, using default\n", 1024 - hwif->name, hwif->irq); 1025 1132 1026 - done: 1027 1133 blk_register_region(MKDEV(hwif->major, 0), MAX_DRIVES << PARTN_BITS, 1028 1134 THIS_MODULE, ata_probe, ata_lock, hwif); 1029 1135 return 1; ··· 1021 1161 ide_drive_t *drive; 1022 1162 unsigned int i; 1023 1163 1024 - ide_port_for_each_dev(i, drive, hwif) { 1164 + ide_port_for_each_present_dev(i, drive, hwif) { 1025 1165 struct device *dev = &drive->gendev; 1026 1166 int ret; 1027 - 1028 - if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0) 1029 - continue; 1030 1167 1031 1168 dev_set_name(dev, "%u.%u", hwif->index, i); 1032 1169 dev->parent = &hwif->gendev; ··· 1049 1192 1050 1193 if (hwif->host_flags & IDE_HFLAG_IO_32BIT) 1051 1194 drive->io_32bit = 1; 1195 + if (hwif->host_flags & IDE_HFLAG_NO_IO_32BIT) 1196 + drive->dev_flags |= IDE_DFLAG_NO_IO_32BIT; 1052 1197 if (hwif->host_flags & IDE_HFLAG_UNMASK_IRQS) 1053 1198 drive->dev_flags |= IDE_DFLAG_UNMASK; 1054 1199 if (hwif->host_flags & IDE_HFLAG_NO_UNMASK_IRQS) ··· 1071 1212 1072 1213 if (d->init_iops) 1073 1214 d->init_iops(hwif); 1074 - 1075 - if ((!hwif->irq && (d->host_flags & IDE_HFLAG_LEGACY_IRQS)) || 1076 - (d->host_flags & IDE_HFLAG_FORCE_LEGACY_IRQS)) 1077 - hwif->irq = port ? 15 : 14; 1078 1215 1079 1216 /* ->host_flags may be set by ->init_iops (or even earlier...) 
*/ 1080 1217 hwif->host_flags |= d->host_flags; ··· 1171 1316 hwif->name[1] = 'd'; 1172 1317 hwif->name[2] = 'e'; 1173 1318 hwif->name[3] = '0' + index; 1319 + 1320 + spin_lock_init(&hwif->lock); 1321 + 1322 + init_timer(&hwif->timer); 1323 + hwif->timer.function = &ide_timer_expiry; 1324 + hwif->timer.data = (unsigned long)hwif; 1174 1325 1175 1326 init_completion(&hwif->gendev_rel_comp); 1176 1327 ··· 1428 1567 1429 1568 j++; 1430 1569 1431 - ide_acpi_init(hwif); 1570 + ide_acpi_init_port(hwif); 1432 1571 1433 1572 if (hwif->present) 1434 1573 ide_acpi_port_init_devices(hwif); ··· 1485 1624 ide_drive_t *drive; 1486 1625 int i; 1487 1626 1488 - ide_port_for_each_dev(i, drive, hwif) { 1489 - if (drive->dev_flags & IDE_DFLAG_PRESENT) { 1490 - device_unregister(&drive->gendev); 1491 - wait_for_completion(&drive->gendev_rel_comp); 1492 - } 1627 + ide_port_for_each_present_dev(i, drive, hwif) { 1628 + device_unregister(&drive->gendev); 1629 + wait_for_completion(&drive->gendev_rel_comp); 1493 1630 } 1494 1631 } 1495 1632
+1 -1
drivers/ide/ide-proc.c
··· 600 600 int i; 601 601 602 602 ide_port_for_each_dev(i, drive, hwif) { 603 - if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0 || drive->proc) 603 + if ((drive->dev_flags & IDE_DFLAG_PRESENT) == 0) 604 604 continue; 605 605 606 606 drive->proc = proc_mkdir(drive->name, parent);
+246
drivers/ide/ide-xfer-mode.c
··· 1 + #include <linux/types.h> 2 + #include <linux/string.h> 3 + #include <linux/kernel.h> 4 + #include <linux/interrupt.h> 5 + #include <linux/ide.h> 6 + #include <linux/bitops.h> 7 + 8 + static const char *udma_str[] = 9 + { "UDMA/16", "UDMA/25", "UDMA/33", "UDMA/44", 10 + "UDMA/66", "UDMA/100", "UDMA/133", "UDMA7" }; 11 + static const char *mwdma_str[] = 12 + { "MWDMA0", "MWDMA1", "MWDMA2" }; 13 + static const char *swdma_str[] = 14 + { "SWDMA0", "SWDMA1", "SWDMA2" }; 15 + static const char *pio_str[] = 16 + { "PIO0", "PIO1", "PIO2", "PIO3", "PIO4", "PIO5" }; 17 + 18 + /** 19 + * ide_xfer_verbose - return IDE mode names 20 + * @mode: transfer mode 21 + * 22 + * Returns a constant string giving the name of the mode 23 + * requested. 24 + */ 25 + 26 + const char *ide_xfer_verbose(u8 mode) 27 + { 28 + const char *s; 29 + u8 i = mode & 0xf; 30 + 31 + if (mode >= XFER_UDMA_0 && mode <= XFER_UDMA_7) 32 + s = udma_str[i]; 33 + else if (mode >= XFER_MW_DMA_0 && mode <= XFER_MW_DMA_2) 34 + s = mwdma_str[i]; 35 + else if (mode >= XFER_SW_DMA_0 && mode <= XFER_SW_DMA_2) 36 + s = swdma_str[i]; 37 + else if (mode >= XFER_PIO_0 && mode <= XFER_PIO_5) 38 + s = pio_str[i & 0x7]; 39 + else if (mode == XFER_PIO_SLOW) 40 + s = "PIO SLOW"; 41 + else 42 + s = "XFER ERROR"; 43 + 44 + return s; 45 + } 46 + EXPORT_SYMBOL(ide_xfer_verbose); 47 + 48 + /** 49 + * ide_get_best_pio_mode - get PIO mode from drive 50 + * @drive: drive to consider 51 + * @mode_wanted: preferred mode 52 + * @max_mode: highest allowed mode 53 + * 54 + * This routine returns the recommended PIO settings for a given drive, 55 + * based on the drive->id information and the ide_pio_blacklist[]. 56 + * 57 + * Drive PIO mode is auto-selected if 255 is passed as mode_wanted. 58 + * This is used by most chipset support modules when "auto-tuning". 
59 + */ 60 + 61 + u8 ide_get_best_pio_mode(ide_drive_t *drive, u8 mode_wanted, u8 max_mode) 62 + { 63 + u16 *id = drive->id; 64 + int pio_mode = -1, overridden = 0; 65 + 66 + if (mode_wanted != 255) 67 + return min_t(u8, mode_wanted, max_mode); 68 + 69 + if ((drive->hwif->host_flags & IDE_HFLAG_PIO_NO_BLACKLIST) == 0) 70 + pio_mode = ide_scan_pio_blacklist((char *)&id[ATA_ID_PROD]); 71 + 72 + if (pio_mode != -1) { 73 + printk(KERN_INFO "%s: is on PIO blacklist\n", drive->name); 74 + } else { 75 + pio_mode = id[ATA_ID_OLD_PIO_MODES] >> 8; 76 + if (pio_mode > 2) { /* 2 is maximum allowed tPIO value */ 77 + pio_mode = 2; 78 + overridden = 1; 79 + } 80 + 81 + if (id[ATA_ID_FIELD_VALID] & 2) { /* ATA2? */ 82 + if (ata_id_has_iordy(id)) { 83 + if (id[ATA_ID_PIO_MODES] & 7) { 84 + overridden = 0; 85 + if (id[ATA_ID_PIO_MODES] & 4) 86 + pio_mode = 5; 87 + else if (id[ATA_ID_PIO_MODES] & 2) 88 + pio_mode = 4; 89 + else 90 + pio_mode = 3; 91 + } 92 + } 93 + } 94 + 95 + if (overridden) 96 + printk(KERN_INFO "%s: tPIO > 2, assuming tPIO = 2\n", 97 + drive->name); 98 + } 99 + 100 + if (pio_mode > max_mode) 101 + pio_mode = max_mode; 102 + 103 + return pio_mode; 104 + } 105 + EXPORT_SYMBOL_GPL(ide_get_best_pio_mode); 106 + 107 + int ide_set_pio_mode(ide_drive_t *drive, const u8 mode) 108 + { 109 + ide_hwif_t *hwif = drive->hwif; 110 + const struct ide_port_ops *port_ops = hwif->port_ops; 111 + 112 + if (hwif->host_flags & IDE_HFLAG_NO_SET_MODE) 113 + return 0; 114 + 115 + if (port_ops == NULL || port_ops->set_pio_mode == NULL) 116 + return -1; 117 + 118 + /* 119 + * TODO: temporary hack for some legacy host drivers that didn't 120 + * set transfer mode on the device in ->set_pio_mode method... 
121 + */ 122 + if (port_ops->set_dma_mode == NULL) { 123 + port_ops->set_pio_mode(drive, mode - XFER_PIO_0); 124 + return 0; 125 + } 126 + 127 + if (hwif->host_flags & IDE_HFLAG_POST_SET_MODE) { 128 + if (ide_config_drive_speed(drive, mode)) 129 + return -1; 130 + port_ops->set_pio_mode(drive, mode - XFER_PIO_0); 131 + return 0; 132 + } else { 133 + port_ops->set_pio_mode(drive, mode - XFER_PIO_0); 134 + return ide_config_drive_speed(drive, mode); 135 + } 136 + } 137 + 138 + int ide_set_dma_mode(ide_drive_t *drive, const u8 mode) 139 + { 140 + ide_hwif_t *hwif = drive->hwif; 141 + const struct ide_port_ops *port_ops = hwif->port_ops; 142 + 143 + if (hwif->host_flags & IDE_HFLAG_NO_SET_MODE) 144 + return 0; 145 + 146 + if (port_ops == NULL || port_ops->set_dma_mode == NULL) 147 + return -1; 148 + 149 + if (hwif->host_flags & IDE_HFLAG_POST_SET_MODE) { 150 + if (ide_config_drive_speed(drive, mode)) 151 + return -1; 152 + port_ops->set_dma_mode(drive, mode); 153 + return 0; 154 + } else { 155 + port_ops->set_dma_mode(drive, mode); 156 + return ide_config_drive_speed(drive, mode); 157 + } 158 + } 159 + EXPORT_SYMBOL_GPL(ide_set_dma_mode); 160 + 161 + /* req_pio == "255" for auto-tune */ 162 + void ide_set_pio(ide_drive_t *drive, u8 req_pio) 163 + { 164 + ide_hwif_t *hwif = drive->hwif; 165 + const struct ide_port_ops *port_ops = hwif->port_ops; 166 + u8 host_pio, pio; 167 + 168 + if (port_ops == NULL || port_ops->set_pio_mode == NULL || 169 + (hwif->host_flags & IDE_HFLAG_NO_SET_MODE)) 170 + return; 171 + 172 + BUG_ON(hwif->pio_mask == 0x00); 173 + 174 + host_pio = fls(hwif->pio_mask) - 1; 175 + 176 + pio = ide_get_best_pio_mode(drive, req_pio, host_pio); 177 + 178 + /* 179 + * TODO: 180 + * - report device max PIO mode 181 + * - check req_pio != 255 against device max PIO mode 182 + */ 183 + printk(KERN_DEBUG "%s: host max PIO%d wanted PIO%d%s selected PIO%d\n", 184 + drive->name, host_pio, req_pio, 185 + req_pio == 255 ? 
"(auto-tune)" : "", pio); 186 + 187 + (void)ide_set_pio_mode(drive, XFER_PIO_0 + pio); 188 + } 189 + EXPORT_SYMBOL_GPL(ide_set_pio); 190 + 191 + /** 192 + * ide_rate_filter - filter transfer mode 193 + * @drive: IDE device 194 + * @speed: desired speed 195 + * 196 + * Given the available transfer modes this function returns 197 + * the best available speed at or below the speed requested. 198 + * 199 + * TODO: check device PIO capabilities 200 + */ 201 + 202 + static u8 ide_rate_filter(ide_drive_t *drive, u8 speed) 203 + { 204 + ide_hwif_t *hwif = drive->hwif; 205 + u8 mode = ide_find_dma_mode(drive, speed); 206 + 207 + if (mode == 0) { 208 + if (hwif->pio_mask) 209 + mode = fls(hwif->pio_mask) - 1 + XFER_PIO_0; 210 + else 211 + mode = XFER_PIO_4; 212 + } 213 + 214 + /* printk("%s: mode 0x%02x, speed 0x%02x\n", __func__, mode, speed); */ 215 + 216 + return min(speed, mode); 217 + } 218 + 219 + /** 220 + * ide_set_xfer_rate - set transfer rate 221 + * @drive: drive to set 222 + * @rate: speed to attempt to set 223 + * 224 + * General helper for setting the speed of an IDE device. This 225 + * function knows about user enforced limits from the configuration 226 + * which ->set_pio_mode/->set_dma_mode does not. 227 + */ 228 + 229 + int ide_set_xfer_rate(ide_drive_t *drive, u8 rate) 230 + { 231 + ide_hwif_t *hwif = drive->hwif; 232 + const struct ide_port_ops *port_ops = hwif->port_ops; 233 + 234 + if (port_ops == NULL || port_ops->set_dma_mode == NULL || 235 + (hwif->host_flags & IDE_HFLAG_NO_SET_MODE)) 236 + return -1; 237 + 238 + rate = ide_rate_filter(drive, rate); 239 + 240 + BUG_ON(rate < XFER_PIO_0); 241 + 242 + if (rate >= XFER_PIO_0 && rate <= XFER_PIO_5) 243 + return ide_set_pio_mode(drive, rate); 244 + 245 + return ide_set_dma_mode(drive, rate); 246 + }
+2 -154
drivers/ide/ide.c
··· 62 62 63 63 struct class *ide_port_class; 64 64 65 - /* 66 - * Locks for IDE setting functionality 67 - */ 68 - 69 - DEFINE_MUTEX(ide_setting_mtx); 70 - 71 - ide_devset_get(io_32bit, io_32bit); 72 - 73 - static int set_io_32bit(ide_drive_t *drive, int arg) 74 - { 75 - if (drive->dev_flags & IDE_DFLAG_NO_IO_32BIT) 76 - return -EPERM; 77 - 78 - if (arg < 0 || arg > 1 + (SUPPORT_VLB_SYNC << 1)) 79 - return -EINVAL; 80 - 81 - drive->io_32bit = arg; 82 - 83 - return 0; 84 - } 85 - 86 - ide_devset_get_flag(ksettings, IDE_DFLAG_KEEP_SETTINGS); 87 - 88 - static int set_ksettings(ide_drive_t *drive, int arg) 89 - { 90 - if (arg < 0 || arg > 1) 91 - return -EINVAL; 92 - 93 - if (arg) 94 - drive->dev_flags |= IDE_DFLAG_KEEP_SETTINGS; 95 - else 96 - drive->dev_flags &= ~IDE_DFLAG_KEEP_SETTINGS; 97 - 98 - return 0; 99 - } 100 - 101 - ide_devset_get_flag(using_dma, IDE_DFLAG_USING_DMA); 102 - 103 - static int set_using_dma(ide_drive_t *drive, int arg) 104 - { 105 - #ifdef CONFIG_BLK_DEV_IDEDMA 106 - int err = -EPERM; 107 - 108 - if (arg < 0 || arg > 1) 109 - return -EINVAL; 110 - 111 - if (ata_id_has_dma(drive->id) == 0) 112 - goto out; 113 - 114 - if (drive->hwif->dma_ops == NULL) 115 - goto out; 116 - 117 - err = 0; 118 - 119 - if (arg) { 120 - if (ide_set_dma(drive)) 121 - err = -EIO; 122 - } else 123 - ide_dma_off(drive); 124 - 125 - out: 126 - return err; 127 - #else 128 - if (arg < 0 || arg > 1) 129 - return -EINVAL; 130 - 131 - return -EPERM; 132 - #endif 133 - } 134 - 135 - /* 136 - * handle HDIO_SET_PIO_MODE ioctl abusers here, eventually it will go away 137 - */ 138 - static int set_pio_mode_abuse(ide_hwif_t *hwif, u8 req_pio) 139 - { 140 - switch (req_pio) { 141 - case 202: 142 - case 201: 143 - case 200: 144 - case 102: 145 - case 101: 146 - case 100: 147 - return (hwif->host_flags & IDE_HFLAG_ABUSE_DMA_MODES) ? 1 : 0; 148 - case 9: 149 - case 8: 150 - return (hwif->host_flags & IDE_HFLAG_ABUSE_PREFETCH) ? 
1 : 0; 151 - case 7: 152 - case 6: 153 - return (hwif->host_flags & IDE_HFLAG_ABUSE_FAST_DEVSEL) ? 1 : 0; 154 - default: 155 - return 0; 156 - } 157 - } 158 - 159 - static int set_pio_mode(ide_drive_t *drive, int arg) 160 - { 161 - ide_hwif_t *hwif = drive->hwif; 162 - const struct ide_port_ops *port_ops = hwif->port_ops; 163 - 164 - if (arg < 0 || arg > 255) 165 - return -EINVAL; 166 - 167 - if (port_ops == NULL || port_ops->set_pio_mode == NULL || 168 - (hwif->host_flags & IDE_HFLAG_NO_SET_MODE)) 169 - return -ENOSYS; 170 - 171 - if (set_pio_mode_abuse(drive->hwif, arg)) { 172 - if (arg == 8 || arg == 9) { 173 - unsigned long flags; 174 - 175 - /* take lock for IDE_DFLAG_[NO_]UNMASK/[NO_]IO_32BIT */ 176 - spin_lock_irqsave(&hwif->lock, flags); 177 - port_ops->set_pio_mode(drive, arg); 178 - spin_unlock_irqrestore(&hwif->lock, flags); 179 - } else 180 - port_ops->set_pio_mode(drive, arg); 181 - } else { 182 - int keep_dma = !!(drive->dev_flags & IDE_DFLAG_USING_DMA); 183 - 184 - ide_set_pio(drive, arg); 185 - 186 - if (hwif->host_flags & IDE_HFLAG_SET_PIO_MODE_KEEP_DMA) { 187 - if (keep_dma) 188 - ide_dma_on(drive); 189 - } 190 - } 191 - 192 - return 0; 193 - } 194 - 195 - ide_devset_get_flag(unmaskirq, IDE_DFLAG_UNMASK); 196 - 197 - static int set_unmaskirq(ide_drive_t *drive, int arg) 198 - { 199 - if (drive->dev_flags & IDE_DFLAG_NO_UNMASK) 200 - return -EPERM; 201 - 202 - if (arg < 0 || arg > 1) 203 - return -EINVAL; 204 - 205 - if (arg) 206 - drive->dev_flags |= IDE_DFLAG_UNMASK; 207 - else 208 - drive->dev_flags &= ~IDE_DFLAG_UNMASK; 209 - 210 - return 0; 211 - } 212 - 213 - ide_ext_devset_rw_sync(io_32bit, io_32bit); 214 - ide_ext_devset_rw_sync(keepsettings, ksettings); 215 - ide_ext_devset_rw_sync(unmaskirq, unmaskirq); 216 - ide_ext_devset_rw_sync(using_dma, using_dma); 217 - __IDE_DEVSET(pio_mode, DS_SYNC, NULL, set_pio_mode); 218 - 219 65 /** 220 66 * ide_device_get - get an additional reference to a ide_drive_t 221 67 * @drive: device to get a 
reference to ··· 372 526 ret = PTR_ERR(ide_port_class); 373 527 goto out_port_class; 374 528 } 529 + 530 + ide_acpi_init(); 375 531 376 532 proc_ide_create(); 377 533
+1 -1
drivers/ide/it821x.c
··· 603 603 pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x20); 604 604 } 605 605 606 - static unsigned int init_chipset_it821x(struct pci_dev *dev) 606 + static int init_chipset_it821x(struct pci_dev *dev) 607 607 { 608 608 u8 conf; 609 609 static char *mode[2] = { "pass through", "smart" };
+1 -3
drivers/ide/ns87415.c
··· 286 286 } 287 287 288 288 if (!using_inta) 289 - hwif->irq = __ide_default_irq(hwif->io_ports.data_addr); 290 - else if (!hwif->irq && hwif->mate && hwif->mate->irq) 291 - hwif->irq = hwif->mate->irq; /* share IRQ with mate */ 289 + hwif->irq = pci_get_legacy_ide_irq(dev, hwif->channel); 292 290 293 291 if (!hwif->dma_base) 294 292 return;
+2 -2
drivers/ide/pdc202xx_new.c
··· 325 325 } 326 326 #endif /* CONFIG_PPC_PMAC */ 327 327 328 - static unsigned int init_chipset_pdcnew(struct pci_dev *dev) 328 + static int init_chipset_pdcnew(struct pci_dev *dev) 329 329 { 330 330 const char *name = DRV_NAME; 331 331 unsigned long dma_base = pci_resource_start(dev, 4); ··· 444 444 #endif 445 445 446 446 out: 447 - return dev->irq; 447 + return 0; 448 448 } 449 449 450 450 static struct pci_dev * __devinit pdc20270_get_dev2(struct pci_dev *dev)
+2 -2
drivers/ide/pdc202xx_old.c
··· 264 264 ide_dma_timeout(drive); 265 265 } 266 266 267 - static unsigned int init_chipset_pdc202xx(struct pci_dev *dev) 267 + static int init_chipset_pdc202xx(struct pci_dev *dev) 268 268 { 269 269 unsigned long dmabase = pci_resource_start(dev, 4); 270 270 u8 udma_speed_flag = 0, primary_mode = 0, secondary_mode = 0; ··· 290 290 printk("%sACTIVE\n", (inb(dmabase | 0x1f) & 1) ? "" : "IN"); 291 291 } 292 292 out: 293 - return dev->irq; 293 + return 0; 294 294 } 295 295 296 296 static void __devinit pdc202ata4_fixup_irq(struct pci_dev *dev,
+2 -11
drivers/ide/piix.c
··· 204 204 * out to be nice and simple. 205 205 */ 206 206 207 - static unsigned int init_chipset_ich(struct pci_dev *dev) 207 + static int init_chipset_ich(struct pci_dev *dev) 208 208 { 209 209 u32 extra = 0; 210 210 ··· 318 318 .cable_detect = piix_cable_detect, 319 319 }; 320 320 321 - #ifndef CONFIG_IA64 322 - #define IDE_HFLAGS_PIIX IDE_HFLAG_LEGACY_IRQS 323 - #else 324 - #define IDE_HFLAGS_PIIX 0 325 - #endif 326 - 327 321 #define DECLARE_PIIX_DEV(udma) \ 328 322 { \ 329 323 .name = DRV_NAME, \ 330 324 .init_hwif = init_hwif_piix, \ 331 325 .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, \ 332 326 .port_ops = &piix_port_ops, \ 333 - .host_flags = IDE_HFLAGS_PIIX, \ 334 327 .pio_mask = ATA_PIO4, \ 335 328 .swdma_mask = ATA_SWDMA2_ONLY, \ 336 329 .mwdma_mask = ATA_MWDMA12_ONLY, \ ··· 337 344 .init_hwif = init_hwif_piix, \ 338 345 .enablebits = {{0x41,0x80,0x80}, {0x43,0x80,0x80}}, \ 339 346 .port_ops = &ich_port_ops, \ 340 - .host_flags = IDE_HFLAGS_PIIX, \ 341 347 .pio_mask = ATA_PIO4, \ 342 348 .swdma_mask = ATA_SWDMA2_ONLY, \ 343 349 .mwdma_mask = ATA_MWDMA12_ONLY, \ ··· 352 360 */ 353 361 .name = DRV_NAME, 354 362 .enablebits = {{0x6d,0xc0,0x80}, {0x6d,0xc0,0xc0}}, 355 - .host_flags = IDE_HFLAG_ISA_PORTS | IDE_HFLAG_NO_DMA | 356 - IDE_HFLAGS_PIIX, 363 + .host_flags = IDE_HFLAG_ISA_PORTS | IDE_HFLAG_NO_DMA, 357 364 .pio_mask = ATA_PIO4, 358 365 /* This is a painful system best to let it self tune for now */ 359 366 },
+4 -9
drivers/ide/serverworks.c
··· 175 175 pci_write_config_byte(dev, 0x54, ultra_enable); 176 176 } 177 177 178 - static unsigned int init_chipset_svwks(struct pci_dev *dev) 178 + static int init_chipset_svwks(struct pci_dev *dev) 179 179 { 180 180 unsigned int reg; 181 181 u8 btr; ··· 270 270 pci_write_config_byte(dev, 0x5A, btr); 271 271 } 272 272 273 - return dev->irq; 273 + return 0; 274 274 } 275 275 276 276 static u8 ata66_svwks_svwks(ide_hwif_t *hwif) ··· 353 353 .cable_detect = svwks_cable_detect, 354 354 }; 355 355 356 - #define IDE_HFLAGS_SVWKS IDE_HFLAG_LEGACY_IRQS 357 - 358 356 static const struct ide_port_info serverworks_chipsets[] __devinitdata = { 359 357 { /* 0: OSB4 */ 360 358 .name = DRV_NAME, 361 359 .init_chipset = init_chipset_svwks, 362 360 .port_ops = &osb4_port_ops, 363 - .host_flags = IDE_HFLAGS_SVWKS, 364 361 .pio_mask = ATA_PIO4, 365 362 .mwdma_mask = ATA_MWDMA2, 366 363 .udma_mask = 0x00, /* UDMA is problematic on OSB4 */ ··· 366 369 .name = DRV_NAME, 367 370 .init_chipset = init_chipset_svwks, 368 371 .port_ops = &svwks_port_ops, 369 - .host_flags = IDE_HFLAGS_SVWKS, 370 372 .pio_mask = ATA_PIO4, 371 373 .mwdma_mask = ATA_MWDMA2, 372 374 .udma_mask = ATA_UDMA5, ··· 374 378 .name = DRV_NAME, 375 379 .init_chipset = init_chipset_svwks, 376 380 .port_ops = &svwks_port_ops, 377 - .host_flags = IDE_HFLAGS_SVWKS, 378 381 .pio_mask = ATA_PIO4, 379 382 .mwdma_mask = ATA_MWDMA2, 380 383 .udma_mask = ATA_UDMA5, ··· 382 387 .name = DRV_NAME, 383 388 .init_chipset = init_chipset_svwks, 384 389 .port_ops = &svwks_port_ops, 385 - .host_flags = IDE_HFLAGS_SVWKS | IDE_HFLAG_SINGLE, 390 + .host_flags = IDE_HFLAG_SINGLE, 386 391 .pio_mask = ATA_PIO4, 387 392 .mwdma_mask = ATA_MWDMA2, 388 393 .udma_mask = ATA_UDMA5, ··· 391 396 .name = DRV_NAME, 392 397 .init_chipset = init_chipset_svwks, 393 398 .port_ops = &svwks_port_ops, 394 - .host_flags = IDE_HFLAGS_SVWKS | IDE_HFLAG_SINGLE, 399 + .host_flags = IDE_HFLAG_SINGLE, 395 400 .pio_mask = ATA_PIO4, 396 401 .mwdma_mask = ATA_MWDMA2, 
397 402 .udma_mask = ATA_UDMA5,
+16 -11
drivers/ide/setup-pci.c
··· 305 305 * @dev: PCI device holding interface 306 306 * @d: IDE port info 307 307 * @port: port number 308 - * @irq: PCI IRQ 309 308 * @hw: hw_regs_t instance corresponding to this port 310 309 * 311 310 * Perform the initial set up for the hardware interface structure. This ··· 315 316 */ 316 317 317 318 static int ide_hw_configure(struct pci_dev *dev, const struct ide_port_info *d, 318 - unsigned int port, int irq, hw_regs_t *hw) 319 + unsigned int port, hw_regs_t *hw) 319 320 { 320 321 unsigned long ctl = 0, base = 0; 321 322 ··· 343 344 } 344 345 345 346 memset(hw, 0, sizeof(*hw)); 346 - hw->irq = irq; 347 347 hw->dev = &dev->dev; 348 348 hw->chipset = d->chipset ? d->chipset : ide_pci; 349 349 ide_std_init_ports(hw, base, ctl | 2); ··· 446 448 * ide_pci_setup_ports - configure ports/devices on PCI IDE 447 449 * @dev: PCI device 448 450 * @d: IDE port info 449 - * @pciirq: IRQ line 450 451 * @hw: hw_regs_t instances corresponding to this PCI IDE device 451 452 * @hws: hw_regs_t pointers table to update 452 453 * ··· 459 462 */ 460 463 461 464 void ide_pci_setup_ports(struct pci_dev *dev, const struct ide_port_info *d, 462 - int pciirq, hw_regs_t *hw, hw_regs_t **hws) 465 + hw_regs_t *hw, hw_regs_t **hws) 463 466 { 464 467 int channels = (d->host_flags & IDE_HFLAG_SINGLE) ? 
1 : 2, port; 465 468 u8 tmp; ··· 478 481 continue; /* port not enabled */ 479 482 } 480 483 481 - if (ide_hw_configure(dev, d, port, pciirq, hw + port)) 484 + if (ide_hw_configure(dev, d, port, hw + port)) 482 485 continue; 483 486 484 487 *(hws + port) = hw + port; ··· 521 524 if (noisy) 522 525 printk(KERN_INFO "%s %s: not 100%% native mode: will " 523 526 "probe irqs later\n", d->name, pci_name(dev)); 524 - pciirq = ret; 527 + pciirq = 0; 525 528 } else if (!pciirq && noisy) { 526 529 printk(KERN_WARNING "%s %s: bad irq (%d): will probe later\n", 527 530 d->name, pci_name(dev), pciirq); ··· 546 549 if (ret < 0) 547 550 goto out; 548 551 549 - ide_pci_setup_ports(dev, d, 0, &hw[0], &hws[0]); 552 + ide_pci_setup_ports(dev, d, &hw[0], &hws[0]); 550 553 551 554 host = ide_host_alloc(d, hws); 552 555 if (host == NULL) { ··· 565 568 goto out; 566 569 567 570 /* fixup IRQ */ 568 - hw[1].irq = hw[0].irq = ret; 571 + if (ide_pci_is_in_compatibility_mode(dev)) { 572 + hw[0].irq = pci_get_legacy_ide_irq(dev, 0); 573 + hw[1].irq = pci_get_legacy_ide_irq(dev, 1); 574 + } else 575 + hw[1].irq = hw[0].irq = ret; 569 576 570 577 ret = ide_host_register(host, d, hws); 571 578 if (ret) ··· 592 591 if (ret < 0) 593 592 goto out; 594 593 595 - ide_pci_setup_ports(pdev[i], d, 0, &hw[i*2], &hws[i*2]); 594 + ide_pci_setup_ports(pdev[i], d, &hw[i*2], &hws[i*2]); 596 595 } 597 596 598 597 host = ide_host_alloc(d, hws); ··· 620 619 goto out; 621 620 622 621 /* fixup IRQ */ 623 - hw[i*2 + 1].irq = hw[i*2].irq = ret; 622 + if (ide_pci_is_in_compatibility_mode(pdev[i])) { 623 + hw[i*2].irq = pci_get_legacy_ide_irq(pdev[i], 0); 624 + hw[i*2 + 1].irq = pci_get_legacy_ide_irq(pdev[i], 1); 625 + } else 626 + hw[i*2 + 1].irq = hw[i*2].irq = ret; 624 627 } 625 628 626 629 ret = ide_host_register(host, d, hws);
+1 -1
drivers/ide/siimage.c
··· 464 464 * to 133 MHz clocking if the system isn't already set up to do it. 465 465 */ 466 466 467 - static unsigned int init_chipset_siimage(struct pci_dev *dev) 467 + static int init_chipset_siimage(struct pci_dev *dev) 468 468 { 469 469 struct ide_host *host = pci_get_drvdata(dev); 470 470 void __iomem *ioaddr = host->host_priv;
+2 -2
drivers/ide/sis5513.c
··· 447 447 return chipset_family; 448 448 } 449 449 450 - static unsigned int init_chipset_sis5513(struct pci_dev *dev) 450 + static int init_chipset_sis5513(struct pci_dev *dev) 451 451 { 452 452 /* Make general config ops here 453 453 1/ tell IDE channels to operate in Compatibility mode only ··· 563 563 .name = DRV_NAME, 564 564 .init_chipset = init_chipset_sis5513, 565 565 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} }, 566 - .host_flags = IDE_HFLAG_LEGACY_IRQS | IDE_HFLAG_NO_AUTODMA, 566 + .host_flags = IDE_HFLAG_NO_AUTODMA, 567 567 .pio_mask = ATA_PIO4, 568 568 .mwdma_mask = ATA_MWDMA2, 569 569 };
+2 -2
drivers/ide/sl82c105.c
··· 271 271 * channel 0 here at least, but channel 1 has to be enabled by 272 272 * firmware or arch code. We still set both to 16 bits mode. 273 273 */ 274 - static unsigned int init_chipset_sl82c105(struct pci_dev *dev) 274 + static int init_chipset_sl82c105(struct pci_dev *dev) 275 275 { 276 276 u32 val; 277 277 ··· 281 281 val |= CTRL_P0EN | CTRL_P0F16 | CTRL_P1F16; 282 282 pci_write_config_dword(dev, 0x40, val); 283 283 284 - return dev->irq; 284 + return 0; 285 285 } 286 286 287 287 static const struct ide_port_ops sl82c105_port_ops = {
-1
drivers/ide/slc90e66.c
··· 136 136 .name = DRV_NAME, 137 137 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} }, 138 138 .port_ops = &slc90e66_port_ops, 139 - .host_flags = IDE_HFLAG_LEGACY_IRQS, 140 139 .pio_mask = ATA_PIO4, 141 140 .swdma_mask = ATA_SWDMA2_ONLY, 142 141 .mwdma_mask = ATA_MWDMA12_ONLY,
-3
drivers/ide/trm290.c
··· 277 277 if (reg & 0x10) 278 278 /* legacy mode */ 279 279 hwif->irq = hwif->channel ? 15 : 14; 280 - else if (!hwif->irq && hwif->mate && hwif->mate->irq) 281 - /* sharing IRQ with mate */ 282 - hwif->irq = hwif->mate->irq; 283 280 284 281 #if 1 285 282 {
+1 -11
drivers/ide/via82cxxx.c
··· 267 267 * and initialize its drive independent registers. 268 268 */ 269 269 270 - static unsigned int init_chipset_via82cxxx(struct pci_dev *dev) 270 + static int init_chipset_via82cxxx(struct pci_dev *dev) 271 271 { 272 272 struct ide_host *host = pci_get_drvdata(dev); 273 273 struct via82cxxx_dev *vdev = host->host_priv; ··· 442 442 443 443 if ((via_config->flags & VIA_NO_UNMASK) == 0) 444 444 d.host_flags |= IDE_HFLAG_UNMASK_IRQS; 445 - 446 - #ifdef CONFIG_PPC_CHRP 447 - if (machine_is(chrp) && _chrp_type == _CHRP_Pegasos) 448 - d.host_flags |= IDE_HFLAG_FORCE_LEGACY_IRQS; 449 - #endif 450 - 451 - #ifdef CONFIG_AMIGAONE 452 - if (machine_is(amigaone)) 453 - d.host_flags |= IDE_HFLAG_FORCE_LEGACY_IRQS; 454 - #endif 455 445 456 446 d.udma_mask = via_config->udma_mask; 457 447
-24
include/asm-frv/ide.h
··· 1 - /* ide.h: FRV IDE declarations 2 - * 3 - * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. 4 - * Written by David Howells (dhowells@redhat.com) 5 - * 6 - * This program is free software; you can redistribute it and/or 7 - * modify it under the terms of the GNU General Public License 8 - * as published by the Free Software Foundation; either version 9 - * 2 of the License, or (at your option) any later version. 10 - */ 11 - 12 - #ifndef _ASM_IDE_H 13 - #define _ASM_IDE_H 14 - 15 - #ifdef __KERNEL__ 16 - 17 - #include <asm/setup.h> 18 - #include <asm/io.h> 19 - #include <asm/irq.h> 20 - 21 - #include <asm-generic/ide_iops.h> 22 - 23 - #endif /* __KERNEL__ */ 24 - #endif /* _ASM_IDE_H */
-39
include/asm-mn10300/ide.h
··· 1 - /* MN10300 Arch-specific IDE code 2 - * 3 - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. 4 - * Written by David Howells (dhowells@redhat.com) 5 - * - Derived from include/asm-i386/ide.h 6 - * 7 - * This program is free software; you can redistribute it and/or 8 - * modify it under the terms of the GNU General Public Licence 9 - * as published by the Free Software Foundation; either version 10 - * 2 of the Licence, or (at your option) any later version. 11 - */ 12 - 13 - #ifndef _ASM_IDE_H 14 - #define _ASM_IDE_H 15 - 16 - #ifdef __KERNEL__ 17 - 18 - #include <asm/intctl-regs.h> 19 - 20 - #undef SUPPORT_SLOW_DATA_PORTS 21 - #define SUPPORT_SLOW_DATA_PORTS 0 22 - 23 - #undef SUPPORT_VLB_SYNC 24 - #define SUPPORT_VLB_SYNC 0 25 - 26 - /* 27 - * some bits needed for parts of the IDE subsystem to compile 28 - */ 29 - #define __ide_mm_insw(port, addr, n) \ 30 - insw((unsigned long) (port), (addr), (n)) 31 - #define __ide_mm_insl(port, addr, n) \ 32 - insl((unsigned long) (port), (addr), (n)) 33 - #define __ide_mm_outsw(port, addr, n) \ 34 - outsw((unsigned long) (port), (addr), (n)) 35 - #define __ide_mm_outsl(port, addr, n) \ 36 - outsl((unsigned long) (port), (addr), (n)) 37 - 38 - #endif /* __KERNEL__ */ 39 - #endif /* _ASM_IDE_H */
+5
include/asm-mn10300/pci.h
··· 121 121 122 122 #define pcibios_scan_all_fns(a, b) 0 123 123 124 + static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) 125 + { 126 + return channel ? 15 : 14; 127 + } 128 + 124 129 #endif /* _ASM_PCI_H */
-2
include/linux/ata.h
··· 244 244 ATA_CMD_MEDIA_UNLOCK = 0xDF, 245 245 /* marked obsolete in the ATA/ATAPI-7 spec */ 246 246 ATA_CMD_RESTORE = 0x10, 247 - /* EXABYTE specific */ 248 - ATA_EXABYTE_ENABLE_NEST = 0xF0, 249 247 250 248 /* READ_LOG_EXT pages */ 251 249 ATA_LOG_SATA_NCQ = 0x10,
+35 -43
include/linux/ide.h
··· 26 26 #include <asm/io.h> 27 27 #include <asm/mutex.h> 28 28 29 - #if defined(CONFIG_CRIS) || defined(CONFIG_FRV) 29 + #if defined(CONFIG_CRIS) || defined(CONFIG_FRV) || defined(CONFIG_MN10300) 30 30 # define SUPPORT_VLB_SYNC 0 31 31 #else 32 32 # define SUPPORT_VLB_SYNC 1 ··· 193 193 hw->io_ports.ctl_addr = ctl_addr; 194 194 } 195 195 196 - /* for IDE PCI controllers in legacy mode, temporary */ 197 - static inline int __ide_default_irq(unsigned long base) 198 - { 199 - switch (base) { 200 - #ifdef CONFIG_IA64 201 - case 0x1f0: return isa_irq_to_vector(14); 202 - case 0x170: return isa_irq_to_vector(15); 203 - #else 204 - case 0x1f0: return 14; 205 - case 0x170: return 15; 206 - #endif 207 - } 208 - return 0; 209 - } 210 - 211 - #if defined(CONFIG_ARM) || defined(CONFIG_FRV) || defined(CONFIG_M68K) || \ 212 - defined(CONFIG_MIPS) || defined(CONFIG_MN10300) || defined(CONFIG_PARISC) \ 213 - || defined(CONFIG_PPC) || defined(CONFIG_SPARC) || defined(CONFIG_SPARC64) 196 + #if defined(CONFIG_ARM) || defined(CONFIG_M68K) || defined(CONFIG_MIPS) || \ 197 + defined(CONFIG_PARISC) || defined(CONFIG_PPC) || defined(CONFIG_SPARC) 214 198 #include <asm/ide.h> 215 199 #else 216 200 #include <asm-generic/ide_iops.h> ··· 850 866 ide_hwif_t *ports[MAX_HOST_PORTS + 1]; 851 867 unsigned int n_ports; 852 868 struct device *dev[2]; 853 - unsigned int (*init_chipset)(struct pci_dev *); 869 + int (*init_chipset)(struct pci_dev *); 854 870 irq_handler_t irq_handler; 855 871 unsigned long host_flags; 856 872 void *host_priv; ··· 1130 1146 extern int ide_vlb_clk; 1131 1147 extern int ide_pci_clk; 1132 1148 1133 - extern int ide_end_request (ide_drive_t *drive, int uptodate, int nrsecs); 1134 - int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq, 1135 - int uptodate, int nr_sectors); 1149 + int ide_end_request(ide_drive_t *, int, int); 1150 + int ide_end_dequeued_request(ide_drive_t *, struct request *, int, int); 1151 + void ide_kill_rq(ide_drive_t *, struct request 
*); 1136 1152 1137 - extern void ide_set_handler (ide_drive_t *drive, ide_handler_t *handler, unsigned int timeout, ide_expiry_t *expiry); 1153 + void __ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int, 1154 + ide_expiry_t *); 1155 + void ide_set_handler(ide_drive_t *, ide_handler_t *, unsigned int, 1156 + ide_expiry_t *); 1138 1157 1139 1158 void ide_execute_command(ide_drive_t *, u8, ide_handler_t *, unsigned int, 1140 1159 ide_expiry_t *); ··· 1156 1169 1157 1170 int ide_wait_stat(ide_startstop_t *, ide_drive_t *, u8, u8, unsigned long); 1158 1171 1172 + ide_startstop_t ide_do_park_unpark(ide_drive_t *, struct request *); 1173 + ide_startstop_t ide_do_devset(ide_drive_t *, struct request *); 1174 + 1159 1175 extern ide_startstop_t ide_do_reset (ide_drive_t *); 1160 1176 1161 1177 extern int ide_devset_execute(ide_drive_t *drive, 1162 1178 const struct ide_devset *setting, int arg); 1163 - 1164 - extern void ide_do_drive_cmd(ide_drive_t *, struct request *); 1165 1179 1166 1180 extern void ide_end_drive_cmd(ide_drive_t *, u8, u8); 1167 1181 ··· 1187 1199 1188 1200 u8 ide_read_error(ide_drive_t *); 1189 1201 void ide_read_bcount_and_ireason(ide_drive_t *, u16 *, u8 *); 1190 - 1191 - extern int drive_is_ready(ide_drive_t *); 1192 - 1193 - void ide_pktcmd_tf_load(ide_drive_t *, u32, u16, u8); 1194 1202 1195 1203 int ide_check_atapi_device(ide_drive_t *, const char *); 1196 1204 ··· 1235 1251 1236 1252 int ide_taskfile_ioctl(ide_drive_t *, unsigned int, unsigned long); 1237 1253 1254 + int ide_dev_read_id(ide_drive_t *, u8, u16 *); 1255 + 1238 1256 extern int ide_driveid_update(ide_drive_t *); 1239 1257 extern int ide_config_drive_speed(ide_drive_t *, u8); 1240 1258 extern u8 eighty_ninty_three (ide_drive_t *); ··· 1266 1280 return 0; 1267 1281 } 1268 1282 1269 - void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *, int, 1283 + void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *, 1270 1284 hw_regs_t *, hw_regs_t **); 
1271 1285 void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *); 1272 1286 ··· 1335 1349 IDE_HFLAG_ERROR_STOPS_FIFO = (1 << 19), 1336 1350 /* serialize ports */ 1337 1351 IDE_HFLAG_SERIALIZE = (1 << 20), 1338 - /* use legacy IRQs */ 1339 - IDE_HFLAG_LEGACY_IRQS = (1 << 21), 1340 - /* force use of legacy IRQs */ 1341 - IDE_HFLAG_FORCE_LEGACY_IRQS = (1 << 22), 1342 1352 /* host is TRM290 */ 1343 1353 IDE_HFLAG_TRM290 = (1 << 23), 1344 1354 /* use 32-bit I/O ops */ ··· 1362 1380 1363 1381 struct ide_port_info { 1364 1382 char *name; 1365 - unsigned int (*init_chipset)(struct pci_dev *); 1383 + int (*init_chipset)(struct pci_dev *); 1366 1384 void (*init_iops)(ide_hwif_t *); 1367 1385 void (*init_hwif)(ide_hwif_t *); 1368 1386 int (*init_dma)(ide_hwif_t *, ··· 1453 1471 1454 1472 void ide_dma_lost_irq(ide_drive_t *); 1455 1473 void ide_dma_timeout(ide_drive_t *); 1474 + ide_startstop_t ide_dma_timeout_retry(ide_drive_t *, int); 1456 1475 1457 1476 #else 1458 1477 static inline int ide_id_dma_bug(ide_drive_t *drive) { return 0; } ··· 1465 1482 static inline void ide_dma_verbose(ide_drive_t *drive) { ; } 1466 1483 static inline int ide_set_dma(ide_drive_t *drive) { return 1; } 1467 1484 static inline void ide_check_dma_crc(ide_drive_t *drive) { ; } 1485 + static inline ide_startstop_t ide_dma_timeout_retry(ide_drive_t *drive, int error) { return ide_stopped; } 1468 1486 static inline void ide_release_dma_engine(ide_hwif_t *hwif) { ; } 1469 1487 #endif /* CONFIG_BLK_DEV_IDEDMA */ 1470 1488 1471 1489 #ifdef CONFIG_BLK_DEV_IDEACPI 1490 + int ide_acpi_init(void); 1472 1491 extern int ide_acpi_exec_tfs(ide_drive_t *drive); 1473 1492 extern void ide_acpi_get_timing(ide_hwif_t *hwif); 1474 1493 extern void ide_acpi_push_timing(ide_hwif_t *hwif); 1475 - extern void ide_acpi_init(ide_hwif_t *hwif); 1494 + void ide_acpi_init_port(ide_hwif_t *); 1476 1495 void ide_acpi_port_init_devices(ide_hwif_t *); 1477 1496 extern void ide_acpi_set_state(ide_hwif_t *hwif, int 
on); 1478 1497 #else 1498 + static inline int ide_acpi_init(void) { return 0; } 1479 1499 static inline int ide_acpi_exec_tfs(ide_drive_t *drive) { return 0; } 1480 1500 static inline void ide_acpi_get_timing(ide_hwif_t *hwif) { ; } 1481 1501 static inline void ide_acpi_push_timing(ide_hwif_t *hwif) { ; } 1482 - static inline void ide_acpi_init(ide_hwif_t *hwif) { ; } 1502 + static inline void ide_acpi_init_port(ide_hwif_t *hwif) { ; } 1483 1503 static inline void ide_acpi_port_init_devices(ide_hwif_t *hwif) { ; } 1484 1504 static inline void ide_acpi_set_state(ide_hwif_t *hwif, int on) {} 1485 1505 #endif ··· 1516 1530 hwif->hwif_data = data; 1517 1531 } 1518 1532 1519 - const char *ide_xfer_verbose(u8 mode); 1520 1533 extern void ide_toggle_bounce(ide_drive_t *drive, int on); 1521 - extern int ide_set_xfer_rate(ide_drive_t *drive, u8 rate); 1522 1534 1523 1535 u64 ide_get_lba_addr(struct ide_taskfile *, int); 1524 1536 u8 ide_dump_status(ide_drive_t *, const char *, u8); ··· 1555 1571 struct ide_timing *, unsigned int); 1556 1572 int ide_timing_compute(ide_drive_t *, u8, struct ide_timing *, int, int); 1557 1573 1574 + #ifdef CONFIG_IDE_XFER_MODE 1558 1575 int ide_scan_pio_blacklist(char *); 1559 - 1576 + const char *ide_xfer_verbose(u8); 1560 1577 u8 ide_get_best_pio_mode(ide_drive_t *, u8, u8); 1561 - 1562 1578 int ide_set_pio_mode(ide_drive_t *, u8); 1563 1579 int ide_set_dma_mode(ide_drive_t *, u8); 1564 - 1565 1580 void ide_set_pio(ide_drive_t *, u8); 1581 + int ide_set_xfer_rate(ide_drive_t *, u8); 1582 + #else 1583 + static inline void ide_set_pio(ide_drive_t *drive, u8 pio) { ; } 1584 + static inline int ide_set_xfer_rate(ide_drive_t *drive, u8 rate) { return -1; } 1585 + #endif 1566 1586 1567 1587 static inline void ide_set_max_pio(ide_drive_t *drive) 1568 1588 { ··· 1598 1610 1599 1611 #define ide_port_for_each_dev(i, dev, port) \ 1600 1612 for ((i) = 0; ((dev) = (port)->devices[i]) || (i) < MAX_DRIVES; (i)++) 1613 + 1614 + #define 
ide_port_for_each_present_dev(i, dev, port) \ 1615 + for ((i) = 0; ((dev) = (port)->devices[i]) || (i) < MAX_DRIVES; (i)++) \ 1616 + if ((dev)->dev_flags & IDE_DFLAG_PRESENT) 1601 1617 1602 1618 #define ide_host_for_each_port(i, port, host) \ 1603 1619 for ((i) = 0; ((port) = (host)->ports[i]) || (i) < MAX_HOST_PORTS; (i)++)