Linux kernel source mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
Tags: kernel, os, linux
Snapshot at tag v2.6.30-rc7 — 247 lines, 6.7 kB (raw view)
#include <linux/kernel.h>
#include <linux/ide.h>

/*
 * generic_ide_suspend - driver-model suspend hook for an IDE device
 * @dev: device being suspended
 * @mesg: PM event message (PRETHAW is mapped to FREEZE below)
 *
 * Builds a REQ_TYPE_PM_SUSPEND request and runs it synchronously through
 * the block layer; the per-step work (IDE_PM_*) is then driven by
 * ide_start_power_step()/ide_complete_power_step() below.
 *
 * Returns 0 on success or the error from blk_execute_rq().
 */
int generic_ide_suspend(struct device *dev, pm_message_t mesg)
{
	ide_drive_t *drive = dev->driver_data, *pair = ide_get_pair_dev(drive);
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq;
	struct request_pm_state rqpm;
	struct ide_cmd cmd;
	int ret;

	/*
	 * call ACPI _GTM only once per port: only for the even-numbered
	 * device of the pair ((drive->dn & 1) == 0), or when there is no
	 * paired device at all.
	 */
	if ((drive->dn & 1) == 0 || pair == NULL)
		ide_acpi_get_timing(hwif);

	memset(&rqpm, 0, sizeof(rqpm));
	memset(&cmd, 0, sizeof(cmd));
	/*
	 * rqpm and cmd live on this stack frame; blk_execute_rq() below
	 * waits for completion, so they stay valid for the request's
	 * whole lifetime.
	 */
	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
	rq->cmd_type = REQ_TYPE_PM_SUSPEND;
	rq->special = &cmd;
	rq->data = &rqpm;
	rqpm.pm_step = IDE_PM_START_SUSPEND;
	/* PRETHAW is handled the same as FREEZE for the drive itself */
	if (mesg.event == PM_EVENT_PRETHAW)
		mesg.event = PM_EVENT_FREEZE;
	rqpm.pm_state = mesg.event;

	ret = blk_execute_rq(drive->queue, NULL, rq, 0);
	blk_put_request(rq);

	/* call ACPI _PS3 only after both devices are suspended */
	if (ret == 0 && ((drive->dn & 1) || pair == NULL))
		ide_acpi_set_state(hwif, 0);

	return ret;
}

/*
 * generic_ide_resume - driver-model resume hook for an IDE device
 * @dev: device being resumed
 *
 * Restores ACPI power state and timings, then issues a REQ_TYPE_PM_RESUME
 * request (marked REQ_PREEMPT so it is processed even while the drive is
 * still flagged IDE_DFLAG_BLOCKED) and finally invokes the media driver's
 * ->resume() method, if one is registered.
 *
 * Returns 0 on success or the error from blk_execute_rq().
 */
int generic_ide_resume(struct device *dev)
{
	ide_drive_t *drive = dev->driver_data, *pair = ide_get_pair_dev(drive);
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq;
	struct request_pm_state rqpm;
	struct ide_cmd cmd;
	int err;

	/* call ACPI _PS0 / _STM only once (mirrors the suspend-side logic) */
	if ((drive->dn & 1) == 0 || pair == NULL) {
		ide_acpi_set_state(hwif, 1);
		ide_acpi_push_timing(hwif);
	}

	ide_acpi_exec_tfs(drive);

	memset(&rqpm, 0, sizeof(rqpm));
	memset(&cmd, 0, sizeof(cmd));
	rq = blk_get_request(drive->queue, READ, __GFP_WAIT);
	rq->cmd_type = REQ_TYPE_PM_RESUME;
	/* REQ_PREEMPT lets this request run on a still-blocked queue */
	rq->cmd_flags |= REQ_PREEMPT;
	rq->special = &cmd;
	rq->data = &rqpm;
	rqpm.pm_step = IDE_PM_START_RESUME;
	rqpm.pm_state = PM_EVENT_ON;

	err = blk_execute_rq(drive->queue, NULL, rq, 1);
	blk_put_request(rq);

	if (err == 0 && dev->driver) {
		struct ide_driver *drv = to_ide_driver(dev->driver);

		if (drv->resume)
			drv->resume(drive);
	}

	return err;
}

/*
 * ide_complete_power_step - advance the PM state machine after a step
 * @drive: target drive
 * @rq: the PM request carrying the request_pm_state in rq->data
 *
 * Only disks use the multi-step sequences; for other media this is a
 * no-op.  Suspend: FLUSH_CACHE -> STANDBY (STANDBY is skipped for
 * PM_EVENT_FREEZE) -> COMPLETED.  Resume: RESTORE_PIO -> IDLE ->
 * RESTORE_DMA (RESTORE_DMA itself is finished by the caller setting
 * IDE_PM_COMPLETED in ide_start_power_step()).
 */
void ide_complete_power_step(ide_drive_t *drive, struct request *rq)
{
	struct request_pm_state *pm = rq->data;

#ifdef DEBUG_PM
	printk(KERN_INFO "%s: complete_power_step(step: %d)\n",
	       drive->name, pm->pm_step);
#endif
	if (drive->media != ide_disk)
		return;

	switch (pm->pm_step) {
	case IDE_PM_FLUSH_CACHE:	/* Suspend step 1 (flush cache) */
		if (pm->pm_state == PM_EVENT_FREEZE)
			pm->pm_step = IDE_PM_COMPLETED;
		else
			pm->pm_step = IDE_PM_STANDBY;
		break;
	case IDE_PM_STANDBY:		/* Suspend step 2 (standby) */
		pm->pm_step = IDE_PM_COMPLETED;
		break;
	case IDE_PM_RESTORE_PIO:	/* Resume step 1 (restore PIO) */
		pm->pm_step = IDE_PM_IDLE;
		break;
	case IDE_PM_IDLE:		/* Resume step 2 (idle)*/
		pm->pm_step = IDE_PM_RESTORE_DMA;
		break;
	}
}

/*
 * ide_start_power_step - issue the ATA command for the current PM step
 * @drive: target drive
 * @rq: the PM request (rq->data = request_pm_state, rq->special = ide_cmd)
 *
 * Returns ide_stopped when the step needs no drive command (or was
 * skipped), otherwise the result of do_rw_taskfile() for the issued
 * flush/standby/idle taskfile.
 */
ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq)
{
	struct request_pm_state *pm = rq->data;
	struct ide_cmd *cmd = rq->special;

	memset(cmd, 0, sizeof(*cmd));

	switch (pm->pm_step) {
	case IDE_PM_FLUSH_CACHE:	/* Suspend step 1 (flush cache) */
		if (drive->media != ide_disk)
			break;
		/* Not supported? Switch to next step now. */
		if (ata_id_flush_enabled(drive->id) == 0 ||
		    (drive->dev_flags & IDE_DFLAG_WCACHE) == 0) {
			ide_complete_power_step(drive, rq);
			return ide_stopped;
		}
		if (ata_id_flush_ext_enabled(drive->id))
			cmd->tf.command = ATA_CMD_FLUSH_EXT;
		else
			cmd->tf.command = ATA_CMD_FLUSH;
		goto out_do_tf;
	case IDE_PM_STANDBY:		/* Suspend step 2 (standby) */
		cmd->tf.command = ATA_CMD_STANDBYNOW1;
		goto out_do_tf;
	case IDE_PM_RESTORE_PIO:	/* Resume step 1 (restore PIO) */
		ide_set_max_pio(drive);
		/*
		 * skip IDE_PM_IDLE for ATAPI devices
		 */
		if (drive->media != ide_disk)
			pm->pm_step = IDE_PM_RESTORE_DMA;
		else
			ide_complete_power_step(drive, rq);
		return ide_stopped;
	case IDE_PM_IDLE:		/* Resume step 2 (idle) */
		cmd->tf.command = ATA_CMD_IDLEIMMEDIATE;
		goto out_do_tf;
	case IDE_PM_RESTORE_DMA:	/* Resume step 3 (restore DMA) */
		/*
		 * Right now, all we do is call ide_set_dma(drive),
		 * we could be smarter and check for current xfer_speed
		 * in struct drive etc...
		 */
		if (drive->hwif->dma_ops == NULL)
			break;
		/*
		 * TODO: respect IDE_DFLAG_USING_DMA
		 */
		ide_set_dma(drive);
		break;
	}

	/* falling out of the switch means the sequence is finished */
	pm->pm_step = IDE_PM_COMPLETED;

	return ide_stopped;

out_do_tf:
	cmd->valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
	cmd->valid.in.tf  = IDE_VALID_IN_TF  | IDE_VALID_DEVICE;
	cmd->protocol = ATA_PROT_NODATA;

	return do_rw_taskfile(drive, cmd);
}

/**
 * ide_complete_pm_rq - end the current Power Management request
 * @drive: target drive
 * @rq: request
 *
 * This function cleans up the current PM request and stops the queue
 * if necessary.  Does nothing until the state machine has reached
 * IDE_PM_COMPLETED; then it stops the queue on suspend (so no further
 * requests are started) or unblocks the drive on resume, and ends the
 * request.
 */
void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq)
{
	struct request_queue *q = drive->queue;
	struct request_pm_state *pm = rq->data;
	unsigned long flags;

	/* advance the state machine; bail out if more steps remain */
	ide_complete_power_step(drive, rq);
	if (pm->pm_step != IDE_PM_COMPLETED)
		return;

#ifdef DEBUG_PM
	printk("%s: completing PM request, %s\n", drive->name,
	       blk_pm_suspend_request(rq) ? "suspend" : "resume");
#endif
	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_pm_suspend_request(rq))
		blk_stop_queue(q);
	else
		drive->dev_flags &= ~IDE_DFLAG_BLOCKED;
	spin_unlock_irqrestore(q->queue_lock, flags);

	drive->hwif->rq = NULL;

	/* a PM request has no data left, so full completion is expected */
	if (blk_end_request(rq, 0, 0))
		BUG();
}

/*
 * ide_check_pm_state - per-request hook for PM bookkeeping
 * @drive: target drive
 * @rq: request about to be started
 *
 * At the start of a suspend sequence the drive is flagged
 * IDE_DFLAG_BLOCKED; at the start of a resume sequence the hardware is
 * polled out of BSY and the queue is restarted.
 */
void ide_check_pm_state(ide_drive_t *drive, struct request *rq)
{
	struct request_pm_state *pm = rq->data;

	if (blk_pm_suspend_request(rq) &&
	    pm->pm_step == IDE_PM_START_SUSPEND)
		/* Mark drive blocked when starting the suspend sequence. */
		drive->dev_flags |= IDE_DFLAG_BLOCKED;
	else if (blk_pm_resume_request(rq) &&
		 pm->pm_step == IDE_PM_START_RESUME) {
		/*
		 * The first thing we do on wakeup is to wait for BSY bit to
		 * go away (with a looong timeout) as a drive on this hwif may
		 * just be POSTing itself.
		 * We do that before even selecting as the "other" device on
		 * the bus may be broken enough to walk on our toes at this
		 * point.
		 */
		ide_hwif_t *hwif = drive->hwif;
		const struct ide_tp_ops *tp_ops = hwif->tp_ops;
		struct request_queue *q = drive->queue;
		unsigned long flags;
		int rc;
#ifdef DEBUG_PM
		printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name);
#endif
		rc = ide_wait_not_busy(hwif, 35000);
		if (rc)
			printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name);
		tp_ops->dev_select(drive);
		tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
		rc = ide_wait_not_busy(hwif, 100000);
		if (rc)
			printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name);

		spin_lock_irqsave(q->queue_lock, flags);
		blk_start_queue(q);
		spin_unlock_irqrestore(q->queue_lock, flags);
	}
}