/*
 * Copyright (C) 2000-2002	Michael Cornwell <cornwell@acm.org>
 * Copyright (C) 2000-2002	Andre Hedrick <andre@linux-ide.org>
 * Copyright (C) 2001-2002	Klaus Smolin
 *				IBM Storage Technology Division
 * Copyright (C) 2003-2004, 2007	Bartlomiej Zolnierkiewicz
 *
 * The big, the bad and the ugly.
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/ide.h>
#include <linux/nmi.h>
#include <linux/scatterlist.h>
#include <linux/uaccess.h>

#include <asm/io.h>

void ide_tf_readback(ide_drive_t *drive, struct ide_cmd *cmd)
{
	ide_hwif_t *hwif = drive->hwif;
	const struct ide_tp_ops *tp_ops = hwif->tp_ops;

	/* Be sure we're looking at the low order bytes */
	tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);

	tp_ops->tf_read(drive, &cmd->tf, cmd->valid.in.tf);

	if (cmd->tf_flags & IDE_TFLAG_LBA48) {
		tp_ops->write_devctl(hwif, ATA_HOB | ATA_DEVCTL_OBS);

		tp_ops->tf_read(drive, &cmd->hob, cmd->valid.in.hob);
	}
}

void ide_tf_dump(const char *s, struct ide_cmd *cmd)
{
#ifdef DEBUG
	printk("%s: tf: feat 0x%02x nsect 0x%02x lbal 0x%02x "
		"lbam 0x%02x lbah 0x%02x dev 0x%02x cmd 0x%02x\n",
		s, cmd->tf.feature, cmd->tf.nsect,
		cmd->tf.lbal, cmd->tf.lbam, cmd->tf.lbah,
		cmd->tf.device, cmd->tf.command);
	printk("%s: hob: nsect 0x%02x lbal 0x%02x lbam 0x%02x lbah 0x%02x\n",
		s, cmd->hob.nsect, cmd->hob.lbal, cmd->hob.lbam, cmd->hob.lbah);
#endif
}

int taskfile_lib_get_identify(ide_drive_t *drive, u8 *buf)
{
	struct ide_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.tf.nsect = 0x01;
	if (drive->media == ide_disk)
		cmd.tf.command = ATA_CMD_ID_ATA;
	else
		cmd.tf.command = ATA_CMD_ID_ATAPI;
	cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
	cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
	cmd.protocol = ATA_PROT_PIO;

	return ide_raw_taskfile(drive, &cmd, buf, 1);
}

static ide_startstop_t task_no_data_intr(ide_drive_t *);
static ide_startstop_t pre_task_out_intr(ide_drive_t *, struct ide_cmd *);
static ide_startstop_t task_pio_intr(ide_drive_t *);

ide_startstop_t do_rw_taskfile(ide_drive_t *drive, struct ide_cmd *orig_cmd)
{
	ide_hwif_t *hwif = drive->hwif;
	struct ide_cmd *cmd = &hwif->cmd;
	struct ide_taskfile *tf = &cmd->tf;
	ide_handler_t *handler = NULL;
	const struct ide_tp_ops *tp_ops = hwif->tp_ops;
	const struct ide_dma_ops *dma_ops = hwif->dma_ops;

	if (orig_cmd->protocol == ATA_PROT_PIO &&
	    (orig_cmd->tf_flags & IDE_TFLAG_MULTI_PIO) &&
	    drive->mult_count == 0) {
		pr_err("%s: multimode not set!\n", drive->name);
		return ide_stopped;
	}

	if (orig_cmd->ftf_flags & IDE_FTFLAG_FLAGGED)
		orig_cmd->ftf_flags |= IDE_FTFLAG_SET_IN_FLAGS;

	memcpy(cmd, orig_cmd, sizeof(*cmd));

	if ((cmd->tf_flags & IDE_TFLAG_DMA_PIO_FALLBACK) == 0) {
		ide_tf_dump(drive->name, cmd);
		tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);

		if (cmd->ftf_flags & IDE_FTFLAG_OUT_DATA) {
			u8 data[2] = { cmd->tf.data, cmd->hob.data };

			tp_ops->output_data(drive, cmd, data, 2);
		}

		if (cmd->valid.out.tf & IDE_VALID_DEVICE) {
			u8 HIHI = (cmd->tf_flags & IDE_TFLAG_LBA48) ?
				  0xE0 : 0xEF;

			if (!(cmd->ftf_flags & IDE_FTFLAG_FLAGGED))
				cmd->tf.device &= HIHI;
			cmd->tf.device |= drive->select;
		}

		tp_ops->tf_load(drive, &cmd->hob, cmd->valid.out.hob);
		tp_ops->tf_load(drive, &cmd->tf, cmd->valid.out.tf);
	}

	switch (cmd->protocol) {
	case ATA_PROT_PIO:
		if (cmd->tf_flags & IDE_TFLAG_WRITE) {
			tp_ops->exec_command(hwif, tf->command);
			ndelay(400);	/* FIXME */
			return pre_task_out_intr(drive, cmd);
		}
		handler = task_pio_intr;
		/* fall-through */
	case ATA_PROT_NODATA:
		if (handler == NULL)
			handler = task_no_data_intr;
		ide_execute_command(drive, cmd, handler, WAIT_WORSTCASE);
		return ide_started;
	case ATA_PROT_DMA:
		if (ide_dma_prepare(drive, cmd))
			return ide_stopped;
		hwif->expiry = dma_ops->dma_timer_expiry;
		ide_execute_command(drive, cmd, ide_dma_intr, 2 * WAIT_CMD);
		dma_ops->dma_start(drive);
		/* fall-through */
	default:
		return ide_started;
	}
}
EXPORT_SYMBOL_GPL(do_rw_taskfile);
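
/*
 * Editor's usage sketch (not an in-tree caller): a minimal no-data
 * command, such as FLUSH CACHE, pushed through the dispatcher above.
 * The queueing context a real caller needs is omitted; normal users go
 * through ide_raw_taskfile()/ide_no_data_taskfile() further below
 * rather than calling do_rw_taskfile() directly.
 *
 *	struct ide_cmd cmd;
 *
 *	memset(&cmd, 0, sizeof(cmd));
 *	cmd.tf.command   = ATA_CMD_FLUSH;
 *	cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
 *	cmd.protocol     = ATA_PROT_NODATA;
 *	(void)do_rw_taskfile(drive, &cmd);	// "drive" is hypothetical here
 */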

static ide_startstop_t task_no_data_intr(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct ide_cmd *cmd = &hwif->cmd;
	struct ide_taskfile *tf = &cmd->tf;
	int custom = (cmd->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) ? 1 : 0;
	int retries = (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) ? 5 : 1;
	u8 stat;

	local_irq_enable_in_hardirq();

	while (1) {
		stat = hwif->tp_ops->read_status(hwif);
		if ((stat & ATA_BUSY) == 0 || retries-- == 0)
			break;
		udelay(10);
	}

	if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) {
		if (custom && tf->command == ATA_CMD_SET_MULTI) {
			drive->mult_req = drive->mult_count = 0;
			drive->special_flags |= IDE_SFLAG_RECALIBRATE;
			(void)ide_dump_status(drive, __func__, stat);
			return ide_stopped;
		} else if (custom && tf->command == ATA_CMD_INIT_DEV_PARAMS) {
			if ((stat & (ATA_ERR | ATA_DRQ)) == 0) {
				ide_set_handler(drive, &task_no_data_intr,
						WAIT_WORSTCASE);
				return ide_started;
			}
		}
		return ide_error(drive, "task_no_data_intr", stat);
	}

	if (custom && tf->command == ATA_CMD_SET_MULTI)
		drive->mult_count = drive->mult_req;

	if (custom == 0 || tf->command == ATA_CMD_IDLEIMMEDIATE ||
	    tf->command == ATA_CMD_CHK_POWER) {
		struct request *rq = hwif->rq;

		if (ata_pm_request(rq))
			ide_complete_pm_rq(drive, rq);
		else
			ide_finish_cmd(drive, cmd, stat);
	}

	return ide_stopped;
}

static u8 wait_drive_not_busy(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	int retries;
	u8 stat;

	/*
	 * Last sector was transferred, wait until device is ready. This can
	 * take up to 6 ms on some ATAPI devices, so we will wait max 10 ms.
	 */
	for (retries = 0; retries < 1000; retries++) {
		stat = hwif->tp_ops->read_status(hwif);

		if (stat & ATA_BUSY)
			udelay(10);
		else
			break;
	}

	if (stat & ATA_BUSY)
		pr_err("%s: drive still BUSY!\n", drive->name);

	return stat;
}
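
/*
 * Editor's note on the polling budget above: 1000 iterations with
 * udelay(10) between status reads bounds the busy-wait at
 * 1000 * 10 us = 10 ms, matching the "wait max 10 ms" comment and
 * comfortably covering the ~6 ms that slow ATAPI devices may need.
 */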

void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd,
		   unsigned int write, unsigned int len)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg = hwif->sg_table;
	struct scatterlist *cursg = cmd->cursg;
	unsigned long uninitialized_var(flags);
	struct page *page;
	unsigned int offset;
	u8 *buf;

	if (cursg == NULL)
		cursg = cmd->cursg = sg;

	while (len) {
		unsigned nr_bytes = min(len, cursg->length - cmd->cursg_ofs);
		int page_is_high;

		page = sg_page(cursg);
		offset = cursg->offset + cmd->cursg_ofs;

		/* get the current page and offset */
		page = nth_page(page, (offset >> PAGE_SHIFT));
		offset %= PAGE_SIZE;

		nr_bytes = min_t(unsigned, nr_bytes, (PAGE_SIZE - offset));

		page_is_high = PageHighMem(page);
		if (page_is_high)
			local_irq_save(flags);

		buf = kmap_atomic(page) + offset;

		cmd->nleft -= nr_bytes;
		cmd->cursg_ofs += nr_bytes;

		if (cmd->cursg_ofs == cursg->length) {
			cursg = cmd->cursg = sg_next(cmd->cursg);
			cmd->cursg_ofs = 0;
		}

		/* do the actual data transfer */
		if (write)
			hwif->tp_ops->output_data(drive, cmd, buf, nr_bytes);
		else
			hwif->tp_ops->input_data(drive, cmd, buf, nr_bytes);

		kunmap_atomic(buf);

		if (page_is_high)
			local_irq_restore(flags);

		len -= nr_bytes;
	}
}
EXPORT_SYMBOL_GPL(ide_pio_bytes);

static void ide_pio_datablock(ide_drive_t *drive, struct ide_cmd *cmd,
			      unsigned int write)
{
	unsigned int nr_bytes;

	u8 saved_io_32bit = drive->io_32bit;

	if (cmd->tf_flags & IDE_TFLAG_FS)
		scsi_req(cmd->rq)->result = 0;

	if (cmd->tf_flags & IDE_TFLAG_IO_16BIT)
		drive->io_32bit = 0;

	touch_softlockup_watchdog();

	if (cmd->tf_flags & IDE_TFLAG_MULTI_PIO)
		nr_bytes = min_t(unsigned, cmd->nleft, drive->mult_count << 9);
	else
		nr_bytes = SECTOR_SIZE;

	ide_pio_bytes(drive, cmd, write, nr_bytes);

	drive->io_32bit = saved_io_32bit;
}

static void ide_error_cmd(ide_drive_t *drive, struct ide_cmd *cmd)
{
	if (cmd->tf_flags & IDE_TFLAG_FS) {
		int nr_bytes = cmd->nbytes - cmd->nleft;

		if (cmd->protocol == ATA_PROT_PIO &&
		    ((cmd->tf_flags & IDE_TFLAG_WRITE) || cmd->nleft == 0)) {
			if (cmd->tf_flags & IDE_TFLAG_MULTI_PIO)
				nr_bytes -= drive->mult_count << 9;
			else
				nr_bytes -= SECTOR_SIZE;
		}

		if (nr_bytes > 0)
			ide_complete_rq(drive, BLK_STS_OK, nr_bytes);
	}
}

void ide_finish_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat)
{
	struct request *rq = drive->hwif->rq;
	u8 err = ide_read_error(drive), nsect = cmd->tf.nsect;
	u8 set_xfer = !!(cmd->tf_flags & IDE_TFLAG_SET_XFER);

	ide_complete_cmd(drive, cmd, stat, err);
	scsi_req(rq)->result = err;

	if (err == 0 && set_xfer) {
		ide_set_xfer_rate(drive, nsect);
		ide_driveid_update(drive);
	}

	ide_complete_rq(drive, err ? BLK_STS_IOERR : BLK_STS_OK, blk_rq_bytes(rq));
}
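
/*
 * Editor's worked example for ide_pio_datablock() above: with
 * drive->mult_count == 16, a multi-PIO data phase transfers
 * min(cmd->nleft, 16 << 9) = up to 8192 bytes per drive interrupt,
 * while plain PIO moves one SECTOR_SIZE (512-byte) block per interrupt.
 */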

/*
 * Handler for commands with a PIO data phase.
 */
static ide_startstop_t task_pio_intr(ide_drive_t *drive)
{
	ide_hwif_t *hwif = drive->hwif;
	struct ide_cmd *cmd = &hwif->cmd;
	u8 stat = hwif->tp_ops->read_status(hwif);
	u8 write = !!(cmd->tf_flags & IDE_TFLAG_WRITE);

	if (write == 0) {
		/* Error? */
		if (stat & ATA_ERR)
			goto out_err;

		/* Didn't want any data? Odd. */
		if ((stat & ATA_DRQ) == 0) {
			/* Command all done? */
			if (OK_STAT(stat, ATA_DRDY, ATA_BUSY))
				goto out_end;

			/* Assume it was a spurious irq */
			goto out_wait;
		}
	} else {
		if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat))
			goto out_err;

		/* Deal with unexpected ATA data phase. */
		if (((stat & ATA_DRQ) == 0) ^ (cmd->nleft == 0))
			goto out_err;
	}

	if (write && cmd->nleft == 0)
		goto out_end;

	/* Still data left to transfer. */
	ide_pio_datablock(drive, cmd, write);

	/* Are we done? Check status and finish transfer. */
	if (write == 0 && cmd->nleft == 0) {
		stat = wait_drive_not_busy(drive);
		if (!OK_STAT(stat, 0, BAD_STAT))
			goto out_err;

		goto out_end;
	}
out_wait:
	/* Still data left to transfer. */
	ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE);
	return ide_started;
out_end:
	if ((cmd->tf_flags & IDE_TFLAG_FS) == 0)
		ide_finish_cmd(drive, cmd, stat);
	else
		ide_complete_rq(drive, BLK_STS_OK, blk_rq_sectors(cmd->rq) << 9);
	return ide_stopped;
out_err:
	ide_error_cmd(drive, cmd);
	return ide_error(drive, __func__, stat);
}

static ide_startstop_t pre_task_out_intr(ide_drive_t *drive,
					 struct ide_cmd *cmd)
{
	ide_startstop_t startstop;

	if (ide_wait_stat(&startstop, drive, ATA_DRQ,
			  drive->bad_wstat, WAIT_DRQ)) {
		pr_err("%s: no DRQ after issuing %sWRITE%s\n", drive->name,
			(cmd->tf_flags & IDE_TFLAG_MULTI_PIO) ? "MULT" : "",
			(drive->dev_flags & IDE_DFLAG_LBA48) ? "_EXT" : "");
		return startstop;
	}

	if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0)
		local_irq_disable();

	ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE);

	ide_pio_datablock(drive, cmd, 1);

	return ide_started;
}

int ide_raw_taskfile(ide_drive_t *drive, struct ide_cmd *cmd, u8 *buf,
		     u16 nsect)
{
	struct request *rq;
	int error;

	rq = blk_get_request(drive->queue,
		(cmd->tf_flags & IDE_TFLAG_WRITE) ?
			REQ_OP_DRV_OUT : REQ_OP_DRV_IN, __GFP_RECLAIM);
	ide_req(rq)->type = ATA_PRIV_TASKFILE;

	/*
	 * (ks) Currently we transfer whole sectors only. This is sufficient
	 * for now, but it would be great to find a solution that allows
	 * transfers of any size, to support special commands like READ LONG.
	 */
	if (nsect) {
		error = blk_rq_map_kern(drive->queue, rq, buf,
					nsect * SECTOR_SIZE, __GFP_RECLAIM);
		if (error)
			goto put_req;
	}

	rq->special = cmd;
	cmd->rq = rq;

	blk_execute_rq(drive->queue, NULL, rq, 0);
	error = scsi_req(rq)->result ? -EIO : 0;
put_req:
	blk_put_request(rq);
	return error;
}
EXPORT_SYMBOL(ide_raw_taskfile);
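
/*
 * Editor's usage sketch for the raw-taskfile path (illustrative only;
 * mirrors taskfile_lib_get_identify() above, which issues a one-sector
 * PIO-in IDENTIFY through ide_raw_taskfile()):
 *
 *	u8 *id = kmalloc(SECTOR_SIZE, GFP_KERNEL);	// 512-byte IDENTIFY page
 *
 *	if (id && taskfile_lib_get_identify(drive, id) == 0)
 *		pr_debug("%s: IDENTIFY ok\n", drive->name);	// "drive" is hypothetical
 *	kfree(id);
 */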

int ide_no_data_taskfile(ide_drive_t *drive, struct ide_cmd *cmd)
{
	cmd->protocol = ATA_PROT_NODATA;

	return ide_raw_taskfile(drive, cmd, NULL, 0);
}
EXPORT_SYMBOL_GPL(ide_no_data_taskfile);

#ifdef CONFIG_IDE_TASK_IOCTL
int ide_taskfile_ioctl(ide_drive_t *drive, unsigned long arg)
{
	ide_task_request_t *req_task;
	struct ide_cmd cmd;
	u8 *outbuf = NULL;
	u8 *inbuf = NULL;
	u8 *data_buf = NULL;
	int err = 0;
	int tasksize = sizeof(struct ide_task_request_s);
	unsigned int taskin = 0;
	unsigned int taskout = 0;
	u16 nsect = 0;
	char __user *buf = (char __user *)arg;

	req_task = memdup_user(buf, tasksize);
	if (IS_ERR(req_task))
		return PTR_ERR(req_task);

	taskout = req_task->out_size;
	taskin = req_task->in_size;

	if (taskin > 65536 || taskout > 65536) {
		err = -EINVAL;
		goto abort;
	}

	if (taskout) {
		int outtotal = tasksize;

		outbuf = kzalloc(taskout, GFP_KERNEL);
		if (outbuf == NULL) {
			err = -ENOMEM;
			goto abort;
		}
		if (copy_from_user(outbuf, buf + outtotal, taskout)) {
			err = -EFAULT;
			goto abort;
		}
	}

	if (taskin) {
		int intotal = tasksize + taskout;

		inbuf = kzalloc(taskin, GFP_KERNEL);
		if (inbuf == NULL) {
			err = -ENOMEM;
			goto abort;
		}
		if (copy_from_user(inbuf, buf + intotal, taskin)) {
			err = -EFAULT;
			goto abort;
		}
	}

	memset(&cmd, 0, sizeof(cmd));

	memcpy(&cmd.hob, req_task->hob_ports, HDIO_DRIVE_HOB_HDR_SIZE - 2);
	memcpy(&cmd.tf, req_task->io_ports, HDIO_DRIVE_TASK_HDR_SIZE);

	cmd.valid.out.tf = IDE_VALID_DEVICE;
	cmd.valid.in.tf = IDE_VALID_DEVICE | IDE_VALID_IN_TF;
	cmd.tf_flags = IDE_TFLAG_IO_16BIT;

	if (drive->dev_flags & IDE_DFLAG_LBA48) {
		cmd.tf_flags |= IDE_TFLAG_LBA48;
		cmd.valid.in.hob = IDE_VALID_IN_HOB;
	}

	if (req_task->out_flags.all) {
		cmd.ftf_flags |= IDE_FTFLAG_FLAGGED;

		if (req_task->out_flags.b.data)
			cmd.ftf_flags |= IDE_FTFLAG_OUT_DATA;

		if (req_task->out_flags.b.nsector_hob)
			cmd.valid.out.hob |= IDE_VALID_NSECT;
		if (req_task->out_flags.b.sector_hob)
			cmd.valid.out.hob |= IDE_VALID_LBAL;
		if (req_task->out_flags.b.lcyl_hob)
			cmd.valid.out.hob |= IDE_VALID_LBAM;
		if (req_task->out_flags.b.hcyl_hob)
			cmd.valid.out.hob |= IDE_VALID_LBAH;

		if (req_task->out_flags.b.error_feature)
			cmd.valid.out.tf |= IDE_VALID_FEATURE;
		if (req_task->out_flags.b.nsector)
			cmd.valid.out.tf |= IDE_VALID_NSECT;
		if (req_task->out_flags.b.sector)
			cmd.valid.out.tf |= IDE_VALID_LBAL;
		if (req_task->out_flags.b.lcyl)
			cmd.valid.out.tf |= IDE_VALID_LBAM;
		if (req_task->out_flags.b.hcyl)
			cmd.valid.out.tf |= IDE_VALID_LBAH;
	} else {
		cmd.valid.out.tf |= IDE_VALID_OUT_TF;
		if (cmd.tf_flags & IDE_TFLAG_LBA48)
			cmd.valid.out.hob |= IDE_VALID_OUT_HOB;
	}

	if (req_task->in_flags.b.data)
		cmd.ftf_flags |= IDE_FTFLAG_IN_DATA;

	if (req_task->req_cmd == IDE_DRIVE_TASK_RAW_WRITE) {
		/* fixup data phase if needed */
		if (req_task->data_phase == TASKFILE_IN_DMAQ ||
		    req_task->data_phase == TASKFILE_IN_DMA)
			cmd.tf_flags |= IDE_TFLAG_WRITE;
	}

	cmd.protocol = ATA_PROT_DMA;
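
	/*
	 * Editor's summary of the dispatch below: DMA is the default
	 * protocol set above; the PIO/multi-PIO phases override it, the
	 * OUT variants additionally set IDE_TFLAG_WRITE, and the sector
	 * count and data buffer are taken from the direction in use.
	 */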
	switch (req_task->data_phase) {
	case TASKFILE_MULTI_OUT:
		if (!drive->mult_count) {
			/* (hs): give up if multcount is not set */
			pr_err("%s: %s Multimode Write multcount is not set\n",
				drive->name, __func__);
			err = -EPERM;
			goto abort;
		}
		cmd.tf_flags |= IDE_TFLAG_MULTI_PIO;
		/* fall through */
	case TASKFILE_OUT:
		cmd.protocol = ATA_PROT_PIO;
		/* fall through */
	case TASKFILE_OUT_DMAQ:
	case TASKFILE_OUT_DMA:
		cmd.tf_flags |= IDE_TFLAG_WRITE;
		nsect = taskout / SECTOR_SIZE;
		data_buf = outbuf;
		break;
	case TASKFILE_MULTI_IN:
		if (!drive->mult_count) {
			/* (hs): give up if multcount is not set */
			pr_err("%s: %s Multimode Read multcount is not set\n",
				drive->name, __func__);
			err = -EPERM;
			goto abort;
		}
		cmd.tf_flags |= IDE_TFLAG_MULTI_PIO;
		/* fall through */
	case TASKFILE_IN:
		cmd.protocol = ATA_PROT_PIO;
		/* fall through */
	case TASKFILE_IN_DMAQ:
	case TASKFILE_IN_DMA:
		nsect = taskin / SECTOR_SIZE;
		data_buf = inbuf;
		break;
	case TASKFILE_NO_DATA:
		cmd.protocol = ATA_PROT_NODATA;
		break;
	default:
		err = -EFAULT;
		goto abort;
	}

	if (req_task->req_cmd == IDE_DRIVE_TASK_NO_DATA)
		nsect = 0;
	else if (!nsect) {
		nsect = (cmd.hob.nsect << 8) | cmd.tf.nsect;

		if (!nsect) {
			pr_err("%s: in/out command without data\n",
				drive->name);
			err = -EFAULT;
			goto abort;
		}
	}

	err = ide_raw_taskfile(drive, &cmd, data_buf, nsect);

	memcpy(req_task->hob_ports, &cmd.hob, HDIO_DRIVE_HOB_HDR_SIZE - 2);
	memcpy(req_task->io_ports, &cmd.tf, HDIO_DRIVE_TASK_HDR_SIZE);

	if ((cmd.ftf_flags & IDE_FTFLAG_SET_IN_FLAGS) &&
	    req_task->in_flags.all == 0) {
		req_task->in_flags.all = IDE_TASKFILE_STD_IN_FLAGS;
		if (drive->dev_flags & IDE_DFLAG_LBA48)
			req_task->in_flags.all |= (IDE_HOB_STD_IN_FLAGS << 8);
	}

	if (copy_to_user(buf, req_task, tasksize)) {
		err = -EFAULT;
		goto abort;
	}
	if (taskout) {
		int outtotal = tasksize;

		if (copy_to_user(buf + outtotal, outbuf, taskout)) {
			err = -EFAULT;
			goto abort;
		}
	}
	if (taskin) {
		int intotal = tasksize + taskout;

		if (copy_to_user(buf + intotal, inbuf, taskin)) {
			err = -EFAULT;
			goto abort;
		}
	}
abort:
	kfree(req_task);
	kfree(outbuf);
	kfree(inbuf);

	return err;
}
#endif	/* CONFIG_IDE_TASK_IOCTL */
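
/*
 * Editor's userspace sketch (hedged; assumes the HDIO_DRIVE_TASKFILE
 * ioctl path that routes to ide_taskfile_ioctl() above): the argument
 * is an ide_task_request_t header immediately followed by out_size
 * bytes of output data and then in_size bytes of input data, matching
 * the buf + tasksize and buf + tasksize + taskout offsets used above.
 *
 *	struct {
 *		ide_task_request_t hdr;
 *		__u8 data[512];		// covers hdr.in_size below
 *	} req = { 0 };
 *
 *	req.hdr.data_phase = TASKFILE_IN;
 *	req.hdr.req_cmd    = IDE_DRIVE_TASK_IN;
 *	req.hdr.in_size    = 512;
 *	// program the taskfile registers via req.hdr.io_ports[] here
 *	ioctl(fd, HDIO_DRIVE_TASKFILE, &req);	// fd: an opened IDE block device
 */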