Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

scsi: pmcraid: Remove the PMCRAID_PASSTHROUGH_IOCTL ioctl implementation

The whole passthrough ioctl path looks completely broken. For example, it
DMA-maps the scatterlist and only afterwards copies data into it, which is
prohibited by the DMA API contract: once a buffer is mapped for DMA, the
device owns it, so the CPU must not write to it without first syncing
ownership back.

Moreover, in pmcraid_alloc_sglist(), the pointer returned by the
sgl_alloc_order() call is never recorded anywhere: the enclosing sglist
structure comes from kzalloc(), so sglist->scatterlist is left NULL and the
allocated scatterlist can neither be used nor freed. The call is therefore
pointless and leaks memory.

So remove the PMCRAID_PASSTHROUGH_IOCTL ioctl implementation entirely.
Should it be needed, we should reimplement it using the proper block layer
request mapping helpers.

Link: https://lore.kernel.org/r/7f27a70bec3f3dcaf46a29b1c630edd4792e71c0.1648298857.git.christophe.jaillet@wanadoo.fr
Suggested-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>

authored by

Christophe JAILLET and committed by
Martin K. Petersen
f16aa285 f06aa52c

-524
-491
drivers/scsi/pmcraid.c
··· 3182 3182 } 3183 3183 3184 3184 /** 3185 - * pmcraid_free_sglist - Frees an allocated SG buffer list 3186 - * @sglist: scatter/gather list pointer 3187 - * 3188 - * Free a DMA'able memory previously allocated with pmcraid_alloc_sglist 3189 - * 3190 - * Return value: 3191 - * none 3192 - */ 3193 - static void pmcraid_free_sglist(struct pmcraid_sglist *sglist) 3194 - { 3195 - sgl_free_order(sglist->scatterlist, sglist->order); 3196 - kfree(sglist); 3197 - } 3198 - 3199 - /** 3200 - * pmcraid_alloc_sglist - Allocates memory for a SG list 3201 - * @buflen: buffer length 3202 - * 3203 - * Allocates a DMA'able buffer in chunks and assembles a scatter/gather 3204 - * list. 3205 - * 3206 - * Return value 3207 - * pointer to sglist / NULL on failure 3208 - */ 3209 - static struct pmcraid_sglist *pmcraid_alloc_sglist(int buflen) 3210 - { 3211 - struct pmcraid_sglist *sglist; 3212 - int sg_size; 3213 - int order; 3214 - 3215 - sg_size = buflen / (PMCRAID_MAX_IOADLS - 1); 3216 - order = (sg_size > 0) ? 
get_order(sg_size) : 0; 3217 - 3218 - /* Allocate a scatter/gather list for the DMA */ 3219 - sglist = kzalloc(sizeof(struct pmcraid_sglist), GFP_KERNEL); 3220 - if (sglist == NULL) 3221 - return NULL; 3222 - 3223 - sglist->order = order; 3224 - sgl_alloc_order(buflen, order, false, GFP_KERNEL | __GFP_ZERO, 3225 - &sglist->num_sg); 3226 - 3227 - return sglist; 3228 - } 3229 - 3230 - /** 3231 - * pmcraid_copy_sglist - Copy user buffer to kernel buffer's SG list 3232 - * @sglist: scatter/gather list pointer 3233 - * @buffer: buffer pointer 3234 - * @len: buffer length 3235 - * @direction: data transfer direction 3236 - * 3237 - * Copy a user buffer into a buffer allocated by pmcraid_alloc_sglist 3238 - * 3239 - * Return value: 3240 - * 0 on success / other on failure 3241 - */ 3242 - static int pmcraid_copy_sglist( 3243 - struct pmcraid_sglist *sglist, 3244 - void __user *buffer, 3245 - u32 len, 3246 - int direction 3247 - ) 3248 - { 3249 - struct scatterlist *sg; 3250 - void *kaddr; 3251 - int bsize_elem; 3252 - int i; 3253 - int rc = 0; 3254 - 3255 - /* Determine the actual number of bytes per element */ 3256 - bsize_elem = PAGE_SIZE * (1 << sglist->order); 3257 - 3258 - sg = sglist->scatterlist; 3259 - 3260 - for (i = 0; i < (len / bsize_elem); i++, sg = sg_next(sg), buffer += bsize_elem) { 3261 - struct page *page = sg_page(sg); 3262 - 3263 - kaddr = kmap(page); 3264 - if (direction == DMA_TO_DEVICE) 3265 - rc = copy_from_user(kaddr, buffer, bsize_elem); 3266 - else 3267 - rc = copy_to_user(buffer, kaddr, bsize_elem); 3268 - 3269 - kunmap(page); 3270 - 3271 - if (rc) { 3272 - pmcraid_err("failed to copy user data into sg list\n"); 3273 - return -EFAULT; 3274 - } 3275 - 3276 - sg->length = bsize_elem; 3277 - } 3278 - 3279 - if (len % bsize_elem) { 3280 - struct page *page = sg_page(sg); 3281 - 3282 - kaddr = kmap(page); 3283 - 3284 - if (direction == DMA_TO_DEVICE) 3285 - rc = copy_from_user(kaddr, buffer, len % bsize_elem); 3286 - else 3287 - rc = 
copy_to_user(buffer, kaddr, len % bsize_elem); 3288 - 3289 - kunmap(page); 3290 - 3291 - sg->length = len % bsize_elem; 3292 - } 3293 - 3294 - if (rc) { 3295 - pmcraid_err("failed to copy user data into sg list\n"); 3296 - rc = -EFAULT; 3297 - } 3298 - 3299 - return rc; 3300 - } 3301 - 3302 - /** 3303 3185 * pmcraid_queuecommand_lck - Queue a mid-layer request 3304 3186 * @scsi_cmd: scsi command struct 3305 3187 * ··· 3336 3454 return rc; 3337 3455 } 3338 3456 3339 - 3340 - /** 3341 - * pmcraid_build_passthrough_ioadls - builds SG elements for passthrough 3342 - * commands sent over IOCTL interface 3343 - * 3344 - * @cmd : pointer to struct pmcraid_cmd 3345 - * @buflen : length of the request buffer 3346 - * @direction : data transfer direction 3347 - * 3348 - * Return value 3349 - * 0 on success, non-zero error code on failure 3350 - */ 3351 - static int pmcraid_build_passthrough_ioadls( 3352 - struct pmcraid_cmd *cmd, 3353 - int buflen, 3354 - int direction 3355 - ) 3356 - { 3357 - struct pmcraid_sglist *sglist = NULL; 3358 - struct scatterlist *sg = NULL; 3359 - struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb; 3360 - struct pmcraid_ioadl_desc *ioadl; 3361 - int i; 3362 - 3363 - sglist = pmcraid_alloc_sglist(buflen); 3364 - 3365 - if (!sglist) { 3366 - pmcraid_err("can't allocate memory for passthrough SGls\n"); 3367 - return -ENOMEM; 3368 - } 3369 - 3370 - sglist->num_dma_sg = dma_map_sg(&cmd->drv_inst->pdev->dev, 3371 - sglist->scatterlist, 3372 - sglist->num_sg, direction); 3373 - 3374 - if (!sglist->num_dma_sg || sglist->num_dma_sg > PMCRAID_MAX_IOADLS) { 3375 - dev_err(&cmd->drv_inst->pdev->dev, 3376 - "Failed to map passthrough buffer!\n"); 3377 - pmcraid_free_sglist(sglist); 3378 - return -EIO; 3379 - } 3380 - 3381 - cmd->sglist = sglist; 3382 - ioarcb->request_flags0 |= NO_LINK_DESCS; 3383 - 3384 - ioadl = pmcraid_init_ioadls(cmd, sglist->num_dma_sg); 3385 - 3386 - /* Initialize IOADL descriptor addresses */ 3387 - for_each_sg(sglist->scatterlist, 
sg, sglist->num_dma_sg, i) { 3388 - ioadl[i].data_len = cpu_to_le32(sg_dma_len(sg)); 3389 - ioadl[i].address = cpu_to_le64(sg_dma_address(sg)); 3390 - ioadl[i].flags = 0; 3391 - } 3392 - 3393 - /* setup the last descriptor */ 3394 - ioadl[i - 1].flags = IOADL_FLAGS_LAST_DESC; 3395 - 3396 - return 0; 3397 - } 3398 - 3399 - 3400 - /** 3401 - * pmcraid_release_passthrough_ioadls - release passthrough ioadls 3402 - * 3403 - * @cmd: pointer to struct pmcraid_cmd for which ioadls were allocated 3404 - * @buflen: size of the request buffer 3405 - * @direction: data transfer direction 3406 - * 3407 - * Return value 3408 - * 0 on success, non-zero error code on failure 3409 - */ 3410 - static void pmcraid_release_passthrough_ioadls( 3411 - struct pmcraid_cmd *cmd, 3412 - int buflen, 3413 - int direction 3414 - ) 3415 - { 3416 - struct pmcraid_sglist *sglist = cmd->sglist; 3417 - 3418 - if (buflen > 0) { 3419 - dma_unmap_sg(&cmd->drv_inst->pdev->dev, 3420 - sglist->scatterlist, 3421 - sglist->num_sg, 3422 - direction); 3423 - pmcraid_free_sglist(sglist); 3424 - cmd->sglist = NULL; 3425 - } 3426 - } 3427 - 3428 - /** 3429 - * pmcraid_ioctl_passthrough - handling passthrough IOCTL commands 3430 - * 3431 - * @pinstance: pointer to adapter instance structure 3432 - * @ioctl_cmd: ioctl code 3433 - * @buflen: unused 3434 - * @arg: pointer to pmcraid_passthrough_buffer user buffer 3435 - * 3436 - * Return value 3437 - * 0 on success, non-zero error code on failure 3438 - */ 3439 - static long pmcraid_ioctl_passthrough( 3440 - struct pmcraid_instance *pinstance, 3441 - unsigned int ioctl_cmd, 3442 - unsigned int buflen, 3443 - void __user *arg 3444 - ) 3445 - { 3446 - struct pmcraid_passthrough_ioctl_buffer *buffer; 3447 - struct pmcraid_ioarcb *ioarcb; 3448 - struct pmcraid_cmd *cmd; 3449 - struct pmcraid_cmd *cancel_cmd; 3450 - void __user *request_buffer; 3451 - unsigned long request_offset; 3452 - unsigned long lock_flags; 3453 - void __user *ioasa; 3454 - u32 ioasc; 3455 - int 
request_size; 3456 - int buffer_size; 3457 - u8 direction; 3458 - int rc = 0; 3459 - 3460 - /* If IOA reset is in progress, wait 10 secs for reset to complete */ 3461 - if (pinstance->ioa_reset_in_progress) { 3462 - rc = wait_event_interruptible_timeout( 3463 - pinstance->reset_wait_q, 3464 - !pinstance->ioa_reset_in_progress, 3465 - msecs_to_jiffies(10000)); 3466 - 3467 - if (!rc) 3468 - return -ETIMEDOUT; 3469 - else if (rc < 0) 3470 - return -ERESTARTSYS; 3471 - } 3472 - 3473 - /* If adapter is not in operational state, return error */ 3474 - if (pinstance->ioa_state != IOA_STATE_OPERATIONAL) { 3475 - pmcraid_err("IOA is not operational\n"); 3476 - return -ENOTTY; 3477 - } 3478 - 3479 - buffer_size = sizeof(struct pmcraid_passthrough_ioctl_buffer); 3480 - buffer = kmalloc(buffer_size, GFP_KERNEL); 3481 - 3482 - if (!buffer) { 3483 - pmcraid_err("no memory for passthrough buffer\n"); 3484 - return -ENOMEM; 3485 - } 3486 - 3487 - request_offset = 3488 - offsetof(struct pmcraid_passthrough_ioctl_buffer, request_buffer); 3489 - 3490 - request_buffer = arg + request_offset; 3491 - 3492 - rc = copy_from_user(buffer, arg, 3493 - sizeof(struct pmcraid_passthrough_ioctl_buffer)); 3494 - 3495 - ioasa = arg + offsetof(struct pmcraid_passthrough_ioctl_buffer, ioasa); 3496 - 3497 - if (rc) { 3498 - pmcraid_err("ioctl: can't copy passthrough buffer\n"); 3499 - rc = -EFAULT; 3500 - goto out_free_buffer; 3501 - } 3502 - 3503 - request_size = le32_to_cpu(buffer->ioarcb.data_transfer_length); 3504 - 3505 - if (buffer->ioarcb.request_flags0 & TRANSFER_DIR_WRITE) { 3506 - direction = DMA_TO_DEVICE; 3507 - } else { 3508 - direction = DMA_FROM_DEVICE; 3509 - } 3510 - 3511 - if (request_size < 0) { 3512 - rc = -EINVAL; 3513 - goto out_free_buffer; 3514 - } 3515 - 3516 - /* check if we have any additional command parameters */ 3517 - if (le16_to_cpu(buffer->ioarcb.add_cmd_param_length) 3518 - > PMCRAID_ADD_CMD_PARAM_LEN) { 3519 - rc = -EINVAL; 3520 - goto out_free_buffer; 3521 - } 3522 
- 3523 - cmd = pmcraid_get_free_cmd(pinstance); 3524 - 3525 - if (!cmd) { 3526 - pmcraid_err("free command block is not available\n"); 3527 - rc = -ENOMEM; 3528 - goto out_free_buffer; 3529 - } 3530 - 3531 - cmd->scsi_cmd = NULL; 3532 - ioarcb = &(cmd->ioa_cb->ioarcb); 3533 - 3534 - /* Copy the user-provided IOARCB stuff field by field */ 3535 - ioarcb->resource_handle = buffer->ioarcb.resource_handle; 3536 - ioarcb->data_transfer_length = buffer->ioarcb.data_transfer_length; 3537 - ioarcb->cmd_timeout = buffer->ioarcb.cmd_timeout; 3538 - ioarcb->request_type = buffer->ioarcb.request_type; 3539 - ioarcb->request_flags0 = buffer->ioarcb.request_flags0; 3540 - ioarcb->request_flags1 = buffer->ioarcb.request_flags1; 3541 - memcpy(ioarcb->cdb, buffer->ioarcb.cdb, PMCRAID_MAX_CDB_LEN); 3542 - 3543 - if (buffer->ioarcb.add_cmd_param_length) { 3544 - ioarcb->add_cmd_param_length = 3545 - buffer->ioarcb.add_cmd_param_length; 3546 - ioarcb->add_cmd_param_offset = 3547 - buffer->ioarcb.add_cmd_param_offset; 3548 - memcpy(ioarcb->add_data.u.add_cmd_params, 3549 - buffer->ioarcb.add_data.u.add_cmd_params, 3550 - le16_to_cpu(buffer->ioarcb.add_cmd_param_length)); 3551 - } 3552 - 3553 - /* set hrrq number where the IOA should respond to. Note that all cmds 3554 - * generated internally uses hrrq_id 0, exception to this is the cmd 3555 - * block of scsi_cmd which is re-used (e.g. 
cancel/abort), which uses 3556 - * hrrq_id assigned here in queuecommand 3557 - */ 3558 - ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) % 3559 - pinstance->num_hrrq; 3560 - 3561 - if (request_size) { 3562 - rc = pmcraid_build_passthrough_ioadls(cmd, 3563 - request_size, 3564 - direction); 3565 - if (rc) { 3566 - pmcraid_err("couldn't build passthrough ioadls\n"); 3567 - goto out_free_cmd; 3568 - } 3569 - } 3570 - 3571 - /* If data is being written into the device, copy the data from user 3572 - * buffers 3573 - */ 3574 - if (direction == DMA_TO_DEVICE && request_size > 0) { 3575 - rc = pmcraid_copy_sglist(cmd->sglist, 3576 - request_buffer, 3577 - request_size, 3578 - direction); 3579 - if (rc) { 3580 - pmcraid_err("failed to copy user buffer\n"); 3581 - goto out_free_sglist; 3582 - } 3583 - } 3584 - 3585 - /* passthrough ioctl is a blocking command so, put the user to sleep 3586 - * until timeout. Note that a timeout value of 0 means, do timeout. 3587 - */ 3588 - cmd->cmd_done = pmcraid_internal_done; 3589 - init_completion(&cmd->wait_for_completion); 3590 - cmd->completion_req = 1; 3591 - 3592 - pmcraid_info("command(%d) (CDB[0] = %x) for %x\n", 3593 - le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2, 3594 - cmd->ioa_cb->ioarcb.cdb[0], 3595 - le32_to_cpu(cmd->ioa_cb->ioarcb.resource_handle)); 3596 - 3597 - spin_lock_irqsave(pinstance->host->host_lock, lock_flags); 3598 - _pmcraid_fire_command(cmd); 3599 - spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); 3600 - 3601 - /* NOTE ! Remove the below line once abort_task is implemented 3602 - * in firmware. 
This line disables ioctl command timeout handling logic 3603 - * similar to IO command timeout handling, making ioctl commands to wait 3604 - * until the command completion regardless of timeout value specified in 3605 - * ioarcb 3606 - */ 3607 - buffer->ioarcb.cmd_timeout = 0; 3608 - 3609 - /* If command timeout is specified put caller to wait till that time, 3610 - * otherwise it would be blocking wait. If command gets timed out, it 3611 - * will be aborted. 3612 - */ 3613 - if (buffer->ioarcb.cmd_timeout == 0) { 3614 - wait_for_completion(&cmd->wait_for_completion); 3615 - } else if (!wait_for_completion_timeout( 3616 - &cmd->wait_for_completion, 3617 - msecs_to_jiffies(le16_to_cpu(buffer->ioarcb.cmd_timeout) * 1000))) { 3618 - 3619 - pmcraid_info("aborting cmd %d (CDB[0] = %x) due to timeout\n", 3620 - le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2, 3621 - cmd->ioa_cb->ioarcb.cdb[0]); 3622 - 3623 - spin_lock_irqsave(pinstance->host->host_lock, lock_flags); 3624 - cancel_cmd = pmcraid_abort_cmd(cmd); 3625 - spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags); 3626 - 3627 - if (cancel_cmd) { 3628 - wait_for_completion(&cancel_cmd->wait_for_completion); 3629 - ioasc = le32_to_cpu(cancel_cmd->ioa_cb->ioasa.ioasc); 3630 - pmcraid_return_cmd(cancel_cmd); 3631 - 3632 - /* if abort task couldn't find the command i.e it got 3633 - * completed prior to aborting, return good completion. 
3634 - * if command got aborted successfully or there was IOA 3635 - * reset due to abort task itself getting timedout then 3636 - * return -ETIMEDOUT 3637 - */ 3638 - if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET || 3639 - PMCRAID_IOASC_SENSE_KEY(ioasc) == 0x00) { 3640 - if (ioasc != PMCRAID_IOASC_GC_IOARCB_NOTFOUND) 3641 - rc = -ETIMEDOUT; 3642 - goto out_handle_response; 3643 - } 3644 - } 3645 - 3646 - /* no command block for abort task or abort task failed to abort 3647 - * the IOARCB, then wait for 150 more seconds and initiate reset 3648 - * sequence after timeout 3649 - */ 3650 - if (!wait_for_completion_timeout( 3651 - &cmd->wait_for_completion, 3652 - msecs_to_jiffies(150 * 1000))) { 3653 - pmcraid_reset_bringup(cmd->drv_inst); 3654 - rc = -ETIMEDOUT; 3655 - } 3656 - } 3657 - 3658 - out_handle_response: 3659 - /* copy entire IOASA buffer and return IOCTL success. 3660 - * If copying IOASA to user-buffer fails, return 3661 - * EFAULT 3662 - */ 3663 - if (copy_to_user(ioasa, &cmd->ioa_cb->ioasa, 3664 - sizeof(struct pmcraid_ioasa))) { 3665 - pmcraid_err("failed to copy ioasa buffer to user\n"); 3666 - rc = -EFAULT; 3667 - } 3668 - 3669 - /* If the data transfer was from device, copy the data onto user 3670 - * buffers 3671 - */ 3672 - else if (direction == DMA_FROM_DEVICE && request_size > 0) { 3673 - rc = pmcraid_copy_sglist(cmd->sglist, 3674 - request_buffer, 3675 - request_size, 3676 - direction); 3677 - if (rc) { 3678 - pmcraid_err("failed to copy user buffer\n"); 3679 - rc = -EFAULT; 3680 - } 3681 - } 3682 - 3683 - out_free_sglist: 3684 - pmcraid_release_passthrough_ioadls(cmd, request_size, direction); 3685 - 3686 - out_free_cmd: 3687 - pmcraid_return_cmd(cmd); 3688 - 3689 - out_free_buffer: 3690 - kfree(buffer); 3691 - 3692 - return rc; 3693 - } 3694 - 3695 - 3696 - 3697 - 3698 3457 /** 3699 3458 * pmcraid_ioctl_driver - ioctl handler for commands handled by driver itself 3700 3459 * ··· 3444 3921 } 3445 3922 3446 3923 switch (_IOC_TYPE(cmd)) { 3447 - 3448 
- case PMCRAID_PASSTHROUGH_IOCTL: 3449 - /* If ioctl code is to download microcode, we need to block 3450 - * mid-layer requests. 3451 - */ 3452 - if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE) 3453 - scsi_block_requests(pinstance->host); 3454 - 3455 - retval = pmcraid_ioctl_passthrough(pinstance, cmd, 3456 - hdr->buffer_length, argp); 3457 - 3458 - if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE) 3459 - scsi_unblock_requests(pinstance->host); 3460 - break; 3461 3924 3462 3925 case PMCRAID_DRIVER_IOCTL: 3463 3926 arg += sizeof(struct pmcraid_ioctl_header);
-33
drivers/scsi/pmcraid.h
··· 1023 1023 #define PMCRAID_IOCTL_SIGNATURE "PMCRAID" 1024 1024 1025 1025 /* 1026 - * pmcraid_passthrough_ioctl_buffer - structure given as argument to 1027 - * passthrough(or firmware handled) IOCTL commands. Note that ioarcb requires 1028 - * 32-byte alignment so, it is necessary to pack this structure to avoid any 1029 - * holes between ioctl_header and passthrough buffer 1030 - * 1031 - * .ioactl_header : ioctl header 1032 - * .ioarcb : filled-up ioarcb buffer, driver always reads this buffer 1033 - * .ioasa : buffer for ioasa, driver fills this with IOASA from firmware 1034 - * .request_buffer: The I/O buffer (flat), driver reads/writes to this based on 1035 - * the transfer directions passed in ioarcb.flags0. Contents 1036 - * of this buffer are valid only when ioarcb.data_transfer_len 1037 - * is not zero. 1038 - */ 1039 - struct pmcraid_passthrough_ioctl_buffer { 1040 - struct pmcraid_ioctl_header ioctl_header; 1041 - struct pmcraid_ioarcb ioarcb; 1042 - struct pmcraid_ioasa ioasa; 1043 - u8 request_buffer[]; 1044 - } __attribute__ ((packed, aligned(PMCRAID_IOARCB_ALIGNMENT))); 1045 - 1046 - /* 1047 1026 * keys to differentiate between driver handled IOCTLs and passthrough 1048 1027 * IOCTLs passed to IOA. driver determines the ioctl type using macro 1049 1028 * _IOC_TYPE 1050 1029 */ 1051 1030 #define PMCRAID_DRIVER_IOCTL 'D' 1052 - #define PMCRAID_PASSTHROUGH_IOCTL 'F' 1053 1031 1054 1032 #define DRV_IOCTL(n, size) \ 1055 1033 _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_DRIVER_IOCTL, (n), (size)) 1056 - 1057 - #define FMW_IOCTL(n, size) \ 1058 - _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_PASSTHROUGH_IOCTL, (n), (size)) 1059 1034 1060 1035 /* 1061 1036 * _ARGSIZE: macro that gives size of the argument type passed to an IOCTL cmd. 
··· 1043 1068 1044 1069 #define PMCRAID_IOCTL_RESET_ADAPTER \ 1045 1070 DRV_IOCTL(5, sizeof(struct pmcraid_ioctl_header)) 1046 - 1047 - /* passthrough/firmware handled commands */ 1048 - #define PMCRAID_IOCTL_PASSTHROUGH_COMMAND \ 1049 - FMW_IOCTL(1, sizeof(struct pmcraid_passthrough_ioctl_buffer)) 1050 - 1051 - #define PMCRAID_IOCTL_DOWNLOAD_MICROCODE \ 1052 - FMW_IOCTL(2, sizeof(struct pmcraid_passthrough_ioctl_buffer)) 1053 - 1054 1071 1055 1072 #endif /* _PMCRAID_H */