···
 	default n
 	depends on NET
 
+config SCSI_MQ_DEFAULT
+	bool "SCSI: use blk-mq I/O path by default"
+	depends on SCSI
+	---help---
+	  This option enables the new blk-mq based I/O path for SCSI
+	  devices by default.  With the option the scsi_mod.use_blk_mq
+	  module/boot option defaults to Y, without it to N, but it can
+	  still be overridden either way.
+
+	  If unsure say N.
+
 config SCSI_PROC_FS
 	bool "legacy /proc/scsi/ support"
 	depends on SCSI && PROC_FS
+32-38
drivers/scsi/aacraid/aachba.c
···
 	if ((le32_to_cpu(get_name_reply->status) == CT_OK)
 	 && (get_name_reply->data[0] != '\0')) {
 		char *sp = get_name_reply->data;
-		sp[sizeof(((struct aac_get_name_resp *)NULL)->data)] = '\0';
+		int data_size = FIELD_SIZEOF(struct aac_get_name_resp, data);
+
+		sp[data_size - 1] = '\0';
 		while (*sp == ' ')
 			++sp;
 		if (*sp) {
···
 static int aac_get_container_name(struct scsi_cmnd * scsicmd)
 {
 	int status;
+	int data_size;
 	struct aac_get_name *dinfo;
 	struct fib * cmd_fibcontext;
 	struct aac_dev * dev;
 
 	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
 
+	data_size = FIELD_SIZEOF(struct aac_get_name_resp, data);
+
 	cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
 
 	aac_fib_init(cmd_fibcontext);
 	dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext);
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 
 	dinfo->command = cpu_to_le32(VM_ContainerConfig);
 	dinfo->type = cpu_to_le32(CT_READ_NAME);
 	dinfo->cid = cpu_to_le32(scmd_id(scsicmd));
-	dinfo->count = cpu_to_le32(sizeof(((struct aac_get_name_resp *)NULL)->data));
+	dinfo->count = cpu_to_le32(data_size - 1);
 
 	status = aac_fib_send(ContainerCommand,
 		  cmd_fibcontext,
···
 	/*
 	 *	Check that the command queued to the controller
 	 */
-	if (status == -EINPROGRESS) {
-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+	if (status == -EINPROGRESS)
 		return 0;
-	}
 
 	printk(KERN_WARNING "aac_get_container_name: aac_fib_send failed with status: %d.\n", status);
 	aac_fib_complete(cmd_fibcontext);
···
 
 	dinfo->count = cpu_to_le32(scmd_id(scsicmd));
 	dinfo->type = cpu_to_le32(FT_FILESYS);
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 
 	status = aac_fib_send(ContainerCommand,
 			  fibptr,
···
 	/*
 	 *	Check that the command queued to the controller
 	 */
-	if (status == -EINPROGRESS)
-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
-	else if (status < 0) {
+	if (status < 0 && status != -EINPROGRESS) {
 		/* Inherit results from VM_NameServe, if any */
 		dresp->status = cpu_to_le32(ST_OK);
 		_aac_probe_container2(context, fibptr);
···
 	dinfo->count = cpu_to_le32(scmd_id(scsicmd));
 	dinfo->type = cpu_to_le32(FT_FILESYS);
 	scsicmd->SCp.ptr = (char *)callback;
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 
 	status = aac_fib_send(ContainerCommand,
 			  fibptr,
···
 	/*
 	 *	Check that the command queued to the controller
 	 */
-	if (status == -EINPROGRESS) {
-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+	if (status == -EINPROGRESS)
 		return 0;
-	}
+
 	if (status < 0) {
 		scsicmd->SCp.ptr = NULL;
 		aac_fib_complete(fibptr);
···
 	dinfo->command = cpu_to_le32(VM_ContainerConfig);
 	dinfo->type = cpu_to_le32(CT_CID_TO_32BITS_UID);
 	dinfo->cid = cpu_to_le32(scmd_id(scsicmd));
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 
 	status = aac_fib_send(ContainerCommand,
 		  cmd_fibcontext,
···
 	/*
 	 *	Check that the command queued to the controller
 	 */
-	if (status == -EINPROGRESS) {
-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+	if (status == -EINPROGRESS)
 		return 0;
-	}
 
 	printk(KERN_WARNING "aac_get_container_serial: aac_fib_send failed with status: %d.\n", status);
 	aac_fib_complete(cmd_fibcontext);
···
 	 *	Alocate and initialize a Fib
 	 */
 	cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
-
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 	status = aac_adapter_read(cmd_fibcontext, scsicmd, lba, count);
 
 	/*
 	 *	Check that the command queued to the controller
 	 */
-	if (status == -EINPROGRESS) {
-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+	if (status == -EINPROGRESS)
 		return 0;
-	}
 
 	printk(KERN_WARNING "aac_read: aac_fib_send failed with status: %d.\n", status);
 	/*
···
 	 *	Allocate and initialize a Fib then setup a BlockWrite command
 	 */
 	cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
-
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 	status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua);
 
 	/*
 	 *	Check that the command queued to the controller
 	 */
-	if (status == -EINPROGRESS) {
-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+	if (status == -EINPROGRESS)
 		return 0;
-	}
 
 	printk(KERN_WARNING "aac_write: aac_fib_send failed with status: %d\n", status);
 	/*
···
 	synchronizecmd->cid = cpu_to_le32(scmd_id(scsicmd));
 	synchronizecmd->count =
 	     cpu_to_le32(sizeof(((struct aac_synchronize_reply *)NULL)->data));
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 
 	/*
 	 *	Now send the Fib to the adapter
···
 	/*
 	 *	Check that the command queued to the controller
 	 */
-	if (status == -EINPROGRESS) {
-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+	if (status == -EINPROGRESS)
 		return 0;
-	}
 
 	printk(KERN_WARNING
 		"aac_synchronize: aac_fib_send failed with status: %d.\n", status);
···
 	pmcmd->cid = cpu_to_le32(sdev_id(sdev));
 	pmcmd->parm = (scsicmd->cmnd[1] & 1) ?
 		cpu_to_le32(CT_PM_UNIT_IMMEDIATE) : 0;
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 
 	/*
 	 *	Now send the Fib to the adapter
···
 	/*
 	 *	Check that the command queued to the controller
 	 */
-	if (status == -EINPROGRESS) {
-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+	if (status == -EINPROGRESS)
 		return 0;
-	}
 
 	aac_fib_complete(cmd_fibcontext);
 	aac_fib_free(cmd_fibcontext);
···
 		return -EBUSY;
 	if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk)))
 		return -EFAULT;
-	if (qd.cnum == -1)
+	if (qd.cnum == -1) {
+		if (qd.id < 0 || qd.id >= dev->maximum_num_containers)
+			return -EINVAL;
 		qd.cnum = qd.id;
-	else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1))
-	{
+	} else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1)) {
 		if (qd.cnum < 0 || qd.cnum >= dev->maximum_num_containers)
 			return -EINVAL;
 		qd.instance = dev->scsi_host_ptr->host_no;
···
 	 *	Allocate and initialize a Fib then setup a BlockWrite command
 	 */
 	cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
-
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 	status = aac_adapter_scsi(cmd_fibcontext, scsicmd);
 
 	/*
 	 *	Check that the command queued to the controller
 	 */
-	if (status == -EINPROGRESS) {
-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+	if (status == -EINPROGRESS)
 		return 0;
-	}
 
 	printk(KERN_WARNING "aac_srb: aac_fib_send failed with status: %d\n", status);
 	aac_fib_complete(cmd_fibcontext);
···
 	if (!cmd_fibcontext)
 		return -1;
 
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 	status = aac_adapter_hba(cmd_fibcontext, scsicmd);
 
 	/*
 	 *	Check that the command queued to the controller
 	 */
-	if (status == -EINPROGRESS) {
-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+	if (status == -EINPROGRESS)
 		return 0;
-	}
 
 	pr_warn("aac_hba_cmd_req: aac_fib_send failed with status: %d\n",
 		status);
drivers/scsi/qedf/qedf_els.c
···
 
 	/* If a SRR times out, simply free resources */
 	if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO)
-		goto out_free;
+		goto out_put;
 
 	/* Normalize response data into struct fc_frame */
 	mp_req = &(srr_req->mp_req);
···
 	if (!fp) {
 		QEDF_ERR(&(qedf->dbg_ctx),
 		    "fc_frame_alloc failure.\n");
-		goto out_free;
+		goto out_put;
 	}
 
 	/* Copy frame header from firmware into fp */
···
 	}
 
 	fc_frame_free(fp);
-out_free:
+out_put:
 	/* Put reference for original command since SRR completed */
 	kref_put(&orig_io_req->refcount, qedf_release_cmd);
+out_free:
 	kfree(cb_arg);
 }
···
 
 	/* If a REC times out, free resources */
 	if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO)
-		goto out_free;
+		goto out_put;
 
 	/* Normalize response data into struct fc_frame */
 	mp_req = &(rec_req->mp_req);
···
 	if (!fp) {
 		QEDF_ERR(&(qedf->dbg_ctx),
 		    "fc_frame_alloc failure.\n");
-		goto out_free;
+		goto out_put;
 	}
 
 	/* Copy frame header from firmware into fp */
···
 
 out_free_frame:
 	fc_frame_free(fp);
-out_free:
+out_put:
 	/* Put reference for original command since REC completed */
 	kref_put(&orig_io_req->refcount, qedf_release_cmd);
+out_free:
 	kfree(cb_arg);
 }
+9-11
drivers/scsi/qedf/qedf_main.c
···
 	 * we allocation is the minimum off:
 	 *
 	 * Number of CPUs
-	 * Number of MSI-X vectors
-	 * Max number allocated in hardware (QEDF_MAX_NUM_CQS)
+	 * Number allocated by qed for our PCI function
 	 */
-	qedf->num_queues = min((unsigned int)QEDF_MAX_NUM_CQS,
-	    num_online_cpus());
+	qedf->num_queues = MIN_NUM_CPUS_MSIX(qedf);
 
 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n",
 		   qedf->num_queues);
···
 		goto err1;
 	}
 
+	/* Learn information crucial for qedf to progress */
+	rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
+	if (rc) {
+		QEDF_ERR(&(qedf->dbg_ctx), "Failed to dev info.\n");
+		goto err1;
+	}
+
 	/* queue allocation code should come here
 	 * order should be
 	 * slowpath_start
···
 		goto err2;
 	}
 	qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);
-
-	/* Learn information crucial for qedf to progress */
-	rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
-	if (rc) {
-		QEDF_ERR(&(qedf->dbg_ctx), "Failed to dev info.\n");
-		goto err1;
-	}
 
 	/* Record BDQ producer doorbell addresses */
 	qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr;
drivers/scsi/qla2xxx/qla_tmpl.c
···
 	for (i = 0; i < vha->hw->max_req_queues; i++) {
 		struct req_que *req = vha->hw->req_q_map[i];
 
-		if (!test_bit(i, vha->hw->req_qid_map))
-			continue;
-
 		if (req || !buf) {
 			length = req ?
 			    req->length : REQUEST_ENTRY_CNT_24XX;
···
 	} else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) {
 		for (i = 0; i < vha->hw->max_rsp_queues; i++) {
 			struct rsp_que *rsp = vha->hw->rsp_q_map[i];
-
-			if (!test_bit(i, vha->hw->rsp_qid_map))
-				continue;
 
 			if (rsp || !buf) {
 				length = rsp ?
···
 	for (i = 0; i < vha->hw->max_req_queues; i++) {
 		struct req_que *req = vha->hw->req_q_map[i];
 
-		if (!test_bit(i, vha->hw->req_qid_map))
-			continue;
-
 		if (req || !buf) {
 			qla27xx_insert16(i, buf, len);
 			qla27xx_insert16(1, buf, len);
···
 	} else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) {
 		for (i = 0; i < vha->hw->max_rsp_queues; i++) {
 			struct rsp_que *rsp = vha->hw->rsp_q_map[i];
-
-			if (!test_bit(i, vha->hw->rsp_qid_map))
-				continue;
 
 			if (rsp || !buf) {
 				qla27xx_insert16(i, buf, len);
+4
drivers/scsi/scsi.c
···
 module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");
 
+#ifdef CONFIG_SCSI_MQ_DEFAULT
 bool scsi_use_blk_mq = true;
+#else
+bool scsi_use_blk_mq = false;
+#endif
 module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO);
 
 static int __init init_scsi(void)
drivers/scsi/sg.c
···
 	return count;
 }
 
-static bool sg_is_valid_dxfer(sg_io_hdr_t *hp)
-{
-	switch (hp->dxfer_direction) {
-	case SG_DXFER_NONE:
-		if (hp->dxferp || hp->dxfer_len > 0)
-			return false;
-		return true;
-	case SG_DXFER_FROM_DEV:
-		/*
-		 * for SG_DXFER_FROM_DEV we always set dxfer_len to > 0. dxferp
-		 * can either be NULL or != NULL so there's no point in checking
-		 * it either. So just return true.
-		 */
-		return true;
-	case SG_DXFER_TO_DEV:
-	case SG_DXFER_TO_FROM_DEV:
-		if (!hp->dxferp || hp->dxfer_len == 0)
-			return false;
-		return true;
-	case SG_DXFER_UNKNOWN:
-		if ((!hp->dxferp && hp->dxfer_len) ||
-		  (hp->dxferp && hp->dxfer_len == 0))
-			return false;
-		return true;
-	default:
-		return false;
-	}
-}
-
 static int
 sg_common_write(Sg_fd * sfp, Sg_request * srp,
 		unsigned char *cmnd, int timeout, int blocking)
···
 			"sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n",
 			(int) cmnd[0], (int) hp->cmd_len));
 
-	if (!sg_is_valid_dxfer(hp))
+	if (hp->dxfer_len >= SZ_256M)
 		return -EINVAL;
 
 	k = sg_start_req(srp, cmnd);
···
 	read_lock_irqsave(&sfp->rq_list_lock, iflags);
 	val = 0;
 	list_for_each_entry(srp, &sfp->rq_list, entry) {
-		if (val > SG_MAX_QUEUE)
+		if (val >= SG_MAX_QUEUE)
 			break;
 		memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
 		rinfo[val].req_state = srp->done + 1;
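For reference, SZ_256M is one of the generic size constants from include/linux/sizes.h, so the check above rejects any single SG_IO transfer of 256 MiB or more before the request is built:

	/* include/linux/sizes.h */
	#define SZ_256M		0x10000000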
+2-2
drivers/scsi/st.c
···
 	kref_init(&tpnt->kref);
 	tpnt->disk = disk;
 	disk->private_data = &tpnt->driver;
-	disk->queue = SDp->request_queue;
 	/* SCSI tape doesn't register this gendisk via add_disk().  Manually
 	 * take queue reference that release_disk() expects. */
-	if (!blk_get_queue(disk->queue))
+	if (!blk_get_queue(SDp->request_queue))
 		goto out_put_disk;
+	disk->queue = SDp->request_queue;
 	tpnt->driver = &st_template;
 
 	tpnt->device = SDp;
+2
drivers/scsi/storvsc_drv.c
···
 	put_cpu();
 
 	if (ret == -EAGAIN) {
+		if (payload_sz > sizeof(cmd_request->mpb))
+			kfree(payload);
 		/* no more space */
 		return SCSI_MLQUEUE_DEVICE_BUSY;
 	}
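The kfree() above fires only when the payload did not fit in the request's embedded descriptor. A minimal, self-contained sketch of that inline-or-heap idiom (hypothetical names, not the storvsc code) shows why every early return has to repeat the same size test:

	#include <linux/slab.h>

	struct req_ctx {
		char inline_buf[64];	/* embedded buffer used for small payloads */
	};

	/* hypothetical transport hook; may return -EAGAIN when the ring is full */
	int xmit(void *payload, size_t len);

	int send_req(struct req_ctx *ctx, size_t payload_sz)
	{
		void *payload = ctx->inline_buf;
		int ret;

		if (payload_sz > sizeof(ctx->inline_buf)) {
			payload = kmalloc(payload_sz, GFP_ATOMIC);
			if (!payload)
				return -ENOMEM;
		}

		/* ... build the payload ... */

		ret = xmit(payload, payload_sz);
		if (ret == -EAGAIN) {
			/* mirror the allocation condition or the heap copy leaks */
			if (payload_sz > sizeof(ctx->inline_buf))
				kfree(payload);
			return ret;
		}

		/* on success the completion path owns and frees any heap copy */
		return ret;
	}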