Merge branch 'fixes' into misc

+178 -258
+11
drivers/scsi/Kconfig
··· 47 default n 48 depends on NET 49 50 config SCSI_PROC_FS 51 bool "legacy /proc/scsi/ support" 52 depends on SCSI && PROC_FS
··· 47 default n 48 depends on NET 49 50 + config SCSI_MQ_DEFAULT 51 + bool "SCSI: use blk-mq I/O path by default" 52 + depends on SCSI 53 + ---help--- 54 + This option enables the new blk-mq based I/O path for SCSI 55 + devices by default. With the option the scsi_mod.use_blk_mq 56 + module/boot option defaults to Y, without it to N, but it can 57 + still be overridden either way. 58 + 59 + If unsure say N. 60 + 61 config SCSI_PROC_FS 62 bool "legacy /proc/scsi/ support" 63 depends on SCSI && PROC_FS
+32 -38
drivers/scsi/aacraid/aachba.c
··· 549 if ((le32_to_cpu(get_name_reply->status) == CT_OK) 550 && (get_name_reply->data[0] != '\0')) { 551 char *sp = get_name_reply->data; 552 - sp[sizeof(((struct aac_get_name_resp *)NULL)->data)] = '\0'; 553 while (*sp == ' ') 554 ++sp; 555 if (*sp) { ··· 581 static int aac_get_container_name(struct scsi_cmnd * scsicmd) 582 { 583 int status; 584 struct aac_get_name *dinfo; 585 struct fib * cmd_fibcontext; 586 struct aac_dev * dev; 587 588 dev = (struct aac_dev *)scsicmd->device->host->hostdata; 589 590 cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); 591 592 aac_fib_init(cmd_fibcontext); 593 dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext); 594 595 dinfo->command = cpu_to_le32(VM_ContainerConfig); 596 dinfo->type = cpu_to_le32(CT_READ_NAME); 597 dinfo->cid = cpu_to_le32(scmd_id(scsicmd)); 598 - dinfo->count = cpu_to_le32(sizeof(((struct aac_get_name_resp *)NULL)->data)); 599 600 status = aac_fib_send(ContainerCommand, 601 cmd_fibcontext, ··· 612 /* 613 * Check that the command queued to the controller 614 */ 615 - if (status == -EINPROGRESS) { 616 - scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; 617 return 0; 618 - } 619 620 printk(KERN_WARNING "aac_get_container_name: aac_fib_send failed with status: %d.\n", status); 621 aac_fib_complete(cmd_fibcontext); ··· 724 725 dinfo->count = cpu_to_le32(scmd_id(scsicmd)); 726 dinfo->type = cpu_to_le32(FT_FILESYS); 727 728 status = aac_fib_send(ContainerCommand, 729 fibptr, ··· 736 /* 737 * Check that the command queued to the controller 738 */ 739 - if (status == -EINPROGRESS) 740 - scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; 741 - else if (status < 0) { 742 /* Inherit results from VM_NameServe, if any */ 743 dresp->status = cpu_to_le32(ST_OK); 744 _aac_probe_container2(context, fibptr); ··· 764 dinfo->count = cpu_to_le32(scmd_id(scsicmd)); 765 dinfo->type = cpu_to_le32(FT_FILESYS); 766 scsicmd->SCp.ptr = (char *)callback; 767 768 status = aac_fib_send(ContainerCommand, 769 fibptr, ··· 776 /* 777 * Check that the command queued to the controller 778 */ 779 - if (status == -EINPROGRESS) { 780 - scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; 781 return 0; 782 - } 783 if (status < 0) { 784 scsicmd->SCp.ptr = NULL; 785 aac_fib_complete(fibptr); ··· 1124 dinfo->command = cpu_to_le32(VM_ContainerConfig); 1125 dinfo->type = cpu_to_le32(CT_CID_TO_32BITS_UID); 1126 dinfo->cid = cpu_to_le32(scmd_id(scsicmd)); 1127 1128 status = aac_fib_send(ContainerCommand, 1129 cmd_fibcontext, ··· 1137 /* 1138 * Check that the command queued to the controller 1139 */ 1140 - if (status == -EINPROGRESS) { 1141 - scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; 1142 return 0; 1143 - } 1144 1145 printk(KERN_WARNING "aac_get_container_serial: aac_fib_send failed with status: %d.\n", status); 1146 aac_fib_complete(cmd_fibcontext); ··· 2332 * Alocate and initialize a Fib 2333 */ 2334 cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); 2335 - 2336 status = aac_adapter_read(cmd_fibcontext, scsicmd, lba, count); 2337 2338 /* 2339 * Check that the command queued to the controller 2340 */ 2341 - if (status == -EINPROGRESS) { 2342 - scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; 2343 return 0; 2344 - } 2345 2346 printk(KERN_WARNING "aac_read: aac_fib_send failed with status: %d.\n", status); 2347 /* ··· 2424 * Allocate and initialize a Fib then setup a BlockWrite command 2425 */ 2426 cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); 2427 - 2428 status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua); 2429 2430 /* 2431 * Check that the command queued to the controller 2432 */ 2433 - if (status 
== -EINPROGRESS) { 2434 - scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; 2435 return 0; 2436 - } 2437 2438 printk(KERN_WARNING "aac_write: aac_fib_send failed with status: %d\n", status); 2439 /* ··· 2581 synchronizecmd->cid = cpu_to_le32(scmd_id(scsicmd)); 2582 synchronizecmd->count = 2583 cpu_to_le32(sizeof(((struct aac_synchronize_reply *)NULL)->data)); 2584 2585 /* 2586 * Now send the Fib to the adapter ··· 2597 /* 2598 * Check that the command queued to the controller 2599 */ 2600 - if (status == -EINPROGRESS) { 2601 - scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; 2602 return 0; 2603 - } 2604 2605 printk(KERN_WARNING 2606 "aac_synchronize: aac_fib_send failed with status: %d.\n", status); ··· 2658 pmcmd->cid = cpu_to_le32(sdev_id(sdev)); 2659 pmcmd->parm = (scsicmd->cmnd[1] & 1) ? 2660 cpu_to_le32(CT_PM_UNIT_IMMEDIATE) : 0; 2661 2662 /* 2663 * Now send the Fib to the adapter ··· 2674 /* 2675 * Check that the command queued to the controller 2676 */ 2677 - if (status == -EINPROGRESS) { 2678 - scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; 2679 return 0; 2680 - } 2681 2682 aac_fib_complete(cmd_fibcontext); 2683 aac_fib_free(cmd_fibcontext); ··· 3194 return -EBUSY; 3195 if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk))) 3196 return -EFAULT; 3197 - if (qd.cnum == -1) 3198 qd.cnum = qd.id; 3199 - else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1)) 3200 - { 3201 if (qd.cnum < 0 || qd.cnum >= dev->maximum_num_containers) 3202 return -EINVAL; 3203 qd.instance = dev->scsi_host_ptr->host_no; ··· 3683 * Allocate and initialize a Fib then setup a BlockWrite command 3684 */ 3685 cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); 3686 - 3687 status = aac_adapter_scsi(cmd_fibcontext, scsicmd); 3688 3689 /* 3690 * Check that the command queued to the controller 3691 */ 3692 - if (status == -EINPROGRESS) { 3693 - scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; 3694 return 0; 3695 - } 3696 3697 printk(KERN_WARNING "aac_srb: aac_fib_send failed with status: %d\n", status); 3698 aac_fib_complete(cmd_fibcontext); ··· 3728 if (!cmd_fibcontext) 3729 return -1; 3730 3731 status = aac_adapter_hba(cmd_fibcontext, scsicmd); 3732 3733 /* 3734 * Check that the command queued to the controller 3735 */ 3736 - if (status == -EINPROGRESS) { 3737 - scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; 3738 return 0; 3739 - } 3740 3741 pr_warn("aac_hba_cmd_req: aac_fib_send failed with status: %d\n", 3742 status);
··· 549 if ((le32_to_cpu(get_name_reply->status) == CT_OK) 550 && (get_name_reply->data[0] != '\0')) { 551 char *sp = get_name_reply->data; 552 + int data_size = FIELD_SIZEOF(struct aac_get_name_resp, data); 553 + 554 + sp[data_size - 1] = '\0'; 555 while (*sp == ' ') 556 ++sp; 557 if (*sp) { ··· 579 static int aac_get_container_name(struct scsi_cmnd * scsicmd) 580 { 581 int status; 582 + int data_size; 583 struct aac_get_name *dinfo; 584 struct fib * cmd_fibcontext; 585 struct aac_dev * dev; 586 587 dev = (struct aac_dev *)scsicmd->device->host->hostdata; 588 589 + data_size = FIELD_SIZEOF(struct aac_get_name_resp, data); 590 + 591 cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); 592 593 aac_fib_init(cmd_fibcontext); 594 dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext); 595 + scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; 596 597 dinfo->command = cpu_to_le32(VM_ContainerConfig); 598 dinfo->type = cpu_to_le32(CT_READ_NAME); 599 dinfo->cid = cpu_to_le32(scmd_id(scsicmd)); 600 + dinfo->count = cpu_to_le32(data_size - 1); 601 602 status = aac_fib_send(ContainerCommand, 603 cmd_fibcontext, ··· 606 /* 607 * Check that the command queued to the controller 608 */ 609 + if (status == -EINPROGRESS) 610 return 0; 611 612 printk(KERN_WARNING "aac_get_container_name: aac_fib_send failed with status: %d.\n", status); 613 aac_fib_complete(cmd_fibcontext); ··· 720 721 dinfo->count = cpu_to_le32(scmd_id(scsicmd)); 722 dinfo->type = cpu_to_le32(FT_FILESYS); 723 + scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; 724 725 status = aac_fib_send(ContainerCommand, 726 fibptr, ··· 731 /* 732 * Check that the command queued to the controller 733 */ 734 + if (status < 0 && status != -EINPROGRESS) { 735 /* Inherit results from VM_NameServe, if any */ 736 dresp->status = cpu_to_le32(ST_OK); 737 _aac_probe_container2(context, fibptr); ··· 761 dinfo->count = cpu_to_le32(scmd_id(scsicmd)); 762 dinfo->type = cpu_to_le32(FT_FILESYS); 763 scsicmd->SCp.ptr = (char *)callback; 764 + scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; 765 766 status = aac_fib_send(ContainerCommand, 767 fibptr, ··· 772 /* 773 * Check that the command queued to the controller 774 */ 775 + if (status == -EINPROGRESS) 776 return 0; 777 + 778 if (status < 0) { 779 scsicmd->SCp.ptr = NULL; 780 aac_fib_complete(fibptr); ··· 1121 dinfo->command = cpu_to_le32(VM_ContainerConfig); 1122 dinfo->type = cpu_to_le32(CT_CID_TO_32BITS_UID); 1123 dinfo->cid = cpu_to_le32(scmd_id(scsicmd)); 1124 + scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; 1125 1126 status = aac_fib_send(ContainerCommand, 1127 cmd_fibcontext, ··· 1133 /* 1134 * Check that the command queued to the controller 1135 */ 1136 + if (status == -EINPROGRESS) 1137 return 0; 1138 1139 printk(KERN_WARNING "aac_get_container_serial: aac_fib_send failed with status: %d.\n", status); 1140 aac_fib_complete(cmd_fibcontext); ··· 2330 * Alocate and initialize a Fib 2331 */ 2332 cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); 2333 + scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; 2334 status = aac_adapter_read(cmd_fibcontext, scsicmd, lba, count); 2335 2336 /* 2337 * Check that the command queued to the controller 2338 */ 2339 + if (status == -EINPROGRESS) 2340 return 0; 2341 2342 printk(KERN_WARNING "aac_read: aac_fib_send failed with status: %d.\n", status); 2343 /* ··· 2424 * Allocate and initialize a Fib then setup a BlockWrite command 2425 */ 2426 cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); 2427 + scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; 2428 status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua); 2429 2430 /* 
2431 * Check that the command queued to the controller 2432 */ 2433 + if (status == -EINPROGRESS) 2434 return 0; 2435 2436 printk(KERN_WARNING "aac_write: aac_fib_send failed with status: %d\n", status); 2437 /* ··· 2583 synchronizecmd->cid = cpu_to_le32(scmd_id(scsicmd)); 2584 synchronizecmd->count = 2585 cpu_to_le32(sizeof(((struct aac_synchronize_reply *)NULL)->data)); 2586 + scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; 2587 2588 /* 2589 * Now send the Fib to the adapter ··· 2598 /* 2599 * Check that the command queued to the controller 2600 */ 2601 + if (status == -EINPROGRESS) 2602 return 0; 2603 2604 printk(KERN_WARNING 2605 "aac_synchronize: aac_fib_send failed with status: %d.\n", status); ··· 2661 pmcmd->cid = cpu_to_le32(sdev_id(sdev)); 2662 pmcmd->parm = (scsicmd->cmnd[1] & 1) ? 2663 cpu_to_le32(CT_PM_UNIT_IMMEDIATE) : 0; 2664 + scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; 2665 2666 /* 2667 * Now send the Fib to the adapter ··· 2676 /* 2677 * Check that the command queued to the controller 2678 */ 2679 + if (status == -EINPROGRESS) 2680 return 0; 2681 2682 aac_fib_complete(cmd_fibcontext); 2683 aac_fib_free(cmd_fibcontext); ··· 3198 return -EBUSY; 3199 if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk))) 3200 return -EFAULT; 3201 + if (qd.cnum == -1) { 3202 + if (qd.id < 0 || qd.id >= dev->maximum_num_containers) 3203 + return -EINVAL; 3204 qd.cnum = qd.id; 3205 + } else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1)) { 3206 if (qd.cnum < 0 || qd.cnum >= dev->maximum_num_containers) 3207 return -EINVAL; 3208 qd.instance = dev->scsi_host_ptr->host_no; ··· 3686 * Allocate and initialize a Fib then setup a BlockWrite command 3687 */ 3688 cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd); 3689 + scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; 3690 status = aac_adapter_scsi(cmd_fibcontext, scsicmd); 3691 3692 /* 3693 * Check that the command queued to the controller 3694 */ 3695 + if (status == -EINPROGRESS) 3696 return 0; 3697 3698 printk(KERN_WARNING "aac_srb: aac_fib_send failed with status: %d\n", status); 3699 aac_fib_complete(cmd_fibcontext); ··· 3733 if (!cmd_fibcontext) 3734 return -1; 3735 3736 + scsicmd->SCp.phase = AAC_OWNER_FIRMWARE; 3737 status = aac_adapter_hba(cmd_fibcontext, scsicmd); 3738 3739 /* 3740 * Check that the command queued to the controller 3741 */ 3742 + if (status == -EINPROGRESS) 3743 return 0; 3744 3745 pr_warn("aac_hba_cmd_req: aac_fib_send failed with status: %d\n", 3746 status);
+1 -1
drivers/scsi/aacraid/aacraid.h
··· 2275 __le32 parm3; 2276 __le32 parm4; 2277 __le32 parm5; 2278 - u8 data[16]; 2279 }; 2280 2281 #define CT_CID_TO_32BITS_UID 165
··· 2275 __le32 parm3; 2276 __le32 parm4; 2277 __le32 parm5; 2278 + u8 data[17]; 2279 }; 2280 2281 #define CT_CID_TO_32BITS_UID 165
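Note on the aacraid change: enlarging data[] to 17 bytes pairs with the aachba.c hunks above, which switch to FIELD_SIZEOF() and write the terminating NUL inside the buffer instead of one byte past it. A standalone, hypothetical sketch of that pattern (illustrative names, not the driver's symbols):

#include <stdio.h>
#include <string.h>

/* FIELD_SIZEOF() as defined in <linux/kernel.h> of this era. */
#define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))

struct reply {
        unsigned char data[17];         /* 16 payload bytes + room for '\0' */
};

int main(void)
{
        struct reply r;
        int data_size = FIELD_SIZEOF(struct reply, data);

        memset(r.data, 'A', sizeof(r.data));
        r.data[data_size - 1] = '\0';   /* terminate inside the buffer, not past it */
        printf("usable bytes: %d, string: %s\n", data_size - 1, (char *)r.data);
        return 0;
}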
+15 -53
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
··· 2624 }; 2625 2626 /** 2627 - * bnx2fc_percpu_thread_create - Create a receive thread for an 2628 - * online CPU 2629 * 2630 * @cpu: cpu index for the online cpu 2631 */ 2632 - static void bnx2fc_percpu_thread_create(unsigned int cpu) 2633 { 2634 struct bnx2fc_percpu_s *p; 2635 struct task_struct *thread; ··· 2638 thread = kthread_create_on_node(bnx2fc_percpu_io_thread, 2639 (void *)p, cpu_to_node(cpu), 2640 "bnx2fc_thread/%d", cpu); 2641 /* bind thread to the cpu */ 2642 - if (likely(!IS_ERR(thread))) { 2643 - kthread_bind(thread, cpu); 2644 - p->iothread = thread; 2645 - wake_up_process(thread); 2646 - } 2647 } 2648 2649 - static void bnx2fc_percpu_thread_destroy(unsigned int cpu) 2650 { 2651 struct bnx2fc_percpu_s *p; 2652 struct task_struct *thread; ··· 2662 thread = p->iothread; 2663 p->iothread = NULL; 2664 2665 - 2666 /* Free all work in the list */ 2667 list_for_each_entry_safe(work, tmp, &p->work_list, list) { 2668 list_del_init(&work->list); ··· 2673 2674 if (thread) 2675 kthread_stop(thread); 2676 - } 2677 - 2678 - 2679 - static int bnx2fc_cpu_online(unsigned int cpu) 2680 - { 2681 - printk(PFX "CPU %x online: Create Rx thread\n", cpu); 2682 - bnx2fc_percpu_thread_create(cpu); 2683 - return 0; 2684 - } 2685 - 2686 - static int bnx2fc_cpu_dead(unsigned int cpu) 2687 - { 2688 - printk(PFX "CPU %x offline: Remove Rx thread\n", cpu); 2689 - bnx2fc_percpu_thread_destroy(cpu); 2690 return 0; 2691 } 2692 ··· 2747 spin_lock_init(&p->fp_work_lock); 2748 } 2749 2750 - get_online_cpus(); 2751 - 2752 - for_each_online_cpu(cpu) 2753 - bnx2fc_percpu_thread_create(cpu); 2754 - 2755 - rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, 2756 - "scsi/bnx2fc:online", 2757 - bnx2fc_cpu_online, NULL); 2758 if (rc < 0) 2759 - goto stop_threads; 2760 bnx2fc_online_state = rc; 2761 2762 - cpuhp_setup_state_nocalls(CPUHP_SCSI_BNX2FC_DEAD, "scsi/bnx2fc:dead", 2763 - NULL, bnx2fc_cpu_dead); 2764 - put_online_cpus(); 2765 - 2766 cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb); 2767 - 2768 return 0; 2769 2770 - stop_threads: 2771 - for_each_online_cpu(cpu) 2772 - bnx2fc_percpu_thread_destroy(cpu); 2773 - put_online_cpus(); 2774 kthread_stop(l2_thread); 2775 free_wq: 2776 destroy_workqueue(bnx2fc_wq); ··· 2775 struct fcoe_percpu_s *bg; 2776 struct task_struct *l2_thread; 2777 struct sk_buff *skb; 2778 - unsigned int cpu = 0; 2779 2780 /* 2781 * NOTE: Since cnic calls register_driver routine rtnl_lock, ··· 2815 if (l2_thread) 2816 kthread_stop(l2_thread); 2817 2818 - get_online_cpus(); 2819 - /* Destroy per cpu threads */ 2820 - for_each_online_cpu(cpu) { 2821 - bnx2fc_percpu_thread_destroy(cpu); 2822 - } 2823 - 2824 - cpuhp_remove_state_nocalls(bnx2fc_online_state); 2825 - cpuhp_remove_state_nocalls(CPUHP_SCSI_BNX2FC_DEAD); 2826 - 2827 - put_online_cpus(); 2828 2829 destroy_workqueue(bnx2fc_wq); 2830 /*
··· 2624 }; 2625 2626 /** 2627 + * bnx2fc_cpu_online - Create a receive thread for an online CPU 2628 * 2629 * @cpu: cpu index for the online cpu 2630 */ 2631 + static int bnx2fc_cpu_online(unsigned int cpu) 2632 { 2633 struct bnx2fc_percpu_s *p; 2634 struct task_struct *thread; ··· 2639 thread = kthread_create_on_node(bnx2fc_percpu_io_thread, 2640 (void *)p, cpu_to_node(cpu), 2641 "bnx2fc_thread/%d", cpu); 2642 + if (IS_ERR(thread)) 2643 + return PTR_ERR(thread); 2644 + 2645 /* bind thread to the cpu */ 2646 + kthread_bind(thread, cpu); 2647 + p->iothread = thread; 2648 + wake_up_process(thread); 2649 + return 0; 2650 } 2651 2652 + static int bnx2fc_cpu_offline(unsigned int cpu) 2653 { 2654 struct bnx2fc_percpu_s *p; 2655 struct task_struct *thread; ··· 2661 thread = p->iothread; 2662 p->iothread = NULL; 2663 2664 /* Free all work in the list */ 2665 list_for_each_entry_safe(work, tmp, &p->work_list, list) { 2666 list_del_init(&work->list); ··· 2673 2674 if (thread) 2675 kthread_stop(thread); 2676 return 0; 2677 } 2678 ··· 2761 spin_lock_init(&p->fp_work_lock); 2762 } 2763 2764 + rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/bnx2fc:online", 2765 + bnx2fc_cpu_online, bnx2fc_cpu_offline); 2766 if (rc < 0) 2767 + goto stop_thread; 2768 bnx2fc_online_state = rc; 2769 2770 cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb); 2771 return 0; 2772 2773 + stop_thread: 2774 kthread_stop(l2_thread); 2775 free_wq: 2776 destroy_workqueue(bnx2fc_wq); ··· 2803 struct fcoe_percpu_s *bg; 2804 struct task_struct *l2_thread; 2805 struct sk_buff *skb; 2806 2807 /* 2808 * NOTE: Since cnic calls register_driver routine rtnl_lock, ··· 2844 if (l2_thread) 2845 kthread_stop(l2_thread); 2846 2847 + cpuhp_remove_state(bnx2fc_online_state); 2848 2849 destroy_workqueue(bnx2fc_wq); 2850 /*
+23 -22
drivers/scsi/bnx2fc/bnx2fc_hwi.c
··· 1008 return work; 1009 } 1010 1011 int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt) 1012 { 1013 struct fcoe_cqe *cq; ··· 1064 /* Unsolicited event notification */ 1065 bnx2fc_process_unsol_compl(tgt, wqe); 1066 } else { 1067 - /* Pending work request completion */ 1068 - struct bnx2fc_work *work = NULL; 1069 - struct bnx2fc_percpu_s *fps = NULL; 1070 - unsigned int cpu = wqe % num_possible_cpus(); 1071 - 1072 - fps = &per_cpu(bnx2fc_percpu, cpu); 1073 - spin_lock_bh(&fps->fp_work_lock); 1074 - if (unlikely(!fps->iothread)) 1075 - goto unlock; 1076 - 1077 - work = bnx2fc_alloc_work(tgt, wqe); 1078 - if (work) 1079 - list_add_tail(&work->list, 1080 - &fps->work_list); 1081 - unlock: 1082 - spin_unlock_bh(&fps->fp_work_lock); 1083 - 1084 - /* Pending work request completion */ 1085 - if (fps->iothread && work) 1086 - wake_up_process(fps->iothread); 1087 - else 1088 - bnx2fc_process_cq_compl(tgt, wqe); 1089 num_free_sqes++; 1090 } 1091 cqe++;
··· 1008 return work; 1009 } 1010 1011 + /* Pending work request completion */ 1012 + static void bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe) 1013 + { 1014 + unsigned int cpu = wqe % num_possible_cpus(); 1015 + struct bnx2fc_percpu_s *fps; 1016 + struct bnx2fc_work *work; 1017 + 1018 + fps = &per_cpu(bnx2fc_percpu, cpu); 1019 + spin_lock_bh(&fps->fp_work_lock); 1020 + if (fps->iothread) { 1021 + work = bnx2fc_alloc_work(tgt, wqe); 1022 + if (work) { 1023 + list_add_tail(&work->list, &fps->work_list); 1024 + wake_up_process(fps->iothread); 1025 + spin_unlock_bh(&fps->fp_work_lock); 1026 + return; 1027 + } 1028 + } 1029 + spin_unlock_bh(&fps->fp_work_lock); 1030 + bnx2fc_process_cq_compl(tgt, wqe); 1031 + } 1032 + 1033 int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt) 1034 { 1035 struct fcoe_cqe *cq; ··· 1042 /* Unsolicited event notification */ 1043 bnx2fc_process_unsol_compl(tgt, wqe); 1044 } else { 1045 + bnx2fc_pending_work(tgt, wqe); 1046 num_free_sqes++; 1047 } 1048 cqe++;
+15 -49
drivers/scsi/bnx2i/bnx2i_init.c
··· 404 405 406 /** 407 - * bnx2i_percpu_thread_create - Create a receive thread for an 408 - * online CPU 409 * 410 * @cpu: cpu index for the online cpu 411 */ 412 - static void bnx2i_percpu_thread_create(unsigned int cpu) 413 { 414 struct bnx2i_percpu_s *p; 415 struct task_struct *thread; ··· 418 thread = kthread_create_on_node(bnx2i_percpu_io_thread, (void *)p, 419 cpu_to_node(cpu), 420 "bnx2i_thread/%d", cpu); 421 /* bind thread to the cpu */ 422 - if (likely(!IS_ERR(thread))) { 423 - kthread_bind(thread, cpu); 424 - p->iothread = thread; 425 - wake_up_process(thread); 426 - } 427 } 428 429 - 430 - static void bnx2i_percpu_thread_destroy(unsigned int cpu) 431 { 432 struct bnx2i_percpu_s *p; 433 struct task_struct *thread; ··· 451 spin_unlock_bh(&p->p_work_lock); 452 if (thread) 453 kthread_stop(thread); 454 - } 455 - 456 - static int bnx2i_cpu_online(unsigned int cpu) 457 - { 458 - pr_info("bnx2i: CPU %x online: Create Rx thread\n", cpu); 459 - bnx2i_percpu_thread_create(cpu); 460 - return 0; 461 - } 462 - 463 - static int bnx2i_cpu_dead(unsigned int cpu) 464 - { 465 - pr_info("CPU %x offline: Remove Rx thread\n", cpu); 466 - bnx2i_percpu_thread_destroy(cpu); 467 return 0; 468 } 469 ··· 498 p->iothread = NULL; 499 } 500 501 - get_online_cpus(); 502 - 503 - for_each_online_cpu(cpu) 504 - bnx2i_percpu_thread_create(cpu); 505 - 506 - err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, 507 - "scsi/bnx2i:online", 508 - bnx2i_cpu_online, NULL); 509 if (err < 0) 510 - goto remove_threads; 511 bnx2i_online_state = err; 512 - 513 - cpuhp_setup_state_nocalls(CPUHP_SCSI_BNX2I_DEAD, "scsi/bnx2i:dead", 514 - NULL, bnx2i_cpu_dead); 515 - put_online_cpus(); 516 return 0; 517 518 - remove_threads: 519 - for_each_online_cpu(cpu) 520 - bnx2i_percpu_thread_destroy(cpu); 521 - put_online_cpus(); 522 cnic_unregister_driver(CNIC_ULP_ISCSI); 523 unreg_xport: 524 iscsi_unregister_transport(&bnx2i_iscsi_transport); ··· 525 static void __exit bnx2i_mod_exit(void) 526 { 527 struct bnx2i_hba *hba; 528 - unsigned cpu = 0; 529 530 mutex_lock(&bnx2i_dev_lock); 531 while (!list_empty(&adapter_list)) { ··· 542 } 543 mutex_unlock(&bnx2i_dev_lock); 544 545 - get_online_cpus(); 546 - 547 - for_each_online_cpu(cpu) 548 - bnx2i_percpu_thread_destroy(cpu); 549 - 550 - cpuhp_remove_state_nocalls(bnx2i_online_state); 551 - cpuhp_remove_state_nocalls(CPUHP_SCSI_BNX2I_DEAD); 552 - put_online_cpus(); 553 554 iscsi_unregister_transport(&bnx2i_iscsi_transport); 555 cnic_unregister_driver(CNIC_ULP_ISCSI);
··· 404 405 406 /** 407 + * bnx2i_cpu_online - Create a receive thread for an online CPU 408 * 409 * @cpu: cpu index for the online cpu 410 */ 411 + static int bnx2i_cpu_online(unsigned int cpu) 412 { 413 struct bnx2i_percpu_s *p; 414 struct task_struct *thread; ··· 419 thread = kthread_create_on_node(bnx2i_percpu_io_thread, (void *)p, 420 cpu_to_node(cpu), 421 "bnx2i_thread/%d", cpu); 422 + if (IS_ERR(thread)) 423 + return PTR_ERR(thread); 424 + 425 /* bind thread to the cpu */ 426 + kthread_bind(thread, cpu); 427 + p->iothread = thread; 428 + wake_up_process(thread); 429 + return 0; 430 } 431 432 + static int bnx2i_cpu_offline(unsigned int cpu) 433 { 434 struct bnx2i_percpu_s *p; 435 struct task_struct *thread; ··· 451 spin_unlock_bh(&p->p_work_lock); 452 if (thread) 453 kthread_stop(thread); 454 return 0; 455 } 456 ··· 511 p->iothread = NULL; 512 } 513 514 + err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/bnx2i:online", 515 + bnx2i_cpu_online, bnx2i_cpu_offline); 516 if (err < 0) 517 + goto unreg_driver; 518 bnx2i_online_state = err; 519 return 0; 520 521 + unreg_driver: 522 cnic_unregister_driver(CNIC_ULP_ISCSI); 523 unreg_xport: 524 iscsi_unregister_transport(&bnx2i_iscsi_transport); ··· 551 static void __exit bnx2i_mod_exit(void) 552 { 553 struct bnx2i_hba *hba; 554 555 mutex_lock(&bnx2i_dev_lock); 556 while (!list_empty(&adapter_list)) { ··· 569 } 570 mutex_unlock(&bnx2i_dev_lock); 571 572 + cpuhp_remove_state(bnx2i_online_state); 573 574 iscsi_unregister_transport(&bnx2i_iscsi_transport); 575 cnic_unregister_driver(CNIC_ULP_ISCSI);
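Both the bnx2fc and bnx2i conversions above follow the same pattern: the open-coded get_online_cpus()/for_each_online_cpu() bring-up loop and the separate _DEAD state are replaced by a single cpuhp_setup_state() registration, letting the hotplug core invoke the callbacks for CPUs that are already online. A minimal, hypothetical module sketch of that API usage (not the driver code):

#include <linux/cpuhotplug.h>
#include <linux/module.h>

static int demo_hp_state;

static int demo_cpu_online(unsigned int cpu)
{
        pr_info("demo: CPU %u online\n", cpu);
        return 0;               /* a negative return would roll the setup back */
}

static int demo_cpu_offline(unsigned int cpu)
{
        pr_info("demo: CPU %u going offline\n", cpu);
        return 0;
}

static int __init demo_init(void)
{
        int ret;

        /* Registers both callbacks and runs demo_cpu_online() on every CPU
         * that is already online, so no manual bring-up loop is needed. */
        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "demo:online",
                                demo_cpu_online, demo_cpu_offline);
        if (ret < 0)
                return ret;
        demo_hp_state = ret;    /* dynamically allocated state, kept for removal */
        return 0;
}

static void __exit demo_exit(void)
{
        /* Invokes demo_cpu_offline() on all online CPUs and frees the state. */
        cpuhp_remove_state(demo_hp_state);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");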
+3 -1
drivers/scsi/csiostor/csio_hw.c
··· 3845 3846 if (csio_is_hw_ready(hw)) 3847 return 0; 3848 - else 3849 return -EINVAL; 3850 } 3851 3852 int
··· 3845 3846 if (csio_is_hw_ready(hw)) 3847 return 0; 3848 + else if (csio_match_state(hw, csio_hws_uninit)) 3849 return -EINVAL; 3850 + else 3851 + return -ENODEV; 3852 } 3853 3854 int
+8 -4
drivers/scsi/csiostor/csio_init.c
··· 970 971 pci_set_drvdata(pdev, hw); 972 973 - if (csio_hw_start(hw) != 0) { 974 - dev_err(&pdev->dev, 975 - "Failed to start FW, continuing in debug mode.\n"); 976 - return 0; 977 } 978 979 sprintf(hw->fwrev_str, "%u.%u.%u.%u\n",
··· 970 971 pci_set_drvdata(pdev, hw); 972 973 + rv = csio_hw_start(hw); 974 + if (rv) { 975 + if (rv == -EINVAL) { 976 + dev_err(&pdev->dev, 977 + "Failed to start FW, continuing in debug mode.\n"); 978 + return 0; 979 + } 980 + goto err_lnode_exit; 981 } 982 983 sprintf(hw->fwrev_str, "%u.%u.%u.%u\n",
+3
drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
··· 1635 goto rel_resource; 1636 } 1637 1638 csk->atid = cxgb4_alloc_atid(lldi->tids, csk); 1639 if (csk->atid < 0) { 1640 pr_err("%s, NO atid available.\n", ndev->name);
··· 1635 goto rel_resource; 1636 } 1637 1638 + if (!(n->nud_state & NUD_VALID)) 1639 + neigh_event_send(n, NULL); 1640 + 1641 csk->atid = cxgb4_alloc_atid(lldi->tids, csk); 1642 if (csk->atid < 0) { 1643 pr_err("%s, NO atid available.\n", ndev->name);
+20 -14
drivers/scsi/ipr.c
··· 3351 return; 3352 } 3353 3354 if (!ioa_cfg->scan_enabled) { 3355 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3356 return; ··· 4945 } 4946 if (ipr_is_vset_device(res)) { 4947 sdev->scsi_level = SCSI_SPC_3; 4948 blk_queue_rq_timeout(sdev->request_queue, 4949 IPR_VSET_RW_TIMEOUT); 4950 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS); ··· 7222 ENTER; 7223 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { 7224 ipr_trace; 7225 - spin_unlock_irq(ioa_cfg->host->host_lock); 7226 - scsi_unblock_requests(ioa_cfg->host); 7227 - spin_lock_irq(ioa_cfg->host->host_lock); 7228 } 7229 7230 ioa_cfg->in_reset_reload = 0; ··· 7297 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 7298 wake_up_all(&ioa_cfg->reset_wait_q); 7299 7300 - spin_unlock(ioa_cfg->host->host_lock); 7301 - scsi_unblock_requests(ioa_cfg->host); 7302 - spin_lock(ioa_cfg->host->host_lock); 7303 - 7304 - if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds) 7305 - scsi_block_requests(ioa_cfg->host); 7306 - 7307 schedule_work(&ioa_cfg->work_q); 7308 LEAVE; 7309 return IPR_RC_JOB_RETURN; ··· 9253 spin_unlock(&ioa_cfg->hrrq[i]._lock); 9254 } 9255 wmb(); 9256 - if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) 9257 scsi_block_requests(ioa_cfg->host); 9258 9259 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); 9260 ioa_cfg->reset_cmd = ipr_cmd; ··· 9313 wake_up_all(&ioa_cfg->reset_wait_q); 9314 9315 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { 9316 - spin_unlock_irq(ioa_cfg->host->host_lock); 9317 - scsi_unblock_requests(ioa_cfg->host); 9318 - spin_lock_irq(ioa_cfg->host->host_lock); 9319 } 9320 return; 9321 } else {
··· 3351 return; 3352 } 3353 3354 + if (ioa_cfg->scsi_unblock) { 3355 + ioa_cfg->scsi_unblock = 0; 3356 + ioa_cfg->scsi_blocked = 0; 3357 + spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3358 + scsi_unblock_requests(ioa_cfg->host); 3359 + spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags); 3360 + if (ioa_cfg->scsi_blocked) 3361 + scsi_block_requests(ioa_cfg->host); 3362 + } 3363 + 3364 if (!ioa_cfg->scan_enabled) { 3365 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags); 3366 return; ··· 4935 } 4936 if (ipr_is_vset_device(res)) { 4937 sdev->scsi_level = SCSI_SPC_3; 4938 + sdev->no_report_opcodes = 1; 4939 blk_queue_rq_timeout(sdev->request_queue, 4940 IPR_VSET_RW_TIMEOUT); 4941 blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS); ··· 7211 ENTER; 7212 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { 7213 ipr_trace; 7214 + ioa_cfg->scsi_unblock = 1; 7215 + schedule_work(&ioa_cfg->work_q); 7216 } 7217 7218 ioa_cfg->in_reset_reload = 0; ··· 7287 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 7288 wake_up_all(&ioa_cfg->reset_wait_q); 7289 7290 + ioa_cfg->scsi_unblock = 1; 7291 schedule_work(&ioa_cfg->work_q); 7292 LEAVE; 7293 return IPR_RC_JOB_RETURN; ··· 9249 spin_unlock(&ioa_cfg->hrrq[i]._lock); 9250 } 9251 wmb(); 9252 + if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { 9253 + ioa_cfg->scsi_unblock = 0; 9254 + ioa_cfg->scsi_blocked = 1; 9255 scsi_block_requests(ioa_cfg->host); 9256 + } 9257 9258 ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg); 9259 ioa_cfg->reset_cmd = ipr_cmd; ··· 9306 wake_up_all(&ioa_cfg->reset_wait_q); 9307 9308 if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) { 9309 + ioa_cfg->scsi_unblock = 1; 9310 + schedule_work(&ioa_cfg->work_q); 9311 } 9312 return; 9313 } else {
+2
drivers/scsi/ipr.h
··· 1488 u8 cfg_locked:1; 1489 u8 clear_isr:1; 1490 u8 probe_done:1; 1491 1492 u8 revid; 1493
··· 1488 u8 cfg_locked:1; 1489 u8 clear_isr:1; 1490 u8 probe_done:1; 1491 + u8 scsi_unblock:1; 1492 + u8 scsi_blocked:1; 1493 1494 u8 revid; 1495
+1 -1
drivers/scsi/megaraid/megaraid_sas_base.c
··· 6240 fail_start_aen: 6241 fail_io_attach: 6242 megasas_mgmt_info.count--; 6243 - megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL; 6244 megasas_mgmt_info.max_index--; 6245 6246 instance->instancet->disable_intr(instance); 6247 megasas_destroy_irqs(instance);
··· 6240 fail_start_aen: 6241 fail_io_attach: 6242 megasas_mgmt_info.count--; 6243 megasas_mgmt_info.max_index--; 6244 + megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL; 6245 6246 instance->instancet->disable_intr(instance); 6247 megasas_destroy_irqs(instance);
+2 -1
drivers/scsi/qedf/qedf.h
··· 526 #define QEDF_WRITE (1 << 0) 527 #define MAX_FIBRE_LUNS 0xffffffff 528 529 - #define QEDF_MAX_NUM_CQS 8 530 531 /* 532 * PCI function probe defines
··· 526 #define QEDF_WRITE (1 << 0) 527 #define MAX_FIBRE_LUNS 0xffffffff 528 529 + #define MIN_NUM_CPUS_MSIX(x) min_t(u32, x->dev_info.num_cqs, \ 530 + num_online_cpus()) 531 532 /* 533 * PCI function probe defines
+8 -6
drivers/scsi/qedf/qedf_els.c
··· 489 490 /* If a SRR times out, simply free resources */ 491 if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO) 492 - goto out_free; 493 494 /* Normalize response data into struct fc_frame */ 495 mp_req = &(srr_req->mp_req); ··· 501 if (!fp) { 502 QEDF_ERR(&(qedf->dbg_ctx), 503 "fc_frame_alloc failure.\n"); 504 - goto out_free; 505 } 506 507 /* Copy frame header from firmware into fp */ ··· 526 } 527 528 fc_frame_free(fp); 529 - out_free: 530 /* Put reference for original command since SRR completed */ 531 kref_put(&orig_io_req->refcount, qedf_release_cmd); 532 kfree(cb_arg); 533 } 534 ··· 781 782 /* If a REC times out, free resources */ 783 if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO) 784 - goto out_free; 785 786 /* Normalize response data into struct fc_frame */ 787 mp_req = &(rec_req->mp_req); ··· 793 if (!fp) { 794 QEDF_ERR(&(qedf->dbg_ctx), 795 "fc_frame_alloc failure.\n"); 796 - goto out_free; 797 } 798 799 /* Copy frame header from firmware into fp */ ··· 885 886 out_free_frame: 887 fc_frame_free(fp); 888 - out_free: 889 /* Put reference for original command since REC completed */ 890 kref_put(&orig_io_req->refcount, qedf_release_cmd); 891 kfree(cb_arg); 892 } 893
··· 489 490 /* If a SRR times out, simply free resources */ 491 if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO) 492 + goto out_put; 493 494 /* Normalize response data into struct fc_frame */ 495 mp_req = &(srr_req->mp_req); ··· 501 if (!fp) { 502 QEDF_ERR(&(qedf->dbg_ctx), 503 "fc_frame_alloc failure.\n"); 504 + goto out_put; 505 } 506 507 /* Copy frame header from firmware into fp */ ··· 526 } 527 528 fc_frame_free(fp); 529 + out_put: 530 /* Put reference for original command since SRR completed */ 531 kref_put(&orig_io_req->refcount, qedf_release_cmd); 532 + out_free: 533 kfree(cb_arg); 534 } 535 ··· 780 781 /* If a REC times out, free resources */ 782 if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO) 783 + goto out_put; 784 785 /* Normalize response data into struct fc_frame */ 786 mp_req = &(rec_req->mp_req); ··· 792 if (!fp) { 793 QEDF_ERR(&(qedf->dbg_ctx), 794 "fc_frame_alloc failure.\n"); 795 + goto out_put; 796 } 797 798 /* Copy frame header from firmware into fp */ ··· 884 885 out_free_frame: 886 fc_frame_free(fp); 887 + out_put: 888 /* Put reference for original command since REC completed */ 889 kref_put(&orig_io_req->refcount, qedf_release_cmd); 890 + out_free: 891 kfree(cb_arg); 892 } 893
+9 -11
drivers/scsi/qedf/qedf_main.c
··· 2797 * we allocation is the minimum off: 2798 * 2799 * Number of CPUs 2800 - * Number of MSI-X vectors 2801 - * Max number allocated in hardware (QEDF_MAX_NUM_CQS) 2802 */ 2803 - qedf->num_queues = min((unsigned int)QEDF_MAX_NUM_CQS, 2804 - num_online_cpus()); 2805 2806 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n", 2807 qedf->num_queues); ··· 2997 goto err1; 2998 } 2999 3000 /* queue allocation code should come here 3001 * order should be 3002 * slowpath_start ··· 3018 goto err2; 3019 } 3020 qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params); 3021 - 3022 - /* Learn information crucial for qedf to progress */ 3023 - rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info); 3024 - if (rc) { 3025 - QEDF_ERR(&(qedf->dbg_ctx), "Failed to dev info.\n"); 3026 - goto err1; 3027 - } 3028 3029 /* Record BDQ producer doorbell addresses */ 3030 qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr;
··· 2797 * we allocation is the minimum off: 2798 * 2799 * Number of CPUs 2800 + * Number allocated by qed for our PCI function 2801 */ 2802 + qedf->num_queues = MIN_NUM_CPUS_MSIX(qedf); 2803 2804 QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n", 2805 qedf->num_queues); ··· 2999 goto err1; 3000 } 3001 3002 + /* Learn information crucial for qedf to progress */ 3003 + rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info); 3004 + if (rc) { 3005 + QEDF_ERR(&(qedf->dbg_ctx), "Failed to dev info.\n"); 3006 + goto err1; 3007 + } 3008 + 3009 /* queue allocation code should come here 3010 * order should be 3011 * slowpath_start ··· 3013 goto err2; 3014 } 3015 qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params); 3016 3017 /* Record BDQ producer doorbell addresses */ 3018 qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr;
+1 -1
drivers/scsi/qedi/qedi_main.c
··· 1576 { 1577 struct qedi_cmd *cmd = NULL; 1578 1579 - if (tid > MAX_ISCSI_TASK_ENTRIES) 1580 return NULL; 1581 1582 cmd = qedi->itt_map[tid].p_cmd;
··· 1576 { 1577 struct qedi_cmd *cmd = NULL; 1578 1579 + if (tid >= MAX_ISCSI_TASK_ENTRIES) 1580 return NULL; 1581 1582 cmd = qedi->itt_map[tid].p_cmd;
+4 -4
drivers/scsi/qla2xxx/qla_attr.c
··· 318 return -EINVAL; 319 if (start > ha->optrom_size) 320 return -EINVAL; 321 322 mutex_lock(&ha->optrom_mutex); 323 switch (val) { ··· 345 } 346 347 ha->optrom_region_start = start; 348 - ha->optrom_region_size = start + size > ha->optrom_size ? 349 - ha->optrom_size - start : size; 350 351 ha->optrom_state = QLA_SREADING; 352 ha->optrom_buffer = vmalloc(ha->optrom_region_size); ··· 418 } 419 420 ha->optrom_region_start = start; 421 - ha->optrom_region_size = start + size > ha->optrom_size ? 422 - ha->optrom_size - start : size; 423 424 ha->optrom_state = QLA_SWRITING; 425 ha->optrom_buffer = vmalloc(ha->optrom_region_size);
··· 318 return -EINVAL; 319 if (start > ha->optrom_size) 320 return -EINVAL; 321 + if (size > ha->optrom_size - start) 322 + size = ha->optrom_size - start; 323 324 mutex_lock(&ha->optrom_mutex); 325 switch (val) { ··· 343 } 344 345 ha->optrom_region_start = start; 346 + ha->optrom_region_size = start + size; 347 348 ha->optrom_state = QLA_SREADING; 349 ha->optrom_buffer = vmalloc(ha->optrom_region_size); ··· 417 } 418 419 ha->optrom_region_start = start; 420 + ha->optrom_region_size = start + size; 421 422 ha->optrom_state = QLA_SWRITING; 423 ha->optrom_buffer = vmalloc(ha->optrom_region_size);
-12
drivers/scsi/qla2xxx/qla_tmpl.c
··· 401 for (i = 0; i < vha->hw->max_req_queues; i++) { 402 struct req_que *req = vha->hw->req_q_map[i]; 403 404 - if (!test_bit(i, vha->hw->req_qid_map)) 405 - continue; 406 - 407 if (req || !buf) { 408 length = req ? 409 req->length : REQUEST_ENTRY_CNT_24XX; ··· 414 } else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) { 415 for (i = 0; i < vha->hw->max_rsp_queues; i++) { 416 struct rsp_que *rsp = vha->hw->rsp_q_map[i]; 417 - 418 - if (!test_bit(i, vha->hw->rsp_qid_map)) 419 - continue; 420 421 if (rsp || !buf) { 422 length = rsp ? ··· 658 for (i = 0; i < vha->hw->max_req_queues; i++) { 659 struct req_que *req = vha->hw->req_q_map[i]; 660 661 - if (!test_bit(i, vha->hw->req_qid_map)) 662 - continue; 663 - 664 if (req || !buf) { 665 qla27xx_insert16(i, buf, len); 666 qla27xx_insert16(1, buf, len); ··· 669 } else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) { 670 for (i = 0; i < vha->hw->max_rsp_queues; i++) { 671 struct rsp_que *rsp = vha->hw->rsp_q_map[i]; 672 - 673 - if (!test_bit(i, vha->hw->rsp_qid_map)) 674 - continue; 675 676 if (rsp || !buf) { 677 qla27xx_insert16(i, buf, len);
··· 401 for (i = 0; i < vha->hw->max_req_queues; i++) { 402 struct req_que *req = vha->hw->req_q_map[i]; 403 404 if (req || !buf) { 405 length = req ? 406 req->length : REQUEST_ENTRY_CNT_24XX; ··· 417 } else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) { 418 for (i = 0; i < vha->hw->max_rsp_queues; i++) { 419 struct rsp_que *rsp = vha->hw->rsp_q_map[i]; 420 421 if (rsp || !buf) { 422 length = rsp ? ··· 664 for (i = 0; i < vha->hw->max_req_queues; i++) { 665 struct req_que *req = vha->hw->req_q_map[i]; 666 667 if (req || !buf) { 668 qla27xx_insert16(i, buf, len); 669 qla27xx_insert16(1, buf, len); ··· 678 } else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) { 679 for (i = 0; i < vha->hw->max_rsp_queues; i++) { 680 struct rsp_que *rsp = vha->hw->rsp_q_map[i]; 681 682 if (rsp || !buf) { 683 qla27xx_insert16(i, buf, len);
+4
drivers/scsi/scsi.c
··· 780 module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR); 781 MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels"); 782 783 bool scsi_use_blk_mq = true; 784 module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO); 785 786 static int __init init_scsi(void)
··· 780 module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR); 781 MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels"); 782 783 + #ifdef CONFIG_SCSI_MQ_DEFAULT 784 bool scsi_use_blk_mq = true; 785 + #else 786 + bool scsi_use_blk_mq = false; 787 + #endif 788 module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO); 789 790 static int __init init_scsi(void)
+3
drivers/scsi/sd.c
··· 1277 { 1278 struct request *rq = SCpnt->request; 1279 1280 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) 1281 __free_page(rq->special_vec.bv_page); 1282
··· 1277 { 1278 struct request *rq = SCpnt->request; 1279 1280 + if (SCpnt->flags & SCMD_ZONE_WRITE_LOCK) 1281 + sd_zbc_write_unlock_zone(SCpnt); 1282 + 1283 if (rq->rq_flags & RQF_SPECIAL_PAYLOAD) 1284 __free_page(rq->special_vec.bv_page); 1285
+5 -4
drivers/scsi/sd_zbc.c
··· 294 test_and_set_bit(zno, sdkp->zones_wlock)) 295 return BLKPREP_DEFER; 296 297 return BLKPREP_OK; 298 } 299 ··· 305 struct request *rq = cmd->request; 306 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); 307 308 - if (sdkp->zones_wlock) { 309 unsigned int zno = sd_zbc_zone_no(sdkp, blk_rq_pos(rq)); 310 WARN_ON_ONCE(!test_bit(zno, sdkp->zones_wlock)); 311 clear_bit_unlock(zno, sdkp->zones_wlock); 312 smp_mb__after_atomic(); 313 } ··· 338 case REQ_OP_WRITE: 339 case REQ_OP_WRITE_ZEROES: 340 case REQ_OP_WRITE_SAME: 341 - 342 - /* Unlock the zone */ 343 - sd_zbc_write_unlock_zone(cmd); 344 345 if (result && 346 sshdr->sense_key == ILLEGAL_REQUEST &&
··· 294 test_and_set_bit(zno, sdkp->zones_wlock)) 295 return BLKPREP_DEFER; 296 297 + WARN_ON_ONCE(cmd->flags & SCMD_ZONE_WRITE_LOCK); 298 + cmd->flags |= SCMD_ZONE_WRITE_LOCK; 299 + 300 return BLKPREP_OK; 301 } 302 ··· 302 struct request *rq = cmd->request; 303 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); 304 305 + if (sdkp->zones_wlock && cmd->flags & SCMD_ZONE_WRITE_LOCK) { 306 unsigned int zno = sd_zbc_zone_no(sdkp, blk_rq_pos(rq)); 307 WARN_ON_ONCE(!test_bit(zno, sdkp->zones_wlock)); 308 + cmd->flags &= ~SCMD_ZONE_WRITE_LOCK; 309 clear_bit_unlock(zno, sdkp->zones_wlock); 310 smp_mb__after_atomic(); 311 } ··· 334 case REQ_OP_WRITE: 335 case REQ_OP_WRITE_ZEROES: 336 case REQ_OP_WRITE_SAME: 337 338 if (result && 339 sshdr->sense_key == ILLEGAL_REQUEST &&
+1 -1
drivers/scsi/ses.c
··· 106 107 ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen, 108 NULL, SES_TIMEOUT, SES_RETRIES, NULL); 109 - if (unlikely(!ret)) 110 return ret; 111 112 recv_page_code = ((unsigned char *)buf)[0];
··· 106 107 ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen, 108 NULL, SES_TIMEOUT, SES_RETRIES, NULL); 109 + if (unlikely(ret)) 110 return ret; 111 112 recv_page_code = ((unsigned char *)buf)[0];
+2 -31
drivers/scsi/sg.c
··· 751 return count; 752 } 753 754 - static bool sg_is_valid_dxfer(sg_io_hdr_t *hp) 755 - { 756 - switch (hp->dxfer_direction) { 757 - case SG_DXFER_NONE: 758 - if (hp->dxferp || hp->dxfer_len > 0) 759 - return false; 760 - return true; 761 - case SG_DXFER_FROM_DEV: 762 - /* 763 - * for SG_DXFER_FROM_DEV we always set dxfer_len to > 0. dxferp 764 - * can either be NULL or != NULL so there's no point in checking 765 - * it either. So just return true. 766 - */ 767 - return true; 768 - case SG_DXFER_TO_DEV: 769 - case SG_DXFER_TO_FROM_DEV: 770 - if (!hp->dxferp || hp->dxfer_len == 0) 771 - return false; 772 - return true; 773 - case SG_DXFER_UNKNOWN: 774 - if ((!hp->dxferp && hp->dxfer_len) || 775 - (hp->dxferp && hp->dxfer_len == 0)) 776 - return false; 777 - return true; 778 - default: 779 - return false; 780 - } 781 - } 782 - 783 static int 784 sg_common_write(Sg_fd * sfp, Sg_request * srp, 785 unsigned char *cmnd, int timeout, int blocking) ··· 771 "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n", 772 (int) cmnd[0], (int) hp->cmd_len)); 773 774 - if (!sg_is_valid_dxfer(hp)) 775 return -EINVAL; 776 777 k = sg_start_req(srp, cmnd); ··· 1021 read_lock_irqsave(&sfp->rq_list_lock, iflags); 1022 val = 0; 1023 list_for_each_entry(srp, &sfp->rq_list, entry) { 1024 - if (val > SG_MAX_QUEUE) 1025 break; 1026 memset(&rinfo[val], 0, SZ_SG_REQ_INFO); 1027 rinfo[val].req_state = srp->done + 1;
··· 751 return count; 752 } 753 754 static int 755 sg_common_write(Sg_fd * sfp, Sg_request * srp, 756 unsigned char *cmnd, int timeout, int blocking) ··· 800 "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n", 801 (int) cmnd[0], (int) hp->cmd_len)); 802 803 + if (hp->dxfer_len >= SZ_256M) 804 return -EINVAL; 805 806 k = sg_start_req(srp, cmnd); ··· 1050 read_lock_irqsave(&sfp->rq_list_lock, iflags); 1051 val = 0; 1052 list_for_each_entry(srp, &sfp->rq_list, entry) { 1053 + if (val >= SG_MAX_QUEUE) 1054 break; 1055 memset(&rinfo[val], 0, SZ_SG_REQ_INFO); 1056 rinfo[val].req_state = srp->done + 1;
+2 -2
drivers/scsi/st.c
··· 4299 kref_init(&tpnt->kref); 4300 tpnt->disk = disk; 4301 disk->private_data = &tpnt->driver; 4302 - disk->queue = SDp->request_queue; 4303 /* SCSI tape doesn't register this gendisk via add_disk(). Manually 4304 * take queue reference that release_disk() expects. */ 4305 - if (!blk_get_queue(disk->queue)) 4306 goto out_put_disk; 4307 tpnt->driver = &st_template; 4308 4309 tpnt->device = SDp;
··· 4299 kref_init(&tpnt->kref); 4300 tpnt->disk = disk; 4301 disk->private_data = &tpnt->driver; 4302 /* SCSI tape doesn't register this gendisk via add_disk(). Manually 4303 * take queue reference that release_disk() expects. */ 4304 + if (!blk_get_queue(SDp->request_queue)) 4305 goto out_put_disk; 4306 + disk->queue = SDp->request_queue; 4307 tpnt->driver = &st_template; 4308 4309 tpnt->device = SDp;
+2
drivers/scsi/storvsc_drv.c
··· 1640 put_cpu(); 1641 1642 if (ret == -EAGAIN) { 1643 /* no more space */ 1644 return SCSI_MLQUEUE_DEVICE_BUSY; 1645 }
··· 1640 put_cpu(); 1641 1642 if (ret == -EAGAIN) { 1643 + if (payload_sz > sizeof(cmd_request->mpb)) 1644 + kfree(payload); 1645 /* no more space */ 1646 return SCSI_MLQUEUE_DEVICE_BUSY; 1647 }
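The storvsc hunk plugs a leak on the busy path: payload is only kmalloc'd when the scatterlist does not fit in the embedded cmd_request->mpb, so the early return must mirror that condition before freeing. A small, hypothetical userspace sketch of the embedded-or-heap buffer pattern (names are illustrative):

#include <stdlib.h>
#include <string.h>

struct request_ctx {
        unsigned char inline_buf[64];   /* embedded buffer for the common case */
};

/* Returns 0 on success, -1 when the transport is busy (like -EAGAIN above). */
static int submit(struct request_ctx *ctx, size_t payload_sz, int transport_busy)
{
        void *payload = ctx->inline_buf;

        if (payload_sz > sizeof(ctx->inline_buf)) {
                payload = malloc(payload_sz);   /* heap only when it doesn't fit */
                if (!payload)
                        return -1;
        }
        memset(payload, 0, payload_sz);

        if (transport_busy) {
                if (payload_sz > sizeof(ctx->inline_buf))
                        free(payload);          /* free only the heap-allocated case */
                return -1;
        }
        /* ... hand payload to the transport, which owns it from here ... */
        return 0;
}

int main(void)
{
        struct request_ctx ctx;

        return submit(&ctx, 4096, 1) == -1 ? 0 : 1;
}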
-2
include/linux/cpuhotplug.h
··· 39 CPUHP_PCI_XGENE_DEAD, 40 CPUHP_IOMMU_INTEL_DEAD, 41 CPUHP_LUSTRE_CFS_DEAD, 42 - CPUHP_SCSI_BNX2FC_DEAD, 43 - CPUHP_SCSI_BNX2I_DEAD, 44 CPUHP_WORKQUEUE_PREP, 45 CPUHP_POWER_NUMA_PREPARE, 46 CPUHP_HRTIMERS_PREPARE,
··· 39 CPUHP_PCI_XGENE_DEAD, 40 CPUHP_IOMMU_INTEL_DEAD, 41 CPUHP_LUSTRE_CFS_DEAD, 42 CPUHP_WORKQUEUE_PREP, 43 CPUHP_POWER_NUMA_PREPARE, 44 CPUHP_HRTIMERS_PREPARE,
+1
include/scsi/scsi_cmnd.h
··· 57 /* for scmd->flags */ 58 #define SCMD_TAGGED (1 << 0) 59 #define SCMD_UNCHECKED_ISA_DMA (1 << 1) 60 #define SCMD_INITIALIZED (1 << 3) 61 /* flags preserved across unprep / reprep */ 62 #define SCMD_PRESERVED_FLAGS (SCMD_UNCHECKED_ISA_DMA | SCMD_INITIALIZED)
··· 57 /* for scmd->flags */ 58 #define SCMD_TAGGED (1 << 0) 59 #define SCMD_UNCHECKED_ISA_DMA (1 << 1) 60 + #define SCMD_ZONE_WRITE_LOCK (1 << 2) 61 #define SCMD_INITIALIZED (1 << 3) 62 /* flags preserved across unprep / reprep */ 63 #define SCMD_PRESERVED_FLAGS (SCMD_UNCHECKED_ISA_DMA | SCMD_INITIALIZED)