Merge branch 'fixes' into misc

+178 -258
+11
drivers/scsi/Kconfig
···
 	default n
 	depends on NET
 
+config SCSI_MQ_DEFAULT
+	bool "SCSI: use blk-mq I/O path by default"
+	depends on SCSI
+	---help---
+	  This option enables the new blk-mq based I/O path for SCSI
+	  devices by default.  With the option the scsi_mod.use_blk_mq
+	  module/boot option defaults to Y, without it to N, but it can
+	  still be overridden either way.
+
+	  If unsure say N.
+
 config SCSI_PROC_FS
 	bool "legacy /proc/scsi/ support"
 	depends on SCSI && PROC_FS
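Note: the compile-time default this help text describes is consumed in drivers/scsi/scsi.c (see that hunk further down); whichever way the option is set, booting with scsi_mod.use_blk_mq=Y or =N still overrides it. The wiring, as it appears later in this merge:

#ifdef CONFIG_SCSI_MQ_DEFAULT
bool scsi_use_blk_mq = true;
#else
bool scsi_use_blk_mq = false;
#endif
module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO);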
+32 -38
drivers/scsi/aacraid/aachba.c
···
 	if ((le32_to_cpu(get_name_reply->status) == CT_OK)
 	 && (get_name_reply->data[0] != '\0')) {
 		char *sp = get_name_reply->data;
-		sp[sizeof(((struct aac_get_name_resp *)NULL)->data)] = '\0';
+		int data_size = FIELD_SIZEOF(struct aac_get_name_resp, data);
+
+		sp[data_size - 1] = '\0';
 		while (*sp == ' ')
 			++sp;
 		if (*sp) {
···
 static int aac_get_container_name(struct scsi_cmnd * scsicmd)
 {
 	int status;
+	int data_size;
 	struct aac_get_name *dinfo;
 	struct fib * cmd_fibcontext;
 	struct aac_dev * dev;
 
 	dev = (struct aac_dev *)scsicmd->device->host->hostdata;
 
+	data_size = FIELD_SIZEOF(struct aac_get_name_resp, data);
+
 	cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
 
 	aac_fib_init(cmd_fibcontext);
 	dinfo = (struct aac_get_name *) fib_data(cmd_fibcontext);
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 
 	dinfo->command = cpu_to_le32(VM_ContainerConfig);
 	dinfo->type = cpu_to_le32(CT_READ_NAME);
 	dinfo->cid = cpu_to_le32(scmd_id(scsicmd));
-	dinfo->count = cpu_to_le32(sizeof(((struct aac_get_name_resp *)NULL)->data));
+	dinfo->count = cpu_to_le32(data_size - 1);
 
 	status = aac_fib_send(ContainerCommand,
 		cmd_fibcontext,
···
 	/*
 	 * Check that the command queued to the controller
 	 */
-	if (status == -EINPROGRESS) {
-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+	if (status == -EINPROGRESS)
 		return 0;
-	}
 
 	printk(KERN_WARNING "aac_get_container_name: aac_fib_send failed with status: %d.\n", status);
 	aac_fib_complete(cmd_fibcontext);
···
 
 	dinfo->count = cpu_to_le32(scmd_id(scsicmd));
 	dinfo->type = cpu_to_le32(FT_FILESYS);
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 
 	status = aac_fib_send(ContainerCommand,
 		fibptr,
···
 	/*
 	 * Check that the command queued to the controller
 	 */
-	if (status == -EINPROGRESS)
-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
-	else if (status < 0) {
+	if (status < 0 && status != -EINPROGRESS) {
 		/* Inherit results from VM_NameServe, if any */
 		dresp->status = cpu_to_le32(ST_OK);
 		_aac_probe_container2(context, fibptr);
···
 	dinfo->count = cpu_to_le32(scmd_id(scsicmd));
 	dinfo->type = cpu_to_le32(FT_FILESYS);
 	scsicmd->SCp.ptr = (char *)callback;
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 
 	status = aac_fib_send(ContainerCommand,
 		fibptr,
···
 	/*
 	 * Check that the command queued to the controller
 	 */
-	if (status == -EINPROGRESS) {
-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+	if (status == -EINPROGRESS)
 		return 0;
-	}
+
 	if (status < 0) {
 		scsicmd->SCp.ptr = NULL;
 		aac_fib_complete(fibptr);
···
 	dinfo->command = cpu_to_le32(VM_ContainerConfig);
 	dinfo->type = cpu_to_le32(CT_CID_TO_32BITS_UID);
 	dinfo->cid = cpu_to_le32(scmd_id(scsicmd));
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 
 	status = aac_fib_send(ContainerCommand,
 		cmd_fibcontext,
···
 	/*
 	 * Check that the command queued to the controller
 	 */
-	if (status == -EINPROGRESS) {
-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+	if (status == -EINPROGRESS)
 		return 0;
-	}
 
 	printk(KERN_WARNING
 		"aac_get_container_serial: aac_fib_send failed with status: %d.\n", status);
 	aac_fib_complete(cmd_fibcontext);
···
 	 *	Alocate and initialize a Fib
 	 */
 	cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
-
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 	status = aac_adapter_read(cmd_fibcontext, scsicmd, lba, count);
 
 	/*
 	 * Check that the command queued to the controller
 	 */
-	if (status == -EINPROGRESS) {
-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+	if (status == -EINPROGRESS)
 		return 0;
-	}
 
 	printk(KERN_WARNING "aac_read: aac_fib_send failed with status: %d.\n", status);
 	/*
···
 	 *	Allocate and initialize a Fib then setup a BlockWrite command
 	 */
 	cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
-
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 	status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua);
 
 	/*
 	 * Check that the command queued to the controller
 	 */
-	if (status == -EINPROGRESS) {
-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+	if (status == -EINPROGRESS)
 		return 0;
-	}
 
 	printk(KERN_WARNING "aac_write: aac_fib_send failed with status: %d\n", status);
 	/*
···
 	synchronizecmd->cid = cpu_to_le32(scmd_id(scsicmd));
 	synchronizecmd->count =
 	     cpu_to_le32(sizeof(((struct aac_synchronize_reply *)NULL)->data));
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 
 	/*
 	 *	Now send the Fib to the adapter
···
 	/*
 	 * Check that the command queued to the controller
 	 */
-	if (status == -EINPROGRESS) {
-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+	if (status == -EINPROGRESS)
 		return 0;
-	}
 
 	printk(KERN_WARNING
 		"aac_synchronize: aac_fib_send failed with status: %d.\n", status);
···
 	pmcmd->cid = cpu_to_le32(sdev_id(sdev));
 	pmcmd->parm = (scsicmd->cmnd[1] & 1) ?
 		cpu_to_le32(CT_PM_UNIT_IMMEDIATE) : 0;
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 
 	/*
 	 *	Now send the Fib to the adapter
···
 	/*
 	 * Check that the command queued to the controller
 	 */
-	if (status == -EINPROGRESS) {
-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+	if (status == -EINPROGRESS)
 		return 0;
-	}
 
 	aac_fib_complete(cmd_fibcontext);
 	aac_fib_free(cmd_fibcontext);
···
 		return -EBUSY;
 	if (copy_from_user(&qd, arg, sizeof (struct aac_query_disk)))
 		return -EFAULT;
-	if (qd.cnum == -1)
+	if (qd.cnum == -1) {
+		if (qd.id < 0 || qd.id >= dev->maximum_num_containers)
+			return -EINVAL;
 		qd.cnum = qd.id;
-	else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1))
-	{
+	} else if ((qd.bus == -1) && (qd.id == -1) && (qd.lun == -1)) {
 		if (qd.cnum < 0 || qd.cnum >= dev->maximum_num_containers)
 			return -EINVAL;
 		qd.instance = dev->scsi_host_ptr->host_no;
···
 	 *	Allocate and initialize a Fib then setup a BlockWrite command
 	 */
 	cmd_fibcontext = aac_fib_alloc_tag(dev, scsicmd);
-
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 	status = aac_adapter_scsi(cmd_fibcontext, scsicmd);
 
 	/*
 	 * Check that the command queued to the controller
 	 */
-	if (status == -EINPROGRESS) {
-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+	if (status == -EINPROGRESS)
 		return 0;
-	}
 
 	printk(KERN_WARNING "aac_srb: aac_fib_send failed with status: %d\n", status);
 	aac_fib_complete(cmd_fibcontext);
···
 	if (!cmd_fibcontext)
 		return -1;
 
+	scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
 	status = aac_adapter_hba(cmd_fibcontext, scsicmd);
 
 	/*
 	 * Check that the command queued to the controller
 	 */
-	if (status == -EINPROGRESS) {
-		scsicmd->SCp.phase = AAC_OWNER_FIRMWARE;
+	if (status == -EINPROGRESS)
 		return 0;
-	}
 
 	pr_warn("aac_hba_cmd_req: aac_fib_send failed with status: %d\n",
 		status);
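Note: FIELD_SIZEOF() comes from <linux/kernel.h> and is just a readable spelling of the sizeof-on-NULL idiom being replaced. Combined with growing data[] from 16 to 17 bytes (aacraid.h hunk below), it turns an off-by-one write into a safe in-place NUL termination; a minimal sketch of the arithmetic:

#define FIELD_SIZEOF(t, f) (sizeof(((t *)0)->f))	/* <linux/kernel.h> */

int data_size = FIELD_SIZEOF(struct aac_get_name_resp, data);	/* now 17 */

dinfo->count = cpu_to_le32(data_size - 1);	/* firmware may fill 16 bytes */
sp[data_size - 1] = '\0';	/* terminator lands inside the array; the old
				 * sp[sizeof(data)] wrote one element past it */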
+1 -1
drivers/scsi/aacraid/aacraid.h
···
 	__le32		parm3;
 	__le32		parm4;
 	__le32		parm5;
-	u8		data[16];
+	u8		data[17];
 };
 
 #define CT_CID_TO_32BITS_UID 165
+15 -53
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
···
 };
 
 /**
- * bnx2fc_percpu_thread_create - Create a receive thread for an
- *				 online CPU
+ * bnx2fc_cpu_online - Create a receive thread for an online CPU
  *
  * @cpu: cpu index for the online cpu
  */
-static void bnx2fc_percpu_thread_create(unsigned int cpu)
+static int bnx2fc_cpu_online(unsigned int cpu)
 {
 	struct bnx2fc_percpu_s *p;
 	struct task_struct *thread;
···
 	thread = kthread_create_on_node(bnx2fc_percpu_io_thread,
 					(void *)p, cpu_to_node(cpu),
 					"bnx2fc_thread/%d", cpu);
+	if (IS_ERR(thread))
+		return PTR_ERR(thread);
+
 	/* bind thread to the cpu */
-	if (likely(!IS_ERR(thread))) {
-		kthread_bind(thread, cpu);
-		p->iothread = thread;
-		wake_up_process(thread);
-	}
+	kthread_bind(thread, cpu);
+	p->iothread = thread;
+	wake_up_process(thread);
+	return 0;
 }
 
-static void bnx2fc_percpu_thread_destroy(unsigned int cpu)
+static int bnx2fc_cpu_offline(unsigned int cpu)
 {
 	struct bnx2fc_percpu_s *p;
 	struct task_struct *thread;
···
 	thread = p->iothread;
 	p->iothread = NULL;
 
-
 	/* Free all work in the list */
 	list_for_each_entry_safe(work, tmp, &p->work_list, list) {
 		list_del_init(&work->list);
···
 
 	if (thread)
 		kthread_stop(thread);
-}
-
-
-static int bnx2fc_cpu_online(unsigned int cpu)
-{
-	printk(PFX "CPU %x online: Create Rx thread\n", cpu);
-	bnx2fc_percpu_thread_create(cpu);
-	return 0;
-}
-
-static int bnx2fc_cpu_dead(unsigned int cpu)
-{
-	printk(PFX "CPU %x offline: Remove Rx thread\n", cpu);
-	bnx2fc_percpu_thread_destroy(cpu);
 	return 0;
 }
 
···
 		spin_lock_init(&p->fp_work_lock);
 	}
 
-	get_online_cpus();
-
-	for_each_online_cpu(cpu)
-		bnx2fc_percpu_thread_create(cpu);
-
-	rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
-				       "scsi/bnx2fc:online",
-				       bnx2fc_cpu_online, NULL);
+	rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/bnx2fc:online",
+			       bnx2fc_cpu_online, bnx2fc_cpu_offline);
 	if (rc < 0)
-		goto stop_threads;
+		goto stop_thread;
 	bnx2fc_online_state = rc;
 
-	cpuhp_setup_state_nocalls(CPUHP_SCSI_BNX2FC_DEAD, "scsi/bnx2fc:dead",
-				  NULL, bnx2fc_cpu_dead);
-	put_online_cpus();
-
 	cnic_register_driver(CNIC_ULP_FCOE, &bnx2fc_cnic_cb);
-
 	return 0;
 
-stop_threads:
-	for_each_online_cpu(cpu)
-		bnx2fc_percpu_thread_destroy(cpu);
-	put_online_cpus();
+stop_thread:
 	kthread_stop(l2_thread);
 free_wq:
 	destroy_workqueue(bnx2fc_wq);
···
 	struct fcoe_percpu_s *bg;
 	struct task_struct *l2_thread;
 	struct sk_buff *skb;
-	unsigned int cpu = 0;
 
 	/*
 	 * NOTE: Since cnic calls register_driver routine rtnl_lock,
···
 	if (l2_thread)
 		kthread_stop(l2_thread);
 
-	get_online_cpus();
-	/* Destroy per cpu threads */
-	for_each_online_cpu(cpu) {
-		bnx2fc_percpu_thread_destroy(cpu);
-	}
-
-	cpuhp_remove_state_nocalls(bnx2fc_online_state);
-	cpuhp_remove_state_nocalls(CPUHP_SCSI_BNX2FC_DEAD);
-
-	put_online_cpus();
+	cpuhp_remove_state(bnx2fc_online_state);
 
 	destroy_workqueue(bnx2fc_wq);
 	/*
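Note: bnx2fc and bnx2i get the same treatment, so one sketch covers both. cpuhp_setup_state() (unlike the _nocalls variant used before) invokes the online callback on every CPU that is already up and rolls back on failure, which is what lets both drivers drop their get_online_cpus()/for_each_online_cpu() bring-up loops, the error-path teardown loops, and the separate _DEAD states removed from include/linux/cpuhotplug.h at the bottom of this merge. The my_* names below are placeholders, not driver code:

#include <linux/cpuhotplug.h>

static enum cpuhp_state my_online_state;

static int my_cpu_online(unsigned int cpu)
{
	/* create, bind and wake the per-cpu thread; -errno on failure */
	return 0;
}

static int my_cpu_offline(unsigned int cpu)
{
	/* drain the per-cpu work list and stop the thread */
	return 0;
}

static int __init my_init(void)
{
	int ret;

	/* runs my_cpu_online() on each online CPU under the hotplug lock */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/mydrv:online",
				my_cpu_online, my_cpu_offline);
	if (ret < 0)
		return ret;		/* partial bring-up already undone */
	my_online_state = ret;		/* dynamic id, kept for teardown */
	return 0;
}

static void __exit my_exit(void)
{
	/* runs my_cpu_offline() on each online CPU, then frees the state */
	cpuhp_remove_state(my_online_state);
}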
+23 -22
drivers/scsi/bnx2fc/bnx2fc_hwi.c
···
 	return work;
 }
 
+/* Pending work request completion */
+static void bnx2fc_pending_work(struct bnx2fc_rport *tgt, unsigned int wqe)
+{
+	unsigned int cpu = wqe % num_possible_cpus();
+	struct bnx2fc_percpu_s *fps;
+	struct bnx2fc_work *work;
+
+	fps = &per_cpu(bnx2fc_percpu, cpu);
+	spin_lock_bh(&fps->fp_work_lock);
+	if (fps->iothread) {
+		work = bnx2fc_alloc_work(tgt, wqe);
+		if (work) {
+			list_add_tail(&work->list, &fps->work_list);
+			wake_up_process(fps->iothread);
+			spin_unlock_bh(&fps->fp_work_lock);
+			return;
+		}
+	}
+	spin_unlock_bh(&fps->fp_work_lock);
+	bnx2fc_process_cq_compl(tgt, wqe);
+}
+
 int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
 {
 	struct fcoe_cqe *cq;
···
 			/* Unsolicited event notification */
 			bnx2fc_process_unsol_compl(tgt, wqe);
 		} else {
-			/* Pending work request completion */
-			struct bnx2fc_work *work = NULL;
-			struct bnx2fc_percpu_s *fps = NULL;
-			unsigned int cpu = wqe % num_possible_cpus();
-
-			fps = &per_cpu(bnx2fc_percpu, cpu);
-			spin_lock_bh(&fps->fp_work_lock);
-			if (unlikely(!fps->iothread))
-				goto unlock;
-
-			work = bnx2fc_alloc_work(tgt, wqe);
-			if (work)
-				list_add_tail(&work->list,
-					      &fps->work_list);
-unlock:
-			spin_unlock_bh(&fps->fp_work_lock);
-
-			/* Pending work request completion */
-			if (fps->iothread && work)
-				wake_up_process(fps->iothread);
-			else
-				bnx2fc_process_cq_compl(tgt, wqe);
+			bnx2fc_pending_work(tgt, wqe);
 			num_free_sqes++;
 		}
 		cqe++;
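Note: besides deduplicating the completion path, the new helper closes a small race; a comment-only sketch of the before/after reasoning:

/*
 * Old flow: queue work under fp_work_lock, drop the lock, then re-read
 * fps->iothread to decide between wake_up_process() and synchronous
 * bnx2fc_process_cq_compl().  If the CPU went offline in that window,
 * queued work could be left behind or completed twice.  New flow: the
 * wake-up happens while fp_work_lock is still held, and the synchronous
 * fallback is taken only when nothing was queued.
 */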
+15 -49
drivers/scsi/bnx2i/bnx2i_init.c
···
 
 
 /**
- * bnx2i_percpu_thread_create - Create a receive thread for an
- *				online CPU
+ * bnx2i_cpu_online - Create a receive thread for an online CPU
  *
  * @cpu: cpu index for the online cpu
  */
-static void bnx2i_percpu_thread_create(unsigned int cpu)
+static int bnx2i_cpu_online(unsigned int cpu)
 {
 	struct bnx2i_percpu_s *p;
 	struct task_struct *thread;
···
 	thread = kthread_create_on_node(bnx2i_percpu_io_thread, (void *)p,
 					cpu_to_node(cpu),
 					"bnx2i_thread/%d", cpu);
+	if (IS_ERR(thread))
+		return PTR_ERR(thread);
+
 	/* bind thread to the cpu */
-	if (likely(!IS_ERR(thread))) {
-		kthread_bind(thread, cpu);
-		p->iothread = thread;
-		wake_up_process(thread);
-	}
+	kthread_bind(thread, cpu);
+	p->iothread = thread;
+	wake_up_process(thread);
+	return 0;
 }
 
-
-static void bnx2i_percpu_thread_destroy(unsigned int cpu)
+static int bnx2i_cpu_offline(unsigned int cpu)
 {
 	struct bnx2i_percpu_s *p;
 	struct task_struct *thread;
···
 	spin_unlock_bh(&p->p_work_lock);
 	if (thread)
 		kthread_stop(thread);
-}
-
-static int bnx2i_cpu_online(unsigned int cpu)
-{
-	pr_info("bnx2i: CPU %x online: Create Rx thread\n", cpu);
-	bnx2i_percpu_thread_create(cpu);
-	return 0;
-}
-
-static int bnx2i_cpu_dead(unsigned int cpu)
-{
-	pr_info("CPU %x offline: Remove Rx thread\n", cpu);
-	bnx2i_percpu_thread_destroy(cpu);
 	return 0;
 }
 
···
 		p->iothread = NULL;
 	}
 
-	get_online_cpus();
-
-	for_each_online_cpu(cpu)
-		bnx2i_percpu_thread_create(cpu);
-
-	err = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
-					"scsi/bnx2i:online",
-					bnx2i_cpu_online, NULL);
+	err = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "scsi/bnx2i:online",
+				bnx2i_cpu_online, bnx2i_cpu_offline);
 	if (err < 0)
-		goto remove_threads;
+		goto unreg_driver;
 	bnx2i_online_state = err;
-
-	cpuhp_setup_state_nocalls(CPUHP_SCSI_BNX2I_DEAD, "scsi/bnx2i:dead",
-				  NULL, bnx2i_cpu_dead);
-	put_online_cpus();
 	return 0;
 
-remove_threads:
-	for_each_online_cpu(cpu)
-		bnx2i_percpu_thread_destroy(cpu);
-	put_online_cpus();
+unreg_driver:
 	cnic_unregister_driver(CNIC_ULP_ISCSI);
 unreg_xport:
 	iscsi_unregister_transport(&bnx2i_iscsi_transport);
···
 static void __exit bnx2i_mod_exit(void)
 {
 	struct bnx2i_hba *hba;
-	unsigned cpu = 0;
 
 	mutex_lock(&bnx2i_dev_lock);
 	while (!list_empty(&adapter_list)) {
···
 	}
 	mutex_unlock(&bnx2i_dev_lock);
 
-	get_online_cpus();
-
-	for_each_online_cpu(cpu)
-		bnx2i_percpu_thread_destroy(cpu);
-
-	cpuhp_remove_state_nocalls(bnx2i_online_state);
-	cpuhp_remove_state_nocalls(CPUHP_SCSI_BNX2I_DEAD);
-	put_online_cpus();
+	cpuhp_remove_state(bnx2i_online_state);
 
 	iscsi_unregister_transport(&bnx2i_iscsi_transport);
 	cnic_unregister_driver(CNIC_ULP_ISCSI);
+3 -1
drivers/scsi/csiostor/csio_hw.c
···
 
 	if (csio_is_hw_ready(hw))
 		return 0;
-	else
+	else if (csio_match_state(hw, csio_hws_uninit))
 		return -EINVAL;
+	else
+		return -ENODEV;
 }
 
 int
+8 -4
drivers/scsi/csiostor/csio_init.c
···
 
 	pci_set_drvdata(pdev, hw);
 
-	if (csio_hw_start(hw) != 0) {
-		dev_err(&pdev->dev,
-			"Failed to start FW, continuing in debug mode.\n");
-		return 0;
+	rv = csio_hw_start(hw);
+	if (rv) {
+		if (rv == -EINVAL) {
+			dev_err(&pdev->dev,
+				"Failed to start FW, continuing in debug mode.\n");
+			return 0;
+		}
+		goto err_lnode_exit;
 	}
 
 	sprintf(hw->fwrev_str, "%u.%u.%u.%u\n",
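Note: the two csiostor hunks work as a pair; roughly:

/*
 * csio_hw_start() now distinguishes "FW not ready but HW still usable"
 * (-EINVAL, probe keeps going in debug mode as before) from any other
 * failure (-ENODEV), which the probe path turns into a real error via
 * the err_lnode_exit unwind instead of limping on.
 */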
+3
drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
···
 		goto rel_resource;
 	}
 
+	if (!(n->nud_state & NUD_VALID))
+		neigh_event_send(n, NULL);
+
 	csk->atid = cxgb4_alloc_atid(lldi->tids, csk);
 	if (csk->atid < 0) {
 		pr_err("%s, NO atid available.\n", ndev->name);
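Note: the hunk itself does not state the rationale; a hedged reading, as a comment-only sketch:

/*
 * If the neighbour entry is not yet in a NUD_VALID state its MAC may be
 * unresolved, and the connection setup that follows would latch a stale
 * L2 address.  neigh_event_send(n, NULL) merely pokes the neighbour
 * state machine to (re)resolve; with a NULL skb it queues nothing and
 * does not block.
 */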
+20 -14
drivers/scsi/ipr.c
···
 		return;
 	}
 
+	if (ioa_cfg->scsi_unblock) {
+		ioa_cfg->scsi_unblock = 0;
+		ioa_cfg->scsi_blocked = 0;
+		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+		scsi_unblock_requests(ioa_cfg->host);
+		spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+		if (ioa_cfg->scsi_blocked)
+			scsi_block_requests(ioa_cfg->host);
+	}
+
 	if (!ioa_cfg->scan_enabled) {
 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 		return;
···
 	}
 	if (ipr_is_vset_device(res)) {
 		sdev->scsi_level = SCSI_SPC_3;
+		sdev->no_report_opcodes = 1;
 		blk_queue_rq_timeout(sdev->request_queue,
 				     IPR_VSET_RW_TIMEOUT);
 		blk_queue_max_hw_sectors(sdev->request_queue, IPR_VSET_MAX_SECTORS);
···
 	ENTER;
 	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
 		ipr_trace;
-		spin_unlock_irq(ioa_cfg->host->host_lock);
-		scsi_unblock_requests(ioa_cfg->host);
-		spin_lock_irq(ioa_cfg->host->host_lock);
+		ioa_cfg->scsi_unblock = 1;
+		schedule_work(&ioa_cfg->work_q);
 	}
 
 	ioa_cfg->in_reset_reload = 0;
···
 	list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
 	wake_up_all(&ioa_cfg->reset_wait_q);
 
-	spin_unlock(ioa_cfg->host->host_lock);
-	scsi_unblock_requests(ioa_cfg->host);
-	spin_lock(ioa_cfg->host->host_lock);
-
-	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].allow_cmds)
-		scsi_block_requests(ioa_cfg->host);
-
+	ioa_cfg->scsi_unblock = 1;
 	schedule_work(&ioa_cfg->work_q);
 	LEAVE;
 	return IPR_RC_JOB_RETURN;
···
 		spin_unlock(&ioa_cfg->hrrq[i]._lock);
 	}
 	wmb();
-	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa)
+	if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
+		ioa_cfg->scsi_unblock = 0;
+		ioa_cfg->scsi_blocked = 1;
 		scsi_block_requests(ioa_cfg->host);
+	}
 
 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
 	ioa_cfg->reset_cmd = ipr_cmd;
···
 		wake_up_all(&ioa_cfg->reset_wait_q);
 
 		if (!ioa_cfg->hrrq[IPR_INIT_HRRQ].removing_ioa) {
-			spin_unlock_irq(ioa_cfg->host->host_lock);
-			scsi_unblock_requests(ioa_cfg->host);
-			spin_lock_irq(ioa_cfg->host->host_lock);
+			ioa_cfg->scsi_unblock = 1;
+			schedule_work(&ioa_cfg->work_q);
 		}
 		return;
 	} else {
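Note: the ipr hunks all implement one pattern, using the two new bitfields from the ipr.h hunk below; a comment-plus-two-lines sketch:

/*
 * Reset paths used to drop and retake host_lock around
 * scsi_unblock_requests(), opening a window against the queuecommand
 * path.  Now they only record intent and kick the existing worker:
 */
ioa_cfg->scsi_unblock = 1;
schedule_work(&ioa_cfg->work_q);

/*
 * The worker (first hunk above) does the unblock from process context,
 * clearing scsi_blocked first; if a new reset sets scsi_blocked while
 * the lock was dropped, the worker re-blocks before returning.
 */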
+2
drivers/scsi/ipr.h
···
 	u8 cfg_locked:1;
 	u8 clear_isr:1;
 	u8 probe_done:1;
+	u8 scsi_unblock:1;
+	u8 scsi_blocked:1;
 
 	u8 revid;
 
+1 -1
drivers/scsi/megaraid/megaraid_sas_base.c
···
 fail_start_aen:
 fail_io_attach:
 	megasas_mgmt_info.count--;
-	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
 	megasas_mgmt_info.max_index--;
+	megasas_mgmt_info.instance[megasas_mgmt_info.max_index] = NULL;
 
 	instance->instancet->disable_intr(instance);
 	megasas_destroy_irqs(instance);
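Note: a one-slot off-by-one in the unwind path, spelled out as a comment:

/*
 * After a successful attach, max_index points one slot past this
 * adapter's entry.  The old order NULLed instance[max_index] (an
 * always-empty slot) and then decremented, leaving the real pointer
 * stale; decrementing first clears the slot that was actually used.
 */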
+2 -1
drivers/scsi/qedf/qedf.h
···
 #define QEDF_WRITE		(1 << 0)
 #define MAX_FIBRE_LUNS		0xffffffff
 
-#define QEDF_MAX_NUM_CQS	8
+#define MIN_NUM_CPUS_MSIX(x)	min_t(u32, x->dev_info.num_cqs, \
+					num_online_cpus())
 
 /*
  * PCI function probe defines
+8 -6
drivers/scsi/qedf/qedf_els.c
···
 
 	/* If a SRR times out, simply free resources */
 	if (srr_req->event == QEDF_IOREQ_EV_ELS_TMO)
-		goto out_free;
+		goto out_put;
 
 	/* Normalize response data into struct fc_frame */
 	mp_req = &(srr_req->mp_req);
···
 	if (!fp) {
 		QEDF_ERR(&(qedf->dbg_ctx),
 		    "fc_frame_alloc failure.\n");
-		goto out_free;
+		goto out_put;
 	}
 
 	/* Copy frame header from firmware into fp */
···
 	}
 
 	fc_frame_free(fp);
-out_free:
+out_put:
 	/* Put reference for original command since SRR completed */
 	kref_put(&orig_io_req->refcount, qedf_release_cmd);
+out_free:
 	kfree(cb_arg);
 }
 
···
 
 	/* If a REC times out, free resources */
 	if (rec_req->event == QEDF_IOREQ_EV_ELS_TMO)
-		goto out_free;
+		goto out_put;
 
 	/* Normalize response data into struct fc_frame */
 	mp_req = &(rec_req->mp_req);
···
 	if (!fp) {
 		QEDF_ERR(&(qedf->dbg_ctx),
 		    "fc_frame_alloc failure.\n");
-		goto out_free;
+		goto out_put;
 	}
 
 	/* Copy frame header from firmware into fp */
···
 
 out_free_frame:
 	fc_frame_free(fp);
-out_free:
+out_put:
 	/* Put reference for original command since REC completed */
 	kref_put(&orig_io_req->refcount, qedf_release_cmd);
+out_free:
 	kfree(cb_arg);
 }
 
+9 -11
drivers/scsi/qedf/qedf_main.c
···
 	 * we allocation is the minimum off:
 	 *
 	 * Number of CPUs
-	 * Number of MSI-X vectors
-	 * Max number allocated in hardware (QEDF_MAX_NUM_CQS)
+	 * Number allocated by qed for our PCI function
 	 */
-	qedf->num_queues = min((unsigned int)QEDF_MAX_NUM_CQS,
-	    num_online_cpus());
+	qedf->num_queues = MIN_NUM_CPUS_MSIX(qedf);
 
 	QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, "Number of CQs is %d.\n",
 		   qedf->num_queues);
···
 		goto err1;
 	}
 
+	/* Learn information crucial for qedf to progress */
+	rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
+	if (rc) {
+		QEDF_ERR(&(qedf->dbg_ctx), "Failed to dev info.\n");
+		goto err1;
+	}
+
 	/* queue allocation code should come here
 	 * order should be
 	 * 	slowpath_start
···
 		goto err2;
 	}
 	qed_ops->common->update_pf_params(qedf->cdev, &qedf->pf_params);
-
-	/* Learn information crucial for qedf to progress */
-	rc = qed_ops->fill_dev_info(qedf->cdev, &qedf->dev_info);
-	if (rc) {
-		QEDF_ERR(&(qedf->dbg_ctx), "Failed to dev info.\n");
-		goto err1;
-	}
 
 	/* Record BDQ producer doorbell addresses */
 	qedf->bdq_primary_prod = qedf->dev_info.primary_dbq_rq_addr;
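Note: the qedf.h and qedf_main.c hunks are coupled; a comment-only sketch of the dependency:

/*
 * MIN_NUM_CPUS_MSIX(qedf) expands to
 *	min_t(u32, qedf->dev_info.num_cqs, num_online_cpus())
 * so the CQ count is now capped by what qed assigned to this PF rather
 * than a hard-coded 8 -- which is also why fill_dev_info() had to move
 * ahead of the queue-count computation in the probe path above.
 */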
+1 -1
drivers/scsi/qedi/qedi_main.c
···
 {
 	struct qedi_cmd *cmd = NULL;
 
-	if (tid > MAX_ISCSI_TASK_ENTRIES)
+	if (tid >= MAX_ISCSI_TASK_ENTRIES)
 		return NULL;
 
 	cmd = qedi->itt_map[tid].p_cmd;
+4 -4
drivers/scsi/qla2xxx/qla_attr.c
···
 		return -EINVAL;
 	if (start > ha->optrom_size)
 		return -EINVAL;
+	if (size > ha->optrom_size - start)
+		size = ha->optrom_size - start;
 
 	mutex_lock(&ha->optrom_mutex);
 	switch (val) {
···
 		}
 
 		ha->optrom_region_start = start;
-		ha->optrom_region_size = start + size > ha->optrom_size ?
-		    ha->optrom_size - start : size;
+		ha->optrom_region_size = start + size;
 
 		ha->optrom_state = QLA_SREADING;
 		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
···
 		}
 
 		ha->optrom_region_start = start;
-		ha->optrom_region_size = start + size > ha->optrom_size ?
-		    ha->optrom_size - start : size;
+		ha->optrom_region_size = start + size;
 
 		ha->optrom_state = QLA_SWRITING;
 		ha->optrom_buffer = vmalloc(ha->optrom_region_size);
-12
drivers/scsi/qla2xxx/qla_tmpl.c
···
 	for (i = 0; i < vha->hw->max_req_queues; i++) {
 		struct req_que *req = vha->hw->req_q_map[i];
 
-		if (!test_bit(i, vha->hw->req_qid_map))
-			continue;
-
 		if (req || !buf) {
 			length = req ?
 			    req->length : REQUEST_ENTRY_CNT_24XX;
···
 	} else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) {
 		for (i = 0; i < vha->hw->max_rsp_queues; i++) {
 			struct rsp_que *rsp = vha->hw->rsp_q_map[i];
-
-			if (!test_bit(i, vha->hw->rsp_qid_map))
-				continue;
 
 			if (rsp || !buf) {
 				length = rsp ?
···
 	for (i = 0; i < vha->hw->max_req_queues; i++) {
 		struct req_que *req = vha->hw->req_q_map[i];
 
-		if (!test_bit(i, vha->hw->req_qid_map))
-			continue;
-
 		if (req || !buf) {
 			qla27xx_insert16(i, buf, len);
 			qla27xx_insert16(1, buf, len);
···
 	} else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) {
 		for (i = 0; i < vha->hw->max_rsp_queues; i++) {
 			struct rsp_que *rsp = vha->hw->rsp_q_map[i];
-
-			if (!test_bit(i, vha->hw->rsp_qid_map))
-				continue;
 
 			if (rsp || !buf) {
 				qla27xx_insert16(i, buf, len);
+4
drivers/scsi/scsi.c
···
 module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");
 
+#ifdef CONFIG_SCSI_MQ_DEFAULT
 bool scsi_use_blk_mq = true;
+#else
+bool scsi_use_blk_mq = false;
+#endif
 module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO);
 
 static int __init init_scsi(void)
+3
drivers/scsi/sd.c
···
 {
 	struct request *rq = SCpnt->request;
 
+	if (SCpnt->flags & SCMD_ZONE_WRITE_LOCK)
+		sd_zbc_write_unlock_zone(SCpnt);
+
 	if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
 		__free_page(rq->special_vec.bv_page);
 
+5 -4
drivers/scsi/sd_zbc.c
···
 	    test_and_set_bit(zno, sdkp->zones_wlock))
 		return BLKPREP_DEFER;
 
+	WARN_ON_ONCE(cmd->flags & SCMD_ZONE_WRITE_LOCK);
+	cmd->flags |= SCMD_ZONE_WRITE_LOCK;
+
 	return BLKPREP_OK;
 }
 
···
 	struct request *rq = cmd->request;
 	struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
 
-	if (sdkp->zones_wlock) {
+	if (sdkp->zones_wlock && cmd->flags & SCMD_ZONE_WRITE_LOCK) {
 		unsigned int zno = sd_zbc_zone_no(sdkp, blk_rq_pos(rq));
 		WARN_ON_ONCE(!test_bit(zno, sdkp->zones_wlock));
+		cmd->flags &= ~SCMD_ZONE_WRITE_LOCK;
 		clear_bit_unlock(zno, sdkp->zones_wlock);
 		smp_mb__after_atomic();
 	}
···
 	case REQ_OP_WRITE:
 	case REQ_OP_WRITE_ZEROES:
 	case REQ_OP_WRITE_SAME:
-
-		/* Unlock the zone */
-		sd_zbc_write_unlock_zone(cmd);
 
 		if (result &&
 		    sshdr->sense_key == ILLEGAL_REQUEST &&
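Note: the sd.c, sd_zbc.c, and scsi_cmnd.h hunks form one change; how the new flag ties them together, as a comment-only sketch:

/*
 * The zone write lock is now owned by the command that took it:
 *
 *   sd_zbc_write_lock_zone():   test_and_set_bit() on the zone, then
 *                               cmd->flags |= SCMD_ZONE_WRITE_LOCK;
 *   sd_uninit_command():        unlocks iff the flag is set, which also
 *                               covers commands torn down before the
 *                               zoned completion path ever ran;
 *   sd_zbc_write_unlock_zone(): clears the flag before clearing the
 *                               bit, so a second unlock is a no-op
 *                               rather than releasing a lock some
 *                               other command now holds.
 */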
+1 -1
drivers/scsi/ses.c
···
 
 	ret = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, bufflen,
 				NULL, SES_TIMEOUT, SES_RETRIES, NULL);
-	if (unlikely(!ret))
+	if (unlikely(ret))
 		return ret;
 
 	recv_page_code = ((unsigned char *)buf)[0];
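Note: a one-character fix, but worth spelling out:

/*
 * scsi_execute_req() returns 0 on success and non-zero on failure, so
 * the inverted test made this function bail out on *success* and go on
 * to parse a never-filled buffer on failure.
 */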
+2 -31
drivers/scsi/sg.c
···
 	return count;
 }
 
-static bool sg_is_valid_dxfer(sg_io_hdr_t *hp)
-{
-	switch (hp->dxfer_direction) {
-	case SG_DXFER_NONE:
-		if (hp->dxferp || hp->dxfer_len > 0)
-			return false;
-		return true;
-	case SG_DXFER_FROM_DEV:
-		/*
-		 * for SG_DXFER_FROM_DEV we always set dxfer_len to > 0. dxferp
-		 * can either be NULL or != NULL so there's no point in checking
-		 * it either. So just return true.
-		 */
-		return true;
-	case SG_DXFER_TO_DEV:
-	case SG_DXFER_TO_FROM_DEV:
-		if (!hp->dxferp || hp->dxfer_len == 0)
-			return false;
-		return true;
-	case SG_DXFER_UNKNOWN:
-		if ((!hp->dxferp && hp->dxfer_len) ||
-		    (hp->dxferp && hp->dxfer_len == 0))
-			return false;
-		return true;
-	default:
-		return false;
-	}
-}
-
 static int
 sg_common_write(Sg_fd * sfp, Sg_request * srp,
 		unsigned char *cmnd, int timeout, int blocking)
···
 			"sg_common_write:  scsi opcode=0x%02x, cmd_size=%d\n",
 			(int) cmnd[0], (int) hp->cmd_len));
 
-	if (!sg_is_valid_dxfer(hp))
+	if (hp->dxfer_len >= SZ_256M)
 		return -EINVAL;
 
 	k = sg_start_req(srp, cmnd);
···
 	read_lock_irqsave(&sfp->rq_list_lock, iflags);
 	val = 0;
 	list_for_each_entry(srp, &sfp->rq_list, entry) {
-		if (val > SG_MAX_QUEUE)
+		if (val >= SG_MAX_QUEUE)
 			break;
 		memset(&rinfo[val], 0, SZ_SG_REQ_INFO);
 		rinfo[val].req_state = srp->done + 1;
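Note: the strict per-direction validation is dropped rather than fixed; the replacement is deliberately just a sanity cap (SZ_256M comes from <linux/sizes.h>). The hunk itself doesn't state the rationale; the usual reading is:

/*
 * Long-standing userspace issues sg_io_hdr combinations that the strict
 * checks rejected (e.g. SG_DXFER_NONE with a stale dxferp), so
 * validating direction/pointer consistency regressed working tools.
 * The only assumption kept is that a single transfer never reaches
 * 256 MB:
 */
if (hp->dxfer_len >= SZ_256M)
	return -EINVAL;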
+2 -2
drivers/scsi/st.c
···
 	kref_init(&tpnt->kref);
 	tpnt->disk = disk;
 	disk->private_data = &tpnt->driver;
-	disk->queue = SDp->request_queue;
 	/* SCSI tape doesn't register this gendisk via add_disk().  Manually
 	 * take queue reference that release_disk() expects. */
-	if (!blk_get_queue(disk->queue))
+	if (!blk_get_queue(SDp->request_queue))
 		goto out_put_disk;
+	disk->queue = SDp->request_queue;
 	tpnt->driver = &st_template;
 
 	tpnt->device = SDp;
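Note: the reordering matters on the error path:

/*
 * disk->queue is now assigned only after blk_get_queue() succeeds.  In
 * the old order, a blk_get_queue() failure jumped to out_put_disk with
 * disk->queue already set, and put_disk() then dropped a queue
 * reference that was never taken.
 */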
+2
drivers/scsi/storvsc_drv.c
···
 	put_cpu();
 
 	if (ret == -EAGAIN) {
+		if (payload_sz > sizeof(cmd_request->mpb))
+			kfree(payload);
 		/* no more space */
 		return SCSI_MLQUEUE_DEVICE_BUSY;
 	}
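Note: a plain memory-leak fix:

/*
 * payload is kmalloc'ed only when the scatterlist does not fit in the
 * embedded cmd_request->mpb (payload_sz > sizeof(cmd_request->mpb)),
 * so that is exactly the condition under which the -EAGAIN
 * busy-requeue path was leaking it.
 */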
-2
include/linux/cpuhotplug.h
···
 	CPUHP_PCI_XGENE_DEAD,
 	CPUHP_IOMMU_INTEL_DEAD,
 	CPUHP_LUSTRE_CFS_DEAD,
-	CPUHP_SCSI_BNX2FC_DEAD,
-	CPUHP_SCSI_BNX2I_DEAD,
 	CPUHP_WORKQUEUE_PREP,
 	CPUHP_POWER_NUMA_PREPARE,
 	CPUHP_HRTIMERS_PREPARE,
+1
include/scsi/scsi_cmnd.h
···
 /* for scmd->flags */
 #define SCMD_TAGGED		(1 << 0)
 #define SCMD_UNCHECKED_ISA_DMA	(1 << 1)
+#define SCMD_ZONE_WRITE_LOCK	(1 << 2)
 #define SCMD_INITIALIZED	(1 << 3)
 /* flags preserved across unprep / reprep */
 #define SCMD_PRESERVED_FLAGS	(SCMD_UNCHECKED_ISA_DMA | SCMD_INITIALIZED)