Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

[SCSI] bfa: Add support for IO throttling at port level

Add capability to limit the number of exchanges on a port to
avoid queue-full conditions from the target side.

Signed-off-by: Sudarsana Reddy Kalluru <skalluru@brocade.com>
Signed-off-by: Krishna Gudipati <kgudipat@brocade.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>

Authored by Krishna Gudipati and committed by James Bottomley
7ace27ae 6894f013

+182 -8
+7 -2
drivers/scsi/bfa/bfa_core.c
··· 983 983 cfg_info->single_msix_vec = 1; 984 984 cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG; 985 985 cfg_info->num_cqs = cfg->fwcfg.num_cqs; 986 - cfg_info->num_ioim_reqs = cpu_to_be16(cfg->fwcfg.num_ioim_reqs); 986 + cfg_info->num_ioim_reqs = cpu_to_be16(bfa_fcpim_get_throttle_cfg(bfa, 987 + cfg->fwcfg.num_ioim_reqs)); 987 988 cfg_info->num_fwtio_reqs = cpu_to_be16(cfg->fwcfg.num_fwtio_reqs); 988 989 989 990 bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa); ··· 1246 1245 static void 1247 1246 bfa_iocfc_res_recfg(struct bfa_s *bfa, struct bfa_iocfc_fwcfg_s *fwcfg) 1248 1247 { 1248 + struct bfa_iocfc_s *iocfc = &bfa->iocfc; 1249 + struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo; 1250 + 1249 1251 bfa_fcxp_res_recfg(bfa, fwcfg->num_fcxp_reqs); 1250 1252 bfa_uf_res_recfg(bfa, fwcfg->num_uf_bufs); 1251 1253 bfa_rport_res_recfg(bfa, fwcfg->num_rports); 1252 - bfa_fcp_res_recfg(bfa, fwcfg->num_ioim_reqs); 1254 + bfa_fcp_res_recfg(bfa, cpu_to_be16(cfg_info->num_ioim_reqs), 1255 + fwcfg->num_ioim_reqs); 1253 1256 bfa_tskim_res_recfg(bfa, fwcfg->num_tskim_reqs); 1254 1257 } 1255 1258
+13
drivers/scsi/bfa/bfa_defs_svc.h
··· 875 875 struct bfa_lun_mask_s lun_list[MAX_LUN_MASK_CFG]; 876 876 }; 877 877 878 + struct bfa_throttle_cfg_s { 879 + u16 is_valid; 880 + u16 value; 881 + u32 rsvd; 882 + }; 883 + 884 + struct bfa_defs_fcpim_throttle_s { 885 + u16 max_value; 886 + u16 cur_value; 887 + u16 cfg_value; 888 + u16 rsvd; 889 + }; 890 + 878 891 /* 879 892 * Physical port configuration 880 893 */
+101 -4
drivers/scsi/bfa/bfa_fcpim.c
··· 3703 3703 struct bfa_mem_dma_s *seg_ptr; 3704 3704 u16 idx, nsegs, num_io_req; 3705 3705 3706 + fcp->max_ioim_reqs = cfg->fwcfg.num_ioim_reqs; 3706 3707 fcp->num_ioim_reqs = cfg->fwcfg.num_ioim_reqs; 3707 3708 fcp->num_fwtio_reqs = cfg->fwcfg.num_fwtio_reqs; 3708 3709 fcp->num_itns = cfg->fwcfg.num_rports; ··· 3726 3725 bfa_iocfc_set_snsbase(bfa, idx, fcp->snsbase[idx].pa); 3727 3726 } 3728 3727 3728 + fcp->throttle_update_required = 1; 3729 3729 bfa_fcpim_attach(fcp, bfad, cfg, pcidev); 3730 3730 3731 3731 bfa_iotag_attach(fcp); ··· 3765 3763 { 3766 3764 struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa); 3767 3765 3768 - /* Enqueue unused ioim resources to free_q */ 3769 - list_splice_tail_init(&fcp->iotag_unused_q, &fcp->iotag_ioim_free_q); 3770 - 3771 3766 bfa_fcpim_iocdisable(fcp); 3772 3767 } 3773 3768 3774 3769 void 3775 - bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw) 3770 + bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw, u16 max_ioim_fw) 3776 3771 { 3777 3772 struct bfa_fcp_mod_s *mod = BFA_FCP_MOD(bfa); 3778 3773 struct list_head *qe; 3779 3774 int i; 3780 3775 3776 + /* Update io throttle value only once during driver load time */ 3777 + if (!mod->throttle_update_required) 3778 + return; 3779 + 3781 3780 for (i = 0; i < (mod->num_ioim_reqs - num_ioim_fw); i++) { 3782 3781 bfa_q_deq_tail(&mod->iotag_ioim_free_q, &qe); 3783 3782 list_add_tail(qe, &mod->iotag_unused_q); 3784 3783 } 3784 + 3785 + if (mod->num_ioim_reqs != num_ioim_fw) { 3786 + bfa_trc(bfa, mod->num_ioim_reqs); 3787 + bfa_trc(bfa, num_ioim_fw); 3788 + } 3789 + 3790 + mod->max_ioim_reqs = max_ioim_fw; 3791 + mod->num_ioim_reqs = num_ioim_fw; 3792 + mod->throttle_update_required = 0; 3785 3793 } 3786 3794 3787 3795 void ··· 3848 3836 } 3849 3837 3850 3838 bfa_mem_kva_curp(fcp) = (u8 *) iotag; 3839 + } 3840 + 3841 + 3842 + /** 3843 + * To send config req, first try to use throttle value from flash 3844 + * If 0, then use driver parameter 3845 + * We need to use min(flash_val, drv_val) because 3846 + * memory allocation was done based on this cfg'd value 3847 + */ 3848 + u16 3849 + bfa_fcpim_get_throttle_cfg(struct bfa_s *bfa, u16 drv_cfg_param) 3850 + { 3851 + u16 tmp; 3852 + struct bfa_fcp_mod_s *fcp = BFA_FCP_MOD(bfa); 3853 + 3854 + /* 3855 + * If throttle value from flash is already in effect after driver is 3856 + * loaded then until next load, always return current value instead 3857 + * of actual flash value 3858 + */ 3859 + if (!fcp->throttle_update_required) 3860 + return (u16)fcp->num_ioim_reqs; 3861 + 3862 + tmp = bfa_dconf_read_data_valid(bfa) ? bfa_fcpim_read_throttle(bfa) : 0; 3863 + if (!tmp || (tmp > drv_cfg_param)) 3864 + tmp = drv_cfg_param; 3865 + 3866 + return tmp; 3867 + } 3868 + 3869 + bfa_status_t 3870 + bfa_fcpim_write_throttle(struct bfa_s *bfa, u16 value) 3871 + { 3872 + if (!bfa_dconf_get_min_cfg(bfa)) { 3873 + BFA_DCONF_MOD(bfa)->dconf->throttle_cfg.value = value; 3874 + BFA_DCONF_MOD(bfa)->dconf->throttle_cfg.is_valid = 1; 3875 + return BFA_STATUS_OK; 3876 + } 3877 + 3878 + return BFA_STATUS_FAILED; 3879 + } 3880 + 3881 + u16 3882 + bfa_fcpim_read_throttle(struct bfa_s *bfa) 3883 + { 3884 + struct bfa_throttle_cfg_s *throttle_cfg = 3885 + &(BFA_DCONF_MOD(bfa)->dconf->throttle_cfg); 3886 + 3887 + return ((!bfa_dconf_get_min_cfg(bfa)) ? 3888 + ((throttle_cfg->is_valid == 1) ? (throttle_cfg->value) : 0) : 0); 3889 + } 3890 + 3891 + bfa_status_t 3892 + bfa_fcpim_throttle_set(struct bfa_s *bfa, u16 value) 3893 + { 3894 + /* in min cfg no commands should run. */ 3895 + if ((bfa_dconf_get_min_cfg(bfa) == BFA_TRUE) || 3896 + (!bfa_dconf_read_data_valid(bfa))) 3897 + return BFA_STATUS_FAILED; 3898 + 3899 + bfa_fcpim_write_throttle(bfa, value); 3900 + 3901 + return bfa_dconf_update(bfa); 3902 + } 3903 + 3904 + bfa_status_t 3905 + bfa_fcpim_throttle_get(struct bfa_s *bfa, void *buf) 3906 + { 3907 + struct bfa_fcpim_s *fcpim = BFA_FCPIM(bfa); 3908 + struct bfa_defs_fcpim_throttle_s throttle; 3909 + 3910 + if ((bfa_dconf_get_min_cfg(bfa) == BFA_TRUE) || 3911 + (!bfa_dconf_read_data_valid(bfa))) 3912 + return BFA_STATUS_FAILED; 3913 + 3914 + memset(&throttle, 0, sizeof(struct bfa_defs_fcpim_throttle_s)); 3915 + 3916 + throttle.cur_value = (u16)(fcpim->fcp->num_ioim_reqs); 3917 + throttle.cfg_value = bfa_fcpim_read_throttle(bfa); 3918 + if (!throttle.cfg_value) 3919 + throttle.cfg_value = throttle.cur_value; 3920 + throttle.max_value = (u16)(fcpim->fcp->max_ioim_reqs); 3921 + memcpy(buf, &throttle, sizeof(struct bfa_defs_fcpim_throttle_s)); 3922 + 3923 + return BFA_STATUS_OK; 3851 3924 }
+8 -1
drivers/scsi/bfa/bfa_fcpim.h
··· 42 42 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m)); 43 43 void bfa_itn_isr(struct bfa_s *bfa, struct bfi_msg_s *m); 44 44 void bfa_iotag_attach(struct bfa_fcp_mod_s *fcp); 45 - void bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw); 45 + void bfa_fcp_res_recfg(struct bfa_s *bfa, u16 num_ioim_fw, u16 max_ioim_fw); 46 46 47 47 #define BFA_FCP_MOD(_hal) (&(_hal)->modules.fcp_mod) 48 48 #define BFA_MEM_FCP_KVA(__bfa) (&(BFA_FCP_MOD(__bfa)->kva_seg)) ··· 150 150 struct list_head iotag_unused_q; /* unused IO resources*/ 151 151 struct bfa_iotag_s *iotag_arr; 152 152 struct bfa_itn_s *itn_arr; 153 + int max_ioim_reqs; 153 154 int num_ioim_reqs; 154 155 int num_fwtio_reqs; 155 156 int num_itns; ··· 158 157 struct bfa_fcpim_s fcpim; 159 158 struct bfa_mem_dma_s dma_seg[BFA_FCP_DMA_SEGS]; 160 159 struct bfa_mem_kva_s kva_seg; 160 + int throttle_update_required; 161 161 }; 162 162 163 163 /* ··· 420 418 bfa_status_t bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, 421 419 wwn_t *pwwn, wwn_t rpwwn, struct scsi_lun lun); 422 420 bfa_status_t bfa_fcpim_lunmask_clear(struct bfa_s *bfa); 421 + u16 bfa_fcpim_read_throttle(struct bfa_s *bfa); 422 + bfa_status_t bfa_fcpim_write_throttle(struct bfa_s *bfa, u16 value); 423 + bfa_status_t bfa_fcpim_throttle_set(struct bfa_s *bfa, u16 value); 424 + bfa_status_t bfa_fcpim_throttle_get(struct bfa_s *bfa, void *buf); 425 + u16 bfa_fcpim_get_throttle_cfg(struct bfa_s *bfa, u16 drv_cfg_param); 423 426 424 427 #endif /* __BFA_FCPIM_H__ */
+5 -1
drivers/scsi/bfa/bfa_fcs_rport.c
··· 3026 3026 struct bfa_rport_qos_attr_s qos_attr; 3027 3027 struct bfa_fcs_lport_s *port = rport->port; 3028 3028 bfa_port_speed_t rport_speed = rport->rpf.rpsc_speed; 3029 + struct bfa_port_attr_s port_attr; 3030 + 3031 + bfa_fcport_get_attr(rport->fcs->bfa, &port_attr); 3029 3032 3030 3033 memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s)); 3031 3034 memset(&qos_attr, 0, sizeof(struct bfa_rport_qos_attr_s)); ··· 3059 3056 rport_speed = 3060 3057 bfa_fcport_get_ratelim_speed(rport->fcs->bfa); 3061 3058 3062 - if (rport_speed < bfa_fcs_lport_get_rport_max_speed(port)) 3059 + if ((bfa_fcs_lport_get_rport_max_speed(port) != 3060 + BFA_PORT_SPEED_UNKNOWN) && (rport_speed < port_attr.speed)) 3063 3061 rport_attr->trl_enforced = BFA_TRUE; 3064 3062 } 3065 3063 }
+3
drivers/scsi/bfa/bfa_ioc.h
··· 716 716 struct bfa_dconf_s { 717 717 struct bfa_dconf_hdr_s hdr; 718 718 struct bfa_lunmask_cfg_s lun_mask; 719 + struct bfa_throttle_cfg_s throttle_cfg; 719 720 }; 720 721 #pragma pack() 721 722 ··· 739 738 #define bfa_dconf_read_data_valid(__bfa) \ 740 739 (BFA_DCONF_MOD(__bfa)->read_data_valid) 741 740 #define BFA_DCONF_UPDATE_TOV 5000 /* memtest timeout in msec */ 741 + #define bfa_dconf_get_min_cfg(__bfa) \ 742 + (BFA_DCONF_MOD(__bfa)->min_cfg) 742 743 743 744 void bfa_dconf_modinit(struct bfa_s *bfa); 744 745 void bfa_dconf_modexit(struct bfa_s *bfa);
+36
drivers/scsi/bfa/bfad_bsg.c
··· 2547 2547 return 0; 2548 2548 } 2549 2549 2550 + int 2551 + bfad_iocmd_fcpim_throttle_query(struct bfad_s *bfad, void *cmd) 2552 + { 2553 + struct bfa_bsg_fcpim_throttle_s *iocmd = 2554 + (struct bfa_bsg_fcpim_throttle_s *)cmd; 2555 + unsigned long flags; 2556 + 2557 + spin_lock_irqsave(&bfad->bfad_lock, flags); 2558 + iocmd->status = bfa_fcpim_throttle_get(&bfad->bfa, 2559 + (void *)&iocmd->throttle); 2560 + spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2561 + 2562 + return 0; 2563 + } 2564 + 2565 + int 2566 + bfad_iocmd_fcpim_throttle_set(struct bfad_s *bfad, void *cmd) 2567 + { 2568 + struct bfa_bsg_fcpim_throttle_s *iocmd = 2569 + (struct bfa_bsg_fcpim_throttle_s *)cmd; 2570 + unsigned long flags; 2571 + 2572 + spin_lock_irqsave(&bfad->bfad_lock, flags); 2573 + iocmd->status = bfa_fcpim_throttle_set(&bfad->bfa, 2574 + iocmd->throttle.cfg_value); 2575 + spin_unlock_irqrestore(&bfad->bfad_lock, flags); 2576 + 2577 + return 0; 2578 + } 2579 + 2550 2580 static int 2551 2581 bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd, 2552 2582 unsigned int payload_len) ··· 2910 2880 case IOCMD_FCPIM_LUNMASK_ADD: 2911 2881 case IOCMD_FCPIM_LUNMASK_DELETE: 2912 2882 rc = bfad_iocmd_fcpim_cfg_lunmask(bfad, iocmd, cmd); 2883 + break; 2884 + case IOCMD_FCPIM_THROTTLE_QUERY: 2885 + rc = bfad_iocmd_fcpim_throttle_query(bfad, iocmd); 2886 + break; 2887 + case IOCMD_FCPIM_THROTTLE_SET: 2888 + rc = bfad_iocmd_fcpim_throttle_set(bfad, iocmd); 2913 2889 break; 2914 2890 default: 2915 2891 rc = -EINVAL;
+9
drivers/scsi/bfa/bfad_bsg.h
··· 145 145 IOCMD_DIAG_DPORT_DISABLE, 146 146 IOCMD_DIAG_DPORT_GET_STATE, 147 147 IOCMD_QOS_SET_BW, 148 + IOCMD_FCPIM_THROTTLE_QUERY, 149 + IOCMD_FCPIM_THROTTLE_SET 148 150 }; 149 151 150 152 struct bfa_bsg_gen_s { ··· 740 738 wwn_t pwwn; 741 739 wwn_t rpwwn; 742 740 struct scsi_lun lun; 741 + }; 742 + 743 + struct bfa_bsg_fcpim_throttle_s { 744 + bfa_status_t status; 745 + u16 bfad_num; 746 + u16 vf_id; 747 + struct bfa_defs_fcpim_throttle_s throttle; 743 748 }; 744 749 745 750 struct bfa_bsg_fcpt_s {