Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

clk: qcom: Add support for RCG to register for DFS

Dynamic Frequency Switch (DFS) is a clock controller feature that allows
requests from peripherals to switch the frequency of the input clock
automatically, without SW intervention. There are various performance levels associated
with a root clock. When the input performance state changes, the source
clocks and division ratios of the new performance state are loaded on to
RCG via HW and the RCG switches to new clock frequency when the RCG is in
DFS HW enabled mode.

Register the root clock generators (RCGs) to use the DFS clock ops
in the cases where DFS is enabled. The clk_round_rate() called by the clock
consumer would invoke the dfs determine clock ops and would read the DFS
performance level registers to identify all the frequencies supported and
update the frequency table. The DFS clock consumers would maintain these
frequency mapping and request the desired performance levels.

Signed-off-by: Taniya Das <tdas@codeaurora.org>
[sboyd@kernel.org: Rework registration logic to stop copying, change
recalc_rate() to index directly into the table if possible and fallback
to calculating on the fly with an assumed correct parent]
Signed-off-by: Stephen Boyd <sboyd@kernel.org>

authored by

Taniya Das and committed by
Stephen Boyd
cc4f6944 5b394b2d

+205
+11
drivers/clk/qcom/clk-rcg.h
··· 163 163 extern const struct clk_ops clk_gfx3d_ops; 164 164 extern const struct clk_ops clk_rcg2_shared_ops; 165 165 166 + struct clk_rcg_dfs_data { 167 + struct clk_rcg2 *rcg; 168 + struct clk_init_data *init; 169 + }; 170 + 171 + #define DEFINE_RCG_DFS(r) \ 172 + { .rcg = &r##_src, .init = &r##_init } 173 + 174 + extern int qcom_cc_register_rcg_dfs(struct regmap *regmap, 175 + const struct clk_rcg_dfs_data *rcgs, 176 + size_t len); 166 177 #endif
+194
drivers/clk/qcom/clk-rcg2.c
··· 12 12 #include <linux/delay.h> 13 13 #include <linux/regmap.h> 14 14 #include <linux/math64.h> 15 + #include <linux/slab.h> 15 16 16 17 #include <asm/div64.h> 17 18 ··· 40 39 #define M_REG 0x8 41 40 #define N_REG 0xc 42 41 #define D_REG 0x10 42 + 43 + /* Dynamic Frequency Scaling */ 44 + #define MAX_PERF_LEVEL 8 45 + #define SE_CMD_DFSR_OFFSET 0x14 46 + #define SE_CMD_DFS_EN BIT(0) 47 + #define SE_PERF_DFSR(level) (0x1c + 0x4 * (level)) 48 + #define SE_PERF_M_DFSR(level) (0x5c + 0x4 * (level)) 49 + #define SE_PERF_N_DFSR(level) (0x9c + 0x4 * (level)) 43 50 44 51 enum freq_policy { 45 52 FLOOR, ··· 938 929 .set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent, 939 930 }; 940 931 EXPORT_SYMBOL_GPL(clk_rcg2_shared_ops); 932 + 933 + /* Common APIs to be used for DFS based RCGR */ 934 + static void clk_rcg2_dfs_populate_freq(struct clk_hw *hw, unsigned int l, 935 + struct freq_tbl *f) 936 + { 937 + struct clk_rcg2 *rcg = to_clk_rcg2(hw); 938 + struct clk_hw *p; 939 + unsigned long prate = 0; 940 + u32 val, mask, cfg, mode; 941 + int i, num_parents; 942 + 943 + regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(l), &cfg); 944 + 945 + mask = BIT(rcg->hid_width) - 1; 946 + f->pre_div = 1; 947 + if (cfg & mask) 948 + f->pre_div = cfg & mask; 949 + 950 + cfg &= CFG_SRC_SEL_MASK; 951 + cfg >>= CFG_SRC_SEL_SHIFT; 952 + 953 + num_parents = clk_hw_get_num_parents(hw); 954 + for (i = 0; i < num_parents; i++) { 955 + if (cfg == rcg->parent_map[i].cfg) { 956 + f->src = rcg->parent_map[i].src; 957 + p = clk_hw_get_parent_by_index(&rcg->clkr.hw, i); 958 + prate = clk_hw_get_rate(p); 959 + } 960 + } 961 + 962 + mode = cfg & CFG_MODE_MASK; 963 + mode >>= CFG_MODE_SHIFT; 964 + if (mode) { 965 + mask = BIT(rcg->mnd_width) - 1; 966 + regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_M_DFSR(l), 967 + &val); 968 + val &= mask; 969 + f->m = val; 970 + 971 + regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_N_DFSR(l), 972 + &val); 973 + val = ~val; 974 + val &= mask; 
975 + val += f->m; 976 + f->n = val; 977 + } 978 + 979 + f->freq = calc_rate(prate, f->m, f->n, mode, f->pre_div); 980 + } 981 + 982 + static int clk_rcg2_dfs_populate_freq_table(struct clk_rcg2 *rcg) 983 + { 984 + struct freq_tbl *freq_tbl; 985 + int i; 986 + 987 + freq_tbl = kcalloc(MAX_PERF_LEVEL, sizeof(*freq_tbl), GFP_KERNEL); 988 + if (!freq_tbl) 989 + return -ENOMEM; 990 + rcg->freq_tbl = freq_tbl; 991 + 992 + for (i = 0; i < MAX_PERF_LEVEL; i++) 993 + clk_rcg2_dfs_populate_freq(&rcg->clkr.hw, i, freq_tbl + i); 994 + 995 + return 0; 996 + } 997 + 998 + static int clk_rcg2_dfs_determine_rate(struct clk_hw *hw, 999 + struct clk_rate_request *req) 1000 + { 1001 + struct clk_rcg2 *rcg = to_clk_rcg2(hw); 1002 + int ret; 1003 + 1004 + if (!rcg->freq_tbl) { 1005 + ret = clk_rcg2_dfs_populate_freq_table(rcg); 1006 + if (ret) { 1007 + pr_err("Failed to update DFS tables for %s\n", 1008 + clk_hw_get_name(hw)); 1009 + return ret; 1010 + } 1011 + } 1012 + 1013 + return clk_rcg2_determine_rate(hw, req); 1014 + } 1015 + 1016 + static unsigned long 1017 + clk_rcg2_dfs_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) 1018 + { 1019 + struct clk_rcg2 *rcg = to_clk_rcg2(hw); 1020 + u32 level, mask, cfg, m = 0, n = 0, mode, pre_div; 1021 + 1022 + regmap_read(rcg->clkr.regmap, 1023 + rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &level); 1024 + level &= GENMASK(4, 1); 1025 + level >>= 1; 1026 + 1027 + if (rcg->freq_tbl) 1028 + return rcg->freq_tbl[level].freq; 1029 + 1030 + /* 1031 + * Assume that parent_rate is actually the parent because 1032 + * we can't do any better at figuring it out when the table 1033 + * hasn't been populated yet. We only populate the table 1034 + * in determine_rate because we can't guarantee the parents 1035 + * will be registered with the framework until then. 
1036 + */ 1037 + regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + SE_PERF_DFSR(level), 1038 + &cfg); 1039 + 1040 + mask = BIT(rcg->hid_width) - 1; 1041 + pre_div = 1; 1042 + if (cfg & mask) 1043 + pre_div = cfg & mask; 1044 + 1045 + mode = cfg & CFG_MODE_MASK; 1046 + mode >>= CFG_MODE_SHIFT; 1047 + if (mode) { 1048 + mask = BIT(rcg->mnd_width) - 1; 1049 + regmap_read(rcg->clkr.regmap, 1050 + rcg->cmd_rcgr + SE_PERF_M_DFSR(level), &m); 1051 + m &= mask; 1052 + 1053 + regmap_read(rcg->clkr.regmap, 1054 + rcg->cmd_rcgr + SE_PERF_N_DFSR(level), &n); 1055 + n = ~n; 1056 + n &= mask; 1057 + n += m; 1058 + } 1059 + 1060 + return calc_rate(parent_rate, m, n, mode, pre_div); 1061 + } 1062 + 1063 + static const struct clk_ops clk_rcg2_dfs_ops = { 1064 + .is_enabled = clk_rcg2_is_enabled, 1065 + .get_parent = clk_rcg2_get_parent, 1066 + .determine_rate = clk_rcg2_dfs_determine_rate, 1067 + .recalc_rate = clk_rcg2_dfs_recalc_rate, 1068 + }; 1069 + 1070 + static int clk_rcg2_enable_dfs(const struct clk_rcg_dfs_data *data, 1071 + struct regmap *regmap) 1072 + { 1073 + struct clk_rcg2 *rcg = data->rcg; 1074 + struct clk_init_data *init = data->init; 1075 + u32 val; 1076 + int ret; 1077 + 1078 + ret = regmap_read(regmap, rcg->cmd_rcgr + SE_CMD_DFSR_OFFSET, &val); 1079 + if (ret) 1080 + return -EINVAL; 1081 + 1082 + if (!(val & SE_CMD_DFS_EN)) 1083 + return 0; 1084 + 1085 + /* 1086 + * Rate changes with consumer writing a register in 1087 + * their own I/O region 1088 + */ 1089 + init->flags |= CLK_GET_RATE_NOCACHE; 1090 + init->ops = &clk_rcg2_dfs_ops; 1091 + 1092 + rcg->freq_tbl = NULL; 1093 + 1094 + pr_debug("DFS registered for clk %s\n", init->name); 1095 + 1096 + return 0; 1097 + } 1098 + 1099 + int qcom_cc_register_rcg_dfs(struct regmap *regmap, 1100 + const struct clk_rcg_dfs_data *rcgs, size_t len) 1101 + { 1102 + int i, ret; 1103 + 1104 + for (i = 0; i < len; i++) { 1105 + ret = clk_rcg2_enable_dfs(&rcgs[i], regmap); 1106 + if (ret) { 1107 + const char *name = 
rcgs[i].init->name; 1108 + 1109 + pr_err("DFS register failed for clk %s\n", name); 1110 + return ret; 1111 + } 1112 + } 1113 + 1114 + return 0; 1115 + } 1116 + EXPORT_SYMBOL_GPL(qcom_cc_register_rcg_dfs);