Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

dmaengine: idxd: change bandwidth token to read buffers

DSA spec v1.2 has changed the term "bandwidth tokens" to "read buffers"
in order to make the concept clearer. Deprecate the bandwidth token
naming in the driver and convert to read buffers in order to match
the spec and reduce confusion when reading it.

Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Link: https://lore.kernel.org/r/163951338932.2988321.6162640806935567317.stgit@djiang5-desk3.ch.intel.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>

Authored by Dave Jiang and committed by Vinod Koul
7ed6f1b8 0f225705

+49 -50
+12 -13
drivers/dma/idxd/device.c
··· 678 678 memset(&group->grpcfg, 0, sizeof(group->grpcfg)); 679 679 group->num_engines = 0; 680 680 group->num_wqs = 0; 681 - group->use_token_limit = false; 682 - group->tokens_allowed = 0; 683 - group->tokens_reserved = 0; 681 + group->use_rdbuf_limit = false; 682 + group->rdbufs_allowed = 0; 683 + group->rdbufs_reserved = 0; 684 684 group->tc_a = -1; 685 685 group->tc_b = -1; 686 686 } ··· 748 748 int i; 749 749 struct device *dev = &idxd->pdev->dev; 750 750 751 - /* Setup bandwidth token limit */ 752 - if (idxd->hw.gen_cap.config_en && idxd->token_limit) { 751 + /* Setup bandwidth rdbuf limit */ 752 + if (idxd->hw.gen_cap.config_en && idxd->rdbuf_limit) { 753 753 reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET); 754 - reg.token_limit = idxd->token_limit; 754 + reg.rdbuf_limit = idxd->rdbuf_limit; 755 755 iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET); 756 756 } 757 757 ··· 889 889 group->tc_b = group->grpcfg.flags.tc_b = 1; 890 890 else 891 891 group->grpcfg.flags.tc_b = group->tc_b; 892 - group->grpcfg.flags.use_token_limit = group->use_token_limit; 893 - group->grpcfg.flags.tokens_reserved = group->tokens_reserved; 894 - if (group->tokens_allowed) 895 - group->grpcfg.flags.tokens_allowed = 896 - group->tokens_allowed; 892 + group->grpcfg.flags.use_rdbuf_limit = group->use_rdbuf_limit; 893 + group->grpcfg.flags.rdbufs_reserved = group->rdbufs_reserved; 894 + if (group->rdbufs_allowed) 895 + group->grpcfg.flags.rdbufs_allowed = group->rdbufs_allowed; 897 896 else 898 - group->grpcfg.flags.tokens_allowed = idxd->max_tokens; 897 + group->grpcfg.flags.rdbufs_allowed = idxd->max_rdbufs; 899 898 } 900 899 } 901 900 ··· 1085 1086 int i, rc; 1086 1087 1087 1088 reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET); 1088 - idxd->token_limit = reg.token_limit; 1089 + idxd->rdbuf_limit = reg.rdbuf_limit; 1089 1090 1090 1091 for (i = 0; i < idxd->max_groups; i++) { 1091 1092 struct idxd_group *group = idxd->groups[i];
+6 -6
drivers/dma/idxd/idxd.h
··· 90 90 int id; 91 91 int num_engines; 92 92 int num_wqs; 93 - bool use_token_limit; 94 - u8 tokens_allowed; 95 - u8 tokens_reserved; 93 + bool use_rdbuf_limit; 94 + u8 rdbufs_allowed; 95 + u8 rdbufs_reserved; 96 96 int tc_a; 97 97 int tc_b; 98 98 }; ··· 292 292 u32 max_batch_size; 293 293 int max_groups; 294 294 int max_engines; 295 - int max_tokens; 295 + int max_rdbufs; 296 296 int max_wqs; 297 297 int max_wq_size; 298 - int token_limit; 299 - int nr_tokens; /* non-reserved tokens */ 298 + int rdbuf_limit; 299 + int nr_rdbufs; /* non-reserved read buffers */ 300 300 unsigned int wqcfg_size; 301 301 302 302 union sw_err_reg sw_err;
+3 -3
drivers/dma/idxd/init.c
··· 400 400 dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits); 401 401 idxd->max_groups = idxd->hw.group_cap.num_groups; 402 402 dev_dbg(dev, "max groups: %u\n", idxd->max_groups); 403 - idxd->max_tokens = idxd->hw.group_cap.total_tokens; 404 - dev_dbg(dev, "max tokens: %u\n", idxd->max_tokens); 405 - idxd->nr_tokens = idxd->max_tokens; 403 + idxd->max_rdbufs = idxd->hw.group_cap.total_rdbufs; 404 + dev_dbg(dev, "max read buffers: %u\n", idxd->max_rdbufs); 405 + idxd->nr_rdbufs = idxd->max_rdbufs; 406 406 407 407 /* read engine capabilities */ 408 408 idxd->hw.engine_cap.bits =
+7 -7
drivers/dma/idxd/registers.h
··· 64 64 union group_cap_reg { 65 65 struct { 66 66 u64 num_groups:8; 67 - u64 total_tokens:8; 68 - u64 token_en:1; 69 - u64 token_limit:1; 67 + u64 total_rdbufs:8; /* formerly total_tokens */ 68 + u64 rdbuf_ctrl:1; /* formerly token_en */ 69 + u64 rdbuf_limit:1; /* formerly token_limit */ 70 70 u64 rsvd:46; 71 71 }; 72 72 u64 bits; ··· 110 110 #define IDXD_GENCFG_OFFSET 0x80 111 111 union gencfg_reg { 112 112 struct { 113 - u32 token_limit:8; 113 + u32 rdbuf_limit:8; 114 114 u32 rsvd:4; 115 115 u32 user_int_en:1; 116 116 u32 rsvd2:19; ··· 288 288 u32 tc_a:3; 289 289 u32 tc_b:3; 290 290 u32 rsvd:1; 291 - u32 use_token_limit:1; 292 - u32 tokens_reserved:8; 291 + u32 use_rdbuf_limit:1; 292 + u32 rdbufs_reserved:8; 293 293 u32 rsvd2:4; 294 - u32 tokens_allowed:8; 294 + u32 rdbufs_allowed:8; 295 295 u32 rsvd3:4; 296 296 }; 297 297 u32 bits;
+21 -21
drivers/dma/idxd/sysfs.c
··· 99 99 100 100 /* Group attributes */ 101 101 102 - static void idxd_set_free_tokens(struct idxd_device *idxd) 102 + static void idxd_set_free_rdbufs(struct idxd_device *idxd) 103 103 { 104 - int i, tokens; 104 + int i, rdbufs; 105 105 106 - for (i = 0, tokens = 0; i < idxd->max_groups; i++) { 106 + for (i = 0, rdbufs = 0; i < idxd->max_groups; i++) { 107 107 struct idxd_group *g = idxd->groups[i]; 108 108 109 - tokens += g->tokens_reserved; 109 + rdbufs += g->rdbufs_reserved; 110 110 } 111 111 112 - idxd->nr_tokens = idxd->max_tokens - tokens; 112 + idxd->nr_rdbufs = idxd->max_rdbufs - rdbufs; 113 113 } 114 114 115 115 static ssize_t group_tokens_reserved_show(struct device *dev, ··· 118 118 { 119 119 struct idxd_group *group = confdev_to_group(dev); 120 120 121 - return sysfs_emit(buf, "%u\n", group->tokens_reserved); 121 + return sysfs_emit(buf, "%u\n", group->rdbufs_reserved); 122 122 } 123 123 124 124 static ssize_t group_tokens_reserved_store(struct device *dev, ··· 143 143 if (idxd->state == IDXD_DEV_ENABLED) 144 144 return -EPERM; 145 145 146 - if (val > idxd->max_tokens) 146 + if (val > idxd->max_rdbufs) 147 147 return -EINVAL; 148 148 149 - if (val > idxd->nr_tokens + group->tokens_reserved) 149 + if (val > idxd->nr_rdbufs + group->rdbufs_reserved) 150 150 return -EINVAL; 151 151 152 - group->tokens_reserved = val; 153 - idxd_set_free_tokens(idxd); 152 + group->rdbufs_reserved = val; 153 + idxd_set_free_rdbufs(idxd); 154 154 return count; 155 155 } 156 156 ··· 164 164 { 165 165 struct idxd_group *group = confdev_to_group(dev); 166 166 167 - return sysfs_emit(buf, "%u\n", group->tokens_allowed); 167 + return sysfs_emit(buf, "%u\n", group->rdbufs_allowed); 168 168 } 169 169 170 170 static ssize_t group_tokens_allowed_store(struct device *dev, ··· 190 190 return -EPERM; 191 191 192 192 if (val < 4 * group->num_engines || 193 - val > group->tokens_reserved + idxd->nr_tokens) 193 + val > group->rdbufs_reserved + idxd->nr_rdbufs) 194 194 return -EINVAL; 195 
195 196 - group->tokens_allowed = val; 196 + group->rdbufs_allowed = val; 197 197 return count; 198 198 } 199 199 ··· 207 207 { 208 208 struct idxd_group *group = confdev_to_group(dev); 209 209 210 - return sysfs_emit(buf, "%u\n", group->use_token_limit); 210 + return sysfs_emit(buf, "%u\n", group->use_rdbuf_limit); 211 211 } 212 212 213 213 static ssize_t group_use_token_limit_store(struct device *dev, ··· 232 232 if (idxd->state == IDXD_DEV_ENABLED) 233 233 return -EPERM; 234 234 235 - if (idxd->token_limit == 0) 235 + if (idxd->rdbuf_limit == 0) 236 236 return -EPERM; 237 237 238 - group->use_token_limit = !!val; 238 + group->use_rdbuf_limit = !!val; 239 239 return count; 240 240 } 241 241 ··· 1197 1197 { 1198 1198 struct idxd_device *idxd = confdev_to_idxd(dev); 1199 1199 1200 - return sysfs_emit(buf, "%u\n", idxd->max_tokens); 1200 + return sysfs_emit(buf, "%u\n", idxd->max_rdbufs); 1201 1201 } 1202 1202 static DEVICE_ATTR_RO(max_tokens); 1203 1203 ··· 1206 1206 { 1207 1207 struct idxd_device *idxd = confdev_to_idxd(dev); 1208 1208 1209 - return sysfs_emit(buf, "%u\n", idxd->token_limit); 1209 + return sysfs_emit(buf, "%u\n", idxd->rdbuf_limit); 1210 1210 } 1211 1211 1212 1212 static ssize_t token_limit_store(struct device *dev, ··· 1227 1227 if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) 1228 1228 return -EPERM; 1229 1229 1230 - if (!idxd->hw.group_cap.token_limit) 1230 + if (!idxd->hw.group_cap.rdbuf_limit) 1231 1231 return -EPERM; 1232 1232 1233 - if (val > idxd->hw.group_cap.total_tokens) 1233 + if (val > idxd->hw.group_cap.total_rdbufs) 1234 1234 return -EINVAL; 1235 1235 1236 - idxd->token_limit = val; 1236 + idxd->rdbuf_limit = val; 1237 1237 return count; 1238 1238 } 1239 1239 static DEVICE_ATTR_RW(token_limit);