Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

ASoC: Intel: avs: Conditional path support

Merge series from Cezary Rojewski <cezary.rojewski@intel.com>:

The code presented here is a lighter version of what was initially
discussed in 2021 [1]. Later, we've had a more general discussion [2]
"what if the feature was part of the ASoC core instead?". The discussion
turned towards leaving a simplified version within the avs/, so here is
the final outcome.

What's removed when compared to the older [1] version?
- conditional path priorities
- conditional path overriding

The background:

Conditional path (condpath) helps facilitate modern audio use cases such
as Echo Cancellation and Noise Reduction. These are not invoked by
a userspace application opening an endpoint (FrontEnd) but are a
"side effect" of selected PCMs running simultaneously, e.g.: if both
Speaker (source) and Microphone Array (sink) are running, reference
data from the Speaker is taken into account when processing the capture
stream, for a better voice command detection ratio.

Which PCMs are needed for a given conditional path to be spawned is
determined by the driver when registering the condpath.

Two patches implement the feature:

1) update the topology parser to acknowledge the new tuples
2) update the path-creation code and the functions that participate in
the PCM runtime, e.g. run/pause.

The runtime update is needed to keep streaming sane - all the path
operations shall be synchronized when a condpath is being spawned or its
status is about to change.

[1]: https://lore.kernel.org/alsa-devel/20211208111301.1817725-21-cezary.rojewski@intel.com/
[2]: https://lore.kernel.org/linux-sound/20240821101816.1494541-1-cezary.rojewski@intel.com/

+413
+15
include/uapi/sound/intel/avs/tokens.h
··· 133 133 AVS_TKN_PATH_FE_FMT_ID_U32 = 1902, 134 134 AVS_TKN_PATH_BE_FMT_ID_U32 = 1903, 135 135 136 + /* struct avs_tplg_path_template (conditional) */ 137 + AVS_TKN_CONDPATH_TMPL_ID_U32 = 1801, 138 + AVS_TKN_CONDPATH_TMPL_SOURCE_TPLG_NAME_STRING = 2002, 139 + AVS_TKN_CONDPATH_TMPL_SOURCE_PATH_TMPL_ID_U32 = 2003, 140 + AVS_TKN_CONDPATH_TMPL_SINK_TPLG_NAME_STRING = 2004, 141 + AVS_TKN_CONDPATH_TMPL_SINK_PATH_TMPL_ID_U32 = 2005, 142 + AVS_TKN_CONDPATH_TMPL_COND_TYPE_U32 = 2006, 143 + AVS_TKN_CONDPATH_TMPL_OVERRIDABLE_BOOL = 2007, 144 + AVS_TKN_CONDPATH_TMPL_PRIORITY_U8 = 2008, 145 + 146 + /* struct avs_tplg_path (conditional) */ 147 + AVS_TKN_CONDPATH_ID_U32 = 1901, 148 + AVS_TKN_CONDPATH_SOURCE_PATH_ID_U32 = 2102, 149 + AVS_TKN_CONDPATH_SINK_PATH_ID_U32 = 2103, 150 + 136 151 /* struct avs_tplg_pin_format */ 137 152 AVS_TKN_PIN_FMT_INDEX_U32 = 2201, 138 153 AVS_TKN_PIN_FMT_IOBS_U32 = 2202,
+267
sound/soc/intel/avs/path.c
··· 115 115 return NULL; 116 116 } 117 117 118 + static struct avs_tplg_path *avs_condpath_find_variant(struct avs_dev *adev, 119 + struct avs_tplg_path_template *template, 120 + struct avs_path *source, 121 + struct avs_path *sink) 122 + { 123 + struct avs_tplg_path *variant; 124 + 125 + list_for_each_entry(variant, &template->path_list, node) { 126 + if (variant->source_path_id == source->template->id && 127 + variant->sink_path_id == sink->template->id) 128 + return variant; 129 + } 130 + 131 + return NULL; 132 + } 133 + 134 + static bool avs_tplg_path_template_id_equal(struct avs_tplg_path_template_id *id, 135 + struct avs_tplg_path_template_id *id2) 136 + { 137 + return id->id == id2->id && !strcmp(id->tplg_name, id2->tplg_name); 138 + } 139 + 140 + static struct avs_path *avs_condpath_find_match(struct avs_dev *adev, 141 + struct avs_tplg_path_template *template, 142 + struct avs_path *path, int dir) 143 + { 144 + struct avs_tplg_path_template_id *id, *id2; 145 + 146 + if (dir) { 147 + id = &template->source; 148 + id2 = &template->sink; 149 + } else { 150 + id = &template->sink; 151 + id2 = &template->source; 152 + } 153 + 154 + /* Check whether this path is either source or sink of condpath template. */ 155 + if (id->id != path->template->owner->id || 156 + strcmp(id->tplg_name, path->template->owner->owner->name)) 157 + return NULL; 158 + 159 + /* Unidirectional condpaths are allowed. */ 160 + if (avs_tplg_path_template_id_equal(id, id2)) 161 + return path; 162 + 163 + /* Now find the counterpart. 
*/ 164 + return avs_path_find_path(adev, id2->tplg_name, id2->id); 165 + } 166 + 118 167 static struct acpi_nhlt_config * 119 168 avs_nhlt_config_or_default(struct avs_dev *adev, struct avs_tplg_module *t); 120 169 ··· 1100 1051 path->dma_id = dma_id; 1101 1052 INIT_LIST_HEAD(&path->ppl_list); 1102 1053 INIT_LIST_HEAD(&path->node); 1054 + INIT_LIST_HEAD(&path->source_list); 1055 + INIT_LIST_HEAD(&path->sink_list); 1056 + INIT_LIST_HEAD(&path->source_node); 1057 + INIT_LIST_HEAD(&path->sink_node); 1103 1058 1104 1059 /* create all the pipelines */ 1105 1060 list_for_each_entry(tppl, &template->ppl_list, node) { ··· 1187 1134 return ERR_PTR(ret); 1188 1135 } 1189 1136 1137 + static void avs_condpath_free(struct avs_dev *adev, struct avs_path *path) 1138 + { 1139 + int ret; 1140 + 1141 + list_del(&path->source_node); 1142 + list_del(&path->sink_node); 1143 + 1144 + ret = avs_path_reset(path); 1145 + if (ret < 0) 1146 + dev_err(adev->dev, "reset condpath failed: %d\n", ret); 1147 + 1148 + ret = avs_path_unbind(path); 1149 + if (ret < 0) 1150 + dev_err(adev->dev, "unbind condpath failed: %d\n", ret); 1151 + 1152 + avs_path_free_unlocked(path); 1153 + } 1154 + 1155 + static struct avs_path *avs_condpath_create(struct avs_dev *adev, 1156 + struct avs_tplg_path *template, 1157 + struct avs_path *source, 1158 + struct avs_path *sink) 1159 + { 1160 + struct avs_path *path; 1161 + int ret; 1162 + 1163 + path = avs_path_create_unlocked(adev, 0, template); 1164 + if (IS_ERR(path)) 1165 + return path; 1166 + 1167 + ret = avs_path_bind(path); 1168 + if (ret) 1169 + goto err_bind; 1170 + 1171 + ret = avs_path_reset(path); 1172 + if (ret) 1173 + goto err_reset; 1174 + 1175 + path->source = source; 1176 + path->sink = sink; 1177 + list_add_tail(&path->source_node, &source->source_list); 1178 + list_add_tail(&path->sink_node, &sink->sink_list); 1179 + 1180 + return path; 1181 + 1182 + err_reset: 1183 + avs_path_unbind(path); 1184 + err_bind: 1185 + avs_path_free_unlocked(path); 1186 
+ return ERR_PTR(ret); 1187 + } 1188 + 1189 + static int avs_condpaths_walk(struct avs_dev *adev, struct avs_path *path, int dir) 1190 + { 1191 + struct avs_soc_component *acomp; 1192 + struct avs_path *source, *sink; 1193 + struct avs_path **other; 1194 + 1195 + if (dir) { 1196 + source = path; 1197 + other = &sink; 1198 + } else { 1199 + sink = path; 1200 + other = &source; 1201 + } 1202 + 1203 + list_for_each_entry(acomp, &adev->comp_list, node) { 1204 + for (int i = 0; i < acomp->tplg->num_condpath_tmpls; i++) { 1205 + struct avs_tplg_path_template *template; 1206 + struct avs_tplg_path *variant; 1207 + struct avs_path *cpath; 1208 + 1209 + template = &acomp->tplg->condpath_tmpls[i]; 1210 + 1211 + /* Do not create unidirectional condpaths twice. */ 1212 + if (avs_tplg_path_template_id_equal(&template->source, 1213 + &template->sink) && dir) 1214 + continue; 1215 + 1216 + *other = avs_condpath_find_match(adev, template, path, dir); 1217 + if (!*other) 1218 + continue; 1219 + 1220 + variant = avs_condpath_find_variant(adev, template, source, sink); 1221 + if (!variant) 1222 + continue; 1223 + 1224 + cpath = avs_condpath_create(adev, variant, source, sink); 1225 + if (IS_ERR(cpath)) 1226 + return PTR_ERR(cpath); 1227 + } 1228 + } 1229 + 1230 + return 0; 1231 + } 1232 + 1233 + /* Caller responsible for holding adev->path_mutex. */ 1234 + static int avs_condpaths_walk_all(struct avs_dev *adev, struct avs_path *path) 1235 + { 1236 + int ret; 1237 + 1238 + ret = avs_condpaths_walk(adev, path, SNDRV_PCM_STREAM_CAPTURE); 1239 + if (ret) 1240 + return ret; 1241 + 1242 + return avs_condpaths_walk(adev, path, SNDRV_PCM_STREAM_PLAYBACK); 1243 + } 1244 + 1190 1245 void avs_path_free(struct avs_path *path) 1191 1246 { 1247 + struct avs_path *cpath, *csave; 1192 1248 struct avs_dev *adev = path->owner; 1193 1249 1194 1250 mutex_lock(&adev->path_mutex); 1251 + 1252 + /* Free all condpaths this path spawned. 
*/ 1253 + list_for_each_entry_safe(cpath, csave, &path->source_list, source_node) 1254 + avs_condpath_free(path->owner, cpath); 1255 + list_for_each_entry_safe(cpath, csave, &path->sink_list, sink_node) 1256 + avs_condpath_free(path->owner, cpath); 1257 + 1195 1258 avs_path_free_unlocked(path); 1259 + 1196 1260 mutex_unlock(&adev->path_mutex); 1197 1261 } 1198 1262 ··· 1320 1150 { 1321 1151 struct avs_tplg_path *variant; 1322 1152 struct avs_path *path; 1153 + int ret; 1323 1154 1324 1155 variant = avs_path_find_variant(adev, template, fe_params, be_params); 1325 1156 if (!variant) { ··· 1334 1163 mutex_lock(&adev->comp_list_mutex); 1335 1164 1336 1165 path = avs_path_create_unlocked(adev, dma_id, variant); 1166 + if (IS_ERR(path)) 1167 + goto exit; 1337 1168 1169 + ret = avs_condpaths_walk_all(adev, path); 1170 + if (ret) { 1171 + avs_path_free_unlocked(path); 1172 + path = ERR_PTR(ret); 1173 + } 1174 + 1175 + exit: 1338 1176 mutex_unlock(&adev->comp_list_mutex); 1339 1177 mutex_unlock(&adev->path_mutex); 1340 1178 ··· 1466 1286 return 0; 1467 1287 } 1468 1288 1289 + static int avs_condpath_pause(struct avs_dev *adev, struct avs_path *cpath) 1290 + { 1291 + struct avs_path_pipeline *ppl; 1292 + int ret; 1293 + 1294 + if (cpath->state == AVS_PPL_STATE_PAUSED) 1295 + return 0; 1296 + 1297 + list_for_each_entry_reverse(ppl, &cpath->ppl_list, node) { 1298 + ret = avs_ipc_set_pipeline_state(adev, ppl->instance_id, AVS_PPL_STATE_PAUSED); 1299 + if (ret) { 1300 + dev_err(adev->dev, "pause cpath failed: %d\n", ret); 1301 + cpath->state = AVS_PPL_STATE_INVALID; 1302 + return AVS_IPC_RET(ret); 1303 + } 1304 + } 1305 + 1306 + cpath->state = AVS_PPL_STATE_PAUSED; 1307 + return 0; 1308 + } 1309 + 1310 + static void avs_condpaths_pause(struct avs_dev *adev, struct avs_path *path) 1311 + { 1312 + struct avs_path *cpath; 1313 + 1314 + mutex_lock(&adev->path_mutex); 1315 + 1316 + /* If either source or sink stops, so do the attached conditional paths. 
*/ 1317 + list_for_each_entry(cpath, &path->source_list, source_node) 1318 + avs_condpath_pause(adev, cpath); 1319 + list_for_each_entry(cpath, &path->sink_list, sink_node) 1320 + avs_condpath_pause(adev, cpath); 1321 + 1322 + mutex_unlock(&adev->path_mutex); 1323 + } 1324 + 1469 1325 int avs_path_pause(struct avs_path *path) 1470 1326 { 1471 1327 struct avs_path_pipeline *ppl; ··· 1510 1294 1511 1295 if (path->state == AVS_PPL_STATE_PAUSED) 1512 1296 return 0; 1297 + 1298 + avs_condpaths_pause(adev, path); 1513 1299 1514 1300 list_for_each_entry_reverse(ppl, &path->ppl_list, node) { 1515 1301 ret = avs_ipc_set_pipeline_state(adev, ppl->instance_id, ··· 1525 1307 1526 1308 path->state = AVS_PPL_STATE_PAUSED; 1527 1309 return 0; 1310 + } 1311 + 1312 + static int avs_condpath_run(struct avs_dev *adev, struct avs_path *cpath, int trigger) 1313 + { 1314 + struct avs_path_pipeline *ppl; 1315 + int ret; 1316 + 1317 + if (cpath->state == AVS_PPL_STATE_RUNNING) 1318 + return 0; 1319 + 1320 + list_for_each_entry(ppl, &cpath->ppl_list, node) { 1321 + if (ppl->template->cfg->trigger != trigger) 1322 + continue; 1323 + 1324 + ret = avs_ipc_set_pipeline_state(adev, ppl->instance_id, AVS_PPL_STATE_RUNNING); 1325 + if (ret) { 1326 + dev_err(adev->dev, "run cpath failed: %d\n", ret); 1327 + cpath->state = AVS_PPL_STATE_INVALID; 1328 + return AVS_IPC_RET(ret); 1329 + } 1330 + } 1331 + 1332 + cpath->state = AVS_PPL_STATE_RUNNING; 1333 + return 0; 1334 + } 1335 + 1336 + static void avs_condpaths_run(struct avs_dev *adev, struct avs_path *path, int trigger) 1337 + { 1338 + struct avs_path *cpath; 1339 + 1340 + mutex_lock(&adev->path_mutex); 1341 + 1342 + /* Run conditional paths only if source and sink are both running. 
*/ 1343 + list_for_each_entry(cpath, &path->source_list, source_node) 1344 + if (cpath->source->state == AVS_PPL_STATE_RUNNING && 1345 + cpath->sink->state == AVS_PPL_STATE_RUNNING) 1346 + avs_condpath_run(adev, cpath, trigger); 1347 + 1348 + list_for_each_entry(cpath, &path->sink_list, sink_node) 1349 + if (cpath->source->state == AVS_PPL_STATE_RUNNING && 1350 + cpath->sink->state == AVS_PPL_STATE_RUNNING) 1351 + avs_condpath_run(adev, cpath, trigger); 1352 + 1353 + mutex_unlock(&adev->path_mutex); 1528 1354 } 1529 1355 1530 1356 int avs_path_run(struct avs_path *path, int trigger) ··· 1594 1332 } 1595 1333 1596 1334 path->state = AVS_PPL_STATE_RUNNING; 1335 + 1336 + /* Granular pipeline triggering not intended for conditional paths. */ 1337 + if (trigger == AVS_TPLG_TRIGGER_AUTO) 1338 + avs_condpaths_run(adev, path, trigger); 1339 + 1597 1340 return 0; 1598 1341 }
+13
sound/soc/intel/avs/path.h
··· 13 13 #include "avs.h" 14 14 #include "topology.h" 15 15 16 + #define AVS_COND_TYPE_NONE 0 17 + #define AVS_COND_TYPE_AECREF 1 18 + 16 19 struct avs_path { 17 20 u32 dma_id; 18 21 struct list_head ppl_list; 19 22 u32 state; 23 + 24 + /* condpath navigation for standard paths */ 25 + struct list_head source_list; 26 + struct list_head sink_list; 27 + 28 + /* conditional path fields */ 29 + struct avs_path *source; 30 + struct avs_path *sink; 31 + struct list_head source_node; 32 + struct list_head sink_node; 20 33 21 34 struct avs_tplg_path *template; 22 35 struct avs_dev *owner;
+110
sound/soc/intel/avs/topology.c
··· 1387 1387 }, 1388 1388 }; 1389 1389 1390 + static const struct avs_tplg_token_parser condpath_parsers[] = { 1391 + { 1392 + .token = AVS_TKN_CONDPATH_ID_U32, 1393 + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, 1394 + .offset = offsetof(struct avs_tplg_path, id), 1395 + .parse = avs_parse_word_token, 1396 + }, 1397 + { 1398 + .token = AVS_TKN_CONDPATH_SOURCE_PATH_ID_U32, 1399 + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, 1400 + .offset = offsetof(struct avs_tplg_path, source_path_id), 1401 + .parse = avs_parse_word_token, 1402 + }, 1403 + { 1404 + .token = AVS_TKN_CONDPATH_SINK_PATH_ID_U32, 1405 + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, 1406 + .offset = offsetof(struct avs_tplg_path, sink_path_id), 1407 + .parse = avs_parse_word_token, 1408 + }, 1409 + }; 1410 + 1390 1411 static struct avs_tplg_path * 1391 1412 avs_tplg_path_create(struct snd_soc_component *comp, struct avs_tplg_path_template *owner, 1392 1413 struct snd_soc_tplg_vendor_array *tuples, u32 block_size, ··· 1471 1450 .token = AVS_TKN_PATH_TMPL_ID_U32, 1472 1451 .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, 1473 1452 .offset = offsetof(struct avs_tplg_path_template, id), 1453 + .parse = avs_parse_word_token, 1454 + }, 1455 + }; 1456 + 1457 + static const struct avs_tplg_token_parser condpath_tmpl_parsers[] = { 1458 + { 1459 + .token = AVS_TKN_CONDPATH_TMPL_ID_U32, 1460 + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, 1461 + .offset = offsetof(struct avs_tplg_path_template, id), 1462 + .parse = avs_parse_word_token, 1463 + }, 1464 + { 1465 + .token = AVS_TKN_CONDPATH_TMPL_SOURCE_TPLG_NAME_STRING, 1466 + .type = SND_SOC_TPLG_TUPLE_TYPE_STRING, 1467 + .offset = offsetof(struct avs_tplg_path_template, source.tplg_name), 1468 + .parse = avs_parse_string_token, 1469 + }, 1470 + { 1471 + .token = AVS_TKN_CONDPATH_TMPL_SOURCE_PATH_TMPL_ID_U32, 1472 + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, 1473 + .offset = offsetof(struct avs_tplg_path_template, source.id), 1474 + .parse = avs_parse_word_token, 1475 + }, 1476 + { 1477 + .token = 
AVS_TKN_CONDPATH_TMPL_SINK_TPLG_NAME_STRING, 1478 + .type = SND_SOC_TPLG_TUPLE_TYPE_STRING, 1479 + .offset = offsetof(struct avs_tplg_path_template, sink.tplg_name), 1480 + .parse = avs_parse_string_token, 1481 + }, 1482 + { 1483 + .token = AVS_TKN_CONDPATH_TMPL_SINK_PATH_TMPL_ID_U32, 1484 + .type = SND_SOC_TPLG_TUPLE_TYPE_WORD, 1485 + .offset = offsetof(struct avs_tplg_path_template, sink.id), 1474 1486 .parse = avs_parse_word_token, 1475 1487 }, 1476 1488 }; ··· 1576 1522 return ERR_PTR(ret); 1577 1523 1578 1524 return template; 1525 + } 1526 + 1527 + static int avs_tplg_parse_condpath_templates(struct snd_soc_component *comp, 1528 + struct snd_soc_tplg_vendor_array *tuples, 1529 + u32 block_size) 1530 + { 1531 + struct avs_soc_component *acomp = to_avs_soc_component(comp); 1532 + struct avs_tplg *tplg = acomp->tplg; 1533 + int ret, i; 1534 + 1535 + ret = parse_dictionary_header(comp, tuples, (void **)&tplg->condpath_tmpls, 1536 + &tplg->num_condpath_tmpls, 1537 + sizeof(*tplg->condpath_tmpls), 1538 + AVS_TKN_MANIFEST_NUM_CONDPATH_TMPLS_U32); 1539 + if (ret) 1540 + return ret; 1541 + 1542 + block_size -= le32_to_cpu(tuples->size); 1543 + /* With header parsed, move on to parsing entries. */ 1544 + tuples = avs_tplg_vendor_array_next(tuples); 1545 + 1546 + for (i = 0; i < tplg->num_condpath_tmpls; i++) { 1547 + struct avs_tplg_path_template *template; 1548 + u32 esize; 1549 + 1550 + template = &tplg->condpath_tmpls[i]; 1551 + template->owner = tplg; /* Used when building sysfs hierarchy. 
*/ 1552 + INIT_LIST_HEAD(&template->path_list); 1553 + INIT_LIST_HEAD(&template->node); 1554 + 1555 + ret = avs_tplg_vendor_entry_size(tuples, block_size, 1556 + AVS_TKN_CONDPATH_TMPL_ID_U32, &esize); 1557 + if (ret) 1558 + return ret; 1559 + 1560 + ret = parse_path_template(comp, tuples, esize, template, 1561 + condpath_tmpl_parsers, 1562 + ARRAY_SIZE(condpath_tmpl_parsers), 1563 + condpath_parsers, 1564 + ARRAY_SIZE(condpath_parsers)); 1565 + if (ret < 0) { 1566 + dev_err(comp->dev, "parse condpath_tmpl: %d failed: %d\n", i, ret); 1567 + return ret; 1568 + } 1569 + 1570 + block_size -= esize; 1571 + tuples = avs_tplg_vendor_array_at(tuples, esize); 1572 + } 1573 + 1574 + return 0; 1579 1575 } 1580 1576 1581 1577 static const struct avs_tplg_token_parser mod_init_config_parsers[] = { ··· 1994 1890 dev_err(comp->dev, "init config lookup failed: %d\n", ret); 1995 1891 return ret; 1996 1892 } 1893 + 1894 + /* Condpaths dictionary. */ 1895 + ret = avs_tplg_parse_condpath_templates(comp, tuples, 1896 + has_init_config ? offset : remaining); 1897 + if (ret < 0) 1898 + return ret; 1997 1899 1998 1900 if (!has_init_config) 1999 1901 return 0;
+8
sound/soc/intel/avs/topology.h
··· 33 33 u32 num_pplcfgs; 34 34 struct avs_tplg_binding *bindings; 35 35 u32 num_bindings; 36 + struct avs_tplg_path_template *condpath_tmpls; 36 37 u32 num_condpath_tmpls; 37 38 struct avs_tplg_init_config *init_configs; 38 39 u32 num_init_configs; ··· 156 155 157 156 struct snd_soc_dapm_widget *w; 158 157 158 + /* Conditional path. */ 159 + struct avs_tplg_path_template_id source; 160 + struct avs_tplg_path_template_id sink; 161 + 159 162 struct list_head path_list; 160 163 161 164 struct avs_tplg *owner; ··· 181 176 /* Path format requirements. */ 182 177 struct avs_audio_format *fe_fmt; 183 178 struct avs_audio_format *be_fmt; 179 + /* Condpath path-variant requirements. */ 180 + u32 source_path_id; 181 + u32 sink_path_id; 184 182 185 183 struct list_head ppl_list; 186 184