Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge tag 'coresight-next-v6.12' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/coresight/linux into char-misc-next

Suzuki writes:

coresight: updates for Linux v6.12

CoreSight/hwtracing subsystem updates targeting Linux v6.12:
- Miscellaneous fixes and cleanups
- TraceID allocation per sink, allowing systems with > 110 cores to be
  traced with perf.

Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>

* tag 'coresight-next-v6.12' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/coresight/linux:
coresight: Make trace ID map spinlock local to the map
coresight: Emit sink ID in the HW_ID packets
coresight: Remove pending trace ID release mechanism
coresight: Use per-sink trace ID maps for Perf sessions
coresight: Make CPU id map a property of a trace ID map
coresight: Expose map arguments in trace ID API
coresight: Move struct coresight_trace_id_map to common header
coresight: Clarify comments around the PID of the sink owner
coresight: Remove unused ETM Perf stubs
coresight: tmc: sg: Do not leak sg_table
Coresight: Set correct cs_mode for dummy source to fix disable issue
Coresight: Set correct cs_mode for TPDM to fix disable issue
coresight: cti: use device_* to iterate over device child nodes

+221 -190
+25 -12
drivers/hwtracing/coresight/coresight-core.c
··· 487 487 return csdev; 488 488 } 489 489 490 + u32 coresight_get_sink_id(struct coresight_device *csdev) 491 + { 492 + if (!csdev->ea) 493 + return 0; 494 + 495 + /* 496 + * See function etm_perf_add_symlink_sink() to know where 497 + * this comes from. 498 + */ 499 + return (u32) (unsigned long) csdev->ea->var; 500 + } 501 + 490 502 static int coresight_sink_by_id(struct device *dev, const void *data) 491 503 { 492 504 struct coresight_device *csdev = to_coresight_device(dev); 493 - unsigned long hash; 494 505 495 506 if (csdev->type == CORESIGHT_DEV_TYPE_SINK || 496 - csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) { 497 - 498 - if (!csdev->ea) 499 - return 0; 500 - /* 501 - * See function etm_perf_add_symlink_sink() to know where 502 - * this comes from. 503 - */ 504 - hash = (unsigned long)csdev->ea->var; 505 - 506 - if ((u32)hash == *(u32 *)data) 507 + csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) { 508 + if (coresight_get_sink_id(csdev) == *(u32 *)data) 507 509 return 1; 508 510 } 509 511 ··· 904 902 struct coresight_device *csdev = to_coresight_device(dev); 905 903 906 904 fwnode_handle_put(csdev->dev.fwnode); 905 + free_percpu(csdev->perf_sink_id_map.cpu_map); 907 906 kfree(csdev); 908 907 } 909 908 ··· 1162 1159 csdev->dev.fwnode = fwnode_handle_get(dev_fwnode(desc->dev)); 1163 1160 dev_set_name(&csdev->dev, "%s", desc->name); 1164 1161 1162 + if (csdev->type == CORESIGHT_DEV_TYPE_SINK || 1163 + csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) { 1164 + spin_lock_init(&csdev->perf_sink_id_map.lock); 1165 + csdev->perf_sink_id_map.cpu_map = alloc_percpu(atomic_t); 1166 + if (!csdev->perf_sink_id_map.cpu_map) { 1167 + kfree(csdev); 1168 + ret = -ENOMEM; 1169 + goto err_out; 1170 + } 1171 + } 1165 1172 /* 1166 1173 * Make sure the device registration and the connection fixup 1167 1174 * are synchronised, so that we don't see uninitialised devices
+3 -7
drivers/hwtracing/coresight/coresight-cti-platform.c
··· 416 416 struct cti_drvdata *drvdata) 417 417 { 418 418 int rc = 0; 419 - struct fwnode_handle *fwnode = dev_fwnode(dev); 420 - struct fwnode_handle *child = NULL; 421 419 422 - if (IS_ERR_OR_NULL(fwnode)) 420 + if (IS_ERR_OR_NULL(dev_fwnode(dev))) 423 421 return -EINVAL; 424 422 425 - fwnode_for_each_child_node(fwnode, child) { 423 + device_for_each_child_node_scoped(dev, child) { 426 424 if (cti_plat_node_name_eq(child, CTI_DT_CONNS)) 427 - rc = cti_plat_create_connection(dev, drvdata, 428 - child); 425 + rc = cti_plat_create_connection(dev, drvdata, child); 429 426 if (rc != 0) 430 427 break; 431 428 } 432 - fwnode_handle_put(child); 433 429 434 430 return rc; 435 431 }
+6 -1
drivers/hwtracing/coresight/coresight-dummy.c
··· 21 21 DEFINE_CORESIGHT_DEVLIST(sink_devs, "dummy_sink"); 22 22 23 23 static int dummy_source_enable(struct coresight_device *csdev, 24 - struct perf_event *event, enum cs_mode mode) 24 + struct perf_event *event, enum cs_mode mode, 25 + __maybe_unused struct coresight_trace_id_map *id_map) 25 26 { 27 + if (!coresight_take_mode(csdev, mode)) 28 + return -EBUSY; 29 + 26 30 dev_dbg(csdev->dev.parent, "Dummy source enabled\n"); 27 31 28 32 return 0; ··· 35 31 static void dummy_source_disable(struct coresight_device *csdev, 36 32 struct perf_event *event) 37 33 { 34 + coresight_set_mode(csdev, CS_MODE_DISABLED); 38 35 dev_dbg(csdev->dev.parent, "Dummy source disabled\n"); 39 36 } 40 37
+31 -16
drivers/hwtracing/coresight/coresight-etm-perf.c
··· 229 229 struct list_head **ppath; 230 230 231 231 ppath = etm_event_cpu_path_ptr(event_data, cpu); 232 - if (!(IS_ERR_OR_NULL(*ppath))) 233 - coresight_release_path(*ppath); 234 - *ppath = NULL; 235 - coresight_trace_id_put_cpu_id(cpu); 236 - } 232 + if (!(IS_ERR_OR_NULL(*ppath))) { 233 + struct coresight_device *sink = coresight_get_sink(*ppath); 237 234 238 - /* mark perf event as done for trace id allocator */ 239 - coresight_trace_id_perf_stop(); 235 + /* 236 + * Mark perf event as done for trace id allocator, but don't call 237 + * coresight_trace_id_put_cpu_id_map() on individual IDs. Perf sessions 238 + * never free trace IDs to ensure that the ID associated with a CPU 239 + * cannot change during their and other's concurrent sessions. Instead, 240 + * a refcount is used so that the last event to call 241 + * coresight_trace_id_perf_stop() frees all IDs. 242 + */ 243 + coresight_trace_id_perf_stop(&sink->perf_sink_id_map); 244 + 245 + coresight_release_path(*ppath); 246 + } 247 + *ppath = NULL; 248 + } 240 249 241 250 free_percpu(event_data->path); 242 251 kfree(event_data); ··· 334 325 sink = user_sink = coresight_get_sink_by_id(id); 335 326 } 336 327 337 - /* tell the trace ID allocator that a perf event is starting up */ 338 - coresight_trace_id_perf_start(); 339 - 340 328 /* check if user wants a coresight configuration selected */ 341 329 cfg_hash = (u32)((event->attr.config2 & GENMASK_ULL(63, 32)) >> 32); 342 330 if (cfg_hash) { ··· 407 401 } 408 402 409 403 /* ensure we can allocate a trace ID for this CPU */ 410 - trace_id = coresight_trace_id_get_cpu_id(cpu); 404 + trace_id = coresight_trace_id_get_cpu_id_map(cpu, &sink->perf_sink_id_map); 411 405 if (!IS_VALID_CS_TRACE_ID(trace_id)) { 412 406 cpumask_clear_cpu(cpu, mask); 413 407 coresight_release_path(path); 414 408 continue; 415 409 } 416 410 411 + coresight_trace_id_perf_start(&sink->perf_sink_id_map); 417 412 *etm_event_cpu_path_ptr(event_data, cpu) = path; 418 413 } 419 414 ··· 460 453 
struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu); 461 454 struct list_head *path; 462 455 u64 hw_id; 456 + u8 trace_id; 463 457 464 458 if (!csdev) 465 459 goto fail; ··· 503 495 goto fail_end_stop; 504 496 505 497 /* Finally enable the tracer */ 506 - if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF)) 498 + if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF, 499 + &sink->perf_sink_id_map)) 507 500 goto fail_disable_path; 508 501 509 502 /* ··· 513 504 */ 514 505 if (!cpumask_test_cpu(cpu, &event_data->aux_hwid_done)) { 515 506 cpumask_set_cpu(cpu, &event_data->aux_hwid_done); 516 - hw_id = FIELD_PREP(CS_AUX_HW_ID_VERSION_MASK, 517 - CS_AUX_HW_ID_CURR_VERSION); 518 - hw_id |= FIELD_PREP(CS_AUX_HW_ID_TRACE_ID_MASK, 519 - coresight_trace_id_read_cpu_id(cpu)); 507 + 508 + trace_id = coresight_trace_id_read_cpu_id_map(cpu, &sink->perf_sink_id_map); 509 + 510 + hw_id = FIELD_PREP(CS_AUX_HW_ID_MAJOR_VERSION_MASK, 511 + CS_AUX_HW_ID_MAJOR_VERSION); 512 + hw_id |= FIELD_PREP(CS_AUX_HW_ID_MINOR_VERSION_MASK, 513 + CS_AUX_HW_ID_MINOR_VERSION); 514 + hw_id |= FIELD_PREP(CS_AUX_HW_ID_TRACE_ID_MASK, trace_id); 515 + hw_id |= FIELD_PREP(CS_AUX_HW_ID_SINK_ID_MASK, coresight_get_sink_id(sink)); 516 + 520 517 perf_report_aux_output_id(event, hw_id); 521 518 } 522 519
-18
drivers/hwtracing/coresight/coresight-etm-perf.h
··· 62 62 struct list_head * __percpu *path; 63 63 }; 64 64 65 - #if IS_ENABLED(CONFIG_CORESIGHT) 66 65 int etm_perf_symlink(struct coresight_device *csdev, bool link); 67 66 int etm_perf_add_symlink_sink(struct coresight_device *csdev); 68 67 void etm_perf_del_symlink_sink(struct coresight_device *csdev); ··· 76 77 int etm_perf_add_symlink_cscfg(struct device *dev, 77 78 struct cscfg_config_desc *config_desc); 78 79 void etm_perf_del_symlink_cscfg(struct cscfg_config_desc *config_desc); 79 - #else 80 - static inline int etm_perf_symlink(struct coresight_device *csdev, bool link) 81 - { return -EINVAL; } 82 - int etm_perf_add_symlink_sink(struct coresight_device *csdev) 83 - { return -EINVAL; } 84 - void etm_perf_del_symlink_sink(struct coresight_device *csdev) {} 85 - static inline void *etm_perf_sink_config(struct perf_output_handle *handle) 86 - { 87 - return NULL; 88 - } 89 - int etm_perf_add_symlink_cscfg(struct device *dev, 90 - struct cscfg_config_desc *config_desc) 91 - { return -EINVAL; } 92 - void etm_perf_del_symlink_cscfg(struct cscfg_config_desc *config_desc) {} 93 - 94 - #endif /* CONFIG_CORESIGHT */ 95 - 96 80 int __init etm_perf_init(void); 97 81 void etm_perf_exit(void); 98 82
+5 -4
drivers/hwtracing/coresight/coresight-etm3x-core.c
··· 481 481 } 482 482 483 483 static int etm_enable_perf(struct coresight_device *csdev, 484 - struct perf_event *event) 484 + struct perf_event *event, 485 + struct coresight_trace_id_map *id_map) 485 486 { 486 487 struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 487 488 int trace_id; ··· 501 500 * with perf locks - we know the ID cannot change until perf shuts down 502 501 * the session 503 502 */ 504 - trace_id = coresight_trace_id_read_cpu_id(drvdata->cpu); 503 + trace_id = coresight_trace_id_read_cpu_id_map(drvdata->cpu, id_map); 505 504 if (!IS_VALID_CS_TRACE_ID(trace_id)) { 506 505 dev_err(&drvdata->csdev->dev, "Failed to set trace ID for %s on CPU%d\n", 507 506 dev_name(&drvdata->csdev->dev), drvdata->cpu); ··· 554 553 } 555 554 556 555 static int etm_enable(struct coresight_device *csdev, struct perf_event *event, 557 - enum cs_mode mode) 556 + enum cs_mode mode, struct coresight_trace_id_map *id_map) 558 557 { 559 558 int ret; 560 559 struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); ··· 569 568 ret = etm_enable_sysfs(csdev); 570 569 break; 571 570 case CS_MODE_PERF: 572 - ret = etm_enable_perf(csdev, event); 571 + ret = etm_enable_perf(csdev, event, id_map); 573 572 break; 574 573 default: 575 574 ret = -EINVAL;
+5 -4
drivers/hwtracing/coresight/coresight-etm4x-core.c
··· 752 752 } 753 753 754 754 static int etm4_enable_perf(struct coresight_device *csdev, 755 - struct perf_event *event) 755 + struct perf_event *event, 756 + struct coresight_trace_id_map *id_map) 756 757 { 757 758 int ret = 0, trace_id; 758 759 struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); ··· 776 775 * with perf locks - we know the ID cannot change until perf shuts down 777 776 * the session 778 777 */ 779 - trace_id = coresight_trace_id_read_cpu_id(drvdata->cpu); 778 + trace_id = coresight_trace_id_read_cpu_id_map(drvdata->cpu, id_map); 780 779 if (!IS_VALID_CS_TRACE_ID(trace_id)) { 781 780 dev_err(&drvdata->csdev->dev, "Failed to set trace ID for %s on CPU%d\n", 782 781 dev_name(&drvdata->csdev->dev), drvdata->cpu); ··· 838 837 } 839 838 840 839 static int etm4_enable(struct coresight_device *csdev, struct perf_event *event, 841 - enum cs_mode mode) 840 + enum cs_mode mode, struct coresight_trace_id_map *id_map) 842 841 { 843 842 int ret; 844 843 ··· 852 851 ret = etm4_enable_sysfs(csdev); 853 852 break; 854 853 case CS_MODE_PERF: 855 - ret = etm4_enable_perf(csdev, event); 854 + ret = etm4_enable_perf(csdev, event, id_map); 856 855 break; 857 856 default: 858 857 ret = -EINVAL;
+1
drivers/hwtracing/coresight/coresight-priv.h
··· 148 148 struct coresight_device *target); 149 149 void coresight_remove_links(struct coresight_device *orig, 150 150 struct coresight_connection *conn); 151 + u32 coresight_get_sink_id(struct coresight_device *csdev); 151 152 152 153 #if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM3X) 153 154 extern int etm_readl_cp14(u32 off, unsigned int *val);
+2 -1
drivers/hwtracing/coresight/coresight-stm.c
··· 194 194 } 195 195 196 196 static int stm_enable(struct coresight_device *csdev, struct perf_event *event, 197 - enum cs_mode mode) 197 + enum cs_mode mode, 198 + __maybe_unused struct coresight_trace_id_map *trace_id) 198 199 { 199 200 struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 200 201
+2 -1
drivers/hwtracing/coresight/coresight-sysfs.c
··· 9 9 #include <linux/kernel.h> 10 10 11 11 #include "coresight-priv.h" 12 + #include "coresight-trace-id.h" 12 13 13 14 /* 14 15 * Use IDR to map the hash of the source's device name ··· 64 63 */ 65 64 lockdep_assert_held(&coresight_mutex); 66 65 if (coresight_get_mode(csdev) != CS_MODE_SYSFS) { 67 - ret = source_ops(csdev)->enable(csdev, data, mode); 66 + ret = source_ops(csdev)->enable(csdev, data, mode, NULL); 68 67 if (ret) 69 68 return ret; 70 69 }
+4 -3
drivers/hwtracing/coresight/coresight-tmc-etr.c
··· 36 36 * etr_perf_buffer - Perf buffer used for ETR 37 37 * @drvdata - The ETR drvdaga this buffer has been allocated for. 38 38 * @etr_buf - Actual buffer used by the ETR 39 - * @pid - The PID this etr_perf_buffer belongs to. 39 + * @pid - The PID of the session owner that etr_perf_buffer 40 + * belongs to. 40 41 * @snaphost - Perf session mode 41 42 * @nr_pages - Number of pages in the ring buffer. 42 43 * @pages - Array of Pages in the ring buffer. ··· 262 261 { 263 262 tmc_free_table_pages(sg_table); 264 263 tmc_free_data_pages(sg_table); 264 + kfree(sg_table); 265 265 } 266 266 EXPORT_SYMBOL_GPL(tmc_free_sg_table); 267 267 ··· 344 342 rc = tmc_alloc_table_pages(sg_table); 345 343 if (rc) { 346 344 tmc_free_sg_table(sg_table); 347 - kfree(sg_table); 348 345 return ERR_PTR(rc); 349 346 } 350 347 ··· 1663 1662 goto unlock_out; 1664 1663 } 1665 1664 1666 - /* Get a handle on the pid of the process to monitor */ 1665 + /* Get a handle on the pid of the session owner */ 1667 1666 pid = etr_perf->pid; 1668 1667 1669 1668 /* Do not proceed if this device is associated with another session */
+3 -2
drivers/hwtracing/coresight/coresight-tmc.h
··· 171 171 * @csdev: component vitals needed by the framework. 172 172 * @miscdev: specifics to handle "/dev/xyz.tmc" entry. 173 173 * @spinlock: only one at a time pls. 174 - * @pid: Process ID of the process being monitored by the session 175 - * that is using this component. 174 + * @pid: Process ID of the process that owns the session that is using 175 + * this component. For example this would be the pid of the Perf 176 + * process. 176 177 * @buf: Snapshot of the trace data for ETF/ETB. 177 178 * @etr_buf: details of buffer used in TMC-ETR 178 179 * @len: size of the available trace for ETF/ETB.
+8 -1
drivers/hwtracing/coresight/coresight-tpdm.c
··· 439 439 } 440 440 441 441 static int tpdm_enable(struct coresight_device *csdev, struct perf_event *event, 442 - enum cs_mode mode) 442 + enum cs_mode mode, 443 + __maybe_unused struct coresight_trace_id_map *id_map) 443 444 { 444 445 struct tpdm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 445 446 446 447 spin_lock(&drvdata->spinlock); 447 448 if (drvdata->enable) { 449 + spin_unlock(&drvdata->spinlock); 450 + return -EBUSY; 451 + } 452 + 453 + if (!coresight_take_mode(csdev, mode)) { 448 454 spin_unlock(&drvdata->spinlock); 449 455 return -EBUSY; 450 456 } ··· 512 506 } 513 507 514 508 __tpdm_disable(drvdata); 509 + coresight_set_mode(csdev, CS_MODE_DISABLED); 515 510 drvdata->enable = false; 516 511 spin_unlock(&drvdata->spinlock); 517 512
+61 -77
drivers/hwtracing/coresight/coresight-trace-id.c
··· 3 3 * Copyright (c) 2022, Linaro Limited, All rights reserved. 4 4 * Author: Mike Leach <mike.leach@linaro.org> 5 5 */ 6 + #include <linux/coresight.h> 6 7 #include <linux/coresight-pmu.h> 7 8 #include <linux/cpumask.h> 8 9 #include <linux/kernel.h> ··· 12 11 13 12 #include "coresight-trace-id.h" 14 13 15 - /* Default trace ID map. Used on systems that don't require per sink mappings */ 16 - static struct coresight_trace_id_map id_map_default; 17 - 18 - /* maintain a record of the mapping of IDs and pending releases per cpu */ 19 - static DEFINE_PER_CPU(atomic_t, cpu_id) = ATOMIC_INIT(0); 20 - static cpumask_t cpu_id_release_pending; 21 - 22 - /* perf session active counter */ 23 - static atomic_t perf_cs_etm_session_active = ATOMIC_INIT(0); 24 - 25 - /* lock to protect id_map and cpu data */ 26 - static DEFINE_SPINLOCK(id_map_lock); 14 + /* Default trace ID map. Used in sysfs mode and for system sources */ 15 + static DEFINE_PER_CPU(atomic_t, id_map_default_cpu_ids) = ATOMIC_INIT(0); 16 + static struct coresight_trace_id_map id_map_default = { 17 + .cpu_map = &id_map_default_cpu_ids, 18 + .lock = __SPIN_LOCK_UNLOCKED(id_map_default.lock) 19 + }; 27 20 28 21 /* #define TRACE_ID_DEBUG 1 */ 29 22 #if defined(TRACE_ID_DEBUG) || defined(CONFIG_COMPILE_TEST) ··· 27 32 { 28 33 pr_debug("%s id_map::\n", func_name); 29 34 pr_debug("Used = %*pb\n", CORESIGHT_TRACE_IDS_MAX, id_map->used_ids); 30 - pr_debug("Pend = %*pb\n", CORESIGHT_TRACE_IDS_MAX, id_map->pend_rel_ids); 31 35 } 32 36 #define DUMP_ID_MAP(map) coresight_trace_id_dump_table(map, __func__) 33 37 #define DUMP_ID_CPU(cpu, id) pr_debug("%s called; cpu=%d, id=%d\n", __func__, cpu, id) ··· 40 46 #endif 41 47 42 48 /* unlocked read of current trace ID value for given CPU */ 43 - static int _coresight_trace_id_read_cpu_id(int cpu) 49 + static int _coresight_trace_id_read_cpu_id(int cpu, struct coresight_trace_id_map *id_map) 44 50 { 45 - return atomic_read(&per_cpu(cpu_id, cpu)); 51 + return 
atomic_read(per_cpu_ptr(id_map->cpu_map, cpu)); 46 52 } 47 53 48 54 /* look for next available odd ID, return 0 if none found */ ··· 113 119 clear_bit(id, id_map->used_ids); 114 120 } 115 121 116 - static void coresight_trace_id_set_pend_rel(int id, struct coresight_trace_id_map *id_map) 117 - { 118 - if (WARN(!IS_VALID_CS_TRACE_ID(id), "Invalid Trace ID %d\n", id)) 119 - return; 120 - set_bit(id, id_map->pend_rel_ids); 121 - } 122 - 123 122 /* 124 - * release all pending IDs for all current maps & clear CPU associations 125 - * 126 - * This currently operates on the default id map, but may be extended to 127 - * operate on all registered id maps if per sink id maps are used. 123 + * Release all IDs and clear CPU associations. 128 124 */ 129 - static void coresight_trace_id_release_all_pending(void) 125 + static void coresight_trace_id_release_all(struct coresight_trace_id_map *id_map) 130 126 { 131 - struct coresight_trace_id_map *id_map = &id_map_default; 132 127 unsigned long flags; 133 - int cpu, bit; 128 + int cpu; 134 129 135 - spin_lock_irqsave(&id_map_lock, flags); 136 - for_each_set_bit(bit, id_map->pend_rel_ids, CORESIGHT_TRACE_ID_RES_TOP) { 137 - clear_bit(bit, id_map->used_ids); 138 - clear_bit(bit, id_map->pend_rel_ids); 139 - } 140 - for_each_cpu(cpu, &cpu_id_release_pending) { 141 - atomic_set(&per_cpu(cpu_id, cpu), 0); 142 - cpumask_clear_cpu(cpu, &cpu_id_release_pending); 143 - } 144 - spin_unlock_irqrestore(&id_map_lock, flags); 130 + spin_lock_irqsave(&id_map->lock, flags); 131 + bitmap_zero(id_map->used_ids, CORESIGHT_TRACE_IDS_MAX); 132 + for_each_possible_cpu(cpu) 133 + atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), 0); 134 + spin_unlock_irqrestore(&id_map->lock, flags); 145 135 DUMP_ID_MAP(id_map); 146 136 } 147 137 148 - static int coresight_trace_id_map_get_cpu_id(int cpu, struct coresight_trace_id_map *id_map) 138 + static int _coresight_trace_id_get_cpu_id(int cpu, struct coresight_trace_id_map *id_map) 149 139 { 150 140 unsigned long 
flags; 151 141 int id; 152 142 153 - spin_lock_irqsave(&id_map_lock, flags); 143 + spin_lock_irqsave(&id_map->lock, flags); 154 144 155 145 /* check for existing allocation for this CPU */ 156 - id = _coresight_trace_id_read_cpu_id(cpu); 146 + id = _coresight_trace_id_read_cpu_id(cpu, id_map); 157 147 if (id) 158 - goto get_cpu_id_clr_pend; 148 + goto get_cpu_id_out_unlock; 159 149 160 150 /* 161 151 * Find a new ID. ··· 158 180 goto get_cpu_id_out_unlock; 159 181 160 182 /* allocate the new id to the cpu */ 161 - atomic_set(&per_cpu(cpu_id, cpu), id); 162 - 163 - get_cpu_id_clr_pend: 164 - /* we are (re)using this ID - so ensure it is not marked for release */ 165 - cpumask_clear_cpu(cpu, &cpu_id_release_pending); 166 - clear_bit(id, id_map->pend_rel_ids); 183 + atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), id); 167 184 168 185 get_cpu_id_out_unlock: 169 - spin_unlock_irqrestore(&id_map_lock, flags); 186 + spin_unlock_irqrestore(&id_map->lock, flags); 170 187 171 188 DUMP_ID_CPU(cpu, id); 172 189 DUMP_ID_MAP(id_map); 173 190 return id; 174 191 } 175 192 176 - static void coresight_trace_id_map_put_cpu_id(int cpu, struct coresight_trace_id_map *id_map) 193 + static void _coresight_trace_id_put_cpu_id(int cpu, struct coresight_trace_id_map *id_map) 177 194 { 178 195 unsigned long flags; 179 196 int id; 180 197 181 198 /* check for existing allocation for this CPU */ 182 - id = _coresight_trace_id_read_cpu_id(cpu); 199 + id = _coresight_trace_id_read_cpu_id(cpu, id_map); 183 200 if (!id) 184 201 return; 185 202 186 - spin_lock_irqsave(&id_map_lock, flags); 203 + spin_lock_irqsave(&id_map->lock, flags); 187 204 188 - if (atomic_read(&perf_cs_etm_session_active)) { 189 - /* set release at pending if perf still active */ 190 - coresight_trace_id_set_pend_rel(id, id_map); 191 - cpumask_set_cpu(cpu, &cpu_id_release_pending); 192 - } else { 193 - /* otherwise clear id */ 194 - coresight_trace_id_free(id, id_map); 195 - atomic_set(&per_cpu(cpu_id, cpu), 0); 196 - } 205 + 
coresight_trace_id_free(id, id_map); 206 + atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), 0); 197 207 198 - spin_unlock_irqrestore(&id_map_lock, flags); 208 + spin_unlock_irqrestore(&id_map->lock, flags); 199 209 DUMP_ID_CPU(cpu, id); 200 210 DUMP_ID_MAP(id_map); 201 211 } ··· 193 227 unsigned long flags; 194 228 int id; 195 229 196 - spin_lock_irqsave(&id_map_lock, flags); 230 + spin_lock_irqsave(&id_map->lock, flags); 197 231 /* prefer odd IDs for system components to avoid legacy CPU IDS */ 198 232 id = coresight_trace_id_alloc_new_id(id_map, 0, true); 199 - spin_unlock_irqrestore(&id_map_lock, flags); 233 + spin_unlock_irqrestore(&id_map->lock, flags); 200 234 201 235 DUMP_ID(id); 202 236 DUMP_ID_MAP(id_map); ··· 207 241 { 208 242 unsigned long flags; 209 243 210 - spin_lock_irqsave(&id_map_lock, flags); 244 + spin_lock_irqsave(&id_map->lock, flags); 211 245 coresight_trace_id_free(id, id_map); 212 - spin_unlock_irqrestore(&id_map_lock, flags); 246 + spin_unlock_irqrestore(&id_map->lock, flags); 213 247 214 248 DUMP_ID(id); 215 249 DUMP_ID_MAP(id_map); ··· 219 253 220 254 int coresight_trace_id_get_cpu_id(int cpu) 221 255 { 222 - return coresight_trace_id_map_get_cpu_id(cpu, &id_map_default); 256 + return _coresight_trace_id_get_cpu_id(cpu, &id_map_default); 223 257 } 224 258 EXPORT_SYMBOL_GPL(coresight_trace_id_get_cpu_id); 225 259 260 + int coresight_trace_id_get_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map) 261 + { 262 + return _coresight_trace_id_get_cpu_id(cpu, id_map); 263 + } 264 + EXPORT_SYMBOL_GPL(coresight_trace_id_get_cpu_id_map); 265 + 226 266 void coresight_trace_id_put_cpu_id(int cpu) 227 267 { 228 - coresight_trace_id_map_put_cpu_id(cpu, &id_map_default); 268 + _coresight_trace_id_put_cpu_id(cpu, &id_map_default); 229 269 } 230 270 EXPORT_SYMBOL_GPL(coresight_trace_id_put_cpu_id); 231 271 272 + void coresight_trace_id_put_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map) 273 + { 274 + _coresight_trace_id_put_cpu_id(cpu, 
id_map); 275 + } 276 + EXPORT_SYMBOL_GPL(coresight_trace_id_put_cpu_id_map); 277 + 232 278 int coresight_trace_id_read_cpu_id(int cpu) 233 279 { 234 - return _coresight_trace_id_read_cpu_id(cpu); 280 + return _coresight_trace_id_read_cpu_id(cpu, &id_map_default); 235 281 } 236 282 EXPORT_SYMBOL_GPL(coresight_trace_id_read_cpu_id); 283 + 284 + int coresight_trace_id_read_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map) 285 + { 286 + return _coresight_trace_id_read_cpu_id(cpu, id_map); 287 + } 288 + EXPORT_SYMBOL_GPL(coresight_trace_id_read_cpu_id_map); 237 289 238 290 int coresight_trace_id_get_system_id(void) 239 291 { ··· 265 281 } 266 282 EXPORT_SYMBOL_GPL(coresight_trace_id_put_system_id); 267 283 268 - void coresight_trace_id_perf_start(void) 284 + void coresight_trace_id_perf_start(struct coresight_trace_id_map *id_map) 269 285 { 270 - atomic_inc(&perf_cs_etm_session_active); 271 - PERF_SESSION(atomic_read(&perf_cs_etm_session_active)); 286 + atomic_inc(&id_map->perf_cs_etm_session_active); 287 + PERF_SESSION(atomic_read(&id_map->perf_cs_etm_session_active)); 272 288 } 273 289 EXPORT_SYMBOL_GPL(coresight_trace_id_perf_start); 274 290 275 - void coresight_trace_id_perf_stop(void) 291 + void coresight_trace_id_perf_stop(struct coresight_trace_id_map *id_map) 276 292 { 277 - if (!atomic_dec_return(&perf_cs_etm_session_active)) 278 - coresight_trace_id_release_all_pending(); 279 - PERF_SESSION(atomic_read(&perf_cs_etm_session_active)); 293 + if (!atomic_dec_return(&id_map->perf_cs_etm_session_active)) 294 + coresight_trace_id_release_all(id_map); 295 + PERF_SESSION(atomic_read(&id_map->perf_cs_etm_session_active)); 280 296 } 281 297 EXPORT_SYMBOL_GPL(coresight_trace_id_perf_stop);
+32 -38
drivers/hwtracing/coresight/coresight-trace-id.h
··· 17 17 * released when done. 18 18 * 19 19 * In order to ensure that a consistent cpu / ID matching is maintained 20 - * throughout a perf cs_etm event session - a session in progress flag will 21 - * be maintained, and released IDs not cleared until the perf session is 22 - * complete. This allows the same CPU to be re-allocated its prior ID. 20 + * throughout a perf cs_etm event session - a session in progress flag will be 21 + * maintained for each sink, and IDs are cleared when all the perf sessions 22 + * complete. This allows the same CPU to be re-allocated its prior ID when 23 + * events are scheduled in and out. 23 24 * 24 25 * 25 26 * Trace ID maps will be created and initialised to prevent architecturally ··· 33 32 #include <linux/bitops.h> 34 33 #include <linux/types.h> 35 34 36 - 37 - /* architecturally we have 128 IDs some of which are reserved */ 38 - #define CORESIGHT_TRACE_IDS_MAX 128 39 - 40 35 /* ID 0 is reserved */ 41 36 #define CORESIGHT_TRACE_ID_RES_0 0 42 37 ··· 42 45 /* check an ID is in the valid range */ 43 46 #define IS_VALID_CS_TRACE_ID(id) \ 44 47 ((id > CORESIGHT_TRACE_ID_RES_0) && (id < CORESIGHT_TRACE_ID_RES_TOP)) 45 - 46 - /** 47 - * Trace ID map. 48 - * 49 - * @used_ids: Bitmap to register available (bit = 0) and in use (bit = 1) IDs. 50 - * Initialised so that the reserved IDs are permanently marked as 51 - * in use. 52 - * @pend_rel_ids: CPU IDs that have been released by the trace source but not 53 - * yet marked as available, to allow re-allocation to the same 54 - * CPU during a perf session. 55 - */ 56 - struct coresight_trace_id_map { 57 - DECLARE_BITMAP(used_ids, CORESIGHT_TRACE_IDS_MAX); 58 - DECLARE_BITMAP(pend_rel_ids, CORESIGHT_TRACE_IDS_MAX); 59 - }; 60 - 61 - /* Allocate and release IDs for a single default trace ID map */ 62 48 63 49 /** 64 50 * Read and optionally allocate a CoreSight trace ID and associate with a CPU. 
··· 59 79 int coresight_trace_id_get_cpu_id(int cpu); 60 80 61 81 /** 82 + * Version of coresight_trace_id_get_cpu_id() that allows the ID map to operate 83 + * on to be provided. 84 + */ 85 + int coresight_trace_id_get_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map); 86 + 87 + /** 62 88 * Release an allocated trace ID associated with the CPU. 63 89 * 64 - * This will release the CoreSight trace ID associated with the CPU, 65 - * unless a perf session is in operation. 66 - * 67 - * If a perf session is in operation then the ID will be marked as pending 68 - * release. 90 + * This will release the CoreSight trace ID associated with the CPU. 69 91 * 70 92 * @cpu: The CPU index to release the associated trace ID. 71 93 */ 72 94 void coresight_trace_id_put_cpu_id(int cpu); 95 + 96 + /** 97 + * Version of coresight_trace_id_put_cpu_id() that allows the ID map to operate 98 + * on to be provided. 99 + */ 100 + void coresight_trace_id_put_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map); 73 101 74 102 /** 75 103 * Read the current allocated CoreSight Trace ID value for the CPU. ··· 98 110 * return: current value, will be 0 if unallocated. 99 111 */ 100 112 int coresight_trace_id_read_cpu_id(int cpu); 113 + 114 + /** 115 + * Version of coresight_trace_id_read_cpu_id() that allows the ID map to operate 116 + * on to be provided. 117 + */ 118 + int coresight_trace_id_read_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map); 101 119 102 120 /** 103 121 * Allocate a CoreSight trace ID for a system component. ··· 130 136 /** 131 137 * Notify the Trace ID allocator that a perf session is starting. 132 138 * 133 - * Increase the perf session reference count - called by perf when setting up 134 - * a trace event. 139 + * Increase the perf session reference count - called by perf when setting up a 140 + * trace event. 
135 141 * 136 - * This reference count is used by the ID allocator to ensure that trace IDs 137 - * associated with a CPU cannot change or be released during a perf session. 142 + * Perf sessions never free trace IDs to ensure that the ID associated with a 143 + * CPU cannot change during their and other's concurrent sessions. Instead, 144 + * this refcount is used so that the last event to finish always frees all IDs. 138 145 */ 139 - void coresight_trace_id_perf_start(void); 146 + void coresight_trace_id_perf_start(struct coresight_trace_id_map *id_map); 140 147 141 148 /** 142 149 * Notify the ID allocator that a perf session is stopping. 143 150 * 144 - * Decrease the perf session reference count. 145 - * if this causes the count to go to zero, then all Trace IDs marked as pending 146 - * release, will be released. 151 + * Decrease the perf session reference count. If this causes the count to go to 152 + * zero, then all Trace IDs will be released. 147 153 */ 148 - void coresight_trace_id_perf_stop(void); 154 + void coresight_trace_id_perf_stop(struct coresight_trace_id_map *id_map); 149 155 150 156 #endif /* _CORESIGHT_TRACE_ID_H */
+13 -4
include/linux/coresight-pmu.h
··· 49 49 * Interpretation of the PERF_RECORD_AUX_OUTPUT_HW_ID payload. 50 50 * Used to associate a CPU with the CoreSight Trace ID. 51 51 * [07:00] - Trace ID - uses 8 bits to make value easy to read in file. 52 - * [59:08] - Unused (SBZ) 53 - * [63:60] - Version 52 + * [39:08] - Sink ID - as reported in /sys/bus/event_source/devices/cs_etm/sinks/ 53 + * Added in minor version 1. 54 + * [55:40] - Unused (SBZ) 55 + * [59:56] - Minor Version - previously existing fields are compatible with 56 + * all minor versions. 57 + * [63:60] - Major Version - previously existing fields mean different things 58 + * in new major versions. 54 59 */ 55 60 #define CS_AUX_HW_ID_TRACE_ID_MASK GENMASK_ULL(7, 0) 56 - #define CS_AUX_HW_ID_VERSION_MASK GENMASK_ULL(63, 60) 61 + #define CS_AUX_HW_ID_SINK_ID_MASK GENMASK_ULL(39, 8) 57 62 58 - #define CS_AUX_HW_ID_CURR_VERSION 0 63 + #define CS_AUX_HW_ID_MINOR_VERSION_MASK GENMASK_ULL(59, 56) 64 + #define CS_AUX_HW_ID_MAJOR_VERSION_MASK GENMASK_ULL(63, 60) 65 + 66 + #define CS_AUX_HW_ID_MAJOR_VERSION 0 67 + #define CS_AUX_HW_ID_MINOR_VERSION 1 59 68 60 69 #endif
+20 -1
include/linux/coresight.h
··· 218 218 const char *target_name; 219 219 }; 220 220 221 + /* architecturally we have 128 IDs some of which are reserved */ 222 + #define CORESIGHT_TRACE_IDS_MAX 128 223 + 224 + /** 225 + * Trace ID map. 226 + * 227 + * @used_ids: Bitmap to register available (bit = 0) and in use (bit = 1) IDs. 228 + * Initialised so that the reserved IDs are permanently marked as 229 + * in use. 230 + * @perf_cs_etm_session_active: Number of Perf sessions using this ID map. 231 + */ 232 + struct coresight_trace_id_map { 233 + DECLARE_BITMAP(used_ids, CORESIGHT_TRACE_IDS_MAX); 234 + atomic_t __percpu *cpu_map; 235 + atomic_t perf_cs_etm_session_active; 236 + spinlock_t lock; 237 + }; 238 + 221 239 /** 222 240 * struct coresight_device - representation of a device as used by the framework 223 241 * @pdata: Platform data with device connections associated to this device. ··· 289 271 bool sysfs_sink_activated; 290 272 struct dev_ext_attribute *ea; 291 273 struct coresight_device *def_sink; 274 + struct coresight_trace_id_map perf_sink_id_map; 292 275 /* sysfs links between components */ 293 276 int nr_links; 294 277 bool has_conns_grp; ··· 384 365 struct coresight_ops_source { 385 366 int (*cpu_id)(struct coresight_device *csdev); 386 367 int (*enable)(struct coresight_device *csdev, struct perf_event *event, 387 - enum cs_mode mode); 368 + enum cs_mode mode, struct coresight_trace_id_map *id_map); 388 369 void (*disable)(struct coresight_device *csdev, 389 370 struct perf_event *event); 390 371 };