Merge tag 'coresight-next-v6.12' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/coresight/linux into char-misc-next

Suzuki writes:

coresight: updates for Linux v6.12

CoreSight/hwtracing subsystem updates targeting Linux v6.12:
 - Miscellaneous fixes and cleanups
 - TraceID allocation per sink, allowing systems with more than 110 cores
   to use perf tracing (see the capacity sketch after the commit list below).

Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>

* tag 'coresight-next-v6.12' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/coresight/linux:
  coresight: Make trace ID map spinlock local to the map
  coresight: Emit sink ID in the HW_ID packets
  coresight: Remove pending trace ID release mechanism
  coresight: Use per-sink trace ID maps for Perf sessions
  coresight: Make CPU id map a property of a trace ID map
  coresight: Expose map arguments in trace ID API
  coresight: Move struct coresight_trace_id_map to common header
  coresight: Clarify comments around the PID of the sink owner
  coresight: Remove unused ETM Perf stubs
  coresight: tmc: sg: Do not leak sg_table
  Coresight: Set correct cs_mode for dummy source to fix disable issue
  Coresight: Set correct cs_mode for TPDM to fix disable issue
  coresight: cti: use device_* to iterate over device child nodes
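
For context on the "more than 110 cores" figure: valid CoreSight trace IDs are a small architectural space, so a single shared map caps the number of concurrently traced CPUs. A minimal sketch of the arithmetic, assuming the mainline reserved-ID values (only CORESIGHT_TRACE_ID_RES_0 is visible in the hunks below; RES_TOP = 0x70 is an assumption from mainline):

/* Sketch: why one shared trace ID map tops out just above 110 CPUs. */
#define CORESIGHT_TRACE_IDS_MAX		128	/* architectural ID space */
#define CORESIGHT_TRACE_ID_RES_0	0	/* ID 0 reserved */
#define CORESIGHT_TRACE_ID_RES_TOP	0x70	/* 0x70-0x7F reserved (assumed mainline value) */

/*
 * Valid IDs are 0x01..0x6F: 111 per map. With one global map that is the
 * ceiling for all CPUs combined; with a map per sink, each sink gets its
 * own 111 IDs, so systems with more than 110 cores can still trace.
 */
#define CORESIGHT_TRACE_IDS_USABLE \
	(CORESIGHT_TRACE_ID_RES_TOP - CORESIGHT_TRACE_ID_RES_0 - 1)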
Committed by Greg Kroah-Hartman on 2024-09-03 12:07:03 +02:00 as commit 88850f7cca.
17 changed files with 221 additions and 190 deletions.


@@ -487,23 +487,25 @@ struct coresight_device *coresight_get_sink(struct list_head *path)
return csdev;
}
u32 coresight_get_sink_id(struct coresight_device *csdev)
{
if (!csdev->ea)
return 0;
/*
* See function etm_perf_add_symlink_sink() to know where
* this comes from.
*/
return (u32) (unsigned long) csdev->ea->var;
}
static int coresight_sink_by_id(struct device *dev, const void *data)
{
struct coresight_device *csdev = to_coresight_device(dev);
unsigned long hash;
if (csdev->type == CORESIGHT_DEV_TYPE_SINK ||
csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) {
if (!csdev->ea)
return 0;
/*
* See function etm_perf_add_symlink_sink() to know where
* this comes from.
*/
hash = (unsigned long)csdev->ea->var;
if ((u32)hash == *(u32 *)data)
csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) {
if (coresight_get_sink_id(csdev) == *(u32 *)data)
return 1;
}
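
The comment above notes that the sink "ID" is really the hash stored in ea->var by etm_perf_add_symlink_sink(). The matcher is then driven through the driver core; a sketch of the lookup, mirroring what coresight_get_sink_by_id() (used later in etm_setup_aux()) does; the wrapper name here is illustrative:

/* Sketch: find the sink whose hashed name matches 'id'. */
static struct coresight_device *example_sink_lookup(u32 id)
{
	struct device *dev = bus_find_device(&coresight_bustype, NULL, &id,
					     coresight_sink_by_id);

	return dev ? to_coresight_device(dev) : NULL;
}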
@@ -902,6 +904,7 @@ static void coresight_device_release(struct device *dev)
struct coresight_device *csdev = to_coresight_device(dev);
fwnode_handle_put(csdev->dev.fwnode);
free_percpu(csdev->perf_sink_id_map.cpu_map);
kfree(csdev);
}
@@ -1159,6 +1162,16 @@ struct coresight_device *coresight_register(struct coresight_desc *desc)
csdev->dev.fwnode = fwnode_handle_get(dev_fwnode(desc->dev));
dev_set_name(&csdev->dev, "%s", desc->name);
if (csdev->type == CORESIGHT_DEV_TYPE_SINK ||
csdev->type == CORESIGHT_DEV_TYPE_LINKSINK) {
spin_lock_init(&csdev->perf_sink_id_map.lock);
csdev->perf_sink_id_map.cpu_map = alloc_percpu(atomic_t);
if (!csdev->perf_sink_id_map.cpu_map) {
kfree(csdev);
ret = -ENOMEM;
goto err_out;
}
}
/*
* Make sure the device registration and the connection fixup
* are synchronised, so that we don't see uninitialised devices


@@ -416,20 +416,16 @@ static int cti_plat_create_impdef_connections(struct device *dev,
struct cti_drvdata *drvdata)
{
int rc = 0;
struct fwnode_handle *fwnode = dev_fwnode(dev);
struct fwnode_handle *child = NULL;
if (IS_ERR_OR_NULL(fwnode))
if (IS_ERR_OR_NULL(dev_fwnode(dev)))
return -EINVAL;
fwnode_for_each_child_node(fwnode, child) {
device_for_each_child_node_scoped(dev, child) {
if (cti_plat_node_name_eq(child, CTI_DT_CONNS))
rc = cti_plat_create_connection(dev, drvdata,
child);
rc = cti_plat_create_connection(dev, drvdata, child);
if (rc != 0)
break;
}
fwnode_handle_put(child);
return rc;
}


@@ -21,8 +21,12 @@ DEFINE_CORESIGHT_DEVLIST(sink_devs, "dummy_sink");
DEFINE_CORESIGHT_DEVLIST(sink_devs, "dummy_sink");
static int dummy_source_enable(struct coresight_device *csdev,
struct perf_event *event, enum cs_mode mode)
struct perf_event *event, enum cs_mode mode,
__maybe_unused struct coresight_trace_id_map *id_map)
{
if (!coresight_take_mode(csdev, mode))
return -EBUSY;
dev_dbg(csdev->dev.parent, "Dummy source enabled\n");
return 0;
@@ -31,6 +35,7 @@ static int dummy_source_enable(struct coresight_device *csdev,
static void dummy_source_disable(struct coresight_device *csdev,
struct perf_event *event)
{
coresight_set_mode(csdev, CS_MODE_DISABLED);
dev_dbg(csdev->dev.parent, "Dummy source disabled\n");
}
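
The fix is to claim the device mode on enable and return it on disable; without the pairing, the device still appears busy (or in a stale mode) and a later enable fails. A minimal sketch of the pattern, with hypothetical function names (the same fix is applied to TPDM further down):

/* Sketch: the take/release pairing this fix establishes. */
static int example_enable(struct coresight_device *csdev, enum cs_mode mode)
{
	if (!coresight_take_mode(csdev, mode))
		return -EBUSY;	/* already claimed in another mode */
	/* ... program the hardware ... */
	return 0;
}

static void example_disable(struct coresight_device *csdev)
{
	/* ... stop the hardware ... */
	coresight_set_mode(csdev, CS_MODE_DISABLED);	/* allow re-enable */
}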


@@ -229,14 +229,23 @@ static void free_event_data(struct work_struct *work)
struct list_head **ppath;
ppath = etm_event_cpu_path_ptr(event_data, cpu);
if (!(IS_ERR_OR_NULL(*ppath)))
coresight_release_path(*ppath);
*ppath = NULL;
coresight_trace_id_put_cpu_id(cpu);
}
if (!(IS_ERR_OR_NULL(*ppath))) {
struct coresight_device *sink = coresight_get_sink(*ppath);
/* mark perf event as done for trace id allocator */
coresight_trace_id_perf_stop();
/*
* Mark perf event as done for trace id allocator, but don't call
* coresight_trace_id_put_cpu_id_map() on individual IDs. Perf sessions
* never free trace IDs, to ensure that the ID associated with a CPU
* cannot change while this or any concurrent session is running. Instead,
* a refcount is used so that the last event to call
* coresight_trace_id_perf_stop() frees all IDs.
*/
coresight_trace_id_perf_stop(&sink->perf_sink_id_map);
coresight_release_path(*ppath);
}
*ppath = NULL;
}
free_percpu(event_data->path);
kfree(event_data);
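
The comment above amounts to a pin/unpin scheme on the sink's map; a condensed sketch using only calls from this series (function names hypothetical):

/* Sketch: a perf event pins its sink's ID map for the session lifetime. */
static int example_event_setup(int cpu, struct coresight_device *sink)
{
	int trace_id = coresight_trace_id_get_cpu_id_map(cpu,
						&sink->perf_sink_id_map);

	if (!IS_VALID_CS_TRACE_ID(trace_id))
		return -EINVAL;
	/* Refcount up: IDs stay stable while any session is active. */
	coresight_trace_id_perf_start(&sink->perf_sink_id_map);
	return trace_id;
}

static void example_event_teardown(struct coresight_device *sink)
{
	/* Refcount down: the last stop frees every ID in the map. */
	coresight_trace_id_perf_stop(&sink->perf_sink_id_map);
}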
@@ -325,9 +334,6 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
sink = user_sink = coresight_get_sink_by_id(id);
}
/* tell the trace ID allocator that a perf event is starting up */
coresight_trace_id_perf_start();
/* check if user wants a coresight configuration selected */
cfg_hash = (u32)((event->attr.config2 & GENMASK_ULL(63, 32)) >> 32);
if (cfg_hash) {
@@ -401,13 +407,14 @@ static void *etm_setup_aux(struct perf_event *event, void **pages,
}
/* ensure we can allocate a trace ID for this CPU */
trace_id = coresight_trace_id_get_cpu_id(cpu);
trace_id = coresight_trace_id_get_cpu_id_map(cpu, &sink->perf_sink_id_map);
if (!IS_VALID_CS_TRACE_ID(trace_id)) {
cpumask_clear_cpu(cpu, mask);
coresight_release_path(path);
continue;
}
coresight_trace_id_perf_start(&sink->perf_sink_id_map);
*etm_event_cpu_path_ptr(event_data, cpu) = path;
}
@@ -453,6 +460,7 @@ static void etm_event_start(struct perf_event *event, int flags)
struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
struct list_head *path;
u64 hw_id;
u8 trace_id;
if (!csdev)
goto fail;
@@ -495,7 +503,8 @@ static void etm_event_start(struct perf_event *event, int flags)
goto fail_end_stop;
/* Finally enable the tracer */
if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF))
if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF,
&sink->perf_sink_id_map))
goto fail_disable_path;
/*
@@ -504,10 +513,16 @@ static void etm_event_start(struct perf_event *event, int flags)
*/
if (!cpumask_test_cpu(cpu, &event_data->aux_hwid_done)) {
cpumask_set_cpu(cpu, &event_data->aux_hwid_done);
hw_id = FIELD_PREP(CS_AUX_HW_ID_VERSION_MASK,
CS_AUX_HW_ID_CURR_VERSION);
hw_id |= FIELD_PREP(CS_AUX_HW_ID_TRACE_ID_MASK,
coresight_trace_id_read_cpu_id(cpu));
trace_id = coresight_trace_id_read_cpu_id_map(cpu, &sink->perf_sink_id_map);
hw_id = FIELD_PREP(CS_AUX_HW_ID_MAJOR_VERSION_MASK,
CS_AUX_HW_ID_MAJOR_VERSION);
hw_id |= FIELD_PREP(CS_AUX_HW_ID_MINOR_VERSION_MASK,
CS_AUX_HW_ID_MINOR_VERSION);
hw_id |= FIELD_PREP(CS_AUX_HW_ID_TRACE_ID_MASK, trace_id);
hw_id |= FIELD_PREP(CS_AUX_HW_ID_SINK_ID_MASK, coresight_get_sink_id(sink));
perf_report_aux_output_id(event, hw_id);
}


@@ -62,7 +62,6 @@ struct etm_event_data {
struct list_head * __percpu *path;
};
#if IS_ENABLED(CONFIG_CORESIGHT)
int etm_perf_symlink(struct coresight_device *csdev, bool link);
int etm_perf_add_symlink_sink(struct coresight_device *csdev);
void etm_perf_del_symlink_sink(struct coresight_device *csdev);
@@ -77,23 +76,6 @@ static inline void *etm_perf_sink_config(struct perf_output_handle *handle)
int etm_perf_add_symlink_cscfg(struct device *dev,
struct cscfg_config_desc *config_desc);
void etm_perf_del_symlink_cscfg(struct cscfg_config_desc *config_desc);
#else
static inline int etm_perf_symlink(struct coresight_device *csdev, bool link)
{ return -EINVAL; }
int etm_perf_add_symlink_sink(struct coresight_device *csdev)
{ return -EINVAL; }
void etm_perf_del_symlink_sink(struct coresight_device *csdev) {}
static inline void *etm_perf_sink_config(struct perf_output_handle *handle)
{
return NULL;
}
int etm_perf_add_symlink_cscfg(struct device *dev,
struct cscfg_config_desc *config_desc)
{ return -EINVAL; }
void etm_perf_del_symlink_cscfg(struct cscfg_config_desc *config_desc) {}
#endif /* CONFIG_CORESIGHT */
int __init etm_perf_init(void);
void etm_perf_exit(void);


@@ -481,7 +481,8 @@ void etm_release_trace_id(struct etm_drvdata *drvdata)
}
static int etm_enable_perf(struct coresight_device *csdev,
struct perf_event *event)
struct perf_event *event,
struct coresight_trace_id_map *id_map)
{
struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
int trace_id;
@@ -500,7 +501,7 @@ static int etm_enable_perf(struct coresight_device *csdev,
* with perf locks - we know the ID cannot change until perf shuts down
* the session
*/
trace_id = coresight_trace_id_read_cpu_id(drvdata->cpu);
trace_id = coresight_trace_id_read_cpu_id_map(drvdata->cpu, id_map);
if (!IS_VALID_CS_TRACE_ID(trace_id)) {
dev_err(&drvdata->csdev->dev, "Failed to set trace ID for %s on CPU%d\n",
dev_name(&drvdata->csdev->dev), drvdata->cpu);
@@ -553,7 +554,7 @@ static int etm_enable_sysfs(struct coresight_device *csdev)
}
static int etm_enable(struct coresight_device *csdev, struct perf_event *event,
enum cs_mode mode)
enum cs_mode mode, struct coresight_trace_id_map *id_map)
{
int ret;
struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -568,7 +569,7 @@ static int etm_enable(struct coresight_device *csdev, struct perf_event *event,
ret = etm_enable_sysfs(csdev);
break;
case CS_MODE_PERF:
ret = etm_enable_perf(csdev, event);
ret = etm_enable_perf(csdev, event, id_map);
break;
default:
ret = -EINVAL;


@@ -752,7 +752,8 @@ static int etm4_parse_event_config(struct coresight_device *csdev,
}
static int etm4_enable_perf(struct coresight_device *csdev,
struct perf_event *event)
struct perf_event *event,
struct coresight_trace_id_map *id_map)
{
int ret = 0, trace_id;
struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -775,7 +776,7 @@ static int etm4_enable_perf(struct coresight_device *csdev,
* with perf locks - we know the ID cannot change until perf shuts down
* the session
*/
trace_id = coresight_trace_id_read_cpu_id(drvdata->cpu);
trace_id = coresight_trace_id_read_cpu_id_map(drvdata->cpu, id_map);
if (!IS_VALID_CS_TRACE_ID(trace_id)) {
dev_err(&drvdata->csdev->dev, "Failed to set trace ID for %s on CPU%d\n",
dev_name(&drvdata->csdev->dev), drvdata->cpu);
@@ -837,7 +838,7 @@ static int etm4_enable_sysfs(struct coresight_device *csdev)
}
static int etm4_enable(struct coresight_device *csdev, struct perf_event *event,
enum cs_mode mode)
enum cs_mode mode, struct coresight_trace_id_map *id_map)
{
int ret;
@@ -851,7 +852,7 @@ static int etm4_enable(struct coresight_device *csdev, struct perf_event *event,
ret = etm4_enable_sysfs(csdev);
break;
case CS_MODE_PERF:
ret = etm4_enable_perf(csdev, event);
ret = etm4_enable_perf(csdev, event, id_map);
break;
default:
ret = -EINVAL;


@@ -148,6 +148,7 @@ int coresight_make_links(struct coresight_device *orig,
struct coresight_device *target);
void coresight_remove_links(struct coresight_device *orig,
struct coresight_connection *conn);
u32 coresight_get_sink_id(struct coresight_device *csdev);
#if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM3X)
extern int etm_readl_cp14(u32 off, unsigned int *val);


@@ -194,7 +194,8 @@ static void stm_enable_hw(struct stm_drvdata *drvdata)
}
static int stm_enable(struct coresight_device *csdev, struct perf_event *event,
enum cs_mode mode)
enum cs_mode mode,
__maybe_unused struct coresight_trace_id_map *trace_id)
{
struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);


@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include "coresight-priv.h"
#include "coresight-trace-id.h"
/*
* Use IDR to map the hash of the source's device name
@@ -63,7 +64,7 @@ static int coresight_enable_source_sysfs(struct coresight_device *csdev,
*/
lockdep_assert_held(&coresight_mutex);
if (coresight_get_mode(csdev) != CS_MODE_SYSFS) {
ret = source_ops(csdev)->enable(csdev, data, mode);
ret = source_ops(csdev)->enable(csdev, data, mode, NULL);
if (ret)
return ret;
}


@@ -36,7 +36,8 @@ struct etr_buf_hw {
* etr_perf_buffer - Perf buffer used for ETR
* @drvdata - The ETR drvdata this buffer has been allocated for.
* @etr_buf - Actual buffer used by the ETR
* @pid - The PID this etr_perf_buffer belongs to.
* @pid - The PID of the session owner that etr_perf_buffer
* belongs to.
* @snapshot - Perf session mode
* @nr_pages - Number of pages in the ring buffer.
* @pages - Array of Pages in the ring buffer.
@@ -261,6 +262,7 @@ void tmc_free_sg_table(struct tmc_sg_table *sg_table)
{
tmc_free_table_pages(sg_table);
tmc_free_data_pages(sg_table);
kfree(sg_table);
}
EXPORT_SYMBOL_GPL(tmc_free_sg_table);
@@ -342,7 +344,6 @@ struct tmc_sg_table *tmc_alloc_sg_table(struct device *dev,
rc = tmc_alloc_table_pages(sg_table);
if (rc) {
tmc_free_sg_table(sg_table);
kfree(sg_table);
return ERR_PTR(rc);
}
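
With the kfree() moved into tmc_free_sg_table(), that function owns the whole teardown; callers now make a single call and must not free the struct themselves. A sketch with a hypothetical caller:

/* Sketch: tmc_free_sg_table() frees table pages, data pages, and the
 * struct itself after this fix.
 */
static int example_use_sg_table(struct device *dev, int node,
				int nr_tpages, int nr_dpages, void **pages)
{
	struct tmc_sg_table *sg_table = tmc_alloc_sg_table(dev, node,
							   nr_tpages,
							   nr_dpages, pages);

	if (IS_ERR(sg_table))
		return PTR_ERR(sg_table);
	/* ... map and fill the scatter-gather table ... */
	tmc_free_sg_table(sg_table);	/* no trailing kfree() needed */
	return 0;
}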
@@ -1662,7 +1663,7 @@ static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, void *data)
goto unlock_out;
}
/* Get a handle on the pid of the process to monitor */
/* Get a handle on the pid of the session owner */
pid = etr_perf->pid;
/* Do not proceed if this device is associated with another session */
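
The check that implements "do not proceed" falls outside this hunk; a sketch of the ownership test, assuming the driver's usual convention that drvdata->pid == -1 means unowned:

/* Sketch: refuse to enable if another session owner holds the ETR. */
static int example_claim_etr(struct tmc_drvdata *drvdata, pid_t pid)
{
	if (drvdata->pid != -1 && drvdata->pid != pid)
		return -EBUSY;	/* owned by another session */
	drvdata->pid = pid;	/* first enable records the session owner */
	return 0;
}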


@@ -171,8 +171,9 @@ struct etr_buf {
* @csdev: component vitals needed by the framework.
* @miscdev: specifics to handle "/dev/xyz.tmc" entry.
* @spinlock: only one at a time pls.
* @pid: Process ID of the process being monitored by the session
* that is using this component.
* @pid: Process ID of the process that owns the session that is using
* this component. For example, this would be the pid of the Perf
* process.
* @buf: Snapshot of the trace data for ETF/ETB.
* @etr_buf: details of buffer used in TMC-ETR
* @len: size of the available trace for ETF/ETB.


@@ -439,7 +439,8 @@ static void __tpdm_enable(struct tpdm_drvdata *drvdata)
}
static int tpdm_enable(struct coresight_device *csdev, struct perf_event *event,
enum cs_mode mode)
enum cs_mode mode,
__maybe_unused struct coresight_trace_id_map *id_map)
{
struct tpdm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
@@ -449,6 +450,11 @@ static int tpdm_enable(struct coresight_device *csdev, struct perf_event *event,
return -EBUSY;
}
if (!coresight_take_mode(csdev, mode)) {
spin_unlock(&drvdata->spinlock);
return -EBUSY;
}
__tpdm_enable(drvdata);
drvdata->enable = true;
spin_unlock(&drvdata->spinlock);
@@ -506,6 +512,7 @@ static void tpdm_disable(struct coresight_device *csdev,
}
__tpdm_disable(drvdata);
coresight_set_mode(csdev, CS_MODE_DISABLED);
drvdata->enable = false;
spin_unlock(&drvdata->spinlock);


@@ -3,6 +3,7 @@
* Copyright (c) 2022, Linaro Limited, All rights reserved.
* Author: Mike Leach <mike.leach@linaro.org>
*/
#include <linux/coresight.h>
#include <linux/coresight-pmu.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
@@ -11,18 +12,12 @@
#include "coresight-trace-id.h"
/* Default trace ID map. Used on systems that don't require per sink mappings */
static struct coresight_trace_id_map id_map_default;
/* maintain a record of the mapping of IDs and pending releases per cpu */
static DEFINE_PER_CPU(atomic_t, cpu_id) = ATOMIC_INIT(0);
static cpumask_t cpu_id_release_pending;
/* perf session active counter */
static atomic_t perf_cs_etm_session_active = ATOMIC_INIT(0);
/* lock to protect id_map and cpu data */
static DEFINE_SPINLOCK(id_map_lock);
/* Default trace ID map. Used in sysfs mode and for system sources */
static DEFINE_PER_CPU(atomic_t, id_map_default_cpu_ids) = ATOMIC_INIT(0);
static struct coresight_trace_id_map id_map_default = {
.cpu_map = &id_map_default_cpu_ids,
.lock = __SPIN_LOCK_UNLOCKED(id_map_default.lock)
};
/* #define TRACE_ID_DEBUG 1 */
#if defined(TRACE_ID_DEBUG) || defined(CONFIG_COMPILE_TEST)
@@ -32,7 +27,6 @@ static void coresight_trace_id_dump_table(struct coresight_trace_id_map *id_map,
{
pr_debug("%s id_map::\n", func_name);
pr_debug("Used = %*pb\n", CORESIGHT_TRACE_IDS_MAX, id_map->used_ids);
pr_debug("Pend = %*pb\n", CORESIGHT_TRACE_IDS_MAX, id_map->pend_rel_ids);
}
#define DUMP_ID_MAP(map) coresight_trace_id_dump_table(map, __func__)
#define DUMP_ID_CPU(cpu, id) pr_debug("%s called; cpu=%d, id=%d\n", __func__, cpu, id)
@@ -46,9 +40,9 @@ static void coresight_trace_id_dump_table(struct coresight_trace_id_map *id_map,
#endif
/* unlocked read of current trace ID value for given CPU */
static int _coresight_trace_id_read_cpu_id(int cpu)
static int _coresight_trace_id_read_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
{
return atomic_read(&per_cpu(cpu_id, cpu));
return atomic_read(per_cpu_ptr(id_map->cpu_map, cpu));
}
/* look for next available odd ID, return 0 if none found */
@@ -119,49 +113,33 @@ static void coresight_trace_id_free(int id, struct coresight_trace_id_map *id_ma
clear_bit(id, id_map->used_ids);
}
static void coresight_trace_id_set_pend_rel(int id, struct coresight_trace_id_map *id_map)
{
if (WARN(!IS_VALID_CS_TRACE_ID(id), "Invalid Trace ID %d\n", id))
return;
set_bit(id, id_map->pend_rel_ids);
}
/*
* release all pending IDs for all current maps & clear CPU associations
*
* This currently operates on the default id map, but may be extended to
* operate on all registered id maps if per sink id maps are used.
* Release all IDs and clear CPU associations.
*/
static void coresight_trace_id_release_all_pending(void)
static void coresight_trace_id_release_all(struct coresight_trace_id_map *id_map)
{
struct coresight_trace_id_map *id_map = &id_map_default;
unsigned long flags;
int cpu, bit;
int cpu;
spin_lock_irqsave(&id_map_lock, flags);
for_each_set_bit(bit, id_map->pend_rel_ids, CORESIGHT_TRACE_ID_RES_TOP) {
clear_bit(bit, id_map->used_ids);
clear_bit(bit, id_map->pend_rel_ids);
}
for_each_cpu(cpu, &cpu_id_release_pending) {
atomic_set(&per_cpu(cpu_id, cpu), 0);
cpumask_clear_cpu(cpu, &cpu_id_release_pending);
}
spin_unlock_irqrestore(&id_map_lock, flags);
spin_lock_irqsave(&id_map->lock, flags);
bitmap_zero(id_map->used_ids, CORESIGHT_TRACE_IDS_MAX);
for_each_possible_cpu(cpu)
atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), 0);
spin_unlock_irqrestore(&id_map->lock, flags);
DUMP_ID_MAP(id_map);
}
static int coresight_trace_id_map_get_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
static int _coresight_trace_id_get_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
{
unsigned long flags;
int id;
spin_lock_irqsave(&id_map_lock, flags);
spin_lock_irqsave(&id_map->lock, flags);
/* check for existing allocation for this CPU */
id = _coresight_trace_id_read_cpu_id(cpu);
id = _coresight_trace_id_read_cpu_id(cpu, id_map);
if (id)
goto get_cpu_id_clr_pend;
goto get_cpu_id_out_unlock;
/*
* Find a new ID.
@@ -180,44 +158,32 @@ static int coresight_trace_id_map_get_cpu_id(int cpu, struct coresight_trace_id_
goto get_cpu_id_out_unlock;
/* allocate the new id to the cpu */
atomic_set(&per_cpu(cpu_id, cpu), id);
get_cpu_id_clr_pend:
/* we are (re)using this ID - so ensure it is not marked for release */
cpumask_clear_cpu(cpu, &cpu_id_release_pending);
clear_bit(id, id_map->pend_rel_ids);
atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), id);
get_cpu_id_out_unlock:
spin_unlock_irqrestore(&id_map_lock, flags);
spin_unlock_irqrestore(&id_map->lock, flags);
DUMP_ID_CPU(cpu, id);
DUMP_ID_MAP(id_map);
return id;
}
static void coresight_trace_id_map_put_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
static void _coresight_trace_id_put_cpu_id(int cpu, struct coresight_trace_id_map *id_map)
{
unsigned long flags;
int id;
/* check for existing allocation for this CPU */
id = _coresight_trace_id_read_cpu_id(cpu);
id = _coresight_trace_id_read_cpu_id(cpu, id_map);
if (!id)
return;
spin_lock_irqsave(&id_map_lock, flags);
spin_lock_irqsave(&id_map->lock, flags);
if (atomic_read(&perf_cs_etm_session_active)) {
/* set release at pending if perf still active */
coresight_trace_id_set_pend_rel(id, id_map);
cpumask_set_cpu(cpu, &cpu_id_release_pending);
} else {
/* otherwise clear id */
coresight_trace_id_free(id, id_map);
atomic_set(&per_cpu(cpu_id, cpu), 0);
}
coresight_trace_id_free(id, id_map);
atomic_set(per_cpu_ptr(id_map->cpu_map, cpu), 0);
spin_unlock_irqrestore(&id_map_lock, flags);
spin_unlock_irqrestore(&id_map->lock, flags);
DUMP_ID_CPU(cpu, id);
DUMP_ID_MAP(id_map);
}
@@ -227,10 +193,10 @@ static int coresight_trace_id_map_get_system_id(struct coresight_trace_id_map *i
unsigned long flags;
int id;
spin_lock_irqsave(&id_map_lock, flags);
spin_lock_irqsave(&id_map->lock, flags);
/* prefer odd IDs for system components to avoid legacy CPU IDs */
id = coresight_trace_id_alloc_new_id(id_map, 0, true);
spin_unlock_irqrestore(&id_map_lock, flags);
spin_unlock_irqrestore(&id_map->lock, flags);
DUMP_ID(id);
DUMP_ID_MAP(id_map);
@@ -241,9 +207,9 @@ static void coresight_trace_id_map_put_system_id(struct coresight_trace_id_map *
{
unsigned long flags;
spin_lock_irqsave(&id_map_lock, flags);
spin_lock_irqsave(&id_map->lock, flags);
coresight_trace_id_free(id, id_map);
spin_unlock_irqrestore(&id_map_lock, flags);
spin_unlock_irqrestore(&id_map->lock, flags);
DUMP_ID(id);
DUMP_ID_MAP(id_map);
@@ -253,22 +219,40 @@ static void coresight_trace_id_map_put_system_id(struct coresight_trace_id_map *
int coresight_trace_id_get_cpu_id(int cpu)
{
return coresight_trace_id_map_get_cpu_id(cpu, &id_map_default);
return _coresight_trace_id_get_cpu_id(cpu, &id_map_default);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_get_cpu_id);
int coresight_trace_id_get_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map)
{
return _coresight_trace_id_get_cpu_id(cpu, id_map);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_get_cpu_id_map);
void coresight_trace_id_put_cpu_id(int cpu)
{
coresight_trace_id_map_put_cpu_id(cpu, &id_map_default);
_coresight_trace_id_put_cpu_id(cpu, &id_map_default);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_put_cpu_id);
void coresight_trace_id_put_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map)
{
_coresight_trace_id_put_cpu_id(cpu, id_map);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_put_cpu_id_map);
int coresight_trace_id_read_cpu_id(int cpu)
{
return _coresight_trace_id_read_cpu_id(cpu);
return _coresight_trace_id_read_cpu_id(cpu, &id_map_default);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_read_cpu_id);
int coresight_trace_id_read_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map)
{
return _coresight_trace_id_read_cpu_id(cpu, id_map);
}
EXPORT_SYMBOL_GPL(coresight_trace_id_read_cpu_id_map);
int coresight_trace_id_get_system_id(void)
{
return coresight_trace_id_map_get_system_id(&id_map_default);
@@ -281,17 +265,17 @@ void coresight_trace_id_put_system_id(int id)
}
EXPORT_SYMBOL_GPL(coresight_trace_id_put_system_id);
void coresight_trace_id_perf_start(void)
void coresight_trace_id_perf_start(struct coresight_trace_id_map *id_map)
{
atomic_inc(&perf_cs_etm_session_active);
PERF_SESSION(atomic_read(&perf_cs_etm_session_active));
atomic_inc(&id_map->perf_cs_etm_session_active);
PERF_SESSION(atomic_read(&id_map->perf_cs_etm_session_active));
}
EXPORT_SYMBOL_GPL(coresight_trace_id_perf_start);
void coresight_trace_id_perf_stop(void)
void coresight_trace_id_perf_stop(struct coresight_trace_id_map *id_map)
{
if (!atomic_dec_return(&perf_cs_etm_session_active))
coresight_trace_id_release_all_pending();
PERF_SESSION(atomic_read(&perf_cs_etm_session_active));
if (!atomic_dec_return(&id_map->perf_cs_etm_session_active))
coresight_trace_id_release_all(id_map);
PERF_SESSION(atomic_read(&id_map->perf_cs_etm_session_active));
}
EXPORT_SYMBOL_GPL(coresight_trace_id_perf_stop);
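
System sources such as STM have no CPU affinity and draw from the default map; a sketch of the expected call pattern (function name hypothetical, error handling simplified):

/* Sketch: a system component takes an ID from the default map and puts it
 * back when done. Odd IDs are preferred to stay clear of legacy CPU IDs.
 */
static int example_system_source(void)
{
	int trace_id = coresight_trace_id_get_system_id();

	if (trace_id < 0)
		return trace_id;	/* ID space exhausted */
	/* ... program the component with trace_id ... */
	coresight_trace_id_put_system_id(trace_id);
	return 0;
}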


@@ -17,9 +17,10 @@
* released when done.
*
* In order to ensure that a consistent cpu / ID matching is maintained
* throughout a perf cs_etm event session - a session in progress flag will
* be maintained, and released IDs not cleared until the perf session is
* complete. This allows the same CPU to be re-allocated its prior ID.
* throughout a perf cs_etm event session - a session in progress flag will be
* maintained for each sink, and IDs are cleared when all the perf sessions
* complete. This allows the same CPU to be re-allocated its prior ID when
* events are scheduled in and out.
*
*
* Trace ID maps will be created and initialised to prevent architecturally
@@ -32,10 +33,6 @@
#include <linux/bitops.h>
#include <linux/types.h>
/* architecturally we have 128 IDs, some of which are reserved */
#define CORESIGHT_TRACE_IDS_MAX 128
/* ID 0 is reserved */
#define CORESIGHT_TRACE_ID_RES_0 0
@@ -46,23 +43,6 @@
#define IS_VALID_CS_TRACE_ID(id) \
((id > CORESIGHT_TRACE_ID_RES_0) && (id < CORESIGHT_TRACE_ID_RES_TOP))
/**
* Trace ID map.
*
* @used_ids: Bitmap to register available (bit = 0) and in use (bit = 1) IDs.
* Initialised so that the reserved IDs are permanently marked as
* in use.
* @pend_rel_ids: CPU IDs that have been released by the trace source but not
* yet marked as available, to allow re-allocation to the same
* CPU during a perf session.
*/
struct coresight_trace_id_map {
DECLARE_BITMAP(used_ids, CORESIGHT_TRACE_IDS_MAX);
DECLARE_BITMAP(pend_rel_ids, CORESIGHT_TRACE_IDS_MAX);
};
/* Allocate and release IDs for a single default trace ID map */
/**
* Read and optionally allocate a CoreSight trace ID and associate with a CPU.
*
@@ -78,19 +58,27 @@ struct coresight_trace_id_map {
*/
int coresight_trace_id_get_cpu_id(int cpu);
/**
* Version of coresight_trace_id_get_cpu_id() that takes the ID map to
* operate on.
*/
int coresight_trace_id_get_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map);
/**
* Release an allocated trace ID associated with the CPU.
*
* This will release the CoreSight trace ID associated with the CPU,
* unless a perf session is in operation.
*
* If a perf session is in operation then the ID will be marked as pending
* release.
* This will release the CoreSight trace ID associated with the CPU.
*
* @cpu: The CPU index to release the associated trace ID.
*/
void coresight_trace_id_put_cpu_id(int cpu);
/**
* Version of coresight_trace_id_put_cpu_id() that takes the ID map to
* operate on.
*/
void coresight_trace_id_put_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map);
/**
* Read the current allocated CoreSight Trace ID value for the CPU.
*
@@ -111,6 +99,12 @@ void coresight_trace_id_put_cpu_id(int cpu);
*/
int coresight_trace_id_read_cpu_id(int cpu);
/**
* Version of coresight_trace_id_read_cpu_id() that takes the ID map to
* operate on.
*/
int coresight_trace_id_read_cpu_id_map(int cpu, struct coresight_trace_id_map *id_map);
/**
* Allocate a CoreSight trace ID for a system component.
*
@@ -136,21 +130,21 @@ void coresight_trace_id_put_system_id(int id);
/**
* Notify the Trace ID allocator that a perf session is starting.
*
* Increase the perf session reference count - called by perf when setting up
* a trace event.
* Increase the perf session reference count - called by perf when setting up a
* trace event.
*
* This reference count is used by the ID allocator to ensure that trace IDs
* associated with a CPU cannot change or be released during a perf session.
* Perf sessions never free trace IDs, to ensure that the ID associated with a
* CPU cannot change while this or any concurrent session is running. Instead,
* this refcount is used so that the last event to finish always frees all IDs.
*/
void coresight_trace_id_perf_start(void);
void coresight_trace_id_perf_start(struct coresight_trace_id_map *id_map);
/**
* Notify the ID allocator that a perf session is stopping.
*
* Decrease the perf session reference count.
* if this causes the count to go to zero, then all Trace IDs marked as pending
* release, will be released.
* Decrease the perf session reference count. If this causes the count to go to
* zero, then all Trace IDs will be released.
*/
void coresight_trace_id_perf_stop(void);
void coresight_trace_id_perf_stop(struct coresight_trace_id_map *id_map);
#endif /* _CORESIGHT_TRACE_ID_H */


@@ -49,12 +49,21 @@
* Interpretation of the PERF_RECORD_AUX_OUTPUT_HW_ID payload.
* Used to associate a CPU with the CoreSight Trace ID.
* [07:00] - Trace ID - uses 8 bits to make value easy to read in file.
* [59:08] - Unused (SBZ)
* [63:60] - Version
* [39:08] - Sink ID - as reported in /sys/bus/event_source/devices/cs_etm/sinks/
* Added in minor version 1.
* [55:40] - Unused (SBZ)
* [59:56] - Minor Version - previously existing fields are compatible with
* all minor versions.
* [63:60] - Major Version - previously existing fields mean different things
* in new major versions.
*/
#define CS_AUX_HW_ID_TRACE_ID_MASK GENMASK_ULL(7, 0)
#define CS_AUX_HW_ID_VERSION_MASK GENMASK_ULL(63, 60)
#define CS_AUX_HW_ID_SINK_ID_MASK GENMASK_ULL(39, 8)
#define CS_AUX_HW_ID_CURR_VERSION 0
#define CS_AUX_HW_ID_MINOR_VERSION_MASK GENMASK_ULL(59, 56)
#define CS_AUX_HW_ID_MAJOR_VERSION_MASK GENMASK_ULL(63, 60)
#define CS_AUX_HW_ID_MAJOR_VERSION 0
#define CS_AUX_HW_ID_MINOR_VERSION 1
#endif
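
A consumer unpacks the payload with the matching FIELD_GET() calls; a sketch assuming <linux/bitfield.h> (in-kernel style; perf userspace keeps its own copy of these masks):

#include <linux/bitfield.h>

/* Sketch: decoding a PERF_RECORD_AUX_OUTPUT_HW_ID payload built as above. */
static void example_decode_hw_id(u64 hw_id)
{
	u8 trace_id = FIELD_GET(CS_AUX_HW_ID_TRACE_ID_MASK, hw_id);
	u32 sink_id = FIELD_GET(CS_AUX_HW_ID_SINK_ID_MASK, hw_id);
	u8 minor = FIELD_GET(CS_AUX_HW_ID_MINOR_VERSION_MASK, hw_id);
	u8 major = FIELD_GET(CS_AUX_HW_ID_MAJOR_VERSION_MASK, hw_id);

	pr_debug("hw_id: major %u minor %u trace_id %u sink %#x\n",
		 major, minor, trace_id, sink_id);
}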


@@ -218,6 +218,24 @@ struct coresight_sysfs_link {
const char *target_name;
};
/* architecturally we have 128 IDs, some of which are reserved */
#define CORESIGHT_TRACE_IDS_MAX 128
/**
* Trace ID map.
*
* @used_ids: Bitmap to register available (bit = 0) and in use (bit = 1) IDs.
* Initialised so that the reserved IDs are permanently marked as
* in use.
* @perf_cs_etm_session_active: Number of Perf sessions using this ID map.
*/
struct coresight_trace_id_map {
DECLARE_BITMAP(used_ids, CORESIGHT_TRACE_IDS_MAX);
atomic_t __percpu *cpu_map;
atomic_t perf_cs_etm_session_active;
spinlock_t lock;
};
/**
* struct coresight_device - representation of a device as used by the framework
* @pdata: Platform data with device connections associated to this device.
@@ -271,6 +289,7 @@ struct coresight_device {
bool sysfs_sink_activated;
struct dev_ext_attribute *ea;
struct coresight_device *def_sink;
struct coresight_trace_id_map perf_sink_id_map;
/* sysfs links between components */
int nr_links;
bool has_conns_grp;
@@ -365,7 +384,7 @@ struct coresight_ops_link {
struct coresight_ops_source {
int (*cpu_id)(struct coresight_device *csdev);
int (*enable)(struct coresight_device *csdev, struct perf_event *event,
enum cs_mode mode);
enum cs_mode mode, struct coresight_trace_id_map *id_map);
void (*disable)(struct coresight_device *csdev,
struct perf_event *event);
};
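
Every source driver implements the widened enable() hook above; a sketch of a hypothetical source using the new id_map argument (driver name illustrative):

/* Sketch: a per-CPU source reading its trace ID from the map passed in.
 * Perf passes the sink's perf_sink_id_map; sysfs mode passes NULL, and
 * the default map is reached via the non-_map helpers instead.
 */
static int foo_enable(struct coresight_device *csdev, struct perf_event *event,
		      enum cs_mode mode, struct coresight_trace_id_map *id_map)
{
	int cpu = smp_processor_id();
	int trace_id;

	if (mode == CS_MODE_PERF)
		trace_id = coresight_trace_id_read_cpu_id_map(cpu, id_map);
	else
		trace_id = coresight_trace_id_read_cpu_id(cpu);

	if (!IS_VALID_CS_TRACE_ID(trace_id))
		return -EINVAL;
	/* ... program the source with trace_id and start tracing ... */
	return 0;
}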