cxl for v6.8

- Add support for parsing the Coherent Device Attribute Table (CDAT)

- Add support for calculating a platform CXL QoS class from CDAT data

- Unify the tracing of EFI CXL Events with native CXL Events.

- Add Get Timestamp support

- Miscellaneous cleanups and fixups
 -----BEGIN PGP SIGNATURE-----
 
 iHUEABYKAB0WIQSbo+XnGs+rwLz9XGXfioYZHlFsZwUCZaHVvAAKCRDfioYZHlFs
 Z3sCAQDPHSsHmj845k4lvKbWjys3eh78MKKEFyTXLQgYhOlsGAEAigQY2ZiSum52
 nwdIgpOOADNt0Iq6yXuLsmn9xvY9bAU=
 =HjCl
 -----END PGP SIGNATURE-----

Merge tag 'cxl-for-6.8' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl

Pull CXL (Compute Express Link) updates from Dan Williams:
 "The bulk of this update is support for enumerating the performance
  capabilities of CXL memory targets and connecting that to a platform
  CXL memory QoS class. Some follow-on work remains to hook up this data
  into core-mm policy, but that is saved for v6.9.

  The next significant update is unifying how CXL event records (things
  like background scrub errors) are processed between so-called
  "firmware first" and native error record retrieval. The CXL driver
  handler that processes the record retrieved from the device mailbox is
  now the handler for that same record format coming from an EFI/ACPI
  notification source.

  This also contains miscellaneous feature updates, like Get Timestamp,
  and other fixups.

  Summary:

   - Add support for parsing the Coherent Device Attribute Table (CDAT)

   - Add support for calculating a platform CXL QoS class from CDAT data

   - Unify the tracing of EFI CXL Events with native CXL Events.

   - Add Get Timestamp support

   - Miscellaneous cleanups and fixups"

* tag 'cxl-for-6.8' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl: (41 commits)
  cxl/core: use sysfs_emit() for attr's _show()
  cxl/pci: Register for and process CPER events
  PCI: Introduce cleanup helpers for device reference counts and locks
  acpi/ghes: Process CXL Component Events
  cxl/events: Create a CXL event union
  cxl/events: Separate UUID from event structures
  cxl/events: Remove passing a UUID to known event traces
  cxl/events: Create common event UUID defines
  cxl/events: Promote CXL event structures to a core header
  cxl: Refactor to use __free() for cxl_root allocation in cxl_endpoint_port_probe()
  cxl: Refactor to use __free() for cxl_root allocation in cxl_find_nvdimm_bridge()
  cxl: Fix device reference leak in cxl_port_perf_data_calculate()
  cxl: Convert find_cxl_root() to return a 'struct cxl_root *'
  cxl: Introduce put_cxl_root() helper
  cxl/port: Fix missing target list lock
  cxl/port: Fix decoder initialization when nr_targets > interleave_ways
  cxl/region: fix x9 interleave typo
  cxl/trace: Pass UUID explicitly to event traces
  cxl/region: use %pap format to print resource_size_t
  cxl/region: Add dev_dbg() detail on failure to allocate HPA space
  ...
Merged by Linus Torvalds on 2024-01-18 16:22:43 -08:00 (commit db5ccb9eb2)
37 changed files with 1844 additions and 328 deletions

@ -28,6 +28,23 @@ Description:
Payload in the CXL-2.0 specification.
What: /sys/bus/cxl/devices/memX/ram/qos_class
Date: May, 2023
KernelVersion: v6.8
Contact: linux-cxl@vger.kernel.org
Description:
(RO) For CXL host platforms that support "QoS Telemetry",
this attribute conveys a comma-delimited list of
platform-specific cookies that identifies a QoS performance
class for the volatile partition of the CXL mem device.
These class-ids can be compared against a similar
"qos_class" published for a root decoder. While it is not
required that the endpoints map their local memory-class
to a matching platform class, mismatches are not
recommended and platform-specific, performance-related
side effects may result. The first class-id is displayed.
What: /sys/bus/cxl/devices/memX/pmem/size
Date: December, 2020
KernelVersion: v5.12
@ -38,6 +55,23 @@ Description:
Payload in the CXL-2.0 specification.
What: /sys/bus/cxl/devices/memX/pmem/qos_class
Date: May, 2023
KernelVersion: v6.8
Contact: linux-cxl@vger.kernel.org
Description:
(RO) For CXL host platforms that support "QoS Telemetry",
this attribute conveys a comma-delimited list of
platform-specific cookies that identifies a QoS performance
class for the persistent partition of the CXL mem device.
These class-ids can be compared against a similar
"qos_class" published for a root decoder. While it is not
required that the endpoints map their local memory-class
to a matching platform class, mismatches are not
recommended and platform-specific, performance-related
side effects may result. The first class-id is displayed.
What: /sys/bus/cxl/devices/memX/serial
Date: January, 2022
KernelVersion: v5.18
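
A minimal userspace sketch, in C, of reading the new qos_class attribute for a hypothetical "mem0" device (the cookie values themselves are platform defined):

#include <stdio.h>

int main(void)
{
	char buf[64];
	/* "mem0" is an example name; enumerate /sys/bus/cxl/devices to
	 * find real memdevs. */
	FILE *f = fopen("/sys/bus/cxl/devices/mem0/ram/qos_class", "r");

	if (!f)
		return 1;
	if (fgets(buf, sizeof(buf), f))
		printf("ram qos_class: %s", buf);
	fclose(f);
	return 0;
}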


@ -5280,6 +5280,7 @@ M: Dan Williams <dan.j.williams@intel.com>
L: linux-cxl@vger.kernel.org
S: Maintained
F: drivers/cxl/
F: include/linux/cxl-event.h
F: include/uapi/linux/cxl_mem.h
F: tools/testing/cxl/


@ -26,6 +26,7 @@
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/cper.h>
#include <linux/cxl-event.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/ratelimit.h>
@ -673,6 +674,78 @@ static void ghes_defer_non_standard_event(struct acpi_hest_generic_data *gdata,
schedule_work(&entry->work);
}
/*
* Only a single callback can be registered for CXL CPER events.
*/
static DECLARE_RWSEM(cxl_cper_rw_sem);
static cxl_cper_callback cper_callback;
/* CXL Event record UUIDs are formatted as GUIDs and reported in section type */
/*
* General Media Event Record
* CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
*/
#define CPER_SEC_CXL_GEN_MEDIA_GUID \
GUID_INIT(0xfbcd0a77, 0xc260, 0x417f, \
0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6)
/*
* DRAM Event Record
* CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
*/
#define CPER_SEC_CXL_DRAM_GUID \
GUID_INIT(0x601dcbb3, 0x9c06, 0x4eab, \
0xb8, 0xaf, 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24)
/*
* Memory Module Event Record
* CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45
*/
#define CPER_SEC_CXL_MEM_MODULE_GUID \
GUID_INIT(0xfe927475, 0xdd59, 0x4339, \
0xa5, 0x86, 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74)
static void cxl_cper_post_event(enum cxl_event_type event_type,
struct cxl_cper_event_rec *rec)
{
if (rec->hdr.length <= sizeof(rec->hdr) ||
rec->hdr.length > sizeof(*rec)) {
pr_err(FW_WARN "CXL CPER Invalid section length (%u)\n",
rec->hdr.length);
return;
}
if (!(rec->hdr.validation_bits & CPER_CXL_COMP_EVENT_LOG_VALID)) {
pr_err(FW_WARN "CXL CPER invalid event\n");
return;
}
guard(rwsem_read)(&cxl_cper_rw_sem);
if (cper_callback)
cper_callback(event_type, rec);
}
int cxl_cper_register_callback(cxl_cper_callback callback)
{
guard(rwsem_write)(&cxl_cper_rw_sem);
if (cper_callback)
return -EINVAL;
cper_callback = callback;
return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_cper_register_callback, CXL);
int cxl_cper_unregister_callback(cxl_cper_callback callback)
{
guard(rwsem_write)(&cxl_cper_rw_sem);
if (callback != cper_callback)
return -EINVAL;
cper_callback = NULL;
return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_cper_unregister_callback, CXL);
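
For orientation, a hedged sketch of how a driver is expected to consume this registration API; the example_* names and the handler body are hypothetical, since the real consumer-side wiring (cxl_pci, per the shortlog) is not part of this hunk:

/*
 * Hypothetical consumer of the CPER callback interface above.
 */
static void example_cxl_cper_handler(enum cxl_event_type ev_type,
				     struct cxl_cper_event_rec *rec)
{
	/*
	 * Resolve the device described by rec->hdr to its cxl_memdev,
	 * then feed the embedded CXL event record into the same
	 * cxl_event_trace_record() path used for mailbox-retrieved
	 * records.
	 */
}

static int example_cxl_cper_setup(void)
{
	/* Fails with -EINVAL if another callback is already registered. */
	return cxl_cper_register_callback(example_cxl_cper_handler);
}

static void example_cxl_cper_teardown(void)
{
	cxl_cper_unregister_callback(example_cxl_cper_handler);
}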
static bool ghes_do_proc(struct ghes *ghes,
const struct acpi_hest_generic_status *estatus)
{
@ -707,6 +780,22 @@ static bool ghes_do_proc(struct ghes *ghes,
}
else if (guid_equal(sec_type, &CPER_SEC_PROC_ARM)) {
queued = ghes_handle_arm_hw_error(gdata, sev, sync);
} else if (guid_equal(sec_type, &CPER_SEC_CXL_GEN_MEDIA_GUID)) {
struct cxl_cper_event_rec *rec =
acpi_hest_get_payload(gdata);
cxl_cper_post_event(CXL_CPER_EVENT_GEN_MEDIA, rec);
} else if (guid_equal(sec_type, &CPER_SEC_CXL_DRAM_GUID)) {
struct cxl_cper_event_rec *rec =
acpi_hest_get_payload(gdata);
cxl_cper_post_event(CXL_CPER_EVENT_DRAM, rec);
} else if (guid_equal(sec_type,
&CPER_SEC_CXL_MEM_MODULE_GUID)) {
struct cxl_cper_event_rec *rec =
acpi_hest_get_payload(gdata);
cxl_cper_post_event(CXL_CPER_EVENT_MEM_MODULE, rec);
} else {
void *err = acpi_hest_get_payload(gdata);


@ -58,14 +58,22 @@ struct target_cache {
struct node_cache_attrs cache_attrs;
};
enum {
NODE_ACCESS_CLASS_0 = 0,
NODE_ACCESS_CLASS_1,
NODE_ACCESS_CLASS_GENPORT_SINK,
NODE_ACCESS_CLASS_MAX,
};
struct memory_target {
struct list_head node;
unsigned int memory_pxm;
unsigned int processor_pxm;
struct resource memregions;
struct node_hmem_attrs hmem_attrs[2];
struct access_coordinate coord[NODE_ACCESS_CLASS_MAX];
struct list_head caches;
struct node_cache_attrs cache_attrs;
u8 gen_port_device_handle[ACPI_SRAT_DEVICE_HANDLE_SIZE];
bool registered;
};
@ -100,6 +108,47 @@ static struct memory_target *find_mem_target(unsigned int mem_pxm)
return NULL;
}
static struct memory_target *acpi_find_genport_target(u32 uid)
{
struct memory_target *target;
u32 target_uid;
u8 *uid_ptr;
list_for_each_entry(target, &targets, node) {
uid_ptr = target->gen_port_device_handle + 8;
target_uid = *(u32 *)uid_ptr;
if (uid == target_uid)
return target;
}
return NULL;
}
/**
* acpi_get_genport_coordinates - Retrieve the access coordinates for a generic port
* @uid: ACPI unique id
* @coord: The access coordinates written back out for the generic port
*
* Return: 0 on success. Errno on failure.
*
* Only supports device handles that are ACPI. Assume ACPI0016 HID for CXL.
*/
int acpi_get_genport_coordinates(u32 uid,
struct access_coordinate *coord)
{
struct memory_target *target;
guard(mutex)(&target_lock);
target = acpi_find_genport_target(uid);
if (!target)
return -ENOENT;
*coord = target->coord[NODE_ACCESS_CLASS_GENPORT_SINK];
return 0;
}
EXPORT_SYMBOL_NS_GPL(acpi_get_genport_coordinates, CXL);
static __init void alloc_memory_initiator(unsigned int cpu_pxm)
{
struct memory_initiator *initiator;
@ -120,8 +169,7 @@ static __init void alloc_memory_initiator(unsigned int cpu_pxm)
list_add_tail(&initiator->node, &initiators);
}
static __init void alloc_memory_target(unsigned int mem_pxm,
resource_size_t start, resource_size_t len)
static __init struct memory_target *alloc_target(unsigned int mem_pxm)
{
struct memory_target *target;
@ -129,7 +177,7 @@ static __init void alloc_memory_target(unsigned int mem_pxm,
if (!target) {
target = kzalloc(sizeof(*target), GFP_KERNEL);
if (!target)
return;
return NULL;
target->memory_pxm = mem_pxm;
target->processor_pxm = PXM_INVAL;
target->memregions = (struct resource) {
@ -142,6 +190,19 @@ static __init void alloc_memory_target(unsigned int mem_pxm,
INIT_LIST_HEAD(&target->caches);
}
return target;
}
static __init void alloc_memory_target(unsigned int mem_pxm,
resource_size_t start,
resource_size_t len)
{
struct memory_target *target;
target = alloc_target(mem_pxm);
if (!target)
return;
/*
* There are potentially multiple ranges per PXM, so record each
* in the per-target memregions resource tree.
@ -152,6 +213,18 @@ static __init void alloc_memory_target(unsigned int mem_pxm,
start, start + len, mem_pxm);
}
static __init void alloc_genport_target(unsigned int mem_pxm, u8 *handle)
{
struct memory_target *target;
target = alloc_target(mem_pxm);
if (!target)
return;
memcpy(target->gen_port_device_handle, handle,
ACPI_SRAT_DEVICE_HANDLE_SIZE);
}
static __init const char *hmat_data_type(u8 type)
{
switch (type) {
@ -228,24 +301,24 @@ static void hmat_update_target_access(struct memory_target *target,
{
switch (type) {
case ACPI_HMAT_ACCESS_LATENCY:
target->hmem_attrs[access].read_latency = value;
target->hmem_attrs[access].write_latency = value;
target->coord[access].read_latency = value;
target->coord[access].write_latency = value;
break;
case ACPI_HMAT_READ_LATENCY:
target->hmem_attrs[access].read_latency = value;
target->coord[access].read_latency = value;
break;
case ACPI_HMAT_WRITE_LATENCY:
target->hmem_attrs[access].write_latency = value;
target->coord[access].write_latency = value;
break;
case ACPI_HMAT_ACCESS_BANDWIDTH:
target->hmem_attrs[access].read_bandwidth = value;
target->hmem_attrs[access].write_bandwidth = value;
target->coord[access].read_bandwidth = value;
target->coord[access].write_bandwidth = value;
break;
case ACPI_HMAT_READ_BANDWIDTH:
target->hmem_attrs[access].read_bandwidth = value;
target->coord[access].read_bandwidth = value;
break;
case ACPI_HMAT_WRITE_BANDWIDTH:
target->hmem_attrs[access].write_bandwidth = value;
target->coord[access].write_bandwidth = value;
break;
default:
break;
@ -291,11 +364,28 @@ static __init void hmat_add_locality(struct acpi_hmat_locality *hmat_loc)
}
}
static __init void hmat_update_target(unsigned int tgt_pxm, unsigned int init_pxm,
u8 mem_hier, u8 type, u32 value)
{
struct memory_target *target = find_mem_target(tgt_pxm);
if (mem_hier != ACPI_HMAT_MEMORY)
return;
if (target && target->processor_pxm == init_pxm) {
hmat_update_target_access(target, type, value,
NODE_ACCESS_CLASS_0);
/* If the node has a CPU, update access 1 */
if (node_state(pxm_to_node(init_pxm), N_CPU))
hmat_update_target_access(target, type, value,
NODE_ACCESS_CLASS_1);
}
}
static __init int hmat_parse_locality(union acpi_subtable_headers *header,
const unsigned long end)
{
struct acpi_hmat_locality *hmat_loc = (void *)header;
struct memory_target *target;
unsigned int init, targ, total_size, ipds, tpds;
u32 *inits, *targs, value;
u16 *entries;
@ -336,15 +426,8 @@ static __init int hmat_parse_locality(union acpi_subtable_headers *header,
inits[init], targs[targ], value,
hmat_data_type_suffix(type));
if (mem_hier == ACPI_HMAT_MEMORY) {
target = find_mem_target(targs[targ]);
if (target && target->processor_pxm == inits[init]) {
hmat_update_target_access(target, type, value, 0);
/* If the node has a CPU, update access 1 */
if (node_state(pxm_to_node(inits[init]), N_CPU))
hmat_update_target_access(target, type, value, 1);
}
}
hmat_update_target(targs[targ], inits[init],
mem_hier, type, value);
}
}
@ -491,6 +574,27 @@ static __init int srat_parse_mem_affinity(union acpi_subtable_headers *header,
return 0;
}
static __init int srat_parse_genport_affinity(union acpi_subtable_headers *header,
const unsigned long end)
{
struct acpi_srat_generic_affinity *ga = (void *)header;
if (!ga)
return -EINVAL;
if (!(ga->flags & ACPI_SRAT_GENERIC_AFFINITY_ENABLED))
return 0;
/* Skip PCI device_handle for now */
if (ga->device_handle_type != 0)
return 0;
alloc_genport_target(ga->proximity_domain,
(u8 *)ga->device_handle);
return 0;
}
static u32 hmat_initiator_perf(struct memory_target *target,
struct memory_initiator *initiator,
struct acpi_hmat_locality *hmat_loc)
@ -592,6 +696,11 @@ static void hmat_update_target_attrs(struct memory_target *target,
u32 best = 0;
int i;
/* Don't update for generic port if there's no device handle */
if (access == NODE_ACCESS_CLASS_GENPORT_SINK &&
!(*(u16 *)target->gen_port_device_handle))
return;
bitmap_zero(p_nodes, MAX_NUMNODES);
/*
* If the Address Range Structure provides a local processor pxm, set
@ -661,6 +770,14 @@ static void __hmat_register_target_initiators(struct memory_target *target,
}
}
static void hmat_register_generic_target_initiators(struct memory_target *target)
{
static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
__hmat_register_target_initiators(target, p_nodes,
NODE_ACCESS_CLASS_GENPORT_SINK);
}
static void hmat_register_target_initiators(struct memory_target *target)
{
static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
@ -681,7 +798,7 @@ static void hmat_register_target_cache(struct memory_target *target)
static void hmat_register_target_perf(struct memory_target *target, int access)
{
unsigned mem_nid = pxm_to_node(target->memory_pxm);
node_set_perf_attrs(mem_nid, &target->hmem_attrs[access], access);
node_set_perf_attrs(mem_nid, &target->coord[access], access);
}
static void hmat_register_target_devices(struct memory_target *target)
@ -712,6 +829,17 @@ static void hmat_register_target(struct memory_target *target)
*/
hmat_register_target_devices(target);
/*
* Register generic port perf numbers. The nid may not be
* initialized and is still NUMA_NO_NODE.
*/
mutex_lock(&target_lock);
if (*(u16 *)target->gen_port_device_handle) {
hmat_register_generic_target_initiators(target);
target->registered = true;
}
mutex_unlock(&target_lock);
/*
* Skip offline nodes. This can happen when memory
* marked EFI_MEMORY_SP, "specific purpose", is applied
@ -726,8 +854,8 @@ static void hmat_register_target(struct memory_target *target)
if (!target->registered) {
hmat_register_target_initiators(target);
hmat_register_target_cache(target);
hmat_register_target_perf(target, 0);
hmat_register_target_perf(target, 1);
hmat_register_target_perf(target, NODE_ACCESS_CLASS_0);
hmat_register_target_perf(target, NODE_ACCESS_CLASS_1);
target->registered = true;
}
mutex_unlock(&target_lock);
@ -765,7 +893,7 @@ static int hmat_set_default_dram_perf(void)
int rc;
int nid, pxm;
struct memory_target *target;
struct node_hmem_attrs *attrs;
struct access_coordinate *attrs;
if (!default_dram_type)
return -EIO;
@ -775,7 +903,7 @@ static int hmat_set_default_dram_perf(void)
target = find_mem_target(pxm);
if (!target)
continue;
attrs = &target->hmem_attrs[1];
attrs = &target->coord[1];
rc = mt_set_default_dram_perf(nid, attrs, "ACPI HMAT");
if (rc)
return rc;
@ -789,7 +917,7 @@ static int hmat_calculate_adistance(struct notifier_block *self,
{
static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
struct memory_target *target;
struct node_hmem_attrs *perf;
struct access_coordinate *perf;
int *adist = data;
int pxm;
@ -802,7 +930,7 @@ static int hmat_calculate_adistance(struct notifier_block *self,
hmat_update_target_attrs(target, p_nodes, 1);
mutex_unlock(&target_lock);
perf = &target->hmem_attrs[1];
perf = &target->coord[1];
if (mt_perf_to_adistance(perf, adist))
return NOTIFY_OK;
@ -870,6 +998,13 @@ static __init int hmat_init(void)
ACPI_SRAT_TYPE_MEMORY_AFFINITY,
srat_parse_mem_affinity, 0) < 0)
goto out_put;
if (acpi_table_parse_entries(ACPI_SIG_SRAT,
sizeof(struct acpi_table_srat),
ACPI_SRAT_TYPE_GENERIC_PORT_AFFINITY,
srat_parse_genport_affinity, 0) < 0)
goto out_put;
acpi_put_table(tbl);
status = acpi_get_table(ACPI_SIG_HMAT, 0, &tbl);


@ -251,8 +251,9 @@ int __init_or_acpilib acpi_table_parse_entries_array(
return -ENODEV;
}
count = acpi_parse_entries_array(id, table_size, table_header,
proc, proc_num, max_entries);
count = acpi_parse_entries_array(id, table_size,
(union fw_table_header *)table_header,
proc, proc_num, max_entries);
acpi_put_table(table_header);
return count;


@ -74,14 +74,14 @@ static BIN_ATTR_RO(cpulist, CPULIST_FILE_MAX_BYTES);
* @dev: Device for this memory access class
* @list_node: List element in the node's access list
* @access: The access class rank
* @hmem_attrs: Heterogeneous memory performance attributes
* @coord: Heterogeneous memory performance coordinates
*/
struct node_access_nodes {
struct device dev;
struct list_head list_node;
unsigned int access;
#ifdef CONFIG_HMEM_REPORTING
struct node_hmem_attrs hmem_attrs;
struct access_coordinate coord;
#endif
};
#define to_access_nodes(dev) container_of(dev, struct node_access_nodes, dev)
@ -167,7 +167,7 @@ static ssize_t property##_show(struct device *dev, \
char *buf) \
{ \
return sysfs_emit(buf, "%u\n", \
to_access_nodes(dev)->hmem_attrs.property); \
to_access_nodes(dev)->coord.property); \
} \
static DEVICE_ATTR_RO(property)
@ -187,10 +187,10 @@ static struct attribute *access_attrs[] = {
/**
* node_set_perf_attrs - Set the performance values for given access class
* @nid: Node identifier to be set
* @hmem_attrs: Heterogeneous memory performance attributes
* @coord: Heterogeneous memory performance coordinates
* @access: The access class the for the given attributes
*/
void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs,
void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord,
unsigned int access)
{
struct node_access_nodes *c;
@ -205,7 +205,7 @@ void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs,
if (!c)
return;
c->hmem_attrs = *hmem_attrs;
c->coord = *coord;
for (i = 0; access_attrs[i] != NULL; i++) {
if (sysfs_add_file_to_group(&c->dev.kobj, access_attrs[i],
"initiators")) {


@ -5,6 +5,7 @@ menuconfig CXL_BUS
select FW_LOADER
select FW_UPLOAD
select PCI_DOE
select FIRMWARE_TABLE
help
CXL is a bus that is electrically compatible with PCI Express, but
layers three protocols on that signalling (CXL.io, CXL.cache, and
@ -54,8 +55,10 @@ config CXL_MEM_RAW_COMMANDS
config CXL_ACPI
tristate "CXL ACPI: Platform Support"
depends on ACPI
depends on ACPI_NUMA
default CXL_BUS
select ACPI_TABLE_LIB
select ACPI_HMAT
help
Enable support for host managed device memory (HDM) resources
published by a platform's ACPI CXL memory layout description. See


@ -6,6 +6,7 @@
#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/pci.h>
#include <linux/node.h>
#include <asm/div64.h>
#include "cxlpci.h"
#include "cxl.h"
@ -17,6 +18,10 @@ struct cxl_cxims_data {
u64 xormaps[] __counted_by(nr_maps);
};
static const guid_t acpi_cxl_qtg_id_guid =
GUID_INIT(0xF365F9A6, 0xA7DE, 0x4071,
0xA6, 0x6A, 0xB4, 0x0C, 0x0B, 0x4F, 0x8E, 0x52);
/*
* Find a targets entry (n) in the host bridge interleave list.
* CXL Specification 3.0 Table 9-22
@ -194,6 +199,123 @@ struct cxl_cfmws_context {
int id;
};
/**
* cxl_acpi_evaluate_qtg_dsm - Retrieve QTG ids via ACPI _DSM
* @handle: ACPI handle
* @coord: performance access coordinates
* @entries: number of QTG IDs to return
* @qos_class: int array provided by caller to return QTG IDs
*
* Return: number of QTG IDs returned, or -errno for errors
*
* Issue QTG _DSM with accompanied bandwidth and latency data in order to get
* the QTG IDs that are suitable for the performance point in order of most
* suitable to least suitable. Write back array of QTG IDs and return the
* actual number of QTG IDs written back.
*/
static int
cxl_acpi_evaluate_qtg_dsm(acpi_handle handle, struct access_coordinate *coord,
int entries, int *qos_class)
{
union acpi_object *out_obj, *out_buf, *obj;
union acpi_object in_array[4] = {
[0].integer = { ACPI_TYPE_INTEGER, coord->read_latency },
[1].integer = { ACPI_TYPE_INTEGER, coord->write_latency },
[2].integer = { ACPI_TYPE_INTEGER, coord->read_bandwidth },
[3].integer = { ACPI_TYPE_INTEGER, coord->write_bandwidth },
};
union acpi_object in_obj = {
.package = {
.type = ACPI_TYPE_PACKAGE,
.count = 4,
.elements = in_array,
},
};
int count, pkg_entries, i;
u16 max_qtg;
int rc;
if (!entries)
return -EINVAL;
out_obj = acpi_evaluate_dsm(handle, &acpi_cxl_qtg_id_guid, 1, 1, &in_obj);
if (!out_obj)
return -ENXIO;
if (out_obj->type != ACPI_TYPE_PACKAGE) {
rc = -ENXIO;
goto out;
}
/* Check Max QTG ID */
obj = &out_obj->package.elements[0];
if (obj->type != ACPI_TYPE_INTEGER) {
rc = -ENXIO;
goto out;
}
max_qtg = obj->integer.value;
/* It's legal to have 0 QTG entries */
pkg_entries = out_obj->package.count;
if (pkg_entries <= 1) {
rc = 0;
goto out;
}
/* Retrieve QTG IDs package */
obj = &out_obj->package.elements[1];
if (obj->type != ACPI_TYPE_PACKAGE) {
rc = -ENXIO;
goto out;
}
pkg_entries = obj->package.count;
count = min(entries, pkg_entries);
for (i = 0; i < count; i++) {
u16 qtg_id;
out_buf = &obj->package.elements[i];
if (out_buf->type != ACPI_TYPE_INTEGER) {
rc = -ENXIO;
goto out;
}
qtg_id = out_buf->integer.value;
if (qtg_id > max_qtg)
pr_warn("QTG ID %u greater than MAX %u\n",
qtg_id, max_qtg);
qos_class[i] = qtg_id;
}
rc = count;
out:
ACPI_FREE(out_obj);
return rc;
}
static int cxl_acpi_qos_class(struct cxl_root *cxl_root,
struct access_coordinate *coord, int entries,
int *qos_class)
{
struct device *dev = cxl_root->port.uport_dev;
acpi_handle handle;
if (!dev_is_platform(dev))
return -ENODEV;
handle = ACPI_HANDLE(dev);
if (!handle)
return -ENODEV;
return cxl_acpi_evaluate_qtg_dsm(handle, coord, entries, qos_class);
}
static const struct cxl_root_ops acpi_root_ops = {
.qos_class = cxl_acpi_qos_class,
};
static int cxl_parse_cfmws(union acpi_subtable_headers *header, void *arg,
const unsigned long end)
{
@ -389,8 +511,29 @@ static int cxl_get_chbs(struct device *dev, struct acpi_device *hb,
return 0;
}
static int get_genport_coordinates(struct device *dev, struct cxl_dport *dport)
{
struct acpi_device *hb = to_cxl_host_bridge(NULL, dev);
u32 uid;
int rc;
if (kstrtou32(acpi_device_uid(hb), 0, &uid))
return -EINVAL;
rc = acpi_get_genport_coordinates(uid, &dport->hb_coord);
if (rc < 0)
return rc;
/* Adjust back to picoseconds from nanoseconds */
dport->hb_coord.read_latency *= 1000;
dport->hb_coord.write_latency *= 1000;
return 0;
}
static int add_host_bridge_dport(struct device *match, void *arg)
{
int ret;
acpi_status rc;
struct device *bridge;
struct cxl_dport *dport;
@ -440,6 +583,10 @@ static int add_host_bridge_dport(struct device *match, void *arg)
if (IS_ERR(dport))
return PTR_ERR(dport);
ret = get_genport_coordinates(match, dport);
if (ret)
dev_dbg(match, "Failed to get generic port perf coordinates.\n");
return 0;
}
@ -656,6 +803,7 @@ static int cxl_acpi_probe(struct platform_device *pdev)
{
int rc;
struct resource *cxl_res;
struct cxl_root *cxl_root;
struct cxl_port *root_port;
struct device *host = &pdev->dev;
struct acpi_device *adev = ACPI_COMPANION(host);
@ -675,9 +823,10 @@ static int cxl_acpi_probe(struct platform_device *pdev)
cxl_res->end = -1;
cxl_res->flags = IORESOURCE_MEM;
root_port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
if (IS_ERR(root_port))
return PTR_ERR(root_port);
cxl_root = devm_cxl_add_root(host, &acpi_root_ops);
if (IS_ERR(cxl_root))
return PTR_ERR(cxl_root);
root_port = &cxl_root->port;
rc = bus_for_each_dev(adev->dev.bus, NULL, root_port,
add_host_bridge_dport);


@ -13,5 +13,6 @@ cxl_core-y += mbox.o
cxl_core-y += pci.o
cxl_core-y += hdm.o
cxl_core-y += pmu.o
cxl_core-y += cdat.o
cxl_core-$(CONFIG_TRACING) += trace.o
cxl_core-$(CONFIG_CXL_REGION) += region.o

drivers/cxl/core/cdat.c (new file, 521 lines)

@ -0,0 +1,521 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2023 Intel Corporation. All rights reserved. */
#include <linux/acpi.h>
#include <linux/xarray.h>
#include <linux/fw_table.h>
#include <linux/node.h>
#include <linux/overflow.h>
#include "cxlpci.h"
#include "cxlmem.h"
#include "core.h"
#include "cxl.h"
struct dsmas_entry {
struct range dpa_range;
u8 handle;
struct access_coordinate coord;
int entries;
int qos_class;
};
static int cdat_dsmas_handler(union acpi_subtable_headers *header, void *arg,
const unsigned long end)
{
struct acpi_cdat_header *hdr = &header->cdat;
struct acpi_cdat_dsmas *dsmas;
int size = sizeof(*hdr) + sizeof(*dsmas);
struct xarray *dsmas_xa = arg;
struct dsmas_entry *dent;
u16 len;
int rc;
len = le16_to_cpu((__force __le16)hdr->length);
if (len != size || (unsigned long)hdr + len > end) {
pr_warn("Malformed DSMAS table length: (%u:%u)\n", size, len);
return -EINVAL;
}
/* Skip common header */
dsmas = (struct acpi_cdat_dsmas *)(hdr + 1);
dent = kzalloc(sizeof(*dent), GFP_KERNEL);
if (!dent)
return -ENOMEM;
dent->handle = dsmas->dsmad_handle;
dent->dpa_range.start = le64_to_cpu((__force __le64)dsmas->dpa_base_address);
dent->dpa_range.end = le64_to_cpu((__force __le64)dsmas->dpa_base_address) +
le64_to_cpu((__force __le64)dsmas->dpa_length) - 1;
rc = xa_insert(dsmas_xa, dent->handle, dent, GFP_KERNEL);
if (rc) {
kfree(dent);
return rc;
}
return 0;
}
static void cxl_access_coordinate_set(struct access_coordinate *coord,
int access, unsigned int val)
{
switch (access) {
case ACPI_HMAT_ACCESS_LATENCY:
coord->read_latency = val;
coord->write_latency = val;
break;
case ACPI_HMAT_READ_LATENCY:
coord->read_latency = val;
break;
case ACPI_HMAT_WRITE_LATENCY:
coord->write_latency = val;
break;
case ACPI_HMAT_ACCESS_BANDWIDTH:
coord->read_bandwidth = val;
coord->write_bandwidth = val;
break;
case ACPI_HMAT_READ_BANDWIDTH:
coord->read_bandwidth = val;
break;
case ACPI_HMAT_WRITE_BANDWIDTH:
coord->write_bandwidth = val;
break;
}
}
static int cdat_dslbis_handler(union acpi_subtable_headers *header, void *arg,
const unsigned long end)
{
struct acpi_cdat_header *hdr = &header->cdat;
struct acpi_cdat_dslbis *dslbis;
int size = sizeof(*hdr) + sizeof(*dslbis);
struct xarray *dsmas_xa = arg;
struct dsmas_entry *dent;
__le64 le_base;
__le16 le_val;
u64 val;
u16 len;
int rc;
len = le16_to_cpu((__force __le16)hdr->length);
if (len != size || (unsigned long)hdr + len > end) {
pr_warn("Malformed DSLBIS table length: (%u:%u)\n", size, len);
return -EINVAL;
}
/* Skip common header */
dslbis = (struct acpi_cdat_dslbis *)(hdr + 1);
/* Skip unrecognized data type */
if (dslbis->data_type > ACPI_HMAT_WRITE_BANDWIDTH)
return 0;
/* Not a memory type, skip */
if ((dslbis->flags & ACPI_HMAT_MEMORY_HIERARCHY) != ACPI_HMAT_MEMORY)
return 0;
dent = xa_load(dsmas_xa, dslbis->handle);
if (!dent) {
pr_warn("No matching DSMAS entry for DSLBIS entry.\n");
return 0;
}
le_base = (__force __le64)dslbis->entry_base_unit;
le_val = (__force __le16)dslbis->entry[0];
rc = check_mul_overflow(le64_to_cpu(le_base),
le16_to_cpu(le_val), &val);
if (rc)
pr_warn("DSLBIS value overflowed.\n");
cxl_access_coordinate_set(&dent->coord, dslbis->data_type, val);
return 0;
}
static int cdat_table_parse_output(int rc)
{
if (rc < 0)
return rc;
if (rc == 0)
return -ENOENT;
return 0;
}
static int cxl_cdat_endpoint_process(struct cxl_port *port,
struct xarray *dsmas_xa)
{
int rc;
rc = cdat_table_parse(ACPI_CDAT_TYPE_DSMAS, cdat_dsmas_handler,
dsmas_xa, port->cdat.table);
rc = cdat_table_parse_output(rc);
if (rc)
return rc;
rc = cdat_table_parse(ACPI_CDAT_TYPE_DSLBIS, cdat_dslbis_handler,
dsmas_xa, port->cdat.table);
return cdat_table_parse_output(rc);
}
static int cxl_port_perf_data_calculate(struct cxl_port *port,
struct xarray *dsmas_xa)
{
struct access_coordinate c;
struct dsmas_entry *dent;
int valid_entries = 0;
unsigned long index;
int rc;
rc = cxl_endpoint_get_perf_coordinates(port, &c);
if (rc) {
dev_dbg(&port->dev, "Failed to retrieve perf coordinates.\n");
return rc;
}
struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port);
if (!cxl_root)
return -ENODEV;
if (!cxl_root->ops || !cxl_root->ops->qos_class)
return -EOPNOTSUPP;
xa_for_each(dsmas_xa, index, dent) {
int qos_class;
dent->coord.read_latency = dent->coord.read_latency +
c.read_latency;
dent->coord.write_latency = dent->coord.write_latency +
c.write_latency;
dent->coord.read_bandwidth = min_t(int, c.read_bandwidth,
dent->coord.read_bandwidth);
dent->coord.write_bandwidth = min_t(int, c.write_bandwidth,
dent->coord.write_bandwidth);
dent->entries = 1;
rc = cxl_root->ops->qos_class(cxl_root, &dent->coord, 1,
&qos_class);
if (rc != 1)
continue;
valid_entries++;
dent->qos_class = qos_class;
}
if (!valid_entries)
return -ENOENT;
return 0;
}
static void add_perf_entry(struct device *dev, struct dsmas_entry *dent,
struct list_head *list)
{
struct cxl_dpa_perf *dpa_perf;
dpa_perf = kzalloc(sizeof(*dpa_perf), GFP_KERNEL);
if (!dpa_perf)
return;
dpa_perf->dpa_range = dent->dpa_range;
dpa_perf->coord = dent->coord;
dpa_perf->qos_class = dent->qos_class;
list_add_tail(&dpa_perf->list, list);
dev_dbg(dev,
"DSMAS: dpa: %#llx qos: %d read_bw: %d write_bw %d read_lat: %d write_lat: %d\n",
dent->dpa_range.start, dpa_perf->qos_class,
dent->coord.read_bandwidth, dent->coord.write_bandwidth,
dent->coord.read_latency, dent->coord.write_latency);
}
static void free_perf_ents(void *data)
{
struct cxl_memdev_state *mds = data;
struct cxl_dpa_perf *dpa_perf, *n;
LIST_HEAD(discard);
list_splice_tail_init(&mds->ram_perf_list, &discard);
list_splice_tail_init(&mds->pmem_perf_list, &discard);
list_for_each_entry_safe(dpa_perf, n, &discard, list) {
list_del(&dpa_perf->list);
kfree(dpa_perf);
}
}
static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds,
struct xarray *dsmas_xa)
{
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
struct device *dev = cxlds->dev;
struct range pmem_range = {
.start = cxlds->pmem_res.start,
.end = cxlds->pmem_res.end,
};
struct range ram_range = {
.start = cxlds->ram_res.start,
.end = cxlds->ram_res.end,
};
struct dsmas_entry *dent;
unsigned long index;
xa_for_each(dsmas_xa, index, dent) {
if (resource_size(&cxlds->ram_res) &&
range_contains(&ram_range, &dent->dpa_range))
add_perf_entry(dev, dent, &mds->ram_perf_list);
else if (resource_size(&cxlds->pmem_res) &&
range_contains(&pmem_range, &dent->dpa_range))
add_perf_entry(dev, dent, &mds->pmem_perf_list);
else
dev_dbg(dev, "no partition for dsmas dpa: %#llx\n",
dent->dpa_range.start);
}
devm_add_action_or_reset(&cxlds->cxlmd->dev, free_perf_ents, mds);
}
static int match_cxlrd_qos_class(struct device *dev, void *data)
{
int dev_qos_class = *(int *)data;
struct cxl_root_decoder *cxlrd;
if (!is_root_decoder(dev))
return 0;
cxlrd = to_cxl_root_decoder(dev);
if (cxlrd->qos_class == CXL_QOS_CLASS_INVALID)
return 0;
if (cxlrd->qos_class == dev_qos_class)
return 1;
return 0;
}
static void cxl_qos_match(struct cxl_port *root_port,
struct list_head *work_list,
struct list_head *discard_list)
{
struct cxl_dpa_perf *dpa_perf, *n;
list_for_each_entry_safe(dpa_perf, n, work_list, list) {
int rc;
if (dpa_perf->qos_class == CXL_QOS_CLASS_INVALID)
return;
rc = device_for_each_child(&root_port->dev,
(void *)&dpa_perf->qos_class,
match_cxlrd_qos_class);
if (!rc)
list_move_tail(&dpa_perf->list, discard_list);
}
}
static int match_cxlrd_hb(struct device *dev, void *data)
{
struct device *host_bridge = data;
struct cxl_switch_decoder *cxlsd;
struct cxl_root_decoder *cxlrd;
if (!is_root_decoder(dev))
return 0;
cxlrd = to_cxl_root_decoder(dev);
cxlsd = &cxlrd->cxlsd;
guard(rwsem_read)(&cxl_region_rwsem);
for (int i = 0; i < cxlsd->nr_targets; i++) {
if (host_bridge == cxlsd->target[i]->dport_dev)
return 1;
}
return 0;
}
static void discard_dpa_perf(struct list_head *list)
{
struct cxl_dpa_perf *dpa_perf, *n;
list_for_each_entry_safe(dpa_perf, n, list, list) {
list_del(&dpa_perf->list);
kfree(dpa_perf);
}
}
DEFINE_FREE(dpa_perf, struct list_head *, if (!list_empty(_T)) discard_dpa_perf(_T))
static int cxl_qos_class_verify(struct cxl_memdev *cxlmd)
{
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
LIST_HEAD(__discard);
struct list_head *discard __free(dpa_perf) = &__discard;
struct cxl_port *root_port;
int rc;
struct cxl_root *cxl_root __free(put_cxl_root) =
find_cxl_root(cxlmd->endpoint);
if (!cxl_root)
return -ENODEV;
root_port = &cxl_root->port;
/* Check that the QTG IDs are all sane between end device and root decoders */
cxl_qos_match(root_port, &mds->ram_perf_list, discard);
cxl_qos_match(root_port, &mds->pmem_perf_list, discard);
/* Check to make sure that the device's host bridge is under a root decoder */
rc = device_for_each_child(&root_port->dev,
(void *)cxlmd->endpoint->host_bridge,
match_cxlrd_hb);
if (!rc) {
list_splice_tail_init(&mds->ram_perf_list, discard);
list_splice_tail_init(&mds->pmem_perf_list, discard);
}
return rc;
}
static void discard_dsmas(struct xarray *xa)
{
unsigned long index;
void *ent;
xa_for_each(xa, index, ent) {
xa_erase(xa, index);
kfree(ent);
}
xa_destroy(xa);
}
DEFINE_FREE(dsmas, struct xarray *, if (_T) discard_dsmas(_T))
void cxl_endpoint_parse_cdat(struct cxl_port *port)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(port->uport_dev);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct xarray __dsmas_xa;
struct xarray *dsmas_xa __free(dsmas) = &__dsmas_xa;
int rc;
xa_init(&__dsmas_xa);
if (!port->cdat.table)
return;
rc = cxl_cdat_endpoint_process(port, dsmas_xa);
if (rc < 0) {
dev_dbg(&port->dev, "Failed to parse CDAT: %d\n", rc);
return;
}
rc = cxl_port_perf_data_calculate(port, dsmas_xa);
if (rc) {
dev_dbg(&port->dev, "Failed to do perf coord calculations.\n");
return;
}
cxl_memdev_set_qos_class(cxlds, dsmas_xa);
cxl_qos_class_verify(cxlmd);
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_parse_cdat, CXL);
static int cdat_sslbis_handler(union acpi_subtable_headers *header, void *arg,
const unsigned long end)
{
struct acpi_cdat_sslbis *sslbis;
int size = sizeof(header->cdat) + sizeof(*sslbis);
struct cxl_port *port = arg;
struct device *dev = &port->dev;
struct acpi_cdat_sslbe *entry;
int remain, entries, i;
u16 len;
len = le16_to_cpu((__force __le16)header->cdat.length);
remain = len - size;
if (!remain || remain % sizeof(*entry) ||
(unsigned long)header + len > end) {
dev_warn(dev, "Malformed SSLBIS table length: (%u)\n", len);
return -EINVAL;
}
/* Skip common header */
sslbis = (struct acpi_cdat_sslbis *)((unsigned long)header +
sizeof(header->cdat));
/* Unrecognized data type, we can skip */
if (sslbis->data_type > ACPI_HMAT_WRITE_BANDWIDTH)
return 0;
entries = remain / sizeof(*entry);
entry = (struct acpi_cdat_sslbe *)((unsigned long)header + sizeof(*sslbis));
for (i = 0; i < entries; i++) {
u16 x = le16_to_cpu((__force __le16)entry->portx_id);
u16 y = le16_to_cpu((__force __le16)entry->porty_id);
__le64 le_base;
__le16 le_val;
struct cxl_dport *dport;
unsigned long index;
u16 dsp_id;
u64 val;
switch (x) {
case ACPI_CDAT_SSLBIS_US_PORT:
dsp_id = y;
break;
case ACPI_CDAT_SSLBIS_ANY_PORT:
switch (y) {
case ACPI_CDAT_SSLBIS_US_PORT:
dsp_id = x;
break;
case ACPI_CDAT_SSLBIS_ANY_PORT:
dsp_id = ACPI_CDAT_SSLBIS_ANY_PORT;
break;
default:
dsp_id = y;
break;
}
break;
default:
dsp_id = x;
break;
}
le_base = (__force __le64)sslbis->entry_base_unit;
le_val = (__force __le16)entry->latency_or_bandwidth;
if (check_mul_overflow(le64_to_cpu(le_base),
le16_to_cpu(le_val), &val))
dev_warn(dev, "SSLBIS value overflowed!\n");
xa_for_each(&port->dports, index, dport) {
if (dsp_id == ACPI_CDAT_SSLBIS_ANY_PORT ||
dsp_id == dport->port_id)
cxl_access_coordinate_set(&dport->sw_coord,
sslbis->data_type,
val);
}
entry++;
}
return 0;
}
void cxl_switch_parse_cdat(struct cxl_port *port)
{
int rc;
if (!port->cdat.table)
return;
rc = cdat_table_parse(ACPI_CDAT_TYPE_SSLBIS, cdat_sslbis_handler,
port, port->cdat.table);
rc = cdat_table_parse_output(rc);
if (rc)
dev_dbg(&port->dev, "Failed to parse SSLBIS: %d\n", rc);
}
EXPORT_SYMBOL_NS_GPL(cxl_switch_parse_cdat, CXL);
MODULE_IMPORT_NS(CXL);


@ -88,4 +88,6 @@ enum cxl_poison_trace_type {
CXL_POISON_TRACE_CLEAR,
};
long cxl_pci_get_latency(struct pci_dev *pdev);
#endif /* __CXL_CORE_H__ */


@ -63,6 +63,7 @@ static struct cxl_mem_command cxl_mem_commands[CXL_MEM_COMMAND_ID_MAX] = {
CXL_CMD(GET_SHUTDOWN_STATE, 0, 0x1, 0),
CXL_CMD(SET_SHUTDOWN_STATE, 0x1, 0, 0),
CXL_CMD(GET_SCAN_MEDIA_CAPS, 0x10, 0x4, 0),
CXL_CMD(GET_TIMESTAMP, 0, 0x8, 0),
};
/*
@ -836,54 +837,37 @@ int cxl_enumerate_cmds(struct cxl_memdev_state *mds)
}
EXPORT_SYMBOL_NS_GPL(cxl_enumerate_cmds, CXL);
/*
* General Media Event Record
* CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
*/
static const uuid_t gen_media_event_uuid =
UUID_INIT(0xfbcd0a77, 0xc260, 0x417f,
0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6);
/*
* DRAM Event Record
* CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
*/
static const uuid_t dram_event_uuid =
UUID_INIT(0x601dcbb3, 0x9c06, 0x4eab,
0xb8, 0xaf, 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24);
/*
* Memory Module Event Record
* CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45
*/
static const uuid_t mem_mod_event_uuid =
UUID_INIT(0xfe927475, 0xdd59, 0x4339,
0xa5, 0x86, 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74);
static void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
enum cxl_event_log_type type,
struct cxl_event_record_raw *record)
void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
enum cxl_event_log_type type,
enum cxl_event_type event_type,
const uuid_t *uuid, union cxl_event *evt)
{
uuid_t *id = &record->hdr.id;
if (event_type == CXL_CPER_EVENT_GEN_MEDIA)
trace_cxl_general_media(cxlmd, type, &evt->gen_media);
else if (event_type == CXL_CPER_EVENT_DRAM)
trace_cxl_dram(cxlmd, type, &evt->dram);
else if (event_type == CXL_CPER_EVENT_MEM_MODULE)
trace_cxl_memory_module(cxlmd, type, &evt->mem_module);
else
trace_cxl_generic_event(cxlmd, type, uuid, &evt->generic);
}
EXPORT_SYMBOL_NS_GPL(cxl_event_trace_record, CXL);
if (uuid_equal(id, &gen_media_event_uuid)) {
struct cxl_event_gen_media *rec =
(struct cxl_event_gen_media *)record;
static void __cxl_event_trace_record(const struct cxl_memdev *cxlmd,
enum cxl_event_log_type type,
struct cxl_event_record_raw *record)
{
enum cxl_event_type ev_type = CXL_CPER_EVENT_GENERIC;
const uuid_t *uuid = &record->id;
trace_cxl_general_media(cxlmd, type, rec);
} else if (uuid_equal(id, &dram_event_uuid)) {
struct cxl_event_dram *rec = (struct cxl_event_dram *)record;
if (uuid_equal(uuid, &CXL_EVENT_GEN_MEDIA_UUID))
ev_type = CXL_CPER_EVENT_GEN_MEDIA;
else if (uuid_equal(uuid, &CXL_EVENT_DRAM_UUID))
ev_type = CXL_CPER_EVENT_DRAM;
else if (uuid_equal(uuid, &CXL_EVENT_MEM_MODULE_UUID))
ev_type = CXL_CPER_EVENT_MEM_MODULE;
trace_cxl_dram(cxlmd, type, rec);
} else if (uuid_equal(id, &mem_mod_event_uuid)) {
struct cxl_event_mem_module *rec =
(struct cxl_event_mem_module *)record;
trace_cxl_memory_module(cxlmd, type, rec);
} else {
/* For unknown record types print just the header */
trace_cxl_generic_event(cxlmd, type, record);
}
cxl_event_trace_record(cxlmd, type, ev_type, uuid, &record->event);
}
static int cxl_clear_event_record(struct cxl_memdev_state *mds,
@ -926,7 +910,10 @@ static int cxl_clear_event_record(struct cxl_memdev_state *mds,
*/
i = 0;
for (cnt = 0; cnt < total; cnt++) {
payload->handles[i++] = get_pl->records[cnt].hdr.handle;
struct cxl_event_record_raw *raw = &get_pl->records[cnt];
struct cxl_event_generic *gen = &raw->event.generic;
payload->handles[i++] = gen->hdr.handle;
dev_dbg(mds->cxlds.dev, "Event log '%d': Clearing %u\n", log,
le16_to_cpu(payload->handles[i]));
@ -991,8 +978,8 @@ static void cxl_mem_get_records_log(struct cxl_memdev_state *mds,
break;
for (i = 0; i < nr_rec; i++)
cxl_event_trace_record(cxlmd, type,
&payload->records[i]);
__cxl_event_trace_record(cxlmd, type,
&payload->records[i]);
if (payload->flags & CXL_GET_EVENT_FLAG_OVERFLOW)
trace_cxl_overflow(cxlmd, type, payload);
@ -1404,6 +1391,8 @@ struct cxl_memdev_state *cxl_memdev_state_create(struct device *dev)
mds->cxlds.reg_map.host = dev;
mds->cxlds.reg_map.resource = CXL_RESOURCE_NONE;
mds->cxlds.type = CXL_DEVTYPE_CLASSMEM;
INIT_LIST_HEAD(&mds->ram_perf_list);
INIT_LIST_HEAD(&mds->pmem_perf_list);
return mds;
}


@ -114,7 +114,7 @@ static DEVICE_ATTR_RO(serial);
static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%d\n", dev_to_node(dev));
return sysfs_emit(buf, "%d\n", dev_to_node(dev));
}
static DEVICE_ATTR_RO(numa_node);


@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2021 Intel Corporation. All rights reserved. */
#include <linux/units.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/device.h>
#include <linux/delay.h>
@ -979,3 +980,38 @@ pci_ers_result_t cxl_error_detected(struct pci_dev *pdev,
return PCI_ERS_RESULT_NEED_RESET;
}
EXPORT_SYMBOL_NS_GPL(cxl_error_detected, CXL);
static int cxl_flit_size(struct pci_dev *pdev)
{
if (cxl_pci_flit_256(pdev))
return 256;
return 68;
}
/**
* cxl_pci_get_latency - calculate the link latency for the PCIe link
* @pdev: PCI device
*
* return: calculated latency or 0 for no latency
*
* CXL Memory Device SW Guide v1.0 2.11.4 Link latency calculation
* Link latency = LinkPropagationLatency + FlitLatency + RetimerLatency
* LinkPropagationLatency is negligible, so 0 will be used
* RetimerLatency is assumed to be negligible and 0 will be used
* FlitLatency = FlitSize / LinkBandwidth
* FlitSize is defined by the spec, CXL rev3.0 4.2.1:
* a 68B flit is used up to 32GT/s; above 32GT/s, a 256B flit is used.
* The FlitLatency is converted to picoseconds.
*/
long cxl_pci_get_latency(struct pci_dev *pdev)
{
long bw;
bw = pcie_link_speed_mbps(pdev);
if (bw < 0)
return 0;
bw /= BITS_PER_BYTE;
return cxl_flit_size(pdev) * MEGA / bw;
}
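
As a hedged worked example of the formula above: if pcie_link_speed_mbps() reported 16000 Mb/s and cxl_flit_size() resolved to 68 bytes, bw becomes 16000 / 8 = 2000 MB/s and cxl_pci_get_latency() returns 68 * 1,000,000 / 2000 = 34,000 ps, i.e. roughly 34 ns of flit latency.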


@ -64,14 +64,14 @@ static int match_nvdimm_bridge(struct device *dev, void *data)
struct cxl_nvdimm_bridge *cxl_find_nvdimm_bridge(struct cxl_memdev *cxlmd)
{
struct cxl_port *port = find_cxl_root(cxlmd->endpoint);
struct cxl_root *cxl_root __free(put_cxl_root) =
find_cxl_root(cxlmd->endpoint);
struct device *dev;
if (!port)
if (!cxl_root)
return NULL;
dev = device_find_child(&port->dev, NULL, match_nvdimm_bridge);
put_device(&port->dev);
dev = device_find_child(&cxl_root->port.dev, NULL, match_nvdimm_bridge);
if (!dev)
return NULL;


@ -9,6 +9,7 @@
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/node.h>
#include <cxlmem.h>
#include <cxlpci.h>
#include <cxl.h>
@ -172,14 +173,10 @@ static ssize_t target_list_show(struct device *dev,
{
struct cxl_switch_decoder *cxlsd = to_cxl_switch_decoder(dev);
ssize_t offset;
unsigned int seq;
int rc;
do {
seq = read_seqbegin(&cxlsd->target_lock);
rc = emit_target_list(cxlsd, buf);
} while (read_seqretry(&cxlsd->target_lock, seq));
guard(rwsem_read)(&cxl_region_rwsem);
rc = emit_target_list(cxlsd, buf);
if (rc < 0)
return rc;
offset = rc;
@ -541,7 +538,10 @@ static void cxl_port_release(struct device *dev)
xa_destroy(&port->dports);
xa_destroy(&port->regions);
ida_free(&cxl_port_ida, port->id);
kfree(port);
if (is_cxl_root(port))
kfree(to_cxl_root(port));
else
kfree(port);
}
static ssize_t decoders_committed_show(struct device *dev,
@ -669,17 +669,31 @@ static struct lock_class_key cxl_port_key;
static struct cxl_port *cxl_port_alloc(struct device *uport_dev,
struct cxl_dport *parent_dport)
{
struct cxl_port *port;
struct cxl_root *cxl_root __free(kfree) = NULL;
struct cxl_port *port, *_port __free(kfree) = NULL;
struct device *dev;
int rc;
port = kzalloc(sizeof(*port), GFP_KERNEL);
if (!port)
return ERR_PTR(-ENOMEM);
/* No parent_dport, root cxl_port */
if (!parent_dport) {
cxl_root = kzalloc(sizeof(*cxl_root), GFP_KERNEL);
if (!cxl_root)
return ERR_PTR(-ENOMEM);
} else {
_port = kzalloc(sizeof(*port), GFP_KERNEL);
if (!_port)
return ERR_PTR(-ENOMEM);
}
rc = ida_alloc(&cxl_port_ida, GFP_KERNEL);
if (rc < 0)
goto err;
return ERR_PTR(rc);
if (cxl_root)
port = &no_free_ptr(cxl_root)->port;
else
port = no_free_ptr(_port);
port->id = rc;
port->uport_dev = uport_dev;
@ -731,10 +745,6 @@ static struct cxl_port *cxl_port_alloc(struct device *uport_dev,
dev->type = &cxl_port_type;
return port;
err:
kfree(port);
return ERR_PTR(rc);
}
static int cxl_setup_comp_regs(struct device *host, struct cxl_register_map *map,
@ -841,6 +851,9 @@ static struct cxl_port *__devm_cxl_add_port(struct device *host,
if (rc)
return ERR_PTR(rc);
if (parent_dport && dev_is_pci(uport_dev))
port->pci_latency = cxl_pci_get_latency(to_pci_dev(uport_dev));
return port;
err:
@ -884,6 +897,22 @@ struct cxl_port *devm_cxl_add_port(struct device *host,
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_port, CXL);
struct cxl_root *devm_cxl_add_root(struct device *host,
const struct cxl_root_ops *ops)
{
struct cxl_root *cxl_root;
struct cxl_port *port;
port = devm_cxl_add_port(host, host, CXL_RESOURCE_NONE, NULL);
if (IS_ERR(port))
return (struct cxl_root *)port;
cxl_root = to_cxl_root(port);
cxl_root->ops = ops;
return cxl_root;
}
EXPORT_SYMBOL_NS_GPL(devm_cxl_add_root, CXL);
struct pci_bus *cxl_port_to_pci_bus(struct cxl_port *port)
{
/* There is no pci_bus associated with a CXL platform-root port */
@ -939,7 +968,7 @@ static bool dev_is_cxl_root_child(struct device *dev)
return false;
}
struct cxl_port *find_cxl_root(struct cxl_port *port)
struct cxl_root *find_cxl_root(struct cxl_port *port)
{
struct cxl_port *iter = port;
@ -949,10 +978,19 @@ struct cxl_port *find_cxl_root(struct cxl_port *port)
if (!iter)
return NULL;
get_device(&iter->dev);
return iter;
return to_cxl_root(iter);
}
EXPORT_SYMBOL_NS_GPL(find_cxl_root, CXL);
void put_cxl_root(struct cxl_root *cxl_root)
{
if (!cxl_root)
return;
put_device(&cxl_root->port.dev);
}
EXPORT_SYMBOL_NS_GPL(put_cxl_root, CXL);
static struct cxl_dport *find_dport(struct cxl_port *port, int id)
{
struct cxl_dport *dport;
@ -1108,6 +1146,9 @@ __devm_cxl_add_dport(struct cxl_port *port, struct device *dport_dev,
if (rc)
return ERR_PTR(rc);
if (dev_is_pci(dport_dev))
dport->link_latency = cxl_pci_get_latency(to_pci_dev(dport_dev));
return dport;
}
@ -1633,7 +1674,7 @@ EXPORT_SYMBOL_NS_GPL(cxl_mem_find_port, CXL);
static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd,
struct cxl_port *port, int *target_map)
{
int i, rc = 0;
int i;
if (!target_map)
return 0;
@ -1643,19 +1684,16 @@ static int decoder_populate_targets(struct cxl_switch_decoder *cxlsd,
if (xa_empty(&port->dports))
return -EINVAL;
write_seqlock(&cxlsd->target_lock);
for (i = 0; i < cxlsd->nr_targets; i++) {
guard(rwsem_write)(&cxl_region_rwsem);
for (i = 0; i < cxlsd->cxld.interleave_ways; i++) {
struct cxl_dport *dport = find_dport(port, target_map[i]);
if (!dport) {
rc = -ENXIO;
break;
}
if (!dport)
return -ENXIO;
cxlsd->target[i] = dport;
}
write_sequnlock(&cxlsd->target_lock);
return rc;
return 0;
}
struct cxl_dport *cxl_hb_modulo(struct cxl_root_decoder *cxlrd, int pos)
@ -1725,7 +1763,6 @@ static int cxl_switch_decoder_init(struct cxl_port *port,
return -EINVAL;
cxlsd->nr_targets = nr_targets;
seqlock_init(&cxlsd->target_lock);
return cxl_decoder_init(port, &cxlsd->cxld);
}
@ -2059,6 +2096,80 @@ bool schedule_cxl_memdev_detach(struct cxl_memdev *cxlmd)
}
EXPORT_SYMBOL_NS_GPL(schedule_cxl_memdev_detach, CXL);
static void combine_coordinates(struct access_coordinate *c1,
struct access_coordinate *c2)
{
if (c2->write_bandwidth)
c1->write_bandwidth = min(c1->write_bandwidth,
c2->write_bandwidth);
c1->write_latency += c2->write_latency;
if (c2->read_bandwidth)
c1->read_bandwidth = min(c1->read_bandwidth,
c2->read_bandwidth);
c1->read_latency += c2->read_latency;
}
/**
* cxl_endpoint_get_perf_coordinates - Retrieve performance numbers stored in dports
* of CXL path
* @port: endpoint cxl_port
* @coord: output performance data
*
* Return: errno on failure, 0 on success.
*/
int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
struct access_coordinate *coord)
{
struct access_coordinate c = {
.read_bandwidth = UINT_MAX,
.write_bandwidth = UINT_MAX,
};
struct cxl_port *iter = port;
struct cxl_dport *dport;
struct pci_dev *pdev;
unsigned int bw;
if (!is_cxl_endpoint(port))
return -EINVAL;
dport = iter->parent_dport;
/*
* Exit the loop when the parent port of the current port is cxl root.
* The iterative loop starts at the endpoint and gathers the
* latency of the CXL link from the current iter to the next downstream
* port each iteration. If the parent is cxl root then there is
* nothing to gather.
*/
while (iter && !is_cxl_root(to_cxl_port(iter->dev.parent))) {
combine_coordinates(&c, &dport->sw_coord);
c.write_latency += dport->link_latency;
c.read_latency += dport->link_latency;
iter = to_cxl_port(iter->dev.parent);
dport = iter->parent_dport;
}
/* Augment with the generic port (host bridge) perf data */
combine_coordinates(&c, &dport->hb_coord);
/* Get the calculated PCI paths bandwidth */
pdev = to_pci_dev(port->uport_dev->parent);
bw = pcie_bandwidth_available(pdev, NULL, NULL, NULL);
if (bw == 0)
return -ENXIO;
bw /= BITS_PER_BYTE;
c.write_bandwidth = min(c.write_bandwidth, bw);
c.read_bandwidth = min(c.read_bandwidth, bw);
*coord = c;
return 0;
}
EXPORT_SYMBOL_NS_GPL(cxl_endpoint_get_perf_coordinates, CXL);
/* for user tooling to ensure port disable work has completed */
static ssize_t flush_store(const struct bus_type *bus, const char *buf, size_t count)
{


@ -397,7 +397,7 @@ static ssize_t interleave_ways_store(struct device *dev,
return rc;
/*
* Even for x3, x9, and x12 interleaves the region interleave must be a
* Even for x3, x6, and x12 interleaves the region interleave must be a
* power of 2 multiple of the host bridge interleave.
*/
if (!is_power_of_2(val / cxld->interleave_ways) ||
@ -552,8 +552,9 @@ static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
res = alloc_free_mem_region(cxlrd->res, size, SZ_256M,
dev_name(&cxlr->dev));
if (IS_ERR(res)) {
dev_dbg(&cxlr->dev, "failed to allocate HPA: %ld\n",
PTR_ERR(res));
dev_dbg(&cxlr->dev,
"HPA allocation error (%ld) for size:%pap in %s %pr\n",
PTR_ERR(res), &size, cxlrd->res->name, cxlrd->res);
return PTR_ERR(res);
}
@ -2083,13 +2084,13 @@ static struct cxl_region *to_cxl_region(struct device *dev)
return container_of(dev, struct cxl_region, dev);
}
static void unregister_region(void *dev)
static void unregister_region(void *_cxlr)
{
struct cxl_region *cxlr = to_cxl_region(dev);
struct cxl_region *cxlr = _cxlr;
struct cxl_region_params *p = &cxlr->params;
int i;
device_del(dev);
device_del(&cxlr->dev);
/*
* Now that region sysfs is shutdown, the parameter block is now
@ -2100,7 +2101,7 @@ static void unregister_region(void *dev)
detach_target(cxlr, i);
cxl_region_iomem_release(cxlr);
put_device(dev);
put_device(&cxlr->dev);
}
static struct lock_class_key cxl_region_key;


@ -181,6 +181,7 @@ TRACE_EVENT(cxl_overflow,
* 1) Add CXL_EVT_TP_entry to TP_STRUCT__entry
* 2) Use CXL_EVT_TP_fast_assign within TP_fast_assign;
* pass the dev, log, and CXL event header
* NOTE: The uuid must be assigned by the specific trace event
* 3) Use CXL_EVT_TP_printk() instead of TP_printk()
*
* See the generic_event tracepoint as an example.
@ -203,7 +204,6 @@ TRACE_EVENT(cxl_overflow,
__assign_str(host, dev_name((cxlmd)->dev.parent)); \
__entry->log = (l); \
__entry->serial = (cxlmd)->cxlds->serial; \
memcpy(&__entry->hdr_uuid, &(hdr).id, sizeof(uuid_t)); \
__entry->hdr_length = (hdr).length; \
__entry->hdr_flags = get_unaligned_le24((hdr).flags); \
__entry->hdr_handle = le16_to_cpu((hdr).handle); \
@ -225,9 +225,9 @@ TRACE_EVENT(cxl_overflow,
TRACE_EVENT(cxl_generic_event,
TP_PROTO(const struct cxl_memdev *cxlmd, enum cxl_event_log_type log,
struct cxl_event_record_raw *rec),
const uuid_t *uuid, struct cxl_event_generic *gen_rec),
TP_ARGS(cxlmd, log, rec),
TP_ARGS(cxlmd, log, uuid, gen_rec),
TP_STRUCT__entry(
CXL_EVT_TP_entry
@ -235,8 +235,9 @@ TRACE_EVENT(cxl_generic_event,
),
TP_fast_assign(
CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr);
memcpy(__entry->data, &rec->data, CXL_EVENT_RECORD_DATA_LENGTH);
CXL_EVT_TP_fast_assign(cxlmd, log, gen_rec->hdr);
memcpy(&__entry->hdr_uuid, uuid, sizeof(uuid_t));
memcpy(__entry->data, gen_rec->data, CXL_EVENT_RECORD_DATA_LENGTH);
),
CXL_EVT_TP_printk("%s",
@ -337,6 +338,7 @@ TRACE_EVENT(cxl_general_media,
TP_fast_assign(
CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr);
memcpy(&__entry->hdr_uuid, &CXL_EVENT_GEN_MEDIA_UUID, sizeof(uuid_t));
/* General Media */
__entry->dpa = le64_to_cpu(rec->phys_addr);
@ -423,6 +425,7 @@ TRACE_EVENT(cxl_dram,
TP_fast_assign(
CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr);
memcpy(&__entry->hdr_uuid, &CXL_EVENT_DRAM_UUID, sizeof(uuid_t));
/* DRAM */
__entry->dpa = le64_to_cpu(rec->phys_addr);
@ -570,6 +573,7 @@ TRACE_EVENT(cxl_memory_module,
TP_fast_assign(
CXL_EVT_TP_fast_assign(cxlmd, log, rec->hdr);
memcpy(&__entry->hdr_uuid, &CXL_EVENT_MEM_MODULE_UUID, sizeof(uuid_t));
/* Memory Module Event */
__entry->event_type = rec->event_type;


@ -8,6 +8,7 @@
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <linux/node.h>
#include <linux/io.h>
/**
@ -412,7 +413,6 @@ struct cxl_endpoint_decoder {
/**
* struct cxl_switch_decoder - Switch specific CXL HDM Decoder
* @cxld: base cxl_decoder object
* @target_lock: coordinate coherent reads of the target list
* @nr_targets: number of elements in @target
* @target: active ordered target list in current decoder configuration
*
@ -424,7 +424,6 @@ struct cxl_endpoint_decoder {
*/
struct cxl_switch_decoder {
struct cxl_decoder cxld;
seqlock_t target_lock;
int nr_targets;
struct cxl_dport *target[];
};
@ -590,6 +589,7 @@ struct cxl_dax_region {
* @depth: How deep this port is relative to the root. depth 0 is the root.
* @cdat: Cached CDAT data
* @cdat_available: Should a CDAT attribute be available in sysfs
* @pci_latency: Upstream latency in picoseconds
*/
struct cxl_port {
struct device dev;
@ -612,6 +612,30 @@ struct cxl_port {
size_t length;
} cdat;
bool cdat_available;
long pci_latency;
};
/**
* struct cxl_root - logical collection of root cxl_port items
*
* @port: cxl_port member
* @ops: cxl root operations
*/
struct cxl_root {
struct cxl_port port;
const struct cxl_root_ops *ops;
};
static inline struct cxl_root *
to_cxl_root(const struct cxl_port *port)
{
return container_of(port, struct cxl_root, port);
}
struct cxl_root_ops {
int (*qos_class)(struct cxl_root *cxl_root,
struct access_coordinate *coord, int entries,
int *qos_class);
};
static inline struct cxl_dport *
@ -634,6 +658,9 @@ struct cxl_rcrb_info {
* @rch: Indicate whether this dport was enumerated in RCH or VH mode
* @port: reference to cxl_port that contains this downstream port
* @regs: Dport parsed register blocks
* @sw_coord: access coordinates (performance) for switch from CDAT
* @hb_coord: access coordinates (performance) from ACPI generic port (host bridge)
* @link_latency: calculated PCIe downstream latency
*/
struct cxl_dport {
struct device *dport_dev;
@ -643,6 +670,9 @@ struct cxl_dport {
bool rch;
struct cxl_port *port;
struct cxl_regs regs;
struct access_coordinate sw_coord;
struct access_coordinate hb_coord;
long link_latency;
};
/**
@ -700,7 +730,12 @@ struct cxl_port *devm_cxl_add_port(struct device *host,
struct device *uport_dev,
resource_size_t component_reg_phys,
struct cxl_dport *parent_dport);
struct cxl_port *find_cxl_root(struct cxl_port *port);
struct cxl_root *devm_cxl_add_root(struct device *host,
const struct cxl_root_ops *ops);
struct cxl_root *find_cxl_root(struct cxl_port *port);
void put_cxl_root(struct cxl_root *cxl_root);
DEFINE_FREE(put_cxl_root, struct cxl_root *, if (_T) put_cxl_root(_T))
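The DEFINE_FREE() above lets callers tie the find_cxl_root() reference to scope. A sketch of the intended pattern (the function below is hypothetical; the same idiom appears in the cxl_endpoint_port_probe() hunk later in this diff):

static int example_use_root(struct cxl_port *port)
{
	struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port);

	if (!cxl_root)
		return -ENXIO;

	/* use cxl_root->port ...; the reference is dropped on any return path */
	return 0;
}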
int devm_cxl_enumerate_ports(struct cxl_memdev *cxlmd);
void cxl_bus_rescan(void);
void cxl_bus_drain(void);
@ -839,6 +874,12 @@ static inline struct cxl_dax_region *to_cxl_dax_region(struct device *dev)
}
#endif
void cxl_endpoint_parse_cdat(struct cxl_port *port);
void cxl_switch_parse_cdat(struct cxl_port *port);
int cxl_endpoint_get_perf_coordinates(struct cxl_port *port,
struct access_coordinate *coord);
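cxl_endpoint_get_perf_coordinates() aggregates performance along the path from the endpoint up to the root. As a rough sketch of the idea (not the in-tree implementation): latencies accumulate per hop while bandwidth is capped by the weakest hop.

static void example_combine_coord(struct access_coordinate *path,
				  const struct access_coordinate *hop)
{
	path->read_latency += hop->read_latency;
	path->write_latency += hop->write_latency;
	path->read_bandwidth = min(path->read_bandwidth, hop->read_bandwidth);
	path->write_bandwidth = min(path->write_bandwidth, hop->write_bandwidth);
}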
/*
* Unit test builds overrides this to __weak, find the 'strong' version
* of these symbols in tools/testing/cxl/.

View File

@ -6,6 +6,8 @@
#include <linux/cdev.h>
#include <linux/uuid.h>
#include <linux/rcuwait.h>
#include <linux/cxl-event.h>
#include <linux/node.h>
#include "cxl.h"
/* CXL 2.0 8.2.8.5.1.1 Memory Device Status Register */
@ -391,6 +393,20 @@ enum cxl_devtype {
CXL_DEVTYPE_CLASSMEM,
};
/**
* struct cxl_dpa_perf - DPA performance property entry
* @list: list entry
* @dpa_range: range for DPA address
* @coord: QoS performance data (i.e. latency, bandwidth)
* @qos_class: QoS Class cookies
*/
struct cxl_dpa_perf {
struct list_head list;
struct range dpa_range;
struct access_coordinate coord;
int qos_class;
};
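A sketch of how such entries can be matched to a device physical address; the helper is hypothetical, only struct cxl_dpa_perf and the per-partition lists added below come from this series.

static struct cxl_dpa_perf *
example_find_dpa_perf(struct list_head *perf_list, u64 dpa)
{
	struct cxl_dpa_perf *perf;

	list_for_each_entry(perf, perf_list, list)
		if (dpa >= perf->dpa_range.start && dpa <= perf->dpa_range.end)
			return perf;
	return NULL;
}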
/**
* struct cxl_dev_state - The driver device state
*
@ -455,6 +471,8 @@ struct cxl_dev_state {
* @security: security driver state info
* @fw: firmware upload / activation state
* @mbox_send: @dev specific transport for transmitting mailbox commands
* @ram_perf_list: performance data entries matched to RAM
* @pmem_perf_list: performance data entries matched to PMEM
*
* See CXL 3.0 8.2.9.8.2 Capacity Configuration and Label Storage for
* details on capacity parameters.
@ -475,6 +493,10 @@ struct cxl_memdev_state {
u64 active_persistent_bytes;
u64 next_volatile_bytes;
u64 next_persistent_bytes;
struct list_head ram_perf_list;
struct list_head pmem_perf_list;
struct cxl_event_state event;
struct cxl_poison_state poison;
struct cxl_security_state security;
@ -503,6 +525,7 @@ enum cxl_opcode {
CXL_MBOX_OP_GET_FW_INFO = 0x0200,
CXL_MBOX_OP_TRANSFER_FW = 0x0201,
CXL_MBOX_OP_ACTIVATE_FW = 0x0202,
CXL_MBOX_OP_GET_TIMESTAMP = 0x0300,
CXL_MBOX_OP_SET_TIMESTAMP = 0x0301,
CXL_MBOX_OP_GET_SUPPORTED_LOGS = 0x0400,
CXL_MBOX_OP_GET_LOG = 0x0401,
@ -580,25 +603,28 @@ struct cxl_mbox_identify {
} __packed;
/*
* Common Event Record Format
* CXL rev 3.0 section 8.2.9.2.1; Table 8-42
* General Media Event Record UUID
* CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
*/
struct cxl_event_record_hdr {
uuid_t id;
u8 length;
u8 flags[3];
__le16 handle;
__le16 related_handle;
__le64 timestamp;
u8 maint_op_class;
u8 reserved[15];
} __packed;
#define CXL_EVENT_GEN_MEDIA_UUID \
UUID_INIT(0xfbcd0a77, 0xc260, 0x417f, 0x85, 0xa9, 0x08, 0x8b, 0x16, \
0x21, 0xeb, 0xa6)
#define CXL_EVENT_RECORD_DATA_LENGTH 0x50
struct cxl_event_record_raw {
struct cxl_event_record_hdr hdr;
u8 data[CXL_EVENT_RECORD_DATA_LENGTH];
} __packed;
/*
* DRAM Event Record UUID
* CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
*/
#define CXL_EVENT_DRAM_UUID \
UUID_INIT(0x601dcbb3, 0x9c06, 0x4eab, 0xb8, 0xaf, 0x4e, 0x9b, 0xfb, \
0x5c, 0x96, 0x24)
/*
* Memory Module Event Record UUID
* CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45
*/
#define CXL_EVENT_MEM_MODULE_UUID \
UUID_INIT(0xfe927475, 0xdd59, 0x4339, 0xa5, 0x86, 0x79, 0xba, 0xb1, \
0x13, 0xb7, 0x74)
/*
* Get Event Records output payload
@ -641,74 +667,6 @@ struct cxl_mbox_clear_event_payload {
} __packed;
#define CXL_CLEAR_EVENT_MAX_HANDLES U8_MAX
/*
* General Media Event Record
* CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
*/
#define CXL_EVENT_GEN_MED_COMP_ID_SIZE 0x10
struct cxl_event_gen_media {
struct cxl_event_record_hdr hdr;
__le64 phys_addr;
u8 descriptor;
u8 type;
u8 transaction_type;
u8 validity_flags[2];
u8 channel;
u8 rank;
u8 device[3];
u8 component_id[CXL_EVENT_GEN_MED_COMP_ID_SIZE];
u8 reserved[46];
} __packed;
/*
* DRAM Event Record - DER
* CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
*/
#define CXL_EVENT_DER_CORRECTION_MASK_SIZE 0x20
struct cxl_event_dram {
struct cxl_event_record_hdr hdr;
__le64 phys_addr;
u8 descriptor;
u8 type;
u8 transaction_type;
u8 validity_flags[2];
u8 channel;
u8 rank;
u8 nibble_mask[3];
u8 bank_group;
u8 bank;
u8 row[3];
u8 column[2];
u8 correction_mask[CXL_EVENT_DER_CORRECTION_MASK_SIZE];
u8 reserved[0x17];
} __packed;
/*
* Get Health Info Record
* CXL rev 3.0 section 8.2.9.8.3.1; Table 8-100
*/
struct cxl_get_health_info {
u8 health_status;
u8 media_status;
u8 add_status;
u8 life_used;
u8 device_temp[2];
u8 dirty_shutdown_cnt[4];
u8 cor_vol_err_cnt[4];
u8 cor_per_err_cnt[4];
} __packed;
/*
* Memory Module Event Record
* CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45
*/
struct cxl_event_mem_module {
struct cxl_event_record_hdr hdr;
u8 event_type;
struct cxl_get_health_info info;
u8 reserved[0x3d];
} __packed;
struct cxl_mbox_get_partition_info {
__le64 active_volatile_cap;
__le64 active_persistent_cap;
@ -866,6 +824,10 @@ void set_exclusive_cxl_commands(struct cxl_memdev_state *mds,
void clear_exclusive_cxl_commands(struct cxl_memdev_state *mds,
unsigned long *cmds);
void cxl_mem_get_event_records(struct cxl_memdev_state *mds, u32 status);
void cxl_event_trace_record(const struct cxl_memdev *cxlmd,
enum cxl_event_log_type type,
enum cxl_event_type event_type,
const uuid_t *uuid, union cxl_event *evt);
int cxl_set_timestamp(struct cxl_memdev_state *mds);
int cxl_poison_state_init(struct cxl_memdev_state *mds);
int cxl_mem_get_poison(struct cxl_memdev *cxlmd, u64 offset, u64 len,

View File

@ -85,6 +85,19 @@ struct cdat_entry_header {
__le16 length;
} __packed;
/*
* CXL v3.0 6.2.3 Table 6-4
* The table indicates that if PCIe Flit Mode is set, CXL is in 256B flit
* mode; otherwise it is in 68B flit mode.
*/
static inline bool cxl_pci_flit_256(struct pci_dev *pdev)
{
u16 lnksta2;
pcie_capability_read_word(pdev, PCI_EXP_LNKSTA2, &lnksta2);
return lnksta2 & PCI_EXP_LNKSTA2_FLIT;
}
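One way this flit-mode check can feed a latency estimate: approximate the link latency as the time to transfer a single flit at the negotiated link speed. This is only a sketch; it ignores link width and encoding overhead, and pcie_link_speed_mbps() is the helper added to drivers/pci/pci.c later in this diff.

#define CXL_FLIT_256_BYTES	256
#define CXL_FLIT_68_BYTES	68

static long example_flit_latency_ps(struct pci_dev *pdev)
{
	int mbps = pcie_link_speed_mbps(pdev);
	long flit_bytes = cxl_pci_flit_256(pdev) ? CXL_FLIT_256_BYTES :
						   CXL_FLIT_68_BYTES;

	if (mbps <= 0)
		return -EINVAL;

	/* bits per flit * 10^6 / (Mb/s) yields picoseconds per flit */
	return flit_bytes * BITS_PER_BYTE * 1000000L / mbps;
}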
int devm_cxl_port_enumerate_dports(struct cxl_port *port);
struct cxl_dev_state;
int cxl_hdm_decode_init(struct cxl_dev_state *cxlds, struct cxl_hdm *cxlhdm,

View File

@ -215,23 +215,78 @@ static ssize_t trigger_poison_list_store(struct device *dev,
}
static DEVICE_ATTR_WO(trigger_poison_list);
static ssize_t ram_qos_class_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
struct cxl_dpa_perf *dpa_perf;
if (!dev->driver)
return -ENOENT;
if (list_empty(&mds->ram_perf_list))
return -ENOENT;
dpa_perf = list_first_entry(&mds->ram_perf_list, struct cxl_dpa_perf,
list);
return sysfs_emit(buf, "%d\n", dpa_perf->qos_class);
}
static struct device_attribute dev_attr_ram_qos_class =
__ATTR(qos_class, 0444, ram_qos_class_show, NULL);
static ssize_t pmem_qos_class_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_dev_state *cxlds = cxlmd->cxlds;
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlds);
struct cxl_dpa_perf *dpa_perf;
if (!dev->driver)
return -ENOENT;
if (list_empty(&mds->pmem_perf_list))
return -ENOENT;
dpa_perf = list_first_entry(&mds->pmem_perf_list, struct cxl_dpa_perf,
list);
return sysfs_emit(buf, "%d\n", dpa_perf->qos_class);
}
static struct device_attribute dev_attr_pmem_qos_class =
__ATTR(qos_class, 0444, pmem_qos_class_show, NULL);
static umode_t cxl_mem_visible(struct kobject *kobj, struct attribute *a, int n)
{
if (a == &dev_attr_trigger_poison_list.attr) {
struct device *dev = kobj_to_dev(kobj);
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_memdev_state *mds =
to_cxl_memdev_state(cxlmd->cxlds);
struct device *dev = kobj_to_dev(kobj);
struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
if (a == &dev_attr_trigger_poison_list.attr)
if (!test_bit(CXL_POISON_ENABLED_LIST,
mds->poison.enabled_cmds))
return 0;
}
if (a == &dev_attr_pmem_qos_class.attr)
if (list_empty(&mds->pmem_perf_list))
return 0;
if (a == &dev_attr_ram_qos_class.attr)
if (list_empty(&mds->ram_perf_list))
return 0;
return a->mode;
}
static struct attribute *cxl_mem_attrs[] = {
&dev_attr_trigger_poison_list.attr,
&dev_attr_ram_qos_class.attr,
&dev_attr_pmem_qos_class.attr,
NULL
};

View File

@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2020 Intel Corporation. All rights reserved. */
#include <asm-generic/unaligned.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/moduleparam.h>
#include <linux/module.h>
@ -969,6 +970,61 @@ static struct pci_driver cxl_pci_driver = {
},
};
#define CXL_EVENT_HDR_FLAGS_REC_SEVERITY GENMASK(1, 0)
static void cxl_cper_event_call(enum cxl_event_type ev_type,
struct cxl_cper_event_rec *rec)
{
struct cper_cxl_event_devid *device_id = &rec->hdr.device_id;
struct pci_dev *pdev __free(pci_dev_put) = NULL;
enum cxl_event_log_type log_type;
struct cxl_dev_state *cxlds;
unsigned int devfn;
u32 hdr_flags;
devfn = PCI_DEVFN(device_id->device_num, device_id->func_num);
pdev = pci_get_domain_bus_and_slot(device_id->segment_num,
device_id->bus_num, devfn);
if (!pdev)
return;
guard(pci_dev)(pdev);
if (pdev->driver != &cxl_pci_driver)
return;
cxlds = pci_get_drvdata(pdev);
if (!cxlds)
return;
/* Fabricate a log type */
hdr_flags = get_unaligned_le24(rec->event.generic.hdr.flags);
log_type = FIELD_GET(CXL_EVENT_HDR_FLAGS_REC_SEVERITY, hdr_flags);
cxl_event_trace_record(cxlds->cxlmd, log_type, ev_type,
&uuid_null, &rec->event);
}
static int __init cxl_pci_driver_init(void)
{
int rc;
rc = cxl_cper_register_callback(cxl_cper_event_call);
if (rc)
return rc;
rc = pci_register_driver(&cxl_pci_driver);
if (rc)
cxl_cper_unregister_callback(cxl_cper_event_call);
return rc;
}
static void __exit cxl_pci_driver_exit(void)
{
pci_unregister_driver(&cxl_pci_driver);
cxl_cper_unregister_callback(cxl_cper_event_call);
}
module_init(cxl_pci_driver_init);
module_exit(cxl_pci_driver_exit);
MODULE_LICENSE("GPL v2");
module_pci_driver(cxl_pci_driver);
MODULE_IMPORT_NS(CXL);

View File

@ -69,6 +69,8 @@ static int cxl_switch_port_probe(struct cxl_port *port)
if (rc < 0)
return rc;
cxl_switch_parse_cdat(port);
cxlhdm = devm_cxl_setup_hdm(port, NULL);
if (!IS_ERR(cxlhdm))
return devm_cxl_enumerate_decoders(cxlhdm, NULL);
@ -109,6 +111,7 @@ static int cxl_endpoint_port_probe(struct cxl_port *port)
/* Cache the data early to ensure is_visible() works */
read_cdat_data(port);
cxl_endpoint_parse_cdat(port);
get_device(&cxlmd->dev);
rc = devm_add_action_or_reset(&port->dev, schedule_detach, cxlmd);
@ -127,14 +130,15 @@ static int cxl_endpoint_port_probe(struct cxl_port *port)
* This can't fail in practice as CXL root exit unregisters all
* descendant ports and that in turn synchronizes with cxl_port_probe()
*/
root = find_cxl_root(port);
struct cxl_root *cxl_root __free(put_cxl_root) = find_cxl_root(port);
root = &cxl_root->port;
/*
* Now that all endpoint decoders are successfully enumerated, try to
* assemble regions from committed decoders
*/
device_for_each_child(&port->dev, root, discover_region);
put_device(&root->dev);
return 0;
}

View File

@ -6295,6 +6295,41 @@ int pcie_set_mps(struct pci_dev *dev, int mps)
}
EXPORT_SYMBOL(pcie_set_mps);
static enum pci_bus_speed to_pcie_link_speed(u16 lnksta)
{
return pcie_link_speed[FIELD_GET(PCI_EXP_LNKSTA_CLS, lnksta)];
}
int pcie_link_speed_mbps(struct pci_dev *pdev)
{
u16 lnksta;
int err;
err = pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);
if (err)
return err;
switch (to_pcie_link_speed(lnksta)) {
case PCIE_SPEED_2_5GT:
return 2500;
case PCIE_SPEED_5_0GT:
return 5000;
case PCIE_SPEED_8_0GT:
return 8000;
case PCIE_SPEED_16_0GT:
return 16000;
case PCIE_SPEED_32_0GT:
return 32000;
case PCIE_SPEED_64_0GT:
return 64000;
default:
break;
}
return -EINVAL;
}
EXPORT_SYMBOL(pcie_link_speed_mbps);
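For reference, a sketch of folding the negotiated link width into an aggregate bandwidth figure; the helper below is hypothetical and ignores encoding overhead, unlike the PCIE_SPEED2MBS_ENC() based math used elsewhere in this file.

static int example_link_bandwidth_mbs(struct pci_dev *pdev)
{
	u16 lnksta;
	int mbps = pcie_link_speed_mbps(pdev);

	if (mbps < 0)
		return mbps;

	pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnksta);
	/* per-lane Mb/s times lane count, over eight bits per byte => MB/s */
	return mbps * FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta) / BITS_PER_BYTE;
}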
/**
* pcie_bandwidth_available - determine minimum link settings of a PCIe
* device and its bandwidth limitation
@ -6328,8 +6363,7 @@ u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
while (dev) {
pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
next_speed = pcie_link_speed[FIELD_GET(PCI_EXP_LNKSTA_CLS,
lnksta)];
next_speed = to_pcie_link_speed(lnksta);
next_width = FIELD_GET(PCI_EXP_LNKSTA_NLW, lnksta);
next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);

View File

@ -15,6 +15,7 @@
#include <linux/mod_devicetable.h>
#include <linux/property.h>
#include <linux/uuid.h>
#include <linux/node.h>
struct irq_domain;
struct irq_domain_ops;
@ -431,6 +432,16 @@ int thermal_acpi_hot_trip_temp(struct acpi_device *adev, int *ret_temp);
int thermal_acpi_critical_trip_temp(struct acpi_device *adev, int *ret_temp);
#endif
#ifdef CONFIG_ACPI_HMAT
int acpi_get_genport_coordinates(u32 uid, struct access_coordinate *coord);
#else
static inline int acpi_get_genport_coordinates(u32 uid,
struct access_coordinate *coord)
{
return -EOPNOTSUPP;
}
#endif
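A sketch of the consumer side: looking up an ACPI host bridge by _UID and filling the dport coordinates added earlier in this diff (the wrapper function is hypothetical).

static int example_host_bridge_perf(u32 uid, struct cxl_dport *dport)
{
	int rc = acpi_get_genport_coordinates(uid, &dport->hb_coord);

	if (rc == -EOPNOTSUPP)
		return 0;	/* HMAT support not compiled in; no data */
	return rc;
}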
#ifdef CONFIG_ACPI_NUMA
int acpi_map_pxm_to_node(int pxm);
int acpi_get_node(acpi_handle handle);

include/linux/cxl-event.h (new file, 161 lines)
View File

@ -0,0 +1,161 @@
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2023 Intel Corporation. */
#ifndef _LINUX_CXL_EVENT_H
#define _LINUX_CXL_EVENT_H
/*
* Common Event Record Format
* CXL rev 3.0 section 8.2.9.2.1; Table 8-42
*/
struct cxl_event_record_hdr {
u8 length;
u8 flags[3];
__le16 handle;
__le16 related_handle;
__le64 timestamp;
u8 maint_op_class;
u8 reserved[15];
} __packed;
#define CXL_EVENT_RECORD_DATA_LENGTH 0x50
struct cxl_event_generic {
struct cxl_event_record_hdr hdr;
u8 data[CXL_EVENT_RECORD_DATA_LENGTH];
} __packed;
/*
* General Media Event Record
* CXL rev 3.0 Section 8.2.9.2.1.1; Table 8-43
*/
#define CXL_EVENT_GEN_MED_COMP_ID_SIZE 0x10
struct cxl_event_gen_media {
struct cxl_event_record_hdr hdr;
__le64 phys_addr;
u8 descriptor;
u8 type;
u8 transaction_type;
u8 validity_flags[2];
u8 channel;
u8 rank;
u8 device[3];
u8 component_id[CXL_EVENT_GEN_MED_COMP_ID_SIZE];
u8 reserved[46];
} __packed;
/*
* DRAM Event Record - DER
* CXL rev 3.0 section 8.2.9.2.1.2; Table 8-44
*/
#define CXL_EVENT_DER_CORRECTION_MASK_SIZE 0x20
struct cxl_event_dram {
struct cxl_event_record_hdr hdr;
__le64 phys_addr;
u8 descriptor;
u8 type;
u8 transaction_type;
u8 validity_flags[2];
u8 channel;
u8 rank;
u8 nibble_mask[3];
u8 bank_group;
u8 bank;
u8 row[3];
u8 column[2];
u8 correction_mask[CXL_EVENT_DER_CORRECTION_MASK_SIZE];
u8 reserved[0x17];
} __packed;
/*
* Get Health Info Record
* CXL rev 3.0 section 8.2.9.8.3.1; Table 8-100
*/
struct cxl_get_health_info {
u8 health_status;
u8 media_status;
u8 add_status;
u8 life_used;
u8 device_temp[2];
u8 dirty_shutdown_cnt[4];
u8 cor_vol_err_cnt[4];
u8 cor_per_err_cnt[4];
} __packed;
/*
* Memory Module Event Record
* CXL rev 3.0 section 8.2.9.2.1.3; Table 8-45
*/
struct cxl_event_mem_module {
struct cxl_event_record_hdr hdr;
u8 event_type;
struct cxl_get_health_info info;
u8 reserved[0x3d];
} __packed;
union cxl_event {
struct cxl_event_generic generic;
struct cxl_event_gen_media gen_media;
struct cxl_event_dram dram;
struct cxl_event_mem_module mem_module;
} __packed;
/*
* Common Event Record Format; in event logs
* CXL rev 3.0 section 8.2.9.2.1; Table 8-42
*/
struct cxl_event_record_raw {
uuid_t id;
union cxl_event event;
} __packed;
enum cxl_event_type {
CXL_CPER_EVENT_GENERIC,
CXL_CPER_EVENT_GEN_MEDIA,
CXL_CPER_EVENT_DRAM,
CXL_CPER_EVENT_MEM_MODULE,
};
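A sketch of how a raw record's id maps onto this enum via the UUID defines moved into cxlmem.h earlier in this diff; the dispatch helper itself is illustrative.

static enum cxl_event_type
example_classify_raw(struct cxl_event_record_raw *raw)
{
	if (uuid_equal(&raw->id, &CXL_EVENT_GEN_MEDIA_UUID))
		return CXL_CPER_EVENT_GEN_MEDIA;
	if (uuid_equal(&raw->id, &CXL_EVENT_DRAM_UUID))
		return CXL_CPER_EVENT_DRAM;
	if (uuid_equal(&raw->id, &CXL_EVENT_MEM_MODULE_UUID))
		return CXL_CPER_EVENT_MEM_MODULE;
	return CXL_CPER_EVENT_GENERIC;
}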
#define CPER_CXL_DEVICE_ID_VALID BIT(0)
#define CPER_CXL_DEVICE_SN_VALID BIT(1)
#define CPER_CXL_COMP_EVENT_LOG_VALID BIT(2)
struct cxl_cper_event_rec {
struct {
u32 length;
u64 validation_bits;
struct cper_cxl_event_devid {
u16 vendor_id;
u16 device_id;
u8 func_num;
u8 device_num;
u8 bus_num;
u16 segment_num;
u16 slot_num; /* bits 2:0 reserved */
u8 reserved;
} __packed device_id;
struct cper_cxl_event_sn {
u32 lower_dw;
u32 upper_dw;
} __packed dev_serial_num;
} __packed hdr;
union cxl_event event;
} __packed;
typedef void (*cxl_cper_callback)(enum cxl_event_type type,
struct cxl_cper_event_rec *rec);
#ifdef CONFIG_ACPI_APEI_GHES
int cxl_cper_register_callback(cxl_cper_callback callback);
int cxl_cper_unregister_callback(cxl_cper_callback callback);
#else
static inline int cxl_cper_register_callback(cxl_cper_callback callback)
{
return 0;
}
static inline int cxl_cper_unregister_callback(cxl_cper_callback callback)
{
return 0;
}
#endif
#endif /* _LINUX_CXL_EVENT_H */

View File

@ -25,16 +25,35 @@ struct acpi_subtable_proc {
int count;
};
union fw_table_header {
struct acpi_table_header acpi;
struct acpi_table_cdat cdat;
};
union acpi_subtable_headers {
struct acpi_subtable_header common;
struct acpi_hmat_structure hmat;
struct acpi_prmt_module_header prmt;
struct acpi_cedt_header cedt;
struct acpi_cdat_header cdat;
};
int acpi_parse_entries_array(char *id, unsigned long table_size,
struct acpi_table_header *table_header,
union fw_table_header *table_header,
struct acpi_subtable_proc *proc,
int proc_num, unsigned int max_entries);
int cdat_table_parse(enum acpi_cdat_type type,
acpi_tbl_entry_handler_arg handler_arg, void *arg,
struct acpi_table_cdat *table_header);
/* CXL is the only non-ACPI consumer of the FIRMWARE_TABLE library */
#if IS_ENABLED(CONFIG_ACPI) && !IS_ENABLED(CONFIG_CXL_BUS)
#define EXPORT_SYMBOL_FWTBL_LIB(x) EXPORT_SYMBOL_ACPI_LIB(x)
#define __init_or_fwtbl_lib __init_or_acpilib
#else
#define EXPORT_SYMBOL_FWTBL_LIB(x) EXPORT_SYMBOL_NS_GPL(x, CXL)
#define __init_or_fwtbl_lib
#endif
#endif

View File

@ -33,7 +33,7 @@ struct memory_dev_type {
struct kref kref;
};
struct node_hmem_attrs;
struct access_coordinate;
#ifdef CONFIG_NUMA
extern bool numa_demotion_enabled;
@ -45,9 +45,9 @@ void clear_node_memory_type(int node, struct memory_dev_type *memtype);
int register_mt_adistance_algorithm(struct notifier_block *nb);
int unregister_mt_adistance_algorithm(struct notifier_block *nb);
int mt_calc_adistance(int node, int *adist);
int mt_set_default_dram_perf(int nid, struct node_hmem_attrs *perf,
int mt_set_default_dram_perf(int nid, struct access_coordinate *perf,
const char *source);
int mt_perf_to_adistance(struct node_hmem_attrs *perf, int *adist);
int mt_perf_to_adistance(struct access_coordinate *perf, int *adist);
#ifdef CONFIG_MIGRATION
int next_demotion_node(int node);
void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets);
@ -126,13 +126,13 @@ static inline int mt_calc_adistance(int node, int *adist)
return NOTIFY_DONE;
}
static inline int mt_set_default_dram_perf(int nid, struct node_hmem_attrs *perf,
static inline int mt_set_default_dram_perf(int nid, struct access_coordinate *perf,
const char *source)
{
return -EIO;
}
static inline int mt_perf_to_adistance(struct node_hmem_attrs *perf, int *adist)
static inline int mt_perf_to_adistance(struct access_coordinate *perf, int *adist)
{
return -EIO;
}

View File

@ -20,14 +20,14 @@
#include <linux/list.h>
/**
* struct node_hmem_attrs - heterogeneous memory performance attributes
* struct access_coordinate - generic performance coordinates container
*
* @read_bandwidth: Read bandwidth in MB/s
* @write_bandwidth: Write bandwidth in MB/s
* @read_latency: Read latency in nanoseconds
* @write_latency: Write latency in nanoseconds
*/
struct node_hmem_attrs {
struct access_coordinate {
unsigned int read_bandwidth;
unsigned int write_bandwidth;
unsigned int read_latency;
@ -65,7 +65,7 @@ struct node_cache_attrs {
#ifdef CONFIG_HMEM_REPORTING
void node_add_cache(unsigned int nid, struct node_cache_attrs *cache_attrs);
void node_set_perf_attrs(unsigned int nid, struct node_hmem_attrs *hmem_attrs,
void node_set_perf_attrs(unsigned int nid, struct access_coordinate *coord,
unsigned access);
#else
static inline void node_add_cache(unsigned int nid,
@ -74,7 +74,7 @@ static inline void node_add_cache(unsigned int nid,
}
static inline void node_set_perf_attrs(unsigned int nid,
struct node_hmem_attrs *hmem_attrs,
struct access_coordinate *coord,
unsigned access)
{
}

View File

@ -1171,6 +1171,7 @@ int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
struct pci_dev *pci_dev_get(struct pci_dev *dev);
void pci_dev_put(struct pci_dev *dev);
DEFINE_FREE(pci_dev_put, struct pci_dev *, if (_T) pci_dev_put(_T))
void pci_remove_bus(struct pci_bus *b);
void pci_stop_and_remove_bus_device(struct pci_dev *dev);
void pci_stop_and_remove_bus_device_locked(struct pci_dev *dev);
@ -1367,6 +1368,7 @@ int pcie_set_mps(struct pci_dev *dev, int mps);
u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
enum pci_bus_speed *speed,
enum pcie_link_width *width);
int pcie_link_speed_mbps(struct pci_dev *pdev);
void pcie_print_link_status(struct pci_dev *dev);
int pcie_reset_flr(struct pci_dev *dev, bool probe);
int pcie_flr(struct pci_dev *dev);
@ -1877,6 +1879,7 @@ void pci_cfg_access_unlock(struct pci_dev *dev);
void pci_dev_lock(struct pci_dev *dev);
int pci_dev_trylock(struct pci_dev *dev);
void pci_dev_unlock(struct pci_dev *dev);
DEFINE_GUARD(pci_dev, struct pci_dev *, pci_dev_lock(_T), pci_dev_unlock(_T))
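Together with the DEFINE_FREE() for pci_dev_put() above, this guard ties reference and lock lifetimes to scope. A sketch mirroring how the CPER event path in this diff uses both (the function name is illustrative):

static int example_with_locked_pdev(int segment, int bus, unsigned int devfn)
{
	struct pci_dev *pdev __free(pci_dev_put) =
		pci_get_domain_bus_and_slot(segment, bus, devfn);

	if (!pdev)
		return -ENODEV;

	guard(pci_dev)(pdev);	/* pci_dev_lock() held until scope exit */
	/* ... operate on the locked device ... */
	return 0;
}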
/*
* PCI domain support. Sometimes called PCI segment (eg by ACPI),

View File

@ -46,6 +46,7 @@
___C(GET_SCAN_MEDIA_CAPS, "Get Scan Media Capabilities"), \
___DEPRECATED(SCAN_MEDIA, "Scan Media"), \
___DEPRECATED(GET_SCAN_MEDIA, "Get Scan Media Results"), \
___C(GET_TIMESTAMP, "Get Timestamp"), \
___C(MAX, "invalid / last command")
#define ___C(a, b) CXL_MEM_COMMAND_ID_##a

View File

@ -12,12 +12,14 @@
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/fw_table.h>
enum acpi_subtable_type {
ACPI_SUBTABLE_COMMON,
ACPI_SUBTABLE_HMAT,
ACPI_SUBTABLE_PRMT,
ACPI_SUBTABLE_CEDT,
CDAT_SUBTABLE,
};
struct acpi_subtable_entry {
@ -25,7 +27,7 @@ struct acpi_subtable_entry {
enum acpi_subtable_type type;
};
static unsigned long __init_or_acpilib
static unsigned long __init_or_fwtbl_lib
acpi_get_entry_type(struct acpi_subtable_entry *entry)
{
switch (entry->type) {
@ -37,11 +39,13 @@ acpi_get_entry_type(struct acpi_subtable_entry *entry)
return 0;
case ACPI_SUBTABLE_CEDT:
return entry->hdr->cedt.type;
case CDAT_SUBTABLE:
return entry->hdr->cdat.type;
}
return 0;
}
static unsigned long __init_or_acpilib
static unsigned long __init_or_fwtbl_lib
acpi_get_entry_length(struct acpi_subtable_entry *entry)
{
switch (entry->type) {
@ -53,11 +57,16 @@ acpi_get_entry_length(struct acpi_subtable_entry *entry)
return entry->hdr->prmt.length;
case ACPI_SUBTABLE_CEDT:
return entry->hdr->cedt.length;
case CDAT_SUBTABLE: {
__le16 length = (__force __le16)entry->hdr->cdat.length;
return le16_to_cpu(length);
}
}
return 0;
}
static unsigned long __init_or_acpilib
static unsigned long __init_or_fwtbl_lib
acpi_get_subtable_header_length(struct acpi_subtable_entry *entry)
{
switch (entry->type) {
@ -69,11 +78,13 @@ acpi_get_subtable_header_length(struct acpi_subtable_entry *entry)
return sizeof(entry->hdr->prmt);
case ACPI_SUBTABLE_CEDT:
return sizeof(entry->hdr->cedt);
case CDAT_SUBTABLE:
return sizeof(entry->hdr->cdat);
}
return 0;
}
static enum acpi_subtable_type __init_or_acpilib
static enum acpi_subtable_type __init_or_fwtbl_lib
acpi_get_subtable_type(char *id)
{
if (strncmp(id, ACPI_SIG_HMAT, 4) == 0)
@ -82,12 +93,27 @@ acpi_get_subtable_type(char *id)
return ACPI_SUBTABLE_PRMT;
if (strncmp(id, ACPI_SIG_CEDT, 4) == 0)
return ACPI_SUBTABLE_CEDT;
if (strncmp(id, ACPI_SIG_CDAT, 4) == 0)
return CDAT_SUBTABLE;
return ACPI_SUBTABLE_COMMON;
}
static __init_or_acpilib int call_handler(struct acpi_subtable_proc *proc,
union acpi_subtable_headers *hdr,
unsigned long end)
static unsigned long __init_or_fwtbl_lib
acpi_table_get_length(enum acpi_subtable_type type,
union fw_table_header *header)
{
if (type == CDAT_SUBTABLE) {
__le32 length = (__force __le32)header->cdat.length;
return le32_to_cpu(length);
}
return header->acpi.length;
}
static __init_or_fwtbl_lib int call_handler(struct acpi_subtable_proc *proc,
union acpi_subtable_headers *hdr,
unsigned long end)
{
if (proc->handler)
return proc->handler(hdr, end);
@ -119,22 +145,25 @@ static __init_or_acpilib int call_handler(struct acpi_subtable_proc *proc,
* On success returns sum of all matching entries for all proc handlers.
* Otherwise, -ENODEV or -EINVAL is returned.
*/
int __init_or_acpilib
int __init_or_fwtbl_lib
acpi_parse_entries_array(char *id, unsigned long table_size,
struct acpi_table_header *table_header,
union fw_table_header *table_header,
struct acpi_subtable_proc *proc,
int proc_num, unsigned int max_entries)
{
unsigned long table_end, subtable_len, entry_len;
struct acpi_subtable_entry entry;
enum acpi_subtable_type type;
int count = 0;
int i;
table_end = (unsigned long)table_header + table_header->length;
type = acpi_get_subtable_type(id);
table_end = (unsigned long)table_header +
acpi_table_get_length(type, table_header);
/* Parse all entries looking for a match. */
entry.type = acpi_get_subtable_type(id);
entry.type = type;
entry.hdr = (union acpi_subtable_headers *)
((unsigned long)table_header + table_size);
subtable_len = acpi_get_subtable_header_length(&entry);
@ -174,3 +203,25 @@ acpi_parse_entries_array(char *id, unsigned long table_size,
return count;
}
int __init_or_fwtbl_lib
cdat_table_parse(enum acpi_cdat_type type,
acpi_tbl_entry_handler_arg handler_arg,
void *arg,
struct acpi_table_cdat *table_header)
{
struct acpi_subtable_proc proc = {
.id = type,
.handler_arg = handler_arg,
.arg = arg,
};
if (!table_header)
return -EINVAL;
return acpi_parse_entries_array(ACPI_SIG_CDAT,
sizeof(struct acpi_table_cdat),
(union fw_table_header *)table_header,
&proc, 1, 0);
}
EXPORT_SYMBOL_FWTBL_LIB(cdat_table_parse);
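A sketch of the consumer side, assuming 'cdat' points at a CDAT buffer cached as in struct cxl_port earlier in this diff; the handler merely counts DSMAS entries and is illustrative.

static int example_count_dsmas(union acpi_subtable_headers *header, void *arg,
			       const unsigned long end)
{
	int *count = arg;

	(*count)++;
	return 0;
}

/* struct acpi_table_cdat *cdat = <cached CDAT buffer>; */
int dsmas = 0;
int rc = cdat_table_parse(ACPI_CDAT_TYPE_DSMAS, example_count_dsmas, &dsmas,
			  cdat);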

View File

@ -109,7 +109,7 @@ static struct demotion_nodes *node_demotion __read_mostly;
static BLOCKING_NOTIFIER_HEAD(mt_adistance_algorithms);
static bool default_dram_perf_error;
static struct node_hmem_attrs default_dram_perf;
static struct access_coordinate default_dram_perf;
static int default_dram_perf_ref_nid = NUMA_NO_NODE;
static const char *default_dram_perf_ref_source;
@ -601,15 +601,15 @@ void clear_node_memory_type(int node, struct memory_dev_type *memtype)
}
EXPORT_SYMBOL_GPL(clear_node_memory_type);
static void dump_hmem_attrs(struct node_hmem_attrs *attrs, const char *prefix)
static void dump_hmem_attrs(struct access_coordinate *coord, const char *prefix)
{
pr_info(
"%sread_latency: %u, write_latency: %u, read_bandwidth: %u, write_bandwidth: %u\n",
prefix, attrs->read_latency, attrs->write_latency,
attrs->read_bandwidth, attrs->write_bandwidth);
prefix, coord->read_latency, coord->write_latency,
coord->read_bandwidth, coord->write_bandwidth);
}
int mt_set_default_dram_perf(int nid, struct node_hmem_attrs *perf,
int mt_set_default_dram_perf(int nid, struct access_coordinate *perf,
const char *source)
{
int rc = 0;
@ -666,7 +666,7 @@ int mt_set_default_dram_perf(int nid, struct node_hmem_attrs *perf,
return rc;
}
int mt_perf_to_adistance(struct node_hmem_attrs *perf, int *adist)
int mt_perf_to_adistance(struct access_coordinate *perf, int *adist)
{
if (default_dram_perf_error)
return -EIO;

View File

@ -58,6 +58,7 @@ cxl_core-y += $(CXL_CORE_SRC)/mbox.o
cxl_core-y += $(CXL_CORE_SRC)/pci.o
cxl_core-y += $(CXL_CORE_SRC)/hdm.o
cxl_core-y += $(CXL_CORE_SRC)/pmu.o
cxl_core-y += $(CXL_CORE_SRC)/cdat.o
cxl_core-$(CONFIG_TRACING) += $(CXL_CORE_SRC)/trace.o
cxl_core-$(CONFIG_CXL_REGION) += $(CXL_CORE_SRC)/region.o
cxl_core-y += config_check.o

View File

@ -68,15 +68,19 @@ static struct acpi_device acpi0017_mock;
static struct acpi_device host_bridge[NR_BRIDGES] = {
[0] = {
.handle = &host_bridge[0],
.pnp.unique_id = "0",
},
[1] = {
.handle = &host_bridge[1],
.pnp.unique_id = "1",
},
[2] = {
.handle = &host_bridge[2],
.pnp.unique_id = "2",
},
[3] = {
.handle = &host_bridge[3],
.pnp.unique_id = "3",
},
};

View File

@ -251,7 +251,8 @@ static int mock_get_event(struct device *dev, struct cxl_mbox_cmd *cmd)
for (i = 0; i < CXL_TEST_EVENT_CNT && !event_log_empty(log); i++) {
memcpy(&pl->records[i], event_get_current(log),
sizeof(pl->records[i]));
pl->records[i].hdr.handle = event_get_cur_event_handle(log);
pl->records[i].event.generic.hdr.handle =
event_get_cur_event_handle(log);
log->cur_idx++;
}
@ -337,87 +338,109 @@ static void cxl_mock_event_trigger(struct device *dev)
}
struct cxl_event_record_raw maint_needed = {
.hdr = {
.id = UUID_INIT(0xBA5EBA11, 0xABCD, 0xEFEB,
0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
.length = sizeof(struct cxl_event_record_raw),
.flags[0] = CXL_EVENT_RECORD_FLAG_MAINT_NEEDED,
/* .handle = Set dynamically */
.related_handle = cpu_to_le16(0xa5b6),
.id = UUID_INIT(0xBA5EBA11, 0xABCD, 0xEFEB,
0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
.event.generic = {
.hdr = {
.length = sizeof(struct cxl_event_record_raw),
.flags[0] = CXL_EVENT_RECORD_FLAG_MAINT_NEEDED,
/* .handle = Set dynamically */
.related_handle = cpu_to_le16(0xa5b6),
},
.data = { 0xDE, 0xAD, 0xBE, 0xEF },
},
.data = { 0xDE, 0xAD, 0xBE, 0xEF },
};
struct cxl_event_record_raw hardware_replace = {
.hdr = {
.id = UUID_INIT(0xABCDEFEB, 0xBA11, 0xBA5E,
0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
.length = sizeof(struct cxl_event_record_raw),
.flags[0] = CXL_EVENT_RECORD_FLAG_HW_REPLACE,
/* .handle = Set dynamically */
.related_handle = cpu_to_le16(0xb6a5),
.id = UUID_INIT(0xABCDEFEB, 0xBA11, 0xBA5E,
0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5),
.event.generic = {
.hdr = {
.length = sizeof(struct cxl_event_record_raw),
.flags[0] = CXL_EVENT_RECORD_FLAG_HW_REPLACE,
/* .handle = Set dynamically */
.related_handle = cpu_to_le16(0xb6a5),
},
.data = { 0xDE, 0xAD, 0xBE, 0xEF },
},
.data = { 0xDE, 0xAD, 0xBE, 0xEF },
};
struct cxl_event_gen_media gen_media = {
.hdr = {
.id = UUID_INIT(0xfbcd0a77, 0xc260, 0x417f,
0x85, 0xa9, 0x08, 0x8b, 0x16, 0x21, 0xeb, 0xa6),
.length = sizeof(struct cxl_event_gen_media),
.flags[0] = CXL_EVENT_RECORD_FLAG_PERMANENT,
/* .handle = Set dynamically */
.related_handle = cpu_to_le16(0),
struct cxl_test_gen_media {
uuid_t id;
struct cxl_event_gen_media rec;
} __packed;
struct cxl_test_gen_media gen_media = {
.id = CXL_EVENT_GEN_MEDIA_UUID,
.rec = {
.hdr = {
.length = sizeof(struct cxl_test_gen_media),
.flags[0] = CXL_EVENT_RECORD_FLAG_PERMANENT,
/* .handle = Set dynamically */
.related_handle = cpu_to_le16(0),
},
.phys_addr = cpu_to_le64(0x2000),
.descriptor = CXL_GMER_EVT_DESC_UNCORECTABLE_EVENT,
.type = CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR,
.transaction_type = CXL_GMER_TRANS_HOST_WRITE,
/* .validity_flags = <set below> */
.channel = 1,
.rank = 30
},
.phys_addr = cpu_to_le64(0x2000),
.descriptor = CXL_GMER_EVT_DESC_UNCORECTABLE_EVENT,
.type = CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR,
.transaction_type = CXL_GMER_TRANS_HOST_WRITE,
/* .validity_flags = <set below> */
.channel = 1,
.rank = 30
};
struct cxl_event_dram dram = {
.hdr = {
.id = UUID_INIT(0x601dcbb3, 0x9c06, 0x4eab,
0xb8, 0xaf, 0x4e, 0x9b, 0xfb, 0x5c, 0x96, 0x24),
.length = sizeof(struct cxl_event_dram),
.flags[0] = CXL_EVENT_RECORD_FLAG_PERF_DEGRADED,
/* .handle = Set dynamically */
.related_handle = cpu_to_le16(0),
struct cxl_test_dram {
uuid_t id;
struct cxl_event_dram rec;
} __packed;
struct cxl_test_dram dram = {
.id = CXL_EVENT_DRAM_UUID,
.rec = {
.hdr = {
.length = sizeof(struct cxl_test_dram),
.flags[0] = CXL_EVENT_RECORD_FLAG_PERF_DEGRADED,
/* .handle = Set dynamically */
.related_handle = cpu_to_le16(0),
},
.phys_addr = cpu_to_le64(0x8000),
.descriptor = CXL_GMER_EVT_DESC_THRESHOLD_EVENT,
.type = CXL_GMER_MEM_EVT_TYPE_INV_ADDR,
.transaction_type = CXL_GMER_TRANS_INTERNAL_MEDIA_SCRUB,
/* .validity_flags = <set below> */
.channel = 1,
.bank_group = 5,
.bank = 2,
.column = {0xDE, 0xAD},
},
.phys_addr = cpu_to_le64(0x8000),
.descriptor = CXL_GMER_EVT_DESC_THRESHOLD_EVENT,
.type = CXL_GMER_MEM_EVT_TYPE_INV_ADDR,
.transaction_type = CXL_GMER_TRANS_INTERNAL_MEDIA_SCRUB,
/* .validity_flags = <set below> */
.channel = 1,
.bank_group = 5,
.bank = 2,
.column = {0xDE, 0xAD},
};
struct cxl_event_mem_module mem_module = {
.hdr = {
.id = UUID_INIT(0xfe927475, 0xdd59, 0x4339,
0xa5, 0x86, 0x79, 0xba, 0xb1, 0x13, 0xb7, 0x74),
.length = sizeof(struct cxl_event_mem_module),
/* .handle = Set dynamically */
.related_handle = cpu_to_le16(0),
struct cxl_test_mem_module {
uuid_t id;
struct cxl_event_mem_module rec;
} __packed;
struct cxl_test_mem_module mem_module = {
.id = CXL_EVENT_MEM_MODULE_UUID,
.rec = {
.hdr = {
.length = sizeof(struct cxl_test_mem_module),
/* .handle = Set dynamically */
.related_handle = cpu_to_le16(0),
},
.event_type = CXL_MMER_TEMP_CHANGE,
.info = {
.health_status = CXL_DHI_HS_PERFORMANCE_DEGRADED,
.media_status = CXL_DHI_MS_ALL_DATA_LOST,
.add_status = (CXL_DHI_AS_CRITICAL << 2) |
(CXL_DHI_AS_WARNING << 4) |
(CXL_DHI_AS_WARNING << 5),
.device_temp = { 0xDE, 0xAD},
.dirty_shutdown_cnt = { 0xde, 0xad, 0xbe, 0xef },
.cor_vol_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
.cor_per_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
}
},
.event_type = CXL_MMER_TEMP_CHANGE,
.info = {
.health_status = CXL_DHI_HS_PERFORMANCE_DEGRADED,
.media_status = CXL_DHI_MS_ALL_DATA_LOST,
.add_status = (CXL_DHI_AS_CRITICAL << 2) |
(CXL_DHI_AS_WARNING << 4) |
(CXL_DHI_AS_WARNING << 5),
.device_temp = { 0xDE, 0xAD},
.dirty_shutdown_cnt = { 0xde, 0xad, 0xbe, 0xef },
.cor_vol_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
.cor_per_err_cnt = { 0xde, 0xad, 0xbe, 0xef },
}
};
static int mock_set_timestamp(struct cxl_dev_state *cxlds,
@ -439,11 +462,11 @@ static int mock_set_timestamp(struct cxl_dev_state *cxlds,
static void cxl_mock_add_event_logs(struct mock_event_store *mes)
{
put_unaligned_le16(CXL_GMER_VALID_CHANNEL | CXL_GMER_VALID_RANK,
&gen_media.validity_flags);
&gen_media.rec.validity_flags);
put_unaligned_le16(CXL_DER_VALID_CHANNEL | CXL_DER_VALID_BANK_GROUP |
CXL_DER_VALID_BANK | CXL_DER_VALID_COLUMN,
&dram.validity_flags);
&dram.rec.validity_flags);
mes_add_event(mes, CXL_EVENT_TYPE_INFO, &maint_needed);
mes_add_event(mes, CXL_EVENT_TYPE_INFO,