Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git (synced 2024-12-29 17:25:38 +00:00)
cxl changes for v6.13
- Constify range_contains() input parameters to prevent changes.
- Add support for displaying RCD capabilities in sysfs to support lspci for
  CXL device.
- Downgrade warning message to debug in cxl_probe_component_regs().
- Add support for adding a printf specifier '%pra' to emit 'struct range'
  content.
  - Add sanity tests for 'struct resource'.
  - Add documentation for special case.
  - Add %pra for 'struct range'.
  - Add %pra usage in CXL code.
- Add preparation code for DCD support.
  - Add range_overlaps().
  - Add CDAT DSMAS table shared and read only flag in ACPICA.
  - Add documentation to 'struct dev_dax_range'.
  - Delay event buffer allocation in CXL PCI code until needed.
  - Use guard() in cxl_dpa_set_mode().
  - Refactor create region code to consolidate common code.

-----BEGIN PGP SIGNATURE-----

iQIzBAABCAAdFiEE5DAy15EJMCV1R6v9YGjFFmlTOEoFAmc84dMACgkQYGjFFmlT
OEoGTg//cSJlQ9X7+xZDbngnzpJwcLzQkR/FXDfe3obtmgs7woDJgNNcYnKSlgyf
wal47Q0UM/1Hv8Dtfrt62Ay1fmOvDL2GSpey35NVJGCEpIsfOqqk1zTCgfgwRHTO
MZJLnOSFUIlDYlVz8ljLNHnNqPjr7dCoUh9tdBefvkw59FqbkHNcWI8hG1lh1SR4
2frtJcqVg54S6vJa2eeWmNVpxz7RZvPFrb8TJzhdrGM8PkTMNFA2oJINAf0j00Ev
8/T6HXTxXvFtNhBH0dtMO1MFh1d6Qr/zFnX/gmrnPWl1l/12HFDMBIZIzq/Whjpo
+7hQ5xK3cwkMevFgFrAhwdZMj8maR84x1dbFItoThaoeDIQ4sGfyQEMPsbkZP/Sc
67i5hQFIBZc+ORLB0W+z9Da52ZFGyVw/xsCmDRzXCw4s7N2twpydIoA7Pvu9NN1X
3JVF35NrsRZ+PyuGWEitNjo0Rj6swNpBC5Xv/T1mgFtSgvVuk1T2QtSHJcPoQyzQ
zbijsCKmvJYbdJBnPiotdrBs1BUxBsP9dBT9IxWzMy6lcEpTJrYpUheRCk2tSHFa
Kk8O8IYNiBKZaSpN9UHKaGzr43H8gNbLf4svSIiu1lZJTSSdtWqfZZYjXFBgB1Vb
l2gBCDmPJ0y7WKZSCa53UmQiOusr+l3Pi+OflZEfCy6JxbSqTTM=
=GNlu
-----END PGP SIGNATURE-----

Merge tag 'cxl-for-6.13' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl

Pull cxl updates from Dave Jiang:

 - Constify range_contains() input parameters to prevent changes

 - Add support for displaying RCD capabilities in sysfs to support lspci
   for CXL device

 - Downgrade warning message to debug in cxl_probe_component_regs()

 - Add support for adding a printf specifier '%pra' to emit 'struct
   range' content:
     - Add sanity tests for 'struct resource'
     - Add documentation for special case
     - Add %pra for 'struct range'
     - Add %pra usage in CXL code

 - Add preparation code for DCD support:
     - Add range_overlaps()
     - Add CDAT DSMAS table shared and read only flag in ACPICA
     - Add documentation to 'struct dev_dax_range'
     - Delay event buffer allocation in CXL PCI code until needed
     - Use guard() in cxl_dpa_set_mode()
     - Refactor create region code to consolidate common code

* tag 'cxl-for-6.13' of git://git.kernel.org/pub/scm/linux/kernel/git/cxl/cxl:
  cxl/region: Refactor common create region code
  cxl/hdm: Use guard() in cxl_dpa_set_mode()
  cxl/pci: Delay event buffer allocation
  dax: Document struct dev_dax_range
  ACPI/CDAT: Add CDAT/DSMAS shared and read only flag values
  range: Add range_overlaps()
  cxl/cdat: Use %pra for dpa range outputs
  printf: Add print format (%pra) for struct range
  Documentation/printf: struct resource add start == end special case test
  printf: Add very basic struct resource tests
  cxl: downgrade a warning message to debug level in cxl_probe_component_regs()
  cxl/pci: Add sysfs attribute for CXL 1.1 device link status
  cxl/core/regs: Add rcd_pcie_cap initialization
  kernel/range: Const-ify range_contains parameters
commit 563cb0b1e7
@@ -209,12 +209,17 @@ Struct Resources
 ::
 
 	%pr	[mem 0x60000000-0x6fffffff flags 0x2200] or
+		[mem 0x60000000 flags 0x2200] or
 		[mem 0x0000000060000000-0x000000006fffffff flags 0x2200]
+		[mem 0x0000000060000000 flags 0x2200]
 	%pR	[mem 0x60000000-0x6fffffff pref] or
+		[mem 0x60000000 pref] or
 		[mem 0x0000000060000000-0x000000006fffffff pref]
+		[mem 0x0000000060000000 pref]
 
 For printing struct resources. The ``R`` and ``r`` specifiers result in a
-printed resource with (R) or without (r) a decoded flags member.
+printed resource with (R) or without (r) a decoded flags member. If start is
+equal to end only print the start value.
 
 Passed by reference.
 
@@ -231,6 +236,19 @@ width of the CPU data path.
 
 Passed by reference.
 
+Struct Range
+------------
+
+::
+
+	%pra	[range 0x0000000060000000-0x000000006fffffff] or
+		[range 0x0000000060000000]
+
+For printing struct range. struct range holds an arbitrary range of u64
+values. If start is equal to end only print the start value.
+
+Passed by reference.
+
 DMA address types dma_addr_t
 ----------------------------
 
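A minimal usage sketch of the %pra specifier documented above; the helper function, the device argument and the range values are illustrative only, not taken from the series:

	#include <linux/device.h>
	#include <linux/range.h>

	static void example_print_range(struct device *dev)
	{
		/* %pra expects a pointer to the struct range being printed */
		struct range r = DEFINE_RANGE(0x60000000, 0x6fffffff);

		dev_dbg(dev, "decoded span: %pra\n", &r);
		/* emits: decoded span: [range 0x0000000060000000-0x000000006fffffff] */
	}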
@@ -247,8 +247,8 @@ static void update_perf_entry(struct device *dev, struct dsmas_entry *dent,
 	dpa_perf->dpa_range = dent->dpa_range;
 	dpa_perf->qos_class = dent->qos_class;
 	dev_dbg(dev,
-		"DSMAS: dpa: %#llx qos: %d read_bw: %d write_bw %d read_lat: %d write_lat: %d\n",
-		dent->dpa_range.start, dpa_perf->qos_class,
+		"DSMAS: dpa: %pra qos: %d read_bw: %d write_bw %d read_lat: %d write_lat: %d\n",
+		&dent->dpa_range, dpa_perf->qos_class,
 		dent->coord[ACCESS_COORDINATE_CPU].read_bandwidth,
 		dent->coord[ACCESS_COORDINATE_CPU].write_bandwidth,
 		dent->coord[ACCESS_COORDINATE_CPU].read_latency,
@@ -279,8 +279,8 @@ static void cxl_memdev_set_qos_class(struct cxl_dev_state *cxlds,
 			 range_contains(&pmem_range, &dent->dpa_range))
 			update_perf_entry(dev, dent, &mds->pmem_perf);
 		else
-			dev_dbg(dev, "no partition for dsmas dpa: %#llx\n",
-				dent->dpa_range.start);
+			dev_dbg(dev, "no partition for dsmas dpa: %pra\n",
+				&dent->dpa_range);
 	}
 }
 
@@ -89,6 +89,11 @@ resource_size_t __rcrb_to_component(struct device *dev,
 				    enum cxl_rcrb which);
 u16 cxl_rcrb_to_aer(struct device *dev, resource_size_t rcrb);
 
+#define PCI_RCRB_CAP_LIST_ID_MASK	GENMASK(7, 0)
+#define PCI_RCRB_CAP_HDR_ID_MASK	GENMASK(7, 0)
+#define PCI_RCRB_CAP_HDR_NEXT_MASK	GENMASK(15, 8)
+#define PCI_CAP_EXP_SIZEOF		0x3c
+
 extern struct rw_semaphore cxl_dpa_rwsem;
 extern struct rw_semaphore cxl_region_rwsem;
 
@@ -424,7 +424,6 @@ int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
 	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
 	struct device *dev = &cxled->cxld.dev;
-	int rc;
 
 	switch (mode) {
 	case CXL_DECODER_RAM:
@@ -435,11 +434,9 @@ int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
 		return -EINVAL;
 	}
 
-	down_write(&cxl_dpa_rwsem);
-	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
-		rc = -EBUSY;
-		goto out;
-	}
+	guard(rwsem_write)(&cxl_dpa_rwsem);
+	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE)
+		return -EBUSY;
 
 	/*
 	 * Only allow modes that are supported by the current partition
@@ -447,21 +444,15 @@ int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
 	 */
 	if (mode == CXL_DECODER_PMEM && !resource_size(&cxlds->pmem_res)) {
 		dev_dbg(dev, "no available pmem capacity\n");
-		rc = -ENXIO;
-		goto out;
+		return -ENXIO;
 	}
 	if (mode == CXL_DECODER_RAM && !resource_size(&cxlds->ram_res)) {
 		dev_dbg(dev, "no available ram capacity\n");
-		rc = -ENXIO;
-		goto out;
+		return -ENXIO;
 	}
 
 	cxled->mode = mode;
-	rc = 0;
-out:
-	up_write(&cxl_dpa_rwsem);
-
-	return rc;
+	return 0;
 }
 
 int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
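The conversion above relies on the kernel's scope-based cleanup helpers from <linux/cleanup.h>: guard(rwsem_write)() takes the semaphore and releases it automatically on every return path, which is what lets the rc/goto-out unwinding collapse into direct returns. A minimal sketch of the pattern, with an illustrative lock and function that are not part of the CXL code:

	#include <linux/cleanup.h>
	#include <linux/errno.h>
	#include <linux/rwsem.h>

	static DECLARE_RWSEM(example_rwsem);

	static int example_op(bool busy)
	{
		guard(rwsem_write)(&example_rwsem);	/* down_write() happens here */

		if (busy)
			return -EBUSY;			/* up_write() runs automatically */

		return 0;				/* ...and on this path too */
	}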
@@ -2537,9 +2537,8 @@ static struct cxl_region *__create_region(struct cxl_root_decoder *cxlrd,
 	return devm_cxl_add_region(cxlrd, id, mode, CXL_DECODER_HOSTONLYMEM);
 }
 
-static ssize_t create_pmem_region_store(struct device *dev,
-					struct device_attribute *attr,
-					const char *buf, size_t len)
+static ssize_t create_region_store(struct device *dev, const char *buf,
+				   size_t len, enum cxl_decoder_mode mode)
 {
 	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
 	struct cxl_region *cxlr;
@@ -2549,31 +2548,26 @@ static ssize_t create_pmem_region_store(struct device *dev,
 	if (rc != 1)
 		return -EINVAL;
 
-	cxlr = __create_region(cxlrd, CXL_DECODER_PMEM, id);
+	cxlr = __create_region(cxlrd, mode, id);
 	if (IS_ERR(cxlr))
 		return PTR_ERR(cxlr);
 
 	return len;
 }
+
+static ssize_t create_pmem_region_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t len)
+{
+	return create_region_store(dev, buf, len, CXL_DECODER_PMEM);
+}
 DEVICE_ATTR_RW(create_pmem_region);
 
 static ssize_t create_ram_region_store(struct device *dev,
 				       struct device_attribute *attr,
 				       const char *buf, size_t len)
 {
-	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
-	struct cxl_region *cxlr;
-	int rc, id;
-
-	rc = sscanf(buf, "region%d\n", &id);
-	if (rc != 1)
-		return -EINVAL;
-
-	cxlr = __create_region(cxlrd, CXL_DECODER_RAM, id);
-	if (IS_ERR(cxlr))
-		return PTR_ERR(cxlr);
-
-	return len;
+	return create_region_store(dev, buf, len, CXL_DECODER_RAM);
 }
 DEVICE_ATTR_RW(create_ram_region);
 
@@ -52,7 +52,7 @@ void cxl_probe_component_regs(struct device *dev, void __iomem *base,
 	cap_array = readl(base + CXL_CM_CAP_HDR_OFFSET);
 
 	if (FIELD_GET(CXL_CM_CAP_HDR_ID_MASK, cap_array) != CM_CAP_HDR_CAP_ID) {
-		dev_err(dev,
+		dev_dbg(dev,
 			"Couldn't locate the CXL.cache and CXL.mem capability array header.\n");
 		return;
 	}
@@ -506,6 +506,62 @@ u16 cxl_rcrb_to_aer(struct device *dev, resource_size_t rcrb)
 	return offset;
 }
 
+static resource_size_t cxl_rcrb_to_linkcap(struct device *dev, struct cxl_dport *dport)
+{
+	resource_size_t rcrb = dport->rcrb.base;
+	void __iomem *addr;
+	u32 cap_hdr;
+	u16 offset;
+
+	if (!request_mem_region(rcrb, SZ_4K, "CXL RCRB"))
+		return CXL_RESOURCE_NONE;
+
+	addr = ioremap(rcrb, SZ_4K);
+	if (!addr) {
+		dev_err(dev, "Failed to map region %pr\n", addr);
+		release_mem_region(rcrb, SZ_4K);
+		return CXL_RESOURCE_NONE;
+	}
+
+	offset = FIELD_GET(PCI_RCRB_CAP_LIST_ID_MASK, readw(addr + PCI_CAPABILITY_LIST));
+	cap_hdr = readl(addr + offset);
+	while ((FIELD_GET(PCI_RCRB_CAP_HDR_ID_MASK, cap_hdr)) != PCI_CAP_ID_EXP) {
+		offset = FIELD_GET(PCI_RCRB_CAP_HDR_NEXT_MASK, cap_hdr);
+		if (offset == 0 || offset > SZ_4K) {
+			offset = 0;
+			break;
+		}
+		cap_hdr = readl(addr + offset);
+	}
+
+	iounmap(addr);
+	release_mem_region(rcrb, SZ_4K);
+	if (!offset)
+		return CXL_RESOURCE_NONE;
+
+	return offset;
+}
+
+int cxl_dport_map_rcd_linkcap(struct pci_dev *pdev, struct cxl_dport *dport)
+{
+	void __iomem *dport_pcie_cap = NULL;
+	resource_size_t pos;
+	struct cxl_rcrb_info *ri;
+
+	ri = &dport->rcrb;
+	pos = cxl_rcrb_to_linkcap(&pdev->dev, dport);
+	if (pos == CXL_RESOURCE_NONE)
+		return -ENXIO;
+
+	dport_pcie_cap = devm_cxl_iomap_block(&pdev->dev,
+					      ri->base + pos,
+					      PCI_CAP_EXP_SIZEOF);
+	dport->regs.rcd_pcie_cap = dport_pcie_cap;
+
+	return 0;
+}
+EXPORT_SYMBOL_NS_GPL(cxl_dport_map_rcd_linkcap, CXL);
+
 resource_size_t __rcrb_to_component(struct device *dev, struct cxl_rcrb_info *ri,
 				    enum cxl_rcrb which)
 {
@@ -235,6 +235,14 @@ struct cxl_regs {
 	struct_group_tagged(cxl_rch_regs, rch_regs,
 		void __iomem *dport_aer;
 	);
+
+	/*
+	 * RCD upstream port specific PCIe cap register
+	 * @pcie_cap: CXL 3.0 8.2.1.2 RCD Upstream Port RCRB
+	 */
+	struct_group_tagged(cxl_rcd_regs, rcd_regs,
+		void __iomem *rcd_pcie_cap;
+	);
 };
 
 struct cxl_reg_map {
@@ -304,6 +312,7 @@ int cxl_setup_regs(struct cxl_register_map *map);
 struct cxl_dport;
 resource_size_t cxl_rcd_component_reg_phys(struct device *dev,
 					   struct cxl_dport *dport);
+int cxl_dport_map_rcd_linkcap(struct pci_dev *pdev, struct cxl_dport *dport);
 
 #define CXL_RESOURCE_NONE ((resource_size_t) -1)
 #define CXL_TARGET_STRLEN 20
@@ -475,9 +475,9 @@ static bool is_cxl_restricted(struct pci_dev *pdev)
 }
 
 static int cxl_rcrb_get_comp_regs(struct pci_dev *pdev,
-				  struct cxl_register_map *map)
+				  struct cxl_register_map *map,
+				  struct cxl_dport *dport)
 {
-	struct cxl_dport *dport;
 	resource_size_t component_reg_phys;
 
 	*map = (struct cxl_register_map) {
@@ -513,11 +513,24 @@ static int cxl_pci_setup_regs(struct pci_dev *pdev, enum cxl_regloc_type type,
 	 * is an RCH and try to extract the Component Registers from
 	 * an RCRB.
 	 */
-	if (rc && type == CXL_REGLOC_RBI_COMPONENT && is_cxl_restricted(pdev))
-		rc = cxl_rcrb_get_comp_regs(pdev, map);
+	if (rc && type == CXL_REGLOC_RBI_COMPONENT && is_cxl_restricted(pdev)) {
+		struct cxl_dport *dport;
+		struct cxl_port *port __free(put_cxl_port) =
+			cxl_pci_find_port(pdev, &dport);
+		if (!port)
+			return -EPROBE_DEFER;
 
-	if (rc)
-		return rc;
+		rc = cxl_rcrb_get_comp_regs(pdev, map, dport);
+		if (rc)
+			return rc;
+
+		rc = cxl_dport_map_rcd_linkcap(pdev, dport);
+		if (rc)
+			return rc;
+
+	} else if (rc) {
+		return rc;
+	}
 
 	return cxl_setup_regs(map);
 }
@@ -764,10 +777,6 @@ static int cxl_event_config(struct pci_host_bridge *host_bridge,
 		return 0;
 	}
 
-	rc = cxl_mem_alloc_event_buf(mds);
-	if (rc)
-		return rc;
-
 	rc = cxl_event_get_int_policy(mds, &policy);
 	if (rc)
 		return rc;
@@ -781,6 +790,10 @@ static int cxl_event_config(struct pci_host_bridge *host_bridge,
 		return -EBUSY;
 	}
 
+	rc = cxl_mem_alloc_event_buf(mds);
+	if (rc)
+		return rc;
+
 	rc = cxl_event_irqsetup(mds);
 	if (rc)
 		return rc;
@@ -807,6 +820,83 @@ static int cxl_pci_type3_init_mailbox(struct cxl_dev_state *cxlds)
 	return 0;
 }
 
+static ssize_t rcd_pcie_cap_emit(struct device *dev, u16 offset, char *buf, size_t width)
+{
+	struct cxl_dev_state *cxlds = dev_get_drvdata(dev);
+	struct cxl_memdev *cxlmd = cxlds->cxlmd;
+	struct device *root_dev;
+	struct cxl_dport *dport;
+	struct cxl_port *root __free(put_cxl_port) =
+		cxl_mem_find_port(cxlmd, &dport);
+
+	if (!root)
+		return -ENXIO;
+
+	root_dev = root->uport_dev;
+	if (!root_dev)
+		return -ENXIO;
+
+	guard(device)(root_dev);
+	if (!root_dev->driver)
+		return -ENXIO;
+
+	switch (width) {
+	case 2:
+		return sysfs_emit(buf, "%#x\n",
+				  readw(dport->regs.rcd_pcie_cap + offset));
+	case 4:
+		return sysfs_emit(buf, "%#x\n",
+				  readl(dport->regs.rcd_pcie_cap + offset));
+	default:
+		return -EINVAL;
+	}
+}
+
+static ssize_t rcd_link_cap_show(struct device *dev,
+				 struct device_attribute *attr, char *buf)
+{
+	return rcd_pcie_cap_emit(dev, PCI_EXP_LNKCAP, buf, sizeof(u32));
+}
+static DEVICE_ATTR_RO(rcd_link_cap);
+
+static ssize_t rcd_link_ctrl_show(struct device *dev,
+				  struct device_attribute *attr, char *buf)
+{
+	return rcd_pcie_cap_emit(dev, PCI_EXP_LNKCTL, buf, sizeof(u16));
+}
+static DEVICE_ATTR_RO(rcd_link_ctrl);
+
+static ssize_t rcd_link_status_show(struct device *dev,
+				    struct device_attribute *attr, char *buf)
+{
+	return rcd_pcie_cap_emit(dev, PCI_EXP_LNKSTA, buf, sizeof(u16));
+}
+static DEVICE_ATTR_RO(rcd_link_status);
+
+static struct attribute *cxl_rcd_attrs[] = {
+	&dev_attr_rcd_link_cap.attr,
+	&dev_attr_rcd_link_ctrl.attr,
+	&dev_attr_rcd_link_status.attr,
+	NULL
+};
+
+static umode_t cxl_rcd_visible(struct kobject *kobj, struct attribute *a, int n)
+{
+	struct device *dev = kobj_to_dev(kobj);
+	struct pci_dev *pdev = to_pci_dev(dev);
+
+	if (is_cxl_restricted(pdev))
+		return a->mode;
+
+	return 0;
+}
+
+static struct attribute_group cxl_rcd_group = {
+	.attrs = cxl_rcd_attrs,
+	.is_visible = cxl_rcd_visible,
+};
+__ATTRIBUTE_GROUPS(cxl_rcd);
+
 static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	struct pci_host_bridge *host_bridge = pci_find_host_bridge(pdev->bus);
@@ -1016,6 +1106,7 @@ static struct pci_driver cxl_pci_driver = {
 	.id_table = cxl_mem_pci_tbl,
 	.probe = cxl_pci_probe,
 	.err_handler = &cxl_error_handlers,
+	.dev_groups = cxl_rcd_groups,
 	.driver = {
 		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
 	},
@@ -40,12 +40,30 @@ struct dax_region {
 	struct device *youngest;
 };
 
+/**
+ * struct dax_mapping - device to display mapping range attributes
+ * @dev: device representing this range
+ * @range_id: index within dev_dax ranges array
+ * @id: ida of this mapping
+ */
 struct dax_mapping {
 	struct device dev;
 	int range_id;
 	int id;
 };
 
+/**
+ * struct dev_dax_range - tuple represenging a range of memory used by dev_dax
+ * @pgoff: page offset
+ * @range: resource-span
+ * @mapping: reference to the dax_mapping for this range
+ */
+struct dev_dax_range {
+	unsigned long pgoff;
+	struct range range;
+	struct dax_mapping *mapping;
+};
+
 /**
  * struct dev_dax - instance data for a subdivision of a dax region, and
  * data while the device is activated in the driver.
@@ -58,7 +76,7 @@ struct dax_mapping {
  * @dev - device core
  * @pgmap - pgmap for memmap setup / lifetime (driver owned)
  * @nr_range: size of @ranges
- * @ranges: resource-span + pgoff tuples for the instance
+ * @ranges: range tuples of memory used
  */
 struct dev_dax {
 	struct dax_region *region;
@@ -72,11 +90,7 @@ struct dev_dax {
 	struct dev_pagemap *pgmap;
 	bool memmap_on_memory;
 	int nr_range;
-	struct dev_dax_range {
-		unsigned long pgoff;
-		struct range range;
-		struct dax_mapping *mapping;
-	} *ranges;
+	struct dev_dax_range *ranges;
 };
 
 /*
@@ -111,8 +111,8 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
 	return NULL;
 }
 
-static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
-			  u64 len)
+static int btrfs_range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
+				u64 len)
 {
 	if (file_offset + len <= entry->file_offset ||
 	    entry->file_offset + entry->num_bytes <= file_offset)
@@ -985,7 +985,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
 
 	while (1) {
 		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
-		if (range_overlaps(entry, file_offset, len))
+		if (btrfs_range_overlaps(entry, file_offset, len))
 			break;
 
 		if (entry->file_offset >= file_offset + len) {
@@ -1114,12 +1114,12 @@ struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
 	}
 	if (prev) {
 		entry = rb_entry(prev, struct btrfs_ordered_extent, rb_node);
-		if (range_overlaps(entry, file_offset, len))
+		if (btrfs_range_overlaps(entry, file_offset, len))
 			goto out;
 	}
 	if (next) {
 		entry = rb_entry(next, struct btrfs_ordered_extent, rb_node);
-		if (range_overlaps(entry, file_offset, len))
+		if (btrfs_range_overlaps(entry, file_offset, len))
 			goto out;
 	}
 	/* No ordered extent in the range */
@@ -403,6 +403,8 @@ struct acpi_cdat_dsmas {
 /* Flags for subtable above */
 
 #define ACPI_CDAT_DSMAS_NON_VOLATILE        (1 << 2)
+#define ACPI_CDAT_DSMAS_SHAREABLE           (1 << 3)
+#define ACPI_CDAT_DSMAS_READ_ONLY           (1 << 6)
 
 /* Subtable 1: Device scoped Latency and Bandwidth Information Structure (DSLBIS) */
 
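A hedged sketch of how a CDAT consumer might test the new DSMAS flags; the helper below is illustrative and not part of the series, while the flag names and the flags field of struct acpi_cdat_dsmas come from the header above:

	#include <linux/acpi.h>

	static bool dsmas_is_shared_read_only(const struct acpi_cdat_dsmas *dsmas)
	{
		/* shareable and read-only are independent flag bits */
		return (dsmas->flags & ACPI_CDAT_DSMAS_SHAREABLE) &&
		       (dsmas->flags & ACPI_CDAT_DSMAS_READ_ONLY);
	}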
@@ -13,11 +13,20 @@ static inline u64 range_len(const struct range *range)
 	return range->end - range->start + 1;
 }
 
-static inline bool range_contains(struct range *r1, struct range *r2)
+/* True if r1 completely contains r2 */
+static inline bool range_contains(const struct range *r1,
+				  const struct range *r2)
 {
 	return r1->start <= r2->start && r1->end >= r2->end;
 }
 
+/* True if any part of r1 overlaps r2 */
+static inline bool range_overlaps(const struct range *r1,
+				  const struct range *r2)
+{
+	return r1->start <= r2->end && r1->end >= r2->start;
+}
+
 int add_range(struct range *range, int az, int nr_range,
 		u64 start, u64 end);
 
@@ -31,4 +40,10 @@ int clean_sort_range(struct range *range, int az);
 
 void sort_range(struct range *range, int nr_range);
 
+#define DEFINE_RANGE(_start, _end)	\
+	(struct range) {		\
+		.start = (_start),	\
+		.end = (_end),		\
+	}
+
 #endif
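A short sketch showing how the helpers above compose; the function and the range values are arbitrary examples, not taken from the patches:

	#include <linux/range.h>

	static bool example_range_checks(void)
	{
		struct range dpa       = DEFINE_RANGE(0x1000, 0x1fff);
		struct range partition = DEFINE_RANGE(0x0000, 0x3fff);
		struct range other     = DEFINE_RANGE(0x1800, 0x27ff);

		/* partition fully covers dpa; dpa and other share 0x1800-0x1fff */
		return range_contains(&partition, &dpa) &&
		       range_overlaps(&dpa, &other);
	}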
@@ -386,6 +386,66 @@ kernel_ptr(void)
 static void __init
 struct_resource(void)
 {
+	struct resource test_resource = {
+		.start = 0xc0ffee00,
+		.end = 0xc0ffee00,
+		.flags = IORESOURCE_MEM,
+	};
+
+	test("[mem 0xc0ffee00 flags 0x200]",
+	     "%pr", &test_resource);
+
+	test_resource = (struct resource) {
+		.start = 0xc0ffee,
+		.end = 0xba5eba11,
+		.flags = IORESOURCE_MEM,
+	};
+	test("[mem 0x00c0ffee-0xba5eba11 flags 0x200]",
+	     "%pr", &test_resource);
+
+	test_resource = (struct resource) {
+		.start = 0xba5eba11,
+		.end = 0xc0ffee,
+		.flags = IORESOURCE_MEM,
+	};
+	test("[mem 0xba5eba11-0x00c0ffee flags 0x200]",
+	     "%pr", &test_resource);
+
+	test_resource = (struct resource) {
+		.start = 0xba5eba11,
+		.end = 0xba5eca11,
+		.flags = IORESOURCE_MEM,
+	};
+
+	test("[mem 0xba5eba11-0xba5eca11 flags 0x200]",
+	     "%pr", &test_resource);
+
+	test_resource = (struct resource) {
+		.start = 0xba11,
+		.end = 0xca10,
+		.flags = IORESOURCE_IO |
+			 IORESOURCE_DISABLED |
+			 IORESOURCE_UNSET,
+	};
+
+	test("[io  size 0x1000 disabled]",
+	     "%pR", &test_resource);
+}
+
+static void __init
+struct_range(void)
+{
+	struct range test_range = DEFINE_RANGE(0xc0ffee00ba5eba11,
+					       0xc0ffee00ba5eba11);
+	test("[range 0xc0ffee00ba5eba11]", "%pra", &test_range);
+
+	test_range = DEFINE_RANGE(0xc0ffee, 0xba5eba11);
+	test("[range 0x0000000000c0ffee-0x00000000ba5eba11]",
+	     "%pra", &test_range);
+
+	test_range = DEFINE_RANGE(0xba5eba11, 0xc0ffee);
+	test("[range 0x00000000ba5eba11-0x0000000000c0ffee]",
+	     "%pra", &test_range);
+}
 
 static void __init
@@ -763,6 +823,7 @@ test_pointer(void)
 	symbol_ptr();
 	kernel_ptr();
 	struct_resource();
+	struct_range();
 	addr();
 	escaped_str();
 	hex_string();
@@ -1039,6 +1039,20 @@ static const struct printf_spec default_dec04_spec = {
 	.flags = ZEROPAD,
 };
 
+static noinline_for_stack
+char *hex_range(char *buf, char *end, u64 start_val, u64 end_val,
+		struct printf_spec spec)
+{
+	buf = number(buf, end, start_val, spec);
+	if (start_val == end_val)
+		return buf;
+
+	if (buf < end)
+		*buf = '-';
+	++buf;
+	return number(buf, end, end_val, spec);
+}
+
 static noinline_for_stack
 char *resource_string(char *buf, char *end, struct resource *res,
 		      struct printf_spec spec, const char *fmt)
@@ -1115,11 +1129,7 @@ char *resource_string(char *buf, char *end, struct resource *res,
 		p = string_nocheck(p, pend, "size ", str_spec);
 		p = number(p, pend, resource_size(res), *specp);
 	} else {
-		p = number(p, pend, res->start, *specp);
-		if (res->start != res->end) {
-			*p++ = '-';
-			p = number(p, pend, res->end, *specp);
-		}
+		p = hex_range(p, pend, res->start, res->end, *specp);
 	}
 	if (decode) {
 		if (res->flags & IORESOURCE_MEM_64)
@@ -1140,6 +1150,31 @@ char *resource_string(char *buf, char *end, struct resource *res,
 	return string_nocheck(buf, end, sym, spec);
 }
 
+static noinline_for_stack
+char *range_string(char *buf, char *end, const struct range *range,
+		   struct printf_spec spec, const char *fmt)
+{
+	char sym[sizeof("[range 0x0123456789abcdef-0x0123456789abcdef]")];
+	char *p = sym, *pend = sym + sizeof(sym);
+
+	struct printf_spec range_spec = {
+		.field_width = 2 + 2 * sizeof(range->start), /* 0x + 2 * 8 */
+		.flags = SPECIAL | SMALL | ZEROPAD,
+		.base = 16,
+		.precision = -1,
+	};
+
+	if (check_pointer(&buf, end, range, spec))
+		return buf;
+
+	p = string_nocheck(p, pend, "[range ", default_str_spec);
+	p = hex_range(p, pend, range->start, range->end, range_spec);
+	*p++ = ']';
+	*p = '\0';
+
+	return string_nocheck(buf, end, sym, spec);
+}
+
 static noinline_for_stack
 char *hex_string(char *buf, char *end, u8 *addr, struct printf_spec spec,
 		 const char *fmt)
@@ -2229,6 +2264,15 @@ char *fwnode_string(char *buf, char *end, struct fwnode_handle *fwnode,
 	return widen_string(buf, buf - buf_start, end, spec);
 }
 
+static noinline_for_stack
+char *resource_or_range(const char *fmt, char *buf, char *end, void *ptr,
+			struct printf_spec spec)
+{
+	if (*fmt == 'r' && fmt[1] == 'a')
+		return range_string(buf, end, ptr, spec, fmt);
+	return resource_string(buf, end, ptr, spec, fmt);
+}
+
 int __init no_hash_pointers_enable(char *str)
 {
 	if (no_hash_pointers)
@@ -2277,6 +2321,7 @@ char *rust_fmt_argument(char *buf, char *end, void *ptr);
 * - 'Bb' as above with module build ID (for use in backtraces)
 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
 * - 'r' For raw struct resource, e.g., [mem 0x0-0x1f flags 0x201]
+ * - 'ra' For struct ranges, e.g., [range 0x0000000000000000 - 0x00000000000000ff]
 * - 'b[l]' For a bitmap, the number of bits is determined by the field
 *       width which must be explicitly specified either as part of the
 *       format string '%32b[l]' or through '%*b[l]', [l] selects
@@ -2401,7 +2446,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
 		return symbol_string(buf, end, ptr, spec, fmt);
 	case 'R':
 	case 'r':
-		return resource_string(buf, end, ptr, spec, fmt);
+		return resource_or_range(fmt, buf, end, ptr, spec);
 	case 'h':
 		return hex_string(buf, end, ptr, spec, fmt);
 	case 'b':