Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
synced 2024-12-28 16:52:18 +00:00
Merge branch 'cxl/for-6.13/dcd-prep' into cxl-for-next
Add preparation patches for the upcoming DCD changes:

- Add range_overlaps()
- Add CDAT/DSMAS shareable and read-only flags in ACPICA
- Add documentation to struct dev_dax_range
- Delay event buffer allocation in CXL PCI
- Use guard() in cxl_dpa_set_mode()
- Refactor common create region code to reduce redundant code
This commit is contained in: commit a83383e2ae
--- a/drivers/cxl/core/hdm.c
+++ b/drivers/cxl/core/hdm.c
@@ -424,7 +424,6 @@ int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
 	struct cxl_memdev *cxlmd = cxled_to_memdev(cxled);
 	struct cxl_dev_state *cxlds = cxlmd->cxlds;
 	struct device *dev = &cxled->cxld.dev;
-	int rc;
 
 	switch (mode) {
 	case CXL_DECODER_RAM:
@@ -435,11 +434,9 @@ int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
 		return -EINVAL;
 	}
 
-	down_write(&cxl_dpa_rwsem);
-	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE) {
-		rc = -EBUSY;
-		goto out;
-	}
+	guard(rwsem_write)(&cxl_dpa_rwsem);
+	if (cxled->cxld.flags & CXL_DECODER_F_ENABLE)
+		return -EBUSY;
 
 	/*
 	 * Only allow modes that are supported by the current partition
@@ -447,21 +444,15 @@ int cxl_dpa_set_mode(struct cxl_endpoint_decoder *cxled,
 	 */
 	if (mode == CXL_DECODER_PMEM && !resource_size(&cxlds->pmem_res)) {
 		dev_dbg(dev, "no available pmem capacity\n");
-		rc = -ENXIO;
-		goto out;
+		return -ENXIO;
 	}
 	if (mode == CXL_DECODER_RAM && !resource_size(&cxlds->ram_res)) {
 		dev_dbg(dev, "no available ram capacity\n");
-		rc = -ENXIO;
-		goto out;
+		return -ENXIO;
 	}
 
 	cxled->mode = mode;
-	rc = 0;
-out:
-	up_write(&cxl_dpa_rwsem);
-
-	return rc;
+	return 0;
 }
 
 int cxl_dpa_alloc(struct cxl_endpoint_decoder *cxled, unsigned long long size)
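For readers unfamiliar with the idiom: guard() comes from the scope-based cleanup helpers in <linux/cleanup.h>, and rwsem_write is the guard class defined alongside the rw_semaphore API that pairs down_write() on entry with an automatic up_write() when the guard goes out of scope. A minimal before/after sketch of the pattern, with an illustrative lock and function names that are not part of the patch:

#include <linux/cleanup.h>
#include <linux/rwsem.h>

static DECLARE_RWSEM(example_rwsem);	/* illustrative lock */

/* Manual locking: every exit path must reach the unlock. */
static int set_state_manual(bool busy)
{
	int rc = 0;

	down_write(&example_rwsem);
	if (busy)
		rc = -EBUSY;
	up_write(&example_rwsem);
	return rc;
}

/* guard() form: up_write() runs automatically at end of scope, so error
 * paths can return directly, which is what the conversion above exploits. */
static int set_state_guarded(bool busy)
{
	guard(rwsem_write)(&example_rwsem);
	if (busy)
		return -EBUSY;
	return 0;
}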
--- a/drivers/cxl/core/region.c
+++ b/drivers/cxl/core/region.c
@@ -2536,9 +2536,8 @@ static struct cxl_region *__create_region(struct cxl_root_decoder *cxlrd,
 	return devm_cxl_add_region(cxlrd, id, mode, CXL_DECODER_HOSTONLYMEM);
 }
 
-static ssize_t create_pmem_region_store(struct device *dev,
-					struct device_attribute *attr,
-					const char *buf, size_t len)
+static ssize_t create_region_store(struct device *dev, const char *buf,
+				   size_t len, enum cxl_decoder_mode mode)
 {
 	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
 	struct cxl_region *cxlr;
@@ -2548,31 +2547,26 @@ static ssize_t create_pmem_region_store(struct device *dev,
 	if (rc != 1)
 		return -EINVAL;
 
-	cxlr = __create_region(cxlrd, CXL_DECODER_PMEM, id);
+	cxlr = __create_region(cxlrd, mode, id);
 	if (IS_ERR(cxlr))
 		return PTR_ERR(cxlr);
 
 	return len;
 }
+
+static ssize_t create_pmem_region_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t len)
+{
+	return create_region_store(dev, buf, len, CXL_DECODER_PMEM);
+}
 DEVICE_ATTR_RW(create_pmem_region);
 
 static ssize_t create_ram_region_store(struct device *dev,
 				       struct device_attribute *attr,
 				       const char *buf, size_t len)
 {
-	struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev);
-	struct cxl_region *cxlr;
-	int rc, id;
-
-	rc = sscanf(buf, "region%d\n", &id);
-	if (rc != 1)
-		return -EINVAL;
-
-	cxlr = __create_region(cxlrd, CXL_DECODER_RAM, id);
-	if (IS_ERR(cxlr))
-		return PTR_ERR(cxlr);
-
-	return len;
+	return create_region_store(dev, buf, len, CXL_DECODER_RAM);
 }
 DEVICE_ATTR_RW(create_ram_region);
 
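The payoff of this refactor is that each region type becomes a thin wrapper around create_region_store(). As a hedged sketch of why this matters for the DCD work being prepared here, a future dynamic-capacity attribute could reuse the helper the same way; note that create_dc_region_store and CXL_DECODER_DC0 below are assumed names for illustration, not identifiers from this merge:

/* Hypothetical future wrapper; a matching _show() and DEVICE_ATTR_RW()
 * hookup would follow the pmem/ram pattern above. */
static ssize_t create_dc_region_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t len)
{
	return create_region_store(dev, buf, len, CXL_DECODER_DC0);
}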
--- a/drivers/cxl/pci.c
+++ b/drivers/cxl/pci.c
@@ -777,10 +777,6 @@ static int cxl_event_config(struct pci_host_bridge *host_bridge,
 		return 0;
 	}
 
-	rc = cxl_mem_alloc_event_buf(mds);
-	if (rc)
-		return rc;
-
 	rc = cxl_event_get_int_policy(mds, &policy);
 	if (rc)
 		return rc;
@@ -794,6 +790,10 @@ static int cxl_event_config(struct pci_host_bridge *host_bridge,
 		return -EBUSY;
 	}
 
+	rc = cxl_mem_alloc_event_buf(mds);
+	if (rc)
+		return rc;
+
 	rc = cxl_event_irqsetup(mds);
 	if (rc)
 		return rc;
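This hunk only moves cxl_mem_alloc_event_buf() later in cxl_event_config(), past the checks that can reject the configuration, so an early return no longer leaves the event buffer allocated for nothing. A generic sketch of the allocate-last ordering (all names below are illustrative stand-ins, not the driver's API):

struct ctx;				/* assumed context type */
int check_policy(struct ctx *c);	/* assumed helpers, declared */
int alloc_buffer(struct ctx *c);	/* only to keep the sketch   */
int setup_irq(struct ctx *c);		/* self-contained            */

/* Run every check that can fail cheaply before committing memory. */
int configure_events(struct ctx *c)
{
	int rc;

	rc = check_policy(c);
	if (rc)
		return rc;

	rc = alloc_buffer(c);
	if (rc)
		return rc;

	return setup_irq(c);
}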
--- a/drivers/dax/dax-private.h
+++ b/drivers/dax/dax-private.h
@@ -40,12 +40,30 @@ struct dax_region {
 	struct device *youngest;
 };
 
+/**
+ * struct dax_mapping - device to display mapping range attributes
+ * @dev: device representing this range
+ * @range_id: index within dev_dax ranges array
+ * @id: ida of this mapping
+ */
 struct dax_mapping {
 	struct device dev;
 	int range_id;
 	int id;
 };
 
+/**
+ * struct dev_dax_range - tuple representing a range of memory used by dev_dax
+ * @pgoff: page offset
+ * @range: resource-span
+ * @mapping: reference to the dax_mapping for this range
+ */
+struct dev_dax_range {
+	unsigned long pgoff;
+	struct range range;
+	struct dax_mapping *mapping;
+};
+
 /**
  * struct dev_dax - instance data for a subdivision of a dax region, and
  * data while the device is activated in the driver.
@@ -58,7 +76,7 @@ struct dax_mapping {
  * @dev - device core
  * @pgmap - pgmap for memmap setup / lifetime (driver owned)
  * @nr_range: size of @ranges
- * @ranges: resource-span + pgoff tuples for the instance
+ * @ranges: range tuples of memory used
  */
 struct dev_dax {
 	struct dax_region *region;
@@ -72,11 +90,7 @@ struct dev_dax {
 	struct dev_pagemap *pgmap;
 	bool memmap_on_memory;
 	int nr_range;
-	struct dev_dax_range {
-		unsigned long pgoff;
-		struct range range;
-		struct dax_mapping *mapping;
-	} *ranges;
+	struct dev_dax_range *ranges;
 };
 
 /*
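Besides adding kernel-doc, this hunk promotes dev_dax_range from an anonymous struct nested in struct dev_dax to a named top-level type, so it can now appear in function signatures. A small sketch under that assumption; dev_dax_range_len() is an invented helper, though range_len() is the real one from <linux/range.h>:

#include <linux/range.h>

/* Hypothetical convenience helper enabled by the named type. */
static inline u64 dev_dax_range_len(const struct dev_dax_range *dax_range)
{
	return range_len(&dax_range->range);
}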
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -111,8 +111,8 @@ static struct rb_node *__tree_search(struct rb_root *root, u64 file_offset,
 	return NULL;
 }
 
-static int range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
-			  u64 len)
+static int btrfs_range_overlaps(struct btrfs_ordered_extent *entry, u64 file_offset,
+				u64 len)
 {
 	if (file_offset + len <= entry->file_offset ||
 	    entry->file_offset + entry->num_bytes <= file_offset)
@@ -985,7 +985,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_range(
 
 	while (1) {
 		entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
-		if (range_overlaps(entry, file_offset, len))
+		if (btrfs_range_overlaps(entry, file_offset, len))
 			break;
 
 		if (entry->file_offset >= file_offset + len) {
@@ -1114,12 +1114,12 @@ struct btrfs_ordered_extent *btrfs_lookup_first_ordered_range(
 	}
 	if (prev) {
 		entry = rb_entry(prev, struct btrfs_ordered_extent, rb_node);
-		if (range_overlaps(entry, file_offset, len))
+		if (btrfs_range_overlaps(entry, file_offset, len))
 			goto out;
 	}
 	if (next) {
 		entry = rb_entry(next, struct btrfs_ordered_extent, rb_node);
-		if (range_overlaps(entry, file_offset, len))
+		if (btrfs_range_overlaps(entry, file_offset, len))
 			goto out;
 	}
 	/* No ordered extent in the range */
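The btrfs change is a mechanical rename: the predicate's logic is untouched, and the btrfs_ prefix frees the range_overlaps() name for the generic helper this series adds to include/linux/range.h (last hunk below). The two differ in convention: the btrfs version takes a (file_offset, len) pair with an exclusive end, while the generic one compares inclusive struct range bounds.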
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -403,6 +403,8 @@ struct acpi_cdat_dsmas {
 /* Flags for subtable above */
 
 #define ACPI_CDAT_DSMAS_NON_VOLATILE        (1 << 2)
+#define ACPI_CDAT_DSMAS_SHAREABLE           (1 << 3)
+#define ACPI_CDAT_DSMAS_READ_ONLY           (1 << 6)
 
 /* Subtable 1: Device scoped Latency and Bandwidth Information Structure (DSLBIS) */
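The new bits land next to the existing flag in the flags byte of struct acpi_cdat_dsmas, defined just above this hunk. A minimal sketch of how a consumer might test them; the helper name and surrounding code are assumptions, not part of the patch:

#include <linux/acpi.h>

/* Sketch: report whether a DSMAS entry advertises shareable,
 * read-only capacity. */
static bool dsmas_is_shared_readonly(const struct acpi_cdat_dsmas *dsmas)
{
	return (dsmas->flags & ACPI_CDAT_DSMAS_SHAREABLE) &&
	       (dsmas->flags & ACPI_CDAT_DSMAS_READ_ONLY);
}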
--- a/include/linux/range.h
+++ b/include/linux/range.h
@@ -13,12 +13,20 @@ static inline u64 range_len(const struct range *range)
 	return range->end - range->start + 1;
 }
 
+/* True if r1 completely contains r2 */
 static inline bool range_contains(const struct range *r1,
 				  const struct range *r2)
 {
 	return r1->start <= r2->start && r1->end >= r2->end;
 }
 
+/* True if any part of r1 overlaps r2 */
+static inline bool range_overlaps(const struct range *r1,
+				  const struct range *r2)
+{
+	return r1->start <= r2->end && r1->end >= r2->start;
+}
+
 int add_range(struct range *range, int az, int nr_range,
 	      u64 start, u64 end);
 
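Since struct range uses inclusive start/end bounds, two ranges that merely touch at a single address already count as overlapping. A quick illustration with arbitrary values:

#include <linux/range.h>

static bool range_overlap_example(void)
{
	struct range a = { .start = 0x0000, .end = 0x0fff };
	struct range b = { .start = 0x0fff, .end = 0x1fff };
	struct range c = { .start = 0x1000, .end = 0x1fff };

	return range_overlaps(&a, &b) &&	/* true: both include 0x0fff */
	       !range_overlaps(&a, &c) &&	/* true: a ends before c starts */
	       range_contains(&b, &c);		/* true: b covers all of c */
}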