mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2025-01-08 14:13:53 +00:00
a72a30af55
Patch series "Improved Memory Tier Creation for CPUless NUMA Nodes", v11. When a memory device, such as CXL1.1 type3 memory, is emulated as normal memory (E820_TYPE_RAM), the memory device is indistinguishable from normal DRAM in terms of memory tiering with the current implementation. The current memory tiering assigns all detected normal memory nodes to the same DRAM tier. This results in normal memory devices with different attributions being unable to be assigned to the correct memory tier, leading to the inability to migrate pages between different types of memory. https://lore.kernel.org/linux-mm/PH0PR08MB7955E9F08CCB64F23963B5C3A860A@PH0PR08MB7955.namprd08.prod.outlook.com/T/ This patchset automatically resolves the issues. It delays the initialization of memory tiers for CPUless NUMA nodes until they obtain HMAT information and after all devices are initialized at boot time, eliminating the need for user intervention. If no HMAT is specified, it falls back to using `default_dram_type`. Example usecase: We have CXL memory on the host, and we create VMs with a new system memory device backed by host CXL memory. We inject CXL memory performance attributes through QEMU, and the guest now sees memory nodes with performance attributes in HMAT. With this change, we enable the guest kernel to construct the correct memory tiering for the memory nodes. This patch (of 2): Since different memory devices require finding, allocating, and putting memory types, these common steps are abstracted in this patch, enhancing the scalability and conciseness of the code. 
Link: https://lkml.kernel.org/r/20240405000707.2670063-1-horenchuang@bytedance.com Link: https://lkml.kernel.org/r/20240405000707.2670063-2-horenchuang@bytedance.com Signed-off-by: Ho-Ren (Jack) Chuang <horenchuang@bytedance.com> Reviewed-by: "Huang, Ying" <ying.huang@intel.com> Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawie.com> Cc: Alistair Popple <apopple@nvidia.com> Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com> Cc: Dan Williams <dan.j.williams@intel.com> Cc: Dave Jiang <dave.jiang@intel.com> Cc: Gregory Price <gourry.memverge@gmail.com> Cc: Hao Xiang <hao.xiang@bytedance.com> Cc: Jonathan Cameron <Jonathan.Cameron@huawei.com> Cc: Michal Hocko <mhocko@suse.com> Cc: Ravi Jonnalagadda <ravis.opensrc@micron.com> Cc: SeongJae Park <sj@kernel.org> Cc: Tejun Heo <tj@kernel.org> Cc: Vishal Verma <vishal.l.verma@intel.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
154 lines
3.9 KiB
C
154 lines
3.9 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
#ifndef _LINUX_MEMORY_TIERS_H
|
|
#define _LINUX_MEMORY_TIERS_H
|
|
|
|
#include <linux/types.h>
|
|
#include <linux/nodemask.h>
|
|
#include <linux/kref.h>
|
|
#include <linux/mmzone.h>
|
|
#include <linux/notifier.h>
|
|
/*
 * Each tier covers an abstract distance chunk size of 128
 */
#define MEMTIER_CHUNK_BITS 7
#define MEMTIER_CHUNK_SIZE (1 << MEMTIER_CHUNK_BITS)
/*
 * Smaller abstract distance values imply faster (higher) memory tiers. Offset
 * the DRAM adistance so that we can accommodate devices with a slightly lower
 * adistance value (slightly faster) than default DRAM adistance to be part of
 * the same memory tier.
 */
#define MEMTIER_ADISTANCE_DRAM ((4 * MEMTIER_CHUNK_SIZE) + (MEMTIER_CHUNK_SIZE >> 1))
|
|
|
|
struct memory_tier;

/*
 * A memory type groups NUMA nodes that share the same performance
 * characteristics (the same abstract distance) so they can be placed
 * in the same memory tier.
 */
struct memory_dev_type {
	/* list of memory types that are part of same tier as this type */
	struct list_head tier_sibling;
	/* list of memory types that are managed by one driver */
	struct list_head list;
	/* abstract distance for this specific memory type */
	int adistance;
	/* Nodes of same abstract distance */
	nodemask_t nodes;
	/* reference count; released with put_memory_type() */
	struct kref kref;
};
|
|
|
|
struct access_coordinate;

#ifdef CONFIG_NUMA
/* Whether demotion between memory tiers is enabled. */
extern bool numa_demotion_enabled;
/* Memory type used for ordinary DRAM nodes when no better info exists. */
extern struct memory_dev_type *default_dram_type;

/* Allocate a new memory type with the given abstract distance. */
struct memory_dev_type *alloc_memory_type(int adistance);
/* Drop a reference taken by alloc_memory_type()/mt_find_alloc_memory_type(). */
void put_memory_type(struct memory_dev_type *memtype);
void init_node_memory_type(int node, struct memory_dev_type *default_type);
void clear_node_memory_type(int node, struct memory_dev_type *memtype);

/*
 * Notifier chain for abstract-distance algorithms; mt_calc_adistance()
 * consults the registered callbacks to compute @node's adistance.
 */
int register_mt_adistance_algorithm(struct notifier_block *nb);
int unregister_mt_adistance_algorithm(struct notifier_block *nb);
int mt_calc_adistance(int node, int *adist);
int mt_set_default_dram_perf(int nid, struct access_coordinate *perf,
			     const char *source);
int mt_perf_to_adistance(struct access_coordinate *perf, int *adist);

/*
 * Find a memory type with @adist on @memory_types, or allocate one and add
 * it to the list.  Release the whole list with mt_put_memory_types().
 */
struct memory_dev_type *mt_find_alloc_memory_type(int adist,
						  struct list_head *memory_types);
void mt_put_memory_types(struct list_head *memory_types);
|
|
#ifdef CONFIG_MIGRATION
int next_demotion_node(int node);
void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets);
bool node_is_toptier(int node);
#else
/* !CONFIG_MIGRATION stubs: demotion can never happen. */
static inline int next_demotion_node(int node)
{
	return NUMA_NO_NODE;
}

static inline void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets)
{
	*targets = NODE_MASK_NONE;
}

/* Without migration, every node is treated as a top-tier node. */
static inline bool node_is_toptier(int node)
{
	return true;
}
#endif
|
|
|
|
#else

/* !CONFIG_NUMA stubs: no tiers exist, so everything degrades to no-ops. */
#define numa_demotion_enabled false
#define default_dram_type NULL
/*
 * Unlike the CONFIG_NUMA implementation, which returns a non-NULL pointer
 * (or an error pointer) on failure, this stub always returns NULL.
 */
static inline struct memory_dev_type *alloc_memory_type(int adistance)
{
	return NULL;
}

static inline void put_memory_type(struct memory_dev_type *memtype)
{

}

static inline void init_node_memory_type(int node, struct memory_dev_type *default_type)
{

}

static inline void clear_node_memory_type(int node, struct memory_dev_type *memtype)
{

}

/* No NUMA means there is never a demotion target. */
static inline int next_demotion_node(int node)
{
	return NUMA_NO_NODE;
}

static inline void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets)
{
	*targets = NODE_MASK_NONE;
}

/* A single node holds all memory, so it is always top tier. */
static inline bool node_is_toptier(int node)
{
	return true;
}

static inline int register_mt_adistance_algorithm(struct notifier_block *nb)
{
	return 0;
}

static inline int unregister_mt_adistance_algorithm(struct notifier_block *nb)
{
	return 0;
}

/* NOTIFY_DONE: no adistance algorithm has anything to report. */
static inline int mt_calc_adistance(int node, int *adist)
{
	return NOTIFY_DONE;
}

static inline int mt_set_default_dram_perf(int nid, struct access_coordinate *perf,
					   const char *source)
{
	return -EIO;
}

static inline int mt_perf_to_adistance(struct access_coordinate *perf, int *adist)
{
	return -EIO;
}

static inline struct memory_dev_type *mt_find_alloc_memory_type(int adist,
								struct list_head *memory_types)
{
	return NULL;
}

static inline void mt_put_memory_types(struct list_head *memory_types)
{
}
#endif	/* CONFIG_NUMA */
|
|
#endif /* _LINUX_MEMORY_TIERS_H */
|