mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2025-01-01 02:36:02 +00:00
823430c8e9
The current memory tier initialization process is distributed across two different functions, memory_tier_init() and memory_tier_late_init(). This design is hard to maintain. Thus, this patch is proposed to reduce the possible code paths by consolidating the different initialization paths into one. The earlier discussion with Jonathan and Ying is listed here: https://lore.kernel.org/lkml/20240405150244.00004b49@Huawei.com/ If we want to put these two initializations together, they must be placed together in the later function. Only at that time is the HMAT information ready, so the adist between nodes can be calculated and memory tiering established based on it. So we move the initialization from memory_tier_init() to the memory_tier_late_init() call. Moreover, it is natural to keep memory_tier initialization in drivers at device_initcall() level. If we simply move set_node_memory_tier() from memory_tier_init() to late_initcall(), HMAT will not register the mt_adistance_algorithm callback function, because set_node_memory_tier() is not performed during the memory tiering initialization phase, leading to a lack of correct default_dram information. Therefore, we introduce a nodemask to pass the information about the default DRAM nodes. The reason for not reusing default_dram_type->nodes is that it is not clean enough. So in the end, we use a __initdata variable — one that is released once initialization is complete — including both CPU and memory nodes for HMAT to iterate through.
Link: https://lkml.kernel.org/r/20240704072646.437579-1-horen.chuang@linux.dev Signed-off-by: Ho-Ren (Jack) Chuang <horenchuang@bytedance.com> Suggested-by: Jonathan Cameron <Jonathan.Cameron@huawei.com> Reviewed-by: "Huang, Ying" <ying.huang@intel.com> Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com> Cc: Alistair Popple <apopple@nvidia.com> Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com> Cc: Dan Williams <dan.j.williams@intel.com> Cc: Dave Jiang <dave.jiang@intel.com> Cc: Gregory Price <gourry.memverge@gmail.com> Cc: Len Brown <lenb@kernel.org> Cc: Michal Hocko <mhocko@suse.com> Cc: Rafael J. Wysocki <rafael@kernel.org> Cc: Ravi Jonnalagadda <ravis.opensrc@micron.com> Cc: SeongJae Park <sj@kernel.org> Cc: Tejun Heo <tj@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
156 lines
3.9 KiB
C
156 lines
3.9 KiB
C
/* SPDX-License-Identifier: GPL-2.0 */
|
|
#ifndef _LINUX_MEMORY_TIERS_H
|
|
#define _LINUX_MEMORY_TIERS_H
|
|
|
|
#include <linux/types.h>
|
|
#include <linux/nodemask.h>
|
|
#include <linux/kref.h>
|
|
#include <linux/mmzone.h>
|
|
#include <linux/notifier.h>
|
|
/*
|
|
 * Each tier covers an abstract distance chunk size of 128
|
|
*/
|
|
#define MEMTIER_CHUNK_BITS 7
|
|
#define MEMTIER_CHUNK_SIZE (1 << MEMTIER_CHUNK_BITS)
|
|
/*
|
|
* Smaller abstract distance values imply faster (higher) memory tiers. Offset
|
|
* the DRAM adistance so that we can accommodate devices with a slightly lower
|
|
* adistance value (slightly faster) than default DRAM adistance to be part of
|
|
* the same memory tier.
|
|
*/
|
|
#define MEMTIER_ADISTANCE_DRAM ((4 * MEMTIER_CHUNK_SIZE) + (MEMTIER_CHUNK_SIZE >> 1))
|
|
|
|
struct memory_tier;
|
|
/*
 * A memory type groups NUMA nodes that share the same performance
 * characteristics (abstract distance); types of similar adistance are
 * grouped into the same memory tier.
 */
struct memory_dev_type {
	/* list of memory types that are part of same tier as this type */
	struct list_head tier_sibling;
	/* list of memory types that are managed by one driver */
	struct list_head list;
	/* abstract distance for this specific memory type */
	int adistance;
	/* Nodes of same abstract distance */
	nodemask_t nodes;
	/* reference count; dropped via put_memory_type() */
	struct kref kref;
};
|
|
|
|
struct access_coordinate;
|
|
|
|
#ifdef CONFIG_NUMA
|
|
extern bool numa_demotion_enabled;
|
|
extern struct memory_dev_type *default_dram_type;
|
|
extern nodemask_t default_dram_nodes;
|
|
struct memory_dev_type *alloc_memory_type(int adistance);
|
|
void put_memory_type(struct memory_dev_type *memtype);
|
|
void init_node_memory_type(int node, struct memory_dev_type *default_type);
|
|
void clear_node_memory_type(int node, struct memory_dev_type *memtype);
|
|
int register_mt_adistance_algorithm(struct notifier_block *nb);
|
|
int unregister_mt_adistance_algorithm(struct notifier_block *nb);
|
|
int mt_calc_adistance(int node, int *adist);
|
|
int mt_set_default_dram_perf(int nid, struct access_coordinate *perf,
|
|
const char *source);
|
|
int mt_perf_to_adistance(struct access_coordinate *perf, int *adist);
|
|
struct memory_dev_type *mt_find_alloc_memory_type(int adist,
|
|
struct list_head *memory_types);
|
|
void mt_put_memory_types(struct list_head *memory_types);
|
|
#ifdef CONFIG_MIGRATION
|
|
int next_demotion_node(int node);
|
|
void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets);
|
|
bool node_is_toptier(int node);
|
|
#else
|
|
/* Without CONFIG_MIGRATION there is never a demotion target. */
static inline int next_demotion_node(int node)
{
	return NUMA_NO_NODE;
}
|
|
|
|
/* Without CONFIG_MIGRATION the set of allowed demotion targets is empty. */
static inline void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets)
{
	*targets = NODE_MASK_NONE;
}
|
|
|
|
/* Without demotion support every node is treated as top tier. */
static inline bool node_is_toptier(int node)
{
	return true;
}
|
|
#endif
|
|
|
|
#else
|
|
|
|
#define numa_demotion_enabled false
|
|
#define default_dram_type NULL
|
|
#define default_dram_nodes NODE_MASK_NONE
|
|
/*
 * The CONFIG_NUMA implementation returns a non-NULL error pointer on
 * failure; this !CONFIG_NUMA stub simply returns NULL.
 */
static inline struct memory_dev_type *alloc_memory_type(int adistance)
{
	return NULL;
}
|
|
|
|
/* No-op: without CONFIG_NUMA no memory types are ever allocated. */
static inline void put_memory_type(struct memory_dev_type *memtype)
{

}
|
|
|
|
/* No-op: without CONFIG_NUMA there is no node -> memory-type mapping. */
static inline void init_node_memory_type(int node, struct memory_dev_type *default_type)
{

}
|
|
|
|
/* No-op: without CONFIG_NUMA there is no node -> memory-type mapping. */
static inline void clear_node_memory_type(int node, struct memory_dev_type *memtype)
{

}
|
|
|
|
/* Without CONFIG_NUMA there is never a demotion target. */
static inline int next_demotion_node(int node)
{
	return NUMA_NO_NODE;
}
|
|
|
|
/* Without CONFIG_NUMA the set of allowed demotion targets is empty. */
static inline void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets)
{
	*targets = NODE_MASK_NONE;
}
|
|
|
|
/* Without CONFIG_NUMA there is a single tier; every node is top tier. */
static inline bool node_is_toptier(int node)
{
	return true;
}
|
|
|
|
/* Stub: pretend success; no adistance algorithms exist without CONFIG_NUMA. */
static inline int register_mt_adistance_algorithm(struct notifier_block *nb)
{
	return 0;
}
|
|
|
|
/* Stub: pretend success; nothing was registered without CONFIG_NUMA. */
static inline int unregister_mt_adistance_algorithm(struct notifier_block *nb)
{
	return 0;
}
|
|
|
|
/*
 * Stub: NOTIFY_DONE follows the notifier-chain convention, meaning no
 * algorithm provided an abstract distance for the node.
 */
static inline int mt_calc_adistance(int node, int *adist)
{
	return NOTIFY_DONE;
}
|
|
|
|
/* Stub: recording default DRAM performance is unsupported without CONFIG_NUMA. */
static inline int mt_set_default_dram_perf(int nid, struct access_coordinate *perf,
					   const char *source)
{
	return -EIO;
}
|
|
|
|
/* Stub: performance-to-adistance conversion is unsupported without CONFIG_NUMA. */
static inline int mt_perf_to_adistance(struct access_coordinate *perf, int *adist)
{
	return -EIO;
}
|
|
|
|
/* Stub: no memory types can be found or allocated without CONFIG_NUMA. */
static inline struct memory_dev_type *mt_find_alloc_memory_type(int adist,
								struct list_head *memory_types)
{
	return NULL;
}
|
|
|
|
/* No-op: without CONFIG_NUMA the memory-type list is always empty. */
static inline void mt_put_memory_types(struct list_head *memory_types)
{
}
|
|
#endif /* CONFIG_NUMA */
|
|
#endif /* _LINUX_MEMORY_TIERS_H */
|