Merge branch 'linux-next' of git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
commit 6cdf8ecdc7
@@ -717,7 +717,7 @@ static void battery_hook_unregister_unlocked(struct acpi_battery_hook *hook)
	}
	list_del_init(&hook->list);

	pr_info("extension unregistered: %s\n", hook->name);
	pr_info("hook unregistered: %s\n", hook->name);
}

void battery_hook_unregister(struct acpi_battery_hook *hook)
@@ -751,18 +751,18 @@ void battery_hook_register(struct acpi_battery_hook *hook)
		if (hook->add_battery(battery->bat, hook)) {
			/*
			 * If a add-battery returns non-zero,
			 * the registration of the extension has failed,
			 * the registration of the hook has failed,
			 * and we will not add it to the list of loaded
			 * hooks.
			 */
			pr_err("extension failed to load: %s", hook->name);
			pr_err("hook failed to load: %s", hook->name);
			battery_hook_unregister_unlocked(hook);
			goto end;
		}

		power_supply_changed(battery->bat);
	}
	pr_info("new extension: %s\n", hook->name);
	pr_info("new hook: %s\n", hook->name);
end:
	mutex_unlock(&hook_mutex);
}
@@ -805,10 +805,10 @@ static void battery_hook_add_battery(struct acpi_battery *battery)
	list_for_each_entry_safe(hook_node, tmp, &battery_hook_list, list) {
		if (hook_node->add_battery(battery->bat, hook_node)) {
			/*
			 * The notification of the extensions has failed, to
			 * prevent further errors we will unload the extension.
			 * The notification of the hook has failed, to
			 * prevent further errors we will unload the hook.
			 */
			pr_err("error in extension, unloading: %s",
			pr_err("error in hook, unloading: %s",
			       hook_node->name);
			battery_hook_unregister_unlocked(hook_node);
		}
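For context on what these hooks look like from the consumer side: a battery extension fills in an acpi_battery_hook whose add_battery callback may veto registration by returning non-zero, which triggers the rollback shown above. A minimal hedged sketch (the hook name and callbacks here are hypothetical; the struct layout follows include/acpi/battery.h):

	#include <acpi/battery.h>
	#include <linux/printk.h>

	/* Hypothetical extension: attach to every battery, veto none. */
	static int example_add_battery(struct power_supply *battery,
				       struct acpi_battery_hook *hook)
	{
		pr_info("example hook sees battery %s\n", battery->desc->name);
		return 0;	/* non-zero here would unload the hook, as above */
	}

	static int example_remove_battery(struct power_supply *battery,
					  struct acpi_battery_hook *hook)
	{
		return 0;
	}

	static struct acpi_battery_hook example_hook = {
		.name		= "Example battery hook",
		.add_battery	= example_add_battery,
		.remove_battery	= example_remove_battery,
	};

	/* battery_hook_register(&example_hook) from module init; unregister on exit. */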
@@ -29,7 +29,7 @@ BGRT_SHOW(type, image_type);
BGRT_SHOW(xoffset, image_offset_x);
BGRT_SHOW(yoffset, image_offset_y);

static BIN_ATTR_SIMPLE_RO(image);
static __ro_after_init BIN_ATTR_SIMPLE_RO(image);

static struct attribute *bgrt_attributes[] = {
	&bgrt_attr_version.attr,
@@ -40,14 +40,14 @@ static struct attribute *bgrt_attributes[] = {
	NULL,
};

static struct bin_attribute *bgrt_bin_attributes[] = {
static const struct bin_attribute *const bgrt_bin_attributes[] = {
	&bin_attr_image,
	NULL,
};

static const struct attribute_group bgrt_attribute_group = {
	.attrs = bgrt_attributes,
	.bin_attrs = bgrt_bin_attributes,
	.bin_attrs_new = bgrt_bin_attributes,
};

int __init acpi_parse_bgrt(struct acpi_table_header *table)
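This hunk (and the sysfs.c hunks further down) follows the ongoing sysfs transition to const struct bin_attribute: constified attributes are wired up through the transitional .bin_attrs_new and .read_new fields. A minimal hedged sketch of the pattern; the attribute name and callback body are hypothetical:

	#include <linux/sysfs.h>

	/* Hypothetical read-only binary attribute using the const-aware hooks. */
	static ssize_t example_read(struct file *filp, struct kobject *kobj,
				    const struct bin_attribute *attr, char *buf,
				    loff_t off, size_t count)
	{
		/* copy at most 'count' bytes of attribute data at 'off' into 'buf' */
		return 0;
	}

	static const struct bin_attribute example_attr = {
		.attr = { .name = "example", .mode = 0444 },
		.read_new = example_read,
	};

	static const struct bin_attribute *const example_bin_attrs[] = {
		&example_attr,
		NULL,
	};

	static const struct attribute_group example_group = {
		.bin_attrs_new = example_bin_attrs,
	};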
@@ -152,6 +152,7 @@ static const struct acpi_device_id pch_fivr_device_ids[] = {
	{"INTC1064", 0},
	{"INTC106B", 0},
	{"INTC10A3", 0},
	{"INTC10D7", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, pch_fivr_device_ids);

@@ -236,6 +236,8 @@ static const struct acpi_device_id int3407_device_ids[] = {
	{"INTC106D", 0},
	{"INTC10A4", 0},
	{"INTC10A5", 0},
	{"INTC10D8", 0},
	{"INTC10D9", 0},
	{"", 0},
};
MODULE_DEVICE_TABLE(acpi, int3407_device_ids);

@@ -55,6 +55,12 @@ static const struct acpi_device_id int340x_thermal_device_ids[] = {
	{"INTC10A3"},
	{"INTC10A4"},
	{"INTC10A5"},
	{"INTC10D4"},
	{"INTC10D5"},
	{"INTC10D6"},
	{"INTC10D7"},
	{"INTC10D8"},
	{"INTC10D9"},
	{""},
};

@@ -19,6 +19,7 @@
	{"INTC1063", }, /* Fan for Meteor Lake generation */ \
	{"INTC106A", }, /* Fan for Lunar Lake generation */ \
	{"INTC10A2", }, /* Fan for Raptor Lake generation */ \
	{"INTC10D6", }, /* Fan for Panther Lake generation */ \
	{"PNP0C0B", } /* Generic ACPI fan */

#define ACPI_FPS_NAME_LEN 20
@@ -371,19 +371,25 @@ static int acpi_fan_probe(struct platform_device *pdev)
	result = sysfs_create_link(&pdev->dev.kobj,
				   &cdev->device.kobj,
				   "thermal_cooling");
	if (result)
	if (result) {
		dev_err(&pdev->dev, "Failed to create sysfs link 'thermal_cooling'\n");
		goto err_unregister;
	}

	result = sysfs_create_link(&cdev->device.kobj,
				   &pdev->dev.kobj,
				   "device");
	if (result) {
		dev_err(&pdev->dev, "Failed to create sysfs link 'device'\n");
		goto err_end;
		goto err_remove_link;
	}

	return 0;

err_remove_link:
	sysfs_remove_link(&pdev->dev.kobj, "thermal_cooling");
err_unregister:
	thermal_cooling_device_unregister(cdev);
err_end:
	if (fan->acpi4)
		acpi_fan_delete_attributes(device);
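The reworked error path above follows the kernel's usual "unwind ladder": each failure jumps to the label that undoes everything set up so far, and the labels tear things down in reverse order of construction. A generic hedged sketch of the idiom, with hypothetical setup/teardown helpers:

	/* Hypothetical helpers: each setup_x() returns 0 on success. */
	int setup_a(void); int setup_b(void); int setup_c(void);
	void teardown_a(void); void teardown_b(void);

	static int example_probe(void)
	{
		int err;

		err = setup_a();
		if (err)
			return err;		/* nothing to unwind yet */

		err = setup_b();
		if (err)
			goto err_undo_a;	/* undo step A only */

		err = setup_c();
		if (err)
			goto err_undo_b;	/* undo B, then fall through to undo A */

		return 0;

	err_undo_b:
		teardown_b();
	err_undo_a:
		teardown_a();
		return err;
	}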
@@ -624,8 +624,7 @@ static void init_crs_csi2_swnodes(struct crs_csi2 *csi2)
		if (!fwnode_property_present(adev_fwnode, "rotation")) {
			struct acpi_pld_info *pld;

			status = acpi_get_physical_device_location(handle, &pld);
			if (ACPI_SUCCESS(status)) {
			if (acpi_get_physical_device_location(handle, &pld)) {
				swnodes->dev_props[NEXT_PROPERTY(prop_index, DEV_ROTATION)] =
					PROPERTY_ENTRY_U32("rotation",
							   pld->rotation * 45U);
@@ -607,7 +607,27 @@ acpi_status acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler handler)

void acpi_os_sleep(u64 ms)
{
	msleep(ms);
	u64 usec = ms * USEC_PER_MSEC, delta_us = 50;

	/*
	 * Use a hrtimer because the timer wheel timers are optimized for
	 * cancelation before they expire and this timer is not going to be
	 * canceled.
	 *
	 * Set the delta between the requested sleep time and the effective
	 * deadline to at least 50 us in case there is an opportunity for timer
	 * coalescing.
	 *
	 * Moreover, longer sleeps can be assumed to need somewhat less timer
	 * precision, so sacrifice some of it for making the timer a more likely
	 * candidate for coalescing by setting the delta to 1% of the sleep time
	 * if it is above 5 ms (this value is chosen so that the delta is a
	 * continuous function of the sleep time).
	 */
	if (ms > 5)
		delta_us = (USEC_PER_MSEC / 100) * ms;

	usleep_range(usec, usec + delta_us);
}

void acpi_os_stall(u32 us)
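The 1% rule keeps the slack a continuous function of the requested sleep: at exactly 5 ms, 1% is 50 us, matching the floor. A quick standalone check of the formula (plain userspace arithmetic, not kernel code):

	#include <stdio.h>

	#define USEC_PER_MSEC 1000ULL

	/* Mirror of the delta computation in acpi_os_sleep() above. */
	static unsigned long long slack_us(unsigned long long ms)
	{
		unsigned long long delta_us = 50;

		if (ms > 5)
			delta_us = (USEC_PER_MSEC / 100) * ms;	/* 1% of the sleep */
		return delta_us;
	}

	int main(void)
	{
		/* 2 ms -> 50 us floor; 5 ms -> 50 us (continuous); 100 ms -> 1000 us */
		printf("%llu %llu %llu\n", slack_us(2), slack_us(5), slack_us(100));
		return 0;
	}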
@@ -1492,7 +1492,7 @@ acpi_graph_get_remote_endpoint(const struct fwnode_handle *__fwnode)
static bool acpi_fwnode_device_is_available(const struct fwnode_handle *fwnode)
{
	if (!is_acpi_device_node(fwnode))
		return false;
		return true;

	return acpi_device_is_present(to_acpi_device_node(fwnode));
}
@@ -723,10 +723,8 @@ int acpi_tie_acpi_dev(struct acpi_device *adev)
static void acpi_store_pld_crc(struct acpi_device *adev)
{
	struct acpi_pld_info *pld;
	acpi_status status;

	status = acpi_get_physical_device_location(adev->handle, &pld);
	if (ACPI_FAILURE(status))
	if (!acpi_get_physical_device_location(adev->handle, &pld))
		return;

	adev->pld_crc = crc32(~0, pld, sizeof(*pld));
@@ -319,7 +319,7 @@ struct acpi_data_attr {
};

static ssize_t acpi_table_show(struct file *filp, struct kobject *kobj,
			       struct bin_attribute *bin_attr, char *buf,
			       const struct bin_attribute *bin_attr, char *buf,
			       loff_t offset, size_t count)
{
	struct acpi_table_attr *table_attr =
@@ -372,7 +372,7 @@ static int acpi_table_attr_init(struct kobject *tables_obj,
	}

	table_attr->attr.size = table_header->length;
	table_attr->attr.read = acpi_table_show;
	table_attr->attr.read_new = acpi_table_show;
	table_attr->attr.attr.name = table_attr->filename;
	table_attr->attr.attr.mode = 0400;

@@ -412,7 +412,7 @@ acpi_status acpi_sysfs_table_handler(u32 event, void *table, void *context)
}

static ssize_t acpi_data_show(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *bin_attr, char *buf,
			      const struct bin_attribute *bin_attr, char *buf,
			      loff_t offset, size_t count)
{
	struct acpi_data_attr *data_attr;
@@ -495,7 +495,7 @@ static int acpi_table_data_init(struct acpi_table_header *th)
	if (!data_attr)
		return -ENOMEM;
	sysfs_attr_init(&data_attr->attr.attr);
	data_attr->attr.read = acpi_data_show;
	data_attr->attr.read_new = acpi_data_show;
	data_attr->attr.attr.mode = 0400;
	return acpi_data_objs[i].fn(th, data_attr);
}
@@ -56,7 +56,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
			(struct acpi_madt_local_apic *)header;
		pr_debug("LAPIC (acpi_id[0x%02x] lapic_id[0x%02x] %s)\n",
			 p->processor_id, p->id,
			 (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
			 str_enabled_disabled(p->lapic_flags & ACPI_MADT_ENABLED));
	}
	break;

@@ -66,7 +66,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
			(struct acpi_madt_local_x2apic *)header;
		pr_debug("X2APIC (apic_id[0x%02x] uid[0x%02x] %s)\n",
			 p->local_apic_id, p->uid,
			 (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
			 str_enabled_disabled(p->lapic_flags & ACPI_MADT_ENABLED));
	}
	break;

@@ -160,7 +160,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
			(struct acpi_madt_local_sapic *)header;
		pr_debug("LSAPIC (acpi_id[0x%02x] lsapic_id[0x%02x] lsapic_eid[0x%02x] %s)\n",
			 p->processor_id, p->id, p->eid,
			 (p->lapic_flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
			 str_enabled_disabled(p->lapic_flags & ACPI_MADT_ENABLED));
	}
	break;

@@ -183,7 +183,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)
		pr_debug("GICC (acpi_id[0x%04x] address[%llx] MPIDR[0x%llx] %s)\n",
			 p->uid, p->base_address,
			 p->arm_mpidr,
			 (p->flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
			 str_enabled_disabled(p->flags & ACPI_MADT_ENABLED));

	}
	break;
@@ -218,7 +218,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)

		pr_debug("CORE PIC (processor_id[0x%02x] core_id[0x%02x] %s)\n",
			 p->processor_id, p->core_id,
			 (p->flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
			 str_enabled_disabled(p->flags & ACPI_MADT_ENABLED));
	}
	break;

@@ -228,7 +228,7 @@ void acpi_table_print_madt_entry(struct acpi_subtable_header *header)

		pr_debug("RISC-V INTC (acpi_uid[0x%04x] hart_id[0x%llx] %s)\n",
			 p->uid, p->hart_id,
			 (p->flags & ACPI_MADT_ENABLED) ? "enabled" : "disabled");
			 str_enabled_disabled(p->flags & ACPI_MADT_ENABLED));
	}
	break;
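These conversions replace open-coded ternaries with the helper from <linux/string_choices.h>, which keeps the wording of such log strings consistent tree-wide. A hedged sketch of the pattern (the flags word and function are hypothetical):

	#include <linux/string_choices.h>
	#include <linux/printk.h>

	/* Hypothetical flags word and bit, mirroring the MADT usage above. */
	static void example_report(u32 flags, u32 enabled_bit)
	{
		/* str_enabled_disabled(v) returns "enabled" when v is true */
		pr_debug("device is %s\n", str_enabled_disabled(flags & enabled_bit));
	}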
@@ -494,7 +494,7 @@ bool acpi_device_dep(acpi_handle target, acpi_handle match)
}
EXPORT_SYMBOL_GPL(acpi_device_dep);

acpi_status
bool
acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld)
{
	acpi_status status;
@@ -502,9 +502,8 @@ acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld
	union acpi_object *output;

	status = acpi_evaluate_object(handle, "_PLD", NULL, &buffer);

	if (ACPI_FAILURE(status))
		return status;
		return false;

	output = buffer.pointer;

@@ -523,7 +522,7 @@ acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld

out:
	kfree(buffer.pointer);
	return status;
	return ACPI_SUCCESS(status);
}
EXPORT_SYMBOL(acpi_get_physical_device_location);
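After this change the helper reports plain success/failure instead of an acpi_status, so its callers (several are updated in this same merge) collapse to a single boolean test. A hedged caller sketch; the function below is hypothetical, but the call and cleanup follow the updated callers above:

	#include <linux/acpi.h>

	/* Hypothetical caller of the now bool-returning helper. */
	static u32 example_rotation_degrees(acpi_handle handle)
	{
		struct acpi_pld_info *pld;
		u32 deg;

		if (!acpi_get_physical_device_location(handle, &pld))
			return 0;	/* no _PLD or evaluation failed */

		deg = pld->rotation * 45U;	/* _PLD encodes rotation in 45-degree steps */
		ACPI_FREE(pld);
		return deg;
	}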
@@ -13,13 +13,11 @@
bool dev_add_physical_location(struct device *dev)
{
	struct acpi_pld_info *pld;
	acpi_status status;

	if (!has_acpi_companion(dev))
		return false;

	status = acpi_get_physical_device_location(ACPI_HANDLE(dev), &pld);
	if (ACPI_FAILURE(status))
	if (!acpi_get_physical_device_location(ACPI_HANDLE(dev), &pld))
		return false;

	dev->physical_location =
@@ -914,7 +914,7 @@ static void device_resume(struct device *dev, pm_message_t state, bool async)
		goto Complete;

	if (dev->power.direct_complete) {
		/* Match the pm_runtime_disable() in __device_suspend(). */
		/* Match the pm_runtime_disable() in device_suspend(). */
		pm_runtime_enable(dev);
		goto Complete;
	}
@@ -6,7 +6,6 @@
#include <linux/export.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeup.h>
#include <linux/atomic.h>
#include <linux/jiffies.h>
#include "power.h"

@@ -32,7 +32,6 @@ TRACE_EVENT(amd_pstate_perf,
		 u64 aperf,
		 u64 tsc,
		 unsigned int cpu_id,
		 bool changed,
		 bool fast_switch
		 ),

@@ -44,7 +43,6 @@ TRACE_EVENT(amd_pstate_perf,
		aperf,
		tsc,
		cpu_id,
		changed,
		fast_switch
		),

@@ -57,7 +55,6 @@ TRACE_EVENT(amd_pstate_perf,
		__field(unsigned long long, aperf)
		__field(unsigned long long, tsc)
		__field(unsigned int, cpu_id)
		__field(bool, changed)
		__field(bool, fast_switch)
		),

@@ -70,11 +67,10 @@ TRACE_EVENT(amd_pstate_perf,
		__entry->aperf = aperf;
		__entry->tsc = tsc;
		__entry->cpu_id = cpu_id;
		__entry->changed = changed;
		__entry->fast_switch = fast_switch;
		),

	TP_printk("amd_min_perf=%lu amd_des_perf=%lu amd_max_perf=%lu freq=%llu mperf=%llu aperf=%llu tsc=%llu cpu_id=%u changed=%s fast_switch=%s",
	TP_printk("amd_min_perf=%lu amd_des_perf=%lu amd_max_perf=%lu freq=%llu mperf=%llu aperf=%llu tsc=%llu cpu_id=%u fast_switch=%s",
		  (unsigned long)__entry->min_perf,
		  (unsigned long)__entry->target_perf,
		  (unsigned long)__entry->capacity,
@@ -83,11 +79,55 @@ TRACE_EVENT(amd_pstate_perf,
		  (unsigned long long)__entry->aperf,
		  (unsigned long long)__entry->tsc,
		  (unsigned int)__entry->cpu_id,
		  (__entry->changed) ? "true" : "false",
		  (__entry->fast_switch) ? "true" : "false"
		  )
);

TRACE_EVENT(amd_pstate_epp_perf,

	TP_PROTO(unsigned int cpu_id,
		 unsigned int highest_perf,
		 unsigned int epp,
		 unsigned int min_perf,
		 unsigned int max_perf,
		 bool boost
		 ),

	TP_ARGS(cpu_id,
		highest_perf,
		epp,
		min_perf,
		max_perf,
		boost),

	TP_STRUCT__entry(
		__field(unsigned int, cpu_id)
		__field(unsigned int, highest_perf)
		__field(unsigned int, epp)
		__field(unsigned int, min_perf)
		__field(unsigned int, max_perf)
		__field(bool, boost)
		),

	TP_fast_assign(
		__entry->cpu_id = cpu_id;
		__entry->highest_perf = highest_perf;
		__entry->epp = epp;
		__entry->min_perf = min_perf;
		__entry->max_perf = max_perf;
		__entry->boost = boost;
		),

	TP_printk("cpu%u: [%u<->%u]/%u, epp=%u, boost=%u",
		  (unsigned int)__entry->cpu_id,
		  (unsigned int)__entry->min_perf,
		  (unsigned int)__entry->max_perf,
		  (unsigned int)__entry->highest_perf,
		  (unsigned int)__entry->epp,
		  (bool)__entry->boost
		  )
);

#endif /* _AMD_PSTATE_TRACE_H */

/* This part must be outside protection */

@@ -207,7 +207,6 @@ static void amd_pstate_ut_check_freq(u32 index)
	int cpu = 0;
	struct cpufreq_policy *policy = NULL;
	struct amd_cpudata *cpudata = NULL;
	u32 nominal_freq_khz;

	for_each_possible_cpu(cpu) {
		policy = cpufreq_cpu_get(cpu);
@@ -215,14 +214,13 @@ static void amd_pstate_ut_check_freq(u32 index)
			break;
		cpudata = policy->driver_data;

		nominal_freq_khz = cpudata->nominal_freq*1000;
		if (!((cpudata->max_freq >= nominal_freq_khz) &&
		      (nominal_freq_khz > cpudata->lowest_nonlinear_freq) &&
		if (!((cpudata->max_freq >= cpudata->nominal_freq) &&
		      (cpudata->nominal_freq > cpudata->lowest_nonlinear_freq) &&
		      (cpudata->lowest_nonlinear_freq > cpudata->min_freq) &&
		      (cpudata->min_freq > 0))) {
			amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
			pr_err("%s cpu%d max=%d >= nominal=%d > lowest_nonlinear=%d > min=%d > 0, the formula is incorrect!\n",
			       __func__, cpu, cpudata->max_freq, nominal_freq_khz,
			       __func__, cpu, cpudata->max_freq, cpudata->nominal_freq,
			       cpudata->lowest_nonlinear_freq, cpudata->min_freq);
			goto skip_test;
		}
@@ -236,13 +234,13 @@ static void amd_pstate_ut_check_freq(u32 index)

		if (cpudata->boost_supported) {
			if ((policy->max == cpudata->max_freq) ||
			    (policy->max == nominal_freq_khz))
			    (policy->max == cpudata->nominal_freq))
				amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_PASS;
			else {
				amd_pstate_ut_cases[index].result = AMD_PSTATE_UT_RESULT_FAIL;
				pr_err("%s cpu%d policy_max=%d should be equal cpu_max=%d or cpu_nominal=%d !\n",
				       __func__, cpu, policy->max, cpudata->max_freq,
				       nominal_freq_khz);
				       cpudata->nominal_freq);
				goto skip_test;
			}
		} else {
|
||||
|
||||
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
|
||||
|
||||
#include <linux/bitfield.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/init.h>
|
||||
@ -88,6 +89,11 @@ static bool cppc_enabled;
|
||||
static bool amd_pstate_prefcore = true;
|
||||
static struct quirk_entry *quirks;
|
||||
|
||||
#define AMD_CPPC_MAX_PERF_MASK GENMASK(7, 0)
|
||||
#define AMD_CPPC_MIN_PERF_MASK GENMASK(15, 8)
|
||||
#define AMD_CPPC_DES_PERF_MASK GENMASK(23, 16)
|
||||
#define AMD_CPPC_EPP_PERF_MASK GENMASK(31, 24)
|
||||
|
||||
/*
|
||||
* AMD Energy Preference Performance (EPP)
|
||||
* The EPP is used in the CCLK DPM controller to drive
|
||||
@ -180,120 +186,145 @@ static inline int get_mode_idx_from_str(const char *str, size_t size)
|
||||
static DEFINE_MUTEX(amd_pstate_limits_lock);
|
||||
static DEFINE_MUTEX(amd_pstate_driver_lock);
|
||||
|
||||
static s16 amd_pstate_get_epp(struct amd_cpudata *cpudata, u64 cppc_req_cached)
|
||||
static s16 msr_get_epp(struct amd_cpudata *cpudata)
|
||||
{
|
||||
u64 value;
|
||||
int ret;
|
||||
|
||||
ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
|
||||
if (ret < 0) {
|
||||
pr_debug("Could not retrieve energy perf value (%d)\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return FIELD_GET(AMD_CPPC_EPP_PERF_MASK, value);
|
||||
}
|
||||
|
||||
DEFINE_STATIC_CALL(amd_pstate_get_epp, msr_get_epp);
|
||||
|
||||
static inline s16 amd_pstate_get_epp(struct amd_cpudata *cpudata)
|
||||
{
|
||||
return static_call(amd_pstate_get_epp)(cpudata);
|
||||
}
|
||||
|
||||
static s16 shmem_get_epp(struct amd_cpudata *cpudata)
|
||||
{
|
||||
u64 epp;
|
||||
int ret;
|
||||
|
||||
if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
|
||||
if (!cppc_req_cached) {
|
||||
epp = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
|
||||
&cppc_req_cached);
|
||||
if (epp)
|
||||
return epp;
|
||||
}
|
||||
epp = (cppc_req_cached >> 24) & 0xFF;
|
||||
} else {
|
||||
ret = cppc_get_epp_perf(cpudata->cpu, &epp);
|
||||
if (ret < 0) {
|
||||
pr_debug("Could not retrieve energy perf value (%d)\n", ret);
|
||||
return -EIO;
|
||||
}
|
||||
ret = cppc_get_epp_perf(cpudata->cpu, &epp);
|
||||
if (ret < 0) {
|
||||
pr_debug("Could not retrieve energy perf value (%d)\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
return (s16)(epp & 0xff);
|
||||
}
|
||||
|
||||
static int amd_pstate_get_energy_pref_index(struct amd_cpudata *cpudata)
|
||||
static int msr_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
|
||||
u32 des_perf, u32 max_perf, u32 epp, bool fast_switch)
|
||||
{
|
||||
s16 epp;
|
||||
int index = -EINVAL;
|
||||
u64 value, prev;
|
||||
|
||||
epp = amd_pstate_get_epp(cpudata, 0);
|
||||
if (epp < 0)
|
||||
return epp;
|
||||
value = prev = READ_ONCE(cpudata->cppc_req_cached);
|
||||
|
||||
switch (epp) {
|
||||
case AMD_CPPC_EPP_PERFORMANCE:
|
||||
index = EPP_INDEX_PERFORMANCE;
|
||||
break;
|
||||
case AMD_CPPC_EPP_BALANCE_PERFORMANCE:
|
||||
index = EPP_INDEX_BALANCE_PERFORMANCE;
|
||||
break;
|
||||
case AMD_CPPC_EPP_BALANCE_POWERSAVE:
|
||||
index = EPP_INDEX_BALANCE_POWERSAVE;
|
||||
break;
|
||||
case AMD_CPPC_EPP_POWERSAVE:
|
||||
index = EPP_INDEX_POWERSAVE;
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
value &= ~(AMD_CPPC_MAX_PERF_MASK | AMD_CPPC_MIN_PERF_MASK |
|
||||
AMD_CPPC_DES_PERF_MASK | AMD_CPPC_EPP_PERF_MASK);
|
||||
value |= FIELD_PREP(AMD_CPPC_MAX_PERF_MASK, max_perf);
|
||||
value |= FIELD_PREP(AMD_CPPC_DES_PERF_MASK, des_perf);
|
||||
value |= FIELD_PREP(AMD_CPPC_MIN_PERF_MASK, min_perf);
|
||||
value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
|
||||
|
||||
if (value == prev)
|
||||
return 0;
|
||||
|
||||
if (fast_switch) {
|
||||
wrmsrl(MSR_AMD_CPPC_REQ, value);
|
||||
return 0;
|
||||
} else {
|
||||
int ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
return index;
|
||||
}
|
||||
WRITE_ONCE(cpudata->cppc_req_cached, value);
|
||||
WRITE_ONCE(cpudata->epp_cached, epp);
|
||||
|
||||
static void msr_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
|
||||
u32 des_perf, u32 max_perf, bool fast_switch)
|
||||
{
|
||||
if (fast_switch)
|
||||
wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached));
|
||||
else
|
||||
wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
|
||||
READ_ONCE(cpudata->cppc_req_cached));
|
||||
return 0;
|
||||
}
|
||||
|
||||
DEFINE_STATIC_CALL(amd_pstate_update_perf, msr_update_perf);
|
||||
|
||||
static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata,
|
||||
static inline int amd_pstate_update_perf(struct amd_cpudata *cpudata,
|
||||
u32 min_perf, u32 des_perf,
|
||||
u32 max_perf, bool fast_switch)
|
||||
u32 max_perf, u32 epp,
|
||||
bool fast_switch)
|
||||
{
|
||||
static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
|
||||
max_perf, fast_switch);
|
||||
return static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
|
||||
max_perf, epp, fast_switch);
|
||||
}
|
||||
|
||||
static int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
|
||||
static int msr_set_epp(struct amd_cpudata *cpudata, u32 epp)
|
||||
{
|
||||
u64 value, prev;
|
||||
int ret;
|
||||
struct cppc_perf_ctrls perf_ctrls;
|
||||
|
||||
if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
|
||||
u64 value = READ_ONCE(cpudata->cppc_req_cached);
|
||||
value = prev = READ_ONCE(cpudata->cppc_req_cached);
|
||||
value &= ~AMD_CPPC_EPP_PERF_MASK;
|
||||
value |= FIELD_PREP(AMD_CPPC_EPP_PERF_MASK, epp);
|
||||
|
||||
value &= ~GENMASK_ULL(31, 24);
|
||||
value |= (u64)epp << 24;
|
||||
WRITE_ONCE(cpudata->cppc_req_cached, value);
|
||||
if (value == prev)
|
||||
return 0;
|
||||
|
||||
ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
|
||||
if (!ret)
|
||||
cpudata->epp_cached = epp;
|
||||
} else {
|
||||
amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
|
||||
cpudata->max_limit_perf, false);
|
||||
|
||||
perf_ctrls.energy_perf = epp;
|
||||
ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
|
||||
if (ret) {
|
||||
pr_debug("failed to set energy perf value (%d)\n", ret);
|
||||
return ret;
|
||||
}
|
||||
cpudata->epp_cached = epp;
|
||||
ret = wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
|
||||
if (ret) {
|
||||
pr_err("failed to set energy perf value (%d)\n", ret);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* update both so that msr_update_perf() can effectively check */
|
||||
WRITE_ONCE(cpudata->epp_cached, epp);
|
||||
WRITE_ONCE(cpudata->cppc_req_cached, value);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int amd_pstate_set_energy_pref_index(struct amd_cpudata *cpudata,
|
||||
int pref_index)
|
||||
DEFINE_STATIC_CALL(amd_pstate_set_epp, msr_set_epp);
|
||||
|
||||
static inline int amd_pstate_set_epp(struct amd_cpudata *cpudata, u32 epp)
|
||||
{
|
||||
return static_call(amd_pstate_set_epp)(cpudata, epp);
|
||||
}
|
||||
|
||||
static int shmem_set_epp(struct amd_cpudata *cpudata, u32 epp)
|
||||
{
|
||||
int epp = -EINVAL;
|
||||
int ret;
|
||||
struct cppc_perf_ctrls perf_ctrls;
|
||||
|
||||
if (epp == cpudata->epp_cached)
|
||||
return 0;
|
||||
|
||||
perf_ctrls.energy_perf = epp;
|
||||
ret = cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
|
||||
if (ret) {
|
||||
pr_debug("failed to set energy perf value (%d)\n", ret);
|
||||
return ret;
|
||||
}
|
||||
WRITE_ONCE(cpudata->epp_cached, epp);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int amd_pstate_set_energy_pref_index(struct cpufreq_policy *policy,
|
||||
int pref_index)
|
||||
{
|
||||
struct amd_cpudata *cpudata = policy->driver_data;
|
||||
int epp;
|
||||
|
||||
if (!pref_index)
|
||||
epp = cpudata->epp_default;
|
||||
|
||||
if (epp == -EINVAL)
|
||||
else
|
||||
epp = epp_values[pref_index];
|
||||
|
||||
if (epp > 0 && cpudata->policy == CPUFREQ_POLICY_PERFORMANCE) {
|
||||
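The rewrite above routes MSR-based and shared-memory systems through common entry points via static calls, which patch the call site instead of dereferencing a function pointer on every invocation. A hedged, self-contained sketch of the mechanism with hypothetical names:

	#include <linux/static_call.h>

	static int example_msr_op(int x)
	{
		return x + 1;	/* default implementation */
	}

	static int example_shmem_op(int x)
	{
		return x + 2;	/* alternative for platforms without the MSR */
	}

	DEFINE_STATIC_CALL(example_op, example_msr_op);

	static inline int example_do_op(int x)
	{
		return static_call(example_op)(x);	/* patched direct call */
	}

	static void example_init(bool have_msr)
	{
		if (!have_msr)
			static_call_update(example_op, example_shmem_op);
	}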
@@ -301,9 +332,15 @@ static int amd_pstate_set_energy_pref_index(struct cpufreq_policy *policy,
		return -EBUSY;
	}

	ret = amd_pstate_set_epp(cpudata, epp);
	if (trace_amd_pstate_epp_perf_enabled()) {
		trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
					  epp,
					  FIELD_GET(AMD_CPPC_MIN_PERF_MASK, cpudata->cppc_req_cached),
					  FIELD_GET(AMD_CPPC_MAX_PERF_MASK, cpudata->cppc_req_cached),
					  policy->boost_enabled);
	}

	return ret;
	return amd_pstate_set_epp(cpudata, epp);
}

static inline int msr_cppc_enable(bool enable)
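The trace_*_enabled() check above is the standard tracepoint idiom: argument marshalling (here the FIELD_GET decoding) only runs when the event is actually enabled. A hedged fragment of the pattern, assuming a hypothetical example_event tracepoint and decode helper:

	static void example_report(unsigned int cpu, u64 reg)
	{
		/* the _enabled() form is generated for every tracepoint */
		if (trace_example_event_enabled()) {
			u32 field = example_decode(reg);	/* hypothetical decode */

			trace_example_event(cpu, field);
		}
	}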
@@ -442,17 +479,23 @@ static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
	return static_call(amd_pstate_init_perf)(cpudata);
}

static void shmem_update_perf(struct amd_cpudata *cpudata,
			      u32 min_perf, u32 des_perf,
			      u32 max_perf, bool fast_switch)
static int shmem_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
			     u32 des_perf, u32 max_perf, u32 epp, bool fast_switch)
{
	struct cppc_perf_ctrls perf_ctrls;

	if (cppc_state == AMD_PSTATE_ACTIVE) {
		int ret = shmem_set_epp(cpudata, epp);

		if (ret)
			return ret;
	}

	perf_ctrls.max_perf = max_perf;
	perf_ctrls.min_perf = min_perf;
	perf_ctrls.desired_perf = des_perf;

	cppc_set_perf(cpudata->cpu, &perf_ctrls);
	return cppc_set_perf(cpudata->cpu, &perf_ctrls);
}

static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
|
||||
{
|
||||
unsigned long max_freq;
|
||||
struct cpufreq_policy *policy = cpufreq_cpu_get(cpudata->cpu);
|
||||
u64 prev = READ_ONCE(cpudata->cppc_req_cached);
|
||||
u32 nominal_perf = READ_ONCE(cpudata->nominal_perf);
|
||||
u64 value = prev;
|
||||
|
||||
min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
|
||||
cpudata->max_limit_perf);
|
||||
max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
|
||||
cpudata->max_limit_perf);
|
||||
des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);
|
||||
|
||||
max_freq = READ_ONCE(cpudata->max_limit_freq);
|
||||
@ -511,34 +548,18 @@ static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
|
||||
des_perf = 0;
|
||||
}
|
||||
|
||||
value &= ~AMD_CPPC_MIN_PERF(~0L);
|
||||
value |= AMD_CPPC_MIN_PERF(min_perf);
|
||||
|
||||
value &= ~AMD_CPPC_DES_PERF(~0L);
|
||||
value |= AMD_CPPC_DES_PERF(des_perf);
|
||||
|
||||
/* limit the max perf when core performance boost feature is disabled */
|
||||
if (!cpudata->boost_supported)
|
||||
max_perf = min_t(unsigned long, nominal_perf, max_perf);
|
||||
|
||||
value &= ~AMD_CPPC_MAX_PERF(~0L);
|
||||
value |= AMD_CPPC_MAX_PERF(max_perf);
|
||||
|
||||
if (trace_amd_pstate_perf_enabled() && amd_pstate_sample(cpudata)) {
|
||||
trace_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq,
|
||||
cpudata->cur.mperf, cpudata->cur.aperf, cpudata->cur.tsc,
|
||||
cpudata->cpu, (value != prev), fast_switch);
|
||||
cpudata->cpu, fast_switch);
|
||||
}
|
||||
|
||||
if (value == prev)
|
||||
goto cpufreq_policy_put;
|
||||
amd_pstate_update_perf(cpudata, min_perf, des_perf, max_perf, 0, fast_switch);
|
||||
|
||||
WRITE_ONCE(cpudata->cppc_req_cached, value);
|
||||
|
||||
amd_pstate_update_perf(cpudata, min_perf, des_perf,
|
||||
max_perf, fast_switch);
|
||||
|
||||
cpufreq_policy_put:
|
||||
cpufreq_cpu_put(policy);
|
||||
}
|
||||
|
||||
@ -570,7 +591,7 @@ static int amd_pstate_verify(struct cpufreq_policy_data *policy_data)
|
||||
|
||||
static int amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
|
||||
{
|
||||
u32 max_limit_perf, min_limit_perf, lowest_perf, max_perf, max_freq;
|
||||
u32 max_limit_perf, min_limit_perf, max_perf, max_freq;
|
||||
struct amd_cpudata *cpudata = policy->driver_data;
|
||||
|
||||
max_perf = READ_ONCE(cpudata->highest_perf);
|
||||
@ -578,12 +599,8 @@ static int amd_pstate_update_min_max_limit(struct cpufreq_policy *policy)
|
||||
max_limit_perf = div_u64(policy->max * max_perf, max_freq);
|
||||
min_limit_perf = div_u64(policy->min * max_perf, max_freq);
|
||||
|
||||
lowest_perf = READ_ONCE(cpudata->lowest_perf);
|
||||
if (min_limit_perf < lowest_perf)
|
||||
min_limit_perf = lowest_perf;
|
||||
|
||||
if (max_limit_perf < min_limit_perf)
|
||||
max_limit_perf = min_limit_perf;
|
||||
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
|
||||
min_limit_perf = min(cpudata->nominal_perf, max_limit_perf);
|
||||
|
||||
WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
|
||||
WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
|
||||
@ -704,8 +721,8 @@ static int amd_pstate_cpu_boost_update(struct cpufreq_policy *policy, bool on)
|
||||
|
||||
if (on)
|
||||
policy->cpuinfo.max_freq = max_freq;
|
||||
else if (policy->cpuinfo.max_freq > nominal_freq * 1000)
|
||||
policy->cpuinfo.max_freq = nominal_freq * 1000;
|
||||
else if (policy->cpuinfo.max_freq > nominal_freq)
|
||||
policy->cpuinfo.max_freq = nominal_freq;
|
||||
|
||||
policy->max = policy->cpuinfo.max_freq;
|
||||
|
||||
@ -727,12 +744,11 @@ static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
|
||||
pr_err("Boost mode is not supported by this processor or SBIOS\n");
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
mutex_lock(&amd_pstate_driver_lock);
|
||||
guard(mutex)(&amd_pstate_driver_lock);
|
||||
|
||||
ret = amd_pstate_cpu_boost_update(policy, state);
|
||||
WRITE_ONCE(cpudata->boost_state, !ret ? state : false);
|
||||
policy->boost_enabled = !ret ? state : false;
|
||||
refresh_frequency_limits(policy);
|
||||
mutex_unlock(&amd_pstate_driver_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@ -752,9 +768,6 @@ static int amd_pstate_init_boost_support(struct amd_cpudata *cpudata)
|
||||
goto exit_err;
|
||||
}
|
||||
|
||||
/* at least one CPU supports CPB, even if others fail later on to set up */
|
||||
current_pstate_driver->boost_enabled = true;
|
||||
|
||||
ret = rdmsrl_on_cpu(cpudata->cpu, MSR_K7_HWCR, &boost_val);
|
||||
if (ret) {
|
||||
pr_err_once("failed to read initial CPU boost state!\n");
|
||||
@ -802,7 +815,7 @@ static void amd_pstate_init_prefcore(struct amd_cpudata *cpudata)
|
||||
* sched_set_itmt_support(true) has been called and it is valid to
|
||||
* update them at any time after it has been called.
|
||||
*/
|
||||
sched_set_itmt_core_prio((int)READ_ONCE(cpudata->highest_perf), cpudata->cpu);
|
||||
sched_set_itmt_core_prio((int)READ_ONCE(cpudata->prefcore_ranking), cpudata->cpu);
|
||||
|
||||
schedule_work(&sched_prefcore_work);
|
||||
}
|
||||
@ -823,7 +836,8 @@ static void amd_pstate_update_limits(unsigned int cpu)
|
||||
if (!amd_pstate_prefcore)
|
||||
return;
|
||||
|
||||
mutex_lock(&amd_pstate_driver_lock);
|
||||
guard(mutex)(&amd_pstate_driver_lock);
|
||||
|
||||
ret = amd_get_highest_perf(cpu, &cur_high);
|
||||
if (ret)
|
||||
goto free_cpufreq_put;
|
||||
@ -843,7 +857,6 @@ free_cpufreq_put:
|
||||
if (!highest_perf_changed)
|
||||
cpufreq_update_policy(cpu);
|
||||
|
||||
mutex_unlock(&amd_pstate_driver_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -895,9 +908,8 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
|
||||
{
|
||||
int ret;
|
||||
u32 min_freq, max_freq;
|
||||
u32 nominal_perf, nominal_freq;
|
||||
u32 highest_perf, nominal_perf, nominal_freq;
|
||||
u32 lowest_nonlinear_perf, lowest_nonlinear_freq;
|
||||
u32 boost_ratio, lowest_nonlinear_ratio;
|
||||
struct cppc_perf_caps cppc_perf;
|
||||
|
||||
ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
|
||||
@ -905,29 +917,25 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
|
||||
return ret;
|
||||
|
||||
if (quirks && quirks->lowest_freq)
|
||||
min_freq = quirks->lowest_freq * 1000;
|
||||
min_freq = quirks->lowest_freq;
|
||||
else
|
||||
min_freq = cppc_perf.lowest_freq * 1000;
|
||||
min_freq = cppc_perf.lowest_freq;
|
||||
|
||||
if (quirks && quirks->nominal_freq)
|
||||
nominal_freq = quirks->nominal_freq ;
|
||||
nominal_freq = quirks->nominal_freq;
|
||||
else
|
||||
nominal_freq = cppc_perf.nominal_freq;
|
||||
|
||||
highest_perf = READ_ONCE(cpudata->highest_perf);
|
||||
nominal_perf = READ_ONCE(cpudata->nominal_perf);
|
||||
|
||||
boost_ratio = div_u64(cpudata->highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf);
|
||||
max_freq = (nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT) * 1000;
|
||||
max_freq = div_u64((u64)highest_perf * nominal_freq, nominal_perf);
|
||||
|
||||
lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
|
||||
lowest_nonlinear_ratio = div_u64(lowest_nonlinear_perf << SCHED_CAPACITY_SHIFT,
|
||||
nominal_perf);
|
||||
lowest_nonlinear_freq = (nominal_freq * lowest_nonlinear_ratio >> SCHED_CAPACITY_SHIFT) * 1000;
|
||||
|
||||
WRITE_ONCE(cpudata->min_freq, min_freq);
|
||||
WRITE_ONCE(cpudata->lowest_nonlinear_freq, lowest_nonlinear_freq);
|
||||
WRITE_ONCE(cpudata->nominal_freq, nominal_freq);
|
||||
WRITE_ONCE(cpudata->max_freq, max_freq);
|
||||
lowest_nonlinear_freq = div_u64((u64)nominal_freq * lowest_nonlinear_perf, nominal_perf);
|
||||
WRITE_ONCE(cpudata->min_freq, min_freq * 1000);
|
||||
WRITE_ONCE(cpudata->lowest_nonlinear_freq, lowest_nonlinear_freq * 1000);
|
||||
WRITE_ONCE(cpudata->nominal_freq, nominal_freq * 1000);
|
||||
WRITE_ONCE(cpudata->max_freq, max_freq * 1000);
|
||||
|
||||
/**
|
||||
* Below values need to be initialized correctly, otherwise driver will fail to load
|
||||
@ -937,13 +945,13 @@ static int amd_pstate_init_freq(struct amd_cpudata *cpudata)
|
||||
*/
|
||||
if (min_freq <= 0 || max_freq <= 0 || nominal_freq <= 0 || min_freq > max_freq) {
|
||||
pr_err("min_freq(%d) or max_freq(%d) or nominal_freq(%d) value is incorrect\n",
|
||||
min_freq, max_freq, nominal_freq * 1000);
|
||||
min_freq, max_freq, nominal_freq);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (lowest_nonlinear_freq <= min_freq || lowest_nonlinear_freq > nominal_freq * 1000) {
|
||||
if (lowest_nonlinear_freq <= min_freq || lowest_nonlinear_freq > nominal_freq) {
|
||||
pr_err("lowest_nonlinear_freq(%d) value is out of range [min_freq(%d), nominal_freq(%d)]\n",
|
||||
lowest_nonlinear_freq, min_freq, nominal_freq * 1000);
|
||||
lowest_nonlinear_freq, min_freq, nominal_freq);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -1160,7 +1168,6 @@ static ssize_t show_energy_performance_available_preferences(
|
||||
static ssize_t store_energy_performance_preference(
|
||||
struct cpufreq_policy *policy, const char *buf, size_t count)
|
||||
{
|
||||
struct amd_cpudata *cpudata = policy->driver_data;
|
||||
char str_preference[21];
|
||||
ssize_t ret;
|
||||
|
||||
@ -1172,11 +1179,11 @@ static ssize_t store_energy_performance_preference(
|
||||
if (ret < 0)
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&amd_pstate_limits_lock);
|
||||
ret = amd_pstate_set_energy_pref_index(cpudata, ret);
|
||||
mutex_unlock(&amd_pstate_limits_lock);
|
||||
guard(mutex)(&amd_pstate_limits_lock);
|
||||
|
||||
return ret ?: count;
|
||||
ret = amd_pstate_set_energy_pref_index(policy, ret);
|
||||
|
||||
return ret ? ret : count;
|
||||
}
|
||||
|
||||
static ssize_t show_energy_performance_preference(
|
||||
@ -1185,9 +1192,22 @@ static ssize_t show_energy_performance_preference(
|
||||
struct amd_cpudata *cpudata = policy->driver_data;
|
||||
int preference;
|
||||
|
||||
preference = amd_pstate_get_energy_pref_index(cpudata);
|
||||
if (preference < 0)
|
||||
return preference;
|
||||
switch (cpudata->epp_cached) {
|
||||
case AMD_CPPC_EPP_PERFORMANCE:
|
||||
preference = EPP_INDEX_PERFORMANCE;
|
||||
break;
|
||||
case AMD_CPPC_EPP_BALANCE_PERFORMANCE:
|
||||
preference = EPP_INDEX_BALANCE_PERFORMANCE;
|
||||
break;
|
||||
case AMD_CPPC_EPP_BALANCE_POWERSAVE:
|
||||
preference = EPP_INDEX_BALANCE_POWERSAVE;
|
||||
break;
|
||||
case AMD_CPPC_EPP_POWERSAVE:
|
||||
preference = EPP_INDEX_POWERSAVE;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return sysfs_emit(buf, "%s\n", energy_perf_strings[preference]);
|
||||
}
|
||||
@ -1236,6 +1256,9 @@ static int amd_pstate_register_driver(int mode)
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* at least one CPU supports CPB */
|
||||
current_pstate_driver->boost_enabled = cpu_feature_enabled(X86_FEATURE_CPB);
|
||||
|
||||
ret = cpufreq_register_driver(current_pstate_driver);
|
||||
if (ret) {
|
||||
amd_pstate_driver_cleanup();
|
||||
@ -1340,13 +1363,10 @@ EXPORT_SYMBOL_GPL(amd_pstate_update_status);
|
||||
static ssize_t status_show(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
ssize_t ret;
|
||||
|
||||
mutex_lock(&amd_pstate_driver_lock);
|
||||
ret = amd_pstate_show_status(buf);
|
||||
mutex_unlock(&amd_pstate_driver_lock);
|
||||
guard(mutex)(&amd_pstate_driver_lock);
|
||||
|
||||
return ret;
|
||||
return amd_pstate_show_status(buf);
|
||||
}
|
||||
|
||||
static ssize_t status_store(struct device *a, struct device_attribute *b,
|
||||
@ -1355,9 +1375,8 @@ static ssize_t status_store(struct device *a, struct device_attribute *b,
|
||||
char *p = memchr(buf, '\n', count);
|
||||
int ret;
|
||||
|
||||
mutex_lock(&amd_pstate_driver_lock);
|
||||
guard(mutex)(&amd_pstate_driver_lock);
|
||||
ret = amd_pstate_update_status(buf, p ? p - buf : count);
|
||||
mutex_unlock(&amd_pstate_driver_lock);
|
||||
|
||||
return ret < 0 ? ret : count;
|
||||
}
|
||||
@ -1451,7 +1470,6 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
|
||||
return -ENOMEM;
|
||||
|
||||
cpudata->cpu = policy->cpu;
|
||||
cpudata->epp_policy = 0;
|
||||
|
||||
ret = amd_pstate_init_perf(cpudata);
|
||||
if (ret)
|
||||
@ -1477,8 +1495,6 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
|
||||
|
||||
policy->driver_data = cpudata;
|
||||
|
||||
cpudata->epp_cached = cpudata->epp_default = amd_pstate_get_epp(cpudata, 0);
|
||||
|
||||
policy->min = policy->cpuinfo.min_freq;
|
||||
policy->max = policy->cpuinfo.max_freq;
|
||||
|
||||
@ -1489,10 +1505,13 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
|
||||
* the default cpufreq governor is neither powersave nor performance.
|
||||
*/
|
||||
if (amd_pstate_acpi_pm_profile_server() ||
|
||||
amd_pstate_acpi_pm_profile_undefined())
|
||||
amd_pstate_acpi_pm_profile_undefined()) {
|
||||
policy->policy = CPUFREQ_POLICY_PERFORMANCE;
|
||||
else
|
||||
cpudata->epp_default = amd_pstate_get_epp(cpudata);
|
||||
} else {
|
||||
policy->policy = CPUFREQ_POLICY_POWERSAVE;
|
||||
cpudata->epp_default = AMD_CPPC_EPP_BALANCE_PERFORMANCE;
|
||||
}
|
||||
|
||||
if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
|
||||
ret = rdmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, &value);
|
||||
@ -1505,6 +1524,9 @@ static int amd_pstate_epp_cpu_init(struct cpufreq_policy *policy)
|
||||
return ret;
|
||||
WRITE_ONCE(cpudata->cppc_cap1_cached, value);
|
||||
}
|
||||
ret = amd_pstate_set_epp(cpudata, cpudata->epp_default);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
current_pstate_driver->adjust_perf = NULL;
|
||||
|
||||
@ -1530,51 +1552,24 @@ static void amd_pstate_epp_cpu_exit(struct cpufreq_policy *policy)
|
||||
static int amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
|
||||
{
|
||||
struct amd_cpudata *cpudata = policy->driver_data;
|
||||
u32 max_perf, min_perf;
|
||||
u64 value;
|
||||
s16 epp;
|
||||
u32 epp;
|
||||
|
||||
max_perf = READ_ONCE(cpudata->highest_perf);
|
||||
min_perf = READ_ONCE(cpudata->lowest_perf);
|
||||
amd_pstate_update_min_max_limit(policy);
|
||||
|
||||
max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
|
||||
cpudata->max_limit_perf);
|
||||
min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
|
||||
cpudata->max_limit_perf);
|
||||
value = READ_ONCE(cpudata->cppc_req_cached);
|
||||
|
||||
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
|
||||
min_perf = min(cpudata->nominal_perf, max_perf);
|
||||
|
||||
/* Initial min/max values for CPPC Performance Controls Register */
|
||||
value &= ~AMD_CPPC_MIN_PERF(~0L);
|
||||
value |= AMD_CPPC_MIN_PERF(min_perf);
|
||||
|
||||
value &= ~AMD_CPPC_MAX_PERF(~0L);
|
||||
value |= AMD_CPPC_MAX_PERF(max_perf);
|
||||
|
||||
/* CPPC EPP feature require to set zero to the desire perf bit */
|
||||
value &= ~AMD_CPPC_DES_PERF(~0L);
|
||||
value |= AMD_CPPC_DES_PERF(0);
|
||||
|
||||
cpudata->epp_policy = cpudata->policy;
|
||||
|
||||
/* Get BIOS pre-defined epp value */
|
||||
epp = amd_pstate_get_epp(cpudata, value);
|
||||
if (epp < 0) {
|
||||
/**
|
||||
* This return value can only be negative for shared_memory
|
||||
* systems where EPP register read/write not supported.
|
||||
*/
|
||||
return epp;
|
||||
}
|
||||
|
||||
if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
|
||||
epp = 0;
|
||||
else
|
||||
epp = READ_ONCE(cpudata->epp_cached);
|
||||
|
||||
WRITE_ONCE(cpudata->cppc_req_cached, value);
|
||||
return amd_pstate_set_epp(cpudata, epp);
|
||||
if (trace_amd_pstate_epp_perf_enabled()) {
|
||||
trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf, epp,
|
||||
cpudata->min_limit_perf,
|
||||
cpudata->max_limit_perf,
|
||||
policy->boost_enabled);
|
||||
}
|
||||
|
||||
return amd_pstate_update_perf(cpudata, cpudata->min_limit_perf, 0U,
|
||||
cpudata->max_limit_perf, epp, false);
|
||||
}
|
||||
|
||||
static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
|
||||
@ -1603,87 +1598,63 @@ static int amd_pstate_epp_set_policy(struct cpufreq_policy *policy)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void amd_pstate_epp_reenable(struct amd_cpudata *cpudata)
|
||||
static int amd_pstate_epp_reenable(struct cpufreq_policy *policy)
|
||||
{
|
||||
struct cppc_perf_ctrls perf_ctrls;
|
||||
u64 value, max_perf;
|
||||
struct amd_cpudata *cpudata = policy->driver_data;
|
||||
u64 max_perf;
|
||||
int ret;
|
||||
|
||||
ret = amd_pstate_cppc_enable(true);
|
||||
if (ret)
|
||||
pr_err("failed to enable amd pstate during resume, return %d\n", ret);
|
||||
|
||||
value = READ_ONCE(cpudata->cppc_req_cached);
|
||||
max_perf = READ_ONCE(cpudata->highest_perf);
|
||||
|
||||
if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
|
||||
wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
|
||||
} else {
|
||||
perf_ctrls.max_perf = max_perf;
|
||||
cppc_set_perf(cpudata->cpu, &perf_ctrls);
|
||||
perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(cpudata->epp_cached);
|
||||
cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
|
||||
if (trace_amd_pstate_epp_perf_enabled()) {
|
||||
trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
|
||||
cpudata->epp_cached,
|
||||
FIELD_GET(AMD_CPPC_MIN_PERF_MASK, cpudata->cppc_req_cached),
|
||||
max_perf, policy->boost_enabled);
|
||||
}
|
||||
|
||||
return amd_pstate_update_perf(cpudata, 0, 0, max_perf, cpudata->epp_cached, false);
|
||||
}
|
||||
|
||||
static int amd_pstate_epp_cpu_online(struct cpufreq_policy *policy)
|
||||
{
|
||||
struct amd_cpudata *cpudata = policy->driver_data;
|
||||
int ret;
|
||||
|
||||
pr_debug("AMD CPU Core %d going online\n", cpudata->cpu);
|
||||
|
||||
if (cppc_state == AMD_PSTATE_ACTIVE) {
|
||||
amd_pstate_epp_reenable(cpudata);
|
||||
cpudata->suspended = false;
|
||||
}
|
||||
ret = amd_pstate_epp_reenable(policy);
|
||||
if (ret)
|
||||
return ret;
|
||||
cpudata->suspended = false;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void amd_pstate_epp_offline(struct cpufreq_policy *policy)
|
||||
{
|
||||
struct amd_cpudata *cpudata = policy->driver_data;
|
||||
struct cppc_perf_ctrls perf_ctrls;
|
||||
int min_perf;
|
||||
u64 value;
|
||||
|
||||
min_perf = READ_ONCE(cpudata->lowest_perf);
|
||||
value = READ_ONCE(cpudata->cppc_req_cached);
|
||||
|
||||
mutex_lock(&amd_pstate_limits_lock);
|
||||
if (cpu_feature_enabled(X86_FEATURE_CPPC)) {
|
||||
cpudata->epp_policy = CPUFREQ_POLICY_UNKNOWN;
|
||||
|
||||
/* Set max perf same as min perf */
|
||||
value &= ~AMD_CPPC_MAX_PERF(~0L);
|
||||
value |= AMD_CPPC_MAX_PERF(min_perf);
|
||||
value &= ~AMD_CPPC_MIN_PERF(~0L);
|
||||
value |= AMD_CPPC_MIN_PERF(min_perf);
|
||||
wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ, value);
|
||||
} else {
|
||||
perf_ctrls.desired_perf = 0;
|
||||
perf_ctrls.min_perf = min_perf;
|
||||
perf_ctrls.max_perf = min_perf;
|
||||
cppc_set_perf(cpudata->cpu, &perf_ctrls);
|
||||
perf_ctrls.energy_perf = AMD_CPPC_ENERGY_PERF_PREF(HWP_EPP_BALANCE_POWERSAVE);
|
||||
cppc_set_epp_perf(cpudata->cpu, &perf_ctrls, 1);
|
||||
}
|
||||
mutex_unlock(&amd_pstate_limits_lock);
|
||||
}
|
||||
|
||||
static int amd_pstate_epp_cpu_offline(struct cpufreq_policy *policy)
|
||||
{
|
||||
struct amd_cpudata *cpudata = policy->driver_data;
|
||||
|
||||
pr_debug("AMD CPU Core %d going offline\n", cpudata->cpu);
|
||||
int min_perf;
|
||||
|
||||
if (cpudata->suspended)
|
||||
return 0;
|
||||
|
||||
if (cppc_state == AMD_PSTATE_ACTIVE)
|
||||
amd_pstate_epp_offline(policy);
|
||||
min_perf = READ_ONCE(cpudata->lowest_perf);
|
||||
|
||||
return 0;
|
||||
guard(mutex)(&amd_pstate_limits_lock);
|
||||
|
||||
if (trace_amd_pstate_epp_perf_enabled()) {
|
||||
trace_amd_pstate_epp_perf(cpudata->cpu, cpudata->highest_perf,
|
||||
AMD_CPPC_EPP_BALANCE_POWERSAVE,
|
||||
min_perf, min_perf, policy->boost_enabled);
|
||||
}
|
||||
|
||||
return amd_pstate_update_perf(cpudata, min_perf, 0, min_perf,
|
||||
AMD_CPPC_EPP_BALANCE_POWERSAVE, false);
|
||||
}
|
||||
|
||||
static int amd_pstate_epp_suspend(struct cpufreq_policy *policy)
|
||||
@ -1711,12 +1682,10 @@ static int amd_pstate_epp_resume(struct cpufreq_policy *policy)
|
||||
struct amd_cpudata *cpudata = policy->driver_data;
|
||||
|
||||
if (cpudata->suspended) {
|
||||
mutex_lock(&amd_pstate_limits_lock);
|
||||
guard(mutex)(&amd_pstate_limits_lock);
|
||||
|
||||
/* enable amd pstate from suspend state*/
|
||||
amd_pstate_epp_reenable(cpudata);
|
||||
|
||||
mutex_unlock(&amd_pstate_limits_lock);
|
||||
amd_pstate_epp_reenable(policy);
|
||||
|
||||
cpudata->suspended = false;
|
||||
}
|
||||
@ -1869,6 +1838,8 @@ static int __init amd_pstate_init(void)
|
||||
static_call_update(amd_pstate_cppc_enable, shmem_cppc_enable);
|
||||
static_call_update(amd_pstate_init_perf, shmem_init_perf);
|
||||
static_call_update(amd_pstate_update_perf, shmem_update_perf);
|
||||
static_call_update(amd_pstate_get_epp, shmem_get_epp);
|
||||
static_call_update(amd_pstate_set_epp, shmem_set_epp);
|
||||
}
|
||||
|
||||
if (amd_pstate_prefcore) {
|
||||
|
@ -57,7 +57,6 @@ struct amd_aperf_mperf {
|
||||
* @hw_prefcore: check whether HW supports preferred core featue.
|
||||
* Only when hw_prefcore and early prefcore param are true,
|
||||
* AMD P-State driver supports preferred core featue.
|
||||
* @epp_policy: Last saved policy used to set energy-performance preference
|
||||
* @epp_cached: Cached CPPC energy-performance preference value
|
||||
* @policy: Cpufreq policy value
|
||||
* @cppc_cap1_cached Cached MSR_AMD_CPPC_CAP1 register value
|
||||
@ -94,13 +93,11 @@ struct amd_cpudata {
|
||||
bool hw_prefcore;
|
||||
|
||||
/* EPP feature related attributes*/
|
||||
s16 epp_policy;
|
||||
s16 epp_cached;
|
||||
u32 policy;
|
||||
u64 cppc_cap1_cached;
|
||||
bool suspended;
|
||||
s16 epp_default;
|
||||
bool boost_state;
|
||||
};
|
||||
|
||||
/*
|
||||
|
@ -1538,7 +1538,7 @@ static int cpufreq_online(unsigned int cpu)
|
||||
|
||||
/*
|
||||
* Register with the energy model before
|
||||
* sugov_eas_rebuild_sd() is called, which will result
|
||||
* em_rebuild_sched_domains() is called, which will result
|
||||
* in rebuilding of the sched domains, which should only be done
|
||||
* once the energy model is properly initialized for the policy
|
||||
* first.
|
||||
|
@ -28,6 +28,7 @@
|
||||
#include <linux/pm_qos.h>
|
||||
#include <linux/bitfield.h>
|
||||
#include <trace/events/power.h>
|
||||
#include <linux/units.h>
|
||||
|
||||
#include <asm/cpu.h>
|
||||
#include <asm/div64.h>
|
||||
@ -302,11 +303,11 @@ static bool hwp_is_hybrid;
|
||||
|
||||
static struct cpufreq_driver *intel_pstate_driver __read_mostly;
|
||||
|
||||
#define HYBRID_SCALING_FACTOR 78741
|
||||
#define HYBRID_SCALING_FACTOR_ADL 78741
|
||||
#define HYBRID_SCALING_FACTOR_MTL 80000
|
||||
#define HYBRID_SCALING_FACTOR_LNL 86957
|
||||
|
||||
static int hybrid_scaling_factor = HYBRID_SCALING_FACTOR;
|
||||
static int hybrid_scaling_factor;
|
||||
|
||||
static inline int core_get_scaling(void)
|
||||
{
|
||||
@ -414,18 +415,15 @@ static int intel_pstate_get_cppc_guaranteed(int cpu)
|
||||
static int intel_pstate_cppc_get_scaling(int cpu)
|
||||
{
|
||||
struct cppc_perf_caps cppc_perf;
|
||||
int ret;
|
||||
|
||||
ret = cppc_get_perf_caps(cpu, &cppc_perf);
|
||||
|
||||
/*
|
||||
* If the nominal frequency and the nominal performance are not
|
||||
* zero and the ratio between them is not 100, return the hybrid
|
||||
* scaling factor.
|
||||
* Compute the perf-to-frequency scaling factor for the given CPU if
|
||||
* possible, unless it would be 0.
|
||||
*/
|
||||
if (!ret && cppc_perf.nominal_perf && cppc_perf.nominal_freq &&
|
||||
cppc_perf.nominal_perf * 100 != cppc_perf.nominal_freq)
|
||||
return hybrid_scaling_factor;
|
||||
if (!cppc_get_perf_caps(cpu, &cppc_perf) &&
|
||||
cppc_perf.nominal_perf && cppc_perf.nominal_freq)
|
||||
return div_u64(cppc_perf.nominal_freq * KHZ_PER_MHZ,
|
||||
cppc_perf.nominal_perf);
|
||||
|
||||
return core_get_scaling();
|
||||
}
|
||||
@ -2211,24 +2209,30 @@ static void hybrid_get_type(void *data)
|
||||
|
||||
static int hwp_get_cpu_scaling(int cpu)
|
||||
{
|
||||
u8 cpu_type = 0;
|
||||
if (hybrid_scaling_factor) {
|
||||
u8 cpu_type = 0;
|
||||
|
||||
smp_call_function_single(cpu, hybrid_get_type, &cpu_type, 1);
|
||||
/* P-cores have a smaller perf level-to-freqency scaling factor. */
|
||||
if (cpu_type == 0x40)
|
||||
return hybrid_scaling_factor;
|
||||
smp_call_function_single(cpu, hybrid_get_type, &cpu_type, 1);
|
||||
|
||||
/* Use default core scaling for E-cores */
|
||||
if (cpu_type == 0x20)
|
||||
/*
|
||||
* Return the hybrid scaling factor for P-cores and use the
|
||||
* default core scaling for E-cores.
|
||||
*/
|
||||
if (cpu_type == 0x40)
|
||||
return hybrid_scaling_factor;
|
||||
|
||||
if (cpu_type == 0x20)
|
||||
return core_get_scaling();
|
||||
}
|
||||
|
||||
/* Use core scaling on non-hybrid systems. */
|
||||
if (!cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
|
||||
return core_get_scaling();
|
||||
|
||||
/*
|
||||
* If reached here, this system is either non-hybrid (like Tiger
|
||||
* Lake) or hybrid-capable (like Alder Lake or Raptor Lake) with
|
||||
* no E cores (in which case CPUID for hybrid support is 0).
|
||||
*
|
||||
* The CPPC nominal_frequency field is 0 for non-hybrid systems,
|
||||
* so the default core scaling will be used for them.
|
||||
* The system is hybrid, but the hybrid scaling factor is not known or
|
||||
* the CPU type is not one of the above, so use CPPC to compute the
|
||||
* scaling factor for this CPU.
|
||||
*/
|
||||
return intel_pstate_cppc_get_scaling(cpu);
|
||||
}
|
||||
@ -2709,7 +2713,7 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
|
||||
}
|
||||
|
||||
cpu->epp_powersave = -EINVAL;
|
||||
cpu->epp_policy = 0;
|
||||
cpu->epp_policy = CPUFREQ_POLICY_UNKNOWN;
|
||||
|
||||
intel_pstate_get_cpu_pstates(cpu);
|
||||
|
||||
@ -3665,8 +3669,12 @@ static const struct x86_cpu_id intel_epp_default[] = {
|
||||
};
|
||||
|
||||
static const struct x86_cpu_id intel_hybrid_scaling_factor[] = {
|
||||
X86_MATCH_VFM(INTEL_ALDERLAKE, HYBRID_SCALING_FACTOR_ADL),
|
||||
X86_MATCH_VFM(INTEL_ALDERLAKE_L, HYBRID_SCALING_FACTOR_ADL),
|
||||
X86_MATCH_VFM(INTEL_RAPTORLAKE, HYBRID_SCALING_FACTOR_ADL),
|
||||
X86_MATCH_VFM(INTEL_RAPTORLAKE_P, HYBRID_SCALING_FACTOR_ADL),
|
||||
X86_MATCH_VFM(INTEL_RAPTORLAKE_S, HYBRID_SCALING_FACTOR_ADL),
|
||||
X86_MATCH_VFM(INTEL_METEORLAKE_L, HYBRID_SCALING_FACTOR_MTL),
|
||||
X86_MATCH_VFM(INTEL_ARROWLAKE, HYBRID_SCALING_FACTOR_MTL),
|
||||
X86_MATCH_VFM(INTEL_LUNARLAKE_M, HYBRID_SCALING_FACTOR_LNL),
|
||||
{}
|
||||
};
|
||||
|
@ -1651,6 +1651,7 @@ static const struct x86_cpu_id intel_idle_ids[] __initconst = {
|
||||
X86_MATCH_VFM(INTEL_ATOM_TREMONT_D, &idle_cpu_snr),
|
||||
X86_MATCH_VFM(INTEL_ATOM_CRESTMONT, &idle_cpu_grr),
|
||||
X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X, &idle_cpu_srf),
|
||||
X86_MATCH_VFM(INTEL_ATOM_DARKMONT_X, &idle_cpu_srf),
|
||||
{}
|
||||
};
|
||||
|
||||
|
@ -2,6 +2,7 @@
/* Author: Dan Scally <djrscally@gmail.com> */

#include <linux/acpi.h>
#include <acpi/acpi_bus.h>
#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/i2c.h>
@ -107,7 +108,6 @@ static const char * const ipu_vcm_types[] = {
"lc898212axb",
};

#if IS_ENABLED(CONFIG_ACPI)
/*
* Used to figure out IVSC acpi device by ipu_bridge_get_ivsc_acpi_dev()
* instead of device and driver match to probe IVSC device.
@ -127,11 +127,11 @@ static struct acpi_device *ipu_bridge_get_ivsc_acpi_dev(struct acpi_device *adev
const struct acpi_device_id *acpi_id = &ivsc_acpi_ids[i];
struct acpi_device *consumer, *ivsc_adev;

acpi_handle handle = acpi_device_handle(adev);
acpi_handle handle = acpi_device_handle(ACPI_PTR(adev));
for_each_acpi_dev_match(ivsc_adev, acpi_id->id, NULL, -1)
/* camera sensor depends on IVSC in DSDT if it exists */
for_each_acpi_consumer_dev(ivsc_adev, consumer)
if (consumer->handle == handle) {
if (ACPI_PTR(consumer->handle) == handle) {
acpi_dev_put(consumer);
return ivsc_adev;
}
@ -139,12 +139,6 @@ static struct acpi_device *ipu_bridge_get_ivsc_acpi_dev(struct acpi_device *adev

return NULL;
}
#else
static struct acpi_device *ipu_bridge_get_ivsc_acpi_dev(struct acpi_device *adev)
{
return NULL;
}
#endif

static int ipu_bridge_match_ivsc_dev(struct device *dev, const void *adev)
{
@ -259,12 +253,8 @@ static enum v4l2_fwnode_orientation ipu_bridge_parse_orientation(struct acpi_dev
{
enum v4l2_fwnode_orientation orientation;
struct acpi_pld_info *pld = NULL;
acpi_status status = AE_ERROR;

#if IS_ENABLED(CONFIG_ACPI)
status = acpi_get_physical_device_location(adev->handle, &pld);
#endif
if (ACPI_FAILURE(status)) {
if (!acpi_get_physical_device_location(ACPI_PTR(adev->handle), &pld)) {
dev_warn(ADEV_DEV(adev), "_PLD call failed, using default orientation\n");
return V4L2_FWNODE_ORIENTATION_EXTERNAL;
}
@ -498,9 +488,7 @@ static void ipu_bridge_create_connection_swnodes(struct ipu_bridge *bridge,
if (sensor->csi_dev) {
const char *device_hid = "";

#if IS_ENABLED(CONFIG_ACPI)
device_hid = acpi_device_hid(sensor->ivsc_adev);
#endif

snprintf(sensor->ivsc_name, sizeof(sensor->ivsc_name), "%s-%u",
device_hid, sensor->link);
@ -671,11 +659,7 @@ static int ipu_bridge_connect_sensor(const struct ipu_sensor_config *cfg,
struct acpi_device *adev = NULL;
int ret;

#if IS_ENABLED(CONFIG_ACPI)
for_each_acpi_dev_match(adev, cfg->hid, NULL, -1) {
#else
while (true) {
#endif
if (!ACPI_PTR(adev->status.enabled))
continue;

@ -768,15 +752,10 @@ static int ipu_bridge_ivsc_is_ready(void)
unsigned int i;

for (i = 0; i < ARRAY_SIZE(ipu_supported_sensors); i++) {
#if IS_ENABLED(CONFIG_ACPI)
const struct ipu_sensor_config *cfg =
&ipu_supported_sensors[i];

for_each_acpi_dev_match(sensor_adev, cfg->hid, NULL, -1) {
#else
while (true) {
sensor_adev = NULL;
#endif
if (!ACPI_PTR(sensor_adev->status.enabled))
continue;
@ -1265,6 +1265,7 @@ static const struct x86_cpu_id rapl_ids[] __initconst = {
X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &rapl_defaults_spr_server),
X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &rapl_defaults_spr_server),
X86_MATCH_VFM(INTEL_LUNARLAKE_M, &rapl_defaults_core),
X86_MATCH_VFM(INTEL_PANTHERLAKE_L, &rapl_defaults_core),
X86_MATCH_VFM(INTEL_ARROWLAKE_H, &rapl_defaults_core),
X86_MATCH_VFM(INTEL_ARROWLAKE, &rapl_defaults_core),
X86_MATCH_VFM(INTEL_ARROWLAKE_U, &rapl_defaults_core),
@ -21,8 +21,8 @@ config INTEL_TCC

config X86_PKG_TEMP_THERMAL
tristate "X86 package temperature thermal driver"
depends on X86_THERMAL_VECTOR
select THERMAL_GOV_USER_SPACE
depends on X86_THERMAL_VECTOR && NET
select THERMAL_NETLINK
select INTEL_TCC
default m
help

@ -5,8 +5,8 @@

config INT340X_THERMAL
tristate "ACPI INT340X thermal drivers"
depends on X86_64 && ACPI && PCI
select THERMAL_GOV_USER_SPACE
depends on X86_64 && ACPI && PCI && NET
select THERMAL_NETLINK
select ACPI_THERMAL_REL
select ACPI_FAN
select ACPI_THERMAL_LIB

@ -521,7 +521,6 @@ static struct thermal_zone_device_ops int3400_thermal_ops = {
};

static struct thermal_zone_params int3400_thermal_params = {
.governor_name = "user_space",
.no_hwmon = true,
};

@ -690,6 +689,7 @@ static const struct acpi_device_id int3400_thermal_match[] = {
{"INTC1042", 0},
{"INTC1068", 0},
{"INTC10A0", 0},
{"INTC10D4", 0},
{}
};

@ -275,6 +275,7 @@ static const struct acpi_device_id int3403_device_ids[] = {
{"INTC1062", 0},
{"INTC1069", 0},
{"INTC10A1", 0},
{"INTC10D5", 0},
{"", 0},
};
MODULE_DEVICE_TABLE(acpi, int3403_device_ids);

@ -105,7 +105,6 @@ static int int340x_thermal_read_trips(struct acpi_device *zone_adev,
}

static struct thermal_zone_params int340x_thermal_params = {
.governor_name = "user_space",
.no_hwmon = true,
};
@ -30,6 +30,7 @@
#define PCI_DEVICE_ID_INTEL_RPL_THERMAL 0xA71D
#define PCI_DEVICE_ID_INTEL_SKL_THERMAL 0x1903
#define PCI_DEVICE_ID_INTEL_TGL_THERMAL 0x9A03
#define PCI_DEVICE_ID_INTEL_PTL_THERMAL 0xB01D

struct power_config {
u32 index;

@ -272,7 +272,6 @@ static const struct thermal_zone_device_ops tzone_ops = {
};

static struct thermal_zone_params tzone_params = {
.governor_name = "user_space",
.no_hwmon = true,
};

@ -495,6 +494,9 @@ static const struct pci_device_id proc_thermal_pci_ids[] = {
PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_DLVR | PROC_THERMAL_FEATURE_WT_HINT) },
{ PCI_DEVICE_DATA(INTEL, RPL_THERMAL, PROC_THERMAL_FEATURE_RAPL |
PROC_THERMAL_FEATURE_FIVR | PROC_THERMAL_FEATURE_DVFS | PROC_THERMAL_FEATURE_WT_REQ) },
{ PCI_DEVICE_DATA(INTEL, PTL_THERMAL, PROC_THERMAL_FEATURE_RAPL |
PROC_THERMAL_FEATURE_DLVR | PROC_THERMAL_FEATURE_MSI_SUPPORT |
PROC_THERMAL_FEATURE_WT_HINT | PROC_THERMAL_FEATURE_POWER_FLOOR) },
{ },
};
@ -213,8 +213,7 @@ usb_acpi_get_connect_type(struct usb_port *port_dev, acpi_handle *handle)
* no connectable, the port would be not used.
*/

status = acpi_get_physical_device_location(handle, &pld);
if (ACPI_SUCCESS(status) && pld)
if (acpi_get_physical_device_location(handle, &pld) && pld)
port_dev->location = USB_ACPI_LOCATION_VALID |
pld->group_token << 8 | pld->group_position;
@ -43,9 +43,6 @@ acpi_status
acpi_evaluate_ost(acpi_handle handle, u32 source_event, u32 status_code,
struct acpi_buffer *status_buf);

acpi_status
acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld);

bool acpi_has_method(acpi_handle handle, char *name);
acpi_status acpi_execute_simple_method(acpi_handle handle, char *method,
u64 arg);
@ -60,6 +57,9 @@ bool acpi_check_dsm(acpi_handle handle, const guid_t *guid, u64 rev, u64 funcs);
union acpi_object *acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid,
u64 rev, u64 func, union acpi_object *argv4);
#ifdef CONFIG_ACPI
bool
acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld);

static inline union acpi_object *
acpi_evaluate_dsm_typed(acpi_handle handle, const guid_t *guid, u64 rev,
u64 func, union acpi_object *argv4,

@ -1003,6 +1003,23 @@ static inline int unregister_acpi_bus_type(void *bus) { return 0; }

static inline int acpi_wait_for_acpi_ipmi(void) { return 0; }

static inline const char *acpi_device_hid(struct acpi_device *device)
{
return "";
}

static inline bool
acpi_get_physical_device_location(acpi_handle handle, struct acpi_pld_info **pld)
{
return false;
}

#define for_each_acpi_consumer_dev(supplier, consumer) \
for (consumer = NULL; false && (supplier);)

#define for_each_acpi_dev_match(adev, hid, uid, hrv) \
for (adev = NULL; false && (hid) && (uid) && (hrv); )

#endif /* CONFIG_ACPI */

#endif /*__ACPI_BUS_H__*/

@ -854,6 +854,11 @@ static inline struct fwnode_handle *acpi_fwnode_handle(struct acpi_device *adev)
return NULL;
}

static inline acpi_handle acpi_device_handle(struct acpi_device *adev)
{
return NULL;
}

static inline bool has_acpi_companion(struct device *dev)
{
return false;
@ -179,6 +179,7 @@ int em_dev_compute_costs(struct device *dev, struct em_perf_state *table,
int em_dev_update_chip_binning(struct device *dev);
int em_update_performance_limits(struct em_perf_domain *pd,
unsigned long freq_min_khz, unsigned long freq_max_khz);
void em_rebuild_sched_domains(void);

/**
* em_pd_get_efficient_state() - Get an efficient performance state from the EM
@ -404,6 +405,7 @@ int em_update_performance_limits(struct em_perf_domain *pd,
{
return -EINVAL;
}
static inline void em_rebuild_sched_domains(void) {}
#endif

#endif
@ -240,4 +240,21 @@ static inline int device_init_wakeup(struct device *dev, bool enable)
return 0;
}

static void device_disable_wakeup(void *dev)
{
device_init_wakeup(dev, false);
}

/**
* devm_device_init_wakeup - Resource managed device wakeup initialization.
* @dev: Device to handle.
*
* This function is the devm managed version of device_init_wakeup(dev, true).
*/
static inline int devm_device_init_wakeup(struct device *dev)
{
device_init_wakeup(dev, true);
return devm_add_action_or_reset(dev, device_disable_wakeup, dev);
}

#endif /* _LINUX_PM_WAKEUP_H */
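The new helper pairs the enable with an automatic disable on driver detach, so callers no longer need a matching cleanup call. A minimal sketch of intended use (foo_probe is a hypothetical platform driver probe, not part of this patch):

#include <linux/platform_device.h>
#include <linux/pm_wakeup.h>

static int foo_probe(struct platform_device *pdev)
{
	int ret;

	/* Enable wakeup; devm unwinds it via device_init_wakeup(dev, false) */
	ret = devm_device_init_wakeup(&pdev->dev);
	if (ret)
		return ret;

	/* ... rest of probe; no explicit disable needed in the remove path ... */
	return 0;
}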
@ -9,7 +9,6 @@

#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/pm_wakeup.h>

#include "power.h"
@ -908,3 +908,20 @@ int em_update_performance_limits(struct em_perf_domain *pd,
return 0;
}
EXPORT_SYMBOL_GPL(em_update_performance_limits);

static void rebuild_sd_workfn(struct work_struct *work)
{
rebuild_sched_domains_energy();
}

void em_rebuild_sched_domains(void)
{
static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);

/*
* When called from the cpufreq_register_driver() path, the
* cpu_hotplug_lock is already held, so use a work item to
* avoid nested locking in rebuild_sched_domains().
*/
schedule_work(&rebuild_sd_work);
}
@ -83,7 +83,7 @@ static bool sugov_should_update_freq(struct sugov_policy *sg_policy, u64 time)

if (unlikely(sg_policy->limits_changed)) {
sg_policy->limits_changed = false;
sg_policy->need_freq_update = true;
sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
return true;
}

@ -96,7 +96,7 @@ static bool sugov_update_next_freq(struct sugov_policy *sg_policy, u64 time,
unsigned int next_freq)
{
if (sg_policy->need_freq_update)
sg_policy->need_freq_update = cpufreq_driver_test_flags(CPUFREQ_NEED_UPDATE_LIMITS);
sg_policy->need_freq_update = false;
else if (sg_policy->next_freq == next_freq)
return false;

@ -604,31 +604,6 @@ static const struct kobj_type sugov_tunables_ktype = {

/********************** cpufreq governor interface *********************/

#ifdef CONFIG_ENERGY_MODEL
static void rebuild_sd_workfn(struct work_struct *work)
{
rebuild_sched_domains_energy();
}

static DECLARE_WORK(rebuild_sd_work, rebuild_sd_workfn);

/*
* EAS shouldn't be attempted without sugov, so rebuild the sched_domains
* on governor changes to make sure the scheduler knows about it.
*/
static void sugov_eas_rebuild_sd(void)
{
/*
* When called from the cpufreq_register_driver() path, the
* cpu_hotplug_lock is already held, so use a work item to
* avoid nested locking in rebuild_sched_domains().
*/
schedule_work(&rebuild_sd_work);
}
#else
static inline void sugov_eas_rebuild_sd(void) { };
#endif

struct cpufreq_governor schedutil_gov;

static struct sugov_policy *sugov_policy_alloc(struct cpufreq_policy *policy)
@ -784,7 +759,11 @@ static int sugov_init(struct cpufreq_policy *policy)
goto fail;

out:
sugov_eas_rebuild_sd();
/*
* Schedutil is the preferred governor for EAS, so rebuild sched domains
* on governor changes to make sure the scheduler knows about them.
*/
em_rebuild_sched_domains();
mutex_unlock(&global_tunables_lock);
return 0;

@ -826,7 +805,7 @@ static void sugov_exit(struct cpufreq_policy *policy)
sugov_policy_free(sg_policy);
cpufreq_disable_fast_switch(policy);

sugov_eas_rebuild_sd();
em_rebuild_sched_domains();
}

static int sugov_start(struct cpufreq_policy *policy)
@ -87,11 +87,19 @@ INSTALL_SCRIPT = ${INSTALL} -m 644
# to something more interesting, like "arm-linux-". If you want
# to compile vs uClibc, that can be done here as well.
CROSS ?= #/usr/i386-linux-uclibc/usr/bin/i386-uclibc-
ifneq ($(CROSS), )
CC = $(CROSS)gcc
LD = $(CROSS)gcc
AR = $(CROSS)ar
STRIP = $(CROSS)strip
RANLIB = $(CROSS)ranlib
else
CC ?= $(CROSS)gcc
LD ?= $(CROSS)gcc
AR ?= $(CROSS)ar
STRIP ?= $(CROSS)strip
RANLIB ?= $(CROSS)ranlib
endif
HOSTCC = gcc
MKDIR = mkdir
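With the assignments split this way, a cross toolchain prefix still forces the matching binutils, while native builds can override individual tools from the environment. For example (the arm-linux- prefix is the one suggested in the comment above):

make CROSS=arm-linux-    # forces arm-linux-gcc, arm-linux-ar, arm-linux-strip, ...
CC=clang make            # with CROSS empty, the ?= assignments honor the environment's CC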
@ -11,6 +11,7 @@ HAVE_PYCONFIG := $(shell if which python-config >/dev/null 2>&1; then echo 1; el
LIB_DIR := ../../lib
PY_INCLUDE = $(firstword $(shell python-config --includes))
OBJECTS_LIB = $(wildcard $(LIB_DIR)/*.o)
INSTALL_DIR = $(shell python3 -c "import site; print(site.getsitepackages()[0])")

all: _raw_pylibcpupower.so

@ -28,6 +29,15 @@ else ifeq ($(HAVE_PYCONFIG),0)
endif
swig -python raw_pylibcpupower.swg

# Only installs the Python bindings
install: _raw_pylibcpupower.so
install -D _raw_pylibcpupower.so $(INSTALL_DIR)/_raw_pylibcpupower.so
install -D raw_pylibcpupower.py $(INSTALL_DIR)/raw_pylibcpupower.py

uninstall:
rm -f $(INSTALL_DIR)/_raw_pylibcpupower.so
rm -f $(INSTALL_DIR)/raw_pylibcpupower.py

# Will only clean the bindings folder; will not clean the actual cpupower folder
clean:
rm -f raw_pylibcpupower.py raw_pylibcpupower_wrap.c raw_pylibcpupower_wrap.o _raw_pylibcpupower.so
@ -48,6 +48,31 @@ To run the test script:
$ python test_raw_pylibcpupower.py


developing/using the bindings directly
--------------------------------------

You need to add the Python bindings directory to your $PYTHONPATH.

Set the path in the Bash terminal or in the Bash profile:

PYTHONPATH=~/linux/tools/power/cpupower/bindings/python:$PYTHONPATH

This also lets you choose which checkout of the bindings is used.


installing/uninstalling
-----------------------

By default, Python looks up modules to import in a system-specific
site-packages folder. You do not need to install cpupower to use the SWIG
bindings.

You can install and uninstall the bindings to the site-packages with:

sudo make install

sudo make uninstall


credits
-------
@ -134,6 +134,9 @@ void cpufreq_put_stats(struct cpufreq_stats *stats);

unsigned long cpufreq_get_transitions(unsigned int cpu);

char *cpufreq_get_energy_performance_preference(unsigned int cpu);
void cpufreq_put_energy_performance_preference(char *ptr);

int cpufreq_set_policy(unsigned int cpu, struct cpufreq_policy *policy);

int cpufreq_modify_policy_min(unsigned int cpu, unsigned long min_freq);
@ -102,6 +102,10 @@ unsigned long cpufreq_get_sysfs_value_from_table(unsigned int cpu,
if (len == 0)
return 0;

if (!strcmp(linebuf, "enabled\n"))
return 1;
if (!strcmp(linebuf, "disabled\n"))
return 0;
value = strtoul(linebuf, &endp, 0);

if (endp == linebuf || errno == ERANGE)
@ -123,12 +127,14 @@ static unsigned long sysfs_cpufreq_get_one_value(unsigned int cpu,
enum cpufreq_string {
SCALING_DRIVER,
SCALING_GOVERNOR,
ENERGY_PERFORMANCE_PREFERENCE,
MAX_CPUFREQ_STRING_FILES
};

static const char *cpufreq_string_files[MAX_CPUFREQ_STRING_FILES] = {
[SCALING_DRIVER] = "scaling_driver",
[SCALING_GOVERNOR] = "scaling_governor",
[ENERGY_PERFORMANCE_PREFERENCE] = "energy_performance_preference",
};

@ -203,6 +209,18 @@ unsigned long cpufreq_get_transition_latency(unsigned int cpu)
return sysfs_cpufreq_get_one_value(cpu, CPUINFO_LATENCY);
}

char *cpufreq_get_energy_performance_preference(unsigned int cpu)
{
return sysfs_cpufreq_get_one_string(cpu, ENERGY_PERFORMANCE_PREFERENCE);
}

void cpufreq_put_energy_performance_preference(char *ptr)
{
if (!ptr)
return;
free(ptr);
}

int cpufreq_get_hardware_limits(unsigned int cpu,
unsigned long *min,
unsigned long *max)
@ -68,6 +68,14 @@ unsigned long cpufreq_get_freq_hardware(unsigned int cpu);
unsigned long cpufreq_get_transition_latency(unsigned int cpu);


/* determine energy performance preference
*
* returns NULL on failure, else the string that represents the energy performance
* preference requested.
*/
char *cpufreq_get_energy_performance_preference(unsigned int cpu);
void cpufreq_put_energy_performance_preference(char *ptr);

/* determine hardware CPU frequency limits
*
* These may be limited further by thermal, energy or other
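A minimal sketch of a libcpupower client using this getter/putter pair; it assumes the program is built against this header and linked with -lcpupower:

#include <stdio.h>
#include "cpufreq.h"

int main(void)
{
	unsigned int cpu = 0;
	char *epp = cpufreq_get_energy_performance_preference(cpu);

	if (!epp) {
		fprintf(stderr, "no EPP value for CPU %u\n", cpu);
		return 1;
	}
	printf("cpu%u energy_performance_preference: %s\n", cpu, epp);
	/* the returned string is heap-allocated; hand it back to the library */
	cpufreq_put_energy_performance_preference(epp);
	return 0;
}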
@ -120,7 +120,6 @@ static void print_duration(unsigned long duration)
} else
printf("%lu ns", duration);
}
return;
}

static int get_boost_mode_x86(unsigned int cpu)

@ -255,7 +254,12 @@ static int get_freq_kernel(unsigned int cpu, unsigned int human)

static int get_freq_hardware(unsigned int cpu, unsigned int human)
{
unsigned long freq = cpufreq_get_freq_hardware(cpu);
unsigned long freq;

if (cpupower_cpu_info.caps & CPUPOWER_CAP_APERF)
return -EINVAL;

freq = cpufreq_get_freq_hardware(cpu);
printf(_(" current CPU frequency: "));
if (!freq) {
printf("Unable to call hardware\n");
@ -418,12 +422,32 @@ static int get_freq_stats(unsigned int cpu, unsigned int human)
return 0;
}

/* --epp / -z */

static int get_epp(unsigned int cpu, bool interactive)
{
char *epp;

epp = cpufreq_get_energy_performance_preference(cpu);
if (!epp)
return -EINVAL;
if (interactive)
printf(_(" energy performance preference: %s\n"), epp);

cpufreq_put_energy_performance_preference(epp);

return 0;
}

/* --latency / -y */

static int get_latency(unsigned int cpu, unsigned int human)
{
unsigned long latency = cpufreq_get_transition_latency(cpu);

if (!get_epp(cpu, false))
return -EINVAL;

printf(_(" maximum transition latency: "));
if (!latency || latency == UINT_MAX) {
printf(_(" Cannot determine or is not supported.\n"));
@ -457,6 +481,7 @@ static void debug_output_one(unsigned int cpu)
get_related_cpus(cpu);
get_affected_cpus(cpu);
get_latency(cpu, 1);
get_epp(cpu, true);
get_hardware_limits(cpu, 1);

freqs = cpufreq_get_available_frequencies(cpu);
@ -497,6 +522,7 @@ static struct option info_opts[] = {
{"human", no_argument, NULL, 'm'},
{"no-rounding", no_argument, NULL, 'n'},
{"performance", no_argument, NULL, 'c'},
{"epp", no_argument, NULL, 'z'},
{ },
};

@ -510,7 +536,7 @@ int cmd_freq_info(int argc, char **argv)
int output_param = 0;

do {
ret = getopt_long(argc, argv, "oefwldpgrasmybnc", info_opts,
ret = getopt_long(argc, argv, "oefwldpgrasmybncz", info_opts,
NULL);
switch (ret) {
case '?':
@ -534,6 +560,7 @@ int cmd_freq_info(int argc, char **argv)
case 's':
case 'y':
case 'c':
case 'z':
if (output_param) {
output_param = -1;
cont = 0;
@ -643,6 +670,9 @@ int cmd_freq_info(int argc, char **argv)
case 'c':
ret = get_perf_cap(cpu);
break;
case 'z':
ret = get_epp(cpu, true);
break;
}
if (ret)
return ret;
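With the option wired into info_opts and the getopt string, the preference can be queried on its own. On a system whose cpufreq driver exposes energy_performance_preference in sysfs, the new flag would print something like (value illustrative):

$ cpupower frequency-info --epp
  energy performance preference: balance_performance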
@ -177,6 +177,8 @@ enum amd_pstate_value {
AMD_PSTATE_HIGHEST_PERF,
AMD_PSTATE_MAX_FREQ,
AMD_PSTATE_LOWEST_NONLINEAR_FREQ,
AMD_PSTATE_HW_PREFCORE,
AMD_PSTATE_PREFCORE_RANKING,
MAX_AMD_PSTATE_VALUE_READ_FILES,
};

@ -184,6 +186,8 @@ static const char *amd_pstate_value_files[MAX_AMD_PSTATE_VALUE_READ_FILES] = {
[AMD_PSTATE_HIGHEST_PERF] = "amd_pstate_highest_perf",
[AMD_PSTATE_MAX_FREQ] = "amd_pstate_max_freq",
[AMD_PSTATE_LOWEST_NONLINEAR_FREQ] = "amd_pstate_lowest_nonlinear_freq",
[AMD_PSTATE_HW_PREFCORE] = "amd_pstate_hw_prefcore",
[AMD_PSTATE_PREFCORE_RANKING] = "amd_pstate_prefcore_ranking",
};

static unsigned long amd_pstate_get_data(unsigned int cpu,
@ -215,7 +219,9 @@ void amd_pstate_boost_init(unsigned int cpu, int *support, int *active)

void amd_pstate_show_perf_and_freq(unsigned int cpu, int no_rounding)
{
printf(_(" AMD PSTATE Highest Performance: %lu. Maximum Frequency: "),

printf(_(" amd-pstate limits:\n"));
printf(_(" Highest Performance: %lu. Maximum Frequency: "),
amd_pstate_get_data(cpu, AMD_PSTATE_HIGHEST_PERF));
/*
* If boost isn't active, the cpuinfo_max doesn't indicate real max
@ -224,22 +230,26 @@ void amd_pstate_show_perf_and_freq(unsigned int cpu, int no_rounding)
print_speed(amd_pstate_get_data(cpu, AMD_PSTATE_MAX_FREQ), no_rounding);
printf(".\n");

printf(_(" AMD PSTATE Nominal Performance: %lu. Nominal Frequency: "),
printf(_(" Nominal Performance: %lu. Nominal Frequency: "),
acpi_cppc_get_data(cpu, NOMINAL_PERF));
print_speed(acpi_cppc_get_data(cpu, NOMINAL_FREQ) * 1000,
no_rounding);
printf(".\n");

printf(_(" AMD PSTATE Lowest Non-linear Performance: %lu. Lowest Non-linear Frequency: "),
printf(_(" Lowest Non-linear Performance: %lu. Lowest Non-linear Frequency: "),
acpi_cppc_get_data(cpu, LOWEST_NONLINEAR_PERF));
print_speed(amd_pstate_get_data(cpu, AMD_PSTATE_LOWEST_NONLINEAR_FREQ),
no_rounding);
printf(".\n");

printf(_(" AMD PSTATE Lowest Performance: %lu. Lowest Frequency: "),
printf(_(" Lowest Performance: %lu. Lowest Frequency: "),
acpi_cppc_get_data(cpu, LOWEST_PERF));
print_speed(acpi_cppc_get_data(cpu, LOWEST_FREQ) * 1000, no_rounding);
printf(".\n");

printf(_(" Preferred Core Support: %lu. Preferred Core Ranking: %lu.\n"),
amd_pstate_get_data(cpu, AMD_PSTATE_HW_PREFCORE),
amd_pstate_get_data(cpu, AMD_PSTATE_PREFCORE_RANKING));
}

/* AMD P-State Helper Functions ************************************/
@ -117,7 +117,7 @@ static int hsw_ext_start(void)

for (num = 0; num < HSW_EXT_CSTATE_COUNT; num++) {
for (cpu = 0; cpu < cpu_count; cpu++) {
hsw_ext_get_count(num, &val, cpu);
is_valid[cpu] = !hsw_ext_get_count(num, &val, cpu);
previous_count[num][cpu] = val;
}
}
@ -134,7 +134,7 @@ static int hsw_ext_stop(void)

for (num = 0; num < HSW_EXT_CSTATE_COUNT; num++) {
for (cpu = 0; cpu < cpu_count; cpu++) {
is_valid[cpu] = !hsw_ext_get_count(num, &val, cpu);
is_valid[cpu] |= !hsw_ext_get_count(num, &val, cpu);
current_count[num][cpu] = val;
}
}
@ -33,7 +33,7 @@ static int mperf_get_count_percent(unsigned int self_id, double *percent,
unsigned int cpu);
static int mperf_get_count_freq(unsigned int id, unsigned long long *count,
unsigned int cpu);
static struct timespec time_start, time_end;
static struct timespec *time_start, *time_end;

static cstate_t mperf_cstates[MPERF_CSTATE_COUNT] = {
{
@ -148,7 +148,7 @@ static int mperf_measure_stats(unsigned int cpu)
ret = get_aperf_mperf(cpu, &aval, &mval);
aperf_current_count[cpu] = aval;
mperf_current_count[cpu] = mval;
is_valid[cpu] = !ret;
is_valid[cpu] |= !ret;

return 0;
}
@ -174,7 +174,7 @@ static int mperf_get_count_percent(unsigned int id, double *percent,
dprint("%s: TSC Ref - mperf_diff: %llu, tsc_diff: %llu\n",
mperf_cstates[id].name, mperf_diff, tsc_diff);
} else if (max_freq_mode == MAX_FREQ_SYSFS) {
timediff = max_frequency * timespec_diff_us(time_start, time_end);
timediff = max_frequency * timespec_diff_us(time_start[cpu], time_end[cpu]);
*percent = 100.0 * mperf_diff / timediff;
dprint("%s: MAXFREQ - mperf_diff: %llu, time_diff: %llu\n",
mperf_cstates[id].name, mperf_diff, timediff);
@ -207,7 +207,7 @@ static int mperf_get_count_freq(unsigned int id, unsigned long long *count,
if (max_freq_mode == MAX_FREQ_TSC_REF) {
/* Calculate max_freq from TSC count */
tsc_diff = tsc_at_measure_end[cpu] - tsc_at_measure_start[cpu];
time_diff = timespec_diff_us(time_start, time_end);
time_diff = timespec_diff_us(time_start[cpu], time_end[cpu]);
max_frequency = tsc_diff / time_diff;
}

@ -226,9 +226,8 @@ static int mperf_start(void)
{
int cpu;

clock_gettime(CLOCK_REALTIME, &time_start);

for (cpu = 0; cpu < cpu_count; cpu++) {
clock_gettime(CLOCK_REALTIME, &time_start[cpu]);
mperf_get_tsc(&tsc_at_measure_start[cpu]);
mperf_init_stats(cpu);
}
@ -243,9 +242,9 @@ static int mperf_stop(void)
for (cpu = 0; cpu < cpu_count; cpu++) {
mperf_measure_stats(cpu);
mperf_get_tsc(&tsc_at_measure_end[cpu]);
clock_gettime(CLOCK_REALTIME, &time_end[cpu]);
}

clock_gettime(CLOCK_REALTIME, &time_end);
return 0;
}

@ -349,6 +348,8 @@ struct cpuidle_monitor *mperf_register(void)
aperf_current_count = calloc(cpu_count, sizeof(unsigned long long));
tsc_at_measure_start = calloc(cpu_count, sizeof(unsigned long long));
tsc_at_measure_end = calloc(cpu_count, sizeof(unsigned long long));
time_start = calloc(cpu_count, sizeof(struct timespec));
time_end = calloc(cpu_count, sizeof(struct timespec));
mperf_monitor.name_len = strlen(mperf_monitor.name);
return &mperf_monitor;
}
@ -361,6 +362,8 @@ void mperf_unregister(void)
free(aperf_current_count);
free(tsc_at_measure_start);
free(tsc_at_measure_end);
free(time_start);
free(time_end);
free(is_valid);
}
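Turning time_start/time_end into per-CPU arrays matters because the timestamps are now taken inside the per-CPU loops, next to each CPU's TSC reads, so every CPU's wall-clock window matches its own TSC window. A self-contained sketch of that bookkeeping (timespec_diff_us is re-implemented here for illustration; cpupower has its own helper):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static long long timespec_diff_us(struct timespec start, struct timespec end)
{
	long long secs = end.tv_sec - start.tv_sec;
	long long nsecs = end.tv_nsec - start.tv_nsec;

	if (nsecs < 0) {
		secs--;
		nsecs += 1000000000LL;
	}
	return secs * 1000000LL + nsecs / 1000;
}

int main(void)
{
	int cpu, cpu_count = 4;	/* stand-in for the monitor's cpu_count */
	struct timespec *time_start = calloc(cpu_count, sizeof(struct timespec));
	struct timespec *time_end = calloc(cpu_count, sizeof(struct timespec));

	for (cpu = 0; cpu < cpu_count; cpu++)
		clock_gettime(CLOCK_REALTIME, &time_start[cpu]);

	/* ... per-CPU measurement would run here ... */

	for (cpu = 0; cpu < cpu_count; cpu++) {
		clock_gettime(CLOCK_REALTIME, &time_end[cpu]);
		printf("cpu%d window: %lld us\n", cpu,
		       timespec_diff_us(time_start[cpu], time_end[cpu]));
	}
	free(time_start);
	free(time_end);
	return 0;
}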
@ -151,7 +151,7 @@ static int nhm_stop(void)

for (num = 0; num < NHM_CSTATE_COUNT; num++) {
for (cpu = 0; cpu < cpu_count; cpu++) {
is_valid[cpu] = !nhm_get_count(num, &val, cpu);
is_valid[cpu] |= !nhm_get_count(num, &val, cpu);
current_count[num][cpu] = val;
}
}
@ -115,7 +115,7 @@ static int snb_start(void)

for (num = 0; num < SNB_CSTATE_COUNT; num++) {
for (cpu = 0; cpu < cpu_count; cpu++) {
snb_get_count(num, &val, cpu);
is_valid[cpu] = !snb_get_count(num, &val, cpu);
previous_count[num][cpu] = val;
}
}
@ -132,7 +132,7 @@ static int snb_stop(void)

for (num = 0; num < SNB_CSTATE_COUNT; num++) {
for (cpu = 0; cpu < cpu_count; cpu++) {
is_valid[cpu] = !snb_get_count(num, &val, cpu);
is_valid[cpu] |= !snb_get_count(num, &val, cpu);
current_count[num][cpu] = val;
}
}
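Across these monitors the pattern is the same: the start path seeds is_valid[cpu] with '=', and the stop path accumulates with '|=', so a CPU stays valid when at least one of its counters could be read rather than reflecting only the last counter checked. A small standalone illustration of why the operator matters:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* simulated per-counter read results: 0 = success, -1 = failure */
	int ret[2] = { 0, -1 };
	bool assign = false, accumulate = false;
	int num;

	for (num = 0; num < 2; num++) {
		assign = !ret[num];		/* '=' : only the last read counts */
		accumulate |= !ret[num];	/* '|=': any successful read counts */
	}
	printf("= gives %d, |= gives %d\n", assign, accumulate);	/* 0 vs 1 */
	return 0;
}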
tools/testing/selftests/cpufreq/.gitignore (new file)
@ -0,0 +1,2 @@
# SPDX-License-Identifier: GPL-2.0-only
cpufreq_selftest.*

@ -3,6 +3,7 @@ all:

TEST_PROGS := main.sh
TEST_FILES := cpu.sh cpufreq.sh governor.sh module.sh special-tests.sh
EXTRA_CLEAN := cpufreq_selftest.dmesg_cpufreq.txt cpufreq_selftest.dmesg_full.txt cpufreq_selftest.txt

include ../lib.mk