mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
Merge branch 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6
* 'release' of git://git.kernel.org/pub/scm/linux/kernel/git/lenb/linux-acpi-2.6: (41 commits)
  ACPICA: hw: Don't carry spinlock over suspend
  ACPICA: hw: remove use_lock flag from acpi_hw_register_{read, write}
  ACPI: cpuidle: port idle timer suspend/resume workaround to cpuidle
  ACPI: clean up acpi_enter_sleep_state_prep
  Hibernation: Make sure that ACPI is enabled in acpi_hibernation_finish
  ACPI: suppress uninitialized var warning
  cpuidle: consolidate 2.6.22 cpuidle branch into one patch
  ACPI: thinkpad-acpi: skip blanks before the data when parsing sysfs
  ACPI: AC: Add sysfs interface
  ACPI: SBS: Add sysfs alarm
  ACPI: SBS: Add ACPI_PROCFS around procfs handling code.
  ACPI: SBS: Add support for power_supply class (and sysfs)
  ACPI: SBS: Make SBS reads table-driven.
  ACPI: SBS: Simplify data structures in SBS
  ACPI: SBS: Split host controller (ACPI0001) from SBS driver (ACPI0002)
  ACPI: EC: Add new query handler to list head.
  ACPI: Add acpi_bus_generate_event4() function
  ACPI: Battery: add sysfs alarm
  ACPI: Battery: Add sysfs support
  ACPI: Battery: Misc clean-ups, no functional changes
  ...

Fix up conflicts in drivers/misc/thinkpad_acpi.[ch] manually
commit c4ec207173
@ -105,10 +105,15 @@ The version of thinkpad-acpi's sysfs interface is exported by the driver
as a driver attribute (see below).

Sysfs driver attributes are on the driver's sysfs attribute space,
for 2.6.20 this is /sys/bus/platform/drivers/thinkpad_acpi/.
for 2.6.23 this is /sys/bus/platform/drivers/thinkpad_acpi/ and
/sys/bus/platform/drivers/thinkpad_hwmon/

Sysfs device attributes are on the driver's sysfs attribute space,
for 2.6.20 this is /sys/devices/platform/thinkpad_acpi/.
Sysfs device attributes are on the thinkpad_acpi device sysfs attribute
space, for 2.6.23 this is /sys/devices/platform/thinkpad_acpi/.

Sysfs device attributes for the sensors and fan are on the
thinkpad_hwmon device's sysfs attribute space, but you should locate it
looking for a hwmon device with the name attribute of "thinkpad".

Driver version
--------------
@ -766,7 +771,7 @@ Temperature sensors
-------------------

procfs: /proc/acpi/ibm/thermal
sysfs device attributes: (hwmon) temp*_input
sysfs device attributes: (hwmon "thinkpad") temp*_input

Most ThinkPads include six or more separate temperature sensors but only
expose the CPU temperature through the standard ACPI methods. This
@ -989,7 +994,9 @@ Fan control and monitoring: fan speed, fan enable/disable
---------------------------------------------------------

procfs: /proc/acpi/ibm/fan
sysfs device attributes: (hwmon) fan_input, pwm1, pwm1_enable
sysfs device attributes: (hwmon "thinkpad") fan1_input, pwm1,
	pwm1_enable
sysfs hwmon driver attributes: fan_watchdog

NOTE NOTE NOTE: fan control operations are disabled by default for
safety reasons. To enable them, the module parameter "fan_control=1"
@ -1131,7 +1138,7 @@ hwmon device attribute fan1_input:
	which can take up to two minutes. May return rubbish on older
	ThinkPads.

driver attribute fan_watchdog:
hwmon driver attribute fan_watchdog:
	Fan safety watchdog timer interval, in seconds. Minimum is
	1 second, maximum is 120 seconds. 0 disables the watchdog.

@ -1233,3 +1240,9 @@ Sysfs interface changelog:
		layer, the radio switch generates input event EV_RADIO,
		and the driver enables hot key handling by default in
		the firmware.

0x020000:	ABI fix: added a separate hwmon platform device and
		driver, which must be located by name (thinkpad)
		and the hwmon class for libsensors4 (lm-sensors 3)
		compatibility. Moved all hwmon attributes to this
		new platform device.

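The changelog entry above requires userspace to locate the new hwmon platform device by its name attribute ("thinkpad") rather than by a hard-coded sysfs path. A minimal userspace sketch of that lookup is shown below; it is illustrative only, not part of this patch, and it assumes the usual /sys/class/hwmon/hwmonN layout, in which the name attribute may sit either in the class directory or in its device/ subdirectory depending on kernel version.

/* Illustrative userspace helper, not part of this patch: find the sysfs
 * directory of the hwmon device whose "name" attribute reads "thinkpad".
 * Assumes the /sys/class/hwmon/hwmonN layout; the name file may be in the
 * class directory or in its "device" subdirectory. */
#include <dirent.h>
#include <stdio.h>
#include <string.h>

static int read_name(const char *path, char *buf, size_t len)
{
	FILE *f = fopen(path, "r");

	if (!f)
		return -1;
	if (!fgets(buf, len, f)) {
		fclose(f);
		return -1;
	}
	fclose(f);
	buf[strcspn(buf, "\n")] = '\0';
	return 0;
}

/* On success, writes the matching hwmon directory into out and returns 0. */
int find_thinkpad_hwmon(char *out, size_t outlen)
{
	DIR *d = opendir("/sys/class/hwmon");
	struct dirent *de;
	char path[256], name[64];

	if (!d)
		return -1;
	while ((de = readdir(d)) != NULL) {
		if (strncmp(de->d_name, "hwmon", 5) != 0)
			continue;
		snprintf(path, sizeof(path), "/sys/class/hwmon/%s/name",
			 de->d_name);
		if (read_name(path, name, sizeof(name)) != 0) {
			snprintf(path, sizeof(path),
				 "/sys/class/hwmon/%s/device/name", de->d_name);
			if (read_name(path, name, sizeof(name)) != 0)
				continue;
		}
		if (strcmp(name, "thinkpad") == 0) {
			snprintf(out, outlen, "/sys/class/hwmon/%s", de->d_name);
			closedir(d);
			return 0;
		}
	}
	closedir(d);
	return -1;
}

This is essentially the lookup libsensors4 performs, which is why the changelog entry calls out lm-sensors 3 compatibility.
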
@ -1082,6 +1082,8 @@ endif # APM

source "arch/x86/kernel/cpu/cpufreq/Kconfig"

source "drivers/cpuidle/Kconfig"

endmenu

menu "Bus options (PCI, PCMCIA, EISA, MCA, ISA)"

@ -725,6 +725,8 @@ source "drivers/acpi/Kconfig"

source "arch/x86/kernel/cpufreq/Kconfig"

source "drivers/cpuidle/Kconfig"

endmenu

menu "Bus options (PCI etc.)"

@ -76,6 +76,7 @@ obj-$(CONFIG_MCA) += mca/
obj-$(CONFIG_EISA) += eisa/
obj-$(CONFIG_LGUEST_GUEST) += lguest/
obj-$(CONFIG_CPU_FREQ) += cpufreq/
obj-$(CONFIG_CPU_IDLE) += cpuidle/
obj-$(CONFIG_MMC) += mmc/
obj-$(CONFIG_NEW_LEDS) += leds/
obj-$(CONFIG_INFINIBAND) += infiniband/

@ -88,7 +88,7 @@ config ACPI_PROC_EVENT

config ACPI_AC
	tristate "AC Adapter"
	depends on X86
	depends on X86 && POWER_SUPPLY
	default y
	help
	  This driver adds support for the AC Adapter object, which indicates
@ -97,7 +97,7 @@ config ACPI_AC

config ACPI_BATTERY
	tristate "Battery"
	depends on X86
	depends on X86 && POWER_SUPPLY
	default y
	help
	  This driver adds support for battery information through
@ -117,6 +117,7 @@ config ACPI_BUTTON
config ACPI_VIDEO
	tristate "Video"
	depends on X86 && BACKLIGHT_CLASS_DEVICE && VIDEO_OUTPUT_CONTROL
	depends on INPUT
	help
	  This driver implement the ACPI Extensions For Display Adapters
	  for integrated graphics devices on motherboard, as specified in
@ -349,12 +350,11 @@ config ACPI_HOTPLUG_MEMORY
	  $>modprobe acpi_memhotplug

config ACPI_SBS
	tristate "Smart Battery System (EXPERIMENTAL)"
	tristate "Smart Battery System"
	depends on X86
	depends on EXPERIMENTAL
	depends on POWER_SUPPLY
	help
	  This driver adds support for the Smart Battery System.
	  A "Smart Battery" is quite old and quite rare compared
	  to today's ACPI "Control Method" battery.
	  This driver adds support for the Smart Battery System, another
	  type of access to battery information, found on some laptops.

endif # ACPI

@ -60,3 +60,4 @@ obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o
obj-$(CONFIG_ACPI_HOTPLUG_MEMORY) += acpi_memhotplug.o
obj-y += cm_sbs.o
obj-$(CONFIG_ACPI_SBS) += sbs.o
obj-$(CONFIG_ACPI_SBS) += sbshc.o

@ -29,6 +29,7 @@
|
||||
#include <linux/types.h>
|
||||
#include <linux/proc_fs.h>
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/power_supply.h>
|
||||
#include <acpi/acpi_bus.h>
|
||||
#include <acpi/acpi_drivers.h>
|
||||
|
||||
@ -72,16 +73,37 @@ static struct acpi_driver acpi_ac_driver = {
|
||||
};
|
||||
|
||||
struct acpi_ac {
|
||||
struct power_supply charger;
|
||||
struct acpi_device * device;
|
||||
unsigned long state;
|
||||
};
|
||||
|
||||
#define to_acpi_ac(x) container_of(x, struct acpi_ac, charger);
|
||||
|
||||
static const struct file_operations acpi_ac_fops = {
|
||||
.open = acpi_ac_open_fs,
|
||||
.read = seq_read,
|
||||
.llseek = seq_lseek,
|
||||
.release = single_release,
|
||||
};
|
||||
static int get_ac_property(struct power_supply *psy,
|
||||
enum power_supply_property psp,
|
||||
union power_supply_propval *val)
|
||||
{
|
||||
struct acpi_ac *ac = to_acpi_ac(psy);
|
||||
switch (psp) {
|
||||
case POWER_SUPPLY_PROP_ONLINE:
|
||||
val->intval = ac->state;
|
||||
break;
|
||||
default:
|
||||
return -EINVAL;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static enum power_supply_property ac_props[] = {
|
||||
POWER_SUPPLY_PROP_ONLINE,
|
||||
};
|
||||
|
||||
/* --------------------------------------------------------------------------
|
||||
AC Adapter Management
|
||||
@ -208,6 +230,7 @@ static void acpi_ac_notify(acpi_handle handle, u32 event, void *data)
|
||||
acpi_bus_generate_netlink_event(device->pnp.device_class,
|
||||
device->dev.bus_id, event,
|
||||
(u32) ac->state);
|
||||
kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
|
||||
break;
|
||||
default:
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_INFO,
|
||||
@ -244,7 +267,12 @@ static int acpi_ac_add(struct acpi_device *device)
|
||||
result = acpi_ac_add_fs(device);
|
||||
if (result)
|
||||
goto end;
|
||||
|
||||
ac->charger.name = acpi_device_bid(device);
|
||||
ac->charger.type = POWER_SUPPLY_TYPE_MAINS;
|
||||
ac->charger.properties = ac_props;
|
||||
ac->charger.num_properties = ARRAY_SIZE(ac_props);
|
||||
ac->charger.get_property = get_ac_property;
|
||||
power_supply_register(&ac->device->dev, &ac->charger);
|
||||
status = acpi_install_notify_handler(device->handle,
|
||||
ACPI_ALL_NOTIFY, acpi_ac_notify,
|
||||
ac);
|
||||
@ -279,7 +307,8 @@ static int acpi_ac_remove(struct acpi_device *device, int type)
|
||||
|
||||
status = acpi_remove_notify_handler(device->handle,
|
||||
ACPI_ALL_NOTIFY, acpi_ac_notify);
|
||||
|
||||
if (ac->charger.dev)
|
||||
power_supply_unregister(&ac->charger);
|
||||
acpi_ac_remove_fs(device);
|
||||
|
||||
kfree(ac);
|
||||
|
File diff suppressed because it is too large
@ -286,15 +286,11 @@ DECLARE_WAIT_QUEUE_HEAD(acpi_bus_event_queue);
|
||||
|
||||
extern int event_is_open;
|
||||
|
||||
int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data)
|
||||
int acpi_bus_generate_proc_event4(const char *device_class, const char *bus_id, u8 type, int data)
|
||||
{
|
||||
struct acpi_bus_event *event = NULL;
|
||||
struct acpi_bus_event *event;
|
||||
unsigned long flags = 0;
|
||||
|
||||
|
||||
if (!device)
|
||||
return -EINVAL;
|
||||
|
||||
/* drop event on the floor if no one's listening */
|
||||
if (!event_is_open)
|
||||
return 0;
|
||||
@ -303,8 +299,8 @@ int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data)
|
||||
if (!event)
|
||||
return -ENOMEM;
|
||||
|
||||
strcpy(event->device_class, device->pnp.device_class);
|
||||
strcpy(event->bus_id, device->pnp.bus_id);
|
||||
strcpy(event->device_class, device_class);
|
||||
strcpy(event->bus_id, bus_id);
|
||||
event->type = type;
|
||||
event->data = data;
|
||||
|
||||
@ -315,6 +311,17 @@ int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data)
|
||||
wake_up_interruptible(&acpi_bus_event_queue);
|
||||
|
||||
return 0;
|
||||
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL_GPL(acpi_bus_generate_proc_event4);
|
||||
|
||||
int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data)
|
||||
{
|
||||
if (!device)
|
||||
return -EINVAL;
|
||||
return acpi_bus_generate_proc_event4(device->pnp.device_class,
|
||||
device->pnp.bus_id, type, data);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL(acpi_bus_generate_proc_event);
|
||||
|
@ -121,6 +121,7 @@ static struct acpi_ec {
|
||||
atomic_t event_count;
|
||||
wait_queue_head_t wait;
|
||||
struct list_head list;
|
||||
u8 handlers_installed;
|
||||
} *boot_ec, *first_ec;
|
||||
|
||||
/* --------------------------------------------------------------------------
|
||||
@ -425,7 +426,7 @@ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
|
||||
handler->func = func;
|
||||
handler->data = data;
|
||||
mutex_lock(&ec->lock);
|
||||
list_add_tail(&handler->node, &ec->list);
|
||||
list_add(&handler->node, &ec->list);
|
||||
mutex_unlock(&ec->lock);
|
||||
return 0;
|
||||
}
|
||||
@ -440,7 +441,6 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
|
||||
if (query_bit == handler->query_bit) {
|
||||
list_del(&handler->node);
|
||||
kfree(handler);
|
||||
break;
|
||||
}
|
||||
}
|
||||
mutex_unlock(&ec->lock);
|
||||
@ -680,32 +680,50 @@ ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
|
||||
status = acpi_evaluate_integer(handle, "_GPE", NULL, &ec->gpe);
|
||||
if (ACPI_FAILURE(status))
|
||||
return status;
|
||||
|
||||
/* Find and register all query methods */
|
||||
acpi_walk_namespace(ACPI_TYPE_METHOD, handle, 1,
|
||||
acpi_ec_register_query_methods, ec, NULL);
|
||||
|
||||
/* Use the global lock for all EC transactions? */
|
||||
acpi_evaluate_integer(handle, "_GLK", NULL, &ec->global_lock);
|
||||
|
||||
ec->handle = handle;
|
||||
|
||||
printk(KERN_INFO PREFIX "GPE = 0x%lx, I/O: command/status = 0x%lx, data = 0x%lx\n",
|
||||
ec->gpe, ec->command_addr, ec->data_addr);
|
||||
|
||||
return AE_CTRL_TERMINATE;
|
||||
}
|
||||
|
||||
static void ec_remove_handlers(struct acpi_ec *ec)
|
||||
{
|
||||
if (ACPI_FAILURE(acpi_remove_address_space_handler(ec->handle,
|
||||
ACPI_ADR_SPACE_EC, &acpi_ec_space_handler)))
|
||||
printk(KERN_ERR PREFIX "failed to remove space handler\n");
|
||||
if (ACPI_FAILURE(acpi_remove_gpe_handler(NULL, ec->gpe,
|
||||
&acpi_ec_gpe_handler)))
|
||||
printk(KERN_ERR PREFIX "failed to remove gpe handler\n");
|
||||
ec->handlers_installed = 0;
|
||||
}
|
||||
|
||||
static int acpi_ec_add(struct acpi_device *device)
|
||||
{
|
||||
struct acpi_ec *ec = NULL;
|
||||
|
||||
if (!device)
|
||||
return -EINVAL;
|
||||
|
||||
strcpy(acpi_device_name(device), ACPI_EC_DEVICE_NAME);
|
||||
strcpy(acpi_device_class(device), ACPI_EC_CLASS);
|
||||
|
||||
/* Check for boot EC */
|
||||
if (boot_ec) {
|
||||
if (boot_ec->handle == device->handle) {
|
||||
/* Pre-loaded EC from DSDT, just move pointer */
|
||||
ec = boot_ec;
|
||||
boot_ec = NULL;
|
||||
goto end;
|
||||
} else if (boot_ec->handle == ACPI_ROOT_OBJECT) {
|
||||
/* ECDT-based EC, time to shut it down */
|
||||
ec_remove_handlers(boot_ec);
|
||||
kfree(boot_ec);
|
||||
first_ec = boot_ec = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
ec = make_acpi_ec();
|
||||
if (!ec)
|
||||
return -ENOMEM;
|
||||
@ -715,25 +733,14 @@ static int acpi_ec_add(struct acpi_device *device)
|
||||
kfree(ec);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* Check if we found the boot EC */
|
||||
if (boot_ec) {
|
||||
if (boot_ec->gpe == ec->gpe) {
|
||||
/* We might have incorrect info for GL at boot time */
|
||||
mutex_lock(&boot_ec->lock);
|
||||
boot_ec->global_lock = ec->global_lock;
|
||||
/* Copy handlers from new ec into boot ec */
|
||||
list_splice(&ec->list, &boot_ec->list);
|
||||
mutex_unlock(&boot_ec->lock);
|
||||
kfree(ec);
|
||||
ec = boot_ec;
|
||||
}
|
||||
} else
|
||||
first_ec = ec;
|
||||
ec->handle = device->handle;
|
||||
end:
|
||||
if (!first_ec)
|
||||
first_ec = ec;
|
||||
acpi_driver_data(device) = ec;
|
||||
|
||||
acpi_ec_add_fs(device);
|
||||
printk(KERN_INFO PREFIX "GPE = 0x%lx, I/O: command/status = 0x%lx, data = 0x%lx\n",
|
||||
ec->gpe, ec->command_addr, ec->data_addr);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -756,10 +763,7 @@ static int acpi_ec_remove(struct acpi_device *device, int type)
|
||||
acpi_driver_data(device) = NULL;
|
||||
if (ec == first_ec)
|
||||
first_ec = NULL;
|
||||
|
||||
/* Don't touch boot EC */
|
||||
if (boot_ec != ec)
|
||||
kfree(ec);
|
||||
kfree(ec);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -789,6 +793,8 @@ ec_parse_io_ports(struct acpi_resource *resource, void *context)
|
||||
static int ec_install_handlers(struct acpi_ec *ec)
|
||||
{
|
||||
acpi_status status;
|
||||
if (ec->handlers_installed)
|
||||
return 0;
|
||||
status = acpi_install_gpe_handler(NULL, ec->gpe,
|
||||
ACPI_GPE_EDGE_TRIGGERED,
|
||||
&acpi_ec_gpe_handler, ec);
|
||||
@ -807,6 +813,7 @@ static int ec_install_handlers(struct acpi_ec *ec)
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
ec->handlers_installed = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -823,41 +830,22 @@ static int acpi_ec_start(struct acpi_device *device)
|
||||
if (!ec)
|
||||
return -EINVAL;
|
||||
|
||||
/* Boot EC is already working */
|
||||
if (ec != boot_ec)
|
||||
ret = ec_install_handlers(ec);
|
||||
ret = ec_install_handlers(ec);
|
||||
|
||||
/* EC is fully operational, allow queries */
|
||||
atomic_set(&ec->query_pending, 0);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int acpi_ec_stop(struct acpi_device *device, int type)
|
||||
{
|
||||
acpi_status status;
|
||||
struct acpi_ec *ec;
|
||||
|
||||
if (!device)
|
||||
return -EINVAL;
|
||||
|
||||
ec = acpi_driver_data(device);
|
||||
if (!ec)
|
||||
return -EINVAL;
|
||||
|
||||
/* Don't touch boot EC */
|
||||
if (ec == boot_ec)
|
||||
return 0;
|
||||
|
||||
status = acpi_remove_address_space_handler(ec->handle,
|
||||
ACPI_ADR_SPACE_EC,
|
||||
&acpi_ec_space_handler);
|
||||
if (ACPI_FAILURE(status))
|
||||
return -ENODEV;
|
||||
|
||||
status = acpi_remove_gpe_handler(NULL, ec->gpe, &acpi_ec_gpe_handler);
|
||||
if (ACPI_FAILURE(status))
|
||||
return -ENODEV;
|
||||
ec_remove_handlers(ec);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -877,7 +865,7 @@ int __init acpi_ec_ecdt_probe(void)
|
||||
status = acpi_get_table(ACPI_SIG_ECDT, 1,
|
||||
(struct acpi_table_header **)&ecdt_ptr);
|
||||
if (ACPI_SUCCESS(status)) {
|
||||
printk(KERN_INFO PREFIX "EC description table is found, configuring boot EC\n\n");
|
||||
printk(KERN_INFO PREFIX "EC description table is found, configuring boot EC\n");
|
||||
boot_ec->command_addr = ecdt_ptr->control.address;
|
||||
boot_ec->data_addr = ecdt_ptr->data.address;
|
||||
boot_ec->gpe = ecdt_ptr->gpe;
|
||||
@ -899,7 +887,6 @@ int __init acpi_ec_ecdt_probe(void)
|
||||
error:
|
||||
kfree(boot_ec);
|
||||
boot_ec = NULL;
|
||||
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
|
@ -239,10 +239,8 @@ u32 acpi_ev_fixed_event_detect(void)
|
||||
* Read the fixed feature status and enable registers, as all the cases
|
||||
* depend on their values. Ignore errors here.
|
||||
*/
|
||||
(void)acpi_hw_register_read(ACPI_MTX_DO_NOT_LOCK,
|
||||
ACPI_REGISTER_PM1_STATUS, &fixed_status);
|
||||
(void)acpi_hw_register_read(ACPI_MTX_DO_NOT_LOCK,
|
||||
ACPI_REGISTER_PM1_ENABLE, &fixed_enable);
|
||||
(void)acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &fixed_status);
|
||||
(void)acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &fixed_enable);
|
||||
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
|
||||
"Fixed Event Block: Enable %08X Status %08X\n",
|
||||
|
@ -75,8 +75,7 @@ acpi_status acpi_hw_clear_acpi_status(void)
|
||||
|
||||
lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
|
||||
|
||||
status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
|
||||
ACPI_REGISTER_PM1_STATUS,
|
||||
status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS,
|
||||
ACPI_BITMASK_ALL_FIXED_STATUS);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
goto unlock_and_exit;
|
||||
@ -259,7 +258,7 @@ struct acpi_bit_register_info *acpi_hw_get_bit_register_info(u32 register_id)
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
acpi_status acpi_get_register(u32 register_id, u32 * return_value)
|
||||
acpi_status acpi_get_register_unlocked(u32 register_id, u32 * return_value)
|
||||
{
|
||||
u32 register_value = 0;
|
||||
struct acpi_bit_register_info *bit_reg_info;
|
||||
@ -276,8 +275,7 @@ acpi_status acpi_get_register(u32 register_id, u32 * return_value)
|
||||
|
||||
/* Read from the register */
|
||||
|
||||
status = acpi_hw_register_read(ACPI_MTX_LOCK,
|
||||
bit_reg_info->parent_register,
|
||||
status = acpi_hw_register_read(bit_reg_info->parent_register,
|
||||
&register_value);
|
||||
|
||||
if (ACPI_SUCCESS(status)) {
|
||||
@ -298,6 +296,16 @@ acpi_status acpi_get_register(u32 register_id, u32 * return_value)
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
acpi_status acpi_get_register(u32 register_id, u32 * return_value)
|
||||
{
|
||||
acpi_status status;
|
||||
acpi_cpu_flags flags;
|
||||
flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
|
||||
status = acpi_get_register_unlocked(register_id, return_value);
|
||||
acpi_os_release_lock(acpi_gbl_hardware_lock, flags);
|
||||
return status;
|
||||
}
|
||||
|
||||
ACPI_EXPORT_SYMBOL(acpi_get_register)
|
||||
|
||||
/*******************************************************************************
|
||||
@ -335,8 +343,7 @@ acpi_status acpi_set_register(u32 register_id, u32 value)
|
||||
|
||||
/* Always do a register read first so we can insert the new bits */
|
||||
|
||||
status = acpi_hw_register_read(ACPI_MTX_DO_NOT_LOCK,
|
||||
bit_reg_info->parent_register,
|
||||
status = acpi_hw_register_read(bit_reg_info->parent_register,
|
||||
&register_value);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
goto unlock_and_exit;
|
||||
@ -363,8 +370,7 @@ acpi_status acpi_set_register(u32 register_id, u32 value)
|
||||
bit_reg_info->
|
||||
access_bit_mask);
|
||||
if (value) {
|
||||
status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
|
||||
ACPI_REGISTER_PM1_STATUS,
|
||||
status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS,
|
||||
(u16) value);
|
||||
register_value = 0;
|
||||
}
|
||||
@ -377,8 +383,7 @@ acpi_status acpi_set_register(u32 register_id, u32 value)
|
||||
bit_reg_info->access_bit_mask,
|
||||
value);
|
||||
|
||||
status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
|
||||
ACPI_REGISTER_PM1_ENABLE,
|
||||
status = acpi_hw_register_write(ACPI_REGISTER_PM1_ENABLE,
|
||||
(u16) register_value);
|
||||
break;
|
||||
|
||||
@ -397,15 +402,13 @@ acpi_status acpi_set_register(u32 register_id, u32 value)
|
||||
bit_reg_info->access_bit_mask,
|
||||
value);
|
||||
|
||||
status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
|
||||
ACPI_REGISTER_PM1_CONTROL,
|
||||
status = acpi_hw_register_write(ACPI_REGISTER_PM1_CONTROL,
|
||||
(u16) register_value);
|
||||
break;
|
||||
|
||||
case ACPI_REGISTER_PM2_CONTROL:
|
||||
|
||||
status = acpi_hw_register_read(ACPI_MTX_DO_NOT_LOCK,
|
||||
ACPI_REGISTER_PM2_CONTROL,
|
||||
status = acpi_hw_register_read(ACPI_REGISTER_PM2_CONTROL,
|
||||
&register_value);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
goto unlock_and_exit;
|
||||
@ -430,8 +433,7 @@ acpi_status acpi_set_register(u32 register_id, u32 value)
|
||||
xpm2_control_block.
|
||||
address)));
|
||||
|
||||
status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
|
||||
ACPI_REGISTER_PM2_CONTROL,
|
||||
status = acpi_hw_register_write(ACPI_REGISTER_PM2_CONTROL,
|
||||
(u8) (register_value));
|
||||
break;
|
||||
|
||||
@ -461,8 +463,7 @@ ACPI_EXPORT_SYMBOL(acpi_set_register)
|
||||
*
|
||||
* FUNCTION: acpi_hw_register_read
|
||||
*
|
||||
* PARAMETERS: use_lock - Lock hardware? True/False
|
||||
* register_id - ACPI Register ID
|
||||
* PARAMETERS: register_id - ACPI Register ID
|
||||
* return_value - Where the register value is returned
|
||||
*
|
||||
* RETURN: Status and the value read.
|
||||
@ -471,19 +472,14 @@ ACPI_EXPORT_SYMBOL(acpi_set_register)
|
||||
*
|
||||
******************************************************************************/
|
||||
acpi_status
|
||||
acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)
|
||||
acpi_hw_register_read(u32 register_id, u32 * return_value)
|
||||
{
|
||||
u32 value1 = 0;
|
||||
u32 value2 = 0;
|
||||
acpi_status status;
|
||||
acpi_cpu_flags lock_flags = 0;
|
||||
|
||||
ACPI_FUNCTION_TRACE(hw_register_read);
|
||||
|
||||
if (ACPI_MTX_LOCK == use_lock) {
|
||||
lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
|
||||
}
|
||||
|
||||
switch (register_id) {
|
||||
case ACPI_REGISTER_PM1_STATUS: /* 16-bit access */
|
||||
|
||||
@ -491,7 +487,7 @@ acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)
|
||||
acpi_hw_low_level_read(16, &value1,
|
||||
&acpi_gbl_FADT.xpm1a_event_block);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
goto unlock_and_exit;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
/* PM1B is optional */
|
||||
@ -507,7 +503,7 @@ acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)
|
||||
status =
|
||||
acpi_hw_low_level_read(16, &value1, &acpi_gbl_xpm1a_enable);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
goto unlock_and_exit;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
/* PM1B is optional */
|
||||
@ -523,7 +519,7 @@ acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)
|
||||
acpi_hw_low_level_read(16, &value1,
|
||||
&acpi_gbl_FADT.xpm1a_control_block);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
goto unlock_and_exit;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
status =
|
||||
@ -558,10 +554,7 @@ acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)
|
||||
break;
|
||||
}
|
||||
|
||||
unlock_and_exit:
|
||||
if (ACPI_MTX_LOCK == use_lock) {
|
||||
acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
|
||||
}
|
||||
exit:
|
||||
|
||||
if (ACPI_SUCCESS(status)) {
|
||||
*return_value = value1;
|
||||
@ -574,8 +567,7 @@ acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)
|
||||
*
|
||||
* FUNCTION: acpi_hw_register_write
|
||||
*
|
||||
* PARAMETERS: use_lock - Lock hardware? True/False
|
||||
* register_id - ACPI Register ID
|
||||
* PARAMETERS: register_id - ACPI Register ID
|
||||
* Value - The value to write
|
||||
*
|
||||
* RETURN: Status
|
||||
@ -597,28 +589,22 @@ acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value)
|
||||
*
|
||||
******************************************************************************/
|
||||
|
||||
acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value)
|
||||
acpi_status acpi_hw_register_write(u32 register_id, u32 value)
|
||||
{
|
||||
acpi_status status;
|
||||
acpi_cpu_flags lock_flags = 0;
|
||||
u32 read_value;
|
||||
|
||||
ACPI_FUNCTION_TRACE(hw_register_write);
|
||||
|
||||
if (ACPI_MTX_LOCK == use_lock) {
|
||||
lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
|
||||
}
|
||||
|
||||
switch (register_id) {
|
||||
case ACPI_REGISTER_PM1_STATUS: /* 16-bit access */
|
||||
|
||||
/* Perform a read first to preserve certain bits (per ACPI spec) */
|
||||
|
||||
status = acpi_hw_register_read(ACPI_MTX_DO_NOT_LOCK,
|
||||
ACPI_REGISTER_PM1_STATUS,
|
||||
status = acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS,
|
||||
&read_value);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
goto unlock_and_exit;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
/* Insert the bits to be preserved */
|
||||
@ -632,7 +618,7 @@ acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value)
|
||||
acpi_hw_low_level_write(16, value,
|
||||
&acpi_gbl_FADT.xpm1a_event_block);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
goto unlock_and_exit;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
/* PM1B is optional */
|
||||
@ -647,7 +633,7 @@ acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value)
|
||||
status =
|
||||
acpi_hw_low_level_write(16, value, &acpi_gbl_xpm1a_enable);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
goto unlock_and_exit;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
/* PM1B is optional */
|
||||
@ -661,11 +647,10 @@ acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value)
|
||||
/*
|
||||
* Perform a read first to preserve certain bits (per ACPI spec)
|
||||
*/
|
||||
status = acpi_hw_register_read(ACPI_MTX_DO_NOT_LOCK,
|
||||
ACPI_REGISTER_PM1_CONTROL,
|
||||
status = acpi_hw_register_read(ACPI_REGISTER_PM1_CONTROL,
|
||||
&read_value);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
goto unlock_and_exit;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
/* Insert the bits to be preserved */
|
||||
@ -679,7 +664,7 @@ acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value)
|
||||
acpi_hw_low_level_write(16, value,
|
||||
&acpi_gbl_FADT.xpm1a_control_block);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
goto unlock_and_exit;
|
||||
goto exit;
|
||||
}
|
||||
|
||||
status =
|
||||
@ -728,11 +713,7 @@ acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value)
|
||||
break;
|
||||
}
|
||||
|
||||
unlock_and_exit:
|
||||
if (ACPI_MTX_LOCK == use_lock) {
|
||||
acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
|
||||
}
|
||||
|
||||
exit:
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
|
@ -234,15 +234,11 @@ acpi_status acpi_enter_sleep_state_prep(u8 sleep_state)
|
||||
"While executing method _SST"));
|
||||
}
|
||||
|
||||
/*
|
||||
* 1) Disable/Clear all GPEs
|
||||
*/
|
||||
status = acpi_hw_disable_all_gpes();
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
/* Disable/Clear all GPEs */
|
||||
|
||||
return_ACPI_STATUS(AE_OK);
|
||||
status = acpi_hw_disable_all_gpes();
|
||||
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
ACPI_EXPORT_SYMBOL(acpi_enter_sleep_state_prep)
|
||||
@ -313,8 +309,7 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
|
||||
|
||||
/* Get current value of PM1A control */
|
||||
|
||||
status = acpi_hw_register_read(ACPI_MTX_DO_NOT_LOCK,
|
||||
ACPI_REGISTER_PM1_CONTROL, &PM1Acontrol);
|
||||
status = acpi_hw_register_read(ACPI_REGISTER_PM1_CONTROL, &PM1Acontrol);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
@ -341,15 +336,13 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
|
||||
|
||||
/* Write #1: fill in SLP_TYP data */
|
||||
|
||||
status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
|
||||
ACPI_REGISTER_PM1A_CONTROL,
|
||||
status = acpi_hw_register_write(ACPI_REGISTER_PM1A_CONTROL,
|
||||
PM1Acontrol);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
|
||||
ACPI_REGISTER_PM1B_CONTROL,
|
||||
status = acpi_hw_register_write(ACPI_REGISTER_PM1B_CONTROL,
|
||||
PM1Bcontrol);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
@ -364,15 +357,13 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
|
||||
|
||||
ACPI_FLUSH_CPU_CACHE();
|
||||
|
||||
status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
|
||||
ACPI_REGISTER_PM1A_CONTROL,
|
||||
status = acpi_hw_register_write(ACPI_REGISTER_PM1A_CONTROL,
|
||||
PM1Acontrol);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
|
||||
status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
|
||||
ACPI_REGISTER_PM1B_CONTROL,
|
||||
status = acpi_hw_register_write(ACPI_REGISTER_PM1B_CONTROL,
|
||||
PM1Bcontrol);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
@ -392,8 +383,7 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
|
||||
*/
|
||||
acpi_os_stall(10000000);
|
||||
|
||||
status = acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
|
||||
ACPI_REGISTER_PM1_CONTROL,
|
||||
status = acpi_hw_register_write(ACPI_REGISTER_PM1_CONTROL,
|
||||
sleep_enable_reg_info->
|
||||
access_bit_mask);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
@ -404,7 +394,8 @@ acpi_status asmlinkage acpi_enter_sleep_state(u8 sleep_state)
|
||||
/* Wait until we enter sleep state */
|
||||
|
||||
do {
|
||||
status = acpi_get_register(ACPI_BITREG_WAKE_STATUS, &in_value);
|
||||
status = acpi_get_register_unlocked(ACPI_BITREG_WAKE_STATUS,
|
||||
&in_value);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
return_ACPI_STATUS(status);
|
||||
}
|
||||
@ -520,8 +511,7 @@ acpi_status acpi_leave_sleep_state(u8 sleep_state)
|
||||
|
||||
/* Get current value of PM1A control */
|
||||
|
||||
status = acpi_hw_register_read(ACPI_MTX_DO_NOT_LOCK,
|
||||
ACPI_REGISTER_PM1_CONTROL,
|
||||
status = acpi_hw_register_read(ACPI_REGISTER_PM1_CONTROL,
|
||||
&PM1Acontrol);
|
||||
if (ACPI_SUCCESS(status)) {
|
||||
|
||||
@ -543,11 +533,9 @@ acpi_status acpi_leave_sleep_state(u8 sleep_state)
|
||||
|
||||
/* Just ignore any errors */
|
||||
|
||||
(void)acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
|
||||
ACPI_REGISTER_PM1A_CONTROL,
|
||||
(void)acpi_hw_register_write(ACPI_REGISTER_PM1A_CONTROL,
|
||||
PM1Acontrol);
|
||||
(void)acpi_hw_register_write(ACPI_MTX_DO_NOT_LOCK,
|
||||
ACPI_REGISTER_PM1B_CONTROL,
|
||||
(void)acpi_hw_register_write(ACPI_REGISTER_PM1B_CONTROL,
|
||||
PM1Bcontrol);
|
||||
}
|
||||
}
|
||||
|
@ -1042,14 +1042,6 @@ static int __init acpi_wake_gpes_always_on_setup(char *str)
|
||||
|
||||
__setup("acpi_wake_gpes_always_on", acpi_wake_gpes_always_on_setup);
|
||||
|
||||
/*
|
||||
* max_cstate is defined in the base kernel so modules can
|
||||
* change it w/o depending on the state of the processor module.
|
||||
*/
|
||||
unsigned int max_cstate = ACPI_PROCESSOR_MAX_POWER;
|
||||
|
||||
EXPORT_SYMBOL(max_cstate);
|
||||
|
||||
/*
|
||||
* Acquire a spinlock.
|
||||
*
|
||||
|
@ -44,6 +44,7 @@
|
||||
#include <linux/seq_file.h>
|
||||
#include <linux/dmi.h>
|
||||
#include <linux/moduleparam.h>
|
||||
#include <linux/cpuidle.h>
|
||||
|
||||
#include <asm/io.h>
|
||||
#include <asm/system.h>
|
||||
@ -1049,11 +1050,13 @@ static int __init acpi_processor_init(void)
|
||||
return -ENOMEM;
|
||||
acpi_processor_dir->owner = THIS_MODULE;
|
||||
|
||||
result = cpuidle_register_driver(&acpi_idle_driver);
|
||||
if (result < 0)
|
||||
goto out_proc;
|
||||
|
||||
result = acpi_bus_register_driver(&acpi_processor_driver);
|
||||
if (result < 0) {
|
||||
remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
|
||||
return result;
|
||||
}
|
||||
if (result < 0)
|
||||
goto out_cpuidle;
|
||||
|
||||
acpi_processor_install_hotplug_notify();
|
||||
|
||||
@ -1062,11 +1065,18 @@ static int __init acpi_processor_init(void)
|
||||
acpi_processor_ppc_init();
|
||||
|
||||
return 0;
|
||||
|
||||
out_cpuidle:
|
||||
cpuidle_unregister_driver(&acpi_idle_driver);
|
||||
|
||||
out_proc:
|
||||
remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
static void __exit acpi_processor_exit(void)
|
||||
{
|
||||
|
||||
acpi_processor_ppc_exit();
|
||||
|
||||
acpi_thermal_cpufreq_exit();
|
||||
@ -1075,6 +1085,8 @@ static void __exit acpi_processor_exit(void)
|
||||
|
||||
acpi_bus_unregister_driver(&acpi_processor_driver);
|
||||
|
||||
cpuidle_unregister_driver(&acpi_idle_driver);
|
||||
|
||||
remove_proc_entry(ACPI_PROCESSOR_CLASS, acpi_root_dir);
|
||||
|
||||
return;
|
||||
|
@ -40,6 +40,7 @@
|
||||
#include <linux/sched.h> /* need_resched() */
|
||||
#include <linux/latency.h>
|
||||
#include <linux/clockchips.h>
|
||||
#include <linux/cpuidle.h>
|
||||
|
||||
/*
|
||||
* Include the apic definitions for x86 to have the APIC timer related defines
|
||||
@ -64,14 +65,22 @@ ACPI_MODULE_NAME("processor_idle");
|
||||
#define ACPI_PROCESSOR_FILE_POWER "power"
|
||||
#define US_TO_PM_TIMER_TICKS(t) ((t * (PM_TIMER_FREQUENCY/1000)) / 1000)
|
||||
#define PM_TIMER_TICK_NS (1000000000ULL/PM_TIMER_FREQUENCY)
|
||||
#ifndef CONFIG_CPU_IDLE
|
||||
#define C2_OVERHEAD 4 /* 1us (3.579 ticks per us) */
|
||||
#define C3_OVERHEAD 4 /* 1us (3.579 ticks per us) */
|
||||
static void (*pm_idle_save) (void) __read_mostly;
|
||||
module_param(max_cstate, uint, 0644);
|
||||
#else
|
||||
#define C2_OVERHEAD 1 /* 1us */
|
||||
#define C3_OVERHEAD 1 /* 1us */
|
||||
#endif
|
||||
#define PM_TIMER_TICKS_TO_US(p) (((p) * 1000)/(PM_TIMER_FREQUENCY/1000))
|
||||
|
||||
static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
|
||||
module_param(max_cstate, uint, 0000);
|
||||
static unsigned int nocst __read_mostly;
|
||||
module_param(nocst, uint, 0000);
|
||||
|
||||
#ifndef CONFIG_CPU_IDLE
|
||||
/*
|
||||
* bm_history -- bit-mask with a bit per jiffy of bus-master activity
|
||||
* 1000 HZ: 0xFFFFFFFF: 32 jiffies = 32ms
|
||||
@ -82,9 +91,10 @@ module_param(nocst, uint, 0000);
|
||||
static unsigned int bm_history __read_mostly =
|
||||
(HZ >= 800 ? 0xFFFFFFFF : ((1U << (HZ / 25)) - 1));
|
||||
module_param(bm_history, uint, 0644);
|
||||
/* --------------------------------------------------------------------------
|
||||
Power Management
|
||||
-------------------------------------------------------------------------- */
|
||||
|
||||
static int acpi_processor_set_power_policy(struct acpi_processor *pr);
|
||||
|
||||
#endif
|
||||
|
||||
/*
|
||||
* IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
|
||||
@ -177,6 +187,18 @@ static inline u32 ticks_elapsed(u32 t1, u32 t2)
|
||||
return ((0xFFFFFFFF - t1) + t2);
|
||||
}
|
||||
|
||||
static inline u32 ticks_elapsed_in_us(u32 t1, u32 t2)
|
||||
{
|
||||
if (t2 >= t1)
|
||||
return PM_TIMER_TICKS_TO_US(t2 - t1);
|
||||
else if (!(acpi_gbl_FADT.flags & ACPI_FADT_32BIT_TIMER))
|
||||
return PM_TIMER_TICKS_TO_US(((0x00FFFFFF - t1) + t2) & 0x00FFFFFF);
|
||||
else
|
||||
return PM_TIMER_TICKS_TO_US((0xFFFFFFFF - t1) + t2);
|
||||
}
|
||||
|
||||
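As an aside (not part of this patch): ticks_elapsed_in_us() above has to cope with the ACPI PM timer wrapping, because the counter is only 24 bits wide unless the FADT sets ACPI_FADT_32BIT_TIMER. When the second read is numerically smaller than the first, the elapsed ticks are reconstructed modulo the timer width before conversion to microseconds. A standalone user-space sketch of that arithmetic, assuming the standard 3.579545 MHz PM timer rate:

/* Illustration only, not kernel code: the 24-bit wrap-around handling
 * used by ticks_elapsed_in_us(), with the standard ACPI PM timer rate. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PM_TIMER_FREQUENCY	3579545
#define PM_TIMER_TICKS_TO_US(p)	(((p) * 1000) / (PM_TIMER_FREQUENCY / 1000))

static uint32_t ticks_elapsed_24bit(uint32_t t1, uint32_t t2)
{
	if (t2 >= t1)
		return t2 - t1;
	/* The 24-bit counter wrapped between the two reads. */
	return ((0x00FFFFFF - t1) + t2) & 0x00FFFFFF;
}

int main(void)
{
	/* The second read is smaller because the timer wrapped;
	 * roughly 3579 ticks correspond to one millisecond. */
	uint32_t t1 = 0x00FFFF00, t2 = 0x00000DFB;
	uint32_t ticks = ticks_elapsed_24bit(t1, t2);

	printf("ticks=%u -> %u us\n", ticks,
	       (unsigned int)PM_TIMER_TICKS_TO_US(ticks));
	assert(ticks == 3834);
	return 0;
}
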
#ifndef CONFIG_CPU_IDLE
|
||||
|
||||
static void
|
||||
acpi_processor_power_activate(struct acpi_processor *pr,
|
||||
struct acpi_processor_cx *new)
|
||||
@ -248,6 +270,7 @@ static void acpi_cstate_enter(struct acpi_processor_cx *cstate)
|
||||
unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
|
||||
}
|
||||
}
|
||||
#endif /* !CONFIG_CPU_IDLE */
|
||||
|
||||
#ifdef ARCH_APICTIMER_STOPS_ON_C3
|
||||
|
||||
@ -330,6 +353,7 @@ int acpi_processor_resume(struct acpi_device * device)
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifndef CONFIG_CPU_IDLE
|
||||
static void acpi_processor_idle(void)
|
||||
{
|
||||
struct acpi_processor *pr = NULL;
|
||||
@ -427,7 +451,7 @@ static void acpi_processor_idle(void)
|
||||
* an SMP system. We do it here instead of doing it at _CST/P_LVL
|
||||
* detection phase, to work cleanly with logical CPU hotplug.
|
||||
*/
|
||||
if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
|
||||
if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
|
||||
!pr->flags.has_cst && !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
|
||||
cx = &pr->power.states[ACPI_STATE_C1];
|
||||
#endif
|
||||
@ -727,6 +751,7 @@ static int acpi_processor_set_power_policy(struct acpi_processor *pr)
|
||||
|
||||
return 0;
|
||||
}
|
||||
#endif /* !CONFIG_CPU_IDLE */
|
||||
|
||||
static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
|
||||
{
|
||||
@ -744,7 +769,7 @@ static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
|
||||
#ifndef CONFIG_HOTPLUG_CPU
|
||||
/*
|
||||
* Check for P_LVL2_UP flag before entering C2 and above on
|
||||
* an SMP system.
|
||||
* an SMP system.
|
||||
*/
|
||||
if ((num_online_cpus() > 1) &&
|
||||
!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
|
||||
@ -945,7 +970,12 @@ static void acpi_processor_power_verify_c2(struct acpi_processor_cx *cx)
|
||||
* Normalize the C2 latency to expidite policy
|
||||
*/
|
||||
cx->valid = 1;
|
||||
|
||||
#ifndef CONFIG_CPU_IDLE
|
||||
cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
|
||||
#else
|
||||
cx->latency_ticks = cx->latency;
|
||||
#endif
|
||||
|
||||
return;
|
||||
}
|
||||
@ -1025,7 +1055,12 @@ static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
|
||||
* use this in our C3 policy
|
||||
*/
|
||||
cx->valid = 1;
|
||||
|
||||
#ifndef CONFIG_CPU_IDLE
|
||||
cx->latency_ticks = US_TO_PM_TIMER_TICKS(cx->latency);
|
||||
#else
|
||||
cx->latency_ticks = cx->latency;
|
||||
#endif
|
||||
|
||||
return;
|
||||
}
|
||||
@ -1090,6 +1125,7 @@ static int acpi_processor_get_power_info(struct acpi_processor *pr)
|
||||
|
||||
pr->power.count = acpi_processor_power_verify(pr);
|
||||
|
||||
#ifndef CONFIG_CPU_IDLE
|
||||
/*
|
||||
* Set Default Policy
|
||||
* ------------------
|
||||
@ -1101,6 +1137,7 @@ static int acpi_processor_get_power_info(struct acpi_processor *pr)
|
||||
result = acpi_processor_set_power_policy(pr);
|
||||
if (result)
|
||||
return result;
|
||||
#endif
|
||||
|
||||
/*
|
||||
* if one state of type C2 or C3 is available, mark this
|
||||
@ -1117,35 +1154,6 @@ static int acpi_processor_get_power_info(struct acpi_processor *pr)
|
||||
return 0;
|
||||
}
|
||||
|
||||
int acpi_processor_cst_has_changed(struct acpi_processor *pr)
|
||||
{
|
||||
int result = 0;
|
||||
|
||||
|
||||
if (!pr)
|
||||
return -EINVAL;
|
||||
|
||||
if (nocst) {
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
if (!pr->flags.power_setup_done)
|
||||
return -ENODEV;
|
||||
|
||||
/* Fall back to the default idle loop */
|
||||
pm_idle = pm_idle_save;
|
||||
synchronize_sched(); /* Relies on interrupts forcing exit from idle. */
|
||||
|
||||
pr->flags.power = 0;
|
||||
result = acpi_processor_get_power_info(pr);
|
||||
if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
|
||||
pm_idle = acpi_processor_idle;
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/* proc interface */
|
||||
|
||||
static int acpi_processor_power_seq_show(struct seq_file *seq, void *offset)
|
||||
{
|
||||
struct acpi_processor *pr = seq->private;
|
||||
@ -1227,6 +1235,35 @@ static const struct file_operations acpi_processor_power_fops = {
|
||||
.release = single_release,
|
||||
};
|
||||
|
||||
#ifndef CONFIG_CPU_IDLE
|
||||
|
||||
int acpi_processor_cst_has_changed(struct acpi_processor *pr)
|
||||
{
|
||||
int result = 0;
|
||||
|
||||
|
||||
if (!pr)
|
||||
return -EINVAL;
|
||||
|
||||
if (nocst) {
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
if (!pr->flags.power_setup_done)
|
||||
return -ENODEV;
|
||||
|
||||
/* Fall back to the default idle loop */
|
||||
pm_idle = pm_idle_save;
|
||||
synchronize_sched(); /* Relies on interrupts forcing exit from idle. */
|
||||
|
||||
pr->flags.power = 0;
|
||||
result = acpi_processor_get_power_info(pr);
|
||||
if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
|
||||
pm_idle = acpi_processor_idle;
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
static void smp_callback(void *v)
|
||||
{
|
||||
@ -1249,8 +1286,367 @@ static int acpi_processor_latency_notify(struct notifier_block *b,
|
||||
static struct notifier_block acpi_processor_latency_notifier = {
|
||||
.notifier_call = acpi_processor_latency_notify,
|
||||
};
|
||||
|
||||
#endif
|
||||
|
||||
#else /* CONFIG_CPU_IDLE */
|
||||
|
||||
/**
|
||||
* acpi_idle_bm_check - checks if bus master activity was detected
|
||||
*/
|
||||
static int acpi_idle_bm_check(void)
|
||||
{
|
||||
u32 bm_status = 0;
|
||||
|
||||
acpi_get_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
|
||||
if (bm_status)
|
||||
acpi_set_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
|
||||
/*
|
||||
* PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
|
||||
* the true state of bus mastering activity; forcing us to
|
||||
* manually check the BMIDEA bit of each IDE channel.
|
||||
*/
|
||||
else if (errata.piix4.bmisx) {
|
||||
if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
|
||||
|| (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
|
||||
bm_status = 1;
|
||||
}
|
||||
return bm_status;
|
||||
}
|
||||
|
||||
/**
|
||||
* acpi_idle_update_bm_rld - updates the BM_RLD bit depending on target state
|
||||
* @pr: the processor
|
||||
* @target: the new target state
|
||||
*/
|
||||
static inline void acpi_idle_update_bm_rld(struct acpi_processor *pr,
|
||||
struct acpi_processor_cx *target)
|
||||
{
|
||||
if (pr->flags.bm_rld_set && target->type != ACPI_STATE_C3) {
|
||||
acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
|
||||
pr->flags.bm_rld_set = 0;
|
||||
}
|
||||
|
||||
if (!pr->flags.bm_rld_set && target->type == ACPI_STATE_C3) {
|
||||
acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
|
||||
pr->flags.bm_rld_set = 1;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* acpi_idle_do_entry - a helper function that does C2 and C3 type entry
|
||||
* @cx: cstate data
|
||||
*/
|
||||
static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
|
||||
{
|
||||
if (cx->space_id == ACPI_CSTATE_FFH) {
|
||||
/* Call into architectural FFH based C-state */
|
||||
acpi_processor_ffh_cstate_enter(cx);
|
||||
} else {
|
||||
int unused;
|
||||
/* IO port based C-state */
|
||||
inb(cx->address);
|
||||
/* Dummy wait op - must do something useless after P_LVL2 read
|
||||
because chipsets cannot guarantee that STPCLK# signal
|
||||
gets asserted in time to freeze execution properly. */
|
||||
unused = inl(acpi_gbl_FADT.xpm_timer_block.address);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* acpi_idle_enter_c1 - enters an ACPI C1 state-type
|
||||
* @dev: the target CPU
|
||||
* @state: the state data
|
||||
*
|
||||
* This is equivalent to the HALT instruction.
|
||||
*/
|
||||
static int acpi_idle_enter_c1(struct cpuidle_device *dev,
|
||||
struct cpuidle_state *state)
|
||||
{
|
||||
struct acpi_processor *pr;
|
||||
struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
|
||||
pr = processors[smp_processor_id()];
|
||||
|
||||
if (unlikely(!pr))
|
||||
return 0;
|
||||
|
||||
if (pr->flags.bm_check)
|
||||
acpi_idle_update_bm_rld(pr, cx);
|
||||
|
||||
current_thread_info()->status &= ~TS_POLLING;
|
||||
/*
|
||||
* TS_POLLING-cleared state must be visible before we test
|
||||
* NEED_RESCHED:
|
||||
*/
|
||||
smp_mb();
|
||||
if (!need_resched())
|
||||
safe_halt();
|
||||
current_thread_info()->status |= TS_POLLING;
|
||||
|
||||
cx->usage++;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* acpi_idle_enter_simple - enters an ACPI state without BM handling
|
||||
* @dev: the target CPU
|
||||
* @state: the state data
|
||||
*/
|
||||
static int acpi_idle_enter_simple(struct cpuidle_device *dev,
|
||||
struct cpuidle_state *state)
|
||||
{
|
||||
struct acpi_processor *pr;
|
||||
struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
|
||||
u32 t1, t2;
|
||||
pr = processors[smp_processor_id()];
|
||||
|
||||
if (unlikely(!pr))
|
||||
return 0;
|
||||
|
||||
if (acpi_idle_suspend)
|
||||
return(acpi_idle_enter_c1(dev, state));
|
||||
|
||||
if (pr->flags.bm_check)
|
||||
acpi_idle_update_bm_rld(pr, cx);
|
||||
|
||||
local_irq_disable();
|
||||
current_thread_info()->status &= ~TS_POLLING;
|
||||
/*
|
||||
* TS_POLLING-cleared state must be visible before we test
|
||||
* NEED_RESCHED:
|
||||
*/
|
||||
smp_mb();
|
||||
|
||||
if (unlikely(need_resched())) {
|
||||
current_thread_info()->status |= TS_POLLING;
|
||||
local_irq_enable();
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (cx->type == ACPI_STATE_C3)
|
||||
ACPI_FLUSH_CPU_CACHE();
|
||||
|
||||
t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
|
||||
acpi_state_timer_broadcast(pr, cx, 1);
|
||||
acpi_idle_do_entry(cx);
|
||||
t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
|
||||
|
||||
#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
|
||||
/* TSC could halt in idle, so notify users */
|
||||
mark_tsc_unstable("TSC halts in idle");;
|
||||
#endif
|
||||
|
||||
local_irq_enable();
|
||||
current_thread_info()->status |= TS_POLLING;
|
||||
|
||||
cx->usage++;
|
||||
|
||||
acpi_state_timer_broadcast(pr, cx, 0);
|
||||
cx->time += ticks_elapsed(t1, t2);
|
||||
return ticks_elapsed_in_us(t1, t2);
|
||||
}
|
||||
|
||||
static int c3_cpu_count;
|
||||
static DEFINE_SPINLOCK(c3_lock);
|
||||
|
||||
/**
|
||||
* acpi_idle_enter_bm - enters C3 with proper BM handling
|
||||
* @dev: the target CPU
|
||||
* @state: the state data
|
||||
*
|
||||
* If BM is detected, the deepest non-C3 idle state is entered instead.
|
||||
*/
|
||||
static int acpi_idle_enter_bm(struct cpuidle_device *dev,
|
||||
struct cpuidle_state *state)
|
||||
{
|
||||
struct acpi_processor *pr;
|
||||
struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
|
||||
u32 t1, t2;
|
||||
pr = processors[smp_processor_id()];
|
||||
|
||||
if (unlikely(!pr))
|
||||
return 0;
|
||||
|
||||
if (acpi_idle_suspend)
|
||||
return(acpi_idle_enter_c1(dev, state));
|
||||
|
||||
local_irq_disable();
|
||||
current_thread_info()->status &= ~TS_POLLING;
|
||||
/*
|
||||
* TS_POLLING-cleared state must be visible before we test
|
||||
* NEED_RESCHED:
|
||||
*/
|
||||
smp_mb();
|
||||
|
||||
if (unlikely(need_resched())) {
|
||||
current_thread_info()->status |= TS_POLLING;
|
||||
local_irq_enable();
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Must be done before busmaster disable as we might need to
|
||||
* access HPET !
|
||||
*/
|
||||
acpi_state_timer_broadcast(pr, cx, 1);
|
||||
|
||||
if (acpi_idle_bm_check()) {
|
||||
cx = pr->power.bm_state;
|
||||
|
||||
acpi_idle_update_bm_rld(pr, cx);
|
||||
|
||||
t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
|
||||
acpi_idle_do_entry(cx);
|
||||
t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
|
||||
} else {
|
||||
acpi_idle_update_bm_rld(pr, cx);
|
||||
|
||||
spin_lock(&c3_lock);
|
||||
c3_cpu_count++;
|
||||
/* Disable bus master arbitration when all CPUs are in C3 */
|
||||
if (c3_cpu_count == num_online_cpus())
|
||||
acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
|
||||
spin_unlock(&c3_lock);
|
||||
|
||||
t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
|
||||
acpi_idle_do_entry(cx);
|
||||
t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
|
||||
|
||||
spin_lock(&c3_lock);
|
||||
/* Re-enable bus master arbitration */
|
||||
if (c3_cpu_count == num_online_cpus())
|
||||
acpi_set_register(ACPI_BITREG_ARB_DISABLE, 0);
|
||||
c3_cpu_count--;
|
||||
spin_unlock(&c3_lock);
|
||||
}
|
||||
|
||||
#if defined (CONFIG_GENERIC_TIME) && defined (CONFIG_X86_TSC)
|
||||
/* TSC could halt in idle, so notify users */
|
||||
mark_tsc_unstable("TSC halts in idle");
|
||||
#endif
|
||||
|
||||
local_irq_enable();
|
||||
current_thread_info()->status |= TS_POLLING;
|
||||
|
||||
cx->usage++;
|
||||
|
||||
acpi_state_timer_broadcast(pr, cx, 0);
|
||||
cx->time += ticks_elapsed(t1, t2);
|
||||
return ticks_elapsed_in_us(t1, t2);
|
||||
}
|
||||
|
||||
struct cpuidle_driver acpi_idle_driver = {
|
||||
.name = "acpi_idle",
|
||||
.owner = THIS_MODULE,
|
||||
};
|
||||
|
||||
/**
|
||||
* acpi_processor_setup_cpuidle - prepares and configures CPUIDLE
|
||||
* @pr: the ACPI processor
|
||||
*/
|
||||
static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
|
||||
{
|
||||
int i, count = 0;
|
||||
struct acpi_processor_cx *cx;
|
||||
struct cpuidle_state *state;
|
||||
struct cpuidle_device *dev = &pr->power.dev;
|
||||
|
||||
if (!pr->flags.power_setup_done)
|
||||
return -EINVAL;
|
||||
|
||||
if (pr->flags.power == 0) {
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
|
||||
cx = &pr->power.states[i];
|
||||
state = &dev->states[count];
|
||||
|
||||
if (!cx->valid)
|
||||
continue;
|
||||
|
||||
#ifdef CONFIG_HOTPLUG_CPU
|
||||
if ((cx->type != ACPI_STATE_C1) && (num_online_cpus() > 1) &&
|
||||
!pr->flags.has_cst &&
|
||||
!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
|
||||
continue;
|
||||
#endif
|
||||
cpuidle_set_statedata(state, cx);
|
||||
|
||||
snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
|
||||
state->exit_latency = cx->latency;
|
||||
state->target_residency = cx->latency * 6;
|
||||
state->power_usage = cx->power;
|
||||
|
||||
state->flags = 0;
|
||||
switch (cx->type) {
|
||||
case ACPI_STATE_C1:
|
||||
state->flags |= CPUIDLE_FLAG_SHALLOW;
|
||||
state->enter = acpi_idle_enter_c1;
|
||||
break;
|
||||
|
||||
case ACPI_STATE_C2:
|
||||
state->flags |= CPUIDLE_FLAG_BALANCED;
|
||||
state->flags |= CPUIDLE_FLAG_TIME_VALID;
|
||||
state->enter = acpi_idle_enter_simple;
|
||||
break;
|
||||
|
||||
case ACPI_STATE_C3:
|
||||
state->flags |= CPUIDLE_FLAG_DEEP;
|
||||
state->flags |= CPUIDLE_FLAG_TIME_VALID;
|
||||
state->flags |= CPUIDLE_FLAG_CHECK_BM;
|
||||
state->enter = pr->flags.bm_check ?
|
||||
acpi_idle_enter_bm :
|
||||
acpi_idle_enter_simple;
|
||||
break;
|
||||
}
|
||||
|
||||
count++;
|
||||
}
|
||||
|
||||
dev->state_count = count;
|
||||
|
||||
if (!count)
|
||||
return -EINVAL;
|
||||
|
||||
/* find the deepest state that can handle active BM */
|
||||
if (pr->flags.bm_check) {
|
||||
for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++)
|
||||
if (pr->power.states[i].type == ACPI_STATE_C3)
|
||||
break;
|
||||
pr->power.bm_state = &pr->power.states[i-1];
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int acpi_processor_cst_has_changed(struct acpi_processor *pr)
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (!pr)
|
||||
return -EINVAL;
|
||||
|
||||
if (nocst) {
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
if (!pr->flags.power_setup_done)
|
||||
return -ENODEV;
|
||||
|
||||
cpuidle_pause_and_lock();
|
||||
cpuidle_disable_device(&pr->power.dev);
|
||||
acpi_processor_get_power_info(pr);
|
||||
acpi_processor_setup_cpuidle(pr);
|
||||
ret = cpuidle_enable_device(&pr->power.dev);
|
||||
cpuidle_resume_and_unlock();
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
#endif /* CONFIG_CPU_IDLE */
|
||||
|
||||
int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
|
||||
struct acpi_device *device)
|
||||
{
|
||||
@ -1267,7 +1663,7 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
|
||||
"ACPI: processor limited to max C-state %d\n",
|
||||
max_cstate);
|
||||
first_run++;
|
||||
#ifdef CONFIG_SMP
|
||||
#if !defined (CONFIG_CPU_IDLE) && defined (CONFIG_SMP)
|
||||
register_latency_notifier(&acpi_processor_latency_notifier);
|
||||
#endif
|
||||
}
|
||||
@ -1285,6 +1681,7 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
|
||||
}
|
||||
|
||||
acpi_processor_get_power_info(pr);
|
||||
pr->flags.power_setup_done = 1;
|
||||
|
||||
/*
|
||||
* Install the idle handler if processor power management is supported.
|
||||
@ -1292,6 +1689,13 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
|
||||
* platforms that only support C1.
|
||||
*/
|
||||
if ((pr->flags.power) && (!boot_option_idle_override)) {
|
||||
#ifdef CONFIG_CPU_IDLE
|
||||
acpi_processor_setup_cpuidle(pr);
|
||||
pr->power.dev.cpu = pr->id;
|
||||
if (cpuidle_register_device(&pr->power.dev))
|
||||
return -EIO;
|
||||
#endif
|
||||
|
||||
printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
|
||||
for (i = 1; i <= pr->power.count; i++)
|
||||
if (pr->power.states[i].valid)
|
||||
@ -1299,10 +1703,12 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
|
||||
pr->power.states[i].type);
|
||||
printk(")\n");
|
||||
|
||||
#ifndef CONFIG_CPU_IDLE
|
||||
if (pr->id == 0) {
|
||||
pm_idle_save = pm_idle;
|
||||
pm_idle = acpi_processor_idle;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
/* 'power' [R] */
|
||||
@ -1316,21 +1722,24 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
|
||||
entry->owner = THIS_MODULE;
|
||||
}
|
||||
|
||||
pr->flags.power_setup_done = 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int acpi_processor_power_exit(struct acpi_processor *pr,
|
||||
struct acpi_device *device)
|
||||
{
|
||||
|
||||
#ifdef CONFIG_CPU_IDLE
|
||||
if ((pr->flags.power) && (!boot_option_idle_override))
|
||||
cpuidle_unregister_device(&pr->power.dev);
|
||||
#endif
|
||||
pr->flags.power_setup_done = 0;
|
||||
|
||||
if (acpi_device_dir(device))
|
||||
remove_proc_entry(ACPI_PROCESSOR_FILE_POWER,
|
||||
acpi_device_dir(device));
|
||||
|
||||
#ifndef CONFIG_CPU_IDLE
|
||||
|
||||
/* Unregister the idle handler when processor #0 is removed. */
|
||||
if (pr->id == 0) {
|
||||
pm_idle = pm_idle_save;
|
||||
@ -1345,6 +1754,7 @@ int acpi_processor_power_exit(struct acpi_processor *pr,
|
||||
unregister_latency_notifier(&acpi_processor_latency_notifier);
|
||||
#endif
|
||||
}
|
||||
#endif
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
drivers/acpi/sbs.c (1901 lines): diff suppressed because it is too large
drivers/acpi/sbshc.c (309 lines): new file
@ -0,0 +1,309 @@
|
||||
/*
|
||||
* SMBus driver for ACPI Embedded Controller (v0.1)
|
||||
*
|
||||
* Copyright (c) 2007 Alexey Starikovskiy
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation version 2.
|
||||
*/
|
||||
|
||||
#include <acpi/acpi_bus.h>
|
||||
#include <acpi/acpi_drivers.h>
|
||||
#include <acpi/actypes.h>
|
||||
#include <linux/wait.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include "sbshc.h"
|
||||
|
||||
#define ACPI_SMB_HC_CLASS "smbus_host_controller"
|
||||
#define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
|
||||
|
||||
struct acpi_smb_hc {
|
||||
struct acpi_ec *ec;
|
||||
struct mutex lock;
|
||||
wait_queue_head_t wait;
|
||||
u8 offset;
|
||||
u8 query_bit;
|
||||
smbus_alarm_callback callback;
|
||||
void *context;
|
||||
};
|
||||
|
||||
static int acpi_smbus_hc_add(struct acpi_device *device);
|
||||
static int acpi_smbus_hc_remove(struct acpi_device *device, int type);
|
||||
|
||||
static const struct acpi_device_id sbs_device_ids[] = {
|
||||
{"ACPI0001", 0},
|
||||
{"ACPI0005", 0},
|
||||
{"", 0},
|
||||
};
|
||||
|
||||
MODULE_DEVICE_TABLE(acpi, sbs_device_ids);
|
||||
|
||||
static struct acpi_driver acpi_smb_hc_driver = {
|
||||
.name = "smbus_hc",
|
||||
.class = ACPI_SMB_HC_CLASS,
|
||||
.ids = sbs_device_ids,
|
||||
.ops = {
|
||||
.add = acpi_smbus_hc_add,
|
||||
.remove = acpi_smbus_hc_remove,
|
||||
},
|
||||
};
|
||||
|
||||
union acpi_smb_status {
|
||||
u8 raw;
|
||||
struct {
|
||||
u8 status:5;
|
||||
u8 reserved:1;
|
||||
u8 alarm:1;
|
||||
u8 done:1;
|
||||
} fields;
|
||||
};
|
||||
|
||||
enum acpi_smb_status_codes {
|
||||
SMBUS_OK = 0,
|
||||
SMBUS_UNKNOWN_FAILURE = 0x07,
|
||||
SMBUS_DEVICE_ADDRESS_NACK = 0x10,
|
||||
SMBUS_DEVICE_ERROR = 0x11,
|
||||
SMBUS_DEVICE_COMMAND_ACCESS_DENIED = 0x12,
|
||||
SMBUS_UNKNOWN_ERROR = 0x13,
|
||||
SMBUS_DEVICE_ACCESS_DENIED = 0x17,
|
||||
SMBUS_TIMEOUT = 0x18,
|
||||
SMBUS_HOST_UNSUPPORTED_PROTOCOL = 0x19,
|
||||
SMBUS_BUSY = 0x1a,
|
||||
SMBUS_PEC_ERROR = 0x1f,
|
||||
};
|
||||
|
||||
enum acpi_smb_offset {
|
||||
ACPI_SMB_PROTOCOL = 0, /* protocol, PEC */
|
||||
ACPI_SMB_STATUS = 1, /* status */
|
||||
ACPI_SMB_ADDRESS = 2, /* address */
|
||||
ACPI_SMB_COMMAND = 3, /* command */
|
||||
ACPI_SMB_DATA = 4, /* 32 data registers */
|
||||
ACPI_SMB_BLOCK_COUNT = 0x24, /* number of data bytes */
|
||||
ACPI_SMB_ALARM_ADDRESS = 0x25, /* alarm address */
|
||||
ACPI_SMB_ALARM_DATA = 0x26, /* 2 bytes alarm data */
|
||||
};
|
||||
|
||||
static inline int smb_hc_read(struct acpi_smb_hc *hc, u8 address, u8 *data)
|
||||
{
|
||||
return ec_read(hc->offset + address, data);
|
||||
}
|
||||
|
||||
static inline int smb_hc_write(struct acpi_smb_hc *hc, u8 address, u8 data)
|
||||
{
|
||||
return ec_write(hc->offset + address, data);
|
||||
}
|
||||
|
||||
static inline int smb_check_done(struct acpi_smb_hc *hc)
|
||||
{
|
||||
union acpi_smb_status status = {.raw = 0};
|
||||
smb_hc_read(hc, ACPI_SMB_STATUS, &status.raw);
|
||||
return status.fields.done && (status.fields.status == SMBUS_OK);
|
||||
}
|
||||
|
||||
static int wait_transaction_complete(struct acpi_smb_hc *hc, int timeout)
|
||||
{
|
||||
if (wait_event_timeout(hc->wait, smb_check_done(hc),
|
||||
msecs_to_jiffies(timeout)))
|
||||
return 0;
|
||||
else
|
||||
return -ETIME;
|
||||
}
|
||||
|
||||
int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol, u8 address,
|
||||
u8 command, u8 *data, u8 length)
|
||||
{
|
||||
int ret = -EFAULT, i;
|
||||
u8 temp, sz = 0;
|
||||
|
||||
mutex_lock(&hc->lock);
|
||||
if (smb_hc_read(hc, ACPI_SMB_PROTOCOL, &temp))
|
||||
goto end;
|
||||
if (temp) {
|
||||
ret = -EBUSY;
|
||||
goto end;
|
||||
}
|
||||
smb_hc_write(hc, ACPI_SMB_COMMAND, command);
|
||||
smb_hc_write(hc, ACPI_SMB_COMMAND, command);
|
||||
if (!(protocol & 0x01)) {
|
||||
smb_hc_write(hc, ACPI_SMB_BLOCK_COUNT, length);
|
||||
for (i = 0; i < length; ++i)
|
||||
smb_hc_write(hc, ACPI_SMB_DATA + i, data[i]);
|
||||
}
|
||||
smb_hc_write(hc, ACPI_SMB_ADDRESS, address << 1);
|
||||
smb_hc_write(hc, ACPI_SMB_PROTOCOL, protocol);
|
||||
/*
|
||||
* Wait for completion. Save the status code, data size,
|
||||
* and data into the return package (if required by the protocol).
|
||||
*/
|
||||
ret = wait_transaction_complete(hc, 1000);
|
||||
if (ret || !(protocol & 0x01))
|
||||
goto end;
|
||||
switch (protocol) {
|
||||
case SMBUS_RECEIVE_BYTE:
|
||||
case SMBUS_READ_BYTE:
|
||||
sz = 1;
|
||||
break;
|
||||
case SMBUS_READ_WORD:
|
||||
sz = 2;
|
||||
break;
|
||||
case SMBUS_READ_BLOCK:
|
||||
if (smb_hc_read(hc, ACPI_SMB_BLOCK_COUNT, &sz)) {
|
||||
ret = -EFAULT;
|
||||
goto end;
|
||||
}
|
||||
sz &= 0x1f;
|
||||
break;
|
||||
}
|
||||
for (i = 0; i < sz; ++i)
|
||||
smb_hc_read(hc, ACPI_SMB_DATA + i, &data[i]);
|
||||
end:
|
||||
mutex_unlock(&hc->lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
int acpi_smbus_read(struct acpi_smb_hc *hc, u8 protocol, u8 address,
|
||||
u8 command, u8 *data)
|
||||
{
|
||||
return acpi_smbus_transaction(hc, protocol, address, command, data, 0);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL_GPL(acpi_smbus_read);
|
||||
|
||||
int acpi_smbus_write(struct acpi_smb_hc *hc, u8 protocol, u8 address,
|
||||
u8 command, u8 *data, u8 length)
|
||||
{
|
||||
return acpi_smbus_transaction(hc, protocol, address, command, data, length);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL_GPL(acpi_smbus_write);
|
||||
|
||||
int acpi_smbus_register_callback(struct acpi_smb_hc *hc,
|
||||
smbus_alarm_callback callback, void *context)
|
||||
{
|
||||
mutex_lock(&hc->lock);
|
||||
hc->callback = callback;
|
||||
hc->context = context;
|
||||
mutex_unlock(&hc->lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL_GPL(acpi_smbus_register_callback);
|
||||
|
||||
int acpi_smbus_unregister_callback(struct acpi_smb_hc *hc)
|
||||
{
|
||||
mutex_lock(&hc->lock);
|
||||
hc->callback = NULL;
|
||||
hc->context = NULL;
|
||||
mutex_unlock(&hc->lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL_GPL(acpi_smbus_unregister_callback);
|
||||
|
||||
static void acpi_smbus_callback(void *context)
|
||||
{
|
||||
struct acpi_smb_hc *hc = context;
|
||||
|
||||
if (hc->callback)
|
||||
hc->callback(hc->context);
|
||||
}
|
||||
|
||||
static int smbus_alarm(void *context)
|
||||
{
|
||||
struct acpi_smb_hc *hc = context;
|
||||
union acpi_smb_status status;
|
||||
if (smb_hc_read(hc, ACPI_SMB_STATUS, &status.raw))
|
||||
return 0;
|
||||
/* Check if it is only a completion notify */
|
||||
if (status.fields.done)
|
||||
wake_up(&hc->wait);
|
||||
if (!status.fields.alarm)
|
||||
return 0;
|
||||
mutex_lock(&hc->lock);
|
||||
smb_hc_write(hc, ACPI_SMB_STATUS, status.raw);
|
||||
if (hc->callback)
|
||||
acpi_os_execute(OSL_GPE_HANDLER, acpi_smbus_callback, hc);
|
||||
mutex_unlock(&hc->lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
typedef int (*acpi_ec_query_func) (void *data);
|
||||
|
||||
extern int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
|
||||
acpi_handle handle, acpi_ec_query_func func,
|
||||
void *data);
|
||||
|
||||
static int acpi_smbus_hc_add(struct acpi_device *device)
|
||||
{
|
||||
int status;
|
||||
unsigned long val;
|
||||
struct acpi_smb_hc *hc;
|
||||
|
||||
if (!device)
|
||||
return -EINVAL;
|
||||
|
||||
status = acpi_evaluate_integer(device->handle, "_EC", NULL, &val);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
printk(KERN_ERR PREFIX "error obtaining _EC.\n");
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
strcpy(acpi_device_name(device), ACPI_SMB_HC_DEVICE_NAME);
|
||||
strcpy(acpi_device_class(device), ACPI_SMB_HC_CLASS);
|
||||
|
||||
hc = kzalloc(sizeof(struct acpi_smb_hc), GFP_KERNEL);
|
||||
if (!hc)
|
||||
return -ENOMEM;
|
||||
mutex_init(&hc->lock);
|
||||
init_waitqueue_head(&hc->wait);
|
||||
|
||||
hc->ec = acpi_driver_data(device->parent);
|
||||
hc->offset = (val >> 8) & 0xff;
|
||||
hc->query_bit = val & 0xff;
|
||||
acpi_driver_data(device) = hc;
|
||||
|
||||
acpi_ec_add_query_handler(hc->ec, hc->query_bit, NULL, smbus_alarm, hc);
|
||||
printk(KERN_INFO PREFIX "SBS HC: EC = 0x%p, offset = 0x%0x, query_bit = 0x%0x\n",
|
||||
hc->ec, hc->offset, hc->query_bit);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
extern void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit);
|
||||
|
||||
static int acpi_smbus_hc_remove(struct acpi_device *device, int type)
|
||||
{
|
||||
struct acpi_smb_hc *hc;
|
||||
|
||||
if (!device)
|
||||
return -EINVAL;
|
||||
|
||||
hc = acpi_driver_data(device);
|
||||
acpi_ec_remove_query_handler(hc->ec, hc->query_bit);
|
||||
kfree(hc);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __init acpi_smb_hc_init(void)
|
||||
{
|
||||
int result;
|
||||
|
||||
result = acpi_bus_register_driver(&acpi_smb_hc_driver);
|
||||
if (result < 0)
|
||||
return -ENODEV;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void __exit acpi_smb_hc_exit(void)
|
||||
{
|
||||
acpi_bus_unregister_driver(&acpi_smb_hc_driver);
|
||||
}
|
||||
|
||||
module_init(acpi_smb_hc_init);
|
||||
module_exit(acpi_smb_hc_exit);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_AUTHOR("Alexey Starikovskiy");
|
||||
MODULE_DESCRIPTION("ACPI SMBus HC driver");
27      drivers/acpi/sbshc.h    (new file)
@ -0,0 +1,27 @@
struct acpi_smb_hc;
enum acpi_smb_protocol {
        SMBUS_WRITE_QUICK = 2,
        SMBUS_READ_QUICK = 3,
        SMBUS_SEND_BYTE = 4,
        SMBUS_RECEIVE_BYTE = 5,
        SMBUS_WRITE_BYTE = 6,
        SMBUS_READ_BYTE = 7,
        SMBUS_WRITE_WORD = 8,
        SMBUS_READ_WORD = 9,
        SMBUS_WRITE_BLOCK = 0xa,
        SMBUS_READ_BLOCK = 0xb,
        SMBUS_PROCESS_CALL = 0xc,
        SMBUS_BLOCK_PROCESS_CALL = 0xd,
};

static const u8 SMBUS_PEC = 0x80;

typedef void (*smbus_alarm_callback)(void *context);

extern int acpi_smbus_read(struct acpi_smb_hc *hc, u8 protocol, u8 address,
                           u8 command, u8 *data);
extern int acpi_smbus_write(struct acpi_smb_hc *hc, u8 protocol, u8 slave_address,
                            u8 command, u8 *data, u8 length);
extern int acpi_smbus_register_callback(struct acpi_smb_hc *hc,
                                        smbus_alarm_callback callback, void *context);
extern int acpi_smbus_unregister_callback(struct acpi_smb_hc *hc);
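For orientation, a minimal sketch of how a client (such as the SBS battery driver split out by this series) could use the interface declared above. The helper names and the 0x0b slave address are illustrative assumptions, not values taken from this commit, and data[0] is assumed to hold the low byte of a word read:

        #include "sbshc.h"

        /* Hypothetical example: read one SMBus word register from a device
         * at slave address 0x0b.  acpi_smbus_read() fills two data bytes
         * for SMBUS_READ_WORD. */
        static int read_word_register(struct acpi_smb_hc *hc, u8 command, u16 *value)
        {
                u8 data[2];
                int ret;

                ret = acpi_smbus_read(hc, SMBUS_READ_WORD, 0x0b, command, data);
                if (ret)
                        return ret;
                *value = (data[1] << 8) | data[0];
                return 0;
        }

        /* Alarm notifications from the host controller arrive here. */
        static void my_alarm_handler(void *context)
        {
                /* context is whatever was passed at registration time */
        }

        /* during setup:    acpi_smbus_register_callback(hc, my_alarm_handler, ctx);
         * during teardown: acpi_smbus_unregister_callback(hc); */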
@ -44,7 +44,6 @@ int acpi_sleep_prepare(u32 acpi_state)
        ACPI_FLUSH_CPU_CACHE();
        acpi_enable_wakeup_device_prep(acpi_state);
#endif
        acpi_gpe_sleep_prepare(acpi_state);
        acpi_enter_sleep_state_prep(acpi_state);
        return 0;
}
@ -268,6 +267,11 @@ static void acpi_hibernation_leave(void)

static void acpi_hibernation_finish(void)
{
        /*
         * If ACPI is not enabled by the BIOS and the boot kernel, we need to
         * enable it here.
         */
        acpi_enable();
        acpi_leave_sleep_state(ACPI_STATE_S4);
        acpi_disable_wakeup_device(ACPI_STATE_S4);

@ -5,6 +5,5 @@ extern int acpi_suspend (u32 state);
extern void acpi_enable_wakeup_device_prep(u8 sleep_state);
extern void acpi_enable_wakeup_device(u8 sleep_state);
extern void acpi_disable_wakeup_device(u8 sleep_state);
extern void acpi_gpe_sleep_prepare(u32 sleep_state);

extern int acpi_sleep_prepare(u32 acpi_state);
@ -64,36 +64,29 @@ void acpi_enable_wakeup_device(u8 sleep_state)
|
||||
ACPI_FUNCTION_TRACE("acpi_enable_wakeup_device");
|
||||
spin_lock(&acpi_device_lock);
|
||||
list_for_each_safe(node, next, &acpi_wakeup_device_list) {
|
||||
struct acpi_device *dev = container_of(node,
|
||||
struct acpi_device,
|
||||
wakeup_list);
|
||||
|
||||
struct acpi_device *dev =
|
||||
container_of(node, struct acpi_device, wakeup_list);
|
||||
if (!dev->wakeup.flags.valid)
|
||||
continue;
|
||||
/* If users want to disable run-wake GPE,
|
||||
* we only disable it for wake and leave it for runtime
|
||||
*/
|
||||
if (dev->wakeup.flags.run_wake && !dev->wakeup.state.enabled) {
|
||||
spin_unlock(&acpi_device_lock);
|
||||
acpi_set_gpe_type(dev->wakeup.gpe_device,
|
||||
dev->wakeup.gpe_number,
|
||||
ACPI_GPE_TYPE_RUNTIME);
|
||||
/* Re-enable it, since set_gpe_type will disable it */
|
||||
acpi_enable_gpe(dev->wakeup.gpe_device,
|
||||
dev->wakeup.gpe_number, ACPI_ISR);
|
||||
spin_lock(&acpi_device_lock);
|
||||
if (!dev->wakeup.state.enabled ||
|
||||
sleep_state > (u32) dev->wakeup.sleep_state) {
|
||||
if (dev->wakeup.flags.run_wake) {
|
||||
spin_unlock(&acpi_device_lock);
|
||||
/* set_gpe_type will disable GPE, leave it like that */
|
||||
acpi_set_gpe_type(dev->wakeup.gpe_device,
|
||||
dev->wakeup.gpe_number,
|
||||
ACPI_GPE_TYPE_RUNTIME);
|
||||
spin_lock(&acpi_device_lock);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!dev->wakeup.flags.valid ||
|
||||
!dev->wakeup.state.enabled ||
|
||||
(sleep_state > (u32) dev->wakeup.sleep_state))
|
||||
continue;
|
||||
|
||||
spin_unlock(&acpi_device_lock);
|
||||
/* run-wake GPE has been enabled */
|
||||
if (!dev->wakeup.flags.run_wake)
|
||||
acpi_enable_gpe(dev->wakeup.gpe_device,
|
||||
dev->wakeup.gpe_number, ACPI_ISR);
|
||||
dev->wakeup.state.active = 1;
|
||||
spin_lock(&acpi_device_lock);
|
||||
}
|
||||
spin_unlock(&acpi_device_lock);
|
||||
@ -112,27 +105,26 @@ void acpi_disable_wakeup_device(u8 sleep_state)
|
||||
|
||||
spin_lock(&acpi_device_lock);
|
||||
list_for_each_safe(node, next, &acpi_wakeup_device_list) {
|
||||
struct acpi_device *dev = container_of(node,
|
||||
struct acpi_device,
|
||||
wakeup_list);
|
||||
struct acpi_device *dev =
|
||||
container_of(node, struct acpi_device, wakeup_list);
|
||||
|
||||
if (dev->wakeup.flags.run_wake && !dev->wakeup.state.enabled) {
|
||||
spin_unlock(&acpi_device_lock);
|
||||
acpi_set_gpe_type(dev->wakeup.gpe_device,
|
||||
dev->wakeup.gpe_number,
|
||||
ACPI_GPE_TYPE_WAKE_RUN);
|
||||
/* Re-enable it, since set_gpe_type will disable it */
|
||||
acpi_enable_gpe(dev->wakeup.gpe_device,
|
||||
dev->wakeup.gpe_number, ACPI_NOT_ISR);
|
||||
spin_lock(&acpi_device_lock);
|
||||
if (!dev->wakeup.flags.valid)
|
||||
continue;
|
||||
if (!dev->wakeup.state.enabled ||
|
||||
sleep_state > (u32) dev->wakeup.sleep_state) {
|
||||
if (dev->wakeup.flags.run_wake) {
|
||||
spin_unlock(&acpi_device_lock);
|
||||
acpi_set_gpe_type(dev->wakeup.gpe_device,
|
||||
dev->wakeup.gpe_number,
|
||||
ACPI_GPE_TYPE_WAKE_RUN);
|
||||
/* Re-enable it, since set_gpe_type will disable it */
|
||||
acpi_enable_gpe(dev->wakeup.gpe_device,
|
||||
dev->wakeup.gpe_number, ACPI_NOT_ISR);
|
||||
spin_lock(&acpi_device_lock);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!dev->wakeup.flags.valid ||
|
||||
!dev->wakeup.state.active ||
|
||||
(sleep_state > (u32) dev->wakeup.sleep_state))
|
||||
continue;
|
||||
|
||||
spin_unlock(&acpi_device_lock);
|
||||
acpi_disable_wakeup_device_power(dev);
|
||||
/* Never disable run-wake GPE */
|
||||
@ -142,7 +134,6 @@ void acpi_disable_wakeup_device(u8 sleep_state)
|
||||
acpi_clear_gpe(dev->wakeup.gpe_device,
|
||||
dev->wakeup.gpe_number, ACPI_NOT_ISR);
|
||||
}
|
||||
dev->wakeup.state.active = 0;
|
||||
spin_lock(&acpi_device_lock);
|
||||
}
|
||||
spin_unlock(&acpi_device_lock);
|
||||
@ -160,48 +151,20 @@ static int __init acpi_wakeup_device_init(void)
|
||||
struct acpi_device *dev = container_of(node,
|
||||
struct acpi_device,
|
||||
wakeup_list);
|
||||
|
||||
/* In case user doesn't load button driver */
|
||||
if (dev->wakeup.flags.run_wake && !dev->wakeup.state.enabled) {
|
||||
spin_unlock(&acpi_device_lock);
|
||||
acpi_set_gpe_type(dev->wakeup.gpe_device,
|
||||
dev->wakeup.gpe_number,
|
||||
ACPI_GPE_TYPE_WAKE_RUN);
|
||||
acpi_enable_gpe(dev->wakeup.gpe_device,
|
||||
dev->wakeup.gpe_number, ACPI_NOT_ISR);
|
||||
dev->wakeup.state.enabled = 1;
|
||||
spin_lock(&acpi_device_lock);
|
||||
}
|
||||
if (!dev->wakeup.flags.run_wake || dev->wakeup.state.enabled)
|
||||
continue;
|
||||
spin_unlock(&acpi_device_lock);
|
||||
acpi_set_gpe_type(dev->wakeup.gpe_device,
|
||||
dev->wakeup.gpe_number,
|
||||
ACPI_GPE_TYPE_WAKE_RUN);
|
||||
acpi_enable_gpe(dev->wakeup.gpe_device,
|
||||
dev->wakeup.gpe_number, ACPI_NOT_ISR);
|
||||
dev->wakeup.state.enabled = 1;
|
||||
spin_lock(&acpi_device_lock);
|
||||
}
|
||||
spin_unlock(&acpi_device_lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
late_initcall(acpi_wakeup_device_init);
|
||||
|
||||
/*
|
||||
* Disable all wakeup GPEs before entering requested sleep state.
|
||||
* @sleep_state: ACPI state
|
||||
* Since acpi_enter_sleep_state() will disable all
|
||||
* RUNTIME GPEs, we simply mark all GPES that
|
||||
* are not enabled for wakeup from requested state as RUNTIME.
|
||||
*/
|
||||
void acpi_gpe_sleep_prepare(u32 sleep_state)
|
||||
{
|
||||
struct list_head *node, *next;
|
||||
|
||||
list_for_each_safe(node, next, &acpi_wakeup_device_list) {
|
||||
struct acpi_device *dev = container_of(node,
|
||||
struct acpi_device,
|
||||
wakeup_list);
|
||||
|
||||
/* The GPE can wakeup system from this state, don't touch it */
|
||||
if ((u32) dev->wakeup.sleep_state >= sleep_state)
|
||||
continue;
|
||||
/* acpi_set_gpe_type will automatically disable GPE */
|
||||
acpi_set_gpe_type(dev->wakeup.gpe_device,
|
||||
dev->wakeup.gpe_number,
|
||||
ACPI_GPE_TYPE_RUNTIME);
|
||||
}
|
||||
}
|
||||
|
@ -400,7 +400,7 @@ acpi_tb_parse_root_table(acpi_physical_address rsdp_address, u8 flags)
        u32 table_count;
        struct acpi_table_header *table;
        acpi_physical_address address;
        acpi_physical_address rsdt_address;
        acpi_physical_address uninitialized_var(rsdt_address);
        u32 length;
        u8 *table_entry;
        acpi_status status;
@ -195,6 +195,7 @@ struct acpi_thermal {
|
||||
struct acpi_thermal_trips trips;
|
||||
struct acpi_handle_list devices;
|
||||
struct timer_list timer;
|
||||
struct mutex lock;
|
||||
};
|
||||
|
||||
static const struct file_operations acpi_thermal_state_fops = {
|
||||
@ -711,6 +712,7 @@ static void acpi_thermal_check(void *data)
|
||||
int result = 0;
|
||||
struct acpi_thermal *tz = data;
|
||||
unsigned long sleep_time = 0;
|
||||
unsigned long timeout_jiffies = 0;
|
||||
int i = 0;
|
||||
struct acpi_thermal_state state;
|
||||
|
||||
@ -720,11 +722,15 @@ static void acpi_thermal_check(void *data)
|
||||
return;
|
||||
}
|
||||
|
||||
/* Check if someone else is already running */
|
||||
if (!mutex_trylock(&tz->lock))
|
||||
return;
|
||||
|
||||
state = tz->state;
|
||||
|
||||
result = acpi_thermal_get_temperature(tz);
|
||||
if (result)
|
||||
return;
|
||||
goto unlock;
|
||||
|
||||
memset(&tz->state, 0, sizeof(tz->state));
|
||||
|
||||
@ -787,10 +793,13 @@ static void acpi_thermal_check(void *data)
|
||||
* a thermal event occurs). Note that _TSP and _TZD values are
|
||||
* given in 1/10th seconds (we must convert to milliseconds).
|
||||
*/
|
||||
if (tz->state.passive)
|
||||
if (tz->state.passive) {
|
||||
sleep_time = tz->trips.passive.tsp * 100;
|
||||
else if (tz->polling_frequency > 0)
|
||||
timeout_jiffies = jiffies + (HZ * sleep_time) / 1000;
|
||||
} else if (tz->polling_frequency > 0) {
|
||||
sleep_time = tz->polling_frequency * 100;
|
||||
timeout_jiffies = round_jiffies(jiffies + (HZ * sleep_time) / 1000);
|
||||
}
|
||||
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "%s: temperature[%lu] sleep[%lu]\n",
|
||||
tz->name, tz->temperature, sleep_time));
|
||||
@ -804,17 +813,16 @@ static void acpi_thermal_check(void *data)
|
||||
del_timer(&(tz->timer));
|
||||
} else {
|
||||
if (timer_pending(&(tz->timer)))
|
||||
mod_timer(&(tz->timer),
|
||||
jiffies + (HZ * sleep_time) / 1000);
|
||||
mod_timer(&(tz->timer), timeout_jiffies);
|
||||
else {
|
||||
tz->timer.data = (unsigned long)tz;
|
||||
tz->timer.function = acpi_thermal_run;
|
||||
tz->timer.expires = jiffies + (HZ * sleep_time) / 1000;
|
||||
tz->timer.expires = timeout_jiffies;
|
||||
add_timer(&(tz->timer));
|
||||
}
|
||||
}
|
||||
|
||||
return;
|
||||
unlock:
|
||||
mutex_unlock(&tz->lock);
|
||||
}
|
||||
|
||||
/* --------------------------------------------------------------------------
|
||||
@ -1251,7 +1259,7 @@ static int acpi_thermal_add(struct acpi_device *device)
|
||||
strcpy(acpi_device_name(device), ACPI_THERMAL_DEVICE_NAME);
|
||||
strcpy(acpi_device_class(device), ACPI_THERMAL_CLASS);
|
||||
acpi_driver_data(device) = tz;
|
||||
|
||||
mutex_init(&tz->lock);
|
||||
result = acpi_thermal_get_info(tz);
|
||||
if (result)
|
||||
goto end;
|
||||
@ -1321,7 +1329,7 @@ static int acpi_thermal_remove(struct acpi_device *device, int type)
|
||||
}
|
||||
|
||||
acpi_thermal_remove_fs(device);
|
||||
|
||||
mutex_destroy(&tz->lock);
|
||||
kfree(tz);
|
||||
return 0;
|
||||
}
|
||||
|
@ -409,14 +409,17 @@ acpi_video_device_lcd_query_levels(struct acpi_video_device *device,
|
||||
static int
|
||||
acpi_video_device_lcd_set_level(struct acpi_video_device *device, int level)
|
||||
{
|
||||
int status;
|
||||
int status = AE_OK;
|
||||
union acpi_object arg0 = { ACPI_TYPE_INTEGER };
|
||||
struct acpi_object_list args = { 1, &arg0 };
|
||||
|
||||
|
||||
arg0.integer.value = level;
|
||||
status = acpi_evaluate_object(device->dev->handle, "_BCM", &args, NULL);
|
||||
|
||||
if (device->cap._BCM)
|
||||
status = acpi_evaluate_object(device->dev->handle, "_BCM",
|
||||
&args, NULL);
|
||||
device->brightness->curr = level;
|
||||
return status;
|
||||
}
|
||||
|
||||
@ -424,11 +427,11 @@ static int
|
||||
acpi_video_device_lcd_get_level_current(struct acpi_video_device *device,
|
||||
unsigned long *level)
|
||||
{
|
||||
int status;
|
||||
|
||||
status = acpi_evaluate_integer(device->dev->handle, "_BQC", NULL, level);
|
||||
|
||||
return status;
|
||||
if (device->cap._BQC)
|
||||
return acpi_evaluate_integer(device->dev->handle, "_BQC", NULL,
|
||||
level);
|
||||
*level = device->brightness->curr;
|
||||
return AE_OK;
|
||||
}
|
||||
|
||||
static int
|
||||
@ -1633,9 +1636,20 @@ static int
|
||||
acpi_video_get_next_level(struct acpi_video_device *device,
|
||||
u32 level_current, u32 event)
|
||||
{
|
||||
int min, max, min_above, max_below, i, l;
|
||||
int min, max, min_above, max_below, i, l, delta = 255;
|
||||
max = max_below = 0;
|
||||
min = min_above = 255;
|
||||
/* Find closest level to level_current */
|
||||
for (i = 0; i < device->brightness->count; i++) {
|
||||
l = device->brightness->levels[i];
|
||||
if (abs(l - level_current) < abs(delta)) {
|
||||
delta = l - level_current;
|
||||
if (!delta)
|
||||
break;
|
||||
}
|
||||
}
|
||||
/* Adjust level_current to closest available level */
|
||||
level_current += delta;
|
||||
for (i = 0; i < device->brightness->count; i++) {
|
||||
l = device->brightness->levels[i];
|
||||
if (l < min)
|
||||
20      drivers/cpuidle/Kconfig (new file)
@ -0,0 +1,20 @@

config CPU_IDLE
        bool "CPU idle PM support"
        help
          CPU idle is a generic framework for supporting software-controlled
          idle processor power management. It includes modular cross-platform
          governors that can be swapped during runtime.

          If you're using a mobile platform that supports CPU idle PM (e.g.
          an ACPI-capable notebook), you should say Y here.

config CPU_IDLE_GOV_LADDER
        bool
        depends on CPU_IDLE
        default y

config CPU_IDLE_GOV_MENU
        bool
        depends on CPU_IDLE && NO_HZ
        default y
5       drivers/cpuidle/Makefile        (new file)
@ -0,0 +1,5 @@
#
# Makefile for cpuidle.
#

obj-y += cpuidle.o driver.o governor.o sysfs.o governors/
295     drivers/cpuidle/cpuidle.c       (new file)
@ -0,0 +1,295 @@
|
||||
/*
|
||||
* cpuidle.c - core cpuidle infrastructure
|
||||
*
|
||||
* (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
|
||||
* Shaohua Li <shaohua.li@intel.com>
|
||||
* Adam Belay <abelay@novell.com>
|
||||
*
|
||||
* This code is licenced under the GPL.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/sched.h>
|
||||
#include <linux/notifier.h>
|
||||
#include <linux/latency.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/cpuidle.h>
|
||||
|
||||
#include "cpuidle.h"
|
||||
|
||||
DEFINE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
|
||||
EXPORT_PER_CPU_SYMBOL_GPL(cpuidle_devices);
|
||||
|
||||
DEFINE_MUTEX(cpuidle_lock);
|
||||
LIST_HEAD(cpuidle_detected_devices);
|
||||
static void (*pm_idle_old)(void);
|
||||
|
||||
static int enabled_devices;
|
||||
|
||||
/**
|
||||
* cpuidle_idle_call - the main idle loop
|
||||
*
|
||||
* NOTE: no locks or semaphores should be used here
|
||||
*/
|
||||
static void cpuidle_idle_call(void)
|
||||
{
|
||||
struct cpuidle_device *dev = __get_cpu_var(cpuidle_devices);
|
||||
struct cpuidle_state *target_state;
|
||||
int next_state;
|
||||
|
||||
/* check if the device is ready */
|
||||
if (!dev || !dev->enabled) {
|
||||
if (pm_idle_old)
|
||||
pm_idle_old();
|
||||
else
|
||||
local_irq_enable();
|
||||
return;
|
||||
}
|
||||
|
||||
/* ask the governor for the next state */
|
||||
next_state = cpuidle_curr_governor->select(dev);
|
||||
if (need_resched())
|
||||
return;
|
||||
target_state = &dev->states[next_state];
|
||||
|
||||
/* enter the state and update stats */
|
||||
dev->last_residency = target_state->enter(dev, target_state);
|
||||
dev->last_state = target_state;
|
||||
target_state->time += dev->last_residency;
|
||||
target_state->usage++;
|
||||
|
||||
/* give the governor an opportunity to reflect on the outcome */
|
||||
if (cpuidle_curr_governor->reflect)
|
||||
cpuidle_curr_governor->reflect(dev);
|
||||
}
|
||||
|
||||
/**
|
||||
* cpuidle_install_idle_handler - installs the cpuidle idle loop handler
|
||||
*/
|
||||
void cpuidle_install_idle_handler(void)
|
||||
{
|
||||
if (enabled_devices && (pm_idle != cpuidle_idle_call)) {
|
||||
/* Make sure all changes finished before we switch to new idle */
|
||||
smp_wmb();
|
||||
pm_idle = cpuidle_idle_call;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* cpuidle_uninstall_idle_handler - uninstalls the cpuidle idle loop handler
|
||||
*/
|
||||
void cpuidle_uninstall_idle_handler(void)
|
||||
{
|
||||
if (enabled_devices && (pm_idle != pm_idle_old)) {
|
||||
pm_idle = pm_idle_old;
|
||||
cpu_idle_wait();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* cpuidle_pause_and_lock - temporarily disables CPUIDLE
|
||||
*/
|
||||
void cpuidle_pause_and_lock(void)
|
||||
{
|
||||
mutex_lock(&cpuidle_lock);
|
||||
cpuidle_uninstall_idle_handler();
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL_GPL(cpuidle_pause_and_lock);
|
||||
|
||||
/**
|
||||
* cpuidle_resume_and_unlock - resumes CPUIDLE operation
|
||||
*/
|
||||
void cpuidle_resume_and_unlock(void)
|
||||
{
|
||||
cpuidle_install_idle_handler();
|
||||
mutex_unlock(&cpuidle_lock);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL_GPL(cpuidle_resume_and_unlock);
|
||||
|
||||
/**
|
||||
* cpuidle_enable_device - enables idle PM for a CPU
|
||||
* @dev: the CPU
|
||||
*
|
||||
* This function must be called between cpuidle_pause_and_lock and
|
||||
* cpuidle_resume_and_unlock when used externally.
|
||||
*/
|
||||
int cpuidle_enable_device(struct cpuidle_device *dev)
|
||||
{
|
||||
int ret, i;
|
||||
|
||||
if (dev->enabled)
|
||||
return 0;
|
||||
if (!cpuidle_curr_driver || !cpuidle_curr_governor)
|
||||
return -EIO;
|
||||
if (!dev->state_count)
|
||||
return -EINVAL;
|
||||
|
||||
if ((ret = cpuidle_add_state_sysfs(dev)))
|
||||
return ret;
|
||||
|
||||
if (cpuidle_curr_governor->enable &&
|
||||
(ret = cpuidle_curr_governor->enable(dev)))
|
||||
goto fail_sysfs;
|
||||
|
||||
for (i = 0; i < dev->state_count; i++) {
|
||||
dev->states[i].usage = 0;
|
||||
dev->states[i].time = 0;
|
||||
}
|
||||
dev->last_residency = 0;
|
||||
dev->last_state = NULL;
|
||||
|
||||
smp_wmb();
|
||||
|
||||
dev->enabled = 1;
|
||||
|
||||
enabled_devices++;
|
||||
return 0;
|
||||
|
||||
fail_sysfs:
|
||||
cpuidle_remove_state_sysfs(dev);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL_GPL(cpuidle_enable_device);
|
||||
|
||||
/**
|
||||
* cpuidle_disable_device - disables idle PM for a CPU
|
||||
* @dev: the CPU
|
||||
*
|
||||
* This function must be called between cpuidle_pause_and_lock and
|
||||
* cpuidle_resume_and_unlock when used externally.
|
||||
*/
|
||||
void cpuidle_disable_device(struct cpuidle_device *dev)
|
||||
{
|
||||
if (!dev->enabled)
|
||||
return;
|
||||
if (!cpuidle_curr_driver || !cpuidle_curr_governor)
|
||||
return;
|
||||
|
||||
dev->enabled = 0;
|
||||
|
||||
if (cpuidle_curr_governor->disable)
|
||||
cpuidle_curr_governor->disable(dev);
|
||||
|
||||
cpuidle_remove_state_sysfs(dev);
|
||||
enabled_devices--;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL_GPL(cpuidle_disable_device);
|
||||
|
||||
/**
|
||||
* cpuidle_register_device - registers a CPU's idle PM feature
|
||||
* @dev: the cpu
|
||||
*/
|
||||
int cpuidle_register_device(struct cpuidle_device *dev)
|
||||
{
|
||||
int ret;
|
||||
struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);
|
||||
|
||||
if (!sys_dev)
|
||||
return -EINVAL;
|
||||
if (!try_module_get(cpuidle_curr_driver->owner))
|
||||
return -EINVAL;
|
||||
|
||||
init_completion(&dev->kobj_unregister);
|
||||
|
||||
mutex_lock(&cpuidle_lock);
|
||||
|
||||
per_cpu(cpuidle_devices, dev->cpu) = dev;
|
||||
list_add(&dev->device_list, &cpuidle_detected_devices);
|
||||
if ((ret = cpuidle_add_sysfs(sys_dev))) {
|
||||
mutex_unlock(&cpuidle_lock);
|
||||
module_put(cpuidle_curr_driver->owner);
|
||||
return ret;
|
||||
}
|
||||
|
||||
cpuidle_enable_device(dev);
|
||||
cpuidle_install_idle_handler();
|
||||
|
||||
mutex_unlock(&cpuidle_lock);
|
||||
|
||||
return 0;
|
||||
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL_GPL(cpuidle_register_device);
|
||||
|
||||
/**
|
||||
* cpuidle_unregister_device - unregisters a CPU's idle PM feature
|
||||
* @dev: the cpu
|
||||
*/
|
||||
void cpuidle_unregister_device(struct cpuidle_device *dev)
|
||||
{
|
||||
struct sys_device *sys_dev = get_cpu_sysdev((unsigned long)dev->cpu);
|
||||
|
||||
cpuidle_pause_and_lock();
|
||||
|
||||
cpuidle_disable_device(dev);
|
||||
|
||||
cpuidle_remove_sysfs(sys_dev);
|
||||
list_del(&dev->device_list);
|
||||
wait_for_completion(&dev->kobj_unregister);
|
||||
per_cpu(cpuidle_devices, dev->cpu) = NULL;
|
||||
|
||||
cpuidle_resume_and_unlock();
|
||||
|
||||
module_put(cpuidle_curr_driver->owner);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL_GPL(cpuidle_unregister_device);
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
|
||||
static void smp_callback(void *v)
|
||||
{
|
||||
/* we already woke the CPU up, nothing more to do */
|
||||
}
|
||||
|
||||
/*
|
||||
* This function gets called when a part of the kernel has a new latency
|
||||
* requirement. This means we need to get all processors out of their C-state,
|
||||
* and then recalculate a new suitable C-state. Just do a cross-cpu IPI; that
|
||||
* wakes them all right up.
|
||||
*/
|
||||
static int cpuidle_latency_notify(struct notifier_block *b,
|
||||
unsigned long l, void *v)
|
||||
{
|
||||
smp_call_function(smp_callback, NULL, 0, 1);
|
||||
return NOTIFY_OK;
|
||||
}
|
||||
|
||||
static struct notifier_block cpuidle_latency_notifier = {
|
||||
.notifier_call = cpuidle_latency_notify,
|
||||
};
|
||||
|
||||
#define latency_notifier_init(x) do { register_latency_notifier(x); } while (0)
|
||||
|
||||
#else /* CONFIG_SMP */
|
||||
|
||||
#define latency_notifier_init(x) do { } while (0)
|
||||
|
||||
#endif /* CONFIG_SMP */
|
||||
|
||||
/**
|
||||
* cpuidle_init - core initializer
|
||||
*/
|
||||
static int __init cpuidle_init(void)
|
||||
{
|
||||
int ret;
|
||||
|
||||
pm_idle_old = pm_idle;
|
||||
|
||||
ret = cpuidle_add_class_sysfs(&cpu_sysdev_class);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
latency_notifier_init(&cpuidle_latency_notifier);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
core_initcall(cpuidle_init);
33      drivers/cpuidle/cpuidle.h       (new file)
@ -0,0 +1,33 @@
/*
 * cpuidle.h - The internal header file
 */

#ifndef __DRIVER_CPUIDLE_H
#define __DRIVER_CPUIDLE_H

#include <linux/sysdev.h>

/* For internal use only */
extern struct cpuidle_governor *cpuidle_curr_governor;
extern struct cpuidle_driver *cpuidle_curr_driver;
extern struct list_head cpuidle_governors;
extern struct list_head cpuidle_detected_devices;
extern struct mutex cpuidle_lock;
extern spinlock_t cpuidle_driver_lock;

/* idle loop */
extern void cpuidle_install_idle_handler(void);
extern void cpuidle_uninstall_idle_handler(void);

/* governors */
extern int cpuidle_switch_governor(struct cpuidle_governor *gov);

/* sysfs */
extern int cpuidle_add_class_sysfs(struct sysdev_class *cls);
extern void cpuidle_remove_class_sysfs(struct sysdev_class *cls);
extern int cpuidle_add_state_sysfs(struct cpuidle_device *device);
extern void cpuidle_remove_state_sysfs(struct cpuidle_device *device);
extern int cpuidle_add_sysfs(struct sys_device *sysdev);
extern void cpuidle_remove_sysfs(struct sys_device *sysdev);

#endif /* __DRIVER_CPUIDLE_H */
56      drivers/cpuidle/driver.c        (new file)
@ -0,0 +1,56 @@
/*
 * driver.c - driver support
 *
 * (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *               Shaohua Li <shaohua.li@intel.com>
 *               Adam Belay <abelay@novell.com>
 *
 * This code is licenced under the GPL.
 */

#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/cpuidle.h>

#include "cpuidle.h"

struct cpuidle_driver *cpuidle_curr_driver;
DEFINE_SPINLOCK(cpuidle_driver_lock);

/**
 * cpuidle_register_driver - registers a driver
 * @drv: the driver
 */
int cpuidle_register_driver(struct cpuidle_driver *drv)
{
        if (!drv)
                return -EINVAL;

        spin_lock(&cpuidle_driver_lock);
        if (cpuidle_curr_driver) {
                spin_unlock(&cpuidle_driver_lock);
                return -EBUSY;
        }
        cpuidle_curr_driver = drv;
        spin_unlock(&cpuidle_driver_lock);

        return 0;
}

EXPORT_SYMBOL_GPL(cpuidle_register_driver);

/**
 * cpuidle_unregister_driver - unregisters a driver
 * @drv: the driver
 */
void cpuidle_unregister_driver(struct cpuidle_driver *drv)
{
        if (!drv)
                return;

        spin_lock(&cpuidle_driver_lock);
        cpuidle_curr_driver = NULL;
        spin_unlock(&cpuidle_driver_lock);
}

EXPORT_SYMBOL_GPL(cpuidle_unregister_driver);
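The registration API above is what the ACPI processor driver hooks into earlier in this patch (acpi_processor_setup_cpuidle plus cpuidle_register_device). A rough sketch of the expected usage pattern follows, assuming the cpuidle_driver/cpuidle_device/cpuidle_state fields referenced elsewhere in this commit; names such as my_idle and my_enter_c1, and the latency numbers, are hypothetical:

        #include <linux/cpuidle.h>

        /* enter() must return the time actually spent in the state, in us. */
        static int my_enter_c1(struct cpuidle_device *dev, struct cpuidle_state *st)
        {
                /* program the hardware idle state, measure the residency ... */
                return 0;
        }

        static struct cpuidle_driver my_idle_driver = {
                .name  = "my_idle",
                .owner = THIS_MODULE,
        };

        static DEFINE_PER_CPU(struct cpuidle_device, my_idle_devices);

        static int my_idle_setup_cpu(int cpu)
        {
                struct cpuidle_device *dev = &per_cpu(my_idle_devices, cpu);

                dev->cpu = cpu;
                dev->state_count = 1;
                strcpy(dev->states[0].name, "C1");
                dev->states[0].exit_latency = 1;        /* us, made up */
                dev->states[0].target_residency = 10;   /* us, made up */
                dev->states[0].flags = CPUIDLE_FLAG_TIME_VALID;
                dev->states[0].enter = my_enter_c1;

                /* cpuidle_register_driver(&my_idle_driver) is called once,
                 * before the first device is registered; a second driver
                 * gets -EBUSY, as shown above. */
                return cpuidle_register_device(dev);
        }

cpuidle_register_device() then enables the device and installs the cpuidle idle handler, so the governor's select() starts being consulted on the next idle entry.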
141     drivers/cpuidle/governor.c      (new file)
@ -0,0 +1,141 @@
|
||||
/*
|
||||
* governor.c - governor support
|
||||
*
|
||||
* (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
|
||||
* Shaohua Li <shaohua.li@intel.com>
|
||||
* Adam Belay <abelay@novell.com>
|
||||
*
|
||||
* This code is licenced under the GPL.
|
||||
*/
|
||||
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/cpuidle.h>
|
||||
|
||||
#include "cpuidle.h"
|
||||
|
||||
LIST_HEAD(cpuidle_governors);
|
||||
struct cpuidle_governor *cpuidle_curr_governor;
|
||||
|
||||
/**
|
||||
* __cpuidle_find_governor - finds a governor of the specified name
|
||||
* @str: the name
|
||||
*
|
||||
* Must be called with cpuidle_lock acquired.
|
||||
*/
|
||||
static struct cpuidle_governor * __cpuidle_find_governor(const char *str)
|
||||
{
|
||||
struct cpuidle_governor *gov;
|
||||
|
||||
list_for_each_entry(gov, &cpuidle_governors, governor_list)
|
||||
if (!strnicmp(str, gov->name, CPUIDLE_NAME_LEN))
|
||||
return gov;
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* cpuidle_switch_governor - changes the governor
|
||||
* @gov: the new target governor
|
||||
*
|
||||
* NOTE: "gov" can be NULL to specify disabled
|
||||
* Must be called with cpuidle_lock acquired.
|
||||
*/
|
||||
int cpuidle_switch_governor(struct cpuidle_governor *gov)
|
||||
{
|
||||
struct cpuidle_device *dev;
|
||||
|
||||
if (gov == cpuidle_curr_governor)
|
||||
return 0;
|
||||
|
||||
cpuidle_uninstall_idle_handler();
|
||||
|
||||
if (cpuidle_curr_governor) {
|
||||
list_for_each_entry(dev, &cpuidle_detected_devices, device_list)
|
||||
cpuidle_disable_device(dev);
|
||||
module_put(cpuidle_curr_governor->owner);
|
||||
}
|
||||
|
||||
cpuidle_curr_governor = gov;
|
||||
|
||||
if (gov) {
|
||||
if (!try_module_get(cpuidle_curr_governor->owner))
|
||||
return -EINVAL;
|
||||
list_for_each_entry(dev, &cpuidle_detected_devices, device_list)
|
||||
cpuidle_enable_device(dev);
|
||||
cpuidle_install_idle_handler();
|
||||
printk(KERN_INFO "cpuidle: using governor %s\n", gov->name);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* cpuidle_register_governor - registers a governor
|
||||
* @gov: the governor
|
||||
*/
|
||||
int cpuidle_register_governor(struct cpuidle_governor *gov)
|
||||
{
|
||||
int ret = -EEXIST;
|
||||
|
||||
if (!gov || !gov->select)
|
||||
return -EINVAL;
|
||||
|
||||
mutex_lock(&cpuidle_lock);
|
||||
if (__cpuidle_find_governor(gov->name) == NULL) {
|
||||
ret = 0;
|
||||
list_add_tail(&gov->governor_list, &cpuidle_governors);
|
||||
if (!cpuidle_curr_governor ||
|
||||
cpuidle_curr_governor->rating < gov->rating)
|
||||
cpuidle_switch_governor(gov);
|
||||
}
|
||||
mutex_unlock(&cpuidle_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL_GPL(cpuidle_register_governor);
|
||||
|
||||
/**
|
||||
* cpuidle_replace_governor - find a replacement governor
|
||||
* @exclude_rating: the rating that will be skipped while looking for
|
||||
* new governor.
|
||||
*/
|
||||
static struct cpuidle_governor *cpuidle_replace_governor(int exclude_rating)
|
||||
{
|
||||
struct cpuidle_governor *gov;
|
||||
struct cpuidle_governor *ret_gov = NULL;
|
||||
unsigned int max_rating = 0;
|
||||
|
||||
list_for_each_entry(gov, &cpuidle_governors, governor_list) {
|
||||
if (gov->rating == exclude_rating)
|
||||
continue;
|
||||
if (gov->rating > max_rating) {
|
||||
max_rating = gov->rating;
|
||||
ret_gov = gov;
|
||||
}
|
||||
}
|
||||
|
||||
return ret_gov;
|
||||
}
|
||||
|
||||
/**
|
||||
* cpuidle_unregister_governor - unregisters a governor
|
||||
* @gov: the governor
|
||||
*/
|
||||
void cpuidle_unregister_governor(struct cpuidle_governor *gov)
|
||||
{
|
||||
if (!gov)
|
||||
return;
|
||||
|
||||
mutex_lock(&cpuidle_lock);
|
||||
if (gov == cpuidle_curr_governor) {
|
||||
struct cpuidle_governor *new_gov;
|
||||
new_gov = cpuidle_replace_governor(gov->rating);
|
||||
cpuidle_switch_governor(new_gov);
|
||||
}
|
||||
list_del(&gov->governor_list);
|
||||
mutex_unlock(&cpuidle_lock);
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL_GPL(cpuidle_unregister_governor);
6       drivers/cpuidle/governors/Makefile      (new file)
@ -0,0 +1,6 @@
#
# Makefile for cpuidle governors.
#

obj-$(CONFIG_CPU_IDLE_GOV_LADDER) += ladder.o
obj-$(CONFIG_CPU_IDLE_GOV_MENU) += menu.o
166     drivers/cpuidle/governors/ladder.c      (new file)
@ -0,0 +1,166 @@
|
||||
/*
|
||||
* ladder.c - the residency ladder algorithm
|
||||
*
|
||||
* Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
|
||||
* Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
|
||||
* Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
|
||||
*
|
||||
* (C) 2006-2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
|
||||
* Shaohua Li <shaohua.li@intel.com>
|
||||
* Adam Belay <abelay@novell.com>
|
||||
*
|
||||
* This code is licenced under the GPL.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/cpuidle.h>
|
||||
#include <linux/latency.h>
|
||||
#include <linux/moduleparam.h>
|
||||
#include <linux/jiffies.h>
|
||||
|
||||
#include <asm/io.h>
|
||||
#include <asm/uaccess.h>
|
||||
|
||||
#define PROMOTION_COUNT 4
|
||||
#define DEMOTION_COUNT 1
|
||||
|
||||
struct ladder_device_state {
|
||||
struct {
|
||||
u32 promotion_count;
|
||||
u32 demotion_count;
|
||||
u32 promotion_time;
|
||||
u32 demotion_time;
|
||||
} threshold;
|
||||
struct {
|
||||
int promotion_count;
|
||||
int demotion_count;
|
||||
} stats;
|
||||
};
|
||||
|
||||
struct ladder_device {
|
||||
struct ladder_device_state states[CPUIDLE_STATE_MAX];
|
||||
int last_state_idx;
|
||||
};
|
||||
|
||||
static DEFINE_PER_CPU(struct ladder_device, ladder_devices);
|
||||
|
||||
/**
|
||||
* ladder_do_selection - prepares private data for a state change
|
||||
* @ldev: the ladder device
|
||||
* @old_idx: the current state index
|
||||
* @new_idx: the new target state index
|
||||
*/
|
||||
static inline void ladder_do_selection(struct ladder_device *ldev,
|
||||
int old_idx, int new_idx)
|
||||
{
|
||||
ldev->states[old_idx].stats.promotion_count = 0;
|
||||
ldev->states[old_idx].stats.demotion_count = 0;
|
||||
ldev->last_state_idx = new_idx;
|
||||
}
|
||||
|
||||
/**
|
||||
* ladder_select_state - selects the next state to enter
|
||||
* @dev: the CPU
|
||||
*/
|
||||
static int ladder_select_state(struct cpuidle_device *dev)
|
||||
{
|
||||
struct ladder_device *ldev = &__get_cpu_var(ladder_devices);
|
||||
struct ladder_device_state *last_state;
|
||||
int last_residency, last_idx = ldev->last_state_idx;
|
||||
|
||||
if (unlikely(!ldev))
|
||||
return 0;
|
||||
|
||||
last_state = &ldev->states[last_idx];
|
||||
|
||||
if (dev->states[last_idx].flags & CPUIDLE_FLAG_TIME_VALID)
|
||||
last_residency = cpuidle_get_last_residency(dev) - dev->states[last_idx].exit_latency;
|
||||
else
|
||||
last_residency = last_state->threshold.promotion_time + 1;
|
||||
|
||||
/* consider promotion */
|
||||
if (last_idx < dev->state_count - 1 &&
|
||||
last_residency > last_state->threshold.promotion_time &&
|
||||
dev->states[last_idx + 1].exit_latency <= system_latency_constraint()) {
|
||||
last_state->stats.promotion_count++;
|
||||
last_state->stats.demotion_count = 0;
|
||||
if (last_state->stats.promotion_count >= last_state->threshold.promotion_count) {
|
||||
ladder_do_selection(ldev, last_idx, last_idx + 1);
|
||||
return last_idx + 1;
|
||||
}
|
||||
}
|
||||
|
||||
/* consider demotion */
|
||||
if (last_idx > 0 &&
|
||||
last_residency < last_state->threshold.demotion_time) {
|
||||
last_state->stats.demotion_count++;
|
||||
last_state->stats.promotion_count = 0;
|
||||
if (last_state->stats.demotion_count >= last_state->threshold.demotion_count) {
|
||||
ladder_do_selection(ldev, last_idx, last_idx - 1);
|
||||
return last_idx - 1;
|
||||
}
|
||||
}
|
||||
|
||||
/* otherwise remain at the current state */
|
||||
return last_idx;
|
||||
}
|
||||
|
||||
/**
|
||||
* ladder_enable_device - setup for the governor
|
||||
* @dev: the CPU
|
||||
*/
|
||||
static int ladder_enable_device(struct cpuidle_device *dev)
|
||||
{
|
||||
int i;
|
||||
struct ladder_device *ldev = &per_cpu(ladder_devices, dev->cpu);
|
||||
struct ladder_device_state *lstate;
|
||||
struct cpuidle_state *state;
|
||||
|
||||
ldev->last_state_idx = 0;
|
||||
|
||||
for (i = 0; i < dev->state_count; i++) {
|
||||
state = &dev->states[i];
|
||||
lstate = &ldev->states[i];
|
||||
|
||||
lstate->stats.promotion_count = 0;
|
||||
lstate->stats.demotion_count = 0;
|
||||
|
||||
lstate->threshold.promotion_count = PROMOTION_COUNT;
|
||||
lstate->threshold.demotion_count = DEMOTION_COUNT;
|
||||
|
||||
if (i < dev->state_count - 1)
|
||||
lstate->threshold.promotion_time = state->exit_latency;
|
||||
if (i > 0)
|
||||
lstate->threshold.demotion_time = state->exit_latency;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct cpuidle_governor ladder_governor = {
|
||||
.name = "ladder",
|
||||
.rating = 10,
|
||||
.enable = ladder_enable_device,
|
||||
.select = ladder_select_state,
|
||||
.owner = THIS_MODULE,
|
||||
};
|
||||
|
||||
/**
|
||||
* init_ladder - initializes the governor
|
||||
*/
|
||||
static int __init init_ladder(void)
|
||||
{
|
||||
return cpuidle_register_governor(&ladder_governor);
|
||||
}
|
||||
|
||||
/**
|
||||
* exit_ladder - exits the governor
|
||||
*/
|
||||
static void __exit exit_ladder(void)
|
||||
{
|
||||
cpuidle_unregister_governor(&ladder_governor);
|
||||
}
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
module_init(init_ladder);
|
||||
module_exit(exit_ladder);
137     drivers/cpuidle/governors/menu.c        (new file)
@ -0,0 +1,137 @@
|
||||
/*
|
||||
* menu.c - the menu idle governor
|
||||
*
|
||||
* Copyright (C) 2006-2007 Adam Belay <abelay@novell.com>
|
||||
*
|
||||
* This code is licenced under the GPL.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/cpuidle.h>
|
||||
#include <linux/latency.h>
|
||||
#include <linux/time.h>
|
||||
#include <linux/ktime.h>
|
||||
#include <linux/hrtimer.h>
|
||||
#include <linux/tick.h>
|
||||
|
||||
#define BREAK_FUZZ 4 /* 4 us */
|
||||
|
||||
struct menu_device {
|
||||
int last_state_idx;
|
||||
|
||||
unsigned int expected_us;
|
||||
unsigned int predicted_us;
|
||||
unsigned int last_measured_us;
|
||||
unsigned int elapsed_us;
|
||||
};
|
||||
|
||||
static DEFINE_PER_CPU(struct menu_device, menu_devices);
|
||||
|
||||
/**
|
||||
* menu_select - selects the next idle state to enter
|
||||
* @dev: the CPU
|
||||
*/
|
||||
static int menu_select(struct cpuidle_device *dev)
|
||||
{
|
||||
struct menu_device *data = &__get_cpu_var(menu_devices);
|
||||
int i;
|
||||
|
||||
/* determine the expected residency time */
|
||||
data->expected_us =
|
||||
(u32) ktime_to_ns(tick_nohz_get_sleep_length()) / 1000;
|
||||
|
||||
/* find the deepest idle state that satisfies our constraints */
|
||||
for (i = 1; i < dev->state_count; i++) {
|
||||
struct cpuidle_state *s = &dev->states[i];
|
||||
|
||||
if (s->target_residency > data->expected_us)
|
||||
break;
|
||||
if (s->target_residency > data->predicted_us)
|
||||
break;
|
||||
if (s->exit_latency > system_latency_constraint())
|
||||
break;
|
||||
}
|
||||
|
||||
data->last_state_idx = i - 1;
|
||||
return i - 1;
|
||||
}
|
||||
|
||||
/**
|
||||
* menu_reflect - attempts to guess what happened after entry
|
||||
* @dev: the CPU
|
||||
*
|
||||
* NOTE: it's important to be fast here because this operation will add to
|
||||
* the overall exit latency.
|
||||
*/
|
||||
static void menu_reflect(struct cpuidle_device *dev)
|
||||
{
|
||||
struct menu_device *data = &__get_cpu_var(menu_devices);
|
||||
int last_idx = data->last_state_idx;
|
||||
unsigned int measured_us =
|
||||
cpuidle_get_last_residency(dev) + data->elapsed_us;
|
||||
struct cpuidle_state *target = &dev->states[last_idx];
|
||||
|
||||
/*
|
||||
* Ugh, this idle state doesn't support residency measurements, so we
|
||||
* are basically lost in the dark. As a compromise, assume we slept
|
||||
* for one full standard timer tick. However, be aware that this
|
||||
* could potentially result in a suboptimal state transition.
|
||||
*/
|
||||
if (!(target->flags & CPUIDLE_FLAG_TIME_VALID))
|
||||
measured_us = USEC_PER_SEC / HZ;
|
||||
|
||||
/* Predict time remaining until next break event */
|
||||
if (measured_us + BREAK_FUZZ < data->expected_us - target->exit_latency) {
|
||||
data->predicted_us = max(measured_us, data->last_measured_us);
|
||||
data->last_measured_us = measured_us;
|
||||
data->elapsed_us = 0;
|
||||
} else {
|
||||
if (data->elapsed_us < data->elapsed_us + measured_us)
|
||||
data->elapsed_us = measured_us;
|
||||
else
|
||||
data->elapsed_us = -1;
|
||||
data->predicted_us = max(measured_us, data->last_measured_us);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* menu_enable_device - scans a CPU's states and does setup
|
||||
* @dev: the CPU
|
||||
*/
|
||||
static int menu_enable_device(struct cpuidle_device *dev)
|
||||
{
|
||||
struct menu_device *data = &per_cpu(menu_devices, dev->cpu);
|
||||
|
||||
memset(data, 0, sizeof(struct menu_device));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct cpuidle_governor menu_governor = {
|
||||
.name = "menu",
|
||||
.rating = 20,
|
||||
.enable = menu_enable_device,
|
||||
.select = menu_select,
|
||||
.reflect = menu_reflect,
|
||||
.owner = THIS_MODULE,
|
||||
};
|
||||
|
||||
/**
|
||||
* init_menu - initializes the governor
|
||||
*/
|
||||
static int __init init_menu(void)
|
||||
{
|
||||
return cpuidle_register_governor(&menu_governor);
|
||||
}
|
||||
|
||||
/**
|
||||
* exit_menu - exits the governor
|
||||
*/
|
||||
static void __exit exit_menu(void)
|
||||
{
|
||||
cpuidle_unregister_governor(&menu_governor);
|
||||
}
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
module_init(init_menu);
|
||||
module_exit(exit_menu);
361     drivers/cpuidle/sysfs.c (new file)
@ -0,0 +1,361 @@
|
||||
/*
|
||||
* sysfs.c - sysfs support
|
||||
*
|
||||
* (C) 2006-2007 Shaohua Li <shaohua.li@intel.com>
|
||||
*
|
||||
* This code is licenced under the GPL.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/cpuidle.h>
|
||||
#include <linux/sysfs.h>
|
||||
#include <linux/cpu.h>
|
||||
|
||||
#include "cpuidle.h"
|
||||
|
||||
static unsigned int sysfs_switch;
|
||||
static int __init cpuidle_sysfs_setup(char *unused)
|
||||
{
|
||||
sysfs_switch = 1;
|
||||
return 1;
|
||||
}
|
||||
__setup("cpuidle_sysfs_switch", cpuidle_sysfs_setup);
|
||||
|
||||
static ssize_t show_available_governors(struct sys_device *dev, char *buf)
|
||||
{
|
||||
ssize_t i = 0;
|
||||
struct cpuidle_governor *tmp;
|
||||
|
||||
mutex_lock(&cpuidle_lock);
|
||||
list_for_each_entry(tmp, &cpuidle_governors, governor_list) {
|
||||
if (i >= (ssize_t) ((PAGE_SIZE/sizeof(char)) - CPUIDLE_NAME_LEN - 2))
|
||||
goto out;
|
||||
i += scnprintf(&buf[i], CPUIDLE_NAME_LEN, "%s ", tmp->name);
|
||||
}
|
||||
|
||||
out:
|
||||
i+= sprintf(&buf[i], "\n");
|
||||
mutex_unlock(&cpuidle_lock);
|
||||
return i;
|
||||
}
|
||||
|
||||
static ssize_t show_current_driver(struct sys_device *dev, char *buf)
|
||||
{
|
||||
ssize_t ret;
|
||||
|
||||
spin_lock(&cpuidle_driver_lock);
|
||||
if (cpuidle_curr_driver)
|
||||
ret = sprintf(buf, "%s\n", cpuidle_curr_driver->name);
|
||||
else
|
||||
ret = sprintf(buf, "none\n");
|
||||
spin_unlock(&cpuidle_driver_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static ssize_t show_current_governor(struct sys_device *dev, char *buf)
|
||||
{
|
||||
ssize_t ret;
|
||||
|
||||
mutex_lock(&cpuidle_lock);
|
||||
if (cpuidle_curr_governor)
|
||||
ret = sprintf(buf, "%s\n", cpuidle_curr_governor->name);
|
||||
else
|
||||
ret = sprintf(buf, "none\n");
|
||||
mutex_unlock(&cpuidle_lock);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static ssize_t store_current_governor(struct sys_device *dev,
|
||||
const char *buf, size_t count)
|
||||
{
|
||||
char gov_name[CPUIDLE_NAME_LEN];
|
||||
int ret = -EINVAL;
|
||||
size_t len = count;
|
||||
struct cpuidle_governor *gov;
|
||||
|
||||
if (!len || len >= sizeof(gov_name))
|
||||
return -EINVAL;
|
||||
|
||||
memcpy(gov_name, buf, len);
|
||||
gov_name[len] = '\0';
|
||||
if (gov_name[len - 1] == '\n')
|
||||
gov_name[--len] = '\0';
|
||||
|
||||
mutex_lock(&cpuidle_lock);
|
||||
|
||||
list_for_each_entry(gov, &cpuidle_governors, governor_list) {
|
||||
if (strlen(gov->name) == len && !strcmp(gov->name, gov_name)) {
|
||||
ret = cpuidle_switch_governor(gov);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
mutex_unlock(&cpuidle_lock);
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
else
|
||||
return count;
|
||||
}
|
||||
|
||||
static SYSDEV_ATTR(current_driver, 0444, show_current_driver, NULL);
|
||||
static SYSDEV_ATTR(current_governor_ro, 0444, show_current_governor, NULL);
|
||||
|
||||
static struct attribute *cpuclass_default_attrs[] = {
|
||||
&attr_current_driver.attr,
|
||||
&attr_current_governor_ro.attr,
|
||||
NULL
|
||||
};
|
||||
|
||||
static SYSDEV_ATTR(available_governors, 0444, show_available_governors, NULL);
|
||||
static SYSDEV_ATTR(current_governor, 0644, show_current_governor,
|
||||
store_current_governor);
|
||||
|
||||
static struct attribute *cpuclass_switch_attrs[] = {
|
||||
&attr_available_governors.attr,
|
||||
&attr_current_driver.attr,
|
||||
&attr_current_governor.attr,
|
||||
NULL
|
||||
};
|
||||
|
||||
static struct attribute_group cpuclass_attr_group = {
|
||||
.attrs = cpuclass_default_attrs,
|
||||
.name = "cpuidle",
|
||||
};
|
||||
|
||||
/**
|
||||
* cpuidle_add_class_sysfs - add CPU global sysfs attributes
|
||||
*/
|
||||
int cpuidle_add_class_sysfs(struct sysdev_class *cls)
|
||||
{
|
||||
if (sysfs_switch)
|
||||
cpuclass_attr_group.attrs = cpuclass_switch_attrs;
|
||||
|
||||
return sysfs_create_group(&cls->kset.kobj, &cpuclass_attr_group);
|
||||
}
|
||||
|
||||
/**
|
||||
* cpuidle_remove_class_sysfs - remove CPU global sysfs attributes
|
||||
*/
|
||||
void cpuidle_remove_class_sysfs(struct sysdev_class *cls)
|
||||
{
|
||||
sysfs_remove_group(&cls->kset.kobj, &cpuclass_attr_group);
|
||||
}
|
||||
|
||||
struct cpuidle_attr {
|
||||
struct attribute attr;
|
||||
ssize_t (*show)(struct cpuidle_device *, char *);
|
||||
ssize_t (*store)(struct cpuidle_device *, const char *, size_t count);
|
||||
};
|
||||
|
||||
#define define_one_ro(_name, show) \
|
||||
static struct cpuidle_attr attr_##_name = __ATTR(_name, 0444, show, NULL)
|
||||
#define define_one_rw(_name, show, store) \
|
||||
static struct cpuidle_attr attr_##_name = __ATTR(_name, 0644, show, store)
|
||||
|
||||
#define kobj_to_cpuidledev(k) container_of(k, struct cpuidle_device, kobj)
|
||||
#define attr_to_cpuidleattr(a) container_of(a, struct cpuidle_attr, attr)
|
||||
static ssize_t cpuidle_show(struct kobject * kobj, struct attribute * attr ,char * buf)
|
||||
{
|
||||
int ret = -EIO;
|
||||
struct cpuidle_device *dev = kobj_to_cpuidledev(kobj);
|
||||
struct cpuidle_attr * cattr = attr_to_cpuidleattr(attr);
|
||||
|
||||
if (cattr->show) {
|
||||
mutex_lock(&cpuidle_lock);
|
||||
ret = cattr->show(dev, buf);
|
||||
mutex_unlock(&cpuidle_lock);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
static ssize_t cpuidle_store(struct kobject * kobj, struct attribute * attr,
|
||||
const char * buf, size_t count)
|
||||
{
|
||||
int ret = -EIO;
|
||||
struct cpuidle_device *dev = kobj_to_cpuidledev(kobj);
|
||||
struct cpuidle_attr * cattr = attr_to_cpuidleattr(attr);
|
||||
|
||||
if (cattr->store) {
|
||||
mutex_lock(&cpuidle_lock);
|
||||
		ret = cattr->store(dev, buf, count);
		mutex_unlock(&cpuidle_lock);
	}
	return ret;
}

static struct sysfs_ops cpuidle_sysfs_ops = {
	.show = cpuidle_show,
	.store = cpuidle_store,
};

static void cpuidle_sysfs_release(struct kobject *kobj)
{
	struct cpuidle_device *dev = kobj_to_cpuidledev(kobj);

	complete(&dev->kobj_unregister);
}

static struct kobj_type ktype_cpuidle = {
	.sysfs_ops = &cpuidle_sysfs_ops,
	.release = cpuidle_sysfs_release,
};

struct cpuidle_state_attr {
	struct attribute attr;
	ssize_t (*show)(struct cpuidle_state *, char *);
	ssize_t (*store)(struct cpuidle_state *, const char *, size_t);
};

#define define_one_state_ro(_name, show) \
static struct cpuidle_state_attr attr_##_name = __ATTR(_name, 0444, show, NULL)

#define define_show_state_function(_name) \
static ssize_t show_state_##_name(struct cpuidle_state *state, char *buf) \
{ \
	return sprintf(buf, "%u\n", state->_name);\
}

static ssize_t show_state_name(struct cpuidle_state *state, char *buf)
{
	return sprintf(buf, "%s\n", state->name);
}

define_show_state_function(exit_latency)
define_show_state_function(power_usage)
define_show_state_function(usage)
define_show_state_function(time)
define_one_state_ro(name, show_state_name);
define_one_state_ro(latency, show_state_exit_latency);
define_one_state_ro(power, show_state_power_usage);
define_one_state_ro(usage, show_state_usage);
define_one_state_ro(time, show_state_time);

static struct attribute *cpuidle_state_default_attrs[] = {
	&attr_name.attr,
	&attr_latency.attr,
	&attr_power.attr,
	&attr_usage.attr,
	&attr_time.attr,
	NULL
};

#define kobj_to_state_obj(k) container_of(k, struct cpuidle_state_kobj, kobj)
#define kobj_to_state(k) (kobj_to_state_obj(k)->state)
#define attr_to_stateattr(a) container_of(a, struct cpuidle_state_attr, attr)
static ssize_t cpuidle_state_show(struct kobject * kobj,
	struct attribute * attr ,char * buf)
{
	int ret = -EIO;
	struct cpuidle_state *state = kobj_to_state(kobj);
	struct cpuidle_state_attr * cattr = attr_to_stateattr(attr);

	if (cattr->show)
		ret = cattr->show(state, buf);

	return ret;
}

static struct sysfs_ops cpuidle_state_sysfs_ops = {
	.show = cpuidle_state_show,
};

static void cpuidle_state_sysfs_release(struct kobject *kobj)
{
	struct cpuidle_state_kobj *state_obj = kobj_to_state_obj(kobj);

	complete(&state_obj->kobj_unregister);
}

static struct kobj_type ktype_state_cpuidle = {
	.sysfs_ops = &cpuidle_state_sysfs_ops,
	.default_attrs = cpuidle_state_default_attrs,
	.release = cpuidle_state_sysfs_release,
};

static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
{
	kobject_unregister(&device->kobjs[i]->kobj);
	wait_for_completion(&device->kobjs[i]->kobj_unregister);
	kfree(device->kobjs[i]);
	device->kobjs[i] = NULL;
}

/**
 * cpuidle_add_driver_sysfs - adds driver-specific sysfs attributes
 * @device: the target device
 */
int cpuidle_add_state_sysfs(struct cpuidle_device *device)
{
	int i, ret = -ENOMEM;
	struct cpuidle_state_kobj *kobj;

	/* state statistics */
	for (i = 0; i < device->state_count; i++) {
		kobj = kzalloc(sizeof(struct cpuidle_state_kobj), GFP_KERNEL);
		if (!kobj)
			goto error_state;
		kobj->state = &device->states[i];
		init_completion(&kobj->kobj_unregister);

		kobj->kobj.parent = &device->kobj;
		kobj->kobj.ktype = &ktype_state_cpuidle;
		kobject_set_name(&kobj->kobj, "state%d", i);
		ret = kobject_register(&kobj->kobj);
		if (ret) {
			kfree(kobj);
			goto error_state;
		}
		device->kobjs[i] = kobj;
	}

	return 0;

error_state:
	for (i = i - 1; i >= 0; i--)
		cpuidle_free_state_kobj(device, i);
	return ret;
}

/**
 * cpuidle_remove_driver_sysfs - removes driver-specific sysfs attributes
 * @device: the target device
 */
void cpuidle_remove_state_sysfs(struct cpuidle_device *device)
{
	int i;

	for (i = 0; i < device->state_count; i++)
		cpuidle_free_state_kobj(device, i);
}

/**
 * cpuidle_add_sysfs - creates a sysfs instance for the target device
 * @sysdev: the target device
 */
int cpuidle_add_sysfs(struct sys_device *sysdev)
{
	int cpu = sysdev->id;
	struct cpuidle_device *dev;

	dev = per_cpu(cpuidle_devices, cpu);
	dev->kobj.parent = &sysdev->kobj;
	dev->kobj.ktype = &ktype_cpuidle;
	kobject_set_name(&dev->kobj, "%s", "cpuidle");
	return kobject_register(&dev->kobj);
}

/**
 * cpuidle_remove_sysfs - deletes a sysfs instance on the target device
 * @sysdev: the target device
 */
void cpuidle_remove_sysfs(struct sys_device *sysdev)
{
	int cpu = sysdev->id;
	struct cpuidle_device *dev;

	dev = per_cpu(cpuidle_devices, cpu);
	kobject_unregister(&dev->kobj);
}
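For readers skimming the sysfs glue above: the two helper macros are only shorthand for ordinary read-only attribute definitions. As a rough expansion sketch (derived from the macros shown above, not extra code from the patch), attr_latency boils down to:

/* Illustrative expansion of define_show_state_function(exit_latency) and
 * define_one_state_ro(latency, show_state_exit_latency) from the code above. */
static ssize_t show_state_exit_latency(struct cpuidle_state *state, char *buf)
{
	return sprintf(buf, "%u\n", state->exit_latency);
}

static struct cpuidle_state_attr attr_latency =
	__ATTR(latency, 0444, show_state_exit_latency, NULL);

Each per-state kobject is registered as "state%d" under the per-CPU "cpuidle" kobject, so these attributes end up as the name, latency, power, usage and time files of each state directory.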
@ -111,6 +111,21 @@ config ASUS_LAPTOP

	  If you have an ACPI-compatible ASUS laptop, say Y or M here.

config FUJITSU_LAPTOP
	tristate "Fujitsu Laptop Extras"
	depends on X86
	depends on ACPI
	depends on BACKLIGHT_CLASS_DEVICE
	---help---
	  This is a driver for laptops built by Fujitsu:

	    * P2xxx/P5xxx/S6xxx/S7xxx series Lifebooks
	    * Possibly other Fujitsu laptop models

	  It adds support for LCD brightness control.

	  If you have a Fujitsu laptop, say Y or M here.

config MSI_LAPTOP
	tristate "MSI Laptop Extras"
	depends on X86
@ -134,6 +149,7 @@ config SONY_LAPTOP
	tristate "Sony Laptop Extras"
	depends on X86 && ACPI
	select BACKLIGHT_CLASS_DEVICE
	depends on INPUT
	---help---
	  This mini-driver drives the SNC and SPIC devices present in the ACPI
	  BIOS of the Sony Vaio laptops.
@ -156,6 +172,7 @@ config THINKPAD_ACPI
	select BACKLIGHT_CLASS_DEVICE
	select HWMON
	select NVRAM
	depends on INPUT
	---help---
	  This is a driver for the IBM and Lenovo ThinkPad laptops. It adds
	  support for Fn-Fx key combinations, Bluetooth control, video
@ -15,4 +15,5 @@ obj-$(CONFIG_PHANTOM) += phantom.o
obj-$(CONFIG_SGI_IOC4) += ioc4.o
obj-$(CONFIG_SONY_LAPTOP) += sony-laptop.o
obj-$(CONFIG_THINKPAD_ACPI) += thinkpad_acpi.o
obj-$(CONFIG_FUJITSU_LAPTOP) += fujitsu-laptop.o
obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o
drivers/misc/fujitsu-laptop.c (new file, 358 lines)
@ -0,0 +1,358 @@
|
||||
/*-*-linux-c-*-*/
|
||||
|
||||
/*
|
||||
Copyright (C) 2007 Jonathan Woithe <jwoithe@physics.adelaide.edu.au>
|
||||
Based on earlier work:
|
||||
Copyright (C) 2003 Shane Spencer <shane@bogomip.com>
|
||||
Adrian Yee <brewt-fujitsu@brewt.org>
|
||||
|
||||
Templated from msi-laptop.c which is copyright by its respective authors.
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful, but
|
||||
WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program; if not, write to the Free Software
|
||||
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
|
||||
02110-1301, USA.
|
||||
*/
|
||||
|
||||
/*
|
||||
* fujitsu-laptop.c - Fujitsu laptop support, providing access to additional
|
||||
* features made available on a range of Fujitsu laptops including the
|
||||
* P2xxx/P5xxx/S6xxx/S7xxx series.
|
||||
*
|
||||
* This driver exports a few files in /sys/devices/platform/fujitsu-laptop/;
|
||||
* others may be added at a later date.
|
||||
*
|
||||
* lcd_level - Screen brightness: contains a single integer in the
|
||||
* range 0..7. (rw)
|
||||
*
|
||||
* In addition to these platform device attributes the driver
|
||||
* registers itself in the Linux backlight control subsystem and is
|
||||
* available to userspace under /sys/class/backlight/fujitsu-laptop/.
|
||||
*
|
||||
* This driver has been tested on a Fujitsu Lifebook S7020. It should
|
||||
* work on most P-series and S-series Lifebooks, but YMMV.
|
||||
*/
|
||||
|
||||
#include <linux/module.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/acpi.h>
|
||||
#include <linux/dmi.h>
|
||||
#include <linux/backlight.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/autoconf.h>
|
||||
|
||||
#define FUJITSU_DRIVER_VERSION "0.3"
|
||||
|
||||
#define FUJITSU_LCD_N_LEVELS 8
|
||||
|
||||
#define ACPI_FUJITSU_CLASS "fujitsu"
|
||||
#define ACPI_FUJITSU_HID "FUJ02B1"
|
||||
#define ACPI_FUJITSU_DRIVER_NAME "Fujitsu laptop FUJ02B1 ACPI extras driver"
|
||||
#define ACPI_FUJITSU_DEVICE_NAME "Fujitsu FUJ02B1"
|
||||
|
||||
struct fujitsu_t {
|
||||
acpi_handle acpi_handle;
|
||||
struct backlight_device *bl_device;
|
||||
struct platform_device *pf_device;
|
||||
|
||||
unsigned long fuj02b1_state;
|
||||
unsigned int brightness_changed;
|
||||
unsigned int brightness_level;
|
||||
};
|
||||
|
||||
static struct fujitsu_t *fujitsu;
|
||||
|
||||
/* Hardware access */
|
||||
|
||||
static int set_lcd_level(int level)
|
||||
{
|
||||
acpi_status status = AE_OK;
|
||||
union acpi_object arg0 = { ACPI_TYPE_INTEGER };
|
||||
struct acpi_object_list arg_list = { 1, &arg0 };
|
||||
acpi_handle handle = NULL;
|
||||
|
||||
if (level < 0 || level >= FUJITSU_LCD_N_LEVELS)
|
||||
return -EINVAL;
|
||||
|
||||
if (!fujitsu)
|
||||
return -EINVAL;
|
||||
|
||||
status = acpi_get_handle(fujitsu->acpi_handle, "SBLL", &handle);
|
||||
if (ACPI_FAILURE(status)) {
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "SBLL not present\n"));
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
arg0.integer.value = level;
|
||||
|
||||
status = acpi_evaluate_object(handle, NULL, &arg_list, NULL);
|
||||
if (ACPI_FAILURE(status))
|
||||
return -ENODEV;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int get_lcd_level(void)
|
||||
{
|
||||
unsigned long state = 0;
|
||||
acpi_status status = AE_OK;
|
||||
|
||||
// Get the Brightness
|
||||
status =
|
||||
acpi_evaluate_integer(fujitsu->acpi_handle, "GBLL", NULL, &state);
|
||||
if (status < 0)
|
||||
return status;
|
||||
|
||||
fujitsu->fuj02b1_state = state;
|
||||
fujitsu->brightness_level = state & 0x0fffffff;
|
||||
|
||||
if (state & 0x80000000)
|
||||
fujitsu->brightness_changed = 1;
|
||||
else
|
||||
fujitsu->brightness_changed = 0;
|
||||
|
||||
if (status < 0)
|
||||
return status;
|
||||
|
||||
return fujitsu->brightness_level;
|
||||
}
|
||||
|
||||
/* Backlight device stuff */
|
||||
|
||||
static int bl_get_brightness(struct backlight_device *b)
|
||||
{
|
||||
return get_lcd_level();
|
||||
}
|
||||
|
||||
static int bl_update_status(struct backlight_device *b)
|
||||
{
|
||||
return set_lcd_level(b->props.brightness);
|
||||
}
|
||||
|
||||
static struct backlight_ops fujitsubl_ops = {
|
||||
.get_brightness = bl_get_brightness,
|
||||
.update_status = bl_update_status,
|
||||
};
|
||||
|
||||
/* Platform device */
|
||||
|
||||
static ssize_t show_lcd_level(struct device *dev,
|
||||
struct device_attribute *attr, char *buf)
|
||||
{
|
||||
|
||||
int ret;
|
||||
|
||||
ret = get_lcd_level();
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
return sprintf(buf, "%i\n", ret);
|
||||
}
|
||||
|
||||
static ssize_t store_lcd_level(struct device *dev,
|
||||
struct device_attribute *attr, const char *buf,
|
||||
size_t count)
|
||||
{
|
||||
|
||||
int level, ret;
|
||||
|
||||
if (sscanf(buf, "%i", &level) != 1
|
||||
|| (level < 0 || level >= FUJITSU_LCD_N_LEVELS))
|
||||
return -EINVAL;
|
||||
|
||||
ret = set_lcd_level(level);
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
|
||||
return count;
|
||||
}
|
||||
|
||||
static DEVICE_ATTR(lcd_level, 0644, show_lcd_level, store_lcd_level);
|
||||
|
||||
static struct attribute *fujitsupf_attributes[] = {
|
||||
&dev_attr_lcd_level.attr,
|
||||
NULL
|
||||
};
|
||||
|
||||
static struct attribute_group fujitsupf_attribute_group = {
|
||||
.attrs = fujitsupf_attributes
|
||||
};
|
||||
|
||||
static struct platform_driver fujitsupf_driver = {
|
||||
.driver = {
|
||||
.name = "fujitsu-laptop",
|
||||
.owner = THIS_MODULE,
|
||||
}
|
||||
};
|
||||
|
||||
/* ACPI device */
|
||||
|
||||
int acpi_fujitsu_add(struct acpi_device *device)
|
||||
{
|
||||
int result = 0;
|
||||
int state = 0;
|
||||
|
||||
ACPI_FUNCTION_TRACE("acpi_fujitsu_add");
|
||||
|
||||
if (!device)
|
||||
return -EINVAL;
|
||||
|
||||
fujitsu->acpi_handle = device->handle;
|
||||
sprintf(acpi_device_name(device), "%s", ACPI_FUJITSU_DEVICE_NAME);
|
||||
sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS);
|
||||
acpi_driver_data(device) = fujitsu;
|
||||
|
||||
result = acpi_bus_get_power(fujitsu->acpi_handle, &state);
|
||||
if (result) {
|
||||
ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
|
||||
"Error reading power state\n"));
|
||||
goto end;
|
||||
}
|
||||
|
||||
printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
|
||||
acpi_device_name(device), acpi_device_bid(device),
|
||||
!device->power.state ? "on" : "off");
|
||||
|
||||
end:
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
int acpi_fujitsu_remove(struct acpi_device *device, int type)
|
||||
{
|
||||
ACPI_FUNCTION_TRACE("acpi_fujitsu_remove");
|
||||
|
||||
if (!device || !acpi_driver_data(device))
|
||||
return -EINVAL;
|
||||
fujitsu->acpi_handle = 0;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct acpi_device_id fujitsu_device_ids[] = {
|
||||
{ACPI_FUJITSU_HID, 0},
|
||||
{"", 0},
|
||||
};
|
||||
|
||||
static struct acpi_driver acpi_fujitsu_driver = {
|
||||
.name = ACPI_FUJITSU_DRIVER_NAME,
|
||||
.class = ACPI_FUJITSU_CLASS,
|
||||
.ids = fujitsu_device_ids,
|
||||
.ops = {
|
||||
.add = acpi_fujitsu_add,
|
||||
.remove = acpi_fujitsu_remove,
|
||||
},
|
||||
};
|
||||
|
||||
/* Initialization */
|
||||
|
||||
static int __init fujitsu_init(void)
|
||||
{
|
||||
int ret, result;
|
||||
|
||||
if (acpi_disabled)
|
||||
return -ENODEV;
|
||||
|
||||
fujitsu = kmalloc(sizeof(struct fujitsu_t), GFP_KERNEL);
|
||||
if (!fujitsu)
|
||||
return -ENOMEM;
|
||||
memset(fujitsu, 0, sizeof(struct fujitsu_t));
|
||||
|
||||
result = acpi_bus_register_driver(&acpi_fujitsu_driver);
|
||||
if (result < 0) {
|
||||
ret = -ENODEV;
|
||||
goto fail_acpi;
|
||||
}
|
||||
|
||||
/* Register backlight stuff */
|
||||
|
||||
fujitsu->bl_device =
|
||||
backlight_device_register("fujitsu-laptop", NULL, NULL,
|
||||
&fujitsubl_ops);
|
||||
if (IS_ERR(fujitsu->bl_device))
|
||||
return PTR_ERR(fujitsu->bl_device);
|
||||
|
||||
fujitsu->bl_device->props.max_brightness = FUJITSU_LCD_N_LEVELS - 1;
|
||||
ret = platform_driver_register(&fujitsupf_driver);
|
||||
if (ret)
|
||||
goto fail_backlight;
|
||||
|
||||
/* Register platform stuff */
|
||||
|
||||
fujitsu->pf_device = platform_device_alloc("fujitsu-laptop", -1);
|
||||
if (!fujitsu->pf_device) {
|
||||
ret = -ENOMEM;
|
||||
goto fail_platform_driver;
|
||||
}
|
||||
|
||||
ret = platform_device_add(fujitsu->pf_device);
|
||||
if (ret)
|
||||
goto fail_platform_device1;
|
||||
|
||||
ret =
|
||||
sysfs_create_group(&fujitsu->pf_device->dev.kobj,
|
||||
&fujitsupf_attribute_group);
|
||||
if (ret)
|
||||
goto fail_platform_device2;
|
||||
|
||||
printk(KERN_INFO "fujitsu-laptop: driver " FUJITSU_DRIVER_VERSION
|
||||
" successfully loaded.\n");
|
||||
|
||||
return 0;
|
||||
|
||||
fail_platform_device2:
|
||||
|
||||
platform_device_del(fujitsu->pf_device);
|
||||
|
||||
fail_platform_device1:
|
||||
|
||||
platform_device_put(fujitsu->pf_device);
|
||||
|
||||
fail_platform_driver:
|
||||
|
||||
platform_driver_unregister(&fujitsupf_driver);
|
||||
|
||||
fail_backlight:
|
||||
|
||||
backlight_device_unregister(fujitsu->bl_device);
|
||||
|
||||
fail_acpi:
|
||||
|
||||
kfree(fujitsu);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void __exit fujitsu_cleanup(void)
|
||||
{
|
||||
sysfs_remove_group(&fujitsu->pf_device->dev.kobj,
|
||||
&fujitsupf_attribute_group);
|
||||
platform_device_unregister(fujitsu->pf_device);
|
||||
platform_driver_unregister(&fujitsupf_driver);
|
||||
backlight_device_unregister(fujitsu->bl_device);
|
||||
|
||||
acpi_bus_unregister_driver(&acpi_fujitsu_driver);
|
||||
|
||||
kfree(fujitsu);
|
||||
|
||||
printk(KERN_INFO "fujitsu-laptop: driver unloaded.\n");
|
||||
}
|
||||
|
||||
module_init(fujitsu_init);
|
||||
module_exit(fujitsu_cleanup);
|
||||
|
||||
MODULE_AUTHOR("Jonathan Woithe");
|
||||
MODULE_DESCRIPTION("Fujitsu laptop extras support");
|
||||
MODULE_VERSION(FUJITSU_DRIVER_VERSION);
|
||||
MODULE_LICENSE("GPL");
|
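As a usage note on the new driver above: the lcd_level attribute it creates can be driven from user space once the module is loaded. A minimal, hypothetical test program (path taken from the driver's own header comment; not part of the patch, error handling kept to a minimum) could be:

/* Hypothetical user-space helper: set the Fujitsu LCD brightness by writing
 * the sysfs attribute registered by fujitsu-laptop. Valid levels are 0..7
 * (FUJITSU_LCD_N_LEVELS - 1 is the maximum). */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/devices/platform/fujitsu-laptop/lcd_level", "w");

	if (!f) {
		perror("lcd_level");
		return 1;
	}
	fprintf(f, "%d\n", 4);	/* mid-range brightness */
	fclose(f);
	return 0;
}

The same level can also be set through the generic backlight class device the driver registers under /sys/class/backlight/fujitsu-laptop/.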
@ -1173,7 +1173,8 @@ static struct acpi_driver sony_nc_driver = {
|
||||
#define SONYPI_TYPE3_OFFSET 0x12
|
||||
|
||||
struct sony_pic_ioport {
|
||||
struct acpi_resource_io io;
|
||||
struct acpi_resource_io io1;
|
||||
struct acpi_resource_io io2;
|
||||
struct list_head list;
|
||||
};
|
||||
|
||||
@ -1443,11 +1444,11 @@ static u8 sony_pic_call1(u8 dev)
|
||||
{
|
||||
u8 v1, v2;
|
||||
|
||||
wait_on_command(inb_p(spic_dev.cur_ioport->io.minimum + 4) & 2,
|
||||
wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2,
|
||||
ITERATIONS_LONG);
|
||||
outb(dev, spic_dev.cur_ioport->io.minimum + 4);
|
||||
v1 = inb_p(spic_dev.cur_ioport->io.minimum + 4);
|
||||
v2 = inb_p(spic_dev.cur_ioport->io.minimum);
|
||||
outb(dev, spic_dev.cur_ioport->io1.minimum + 4);
|
||||
v1 = inb_p(spic_dev.cur_ioport->io1.minimum + 4);
|
||||
v2 = inb_p(spic_dev.cur_ioport->io1.minimum);
|
||||
dprintk("sony_pic_call1: 0x%.4x\n", (v2 << 8) | v1);
|
||||
return v2;
|
||||
}
|
||||
@ -1456,13 +1457,13 @@ static u8 sony_pic_call2(u8 dev, u8 fn)
|
||||
{
|
||||
u8 v1;
|
||||
|
||||
wait_on_command(inb_p(spic_dev.cur_ioport->io.minimum + 4) & 2,
|
||||
wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2,
|
||||
ITERATIONS_LONG);
|
||||
outb(dev, spic_dev.cur_ioport->io.minimum + 4);
|
||||
wait_on_command(inb_p(spic_dev.cur_ioport->io.minimum + 4) & 2,
|
||||
outb(dev, spic_dev.cur_ioport->io1.minimum + 4);
|
||||
wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2,
|
||||
ITERATIONS_LONG);
|
||||
outb(fn, spic_dev.cur_ioport->io.minimum);
|
||||
v1 = inb_p(spic_dev.cur_ioport->io.minimum);
|
||||
outb(fn, spic_dev.cur_ioport->io1.minimum);
|
||||
v1 = inb_p(spic_dev.cur_ioport->io1.minimum);
|
||||
dprintk("sony_pic_call2: 0x%.4x\n", v1);
|
||||
return v1;
|
||||
}
|
||||
@ -1471,13 +1472,13 @@ static u8 sony_pic_call3(u8 dev, u8 fn, u8 v)
|
||||
{
|
||||
u8 v1;
|
||||
|
||||
wait_on_command(inb_p(spic_dev.cur_ioport->io.minimum + 4) & 2, ITERATIONS_LONG);
|
||||
outb(dev, spic_dev.cur_ioport->io.minimum + 4);
|
||||
wait_on_command(inb_p(spic_dev.cur_ioport->io.minimum + 4) & 2, ITERATIONS_LONG);
|
||||
outb(fn, spic_dev.cur_ioport->io.minimum);
|
||||
wait_on_command(inb_p(spic_dev.cur_ioport->io.minimum + 4) & 2, ITERATIONS_LONG);
|
||||
outb(v, spic_dev.cur_ioport->io.minimum);
|
||||
v1 = inb_p(spic_dev.cur_ioport->io.minimum);
|
||||
wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2, ITERATIONS_LONG);
|
||||
outb(dev, spic_dev.cur_ioport->io1.minimum + 4);
|
||||
wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2, ITERATIONS_LONG);
|
||||
outb(fn, spic_dev.cur_ioport->io1.minimum);
|
||||
wait_on_command(inb_p(spic_dev.cur_ioport->io1.minimum + 4) & 2, ITERATIONS_LONG);
|
||||
outb(v, spic_dev.cur_ioport->io1.minimum);
|
||||
v1 = inb_p(spic_dev.cur_ioport->io1.minimum);
|
||||
dprintk("sony_pic_call3: 0x%.4x\n", v1);
|
||||
return v1;
|
||||
}
|
||||
@ -2074,7 +2075,18 @@ sony_pic_read_possible_resource(struct acpi_resource *resource, void *context)
|
||||
|
||||
switch (resource->type) {
|
||||
case ACPI_RESOURCE_TYPE_START_DEPENDENT:
|
||||
{
|
||||
/* start IO enumeration */
|
||||
struct sony_pic_ioport *ioport = kzalloc(sizeof(*ioport), GFP_KERNEL);
|
||||
if (!ioport)
|
||||
return AE_ERROR;
|
||||
|
||||
list_add(&ioport->list, &dev->ioports);
|
||||
return AE_OK;
|
||||
}
|
||||
|
||||
case ACPI_RESOURCE_TYPE_END_DEPENDENT:
|
||||
/* end IO enumeration */
|
||||
return AE_OK;
|
||||
|
||||
case ACPI_RESOURCE_TYPE_IRQ:
|
||||
@ -2101,7 +2113,7 @@ sony_pic_read_possible_resource(struct acpi_resource *resource, void *context)
|
||||
if (!interrupt)
|
||||
return AE_ERROR;
|
||||
|
||||
list_add_tail(&interrupt->list, &dev->interrupts);
|
||||
list_add(&interrupt->list, &dev->interrupts);
|
||||
interrupt->irq.triggering = p->triggering;
|
||||
interrupt->irq.polarity = p->polarity;
|
||||
interrupt->irq.sharable = p->sharable;
|
||||
@ -2113,18 +2125,27 @@ sony_pic_read_possible_resource(struct acpi_resource *resource, void *context)
|
||||
case ACPI_RESOURCE_TYPE_IO:
|
||||
{
|
||||
struct acpi_resource_io *io = &resource->data.io;
|
||||
struct sony_pic_ioport *ioport = NULL;
|
||||
struct sony_pic_ioport *ioport =
|
||||
list_first_entry(&dev->ioports, struct sony_pic_ioport, list);
|
||||
if (!io) {
|
||||
dprintk("Blank IO resource\n");
|
||||
return AE_OK;
|
||||
}
|
||||
|
||||
ioport = kzalloc(sizeof(*ioport), GFP_KERNEL);
|
||||
if (!ioport)
|
||||
if (!ioport->io1.minimum) {
|
||||
memcpy(&ioport->io1, io, sizeof(*io));
|
||||
dprintk("IO1 at 0x%.4x (0x%.2x)\n", ioport->io1.minimum,
|
||||
ioport->io1.address_length);
|
||||
}
|
||||
else if (!ioport->io2.minimum) {
|
||||
memcpy(&ioport->io2, io, sizeof(*io));
|
||||
dprintk("IO2 at 0x%.4x (0x%.2x)\n", ioport->io2.minimum,
|
||||
ioport->io2.address_length);
|
||||
}
|
||||
else {
|
||||
printk(KERN_ERR DRV_PFX "Unknown SPIC Type, more than 2 IO Ports\n");
|
||||
return AE_ERROR;
|
||||
|
||||
list_add_tail(&ioport->list, &dev->ioports);
|
||||
memcpy(&ioport->io, io, sizeof(*io));
|
||||
}
|
||||
return AE_OK;
|
||||
}
|
||||
default:
|
||||
@ -2199,10 +2220,22 @@ static int sony_pic_enable(struct acpi_device *device,
|
||||
{
|
||||
acpi_status status;
|
||||
int result = 0;
|
||||
/* Type 1 resource layout is:
|
||||
* IO
|
||||
* IO
|
||||
* IRQNoFlags
|
||||
* End
|
||||
*
|
||||
* Type 2 and 3 resource layout is:
|
||||
* IO
|
||||
* IRQNoFlags
|
||||
* End
|
||||
*/
|
||||
struct {
|
||||
struct acpi_resource io_res;
|
||||
struct acpi_resource irq_res;
|
||||
struct acpi_resource end;
|
||||
struct acpi_resource res1;
|
||||
struct acpi_resource res2;
|
||||
struct acpi_resource res3;
|
||||
struct acpi_resource res4;
|
||||
} *resource;
|
||||
struct acpi_buffer buffer = { 0, NULL };
|
||||
|
||||
@ -2217,21 +2250,49 @@ static int sony_pic_enable(struct acpi_device *device,
|
||||
buffer.length = sizeof(*resource) + 1;
|
||||
buffer.pointer = resource;
|
||||
|
||||
/* setup io resource */
|
||||
resource->io_res.type = ACPI_RESOURCE_TYPE_IO;
|
||||
resource->io_res.length = sizeof(struct acpi_resource);
|
||||
memcpy(&resource->io_res.data.io, &ioport->io,
|
||||
sizeof(struct acpi_resource_io));
|
||||
/* setup Type 1 resources */
|
||||
if (spic_dev.model == SONYPI_DEVICE_TYPE1) {
|
||||
|
||||
/* setup irq resource */
|
||||
resource->irq_res.type = ACPI_RESOURCE_TYPE_IRQ;
|
||||
resource->irq_res.length = sizeof(struct acpi_resource);
|
||||
memcpy(&resource->irq_res.data.irq, &irq->irq,
|
||||
sizeof(struct acpi_resource_irq));
|
||||
/* we requested a shared irq */
|
||||
resource->irq_res.data.irq.sharable = ACPI_SHARED;
|
||||
/* setup io resources */
|
||||
resource->res1.type = ACPI_RESOURCE_TYPE_IO;
|
||||
resource->res1.length = sizeof(struct acpi_resource);
|
||||
memcpy(&resource->res1.data.io, &ioport->io1,
|
||||
sizeof(struct acpi_resource_io));
|
||||
|
||||
resource->end.type = ACPI_RESOURCE_TYPE_END_TAG;
|
||||
resource->res2.type = ACPI_RESOURCE_TYPE_IO;
|
||||
resource->res2.length = sizeof(struct acpi_resource);
|
||||
memcpy(&resource->res2.data.io, &ioport->io2,
|
||||
sizeof(struct acpi_resource_io));
|
||||
|
||||
/* setup irq resource */
|
||||
resource->res3.type = ACPI_RESOURCE_TYPE_IRQ;
|
||||
resource->res3.length = sizeof(struct acpi_resource);
|
||||
memcpy(&resource->res3.data.irq, &irq->irq,
|
||||
sizeof(struct acpi_resource_irq));
|
||||
/* we requested a shared irq */
|
||||
resource->res3.data.irq.sharable = ACPI_SHARED;
|
||||
|
||||
resource->res4.type = ACPI_RESOURCE_TYPE_END_TAG;
|
||||
|
||||
}
|
||||
/* setup Type 2/3 resources */
|
||||
else {
|
||||
/* setup io resource */
|
||||
resource->res1.type = ACPI_RESOURCE_TYPE_IO;
|
||||
resource->res1.length = sizeof(struct acpi_resource);
|
||||
memcpy(&resource->res1.data.io, &ioport->io1,
|
||||
sizeof(struct acpi_resource_io));
|
||||
|
||||
/* setup irq resource */
|
||||
resource->res2.type = ACPI_RESOURCE_TYPE_IRQ;
|
||||
resource->res2.length = sizeof(struct acpi_resource);
|
||||
memcpy(&resource->res2.data.irq, &irq->irq,
|
||||
sizeof(struct acpi_resource_irq));
|
||||
/* we requested a shared irq */
|
||||
resource->res2.data.irq.sharable = ACPI_SHARED;
|
||||
|
||||
resource->res3.type = ACPI_RESOURCE_TYPE_END_TAG;
|
||||
}
|
||||
|
||||
/* Attempt to set the resource */
|
||||
dprintk("Evaluating _SRS\n");
|
||||
@ -2239,7 +2300,7 @@ static int sony_pic_enable(struct acpi_device *device,
|
||||
|
||||
/* check for total failure */
|
||||
if (ACPI_FAILURE(status)) {
|
||||
printk(KERN_ERR DRV_PFX "Error evaluating _SRS");
|
||||
printk(KERN_ERR DRV_PFX "Error evaluating _SRS\n");
|
||||
result = -ENODEV;
|
||||
goto end;
|
||||
}
|
||||
@ -2268,11 +2329,14 @@ static irqreturn_t sony_pic_irq(int irq, void *dev_id)
|
||||
|
||||
struct sony_pic_dev *dev = (struct sony_pic_dev *) dev_id;
|
||||
|
||||
ev = inb_p(dev->cur_ioport->io.minimum);
|
||||
data_mask = inb_p(dev->cur_ioport->io.minimum + dev->evport_offset);
|
||||
ev = inb_p(dev->cur_ioport->io1.minimum);
|
||||
if (dev->cur_ioport->io2.minimum)
|
||||
data_mask = inb_p(dev->cur_ioport->io2.minimum);
|
||||
else
|
||||
data_mask = inb_p(dev->cur_ioport->io1.minimum + dev->evport_offset);
|
||||
|
||||
dprintk("event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n",
|
||||
ev, data_mask, dev->cur_ioport->io.minimum, dev->evport_offset);
|
||||
ev, data_mask, dev->cur_ioport->io1.minimum, dev->evport_offset);
|
||||
|
||||
if (ev == 0x00 || ev == 0xff)
|
||||
return IRQ_HANDLED;
|
||||
@ -2323,8 +2387,11 @@ static int sony_pic_remove(struct acpi_device *device, int type)
|
||||
}
|
||||
|
||||
free_irq(spic_dev.cur_irq->irq.interrupts[0], &spic_dev);
|
||||
release_region(spic_dev.cur_ioport->io.minimum,
|
||||
spic_dev.cur_ioport->io.address_length);
|
||||
release_region(spic_dev.cur_ioport->io1.minimum,
|
||||
spic_dev.cur_ioport->io1.address_length);
|
||||
if (spic_dev.cur_ioport->io2.minimum)
|
||||
release_region(spic_dev.cur_ioport->io2.minimum,
|
||||
spic_dev.cur_ioport->io2.address_length);
|
||||
|
||||
sonypi_compat_exit();
|
||||
|
||||
@ -2397,14 +2464,36 @@ static int sony_pic_add(struct acpi_device *device)
|
||||
goto err_remove_input;
|
||||
|
||||
/* request io port */
|
||||
list_for_each_entry(io, &spic_dev.ioports, list) {
|
||||
if (request_region(io->io.minimum, io->io.address_length,
|
||||
list_for_each_entry_reverse(io, &spic_dev.ioports, list) {
|
||||
if (request_region(io->io1.minimum, io->io1.address_length,
|
||||
"Sony Programable I/O Device")) {
|
||||
dprintk("I/O port: 0x%.4x (0x%.4x) + 0x%.2x\n",
|
||||
io->io.minimum, io->io.maximum,
|
||||
io->io.address_length);
|
||||
spic_dev.cur_ioport = io;
|
||||
break;
|
||||
dprintk("I/O port1: 0x%.4x (0x%.4x) + 0x%.2x\n",
|
||||
io->io1.minimum, io->io1.maximum,
|
||||
io->io1.address_length);
|
||||
/* Type 1 have 2 ioports */
|
||||
if (io->io2.minimum) {
|
||||
if (request_region(io->io2.minimum,
|
||||
io->io2.address_length,
|
||||
"Sony Programable I/O Device")) {
|
||||
dprintk("I/O port2: 0x%.4x (0x%.4x) + 0x%.2x\n",
|
||||
io->io2.minimum, io->io2.maximum,
|
||||
io->io2.address_length);
|
||||
spic_dev.cur_ioport = io;
|
||||
break;
|
||||
}
|
||||
else {
|
||||
dprintk("Unable to get I/O port2: "
|
||||
"0x%.4x (0x%.4x) + 0x%.2x\n",
|
||||
io->io2.minimum, io->io2.maximum,
|
||||
io->io2.address_length);
|
||||
release_region(io->io1.minimum,
|
||||
io->io1.address_length);
|
||||
}
|
||||
}
|
||||
else {
|
||||
spic_dev.cur_ioport = io;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (!spic_dev.cur_ioport) {
|
||||
@ -2414,7 +2503,7 @@ static int sony_pic_add(struct acpi_device *device)
|
||||
}
|
||||
|
||||
/* request IRQ */
|
||||
list_for_each_entry(irq, &spic_dev.interrupts, list) {
|
||||
list_for_each_entry_reverse(irq, &spic_dev.interrupts, list) {
|
||||
if (!request_irq(irq->irq.interrupts[0], sony_pic_irq,
|
||||
IRQF_SHARED, "sony-laptop", &spic_dev)) {
|
||||
dprintk("IRQ: %d - triggering: %d - "
|
||||
@ -2462,8 +2551,11 @@ static int sony_pic_add(struct acpi_device *device)
|
||||
free_irq(spic_dev.cur_irq->irq.interrupts[0], &spic_dev);
|
||||
|
||||
err_release_region:
|
||||
release_region(spic_dev.cur_ioport->io.minimum,
|
||||
spic_dev.cur_ioport->io.address_length);
|
||||
release_region(spic_dev.cur_ioport->io1.minimum,
|
||||
spic_dev.cur_ioport->io1.address_length);
|
||||
if (spic_dev.cur_ioport->io2.minimum)
|
||||
release_region(spic_dev.cur_ioport->io2.minimum,
|
||||
spic_dev.cur_ioport->io2.address_length);
|
||||
|
||||
err_remove_compat:
|
||||
sonypi_compat_exit();
|
||||
|
@ -22,7 +22,7 @@
|
||||
*/
|
||||
|
||||
#define IBM_VERSION "0.16"
|
||||
#define TPACPI_SYSFS_VERSION 0x010000
|
||||
#define TPACPI_SYSFS_VERSION 0x020000
|
||||
|
||||
/*
|
||||
* Changelog:
|
||||
@ -117,6 +117,12 @@ IBM_BIOS_MODULE_ALIAS("K[U,X-Z]");
|
||||
|
||||
#define __unused __attribute__ ((unused))
|
||||
|
||||
static enum {
|
||||
TPACPI_LIFE_INIT = 0,
|
||||
TPACPI_LIFE_RUNNING,
|
||||
TPACPI_LIFE_EXITING,
|
||||
} tpacpi_lifecycle;
|
||||
|
||||
/****************************************************************************
|
||||
****************************************************************************
|
||||
*
|
||||
@ -342,6 +348,9 @@ static void dispatch_acpi_notify(acpi_handle handle, u32 event, void *data)
|
||||
{
|
||||
struct ibm_struct *ibm = data;
|
||||
|
||||
if (tpacpi_lifecycle != TPACPI_LIFE_RUNNING)
|
||||
return;
|
||||
|
||||
if (!ibm || !ibm->acpi || !ibm->acpi->notify)
|
||||
return;
|
||||
|
||||
@ -517,8 +526,10 @@ static char *next_cmd(char **cmds)
|
||||
****************************************************************************/
|
||||
|
||||
static struct platform_device *tpacpi_pdev;
|
||||
static struct platform_device *tpacpi_sensors_pdev;
|
||||
static struct device *tpacpi_hwmon;
|
||||
static struct input_dev *tpacpi_inputdev;
|
||||
static struct mutex tpacpi_inputdev_send_mutex;
|
||||
|
||||
|
||||
static int tpacpi_resume_handler(struct platform_device *pdev)
|
||||
@ -543,6 +554,12 @@ static struct platform_driver tpacpi_pdriver = {
|
||||
.resume = tpacpi_resume_handler,
|
||||
};
|
||||
|
||||
static struct platform_driver tpacpi_hwmon_pdriver = {
|
||||
.driver = {
|
||||
.name = IBM_HWMON_DRVR_NAME,
|
||||
.owner = THIS_MODULE,
|
||||
},
|
||||
};
|
||||
|
||||
/*************************************************************************
|
||||
* thinkpad-acpi driver attributes
|
||||
@ -692,6 +709,8 @@ static int parse_strtoul(const char *buf,
|
||||
{
|
||||
char *endp;
|
||||
|
||||
while (*buf && isspace(*buf))
|
||||
buf++;
|
||||
*value = simple_strtoul(buf, &endp, 0);
|
||||
while (*endp && isspace(*endp))
|
||||
endp++;
|
||||
@ -989,6 +1008,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
|
||||
|
||||
int res, i;
|
||||
int status;
|
||||
int hkeyv;
|
||||
|
||||
vdbg_printk(TPACPI_DBG_INIT, "initializing hotkey subdriver\n");
|
||||
|
||||
@ -1014,18 +1034,35 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
|
||||
return res;
|
||||
|
||||
/* mask not supported on 570, 600e/x, 770e, 770x, A21e, A2xm/p,
|
||||
A30, R30, R31, T20-22, X20-21, X22-24 */
|
||||
tp_features.hotkey_mask =
|
||||
acpi_evalf(hkey_handle, NULL, "DHKN", "qv");
|
||||
A30, R30, R31, T20-22, X20-21, X22-24. Detected by checking
|
||||
for HKEY interface version 0x100 */
|
||||
if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) {
|
||||
if ((hkeyv >> 8) != 1) {
|
||||
printk(IBM_ERR "unknown version of the "
|
||||
"HKEY interface: 0x%x\n", hkeyv);
|
||||
printk(IBM_ERR "please report this to %s\n",
|
||||
IBM_MAIL);
|
||||
} else {
|
||||
/*
|
||||
* MHKV 0x100 in A31, R40, R40e,
|
||||
* T4x, X31, and later
|
||||
* */
|
||||
tp_features.hotkey_mask = 1;
|
||||
}
|
||||
}
|
||||
|
||||
vdbg_printk(TPACPI_DBG_INIT, "hotkey masks are %s\n",
|
||||
str_supported(tp_features.hotkey_mask));
|
||||
|
||||
if (tp_features.hotkey_mask) {
|
||||
/* MHKA available in A31, R40, R40e, T4x, X31, and later */
|
||||
if (!acpi_evalf(hkey_handle, &hotkey_all_mask,
|
||||
"MHKA", "qd"))
|
||||
"MHKA", "qd")) {
|
||||
printk(IBM_ERR
|
||||
"missing MHKA handler, "
|
||||
"please report this to %s\n",
|
||||
IBM_MAIL);
|
||||
hotkey_all_mask = 0x080cU; /* FN+F12, FN+F4, FN+F3 */
|
||||
}
|
||||
}
|
||||
|
||||
res = hotkey_get(&hotkey_orig_status, &hotkey_orig_mask);
|
||||
@ -1131,6 +1168,8 @@ static void tpacpi_input_send_key(unsigned int scancode,
|
||||
unsigned int keycode)
|
||||
{
|
||||
if (keycode != KEY_RESERVED) {
|
||||
mutex_lock(&tpacpi_inputdev_send_mutex);
|
||||
|
||||
input_report_key(tpacpi_inputdev, keycode, 1);
|
||||
if (keycode == KEY_UNKNOWN)
|
||||
input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN,
|
||||
@ -1142,6 +1181,8 @@ static void tpacpi_input_send_key(unsigned int scancode,
|
||||
input_event(tpacpi_inputdev, EV_MSC, MSC_SCAN,
|
||||
scancode);
|
||||
input_sync(tpacpi_inputdev);
|
||||
|
||||
mutex_unlock(&tpacpi_inputdev_send_mutex);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1149,18 +1190,47 @@ static void tpacpi_input_send_radiosw(void)
|
||||
{
|
||||
int wlsw;
|
||||
|
||||
if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&wlsw))
|
||||
mutex_lock(&tpacpi_inputdev_send_mutex);
|
||||
|
||||
if (tp_features.hotkey_wlsw && !hotkey_get_wlsw(&wlsw)) {
|
||||
input_report_switch(tpacpi_inputdev,
|
||||
SW_RADIO, !!wlsw);
|
||||
input_sync(tpacpi_inputdev);
|
||||
}
|
||||
|
||||
mutex_unlock(&tpacpi_inputdev_send_mutex);
|
||||
}
|
||||
|
||||
static void hotkey_notify(struct ibm_struct *ibm, u32 event)
|
||||
{
|
||||
u32 hkey;
|
||||
unsigned int keycode, scancode;
|
||||
int send_acpi_ev = 0;
|
||||
int send_acpi_ev;
|
||||
int ignore_acpi_ev;
|
||||
|
||||
if (event != 0x80) {
|
||||
printk(IBM_ERR "unknown HKEY notification event %d\n", event);
|
||||
/* forward it to userspace, maybe it knows how to handle it */
|
||||
acpi_bus_generate_netlink_event(ibm->acpi->device->pnp.device_class,
|
||||
ibm->acpi->device->dev.bus_id,
|
||||
event, 0);
|
||||
return;
|
||||
}
|
||||
|
||||
while (1) {
|
||||
if (!acpi_evalf(hkey_handle, &hkey, "MHKP", "d")) {
|
||||
printk(IBM_ERR "failed to retrieve HKEY event\n");
|
||||
return;
|
||||
}
|
||||
|
||||
if (hkey == 0) {
|
||||
/* queue empty */
|
||||
return;
|
||||
}
|
||||
|
||||
send_acpi_ev = 0;
|
||||
ignore_acpi_ev = 0;
|
||||
|
||||
if (event == 0x80 && acpi_evalf(hkey_handle, &hkey, "MHKP", "d")) {
|
||||
switch (hkey >> 12) {
|
||||
case 1:
|
||||
/* 0x1000-0x1FFF: key presses */
|
||||
@ -1182,9 +1252,11 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
|
||||
* eat up known LID events */
|
||||
if (hkey != 0x5001 && hkey != 0x5002) {
|
||||
printk(IBM_ERR
|
||||
"unknown LID-related hotkey event: 0x%04x\n",
|
||||
hkey);
|
||||
"unknown LID-related HKEY event: 0x%04x\n",
|
||||
hkey);
|
||||
send_acpi_ev = 1;
|
||||
} else {
|
||||
ignore_acpi_ev = 1;
|
||||
}
|
||||
break;
|
||||
case 7:
|
||||
@ -1202,21 +1274,18 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
|
||||
printk(IBM_NOTICE "unhandled HKEY event 0x%04x\n", hkey);
|
||||
send_acpi_ev = 1;
|
||||
}
|
||||
} else {
|
||||
printk(IBM_ERR "unknown hotkey notification event %d\n", event);
|
||||
hkey = 0;
|
||||
send_acpi_ev = 1;
|
||||
}
|
||||
|
||||
/* Legacy events */
|
||||
if (send_acpi_ev || hotkey_report_mode < 2)
|
||||
acpi_bus_generate_proc_event(ibm->acpi->device, event, hkey);
|
||||
/* Legacy events */
|
||||
if (!ignore_acpi_ev && (send_acpi_ev || hotkey_report_mode < 2)) {
|
||||
acpi_bus_generate_proc_event(ibm->acpi->device, event, hkey);
|
||||
}
|
||||
|
||||
/* netlink events */
|
||||
if (send_acpi_ev) {
|
||||
acpi_bus_generate_netlink_event(ibm->acpi->device->pnp.device_class,
|
||||
ibm->acpi->device->dev.bus_id,
|
||||
event, hkey);
|
||||
/* netlink events */
|
||||
if (!ignore_acpi_ev && send_acpi_ev) {
|
||||
acpi_bus_generate_netlink_event(ibm->acpi->device->pnp.device_class,
|
||||
ibm->acpi->device->dev.bus_id,
|
||||
event, hkey);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -2812,7 +2881,7 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
|
||||
|
||||
switch(thermal_read_mode) {
|
||||
case TPACPI_THERMAL_TPEC_16:
|
||||
res = sysfs_create_group(&tpacpi_pdev->dev.kobj,
|
||||
res = sysfs_create_group(&tpacpi_sensors_pdev->dev.kobj,
|
||||
&thermal_temp_input16_group);
|
||||
if (res)
|
||||
return res;
|
||||
@ -2820,7 +2889,7 @@ static int __init thermal_init(struct ibm_init_struct *iibm)
|
||||
case TPACPI_THERMAL_TPEC_8:
|
||||
case TPACPI_THERMAL_ACPI_TMP07:
|
||||
case TPACPI_THERMAL_ACPI_UPDT:
|
||||
res = sysfs_create_group(&tpacpi_pdev->dev.kobj,
|
||||
res = sysfs_create_group(&tpacpi_sensors_pdev->dev.kobj,
|
||||
&thermal_temp_input8_group);
|
||||
if (res)
|
||||
return res;
|
||||
@ -2837,13 +2906,13 @@ static void thermal_exit(void)
|
||||
{
|
||||
switch(thermal_read_mode) {
|
||||
case TPACPI_THERMAL_TPEC_16:
|
||||
sysfs_remove_group(&tpacpi_pdev->dev.kobj,
|
||||
sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj,
|
||||
&thermal_temp_input16_group);
|
||||
break;
|
||||
case TPACPI_THERMAL_TPEC_8:
|
||||
case TPACPI_THERMAL_ACPI_TMP07:
|
||||
case TPACPI_THERMAL_ACPI_UPDT:
|
||||
sysfs_remove_group(&tpacpi_pdev->dev.kobj,
|
||||
sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj,
|
||||
&thermal_temp_input16_group);
|
||||
break;
|
||||
case TPACPI_THERMAL_NONE:
|
||||
@ -3626,7 +3695,7 @@ static struct device_attribute dev_attr_fan_fan1_input =
|
||||
__ATTR(fan1_input, S_IRUGO,
|
||||
fan_fan1_input_show, NULL);
|
||||
|
||||
/* sysfs fan fan_watchdog (driver) ------------------------------------- */
|
||||
/* sysfs fan fan_watchdog (hwmon driver) ------------------------------- */
|
||||
static ssize_t fan_fan_watchdog_show(struct device_driver *drv,
|
||||
char *buf)
|
||||
{
|
||||
@ -3768,10 +3837,10 @@ static int __init fan_init(struct ibm_init_struct *iibm)
|
||||
|
||||
if (fan_status_access_mode != TPACPI_FAN_NONE ||
|
||||
fan_control_access_mode != TPACPI_FAN_WR_NONE) {
|
||||
rc = sysfs_create_group(&tpacpi_pdev->dev.kobj,
|
||||
rc = sysfs_create_group(&tpacpi_sensors_pdev->dev.kobj,
|
||||
&fan_attr_group);
|
||||
if (!(rc < 0))
|
||||
rc = driver_create_file(&tpacpi_pdriver.driver,
|
||||
rc = driver_create_file(&tpacpi_hwmon_pdriver.driver,
|
||||
&driver_attr_fan_watchdog);
|
||||
if (rc < 0)
|
||||
return rc;
|
||||
@ -3854,8 +3923,8 @@ static void fan_exit(void)
|
||||
vdbg_printk(TPACPI_DBG_EXIT, "cancelling any pending fan watchdog tasks\n");
|
||||
|
||||
/* FIXME: can we really do this unconditionally? */
|
||||
sysfs_remove_group(&tpacpi_pdev->dev.kobj, &fan_attr_group);
|
||||
driver_remove_file(&tpacpi_pdriver.driver, &driver_attr_fan_watchdog);
|
||||
sysfs_remove_group(&tpacpi_sensors_pdev->dev.kobj, &fan_attr_group);
|
||||
driver_remove_file(&tpacpi_hwmon_pdriver.driver, &driver_attr_fan_watchdog);
|
||||
|
||||
cancel_delayed_work(&fan_watchdog_task);
|
||||
flush_scheduled_work();
|
||||
@ -3888,6 +3957,9 @@ static void fan_watchdog_fire(struct work_struct *ignored)
|
||||
{
|
||||
int rc;
|
||||
|
||||
if (tpacpi_lifecycle != TPACPI_LIFE_RUNNING)
|
||||
return;
|
||||
|
||||
printk(IBM_NOTICE "fan watchdog: enabling fan\n");
|
||||
rc = fan_set_enable();
|
||||
if (rc < 0) {
|
||||
@ -3908,7 +3980,8 @@ static void fan_watchdog_reset(void)
|
||||
if (fan_watchdog_active)
|
||||
cancel_delayed_work(&fan_watchdog_task);
|
||||
|
||||
if (fan_watchdog_maxinterval > 0) {
|
||||
if (fan_watchdog_maxinterval > 0 &&
|
||||
tpacpi_lifecycle != TPACPI_LIFE_EXITING) {
|
||||
fan_watchdog_active = 1;
|
||||
if (!schedule_delayed_work(&fan_watchdog_task,
|
||||
msecs_to_jiffies(fan_watchdog_maxinterval
|
||||
@ -4302,6 +4375,19 @@ static struct ibm_struct fan_driver_data = {
|
||||
****************************************************************************
|
||||
****************************************************************************/
|
||||
|
||||
/* sysfs name ---------------------------------------------------------- */
|
||||
static ssize_t thinkpad_acpi_pdev_name_show(struct device *dev,
|
||||
struct device_attribute *attr,
|
||||
char *buf)
|
||||
{
|
||||
return snprintf(buf, PAGE_SIZE, "%s\n", IBM_NAME);
|
||||
}
|
||||
|
||||
static struct device_attribute dev_attr_thinkpad_acpi_pdev_name =
|
||||
__ATTR(name, S_IRUGO, thinkpad_acpi_pdev_name_show, NULL);
|
||||
|
||||
/* --------------------------------------------------------------------- */
|
||||
|
||||
/* /proc support */
|
||||
static struct proc_dir_entry *proc_dir;
|
||||
|
||||
@ -4674,6 +4760,8 @@ static int __init thinkpad_acpi_module_init(void)
|
||||
{
|
||||
int ret, i;
|
||||
|
||||
tpacpi_lifecycle = TPACPI_LIFE_INIT;
|
||||
|
||||
/* Parameter checking */
|
||||
if (hotkey_report_mode > 2)
|
||||
return -EINVAL;
|
||||
@ -4702,19 +4790,31 @@ static int __init thinkpad_acpi_module_init(void)
|
||||
|
||||
ret = platform_driver_register(&tpacpi_pdriver);
|
||||
if (ret) {
|
||||
printk(IBM_ERR "unable to register platform driver\n");
|
||||
printk(IBM_ERR "unable to register main platform driver\n");
|
||||
thinkpad_acpi_module_exit();
|
||||
return ret;
|
||||
}
|
||||
tp_features.platform_drv_registered = 1;
|
||||
|
||||
ret = platform_driver_register(&tpacpi_hwmon_pdriver);
|
||||
if (ret) {
|
||||
printk(IBM_ERR "unable to register hwmon platform driver\n");
|
||||
thinkpad_acpi_module_exit();
|
||||
return ret;
|
||||
}
|
||||
tp_features.sensors_pdrv_registered = 1;
|
||||
|
||||
ret = tpacpi_create_driver_attributes(&tpacpi_pdriver.driver);
|
||||
if (!ret) {
|
||||
tp_features.platform_drv_attrs_registered = 1;
|
||||
ret = tpacpi_create_driver_attributes(&tpacpi_hwmon_pdriver.driver);
|
||||
}
|
||||
if (ret) {
|
||||
printk(IBM_ERR "unable to create sysfs driver attributes\n");
|
||||
thinkpad_acpi_module_exit();
|
||||
return ret;
|
||||
}
|
||||
tp_features.platform_drv_attrs_registered = 1;
|
||||
tp_features.sensors_pdrv_attrs_registered = 1;
|
||||
|
||||
|
||||
/* Device initialization */
|
||||
@ -4727,7 +4827,26 @@ static int __init thinkpad_acpi_module_init(void)
|
||||
thinkpad_acpi_module_exit();
|
||||
return ret;
|
||||
}
|
||||
tpacpi_hwmon = hwmon_device_register(&tpacpi_pdev->dev);
|
||||
tpacpi_sensors_pdev = platform_device_register_simple(
|
||||
IBM_HWMON_DRVR_NAME,
|
||||
-1, NULL, 0);
|
||||
if (IS_ERR(tpacpi_sensors_pdev)) {
|
||||
ret = PTR_ERR(tpacpi_sensors_pdev);
|
||||
tpacpi_sensors_pdev = NULL;
|
||||
printk(IBM_ERR "unable to register hwmon platform device\n");
|
||||
thinkpad_acpi_module_exit();
|
||||
return ret;
|
||||
}
|
||||
ret = device_create_file(&tpacpi_sensors_pdev->dev,
|
||||
&dev_attr_thinkpad_acpi_pdev_name);
|
||||
if (ret) {
|
||||
printk(IBM_ERR
|
||||
"unable to create sysfs hwmon device attributes\n");
|
||||
thinkpad_acpi_module_exit();
|
||||
return ret;
|
||||
}
|
||||
tp_features.sensors_pdev_attrs_registered = 1;
|
||||
tpacpi_hwmon = hwmon_device_register(&tpacpi_sensors_pdev->dev);
|
||||
if (IS_ERR(tpacpi_hwmon)) {
|
||||
ret = PTR_ERR(tpacpi_hwmon);
|
||||
tpacpi_hwmon = NULL;
|
||||
@ -4735,6 +4854,7 @@ static int __init thinkpad_acpi_module_init(void)
|
||||
thinkpad_acpi_module_exit();
|
||||
return ret;
|
||||
}
|
||||
mutex_init(&tpacpi_inputdev_send_mutex);
|
||||
tpacpi_inputdev = input_allocate_device();
|
||||
if (!tpacpi_inputdev) {
|
||||
printk(IBM_ERR "unable to allocate input device\n");
|
||||
@ -4769,6 +4889,7 @@ static int __init thinkpad_acpi_module_init(void)
|
||||
tp_features.input_device_registered = 1;
|
||||
}
|
||||
|
||||
tpacpi_lifecycle = TPACPI_LIFE_RUNNING;
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -4776,6 +4897,8 @@ static void thinkpad_acpi_module_exit(void)
|
||||
{
|
||||
struct ibm_struct *ibm, *itmp;
|
||||
|
||||
tpacpi_lifecycle = TPACPI_LIFE_EXITING;
|
||||
|
||||
list_for_each_entry_safe_reverse(ibm, itmp,
|
||||
&tpacpi_all_drivers,
|
||||
all_drivers) {
|
||||
@ -4794,12 +4917,22 @@ static void thinkpad_acpi_module_exit(void)
|
||||
if (tpacpi_hwmon)
|
||||
hwmon_device_unregister(tpacpi_hwmon);
|
||||
|
||||
if (tp_features.sensors_pdev_attrs_registered)
|
||||
device_remove_file(&tpacpi_sensors_pdev->dev,
|
||||
&dev_attr_thinkpad_acpi_pdev_name);
|
||||
if (tpacpi_sensors_pdev)
|
||||
platform_device_unregister(tpacpi_sensors_pdev);
|
||||
if (tpacpi_pdev)
|
||||
platform_device_unregister(tpacpi_pdev);
|
||||
|
||||
if (tp_features.sensors_pdrv_attrs_registered)
|
||||
tpacpi_remove_driver_attributes(&tpacpi_hwmon_pdriver.driver);
|
||||
if (tp_features.platform_drv_attrs_registered)
|
||||
tpacpi_remove_driver_attributes(&tpacpi_pdriver.driver);
|
||||
|
||||
if (tp_features.sensors_pdrv_registered)
|
||||
platform_driver_unregister(&tpacpi_hwmon_pdriver);
|
||||
|
||||
if (tp_features.platform_drv_registered)
|
||||
platform_driver_unregister(&tpacpi_pdriver);
|
||||
|
||||
|
@ -58,13 +58,14 @@
|
||||
|
||||
#define IBM_NAME "thinkpad"
|
||||
#define IBM_DESC "ThinkPad ACPI Extras"
|
||||
#define IBM_FILE "thinkpad_acpi"
|
||||
#define IBM_FILE IBM_NAME "_acpi"
|
||||
#define IBM_URL "http://ibm-acpi.sf.net/"
|
||||
#define IBM_MAIL "ibm-acpi-devel@lists.sourceforge.net"
|
||||
|
||||
#define IBM_PROC_DIR "ibm"
|
||||
#define IBM_ACPI_EVENT_PREFIX "ibm"
|
||||
#define IBM_DRVR_NAME IBM_FILE
|
||||
#define IBM_HWMON_DRVR_NAME IBM_NAME "_hwmon"
|
||||
|
||||
#define IBM_LOG IBM_FILE ": "
|
||||
#define IBM_ERR KERN_ERR IBM_LOG
|
||||
@ -171,6 +172,7 @@ static int parse_strtoul(const char *buf, unsigned long max,
|
||||
|
||||
/* Device model */
|
||||
static struct platform_device *tpacpi_pdev;
|
||||
static struct platform_device *tpacpi_sensors_pdev;
|
||||
static struct device *tpacpi_hwmon;
|
||||
static struct platform_driver tpacpi_pdriver;
|
||||
static struct input_dev *tpacpi_inputdev;
|
||||
@ -233,22 +235,25 @@ struct ibm_init_struct {
|
||||
|
||||
static struct {
|
||||
#ifdef CONFIG_THINKPAD_ACPI_BAY
|
||||
u16 bay_status:1;
|
||||
u16 bay_eject:1;
|
||||
u16 bay_status2:1;
|
||||
u16 bay_eject2:1;
|
||||
u32 bay_status:1;
|
||||
u32 bay_eject:1;
|
||||
u32 bay_status2:1;
|
||||
u32 bay_eject2:1;
|
||||
#endif
|
||||
u16 bluetooth:1;
|
||||
u16 hotkey:1;
|
||||
u16 hotkey_mask:1;
|
||||
u16 hotkey_wlsw:1;
|
||||
u16 light:1;
|
||||
u16 light_status:1;
|
||||
u16 wan:1;
|
||||
u16 fan_ctrl_status_undef:1;
|
||||
u16 input_device_registered:1;
|
||||
u16 platform_drv_registered:1;
|
||||
u16 platform_drv_attrs_registered:1;
|
||||
u32 bluetooth:1;
|
||||
u32 hotkey:1;
|
||||
u32 hotkey_mask:1;
|
||||
u32 hotkey_wlsw:1;
|
||||
u32 light:1;
|
||||
u32 light_status:1;
|
||||
u32 wan:1;
|
||||
u32 fan_ctrl_status_undef:1;
|
||||
u32 input_device_registered:1;
|
||||
u32 platform_drv_registered:1;
|
||||
u32 platform_drv_attrs_registered:1;
|
||||
u32 sensors_pdrv_registered:1;
|
||||
u32 sensors_pdrv_attrs_registered:1;
|
||||
u32 sensors_pdev_attrs_registered:1;
|
||||
} tp_features;
|
||||
|
||||
struct thinkpad_id_data {
|
||||
|
@ -1858,14 +1858,6 @@ static void ipw2100_down(struct ipw2100_priv *priv)
|
||||
|
||||
modify_acceptable_latency("ipw2100", INFINITE_LATENCY);
|
||||
|
||||
#ifdef ACPI_CSTATE_LIMIT_DEFINED
|
||||
if (priv->config & CFG_C3_DISABLED) {
|
||||
IPW_DEBUG_INFO(": Resetting C3 transitions.\n");
|
||||
acpi_set_cstate_limit(priv->cstate_limit);
|
||||
priv->config &= ~CFG_C3_DISABLED;
|
||||
}
|
||||
#endif
|
||||
|
||||
/* We have to signal any supplicant if we are disassociating */
|
||||
if (associated)
|
||||
wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);
|
||||
@ -2091,14 +2083,6 @@ static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)
|
||||
/* RF_KILL is now enabled (else we wouldn't be here) */
|
||||
priv->status |= STATUS_RF_KILL_HW;
|
||||
|
||||
#ifdef ACPI_CSTATE_LIMIT_DEFINED
|
||||
if (priv->config & CFG_C3_DISABLED) {
|
||||
IPW_DEBUG_INFO(": Resetting C3 transitions.\n");
|
||||
acpi_set_cstate_limit(priv->cstate_limit);
|
||||
priv->config &= ~CFG_C3_DISABLED;
|
||||
}
|
||||
#endif
|
||||
|
||||
/* Make sure the RF Kill check timer is running */
|
||||
priv->stop_rf_kill = 0;
|
||||
cancel_delayed_work(&priv->rf_kill);
|
||||
@ -2329,23 +2313,10 @@ static void ipw2100_corruption_detected(struct ipw2100_priv *priv, int i)
|
||||
u32 match, reg;
|
||||
int j;
|
||||
#endif
|
||||
#ifdef ACPI_CSTATE_LIMIT_DEFINED
|
||||
int limit;
|
||||
#endif
|
||||
|
||||
IPW_DEBUG_INFO(": PCI latency error detected at 0x%04zX.\n",
|
||||
i * sizeof(struct ipw2100_status));
|
||||
|
||||
#ifdef ACPI_CSTATE_LIMIT_DEFINED
|
||||
IPW_DEBUG_INFO(": Disabling C3 transitions.\n");
|
||||
limit = acpi_get_cstate_limit();
|
||||
if (limit > 2) {
|
||||
priv->cstate_limit = limit;
|
||||
acpi_set_cstate_limit(2);
|
||||
priv->config |= CFG_C3_DISABLED;
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef IPW2100_DEBUG_C3
|
||||
/* Halt the fimrware so we can get a good image */
|
||||
write_register(priv->net_dev, IPW_REG_RESET_REG,
|
||||
|
@ -479,7 +479,6 @@ enum {
|
||||
#define CFG_ASSOCIATE (1<<6)
|
||||
#define CFG_FIXED_RATE (1<<7)
|
||||
#define CFG_ADHOC_CREATE (1<<8)
|
||||
#define CFG_C3_DISABLED (1<<9)
|
||||
#define CFG_PASSIVE_SCAN (1<<10)
|
||||
#ifdef CONFIG_IPW2100_MONITOR
|
||||
#define CFG_CRC_CHECK (1<<11)
|
||||
@ -508,7 +507,6 @@ struct ipw2100_priv {
|
||||
u8 bssid[ETH_ALEN];
|
||||
u8 channel;
|
||||
int last_mode;
|
||||
int cstate_limit;
|
||||
|
||||
unsigned long connect_start;
|
||||
unsigned long last_reset;
|
||||
|
@ -71,9 +71,9 @@ u32 acpi_hw_get_mode(void);
|
||||
struct acpi_bit_register_info *acpi_hw_get_bit_register_info(u32 register_id);
|
||||
|
||||
acpi_status
|
||||
acpi_hw_register_read(u8 use_lock, u32 register_id, u32 * return_value);
|
||||
acpi_hw_register_read(u32 register_id, u32 * return_value);
|
||||
|
||||
acpi_status acpi_hw_register_write(u8 use_lock, u32 register_id, u32 value);
|
||||
acpi_status acpi_hw_register_write(u32 register_id, u32 value);
|
||||
|
||||
acpi_status
|
||||
acpi_hw_low_level_read(u32 width,
|
||||
|
@ -264,7 +264,6 @@ struct acpi_device_wakeup_flags {
|
||||
|
||||
struct acpi_device_wakeup_state {
|
||||
u8 enabled:1;
|
||||
u8 active:1;
|
||||
};
|
||||
|
||||
struct acpi_device_wakeup {
|
||||
@ -333,6 +332,7 @@ int acpi_bus_get_power(acpi_handle handle, int *state);
|
||||
int acpi_bus_set_power(acpi_handle handle, int state);
|
||||
#ifdef CONFIG_ACPI_PROC_EVENT
|
||||
int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data);
|
||||
int acpi_bus_generate_proc_event4(const char *class, const char *bid, u8 type, int data);
|
||||
int acpi_bus_receive_event(struct acpi_bus_event *event);
|
||||
#else
|
||||
static inline int acpi_bus_generate_proc_event(struct acpi_device *device, u8 type, int data)
|
||||
|
@ -314,6 +314,8 @@ acpi_resource_to_address64(struct acpi_resource *resource,
|
||||
*/
|
||||
acpi_status acpi_get_register(u32 register_id, u32 * return_value);
|
||||
|
||||
acpi_status acpi_get_register_unlocked(u32 register_id, u32 *return_value);
|
||||
|
||||
acpi_status acpi_set_register(u32 register_id, u32 value);
|
||||
|
||||
acpi_status
|
||||
|
@ -3,6 +3,7 @@
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/cpu.h>
|
||||
#include <linux/cpuidle.h>
|
||||
|
||||
#include <asm/acpi.h>
|
||||
|
||||
@ -75,7 +76,9 @@ struct acpi_processor_cx {
|
||||
};
|
||||
|
||||
struct acpi_processor_power {
|
||||
struct cpuidle_device dev;
|
||||
struct acpi_processor_cx *state;
|
||||
struct acpi_processor_cx *bm_state;
|
||||
unsigned long bm_check_timestamp;
|
||||
u32 default_state;
|
||||
u32 bm_activity;
|
||||
@ -199,6 +202,7 @@ struct acpi_processor_flags {
|
||||
u8 bm_check:1;
|
||||
u8 has_cst:1;
|
||||
u8 power_setup_done:1;
|
||||
u8 bm_rld_set:1;
|
||||
};
|
||||
|
||||
struct acpi_processor {
|
||||
@ -322,6 +326,7 @@ int acpi_processor_power_exit(struct acpi_processor *pr,
|
||||
struct acpi_device *device);
|
||||
int acpi_processor_suspend(struct acpi_device * device, pm_message_t state);
|
||||
int acpi_processor_resume(struct acpi_device * device);
|
||||
extern struct cpuidle_driver acpi_idle_driver;
|
||||
|
||||
/* in processor_thermal.c */
|
||||
int acpi_processor_get_limit_info(struct acpi_processor *pr);
|
||||
|
@ -189,32 +189,6 @@ extern int ec_transaction(u8 command,
|
||||
extern int acpi_blacklisted(void);
|
||||
extern void acpi_bios_year(char *s);
|
||||
|
||||
#define ACPI_CSTATE_LIMIT_DEFINED /* for driver builds */
|
||||
#ifdef CONFIG_ACPI
|
||||
|
||||
/*
|
||||
* Set highest legal C-state
|
||||
* 0: C0 okay, but not C1
|
||||
* 1: C1 okay, but not C2
|
||||
* 2: C2 okay, but not C3 etc.
|
||||
*/
|
||||
|
||||
extern unsigned int max_cstate;
|
||||
|
||||
static inline unsigned int acpi_get_cstate_limit(void)
|
||||
{
|
||||
return max_cstate;
|
||||
}
|
||||
static inline void acpi_set_cstate_limit(unsigned int new_limit)
|
||||
{
|
||||
max_cstate = new_limit;
|
||||
return;
|
||||
}
|
||||
#else
|
||||
static inline unsigned int acpi_get_cstate_limit(void) { return 0; }
|
||||
static inline void acpi_set_cstate_limit(unsigned int new_limit) { return; }
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_ACPI_NUMA
|
||||
int acpi_get_pxm(acpi_handle handle);
|
||||
int acpi_get_node(acpi_handle *handle);
|
||||
|
include/linux/cpuidle.h (new file, 180 lines)
@ -0,0 +1,180 @@
|
||||
/*
|
||||
* cpuidle.h - a generic framework for CPU idle power management
|
||||
*
|
||||
* (C) 2007 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
|
||||
* Shaohua Li <shaohua.li@intel.com>
|
||||
* Adam Belay <abelay@novell.com>
|
||||
*
|
||||
* This code is licenced under the GPL.
|
||||
*/
|
||||
|
||||
#ifndef _LINUX_CPUIDLE_H
|
||||
#define _LINUX_CPUIDLE_H
|
||||
|
||||
#include <linux/percpu.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/kobject.h>
|
||||
#include <linux/completion.h>
|
||||
|
||||
#define CPUIDLE_STATE_MAX 8
|
||||
#define CPUIDLE_NAME_LEN 16
|
||||
|
||||
struct cpuidle_device;
|
||||
|
||||
|
||||
/****************************
|
||||
* CPUIDLE DEVICE INTERFACE *
|
||||
****************************/
|
||||
|
||||
struct cpuidle_state {
|
||||
char name[CPUIDLE_NAME_LEN];
|
||||
void *driver_data;
|
||||
|
||||
unsigned int flags;
|
||||
unsigned int exit_latency; /* in US */
|
||||
unsigned int power_usage; /* in mW */
|
||||
unsigned int target_residency; /* in US */
|
||||
|
||||
unsigned int usage;
|
||||
unsigned int time; /* in US */
|
||||
|
||||
int (*enter) (struct cpuidle_device *dev,
|
||||
struct cpuidle_state *state);
|
||||
};
|
||||
|
||||
/* Idle State Flags */
|
||||
#define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
|
||||
#define CPUIDLE_FLAG_CHECK_BM (0x02) /* BM activity will exit state */
|
||||
#define CPUIDLE_FLAG_SHALLOW (0x10) /* low latency, minimal savings */
|
||||
#define CPUIDLE_FLAG_BALANCED (0x20) /* medium latency, moderate savings */
|
||||
#define CPUIDLE_FLAG_DEEP (0x40) /* high latency, large savings */
|
||||
|
||||
#define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000)
|
||||
|
||||
/**
|
||||
* cpuidle_get_statedata - retrieves private driver state data
|
||||
* @state: the state
|
||||
*/
|
||||
static inline void * cpuidle_get_statedata(struct cpuidle_state *state)
|
||||
{
|
||||
return state->driver_data;
|
||||
}
|
||||
|
||||
/**
|
||||
* cpuidle_set_statedata - stores private driver state data
|
||||
* @state: the state
|
||||
* @data: the private data
|
||||
*/
|
||||
static inline void
|
||||
cpuidle_set_statedata(struct cpuidle_state *state, void *data)
|
||||
{
|
||||
state->driver_data = data;
|
||||
}
|
||||
|
||||
struct cpuidle_state_kobj {
|
||||
struct cpuidle_state *state;
|
||||
struct completion kobj_unregister;
|
||||
struct kobject kobj;
|
||||
};
|
||||
|
||||
struct cpuidle_device {
|
||||
int enabled:1;
|
||||
unsigned int cpu;
|
||||
|
||||
int last_residency;
|
||||
int state_count;
|
||||
struct cpuidle_state states[CPUIDLE_STATE_MAX];
|
||||
struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX];
|
||||
struct cpuidle_state *last_state;
|
||||
|
||||
struct list_head device_list;
|
||||
struct kobject kobj;
|
||||
struct completion kobj_unregister;
|
||||
void *governor_data;
|
||||
};
|
||||
|
||||
DECLARE_PER_CPU(struct cpuidle_device *, cpuidle_devices);
|
||||
|
||||
/**
|
||||
* cpuidle_get_last_residency - retrieves the last state's residency time
|
||||
* @dev: the target CPU
|
||||
*
|
||||
* NOTE: this value is invalid if CPUIDLE_FLAG_TIME_VALID isn't set
|
||||
*/
|
||||
static inline int cpuidle_get_last_residency(struct cpuidle_device *dev)
|
||||
{
|
||||
return dev->last_residency;
|
||||
}
|
||||
|
||||
|
||||
/****************************
|
||||
* CPUIDLE DRIVER INTERFACE *
|
||||
****************************/
|
||||
|
||||
struct cpuidle_driver {
|
||||
char name[CPUIDLE_NAME_LEN];
|
||||
struct module *owner;
|
||||
};
|
||||
|
||||
#ifdef CONFIG_CPU_IDLE
|
||||
|
||||
extern int cpuidle_register_driver(struct cpuidle_driver *drv);
|
||||
extern void cpuidle_unregister_driver(struct cpuidle_driver *drv);
|
||||
extern int cpuidle_register_device(struct cpuidle_device *dev);
|
||||
extern void cpuidle_unregister_device(struct cpuidle_device *dev);
|
||||
|
||||
extern void cpuidle_pause_and_lock(void);
|
||||
extern void cpuidle_resume_and_unlock(void);
|
||||
extern int cpuidle_enable_device(struct cpuidle_device *dev);
|
||||
extern void cpuidle_disable_device(struct cpuidle_device *dev);
|
||||
|
||||
#else
|
||||
|
||||
static inline int cpuidle_register_driver(struct cpuidle_driver *drv)
|
||||
{return 0;}
|
||||
static inline void cpuidle_unregister_driver(struct cpuidle_driver *drv) { }
|
||||
static inline int cpuidle_register_device(struct cpuidle_device *dev)
|
||||
{return 0;}
|
||||
static inline void cpuidle_unregister_device(struct cpuidle_device *dev) { }
|
||||
|
||||
static inline void cpuidle_pause_and_lock(void) { }
|
||||
static inline void cpuidle_resume_and_unlock(void) { }
|
||||
static inline int cpuidle_enable_device(struct cpuidle_device *dev)
|
||||
{return 0;}
|
||||
static inline void cpuidle_disable_device(struct cpuidle_device *dev) { }
|
||||
|
||||
#endif
|
||||
|
||||
/******************************
|
||||
* CPUIDLE GOVERNOR INTERFACE *
|
||||
******************************/
|
||||
|
||||
struct cpuidle_governor {
|
||||
char name[CPUIDLE_NAME_LEN];
|
||||
struct list_head governor_list;
|
||||
unsigned int rating;
|
||||
|
||||
int (*enable) (struct cpuidle_device *dev);
|
||||
void (*disable) (struct cpuidle_device *dev);
|
||||
|
||||
int (*select) (struct cpuidle_device *dev);
|
||||
void (*reflect) (struct cpuidle_device *dev);
|
||||
|
||||
struct module *owner;
|
||||
};
|
||||
|
||||
#ifdef CONFIG_CPU_IDLE
|
||||
|
||||
extern int cpuidle_register_governor(struct cpuidle_governor *gov);
|
||||
extern void cpuidle_unregister_governor(struct cpuidle_governor *gov);
|
||||
|
||||
#else
|
||||
|
||||
static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
|
||||
{return 0;}
|
||||
static inline void cpuidle_unregister_governor(struct cpuidle_governor *gov) { }
|
||||
|
||||
#endif
|
||||
|
||||
#endif /* _LINUX_CPUIDLE_H */
|
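To make the intent of the new header clearer, here is a compressed, hypothetical sketch of how a low-level idle driver would consume this interface. The names my_idle_enter, my_driver and my_device are invented for illustration and the latency numbers are placeholders; this is not code from the merge:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/cpuidle.h>

/* Hypothetical enter hook: put the CPU into a shallow idle state and
 * report the time actually spent there, in the same microsecond units
 * used by the exit_latency/target_residency fields above. */
static int my_idle_enter(struct cpuidle_device *dev,
			 struct cpuidle_state *state)
{
	return 0;
}

static struct cpuidle_driver my_driver = {
	.name	= "example_idle",
	.owner	= THIS_MODULE,
};

static struct cpuidle_device my_device;

static int __init my_idle_init(void)
{
	int ret;

	ret = cpuidle_register_driver(&my_driver);
	if (ret)
		return ret;

	my_device.cpu = 0;
	my_device.state_count = 1;
	strcpy(my_device.states[0].name, "C1");
	my_device.states[0].exit_latency = 1;		/* uS */
	my_device.states[0].target_residency = 2;	/* uS */
	my_device.states[0].flags = CPUIDLE_FLAG_TIME_VALID;
	my_device.states[0].enter = my_idle_enter;

	ret = cpuidle_register_device(&my_device);
	if (ret)
		cpuidle_unregister_driver(&my_driver);
	return ret;
}
module_init(my_idle_init);

The in-tree user wired up by this merge is the ACPI processor idle code, which embeds a struct cpuidle_device in struct acpi_processor_power and exposes acpi_idle_driver, as the processor header hunk earlier in this diff shows.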
@ -40,6 +40,7 @@ enum tick_nohz_mode {
|
||||
* @idle_sleeps: Number of idle calls, where the sched tick was stopped
|
||||
* @idle_entrytime: Time when the idle call was entered
|
||||
* @idle_sleeptime: Sum of the time slept in idle with sched tick stopped
|
||||
* @sleep_length: Duration of the current idle sleep
|
||||
*/
|
||||
struct tick_sched {
|
||||
struct hrtimer sched_timer;
|
||||
@ -52,6 +53,7 @@ struct tick_sched {
|
||||
unsigned long idle_sleeps;
|
||||
ktime_t idle_entrytime;
|
||||
ktime_t idle_sleeptime;
|
||||
ktime_t sleep_length;
|
||||
unsigned long last_jiffies;
|
||||
unsigned long next_jiffies;
|
||||
ktime_t idle_expires;
|
||||
@ -100,10 +102,17 @@ static inline int tick_check_oneshot_change(int allow_nohz) { return 0; }
|
||||
extern void tick_nohz_stop_sched_tick(void);
|
||||
extern void tick_nohz_restart_sched_tick(void);
|
||||
extern void tick_nohz_update_jiffies(void);
|
||||
extern ktime_t tick_nohz_get_sleep_length(void);
|
||||
# else
|
||||
static inline void tick_nohz_stop_sched_tick(void) { }
|
||||
static inline void tick_nohz_restart_sched_tick(void) { }
|
||||
static inline void tick_nohz_update_jiffies(void) { }
|
||||
static inline ktime_t tick_nohz_get_sleep_length(void)
|
||||
{
|
||||
ktime_t len = { .tv64 = NSEC_PER_SEC/HZ };
|
||||
|
||||
return len;
|
||||
}
|
||||
# endif /* !NO_HZ */
|
||||
|
||||
#endif
|
||||
|
@ -153,6 +153,7 @@ void tick_nohz_stop_sched_tick(void)
|
||||
unsigned long seq, last_jiffies, next_jiffies, delta_jiffies, flags;
|
||||
struct tick_sched *ts;
|
||||
ktime_t last_update, expires, now, delta;
|
||||
struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
|
||||
int cpu;
|
||||
|
||||
local_irq_save(flags);
|
||||
@ -302,10 +303,25 @@ void tick_nohz_stop_sched_tick(void)
|
||||
out:
|
||||
ts->next_jiffies = next_jiffies;
|
||||
ts->last_jiffies = last_jiffies;
|
||||
ts->sleep_length = ktime_sub(dev->next_event, now);
|
||||
end:
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
/**
|
||||
* tick_nohz_get_sleep_length - return the length of the current sleep
|
||||
*
|
||||
* Called from power state control code with interrupts disabled
|
||||
*/
|
||||
ktime_t tick_nohz_get_sleep_length(void)
|
||||
{
|
||||
struct tick_sched *ts = &__get_cpu_var(tick_cpu_sched);
|
||||
|
||||
return ts->sleep_length;
|
||||
}
|
||||
|
||||
EXPORT_SYMBOL_GPL(tick_nohz_get_sleep_length);
|
||||
|
||||
/**
|
||||
* nohz_restart_sched_tick - restart the idle tick from the idle task
|
||||
*
|
||||
|
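The tick_nohz_get_sleep_length() export above is what lets a cpuidle governor estimate how long the CPU is likely to stay idle. A hypothetical governor select() hook (invented names, assuming the declaration is picked up from <linux/tick.h> as in mainline and that states are ordered from shallowest to deepest) might use it roughly as follows; this is an illustration, not code from this merge:

#include <linux/cpuidle.h>
#include <linux/tick.h>
#include <linux/ktime.h>

/* Hypothetical governor callback: pick the deepest state whose
 * target_residency still fits into the predicted idle period. */
static int example_select(struct cpuidle_device *dev)
{
	s64 predicted_us = ktime_to_us(tick_nohz_get_sleep_length());
	int i, chosen = 0;

	for (i = 0; i < dev->state_count; i++) {
		if (dev->states[i].target_residency <= predicted_us)
			chosen = i;
	}
	return chosen;
}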