Merge back earlier cpufreq changes for v4.11.
commit 56c7303e62
Documentation/cpu-freq/core.txt

@@ -8,6 +8,8 @@

    Dominik Brodowski  <linux@brodo.de>
    David Kimdon <dwhedon@debian.org>
    Rafael J. Wysocki <rafael.j.wysocki@intel.com>
    Viresh Kumar <viresh.kumar@linaro.org>

@@ -36,10 +38,11 @@ speed limits (like LCD drivers on ARM architecture). Additionally, the
kernel "constant" loops_per_jiffy is updated on frequency changes
here.

Reference counting is done by cpufreq_get_cpu and cpufreq_put_cpu,
which make sure that the cpufreq processor driver is correctly
registered with the core, and will not be unloaded until
cpufreq_put_cpu is called.
Reference counting of the cpufreq policies is done by cpufreq_cpu_get
and cpufreq_cpu_put, which make sure that the cpufreq driver is
correctly registered with the core, and will not be unloaded until
cpufreq_put_cpu is called. That also ensures that the respective cpufreq
policy doesn't get freed while being used.

2. CPUFreq notifiers
====================
@@ -69,18 +72,16 @@ CPUFreq policy notifier is called twice for a policy transition:

The phase is specified in the second argument to the notifier.

The third argument, a void *pointer, points to a struct cpufreq_policy
consisting of five values: cpu, min, max, policy and max_cpu_freq. min
and max are the lower and upper frequencies (in kHz) of the new
policy, policy the new policy, cpu the number of the affected CPU; and
max_cpu_freq the maximum supported CPU frequency. This value is given
for informational purposes only.
consisting of several values, including min, max (the lower and upper
frequencies (in kHz) of the new policy).


2.2 CPUFreq transition notifiers
--------------------------------

These are notified twice when the CPUfreq driver switches the CPU core
frequency and this change has any external implications.
These are notified twice for each online CPU in the policy, when the
CPUfreq driver switches the CPU core frequency and this change has
any external implications.

The second argument specifies the phase - CPUFREQ_PRECHANGE or
CPUFREQ_POSTCHANGE.
@@ -90,6 +91,7 @@ values:
cpu	- number of the affected CPU
old	- old frequency
new	- new frequency
flags	- flags of the cpufreq driver
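
As an illustration of the notifier interface described above, a minimal
transition-notifier module might look like the sketch below (the my_*
names are placeholders invented for this example, not part of any
existing driver):

  #include <linux/cpufreq.h>
  #include <linux/module.h>

  /* Called once per phase: CPUFREQ_PRECHANGE, then CPUFREQ_POSTCHANGE. */
  static int my_transition_cb(struct notifier_block *nb,
                              unsigned long phase, void *data)
  {
          struct cpufreq_freqs *freqs = data;

          if (phase == CPUFREQ_POSTCHANGE)
                  pr_info("cpu%u: %u kHz -> %u kHz\n",
                          freqs->cpu, freqs->old, freqs->new);

          return NOTIFY_OK;
  }

  static struct notifier_block my_nb = {
          .notifier_call = my_transition_cb,
  };

  static int __init my_notifier_init(void)
  {
          return cpufreq_register_notifier(&my_nb, CPUFREQ_TRANSITION_NOTIFIER);
  }
  module_init(my_notifier_init);
  MODULE_LICENSE("GPL");
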
3. CPUFreq Table Generation with Operating Performance Point (OPP)
==================================================================

Documentation/cpu-freq/cpu-drivers.txt

@@ -9,6 +9,8 @@

    Dominik Brodowski  <linux@brodo.de>
    Rafael J. Wysocki <rafael.j.wysocki@intel.com>
    Viresh Kumar <viresh.kumar@linaro.org>

@@ -49,49 +51,65 @@ using cpufreq_register_driver()

What shall this struct cpufreq_driver contain?

cpufreq_driver.name - The name of this driver.
.name - The name of this driver.

cpufreq_driver.init - A pointer to the per-CPU initialization
function.
.init - A pointer to the per-policy initialization function.

cpufreq_driver.verify - A pointer to a "verification" function.
.verify - A pointer to a "verification" function.

cpufreq_driver.setpolicy _or_
cpufreq_driver.target/
target_index - See below on the differences.
.setpolicy _or_ .fast_switch _or_ .target _or_ .target_index - See
below on the differences.

And optionally

cpufreq_driver.exit - A pointer to a per-CPU cleanup
function called during CPU_POST_DEAD
phase of cpu hotplug process.
.flags - Hints for the cpufreq core.

cpufreq_driver.stop_cpu - A pointer to a per-CPU stop function
called during CPU_DOWN_PREPARE phase of
cpu hotplug process.
.driver_data - cpufreq driver specific data.

cpufreq_driver.resume - A pointer to a per-CPU resume function
which is called with interrupts disabled
and _before_ the pre-suspend frequency
and/or policy is restored by a call to
->target/target_index or ->setpolicy.
.resolve_freq - Returns the most appropriate frequency for a target
frequency. Doesn't change the frequency though.

cpufreq_driver.attr - A pointer to a NULL-terminated list of
"struct freq_attr" which allow to
export values to sysfs.
.get_intermediate and target_intermediate - Used to switch to stable
frequency while changing CPU frequency.

cpufreq_driver.get_intermediate
and target_intermediate - Used to switch to stable frequency while
changing CPU frequency.
.get - Returns current frequency of the CPU.

.bios_limit - Returns HW/BIOS max frequency limitations for the CPU.

.exit - A pointer to a per-policy cleanup function called during
CPU_POST_DEAD phase of cpu hotplug process.

.stop_cpu - A pointer to a per-policy stop function called during
CPU_DOWN_PREPARE phase of cpu hotplug process.

.suspend - A pointer to a per-policy suspend function which is called
with interrupts disabled and _after_ the governor is stopped for the
policy.

.resume - A pointer to a per-policy resume function which is called
with interrupts disabled and _before_ the governor is started again.

.ready - A pointer to a per-policy ready function which is called after
the policy is fully initialized.

.attr - A pointer to a NULL-terminated list of "struct freq_attr" which
allow to export values to sysfs.

.boost_enabled - If set, boost frequencies are enabled.

.set_boost - A pointer to a per-policy function to enable/disable boost
frequencies.
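
Taken together, a skeletal driver registration might look like the
following sketch (the foo_* callbacks are placeholders, and the
cpufreq_generic_* helpers are optional conveniences provided by the
core):

  static struct cpufreq_driver foo_cpufreq_driver = {
          .name         = "foo-cpufreq",
          .init         = foo_cpufreq_init,         /* per-policy setup */
          .exit         = foo_cpufreq_exit,         /* per-policy cleanup */
          .verify       = cpufreq_generic_frequency_table_verify,
          .target_index = foo_cpufreq_target_index,
          .get          = cpufreq_generic_get,
          .attr         = cpufreq_generic_attr,
  };

  static int __init foo_cpufreq_probe(void)
  {
          return cpufreq_register_driver(&foo_cpufreq_driver);
  }
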
1.2 Per-CPU Initialization
--------------------------

Whenever a new CPU is registered with the device model, or after the
cpufreq driver registers itself, the per-CPU initialization function
cpufreq_driver.init is called. It takes a struct cpufreq_policy
*policy as argument. What to do now?
cpufreq driver registers itself, the per-policy initialization function
cpufreq_driver.init is called if no cpufreq policy existed for the CPU.
Note that the .init() and .exit() routines are called only once for the
policy and not for each CPU managed by the policy. It takes a struct
cpufreq_policy *policy as argument. What to do now?

If necessary, activate the CPUfreq support on your CPU.

@@ -117,47 +135,45 @@ policy->governor must contain the "default policy" for
cpufreq_driver.setpolicy or
cpufreq_driver.target/target_index is called
with these values.
policy->cpus	Update this with the masks of the
		(online + offline) CPUs that do DVFS
		along with this CPU (i.e. that share
		clock/voltage rails with it).

For setting some of these values (cpuinfo.min[max]_freq, policy->min[max]), the
frequency table helpers might be helpful. See the section 2 for more information
on them.

SMP systems normally have same clock source for a group of cpus. For these the
.init() would be called only once for the first online cpu. Here the .init()
routine must initialize policy->cpus with mask of all possible cpus (Online +
Offline) that share the clock. Then the core would copy this mask onto
policy->related_cpus and will reset policy->cpus to carry only online cpus.
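
As a sketch only, an .init() routine for an imaginary platform on which
all CPUs share one clock could look like this (foo_freq_table being the
driver's frequency table, as described in section 2):

  static int foo_cpufreq_init(struct cpufreq_policy *policy)
  {
          /* Every possible CPU shares the clock on this imaginary SoC. */
          cpumask_setall(policy->cpus);

          policy->cpuinfo.transition_latency = 300 * 1000;  /* 300 us, in ns */

          /* Validates the table and fills cpuinfo.min_freq/max_freq. */
          return cpufreq_table_validate_and_show(policy, foo_freq_table);
  }
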
1.3 verify
------------
----------

When the user decides a new policy (consisting of
"policy,governor,min,max") shall be set, this policy must be validated
so that incompatible values can be corrected. For verifying these
values, a frequency table helper and/or the
cpufreq_verify_within_limits(struct cpufreq_policy *policy, unsigned
int min_freq, unsigned int max_freq) function might be helpful. See
section 2 for details on frequency table helpers.
values, the cpufreq_verify_within_limits(struct cpufreq_policy *policy,
unsigned int min_freq, unsigned int max_freq) function might be helpful.
See section 2 for details on frequency table helpers.

You need to make sure that at least one valid frequency (or operating
range) is within policy->min and policy->max. If necessary, increase
policy->max first, and only if this is no solution, decrease policy->min.
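
A minimal .verify() implementation might simply clamp the requested
limits to the hardware range, for example:

  static int foo_cpufreq_verify(struct cpufreq_policy *policy)
  {
          cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
                                       policy->cpuinfo.max_freq);
          return 0;
  }
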
1.4 target/target_index or setpolicy?
----------------------------
1.4 target or target_index or setpolicy or fast_switch?
-------------------------------------------------------

Most cpufreq drivers or even most cpu frequency scaling algorithms
only allow the CPU to be set to one frequency. For these, you use the
->target/target_index call.
only allow the CPU frequency to be set to predefined fixed values. For
these, you use the ->target(), ->target_index() or ->fast_switch()
callbacks.

Some cpufreq-capable processors switch the frequency between certain
limits on their own. These shall use the ->setpolicy call
Some cpufreq capable processors switch the frequency between certain
limits on their own. These shall use the ->setpolicy() callback.


1.5. target/target_index
-------------
------------------------

The target_index call has two arguments: struct cpufreq_policy *policy,
and unsigned int index (into the exposed frequency table).
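
A sketch of a matching target_index() callback (foo_write_freq_register()
stands in for whatever hardware programming the driver actually needs):

  static int foo_cpufreq_target_index(struct cpufreq_policy *policy,
                                      unsigned int index)
  {
          /* The core has already mapped the target frequency to an index. */
          foo_write_freq_register(foo_freq_table[index].frequency);
          return 0;
  }
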
@@ -186,9 +202,20 @@ actual frequency must be determined using the following rules:
Here again the frequency table helper might assist you - see section 2
for details.

1.6. fast_switch
----------------

1.6 setpolicy
---------------
This function is used for frequency switching from scheduler's context.
Not all drivers are expected to implement it, as sleeping from within
this callback isn't allowed. This callback must be highly optimized to
do switching as fast as possible.

This function has two arguments: struct cpufreq_policy *policy and
unsigned int target_frequency.
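
The callback is expected to return the frequency actually set (0
indicates an error), so a sketch might be (again with a hypothetical
register write):

  static unsigned int foo_fast_switch(struct cpufreq_policy *policy,
                                      unsigned int target_freq)
  {
          unsigned int freq = clamp_val(target_freq, policy->min, policy->max);

          /* No sleeping allowed here - this runs from scheduler context. */
          foo_write_freq_register(freq);
          return freq;
  }
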
1.7 setpolicy
-------------

The setpolicy call only takes a struct cpufreq_policy *policy as
argument. You need to set the lower limit of the in-processor or

@@ -198,7 +225,7 @@ setting when policy->policy is CPUFREQ_POLICY_PERFORMANCE, and a
powersaving-oriented setting when CPUFREQ_POLICY_POWERSAVE. Also check
the reference implementation in drivers/cpufreq/longrun.c

1.7 get_intermediate and target_intermediate
1.8 get_intermediate and target_intermediate
--------------------------------------------

Only for drivers with target_index() and CPUFREQ_ASYNC_NOTIFICATION unset.
@@ -222,42 +249,36 @@ failures as core would send notifications for that.

As most cpufreq processors only allow for being set to a few specific
frequencies, a "frequency table" with some functions might assist in
some work of the processor driver. Such a "frequency table" consists
of an array of struct cpufreq_frequency_table entries, with any value in
"driver_data" you want to use, and the corresponding frequency in
"frequency". At the end of the table, you need to add a
cpufreq_frequency_table entry with frequency set to CPUFREQ_TABLE_END. And
if you want to skip one entry in the table, set the frequency to
CPUFREQ_ENTRY_INVALID. The entries don't need to be in ascending
order.
some work of the processor driver. Such a "frequency table" consists of
an array of struct cpufreq_frequency_table entries, with driver specific
values in "driver_data", the corresponding frequency in "frequency" and
flags set. At the end of the table, you need to add a
cpufreq_frequency_table entry with frequency set to CPUFREQ_TABLE_END.
And if you want to skip one entry in the table, set the frequency to
CPUFREQ_ENTRY_INVALID. The entries don't need to be sorted in any
particular order, but if they are, the cpufreq core will do DVFS a bit
more quickly, as the search for the best match is faster.
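
For example, a table for an imaginary CPU with three operating points,
one of them skipped, might look like:

  static struct cpufreq_frequency_table foo_freq_table[] = {
          { .driver_data = 0, .frequency =  500000 },   /* 500 MHz */
          { .driver_data = 1, .frequency = 1000000 },   /* 1 GHz */
          { .driver_data = 2, .frequency = CPUFREQ_ENTRY_INVALID },
          { .frequency = CPUFREQ_TABLE_END },
  };
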
By calling cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
					struct cpufreq_frequency_table *table);
the cpuinfo.min_freq and cpuinfo.max_freq values are detected, and
policy->min and policy->max are set to the same values. This is
helpful for the per-CPU initialization stage.
By calling cpufreq_table_validate_and_show(), the cpuinfo.min_freq and
cpuinfo.max_freq values are detected, and policy->min and policy->max
are set to the same values. This is helpful for the per-CPU
initialization stage.

int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
				   struct cpufreq_frequency_table *table);
assures that at least one valid frequency is within policy->min and
policy->max, and all other criteria are met. This is helpful for the
->verify call.
cpufreq_frequency_table_verify() assures that at least one valid
frequency is within policy->min and policy->max, and all other criteria
are met. This is helpful for the ->verify call.

int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
				   unsigned int target_freq,
				   unsigned int relation);

is the corresponding frequency table helper for the ->target
stage. Just pass the values to this function, and this function
returns the number of the frequency table entry which contains
the frequency the CPU shall be set to.
cpufreq_frequency_table_target() is the corresponding frequency table
helper for the ->target stage. Just pass the values to this function,
and this function returns the number of the frequency table entry which
contains the frequency the CPU shall be set to.

The following macros can be used as iterators over cpufreq_frequency_table:

cpufreq_for_each_entry(pos, table) - iterates over all entries of frequency
table.

cpufreq-for_each_valid_entry(pos, table) - iterates over all entries,
cpufreq_for_each_valid_entry(pos, table) - iterates over all entries,
excluding CPUFREQ_ENTRY_INVALID frequencies.
Use arguments "pos" - a cpufreq_frequency_table * as a loop cursor and
"table" - the cpufreq_frequency_table * you want to iterate over.
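
For example, counting the usable entries of a table with the valid-entry
iterator:

  static unsigned int foo_count_valid_entries(struct cpufreq_frequency_table *table)
  {
          struct cpufreq_frequency_table *pos;
          unsigned int count = 0;

          cpufreq_for_each_valid_entry(pos, table)
                  count++;        /* CPUFREQ_ENTRY_INVALID entries are skipped */

          return count;
  }
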
Documentation/cpu-freq/cpufreq-stats.txt

@@ -34,10 +34,10 @@ cpufreq stats provides following statistics (explained in detail below).
-  total_trans
-  trans_table

All the statistics will be from the time the stats driver has been inserted
to the time when a read of a particular statistic is done. Obviously, stats
driver will not have any information about the frequency transitions before
the stats driver insertion.
All the statistics will be from the time the stats driver has been inserted
(or the time the stats were reset) to the time when a read of a particular
statistic is done. Obviously, stats driver will not have any information
about the frequency transitions before the stats driver insertion.

--------------------------------------------------------------------------------
<mysystem>:/sys/devices/system/cpu/cpu0/cpufreq/stats # ls -l

@@ -110,25 +110,13 @@ Config Main Menu
	CPU Frequency scaling  --->
		[*] CPU Frequency scaling
		[*]   CPU frequency translation statistics
		[*]     CPU frequency translation statistics details

"CPU Frequency scaling" (CONFIG_CPU_FREQ) should be enabled to configure
cpufreq-stats.

"CPU frequency translation statistics" (CONFIG_CPU_FREQ_STAT) provides the
basic statistics which includes time_in_state and total_trans.
statistics which includes time_in_state, total_trans and trans_table.

"CPU frequency translation statistics details" (CONFIG_CPU_FREQ_STAT_DETAILS)
provides fine grained cpufreq stats by trans_table. The reason for having a
separate config option for trans_table is:
- trans_table goes against the traditional /sysfs rule of one value per
  interface. It provides a whole bunch of value in a 2 dimensional matrix
  form.

Once these two options are enabled and your CPU supports cpufrequency, you
Once this option is enabled and your CPU supports cpufrequency, you
will be able to see the CPU frequency statistics in /sysfs.

Documentation/cpu-freq/governors.txt

@@ -10,6 +10,8 @@

    Dominik Brodowski  <linux@brodo.de>
    some additions and corrections by Nico Golde <nico@ngolde.de>
    Rafael J. Wysocki <rafael.j.wysocki@intel.com>
    Viresh Kumar <viresh.kumar@linaro.org>

@@ -28,32 +30,27 @@ Contents:
2.3  Userspace
2.4  Ondemand
2.5  Conservative
2.6  Schedutil

3.   The Governor Interface in the CPUfreq Core

4.   References


1. What Is A CPUFreq Governor?
==============================

Most cpufreq drivers (except the intel_pstate and longrun) or even most
cpu frequency scaling algorithms only offer the CPU to be set to one
frequency. In order to offer dynamic frequency scaling, the cpufreq
core must be able to tell these drivers of a "target frequency". So
these specific drivers will be transformed to offer a "->target/target_index"
call instead of the existing "->setpolicy" call. For "longrun", all
stays the same, though.
cpu frequency scaling algorithms only allow the CPU frequency to be set
to predefined fixed values. In order to offer dynamic frequency
scaling, the cpufreq core must be able to tell these drivers of a
"target frequency". So these specific drivers will be transformed to
offer a "->target/target_index/fast_switch()" call instead of the
"->setpolicy()" call. For set_policy drivers, all stays the same,
though.

How to decide what frequency within the CPUfreq policy should be used?
That's done using "cpufreq governors". Two are already in this patch
-- they're the already existing "powersave" and "performance" which
set the frequency statically to the lowest or highest frequency,
respectively. At least two more such governors will be ready for
addition in the near future, but likely many more as there are various
different theories and models about dynamic frequency scaling
around. Using such a generic interface as cpufreq offers to scaling
governors, these can be tested extensively, and the best one can be
selected for each specific use.
That's done using "cpufreq governors".

Basically, it's the following flow graph:

@@ -71,7 +68,7 @@ CPU can be set to switch independently |         CPU can only be set
		      /                    the limits of policy->{min,max}
		     /                          \
		    /                            \
	Using the ->setpolicy call,    Using the ->target/target_index call,
	Using the ->setpolicy call,    Using the ->target/target_index/fast_switch call,
	    the limits and the         the frequency closest
	     "policy" is set.          to target_freq is set.
	                               It is assured that it
@@ -109,114 +106,159 @@ directory.

2.4 Ondemand
------------

The CPUfreq governor "ondemand" sets the CPU depending on the
current usage. To do this the CPU must have the capability to
switch the frequency very quickly. There are a number of sysfs file
accessible parameters:
The CPUfreq governor "ondemand" sets the CPU frequency depending on the
current system load. Load estimation is triggered by the scheduler
through the update_util_data->func hook; when triggered, cpufreq checks
the CPU-usage statistics over the last period and the governor sets the
CPU accordingly. The CPU must have the capability to switch the
frequency very quickly.

sampling_rate: measured in uS (10^-6 seconds), this is how often you
want the kernel to look at the CPU usage and to make decisions on
what to do about the frequency. Typically this is set to values of
around '10000' or more. It's default value is (cmp. with users-guide.txt):
transition_latency * 1000
Be aware that transition latency is in ns and sampling_rate is in us, so you
get the same sysfs value by default.
Sampling rate should always get adjusted considering the transition latency
To set the sampling rate 750 times as high as the transition latency
in the bash (as said, 1000 is default), do:
echo `$(($(cat cpuinfo_transition_latency) * 750 / 1000)) \
>ondemand/sampling_rate
Sysfs files:

sampling_rate_min:
The sampling rate is limited by the HW transition latency:
transition_latency * 100
Or by kernel restrictions:
If CONFIG_NO_HZ_COMMON is set, the limit is 10ms fixed.
If CONFIG_NO_HZ_COMMON is not set or nohz=off boot parameter is used, the
limits depend on the CONFIG_HZ option:
HZ=1000: min=20000us  (20ms)
HZ=250: min=80000us (80ms)
HZ=100: min=200000us (200ms)
The highest value of kernel and HW latency restrictions is shown and
used as the minimum sampling rate.
* sampling_rate:

up_threshold: defines what the average CPU usage between the samplings
of 'sampling_rate' needs to be for the kernel to make a decision on
whether it should increase the frequency. For example when it is set
to its default value of '95' it means that between the checking
intervals the CPU needs to be on average more than 95% in use to then
decide that the CPU frequency needs to be increased.
Measured in uS (10^-6 seconds), this is how often you want the kernel
to look at the CPU usage and to make decisions on what to do about the
frequency. Typically this is set to values of around '10000' or more.
Its default value is (cmp. with users-guide.txt): transition_latency
* 1000. Be aware that transition latency is in ns and sampling_rate
is in us, so you get the same sysfs value by default. Sampling rate
should always get adjusted considering the transition latency. To set
the sampling rate 750 times as high as the transition latency in the
bash (as said, 1000 is default), do:

ignore_nice_load: this parameter takes a value of '0' or '1'. When
set to '0' (its default), all processes are counted towards the
'cpu utilisation' value. When set to '1', the processes that are
run with a 'nice' value will not count (and thus be ignored) in the
overall usage calculation. This is useful if you are running a CPU
intensive calculation on your laptop that you do not care how long it
takes to complete as you can 'nice' it and prevent it from taking part
in the deciding process of whether to increase your CPU frequency.

$ echo $(($(cat cpuinfo_transition_latency) * 750 / 1000)) > ondemand/sampling_rate

sampling_down_factor: this parameter controls the rate at which the
kernel makes a decision on when to decrease the frequency while running
at top speed. When set to 1 (the default) decisions to reevaluate load
are made at the same interval regardless of current clock speed. But
when set to greater than 1 (e.g. 100) it acts as a multiplier for the
scheduling interval for reevaluating load when the CPU is at its top
speed due to high load. This improves performance by reducing the overhead
of load evaluation and helping the CPU stay at its top speed when truly
busy, rather than shifting back and forth in speed. This tunable has no
effect on behavior at lower speeds/lower CPU loads.
* sampling_rate_min:

powersave_bias: this parameter takes a value between 0 to 1000. It
defines the percentage (times 10) value of the target frequency that
will be shaved off of the target. For example, when set to 100 -- 10%,
when ondemand governor would have targeted 1000 MHz, it will target
1000 MHz - (10% of 1000 MHz) = 900 MHz instead. This is set to 0
(disabled) by default.
When AMD frequency sensitivity powersave bias driver --
drivers/cpufreq/amd_freq_sensitivity.c is loaded, this parameter
defines the workload frequency sensitivity threshold in which a lower
frequency is chosen instead of ondemand governor's original target.
The frequency sensitivity is a hardware reported (on AMD Family 16h
Processors and above) value between 0 to 100% that tells software how
the performance of the workload running on a CPU will change when
frequency changes. A workload with sensitivity of 0% (memory/IO-bound)
will not perform any better on higher core frequency, whereas a
workload with sensitivity of 100% (CPU-bound) will perform better
higher the frequency. When the driver is loaded, this is set to 400
by default -- for CPUs running workloads with sensitivity value below
40%, a lower frequency is chosen. Unloading the driver or writing 0
will disable this feature.
The sampling rate is limited by the HW transition latency:
transition_latency * 100

Or by kernel restrictions:
- If CONFIG_NO_HZ_COMMON is set, the limit is 10ms fixed.
- If CONFIG_NO_HZ_COMMON is not set or nohz=off boot parameter is
  used, the limits depend on the CONFIG_HZ option:
  HZ=1000: min=20000us  (20ms)
  HZ=250: min=80000us (80ms)
  HZ=100: min=200000us (200ms)

The highest value of kernel and HW latency restrictions is shown and
used as the minimum sampling rate.
* up_threshold:

This defines what the average CPU usage between the samplings of
'sampling_rate' needs to be for the kernel to make a decision on
whether it should increase the frequency. For example when it is set
to its default value of '95' it means that between the checking
intervals the CPU needs to be on average more than 95% in use to then
decide that the CPU frequency needs to be increased.

* ignore_nice_load:

This parameter takes a value of '0' or '1'. When set to '0' (its
default), all processes are counted towards the 'cpu utilisation'
value. When set to '1', the processes that are run with a 'nice'
value will not count (and thus be ignored) in the overall usage
calculation. This is useful if you are running a CPU intensive
calculation on your laptop that you do not care how long it takes to
complete as you can 'nice' it and prevent it from taking part in the
deciding process of whether to increase your CPU frequency.

* sampling_down_factor:

This parameter controls the rate at which the kernel makes a decision
on when to decrease the frequency while running at top speed. When set
to 1 (the default) decisions to reevaluate load are made at the same
interval regardless of current clock speed. But when set to greater
than 1 (e.g. 100) it acts as a multiplier for the scheduling interval
for reevaluating load when the CPU is at its top speed due to high
load. This improves performance by reducing the overhead of load
evaluation and helping the CPU stay at its top speed when truly busy,
rather than shifting back and forth in speed. This tunable has no
effect on behavior at lower speeds/lower CPU loads.

* powersave_bias:

This parameter takes a value between 0 to 1000. It defines the
percentage (times 10) value of the target frequency that will be
shaved off of the target. For example, when set to 100 -- 10%, when
ondemand governor would have targeted 1000 MHz, it will target
1000 MHz - (10% of 1000 MHz) = 900 MHz instead. This is set to 0
(disabled) by default.

When AMD frequency sensitivity powersave bias driver --
drivers/cpufreq/amd_freq_sensitivity.c is loaded, this parameter
defines the workload frequency sensitivity threshold in which a lower
frequency is chosen instead of ondemand governor's original target.
The frequency sensitivity is a hardware reported (on AMD Family 16h
Processors and above) value between 0 to 100% that tells software how
the performance of the workload running on a CPU will change when
frequency changes. A workload with sensitivity of 0% (memory/IO-bound)
will not perform any better on higher core frequency, whereas a
workload with sensitivity of 100% (CPU-bound) will perform better the
higher the frequency. When the driver is loaded, this is set to 400 by
default -- for CPUs running workloads with sensitivity value below
40%, a lower frequency is chosen. Unloading the driver or writing 0
will disable this feature.

2.5 Conservative
----------------

The CPUfreq governor "conservative", much like the "ondemand"
governor, sets the CPU depending on the current usage. It differs in
behaviour in that it gracefully increases and decreases the CPU speed
rather than jumping to max speed the moment there is any load on the
CPU. This behaviour more suitable in a battery powered environment.
The governor is tweaked in the same manner as the "ondemand" governor
through sysfs with the addition of:
governor, sets the CPU frequency depending on the current usage. It
differs in behaviour in that it gracefully increases and decreases the
CPU speed rather than jumping to max speed the moment there is any load
on the CPU. This behaviour is more suitable in a battery powered
environment. The governor is tweaked in the same manner as the
"ondemand" governor through sysfs with the addition of:

freq_step: this describes what percentage steps the cpu freq should be
increased and decreased smoothly by. By default the cpu frequency will
increase in 5% chunks of your maximum cpu frequency. You can change this
value to anywhere between 0 and 100 where '0' will effectively lock your
CPU at a speed regardless of its load whilst '100' will, in theory, make
it behave identically to the "ondemand" governor.
* freq_step:

down_threshold: same as the 'up_threshold' found for the "ondemand"
governor but for the opposite direction. For example when set to its
default value of '20' it means that if the CPU usage needs to be below
20% between samples to have the frequency decreased.
This describes what percentage steps the cpu freq should be increased
and decreased smoothly by. By default the cpu frequency will increase
in 5% chunks of your maximum cpu frequency. You can change this value
to anywhere between 0 and 100 where '0' will effectively lock your CPU
at a speed regardless of its load whilst '100' will, in theory, make
it behave identically to the "ondemand" governor.

* down_threshold:

Same as the 'up_threshold' found for the "ondemand" governor but for
the opposite direction. For example when set to its default value of
'20' it means that the CPU usage needs to be below 20% between
samples for the frequency to be decreased.

* sampling_down_factor:

Similar functionality as in "ondemand" governor. But in
"conservative", it controls the rate at which the kernel makes a
decision on when to decrease the frequency while running in any speed.
Load for frequency increase is still evaluated every sampling rate.

2.6 Schedutil
-------------

The "schedutil" governor aims at better integration with the Linux
kernel scheduler. Load estimation is achieved through the scheduler's
Per-Entity Load Tracking (PELT) mechanism, which also provides
information about the recent load [1]. This governor currently does
load based DVFS only for tasks managed by CFS. RT and DL scheduler tasks
are always run at the highest frequency. Unlike all the other
governors, the code is located under the kernel/sched/ directory.

Sysfs files:

* rate_limit_us:

This contains a value in microseconds. The governor waits for
rate_limit_us time before reevaluating the load again, after it has
evaluated the load once.

For an in-depth comparison with the other governors refer to [2].

sampling_down_factor: similar functionality as in "ondemand" governor.
But in "conservative", it controls the rate at which the kernel makes
a decision on when to decrease the frequency while running in any
speed. Load for frequency increase is still evaluated every
sampling rate.

3. The Governor Interface in the CPUfreq Core
=============================================
@@ -225,26 +267,10 @@ A new governor must register itself with the CPUfreq core using
"cpufreq_register_governor". The struct cpufreq_governor, which has to
be passed to that function, must contain the following values:

governor->name - A unique name for this governor
governor->governor - The governor callback function
governor->owner - .THIS_MODULE for the governor module (if
appropriate)

The governor->governor callback is called with the current (or to-be-set)
cpufreq_policy struct for that CPU, and an unsigned int event. The
following events are currently defined:

CPUFREQ_GOV_START:   This governor shall start its duty for the CPU
		     policy->cpu
CPUFREQ_GOV_STOP:    This governor shall end its duty for the CPU
		     policy->cpu
CPUFREQ_GOV_LIMITS:  The limits for CPU policy->cpu have changed to
		     policy->min and policy->max.

If you need other "events" externally of your driver, _only_ use the
cpufreq_governor_l(unsigned int cpu, unsigned int event) call to the
CPUfreq core to ensure proper locking.
governor->name - A unique name for this governor.
governor->owner - .THIS_MODULE for the governor module (if appropriate).

plus a set of hooks to the functions implementing the governor's logic.
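
A rough sketch of such a registration (the start/stop/limits hooks
shown reflect struct cpufreq_governor in this kernel version, and the
sample_* functions are placeholders):

  static struct cpufreq_governor sample_governor = {
          .name   = "sample",
          .owner  = THIS_MODULE,
          .start  = sample_start,   /* begin managing a policy */
          .stop   = sample_stop,    /* stop managing a policy */
          .limits = sample_limits,  /* policy->min/max have changed */
  };

  static int __init sample_gov_init(void)
  {
          return cpufreq_register_governor(&sample_governor);
  }
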
The CPUfreq governor may call the CPU processor driver using one of
these two functions:
@@ -258,12 +284,18 @@ int __cpufreq_driver_target(struct cpufreq_policy *policy,
                            unsigned int relation);

target_freq must be within policy->min and policy->max, of course.
What's the difference between these two functions? When your governor
still is in a direct code path of a call to governor->governor, the
per-CPU cpufreq lock is still held in the cpufreq core, and there's
no need to lock it again (in fact, this would cause a deadlock). So
use __cpufreq_driver_target only in these cases. In all other cases
(for example, when there's a "daemonized" function that wakes up
every second), use cpufreq_driver_target to lock the cpufreq per-CPU
lock before the command is passed to the cpufreq processor driver.
What's the difference between these two functions? When your governor is
in a direct code path of a call to governor callbacks, like
governor->start(), the policy->rwsem is still held in the cpufreq core,
and there's no need to lock it again (in fact, this would cause a
deadlock). So use __cpufreq_driver_target only in these cases. In all
other cases (for example, when there's a "daemonized" function that
wakes up every second), use cpufreq_driver_target to take policy->rwsem
before the command is passed to the cpufreq driver.
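
For instance, a governor's limits hook is invoked with policy->rwsem
held, so it would use the __ variant (a sketch, under the same
placeholder naming as above):

  /* Called by the core with policy->rwsem already held. */
  static void sample_limits(struct cpufreq_policy *policy)
  {
          if (policy->cur > policy->max)
                  __cpufreq_driver_target(policy, policy->max,
                                          CPUFREQ_RELATION_H);
          else if (policy->cur < policy->min)
                  __cpufreq_driver_target(policy, policy->min,
                                          CPUFREQ_RELATION_L);
  }
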
4. References
=============

[1] Per-entity load tracking: https://lwn.net/Articles/531853/
[2] Improvements in CPU frequency management: https://lwn.net/Articles/682391/

Documentation/cpu-freq/index.txt

@@ -18,16 +18,29 @@

Documents in this directory:
----------------------------
core.txt        - General description of the CPUFreq core and
                  of CPUFreq notifiers

cpu-drivers.txt - How to implement a new cpufreq processor driver
amd-powernow.txt    - AMD powernow driver specific file.

boost.txt           - Frequency boosting support.

core.txt            - General description of the CPUFreq core and
                      of CPUFreq notifiers.

cpu-drivers.txt     - How to implement a new cpufreq processor driver.

cpufreq-nforce2.txt - nVidia nForce2 platform specific file.

cpufreq-stats.txt   - General description of sysfs cpufreq stats.

governors.txt       - What are cpufreq governors and how to
                      implement them?

index.txt           - File index, Mailing list and Links (this document)

intel-pstate.txt    - Intel pstate cpufreq driver specific file.

pcc-cpufreq.txt     - PCC cpufreq driver specific file.

user-guide.txt      - User Guide to CPUFreq

@@ -35,9 +48,7 @@ Mailing List
------------
There is a CPU frequency changing CVS commit and general list where
you can report bugs, problems or submit patches. To post a message,
send an email to linux-pm@vger.kernel.org, to subscribe go to
http://vger.kernel.org/vger-lists.html#linux-pm and follow the
instructions there.
send an email to linux-pm@vger.kernel.org.

Links
-----

@@ -48,7 +59,7 @@ how to access the CVS repository:
* http://cvs.arm.linux.org.uk/

the CPUFreq Mailing list:
* http://vger.kernel.org/vger-lists.html#cpufreq
* http://vger.kernel.org/vger-lists.html#linux-pm

Clock and voltage scaling for the SA-1100:
* http://www.lartmaker.nl/projects/scaling
Documentation/cpu-freq/intel-pstate.txt

@@ -85,6 +85,21 @@ Sysfs will show :
Refer to "Intel® 64 and IA-32 Architectures Software Developer’s Manual
Volume 3: System Programming Guide" to understand ratios.

There is one more sysfs attribute in /sys/devices/system/cpu/intel_pstate/
that can be used for controlling the operation mode of the driver:

	status: Three settings are possible:
	"off"     - The driver is not in use at this time.
	"active"  - The driver works as a P-state governor (default).
	"passive" - The driver works as a regular cpufreq one and collaborates
		    with the generic cpufreq governors (it sets P-states as
		    requested by those governors).
	The current setting is returned by reads from this attribute. Writing one
	of the above strings to it changes the operation mode as indicated by that
	string, if possible. If HW-managed P-states (HWP) are enabled, it is not
	possible to change the driver's operation mode and attempts to write to
	this attribute will fail.

cpufreq sysfs for Intel P-State

Since this driver registers with cpufreq, cpufreq sysfs is also presented.
Documentation/cpu-freq/user-guide.txt

@@ -18,7 +18,7 @@
Contents:
---------
1. Supported Architectures and Processors
1.1 ARM
1.1 ARM and ARM64
1.2 x86
1.3 sparc64
1.4 ppc
@@ -37,16 +37,10 @@ Contents:

1. Supported Architectures and Processors
=========================================

1.1 ARM
-------

The following ARM processors are supported by cpufreq:

ARM Integrator
ARM-SA1100
ARM-SA1110
Intel PXA
1.1 ARM and ARM64
-----------------

Almost all ARM and ARM64 platforms support CPU frequency scaling.

1.2 x86
-------
@@ -69,6 +63,7 @@ Transmeta Crusoe
Transmeta Efficeon
VIA Cyrix 3 / C3
various processors on some ACPI 2.0-compatible systems [*]
And many more

[*] Only if "ACPI Processor Performance States" are available
to the ACPI<->BIOS interface.
@@ -147,10 +142,19 @@ mounted it at /sys, the cpufreq interface is located in a subdirectory
"cpufreq" within the cpu-device directory
(e.g. /sys/devices/system/cpu/cpu0/cpufreq/ for the first CPU).

affected_cpus :                 List of Online CPUs that require software
                                coordination of frequency.

cpuinfo_cur_freq :              Current frequency of the CPU as obtained from
                                the hardware, in KHz. This is the frequency
                                the CPU actually runs at.

cpuinfo_min_freq :              this file shows the minimum operating
                                frequency the processor can run at(in kHz)

cpuinfo_max_freq :              this file shows the maximum operating
                                frequency the processor can run at(in kHz)

cpuinfo_transition_latency      The time it takes on this CPU to
                                switch between two frequencies in nano
                                seconds. If unknown or known to be
@@ -163,25 +167,30 @@ cpuinfo_transition_latency      The time it takes on this CPU to
                                userspace daemon. Make sure to not
                                switch the frequency too often
                                resulting in performance loss.
scaling_driver :                this file shows what cpufreq driver is
                                used to set the frequency on this CPU

related_cpus :                  List of Online + Offline CPUs that need software
                                coordination of frequency.

scaling_available_frequencies : List of available frequencies, in KHz.

scaling_available_governors :   this file shows the CPUfreq governors
                                available in this kernel. You can see the
                                currently activated governor in

scaling_cur_freq :              Current frequency of the CPU as determined by
                                the governor and cpufreq core, in KHz. This is
                                the frequency the kernel thinks the CPU runs
                                at.

scaling_driver :                this file shows what cpufreq driver is
                                used to set the frequency on this CPU

scaling_governor,               and by "echoing" the name of another
                                governor you can change it. Please note
                                that some governors won't load - they only
                                work on some specific architectures or
                                processors.

cpuinfo_cur_freq :              Current frequency of the CPU as obtained from
                                the hardware, in KHz. This is the frequency
                                the CPU actually runs at.

scaling_available_frequencies : List of available frequencies, in KHz.

scaling_min_freq and
scaling_max_freq                show the current "policy limits" (in
                                kHz). By echoing new values into these
@@ -190,16 +199,11 @@ scaling_max_freq show the current "policy limits" (in
                                first set scaling_max_freq, then
                                scaling_min_freq.

affected_cpus :                 List of Online CPUs that require software
                                coordination of frequency.

related_cpus :                  List of Online + Offline CPUs that need software
                                coordination of frequency.

scaling_cur_freq :              Current frequency of the CPU as determined by
                                the governor and cpufreq core, in KHz. This is
                                the frequency the kernel thinks the CPU runs
                                at.
scaling_setspeed                This can be read to get the currently programmed
                                value by the governor. This can be written to
                                change the current frequency for a group of
                                CPUs, represented by a policy. This is supported
                                currently only by the userspace governor.

bios_limit :                    If the BIOS tells the OS to limit a CPU to
                                lower frequencies, the user can read out the

@@ -15,6 +15,9 @@ Properties:
  Second cell specifies the irq distribution mode to cores
     0=Round Robin; 1=cpu0, 2=cpu1, 4=cpu2, 8=cpu3

  The second cell in interrupts property is deprecated and may be ignored by
  the kernel.

  intc accessed via the special ARC AUX register interface, hence "reg" property
  is not specified.

Documentation/devicetree/bindings/net/mediatek-net.txt

@@ -7,7 +7,7 @@ have dual GMAC each represented by a child node..
* Ethernet controller node

Required properties:
- compatible: Should be "mediatek,mt7623-eth"
- compatible: Should be "mediatek,mt2701-eth"
- reg: Address and length of the register set for the device
- interrupts: Should contain the three frame engines interrupts in numeric
	order. These are fe_int0, fe_int1 and fe_int2.
Documentation/devicetree/bindings/net/phy.txt

@@ -19,8 +19,9 @@ Optional Properties:
  specifications. If neither of these are specified, the default is to
  assume clause 22.

  If the phy's identifier is known then the list may contain an entry
  of the form: "ethernet-phy-idAAAA.BBBB" where
  If the PHY reports an incorrect ID (or none at all) then the
  "compatible" list may contain an entry with the correct PHY ID in the
  form: "ethernet-phy-idAAAA.BBBB" where
     AAAA - The value of the 16 bit Phy Identifier 1 register as
            4 hex digits. This is the chip vendor OUI bits 3:18
     BBBB - The value of the 16 bit Phy Identifier 2 register as
Documentation/filesystems/proc.txt

@@ -212,10 +212,11 @@ asynchronous manner and the value may not be very precise. To see a precise
snapshot of a moment, you can see /proc/<pid>/smaps file and scan page table.
It's slow but very precise.

Table 1-2: Contents of the status files (as of 4.1)
Table 1-2: Contents of the status files (as of 4.8)
..............................................................................
 Field                       Content
 Name                        filename of the executable
 Umask                       file mode creation mask
 State                       state (R is running, S is sleeping, D is sleeping
                             in an uninterruptible wait, Z is zombie,
                             T is traced or stopped)

@@ -226,7 +227,6 @@ Table 1-2: Contents of the status files (as of 4.1)
 TracerPid                   PID of process tracing this process (0 if not)
 Uid                         Real, effective, saved set, and file system UIDs
 Gid                         Real, effective, saved set, and file system GIDs
 Umask                       file mode creation mask
 FDSize                      number of file descriptor slots currently allocated
 Groups                      supplementary group list
 NStgid                      descendant namespace thread group ID hierarchy

@@ -236,6 +236,7 @@ Table 1-2: Contents of the status files (as of 4.1)
 VmPeak                      peak virtual memory size
 VmSize                      total program size
 VmLck                       locked memory size
 VmPin                       pinned memory size
 VmHWM                       peak resident set size ("high water mark")
 VmRSS                       size of memory portions. It contains the three
                             following parts (VmRSS = RssAnon + RssFile + RssShmem)
Documentation/power/states.txt

@@ -35,9 +35,7 @@ only one way to cause the system to go into the Suspend-To-RAM state (write

The default suspend mode (ie. the one to be used without writing anything into
/sys/power/mem_sleep) is either "deep" (if Suspend-To-RAM is supported) or
"s2idle", but it can be overridden by the value of the "mem_sleep_default"
parameter in the kernel command line. On some ACPI-based systems, depending on
the information in the FADT, the default may be "s2idle" even if Suspend-To-RAM
is supported.
parameter in the kernel command line.

The properties of all of the sleep states are described below.

MAINTAINERS

@@ -3567,7 +3567,7 @@ F:	drivers/infiniband/hw/cxgb3/
F:	include/uapi/rdma/cxgb3-abi.h

CXGB4 ETHERNET DRIVER (CXGB4)
M:	Hariprasad S <hariprasad@chelsio.com>
M:	Ganesh Goudar <ganeshgr@chelsio.com>
L:	netdev@vger.kernel.org
W:	http://www.chelsio.com
S:	Supported
@@ -4100,12 +4100,18 @@ F:	drivers/gpu/drm/bridge/

DRM DRIVER FOR BOCHS VIRTUAL GPU
M:	Gerd Hoffmann <kraxel@redhat.com>
S:	Odd Fixes
L:	virtualization@lists.linux-foundation.org
T:	git git://git.kraxel.org/linux drm-qemu
S:	Maintained
F:	drivers/gpu/drm/bochs/

DRM DRIVER FOR QEMU'S CIRRUS DEVICE
M:	Dave Airlie <airlied@redhat.com>
S:	Odd Fixes
M:	Gerd Hoffmann <kraxel@redhat.com>
L:	virtualization@lists.linux-foundation.org
T:	git git://git.kraxel.org/linux drm-qemu
S:	Obsolete
W:	https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
F:	drivers/gpu/drm/cirrus/

RADEON and AMDGPU DRM DRIVERS

@@ -4147,7 +4153,7 @@ F:	Documentation/gpu/i915.rst
INTEL GVT-g DRIVERS (Intel GPU Virtualization)
M:	Zhenyu Wang <zhenyuw@linux.intel.com>
M:	Zhi Wang <zhi.a.wang@intel.com>
L:	igvt-g-dev@lists.01.org
L:	intel-gvt-dev@lists.freedesktop.org
L:	intel-gfx@lists.freedesktop.org
W:	https://01.org/igvt-g
T:	git https://github.com/01org/gvt-linux.git

@@ -4298,7 +4304,10 @@ F:	Documentation/devicetree/bindings/display/renesas,du.txt

DRM DRIVER FOR QXL VIRTUAL GPU
M:	Dave Airlie <airlied@redhat.com>
S:	Odd Fixes
M:	Gerd Hoffmann <kraxel@redhat.com>
L:	virtualization@lists.linux-foundation.org
T:	git git://git.kraxel.org/linux drm-qemu
S:	Maintained
F:	drivers/gpu/drm/qxl/
F:	include/uapi/drm/qxl_drm.h

@@ -13092,6 +13101,7 @@ M:	David Airlie <airlied@linux.ie>
M:	Gerd Hoffmann <kraxel@redhat.com>
L:	dri-devel@lists.freedesktop.org
L:	virtualization@lists.linux-foundation.org
T:	git git://git.kraxel.org/linux drm-qemu
S:	Maintained
F:	drivers/gpu/drm/virtio/
F:	include/uapi/linux/virtio_gpu.h

@@ -13443,6 +13453,7 @@ F:	arch/x86/

X86 PLATFORM DRIVERS
M:	Darren Hart <dvhart@infradead.org>
M:	Andy Shevchenko <andy@infradead.org>
L:	platform-driver-x86@vger.kernel.org
T:	git git://git.infradead.org/users/dvhart/linux-platform-drivers-x86.git
S:	Maintained

@@ -13614,6 +13625,7 @@ F:	drivers/net/hamradio/z8530.h

ZBUD COMPRESSED PAGE ALLOCATOR
M:	Seth Jennings <sjenning@redhat.com>
M:	Dan Streetman <ddstreet@ieee.org>
L:	linux-mm@kvack.org
S:	Maintained
F:	mm/zbud.c

@@ -13669,6 +13681,7 @@ F:	Documentation/vm/zsmalloc.txt

ZSWAP COMPRESSED SWAP CACHING
M:	Seth Jennings <sjenning@redhat.com>
M:	Dan Streetman <ddstreet@ieee.org>
L:	linux-mm@kvack.org
S:	Maintained
F:	mm/zswap.c

Makefile

@@ -1,8 +1,8 @@
VERSION = 4
PATCHLEVEL = 10
SUBLEVEL = 0
EXTRAVERSION = -rc5
NAME = Anniversary Edition
EXTRAVERSION = -rc6
NAME = Fearless Coyote

# *DOCUMENTATION*
# To see a list of typical targets execute "make help"

arch/arc/include/asm/delay.h

@@ -26,7 +26,9 @@ static inline void __delay(unsigned long loops)
	"	lp  1f		\n"
	"	nop		\n"
	"1:			\n"
	: : "r"(loops));
	:
	: "r"(loops)
	: "lp_count");
}

extern void __bad_udelay(void);
arch/arc/kernel/head.S

@@ -71,14 +71,14 @@ ENTRY(stext)
	GET_CPU_ID  r5
	cmp	r5, 0
	mov.nz	r0, r5
#ifdef CONFIG_ARC_SMP_HALT_ON_RESET
	; Non-Master can proceed as system would be booted sufficiently
	jnz	first_lines_of_secondary
#else
	bz	.Lmaster_proceed

	; Non-Masters wait for Master to boot enough and bring them up
	jnz	arc_platform_smp_wait_to_boot
#endif
	; Master falls thru

	; when they resume, tail-call to entry point
	mov	blink, @first_lines_of_secondary
	j	arc_platform_smp_wait_to_boot

.Lmaster_proceed:
#endif

	; Clear BSS before updating any globals
arch/arc/kernel/mcip.c

@@ -93,11 +93,10 @@ static void mcip_probe_n_setup(void)
	READ_BCR(ARC_REG_MCIP_BCR, mp);

	sprintf(smp_cpuinfo_buf,
		"Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s%s\n",
		"Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n",
		mp.ver, mp.num_cores,
		IS_AVAIL1(mp.ipi, "IPI "),
		IS_AVAIL1(mp.idu, "IDU "),
		IS_AVAIL1(mp.llm, "LLM "),
		IS_AVAIL1(mp.dbg, "DEBUG "),
		IS_AVAIL1(mp.gfrc, "GFRC"));
@@ -175,7 +174,6 @@ static void idu_irq_unmask(struct irq_data *data)
	raw_spin_unlock_irqrestore(&mcip_lock, flags);
}

#ifdef CONFIG_SMP
static int
idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,
		     bool force)

@@ -205,12 +203,27 @@ idu_irq_set_affinity(struct irq_data *data, const struct cpumask *cpumask,

	return IRQ_SET_MASK_OK;
}
#endif

static void idu_irq_enable(struct irq_data *data)
{
	/*
	 * By default send all common interrupts to all available online CPUs.
	 * The affinity of common interrupts in IDU must be set manually since
	 * in some cases the kernel will not call irq_set_affinity() by itself:
	 *   1. When the kernel is not configured with support of SMP.
	 *   2. When the kernel is configured with support of SMP but upper
	 *      interrupt controllers does not support setting of the affinity
	 *      and cannot propagate it to IDU.
	 */
	idu_irq_set_affinity(data, cpu_online_mask, false);
	idu_irq_unmask(data);
}

static struct irq_chip idu_irq_chip = {
	.name			= "MCIP IDU Intc",
	.irq_mask		= idu_irq_mask,
	.irq_unmask		= idu_irq_unmask,
	.irq_enable		= idu_irq_enable,
#ifdef CONFIG_SMP
	.irq_set_affinity	= idu_irq_set_affinity,
#endif
@@ -243,36 +256,14 @@ static int idu_irq_xlate(struct irq_domain *d, struct device_node *n,
			 const u32 *intspec, unsigned int intsize,
			 irq_hw_number_t *out_hwirq, unsigned int *out_type)
{
	irq_hw_number_t hwirq = *out_hwirq = intspec[0];
	int distri = intspec[1];
	unsigned long flags;

	/*
	 * Ignore value of interrupt distribution mode for common interrupts in
	 * IDU which resides in intspec[1] since setting an affinity using value
	 * from Device Tree is deprecated in ARC.
	 */
	*out_hwirq = intspec[0];
	*out_type = IRQ_TYPE_NONE;

	/* XXX: validate distribution scheme again online cpu mask */
	if (distri == 0) {
		/* 0 - Round Robin to all cpus, otherwise 1 bit per core */
		raw_spin_lock_irqsave(&mcip_lock, flags);
		idu_set_dest(hwirq, BIT(num_online_cpus()) - 1);
		idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_RR);
		raw_spin_unlock_irqrestore(&mcip_lock, flags);
	} else {
		/*
		 * DEST based distribution for Level Triggered intr can only
		 * have 1 CPU, so generalize it to always contain 1 cpu
		 */
		int cpu = ffs(distri);

		if (cpu != fls(distri))
			pr_warn("IDU irq %lx distri mode set to cpu %x\n",
				hwirq, cpu);

		raw_spin_lock_irqsave(&mcip_lock, flags);
		idu_set_dest(hwirq, cpu);
		idu_set_mode(hwirq, IDU_M_TRIG_LEVEL, IDU_M_DISTRI_DEST);
		raw_spin_unlock_irqrestore(&mcip_lock, flags);
	}

	return 0;
}

@@ -90,22 +90,37 @@ void __init smp_cpus_done(unsigned int max_cpus)
*/
static volatile int wake_flag;

#ifdef CONFIG_ISA_ARCOMPACT

#define __boot_read(f) f
#define __boot_write(f, v) f = v

#else

#define __boot_read(f) arc_read_uncached_32(&f)
#define __boot_write(f, v) arc_write_uncached_32(&f, v)

#endif

static void arc_default_smp_cpu_kick(int cpu, unsigned long pc)
{
BUG_ON(cpu == 0);
wake_flag = cpu;

__boot_write(wake_flag, cpu);
}

void arc_platform_smp_wait_to_boot(int cpu)
{
while (wake_flag != cpu)
/* for halt-on-reset, we've waited already */
if (IS_ENABLED(CONFIG_ARC_SMP_HALT_ON_RESET))
return;

while (__boot_read(wake_flag) != cpu)
;

wake_flag = 0;
__asm__ __volatile__("j @first_lines_of_secondary \n");
__boot_write(wake_flag, 0);
}

const char *arc_platform_smp_cpuinfo(void)
{
return plat_smp_ops.info ? : "";
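Editorial aside, not from the patch: the reworked boot handshake above is a plain polled mailbox. Modeled with C11 atomics on ordinary cached memory it looks roughly like the sketch below; the real ARCv2 code must go through uncached accessors instead, because the secondary polls before its caches are set up:

#include <stdatomic.h>

static _Atomic int wake_flag;

/* master: kick one secondary CPU (cpu != 0) */
static void cpu_kick(int cpu)
{
    atomic_store(&wake_flag, cpu);
}

/* secondary: spin until kicked, then acknowledge */
static void wait_to_boot(int cpu)
{
    while (atomic_load(&wake_flag) != cpu)
        ;                       /* busy-wait until the master writes our id */
    atomic_store(&wake_flag, 0);
}

int main(void) { cpu_kick(1); wait_to_boot(1); return 0; }
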
@@ -241,8 +241,9 @@ int misaligned_fixup(unsigned long address, struct pt_regs *regs,
if (state.fault)
goto fault;

/* clear any remnants of delay slot */
if (delay_mode(regs)) {
regs->ret = regs->bta;
regs->ret = regs->bta & ~1U;
regs->status32 &= ~STATUS_DE_MASK;
} else {
regs->ret += state.instr_len;
@@ -24,7 +24,7 @@ CONFIG_ARM_APPENDED_DTB=y
CONFIG_ARM_ATAG_DTB_COMPAT=y
CONFIG_CMDLINE="root=/dev/ram0 rw ramdisk=8192 initrd=0x41000000,8M console=ttySAC1,115200 init=/linuxrc mem=256M"
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_STAT_DETAILS=y
CONFIG_CPU_FREQ_STAT=y
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=m
CONFIG_CPU_FREQ_GOV_USERSPACE=m

@@ -58,7 +58,7 @@ CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_ARM_APPENDED_DTB=y
CONFIG_ARM_ATAG_DTB_COMPAT=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_STAT_DETAILS=y
CONFIG_CPU_FREQ_STAT=y
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_CPU_IDLE=y
CONFIG_ARM_KIRKWOOD_CPUIDLE=y

@@ -132,7 +132,7 @@ CONFIG_ARM_ATAG_DTB_COMPAT=y
CONFIG_KEXEC=y
CONFIG_EFI=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_STAT_DETAILS=y
CONFIG_CPU_FREQ_STAT=y
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=m
CONFIG_CPU_FREQ_GOV_USERSPACE=m

@@ -44,7 +44,7 @@ CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_ARM_APPENDED_DTB=y
CONFIG_ARM_ATAG_DTB_COMPAT=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_STAT_DETAILS=y
CONFIG_CPU_FREQ_STAT=y
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_CPU_IDLE=y
CONFIG_ARM_KIRKWOOD_CPUIDLE=y

@@ -97,7 +97,7 @@ CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_CMDLINE="root=/dev/ram0 ro"
CONFIG_KEXEC=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_STAT_DETAILS=y
CONFIG_CPU_FREQ_STAT=y
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=m
CONFIG_CPU_FREQ_GOV_USERSPACE=m

@@ -38,7 +38,7 @@ CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_ARM_APPENDED_DTB=y
CONFIG_KEXEC=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_STAT_DETAILS=y
CONFIG_CPU_FREQ_STAT=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
@@ -11,6 +11,7 @@
* for more details.
*/

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
@@ -209,7 +210,12 @@ static struct notifier_block init_cpu_capacity_notifier = {

static int __init register_cpufreq_notifier(void)
{
if (cap_parsing_failed)
/*
* on ACPI-based systems we need to use the default cpu capacity
* until we have the necessary code to parse the cpu capacity, so
* skip registering cpufreq notifier.
*/
if (!acpi_disabled || cap_parsing_failed)
return -EINVAL;

if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL)) {
@@ -139,7 +139,7 @@ static inline void atomic64_dec(atomic64_t *v)
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
#define atomic64_inc_and_test(v) (atomic64_inc_return((v)) == 0)

#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)

#define atomic_cmpxchg(v, old, new) (cmpxchg(&(v)->counter, old, new))
#define atomic_xchg(v, new) (xchg(&(v)->counter, new))
@@ -161,6 +161,39 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
return c;
}

static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
{
long long c, old;

c = atomic64_read(v);
for (;;) {
if (unlikely(c == u))
break;
old = atomic64_cmpxchg(v, c, c + i);
if (likely(old == c))
break;
c = old;
}
return c != u;
}

static inline long long atomic64_dec_if_positive(atomic64_t *v)
{
long long c, old, dec;

c = atomic64_read(v);
for (;;) {
dec = c - 1;
if (unlikely(dec < 0))
break;
old = atomic64_cmpxchg((v), c, dec);
if (likely(old == c))
break;
c = old;
}
return dec;
}

#define ATOMIC_OP(op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
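The two new 64-bit helpers above are standard compare-and-swap retry loops. A hedged userspace rendering of atomic64_add_unless() with C11 <stdatomic.h> (illustrative only; the kernel version relies on the architecture's atomic64_cmpxchg()):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Add 'i' to *v unless it currently holds 'u'; returns true if the add happened. */
static bool add_unless64(_Atomic int64_t *v, int64_t i, int64_t u)
{
    int64_t c = atomic_load(v);

    while (c != u) {
        /* compare_exchange reloads c on failure, so just retry */
        if (atomic_compare_exchange_weak(v, &c, c + i))
            return true;
    }
    return false;
}

int main(void)
{
    _Atomic int64_t v = 5;
    printf("%d %d\n", add_unless64(&v, 1, 5), add_unless64(&v, 1, 5));  /* 0 1 */
    return 0;
}
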
@@ -40,7 +40,6 @@ CONFIG_PM_STD_PARTITION="/dev/hda3"
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_DEBUG=y
CONFIG_CPU_FREQ_STAT=m
CONFIG_CPU_FREQ_STAT_DETAILS=y
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=m
CONFIG_CPU_FREQ_GOV_USERSPACE=m
@@ -16,7 +16,7 @@
struct task_struct;
struct thread_struct;

#if !defined(CONFIG_LAZY_SAVE_FPU)
#if defined(CONFIG_FPU) && !defined(CONFIG_LAZY_SAVE_FPU)
struct fpu_state_struct;
extern asmlinkage void fpu_save(struct fpu_state_struct *);
#define switch_fpu(prev, next) \
@@ -6,7 +6,7 @@
#endif

#include <linux/compiler.h>
#include <asm/types.h> /* for BITS_PER_LONG/SHIFT_PER_LONG */
#include <asm/types.h>
#include <asm/byteorder.h>
#include <asm/barrier.h>
#include <linux/atomic.h>
@@ -17,6 +17,12 @@
* to include/asm-i386/bitops.h or kerneldoc
*/

#if __BITS_PER_LONG == 64
#define SHIFT_PER_LONG 6
#else
#define SHIFT_PER_LONG 5
#endif

#define CHOP_SHIFTCOUNT(x) (((unsigned long) (x)) & (BITS_PER_LONG - 1))
@@ -3,10 +3,8 @@

#if defined(__LP64__)
#define __BITS_PER_LONG 64
#define SHIFT_PER_LONG 6
#else
#define __BITS_PER_LONG 32
#define SHIFT_PER_LONG 5
#endif

#include <asm-generic/bitsperlong.h>
@@ -1,6 +1,7 @@
#ifndef _PARISC_SWAB_H
#define _PARISC_SWAB_H

#include <asm/bitsperlong.h>
#include <linux/types.h>
#include <linux/compiler.h>

@@ -38,7 +39,7 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
}
#define __arch_swab32 __arch_swab32

#if BITS_PER_LONG > 32
#if __BITS_PER_LONG > 32
/*
** From "PA-RISC 2.0 Architecture", HP Professional Books.
** See Appendix I page 8 , "Endian Byte Swapping".
@@ -61,6 +62,6 @@ static inline __attribute_const__ __u64 __arch_swab64(__u64 x)
return x;
}
#define __arch_swab64 __arch_swab64
#endif /* BITS_PER_LONG > 32 */
#endif /* __BITS_PER_LONG > 32 */

#endif /* _PARISC_SWAB_H */
@@ -62,7 +62,6 @@ CONFIG_MPC8610_HPCD=y
CONFIG_GEF_SBC610=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_STAT=m
CONFIG_CPU_FREQ_STAT_DETAILS=y
CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=m
@@ -963,6 +963,11 @@ static int s390_fpregs_set(struct task_struct *target,
if (target == current)
save_fpu_regs();

if (MACHINE_HAS_VX)
convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
else
memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));

/* If setting FPC, must validate it first. */
if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
@@ -1067,6 +1072,9 @@ static int s390_vxrs_low_set(struct task_struct *target,
if (target == current)
save_fpu_regs();

for (i = 0; i < __NUM_VXRS_LOW; i++)
vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);

rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
if (rc == 0)
for (i = 0; i < __NUM_VXRS_LOW; i++)
@@ -202,7 +202,7 @@ static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
return pgste;
}

static inline void ptep_xchg_commit(struct mm_struct *mm,
static inline pte_t ptep_xchg_commit(struct mm_struct *mm,
unsigned long addr, pte_t *ptep,
pgste_t pgste, pte_t old, pte_t new)
{
@@ -220,6 +220,7 @@ static inline void ptep_xchg_commit(struct mm_struct *mm,
} else {
*ptep = new;
}
return old;
}

pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
@@ -231,7 +232,7 @@ pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
preempt_disable();
pgste = ptep_xchg_start(mm, addr, ptep);
old = ptep_flush_direct(mm, addr, ptep);
ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
preempt_enable();
return old;
}
@@ -246,7 +247,7 @@ pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
preempt_disable();
pgste = ptep_xchg_start(mm, addr, ptep);
old = ptep_flush_lazy(mm, addr, ptep);
ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
preempt_enable();
return old;
}
@@ -25,7 +25,7 @@ CONFIG_SH_SH7785LCR=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_STAT_DETAILS=y
CONFIG_CPU_FREQ_STAT=y
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_SH_CPU_FREQ=y
CONFIG_HEARTBEAT=y
@@ -111,7 +111,7 @@ static int tile_gpr_set(struct task_struct *target,
const void *kbuf, const void __user *ubuf)
{
int ret;
struct pt_regs regs;
struct pt_regs regs = *task_pt_regs(target);

ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0,
sizeof(regs));
@@ -852,23 +852,18 @@ acpi_tb_install_and_load_table(acpi_physical_address address,

ACPI_FUNCTION_TRACE(tb_install_and_load_table);

(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);

/* Install the table and load it into the namespace */

status = acpi_tb_install_standard_table(address, flags, TRUE,
override, &i);
if (ACPI_FAILURE(status)) {
goto unlock_and_exit;
goto exit;
}

(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
status = acpi_tb_load_table(i, acpi_gbl_root_node);
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);

unlock_and_exit:
exit:
*table_index = i;
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
return_ACPI_STATUS(status);
}

@@ -217,6 +217,10 @@ acpi_tb_install_standard_table(acpi_physical_address address,
goto release_and_exit;
}

/* Acquire the table lock */

(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);

if (reload) {
/*
* Validate the incoming table signature.
@@ -244,7 +248,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
new_table_desc.signature.integer));

status = AE_BAD_SIGNATURE;
goto release_and_exit;
goto unlock_and_exit;
}

/* Check if table is already registered */
@@ -279,7 +283,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
/* Table is still loaded, this is an error */

status = AE_ALREADY_EXISTS;
goto release_and_exit;
goto unlock_and_exit;
} else {
/*
* Table was unloaded, allow it to be reloaded.
@@ -290,6 +294,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
* indicate the re-installation.
*/
acpi_tb_uninstall_table(&new_table_desc);
(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
*table_index = i;
return_ACPI_STATUS(AE_OK);
}
@@ -303,11 +308,19 @@ acpi_tb_install_standard_table(acpi_physical_address address,

/* Invoke table handler if present */

(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
if (acpi_gbl_table_handler) {
(void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_INSTALL,
new_table_desc.pointer,
acpi_gbl_table_handler_context);
}
(void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);

unlock_and_exit:

/* Release the table lock */

(void)acpi_ut_release_mutex(ACPI_MTX_TABLES);

release_and_exit:
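Editorial note: the relocking above follows a common discipline -- drop the table mutex around the user-supplied table handler so the callback can itself perform table operations without deadlocking, then reacquire the mutex to finish up. A generic pthread sketch of the same pattern (an illustration, not ACPICA code):

#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static void (*table_handler)(void *ctx);
static void *handler_ctx;

static void install_table(void)
{
    pthread_mutex_lock(&table_lock);
    /* ... install and validate under the lock ... */

    pthread_mutex_unlock(&table_lock);      /* drop before calling out */
    if (table_handler)
        table_handler(handler_ctx);         /* may re-enter table code */
    pthread_mutex_lock(&table_lock);        /* reacquire to finish up */

    /* ... final bookkeeping ... */
    pthread_mutex_unlock(&table_lock);
}

int main(void) { install_table(); return 0; }
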
@@ -75,10 +75,8 @@ static int acpi_processor_ppc_notifier(struct notifier_block *nb,
struct acpi_processor *pr;
unsigned int ppc = 0;

if (event == CPUFREQ_START && ignore_ppc <= 0) {
if (ignore_ppc < 0)
ignore_ppc = 0;
return 0;
}

if (ignore_ppc)
return 0;
@@ -674,14 +674,6 @@ static void acpi_sleep_suspend_setup(void)
if (acpi_sleep_state_supported(i))
sleep_states[i] = 1;

/*
* Use suspend-to-idle by default if ACPI_FADT_LOW_POWER_S0 is set and
* the default suspend mode was not selected from the command line.
*/
if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0 &&
mem_sleep_default > PM_SUSPEND_MEM)
mem_sleep_default = PM_SUSPEND_FREEZE;

suspend_set_ops(old_suspend_ordering ?
&acpi_suspend_ops_old : &acpi_suspend_ops);
freeze_set_ops(&acpi_freeze_ops);
@@ -305,17 +305,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Dell System XPS L702X"),
},
},
{
/* https://bugzilla.redhat.com/show_bug.cgi?id=1204476 */
/* https://bugs.launchpad.net/ubuntu/+source/linux-lts-trusty/+bug/1416940 */
.callback = video_detect_force_native,
.ident = "HP Pavilion dv6",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv6 Notebook PC"),
},
},

{ },
};
@@ -408,14 +408,14 @@ static ssize_t show_valid_zones(struct device *dev,
sprintf(buf, "%s", zone->name);

/* MMOP_ONLINE_KERNEL */
zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL);
zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL, &zone_shift);
if (zone_shift) {
strcat(buf, " ");
strcat(buf, (zone + zone_shift)->name);
}

/* MMOP_ONLINE_MOVABLE */
zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE);
zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE, &zone_shift);
if (zone_shift) {
strcat(buf, " ");
strcat(buf, (zone + zone_shift)->name);
@@ -197,13 +197,13 @@ struct blkfront_info
/* Number of pages per ring buffer. */
unsigned int nr_ring_pages;
struct request_queue *rq;
unsigned int feature_flush;
unsigned int feature_fua;
unsigned int feature_flush:1;
unsigned int feature_fua:1;
unsigned int feature_discard:1;
unsigned int feature_secdiscard:1;
unsigned int feature_persistent:1;
unsigned int discard_granularity;
unsigned int discard_alignment;
unsigned int feature_persistent:1;
/* Number of 4KB segments handled */
unsigned int max_indirect_segments;
int is_ready;
@@ -2223,7 +2223,7 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
}
else
grants = info->max_indirect_segments;
psegs = grants / GRANTS_PER_PSEG;
psegs = DIV_ROUND_UP(grants, GRANTS_PER_PSEG);

err = fill_grant_buffer(rinfo,
(grants + INDIRECT_GREFS(grants)) * BLK_RING_SIZE(info));
@@ -2323,13 +2323,16 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
blkfront_setup_discard(info);

info->feature_persistent =
xenbus_read_unsigned(info->xbdev->otherend,
"feature-persistent", 0);
!!xenbus_read_unsigned(info->xbdev->otherend,
"feature-persistent", 0);

indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
"feature-max-indirect-segments", 0);
info->max_indirect_segments = min(indirect_segments,
xen_blkif_max_segments);
if (indirect_segments > xen_blkif_max_segments)
indirect_segments = xen_blkif_max_segments;
if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST)
indirect_segments = 0;
info->max_indirect_segments = indirect_segments;
}

/*
@@ -2652,6 +2655,9 @@ static int __init xlblk_init(void)
if (!xen_domain())
return -ENODEV;

if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST)
xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;

if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
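The rewritten feature negotiation replaces a bare min() with an explicit clamp plus a threshold: backend values at or below the ring's built-in per-request segment count mean indirect descriptors buy nothing. Roughly (a sketch with example constants, not the driver itself):

#include <stdio.h>

#define BLKIF_MAX_SEGMENTS_PER_REQUEST 11   /* protocol built-in limit */

static unsigned int clamp_indirect(unsigned int backend, unsigned int module_max)
{
    if (backend > module_max)
        backend = module_max;               /* honor the module parameter */
    if (backend <= BLKIF_MAX_SEGMENTS_PER_REQUEST)
        backend = 0;                        /* indirect segments buy nothing */
    return backend;
}

int main(void)
{
    printf("%u\n", clamp_indirect(256, 32));    /* -> 32 */
    printf("%u\n", clamp_indirect(8, 32));      /* -> 0  */
    return 0;
}
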
@@ -37,14 +37,6 @@ config CPU_FREQ_STAT

If in doubt, say N.

config CPU_FREQ_STAT_DETAILS
bool "CPU frequency transition statistics details"
depends on CPU_FREQ_STAT
help
Show detailed CPU frequency transition table in sysfs.

If in doubt, say N.

choice
prompt "Default CPUFreq governor"
default CPU_FREQ_DEFAULT_GOV_USERSPACE if ARM_SA1100_CPUFREQ || ARM_SA1110_CPUFREQ
@@ -1078,15 +1078,11 @@ err_free_policy:
return NULL;
}

static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
{
struct kobject *kobj;
struct completion *cmp;

if (notify)
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_REMOVE_POLICY, policy);

down_write(&policy->rwsem);
cpufreq_stats_free_table(policy);
kobj = &policy->kobj;
@@ -1104,7 +1100,7 @@ static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy, bool notify)
pr_debug("wait complete\n");
}

static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
unsigned long flags;
int cpu;
@@ -1117,7 +1113,7 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
per_cpu(cpufreq_cpu_data, cpu) = NULL;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);

cpufreq_policy_put_kobj(policy, notify);
cpufreq_policy_put_kobj(policy);
free_cpumask_var(policy->real_cpus);
free_cpumask_var(policy->related_cpus);
free_cpumask_var(policy->cpus);
@@ -1244,17 +1240,12 @@ static int cpufreq_online(unsigned int cpu)
goto out_exit_policy;

cpufreq_stats_create_table(policy);
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_CREATE_POLICY, policy);

write_lock_irqsave(&cpufreq_driver_lock, flags);
list_add(&policy->policy_list, &cpufreq_policy_list);
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
}

blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_START, policy);

ret = cpufreq_init_policy(policy);
if (ret) {
pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
@@ -1282,7 +1273,7 @@ out_exit_policy:
if (cpufreq_driver->exit)
cpufreq_driver->exit(policy);
out_free_policy:
cpufreq_policy_free(policy, !new_policy);
cpufreq_policy_free(policy);
return ret;
}

@@ -1403,7 +1394,7 @@ static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
remove_cpu_dev_symlink(policy, dev);

if (cpumask_empty(policy->real_cpus))
cpufreq_policy_free(policy, true);
cpufreq_policy_free(policy);
}

/**
@@ -25,9 +25,7 @@ struct cpufreq_stats {
unsigned int last_index;
u64 *time_in_state;
unsigned int *freq_table;
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
unsigned int *trans_table;
#endif
};

static int cpufreq_stats_update(struct cpufreq_stats *stats)
@@ -46,9 +44,7 @@ static void cpufreq_stats_clear_table(struct cpufreq_stats *stats)
unsigned int count = stats->max_state;

memset(stats->time_in_state, 0, count * sizeof(u64));
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
memset(stats->trans_table, 0, count * count * sizeof(int));
#endif
stats->last_time = get_jiffies_64();
stats->total_trans = 0;
}
@@ -84,7 +80,6 @@ static ssize_t store_reset(struct cpufreq_policy *policy, const char *buf,
return count;
}

#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
{
struct cpufreq_stats *stats = policy->stats;
@@ -129,7 +124,6 @@ static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
return len;
}
cpufreq_freq_attr_ro(trans_table);
#endif

cpufreq_freq_attr_ro(total_trans);
cpufreq_freq_attr_ro(time_in_state);
@@ -139,9 +133,7 @@ static struct attribute *default_attrs[] = {
&total_trans.attr,
&time_in_state.attr,
&reset.attr,
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
&trans_table.attr,
#endif
NULL
};
static struct attribute_group stats_attr_group = {
@@ -200,9 +192,7 @@ void cpufreq_stats_create_table(struct cpufreq_policy *policy)

alloc_size = count * sizeof(int) + count * sizeof(u64);

#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
alloc_size += count * count * sizeof(int);
#endif

/* Allocate memory for time_in_state/freq_table/trans_table in one go */
stats->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
@@ -211,9 +201,7 @@ void cpufreq_stats_create_table(struct cpufreq_policy *policy)

stats->freq_table = (unsigned int *)(stats->time_in_state + count);

#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
stats->trans_table = stats->freq_table + count;
#endif

stats->max_state = count;

@@ -259,8 +247,6 @@ void cpufreq_stats_record_transition(struct cpufreq_policy *policy,
cpufreq_stats_update(stats);

stats->last_index = new_index;
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
stats->trans_table[old_index * stats->max_state + new_index]++;
#endif
stats->total_trans++;
}
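For orientation: with CPU_FREQ_STAT_DETAILS folded away, trans_table is always present and is a max_state x max_state matrix flattened into the single allocation, indexed as old * max_state + new. A minimal sketch of that layout (editorial illustration):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    unsigned int max_state = 4;     /* number of P-states (example) */
    unsigned int *trans_table = calloc(max_state * max_state,
                                       sizeof(*trans_table));
    if (!trans_table)
        return 1;

    unsigned int old_index = 1, new_index = 3;
    trans_table[old_index * max_state + new_index]++;   /* record one transition */

    printf("1->3 transitions: %u\n", trans_table[1 * max_state + 3]);
    free(trans_table);
    return 0;
}
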
@@ -358,6 +358,8 @@ static struct pstate_funcs pstate_funcs __read_mostly;
static int hwp_active __read_mostly;
static bool per_cpu_limits __read_mostly;

static bool driver_registered __read_mostly;

#ifdef CONFIG_ACPI
static bool acpi_ppc;
#endif
@@ -394,6 +396,7 @@ static struct perf_limits *limits = &performance_limits;
static struct perf_limits *limits = &powersave_limits;
#endif

static DEFINE_MUTEX(intel_pstate_driver_lock);
static DEFINE_MUTEX(intel_pstate_limits_lock);

#ifdef CONFIG_ACPI
@@ -538,7 +541,6 @@ static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)

acpi_processor_unregister_performance(policy->cpu);
}

#else
static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
{
@@ -873,7 +875,10 @@ static void intel_pstate_hwp_set(struct cpufreq_policy *policy)

rdmsrl_on_cpu(cpu, MSR_HWP_CAPABILITIES, &cap);
hw_min = HWP_LOWEST_PERF(cap);
hw_max = HWP_HIGHEST_PERF(cap);
if (limits->no_turbo)
hw_max = HWP_GUARANTEED_PERF(cap);
else
hw_max = HWP_HIGHEST_PERF(cap);
range = hw_max - hw_min;

max_perf_pct = perf_limits->max_perf_pct;
@@ -887,11 +892,6 @@ static void intel_pstate_hwp_set(struct cpufreq_policy *policy)

adj_range = max_perf_pct * range / 100;
max = hw_min + adj_range;
if (limits->no_turbo) {
hw_max = HWP_GUARANTEED_PERF(cap);
if (hw_max < max)
max = hw_max;
}

value &= ~HWP_MAX_PERF(~0L);
value |= HWP_MAX_PERF(max);
@@ -1007,37 +1007,59 @@ static int pid_param_get(void *data, u64 *val)
}
DEFINE_SIMPLE_ATTRIBUTE(fops_pid_param, pid_param_get, pid_param_set, "%llu\n");

static struct dentry *debugfs_parent;

struct pid_param {
char *name;
void *value;
struct dentry *dentry;
};

static struct pid_param pid_files[] = {
{"sample_rate_ms", &pid_params.sample_rate_ms},
{"d_gain_pct", &pid_params.d_gain_pct},
{"i_gain_pct", &pid_params.i_gain_pct},
{"deadband", &pid_params.deadband},
{"setpoint", &pid_params.setpoint},
{"p_gain_pct", &pid_params.p_gain_pct},
{NULL, NULL}
{"sample_rate_ms", &pid_params.sample_rate_ms, },
{"d_gain_pct", &pid_params.d_gain_pct, },
{"i_gain_pct", &pid_params.i_gain_pct, },
{"deadband", &pid_params.deadband, },
{"setpoint", &pid_params.setpoint, },
{"p_gain_pct", &pid_params.p_gain_pct, },
{NULL, NULL, }
};

static void __init intel_pstate_debug_expose_params(void)
static void intel_pstate_debug_expose_params(void)
{
struct dentry *debugfs_parent;
int i = 0;
int i;

debugfs_parent = debugfs_create_dir("pstate_snb", NULL);
if (IS_ERR_OR_NULL(debugfs_parent))
return;
while (pid_files[i].name) {
debugfs_create_file(pid_files[i].name, 0660,
debugfs_parent, pid_files[i].value,
&fops_pid_param);
i++;

for (i = 0; pid_files[i].name; i++) {
struct dentry *dentry;

dentry = debugfs_create_file(pid_files[i].name, 0660,
debugfs_parent, pid_files[i].value,
&fops_pid_param);
if (!IS_ERR(dentry))
pid_files[i].dentry = dentry;
}
}

static void intel_pstate_debug_hide_params(void)
{
int i;

if (IS_ERR_OR_NULL(debugfs_parent))
return;

for (i = 0; pid_files[i].name; i++) {
debugfs_remove(pid_files[i].dentry);
pid_files[i].dentry = NULL;
}

debugfs_remove(debugfs_parent);
debugfs_parent = NULL;
}

/************************** debugfs end ************************/

/************************** sysfs begin ************************/
@@ -1048,6 +1070,34 @@ static void __init intel_pstate_debug_expose_params(void)
return sprintf(buf, "%u\n", limits->object); \
}

static ssize_t intel_pstate_show_status(char *buf);
static int intel_pstate_update_status(const char *buf, size_t size);

static ssize_t show_status(struct kobject *kobj,
struct attribute *attr, char *buf)
{
ssize_t ret;

mutex_lock(&intel_pstate_driver_lock);
ret = intel_pstate_show_status(buf);
mutex_unlock(&intel_pstate_driver_lock);

return ret;
}

static ssize_t store_status(struct kobject *a, struct attribute *b,
const char *buf, size_t count)
{
char *p = memchr(buf, '\n', count);
int ret;

mutex_lock(&intel_pstate_driver_lock);
ret = intel_pstate_update_status(buf, p ? p - buf : count);
mutex_unlock(&intel_pstate_driver_lock);

return ret < 0 ? ret : count;
}

static ssize_t show_turbo_pct(struct kobject *kobj,
struct attribute *attr, char *buf)
{
@@ -1055,12 +1105,22 @@ static ssize_t show_turbo_pct(struct kobject *kobj,
int total, no_turbo, turbo_pct;
uint32_t turbo_fp;

mutex_lock(&intel_pstate_driver_lock);

if (!driver_registered) {
mutex_unlock(&intel_pstate_driver_lock);
return -EAGAIN;
}

cpu = all_cpu_data[0];

total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
turbo_fp = div_fp(no_turbo, total);
turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));

mutex_unlock(&intel_pstate_driver_lock);

return sprintf(buf, "%u\n", turbo_pct);
}

@@ -1070,8 +1130,18 @@ static ssize_t show_num_pstates(struct kobject *kobj,
struct cpudata *cpu;
int total;

mutex_lock(&intel_pstate_driver_lock);

if (!driver_registered) {
mutex_unlock(&intel_pstate_driver_lock);
return -EAGAIN;
}

cpu = all_cpu_data[0];
total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;

mutex_unlock(&intel_pstate_driver_lock);

return sprintf(buf, "%u\n", total);
}

@@ -1080,12 +1150,21 @@ static ssize_t show_no_turbo(struct kobject *kobj,
{
ssize_t ret;

mutex_lock(&intel_pstate_driver_lock);

if (!driver_registered) {
mutex_unlock(&intel_pstate_driver_lock);
return -EAGAIN;
}

update_turbo_state();
if (limits->turbo_disabled)
ret = sprintf(buf, "%u\n", limits->turbo_disabled);
else
ret = sprintf(buf, "%u\n", limits->no_turbo);

mutex_unlock(&intel_pstate_driver_lock);

return ret;
}

@@ -1099,12 +1178,20 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
if (ret != 1)
return -EINVAL;

mutex_lock(&intel_pstate_driver_lock);

if (!driver_registered) {
mutex_unlock(&intel_pstate_driver_lock);
return -EAGAIN;
}

mutex_lock(&intel_pstate_limits_lock);

update_turbo_state();
if (limits->turbo_disabled) {
pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
mutex_unlock(&intel_pstate_limits_lock);
mutex_unlock(&intel_pstate_driver_lock);
return -EPERM;
}

@@ -1114,6 +1201,8 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,

intel_pstate_update_policies();

mutex_unlock(&intel_pstate_driver_lock);

return count;
}

@@ -1127,6 +1216,13 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
if (ret != 1)
return -EINVAL;

mutex_lock(&intel_pstate_driver_lock);

if (!driver_registered) {
mutex_unlock(&intel_pstate_driver_lock);
return -EAGAIN;
}

mutex_lock(&intel_pstate_limits_lock);

limits->max_sysfs_pct = clamp_t(int, input, 0 , 100);
@@ -1142,6 +1238,8 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,

intel_pstate_update_policies();

mutex_unlock(&intel_pstate_driver_lock);

return count;
}

@@ -1155,6 +1253,13 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
if (ret != 1)
return -EINVAL;

mutex_lock(&intel_pstate_driver_lock);

if (!driver_registered) {
mutex_unlock(&intel_pstate_driver_lock);
return -EAGAIN;
}

mutex_lock(&intel_pstate_limits_lock);

limits->min_sysfs_pct = clamp_t(int, input, 0 , 100);
@@ -1170,12 +1275,15 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,

intel_pstate_update_policies();

mutex_unlock(&intel_pstate_driver_lock);

return count;
}

show_one(max_perf_pct, max_perf_pct);
show_one(min_perf_pct, min_perf_pct);

define_one_global_rw(status);
define_one_global_rw(no_turbo);
define_one_global_rw(max_perf_pct);
define_one_global_rw(min_perf_pct);
@@ -1183,6 +1291,7 @@ define_one_global_ro(turbo_pct);
define_one_global_ro(num_pstates);

static struct attribute *intel_pstate_attributes[] = {
&status.attr,
&no_turbo.attr,
&turbo_pct.attr,
&num_pstates.attr,
@@ -1364,48 +1473,71 @@ static int core_get_max_pstate_physical(void)
return (value >> 8) & 0xFF;
}

static int core_get_tdp_ratio(u64 plat_info)
{
/* Check how many TDP levels present */
if (plat_info & 0x600000000) {
u64 tdp_ctrl;
u64 tdp_ratio;
int tdp_msr;
int err;

/* Get the TDP level (0, 1, 2) to get ratios */
err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
if (err)
return err;

/* TDP MSR are continuous starting at 0x648 */
tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03);
err = rdmsrl_safe(tdp_msr, &tdp_ratio);
if (err)
return err;

/* For level 1 and 2, bits[23:16] contain the ratio */
if (tdp_ctrl & 0x03)
tdp_ratio >>= 16;

tdp_ratio &= 0xff; /* ratios are only 8 bits long */
pr_debug("tdp_ratio %x\n", (int)tdp_ratio);

return (int)tdp_ratio;
}

return -ENXIO;
}

static int core_get_max_pstate(void)
{
u64 tar;
u64 plat_info;
int max_pstate;
int tdp_ratio;
int err;

rdmsrl(MSR_PLATFORM_INFO, plat_info);
max_pstate = (plat_info >> 8) & 0xFF;

tdp_ratio = core_get_tdp_ratio(plat_info);
if (tdp_ratio <= 0)
return max_pstate;

if (hwp_active) {
/* Turbo activation ratio is not used on HWP platforms */
return tdp_ratio;
}

err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar);
if (!err) {
int tar_levels;

/* Do some sanity checking for safety */
if (plat_info & 0x600000000) {
u64 tdp_ctrl;
u64 tdp_ratio;
int tdp_msr;

err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
if (err)
goto skip_tar;

tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x3);
err = rdmsrl_safe(tdp_msr, &tdp_ratio);
if (err)
goto skip_tar;

/* For level 1 and 2, bits[23:16] contain the ratio */
if (tdp_ctrl)
tdp_ratio >>= 16;

tdp_ratio &= 0xff; /* ratios are only 8 bits long */
if (tdp_ratio - 1 == tar) {
max_pstate = tar;
pr_debug("max_pstate=TAC %x\n", max_pstate);
} else {
goto skip_tar;
}
tar_levels = tar & 0xff;
if (tdp_ratio - 1 == tar_levels) {
max_pstate = tar_levels;
pr_debug("max_pstate=TAC %x\n", max_pstate);
}
}

skip_tar:
return max_pstate;
}
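The factored-out helper reads MSR_CONFIG_TDP_CONTROL to pick one of the consecutive CONFIG_TDP level MSRs and then extracts the ratio field. The bit manipulation, shown standalone (editorial illustration; the MSR values below are invented):

#include <stdint.h>
#include <stdio.h>

/* For TDP levels 1 and 2 the ratio lives in bits[23:16]; level 0 uses bits[7:0]. */
static int tdp_ratio_from_msr(uint64_t tdp_ctrl, uint64_t tdp_msr_value)
{
    uint64_t ratio = tdp_msr_value;

    if (tdp_ctrl & 0x03)
        ratio >>= 16;
    return (int)(ratio & 0xff);     /* ratios are only 8 bits long */
}

int main(void)
{
    printf("%d\n", tdp_ratio_from_msr(0x1, 0x00230000));    /* -> 0x23 = 35 */
    return 0;
}
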
@@ -2035,7 +2167,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
limits = &performance_limits;
perf_limits = limits;
}
if (policy->max >= policy->cpuinfo.max_freq) {
if (policy->max >= policy->cpuinfo.max_freq &&
!limits->no_turbo) {
pr_debug("set performance\n");
intel_pstate_set_performance_limits(perf_limits);
goto out;
@@ -2071,12 +2204,37 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)

static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
{
struct cpudata *cpu = all_cpu_data[policy->cpu];
struct perf_limits *perf_limits;

if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
perf_limits = &performance_limits;
else
perf_limits = &powersave_limits;

update_turbo_state();
policy->cpuinfo.max_freq = perf_limits->turbo_disabled ||
perf_limits->no_turbo ?
cpu->pstate.max_freq :
cpu->pstate.turbo_freq;

cpufreq_verify_within_cpu_limits(policy);

if (policy->policy != CPUFREQ_POLICY_POWERSAVE &&
policy->policy != CPUFREQ_POLICY_PERFORMANCE)
return -EINVAL;

/* When per-CPU limits are used, sysfs limits are not used */
if (!per_cpu_limits) {
unsigned int max_freq, min_freq;

max_freq = policy->cpuinfo.max_freq *
limits->max_sysfs_pct / 100;
min_freq = policy->cpuinfo.max_freq *
limits->min_sysfs_pct / 100;
cpufreq_verify_within_limits(policy, min_freq, max_freq);
}

return 0;
}
@@ -2287,6 +2445,111 @@ static struct cpufreq_driver intel_cpufreq = {

static struct cpufreq_driver *intel_pstate_driver = &intel_pstate;

static void intel_pstate_driver_cleanup(void)
{
unsigned int cpu;

get_online_cpus();
for_each_online_cpu(cpu) {
if (all_cpu_data[cpu]) {
if (intel_pstate_driver == &intel_pstate)
intel_pstate_clear_update_util_hook(cpu);

kfree(all_cpu_data[cpu]);
all_cpu_data[cpu] = NULL;
}
}
put_online_cpus();
}

static int intel_pstate_register_driver(void)
{
int ret;

ret = cpufreq_register_driver(intel_pstate_driver);
if (ret) {
intel_pstate_driver_cleanup();
return ret;
}

mutex_lock(&intel_pstate_limits_lock);
driver_registered = true;
mutex_unlock(&intel_pstate_limits_lock);

if (intel_pstate_driver == &intel_pstate && !hwp_active &&
pstate_funcs.get_target_pstate != get_target_pstate_use_cpu_load)
intel_pstate_debug_expose_params();

return 0;
}

static int intel_pstate_unregister_driver(void)
{
if (hwp_active)
return -EBUSY;

if (intel_pstate_driver == &intel_pstate && !hwp_active &&
pstate_funcs.get_target_pstate != get_target_pstate_use_cpu_load)
intel_pstate_debug_hide_params();

mutex_lock(&intel_pstate_limits_lock);
driver_registered = false;
mutex_unlock(&intel_pstate_limits_lock);

cpufreq_unregister_driver(intel_pstate_driver);
intel_pstate_driver_cleanup();

return 0;
}

static ssize_t intel_pstate_show_status(char *buf)
{
if (!driver_registered)
return sprintf(buf, "off\n");

return sprintf(buf, "%s\n", intel_pstate_driver == &intel_pstate ?
"active" : "passive");
}

static int intel_pstate_update_status(const char *buf, size_t size)
{
int ret;

if (size == 3 && !strncmp(buf, "off", size))
return driver_registered ?
intel_pstate_unregister_driver() : -EINVAL;

if (size == 6 && !strncmp(buf, "active", size)) {
if (driver_registered) {
if (intel_pstate_driver == &intel_pstate)
return 0;

ret = intel_pstate_unregister_driver();
if (ret)
return ret;
}

intel_pstate_driver = &intel_pstate;
return intel_pstate_register_driver();
}

if (size == 7 && !strncmp(buf, "passive", size)) {
if (driver_registered) {
if (intel_pstate_driver != &intel_pstate)
return 0;

ret = intel_pstate_unregister_driver();
if (ret)
return ret;
}

intel_pstate_driver = &intel_cpufreq;
return intel_pstate_register_driver();
}

return -EINVAL;
}

static int no_load __initdata;
static int no_hwp __initdata;
static int hwp_only __initdata;
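The new status attribute implements a small three-state machine: writes of "off", "active" or "passive" unregister and re-register the driver in the requested mode. A condensed model of the dispatch (editorial sketch only, not the driver's code):

#include <stdio.h>
#include <string.h>

enum pstate_mode { MODE_OFF, MODE_ACTIVE, MODE_PASSIVE };

/* Returns the requested mode, or -1 for anything unrecognized. */
static int parse_status(const char *buf, size_t size)
{
    if (size == 3 && !strncmp(buf, "off", size))
        return MODE_OFF;
    if (size == 6 && !strncmp(buf, "active", size))
        return MODE_ACTIVE;
    if (size == 7 && !strncmp(buf, "passive", size))
        return MODE_PASSIVE;
    return -1;
}

int main(void)
{
    printf("%d %d %d\n", parse_status("off", 3),
           parse_status("passive", 7), parse_status("bogus", 5));
    return 0;
}
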
@@ -2474,9 +2737,9 @@ static const struct x86_cpu_id hwp_support_ids[] __initconst = {

static int __init intel_pstate_init(void)
{
int cpu, rc = 0;
const struct x86_cpu_id *id;
struct cpu_defaults *cpu_def;
int rc = 0;

if (no_load)
return -ENODEV;
@@ -2508,45 +2771,29 @@ hwp_cpu_matched:
if (intel_pstate_platform_pwr_mgmt_exists())
return -ENODEV;

if (!hwp_active && hwp_only)
return -ENOTSUPP;

pr_info("Intel P-state driver initializing\n");

all_cpu_data = vzalloc(sizeof(void *) * num_possible_cpus());
if (!all_cpu_data)
return -ENOMEM;

if (!hwp_active && hwp_only)
goto out;

intel_pstate_request_control_from_smm();

rc = cpufreq_register_driver(intel_pstate_driver);
if (rc)
goto out;

if (intel_pstate_driver == &intel_pstate && !hwp_active &&
pstate_funcs.get_target_pstate != get_target_pstate_use_cpu_load)
intel_pstate_debug_expose_params();

intel_pstate_sysfs_expose_params();

mutex_lock(&intel_pstate_driver_lock);
rc = intel_pstate_register_driver();
mutex_unlock(&intel_pstate_driver_lock);
if (rc)
return rc;

if (hwp_active)
pr_info("HWP enabled\n");

return rc;
out:
get_online_cpus();
for_each_online_cpu(cpu) {
if (all_cpu_data[cpu]) {
if (intel_pstate_driver == &intel_pstate)
intel_pstate_clear_update_util_hook(cpu);

kfree(all_cpu_data[cpu]);
}
}

put_online_cpus();
vfree(all_cpu_data);
return -ENODEV;
return 0;
}
device_initcall(intel_pstate_init);
@@ -144,6 +144,7 @@ static struct powernv_pstate_info {
unsigned int max;
unsigned int nominal;
unsigned int nr_pstates;
bool wof_enabled;
} powernv_pstate_info;

/* Use following macros for conversions between pstate_id and index */
@@ -203,6 +204,7 @@ static int init_powernv_pstates(void)
const __be32 *pstate_ids, *pstate_freqs;
u32 len_ids, len_freqs;
u32 pstate_min, pstate_max, pstate_nominal;
u32 pstate_turbo, pstate_ultra_turbo;

power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
if (!power_mgt) {
@@ -225,8 +227,29 @@ static int init_powernv_pstates(void)
pr_warn("ibm,pstate-nominal not found\n");
return -ENODEV;
}

if (of_property_read_u32(power_mgt, "ibm,pstate-ultra-turbo",
&pstate_ultra_turbo)) {
powernv_pstate_info.wof_enabled = false;
goto next;
}

if (of_property_read_u32(power_mgt, "ibm,pstate-turbo",
&pstate_turbo)) {
powernv_pstate_info.wof_enabled = false;
goto next;
}

if (pstate_turbo == pstate_ultra_turbo)
powernv_pstate_info.wof_enabled = false;
else
powernv_pstate_info.wof_enabled = true;

next:
pr_info("cpufreq pstate min %d nominal %d max %d\n", pstate_min,
pstate_nominal, pstate_max);
pr_info("Workload Optimized Frequency is %s in the platform\n",
(powernv_pstate_info.wof_enabled) ? "enabled" : "disabled");

pstate_ids = of_get_property(power_mgt, "ibm,pstate-ids", &len_ids);
if (!pstate_ids) {
@@ -268,6 +291,13 @@ static int init_powernv_pstates(void)
powernv_pstate_info.nominal = i;
else if (id == pstate_min)
powernv_pstate_info.min = i;

if (powernv_pstate_info.wof_enabled && id == pstate_turbo) {
int j;

for (j = i - 1; j >= (int)powernv_pstate_info.max; j--)
powernv_freqs[j].flags = CPUFREQ_BOOST_FREQ;
}
}

/* End of list marker entry */
@@ -305,9 +335,12 @@ static ssize_t cpuinfo_nominal_freq_show(struct cpufreq_policy *policy,
struct freq_attr cpufreq_freq_attr_cpuinfo_nominal_freq =
__ATTR_RO(cpuinfo_nominal_freq);

#define SCALING_BOOST_FREQS_ATTR_INDEX 2

static struct freq_attr *powernv_cpu_freq_attr[] = {
&cpufreq_freq_attr_scaling_available_freqs,
&cpufreq_freq_attr_cpuinfo_nominal_freq,
&cpufreq_freq_attr_scaling_boost_freqs,
NULL,
};

@@ -1013,11 +1046,22 @@ static int __init powernv_cpufreq_init(void)
register_reboot_notifier(&powernv_cpufreq_reboot_nb);
opal_message_notifier_register(OPAL_MSG_OCC, &powernv_cpufreq_opal_nb);

rc = cpufreq_register_driver(&powernv_cpufreq_driver);
if (!rc)
return 0;
if (powernv_pstate_info.wof_enabled)
powernv_cpufreq_driver.boost_enabled = true;
else
powernv_cpu_freq_attr[SCALING_BOOST_FREQS_ATTR_INDEX] = NULL;

pr_info("Failed to register the cpufreq driver (%d)\n", rc);
rc = cpufreq_register_driver(&powernv_cpufreq_driver);
if (rc) {
pr_info("Failed to register the cpufreq driver (%d)\n", rc);
goto cleanup_notifiers;
}

if (powernv_pstate_info.wof_enabled)
cpufreq_enable_boost_support();

return 0;
cleanup_notifiers:
unregister_all_notifiers();
clean_chip_info();
out:
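Marking the pstates above the turbo threshold with CPUFREQ_BOOST_FREQ is what lets the cpufreq core expose them only when boost is enabled. A hedged sketch of the flagging loop over a frequency table (the struct and flag value below are simplified stand-ins for the kernel's cpufreq_frequency_table, not its actual definition):

#include <stdio.h>

#define CPUFREQ_BOOST_FREQ (1u << 1)    /* value illustrative only */

struct freq_entry {
    unsigned int frequency;             /* kHz */
    unsigned int flags;
};

int main(void)
{
    /* table sorted fastest-first, as in the powernv driver */
    struct freq_entry t[] = { {4000000, 0}, {3800000, 0}, {3500000, 0} };
    int turbo_index = 2;                /* first non-boost entry (example) */

    for (int j = turbo_index - 1; j >= 0; j--)
        t[j].flags |= CPUFREQ_BOOST_FREQ;

    for (int j = 0; j < 3; j++)
        printf("%u kHz%s\n", t[j].frequency,
               t[j].flags & CPUFREQ_BOOST_FREQ ? " (boost)" : "");
    return 0;
}
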
@@ -100,9 +100,6 @@ static int pmi_notifier(struct notifier_block *nb,
/* Should this really be called for CPUFREQ_ADJUST and CPUFREQ_NOTIFY
* policy events?)
*/
if (event == CPUFREQ_START)
return 0;

node = cbe_cpu_to_node(policy->cpu);

pr_debug("got notified, event=%lu, node=%u\n", event, node);
@@ -1723,7 +1723,7 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
}

/**
* _gpiochip_irqchip_add() - adds an irqchip to a gpiochip
* gpiochip_irqchip_add_key() - adds an irqchip to a gpiochip
* @gpiochip: the gpiochip to add the irqchip to
* @irqchip: the irqchip to add to the gpiochip
* @first_irq: if not dynamically assigned, the base (first) IRQ to
@@ -1749,13 +1749,13 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
* the pins on the gpiochip can generate a unique IRQ. Everything else
* need to be open coded.
*/
int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
struct irq_chip *irqchip,
unsigned int first_irq,
irq_flow_handler_t handler,
unsigned int type,
bool nested,
struct lock_class_key *lock_key)
int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
struct irq_chip *irqchip,
unsigned int first_irq,
irq_flow_handler_t handler,
unsigned int type,
bool nested,
struct lock_class_key *lock_key)
{
struct device_node *of_node;
bool irq_base_set = false;
@@ -1840,7 +1840,7 @@ int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,

return 0;
}
EXPORT_SYMBOL_GPL(_gpiochip_irqchip_add);
EXPORT_SYMBOL_GPL(gpiochip_irqchip_add_key);

#else /* CONFIG_GPIOLIB_IRQCHIP */
@@ -83,6 +83,13 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
}
break;
}

if (!(*out_ring && (*out_ring)->adev)) {
DRM_ERROR("Ring %d is not initialized on IP %d\n",
ring, ip_type);
return -EINVAL;
}

return 0;
}
@@ -2512,6 +2512,8 @@ static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc,

WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));

return 0;
}
@@ -2537,7 +2539,6 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
int32_t hot_y)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = crtc->dev->dev_private;
struct drm_gem_object *obj;
struct amdgpu_bo *aobj;
int ret;
@@ -2578,7 +2579,9 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,

dce_v10_0_lock_cursor(crtc, true);

if (hot_x != amdgpu_crtc->cursor_hot_x ||
if (width != amdgpu_crtc->cursor_width ||
height != amdgpu_crtc->cursor_height ||
hot_x != amdgpu_crtc->cursor_hot_x ||
hot_y != amdgpu_crtc->cursor_hot_y) {
int x, y;

@@ -2587,16 +2590,10 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,

dce_v10_0_cursor_move_locked(crtc, x, y);

amdgpu_crtc->cursor_hot_x = hot_x;
amdgpu_crtc->cursor_hot_y = hot_y;
}

if (width != amdgpu_crtc->cursor_width ||
height != amdgpu_crtc->cursor_height) {
WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
(width - 1) << 16 | (height - 1));
amdgpu_crtc->cursor_width = width;
amdgpu_crtc->cursor_height = height;
amdgpu_crtc->cursor_hot_x = hot_x;
amdgpu_crtc->cursor_hot_y = hot_y;
}

dce_v10_0_show_cursor(crtc);
@@ -2620,7 +2617,6 @@ unpin:
static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = crtc->dev->dev_private;

if (amdgpu_crtc->cursor_bo) {
dce_v10_0_lock_cursor(crtc, true);
@@ -2628,10 +2624,6 @@ static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
dce_v10_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
amdgpu_crtc->cursor_y);

WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
(amdgpu_crtc->cursor_width - 1) << 16 |
(amdgpu_crtc->cursor_height - 1));

dce_v10_0_show_cursor(crtc);

dce_v10_0_lock_cursor(crtc, false);
@@ -2532,6 +2532,8 @@ static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,

WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));

return 0;
}
@@ -2557,7 +2559,6 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
int32_t hot_y)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = crtc->dev->dev_private;
struct drm_gem_object *obj;
struct amdgpu_bo *aobj;
int ret;
@@ -2598,7 +2599,9 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,

dce_v11_0_lock_cursor(crtc, true);

if (hot_x != amdgpu_crtc->cursor_hot_x ||
if (width != amdgpu_crtc->cursor_width ||
height != amdgpu_crtc->cursor_height ||
hot_x != amdgpu_crtc->cursor_hot_x ||
hot_y != amdgpu_crtc->cursor_hot_y) {
int x, y;

@@ -2607,16 +2610,10 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,

dce_v11_0_cursor_move_locked(crtc, x, y);

amdgpu_crtc->cursor_hot_x = hot_x;
amdgpu_crtc->cursor_hot_y = hot_y;
}

if (width != amdgpu_crtc->cursor_width ||
height != amdgpu_crtc->cursor_height) {
WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
(width - 1) << 16 | (height - 1));
amdgpu_crtc->cursor_width = width;
amdgpu_crtc->cursor_height = height;
amdgpu_crtc->cursor_hot_x = hot_x;
amdgpu_crtc->cursor_hot_y = hot_y;
}

dce_v11_0_show_cursor(crtc);
@@ -2640,7 +2637,6 @@ unpin:
static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = crtc->dev->dev_private;

if (amdgpu_crtc->cursor_bo) {
dce_v11_0_lock_cursor(crtc, true);
@@ -2648,10 +2644,6 @@ static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
amdgpu_crtc->cursor_y);

WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
(amdgpu_crtc->cursor_width - 1) << 16 |
(amdgpu_crtc->cursor_height - 1));

dce_v11_0_show_cursor(crtc);

dce_v11_0_lock_cursor(crtc, false);
@@ -1859,6 +1859,8 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
struct amdgpu_device *adev = crtc->dev->dev_private;
int xorigin = 0, yorigin = 0;

int w = amdgpu_crtc->cursor_width;

amdgpu_crtc->cursor_x = x;
amdgpu_crtc->cursor_y = y;

@@ -1878,6 +1880,8 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,

WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));

return 0;
}
@@ -1903,7 +1907,6 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
int32_t hot_y)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = crtc->dev->dev_private;
struct drm_gem_object *obj;
struct amdgpu_bo *aobj;
int ret;
@@ -1944,7 +1947,9 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,

dce_v6_0_lock_cursor(crtc, true);

if (hot_x != amdgpu_crtc->cursor_hot_x ||
if (width != amdgpu_crtc->cursor_width ||
height != amdgpu_crtc->cursor_height ||
hot_x != amdgpu_crtc->cursor_hot_x ||
hot_y != amdgpu_crtc->cursor_hot_y) {
int x, y;

@@ -1953,16 +1958,10 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,

dce_v6_0_cursor_move_locked(crtc, x, y);

amdgpu_crtc->cursor_hot_x = hot_x;
amdgpu_crtc->cursor_hot_y = hot_y;
}

if (width != amdgpu_crtc->cursor_width ||
height != amdgpu_crtc->cursor_height) {
WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
(width - 1) << 16 | (height - 1));
amdgpu_crtc->cursor_width = width;
amdgpu_crtc->cursor_height = height;
amdgpu_crtc->cursor_hot_x = hot_x;
amdgpu_crtc->cursor_hot_y = hot_y;
}

dce_v6_0_show_cursor(crtc);
@@ -1986,7 +1985,6 @@ unpin:
static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = crtc->dev->dev_private;

if (amdgpu_crtc->cursor_bo) {
dce_v6_0_lock_cursor(crtc, true);
@@ -1994,10 +1992,6 @@ static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
amdgpu_crtc->cursor_y);

WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
(amdgpu_crtc->cursor_width - 1) << 16 |
(amdgpu_crtc->cursor_height - 1));

dce_v6_0_show_cursor(crtc);
dce_v6_0_lock_cursor(crtc, false);
}
@@ -2363,6 +2363,8 @@ static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,

WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));

return 0;
}
@@ -2388,7 +2390,6 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
int32_t hot_y)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = crtc->dev->dev_private;
struct drm_gem_object *obj;
struct amdgpu_bo *aobj;
int ret;
@@ -2429,7 +2430,9 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,

dce_v8_0_lock_cursor(crtc, true);

if (hot_x != amdgpu_crtc->cursor_hot_x ||
if (width != amdgpu_crtc->cursor_width ||
height != amdgpu_crtc->cursor_height ||
hot_x != amdgpu_crtc->cursor_hot_x ||
hot_y != amdgpu_crtc->cursor_hot_y) {
int x, y;

@@ -2438,16 +2441,10 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,

dce_v8_0_cursor_move_locked(crtc, x, y);

amdgpu_crtc->cursor_hot_x = hot_x;
amdgpu_crtc->cursor_hot_y = hot_y;
}

if (width != amdgpu_crtc->cursor_width ||
height != amdgpu_crtc->cursor_height) {
WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
(width - 1) << 16 | (height - 1));
amdgpu_crtc->cursor_width = width;
amdgpu_crtc->cursor_height = height;
amdgpu_crtc->cursor_hot_x = hot_x;
amdgpu_crtc->cursor_hot_y = hot_y;
}

dce_v8_0_show_cursor(crtc);
@@ -2471,7 +2468,6 @@ unpin:
static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
struct amdgpu_device *adev = crtc->dev->dev_private;

if (amdgpu_crtc->cursor_bo) {
dce_v8_0_lock_cursor(crtc, true);
@@ -2479,10 +2475,6 @@ static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
amdgpu_crtc->cursor_y);

WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
(amdgpu_crtc->cursor_width - 1) << 16 |
(amdgpu_crtc->cursor_height - 1));

dce_v8_0_show_cursor(crtc);

dce_v8_0_lock_cursor(crtc, false);
@@ -627,11 +627,8 @@ static const struct drm_encoder_helper_funcs dce_virtual_encoder_helper_funcs =

static void dce_virtual_encoder_destroy(struct drm_encoder *encoder)
{
struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);

kfree(amdgpu_encoder->enc_priv);
drm_encoder_cleanup(encoder);
kfree(amdgpu_encoder);
kfree(encoder);
}

static const struct drm_encoder_funcs dce_virtual_encoder_funcs = {
@@ -44,6 +44,7 @@ MODULE_FIRMWARE("radeon/tahiti_mc.bin");
MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
MODULE_FIRMWARE("radeon/verde_mc.bin");
MODULE_FIRMWARE("radeon/oland_mc.bin");
MODULE_FIRMWARE("radeon/si58_mc.bin");

#define MC_SEQ_MISC0__MT__MASK 0xf0000000
#define MC_SEQ_MISC0__MT__GDDR1 0x10000000
@@ -113,6 +114,7 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
const char *chip_name;
char fw_name[30];
int err;
bool is_58_fw = false;

DRM_DEBUG("\n");

@@ -135,7 +137,14 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
default: BUG();
}

snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
/* this memory configuration requires special firmware */
if (((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
is_58_fw = true;

if (is_58_fw)
snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
else
snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
if (err)
goto out;
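As a side note, the probe above keys the firmware choice off MC_SEQ_MISC0. A minimal standalone sketch of that selection (the helper name and parameters are invented for illustration; only the mask, the 0x58 value, and the two file names come from the hunk):

static void pick_mc_firmware(u32 mc_seq_misc0, const char *chip_name,
			     char *fw_name, size_t len)
{
	/* bits 31:24 of MC_SEQ_MISC0 identify the memory configuration */
	if (((mc_seq_misc0 & 0xff000000) >> 24) == 0x58)
		snprintf(fw_name, len, "radeon/si58_mc.bin");
	else
		snprintf(fw_name, len, "radeon/%s_mc.bin", chip_name);
}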
@@ -463,19 +472,11 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
WREG32(mmVM_CONTEXT1_CNTL,
VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
(1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT) |
VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK);
((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT));
if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
gmc_v6_0_set_fault_enable_default(adev, false);
else
gmc_v6_0_set_fault_enable_default(adev, true);

gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -754,7 +755,10 @@ static int gmc_v6_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;

return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
else
return 0;
}

static int gmc_v6_0_sw_init(void *handle)
@@ -64,6 +64,7 @@ MODULE_FIRMWARE("radeon/oland_smc.bin");
MODULE_FIRMWARE("radeon/oland_k_smc.bin");
MODULE_FIRMWARE("radeon/hainan_smc.bin");
MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");

union power_info {
struct _ATOM_POWERPLAY_INFO info;
@@ -3487,17 +3488,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
(adev->pdev->device == 0x6817) ||
(adev->pdev->device == 0x6806))
max_mclk = 120000;
} else if (adev->asic_type == CHIP_OLAND) {
if ((adev->pdev->revision == 0xC7) ||
(adev->pdev->revision == 0x80) ||
(adev->pdev->revision == 0x81) ||
(adev->pdev->revision == 0x83) ||
(adev->pdev->revision == 0x87) ||
(adev->pdev->device == 0x6604) ||
(adev->pdev->device == 0x6605)) {
max_sclk = 75000;
max_mclk = 80000;
}
} else if (adev->asic_type == CHIP_HAINAN) {
if ((adev->pdev->revision == 0x81) ||
(adev->pdev->revision == 0x83) ||
@@ -3506,7 +3496,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
(adev->pdev->device == 0x6665) ||
(adev->pdev->device == 0x6667)) {
max_sclk = 75000;
max_mclk = 80000;
}
}
/* Apply dpm quirks */
@@ -7713,10 +7702,11 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev)
((adev->pdev->device == 0x6660) ||
(adev->pdev->device == 0x6663) ||
(adev->pdev->device == 0x6665) ||
(adev->pdev->device == 0x6667))) ||
((adev->pdev->revision == 0xc3) &&
(adev->pdev->device == 0x6665)))
(adev->pdev->device == 0x6667))))
chip_name = "hainan_k";
else if ((adev->pdev->revision == 0xc3) &&
(adev->pdev->device == 0x6665))
chip_name = "banks_k_2";
else
chip_name = "hainan";
break;
@@ -40,13 +40,14 @@
#include "smu/smu_7_0_1_sh_mask.h"

static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
static void uvd_v4_2_init_cg(struct amdgpu_device *adev);
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v4_2_start(struct amdgpu_device *adev);
static void uvd_v4_2_stop(struct amdgpu_device *adev);
static int uvd_v4_2_set_clockgating_state(void *handle,
enum amd_clockgating_state state);
static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
bool sw_mode);
/**
* uvd_v4_2_ring_get_rptr - get read pointer
*
@@ -140,7 +141,8 @@ static int uvd_v4_2_sw_fini(void *handle)

return r;
}

static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
bool enable);
/**
* uvd_v4_2_hw_init - start and test UVD block
*
@@ -155,8 +157,7 @@ static int uvd_v4_2_hw_init(void *handle)
uint32_t tmp;
int r;

uvd_v4_2_init_cg(adev);
uvd_v4_2_set_clockgating_state(adev, AMD_CG_STATE_GATE);
uvd_v4_2_enable_mgcg(adev, true);
amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
r = uvd_v4_2_start(adev);
if (r)
@@ -266,11 +267,13 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
struct amdgpu_ring *ring = &adev->uvd.ring;
uint32_t rb_bufsz;
int i, j, r;

/* disable byte swapping */
u32 lmi_swap_cntl = 0;
u32 mp_swap_cntl = 0;

WREG32(mmUVD_CGC_GATE, 0);
uvd_v4_2_set_dcm(adev, true);

uvd_v4_2_mc_resume(adev);

/* disable interupt */
@@ -406,6 +409,8 @@ static void uvd_v4_2_stop(struct amdgpu_device *adev)

/* Unstall UMC and register bus */
WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

uvd_v4_2_set_dcm(adev, false);
}

/**
@@ -619,19 +624,6 @@ static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2);
}

static void uvd_v4_2_init_cg(struct amdgpu_device *adev)
{
bool hw_mode = true;

if (hw_mode) {
uvd_v4_2_set_dcm(adev, false);
} else {
u32 tmp = RREG32(mmUVD_CGC_CTRL);
tmp &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
WREG32(mmUVD_CGC_CTRL, tmp);
}
}

static bool uvd_v4_2_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -685,17 +677,6 @@ static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
static int uvd_v4_2_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
bool gate = false;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;

if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
return 0;

if (state == AMD_CG_STATE_GATE)
gate = true;

uvd_v4_2_enable_mgcg(adev, gate);

return 0;
}

@@ -711,9 +692,6 @@ static int uvd_v4_2_set_powergating_state(void *handle,
*/
struct amdgpu_device *adev = (struct amdgpu_device *)handle;

if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
return 0;

if (state == AMD_PG_STATE_GATE) {
uvd_v4_2_stop(adev);
return 0;
@@ -43,9 +43,13 @@

#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04
#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10
#define GRBM_GFX_INDEX__VCE_ALL_PIPE 0x07

#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618
#define mmGRBM_GFX_INDEX_DEFAULT 0xE0000000

#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02

#define VCE_V3_0_FW_SIZE (384 * 1024)
@@ -54,6 +58,9 @@

#define FW_52_8_3 ((52 << 24) | (8 << 16) | (3 << 8))

#define GET_VCE_INSTANCE(i) ((i) << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT \
| GRBM_GFX_INDEX__VCE_ALL_PIPE)
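For reference, a worked expansion of the new macro using the constants defined above (annotation only, not part of the patch):

/* GET_VCE_INSTANCE(0) == (0 << 0x04) | 0x07 == 0x07
 * GET_VCE_INSTANCE(1) == (1 << 0x04) | 0x07 == 0x17
 *
 * Each GRBM_GFX_INDEX write below therefore selects one VCE instance
 * while keeping the ALL_PIPE bits set, and writing
 * mmGRBM_GFX_INDEX_DEFAULT (0xE0000000) presumably restores the
 * register to its default broadcast routing afterwards.
 */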
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -175,7 +182,7 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
WREG32(mmVCE_UENC_CLOCK_GATING_2, data);

data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
data &= ~0xffc00000;
data &= ~0x3ff;
WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);

data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
@@ -249,7 +256,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
if (adev->vce.harvest_config & (1 << idx))
continue;

WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
vce_v3_0_mc_resume(adev, idx);
WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);

@@ -273,7 +280,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
}
}

WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
mutex_unlock(&adev->grbm_idx_mutex);

return 0;
@@ -288,7 +295,7 @@ static int vce_v3_0_stop(struct amdgpu_device *adev)
if (adev->vce.harvest_config & (1 << idx))
continue;

WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));

if (adev->asic_type >= CHIP_STONEY)
WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
@@ -306,7 +313,7 @@ static int vce_v3_0_stop(struct amdgpu_device *adev)
vce_v3_0_set_vce_sw_clock_gating(adev, false);
}

WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
mutex_unlock(&adev->grbm_idx_mutex);

return 0;
@@ -586,17 +593,17 @@ static bool vce_v3_0_check_soft_reset(void *handle)
* VCE team suggest use bit 3--bit 6 for busy status check
*/
mutex_lock(&adev->grbm_idx_mutex);
WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
}
WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10);
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
}
WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
mutex_unlock(&adev->grbm_idx_mutex);

if (srbm_soft_reset) {
@@ -734,7 +741,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
if (adev->vce.harvest_config & (1 << i))
continue;

WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i);
WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(i));

if (enable) {
/* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
@@ -753,7 +760,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
vce_v3_0_set_vce_sw_clock_gating(adev, enable);
}

WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
mutex_unlock(&adev->grbm_idx_mutex);

return 0;
@@ -200,7 +200,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
cgs_set_clockgating_state(
hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
AMD_CG_STATE_UNGATE);
AMD_CG_STATE_GATE);
cgs_set_powergating_state(
hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
@@ -218,7 +218,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
cgs_set_clockgating_state(
hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
AMD_PG_STATE_GATE);
AMD_PG_STATE_UNGATE);
cz_dpm_update_vce_dpm(hwmgr);
cz_enable_disable_vce_dpm(hwmgr, true);
return 0;
@@ -1402,14 +1402,22 @@ int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
cz_hwmgr->vce_dpm.hard_min_clk,
PPSMC_MSG_SetEclkHardMin));
} else {
/*EPR# 419220 -HW limitation to to */
cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk;
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetEclkHardMin,
cz_get_eclk_level(hwmgr,
cz_hwmgr->vce_dpm.hard_min_clk,
PPSMC_MSG_SetEclkHardMin));

/*Program HardMin based on the vce_arbiter.ecclk */
if (hwmgr->vce_arbiter.ecclk == 0) {
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetEclkHardMin, 0);
/* disable ECLK DPM 0. Otherwise VCE could hang if
* switching SCLK from DPM 0 to 6/7 */
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetEclkSoftMin, 1);
} else {
cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk;
smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
PPSMC_MSG_SetEclkHardMin,
cz_get_eclk_level(hwmgr,
cz_hwmgr->vce_dpm.hard_min_clk,
PPSMC_MSG_SetEclkHardMin));
}
}
return 0;
}
@@ -113,6 +113,7 @@ struct ast_private {
struct ttm_bo_kmap_obj cache_kmap;
int next_cursor;
bool support_wide_screen;
bool DisableP2A;

enum ast_tx_chip tx_chip_type;
u8 dp501_maxclk;
@@ -124,6 +124,12 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
} else
*need_post = false;

/* Check P2A Access */
ast->DisableP2A = true;
data = ast_read32(ast, 0xf004);
if (data != 0xFFFFFFFF)
ast->DisableP2A = false;

/* Check if we support wide screen */
switch (ast->chip) {
case AST1180:
@@ -140,15 +146,17 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
ast->support_wide_screen = true;
else {
ast->support_wide_screen = false;
/* Read SCU7c (silicon revision register) */
ast_write32(ast, 0xf004, 0x1e6e0000);
ast_write32(ast, 0xf000, 0x1);
data = ast_read32(ast, 0x1207c);
data &= 0x300;
if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
ast->support_wide_screen = true;
if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
ast->support_wide_screen = true;
if (ast->DisableP2A == false) {
/* Read SCU7c (silicon revision register) */
ast_write32(ast, 0xf004, 0x1e6e0000);
ast_write32(ast, 0xf000, 0x1);
data = ast_read32(ast, 0x1207c);
data &= 0x300;
if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
ast->support_wide_screen = true;
if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
ast->support_wide_screen = true;
}
}
break;
}
@@ -216,80 +224,81 @@ static int ast_get_dram_info(struct drm_device *dev)
uint32_t data, data2;
uint32_t denum, num, div, ref_pll;

ast_write32(ast, 0xf004, 0x1e6e0000);
ast_write32(ast, 0xf000, 0x1);


ast_write32(ast, 0x10000, 0xfc600309);

do {
if (pci_channel_offline(dev->pdev))
return -EIO;
} while (ast_read32(ast, 0x10000) != 0x01);
data = ast_read32(ast, 0x10004);

if (data & 0x40)
if (ast->DisableP2A)
{
ast->dram_bus_width = 16;
ast->dram_type = AST_DRAM_1Gx16;
ast->mclk = 396;
}
else
ast->dram_bus_width = 32;
{
ast_write32(ast, 0xf004, 0x1e6e0000);
ast_write32(ast, 0xf000, 0x1);
data = ast_read32(ast, 0x10004);

if (ast->chip == AST2300 || ast->chip == AST2400) {
switch (data & 0x03) {
case 0:
ast->dram_type = AST_DRAM_512Mx16;
break;
default:
case 1:
ast->dram_type = AST_DRAM_1Gx16;
if (data & 0x40)
ast->dram_bus_width = 16;
else
ast->dram_bus_width = 32;

if (ast->chip == AST2300 || ast->chip == AST2400) {
switch (data & 0x03) {
case 0:
ast->dram_type = AST_DRAM_512Mx16;
break;
default:
case 1:
ast->dram_type = AST_DRAM_1Gx16;
break;
case 2:
ast->dram_type = AST_DRAM_2Gx16;
break;
case 3:
ast->dram_type = AST_DRAM_4Gx16;
break;
}
} else {
switch (data & 0x0c) {
case 0:
case 4:
ast->dram_type = AST_DRAM_512Mx16;
break;
case 8:
if (data & 0x40)
ast->dram_type = AST_DRAM_1Gx16;
else
ast->dram_type = AST_DRAM_512Mx32;
break;
case 0xc:
ast->dram_type = AST_DRAM_1Gx32;
break;
}
}

data = ast_read32(ast, 0x10120);
data2 = ast_read32(ast, 0x10170);
if (data2 & 0x2000)
ref_pll = 14318;
else
ref_pll = 12000;

denum = data & 0x1f;
num = (data & 0x3fe0) >> 5;
data = (data & 0xc000) >> 14;
switch (data) {
case 3:
div = 0x4;
break;
case 2:
ast->dram_type = AST_DRAM_2Gx16;
case 1:
div = 0x2;
break;
case 3:
ast->dram_type = AST_DRAM_4Gx16;
break;
}
} else {
switch (data & 0x0c) {
case 0:
case 4:
ast->dram_type = AST_DRAM_512Mx16;
break;
case 8:
if (data & 0x40)
ast->dram_type = AST_DRAM_1Gx16;
else
ast->dram_type = AST_DRAM_512Mx32;
break;
case 0xc:
ast->dram_type = AST_DRAM_1Gx32;
default:
div = 0x1;
break;
}
ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
}

data = ast_read32(ast, 0x10120);
data2 = ast_read32(ast, 0x10170);
if (data2 & 0x2000)
ref_pll = 14318;
else
ref_pll = 12000;

denum = data & 0x1f;
num = (data & 0x3fe0) >> 5;
data = (data & 0xc000) >> 14;
switch (data) {
case 3:
div = 0x4;
break;
case 2:
case 1:
div = 0x2;
break;
default:
div = 0x1;
break;
}
ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
return 0;
}
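To make the register decoding above easier to follow, here is a hedged sketch of how the PLL fields are pulled apart (the bit-field comments are inferred from the masks and shifts in the hunk; the exact clock units are the driver's own):

u32 data = ast_read32(ast, 0x10120);         /* DRAM PLL parameters */
u32 denum = data & 0x1f;                     /* bits 4:0, denominator */
u32 num = (data & 0x3fe0) >> 5;              /* bits 13:5, numerator */
u32 od = (data & 0xc000) >> 14;              /* bits 15:14, divider select */
u32 div = (od == 3) ? 4 : (od >= 1) ? 2 : 1; /* mirrors the switch above */
/* mclk then follows from ref_pll (14318 or 12000) and these fields,
 * exactly as in the final assignment of ast_get_dram_info(). */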
@@ -379,12 +379,20 @@ void ast_post_gpu(struct drm_device *dev)
ast_open_key(ast);
ast_set_def_ext_reg(dev);

if (ast->chip == AST2300 || ast->chip == AST2400)
ast_init_dram_2300(dev);
else
ast_init_dram_reg(dev);
if (ast->DisableP2A == false)
{
if (ast->chip == AST2300 || ast->chip == AST2400)
ast_init_dram_2300(dev);
else
ast_init_dram_reg(dev);

ast_init_3rdtx(dev);
ast_init_3rdtx(dev);
}
else
{
if (ast->tx_chip_type != AST_TX_NONE)
ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); /* Enable DVO */
}
}

/* AST 2300 DRAM settings */
@@ -1382,6 +1382,7 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,

pm_runtime_enable(dev);

pm_runtime_get_sync(dev);
phy_power_on(dp->phy);

analogix_dp_init_dp(dp);
@@ -1414,9 +1415,15 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
goto err_disable_pm_runtime;
}

phy_power_off(dp->phy);
pm_runtime_put(dev);

return 0;

err_disable_pm_runtime:

phy_power_off(dp->phy);
pm_runtime_put(dev);
pm_runtime_disable(dev);

return ret;
@@ -7,3 +7,12 @@ config DRM_CIRRUS_QEMU
This is a KMS driver for emulated cirrus device in qemu.
It is *NOT* intended for real cirrus devices. This requires
the modesetting userspace X.org driver.

Cirrus is obsolete, the hardware was designed in the 90ies
and can't keep up with todays needs. More background:
https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/

Better alternatives are:
- stdvga (DRM_BOCHS, qemu -vga std, default in qemu 2.2+)
- qxl (DRM_QXL, qemu -vga qxl, works best with spice)
- virtio (DRM_VIRTIO_GPU), qemu -vga virtio)
@@ -291,15 +291,15 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
EXPORT_SYMBOL(drm_atomic_get_crtc_state);

static void set_out_fence_for_crtc(struct drm_atomic_state *state,
struct drm_crtc *crtc, s64 __user *fence_ptr)
struct drm_crtc *crtc, s32 __user *fence_ptr)
{
state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
}

static s64 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
s64 __user *fence_ptr;
s32 __user *fence_ptr;

fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;
@@ -512,7 +512,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
state->color_mgmt_changed |= replaced;
return ret;
} else if (property == config->prop_out_fence_ptr) {
s64 __user *fence_ptr = u64_to_user_ptr(val);
s32 __user *fence_ptr = u64_to_user_ptr(val);

if (!fence_ptr)
return 0;
@@ -1915,7 +1915,7 @@ EXPORT_SYMBOL(drm_atomic_clean_old_fb);
*/

struct drm_out_fence_state {
s64 __user *out_fence_ptr;
s32 __user *out_fence_ptr;
struct sync_file *sync_file;
int fd;
};
@@ -1952,7 +1952,7 @@ static int prepare_crtc_signaling(struct drm_device *dev,
return 0;

for_each_crtc_in_state(state, crtc, crtc_state, i) {
u64 __user *fence_ptr;
s32 __user *fence_ptr;

fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc);
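The s64-to-s32 switch above reflects what the property actually carries: a user-space pointer to a fence *file descriptor*, and fds are 32-bit ints. A hypothetical libdrm-style usage sketch (property-id lookup elided; names other than drmModeAtomicAddProperty are placeholders):

int32_t out_fence = -1;

/* OUT_FENCE_PTR takes the user pointer itself as the u64 property value;
 * the kernel writes a sync_file fd through it on a successful commit. */
drmModeAtomicAddProperty(req, crtc_id, out_fence_ptr_prop_id,
			 (uint64_t)(uintptr_t)&out_fence);
/* after drmModeAtomicCommit() succeeds, out_fence is a pollable fd */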
@@ -1460,6 +1460,13 @@ drm_mode_create_from_cmdline_mode(struct drm_device *dev,
return NULL;

mode->type |= DRM_MODE_TYPE_USERDEF;
/* fix up 1368x768: GFT/CVT can't express 1366 width due to alignment */
if (cmd->xres == 1366 && mode->hdisplay == 1368) {
mode->hdisplay = 1366;
mode->hsync_start--;
mode->hsync_end--;
drm_mode_set_name(mode);
}
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
return mode;
}
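A short arithmetic note on the fixup above, as a sketch: GTF (and CVT) lay out horizontal timing in 8-pixel character cells, so a requested width of 1366 cannot be represented and ends up rounded to a neighbouring multiple of 8:

int cell = 8;
int hdisplay = ((1366 + cell / 2) / cell) * cell; /* nearest cell: 1368 */
/* The quirk then forces hdisplay back to 1366 and shifts hsync_start
 * and hsync_end by the same amount, keeping the overall timing intact. */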
@@ -143,8 +143,18 @@ void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
}

if (dev->mode_config.delayed_event) {
/*
* FIXME:
*
* Use short (1s) delay to handle the initial delayed event.
* This delay should not be needed, but Optimus/nouveau will
* fail in a mysterious way if the delayed event is handled as
* soon as possible like it is done in
* drm_helper_probe_single_connector_modes() in case the poll
* was enabled before.
*/
poll = true;
delay = 0;
delay = HZ;
}

if (poll)
@@ -116,9 +116,14 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
struct list_head list;
bool found;

/*
* XXX: The DRM_MM_SEARCH_BELOW is really a hack to trick
* drm_mm into giving out a low IOVA after address space
* rollover. This needs a proper fix.
*/
ret = drm_mm_insert_node_in_range(&mmu->mm, node,
size, 0, mmu->last_iova, ~0UL,
DRM_MM_SEARCH_DEFAULT);
mmu->last_iova ? DRM_MM_SEARCH_DEFAULT : DRM_MM_SEARCH_BELOW);

if (ret != -ENOSPC)
break;
@@ -46,7 +46,8 @@ enum decon_flag_bits {
BIT_CLKS_ENABLED,
BIT_IRQS_ENABLED,
BIT_WIN_UPDATED,
BIT_SUSPENDED
BIT_SUSPENDED,
BIT_REQUEST_UPDATE
};

struct decon_context {
@@ -141,12 +142,6 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
m->crtc_vsync_end = m->crtc_vsync_start + 1;
}

decon_set_bits(ctx, DECON_VIDCON0, VIDCON0_ENVID, 0);

/* enable clock gate */
val = CMU_CLKGAGE_MODE_SFR_F | CMU_CLKGAGE_MODE_MEM_F;
writel(val, ctx->addr + DECON_CMU);

if (ctx->out_type & (IFTYPE_I80 | I80_HW_TRG))
decon_setup_trigger(ctx);

@@ -315,6 +310,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,

/* window enable */
decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, ~0);
set_bit(BIT_REQUEST_UPDATE, &ctx->flags);
}

static void decon_disable_plane(struct exynos_drm_crtc *crtc,
@@ -327,6 +323,7 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc,
return;

decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0);
set_bit(BIT_REQUEST_UPDATE, &ctx->flags);
}

static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
@@ -340,8 +337,8 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
for (i = ctx->first_win; i < WINDOWS_NR; i++)
decon_shadow_protect_win(ctx, i, false);

/* standalone update */
decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
if (test_and_clear_bit(BIT_REQUEST_UPDATE, &ctx->flags))
decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);

if (ctx->out_type & IFTYPE_I80)
set_bit(BIT_WIN_UPDATED, &ctx->flags);
@@ -37,13 +37,6 @@
#include "i915_drv.h"
#include "gvt.h"

#define MB_TO_BYTES(mb) ((mb) << 20ULL)
#define BYTES_TO_MB(b) ((b) >> 20ULL)

#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
#define HOST_FENCE 4

static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
{
struct intel_gvt *gvt = vgpu->gvt;
@@ -165,6 +158,14 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
POSTING_READ(fence_reg_lo);
}

static void _clear_vgpu_fence(struct intel_vgpu *vgpu)
{
int i;

for (i = 0; i < vgpu_fence_sz(vgpu); i++)
intel_vgpu_write_fence(vgpu, i, 0);
}

static void free_vgpu_fence(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
@@ -178,9 +179,9 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu)
intel_runtime_pm_get(dev_priv);

mutex_lock(&dev_priv->drm.struct_mutex);
_clear_vgpu_fence(vgpu);
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
reg = vgpu->fence.regs[i];
intel_vgpu_write_fence(vgpu, i, 0);
list_add_tail(&reg->link,
&dev_priv->mm.fence_list);
}
@@ -208,13 +209,14 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
continue;
list_del(pos);
vgpu->fence.regs[i] = reg;
intel_vgpu_write_fence(vgpu, i, 0);
if (++i == vgpu_fence_sz(vgpu))
break;
}
if (i != vgpu_fence_sz(vgpu))
goto out_free_fence;

_clear_vgpu_fence(vgpu);

mutex_unlock(&dev_priv->drm.struct_mutex);
intel_runtime_pm_put(dev_priv);
return 0;
@@ -313,6 +315,22 @@ void intel_vgpu_free_resource(struct intel_vgpu *vgpu)
free_resource(vgpu);
}

/**
* intel_vgpu_reset_resource - reset resource state owned by a vGPU
* @vgpu: a vGPU
*
* This function is used to reset resource state owned by a vGPU.
*
*/
void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
{
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;

intel_runtime_pm_get(dev_priv);
_clear_vgpu_fence(vgpu);
intel_runtime_pm_put(dev_priv);
}

/**
* intel_alloc_vgpu_resource - allocate HW resource for a vGPU
* @vgpu: vGPU
@@ -282,3 +282,77 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
}
return 0;
}

/**
* intel_vgpu_init_cfg_space - init vGPU configuration space when create vGPU
*
* @vgpu: a vGPU
* @primary: is the vGPU presented as primary
*
*/
void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
bool primary)
{
struct intel_gvt *gvt = vgpu->gvt;
const struct intel_gvt_device_info *info = &gvt->device_info;
u16 *gmch_ctl;
int i;

memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
info->cfg_space_size);

if (!primary) {
vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
INTEL_GVT_PCI_CLASS_VGA_OTHER;
vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
INTEL_GVT_PCI_CLASS_VGA_OTHER;
}

/* Show guest that there isn't any stolen memory.*/
gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
*gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);

intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
gvt_aperture_pa_base(gvt), true);

vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
| PCI_COMMAND_MEMORY
| PCI_COMMAND_MASTER);
/*
* Clear the bar upper 32bit and let guest to assign the new value
*/
memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);

for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
vgpu->cfg_space.bar[i].size = pci_resource_len(
gvt->dev_priv->drm.pdev, i * 2);
vgpu->cfg_space.bar[i].tracked = false;
}
}

/**
* intel_vgpu_reset_cfg_space - reset vGPU configuration space
*
* @vgpu: a vGPU
*
*/
void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu)
{
u8 cmd = vgpu_cfg_space(vgpu)[PCI_COMMAND];
bool primary = vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] !=
INTEL_GVT_PCI_CLASS_VGA_OTHER;

if (cmd & PCI_COMMAND_MEMORY) {
trap_gttmmio(vgpu, false);
map_aperture(vgpu, false);
}

/**
* Currently we only do such reset when vGPU is not
* owned by any VM, so we simply restore entire cfg
* space to default value.
*/
intel_vgpu_init_cfg_space(vgpu, primary);
}
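One detail worth spelling out (an inference from the i * 2 indexing above, not stated in the patch): the tracked BARs appear to be 64-bit, and a 64-bit PCI BAR occupies two consecutive BAR registers, so tracked slot i maps to PCI resource 2 * i:

/* sketch: 64-bit BARs pair up BAR0/1, BAR2/3, ... so resources are
 * probed at even indices only (assumption based on the hunk above) */
for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++)
	size = pci_resource_len(gvt->dev_priv->drm.pdev, i * 2);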
@@ -481,7 +481,6 @@ struct parser_exec_state {
(s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)

static unsigned long bypass_scan_mask = 0;
static bool bypass_batch_buffer_scan = true;

/* ring ALL, type = 0 */
static struct sub_op_bits sub_op_mi[] = {
@@ -1525,9 +1524,6 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
{
struct intel_gvt *gvt = s->vgpu->gvt;

if (bypass_batch_buffer_scan)
return 0;

if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
/* BDW decides privilege based on address space */
if (cmd_val(s, 0) & (1 << 8))
@@ -364,58 +364,30 @@ static void free_workload(struct intel_vgpu_workload *workload)
#define get_desc_from_elsp_dwords(ed, i) \
((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))


#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
static int set_gma_to_bb_cmd(struct intel_shadow_bb_entry *entry_obj,
unsigned long add, int gmadr_bytes)
{
if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
return -1;

*((u32 *)(entry_obj->bb_start_cmd_va + (1 << 2))) = add &
BATCH_BUFFER_ADDR_MASK;
if (gmadr_bytes == 8) {
*((u32 *)(entry_obj->bb_start_cmd_va + (2 << 2))) =
add & BATCH_BUFFER_ADDR_HIGH_MASK;
}

return 0;
}

static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
struct intel_shadow_bb_entry *entry_obj;

/* pin the gem object to ggtt */
if (!list_empty(&workload->shadow_bb)) {
struct intel_shadow_bb_entry *entry_obj =
list_first_entry(&workload->shadow_bb,
struct intel_shadow_bb_entry,
list);
struct intel_shadow_bb_entry *temp;
list_for_each_entry(entry_obj, &workload->shadow_bb, list) {
struct i915_vma *vma;

list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
list) {
struct i915_vma *vma;

vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0,
4, 0);
if (IS_ERR(vma)) {
gvt_err("Cannot pin\n");
return;
}

/* FIXME: we are not tracking our pinned VMA leaving it
* up to the core to fix up the stray pin_count upon
* free.
*/

/* update the relocate gma with shadow batch buffer*/
set_gma_to_bb_cmd(entry_obj,
i915_ggtt_offset(vma),
gmadr_bytes);
vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
if (IS_ERR(vma)) {
gvt_err("Cannot pin\n");
return;
}

/* FIXME: we are not tracking our pinned VMA leaving it
* up to the core to fix up the stray pin_count upon
* free.
*/

/* update the relocate gma with shadow batch buffer*/
entry_obj->bb_start_cmd_va[1] = i915_ggtt_offset(vma);
if (gmadr_bytes == 8)
entry_obj->bb_start_cmd_va[2] = 0;
}
}

@@ -826,7 +798,7 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
}

vgpu->workloads = kmem_cache_create("gvt-g vgpu workload",
vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload",
sizeof(struct intel_vgpu_workload), 0,
SLAB_HWCACHE_ALIGN,
NULL);
@@ -240,15 +240,8 @@ static inline int get_pse_type(int type)
static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
{
void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
u64 pte;

#ifdef readq
pte = readq(addr);
#else
pte = ioread32(addr);
pte |= (u64)ioread32(addr + 4) << 32;
#endif
return pte;
return readq(addr);
}

static void write_pte64(struct drm_i915_private *dev_priv,
@@ -256,12 +249,8 @@ static void write_pte64(struct drm_i915_private *dev_priv,
{
void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;

#ifdef writeq
writeq(pte, addr);
#else
iowrite32((u32)pte, addr);
iowrite32(pte >> 32, addr + 4);
#endif

I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
}
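The dropped #ifdef blocks were open-coding a 64-bit MMIO access out of two 32-bit halves for kernels without readq/writeq. If that portability were still needed, the usual pattern would be the non-atomic helpers, as a sketch:

#include <linux/io-64-nonatomic-lo-hi.h> /* provides readq/writeq when the
					  * architecture lacks native ones,
					  * as a lo-then-hi pair of 32-bit
					  * accesses */

static u64 read_pte64_portable(void __iomem *addr)
{
	return readq(addr);
}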
@@ -1380,8 +1369,7 @@ static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm)
info->gtt_entry_size;
mem = kzalloc(mm->has_shadow_page_table ?
mm->page_table_entry_size * 2
: mm->page_table_entry_size,
GFP_ATOMIC);
: mm->page_table_entry_size, GFP_KERNEL);
if (!mem)
return -ENOMEM;
mm->virtual_page_table = mem;
@@ -1532,7 +1520,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
struct intel_vgpu_mm *mm;
int ret;

mm = kzalloc(sizeof(*mm), GFP_ATOMIC);
mm = kzalloc(sizeof(*mm), GFP_KERNEL);
if (!mm) {
ret = -ENOMEM;
goto fail;
@@ -1886,30 +1874,27 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
int page_entry_num = GTT_PAGE_SIZE >>
vgpu->gvt->device_info.gtt_entry_size_shift;
struct page *scratch_pt;
void *scratch_pt;
unsigned long mfn;
int i;
void *p;

if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
return -EINVAL;

scratch_pt = alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO);
scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
if (!scratch_pt) {
gvt_err("fail to allocate scratch page\n");
return -ENOMEM;
}

p = kmap_atomic(scratch_pt);
mfn = intel_gvt_hypervisor_virt_to_mfn(p);
mfn = intel_gvt_hypervisor_virt_to_mfn(scratch_pt);
if (mfn == INTEL_GVT_INVALID_ADDR) {
gvt_err("fail to translate vaddr:0x%llx\n", (u64)p);
kunmap_atomic(p);
__free_page(scratch_pt);
gvt_err("fail to translate vaddr:0x%lx\n", (unsigned long)scratch_pt);
free_page((unsigned long)scratch_pt);
return -EFAULT;
}
gtt->scratch_pt[type].page_mfn = mfn;
gtt->scratch_pt[type].page = scratch_pt;
gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
vgpu->id, type, mfn);

@@ -1918,7 +1903,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
* scratch_pt[type] indicate the scratch pt/scratch page used by the
* 'type' pt.
* e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by
* GTT_TYPE_PPGTT_PDE_PT level pt, that means this scatch_pt it self
* GTT_TYPE_PPGTT_PDE_PT level pt, that means this scratch_pt it self
* is GTT_TYPE_PPGTT_PTE_PT, and full filled by scratch page mfn.
*/
if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
@@ -1936,11 +1921,9 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
se.val64 |= PPAT_CACHED_INDEX;

for (i = 0; i < page_entry_num; i++)
ops->set_entry(p, &se, i, false, 0, vgpu);
ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
}

kunmap_atomic(p);

return 0;
}

@@ -2208,7 +2191,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
int intel_gvt_init_gtt(struct intel_gvt *gvt)
{
int ret;
void *page_addr;
void *page;

gvt_dbg_core("init gtt\n");

@@ -2221,17 +2204,14 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
return -ENODEV;
}

gvt->gtt.scratch_ggtt_page =
alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO);
if (!gvt->gtt.scratch_ggtt_page) {
page = (void *)get_zeroed_page(GFP_KERNEL);
if (!page) {
gvt_err("fail to allocate scratch ggtt page\n");
return -ENOMEM;
}
gvt->gtt.scratch_ggtt_page = virt_to_page(page);

page_addr = page_address(gvt->gtt.scratch_ggtt_page);

gvt->gtt.scratch_ggtt_mfn =
intel_gvt_hypervisor_virt_to_mfn(page_addr);
gvt->gtt.scratch_ggtt_mfn = intel_gvt_hypervisor_virt_to_mfn(page);
if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) {
gvt_err("fail to translate scratch ggtt page\n");
__free_page(gvt->gtt.scratch_ggtt_page);
@@ -2297,3 +2277,30 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
for (offset = 0; offset < num_entries; offset++)
ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
}

/**
* intel_vgpu_reset_gtt - reset the all GTT related status
* @vgpu: a vGPU
* @dmlr: true for vGPU Device Model Level Reset, false for GT Reset
*
* This function is called from vfio core to reset reset all
* GTT related status, including GGTT, PPGTT, scratch page.
*
*/
void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr)
{
int i;

ppgtt_free_all_shadow_page(vgpu);
if (!dmlr)
return;

intel_vgpu_reset_ggtt(vgpu);

/* clear scratch page for security */
for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
if (vgpu->gtt.scratch_pt[i].page != NULL)
memset(page_address(vgpu->gtt.scratch_pt[i].page),
0, PAGE_SIZE);
}
}
@@ -208,6 +208,7 @@ extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu);

extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
extern void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr);
extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);

extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
@@ -201,6 +201,8 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
intel_gvt_clean_vgpu_types(gvt);

idr_destroy(&gvt->vgpu_idr);

kfree(dev_priv->gvt);
dev_priv->gvt = NULL;
}
@@ -237,6 +239,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)

gvt_dbg_core("init gvt device\n");

idr_init(&gvt->vgpu_idr);

mutex_init(&gvt->lock);
gvt->dev_priv = dev_priv;

@@ -244,7 +248,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)

ret = intel_gvt_setup_mmio_info(gvt);
if (ret)
return ret;
goto out_clean_idr;

ret = intel_gvt_load_firmware(gvt);
if (ret)
@@ -313,6 +317,8 @@ out_free_firmware:
intel_gvt_free_firmware(gvt);
out_clean_mmio_info:
intel_gvt_clean_mmio_info(gvt);
out_clean_idr:
idr_destroy(&gvt->vgpu_idr);
kfree(gvt);
return ret;
}
@@ -323,6 +323,7 @@ struct intel_vgpu_creation_params {

int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
struct intel_vgpu_creation_params *param);
void intel_vgpu_reset_resource(struct intel_vgpu *vgpu);
void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
u32 fence, u64 value);
@@ -375,6 +376,8 @@ void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
struct intel_vgpu_type *type);
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
unsigned int engine_mask);
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);


@@ -411,6 +414,10 @@ int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
unsigned long *g_index);

void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
bool primary);
void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu);

int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes);

@@ -424,7 +431,6 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);

int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
int setup_vgpu_mmio(struct intel_vgpu *vgpu);
void populate_pvinfo_page(struct intel_vgpu *vgpu);

struct intel_gvt_ops {
@@ -93,7 +93,8 @@ static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
static int new_mmio_info(struct intel_gvt *gvt,
u32 offset, u32 flags, u32 size,
u32 addr_mask, u32 ro_mask, u32 device,
void *read, void *write)
int (*read)(struct intel_vgpu *, unsigned int, void *, unsigned int),
int (*write)(struct intel_vgpu *, unsigned int, void *, unsigned int))
{
struct intel_gvt_mmio_info *info, *p;
u32 start, end, i;
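With the typed function pointers above, a handler whose signature drifts now fails at compile time instead of corrupting a call through void *. A minimal conforming handler, as a sketch (the body simply delegates to the default read path that appears later in this series):

static int sample_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
			    void *p_data, unsigned int bytes)
{
	return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
}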
@@ -219,7 +220,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
default:
/*should not hit here*/
gvt_err("invalid forcewake offset 0x%x\n", offset);
return 1;
return -EINVAL;
}
} else {
ack_reg_offset = FORCEWAKE_ACK_HSW_REG;
@@ -230,77 +231,45 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
return 0;
}

static int handle_device_reset(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes, unsigned long bitmap)
{
struct intel_gvt_workload_scheduler *scheduler =
&vgpu->gvt->scheduler;

vgpu->resetting = true;

intel_vgpu_stop_schedule(vgpu);
/*
* The current_vgpu will set to NULL after stopping the
* scheduler when the reset is triggered by current vgpu.
*/
if (scheduler->current_vgpu == NULL) {
mutex_unlock(&vgpu->gvt->lock);
intel_gvt_wait_vgpu_idle(vgpu);
mutex_lock(&vgpu->gvt->lock);
}

intel_vgpu_reset_execlist(vgpu, bitmap);

/* full GPU reset */
if (bitmap == 0xff) {
mutex_unlock(&vgpu->gvt->lock);
intel_vgpu_clean_gtt(vgpu);
mutex_lock(&vgpu->gvt->lock);
setup_vgpu_mmio(vgpu);
populate_pvinfo_page(vgpu);
intel_vgpu_init_gtt(vgpu);
}

vgpu->resetting = false;

return 0;
}

static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
void *p_data, unsigned int bytes)
{
unsigned int engine_mask = 0;
u32 data;
u64 bitmap = 0;

write_vreg(vgpu, offset, p_data, bytes);
data = vgpu_vreg(vgpu, offset);

if (data & GEN6_GRDOM_FULL) {
gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id);
bitmap = 0xff;
engine_mask = ALL_ENGINES;
} else {
if (data & GEN6_GRDOM_RENDER) {
gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
engine_mask |= (1 << RCS);
}
if (data & GEN6_GRDOM_MEDIA) {
gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
engine_mask |= (1 << VCS);
}
if (data & GEN6_GRDOM_BLT) {
gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
engine_mask |= (1 << BCS);
}
if (data & GEN6_GRDOM_VECS) {
gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
engine_mask |= (1 << VECS);
}
if (data & GEN8_GRDOM_MEDIA2) {
gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
if (HAS_BSD2(vgpu->gvt->dev_priv))
engine_mask |= (1 << VCS2);
}
}
if (data & GEN6_GRDOM_RENDER) {
gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
bitmap |= (1 << RCS);
}
if (data & GEN6_GRDOM_MEDIA) {
gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
bitmap |= (1 << VCS);
}
if (data & GEN6_GRDOM_BLT) {
gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
bitmap |= (1 << BCS);
}
if (data & GEN6_GRDOM_VECS) {
gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
bitmap |= (1 << VECS);
}
if (data & GEN8_GRDOM_MEDIA2) {
gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
if (HAS_BSD2(vgpu->gvt->dev_priv))
bitmap |= (1 << VCS2);
}
return handle_device_reset(vgpu, offset, p_data, bytes, bitmap);

intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);

return 0;
}

static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
@@ -974,7 +943,7 @@ static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
return 0;
}

static bool sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
u32 data;
@@ -1366,7 +1335,6 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
int rc = 0;
unsigned int id = 0;

write_vreg(vgpu, offset, p_data, bytes);
@@ -1389,12 +1357,11 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
id = VECS;
break;
default:
rc = -EINVAL;
break;
return -EINVAL;
}
set_bit(id, (void *)vgpu->tlb_handle_pending);

return rc;
return 0;
}

static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
@@ -230,8 +230,8 @@ static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
return NULL;
}

static ssize_t available_instance_show(struct kobject *kobj, struct device *dev,
char *buf)
static ssize_t available_instances_show(struct kobject *kobj,
struct device *dev, char *buf)
{
struct intel_vgpu_type *type;
unsigned int num = 0;
@@ -269,12 +269,12 @@ static ssize_t description_show(struct kobject *kobj, struct device *dev,
type->fence);
}

static MDEV_TYPE_ATTR_RO(available_instance);
static MDEV_TYPE_ATTR_RO(available_instances);
static MDEV_TYPE_ATTR_RO(device_api);
static MDEV_TYPE_ATTR_RO(description);

static struct attribute *type_attrs[] = {
&mdev_type_attr_available_instance.attr,
&mdev_type_attr_available_instances.attr,
&mdev_type_attr_device_api.attr,
&mdev_type_attr_description.attr,
NULL,
@@ -398,6 +398,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
struct intel_vgpu_type *type;
struct device *pdev;
void *gvt;
int ret;

pdev = mdev_parent_dev(mdev);
gvt = kdev_to_i915(pdev)->gvt;
@@ -406,13 +407,15 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
if (!type) {
gvt_err("failed to find type %s to create\n",
kobject_name(kobj));
return -EINVAL;
ret = -EINVAL;
goto out;
}

vgpu = intel_gvt_ops->vgpu_create(gvt, type);
if (IS_ERR_OR_NULL(vgpu)) {
gvt_err("create intel vgpu failed\n");
return -EINVAL;
ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
gvt_err("failed to create intel vgpu: %d\n", ret);
goto out;
}

INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);
@@ -422,7 +425,10 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)

gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
dev_name(mdev_dev(mdev)));
return 0;
ret = 0;

out:
return ret;
}

static int intel_vgpu_remove(struct mdev_device *mdev)
@@ -125,25 +125,12 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
goto err;

mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
if (!mmio && !vgpu->mmio.disable_warn_untrack) {
gvt_err("vgpu%d: read untracked MMIO %x len %d val %x\n",
vgpu->id, offset, bytes, *(u32 *)p_data);

if (offset == 0x206c) {
gvt_err("------------------------------------------\n");
gvt_err("vgpu%d: likely triggers a gfx reset\n",
vgpu->id);
gvt_err("------------------------------------------\n");
vgpu->mmio.disable_warn_untrack = true;
}
}

if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
if (WARN_ON(!IS_ALIGNED(offset, bytes)))
goto err;
}

mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
if (mmio) {
if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
@@ -152,9 +139,23 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
goto err;
}
ret = mmio->read(vgpu, offset, p_data, bytes);
} else
} else {
ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);

if (!vgpu->mmio.disable_warn_untrack) {
gvt_err("vgpu%d: read untracked MMIO %x(%dB) val %x\n",
vgpu->id, offset, bytes, *(u32 *)p_data);

if (offset == 0x206c) {
gvt_err("------------------------------------------\n");
gvt_err("vgpu%d: likely triggers a gfx reset\n",
vgpu->id);
gvt_err("------------------------------------------\n");
vgpu->mmio.disable_warn_untrack = true;
}
}
}

if (ret)
goto err;

@@ -302,3 +303,56 @@ err:
mutex_unlock(&gvt->lock);
return ret;
}


/**
* intel_vgpu_reset_mmio - reset virtual MMIO space
* @vgpu: a vGPU
*
*/
void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
const struct intel_gvt_device_info *info = &gvt->device_info;

memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);

vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;

/* set the bit 0:2(Core C-State ) to C0 */
vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
}

/**
* intel_vgpu_init_mmio - init MMIO space
* @vgpu: a vGPU
*
* Returns:
* Zero on success, negative error code if failed
*/
int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
{
const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;

vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
if (!vgpu->mmio.vreg)
return -ENOMEM;

vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;

intel_vgpu_reset_mmio(vgpu);

return 0;
}

/**
* intel_vgpu_clean_mmio - clean MMIO space
* @vgpu: a vGPU
*
*/
void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu)
{
vfree(vgpu->mmio.vreg);
vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
}
|
||||
|
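The new intel_vgpu_init_mmio() above carves both register copies out of one allocation: vreg occupies the first mmio_size bytes and sreg the second, so the single vfree() in intel_vgpu_clean_mmio() releases both. A minimal sketch of the same layout, with generic names:

#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/types.h>

struct mmio_space {
	void *vreg;	/* guest-visible registers */
	void *sreg;	/* shadow copy, second half of the same block */
};

static int mmio_space_init(struct mmio_space *m, size_t mmio_size)
{
	m->vreg = vzalloc(mmio_size * 2);
	if (!m->vreg)
		return -ENOMEM;
	m->sreg = m->vreg + mmio_size;	/* no second allocation */
	return 0;
}

static void mmio_space_clean(struct mmio_space *m)
{
	vfree(m->vreg);			/* frees vreg and sreg together */
	m->vreg = m->sreg = NULL;
}
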
@ -86,6 +86,10 @@ struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
	*offset; \
})

int intel_vgpu_init_mmio(struct intel_vgpu *vgpu);
void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu);
void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu);

int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa);

int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,

@ -36,9 +36,9 @@ static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
			 vgpu->id))
		return -EINVAL;

	vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_ATOMIC |
			GFP_DMA32 | __GFP_ZERO,
			INTEL_GVT_OPREGION_PORDER);
	vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL |
			__GFP_ZERO,
			get_order(INTEL_GVT_OPREGION_SIZE));

	if (!vgpu_opregion(vgpu)->va)
		return -ENOMEM;
@ -97,7 +97,7 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
	if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
		map_vgpu_opregion(vgpu, false);
		free_pages((unsigned long)vgpu_opregion(vgpu)->va,
			   get_order(INTEL_GVT_OPREGION_SIZE));

		vgpu_opregion(vgpu)->va = NULL;
	}

@ -50,8 +50,7 @@
#define INTEL_GVT_OPREGION_PARM 0x204

#define INTEL_GVT_OPREGION_PAGES 2
#define INTEL_GVT_OPREGION_PORDER 1
#define INTEL_GVT_OPREGION_SIZE (2 * 4096)
#define INTEL_GVT_OPREGION_SIZE (INTEL_GVT_OPREGION_PAGES * PAGE_SIZE)

#define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B)

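The get_order() conversion above removes a constant that could silently go stale: for the current two-page opregion, get_order(INTEL_GVT_OPREGION_SIZE) evaluates to 1, exactly the old INTEL_GVT_OPREGION_PORDER, but it now tracks INTEL_GVT_OPREGION_PAGES automatically. A small sketch of the allocation call under that assumption:

#include <linux/gfp.h>
#include <linux/mm.h>

static unsigned long alloc_opregion(unsigned int pages)
{
	/* get_order() rounds up to the smallest order holding the size,
	 * so allocation and free can never disagree about the order. */
	return __get_free_pages(GFP_KERNEL | __GFP_ZERO,
				get_order(pages * PAGE_SIZE));
}
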
@ -350,13 +350,15 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
	struct intel_vgpu_workload *workload;
	struct intel_vgpu *vgpu;
	int event;

	mutex_lock(&gvt->lock);

	workload = scheduler->current_workload[ring_id];
	vgpu = workload->vgpu;

	if (!workload->status && !workload->vgpu->resetting) {
	if (!workload->status && !vgpu->resetting) {
		wait_event(workload->shadow_ctx_status_wq,
			   !atomic_read(&workload->shadow_ctx_active));

@ -364,8 +366,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)

		for_each_set_bit(event, workload->pending_events,
				 INTEL_GVT_EVENT_MAX)
			intel_vgpu_trigger_virtual_event(workload->vgpu,
					event);
			intel_vgpu_trigger_virtual_event(vgpu, event);
	}

	gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@ -373,11 +374,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)

	scheduler->current_workload[ring_id] = NULL;

	atomic_dec(&workload->vgpu->running_workload_num);

	list_del_init(&workload->list);
	workload->complete(workload);

	atomic_dec(&vgpu->running_workload_num);
	wake_up(&scheduler->workload_complete_wq);
	mutex_unlock(&gvt->lock);
}
@ -459,11 +459,11 @@ complete:
	gvt_dbg_sched("will complete workload %p\n, status: %d\n",
		      workload, workload->status);

	complete_current_workload(gvt, ring_id);

	if (workload->req)
		i915_gem_request_put(fetch_and_zero(&workload->req));

	complete_current_workload(gvt, ring_id);

	if (need_force_wake)
		intel_uncore_forcewake_put(gvt->dev_priv,
					   FORCEWAKE_ALL);

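The local vgpu variable introduced in complete_current_workload() above is not cosmetic: workload->complete(workload) may free the workload, so the later atomic_dec() must not reach through workload->vgpu. A reduced sketch of the pattern, with illustrative type names:

#include <linux/atomic.h>

struct owner {
	atomic_t running;
};

struct work_item {
	struct owner *owner;
	void (*complete)(struct work_item *w);	/* may free w */
};

static void finish(struct work_item *w)
{
	struct owner *owner = w->owner;	/* cache before w may go away */

	w->complete(w);			/* w must not be touched below */
	atomic_dec(&owner->running);	/* safe: uses the cached pointer */
}
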
@ -113,7 +113,7 @@ struct intel_shadow_bb_entry {
	struct drm_i915_gem_object *obj;
	void *va;
	unsigned long len;
	void *bb_start_cmd_va;
	u32 *bb_start_cmd_va;
};

#define workload_q_head(vgpu, ring_id) \

@ -35,79 +35,6 @@
#include "gvt.h"
#include "i915_pvinfo.h"

static void clean_vgpu_mmio(struct intel_vgpu *vgpu)
{
	vfree(vgpu->mmio.vreg);
	vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
}

int setup_vgpu_mmio(struct intel_vgpu *vgpu)
{
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_device_info *info = &gvt->device_info;

	if (vgpu->mmio.vreg)
		memset(vgpu->mmio.vreg, 0, info->mmio_size * 2);
	else {
		vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
		if (!vgpu->mmio.vreg)
			return -ENOMEM;
	}

	vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;

	memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
	memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);

	vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;

	/* set the bit 0:2(Core C-State ) to C0 */
	vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
	return 0;
}

static void setup_vgpu_cfg_space(struct intel_vgpu *vgpu,
	struct intel_vgpu_creation_params *param)
{
	struct intel_gvt *gvt = vgpu->gvt;
	const struct intel_gvt_device_info *info = &gvt->device_info;
	u16 *gmch_ctl;
	int i;

	memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
	       info->cfg_space_size);

	if (!param->primary) {
		vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
				INTEL_GVT_PCI_CLASS_VGA_OTHER;
		vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
				INTEL_GVT_PCI_CLASS_VGA_OTHER;
	}

	/* Show guest that there isn't any stolen memory.*/
	gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
	*gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);

	intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
				 gvt_aperture_pa_base(gvt), true);

	vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
					     | PCI_COMMAND_MEMORY
					     | PCI_COMMAND_MASTER);
	/*
	 * Clear the bar upper 32bit and let guest to assign the new value
	 */
	memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
	memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
	memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);

	for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
		vgpu->cfg_space.bar[i].size = pci_resource_len(
			gvt->dev_priv->drm.pdev, i * 2);
		vgpu->cfg_space.bar[i].tracked = false;
	}
}

void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
	/* setup the ballooning information */
@ -177,7 +104,7 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
		if (low_avail / min_low == 0)
			break;
		gvt->types[i].low_gm_size = min_low;
		gvt->types[i].high_gm_size = 3 * gvt->types[i].low_gm_size;
		gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U));
		gvt->types[i].fence = 4;
		gvt->types[i].max_instance = low_avail / min_low;
		gvt->types[i].avail_instance = gvt->types[i].max_instance;
@ -217,7 +144,7 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
	 */
	low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE -
		gvt->gm.vgpu_allocated_low_gm_size;
	high_gm_avail = MB_TO_BYTES(256) * 3 - HOST_HIGH_GM_SIZE -
	high_gm_avail = MB_TO_BYTES(256) * 8UL - HOST_HIGH_GM_SIZE -
		gvt->gm.vgpu_allocated_high_gm_size;
	fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
		gvt->fence.vgpu_allocated_fence_num;
@ -268,7 +195,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
	intel_vgpu_clean_gtt(vgpu);
	intel_gvt_hypervisor_detach_vgpu(vgpu);
	intel_vgpu_free_resource(vgpu);
	clean_vgpu_mmio(vgpu);
	intel_vgpu_clean_mmio(vgpu);
	vfree(vgpu);

	intel_gvt_update_vgpu_types(gvt);
@ -300,11 +227,11 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
	vgpu->gvt = gvt;
	bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES);

	setup_vgpu_cfg_space(vgpu, param);
	intel_vgpu_init_cfg_space(vgpu, param->primary);

	ret = setup_vgpu_mmio(vgpu);
	ret = intel_vgpu_init_mmio(vgpu);
	if (ret)
		goto out_free_vgpu;
		goto out_clean_idr;

	ret = intel_vgpu_alloc_resource(vgpu, param);
	if (ret)
@ -354,7 +281,9 @@ out_detach_hypervisor_vgpu:
out_clean_vgpu_resource:
	intel_vgpu_free_resource(vgpu);
out_clean_vgpu_mmio:
	clean_vgpu_mmio(vgpu);
	intel_vgpu_clean_mmio(vgpu);
out_clean_idr:
	idr_remove(&gvt->vgpu_idr, vgpu->id);
out_free_vgpu:
	vfree(vgpu);
	mutex_unlock(&gvt->lock);
@ -398,7 +327,75 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
}

/**
 * intel_gvt_reset_vgpu - reset a virtual GPU
 * intel_gvt_reset_vgpu_locked - reset a virtual GPU by DMLR or GT reset
 * @vgpu: virtual GPU
 * @dmlr: vGPU Device Model Level Reset or GT Reset
 * @engine_mask: engines to reset for GT reset
 *
 * This function is called when the user wants to reset a virtual GPU through
 * a device model reset or a GT reset. The caller should hold the gvt lock.
 *
 * vGPU Device Model Level Reset (DMLR) simulates the PCI level reset to reset
 * the whole vGPU to the default state, as when it was created. This vGPU
 * function is required for both functionality and security concerns. The
 * ultimate goal of vGPU FLR is to reuse a vGPU instance across virtual
 * machines. When we assign a vGPU to a virtual machine, we must issue such
 * a reset first.
 *
 * Full GT Reset and Per-Engine GT Reset are soft reset flows for GPU engines
 * (Render, Blitter, Video, Video Enhancement). They are defined by the GPU
 * Spec. Unlike the FLR, a GT reset only resets the particular resources of a
 * vGPU named in the reset request. The guest driver can issue a GT reset by
 * programming the virtual GDRST register to reset specific virtual GPU
 * engines or all engines.
 *
 * The parameter dmlr identifies whether we will do a DMLR or a GT reset.
 * The parameter engine_mask specifies the engines that need to be reset.
 * If the value ALL_ENGINES is given for engine_mask, the caller requests a
 * full GT reset in which all virtual GPU engines are reset. For FLR,
 * engine_mask is ignored.
 */
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
				 unsigned int engine_mask)
{
	struct intel_gvt *gvt = vgpu->gvt;
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;

	gvt_dbg_core("------------------------------------------\n");
	gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n",
		     vgpu->id, dmlr, engine_mask);
	vgpu->resetting = true;

	intel_vgpu_stop_schedule(vgpu);
	/*
	 * The current_vgpu will set to NULL after stopping the
	 * scheduler when the reset is triggered by current vgpu.
	 */
	if (scheduler->current_vgpu == NULL) {
		mutex_unlock(&gvt->lock);
		intel_gvt_wait_vgpu_idle(vgpu);
		mutex_lock(&gvt->lock);
	}

	intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask);

	/* full GPU reset or device model level reset */
	if (engine_mask == ALL_ENGINES || dmlr) {
		intel_vgpu_reset_gtt(vgpu, dmlr);
		intel_vgpu_reset_resource(vgpu);
		intel_vgpu_reset_mmio(vgpu);
		populate_pvinfo_page(vgpu);

		if (dmlr)
			intel_vgpu_reset_cfg_space(vgpu);
	}

	vgpu->resetting = false;
	gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
	gvt_dbg_core("------------------------------------------\n");
}

/**
 * intel_gvt_reset_vgpu - reset a virtual GPU (Function Level)
 * @vgpu: virtual GPU
 *
 * This function is called when the user wants to reset a virtual GPU.
@ -406,4 +403,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
 */
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
{
	mutex_lock(&vgpu->gvt->lock);
	intel_gvt_reset_vgpu_locked(vgpu, true, 0);
	mutex_unlock(&vgpu->gvt->lock);
}

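Per the kernel-doc above, the two reset flavours differ only in the dmlr flag and the locking: intel_gvt_reset_vgpu() takes the gvt lock and forces a DMLR, while a GT-reset path would call the locked variant with dmlr false and an engine mask. A sketch of both call sites under that reading (the caller functions are illustrative, only the two reset entry points come from the patch):

#include "gvt.h"	/* intel_vgpu, reset entry points from this patch */

static void example_flr(struct intel_vgpu *vgpu)
{
	/* Device Model Level Reset: takes the gvt lock itself */
	intel_gvt_reset_vgpu(vgpu);
}

static void example_gt_reset(struct intel_vgpu *vgpu,
			     unsigned int engine_mask)
{
	/* GT reset of selected engines; caller already holds gvt->lock */
	intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);
}
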
@ -2378,7 +2378,7 @@ static int intel_runtime_suspend(struct device *kdev)

	assert_forcewakes_inactive(dev_priv);

	if (!IS_VALLEYVIEW(dev_priv) || !IS_CHERRYVIEW(dev_priv))
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
		intel_hpd_poll_init(dev_priv);

	DRM_DEBUG_KMS("Device suspended\n");

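The one-character fix above corrects an inverted De Morgan: since no device is both Valleyview and Cherryview, the old test (!IS_VALLEYVIEW(p) || !IS_CHERRYVIEW(p)) is true on every platform, so HPD polling was initialised even where it should not be. A minimal sketch of the corrected predicate:

#include <linux/types.h>

/* Illustrative only: vlv and chv can never both be true, so the
 * disjunction of their negations is a tautology; the conjunction
 * is the intended "neither platform" test. */
static bool should_init_hpd_poll(bool vlv, bool chv)
{
	return !vlv && !chv;
}
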
@ -1977,6 +1977,11 @@ struct drm_i915_private {

	struct i915_frontbuffer_tracking fb_tracking;

	struct intel_atomic_helper {
		struct llist_head free_list;
		struct work_struct free_work;
	} atomic_helper;

	u16 orig_clock;

	bool mchbar_need_disable;

@ -595,47 +595,21 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
		     struct drm_i915_gem_pwrite *args,
		     struct drm_file *file)
{
	struct drm_device *dev = obj->base.dev;
	void *vaddr = obj->phys_handle->vaddr + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);
	int ret;

	/* We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	lockdep_assert_held(&obj->base.dev->struct_mutex);
	ret = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_LOCKED |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT,
				   to_rps_client(file));
	if (ret)
		return ret;

	intel_fb_obj_invalidate(obj, ORIGIN_CPU);
	if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
		unsigned long unwritten;

		/* The physical object once assigned is fixed for the lifetime
		 * of the obj, so we can safely drop the lock and continue
		 * to access vaddr.
		 */
		mutex_unlock(&dev->struct_mutex);
		unwritten = copy_from_user(vaddr, user_data, args->size);
		mutex_lock(&dev->struct_mutex);
		if (unwritten) {
			ret = -EFAULT;
			goto out;
		}
	}
	if (copy_from_user(vaddr, user_data, args->size))
		return -EFAULT;

	drm_clflush_virt_range(vaddr, args->size);
	i915_gem_chipset_flush(to_i915(dev));
	i915_gem_chipset_flush(to_i915(obj->base.dev));

out:
	intel_fb_obj_flush(obj, false, ORIGIN_CPU);
	return ret;
	return 0;
}

void *i915_gem_object_alloc(struct drm_device *dev)

@ -199,6 +199,7 @@ found:
	}

	/* Unbinding will emit any required flushes */
	ret = 0;
	while (!list_empty(&eviction_list)) {
		vma = list_first_entry(&eviction_list,
				       struct i915_vma,

@ -185,6 +185,7 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
			return ret;
	}

	trace_i915_vma_bind(vma, bind_flags);
	ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
	if (ret)
		return ret;

@ -499,6 +499,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
	struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev);
	struct edid *edid;
	struct i2c_adapter *i2c;
	bool ret = false;

	BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);

@ -515,17 +516,17 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
		 */
		if (!is_digital) {
			DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
			return true;
			ret = true;
		} else {
			DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
		}

		DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
	} else {
		DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n");
	}

	kfree(edid);

	return false;
	return ret;
}

static enum drm_connector_status

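The intel_crt_detect_ddc() rework above fixes an EDID leak: the old early "return true" skipped the kfree(edid) at the bottom of the function. The fix records the verdict and lets every path reach the single cleanup point, sketched here with hypothetical helpers:

#include <linux/slab.h>
#include <linux/types.h>

void *get_edid(void);			/* hypothetical helper */
bool looks_analog(const void *edid);	/* hypothetical check */

static bool detect(void)
{
	bool ret = false;
	void *edid = get_edid();

	if (edid) {
		if (looks_analog(edid))
			ret = true;
		/* no early return: edid must be freed below */
	}

	kfree(edid);	/* kfree(NULL) is a no-op, so this is safe */
	return ret;
}
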
@ -2251,6 +2251,9 @@ void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
	intel_fill_fb_ggtt_view(&view, fb, rotation);
	vma = i915_gem_object_to_ggtt(obj, &view);

	if (WARN_ON_ONCE(!vma))
		return;

	i915_vma_unpin_fence(vma);
	i915_gem_object_unpin_from_display_plane(vma);
}
@ -2585,8 +2588,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
			 * We only keep the x/y offsets, so push all of the
			 * gtt offset into the x/y offsets.
			 */
			_intel_adjust_tile_offset(&x, &y, tile_size,
						  tile_width, tile_height, pitch_tiles,
			_intel_adjust_tile_offset(&x, &y,
						  tile_width, tile_height,
						  tile_size, pitch_tiles,
						  gtt_offset_rotated * tile_size, 0);

			gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
@ -2967,6 +2971,9 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
	unsigned int rotation = plane_state->base.rotation;
	int ret;

	if (!plane_state->base.visible)
		return 0;

	/* Rotate src coordinates to match rotated GTT view */
	if (drm_rotation_90_or_270(rotation))
		drm_rect_rotate(&plane_state->base.src,
@ -6846,6 +6853,12 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
	}

	state = drm_atomic_state_alloc(crtc->dev);
	if (!state) {
		DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
			      crtc->base.id, crtc->name);
		return;
	}

	state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;

	/* Everything's already locked, -EDEADLK can't happen. */
@ -11243,6 +11256,7 @@ found:
	}

	old->restore_state = restore_state;
	drm_atomic_state_put(state);

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
@ -14512,8 +14526,14 @@ intel_atomic_commit_ready(struct i915_sw_fence *fence,
		break;

	case FENCE_FREE:
		drm_atomic_state_put(&state->base);
		break;
		{
			struct intel_atomic_helper *helper =
				&to_i915(state->base.dev)->atomic_helper;

			if (llist_add(&state->freed, &helper->free_list))
				schedule_work(&helper->free_work);
			break;
		}
	}

	return NOTIFY_DONE;
@ -16392,6 +16412,18 @@ fail:
	drm_modeset_acquire_fini(&ctx);
}

static void intel_atomic_helper_free_state(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), atomic_helper.free_work);
	struct intel_atomic_state *state, *next;
	struct llist_node *freed;

	freed = llist_del_all(&dev_priv->atomic_helper.free_list);
	llist_for_each_entry_safe(state, next, freed, freed)
		drm_atomic_state_put(&state->base);
}

int intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
@ -16411,6 +16443,9 @@ int intel_modeset_init(struct drm_device *dev)

	dev->mode_config.funcs = &intel_mode_funcs;

	INIT_WORK(&dev_priv->atomic_helper.free_work,
		  intel_atomic_helper_free_state);

	intel_init_quirks(dev);

	intel_init_pm(dev_priv);
@ -17024,7 +17059,8 @@ void intel_display_resume(struct drm_device *dev)

	if (ret)
		DRM_ERROR("Restoring old state failed with %i\n", ret);
	drm_atomic_state_put(state);
	if (state)
		drm_atomic_state_put(state);
}

void intel_modeset_gem_init(struct drm_device *dev)
@ -17094,6 +17130,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	flush_work(&dev_priv->atomic_helper.free_work);
	WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));

	intel_disable_gt_powersave(dev_priv);

	/*

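The atomic_helper machinery added above is a lock-free deferred-free queue: llist_add() returns true only on the empty-to-non-empty transition, so the work item is scheduled once per batch, and the worker then drains everything with llist_del_all(). The pattern in isolation, with illustrative type names:

#include <linux/llist.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

struct item {
	struct llist_node node;
};

static LLIST_HEAD(free_list);

static void free_worker(struct work_struct *work)
{
	struct item *it, *next;
	struct llist_node *freed = llist_del_all(&free_list);

	/* drain the whole batch that accumulated since the last run */
	llist_for_each_entry_safe(it, next, freed, node)
		kfree(it);
}

static DECLARE_WORK(free_work, free_worker);

static void defer_free(struct item *it)
{
	/* llist_add() returns true iff the list was previously empty,
	 * so the work is kicked exactly once per batch of additions */
	if (llist_add(&it->node, &free_list))
		schedule_work(&free_work);
}
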
@ -370,6 +370,8 @@ struct intel_atomic_state {
	struct skl_wm_values wm_results;

	struct i915_sw_fence commit_ready;

	struct llist_node freed;
};

struct intel_plane_state {

@ -742,6 +742,9 @@ void intel_fbdev_initial_config_async(struct drm_device *dev)
{
	struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;

	if (!ifbdev)
		return;

	ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev);
}

@ -979,18 +979,8 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
						uint32_t *batch,
						uint32_t index)
{
	struct drm_i915_private *dev_priv = engine->i915;
	uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);

	/*
	 * WaDisableLSQCROPERFforOCL:kbl
	 * This WA is implemented in skl_init_clock_gating() but since
	 * this batch updates GEN8_L3SQCREG4 with default value we need to
	 * set this bit here to retain the WA during flush.
	 */
	if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
		l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;

	wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
				   MI_SRM_LRM_GLOBAL_GTT));
	wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);