clk: samsung: Improve clk-cpu.c style
clk-cpu.c has numerous style issues reported by checkpatch and easily
identified otherwise. Give it some love and fix those warnings where it
makes sense. Also make stabilization time a named constant to get rid of
the magic number in clk-cpu.c. No functional change.

Signed-off-by: Sam Protsenko <semen.protsenko@linaro.org>
Link: https://lore.kernel.org/r/20240224202053.25313-3-semen.protsenko@linaro.org
Signed-off-by: Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
parent 76dedb9c0b
commit f707e891eb
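As a reader's aid (not part of the patch itself), here is a small standalone C
check of the mask conversions in the hunks below. The GENMASK()/BIT() macros
are redefined locally to mirror the <linux/bits.h> helpers for 32-bit values
(a simplified reimplementation, not the kernel's exact definition); the asserts
confirm each new form is value-identical to the raw constant it replaces, i.e.
the conversion is a pure style change.

/*
 * Illustrative userspace sanity check; GENMASK()/BIT() below are simplified
 * 32-bit equivalents of the <linux/bits.h> helpers used by the patch.
 */
#include <assert.h>
#include <stdio.h>

#define BIT(nr)		(1U << (nr))
#define GENMASK(h, l)	((~0U >> (31 - (h))) & (~0U << (l)))

int main(void)
{
	assert(GENMASK(2, 0) == 0x7);          /* E4210_DIV0_RATIO0_MASK, DIV_MASK, MUX_MASK */
	assert(GENMASK(6, 4) == (0x7 << 4));   /* E4210_DIV1_HPM_MASK */
	assert(GENMASK(2, 0) == (0x7 << 0));   /* E4210_DIV1_COPY_MASK */
	assert(BIT(20) == (1 << 20));          /* E4210_MUX_HPM_MASK */
	assert(GENMASK(31, 0) == 0xffffffffU); /* DIV_MASK_ALL */
	printf("all mask conversions are value-identical\n");
	return 0;
}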
@@ -16,18 +16,18 @@
  * of the SoC or supplied after the SoC characterization.
  *
  * The below implementation of the CPU clock allows the rate changes of the CPU
- * clock and the corresponding rate changes of the auxillary clocks of the CPU
+ * clock and the corresponding rate changes of the auxiliary clocks of the CPU
  * domain. The platform clock driver provides a clock register configuration
  * for each configurable rate which is then used to program the clock hardware
- * registers to acheive a fast co-oridinated rate change for all the CPU domain
+ * registers to achieve a fast coordinated rate change for all the CPU domain
  * clocks.
  *
  * On a rate change request for the CPU clock, the rate change is propagated
- * upto the PLL supplying the clock to the CPU domain clock blocks. While the
+ * up to the PLL supplying the clock to the CPU domain clock blocks. While the
  * CPU domain PLL is reconfigured, the CPU domain clocks are driven using an
  * alternate clock source. If required, the alternate clock source is divided
  * down in order to keep the output clock rate within the previous OPP limits.
  */
 
 #include <linux/errno.h>
 #include <linux/io.h>
@@ -50,17 +50,19 @@
 #define E5433_DIV_STAT_CPU0	0x500
 #define E5433_DIV_STAT_CPU1	0x504
 
-#define E4210_DIV0_RATIO0_MASK	0x7
-#define E4210_DIV1_HPM_MASK	(0x7 << 4)
-#define E4210_DIV1_COPY_MASK	(0x7 << 0)
-#define E4210_MUX_HPM_MASK	(1 << 20)
+#define E4210_DIV0_RATIO0_MASK	GENMASK(2, 0)
+#define E4210_DIV1_HPM_MASK	GENMASK(6, 4)
+#define E4210_DIV1_COPY_MASK	GENMASK(2, 0)
+#define E4210_MUX_HPM_MASK	BIT(20)
 #define E4210_DIV0_ATB_SHIFT	16
 #define E4210_DIV0_ATB_MASK	(DIV_MASK << E4210_DIV0_ATB_SHIFT)
 
+/* Divider stabilization time, msec */
+#define MAX_STAB_TIME		10
 #define MAX_DIV			8
-#define DIV_MASK		7
-#define DIV_MASK_ALL		0xffffffff
-#define MUX_MASK		7
+#define DIV_MASK		GENMASK(2, 0)
+#define DIV_MASK_ALL		GENMASK(31, 0)
+#define MUX_MASK		GENMASK(2, 0)
 
 /*
  * Helper function to wait until divider(s) have stabilized after the divider
@@ -68,7 +70,7 @@
  */
 static void wait_until_divider_stable(void __iomem *div_reg, unsigned long mask)
 {
-	unsigned long timeout = jiffies + msecs_to_jiffies(10);
+	unsigned long timeout = jiffies + msecs_to_jiffies(MAX_STAB_TIME);
 
 	do {
 		if (!(readl(div_reg) & mask))
@@ -86,9 +88,9 @@ static void wait_until_divider_stable(void __iomem *div_reg, unsigned long mask)
  * value was changed.
  */
 static void wait_until_mux_stable(void __iomem *mux_reg, u32 mux_pos,
-				unsigned long mux_value)
+				  unsigned long mux_value)
 {
-	unsigned long timeout = jiffies + msecs_to_jiffies(10);
+	unsigned long timeout = jiffies + msecs_to_jiffies(MAX_STAB_TIME);
 
 	do {
 		if (((readl(mux_reg) >> mux_pos) & MUX_MASK) == mux_value)
@@ -101,18 +103,18 @@ static void wait_until_mux_stable(void __iomem *mux_reg, u32 mux_pos,
 	pr_err("%s: re-parenting mux timed-out\n", __func__);
 }
 
-/* common round rate callback useable for all types of CPU clocks */
-static long exynos_cpuclk_round_rate(struct clk_hw *hw,
-			unsigned long drate, unsigned long *prate)
+/* common round rate callback usable for all types of CPU clocks */
+static long exynos_cpuclk_round_rate(struct clk_hw *hw, unsigned long drate,
+				     unsigned long *prate)
 {
 	struct clk_hw *parent = clk_hw_get_parent(hw);
 	*prate = clk_hw_round_rate(parent, drate);
 	return *prate;
 }
 
-/* common recalc rate callback useable for all types of CPU clocks */
+/* common recalc rate callback usable for all types of CPU clocks */
 static unsigned long exynos_cpuclk_recalc_rate(struct clk_hw *hw,
-					unsigned long parent_rate)
+					       unsigned long parent_rate)
 {
 	/*
 	 * The CPU clock output (armclk) rate is the same as its parent
@@ -135,7 +137,7 @@ static const struct clk_ops exynos_cpuclk_clk_ops = {
  * dividers to be programmed.
  */
 static void exynos_set_safe_div(void __iomem *base, unsigned long div,
-			unsigned long mask)
+				unsigned long mask)
 {
 	unsigned long div0;
 
@@ -151,7 +153,6 @@ static int exynos_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
 {
 	const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
 	unsigned long alt_prate = clk_hw_get_rate(cpuclk->alt_parent);
-	unsigned long alt_div = 0, alt_div_mask = DIV_MASK;
 	unsigned long div0, div1 = 0, mux_reg;
 	unsigned long flags;
 
@@ -187,6 +188,7 @@
 	 */
 	if (alt_prate > ndata->old_rate || ndata->old_rate > ndata->new_rate) {
 		unsigned long tmp_rate = min(ndata->old_rate, ndata->new_rate);
+		unsigned long alt_div, alt_div_mask = DIV_MASK;
 
 		alt_div = DIV_ROUND_UP(alt_prate, tmp_rate) - 1;
 		WARN_ON(alt_div >= MAX_DIV);
@@ -215,7 +217,7 @@
 	if (cpuclk->flags & CLK_CPU_HAS_DIV1) {
 		writel(div1, base + E4210_DIV_CPU1);
 		wait_until_divider_stable(base + E4210_DIV_STAT_CPU1,
-				DIV_MASK_ALL);
+					  DIV_MASK_ALL);
 	}
 
 	spin_unlock_irqrestore(cpuclk->lock, flags);
@@ -263,7 +265,7 @@ static int exynos_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
  * dividers to be programmed.
  */
 static void exynos5433_set_safe_div(void __iomem *base, unsigned long div,
-				unsigned long mask)
+				    unsigned long mask)
 {
 	unsigned long div0;
 
@@ -279,7 +281,6 @@ static int exynos5433_cpuclk_pre_rate_change(struct clk_notifier_data *ndata,
 {
 	const struct exynos_cpuclk_cfg_data *cfg_data = cpuclk->cfg;
 	unsigned long alt_prate = clk_hw_get_rate(cpuclk->alt_parent);
-	unsigned long alt_div = 0, alt_div_mask = DIV_MASK;
 	unsigned long div0, div1 = 0, mux_reg;
 	unsigned long flags;
 
@@ -309,6 +310,7 @@
 	 */
 	if (alt_prate > ndata->old_rate || ndata->old_rate > ndata->new_rate) {
 		unsigned long tmp_rate = min(ndata->old_rate, ndata->new_rate);
+		unsigned long alt_div, alt_div_mask = DIV_MASK;
 
 		alt_div = DIV_ROUND_UP(alt_prate, tmp_rate) - 1;
 		WARN_ON(alt_div >= MAX_DIV);
@@ -358,7 +360,7 @@ static int exynos5433_cpuclk_post_rate_change(struct clk_notifier_data *ndata,
  * notifications of the parent clock of cpuclk.
  */
 static int exynos_cpuclk_notifier_cb(struct notifier_block *nb,
-				unsigned long event, void *data)
+				     unsigned long event, void *data)
 {
 	struct clk_notifier_data *ndata = data;
 	struct exynos_cpuclk *cpuclk;
@@ -381,7 +383,7 @@ static int exynos_cpuclk_notifier_cb(struct notifier_block *nb,
  * notifications of the parent clock of cpuclk.
  */
 static int exynos5433_cpuclk_notifier_cb(struct notifier_block *nb,
-				unsigned long event, void *data)
+					 unsigned long event, void *data)
 {
 	struct clk_notifier_data *ndata = data;
 	struct exynos_cpuclk *cpuclk;
@@ -438,11 +440,10 @@ static int __init exynos_register_cpu_clock(struct samsung_clk_provider *ctx,
 	else
 		cpuclk->clk_nb.notifier_call = exynos_cpuclk_notifier_cb;
 
-
 	ret = clk_notifier_register(parent->clk, &cpuclk->clk_nb);
 	if (ret) {
 		pr_err("%s: failed to register clock notifier for %s\n",
-				__func__, name);
+		       __func__, name);
 		goto free_cpuclk;
 	}
 
@@ -454,7 +455,7 @@
 
 	ret = clk_hw_register(NULL, &cpuclk->hw);
 	if (ret) {
-		pr_err("%s: could not register cpuclk %s\n", __func__, name);
+		pr_err("%s: could not register cpuclk %s\n", __func__, name);
 		goto free_cpuclk_data;
 	}
 
@@ -482,8 +483,8 @@ void __init samsung_clk_register_cpu(struct samsung_clk_provider *ctx,
 		for (num_cfgs = 0; list->cfg[num_cfgs].prate != 0; )
 			num_cfgs++;
 
-		exynos_register_cpu_clock(ctx, list->id, list->name, hws[list->parent_id],
-			hws[list->alt_parent_id], list->offset, list->cfg, num_cfgs,
-			list->flags);
+		exynos_register_cpu_clock(ctx, list->id, list->name,
+					  hws[list->parent_id], hws[list->alt_parent_id],
+					  list->offset, list->cfg, num_cfgs, list->flags);
 	}
 }
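The other substantive cleanup is replacing the bare msecs_to_jiffies(10)
timeouts with the named MAX_STAB_TIME constant. As an illustration of the
bounded-polling pattern those helpers implement, here is a hypothetical
userspace sketch (not the kernel code: jiffies/time_before() and readl() are
replaced by CLOCK_MONOTONIC and a plain variable standing in for the DIV_STAT
register), showing how the loop gives up after MAX_STAB_TIME milliseconds.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define MAX_STAB_TIME	10	/* divider stabilization time, msec */

/* Current monotonic time in milliseconds. */
static int64_t now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

/* Stand-in for readl(div_reg) & mask: busy bits cleared means stable. */
static bool divider_stable(const volatile uint32_t *div_stat, uint32_t mask)
{
	return (*div_stat & mask) == 0;
}

/* Poll until the divider reports stable or the deadline expires. */
static void wait_until_divider_stable(const volatile uint32_t *div_stat,
				      uint32_t mask)
{
	int64_t timeout = now_ms() + MAX_STAB_TIME;

	do {
		if (divider_stable(div_stat, mask))
			return;
	} while (now_ms() < timeout);

	fprintf(stderr, "%s: timed out waiting for divider to stabilize\n",
		__func__);
}

int main(void)
{
	volatile uint32_t div_stat = 0;	/* pretend the hardware already settled */

	wait_until_divider_stable(&div_stat, 0xffffffffu);
	puts("divider reported stable");
	return 0;
}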