sparc/vdso: Add helper function for 64-bit right shift on 32-bit target

Add helper function for 64-bit right shift on 32-bit target so that
clang does not emit a runtime library call.

Signed-off-by: Koakuma <koachan@protonmail.com>
Reviewed-by: Andreas Larsson <andreas@gaisler.com>
Link: https://lore.kernel.org/r/20240808-sparc-shr64-v2-1-fd18f1b2cea9@protonmail.com
Signed-off-by: Andreas Larsson <andreas@gaisler.com>
Author:    Koakuma <koachan@protonmail.com>, 2024-08-08 09:05:00 +07:00
Committer: Andreas Larsson <andreas@gaisler.com>
Commit:    b6370b338e (parent 8467d8b282)
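
Background (not part of the patch text): a variable shift of a 64-bit value has no single-instruction lowering on a 32-bit target, so the compiler may emit a call to a runtime helper such as __lshrdi3 from libgcc/compiler-rt. The vDSO is not linked against that runtime, which is why the shift is open-coded in the hunks below. A minimal sketch of the construct that triggers the libcall, using illustrative names that are not taken from the patch:

	#include <stdint.h>

	/*
	 * Built for a 32-bit target, a variable 64-bit right shift like
	 * this one is commonly lowered to a __lshrdi3 libcall, which
	 * vDSO code cannot pull in.
	 */
	uint64_t shr64_c(uint64_t val, int amt)
	{
		return val >> amt;
	}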

@@ -86,6 +86,11 @@ notrace static long vdso_fallback_gettimeofday(struct __kernel_old_timeval *tv,
 }
 
 #ifdef CONFIG_SPARC64
+notrace static __always_inline u64 __shr64(u64 val, int amt)
+{
+	return val >> amt;
+}
+
 notrace static __always_inline u64 vread_tick(void)
 {
 	u64 ret;
@@ -102,6 +107,21 @@ notrace static __always_inline u64 vread_tick_stick(void)
 	return ret;
 }
 #else
+notrace static __always_inline u64 __shr64(u64 val, int amt)
+{
+	u64 ret;
+
+	__asm__ __volatile__("sllx %H1, 32, %%g1\n\t"
+			     "srl %L1, 0, %L1\n\t"
+			     "or %%g1, %L1, %%g1\n\t"
+			     "srlx %%g1, %2, %L0\n\t"
+			     "srlx %L0, 32, %H0"
+			     : "=r" (ret)
+			     : "r" (val), "r" (amt)
+			     : "g1");
+	return ret;
+}
+
 notrace static __always_inline u64 vread_tick(void)
 {
 	register unsigned long long ret asm("o4");
@@ -154,7 +174,7 @@ notrace static __always_inline int do_realtime(struct vvar_data *vvar,
 		ts->tv_sec = vvar->wall_time_sec;
 		ns = vvar->wall_time_snsec;
 		ns += vgetsns(vvar);
-		ns >>= vvar->clock.shift;
+		ns = __shr64(ns, vvar->clock.shift);
 	} while (unlikely(vvar_read_retry(vvar, seq)));
 
 	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
@@ -174,7 +194,7 @@ notrace static __always_inline int do_realtime_stick(struct vvar_data *vvar,
 		ts->tv_sec = vvar->wall_time_sec;
 		ns = vvar->wall_time_snsec;
 		ns += vgetsns_stick(vvar);
-		ns >>= vvar->clock.shift;
+		ns = __shr64(ns, vvar->clock.shift);
 	} while (unlikely(vvar_read_retry(vvar, seq)));
 
 	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
@@ -194,7 +214,7 @@ notrace static __always_inline int do_monotonic(struct vvar_data *vvar,
 		ts->tv_sec = vvar->monotonic_time_sec;
 		ns = vvar->monotonic_time_snsec;
 		ns += vgetsns(vvar);
-		ns >>= vvar->clock.shift;
+		ns = __shr64(ns, vvar->clock.shift);
 	} while (unlikely(vvar_read_retry(vvar, seq)));
 
 	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
@@ -214,7 +234,7 @@ notrace static __always_inline int do_monotonic_stick(struct vvar_data *vvar,
 		ts->tv_sec = vvar->monotonic_time_sec;
 		ns = vvar->monotonic_time_snsec;
 		ns += vgetsns_stick(vvar);
-		ns >>= vvar->clock.shift;
+		ns = __shr64(ns, vvar->clock.shift);
 	} while (unlikely(vvar_read_retry(vvar, seq)));
 
 	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
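
For readers less familiar with SPARC inline assembly, here is a commented restatement of the 32-bit __shr64() added above, as a standalone sketch. The comments reflect one reading of the code and are not part of the patch; the build assumptions (a 32-bit SPARC compile on v9-capable hardware, e.g. something like -m32 -mcpu=ultrasparc) mirror the situation of the sparc64 kernel's 32-bit vDSO, where the ABI is 32-bit but the CPU still provides the 64-bit sllx/srlx instructions.

	#include <stdint.h>

	static inline uint64_t shr64(uint64_t val, int amt)
	{
		uint64_t ret;

		/*
		 * %H1/%L1 name the registers holding the high/low 32 bits
		 * of 'val' on the 32-bit ABI; %H0/%L0 likewise for 'ret'.
		 * %g1 is used as scratch and therefore listed as clobbered.
		 */
		__asm__ __volatile__(
			"sllx %H1, 32, %%g1\n\t"	/* g1 = high(val) << 32 */
			"srl  %L1, 0, %L1\n\t"		/* zero-extend low(val): clear its upper 32 bits */
			"or   %%g1, %L1, %%g1\n\t"	/* g1 = the full 64-bit val */
			"srlx %%g1, %2, %L0\n\t"	/* 64-bit shift; low 32 bits of %L0 become low(ret) */
			"srlx %L0, 32, %H0"		/* upper 32 bits of the result become high(ret) */
			: "=r" (ret)			/* 64-bit output lives in a register pair */
			: "r" (val), "r" (amt)
			: "g1");

		return ret;
	}

A quick sanity check after building would be to disassemble the 32-bit vDSO object and confirm that no call to __lshrdi3 remains, e.g. with objdump -dr (the exact object path under arch/sparc/vdso/ is an assumption, not taken from the patch).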