locking/atomic: scripts: simplify raw_atomic*() definitions
Currently each ordering variant has several potential definitions, with
a mixture of preprocessor and C definitions, including several copies
of its C prototype, e.g.

| #if defined(arch_atomic_fetch_andnot_acquire)
| #define raw_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot_acquire
| #elif defined(arch_atomic_fetch_andnot_relaxed)
| static __always_inline int
| raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
| {
| 	int ret = arch_atomic_fetch_andnot_relaxed(i, v);
| 	__atomic_acquire_fence();
| 	return ret;
| }
| #elif defined(arch_atomic_fetch_andnot)
| #define raw_atomic_fetch_andnot_acquire arch_atomic_fetch_andnot
| #else
| static __always_inline int
| raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
| {
| 	return raw_atomic_fetch_and_acquire(~i, v);
| }
| #endif

Make this a bit simpler by defining the C prototype once, and writing
the various potential definitions as plain C code guarded by ifdeffery.
For example, the above becomes:

| static __always_inline int
| raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
| {
| #if defined(arch_atomic_fetch_andnot_acquire)
| 	return arch_atomic_fetch_andnot_acquire(i, v);
| #elif defined(arch_atomic_fetch_andnot_relaxed)
| 	int ret = arch_atomic_fetch_andnot_relaxed(i, v);
| 	__atomic_acquire_fence();
| 	return ret;
| #elif defined(arch_atomic_fetch_andnot)
| 	return arch_atomic_fetch_andnot(i, v);
| #else
| 	return raw_atomic_fetch_and_acquire(~i, v);
| #endif
| }

Which is far easier to read. As we now always have a single copy of the
C prototype wrapping all the potential definitions, we now have an
obvious single location for kerneldoc comments.

At the same time, the fallbacks for raw_atomic*_xchg() are made to use
'new' rather than 'i' as the name of the new value. This is what the
existing fallback template used, and is more consistent with the
raw_atomic_{try_,}cmpxchg() fallbacks.

There should be no functional change as a result of this patch.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20230605070124.3741859-24-mark.rutland@arm.com
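To see both changes together, here is a sketch of one generated xchg
variant (all branches are emitted into the generated fallback header,
presumably the large suppressed diff below; the preprocessor picks one
branch per architecture). The #else branch comes from the
scripts/atomic/fallbacks/xchg template, which already used 'new':

| static __always_inline int
| raw_atomic_xchg_acquire(atomic_t *v, int new)
| {
| #if defined(arch_atomic_xchg_acquire)
| 	return arch_atomic_xchg_acquire(v, new);
| #elif defined(arch_atomic_xchg_relaxed)
| 	int ret = arch_atomic_xchg_relaxed(v, new);
| 	__atomic_acquire_fence();
| 	return ret;
| #elif defined(arch_atomic_xchg)
| 	return arch_atomic_xchg(v, new);
| #else
| 	return raw_xchg_acquire(&v->counter, new);
| #endif
| }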
This commit is contained in:
parent 630399469f
commit 1d78814d41
File diff suppressed because it is too large
@@ -462,33 +462,33 @@ atomic_fetch_xor_relaxed(int i, atomic_t *v)
 }

 static __always_inline int
-atomic_xchg(atomic_t *v, int i)
+atomic_xchg(atomic_t *v, int new)
 {
 	kcsan_mb();
 	instrument_atomic_read_write(v, sizeof(*v));
-	return raw_atomic_xchg(v, i);
+	return raw_atomic_xchg(v, new);
 }

 static __always_inline int
-atomic_xchg_acquire(atomic_t *v, int i)
+atomic_xchg_acquire(atomic_t *v, int new)
 {
 	instrument_atomic_read_write(v, sizeof(*v));
-	return raw_atomic_xchg_acquire(v, i);
+	return raw_atomic_xchg_acquire(v, new);
 }

 static __always_inline int
-atomic_xchg_release(atomic_t *v, int i)
+atomic_xchg_release(atomic_t *v, int new)
 {
 	kcsan_release();
 	instrument_atomic_read_write(v, sizeof(*v));
-	return raw_atomic_xchg_release(v, i);
+	return raw_atomic_xchg_release(v, new);
 }

 static __always_inline int
-atomic_xchg_relaxed(atomic_t *v, int i)
+atomic_xchg_relaxed(atomic_t *v, int new)
 {
 	instrument_atomic_read_write(v, sizeof(*v));
-	return raw_atomic_xchg_relaxed(v, i);
+	return raw_atomic_xchg_relaxed(v, new);
 }

 static __always_inline int
@@ -1103,33 +1103,33 @@ atomic64_fetch_xor_relaxed(s64 i, atomic64_t *v)
 }

 static __always_inline s64
-atomic64_xchg(atomic64_t *v, s64 i)
+atomic64_xchg(atomic64_t *v, s64 new)
 {
 	kcsan_mb();
 	instrument_atomic_read_write(v, sizeof(*v));
-	return raw_atomic64_xchg(v, i);
+	return raw_atomic64_xchg(v, new);
 }

 static __always_inline s64
-atomic64_xchg_acquire(atomic64_t *v, s64 i)
+atomic64_xchg_acquire(atomic64_t *v, s64 new)
 {
 	instrument_atomic_read_write(v, sizeof(*v));
-	return raw_atomic64_xchg_acquire(v, i);
+	return raw_atomic64_xchg_acquire(v, new);
 }

 static __always_inline s64
-atomic64_xchg_release(atomic64_t *v, s64 i)
+atomic64_xchg_release(atomic64_t *v, s64 new)
 {
 	kcsan_release();
 	instrument_atomic_read_write(v, sizeof(*v));
-	return raw_atomic64_xchg_release(v, i);
+	return raw_atomic64_xchg_release(v, new);
 }

 static __always_inline s64
-atomic64_xchg_relaxed(atomic64_t *v, s64 i)
+atomic64_xchg_relaxed(atomic64_t *v, s64 new)
 {
 	instrument_atomic_read_write(v, sizeof(*v));
-	return raw_atomic64_xchg_relaxed(v, i);
+	return raw_atomic64_xchg_relaxed(v, new);
 }

 static __always_inline s64
@@ -1744,33 +1744,33 @@ atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
 }

 static __always_inline long
-atomic_long_xchg(atomic_long_t *v, long i)
+atomic_long_xchg(atomic_long_t *v, long new)
 {
 	kcsan_mb();
 	instrument_atomic_read_write(v, sizeof(*v));
-	return raw_atomic_long_xchg(v, i);
+	return raw_atomic_long_xchg(v, new);
 }

 static __always_inline long
-atomic_long_xchg_acquire(atomic_long_t *v, long i)
+atomic_long_xchg_acquire(atomic_long_t *v, long new)
 {
 	instrument_atomic_read_write(v, sizeof(*v));
-	return raw_atomic_long_xchg_acquire(v, i);
+	return raw_atomic_long_xchg_acquire(v, new);
 }

 static __always_inline long
-atomic_long_xchg_release(atomic_long_t *v, long i)
+atomic_long_xchg_release(atomic_long_t *v, long new)
 {
 	kcsan_release();
 	instrument_atomic_read_write(v, sizeof(*v));
-	return raw_atomic_long_xchg_release(v, i);
+	return raw_atomic_long_xchg_release(v, new);
 }

 static __always_inline long
-atomic_long_xchg_relaxed(atomic_long_t *v, long i)
+atomic_long_xchg_relaxed(atomic_long_t *v, long new)
 {
 	instrument_atomic_read_write(v, sizeof(*v));
-	return raw_atomic_long_xchg_relaxed(v, i);
+	return raw_atomic_long_xchg_relaxed(v, new);
 }

 static __always_inline long
@@ -2231,4 +2231,4 @@ atomic_long_dec_if_positive(atomic_long_t *v)

 #endif /* _LINUX_ATOMIC_INSTRUMENTED_H */

-// f6502977180430e61c1a7c4e5e665f04f501fb8d
+// a4c3d2b229f907654cc53cb5d40e80f7fed1ec9c
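The hunks above only rename the value parameter from 'i' to 'new'; the
shape of the instrumented layer is unchanged: each atomic_*() wrapper
notes any implied barrier for KCSAN, records the access for the
sanitizers, then defers to the raw_ op. A usage sketch (hypothetical
kernel-code caller, not part of this patch):

/* Kernel context assumed (linux/atomic.h). Behaviour is identical
 * before and after this patch; only the parameter name changed. */
static atomic_t state = ATOMIC_INIT(0);

static int publish_state(int new)
{
	/*
	 * atomic_xchg() is fully ordered: kcsan_mb() notes the implied
	 * barrier, instrument_atomic_read_write() records the access,
	 * then raw_atomic_xchg() performs the real exchange.
	 */
	return atomic_xchg(&state, new);
}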
@@ -622,42 +622,42 @@ raw_atomic_long_fetch_xor_relaxed(long i, atomic_long_t *v)
 }

 static __always_inline long
-raw_atomic_long_xchg(atomic_long_t *v, long i)
+raw_atomic_long_xchg(atomic_long_t *v, long new)
 {
 #ifdef CONFIG_64BIT
-	return raw_atomic64_xchg(v, i);
+	return raw_atomic64_xchg(v, new);
 #else
-	return raw_atomic_xchg(v, i);
+	return raw_atomic_xchg(v, new);
 #endif
 }

 static __always_inline long
-raw_atomic_long_xchg_acquire(atomic_long_t *v, long i)
+raw_atomic_long_xchg_acquire(atomic_long_t *v, long new)
 {
 #ifdef CONFIG_64BIT
-	return raw_atomic64_xchg_acquire(v, i);
+	return raw_atomic64_xchg_acquire(v, new);
 #else
-	return raw_atomic_xchg_acquire(v, i);
+	return raw_atomic_xchg_acquire(v, new);
 #endif
 }

 static __always_inline long
-raw_atomic_long_xchg_release(atomic_long_t *v, long i)
+raw_atomic_long_xchg_release(atomic_long_t *v, long new)
 {
 #ifdef CONFIG_64BIT
-	return raw_atomic64_xchg_release(v, i);
+	return raw_atomic64_xchg_release(v, new);
 #else
-	return raw_atomic_xchg_release(v, i);
+	return raw_atomic_xchg_release(v, new);
 #endif
 }

 static __always_inline long
-raw_atomic_long_xchg_relaxed(atomic_long_t *v, long i)
+raw_atomic_long_xchg_relaxed(atomic_long_t *v, long new)
 {
 #ifdef CONFIG_64BIT
-	return raw_atomic64_xchg_relaxed(v, i);
+	return raw_atomic64_xchg_relaxed(v, new);
 #else
-	return raw_atomic_xchg_relaxed(v, i);
+	return raw_atomic_xchg_relaxed(v, new);
 #endif
 }

@@ -872,4 +872,4 @@ raw_atomic_long_dec_if_positive(atomic_long_t *v)
 }

 #endif /* _LINUX_ATOMIC_LONG_H */
-// ad09f849db0db5b30c82e497eeb9056a394c5f22
+// e785d25cc3f220b7d473d36aac9da85dd7eb13a8
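The CONFIG_64BIT ifdeffery above reflects how atomic_long_t is defined:
it aliases the 64-bit atomic on 64-bit kernels and the 32-bit one
otherwise, so each raw_atomic_long_*() op is a thin static-inline
dispatcher. A conceptual sketch of the mapping (simplified; not the
literal kernel definitions):

/* Conceptual sketch only: atomic_long_t aliases the native-word
 * atomic, so raw_atomic_long_*() forwards to raw_atomic64_*() or
 * raw_atomic_*(), as in the diff above. */
#ifdef CONFIG_64BIT
typedef atomic64_t atomic_long_t;	/* 64-bit kernels: s64 counter */
#else
typedef atomic_t atomic_long_t;		/* 32-bit kernels: int counter */
#endif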
@@ -27,7 +27,7 @@ and vF i v
 andnot vF i v
 or vF i v
 xor vF i v
-xchg I v i
+xchg I v i:new
 cmpxchg I v i:old i:new
 try_cmpxchg B v p:old i:new
 sub_and_test b i v
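Each atomics.tbl line reads 'name meta args...', and an argument token
may take the form 'type:name' to give the parameter an explicit name
(the comment block at the top of the table documents the codes).
Changing xchg's value argument from the bare 'i' (which defaulted to
the name 'i') to 'i:new' is what drives every rename in the headers
above; the resulting prototypes are, as a sketch:

/* Prototypes produced by the "xchg I v i:new" entry; the
 * acquire/release/relaxed variants follow the same shape. */
static __always_inline int  raw_atomic_xchg(atomic_t *v, int new);
static __always_inline s64  raw_atomic64_xchg(atomic64_t *v, s64 new);
static __always_inline long raw_atomic_long_xchg(atomic_long_t *v, long new);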
@@ -1,9 +1,5 @@
 cat <<EOF
-static __always_inline ${ret}
-raw_${atomic}_${pfx}${name}${sfx}_acquire(${params})
-{
 	${ret} ret = arch_${atomic}_${pfx}${name}${sfx}_relaxed(${args});
 	__atomic_acquire_fence();
 	return ret;
-}
 EOF
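Since gen-atomic-fallback.sh now prints the prototype and braces
itself, the templates shrink to just the function body. Instantiated
for ${atomic}=atomic, ${pfx}=fetch_, ${name}=andnot, the acquire
template's body lands inside the generated prototype exactly as quoted
in the commit message (a sketch):

static __always_inline int
raw_atomic_fetch_andnot_acquire(int i, atomic_t *v)
{
	/* body emitted by the acquire template: */
	int ret = arch_atomic_fetch_andnot_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
}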
@@ -1,7 +1,3 @@
 cat <<EOF
-static __always_inline bool
-raw_${atomic}_add_negative${order}(${int} i, ${atomic}_t *v)
-{
 	return raw_${atomic}_add_return${order}(i, v) < 0;
-}
 EOF
@@ -1,7 +1,3 @@
 cat << EOF
-static __always_inline bool
-raw_${atomic}_add_unless(${atomic}_t *v, ${int} a, ${int} u)
-{
 	return raw_${atomic}_fetch_add_unless(v, a, u) != u;
-}
 EOF
@@ -1,7 +1,3 @@
 cat <<EOF
-static __always_inline ${ret}
-raw_${atomic}_${pfx}andnot${sfx}${order}(${int} i, ${atomic}_t *v)
-{
 	${retstmt}raw_${atomic}_${pfx}and${sfx}${order}(~i, v);
-}
 EOF
@@ -1,7 +1,3 @@
 cat <<EOF
-static __always_inline ${int}
-raw_${atomic}_cmpxchg${order}(${atomic}_t *v, ${int} old, ${int} new)
-{
 	return raw_cmpxchg${order}(&v->counter, old, new);
-}
 EOF
@@ -1,7 +1,3 @@
 cat <<EOF
-static __always_inline ${ret}
-raw_${atomic}_${pfx}dec${sfx}${order}(${atomic}_t *v)
-{
 	${retstmt}raw_${atomic}_${pfx}sub${sfx}${order}(1, v);
-}
 EOF
@@ -1,7 +1,3 @@
 cat <<EOF
-static __always_inline bool
-raw_${atomic}_dec_and_test(${atomic}_t *v)
-{
 	return raw_${atomic}_dec_return(v) == 0;
-}
 EOF
@@ -1,7 +1,4 @@
 cat <<EOF
-static __always_inline ${ret}
-raw_${atomic}_dec_if_positive(${atomic}_t *v)
-{
 	${int} dec, c = raw_${atomic}_read(v);

 	do {
@@ -11,5 +8,4 @@ raw_${atomic}_dec_if_positive(${atomic}_t *v)
 	} while (!raw_${atomic}_try_cmpxchg(v, &c, dec));

 	return dec;
-}
 EOF
@@ -1,7 +1,4 @@
 cat <<EOF
-static __always_inline bool
-raw_${atomic}_dec_unless_positive(${atomic}_t *v)
-{
 	${int} c = raw_${atomic}_read(v);

 	do {
@@ -10,5 +7,4 @@ raw_${atomic}_dec_unless_positive(${atomic}_t *v)
 	} while (!raw_${atomic}_try_cmpxchg(v, &c, c - 1));

 	return true;
-}
 EOF
@@ -1,11 +1,7 @@
 cat <<EOF
-static __always_inline ${ret}
-raw_${atomic}_${pfx}${name}${sfx}(${params})
-{
 	${ret} ret;
 	__atomic_pre_full_fence();
 	ret = arch_${atomic}_${pfx}${name}${sfx}_relaxed(${args});
 	__atomic_post_full_fence();
 	return ret;
-}
 EOF
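The fence template builds the fully-ordered variant by bracketing the
_relaxed op with full fences. Instantiated for atomic_fetch_add (a
sketch):

static __always_inline int
raw_atomic_fetch_add(int i, atomic_t *v)
{
	/* body emitted by the fence template: */
	int ret;
	__atomic_pre_full_fence();
	ret = arch_atomic_fetch_add_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
}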
@@ -1,7 +1,4 @@
 cat << EOF
-static __always_inline ${int}
-raw_${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u)
-{
 	${int} c = raw_${atomic}_read(v);

 	do {
@@ -10,5 +7,4 @@ raw_${atomic}_fetch_add_unless(${atomic}_t *v, ${int} a, ${int} u)
 	} while (!raw_${atomic}_try_cmpxchg(v, &c, c + a));

 	return c;
-}
 EOF
@@ -1,7 +1,3 @@
 cat <<EOF
-static __always_inline ${ret}
-raw_${atomic}_${pfx}inc${sfx}${order}(${atomic}_t *v)
-{
 	${retstmt}raw_${atomic}_${pfx}add${sfx}${order}(1, v);
-}
 EOF
@@ -1,7 +1,3 @@
 cat <<EOF
-static __always_inline bool
-raw_${atomic}_inc_and_test(${atomic}_t *v)
-{
 	return raw_${atomic}_inc_return(v) == 0;
-}
 EOF
@@ -1,7 +1,3 @@
 cat <<EOF
-static __always_inline bool
-raw_${atomic}_inc_not_zero(${atomic}_t *v)
-{
 	return raw_${atomic}_add_unless(v, 1, 0);
-}
 EOF
@@ -1,7 +1,4 @@
 cat <<EOF
-static __always_inline bool
-raw_${atomic}_inc_unless_negative(${atomic}_t *v)
-{
 	${int} c = raw_${atomic}_read(v);

 	do {
@@ -10,5 +7,4 @@ raw_${atomic}_inc_unless_negative(${atomic}_t *v)
 	} while (!raw_${atomic}_try_cmpxchg(v, &c, c + 1));

 	return true;
-}
 EOF
@@ -1,7 +1,4 @@
 cat <<EOF
-static __always_inline ${ret}
-raw_${atomic}_read_acquire(const ${atomic}_t *v)
-{
 	${int} ret;

 	if (__native_word(${atomic}_t)) {
@@ -12,5 +9,4 @@ raw_${atomic}_read_acquire(const ${atomic}_t *v)
 	}

 	return ret;
-}
 EOF
@@ -1,8 +1,4 @@
 cat <<EOF
-static __always_inline ${ret}
-raw_${atomic}_${pfx}${name}${sfx}_release(${params})
-{
 	__atomic_release_fence();
 	${retstmt}arch_${atomic}_${pfx}${name}${sfx}_relaxed(${args});
-}
 EOF
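The release counterpart simply issues the release fence ahead of the
_relaxed op. Instantiated for atomic_fetch_add_release (a sketch):

static __always_inline int
raw_atomic_fetch_add_release(int i, atomic_t *v)
{
	/* body emitted by the release template: */
	__atomic_release_fence();
	return arch_atomic_fetch_add_relaxed(i, v);
}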
@@ -1,12 +1,8 @@
 cat <<EOF
-static __always_inline void
-raw_${atomic}_set_release(${atomic}_t *v, ${int} i)
-{
 	if (__native_word(${atomic}_t)) {
 		smp_store_release(&(v)->counter, i);
 	} else {
 		__atomic_release_fence();
 		raw_${atomic}_set(v, i);
 	}
-}
 EOF
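set_release can use a plain release store when the atomic is a native
machine word; otherwise it fences and falls back to the raw set.
Instantiated for atomic_t (a sketch):

static __always_inline void
raw_atomic_set_release(atomic_t *v, int i)
{
	if (__native_word(atomic_t)) {
		smp_store_release(&(v)->counter, i);
	} else {
		__atomic_release_fence();
		raw_atomic_set(v, i);
	}
}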
@@ -1,7 +1,3 @@
 cat <<EOF
-static __always_inline bool
-raw_${atomic}_sub_and_test(${int} i, ${atomic}_t *v)
-{
 	return raw_${atomic}_sub_return(i, v) == 0;
-}
 EOF
@@ -1,11 +1,7 @@
 cat <<EOF
-static __always_inline bool
-raw_${atomic}_try_cmpxchg${order}(${atomic}_t *v, ${int} *old, ${int} new)
-{
 	${int} r, o = *old;
 	r = raw_${atomic}_cmpxchg${order}(v, o, new);
 	if (unlikely(r != o))
 		*old = r;
 	return likely(r == o);
-}
 EOF
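try_cmpxchg() returns whether the exchange happened and, on failure,
reports the value actually observed back through *old, which lets
callers retry without re-reading. Its fallback is built on plain
cmpxchg(); instantiated for atomic_t (a sketch):

static __always_inline bool
raw_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
	int r, o = *old;

	r = raw_atomic_cmpxchg(v, o, new);
	if (unlikely(r != o))
		*old = r;	/* failed: report the observed value */
	return likely(r == o);
}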
@@ -1,7 +1,3 @@
 cat <<EOF
-static __always_inline ${int}
-raw_${atomic}_xchg${order}(${atomic}_t *v, ${int} new)
-{
 	return raw_xchg${order}(&v->counter, new);
-}
 EOF
@@ -60,13 +60,23 @@ gen_proto_order_variant()
 	local name="$1"; shift
 	local sfx="$1"; shift
 	local order="$1"; shift
-	local atomic="$1"
+	local atomic="$1"; shift
+	local int="$1"; shift

 	local atomicname="${atomic}_${pfx}${name}${sfx}${order}"
 	local basename="${atomic}_${pfx}${name}${sfx}"

 	local template="$(find_fallback_template "${pfx}" "${name}" "${sfx}" "${order}")"

+	local ret="$(gen_ret_type "${meta}" "${int}")"
+	local retstmt="$(gen_ret_stmt "${meta}")"
+	local params="$(gen_params "${int}" "${atomic}" "$@")"
+	local args="$(gen_args "$@")"
+
+	printf "static __always_inline ${ret}\n"
+	printf "raw_${atomicname}(${params})\n"
+	printf "{\n"
+
 	# Where there is no possible fallback, this order variant is mandatory
 	# and must be provided by arch code. Add a comment to the header to
 	# make this obvious.
@@ -75,33 +85,35 @@ gen_proto_order_variant()
 	# define this order variant as a C function without a preprocessor
 	# symbol.
 	if [ -z ${template} ] && [ -z "${order}" ] && ! meta_has_relaxed "${meta}"; then
-		printf "#define raw_${atomicname} arch_${atomicname}\n\n"
+		printf "\t${retstmt}arch_${atomicname}(${args});\n"
+		printf "}\n\n"
 		return
 	fi

 	printf "#if defined(arch_${atomicname})\n"
-	printf "#define raw_${atomicname} arch_${atomicname}\n"
+	printf "\t${retstmt}arch_${atomicname}(${args});\n"

 	# Allow FULL/ACQUIRE/RELEASE ops to be defined in terms of RELAXED ops
 	if [ "${order}" != "_relaxed" ] && meta_has_relaxed "${meta}"; then
 		printf "#elif defined(arch_${basename}_relaxed)\n"
-		gen_order_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "$@"
+		gen_order_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "${atomic}" "${int}" "$@"
 	fi

 	# Allow ACQUIRE/RELEASE/RELAXED ops to be defined in terms of FULL ops
 	if [ ! -z "${order}" ]; then
 		printf "#elif defined(arch_${basename})\n"
-		printf "#define raw_${atomicname} arch_${basename}\n"
+		printf "\t${retstmt}arch_${basename}(${args});\n"
 	fi

 	printf "#else\n"
 	if [ ! -z "${template}" ]; then
-		gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "$@"
+		gen_proto_fallback "${meta}" "${pfx}" "${name}" "${sfx}" "${order}" "${atomic}" "${int}" "$@"
 	else
 		printf "#error \"Unable to define raw_${atomicname}\"\n"
 	fi

-	printf "#endif\n\n"
+	printf "#endif\n"
+	printf "}\n\n"
 }
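A consequence of the script change worth noting: mandatory ops (no
fallback template, no ordering variants), which previously became bare
#defines, are now also emitted as C functions without a preprocessor
symbol, so kerneldoc has somewhere to attach. For atomic_read the
generated header should now contain something like the following (a
sketch; arch_atomic_read() itself must still be provided by
architecture code):

/* Sketch of the mandatory-op branch's output: a plain C wrapper
 * rather than '#define raw_atomic_read arch_atomic_read'. */
static __always_inline int
raw_atomic_read(const atomic_t *v)
{
	return arch_atomic_read(v);
}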