mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2024-12-28 16:56:26 +00:00
11512c197d
In commit 15d9da3f81
("binder: use bitmap for faster descriptor lookup"), it was incorrectly assumed that references to the context manager node should always get descriptor zero assigned to them. However, if the context manager dies and a new process takes its place, then assigning descriptor zero to the new context manager might lead to collisions, as there could still be references to the older node. This issue was reported by syzbot with the following trace: kernel BUG at drivers/android/binder.c:1173! Internal error: Oops - BUG: 00000000f2000800 [#1] PREEMPT SMP Modules linked in: CPU: 1 PID: 447 Comm: binder-util Not tainted 6.10.0-rc6-00348-g31643d84b8c3 #10 Hardware name: linux,dummy-virt (DT) pstate: 60000005 (nZCv daif -PAN -UAO -TCO -DIT -SSBS BTYPE=--) pc : binder_inc_ref_for_node+0x500/0x544 lr : binder_inc_ref_for_node+0x1e4/0x544 sp : ffff80008112b940 x29: ffff80008112b940 x28: ffff0e0e40310780 x27: 0000000000000000 x26: 0000000000000001 x25: ffff0e0e40310738 x24: ffff0e0e4089ba34 x23: ffff0e0e40310b00 x22: ffff80008112bb50 x21: ffffaf7b8f246970 x20: ffffaf7b8f773f08 x19: ffff0e0e4089b800 x18: 0000000000000000 x17: 0000000000000000 x16: 0000000000000000 x15: 000000002de4aa60 x14: 0000000000000000 x13: 2de4acf000000000 x12: 0000000000000020 x11: 0000000000000018 x10: 0000000000000020 x9 : ffffaf7b90601000 x8 : ffff0e0e48739140 x7 : 0000000000000000 x6 : 000000000000003f x5 : ffff0e0e40310b28 x4 : 0000000000000000 x3 : ffff0e0e40310720 x2 : ffff0e0e40310728 x1 : 0000000000000000 x0 : ffff0e0e40310710 Call trace: binder_inc_ref_for_node+0x500/0x544 binder_transaction+0xf68/0x2620 binder_thread_write+0x5bc/0x139c binder_ioctl+0xef4/0x10c8 [...] This patch adds back the previous behavior of assigning the next non-zero descriptor if references to previous context managers still exist. It amends both strategies, the newer dbitmap code and also the legacy slow_desc_lookup_olocked(), by allowing them to start looking for available descriptors at a given offset. Fixes:15d9da3f81
("binder: use bitmap for faster descriptor lookup") Cc: stable@vger.kernel.org Reported-and-tested-by: syzbot+3dae065ca76952a67257@syzkaller.appspotmail.com Closes: https://lore.kernel.org/all/000000000000c1c0a0061d1e6979@google.com/ Reviewed-by: Alice Ryhl <aliceryhl@google.com> Signed-off-by: Carlos Llamas <cmllamas@google.com> Link: https://lore.kernel.org/r/20240722150512.4192473-1-cmllamas@google.com Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
169 lines
4.1 KiB
C
169 lines
4.1 KiB
C
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2024 Google LLC
 *
 * dbitmap - dynamically sized bitmap library.
 *
 * Used by the binder driver to optimize the allocation of the smallest
 * available descriptor ID. Each bit in the bitmap represents the state
 * of an ID.
 *
 * A dbitmap can grow or shrink as needed. This part has been designed
 * considering that users might need to briefly release their locks in
 * order to allocate memory for the new bitmap. These operations then,
 * are verified to determine if the grow or shrink is still valid.
 *
 * This library does not provide protection against concurrent access
 * by itself. Binder uses the proc->outer_lock for this purpose.
 */
#ifndef _LINUX_DBITMAP_H
|
|
#define _LINUX_DBITMAP_H
|
|
#include <linux/bitmap.h>
|
|
|
|
#define NBITS_MIN BITS_PER_TYPE(unsigned long)
|
|
|
|
/*
 * Dynamically sized bitmap tracking in-use descriptor IDs.
 * A @nbits of zero means the dbitmap is disabled.
 */
struct dbitmap {
	unsigned int nbits;	/* current capacity in bits; 0 when disabled */
	unsigned long *map;	/* storage, allocated via bitmap_zalloc() */
};
static inline int dbitmap_enabled(struct dbitmap *dmap)
|
|
{
|
|
return !!dmap->nbits;
|
|
}
|
|
|
|
static inline void dbitmap_free(struct dbitmap *dmap)
|
|
{
|
|
dmap->nbits = 0;
|
|
kfree(dmap->map);
|
|
}
|
|
|
|
/* Returns the nbits that a dbitmap can shrink to, 0 if not possible. */
|
|
static inline unsigned int dbitmap_shrink_nbits(struct dbitmap *dmap)
|
|
{
|
|
unsigned int bit;
|
|
|
|
if (dmap->nbits <= NBITS_MIN)
|
|
return 0;
|
|
|
|
/*
|
|
* Determine if the bitmap can shrink based on the position of
|
|
* its last set bit. If the bit is within the first quarter of
|
|
* the bitmap then shrinking is possible. In this case, the
|
|
* bitmap should shrink to half its current size.
|
|
*/
|
|
bit = find_last_bit(dmap->map, dmap->nbits);
|
|
if (bit < (dmap->nbits >> 2))
|
|
return dmap->nbits >> 1;
|
|
|
|
/* find_last_bit() returns dmap->nbits when no bits are set. */
|
|
if (bit == dmap->nbits)
|
|
return NBITS_MIN;
|
|
|
|
return 0;
|
|
}
|
|
|
|
/* Replace the internal bitmap with a new one of different size */
|
|
static inline void
|
|
dbitmap_replace(struct dbitmap *dmap, unsigned long *new, unsigned int nbits)
|
|
{
|
|
bitmap_copy(new, dmap->map, min(dmap->nbits, nbits));
|
|
kfree(dmap->map);
|
|
dmap->map = new;
|
|
dmap->nbits = nbits;
|
|
}
|
|
|
|
/*
 * Install @new as a smaller bitmap of @nbits bits, provided the shrink
 * target computed earlier is still accurate. The @new bitmap might have
 * been allocated without locks, so this call could now be outdated; in
 * that case @new is simply freed. kfree(NULL) is a no-op, so a failed
 * allocation falls through harmlessly as well.
 */
static inline void
dbitmap_shrink(struct dbitmap *dmap, unsigned long *new, unsigned int nbits)
{
	if (new && dbitmap_enabled(dmap) && dbitmap_shrink_nbits(dmap) == nbits) {
		dbitmap_replace(dmap, new, nbits);
		return;
	}

	kfree(new);
}
/* Returns the nbits that a dbitmap can grow to. */
|
|
static inline unsigned int dbitmap_grow_nbits(struct dbitmap *dmap)
|
|
{
|
|
return dmap->nbits << 1;
|
|
}
|
|
|
|
static inline void
|
|
dbitmap_grow(struct dbitmap *dmap, unsigned long *new, unsigned int nbits)
|
|
{
|
|
/*
|
|
* Verify that growing to @nbits is still possible. The @new
|
|
* bitmap might have been allocated without locks, so this call
|
|
* could now be outdated. In this case, free @new and move on.
|
|
*/
|
|
if (!dbitmap_enabled(dmap) || nbits <= dmap->nbits) {
|
|
kfree(new);
|
|
return;
|
|
}
|
|
|
|
/*
|
|
* Check for ENOMEM after confirming the grow operation is still
|
|
* required. This ensures we only disable the dbitmap when it's
|
|
* necessary. Once the dbitmap is disabled, binder will fallback
|
|
* to slow_desc_lookup_olocked().
|
|
*/
|
|
if (!new) {
|
|
dbitmap_free(dmap);
|
|
return;
|
|
}
|
|
|
|
dbitmap_replace(dmap, new, nbits);
|
|
}
|
|
|
|
/*
|
|
* Finds and sets the next zero bit in the bitmap. Upon success @bit
|
|
* is populated with the index and 0 is returned. Otherwise, -ENOSPC
|
|
* is returned to indicate that a dbitmap_grow() is needed.
|
|
*/
|
|
static inline int
|
|
dbitmap_acquire_next_zero_bit(struct dbitmap *dmap, unsigned long offset,
|
|
unsigned long *bit)
|
|
{
|
|
unsigned long n;
|
|
|
|
n = find_next_zero_bit(dmap->map, dmap->nbits, offset);
|
|
if (n == dmap->nbits)
|
|
return -ENOSPC;
|
|
|
|
*bit = n;
|
|
set_bit(n, dmap->map);
|
|
|
|
return 0;
|
|
}
|
|
|
|
/* Release descriptor ID @bit so it can be handed out again. */
static inline void
dbitmap_clear_bit(struct dbitmap *dmap, unsigned long bit)
{
	clear_bit(bit, dmap->map);
}
static inline int dbitmap_init(struct dbitmap *dmap)
|
|
{
|
|
dmap->map = bitmap_zalloc(NBITS_MIN, GFP_KERNEL);
|
|
if (!dmap->map) {
|
|
dmap->nbits = 0;
|
|
return -ENOMEM;
|
|
}
|
|
|
|
dmap->nbits = NBITS_MIN;
|
|
|
|
return 0;
|
|
}
|
|
#endif
|