linux-stable/security/selinux/ss/sidtab.h
GONG, Ruiqi abe3c63144 selinux: enable use of both GFP_KERNEL and GFP_ATOMIC in convert_context()
The following warning was triggered on a hardware environment:

  SELinux: Converting 162 SID table entries...
  BUG: sleeping function called from invalid context at
       __might_sleep+0x60/0x74 0x0
  in_atomic(): 1, irqs_disabled(): 128, non_block: 0, pid: 5943, name: tar
  CPU: 7 PID: 5943 Comm: tar Tainted: P O 5.10.0 #1
  Call trace:
   dump_backtrace+0x0/0x1c8
   show_stack+0x18/0x28
   dump_stack+0xe8/0x15c
   ___might_sleep+0x168/0x17c
   __might_sleep+0x60/0x74
   __kmalloc_track_caller+0xa0/0x7dc
   kstrdup+0x54/0xac
   convert_context+0x48/0x2e4
   sidtab_context_to_sid+0x1c4/0x36c
   security_context_to_sid_core+0x168/0x238
   security_context_to_sid_default+0x14/0x24
   inode_doinit_use_xattr+0x164/0x1e4
   inode_doinit_with_dentry+0x1c0/0x488
   selinux_d_instantiate+0x20/0x34
   security_d_instantiate+0x70/0xbc
   d_splice_alias+0x4c/0x3c0
   ext4_lookup+0x1d8/0x200 [ext4]
   __lookup_slow+0x12c/0x1e4
   walk_component+0x100/0x200
   path_lookupat+0x88/0x118
   filename_lookup+0x98/0x130
   user_path_at_empty+0x48/0x60
   vfs_statx+0x84/0x140
   vfs_fstatat+0x20/0x30
   __se_sys_newfstatat+0x30/0x74
   __arm64_sys_newfstatat+0x1c/0x2c
   el0_svc_common.constprop.0+0x100/0x184
   do_el0_svc+0x1c/0x2c
   el0_svc+0x20/0x34
   el0_sync_handler+0x80/0x17c
   el0_sync+0x13c/0x140
  SELinux: Context system_u:object_r:pssp_rsyslog_log_t:s0:c0 is
           not valid (left unmapped).

It was found that within the spin_lock_irqsave() critical section in
sidtab_context_to_sid(), convert_context() (hooked in via
sidtab_convert_params.func) may cause the process to sleep by allocating
memory with GFP_KERNEL, which is not allowed while the spinlock is held
with interrupts disabled.

As Ondrej pointed out [1], convert_context()/sidtab_convert_params.func
has another caller, sidtab_convert_tree(), for which sleeping with
GFP_KERNEL is fine. Therefore, fix the problem by adding a gfp_t argument
to convert_context()/sidtab_convert_params.func and passing GFP_KERNEL or
GFP_ATOMIC as appropriate in the individual callers.
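
The shape of the resulting code is roughly the following (an abridged
sketch for illustration; the exact surrounding logic and the local
variable names in services.c and sidtab.c are simplified here):

  /* services.c: the conversion callback gains a gfp_t parameter and
   * uses it for any allocation it has to make */
  static int convert_context(struct context *oldc, struct context *newc,
                             void *p, gfp_t gfp_flags)
  {
          ...
          s = kstrdup(oldc->str, gfp_flags);    /* was GFP_KERNEL */
          ...
  }

  /* sidtab.c, sidtab_convert_tree(): runs in process context outside
   * the spinlock, so sleeping allocations are fine */
  rc = convert->func(oldc, newc, convert->args, GFP_KERNEL);

  /* sidtab.c, sidtab_context_to_sid(): runs inside the
   * spin_lock_irqsave() critical section and therefore must not sleep */
  rc = convert->func(oldc, newc, convert->args, GFP_ATOMIC);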

Cc: stable@vger.kernel.org
Link: https://lore.kernel.org/all/20221018120111.1474581-1-gongruiqi1@huawei.com/ [1]
Reported-by: Tan Ninghao <tanninghao1@huawei.com>
Fixes: ee1a84fdfe ("selinux: overhaul sidtab to fix bug and improve performance")
Signed-off-by: GONG, Ruiqi <gongruiqi1@huawei.com>
Reviewed-by: Ondrej Mosnacek <omosnace@redhat.com>
[PM: wrap long BUG() output lines, tweak subject line]
Signed-off-by: Paul Moore <paul@paul-moore.com>
2022-10-19 09:55:53 -04:00

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * A security identifier table (sidtab) is a lookup table
 * of security context structures indexed by SID value.
 *
 * Original author: Stephen Smalley, <sds@tycho.nsa.gov>
 * Author: Ondrej Mosnacek, <omosnacek@gmail.com>
 *
 * Copyright (C) 2018 Red Hat, Inc.
 */
#ifndef _SS_SIDTAB_H_
#define _SS_SIDTAB_H_
#include <linux/spinlock_types.h>
#include <linux/log2.h>
#include <linux/hashtable.h>

#include "context.h"

struct sidtab_entry {
	u32 sid;
	u32 hash;
	struct context context;
#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
	struct sidtab_str_cache __rcu *cache;
#endif
	struct hlist_node list;
};

union sidtab_entry_inner {
	struct sidtab_node_inner *ptr_inner;
	struct sidtab_node_leaf  *ptr_leaf;
};

/* align node size to page boundary */
#define SIDTAB_NODE_ALLOC_SHIFT PAGE_SHIFT
#define SIDTAB_NODE_ALLOC_SIZE  PAGE_SIZE

#define size_to_shift(size) ((size) == 1 ? 1 : (const_ilog2((size) - 1) + 1))

#define SIDTAB_INNER_SHIFT \
	(SIDTAB_NODE_ALLOC_SHIFT - size_to_shift(sizeof(union sidtab_entry_inner)))
#define SIDTAB_INNER_ENTRIES ((size_t)1 << SIDTAB_INNER_SHIFT)
#define SIDTAB_LEAF_ENTRIES \
	(SIDTAB_NODE_ALLOC_SIZE / sizeof(struct sidtab_entry))

#define SIDTAB_MAX_BITS 32
#define SIDTAB_MAX U32_MAX
/* ensure enough tree levels for SIDTAB_MAX entries */
#define SIDTAB_MAX_LEVEL \
	DIV_ROUND_UP(SIDTAB_MAX_BITS - size_to_shift(SIDTAB_LEAF_ENTRIES), \
		     SIDTAB_INNER_SHIFT)

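/*
 * For example, on a 64-bit system with 4 KiB pages (an illustrative
 * assumption only): sizeof(union sidtab_entry_inner) == 8, so
 * SIDTAB_INNER_SHIFT == 12 - 3 == 9 and each inner node holds
 * 2^9 == 512 child slots; SIDTAB_MAX_LEVEL then rounds up
 * (32 - leaf shift) / 9 so that the inner levels plus one leaf level
 * can cover all 2^32 possible SID values.
 */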

struct sidtab_node_leaf {
	struct sidtab_entry entries[SIDTAB_LEAF_ENTRIES];
};

struct sidtab_node_inner {
	union sidtab_entry_inner entries[SIDTAB_INNER_ENTRIES];
};

struct sidtab_isid_entry {
	int set;
	struct sidtab_entry entry;
};

struct sidtab_convert_params {
	int (*func)(struct context *oldc, struct context *newc, void *args,
		    gfp_t gfp_flags);
	void *args;
	struct sidtab *target;
};

#define SIDTAB_HASH_BITS CONFIG_SECURITY_SELINUX_SIDTAB_HASH_BITS
#define SIDTAB_HASH_BUCKETS (1 << SIDTAB_HASH_BITS)

struct sidtab {
	/*
	 * lock-free read access only for as many items as a prior read of
	 * 'count'
	 */
	union sidtab_entry_inner roots[SIDTAB_MAX_LEVEL + 1];
	/*
	 * access atomically via {READ|WRITE}_ONCE(); only increment under
	 * spinlock
	 */
	u32 count;
	/* access only under spinlock */
	struct sidtab_convert_params *convert;
	bool frozen;
	spinlock_t lock;

#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
	/* SID -> context string cache */
	u32 cache_free_slots;
	struct list_head cache_lru_list;
	spinlock_t cache_lock;
#endif

	/* index == SID - 1 (no entry for SECSID_NULL) */
	struct sidtab_isid_entry isids[SECINITSID_NUM];

	/* Hash table for fast reverse context-to-sid lookups. */
	DECLARE_HASHTABLE(context_to_sid, SIDTAB_HASH_BITS);
};

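/*
 * A rough illustration of the lockless read pattern that the comments
 * inside struct sidtab describe (the authoritative logic lives in
 * sidtab.c): a writer holding 'lock' fully initializes entry number
 * 'count' first and only then publishes it via WRITE_ONCE(count,
 * count + 1); a lock-free reader does c = READ_ONCE(count) once and may
 * afterwards access only entries with index < c.
 */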

int sidtab_init(struct sidtab *s);
int sidtab_set_initial(struct sidtab *s, u32 sid, struct context *context);

struct sidtab_entry *sidtab_search_entry(struct sidtab *s, u32 sid);
struct sidtab_entry *sidtab_search_entry_force(struct sidtab *s, u32 sid);

static inline struct context *sidtab_search(struct sidtab *s, u32 sid)
{
	struct sidtab_entry *entry = sidtab_search_entry(s, sid);

	return entry ? &entry->context : NULL;
}

static inline struct context *sidtab_search_force(struct sidtab *s, u32 sid)
{
	struct sidtab_entry *entry = sidtab_search_entry_force(s, sid);

	return entry ? &entry->context : NULL;
}

int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params);

void sidtab_cancel_convert(struct sidtab *s);

void sidtab_freeze_begin(struct sidtab *s, unsigned long *flags) __acquires(&s->lock);
void sidtab_freeze_end(struct sidtab *s, unsigned long *flags) __releases(&s->lock);

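/*
 * sidtab_context_to_sid() maps a context to a SID, adding a new entry
 * if the context is not yet in the table.  While a policy conversion is
 * in progress, the new entry is converted in place under the sidtab
 * spinlock, which is why the convert callback runs with GFP_ATOMIC on
 * this path and with GFP_KERNEL only from the bulk walk in
 * sidtab_convert_tree() (see the commit message above).
 */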
int sidtab_context_to_sid(struct sidtab *s, struct context *context, u32 *sid);

void sidtab_destroy(struct sidtab *s);

int sidtab_hash_stats(struct sidtab *sidtab, char *page);

#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
void sidtab_sid2str_put(struct sidtab *s, struct sidtab_entry *entry,
			const char *str, u32 str_len);
int sidtab_sid2str_get(struct sidtab *s, struct sidtab_entry *entry,
		       char **out, u32 *out_len);
#else
static inline void sidtab_sid2str_put(struct sidtab *s,
				      struct sidtab_entry *entry,
				      const char *str, u32 str_len)
{
}
static inline int sidtab_sid2str_get(struct sidtab *s,
				     struct sidtab_entry *entry,
				     char **out, u32 *out_len)
{
	return -ENOENT;
}
#endif /* CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0 */

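/*
 * Note on the sidtab_sid2str_*() helpers above: with the cache compiled
 * out, sidtab_sid2str_put() is a no-op and sidtab_sid2str_get() always
 * reports a miss (-ENOENT), so callers must be able to rebuild the
 * context string themselves on every lookup and may call the _put()
 * helper afterwards to populate the cache when it is available (the
 * real implementations live in sidtab.c).
 */
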
#endif /* _SS_SIDTAB_H_ */