Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
Synced 2024-12-29 17:25:38 +00:00
Merge branch 'akpm' (patches from Andrew)
Merge more updates from Andrew Morton:
 "Various misc subsystems, before getting into the post-linux-next
  material. 41 patches.

  Subsystems affected by this patch series: procfs, misc, core-kernel,
  lib, checkpatch, init, pipe, minix, fat, cgroups, kexec, kdump,
  taskstats, panic, kcov, resource, and ubsan"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (41 commits)
  Revert "ubsan, kcsan: Don't combine sanitizer with kcov on clang"
  kernel/resource: fix kfree() of bootmem memory again
  kcov: properly handle subsequent mmap calls
  kcov: split ioctl handling into locked and unlocked parts
  panic: move panic_print before kmsg dumpers
  panic: add option to dump all CPUs backtraces in panic_print
  docs: sysctl/kernel: add missing bit to panic_print
  taskstats: remove unneeded dead assignment
  kasan: no need to unset panic_on_warn in end_report()
  ubsan: no need to unset panic_on_warn in ubsan_epilogue()
  panic: unset panic_on_warn inside panic()
  docs: kdump: add scp example to write out the dump file
  docs: kdump: update description about sysfs file system support
  arm64: mm: use IS_ENABLED(CONFIG_KEXEC_CORE) instead of #ifdef
  x86/setup: use IS_ENABLED(CONFIG_KEXEC_CORE) instead of #ifdef
  riscv: mm: init: use IS_ENABLED(CONFIG_KEXEC_CORE) instead of #ifdef
  kexec: make crashk_res, crashk_low_res and crash_notes symbols always visible
  cgroup: use irqsave in cgroup_rstat_flush_locked().
  fat: use pointer to simple type in put_user()
  minix: fix bug when opening a file with O_DIRECT
  ...
commit 52deda9551
@@ -146,9 +146,9 @@ System kernel config options

     CONFIG_SYSFS=y

   Note that "sysfs file system support" might not appear in the "Pseudo
   filesystems" menu if "Configure standard kernel features (for small
   systems)" is not enabled in "General Setup." In this case, check the
   .config file itself to ensure that sysfs is turned on, as follows::
   filesystems" menu if "Configure standard kernel features (expert users)"
   is not enabled in "General Setup." In this case, check the .config file
   itself to ensure that sysfs is turned on, as follows::

     grep 'CONFIG_SYSFS' .config

@@ -533,6 +533,10 @@ the following command::

   cp /proc/vmcore <dump-file>

or use scp to write out the dump file between hosts on a network, e.g::

   scp /proc/vmcore remote_username@remote_ip:<dump-file>

You can also use makedumpfile utility to write out the dump file
with specified options to filter out unwanted contents, e.g::
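For reference, a typical makedumpfile invocation looks like the following (illustrative flags only; the dump level, compression and message level depend on what you want to exclude, see makedumpfile(8))::

   makedumpfile -l --message-level 1 -d 31 /proc/vmcore <dump-file>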
@@ -3792,6 +3792,11 @@
            bit 3: print locks info if CONFIG_LOCKDEP is on
            bit 4: print ftrace buffer
            bit 5: print all printk messages in buffer
            bit 6: print all CPUs backtrace (if available in the arch)
            *Be aware* that this option may print a _lot_ of lines,
            so there are risks of losing older messages in the log.
            Use this option carefully, maybe worth to setup a
            bigger log buffer with "log_buf_len" along with this.

    panic_on_taint= Bitmask for conditionally calling panic() in add_taint()
            Format: <hex>[,nousertaint]
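As a worked example of combining these bits on the boot command line (derived from the bit assignments in this entry, assuming bit 0 is the task-info bit listed above the hunk context), task info plus all-CPU backtraces is 0x1 | 0x40::

    panic_print=0x41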
@@ -763,6 +763,8 @@ bit 1 print system memory info
bit 2 print timer info
bit 3 print locks info if ``CONFIG_LOCKDEP`` is on
bit 4 print ftrace buffer
bit 5 print all printk messages in buffer
bit 6 print all CPUs backtrace (if available in the arch)
===== ============================================

So for example to print tasks and memory info on panic, user can::
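The command this sentence is leading up to follows directly from the table (bit 0, tasks, sits just above the hunk context; together with bit 1, memory, the value is 3); a sketch::

  echo 3 > /proc/sys/kernel/panic_print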
@@ -100,3 +100,5 @@ have already built it.

The optional make variable CF can be used to pass arguments to sparse. The
build system passes -Wbitwise to sparse automatically.

Note that sparse defines the __CHECKER__ preprocessor symbol.
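For instance, an extra warning flag (or a -D define) can be passed through CF on a sparse-enabled build; a sketch, assuming -Wsparse-all is the option you want::

  make C=2 CF="-Wsparse-all" kernel/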
@@ -90,7 +90,6 @@ phys_addr_t __ro_after_init arm64_dma_phys_limit;
phys_addr_t __ro_after_init arm64_dma_phys_limit = PHYS_MASK + 1;
#endif

#ifdef CONFIG_KEXEC_CORE
/*
 * reserve_crashkernel() - reserves memory for crash kernel
 *
@@ -104,6 +103,9 @@ static void __init reserve_crashkernel(void)
    unsigned long long crash_max = arm64_dma_phys_limit;
    int ret;

    if (!IS_ENABLED(CONFIG_KEXEC_CORE))
        return;

    ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
                &crash_size, &crash_base);
    /* no crashkernel= or invalid value specified */
@@ -136,11 +138,6 @@ static void __init reserve_crashkernel(void)
    crashk_res.start = crash_base;
    crashk_res.end = crash_base + crash_size - 1;
}
#else
static void __init reserve_crashkernel(void)
{
}
#endif /* CONFIG_KEXEC_CORE */

/*
 * Return the maximum physical address for a zone accessible by the given bits
@@ -957,7 +957,6 @@ static inline void setup_vm_final(void)
}
#endif /* CONFIG_MMU */

#ifdef CONFIG_KEXEC_CORE
/*
 * reserve_crashkernel() - reserves memory for crash kernel
 *
@@ -974,6 +973,8 @@ static void __init reserve_crashkernel(void)

    int ret = 0;

    if (!IS_ENABLED(CONFIG_KEXEC_CORE))
        return;
    /*
     * Don't reserve a region for a crash kernel on a crash kernel
     * since it doesn't make much sense and we have limited memory
@@ -1023,7 +1024,6 @@ static void __init reserve_crashkernel(void)
    crashk_res.start = crash_base;
    crashk_res.end = crash_base + crash_size - 1;
}
#endif /* CONFIG_KEXEC_CORE */

void __init paging_init(void)
{
@@ -1037,9 +1037,7 @@ void __init misc_mem_init(void)
    arch_numa_init();
    sparse_init();
    zone_sizes_init();
#ifdef CONFIG_KEXEC_CORE
    reserve_crashkernel();
#endif
    memblock_dump_all();
}
@@ -411,8 +411,6 @@ static void __init memblock_x86_reserve_range_setup_data(void)
 * --------- Crashkernel reservation ------------------------------
 */

#ifdef CONFIG_KEXEC_CORE

/* 16M alignment for crash kernel regions */
#define CRASH_ALIGN        SZ_16M

@@ -490,6 +488,9 @@ static void __init reserve_crashkernel(void)
    bool high = false;
    int ret;

    if (!IS_ENABLED(CONFIG_KEXEC_CORE))
        return;

    total_mem = memblock_phys_mem_size();

    /* crashkernel=XM */
@@ -555,11 +556,6 @@ static void __init reserve_crashkernel(void)
    crashk_res.end = crash_base + crash_size - 1;
    insert_resource(&iomem_resource, &crashk_res);
}
#else
static void __init reserve_crashkernel(void)
{
}
#endif

static struct resource standard_io_resources[] = {
    { .name = "dma1", .start = 0x00, .end = 0x1f,
@@ -722,7 +722,7 @@ static int func(struct dir_context *ctx, const char *name, int name_len, \
    if (name_len >= sizeof(d1->d_name))            \
        name_len = sizeof(d1->d_name) - 1;        \
                            \
    if (put_user(0, d2->d_name)        ||        \
    if (put_user(0, &d2->d_name[0])        ||        \
        put_user(0, &d2->d_reclen)        ||        \
        copy_to_user(d1->d_name, name, name_len) ||    \
        put_user(0, d1->d_name + name_len)    ||        \
@@ -448,7 +448,8 @@ static const struct address_space_operations minix_aops = {
    .writepage = minix_writepage,
    .write_begin = minix_write_begin,
    .write_end = generic_write_end,
    .bmap = minix_bmap
    .bmap = minix_bmap,
    .direct_IO = noop_direct_IO
};

static const struct inode_operations minix_symlink_inode_operations = {
fs/pipe.c (13 lines changed)
@@ -607,7 +607,7 @@ pipe_write(struct kiocb *iocb, struct iov_iter *from)
static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    struct pipe_inode_info *pipe = filp->private_data;
    int count, head, tail, mask;
    unsigned int count, head, tail, mask;

    switch (cmd) {
    case FIONREAD:
@@ -804,7 +804,7 @@ struct pipe_inode_info *alloc_pipe_info(void)
    if (too_many_pipe_buffers_hard(user_bufs) && pipe_is_unprivileged_user())
        goto out_revert_acct;

    pipe->bufs = kcalloc(pipe_bufs, sizeof(struct pipe_buffer),
    pipe->bufs = kvcalloc(pipe_bufs, sizeof(struct pipe_buffer),
                 GFP_KERNEL_ACCOUNT);

    if (pipe->bufs) {
@@ -829,7 +829,7 @@ struct pipe_inode_info *alloc_pipe_info(void)

void free_pipe_info(struct pipe_inode_info *pipe)
{
    int i;
    unsigned int i;

#ifdef CONFIG_WATCH_QUEUE
    if (pipe->watch_queue)
@@ -849,7 +849,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
#endif
    if (pipe->tmp_page)
        __free_page(pipe->tmp_page);
    kfree(pipe->bufs);
    kvfree(pipe->bufs);
    kfree(pipe);
}

@@ -1264,8 +1264,7 @@ int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
    if (nr_slots < n)
        return -EBUSY;

    bufs = kcalloc(nr_slots, sizeof(*bufs),
               GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
    bufs = kvcalloc(nr_slots, sizeof(*bufs), GFP_KERNEL_ACCOUNT);
    if (unlikely(!bufs))
        return -ENOMEM;

@@ -1292,7 +1291,7 @@ int pipe_resize_ring(struct pipe_inode_info *pipe, unsigned int nr_slots)
    head = n;
    tail = 0;

    kfree(pipe->bufs);
    kvfree(pipe->bufs);
    pipe->bufs = bufs;
    pipe->ring_size = nr_slots;
    if (pipe->max_usage > nr_slots)
@@ -1764,25 +1764,25 @@ static const char *proc_pid_get_link(struct dentry *dentry,

static int do_proc_readlink(struct path *path, char __user *buffer, int buflen)
{
    char *tmp = (char *)__get_free_page(GFP_KERNEL);
    char *tmp = kmalloc(PATH_MAX, GFP_KERNEL);
    char *pathname;
    int len;

    if (!tmp)
        return -ENOMEM;

    pathname = d_path(path, tmp, PAGE_SIZE);
    pathname = d_path(path, tmp, PATH_MAX);
    len = PTR_ERR(pathname);
    if (IS_ERR(pathname))
        goto out;
    len = tmp + PAGE_SIZE - 1 - pathname;
    len = tmp + PATH_MAX - 1 - pathname;

    if (len > buflen)
        len = buflen;
    if (copy_to_user(buffer, pathname, len))
        len = -EFAULT;
 out:
    free_page((unsigned long)tmp);
    kfree(tmp);
    return len;
}
@@ -62,7 +62,8 @@ core_param(novmcoredd, vmcoredd_disabled, bool, 0);
/* Device Dump Size */
static size_t vmcoredd_orig_sz;

static DECLARE_RWSEM(vmcore_cb_rwsem);
static DEFINE_SPINLOCK(vmcore_cb_lock);
DEFINE_STATIC_SRCU(vmcore_cb_srcu);
/* List of registered vmcore callbacks. */
static LIST_HEAD(vmcore_cb_list);
/* Whether the vmcore has been opened once. */
@@ -70,8 +71,8 @@ static bool vmcore_opened;

void register_vmcore_cb(struct vmcore_cb *cb)
{
    down_write(&vmcore_cb_rwsem);
    INIT_LIST_HEAD(&cb->next);
    spin_lock(&vmcore_cb_lock);
    list_add_tail(&cb->next, &vmcore_cb_list);
    /*
     * Registering a vmcore callback after the vmcore was opened is
@@ -79,14 +80,14 @@ void register_vmcore_cb(struct vmcore_cb *cb)
     */
    if (vmcore_opened)
        pr_warn_once("Unexpected vmcore callback registration\n");
    up_write(&vmcore_cb_rwsem);
    spin_unlock(&vmcore_cb_lock);
}
EXPORT_SYMBOL_GPL(register_vmcore_cb);

void unregister_vmcore_cb(struct vmcore_cb *cb)
{
    down_write(&vmcore_cb_rwsem);
    list_del(&cb->next);
    spin_lock(&vmcore_cb_lock);
    list_del_rcu(&cb->next);
    /*
     * Unregistering a vmcore callback after the vmcore was opened is
     * very unusual (e.g., forced driver removal), but we cannot stop
@@ -94,7 +95,9 @@ void unregister_vmcore_cb(struct vmcore_cb *cb)
     */
    if (vmcore_opened)
        pr_warn_once("Unexpected vmcore callback unregistration\n");
    up_write(&vmcore_cb_rwsem);
    spin_unlock(&vmcore_cb_lock);

    synchronize_srcu(&vmcore_cb_srcu);
}
EXPORT_SYMBOL_GPL(unregister_vmcore_cb);

@@ -103,9 +106,8 @@ static bool pfn_is_ram(unsigned long pfn)
    struct vmcore_cb *cb;
    bool ret = true;

    lockdep_assert_held_read(&vmcore_cb_rwsem);

    list_for_each_entry(cb, &vmcore_cb_list, next) {
    list_for_each_entry_srcu(cb, &vmcore_cb_list, next,
                 srcu_read_lock_held(&vmcore_cb_srcu)) {
        if (unlikely(!cb->pfn_is_ram))
            continue;
        ret = cb->pfn_is_ram(cb, pfn);
@@ -118,9 +120,9 @@ static bool pfn_is_ram(unsigned long pfn)

static int open_vmcore(struct inode *inode, struct file *file)
{
    down_read(&vmcore_cb_rwsem);
    spin_lock(&vmcore_cb_lock);
    vmcore_opened = true;
    up_read(&vmcore_cb_rwsem);
    spin_unlock(&vmcore_cb_lock);

    return 0;
}
@@ -133,6 +135,7 @@ ssize_t read_from_oldmem(char *buf, size_t count,
    unsigned long pfn, offset;
    size_t nr_bytes;
    ssize_t read = 0, tmp;
    int idx;

    if (!count)
        return 0;
@@ -140,7 +143,7 @@ ssize_t read_from_oldmem(char *buf, size_t count,
    offset = (unsigned long)(*ppos % PAGE_SIZE);
    pfn = (unsigned long)(*ppos / PAGE_SIZE);

    down_read(&vmcore_cb_rwsem);
    idx = srcu_read_lock(&vmcore_cb_srcu);
    do {
        if (count > (PAGE_SIZE - offset))
            nr_bytes = PAGE_SIZE - offset;
@@ -165,7 +168,7 @@ ssize_t read_from_oldmem(char *buf, size_t count,
                           offset, userbuf);
        }
        if (tmp < 0) {
            up_read(&vmcore_cb_rwsem);
            srcu_read_unlock(&vmcore_cb_srcu, idx);
            return tmp;
        }

@@ -176,8 +179,8 @@ ssize_t read_from_oldmem(char *buf, size_t count,
        ++pfn;
        offset = 0;
    } while (count);
    srcu_read_unlock(&vmcore_cb_srcu, idx);

    up_read(&vmcore_cb_rwsem);
    return read;
}

@@ -477,7 +480,7 @@ static const struct vm_operations_struct vmcore_mmap_ops = {

/**
 * vmcore_alloc_buf - allocate buffer in vmalloc memory
 * @sizez: size of buffer
 * @size: size of buffer
 *
 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
 * the buffer to user-space by means of remap_vmalloc_range().
@@ -568,18 +571,18 @@ static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
                   unsigned long from, unsigned long pfn,
                   unsigned long size, pgprot_t prot)
{
    int ret;
    int ret, idx;

    /*
     * Check if oldmem_pfn_is_ram was registered to avoid
     * looping over all pages without a reason.
     * Check if a callback was registered to avoid looping over all
     * pages without a reason.
     */
    down_read(&vmcore_cb_rwsem);
    idx = srcu_read_lock(&vmcore_cb_srcu);
    if (!list_empty(&vmcore_cb_list))
        ret = remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
    else
        ret = remap_oldmem_pfn_range(vma, from, pfn, size, prot);
    up_read(&vmcore_cb_rwsem);
    srcu_read_unlock(&vmcore_cb_srcu, idx);
    return ret;
}
@@ -19,6 +19,9 @@
 *
 * Example:
 *
 *  #include <linux/bitfield.h>
 *  #include <linux/bits.h>
 *
 *  #define REG_FIELD_A  GENMASK(6, 0)
 *  #define REG_FIELD_B  BIT(7)
 *  #define REG_FIELD_C  GENMASK(15, 8)
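A brief sketch of how fields declared this way are packed and unpacked with the FIELD_PREP()/FIELD_GET() helpers from the same header (values are illustrative, not part of the patch):

    u32 reg = FIELD_PREP(REG_FIELD_A, 0x2a) |    /* place 0x2a into bits 6:0 */
          FIELD_PREP(REG_FIELD_B, 1);        /* set bit 7 */
    u32 a = FIELD_GET(REG_FIELD_A, reg);        /* a == 0x2a again */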
@@ -11,6 +11,7 @@
# define BTF_TYPE_TAG(value) /* nothing */
#endif

/* sparse defines __CHECKER__; see Documentation/dev-tools/sparse.rst */
#ifdef __CHECKER__
/* address spaces */
# define __kernel    __attribute__((address_space(0)))
@@ -144,8 +145,6 @@ struct ftrace_likely_data {
 */
#define __naked            __attribute__((__naked__)) notrace

#define __compiler_offsetof(a, b)    __builtin_offsetof(a, b)

/*
 * Prefer gnu_inline, so that extern inline functions do not emit an
 * externally visible function. This makes extern inline behave as per gnu89
@@ -320,12 +320,19 @@ struct obs_kernel_param {
        __aligned(__alignof__(struct obs_kernel_param))        \
        = { __setup_str_##unique_id, fn, early }

/*
 * NOTE: __setup functions return values:
 * @fn returns 1 (or non-zero) if the option argument is "handled"
 * and returns 0 if the option argument is "not handled".
 */
#define __setup(str, fn)                        \
    __setup_param(str, fn, fn, 0)

/*
 * NOTE: fn is as per module_param, not __setup!
 * Emits warning if fn returns non-zero.
 * NOTE: @fn is as per module_param, not __setup!
 * I.e., @fn returns 0 for no error or non-zero for error
 * (possibly @fn returns a -errno value, but it does not matter).
 * Emits warning if @fn returns non-zero.
 */
#define early_param(str, fn)                        \
    __setup_param(str, fn, fn, 1)
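To make the documented return-value contract concrete, a minimal hypothetical __setup handler (option name and variable invented for illustration) looks like this:

    static bool foo_enabled __initdata;

    static int __init foo_setup(char *str)
    {
        foo_enabled = true;
        return 1;    /* handled; returning 0 would mean "not handled" */
    }
    __setup("foo", foo_setup);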
@@ -20,6 +20,12 @@

#include <uapi/linux/kexec.h>

/* Location of a reserved region to hold the crash kernel.
 */
extern struct resource crashk_res;
extern struct resource crashk_low_res;
extern note_buf_t __percpu *crash_notes;

#ifdef CONFIG_KEXEC_CORE
#include <linux/list.h>
#include <linux/compat.h>
@@ -350,12 +356,6 @@ extern int kexec_load_disabled;
#define KEXEC_FILE_FLAGS    (KEXEC_FILE_UNLOAD | KEXEC_FILE_ON_CRASH | \
                 KEXEC_FILE_NO_INITRAMFS)

/* Location of a reserved region to hold the crash kernel.
 */
extern struct resource crashk_res;
extern struct resource crashk_low_res;
extern note_buf_t __percpu *crash_notes;

/* flag to track if kexec reboot is in progress */
extern bool kexec_in_progress;
@@ -18,7 +18,7 @@
 * - the arch is not required to handle n==0 if implementing the fallback
 */
#ifndef CONFIG_ARCH_HAS_ILOG2_U32
static inline __attribute__((const))
static __always_inline __attribute__((const))
int __ilog2_u32(u32 n)
{
    return fls(n) - 1;
@@ -26,7 +26,7 @@ int __ilog2_u32(u32 n)
#endif

#ifndef CONFIG_ARCH_HAS_ILOG2_U64
static inline __attribute__((const))
static __always_inline __attribute__((const))
int __ilog2_u64(u64 n)
{
    return fls64(n) - 1;
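For orientation, both helpers return the floor of log2 of their argument; a few illustrative values (worked out from the fls()/fls64() definitions above):

    __ilog2_u32(1);        /* == 0 */
    __ilog2_u32(32);    /* == 5 */
    __ilog2_u64(1000);    /* == 9, since 512 <= 1000 < 1024 */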
@@ -13,11 +13,7 @@ enum {
};

#undef offsetof
#ifdef __compiler_offsetof
#define offsetof(TYPE, MEMBER)    __compiler_offsetof(TYPE, MEMBER)
#else
#define offsetof(TYPE, MEMBER)    ((size_t)&((TYPE *)0)->MEMBER)
#endif
#define offsetof(TYPE, MEMBER)    __builtin_offsetof(TYPE, MEMBER)

/**
 * sizeof_field() - Report the size of a struct field in bytes
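Both the old and the new definition yield the same result; a tiny illustration on a hypothetical struct:

    struct point {
        int x;
        int y;
    };

    /* offsetof(struct point, y) == sizeof(int) on common ABIs */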
@@ -19,12 +19,12 @@
 * any application/library that wants linux/types.h.
 */

/* sparse defines __CHECKER__; see Documentation/dev-tools/sparse.rst */
#ifdef __CHECKER__
#define __bitwise__ __attribute__((bitwise))
#define __bitwise __attribute__((bitwise))
#else
#define __bitwise__
#define __bitwise
#endif
#define __bitwise __bitwise__

typedef __u16 __bitwise __le16;
typedef __u16 __bitwise __be16;
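As a reminder of what __bitwise buys when the code is run through sparse, a small sketch (hypothetical snippet, not from the patch):

    __le16 v;

    v = 5;            /* sparse warns: plain integer assigned to a __bitwise type */
    v = cpu_to_le16(5);    /* clean: the conversion goes through the helper */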
init/main.c (14 lines changed)
@@ -1192,7 +1192,7 @@ static int __init initcall_blacklist(char *str)
        }
    } while (str_entry);

    return 0;
    return 1;
}

static bool __init_or_module initcall_blacklisted(initcall_t fn)
@@ -1248,15 +1248,11 @@ trace_initcall_start_cb(void *data, initcall_t fn)
static __init_or_module void
trace_initcall_finish_cb(void *data, initcall_t fn, int ret)
{
    ktime_t *calltime = (ktime_t *)data;
    ktime_t delta, rettime;
    unsigned long long duration;
    ktime_t rettime, *calltime = (ktime_t *)data;

    rettime = ktime_get();
    delta = ktime_sub(rettime, *calltime);
    duration = (unsigned long long) ktime_to_ns(delta) >> 10;
    printk(KERN_DEBUG "initcall %pS returned %d after %lld usecs\n",
         fn, ret, duration);
         fn, ret, (unsigned long long)ktime_us_delta(rettime, *calltime));
}

static ktime_t initcall_calltime;
@@ -1454,7 +1450,9 @@ static noinline void __init kernel_init_freeable(void);
bool rodata_enabled __ro_after_init = true;
static int __init set_debug_rodata(char *str)
{
    return strtobool(str, &rodata_enabled);
    if (strtobool(str, &rodata_enabled))
        pr_warn("Invalid option string for rodata: '%s'\n", str);
    return 1;
}
__setup("rodata=", set_debug_rodata);
#endif
@@ -153,8 +153,17 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep)
        raw_spinlock_t *cpu_lock = per_cpu_ptr(&cgroup_rstat_cpu_lock,
                               cpu);
        struct cgroup *pos = NULL;
        unsigned long flags;

        raw_spin_lock(cpu_lock);
        /*
         * The _irqsave() is needed because cgroup_rstat_lock is
         * spinlock_t which is a sleeping lock on PREEMPT_RT. Acquiring
         * this lock with the _irq() suffix only disables interrupts on
         * a non-PREEMPT_RT kernel. The raw_spinlock_t below disables
         * interrupts on both configurations. The _irqsave() ensures
         * that interrupts are always disabled and later restored.
         */
        raw_spin_lock_irqsave(cpu_lock, flags);
        while ((pos = cgroup_rstat_cpu_pop_updated(pos, cgrp, cpu))) {
            struct cgroup_subsys_state *css;

@@ -166,7 +175,7 @@ static void cgroup_rstat_flush_locked(struct cgroup *cgrp, bool may_sleep)
                css->ss->css_rstat_flush(css, cpu);
            rcu_read_unlock();
        }
        raw_spin_unlock(cpu_lock);
        raw_spin_unlock_irqrestore(cpu_lock, flags);

        /* if @may_sleep, play nice and yield if necessary */
        if (may_sleep && (need_resched() ||
@@ -459,37 +459,28 @@ void kcov_task_exit(struct task_struct *t)
static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
    int res = 0;
    void *area;
    struct kcov *kcov = vma->vm_file->private_data;
    unsigned long size, off;
    struct page *page;
    unsigned long flags;

    area = vmalloc_user(vma->vm_end - vma->vm_start);
    if (!area)
        return -ENOMEM;

    spin_lock_irqsave(&kcov->lock, flags);
    size = kcov->size * sizeof(unsigned long);
    if (kcov->mode != KCOV_MODE_INIT || vma->vm_pgoff != 0 ||
    if (kcov->area == NULL || vma->vm_pgoff != 0 ||
        vma->vm_end - vma->vm_start != size) {
        res = -EINVAL;
        goto exit;
    }
    if (!kcov->area) {
        kcov->area = area;
        vma->vm_flags |= VM_DONTEXPAND;
        spin_unlock_irqrestore(&kcov->lock, flags);
        for (off = 0; off < size; off += PAGE_SIZE) {
            page = vmalloc_to_page(kcov->area + off);
            if (vm_insert_page(vma, vma->vm_start + off, page))
                WARN_ONCE(1, "vm_insert_page() failed");
        }
        return 0;
    spin_unlock_irqrestore(&kcov->lock, flags);
    vma->vm_flags |= VM_DONTEXPAND;
    for (off = 0; off < size; off += PAGE_SIZE) {
        page = vmalloc_to_page(kcov->area + off);
        if (vm_insert_page(vma, vma->vm_start + off, page))
            WARN_ONCE(1, "vm_insert_page() failed");
    }
    return 0;
exit:
    spin_unlock_irqrestore(&kcov->lock, flags);
    vfree(area);
    return res;
}
@@ -564,31 +555,12 @@ static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
                 unsigned long arg)
{
    struct task_struct *t;
    unsigned long size, unused;
    unsigned long flags, unused;
    int mode, i;
    struct kcov_remote_arg *remote_arg;
    struct kcov_remote *remote;
    unsigned long flags;

    switch (cmd) {
    case KCOV_INIT_TRACE:
        /*
         * Enable kcov in trace mode and setup buffer size.
         * Must happen before anything else.
         */
        if (kcov->mode != KCOV_MODE_DISABLED)
            return -EBUSY;
        /*
         * Size must be at least 2 to hold current position and one PC.
         * Later we allocate size * sizeof(unsigned long) memory,
         * that must not overflow.
         */
        size = arg;
        if (size < 2 || size > INT_MAX / sizeof(unsigned long))
            return -EINVAL;
        kcov->size = size;
        kcov->mode = KCOV_MODE_INIT;
        return 0;
    case KCOV_ENABLE:
        /*
         * Enable coverage for the current task.
@@ -692,9 +664,37 @@ static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
    struct kcov_remote_arg *remote_arg = NULL;
    unsigned int remote_num_handles;
    unsigned long remote_arg_size;
    unsigned long flags;
    unsigned long size, flags;
    void *area;

    if (cmd == KCOV_REMOTE_ENABLE) {
    kcov = filep->private_data;
    switch (cmd) {
    case KCOV_INIT_TRACE:
        /*
         * Enable kcov in trace mode and setup buffer size.
         * Must happen before anything else.
         *
         * First check the size argument - it must be at least 2
         * to hold the current position and one PC.
         */
        size = arg;
        if (size < 2 || size > INT_MAX / sizeof(unsigned long))
            return -EINVAL;
        area = vmalloc_user(size * sizeof(unsigned long));
        if (area == NULL)
            return -ENOMEM;
        spin_lock_irqsave(&kcov->lock, flags);
        if (kcov->mode != KCOV_MODE_DISABLED) {
            spin_unlock_irqrestore(&kcov->lock, flags);
            vfree(area);
            return -EBUSY;
        }
        kcov->area = area;
        kcov->size = size;
        kcov->mode = KCOV_MODE_INIT;
        spin_unlock_irqrestore(&kcov->lock, flags);
        return 0;
    case KCOV_REMOTE_ENABLE:
        if (get_user(remote_num_handles, (unsigned __user *)(arg +
                offsetof(struct kcov_remote_arg, num_handles))))
            return -EFAULT;
@@ -710,16 +710,18 @@ static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
            return -EINVAL;
        }
        arg = (unsigned long)remote_arg;
        fallthrough;
    default:
        /*
         * All other commands can be normally executed under a spin lock, so we
         * obtain and release it here in order to simplify kcov_ioctl_locked().
         */
        spin_lock_irqsave(&kcov->lock, flags);
        res = kcov_ioctl_locked(kcov, cmd, arg);
        spin_unlock_irqrestore(&kcov->lock, flags);
        kfree(remote_arg);
        return res;
    }

    kcov = filep->private_data;
    spin_lock_irqsave(&kcov->lock, flags);
    res = kcov_ioctl_locked(kcov, cmd, arg);
    spin_unlock_irqrestore(&kcov->lock, flags);

    kfree(remote_arg);

    return res;
}

static const struct file_operations kcov_fops = {
@@ -24,8 +24,7 @@
    static struct kobj_attribute _name##_attr = __ATTR_RO(_name)

#define KERNEL_ATTR_RW(_name) \
    static struct kobj_attribute _name##_attr = \
        __ATTR(_name, 0644, _name##_show, _name##_store)
    static struct kobj_attribute _name##_attr = __ATTR_RW(_name)

/* current uevent sequence number */
static ssize_t uevent_seqnum_show(struct kobject *kobj,
@@ -66,6 +66,7 @@ EXPORT_SYMBOL_GPL(panic_timeout);
#define PANIC_PRINT_LOCK_INFO        0x00000008
#define PANIC_PRINT_FTRACE_INFO        0x00000010
#define PANIC_PRINT_ALL_PRINTK_MSG    0x00000020
#define PANIC_PRINT_ALL_CPU_BT        0x00000040
unsigned long panic_print;

ATOMIC_NOTIFIER_HEAD(panic_notifier_list);
@@ -147,10 +148,16 @@ void nmi_panic(struct pt_regs *regs, const char *msg)
}
EXPORT_SYMBOL(nmi_panic);

static void panic_print_sys_info(void)
static void panic_print_sys_info(bool console_flush)
{
    if (panic_print & PANIC_PRINT_ALL_PRINTK_MSG)
        console_flush_on_panic(CONSOLE_REPLAY_ALL);
    if (console_flush) {
        if (panic_print & PANIC_PRINT_ALL_PRINTK_MSG)
            console_flush_on_panic(CONSOLE_REPLAY_ALL);
        return;
    }

    if (panic_print & PANIC_PRINT_ALL_CPU_BT)
        trigger_all_cpu_backtrace();

    if (panic_print & PANIC_PRINT_TASK_INFO)
        show_state();
@@ -185,6 +192,16 @@ void panic(const char *fmt, ...)
    int old_cpu, this_cpu;
    bool _crash_kexec_post_notifiers = crash_kexec_post_notifiers;

    if (panic_on_warn) {
        /*
         * This thread may hit another WARN() in the panic path.
         * Resetting this prevents additional WARN() from panicking the
         * system on this thread. Other threads are blocked by the
         * panic_mutex in panic().
         */
        panic_on_warn = 0;
    }

    /*
     * Disable local interrupts. This will prevent panic_smp_self_stop
     * from deadlocking the first cpu that invokes the panic, since
@@ -272,6 +289,8 @@ void panic(const char *fmt, ...)
     */
    atomic_notifier_call_chain(&panic_notifier_list, 0, buf);

    panic_print_sys_info(false);

    kmsg_dump(KMSG_DUMP_PANIC);

    /*
@@ -302,7 +321,7 @@ void panic(const char *fmt, ...)
    debug_locks_off();
    console_flush_on_panic(CONSOLE_FLUSH_PENDING);

    panic_print_sys_info();
    panic_print_sys_info(true);

    if (!panic_blink)
        panic_blink = no_blink;
@@ -576,16 +595,8 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
    if (regs)
        show_regs(regs);

    if (panic_on_warn) {
        /*
         * This thread may hit another WARN() in the panic path.
         * Resetting this prevents additional WARN() from panicking the
         * system on this thread. Other threads are blocked by the
         * panic_mutex in panic().
         */
        panic_on_warn = 0;
    if (panic_on_warn)
        panic("panic_on_warn set ...\n");
    }

    if (!regs)
        dump_stack();
@@ -56,14 +56,6 @@ struct resource_constraint {

static DEFINE_RWLOCK(resource_lock);

/*
 * For memory hotplug, there is no way to free resource entries allocated
 * by boot mem after the system is up. So for reusing the resource entry
 * we need to remember the resource.
 */
static struct resource *bootmem_resource_free;
static DEFINE_SPINLOCK(bootmem_resource_lock);

static struct resource *next_resource(struct resource *p)
{
    if (p->child)
@@ -160,36 +152,19 @@ __initcall(ioresources_init);

static void free_resource(struct resource *res)
{
    if (!res)
        return;

    if (!PageSlab(virt_to_head_page(res))) {
        spin_lock(&bootmem_resource_lock);
        res->sibling = bootmem_resource_free;
        bootmem_resource_free = res;
        spin_unlock(&bootmem_resource_lock);
    } else {
    /**
     * If the resource was allocated using memblock early during boot
     * we'll leak it here: we can only return full pages back to the
     * buddy and trying to be smart and reusing them eventually in
     * alloc_resource() overcomplicates resource handling.
     */
    if (res && PageSlab(virt_to_head_page(res)))
        kfree(res);
    }
}

static struct resource *alloc_resource(gfp_t flags)
{
    struct resource *res = NULL;

    spin_lock(&bootmem_resource_lock);
    if (bootmem_resource_free) {
        res = bootmem_resource_free;
        bootmem_resource_free = res->sibling;
    }
    spin_unlock(&bootmem_resource_lock);

    if (res)
        memset(res, 0, sizeof(struct resource));
    else
        res = kzalloc(sizeof(struct resource), flags);

    return res;
    return kzalloc(sizeof(struct resource), flags);
}

/* Return the conflict entry if you can't request it */
@@ -113,13 +113,14 @@ static void send_cpu_listeners(struct sk_buff *skb,
    struct listener *s, *tmp;
    struct sk_buff *skb_next, *skb_cur = skb;
    void *reply = genlmsg_data(genlhdr);
    int rc, delcount = 0;
    int delcount = 0;

    genlmsg_end(skb, reply);

    rc = 0;
    down_read(&listeners->sem);
    list_for_each_entry(s, &listeners->list, list) {
        int rc;

        skb_next = NULL;
        if (!list_is_last(&s->list, &listeners->list)) {
            skb_next = skb_clone(skb_cur, GFP_KERNEL);
@@ -208,20 +208,87 @@ config DEBUG_BUGVERBOSE

endmenu # "printk and dmesg options"

config DEBUG_KERNEL
	bool "Kernel debugging"
	help
	  Say Y here if you are developing drivers or trying to debug and
	  identify kernel problems.

config DEBUG_MISC
	bool "Miscellaneous debug code"
	default DEBUG_KERNEL
	depends on DEBUG_KERNEL
	help
	  Say Y here if you need to enable miscellaneous debug code that should
	  be under a more specific debug option but isn't.

menu "Compile-time checks and compiler options"

config DEBUG_INFO
	bool "Compile the kernel with debug info"
	depends on DEBUG_KERNEL && !COMPILE_TEST
	bool
	help
	  If you say Y here the resulting kernel image will include
	  debugging info resulting in a larger kernel image.
	  A kernel debug info option other than "None" has been selected
	  in the "Debug information" choice below, indicating that debug
	  information will be generated for build targets.

choice
	prompt "Debug information"
	depends on DEBUG_KERNEL
	help
	  Selecting something other than "None" results in a kernel image
	  that will include debugging info resulting in a larger kernel image.
	  This adds debug symbols to the kernel and modules (gcc -g), and
	  is needed if you intend to use kernel crashdump or binary object
	  tools like crash, kgdb, LKCD, gdb, etc on the kernel.
	  Say Y here only if you plan to debug the kernel.

	  If unsure, say N.
	  Choose which version of DWARF debug info to emit. If unsure,
	  select "Toolchain default".

config DEBUG_INFO_NONE
	bool "Disable debug information"
	help
	  Do not build the kernel with debugging information, which will
	  result in a faster and smaller build.

config DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT
	bool "Rely on the toolchain's implicit default DWARF version"
	select DEBUG_INFO
	help
	  The implicit default version of DWARF debug info produced by a
	  toolchain changes over time.

	  This can break consumers of the debug info that haven't upgraded to
	  support newer revisions, and prevent testing newer versions, but
	  those should be less common scenarios.

config DEBUG_INFO_DWARF4
	bool "Generate DWARF Version 4 debuginfo"
	select DEBUG_INFO
	help
	  Generate DWARF v4 debug info. This requires gcc 4.5+ and gdb 7.0+.

	  If you have consumers of DWARF debug info that are not ready for
	  newer revisions of DWARF, you may wish to choose this or have your
	  config select this.

config DEBUG_INFO_DWARF5
	bool "Generate DWARF Version 5 debuginfo"
	select DEBUG_INFO
	depends on !CC_IS_CLANG || (CC_IS_CLANG && (AS_IS_LLVM || (AS_IS_GNU && AS_VERSION >= 23502)))
	help
	  Generate DWARF v5 debug info. Requires binutils 2.35.2, gcc 5.0+ (gcc
	  5.0+ accepts the -gdwarf-5 flag but only had partial support for some
	  draft features until 7.0), and gdb 8.0+.

	  Changes to the structure of debug info in Version 5 allow for around
	  15-18% savings in resulting image and debug info section sizes as
	  compared to DWARF Version 4. DWARF Version 5 standardizes previous
	  extensions such as accelerators for symbol indexing and the format
	  for fission (.dwo/.dwp) files. Users may not want to select this
	  config if they rely on tooling that has not yet been updated to
	  support DWARF Version 5.

endchoice # "Debug information"

if DEBUG_INFO
@@ -267,56 +334,12 @@ config DEBUG_INFO_SPLIT
	  to know about the .dwo files and include them.
	  Incompatible with older versions of ccache.

choice
	prompt "DWARF version"
	help
	  Which version of DWARF debug info to emit.

config DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT
	bool "Rely on the toolchain's implicit default DWARF version"
	help
	  The implicit default version of DWARF debug info produced by a
	  toolchain changes over time.

	  This can break consumers of the debug info that haven't upgraded to
	  support newer revisions, and prevent testing newer versions, but
	  those should be less common scenarios.

	  If unsure, say Y.

config DEBUG_INFO_DWARF4
	bool "Generate DWARF Version 4 debuginfo"
	help
	  Generate DWARF v4 debug info. This requires gcc 4.5+ and gdb 7.0+.

	  If you have consumers of DWARF debug info that are not ready for
	  newer revisions of DWARF, you may wish to choose this or have your
	  config select this.

config DEBUG_INFO_DWARF5
	bool "Generate DWARF Version 5 debuginfo"
	depends on !CC_IS_CLANG || (CC_IS_CLANG && (AS_IS_LLVM || (AS_IS_GNU && AS_VERSION >= 23502)))
	depends on !DEBUG_INFO_BTF || PAHOLE_VERSION >= 121
	help
	  Generate DWARF v5 debug info. Requires binutils 2.35.2, gcc 5.0+ (gcc
	  5.0+ accepts the -gdwarf-5 flag but only had partial support for some
	  draft features until 7.0), and gdb 8.0+.

	  Changes to the structure of debug info in Version 5 allow for around
	  15-18% savings in resulting image and debug info section sizes as
	  compared to DWARF Version 4. DWARF Version 5 standardizes previous
	  extensions such as accelerators for symbol indexing and the format
	  for fission (.dwo/.dwp) files. Users may not want to select this
	  config if they rely on tooling that has not yet been updated to
	  support DWARF Version 5.

endchoice # "DWARF version"

config DEBUG_INFO_BTF
	bool "Generate BTF typeinfo"
	depends on !DEBUG_INFO_SPLIT && !DEBUG_INFO_REDUCED
	depends on !GCC_PLUGIN_RANDSTRUCT || COMPILE_TEST
	depends on BPF_SYSCALL
	depends on !DEBUG_INFO_DWARF5 || PAHOLE_VERSION >= 121
	help
	  Generate deduplicated BTF type information from DWARF debug info.
	  Turning this on expects presence of pahole tool, which will convert
@@ -434,7 +457,8 @@ config SECTION_MISMATCH_WARN_ONLY
	  If unsure, say Y.

config DEBUG_FORCE_FUNCTION_ALIGN_64B
	bool "Force all function address 64B aligned" if EXPERT
	bool "Force all function address 64B aligned"
	depends on EXPERT && (X86_64 || ARM64 || PPC32 || PPC64 || ARC)
	help
	  There are cases that a commit from one domain changes the function
	  address alignment of other domains, and cause magic performance
@@ -603,20 +627,6 @@ source "lib/Kconfig.kcsan"

endmenu

config DEBUG_KERNEL
	bool "Kernel debugging"
	help
	  Say Y here if you are developing drivers or trying to debug and
	  identify kernel problems.

config DEBUG_MISC
	bool "Miscellaneous debug code"
	default DEBUG_KERNEL
	depends on DEBUG_KERNEL
	help
	  Say Y here if you need to enable miscellaneous debug code that should
	  be under a more specific debug option but isn't.

menu "Networking Debugging"

source "net/Kconfig.debug"
@@ -10,21 +10,10 @@ config HAVE_KCSAN_COMPILER
	  For the list of compilers that support KCSAN, please see
	  <file:Documentation/dev-tools/kcsan.rst>.

config KCSAN_KCOV_BROKEN
	def_bool KCOV && CC_HAS_SANCOV_TRACE_PC
	depends on CC_IS_CLANG
	depends on !$(cc-option,-Werror=unused-command-line-argument -fsanitize=thread -fsanitize-coverage=trace-pc)
	help
	  Some versions of clang support either KCSAN and KCOV but not the
	  combination of the two.
	  See https://bugs.llvm.org/show_bug.cgi?id=45831 for the status
	  in newer releases.

menuconfig KCSAN
	bool "KCSAN: dynamic data race detector"
	depends on HAVE_ARCH_KCSAN && HAVE_KCSAN_COMPILER
	depends on DEBUG_KERNEL && !KASAN
	depends on !KCSAN_KCOV_BROKEN
	select STACKTRACE
	help
	  The Kernel Concurrency Sanitizer (KCSAN) is a dynamic
@@ -27,16 +27,6 @@ config UBSAN_TRAP
	  the system. For some system builders this is an acceptable
	  trade-off.

config UBSAN_KCOV_BROKEN
	def_bool KCOV && CC_HAS_SANCOV_TRACE_PC
	depends on CC_IS_CLANG
	depends on !$(cc-option,-Werror=unused-command-line-argument -fsanitize=bounds -fsanitize-coverage=trace-pc)
	help
	  Some versions of clang support either UBSAN or KCOV but not the
	  combination of the two.
	  See https://bugs.llvm.org/show_bug.cgi?id=45831 for the status
	  in newer releases.

config CC_HAS_UBSAN_BOUNDS
	def_bool $(cc-option,-fsanitize=bounds)

@@ -46,7 +36,6 @@ config CC_HAS_UBSAN_ARRAY_BOUNDS
config UBSAN_BOUNDS
	bool "Perform array index bounds checking"
	default UBSAN
	depends on !UBSAN_KCOV_BROKEN
	depends on CC_HAS_UBSAN_ARRAY_BOUNDS || CC_HAS_UBSAN_BOUNDS
	help
	  This option enables detection of directly indexed out of bounds
@@ -72,7 +61,6 @@ config UBSAN_ARRAY_BOUNDS
config UBSAN_LOCAL_BOUNDS
	bool "Perform array local bounds checking"
	depends on UBSAN_TRAP
	depends on !UBSAN_KCOV_BROKEN
	depends on $(cc-option,-fsanitize=local-bounds)
	help
	  This option enables -fsanitize=local-bounds which traps when an
lib/bitmap.c (24 lines changed)
@@ -492,6 +492,11 @@ EXPORT_SYMBOL(bitmap_print_to_pagebuf);
 * @list: indicates whether the bitmap must be list
 *      true:  print in decimal list format
 *      false: print in hexadecimal bitmask format
 * @buf: buffer into which string is placed
 * @maskp: pointer to bitmap to convert
 * @nmaskbits: size of bitmap, in bits
 * @off: in the string from which we are copying, We copy to @buf
 * @count: the maximum number of bytes to print
 */
static int bitmap_print_to_buf(bool list, char *buf, const unsigned long *maskp,
        int nmaskbits, loff_t off, size_t count)
@@ -512,6 +517,11 @@ static int bitmap_print_to_buf(bool list, char *buf, const unsigned long *maskp,

/**
 * bitmap_print_bitmask_to_buf - convert bitmap to hex bitmask format ASCII string
 * @buf: buffer into which string is placed
 * @maskp: pointer to bitmap to convert
 * @nmaskbits: size of bitmap, in bits
 * @off: in the string from which we are copying, We copy to @buf
 * @count: the maximum number of bytes to print
 *
 * The bitmap_print_to_pagebuf() is used indirectly via its cpumap wrapper
 * cpumap_print_to_pagebuf() or directly by drivers to export hexadecimal
@@ -553,12 +563,6 @@ static int bitmap_print_to_buf(bool list, char *buf, const unsigned long *maskp,
 * move to use bin_attribute. In result, we have to pass the corresponding
 * parameters such as off, count from bin_attribute show entry to this API.
 *
 * @buf: buffer into which string is placed
 * @maskp: pointer to bitmap to convert
 * @nmaskbits: size of bitmap, in bits
 * @off: in the string from which we are copying, We copy to @buf
 * @count: the maximum number of bytes to print
 *
 * The role of cpumap_print_bitmask_to_buf() and cpumap_print_list_to_buf()
 * is similar with cpumap_print_to_pagebuf(), the difference is that
 * bitmap_print_to_pagebuf() mainly serves sysfs attribute with the assumption
@@ -597,6 +601,11 @@ EXPORT_SYMBOL(bitmap_print_bitmask_to_buf);

/**
 * bitmap_print_list_to_buf - convert bitmap to decimal list format ASCII string
 * @buf: buffer into which string is placed
 * @maskp: pointer to bitmap to convert
 * @nmaskbits: size of bitmap, in bits
 * @off: in the string from which we are copying, We copy to @buf
 * @count: the maximum number of bytes to print
 *
 * Everything is same with the above bitmap_print_bitmask_to_buf() except
 * the print format.
@@ -807,7 +816,8 @@ EXPORT_SYMBOL(bitmap_parselist);


/**
 * bitmap_parselist_user()
 * bitmap_parselist_user() - convert user buffer's list format ASCII
 * string to bitmap
 *
 * @ubuf: pointer to user buffer containing string.
 * @ulen: buffer size in bytes. If string is smaller than this
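A sketch of how the exported bitmap_print_bitmask_to_buf() is meant to be driven from a sysfs bin_attribute ->read() handler; the handler name, mask and bit count below are hypothetical:

    static ssize_t foo_mask_read(struct file *file, struct kobject *kobj,
                     struct bin_attribute *attr, char *buf,
                     loff_t off, size_t count)
    {
        /* forward off/count so large masks can be read in chunks */
        return bitmap_print_bitmask_to_buf(buf, foo_mask, nr_foo_bits,
                           off, count);
    }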
lib/ubsan.c (10 lines changed)
@@ -154,16 +154,8 @@ static void ubsan_epilogue(void)

    current->in_ubsan--;

    if (panic_on_warn) {
        /*
         * This thread may hit another WARN() in the panic path.
         * Resetting this prevents additional WARN() from panicking the
         * system on this thread. Other threads are blocked by the
         * panic_mutex in panic().
         */
        panic_on_warn = 0;
    if (panic_on_warn)
        panic("panic_on_warn set ...\n");
    }
}

void __ubsan_handle_divrem_overflow(void *_data, void *lhs, void *rhs)
@@ -117,16 +117,8 @@ static void end_report(unsigned long *flags, unsigned long addr)
    pr_err("==================================================================\n");
    add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
    spin_unlock_irqrestore(&report_lock, *flags);
    if (panic_on_warn && !test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags)) {
        /*
         * This thread may hit another WARN() in the panic path.
         * Resetting this prevents additional WARN() from panicking the
         * system on this thread. Other threads are blocked by the
         * panic_mutex in panic().
         */
        panic_on_warn = 0;
    if (panic_on_warn && !test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags))
        panic("panic_on_warn set ...\n");
    }
    if (kasan_arg_fault == KASAN_ARG_FAULT_PANIC)
        panic("kasan.fault=panic set ...\n");
    kasan_enable_current();
@@ -334,7 +334,7 @@ if ($user_codespellfile) {
} elsif (!(-f $codespellfile)) {
	# If /usr/share/codespell/dictionary.txt is not present, try to find it
	# under codespell's install directory: <codespell_root>/data/dictionary.txt
	if (($codespell || $help) && which("codespell") ne "" && which("python") ne "") {
	if (($codespell || $help) && which("python3") ne "") {
		my $python_codespell_dict = << "EOF";

import os.path as op
@@ -344,7 +344,7 @@ codespell_file = op.join(codespell_dir, 'data', 'dictionary.txt')
print(codespell_file, end='')
EOF

		my $codespell_dict = `python -c "$python_codespell_dict" 2> /dev/null`;
		my $codespell_dict = `python3 -c "$python_codespell_dict" 2> /dev/null`;
		$codespellfile = $codespell_dict if (-f $codespell_dict);
	}
}
@@ -3926,7 +3926,7 @@ sub process {
		if ($prevline =~ /^[\+ ]};?\s*$/ &&
		    $line =~ /^\+/ &&
		    !($line =~ /^\+\s*$/ ||
		      $line =~ /^\+\s*EXPORT_SYMBOL/ ||
		      $line =~ /^\+\s*(?:EXPORT_SYMBOL|early_param)/ ||
		      $line =~ /^\+\s*MODULE_/i ||
		      $line =~ /^\+\s*\#\s*(?:end|elif|else)/ ||
		      $line =~ /^\+[a-z_]*init/ ||
@@ -5551,6 +5551,7 @@ sub process {
		    defined($stat) && defined($cond) &&
		    $line =~ /\b(?:if|while|for)\s*\(/ && $line !~ /^.\s*#/) {
			my ($s, $c) = ($stat, $cond);
			my $fixed_assign_in_if = 0;

			if ($c =~ /\bif\s*\(.*[^<>!=]=[^=].*/s) {
				if (ERROR("ASSIGN_IN_IF",
@@ -5575,6 +5576,7 @@ sub process {
						$newline .= ')';
						$newline .= " {" if (defined($brace));
						fix_insert_line($fixlinenr + 1, $newline);
						$fixed_assign_in_if = 1;
					}
				}
			}
@@ -5598,8 +5600,20 @@ sub process {
					$stat_real = "[...]\n$stat_real";
				}

				ERROR("TRAILING_STATEMENTS",
				      "trailing statements should be on next line\n" . $herecurr . $stat_real);
				if (ERROR("TRAILING_STATEMENTS",
					  "trailing statements should be on next line\n" . $herecurr . $stat_real) &&
				    !$fixed_assign_in_if &&
				    $cond_lines == 0 &&
				    $fix && $perl_version_ok &&
				    $fixed[$fixlinenr] =~ /^\+(\s*)((?:if|while|for)\s*$balanced_parens)\s*(.*)$/) {
					my $indent = $1;
					my $test = $2;
					my $rest = rtrim($4);
					if ($rest =~ /;$/) {
						$fixed[$fixlinenr] = "\+$indent$test";
						fix_insert_line($fixlinenr + 1, "$indent\t$rest");
					}
				}
			}
		}

@@ -7418,6 +7432,13 @@ sub process {
			WARN("MODULE_LICENSE",
			     "unknown module license " . $extracted_string . "\n" . $herecurr);
		}
		if (!$file && $extracted_string eq '"GPL v2"') {
			if (WARN("MODULE_LICENSE",
				 "Prefer \"GPL\" over \"GPL v2\" - see commit bf7fbeeae6db (\"module: Cure the MODULE_LICENSE \"GPL\" vs. \"GPL v2\" bogosity\")\n" . $herecurr) &&
			    $fix) {
				$fixed[$fixlinenr] =~ s/\bMODULE_LICENSE\s*\(\s*"GPL v2"\s*\)/MODULE_LICENSE("GPL")/;
			}
		}
	}

# check for sysctl duplicate constants
@@ -43,11 +43,10 @@ typedef __u8 u8;
typedef __s8  s8;

#ifdef __CHECKER__
#define __bitwise__ __attribute__((bitwise))
#define __bitwise __attribute__((bitwise))
#else
#define __bitwise__
#define __bitwise
#endif
#define __bitwise __bitwise__

#define __force
#define __user