- hfs and hfsplus kmap API modernization from Fabio Francesco

- Valentin Schneider makes crash-kexec work properly when invoked from
  an NMI-time panic.

- ntfs bugfixes from Hawkins Jiawei

- Jiebin Sun improves IPC msg scalability by replacing atomic_t's with
  percpu counters.

- nilfs2 cleanups from Minghao Chi

- lots of other single patches all over the tree!

-----BEGIN PGP SIGNATURE-----

iHUEABYKAB0WIQTTMBEPP41GrTpTJgfdBJ7gKXxAjgUCY0Yf0gAKCRDdBJ7gKXxA
joapAQDT1d1zu7T8yf9cQXkYnZVuBKCjxKE/IsYvqaq1a42MjQD/SeWZg0wV05B8
DhJPj9nkEp6R3Rj3Mssip+3vNuceAQM=
=lUQY
-----END PGP SIGNATURE-----

Merge tag 'mm-nonmm-stable-2022-10-11' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm

Pull non-MM updates from Andrew Morton:

 - hfs and hfsplus kmap API modernization (Fabio Francesco)

 - make crash-kexec work properly when invoked from an NMI-time panic
   (Valentin Schneider)

 - ntfs bugfixes (Hawkins Jiawei)

 - improve IPC msg scalability by replacing atomic_t's with percpu
   counters (Jiebin Sun)

 - nilfs2 cleanups (Minghao Chi)

 - lots of other single patches all over the tree!

* tag 'mm-nonmm-stable-2022-10-11' of git://git.kernel.org/pub/scm/linux/kernel/git/akpm/mm: (71 commits)
  include/linux/entry-common.h: remove has_signal comment of arch_do_signal_or_restart() prototype
  proc: test how it holds up with mapping'less process
  mailmap: update Frank Rowand email address
  ia64: mca: use strscpy() is more robust and safer
  init/Kconfig: fix unmet direct dependencies
  ia64: update config files
  nilfs2: replace WARN_ONs by nilfs_error for checkpoint acquisition failure
  fork: remove duplicate included header files
  init/main.c: remove unnecessary (void*) conversions
  proc: mark more files as permanent
  nilfs2: remove the unneeded result variable
  nilfs2: delete unnecessary checks before brelse()
  checkpatch: warn for non-standard fixes tag style
  usr/gen_init_cpio.c: remove unnecessary -1 values from int file
  ipc/msg: mitigate the lock contention with percpu counter
  percpu: add percpu_counter_add_local and percpu_counter_sub_local
  fs/ocfs2: fix repeated words in comments
  relay: use kvcalloc to alloc page array in relay_alloc_page_array
  proc: make config PROC_CHILDREN depend on PROC_FS
  fs: uninline inode_maybe_inc_iversion()
  ...
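The IPC msg item replaces a single shared atomic_t statistic with a percpu_counter so senders and receivers stop bouncing one cache line; percpu_counter_add_local() (introduced by this series) keeps even large updates in the per-CPU slot, and only readers pay the cost of summing. A minimal sketch of that pattern — the counter name and the functions around it are invented for illustration, not the actual ipc/msg code:

```c
#include <linux/percpu_counter.h>

/* Hypothetical global statistic, standing in for ipc/msg's counters. */
static struct percpu_counter msg_bytes;

static int msg_counter_setup(void)
{
	/* Allocates the per-CPU slots; pair with percpu_counter_destroy(). */
	return percpu_counter_init(&msg_bytes, 0, GFP_KERNEL);
}

static void msg_account_send(size_t len)
{
	/*
	 * _local never folds into the shared count on the hot path,
	 * so concurrent senders do not contend on a single cache line.
	 */
	percpu_counter_add_local(&msg_bytes, len);
}

static s64 msg_bytes_snapshot(void)
{
	/* Readers sum all per-CPU deltas plus the shared count once. */
	return percpu_counter_sum(&msg_bytes);
}
```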
This commit is contained in:
commit 676cb49573

.mailmap (1 line changed)
@@ -137,6 +137,7 @@ Filipe Lautert <filipe@icewall.org>
 Finn Thain <fthain@linux-m68k.org> <fthain@telegraphics.com.au>
 Franck Bui-Huu <vagabon.xyz@gmail.com>
 Frank Rowand <frowand.list@gmail.com> <frank.rowand@am.sony.com>
+Frank Rowand <frowand.list@gmail.com> <frank.rowand@sony.com>
 Frank Rowand <frowand.list@gmail.com> <frank.rowand@sonymobile.com>
 Frank Rowand <frowand.list@gmail.com> <frowand@mvista.com>
 Frank Zago <fzago@systemfabricworks.com>
@@ -65,6 +65,11 @@ combining the following values:
 4 s3_beep
 = =======
 
+arch
+====
+
+The machine hardware name, the same output as ``uname -m``
+(e.g. ``x86_64`` or ``aarch64``).
 
 auto_msgmni
 ===========
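The new entry is read-only and surfaces the same string as the machine field of uname(2). A small userspace check of that equivalence (illustrative only; error handling trimmed, and the path follows the usual /proc/sys mapping of the kernel.arch sysctl):

```c
#include <stdio.h>
#include <sys/utsname.h>

int main(void)
{
	struct utsname u;
	char buf[65] = "";
	FILE *f = fopen("/proc/sys/kernel/arch", "r");

	if (f) {
		if (fscanf(f, "%64s", buf) != 1)
			buf[0] = '\0';
		fclose(f);
	}
	uname(&u);
	printf("kernel.arch: %s\n", buf);       /* e.g. "x86_64" */
	printf("uname -m:    %s\n", u.machine); /* should match */
	return 0;
}
```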
@@ -612,6 +612,13 @@ Commit message
 
     See: https://www.kernel.org/doc/html/latest/process/submitting-patches.html#describe-your-changes
 
+  **BAD_FIXES_TAG**
+    The Fixes: tag is malformed or does not follow the community conventions.
+    This can occur if the tag has been split into multiple lines (e.g., when
+    pasted in an email program with word wrapping enabled).
+
+    See: https://www.kernel.org/doc/html/latest/process/submitting-patches.html#describe-your-changes
+
 
 Comparison style
 ----------------
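For reference, a conforming tag is one unwrapped line: the string "Fixes:", the first 12 characters of the offending commit's SHA-1, and its subject in parentheses and double quotes. With placeholder values it looks like::

    Fixes: 0123456789ab ("subsystem: one-line summary of the commit being fixed")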
@ -65,7 +65,7 @@ CONFIG_NFSD=m
|
||||
CONFIG_NLS_CODEPAGE_437=y
|
||||
CONFIG_MAGIC_SYSRQ=y
|
||||
CONFIG_DEBUG_KERNEL=y
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
|
||||
CONFIG_ALPHA_LEGACY_START_ADDRESS=y
|
||||
CONFIG_MATHEMU=y
|
||||
CONFIG_CRYPTO_HMAC=y
|
||||
|
@@ -36,8 +36,6 @@ extern void start_thread(struct pt_regs *, unsigned long, unsigned long);
 
-/* Free all resources held by a thread. */
 struct task_struct;
-extern void release_thread(struct task_struct *);
 
 unsigned long __get_wchan(struct task_struct *p);
 
 #define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
@@ -225,11 +225,6 @@ flush_thread(void)
 	current_thread_info()->pcb.unique = 0;
 }
 
-void
-release_thread(struct task_struct *dead_task)
-{
-}
-
 /*
  * Copy architecture-specific thread state
  */
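These per-architecture stubs can go because the exit path only needs one empty default; the cleanup series supplies a weak no-op, roughly of the shape sketched below (a sketch of the approach, not a verbatim quote of the tree), which an architecture with real per-thread teardown can still override:

```c
#include <linux/sched/task.h>

/* Default no-op; an arch that has real teardown work overrides this. */
void __weak release_thread(struct task_struct *dead_task)
{
}
```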
@@ -491,9 +491,9 @@ setup_arch(char **cmdline_p)
 	   boot flags depending on the boot mode, we need some shorthand.
 	   This should do for installation. */
 	if (strcmp(COMMAND_LINE, "INSTALL") == 0) {
-		strlcpy(command_line, "root=/dev/fd0 load_ramdisk=1", sizeof command_line);
+		strscpy(command_line, "root=/dev/fd0 load_ramdisk=1", sizeof(command_line));
 	} else {
-		strlcpy(command_line, COMMAND_LINE, sizeof command_line);
+		strscpy(command_line, COMMAND_LINE, sizeof(command_line));
 	}
 	strcpy(boot_command_line, command_line);
 	*cmdline_p = command_line;
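strscpy() differs from the deprecated strlcpy() in the two ways that matter here: it never reads the source past what fits in the destination, and it reports truncation through its return value (-E2BIG) rather than returning the would-be source length. A hedged sketch of the calling pattern, with an invented buffer name:

```c
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/printk.h>

static void copy_cmdline_example(const char *src)
{
	char cmdline[64];	/* hypothetical destination buffer */
	ssize_t n;

	n = strscpy(cmdline, src, sizeof(cmdline));
	if (n == -E2BIG) {
		/* cmdline still holds a NUL-terminated prefix of src. */
		pr_warn("example: command line truncated\n");
	}
}
```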
@ -90,7 +90,7 @@ CONFIG_TMPFS=y
|
||||
CONFIG_CONFIGFS_FS=y
|
||||
# CONFIG_MISC_FILESYSTEMS is not set
|
||||
# CONFIG_NETWORK_FILESYSTEMS is not set
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
|
||||
CONFIG_STRIP_ASM_SYMS=y
|
||||
CONFIG_DEBUG_FS=y
|
||||
CONFIG_HEADERS_INSTALL=y
|
||||
|
@ -43,9 +43,6 @@ struct task_struct;
|
||||
#define task_pt_regs(p) \
|
||||
((struct pt_regs *)(THREAD_SIZE + (void *)task_stack_page(p)) - 1)
|
||||
|
||||
/* Free all resources held by a thread */
|
||||
#define release_thread(thread) do { } while (0)
|
||||
|
||||
/*
|
||||
* A lot of busy-wait loops in SMP are based off of non-volatile data otherwise
|
||||
* get optimised away by gcc
|
||||
|
@ -81,9 +81,6 @@ static inline void arch_thread_struct_whitelist(unsigned long *offset,
|
||||
/* Forward declaration, a strange C thing */
|
||||
struct task_struct;
|
||||
|
||||
/* Free all resources held by a thread. */
|
||||
extern void release_thread(struct task_struct *);
|
||||
|
||||
unsigned long __get_wchan(struct task_struct *p);
|
||||
|
||||
#define task_pt_regs(p) \
|
||||
|
@ -232,10 +232,6 @@ void flush_thread(void)
|
||||
thread_notify(THREAD_NOTIFY_FLUSH, thread);
|
||||
}
|
||||
|
||||
void release_thread(struct task_struct *dead_task)
|
||||
{
|
||||
}
|
||||
|
||||
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
|
||||
|
||||
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
|
||||
|
@ -323,9 +323,6 @@ static inline bool is_ttbr1_addr(unsigned long addr)
|
||||
/* Forward declaration, a strange C thing */
|
||||
struct task_struct;
|
||||
|
||||
/* Free all resources held by a thread. */
|
||||
extern void release_thread(struct task_struct *);
|
||||
|
||||
unsigned long __get_wchan(struct task_struct *p);
|
||||
|
||||
void update_sctlr_el1(u64 sctlr);
|
||||
|
@ -279,10 +279,6 @@ void flush_thread(void)
|
||||
flush_tagged_addr_state();
|
||||
}
|
||||
|
||||
void release_thread(struct task_struct *dead_task)
|
||||
{
|
||||
}
|
||||
|
||||
void arch_release_task_struct(struct task_struct *tsk)
|
||||
{
|
||||
fpsimd_release_task(tsk);
|
||||
|
@ -69,11 +69,6 @@ do { \
|
||||
/* Forward declaration, a strange C thing */
|
||||
struct task_struct;
|
||||
|
||||
/* Free all resources held by a thread. */
|
||||
static inline void release_thread(struct task_struct *dead_task)
|
||||
{
|
||||
}
|
||||
|
||||
/* Prepare to copy thread state - unlazy all lazy status */
|
||||
#define prepare_to_copy(tsk) do { } while (0)
|
||||
|
||||
|
@ -60,10 +60,6 @@ struct thread_struct {
|
||||
#define KSTK_EIP(tsk) (pt_elr(task_pt_regs(tsk)))
|
||||
#define KSTK_ESP(tsk) (pt_psp(task_pt_regs(tsk)))
|
||||
|
||||
/* Free all resources held by a thread; defined in process.c */
|
||||
extern void release_thread(struct task_struct *dead_task);
|
||||
|
||||
/* Get wait channel for task P. */
|
||||
extern unsigned long __get_wchan(struct task_struct *p);
|
||||
|
||||
/* The following stuff is pretty HEXAGON specific. */
|
||||
|
@ -112,13 +112,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Release any architecture-specific resources locked by thread
|
||||
*/
|
||||
void release_thread(struct task_struct *dead_task)
|
||||
{
|
||||
}
|
||||
|
||||
/*
|
||||
* Some archs flush debug and FPU info here
|
||||
*/
|
||||
|
@ -20,7 +20,6 @@ CONFIG_UNIX=y
|
||||
CONFIG_INET=y
|
||||
# CONFIG_IPV6 is not set
|
||||
CONFIG_BLK_DEV_LOOP=m
|
||||
CONFIG_BLK_DEV_CRYPTOLOOP=m
|
||||
CONFIG_BLK_DEV_NBD=m
|
||||
CONFIG_BLK_DEV_RAM=m
|
||||
CONFIG_ATA=m
|
||||
@ -91,7 +90,6 @@ CONFIG_NFS_V4=m
|
||||
CONFIG_NFSD=m
|
||||
CONFIG_NFSD_V4=y
|
||||
CONFIG_CIFS=m
|
||||
CONFIG_CIFS_STATS=y
|
||||
CONFIG_CIFS_XATTR=y
|
||||
CONFIG_CIFS_POSIX=y
|
||||
CONFIG_NLS_CODEPAGE_437=y
|
||||
|
@ -39,7 +39,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
|
||||
CONFIG_CONNECTOR=y
|
||||
# CONFIG_PNP_DEBUG_MESSAGES is not set
|
||||
CONFIG_BLK_DEV_LOOP=m
|
||||
CONFIG_BLK_DEV_CRYPTOLOOP=m
|
||||
CONFIG_BLK_DEV_NBD=m
|
||||
CONFIG_BLK_DEV_RAM=y
|
||||
CONFIG_SGI_XP=m
|
||||
@ -91,7 +90,6 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y
|
||||
# CONFIG_HW_RANDOM is not set
|
||||
CONFIG_RTC_CLASS=y
|
||||
CONFIG_RTC_DRV_EFI=y
|
||||
CONFIG_RAW_DRIVER=m
|
||||
CONFIG_HPET=y
|
||||
CONFIG_AGP=m
|
||||
CONFIG_AGP_I460=m
|
||||
|
@ -31,11 +31,9 @@ CONFIG_IP_MULTICAST=y
|
||||
CONFIG_SYN_COOKIES=y
|
||||
# CONFIG_IPV6 is not set
|
||||
CONFIG_BLK_DEV_LOOP=m
|
||||
CONFIG_BLK_DEV_CRYPTOLOOP=m
|
||||
CONFIG_BLK_DEV_NBD=m
|
||||
CONFIG_BLK_DEV_RAM=y
|
||||
CONFIG_ATA=y
|
||||
CONFIG_BLK_DEV_IDECD=y
|
||||
CONFIG_ATA_GENERIC=y
|
||||
CONFIG_PATA_CMD64X=y
|
||||
CONFIG_ATA_PIIX=y
|
||||
@ -81,7 +79,6 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y
|
||||
# CONFIG_HW_RANDOM is not set
|
||||
CONFIG_RTC_CLASS=y
|
||||
CONFIG_RTC_DRV_EFI=y
|
||||
CONFIG_RAW_DRIVER=m
|
||||
CONFIG_HPET=y
|
||||
CONFIG_AGP=m
|
||||
CONFIG_AGP_I460=m
|
||||
|
@ -36,7 +36,6 @@ CONFIG_IP_MULTICAST=y
|
||||
CONFIG_SYN_COOKIES=y
|
||||
# CONFIG_IPV6 is not set
|
||||
CONFIG_BLK_DEV_LOOP=m
|
||||
CONFIG_BLK_DEV_CRYPTOLOOP=m
|
||||
CONFIG_BLK_DEV_NBD=m
|
||||
CONFIG_BLK_DEV_RAM=y
|
||||
CONFIG_ATA=y
|
||||
@ -85,7 +84,6 @@ CONFIG_SERIAL_8250_SHARE_IRQ=y
|
||||
# CONFIG_HW_RANDOM is not set
|
||||
CONFIG_RTC_CLASS=y
|
||||
CONFIG_RTC_DRV_EFI=y
|
||||
CONFIG_RAW_DRIVER=m
|
||||
CONFIG_HPET=y
|
||||
CONFIG_AGP=m
|
||||
CONFIG_AGP_I460=m
|
||||
|
@ -30,7 +30,6 @@ CONFIG_PATA_CMD64X=y
|
||||
CONFIG_SCSI=y
|
||||
CONFIG_BLK_DEV_SD=y
|
||||
CONFIG_CHR_DEV_ST=y
|
||||
CONFIG_CHR_DEV_OSST=y
|
||||
CONFIG_BLK_DEV_SR=y
|
||||
CONFIG_CHR_DEV_SG=y
|
||||
CONFIG_SCSI_CONSTANTS=y
|
||||
|
@ -318,13 +318,6 @@ struct thread_struct {
|
||||
struct mm_struct;
|
||||
struct task_struct;
|
||||
|
||||
/*
|
||||
* Free all resources held by a thread. This is called after the
|
||||
* parent of DEAD_TASK has collected the exit status of the task via
|
||||
* wait().
|
||||
*/
|
||||
#define release_thread(dead_task)
|
||||
|
||||
/* Get wait channel for task P. */
|
||||
extern unsigned long __get_wchan (struct task_struct *p);
|
||||
|
||||
|
@@ -1793,7 +1793,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset,
 	p->parent = p->real_parent = p->group_leader = p;
 	INIT_LIST_HEAD(&p->children);
 	INIT_LIST_HEAD(&p->sibling);
-	strncpy(p->comm, type, sizeof(p->comm)-1);
+	strscpy(p->comm, type, sizeof(p->comm)-1);
 }
 
 /* Caller prevents this from being called after init */
@@ -552,7 +552,7 @@ setup_arch (char **cmdline_p)
 	ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
 
 	*cmdline_p = __va(ia64_boot_param->command_line);
-	strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
+	strscpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
 
 	efi_init();
 	io_port_init();
@@ -166,3 +166,29 @@ ia64_mremap (unsigned long addr, unsigned long old_len, unsigned long new_len, u
 	force_successful_syscall_return();
 	return addr;
 }
+
+asmlinkage long
+ia64_clock_getres(const clockid_t which_clock, struct __kernel_timespec __user *tp)
+{
+	/*
+	 * ia64's clock_gettime() syscall is implemented as a vdso call
+	 * fsys_clock_gettime(). Currently it handles only
+	 * CLOCK_REALTIME and CLOCK_MONOTONIC. Both are based on
+	 * 'ar.itc' counter which gets incremented at a constant
+	 * frequency. It's usually 400MHz, ~2.5x times slower than CPU
+	 * clock frequency. Which is almost a 1ns hrtimer, but not quite.
+	 *
+	 * Let's special-case these timers to report correct precision
+	 * based on ITC frequency and not HZ frequency for supported
+	 * clocks.
+	 */
+	switch (which_clock) {
+	case CLOCK_REALTIME:
+	case CLOCK_MONOTONIC:
+		s64 tick_ns = DIV_ROUND_UP(NSEC_PER_SEC, local_cpu_data->itc_freq);
+		struct timespec64 rtn_tp = ns_to_timespec64(tick_ns);
+		return put_timespec64(&rtn_tp, tp);
+	}
+
+	return sys_clock_getres(which_clock, tp);
+}
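The reported resolution therefore depends only on the ITC frequency: for the 400 MHz mentioned in the comment, DIV_ROUND_UP(NSEC_PER_SEC, itc_freq) rounds 2.5 ns up to 3 ns, where the old sys_clock_getres() answer was tied to HZ. A tiny standalone check of that arithmetic (the frequency value is an assumption taken from the comment):

```c
#include <stdio.h>

/* Same rounding as the kernel's DIV_ROUND_UP() macro. */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long long nsec_per_sec = 1000000000ULL;
	unsigned long long itc_freq = 400000000ULL;	/* assumed 400 MHz ITC */

	/* Prints 3: the resolution clock_getres() would now report, in ns. */
	printf("%llu\n", DIV_ROUND_UP(nsec_per_sec, itc_freq));
	return 0;
}
```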
@@ -240,7 +240,7 @@
 228	common	timer_delete			sys_timer_delete
 229	common	clock_settime			sys_clock_settime
 230	common	clock_gettime			sys_clock_gettime
-231	common	clock_getres			sys_clock_getres
+231	common	clock_getres			ia64_clock_getres
 232	common	clock_nanosleep			sys_clock_nanosleep
 233	common	fstatfs64			sys_fstatfs64
 234	common	statfs64			sys_statfs64
@ -176,9 +176,6 @@ struct thread_struct {
|
||||
|
||||
struct task_struct;
|
||||
|
||||
/* Free all resources held by a thread. */
|
||||
#define release_thread(thread) do { } while (0)
|
||||
|
||||
enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_HALT, IDLE_NOMWAIT, IDLE_POLL};
|
||||
|
||||
extern unsigned long boot_option_idle_override;
|
||||
|
@ -145,11 +145,6 @@ static inline void start_thread(struct pt_regs * regs, unsigned long pc,
|
||||
/* Forward declaration, a strange C thing */
|
||||
struct task_struct;
|
||||
|
||||
/* Free all resources held by a thread. */
|
||||
static inline void release_thread(struct task_struct *dead_task)
|
||||
{
|
||||
}
|
||||
|
||||
unsigned long __get_wchan(struct task_struct *p);
|
||||
void show_registers(struct pt_regs *regs);
|
||||
|
||||
|
@ -83,7 +83,7 @@ CONFIG_CIFS=y
|
||||
CONFIG_CIFS_STATS2=y
|
||||
CONFIG_ENCRYPTED_KEYS=y
|
||||
CONFIG_DMA_CMA=y
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
|
||||
CONFIG_KGDB=y
|
||||
CONFIG_KGDB_TESTS=y
|
||||
CONFIG_KGDB_KDB=y
|
||||
|
@ -63,11 +63,6 @@ struct thread_struct {
|
||||
.pgdir = swapper_pg_dir, \
|
||||
}
|
||||
|
||||
/* Free all resources held by a thread. */
|
||||
static inline void release_thread(struct task_struct *dead_task)
|
||||
{
|
||||
}
|
||||
|
||||
unsigned long __get_wchan(struct task_struct *p);
|
||||
|
||||
/* The size allocated for kernel stacks. This _must_ be a power of two! */
|
||||
|
@ -72,7 +72,7 @@ CONFIG_LEDS_TRIGGER_TIMER=y
|
||||
CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
|
||||
CONFIG_CRC32_SARWATE=y
|
||||
CONFIG_PRINTK_TIME=y
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
|
||||
CONFIG_DEBUG_INFO_REDUCED=y
|
||||
CONFIG_STRIP_ASM_SYMS=y
|
||||
CONFIG_DEBUG_FS=y
|
||||
|
@ -161,7 +161,7 @@ CONFIG_CRYPTO_SHA1_OCTEON=m
|
||||
CONFIG_CRYPTO_SHA256_OCTEON=m
|
||||
CONFIG_CRYPTO_SHA512_OCTEON=m
|
||||
CONFIG_CRYPTO_DES=y
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
|
||||
CONFIG_DEBUG_FS=y
|
||||
CONFIG_MAGIC_SYSRQ=y
|
||||
# CONFIG_SCHED_DEBUG is not set
|
||||
|
@ -199,7 +199,7 @@ CONFIG_NLS_UTF8=y
|
||||
CONFIG_DMA_CMA=y
|
||||
CONFIG_CMA_SIZE_MBYTES=32
|
||||
CONFIG_PRINTK_TIME=y
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
|
||||
CONFIG_STRIP_ASM_SYMS=y
|
||||
CONFIG_MAGIC_SYSRQ=y
|
||||
CONFIG_DEBUG_FS=y
|
||||
|
@ -113,7 +113,7 @@ CONFIG_PRINTK_TIME=y
|
||||
CONFIG_CONSOLE_LOGLEVEL_DEFAULT=15
|
||||
CONFIG_CONSOLE_LOGLEVEL_QUIET=15
|
||||
CONFIG_MESSAGE_LOGLEVEL_DEFAULT=7
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
|
||||
CONFIG_STRIP_ASM_SYMS=y
|
||||
CONFIG_MAGIC_SYSRQ=y
|
||||
CONFIG_DEBUG_FS=y
|
||||
|
@ -116,7 +116,7 @@ CONFIG_PRINTK_TIME=y
|
||||
CONFIG_CONSOLE_LOGLEVEL_DEFAULT=15
|
||||
CONFIG_CONSOLE_LOGLEVEL_QUIET=15
|
||||
CONFIG_MESSAGE_LOGLEVEL_DEFAULT=7
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
|
||||
CONFIG_STRIP_ASM_SYMS=y
|
||||
CONFIG_MAGIC_SYSRQ=y
|
||||
CONFIG_DEBUG_FS=y
|
||||
|
@ -82,7 +82,7 @@ CONFIG_ROOT_NFS=y
|
||||
# CONFIG_XZ_DEC_ARMTHUMB is not set
|
||||
# CONFIG_XZ_DEC_SPARC is not set
|
||||
CONFIG_PRINTK_TIME=y
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
|
||||
CONFIG_DEBUG_INFO_REDUCED=y
|
||||
CONFIG_DEBUG_FS=y
|
||||
# CONFIG_SCHED_DEBUG is not set
|
||||
|
@ -113,7 +113,7 @@ CONFIG_CRYPTO_LZO=y
|
||||
CONFIG_CRC16=y
|
||||
CONFIG_XZ_DEC=y
|
||||
CONFIG_PRINTK_TIME=y
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
|
||||
CONFIG_STRIP_ASM_SYMS=y
|
||||
CONFIG_DEBUG_FS=y
|
||||
CONFIG_MAGIC_SYSRQ=y
|
||||
|
@ -166,7 +166,7 @@ CONFIG_NLS_UTF8=y
|
||||
CONFIG_FONTS=y
|
||||
CONFIG_FONT_SUN8x16=y
|
||||
CONFIG_PRINTK_TIME=y
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
|
||||
CONFIG_STRIP_ASM_SYMS=y
|
||||
CONFIG_READABLE_ASM=y
|
||||
CONFIG_KGDB=y
|
||||
|
@ -113,7 +113,7 @@ CONFIG_CRYPTO_LZO=y
|
||||
CONFIG_CRC16=y
|
||||
CONFIG_XZ_DEC=y
|
||||
CONFIG_PRINTK_TIME=y
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
|
||||
CONFIG_STRIP_ASM_SYMS=y
|
||||
CONFIG_DEBUG_FS=y
|
||||
CONFIG_MAGIC_SYSRQ=y
|
||||
|
@ -344,9 +344,6 @@ struct thread_struct {
|
||||
|
||||
struct task_struct;
|
||||
|
||||
/* Free all resources held by a thread. */
|
||||
#define release_thread(thread) do { } while(0)
|
||||
|
||||
/*
|
||||
* Do necessary setup to start up a newly executed thread.
|
||||
*/
|
||||
|
@ -74,4 +74,4 @@ CONFIG_NFS_FS=y
|
||||
CONFIG_NFS_V3_ACL=y
|
||||
CONFIG_ROOT_NFS=y
|
||||
CONFIG_SUNRPC_DEBUG=y
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
|
||||
|
@ -71,4 +71,4 @@ CONFIG_NFS_FS=y
|
||||
CONFIG_NFS_V3_ACL=y
|
||||
CONFIG_ROOT_NFS=y
|
||||
CONFIG_SUNRPC_DEBUG=y
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
|
||||
|
@ -64,11 +64,6 @@ extern void start_thread(struct pt_regs *regs, unsigned long pc,
|
||||
|
||||
struct task_struct;
|
||||
|
||||
/* Free all resources held by a thread. */
|
||||
static inline void release_thread(struct task_struct *dead_task)
|
||||
{
|
||||
}
|
||||
|
||||
extern unsigned long __get_wchan(struct task_struct *p);
|
||||
|
||||
#define task_pt_regs(p) \
|
||||
|
@ -72,7 +72,6 @@ struct thread_struct {
|
||||
|
||||
|
||||
void start_thread(struct pt_regs *regs, unsigned long nip, unsigned long sp);
|
||||
void release_thread(struct task_struct *);
|
||||
unsigned long __get_wchan(struct task_struct *p);
|
||||
|
||||
#define cpu_relax() barrier()
|
||||
|
@ -125,10 +125,6 @@ void show_regs(struct pt_regs *regs)
|
||||
show_registers(regs);
|
||||
}
|
||||
|
||||
void release_thread(struct task_struct *dead_task)
|
||||
{
|
||||
}
|
||||
|
||||
/*
|
||||
* Copy the thread-specific (arch specific) info from the current
|
||||
* process to the new one p
|
||||
|
@ -266,9 +266,6 @@ on downward growing arches, it looks like this:
|
||||
|
||||
struct mm_struct;
|
||||
|
||||
/* Free all resources held by a thread. */
|
||||
extern void release_thread(struct task_struct *);
|
||||
|
||||
extern unsigned long __get_wchan(struct task_struct *p);
|
||||
|
||||
#define KSTK_EIP(tsk) ((tsk)->thread.regs.iaoq[0])
|
||||
|
@ -146,10 +146,6 @@ void flush_thread(void)
|
||||
*/
|
||||
}
|
||||
|
||||
void release_thread(struct task_struct *dead_task)
|
||||
{
|
||||
}
|
||||
|
||||
/*
|
||||
* Idle thread support
|
||||
*
|
||||
|
@ -75,7 +75,6 @@ extern int _chrp_type;
|
||||
|
||||
struct task_struct;
|
||||
void start_thread(struct pt_regs *regs, unsigned long fdptr, unsigned long sp);
|
||||
void release_thread(struct task_struct *);
|
||||
|
||||
#define TS_FPR(i) fp_state.fpr[i][TS_FPROFFSET]
|
||||
#define TS_CKFPR(i) ckfp_state.fpr[i][TS_FPROFFSET]
|
||||
|
@ -1655,11 +1655,6 @@ EXPORT_SYMBOL_GPL(set_thread_tidr);
|
||||
|
||||
#endif /* CONFIG_PPC64 */
|
||||
|
||||
void
|
||||
release_thread(struct task_struct *t)
|
||||
{
|
||||
}
|
||||
|
||||
/*
|
||||
* this gets called so that we can store coprocessor state into memory and
|
||||
* copy the current task into the new thread.
|
||||
|
@ -65,11 +65,6 @@ static inline void arch_thread_struct_whitelist(unsigned long *offset,
|
||||
extern void start_thread(struct pt_regs *regs,
|
||||
unsigned long pc, unsigned long sp);
|
||||
|
||||
/* Free all resources held by a thread. */
|
||||
static inline void release_thread(struct task_struct *dead_task)
|
||||
{
|
||||
}
|
||||
|
||||
extern unsigned long __get_wchan(struct task_struct *p);
|
||||
|
||||
|
||||
|
@ -186,9 +186,6 @@ struct pt_regs;
|
||||
void show_registers(struct pt_regs *regs);
|
||||
void show_cacheinfo(struct seq_file *m);
|
||||
|
||||
/* Free all resources held by a thread. */
|
||||
static inline void release_thread(struct task_struct *tsk) { }
|
||||
|
||||
/* Free guarded storage control block */
|
||||
void guarded_storage_release(struct task_struct *tsk);
|
||||
void gs_load_bc_cb(struct pt_regs *regs);
|
||||
|
@ -85,7 +85,7 @@ CONFIG_DEBUG_FS=y
|
||||
CONFIG_DEBUG_KERNEL=y
|
||||
# CONFIG_DEBUG_PREEMPT is not set
|
||||
# CONFIG_DEBUG_BUGVERBOSE is not set
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
|
||||
# CONFIG_FTRACE is not set
|
||||
# CONFIG_CRYPTO_ANSI_CPRNG is not set
|
||||
# CONFIG_CRYPTO_HW is not set
|
||||
|
@ -116,7 +116,7 @@ CONFIG_MAGIC_SYSRQ=y
|
||||
CONFIG_DEBUG_KERNEL=y
|
||||
CONFIG_DEBUG_SHIRQ=y
|
||||
CONFIG_DETECT_HUNG_TASK=y
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
|
||||
CONFIG_DEBUG_VM=y
|
||||
CONFIG_DWARF_UNWINDER=y
|
||||
# CONFIG_CRYPTO_ANSI_CPRNG is not set
|
||||
|
@ -107,7 +107,7 @@ CONFIG_DEBUG_SHIRQ=y
|
||||
CONFIG_DETECT_HUNG_TASK=y
|
||||
# CONFIG_SCHED_DEBUG is not set
|
||||
CONFIG_TIMER_STATS=y
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
|
||||
CONFIG_CRYPTO=y
|
||||
CONFIG_CRYPTO_MD5=y
|
||||
CONFIG_CRYPTO_DES=y
|
||||
|
@ -84,7 +84,7 @@ CONFIG_MAGIC_SYSRQ=y
|
||||
CONFIG_DEBUG_KERNEL=y
|
||||
# CONFIG_SCHED_DEBUG is not set
|
||||
CONFIG_DEBUG_KOBJECT=y
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
|
||||
CONFIG_FRAME_POINTER=y
|
||||
CONFIG_CRC_CCITT=m
|
||||
CONFIG_CRC16=m
|
||||
|
@ -79,5 +79,5 @@ CONFIG_DETECT_HUNG_TASK=y
|
||||
CONFIG_DEBUG_RT_MUTEXES=y
|
||||
CONFIG_DEBUG_LOCK_ALLOC=y
|
||||
CONFIG_DEBUG_SPINLOCK_SLEEP=y
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
|
||||
CONFIG_DEBUG_SG=y
|
||||
|
@ -101,7 +101,7 @@ CONFIG_DEBUG_FS=y
|
||||
CONFIG_DEBUG_KERNEL=y
|
||||
CONFIG_DETECT_HUNG_TASK=y
|
||||
# CONFIG_DEBUG_PREEMPT is not set
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
|
||||
CONFIG_CRYPTO_ECB=m
|
||||
CONFIG_CRYPTO_PCBC=m
|
||||
CONFIG_CRYPTO_HMAC=y
|
||||
|
@ -96,7 +96,7 @@ CONFIG_DEBUG_KERNEL=y
|
||||
# CONFIG_DEBUG_PREEMPT is not set
|
||||
CONFIG_DEBUG_LOCK_ALLOC=y
|
||||
CONFIG_DEBUG_LOCKING_API_SELFTESTS=y
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
|
||||
CONFIG_SH_STANDARD_BIOS=y
|
||||
CONFIG_DEBUG_STACK_USAGE=y
|
||||
CONFIG_4KSTACKS=y
|
||||
|
@ -112,7 +112,7 @@ CONFIG_DETECT_HUNG_TASK=y
|
||||
CONFIG_DEBUG_OBJECTS=y
|
||||
CONFIG_DEBUG_MUTEXES=y
|
||||
CONFIG_DEBUG_SPINLOCK_SLEEP=y
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
|
||||
CONFIG_DEBUG_VM=y
|
||||
CONFIG_DEBUG_LIST=y
|
||||
CONFIG_DEBUG_SG=y
|
||||
|
@ -131,7 +131,7 @@ CONFIG_DEBUG_KERNEL=y
|
||||
CONFIG_DETECT_HUNG_TASK=y
|
||||
# CONFIG_SCHED_DEBUG is not set
|
||||
CONFIG_TIMER_STATS=y
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
|
||||
CONFIG_SH_STANDARD_BIOS=y
|
||||
CONFIG_CRYPTO_MD5=y
|
||||
CONFIG_CRYPTO_DES=y
|
||||
|
@ -93,7 +93,7 @@ CONFIG_CRAMFS=y
|
||||
CONFIG_NFS_FS=y
|
||||
CONFIG_ROOT_NFS=y
|
||||
CONFIG_DEBUG_KERNEL=y
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
|
||||
CONFIG_FRAME_POINTER=y
|
||||
CONFIG_CRYPTO_ECB=m
|
||||
CONFIG_CRYPTO_PCBC=m
|
||||
|
@ -121,7 +121,7 @@ CONFIG_NLS_CODEPAGE_437=y
|
||||
CONFIG_NLS_CODEPAGE_932=y
|
||||
CONFIG_NLS_ISO8859_1=y
|
||||
CONFIG_DEBUG_KERNEL=y
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
|
||||
CONFIG_FRAME_POINTER=y
|
||||
# CONFIG_CRYPTO_ANSI_CPRNG is not set
|
||||
CONFIG_CRC_CCITT=y
|
||||
|
@ -159,7 +159,7 @@ CONFIG_DEBUG_FS=y
|
||||
CONFIG_DEBUG_KERNEL=y
|
||||
# CONFIG_DETECT_SOFTLOCKUP is not set
|
||||
# CONFIG_SCHED_DEBUG is not set
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
|
||||
CONFIG_FRAME_POINTER=y
|
||||
CONFIG_SH_STANDARD_BIOS=y
|
||||
CONFIG_CRYPTO_NULL=y
|
||||
|
@ -80,6 +80,6 @@ CONFIG_NLS_ISO8859_1=y
|
||||
CONFIG_DEBUG_KERNEL=y
|
||||
# CONFIG_SCHED_DEBUG is not set
|
||||
# CONFIG_DEBUG_BUGVERBOSE is not set
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
|
||||
# CONFIG_FTRACE is not set
|
||||
# CONFIG_CRYPTO_ANSI_CPRNG is not set
|
||||
|
@ -141,7 +141,7 @@ CONFIG_DEBUG_KMEMLEAK=y
|
||||
CONFIG_DEBUG_SPINLOCK=y
|
||||
CONFIG_DEBUG_MUTEXES=y
|
||||
CONFIG_DEBUG_SPINLOCK_SLEEP=y
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
|
||||
CONFIG_LATENCYTOP=y
|
||||
# CONFIG_FTRACE is not set
|
||||
CONFIG_CRYPTO_HMAC=y
|
||||
|
@ -138,7 +138,7 @@ CONFIG_PRINTK_TIME=y
|
||||
CONFIG_DEBUG_FS=y
|
||||
CONFIG_DEBUG_KERNEL=y
|
||||
CONFIG_DETECT_HUNG_TASK=y
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
|
||||
CONFIG_FRAME_POINTER=y
|
||||
# CONFIG_FTRACE is not set
|
||||
# CONFIG_DUMP_CODE is not set
|
||||
|
@ -127,9 +127,6 @@ struct task_struct;
|
||||
|
||||
extern void start_thread(struct pt_regs *regs, unsigned long new_pc, unsigned long new_sp);
|
||||
|
||||
/* Free all resources held by a thread. */
|
||||
extern void release_thread(struct task_struct *);
|
||||
|
||||
/*
|
||||
* FPU lazy state save handling.
|
||||
*/
|
||||
|
@ -84,11 +84,6 @@ void flush_thread(void)
|
||||
#endif
|
||||
}
|
||||
|
||||
void release_thread(struct task_struct *dead_task)
|
||||
{
|
||||
/* do nothing */
|
||||
}
|
||||
|
||||
asmlinkage void ret_from_fork(void);
|
||||
asmlinkage void ret_from_kernel_thread(void);
|
||||
|
||||
|
@ -80,9 +80,6 @@ static inline void start_thread(struct pt_regs * regs, unsigned long pc,
|
||||
: "memory");
|
||||
}
|
||||
|
||||
/* Free all resources held by a thread. */
|
||||
#define release_thread(tsk) do { } while(0)
|
||||
|
||||
unsigned long __get_wchan(struct task_struct *);
|
||||
|
||||
#define task_pt_regs(tsk) ((tsk)->thread.kregs)
|
||||
|
@ -176,9 +176,6 @@ do { \
|
||||
regs->tstate &= ~TSTATE_PEF; \
|
||||
} while (0)
|
||||
|
||||
/* Free all resources held by a thread. */
|
||||
#define release_thread(tsk) do { } while (0)
|
||||
|
||||
unsigned long __get_wchan(struct task_struct *task);
|
||||
|
||||
#define task_pt_regs(tsk) (task_thread_info(tsk)->kregs)
|
||||
|
@ -69,5 +69,5 @@ CONFIG_JOLIET=y
|
||||
CONFIG_PROC_KCORE=y
|
||||
CONFIG_TMPFS=y
|
||||
CONFIG_NLS=y
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
|
||||
CONFIG_DEBUG_KERNEL=y
|
||||
|
@ -67,6 +67,6 @@ CONFIG_JOLIET=y
|
||||
CONFIG_PROC_KCORE=y
|
||||
CONFIG_TMPFS=y
|
||||
CONFIG_NLS=y
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
|
||||
CONFIG_FRAME_WARN=1024
|
||||
CONFIG_DEBUG_KERNEL=y
|
||||
|
@ -55,10 +55,6 @@ struct thread_struct {
|
||||
.request = { 0 } \
|
||||
}
|
||||
|
||||
static inline void release_thread(struct task_struct *task)
|
||||
{
|
||||
}
|
||||
|
||||
/*
|
||||
* User space process size: 3GB (default).
|
||||
*/
|
||||
|
@ -587,9 +587,6 @@ static inline void load_sp0(unsigned long sp0)
|
||||
|
||||
#endif /* CONFIG_PARAVIRT_XXL */
|
||||
|
||||
/* Free all resources held by a thread. */
|
||||
extern void release_thread(struct task_struct *);
|
||||
|
||||
unsigned long __get_wchan(struct task_struct *p);
|
||||
|
||||
/*
|
||||
|
@ -120,7 +120,7 @@ CONFIG_NLS_CODEPAGE_437=y
|
||||
CONFIG_NLS_ISO8859_1=y
|
||||
CONFIG_PRINTK_TIME=y
|
||||
CONFIG_DYNAMIC_DEBUG=y
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
|
||||
CONFIG_MAGIC_SYSRQ=y
|
||||
CONFIG_LOCKUP_DETECTOR=y
|
||||
# CONFIG_SCHED_DEBUG is not set
|
||||
|
@ -100,7 +100,7 @@ CONFIG_NLS_CODEPAGE_437=y
|
||||
CONFIG_NLS_ISO8859_1=y
|
||||
CONFIG_PRINTK_TIME=y
|
||||
CONFIG_DYNAMIC_DEBUG=y
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
|
||||
CONFIG_MAGIC_SYSRQ=y
|
||||
CONFIG_LOCKUP_DETECTOR=y
|
||||
# CONFIG_SCHED_DEBUG is not set
|
||||
|
@ -107,7 +107,7 @@ CONFIG_NLS_CODEPAGE_437=y
|
||||
CONFIG_NLS_ISO8859_1=y
|
||||
CONFIG_PRINTK_TIME=y
|
||||
CONFIG_DYNAMIC_DEBUG=y
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
|
||||
CONFIG_MAGIC_SYSRQ=y
|
||||
CONFIG_LOCKUP_DETECTOR=y
|
||||
# CONFIG_SCHED_DEBUG is not set
|
||||
|
@ -105,7 +105,7 @@ CONFIG_NLS_CODEPAGE_437=y
|
||||
CONFIG_NLS_ISO8859_1=y
|
||||
CONFIG_PRINTK_TIME=y
|
||||
CONFIG_DYNAMIC_DEBUG=y
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
|
||||
# CONFIG_FRAME_POINTER is not set
|
||||
CONFIG_MAGIC_SYSRQ=y
|
||||
CONFIG_DEBUG_VM=y
|
||||
|
@ -111,7 +111,7 @@ CONFIG_NLS_CODEPAGE_437=y
|
||||
CONFIG_NLS_ISO8859_1=y
|
||||
CONFIG_PRINTK_TIME=y
|
||||
CONFIG_DYNAMIC_DEBUG=y
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
|
||||
CONFIG_MAGIC_SYSRQ=y
|
||||
CONFIG_DEBUG_VM=y
|
||||
CONFIG_LOCKUP_DETECTOR=y
|
||||
|
@ -97,7 +97,7 @@ CONFIG_CRYPTO_DEV_VIRTIO=y
|
||||
CONFIG_FONTS=y
|
||||
CONFIG_PRINTK_TIME=y
|
||||
CONFIG_DYNAMIC_DEBUG=y
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
|
||||
CONFIG_MAGIC_SYSRQ=y
|
||||
# CONFIG_SCHED_DEBUG is not set
|
||||
CONFIG_SCHEDSTATS=y
|
||||
|
@ -102,7 +102,7 @@ CONFIG_CRYPTO_LZO=y
|
||||
CONFIG_CRYPTO_ANSI_CPRNG=y
|
||||
CONFIG_PRINTK_TIME=y
|
||||
CONFIG_DYNAMIC_DEBUG=y
|
||||
CONFIG_DEBUG_INFO=y
|
||||
CONFIG_DEBUG_INFO_DWARF_TOOLCHAIN_DEFAULT=y
|
||||
CONFIG_MAGIC_SYSRQ=y
|
||||
CONFIG_DETECT_HUNG_TASK=y
|
||||
# CONFIG_SCHED_DEBUG is not set
|
||||
|
@ -224,9 +224,6 @@ struct thread_struct {
|
||||
struct task_struct;
|
||||
struct mm_struct;
|
||||
|
||||
/* Free all resources held by a thread. */
|
||||
#define release_thread(thread) do { } while(0)
|
||||
|
||||
extern unsigned long __get_wchan(struct task_struct *p);
|
||||
|
||||
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->pc)
|
||||
|
fs/aio.c (9 lines changed)
@@ -951,16 +951,13 @@ static bool __get_reqs_available(struct kioctx *ctx)
 	local_irq_save(flags);
 	kcpu = this_cpu_ptr(ctx->cpu);
 	if (!kcpu->reqs_available) {
-		int old, avail = atomic_read(&ctx->reqs_available);
+		int avail = atomic_read(&ctx->reqs_available);
 
 		do {
 			if (avail < ctx->req_batch)
 				goto out;
-
-			old = avail;
-			avail = atomic_cmpxchg(&ctx->reqs_available,
-					       avail, avail - ctx->req_batch);
-		} while (avail != old);
+		} while (!atomic_try_cmpxchg(&ctx->reqs_available,
+					     &avail, avail - ctx->req_batch));
 
 		kcpu->reqs_available += ctx->req_batch;
 	}
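atomic_try_cmpxchg() returns a bool and, on failure, writes the freshly observed value back through its "old" pointer, so the retry loop no longer needs a separate copy of the previous value or an explicit equality test. A self-contained sketch of the idiom with an invented counter and batch size:

```c
#include <linux/atomic.h>
#include <linux/types.h>

/* Hypothetical shared budget that is claimed in fixed-size batches. */
static atomic_t budget = ATOMIC_INIT(1024);

static bool claim_batch(int batch)
{
	int avail = atomic_read(&budget);

	do {
		if (avail < batch)
			return false;	/* nothing left to claim */
		/*
		 * On a lost race, atomic_try_cmpxchg() refreshes 'avail'
		 * with the current value and the loop simply retries.
		 */
	} while (!atomic_try_cmpxchg(&budget, &avail, avail - batch));

	return true;
}
```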
fs/buffer.c (14 lines changed)
@@ -1453,19 +1453,15 @@ EXPORT_SYMBOL(set_bh_page);
 
 static void discard_buffer(struct buffer_head * bh)
 {
-	unsigned long b_state, b_state_old;
+	unsigned long b_state;
 
 	lock_buffer(bh);
 	clear_buffer_dirty(bh);
 	bh->b_bdev = NULL;
-	b_state = bh->b_state;
-	for (;;) {
-		b_state_old = cmpxchg(&bh->b_state, b_state,
-				      (b_state & ~BUFFER_FLAGS_DISCARD));
-		if (b_state_old == b_state)
-			break;
-		b_state = b_state_old;
-	}
+	b_state = READ_ONCE(bh->b_state);
+	do {
+	} while (!try_cmpxchg(&bh->b_state, &b_state,
+			      b_state & ~BUFFER_FLAGS_DISCARD));
 	unlock_buffer(bh);
 }
 
@@ -1065,7 +1065,7 @@ static inline bool list_add_tail_lockless(struct list_head *new,
 	 * added to the list from another CPU: the winner observes
 	 * new->next == new.
 	 */
-	if (cmpxchg(&new->next, new, head) != new)
+	if (!try_cmpxchg(&new->next, &new, head))
 		return false;
 
 	/*
@ -21,7 +21,6 @@ void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
|
||||
int pagenum;
|
||||
int bytes_read;
|
||||
int bytes_to_read;
|
||||
void *vaddr;
|
||||
|
||||
off += node->page_offset;
|
||||
pagenum = off >> PAGE_SHIFT;
|
||||
@ -33,9 +32,7 @@ void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
|
||||
page = node->page[pagenum];
|
||||
bytes_to_read = min_t(int, len - bytes_read, PAGE_SIZE - off);
|
||||
|
||||
vaddr = kmap_atomic(page);
|
||||
memcpy(buf + bytes_read, vaddr + off, bytes_to_read);
|
||||
kunmap_atomic(vaddr);
|
||||
memcpy_from_page(buf + bytes_read, page, off, bytes_to_read);
|
||||
|
||||
pagenum++;
|
||||
off = 0; /* page offset only applies to the first page */
|
||||
@ -80,8 +77,7 @@ void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
|
||||
off += node->page_offset;
|
||||
page = node->page[0];
|
||||
|
||||
memcpy(kmap(page) + off, buf, len);
|
||||
kunmap(page);
|
||||
memcpy_to_page(page, off, buf, len);
|
||||
set_page_dirty(page);
|
||||
}
|
||||
|
||||
@ -105,8 +101,7 @@ void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
|
||||
off += node->page_offset;
|
||||
page = node->page[0];
|
||||
|
||||
memset(kmap(page) + off, 0, len);
|
||||
kunmap(page);
|
||||
memzero_page(page, off, len);
|
||||
set_page_dirty(page);
|
||||
}
|
||||
|
||||
@ -123,9 +118,7 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
|
||||
src_page = src_node->page[0];
|
||||
dst_page = dst_node->page[0];
|
||||
|
||||
memcpy(kmap(dst_page) + dst, kmap(src_page) + src, len);
|
||||
kunmap(src_page);
|
||||
kunmap(dst_page);
|
||||
memcpy_page(dst_page, dst, src_page, src, len);
|
||||
set_page_dirty(dst_page);
|
||||
}
|
||||
|
||||
@ -140,9 +133,9 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
|
||||
src += node->page_offset;
|
||||
dst += node->page_offset;
|
||||
page = node->page[0];
|
||||
ptr = kmap(page);
|
||||
ptr = kmap_local_page(page);
|
||||
memmove(ptr + dst, ptr + src, len);
|
||||
kunmap(page);
|
||||
kunmap_local(ptr);
|
||||
set_page_dirty(page);
|
||||
}
|
||||
|
||||
@ -346,13 +339,14 @@ struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
|
||||
if (!test_bit(HFS_BNODE_NEW, &node->flags))
|
||||
return node;
|
||||
|
||||
desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) + node->page_offset);
|
||||
desc = (struct hfs_bnode_desc *)(kmap_local_page(node->page[0]) +
|
||||
node->page_offset);
|
||||
node->prev = be32_to_cpu(desc->prev);
|
||||
node->next = be32_to_cpu(desc->next);
|
||||
node->num_recs = be16_to_cpu(desc->num_recs);
|
||||
node->type = desc->type;
|
||||
node->height = desc->height;
|
||||
kunmap(node->page[0]);
|
||||
kunmap_local(desc);
|
||||
|
||||
switch (node->type) {
|
||||
case HFS_NODE_HEADER:
|
||||
@ -436,14 +430,12 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
|
||||
}
|
||||
|
||||
pagep = node->page;
|
||||
memset(kmap(*pagep) + node->page_offset, 0,
|
||||
min((int)PAGE_SIZE, (int)tree->node_size));
|
||||
memzero_page(*pagep, node->page_offset,
|
||||
min((int)PAGE_SIZE, (int)tree->node_size));
|
||||
set_page_dirty(*pagep);
|
||||
kunmap(*pagep);
|
||||
for (i = 1; i < tree->pages_per_bnode; i++) {
|
||||
memset(kmap(*++pagep), 0, PAGE_SIZE);
|
||||
memzero_page(*++pagep, 0, PAGE_SIZE);
|
||||
set_page_dirty(*pagep);
|
||||
kunmap(*pagep);
|
||||
}
|
||||
clear_bit(HFS_BNODE_NEW, &node->flags);
|
||||
wake_up(&node->lock_wq);
|
||||
|
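The hfs conversions above swap kmap()/kunmap() pairs for either kmap_local_page()/kunmap_local() or the highmem helpers built on top of them (memcpy_from_page(), memcpy_to_page(), memzero_page(), memcpy_page()). A hedged before/after sketch of the read side — the two function names are invented, only the helpers are real:

```c
#include <linux/highmem.h>
#include <linux/string.h>

/* Old style: a global, sleepable mapping held across the whole copy. */
static void read_bytes_old(struct page *page, void *buf, size_t off, size_t len)
{
	void *vaddr = kmap(page);

	memcpy(buf, vaddr + off, len);
	kunmap(page);
}

/* New style: the helper maps locally, copies, and unmaps in one call. */
static void read_bytes_new(struct page *page, void *buf, size_t off, size_t len)
{
	memcpy_from_page(buf, page, off, len);
}
```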
@ -80,7 +80,8 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
|
||||
goto free_inode;
|
||||
|
||||
/* Load the header */
|
||||
head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
|
||||
head = (struct hfs_btree_header_rec *)(kmap_local_page(page) +
|
||||
sizeof(struct hfs_bnode_desc));
|
||||
tree->root = be32_to_cpu(head->root);
|
||||
tree->leaf_count = be32_to_cpu(head->leaf_count);
|
||||
tree->leaf_head = be32_to_cpu(head->leaf_head);
|
||||
@ -119,11 +120,12 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id, btree_keycmp ke
|
||||
tree->node_size_shift = ffs(size) - 1;
|
||||
tree->pages_per_bnode = (tree->node_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
|
||||
|
||||
kunmap(page);
|
||||
kunmap_local(head);
|
||||
put_page(page);
|
||||
return tree;
|
||||
|
||||
fail_page:
|
||||
kunmap_local(head);
|
||||
put_page(page);
|
||||
free_inode:
|
||||
tree->inode->i_mapping->a_ops = &hfs_aops;
|
||||
@ -169,7 +171,8 @@ void hfs_btree_write(struct hfs_btree *tree)
|
||||
return;
|
||||
/* Load the header */
|
||||
page = node->page[0];
|
||||
head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));
|
||||
head = (struct hfs_btree_header_rec *)(kmap_local_page(page) +
|
||||
sizeof(struct hfs_bnode_desc));
|
||||
|
||||
head->root = cpu_to_be32(tree->root);
|
||||
head->leaf_count = cpu_to_be32(tree->leaf_count);
|
||||
@ -180,7 +183,7 @@ void hfs_btree_write(struct hfs_btree *tree)
|
||||
head->attributes = cpu_to_be32(tree->attributes);
|
||||
head->depth = cpu_to_be16(tree->depth);
|
||||
|
||||
kunmap(page);
|
||||
kunmap_local(head);
|
||||
set_page_dirty(page);
|
||||
hfs_bnode_put(node);
|
||||
}
|
||||
@ -268,7 +271,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
|
||||
|
||||
off += node->page_offset;
|
||||
pagep = node->page + (off >> PAGE_SHIFT);
|
||||
data = kmap(*pagep);
|
||||
data = kmap_local_page(*pagep);
|
||||
off &= ~PAGE_MASK;
|
||||
idx = 0;
|
||||
|
||||
@ -281,7 +284,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
|
||||
idx += i;
|
||||
data[off] |= m;
|
||||
set_page_dirty(*pagep);
|
||||
kunmap(*pagep);
|
||||
kunmap_local(data);
|
||||
tree->free_nodes--;
|
||||
mark_inode_dirty(tree->inode);
|
||||
hfs_bnode_put(node);
|
||||
@ -290,14 +293,14 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
|
||||
}
|
||||
}
|
||||
if (++off >= PAGE_SIZE) {
|
||||
kunmap(*pagep);
|
||||
data = kmap(*++pagep);
|
||||
kunmap_local(data);
|
||||
data = kmap_local_page(*++pagep);
|
||||
off = 0;
|
||||
}
|
||||
idx += 8;
|
||||
len--;
|
||||
}
|
||||
kunmap(*pagep);
|
||||
kunmap_local(data);
|
||||
nidx = node->next;
|
||||
if (!nidx) {
|
||||
printk(KERN_DEBUG "create new bmap node...\n");
|
||||
@ -313,7 +316,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
|
||||
off = off16;
|
||||
off += node->page_offset;
|
||||
pagep = node->page + (off >> PAGE_SHIFT);
|
||||
data = kmap(*pagep);
|
||||
data = kmap_local_page(*pagep);
|
||||
off &= ~PAGE_MASK;
|
||||
}
|
||||
}
|
||||
@ -360,20 +363,20 @@ void hfs_bmap_free(struct hfs_bnode *node)
|
||||
}
|
||||
off += node->page_offset + nidx / 8;
|
||||
page = node->page[off >> PAGE_SHIFT];
|
||||
data = kmap(page);
|
||||
data = kmap_local_page(page);
|
||||
off &= ~PAGE_MASK;
|
||||
m = 1 << (~nidx & 7);
|
||||
byte = data[off];
|
||||
if (!(byte & m)) {
|
||||
pr_crit("trying to free free bnode %u(%d)\n",
|
||||
node->this, node->type);
|
||||
kunmap(page);
|
||||
kunmap_local(data);
|
||||
hfs_bnode_put(node);
|
||||
return;
|
||||
}
|
||||
data[off] = byte & ~m;
|
||||
set_page_dirty(page);
|
||||
kunmap(page);
|
||||
kunmap_local(data);
|
||||
hfs_bnode_put(node);
|
||||
tree->free_nodes++;
|
||||
mark_inode_dirty(tree->inode);
|
||||
|
@ -39,7 +39,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size,
|
||||
start = size;
|
||||
goto out;
|
||||
}
|
||||
pptr = kmap(page);
|
||||
pptr = kmap_local_page(page);
|
||||
curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
|
||||
i = offset % 32;
|
||||
offset &= ~(PAGE_CACHE_BITS - 1);
|
||||
@ -74,7 +74,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size,
|
||||
}
|
||||
curr++;
|
||||
}
|
||||
kunmap(page);
|
||||
kunmap_local(pptr);
|
||||
offset += PAGE_CACHE_BITS;
|
||||
if (offset >= size)
|
||||
break;
|
||||
@ -84,7 +84,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size,
|
||||
start = size;
|
||||
goto out;
|
||||
}
|
||||
curr = pptr = kmap(page);
|
||||
curr = pptr = kmap_local_page(page);
|
||||
if ((size ^ offset) / PAGE_CACHE_BITS)
|
||||
end = pptr + PAGE_CACHE_BITS / 32;
|
||||
else
|
||||
@ -127,7 +127,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size,
|
||||
len -= 32;
|
||||
}
|
||||
set_page_dirty(page);
|
||||
kunmap(page);
|
||||
kunmap_local(pptr);
|
||||
offset += PAGE_CACHE_BITS;
|
||||
page = read_mapping_page(mapping, offset / PAGE_CACHE_BITS,
|
||||
NULL);
|
||||
@ -135,7 +135,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size,
|
||||
start = size;
|
||||
goto out;
|
||||
}
|
||||
pptr = kmap(page);
|
||||
pptr = kmap_local_page(page);
|
||||
curr = pptr;
|
||||
end = pptr + PAGE_CACHE_BITS / 32;
|
||||
}
|
||||
@ -151,7 +151,7 @@ int hfsplus_block_allocate(struct super_block *sb, u32 size,
|
||||
done:
|
||||
*curr = cpu_to_be32(n);
|
||||
set_page_dirty(page);
|
||||
kunmap(page);
|
||||
kunmap_local(pptr);
|
||||
*max = offset + (curr - pptr) * 32 + i - start;
|
||||
sbi->free_blocks -= *max;
|
||||
hfsplus_mark_mdb_dirty(sb);
|
||||
@ -185,7 +185,7 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
|
||||
page = read_mapping_page(mapping, pnr, NULL);
|
||||
if (IS_ERR(page))
|
||||
goto kaboom;
|
||||
pptr = kmap(page);
|
||||
pptr = kmap_local_page(page);
|
||||
curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32;
|
||||
end = pptr + PAGE_CACHE_BITS / 32;
|
||||
len = count;
|
||||
@ -215,11 +215,11 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
|
||||
if (!count)
|
||||
break;
|
||||
set_page_dirty(page);
|
||||
kunmap(page);
|
||||
kunmap_local(pptr);
|
||||
page = read_mapping_page(mapping, ++pnr, NULL);
|
||||
if (IS_ERR(page))
|
||||
goto kaboom;
|
||||
pptr = kmap(page);
|
||||
pptr = kmap_local_page(page);
|
||||
curr = pptr;
|
||||
end = pptr + PAGE_CACHE_BITS / 32;
|
||||
}
|
||||
@ -231,7 +231,7 @@ int hfsplus_block_free(struct super_block *sb, u32 offset, u32 count)
|
||||
}
|
||||
out:
|
||||
set_page_dirty(page);
|
||||
kunmap(page);
|
||||
kunmap_local(pptr);
|
||||
sbi->free_blocks += len;
|
||||
hfsplus_mark_mdb_dirty(sb);
|
||||
mutex_unlock(&sbi->alloc_mutex);
|
||||
|
@ -29,14 +29,12 @@ void hfs_bnode_read(struct hfs_bnode *node, void *buf, int off, int len)
|
||||
off &= ~PAGE_MASK;
|
||||
|
||||
l = min_t(int, len, PAGE_SIZE - off);
|
||||
memcpy(buf, kmap(*pagep) + off, l);
|
||||
kunmap(*pagep);
|
||||
memcpy_from_page(buf, *pagep, off, l);
|
||||
|
||||
while ((len -= l) != 0) {
|
||||
buf += l;
|
||||
l = min_t(int, len, PAGE_SIZE);
|
||||
memcpy(buf, kmap(*++pagep), l);
|
||||
kunmap(*pagep);
|
||||
memcpy_from_page(buf, *++pagep, 0, l);
|
||||
}
|
||||
}
|
||||
|
||||
@ -82,16 +80,14 @@ void hfs_bnode_write(struct hfs_bnode *node, void *buf, int off, int len)
|
||||
off &= ~PAGE_MASK;
|
||||
|
||||
l = min_t(int, len, PAGE_SIZE - off);
|
||||
memcpy(kmap(*pagep) + off, buf, l);
|
||||
memcpy_to_page(*pagep, off, buf, l);
|
||||
set_page_dirty(*pagep);
|
||||
kunmap(*pagep);
|
||||
|
||||
while ((len -= l) != 0) {
|
||||
buf += l;
|
||||
l = min_t(int, len, PAGE_SIZE);
|
||||
memcpy(kmap(*++pagep), buf, l);
|
||||
memcpy_to_page(*++pagep, 0, buf, l);
|
||||
set_page_dirty(*pagep);
|
||||
kunmap(*pagep);
|
||||
}
|
||||
}
|
||||
|
||||
@ -112,15 +108,13 @@ void hfs_bnode_clear(struct hfs_bnode *node, int off, int len)
|
||||
off &= ~PAGE_MASK;
|
||||
|
||||
l = min_t(int, len, PAGE_SIZE - off);
|
||||
memset(kmap(*pagep) + off, 0, l);
|
||||
memzero_page(*pagep, off, l);
|
||||
set_page_dirty(*pagep);
|
||||
kunmap(*pagep);
|
||||
|
||||
while ((len -= l) != 0) {
|
||||
l = min_t(int, len, PAGE_SIZE);
|
||||
memset(kmap(*++pagep), 0, l);
|
||||
memzero_page(*++pagep, 0, l);
|
||||
set_page_dirty(*pagep);
|
||||
kunmap(*pagep);
|
||||
}
|
||||
}
|
||||
|
||||
@ -142,24 +136,20 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
|
||||
|
||||
if (src == dst) {
|
||||
l = min_t(int, len, PAGE_SIZE - src);
|
||||
memcpy(kmap(*dst_page) + src, kmap(*src_page) + src, l);
|
||||
kunmap(*src_page);
|
||||
memcpy_page(*dst_page, src, *src_page, src, l);
|
||||
set_page_dirty(*dst_page);
|
||||
kunmap(*dst_page);
|
||||
|
||||
while ((len -= l) != 0) {
|
||||
l = min_t(int, len, PAGE_SIZE);
|
||||
memcpy(kmap(*++dst_page), kmap(*++src_page), l);
|
||||
kunmap(*src_page);
|
||||
memcpy_page(*++dst_page, 0, *++src_page, 0, l);
|
||||
set_page_dirty(*dst_page);
|
||||
kunmap(*dst_page);
|
||||
}
|
||||
} else {
|
||||
void *src_ptr, *dst_ptr;
|
||||
|
||||
do {
|
||||
src_ptr = kmap(*src_page) + src;
|
||||
dst_ptr = kmap(*dst_page) + dst;
|
||||
dst_ptr = kmap_local_page(*dst_page) + dst;
|
||||
src_ptr = kmap_local_page(*src_page) + src;
|
||||
if (PAGE_SIZE - src < PAGE_SIZE - dst) {
|
||||
l = PAGE_SIZE - src;
|
||||
src = 0;
|
||||
@ -171,9 +161,9 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
|
||||
}
|
||||
l = min(len, l);
|
||||
memcpy(dst_ptr, src_ptr, l);
|
||||
kunmap(*src_page);
|
||||
kunmap_local(src_ptr);
|
||||
set_page_dirty(*dst_page);
|
||||
kunmap(*dst_page);
|
||||
kunmap_local(dst_ptr);
|
||||
if (!dst)
|
||||
dst_page++;
|
||||
else
|
||||
@ -185,6 +175,7 @@ void hfs_bnode_copy(struct hfs_bnode *dst_node, int dst,
|
||||
void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
|
||||
{
|
||||
struct page **src_page, **dst_page;
|
||||
void *src_ptr, *dst_ptr;
|
||||
int l;
|
||||
|
||||
hfs_dbg(BNODE_MOD, "movebytes: %u,%u,%u\n", dst, src, len);
|
||||
@ -202,27 +193,28 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
|
||||
|
||||
if (src == dst) {
|
||||
while (src < len) {
|
||||
memmove(kmap(*dst_page), kmap(*src_page), src);
|
||||
kunmap(*src_page);
|
||||
dst_ptr = kmap_local_page(*dst_page);
|
||||
src_ptr = kmap_local_page(*src_page);
|
||||
memmove(dst_ptr, src_ptr, src);
|
||||
kunmap_local(src_ptr);
|
||||
set_page_dirty(*dst_page);
|
||||
kunmap(*dst_page);
|
||||
kunmap_local(dst_ptr);
|
||||
len -= src;
|
||||
src = PAGE_SIZE;
|
||||
src_page--;
|
||||
dst_page--;
|
||||
}
|
||||
src -= len;
|
||||
memmove(kmap(*dst_page) + src,
|
||||
kmap(*src_page) + src, len);
|
||||
kunmap(*src_page);
|
||||
dst_ptr = kmap_local_page(*dst_page);
|
||||
src_ptr = kmap_local_page(*src_page);
|
||||
memmove(dst_ptr + src, src_ptr + src, len);
|
||||
kunmap_local(src_ptr);
|
||||
set_page_dirty(*dst_page);
|
||||
kunmap(*dst_page);
|
||||
kunmap_local(dst_ptr);
|
||||
} else {
|
||||
void *src_ptr, *dst_ptr;
|
||||
|
||||
do {
|
||||
src_ptr = kmap(*src_page) + src;
|
||||
dst_ptr = kmap(*dst_page) + dst;
|
||||
dst_ptr = kmap_local_page(*dst_page) + dst;
|
||||
src_ptr = kmap_local_page(*src_page) + src;
|
||||
if (src < dst) {
|
||||
l = src;
|
||||
src = PAGE_SIZE;
|
||||
@ -234,9 +226,9 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
|
||||
}
|
||||
l = min(len, l);
|
||||
memmove(dst_ptr - l, src_ptr - l, l);
|
||||
kunmap(*src_page);
|
||||
kunmap_local(src_ptr);
|
||||
set_page_dirty(*dst_page);
|
||||
kunmap(*dst_page);
|
||||
kunmap_local(dst_ptr);
|
||||
if (dst == PAGE_SIZE)
|
||||
dst_page--;
|
||||
else
|
||||
@ -251,26 +243,27 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
|
||||
|
||||
if (src == dst) {
|
||||
l = min_t(int, len, PAGE_SIZE - src);
|
||||
memmove(kmap(*dst_page) + src,
|
||||
kmap(*src_page) + src, l);
|
||||
kunmap(*src_page);
|
||||
|
||||
dst_ptr = kmap_local_page(*dst_page) + src;
|
||||
src_ptr = kmap_local_page(*src_page) + src;
|
||||
memmove(dst_ptr, src_ptr, l);
|
||||
kunmap_local(src_ptr);
|
||||
set_page_dirty(*dst_page);
|
||||
kunmap(*dst_page);
|
||||
kunmap_local(dst_ptr);
|
||||
|
||||
while ((len -= l) != 0) {
|
||||
l = min_t(int, len, PAGE_SIZE);
|
||||
memmove(kmap(*++dst_page),
|
||||
kmap(*++src_page), l);
|
||||
kunmap(*src_page);
|
||||
dst_ptr = kmap_local_page(*++dst_page);
|
||||
src_ptr = kmap_local_page(*++src_page);
|
||||
memmove(dst_ptr, src_ptr, l);
|
||||
kunmap_local(src_ptr);
|
||||
set_page_dirty(*dst_page);
|
||||
kunmap(*dst_page);
|
||||
kunmap_local(dst_ptr);
|
||||
}
|
||||
} else {
|
||||
void *src_ptr, *dst_ptr;
|
||||
|
||||
do {
|
||||
src_ptr = kmap(*src_page) + src;
|
||||
dst_ptr = kmap(*dst_page) + dst;
|
||||
dst_ptr = kmap_local_page(*dst_page) + dst;
|
||||
src_ptr = kmap_local_page(*src_page) + src;
|
||||
if (PAGE_SIZE - src <
|
||||
PAGE_SIZE - dst) {
|
||||
l = PAGE_SIZE - src;
|
||||
@ -283,9 +276,9 @@ void hfs_bnode_move(struct hfs_bnode *node, int dst, int src, int len)
|
||||
}
|
||||
l = min(len, l);
|
||||
memmove(dst_ptr, src_ptr, l);
|
||||
kunmap(*src_page);
|
||||
kunmap_local(src_ptr);
|
||||
set_page_dirty(*dst_page);
|
||||
kunmap(*dst_page);
|
||||
kunmap_local(dst_ptr);
|
||||
if (!dst)
|
||||
dst_page++;
|
||||
else
|
||||
@ -498,14 +491,14 @@ struct hfs_bnode *hfs_bnode_find(struct hfs_btree *tree, u32 num)
|
||||
if (!test_bit(HFS_BNODE_NEW, &node->flags))
|
||||
return node;
|
||||
|
||||
desc = (struct hfs_bnode_desc *)(kmap(node->page[0]) +
|
||||
node->page_offset);
|
||||
desc = (struct hfs_bnode_desc *)(kmap_local_page(node->page[0]) +
|
||||
node->page_offset);
|
||||
node->prev = be32_to_cpu(desc->prev);
|
||||
node->next = be32_to_cpu(desc->next);
|
||||
node->num_recs = be16_to_cpu(desc->num_recs);
|
||||
node->type = desc->type;
|
||||
node->height = desc->height;
|
||||
kunmap(node->page[0]);
|
||||
kunmap_local(desc);
|
||||
|
||||
switch (node->type) {
|
||||
case HFS_NODE_HEADER:
|
||||
@ -589,14 +582,12 @@ struct hfs_bnode *hfs_bnode_create(struct hfs_btree *tree, u32 num)
|
||||
}
|
||||
|
||||
pagep = node->page;
|
||||
memset(kmap(*pagep) + node->page_offset, 0,
|
||||
min_t(int, PAGE_SIZE, tree->node_size));
|
||||
memzero_page(*pagep, node->page_offset,
|
||||
min_t(int, PAGE_SIZE, tree->node_size));
|
||||
set_page_dirty(*pagep);
|
||||
kunmap(*pagep);
|
||||
for (i = 1; i < tree->pages_per_bnode; i++) {
|
||||
memset(kmap(*++pagep), 0, PAGE_SIZE);
|
||||
memzero_page(*++pagep, 0, PAGE_SIZE);
|
||||
set_page_dirty(*pagep);
|
||||
kunmap(*pagep);
|
||||
}
|
||||
clear_bit(HFS_BNODE_NEW, &node->flags);
|
||||
wake_up(&node->lock_wq);
|
||||
|
@ -163,7 +163,7 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
|
||||
goto free_inode;
|
||||
|
||||
/* Load the header */
|
||||
head = (struct hfs_btree_header_rec *)(kmap(page) +
|
||||
head = (struct hfs_btree_header_rec *)(kmap_local_page(page) +
|
||||
sizeof(struct hfs_bnode_desc));
|
||||
tree->root = be32_to_cpu(head->root);
|
||||
tree->leaf_count = be32_to_cpu(head->leaf_count);
|
||||
@ -240,11 +240,12 @@ struct hfs_btree *hfs_btree_open(struct super_block *sb, u32 id)
|
||||
(tree->node_size + PAGE_SIZE - 1) >>
|
||||
PAGE_SHIFT;
|
||||
|
||||
kunmap(page);
|
||||
kunmap_local(head);
|
||||
put_page(page);
|
||||
return tree;
|
||||
|
||||
fail_page:
|
||||
kunmap_local(head);
|
||||
put_page(page);
|
||||
free_inode:
|
||||
tree->inode->i_mapping->a_ops = &hfsplus_aops;
|
||||
@ -291,7 +292,7 @@ int hfs_btree_write(struct hfs_btree *tree)
|
||||
return -EIO;
|
||||
/* Load the header */
|
||||
page = node->page[0];
|
||||
head = (struct hfs_btree_header_rec *)(kmap(page) +
|
||||
head = (struct hfs_btree_header_rec *)(kmap_local_page(page) +
|
||||
sizeof(struct hfs_bnode_desc));
|
||||
|
||||
head->root = cpu_to_be32(tree->root);
|
||||
@ -303,7 +304,7 @@ int hfs_btree_write(struct hfs_btree *tree)
|
||||
head->attributes = cpu_to_be32(tree->attributes);
|
||||
head->depth = cpu_to_be16(tree->depth);
|
||||
|
||||
kunmap(page);
|
||||
kunmap_local(head);
|
||||
set_page_dirty(page);
|
||||
hfs_bnode_put(node);
|
||||
return 0;
|
||||
@ -394,7 +395,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
|
||||
|
||||
off += node->page_offset;
|
||||
pagep = node->page + (off >> PAGE_SHIFT);
|
||||
data = kmap(*pagep);
|
||||
data = kmap_local_page(*pagep);
|
||||
off &= ~PAGE_MASK;
|
||||
idx = 0;
|
||||
|
||||
@ -407,7 +408,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
|
||||
idx += i;
|
||||
data[off] |= m;
|
||||
set_page_dirty(*pagep);
|
||||
kunmap(*pagep);
|
||||
kunmap_local(data);
|
||||
tree->free_nodes--;
|
||||
mark_inode_dirty(tree->inode);
|
||||
hfs_bnode_put(node);
|
||||
@ -417,14 +418,14 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
|
||||
}
|
||||
}
|
||||
if (++off >= PAGE_SIZE) {
|
||||
kunmap(*pagep);
|
||||
data = kmap(*++pagep);
|
||||
kunmap_local(data);
|
||||
data = kmap_local_page(*++pagep);
|
||||
off = 0;
|
||||
}
|
||||
idx += 8;
|
||||
len--;
|
||||
}
|
||||
kunmap(*pagep);
|
||||
kunmap_local(data);
|
||||
nidx = node->next;
|
||||
if (!nidx) {
|
||||
hfs_dbg(BNODE_MOD, "create new bmap node\n");
|
||||
@ -440,7 +441,7 @@ struct hfs_bnode *hfs_bmap_alloc(struct hfs_btree *tree)
|
||||
off = off16;
|
||||
off += node->page_offset;
|
||||
pagep = node->page + (off >> PAGE_SHIFT);
|
||||
data = kmap(*pagep);
|
||||
data = kmap_local_page(*pagep);
|
||||
off &= ~PAGE_MASK;
|
||||
}
|
||||
}
|
||||
@ -490,7 +491,7 @@ void hfs_bmap_free(struct hfs_bnode *node)
|
||||
}
|
||||
off += node->page_offset + nidx / 8;
|
||||
page = node->page[off >> PAGE_SHIFT];
|
||||
data = kmap(page);
|
||||
data = kmap_local_page(page);
|
||||
off &= ~PAGE_MASK;
|
||||
m = 1 << (~nidx & 7);
|
||||
byte = data[off];
|
||||
@ -498,13 +499,13 @@ void hfs_bmap_free(struct hfs_bnode *node)
|
||||
pr_crit("trying to free free bnode "
|
||||
"%u(%d)\n",
|
||||
node->this, node->type);
|
||||
kunmap(page);
|
||||
kunmap_local(data);
|
||||
hfs_bnode_put(node);
|
||||
return;
|
||||
}
|
||||
data[off] = byte & ~m;
|
||||
set_page_dirty(page);
|
||||
kunmap(page);
|
||||
kunmap_local(data);
|
||||
hfs_bnode_put(node);
|
||||
tree->free_nodes++;
|
||||
mark_inode_dirty(tree->inode);
|
||||
|
@ -67,8 +67,7 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start,
for ( i = 0 ; i < pcount ; i++ ) {
if (!pages[i])
continue;
memset(page_address(pages[i]), 0, PAGE_SIZE);
flush_dcache_page(pages[i]);
memzero_page(pages[i], 0, PAGE_SIZE);
SetPageUptodate(pages[i]);
}
return ((loff_t)pcount) << PAGE_SHIFT;
@ -120,7 +119,7 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start,
zerr != Z_STREAM_END) {
if (!stream.avail_out) {
if (pages[curpage]) {
stream.next_out = page_address(pages[curpage])
stream.next_out = kmap_local_page(pages[curpage])
+ poffset;
stream.avail_out = PAGE_SIZE - poffset;
poffset = 0;
@ -176,6 +175,10 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start,
flush_dcache_page(pages[curpage]);
SetPageUptodate(pages[curpage]);
}
if (stream.next_out != (unsigned char *)zisofs_sink_page) {
kunmap_local(stream.next_out);
stream.next_out = NULL;
}
curpage++;
}
if (!stream.avail_in)
@ -183,6 +186,8 @@ static loff_t zisofs_uncompress_block(struct inode *inode, loff_t block_start,
}
inflate_out:
zlib_inflateEnd(&stream);
if (stream.next_out && stream.next_out != (unsigned char *)zisofs_sink_page)
kunmap_local(stream.next_out);
z_eio:
mutex_unlock(&zisofs_zlib_lock);
@ -283,9 +288,7 @@ static int zisofs_fill_pages(struct inode *inode, int full_page, int pcount,
}
if (poffset && *pages) {
memset(page_address(*pages) + poffset, 0,
PAGE_SIZE - poffset);
flush_dcache_page(*pages);
memzero_page(*pages, poffset, PAGE_SIZE - poffset);
SetPageUptodate(*pages);
}
return 0;
@ -343,10 +346,8 @@ static int zisofs_read_folio(struct file *file, struct folio *folio)
for (i = 0; i < pcount; i++, index++) {
if (i != full_page)
pages[i] = grab_cache_page_nowait(mapping, index);
if (pages[i]) {
if (pages[i])
ClearPageError(pages[i]);
kmap(pages[i]);
}
}
err = zisofs_fill_pages(inode, full_page, pcount, pages);
@ -357,7 +358,6 @@ static int zisofs_read_folio(struct file *file, struct folio *folio)
flush_dcache_page(pages[i]);
if (i == full_page && err)
SetPageError(pages[i]);
kunmap(pages[i]);
unlock_page(pages[i]);
if (i != full_page)
put_page(pages[i]);
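The zisofs hunks above replace the open-coded memset(page_address(...)) plus flush_dcache_page() sequence with memzero_page(), and map decompression output with kmap_local_page() instead of relying on a long-lived kmap(). A minimal sketch of the memzero_page() substitution, using a hypothetical helper name:

#include <linux/highmem.h>

/* Hypothetical helper: zero a whole page of decompression output. */
static void example_zero_page(struct page *page)
{
	/*
	 * Previously open-coded as:
	 *	memset(page_address(page), 0, PAGE_SIZE);
	 *	flush_dcache_page(page);
	 * memzero_page() does the temporary mapping, memset and cache
	 * flush in one call and also works for highmem pages.
	 */
	memzero_page(page, 0, PAGE_SIZE);
}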
fs/libfs.c
@ -15,6 +15,7 @@
#include <linux/mutex.h>
#include <linux/namei.h>
#include <linux/exportfs.h>
#include <linux/iversion.h>
#include <linux/writeback.h>
#include <linux/buffer_head.h> /* sync_mapping_buffers */
#include <linux/fs_context.h>
@ -1520,3 +1521,48 @@ void generic_set_encrypted_ci_d_ops(struct dentry *dentry)
#endif
}
EXPORT_SYMBOL(generic_set_encrypted_ci_d_ops);
/**
* inode_maybe_inc_iversion - increments i_version
* @inode: inode with the i_version that should be updated
* @force: increment the counter even if it's not necessary?
*
* Every time the inode is modified, the i_version field must be seen to have
* changed by any observer.
*
* If "force" is set or the QUERIED flag is set, then ensure that we increment
* the value, and clear the queried flag.
*
* In the common case where neither is set, then we can return "false" without
* updating i_version.
*
* If this function returns false, and no other metadata has changed, then we
* can avoid logging the metadata.
*/
bool inode_maybe_inc_iversion(struct inode *inode, bool force)
{
u64 cur, new;
/*
* The i_version field is not strictly ordered with any other inode
* information, but the legacy inode_inc_iversion code used a spinlock
* to serialize increments.
*
* Here, we add full memory barriers to ensure that any de-facto
* ordering with other info is preserved.
*
* This barrier pairs with the barrier in inode_query_iversion()
*/
smp_mb();
cur = inode_peek_iversion_raw(inode);
do {
/* If flag is clear then we needn't do anything */
if (!force && !(cur & I_VERSION_QUERIED))
return false;
/* Since lowest bit is flag, add 2 to avoid it */
new = (cur & ~I_VERSION_QUERIED) + I_VERSION_INCREMENT;
} while (!atomic64_try_cmpxchg(&inode->i_version, &cur, new));
return true;
}
EXPORT_SYMBOL(inode_maybe_inc_iversion);
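The new out-of-line inode_maybe_inc_iversion() above only bumps i_version when the force flag is set or the value has been queried since the last increment, so filesystems can skip logging unchanged metadata. A minimal, hypothetical caller sketch, not part of this diff:

#include <linux/fs.h>
#include <linux/iversion.h>

/* Hypothetical: bump i_version during a metadata change, log only if needed. */
static void example_inode_changed(struct inode *inode)
{
	if (inode_maybe_inc_iversion(inode, false))
		mark_inode_dirty(inode);	/* an observer may have seen the old value */
}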
@ -1668,8 +1668,7 @@ static int nilfs_btree_check_delete(struct nilfs_bmap *btree, __u64 key)
maxkey = nilfs_btree_node_get_key(node, nchildren - 1);
nextmaxkey = (nchildren > 1) ?
nilfs_btree_node_get_key(node, nchildren - 2) : 0;
if (bh != NULL)
brelse(bh);
brelse(bh);
return (maxkey == key) && (nextmaxkey < NILFS_BMAP_LARGE_LOW);
}
@ -1717,8 +1716,7 @@ static int nilfs_btree_gather_data(struct nilfs_bmap *btree,
ptrs[i] = le64_to_cpu(dptrs[i]);
}
if (bh != NULL)
brelse(bh);
brelse(bh);
return nitems;
}
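The nilfs2 btree hunks above drop the "if (bh != NULL)" guards because brelse() is already a no-op for a NULL buffer_head; the check lives inside the helper, roughly as sketched below (a paraphrase for illustration, not copied from <linux/buffer_head.h>):

#include <linux/buffer_head.h>

/* Rough paraphrase of the NULL handling inside brelse(). */
static inline void brelse_sketch(struct buffer_head *bh)
{
	if (bh)			/* callers therefore need no guard of their own */
		__brelse(bh);
}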
@ -875,9 +875,11 @@ static int nilfs_segctor_create_checkpoint(struct nilfs_sc_info *sci)
nilfs_mdt_mark_dirty(nilfs->ns_cpfile);
nilfs_cpfile_put_checkpoint(
nilfs->ns_cpfile, nilfs->ns_cno, bh_cp);
} else
WARN_ON(err == -EINVAL || err == -ENOENT);
} else if (err == -EINVAL || err == -ENOENT) {
nilfs_error(sci->sc_super,
"checkpoint creation failed due to metadata corruption.");
err = -EIO;
}
return err;
}
@ -891,7 +893,11 @@ static int nilfs_segctor_fill_in_checkpoint(struct nilfs_sc_info *sci)
err = nilfs_cpfile_get_checkpoint(nilfs->ns_cpfile, nilfs->ns_cno, 0,
&raw_cp, &bh_cp);
if (unlikely(err)) {
WARN_ON(err == -EINVAL || err == -ENOENT);
if (err == -EINVAL || err == -ENOENT) {
nilfs_error(sci->sc_super,
"checkpoint finalization failed due to metadata corruption.");
err = -EIO;
}
goto failed_ibh;
}
raw_cp->cp_snapshot_list.ssl_next = 0;
@ -2235,7 +2241,6 @@ int nilfs_construct_segment(struct super_block *sb)
struct the_nilfs *nilfs = sb->s_fs_info;
struct nilfs_sc_info *sci = nilfs->ns_writer;
struct nilfs_transaction_info *ti;
int err;
if (!sci)
return -EROFS;
@ -2243,8 +2248,7 @@ int nilfs_construct_segment(struct super_block *sb)
/* A call inside transactions causes a deadlock. */
BUG_ON((ti = current->journal_info) && ti->ti_magic == NILFS_TI_MAGIC);
err = nilfs_segctor_sync(sci);
return err;
return nilfs_segctor_sync(sci);
}
/**
@ -594,17 +594,37 @@ static int ntfs_attr_find(const ATTR_TYPE type, const ntfschar *name,
for (;; a = (ATTR_RECORD*)((u8*)a + le32_to_cpu(a->length))) {
u8 *mrec_end = (u8 *)ctx->mrec +
le32_to_cpu(ctx->mrec->bytes_allocated);
u8 *name_end = (u8 *)a + le16_to_cpu(a->name_offset) +
a->name_length * sizeof(ntfschar);
if ((u8*)a < (u8*)ctx->mrec || (u8*)a > mrec_end ||
name_end > mrec_end)
u8 *name_end;
/* check whether ATTR_RECORD wrap */
if ((u8 *)a < (u8 *)ctx->mrec)
break;
/* check whether Attribute Record Header is within bounds */
if ((u8 *)a > mrec_end ||
(u8 *)a + sizeof(ATTR_RECORD) > mrec_end)
break;
/* check whether ATTR_RECORD's name is within bounds */
name_end = (u8 *)a + le16_to_cpu(a->name_offset) +
a->name_length * sizeof(ntfschar);
if (name_end > mrec_end)
break;
ctx->attr = a;
if (unlikely(le32_to_cpu(a->type) > le32_to_cpu(type) ||
a->type == AT_END))
return -ENOENT;
if (unlikely(!a->length))
break;
/* check whether ATTR_RECORD's length wrap */
if ((u8 *)a + le32_to_cpu(a->length) < (u8 *)a)
break;
/* check whether ATTR_RECORD's length is within bounds */
if ((u8 *)a + le32_to_cpu(a->length) > mrec_end)
break;
if (a->type != type)
continue;
/*
@ -1829,6 +1829,13 @@ int ntfs_read_inode_mount(struct inode *vi)
goto err_out;
}
/* Sanity check offset to the first attribute */
if (le16_to_cpu(m->attrs_offset) >= le32_to_cpu(m->bytes_allocated)) {
ntfs_error(sb, "Incorrect mft offset to the first attribute %u in superblock.",
le16_to_cpu(m->attrs_offset));
goto err_out;
}
/* Need this to sanity check attribute list references to $MFT. */
vi->i_generation = ni->seq_no = le16_to_cpu(m->sequence_number);
@ -527,7 +527,7 @@ struct ocfs2_extent_block
* value -1 (0xFFFF) is OCFS2_INVALID_SLOT. This marks a slot empty.
*/
struct ocfs2_slot_map {
/*00*/ __le16 sm_slots[0];
/*00*/ DECLARE_FLEX_ARRAY(__le16, sm_slots);
/*
* Actual on-disk size is one block. OCFS2_MAX_SLOTS is 255,
* 255 * sizeof(__le16) == 512B, within the 512B block minimum blocksize.
@ -548,7 +548,7 @@ struct ocfs2_extended_slot {
* i_size.
*/
struct ocfs2_slot_map_extended {
/*00*/ struct ocfs2_extended_slot se_slots[0];
/*00*/ DECLARE_FLEX_ARRAY(struct ocfs2_extended_slot, se_slots);
/*
* Actual size is i_size of the slot_map system file. It should
* match s_max_slots * sizeof(struct ocfs2_extended_slot)
@ -727,7 +727,7 @@ struct ocfs2_dinode {
struct ocfs2_extent_list i_list;
struct ocfs2_truncate_log i_dealloc;
struct ocfs2_inline_data i_data;
__u8 i_symlink[0];
DECLARE_FLEX_ARRAY(__u8, i_symlink);
} id2;
/* Actual on-disk size is one block */
};
@ -892,7 +892,7 @@ struct ocfs2_group_desc
/*30*/ struct ocfs2_block_check bg_check; /* Error checking */
__le64 bg_reserved2;
/*40*/ union {
__u8 bg_bitmap[0];
DECLARE_FLEX_ARRAY(__u8, bg_bitmap);
struct {
/*
* Block groups may be discontiguous when
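The ocfs2 on-disk structures above move from the old zero-length-array idiom ("[0]") to DECLARE_FLEX_ARRAY(), which lets a C99 flexible array member be the sole member of a struct or sit inside a union while remaining visible to compiler and fortify bounds checking. A hypothetical illustration with made-up field names:

#include <linux/stddef.h>
#include <linux/types.h>

/* Hypothetical on-disk record; the old style would have been "__u8 er_payload[0];". */
struct example_disk_record {
	__le32 er_length;
	union {
		DECLARE_FLEX_ARRAY(__u8, er_payload);	/* raw trailing bytes */
		struct {
			__le64 first;
			__le64 second;
		} er_pair;
	} er_u;
};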
@ -2614,7 +2614,7 @@ static inline unsigned int ocfs2_cow_align_length(struct super_block *sb,
}
/*
* Calculate out the start and number of virtual clusters we need to to CoW.
* Calculate out the start and number of virtual clusters we need to CoW.
*
* cpos is vitual start cluster position we want to do CoW in a
* file and write_len is the cluster length.
@ -334,10 +334,10 @@ int ocfs2_cluster_connect(const char *stack_name,
goto out;
}
strlcpy(new_conn->cc_name, group, GROUP_NAME_MAX + 1);
strscpy(new_conn->cc_name, group, GROUP_NAME_MAX + 1);
new_conn->cc_namelen = grouplen;
if (cluster_name_len)
strlcpy(new_conn->cc_cluster_name, cluster_name,
strscpy(new_conn->cc_cluster_name, cluster_name,
CLUSTER_NAME_MAX + 1);
new_conn->cc_cluster_name_len = cluster_name_len;
new_conn->cc_recovery_handler = recovery_handler;
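The ocfs2_cluster_connect() hunk above switches strlcpy() to strscpy(). Both NUL-terminate the destination, but strscpy() returns the number of characters copied, or -E2BIG when the source did not fit, instead of the source length, and it never reads the source past what the destination can hold. A hypothetical truncation-aware caller:

#include <linux/errno.h>
#include <linux/string.h>

/* Hypothetical: copy a name and report truncation to the caller. */
static int example_set_name(char *dst, size_t dst_size, const char *src)
{
	ssize_t ret = strscpy(dst, src, dst_size);

	if (ret < 0)		/* -E2BIG: truncated, dst is still NUL-terminated */
		return -ENAMETOOLONG;
	return 0;
}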
@ -106,7 +106,7 @@ int ocfs2_claim_clusters(handle_t *handle,
u32 *cluster_start,
u32 *num_clusters);
/*
* Use this variant of ocfs2_claim_clusters to specify a maxiumum
* Use this variant of ocfs2_claim_clusters to specify a maximum
* number of clusters smaller than the allocation reserved.
*/
int __ocfs2_claim_clusters(handle_t *handle,
Some files were not shown because too many files have changed in this diff.