diff --git a/Documentation/rust/general-information.rst b/Documentation/rust/general-information.rst index e3f388ef4ee4..6146b49b6a98 100644 --- a/Documentation/rust/general-information.rst +++ b/Documentation/rust/general-information.rst @@ -15,6 +15,8 @@ but not `std `_. Crates for use in the kernel must opt into this behavior using the ``#![no_std]`` attribute. +.. _rust_code_documentation: + Code documentation ------------------ @@ -22,10 +24,17 @@ Rust kernel code is documented using ``rustdoc``, its built-in documentation generator. The generated HTML docs include integrated search, linked items (e.g. types, -functions, constants), source code, etc. They may be read at (TODO: link when -in mainline and generated alongside the rest of the documentation): +functions, constants), source code, etc. They may be read at: - http://kernel.org/ + https://rust.docs.kernel.org + +For linux-next, please see: + + https://rust.docs.kernel.org/next/ + +There are also tags for each main release, e.g.: + + https://rust.docs.kernel.org/6.10/ The docs can also be easily generated and read locally. This is quite fast (same order as compiling the code itself) and no special tools or environment @@ -75,7 +84,7 @@ should provide as-safe-as-possible abstractions as needed. .. code-block:: rust/bindings/ - (rust/helpers.c) + (rust/helpers/) include/ -----+ <-+ | | @@ -112,7 +121,7 @@ output files in the ``rust/bindings/`` directory. For parts of the C header that ``bindgen`` does not auto generate, e.g. C ``inline`` functions or non-trivial macros, it is acceptable to add a small -wrapper function to ``rust/helpers.c`` to make it available for the Rust side as +wrapper function to ``rust/helpers/`` to make it available for the Rust side as well. Abstractions @@ -142,3 +151,11 @@ configuration: #[cfg(CONFIG_X="y")] // Enabled as a built-in (`y`) #[cfg(CONFIG_X="m")] // Enabled as a module (`m`) #[cfg(not(CONFIG_X))] // Disabled + +For other predicates that Rust's ``cfg`` does not support, e.g. expressions with +numerical comparisons, one may define a new Kconfig symbol: + +.. code-block:: kconfig + + config RUSTC_VERSION_MIN_107900 + def_bool y if RUSTC_VERSION >= 107900 diff --git a/Documentation/rust/index.rst b/Documentation/rust/index.rst index 46d35bd395cf..55dcde9e9e7e 100644 --- a/Documentation/rust/index.rst +++ b/Documentation/rust/index.rst @@ -25,13 +25,27 @@ support is still in development/experimental, especially for certain kernel configurations. +Code documentation +------------------ + +Given a kernel configuration, the kernel may generate Rust code documentation, +i.e. HTML rendered by the ``rustdoc`` tool. + .. only:: rustdoc and html - You can also browse `rustdoc documentation `_. + This kernel documentation was built with `Rust code documentation + `_. .. only:: not rustdoc and html - This documentation does not include rustdoc generated information. + This kernel documentation was not built with Rust code documentation. + +A pregenerated version is provided at: + + https://rust.docs.kernel.org + +Please see the :ref:`Code documentation ` section for +more details. .. 
toctree:: :maxdepth: 1 diff --git a/Documentation/rust/quick-start.rst b/Documentation/rust/quick-start.rst index 8e3ad9678719..2d107982c87b 100644 --- a/Documentation/rust/quick-start.rst +++ b/Documentation/rust/quick-start.rst @@ -39,8 +39,8 @@ of the box, e.g.:: Debian ****** -Debian Unstable (Sid), outside of the freeze period, provides recent Rust -releases and thus it should generally work out of the box, e.g.:: +Debian Testing and Debian Unstable (Sid), outside of the freeze period, provide +recent Rust releases and thus they should generally work out of the box, e.g.:: apt install rustc rust-src bindgen rustfmt rust-clippy diff --git a/MAINTAINERS b/MAINTAINERS index 7bfef98226d9..098d13ab6b34 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -20144,6 +20144,7 @@ R: Björn Roy Baron R: Benno Lossin R: Andreas Hindborg R: Alice Ryhl +R: Trevor Gross L: rust-for-linux@vger.kernel.org S: Supported W: https://rust-for-linux.com diff --git a/Makefile b/Makefile index dfc7b0753e50..265dd990a9b6 100644 --- a/Makefile +++ b/Makefile @@ -645,9 +645,11 @@ endif # The expansion should be delayed until arch/$(SRCARCH)/Makefile is included. # Some architectures define CROSS_COMPILE in arch/$(SRCARCH)/Makefile. -# CC_VERSION_TEXT is referenced from Kconfig (so it needs export), -# and from include/config/auto.conf.cmd to detect the compiler upgrade. +# CC_VERSION_TEXT and RUSTC_VERSION_TEXT are referenced from Kconfig (so they +# need export), and from include/config/auto.conf.cmd to detect the compiler +# upgrade. CC_VERSION_TEXT = $(subst $(pound),,$(shell LC_ALL=C $(CC) --version 2>/dev/null | head -n 1)) +RUSTC_VERSION_TEXT = $(subst $(pound),,$(shell $(RUSTC) --version 2>/dev/null)) ifneq ($(findstring clang,$(CC_VERSION_TEXT)),) include $(srctree)/scripts/Makefile.clang @@ -668,7 +670,7 @@ ifdef config-build # KBUILD_DEFCONFIG may point out an alternative default configuration # used for 'make defconfig' include $(srctree)/arch/$(SRCARCH)/Makefile -export KBUILD_DEFCONFIG KBUILD_KCONFIG CC_VERSION_TEXT +export KBUILD_DEFCONFIG KBUILD_KCONFIG CC_VERSION_TEXT RUSTC_VERSION_TEXT config: outputmakefile scripts_basic FORCE $(Q)$(MAKE) $(build)=scripts/kconfig $@ @@ -924,6 +926,7 @@ ifdef CONFIG_SHADOW_CALL_STACK ifndef CONFIG_DYNAMIC_SCS CC_FLAGS_SCS := -fsanitize=shadow-call-stack KBUILD_CFLAGS += $(CC_FLAGS_SCS) +KBUILD_RUSTFLAGS += -Zsanitizer=shadow-call-stack endif export CC_FLAGS_SCS endif @@ -948,6 +951,16 @@ endif ifdef CONFIG_CFI_CLANG CC_FLAGS_CFI := -fsanitize=kcfi +ifdef CONFIG_CFI_ICALL_NORMALIZE_INTEGERS + CC_FLAGS_CFI += -fsanitize-cfi-icall-experimental-normalize-integers +endif +ifdef CONFIG_RUST + # Always pass -Zsanitizer-cfi-normalize-integers as CONFIG_RUST selects + # CONFIG_CFI_ICALL_NORMALIZE_INTEGERS. + RUSTC_FLAGS_CFI := -Zsanitizer=kcfi -Zsanitizer-cfi-normalize-integers + KBUILD_RUSTFLAGS += $(RUSTC_FLAGS_CFI) + export RUSTC_FLAGS_CFI +endif KBUILD_CFLAGS += $(CC_FLAGS_CFI) export CC_FLAGS_CFI endif diff --git a/arch/Kconfig b/arch/Kconfig index 405c85ab86f2..98157b38f5cf 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -835,6 +835,22 @@ config CFI_CLANG https://clang.llvm.org/docs/ControlFlowIntegrity.html +config CFI_ICALL_NORMALIZE_INTEGERS + bool "Normalize CFI tags for integers" + depends on CFI_CLANG + depends on $(cc-option,-fsanitize=kcfi -fsanitize-cfi-icall-experimental-normalize-integers) + help + This option normalizes the CFI tags for integer types so that all + integer types of the same size and signedness receive the same CFI + tag. 
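To see concretely what normalization keeps consistent, consider an indirect call whose signature only involves integers. A minimal sketch in plain Rust (hypothetical function, not kernel code):

    // kCFI instruments indirect calls with a hash ("tag") of the callee's
    // type. C spells this signature `int (*)(int)`, Rust spells it
    // `extern "C" fn(i32) -> i32`; with integer normalization both sides
    // derive the same tag, so a cross-language indirect call passes the
    // check instead of trapping.
    extern "C" fn add_one(x: i32) -> i32 {
        x + 1
    }

    fn main() {
        let f: extern "C" fn(i32) -> i32 = add_one;
        assert_eq!(f(41), 42); // the tag check guards this indirect call
    }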
+ + The option is separate from CONFIG_RUST because it affects the ABI. + When working with build systems that care about the ABI, it is + convenient to be able to turn on this flag first, before Rust is + turned on. + + This option is necessary for using CFI with Rust. If unsure, say N. + config CFI_PERMISSIVE bool "Use CFI in permissive mode" depends on CFI_CLANG diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 49f054dcd4de..3e29b44d2d7b 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -235,7 +235,7 @@ config ARM64 select HAVE_FUNCTION_ARG_ACCESS_API select MMU_GATHER_RCU_TABLE_FREE select HAVE_RSEQ - select HAVE_RUST if CPU_LITTLE_ENDIAN + select HAVE_RUST if RUSTC_SUPPORTS_ARM64 select HAVE_STACKPROTECTOR select HAVE_SYSCALL_TRACEPOINTS select HAVE_KPROBES @@ -270,6 +270,18 @@ config ARM64 help ARM 64-bit (AArch64) Linux support. +config RUSTC_SUPPORTS_ARM64 + def_bool y + depends on CPU_LITTLE_ENDIAN + # Shadow call stack is only supported on certain rustc versions. + # + # When using the UNWIND_PATCH_PAC_INTO_SCS option, rustc version 1.80+ is + # required due to use of the -Zfixed-x18 flag. + # + # Otherwise, rustc version 1.82+ is required due to use of the + # -Zsanitizer=shadow-call-stack flag. + depends on !SHADOW_CALL_STACK || RUSTC_VERSION >= 108200 || RUSTC_VERSION >= 108000 && UNWIND_PATCH_PAC_INTO_SCS + config CLANG_SUPPORTS_DYNAMIC_FTRACE_WITH_ARGS def_bool CC_IS_CLANG # https://github.com/ClangBuiltLinux/linux/issues/1507 diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index f6bc3da1ef11..b058c4803efb 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -57,9 +57,11 @@ KBUILD_AFLAGS += $(call cc-option,-mabi=lp64) ifneq ($(CONFIG_UNWIND_TABLES),y) KBUILD_CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables KBUILD_AFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables +KBUILD_RUSTFLAGS += -Cforce-unwind-tables=n else KBUILD_CFLAGS += -fasynchronous-unwind-tables KBUILD_AFLAGS += -fasynchronous-unwind-tables +KBUILD_RUSTFLAGS += -Cforce-unwind-tables=y -Zuse-sync-unwind=n endif ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y) @@ -114,6 +116,7 @@ endif ifeq ($(CONFIG_SHADOW_CALL_STACK), y) KBUILD_CFLAGS += -ffixed-x18 +KBUILD_RUSTFLAGS += -Zfixed-x18 endif ifeq ($(CONFIG_CPU_BIG_ENDIAN), y) diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index b6d515db869b..22dc5ea4196c 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -177,7 +177,7 @@ config RISCV select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_RETHOOK if !XIP_KERNEL select HAVE_RSEQ - select HAVE_RUST if 64BIT + select HAVE_RUST if RUSTC_SUPPORTS_RISCV select HAVE_SAMPLE_FTRACE_DIRECT select HAVE_SAMPLE_FTRACE_DIRECT_MULTI select HAVE_STACKPROTECTOR @@ -209,6 +209,13 @@ config RISCV select USER_STACKTRACE_SUPPORT select ZONE_DMA32 if 64BIT +config RUSTC_SUPPORTS_RISCV + def_bool y + depends on 64BIT + # Shadow call stack requires rustc version 1.82+ due to use of the + # -Zsanitizer=shadow-call-stack flag. 
+ depends on !SHADOW_CALL_STACK || RUSTC_VERSION >= 108200 + config CLANG_SUPPORTS_DYNAMIC_FTRACE def_bool CC_IS_CLANG # https://github.com/ClangBuiltLinux/linux/issues/1817 diff --git a/arch/x86/Makefile b/arch/x86/Makefile index 801fd85c3ef6..cd75e78a06c1 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -24,11 +24,15 @@ RETPOLINE_CFLAGS += $(call cc-option,-mindirect-branch-cs-prefix) ifdef CONFIG_MITIGATION_RETHUNK RETHUNK_CFLAGS := -mfunction-return=thunk-extern +RETHUNK_RUSTFLAGS := -Zfunction-return=thunk-extern RETPOLINE_CFLAGS += $(RETHUNK_CFLAGS) +RETPOLINE_RUSTFLAGS += $(RETHUNK_RUSTFLAGS) endif export RETHUNK_CFLAGS +export RETHUNK_RUSTFLAGS export RETPOLINE_CFLAGS +export RETPOLINE_RUSTFLAGS export RETPOLINE_VDSO_CFLAGS # For gcc stack alignment is specified with -mpreferred-stack-boundary, @@ -218,9 +222,10 @@ KBUILD_CFLAGS += -fno-asynchronous-unwind-tables # Avoid indirect branches in kernel to deal with Spectre ifdef CONFIG_MITIGATION_RETPOLINE KBUILD_CFLAGS += $(RETPOLINE_CFLAGS) + KBUILD_RUSTFLAGS += $(RETPOLINE_RUSTFLAGS) # Additionally, avoid generating expensive indirect jumps which # are subject to retpolines for small number of switch cases. - # clang turns off jump table generation by default when under + # LLVM turns off jump table generation by default when under # retpoline builds, however, gcc does not for x86. This has # only been fixed starting from gcc stable version 8.4.0 and # onwards, but not for older ones. See gcc bug #86952. @@ -237,6 +242,10 @@ ifdef CONFIG_CALL_PADDING PADDING_CFLAGS := -fpatchable-function-entry=$(CONFIG_FUNCTION_PADDING_BYTES),$(CONFIG_FUNCTION_PADDING_BYTES) KBUILD_CFLAGS += $(PADDING_CFLAGS) export PADDING_CFLAGS + +PADDING_RUSTFLAGS := -Zpatchable-function-entry=$(CONFIG_FUNCTION_PADDING_BYTES),$(CONFIG_FUNCTION_PADDING_BYTES) +KBUILD_RUSTFLAGS += $(PADDING_RUSTFLAGS) +export PADDING_RUSTFLAGS endif KBUILD_LDFLAGS += -m elf_$(UTS_MACHINE) diff --git a/init/Kconfig b/init/Kconfig index b05467014041..fbd0cb06a50a 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -60,6 +60,13 @@ config LLD_VERSION default $(ld-version) if LD_IS_LLD default 0 +config RUSTC_VERSION + int + default $(shell,$(srctree)/scripts/rustc-version.sh $(RUSTC)) + help + It does not depend on `RUST` since that one may need to use the version + in a `depends on`. + config RUST_IS_AVAILABLE def_bool $(success,$(srctree)/scripts/rust_is_available.sh) help @@ -1935,12 +1942,14 @@ config RUST bool "Rust support" depends on HAVE_RUST depends on RUST_IS_AVAILABLE - depends on !CFI_CLANG depends on !MODVERSIONS - depends on !GCC_PLUGINS + depends on !GCC_PLUGIN_RANDSTRUCT depends on !RANDSTRUCT - depends on !SHADOW_CALL_STACK depends on !DEBUG_INFO_BTF || PAHOLE_HAS_LANG_EXCLUDE + depends on !CFI_CLANG || RUSTC_VERSION >= 107900 && $(cc-option,-fsanitize=kcfi -fsanitize-cfi-icall-experimental-normalize-integers) + select CFI_ICALL_NORMALIZE_INTEGERS if CFI_CLANG + depends on !CALL_PADDING || RUSTC_VERSION >= 108000 + depends on !KASAN_SW_TAGS help Enables Rust support in the kernel. @@ -1957,7 +1966,9 @@ config RUST config RUSTC_VERSION_TEXT string depends on RUST - default "$(shell,$(RUSTC) --version 2>/dev/null)" + default "$(RUSTC_VERSION_TEXT)" + help + See `CC_VERSION_TEXT`. 
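With `RUSTC_VERSION` available as a Kconfig integer, derived symbols such as the `RUSTC_VERSION_MIN_107900` example from the documentation above can gate Rust code through the usual `cfg` convention. A sketch (the gated feature is made up):

    #[cfg(CONFIG_RUSTC_VERSION_MIN_107900)]
    fn init_feature() {
        // Path taken when rustc >= 1.79.0 (hypothetical feature use).
    }

    #[cfg(not(CONFIG_RUSTC_VERSION_MIN_107900))]
    fn init_feature() {
        // Fallback for older compilers (hypothetical).
    }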
config BINDGEN_VERSION_TEXT string diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile index 7634dd2a6128..b88543e5c0cc 100644 --- a/mm/kasan/Makefile +++ b/mm/kasan/Makefile @@ -44,7 +44,8 @@ ifndef CONFIG_CC_HAS_KASAN_MEMINTRINSIC_PREFIX CFLAGS_KASAN_TEST += -fno-builtin endif -CFLAGS_kasan_test.o := $(CFLAGS_KASAN_TEST) +CFLAGS_kasan_test_c.o := $(CFLAGS_KASAN_TEST) +RUSTFLAGS_kasan_test_rust.o := $(RUSTFLAGS_KASAN) CFLAGS_kasan_test_module.o := $(CFLAGS_KASAN_TEST) obj-y := common.o report.o @@ -52,5 +53,10 @@ obj-$(CONFIG_KASAN_GENERIC) += init.o generic.o report_generic.o shadow.o quaran obj-$(CONFIG_KASAN_HW_TAGS) += hw_tags.o report_hw_tags.o tags.o report_tags.o obj-$(CONFIG_KASAN_SW_TAGS) += init.o report_sw_tags.o shadow.o sw_tags.o tags.o report_tags.o +kasan_test-objs := kasan_test_c.o +ifdef CONFIG_RUST + kasan_test-objs += kasan_test_rust.o +endif + obj-$(CONFIG_KASAN_KUNIT_TEST) += kasan_test.o obj-$(CONFIG_KASAN_MODULE_TEST) += kasan_test_module.o diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h index fb2b9ac0659a..f438a6cdc964 100644 --- a/mm/kasan/kasan.h +++ b/mm/kasan/kasan.h @@ -555,6 +555,12 @@ static inline bool kasan_arch_is_ready(void) { return true; } void kasan_kunit_test_suite_start(void); void kasan_kunit_test_suite_end(void); +#ifdef CONFIG_RUST +char kasan_test_rust_uaf(void); +#else +static inline char kasan_test_rust_uaf(void) { return '\0'; } +#endif + #else /* CONFIG_KASAN_KUNIT_TEST */ static inline void kasan_kunit_test_suite_start(void) { } diff --git a/mm/kasan/kasan_test.c b/mm/kasan/kasan_test_c.c similarity index 99% rename from mm/kasan/kasan_test.c rename to mm/kasan/kasan_test_c.c index 567d33b493e2..a181e4780d9d 100644 --- a/mm/kasan/kasan_test.c +++ b/mm/kasan/kasan_test_c.c @@ -1944,6 +1944,16 @@ static void match_all_mem_tag(struct kunit *test) kfree(ptr); } +/* + * Check that Rust performing a use-after-free using `unsafe` is detected. + * This is a smoke test to make sure that Rust is being sanitized properly. + */ +static void rust_uaf(struct kunit *test) +{ + KASAN_TEST_NEEDS_CONFIG_ON(test, CONFIG_RUST); + KUNIT_EXPECT_KASAN_FAIL(test, kasan_test_rust_uaf()); +} + static struct kunit_case kasan_kunit_test_cases[] = { KUNIT_CASE(kmalloc_oob_right), KUNIT_CASE(kmalloc_oob_left), @@ -2017,6 +2027,7 @@ static struct kunit_case kasan_kunit_test_cases[] = { KUNIT_CASE(match_all_not_assigned), KUNIT_CASE(match_all_ptr_tag), KUNIT_CASE(match_all_mem_tag), + KUNIT_CASE(rust_uaf), {} }; diff --git a/mm/kasan/kasan_test_rust.rs b/mm/kasan/kasan_test_rust.rs new file mode 100644 index 000000000000..caa7175964ef --- /dev/null +++ b/mm/kasan/kasan_test_rust.rs @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! Helper crate for KASAN testing. +//! +//! Provides behavior to check the sanitization of Rust code. + +use core::ptr::addr_of_mut; +use kernel::prelude::*; + +/// Trivial UAF - allocate a big vector, grab a pointer partway through, +/// drop the vector, and touch it. +#[no_mangle] +pub extern "C" fn kasan_test_rust_uaf() -> u8 { + let mut v: Vec = Vec::new(); + for _ in 0..4096 { + v.push(0x42, GFP_KERNEL).unwrap(); + } + let ptr: *mut u8 = addr_of_mut!(v[2048]); + drop(v); + unsafe { *ptr } +} diff --git a/rust/Makefile b/rust/Makefile index f168d2c98a15..b5e0a73b78f3 100644 --- a/rust/Makefile +++ b/rust/Makefile @@ -8,16 +8,16 @@ always-$(CONFIG_RUST) += exports_core_generated.h # Missing prototypes are expected in the helpers since these are exported # for Rust only, thus there is no header nor prototypes. 
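The Rust KASAN smoke test above covers use-after-free; other bug classes could be probed in the same style. A hypothetical out-of-bounds variant, not part of this patch, might look like:

    // Hypothetical companion to `kasan_test_rust_uaf`: read one element
    // past the end of a live allocation so KASAN hits the redzone.
    #[no_mangle]
    pub extern "C" fn kasan_test_rust_oob() -> u8 {
        let mut v: Vec<u8> = Vec::new();
        for _ in 0..128 {
            v.push(0x42, GFP_KERNEL).unwrap();
        }
        let ptr: *const u8 = v.as_ptr();
        // SAFETY: deliberately out of bounds; KASAN should report this.
        unsafe { *ptr.add(v.len()) }
    }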
-obj-$(CONFIG_RUST) += helpers.o -CFLAGS_REMOVE_helpers.o = -Wmissing-prototypes -Wmissing-declarations +obj-$(CONFIG_RUST) += helpers/helpers.o +CFLAGS_REMOVE_helpers/helpers.o = -Wmissing-prototypes -Wmissing-declarations always-$(CONFIG_RUST) += libmacros.so no-clean-files += libmacros.so always-$(CONFIG_RUST) += bindings/bindings_generated.rs bindings/bindings_helpers_generated.rs obj-$(CONFIG_RUST) += alloc.o bindings.o kernel.o -always-$(CONFIG_RUST) += exports_alloc_generated.h exports_bindings_generated.h \ - exports_kernel_generated.h +always-$(CONFIG_RUST) += exports_alloc_generated.h exports_helpers_generated.h \ + exports_bindings_generated.h exports_kernel_generated.h always-$(CONFIG_RUST) += uapi/uapi_generated.rs obj-$(CONFIG_RUST) += uapi.o @@ -63,6 +63,7 @@ quiet_cmd_rustdoc = RUSTDOC $(if $(rustdoc_host),H, ) $< OBJTREE=$(abspath $(objtree)) \ $(RUSTDOC) $(if $(rustdoc_host),$(rust_common_flags),$(rust_flags)) \ $(rustc_target_flags) -L$(objtree)/$(obj) \ + -Zunstable-options --generate-link-to-definition \ --output $(rustdoc_output) \ --crate-name $(subst rustdoc-,,$@) \ $(if $(rustdoc_host),,--sysroot=/dev/null) \ @@ -270,7 +271,7 @@ quiet_cmd_bindgen = BINDGEN $@ cmd_bindgen = \ $(BINDGEN) $< $(bindgen_target_flags) \ --use-core --with-derive-default --ctypes-prefix core::ffi --no-layout-tests \ - --no-debug '.*' \ + --no-debug '.*' --enable-function-attribute-detection \ -o $@ -- $(bindgen_c_flags_final) -DMODULE \ $(bindgen_target_cflags) $(bindgen_target_extra) @@ -299,13 +300,13 @@ $(obj)/bindings/bindings_helpers_generated.rs: private bindgen_target_cflags = \ -I$(objtree)/$(obj) -Wno-missing-prototypes -Wno-missing-declarations $(obj)/bindings/bindings_helpers_generated.rs: private bindgen_target_extra = ; \ sed -Ei 's/pub fn rust_helper_([a-zA-Z0-9_]*)/#[link_name="rust_helper_\1"]\n pub fn \1/g' $@ -$(obj)/bindings/bindings_helpers_generated.rs: $(src)/helpers.c FORCE +$(obj)/bindings/bindings_helpers_generated.rs: $(src)/helpers/helpers.c FORCE $(call if_changed_dep,bindgen) quiet_cmd_exports = EXPORTS $@ cmd_exports = \ $(NM) -p --defined-only $< \ - | awk '/ (T|R|D|B) / {printf "EXPORT_SYMBOL_RUST_GPL(%s);\n",$$3}' > $@ + | awk '$$2~/(T|R|D|B)/ && $$3!~/__cfi/ {printf "EXPORT_SYMBOL_RUST_GPL(%s);\n",$$3}' > $@ $(obj)/exports_core_generated.h: $(obj)/core.o FORCE $(call if_changed,exports) @@ -313,6 +314,18 @@ $(obj)/exports_core_generated.h: $(obj)/core.o FORCE $(obj)/exports_alloc_generated.h: $(obj)/alloc.o FORCE $(call if_changed,exports) +# Even though Rust kernel modules should never use the bindings directly, +# symbols from the `bindings` crate and the C helpers need to be exported +# because Rust generics and inlined functions may not get their code generated +# in the crate where they are defined. Other helpers, called from non-inline +# functions, may not be exported, in principle. However, in general, the Rust +# compiler does not guarantee codegen will be performed for a non-inline +# function either. Therefore, we export all symbols from helpers and bindings. +# In the future, this may be revisited to reduce the number of exports after +# the compiler is informed about the places codegen is required. 
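The export-everything rationale above comes down to where rustc emits machine code for generic and inlined items. A compact sketch (`helper` stands in for any `rust_helper_*` symbol):

    extern "C" {
        fn helper(); // placeholder for any rust_helper_* function
    }

    // Being generic, this function is monomorphized -- and its code
    // emitted -- in whichever crate instantiates it, possibly a loadable
    // module. The module can therefore only link if `helper` is exported.
    pub fn call_through<T>(_witness: T) {
        unsafe { helper() }
    }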
+$(obj)/exports_helpers_generated.h: $(obj)/helpers/helpers.o FORCE + $(call if_changed,exports) + $(obj)/exports_bindings_generated.h: $(obj)/bindings.o FORCE $(call if_changed,exports) @@ -329,9 +342,7 @@ quiet_cmd_rustc_procmacro = $(RUSTC_OR_CLIPPY_QUIET) P $@ --crate-name $(patsubst lib%.so,%,$(notdir $@)) $< # Procedural macros can only be used with the `rustc` that compiled it. -# Therefore, to get `libmacros.so` automatically recompiled when the compiler -# version changes, we add `core.o` as a dependency (even if it is not needed). -$(obj)/libmacros.so: $(src)/macros/lib.rs $(obj)/core.o FORCE +$(obj)/libmacros.so: $(src)/macros/lib.rs FORCE +$(call if_changed_dep,rustc_procmacro) quiet_cmd_rustc_library = $(if $(skip_clippy),RUSTC,$(RUSTC_OR_CLIPPY_QUIET)) L $@ @@ -344,7 +355,8 @@ quiet_cmd_rustc_library = $(if $(skip_clippy),RUSTC,$(RUSTC_OR_CLIPPY_QUIET)) L --crate-type rlib -L$(objtree)/$(obj) \ --crate-name $(patsubst %.o,%,$(notdir $@)) $< \ --sysroot=/dev/null \ - $(if $(rustc_objcopy),;$(OBJCOPY) $(rustc_objcopy) $@) + $(if $(rustc_objcopy),;$(OBJCOPY) $(rustc_objcopy) $@) \ + $(cmd_objtool) rust-analyzer: $(Q)$(srctree)/scripts/generate_rust_analyzer.py \ @@ -366,44 +378,50 @@ ifneq ($(or $(CONFIG_ARM64),$(and $(CONFIG_RISCV),$(CONFIG_64BIT))),) __ashlti3 __lshrti3 endif +define rule_rustc_library + $(call cmd_and_fixdep,rustc_library) + $(call cmd,gen_objtooldep) +endef + $(obj)/core.o: private skip_clippy = 1 $(obj)/core.o: private skip_flags = -Wunreachable_pub $(obj)/core.o: private rustc_objcopy = $(foreach sym,$(redirect-intrinsics),--redefine-sym $(sym)=__rust$(sym)) $(obj)/core.o: private rustc_target_flags = $(core-cfgs) -$(obj)/core.o: $(RUST_LIB_SRC)/core/src/lib.rs FORCE - +$(call if_changed_dep,rustc_library) +$(obj)/core.o: $(RUST_LIB_SRC)/core/src/lib.rs \ + $(wildcard $(objtree)/include/config/RUSTC_VERSION_TEXT) FORCE + +$(call if_changed_rule,rustc_library) ifneq ($(or $(CONFIG_X86_64),$(CONFIG_X86_32)),) $(obj)/core.o: scripts/target.json endif $(obj)/compiler_builtins.o: private rustc_objcopy = -w -W '__*' $(obj)/compiler_builtins.o: $(src)/compiler_builtins.rs $(obj)/core.o FORCE - +$(call if_changed_dep,rustc_library) + +$(call if_changed_rule,rustc_library) $(obj)/alloc.o: private skip_clippy = 1 $(obj)/alloc.o: private skip_flags = -Wunreachable_pub $(obj)/alloc.o: private rustc_target_flags = $(alloc-cfgs) $(obj)/alloc.o: $(RUST_LIB_SRC)/alloc/src/lib.rs $(obj)/compiler_builtins.o FORCE - +$(call if_changed_dep,rustc_library) + +$(call if_changed_rule,rustc_library) $(obj)/build_error.o: $(src)/build_error.rs $(obj)/compiler_builtins.o FORCE - +$(call if_changed_dep,rustc_library) + +$(call if_changed_rule,rustc_library) $(obj)/bindings.o: $(src)/bindings/lib.rs \ $(obj)/compiler_builtins.o \ $(obj)/bindings/bindings_generated.rs \ $(obj)/bindings/bindings_helpers_generated.rs FORCE - +$(call if_changed_dep,rustc_library) + +$(call if_changed_rule,rustc_library) $(obj)/uapi.o: $(src)/uapi/lib.rs \ $(obj)/compiler_builtins.o \ $(obj)/uapi/uapi_generated.rs FORCE - +$(call if_changed_dep,rustc_library) + +$(call if_changed_rule,rustc_library) $(obj)/kernel.o: private rustc_target_flags = --extern alloc \ --extern build_error --extern macros --extern bindings --extern uapi $(obj)/kernel.o: $(src)/kernel/lib.rs $(obj)/alloc.o $(obj)/build_error.o \ $(obj)/libmacros.so $(obj)/bindings.o $(obj)/uapi.o FORCE - +$(call if_changed_dep,rustc_library) + +$(call if_changed_rule,rustc_library) endif # CONFIG_RUST diff --git a/rust/bindings/bindings_helper.h 
b/rust/bindings/bindings_helper.h index b940a5777330..ae82e9c941af 100644 --- a/rust/bindings/bindings_helper.h +++ b/rust/bindings/bindings_helper.h @@ -7,8 +7,8 @@ */ #include -#include #include +#include #include #include #include diff --git a/rust/exports.c b/rust/exports.c index 3803c21d1403..e5695f3b45b7 100644 --- a/rust/exports.c +++ b/rust/exports.c @@ -17,6 +17,7 @@ #include "exports_core_generated.h" #include "exports_alloc_generated.h" +#include "exports_helpers_generated.h" #include "exports_bindings_generated.h" #include "exports_kernel_generated.h" diff --git a/rust/helpers.c b/rust/helpers.c deleted file mode 100644 index 92d3c03ae1bd..000000000000 --- a/rust/helpers.c +++ /dev/null @@ -1,239 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Non-trivial C macros cannot be used in Rust. Similarly, inlined C functions - * cannot be called either. This file explicitly creates functions ("helpers") - * that wrap those so that they can be called from Rust. - * - * Even though Rust kernel modules should never use the bindings directly, some - * of these helpers need to be exported because Rust generics and inlined - * functions may not get their code generated in the crate where they are - * defined. Other helpers, called from non-inline functions, may not be - * exported, in principle. However, in general, the Rust compiler does not - * guarantee codegen will be performed for a non-inline function either. - * Therefore, this file exports all the helpers. In the future, this may be - * revisited to reduce the number of exports after the compiler is informed - * about the places codegen is required. - * - * All symbols are exported as GPL-only to guarantee no GPL-only feature is - * accidentally exposed. - * - * Sorted alphabetically. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -__noreturn void rust_helper_BUG(void) -{ - BUG(); -} -EXPORT_SYMBOL_GPL(rust_helper_BUG); - -unsigned long rust_helper_copy_from_user(void *to, const void __user *from, - unsigned long n) -{ - return copy_from_user(to, from, n); -} -EXPORT_SYMBOL_GPL(rust_helper_copy_from_user); - -unsigned long rust_helper_copy_to_user(void __user *to, const void *from, - unsigned long n) -{ - return copy_to_user(to, from, n); -} -EXPORT_SYMBOL_GPL(rust_helper_copy_to_user); - -void rust_helper_mutex_lock(struct mutex *lock) -{ - mutex_lock(lock); -} -EXPORT_SYMBOL_GPL(rust_helper_mutex_lock); - -void rust_helper___spin_lock_init(spinlock_t *lock, const char *name, - struct lock_class_key *key) -{ -#ifdef CONFIG_DEBUG_SPINLOCK - __raw_spin_lock_init(spinlock_check(lock), name, key, LD_WAIT_CONFIG); -#else - spin_lock_init(lock); -#endif -} -EXPORT_SYMBOL_GPL(rust_helper___spin_lock_init); - -void rust_helper_spin_lock(spinlock_t *lock) -{ - spin_lock(lock); -} -EXPORT_SYMBOL_GPL(rust_helper_spin_lock); - -void rust_helper_spin_unlock(spinlock_t *lock) -{ - spin_unlock(lock); -} -EXPORT_SYMBOL_GPL(rust_helper_spin_unlock); - -void rust_helper_init_wait(struct wait_queue_entry *wq_entry) -{ - init_wait(wq_entry); -} -EXPORT_SYMBOL_GPL(rust_helper_init_wait); - -int rust_helper_signal_pending(struct task_struct *t) -{ - return signal_pending(t); -} -EXPORT_SYMBOL_GPL(rust_helper_signal_pending); - -struct page *rust_helper_alloc_pages(gfp_t gfp_mask, unsigned int order) -{ - return alloc_pages(gfp_mask, order); -} -EXPORT_SYMBOL_GPL(rust_helper_alloc_pages); - -void *rust_helper_kmap_local_page(struct page 
*page) -{ - return kmap_local_page(page); -} -EXPORT_SYMBOL_GPL(rust_helper_kmap_local_page); - -void rust_helper_kunmap_local(const void *addr) -{ - kunmap_local(addr); -} -EXPORT_SYMBOL_GPL(rust_helper_kunmap_local); - -refcount_t rust_helper_REFCOUNT_INIT(int n) -{ - return (refcount_t)REFCOUNT_INIT(n); -} -EXPORT_SYMBOL_GPL(rust_helper_REFCOUNT_INIT); - -void rust_helper_refcount_inc(refcount_t *r) -{ - refcount_inc(r); -} -EXPORT_SYMBOL_GPL(rust_helper_refcount_inc); - -bool rust_helper_refcount_dec_and_test(refcount_t *r) -{ - return refcount_dec_and_test(r); -} -EXPORT_SYMBOL_GPL(rust_helper_refcount_dec_and_test); - -__force void *rust_helper_ERR_PTR(long err) -{ - return ERR_PTR(err); -} -EXPORT_SYMBOL_GPL(rust_helper_ERR_PTR); - -bool rust_helper_IS_ERR(__force const void *ptr) -{ - return IS_ERR(ptr); -} -EXPORT_SYMBOL_GPL(rust_helper_IS_ERR); - -long rust_helper_PTR_ERR(__force const void *ptr) -{ - return PTR_ERR(ptr); -} -EXPORT_SYMBOL_GPL(rust_helper_PTR_ERR); - -const char *rust_helper_errname(int err) -{ - return errname(err); -} -EXPORT_SYMBOL_GPL(rust_helper_errname); - -struct task_struct *rust_helper_get_current(void) -{ - return current; -} -EXPORT_SYMBOL_GPL(rust_helper_get_current); - -void rust_helper_get_task_struct(struct task_struct *t) -{ - get_task_struct(t); -} -EXPORT_SYMBOL_GPL(rust_helper_get_task_struct); - -void rust_helper_put_task_struct(struct task_struct *t) -{ - put_task_struct(t); -} -EXPORT_SYMBOL_GPL(rust_helper_put_task_struct); - -struct kunit *rust_helper_kunit_get_current_test(void) -{ - return kunit_get_current_test(); -} -EXPORT_SYMBOL_GPL(rust_helper_kunit_get_current_test); - -void rust_helper_init_work_with_key(struct work_struct *work, work_func_t func, - bool onstack, const char *name, - struct lock_class_key *key) -{ - __init_work(work, onstack); - work->data = (atomic_long_t)WORK_DATA_INIT(); - lockdep_init_map(&work->lockdep_map, name, key, 0); - INIT_LIST_HEAD(&work->entry); - work->func = func; -} -EXPORT_SYMBOL_GPL(rust_helper_init_work_with_key); - -void * __must_check __realloc_size(2) -rust_helper_krealloc(const void *objp, size_t new_size, gfp_t flags) -{ - return krealloc(objp, new_size, flags); -} -EXPORT_SYMBOL_GPL(rust_helper_krealloc); - -/* - * `bindgen` binds the C `size_t` type as the Rust `usize` type, so we can - * use it in contexts where Rust expects a `usize` like slice (array) indices. - * `usize` is defined to be the same as C's `uintptr_t` type (can hold any - * pointer) but not necessarily the same as `size_t` (can hold the size of any - * single object). Most modern platforms use the same concrete integer type for - * both of them, but in case we find ourselves on a platform where - * that's not true, fail early instead of risking ABI or - * integer-overflow issues. - * - * If your platform fails this assertion, it means that you are in - * danger of integer-overflow bugs (even if you attempt to add - * `--no-size_t-is-usize`). It may be easiest to change the kernel ABI on - * your platform such that `size_t` matches `uintptr_t` (i.e., to increase - * `size_t`, because `uintptr_t` has to be at least as big as `size_t`). - */ -static_assert( - sizeof(size_t) == sizeof(uintptr_t) && - __alignof__(size_t) == __alignof__(uintptr_t), - "Rust code expects C `size_t` to match Rust `usize`" -); - -// This will soon be moved to a separate file, so no need to merge with above. 
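On the Rust side, the `bindings_helpers_generated.rs` rule earlier in this patch strips the `rust_helper_` prefix via `#[link_name]`, so callers use the original C names. Roughly (types abbreviated to a raw pointer for the sketch):

    extern "C" {
        // Generated binding after the sed rewrite: Rust code calls
        // `mutex_lock(...)`, the linker resolves `rust_helper_mutex_lock`.
        #[link_name = "rust_helper_mutex_lock"]
        pub fn mutex_lock(lock: *mut core::ffi::c_void);
    }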
-#include -#include - -void *rust_helper_blk_mq_rq_to_pdu(struct request *rq) -{ - return blk_mq_rq_to_pdu(rq); -} -EXPORT_SYMBOL_GPL(rust_helper_blk_mq_rq_to_pdu); - -struct request *rust_helper_blk_mq_rq_from_pdu(void *pdu) -{ - return blk_mq_rq_from_pdu(pdu); -} -EXPORT_SYMBOL_GPL(rust_helper_blk_mq_rq_from_pdu); diff --git a/rust/helpers/blk.c b/rust/helpers/blk.c new file mode 100644 index 000000000000..cc9f4e6a2d23 --- /dev/null +++ b/rust/helpers/blk.c @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +void *rust_helper_blk_mq_rq_to_pdu(struct request *rq) +{ + return blk_mq_rq_to_pdu(rq); +} + +struct request *rust_helper_blk_mq_rq_from_pdu(void *pdu) +{ + return blk_mq_rq_from_pdu(pdu); +} diff --git a/rust/helpers/bug.c b/rust/helpers/bug.c new file mode 100644 index 000000000000..e2d13babc737 --- /dev/null +++ b/rust/helpers/bug.c @@ -0,0 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include + +__noreturn void rust_helper_BUG(void) +{ + BUG(); +} diff --git a/rust/helpers/build_assert.c b/rust/helpers/build_assert.c new file mode 100644 index 000000000000..6a54b2680b14 --- /dev/null +++ b/rust/helpers/build_assert.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include + +/* + * `bindgen` binds the C `size_t` type as the Rust `usize` type, so we can + * use it in contexts where Rust expects a `usize` like slice (array) indices. + * `usize` is defined to be the same as C's `uintptr_t` type (can hold any + * pointer) but not necessarily the same as `size_t` (can hold the size of any + * single object). Most modern platforms use the same concrete integer type for + * both of them, but in case we find ourselves on a platform where + * that's not true, fail early instead of risking ABI or + * integer-overflow issues. + * + * If your platform fails this assertion, it means that you are in + * danger of integer-overflow bugs (even if you attempt to add + * `--no-size_t-is-usize`). It may be easiest to change the kernel ABI on + * your platform such that `size_t` matches `uintptr_t` (i.e., to increase + * `size_t`, because `uintptr_t` has to be at least as big as `size_t`). + */ +static_assert( + sizeof(size_t) == sizeof(uintptr_t) && + __alignof__(size_t) == __alignof__(uintptr_t), + "Rust code expects C `size_t` to match Rust `usize`" +); diff --git a/rust/helpers/build_bug.c b/rust/helpers/build_bug.c new file mode 100644 index 000000000000..e994f7b5928c --- /dev/null +++ b/rust/helpers/build_bug.c @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +const char *rust_helper_errname(int err) +{ + return errname(err); +} diff --git a/rust/helpers/err.c b/rust/helpers/err.c new file mode 100644 index 000000000000..be3d45ef78a2 --- /dev/null +++ b/rust/helpers/err.c @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +__force void *rust_helper_ERR_PTR(long err) +{ + return ERR_PTR(err); +} + +bool rust_helper_IS_ERR(__force const void *ptr) +{ + return IS_ERR(ptr); +} + +long rust_helper_PTR_ERR(__force const void *ptr) +{ + return PTR_ERR(ptr); +} diff --git a/rust/helpers/helpers.c b/rust/helpers/helpers.c new file mode 100644 index 000000000000..30f40149f3a9 --- /dev/null +++ b/rust/helpers/helpers.c @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Non-trivial C macros cannot be used in Rust. Similarly, inlined C functions + * cannot be called either. This file explicitly creates functions ("helpers") + * that wrap those so that they can be called from Rust. 
+ * + * Sorted alphabetically. + */ + +#include "blk.c" +#include "bug.c" +#include "build_assert.c" +#include "build_bug.c" +#include "err.c" +#include "kunit.c" +#include "mutex.c" +#include "page.c" +#include "rbtree.c" +#include "refcount.c" +#include "signal.c" +#include "slab.c" +#include "spinlock.c" +#include "task.c" +#include "uaccess.c" +#include "wait.c" +#include "workqueue.c" diff --git a/rust/helpers/kunit.c b/rust/helpers/kunit.c new file mode 100644 index 000000000000..9d725067eb3b --- /dev/null +++ b/rust/helpers/kunit.c @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +struct kunit *rust_helper_kunit_get_current_test(void) +{ + return kunit_get_current_test(); +} diff --git a/rust/helpers/mutex.c b/rust/helpers/mutex.c new file mode 100644 index 000000000000..200db7e6279f --- /dev/null +++ b/rust/helpers/mutex.c @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +void rust_helper_mutex_lock(struct mutex *lock) +{ + mutex_lock(lock); +} diff --git a/rust/helpers/page.c b/rust/helpers/page.c new file mode 100644 index 000000000000..b3f2b8fbf87f --- /dev/null +++ b/rust/helpers/page.c @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +struct page *rust_helper_alloc_pages(gfp_t gfp_mask, unsigned int order) +{ + return alloc_pages(gfp_mask, order); +} + +void *rust_helper_kmap_local_page(struct page *page) +{ + return kmap_local_page(page); +} + +void rust_helper_kunmap_local(const void *addr) +{ + kunmap_local(addr); +} diff --git a/rust/helpers/rbtree.c b/rust/helpers/rbtree.c new file mode 100644 index 000000000000..6d404b84a9b5 --- /dev/null +++ b/rust/helpers/rbtree.c @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include + +void rust_helper_rb_link_node(struct rb_node *node, struct rb_node *parent, + struct rb_node **rb_link) +{ + rb_link_node(node, parent, rb_link); +} diff --git a/rust/helpers/refcount.c b/rust/helpers/refcount.c new file mode 100644 index 000000000000..f47afc148ec3 --- /dev/null +++ b/rust/helpers/refcount.c @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +refcount_t rust_helper_REFCOUNT_INIT(int n) +{ + return (refcount_t)REFCOUNT_INIT(n); +} + +void rust_helper_refcount_inc(refcount_t *r) +{ + refcount_inc(r); +} + +bool rust_helper_refcount_dec_and_test(refcount_t *r) +{ + return refcount_dec_and_test(r); +} diff --git a/rust/helpers/signal.c b/rust/helpers/signal.c new file mode 100644 index 000000000000..63c407f80c26 --- /dev/null +++ b/rust/helpers/signal.c @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +int rust_helper_signal_pending(struct task_struct *t) +{ + return signal_pending(t); +} diff --git a/rust/helpers/slab.c b/rust/helpers/slab.c new file mode 100644 index 000000000000..f043e087f9d6 --- /dev/null +++ b/rust/helpers/slab.c @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include + +void * __must_check __realloc_size(2) +rust_helper_krealloc(const void *objp, size_t new_size, gfp_t flags) +{ + return krealloc(objp, new_size, flags); +} diff --git a/rust/helpers/spinlock.c b/rust/helpers/spinlock.c new file mode 100644 index 000000000000..acc1376b833c --- /dev/null +++ b/rust/helpers/spinlock.c @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +void rust_helper___spin_lock_init(spinlock_t *lock, const char *name, + struct lock_class_key *key) +{ +#ifdef CONFIG_DEBUG_SPINLOCK + __raw_spin_lock_init(spinlock_check(lock), name, key, LD_WAIT_CONFIG); 
+#else + spin_lock_init(lock); +#endif +} + +void rust_helper_spin_lock(spinlock_t *lock) +{ + spin_lock(lock); +} + +void rust_helper_spin_unlock(spinlock_t *lock) +{ + spin_unlock(lock); +} diff --git a/rust/helpers/task.c b/rust/helpers/task.c new file mode 100644 index 000000000000..7ac789232d11 --- /dev/null +++ b/rust/helpers/task.c @@ -0,0 +1,19 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +struct task_struct *rust_helper_get_current(void) +{ + return current; +} + +void rust_helper_get_task_struct(struct task_struct *t) +{ + get_task_struct(t); +} + +void rust_helper_put_task_struct(struct task_struct *t) +{ + put_task_struct(t); +} diff --git a/rust/helpers/uaccess.c b/rust/helpers/uaccess.c new file mode 100644 index 000000000000..f49076f813cd --- /dev/null +++ b/rust/helpers/uaccess.c @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include + +unsigned long rust_helper_copy_from_user(void *to, const void __user *from, + unsigned long n) +{ + return copy_from_user(to, from, n); +} + +unsigned long rust_helper_copy_to_user(void __user *to, const void *from, + unsigned long n) +{ + return copy_to_user(to, from, n); +} diff --git a/rust/helpers/wait.c b/rust/helpers/wait.c new file mode 100644 index 000000000000..c7336bbf2750 --- /dev/null +++ b/rust/helpers/wait.c @@ -0,0 +1,9 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +void rust_helper_init_wait(struct wait_queue_entry *wq_entry) +{ + init_wait(wq_entry); +} diff --git a/rust/helpers/workqueue.c b/rust/helpers/workqueue.c new file mode 100644 index 000000000000..f59427acc323 --- /dev/null +++ b/rust/helpers/workqueue.c @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +void rust_helper_init_work_with_key(struct work_struct *work, work_func_t func, + bool onstack, const char *name, + struct lock_class_key *key) +{ + __init_work(work, onstack); + work->data = (atomic_long_t)WORK_DATA_INIT(); + lockdep_init_map(&work->lockdep_map, name, key, 0); + INIT_LIST_HEAD(&work->entry); + work->func = func; +} diff --git a/rust/kernel/alloc/box_ext.rs b/rust/kernel/alloc/box_ext.rs index 9f1c1c489189..7009ad78d4e0 100644 --- a/rust/kernel/alloc/box_ext.rs +++ b/rust/kernel/alloc/box_ext.rs @@ -4,7 +4,7 @@ use super::{AllocError, Flags}; use alloc::boxed::Box; -use core::mem::MaybeUninit; +use core::{mem::MaybeUninit, ptr, result::Result}; /// Extensions to [`Box`]. pub trait BoxExt: Sized { @@ -17,6 +17,24 @@ pub trait BoxExt: Sized { /// /// The allocation may fail, in which case an error is returned. fn new_uninit(flags: Flags) -> Result>, AllocError>; + + /// Drops the contents, but keeps the allocation. + /// + /// # Examples + /// + /// ``` + /// use kernel::alloc::{flags, box_ext::BoxExt}; + /// let value = Box::new([0; 32], flags::GFP_KERNEL)?; + /// assert_eq!(*value, [0; 32]); + /// let mut value = Box::drop_contents(value); + /// // Now we can re-use `value`: + /// value.write([1; 32]); + /// // SAFETY: We just wrote to it. + /// let value = unsafe { value.assume_init() }; + /// assert_eq!(*value, [1; 32]); + /// # Ok::<(), Error>(()) + /// ``` + fn drop_contents(this: Self) -> Box>; } impl BoxExt for Box { @@ -55,4 +73,17 @@ fn new_uninit(flags: Flags) -> Result>, AllocError> { // zero-sized types, we use `NonNull::dangling`. Ok(unsafe { Box::from_raw(ptr) }) } + + fn drop_contents(this: Self) -> Box> { + let ptr = Box::into_raw(this); + // SAFETY: `ptr` is valid, because it came from `Box::into_raw`. 
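+ // (`drop_in_place` runs `T`'s destructor without freeing the backing
+ // memory, which is exactly the split `drop_contents` needs: the
+ // allocation survives and is returned below as `Box<MaybeUninit<T>>`.)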
+ unsafe { ptr::drop_in_place(ptr) }; + + // CAST: `MaybeUninit` is a transparent wrapper of `T`. + let ptr = ptr.cast::>(); + + // SAFETY: `ptr` is valid for writes, because it came from `Box::into_raw` and it is valid for + // reads, since the pointer came from `Box::into_raw` and the type is `MaybeUninit`. + unsafe { Box::from_raw(ptr) } + } } diff --git a/rust/kernel/error.rs b/rust/kernel/error.rs index 145f5c397009..6f1587a2524e 100644 --- a/rust/kernel/error.rs +++ b/rust/kernel/error.rs @@ -135,8 +135,11 @@ pub(crate) fn to_blk_status(self) -> bindings::blk_status_t { /// Returns the error encoded as a pointer. #[allow(dead_code)] pub(crate) fn to_ptr(self) -> *mut T { + #[cfg_attr(target_pointer_width = "32", allow(clippy::useless_conversion))] // SAFETY: `self.0` is a valid error due to its invariant. - unsafe { bindings::ERR_PTR(self.0.into()) as *mut _ } + unsafe { + bindings::ERR_PTR(self.0.into()) as *mut _ + } } /// Returns a string representing the error, if one exists. diff --git a/rust/kernel/init.rs b/rust/kernel/init.rs index 495c09ebe3a3..a17ac8762d8f 100644 --- a/rust/kernel/init.rs +++ b/rust/kernel/init.rs @@ -213,6 +213,7 @@ use crate::{ alloc::{box_ext::BoxExt, AllocError, Flags}, error::{self, Error}, + sync::Arc, sync::UniqueArc, types::{Opaque, ScopeGuard}, }; @@ -742,6 +743,74 @@ macro_rules! try_init { }; } +/// Asserts that a field on a struct using `#[pin_data]` is marked with `#[pin]` ie. that it is +/// structurally pinned. +/// +/// # Example +/// +/// This will succeed: +/// ``` +/// use kernel::assert_pinned; +/// #[pin_data] +/// struct MyStruct { +/// #[pin] +/// some_field: u64, +/// } +/// +/// assert_pinned!(MyStruct, some_field, u64); +/// ``` +/// +/// This will fail: +// TODO: replace with `compile_fail` when supported. +/// ```ignore +/// use kernel::assert_pinned; +/// #[pin_data] +/// struct MyStruct { +/// some_field: u64, +/// } +/// +/// assert_pinned!(MyStruct, some_field, u64); +/// ``` +/// +/// Some uses of the macro may trigger the `can't use generic parameters from outer item` error. To +/// work around this, you may pass the `inline` parameter to the macro. The `inline` parameter can +/// only be used when the macro is invoked from a function body. +/// ``` +/// use kernel::assert_pinned; +/// #[pin_data] +/// struct Foo { +/// #[pin] +/// elem: T, +/// } +/// +/// impl Foo { +/// fn project(self: Pin<&mut Self>) -> Pin<&mut T> { +/// assert_pinned!(Foo, elem, T, inline); +/// +/// // SAFETY: The field is structurally pinned. +/// unsafe { self.map_unchecked_mut(|me| &mut me.elem) } +/// } +/// } +/// ``` +#[macro_export] +macro_rules! assert_pinned { + ($ty:ty, $field:ident, $field_ty:ty, inline) => { + let _ = move |ptr: *mut $field_ty| { + // SAFETY: This code is unreachable. + let data = unsafe { <$ty as $crate::init::__internal::HasPinData>::__pin_data() }; + let init = $crate::init::__internal::AlwaysFail::<$field_ty>::new(); + // SAFETY: This code is unreachable. + unsafe { data.$field(ptr, init) }.ok(); + }; + }; + + ($ty:ty, $field:ident, $field_ty:ty) => { + const _: () = { + $crate::assert_pinned!($ty, $field, $field_ty, inline); + }; + }; +} + /// A pin-initializer for the type `T`. /// /// To use this initializer, you will need a suitable memory location that can hold a `T`. This can @@ -1107,11 +1176,17 @@ unsafe fn __pinned_init(self, slot: *mut T) -> Result<(), E> { /// Smart pointer that can initialize memory in-place. pub trait InPlaceInit: Sized { + /// Pinned version of `Self`. 
+ /// + /// If a type already implicitly pins its pointee, `Pin` is unnecessary. In this case use + /// `Self`, otherwise just use `Pin`. + type PinnedSelf; + /// Use the given pin-initializer to pin-initialize a `T` inside of a new smart pointer of this /// type. /// /// If `T: !Unpin` it will not be able to move afterwards. - fn try_pin_init(init: impl PinInit, flags: Flags) -> Result, E> + fn try_pin_init(init: impl PinInit, flags: Flags) -> Result where E: From; @@ -1119,7 +1194,7 @@ fn try_pin_init(init: impl PinInit, flags: Flags) -> Result, /// type. /// /// If `T: !Unpin` it will not be able to move afterwards. - fn pin_init(init: impl PinInit, flags: Flags) -> error::Result> + fn pin_init(init: impl PinInit, flags: Flags) -> error::Result where Error: From, { @@ -1148,19 +1223,15 @@ fn init(init: impl Init, flags: Flags) -> error::Result } } -impl InPlaceInit for Box { +impl InPlaceInit for Arc { + type PinnedSelf = Self; + #[inline] - fn try_pin_init(init: impl PinInit, flags: Flags) -> Result, E> + fn try_pin_init(init: impl PinInit, flags: Flags) -> Result where E: From, { - let mut this = as BoxExt<_>>::new_uninit(flags)?; - let slot = this.as_mut_ptr(); - // SAFETY: When init errors/panics, slot will get deallocated but not dropped, - // slot is valid and will not be moved, because we pin it later. - unsafe { init.__pinned_init(slot)? }; - // SAFETY: All fields have been initialized. - Ok(unsafe { this.assume_init() }.into()) + UniqueArc::try_pin_init(init, flags).map(|u| u.into()) } #[inline] @@ -1168,29 +1239,39 @@ fn try_init(init: impl Init, flags: Flags) -> Result where E: From, { - let mut this = as BoxExt<_>>::new_uninit(flags)?; - let slot = this.as_mut_ptr(); - // SAFETY: When init errors/panics, slot will get deallocated but not dropped, - // slot is valid. - unsafe { init.__init(slot)? }; - // SAFETY: All fields have been initialized. - Ok(unsafe { this.assume_init() }) + UniqueArc::try_init(init, flags).map(|u| u.into()) + } +} + +impl InPlaceInit for Box { + type PinnedSelf = Pin; + + #[inline] + fn try_pin_init(init: impl PinInit, flags: Flags) -> Result + where + E: From, + { + as BoxExt<_>>::new_uninit(flags)?.write_pin_init(init) + } + + #[inline] + fn try_init(init: impl Init, flags: Flags) -> Result + where + E: From, + { + as BoxExt<_>>::new_uninit(flags)?.write_init(init) } } impl InPlaceInit for UniqueArc { + type PinnedSelf = Pin; + #[inline] - fn try_pin_init(init: impl PinInit, flags: Flags) -> Result, E> + fn try_pin_init(init: impl PinInit, flags: Flags) -> Result where E: From, { - let mut this = UniqueArc::new_uninit(flags)?; - let slot = this.as_mut_ptr(); - // SAFETY: When init errors/panics, slot will get deallocated but not dropped, - // slot is valid and will not be moved, because we pin it later. - unsafe { init.__pinned_init(slot)? }; - // SAFETY: All fields have been initialized. - Ok(unsafe { this.assume_init() }.into()) + UniqueArc::new_uninit(flags)?.write_pin_init(init) } #[inline] @@ -1198,13 +1279,67 @@ fn try_init(init: impl Init, flags: Flags) -> Result where E: From, { - let mut this = UniqueArc::new_uninit(flags)?; - let slot = this.as_mut_ptr(); + UniqueArc::new_uninit(flags)?.write_init(init) + } +} + +/// Smart pointer containing uninitialized memory and that can write a value. +pub trait InPlaceWrite { + /// The type `Self` turns into when the contents are initialized. + type Initialized; + + /// Use the given initializer to write a value into `self`. 
+ /// + /// Does not drop the current value and considers it as uninitialized memory. + fn write_init(self, init: impl Init) -> Result; + + /// Use the given pin-initializer to write a value into `self`. + /// + /// Does not drop the current value and considers it as uninitialized memory. + fn write_pin_init(self, init: impl PinInit) -> Result, E>; +} + +impl InPlaceWrite for Box> { + type Initialized = Box; + + fn write_init(mut self, init: impl Init) -> Result { + let slot = self.as_mut_ptr(); // SAFETY: When init errors/panics, slot will get deallocated but not dropped, // slot is valid. unsafe { init.__init(slot)? }; // SAFETY: All fields have been initialized. - Ok(unsafe { this.assume_init() }) + Ok(unsafe { self.assume_init() }) + } + + fn write_pin_init(mut self, init: impl PinInit) -> Result, E> { + let slot = self.as_mut_ptr(); + // SAFETY: When init errors/panics, slot will get deallocated but not dropped, + // slot is valid and will not be moved, because we pin it later. + unsafe { init.__pinned_init(slot)? }; + // SAFETY: All fields have been initialized. + Ok(unsafe { self.assume_init() }.into()) + } +} + +impl InPlaceWrite for UniqueArc> { + type Initialized = UniqueArc; + + fn write_init(mut self, init: impl Init) -> Result { + let slot = self.as_mut_ptr(); + // SAFETY: When init errors/panics, slot will get deallocated but not dropped, + // slot is valid. + unsafe { init.__init(slot)? }; + // SAFETY: All fields have been initialized. + Ok(unsafe { self.assume_init() }) + } + + fn write_pin_init(mut self, init: impl PinInit) -> Result, E> { + let slot = self.as_mut_ptr(); + // SAFETY: When init errors/panics, slot will get deallocated but not dropped, + // slot is valid and will not be moved, because we pin it later. + unsafe { init.__pinned_init(slot)? }; + // SAFETY: All fields have been initialized. + Ok(unsafe { self.assume_init() }.into()) } } diff --git a/rust/kernel/init/__internal.rs b/rust/kernel/init/__internal.rs index db3372619ecd..13cefd37512f 100644 --- a/rust/kernel/init/__internal.rs +++ b/rust/kernel/init/__internal.rs @@ -228,3 +228,32 @@ pub unsafe fn new() -> Self { Self(()) } } + +/// Initializer that always fails. +/// +/// Used by [`assert_pinned!`]. +/// +/// [`assert_pinned!`]: crate::assert_pinned +pub struct AlwaysFail { + _t: PhantomData, +} + +impl AlwaysFail { + /// Creates a new initializer that always fails. + pub fn new() -> Self { + Self { _t: PhantomData } + } +} + +impl Default for AlwaysFail { + fn default() -> Self { + Self::new() + } +} + +// SAFETY: `__pinned_init` always fails, which is always okay. +unsafe impl PinInit for AlwaysFail { + unsafe fn __pinned_init(self, _slot: *mut T) -> Result<(), ()> { + Err(()) + } +} diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs index 58ed400198bf..22a3bfa5a9e9 100644 --- a/rust/kernel/lib.rs +++ b/rust/kernel/lib.rs @@ -38,12 +38,14 @@ pub mod ioctl; #[cfg(CONFIG_KUNIT)] pub mod kunit; +pub mod list; #[cfg(CONFIG_NET)] pub mod net; pub mod page; pub mod prelude; pub mod print; pub mod sizes; +pub mod rbtree; mod static_assert; #[doc(hidden)] pub mod std_vendor; diff --git a/rust/kernel/list.rs b/rust/kernel/list.rs new file mode 100644 index 000000000000..5b4aec29eb67 --- /dev/null +++ b/rust/kernel/list.rs @@ -0,0 +1,686 @@ +// SPDX-License-Identifier: GPL-2.0 + +// Copyright (C) 2024 Google LLC. + +//! A linked list implementation. 
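Usage of the `InPlaceWrite` trait introduced above, sketched under the assumption of a type `Foo` with an `impl Init<Foo>` constructor (both placeholders):

    fn make_foo() -> Result<Box<Foo>, Error> {
        // Allocate uninitialized memory first...
        let slot = <Box<_> as BoxExt<_>>::new_uninit(GFP_KERNEL)?;
        // ...then run the initializer directly in place. On error the
        // allocation is freed without ever dropping a `Foo`.
        slot.write_init(Foo::new())
    }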
+ +use crate::init::PinInit; +use crate::sync::ArcBorrow; +use crate::types::Opaque; +use core::iter::{DoubleEndedIterator, FusedIterator}; +use core::marker::PhantomData; +use core::ptr; + +mod impl_list_item_mod; +pub use self::impl_list_item_mod::{ + impl_has_list_links, impl_has_list_links_self_ptr, impl_list_item, HasListLinks, HasSelfPtr, +}; + +mod arc; +pub use self::arc::{impl_list_arc_safe, AtomicTracker, ListArc, ListArcSafe, TryNewListArc}; + +mod arc_field; +pub use self::arc_field::{define_list_arc_field_getter, ListArcField}; + +/// A linked list. +/// +/// All elements in this linked list will be [`ListArc`] references to the value. Since a value can +/// only have one `ListArc` (for each pair of prev/next pointers), this ensures that the same +/// prev/next pointers are not used for several linked lists. +/// +/// # Invariants +/// +/// * If the list is empty, then `first` is null. Otherwise, `first` points at the `ListLinks` +/// field of the first element in the list. +/// * All prev/next pointers in `ListLinks` fields of items in the list are valid and form a cycle. +/// * For every item in the list, the list owns the associated [`ListArc`] reference and has +/// exclusive access to the `ListLinks` field. +pub struct List, const ID: u64 = 0> { + first: *mut ListLinksFields, + _ty: PhantomData>, +} + +// SAFETY: This is a container of `ListArc`, and access to the container allows the same +// type of access to the `ListArc` elements. +unsafe impl Send for List +where + ListArc: Send, + T: ?Sized + ListItem, +{ +} +// SAFETY: This is a container of `ListArc`, and access to the container allows the same +// type of access to the `ListArc` elements. +unsafe impl Sync for List +where + ListArc: Sync, + T: ?Sized + ListItem, +{ +} + +/// Implemented by types where a [`ListArc`] can be inserted into a [`List`]. +/// +/// # Safety +/// +/// Implementers must ensure that they provide the guarantees documented on methods provided by +/// this trait. +/// +/// [`ListArc`]: ListArc +pub unsafe trait ListItem: ListArcSafe { + /// Views the [`ListLinks`] for this value. + /// + /// # Guarantees + /// + /// If there is a previous call to `prepare_to_insert` and there is no call to `post_remove` + /// since the most recent such call, then this returns the same pointer as the one returned by + /// the most recent call to `prepare_to_insert`. + /// + /// Otherwise, the returned pointer points at a read-only [`ListLinks`] with two null pointers. + /// + /// # Safety + /// + /// The provided pointer must point at a valid value. (It need not be in an `Arc`.) + unsafe fn view_links(me: *const Self) -> *mut ListLinks; + + /// View the full value given its [`ListLinks`] field. + /// + /// Can only be used when the value is in a list. + /// + /// # Guarantees + /// + /// * Returns the same pointer as the one passed to the most recent call to `prepare_to_insert`. + /// * The returned pointer is valid until the next call to `post_remove`. + /// + /// # Safety + /// + /// * The provided pointer must originate from the most recent call to `prepare_to_insert`, or + /// from a call to `view_links` that happened after the most recent call to + /// `prepare_to_insert`. + /// * Since the most recent call to `prepare_to_insert`, the `post_remove` method must not have + /// been called. + unsafe fn view_value(me: *mut ListLinks) -> *const Self; + + /// This is called when an item is inserted into a [`List`]. 
+ /// + /// # Guarantees + /// + /// The caller is granted exclusive access to the returned [`ListLinks`] until `post_remove` is + /// called. + /// + /// # Safety + /// + /// * The provided pointer must point at a valid value in an [`Arc`]. + /// * Calls to `prepare_to_insert` and `post_remove` on the same value must alternate. + /// * The caller must own the [`ListArc`] for this value. + /// * The caller must not give up ownership of the [`ListArc`] unless `post_remove` has been + /// called after this call to `prepare_to_insert`. + /// + /// [`Arc`]: crate::sync::Arc + unsafe fn prepare_to_insert(me: *const Self) -> *mut ListLinks; + + /// This undoes a previous call to `prepare_to_insert`. + /// + /// # Guarantees + /// + /// The returned pointer is the pointer that was originally passed to `prepare_to_insert`. + /// + /// # Safety + /// + /// The provided pointer must be the pointer returned by the most recent call to + /// `prepare_to_insert`. + unsafe fn post_remove(me: *mut ListLinks) -> *const Self; +} + +#[repr(C)] +#[derive(Copy, Clone)] +struct ListLinksFields { + next: *mut ListLinksFields, + prev: *mut ListLinksFields, +} + +/// The prev/next pointers for an item in a linked list. +/// +/// # Invariants +/// +/// The fields are null if and only if this item is not in a list. +#[repr(transparent)] +pub struct ListLinks { + // This type is `!Unpin` for aliasing reasons as the pointers are part of an intrusive linked + // list. + inner: Opaque, +} + +// SAFETY: The only way to access/modify the pointers inside of `ListLinks` is via holding the +// associated `ListArc`. Since that type correctly implements `Send`, it is impossible to +// move this an instance of this type to a different thread if the pointees are `!Send`. +unsafe impl Send for ListLinks {} +// SAFETY: The type is opaque so immutable references to a ListLinks are useless. Therefore, it's +// okay to have immutable access to a ListLinks from several threads at once. +unsafe impl Sync for ListLinks {} + +impl ListLinks { + /// Creates a new initializer for this type. + pub fn new() -> impl PinInit { + // INVARIANT: Pin-init initializers can't be used on an existing `Arc`, so this value will + // not be constructed in an `Arc` that already has a `ListArc`. + ListLinks { + inner: Opaque::new(ListLinksFields { + prev: ptr::null_mut(), + next: ptr::null_mut(), + }), + } + } + + /// # Safety + /// + /// `me` must be dereferenceable. + #[inline] + unsafe fn fields(me: *mut Self) -> *mut ListLinksFields { + // SAFETY: The caller promises that the pointer is valid. + unsafe { Opaque::raw_get(ptr::addr_of!((*me).inner)) } + } + + /// # Safety + /// + /// `me` must be dereferenceable. + #[inline] + unsafe fn from_fields(me: *mut ListLinksFields) -> *mut Self { + me.cast() + } +} + +/// Similar to [`ListLinks`], but also contains a pointer to the full value. +/// +/// This type can be used instead of [`ListLinks`] to support lists with trait objects. +#[repr(C)] +pub struct ListLinksSelfPtr { + /// The `ListLinks` field inside this value. + /// + /// This is public so that it can be used with `impl_has_list_links!`. + pub inner: ListLinks, + // UnsafeCell is not enough here because we use `Opaque::uninit` as a dummy value, and + // `ptr::null()` doesn't work for `T: ?Sized`. + self_ptr: Opaque<*const T>, +} + +// SAFETY: The fields of a ListLinksSelfPtr can be moved across thread boundaries. +unsafe impl Send for ListLinksSelfPtr {} +// SAFETY: The type is opaque so immutable references to a ListLinksSelfPtr are useless. 
+// it's okay to have immutable access to a ListLinksSelfPtr from several threads at once.
+//
+// Note that `inner` being a public field does not prevent this type from being opaque, since
+// `inner` is an opaque type.
+unsafe impl<T: ?Sized, const ID: u64> Sync for ListLinksSelfPtr<T, ID> {}
+
+impl<T: ?Sized, const ID: u64> ListLinksSelfPtr<T, ID> {
+    /// The offset from the [`ListLinks`] to the self pointer field.
+    pub const LIST_LINKS_SELF_PTR_OFFSET: usize = core::mem::offset_of!(Self, self_ptr);
+
+    /// Creates a new initializer for this type.
+    pub fn new() -> impl PinInit<Self> {
+        // INVARIANT: Pin-init initializers can't be used on an existing `Arc`, so this value will
+        // not be constructed in an `Arc` that already has a `ListArc`.
+        Self {
+            inner: ListLinks {
+                inner: Opaque::new(ListLinksFields {
+                    prev: ptr::null_mut(),
+                    next: ptr::null_mut(),
+                }),
+            },
+            self_ptr: Opaque::uninit(),
+        }
+    }
+}
+
+impl<T: ?Sized + ListItem<ID>, const ID: u64> List<T, ID> {
+    /// Creates a new empty list.
+    pub const fn new() -> Self {
+        Self {
+            first: ptr::null_mut(),
+            _ty: PhantomData,
+        }
+    }
+
+    /// Returns whether this list is empty.
+    pub fn is_empty(&self) -> bool {
+        self.first.is_null()
+    }
+
+    /// Add the provided item to the back of the list.
+    pub fn push_back(&mut self, item: ListArc<T, ID>) {
+        let raw_item = ListArc::into_raw(item);
+        // SAFETY:
+        // * We just got `raw_item` from a `ListArc`, so it's in an `Arc`.
+        // * Since we have ownership of the `ListArc`, `post_remove` must have been called after
+        //   the most recent call to `prepare_to_insert`, if any.
+        // * We own the `ListArc`.
+        // * Removing items from this list is always done using `remove_internal_inner`, which
+        //   calls `post_remove` before giving up ownership.
+        let list_links = unsafe { T::prepare_to_insert(raw_item) };
+        // SAFETY: We have not yet called `post_remove`, so `list_links` is still valid.
+        let item = unsafe { ListLinks::fields(list_links) };
+
+        if self.first.is_null() {
+            self.first = item;
+            // SAFETY: The caller just gave us ownership of these fields.
+            // INVARIANT: A linked list with one item should be cyclic.
+            unsafe {
+                (*item).next = item;
+                (*item).prev = item;
+            }
+        } else {
+            let next = self.first;
+            // SAFETY: By the type invariant, this pointer is valid or null. We just checked that
+            // it's not null, so it must be valid.
+            let prev = unsafe { (*next).prev };
+            // SAFETY: Pointers in a linked list are never dangling, and the caller just gave us
+            // ownership of the fields on `item`.
+            // INVARIANT: This correctly inserts `item` between `prev` and `next`.
+            unsafe {
+                (*item).next = next;
+                (*item).prev = prev;
+                (*prev).next = item;
+                (*next).prev = item;
+            }
+        }
+    }
+
+    /// Add the provided item to the front of the list.
+    pub fn push_front(&mut self, item: ListArc<T, ID>) {
+        let raw_item = ListArc::into_raw(item);
+        // SAFETY:
+        // * We just got `raw_item` from a `ListArc`, so it's in an `Arc`.
+        // * If this requirement is violated, then the previous caller of `prepare_to_insert`
+        //   violated the safety requirement that they can't give up ownership of the `ListArc`
+        //   until they call `post_remove`.
+        // * We own the `ListArc`.
+        // * Removing items from this list is always done using `remove_internal_inner`, which
+        //   calls `post_remove` before giving up ownership.
+        let list_links = unsafe { T::prepare_to_insert(raw_item) };
+        // SAFETY: We have not yet called `post_remove`, so `list_links` is still valid.
+        let item = unsafe { ListLinks::fields(list_links) };
+
+        if self.first.is_null() {
+            // SAFETY: The caller just gave us ownership of these fields.
+            // INVARIANT: A linked list with one item should be cyclic.
+            unsafe {
+                (*item).next = item;
+                (*item).prev = item;
+            }
+        } else {
+            let next = self.first;
+            // SAFETY: We just checked that `next` is non-null.
+            let prev = unsafe { (*next).prev };
+            // SAFETY: Pointers in a linked list are never dangling, and the caller just gave us
+            // ownership of the fields on `item`.
+            // INVARIANT: This correctly inserts `item` between `prev` and `next`.
+            unsafe {
+                (*item).next = next;
+                (*item).prev = prev;
+                (*prev).next = item;
+                (*next).prev = item;
+            }
+        }
+        self.first = item;
+    }
+
+    /// Removes the last item from this list.
+    pub fn pop_back(&mut self) -> Option<ListArc<T, ID>> {
+        if self.first.is_null() {
+            return None;
+        }
+
+        // SAFETY: We just checked that the list is not empty.
+        let last = unsafe { (*self.first).prev };
+        // SAFETY: The last item of this list is in this list.
+        Some(unsafe { self.remove_internal(last) })
+    }
+
+    /// Removes the first item from this list.
+    pub fn pop_front(&mut self) -> Option<ListArc<T, ID>> {
+        if self.first.is_null() {
+            return None;
+        }
+
+        // SAFETY: The first item of this list is in this list.
+        Some(unsafe { self.remove_internal(self.first) })
+    }
+
+    /// Removes the provided item from this list and returns it.
+    ///
+    /// This returns `None` if the item is not in the list. (Note that by the safety requirements,
+    /// this means that the item is not in any list.)
+    ///
+    /// # Safety
+    ///
+    /// `item` must not be in a different linked list (with the same id).
+    pub unsafe fn remove(&mut self, item: &T) -> Option<ListArc<T, ID>> {
+        let mut item = unsafe { ListLinks::fields(T::view_links(item)) };
+        // SAFETY: The user provided a reference, and references are never dangling.
+        //
+        // As for why this is not a data race, there are two cases:
+        //
+        // * If `item` is not in any list, then these fields are read-only and null.
+        // * If `item` is in this list, then we have exclusive access to these fields since we
+        //   have a mutable reference to the list.
+        //
+        // In either case, there's no race.
+        let ListLinksFields { next, prev } = unsafe { *item };
+
+        debug_assert_eq!(next.is_null(), prev.is_null());
+        if !next.is_null() {
+            // This is really a no-op, but this ensures that `item` is a raw pointer that was
+            // obtained without going through a pointer->reference->pointer conversion roundtrip.
+            // This ensures that the list is valid under the more restrictive strict provenance
+            // ruleset.
+            //
+            // SAFETY: We just checked that `next` is not null, and it's not dangling by the
+            // list invariants.
+            unsafe {
+                debug_assert_eq!(item, (*next).prev);
+                item = (*next).prev;
+            }
+
+            // SAFETY: We just checked that `item` is in a list, so the caller guarantees that it
+            // is in this list. The pointers are in the right order.
+            Some(unsafe { self.remove_internal_inner(item, next, prev) })
+        } else {
+            None
+        }
+    }
+
+    /// Removes the provided item from the list.
+    ///
+    /// # Safety
+    ///
+    /// `item` must point at an item in this list.
+    unsafe fn remove_internal(&mut self, item: *mut ListLinksFields) -> ListArc<T, ID> {
+        // SAFETY: The caller promises that this pointer is not dangling, and there's no data race
+        // since we have a mutable reference to the list containing `item`.
+        let ListLinksFields { next, prev } = unsafe { *item };
+        // SAFETY: The pointers are ok and in the right order.
+        unsafe { self.remove_internal_inner(item, next, prev) }
+    }
+
+    /// Removes the provided item from the list.
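+    ///
+    /// Conceptually, this splices `prev` and `next` together and clears the pointers of `item`;
+    /// a sketch of the pointer updates performed below:
+    ///
+    /// ```text
+    /// before:  prev <-> item <-> next
+    /// after:   prev <-> next,  with item.prev == item.next == null
+    /// ```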
+    ///
+    /// # Safety
+    ///
+    /// The `item` pointer must point at an item in this list, and we must have `(*item).next ==
+    /// next` and `(*item).prev == prev`.
+    unsafe fn remove_internal_inner(
+        &mut self,
+        item: *mut ListLinksFields,
+        next: *mut ListLinksFields,
+        prev: *mut ListLinksFields,
+    ) -> ListArc<T, ID> {
+        // SAFETY: We have exclusive access to the pointers of items in the list, and the prev/next
+        // pointers are always valid for items in a list.
+        //
+        // INVARIANT: There are three cases:
+        // * If the list has at least three items, then after removing the item, `prev` and `next`
+        //   will be next to each other.
+        // * If the list has two items, then the remaining item will point at itself.
+        // * If the list has one item, then `next == prev == item`, so these writes have no
+        //   effect. The list remains unchanged and `item` is still in the list for now.
+        unsafe {
+            (*next).prev = prev;
+            (*prev).next = next;
+        }
+        // SAFETY: We have exclusive access to items in the list.
+        // INVARIANT: `item` is being removed, so the pointers should be null.
+        unsafe {
+            (*item).prev = ptr::null_mut();
+            (*item).next = ptr::null_mut();
+        }
+        // INVARIANT: There are three cases:
+        // * If `item` was not the first item, then `self.first` should remain unchanged.
+        // * If `item` was the first item and there is another item, then we just updated
+        //   `prev->next` to `next`, which is the new first item, and setting `item->next` to null
+        //   did not modify `prev->next`.
+        // * If `item` was the only item in the list, then `prev == item`, and we just set
+        //   `item->next` to null, so this correctly sets `first` to null now that the list is
+        //   empty.
+        if self.first == item {
+            // SAFETY: The `prev` pointer is the value that `item->prev` had when it was in this
+            // list, so it must be valid. There is no race since `prev` is still in the list and we
+            // still have exclusive access to the list.
+            self.first = unsafe { (*prev).next };
+        }
+
+        // SAFETY: `item` used to be in the list, so it is dereferenceable by the type invariants
+        // of `List`.
+        let list_links = unsafe { ListLinks::from_fields(item) };
+        // SAFETY: Any pointer in the list originates from a `prepare_to_insert` call.
+        let raw_item = unsafe { T::post_remove(list_links) };
+        // SAFETY: The above call to `post_remove` guarantees that we can recreate the `ListArc`.
+        unsafe { ListArc::from_raw(raw_item) }
+    }
+
+    /// Moves all items from `other` into `self`.
+    ///
+    /// The items of `other` are added to the back of `self`, so the last item of `other` becomes
+    /// the last item of `self`.
+    pub fn push_all_back(&mut self, other: &mut List<T, ID>) {
+        // First, we insert the elements into `self`. At the end, we make `other` empty.
+        if self.is_empty() {
+            // INVARIANT: All of the elements in `other` become elements of `self`.
+            self.first = other.first;
+        } else if !other.is_empty() {
+            let other_first = other.first;
+            // SAFETY: The other list is not empty, so this pointer is valid.
+            let other_last = unsafe { (*other_first).prev };
+            let self_first = self.first;
+            // SAFETY: The self list is not empty, so this pointer is valid.
+            let self_last = unsafe { (*self_first).prev };
+
+            // SAFETY: We have exclusive access to both lists, so we can update the pointers.
+            // INVARIANT: This correctly sets the pointers to merge both lists. We do not need to
+            // update `self.first` because the first element of `self` does not change.
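+            //
+            // A picture of the splice (conceptual; `sf`/`sl` denote self's first/last element and
+            // `of`/`ol` other's first/last element):
+            //
+            //   before:  sf -> ... -> sl -> sf        of -> ... -> ol -> of
+            //   after:   sf -> ... -> sl -> of -> ... -> ol -> sf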
+            unsafe {
+                (*self_first).prev = other_last;
+                (*other_last).next = self_first;
+                (*self_last).next = other_first;
+                (*other_first).prev = self_last;
+            }
+        }
+
+        // INVARIANT: The other list is now empty, so update its pointer.
+        other.first = ptr::null_mut();
+    }
+
+    /// Returns a cursor to the first element of the list.
+    ///
+    /// If the list is empty, this returns `None`.
+    pub fn cursor_front(&mut self) -> Option<Cursor<'_, T, ID>> {
+        if self.first.is_null() {
+            None
+        } else {
+            Some(Cursor {
+                current: self.first,
+                list: self,
+            })
+        }
+    }
+
+    /// Creates an iterator over the list.
+    pub fn iter(&self) -> Iter<'_, T, ID> {
+        // INVARIANT: If the list is empty, both pointers are null. Otherwise, both pointers point
+        // at the first element of the same list.
+        Iter {
+            current: self.first,
+            stop: self.first,
+            _ty: PhantomData,
+        }
+    }
+}
+
+impl<T: ?Sized + ListItem<ID>, const ID: u64> Default for List<T, ID> {
+    fn default() -> Self {
+        List::new()
+    }
+}
+
+impl<T: ?Sized + ListItem<ID>, const ID: u64> Drop for List<T, ID> {
+    fn drop(&mut self) {
+        while let Some(item) = self.pop_front() {
+            drop(item);
+        }
+    }
+}
+
+/// An iterator over a [`List`].
+///
+/// # Invariants
+///
+/// * There must be a [`List`] that is immutably borrowed for the duration of `'a`.
+/// * The `current` pointer is null or points at a value in that [`List`].
+/// * The `stop` pointer is equal to the `first` field of that [`List`].
+#[derive(Clone)]
+pub struct Iter<'a, T: ?Sized + ListItem<ID>, const ID: u64 = 0> {
+    current: *mut ListLinksFields,
+    stop: *mut ListLinksFields,
+    _ty: PhantomData<&'a ListArc<T, ID>>,
+}
+
+impl<'a, T: ?Sized + ListItem<ID>, const ID: u64> Iterator for Iter<'a, T, ID> {
+    type Item = ArcBorrow<'a, T>;
+
+    fn next(&mut self) -> Option<ArcBorrow<'a, T>> {
+        if self.current.is_null() {
+            return None;
+        }
+
+        let current = self.current;
+
+        // SAFETY: We just checked that `current` is not null, so it is in a list, and hence not
+        // dangling. There's no race because the iterator holds an immutable borrow to the list.
+        let next = unsafe { (*current).next };
+        // INVARIANT: If `current` was the last element of the list, then this updates it to null.
+        // Otherwise, we update it to the next element.
+        self.current = if next != self.stop {
+            next
+        } else {
+            ptr::null_mut()
+        };
+
+        // SAFETY: The `current` pointer points at a value in the list.
+        let item = unsafe { T::view_value(ListLinks::from_fields(current)) };
+        // SAFETY:
+        // * All values in a list are stored in an `Arc`.
+        // * The value cannot be removed from the list for the duration of the lifetime annotated
+        //   on the returned `ArcBorrow`, because removing it from the list would require mutable
+        //   access to the list. However, the `ArcBorrow` is annotated with the iterator's
+        //   lifetime, and the list is immutably borrowed for that lifetime.
+        // * Values in a list never have a `UniqueArc` reference.
+        Some(unsafe { ArcBorrow::from_raw(item) })
+    }
+}
+
+/// A cursor into a [`List`].
+///
+/// # Invariants
+///
+/// The `current` pointer points at a value in `list`.
+pub struct Cursor<'a, T: ?Sized + ListItem<ID>, const ID: u64 = 0> {
+    current: *mut ListLinksFields,
+    list: &'a mut List<T, ID>,
+}
+
+impl<'a, T: ?Sized + ListItem<ID>, const ID: u64> Cursor<'a, T, ID> {
+    /// Access the current element of this cursor.
+    pub fn current(&self) -> ArcBorrow<'_, T> {
+        // SAFETY: The `current` pointer points at a value in the list.
+        let me = unsafe { T::view_value(ListLinks::from_fields(self.current)) };
+        // SAFETY:
+        // * All values in a list are stored in an `Arc`.
+ // * The value cannot be removed from the list for the duration of the lifetime annotated + // on the returned `ArcBorrow`, because removing it from the list would require mutable + // access to the cursor or the list. However, the `ArcBorrow` holds an immutable borrow + // on the cursor, which in turn holds a mutable borrow on the list, so any such + // mutable access requires first releasing the immutable borrow on the cursor. + // * Values in a list never have a `UniqueArc` reference, because the list has a `ListArc` + // reference, and `UniqueArc` references must be unique. + unsafe { ArcBorrow::from_raw(me) } + } + + /// Move the cursor to the next element. + pub fn next(self) -> Option> { + // SAFETY: The `current` field is always in a list. + let next = unsafe { (*self.current).next }; + + if next == self.list.first { + None + } else { + // INVARIANT: Since `self.current` is in the `list`, its `next` pointer is also in the + // `list`. + Some(Cursor { + current: next, + list: self.list, + }) + } + } + + /// Move the cursor to the previous element. + pub fn prev(self) -> Option> { + // SAFETY: The `current` field is always in a list. + let prev = unsafe { (*self.current).prev }; + + if self.current == self.list.first { + None + } else { + // INVARIANT: Since `self.current` is in the `list`, its `prev` pointer is also in the + // `list`. + Some(Cursor { + current: prev, + list: self.list, + }) + } + } + + /// Remove the current element from the list. + pub fn remove(self) -> ListArc { + // SAFETY: The `current` pointer always points at a member of the list. + unsafe { self.list.remove_internal(self.current) } + } +} + +impl<'a, T: ?Sized + ListItem, const ID: u64> FusedIterator for Iter<'a, T, ID> {} + +impl<'a, T: ?Sized + ListItem, const ID: u64> IntoIterator for &'a List { + type IntoIter = Iter<'a, T, ID>; + type Item = ArcBorrow<'a, T>; + + fn into_iter(self) -> Iter<'a, T, ID> { + self.iter() + } +} + +/// An owning iterator into a [`List`]. +pub struct IntoIter, const ID: u64 = 0> { + list: List, +} + +impl, const ID: u64> Iterator for IntoIter { + type Item = ListArc; + + fn next(&mut self) -> Option> { + self.list.pop_front() + } +} + +impl, const ID: u64> FusedIterator for IntoIter {} + +impl, const ID: u64> DoubleEndedIterator for IntoIter { + fn next_back(&mut self) -> Option> { + self.list.pop_back() + } +} + +impl, const ID: u64> IntoIterator for List { + type IntoIter = IntoIter; + type Item = ListArc; + + fn into_iter(self) -> IntoIter { + IntoIter { list: self } + } +} diff --git a/rust/kernel/list/arc.rs b/rust/kernel/list/arc.rs new file mode 100644 index 000000000000..d801b9dc6291 --- /dev/null +++ b/rust/kernel/list/arc.rs @@ -0,0 +1,521 @@ +// SPDX-License-Identifier: GPL-2.0 + +// Copyright (C) 2024 Google LLC. + +//! A wrapper around `Arc` for linked lists. + +use crate::alloc::{AllocError, Flags}; +use crate::prelude::*; +use crate::sync::{Arc, ArcBorrow, UniqueArc}; +use core::marker::{PhantomPinned, Unsize}; +use core::ops::Deref; +use core::pin::Pin; +use core::sync::atomic::{AtomicBool, Ordering}; + +/// Declares that this type has some way to ensure that there is exactly one `ListArc` instance for +/// this id. +/// +/// Types that implement this trait should include some kind of logic for keeping track of whether +/// a [`ListArc`] exists or not. We refer to this logic as "the tracking inside `T`". +/// +/// We allow the case where the tracking inside `T` thinks that a [`ListArc`] exists, but actually, +/// there isn't a [`ListArc`]. 
However, we do not allow the opposite situation where a [`ListArc`]
+/// exists, but the tracking thinks it doesn't. This is because the former can at most result in us
+/// failing to create a [`ListArc`] when the operation could succeed, whereas the latter can result
+/// in the creation of two [`ListArc`] references. Only the latter situation can lead to memory
+/// safety issues.
+///
+/// A consequence of the above is that you may implement the tracking inside `T` by not actually
+/// keeping track of anything. To do this, you always claim that a [`ListArc`] exists, even if
+/// there isn't one. This implementation is allowed by the above rule, but it means that
+/// [`ListArc`] references can only be created if you have ownership of *all* references to the
+/// refcounted object, as you otherwise have no way of knowing whether a [`ListArc`] exists.
+pub trait ListArcSafe<const ID: u64 = 0> {
+    /// Informs the tracking inside this type that it now has a [`ListArc`] reference.
+    ///
+    /// This method may be called even if the tracking inside this type thinks that a `ListArc`
+    /// reference exists. (But only if that's not actually the case.)
+    ///
+    /// # Safety
+    ///
+    /// Must not be called if a [`ListArc`] already exists for this value.
+    unsafe fn on_create_list_arc_from_unique(self: Pin<&mut Self>);
+
+    /// Informs the tracking inside this type that there is no [`ListArc`] reference anymore.
+    ///
+    /// # Safety
+    ///
+    /// Must only be called if there is no [`ListArc`] reference, but the tracking thinks there is.
+    unsafe fn on_drop_list_arc(&self);
+}
+
+/// Declares that this type is able to safely attempt to create `ListArc`s at any time.
+///
+/// # Safety
+///
+/// The guarantees of `try_new_list_arc` must be upheld.
+pub unsafe trait TryNewListArc<const ID: u64 = 0>: ListArcSafe<ID> {
+    /// Attempts to convert an `Arc` into a `ListArc`. Returns `true` if the
+    /// conversion was successful.
+    ///
+    /// This method should not be called directly. Use [`ListArc::try_from_arc`] instead.
+    ///
+    /// # Guarantees
+    ///
+    /// If this call returns `true`, then there is no [`ListArc`] pointing to this value.
+    /// Additionally, this call will have transitioned the tracking inside `Self` from not thinking
+    /// that a [`ListArc`] exists, to thinking that a [`ListArc`] exists.
+    fn try_new_list_arc(&self) -> bool;
+}
+
+/// Declares that this type supports [`ListArc`].
+///
+/// This macro supports a few different strategies for implementing the tracking inside the type:
+///
+/// * The `untracked` strategy does not actually keep track of whether a [`ListArc`] exists. When
+///   using this strategy, the only way to create a [`ListArc`] is using a [`UniqueArc`].
+/// * The `tracked_by` strategy defers the tracking to a field of the struct. The user must specify
+///   which field to defer the tracking to. The field must implement [`ListArcSafe`]. If the field
+///   implements [`TryNewListArc`], then the type will also implement [`TryNewListArc`].
+///
+/// The `tracked_by` strategy is usually used by deferring to a field of type
+/// [`AtomicTracker`]. However, it is also possible to defer the tracking to another struct
+/// using this macro.
+#[macro_export]
+macro_rules! impl_list_arc_safe {
+    (impl$({$($generics:tt)*})? ListArcSafe<$num:tt> for $t:ty { untracked; } $($rest:tt)*) => {
+        impl$(<$($generics)*>)? $crate::list::ListArcSafe<$num> for $t {
+            unsafe fn on_create_list_arc_from_unique(self: ::core::pin::Pin<&mut Self>) {}
+            unsafe fn on_drop_list_arc(&self) {}
+        }
+        $crate::list::impl_list_arc_safe!
{ $($rest)* } + }; + + (impl$({$($generics:tt)*})? ListArcSafe<$num:tt> for $t:ty { + tracked_by $field:ident : $fty:ty; + } $($rest:tt)*) => { + impl$(<$($generics)*>)? $crate::list::ListArcSafe<$num> for $t { + unsafe fn on_create_list_arc_from_unique(self: ::core::pin::Pin<&mut Self>) { + $crate::assert_pinned!($t, $field, $fty, inline); + + // SAFETY: This field is structurally pinned as per the above assertion. + let field = unsafe { + ::core::pin::Pin::map_unchecked_mut(self, |me| &mut me.$field) + }; + // SAFETY: The caller promises that there is no `ListArc`. + unsafe { + <$fty as $crate::list::ListArcSafe<$num>>::on_create_list_arc_from_unique(field) + }; + } + unsafe fn on_drop_list_arc(&self) { + // SAFETY: The caller promises that there is no `ListArc` reference, and also + // promises that the tracking thinks there is a `ListArc` reference. + unsafe { <$fty as $crate::list::ListArcSafe<$num>>::on_drop_list_arc(&self.$field) }; + } + } + unsafe impl$(<$($generics)*>)? $crate::list::TryNewListArc<$num> for $t + where + $fty: TryNewListArc<$num>, + { + fn try_new_list_arc(&self) -> bool { + <$fty as $crate::list::TryNewListArc<$num>>::try_new_list_arc(&self.$field) + } + } + $crate::list::impl_list_arc_safe! { $($rest)* } + }; + + () => {}; +} +pub use impl_list_arc_safe; + +/// A wrapper around [`Arc`] that's guaranteed unique for the given id. +/// +/// The `ListArc` type can be thought of as a special reference to a refcounted object that owns the +/// permission to manipulate the `next`/`prev` pointers stored in the refcounted object. By ensuring +/// that each object has only one `ListArc` reference, the owner of that reference is assured +/// exclusive access to the `next`/`prev` pointers. When a `ListArc` is inserted into a [`List`], +/// the [`List`] takes ownership of the `ListArc` reference. +/// +/// There are various strategies to ensuring that a value has only one `ListArc` reference. The +/// simplest is to convert a [`UniqueArc`] into a `ListArc`. However, the refcounted object could +/// also keep track of whether a `ListArc` exists using a boolean, which could allow for the +/// creation of new `ListArc` references from an [`Arc`] reference. Whatever strategy is used, the +/// relevant tracking is referred to as "the tracking inside `T`", and the [`ListArcSafe`] trait +/// (and its subtraits) are used to update the tracking when a `ListArc` is created or destroyed. +/// +/// Note that we allow the case where the tracking inside `T` thinks that a `ListArc` exists, but +/// actually, there isn't a `ListArc`. However, we do not allow the opposite situation where a +/// `ListArc` exists, but the tracking thinks it doesn't. This is because the former can at most +/// result in us failing to create a `ListArc` when the operation could succeed, whereas the latter +/// can result in the creation of two `ListArc` references. +/// +/// While this `ListArc` is unique for the given id, there still might exist normal `Arc` +/// references to the object. +/// +/// # Invariants +/// +/// * Each reference counted object has at most one `ListArc` for each value of `ID`. +/// * The tracking inside `T` is aware that a `ListArc` reference exists. +/// +/// [`List`]: crate::list::List +#[repr(transparent)] +pub struct ListArc +where + T: ListArcSafe + ?Sized, +{ + arc: Arc, +} + +impl, const ID: u64> ListArc { + /// Constructs a new reference counted instance of `T`. 
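+    ///
+    /// A minimal usage sketch (assuming a type that has opted in via `impl_list_arc_safe!` with
+    /// the `untracked` strategy; `Work` is a made-up example type):
+    ///
+    /// ```ignore
+    /// use kernel::alloc::flags;
+    /// use kernel::list::{impl_list_arc_safe, ListArc};
+    ///
+    /// struct Work {
+    ///     value: u32,
+    /// }
+    ///
+    /// impl_list_arc_safe! {
+    ///     impl ListArcSafe<0> for Work { untracked; }
+    /// }
+    ///
+    /// let work: ListArc<Work> = ListArc::new(Work { value: 42 }, flags::GFP_KERNEL)?;
+    /// assert_eq!(work.value, 42);
+    /// ```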
+ #[inline] + pub fn new(contents: T, flags: Flags) -> Result { + Ok(Self::from(UniqueArc::new(contents, flags)?)) + } + + /// Use the given initializer to in-place initialize a `T`. + /// + /// If `T: !Unpin` it will not be able to move afterwards. + // We don't implement `InPlaceInit` because `ListArc` is implicitly pinned. This is similar to + // what we do for `Arc`. + #[inline] + pub fn pin_init(init: impl PinInit, flags: Flags) -> Result + where + E: From, + { + Ok(Self::from(UniqueArc::try_pin_init(init, flags)?)) + } + + /// Use the given initializer to in-place initialize a `T`. + /// + /// This is equivalent to [`ListArc::pin_init`], since a [`ListArc`] is always pinned. + #[inline] + pub fn init(init: impl Init, flags: Flags) -> Result + where + E: From, + { + Ok(Self::from(UniqueArc::try_init(init, flags)?)) + } +} + +impl From> for ListArc +where + T: ListArcSafe + ?Sized, +{ + /// Convert a [`UniqueArc`] into a [`ListArc`]. + #[inline] + fn from(unique: UniqueArc) -> Self { + Self::from(Pin::from(unique)) + } +} + +impl From>> for ListArc +where + T: ListArcSafe + ?Sized, +{ + /// Convert a pinned [`UniqueArc`] into a [`ListArc`]. + #[inline] + fn from(mut unique: Pin>) -> Self { + // SAFETY: We have a `UniqueArc`, so there is no `ListArc`. + unsafe { T::on_create_list_arc_from_unique(unique.as_mut()) }; + let arc = Arc::from(unique); + // SAFETY: We just called `on_create_list_arc_from_unique` on an arc without a `ListArc`, + // so we can create a `ListArc`. + unsafe { Self::transmute_from_arc(arc) } + } +} + +impl ListArc +where + T: ListArcSafe + ?Sized, +{ + /// Creates two `ListArc`s from a [`UniqueArc`]. + /// + /// The two ids must be different. + #[inline] + pub fn pair_from_unique(unique: UniqueArc) -> (Self, ListArc) + where + T: ListArcSafe, + { + Self::pair_from_pin_unique(Pin::from(unique)) + } + + /// Creates two `ListArc`s from a pinned [`UniqueArc`]. + /// + /// The two ids must be different. + #[inline] + pub fn pair_from_pin_unique( + mut unique: Pin>, + ) -> (Self, ListArc) + where + T: ListArcSafe, + { + build_assert!(ID != ID2); + + // SAFETY: We have a `UniqueArc`, so there is no `ListArc`. + unsafe { >::on_create_list_arc_from_unique(unique.as_mut()) }; + // SAFETY: We have a `UniqueArc`, so there is no `ListArc`. + unsafe { >::on_create_list_arc_from_unique(unique.as_mut()) }; + + let arc1 = Arc::from(unique); + let arc2 = Arc::clone(&arc1); + + // SAFETY: We just called `on_create_list_arc_from_unique` on an arc without a `ListArc` + // for both IDs (which are different), so we can create two `ListArc`s. + unsafe { + ( + Self::transmute_from_arc(arc1), + ListArc::transmute_from_arc(arc2), + ) + } + } + + /// Try to create a new `ListArc`. + /// + /// This fails if this value already has a `ListArc`. + pub fn try_from_arc(arc: Arc) -> Result> + where + T: TryNewListArc, + { + if arc.try_new_list_arc() { + // SAFETY: The `try_new_list_arc` method returned true, so we made the tracking think + // that a `ListArc` exists. This lets us create a `ListArc`. + Ok(unsafe { Self::transmute_from_arc(arc) }) + } else { + Err(arc) + } + } + + /// Try to create a new `ListArc`. + /// + /// This fails if this value already has a `ListArc`. + pub fn try_from_arc_borrow(arc: ArcBorrow<'_, T>) -> Option + where + T: TryNewListArc, + { + if arc.try_new_list_arc() { + // SAFETY: The `try_new_list_arc` method returned true, so we made the tracking think + // that a `ListArc` exists. This lets us create a `ListArc`. 
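+            // Note: `Arc::from(arc)` turns the `ArcBorrow` into an owned `Arc` by taking a new
+            // refcount; the borrowed reference itself is left untouched.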
+ Some(unsafe { Self::transmute_from_arc(Arc::from(arc)) }) + } else { + None + } + } + + /// Try to create a new `ListArc`. + /// + /// If it's not possible to create a new `ListArc`, then the `Arc` is dropped. This will never + /// run the destructor of the value. + pub fn try_from_arc_or_drop(arc: Arc) -> Option + where + T: TryNewListArc, + { + match Self::try_from_arc(arc) { + Ok(list_arc) => Some(list_arc), + Err(arc) => Arc::into_unique_or_drop(arc).map(Self::from), + } + } + + /// Transmutes an [`Arc`] into a `ListArc` without updating the tracking inside `T`. + /// + /// # Safety + /// + /// * The value must not already have a `ListArc` reference. + /// * The tracking inside `T` must think that there is a `ListArc` reference. + #[inline] + unsafe fn transmute_from_arc(arc: Arc) -> Self { + // INVARIANT: By the safety requirements, the invariants on `ListArc` are satisfied. + Self { arc } + } + + /// Transmutes a `ListArc` into an [`Arc`] without updating the tracking inside `T`. + /// + /// After this call, the tracking inside `T` will still think that there is a `ListArc` + /// reference. + #[inline] + fn transmute_to_arc(self) -> Arc { + // Use a transmute to skip destructor. + // + // SAFETY: ListArc is repr(transparent). + unsafe { core::mem::transmute(self) } + } + + /// Convert ownership of this `ListArc` into a raw pointer. + /// + /// The returned pointer is indistinguishable from pointers returned by [`Arc::into_raw`]. The + /// tracking inside `T` will still think that a `ListArc` exists after this call. + #[inline] + pub fn into_raw(self) -> *const T { + Arc::into_raw(Self::transmute_to_arc(self)) + } + + /// Take ownership of the `ListArc` from a raw pointer. + /// + /// # Safety + /// + /// * `ptr` must satisfy the safety requirements of [`Arc::from_raw`]. + /// * The value must not already have a `ListArc` reference. + /// * The tracking inside `T` must think that there is a `ListArc` reference. + #[inline] + pub unsafe fn from_raw(ptr: *const T) -> Self { + // SAFETY: The pointer satisfies the safety requirements for `Arc::from_raw`. + let arc = unsafe { Arc::from_raw(ptr) }; + // SAFETY: The value doesn't already have a `ListArc` reference, but the tracking thinks it + // does. + unsafe { Self::transmute_from_arc(arc) } + } + + /// Converts the `ListArc` into an [`Arc`]. + #[inline] + pub fn into_arc(self) -> Arc { + let arc = Self::transmute_to_arc(self); + // SAFETY: There is no longer a `ListArc`, but the tracking thinks there is. + unsafe { T::on_drop_list_arc(&arc) }; + arc + } + + /// Clone a `ListArc` into an [`Arc`]. + #[inline] + pub fn clone_arc(&self) -> Arc { + self.arc.clone() + } + + /// Returns a reference to an [`Arc`] from the given [`ListArc`]. + /// + /// This is useful when the argument of a function call is an [`&Arc`] (e.g., in a method + /// receiver), but we have a [`ListArc`] instead. + /// + /// [`&Arc`]: Arc + #[inline] + pub fn as_arc(&self) -> &Arc { + &self.arc + } + + /// Returns an [`ArcBorrow`] from the given [`ListArc`]. + /// + /// This is useful when the argument of a function call is an [`ArcBorrow`] (e.g., in a method + /// receiver), but we have an [`Arc`] instead. Getting an [`ArcBorrow`] is free when optimised. + #[inline] + pub fn as_arc_borrow(&self) -> ArcBorrow<'_, T> { + self.arc.as_arc_borrow() + } + + /// Compare whether two [`ListArc`] pointers reference the same underlying object. 
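+    ///
+    /// Comparison is by pointer, not by value; a sketch (reusing the hypothetical `Work` type
+    /// from the example on `new` above):
+    ///
+    /// ```ignore
+    /// let a = ListArc::<Work>::new(Work { value: 1 }, flags::GFP_KERNEL)?;
+    /// let b = ListArc::<Work>::new(Work { value: 1 }, flags::GFP_KERNEL)?;
+    /// assert!(!ListArc::ptr_eq(&a, &b)); // Same value, but different allocations.
+    /// ```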
+ #[inline] + pub fn ptr_eq(this: &Self, other: &Self) -> bool { + Arc::ptr_eq(&this.arc, &other.arc) + } +} + +impl Deref for ListArc +where + T: ListArcSafe + ?Sized, +{ + type Target = T; + + #[inline] + fn deref(&self) -> &Self::Target { + self.arc.deref() + } +} + +impl Drop for ListArc +where + T: ListArcSafe + ?Sized, +{ + #[inline] + fn drop(&mut self) { + // SAFETY: There is no longer a `ListArc`, but the tracking thinks there is by the type + // invariants on `Self`. + unsafe { T::on_drop_list_arc(&self.arc) }; + } +} + +impl AsRef> for ListArc +where + T: ListArcSafe + ?Sized, +{ + #[inline] + fn as_ref(&self) -> &Arc { + self.as_arc() + } +} + +// This is to allow [`ListArc`] (and variants) to be used as the type of `self`. +impl core::ops::Receiver for ListArc where T: ListArcSafe + ?Sized {} + +// This is to allow coercion from `ListArc` to `ListArc` if `T` can be converted to the +// dynamically-sized type (DST) `U`. +impl core::ops::CoerceUnsized> for ListArc +where + T: ListArcSafe + Unsize + ?Sized, + U: ListArcSafe + ?Sized, +{ +} + +// This is to allow `ListArc` to be dispatched on when `ListArc` can be coerced into +// `ListArc`. +impl core::ops::DispatchFromDyn> for ListArc +where + T: ListArcSafe + Unsize + ?Sized, + U: ListArcSafe + ?Sized, +{ +} + +/// A utility for tracking whether a [`ListArc`] exists using an atomic. +/// +/// # Invariant +/// +/// If the boolean is `false`, then there is no [`ListArc`] for this value. +#[repr(transparent)] +pub struct AtomicTracker { + inner: AtomicBool, + // This value needs to be pinned to justify the INVARIANT: comment in `AtomicTracker::new`. + _pin: PhantomPinned, +} + +impl AtomicTracker { + /// Creates a new initializer for this type. + pub fn new() -> impl PinInit { + // INVARIANT: Pin-init initializers can't be used on an existing `Arc`, so this value will + // not be constructed in an `Arc` that already has a `ListArc`. + Self { + inner: AtomicBool::new(false), + _pin: PhantomPinned, + } + } + + fn project_inner(self: Pin<&mut Self>) -> &mut AtomicBool { + // SAFETY: The `inner` field is not structurally pinned, so we may obtain a mutable + // reference to it even if we only have a pinned reference to `self`. + unsafe { &mut Pin::into_inner_unchecked(self).inner } + } +} + +impl ListArcSafe for AtomicTracker { + unsafe fn on_create_list_arc_from_unique(self: Pin<&mut Self>) { + // INVARIANT: We just created a ListArc, so the boolean should be true. + *self.project_inner().get_mut() = true; + } + + unsafe fn on_drop_list_arc(&self) { + // INVARIANT: We just dropped a ListArc, so the boolean should be false. + self.inner.store(false, Ordering::Release); + } +} + +// SAFETY: If this method returns `true`, then by the type invariant there is no `ListArc` before +// this call, so it is okay to create a new `ListArc`. +// +// The acquire ordering will synchronize with the release store from the destruction of any +// previous `ListArc`, so if there was a previous `ListArc`, then the destruction of the previous +// `ListArc` happens-before the creation of the new `ListArc`. +unsafe impl TryNewListArc for AtomicTracker { + fn try_new_list_arc(&self) -> bool { + // INVARIANT: If this method returns true, then the boolean used to be false, and is no + // longer false, so it is okay for the caller to create a new [`ListArc`]. 
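+        //
+        // The failure ordering can be `Relaxed`: if the value was already `true`, no `ListArc` is
+        // created and there is nothing to synchronize with. The success ordering is `Acquire` so
+        // that it pairs with the `Release` store in `on_drop_list_arc`, as described in the
+        // SAFETY comment above.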
+ self.inner + .compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed) + .is_ok() + } +} diff --git a/rust/kernel/list/arc_field.rs b/rust/kernel/list/arc_field.rs new file mode 100644 index 000000000000..2330f673427a --- /dev/null +++ b/rust/kernel/list/arc_field.rs @@ -0,0 +1,96 @@ +// SPDX-License-Identifier: GPL-2.0 + +// Copyright (C) 2024 Google LLC. + +//! A field that is exclusively owned by a [`ListArc`]. +//! +//! This can be used to have reference counted struct where one of the reference counted pointers +//! has exclusive access to a field of the struct. +//! +//! [`ListArc`]: crate::list::ListArc + +use core::cell::UnsafeCell; + +/// A field owned by a specific [`ListArc`]. +/// +/// [`ListArc`]: crate::list::ListArc +pub struct ListArcField { + value: UnsafeCell, +} + +// SAFETY: If the inner type is thread-safe, then it's also okay for `ListArc` to be thread-safe. +unsafe impl Send for ListArcField {} +// SAFETY: If the inner type is thread-safe, then it's also okay for `ListArc` to be thread-safe. +unsafe impl Sync for ListArcField {} + +impl ListArcField { + /// Creates a new `ListArcField`. + pub fn new(value: T) -> Self { + Self { + value: UnsafeCell::new(value), + } + } + + /// Access the value when we have exclusive access to the `ListArcField`. + /// + /// This allows access to the field using an `UniqueArc` instead of a `ListArc`. + pub fn get_mut(&mut self) -> &mut T { + self.value.get_mut() + } + + /// Unsafely assert that you have shared access to the `ListArc` for this field. + /// + /// # Safety + /// + /// The caller must have shared access to the `ListArc` containing the struct with this + /// field for the duration of the returned reference. + pub unsafe fn assert_ref(&self) -> &T { + // SAFETY: The caller has shared access to the `ListArc`, so they also have shared access + // to this field. + unsafe { &*self.value.get() } + } + + /// Unsafely assert that you have mutable access to the `ListArc` for this field. + /// + /// # Safety + /// + /// The caller must have mutable access to the `ListArc` containing the struct with this + /// field for the duration of the returned reference. + #[allow(clippy::mut_from_ref)] + pub unsafe fn assert_mut(&self) -> &mut T { + // SAFETY: The caller has exclusive access to the `ListArc`, so they also have exclusive + // access to this field. + unsafe { &mut *self.value.get() } + } +} + +/// Defines getters for a [`ListArcField`]. +#[macro_export] +macro_rules! define_list_arc_field_getter { + ($pub:vis fn $name:ident(&self $(<$id:tt>)?) -> &$typ:ty { $field:ident } + $($rest:tt)* + ) => { + $pub fn $name<'a>(self: &'a $crate::list::ListArc) -> &'a $typ { + let field = &(&**self).$field; + // SAFETY: We have a shared reference to the `ListArc`. + unsafe { $crate::list::ListArcField::<$typ $(, $id)?>::assert_ref(field) } + } + + $crate::list::define_list_arc_field_getter!($($rest)*); + }; + + ($pub:vis fn $name:ident(&mut self $(<$id:tt>)?) -> &mut $typ:ty { $field:ident } + $($rest:tt)* + ) => { + $pub fn $name<'a>(self: &'a mut $crate::list::ListArc) -> &'a mut $typ { + let field = &(&**self).$field; + // SAFETY: We have a mutable reference to the `ListArc`. 
+ unsafe { $crate::list::ListArcField::<$typ $(, $id)?>::assert_mut(field) } + } + + $crate::list::define_list_arc_field_getter!($($rest)*); + }; + + () => {}; +} +pub use define_list_arc_field_getter; diff --git a/rust/kernel/list/impl_list_item_mod.rs b/rust/kernel/list/impl_list_item_mod.rs new file mode 100644 index 000000000000..a0438537cee1 --- /dev/null +++ b/rust/kernel/list/impl_list_item_mod.rs @@ -0,0 +1,274 @@ +// SPDX-License-Identifier: GPL-2.0 + +// Copyright (C) 2024 Google LLC. + +//! Helpers for implementing list traits safely. + +use crate::list::ListLinks; + +/// Declares that this type has a `ListLinks` field at a fixed offset. +/// +/// This trait is only used to help implement `ListItem` safely. If `ListItem` is implemented +/// manually, then this trait is not needed. Use the [`impl_has_list_links!`] macro to implement +/// this trait. +/// +/// # Safety +/// +/// All values of this type must have a `ListLinks` field at the given offset. +/// +/// The behavior of `raw_get_list_links` must not be changed. +pub unsafe trait HasListLinks { + /// The offset of the `ListLinks` field. + const OFFSET: usize; + + /// Returns a pointer to the [`ListLinks`] field. + /// + /// # Safety + /// + /// The provided pointer must point at a valid struct of type `Self`. + /// + /// [`ListLinks`]: ListLinks + // We don't really need this method, but it's necessary for the implementation of + // `impl_has_list_links!` to be correct. + #[inline] + unsafe fn raw_get_list_links(ptr: *mut Self) -> *mut ListLinks { + // SAFETY: The caller promises that the pointer is valid. The implementer promises that the + // `OFFSET` constant is correct. + unsafe { (ptr as *mut u8).add(Self::OFFSET) as *mut ListLinks } + } +} + +/// Implements the [`HasListLinks`] trait for the given type. +#[macro_export] +macro_rules! impl_has_list_links { + ($(impl$(<$($implarg:ident),*>)? + HasListLinks$(<$id:tt>)? + for $self:ident $(<$($selfarg:ty),*>)? + { self$(.$field:ident)* } + )*) => {$( + // SAFETY: The implementation of `raw_get_list_links` only compiles if the field has the + // right type. + // + // The behavior of `raw_get_list_links` is not changed since the `addr_of_mut!` macro is + // equivalent to the pointer offset operation in the trait definition. + unsafe impl$(<$($implarg),*>)? $crate::list::HasListLinks$(<$id>)? for + $self $(<$($selfarg),*>)? + { + const OFFSET: usize = ::core::mem::offset_of!(Self, $($field).*) as usize; + + #[inline] + unsafe fn raw_get_list_links(ptr: *mut Self) -> *mut $crate::list::ListLinks$(<$id>)? { + // SAFETY: The caller promises that the pointer is not dangling. We know that this + // expression doesn't follow any pointers, as the `offset_of!` invocation above + // would otherwise not compile. + unsafe { ::core::ptr::addr_of_mut!((*ptr)$(.$field)*) } + } + } + )*}; +} +pub use impl_has_list_links; + +/// Declares that the `ListLinks` field in this struct is inside a `ListLinksSelfPtr`. +/// +/// # Safety +/// +/// The `ListLinks` field of this struct at the offset `HasListLinks::OFFSET` must be +/// inside a `ListLinksSelfPtr`. +pub unsafe trait HasSelfPtr +where + Self: HasListLinks, +{ +} + +/// Implements the [`HasListLinks`] and [`HasSelfPtr`] traits for the given type. +#[macro_export] +macro_rules! impl_has_list_links_self_ptr { + ($(impl$({$($implarg:tt)*})? + HasSelfPtr<$item_type:ty $(, $id:tt)?> + for $self:ident $(<$($selfarg:ty),*>)? 
+ { self.$field:ident } + )*) => {$( + // SAFETY: The implementation of `raw_get_list_links` only compiles if the field has the + // right type. + unsafe impl$(<$($implarg)*>)? $crate::list::HasSelfPtr<$item_type $(, $id)?> for + $self $(<$($selfarg),*>)? + {} + + unsafe impl$(<$($implarg)*>)? $crate::list::HasListLinks$(<$id>)? for + $self $(<$($selfarg),*>)? + { + const OFFSET: usize = ::core::mem::offset_of!(Self, $field) as usize; + + #[inline] + unsafe fn raw_get_list_links(ptr: *mut Self) -> *mut $crate::list::ListLinks$(<$id>)? { + // SAFETY: The caller promises that the pointer is not dangling. + let ptr: *mut $crate::list::ListLinksSelfPtr<$item_type $(, $id)?> = + unsafe { ::core::ptr::addr_of_mut!((*ptr).$field) }; + ptr.cast() + } + } + )*}; +} +pub use impl_has_list_links_self_ptr; + +/// Implements the [`ListItem`] trait for the given type. +/// +/// Requires that the type implements [`HasListLinks`]. Use the [`impl_has_list_links!`] macro to +/// implement that trait. +/// +/// [`ListItem`]: crate::list::ListItem +#[macro_export] +macro_rules! impl_list_item { + ( + $(impl$({$($generics:tt)*})? ListItem<$num:tt> for $t:ty { + using ListLinks; + })* + ) => {$( + // SAFETY: See GUARANTEES comment on each method. + unsafe impl$(<$($generics)*>)? $crate::list::ListItem<$num> for $t { + // GUARANTEES: + // * This returns the same pointer as `prepare_to_insert` because `prepare_to_insert` + // is implemented in terms of `view_links`. + // * By the type invariants of `ListLinks`, the `ListLinks` has two null pointers when + // this value is not in a list. + unsafe fn view_links(me: *const Self) -> *mut $crate::list::ListLinks<$num> { + // SAFETY: The caller guarantees that `me` points at a valid value of type `Self`. + unsafe { + >::raw_get_list_links(me.cast_mut()) + } + } + + // GUARANTEES: + // * `me` originates from the most recent call to `prepare_to_insert`, which just added + // `offset` to the pointer passed to `prepare_to_insert`. This method subtracts + // `offset` from `me` so it returns the pointer originally passed to + // `prepare_to_insert`. + // * The pointer remains valid until the next call to `post_remove` because the caller + // of the most recent call to `prepare_to_insert` promised to retain ownership of the + // `ListArc` containing `Self` until the next call to `post_remove`. The value cannot + // be destroyed while a `ListArc` reference exists. + unsafe fn view_value(me: *mut $crate::list::ListLinks<$num>) -> *const Self { + let offset = >::OFFSET; + // SAFETY: `me` originates from the most recent call to `prepare_to_insert`, so it + // points at the field at offset `offset` in a value of type `Self`. Thus, + // subtracting `offset` from `me` is still in-bounds of the allocation. + unsafe { (me as *const u8).sub(offset) as *const Self } + } + + // GUARANTEES: + // This implementation of `ListItem` will not give out exclusive access to the same + // `ListLinks` several times because calls to `prepare_to_insert` and `post_remove` + // must alternate and exclusive access is given up when `post_remove` is called. + // + // Other invocations of `impl_list_item!` also cannot give out exclusive access to the + // same `ListLinks` because you can only implement `ListItem` once for each value of + // `ID`, and the `ListLinks` fields only work with the specified `ID`. + unsafe fn prepare_to_insert(me: *const Self) -> *mut $crate::list::ListLinks<$num> { + // SAFETY: The caller promises that `me` points at a valid value. 
+ unsafe { >::view_links(me) } + } + + // GUARANTEES: + // * `me` originates from the most recent call to `prepare_to_insert`, which just added + // `offset` to the pointer passed to `prepare_to_insert`. This method subtracts + // `offset` from `me` so it returns the pointer originally passed to + // `prepare_to_insert`. + unsafe fn post_remove(me: *mut $crate::list::ListLinks<$num>) -> *const Self { + let offset = >::OFFSET; + // SAFETY: `me` originates from the most recent call to `prepare_to_insert`, so it + // points at the field at offset `offset` in a value of type `Self`. Thus, + // subtracting `offset` from `me` is still in-bounds of the allocation. + unsafe { (me as *const u8).sub(offset) as *const Self } + } + } + )*}; + + ( + $(impl$({$($generics:tt)*})? ListItem<$num:tt> for $t:ty { + using ListLinksSelfPtr; + })* + ) => {$( + // SAFETY: See GUARANTEES comment on each method. + unsafe impl$(<$($generics)*>)? $crate::list::ListItem<$num> for $t { + // GUARANTEES: + // This implementation of `ListItem` will not give out exclusive access to the same + // `ListLinks` several times because calls to `prepare_to_insert` and `post_remove` + // must alternate and exclusive access is given up when `post_remove` is called. + // + // Other invocations of `impl_list_item!` also cannot give out exclusive access to the + // same `ListLinks` because you can only implement `ListItem` once for each value of + // `ID`, and the `ListLinks` fields only work with the specified `ID`. + unsafe fn prepare_to_insert(me: *const Self) -> *mut $crate::list::ListLinks<$num> { + // SAFETY: The caller promises that `me` points at a valid value of type `Self`. + let links_field = unsafe { >::view_links(me) }; + + let spoff = $crate::list::ListLinksSelfPtr::::LIST_LINKS_SELF_PTR_OFFSET; + // Goes via the offset as the field is private. + // + // SAFETY: The constant is equal to `offset_of!(ListLinksSelfPtr, self_ptr)`, so + // the pointer stays in bounds of the allocation. + let self_ptr = unsafe { (links_field as *const u8).add(spoff) } + as *const $crate::types::Opaque<*const Self>; + let cell_inner = $crate::types::Opaque::raw_get(self_ptr); + + // SAFETY: This value is not accessed in any other places than `prepare_to_insert`, + // `post_remove`, or `view_value`. By the safety requirements of those methods, + // none of these three methods may be called in parallel with this call to + // `prepare_to_insert`, so this write will not race with any other access to the + // value. + unsafe { ::core::ptr::write(cell_inner, me) }; + + links_field + } + + // GUARANTEES: + // * This returns the same pointer as `prepare_to_insert` because `prepare_to_insert` + // returns the return value of `view_links`. + // * By the type invariants of `ListLinks`, the `ListLinks` has two null pointers when + // this value is not in a list. + unsafe fn view_links(me: *const Self) -> *mut $crate::list::ListLinks<$num> { + // SAFETY: The caller promises that `me` points at a valid value of type `Self`. + unsafe { >::raw_get_list_links(me.cast_mut()) } + } + + // This function is also used as the implementation of `post_remove`, so the caller + // may choose to satisfy the safety requirements of `post_remove` instead of the safety + // requirements for `view_value`. + // + // GUARANTEES: (always) + // * This returns the same pointer as the one passed to the most recent call to + // `prepare_to_insert` since that call wrote that pointer to this location. 
The value + // is only modified in `prepare_to_insert`, so it has not been modified since the + // most recent call. + // + // GUARANTEES: (only when using the `view_value` safety requirements) + // * The pointer remains valid until the next call to `post_remove` because the caller + // of the most recent call to `prepare_to_insert` promised to retain ownership of the + // `ListArc` containing `Self` until the next call to `post_remove`. The value cannot + // be destroyed while a `ListArc` reference exists. + unsafe fn view_value(links_field: *mut $crate::list::ListLinks<$num>) -> *const Self { + let spoff = $crate::list::ListLinksSelfPtr::::LIST_LINKS_SELF_PTR_OFFSET; + // SAFETY: The constant is equal to `offset_of!(ListLinksSelfPtr, self_ptr)`, so + // the pointer stays in bounds of the allocation. + let self_ptr = unsafe { (links_field as *const u8).add(spoff) } + as *const ::core::cell::UnsafeCell<*const Self>; + let cell_inner = ::core::cell::UnsafeCell::raw_get(self_ptr); + // SAFETY: This is not a data race, because the only function that writes to this + // value is `prepare_to_insert`, but by the safety requirements the + // `prepare_to_insert` method may not be called in parallel with `view_value` or + // `post_remove`. + unsafe { ::core::ptr::read(cell_inner) } + } + + // GUARANTEES: + // The first guarantee of `view_value` is exactly what `post_remove` guarantees. + unsafe fn post_remove(me: *mut $crate::list::ListLinks<$num>) -> *const Self { + // SAFETY: This specific implementation of `view_value` allows the caller to + // promise the safety requirements of `post_remove` instead of the safety + // requirements for `view_value`. + unsafe { >::view_value(me) } + } + } + )*}; +} +pub use impl_list_item; diff --git a/rust/kernel/prelude.rs b/rust/kernel/prelude.rs index b37a0b3180fb..4571daec0961 100644 --- a/rust/kernel/prelude.rs +++ b/rust/kernel/prelude.rs @@ -37,6 +37,6 @@ pub use super::{str::CStr, ThisModule}; -pub use super::init::{InPlaceInit, Init, PinInit}; +pub use super::init::{InPlaceInit, InPlaceWrite, Init, PinInit}; pub use super::current; diff --git a/rust/kernel/print.rs b/rust/kernel/print.rs index a78aa3514a0a..508b0221256c 100644 --- a/rust/kernel/print.rs +++ b/rust/kernel/print.rs @@ -4,7 +4,7 @@ //! //! C header: [`include/linux/printk.h`](srctree/include/linux/printk.h) //! -//! Reference: +//! Reference: use core::{ ffi::{c_char, c_void}, @@ -197,7 +197,7 @@ macro_rules! print_macro ( /// Mimics the interface of [`std::print!`]. See [`core::fmt`] and /// `alloc::format!` for information about the formatting syntax. /// -/// [`pr_emerg`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_emerg +/// [`pr_emerg`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_emerg /// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html /// /// # Examples @@ -221,7 +221,7 @@ macro_rules! pr_emerg ( /// Mimics the interface of [`std::print!`]. See [`core::fmt`] and /// `alloc::format!` for information about the formatting syntax. /// -/// [`pr_alert`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_alert +/// [`pr_alert`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_alert /// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html /// /// # Examples @@ -245,7 +245,7 @@ macro_rules! pr_alert ( /// Mimics the interface of [`std::print!`]. See [`core::fmt`] and /// `alloc::format!` for information about the formatting syntax. 
/// -/// [`pr_crit`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_crit +/// [`pr_crit`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_crit /// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html /// /// # Examples @@ -269,7 +269,7 @@ macro_rules! pr_crit ( /// Mimics the interface of [`std::print!`]. See [`core::fmt`] and /// `alloc::format!` for information about the formatting syntax. /// -/// [`pr_err`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_err +/// [`pr_err`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_err /// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html /// /// # Examples @@ -293,7 +293,7 @@ macro_rules! pr_err ( /// Mimics the interface of [`std::print!`]. See [`core::fmt`] and /// `alloc::format!` for information about the formatting syntax. /// -/// [`pr_warn`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_warn +/// [`pr_warn`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_warn /// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html /// /// # Examples @@ -317,7 +317,7 @@ macro_rules! pr_warn ( /// Mimics the interface of [`std::print!`]. See [`core::fmt`] and /// `alloc::format!` for information about the formatting syntax. /// -/// [`pr_notice`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_notice +/// [`pr_notice`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_notice /// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html /// /// # Examples @@ -341,7 +341,7 @@ macro_rules! pr_notice ( /// Mimics the interface of [`std::print!`]. See [`core::fmt`] and /// `alloc::format!` for information about the formatting syntax. /// -/// [`pr_info`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_info +/// [`pr_info`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_info /// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html /// /// # Examples @@ -367,7 +367,7 @@ macro_rules! pr_info ( /// Mimics the interface of [`std::print!`]. See [`core::fmt`] and /// `alloc::format!` for information about the formatting syntax. /// -/// [`pr_debug`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_debug +/// [`pr_debug`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_debug /// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html /// /// # Examples @@ -395,7 +395,7 @@ macro_rules! pr_debug ( /// `alloc::format!` for information about the formatting syntax. /// /// [`pr_info!`]: crate::pr_info! -/// [`pr_cont`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html#c.pr_cont +/// [`pr_cont`]: https://docs.kernel.org/core-api/printk-basics.html#c.pr_cont /// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html /// /// # Examples diff --git a/rust/kernel/rbtree.rs b/rust/kernel/rbtree.rs new file mode 100644 index 000000000000..25eb36fd1cdc --- /dev/null +++ b/rust/kernel/rbtree.rs @@ -0,0 +1,1278 @@ +// SPDX-License-Identifier: GPL-2.0 + +//! Red-black trees. +//! +//! C header: [`include/linux/rbtree.h`](srctree/include/linux/rbtree.h) +//! +//! Reference: + +use crate::{alloc::Flags, bindings, container_of, error::Result, prelude::*}; +use alloc::boxed::Box; +use core::{ + cmp::{Ord, Ordering}, + marker::PhantomData, + mem::MaybeUninit, + ptr::{addr_of_mut, from_mut, NonNull}, +}; + +/// A red-black tree with owned nodes. 
+/// +/// It is backed by the kernel C red-black trees. +/// +/// # Examples +/// +/// In the example below we do several operations on a tree. We note that insertions may fail if +/// the system is out of memory. +/// +/// ``` +/// use kernel::{alloc::flags, rbtree::{RBTree, RBTreeNode, RBTreeNodeReservation}}; +/// +/// // Create a new tree. +/// let mut tree = RBTree::new(); +/// +/// // Insert three elements. +/// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?; +/// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?; +/// tree.try_create_and_insert(30, 300, flags::GFP_KERNEL)?; +/// +/// // Check the nodes we just inserted. +/// { +/// assert_eq!(tree.get(&10).unwrap(), &100); +/// assert_eq!(tree.get(&20).unwrap(), &200); +/// assert_eq!(tree.get(&30).unwrap(), &300); +/// } +/// +/// // Iterate over the nodes we just inserted. +/// { +/// let mut iter = tree.iter(); +/// assert_eq!(iter.next().unwrap(), (&10, &100)); +/// assert_eq!(iter.next().unwrap(), (&20, &200)); +/// assert_eq!(iter.next().unwrap(), (&30, &300)); +/// assert!(iter.next().is_none()); +/// } +/// +/// // Print all elements. +/// for (key, value) in &tree { +/// pr_info!("{} = {}\n", key, value); +/// } +/// +/// // Replace one of the elements. +/// tree.try_create_and_insert(10, 1000, flags::GFP_KERNEL)?; +/// +/// // Check that the tree reflects the replacement. +/// { +/// let mut iter = tree.iter(); +/// assert_eq!(iter.next().unwrap(), (&10, &1000)); +/// assert_eq!(iter.next().unwrap(), (&20, &200)); +/// assert_eq!(iter.next().unwrap(), (&30, &300)); +/// assert!(iter.next().is_none()); +/// } +/// +/// // Change the value of one of the elements. +/// *tree.get_mut(&30).unwrap() = 3000; +/// +/// // Check that the tree reflects the update. +/// { +/// let mut iter = tree.iter(); +/// assert_eq!(iter.next().unwrap(), (&10, &1000)); +/// assert_eq!(iter.next().unwrap(), (&20, &200)); +/// assert_eq!(iter.next().unwrap(), (&30, &3000)); +/// assert!(iter.next().is_none()); +/// } +/// +/// // Remove an element. +/// tree.remove(&10); +/// +/// // Check that the tree reflects the removal. +/// { +/// let mut iter = tree.iter(); +/// assert_eq!(iter.next().unwrap(), (&20, &200)); +/// assert_eq!(iter.next().unwrap(), (&30, &3000)); +/// assert!(iter.next().is_none()); +/// } +/// +/// # Ok::<(), Error>(()) +/// ``` +/// +/// In the example below, we first allocate a node, acquire a spinlock, then insert the node into +/// the tree. This is useful when the insertion context does not allow sleeping, for example, when +/// holding a spinlock. +/// +/// ``` +/// use kernel::{alloc::flags, rbtree::{RBTree, RBTreeNode}, sync::SpinLock}; +/// +/// fn insert_test(tree: &SpinLock>) -> Result { +/// // Pre-allocate node. This may fail (as it allocates memory). +/// let node = RBTreeNode::new(10, 100, flags::GFP_KERNEL)?; +/// +/// // Insert node while holding the lock. It is guaranteed to succeed with no allocation +/// // attempts. +/// let mut guard = tree.lock(); +/// guard.insert(node); +/// Ok(()) +/// } +/// ``` +/// +/// In the example below, we reuse an existing node allocation from an element we removed. +/// +/// ``` +/// use kernel::{alloc::flags, rbtree::{RBTree, RBTreeNodeReservation}}; +/// +/// // Create a new tree. +/// let mut tree = RBTree::new(); +/// +/// // Insert three elements. 
+/// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?;
+/// tree.try_create_and_insert(30, 300, flags::GFP_KERNEL)?;
+///
+/// // Check the nodes we just inserted.
+/// {
+///     let mut iter = tree.iter();
+///     assert_eq!(iter.next().unwrap(), (&10, &100));
+///     assert_eq!(iter.next().unwrap(), (&20, &200));
+///     assert_eq!(iter.next().unwrap(), (&30, &300));
+///     assert!(iter.next().is_none());
+/// }
+///
+/// // Remove a node, getting back ownership of it.
+/// let existing = tree.remove(&30).unwrap();
+///
+/// // Check that the tree reflects the removal.
+/// {
+///     let mut iter = tree.iter();
+///     assert_eq!(iter.next().unwrap(), (&10, &100));
+///     assert_eq!(iter.next().unwrap(), (&20, &200));
+///     assert!(iter.next().is_none());
+/// }
+///
+/// // Create a preallocated reservation that we can re-use later.
+/// let reservation = RBTreeNodeReservation::new(flags::GFP_KERNEL)?;
+///
+/// // Insert a new node into the tree, reusing the previous allocation. This is guaranteed to
+/// // succeed (no memory allocations).
+/// tree.insert(reservation.into_node(15, 150));
+///
+/// // Check that the tree reflects the new insertion.
+/// {
+///     let mut iter = tree.iter();
+///     assert_eq!(iter.next().unwrap(), (&10, &100));
+///     assert_eq!(iter.next().unwrap(), (&15, &150));
+///     assert_eq!(iter.next().unwrap(), (&20, &200));
+///     assert!(iter.next().is_none());
+/// }
+///
+/// # Ok::<(), Error>(())
+/// ```
+///
+/// # Invariants
+///
+/// Non-null parent/children pointers stored in instances of the `rb_node` C struct are always
+/// valid, and pointing to a field of our internal representation of a node.
+pub struct RBTree<K, V> {
+    root: bindings::rb_root,
+    _p: PhantomData<Node<K, V>>,
+}
+
+// SAFETY: An [`RBTree`] allows the same kinds of access to its values that a struct allows to its
+// fields, so we use the same Send condition as would be used for a struct with `K` and `V` fields.
+unsafe impl<K: Send, V: Send> Send for RBTree<K, V> {}
+
+// SAFETY: An [`RBTree`] allows the same kinds of access to its values that a struct allows to its
+// fields, so we use the same Sync condition as would be used for a struct with `K` and `V` fields.
+unsafe impl<K: Sync, V: Sync> Sync for RBTree<K, V> {}
+
+impl<K, V> RBTree<K, V> {
+    /// Creates a new and empty tree.
+    pub fn new() -> Self {
+        Self {
+            // INVARIANT: There are no nodes in the tree, so the invariant holds vacuously.
+            root: bindings::rb_root::default(),
+            _p: PhantomData,
+        }
+    }
+
+    /// Returns an iterator over the tree nodes, sorted by key.
+    pub fn iter(&self) -> Iter<'_, K, V> {
+        Iter {
+            _tree: PhantomData,
+            // INVARIANT:
+            //   - `self.root` is a valid pointer to a tree root.
+            //   - `bindings::rb_first` produces a valid pointer to a node given `root` is valid.
+            iter_raw: IterRaw {
+                // SAFETY: by the invariants, all pointers are valid.
+                next: unsafe { bindings::rb_first(&self.root) },
+                _phantom: PhantomData,
+            },
+        }
+    }
+
+    /// Returns a mutable iterator over the tree nodes, sorted by key.
+    pub fn iter_mut(&mut self) -> IterMut<'_, K, V> {
+        IterMut {
+            _tree: PhantomData,
+            // INVARIANT:
+            //   - `self.root` is a valid pointer to a tree root.
+            //   - `bindings::rb_first` produces a valid pointer to a node given `root` is valid.
+            iter_raw: IterRaw {
+                // SAFETY: by the invariants, all pointers are valid.
+                next: unsafe { bindings::rb_first(from_mut(&mut self.root)) },
+                _phantom: PhantomData,
+            },
+        }
+    }
+
+    /// Returns an iterator over the keys of the nodes in the tree, in sorted order.
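+    ///
+    /// # Examples
+    ///
+    /// A small usage sketch, assuming the `RBTree` API shown above:
+    ///
+    /// ```
+    /// use kernel::{alloc::flags, rbtree::RBTree};
+    ///
+    /// let mut tree = RBTree::new();
+    /// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?;
+    /// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?;
+    ///
+    /// // Keys are yielded in sorted order, regardless of insertion order.
+    /// let mut keys = tree.keys();
+    /// assert_eq!(keys.next(), Some(&10));
+    /// assert_eq!(keys.next(), Some(&20));
+    /// assert!(keys.next().is_none());
+    ///
+    /// # Ok::<(), Error>(())
+    /// ```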
+ pub fn keys(&self) -> impl Iterator { + self.iter().map(|(k, _)| k) + } + + /// Returns an iterator over the values of the nodes in the tree, sorted by key. + pub fn values(&self) -> impl Iterator { + self.iter().map(|(_, v)| v) + } + + /// Returns a mutable iterator over the values of the nodes in the tree, sorted by key. + pub fn values_mut(&mut self) -> impl Iterator { + self.iter_mut().map(|(_, v)| v) + } + + /// Returns a cursor over the tree nodes, starting with the smallest key. + pub fn cursor_front(&mut self) -> Option> { + let root = addr_of_mut!(self.root); + // SAFETY: `self.root` is always a valid root node + let current = unsafe { bindings::rb_first(root) }; + NonNull::new(current).map(|current| { + // INVARIANT: + // - `current` is a valid node in the [`RBTree`] pointed to by `self`. + Cursor { + current, + tree: self, + } + }) + } + + /// Returns a cursor over the tree nodes, starting with the largest key. + pub fn cursor_back(&mut self) -> Option> { + let root = addr_of_mut!(self.root); + // SAFETY: `self.root` is always a valid root node + let current = unsafe { bindings::rb_last(root) }; + NonNull::new(current).map(|current| { + // INVARIANT: + // - `current` is a valid node in the [`RBTree`] pointed to by `self`. + Cursor { + current, + tree: self, + } + }) + } +} + +impl RBTree +where + K: Ord, +{ + /// Tries to insert a new value into the tree. + /// + /// It overwrites a node if one already exists with the same key and returns it (containing the + /// key/value pair). Returns [`None`] if a node with the same key didn't already exist. + /// + /// Returns an error if it cannot allocate memory for the new node. + pub fn try_create_and_insert( + &mut self, + key: K, + value: V, + flags: Flags, + ) -> Result>> { + Ok(self.insert(RBTreeNode::new(key, value, flags)?)) + } + + /// Inserts a new node into the tree. + /// + /// It overwrites a node if one already exists with the same key and returns it (containing the + /// key/value pair). Returns [`None`] if a node with the same key didn't already exist. + /// + /// This function always succeeds. + pub fn insert(&mut self, node: RBTreeNode) -> Option> { + match self.raw_entry(&node.node.key) { + RawEntry::Occupied(entry) => Some(entry.replace(node)), + RawEntry::Vacant(entry) => { + entry.insert(node); + None + } + } + } + + fn raw_entry(&mut self, key: &K) -> RawEntry<'_, K, V> { + let raw_self: *mut RBTree = self; + // The returned `RawEntry` is used to call either `rb_link_node` or `rb_replace_node`. + // The parameters of `bindings::rb_link_node` are as follows: + // - `node`: A pointer to an uninitialized node being inserted. + // - `parent`: A pointer to an existing node in the tree. One of its child pointers must be + // null, and `node` will become a child of `parent` by replacing that child pointer + // with a pointer to `node`. + // - `rb_link`: A pointer to either the left-child or right-child field of `parent`. This + // specifies which child of `parent` should hold `node` after this call. The + // value of `*rb_link` must be null before the call to `rb_link_node`. If the + // red/black tree is empty, then it’s also possible for `parent` to be null. In + // this case, `rb_link` is a pointer to the `root` field of the red/black tree. + // + // We will traverse the tree looking for a node that has a null pointer as its child, + // representing an empty subtree where we can insert our new node. We need to make sure + // that we preserve the ordering of the nodes in the tree. 
In each iteration of the loop + // we store `parent` and `child_field_of_parent`, and the new `node` will go somewhere + // in the subtree of `parent` that `child_field_of_parent` points at. Once + // we find an empty subtree, we can insert the new node using `rb_link_node`. + let mut parent = core::ptr::null_mut(); + let mut child_field_of_parent: &mut *mut bindings::rb_node = + // SAFETY: `raw_self` is a valid pointer to the `RBTree` (created from `self` above). + unsafe { &mut (*raw_self).root.rb_node }; + while !(*child_field_of_parent).is_null() { + let curr = *child_field_of_parent; + // SAFETY: All links fields we create are in a `Node`. + let node = unsafe { container_of!(curr, Node, links) }; + + // SAFETY: `node` is a non-null node so it is valid by the type invariants. + match key.cmp(unsafe { &(*node).key }) { + // SAFETY: `curr` is a non-null node so it is valid by the type invariants. + Ordering::Less => child_field_of_parent = unsafe { &mut (*curr).rb_left }, + // SAFETY: `curr` is a non-null node so it is valid by the type invariants. + Ordering::Greater => child_field_of_parent = unsafe { &mut (*curr).rb_right }, + Ordering::Equal => { + return RawEntry::Occupied(OccupiedEntry { + rbtree: self, + node_links: curr, + }) + } + } + parent = curr; + } + + RawEntry::Vacant(RawVacantEntry { + rbtree: raw_self, + parent, + child_field_of_parent, + _phantom: PhantomData, + }) + } + + /// Gets the given key's corresponding entry in the map for in-place manipulation. + pub fn entry(&mut self, key: K) -> Entry<'_, K, V> { + match self.raw_entry(&key) { + RawEntry::Occupied(entry) => Entry::Occupied(entry), + RawEntry::Vacant(entry) => Entry::Vacant(VacantEntry { raw: entry, key }), + } + } + + /// Used for accessing the given node, if it exists. + pub fn find_mut(&mut self, key: &K) -> Option> { + match self.raw_entry(key) { + RawEntry::Occupied(entry) => Some(entry), + RawEntry::Vacant(_entry) => None, + } + } + + /// Returns a reference to the value corresponding to the key. + pub fn get(&self, key: &K) -> Option<&V> { + let mut node = self.root.rb_node; + while !node.is_null() { + // SAFETY: By the type invariant of `Self`, all non-null `rb_node` pointers stored in `self` + // point to the links field of `Node` objects. + let this = unsafe { container_of!(node, Node, links) }; + // SAFETY: `this` is a non-null node so it is valid by the type invariants. + node = match key.cmp(unsafe { &(*this).key }) { + // SAFETY: `node` is a non-null node so it is valid by the type invariants. + Ordering::Less => unsafe { (*node).rb_left }, + // SAFETY: `node` is a non-null node so it is valid by the type invariants. + Ordering::Greater => unsafe { (*node).rb_right }, + // SAFETY: `node` is a non-null node so it is valid by the type invariants. + Ordering::Equal => return Some(unsafe { &(*this).value }), + } + } + None + } + + /// Returns a mutable reference to the value corresponding to the key. + pub fn get_mut(&mut self, key: &K) -> Option<&mut V> { + self.find_mut(key).map(|node| node.into_mut()) + } + + /// Removes the node with the given key from the tree. + /// + /// It returns the node that was removed if one exists, or [`None`] otherwise. + pub fn remove_node(&mut self, key: &K) -> Option> { + self.find_mut(key).map(OccupiedEntry::remove_node) + } + + /// Removes the node with the given key from the tree. + /// + /// It returns the value that was removed if one exists, or [`None`] otherwise. 
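+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch of the expected behavior, assuming the `RBTree` API shown above:
+    ///
+    /// ```
+    /// use kernel::{alloc::flags, rbtree::RBTree};
+    ///
+    /// let mut tree = RBTree::new();
+    /// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?;
+    ///
+    /// // Removing an existing key yields its value; removing it again yields `None`.
+    /// assert_eq!(tree.remove(&10), Some(100));
+    /// assert_eq!(tree.remove(&10), None);
+    ///
+    /// # Ok::<(), Error>(())
+    /// ```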
+ pub fn remove(&mut self, key: &K) -> Option { + self.find_mut(key).map(OccupiedEntry::remove) + } + + /// Returns a cursor over the tree nodes based on the given key. + /// + /// If the given key exists, the cursor starts there. + /// Otherwise it starts with the first larger key in sort order. + /// If there is no larger key, it returns [`None`]. + pub fn cursor_lower_bound(&mut self, key: &K) -> Option> + where + K: Ord, + { + let mut node = self.root.rb_node; + let mut best_match: Option>> = None; + while !node.is_null() { + // SAFETY: By the type invariant of `Self`, all non-null `rb_node` pointers stored in `self` + // point to the links field of `Node` objects. + let this = unsafe { container_of!(node, Node, links) }.cast_mut(); + // SAFETY: `this` is a non-null node so it is valid by the type invariants. + let this_key = unsafe { &(*this).key }; + // SAFETY: `node` is a non-null node so it is valid by the type invariants. + let left_child = unsafe { (*node).rb_left }; + // SAFETY: `node` is a non-null node so it is valid by the type invariants. + let right_child = unsafe { (*node).rb_right }; + match key.cmp(this_key) { + Ordering::Equal => { + best_match = NonNull::new(this); + break; + } + Ordering::Greater => { + node = right_child; + } + Ordering::Less => { + let is_better_match = match best_match { + None => true, + Some(best) => { + // SAFETY: `best` is a non-null node so it is valid by the type invariants. + let best_key = unsafe { &(*best.as_ptr()).key }; + best_key > this_key + } + }; + if is_better_match { + best_match = NonNull::new(this); + } + node = left_child; + } + }; + } + + let best = best_match?; + + // SAFETY: `best` is a non-null node so it is valid by the type invariants. + let links = unsafe { addr_of_mut!((*best.as_ptr()).links) }; + + NonNull::new(links).map(|current| { + // INVARIANT: + // - `current` is a valid node in the [`RBTree`] pointed to by `self`. + Cursor { + current, + tree: self, + } + }) + } +} + +impl Default for RBTree { + fn default() -> Self { + Self::new() + } +} + +impl Drop for RBTree { + fn drop(&mut self) { + // SAFETY: `root` is valid as it's embedded in `self` and we have a valid `self`. + let mut next = unsafe { bindings::rb_first_postorder(&self.root) }; + + // INVARIANT: The loop invariant is that all tree nodes from `next` in postorder are valid. + while !next.is_null() { + // SAFETY: All links fields we create are in a `Node`. + let this = unsafe { container_of!(next, Node, links) }; + + // Find out what the next node is before disposing of the current one. + // SAFETY: `next` and all nodes in postorder are still valid. + next = unsafe { bindings::rb_next_postorder(next) }; + + // INVARIANT: This is the destructor, so we break the type invariant during clean-up, + // but it is not observable. The loop invariant is still maintained. + + // SAFETY: `this` is valid per the loop invariant. + unsafe { drop(Box::from_raw(this.cast_mut())) }; + } + } +} + +/// A bidirectional cursor over the tree nodes, sorted by key. +/// +/// # Examples +/// +/// In the following example, we obtain a cursor to the first element in the tree. +/// The cursor allows us to iterate bidirectionally over key/value pairs in the tree. +/// +/// ``` +/// use kernel::{alloc::flags, rbtree::RBTree}; +/// +/// // Create a new tree. +/// let mut tree = RBTree::new(); +/// +/// // Insert three elements. 
+/// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?; +/// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?; +/// tree.try_create_and_insert(30, 300, flags::GFP_KERNEL)?; +/// +/// // Get a cursor to the first element. +/// let mut cursor = tree.cursor_front().unwrap(); +/// let mut current = cursor.current(); +/// assert_eq!(current, (&10, &100)); +/// +/// // Move the cursor, updating it to the 2nd element. +/// cursor = cursor.move_next().unwrap(); +/// current = cursor.current(); +/// assert_eq!(current, (&20, &200)); +/// +/// // Peek at the next element without impacting the cursor. +/// let next = cursor.peek_next().unwrap(); +/// assert_eq!(next, (&30, &300)); +/// current = cursor.current(); +/// assert_eq!(current, (&20, &200)); +/// +/// // Moving past the last element causes the cursor to return [`None`]. +/// cursor = cursor.move_next().unwrap(); +/// current = cursor.current(); +/// assert_eq!(current, (&30, &300)); +/// let cursor = cursor.move_next(); +/// assert!(cursor.is_none()); +/// +/// # Ok::<(), Error>(()) +/// ``` +/// +/// A cursor can also be obtained at the last element in the tree. +/// +/// ``` +/// use kernel::{alloc::flags, rbtree::RBTree}; +/// +/// // Create a new tree. +/// let mut tree = RBTree::new(); +/// +/// // Insert three elements. +/// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?; +/// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?; +/// tree.try_create_and_insert(30, 300, flags::GFP_KERNEL)?; +/// +/// let mut cursor = tree.cursor_back().unwrap(); +/// let current = cursor.current(); +/// assert_eq!(current, (&30, &300)); +/// +/// # Ok::<(), Error>(()) +/// ``` +/// +/// Obtaining a cursor returns [`None`] if the tree is empty. +/// +/// ``` +/// use kernel::rbtree::RBTree; +/// +/// let mut tree: RBTree = RBTree::new(); +/// assert!(tree.cursor_front().is_none()); +/// +/// # Ok::<(), Error>(()) +/// ``` +/// +/// [`RBTree::cursor_lower_bound`] can be used to start at an arbitrary node in the tree. +/// +/// ``` +/// use kernel::{alloc::flags, rbtree::RBTree}; +/// +/// // Create a new tree. +/// let mut tree = RBTree::new(); +/// +/// // Insert five elements. +/// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?; +/// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?; +/// tree.try_create_and_insert(30, 300, flags::GFP_KERNEL)?; +/// tree.try_create_and_insert(40, 400, flags::GFP_KERNEL)?; +/// tree.try_create_and_insert(50, 500, flags::GFP_KERNEL)?; +/// +/// // If the provided key exists, a cursor to that key is returned. +/// let cursor = tree.cursor_lower_bound(&20).unwrap(); +/// let current = cursor.current(); +/// assert_eq!(current, (&20, &200)); +/// +/// // If the provided key doesn't exist, a cursor to the first larger element in sort order is returned. +/// let cursor = tree.cursor_lower_bound(&25).unwrap(); +/// let current = cursor.current(); +/// assert_eq!(current, (&30, &300)); +/// +/// // If there is no larger key, [`None`] is returned. +/// let cursor = tree.cursor_lower_bound(&55); +/// assert!(cursor.is_none()); +/// +/// # Ok::<(), Error>(()) +/// ``` +/// +/// The cursor allows mutation of values in the tree. +/// +/// ``` +/// use kernel::{alloc::flags, rbtree::RBTree}; +/// +/// // Create a new tree. +/// let mut tree = RBTree::new(); +/// +/// // Insert three elements. 
+/// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?; +/// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?; +/// tree.try_create_and_insert(30, 300, flags::GFP_KERNEL)?; +/// +/// // Retrieve a cursor. +/// let mut cursor = tree.cursor_front().unwrap(); +/// +/// // Get a mutable reference to the current value. +/// let (k, v) = cursor.current_mut(); +/// *v = 1000; +/// +/// // The updated value is reflected in the tree. +/// let updated = tree.get(&10).unwrap(); +/// assert_eq!(updated, &1000); +/// +/// # Ok::<(), Error>(()) +/// ``` +/// +/// It also allows node removal. The following examples demonstrate the behavior of removing the current node. +/// +/// ``` +/// use kernel::{alloc::flags, rbtree::RBTree}; +/// +/// // Create a new tree. +/// let mut tree = RBTree::new(); +/// +/// // Insert three elements. +/// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?; +/// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?; +/// tree.try_create_and_insert(30, 300, flags::GFP_KERNEL)?; +/// +/// // Remove the first element. +/// let mut cursor = tree.cursor_front().unwrap(); +/// let mut current = cursor.current(); +/// assert_eq!(current, (&10, &100)); +/// cursor = cursor.remove_current().0.unwrap(); +/// +/// // If a node exists after the current element, it is returned. +/// current = cursor.current(); +/// assert_eq!(current, (&20, &200)); +/// +/// // Get a cursor to the last element, and remove it. +/// cursor = tree.cursor_back().unwrap(); +/// current = cursor.current(); +/// assert_eq!(current, (&30, &300)); +/// +/// // Since there is no next node, the previous node is returned. +/// cursor = cursor.remove_current().0.unwrap(); +/// current = cursor.current(); +/// assert_eq!(current, (&20, &200)); +/// +/// // Removing the last element in the tree returns [`None`]. +/// assert!(cursor.remove_current().0.is_none()); +/// +/// # Ok::<(), Error>(()) +/// ``` +/// +/// Nodes adjacent to the current node can also be removed. +/// +/// ``` +/// use kernel::{alloc::flags, rbtree::RBTree}; +/// +/// // Create a new tree. +/// let mut tree = RBTree::new(); +/// +/// // Insert three elements. +/// tree.try_create_and_insert(10, 100, flags::GFP_KERNEL)?; +/// tree.try_create_and_insert(20, 200, flags::GFP_KERNEL)?; +/// tree.try_create_and_insert(30, 300, flags::GFP_KERNEL)?; +/// +/// // Get a cursor to the first element. +/// let mut cursor = tree.cursor_front().unwrap(); +/// let mut current = cursor.current(); +/// assert_eq!(current, (&10, &100)); +/// +/// // Calling `remove_prev` from the first element returns [`None`]. +/// assert!(cursor.remove_prev().is_none()); +/// +/// // Get a cursor to the last element. +/// cursor = tree.cursor_back().unwrap(); +/// current = cursor.current(); +/// assert_eq!(current, (&30, &300)); +/// +/// // Calling `remove_prev` removes and returns the middle element. +/// assert_eq!(cursor.remove_prev().unwrap().to_key_value(), (20, 200)); +/// +/// // Calling `remove_next` from the last element returns [`None`]. +/// assert!(cursor.remove_next().is_none()); +/// +/// // Move to the first element +/// cursor = cursor.move_prev().unwrap(); +/// current = cursor.current(); +/// assert_eq!(current, (&10, &100)); +/// +/// // Calling `remove_next` removes and returns the last element. +/// assert_eq!(cursor.remove_next().unwrap().to_key_value(), (30, 300)); +/// +/// # Ok::<(), Error>(()) +/// +/// ``` +/// +/// # Invariants +/// - `current` points to a node that is in the same [`RBTree`] as `tree`. 
+pub struct Cursor<'a, K, V> {
+    tree: &'a mut RBTree<K, V>,
+    current: NonNull<bindings::rb_node>,
+}
+
+// SAFETY: The [`Cursor`] has exclusive access to both `K` and `V`, so it is sufficient to require
+// them to be `Send`. The cursor only gives out immutable references to the keys, but since it has
+// exclusive access to those same keys, `Send` is sufficient. `Sync` would be okay, but it is more
+// restrictive to the user.
+unsafe impl<'a, K: Send, V: Send> Send for Cursor<'a, K, V> {}
+
+// SAFETY: The [`Cursor`] gives out immutable references to `K` and mutable references to `V`,
+// so it has the same thread safety requirements as mutable references.
+unsafe impl<'a, K: Sync, V: Sync> Sync for Cursor<'a, K, V> {}
+
+impl<'a, K, V> Cursor<'a, K, V> {
+    /// The current node.
+    pub fn current(&self) -> (&K, &V) {
+        // SAFETY:
+        //   - `self.current` is a valid node by the type invariants.
+        //   - We have an immutable reference by the function signature.
+        unsafe { Self::to_key_value(self.current) }
+    }
+
+    /// The current node, with a mutable value.
+    pub fn current_mut(&mut self) -> (&K, &mut V) {
+        // SAFETY:
+        //   - `self.current` is a valid node by the type invariants.
+        //   - We have a mutable reference by the function signature.
+        unsafe { Self::to_key_value_mut(self.current) }
+    }
+
+    /// Remove the current node from the tree.
+    ///
+    /// Returns a tuple where the first element is a cursor to the next node, if it exists,
+    /// else the previous node, else [`None`] (if the tree becomes empty). The second element
+    /// is the removed node.
+    pub fn remove_current(self) -> (Option<Self>, RBTreeNode<K, V>) {
+        let prev = self.get_neighbor_raw(Direction::Prev);
+        let next = self.get_neighbor_raw(Direction::Next);
+        // SAFETY: By the type invariant of `Self`, all non-null `rb_node` pointers stored in
+        // `self` point to the links field of `Node<K, V>` objects.
+        let this = unsafe { container_of!(self.current.as_ptr(), Node<K, V>, links) }.cast_mut();
+        // SAFETY: `this` is valid by the type invariants as described above.
+        let node = unsafe { Box::from_raw(this) };
+        let node = RBTreeNode { node };
+        // SAFETY: The reference to the tree used to create the cursor outlives the cursor, so
+        // the tree cannot change. By the tree invariant, all nodes are valid.
+        unsafe { bindings::rb_erase(&mut (*this).links, addr_of_mut!(self.tree.root)) };
+
+        let current = match (prev, next) {
+            (_, Some(next)) => next,
+            (Some(prev), None) => prev,
+            (None, None) => {
+                return (None, node);
+            }
+        };
+
+        (
+            // INVARIANT:
+            //   - `current` is a valid node in the [`RBTree`] pointed to by `self.tree`.
+            Some(Self {
+                current,
+                tree: self.tree,
+            }),
+            node,
+        )
+    }
+
+    /// Remove the previous node, returning it if it exists.
+    pub fn remove_prev(&mut self) -> Option<RBTreeNode<K, V>> {
+        self.remove_neighbor(Direction::Prev)
+    }
+
+    /// Remove the next node, returning it if it exists.
+    pub fn remove_next(&mut self) -> Option<RBTreeNode<K, V>> {
+        self.remove_neighbor(Direction::Next)
+    }
+
+    fn remove_neighbor(&mut self, direction: Direction) -> Option<RBTreeNode<K, V>> {
+        if let Some(neighbor) = self.get_neighbor_raw(direction) {
+            let neighbor = neighbor.as_ptr();
+            // SAFETY: The reference to the tree used to create the cursor outlives the cursor, so
+            // the tree cannot change. By the tree invariant, all nodes are valid.
+            unsafe { bindings::rb_erase(neighbor, addr_of_mut!(self.tree.root)) };
+            // SAFETY: By the type invariant of `Self`, all non-null `rb_node` pointers stored in
+            // `self` point to the links field of `Node<K, V>` objects.
+            let this = unsafe { container_of!(neighbor, Node<K, V>, links) }.cast_mut();
+            // SAFETY: `this` is valid by the type invariants as described above.
+            let node = unsafe { Box::from_raw(this) };
+            return Some(RBTreeNode { node });
+        }
+        None
+    }
+
+    /// Move the cursor to the previous node, returning [`None`] if it doesn't exist.
+    pub fn move_prev(self) -> Option<Self> {
+        self.mv(Direction::Prev)
+    }
+
+    /// Move the cursor to the next node, returning [`None`] if it doesn't exist.
+    pub fn move_next(self) -> Option<Self> {
+        self.mv(Direction::Next)
+    }
+
+    fn mv(self, direction: Direction) -> Option<Self> {
+        // INVARIANT:
+        //   - `neighbor` is a valid node in the [`RBTree`] pointed to by `self.tree`.
+        self.get_neighbor_raw(direction).map(|neighbor| Self {
+            tree: self.tree,
+            current: neighbor,
+        })
+    }
+
+    /// Access the previous node without moving the cursor.
+    pub fn peek_prev(&self) -> Option<(&K, &V)> {
+        self.peek(Direction::Prev)
+    }
+
+    /// Access the next node without moving the cursor.
+    pub fn peek_next(&self) -> Option<(&K, &V)> {
+        self.peek(Direction::Next)
+    }
+
+    fn peek(&self, direction: Direction) -> Option<(&K, &V)> {
+        self.get_neighbor_raw(direction).map(|neighbor| {
+            // SAFETY:
+            //   - `neighbor` is a valid tree node.
+            //   - By the function signature, we have an immutable reference to `self`.
+            unsafe { Self::to_key_value(neighbor) }
+        })
+    }
+
+    /// Access the previous node mutably without moving the cursor.
+    pub fn peek_prev_mut(&mut self) -> Option<(&K, &mut V)> {
+        self.peek_mut(Direction::Prev)
+    }
+
+    /// Access the next node mutably without moving the cursor.
+    pub fn peek_next_mut(&mut self) -> Option<(&K, &mut V)> {
+        self.peek_mut(Direction::Next)
+    }
+
+    fn peek_mut(&mut self, direction: Direction) -> Option<(&K, &mut V)> {
+        self.get_neighbor_raw(direction).map(|neighbor| {
+            // SAFETY:
+            //   - `neighbor` is a valid tree node.
+            //   - By the function signature, we have a mutable reference to `self`.
+            unsafe { Self::to_key_value_mut(neighbor) }
+        })
+    }
+
+    fn get_neighbor_raw(&self, direction: Direction) -> Option<NonNull<bindings::rb_node>> {
+        // SAFETY: `self.current` is valid by the type invariants.
+        let neighbor = unsafe {
+            match direction {
+                Direction::Prev => bindings::rb_prev(self.current.as_ptr()),
+                Direction::Next => bindings::rb_next(self.current.as_ptr()),
+            }
+        };
+
+        NonNull::new(neighbor)
+    }
+
+    /// SAFETY:
+    /// - `node` must be a valid pointer to a node in an [`RBTree`].
+    /// - The caller has immutable access to `node` for the duration of `'b`.
+    unsafe fn to_key_value<'b>(node: NonNull<bindings::rb_node>) -> (&'b K, &'b V) {
+        // SAFETY: the caller guarantees that `node` is a valid pointer in an `RBTree`.
+        let (k, v) = unsafe { Self::to_key_value_raw(node) };
+        // SAFETY: the caller guarantees immutable access to `node`.
+        (k, unsafe { &*v })
+    }
+
+    /// SAFETY:
+    /// - `node` must be a valid pointer to a node in an [`RBTree`].
+    /// - The caller has mutable access to `node` for the duration of `'b`.
+    unsafe fn to_key_value_mut<'b>(node: NonNull<bindings::rb_node>) -> (&'b K, &'b mut V) {
+        // SAFETY: the caller guarantees that `node` is a valid pointer in an `RBTree`.
+        let (k, v) = unsafe { Self::to_key_value_raw(node) };
+        // SAFETY: the caller guarantees mutable access to `node`.
+        (k, unsafe { &mut *v })
+    }
+
+    /// SAFETY:
+    /// - `node` must be a valid pointer to a node in an [`RBTree`].
+    /// - The caller has immutable access to the key for the duration of `'b`.
+    unsafe fn to_key_value_raw<'b>(node: NonNull<bindings::rb_node>) -> (&'b K, *mut V) {
+        // SAFETY: By the type invariant of `Self`, all non-null `rb_node` pointers stored in
+        // `self` point to the links field of `Node<K, V>` objects.
+        let this = unsafe { container_of!(node.as_ptr(), Node<K, V>, links) }.cast_mut();
+        // SAFETY: The passed `node` is the current node or a non-null neighbor,
+        // thus `this` is valid by the type invariants.
+        let k = unsafe { &(*this).key };
+        // SAFETY: The passed `node` is the current node or a non-null neighbor,
+        // thus `this` is valid by the type invariants.
+        let v = unsafe { addr_of_mut!((*this).value) };
+        (k, v)
+    }
+}
+
+/// Direction for [`Cursor`] operations.
+enum Direction {
+    /// The node immediately before, in sort order.
+    Prev,
+    /// The node immediately after, in sort order.
+    Next,
+}
+
+impl<'a, K, V> IntoIterator for &'a RBTree<K, V> {
+    type Item = (&'a K, &'a V);
+    type IntoIter = Iter<'a, K, V>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.iter()
+    }
+}
+
+/// An iterator over the nodes of a [`RBTree`].
+///
+/// Instances are created by calling [`RBTree::iter`].
+pub struct Iter<'a, K, V> {
+    _tree: PhantomData<&'a RBTree<K, V>>,
+    iter_raw: IterRaw<K, V>,
+}
+
+// SAFETY: The [`Iter`] gives out immutable references to `K` and `V`, so it has the same
+// thread safety requirements as immutable references.
+unsafe impl<'a, K: Sync, V: Sync> Send for Iter<'a, K, V> {}
+
+// SAFETY: The [`Iter`] gives out immutable references to `K` and `V`, so it has the same
+// thread safety requirements as immutable references.
+unsafe impl<'a, K: Sync, V: Sync> Sync for Iter<'a, K, V> {}
+
+impl<'a, K, V> Iterator for Iter<'a, K, V> {
+    type Item = (&'a K, &'a V);
+
+    fn next(&mut self) -> Option<Self::Item> {
+        // SAFETY: Due to `self._tree`, `k` and `v` are valid for the lifetime of `'a`.
+        self.iter_raw.next().map(|(k, v)| unsafe { (&*k, &*v) })
+    }
+}
+
+impl<'a, K, V> IntoIterator for &'a mut RBTree<K, V> {
+    type Item = (&'a K, &'a mut V);
+    type IntoIter = IterMut<'a, K, V>;
+
+    fn into_iter(self) -> Self::IntoIter {
+        self.iter_mut()
+    }
+}
+
+/// A mutable iterator over the nodes of a [`RBTree`].
+///
+/// Instances are created by calling [`RBTree::iter_mut`].
+pub struct IterMut<'a, K, V> {
+    _tree: PhantomData<&'a mut RBTree<K, V>>,
+    iter_raw: IterRaw<K, V>,
+}
+
+// SAFETY: The [`IterMut`] has exclusive access to both `K` and `V`, so it is sufficient to require
+// them to be `Send`. The iterator only gives out immutable references to the keys, but since the
+// iterator has exclusive access to those same keys, `Send` is sufficient. `Sync` would be okay,
+// but it is more restrictive to the user.
+unsafe impl<'a, K: Send, V: Send> Send for IterMut<'a, K, V> {}
+
+// SAFETY: The [`IterMut`] gives out immutable references to `K` and mutable references to `V`, so
+// it has the same thread safety requirements as mutable references.
+unsafe impl<'a, K: Sync, V: Sync> Sync for IterMut<'a, K, V> {}
+
+impl<'a, K, V> Iterator for IterMut<'a, K, V> {
+    type Item = (&'a K, &'a mut V);
+
+    fn next(&mut self) -> Option<Self::Item> {
+        self.iter_raw.next().map(|(k, v)|
+            // SAFETY: Due to `&mut self`, we have exclusive access to `k` and `v`, for the
+            // lifetime of `'a`.
+            unsafe { (&*k, &mut *v) })
+    }
+}
+
+/// A raw iterator over the nodes of a [`RBTree`].
+///
+/// # Invariants
+/// - `self.next` is a valid pointer.
+/// - `self.next` points to a node stored inside of a valid `RBTree`.
+struct IterRaw<K, V> {
+    next: *mut bindings::rb_node,
+    _phantom: PhantomData<fn() -> (K, V)>,
+}
+
+impl<K, V> Iterator for IterRaw<K, V> {
+    type Item = (*mut K, *mut V);
+
+    fn next(&mut self) -> Option<Self::Item> {
+        if self.next.is_null() {
+            return None;
+        }
+
+        // SAFETY: By the type invariant of `IterRaw`, `self.next` is a valid node in an `RBTree`,
+        // and by the type invariant of `RBTree`, all nodes point to the links field of
+        // `Node<K, V>` objects.
+        let cur = unsafe { container_of!(self.next, Node<K, V>, links) }.cast_mut();
+
+        // SAFETY: `self.next` is a valid tree node by the type invariants.
+        self.next = unsafe { bindings::rb_next(self.next) };
+
+        // SAFETY: By the same reasoning above, it is safe to dereference the node.
+        Some(unsafe { (addr_of_mut!((*cur).key), addr_of_mut!((*cur).value)) })
+    }
+}
+
+/// A memory reservation for a red-black tree node.
+///
+/// It contains the memory needed to hold a node that can be inserted into a red-black tree. One
+/// can be obtained by directly allocating it ([`RBTreeNodeReservation::new`]).
+pub struct RBTreeNodeReservation<K, V> {
+    node: Box<MaybeUninit<Node<K, V>>>,
+}
+
+impl<K, V> RBTreeNodeReservation<K, V> {
+    /// Allocates memory for a node to be eventually initialised and inserted into the tree via a
+    /// call to [`RBTree::insert`].
+    pub fn new(flags: Flags) -> Result<RBTreeNodeReservation<K, V>> {
+        Ok(RBTreeNodeReservation {
+            node: <Box<_> as BoxExt<_>>::new_uninit(flags)?,
+        })
+    }
+}
+
+// SAFETY: This doesn't actually contain K or V, and is just a memory allocation. Those can always
+// be moved across threads.
+unsafe impl<K, V> Send for RBTreeNodeReservation<K, V> {}
+
+// SAFETY: This doesn't actually contain K or V, and is just a memory allocation.
+unsafe impl<K, V> Sync for RBTreeNodeReservation<K, V> {}
+
+impl<K, V> RBTreeNodeReservation<K, V> {
+    /// Initialises a node reservation.
+    ///
+    /// It then becomes an [`RBTreeNode`] that can be inserted into a tree.
+    pub fn into_node(mut self, key: K, value: V) -> RBTreeNode<K, V> {
+        self.node.write(Node {
+            key,
+            value,
+            links: bindings::rb_node::default(),
+        });
+        // SAFETY: We just wrote to it.
+        let node = unsafe { self.node.assume_init() };
+        RBTreeNode { node }
+    }
+}
+
+/// A red-black tree node.
+///
+/// The node is fully initialised (with key and value) and can be inserted into a tree without any
+/// extra allocations or failure paths.
+pub struct RBTreeNode<K, V> {
+    node: Box<Node<K, V>>,
+}
+
+impl<K, V> RBTreeNode<K, V> {
+    /// Allocates and initialises a node that can be inserted into the tree via
+    /// [`RBTree::insert`].
+    pub fn new(key: K, value: V, flags: Flags) -> Result<RBTreeNode<K, V>> {
+        Ok(RBTreeNodeReservation::new(flags)?.into_node(key, value))
+    }
+
+    /// Get the key and value from inside the node.
+    pub fn to_key_value(self) -> (K, V) {
+        (self.node.key, self.node.value)
+    }
+}
+
+// SAFETY: If K and V can be sent across threads, then it's also okay to send [`RBTreeNode`] across
+// threads.
+unsafe impl<K: Send, V: Send> Send for RBTreeNode<K, V> {}
+
+// SAFETY: If K and V can be accessed without synchronization, then it's also okay to access
+// [`RBTreeNode`] without synchronization.
+unsafe impl<K: Sync, V: Sync> Sync for RBTreeNode<K, V> {}
+
+impl<K, V> RBTreeNode<K, V> {
+    /// Drop the key and value, but keep the allocation.
+    ///
+    /// It then becomes a reservation that can be re-initialised into a different node (i.e., with
+    /// a different key and/or value).
+    ///
+    /// The existing key and value are dropped in-place as part of this operation, that is, memory
+    /// may be freed (but only for the key/value; memory for the node itself is kept for reuse).
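+    ///
+    /// # Examples
+    ///
+    /// A brief sketch of the node-to-reservation-and-back round-trip, assuming the reservation
+    /// API above:
+    ///
+    /// ```
+    /// use kernel::{alloc::flags, rbtree::RBTreeNode};
+    ///
+    /// let node = RBTreeNode::new(10, 100, flags::GFP_KERNEL)?;
+    ///
+    /// // Drop the key/value but keep the allocation, then reuse it for a new node.
+    /// let reservation = node.into_reservation();
+    /// let node = reservation.into_node(20, 200);
+    /// assert_eq!(node.to_key_value(), (20, 200));
+    ///
+    /// # Ok::<(), Error>(())
+    /// ```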
+ pub fn into_reservation(self) -> RBTreeNodeReservation { + RBTreeNodeReservation { + node: Box::drop_contents(self.node), + } + } +} + +/// A view into a single entry in a map, which may either be vacant or occupied. +/// +/// This enum is constructed from the [`RBTree::entry`]. +/// +/// [`entry`]: fn@RBTree::entry +pub enum Entry<'a, K, V> { + /// This [`RBTree`] does not have a node with this key. + Vacant(VacantEntry<'a, K, V>), + /// This [`RBTree`] already has a node with this key. + Occupied(OccupiedEntry<'a, K, V>), +} + +/// Like [`Entry`], except that it doesn't have ownership of the key. +enum RawEntry<'a, K, V> { + Vacant(RawVacantEntry<'a, K, V>), + Occupied(OccupiedEntry<'a, K, V>), +} + +/// A view into a vacant entry in a [`RBTree`]. It is part of the [`Entry`] enum. +pub struct VacantEntry<'a, K, V> { + key: K, + raw: RawVacantEntry<'a, K, V>, +} + +/// Like [`VacantEntry`], but doesn't hold on to the key. +/// +/// # Invariants +/// - `parent` may be null if the new node becomes the root. +/// - `child_field_of_parent` is a valid pointer to the left-child or right-child of `parent`. If `parent` is +/// null, it is a pointer to the root of the [`RBTree`]. +struct RawVacantEntry<'a, K, V> { + rbtree: *mut RBTree, + /// The node that will become the parent of the new node if we insert one. + parent: *mut bindings::rb_node, + /// This points to the left-child or right-child field of `parent`, or `root` if `parent` is + /// null. + child_field_of_parent: *mut *mut bindings::rb_node, + _phantom: PhantomData<&'a mut RBTree>, +} + +impl<'a, K, V> RawVacantEntry<'a, K, V> { + /// Inserts the given node into the [`RBTree`] at this entry. + /// + /// The `node` must have a key such that inserting it here does not break the ordering of this + /// [`RBTree`]. + fn insert(self, node: RBTreeNode) -> &'a mut V { + let node = Box::into_raw(node.node); + + // SAFETY: `node` is valid at least until we call `Box::from_raw`, which only happens when + // the node is removed or replaced. + let node_links = unsafe { addr_of_mut!((*node).links) }; + + // INVARIANT: We are linking in a new node, which is valid. It remains valid because we + // "forgot" it with `Box::into_raw`. + // SAFETY: The type invariants of `RawVacantEntry` are exactly the safety requirements of `rb_link_node`. + unsafe { bindings::rb_link_node(node_links, self.parent, self.child_field_of_parent) }; + + // SAFETY: All pointers are valid. `node` has just been inserted into the tree. + unsafe { bindings::rb_insert_color(node_links, addr_of_mut!((*self.rbtree).root)) }; + + // SAFETY: The node is valid until we remove it from the tree. + unsafe { &mut (*node).value } + } +} + +impl<'a, K, V> VacantEntry<'a, K, V> { + /// Inserts the given node into the [`RBTree`] at this entry. + pub fn insert(self, value: V, reservation: RBTreeNodeReservation) -> &'a mut V { + self.raw.insert(reservation.into_node(self.key, value)) + } +} + +/// A view into an occupied entry in a [`RBTree`]. It is part of the [`Entry`] enum. +/// +/// # Invariants +/// - `node_links` is a valid, non-null pointer to a tree node in `self.rbtree` +pub struct OccupiedEntry<'a, K, V> { + rbtree: &'a mut RBTree, + /// The node that this entry corresponds to. + node_links: *mut bindings::rb_node, +} + +impl<'a, K, V> OccupiedEntry<'a, K, V> { + /// Gets a reference to the value in the entry. + pub fn get(&self) -> &V { + // SAFETY: + // - `self.node_links` is a valid pointer to a node in the tree. 
+ // - We have shared access to the underlying tree, and can thus give out a shared reference. + unsafe { &(*container_of!(self.node_links, Node, links)).value } + } + + /// Gets a mutable reference to the value in the entry. + pub fn get_mut(&mut self) -> &mut V { + // SAFETY: + // - `self.node_links` is a valid pointer to a node in the tree. + // - We have exclusive access to the underlying tree, and can thus give out a mutable reference. + unsafe { &mut (*(container_of!(self.node_links, Node, links).cast_mut())).value } + } + + /// Converts the entry into a mutable reference to its value. + /// + /// If you need multiple references to the `OccupiedEntry`, see [`self#get_mut`]. + pub fn into_mut(self) -> &'a mut V { + // SAFETY: + // - `self.node_links` is a valid pointer to a node in the tree. + // - This consumes the `&'a mut RBTree`, therefore it can give out a mutable reference that lives for `'a`. + unsafe { &mut (*(container_of!(self.node_links, Node, links).cast_mut())).value } + } + + /// Remove this entry from the [`RBTree`]. + pub fn remove_node(self) -> RBTreeNode { + // SAFETY: The node is a node in the tree, so it is valid. + unsafe { bindings::rb_erase(self.node_links, &mut self.rbtree.root) }; + + // INVARIANT: The node is being returned and the caller may free it, however, it was + // removed from the tree. So the invariants still hold. + RBTreeNode { + // SAFETY: The node was a node in the tree, but we removed it, so we can convert it + // back into a box. + node: unsafe { + Box::from_raw(container_of!(self.node_links, Node, links).cast_mut()) + }, + } + } + + /// Takes the value of the entry out of the map, and returns it. + pub fn remove(self) -> V { + self.remove_node().node.value + } + + /// Swap the current node for the provided node. + /// + /// The key of both nodes must be equal. + fn replace(self, node: RBTreeNode) -> RBTreeNode { + let node = Box::into_raw(node.node); + + // SAFETY: `node` is valid at least until we call `Box::from_raw`, which only happens when + // the node is removed or replaced. + let new_node_links = unsafe { addr_of_mut!((*node).links) }; + + // SAFETY: This updates the pointers so that `new_node_links` is in the tree where + // `self.node_links` used to be. + unsafe { + bindings::rb_replace_node(self.node_links, new_node_links, &mut self.rbtree.root) + }; + + // SAFETY: + // - `self.node_ptr` produces a valid pointer to a node in the tree. + // - Now that we removed this entry from the tree, we can convert the node to a box. + let old_node = + unsafe { Box::from_raw(container_of!(self.node_links, Node, links).cast_mut()) }; + + RBTreeNode { node: old_node } + } +} + +struct Node { + links: bindings::rb_node, + key: K, + value: V, +} diff --git a/rust/kernel/std_vendor.rs b/rust/kernel/std_vendor.rs index 39679a960c1a..67bf9d37ddb5 100644 --- a/rust/kernel/std_vendor.rs +++ b/rust/kernel/std_vendor.rs @@ -136,7 +136,7 @@ /// /// [`std::dbg`]: https://doc.rust-lang.org/std/macro.dbg.html /// [`eprintln`]: https://doc.rust-lang.org/std/macro.eprintln.html -/// [`printk`]: https://www.kernel.org/doc/html/latest/core-api/printk-basics.html +/// [`printk`]: https://docs.kernel.org/core-api/printk-basics.html /// [`pr_info`]: crate::pr_info! /// [`pr_debug`]: crate::pr_debug! #[macro_export] diff --git a/rust/kernel/sync/arc.rs b/rust/kernel/sync/arc.rs index 3673496c2363..3021f30fd822 100644 --- a/rust/kernel/sync/arc.rs +++ b/rust/kernel/sync/arc.rs @@ -12,12 +12,13 @@ //! 2. 
It does not support weak references, which allows it to be half the size. //! 3. It saturates the reference count instead of aborting when it goes over a threshold. //! 4. It does not provide a `get_mut` method, so the ref counted object is pinned. +//! 5. The object in [`Arc`] is pinned implicitly. //! //! [`Arc`]: https://doc.rust-lang.org/std/sync/struct.Arc.html use crate::{ alloc::{box_ext::BoxExt, AllocError, Flags}, - error::{self, Error}, + bindings, init::{self, InPlaceInit, Init, PinInit}, try_init, types::{ForeignOwnable, Opaque}, @@ -209,28 +210,6 @@ pub fn new(contents: T, flags: Flags) -> Result { // `Arc` object. Ok(unsafe { Self::from_inner(Box::leak(inner).into()) }) } - - /// Use the given initializer to in-place initialize a `T`. - /// - /// If `T: !Unpin` it will not be able to move afterwards. - #[inline] - pub fn pin_init(init: impl PinInit, flags: Flags) -> error::Result - where - Error: From, - { - UniqueArc::pin_init(init, flags).map(|u| u.into()) - } - - /// Use the given initializer to in-place initialize a `T`. - /// - /// This is equivalent to [`Arc::pin_init`], since an [`Arc`] is always pinned. - #[inline] - pub fn init(init: impl Init, flags: Flags) -> error::Result - where - Error: From, - { - UniqueArc::init(init, flags).map(|u| u.into()) - } } impl Arc { diff --git a/rust/kernel/types.rs b/rust/kernel/types.rs index bd189d646adb..9e7ca066355c 100644 --- a/rust/kernel/types.rs +++ b/rust/kernel/types.rs @@ -7,8 +7,9 @@ use core::{ cell::UnsafeCell, marker::{PhantomData, PhantomPinned}, - mem::MaybeUninit, + mem::{ManuallyDrop, MaybeUninit}, ops::{Deref, DerefMut}, + pin::Pin, ptr::NonNull, }; @@ -26,7 +27,10 @@ pub trait ForeignOwnable: Sized { /// Converts a Rust-owned object to a foreign-owned one. /// - /// The foreign representation is a pointer to void. + /// The foreign representation is a pointer to void. There are no guarantees for this pointer. + /// For example, it might be invalid, dangling or pointing to uninitialized memory. Using it in + /// any way except for [`ForeignOwnable::from_foreign`], [`ForeignOwnable::borrow`], + /// [`ForeignOwnable::try_from_foreign`] can result in undefined behavior. fn into_foreign(self) -> *const core::ffi::c_void; /// Borrows a foreign-owned object. @@ -89,6 +93,32 @@ unsafe fn from_foreign(ptr: *const core::ffi::c_void) -> Self { } } +impl ForeignOwnable for Pin> { + type Borrowed<'a> = Pin<&'a T>; + + fn into_foreign(self) -> *const core::ffi::c_void { + // SAFETY: We are still treating the box as pinned. + Box::into_raw(unsafe { Pin::into_inner_unchecked(self) }) as _ + } + + unsafe fn borrow<'a>(ptr: *const core::ffi::c_void) -> Pin<&'a T> { + // SAFETY: The safety requirements for this function ensure that the object is still alive, + // so it is safe to dereference the raw pointer. + // The safety requirements of `from_foreign` also ensure that the object remains alive for + // the lifetime of the returned value. + let r = unsafe { &*ptr.cast() }; + + // SAFETY: This pointer originates from a `Pin>`. + unsafe { Pin::new_unchecked(r) } + } + + unsafe fn from_foreign(ptr: *const core::ffi::c_void) -> Self { + // SAFETY: The safety requirements of this function ensure that `ptr` comes from a previous + // call to `Self::into_foreign`. + unsafe { Pin::new_unchecked(Box::from_raw(ptr as _)) } + } +} + impl ForeignOwnable for () { type Borrowed<'a> = (); @@ -366,6 +396,35 @@ pub unsafe fn from_raw(ptr: NonNull) -> Self { _p: PhantomData, } } + + /// Consumes the `ARef`, returning a raw pointer. 
+ /// + /// This function does not change the refcount. After calling this function, the caller is + /// responsible for the refcount previously managed by the `ARef`. + /// + /// # Examples + /// + /// ``` + /// use core::ptr::NonNull; + /// use kernel::types::{ARef, AlwaysRefCounted}; + /// + /// struct Empty {} + /// + /// unsafe impl AlwaysRefCounted for Empty { + /// fn inc_ref(&self) {} + /// unsafe fn dec_ref(_obj: NonNull) {} + /// } + /// + /// let mut data = Empty {}; + /// let ptr = NonNull::::new(&mut data as *mut _).unwrap(); + /// let data_ref: ARef = unsafe { ARef::from_raw(ptr) }; + /// let raw_ptr: NonNull = ARef::into_raw(data_ref); + /// + /// assert_eq!(ptr, raw_ptr); + /// ``` + pub fn into_raw(me: Self) -> NonNull { + ManuallyDrop::new(me).ptr + } } impl Clone for ARef { diff --git a/rust/macros/lib.rs b/rust/macros/lib.rs index 5be0cb9db3ee..a626b1145e5c 100644 --- a/rust/macros/lib.rs +++ b/rust/macros/lib.rs @@ -2,6 +2,10 @@ //! Crate for all kernel procedural macros. +// When fixdep scans this, it will find this string `CONFIG_RUSTC_VERSION_TEXT` +// and thus add a dependency on `include/config/RUSTC_VERSION_TEXT`, which is +// touched by Kconfig when the version string from the compiler changes. + #[macro_use] mod quote; mod concat_idents; diff --git a/rust/macros/module.rs b/rust/macros/module.rs index 7a5b899e47b7..aef3b132f32b 100644 --- a/rust/macros/module.rs +++ b/rust/macros/module.rs @@ -260,6 +260,12 @@ mod __module_init {{ unsafe {{ __init() }} }} + #[cfg(MODULE)] + #[doc(hidden)] + #[used] + #[link_section = \".init.data\"] + static __UNIQUE_ID___addressable_init_module: unsafe extern \"C\" fn() -> i32 = init_module; + #[cfg(MODULE)] #[doc(hidden)] #[no_mangle] @@ -273,6 +279,12 @@ mod __module_init {{ unsafe {{ __exit() }} }} + #[cfg(MODULE)] + #[doc(hidden)] + #[used] + #[link_section = \".exit.data\"] + static __UNIQUE_ID___addressable_cleanup_module: extern \"C\" fn() = cleanup_module; + // Built-in modules are initialized through an initcall pointer // and the identifiers need to be unique. #[cfg(not(MODULE))] diff --git a/scripts/Kconfig.include b/scripts/Kconfig.include index 3500a3d62f0d..785a491e5996 100644 --- a/scripts/Kconfig.include +++ b/scripts/Kconfig.include @@ -64,3 +64,11 @@ ld-version := $(shell,set -- $(ld-info) && echo $2) cc-option-bit = $(if-success,$(CC) -Werror $(1) -E -x c /dev/null -o /dev/null,$(1)) m32-flag := $(cc-option-bit,-m32) m64-flag := $(cc-option-bit,-m64) + +# $(rustc-option,) +# Return y if the Rust compiler supports , n otherwise +# Calls to this should be guarded so that they are not evaluated if +# CONFIG_RUST_IS_AVAILABLE is not set. +# If you are testing for unstable features, consider testing RUSTC_VERSION +# instead, as features may have different completeness while available. +rustc-option = $(success,trap "rm -rf .tmp_$$" EXIT; mkdir .tmp_$$; $(RUSTC) $(1) --crate-type=rlib /dev/null --out-dir=.tmp_$$ -o .tmp_$$/tmp.rlib) diff --git a/scripts/Makefile.build b/scripts/Makefile.build index 8403eba15457..8f423a1faf50 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -273,10 +273,15 @@ rust_common_cmd = \ # would not match each other. 
 quiet_cmd_rustc_o_rs = $(RUSTC_OR_CLIPPY_QUIET) $(quiet_modtag) $@
-      cmd_rustc_o_rs = $(rust_common_cmd) --emit=obj=$@ $<
+      cmd_rustc_o_rs = $(rust_common_cmd) --emit=obj=$@ $< $(cmd_objtool)
+
+define rule_rustc_o_rs
+	$(call cmd_and_fixdep,rustc_o_rs)
+	$(call cmd,gen_objtooldep)
+endef
 
 $(obj)/%.o: $(obj)/%.rs FORCE
-	+$(call if_changed_dep,rustc_o_rs)
+	+$(call if_changed_rule,rustc_o_rs)
 
 quiet_cmd_rustc_rsi_rs = $(RUSTC_OR_CLIPPY_QUIET) $(quiet_modtag) $@
       cmd_rustc_rsi_rs = \
diff --git a/scripts/Makefile.compiler b/scripts/Makefile.compiler
index 92be0c9a13ee..057305eae85c 100644
--- a/scripts/Makefile.compiler
+++ b/scripts/Makefile.compiler
@@ -72,3 +72,18 @@ clang-min-version = $(call test-ge, $(CONFIG_CLANG_VERSION), $1)
 # ld-option
 # Usage: KBUILD_LDFLAGS += $(call ld-option, -X, -Y)
 ld-option = $(call try-run, $(LD) $(KBUILD_LDFLAGS) $(1) -v,$(1),$(2),$(3))
+
+# __rustc-option
+# Usage: MY_RUSTFLAGS += $(call __rustc-option,$(RUSTC),$(MY_RUSTFLAGS),-Cinstrument-coverage,-Zinstrument-coverage)
+__rustc-option = $(call try-run,\
+	$(1) $(2) $(3) --crate-type=rlib /dev/null --out-dir=$$TMPOUT -o "$$TMP",$(3),$(4))
+
+# rustc-option
+# Usage: rustflags-y += $(call rustc-option,-Cinstrument-coverage,-Zinstrument-coverage)
+rustc-option = $(call __rustc-option, $(RUSTC),\
+	$(KBUILD_RUSTFLAGS),$(1),$(2))
+
+# rustc-option-yn
+# Usage: flag := $(call rustc-option-yn,-Cinstrument-coverage)
+rustc-option-yn = $(call try-run,\
+	$(RUSTC) $(KBUILD_RUSTFLAGS) $(1) --crate-type=rlib /dev/null --out-dir=$$TMPOUT -o "$$TMP",y,n)
diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan
index aab4154af00a..693dbbebebba 100644
--- a/scripts/Makefile.kasan
+++ b/scripts/Makefile.kasan
@@ -12,6 +12,11 @@ endif
 KASAN_SHADOW_OFFSET ?= $(CONFIG_KASAN_SHADOW_OFFSET)
 
 cc-param = $(call cc-option, -mllvm -$(1), $(call cc-option, --param $(1)))
+rustc-param = $(call rustc-option, -Cllvm-args=-$(1),)
+
+check-args = $(foreach arg,$(2),$(call $(1),$(arg)))
+
+kasan_params :=
 
 ifdef CONFIG_KASAN_STACK
 	stack_enable := 1
@@ -41,39 +46,59 @@ CFLAGS_KASAN := $(call cc-option, -fsanitize=kernel-address \
 		$(call cc-option, -fsanitize=kernel-address \
 		-mllvm -asan-mapping-offset=$(KASAN_SHADOW_OFFSET)))
 
-# Now, add other parameters enabled similarly in both GCC and Clang.
-# As some of them are not supported by older compilers, use cc-param.
-CFLAGS_KASAN += $(call cc-param,asan-instrumentation-with-call-threshold=$(call_threshold)) \
-		$(call cc-param,asan-stack=$(stack_enable)) \
-		$(call cc-param,asan-instrument-allocas=1) \
-		$(call cc-param,asan-globals=1)
+# The minimum supported `rustc` version has a minimum supported LLVM
+# version late enough that we can assume support for -asan-mapping-offset.
+RUSTFLAGS_KASAN := -Zsanitizer=kernel-address \
+		   -Zsanitizer-recover=kernel-address \
+		   -Cllvm-args=-asan-mapping-offset=$(KASAN_SHADOW_OFFSET)
+
+# Now, add other parameters enabled similarly in GCC, Clang, and rustc.
+# As some of them are not supported by older compilers, these will be filtered
+# through `cc-param` or `rustc-param` as applicable.
+kasan_params += asan-instrumentation-with-call-threshold=$(call_threshold) \
+		asan-stack=$(stack_enable) \
+		asan-instrument-allocas=1 \
+		asan-globals=1
 
 # Instrument memcpy/memset/memmove calls by using instrumented __asan_mem*()
 # instead. With compilers that don't support this option, compiler-inserted
 # memintrinsics won't be checked by KASAN on GENERIC_ENTRY architectures.
-CFLAGS_KASAN += $(call cc-param,asan-kernel-mem-intrinsic-prefix=1)
+kasan_params += asan-kernel-mem-intrinsic-prefix=1
 
 endif # CONFIG_KASAN_GENERIC
 
 ifdef CONFIG_KASAN_SW_TAGS
 
+CFLAGS_KASAN := -fsanitize=kernel-hwaddress
+
+# This sets the flags that will enable SW_TAGS KASAN once it is enabled in Rust. These
+# flags do not work today, which is guarded against in the dependencies of CONFIG_RUST.
+RUSTFLAGS_KASAN := -Zsanitizer=kernel-hwaddress \
+		   -Zsanitizer-recover=kernel-hwaddress
+
 ifdef CONFIG_KASAN_INLINE
-	instrumentation_flags := $(call cc-param,hwasan-mapping-offset=$(KASAN_SHADOW_OFFSET))
+	kasan_params += hwasan-mapping-offset=$(KASAN_SHADOW_OFFSET)
 else
-	instrumentation_flags := $(call cc-param,hwasan-instrument-with-calls=1)
+	kasan_params += hwasan-instrument-with-calls=1
 endif
 
-CFLAGS_KASAN := -fsanitize=kernel-hwaddress \
-		$(call cc-param,hwasan-instrument-stack=$(stack_enable)) \
-		$(call cc-param,hwasan-use-short-granules=0) \
-		$(call cc-param,hwasan-inline-all-checks=0) \
-		$(instrumentation_flags)
+kasan_params += hwasan-instrument-stack=$(stack_enable) \
+		hwasan-use-short-granules=0 \
+		hwasan-inline-all-checks=0
 
 # Instrument memcpy/memset/memmove calls by using instrumented __hwasan_mem*().
 ifeq ($(call clang-min-version, 150000)$(call gcc-min-version, 130000),y)
-	CFLAGS_KASAN += $(call cc-param,hwasan-kernel-mem-intrinsic-prefix=1)
+	kasan_params += hwasan-kernel-mem-intrinsic-prefix=1
 endif
 
 endif # CONFIG_KASAN_SW_TAGS
 
-export CFLAGS_KASAN CFLAGS_KASAN_NOSANITIZE
+# Add all as-supported KASAN LLVM parameters requested by the configuration.
+CFLAGS_KASAN += $(call check-args, cc-param, $(kasan_params))
+
+ifdef CONFIG_RUST
+	# Avoid calling `rustc-param` unless Rust is enabled.
+	RUSTFLAGS_KASAN += $(call check-args, rustc-param, $(kasan_params))
+endif # CONFIG_RUST
+
+export CFLAGS_KASAN CFLAGS_KASAN_NOSANITIZE RUSTFLAGS_KASAN
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 29bfd6ed3e3f..01a9f567d5af 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -146,6 +146,9 @@ ifneq ($(CONFIG_KASAN_HW_TAGS),y)
 _c_flags += $(if $(patsubst n%,, \
 		$(KASAN_SANITIZE_$(target-stem).o)$(KASAN_SANITIZE)$(is-kernel-object)), \
 		$(CFLAGS_KASAN), $(CFLAGS_KASAN_NOSANITIZE))
+_rust_flags += $(if $(patsubst n%,, \
+		$(KASAN_SANITIZE_$(target-stem).o)$(KASAN_SANITIZE)$(is-kernel-object)), \
+		$(RUSTFLAGS_KASAN))
 endif
 endif
 
diff --git a/scripts/generate_rust_target.rs b/scripts/generate_rust_target.rs
index 404edf7587e0..0d00ac3723b5 100644
--- a/scripts/generate_rust_target.rs
+++ b/scripts/generate_rust_target.rs
@@ -20,12 +20,28 @@ enum Value {
     Boolean(bool),
     Number(i32),
     String(String),
+    Array(Vec<Value>),
     Object(Object),
 }
 
 type Object = Vec<(String, Value)>;
 
-/// Minimal "almost JSON" generator (e.g. no `null`s, no arrays, no escaping),
+fn comma_sep<T>(
+    seq: &[T],
+    formatter: &mut Formatter<'_>,
+    f: impl Fn(&mut Formatter<'_>, &T) -> Result,
+) -> Result {
+    if let [ref rest @ .., ref last] = seq[..] {
+        for v in rest {
+            f(formatter, v)?;
+            formatter.write_str(",")?;
+        }
+        f(formatter, last)?;
+    }
+    Ok(())
+}
+
+/// Minimal "almost JSON" generator (e.g. no `null`s, no escaping),
 /// enough for this purpose.
 impl Display for Value {
     fn fmt(&self, formatter: &mut Formatter<'_>) -> Result {
@@ -33,59 +49,67 @@ fn fmt(&self, formatter: &mut Formatter<'_>) -> Result {
             Value::Boolean(boolean) => write!(formatter, "{}", boolean),
             Value::Number(number) => write!(formatter, "{}", number),
             Value::String(string) => write!(formatter, "\"{}\"", string),
+            Value::Array(values) => {
+                formatter.write_str("[")?;
+                comma_sep(&values[..], formatter, |formatter, v| v.fmt(formatter))?;
+                formatter.write_str("]")
+            }
             Value::Object(object) => {
                 formatter.write_str("{")?;
-                if let [ref rest @ .., ref last] = object[..] {
-                    for (key, value) in rest {
-                        write!(formatter, "\"{}\": {},", key, value)?;
-                    }
-                    write!(formatter, "\"{}\": {}", last.0, last.1)?;
-                }
+                comma_sep(&object[..], formatter, |formatter, v| {
+                    write!(formatter, "\"{}\": {}", v.0, v.1)
+                })?;
                 formatter.write_str("}")
             }
         }
     }
 }
 
+impl From<bool> for Value {
+    fn from(value: bool) -> Self {
+        Self::Boolean(value)
+    }
+}
+
+impl From<i32> for Value {
+    fn from(value: i32) -> Self {
+        Self::Number(value)
+    }
+}
+
+impl From<String> for Value {
+    fn from(value: String) -> Self {
+        Self::String(value)
+    }
+}
+
+impl From<&str> for Value {
+    fn from(value: &str) -> Self {
+        Self::String(value.to_string())
+    }
+}
+
+impl From<Object> for Value {
+    fn from(object: Object) -> Self {
+        Self::Object(object)
+    }
+}
+
+impl<T: Into<Value>, const N: usize> From<[T; N]> for Value {
+    fn from(i: [T; N]) -> Self {
+        Self::Array(i.into_iter().map(|v| v.into()).collect())
+    }
+}
+
 struct TargetSpec(Object);
 
 impl TargetSpec {
     fn new() -> TargetSpec {
         TargetSpec(Vec::new())
     }
-}
 
-trait Push<T> {
-    fn push(&mut self, key: &str, value: T);
-}
-
-impl Push<bool> for TargetSpec {
-    fn push(&mut self, key: &str, value: bool) {
-        self.0.push((key.to_string(), Value::Boolean(value)));
-    }
-}
-
-impl Push<i32> for TargetSpec {
-    fn push(&mut self, key: &str, value: i32) {
-        self.0.push((key.to_string(), Value::Number(value)));
-    }
-}
-
-impl Push<String> for TargetSpec {
-    fn push(&mut self, key: &str, value: String) {
-        self.0.push((key.to_string(), Value::String(value)));
-    }
-}
-
-impl Push<&str> for TargetSpec {
-    fn push(&mut self, key: &str, value: &str) {
-        self.push(key, value.to_string());
-    }
-}
-
-impl Push<Object> for TargetSpec {
-    fn push(&mut self, key: &str, value: Object) {
-        self.0.push((key.to_string(), Value::Object(value)));
+    fn push(&mut self, key: &str, value: impl Into<Value>) {
+        self.0.push((key.to_string(), value.into()));
     }
 }
 
@@ -164,10 +188,26 @@ fn main() {
     );
     let mut features = "-mmx,+soft-float".to_string();
     if cfg.has("MITIGATION_RETPOLINE") {
+        // The kernel uses `-mretpoline-external-thunk` (for Clang), which Clang maps to the
+        // target feature of the same name plus the other two target features in
+        // `clang/lib/Driver/ToolChains/Arch/X86.cpp`. These should be eventually enabled via
+        // `-Ctarget-feature` when `rustc` starts recognizing them (or via a new dedicated
+        // flag); see https://github.com/rust-lang/rust/issues/116852.
         features += ",+retpoline-external-thunk";
+        features += ",+retpoline-indirect-branches";
+        features += ",+retpoline-indirect-calls";
+    }
+    if cfg.has("MITIGATION_SLS") {
+        // The kernel uses `-mharden-sls=all`, which Clang maps to both these target features in
+        // `clang/lib/Driver/ToolChains/Arch/X86.cpp`. These should be eventually enabled via
+        // `-Ctarget-feature` when `rustc` starts recognizing them (or via a new dedicated
+        // flag); see https://github.com/rust-lang/rust/issues/116851.
diff --git a/scripts/rustc-version.sh b/scripts/rustc-version.sh
new file mode 100755
index 000000000000..4e22593e2eab
--- /dev/null
+++ b/scripts/rustc-version.sh
@@ -0,0 +1,26 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+#
+# Usage: $ ./rustc-version.sh rustc
+#
+# Print the Rust compiler version in a 6 or 7-digit form.
+
+# Convert the version string x.y.z to a canonical up-to-7-digits form.
+#
+# Note that this function uses one more digit (compared to other
+# instances in other version scripts) to give a bit more space to
+# `rustc` since it will reach 1.100.0 in late 2026.
+get_canonical_version()
+{
+	IFS=.
+	set -- $1
+	echo $((100000 * $1 + 100 * $2 + $3))
+}
+
+if output=$("$@" --version 2>/dev/null); then
+	set -- $output
+	get_canonical_version $2
+else
+	echo 0
+	exit 1
+fi
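As a worked example of the script above: `rustc 1.79.0` canonicalizes to `100000 * 1 + 100 * 79 + 0 = 107900`, and `1.100.0` to `110000`, which is why the seventh digit is reserved. A rough Rust equivalent of `get_canonical_version()` (illustrative; `canonical_version` is a made-up name):

    // Maps "x.y.z" to 100000 * x + 100 * y + z, like rustc-version.sh does.
    fn canonical_version(version: &str) -> Option<u32> {
        let mut parts = version.split('.').map(|p| p.parse::<u32>().ok());
        let (x, y, z) = (parts.next()??, parts.next()??, parts.next()??);
        Some(100000 * x + 100 * y + z)
    }

    fn main() {
        assert_eq!(canonical_version("1.79.0"), Some(107900));
        assert_eq!(canonical_version("1.100.0"), Some(110000));
        // Mirrors the script's `echo 0` fallback for unparsable output.
        assert_eq!(canonical_version("unknown"), None);
    }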
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 01237d167223..d086f207a3d3 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -177,6 +177,52 @@ static bool is_sibling_call(struct instruction *insn)
 	return (is_static_jump(insn) && insn_call_dest(insn));
 }
 
+/*
+ * Checks if a string ends with another.
+ */
+static bool str_ends_with(const char *s, const char *sub)
+{
+	const int slen = strlen(s);
+	const int sublen = strlen(sub);
+
+	if (sublen > slen)
+		return false;
+
+	return !memcmp(s + slen - sublen, sub, sublen);
+}
+
+/*
+ * Checks if a function is a Rust "noreturn" one.
+ */
+static bool is_rust_noreturn(const struct symbol *func)
+{
+	/*
+	 * If it does not start with "_R", then it is not a Rust symbol.
+	 */
+	if (strncmp(func->name, "_R", 2))
+		return false;
+
+	/*
+	 * These are just heuristics -- we do not control the precise symbol
+	 * name, due to the crate disambiguators (which depend on the compiler)
+	 * as well as changes to the source code itself between versions (since
+	 * these come from the Rust standard library).
+	 */
+	return str_ends_with(func->name, "_4core5sliceSp15copy_from_slice17len_mismatch_fail") ||
+	       str_ends_with(func->name, "_4core6option13unwrap_failed") ||
+	       str_ends_with(func->name, "_4core6result13unwrap_failed") ||
+	       str_ends_with(func->name, "_4core9panicking5panic") ||
+	       str_ends_with(func->name, "_4core9panicking9panic_fmt") ||
+	       str_ends_with(func->name, "_4core9panicking14panic_explicit") ||
+	       str_ends_with(func->name, "_4core9panicking14panic_nounwind") ||
+	       str_ends_with(func->name, "_4core9panicking18panic_bounds_check") ||
+	       str_ends_with(func->name, "_4core9panicking19assert_failed_inner") ||
+	       str_ends_with(func->name, "_4core9panicking36panic_misaligned_pointer_dereference") ||
+	       strstr(func->name, "_4core9panicking11panic_const24panic_const_") ||
+	       (strstr(func->name, "_4core5slice5index24slice_") &&
+		str_ends_with(func->name, "_fail"));
+}
+
 /*
  * This checks to see if the given function is a "noreturn" function.
  *
@@ -202,10 +248,14 @@ static bool __dead_end_function(struct objtool_file *file, struct symbol *func,
 	if (!func)
 		return false;
 
-	if (func->bind == STB_GLOBAL || func->bind == STB_WEAK)
+	if (func->bind == STB_GLOBAL || func->bind == STB_WEAK) {
+		if (is_rust_noreturn(func))
+			return true;
+
 		for (i = 0; i < ARRAY_SIZE(global_noreturns); i++)
 			if (!strcmp(func->name, global_noreturns[i]))
 				return true;
+	}
 
 	if (func->bind == STB_WEAK)
 		return false;
diff --git a/tools/objtool/noreturns.h b/tools/objtool/noreturns.h
index 1e8141ef1b15..e7da92489167 100644
--- a/tools/objtool/noreturns.h
+++ b/tools/objtool/noreturns.h
@@ -39,6 +39,8 @@ NORETURN(panic)
 NORETURN(panic_smp_self_stop)
 NORETURN(rest_init)
 NORETURN(rewind_stack_and_make_dead)
+NORETURN(rust_begin_unwind)
+NORETURN(rust_helper_BUG)
 NORETURN(sev_es_terminate)
 NORETURN(snp_abort)
 NORETURN(start_kernel)
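Because Rust symbol names embed compiler-dependent disambiguators, `is_rust_noreturn()` above matches stable suffixes instead of whole names. A small sketch of that heuristic, in Rust for illustration (the sample mangled symbol is hypothetical; real v0-mangled names differ):

    // Same idea as objtool's C implementation: a "_R"-prefixed (Rust v0
    // mangled) symbol is noreturn if it ends in one of the known suffixes.
    fn is_rust_noreturn(name: &str) -> bool {
        const SUFFIXES: &[&str] = &[
            "_4core9panicking5panic",
            "_4core9panicking9panic_fmt",
            "_4core6option13unwrap_failed",
            "_4core6result13unwrap_failed",
        ];
        name.starts_with("_R") && SUFFIXES.iter().any(|s| name.ends_with(s))
    }

    fn main() {
        // Hypothetical mangled name, for illustration only.
        assert!(is_rust_noreturn("_RNvNtCs1234_4core9panicking5panic"));
        // Non-Rust symbols fall through to the noreturns.h list instead.
        assert!(!is_rust_noreturn("panic"));
    }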