# Conflicts:
#	Makefile
#	include/uapi/linux/android/binder.h
This commit is contained in:
Stephen Rothwell 2022-06-28 15:41:14 +10:00
commit 6d2c4faac6
180 changed files with 37618 additions and 67 deletions

5
.gitignore vendored
View File

@ -37,6 +37,7 @@
*.o
*.o.*
*.patch
*.rmeta
*.s
*.so
*.so.dbg
@ -97,6 +98,7 @@ modules.order
!.gitattributes
!.gitignore
!.mailmap
!.rustfmt.toml
#
# Generated include files
@ -162,3 +164,6 @@ x509.genkey
# Documentation toolchain
sphinx_*/
# Rust analyzer configuration
/rust-project.json

12
.rustfmt.toml Normal file
View File

@ -0,0 +1,12 @@
edition = "2021"
newline_style = "Unix"
# Unstable options that help catch some mistakes in formatting and that we may want to enable
# when they become stable.
#
# They are kept here since they are useful to run from time to time.
#format_code_in_doc_comments = true
#reorder_impl_items = true
#comment_width = 100
#wrap_comments = true
#normalize_comments = true

View File

@ -625,6 +625,16 @@ Examples::
%p4cc Y10 little-endian (0x20303159)
%p4cc NV12 big-endian (0xb231564e)
Rust
----
::
%pA
Only intended to be used from Rust code to format ``core::fmt::Arguments``.
Do *not* use it from C.
Thanks
======

View File

@ -14,6 +14,9 @@ when it is embedded in source files.
reasons. The kernel source contains tens of thousands of kernel-doc
comments. Please stick to the style described here.
.. note:: kernel-doc does not cover Rust code: please see
Documentation/rust/general-information.rst instead.
The kernel-doc structure is extracted from the comments, and proper
`Sphinx C Domain`_ function and type descriptions with anchors are
generated from them. The descriptions are filtered for special kernel-doc

View File

@ -82,6 +82,7 @@ merged much easier.
maintainer/index
fault-injection/index
livepatch/index
rust/index
Kernel API documentation

View File

@ -48,6 +48,10 @@ KCFLAGS
-------
Additional options to the C compiler (for built-in and modules).
KRUSTFLAGS
----------
Additional options to the Rust compiler (for built-in and modules).
CFLAGS_KERNEL
-------------
Additional options for $(CC) when used to compile
@ -57,6 +61,15 @@ CFLAGS_MODULE
-------------
Additional module specific options to use for $(CC).
RUSTFLAGS_KERNEL
----------------
Additional options for $(RUSTC) when used to compile
code that is compiled as built-in.
RUSTFLAGS_MODULE
----------------
Additional module specific options to use for $(RUSTC).
LDFLAGS_MODULE
--------------
Additional options used for $(LD) when linking modules.
@ -69,6 +82,10 @@ HOSTCXXFLAGS
------------
Additional flags to be passed to $(HOSTCXX) when building host programs.
HOSTRUSTFLAGS
-------------
Additional flags to be passed to $(HOSTRUSTC) when building host programs.
HOSTLDFLAGS
-----------
Additional flags to be passed when linking host programs.

View File

@ -29,8 +29,9 @@ This document describes the Linux kernel Makefiles.
--- 4.1 Simple Host Program
--- 4.2 Composite Host Programs
--- 4.3 Using C++ for host programs
--- 4.4 Controlling compiler options for host programs
--- 4.5 When host programs are actually built
--- 4.4 Using Rust for host programs
--- 4.5 Controlling compiler options for host programs
--- 4.6 When host programs are actually built
=== 5 Userspace Program support
--- 5.1 Simple Userspace Program
@ -835,7 +836,24 @@ Both possibilities are described in the following.
qconf-cxxobjs := qconf.o
qconf-objs := check.o
4.4 Controlling compiler options for host programs
4.4 Using Rust for host programs
--------------------------------
Kbuild offers support for host programs written in Rust. However,
since a Rust toolchain is not mandatory for kernel compilation,
it may only be used in scenarios where Rust is required to be
available (e.g. when ``CONFIG_RUST`` is enabled).
Example::
hostprogs := target
target-rust := y
Kbuild will compile ``target`` using ``target.rs`` as the crate root,
located in the same directory as the ``Makefile``. The crate may
consist of several source files (see ``samples/rust/hostprogs``).
4.5 Controlling compiler options for host programs
--------------------------------------------------
When compiling host programs, it is possible to set specific flags.
@ -867,7 +885,7 @@ Both possibilities are described in the following.
When linking qconf, it will be passed the extra option
"-L$(QTDIR)/lib".
4.5 When host programs are actually built
4.6 When host programs are actually built
-----------------------------------------
Kbuild will only build host-programs when they are referenced
@ -1181,6 +1199,17 @@ When kbuild executes, the following steps are followed (roughly):
The first example utilises the trick that a config option expands
to 'y' when selected.
KBUILD_RUSTFLAGS
$(RUSTC) compiler flags
Default value - see top level Makefile
Append or modify as required per architecture.
Often, the KBUILD_RUSTFLAGS variable depends on the configuration.
Note that target specification file generation (for ``--target``)
is handled in ``scripts/generate_rust_target.rs``.
KBUILD_AFLAGS_KERNEL
Assembler options specific for built-in
@ -1208,6 +1237,19 @@ When kbuild executes, the following steps are followed (roughly):
are used for $(CC).
From commandline CFLAGS_MODULE shall be used (see kbuild.rst).
KBUILD_RUSTFLAGS_KERNEL
$(RUSTC) options specific for built-in
$(KBUILD_RUSTFLAGS_KERNEL) contains extra Rust compiler flags used to
compile resident kernel code.
KBUILD_RUSTFLAGS_MODULE
Options for $(RUSTC) when building modules
$(KBUILD_RUSTFLAGS_MODULE) is used to add arch-specific options that
are used for $(RUSTC).
From commandline RUSTFLAGS_MODULE shall be used (see kbuild.rst).
KBUILD_LDFLAGS_MODULE
Options for $(LD) when linking modules

View File

@ -31,6 +31,8 @@ you probably needn't concern yourself with pcmciautils.
====================== =============== ========================================
GNU C 5.1 gcc --version
Clang/LLVM (optional) 11.0.0 clang --version
Rust (optional) 1.60.0 rustc --version
bindgen (optional) 0.56.0 bindgen --version
GNU make 3.81 make --version
bash 4.2 bash --version
binutils 2.23 ld -v
@ -80,6 +82,29 @@ kernels. Older releases aren't guaranteed to work, and we may drop workarounds
from the kernel that were used to support older versions. Please see additional
docs on :ref:`Building Linux with Clang/LLVM <kbuild_llvm>`.
Rust (optional)
---------------
A particular version of the Rust toolchain is required. Newer versions may or
may not work because the kernel depends on some unstable Rust features, for
the moment.
Each Rust toolchain comes with several "components", some of which are required
(like ``rustc``) and some that are optional. The ``rust-src`` component (which
is optional) needs to be installed to build the kernel. Other components are
useful for developing.
Please see Documentation/rust/quick-start.rst for instructions on how to
satisfy the build requirements of Rust support. In particular, the ``Makefile``
target ``rustavailable`` is useful to check why the Rust toolchain may not
be detected.
bindgen (optional)
------------------
``bindgen`` is used to generate the Rust bindings to the C side of the kernel.
It depends on ``libclang``.
Make
----
@ -348,6 +373,12 @@ Sphinx
Please see :ref:`sphinx_install` in :ref:`Documentation/doc-guide/sphinx.rst <sphinxdoc>`
for details about Sphinx requirements.
rustdoc
-------
``rustdoc`` is used to generate the documentation for Rust code. Please see
Documentation/rust/general-information.rst for more information.
Getting updated software
========================
@ -364,6 +395,16 @@ Clang/LLVM
- :ref:`Getting LLVM <getting_llvm>`.
Rust
----
- Documentation/rust/quick-start.rst.
bindgen
-------
- Documentation/rust/quick-start.rst.
Make
----

View File

@ -0,0 +1,25 @@
.. SPDX-License-Identifier: GPL-2.0
Arch Support
============
Currently, the Rust compiler (``rustc``) uses LLVM for code generation,
which limits the supported architectures that can be targeted. In addition,
support for building the kernel with LLVM/Clang varies (please see
Documentation/kbuild/llvm.rst). This support is needed for ``bindgen``
which uses ``libclang``.
Below is a general summary of architectures that currently work. Level of
support corresponds to ``S`` values in the ``MAINTAINERS`` file.
============ ================ ==============================================
Architecture Level of support Constraints
============ ================ ==============================================
``arm`` Maintained ``armv6`` and compatible only,
``RUST_OPT_LEVEL >= 2``.
``arm64`` Maintained None.
``powerpc`` Maintained ``ppc64le`` only, ``RUST_OPT_LEVEL < 2``
requires ``CONFIG_THREAD_SHIFT=15``.
``riscv`` Maintained ``riscv64`` only.
``x86`` Maintained ``x86_64`` only.
============ ================ ==============================================

View File

@ -0,0 +1,216 @@
.. SPDX-License-Identifier: GPL-2.0
Coding Guidelines
=================
This document describes how to write Rust code in the kernel.
Style & formatting
------------------
The code should be formatted using ``rustfmt``. In this way, a person
contributing from time to time to the kernel does not need to learn and
remember one more style guide. More importantly, reviewers and maintainers
do not need to spend time pointing out style issues anymore, and thus
fewer patch round trips may be needed to land a change.
.. note:: Conventions on comments and documentation are not checked by
``rustfmt``. Thus those still need to be taken care of.
The default settings of ``rustfmt`` are used. This means the idiomatic Rust
style is followed. For instance, 4 spaces are used for indentation rather
than tabs.
It is convenient to instruct editors/IDEs to format while typing,
when saving or at commit time. However, if for some reason reformatting
the entire kernel Rust sources is needed at some point, the following can be
run::
make LLVM=1 rustfmt
It is also possible to check if everything is formatted (printing a diff
otherwise), for instance for a CI, with::
make LLVM=1 rustfmtcheck
Like ``clang-format`` for the rest of the kernel, ``rustfmt`` works on
individual files, and does not require a kernel configuration. Sometimes it may
even work with broken code.
Comments
--------
"Normal" comments (i.e. ``//``, rather than code documentation which starts
with ``///`` or ``//!``) are written in Markdown the same way as documentation
comments are, even though they will not be rendered. This improves consistency,
simplifies the rules and allows moving content between the two kinds of
comments more easily. For instance:
.. code-block:: rust
// `object` is ready to be handled now.
f(object);
Furthermore, just like documentation, comments are capitalized at the beginning
of a sentence and ended with a period (even if it is a single sentence). This
includes ``// SAFETY:``, ``// TODO:`` and other "tagged" comments, e.g.:
.. code-block:: rust
// FIXME: The error should be handled properly.
Comments should not be used for documentation purposes: comments are intended
for implementation details, not users. This distinction is useful even if the
reader of the source file is both an implementor and a user of an API. In fact,
sometimes it is useful to use both comments and documentation at the same time.
For instance, for a ``TODO`` list or to comment on the documentation itself.
For the latter case, comments can be inserted in the middle; that is, closer to
the line of documentation to be commented. For any other case, comments are
written after the documentation, e.g.:
.. code-block:: rust
/// Returns a new [`Foo`].
///
/// # Examples
///
// TODO: Find a better example.
/// ```
/// let foo = f(42);
/// ```
// FIXME: Use fallible approach.
pub fn f(x: i32) -> Foo {
// ...
}
One special kind of comment is the ``// SAFETY:`` comment. These must appear
before every ``unsafe`` block, and they explain why the code inside the block is
correct/sound, i.e. why it cannot trigger undefined behavior in any case, e.g.:
.. code-block:: rust
// SAFETY: `p` is valid by the safety requirements.
unsafe { *p = 0; }
``// SAFETY:`` comments are not to be confused with the ``# Safety`` sections
in code documentation. ``# Safety`` sections specify the contract that callers
(for functions) or implementors (for traits) need to abide by. ``// SAFETY:``
comments show why a call (for functions) or implementation (for traits) actually
respects the preconditions stated in a ``# Safety`` section or the language
reference.
Code documentation
------------------
Rust kernel code is not documented like C kernel code (i.e. via kernel-doc).
Instead, the usual system for documenting Rust code is used: the ``rustdoc``
tool, which uses Markdown (a lightweight markup language).
To learn Markdown, there are many guides available out there. For instance,
the one at:
https://commonmark.org/help/
This is how a well-documented Rust function may look:
.. code-block:: rust
/// Returns the contained [`Some`] value, consuming the `self` value,
/// without checking that the value is not [`None`].
///
/// # Safety
///
/// Calling this method on [`None`] is *[undefined behavior]*.
///
/// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
///
/// # Examples
///
/// ```
/// let x = Some("air");
/// assert_eq!(unsafe { x.unwrap_unchecked() }, "air");
/// ```
pub unsafe fn unwrap_unchecked(self) -> T {
match self {
Some(val) => val,
// SAFETY: The safety contract must be upheld by the caller.
None => unsafe { hint::unreachable_unchecked() },
}
}
This example showcases a few ``rustdoc`` features and some conventions followed
in the kernel:
- The first paragraph must be a single sentence briefly describing what
the documented item does. Further explanations must go in extra paragraphs.
- Unsafe functions must document their safety preconditions under
a ``# Safety`` section.
- While not shown here, if a function may panic, the conditions under which
that happens must be described under a ``# Panics`` section.
Please note that panicking should be very rare and used only with a good
reason. In almost all cases, a fallible approach should be used, typically
returning a ``Result``.
- If providing examples of usage would help readers, they must be written in
a section called ``# Examples``.
- Rust items (functions, types, constants...) must be linked appropriately
(``rustdoc`` will create a link automatically).
- Any ``unsafe`` block must be preceded by a ``// SAFETY:`` comment
describing why the code inside is sound.
While sometimes the reason might look trivial and therefore unneeded,
writing these comments is not just a good way of documenting what has been
taken into account, but most importantly, it provides a way to know that
there are no *extra* implicit constraints.
To learn more about how to write documentation for Rust and extra features,
please take a look at the ``rustdoc`` book at:
https://doc.rust-lang.org/rustdoc/how-to-write-documentation.html
Naming
------
Rust kernel code follows the usual Rust naming conventions:
https://rust-lang.github.io/api-guidelines/naming.html
When existing C concepts (e.g. macros, functions, objects...) are wrapped into
a Rust abstraction, a name as close as reasonably possible to the C side should
be used in order to avoid confusion and to improve readability when switching
back and forth between the C and Rust sides. For instance, macros such as
``pr_info`` from C are named the same in the Rust side.
Having said that, casing should be adjusted to follow the Rust naming
conventions, and namespacing introduced by modules and types should not be
repeated in the item names. For instance, when wrapping constants like:
.. code-block:: c
#define GPIO_LINE_DIRECTION_IN 0
#define GPIO_LINE_DIRECTION_OUT 1
The equivalent in Rust may look like (ignoring documentation):
.. code-block:: rust
pub mod gpio {
pub enum LineDirection {
In = bindings::GPIO_LINE_DIRECTION_IN as _,
Out = bindings::GPIO_LINE_DIRECTION_OUT as _,
}
}
That is, the equivalent of ``GPIO_LINE_DIRECTION_IN`` would be referred to as
``gpio::LineDirection::In``. In particular, it should not be named
``gpio::gpio_line_direction::GPIO_LINE_DIRECTION_IN``.

View File

@ -0,0 +1,79 @@
.. SPDX-License-Identifier: GPL-2.0
General Information
===================
This document contains useful information to know when working with
the Rust support in the kernel.
Code documentation
------------------
Rust kernel code is documented using ``rustdoc``, its built-in documentation
generator.
The generated HTML docs include integrated search, linked items (e.g. types,
functions, constants), source code, etc. They may be read at (TODO: link when
in mainline and generated alongside the rest of the documentation):
http://kernel.org/
The docs can also be easily generated and read locally. This is quite fast
(same order as compiling the code itself) and no special tools or environment
are needed. This has the added advantage that they will be tailored to
the particular kernel configuration used. To generate them, use the ``rustdoc``
target with the same invocation used for compilation, e.g.::
make LLVM=1 rustdoc
To read the docs locally in your web browser, run e.g.::
xdg-open rust/doc/kernel/index.html
To learn about how to write the documentation, please see coding-guidelines.rst.
Extra lints
-----------
While ``rustc`` is a very helpful compiler, some extra lints and analyses are
available via ``clippy``, a Rust linter. To enable it, pass ``CLIPPY=1`` to
the same invocation used for compilation, e.g.::
make LLVM=1 CLIPPY=1
Please note that Clippy may change code generation, thus it should not be
enabled while building a production kernel.
Abstractions vs. bindings
-------------------------
Abstractions are Rust code wrapping kernel functionality from the C side.
In order to use functions and types from the C side, bindings are created.
Bindings are the declarations for Rust of those functions and types from
the C side.
For instance, one may write a ``Mutex`` abstraction in Rust which wraps
a ``struct mutex`` from the C side and calls its functions through the bindings.
Abstractions are not available for all the kernel internal APIs and concepts,
but it is intended that coverage is expanded as time goes on. "Leaf" modules
(e.g. drivers) should not use the C bindings directly. Instead, subsystems
should provide as-safe-as-possible abstractions as needed.
Conditional compilation
-----------------------
Rust code has access to conditional compilation based on the kernel
configuration:
.. code-block:: rust
#[cfg(CONFIG_X)] // Enabled (`y` or `m`)
#[cfg(CONFIG_X="y")] // Enabled as a built-in (`y`)
#[cfg(CONFIG_X="m")] // Enabled as a module (`m`)
#[cfg(not(CONFIG_X))] // Disabled

View File

@ -0,0 +1,22 @@
.. SPDX-License-Identifier: GPL-2.0
Rust
====
Documentation related to Rust within the kernel. To start using Rust
in the kernel, please read the quick-start.rst guide.
.. toctree::
:maxdepth: 1
quick-start
general-information
coding-guidelines
arch-support
.. only:: subproject and html
Indices
=======
* :ref:`genindex`

View File

@ -0,0 +1,232 @@
.. SPDX-License-Identifier: GPL-2.0
Quick Start
===========
This document describes how to get started with kernel development in Rust.
Requirements: Building
----------------------
This section explains how to fetch the tools needed for building.
Some of these requirements might be available from Linux distributions
under names like ``rustc``, ``rust-src``, ``rust-bindgen``, etc. However,
at the time of writing, they are likely not to be recent enough unless
the distribution tracks the latest releases.
To easily check whether the requirements are met, the following target
can be used::
make LLVM=1 rustavailable
This triggers the same logic used by Kconfig to determine whether
``RUST_IS_AVAILABLE`` should be enabled; but it also explains why not
if that is the case.
rustc
*****
A particular version of the Rust compiler is required. Newer versions may or
may not work because, for the moment, the kernel depends on some unstable
Rust features.
If ``rustup`` is being used, enter the checked out source code directory
and run::
rustup override set $(scripts/min-tool-version.sh rustc)
Otherwise, fetch a standalone installer or install ``rustup`` from:
https://www.rust-lang.org
Rust standard library source
****************************
The Rust standard library source is required because the build system will
cross-compile ``core`` and ``alloc``.
If ``rustup`` is being used, run::
rustup component add rust-src
The components are installed per toolchain, thus upgrading the Rust compiler
version later on requires re-adding the component.
Otherwise, if a standalone installer is used, the Rust repository may be cloned
into the installation folder of the toolchain::
git clone --recurse-submodules \
--branch $(scripts/min-tool-version.sh rustc) \
https://github.com/rust-lang/rust \
$(rustc --print sysroot)/lib/rustlib/src/rust
In this case, upgrading the Rust compiler version later on requires manually
updating this clone.
libclang
********
``libclang`` (part of LLVM) is used by ``bindgen`` to understand the C code
in the kernel, which means LLVM needs to be installed; like when the kernel
is compiled with ``CC=clang`` or ``LLVM=1``.
Linux distributions are likely to have a suitable one available, so it is
best to check that first.
There are also some binaries for several systems and architectures uploaded at:
https://releases.llvm.org/download.html
Otherwise, building LLVM takes quite a while, but it is not a complex process:
https://llvm.org/docs/GettingStarted.html#getting-the-source-code-and-building-llvm
Please see Documentation/kbuild/llvm.rst for more information and further ways
to fetch pre-built releases and distribution packages.
bindgen
*******
The bindings to the C side of the kernel are generated at build time using
the ``bindgen`` tool. A particular version is required.
Install it via (note that this will download and build the tool from source)::
cargo install --locked --version $(scripts/min-tool-version.sh bindgen) bindgen
Requirements: Developing
------------------------
This section explains how to fetch the tools needed for developing. That is,
they are not needed when just building the kernel.
rustfmt
*******
The ``rustfmt`` tool is used to automatically format all the Rust kernel code,
including the generated C bindings (for details, please see
coding-guidelines.rst).
If ``rustup`` is being used, its ``default`` profile already installs the tool,
thus nothing needs to be done. If another profile is being used, the component
can be installed manually::
rustup component add rustfmt
The standalone installers also come with ``rustfmt``.
clippy
******
``clippy`` is a Rust linter. Running it provides extra warnings for Rust code.
It can be run by passing ``CLIPPY=1`` to ``make`` (for details, please see
general-information.rst).
If ``rustup`` is being used, its ``default`` profile already installs the tool,
thus nothing needs to be done. If another profile is being used, the component
can be installed manually::
rustup component add clippy
The standalone installers also come with ``clippy``.
cargo
*****
``cargo`` is the Rust native build system. It is currently required to run
the tests since it is used to build a custom standard library that contains
the facilities provided by the custom ``alloc`` in the kernel. The tests can
be run using the ``rusttest`` Make target.
If ``rustup`` is being used, all the profiles already install the tool,
thus nothing needs to be done.
The standalone installers also come with ``cargo``.
rustdoc
*******
``rustdoc`` is the documentation tool for Rust. It generates pretty HTML
documentation for Rust code (for details, please see
general-information.rst).
``rustdoc`` is also used to test the examples provided in documented Rust code
(called doctests or documentation tests). The ``rusttest`` Make target uses
this feature.
If ``rustup`` is being used, all the profiles already install the tool,
thus nothing needs to be done.
The standalone installers also come with ``rustdoc``.
rust-analyzer
*************
The `rust-analyzer <https://rust-analyzer.github.io/>`_ language server can
be used with many editors to enable syntax highlighting, completion, go to
definition, and other features.
``rust-analyzer`` needs a configuration file, ``rust-project.json``, which
can be generated by the ``rust-analyzer`` Make target.
Configuration
-------------
``Rust support`` (``CONFIG_RUST``) needs to be enabled in the ``General setup``
menu. The option is only shown if a suitable Rust toolchain is found (see
above), as long as the other requirements are met. In turn, this will make
visible the rest of options that depend on Rust.
Afterwards, go to::
Kernel hacking
-> Sample kernel code
-> Rust samples
And enable some sample modules either as built-in or as loadable.
Building
--------
Building a kernel with a complete LLVM toolchain is the best supported setup
at the moment. That is::
make LLVM=1
For architectures that do not support a full LLVM toolchain, use::
make CC=clang
Using GCC also works for some configurations, but it is very experimental at
the moment.
Hacking
-------
To dive deeper, take a look at the source code of the samples
at ``samples/rust/``, the Rust support code under ``rust/`` and
the ``Rust hacking`` menu under ``Kernel hacking``.
If GDB/Binutils is used and Rust symbols are not getting demangled, the reason
is that the toolchain does not support Rust's new v0 mangling scheme yet.
There are a few ways out:
- Install a newer release (GDB >= 10.2, Binutils >= 2.36).
- Some versions of GDB (e.g. vanilla GDB 10.1) are able to use
the pre-demangled names embedded in the debug info (``CONFIG_DEBUG_INFO``).

View File

@ -17527,6 +17527,21 @@ L: linux-rdma@vger.kernel.org
S: Maintained
F: drivers/infiniband/ulp/rtrs/
RUST
M: Miguel Ojeda <ojeda@kernel.org>
M: Alex Gaynor <alex.gaynor@gmail.com>
M: Wedson Almeida Filho <wedsonaf@google.com>
L: rust-for-linux@vger.kernel.org
S: Supported
W: https://github.com/Rust-for-Linux/linux
B: https://github.com/Rust-for-Linux/linux/issues
T: git https://github.com/Rust-for-Linux/linux.git rust-next
F: Documentation/rust/
F: rust/
F: samples/rust/
F: scripts/*rust*
K: \b(?i:rust)\b
RXRPC SOCKETS (AF_RXRPC)
M: David Howells <dhowells@redhat.com>
M: Marc Dionne <marc.dionne@auristor.com>

175
Makefile
View File

@ -120,6 +120,15 @@ endif
export KBUILD_CHECKSRC
# Enable "clippy" (a linter) as part of the Rust compilation.
#
# Use 'make CLIPPY=1' to enable it.
ifeq ("$(origin CLIPPY)", "command line")
KBUILD_CLIPPY := $(CLIPPY)
endif
export KBUILD_CLIPPY
# Use make M=dir or set the environment variable KBUILD_EXTMOD to specify the
# directory of external module to build. Setting M= takes precedence.
ifeq ("$(origin M)", "command line")
@ -267,7 +276,7 @@ no-dot-config-targets := $(clean-targets) \
cscope gtags TAGS tags help% %docs check% coccicheck \
$(version_h) headers headers_% archheaders archscripts \
%asm-generic kernelversion %src-pkg dt_binding_check \
outputmakefile
outputmakefile rustavailable rustfmt rustfmtcheck
# Installation targets should not require compiler. Unfortunately, vdso_install
# is an exception where build artifacts may be updated. This must be fixed.
no-compiler-targets := $(no-dot-config-targets) install dtbs_install \
@ -437,6 +446,7 @@ HOSTCC = gcc
HOSTCXX = g++
endif
HOSTPKG_CONFIG = pkg-config
HOSTRUSTC = rustc
KBUILD_USERHOSTCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes \
-O2 -fomit-frame-pointer -std=gnu11 \
@ -444,8 +454,26 @@ KBUILD_USERHOSTCFLAGS := -Wall -Wmissing-prototypes -Wstrict-prototypes \
KBUILD_USERCFLAGS := $(KBUILD_USERHOSTCFLAGS) $(USERCFLAGS)
KBUILD_USERLDFLAGS := $(USERLDFLAGS)
# These flags apply to all Rust code in the tree, including the kernel and
# host programs.
export rust_common_flags := --edition=2021 \
-Zbinary_dep_depinfo=y \
-Dunsafe_op_in_unsafe_fn -Drust_2018_idioms \
-Dunreachable_pub -Dnon_ascii_idents \
-Wmissing_docs \
-Drustdoc::missing_crate_level_docs \
-Dclippy::correctness -Dclippy::style \
-Dclippy::suspicious -Dclippy::complexity \
-Dclippy::perf \
-Dclippy::let_unit_value -Dclippy::mut_mut \
-Dclippy::needless_bitwise_bool \
-Dclippy::needless_continue \
-Wclippy::dbg_macro
KBUILD_HOSTCFLAGS := $(KBUILD_USERHOSTCFLAGS) $(HOST_LFS_CFLAGS) $(HOSTCFLAGS)
KBUILD_HOSTCXXFLAGS := -Wall -O2 $(HOST_LFS_CFLAGS) $(HOSTCXXFLAGS)
KBUILD_HOSTRUSTFLAGS := $(rust_common_flags) -O -Cstrip=debuginfo \
-Zallow-features= $(HOSTRUSTFLAGS)
KBUILD_HOSTLDFLAGS := $(HOST_LFS_LDFLAGS) $(HOSTLDFLAGS)
KBUILD_HOSTLDLIBS := $(HOST_LFS_LIBS) $(HOSTLDLIBS)
@ -470,6 +498,12 @@ OBJDUMP = $(CROSS_COMPILE)objdump
READELF = $(CROSS_COMPILE)readelf
STRIP = $(CROSS_COMPILE)strip
endif
RUSTC = rustc
RUSTDOC = rustdoc
RUSTFMT = rustfmt
CLIPPY_DRIVER = clippy-driver
BINDGEN = bindgen
CARGO = cargo
PAHOLE = pahole
RESOLVE_BTFIDS = $(objtree)/tools/bpf/resolve_btfids/resolve_btfids
LEX = flex
@ -495,9 +529,11 @@ CHECKFLAGS := -D__linux__ -Dlinux -D__STDC__ -Dunix -D__unix__ \
-Wbitwise -Wno-return-void -Wno-unknown-attribute $(CF)
NOSTDINC_FLAGS :=
CFLAGS_MODULE =
RUSTFLAGS_MODULE =
AFLAGS_MODULE =
LDFLAGS_MODULE =
CFLAGS_KERNEL =
RUSTFLAGS_KERNEL =
AFLAGS_KERNEL =
LDFLAGS_vmlinux =
@ -526,15 +562,42 @@ KBUILD_CFLAGS := -Wall -Wundef -Werror=strict-prototypes -Wno-trigraphs \
-Werror=return-type -Wno-format-security \
-std=gnu11
KBUILD_CPPFLAGS := -D__KERNEL__
KBUILD_RUSTFLAGS := $(rust_common_flags) \
--target=$(objtree)/rust/target.json \
-Cpanic=abort -Cembed-bitcode=n -Clto=n \
-Cforce-unwind-tables=n -Ccodegen-units=1 \
-Csymbol-mangling-version=v0 \
-Crelocation-model=static \
-Zfunction-sections=n \
-Dclippy::float_arithmetic
KBUILD_AFLAGS_KERNEL :=
KBUILD_CFLAGS_KERNEL :=
KBUILD_RUSTFLAGS_KERNEL :=
KBUILD_AFLAGS_MODULE := -DMODULE
KBUILD_CFLAGS_MODULE := -DMODULE
KBUILD_RUSTFLAGS_MODULE := --cfg MODULE
KBUILD_LDFLAGS_MODULE :=
KBUILD_LDFLAGS :=
CLANG_FLAGS :=
ifeq ($(KBUILD_CLIPPY),1)
RUSTC_OR_CLIPPY_QUIET := CLIPPY
RUSTC_OR_CLIPPY = $(CLIPPY_DRIVER)
else
RUSTC_OR_CLIPPY_QUIET := RUSTC
RUSTC_OR_CLIPPY = $(RUSTC)
endif
ifdef RUST_LIB_SRC
export RUST_LIB_SRC
endif
export RUSTC_BOOTSTRAP := 1
export ARCH SRCARCH CONFIG_SHELL BASH HOSTCC KBUILD_HOSTCFLAGS CROSS_COMPILE LD CC HOSTPKG_CONFIG
export RUSTC RUSTDOC RUSTFMT RUSTC_OR_CLIPPY_QUIET RUSTC_OR_CLIPPY BINDGEN CARGO
export HOSTRUSTC KBUILD_HOSTRUSTFLAGS
export CPP AR NM STRIP OBJCOPY OBJDUMP READELF PAHOLE RESOLVE_BTFIDS LEX YACC AWK INSTALLKERNEL
export PERL PYTHON3 CHECK CHECKFLAGS MAKE UTS_MACHINE HOSTCXX
export KGZIP KBZIP2 KLZOP LZMA LZ4 XZ ZSTD
@ -543,9 +606,10 @@ export KBUILD_USERCFLAGS KBUILD_USERLDFLAGS
export KBUILD_CPPFLAGS NOSTDINC_FLAGS LINUXINCLUDE OBJCOPYFLAGS KBUILD_LDFLAGS
export KBUILD_CFLAGS CFLAGS_KERNEL CFLAGS_MODULE
export KBUILD_RUSTFLAGS RUSTFLAGS_KERNEL RUSTFLAGS_MODULE
export KBUILD_AFLAGS AFLAGS_KERNEL AFLAGS_MODULE
export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_LDFLAGS_MODULE
export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL
export KBUILD_AFLAGS_MODULE KBUILD_CFLAGS_MODULE KBUILD_RUSTFLAGS_MODULE KBUILD_LDFLAGS_MODULE
export KBUILD_AFLAGS_KERNEL KBUILD_CFLAGS_KERNEL KBUILD_RUSTFLAGS_KERNEL
export PAHOLE_FLAGS
# Files to ignore in find ... statements
@ -726,7 +790,7 @@ $(KCONFIG_CONFIG):
#
# Do not use $(call cmd,...) here. That would suppress prompts from syncconfig,
# so you cannot notice that Kconfig is waiting for the user input.
%/config/auto.conf %/config/auto.conf.cmd %/generated/autoconf.h: $(KCONFIG_CONFIG)
%/config/auto.conf %/config/auto.conf.cmd %/generated/autoconf.h %/generated/rustc_cfg: $(KCONFIG_CONFIG)
$(Q)$(kecho) " SYNC $@"
$(Q)$(MAKE) -f $(srctree)/Makefile syncconfig
else # !may-sync-config
@ -755,12 +819,28 @@ KBUILD_CFLAGS += $(call cc-disable-warning, address-of-packed-member)
ifdef CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE
KBUILD_CFLAGS += -O2
KBUILD_RUSTFLAGS_OPT_LEVEL_MAP := 2
else ifdef CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE_O3
KBUILD_CFLAGS += -O3
KBUILD_RUSTFLAGS_OPT_LEVEL_MAP := 3
else ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE
KBUILD_CFLAGS += -Os
KBUILD_RUSTFLAGS_OPT_LEVEL_MAP := s
endif
# Always set `debug-assertions` and `overflow-checks` because their default
# depends on `opt-level` and `debug-assertions`, respectively.
KBUILD_RUSTFLAGS += -Cdebug-assertions=$(if $(CONFIG_RUST_DEBUG_ASSERTIONS),y,n)
KBUILD_RUSTFLAGS += -Coverflow-checks=$(if $(CONFIG_RUST_OVERFLOW_CHECKS),y,n)
KBUILD_RUSTFLAGS += -Copt-level=$\
$(if $(CONFIG_RUST_OPT_LEVEL_SIMILAR_AS_CHOSEN_FOR_C),$(KBUILD_RUSTFLAGS_OPT_LEVEL_MAP))$\
$(if $(CONFIG_RUST_OPT_LEVEL_0),0)$\
$(if $(CONFIG_RUST_OPT_LEVEL_1),1)$\
$(if $(CONFIG_RUST_OPT_LEVEL_2),2)$\
$(if $(CONFIG_RUST_OPT_LEVEL_3),3)$\
$(if $(CONFIG_RUST_OPT_LEVEL_S),s)$\
$(if $(CONFIG_RUST_OPT_LEVEL_Z),z)
# Tell gcc to never replace conditional load with a non-conditional one
ifdef CONFIG_CC_IS_GCC
# gcc-10 renamed --param=allow-store-data-races=0 to
@ -791,6 +871,9 @@ KBUILD_CFLAGS-$(CONFIG_WERROR) += -Werror
KBUILD_CFLAGS-$(CONFIG_CC_NO_ARRAY_BOUNDS) += -Wno-array-bounds
KBUILD_CFLAGS += $(KBUILD_CFLAGS-y) $(CONFIG_CC_IMPLICIT_FALLTHROUGH)
KBUILD_RUSTFLAGS-$(CONFIG_WERROR) += -Dwarnings
KBUILD_RUSTFLAGS += $(KBUILD_RUSTFLAGS-y)
ifdef CONFIG_CC_IS_CLANG
KBUILD_CPPFLAGS += -Qunused-arguments
# The kernel builds with '-std=gnu11' so use of GNU extensions is acceptable.
@ -811,12 +894,15 @@ KBUILD_CFLAGS += $(call cc-disable-warning, dangling-pointer)
ifdef CONFIG_FRAME_POINTER
KBUILD_CFLAGS += -fno-omit-frame-pointer -fno-optimize-sibling-calls
KBUILD_RUSTFLAGS += -Cforce-frame-pointers=y
else
# Some targets (ARM with Thumb2, for example), can't be built with frame
# pointers. For those, we don't have FUNCTION_TRACER automatically
# select FRAME_POINTER. However, FUNCTION_TRACER adds -pg, and this is
# incompatible with -fomit-frame-pointer with current GCC, so we don't use
# -fomit-frame-pointer with FUNCTION_TRACER.
# In the Rust target specification, "frame-pointer" is set explicitly
# to "may-omit".
ifndef CONFIG_FUNCTION_TRACER
KBUILD_CFLAGS += -fomit-frame-pointer
endif
@ -881,8 +967,10 @@ ifdef CONFIG_DEBUG_SECTION_MISMATCH
KBUILD_CFLAGS += -fno-inline-functions-called-once
endif
# `rustc`'s `-Zfunction-sections` applies to data too (as of 1.59.0).
ifdef CONFIG_LD_DEAD_CODE_DATA_ELIMINATION
KBUILD_CFLAGS_KERNEL += -ffunction-sections -fdata-sections
KBUILD_RUSTFLAGS_KERNEL += -Zfunction-sections=y
LDFLAGS_vmlinux += --gc-sections
endif
@ -1025,10 +1113,11 @@ include $(addprefix $(srctree)/, $(include-y))
# Do not add $(call cc-option,...) below this line. When you build the kernel
# from the clean source tree, the GCC plugins do not exist at this point.
# Add user supplied CPPFLAGS, AFLAGS and CFLAGS as the last assignments
# Add user supplied CPPFLAGS, AFLAGS, CFLAGS and RUSTFLAGS as the last assignments
KBUILD_CPPFLAGS += $(KCPPFLAGS)
KBUILD_AFLAGS += $(KAFLAGS)
KBUILD_CFLAGS += $(KCFLAGS)
KBUILD_RUSTFLAGS += $(KRUSTFLAGS)
KBUILD_LDFLAGS_MODULE += --build-id=sha1
LDFLAGS_vmlinux += --build-id=sha1
@ -1098,6 +1187,7 @@ ifeq ($(KBUILD_EXTMOD),)
core-y += kernel/ certs/ mm/ fs/ ipc/ security/ crypto/
core-$(CONFIG_BLOCK) += block/
core-$(CONFIG_IO_URING) += io_uring/
core-$(CONFIG_RUST) += rust/
vmlinux-dirs := $(patsubst %/,%,$(filter %/, \
$(core-y) $(core-m) $(drivers-y) $(drivers-m) \
@ -1202,6 +1292,10 @@ prepare0: archprepare
# All the preparing..
prepare: prepare0
ifdef CONFIG_RUST
$(Q)$(CONFIG_SHELL) $(srctree)/scripts/rust-is-available.sh -v
$(Q)$(MAKE) $(build)=rust
endif
PHONY += remove-stale-files
remove-stale-files:
@ -1491,7 +1585,7 @@ endif # CONFIG_MODULES
# Directories & files removed with 'make clean'
CLEAN_FILES += include/ksym vmlinux.symvers modules-only.symvers \
modules.builtin modules.builtin.modinfo modules.nsdeps \
compile_commands.json .thinlto-cache
compile_commands.json .thinlto-cache rust/test rust/doc
# Directories & files removed with 'make mrproper'
MRPROPER_FILES += include/config include/generated \
@ -1502,7 +1596,8 @@ MRPROPER_FILES += include/config include/generated \
certs/signing_key.pem \
certs/x509.genkey \
vmlinux-gdb.py \
*.spec
*.spec \
rust/target.json rust/libmacros.so
# clean - Delete most, but leave enough to build external modules
#
@ -1527,6 +1622,9 @@ $(mrproper-dirs):
mrproper: clean $(mrproper-dirs)
$(call cmd,rmfiles)
@find . $(RCS_FIND_IGNORE) \
\( -name '*.rmeta' \) \
-type f -print | xargs rm -f
# distclean
#
@ -1614,6 +1712,23 @@ help:
@echo ' kselftest-merge - Merge all the config dependencies of'
@echo ' kselftest to existing .config.'
@echo ''
@echo 'Rust targets:'
@echo ' rustavailable - Checks whether the Rust toolchain is'
@echo ' available and, if not, explains why.'
@echo ' rustfmt - Reformat all the Rust code in the kernel'
@echo ' rustfmtcheck - Checks if all the Rust code in the kernel'
@echo ' is formatted, printing a diff otherwise.'
@echo ' rustdoc - Generate Rust documentation'
@echo ' (requires kernel .config)'
@echo ' rusttest - Runs the Rust tests'
@echo ' (requires kernel .config; downloads external repos)'
@echo ' rust-analyzer - Generate rust-project.json rust-analyzer support file'
@echo ' (requires kernel .config)'
@echo ' dir/file.[os] - Build specified target only'
@echo ' dir/file.i - Build macro expanded source, similar to C preprocessing'
@echo ' (run with RUSTFMT=n to skip reformatting if needed)'
@echo ' dir/file.ll - Build the LLVM assembly file'
@echo ''
@$(if $(dtstree), \
echo 'Devicetree:'; \
echo '* dtbs - Build device tree blobs for enabled boards'; \
@ -1686,6 +1801,52 @@ PHONY += $(DOC_TARGETS)
$(DOC_TARGETS):
$(Q)$(MAKE) $(build)=Documentation $@
# Rust targets
# ---------------------------------------------------------------------------
# "Is Rust available?" target
PHONY += rustavailable
rustavailable:
$(Q)$(CONFIG_SHELL) $(srctree)/scripts/rust-is-available.sh -v && echo >&2 "Rust is available!"
# Documentation target
#
# Using the singular to avoid running afoul of `no-dot-config-targets`.
PHONY += rustdoc
rustdoc: prepare
$(Q)$(MAKE) $(build)=rust $@
# Testing target
PHONY += rusttest
rusttest: prepare
$(Q)$(MAKE) $(build)=rust $@
# Formatting targets
PHONY += rustfmt rustfmtcheck
# We skip `rust/alloc` since we want to minimize the diff w.r.t. upstream.
#
# We match using absolute paths since `find` does not resolve them
# when matching, which is a problem when e.g. `srctree` is `..`.
# We `grep` afterwards in order to remove the directory entry itself.
rustfmt:
$(Q)find $(abs_srctree) -type f -name '*.rs' \
-o -path $(abs_srctree)/rust/alloc -prune \
-o -path $(abs_objtree)/rust/test -prune \
| grep -Fv $(abs_srctree)/rust/alloc \
| grep -Fv $(abs_objtree)/rust/test \
| grep -Fv generated \
| xargs $(RUSTFMT) $(rustfmt_flags)
rustfmtcheck: rustfmt_flags = --check
rustfmtcheck: rustfmt
# IDE support targets
PHONY += rust-analyzer
rust-analyzer:
$(Q)$(MAKE) $(build)=rust $@
# Misc
# ---------------------------------------------------------------------------

View File

@ -353,6 +353,12 @@ config HAVE_RSEQ
This symbol should be selected by an architecture if it
supports an implementation of restartable sequences.
config HAVE_RUST
bool
help
This symbol should be selected by an architecture if it
supports Rust.
config HAVE_FUNCTION_ARG_ACCESS_API
bool
help

View File

@ -116,6 +116,7 @@ config ARM
select MMU_GATHER_RCU_TABLE_FREE if SMP && ARM_LPAE
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RSEQ
select HAVE_RUST if CPU_32v6 || CPU_32v6K
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_UID16

View File

@ -202,6 +202,7 @@ config ARM64
select HAVE_FUNCTION_ARG_ACCESS_API
select MMU_GATHER_RCU_TABLE_FREE
select HAVE_RSEQ
select HAVE_RUST
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_KPROBES

View File

@ -243,6 +243,7 @@ config PPC
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RELIABLE_STACKTRACE
select HAVE_RSEQ
select HAVE_RUST if PPC64 && CPU_LITTLE_ENDIAN
select HAVE_SETUP_PER_CPU_AREA if PPC64
select HAVE_SOFTIRQ_ON_OWN_STACK
select HAVE_STACKPROTECTOR if PPC32 && $(cc-option,-mstack-protector-guard=tls -mstack-protector-guard-reg=r2)

View File

@ -104,6 +104,7 @@ config RISCV
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_RUST if 64BIT
select HAVE_FUNCTION_ARG_ACCESS_API
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS

View File

@ -26,6 +26,8 @@ ifeq ($(CONFIG_ARCH_RV64I),y)
KBUILD_CFLAGS += -mabi=lp64
KBUILD_AFLAGS += -mabi=lp64
KBUILD_RUSTFLAGS += -Ctarget-cpu=generic-rv64
KBUILD_LDFLAGS += -melf64lriscv
else
BITS := 32
@ -33,6 +35,9 @@ else
KBUILD_CFLAGS += -mabi=ilp32
KBUILD_AFLAGS += -mabi=ilp32
KBUILD_RUSTFLAGS += -Ctarget-cpu=generic-rv32
KBUILD_LDFLAGS += -melf32lriscv
endif

View File

@ -25,6 +25,7 @@ config UML
select TRACE_IRQFLAGS_SUPPORT
select TTY # Needed for line.c
select HAVE_ARCH_VMAP_STACK
select HAVE_RUST if X86_64
config MMU
bool

View File

@ -257,6 +257,7 @@ config X86
select HAVE_STATIC_CALL_INLINE if HAVE_OBJTOOL
select HAVE_PREEMPT_DYNAMIC_CALL
select HAVE_RSEQ
select HAVE_RUST if X86_64
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_UACCESS_VALIDATION if HAVE_OBJTOOL
select HAVE_UNSTABLE_SCHED_CLOCK

View File

@ -21,6 +21,8 @@ ifdef CONFIG_CC_IS_CLANG
RETPOLINE_CFLAGS := -mretpoline-external-thunk
RETPOLINE_VDSO_CFLAGS := -mretpoline
endif
RETPOLINE_RUSTFLAGS := -Ctarget-feature=+retpoline-external-thunk
export RETPOLINE_CFLAGS
export RETPOLINE_VDSO_CFLAGS
@ -61,6 +63,8 @@ export BITS
# https://gcc.gnu.org/bugzilla/show_bug.cgi?id=53383
#
KBUILD_CFLAGS += -mno-sse -mno-mmx -mno-sse2 -mno-3dnow -mno-avx
KBUILD_RUSTFLAGS += -Ctarget-feature=-mmx,-sse,-sse2,-sse3,-ssse3,-sse4.1,-sse4.2
KBUILD_RUSTFLAGS += -Ctarget-feature=-3dnow,-3dnowa,-avx,-avx2,+soft-float
ifeq ($(CONFIG_X86_KERNEL_IBT),y)
#
@ -148,8 +152,17 @@ else
cflags-$(CONFIG_GENERIC_CPU) += -mtune=generic
KBUILD_CFLAGS += $(cflags-y)
rustflags-$(CONFIG_MK8) += -Ctarget-cpu=k8
rustflags-$(CONFIG_MPSC) += -Ctarget-cpu=nocona
rustflags-$(CONFIG_MCORE2) += -Ctarget-cpu=core2
rustflags-$(CONFIG_MATOM) += -Ctarget-cpu=atom
rustflags-$(CONFIG_GENERIC_CPU) += -Ztune-cpu=generic
KBUILD_RUSTFLAGS += $(rustflags-y)
KBUILD_CFLAGS += -mno-red-zone
KBUILD_CFLAGS += -mcmodel=kernel
KBUILD_RUSTFLAGS += -Cno-redzone=y
KBUILD_RUSTFLAGS += -Ccode-model=kernel
endif
#
@ -185,6 +198,7 @@ ifdef CONFIG_RETPOLINE
ifndef CONFIG_CC_IS_CLANG
KBUILD_CFLAGS += -fno-jump-tables
endif
KBUILD_RUSTFLAGS += $(RETPOLINE_RUSTFLAGS)
endif
ifdef CONFIG_SLS

View File

@ -20,6 +20,12 @@ config ANDROID_BINDER_IPC
Android process, using Binder to identify, invoke and pass arguments
between said processes.
config ANDROID_BINDER_IPC_RUST
bool "Android Binder IPC Driver in Rust"
depends on MMU && RUST
help
Implementation of the Binder IPC in Rust.
config ANDROID_BINDERFS
bool "Android Binderfs filesystem"
depends on ANDROID_BINDER_IPC

View File

@ -4,3 +4,5 @@ ccflags-y += -I$(src) # needed for trace events
obj-$(CONFIG_ANDROID_BINDERFS) += binderfs.o
obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o binder_alloc.o
obj-$(CONFIG_ANDROID_BINDER_IPC_SELFTEST) += binder_alloc_selftest.o
obj-$(CONFIG_ANDROID_BINDER_IPC_RUST) += rust_binder.o

View File

@ -0,0 +1,266 @@
// SPDX-License-Identifier: GPL-2.0
use core::mem::{replace, size_of, MaybeUninit};
use kernel::{
bindings, linked_list::List, pages::Pages, prelude::*, sync::Ref, user_ptr::UserSlicePtrReader,
};
use crate::{
defs::*,
node::NodeRef,
process::{AllocationInfo, Process},
thread::{BinderError, BinderResult},
transaction::FileInfo,
};
/// A buffer allocation belonging to `process`, backed by `pages`.
///
/// Unless `keep_alive` is called, the allocation is returned to the owning
/// process when dropped (see the `Drop` impl below).
pub(crate) struct Allocation<'a> {
    /// Byte offset of this allocation within the process's mapped buffer.
    pub(crate) offset: usize,
    /// Size of the allocation in bytes; all reads/writes are bounds-checked against it.
    size: usize,
    /// Raw address handed back to `Process::buffer_raw_free` on drop.
    pub(crate) ptr: usize,
    /// Pages backing the buffer; indexed by `iterate` using `PAGE_SHIFT`.
    pages: Ref<[Pages<0>]>,
    /// The process that owns this allocation.
    pub(crate) process: &'a Process,
    /// When set, describes where object-descriptor offsets live so `Drop` can
    /// clean up the contained binder objects.
    allocation_info: Option<AllocationInfo>,
    /// Whether dropping this value frees the underlying buffer range.
    free_on_drop: bool,
    /// Files associated with this allocation (see `add_file_info`/`take_file_list`).
    file_list: List<Box<FileInfo>>,
}
impl<'a> Allocation<'a> {
    /// Creates a new allocation descriptor; the buffer is freed on drop until
    /// `keep_alive` is called.
    pub(crate) fn new(
        process: &'a Process,
        offset: usize,
        size: usize,
        ptr: usize,
        pages: Ref<[Pages<0>]>,
    ) -> Self {
        Self {
            process,
            offset,
            size,
            ptr,
            pages,
            allocation_info: None,
            free_on_drop: true,
            file_list: List::new(),
        }
    }
    /// Takes ownership of the file list, leaving an empty list behind.
    pub(crate) fn take_file_list(&mut self) -> List<Box<FileInfo>> {
        replace(&mut self.file_list, List::new())
    }
    /// Appends `file` to the list of files associated with this allocation.
    pub(crate) fn add_file_info(&mut self, file: Box<FileInfo>) {
        self.file_list.push_back(file);
    }
    /// Invokes `cb` once per page-sized chunk of the `[offset, offset + size)`
    /// range of this allocation, passing the page, the in-page offset, and the
    /// number of bytes available in that page.
    ///
    /// Fails with `EINVAL` if the requested range overflows or exceeds the
    /// allocation size.
    fn iterate<T>(&self, mut offset: usize, mut size: usize, mut cb: T) -> Result
    where
        T: FnMut(&Pages<0>, usize, usize) -> Result,
    {
        // Check that the request is within the buffer.
        if offset.checked_add(size).ok_or(EINVAL)? > self.size {
            return Err(EINVAL);
        }
        // Translate to an offset within the whole mapped buffer, then split it
        // into a page index and an in-page offset.
        offset += self.offset;
        let mut page_index = offset >> bindings::PAGE_SHIFT;
        offset &= (1 << bindings::PAGE_SHIFT) - 1;
        while size > 0 {
            // Only the first iteration can start mid-page; `offset` is reset to
            // 0 for all subsequent pages.
            let available = core::cmp::min(size, (1 << bindings::PAGE_SHIFT) as usize - offset);
            cb(&self.pages[page_index], offset, available)?;
            size -= available;
            page_index += 1;
            offset = 0;
        }
        Ok(())
    }
    /// Copies `size` bytes from the userspace `reader` into the allocation,
    /// starting at `offset`.
    pub(crate) fn copy_into(
        &self,
        reader: &mut UserSlicePtrReader,
        offset: usize,
        size: usize,
    ) -> Result {
        self.iterate(offset, size, |page, offset, to_copy| {
            page.copy_into_page(reader, offset, to_copy)
        })
    }
    /// Reads a `T` out of the allocation at `offset`, assembling it from
    /// potentially multiple pages.
    pub(crate) fn read<T>(&self, offset: usize) -> Result<T> {
        let mut out = MaybeUninit::<T>::uninit();
        let mut out_offset = 0;
        self.iterate(offset, size_of::<T>(), |page, offset, to_copy| {
            // SAFETY: Data buffer is allocated on the stack.
            unsafe {
                page.read(
                    (out.as_mut_ptr() as *mut u8).add(out_offset),
                    offset,
                    to_copy,
                )
            }?;
            out_offset += to_copy;
            Ok(())
        })?;
        // SAFETY: We just initialised the data.
        Ok(unsafe { out.assume_init() })
    }
    /// Writes `obj` into the allocation at `offset`, splitting it across pages
    /// as needed.
    pub(crate) fn write<T>(&self, offset: usize, obj: &T) -> Result {
        let mut obj_offset = 0;
        self.iterate(offset, size_of::<T>(), |page, offset, to_copy| {
            // SAFETY: The sum of `offset` and `to_copy` is bounded by the size of T.
            let obj_ptr = unsafe { (obj as *const T as *const u8).add(obj_offset) };
            // SAFETY: We have a reference to the object, so the pointer is valid.
            unsafe { page.write(obj_ptr, offset, to_copy) }?;
            obj_offset += to_copy;
            Ok(())
        })
    }
    /// Transfers responsibility for freeing the buffer to the owning process
    /// and disables free-on-drop for this value.
    pub(crate) fn keep_alive(mut self) {
        self.process
            .buffer_make_freeable(self.offset, self.allocation_info.take());
        self.free_on_drop = false;
    }
    /// Records where the object-descriptor offsets live, enabling cleanup of
    /// contained binder objects on drop.
    pub(crate) fn set_info(&mut self, info: AllocationInfo) {
        self.allocation_info = Some(info);
    }
}
impl Drop for Allocation<'_> {
    /// Frees the buffer range unless `keep_alive` was called; first cleans up
    /// any binder objects recorded via `set_info`.
    fn drop(&mut self) {
        if !self.free_on_drop {
            return;
        }
        if let Some(info) = &self.allocation_info {
            let offsets = info.offsets.clone();
            let view = AllocationView::new(self, offsets.start);
            // Each entry in the offsets range is a usize-sized offset to an
            // object descriptor; failures are logged but don't abort cleanup.
            for i in offsets.step_by(size_of::<usize>()) {
                if view.cleanup_object(i).is_err() {
                    pr_warn!("Error cleaning up object at offset {}\n", i)
                }
            }
        }
        self.process.buffer_raw_free(self.ptr);
    }
}
/// A view of an [`Allocation`] that bounds reads and writes to `[0, limit)`.
pub(crate) struct AllocationView<'a, 'b> {
    pub(crate) alloc: &'a mut Allocation<'b>,
    /// Exclusive upper bound for `read`/`write` offsets within the allocation.
    limit: usize,
}
impl<'a, 'b> AllocationView<'a, 'b> {
    /// Creates a view over `alloc` limited to the first `limit` bytes.
    pub(crate) fn new(alloc: &'a mut Allocation<'b>, limit: usize) -> Self {
        AllocationView { alloc, limit }
    }
    /// Reads a `T` at `offset`, failing with `EINVAL` if it would cross `limit`.
    pub(crate) fn read<T>(&self, offset: usize) -> Result<T> {
        if offset.checked_add(size_of::<T>()).ok_or(EINVAL)? > self.limit {
            return Err(EINVAL);
        }
        self.alloc.read(offset)
    }
    /// Writes `obj` at `offset`, failing with `EINVAL` if it would cross `limit`.
    pub(crate) fn write<T>(&self, offset: usize, obj: &T) -> Result {
        if offset.checked_add(size_of::<T>()).ok_or(EINVAL)? > self.limit {
            return Err(EINVAL);
        }
        self.alloc.write(offset, obj)
    }
    /// Translates the `flat_binder_object` at `offset` for the receiving
    /// process: if the receiver owns the node, it is rewritten as a (weak)
    /// binder object with the node's id; otherwise a handle is inserted into
    /// the receiver's table and the object is rewritten as a (weak) handle.
    pub(crate) fn transfer_binder_object<T>(
        &self,
        offset: usize,
        strong: bool,
        get_node: T,
    ) -> BinderResult
    where
        T: FnOnce(&bindings::flat_binder_object) -> BinderResult<NodeRef>,
    {
        // TODO: Do we want this function to take a &mut self?
        let obj = self.read::<bindings::flat_binder_object>(offset)?;
        let node_ref = get_node(&obj)?;
        if core::ptr::eq(&*node_ref.node.owner, self.alloc.process) {
            // The receiving process is the owner of the node, so send it a binder object (instead
            // of a handle).
            let (ptr, cookie) = node_ref.node.get_id();
            let newobj = bindings::flat_binder_object {
                hdr: bindings::binder_object_header {
                    type_: if strong {
                        BINDER_TYPE_BINDER
                    } else {
                        BINDER_TYPE_WEAK_BINDER
                    },
                },
                flags: obj.flags,
                __bindgen_anon_1: bindings::flat_binder_object__bindgen_ty_1 { binder: ptr as _ },
                cookie: cookie as _,
            };
            self.write(offset, &newobj)?;
            // Increment the user ref count on the node. It will be decremented as part of the
            // destruction of the buffer, when we see a binder or weak-binder object.
            node_ref.node.update_refcount(true, strong);
        } else {
            // The receiving process is different from the owner, so we need to insert a handle to
            // the binder object.
            let handle = self
                .alloc
                .process
                .insert_or_update_handle(node_ref, false)?;
            let newobj = bindings::flat_binder_object {
                hdr: bindings::binder_object_header {
                    type_: if strong {
                        BINDER_TYPE_HANDLE
                    } else {
                        BINDER_TYPE_WEAK_HANDLE
                    },
                },
                flags: obj.flags,
                // TODO: To avoid padding, we write to `binder` instead of `handle` here. We need a
                // better solution though.
                __bindgen_anon_1: bindings::flat_binder_object__bindgen_ty_1 {
                    binder: handle as _,
                },
                ..bindings::flat_binder_object::default()
            };
            if self.write(offset, &newobj).is_err() {
                // Decrement ref count on the handle we just created.
                let _ = self.alloc.process.update_ref(handle, false, strong);
                return Err(BinderError::new_failed());
            }
        }
        Ok(())
    }
    /// Drops the reference held by the binder/handle object whose descriptor
    /// offset is stored (as a `usize`) at `index_offset`. Unknown object types
    /// are ignored.
    fn cleanup_object(&self, index_offset: usize) -> Result {
        let offset = self.alloc.read(index_offset)?;
        let header = self.read::<bindings::binder_object_header>(offset)?;
        // TODO: Handle other types.
        match header.type_ {
            BINDER_TYPE_WEAK_BINDER | BINDER_TYPE_BINDER => {
                let obj = self.read::<bindings::flat_binder_object>(offset)?;
                let strong = header.type_ == BINDER_TYPE_BINDER;
                // SAFETY: The type is `BINDER_TYPE_{WEAK_}BINDER`, so the `binder` field is
                // populated.
                let ptr = unsafe { obj.__bindgen_anon_1.binder } as usize;
                let cookie = obj.cookie as usize;
                self.alloc.process.update_node(ptr, cookie, strong, false);
                Ok(())
            }
            BINDER_TYPE_WEAK_HANDLE | BINDER_TYPE_HANDLE => {
                let obj = self.read::<bindings::flat_binder_object>(offset)?;
                let strong = header.type_ == BINDER_TYPE_HANDLE;
                // SAFETY: The type is `BINDER_TYPE_{WEAK_}HANDLE`, so the `handle` field is
                // populated.
                let handle = unsafe { obj.__bindgen_anon_1.handle } as _;
                self.alloc.process.update_ref(handle, false, strong)
            }
            _ => Ok(()),
        }
    }
}

View File

@ -0,0 +1,80 @@
// SPDX-License-Identifier: GPL-2.0
use kernel::{
bindings,
prelude::*,
security,
sync::{Mutex, Ref, UniqueRef},
};
use crate::{
node::NodeRef,
thread::{BinderError, BinderResult},
};
/// State guarded by `Context::manager`: the context manager node (if any) and
/// the uid that registered it.
struct Manager {
    node: Option<NodeRef>,
    uid: Option<bindings::kuid_t>,
}
/// Global state of a binder context: which node (if any) is the context manager.
pub(crate) struct Context {
    manager: Mutex<Manager>,
}
#[allow(clippy::non_send_fields_in_send_ty)]
// SAFETY: All access to `manager` goes through the `Mutex`, so sharing/sending
// `Context` across threads is claimed to be sound here. NOTE(review): these
// impls carry no safety argument in the original; the soundness rests on
// `NodeRef`/`kuid_t` being safe to move between threads — confirm.
unsafe impl Send for Context {}
unsafe impl Sync for Context {}
impl Context {
    /// Creates a new context with no manager node registered.
    pub(crate) fn new() -> Result<Ref<Self>> {
        let mut ctx = Pin::from(UniqueRef::try_new(Self {
            // SAFETY: Init is called below.
            manager: unsafe {
                Mutex::new(Manager {
                    node: None,
                    uid: None,
                })
            },
        })?);
        // SAFETY: `manager` is also pinned when `ctx` is.
        let manager = unsafe { ctx.as_mut().map_unchecked_mut(|c| &mut c.manager) };
        kernel::mutex_init!(manager, "Context::manager");
        Ok(ctx.into())
    }
    /// Registers `node_ref` as the context manager node.
    ///
    /// Fails with `EBUSY` if a manager is already set, propagates the security
    /// check failure, and fails with `EPERM` if a previously recorded uid does
    /// not match the caller's.
    pub(crate) fn set_manager_node(&self, node_ref: NodeRef) -> Result {
        let mut manager = self.manager.lock();
        if manager.node.is_some() {
            return Err(EBUSY);
        }
        security::binder_set_context_mgr(&node_ref.node.owner.cred)?;
        // TODO: Get the actual caller id.
        let caller_uid = bindings::kuid_t::default();
        if let Some(ref uid) = manager.uid {
            if uid.val != caller_uid.val {
                return Err(EPERM);
            }
        }
        manager.node = Some(node_ref);
        manager.uid = Some(caller_uid);
        Ok(())
    }
    /// Clears the manager node; the node reference is dropped after the lock
    /// guard is released (the guard is a temporary in the first statement).
    pub(crate) fn unset_manager_node(&self) {
        let node_ref = self.manager.lock().node.take();
        drop(node_ref);
    }
    /// Returns a clone of the manager node reference, or a "dead" error if no
    /// manager is registered.
    pub(crate) fn get_manager_node(&self, strong: bool) -> BinderResult<NodeRef> {
        self.manager
            .lock()
            .node
            .as_ref()
            .ok_or_else(BinderError::new_dead)?
            .clone(strong)
    }
}

99
drivers/android/defs.rs Normal file
View File

@ -0,0 +1,99 @@
// SPDX-License-Identifier: GPL-2.0
use core::ops::{Deref, DerefMut};
use kernel::{
bindings,
bindings::*,
io_buffer::{ReadableFromBytes, WritableToBytes},
};
/// Re-exports a list of `$prefix`-prefixed constants from the bindings under
/// their short names, e.g. `binder_driver_return_protocol_BR_OK` as `BR_OK`
/// (relies on `concat_idents!`).
macro_rules! pub_no_prefix {
    ($prefix:ident, $($newname:ident),+) => {
        $(pub(crate) const $newname: u32 = concat_idents!($prefix, $newname);)+
    };
}
pub_no_prefix!(
binder_driver_return_protocol_,
BR_OK,
BR_ERROR,
BR_TRANSACTION,
BR_REPLY,
BR_DEAD_REPLY,
BR_TRANSACTION_COMPLETE,
BR_INCREFS,
BR_ACQUIRE,
BR_RELEASE,
BR_DECREFS,
BR_NOOP,
BR_SPAWN_LOOPER,
BR_DEAD_BINDER,
BR_CLEAR_DEATH_NOTIFICATION_DONE,
BR_FAILED_REPLY
);
pub_no_prefix!(
binder_driver_command_protocol_,
BC_TRANSACTION,
BC_REPLY,
BC_FREE_BUFFER,
BC_INCREFS,
BC_ACQUIRE,
BC_RELEASE,
BC_DECREFS,
BC_INCREFS_DONE,
BC_ACQUIRE_DONE,
BC_REGISTER_LOOPER,
BC_ENTER_LOOPER,
BC_EXIT_LOOPER,
BC_REQUEST_DEATH_NOTIFICATION,
BC_CLEAR_DEATH_NOTIFICATION,
BC_DEAD_BINDER_DONE
);
pub_no_prefix!(transaction_flags_, TF_ONE_WAY, TF_ACCEPT_FDS);
pub(crate) use bindings::{
BINDER_TYPE_BINDER, BINDER_TYPE_FD, BINDER_TYPE_HANDLE, BINDER_TYPE_WEAK_BINDER,
BINDER_TYPE_WEAK_HANDLE, FLAT_BINDER_FLAG_ACCEPTS_FDS,
};
/// Declares a newtype wrapper around a bindings struct that derefs to the
/// wrapped type and is marked as safe to read/write as raw bytes.
macro_rules! decl_wrapper {
    ($newname:ident, $wrapped:ty) => {
        #[derive(Copy, Clone, Default)]
        pub(crate) struct $newname($wrapped);
        // TODO: This must be justified by inspecting the type, so should live outside the macro or
        // the macro should be somehow marked unsafe.
        unsafe impl ReadableFromBytes for $newname {}
        unsafe impl WritableToBytes for $newname {}
        impl Deref for $newname {
            type Target = $wrapped;
            fn deref(&self) -> &Self::Target {
                &self.0
            }
        }
        impl DerefMut for $newname {
            fn deref_mut(&mut self) -> &mut Self::Target {
                &mut self.0
            }
        }
    };
}
decl_wrapper!(BinderNodeDebugInfo, bindings::binder_node_debug_info);
decl_wrapper!(BinderNodeInfoForRef, bindings::binder_node_info_for_ref);
decl_wrapper!(FlatBinderObject, bindings::flat_binder_object);
decl_wrapper!(BinderTransactionData, bindings::binder_transaction_data);
decl_wrapper!(BinderWriteRead, bindings::binder_write_read);
decl_wrapper!(BinderVersion, bindings::binder_version);
impl BinderVersion {
pub(crate) fn current() -> Self {
Self(bindings::binder_version {
protocol_version: bindings::BINDER_CURRENT_PROTOCOL_VERSION as _,
})
}
}

476
drivers/android/node.rs Normal file
View File

@ -0,0 +1,476 @@
// SPDX-License-Identifier: GPL-2.0
use core::sync::atomic::{AtomicU64, Ordering};
use kernel::{
io_buffer::IoBufferWriter,
linked_list::{GetLinks, Links, List},
prelude::*,
sync::{Guard, LockedBy, Mutex, Ref, SpinLock},
user_ptr::UserSlicePtrWriter,
};
use crate::{
defs::*,
process::{Process, ProcessInner},
thread::{BinderError, BinderResult, Thread},
DeliverToRead,
};
/// Reference-count state for one strength (strong or weak) of a node.
struct CountState {
    /// Current count of references of this strength.
    count: usize,
    /// Whether userspace has been told it holds a reference (BR_INCREFS/BR_ACQUIRE).
    has_count: bool,
    /// Whether `count` includes the +1 bias added by `add_bias`.
    is_biased: bool,
}
impl CountState {
    /// Returns a zeroed state: no references, no userspace count, no bias.
    fn new() -> Self {
        Self {
            count: 0,
            has_count: false,
            is_biased: false,
        }
    }
    /// Adds the +1 bias to the count and records that it is present.
    fn add_bias(&mut self) {
        self.count += 1;
        self.is_biased = true;
    }
}
/// Mutable state of a [`Node`], protected by the owner process's inner mutex
/// (see the `LockedBy` field on `Node`).
struct NodeInner {
    strong: CountState,
    weak: CountState,
    /// Death notifications registered against this node.
    death_list: List<Ref<NodeDeath>>,
}
/// Mutable state of a [`NodeDeath`], protected by its spinlock.
struct NodeDeathInner {
    dead: bool,
    cleared: bool,
    notification_done: bool,
    /// Indicates whether the normal flow was interrupted by removing the handle. In this case, we
    /// need behave as if the death notification didn't exist (i.e., we don't deliver anything to
    /// the user).
    aborted: bool,
}
/// A death notification registered by `process` against `node`.
pub(crate) struct NodeDeath {
    node: Ref<Node>,
    process: Ref<Process>,
    // TODO: Make this private.
    /// Userspace cookie echoed back with BR_DEAD_BINDER / clear-done.
    pub(crate) cookie: usize,
    /// Links used when queued as a work item.
    work_links: Links<dyn DeliverToRead>,
    // TODO: At the moment we're using this for two lists, which isn't safe because we want to
    // remove from the list without knowing the list it's in. We need to separate this out.
    death_links: Links<NodeDeath>,
    inner: SpinLock<NodeDeathInner>,
}
impl NodeDeath {
    /// Constructs a new node death notification object.
    ///
    /// # Safety
    ///
    /// The caller must call `NodeDeath::init` before using the notification object.
    pub(crate) unsafe fn new(node: Ref<Node>, process: Ref<Process>, cookie: usize) -> Self {
        Self {
            node,
            process,
            cookie,
            work_links: Links::new(),
            death_links: Links::new(),
            // SAFETY: Per this function's safety contract, `init` (which runs
            // `spinlock_init!`) is called before the lock is used.
            inner: unsafe {
                SpinLock::new(NodeDeathInner {
                    dead: false,
                    cleared: false,
                    notification_done: false,
                    aborted: false,
                })
            },
        }
    }
    /// Initialises the inner spinlock; must be called before any other method.
    pub(crate) fn init(self: Pin<&mut Self>) {
        // SAFETY: `inner` is pinned when `self` is.
        let inner = unsafe { self.map_unchecked_mut(|n| &mut n.inner) };
        kernel::spinlock_init!(inner, "NodeDeath::inner");
    }
    /// Sets the cleared flag to `true`.
    ///
    /// It removes `self` from the node's death notification list if needed. It must only be called
    /// once.
    ///
    /// Returns whether it needs to be queued.
    pub(crate) fn set_cleared(self: &Ref<Self>, abort: bool) -> bool {
        let (needs_removal, needs_queueing) = {
            // Update state and determine if we need to queue a work item. We only need to do it
            // when the node is not dead or if the user already completed the death notification.
            let mut inner = self.inner.lock();
            inner.cleared = true;
            if abort {
                inner.aborted = true;
            }
            (!inner.dead, !inner.dead || inner.notification_done)
        };
        // Remove death notification from node.
        if needs_removal {
            let mut owner_inner = self.node.owner.inner.lock();
            let node_inner = self.node.inner.access_mut(&mut owner_inner);
            // SAFETY(review): removal relies on `self` being on this node's
            // death list (it was added via `Node::add_death`) and on the
            // only-called-once contract above — confirm against `List::remove`'s
            // requirements, given `death_links` is shared between lists (see TODO
            // on the struct).
            unsafe { node_inner.death_list.remove(self) };
        }
        needs_queueing
    }
    /// Sets the 'notification done' flag to `true`.
    ///
    /// If the notification was already cleared, queues `self` on `thread` (via
    /// `push_work_if_looper`) so the clear-done can be delivered; the push
    /// result is deliberately ignored.
    pub(crate) fn set_notification_done(self: Ref<Self>, thread: &Thread) {
        let needs_queueing = {
            let mut inner = self.inner.lock();
            inner.notification_done = true;
            inner.cleared
        };
        if needs_queueing {
            let _ = thread.push_work_if_looper(self);
        }
    }
    /// Sets the 'dead' flag to `true` and queues work item if needed.
    pub(crate) fn set_dead(self: Ref<Self>) {
        let needs_queueing = {
            let mut inner = self.inner.lock();
            if inner.cleared {
                false
            } else {
                inner.dead = true;
                true
            }
        };
        if needs_queueing {
            // Push the death notification to the target process. There is nothing else to do if
            // it's already dead.
            let process = self.process.clone();
            let _ = process.push_work(self);
        }
    }
}
/// Lets `NodeDeath` live on `List<Ref<NodeDeath>>` lists (e.g. a node's
/// `death_list`) via its `death_links`.
impl GetLinks for NodeDeath {
    type EntryType = NodeDeath;
    fn get_links(data: &NodeDeath) -> &Links<NodeDeath> {
        &data.death_links
    }
}
impl DeliverToRead for NodeDeath {
    /// Delivers either `BR_CLEAR_DEATH_NOTIFICATION_DONE` (if the notification
    /// was cleared and nothing is pending) or `BR_DEAD_BINDER` followed by the
    /// cookie. Returns `Ok(true)` to continue processing work, `Ok(false)`
    /// after a `BR_DEAD_BINDER` to stop.
    fn do_work(self: Ref<Self>, _thread: &Thread, writer: &mut UserSlicePtrWriter) -> Result<bool> {
        let done = {
            let inner = self.inner.lock();
            // An aborted notification delivers nothing to the user.
            if inner.aborted {
                return Ok(true);
            }
            inner.cleared && (!inner.dead || inner.notification_done)
        };
        let cookie = self.cookie;
        let cmd = if done {
            BR_CLEAR_DEATH_NOTIFICATION_DONE
        } else {
            let process = self.process.clone();
            let mut process_inner = process.inner.lock();
            let inner = self.inner.lock();
            if inner.aborted {
                return Ok(true);
            }
            // We're still holding the inner lock, so it cannot be aborted while we insert it into
            // the delivered list.
            process_inner.death_delivered(self.clone());
            BR_DEAD_BINDER
        };
        writer.write(&cmd)?;
        writer.write(&cookie)?;
        // Mimic the original code: we stop processing work items when we get to a death
        // notification.
        Ok(cmd != BR_DEAD_BINDER)
    }
    fn get_links(&self) -> &Links<dyn DeliverToRead> {
        &self.work_links
    }
}
/// A binder node: the kernel-side representation of an object exposed by
/// `owner`, identified to userspace by the (`ptr`, `cookie`) pair.
pub(crate) struct Node {
    /// Monotonically increasing id, unique across all nodes (see `Node::new`).
    pub(crate) global_id: u64,
    ptr: usize,
    cookie: usize,
    pub(crate) flags: u32,
    pub(crate) owner: Ref<Process>,
    /// Count/death state, only accessible while holding the owner's inner mutex.
    inner: LockedBy<NodeInner, Mutex<ProcessInner>>,
    /// Links used when the node is queued as a work item.
    links: Links<dyn DeliverToRead>,
}
impl Node {
    /// Creates a new node with zeroed count state and a fresh `global_id`.
    pub(crate) fn new(ptr: usize, cookie: usize, flags: u32, owner: Ref<Process>) -> Self {
        // Process-wide id source; relaxed ordering suffices for uniqueness.
        static NEXT_ID: AtomicU64 = AtomicU64::new(1);
        let inner = LockedBy::new(
            &owner.inner,
            NodeInner {
                strong: CountState::new(),
                weak: CountState::new(),
                death_list: List::new(),
            },
        );
        Self {
            global_id: NEXT_ID.fetch_add(1, Ordering::Relaxed),
            ptr,
            cookie,
            flags,
            owner,
            inner,
            links: Links::new(),
        }
    }
    /// Returns the userspace identity of this node as a (ptr, cookie) pair.
    pub(crate) fn get_id(&self) -> (usize, usize) {
        (self.ptr, self.cookie)
    }
    /// Pops the next registered death notification, if any. The guard proves
    /// the owner's inner mutex is held.
    pub(crate) fn next_death(
        &self,
        guard: &mut Guard<'_, Mutex<ProcessInner>>,
    ) -> Option<Ref<NodeDeath>> {
        self.inner.access_mut(guard).death_list.pop_front()
    }
    /// Registers a death notification against this node. The guard proves the
    /// owner's inner mutex is held.
    pub(crate) fn add_death(
        &self,
        death: Ref<NodeDeath>,
        guard: &mut Guard<'_, Mutex<ProcessInner>>,
    ) {
        self.inner.access_mut(guard).death_list.push_back(death);
    }
    /// Updates the strong or weak count while the owner's inner lock is held
    /// (`owner_inner`).
    ///
    /// Returns whether a work item needs to be pushed: on increment, when
    /// userspace has not been told about the count yet; on decrement, when the
    /// count hits zero while userspace still holds it. When `biased` is set,
    /// only a biased count is decremented (and the bias is removed).
    pub(crate) fn update_refcount_locked(
        &self,
        inc: bool,
        strong: bool,
        biased: bool,
        owner_inner: &mut ProcessInner,
    ) -> bool {
        let inner = self.inner.access_from_mut(owner_inner);
        // Get a reference to the state we'll update.
        let state = if strong {
            &mut inner.strong
        } else {
            &mut inner.weak
        };
        // Update biased state: if the count is not biased, there is nothing to do; otherwise,
        // we're removing the bias, so mark the state as such.
        if biased {
            if !state.is_biased {
                return false;
            }
            state.is_biased = false;
        }
        // Update the count and determine whether we need to push work.
        // TODO: Here we may want to check the weak count being zero but the strong count being 1,
        // because in such cases, we won't deliver anything to userspace, so we shouldn't queue
        // either.
        if inc {
            state.count += 1;
            !state.has_count
        } else {
            state.count -= 1;
            state.count == 0 && state.has_count
        }
    }
    /// Convenience wrapper: takes the owner's inner lock and delegates to
    /// `ProcessInner::update_node_refcount` (no bias removal, no extra state).
    pub(crate) fn update_refcount(self: &Ref<Self>, inc: bool, strong: bool) {
        self.owner
            .inner
            .lock()
            .update_node_refcount(self, inc, strong, false, None);
    }
    /// Copies the current strong/weak counts into `out`.
    pub(crate) fn populate_counts(
        &self,
        out: &mut BinderNodeInfoForRef,
        guard: &Guard<'_, Mutex<ProcessInner>>,
    ) {
        let inner = self.inner.access(guard);
        out.strong_count = inner.strong.count as _;
        out.weak_count = inner.weak.count as _;
    }
    /// Fills `out` with this node's identity and whether userspace holds
    /// strong/weak references.
    pub(crate) fn populate_debug_info(
        &self,
        out: &mut BinderNodeDebugInfo,
        guard: &Guard<'_, Mutex<ProcessInner>>,
    ) {
        out.ptr = self.ptr as _;
        out.cookie = self.cookie as _;
        let inner = self.inner.access(guard);
        if inner.strong.has_count {
            out.has_strong_ref = 1;
        }
        if inner.weak.has_count {
            out.has_weak_ref = 1;
        }
    }
    /// Marks both counts as already reported to userspace.
    pub(crate) fn force_has_count(&self, guard: &mut Guard<'_, Mutex<ProcessInner>>) {
        let inner = self.inner.access_mut(guard);
        inner.strong.has_count = true;
        inner.weak.has_count = true;
    }
    /// Writes a (command, ptr, cookie) triple to the userspace writer.
    fn write(&self, writer: &mut UserSlicePtrWriter, code: u32) -> Result {
        writer.write(&code)?;
        writer.write(&self.ptr)?;
        writer.write(&self.cookie)?;
        Ok(())
    }
}
impl DeliverToRead for Node {
    /// Reconciles the node's counts with what userspace has been told, emitting
    /// the appropriate BR_INCREFS/BR_ACQUIRE/BR_RELEASE/BR_DECREFS commands and
    /// removing the node entirely when no references remain.
    fn do_work(self: Ref<Self>, _thread: &Thread, writer: &mut UserSlicePtrWriter) -> Result<bool> {
        let mut owner_inner = self.owner.inner.lock();
        let inner = self.inner.access_mut(&mut owner_inner);
        // Snapshot desired state (strong/weak) vs. what userspace was last told
        // (has_strong/has_weak); a strong reference implies a weak one.
        let strong = inner.strong.count > 0;
        let has_strong = inner.strong.has_count;
        let weak = strong || inner.weak.count > 0;
        let has_weak = inner.weak.has_count;
        inner.weak.has_count = weak;
        inner.strong.has_count = strong;
        if !weak {
            // Remove the node if there are no references to it.
            owner_inner.remove_node(self.ptr);
        } else {
            // Bias newly-reported counts so they can't drop to zero before
            // userspace acknowledges them.
            if !has_weak {
                inner.weak.add_bias();
            }
            if !has_strong && strong {
                inner.strong.add_bias();
            }
        }
        // Release the lock before writing to userspace.
        drop(owner_inner);
        // This could be done more compactly but we write out all the possibilities for
        // compatibility with the original implementation w.r.t. the order of events.
        if weak && !has_weak {
            self.write(writer, BR_INCREFS)?;
        }
        if strong && !has_strong {
            self.write(writer, BR_ACQUIRE)?;
        }
        if !strong && has_strong {
            self.write(writer, BR_RELEASE)?;
        }
        if !weak && has_weak {
            self.write(writer, BR_DECREFS)?;
        }
        Ok(true)
    }
    fn get_links(&self) -> &Links<dyn DeliverToRead> {
        &self.links
    }
}
/// A reference to a [`Node`] that tracks how many strong and weak references
/// its holder owns; the counts are released back to the node on drop.
pub(crate) struct NodeRef {
    pub(crate) node: Ref<Node>,
    strong_count: usize,
    weak_count: usize,
}
impl NodeRef {
    /// Creates a reference that claims ownership of the given counts.
    pub(crate) fn new(node: Ref<Node>, strong_count: usize, weak_count: usize) -> Self {
        Self {
            node,
            strong_count,
            weak_count,
        }
    }
    /// Transfers `other`'s counts into `self`; `other` is left with zero counts
    /// so its drop releases nothing.
    pub(crate) fn absorb(&mut self, mut other: Self) {
        self.strong_count += other.strong_count;
        self.weak_count += other.weak_count;
        other.strong_count = 0;
        other.weak_count = 0;
    }
    /// Creates a new reference to the same node via the owner process; fails if
    /// a strong reference is requested but none is held.
    pub(crate) fn clone(&self, strong: bool) -> BinderResult<NodeRef> {
        if strong && self.strong_count == 0 {
            return Err(BinderError::new_failed());
        }
        Ok(self
            .node
            .owner
            .inner
            .lock()
            .new_node_ref(self.node.clone(), strong, None))
    }
    /// Updates (increments or decrements) the number of references held against the node. If the
    /// count being updated transitions from 0 to 1 or from 1 to 0, the node is notified by having
    /// its `update_refcount` function called.
    ///
    /// Returns whether `self` should be removed (when both counts are zero).
    pub(crate) fn update(&mut self, inc: bool, strong: bool) -> bool {
        // Decrementing a strong count that is already zero is a no-op.
        if strong && self.strong_count == 0 {
            return false;
        }
        let (count, other_count) = if strong {
            (&mut self.strong_count, self.weak_count)
        } else {
            (&mut self.weak_count, self.strong_count)
        };
        if inc {
            if *count == 0 {
                self.node.update_refcount(true, strong);
            }
            *count += 1;
        } else {
            *count -= 1;
            if *count == 0 {
                self.node.update_refcount(false, strong);
                return other_count == 0;
            }
        }
        false
    }
}
impl Drop for NodeRef {
    /// Releases whatever strong/weak counts this reference still holds against the node.
    fn drop(&mut self) {
        // Decrement once per kind, strong first, mirroring how the counts were acquired;
        // a zero count means nothing is held for that kind.
        for &(held, strong) in &[(self.strong_count, true), (self.weak_count, false)] {
            if held > 0 {
                self.node.update_refcount(false, strong);
            }
        }
    }
}

960
drivers/android/process.rs Normal file
View File

@ -0,0 +1,960 @@
// SPDX-License-Identifier: GPL-2.0
use core::{convert::TryFrom, mem::take, ops::Range};
use kernel::{
bindings,
cred::Credential,
file::{self, File, IoctlCommand, IoctlHandler, PollTable},
io_buffer::{IoBufferReader, IoBufferWriter},
linked_list::List,
mm,
pages::Pages,
prelude::*,
rbtree::RBTree,
sync::{Guard, Mutex, Ref, RefBorrow, UniqueRef},
task::Task,
user_ptr::{UserSlicePtr, UserSlicePtrReader},
};
use crate::{
allocation::Allocation,
context::Context,
defs::*,
node::{Node, NodeDeath, NodeRef},
range_alloc::RangeAllocator,
thread::{BinderError, BinderResult, Thread},
DeliverToRead, DeliverToReadListAdapter, Either,
};
// TODO: Review this:
// Lock order: Process::node_refs -> Process::inner -> Thread::inner

/// Extra information attached to a buffer allocation that carries a transaction payload.
pub(crate) struct AllocationInfo {
    /// Range within the allocation where we can find the offsets to the object descriptors.
    pub(crate) offsets: Range<usize>,
}
/// The memory mapping that backs a process' buffer space.
struct Mapping {
    // Userspace base address of the mapping.
    address: usize,
    // Tracks which sub-ranges of the mapping are free/reserved/allocated.
    alloc: RangeAllocator<AllocationInfo>,
    // The pages inserted into the user VMA for this mapping.
    pages: Ref<[Pages<0>]>,
}
impl Mapping {
    /// Creates a mapping descriptor covering `size` bytes at `address`, backed by `pages`.
    ///
    /// Fails if the range allocator for the region cannot be created.
    fn new(address: usize, size: usize, pages: Ref<[Pages<0>]>) -> Result<Self> {
        Ok(Self {
            address,
            alloc: RangeAllocator::new(size)?,
            pages,
        })
    }
}
// TODO: Make this private.
/// Mutable state of a [`Process`], protected by `Process::inner`.
pub(crate) struct ProcessInner {
    // Whether this process is the context manager.
    is_manager: bool,
    // Set on release; new work is refused once dead.
    is_dead: bool,
    // Per-thread binder state for this process, keyed by thread id.
    threads: RBTree<i32, Ref<Thread>>,
    // Threads currently waiting and ready to have work delivered to them directly.
    ready_threads: List<Ref<Thread>>,
    // Process-wide queue of work not yet assigned to a thread.
    work: List<DeliverToReadListAdapter>,
    // The buffer mapping, once mmap has succeeded.
    mapping: Option<Mapping>,
    // Nodes owned by this process, keyed by their userspace pointer.
    nodes: RBTree<usize, Ref<Node>>,
    // Death notifications delivered to userspace but not yet acknowledged.
    delivered_deaths: List<Ref<NodeDeath>>,

    /// The number of requested threads that haven't registered yet.
    requested_thread_count: u32,

    /// The maximum number of threads used by the process thread pool.
    max_threads: u32,

    /// The number of threads that have started and registered with the thread pool.
    started_thread_count: u32,
}
impl ProcessInner {
    fn new() -> Self {
        Self {
            is_manager: false,
            is_dead: false,
            threads: RBTree::new(),
            ready_threads: List::new(),
            work: List::new(),
            mapping: None,
            nodes: RBTree::new(),
            requested_thread_count: 0,
            max_threads: 0,
            started_thread_count: 0,
            delivered_deaths: List::new(),
        }
    }

    /// Schedules a work item: delivers it directly to a ready thread when one is available,
    /// otherwise queues it on the process-wide work list.
    fn push_work(&mut self, work: Ref<dyn DeliverToRead>) -> BinderResult {
        // Try to find a ready thread to which to push the work.
        if let Some(thread) = self.ready_threads.pop_front() {
            // Push to thread while holding state lock. This prevents the thread from giving up
            // (for example, because of a signal) when we're about to deliver work.
            thread.push_work(work)
        } else if self.is_dead {
            Err(BinderError::new_dead())
        } else {
            // There are no ready threads. Push work to process queue.
            self.work.push_back(work);

            // Wake up polling threads, if any.
            for thread in self.threads.values() {
                thread.notify_if_poll_ready();
            }
            Ok(())
        }
    }

    // TODO: Should this be private?
    pub(crate) fn remove_node(&mut self, ptr: usize) {
        self.nodes.remove(&ptr);
    }

    /// Updates the reference count on the given node.
    // TODO: Decide if this should be private.
    pub(crate) fn update_node_refcount(
        &mut self,
        node: &Ref<Node>,
        inc: bool,
        strong: bool,
        biased: bool,
        othread: Option<&Thread>,
    ) {
        let push = node.update_refcount_locked(inc, strong, biased, self);

        // If we decided that we need to push work, push either to the process or to a thread if
        // one is specified.
        if push {
            if let Some(thread) = othread {
                thread.push_work_deferred(node.clone());
            } else {
                let _ = self.push_work(node.clone());
                // Nothing to do: `push_work` may fail if the process is dead, but that's ok as in
                // that case, it doesn't care about the notification.
            }
        }
    }

    // TODO: Make this private.
    /// Creates a new reference to `node`, incrementing the appropriate (strong or weak) count.
    pub(crate) fn new_node_ref(
        &mut self,
        node: Ref<Node>,
        strong: bool,
        thread: Option<&Thread>,
    ) -> NodeRef {
        self.update_node_refcount(&node, true, strong, false, thread);
        let strong_count = if strong { 1 } else { 0 };
        NodeRef::new(node, strong_count, 1 - strong_count)
    }

    /// Returns an existing node with the given pointer and cookie, if one exists.
    ///
    /// Returns an error if a node with the given pointer but a different cookie exists.
    fn get_existing_node(&self, ptr: usize, cookie: usize) -> Result<Option<Ref<Node>>> {
        match self.nodes.get(&ptr) {
            None => Ok(None),
            Some(node) => {
                let (_, node_cookie) = node.get_id();
                if node_cookie == cookie {
                    Ok(Some(node.clone()))
                } else {
                    Err(EINVAL)
                }
            }
        }
    }

    /// Returns a reference to an existing node with the given pointer and cookie. It requires a
    /// mutable reference because it needs to increment the ref count on the node, which may
    /// require pushing work to the work queue (to notify userspace of 0 to 1 transitions).
    fn get_existing_node_ref(
        &mut self,
        ptr: usize,
        cookie: usize,
        strong: bool,
        thread: Option<&Thread>,
    ) -> Result<Option<NodeRef>> {
        Ok(self
            .get_existing_node(ptr, cookie)?
            .map(|node| self.new_node_ref(node, strong, thread)))
    }

    /// Consumes one pending thread-start request, accounting the caller as a started thread.
    ///
    /// Returns `false` if no thread start was requested.
    fn register_thread(&mut self) -> bool {
        if self.requested_thread_count == 0 {
            return false;
        }

        self.requested_thread_count -= 1;
        self.started_thread_count += 1;
        true
    }

    /// Finds a delivered death notification with the given cookie, removes it from the thread's
    /// delivered list, and returns it.
    fn pull_delivered_death(&mut self, cookie: usize) -> Option<Ref<NodeDeath>> {
        let mut cursor = self.delivered_deaths.cursor_front_mut();
        while let Some(death) = cursor.current() {
            if death.cookie == cookie {
                return cursor.remove_current();
            }
            cursor.move_next();
        }
        None
    }

    /// Records that `death` was delivered to userspace so it can later be matched by
    /// [`ProcessInner::pull_delivered_death`].
    pub(crate) fn death_delivered(&mut self, death: Ref<NodeDeath>) {
        self.delivered_deaths.push_back(death);
    }
}
/// A handle-table entry: the node reference itself plus the death notification (if any)
/// registered for it.
struct NodeRefInfo {
    node_ref: NodeRef,
    death: Option<Ref<NodeDeath>>,
}
impl NodeRefInfo {
    /// Wraps `node_ref` with no death notification registered yet.
    fn new(node_ref: NodeRef) -> Self {
        Self { node_ref, death: None }
    }
}
/// The per-process tables of references to remote nodes.
struct ProcessNodeRefs {
    // Entries keyed by the handle value this process uses.
    by_handle: RBTree<u32, NodeRefInfo>,
    // Maps a node's global id to the handle this process uses for it.
    by_global_id: RBTree<u64, u32>,
}
impl ProcessNodeRefs {
    /// Creates an empty pair of lookup tables (by handle and by global node id).
    fn new() -> Self {
        let by_handle = RBTree::new();
        let by_global_id = RBTree::new();
        Self { by_handle, by_global_id }
    }
}
/// State kept for each process that has the binder device open.
pub(crate) struct Process {
    ctx: Ref<Context>,

    // The task leader (process).
    pub(crate) task: Task,

    // Credential associated with file when `Process` is created.
    pub(crate) cred: ARef<Credential>,

    // TODO: For now this a mutex because we have allocations in RangeAllocator while holding the
    // lock. We may want to split up the process state at some point to use a spin lock for the
    // other fields.
    // TODO: Make this private again.
    pub(crate) inner: Mutex<ProcessInner>,

    // References are in a different mutex to avoid recursive acquisition when
    // incrementing/decrementing a node in another process.
    node_refs: Mutex<ProcessNodeRefs>,
}
// SAFETY(review): `Process` is shared between threads by the driver; the non-`Send`/`Sync`
// fields appear to be accessed only under the two mutexes above — TODO confirm this invariant
// holds for every field.
#[allow(clippy::non_send_fields_in_send_ty)]
unsafe impl Send for Process {}
unsafe impl Sync for Process {}
impl Process {
    /// Allocates and initialises the state for a process that just opened the binder device.
    fn new(ctx: Ref<Context>, cred: ARef<Credential>) -> Result<Ref<Self>> {
        let mut process = Pin::from(UniqueRef::try_new(Self {
            ctx,
            cred,
            task: Task::current().group_leader().clone(),
            // SAFETY: `inner` is initialised in the call to `mutex_init` below.
            inner: unsafe { Mutex::new(ProcessInner::new()) },
            // SAFETY: `node_refs` is initialised in the call to `mutex_init` below.
            node_refs: unsafe { Mutex::new(ProcessNodeRefs::new()) },
        })?);

        // SAFETY: `inner` is pinned when `Process` is.
        let pinned = unsafe { process.as_mut().map_unchecked_mut(|p| &mut p.inner) };
        kernel::mutex_init!(pinned, "Process::inner");

        // SAFETY: `node_refs` is pinned when `Process` is.
        let pinned = unsafe { process.as_mut().map_unchecked_mut(|p| &mut p.node_refs) };
        kernel::mutex_init!(pinned, "Process::node_refs");

        Ok(process.into())
    }

    /// Attempts to fetch a work item from the process queue.
    pub(crate) fn get_work(&self) -> Option<Ref<dyn DeliverToRead>> {
        self.inner.lock().work.pop_front()
    }

    /// Attempts to fetch a work item from the process queue. If none is available, it registers the
    /// given thread as ready to receive work directly.
    ///
    /// This must only be called when the thread is not participating in a transaction chain; when
    /// it is, work will always be delivered directly to the thread (and not through the process
    /// queue).
    pub(crate) fn get_work_or_register<'a>(
        &'a self,
        thread: &'a Ref<Thread>,
    ) -> Either<Ref<dyn DeliverToRead>, Registration<'a>> {
        let mut inner = self.inner.lock();

        // Try to get work from the process queue.
        if let Some(work) = inner.work.pop_front() {
            return Either::Left(work);
        }

        // Register the thread as ready.
        Either::Right(Registration::new(self, thread, &mut inner))
    }

    /// Returns the `Thread` state for the thread with the given id, creating and registering it
    /// if it doesn't exist yet.
    fn get_thread(self: RefBorrow<'_, Self>, id: i32) -> Result<Ref<Thread>> {
        // TODO: Consider using read/write locks here instead.
        {
            let inner = self.inner.lock();
            if let Some(thread) = inner.threads.get(&id) {
                return Ok(thread.clone());
            }
        }

        // Allocate a new `Thread` without holding any locks.
        let ta = Thread::new(id, self.into())?;
        let node = RBTree::try_allocate_node(id, ta.clone())?;

        let mut inner = self.inner.lock();

        // Recheck. It's possible the thread was created while we were not holding the lock.
        if let Some(thread) = inner.threads.get(&id) {
            return Ok(thread.clone());
        }

        inner.threads.insert(node);
        Ok(ta)
    }

    pub(crate) fn push_work(&self, work: Ref<dyn DeliverToRead>) -> BinderResult {
        self.inner.lock().push_work(work)
    }

    /// Registers this process as the context manager, using the node described by `info`, or a
    /// zero node when `info` is `None`.
    fn set_as_manager(
        self: RefBorrow<'_, Self>,
        info: Option<FlatBinderObject>,
        thread: &Thread,
    ) -> Result {
        let (ptr, cookie, flags) = if let Some(obj) = info {
            (
                // SAFETY: The object type for this ioctl is implicitly `BINDER_TYPE_BINDER`, so it
                // is safe to access the `binder` field.
                unsafe { obj.__bindgen_anon_1.binder },
                obj.cookie,
                obj.flags,
            )
        } else {
            (0, 0, 0)
        };
        let node_ref = self.get_node(ptr as _, cookie as _, flags as _, true, Some(thread))?;
        let node = node_ref.node.clone();
        self.ctx.set_manager_node(node_ref)?;
        self.inner.lock().is_manager = true;

        // Force the state of the node to prevent the delivery of acquire/increfs.
        let mut owner_inner = node.owner.inner.lock();
        node.force_has_count(&mut owner_inner);
        Ok(())
    }

    /// Returns a reference to the node with the given pointer and cookie, creating the node if it
    /// doesn't exist yet.
    pub(crate) fn get_node(
        self: RefBorrow<'_, Self>,
        ptr: usize,
        cookie: usize,
        flags: u32,
        strong: bool,
        thread: Option<&Thread>,
    ) -> Result<NodeRef> {
        // Try to find an existing node.
        {
            let mut inner = self.inner.lock();
            if let Some(node) = inner.get_existing_node_ref(ptr, cookie, strong, thread)? {
                return Ok(node);
            }
        }

        // Allocate the node before reacquiring the lock.
        let node = Ref::try_new(Node::new(ptr, cookie, flags, self.into()))?;
        let rbnode = RBTree::try_allocate_node(ptr, node.clone())?;

        let mut inner = self.inner.lock();
        // Recheck under the lock: the node may have been inserted while the lock was released.
        if let Some(node) = inner.get_existing_node_ref(ptr, cookie, strong, thread)? {
            return Ok(node);
        }

        inner.nodes.insert(rbnode);
        Ok(inner.new_node_ref(node, strong, thread))
    }
pub(crate) fn insert_or_update_handle(
&self,
node_ref: NodeRef,
is_mananger: bool,
) -> Result<u32> {
{
let mut refs = self.node_refs.lock();
// Do a lookup before inserting.
if let Some(handle_ref) = refs.by_global_id.get(&node_ref.node.global_id) {
let handle = *handle_ref;
let info = refs.by_handle.get_mut(&handle).unwrap();
info.node_ref.absorb(node_ref);
return Ok(handle);
}
}
// Reserve memory for tree nodes.
let reserve1 = RBTree::try_reserve_node()?;
let reserve2 = RBTree::try_reserve_node()?;
let mut refs = self.node_refs.lock();
// Do a lookup again as node may have been inserted before the lock was reacquired.
if let Some(handle_ref) = refs.by_global_id.get(&node_ref.node.global_id) {
let handle = *handle_ref;
let info = refs.by_handle.get_mut(&handle).unwrap();
info.node_ref.absorb(node_ref);
return Ok(handle);
}
// Find id.
let mut target = if is_mananger { 0 } else { 1 };
for handle in refs.by_handle.keys() {
if *handle > target {
break;
}
if *handle == target {
target = target.checked_add(1).ok_or(ENOMEM)?;
}
}
// Ensure the process is still alive while we insert a new reference.
let inner = self.inner.lock();
if inner.is_dead {
return Err(ESRCH);
}
refs.by_global_id
.insert(reserve1.into_node(node_ref.node.global_id, target));
refs.by_handle
.insert(reserve2.into_node(target, NodeRefInfo::new(node_ref)));
Ok(target)
}
pub(crate) fn get_transaction_node(&self, handle: u32) -> BinderResult<NodeRef> {
// When handle is zero, try to get the context manager.
if handle == 0 {
self.ctx.get_manager_node(true)
} else {
self.get_node_from_handle(handle, true)
}
}
pub(crate) fn get_node_from_handle(&self, handle: u32, strong: bool) -> BinderResult<NodeRef> {
self.node_refs
.lock()
.by_handle
.get(&handle)
.ok_or(ENOENT)?
.node_ref
.clone(strong)
}
    /// Removes `death` from the list of notifications already delivered to userspace.
    pub(crate) fn remove_from_delivered_deaths(&self, death: &Ref<NodeDeath>) {
        let mut inner = self.inner.lock();
        // SAFETY(review): assumes `death` is on this process' delivered list, or on no list at
        // all — TODO confirm against `List::remove`'s documented requirements.
        let removed = unsafe { inner.delivered_deaths.remove(death) };
        // Release the lock before dropping the removed entry.
        drop(inner);
        drop(removed);
    }

    /// Increments (`inc == true`) or decrements the strong/weak count of the reference behind
    /// `handle`, removing the reference from the tables once both counts reach zero.
    pub(crate) fn update_ref(&self, handle: u32, inc: bool, strong: bool) -> Result {
        if inc && handle == 0 {
            if let Ok(node_ref) = self.ctx.get_manager_node(strong) {
                if core::ptr::eq(self, &*node_ref.node.owner) {
                    return Err(EINVAL);
                }
                let _ = self.insert_or_update_handle(node_ref, true);
                return Ok(());
            }
        }

        // To preserve original binder behaviour, we only fail requests where the manager tries to
        // increment references on itself.
        let mut refs = self.node_refs.lock();
        if let Some(info) = refs.by_handle.get_mut(&handle) {
            if info.node_ref.update(inc, strong) {
                // Clean up death if there is one attached to this node reference.
                if let Some(death) = info.death.take() {
                    death.set_cleared(true);
                    self.remove_from_delivered_deaths(&death);
                }

                // Remove reference from process tables.
                let id = info.node_ref.node.global_id;
                refs.by_handle.remove(&handle);
                refs.by_global_id.remove(&id);
            }
        }
        Ok(())
    }

    /// Decrements the refcount of the given node, if one exists.
    pub(crate) fn update_node(&self, ptr: usize, cookie: usize, strong: bool, biased: bool) {
        let mut inner = self.inner.lock();
        if let Ok(Some(node)) = inner.get_existing_node(ptr, cookie) {
            inner.update_node_refcount(&node, false, strong, biased, None);
        }
    }

    /// Reads a (pointer, cookie) pair from userspace and performs a biased decrement on the
    /// matching node.
    pub(crate) fn inc_ref_done(&self, reader: &mut UserSlicePtrReader, strong: bool) -> Result {
        let ptr = reader.read::<usize>()?;
        let cookie = reader.read::<usize>()?;
        self.update_node(ptr, cookie, strong, true);
        Ok(())
    }

    /// Reserves `size` bytes in this process' mapped buffer.
    ///
    /// Fails with a "dead" error when no mapping exists.
    pub(crate) fn buffer_alloc(&self, size: usize) -> BinderResult<Allocation<'_>> {
        let mut inner = self.inner.lock();
        let mapping = inner.mapping.as_mut().ok_or_else(BinderError::new_dead)?;

        let offset = mapping.alloc.reserve_new(size)?;
        Ok(Allocation::new(
            self,
            offset,
            size,
            mapping.address + offset,
            mapping.pages.clone(),
        ))
    }

    // TODO: Review if we want an Option or a Result.
    /// Looks up the allocation starting at user address `ptr`, moving it back to the reserved
    /// state and restoring its associated info, if any.
    pub(crate) fn buffer_get(&self, ptr: usize) -> Option<Allocation<'_>> {
        let mut inner = self.inner.lock();
        let mapping = inner.mapping.as_mut()?;
        let offset = ptr.checked_sub(mapping.address)?;
        let (size, odata) = mapping.alloc.reserve_existing(offset).ok()?;
        let mut alloc = Allocation::new(self, offset, size, ptr, mapping.pages.clone());
        if let Some(data) = odata {
            alloc.set_info(data);
        }
        Some(alloc)
    }

    /// Frees the reservation starting at user address `ptr`, warning when the address doesn't
    /// correspond to a reservation.
    pub(crate) fn buffer_raw_free(&self, ptr: usize) {
        let mut inner = self.inner.lock();
        if let Some(ref mut mapping) = &mut inner.mapping {
            if ptr < mapping.address
                || mapping
                    .alloc
                    .reservation_abort(ptr - mapping.address)
                    .is_err()
            {
                pr_warn!(
                    "Pointer {:x} failed to free, base = {:x}\n",
                    ptr,
                    mapping.address
                );
            }
        }
    }

    /// Commits the reservation at `offset`, attaching `data` and making it freeable.
    pub(crate) fn buffer_make_freeable(&self, offset: usize, data: Option<AllocationInfo>) {
        let mut inner = self.inner.lock();
        if let Some(ref mut mapping) = &mut inner.mapping {
            if mapping.alloc.reservation_commit(offset, data).is_err() {
                pr_warn!("Offset {} failed to be marked freeable\n", offset);
            }
        }
    }

    /// Creates the mapping that backs this process' buffer space (capped at `SZ_4M`), allocating
    /// and inserting all pages up front.
    fn create_mapping(&self, vma: &mut mm::virt::Area) -> Result {
        let size = core::cmp::min(vma.end() - vma.start(), bindings::SZ_4M as usize);
        let page_count = size / kernel::PAGE_SIZE;

        // Allocate and map all pages.
        //
        // N.B. If we fail halfway through mapping these pages, the kernel will unmap them.
        let mut pages = Vec::new();
        pages.try_reserve_exact(page_count)?;
        let mut address = vma.start();
        for _ in 0..page_count {
            let page = Pages::<0>::new()?;
            vma.insert_page(address, &page)?;
            pages.try_push(page)?;
            address += kernel::PAGE_SIZE;
        }

        let ref_pages = Ref::try_from(pages)?;

        // Save pages for later.
        let mut inner = self.inner.lock();
        match &inner.mapping {
            None => inner.mapping = Some(Mapping::new(vma.start(), size, ref_pages)?),
            Some(_) => return Err(EBUSY),
        }
        Ok(())
    }

    /// Handles `BINDER_VERSION`: writes the driver's binder version back to userspace.
    fn version(&self, data: UserSlicePtr) -> Result {
        data.writer().write(&BinderVersion::current())
    }

    /// See [`ProcessInner::register_thread`].
    pub(crate) fn register_thread(&self) -> bool {
        self.inner.lock().register_thread()
    }

    /// Removes the given thread from this process and releases its resources
    /// (`BINDER_THREAD_EXIT`).
    fn remove_thread(&self, thread: Ref<Thread>) {
        self.inner.lock().threads.remove(&thread.id);
        thread.release();
    }

    /// Sets the maximum size of the thread pool (`BINDER_SET_MAX_THREADS`).
    fn set_max_threads(&self, max: u32) {
        self.inner.lock().max_threads = max;
    }

    /// Handles `BINDER_GET_NODE_DEBUG_INFO`: returns debug info for the first node whose pointer
    /// is greater than the one supplied by userspace.
    fn get_node_debug_info(&self, data: UserSlicePtr) -> Result {
        let (mut reader, mut writer) = data.reader_writer();

        // Read the starting point.
        let ptr = reader.read::<BinderNodeDebugInfo>()?.ptr as usize;
        let mut out = BinderNodeDebugInfo::default();

        {
            let inner = self.inner.lock();
            for (node_ptr, node) in &inner.nodes {
                if *node_ptr > ptr {
                    node.populate_debug_info(&mut out, &inner);
                    break;
                }
            }
        }

        writer.write(&out)
    }

    /// Handles `BINDER_GET_NODE_INFO_FOR_REF`: returns count information for the node behind the
    /// given handle. Only the context manager may use this.
    fn get_node_info_from_ref(&self, data: UserSlicePtr) -> Result {
        let (mut reader, mut writer) = data.reader_writer();
        let mut out = reader.read::<BinderNodeInfoForRef>()?;

        // Output-only fields must be zeroed by the caller.
        if out.strong_count != 0
            || out.weak_count != 0
            || out.reserved1 != 0
            || out.reserved2 != 0
            || out.reserved3 != 0
        {
            return Err(EINVAL);
        }

        // Only the context manager is allowed to use this ioctl.
        if !self.inner.lock().is_manager {
            return Err(EPERM);
        }

        let node_ref = self
            .get_node_from_handle(out.handle, true)
            .or(Err(EINVAL))?;

        // Get the counts from the node.
        {
            let owner_inner = node_ref.node.owner.inner.lock();
            node_ref.node.populate_counts(&mut out, &owner_inner);
        }

        // Write the result back.
        writer.write(&out)
    }
pub(crate) fn needs_thread(&self) -> bool {
let mut inner = self.inner.lock();
let ret = inner.requested_thread_count == 0
&& inner.ready_threads.is_empty()
&& inner.started_thread_count < inner.max_threads;
if ret {
inner.requested_thread_count += 1
};
ret
}
    /// Registers a death notification for the node behind the handle read from `reader`, to be
    /// delivered with the accompanying cookie when the node's owner dies.
    pub(crate) fn request_death(
        self: &Ref<Self>,
        reader: &mut UserSlicePtrReader,
        thread: &Thread,
    ) -> Result {
        let handle: u32 = reader.read()?;
        let cookie: usize = reader.read()?;

        // TODO: First two should result in error, but not the others.

        // TODO: Do we care about the context manager dying?

        // Queue BR_ERROR if we can't allocate memory for the death notification.
        let death = UniqueRef::try_new_uninit().map_err(|err| {
            thread.push_return_work(BR_ERROR);
            err
        })?;

        let mut refs = self.node_refs.lock();
        let info = refs.by_handle.get_mut(&handle).ok_or(EINVAL)?;

        // Nothing to do if there is already a death notification request for this handle.
        if info.death.is_some() {
            return Ok(());
        }

        let death = {
            let mut pinned = Pin::from(death.write(
                // SAFETY: `init` is called below.
                unsafe { NodeDeath::new(info.node_ref.node.clone(), self.clone(), cookie) },
            ));
            pinned.as_mut().init();
            Ref::<NodeDeath>::from(pinned)
        };

        info.death = Some(death.clone());

        // Register the death notification.
        {
            let mut owner_inner = info.node_ref.node.owner.inner.lock();
            if owner_inner.is_dead {
                // Owner already dead: deliver the notification right away.
                drop(owner_inner);
                let _ = self.push_work(death);
            } else {
                info.node_ref.node.add_death(death, &mut owner_inner);
            }
        }
        Ok(())
    }

    /// Clears a previously-requested death notification, checking that the cookie matches.
    pub(crate) fn clear_death(&self, reader: &mut UserSlicePtrReader, thread: &Thread) -> Result {
        let handle: u32 = reader.read()?;
        let cookie: usize = reader.read()?;

        let mut refs = self.node_refs.lock();
        let info = refs.by_handle.get_mut(&handle).ok_or(EINVAL)?;
        let death = info.death.take().ok_or(EINVAL)?;
        if death.cookie != cookie {
            // Wrong cookie: restore the notification and report the error.
            info.death = Some(death);
            return Err(EINVAL);
        }

        // Update state and determine if we need to queue a work item. We only need to do it when
        // the node is not dead or if the user already completed the death notification.
        if death.set_cleared(false) {
            let _ = thread.push_work_if_looper(death);
        }

        Ok(())
    }

    /// Acknowledges a delivered death notification identified by `cookie`.
    pub(crate) fn dead_binder_done(&self, cookie: usize, thread: &Thread) {
        if let Some(death) = self.inner.lock().pull_delivered_death(cookie) {
            death.set_notification_done(thread);
        }
    }
}
impl IoctlHandler for Process {
    type Target<'a> = RefBorrow<'a, Process>;

    /// Dispatches binder ioctls that only read data from userspace.
    fn write(
        this: RefBorrow<'_, Process>,
        _file: &File,
        cmd: u32,
        reader: &mut UserSlicePtrReader,
    ) -> Result<i32> {
        // Per-thread state is keyed by the calling thread's pid.
        let thread = this.get_thread(Task::current().pid())?;
        match cmd {
            bindings::BINDER_SET_MAX_THREADS => this.set_max_threads(reader.read()?),
            bindings::BINDER_SET_CONTEXT_MGR => this.set_as_manager(None, &thread)?,
            bindings::BINDER_THREAD_EXIT => this.remove_thread(thread),
            bindings::BINDER_SET_CONTEXT_MGR_EXT => {
                this.set_as_manager(Some(reader.read()?), &thread)?
            }
            _ => return Err(EINVAL),
        }
        Ok(0)
    }

    /// Dispatches binder ioctls that both read from and write to userspace.
    fn read_write(
        this: RefBorrow<'_, Process>,
        file: &File,
        cmd: u32,
        data: UserSlicePtr,
    ) -> Result<i32> {
        let thread = this.get_thread(Task::current().pid())?;
        match cmd {
            bindings::BINDER_WRITE_READ => thread.write_read(data, file.is_blocking())?,
            bindings::BINDER_GET_NODE_DEBUG_INFO => this.get_node_debug_info(data)?,
            bindings::BINDER_GET_NODE_INFO_FOR_REF => this.get_node_info_from_ref(data)?,
            bindings::BINDER_VERSION => this.version(data)?,
            _ => return Err(EINVAL),
        }
        Ok(0)
    }
}
impl file::Operations for Process {
    type Data = Ref<Self>;
    type OpenData = Ref<Context>;

    kernel::declare_file_operations!(ioctl, compat_ioctl, mmap, poll);

    /// Creates the per-process state when the binder device is opened.
    fn open(ctx: &Ref<Context>, file: &File) -> Result<Self::Data> {
        Self::new(ctx.clone(), file.cred().into())
    }

    /// Tears down process state when the file is released; ordering below matters because other
    /// processes may still hold references to our nodes.
    fn release(obj: Self::Data, _file: &File) {
        // Mark this process as dead. We'll do the same for the threads later.
        obj.inner.lock().is_dead = true;

        // If this process is the manager, unset it.
        if obj.inner.lock().is_manager {
            obj.ctx.unset_manager_node();
        }

        // TODO: Do this in a worker?

        // Cancel all pending work items.
        while let Some(work) = obj.get_work() {
            work.cancel();
        }

        // Free any resources kept alive by allocated buffers.
        let omapping = obj.inner.lock().mapping.take();
        if let Some(mut mapping) = omapping {
            let address = mapping.address;
            let pages = mapping.pages.clone();
            mapping.alloc.for_each(|offset, size, odata| {
                let ptr = offset + address;
                let mut alloc = Allocation::new(&obj, offset, size, ptr, pages.clone());
                if let Some(data) = odata {
                    alloc.set_info(data);
                }
                drop(alloc)
            });
        }

        // Drop all references. We do this dance with `swap` to avoid destroying the references
        // while holding the lock.
        let mut refs = obj.node_refs.lock();
        let mut node_refs = take(&mut refs.by_handle);
        drop(refs);

        // Remove all death notifications from the nodes (that belong to a different process).
        for info in node_refs.values_mut() {
            let death = if let Some(existing) = info.death.take() {
                existing
            } else {
                continue;
            };

            death.set_cleared(false);
        }

        // Do similar dance for the state lock.
        let mut inner = obj.inner.lock();
        let threads = take(&mut inner.threads);
        let nodes = take(&mut inner.nodes);
        drop(inner);

        // Release all threads.
        for thread in threads.values() {
            thread.release();
        }

        // Deliver death notifications.
        for node in nodes.values() {
            loop {
                let death = {
                    // Reacquire the lock for each notification; `set_dead` runs unlocked.
                    let mut inner = obj.inner.lock();
                    if let Some(death) = node.next_death(&mut inner) {
                        death
                    } else {
                        break;
                    }
                };

                death.set_dead();
            }
        }
    }

    fn ioctl(this: RefBorrow<'_, Process>, file: &File, cmd: &mut IoctlCommand) -> Result<i32> {
        cmd.dispatch::<Self>(this, file)
    }

    fn compat_ioctl(
        this: RefBorrow<'_, Process>,
        file: &File,
        cmd: &mut IoctlCommand,
    ) -> Result<i32> {
        cmd.dispatch::<Self>(this, file)
    }

    /// Maps the buffer space into the caller; only the opening process may mmap, and the region
    /// must never be writable from userspace.
    fn mmap(this: RefBorrow<'_, Process>, _file: &File, vma: &mut mm::virt::Area) -> Result {
        // We don't allow mmap to be used in a different process.
        if !Task::current().group_leader().eq(&this.task) {
            return Err(EINVAL);
        }

        if vma.start() == 0 {
            return Err(EINVAL);
        }

        let mut flags = vma.flags();
        use mm::virt::flags::*;
        if flags & WRITE != 0 {
            return Err(EPERM);
        }

        flags |= DONTCOPY | MIXEDMAP;
        flags &= !MAYWRITE;
        vma.set_flags(flags);

        // TODO: Set ops. We need to learn when the user unmaps so that we can stop using it.
        this.create_mapping(vma)
    }

    /// Polls for readable work on behalf of the calling thread.
    fn poll(this: RefBorrow<'_, Process>, file: &File, table: &PollTable) -> Result<u32> {
        let thread = this.get_thread(Task::current().pid())?;
        let (from_proc, mut mask) = thread.poll(file, table);
        // Also report readiness when the process-wide queue has work.
        if mask == 0 && from_proc && !this.inner.lock().work.is_empty() {
            mask |= bindings::POLLIN;
        }
        Ok(mask)
    }
}
/// Keeps a thread on its process' `ready_threads` list for as long as this value is alive; the
/// thread is removed from the list again on drop.
pub(crate) struct Registration<'a> {
    process: &'a Process,
    thread: &'a Ref<Thread>,
}
impl<'a> Registration<'a> {
    /// Adds `thread` to the process' list of threads that are ready to receive work.
    ///
    /// `guard` must be the lock guard for `process.inner`.
    fn new(
        process: &'a Process,
        thread: &'a Ref<Thread>,
        guard: &mut Guard<'_, Mutex<ProcessInner>>,
    ) -> Self {
        guard.ready_threads.push_back(thread.clone());
        Self { process, thread }
    }
}
impl Drop for Registration<'_> {
    fn drop(&mut self) {
        let mut inner = self.process.inner.lock();
        // SAFETY(review): `thread` was pushed onto `ready_threads` in `new`; assumes it is
        // either still on that list or on no list when removed here — TODO confirm against
        // `List::remove`'s contract.
        unsafe { inner.ready_threads.remove(self.thread) };
    }
}

View File

@ -0,0 +1,189 @@
// SPDX-License-Identifier: GPL-2.0
use core::ptr::NonNull;
use kernel::{
linked_list::{CursorMut, GetLinks, Links, List},
prelude::*,
};
/// Best-fit allocator for sub-ranges of a process' mapped buffer, kept as an offset-ordered
/// linked list of descriptors.
pub(crate) struct RangeAllocator<T> {
    list: List<Box<Descriptor<T>>>,
}
/// Lifecycle of a [`Descriptor`]: free, reserved (chosen but not committed), or allocated.
#[derive(Debug, PartialEq, Eq)]
enum DescriptorState {
    Free,
    Reserved,
    Allocated,
}
impl<T> RangeAllocator<T> {
    /// Creates an allocator managing `size` bytes, starting with a single free descriptor that
    /// covers the whole range.
    pub(crate) fn new(size: usize) -> Result<Self> {
        let desc = Box::try_new(Descriptor::new(0, size))?;
        let mut list = List::new();
        list.push_back(desc);
        Ok(Self { list })
    }

    /// Returns the free descriptor that most tightly fits `size`, or `None` if none is large
    /// enough.
    fn find_best_match(&self, size: usize) -> Option<NonNull<Descriptor<T>>> {
        // TODO: Use a binary tree instead of list for this lookup.
        let mut best = None;
        let mut best_size = usize::MAX;
        let mut cursor = self.list.cursor_front();
        while let Some(desc) = cursor.current() {
            if desc.state == DescriptorState::Free {
                if size == desc.size {
                    // Exact fit: can't do better.
                    return Some(NonNull::from(desc));
                }

                if size < desc.size && desc.size < best_size {
                    best = Some(NonNull::from(desc));
                    best_size = desc.size;
                }
            }

            cursor.move_next();
        }

        best
    }

    /// Reserves `size` bytes, splitting the chosen free descriptor when it is larger than
    /// needed. Returns the offset of the reservation.
    pub(crate) fn reserve_new(&mut self, size: usize) -> Result<usize> {
        let desc_ptr = match self.find_best_match(size) {
            None => return Err(ENOMEM),
            Some(found) => found,
        };

        // SAFETY: We hold the only mutable reference to list, so it cannot have changed.
        let desc = unsafe { &mut *desc_ptr.as_ptr() };
        if desc.size == size {
            desc.state = DescriptorState::Reserved;
            return Ok(desc.offset);
        }

        // We need to break up the descriptor: its head becomes the reservation, the tail stays
        // free.
        let new = Box::try_new(Descriptor::new(desc.offset + size, desc.size - size))?;
        // SAFETY(review): `desc_ptr` was obtained from this list above and is still valid.
        unsafe { self.list.insert_after(desc_ptr, new) };
        desc.state = DescriptorState::Reserved;
        desc.size = size;
        Ok(desc.offset)
    }

    /// Frees the reserved descriptor under the cursor, merging it with adjacent free
    /// descriptors.
    fn free_with_cursor(cursor: &mut CursorMut<'_, Box<Descriptor<T>>>) -> Result {
        let mut size = match cursor.current() {
            None => return Err(EINVAL),
            Some(ref mut entry) => {
                // Only reserved entries may be freed here.
                match entry.state {
                    DescriptorState::Free => return Err(EINVAL),
                    DescriptorState::Allocated => return Err(EPERM),
                    DescriptorState::Reserved => {}
                }
                entry.state = DescriptorState::Free;
                entry.size
            }
        };

        // Try to merge with the next entry: fold the current entry into it, then remove the
        // current node.
        if let Some(next) = cursor.peek_next() {
            if next.state == DescriptorState::Free {
                next.offset -= size;
                next.size += size;
                size = next.size;
                cursor.remove_current();
            }
        }

        // Try to merge with the previous entry.
        if let Some(prev) = cursor.peek_prev() {
            if prev.state == DescriptorState::Free {
                prev.size += size;
                cursor.remove_current();
            }
        }

        Ok(())
    }

    /// Returns a cursor positioned on the descriptor that starts exactly at `offset`.
    ///
    /// The list is ordered by offset, so the scan stops as soon as it passes `offset`.
    fn find_at_offset(&mut self, offset: usize) -> Option<CursorMut<'_, Box<Descriptor<T>>>> {
        let mut cursor = self.list.cursor_front_mut();
        while let Some(desc) = cursor.current() {
            if desc.offset == offset {
                return Some(cursor);
            }

            if desc.offset > offset {
                return None;
            }

            cursor.move_next();
        }
        None
    }

    /// Aborts the reservation starting at `offset`, returning it to the free pool.
    pub(crate) fn reservation_abort(&mut self, offset: usize) -> Result {
        // TODO: The force case is currently O(n), but could be made O(1) with unsafe.
        let mut cursor = self.find_at_offset(offset).ok_or(EINVAL)?;
        Self::free_with_cursor(&mut cursor)
    }

    /// Commits the reservation at `offset`, attaching `data` and marking it allocated.
    pub(crate) fn reservation_commit(&mut self, offset: usize, data: Option<T>) -> Result {
        // TODO: This is currently O(n), make it O(1).
        let mut cursor = self.find_at_offset(offset).ok_or(ENOENT)?;
        let desc = cursor.current().unwrap();
        desc.state = DescriptorState::Allocated;
        desc.data = data;
        Ok(())
    }

    /// Takes an entry at the given offset from [`DescriptorState::Allocated`] to
    /// [`DescriptorState::Reserved`].
    ///
    /// Returns the size of the existing entry and the data associated with it.
    pub(crate) fn reserve_existing(&mut self, offset: usize) -> Result<(usize, Option<T>)> {
        // TODO: This is currently O(n), make it O(log n).
        let mut cursor = self.find_at_offset(offset).ok_or(ENOENT)?;
        let desc = cursor.current().unwrap();
        if desc.state != DescriptorState::Allocated {
            return Err(ENOENT);
        }
        desc.state = DescriptorState::Reserved;
        Ok((desc.size, desc.data.take()))
    }

    /// Calls `callback` for every allocated entry, taking ownership of its associated data.
    pub(crate) fn for_each<F: Fn(usize, usize, Option<T>)>(&mut self, callback: F) {
        let mut cursor = self.list.cursor_front_mut();
        while let Some(desc) = cursor.current() {
            if desc.state == DescriptorState::Allocated {
                callback(desc.offset, desc.size, desc.data.take());
            }
            cursor.move_next();
        }
    }
}
/// A contiguous region of the managed range, its state, and optional per-allocation payload.
struct Descriptor<T> {
    state: DescriptorState,
    size: usize,
    offset: usize,
    // Intrusive links used to keep descriptors on the allocator's list.
    links: Links<Descriptor<T>>,
    data: Option<T>,
}
impl<T> Descriptor<T> {
    /// Creates a free descriptor covering `size` bytes that start at `offset`.
    fn new(offset: usize, size: usize) -> Self {
        let links = Links::new();
        Self {
            state: DescriptorState::Free,
            offset,
            size,
            links,
            data: None,
        }
    }
}
// Descriptors embed their own list links.
impl<T> GetLinks for Descriptor<T> {
    type EntryType = Self;

    fn get_links(desc: &Self) -> &Links<Self> {
        &desc.links
    }
}

View File

@ -0,0 +1,111 @@
// SPDX-License-Identifier: GPL-2.0
//! Binder -- the Android IPC mechanism.
//!
//! TODO: This module is a work in progress.
use kernel::{
io_buffer::IoBufferWriter,
linked_list::{GetLinks, GetLinksWrapped, Links},
miscdev::Registration,
prelude::*,
str::CStr,
sync::Ref,
user_ptr::UserSlicePtrWriter,
};
mod allocation;
mod context;
mod defs;
mod node;
mod process;
mod range_alloc;
mod thread;
mod transaction;
use {context::Context, thread::Thread};
// Declares the `rust_binder` module and its metadata to the kernel.
module! {
    type: BinderModule,
    name: b"rust_binder",
    author: b"Wedson Almeida Filho",
    description: b"Android Binder",
    license: b"GPL",
}
/// A value that is one of two alternatives, e.g. work item vs. thread registration in
/// `Process::get_work_or_register`.
enum Either<L, R> {
    Left(L),
    Right(R),
}
/// A work item that can be delivered to a thread performing a binder read.
trait DeliverToRead {
    /// Performs work. Returns true if remaining work items in the queue should be processed
    /// immediately, or false if it should return to caller before processing additional work
    /// items.
    fn do_work(self: Ref<Self>, thread: &Thread, writer: &mut UserSlicePtrWriter) -> Result<bool>;

    /// Cancels the given work item. This is called instead of [`DeliverToRead::do_work`] when work
    /// won't be delivered.
    fn cancel(self: Ref<Self>) {}

    /// Returns the linked list links for the work item.
    fn get_links(&self) -> &Links<dyn DeliverToRead>;
}
/// Adapter that allows `Ref<dyn DeliverToRead>` items to be stored on a linked `List`.
struct DeliverToReadListAdapter {}

impl GetLinks for DeliverToReadListAdapter {
    type EntryType = dyn DeliverToRead;

    fn get_links(data: &Self::EntryType) -> &Links<Self::EntryType> {
        data.get_links()
    }
}

impl GetLinksWrapped for DeliverToReadListAdapter {
    type Wrapped = Ref<dyn DeliverToRead>;
}
/// A work item that delivers a fixed return code to the reading thread.
struct DeliverCode {
    code: u32,
    links: Links<dyn DeliverToRead>,
}
impl DeliverCode {
    /// Creates a work item that will deliver the raw code `code` to the reader.
    fn new(code: u32) -> Self {
        let links = Links::new();
        Self { code, links }
    }
}
impl DeliverToRead for DeliverCode {
    fn do_work(self: Ref<Self>, _thread: &Thread, writer: &mut UserSlicePtrWriter) -> Result<bool> {
        // Write just the stored code; keep processing any remaining queued work.
        writer.write(&self.code)?;
        Ok(true)
    }

    fn get_links(&self) -> &Links<dyn DeliverToRead> {
        &self.links
    }
}
/// Rounds `value` up to the next multiple of the pointer size (`usize` alignment).
const fn ptr_align(value: usize) -> usize {
    let mask = core::mem::size_of::<usize>() - 1;
    (value + mask) & !mask
}
// SAFETY(review): the registration is only set up in `init` and never mutated afterwards, so
// sharing `BinderModule` across threads appears sound — TODO confirm.
unsafe impl Sync for BinderModule {}
/// Module state: keeps the misc-device registration alive for the lifetime of the module.
struct BinderModule {
    _reg: Pin<Box<Registration<process::Process>>>,
}
impl kernel::Module for BinderModule {
    fn init(name: &'static CStr, _module: &'static kernel::ThisModule) -> Result<Self> {
        // Create the shared driver context and register the device under the module name.
        let ctx = Context::new()?;
        let reg = Registration::new_pinned(fmt!("{name}"), ctx)?;
        Ok(Self { _reg: reg })
    }
}

870
drivers/android/thread.rs Normal file
View File

@ -0,0 +1,870 @@
// SPDX-License-Identifier: GPL-2.0
use core::{
alloc::AllocError,
mem::size_of,
sync::atomic::{AtomicU32, Ordering},
};
use kernel::{
bindings,
file::{File, PollTable},
io_buffer::{IoBufferReader, IoBufferWriter},
linked_list::{GetLinks, Links, List},
prelude::*,
security,
sync::{CondVar, Ref, SpinLock, UniqueRef},
user_ptr::{UserSlicePtr, UserSlicePtrWriter},
};
use crate::{
allocation::{Allocation, AllocationView},
defs::*,
process::{AllocationInfo, Process},
ptr_align,
transaction::{FileInfo, Transaction},
DeliverCode, DeliverToRead, DeliverToReadListAdapter, Either,
};
/// Result type used throughout the driver; the error carries a `BR_*` reply code that is
/// delivered back to userspace.
pub(crate) type BinderResult<T = ()> = core::result::Result<T, BinderError>;

/// An error to be reported to userspace as a binder return code.
pub(crate) struct BinderError {
    /// The `BR_*` code to deliver (e.g. `BR_FAILED_REPLY` or `BR_DEAD_REPLY`).
    pub(crate) reply: u32,
}
impl BinderError {
    /// Creates an error whose reply code is `BR_FAILED_REPLY`.
    pub(crate) fn new_failed() -> Self {
        Self {
            reply: BR_FAILED_REPLY,
        }
    }

    /// Creates an error whose reply code is `BR_DEAD_REPLY` (target thread/process is dead).
    pub(crate) fn new_dead() -> Self {
        Self {
            reply: BR_DEAD_REPLY,
        }
    }
}

/// Generic kernel errors are reported to userspace as `BR_FAILED_REPLY`.
impl From<Error> for BinderError {
    fn from(_: Error) -> Self {
        Self::new_failed()
    }
}

/// Allocation failures are reported to userspace as `BR_FAILED_REPLY`.
impl From<AllocError> for BinderError {
    fn from(_: AllocError) -> Self {
        Self::new_failed()
    }
}
// Bits of `InnerThread::looper_flags`.
/// The thread registered as a looper (`BC_REGISTER_LOOPER`).
const LOOPER_REGISTERED: u32 = 0x01;
/// The thread entered the looper (`BC_ENTER_LOOPER`).
const LOOPER_ENTERED: u32 = 0x02;
/// The thread exited the looper (`BC_EXIT_LOOPER`).
const LOOPER_EXITED: u32 = 0x04;
/// The looper state is invalid, e.g. the thread both registered and entered, or registration was
/// rejected by the process.
const LOOPER_INVALID: u32 = 0x08;
/// The thread is currently blocked waiting for work on its condition variable.
const LOOPER_WAITING: u32 = 0x10;
/// Polling was used on the thread at least once (requires epoll cleanup on release).
const LOOPER_POLL: u32 = 0x20;
/// Mutable state of a [`Thread`], protected by its spinlock (`Thread::inner`).
struct InnerThread {
    /// Determines the looper state of the thread. It is a bit-wise combination of the constants
    /// prefixed with `LOOPER_`.
    looper_flags: u32,

    /// Determines if thread is dead.
    is_dead: bool,

    /// Work item used to deliver error codes to the thread that started a transaction. When set to
    /// `Some(x)`, it will hold the only reference to the object so that it can update the error
    /// code to be delivered before queuing it.
    reply_work: Option<Ref<ThreadError>>,

    /// Work item used to deliver error codes to the current thread. When set to `Some(x)`, it will
    /// hold the only reference to the object so that it can update the error code to be delivered
    /// before queuing.
    return_work: Option<Ref<ThreadError>>,

    /// Determines whether the work list below should be processed. When set to false, `work_list`
    /// is treated as if it were empty.
    process_work_list: bool,

    /// Work items queued for delivery to this thread.
    work_list: List<DeliverToReadListAdapter>,

    /// The transaction this thread is currently processing, i.e. the top of its transaction
    /// stack, if any.
    current_transaction: Option<Ref<Transaction>>,
}
impl InnerThread {
    fn new() -> Self {
        Self {
            looper_flags: 0,
            is_dead: false,
            process_work_list: false,
            work_list: List::new(),
            current_transaction: None,
            return_work: None,
            reply_work: None,
        }
    }

    /// Stashes the given error work item for later use by `push_reply_work`.
    fn set_reply_work(&mut self, reply_work: Ref<ThreadError>) {
        self.reply_work = Some(reply_work);
    }

    /// Queues the stashed reply-error work item (if any) with the given error code.
    fn push_reply_work(&mut self, code: u32) {
        let work = self.reply_work.take();
        self.push_existing_work(work, code);
    }

    /// Stashes the given error work item for later use by `push_return_work`.
    fn set_return_work(&mut self, return_work: Ref<ThreadError>) {
        self.return_work = Some(return_work);
    }

    /// Queues the stashed return-error work item (if any) with the given error code.
    fn push_return_work(&mut self, code: u32) {
        let work = self.return_work.take();
        self.push_existing_work(work, code);
    }

    /// Sets the error code on a previously-stashed `ThreadError` and queues it for delivery.
    fn push_existing_work(&mut self, owork: Option<Ref<ThreadError>>, code: u32) {
        // TODO: Write some warning when the following fails. It should not happen, and
        // if it does, there is likely something wrong.
        if let Some(work) = owork {
            // `error_code` is written to with relaxed semantics because the queue onto which it is
            // being inserted is protected by a lock. The release barrier when the lock is released
            // by the caller matches with the acquire barrier of the future reader to guarantee
            // that `error_code` is visible.
            work.error_code.store(code, Ordering::Relaxed);
            self.push_work(work);
        }
    }

    /// Pops the next work item, or `None` if the queue is empty or processing is disabled.
    fn pop_work(&mut self) -> Option<Ref<dyn DeliverToRead>> {
        if !self.process_work_list {
            return None;
        }

        let ret = self.work_list.pop_front();
        // Once the queue is drained, we stop processing it until a non-deferred item is pushed
        // again onto it.
        self.process_work_list = !self.work_list.is_empty();
        ret
    }

    /// Queues a work item without enabling processing of the queue.
    fn push_work_deferred(&mut self, work: Ref<dyn DeliverToRead>) {
        self.work_list.push_back(work);
    }

    /// Queues a work item and enables processing of the queue.
    fn push_work(&mut self, work: Ref<dyn DeliverToRead>) {
        self.push_work_deferred(work);
        self.process_work_list = true;
    }

    /// Returns whether there is deliverable work pending on this thread.
    fn has_work(&self) -> bool {
        self.process_work_list && !self.work_list.is_empty()
    }

    /// Fetches the transaction the thread can reply to. If the thread has a pending transaction
    /// (that it could respond to) but it has also issued a transaction, it must first wait for the
    /// previously-issued transaction to complete.
    fn pop_transaction_to_reply(&mut self, thread: &Thread) -> Result<Ref<Transaction>> {
        let transaction = self.current_transaction.take().ok_or(EINVAL)?;
        if core::ptr::eq(thread, transaction.from.as_ref()) {
            // The top of the stack was issued by this very thread, so it cannot be replied to
            // yet; put it back.
            self.current_transaction = Some(transaction);
            return Err(EINVAL);
        }

        // Find a new current transaction for this thread.
        self.current_transaction = transaction.find_from(thread);
        Ok(transaction)
    }

    /// Removes `transaction` from the top of the stack if it is the current transaction; returns
    /// whether it was removed.
    fn pop_transaction_replied(&mut self, transaction: &Ref<Transaction>) -> bool {
        match self.current_transaction.take() {
            None => false,
            Some(old) => {
                if !Ref::ptr_eq(transaction, &old) {
                    // A different transaction is current; restore it.
                    self.current_transaction = Some(old);
                    return false;
                }

                self.current_transaction = old.clone_next();
                true
            }
        }
    }

    /// Records that the thread entered the looper; entering after having registered marks the
    /// looper state invalid.
    fn looper_enter(&mut self) {
        self.looper_flags |= LOOPER_ENTERED;
        if self.looper_flags & LOOPER_REGISTERED != 0 {
            self.looper_flags |= LOOPER_INVALID;
        }
    }

    /// Records that the thread registered as a looper; registering after having entered, or when
    /// the process rejected the registration (`valid == false`), marks the state invalid.
    fn looper_register(&mut self, valid: bool) {
        self.looper_flags |= LOOPER_REGISTERED;
        if !valid || self.looper_flags & LOOPER_ENTERED != 0 {
            self.looper_flags |= LOOPER_INVALID;
        }
    }

    /// Records that the thread exited the looper.
    fn looper_exit(&mut self) {
        self.looper_flags |= LOOPER_EXITED;
    }

    /// Determines whether the thread is part of a pool, i.e., if it is a looper.
    fn is_looper(&self) -> bool {
        self.looper_flags & (LOOPER_ENTERED | LOOPER_REGISTERED) != 0
    }

    /// Determines whether the thread should attempt to fetch work items from the process queue
    /// (when its own queue is empty). This is the case when the thread is not part of a
    /// transaction stack and it is registered as a looper.
    fn should_use_process_work_queue(&self) -> bool {
        self.current_transaction.is_none() && self.is_looper()
    }

    /// Marks the thread as a poll user and returns the poll event mask for its current state.
    fn poll(&mut self) -> u32 {
        self.looper_flags |= LOOPER_POLL;
        if self.has_work() {
            bindings::POLLIN
        } else {
            0
        }
    }
}
/// A userspace thread registered with a binder [`Process`].
pub(crate) struct Thread {
    /// Identifier of this thread.
    pub(crate) id: i32,
    /// The process this thread belongs to.
    pub(crate) process: Ref<Process>,
    /// Mutable state, protected by a spinlock.
    inner: SpinLock<InnerThread>,
    /// Condition variable this thread blocks on while waiting for work.
    work_condvar: CondVar,
    /// Links allowing the thread to be stored in an intrusive list (see the `GetLinks` impl
    /// below).
    links: Links<Thread>,
}
impl Thread {
    /// Creates and initialises a new thread belonging to `process`.
    pub(crate) fn new(id: i32, process: Ref<Process>) -> Result<Ref<Self>> {
        // The two error work items are allocated up front so that delivering an error later
        // cannot itself fail for lack of memory.
        let return_work = Ref::try_new(ThreadError::new(InnerThread::set_return_work))?;
        let reply_work = Ref::try_new(ThreadError::new(InnerThread::set_reply_work))?;
        let mut thread = Pin::from(UniqueRef::try_new(Self {
            id,
            process,
            // SAFETY: `inner` is initialised in the call to `spinlock_init` below.
            inner: unsafe { SpinLock::new(InnerThread::new()) },
            // SAFETY: `work_condvar` is initialised in the call to `condvar_init` below.
            work_condvar: unsafe { CondVar::new() },
            links: Links::new(),
        })?);

        // SAFETY: `inner` is pinned when `thread` is.
        let inner = unsafe { thread.as_mut().map_unchecked_mut(|t| &mut t.inner) };
        kernel::spinlock_init!(inner, "Thread::inner");

        // SAFETY: `work_condvar` is pinned when `thread` is.
        let condvar = unsafe { thread.as_mut().map_unchecked_mut(|t| &mut t.work_condvar) };
        kernel::condvar_init!(condvar, "Thread::work_condvar");

        {
            let mut inner = thread.inner.lock();
            inner.set_reply_work(reply_work);
            inner.set_return_work(return_work);
        }

        Ok(thread.into())
    }

    /// Sets the transaction this thread is currently processing.
    pub(crate) fn set_current_transaction(&self, transaction: Ref<Transaction>) {
        self.inner.lock().current_transaction = Some(transaction);
    }

    /// Attempts to fetch a work item from the thread-local queue. The behaviour if the queue is
    /// empty depends on `wait`: if it is true, the function waits for some work to be queued (or a
    /// signal); otherwise it returns indicating that none is available.
    fn get_work_local(self: &Ref<Self>, wait: bool) -> Result<Ref<dyn DeliverToRead>> {
        // Try once if the caller does not want to wait.
        if !wait {
            return self.inner.lock().pop_work().ok_or(EAGAIN);
        }

        // Loop waiting only on the local queue (i.e., not registering with the process queue).
        let mut inner = self.inner.lock();
        loop {
            if let Some(work) = inner.pop_work() {
                return Ok(work);
            }

            inner.looper_flags |= LOOPER_WAITING;
            // The condvar wait releases the lock while sleeping and reacquires it on wakeup.
            let signal_pending = self.work_condvar.wait(&mut inner);
            inner.looper_flags &= !LOOPER_WAITING;

            if signal_pending {
                return Err(ERESTARTSYS);
            }
        }
    }

    /// Attempts to fetch a work item from the thread-local queue, falling back to the process-wide
    /// queue if none is available locally.
    ///
    /// This must only be called when the thread is not participating in a transaction chain. If it
    /// is, the local version (`get_work_local`) should be used instead.
    fn get_work(self: &Ref<Self>, wait: bool) -> Result<Ref<dyn DeliverToRead>> {
        // Try to get work from the thread's work queue, using only a local lock.
        {
            let mut inner = self.inner.lock();
            if let Some(work) = inner.pop_work() {
                return Ok(work);
            }
        }

        // If the caller doesn't want to wait, try to grab work from the process queue.
        //
        // We know nothing will have been queued directly to the thread queue because it is not in
        // a transaction and it is not in the process' ready list.
        if !wait {
            return self.process.get_work().ok_or(EAGAIN);
        }

        // Get work from the process queue. If none is available, atomically register as ready.
        let reg = match self.process.get_work_or_register(self) {
            Either::Left(work) => return Ok(work),
            Either::Right(reg) => reg,
        };

        let mut inner = self.inner.lock();
        loop {
            if let Some(work) = inner.pop_work() {
                return Ok(work);
            }

            inner.looper_flags |= LOOPER_WAITING;
            let signal_pending = self.work_condvar.wait(&mut inner);
            inner.looper_flags &= !LOOPER_WAITING;

            if signal_pending {
                // A signal is pending. We need to pull the thread off the list, then check the
                // state again after it's off the list to ensure that something was not queued in
                // the meantime. If something has been queued, we just return it (instead of the
                // error).
                drop(inner);
                drop(reg);
                return self.inner.lock().pop_work().ok_or(ERESTARTSYS);
            }
        }
    }

    /// Queues a work item on this thread and wakes it up; fails with a dead reply if the thread
    /// is dead.
    pub(crate) fn push_work(&self, work: Ref<dyn DeliverToRead>) -> BinderResult {
        {
            let mut inner = self.inner.lock();
            if inner.is_dead {
                return Err(BinderError::new_dead());
            }
            inner.push_work(work);
        }
        // Notify outside the lock to avoid waking a waiter into a contended lock.
        self.work_condvar.notify_one();
        Ok(())
    }

    /// Attempts to push to given work item to the thread if it's a looper thread (i.e., if it's
    /// part of a thread pool) and is alive. Otherwise, push the work item to the process instead.
    pub(crate) fn push_work_if_looper(&self, work: Ref<dyn DeliverToRead>) -> BinderResult {
        let mut inner = self.inner.lock();
        if inner.is_looper() && !inner.is_dead {
            inner.push_work(work);
            Ok(())
        } else {
            drop(inner);
            self.process.push_work(work)
        }
    }

    /// Queues a work item without enabling processing of the thread's queue.
    pub(crate) fn push_work_deferred(&self, work: Ref<dyn DeliverToRead>) {
        self.inner.lock().push_work_deferred(work);
    }

    /// Translates a single object (binder/handle/fd) embedded in the transaction payload at the
    /// given offset-array index, rewriting it for the target process.
    fn translate_object(
        &self,
        index_offset: usize,
        view: &mut AllocationView<'_, '_>,
        allow_fds: bool,
    ) -> BinderResult {
        let offset = view.alloc.read(index_offset)?;
        let header = view.read::<bindings::binder_object_header>(offset)?;
        // TODO: Handle other types.
        match header.type_ {
            BINDER_TYPE_WEAK_BINDER | BINDER_TYPE_BINDER => {
                let strong = header.type_ == BINDER_TYPE_BINDER;
                view.transfer_binder_object(offset, strong, |obj| {
                    // SAFETY: `binder` is a `binder_uintptr_t`; any bit pattern is a valid
                    // representation.
                    let ptr = unsafe { obj.__bindgen_anon_1.binder } as _;
                    let cookie = obj.cookie as _;
                    let flags = obj.flags as _;
                    let node = self.process.as_ref_borrow().get_node(
                        ptr,
                        cookie,
                        flags,
                        strong,
                        Some(self),
                    )?;
                    security::binder_transfer_binder(&self.process.cred, &view.alloc.process.cred)?;
                    Ok(node)
                })?;
            }
            BINDER_TYPE_WEAK_HANDLE | BINDER_TYPE_HANDLE => {
                let strong = header.type_ == BINDER_TYPE_HANDLE;
                view.transfer_binder_object(offset, strong, |obj| {
                    // SAFETY: `handle` is a `u32`; any bit pattern is a valid representation.
                    let handle = unsafe { obj.__bindgen_anon_1.handle } as _;
                    let node = self.process.get_node_from_handle(handle, strong)?;
                    security::binder_transfer_binder(&self.process.cred, &view.alloc.process.cred)?;
                    Ok(node)
                })?;
            }
            BINDER_TYPE_FD => {
                if !allow_fds {
                    return Err(BinderError::new_failed());
                }

                let obj = view.read::<bindings::binder_fd_object>(offset)?;
                // SAFETY: `fd` is a `u32`; any bit pattern is a valid representation.
                let fd = unsafe { obj.__bindgen_anon_1.fd };
                let file = File::from_fd(fd)?;
                security::binder_transfer_file(
                    &self.process.cred,
                    &view.alloc.process.cred,
                    &file,
                )?;
                // Record where the translated fd must be written in the recipient's buffer; the
                // descriptor itself is installed later (see `Transaction::prepare_file_list`).
                let field_offset =
                    kernel::offset_of!(bindings::binder_fd_object, __bindgen_anon_1.fd) as usize;
                let file_info = Box::try_new(FileInfo::new(file, offset + field_offset))?;
                view.alloc.add_file_info(file_info);
            }
            _ => pr_warn!("Unsupported binder object type: {:x}\n", header.type_),
        }
        Ok(())
    }

    /// Translates all objects whose offsets are stored in `[start, end)` of the allocation,
    /// recording how far translation got so cleanup can undo partial work.
    fn translate_objects(
        &self,
        alloc: &mut Allocation<'_>,
        start: usize,
        end: usize,
        allow_fds: bool,
    ) -> BinderResult {
        let mut view = AllocationView::new(alloc, start);
        for i in (start..end).step_by(size_of::<usize>()) {
            if let Err(err) = self.translate_object(i, &mut view, allow_fds) {
                // Only the objects up to `i` were translated; remember that for cleanup.
                alloc.set_info(AllocationInfo { offsets: start..i });
                return Err(err);
            }
        }
        alloc.set_info(AllocationInfo {
            offsets: start..end,
        });
        Ok(())
    }

    /// Copies the transaction payload (data and offsets) from the sender's address space into a
    /// buffer allocated in `to_process`, translating embedded objects along the way.
    pub(crate) fn copy_transaction_data<'a>(
        &self,
        to_process: &'a Process,
        tr: &BinderTransactionData,
        allow_fds: bool,
    ) -> BinderResult<Allocation<'a>> {
        let data_size = tr.data_size as _;
        let adata_size = ptr_align(data_size);
        let offsets_size = tr.offsets_size as _;
        let aoffsets_size = ptr_align(offsets_size);

        // This guarantees that at least `sizeof(usize)` bytes will be allocated.
        let len = core::cmp::max(
            adata_size.checked_add(aoffsets_size).ok_or(ENOMEM)?,
            size_of::<usize>(),
        );
        let mut alloc = to_process.buffer_alloc(len)?;

        // Copy raw data.
        // NOTE(review): the user-supplied pointer/length pair is only accessed through
        // `UserSlicePtr`'s checked accessors — confirm `UserSlicePtr::new`'s safety contract.
        let mut reader = unsafe { UserSlicePtr::new(tr.data.ptr.buffer as _, data_size) }.reader();
        alloc.copy_into(&mut reader, 0, data_size)?;

        // Copy offsets if there are any.
        if offsets_size > 0 {
            let mut reader =
                unsafe { UserSlicePtr::new(tr.data.ptr.offsets as _, offsets_size) }.reader();
            alloc.copy_into(&mut reader, adata_size, offsets_size)?;

            // Traverse the objects specified.
            self.translate_objects(
                &mut alloc,
                adata_size,
                adata_size + aoffsets_size,
                allow_fds,
            )?;
        }

        Ok(alloc)
    }

    /// Delivers `BR_DEAD_REPLY` up the transaction stack until a live thread (or the bottom of
    /// the stack) is reached.
    fn unwind_transaction_stack(self: &Ref<Self>) {
        let mut thread = self.clone();
        while let Ok(transaction) = {
            let mut inner = thread.inner.lock();
            inner.pop_transaction_to_reply(thread.as_ref())
        } {
            let reply = Either::Right(BR_DEAD_REPLY);
            if !transaction.from.deliver_single_reply(reply, &transaction) {
                break;
            }

            thread = transaction.from.clone();
        }
    }

    /// Delivers a reply (or error code) for `transaction` to this thread, unwinding the stack if
    /// the thread turns out to be dead.
    pub(crate) fn deliver_reply(
        &self,
        reply: Either<Ref<Transaction>, u32>,
        transaction: &Ref<Transaction>,
    ) {
        if self.deliver_single_reply(reply, transaction) {
            transaction.from.unwind_transaction_stack();
        }
    }

    /// Delivers a reply to the thread that started a transaction. The reply can either be a
    /// reply-transaction or an error code to be delivered instead.
    ///
    /// Returns whether the thread is dead. If it is, the caller is expected to unwind the
    /// transaction stack by completing transactions for threads that are dead.
    fn deliver_single_reply(
        &self,
        reply: Either<Ref<Transaction>, u32>,
        transaction: &Ref<Transaction>,
    ) -> bool {
        {
            let mut inner = self.inner.lock();
            if !inner.pop_transaction_replied(transaction) {
                return false;
            }

            if inner.is_dead {
                return true;
            }

            match reply {
                Either::Left(work) => inner.push_work(work),
                Either::Right(code) => inner.push_reply_work(code),
            }
        }

        // Notify the thread now that we've released the inner lock.
        self.work_condvar.notify_one();
        false
    }

    /// Determines if the given transaction is the current transaction for this thread.
    fn is_current_transaction(&self, transaction: &Ref<Transaction>) -> bool {
        let inner = self.inner.lock();
        match &inner.current_transaction {
            None => false,
            Some(current) => Ref::ptr_eq(current, transaction),
        }
    }

    /// Runs a transaction handler, converting any error into a return work item queued on this
    /// thread.
    fn transaction<T>(self: &Ref<Self>, tr: &BinderTransactionData, inner: T)
    where
        T: FnOnce(&Ref<Self>, &BinderTransactionData) -> BinderResult,
    {
        if let Err(err) = inner(self, tr) {
            self.inner.lock().push_return_work(err.reply);
        }
    }

    /// Handles `BC_REPLY`: builds a reply transaction and delivers it to the original sender.
    fn reply_inner(self: &Ref<Self>, tr: &BinderTransactionData) -> BinderResult {
        let orig = self.inner.lock().pop_transaction_to_reply(self)?;
        if !orig.from.is_current_transaction(&orig) {
            return Err(BinderError::new_failed());
        }

        // We need to complete the transaction even if we cannot complete building the reply.
        (|| -> BinderResult<_> {
            let completion = Ref::try_new(DeliverCode::new(BR_TRANSACTION_COMPLETE))?;
            let process = orig.from.process.clone();
            let allow_fds = orig.flags & TF_ACCEPT_FDS != 0;
            let reply = Transaction::new_reply(self, process, tr, allow_fds)?;
            self.inner.lock().push_work(completion);
            orig.from.deliver_reply(Either::Left(reply), &orig);
            Ok(())
        })()
        .map_err(|mut err| {
            // At this point we only return `BR_TRANSACTION_COMPLETE` to the caller, and we must let
            // the sender know that the transaction has completed (with an error in this case).
            let reply = Either::Right(BR_FAILED_REPLY);
            orig.from.deliver_reply(reply, &orig);
            err.reply = BR_TRANSACTION_COMPLETE;
            err
        })
    }

    /// Determines the current top of the transaction stack. It fails if the top is in another
    /// thread (i.e., this thread belongs to a stack but it has called another thread). The top is
    /// [`None`] if the thread is not currently participating in a transaction stack.
    fn top_of_transaction_stack(&self) -> Result<Option<Ref<Transaction>>> {
        let inner = self.inner.lock();
        Ok(if let Some(cur) = &inner.current_transaction {
            if core::ptr::eq(self, cur.from.as_ref()) {
                return Err(EINVAL);
            }
            Some(cur.clone())
        } else {
            None
        })
    }

    /// Handles a one-way (`TF_ONE_WAY`) `BC_TRANSACTION`: no reply is expected, so no transaction
    /// stack entry is created.
    fn oneway_transaction_inner(self: &Ref<Self>, tr: &BinderTransactionData) -> BinderResult {
        // SAFETY-style note: `handle` is a `u32` union field; any bit pattern is a valid
        // representation.
        let handle = unsafe { tr.target.handle };
        let node_ref = self.process.get_transaction_node(handle)?;
        security::binder_transaction(&self.process.cred, &node_ref.node.owner.cred)?;
        let completion = Ref::try_new(DeliverCode::new(BR_TRANSACTION_COMPLETE))?;
        let transaction = Transaction::new(node_ref, None, self, tr)?;
        self.inner.lock().push_work(completion);
        // TODO: Remove the completion on error?
        transaction.submit()?;
        Ok(())
    }

    /// Handles a two-way `BC_TRANSACTION`: stacks the new transaction on top of this thread's
    /// current one and submits it to the target.
    fn transaction_inner(self: &Ref<Self>, tr: &BinderTransactionData) -> BinderResult {
        // SAFETY-style note: `handle` is a `u32` union field; any bit pattern is a valid
        // representation.
        let handle = unsafe { tr.target.handle };
        let node_ref = self.process.get_transaction_node(handle)?;
        security::binder_transaction(&self.process.cred, &node_ref.node.owner.cred)?;
        // TODO: We need to ensure that there isn't a pending transaction in the work queue. How
        // could this happen?
        let top = self.top_of_transaction_stack()?;
        let completion = Ref::try_new(DeliverCode::new(BR_TRANSACTION_COMPLETE))?;
        let transaction = Transaction::new(node_ref, top, self, tr)?;

        // Check that the transaction stack hasn't changed while the lock was released, then update
        // it with the new transaction.
        {
            let mut inner = self.inner.lock();
            if !transaction.is_stacked_on(&inner.current_transaction) {
                return Err(BinderError::new_failed());
            }
            inner.current_transaction = Some(transaction.clone());
        }

        // We push the completion as a deferred work so that we wait for the reply before returning
        // to userland.
        self.push_work_deferred(completion);

        // TODO: Remove completion if submission fails?
        transaction.submit()?;
        Ok(())
    }

    /// Processes the write buffer of a `BINDER_WRITE_READ` request, dispatching each `BC_*`
    /// command and updating `write_consumed`.
    fn write(self: &Ref<Self>, req: &mut BinderWriteRead) -> Result {
        let write_start = req.write_buffer.wrapping_add(req.write_consumed);
        let write_len = req.write_size - req.write_consumed;
        let mut reader = unsafe { UserSlicePtr::new(write_start as _, write_len as _).reader() };

        // Stop dispatching commands once a return code is pending delivery (i.e., `return_work`
        // has been taken by `push_return_work`).
        while reader.len() >= size_of::<u32>() && self.inner.lock().return_work.is_some() {
            let before = reader.len();
            match reader.read::<u32>()? {
                BC_TRANSACTION => {
                    let tr = reader.read::<BinderTransactionData>()?;
                    if tr.flags & TF_ONE_WAY != 0 {
                        self.transaction(&tr, Self::oneway_transaction_inner)
                    } else {
                        self.transaction(&tr, Self::transaction_inner)
                    }
                }
                BC_REPLY => self.transaction(&reader.read()?, Self::reply_inner),
                BC_FREE_BUFFER => drop(self.process.buffer_get(reader.read()?)),
                BC_INCREFS => self.process.update_ref(reader.read()?, true, false)?,
                BC_ACQUIRE => self.process.update_ref(reader.read()?, true, true)?,
                BC_RELEASE => self.process.update_ref(reader.read()?, false, true)?,
                BC_DECREFS => self.process.update_ref(reader.read()?, false, false)?,
                BC_INCREFS_DONE => self.process.inc_ref_done(&mut reader, false)?,
                BC_ACQUIRE_DONE => self.process.inc_ref_done(&mut reader, true)?,
                BC_REQUEST_DEATH_NOTIFICATION => self.process.request_death(&mut reader, self)?,
                BC_CLEAR_DEATH_NOTIFICATION => self.process.clear_death(&mut reader, self)?,
                BC_DEAD_BINDER_DONE => self.process.dead_binder_done(reader.read()?, self),
                BC_REGISTER_LOOPER => {
                    let valid = self.process.register_thread();
                    self.inner.lock().looper_register(valid);
                }
                BC_ENTER_LOOPER => self.inner.lock().looper_enter(),
                BC_EXIT_LOOPER => self.inner.lock().looper_exit(),

                // TODO: Add support for BC_TRANSACTION_SG and BC_REPLY_SG.
                // BC_ATTEMPT_ACQUIRE and BC_ACQUIRE_RESULT are no longer supported.
                _ => return Err(EINVAL),
            }

            // Update the number of write bytes consumed.
            req.write_consumed += (before - reader.len()) as u64;
        }
        Ok(())
    }

    /// Fills the read buffer of a `BINDER_WRITE_READ` request with queued work items, blocking
    /// for work when `wait` is true and nothing has been written yet.
    fn read(self: &Ref<Self>, req: &mut BinderWriteRead, wait: bool) -> Result {
        let read_start = req.read_buffer.wrapping_add(req.read_consumed);
        let read_len = req.read_size - req.read_consumed;
        let mut writer = unsafe { UserSlicePtr::new(read_start as _, read_len as _) }.writer();
        let (in_pool, getter) = {
            let inner = self.inner.lock();
            (
                inner.is_looper(),
                // Decide once, while holding the lock, whether the process queue may be used.
                if inner.should_use_process_work_queue() {
                    Self::get_work
                } else {
                    Self::get_work_local
                },
            )
        };

        // Reserve some room at the beginning of the read buffer so that we can send a
        // BR_SPAWN_LOOPER if we need to.
        if req.read_consumed == 0 {
            writer.write(&BR_NOOP)?;
        }

        // Loop doing work while there is room in the buffer.
        let initial_len = writer.len();
        while writer.len() >= size_of::<u32>() {
            // Only block for work before anything has been written to the buffer.
            match getter(self, wait && initial_len == writer.len()) {
                Ok(work) => {
                    if !work.do_work(self, &mut writer)? {
                        break;
                    }
                }
                Err(err) => {
                    // Propagate the error if we haven't written anything else.
                    if initial_len == writer.len() {
                        return Err(err);
                    } else {
                        break;
                    }
                }
            }
        }

        req.read_consumed += read_len - writer.len() as u64;

        // Write BR_SPAWN_LOOPER if the process needs more threads for its pool.
        if in_pool && self.process.needs_thread() {
            let mut writer =
                unsafe { UserSlicePtr::new(req.read_buffer as _, req.read_size as _) }.writer();
            writer.write(&BR_SPAWN_LOOPER)?;
        }
        Ok(())
    }

    /// Handles the `BINDER_WRITE_READ` ioctl: processes the write buffer, then fills the read
    /// buffer, and writes the updated request back so the consumed counts are visible.
    pub(crate) fn write_read(self: &Ref<Self>, data: UserSlicePtr, wait: bool) -> Result {
        let (mut reader, mut writer) = data.reader_writer();
        let mut req = reader.read::<BinderWriteRead>()?;

        // TODO: `write(&req)` happens in all exit paths from here on. Find a better way to encode
        // it.

        // Go through the write buffer.
        if req.write_size > 0 {
            if let Err(err) = self.write(&mut req) {
                req.read_consumed = 0;
                writer.write(&req)?;
                return Err(err);
            }
        }

        // Go through the work queue.
        let mut ret = Ok(());
        if req.read_size > 0 {
            ret = self.read(&mut req, wait);
        }

        // Write the request back so that the consumed fields are visible to the caller.
        writer.write(&req)?;
        ret
    }

    /// Registers the file's wait queue and returns whether the process queue is usable plus the
    /// current poll event mask.
    pub(crate) fn poll(&self, file: &File, table: &PollTable) -> (bool, u32) {
        // SAFETY: `free_waiters` is called on release.
        unsafe { table.register_wait(file, &self.work_condvar) };
        let mut inner = self.inner.lock();
        (inner.should_use_process_work_queue(), inner.poll())
    }

    /// Wakes up pollers of this thread if it has become ready to fetch process-wide work.
    pub(crate) fn notify_if_poll_ready(&self) {
        // Determine if we need to notify. This requires the lock.
        let inner = self.inner.lock();
        let notify = inner.looper_flags & LOOPER_POLL != 0
            && inner.should_use_process_work_queue()
            && !inner.has_work();
        drop(inner);

        // Now that the lock is no longer held, notify the waiters if we have to.
        if notify {
            self.work_condvar.notify_one();
        }
    }

    /// Queues the stashed return-error work item with the given code.
    pub(crate) fn push_return_work(&self, code: u32) {
        self.inner.lock().push_return_work(code)
    }

    /// Tears the thread down: marks it dead, cancels pending work, unwinds the transaction stack
    /// and cleans up poll state.
    pub(crate) fn release(self: &Ref<Self>) {
        // Mark the thread as dead.
        self.inner.lock().is_dead = true;

        // Cancel all pending work items.
        while let Ok(work) = self.get_work_local(false) {
            work.cancel();
        }

        // Complete the transaction stack as far as we can.
        self.unwind_transaction_stack();

        // Remove epoll items if polling was ever used on the thread.
        let poller = self.inner.lock().looper_flags & LOOPER_POLL != 0;
        if poller {
            self.work_condvar.free_waiters();

            // Wait for outstanding RCU readers before the wait-queue entries go away.
            unsafe { bindings::synchronize_rcu() };
        }
    }
}
/// Allows [`Thread`] objects to be inserted into intrusive linked lists.
impl GetLinks for Thread {
    type EntryType = Thread;

    fn get_links(thread: &Thread) -> &Links<Thread> {
        // The list links are embedded directly in the thread object.
        &thread.links
    }
}
/// A reusable work item that delivers an error code to userspace, then hands itself back to the
/// owning thread's `reply_work`/`return_work` slot.
struct ThreadError {
    /// The code to deliver. Written before the item is queued and read on delivery; see
    /// `InnerThread::push_existing_work` for the memory-ordering argument.
    error_code: AtomicU32,
    /// Called on delivery to return ownership of this item to the owning [`InnerThread`].
    return_fn: fn(&mut InnerThread, Ref<ThreadError>),
    /// Links so the item can be queued on a work list.
    links: Links<dyn DeliverToRead>,
}

impl ThreadError {
    /// Creates a new error work item, initially carrying `BR_OK`.
    fn new(return_fn: fn(&mut InnerThread, Ref<ThreadError>)) -> Self {
        Self {
            error_code: AtomicU32::new(BR_OK),
            return_fn,
            links: Links::new(),
        }
    }
}
impl DeliverToRead for ThreadError {
    fn do_work(self: Ref<Self>, thread: &Thread, writer: &mut UserSlicePtrWriter) -> Result<bool> {
        // See `InnerThread::push_existing_work` for the reason why `error_code` is up to date even
        // though we use relaxed semantics.
        let code = self.error_code.load(Ordering::Relaxed);

        // Return the `ThreadError` to the thread.
        (self.return_fn)(&mut *thread.inner.lock(), self);

        // Deliver the error code to userspace.
        writer.write(&code)?;
        Ok(true)
    }

    fn get_links(&self) -> &Links<dyn DeliverToRead> {
        &self.links
    }
}

View File

@ -0,0 +1,326 @@
// SPDX-License-Identifier: GPL-2.0
use core::sync::atomic::{AtomicBool, Ordering};
use kernel::{
bindings,
file::{File, FileDescriptorReservation},
io_buffer::IoBufferWriter,
linked_list::List,
linked_list::{GetLinks, Links},
prelude::*,
sync::{Ref, SpinLock, UniqueRef},
user_ptr::UserSlicePtrWriter,
ScopeGuard,
};
use crate::{
defs::*,
node::NodeRef,
process::Process,
ptr_align,
thread::{BinderResult, Thread},
DeliverToRead, Either,
};
/// Mutable state of a [`Transaction`], protected by its spinlock.
struct TransactionInner {
    /// Files transferred as part of the transaction (`BINDER_TYPE_FD` objects).
    file_list: List<Box<FileInfo>>,
}

/// A binder transaction (or reply) in flight between a sending thread and a receiving process.
pub(crate) struct Transaction {
    inner: SpinLock<TransactionInner>,
    // TODO: Node should be released when the buffer is released.
    /// The target node; `None` when this transaction is a reply.
    node_ref: Option<NodeRef>,
    /// The next transaction down on the sender's transaction stack, if any.
    stack_next: Option<Ref<Transaction>>,
    /// The thread that issued this transaction.
    pub(crate) from: Ref<Thread>,
    /// The process receiving this transaction.
    to: Ref<Process>,
    /// Whether the buffer allocation is still owned by the transaction and must be reclaimed on
    /// drop; cleared once ownership moves to userspace (see `do_work`).
    free_allocation: AtomicBool,
    /// The transaction code supplied by the sender.
    code: u32,
    /// The `TF_*` flags supplied by the sender.
    pub(crate) flags: u32,
    /// Size, in bytes, of the transaction data.
    data_size: usize,
    /// Size, in bytes, of the offsets array describing embedded objects.
    offsets_size: usize,
    /// Address of the copied payload inside the recipient's buffer.
    data_address: usize,
    /// Links so the transaction can be queued as a work item.
    links: Links<dyn DeliverToRead>,
}
impl Transaction {
    /// Creates a new transaction targeting the node in `node_ref`, copying the payload from the
    /// sender into the target process' buffer.
    pub(crate) fn new(
        node_ref: NodeRef,
        stack_next: Option<Ref<Transaction>>,
        from: &Ref<Thread>,
        tr: &BinderTransactionData,
    ) -> BinderResult<Ref<Self>> {
        let allow_fds = node_ref.node.flags & FLAT_BINDER_FLAG_ACCEPTS_FDS != 0;
        let to = node_ref.node.owner.clone();
        let mut alloc = from.copy_transaction_data(&to, tr, allow_fds)?;
        let data_address = alloc.ptr;
        let file_list = alloc.take_file_list();
        // Keep the buffer around past the `Allocation`'s lifetime; it is reclaimed when the
        // transaction is dropped (unless ownership is handed to userspace first).
        alloc.keep_alive();
        let mut tr = Pin::from(UniqueRef::try_new(Self {
            // SAFETY: `spinlock_init` is called below.
            inner: unsafe { SpinLock::new(TransactionInner { file_list }) },
            node_ref: Some(node_ref),
            stack_next,
            from: from.clone(),
            to,
            code: tr.code,
            flags: tr.flags,
            data_size: tr.data_size as _,
            data_address,
            offsets_size: tr.offsets_size as _,
            links: Links::new(),
            free_allocation: AtomicBool::new(true),
        })?);

        // SAFETY: `inner` is pinned when `tr` is.
        let pinned = unsafe { tr.as_mut().map_unchecked_mut(|t| &mut t.inner) };
        kernel::spinlock_init!(pinned, "Transaction::inner");

        Ok(tr.into())
    }

    /// Creates a reply transaction (no target node) directed at process `to`.
    pub(crate) fn new_reply(
        from: &Ref<Thread>,
        to: Ref<Process>,
        tr: &BinderTransactionData,
        allow_fds: bool,
    ) -> BinderResult<Ref<Self>> {
        let mut alloc = from.copy_transaction_data(&to, tr, allow_fds)?;
        let data_address = alloc.ptr;
        let file_list = alloc.take_file_list();
        // See `new` above: the buffer outlives the `Allocation` and is reclaimed on drop.
        alloc.keep_alive();
        let mut tr = Pin::from(UniqueRef::try_new(Self {
            // SAFETY: `spinlock_init` is called below.
            inner: unsafe { SpinLock::new(TransactionInner { file_list }) },
            node_ref: None,
            stack_next: None,
            from: from.clone(),
            to,
            code: tr.code,
            flags: tr.flags,
            data_size: tr.data_size as _,
            data_address,
            offsets_size: tr.offsets_size as _,
            links: Links::new(),
            free_allocation: AtomicBool::new(true),
        })?);

        // SAFETY: `inner` is pinned when `tr` is.
        let pinned = unsafe { tr.as_mut().map_unchecked_mut(|t| &mut t.inner) };
        kernel::spinlock_init!(pinned, "Transaction::inner");

        Ok(tr.into())
    }

    /// Determines if the transaction is stacked on top of the given transaction.
    pub(crate) fn is_stacked_on(&self, onext: &Option<Ref<Self>>) -> bool {
        match (&self.stack_next, onext) {
            (None, None) => true,
            (Some(stack_next), Some(next)) => Ref::ptr_eq(stack_next, next),
            _ => false,
        }
    }

    /// Returns a pointer to the next transaction on the transaction stack, if there is one.
    pub(crate) fn clone_next(&self) -> Option<Ref<Self>> {
        let next = self.stack_next.as_ref()?;
        Some(next.clone())
    }

    /// Searches in the transaction stack for a thread that belongs to the target process. This is
    /// useful when finding a target for a new transaction: if the node belongs to a process that
    /// is already part of the transaction stack, we reuse the thread.
    fn find_target_thread(&self) -> Option<Ref<Thread>> {
        let process = &self.node_ref.as_ref()?.node.owner;

        let mut it = &self.stack_next;
        while let Some(transaction) = it {
            if Ref::ptr_eq(&transaction.from.process, process) {
                return Some(transaction.from.clone());
            }
            it = &transaction.stack_next;
        }
        None
    }

    /// Searches in the transaction stack for a transaction originating at the given thread.
    pub(crate) fn find_from(&self, thread: &Thread) -> Option<Ref<Transaction>> {
        let mut it = &self.stack_next;
        while let Some(transaction) = it {
            if core::ptr::eq(thread, transaction.from.as_ref()) {
                return Some(transaction.clone());
            }

            it = &transaction.stack_next;
        }
        None
    }

    /// Submits the transaction to a work queue. Use a thread if there is one in the transaction
    /// stack, otherwise use the destination process.
    pub(crate) fn submit(self: Ref<Self>) -> BinderResult {
        if let Some(thread) = self.find_target_thread() {
            thread.push_work(self)
        } else {
            let process = self.to.clone();
            process.push_work(self)
        }
    }

    /// Prepares the file list for delivery to the caller.
    fn prepare_file_list(&self) -> Result<List<Box<FileInfo>>> {
        // Get list of files that are being transferred as part of the transaction.
        let mut file_list = core::mem::replace(&mut self.inner.lock().file_list, List::new());

        // If the list is non-empty, prepare the buffer.
        if !file_list.is_empty() {
            let alloc = self.to.buffer_get(self.data_address).ok_or(ESRCH)?;
            // NOTE(review): on failure below, `alloc` is dropped without `keep_alive`, which
            // appears to release the buffer; the guard then clears `free_allocation` so `drop`
            // does not touch it again — confirm against `Process::buffer_get`.
            let cleanup = ScopeGuard::new(|| {
                self.free_allocation.store(false, Ordering::Relaxed);
            });
            let mut it = file_list.cursor_front_mut();
            while let Some(file_info) = it.current() {
                // Reserve an fd in the recipient and patch its number into the buffer; the file
                // itself is committed later in `do_work`.
                let reservation = FileDescriptorReservation::new(bindings::O_CLOEXEC)?;
                alloc.write(file_info.buffer_offset, &reservation.reserved_fd())?;
                file_info.reservation = Some(reservation);
                it.move_next();
            }
            alloc.keep_alive();
            cleanup.dismiss();
        }

        Ok(file_list)
    }
}
impl DeliverToRead for Transaction {
    /// Writes the transaction (`BR_TRANSACTION`) or reply (`BR_REPLY`) to the recipient thread's
    /// read buffer, commits any transferred file descriptors, and hands buffer ownership to
    /// userspace.
    fn do_work(self: Ref<Self>, thread: &Thread, writer: &mut UserSlicePtrWriter) -> Result<bool> {
        /* TODO: Initialise the following fields from tr:
            pub sender_pid: pid_t,
            pub sender_euid: uid_t,
        */
        // Until dismissed below, any early exit reports failure back to the sender of a two-way
        // transaction.
        let send_failed_reply = ScopeGuard::new(|| {
            if self.node_ref.is_some() && self.flags & TF_ONE_WAY == 0 {
                let reply = Either::Right(BR_FAILED_REPLY);
                self.from.deliver_reply(reply, &self);
            }
        });
        let mut file_list = if let Ok(list) = self.prepare_file_list() {
            list
        } else {
            // On failure to process the list, we send a reply back to the sender and ignore the
            // transaction on the recipient.
            return Ok(true);
        };

        let mut tr = BinderTransactionData::default();

        if let Some(nref) = &self.node_ref {
            let (ptr, cookie) = nref.node.get_id();
            tr.target.ptr = ptr as _;
            tr.cookie = cookie as _;
        };

        tr.code = self.code;
        tr.flags = self.flags;
        tr.data_size = self.data_size as _;
        tr.data.ptr.buffer = self.data_address as _;
        tr.offsets_size = self.offsets_size as _;
        if tr.offsets_size > 0 {
            // The offsets array immediately follows the pointer-aligned data.
            tr.data.ptr.offsets = (self.data_address + ptr_align(self.data_size)) as _;
        }

        let code = if self.node_ref.is_none() {
            BR_REPLY
        } else {
            BR_TRANSACTION
        };

        // Write the transaction code and data to the user buffer.
        writer.write(&code)?;
        writer.write(&tr)?;

        // Dismiss the completion of transaction with a failure. No failure paths are allowed from
        // here on out.
        send_failed_reply.dismiss();

        // Commit all files.
        {
            let mut it = file_list.cursor_front_mut();
            while let Some(file_info) = it.current() {
                if let Some(reservation) = file_info.reservation.take() {
                    if let Some(file) = file_info.file.take() {
                        reservation.commit(file);
                    }
                }

                it.move_next();
            }
        }

        // When `drop` is called, we don't want the allocation to be freed because it is now the
        // user's responsibility to free it.
        //
        // `drop` is guaranteed to see this relaxed store because `Ref` guarantees that everything
        // that happens when an object is referenced happens-before the eventual `drop`.
        self.free_allocation.store(false, Ordering::Relaxed);

        // When this is not a reply and not an async transaction, update `current_transaction`. If
        // it's a reply, `current_transaction` has already been updated appropriately.
        if self.node_ref.is_some() && tr.flags & TF_ONE_WAY == 0 {
            thread.set_current_transaction(self);
        }

        Ok(false)
    }

    /// Called when the transaction will not be delivered: reports a dead reply to the sender.
    fn cancel(self: Ref<Self>) {
        let reply = Either::Right(BR_DEAD_REPLY);
        self.from.deliver_reply(reply, &self);
    }

    fn get_links(&self) -> &Links<dyn DeliverToRead> {
        &self.links
    }
}
impl Drop for Transaction {
    /// Releases the transaction buffer unless ownership was already handed over
    /// (`do_work`/`prepare_file_list` clear `free_allocation` in that case).
    fn drop(&mut self) {
        let still_owned = self.free_allocation.load(Ordering::Relaxed);
        if still_owned {
            // Reclaim the allocation; dropping the returned handle releases it.
            // NOTE(review): relies on `buffer_get` transferring ownership back.
            self.to.buffer_get(self.data_address);
        }
    }
}
/// A file being passed as part of a transaction payload, together with the place in the
/// transaction buffer where its (eventual) file descriptor number must be written.
pub(crate) struct FileInfo {
    /// Links used to keep this entry in a transaction's intrusive file list.
    links: Links<FileInfo>,
    /// The file for which a descriptor will be created in the recipient process.
    file: Option<ARef<File>>,
    /// The file descriptor reservation on the recipient process.
    reservation: Option<FileDescriptorReservation>,
    /// The offset in the buffer where the file descriptor is stored.
    buffer_offset: usize,
}
impl FileInfo {
    /// Constructs a `FileInfo` that will install `file` at `buffer_offset` within
    /// the transaction buffer; the fd reservation is created later, during
    /// transaction delivery.
    pub(crate) fn new(file: ARef<File>, buffer_offset: usize) -> Self {
        Self {
            links: Links::new(),
            reservation: None,
            file: Some(file),
            buffer_offset,
        }
    }
}
impl GetLinks for FileInfo {
    type EntryType = Self;
    /// Exposes the intrusive-list links embedded in each `FileInfo`.
    fn get_links(entry: &Self::EntryType) -> &Links<Self::EntryType> {
        &entry.links
    }
}

View File

@ -482,6 +482,14 @@ config GPIO_PL061
help
Say yes here to support the PrimeCell PL061 GPIO device.
config GPIO_PL061_RUST
tristate "PrimeCell PL061 GPIO support written in Rust"
depends on ARM_AMBA && RUST
select IRQ_DOMAIN
select GPIOLIB_IRQCHIP
help
Say yes here to support the PrimeCell PL061 GPIO device.
config GPIO_PMIC_EIC_SPRD
tristate "Spreadtrum PMIC EIC support"
depends on MFD_SC27XX_PMIC || COMPILE_TEST

View File

@ -118,6 +118,7 @@ obj-$(CONFIG_GPIO_PCIE_IDIO_24) += gpio-pcie-idio-24.o
obj-$(CONFIG_GPIO_PCI_IDIO_16) += gpio-pci-idio-16.o
obj-$(CONFIG_GPIO_PISOSR) += gpio-pisosr.o
obj-$(CONFIG_GPIO_PL061) += gpio-pl061.o
obj-$(CONFIG_GPIO_PL061_RUST) += gpio_pl061_rust.o
obj-$(CONFIG_GPIO_PMIC_EIC_SPRD) += gpio-pmic-eic-sprd.o
obj-$(CONFIG_GPIO_PXA) += gpio-pxa.o
obj-$(CONFIG_GPIO_RASPBERRYPI_EXP) += gpio-raspberrypi-exp.o

View File

@ -0,0 +1,370 @@
// SPDX-License-Identifier: GPL-2.0
//! Driver for the ARM PrimeCell(tm) General Purpose Input/Output (PL061).
//!
//! Based on the C driver written by Baruch Siach <baruch@tkos.co.il>.
use kernel::{
amba, bit, bits_iter, define_amba_id_table, device, gpio,
io_mem::IoMem,
irq::{self, ExtraResult, IrqData, LockedIrqData},
power,
prelude::*,
sync::{RawSpinLock, Ref, RefBorrow},
};
/// Direction register: a set bit configures the corresponding line as an output.
const GPIODIR: usize = 0x400;
/// Interrupt sense register: set = level detection, clear = edge detection.
const GPIOIS: usize = 0x404;
/// Interrupt both-edges register: set = trigger on both edges (GPIOIEV is then ignored).
const GPIOIBE: usize = 0x408;
/// Interrupt event register: selects level polarity or which single edge triggers.
const GPIOIEV: usize = 0x40C;
/// Interrupt mask (enable) register.
const GPIOIE: usize = 0x410;
/// Masked interrupt status register: read to find pending lines.
const GPIOMIS: usize = 0x418;
/// Interrupt clear register: written to acknowledge edge interrupts.
const GPIOIC: usize = 0x41C;
/// Size of the mapped MMIO register window.
const GPIO_SIZE: usize = 0x1000;
/// Number of GPIO lines provided by the PL061.
const PL061_GPIO_NR: u16 = 8;
/// Snapshot of the PL061 registers saved on suspend and restored on resume.
#[derive(Default)]
struct ContextSaveRegs {
    // Output value of every line configured as an output (one bit per line).
    gpio_data: u8,
    gpio_dir: u8,
    gpio_is: u8,
    gpio_ibe: u8,
    gpio_iev: u8,
    gpio_ie: u8,
}
/// Mutable driver state protected by `PL061Data::inner`.
#[derive(Default)]
struct PL061DataInner {
    csave_regs: ContextSaveRegs,
}
/// Per-device data shared by all driver callbacks.
struct PL061Data {
    /// The underlying device, used for `dev_*!` logging.
    dev: device::Device,
    /// State guarded by a raw spinlock (taken with irqs disabled where needed).
    inner: RawSpinLock<PL061DataInner>,
}
/// Hardware resources that may become unavailable when the device is removed
/// (hence the `Option`-returning `resources()` accesses below).
struct PL061Resources {
    /// The mapped MMIO register window.
    base: IoMem<GPIO_SIZE>,
    /// The parent interrupt this GPIO irqchip is chained from.
    parent_irq: u32,
}
type PL061Registrations = gpio::RegistrationWithIrqChip<PL061Device>;
type DeviceData = device::Data<PL061Registrations, PL061Resources, PL061Data>;
/// Marker type implementing the driver traits; all state lives in `DeviceData`.
struct PL061Device;
impl gpio::Chip for PL061Device {
    type Data = Ref<DeviceData>;
    kernel::declare_gpio_chip_operations!(
        get_direction,
        direction_input,
        direction_output,
        get,
        set
    );
    /// Reads the line's bit in GPIODIR: set means the line is an output.
    ///
    /// Returns `ENXIO` if the device resources have been revoked.
    fn get_direction(data: RefBorrow<'_, DeviceData>, offset: u32) -> Result<gpio::LineDirection> {
        let pl061 = data.resources().ok_or(ENXIO)?;
        Ok(if pl061.base.readb(GPIODIR) & bit(offset) != 0 {
            gpio::LineDirection::Out
        } else {
            gpio::LineDirection::In
        })
    }
    /// Configures the line as an input by clearing its GPIODIR bit.
    fn direction_input(data: RefBorrow<'_, DeviceData>, offset: u32) -> Result {
        // Spinlock with irqs disabled protects the read-modify-write of GPIODIR.
        let _guard = data.inner.lock_irqdisable();
        let pl061 = data.resources().ok_or(ENXIO)?;
        let mut gpiodir = pl061.base.readb(GPIODIR);
        gpiodir &= !bit(offset);
        pl061.base.writeb(gpiodir, GPIODIR);
        Ok(())
    }
    /// Configures the line as an output and drives it to `value`.
    fn direction_output(data: RefBorrow<'_, DeviceData>, offset: u32, value: bool) -> Result {
        // Address `bit(offset + 2)` selects line `offset` via the PL061's masked
        // data-register addressing (same pattern as `get`/`set` below).
        let woffset = bit(offset + 2).into();
        let _guard = data.inner.lock_irqdisable();
        let pl061 = data.resources().ok_or(ENXIO)?;
        pl061.base.try_writeb((value as u8) << offset, woffset)?;
        let mut gpiodir = pl061.base.readb(GPIODIR);
        gpiodir |= bit(offset);
        pl061.base.writeb(gpiodir, GPIODIR);
        // gpio value is set again, because pl061 doesn't allow to set value of a gpio pin before
        // configuring it in OUT mode.
        pl061.base.try_writeb((value as u8) << offset, woffset)?;
        Ok(())
    }
    /// Reads the line's current value through the masked data-register access.
    fn get(data: RefBorrow<'_, DeviceData>, offset: u32) -> Result<bool> {
        let pl061 = data.resources().ok_or(ENXIO)?;
        Ok(pl061.base.try_readb(bit(offset + 2).into())? != 0)
    }
    /// Sets the line's output value; silently a no-op if the device is gone.
    fn set(data: RefBorrow<'_, DeviceData>, offset: u32, value: bool) {
        if let Some(pl061) = data.resources() {
            let woffset = bit(offset + 2).into();
            let _ = pl061.base.try_writeb((value as u8) << offset, woffset);
        }
    }
}
impl gpio::ChipWithIrqChip for PL061Device {
    /// Chained handler for the parent interrupt: reads the masked interrupt
    /// status and forwards each pending line's irq through `domain`.
    fn handle_irq_flow(
        data: RefBorrow<'_, DeviceData>,
        desc: &irq::Descriptor,
        domain: &irq::Domain,
    ) {
        let chained = desc.enter_chained();
        if let Some(pl061) = data.resources() {
            // GPIOMIS has one bit set per line with a pending (unmasked) interrupt.
            let pending = pl061.base.readb(GPIOMIS);
            for offset in bits_iter(pending) {
                domain.generic_handle_chained(offset, &chained);
            }
        }
    }
}
impl irq::Chip for PL061Device {
    type Data = Ref<DeviceData>;
    kernel::declare_irq_chip_operations!(set_type, set_wake);
    /// Configures how a GPIO line triggers its interrupt: high/low level, a single
    /// edge (rising or falling), or both edges.
    ///
    /// Returns `EINVAL` for an out-of-range line or a level+edge combination, and
    /// `ENXIO` when the device resources have been revoked.
    fn set_type(
        data: RefBorrow<'_, DeviceData>,
        irq_data: &mut LockedIrqData,
        trigger: u32,
    ) -> Result<ExtraResult> {
        let offset = irq_data.hwirq();
        let bit = bit(offset);
        if offset >= PL061_GPIO_NR.into() {
            return Err(EINVAL);
        }
        // Level and edge detection are mutually exclusive on this hardware.
        if trigger & (irq::Type::LEVEL_HIGH | irq::Type::LEVEL_LOW) != 0
            && trigger & (irq::Type::EDGE_RISING | irq::Type::EDGE_FALLING) != 0
        {
            dev_err!(
                data.dev,
                "trying to configure line {} for both level and edge detection, choose one!\n",
                offset
            );
            return Err(EINVAL);
        }
        // Read-modify-write of the three trigger registers under the lock.
        let _guard = data.inner.lock_irqdisable();
        let pl061 = data.resources().ok_or(ENXIO)?;
        let mut gpioiev = pl061.base.readb(GPIOIEV);
        let mut gpiois = pl061.base.readb(GPIOIS);
        let mut gpioibe = pl061.base.readb(GPIOIBE);
        if trigger & (irq::Type::LEVEL_HIGH | irq::Type::LEVEL_LOW) != 0 {
            let polarity = trigger & irq::Type::LEVEL_HIGH != 0;
            // Disable edge detection.
            gpioibe &= !bit;
            // Enable level detection.
            gpiois |= bit;
            // Select polarity.
            if polarity {
                gpioiev |= bit;
            } else {
                gpioiev &= !bit;
            }
            irq_data.set_level_handler();
            dev_dbg!(
                data.dev,
                "line {}: IRQ on {} level\n",
                offset,
                if polarity { "HIGH" } else { "LOW" }
            );
        } else if (trigger & irq::Type::EDGE_BOTH) == irq::Type::EDGE_BOTH {
            // Disable level detection.
            gpiois &= !bit;
            // Select both edges; setting this makes GPIOIEV be ignored.
            gpioibe |= bit;
            irq_data.set_edge_handler();
            dev_dbg!(data.dev, "line {}: IRQ on both edges\n", offset);
        } else if trigger & (irq::Type::EDGE_RISING | irq::Type::EDGE_FALLING) != 0 {
            let rising = trigger & irq::Type::EDGE_RISING != 0;
            // Disable level detection.
            gpiois &= !bit;
            // Clear detection on both edges.
            gpioibe &= !bit;
            // Select edge.
            if rising {
                gpioiev |= bit;
            } else {
                gpioiev &= !bit;
            }
            irq_data.set_edge_handler();
            dev_dbg!(
                data.dev,
                "line {}: IRQ on {} edge\n",
                offset,
                if rising { "RISING" } else { "FALLING" }
            );
        } else {
            // No trigger: disable everything.
            gpiois &= !bit;
            gpioibe &= !bit;
            gpioiev &= !bit;
            irq_data.set_bad_handler();
            dev_warn!(data.dev, "no trigger selected for line {}\n", offset);
        }
        pl061.base.writeb(gpiois, GPIOIS);
        pl061.base.writeb(gpioibe, GPIOIBE);
        pl061.base.writeb(gpioiev, GPIOIEV);
        Ok(ExtraResult::None)
    }
    /// Masks the line's interrupt by clearing its bit in GPIOIE.
    fn mask(data: RefBorrow<'_, DeviceData>, irq_data: &IrqData) {
        let mask = bit(irq_data.hwirq() % irq::HwNumber::from(PL061_GPIO_NR));
        let _guard = data.inner.lock();
        if let Some(pl061) = data.resources() {
            let gpioie = pl061.base.readb(GPIOIE) & !mask;
            pl061.base.writeb(gpioie, GPIOIE);
        }
    }
    /// Unmasks the line's interrupt by setting its bit in GPIOIE.
    fn unmask(data: RefBorrow<'_, DeviceData>, irq_data: &IrqData) {
        let mask = bit(irq_data.hwirq() % irq::HwNumber::from(PL061_GPIO_NR));
        let _guard = data.inner.lock();
        if let Some(pl061) = data.resources() {
            let gpioie = pl061.base.readb(GPIOIE) | mask;
            pl061.base.writeb(gpioie, GPIOIE);
        }
    }
    // This gets called from the edge IRQ handler to ACK the edge IRQ in the GPIOIC
    // (interrupt-clear) register. For level IRQs this is not needed: these go away when the level
    // signal goes away.
    fn ack(data: RefBorrow<'_, DeviceData>, irq_data: &IrqData) {
        let mask = bit(irq_data.hwirq() % irq::HwNumber::from(PL061_GPIO_NR));
        let _guard = data.inner.lock();
        if let Some(pl061) = data.resources() {
            pl061.base.writeb(mask.into(), GPIOIC);
        }
    }
    /// Routes wake-up configuration to the parent interrupt.
    fn set_wake(data: RefBorrow<'_, DeviceData>, _irq_data: &IrqData, on: bool) -> Result {
        let pl061 = data.resources().ok_or(ENXIO)?;
        irq::set_wake(pl061.parent_irq, on)
    }
}
impl amba::Driver for PL061Device {
    type Data = Ref<DeviceData>;
    type PowerOps = Self;
    // Match the PL061 AMBA peripheral id.
    define_amba_id_table! {(), [
        ({id: 0x00041061, mask: 0x000fffff}, None),
    ]}
    /// Binds the driver to an AMBA device: maps the MMIO region, initializes the
    /// shared state, disables all GPIO interrupts, and registers the gpio chip
    /// together with its irqchip.
    fn probe(dev: &mut amba::Device, _data: Option<&Self::IdInfo>) -> Result<Ref<DeviceData>> {
        let res = dev.take_resource().ok_or(ENXIO)?;
        let irq = dev.irq(0).ok_or(ENXIO)?;
        let mut data = kernel::new_device_data!(
            gpio::RegistrationWithIrqChip::new(),
            PL061Resources {
                // SAFETY: This device doesn't support DMA.
                base: unsafe { IoMem::try_new(res)? },
                parent_irq: irq,
            },
            PL061Data {
                dev: device::Device::from_dev(dev),
                // SAFETY: We call `rawspinlock_init` below.
                inner: unsafe { RawSpinLock::new(PL061DataInner::default()) },
            },
            "PL061::Registrations"
        )?;
        // SAFETY: General part of the data is pinned when `data` is.
        let gen_inner = unsafe { data.as_mut().map_unchecked_mut(|d| &mut (**d).inner) };
        kernel::rawspinlock_init!(gen_inner, "PL061Data::inner");
        let data = Ref::<DeviceData>::from(data);
        data.resources().ok_or(ENXIO)?.base.writeb(0, GPIOIE); // disable irqs
        data.registrations()
            .ok_or(ENXIO)?
            .as_pinned_mut()
            .register::<Self>(PL061_GPIO_NR, None, dev, data.clone(), irq)?;
        dev_info!(data.dev, "PL061 GPIO chip registered\n");
        Ok(data)
    }
}
impl power::Operations for PL061Device {
    type Data = Ref<DeviceData>;
    /// Saves all configuration registers, plus the current output value of every
    /// line configured as an output, so they can be restored after resume.
    fn suspend(data: RefBorrow<'_, DeviceData>) -> Result {
        let mut inner = data.inner.lock();
        let pl061 = data.resources().ok_or(ENXIO)?;
        inner.csave_regs.gpio_data = 0;
        inner.csave_regs.gpio_dir = pl061.base.readb(GPIODIR);
        inner.csave_regs.gpio_is = pl061.base.readb(GPIOIS);
        inner.csave_regs.gpio_ibe = pl061.base.readb(GPIOIBE);
        inner.csave_regs.gpio_iev = pl061.base.readb(GPIOIEV);
        inner.csave_regs.gpio_ie = pl061.base.readb(GPIOIE);
        for offset in 0..PL061_GPIO_NR {
            // Only output lines have a value worth saving.
            if inner.csave_regs.gpio_dir & bit(offset) != 0 {
                if let Ok(v) = <Self as gpio::Chip>::get(data, offset.into()) {
                    inner.csave_regs.gpio_data |= (v as u8) << offset;
                }
            }
        }
        Ok(())
    }
    /// Restores line directions and output values first, then the interrupt
    /// configuration registers saved by `suspend`.
    fn resume(data: RefBorrow<'_, DeviceData>) -> Result {
        let inner = data.inner.lock();
        let pl061 = data.resources().ok_or(ENXIO)?;
        for offset in 0..PL061_GPIO_NR {
            if inner.csave_regs.gpio_dir & bit(offset) != 0 {
                let value = inner.csave_regs.gpio_data & bit(offset) != 0;
                let _ = <Self as gpio::Chip>::direction_output(data, offset.into(), value);
            } else {
                let _ = <Self as gpio::Chip>::direction_input(data, offset.into());
            }
        }
        pl061.base.writeb(inner.csave_regs.gpio_is, GPIOIS);
        pl061.base.writeb(inner.csave_regs.gpio_ibe, GPIOIBE);
        pl061.base.writeb(inner.csave_regs.gpio_iev, GPIOIEV);
        pl061.base.writeb(inner.csave_regs.gpio_ie, GPIOIE);
        Ok(())
    }
    // Hibernation freeze/restore reuse the suspend/resume paths.
    fn freeze(data: RefBorrow<'_, DeviceData>) -> Result {
        Self::suspend(data)
    }
    fn restore(data: RefBorrow<'_, DeviceData>) -> Result {
        Self::resume(data)
    }
}
// Module registration: exposes `PL061Device` as an AMBA driver named "pl061_gpio".
module_amba_driver! {
    type: PL061Device,
    name: b"pl061_gpio",
    author: b"Wedson Almeida Filho",
    license: b"GPL",
}

View File

@ -15,7 +15,7 @@
#include <asm/sections.h>
#define KSYM_NAME_LEN 128
#define KSYM_NAME_LEN 512
#define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s %s]") + \
(KSYM_NAME_LEN - 1) + \
2*(BITS_PER_LONG*3/10) + (MODULE_NAME_LEN - 1) + \

View File

@ -99,11 +99,17 @@
extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
struct lock_class_key *key, short inner);
static inline void _raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
struct lock_class_key *key)
{
__raw_spin_lock_init(lock, name, key, LD_WAIT_SPIN);
}
# define raw_spin_lock_init(lock) \
do { \
static struct lock_class_key __key; \
\
__raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN); \
_raw_spin_lock_init((lock), #lock, &__key); \
} while (0)
#else
@ -326,12 +332,17 @@ static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
#ifdef CONFIG_DEBUG_SPINLOCK
# define spin_lock_init(lock) \
do { \
static struct lock_class_key __key; \
\
__raw_spin_lock_init(spinlock_check(lock), \
#lock, &__key, LD_WAIT_CONFIG); \
static inline void __spin_lock_init(spinlock_t *lock, const char *name,
struct lock_class_key *key)
{
__raw_spin_lock_init(spinlock_check(lock), name, key, LD_WAIT_CONFIG);
}
# define spin_lock_init(lock) \
do { \
static struct lock_class_key __key; \
\
__spin_lock_init(lock, #lock, &__key); \
} while (0)
#else

View File

@ -251,20 +251,22 @@ struct binder_extended_error {
__s32 param;
};
#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read)
#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64)
#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32)
#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, __s32)
#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32)
#define BINDER_THREAD_EXIT _IOW('b', 8, __s32)
#define BINDER_VERSION _IOWR('b', 9, struct binder_version)
#define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info)
#define BINDER_GET_NODE_INFO_FOR_REF _IOWR('b', 12, struct binder_node_info_for_ref)
#define BINDER_SET_CONTEXT_MGR_EXT _IOW('b', 13, struct flat_binder_object)
#define BINDER_FREEZE _IOW('b', 14, struct binder_freeze_info)
#define BINDER_GET_FROZEN_INFO _IOWR('b', 15, struct binder_frozen_status_info)
#define BINDER_ENABLE_ONEWAY_SPAM_DETECTION _IOW('b', 16, __u32)
#define BINDER_GET_EXTENDED_ERROR _IOWR('b', 17, struct binder_extended_error)
enum {
BINDER_WRITE_READ = _IOWR('b', 1, struct binder_write_read),
BINDER_SET_IDLE_TIMEOUT = _IOW('b', 3, __s64),
BINDER_SET_MAX_THREADS = _IOW('b', 5, __u32),
BINDER_SET_IDLE_PRIORITY = _IOW('b', 6, __s32),
BINDER_SET_CONTEXT_MGR = _IOW('b', 7, __s32),
BINDER_THREAD_EXIT = _IOW('b', 8, __s32),
BINDER_VERSION = _IOWR('b', 9, struct binder_version),
BINDER_GET_NODE_DEBUG_INFO = _IOWR('b', 11, struct binder_node_debug_info),
BINDER_GET_NODE_INFO_FOR_REF = _IOWR('b', 12, struct binder_node_info_for_ref),
BINDER_SET_CONTEXT_MGR_EXT = _IOW('b', 13, struct flat_binder_object),
BINDER_FREEZE = _IOW('b', 14, struct binder_freeze_info),
BINDER_GET_FROZEN_INFO = _IOWR('b', 15, struct binder_frozen_status_info),
BINDER_ENABLE_ONEWAY_SPAM_DETECTION = _IOW('b', 16, __u32),
BINDER_GET_EXTENDED_ERROR = _IOWR('b', 17, struct binder_extended_error),
};
/*
* NOTE: Two special error codes you should check for when calling

View File

@ -60,6 +60,23 @@ config LLD_VERSION
default $(ld-version) if LD_IS_LLD
default 0
config RUST_IS_AVAILABLE
# Because some common tools like 'diff' do not preserve file permissions,
# 'rust-is-available.sh' may end up without execution permission in trees
# managed with such tools. As a temporary workaround, we specify the
# interpreter ('/bin/sh') explicitly. This will be unneeded once
# 'rust-is-available.sh' is merged in the mainline with its execution
# permission.
def_bool $(success,/bin/sh $(srctree)/scripts/rust-is-available.sh)
help
This shows whether a suitable Rust toolchain is available (found).
Please see Documentation/rust/quick-start.rst for instructions on how
to satisfy the build requirements of Rust support.
In particular, the Makefile target 'rustavailable' is useful to check
why the Rust toolchain is not being detected.
config CC_CAN_LINK
bool
default $(success,$(srctree)/scripts/cc-can-link.sh $(CC) $(CLANG_FLAGS) $(USERCFLAGS) $(USERLDFLAGS) $(m64-flag)) if 64BIT
@ -151,7 +168,8 @@ config WERROR
default COMPILE_TEST
help
A kernel build should not cause any compiler warnings, and this
enables the '-Werror' flag to enforce that rule by default.
enables the '-Werror' (for C) and '-Dwarnings' (for Rust) flags
to enforce that rule by default.
However, if you have a new (or very old) compiler with odd and
unusual warnings, or you have some architecture with problems,
@ -1898,6 +1916,37 @@ config PROFILING
Say Y here to enable the extended profiling support mechanisms used
by profilers.
config RUST
bool "Rust support"
depends on HAVE_RUST
depends on RUST_IS_AVAILABLE
depends on !MODVERSIONS
depends on !GCC_PLUGINS
depends on !DEBUG_INFO_BTF
select CONSTRUCTORS
help
Enables Rust support in the kernel.
This allows other Rust-related options, like drivers written in Rust,
to be selected.
It is also required to be able to load external kernel modules
written in Rust.
See Documentation/rust/ for more information.
If unsure, say N.
config RUSTC_VERSION_TEXT
string
depends on RUST
default $(shell,command -v $(RUSTC) >/dev/null 2>&1 && $(RUSTC) --version || echo n)
config BINDGEN_VERSION_TEXT
string
depends on RUST
default $(shell,command -v $(BINDGEN) >/dev/null 2>&1 && $(BINDGEN) --version || echo n)
#
# Place an empty function call at each tracepoint site. Can be
# dynamically changed for a probe function.

View File

@ -70,12 +70,20 @@ static unsigned int kallsyms_expand_symbol(unsigned int off,
data = &kallsyms_names[off];
len = *data;
data++;
off++;
/* If MSB is 1, it is a "big" symbol, so needs an additional byte. */
if ((len & 0x80) != 0) {
len = (len & 0x7F) | (*data << 7);
data++;
off++;
}
/*
* Update the offset to return the offset for the next symbol on
* the compressed stream.
*/
off += len + 1;
off += len;
/*
* For every byte on the compressed symbol data, copy the table
@ -128,7 +136,7 @@ static char kallsyms_get_symbol_type(unsigned int off)
static unsigned int get_symbol_offset(unsigned long pos)
{
const u8 *name;
int i;
int i, len;
/*
* Use the closest marker we have. We have markers every 256 positions,
@ -142,8 +150,18 @@ static unsigned int get_symbol_offset(unsigned long pos)
* so we just need to add the len to the current pointer for every
* symbol we wish to skip.
*/
for (i = 0; i < (pos & 0xFF); i++)
name = name + (*name) + 1;
for (i = 0; i < (pos & 0xFF); i++) {
len = *name;
/*
* If MSB is 1, it is a "big" symbol, so we need to look into
* the next byte (and skip it, too).
*/
if ((len & 0x80) != 0)
len = ((len & 0x7F) | (name[1] << 7)) + 1;
name = name + len + 1;
}
return name - kallsyms_names;
}

View File

@ -213,7 +213,7 @@ static int klp_resolve_symbols(Elf_Shdr *sechdrs, const char *strtab,
* we use the smallest/strictest upper bound possible (56, based on
* the current definition of MODULE_NAME_LEN) to prevent overflows.
*/
BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 128);
BUILD_BUG_ON(MODULE_NAME_LEN < 56 || KSYM_NAME_LEN != 512);
relas = (Elf_Rela *) relasec->sh_addr;
/* For each rela in this klp relocation section */
@ -227,7 +227,7 @@ static int klp_resolve_symbols(Elf_Shdr *sechdrs, const char *strtab,
/* Format: .klp.sym.sym_objname.sym_name,sympos */
cnt = sscanf(strtab + sym->st_name,
".klp.sym.%55[^.].%127[^,],%lu",
".klp.sym.%55[^.].%511[^,],%lu",
sym_objname, sym_name, &sympos);
if (cnt != 3) {
pr_err("symbol %s has an incorrectly formatted name\n",

View File

@ -2688,6 +2688,161 @@ config HYPERV_TESTING
endmenu # "Kernel Testing and Coverage"
menu "Rust hacking"
config RUST_DEBUG_ASSERTIONS
bool "Debug assertions"
depends on RUST
help
Enables rustc's `-Cdebug-assertions` codegen option.
This flag lets you turn `cfg(debug_assertions)` conditional
compilation on or off. This can be used to enable extra debugging
code in development but not in production. For example, it controls
the behavior of the standard library's `debug_assert!` macro.
Note that this will apply to all Rust code, including `core`.
If unsure, say N.
config RUST_OVERFLOW_CHECKS
bool "Overflow checks"
default y
depends on RUST
help
Enables rustc's `-Coverflow-checks` codegen option.
This flag allows you to control the behavior of runtime integer
overflow. When overflow-checks are enabled, a Rust panic will occur
on overflow.
Note that this will apply to all Rust code, including `core`.
If unsure, say Y.
choice
prompt "Optimization level"
default RUST_OPT_LEVEL_SIMILAR_AS_CHOSEN_FOR_C
depends on RUST
help
Controls rustc's `-Copt-level` codegen option.
This flag controls the optimization level.
If unsure, say "Similar as chosen for C".
config RUST_OPT_LEVEL_SIMILAR_AS_CHOSEN_FOR_C
bool "Similar as chosen for C"
help
This choice will pick a similar optimization level as chosen in
the "Compiler optimization level" for C:
-O2 is currently mapped to -Copt-level=2
-O3 is currently mapped to -Copt-level=3
-Os is currently mapped to -Copt-level=s
The mapping may change over time to follow the intended semantics
of the choice for C as sensibly as possible.
This is the default.
config RUST_OPT_LEVEL_0
bool "No optimizations (-Copt-level=0)"
help
Not recommended for most purposes. It may come in handy for debugging
suspected optimizer bugs, unexpected undefined behavior, etc.
Note that this level will *not* enable debug assertions nor overflow
checks on its own (like it happens when interacting with rustc
directly). Use the corresponding configuration options to control
that instead, orthogonally.
Note this level may cause excessive stack usage, which can lead to stack
overflow and subsequent crashes.
config RUST_OPT_LEVEL_1
bool "Basic optimizations (-Copt-level=1)"
help
Useful for debugging without getting too lost, but without
the overhead and boilerplate of no optimizations at all.
Note this level may cause excessive stack usage, which can lead to stack
overflow and subsequent crashes.
config RUST_OPT_LEVEL_2
bool "Some optimizations (-Copt-level=2)"
help
The sensible choice in most cases.
config RUST_OPT_LEVEL_3
bool "All optimizations (-Copt-level=3)"
help
Yet more performance (hopefully).
config RUST_OPT_LEVEL_S
bool "Optimize for size (-Copt-level=s)"
help
Smaller kernel, ideally without too much performance loss.
config RUST_OPT_LEVEL_Z
bool "Optimize for size, no loop vectorization (-Copt-level=z)"
help
Like the previous level, but also turn off loop vectorization.
endchoice
choice
prompt "Build-time assertions"
default RUST_BUILD_ASSERT_ALLOW if RUST_OPT_LEVEL_0
default RUST_BUILD_ASSERT_DENY if !RUST_OPT_LEVEL_0
depends on RUST
help
Controls how `build_error!` and `build_assert!` are handled during build.
If calls to them exist in the binary, it may indicate a violated invariant
or that the optimizer failed to verify the invariant during compilation.
You can choose to abort compilation or ignore them during build and let the
check be carried to runtime.
If optimizations are turned off, you cannot select "Deny".
If unsure, say "Deny".
config RUST_BUILD_ASSERT_ALLOW
bool "Allow"
help
Unoptimized calls to `build_error!` will be converted to `panic!`
and checked at runtime.
config RUST_BUILD_ASSERT_WARN
bool "Warn"
help
Unoptimized calls to `build_error!` will be converted to `panic!`
and checked at runtime, but warnings will be generated when building.
config RUST_BUILD_ASSERT_DENY
bool "Deny"
depends on !RUST_OPT_LEVEL_0
help
Unoptimized calls to `build_error!` will abort compilation.
endchoice
config RUST_KERNEL_KUNIT_TEST
bool "KUnit test for the `kernel` crate" if !KUNIT_ALL_TESTS
depends on RUST && KUNIT=y
default KUNIT_ALL_TESTS
help
This builds the documentation tests of the `kernel` crate
as KUnit tests.
For more information on KUnit and unit tests in general,
please refer to the KUnit documentation in Documentation/dev-tools/kunit/.
If unsure, say N.
endmenu # "Rust"
source "Documentation/Kconfig"
endmenu # Kernel hacking

View File

@ -2246,6 +2246,9 @@ int __init no_hash_pointers_enable(char *str)
}
early_param("no_hash_pointers", no_hash_pointers_enable);
/* Used for Rust formatting ('%pA'). */
char *rust_fmt_argument(char *buf, char *end, void *ptr);
/*
* Show a '%p' thing. A kernel extension is that the '%p' is followed
* by an extra set of alphanumeric characters that are extended format
@ -2372,6 +2375,10 @@ early_param("no_hash_pointers", no_hash_pointers_enable);
*
* Note: The default behaviour (unadorned %p) is to hash the address,
* rendering it useful as a unique identifier.
*
* There is also a '%pA' format specifier, but it is only intended to be used
* from Rust code to format core::fmt::Arguments. Do *not* use it from C.
* See rust/kernel/print.rs for details.
*/
static noinline_for_stack
char *pointer(const char *fmt, char *buf, char *end, void *ptr,
@ -2444,6 +2451,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
return device_node_string(buf, end, ptr, spec, fmt + 1);
case 'f':
return fwnode_string(buf, end, ptr, spec, fmt + 1);
case 'A':
if (!IS_ENABLED(CONFIG_RUST)) {
WARN_ONCE(1, "Please remove %%pA from non-Rust code\n");
return error_string(buf, end, "(%pA?)", spec);
}
return rust_fmt_argument(buf, end, ptr);
case 'x':
return pointer_string(buf, end, ptr, spec);
case 'e':

10
rust/.gitignore vendored Normal file
View File

@ -0,0 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
target.json
bindings_generated.rs
bindings_helpers_generated.rs
exports_*_generated.h
doctests_kernel_generated.rs
doctests_kernel_generated_kunit.c
doc/
test/

398
rust/Makefile Normal file
View File

@ -0,0 +1,398 @@
# SPDX-License-Identifier: GPL-2.0
always-$(CONFIG_RUST) += target.json
no-clean-files += target.json
obj-$(CONFIG_RUST) += core.o compiler_builtins.o
always-$(CONFIG_RUST) += exports_core_generated.h
# Missing prototypes are expected in the helpers since these are exported
# for Rust only, thus there is no header nor prototypes.
obj-$(CONFIG_RUST) += helpers.o
CFLAGS_REMOVE_helpers.o = -Wmissing-prototypes -Wmissing-declarations
always-$(CONFIG_RUST) += libmacros.so
no-clean-files += libmacros.so
always-$(CONFIG_RUST) += bindings_generated.rs bindings_helpers_generated.rs
obj-$(CONFIG_RUST) += alloc.o kernel.o
always-$(CONFIG_RUST) += exports_alloc_generated.h exports_kernel_generated.h
ifdef CONFIG_RUST_BUILD_ASSERT_DENY
always-$(CONFIG_RUST) += build_error.o
else
obj-$(CONFIG_RUST) += build_error.o
endif
obj-$(CONFIG_RUST) += exports.o
obj-$(CONFIG_RUST_KERNEL_KUNIT_TEST) += doctests_kernel_generated.o
obj-$(CONFIG_RUST_KERNEL_KUNIT_TEST) += doctests_kernel_generated_kunit.o
# Avoids running `$(RUSTC)` for the sysroot when it may not be available.
ifdef CONFIG_RUST
# `$(rust_flags)` is passed in case the user added `--sysroot`.
rustc_sysroot := $(shell $(RUSTC) $(rust_flags) --print sysroot)
rustc_host_target := $(shell $(RUSTC) --version --verbose | grep -F 'host: ' | cut -d' ' -f2)
RUST_LIB_SRC ?= $(rustc_sysroot)/lib/rustlib/src/rust/library
ifeq ($(quiet),silent_)
cargo_quiet=-q
rust_test_quiet=-q
rustdoc_test_quiet=--test-args -q
rustdoc_test_kernel_quiet=>/dev/null
else ifeq ($(quiet),quiet_)
rust_test_quiet=-q
rustdoc_test_quiet=--test-args -q
rustdoc_test_kernel_quiet=>/dev/null
else
cargo_quiet=--verbose
endif
core-cfgs = \
--cfg no_fp_fmt_parse
alloc-cfgs = \
--cfg no_global_oom_handling \
--cfg no_rc \
--cfg no_sync
quiet_cmd_rustdoc = RUSTDOC $(if $(rustdoc_host),H, ) $<
cmd_rustdoc = \
OBJTREE=$(abspath $(objtree)) \
$(RUSTDOC) $(if $(rustdoc_host),$(rust_common_flags),$(rust_flags)) \
$(rustc_target_flags) -L$(objtree)/$(obj) \
--output $(objtree)/$(obj)/doc \
--crate-name $(subst rustdoc-,,$@) \
@$(objtree)/include/generated/rustc_cfg $<
# The `html_logo_url` and `html_favicon_url` forms of the `doc` attribute
# can be used to specify a custom logo. However:
# - The given value is used as-is, thus it cannot be relative or a local file
# (unlike the non-custom case) since the generated docs have subfolders.
# - It requires adding it to every crate.
# - It requires changing `core` which comes from the sysroot.
#
# Using `-Zcrate-attr` would solve the last two points, but not the first.
# The https://github.com/rust-lang/rfcs/pull/3226 RFC suggests two new
# command-like flags to solve the issue. Meanwhile, we use the non-custom case
# and then retouch the generated files.
rustdoc: rustdoc-core rustdoc-macros rustdoc-compiler_builtins \
rustdoc-alloc rustdoc-kernel
$(Q)cp $(srctree)/Documentation/logo.gif $(objtree)/$(obj)/doc
$(Q)find $(objtree)/$(obj)/doc -name '*.html' -type f -print0 | xargs -0 sed -Ei \
-e 's:rust-logo\.svg:logo.gif:g' \
-e 's:rust-logo\.png:logo.gif:g' \
-e 's:favicon\.svg:logo.gif:g' \
-e 's:<link rel="alternate icon" type="image/png" href="[./]*favicon-(16x16|32x32)\.png">::g'
rustdoc-macros: private rustdoc_host = yes
rustdoc-macros: private rustc_target_flags = --crate-type proc-macro \
--extern proc_macro
rustdoc-macros: $(src)/macros/lib.rs FORCE
$(call if_changed,rustdoc)
rustdoc-core: private rustc_target_flags = $(core-cfgs)
rustdoc-core: $(RUST_LIB_SRC)/core/src/lib.rs FORCE
$(call if_changed,rustdoc)
rustdoc-compiler_builtins: $(src)/compiler_builtins.rs rustdoc-core FORCE
$(call if_changed,rustdoc)
# We need to allow `rustdoc::broken_intra_doc_links` because some
# `no_global_oom_handling` functions refer to non-`no_global_oom_handling`
# functions. Ideally `rustdoc` would have a way to distinguish broken links
# due to things that are "configured out" vs. entirely non-existing ones.
rustdoc-alloc: private rustc_target_flags = $(alloc-cfgs) \
-Arustdoc::broken_intra_doc_links
rustdoc-alloc: $(src)/alloc/lib.rs rustdoc-core rustdoc-compiler_builtins FORCE
$(call if_changed,rustdoc)
rustdoc-kernel: private rustc_target_flags = --extern alloc \
--extern build_error --extern macros=$(objtree)/$(obj)/libmacros.so
rustdoc-kernel: $(src)/kernel/lib.rs rustdoc-core rustdoc-macros \
rustdoc-compiler_builtins rustdoc-alloc $(obj)/libmacros.so \
$(obj)/bindings_generated.rs $(obj)/bindings_helpers_generated.rs FORCE
$(call if_changed,rustdoc)
quiet_cmd_rustc_test_library = RUSTC TL $<
cmd_rustc_test_library = \
OBJTREE=$(abspath $(objtree)) \
$(RUSTC) $(rust_common_flags) \
@$(objtree)/include/generated/rustc_cfg $(rustc_target_flags) \
--crate-type $(if $(rustc_test_library_proc),proc-macro,rlib) \
--out-dir $(objtree)/$(obj)/test --cfg testlib \
--sysroot $(objtree)/$(obj)/test/sysroot \
-L$(objtree)/$(obj)/test \
--crate-name $(subst rusttest-,,$(subst rusttestlib-,,$@)) $<
rusttestlib-build_error: $(src)/build_error.rs rusttest-prepare FORCE
$(call if_changed,rustc_test_library)
rusttestlib-macros: private rustc_target_flags = --extern proc_macro
rusttestlib-macros: private rustc_test_library_proc = yes
rusttestlib-macros: $(src)/macros/lib.rs rusttest-prepare FORCE
$(call if_changed,rustc_test_library)
# Runs documentation tests (doctests) for a host-side test crate.
quiet_cmd_rustdoc_test = RUSTDOC T $<
cmd_rustdoc_test = \
OBJTREE=$(abspath $(objtree)) \
$(RUSTDOC) --test $(rust_common_flags) \
@$(objtree)/include/generated/rustc_cfg \
$(rustc_target_flags) $(rustdoc_test_target_flags) \
--sysroot $(objtree)/$(obj)/test/sysroot $(rustdoc_test_quiet) \
-L$(objtree)/$(obj)/test --output $(objtree)/$(obj)/doc \
--crate-name $(subst rusttest-,,$@) $<
# Extracts the `kernel` crate doctests into generated source (via the
# `rustdoc_test_builder.py`/`rustdoc_test_gen.py` scripts) instead of
# running them on the host (`--no-run`): they are compiled as KUnit tests.
quiet_cmd_rustdoc_test_kernel = RUSTDOC TK $<
cmd_rustdoc_test_kernel = \
rm -rf $(objtree)/$(obj)/test/doctests/kernel; \
mkdir -p $(objtree)/$(obj)/test/doctests/kernel; \
OBJTREE=$(abspath $(objtree)) \
$(RUSTDOC) --test $(rust_flags) \
@$(objtree)/include/generated/rustc_cfg \
-L$(objtree)/$(obj) --extern alloc --extern kernel \
--extern build_error --extern macros \
--no-run --crate-name kernel -Zunstable-options \
--test-builder $(srctree)/scripts/rustdoc_test_builder.py \
$< $(rustdoc_test_kernel_quiet); \
$(srctree)/scripts/rustdoc_test_gen.py
%/doctests_kernel_generated.rs %/doctests_kernel_generated_kunit.c: $(src)/kernel/lib.rs $(obj)/kernel.o FORCE
	$(call if_changed,rustdoc_test_kernel)
# We cannot use `-Zpanic-abort-tests` because some tests are dynamic,
# so for the moment we skip `-Cpanic=abort`.
# Compiles a test binary against the custom sysroot and runs it
# immediately (second shell command below).
quiet_cmd_rustc_test = RUSTC T $<
cmd_rustc_test = \
OBJTREE=$(abspath $(objtree)) \
$(RUSTC) --test $(rust_common_flags) \
@$(objtree)/include/generated/rustc_cfg \
$(rustc_target_flags) --out-dir $(objtree)/$(obj)/test \
--sysroot $(objtree)/$(obj)/test/sysroot \
-L$(objtree)/$(obj)/test \
--crate-name $(subst rusttest-,,$@) $<; \
$(objtree)/$(obj)/test/$(subst rusttest-,,$@) $(rust_test_quiet) \
$(rustc_test_run_flags)
# Top-level host-side test target.
rusttest: rusttest-macros rusttest-kernel
# This prepares a custom sysroot with our custom `alloc` instead of
# the standard one.
#
# This requires several hacks:
# - Unlike `core` and `alloc`, `std` depends on more than a dozen crates,
#   including third-party crates that need to be downloaded, plus custom
#   `build.rs` steps. Thus hardcoding things here is not maintainable.
# - `cargo` knows how to build the standard library, but it is an unstable
#   feature so far (`-Zbuild-std`).
# - `cargo` only considers the use case of building the standard library
#   to use it in a given package. Thus we need to create a dummy package
#   and pick the generated libraries from there.
# - Since we only keep a subset of upstream `alloc` in-tree, we need
#   to recreate it on the fly by putting our sources on top.
# - The usual ways of modifying the dependency graph in `cargo` do not seem
#   to apply for the `-Zbuild-std` steps, thus we have to mislead it
#   by modifying the sources in the sysroot.
# - To avoid messing with the user's Rust installation, we create a clone
#   of the sysroot. However, `cargo` ignores `RUSTFLAGS` in the `-Zbuild-std`
#   steps, thus we use a wrapper binary passed via `RUSTC` to pass the flag.
#
# In the future, we hope to avoid the whole ordeal by either:
# - Making the `test` crate not depend on `std` (either improving upstream
#   or having our own custom crate).
# - Making the tests run in kernel space (requires the previous point).
# - Making `std` and friends be more like a "normal" crate, so that
#   `-Zbuild-std` and related hacks are not needed.
quiet_cmd_rustsysroot = RUSTSYSROOT
cmd_rustsysroot = \
rm -rf $(objtree)/$(obj)/test; \
mkdir -p $(objtree)/$(obj)/test; \
cp -a $(rustc_sysroot) $(objtree)/$(obj)/test/sysroot; \
cp -r $(srctree)/$(src)/alloc/* \
$(objtree)/$(obj)/test/sysroot/lib/rustlib/src/rust/library/alloc/src; \
echo '\#!/bin/sh' > $(objtree)/$(obj)/test/rustc_sysroot; \
echo "$(RUSTC) --sysroot=$(abspath $(objtree)/$(obj)/test/sysroot) \"\$$@\"" \
>> $(objtree)/$(obj)/test/rustc_sysroot; \
chmod u+x $(objtree)/$(obj)/test/rustc_sysroot; \
$(CARGO) -q new $(objtree)/$(obj)/test/dummy; \
RUSTC=$(objtree)/$(obj)/test/rustc_sysroot $(CARGO) $(cargo_quiet) \
test -Zbuild-std --target $(rustc_host_target) \
--manifest-path $(objtree)/$(obj)/test/dummy/Cargo.toml; \
rm $(objtree)/$(obj)/test/sysroot/lib/rustlib/$(rustc_host_target)/lib/*; \
cp $(objtree)/$(obj)/test/dummy/target/$(rustc_host_target)/debug/deps/* \
$(objtree)/$(obj)/test/sysroot/lib/rustlib/$(rustc_host_target)/lib
rusttest-prepare: FORCE
	$(call if_changed,rustsysroot)
rusttest-macros: private rustc_target_flags = --extern proc_macro
rusttest-macros: private rustdoc_test_target_flags = --crate-type proc-macro
rusttest-macros: $(src)/macros/lib.rs rusttest-prepare FORCE
	$(call if_changed,rustc_test)
	$(call if_changed,rustdoc_test)
# The layout tests generated by bindgen are skipped: they are not
# meaningful on the host target.
rusttest-kernel: private rustc_target_flags = --extern alloc \
--extern build_error --extern macros
rusttest-kernel: private rustc_test_run_flags = --skip bindgen_test_layout_
rusttest-kernel: $(src)/kernel/lib.rs rusttest-prepare \
rusttestlib-build_error rusttestlib-macros FORCE
	$(call if_changed,rustc_test)
	$(call if_changed,rustc_test_library)
# Generates the custom target specification consumed by `rustc`.
filechk_rust_target = $(objtree)/scripts/generate_rust_target < $<
$(obj)/target.json: $(objtree)/include/config/auto.conf FORCE
	$(call filechk,rust_target)
# With Clang the kernel C flags can be passed to libclang unchanged;
# with GCC, GCC-only flags must be filtered out first (see below).
ifdef CONFIG_CC_IS_CLANG
bindgen_c_flags = $(c_flags)
else
# bindgen relies on libclang to parse C. Ideally, bindgen would support a GCC
# plugin backend and/or the Clang driver would be perfectly compatible with GCC.
#
# For the moment, here we are tweaking the flags on the fly. This is a hack,
# and some kernel configurations may not work (e.g. `GCC_PLUGIN_RANDSTRUCT`
# if we end up using one of those structs).
bindgen_skip_c_flags := -mno-fp-ret-in-387 -mpreferred-stack-boundary=% \
-mskip-rax-setup -mgeneral-regs-only -msign-return-address=% \
-mindirect-branch=thunk-extern -mindirect-branch-register \
-mrecord-mcount -mabi=lp64 -mstack-protector-guard% -mtraceback=no \
-mno-pointers-to-nested-functions -mno-string -mno-strict-align \
-mstrict-align \
-fconserve-stack -falign-jumps=% -falign-loops=% \
-femit-struct-debug-baseonly -fno-ipa-cp-clone -fno-ipa-sra \
-fno-partial-inlining -fplugin-arg-arm_ssp_per_task_plugin-% \
-fno-reorder-blocks -fno-allow-store-data-races -fasan-shadow-offset=% \
-fzero-call-used-regs=% -fno-stack-clash-protection \
-fno-inline-functions-called-once \
--param=% --param asan-%
# Derived from `scripts/Makefile.clang`.
BINDGEN_TARGET_arm := arm-linux-gnueabi
BINDGEN_TARGET_arm64 := aarch64-linux-gnu
BINDGEN_TARGET_powerpc := powerpc64le-linux-gnu
BINDGEN_TARGET_riscv := riscv64-linux-gnu
BINDGEN_TARGET_x86 := x86_64-linux-gnu
BINDGEN_TARGET := $(BINDGEN_TARGET_$(SRCARCH))
# All warnings are inhibited since GCC builds are very experimental,
# many GCC warnings are not supported by Clang, they may only appear in
# some configurations, with new GCC versions, etc.
bindgen_extra_c_flags = -w --target=$(BINDGEN_TARGET)
bindgen_c_flags = $(filter-out $(bindgen_skip_c_flags), $(c_flags)) \
$(bindgen_extra_c_flags)
endif
# libclang cannot consume LTO-only flags; drop them when LTO is enabled.
ifdef CONFIG_LTO
bindgen_c_flags_lto = $(filter-out $(CC_FLAGS_LTO), $(bindgen_c_flags))
else
bindgen_c_flags_lto = $(bindgen_c_flags)
endif
bindgen_c_flags_final = $(bindgen_c_flags_lto)
quiet_cmd_bindgen = BINDGEN $@
cmd_bindgen = \
$(BINDGEN) $< $(bindgen_target_flags) \
--use-core --with-derive-default --ctypes-prefix c_types \
--no-debug '.*' \
--size_t-is-usize -o $@ -- $(bindgen_c_flags_final) -DMODULE \
$(bindgen_target_cflags) $(bindgen_target_extra)
# C-to-Rust bindings: the allow/block lists come from `bindgen_parameters`
# (comments and empty lines stripped).
$(obj)/bindings_generated.rs: private bindgen_target_flags = \
$(shell grep -v '^\#\|^$$' $(srctree)/$(src)/bindgen_parameters)
$(obj)/bindings_generated.rs: $(src)/kernel/bindings_helper.h \
$(src)/bindgen_parameters FORCE
	$(call if_changed_dep,bindgen)
# See `CFLAGS_REMOVE_helpers.o` above. In addition, Clang on C does not warn
# with `-Wmissing-declarations` (unlike GCC), so it is not strictly needed here
# given it is `libclang`; but for consistency, future Clang changes and/or
# a potential future GCC backend for `bindgen`, we disable it too.
# The trailing `sed` renames the `rust_helper_*` wrappers to their plain
# names on the Rust side, keeping the C symbol via `#[link_name]`.
$(obj)/bindings_helpers_generated.rs: private bindgen_target_flags = \
--blacklist-type '.*' --whitelist-var '' \
--whitelist-function 'rust_helper_.*'
$(obj)/bindings_helpers_generated.rs: private bindgen_target_cflags = \
-I$(objtree)/$(obj) -Wno-missing-prototypes -Wno-missing-declarations
$(obj)/bindings_helpers_generated.rs: private bindgen_target_extra = ; \
sed -Ei 's/pub fn rust_helper_([a-zA-Z0-9_]*)/#[link_name="rust_helper_\1"]\n    pub fn \1/g' $@
$(obj)/bindings_helpers_generated.rs: $(src)/helpers.c FORCE
	$(call if_changed_dep,bindgen)
# Generates `EXPORT_SYMBOL_RUST_GPL` declarations for the text/rodata/data
# symbols defined in a crate object.
quiet_cmd_exports = EXPORTS $@
cmd_exports = \
$(NM) -p --defined-only $< \
| grep -E ' (T|R|D) ' | cut -d ' ' -f 3 \
| xargs -Isymbol \
echo 'EXPORT_SYMBOL_RUST_GPL(symbol);' > $@
$(obj)/exports_core_generated.h: $(obj)/core.o FORCE
	$(call if_changed,exports)
$(obj)/exports_alloc_generated.h: $(obj)/alloc.o FORCE
	$(call if_changed,exports)
$(obj)/exports_kernel_generated.h: $(obj)/kernel.o FORCE
	$(call if_changed,exports)
# Builds a proc-macro crate for the host; the dep-file is moved into place
# and its comment lines stripped so kbuild can consume it.
quiet_cmd_rustc_procmacro = $(RUSTC_OR_CLIPPY_QUIET) P $@
cmd_rustc_procmacro = \
$(RUSTC_OR_CLIPPY) $(rust_common_flags) \
--emit=dep-info,link --extern proc_macro \
--crate-type proc-macro --out-dir $(objtree)/$(obj) \
--crate-name $(patsubst lib%.so,%,$(notdir $@)) $<; \
mv $(objtree)/$(obj)/$(patsubst lib%.so,%,$(notdir $@)).d $(depfile); \
sed -i '/^\#/d' $(depfile)
# Procedural macros can only be used with the `rustc` that compiled it.
# Therefore, to get `libmacros.so` automatically recompiled when the compiler
# version changes, we add `core.o` as a dependency (even if it is not needed).
$(obj)/libmacros.so: $(src)/macros/lib.rs $(obj)/core.o FORCE
	$(call if_changed_dep,rustc_procmacro)
# Builds an rlib crate for the kernel target. `skip_clippy` disables Clippy
# for out-of-our-control crates (`core`, `alloc`); `skip_flags` removes
# per-crate inapplicable lints; `rustc_objcopy` optionally post-processes
# the object (used for `compiler_builtins`).
quiet_cmd_rustc_library = $(if $(skip_clippy),RUSTC,$(RUSTC_OR_CLIPPY_QUIET)) L $@
cmd_rustc_library = \
OBJTREE=$(abspath $(objtree)) \
$(if $(skip_clippy),$(RUSTC),$(RUSTC_OR_CLIPPY)) \
$(filter-out $(skip_flags),$(rust_flags) $(rustc_target_flags)) \
--emit=dep-info,obj,metadata --crate-type rlib \
--out-dir $(objtree)/$(obj) -L$(objtree)/$(obj) \
--crate-name $(patsubst %.o,%,$(notdir $@)) $<; \
mv $(objtree)/$(obj)/$(patsubst %.o,%,$(notdir $@)).d $(depfile); \
sed -i '/^\#/d' $(depfile) \
$(if $(rustc_objcopy),;$(OBJCOPY) $(rustc_objcopy) $@)
# Emits `rust-project.json` for rust-analyzer IDE support.
rust-analyzer:
	$(Q)$(srctree)/scripts/generate_rust_analyzer.py $(srctree) $(objtree) \
$(RUST_LIB_SRC) > $(objtree)/rust-project.json
# Per-crate build rules. The dependency chain is:
#   core -> compiler_builtins -> { alloc, build_error } -> kernel
# Clippy is skipped for `core` and `alloc` since they are (near-)upstream code.
$(obj)/core.o: private skip_clippy = 1
$(obj)/core.o: private skip_flags = -Dunreachable_pub
$(obj)/core.o: private rustc_target_flags = $(core-cfgs)
$(obj)/core.o: $(RUST_LIB_SRC)/core/src/lib.rs $(obj)/target.json FORCE
	$(call if_changed_dep,rustc_library)
# Weaken the `__*` intrinsics so the kernel's own implementations win at link time.
$(obj)/compiler_builtins.o: private rustc_objcopy = -w -W '__*'
$(obj)/compiler_builtins.o: $(src)/compiler_builtins.rs $(obj)/core.o FORCE
	$(call if_changed_dep,rustc_library)
$(obj)/alloc.o: private skip_clippy = 1
$(obj)/alloc.o: private skip_flags = -Dunreachable_pub
$(obj)/alloc.o: private rustc_target_flags = $(alloc-cfgs)
$(obj)/alloc.o: $(src)/alloc/lib.rs $(obj)/compiler_builtins.o FORCE
	$(call if_changed_dep,rustc_library)
$(obj)/build_error.o: $(src)/build_error.rs $(obj)/compiler_builtins.o FORCE
	$(call if_changed_dep,rustc_library)
$(obj)/kernel.o: private rustc_target_flags = --extern alloc \
--extern build_error --extern macros
$(obj)/kernel.o: $(src)/kernel/lib.rs $(obj)/alloc.o $(obj)/build_error.o \
$(obj)/libmacros.so $(obj)/bindings_generated.rs \
$(obj)/bindings_helpers_generated.rs FORCE
	$(call if_changed_dep,rustc_library)
endif # CONFIG_RUST

33
rust/alloc/README.md Normal file
View File

@ -0,0 +1,33 @@
# `alloc`
These source files come from the Rust standard library, hosted in
the https://github.com/rust-lang/rust repository, licensed under
"Apache-2.0 OR MIT" and adapted for kernel use. For copyright details,
see https://github.com/rust-lang/rust/blob/master/COPYRIGHT.
Please note that these files should be kept as close as possible to
upstream. In general, only additions should be performed (e.g. new
methods). Eventually, changes should make it into upstream so that,
at some point, this fork can be dropped from the kernel tree.
## Rationale
On one hand, kernel folks wanted to keep `alloc` in-tree to have more
freedom in both workflow and actual features if actually needed
(e.g. receiver types if we ended up using them), which is reasonable.
On the other hand, Rust folks wanted to keep `alloc` as close to
upstream as possible and to avoid as much divergence as possible, which
is also reasonable.
We agreed on a middle-ground: we would keep a subset of `alloc`
in-tree that would be as small and as close as possible to upstream.
Then, upstream can start adding the functions that we add to `alloc`
etc., until we reach a point where the kernel already knows exactly
what it needs in `alloc` and all the new methods are merged into
upstream, so that we can drop `alloc` from the kernel tree and go back
to using the upstream one.
By doing this, the kernel can go a bit faster now, and Rust can
slowly incorporate and discuss the changes as needed.

438
rust/alloc/alloc.rs Normal file
View File

@ -0,0 +1,438 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT
//! Memory allocation APIs
#![stable(feature = "alloc_module", since = "1.28.0")]
#[cfg(not(test))]
use core::intrinsics;
use core::intrinsics::{min_align_of_val, size_of_val};
use core::ptr::Unique;
#[cfg(not(test))]
use core::ptr::{self, NonNull};
#[stable(feature = "alloc_module", since = "1.28.0")]
#[doc(inline)]
pub use core::alloc::*;
#[cfg(test)]
mod tests;
extern "Rust" {
    // These are the magic symbols to call the global allocator. rustc generates
    // them to call `__rg_alloc` etc. if there is a `#[global_allocator]` attribute
    // (the code expanding that attribute macro generates those functions), or to call
    // the default implementations in libstd (`__rdl_alloc` etc. in `library/std/src/alloc.rs`)
    // otherwise.
    // The rustc fork of LLVM also special-cases these function names to be able to optimize them
    // like `malloc`, `realloc`, and `free`, respectively.
    #[rustc_allocator]
    #[rustc_allocator_nounwind]
    fn __rust_alloc(size: usize, align: usize) -> *mut u8;
    #[rustc_allocator_nounwind]
    fn __rust_dealloc(ptr: *mut u8, size: usize, align: usize);
    #[rustc_allocator_nounwind]
    fn __rust_realloc(ptr: *mut u8, old_size: usize, align: usize, new_size: usize) -> *mut u8;
    #[rustc_allocator_nounwind]
    fn __rust_alloc_zeroed(size: usize, align: usize) -> *mut u8;
}

/// The global memory allocator.
///
/// This type implements the [`Allocator`] trait by forwarding calls
/// to the allocator registered with the `#[global_allocator]` attribute
/// if there is one, or the `std` crate's default.
///
/// Note: while this type is unstable, the functionality it provides can be
/// accessed through the [free functions in `alloc`](self#functions).
#[unstable(feature = "allocator_api", issue = "32838")]
#[derive(Copy, Clone, Default, Debug)]
#[cfg(not(test))]
pub struct Global;

// Under `cfg(test)` this crate is built as a regular crate linked against
// `std`, so `std`'s `Global` is re-exported instead.
#[cfg(test)]
pub use std::alloc::Global;

/// Allocate memory with the global allocator.
///
/// This function forwards calls to the [`GlobalAlloc::alloc`] method
/// of the allocator registered with the `#[global_allocator]` attribute
/// if there is one, or the `std` crate's default.
///
/// This function is expected to be deprecated in favor of the `alloc` method
/// of the [`Global`] type when it and the [`Allocator`] trait become stable.
///
/// # Safety
///
/// See [`GlobalAlloc::alloc`].
///
/// # Examples
///
/// ```
/// use std::alloc::{alloc, dealloc, Layout};
///
/// unsafe {
///     let layout = Layout::new::<u16>();
///     let ptr = alloc(layout);
///
///     *(ptr as *mut u16) = 42;
///     assert_eq!(*(ptr as *mut u16), 42);
///
///     dealloc(ptr, layout);
/// }
/// ```
#[stable(feature = "global_alloc", since = "1.28.0")]
#[must_use = "losing the pointer will leak memory"]
#[inline]
pub unsafe fn alloc(layout: Layout) -> *mut u8 {
    unsafe { __rust_alloc(layout.size(), layout.align()) }
}

/// Deallocate memory with the global allocator.
///
/// This function forwards calls to the [`GlobalAlloc::dealloc`] method
/// of the allocator registered with the `#[global_allocator]` attribute
/// if there is one, or the `std` crate's default.
///
/// This function is expected to be deprecated in favor of the `dealloc` method
/// of the [`Global`] type when it and the [`Allocator`] trait become stable.
///
/// # Safety
///
/// See [`GlobalAlloc::dealloc`].
#[stable(feature = "global_alloc", since = "1.28.0")]
#[inline]
pub unsafe fn dealloc(ptr: *mut u8, layout: Layout) {
    unsafe { __rust_dealloc(ptr, layout.size(), layout.align()) }
}

/// Reallocate memory with the global allocator.
///
/// This function forwards calls to the [`GlobalAlloc::realloc`] method
/// of the allocator registered with the `#[global_allocator]` attribute
/// if there is one, or the `std` crate's default.
///
/// This function is expected to be deprecated in favor of the `realloc` method
/// of the [`Global`] type when it and the [`Allocator`] trait become stable.
///
/// # Safety
///
/// See [`GlobalAlloc::realloc`].
#[stable(feature = "global_alloc", since = "1.28.0")]
#[must_use = "losing the pointer will leak memory"]
#[inline]
pub unsafe fn realloc(ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
    unsafe { __rust_realloc(ptr, layout.size(), layout.align(), new_size) }
}

/// Allocate zero-initialized memory with the global allocator.
///
/// This function forwards calls to the [`GlobalAlloc::alloc_zeroed`] method
/// of the allocator registered with the `#[global_allocator]` attribute
/// if there is one, or the `std` crate's default.
///
/// This function is expected to be deprecated in favor of the `alloc_zeroed` method
/// of the [`Global`] type when it and the [`Allocator`] trait become stable.
///
/// # Safety
///
/// See [`GlobalAlloc::alloc_zeroed`].
///
/// # Examples
///
/// ```
/// use std::alloc::{alloc_zeroed, dealloc, Layout};
///
/// unsafe {
///     let layout = Layout::new::<u16>();
///     let ptr = alloc_zeroed(layout);
///
///     assert_eq!(*(ptr as *mut u16), 0);
///
///     dealloc(ptr, layout);
/// }
/// ```
#[stable(feature = "global_alloc", since = "1.28.0")]
#[must_use = "losing the pointer will leak memory"]
#[inline]
pub unsafe fn alloc_zeroed(layout: Layout) -> *mut u8 {
    unsafe { __rust_alloc_zeroed(layout.size(), layout.align()) }
}
#[cfg(not(test))]
impl Global {
    // Shared implementation of `allocate`/`allocate_zeroed`: a zero-sized
    // layout yields a dangling (but well-aligned) pointer without touching
    // the allocator.
    #[inline]
    fn alloc_impl(&self, layout: Layout, zeroed: bool) -> Result<NonNull<[u8]>, AllocError> {
        match layout.size() {
            0 => Ok(NonNull::slice_from_raw_parts(layout.dangling(), 0)),
            // SAFETY: `layout` is non-zero in size,
            size => unsafe {
                let raw_ptr = if zeroed { alloc_zeroed(layout) } else { alloc(layout) };
                let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
                Ok(NonNull::slice_from_raw_parts(ptr, size))
            },
        }
    }

    // SAFETY: Same as `Allocator::grow`
    #[inline]
    unsafe fn grow_impl(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
        zeroed: bool,
    ) -> Result<NonNull<[u8]>, AllocError> {
        debug_assert!(
            new_layout.size() >= old_layout.size(),
            "`new_layout.size()` must be greater than or equal to `old_layout.size()`"
        );

        match old_layout.size() {
            // Growing from a zero-sized block is just a fresh allocation.
            0 => self.alloc_impl(new_layout, zeroed),

            // SAFETY: `new_size` is non-zero as it is greater than or equal to `old_size`,
            // which is non-zero in this arm, as required by safety conditions. Other
            // conditions must be upheld by the caller.
            old_size if old_layout.align() == new_layout.align() => unsafe {
                let new_size = new_layout.size();

                // `realloc` probably checks for `new_size >= old_layout.size()` or something similar.
                intrinsics::assume(new_size >= old_layout.size());

                let raw_ptr = realloc(ptr.as_ptr(), old_layout, new_size);
                let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
                if zeroed {
                    raw_ptr.add(old_size).write_bytes(0, new_size - old_size);
                }
                Ok(NonNull::slice_from_raw_parts(ptr, new_size))
            },

            // SAFETY: because `new_layout.size()` must be greater than or equal to `old_size`,
            // both the old and new memory allocation are valid for reads and writes for `old_size`
            // bytes. Also, because the old allocation wasn't yet deallocated, it cannot overlap
            // `new_ptr`. Thus, the call to `copy_nonoverlapping` is safe. The safety contract
            // for `dealloc` must be upheld by the caller.
            old_size => unsafe {
                let new_ptr = self.alloc_impl(new_layout, zeroed)?;
                ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), old_size);
                self.deallocate(ptr, old_layout);
                Ok(new_ptr)
            },
        }
    }
}
#[unstable(feature = "allocator_api", issue = "32838")]
#[cfg(not(test))]
unsafe impl Allocator for Global {
    #[inline]
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        self.alloc_impl(layout, false)
    }

    #[inline]
    fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        self.alloc_impl(layout, true)
    }

    #[inline]
    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
        // Zero-sized blocks came from `layout.dangling()` (see `alloc_impl`)
        // and must not be passed to the underlying allocator.
        if layout.size() != 0 {
            // SAFETY: `layout` is non-zero in size,
            // other conditions must be upheld by the caller
            unsafe { dealloc(ptr.as_ptr(), layout) }
        }
    }

    #[inline]
    unsafe fn grow(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        // SAFETY: all conditions must be upheld by the caller
        unsafe { self.grow_impl(ptr, old_layout, new_layout, false) }
    }

    #[inline]
    unsafe fn grow_zeroed(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        // SAFETY: all conditions must be upheld by the caller
        unsafe { self.grow_impl(ptr, old_layout, new_layout, true) }
    }

    #[inline]
    unsafe fn shrink(
        &self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_layout: Layout,
    ) -> Result<NonNull<[u8]>, AllocError> {
        debug_assert!(
            new_layout.size() <= old_layout.size(),
            "`new_layout.size()` must be smaller than or equal to `old_layout.size()`"
        );

        match new_layout.size() {
            // SAFETY: conditions must be upheld by the caller
            0 => unsafe {
                self.deallocate(ptr, old_layout);
                Ok(NonNull::slice_from_raw_parts(new_layout.dangling(), 0))
            },

            // SAFETY: `new_size` is non-zero. Other conditions must be upheld by the caller
            new_size if old_layout.align() == new_layout.align() => unsafe {
                // `realloc` probably checks for `new_size <= old_layout.size()` or something similar.
                intrinsics::assume(new_size <= old_layout.size());

                let raw_ptr = realloc(ptr.as_ptr(), old_layout, new_size);
                let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
                Ok(NonNull::slice_from_raw_parts(ptr, new_size))
            },

            // SAFETY: because `new_size` must be smaller than or equal to `old_layout.size()`,
            // both the old and new memory allocation are valid for reads and writes for `new_size`
            // bytes. Also, because the old allocation wasn't yet deallocated, it cannot overlap
            // `new_ptr`. Thus, the call to `copy_nonoverlapping` is safe. The safety contract
            // for `dealloc` must be upheld by the caller.
            new_size => unsafe {
                let new_ptr = self.allocate(new_layout)?;
                ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), new_size);
                self.deallocate(ptr, old_layout);
                Ok(new_ptr)
            },
        }
    }
}
/// The allocator for unique pointers.
#[cfg(all(not(no_global_oom_handling), not(test)))]
#[lang = "exchange_malloc"]
#[inline]
unsafe fn exchange_malloc(size: usize, align: usize) -> *mut u8 {
    // NOTE(review): no SAFETY comment upstream — presumably the compiler-emitted
    // caller guarantees `size`/`align` form a valid `Layout`; confirm against the
    // `exchange_malloc` lang-item contract.
    let layout = unsafe { Layout::from_size_align_unchecked(size, align) };
    match Global.allocate(layout) {
        Ok(ptr) => ptr.as_mut_ptr(),
        // Allocation failure diverts to the global error handler (aborts/panics).
        Err(_) => handle_alloc_error(layout),
    }
}

#[cfg_attr(not(test), lang = "box_free")]
#[inline]
#[rustc_const_unstable(feature = "const_box", issue = "92521")]
// This signature has to be the same as `Box`, otherwise an ICE will happen.
// When an additional parameter to `Box` is added (like `A: Allocator`), this has to be added here as
// well.
// For example if `Box` is changed to `struct Box<T: ?Sized, A: Allocator>(Unique<T>, A)`,
// this function has to be changed to `fn box_free<T: ?Sized, A: Allocator>(Unique<T>, A)` as well.
pub(crate) const unsafe fn box_free<T: ?Sized, A: ~const Allocator + ~const Drop>(
    ptr: Unique<T>,
    alloc: A,
) {
    unsafe {
        // Recompute the layout from the pointee itself; works for unsized `T`
        // because `size_of_val`/`min_align_of_val` read the pointer metadata.
        let size = size_of_val(ptr.as_ref());
        let align = min_align_of_val(ptr.as_ref());
        let layout = Layout::from_size_align_unchecked(size, align);
        alloc.deallocate(From::from(ptr.cast()), layout)
    }
}
// # Allocation error handler

#[cfg(not(no_global_oom_handling))]
extern "Rust" {
    // This is the magic symbol to call the global alloc error handler. rustc generates
    // it to call `__rg_oom` if there is a `#[alloc_error_handler]`, or to call the
    // default implementations below (`__rdl_oom`) otherwise.
    fn __rust_alloc_error_handler(size: usize, align: usize) -> !;
}

/// Abort on memory allocation error or failure.
///
/// Callers of memory allocation APIs wishing to abort computation
/// in response to an allocation error are encouraged to call this function,
/// rather than directly invoking `panic!` or similar.
///
/// The default behavior of this function is to print a message to standard error
/// and abort the process.
/// It can be replaced with [`set_alloc_error_hook`] and [`take_alloc_error_hook`].
///
/// [`set_alloc_error_hook`]: ../../std/alloc/fn.set_alloc_error_hook.html
/// [`take_alloc_error_hook`]: ../../std/alloc/fn.take_alloc_error_hook.html
#[stable(feature = "global_alloc", since = "1.28.0")]
#[rustc_const_unstable(feature = "const_alloc_error", issue = "92523")]
#[cfg(all(not(no_global_oom_handling), not(test)))]
#[cold]
pub const fn handle_alloc_error(layout: Layout) -> ! {
    // Compile-time evaluation cannot reach the registered handler, so it
    // simply panics; at runtime the handler symbol is invoked instead.
    const fn ct_error(_: Layout) -> ! {
        panic!("allocation failed");
    }

    fn rt_error(layout: Layout) -> ! {
        unsafe {
            __rust_alloc_error_handler(layout.size(), layout.align());
        }
    }

    unsafe { core::intrinsics::const_eval_select((layout,), ct_error, rt_error) }
}

// For alloc test `std::alloc::handle_alloc_error` can be used directly.
#[cfg(all(not(no_global_oom_handling), test))]
pub use std::alloc::handle_alloc_error;

#[cfg(all(not(no_global_oom_handling), not(any(target_os = "hermit", test))))]
#[doc(hidden)]
#[allow(unused_attributes)]
#[unstable(feature = "alloc_internals", issue = "none")]
pub mod __alloc_error_handler {
    use crate::alloc::Layout;

    // called via generated `__rust_alloc_error_handler`
    // if there is no `#[alloc_error_handler]`
    #[rustc_std_internal_symbol]
    pub unsafe extern "C-unwind" fn __rdl_oom(size: usize, _align: usize) -> ! {
        panic!("memory allocation of {} bytes failed", size)
    }

    // if there is an `#[alloc_error_handler]`
    #[rustc_std_internal_symbol]
    pub unsafe extern "C-unwind" fn __rg_oom(size: usize, align: usize) -> ! {
        let layout = unsafe { Layout::from_size_align_unchecked(size, align) };
        extern "Rust" {
            #[lang = "oom"]
            fn oom_impl(layout: Layout) -> !;
        }
        unsafe { oom_impl(layout) }
    }
}
/// Specialize clones into pre-allocated, uninitialized memory.
/// Used by `Box::clone` and `Rc`/`Arc::make_mut`.
pub(crate) trait WriteCloneIntoRaw: Sized {
    // Writes a clone of `self` into `target`, which must be valid for writes
    // of `Self` (it may be uninitialized).
    unsafe fn write_clone_into_raw(&self, target: *mut Self);
}

// Default (specializable) implementation for any `Clone` type.
impl<T: Clone> WriteCloneIntoRaw for T {
    #[inline]
    default unsafe fn write_clone_into_raw(&self, target: *mut Self) {
        // Having allocated *first* may allow the optimizer to create
        // the cloned value in-place, skipping the local and move.
        unsafe { target.write(self.clone()) };
    }
}

// Specialization for `Copy` types: a plain bitwise copy suffices.
impl<T: Copy> WriteCloneIntoRaw for T {
    #[inline]
    unsafe fn write_clone_into_raw(&self, target: *mut Self) {
        // We can always copy in-place, without ever involving a local value.
        unsafe { target.copy_from_nonoverlapping(self, 1) };
    }
}

498
rust/alloc/borrow.rs Normal file
View File

@ -0,0 +1,498 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT
//! A module for working with borrowed data.
#![stable(feature = "rust1", since = "1.0.0")]
use core::cmp::Ordering;
use core::hash::{Hash, Hasher};
use core::ops::Deref;
#[cfg(not(no_global_oom_handling))]
use core::ops::{Add, AddAssign};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::borrow::{Borrow, BorrowMut};
use crate::fmt;
#[cfg(not(no_global_oom_handling))]
use crate::string::String;
use Cow::*;
// `Cow<B>` can always be borrowed as `&B`: `Deref` (implemented further
// below in this file) hands out the borrowed or owned payload uniformly.
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, B: ?Sized> Borrow<B> for Cow<'a, B>
where
    B: ToOwned,
    <B as ToOwned>::Owned: 'a,
{
    fn borrow(&self) -> &B {
        &**self
    }
}

/// A generalization of `Clone` to borrowed data.
///
/// Some types make it possible to go from borrowed to owned, usually by
/// implementing the `Clone` trait. But `Clone` works only for going from `&T`
/// to `T`. The `ToOwned` trait generalizes `Clone` to construct owned data
/// from any borrow of a given type.
#[cfg_attr(not(test), rustc_diagnostic_item = "ToOwned")]
#[stable(feature = "rust1", since = "1.0.0")]
pub trait ToOwned {
    /// The resulting type after obtaining ownership.
    #[stable(feature = "rust1", since = "1.0.0")]
    type Owned: Borrow<Self>;

    /// Creates owned data from borrowed data, usually by cloning.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let s: &str = "a";
    /// let ss: String = s.to_owned();
    ///
    /// let v: &[i32] = &[1, 2];
    /// let vv: Vec<i32> = v.to_owned();
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    #[must_use = "cloning is often expensive and is not expected to have side effects"]
    fn to_owned(&self) -> Self::Owned;

    /// Uses borrowed data to replace owned data, usually by cloning.
    ///
    /// This is a borrow-generalized version of `Clone::clone_from`.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// # #![feature(toowned_clone_into)]
    /// let mut s: String = String::new();
    /// "hello".clone_into(&mut s);
    ///
    /// let mut v: Vec<i32> = Vec::new();
    /// [1, 2][..].clone_into(&mut v);
    /// ```
    #[unstable(feature = "toowned_clone_into", reason = "recently added", issue = "41263")]
    fn clone_into(&self, target: &mut Self::Owned) {
        *target = self.to_owned();
    }
}

// Blanket implementation: for `Clone` types, "owned" is simply `T` itself.
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> ToOwned for T
where
    T: Clone,
{
    type Owned = T;
    fn to_owned(&self) -> T {
        self.clone()
    }

    fn clone_into(&self, target: &mut T) {
        target.clone_from(self);
    }
}
/// A clone-on-write smart pointer.
///
/// The type `Cow` is a smart pointer providing clone-on-write functionality: it
/// can enclose and provide immutable access to borrowed data, and clone the
/// data lazily when mutation or ownership is required. The type is designed to
/// work with general borrowed data via the `Borrow` trait.
///
/// `Cow` implements `Deref`, which means that you can call
/// non-mutating methods directly on the data it encloses. If mutation
/// is desired, `to_mut` will obtain a mutable reference to an owned
/// value, cloning if necessary.
///
/// If you need reference-counting pointers, note that
/// [`Rc::make_mut`][crate::rc::Rc::make_mut] and
/// [`Arc::make_mut`][crate::sync::Arc::make_mut] can provide clone-on-write
/// functionality as well.
///
/// # Examples
///
/// ```
/// use std::borrow::Cow;
///
/// fn abs_all(input: &mut Cow<[i32]>) {
///     for i in 0..input.len() {
///         let v = input[i];
///         if v < 0 {
///             // Clones into a vector if not already owned.
///             input.to_mut()[i] = -v;
///         }
///     }
/// }
///
/// // No clone occurs because `input` doesn't need to be mutated.
/// let slice = [0, 1, 2];
/// let mut input = Cow::from(&slice[..]);
/// abs_all(&mut input);
///
/// // Clone occurs because `input` needs to be mutated.
/// let slice = [-1, 0, 1];
/// let mut input = Cow::from(&slice[..]);
/// abs_all(&mut input);
///
/// // No clone occurs because `input` is already owned.
/// let mut input = Cow::from(vec![-1, 0, 1]);
/// abs_all(&mut input);
/// ```
///
/// Another example showing how to keep `Cow` in a struct:
///
/// ```
/// use std::borrow::Cow;
///
/// struct Items<'a, X: 'a> where [X]: ToOwned<Owned = Vec<X>> {
///     values: Cow<'a, [X]>,
/// }
///
/// impl<'a, X: Clone + 'a> Items<'a, X> where [X]: ToOwned<Owned = Vec<X>> {
///     fn new(v: Cow<'a, [X]>) -> Self {
///         Items { values: v }
///     }
/// }
///
/// // Creates a container from borrowed values of a slice
/// let readonly = [1, 2];
/// let borrowed = Items::new((&readonly[..]).into());
/// match borrowed {
///     Items { values: Cow::Borrowed(b) } => println!("borrowed {:?}", b),
///     _ => panic!("expect borrowed value"),
/// }
///
/// let mut clone_on_write = borrowed;
/// // Mutates the data from slice into owned vec and pushes a new value on top
/// clone_on_write.values.to_mut().push(3);
/// println!("clone_on_write = {:?}", clone_on_write.values);
///
/// // The data was mutated. Let's check it out.
/// match clone_on_write {
///     Items { values: Cow::Owned(_) } => println!("clone_on_write contains owned data"),
///     _ => panic!("expect owned data"),
/// }
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "Cow")]
pub enum Cow<'a, B: ?Sized + 'a>
where
    B: ToOwned,
{
    /// Borrowed data.
    #[stable(feature = "rust1", since = "1.0.0")]
    Borrowed(#[stable(feature = "rust1", since = "1.0.0")] &'a B),

    /// Owned data.
    #[stable(feature = "rust1", since = "1.0.0")]
    Owned(#[stable(feature = "rust1", since = "1.0.0")] <B as ToOwned>::Owned),
}

#[stable(feature = "rust1", since = "1.0.0")]
impl<B: ?Sized + ToOwned> Clone for Cow<'_, B> {
    fn clone(&self) -> Self {
        match *self {
            // Cloning a borrow is free: just copy the reference.
            Borrowed(b) => Borrowed(b),
            // Cloning owned data goes through `ToOwned` on the borrowed view.
            Owned(ref o) => {
                let b: &B = o.borrow();
                Owned(b.to_owned())
            }
        }
    }

    fn clone_from(&mut self, source: &Self) {
        match (self, source) {
            // Owned-to-owned can reuse the destination's allocation.
            (&mut Owned(ref mut dest), &Owned(ref o)) => o.borrow().clone_into(dest),
            (t, s) => *t = s.clone(),
        }
    }
}
impl<B: ?Sized + ToOwned> Cow<'_, B> {
    /// Returns true if the data is borrowed, i.e. if `to_mut` would require additional work.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(cow_is_borrowed)]
    /// use std::borrow::Cow;
    ///
    /// let cow = Cow::Borrowed("moo");
    /// assert!(cow.is_borrowed());
    ///
    /// let bull: Cow<'_, str> = Cow::Owned("...moo?".to_string());
    /// assert!(!bull.is_borrowed());
    /// ```
    #[unstable(feature = "cow_is_borrowed", issue = "65143")]
    #[rustc_const_unstable(feature = "const_cow_is_borrowed", issue = "65143")]
    pub const fn is_borrowed(&self) -> bool {
        match *self {
            Borrowed(_) => true,
            Owned(_) => false,
        }
    }
    /// Returns true if the data is owned, i.e. if `to_mut` would be a no-op.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(cow_is_borrowed)]
    /// use std::borrow::Cow;
    ///
    /// let cow: Cow<'_, str> = Cow::Owned("moo".to_string());
    /// assert!(cow.is_owned());
    ///
    /// let bull = Cow::Borrowed("...moo?");
    /// assert!(!bull.is_owned());
    /// ```
    #[unstable(feature = "cow_is_borrowed", issue = "65143")]
    #[rustc_const_unstable(feature = "const_cow_is_borrowed", issue = "65143")]
    pub const fn is_owned(&self) -> bool {
        !self.is_borrowed()
    }
    /// Acquires a mutable reference to the owned form of the data.
    ///
    /// Clones the data if it is not already owned.
    ///
    /// # Examples
    ///
    /// ```
    /// use std::borrow::Cow;
    ///
    /// let mut cow = Cow::Borrowed("foo");
    /// cow.to_mut().make_ascii_uppercase();
    ///
    /// assert_eq!(
    ///   cow,
    ///   Cow::Owned(String::from("FOO")) as Cow<str>
    /// );
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn to_mut(&mut self) -> &mut <B as ToOwned>::Owned {
        match *self {
            Borrowed(borrowed) => {
                // Promote to the owned variant first, then re-match so the
                // returned `&mut` borrows from `self` (which now holds the
                // `Owned` variant) rather than from a local temporary.
                *self = Owned(borrowed.to_owned());
                match *self {
                    // Impossible: the line above just stored `Owned(..)`.
                    Borrowed(..) => unreachable!(),
                    Owned(ref mut owned) => owned,
                }
            }
            Owned(ref mut owned) => owned,
        }
    }
    /// Extracts the owned data.
    ///
    /// Clones the data if it is not already owned.
    ///
    /// # Examples
    ///
    /// Calling `into_owned` on a `Cow::Borrowed` clones the underlying data
    /// and becomes a `Cow::Owned`:
    ///
    /// ```
    /// use std::borrow::Cow;
    ///
    /// let s = "Hello world!";
    /// let cow = Cow::Borrowed(s);
    ///
    /// assert_eq!(
    ///   cow.into_owned(),
    ///   String::from(s)
    /// );
    /// ```
    ///
    /// Calling `into_owned` on a `Cow::Owned` is a no-op:
    ///
    /// ```
    /// use std::borrow::Cow;
    ///
    /// let s = "Hello world!";
    /// let cow: Cow<str> = Cow::Owned(String::from(s));
    ///
    /// assert_eq!(
    ///   cow.into_owned(),
    ///   String::from(s)
    /// );
    /// ```
    #[stable(feature = "rust1", since = "1.0.0")]
    pub fn into_owned(self) -> <B as ToOwned>::Owned {
        match self {
            Borrowed(borrowed) => borrowed.to_owned(),
            Owned(owned) => owned,
        }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_const_unstable(feature = "const_deref", issue = "88955")]
// The `~const Borrow<B>` bound lets `deref` also be called from const
// contexts (unstable `const_deref` feature); in non-const code it degrades
// to the ordinary `Borrow<B>` bound that `ToOwned` already guarantees.
impl<B: ?Sized + ToOwned> const Deref for Cow<'_, B>
where
    B::Owned: ~const Borrow<B>,
{
    type Target = B;
    /// Dereferences to the borrowed form: the stored reference for
    /// `Borrowed`, or a fresh borrow of the owned value for `Owned`.
    fn deref(&self) -> &B {
        match *self {
            Borrowed(borrowed) => borrowed,
            Owned(ref owned) => owned.borrow(),
        }
    }
}
// Marker impl: `Cow`'s `PartialEq` compares the dereferenced `B` values, so
// it is a total equivalence whenever `B: Eq`.
#[stable(feature = "rust1", since = "1.0.0")]
impl<B: ?Sized + Eq + ToOwned> Eq for Cow<'_, B> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<B: ?Sized + Ord + ToOwned> Ord for Cow<'_, B> {
    /// Orders two `Cow`s by comparing the underlying `B` values, so the
    /// borrowed/owned distinction never affects the ordering.
    #[inline]
    fn cmp(&self, other: &Self) -> Ordering {
        (**self).cmp(&**other)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, 'b, B: ?Sized, C: ?Sized> PartialEq<Cow<'b, C>> for Cow<'a, B>
where
    B: PartialEq<C> + ToOwned,
    C: ToOwned,
{
    /// Compares the dereferenced contents, so borrowed and owned variants
    /// holding equal data compare equal.
    #[inline]
    fn eq(&self, other: &Cow<'b, C>) -> bool {
        (**self).eq(&**other)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, B: ?Sized> PartialOrd for Cow<'a, B>
where
    B: PartialOrd + ToOwned,
{
    /// Compares the dereferenced `B` values, ignoring whether either side is
    /// borrowed or owned.
    #[inline]
    fn partial_cmp(&self, other: &Cow<'a, B>) -> Option<Ordering> {
        (**self).partial_cmp(&**other)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<B: ?Sized> fmt::Debug for Cow<'_, B>
where
    B: fmt::Debug + ToOwned<Owned: fmt::Debug>,
{
    /// Forwards to the `Debug` impl of whichever variant is held.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            Borrowed(ref inner) => inner.fmt(f),
            Owned(ref inner) => inner.fmt(f),
        }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<B: ?Sized> fmt::Display for Cow<'_, B>
where
    B: fmt::Display + ToOwned<Owned: fmt::Display>,
{
    /// Forwards to the `Display` impl of whichever variant is held.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match *self {
            Borrowed(ref inner) => inner.fmt(f),
            Owned(ref inner) => inner.fmt(f),
        }
    }
}
#[stable(feature = "default", since = "1.11.0")]
impl<B: ?Sized> Default for Cow<'_, B>
where
    B: ToOwned<Owned: Default>,
{
    /// Creates an owned Cow<'a, B> with the default value for the contained owned value.
    fn default() -> Self {
        // Inference pins `Default::default()` to `<B as ToOwned>::Owned`.
        Owned(Default::default())
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<B: ?Sized> Hash for Cow<'_, B>
where
    B: Hash + ToOwned,
{
    /// Hashes the dereferenced `B`, so `Borrowed` and `Owned` forms of the
    /// same data produce identical hashes.
    #[inline]
    fn hash<H: Hasher>(&self, state: &mut H) {
        (**self).hash(state)
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: ?Sized + ToOwned> AsRef<T> for Cow<'_, T> {
    /// Borrows the underlying data by going through `Deref` explicitly.
    fn as_ref(&self) -> &T {
        &**self
    }
}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "cow_add", since = "1.14.0")]
impl<'a> Add<&'a str> for Cow<'a, str> {
type Output = Cow<'a, str>;
#[inline]
fn add(mut self, rhs: &'a str) -> Self::Output {
self += rhs;
self
}
}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "cow_add", since = "1.14.0")]
impl<'a> Add<Cow<'a, str>> for Cow<'a, str> {
type Output = Cow<'a, str>;
#[inline]
fn add(mut self, rhs: Cow<'a, str>) -> Self::Output {
self += rhs;
self
}
}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "cow_add", since = "1.14.0")]
impl<'a> AddAssign<&'a str> for Cow<'a, str> {
    /// Appends `rhs` in place. An empty `self` simply re-borrows `rhs`;
    /// otherwise a borrowed `self` is first promoted to an owned `String`
    /// sized for the concatenated result.
    fn add_assign(&mut self, rhs: &'a str) {
        if self.is_empty() {
            *self = Cow::Borrowed(rhs);
            return;
        }
        if rhs.is_empty() {
            return;
        }
        // Promote a borrowed `str` to owned, reserving room up front so the
        // subsequent `push_str` cannot reallocate.
        if let Cow::Borrowed(current) = *self {
            let mut buf = String::with_capacity(current.len() + rhs.len());
            buf.push_str(current);
            *self = Cow::Owned(buf);
        }
        self.to_mut().push_str(rhs);
    }
}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "cow_add", since = "1.14.0")]
impl<'a> AddAssign<Cow<'a, str>> for Cow<'a, str> {
    /// Appends `rhs` in place. An empty `self` simply takes over `rhs`
    /// (borrowed or owned); otherwise a borrowed `self` is first promoted
    /// to an owned `String` sized for the concatenated result.
    fn add_assign(&mut self, rhs: Cow<'a, str>) {
        if self.is_empty() {
            *self = rhs;
            return;
        }
        if rhs.is_empty() {
            return;
        }
        // Promote a borrowed `str` to owned, reserving room up front so the
        // subsequent `push_str` cannot reallocate.
        if let Cow::Borrowed(current) = *self {
            let mut buf = String::with_capacity(current.len() + rhs.len());
            buf.push_str(current);
            *self = Cow::Owned(buf);
        }
        self.to_mut().push_str(&rhs);
    }
}

2007
rust/alloc/boxed.rs Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,156 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT
//! Collection types.
#![stable(feature = "rust1", since = "1.0.0")]
#[cfg(not(no_global_oom_handling))]
pub mod binary_heap;
#[cfg(not(no_global_oom_handling))]
mod btree;
#[cfg(not(no_global_oom_handling))]
pub mod linked_list;
#[cfg(not(no_global_oom_handling))]
pub mod vec_deque;
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
pub mod btree_map {
    //! An ordered map based on a B-Tree.
    // Re-export only: the implementation lives in the private `btree` module.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub use super::btree::map::*;
}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
pub mod btree_set {
    //! An ordered set based on a B-Tree.
    // Re-export only: the implementation lives in the private `btree` module.
    #[stable(feature = "rust1", since = "1.0.0")]
    pub use super::btree::set::*;
}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
#[doc(no_inline)]
pub use binary_heap::BinaryHeap;
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
#[doc(no_inline)]
pub use btree_map::BTreeMap;
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
#[doc(no_inline)]
pub use btree_set::BTreeSet;
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
#[doc(no_inline)]
pub use linked_list::LinkedList;
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
#[doc(no_inline)]
pub use vec_deque::VecDeque;
use crate::alloc::{Layout, LayoutError};
use core::fmt::Display;
/// The error type for `try_reserve` methods.
#[derive(Clone, PartialEq, Eq, Debug)]
#[stable(feature = "try_reserve", since = "1.57.0")]
pub struct TryReserveError {
    // Kept private; exposed (unstably) through `TryReserveError::kind`.
    kind: TryReserveErrorKind,
}
impl TryReserveError {
    /// Details about the allocation that caused the error
    #[inline]
    #[must_use]
    #[unstable(
        feature = "try_reserve_kind",
        reason = "Uncertain how much info should be exposed",
        issue = "48043"
    )]
    pub fn kind(&self) -> TryReserveErrorKind {
        // Returned by value; `TryReserveErrorKind` derives `Clone`.
        self.kind.clone()
    }
}
/// Details of the allocation that caused a `TryReserveError`
#[derive(Clone, PartialEq, Eq, Debug)]
#[unstable(
    feature = "try_reserve_kind",
    reason = "Uncertain how much info should be exposed",
    issue = "48043"
)]
pub enum TryReserveErrorKind {
    /// Error due to the computed capacity exceeding the collection's maximum
    /// (usually `isize::MAX` bytes).
    CapacityOverflow,
    /// The memory allocator returned an error
    AllocError {
        /// The layout of allocation request that failed
        layout: Layout,
        // Zero-sized private field: keeps this variant non-exhaustive so an
        // allocator-specific error payload can be added later without a
        // breaking change (see the linked wg-allocators issue).
        #[doc(hidden)]
        #[unstable(
            feature = "container_error_extra",
            issue = "none",
            reason = "\
            Enable exposing the allocators custom error value \
            if an associated type is added in the future: \
            https://github.com/rust-lang/wg-allocators/issues/23"
        )]
        non_exhaustive: (),
    },
}
#[unstable(
feature = "try_reserve_kind",
reason = "Uncertain how much info should be exposed",
issue = "48043"
)]
impl From<TryReserveErrorKind> for TryReserveError {
#[inline]
fn from(kind: TryReserveErrorKind) -> Self {
Self { kind }
}
}
#[unstable(feature = "try_reserve_kind", reason = "new API", issue = "48043")]
impl From<LayoutError> for TryReserveErrorKind {
    /// Always evaluates to [`TryReserveErrorKind::CapacityOverflow`].
    #[inline]
    fn from(_: LayoutError) -> Self {
        // A layout error means the requested size/alignment could not even
        // be described, which is reported as a capacity overflow.
        Self::CapacityOverflow
    }
}
#[stable(feature = "try_reserve", since = "1.57.0")]
impl Display for TryReserveError {
    /// Formats a human-readable description: a common prefix plus a
    /// kind-specific suffix.
    fn fmt(
        &self,
        fmt: &mut core::fmt::Formatter<'_>,
    ) -> core::result::Result<(), core::fmt::Error> {
        fmt.write_str("memory allocation failed")?;
        let reason = match self.kind {
            TryReserveErrorKind::CapacityOverflow => {
                " because the computed capacity exceeded the collection's maximum"
            }
            TryReserveErrorKind::AllocError { .. } => {
                // Fixed grammar in the user-visible message: "a error" -> "an error".
                " because the memory allocator returned an error"
            }
        };
        fmt.write_str(reason)
    }
}
/// An intermediate trait for specialization of `Extend`.
// NOTE(review): presumably implemented generically plus specialized for
// particular iterator types elsewhere; no impls are visible in this file.
#[doc(hidden)]
trait SpecExtend<I: IntoIterator> {
    /// Extends `self` with the contents of the given iterator.
    fn spec_extend(&mut self, iter: I);
}

601
rust/alloc/fmt.rs Normal file
View File

@ -0,0 +1,601 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT
//! Utilities for formatting and printing `String`s.
//!
//! This module contains the runtime support for the [`format!`] syntax extension.
//! This macro is implemented in the compiler to emit calls to this module in
//! order to format arguments at runtime into strings.
//!
//! # Usage
//!
//! The [`format!`] macro is intended to be familiar to those coming from C's
//! `printf`/`fprintf` functions or Python's `str.format` function.
//!
//! Some examples of the [`format!`] extension are:
//!
//! ```
//! format!("Hello"); // => "Hello"
//! format!("Hello, {}!", "world"); // => "Hello, world!"
//! format!("The number is {}", 1); // => "The number is 1"
//! format!("{:?}", (3, 4)); // => "(3, 4)"
//! format!("{value}", value=4); // => "4"
//! let people = "Rustaceans";
//! format!("Hello {people}!"); // => "Hello Rustaceans!"
//! format!("{} {}", 1, 2); // => "1 2"
//! format!("{:04}", 42); // => "0042" with leading zeros
//! format!("{:#?}", (100, 200)); // => "(
//! // 100,
//! // 200,
//! // )"
//! ```
//!
//! From these, you can see that the first argument is a format string. It is
//! required by the compiler for this to be a string literal; it cannot be a
//! variable passed in (in order to perform validity checking). The compiler
//! will then parse the format string and determine if the list of arguments
//! provided is suitable to pass to this format string.
//!
//! To convert a single value to a string, use the [`to_string`] method. This
//! will use the [`Display`] formatting trait.
//!
//! ## Positional parameters
//!
//! Each formatting argument is allowed to specify which value argument it's
//! referencing, and if omitted it is assumed to be "the next argument". For
//! example, the format string `{} {} {}` would take three parameters, and they
//! would be formatted in the same order as they're given. The format string
//! `{2} {1} {0}`, however, would format arguments in reverse order.
//!
//! Things can get a little tricky once you start intermingling the two types of
//! positional specifiers. The "next argument" specifier can be thought of as an
//! iterator over the argument. Each time a "next argument" specifier is seen,
//! the iterator advances. This leads to behavior like this:
//!
//! ```
//! format!("{1} {} {0} {}", 1, 2); // => "2 1 1 2"
//! ```
//!
//! The internal iterator over the argument has not been advanced by the time
//! the first `{}` is seen, so it prints the first argument. Then upon reaching
//! the second `{}`, the iterator has advanced forward to the second argument.
//! Essentially, parameters that explicitly name their argument do not affect
//! parameters that do not name an argument in terms of positional specifiers.
//!
//! A format string is required to use all of its arguments, otherwise it is a
//! compile-time error. You may refer to the same argument more than once in the
//! format string.
//!
//! ## Named parameters
//!
//! Rust itself does not have a Python-like equivalent of named parameters to a
//! function, but the [`format!`] macro is a syntax extension that allows it to
//! leverage named parameters. Named parameters are listed at the end of the
//! argument list and have the syntax:
//!
//! ```text
//! identifier '=' expression
//! ```
//!
//! For example, the following [`format!`] expressions all use named arguments:
//!
//! ```
//! format!("{argument}", argument = "test"); // => "test"
//! format!("{name} {}", 1, name = 2); // => "2 1"
//! format!("{a} {c} {b}", a="a", b='b', c=3); // => "a 3 b"
//! ```
//!
//! If a named parameter does not appear in the argument list, `format!` will
//! reference a variable with that name in the current scope.
//!
//! ```
//! let argument = 2 + 2;
//! format!("{argument}"); // => "4"
//!
//! fn make_string(a: u32, b: &str) -> String {
//! format!("{b} {a}")
//! }
//! make_string(927, "label"); // => "label 927"
//! ```
//!
//! It is not valid to put positional parameters (those without names) after
//! arguments that have names. Like with positional parameters, it is not
//! valid to provide named parameters that are unused by the format string.
//!
//! # Formatting Parameters
//!
//! Each argument being formatted can be transformed by a number of formatting
//! parameters (corresponding to `format_spec` in [the syntax](#syntax)). These
//! parameters affect the string representation of what's being formatted.
//!
//! ## Width
//!
//! ```
//! // All of these print "Hello x    !"
//! println!("Hello {:5}!", "x");
//! println!("Hello {:1$}!", "x", 5);
//! println!("Hello {1:0$}!", 5, "x");
//! println!("Hello {:width$}!", "x", width = 5);
//! let width = 5;
//! println!("Hello {:width$}!", "x");
//! ```
//!
//! This is a parameter for the "minimum width" that the format should take up.
//! If the value's string does not fill up this many characters, then the
//! padding specified by fill/alignment will be used to take up the required
//! space (see below).
//!
//! The value for the width can also be provided as a [`usize`] in the list of
//! parameters by adding a postfix `$`, indicating that the second argument is
//! a [`usize`] specifying the width.
//!
//! Referring to an argument with the dollar syntax does not affect the "next
//! argument" counter, so it's usually a good idea to refer to arguments by
//! position, or use named arguments.
//!
//! ## Fill/Alignment
//!
//! ```
//! assert_eq!(format!("Hello {:<5}!", "x"), "Hello x    !");
//! assert_eq!(format!("Hello {:-<5}!", "x"), "Hello x----!");
//! assert_eq!(format!("Hello {:^5}!", "x"), "Hello   x  !");
//! assert_eq!(format!("Hello {:>5}!", "x"), "Hello     x!");
//! ```
//!
//! The optional fill character and alignment is provided normally in conjunction with the
//! [`width`](#width) parameter. It must be defined before `width`, right after the `:`.
//! This indicates that if the value being formatted is smaller than
//! `width` some extra characters will be printed around it.
//! Filling comes in the following variants for different alignments:
//!
//! * `[fill]<` - the argument is left-aligned in `width` columns
//! * `[fill]^` - the argument is center-aligned in `width` columns
//! * `[fill]>` - the argument is right-aligned in `width` columns
//!
//! The default [fill/alignment](#fillalignment) for non-numerics is a space and
//! left-aligned. The
//! default for numeric formatters is also a space character but with right-alignment. If
//! the `0` flag (see below) is specified for numerics, then the implicit fill character is
//! `0`.
//!
//! Note that alignment might not be implemented by some types. In particular, it
//! is not generally implemented for the `Debug` trait. A good way to ensure
//! padding is applied is to format your input, then pad this resulting string
//! to obtain your output:
//!
//! ```
//! println!("Hello {:^15}!", format!("{:?}", Some("hi"))); // => "Hello   Some("hi")   !"
//! ```
//!
//! ## Sign/`#`/`0`
//!
//! ```
//! assert_eq!(format!("Hello {:+}!", 5), "Hello +5!");
//! assert_eq!(format!("{:#x}!", 27), "0x1b!");
//! assert_eq!(format!("Hello {:05}!", 5), "Hello 00005!");
//! assert_eq!(format!("Hello {:05}!", -5), "Hello -0005!");
//! assert_eq!(format!("{:#010x}!", 27), "0x0000001b!");
//! ```
//!
//! These are all flags altering the behavior of the formatter.
//!
//! * `+` - This is intended for numeric types and indicates that the sign
//! should always be printed. Positive signs are never printed by
//! default, and the negative sign is only printed by default for signed values.
//! This flag indicates that the correct sign (`+` or `-`) should always be printed.
//! * `-` - Currently not used
//! * `#` - This flag indicates that the "alternate" form of printing should
//! be used. The alternate forms are:
//! * `#?` - pretty-print the [`Debug`] formatting (adds linebreaks and indentation)
//! * `#x` - precedes the argument with a `0x`
//! * `#X` - precedes the argument with a `0x`
//! * `#b` - precedes the argument with a `0b`
//! * `#o` - precedes the argument with a `0o`
//! * `0` - This is used to indicate for integer formats that the padding to `width` should
//! both be done with a `0` character as well as be sign-aware. A format
//! like `{:08}` would yield `00000001` for the integer `1`, while the
//! same format would yield `-0000001` for the integer `-1`. Notice that
//! the negative version has one fewer zero than the positive version.
//! Note that padding zeros are always placed after the sign (if any)
//! and before the digits. When used together with the `#` flag, a similar
//! rule applies: padding zeros are inserted after the prefix but before
//! the digits. The prefix is included in the total width.
//!
//! ## Precision
//!
//! For non-numeric types, this can be considered a "maximum width". If the resulting string is
//! longer than this width, then it is truncated down to this many characters and that truncated
//! value is emitted with proper `fill`, `alignment` and `width` if those parameters are set.
//!
//! For integral types, this is ignored.
//!
//! For floating-point types, this indicates how many digits after the decimal point should be
//! printed.
//!
//! There are three possible ways to specify the desired `precision`:
//!
//! 1. An integer `.N`:
//!
//! the integer `N` itself is the precision.
//!
//! 2. An integer or name followed by dollar sign `.N$`:
//!
//! use format *argument* `N` (which must be a `usize`) as the precision.
//!
//! 3. An asterisk `.*`:
//!
//! `.*` means that this `{...}` is associated with *two* format inputs rather than one: the
//! first input holds the `usize` precision, and the second holds the value to print. Note that
//! in this case, if one uses the format string `{<arg>:<spec>.*}`, then the `<arg>` part refers
//! to the *value* to print, and the `precision` must come in the input preceding `<arg>`.
//!
//! For example, the following calls all print the same thing `Hello x is 0.01000`:
//!
//! ```
//! // Hello {arg 0 ("x")} is {arg 1 (0.01) with precision specified inline (5)}
//! println!("Hello {0} is {1:.5}", "x", 0.01);
//!
//! // Hello {arg 1 ("x")} is {arg 2 (0.01) with precision specified in arg 0 (5)}
//! println!("Hello {1} is {2:.0$}", 5, "x", 0.01);
//!
//! // Hello {arg 0 ("x")} is {arg 2 (0.01) with precision specified in arg 1 (5)}
//! println!("Hello {0} is {2:.1$}", "x", 5, 0.01);
//!
//! // Hello {next arg ("x")} is {second of next two args (0.01) with precision
//! // specified in first of next two args (5)}
//! println!("Hello {} is {:.*}", "x", 5, 0.01);
//!
//! // Hello {next arg ("x")} is {arg 2 (0.01) with precision
//! // specified in its predecessor (5)}
//! println!("Hello {} is {2:.*}", "x", 5, 0.01);
//!
//! // Hello {next arg ("x")} is {arg "number" (0.01) with precision specified
//! // in arg "prec" (5)}
//! println!("Hello {} is {number:.prec$}", "x", prec = 5, number = 0.01);
//! ```
//!
//! While these:
//!
//! ```
//! println!("{}, `{name:.*}` has 3 fractional digits", "Hello", 3, name=1234.56);
//! println!("{}, `{name:.*}` has 3 characters", "Hello", 3, name="1234.56");
//! println!("{}, `{name:>8.*}` has 3 right-aligned characters", "Hello", 3, name="1234.56");
//! ```
//!
//! print three significantly different things:
//!
//! ```text
//! Hello, `1234.560` has 3 fractional digits
//! Hello, `123` has 3 characters
//! Hello, `     123` has 3 right-aligned characters
//! ```
//!
//! ## Localization
//!
//! In some programming languages, the behavior of string formatting functions
//! depends on the operating system's locale setting. The format functions
//! provided by Rust's standard library do not have any concept of locale and
//! will produce the same results on all systems regardless of user
//! configuration.
//!
//! For example, the following code will always print `1.5` even if the system
//! locale uses a decimal separator other than a dot.
//!
//! ```
//! println!("The value is {}", 1.5);
//! ```
//!
//! # Escaping
//!
//! The literal characters `{` and `}` may be included in a string by preceding
//! them with the same character. For example, the `{` character is escaped with
//! `{{` and the `}` character is escaped with `}}`.
//!
//! ```
//! assert_eq!(format!("Hello {{}}"), "Hello {}");
//! assert_eq!(format!("{{ Hello"), "{ Hello");
//! ```
//!
//! # Syntax
//!
//! To summarize, here you can find the full grammar of format strings.
//! The syntax for the formatting language used is drawn from other languages,
//! so it should not be too alien. Arguments are formatted with Python-like
//! syntax, meaning that arguments are surrounded by `{}` instead of the C-like
//! `%`. The actual grammar for the formatting syntax is:
//!
//! ```text
//! format_string := text [ maybe_format text ] *
//! maybe_format := '{' '{' | '}' '}' | format
//! format := '{' [ argument ] [ ':' format_spec ] '}'
//! argument := integer | identifier
//!
//! format_spec := [[fill]align][sign]['#']['0'][width]['.' precision]type
//! fill := character
//! align := '<' | '^' | '>'
//! sign := '+' | '-'
//! width := count
//! precision := count | '*'
//! type := '' | '?' | 'x?' | 'X?' | identifier
//! count := parameter | integer
//! parameter := argument '$'
//! ```
//! In the above grammar, `text` must not contain any `'{'` or `'}'` characters.
//!
//! # Formatting traits
//!
//! When requesting that an argument be formatted with a particular type, you
//! are actually requesting that an argument ascribes to a particular trait.
//! This allows multiple actual types to be formatted via `{:x}` (like [`i8`] as
//! well as [`isize`]). The current mapping of types to traits is:
//!
//! * *nothing* ⇒ [`Display`]
//! * `?` ⇒ [`Debug`]
//! * `x?` ⇒ [`Debug`] with lower-case hexadecimal integers
//! * `X?` ⇒ [`Debug`] with upper-case hexadecimal integers
//! * `o` ⇒ [`Octal`]
//! * `x` ⇒ [`LowerHex`]
//! * `X` ⇒ [`UpperHex`]
//! * `p` ⇒ [`Pointer`]
//! * `b` ⇒ [`Binary`]
//! * `e` ⇒ [`LowerExp`]
//! * `E` ⇒ [`UpperExp`]
//!
//! What this means is that any type of argument which implements the
//! [`fmt::Binary`][`Binary`] trait can then be formatted with `{:b}`. Implementations
//! are provided for these traits for a number of primitive types by the
//! standard library as well. If no format is specified (as in `{}` or `{:6}`),
//! then the format trait used is the [`Display`] trait.
//!
//! When implementing a format trait for your own type, you will have to
//! implement a method of the signature:
//!
//! ```
//! # #![allow(dead_code)]
//! # use std::fmt;
//! # struct Foo; // our custom type
//! # impl fmt::Display for Foo {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! # write!(f, "testing, testing")
//! # } }
//! ```
//!
//! Your type will be passed as `self` by-reference, and then the function
//! should emit output into the `f.buf` stream. It is up to each format trait
//! implementation to correctly adhere to the requested formatting parameters.
//! The values of these parameters will be listed in the fields of the
//! [`Formatter`] struct. In order to help with this, the [`Formatter`] struct also
//! provides some helper methods.
//!
//! Additionally, the return value of this function is [`fmt::Result`] which is a
//! type alias of <code>[Result]<(), [std::fmt::Error]></code>. Formatting implementations
//! should ensure that they propagate errors from the [`Formatter`] (e.g., when
//! calling [`write!`]). However, they should never return errors spuriously. That
//! is, a formatting implementation must and may only return an error if the
//! passed-in [`Formatter`] returns an error. This is because, contrary to what
//! the function signature might suggest, string formatting is an infallible
//! operation. This function only returns a result because writing to the
//! underlying stream might fail and it must provide a way to propagate the fact
//! that an error has occurred back up the stack.
//!
//! An example of implementing the formatting traits would look
//! like:
//!
//! ```
//! use std::fmt;
//!
//! #[derive(Debug)]
//! struct Vector2D {
//! x: isize,
//! y: isize,
//! }
//!
//! impl fmt::Display for Vector2D {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! // The `f` value implements the `Write` trait, which is what the
//! // write! macro is expecting. Note that this formatting ignores the
//! // various flags provided to format strings.
//! write!(f, "({}, {})", self.x, self.y)
//! }
//! }
//!
//! // Different traits allow different forms of output of a type. The meaning
//! // of this format is to print the magnitude of a vector.
//! impl fmt::Binary for Vector2D {
//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
//! let magnitude = (self.x * self.x + self.y * self.y) as f64;
//! let magnitude = magnitude.sqrt();
//!
//! // Respect the formatting flags by using the helper method
//! // `pad_integral` on the Formatter object. See the method
//! // documentation for details, and the function `pad` can be used
//! // to pad strings.
//! let decimals = f.precision().unwrap_or(3);
//! let string = format!("{:.*}", decimals, magnitude);
//! f.pad_integral(true, "", &string)
//! }
//! }
//!
//! fn main() {
//! let myvector = Vector2D { x: 3, y: 4 };
//!
//! println!("{}", myvector); // => "(3, 4)"
//! println!("{:?}", myvector); // => "Vector2D {x: 3, y:4}"
//!     println!("{:10.3b}", myvector); // => "     5.000"
//! }
//! ```
//!
//! ### `fmt::Display` vs `fmt::Debug`
//!
//! These two formatting traits have distinct purposes:
//!
//! - [`fmt::Display`][`Display`] implementations assert that the type can be faithfully
//! represented as a UTF-8 string at all times. It is **not** expected that
//! all types implement the [`Display`] trait.
//! - [`fmt::Debug`][`Debug`] implementations should be implemented for **all** public types.
//! Output will typically represent the internal state as faithfully as possible.
//! The purpose of the [`Debug`] trait is to facilitate debugging Rust code. In
//! most cases, using `#[derive(Debug)]` is sufficient and recommended.
//!
//! Some examples of the output from both traits:
//!
//! ```
//! assert_eq!(format!("{} {:?}", 3, 4), "3 4");
//! assert_eq!(format!("{} {:?}", 'a', 'b'), "a 'b'");
//! assert_eq!(format!("{} {:?}", "foo\n", "bar\n"), "foo\n \"bar\\n\"");
//! ```
//!
//! # Related macros
//!
//! There are a number of related macros in the [`format!`] family. The ones that
//! are currently implemented are:
//!
//! ```ignore (only-for-syntax-highlight)
//! format! // described above
//! write! // first argument is a &mut io::Write, the destination
//! writeln! // same as write but appends a newline
//! print! // the format string is printed to the standard output
//! println! // same as print but appends a newline
//! eprint! // the format string is printed to the standard error
//! eprintln! // same as eprint but appends a newline
//! format_args! // described below.
//! ```
//!
//! ### `write!`
//!
//! This and [`writeln!`] are two macros which are used to emit the format string
//! to a specified stream. This is used to prevent intermediate allocations of
//! format strings and instead directly write the output. Under the hood, this
//! function is actually invoking the [`write_fmt`] function defined on the
//! [`std::io::Write`] trait. Example usage is:
//!
//! ```
//! # #![allow(unused_must_use)]
//! use std::io::Write;
//! let mut w = Vec::new();
//! write!(&mut w, "Hello {}!", "world");
//! ```
//!
//! ### `print!`
//!
//! This and [`println!`] emit their output to stdout. Similarly to the [`write!`]
//! macro, the goal of these macros is to avoid intermediate allocations when
//! printing output. Example usage is:
//!
//! ```
//! print!("Hello {}!", "world");
//! println!("I have a newline {}", "character at the end");
//! ```
//! ### `eprint!`
//!
//! The [`eprint!`] and [`eprintln!`] macros are identical to
//! [`print!`] and [`println!`], respectively, except they emit their
//! output to stderr.
//!
//! ### `format_args!`
//!
//! This is a curious macro used to safely pass around
//! an opaque object describing the format string. This object
//! does not require any heap allocations to create, and it only
//! references information on the stack. Under the hood, all of
//! the related macros are implemented in terms of this. First
//! off, some example usage is:
//!
//! ```
//! # #![allow(unused_must_use)]
//! use std::fmt;
//! use std::io::{self, Write};
//!
//! let mut some_writer = io::stdout();
//! write!(&mut some_writer, "{}", format_args!("print with a {}", "macro"));
//!
//! fn my_fmt_fn(args: fmt::Arguments) {
//! write!(&mut io::stdout(), "{}", args);
//! }
//! my_fmt_fn(format_args!(", or a {} too", "function"));
//! ```
//!
//! The result of the [`format_args!`] macro is a value of type [`fmt::Arguments`].
//! This structure can then be passed to the [`write`] and [`format`] functions
//! inside this module in order to process the format string.
//! The goal of this macro is to even further prevent intermediate allocations
//! when dealing with formatting strings.
//!
//! For example, a logging library could use the standard formatting syntax, but
//! it would internally pass around this structure until it has been determined
//! where output should go to.
//!
//! [`fmt::Result`]: Result "fmt::Result"
//! [Result]: core::result::Result "std::result::Result"
//! [std::fmt::Error]: Error "fmt::Error"
//! [`write`]: write() "fmt::write"
//! [`to_string`]: crate::string::ToString::to_string "ToString::to_string"
//! [`write_fmt`]: ../../std/io/trait.Write.html#method.write_fmt
//! [`std::io::Write`]: ../../std/io/trait.Write.html
//! [`print!`]: ../../std/macro.print.html "print!"
//! [`println!`]: ../../std/macro.println.html "println!"
//! [`eprint!`]: ../../std/macro.eprint.html "eprint!"
//! [`eprintln!`]: ../../std/macro.eprintln.html "eprintln!"
//! [`fmt::Arguments`]: Arguments "fmt::Arguments"
//! [`format`]: format() "fmt::format"
#![stable(feature = "rust1", since = "1.0.0")]
#[unstable(feature = "fmt_internals", issue = "none")]
pub use core::fmt::rt;
#[stable(feature = "fmt_flags_align", since = "1.28.0")]
pub use core::fmt::Alignment;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::Error;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{write, ArgumentV1, Arguments};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{Binary, Octal};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{Debug, Display};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{DebugList, DebugMap, DebugSet, DebugStruct, DebugTuple};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{Formatter, Result, Write};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{LowerExp, UpperExp};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::fmt::{LowerHex, Pointer, UpperHex};
#[cfg(not(no_global_oom_handling))]
use crate::string;
/// Takes an [`Arguments`] struct and returns the resulting formatted string.
///
/// The [`Arguments`] instance can be created with the [`format_args!`] macro.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// use std::fmt;
///
/// let s = fmt::format(format_args!("Hello, {}!", "world"));
/// assert_eq!(s, "Hello, world!");
/// ```
///
/// Please note that using [`format!`] might be preferable.
/// Example:
///
/// ```
/// let s = format!("Hello, {}!", "world");
/// assert_eq!(s, "Hello, world!");
/// ```
///
/// [`format_args!`]: core::format_args
/// [`format!`]: crate::format
#[cfg(not(no_global_oom_handling))]
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn format(args: Arguments<'_>) -> string::String {
    // Size the buffer up front from the formatter's own estimate so the
    // common case writes without reallocating.
    let mut buf = string::String::with_capacity(args.estimated_capacity());
    // `fmt::Write for String` never fails on its own, so an `Err` here can
    // only come from a user formatting-trait implementation returning one.
    buf.write_fmt(args).expect("a formatting trait implementation returned an error");
    buf
}

226
rust/alloc/lib.rs Normal file
View File

@ -0,0 +1,226 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT
//! # The Rust core allocation and collections library
//!
//! This library provides smart pointers and collections for managing
//! heap-allocated values.
//!
//! This library, like libcore, normally doesnt need to be used directly
//! since its contents are re-exported in the [`std` crate](../std/index.html).
//! Crates that use the `#![no_std]` attribute however will typically
//! not depend on `std`, so theyd use this crate instead.
//!
//! ## Boxed values
//!
//! The [`Box`] type is a smart pointer type. There can only be one owner of a
//! [`Box`], and the owner can decide to mutate the contents, which live on the
//! heap.
//!
//! This type can be sent among threads efficiently as the size of a `Box` value
//! is the same as that of a pointer. Tree-like data structures are often built
//! with boxes because each node often has only one owner, the parent.
//!
//! ## Reference counted pointers
//!
//! The [`Rc`] type is a non-threadsafe reference-counted pointer type intended
//! for sharing memory within a thread. An [`Rc`] pointer wraps a type, `T`, and
//! only allows access to `&T`, a shared reference.
//!
//! This type is useful when inherited mutability (such as using [`Box`]) is too
//! constraining for an application, and is often paired with the [`Cell`] or
//! [`RefCell`] types in order to allow mutation.
//!
//! ## Atomically reference counted pointers
//!
//! The [`Arc`] type is the threadsafe equivalent of the [`Rc`] type. It
//! provides all the same functionality of [`Rc`], except it requires that the
//! contained type `T` is shareable. Additionally, [`Arc<T>`][`Arc`] is itself
//! sendable while [`Rc<T>`][`Rc`] is not.
//!
//! This type allows for shared access to the contained data, and is often
//! paired with synchronization primitives such as mutexes to allow mutation of
//! shared resources.
//!
//! ## Collections
//!
//! Implementations of the most common general purpose data structures are
//! defined in this library. They are re-exported through the
//! [standard collections library](../std/collections/index.html).
//!
//! ## Heap interfaces
//!
//! The [`alloc`](alloc/index.html) module defines the low-level interface to the
//! default global allocator. It is not compatible with the libc allocator API.
//!
//! [`Arc`]: sync
//! [`Box`]: boxed
//! [`Cell`]: core::cell
//! [`Rc`]: rc
//! [`RefCell`]: core::cell
// To run liballoc tests without x.py without ending up with two copies of liballoc, Miri needs to be
// able to "empty" this crate. See <https://github.com/rust-lang/miri-test-libstd/issues/4>.
// rustc itself never sets the feature, so this line has no effect there.
#![cfg(any(not(feature = "miri-test-libstd"), test, doctest))]
#![allow(unused_attributes)]
#![stable(feature = "alloc", since = "1.36.0")]
#![doc(
html_playground_url = "https://play.rust-lang.org/",
issue_tracker_base_url = "https://github.com/rust-lang/rust/issues/",
test(no_crate_inject, attr(allow(unused_variables), deny(warnings)))
)]
#![doc(cfg_hide(
not(test),
not(any(test, bootstrap)),
any(not(feature = "miri-test-libstd"), test, doctest),
no_global_oom_handling,
not(no_global_oom_handling),
target_has_atomic = "ptr"
))]
#![no_std]
#![needs_allocator]
//
// Lints:
#![deny(unsafe_op_in_unsafe_fn)]
#![warn(deprecated_in_future)]
#![warn(missing_debug_implementations)]
#![warn(missing_docs)]
#![allow(explicit_outlives_requirements)]
//
// Library features:
#![feature(alloc_layout_extra)]
#![feature(allocator_api)]
#![feature(array_chunks)]
#![feature(array_methods)]
#![feature(array_windows)]
#![feature(async_iterator)]
#![feature(coerce_unsized)]
#![cfg_attr(not(no_global_oom_handling), feature(const_alloc_error))]
#![feature(const_box)]
#![cfg_attr(not(no_global_oom_handling), feature(const_btree_new))]
#![feature(const_cow_is_borrowed)]
#![feature(const_convert)]
#![feature(const_size_of_val)]
#![feature(const_align_of_val)]
#![feature(const_ptr_read)]
#![feature(const_maybe_uninit_write)]
#![feature(const_maybe_uninit_as_mut_ptr)]
#![feature(const_refs_to_cell)]
#![feature(core_intrinsics)]
#![feature(const_eval_select)]
#![feature(const_pin)]
#![feature(dispatch_from_dyn)]
#![feature(exact_size_is_empty)]
#![feature(extend_one)]
#![feature(fmt_internals)]
#![feature(fn_traits)]
#![feature(inplace_iteration)]
#![feature(iter_advance_by)]
#![feature(layout_for_ptr)]
#![feature(maybe_uninit_slice)]
#![cfg_attr(test, feature(new_uninit))]
#![feature(nonnull_slice_from_raw_parts)]
#![feature(pattern)]
#![feature(ptr_internals)]
#![feature(receiver_trait)]
#![feature(set_ptr_value)]
#![feature(slice_group_by)]
#![feature(slice_ptr_get)]
#![feature(slice_ptr_len)]
#![feature(slice_range)]
#![feature(str_internals)]
#![feature(trusted_len)]
#![feature(trusted_random_access)]
#![feature(try_trait_v2)]
#![feature(unicode_internals)]
#![feature(unsize)]
//
// Language features:
#![feature(allocator_internals)]
#![feature(allow_internal_unstable)]
#![feature(associated_type_bounds)]
#![feature(box_syntax)]
#![feature(cfg_sanitize)]
#![cfg_attr(bootstrap, feature(cfg_target_has_atomic))]
#![feature(const_deref)]
#![feature(const_fn_trait_bound)]
#![feature(const_mut_refs)]
#![feature(const_ptr_write)]
#![feature(const_precise_live_drops)]
#![feature(const_trait_impl)]
#![feature(const_try)]
#![feature(dropck_eyepatch)]
#![feature(exclusive_range_pattern)]
#![feature(fundamental)]
#![cfg_attr(not(test), feature(generator_trait))]
#![feature(lang_items)]
#![feature(min_specialization)]
#![feature(negative_impls)]
#![feature(never_type)]
#![feature(nll)] // Not necessary, but here to test the `nll` feature.
#![feature(rustc_allow_const_fn_unstable)]
#![feature(rustc_attrs)]
#![feature(staged_api)]
#![cfg_attr(test, feature(test))]
#![feature(unboxed_closures)]
#![feature(unsized_fn_params)]
#![feature(c_unwind)]
//
// Rustdoc features:
#![feature(doc_cfg)]
#![feature(doc_cfg_hide)]
// Technically, this is a bug in rustdoc: rustdoc sees the documentation on `#[lang = slice_alloc]`
// blocks is for `&[T]`, which also has documentation using this feature in `core`, and gets mad
// that the feature-gate isn't enabled. Ideally, it wouldn't check for the feature gate for docs
// from other crates, but since this can only appear for lang items, it doesn't seem worth fixing.
#![feature(intra_doc_pointers)]
// Allow testing this library
#[cfg(test)]
#[macro_use]
extern crate std;
#[cfg(test)]
extern crate test;
// Module with internal macros used by other modules (needs to be included before other modules).
#[macro_use]
mod macros;
mod raw_vec;
// Heaps provided for low-level allocation strategies
pub mod alloc;
// Primitive types using the heaps above
// Need to conditionally define the mod from `boxed.rs` to avoid
// duplicating the lang-items when building in test cfg; but also need
// to allow code to have `use boxed::Box;` declarations.
#[cfg(not(test))]
pub mod boxed;
#[cfg(test)]
mod boxed {
    // Under `cfg(test)` this crate links against `std`, so re-export the real
    // `Box` instead of defining the lang items a second time (see note above).
    pub use std::boxed::Box;
}
pub mod borrow;
pub mod collections;
pub mod fmt;
#[cfg(not(no_rc))]
pub mod rc;
pub mod slice;
pub mod str;
pub mod string;
#[cfg(all(not(no_sync), target_has_atomic = "ptr"))]
pub mod sync;
#[cfg(all(not(no_global_oom_handling), target_has_atomic = "ptr"))]
pub mod task;
#[cfg(test)]
mod tests;
pub mod vec;
#[doc(hidden)]
#[unstable(feature = "liballoc_internals", issue = "none", reason = "implementation detail")]
pub mod __export {
    // Re-exported so macros in this crate (e.g. `format!`) can name
    // `format_args!` via `$crate::__export` regardless of the caller's scope.
    pub use core::format_args;
}

127
rust/alloc/macros.rs Normal file
View File

@ -0,0 +1,127 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT
/// Creates a [`Vec`] containing the arguments.
///
/// `vec!` allows `Vec`s to be defined with the same syntax as array expressions.
/// There are two forms of this macro:
///
/// - Create a [`Vec`] containing a given list of elements:
///
/// ```
/// let v = vec![1, 2, 3];
/// assert_eq!(v[0], 1);
/// assert_eq!(v[1], 2);
/// assert_eq!(v[2], 3);
/// ```
///
/// - Create a [`Vec`] from a given element and size:
///
/// ```
/// let v = vec![1; 3];
/// assert_eq!(v, [1, 1, 1]);
/// ```
///
/// Note that unlike array expressions this syntax supports all elements
/// which implement [`Clone`] and the number of elements doesn't have to be
/// a constant.
///
/// This will use `clone` to duplicate an expression, so one should be careful
/// using this with types having a nonstandard `Clone` implementation. For
/// example, `vec![Rc::new(1); 5]` will create a vector of five references
/// to the same boxed integer value, not five references pointing to independently
/// boxed integers.
///
/// Also, note that `vec![expr; 0]` is allowed, and produces an empty vector.
/// This will still evaluate `expr`, however, and immediately drop the resulting value, so
/// be mindful of side effects.
///
/// [`Vec`]: crate::vec::Vec
#[cfg(all(not(no_global_oom_handling), not(test)))]
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_diagnostic_item = "vec_macro"]
#[allow_internal_unstable(box_syntax, liballoc_internals)]
macro_rules! vec {
    // `vec![]`: a new, empty `Vec`.
    () => (
        $crate::__rust_force_expr!($crate::vec::Vec::new())
    );
    // `vec![elem; n]`: `n` clones of `elem`.
    ($elem:expr; $n:expr) => (
        $crate::__rust_force_expr!($crate::vec::from_elem($elem, $n))
    );
    // `vec![a, b, c]` (trailing comma allowed): a boxed array moved into a
    // `Vec` via the inherent `[T]::into_vec`.
    ($($x:expr),+ $(,)?) => (
        $crate::__rust_force_expr!(<[_]>::into_vec(box [$($x),+]))
    );
}
// HACK(japaric): with cfg(test) the inherent `[T]::into_vec` method, which is
// required for this macro definition, is not available. Instead use the
// `slice::into_vec` function which is only available with cfg(test)
// NB see the slice::hack module in slice.rs for more information
#[cfg(test)]
macro_rules! vec {
    // `vec![]`: a new, empty `Vec`.
    () => (
        $crate::vec::Vec::new()
    );
    // `vec![elem; n]`: `n` clones of `elem`.
    ($elem:expr; $n:expr) => (
        $crate::vec::from_elem($elem, $n)
    );
    // `vec![a, b, c]`: boxed array moved into a `Vec` through the test-only
    // free function (see HACK note above).
    ($($x:expr),*) => (
        $crate::slice::into_vec(box [$($x),*])
    );
    // Trailing-comma form forwards to the list form.
    ($($x:expr,)*) => (vec![$($x),*])
}
/// Creates a `String` using interpolation of runtime expressions.
///
/// The first argument `format!` receives is a format string. This must be a string
/// literal. The power of the formatting string is in the `{}`s contained.
///
/// Additional parameters passed to `format!` replace the `{}`s within the
/// formatting string in the order given unless named or positional parameters
/// are used; see [`std::fmt`] for more information.
///
/// A common use for `format!` is concatenation and interpolation of strings.
/// The same convention is used with [`print!`] and [`write!`] macros,
/// depending on the intended destination of the string.
///
/// To convert a single value to a string, use the [`to_string`] method. This
/// will use the [`Display`] formatting trait.
///
/// [`std::fmt`]: ../std/fmt/index.html
/// [`print!`]: ../std/macro.print.html
/// [`write!`]: core::write
/// [`to_string`]: crate::string::ToString
/// [`Display`]: core::fmt::Display
///
/// # Panics
///
/// `format!` panics if a formatting trait implementation returns an error.
/// This indicates an incorrect implementation
/// since `fmt::Write for String` never returns an error itself.
///
/// # Examples
///
/// ```
/// format!("test");
/// format!("hello {}", "world!");
/// format!("x = {}, y = {y}", 10, y = 30);
/// ```
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), rustc_diagnostic_item = "format_macro")]
macro_rules! format {
    ($($arg:tt)*) => {{
        // NOTE(review): presumably bound to a local so the `format_args!`
        // temporaries' lifetimes end within this block — confirm upstream.
        let res = $crate::fmt::format($crate::__export::format_args!($($arg)*));
        res
    }}
}
/// Force AST node to an expression to improve diagnostics in pattern position.
#[doc(hidden)]
#[macro_export]
#[unstable(feature = "liballoc_internals", issue = "none", reason = "implementation detail")]
macro_rules! __rust_force_expr {
    // Identity expansion: the `$e:expr` fragment forces the input to be
    // parsed as an expression.
    ($e:expr) => {
        $e
    };
}

567
rust/alloc/raw_vec.rs Normal file
View File

@ -0,0 +1,567 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT
#![unstable(feature = "raw_vec_internals", reason = "unstable const warnings", issue = "none")]
use core::alloc::LayoutError;
use core::cmp;
use core::intrinsics;
use core::mem::{self, ManuallyDrop, MaybeUninit};
use core::ops::Drop;
use core::ptr::{self, NonNull, Unique};
use core::slice;
#[cfg(not(no_global_oom_handling))]
use crate::alloc::handle_alloc_error;
use crate::alloc::{Allocator, Global, Layout};
use crate::boxed::Box;
use crate::collections::TryReserveError;
use crate::collections::TryReserveErrorKind::*;
#[cfg(test)]
mod tests;
/// Whether freshly allocated memory is left as-is or guaranteed zero-filled.
enum AllocInit {
    /// The contents of the new memory are uninitialized.
    Uninitialized,
    #[allow(dead_code)]
    /// The new memory is guaranteed to be zeroed.
    Zeroed,
}
/// A low-level utility for more ergonomically allocating, reallocating, and deallocating
/// a buffer of memory on the heap without having to worry about all the corner cases
/// involved. This type is excellent for building your own data structures like Vec and VecDeque.
/// In particular:
///
/// * Produces `Unique::dangling()` on zero-sized types.
/// * Produces `Unique::dangling()` on zero-length allocations.
/// * Avoids freeing `Unique::dangling()`.
/// * Catches all overflows in capacity computations (promotes them to "capacity overflow" panics).
/// * Guards against 32-bit systems allocating more than isize::MAX bytes.
/// * Guards against overflowing your length.
/// * Calls `handle_alloc_error` for fallible allocations.
/// * Contains a `ptr::Unique` and thus endows the user with all related benefits.
/// * Uses the excess returned from the allocator to use the largest available capacity.
///
/// This type does not in any way inspect the memory that it manages. When dropped it *will*
/// free its memory, but it *won't* try to drop its contents. It is up to the user of `RawVec`
/// to handle the actual things *stored* inside of a `RawVec`.
///
/// Note that the excess of a zero-sized types is always infinite, so `capacity()` always returns
/// `usize::MAX`. This means that you need to be careful when round-tripping this type with a
/// `Box<[T]>`, since `capacity()` won't yield the length.
#[allow(missing_debug_implementations)]
pub(crate) struct RawVec<T, A: Allocator = Global> {
    // Start of the allocation; `Unique::dangling()` when nothing has been
    // allocated (zero-sized `T` or `cap == 0`).
    ptr: Unique<T>,
    // Number of elements the allocation can hold; `0` means "unallocated".
    cap: usize,
    // The allocator used for every (de/re)allocation of this buffer.
    alloc: A,
}
impl<T> RawVec<T, Global> {
    /// HACK(Centril): This exists because stable `const fn` can only call stable `const fn`, so
    /// they cannot call `Self::new()`.
    ///
    /// If you change `RawVec<T>::new` or dependencies, please take care to not introduce anything
    /// that would truly const-call something unstable.
    pub const NEW: Self = Self::new();

    /// Creates the biggest possible `RawVec` (on the system heap)
    /// without allocating. If `T` has positive size, then this makes a
    /// `RawVec` with capacity `0`. If `T` is zero-sized, then it makes a
    /// `RawVec` with capacity `usize::MAX`. Useful for implementing
    /// delayed allocation.
    #[must_use]
    pub const fn new() -> Self {
        // Delegates to the allocator-parameterized constructor with `Global`.
        Self::new_in(Global)
    }

    /// Creates a `RawVec` (on the system heap) with exactly the
    /// capacity and alignment requirements for a `[T; capacity]`. This is
    /// equivalent to calling `RawVec::new` when `capacity` is `0` or `T` is
    /// zero-sized. Note that if `T` is zero-sized this means you will
    /// *not* get a `RawVec` with the requested capacity.
    ///
    /// # Panics
    ///
    /// Panics if the requested capacity exceeds `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
    #[cfg(not(any(no_global_oom_handling, test)))]
    #[must_use]
    #[inline]
    pub fn with_capacity(capacity: usize) -> Self {
        Self::with_capacity_in(capacity, Global)
    }

    /// Like `with_capacity`, but guarantees the buffer is zeroed.
    #[cfg(not(any(no_global_oom_handling, test)))]
    #[must_use]
    #[inline]
    pub fn with_capacity_zeroed(capacity: usize) -> Self {
        Self::with_capacity_zeroed_in(capacity, Global)
    }
}
impl<T, A: Allocator> RawVec<T, A> {
    // Tiny Vecs are dumb. Skip to:
    // - 8 if the element size is 1, because any heap allocator is likely
    //   to round up a request of less than 8 bytes to at least 8 bytes.
    // - 4 if elements are moderate-sized (<= 1 KiB).
    // - 1 otherwise, to avoid wasting too much space for very short Vecs.
    pub(crate) const MIN_NON_ZERO_CAP: usize = if mem::size_of::<T>() == 1 {
        8
    } else if mem::size_of::<T>() <= 1024 {
        4
    } else {
        1
    };

    /// Like `new`, but parameterized over the choice of allocator for
    /// the returned `RawVec`.
    #[rustc_allow_const_fn_unstable(const_fn)]
    pub const fn new_in(alloc: A) -> Self {
        // `cap: 0` means "unallocated". zero-sized types are ignored.
        Self { ptr: Unique::dangling(), cap: 0, alloc }
    }

    /// Like `with_capacity`, but parameterized over the choice of
    /// allocator for the returned `RawVec`.
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
        Self::allocate_in(capacity, AllocInit::Uninitialized, alloc)
    }

    /// Like `try_with_capacity`, but parameterized over the choice of
    /// allocator for the returned `RawVec`.
    ///
    /// Returns an error instead of aborting on capacity overflow or
    /// allocation failure.
    #[inline]
    pub fn try_with_capacity_in(capacity: usize, alloc: A) -> Result<Self, TryReserveError> {
        Self::try_allocate_in(capacity, AllocInit::Uninitialized, alloc)
    }

    /// Like `with_capacity_zeroed`, but parameterized over the choice
    /// of allocator for the returned `RawVec`.
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    pub fn with_capacity_zeroed_in(capacity: usize, alloc: A) -> Self {
        Self::allocate_in(capacity, AllocInit::Zeroed, alloc)
    }

    /// Converts the entire buffer into `Box<[MaybeUninit<T>]>` with the specified `len`.
    ///
    /// Note that this will correctly reconstitute any `cap` changes
    /// that may have been performed. (See description of type for details.)
    ///
    /// # Safety
    ///
    /// * `len` must be greater than or equal to the most recently requested capacity, and
    /// * `len` must be less than or equal to `self.capacity()`.
    ///
    /// Note, that the requested capacity and `self.capacity()` could differ, as
    /// an allocator could overallocate and return a greater memory block than requested.
    pub unsafe fn into_box(self, len: usize) -> Box<[MaybeUninit<T>], A> {
        // Sanity-check one half of the safety requirement (we cannot check the other half).
        debug_assert!(
            len <= self.capacity(),
            "`len` must be smaller than or equal to `self.capacity()`"
        );

        // `ManuallyDrop` prevents `self`'s destructor from running, since the
        // returned `Box` now owns the buffer and the allocator.
        let me = ManuallyDrop::new(self);
        unsafe {
            let slice = slice::from_raw_parts_mut(me.ptr() as *mut MaybeUninit<T>, len);
            Box::from_raw_in(slice, ptr::read(&me.alloc))
        }
    }

    /// Allocates a buffer for `capacity` elements; panics on capacity
    /// overflow and aborts (via `handle_alloc_error`) on allocation failure.
    #[cfg(not(no_global_oom_handling))]
    fn allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Self {
        if mem::size_of::<T>() == 0 {
            // Zero-sized types never allocate.
            Self::new_in(alloc)
        } else {
            // We avoid `unwrap_or_else` here because it bloats the amount of
            // LLVM IR generated.
            let layout = match Layout::array::<T>(capacity) {
                Ok(layout) => layout,
                Err(_) => capacity_overflow(),
            };
            match alloc_guard(layout.size()) {
                Ok(_) => {}
                Err(_) => capacity_overflow(),
            }
            let result = match init {
                AllocInit::Uninitialized => alloc.allocate(layout),
                AllocInit::Zeroed => alloc.allocate_zeroed(layout),
            };
            let ptr = match result {
                Ok(ptr) => ptr,
                Err(_) => handle_alloc_error(layout),
            };

            // Allocators currently return a `NonNull<[u8]>` whose length
            // matches the size requested. If that ever changes, the capacity
            // here should change to `ptr.len() / mem::size_of::<T>()`.
            Self {
                ptr: unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) },
                cap: capacity,
                alloc,
            }
        }
    }

    /// Fallible variant of `allocate_in`: returns `Err` instead of
    /// panicking/aborting on capacity overflow or allocation failure.
    fn try_allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Result<Self, TryReserveError> {
        if mem::size_of::<T>() == 0 {
            // Zero-sized types never allocate.
            return Ok(Self::new_in(alloc));
        }

        let layout = Layout::array::<T>(capacity).map_err(|_| CapacityOverflow)?;
        alloc_guard(layout.size())?;
        let result = match init {
            AllocInit::Uninitialized => alloc.allocate(layout),
            AllocInit::Zeroed => alloc.allocate_zeroed(layout),
        };
        let ptr = result.map_err(|_| AllocError { layout, non_exhaustive: () })?;

        // Allocators currently return a `NonNull<[u8]>` whose length
        // matches the size requested. If that ever changes, the capacity
        // here should change to `ptr.len() / mem::size_of::<T>()`.
        Ok(Self {
            ptr: unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) },
            cap: capacity,
            alloc,
        })
    }

    /// Reconstitutes a `RawVec` from a pointer, capacity, and allocator.
    ///
    /// # Safety
    ///
    /// The `ptr` must be allocated (via the given allocator `alloc`), and with the given
    /// `capacity`.
    /// The `capacity` cannot exceed `isize::MAX` for sized types. (only a concern on 32-bit
    /// systems). ZST vectors may have a capacity up to `usize::MAX`.
    /// If the `ptr` and `capacity` come from a `RawVec` created via `alloc`, then this is
    /// guaranteed.
    #[inline]
    pub unsafe fn from_raw_parts_in(ptr: *mut T, capacity: usize, alloc: A) -> Self {
        Self { ptr: unsafe { Unique::new_unchecked(ptr) }, cap: capacity, alloc }
    }

    /// Gets a raw pointer to the start of the allocation. Note that this is
    /// `Unique::dangling()` if `capacity == 0` or `T` is zero-sized. In the former case, you must
    /// be careful.
    #[inline]
    pub fn ptr(&self) -> *mut T {
        self.ptr.as_ptr()
    }

    /// Gets the capacity of the allocation.
    ///
    /// This will always be `usize::MAX` if `T` is zero-sized.
    #[inline(always)]
    pub fn capacity(&self) -> usize {
        if mem::size_of::<T>() == 0 { usize::MAX } else { self.cap }
    }

    /// Returns a shared reference to the allocator backing this `RawVec`.
    pub fn allocator(&self) -> &A {
        &self.alloc
    }

    /// Returns the current allocation as `(pointer, layout)`, or `None` when
    /// nothing is allocated (zero-sized `T` or `cap == 0`).
    fn current_memory(&self) -> Option<(NonNull<u8>, Layout)> {
        if mem::size_of::<T>() == 0 || self.cap == 0 {
            None
        } else {
            // We have an allocated chunk of memory, so we can bypass runtime
            // checks to get our current layout.
            unsafe {
                let align = mem::align_of::<T>();
                let size = mem::size_of::<T>() * self.cap;
                let layout = Layout::from_size_align_unchecked(size, align);
                Some((self.ptr.cast().into(), layout))
            }
        }
    }

    /// Ensures that the buffer contains at least enough space to hold `len +
    /// additional` elements. If it doesn't already have enough capacity, will
    /// reallocate enough space plus comfortable slack space to get amortized
    /// *O*(1) behavior. Will limit this behavior if it would needlessly cause
    /// itself to panic.
    ///
    /// If `len` exceeds `self.capacity()`, this may fail to actually allocate
    /// the requested space. This is not really unsafe, but the unsafe
    /// code *you* write that relies on the behavior of this function may break.
    ///
    /// This is ideal for implementing a bulk-push operation like `extend`.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity exceeds `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
    #[cfg(not(no_global_oom_handling))]
    #[inline]
    pub fn reserve(&mut self, len: usize, additional: usize) {
        // Callers expect this function to be very cheap when there is already sufficient capacity.
        // Therefore, we move all the resizing and error-handling logic from grow_amortized and
        // handle_reserve behind a call, while making sure that this function is likely to be
        // inlined as just a comparison and a call if the comparison fails.
        #[cold]
        fn do_reserve_and_handle<T, A: Allocator>(
            slf: &mut RawVec<T, A>,
            len: usize,
            additional: usize,
        ) {
            handle_reserve(slf.grow_amortized(len, additional));
        }

        if self.needs_to_grow(len, additional) {
            do_reserve_and_handle(self, len, additional);
        }
    }

    /// A specialized version of `reserve()` used only by the hot and
    /// oft-instantiated `Vec::push()`, which does its own capacity check.
    #[cfg(not(no_global_oom_handling))]
    #[inline(never)]
    pub fn reserve_for_push(&mut self, len: usize) {
        handle_reserve(self.grow_amortized(len, 1));
    }

    /// The same as `reserve`, but returns on errors instead of panicking or aborting.
    pub fn try_reserve(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
        if self.needs_to_grow(len, additional) {
            self.grow_amortized(len, additional)
        } else {
            Ok(())
        }
    }

    /// The same as `reserve_for_push`, but returns on errors instead of panicking or aborting.
    #[inline(never)]
    pub fn try_reserve_for_push(&mut self, len: usize) -> Result<(), TryReserveError> {
        self.grow_amortized(len, 1)
    }

    /// Ensures that the buffer contains at least enough space to hold `len +
    /// additional` elements. If it doesn't already, will reallocate the
    /// minimum possible amount of memory necessary. Generally this will be
    /// exactly the amount of memory necessary, but in principle the allocator
    /// is free to give back more than we asked for.
    ///
    /// If `len` exceeds `self.capacity()`, this may fail to actually allocate
    /// the requested space. This is not really unsafe, but the unsafe code
    /// *you* write that relies on the behavior of this function may break.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity exceeds `isize::MAX` bytes.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
    #[cfg(not(no_global_oom_handling))]
    pub fn reserve_exact(&mut self, len: usize, additional: usize) {
        handle_reserve(self.try_reserve_exact(len, additional));
    }

    /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting.
    pub fn try_reserve_exact(
        &mut self,
        len: usize,
        additional: usize,
    ) -> Result<(), TryReserveError> {
        if self.needs_to_grow(len, additional) { self.grow_exact(len, additional) } else { Ok(()) }
    }

    /// Shrinks the buffer down to the specified capacity. If the given amount
    /// is 0, actually completely deallocates.
    ///
    /// # Panics
    ///
    /// Panics if the given amount is *larger* than the current capacity.
    ///
    /// # Aborts
    ///
    /// Aborts on OOM.
    #[cfg(not(no_global_oom_handling))]
    pub fn shrink_to_fit(&mut self, cap: usize) {
        handle_reserve(self.shrink(cap));
    }

    /// Tries to shrink the buffer down to the specified capacity. If the given amount
    /// is 0, actually completely deallocates.
    ///
    /// # Panics
    ///
    /// Panics if the given amount is *larger* than the current capacity.
    pub fn try_shrink_to_fit(&mut self, cap: usize) -> Result<(), TryReserveError> {
        self.shrink(cap)
    }
}
impl<T, A: Allocator> RawVec<T, A> {
    /// Returns if the buffer needs to grow to fulfill the needed extra capacity.
    /// Mainly used to make inlining reserve-calls possible without inlining `grow`.
    fn needs_to_grow(&self, len: usize, additional: usize) -> bool {
        // `wrapping_sub` means `len > capacity()` (which the `reserve` docs
        // permit) yields a huge value and thus `false`, i.e. no growth.
        additional > self.capacity().wrapping_sub(len)
    }

    /// Records a new allocation returned by the allocator.
    fn set_ptr_and_cap(&mut self, ptr: NonNull<[u8]>, cap: usize) {
        // Allocators currently return a `NonNull<[u8]>` whose length matches
        // the size requested. If that ever changes, the capacity here should
        // change to `ptr.len() / mem::size_of::<T>()`.
        self.ptr = unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) };
        self.cap = cap;
    }

    // This method is usually instantiated many times. So we want it to be as
    // small as possible, to improve compile times. But we also want as much of
    // its contents to be statically computable as possible, to make the
    // generated code run faster. Therefore, this method is carefully written
    // so that all of the code that depends on `T` is within it, while as much
    // of the code that doesn't depend on `T` as possible is in functions that
    // are non-generic over `T`.
    fn grow_amortized(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
        // This is ensured by the calling contexts.
        debug_assert!(additional > 0);

        if mem::size_of::<T>() == 0 {
            // Since we return a capacity of `usize::MAX` when `elem_size` is
            // 0, getting to here necessarily means the `RawVec` is overfull.
            return Err(CapacityOverflow.into());
        }

        // Nothing we can really do about these checks, sadly.
        let required_cap = len.checked_add(additional).ok_or(CapacityOverflow)?;

        // This guarantees exponential growth. The doubling cannot overflow
        // because `cap <= isize::MAX` and the type of `cap` is `usize`.
        let cap = cmp::max(self.cap * 2, required_cap);
        let cap = cmp::max(Self::MIN_NON_ZERO_CAP, cap);

        let new_layout = Layout::array::<T>(cap);

        // `finish_grow` is non-generic over `T`.
        let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
        self.set_ptr_and_cap(ptr, cap);
        Ok(())
    }

    // The constraints on this method are much the same as those on
    // `grow_amortized`, but this method is usually instantiated less often so
    // it's less critical.
    fn grow_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
        if mem::size_of::<T>() == 0 {
            // Since we return a capacity of `usize::MAX` when the type size is
            // 0, getting to here necessarily means the `RawVec` is overfull.
            return Err(CapacityOverflow.into());
        }

        let cap = len.checked_add(additional).ok_or(CapacityOverflow)?;
        let new_layout = Layout::array::<T>(cap);

        // `finish_grow` is non-generic over `T`.
        let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
        self.set_ptr_and_cap(ptr, cap);
        Ok(())
    }

    /// Shrinks the allocation to hold exactly `cap` elements; `cap == 0`
    /// shrinks to a zero-sized block (the allocation is not freed here —
    /// see `current_memory`/`Drop`).
    fn shrink(&mut self, cap: usize) -> Result<(), TryReserveError> {
        assert!(cap <= self.capacity(), "Tried to shrink to a larger capacity");

        // Nothing allocated (ZST or `cap == 0`): nothing to shrink.
        let (ptr, layout) = if let Some(mem) = self.current_memory() { mem } else { return Ok(()) };
        let new_size = cap * mem::size_of::<T>();

        let ptr = unsafe {
            let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
            self.alloc
                .shrink(ptr, layout, new_layout)
                .map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?
        };
        self.set_ptr_and_cap(ptr, cap);
        Ok(())
    }
}
// This function is outside `RawVec` to minimize compile times. See the comment
// above `RawVec::grow_amortized` for details. (The `A` parameter isn't
// significant, because the number of different `A` types seen in practice is
// much smaller than the number of `T` types.)
#[inline(never)]
fn finish_grow<A>(
    new_layout: Result<Layout, LayoutError>,
    current_memory: Option<(NonNull<u8>, Layout)>,
    alloc: &mut A,
) -> Result<NonNull<[u8]>, TryReserveError>
where
    A: Allocator,
{
    // Check for the error here to minimize the size of `RawVec::grow_*`.
    let new_layout = new_layout.map_err(|_| CapacityOverflow)?;

    alloc_guard(new_layout.size())?;

    let memory = if let Some((ptr, old_layout)) = current_memory {
        debug_assert_eq!(old_layout.align(), new_layout.align());
        unsafe {
            // The allocator checks for alignment equality
            intrinsics::assume(old_layout.align() == new_layout.align());
            alloc.grow(ptr, old_layout, new_layout)
        }
    } else {
        // No existing allocation: plain allocate instead of grow.
        alloc.allocate(new_layout)
    };

    memory.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () }.into())
}
unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawVec<T, A> {
    /// Frees the memory owned by the `RawVec` *without* trying to drop its contents.
    fn drop(&mut self) {
        // `current_memory()` is `None` when nothing was ever allocated
        // (dangling pointer / zero-sized layout), so there is nothing to free.
        if let Some((ptr, layout)) = self.current_memory() {
            unsafe { self.alloc.deallocate(ptr, layout) }
        }
    }
}
// Central function for reserve error handling.
//
// Success needs no action; on failure we dispatch on the error kind so that
// the panicking/aborting code is emitted in exactly one place.
#[cfg(not(no_global_oom_handling))]
#[inline]
fn handle_reserve(result: Result<(), TryReserveError>) {
    if let Err(err) = result.map_err(|e| e.kind()) {
        match err {
            CapacityOverflow => capacity_overflow(),
            AllocError { layout, .. } => handle_alloc_error(layout),
        }
    }
}
// We need to guarantee the following:
// * We don't ever allocate `> isize::MAX` byte-size objects.
// * We don't overflow `usize::MAX` and actually allocate too little.
//
// On 64-bit targets the overflow checks elsewhere are sufficient, because an
// allocation of more than `isize::MAX` bytes will surely fail. On 32-bit and
// 16-bit targets a process may be able to address the full 4GB (e.g. PAE or
// x32), so the `isize::MAX` limit must be enforced explicitly here.
#[inline]
fn alloc_guard(alloc_size: usize) -> Result<(), TryReserveError> {
    let fits_in_isize = alloc_size <= isize::MAX as usize;
    if usize::BITS >= 64 || fits_in_isize {
        Ok(())
    } else {
        Err(CapacityOverflow.into())
    }
}
// One central function responsible for reporting capacity overflows. This'll
// ensure that the code generation related to these panics is minimal as there's
// only one location which panics rather than a bunch throughout the module.
//
// Only compiled when global OOM handling (i.e. panicking) is available; the
// `!` return type lets callers use it in any expression position.
#[cfg(not(no_global_oom_handling))]
fn capacity_overflow() -> ! {
    panic!("capacity overflow");
}

1282
rust/alloc/slice.rs Normal file

File diff suppressed because it is too large Load Diff

632
rust/alloc/str.rs Normal file
View File

@ -0,0 +1,632 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT
//! Unicode string slices.
//!
//! *[See also the `str` primitive type](str).*
//!
//! The `&str` type is one of the two main string types, the other being `String`.
//! Unlike its `String` counterpart, its contents are borrowed.
//!
//! # Basic Usage
//!
//! A basic string declaration of `&str` type:
//!
//! ```
//! let hello_world = "Hello, World!";
//! ```
//!
//! Here we have declared a string literal, also known as a string slice.
//! String literals have a static lifetime, which means the string `hello_world`
//! is guaranteed to be valid for the duration of the entire program.
//! We can explicitly specify `hello_world`'s lifetime as well:
//!
//! ```
//! let hello_world: &'static str = "Hello, world!";
//! ```
#![stable(feature = "rust1", since = "1.0.0")]
// Many of the usings in this module are only used in the test configuration.
// It's cleaner to just turn off the unused_imports warning than to fix them.
#![allow(unused_imports)]
use core::borrow::{Borrow, BorrowMut};
use core::iter::FusedIterator;
use core::mem;
use core::ptr;
use core::str::pattern::{DoubleEndedSearcher, Pattern, ReverseSearcher, Searcher};
use core::unicode::conversions;
use crate::borrow::ToOwned;
use crate::boxed::Box;
use crate::collections::TryReserveError;
use crate::slice::{Concat, Join, SliceIndex};
use crate::string::String;
use crate::vec::Vec;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::str::pattern;
#[stable(feature = "encode_utf16", since = "1.8.0")]
pub use core::str::EncodeUtf16;
#[stable(feature = "split_ascii_whitespace", since = "1.34.0")]
pub use core::str::SplitAsciiWhitespace;
#[stable(feature = "split_inclusive", since = "1.51.0")]
pub use core::str::SplitInclusive;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::str::SplitWhitespace;
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::str::{from_utf8, from_utf8_mut, Bytes, CharIndices, Chars};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::str::{from_utf8_unchecked, from_utf8_unchecked_mut, ParseBoolError};
#[stable(feature = "str_escape", since = "1.34.0")]
pub use core::str::{EscapeDebug, EscapeDefault, EscapeUnicode};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::str::{FromStr, Utf8Error};
#[allow(deprecated)]
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::str::{Lines, LinesAny};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::str::{MatchIndices, RMatchIndices};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::str::{Matches, RMatches};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::str::{RSplit, Split};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::str::{RSplitN, SplitN};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::str::{RSplitTerminator, SplitTerminator};
/// Note: `str` in `Concat<str>` is not meaningful here.
/// This type parameter of the trait only exists to enable another impl.
#[cfg(not(no_global_oom_handling))]
#[unstable(feature = "slice_concat_ext", issue = "27747")]
impl<S: Borrow<str>> Concat<str> for [S] {
    type Output = String;

    /// Concatenation is simply joining with the empty separator.
    fn concat(slice: &Self) -> String {
        Join::join(slice, "")
    }
}
#[cfg(not(no_global_oom_handling))]
#[unstable(feature = "slice_concat_ext", issue = "27747")]
impl<S: Borrow<str>> Join<&str> for [S] {
    type Output = String;

    fn join(slice: &Self, sep: &str) -> String {
        // SAFETY: every input is valid UTF-8 and `join_generic_copy` only
        // concatenates whole byte slices, so the result is valid UTF-8.
        unsafe { String::from_utf8_unchecked(join_generic_copy(slice, sep.as_bytes())) }
    }
}
// Copies each item of `$iter` into `$target`, preceding every item with
// `$separator`. A dedicated loop is emitted for each separator length listed
// in `$($num),*` (hardcoded lengths compile to much faster copies); other
// lengths fall through to a generic loop. Evaluates to the still-unwritten
// tail of `$target`.
#[cfg(not(no_global_oom_handling))]
macro_rules! specialize_for_lengths {
    ($separator:expr, $target:expr, $iter:expr; $($num:expr),*) => {{
        let mut target = $target;
        let iter = $iter;
        let sep_bytes = $separator;
        match $separator.len() {
            $(
                // loops with hardcoded sizes run much faster
                // specialize the cases with small separator lengths
                $num => {
                    for s in iter {
                        copy_slice_and_advance!(target, sep_bytes);
                        let content_bytes = s.borrow().as_ref();
                        copy_slice_and_advance!(target, content_bytes);
                    }
                },
            )*
            _ => {
                // arbitrary non-zero size fallback
                for s in iter {
                    copy_slice_and_advance!(target, sep_bytes);
                    let content_bytes = s.borrow().as_ref();
                    copy_slice_and_advance!(target, content_bytes);
                }
            }
        }
        target
    }}
}
// Copies `$bytes` into the front of the mutable slice `$target`, then
// rebinds `$target` to the remaining (unwritten) tail.
#[cfg(not(no_global_oom_handling))]
macro_rules! copy_slice_and_advance {
    ($target:expr, $bytes:expr) => {
        let len = $bytes.len();
        let (head, tail) = { $target }.split_at_mut(len);
        head.copy_from_slice($bytes);
        $target = tail;
    };
}
// Optimized join implementation that works for both Vec<T> (T: Copy) and String's inner vec
// Currently (2018-05-13) there is a bug with type inference and specialization (see issue #36262)
// For this reason SliceConcat<T> is not specialized for T: Copy and SliceConcat<str> is the
// only user of this function. It is left in place for the time when that is fixed.
//
// the bounds for String-join are S: Borrow<str> and for Vec-join Borrow<[T]>
// [T] and str both impl AsRef<[T]> for some T
// => s.borrow().as_ref() and we always have slices
#[cfg(not(no_global_oom_handling))]
fn join_generic_copy<B, T, S>(slice: &[S], sep: &[T]) -> Vec<T>
where
    T: Copy,
    B: AsRef<[T]> + ?Sized,
    S: Borrow<B>,
{
    let sep_len = sep.len();
    let mut iter = slice.iter();

    // the first slice is the only one without a separator preceding it
    let first = match iter.next() {
        Some(first) => first,
        None => return vec![],
    };

    // compute the exact total length of the joined Vec
    // if the `len` calculation overflows, we'll panic
    // we would have run out of memory anyway and the rest of the function requires
    // the entire Vec pre-allocated for safety
    //
    // `iter.len()` is `slice.len() - 1` here, i.e. one separator per gap.
    let reserved_len = sep_len
        .checked_mul(iter.len())
        .and_then(|n| {
            slice.iter().map(|s| s.borrow().as_ref().len()).try_fold(n, usize::checked_add)
        })
        .expect("attempt to join into collection with len > usize::MAX");

    // prepare an uninitialized buffer
    let mut result = Vec::with_capacity(reserved_len);
    debug_assert!(result.capacity() >= reserved_len);
    result.extend_from_slice(first.borrow().as_ref());

    unsafe {
        let pos = result.len();
        let target = result.spare_capacity_mut().get_unchecked_mut(..reserved_len - pos);

        // Convert the separator and slices to slices of MaybeUninit
        // to simplify implementation in specialize_for_lengths
        let sep_uninit = core::slice::from_raw_parts(sep.as_ptr().cast(), sep.len());
        let iter_uninit = iter.map(|it| {
            let it = it.borrow().as_ref();
            core::slice::from_raw_parts(it.as_ptr().cast(), it.len())
        });

        // copy separator and slices over without bounds checks
        // generate loops with hardcoded offsets for small separators
        // massive improvements possible (~ x2)
        let remain = specialize_for_lengths!(sep_uninit, target, iter_uninit; 0, 1, 2, 3, 4);

        // A weird borrow implementation may return different
        // slices for the length calculation and the actual copy.
        // Make sure we don't expose uninitialized bytes to the caller.
        let result_len = reserved_len - remain.len();
        result.set_len(result_len);
    }
    result
}
#[stable(feature = "rust1", since = "1.0.0")]
impl Borrow<str> for String {
    /// Borrows the string's contents as a `&str` slice.
    #[inline]
    fn borrow(&self) -> &str {
        self.as_str()
    }
}
#[stable(feature = "string_borrow_mut", since = "1.36.0")]
impl BorrowMut<str> for String {
    /// Mutably borrows the string's contents as a `&mut str` slice.
    #[inline]
    fn borrow_mut(&mut self) -> &mut str {
        self.as_mut_str()
    }
}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
impl ToOwned for str {
    type Owned = String;

    #[inline]
    fn to_owned(&self) -> String {
        // SAFETY: `self` is valid UTF-8, and copying its bytes preserves that.
        unsafe { String::from_utf8_unchecked(self.as_bytes().to_owned()) }
    }

    fn clone_into(&self, target: &mut String) {
        // Reuse `target`'s existing buffer by cloning into its byte vector
        // instead of allocating a fresh `String`.
        let mut b = mem::take(target).into_bytes();
        self.as_bytes().clone_into(&mut b);
        // SAFETY: `b` now holds a copy of `self`'s bytes, which are valid UTF-8.
        *target = unsafe { String::from_utf8_unchecked(b) }
    }
}
/// Methods for string slices.
#[lang = "str_alloc"]
#[cfg(not(test))]
impl str {
    /// Converts a `Box<str>` into a `Box<[u8]>` without copying or allocating.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let s = "this is a string";
    /// let boxed_str = s.to_owned().into_boxed_str();
    /// let boxed_bytes = boxed_str.into_boxed_bytes();
    /// assert_eq!(*boxed_bytes, *s.as_bytes());
    /// ```
    #[stable(feature = "str_box_extras", since = "1.20.0")]
    #[must_use = "`self` will be dropped if the result is not used"]
    #[inline]
    pub fn into_boxed_bytes(self: Box<str>) -> Box<[u8]> {
        self.into()
    }

    /// Replaces all matches of a pattern with another string.
    ///
    /// `replace` creates a new [`String`], and copies the data from this string slice into it.
    /// While doing so, it attempts to find matches of a pattern. If it finds any, it
    /// replaces them with the replacement string slice.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let s = "this is old";
    ///
    /// assert_eq!("this is new", s.replace("old", "new"));
    /// ```
    ///
    /// When the pattern doesn't match:
    ///
    /// ```
    /// let s = "this is old";
    /// assert_eq!(s, s.replace("cookie monster", "little lamb"));
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[must_use = "this returns the replaced string as a new allocation, \
                  without modifying the original"]
    #[stable(feature = "rust1", since = "1.0.0")]
    #[inline]
    pub fn replace<'a, P: Pattern<'a>>(&'a self, from: P, to: &str) -> String {
        let mut result = String::new();
        let mut last_end = 0;
        // Splice together: unchanged text before each match, then the
        // replacement; `get_unchecked` is fine because `match_indices`
        // yields in-bounds, char-boundary indices.
        for (start, part) in self.match_indices(from) {
            result.push_str(unsafe { self.get_unchecked(last_end..start) });
            result.push_str(to);
            last_end = start + part.len();
        }
        result.push_str(unsafe { self.get_unchecked(last_end..self.len()) });
        result
    }

    /// Replaces first N matches of a pattern with another string.
    ///
    /// `replacen` creates a new [`String`], and copies the data from this string slice into it.
    /// While doing so, it attempts to find matches of a pattern. If it finds any, it
    /// replaces them with the replacement string slice at most `count` times.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let s = "foo foo 123 foo";
    /// assert_eq!("new new 123 foo", s.replacen("foo", "new", 2));
    /// assert_eq!("faa fao 123 foo", s.replacen('o', "a", 3));
    /// assert_eq!("foo foo new23 foo", s.replacen(char::is_numeric, "new", 1));
    /// ```
    ///
    /// When the pattern doesn't match:
    ///
    /// ```
    /// let s = "this is old";
    /// assert_eq!(s, s.replacen("cookie monster", "little lamb", 10));
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[must_use = "this returns the replaced string as a new allocation, \
                  without modifying the original"]
    #[stable(feature = "str_replacen", since = "1.16.0")]
    pub fn replacen<'a, P: Pattern<'a>>(&'a self, pat: P, to: &str, count: usize) -> String {
        // Hope to reduce the times of re-allocation
        let mut result = String::with_capacity(32);
        let mut last_end = 0;
        // Same splicing as `replace`, but `take(count)` caps the number of
        // replacements.
        for (start, part) in self.match_indices(pat).take(count) {
            result.push_str(unsafe { self.get_unchecked(last_end..start) });
            result.push_str(to);
            last_end = start + part.len();
        }
        result.push_str(unsafe { self.get_unchecked(last_end..self.len()) });
        result
    }

    /// Returns the lowercase equivalent of this string slice, as a new [`String`].
    ///
    /// 'Lowercase' is defined according to the terms of the Unicode Derived Core Property
    /// `Lowercase`.
    ///
    /// Since some characters can expand into multiple characters when changing
    /// the case, this function returns a [`String`] instead of modifying the
    /// parameter in-place.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let s = "HELLO";
    ///
    /// assert_eq!("hello", s.to_lowercase());
    /// ```
    ///
    /// A tricky example, with sigma:
    ///
    /// ```
    /// let sigma = "Σ";
    ///
    /// assert_eq!("σ", sigma.to_lowercase());
    ///
    /// // but at the end of a word, it's ς, not σ:
    /// let odysseus = "ὈΔΥΣΣΕΎΣ";
    ///
    /// assert_eq!("ὀδυσσεύς", odysseus.to_lowercase());
    /// ```
    ///
    /// Languages without case are not changed:
    ///
    /// ```
    /// let new_year = "农历新年";
    ///
    /// assert_eq!(new_year, new_year.to_lowercase());
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[must_use = "this returns the lowercase string as a new String, \
                  without modifying the original"]
    #[stable(feature = "unicode_case_mapping", since = "1.2.0")]
    pub fn to_lowercase(&self) -> String {
        let mut s = String::with_capacity(self.len());
        for (i, c) in self[..].char_indices() {
            if c == 'Σ' {
                // Σ maps to σ, except at the end of a word where it maps to ς.
                // This is the only conditional (contextual) but language-independent mapping
                // in `SpecialCasing.txt`,
                // so hard-code it rather than have a generic "condition" mechanism.
                // See https://github.com/rust-lang/rust/issues/26035
                map_uppercase_sigma(self, i, &mut s)
            } else {
                // `to_lower` yields up to three chars, padding unused slots
                // with NUL; push only the chars before the padding.
                match conversions::to_lower(c) {
                    [a, '\0', _] => s.push(a),
                    [a, b, '\0'] => {
                        s.push(a);
                        s.push(b);
                    }
                    [a, b, c] => {
                        s.push(a);
                        s.push(b);
                        s.push(c);
                    }
                }
            }
        }
        return s;

        fn map_uppercase_sigma(from: &str, i: usize, to: &mut String) {
            // See https://www.unicode.org/versions/Unicode7.0.0/ch03.pdf#G33992
            // for the definition of `Final_Sigma`.
            debug_assert!('Σ'.len_utf8() == 2);
            let is_word_final = case_ignoreable_then_cased(from[..i].chars().rev())
                && !case_ignoreable_then_cased(from[i + 2..].chars());
            to.push_str(if is_word_final { "ς" } else { "σ" });
        }

        fn case_ignoreable_then_cased<I: Iterator<Item = char>>(iter: I) -> bool {
            use core::unicode::{Case_Ignorable, Cased};
            // True when the first non-`Case_Ignorable` char, if any, is `Cased`.
            match iter.skip_while(|&c| Case_Ignorable(c)).next() {
                Some(c) => Cased(c),
                None => false,
            }
        }
    }

    /// Returns the uppercase equivalent of this string slice, as a new [`String`].
    ///
    /// 'Uppercase' is defined according to the terms of the Unicode Derived Core Property
    /// `Uppercase`.
    ///
    /// Since some characters can expand into multiple characters when changing
    /// the case, this function returns a [`String`] instead of modifying the
    /// parameter in-place.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let s = "hello";
    ///
    /// assert_eq!("HELLO", s.to_uppercase());
    /// ```
    ///
    /// Scripts without case are not changed:
    ///
    /// ```
    /// let new_year = "农历新年";
    ///
    /// assert_eq!(new_year, new_year.to_uppercase());
    /// ```
    ///
    /// One character can become multiple:
    /// ```
    /// let s = "tschüß";
    ///
    /// assert_eq!("TSCHÜSS", s.to_uppercase());
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[must_use = "this returns the uppercase string as a new String, \
                  without modifying the original"]
    #[stable(feature = "unicode_case_mapping", since = "1.2.0")]
    pub fn to_uppercase(&self) -> String {
        let mut s = String::with_capacity(self.len());
        for c in self[..].chars() {
            // Like `to_lowercase`, but uppercasing has no contextual mapping.
            match conversions::to_upper(c) {
                [a, '\0', _] => s.push(a),
                [a, b, '\0'] => {
                    s.push(a);
                    s.push(b);
                }
                [a, b, c] => {
                    s.push(a);
                    s.push(b);
                    s.push(c);
                }
            }
        }
        s
    }

    /// Converts a [`Box<str>`] into a [`String`] without copying or allocating.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let string = String::from("birthday gift");
    /// let boxed_str = string.clone().into_boxed_str();
    ///
    /// assert_eq!(boxed_str.into_string(), string);
    /// ```
    #[stable(feature = "box_str", since = "1.4.0")]
    #[must_use = "`self` will be dropped if the result is not used"]
    #[inline]
    pub fn into_string(self: Box<str>) -> String {
        let slice = Box::<[u8]>::from(self);
        // SAFETY: the bytes came from a `str`, so they are valid UTF-8.
        unsafe { String::from_utf8_unchecked(slice.into_vec()) }
    }

    /// Creates a new [`String`] by repeating a string `n` times.
    ///
    /// # Panics
    ///
    /// This function will panic if the capacity would overflow.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// assert_eq!("abc".repeat(4), String::from("abcabcabcabc"));
    /// ```
    ///
    /// A panic upon overflow:
    ///
    /// ```should_panic
    /// // this will panic at runtime
    /// let huge = "0123456789abcdef".repeat(usize::MAX);
    /// ```
    #[cfg(not(no_global_oom_handling))]
    #[must_use]
    #[stable(feature = "repeat_str", since = "1.16.0")]
    pub fn repeat(&self, n: usize) -> String {
        // SAFETY: repeating whole valid-UTF-8 byte sequences stays valid UTF-8.
        unsafe { String::from_utf8_unchecked(self.as_bytes().repeat(n)) }
    }

    /// Returns a copy of this string where each character is mapped to its
    /// ASCII upper case equivalent.
    ///
    /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
    /// but non-ASCII letters are unchanged.
    ///
    /// To uppercase the value in-place, use [`make_ascii_uppercase`].
    ///
    /// To uppercase ASCII characters in addition to non-ASCII characters, use
    /// [`to_uppercase`].
    ///
    /// # Examples
    ///
    /// ```
    /// let s = "Grüße, Jürgen ❤";
    ///
    /// assert_eq!("GRüßE, JüRGEN ❤", s.to_ascii_uppercase());
    /// ```
    ///
    /// [`make_ascii_uppercase`]: str::make_ascii_uppercase
    /// [`to_uppercase`]: #method.to_uppercase
    #[cfg(not(no_global_oom_handling))]
    #[must_use = "to uppercase the value in-place, use `make_ascii_uppercase()`"]
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[inline]
    pub fn to_ascii_uppercase(&self) -> String {
        let mut bytes = self.as_bytes().to_vec();
        bytes.make_ascii_uppercase();
        // make_ascii_uppercase() preserves the UTF-8 invariant.
        unsafe { String::from_utf8_unchecked(bytes) }
    }

    /// Returns a copy of this string where each character is mapped to its
    /// ASCII lower case equivalent.
    ///
    /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
    /// but non-ASCII letters are unchanged.
    ///
    /// To lowercase the value in-place, use [`make_ascii_lowercase`].
    ///
    /// To lowercase ASCII characters in addition to non-ASCII characters, use
    /// [`to_lowercase`].
    ///
    /// # Examples
    ///
    /// ```
    /// let s = "Grüße, Jürgen ❤";
    ///
    /// assert_eq!("grüße, jürgen ❤", s.to_ascii_lowercase());
    /// ```
    ///
    /// [`make_ascii_lowercase`]: str::make_ascii_lowercase
    /// [`to_lowercase`]: #method.to_lowercase
    #[cfg(not(no_global_oom_handling))]
    #[must_use = "to lowercase the value in-place, use `make_ascii_lowercase()`"]
    #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
    #[inline]
    pub fn to_ascii_lowercase(&self) -> String {
        let mut bytes = self.as_bytes().to_vec();
        bytes.make_ascii_lowercase();
        // make_ascii_lowercase() preserves the UTF-8 invariant.
        unsafe { String::from_utf8_unchecked(bytes) }
    }

    /// Tries to create a `String`.
    ///
    /// Fallible (kernel-friendly) counterpart of `to_owned`: returns an error
    /// instead of aborting when the allocation fails.
    ///
    /// # Examples
    ///
    /// Basic usage:
    ///
    /// ```
    /// let s: &str = "a";
    /// let ss: String = s.try_to_owned().unwrap();
    /// ```
    #[inline]
    #[stable(feature = "kernel", since = "1.0.0")]
    pub fn try_to_owned(&self) -> Result<String, TryReserveError> {
        // SAFETY: `self` is valid UTF-8, and copying its bytes preserves that.
        unsafe { Ok(String::from_utf8_unchecked(self.as_bytes().try_to_vec()?)) }
    }
}
/// Converts a boxed slice of bytes to a boxed string slice without checking
/// that the string contains valid UTF-8.
///
/// # Safety
///
/// The bytes in `v` must be valid UTF-8; otherwise the returned `Box<str>`
/// violates `str`'s invariant, which is undefined behavior.
///
/// # Examples
///
/// Basic usage:
///
/// ```
/// let smile_utf8 = Box::new([226, 152, 186]);
/// let smile = unsafe { std::str::from_boxed_utf8_unchecked(smile_utf8) };
///
/// assert_eq!("☺", &*smile);
/// ```
#[stable(feature = "str_box_extras", since = "1.20.0")]
#[must_use]
#[inline]
pub unsafe fn from_boxed_utf8_unchecked(v: Box<[u8]>) -> Box<str> {
    // SAFETY: the caller guarantees the bytes are valid UTF-8; the raw
    // pointer round-trip reuses the same allocation without copying.
    unsafe { Box::from_raw(Box::into_raw(v) as *mut str) }
}

2869
rust/alloc/string.rs Normal file

File diff suppressed because it is too large Load Diff

186
rust/alloc/vec/drain.rs Normal file
View File

@ -0,0 +1,186 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT
use crate::alloc::{Allocator, Global};
use core::fmt;
use core::iter::{FusedIterator, TrustedLen};
use core::mem;
use core::ptr::{self, NonNull};
use core::slice::{self};
use super::Vec;
/// A draining iterator for `Vec<T>`.
///
/// This `struct` is created by [`Vec::drain`].
/// See its documentation for more.
///
/// # Example
///
/// ```
/// let mut v = vec![0, 1, 2];
/// let iter: std::vec::Drain<_> = v.drain(..);
/// ```
#[stable(feature = "drain", since = "1.6.0")]
pub struct Drain<
    'a,
    T: 'a,
    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + 'a = Global,
> {
    /// Index of tail to preserve
    pub(super) tail_start: usize,
    /// Length of tail
    pub(super) tail_len: usize,
    /// Current remaining range to remove
    pub(super) iter: slice::Iter<'a, T>,
    /// Pointer back to the source vector; used by `drop` to move the
    /// preserved tail back down and restore the vector's length.
    pub(super) vec: NonNull<Vec<T, A>>,
}
#[stable(feature = "collection_debug", since = "1.17.0")]
impl<T: fmt::Debug, A: Allocator> fmt::Debug for Drain<'_, T, A> {
    /// Formats the drain as a tuple struct wrapping its remaining elements.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let remaining = self.iter.as_slice();
        f.debug_tuple("Drain").field(&remaining).finish()
    }
}
impl<'a, T, A: Allocator> Drain<'a, T, A> {
    /// Returns the remaining items of this iterator as a slice.
    ///
    /// # Examples
    ///
    /// ```
    /// let mut vec = vec!['a', 'b', 'c'];
    /// let mut drain = vec.drain(..);
    /// assert_eq!(drain.as_slice(), &['a', 'b', 'c']);
    /// let _ = drain.next().unwrap();
    /// assert_eq!(drain.as_slice(), &['b', 'c']);
    /// ```
    #[must_use]
    #[stable(feature = "vec_drain_as_slice", since = "1.46.0")]
    pub fn as_slice(&self) -> &[T] {
        self.iter.as_slice()
    }

    /// Returns a reference to the underlying allocator.
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[must_use]
    #[inline]
    pub fn allocator(&self) -> &A {
        // SAFETY: `self.vec` points at the vector this drain was created
        // from and stays valid for the drain's lifetime; only a shared
        // reference is created here.
        unsafe { self.vec.as_ref().allocator() }
    }
}
#[stable(feature = "vec_drain_as_slice", since = "1.46.0")]
impl<'a, T, A: Allocator> AsRef<[T]> for Drain<'a, T, A> {
fn as_ref(&self) -> &[T] {
self.as_slice()
}
}
// The bounds mirror the contained data: `Drain` holds a `slice::Iter<'a, T>`
// and a pointer into a `Vec<T, A>`, so it is `Sync`/`Send` exactly when the
// element type and the allocator are.
#[stable(feature = "drain", since = "1.6.0")]
unsafe impl<T: Sync, A: Sync + Allocator> Sync for Drain<'_, T, A> {}
#[stable(feature = "drain", since = "1.6.0")]
unsafe impl<T: Send, A: Send + Allocator> Send for Drain<'_, T, A> {}
#[stable(feature = "drain", since = "1.6.0")]
impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
    type Item = T;

    #[inline]
    fn next(&mut self) -> Option<T> {
        // Move each drained element out by value with `ptr::read`; the
        // `Drop` impl later repairs the source vector so these slots are
        // never read again.
        self.iter.next().map(|elt| unsafe { ptr::read(elt as *const _) })
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        self.iter.size_hint()
    }
}
#[stable(feature = "drain", since = "1.6.0")]
impl<T, A: Allocator> DoubleEndedIterator for Drain<'_, T, A> {
    #[inline]
    fn next_back(&mut self) -> Option<T> {
        // Mirror of `next`: move the element out from the back by value.
        self.iter.next_back().map(|elt| unsafe { ptr::read(elt as *const _) })
    }
}
#[stable(feature = "drain", since = "1.6.0")]
impl<T, A: Allocator> Drop for Drain<'_, T, A> {
    fn drop(&mut self) {
        /// Moves back the un-`Drain`ed elements to restore the original `Vec`.
        struct DropGuard<'r, 'a, T, A: Allocator>(&'r mut Drain<'a, T, A>);

        impl<'r, 'a, T, A: Allocator> Drop for DropGuard<'r, 'a, T, A> {
            fn drop(&mut self) {
                if self.0.tail_len > 0 {
                    unsafe {
                        let source_vec = self.0.vec.as_mut();
                        // memmove back untouched tail, update to new length
                        let start = source_vec.len();
                        let tail = self.0.tail_start;
                        if tail != start {
                            let src = source_vec.as_ptr().add(tail);
                            let dst = source_vec.as_mut_ptr().add(start);
                            ptr::copy(src, dst, self.0.tail_len);
                        }
                        source_vec.set_len(start + self.0.tail_len);
                    }
                }
            }
        }

        // Detach the inner iterator so `self` can be handed to the guard below.
        let iter = mem::replace(&mut self.iter, (&mut []).iter());
        let drop_len = iter.len();

        let mut vec = self.vec;

        if mem::size_of::<T>() == 0 {
            // ZSTs have no identity, so we don't need to move them around, we only need to drop the correct amount.
            // this can be achieved by manipulating the Vec length instead of moving values out from `iter`.
            unsafe {
                let vec = vec.as_mut();
                let old_len = vec.len();
                vec.set_len(old_len + drop_len + self.tail_len);
                vec.truncate(old_len + self.tail_len);
            }
            return;
        }

        // ensure elements are moved back into their appropriate places, even when drop_in_place panics
        let _guard = DropGuard(self);

        if drop_len == 0 {
            return;
        }

        // as_slice() must only be called when iter.len() is > 0 because
        // vec::Splice modifies vec::Drain fields and may grow the vec which would invalidate
        // the iterator's internal pointers. Creating a reference to deallocated memory
        // is invalid even when it is zero-length
        let drop_ptr = iter.as_slice().as_ptr();

        unsafe {
            // drop_ptr comes from a slice::Iter which only gives us a &[T] but for drop_in_place
            // a pointer with mutable provenance is necessary. Therefore we must reconstruct
            // it from the original vec but also avoid creating a &mut to the front since that could
            // invalidate raw pointers to it which some unsafe code might rely on.
            let vec_ptr = vec.as_mut().as_mut_ptr();
            let drop_offset = drop_ptr.offset_from(vec_ptr) as usize;
            let to_drop = ptr::slice_from_raw_parts_mut(vec_ptr.add(drop_offset), drop_len);
            ptr::drop_in_place(to_drop);
        }
    }
}
#[stable(feature = "drain", since = "1.6.0")]
impl<T, A: Allocator> ExactSizeIterator for Drain<'_, T, A> {
    /// The drain is empty exactly when the underlying slice iterator is.
    fn is_empty(&self) -> bool {
        self.iter.len() == 0
    }
}
// `Drain` forwards `size_hint` to the inner `slice::Iter`, which reports an
// exact length and keeps returning `None` once exhausted — hence these
// marker impls.
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T, A: Allocator> TrustedLen for Drain<'_, T, A> {}

#[stable(feature = "fused", since = "1.26.0")]
impl<T, A: Allocator> FusedIterator for Drain<'_, T, A> {}

View File

@ -0,0 +1,145 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT
use crate::alloc::{Allocator, Global};
use core::ptr::{self};
use core::slice::{self};
use super::Vec;
/// An iterator which uses a closure to determine if an element should be removed.
///
/// This struct is created by [`Vec::drain_filter`].
/// See its documentation for more.
///
/// # Example
///
/// ```
/// #![feature(drain_filter)]
///
/// let mut v = vec![0, 1, 2];
/// let iter: std::vec::DrainFilter<_, _> = v.drain_filter(|x| *x % 2 == 0);
/// ```
#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
#[derive(Debug)]
pub struct DrainFilter<
    'a,
    T,
    F,
    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> where
    F: FnMut(&mut T) -> bool,
{
    /// Exclusive borrow of the vector being filtered.
    pub(super) vec: &'a mut Vec<T, A>,
    /// The index of the item that will be inspected by the next call to `next`.
    pub(super) idx: usize,
    /// The number of items that have been drained (removed) thus far.
    pub(super) del: usize,
    /// The original length of `vec` prior to draining.
    pub(super) old_len: usize,
    /// The filter test predicate.
    pub(super) pred: F,
    /// A flag that indicates a panic has occurred in the filter test predicate.
    /// This is used as a hint in the drop implementation to prevent consumption
    /// of the remainder of the `DrainFilter`. Any unprocessed items will be
    /// backshifted in the `vec`, but no further items will be dropped or
    /// tested by the filter predicate.
    pub(super) panic_flag: bool,
}
impl<T, F, A: Allocator> DrainFilter<'_, T, F, A>
where
    F: FnMut(&mut T) -> bool,
{
    /// Returns a reference to the underlying allocator.
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn allocator(&self) -> &A {
        // Delegates to the mutably-borrowed source vector.
        self.vec.allocator()
    }
}
#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
impl<T, F, A: Allocator> Iterator for DrainFilter<'_, T, F, A>
where
    F: FnMut(&mut T) -> bool,
{
    type Item = T;

    fn next(&mut self) -> Option<T> {
        unsafe {
            while self.idx < self.old_len {
                let i = self.idx;
                // Re-create a view over the vector's *original* length; the
                // vector's own length is only fixed up in `drop`.
                let v = slice::from_raw_parts_mut(self.vec.as_mut_ptr(), self.old_len);
                self.panic_flag = true;
                let drained = (self.pred)(&mut v[i]);
                self.panic_flag = false;
                // Update the index *after* the predicate is called. If the index
                // is updated prior and the predicate panics, the element at this
                // index would be leaked.
                self.idx += 1;
                if drained {
                    self.del += 1;
                    // Move the element out by value; its slot is now vacant.
                    return Some(ptr::read(&v[i]));
                } else if self.del > 0 {
                    // Backshift the kept element over the gap left by
                    // previously drained elements.
                    let del = self.del;
                    let src: *const T = &v[i];
                    let dst: *mut T = &mut v[i - del];
                    ptr::copy_nonoverlapping(src, dst, 1);
                }
            }
            None
        }
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        // Anywhere from zero to all remaining unprocessed elements may match.
        (0, Some(self.old_len - self.idx))
    }
}
#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
impl<T, F, A: Allocator> Drop for DrainFilter<'_, T, F, A>
where
    F: FnMut(&mut T) -> bool,
{
    fn drop(&mut self) {
        /// Restores the vector's length (and backshifts any unprocessed tail)
        /// even if consuming the remaining elements panics.
        struct BackshiftOnDrop<'a, 'b, T, F, A: Allocator>
        where
            F: FnMut(&mut T) -> bool,
        {
            drain: &'b mut DrainFilter<'a, T, F, A>,
        }

        impl<'a, 'b, T, F, A: Allocator> Drop for BackshiftOnDrop<'a, 'b, T, F, A>
        where
            F: FnMut(&mut T) -> bool,
        {
            fn drop(&mut self) {
                unsafe {
                    if self.drain.idx < self.drain.old_len && self.drain.del > 0 {
                        // This is a pretty messed up state, and there isn't really an
                        // obviously right thing to do. We don't want to keep trying
                        // to execute `pred`, so we just backshift all the unprocessed
                        // elements and tell the vec that they still exist. The backshift
                        // is required to prevent a double-drop of the last successfully
                        // drained item prior to a panic in the predicate.
                        let ptr = self.drain.vec.as_mut_ptr();
                        let src = ptr.add(self.drain.idx);
                        let dst = src.sub(self.drain.del);
                        let tail_len = self.drain.old_len - self.drain.idx;
                        src.copy_to(dst, tail_len);
                    }
                    self.drain.vec.set_len(self.drain.old_len - self.drain.del);
                }
            }
        }

        let backshift = BackshiftOnDrop { drain: self };

        // Attempt to consume any remaining elements if the filter predicate
        // has not yet panicked. We'll backshift any remaining elements
        // whether we've already panicked or if the consumption here panics.
        if !backshift.drain.panic_flag {
            backshift.drain.for_each(drop);
        }
    }
}

356
rust/alloc/vec/into_iter.rs Normal file
View File

@ -0,0 +1,356 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT
use crate::alloc::{Allocator, Global};
use crate::raw_vec::RawVec;
use core::fmt;
use core::intrinsics::arith_offset;
use core::iter::{
FusedIterator, InPlaceIterable, SourceIter, TrustedLen, TrustedRandomAccessNoCoerce,
};
use core::marker::PhantomData;
use core::mem::{self};
use core::ptr::{self, NonNull};
use core::slice::{self};
/// An iterator that moves out of a vector.
///
/// This `struct` is created by the `into_iter` method on [`Vec`](super::Vec)
/// (provided by the [`IntoIterator`] trait).
///
/// # Example
///
/// ```
/// let v = vec![0, 1, 2];
/// let iter: std::vec::IntoIter<_> = v.into_iter();
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_insignificant_dtor]
pub struct IntoIter<
    T,
    #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
> {
    /// Start of the backing allocation.
    pub(super) buf: NonNull<T>,
    /// Marks logical ownership of `T` values for the drop checker.
    pub(super) phantom: PhantomData<T>,
    /// Capacity of the backing allocation, in elements.
    pub(super) cap: usize,
    /// The allocator the buffer was allocated with.
    pub(super) alloc: A,
    /// Front cursor: the next element to yield.
    pub(super) ptr: *const T,
    /// End bound of the elements not yet yielded.
    pub(super) end: *const T,
}
#[stable(feature = "vec_intoiter_debug", since = "1.13.0")]
impl<T: fmt::Debug, A: Allocator> fmt::Debug for IntoIter<T, A> {
    /// Formats the iterator as a tuple struct wrapping its remaining elements.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let remaining = self.as_slice();
        f.debug_tuple("IntoIter").field(&remaining).finish()
    }
}
impl<T, A: Allocator> IntoIter<T, A> {
    /// Returns the remaining items of this iterator as a slice.
    ///
    /// # Examples
    ///
    /// ```
    /// let vec = vec!['a', 'b', 'c'];
    /// let mut into_iter = vec.into_iter();
    /// assert_eq!(into_iter.as_slice(), &['a', 'b', 'c']);
    /// let _ = into_iter.next().unwrap();
    /// assert_eq!(into_iter.as_slice(), &['b', 'c']);
    /// ```
    #[stable(feature = "vec_into_iter_as_slice", since = "1.15.0")]
    pub fn as_slice(&self) -> &[T] {
        // SAFETY: `ptr` points at the first not-yet-yielded element and
        // `len()` elements after it are initialized and owned by `self`.
        unsafe { slice::from_raw_parts(self.ptr, self.len()) }
    }

    /// Returns the remaining items of this iterator as a mutable slice.
    ///
    /// # Examples
    ///
    /// ```
    /// let vec = vec!['a', 'b', 'c'];
    /// let mut into_iter = vec.into_iter();
    /// assert_eq!(into_iter.as_slice(), &['a', 'b', 'c']);
    /// into_iter.as_mut_slice()[2] = 'z';
    /// assert_eq!(into_iter.next().unwrap(), 'a');
    /// assert_eq!(into_iter.next().unwrap(), 'b');
    /// assert_eq!(into_iter.next().unwrap(), 'z');
    /// ```
    #[stable(feature = "vec_into_iter_as_slice", since = "1.15.0")]
    pub fn as_mut_slice(&mut self) -> &mut [T] {
        // SAFETY: same bounds as `as_slice`, and `&mut self` guarantees
        // exclusive access.
        unsafe { &mut *self.as_raw_mut_slice() }
    }

    /// Returns a reference to the underlying allocator.
    #[unstable(feature = "allocator_api", issue = "32838")]
    #[inline]
    pub fn allocator(&self) -> &A {
        &self.alloc
    }

    // Raw `*mut [T]` over the remaining elements; shared by the slice accessors.
    fn as_raw_mut_slice(&mut self) -> *mut [T] {
        ptr::slice_from_raw_parts_mut(self.ptr as *mut T, self.len())
    }

    /// Drops remaining elements and relinquishes the backing allocation.
    ///
    /// This is roughly equivalent to the following, but more efficient
    ///
    /// ```
    /// # let mut into_iter = Vec::<u8>::with_capacity(10).into_iter();
    /// (&mut into_iter).for_each(core::mem::drop);
    /// unsafe { core::ptr::write(&mut into_iter, Vec::new().into_iter()); }
    /// ```
    #[cfg(not(no_global_oom_handling))]
    pub(super) fn forget_allocation_drop_remaining(&mut self) {
        let remaining = self.as_raw_mut_slice();

        // overwrite the individual fields instead of creating a new
        // struct and then overwriting &mut self.
        // this creates less assembly
        //
        // After this, `self` looks like an empty iterator over a dangling
        // buffer with zero capacity, so its own drop frees nothing.
        self.cap = 0;
        self.buf = unsafe { NonNull::new_unchecked(RawVec::NEW.ptr()) };
        self.ptr = self.buf.as_ptr();
        self.end = self.buf.as_ptr();

        unsafe {
            // Drop the elements captured in `remaining` only after the
            // fields above are reset, so a panic mid-drop cannot cause a
            // double free via `self`'s own destructor.
            ptr::drop_in_place(remaining);
        }
    }
}
#[stable(feature = "vec_intoiter_as_ref", since = "1.46.0")]
impl<T, A: Allocator> AsRef<[T]> for IntoIter<T, A> {
    fn as_ref(&self) -> &[T] {
        self.as_slice()
    }
}

// SAFETY: `IntoIter` owns its elements and allocator outright, so thread
// transfer/sharing is sound whenever `T` and `A` allow it.
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: Send, A: Allocator + Send> Send for IntoIter<T, A> {}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<T: Sync, A: Allocator + Sync> Sync for IntoIter<T, A> {}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T, A: Allocator> Iterator for IntoIter<T, A> {
    type Item = T;

    #[inline]
    fn next(&mut self) -> Option<T> {
        if self.ptr as *const _ == self.end {
            None
        } else if mem::size_of::<T>() == 0 {
            // purposefully don't use 'ptr.offset' because for
            // vectors with 0-size elements this would return the
            // same pointer.
            self.ptr = unsafe { arith_offset(self.ptr as *const i8, 1) as *mut T };

            // Make up a value of this ZST.
            Some(unsafe { mem::zeroed() })
        } else {
            let old = self.ptr;
            self.ptr = unsafe { self.ptr.offset(1) };

            // Move the element out of the buffer; the cursor bump above
            // ensures it will not be dropped again by `Drop`.
            Some(unsafe { ptr::read(old) })
        }
    }

    #[inline]
    fn size_hint(&self) -> (usize, Option<usize>) {
        // For ZSTs the remaining count is encoded in the raw pointer values
        // themselves (see `next`), so subtract addresses directly.
        let exact = if mem::size_of::<T>() == 0 {
            (self.end as usize).wrapping_sub(self.ptr as usize)
        } else {
            unsafe { self.end.offset_from(self.ptr) as usize }
        };
        (exact, Some(exact))
    }

    #[inline]
    fn advance_by(&mut self, n: usize) -> Result<(), usize> {
        // Advance the front cursor past up to `n` elements, dropping the
        // skipped ones in place.
        let step_size = self.len().min(n);
        let to_drop = ptr::slice_from_raw_parts_mut(self.ptr as *mut T, step_size);
        if mem::size_of::<T>() == 0 {
            // SAFETY: due to unchecked casts of unsigned amounts to signed offsets the wraparound
            // effectively results in unsigned pointers representing positions 0..usize::MAX,
            // which is valid for ZSTs.
            self.ptr = unsafe { arith_offset(self.ptr as *const i8, step_size as isize) as *mut T }
        } else {
            // SAFETY: the min() above ensures that step_size is in bounds
            self.ptr = unsafe { self.ptr.add(step_size) };
        }
        // SAFETY: the min() above ensures that step_size is in bounds
        unsafe {
            ptr::drop_in_place(to_drop);
        }
        if step_size < n {
            return Err(step_size);
        }
        Ok(())
    }

    #[inline]
    fn count(self) -> usize {
        self.len()
    }

    #[doc(hidden)]
    unsafe fn __iterator_get_unchecked(&mut self, i: usize) -> Self::Item
    where
        Self: TrustedRandomAccessNoCoerce,
    {
        // SAFETY: the caller must guarantee that `i` is in bounds of the
        // `Vec<T>`, so `i` cannot overflow an `isize`, and the `self.ptr.add(i)`
        // is guaranteed to pointer to an element of the `Vec<T>` and
        // thus guaranteed to be valid to dereference.
        //
        // Also note the implementation of `Self: TrustedRandomAccess` requires
        // that `T: Copy` so reading elements from the buffer doesn't invalidate
        // them for `Drop`.
        unsafe {
            if mem::size_of::<T>() == 0 { mem::zeroed() } else { ptr::read(self.ptr.add(i)) }
        }
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
    #[inline]
    fn next_back(&mut self) -> Option<T> {
        if self.end == self.ptr {
            None
        } else if mem::size_of::<T>() == 0 {
            // See above for why 'ptr.offset' isn't used
            self.end = unsafe { arith_offset(self.end as *const i8, -1) as *mut T };

            // Make up a value of this ZST.
            Some(unsafe { mem::zeroed() })
        } else {
            // Move the back cursor first, then read the element it now
            // points at (the last remaining one).
            self.end = unsafe { self.end.offset(-1) };

            Some(unsafe { ptr::read(self.end) })
        }
    }

    #[inline]
    fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
        // Mirror of `advance_by`, working from the back cursor.
        let step_size = self.len().min(n);
        if mem::size_of::<T>() == 0 {
            // SAFETY: same as for advance_by()
            self.end = unsafe {
                arith_offset(self.end as *const i8, step_size.wrapping_neg() as isize) as *mut T
            }
        } else {
            // SAFETY: same as for advance_by()
            self.end = unsafe { self.end.offset(step_size.wrapping_neg() as isize) };
        }
        let to_drop = ptr::slice_from_raw_parts_mut(self.end as *mut T, step_size);
        // SAFETY: same as for advance_by()
        unsafe {
            ptr::drop_in_place(to_drop);
        }
        if step_size < n {
            return Err(step_size);
        }
        Ok(())
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
impl<T, A: Allocator> ExactSizeIterator for IntoIter<T, A> {
    // Empty exactly when the front cursor has met the back cursor.
    fn is_empty(&self) -> bool {
        self.ptr == self.end
    }
}

#[stable(feature = "fused", since = "1.26.0")]
impl<T, A: Allocator> FusedIterator for IntoIter<T, A> {}

// SAFETY: `size_hint` is exact (derived directly from `ptr`/`end`), which is
// what `TrustedLen` demands.
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T, A: Allocator> TrustedLen for IntoIter<T, A> {}
// Marker restricting the random-access specialization below to element types
// with no drop glue.
#[doc(hidden)]
#[unstable(issue = "none", feature = "std_internals")]
#[rustc_unsafe_specialization_marker]
pub trait NonDrop {}

// T: Copy as approximation for !Drop since get_unchecked does not advance self.ptr
// and thus we can't implement drop-handling
#[unstable(issue = "none", feature = "std_internals")]
impl<T: Copy> NonDrop for T {}

#[doc(hidden)]
#[unstable(issue = "none", feature = "std_internals")]
// TrustedRandomAccess (without NoCoerce) must not be implemented because
// subtypes/supertypes of `T` might not be `NonDrop`
unsafe impl<T, A: Allocator> TrustedRandomAccessNoCoerce for IntoIter<T, A>
where
    T: NonDrop,
{
    const MAY_HAVE_SIDE_EFFECT: bool = false;
}
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "vec_into_iter_clone", since = "1.8.0")]
impl<T: Clone, A: Allocator + Clone> Clone for IntoIter<T, A> {
    // Clones only the not-yet-yielded elements into a fresh allocation from
    // a clone of the allocator.
    #[cfg(not(test))]
    fn clone(&self) -> Self {
        self.as_slice().to_vec_in(self.alloc.clone()).into_iter()
    }
    // Upstream std's own test builds route through `crate::slice` instead.
    #[cfg(test)]
    fn clone(&self) -> Self {
        crate::slice::to_vec(self.as_slice(), self.alloc.clone()).into_iter()
    }
}
#[stable(feature = "rust1", since = "1.0.0")]
unsafe impl<#[may_dangle] T, A: Allocator> Drop for IntoIter<T, A> {
    fn drop(&mut self) {
        // Guard ensuring the backing allocation is freed even if dropping an
        // element below panics: the guard's own `Drop` still runs while
        // unwinding.
        struct DropGuard<'a, T, A: Allocator>(&'a mut IntoIter<T, A>);

        impl<T, A: Allocator> Drop for DropGuard<'_, T, A> {
            fn drop(&mut self) {
                unsafe {
                    // `IntoIter::alloc` is not used anymore after this
                    let alloc = ptr::read(&self.0.alloc);
                    // RawVec handles deallocation
                    let _ = RawVec::from_raw_parts_in(self.0.buf.as_ptr(), self.0.cap, alloc);
                }
            }
        }

        let guard = DropGuard(self);
        // destroy the remaining elements
        unsafe {
            ptr::drop_in_place(guard.0.as_raw_mut_slice());
        }
        // now `guard` will be dropped and do the rest
    }
}
// Plumbing impls for the in-place iteration ("collect into self")
// specialization machinery.
#[unstable(issue = "none", feature = "inplace_iteration")]
#[doc(hidden)]
unsafe impl<T, A: Allocator> InPlaceIterable for IntoIter<T, A> {}

#[unstable(issue = "none", feature = "inplace_iteration")]
#[doc(hidden)]
unsafe impl<T, A: Allocator> SourceIter for IntoIter<T, A> {
    type Source = Self;

    #[inline]
    unsafe fn as_inner(&mut self) -> &mut Self::Source {
        self
    }
}

// internal helper trait for in-place iteration specialization.
#[rustc_specialization_trait]
pub(crate) trait AsIntoIter {
    type Item;
    fn as_into_iter(&mut self) -> &mut IntoIter<Self::Item>;
}

impl<T> AsIntoIter for IntoIter<T> {
    type Item = T;

    fn as_into_iter(&mut self) -> &mut IntoIter<Self::Item> {
        self
    }
}

106
rust/alloc/vec/is_zero.rs Normal file
View File

@ -0,0 +1,106 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT
use crate::boxed::Box;
// Specialization marker for types whose "zero" value can be detected at
// runtime — presumably consumed by `Vec`'s zero-fill fast paths, which are
// not visible in this file.
#[rustc_specialization_trait]
pub(super) unsafe trait IsZero {
    /// Whether this value is zero
    fn is_zero(&self) -> bool;
}

// Implements `IsZero` for primitive `$t` using the `$is_zero` predicate.
macro_rules! impl_is_zero {
    ($t:ty, $is_zero:expr) => {
        unsafe impl IsZero for $t {
            #[inline]
            fn is_zero(&self) -> bool {
                $is_zero(*self)
            }
        }
    };
}
// NOTE(review): `i8`/`u8` are absent here — presumably covered by a separate
// byte-level specialization elsewhere in the crate; confirm against upstream.
impl_is_zero!(i16, |x| x == 0);
impl_is_zero!(i32, |x| x == 0);
impl_is_zero!(i64, |x| x == 0);
impl_is_zero!(i128, |x| x == 0);
impl_is_zero!(isize, |x| x == 0);

impl_is_zero!(u16, |x| x == 0);
impl_is_zero!(u32, |x| x == 0);
impl_is_zero!(u64, |x| x == 0);
impl_is_zero!(u128, |x| x == 0);
impl_is_zero!(usize, |x| x == 0);

impl_is_zero!(bool, |x| x == false);
impl_is_zero!(char, |x| x == '\0');

// Floats compare by bit pattern: `-0.0 == 0.0` numerically, but only the
// all-zero bit pattern counts as "zero" here.
impl_is_zero!(f32, |x: f32| x.to_bits() == 0);
impl_is_zero!(f64, |x: f64| x.to_bits() == 0);

// Raw pointers are "zero" exactly when null.
unsafe impl<T> IsZero for *const T {
    #[inline]
    fn is_zero(&self) -> bool {
        (*self).is_null()
    }
}

unsafe impl<T> IsZero for *mut T {
    #[inline]
    fn is_zero(&self) -> bool {
        (*self).is_null()
    }
}
// `Option<&T>` and `Option<Box<T>>` are guaranteed to represent `None` as null.
// For fat pointers, the bytes that would be the pointer metadata in the `Some`
// variant are padding in the `None` variant, so ignoring them and
// zero-initializing instead is ok.
// `Option<&mut T>` never implements `Clone`, so there's no need for an impl of
// `SpecFromElem`.
unsafe impl<T: ?Sized> IsZero for Option<&T> {
    #[inline]
    fn is_zero(&self) -> bool {
        self.is_none()
    }
}

unsafe impl<T: ?Sized> IsZero for Option<Box<T>> {
    #[inline]
    fn is_zero(&self) -> bool {
        self.is_none()
    }
}
// `Option<num::NonZeroU32>` and similar have a representation guarantee that
// they're the same size as the corresponding `u32` type, as well as a guarantee
// that transmuting between `NonZeroU32` and `Option<num::NonZeroU32>` works.
// While the documentation officially makes it UB to transmute from `None`,
// we're the standard library so we can make extra inferences, and we know that
// the only niche available to represent `None` is the one that's all zeros.
macro_rules! impl_is_zero_option_of_nonzero {
    ($($t:ident,)+) => {$(
        unsafe impl IsZero for Option<core::num::$t> {
            #[inline]
            fn is_zero(&self) -> bool {
                self.is_none()
            }
        }
    )+};
}

impl_is_zero_option_of_nonzero!(
    NonZeroU8,
    NonZeroU16,
    NonZeroU32,
    NonZeroU64,
    NonZeroU128,
    NonZeroI8,
    NonZeroI16,
    NonZeroI32,
    NonZeroI64,
    NonZeroI128,
    NonZeroUsize,
    NonZeroIsize,
);

3362
rust/alloc/vec/mod.rs Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,49 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT
use crate::alloc::Allocator;
#[cfg(not(no_global_oom_handling))]
use crate::borrow::Cow;
use super::Vec;
// Generates a `PartialEq<$rhs> for $lhs` impl that compares as slices.
// `[$($vars)*]` supplies extra generic parameters, the optional
// `where $ty: $bound` clause constrains the `Cow` cases to `T: Clone`, and
// `#[$stability]` is forwarded onto the generated impl.
macro_rules! __impl_slice_eq1 {
    ([$($vars:tt)*] $lhs:ty, $rhs:ty $(where $ty:ty: $bound:ident)?, #[$stability:meta]) => {
        #[$stability]
        impl<T, U, $($vars)*> PartialEq<$rhs> for $lhs
        where
            T: PartialEq<U>,
            $($ty: $bound)?
        {
            #[inline]
            fn eq(&self, other: &$rhs) -> bool { self[..] == other[..] }
            #[inline]
            fn ne(&self, other: &$rhs) -> bool { self[..] != other[..] }
        }
    }
}

__impl_slice_eq1! { [A: Allocator] Vec<T, A>, Vec<U, A>, #[stable(feature = "rust1", since = "1.0.0")] }
__impl_slice_eq1! { [A: Allocator] Vec<T, A>, &[U], #[stable(feature = "rust1", since = "1.0.0")] }
__impl_slice_eq1! { [A: Allocator] Vec<T, A>, &mut [U], #[stable(feature = "rust1", since = "1.0.0")] }
__impl_slice_eq1! { [A: Allocator] &[T], Vec<U, A>, #[stable(feature = "partialeq_vec_for_ref_slice", since = "1.46.0")] }
__impl_slice_eq1! { [A: Allocator] &mut [T], Vec<U, A>, #[stable(feature = "partialeq_vec_for_ref_slice", since = "1.46.0")] }
__impl_slice_eq1! { [A: Allocator] Vec<T, A>, [U], #[stable(feature = "partialeq_vec_for_slice", since = "1.48.0")] }
__impl_slice_eq1! { [A: Allocator] [T], Vec<U, A>, #[stable(feature = "partialeq_vec_for_slice", since = "1.48.0")] }
#[cfg(not(no_global_oom_handling))]
__impl_slice_eq1! { [A: Allocator] Cow<'_, [T]>, Vec<U, A> where T: Clone, #[stable(feature = "rust1", since = "1.0.0")] }
#[cfg(not(no_global_oom_handling))]
__impl_slice_eq1! { [] Cow<'_, [T]>, &[U] where T: Clone, #[stable(feature = "rust1", since = "1.0.0")] }
#[cfg(not(no_global_oom_handling))]
__impl_slice_eq1! { [] Cow<'_, [T]>, &mut [U] where T: Clone, #[stable(feature = "rust1", since = "1.0.0")] }
__impl_slice_eq1! { [A: Allocator, const N: usize] Vec<T, A>, [U; N], #[stable(feature = "rust1", since = "1.0.0")] }
__impl_slice_eq1! { [A: Allocator, const N: usize] Vec<T, A>, &[U; N], #[stable(feature = "rust1", since = "1.0.0")] }

// NOTE: some less important impls are omitted to reduce code bloat
// FIXME(Centril): Reconsider this?
//__impl_slice_eq1! { [const N: usize] Vec<A>, &mut [B; N], }
//__impl_slice_eq1! { [const N: usize] [A; N], Vec<B>, }
//__impl_slice_eq1! { [const N: usize] &[A; N], Vec<B>, }
//__impl_slice_eq1! { [const N: usize] &mut [A; N], Vec<B>, }
//__impl_slice_eq1! { [const N: usize] Cow<'a, [A]>, [B; N], }
//__impl_slice_eq1! { [const N: usize] Cow<'a, [A]>, &[B; N], }
//__impl_slice_eq1! { [const N: usize] Cow<'a, [A]>, &mut [B; N], }

View File

@ -0,0 +1,30 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT
// Set the length of the vec when the `SetLenOnDrop` value goes out of scope.
//
// The idea is: The length field in SetLenOnDrop is a local variable
// that the optimizer will see does not alias with any stores through the Vec's data
// pointer. This is a workaround for alias analysis issue #32155
pub(super) struct SetLenOnDrop<'a> {
    // Borrow of the `Vec`'s length field; written back exactly once, on drop.
    len: &'a mut usize,
    // Local shadow of the length, cheap for the optimizer to reason about
    // (see the aliasing note above).
    local_len: usize,
}

impl<'a> SetLenOnDrop<'a> {
    #[inline]
    pub(super) fn new(len: &'a mut usize) -> Self {
        SetLenOnDrop { local_len: *len, len }
    }

    #[inline]
    pub(super) fn increment_len(&mut self, increment: usize) {
        self.local_len += increment;
    }
}

impl Drop for SetLenOnDrop<'_> {
    #[inline]
    fn drop(&mut self) {
        // Commit the locally tracked length back to the `Vec`, including
        // while unwinding from a panic in the caller.
        *self.len = self.local_len;
    }
}

View File

@ -0,0 +1,174 @@
// SPDX-License-Identifier: Apache-2.0 OR MIT
use crate::alloc::Allocator;
use crate::collections::{TryReserveError, TryReserveErrorKind};
use core::iter::TrustedLen;
use core::ptr::{self};
use core::slice::{self};
use super::{IntoIter, SetLenOnDrop, Vec};
// Specialization trait used for Vec::extend
#[cfg(not(no_global_oom_handling))]
pub(super) trait SpecExtend<T, I> {
    fn spec_extend(&mut self, iter: I);
}

// Specialization trait used for Vec::try_extend
pub(super) trait TrySpecExtend<T, I> {
    // Fallible counterpart of `spec_extend`: reports allocation failure via
    // `TryReserveError` instead of relying on global OOM handling.
    fn try_spec_extend(&mut self, iter: I) -> Result<(), TryReserveError>;
}

// Base case: an arbitrary iterator, pushed one element at a time.
#[cfg(not(no_global_oom_handling))]
impl<T, I, A: Allocator> SpecExtend<T, I> for Vec<T, A>
where
    I: Iterator<Item = T>,
{
    default fn spec_extend(&mut self, iter: I) {
        self.extend_desugared(iter)
    }
}

impl<T, I, A: Allocator> TrySpecExtend<T, I> for Vec<T, A>
where
    I: Iterator<Item = T>,
{
    default fn try_spec_extend(&mut self, iter: I) -> Result<(), TryReserveError> {
        self.try_extend_desugared(iter)
    }
}
// `TrustedLen` case: the exact length is known up front, so reserve once and
// write elements in place, tracking the committed length panic-safely via
// `SetLenOnDrop`.
#[cfg(not(no_global_oom_handling))]
impl<T, I, A: Allocator> SpecExtend<T, I> for Vec<T, A>
where
    I: TrustedLen<Item = T>,
{
    default fn spec_extend(&mut self, iterator: I) {
        // This is the case for a TrustedLen iterator.
        let (low, high) = iterator.size_hint();
        if let Some(additional) = high {
            debug_assert_eq!(
                low,
                additional,
                "TrustedLen iterator's size hint is not exact: {:?}",
                (low, high)
            );
            self.reserve(additional);
            unsafe {
                let mut ptr = self.as_mut_ptr().add(self.len());
                let mut local_len = SetLenOnDrop::new(&mut self.len);
                iterator.for_each(move |element| {
                    ptr::write(ptr, element);
                    ptr = ptr.offset(1);
                    // Since the loop executes user code which can panic we have to bump the pointer
                    // after each step.
                    // NB can't overflow since we would have had to alloc the address space
                    local_len.increment_len(1);
                });
            }
        } else {
            // Per TrustedLen contract a `None` upper bound means that the iterator length
            // truly exceeds usize::MAX, which would eventually lead to a capacity overflow anyway.
            // Since the other branch already panics eagerly (via `reserve()`) we do the same here.
            // This avoids additional codegen for a fallback code path which would eventually
            // panic anyway.
            panic!("capacity overflow");
        }
    }
}

impl<T, I, A: Allocator> TrySpecExtend<T, I> for Vec<T, A>
where
    I: TrustedLen<Item = T>,
{
    default fn try_spec_extend(&mut self, iterator: I) -> Result<(), TryReserveError> {
        // This is the case for a TrustedLen iterator.
        let (low, high) = iterator.size_hint();
        if let Some(additional) = high {
            debug_assert_eq!(
                low,
                additional,
                "TrustedLen iterator's size hint is not exact: {:?}",
                (low, high)
            );
            self.try_reserve(additional)?;
            unsafe {
                let mut ptr = self.as_mut_ptr().add(self.len());
                let mut local_len = SetLenOnDrop::new(&mut self.len);
                iterator.for_each(move |element| {
                    ptr::write(ptr, element);
                    ptr = ptr.offset(1);
                    // Since the loop executes user code which can panic we have to bump the pointer
                    // after each step.
                    // NB can't overflow since we would have had to alloc the address space
                    local_len.increment_len(1);
                });
            }
            Ok(())
        } else {
            // Unlike the infallible path, return the overflow as an error
            // rather than panicking.
            Err(TryReserveErrorKind::CapacityOverflow.into())
        }
    }
}
// `vec::IntoIter` case: the remaining elements are contiguous in memory, so
// bulk-copy them and mark the source iterator as exhausted (`ptr = end`) so
// its `Drop` does not drop them a second time.
#[cfg(not(no_global_oom_handling))]
impl<T, A: Allocator> SpecExtend<T, IntoIter<T>> for Vec<T, A> {
    fn spec_extend(&mut self, mut iterator: IntoIter<T>) {
        unsafe {
            self.append_elements(iterator.as_slice() as _);
        }
        iterator.ptr = iterator.end;
    }
}

impl<T, A: Allocator> TrySpecExtend<T, IntoIter<T>> for Vec<T, A> {
    fn try_spec_extend(&mut self, mut iterator: IntoIter<T>) -> Result<(), TryReserveError> {
        unsafe {
            self.try_append_elements(iterator.as_slice() as _)?;
        }
        iterator.ptr = iterator.end;
        Ok(())
    }
}

// Reference iterators of `Clone` elements: forward to the owned-element
// specialization via `cloned()`.
#[cfg(not(no_global_oom_handling))]
impl<'a, T: 'a, I, A: Allocator + 'a> SpecExtend<&'a T, I> for Vec<T, A>
where
    I: Iterator<Item = &'a T>,
    T: Clone,
{
    default fn spec_extend(&mut self, iterator: I) {
        self.spec_extend(iterator.cloned())
    }
}

impl<'a, T: 'a, I, A: Allocator + 'a> TrySpecExtend<&'a T, I> for Vec<T, A>
where
    I: Iterator<Item = &'a T>,
    T: Clone,
{
    default fn try_spec_extend(&mut self, iterator: I) -> Result<(), TryReserveError> {
        self.try_spec_extend(iterator.cloned())
    }
}

// Slice iterators of `Copy` elements: append the whole slice in one go.
#[cfg(not(no_global_oom_handling))]
impl<'a, T: 'a, A: Allocator + 'a> SpecExtend<&'a T, slice::Iter<'a, T>> for Vec<T, A>
where
    T: Copy,
{
    fn spec_extend(&mut self, iterator: slice::Iter<'a, T>) {
        let slice = iterator.as_slice();
        unsafe { self.append_elements(slice) };
    }
}

impl<'a, T: 'a, A: Allocator + 'a> TrySpecExtend<&'a T, slice::Iter<'a, T>> for Vec<T, A>
where
    T: Copy,
{
    fn try_spec_extend(&mut self, iterator: slice::Iter<'a, T>) -> Result<(), TryReserveError> {
        let slice = iterator.as_slice();
        unsafe { self.try_append_elements(slice) }
    }
}

17
rust/bindgen_parameters Normal file
View File

@ -0,0 +1,17 @@
# SPDX-License-Identifier: GPL-2.0
--opaque-type xregs_state
--opaque-type desc_struct
--opaque-type arch_lbr_state
--opaque-type local_apic
# `try` is a reserved keyword since Rust 2018; solved in `bindgen` v0.59.2,
# commit 2aed6b021680 ("context: Escape the try keyword properly").
--opaque-type kunit_try_catch
# If SMP is disabled, `arch_spinlock_t` is defined as a ZST which triggers a Rust
# warning. We don't need to peek into it anyway.
--opaque-type spinlock
# `seccomp`'s comment gets understood as a doctest
--no-doc-comments

29
rust/build_error.rs Normal file
View File

@ -0,0 +1,29 @@
// SPDX-License-Identifier: GPL-2.0
//! Build-time error.
//!
//! This crate provides a function `build_error`, which will panic in
//! compile-time if executed in const context, and will cause a build error
//! if not executed at compile time and the optimizer does not optimise away the
//! call.
//!
//! It is used by `build_assert!` in the kernel crate, allowing checking of
//! conditions that could be checked statically, but could not be enforced in
//! Rust yet (e.g. perform some checks in const functions, but those
//! functions could still be called in the runtime).
#![no_std]
/// Panics if executed in const context, or triggers a build error if not.
///
/// `#[no_mangle]` keeps the symbol name stable so the warning section below
/// (and callers in other crates) can refer to `build_error` directly;
/// `#[track_caller]` makes the panic report the caller's location.
#[inline(never)]
#[cold]
#[no_mangle]
#[track_caller]
pub const fn build_error(msg: &'static str) -> ! {
    panic!("{}", msg);
}

// With `CONFIG_RUST_BUILD_ASSERT_WARN=y`, place a message in the
// `.gnu.warning.build_error` section so the linker emits a warning (instead
// of the build failing) if a `build_error` call survives optimization.
#[cfg(CONFIG_RUST_BUILD_ASSERT_WARN)]
#[link_section = ".gnu.warning.build_error"]
#[used]
static BUILD_ERROR_WARNING: [u8; 45] = *b"call to build_error present after compilation";

57
rust/compiler_builtins.rs Normal file
View File

@ -0,0 +1,57 @@
// SPDX-License-Identifier: GPL-2.0
//! Our own `compiler_builtins`.
//!
//! Rust provides [`compiler_builtins`] as a port of LLVM's [`compiler-rt`].
//! Since we do not need the vast majority of them, we avoid the dependency
//! by providing this file.
//!
//! At the moment, some builtins are required that should not be. For instance,
//! [`core`] has 128-bit integers functionality which we should not be compiling
//! in. We will work with upstream [`core`] to provide feature flags to disable
//! the parts we do not need. For the moment, we define them to [`panic!`] at
//! runtime for simplicity to catch mistakes, instead of performing surgery
//! on `core.o`.
//!
//! In any case, all these symbols are weakened to ensure we do not override
//! those that may be provided by the rest of the kernel.
//!
//! [`compiler_builtins`]: https://github.com/rust-lang/compiler-builtins
//! [`compiler-rt`]: https://compiler-rt.llvm.org/
#![feature(compiler_builtins)]
#![compiler_builtins]
#![no_builtins]
#![no_std]
// Defines one `extern "C"` stub per listed intrinsic name; each stub panics
// at runtime with the shared `$reason` message (see the module docs for why
// these exist at all).
macro_rules! define_panicking_intrinsics(
    ($reason: tt, { $($ident: ident, )* }) => {
        $(
            #[doc(hidden)]
            #[no_mangle]
            pub extern "C" fn $ident() {
                panic!($reason);
            }
        )*
    }
);

define_panicking_intrinsics!("`i128` should not be used", {
    __ashrti3,
    __muloti4,
    __multi3,
});

define_panicking_intrinsics!("`u128` should not be used", {
    __ashlti3,
    __lshrti3,
    __udivmodti4,
    __udivti3,
    __umodti3,
});

// 32-bit Arm lowers `u64` division/modulo to these libcalls.
#[cfg(target_arch = "arm")]
define_panicking_intrinsics!("`u64` division/modulo should not be used", {
    __aeabi_uldivmod,
    __mulodi4,
});

20
rust/exports.c Normal file
View File

@ -0,0 +1,20 @@
// SPDX-License-Identifier: GPL-2.0
/*
* A hack to export Rust symbols for loadable modules without having to redo
* the entire `include/linux/export.h` logic in Rust.
*
* This requires the Rust's new/future `v0` mangling scheme because the default
* one ("legacy") uses invalid characters for C identifiers (thus we cannot use
* the `EXPORT_SYMBOL_*` macros).
*
* All symbols are exported as GPL-only to guarantee no GPL-only feature is
* accidentally exposed.
*/
#include <linux/module.h>
/*
 * Declares each Rust symbol (the declared type is irrelevant — only the
 * address is taken) and exports it as GPL-only.
 */
#define EXPORT_SYMBOL_RUST_GPL(sym) extern int sym; EXPORT_SYMBOL_GPL(sym);

/* Build-generated lists of EXPORT_SYMBOL_RUST_GPL() invocations, per crate. */
#include "exports_core_generated.h"
#include "exports_alloc_generated.h"
#include "exports_kernel_generated.h"

644
rust/helpers.c Normal file
View File

@ -0,0 +1,644 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Non-trivial C macros cannot be used in Rust. Similarly, inlined C functions
* cannot be called either. This file explicitly creates functions ("helpers")
* that wrap those so that they can be called from Rust.
*
* Even though Rust kernel modules should never use directly the bindings, some
* of these helpers need to be exported because Rust generics and inlined
* functions may not get their code generated in the crate where they are
* defined. Other helpers, called from non-inline functions, may not be
* exported, in principle. However, in general, the Rust compiler does not
* guarantee codegen will be performed for a non-inline function either.
* Therefore, this file exports all the helpers. In the future, this may be
* revisited to reduce the number of exports after the compiler is informed
* about the places codegen is required.
*
* All symbols are exported as GPL-only to guarantee no GPL-only feature is
* accidentally exposed.
*/
#include <linux/amba/bus.h>
#include <linux/bug.h>
#include <linux/build_bug.h>
#include <linux/clk.h>
#include <linux/errname.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/mutex.h>
#include <linux/netdevice.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/sched/signal.h>
#include <linux/security.h>
#include <linux/skbuff.h>
#include <linux/uaccess.h>
#include <linux/uio.h>
/* BUG() is a macro, so Rust needs a real function to call. */
__noreturn void rust_helper_BUG(void)
{
	BUG();
}
EXPORT_SYMBOL_GPL(rust_helper_BUG);

/* clk wrappers. */
void rust_helper_clk_disable_unprepare(struct clk *clk)
{
	return clk_disable_unprepare(clk);
}
EXPORT_SYMBOL_GPL(rust_helper_clk_disable_unprepare);

int rust_helper_clk_prepare_enable(struct clk *clk)
{
	return clk_prepare_enable(clk);
}
EXPORT_SYMBOL_GPL(rust_helper_clk_prepare_enable);

/* Userspace access wrappers; each returns the number of bytes NOT copied. */
unsigned long rust_helper_copy_from_user(void *to, const void __user *from, unsigned long n)
{
	return copy_from_user(to, from, n);
}
EXPORT_SYMBOL_GPL(rust_helper_copy_from_user);

unsigned long rust_helper_copy_to_user(void __user *to, const void *from, unsigned long n)
{
	return copy_to_user(to, from, n);
}
EXPORT_SYMBOL_GPL(rust_helper_copy_to_user);

unsigned long rust_helper_clear_user(void __user *to, unsigned long n)
{
	return clear_user(to, n);
}
EXPORT_SYMBOL_GPL(rust_helper_clear_user);

void __iomem *rust_helper_ioremap(resource_size_t offset, unsigned long size)
{
	return ioremap(offset, size);
}
EXPORT_SYMBOL_GPL(rust_helper_ioremap);
/*
 * MMIO accessor wrappers: readX()/writeX() and their _relaxed variants.
 * The 64-bit accessors are only compiled on CONFIG_64BIT, matching the
 * availability of readq()/writeq().
 */
u8 rust_helper_readb(const volatile void __iomem *addr)
{
	return readb(addr);
}
EXPORT_SYMBOL_GPL(rust_helper_readb);

u16 rust_helper_readw(const volatile void __iomem *addr)
{
	return readw(addr);
}
EXPORT_SYMBOL_GPL(rust_helper_readw);

u32 rust_helper_readl(const volatile void __iomem *addr)
{
	return readl(addr);
}
EXPORT_SYMBOL_GPL(rust_helper_readl);

#ifdef CONFIG_64BIT
u64 rust_helper_readq(const volatile void __iomem *addr)
{
	return readq(addr);
}
EXPORT_SYMBOL_GPL(rust_helper_readq);
#endif

void rust_helper_writeb(u8 value, volatile void __iomem *addr)
{
	writeb(value, addr);
}
EXPORT_SYMBOL_GPL(rust_helper_writeb);

void rust_helper_writew(u16 value, volatile void __iomem *addr)
{
	writew(value, addr);
}
EXPORT_SYMBOL_GPL(rust_helper_writew);

void rust_helper_writel(u32 value, volatile void __iomem *addr)
{
	writel(value, addr);
}
EXPORT_SYMBOL_GPL(rust_helper_writel);

#ifdef CONFIG_64BIT
void rust_helper_writeq(u64 value, volatile void __iomem *addr)
{
	writeq(value, addr);
}
EXPORT_SYMBOL_GPL(rust_helper_writeq);
#endif

u8 rust_helper_readb_relaxed(const volatile void __iomem *addr)
{
	return readb_relaxed(addr);
}
EXPORT_SYMBOL_GPL(rust_helper_readb_relaxed);

u16 rust_helper_readw_relaxed(const volatile void __iomem *addr)
{
	return readw_relaxed(addr);
}
EXPORT_SYMBOL_GPL(rust_helper_readw_relaxed);

u32 rust_helper_readl_relaxed(const volatile void __iomem *addr)
{
	return readl_relaxed(addr);
}
EXPORT_SYMBOL_GPL(rust_helper_readl_relaxed);

#ifdef CONFIG_64BIT
u64 rust_helper_readq_relaxed(const volatile void __iomem *addr)
{
	return readq_relaxed(addr);
}
EXPORT_SYMBOL_GPL(rust_helper_readq_relaxed);
#endif

void rust_helper_writeb_relaxed(u8 value, volatile void __iomem *addr)
{
	writeb_relaxed(value, addr);
}
EXPORT_SYMBOL_GPL(rust_helper_writeb_relaxed);

void rust_helper_writew_relaxed(u16 value, volatile void __iomem *addr)
{
	writew_relaxed(value, addr);
}
EXPORT_SYMBOL_GPL(rust_helper_writew_relaxed);

void rust_helper_writel_relaxed(u32 value, volatile void __iomem *addr)
{
	writel_relaxed(value, addr);
}
EXPORT_SYMBOL_GPL(rust_helper_writel_relaxed);

#ifdef CONFIG_64BIT
void rust_helper_writeq_relaxed(u64 value, volatile void __iomem *addr)
{
	writeq_relaxed(value, addr);
}
EXPORT_SYMBOL_GPL(rust_helper_writeq_relaxed);
#endif

void rust_helper_memcpy_fromio(void *to, const volatile void __iomem *from, long count)
{
	memcpy_fromio(to, from, count);
}
EXPORT_SYMBOL_GPL(rust_helper_memcpy_fromio);
/*
 * Spinlock wrappers. The *_init helpers route through the lockdep-aware
 * initializers when CONFIG_DEBUG_SPINLOCK is enabled so each Rust lock gets
 * its own name and lock class key.
 */
void rust_helper___spin_lock_init(spinlock_t *lock, const char *name,
				  struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_SPINLOCK
	__spin_lock_init(lock, name, key);
#else
	spin_lock_init(lock);
#endif
}
EXPORT_SYMBOL_GPL(rust_helper___spin_lock_init);

void rust_helper_spin_lock(spinlock_t *lock)
{
	spin_lock(lock);
}
EXPORT_SYMBOL_GPL(rust_helper_spin_lock);

void rust_helper_spin_unlock(spinlock_t *lock)
{
	spin_unlock(lock);
}
EXPORT_SYMBOL_GPL(rust_helper_spin_unlock);

/*
 * spin_lock_irqsave() writes its flags through a macro out-parameter, which
 * Rust cannot use; return the flags by value instead.
 */
unsigned long rust_helper_spin_lock_irqsave(spinlock_t *lock)
{
	unsigned long flags;

	spin_lock_irqsave(lock, flags);

	return flags;
}
EXPORT_SYMBOL_GPL(rust_helper_spin_lock_irqsave);

void rust_helper_spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
{
	spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL_GPL(rust_helper_spin_unlock_irqrestore);

void rust_helper__raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
				     struct lock_class_key *key)
{
#ifdef CONFIG_DEBUG_SPINLOCK
	_raw_spin_lock_init(lock, name, key);
#else
	raw_spin_lock_init(lock);
#endif
}
EXPORT_SYMBOL_GPL(rust_helper__raw_spin_lock_init);

void rust_helper_raw_spin_lock(raw_spinlock_t *lock)
{
	raw_spin_lock(lock);
}
EXPORT_SYMBOL_GPL(rust_helper_raw_spin_lock);

void rust_helper_raw_spin_unlock(raw_spinlock_t *lock)
{
	raw_spin_unlock(lock);
}
EXPORT_SYMBOL_GPL(rust_helper_raw_spin_unlock);

unsigned long rust_helper_raw_spin_lock_irqsave(raw_spinlock_t *lock)
{
	unsigned long flags;

	raw_spin_lock_irqsave(lock, flags);

	return flags;
}
EXPORT_SYMBOL_GPL(rust_helper_raw_spin_lock_irqsave);

void rust_helper_raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
					    unsigned long flags)
{
	raw_spin_unlock_irqrestore(lock, flags);
}
EXPORT_SYMBOL_GPL(rust_helper_raw_spin_unlock_irqrestore);
/* Wait-queue, signal, page, scheduling, iov_iter and error-pointer wrappers. */
void rust_helper_init_wait(struct wait_queue_entry *wq_entry)
{
	init_wait(wq_entry);
}
EXPORT_SYMBOL_GPL(rust_helper_init_wait);

void rust_helper_init_waitqueue_func_entry(struct wait_queue_entry *wq_entry,
					   wait_queue_func_t func)
{
	init_waitqueue_func_entry(wq_entry, func);
}
EXPORT_SYMBOL_GPL(rust_helper_init_waitqueue_func_entry);

int rust_helper_signal_pending(struct task_struct *t)
{
	return signal_pending(t);
}
EXPORT_SYMBOL_GPL(rust_helper_signal_pending);

struct page *rust_helper_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	return alloc_pages(gfp_mask, order);
}
EXPORT_SYMBOL_GPL(rust_helper_alloc_pages);

void *rust_helper_kmap(struct page *page)
{
	return kmap(page);
}
EXPORT_SYMBOL_GPL(rust_helper_kmap);

void rust_helper_kunmap(struct page *page)
{
	return kunmap(page);
}
EXPORT_SYMBOL_GPL(rust_helper_kunmap);

int rust_helper_cond_resched(void)
{
	return cond_resched();
}
EXPORT_SYMBOL_GPL(rust_helper_cond_resched);

size_t rust_helper_copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
{
	return copy_from_iter(addr, bytes, i);
}
EXPORT_SYMBOL_GPL(rust_helper_copy_from_iter);

size_t rust_helper_copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
{
	return copy_to_iter(addr, bytes, i);
}
EXPORT_SYMBOL_GPL(rust_helper_copy_to_iter);

/* ERR_PTR-style encoded error pointer helpers. */
bool rust_helper_IS_ERR(__force const void *ptr)
{
	return IS_ERR(ptr);
}
EXPORT_SYMBOL_GPL(rust_helper_IS_ERR);

long rust_helper_PTR_ERR(__force const void *ptr)
{
	return PTR_ERR(ptr);
}
EXPORT_SYMBOL_GPL(rust_helper_PTR_ERR);

const char *rust_helper_errname(int err)
{
	return errname(err);
}
EXPORT_SYMBOL_GPL(rust_helper_errname);
/* Mutex, driver-data and refcount wrappers. */
void rust_helper_mutex_lock(struct mutex *lock)
{
	mutex_lock(lock);
}
EXPORT_SYMBOL_GPL(rust_helper_mutex_lock);

void rust_helper_amba_set_drvdata(struct amba_device *dev, void *data)
{
	amba_set_drvdata(dev, data);
}
EXPORT_SYMBOL_GPL(rust_helper_amba_set_drvdata);

void *rust_helper_amba_get_drvdata(struct amba_device *dev)
{
	return amba_get_drvdata(dev);
}
EXPORT_SYMBOL_GPL(rust_helper_amba_get_drvdata);

void *
rust_helper_platform_get_drvdata(const struct platform_device *pdev)
{
	return platform_get_drvdata(pdev);
}
EXPORT_SYMBOL_GPL(rust_helper_platform_get_drvdata);

void
rust_helper_platform_set_drvdata(struct platform_device *pdev,
				 void *data)
{
	return platform_set_drvdata(pdev, data);
}
EXPORT_SYMBOL_GPL(rust_helper_platform_set_drvdata);

/* REFCOUNT_INIT() is a struct-initializer macro; wrap it as a constructor. */
refcount_t rust_helper_REFCOUNT_INIT(int n)
{
	return (refcount_t)REFCOUNT_INIT(n);
}
EXPORT_SYMBOL_GPL(rust_helper_REFCOUNT_INIT);

void rust_helper_refcount_inc(refcount_t *r)
{
	refcount_inc(r);
}
EXPORT_SYMBOL_GPL(rust_helper_refcount_inc);

bool rust_helper_refcount_dec_and_test(refcount_t *r)
{
	return refcount_dec_and_test(r);
}
EXPORT_SYMBOL_GPL(rust_helper_refcount_dec_and_test);
void rust_helper_rb_link_node(struct rb_node *node, struct rb_node *parent,
struct rb_node **rb_link)
{
rb_link_node(node, parent, rb_link);
}
EXPORT_SYMBOL_GPL(rust_helper_rb_link_node);
struct task_struct *rust_helper_get_current(void)
{
return current;
}
EXPORT_SYMBOL_GPL(rust_helper_get_current);
void rust_helper_get_task_struct(struct task_struct *t)
{
get_task_struct(t);
}
EXPORT_SYMBOL_GPL(rust_helper_get_task_struct);
void rust_helper_put_task_struct(struct task_struct *t)
{
put_task_struct(t);
}
EXPORT_SYMBOL_GPL(rust_helper_put_task_struct);
int rust_helper_security_binder_set_context_mgr(const struct cred *mgr)
{
return security_binder_set_context_mgr(mgr);
}
EXPORT_SYMBOL_GPL(rust_helper_security_binder_set_context_mgr);
int rust_helper_security_binder_transaction(const struct cred *from,
const struct cred *to)
{
return security_binder_transaction(from, to);
}
EXPORT_SYMBOL_GPL(rust_helper_security_binder_transaction);
int rust_helper_security_binder_transfer_binder(const struct cred *from,
const struct cred *to)
{
return security_binder_transfer_binder(from, to);
}
EXPORT_SYMBOL_GPL(rust_helper_security_binder_transfer_binder);
int rust_helper_security_binder_transfer_file(const struct cred *from,
const struct cred *to,
struct file *file)
{
return security_binder_transfer_file(from, to, file);
}
EXPORT_SYMBOL_GPL(rust_helper_security_binder_transfer_file);
struct file *rust_helper_get_file(struct file *f)
{
return get_file(f);
}
EXPORT_SYMBOL_GPL(rust_helper_get_file);
/* RCU helpers. */

void rust_helper_rcu_read_lock(void)
{
	rcu_read_lock();
}
EXPORT_SYMBOL_GPL(rust_helper_rcu_read_lock);

void rust_helper_rcu_read_unlock(void)
{
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rust_helper_rcu_read_unlock);

void rust_helper_synchronize_rcu(void)
{
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(rust_helper_synchronize_rcu);

/* Driver-core helpers. */

void *rust_helper_dev_get_drvdata(struct device *dev)
{
	return dev_get_drvdata(dev);
}
EXPORT_SYMBOL_GPL(rust_helper_dev_get_drvdata);

const char *rust_helper_dev_name(const struct device *dev)
{
	return dev_name(dev);
}
EXPORT_SYMBOL_GPL(rust_helper_dev_name);

/*
 * Sequence counter helpers.
 *
 * Note: the write-side wrappers call the `do_*` implementations directly,
 * since `write_seqcount_begin()`/`write_seqcount_end()` are macros.
 */

void rust_helper___seqcount_init(seqcount_t *s, const char *name,
				 struct lock_class_key *key)
{
	__seqcount_init(s, name, key);
}
EXPORT_SYMBOL_GPL(rust_helper___seqcount_init);

unsigned rust_helper_read_seqcount_begin(seqcount_t *s)
{
	return read_seqcount_begin(s);
}
EXPORT_SYMBOL_GPL(rust_helper_read_seqcount_begin);

int rust_helper_read_seqcount_retry(seqcount_t *s, unsigned start)
{
	return read_seqcount_retry(s, start);
}
EXPORT_SYMBOL_GPL(rust_helper_read_seqcount_retry);

void rust_helper_write_seqcount_begin(seqcount_t *s)
{
	do_write_seqcount_begin(s);
}
EXPORT_SYMBOL_GPL(rust_helper_write_seqcount_begin);

void rust_helper_write_seqcount_end(seqcount_t *s)
{
	do_write_seqcount_end(s);
}
EXPORT_SYMBOL_GPL(rust_helper_write_seqcount_end);

/* IRQ chip helpers. */

void rust_helper_irq_set_handler_locked(struct irq_data *data,
					irq_flow_handler_t handler)
{
	irq_set_handler_locked(data, handler);
}
EXPORT_SYMBOL_GPL(rust_helper_irq_set_handler_locked);

void *rust_helper_irq_data_get_irq_chip_data(struct irq_data *d)
{
	return irq_data_get_irq_chip_data(d);
}
EXPORT_SYMBOL_GPL(rust_helper_irq_data_get_irq_chip_data);

struct irq_chip *rust_helper_irq_desc_get_chip(struct irq_desc *desc)
{
	return irq_desc_get_chip(desc);
}
EXPORT_SYMBOL_GPL(rust_helper_irq_desc_get_chip);

void *rust_helper_irq_desc_get_handler_data(struct irq_desc *desc)
{
	return irq_desc_get_handler_data(desc);
}
EXPORT_SYMBOL_GPL(rust_helper_irq_desc_get_handler_data);

void rust_helper_chained_irq_enter(struct irq_chip *chip,
				   struct irq_desc *desc)
{
	chained_irq_enter(chip, desc);
}
EXPORT_SYMBOL_GPL(rust_helper_chained_irq_enter);

void rust_helper_chained_irq_exit(struct irq_chip *chip,
				  struct irq_desc *desc)
{
	chained_irq_exit(chip, desc);
}
EXPORT_SYMBOL_GPL(rust_helper_chained_irq_exit);
/* Credential reference helpers. */

const struct cred *rust_helper_get_cred(const struct cred *cred)
{
	return get_cred(cred);
}
EXPORT_SYMBOL_GPL(rust_helper_get_cred);

void rust_helper_put_cred(const struct cred *cred)
{
	put_cred(cred);
}
EXPORT_SYMBOL_GPL(rust_helper_put_cred);

/* Device tree matching helper. */

const struct of_device_id *rust_helper_of_match_device(
	const struct of_device_id *matches, const struct device *dev)
{
	return of_match_device(matches, dev);
}
EXPORT_SYMBOL_GPL(rust_helper_of_match_device);

/* Completion helper. */

void rust_helper_init_completion(struct completion *c)
{
	init_completion(c);
}
EXPORT_SYMBOL_GPL(rust_helper_init_completion);

/* Networking helpers (sk_buff, net_device, net namespace, netfilter). */

struct sk_buff *rust_helper_skb_get(struct sk_buff *skb)
{
	return skb_get(skb);
}
EXPORT_SYMBOL_GPL(rust_helper_skb_get);

unsigned int rust_helper_skb_headlen(const struct sk_buff *skb)
{
	return skb_headlen(skb);
}
EXPORT_SYMBOL_GPL(rust_helper_skb_headlen);

void rust_helper_dev_hold(struct net_device *dev)
{
	return dev_hold(dev);
}
EXPORT_SYMBOL_GPL(rust_helper_dev_hold);

void rust_helper_dev_put(struct net_device *dev)
{
	return dev_put(dev);
}
EXPORT_SYMBOL_GPL(rust_helper_dev_put);

struct net *rust_helper_get_net(struct net *net)
{
	return get_net(net);
}
EXPORT_SYMBOL_GPL(rust_helper_get_net);

void rust_helper_put_net(struct net *net)
{
	return put_net(net);
}
EXPORT_SYMBOL_GPL(rust_helper_put_net);

unsigned int rust_helper_NF_QUEUE_NR(unsigned int n)
{
	return NF_QUEUE_NR(n);
}
EXPORT_SYMBOL_GPL(rust_helper_NF_QUEUE_NR);

/*
 * We use `bindgen`'s `--size_t-is-usize` option to bind the C `size_t` type
 * as the Rust `usize` type, so we can use it in contexts where Rust
 * expects a `usize` like slice (array) indices. `usize` is defined to be
 * the same as C's `uintptr_t` type (can hold any pointer) but not
 * necessarily the same as `size_t` (can hold the size of any single
 * object). Most modern platforms use the same concrete integer type for
 * both of them, but in case we find ourselves on a platform where
 * that's not true, fail early instead of risking ABI or
 * integer-overflow issues.
 *
 * If your platform fails this assertion, it means that you are in
 * danger of integer-overflow bugs (even if you attempt to remove
 * `--size_t-is-usize`). It may be easiest to change the kernel ABI on
 * your platform such that `size_t` matches `uintptr_t` (i.e., to increase
 * `size_t`, because `uintptr_t` has to be at least as big as `size_t`).
 */
static_assert(
	sizeof(size_t) == sizeof(uintptr_t) &&
	__alignof__(size_t) == __alignof__(uintptr_t),
	"Rust code expects C `size_t` to match Rust `usize`"
);

65
rust/kernel/allocator.rs Normal file
View File

@ -0,0 +1,65 @@
// SPDX-License-Identifier: GPL-2.0
//! Allocator support.
use core::alloc::{GlobalAlloc, Layout};
use core::ptr;
use crate::bindings;
use crate::c_types;
/// Allocator that forwards all requests to the kernel slab allocator.
struct KernelAllocator;

unsafe impl GlobalAlloc for KernelAllocator {
    unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
        // `krealloc()` is used instead of `kmalloc()` because the latter is
        // an inline function and cannot be bound to as a result.
        //
        // NOTE(review): `layout.align()` is ignored here, so this relies on
        // the allocator's default alignment being sufficient for the request;
        // TODO confirm behaviour for over-aligned types.
        unsafe { bindings::krealloc(ptr::null(), layout.size(), bindings::GFP_KERNEL) as *mut u8 }
    }

    unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) {
        // `ptr` was returned by `krealloc()` in `alloc` above, so `kfree()`
        // is the matching release function.
        unsafe {
            bindings::kfree(ptr as *const c_types::c_void);
        }
    }
}

// Registered as the crate's global allocator: all Rust heap allocations
// (e.g. `Box`, `Vec`) go through `KernelAllocator`.
#[global_allocator]
static ALLOCATOR: KernelAllocator = KernelAllocator;
// `rustc` only generates these for some crate types. Even then, we would need
// to extract the object file that has them from the archive. For the moment,
// let's generate them ourselves instead.
//
// Note that `#[no_mangle]` implies exported too, nowadays.

/// Allocation shim normally emitted by `rustc`; a fresh allocation is a
/// `krealloc()` of a null pointer.
#[no_mangle]
fn __rust_alloc(size: usize, _align: usize) -> *mut u8 {
    unsafe { bindings::krealloc(core::ptr::null(), size, bindings::GFP_KERNEL) as *mut u8 }
}

/// Deallocation shim; forwards to `kfree()`.
#[no_mangle]
fn __rust_dealloc(ptr: *mut u8, _size: usize, _align: usize) {
    unsafe { bindings::kfree(ptr as *const c_types::c_void) };
}

/// Reallocation shim; forwards to `krealloc()` with the new size.
#[no_mangle]
fn __rust_realloc(ptr: *mut u8, _old_size: usize, _align: usize, new_size: usize) -> *mut u8 {
    unsafe {
        bindings::krealloc(
            ptr as *const c_types::c_void,
            new_size,
            bindings::GFP_KERNEL,
        ) as *mut u8
    }
}

/// Zeroed-allocation shim; same as `__rust_alloc` but adds `__GFP_ZERO` to
/// the allocation flags.
#[no_mangle]
fn __rust_alloc_zeroed(size: usize, _align: usize) -> *mut u8 {
    unsafe {
        bindings::krealloc(
            core::ptr::null(),
            size,
            bindings::GFP_KERNEL | bindings::__GFP_ZERO,
        ) as *mut u8
    }
}

257
rust/kernel/amba.rs Normal file
View File

@ -0,0 +1,257 @@
// SPDX-License-Identifier: GPL-2.0

//! Amba devices and drivers.
//!
//! C header: [`include/linux/amba/bus.h`](../../../../include/linux/amba/bus.h)

use crate::{
    bindings, c_types, device, driver, error::from_kernel_result, io_mem::Resource, power,
    str::CStr, to_result, types::PointerWrapper, Result, ThisModule,
};

/// A registration of an amba driver.
///
/// Convenience alias over the generic [`driver::Registration`] using the
/// amba [`Adapter`].
pub type Registration<T> = driver::Registration<Adapter<T>>;

/// Id of an Amba device.
#[derive(Clone, Copy)]
pub struct DeviceId {
    /// Device id.
    pub id: u32,

    /// Mask that identifies which bits are valid in the device id.
    pub mask: u32,
}

// SAFETY: `ZERO` is all zeroed-out and `to_rawid` stores `offset` in `amba_id::data`.
unsafe impl const driver::RawDeviceId for DeviceId {
    type RawType = bindings::amba_id;

    // All-zero raw entry; presumably used by the generic id-table machinery
    // as the table terminator — see `driver::RawDeviceId`.
    const ZERO: Self::RawType = bindings::amba_id {
        id: 0,
        mask: 0,
        data: core::ptr::null_mut(),
    };

    fn to_rawid(&self, offset: isize) -> Self::RawType {
        bindings::amba_id {
            id: self.id,
            mask: self.mask,
            // `data` carries the byte offset from this raw entry to the
            // corresponding `IdInfo`; it is decoded in `probe_callback`.
            data: offset as _,
        }
    }
}

/// An amba driver.
///
/// Note: the `= ...` defaults on the associated types below rely on the
/// unstable `associated_type_defaults` feature.
pub trait Driver {
    /// Data stored on device by driver.
    type Data: PointerWrapper + Send + Sync + driver::DeviceRemoval = ();

    /// The type that implements the power-management operations.
    ///
    /// The default is a type that implements no power-management operations. Drivers that do
    /// implement them need to specify the type (commonly [`Self`]).
    type PowerOps: power::Operations<Data = Self::Data> = power::NoOperations<Self::Data>;

    /// The type holding information about each device id supported by the driver.
    type IdInfo: 'static = ();

    /// The table of device ids supported by the driver.
    ///
    /// [`None`] (the default) means the driver matches no ids.
    const ID_TABLE: Option<driver::IdTable<'static, DeviceId, Self::IdInfo>> = None;

    /// Probes for the device with the given id.
    fn probe(dev: &mut Device, id_info: Option<&Self::IdInfo>) -> Result<Self::Data>;

    /// Cleans any resources up that are associated with the device.
    ///
    /// This is called when the driver is detached from the device.
    fn remove(_data: &Self::Data) {}
}
/// An adapter for the registration of Amba drivers.
///
/// Bridges the generic [`driver::DriverOps`] interface to the C
/// `amba_driver` registration functions.
pub struct Adapter<T: Driver>(T);

impl<T: Driver> driver::DriverOps for Adapter<T> {
    type RegType = bindings::amba_driver;

    unsafe fn register(
        reg: *mut bindings::amba_driver,
        name: &'static CStr,
        module: &'static ThisModule,
    ) -> Result {
        // SAFETY: By the safety requirements of this function (defined in the trait definition),
        // `reg` is non-null and valid.
        let amba = unsafe { &mut *reg };
        amba.drv.name = name.as_char_ptr();
        amba.drv.owner = module.0;
        amba.probe = Some(probe_callback::<T>);
        amba.remove = Some(remove_callback::<T>);
        // Only install an id table when the driver declares one.
        if let Some(t) = T::ID_TABLE {
            amba.id_table = t.as_ref();
        }
        // Power-management callbacks are only hooked up when `CONFIG_PM` is
        // enabled in the kernel configuration.
        if cfg!(CONFIG_PM) {
            // SAFETY: `probe_callback` sets the driver data after calling `T::Data::into_pointer`,
            // and we guarantee that `T::Data` is the same as `T::PowerOps::Data` by a constraint
            // in the type declaration.
            amba.drv.pm = unsafe { power::OpsTable::<T::PowerOps>::build() };
        }
        // SAFETY: By the safety requirements of this function, `reg` is valid and fully
        // initialised.
        to_result(|| unsafe { bindings::amba_driver_register(reg) })
    }

    unsafe fn unregister(reg: *mut bindings::amba_driver) {
        // SAFETY: By the safety requirements of this function (defined in the trait definition),
        // `reg` was passed (and updated) by a previous successful call to `amba_driver_register`.
        unsafe { bindings::amba_driver_unregister(reg) };
    }
}
/// The C-facing `probe` callback installed by [`Adapter::register`].
///
/// Decodes the optional id info, calls [`Driver::probe`] and stores the
/// resulting driver data on the device. `from_kernel_result!` converts the
/// `Result` of the block into the C integer return value.
unsafe extern "C" fn probe_callback<T: Driver>(
    adev: *mut bindings::amba_device,
    aid: *const bindings::amba_id,
) -> c_types::c_int {
    from_kernel_result! {
        // SAFETY: `adev` is valid by the contract with the C code. `dev` is alive only for the
        // duration of this call, so it is guaranteed to remain alive for the lifetime of `dev`.
        let mut dev = unsafe { Device::from_ptr(adev) };
        // SAFETY: `aid` is valid by the requirements the contract with the C code.
        let offset = unsafe { (*aid).data };
        let info = if offset.is_null() {
            None
        } else {
            // SAFETY: The offset comes from a previous call to `offset_from` in `IdArray::new`,
            // which guarantees that the resulting pointer is within the table.
            let ptr = unsafe { aid.cast::<u8>().offset(offset as _).cast::<Option<T::IdInfo>>() };
            // SAFETY: The id table has a static lifetime, so `ptr` is guaranteed to be valid for
            // read.
            unsafe { (&*ptr).as_ref() }
        };
        let data = T::probe(&mut dev, info)?;
        let ptr = T::Data::into_pointer(data);
        // SAFETY: `adev` is valid for write by the contract with the C code.
        unsafe { bindings::amba_set_drvdata(adev, ptr as _) };
        Ok(0)
    }
}

/// The C-facing `remove` callback installed by [`Adapter::register`].
///
/// Recovers the driver data stored by `probe_callback`, then runs the
/// driver's and the data's cleanup.
unsafe extern "C" fn remove_callback<T: Driver>(adev: *mut bindings::amba_device) {
    // SAFETY: `adev` is valid by the contract with the C code.
    let ptr = unsafe { bindings::amba_get_drvdata(adev) };
    // SAFETY: The value returned by `amba_get_drvdata` was stored by a previous call to
    // `amba_set_drvdata` in `probe_callback` above; the value comes from a call to
    // `T::Data::into_pointer`.
    let data = unsafe { T::Data::from_pointer(ptr) };
    T::remove(&data);
    <T::Data as driver::DeviceRemoval>::device_remove(&data);
}
/// An Amba device.
///
/// # Invariants
///
/// The field `ptr` is non-null and valid for the lifetime of the object.
pub struct Device {
    // Raw pointer to the underlying C `amba_device`.
    ptr: *mut bindings::amba_device,
    // The device's io mem resource; taken at most once via `take_resource`.
    res: Option<Resource>,
}
impl Device {
    /// Creates a new device from the given pointer.
    ///
    /// # Safety
    ///
    /// `ptr` must be non-null and valid. It must remain valid for the lifetime of the returned
    /// instance.
    unsafe fn from_ptr(ptr: *mut bindings::amba_device) -> Self {
        // SAFETY: The safety requirements of the function guarantee that `ptr` may be
        // dereferenced here.
        let adev = unsafe { &*ptr };
        // INVARIANT: The safety requirements of the function ensure the lifetime invariant.
        Self {
            ptr,
            res: Resource::new(adev.res.start, adev.res.end),
        }
    }

    /// Returns the io mem resource associated with the device, if there is one.
    ///
    /// Ownership of the resource is transferred to the caller, so subsequent calls to this
    /// function will return [`None`].
    pub fn take_resource(&mut self) -> Option<Resource> {
        core::mem::take(&mut self.res)
    }

    /// Returns the index-th irq associated with the device, if one exists.
    pub fn irq(&self, index: usize) -> Option<u32> {
        // SAFETY: By the type invariants, `self.ptr` is valid for read.
        let adev = unsafe { &*self.ptr };
        // A zero entry in the irq array means "no irq at this index".
        adev.irq.get(index).copied().filter(|&irq| irq != 0)
    }
}
// SAFETY: The device returned by `raw_device` is the raw Amba device.
unsafe impl device::RawDevice for Device {
    fn raw_device(&self) -> *mut bindings::device {
        // SAFETY: By the type invariants, we know that `self.ptr` is non-null and valid.
        // The embedded `dev` field is the generic driver-core device.
        unsafe { &mut (*self.ptr).dev }
    }
}
/// Declares a kernel module that exposes a single amba driver.
///
/// # Examples
///
/// ```ignore
/// # use kernel::{amba, define_amba_id_table, module_amba_driver};
/// #
/// struct MyDriver;
/// impl amba::Driver for MyDriver {
///     // [...]
/// #   fn probe(_dev: &mut amba::Device, _id: Option<&Self::IdInfo>) -> Result {
/// #       Ok(())
/// #   }
/// #   define_amba_id_table! {(), [
/// #       ({ id: 0x00041061, mask: 0x000fffff }, None),
/// #   ]}
/// }
///
/// module_amba_driver! {
///     type: MyDriver,
///     name: b"module_name",
///     author: b"Author name",
///     license: b"GPL",
/// }
/// ```
#[macro_export]
macro_rules! module_amba_driver {
    // Forward all tokens to the generic driver-module macro, instantiated
    // with the amba [`Adapter`].
    ($($f:tt)*) => {
        $crate::module_driver!(<T>, $crate::amba::Adapter<T>, { $($f)* });
    };
}

/// Defines the id table for amba devices.
///
/// # Examples
///
/// ```
/// # use kernel::{amba, define_amba_id_table};
/// #
/// # struct Sample;
/// # impl kernel::amba::Driver for Sample {
/// #   fn probe(_dev: &mut amba::Device, _id: Option<&Self::IdInfo>) -> Result {
/// #       Ok(())
/// #   }
///     define_amba_id_table! {(), [
///         ({ id: 0x00041061, mask: 0x000fffff }, None),
///     ]}
/// # }
/// ```
#[macro_export]
macro_rules! define_amba_id_table {
    // Emits both the `IdInfo` associated-type definition and the `ID_TABLE`
    // constant (via the generic `define_id_table!`) into the impl block
    // where the macro is invoked.
    ($data_type:ty, $($t:tt)*) => {
        type IdInfo = $data_type;
        $crate::define_id_table!(ID_TABLE, $crate::amba::DeviceId, $data_type, $($t)*);
    };
}

47
rust/kernel/bindings.rs Normal file
View File

@ -0,0 +1,47 @@
// SPDX-License-Identifier: GPL-2.0
//! Bindings.
//!
//! Imports the generated bindings by `bindgen`.
// See https://github.com/rust-lang/rust-bindgen/issues/1651.
#![cfg_attr(test, allow(deref_nullptr))]
#![cfg_attr(test, allow(unaligned_references))]
#![cfg_attr(test, allow(unsafe_op_in_unsafe_fn))]
#![allow(
clippy::all,
non_camel_case_types,
non_upper_case_globals,
non_snake_case,
improper_ctypes,
unreachable_pub,
unsafe_op_in_unsafe_fn
)]
mod bindings_raw {
    // Use glob import here to expose all helpers.
    // Symbols defined within the module will take precedence to the glob import.
    pub use super::bindings_helper::*;

    use crate::c_types;

    // `OBJTREE` is provided by the build system; the included file is the
    // output of `bindgen` over the kernel headers.
    include!(concat!(env!("OBJTREE"), "/rust/bindings_generated.rs"));
}

// When both a directly exposed symbol and a helper exists for the same function,
// the directly exposed symbol is preferred and the helper becomes dead code, so
// ignore the warning here.
#[allow(dead_code)]
mod bindings_helper {
    // Import the generated bindings for types.
    use super::bindings_raw::*;

    use crate::c_types;

    include!(concat!(
        env!("OBJTREE"),
        "/rust/bindings_helpers_generated.rs"
    ));
}

pub use bindings_raw::*;

// Re-export, under the names Rust code expects, constants that are defined
// on the C side (the `BINDINGS_*` constants in the bindings helper header)
// because the original macros cannot be bound directly.
pub const GFP_KERNEL: gfp_t = BINDINGS_GFP_KERNEL;
pub const __GFP_ZERO: gfp_t = BINDINGS___GFP_ZERO;
pub const __GFP_HIGHMEM: gfp_t = ___GFP_HIGHMEM;

View File

@ -0,0 +1,46 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Header that contains the code (mostly headers) for which Rust bindings
* will be automatically generated by `bindgen`.
*
* Sorted alphabetically.
*/
#include <kunit/test.h>
#include <linux/amba/bus.h>
#include <linux/cdev.h>
#include <linux/clk.h>
#include <linux/errname.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/gpio/driver.h>
#include <linux/hw_random.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/netfilter_arp.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/random.h>
#include <linux/security.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/uaccess.h>
#include <linux/uio.h>
#include <uapi/linux/android/binder.h>
/*
 * `bindgen` gets confused at certain things.
 *
 * Assigning these macro values to typed constants gives `bindgen` concrete
 * symbols to bind; they are re-exported on the Rust side (see
 * `rust/kernel/bindings.rs`).
 */
const gfp_t BINDINGS_GFP_KERNEL = GFP_KERNEL;
const gfp_t BINDINGS___GFP_ZERO = __GFP_ZERO;
const __poll_t BINDINGS_EPOLLIN = EPOLLIN;
const __poll_t BINDINGS_EPOLLOUT = EPOLLOUT;
const __poll_t BINDINGS_EPOLLERR = EPOLLERR;
const __poll_t BINDINGS_EPOLLHUP = EPOLLHUP;
View File

@ -0,0 +1,82 @@
// SPDX-License-Identifier: GPL-2.0
//! Build-time assert.
/// Fails the build if the code path calling `build_error!` can possibly be executed.
///
/// If the macro is executed in const context, `build_error!` will panic.
/// If the compiler or optimizer cannot guarantee that `build_error!` can never
/// be called, a build error will be triggered.
///
/// # Examples
/// ```
/// # use kernel::build_error;
/// #[inline]
/// fn foo(a: usize) -> usize {
///     a.checked_add(1).unwrap_or_else(|| build_error!("overflow"))
/// }
///
/// assert_eq!(foo(usize::MAX - 1), usize::MAX); // OK.
/// // foo(usize::MAX); // Fails to compile.
/// ```
#[macro_export]
macro_rules! build_error {
    // No message given: forward an empty string.
    () => {{
        $crate::build_error("")
    }};
    // Custom message: forwarded verbatim to the `build_error` function.
    ($msg:expr) => {{
        $crate::build_error($msg)
    }};
}
/// Asserts that a boolean expression is `true` at compile time.
///
/// If the condition is evaluated to `false` in const context, `build_assert!`
/// will panic. If the compiler or optimizer cannot guarantee the condition will
/// be evaluated to `true`, a build error will be triggered.
///
/// [`static_assert!`] should be preferred to `build_assert!` whenever possible.
///
/// # Examples
///
/// These examples show that different types of [`assert!`] will trigger errors
/// at different stage of compilation. It is preferred to err as early as
/// possible, so [`static_assert!`] should be used whenever possible.
// TODO: Could be `compile_fail` when supported.
/// ```ignore
/// fn foo() {
///     static_assert!(1 > 1); // Compile-time error
///     build_assert!(1 > 1); // Build-time error
///     assert!(1 > 1); // Run-time error
/// }
/// ```
///
/// When the condition refers to generic parameters or parameters of an inline function,
/// [`static_assert!`] cannot be used. Use `build_assert!` in this scenario.
/// ```
/// fn foo<const N: usize>() {
///     // `static_assert!(N > 1);` is not allowed
///     build_assert!(N > 1); // Build-time check
///     assert!(N > 1); // Run-time check
/// }
///
/// #[inline]
/// fn bar(n: usize) {
///     // `static_assert!(n > 1);` is not allowed
///     build_assert!(n > 1); // Build-time check
///     assert!(n > 1); // Run-time check
/// }
/// ```
#[macro_export]
macro_rules! build_assert {
    // Condition only; `$(,)?` permits a trailing comma. The message is
    // derived from the stringified condition.
    ($cond:expr $(,)?) => {{
        if !$cond {
            $crate::build_error(concat!("assertion failed: ", stringify!($cond)));
        }
    }};
    // Condition plus custom message.
    ($cond:expr, $msg:expr) => {{
        if !$cond {
            $crate::build_error($msg);
        }
    }};
}

119
rust/kernel/c_types.rs Normal file
View File

@ -0,0 +1,119 @@
// SPDX-License-Identifier: GPL-2.0
//! C types for the bindings.
//!
//! The bindings generated by `bindgen` use these types to map to the C ones.
//!
//! C's standard integer types may differ in width depending on
//! the architecture, thus we need to conditionally compile those.
#![allow(non_camel_case_types)]
// 32-bit architectures: C `long` and pointer-sized types are 32 bits wide.
#[cfg(any(target_arch = "arm", target_arch = "x86", target_arch = "riscv32",))]
mod c {
    /// C `void` type.
    pub type c_void = core::ffi::c_void;

    /// C `char` type.
    ///
    /// NOTE(review): aliased to [`i8`] on every architecture, including ones
    /// where plain C `char` defaults to unsigned — confirm this matches the
    /// generated bindings.
    pub type c_char = i8;

    /// C `signed char` type.
    pub type c_schar = i8;

    /// C `unsigned char` type.
    pub type c_uchar = u8;

    /// C `short` type.
    pub type c_short = i16;

    /// C `unsigned short` type.
    pub type c_ushort = u16;

    /// C `int` type.
    pub type c_int = i32;

    /// C `unsigned int` type.
    pub type c_uint = u32;

    /// C `long` type.
    pub type c_long = i32;

    /// C `unsigned long` type.
    pub type c_ulong = u32;

    /// C `long long` type.
    pub type c_longlong = i64;

    /// C `unsigned long long` type.
    pub type c_ulonglong = u64;

    /// C `ssize_t` type (typically defined in `<sys/types.h>` by POSIX).
    ///
    /// For some 32-bit architectures like this one, the kernel defines it as
    /// `int`, i.e. it is an [`i32`].
    pub type c_ssize_t = isize;

    /// C `size_t` type (typically defined in `<stddef.h>`).
    ///
    /// For some 32-bit architectures like this one, the kernel defines it as
    /// `unsigned int`, i.e. it is an [`u32`].
    pub type c_size_t = usize;
}
// 64-bit architectures: C `long` and pointer-sized types are 64 bits wide.
#[cfg(any(
    target_arch = "aarch64",
    target_arch = "x86_64",
    target_arch = "powerpc64",
    target_arch = "riscv64",
))]
mod c {
    /// C `void` type.
    pub type c_void = core::ffi::c_void;

    /// C `char` type.
    ///
    /// NOTE(review): aliased to [`i8`] on every architecture, including ones
    /// where plain C `char` defaults to unsigned — confirm this matches the
    /// generated bindings.
    pub type c_char = i8;

    /// C `signed char` type.
    pub type c_schar = i8;

    /// C `unsigned char` type.
    pub type c_uchar = u8;

    /// C `short` type.
    pub type c_short = i16;

    /// C `unsigned short` type.
    pub type c_ushort = u16;

    /// C `int` type.
    pub type c_int = i32;

    /// C `unsigned int` type.
    pub type c_uint = u32;

    /// C `long` type.
    pub type c_long = i64;

    /// C `unsigned long` type.
    pub type c_ulong = u64;

    /// C `long long` type.
    pub type c_longlong = i64;

    /// C `unsigned long long` type.
    pub type c_ulonglong = u64;

    /// C `ssize_t` type (typically defined in `<sys/types.h>` by POSIX).
    ///
    /// For 64-bit architectures like this one, the kernel defines it as
    /// `long`, i.e. it is an [`i64`].
    pub type c_ssize_t = isize;

    /// C `size_t` type (typically defined in `<stddef.h>`).
    ///
    /// For 64-bit architectures like this one, the kernel defines it as
    /// `unsigned long`, i.e. it is an [`u64`].
    pub type c_size_t = usize;
}

// Re-export whichever `mod c` was selected by the `cfg` attributes above.
pub use c::*;

207
rust/kernel/chrdev.rs Normal file
View File

@ -0,0 +1,207 @@
// SPDX-License-Identifier: GPL-2.0
//! Character devices.
//!
//! Also called "char devices", `chrdev`, `cdev`.
//!
//! C header: [`include/linux/cdev.h`](../../../../include/linux/cdev.h)
//!
//! Reference: <https://www.kernel.org/doc/html/latest/core-api/kernel-api.html#char-devices>
use alloc::boxed::Box;
use core::convert::TryInto;
use core::marker::PhantomPinned;
use core::pin::Pin;
use crate::bindings;
use crate::c_types;
use crate::error::{code::*, Error, Result};
use crate::file;
use crate::str::CStr;
/// Character device.
///
/// Owns the underlying C `cdev`: it is removed via `cdev_del()` on drop.
///
/// # Invariants
///
/// - [`self.0`] is valid and non-null.
/// - [`(*self.0).ops`] is valid, non-null and has static lifetime.
/// - [`(*self.0).owner`] is valid and, if non-null, has module lifetime.
struct Cdev(*mut bindings::cdev);

impl Cdev {
    /// Allocates a new `cdev` and initialises its file operations and owner.
    fn alloc(
        fops: &'static bindings::file_operations,
        module: &'static crate::ThisModule,
    ) -> Result<Self> {
        // SAFETY: FFI call.
        let cdev = unsafe { bindings::cdev_alloc() };
        if cdev.is_null() {
            return Err(ENOMEM);
        }
        // SAFETY: `cdev` is valid and non-null since `cdev_alloc()`
        // returned a valid pointer which was null-checked.
        unsafe {
            (*cdev).ops = fops;
            (*cdev).owner = module.0;
        }
        // INVARIANTS:
        // - [`self.0`] is valid and non-null.
        // - [`(*self.0).ops`] is valid, non-null and has static lifetime,
        //   because it was coerced from a reference with static lifetime.
        // - [`(*self.0).owner`] is valid and, if non-null, has module lifetime,
        //   guaranteed by the [`ThisModule`] invariant.
        Ok(Self(cdev))
    }

    /// Adds the device to the system, serving `count` minors starting at `dev`.
    fn add(&mut self, dev: bindings::dev_t, count: c_types::c_uint) -> Result {
        // SAFETY: According to the type invariants:
        // - [`self.0`] can be safely passed to [`bindings::cdev_add`].
        // - [`(*self.0).ops`] will live at least as long as [`self.0`].
        // - [`(*self.0).owner`] will live at least as long as the
        //   module, which is an implicit requirement.
        let rc = unsafe { bindings::cdev_add(self.0, dev, count) };
        if rc != 0 {
            return Err(Error::from_kernel_errno(rc));
        }
        Ok(())
    }
}

impl Drop for Cdev {
    fn drop(&mut self) {
        // SAFETY: [`self.0`] is valid and non-null by the type invariants.
        unsafe {
            bindings::cdev_del(self.0);
        }
    }
}
/// State created lazily on the first [`Registration::register`] call.
struct RegistrationInner<const N: usize> {
    // First device number of the region allocated via `alloc_chrdev_region()`.
    dev: bindings::dev_t,
    // How many of the `N` slots in `cdevs` are in use.
    used: usize,
    // Storage for the added character devices.
    cdevs: [Option<Cdev>; N],
    // Makes the type `!Unpin`, so a pinned registration cannot be moved.
    _pin: PhantomPinned,
}

/// Character device registration.
///
/// May contain up to a fixed number (`N`) of devices. Must be pinned.
pub struct Registration<const N: usize> {
    // Name passed to `alloc_chrdev_region()`.
    name: &'static CStr,
    // First minor number to request for the region.
    minors_start: u16,
    // Module owning the file operations of the registered devices.
    this_module: &'static crate::ThisModule,
    // `None` until the first successful `register()` call.
    inner: Option<RegistrationInner<N>>,
}
impl<const N: usize> Registration<{ N }> {
    /// Creates a [`Registration`] object for a character device.
    ///
    /// This does *not* register the device: see [`Self::register()`].
    ///
    /// This associated function is intended to be used when you need to avoid
    /// a memory allocation, e.g. when the [`Registration`] is a member of
    /// a bigger structure inside your [`crate::Module`] instance. If you
    /// are going to pin the registration right away, call
    /// [`Self::new_pinned()`] instead.
    pub fn new(
        name: &'static CStr,
        minors_start: u16,
        this_module: &'static crate::ThisModule,
    ) -> Self {
        // Registration is lazy: the device number region is only allocated
        // on the first call to `register()`.
        Registration {
            name,
            minors_start,
            this_module,
            inner: None,
        }
    }

    /// Creates a pinned [`Registration`] object for a character device.
    ///
    /// This does *not* register the device: see [`Self::register()`].
    pub fn new_pinned(
        name: &'static CStr,
        minors_start: u16,
        this_module: &'static crate::ThisModule,
    ) -> Result<Pin<Box<Self>>> {
        Ok(Pin::from(Box::try_new(Self::new(
            name,
            minors_start,
            this_module,
        ))?))
    }

    /// Registers a character device.
    ///
    /// You may call this once per device type, up to `N` times.
    ///
    /// The first call also allocates the `dev_t` region for all `N` minors
    /// via `alloc_chrdev_region()`; subsequent calls reuse that region.
    ///
    /// # Errors
    ///
    /// Returns [`EINVAL`] once all `N` slots are in use; otherwise propagates
    /// failures from the underlying C registration calls.
    pub fn register<T: file::Operations<OpenData = ()>>(self: Pin<&mut Self>) -> Result {
        // SAFETY: We must ensure that we never move out of `this`.
        let this = unsafe { self.get_unchecked_mut() };
        if this.inner.is_none() {
            let mut dev: bindings::dev_t = 0;
            // SAFETY: Calling unsafe function. `this.name` has `'static`
            // lifetime.
            let res = unsafe {
                bindings::alloc_chrdev_region(
                    &mut dev,
                    this.minors_start.into(),
                    N.try_into()?,
                    this.name.as_char_ptr(),
                )
            };
            if res != 0 {
                return Err(Error::from_kernel_errno(res));
            }
            const NONE: Option<Cdev> = None;
            this.inner = Some(RegistrationInner {
                dev,
                used: 0,
                cdevs: [NONE; N],
                _pin: PhantomPinned,
            });
        }

        // The binding itself does not need to be `mut` (it would trigger
        // `unused_mut`): the mutations below go through the `&mut` reference.
        let inner = this.inner.as_mut().unwrap();
        if inner.used == N {
            return Err(EINVAL);
        }

        // SAFETY: The adapter doesn't retrieve any state yet, so it's compatible with any
        // registration.
        let fops = unsafe { file::OperationsVtable::<Self, T>::build() };
        let mut cdev = Cdev::alloc(fops, this.this_module)?;
        cdev.add(inner.dev + inner.used as bindings::dev_t, 1)?;
        inner.cdevs[inner.used].replace(cdev);
        inner.used += 1;
        Ok(())
    }
}
impl<const N: usize> file::OpenAdapter<()> for Registration<{ N }> {
    /// Supplies the per-open state for files of this registration; there is
    /// none yet, so a reference to the unit value is returned.
    unsafe fn convert(_inode: *mut bindings::inode, _file: *mut bindings::file) -> *const () {
        // TODO: Update the SAFETY comment on the call to `FileOperationsVTable::build` above once
        // this is updated to retrieve state.
        &()
    }
}

// SAFETY: `Registration` does not expose any of its state across threads
// (it is fine for multiple threads to have a shared reference to it).
unsafe impl<const N: usize> Sync for Registration<{ N }> {}

impl<const N: usize> Drop for Registration<{ N }> {
    fn drop(&mut self) {
        if let Some(inner) = self.inner.as_mut() {
            // Replicate kernel C behaviour: drop [`Cdev`]s before calling
            // [`bindings::unregister_chrdev_region`].
            for i in 0..inner.used {
                inner.cdevs[i].take();
            }
            // SAFETY: [`self.inner`] is Some, so [`inner.dev`] was previously
            // created using [`bindings::alloc_chrdev_region`].
            unsafe {
                bindings::unregister_chrdev_region(inner.dev, N.try_into().unwrap());
            }
        }
    }
}

79
rust/kernel/clk.rs Normal file
View File

@ -0,0 +1,79 @@
// SPDX-License-Identifier: GPL-2.0
//! Common clock framework.
//!
//! C header: [`include/linux/clk.h`](../../../../include/linux/clk.h)
use crate::{bindings, error::Result, to_result};
use core::mem::ManuallyDrop;
/// Represents `struct clk *`.
///
/// # Invariants
///
/// The pointer is valid.
pub struct Clk(*mut bindings::clk);

impl Clk {
    /// Creates new clock structure from a raw pointer.
    ///
    /// The returned value takes over the clock reference: `clk_put()` is
    /// called when it is dropped (see the [`Drop`] impl below).
    ///
    /// # Safety
    ///
    /// The pointer must be valid.
    pub unsafe fn new(clk: *mut bindings::clk) -> Self {
        Self(clk)
    }

    /// Returns value of the rate field of `struct clk`.
    pub fn get_rate(&self) -> usize {
        // SAFETY: The pointer is valid by the type invariant.
        unsafe { bindings::clk_get_rate(self.0) as usize }
    }

    /// Prepares and enables the underlying hardware clock.
    ///
    /// Consumes `self` and, on success, returns an [`EnabledClk`] that keeps
    /// the clock enabled for as long as it lives.
    ///
    /// This function should not be called in atomic context.
    pub fn prepare_enable(self) -> Result<EnabledClk> {
        // SAFETY: The pointer is valid by the type invariant.
        to_result(|| unsafe { bindings::clk_prepare_enable(self.0) })?;
        Ok(EnabledClk(self))
    }
}
impl Drop for Clk {
    fn drop(&mut self) {
        // Releases the clock reference taken when the `Clk` was created.
        // SAFETY: The pointer is valid by the type invariant.
        unsafe { bindings::clk_put(self.0) };
    }
}

// SAFETY: `Clk` is not restricted to a single thread so it is safe
// to move it between threads.
unsafe impl Send for Clk {}
/// A clock variant that is prepared and enabled.
pub struct EnabledClk(Clk);

impl EnabledClk {
    /// Returns value of the rate field of `struct clk`.
    pub fn get_rate(&self) -> usize {
        self.0.get_rate()
    }

    /// Disables and later unprepares the underlying hardware clock prematurely.
    ///
    /// Returns the plain [`Clk`] handle, which still holds the clock
    /// reference.
    ///
    /// This function should not be called in atomic context.
    pub fn disable_unprepare(self) -> Clk {
        // Suppress the destructors of `self` and of its inner `Clk`: the
        // clock is disabled exactly once right here, and the reference is
        // handed back to the caller instead of being put.
        let this = ManuallyDrop::new(self);
        let raw = this.0 .0;
        // SAFETY: The pointer is valid by the type invariant.
        unsafe { bindings::clk_disable_unprepare(raw) };
        // Rebuild a plain `Clk` around the same raw pointer; its `Drop` will
        // eventually release the reference via `clk_put()`.
        Clk(raw)
    }
}
impl Drop for EnabledClk {
    fn drop(&mut self) {
        // Disable and unprepare the clock; the inner `Clk`'s own `Drop` then calls `clk_put`.
        // SAFETY: The pointer is valid by the type invariant.
        unsafe { bindings::clk_disable_unprepare(self.0 .0) };
    }
}

46
rust/kernel/cred.rs Normal file
View File

@ -0,0 +1,46 @@
// SPDX-License-Identifier: GPL-2.0
//! Credentials management.
//!
//! C header: [`include/linux/cred.h`](../../../../include/linux/cred.h)
//!
//! Reference: <https://www.kernel.org/doc/html/latest/security/credentials.html>
use crate::{bindings, AlwaysRefCounted};
use core::cell::UnsafeCell;
/// Wraps the kernel's `struct cred`.
///
/// # Invariants
///
/// Instances of this type are always ref-counted, that is, a call to `get_cred` ensures that the
/// allocation remains valid at least until the matching call to `put_cred`.
///
/// The `#[repr(transparent)]` layout is what makes casting from `*const bindings::cred` in
/// [`Credential::from_ptr`] sound.
#[repr(transparent)]
pub struct Credential(pub(crate) UnsafeCell<bindings::cred>);
impl Credential {
    /// Creates a reference to a [`Credential`] from a valid pointer.
    ///
    /// # Safety
    ///
    /// The caller must ensure that `ptr` is valid and remains valid for the lifetime of the
    /// returned [`Credential`] reference.
    pub(crate) unsafe fn from_ptr<'a>(ptr: *const bindings::cred) -> &'a Self {
        // SAFETY: The safety requirements guarantee the validity of the dereference, while the
        // `Credential` type being transparent makes the cast ok.
        unsafe { &*ptr.cast() }
    }
}
// SAFETY: The type invariants guarantee that `Credential` is always ref-counted.
unsafe impl AlwaysRefCounted for Credential {
    fn inc_ref(&self) {
        // SAFETY: The existence of a shared reference means that the refcount is nonzero.
        unsafe { bindings::get_cred(self.0.get()) };
    }
    unsafe fn dec_ref(obj: core::ptr::NonNull<Self>) {
        // `obj.cast()` goes from `NonNull<Credential>` to `NonNull<bindings::cred>`, which is
        // valid because `Credential` is `#[repr(transparent)]` over `UnsafeCell<bindings::cred>`.
        // SAFETY: The safety requirements guarantee that the refcount is nonzero.
        unsafe { bindings::put_cred(obj.cast().as_ptr()) };
    }
}

546
rust/kernel/device.rs Normal file
View File

@ -0,0 +1,546 @@
// SPDX-License-Identifier: GPL-2.0
//! Generic devices that are part of the kernel's driver model.
//!
//! C header: [`include/linux/device.h`](../../../../include/linux/device.h)
#[cfg(CONFIG_COMMON_CLK)]
use crate::{clk::Clk, error::from_kernel_err_ptr};
use crate::{
bindings,
revocable::{Revocable, RevocableGuard},
str::CStr,
sync::{NeedsLockClass, RevocableMutex, RevocableMutexGuard, UniqueRef},
Result,
};
use core::{
fmt,
ops::{Deref, DerefMut},
pin::Pin,
};
#[cfg(CONFIG_PRINTK)]
use crate::{c_str, c_types};
/// A raw device.
///
/// # Safety
///
/// Implementers must ensure that the `*mut device` returned by [`RawDevice::raw_device`] is
/// related to `self`, that is, actions on it will affect `self`. For example, if one calls
/// `get_device`, then the refcount on the device represented by `self` will be incremented.
///
/// Additionally, implementers must ensure that the device is never renamed. Commit a5462516aa994
/// has details on why `device_rename` should not be used.
pub unsafe trait RawDevice {
    /// Returns the raw `struct device` related to `self`.
    fn raw_device(&self) -> *mut bindings::device;
    /// Returns the name of the device.
    fn name(&self) -> &CStr {
        let ptr = self.raw_device();
        // SAFETY: `ptr` is valid because `self` keeps it alive.
        let name = unsafe { bindings::dev_name(ptr) };
        // SAFETY: The name of the device remains valid while it is alive (because the device is
        // never renamed, per the safety requirement of this trait). This is guaranteed to be the
        // case because the reference to `self` outlives the one of the returned `CStr` (enforced
        // by the compiler because of their lifetimes).
        unsafe { CStr::from_char_ptr(name) }
    }
    /// Lookups a clock producer consumed by this device.
    ///
    /// Returns a managed reference to the clock producer.
    ///
    /// If `id` is `None`, a NULL consumer id is passed to `clk_get`.
    #[cfg(CONFIG_COMMON_CLK)]
    fn clk_get(&self, id: Option<&CStr>) -> Result<Clk> {
        let id_ptr = match id {
            Some(cstr) => cstr.as_char_ptr(),
            None => core::ptr::null(),
        };
        // SAFETY: `id_ptr` is optional and may be either a valid pointer
        // from the type invariant or NULL otherwise.
        let clk_ptr = unsafe { from_kernel_err_ptr(bindings::clk_get(self.raw_device(), id_ptr)) }?;
        // SAFETY: Clock is initialized with valid pointer returned from `bindings::clk_get` call.
        unsafe { Ok(Clk::new(clk_ptr)) }
    }
    /// Prints an emergency-level message (level 0) prefixed with device information.
    ///
    /// More details are available from [`dev_emerg`].
    fn pr_emerg(&self, args: fmt::Arguments<'_>) {
        // SAFETY: `klevel` is null-terminated, uses one of the kernel constants.
        unsafe { self.printk(bindings::KERN_EMERG, args) };
    }
    /// Prints an alert-level message (level 1) prefixed with device information.
    ///
    /// More details are available from [`dev_alert`].
    fn pr_alert(&self, args: fmt::Arguments<'_>) {
        // SAFETY: `klevel` is null-terminated, uses one of the kernel constants.
        unsafe { self.printk(bindings::KERN_ALERT, args) };
    }
    /// Prints a critical-level message (level 2) prefixed with device information.
    ///
    /// More details are available from [`dev_crit`].
    fn pr_crit(&self, args: fmt::Arguments<'_>) {
        // SAFETY: `klevel` is null-terminated, uses one of the kernel constants.
        unsafe { self.printk(bindings::KERN_CRIT, args) };
    }
    /// Prints an error-level message (level 3) prefixed with device information.
    ///
    /// More details are available from [`dev_err`].
    fn pr_err(&self, args: fmt::Arguments<'_>) {
        // SAFETY: `klevel` is null-terminated, uses one of the kernel constants.
        unsafe { self.printk(bindings::KERN_ERR, args) };
    }
    /// Prints a warning-level message (level 4) prefixed with device information.
    ///
    /// More details are available from [`dev_warn`].
    fn pr_warn(&self, args: fmt::Arguments<'_>) {
        // SAFETY: `klevel` is null-terminated, uses one of the kernel constants.
        unsafe { self.printk(bindings::KERN_WARNING, args) };
    }
    /// Prints a notice-level message (level 5) prefixed with device information.
    ///
    /// More details are available from [`dev_notice`].
    fn pr_notice(&self, args: fmt::Arguments<'_>) {
        // SAFETY: `klevel` is null-terminated, uses one of the kernel constants.
        unsafe { self.printk(bindings::KERN_NOTICE, args) };
    }
    /// Prints an info-level message (level 6) prefixed with device information.
    ///
    /// More details are available from [`dev_info`].
    fn pr_info(&self, args: fmt::Arguments<'_>) {
        // SAFETY: `klevel` is null-terminated, uses one of the kernel constants.
        unsafe { self.printk(bindings::KERN_INFO, args) };
    }
    /// Prints a debug-level message (level 7) prefixed with device information.
    ///
    /// More details are available from [`dev_dbg`].
    fn pr_dbg(&self, args: fmt::Arguments<'_>) {
        // Only print in builds with debug assertions enabled (no dynamic debug support yet).
        if cfg!(debug_assertions) {
            // SAFETY: `klevel` is null-terminated, uses one of the kernel constants.
            unsafe { self.printk(bindings::KERN_DEBUG, args) };
        }
    }
    /// Prints the provided message to the console.
    ///
    /// # Safety
    ///
    /// Callers must ensure that `klevel` is null-terminated; in particular, one of the
    /// `KERN_*` constants, for example, `KERN_CRIT`, `KERN_ALERT`, etc.
    #[cfg_attr(not(CONFIG_PRINTK), allow(unused_variables))]
    unsafe fn printk(&self, klevel: &[u8], msg: fmt::Arguments<'_>) {
        // When `CONFIG_PRINTK` is disabled, this compiles to a no-op.
        // SAFETY: `klevel` is null-terminated and one of the kernel constants. `self.raw_device`
        // is valid because `self` is valid. The "%pA" format string expects a pointer to
        // `fmt::Arguments`, which is what we're passing as the last argument.
        #[cfg(CONFIG_PRINTK)]
        unsafe {
            bindings::_dev_printk(
                klevel as *const _ as *const c_types::c_char,
                self.raw_device(),
                c_str!("%pA").as_char_ptr(),
                &msg as *const _ as *const c_types::c_void,
            )
        };
    }
}
/// A ref-counted device.
///
/// # Invariants
///
/// `ptr` is valid, non-null, and has a non-zero reference count. One of the references is owned by
/// `self`, and will be decremented when `self` is dropped.
pub struct Device {
    // Owned reference to the underlying `struct device`; released in `Drop`.
    pub(crate) ptr: *mut bindings::device,
}
// SAFETY: `Device` only holds a pointer to a C device, which is safe to be used from any thread.
unsafe impl Send for Device {}
// SAFETY: `Device` only holds a pointer to a C device, references to which are safe to be used
// from any thread.
unsafe impl Sync for Device {}
impl Device {
/// Creates a new device instance.
///
/// # Safety
///
/// Callers must ensure that `ptr` is valid, non-null, and has a non-zero reference count.
pub unsafe fn new(ptr: *mut bindings::device) -> Self {
// SAFETY: By the safety requirements, ptr is valid and its refcounted will be incremented.
unsafe { bindings::get_device(ptr) };
// INVARIANT: The safety requirements satisfy all but one invariant, which is that `self`
// owns a reference. This is satisfied by the call to `get_device` above.
Self { ptr }
}
/// Creates a new device instance from an existing [`RawDevice`] instance.
pub fn from_dev(dev: &dyn RawDevice) -> Self {
// SAFETY: The requirements are satisfied by the existence of `RawDevice` and its safety
// requirements.
unsafe { Self::new(dev.raw_device()) }
}
}
// SAFETY: The device returned by `raw_device` is the one for which we hold a reference.
unsafe impl RawDevice for Device {
    fn raw_device(&self) -> *mut bindings::device {
        // Valid by the type invariants of `Device`.
        self.ptr
    }
}
impl Drop for Device {
    fn drop(&mut self) {
        // SAFETY: By the type invariants, we know that `self` owns a reference, so it is safe to
        // relinquish it now.
        unsafe { bindings::put_device(self.ptr) };
    }
}
/// Device data.
///
/// When a device is removed (for whatever reason, for example, because the device was unplugged or
/// because the user decided to unbind the driver), the driver is given a chance to clean its state
/// up, and all io resources should ideally not be used anymore.
///
/// However, the device data is reference-counted because other subsystems hold pointers to it. So
/// some device state must be freed and not used anymore, while others must remain accessible.
///
/// This struct separates the device data into three categories:
///   1. Registrations: are destroyed when the device is removed, but before the io resources
///      become inaccessible.
///   2. Io resources: are available until the device is removed.
///   3. General data: remain available as long as the ref count is nonzero.
///
/// This struct implements the `DeviceRemoval` trait so that it can clean resources up even if not
/// explicitly called by the device drivers.
pub struct Data<T, U, V> {
    // Revoked first on device removal, so registrations can still use resources while being
    // unregistered.
    registrations: RevocableMutex<T>,
    // Revoked after the registrations on device removal.
    resources: Revocable<U>,
    // Always accessible while the allocation is alive; exposed via `Deref`/`DerefMut`.
    general: V,
}
/// Safely creates a new reference-counted instance of [`Data`].
///
/// This expands to a call to [`Data::try_new`] with two statically-allocated lock classes, one
/// per invocation site, as required by lockdep.
#[doc(hidden)]
#[macro_export]
macro_rules! new_device_data {
    ($reg:expr, $res:expr, $gen:expr, $name:literal) => {{
        static mut CLASS1: core::mem::MaybeUninit<$crate::bindings::lock_class_key> =
            core::mem::MaybeUninit::uninit();
        static mut CLASS2: core::mem::MaybeUninit<$crate::bindings::lock_class_key> =
            core::mem::MaybeUninit::uninit();
        let regs = $reg;
        let res = $res;
        let gen = $gen;
        let name = $crate::c_str!($name);
        // SAFETY: `CLASS1` and `CLASS2` are never used by Rust code directly; the C portion of the
        // kernel may change it though.
        unsafe {
            $crate::device::Data::try_new(
                regs,
                res,
                gen,
                name,
                CLASS1.as_mut_ptr(),
                CLASS2.as_mut_ptr(),
            )
        }
    }};
}
impl<T, U, V> Data<T, U, V> {
    /// Creates a new instance of `Data`.
    ///
    /// It is recommended that the [`new_device_data`] macro be used as it automatically creates
    /// the lock classes.
    ///
    /// # Safety
    ///
    /// `key1` and `key2` must point to valid memory locations and remain valid until `self` is
    /// dropped.
    pub unsafe fn try_new(
        registrations: T,
        resources: U,
        general: V,
        name: &'static CStr,
        key1: *mut bindings::lock_class_key,
        key2: *mut bindings::lock_class_key,
    ) -> Result<Pin<UniqueRef<Self>>> {
        let mut ret = Pin::from(UniqueRef::try_new(Self {
            // SAFETY: We call `RevocableMutex::init` below.
            registrations: unsafe { RevocableMutex::new(registrations) },
            resources: Revocable::new(resources),
            general,
        })?);
        // Two-stage init: the mutex needs a stable (pinned) address before `init` can run.
        // SAFETY: `Data::registrations` is pinned when `Data` is.
        let pinned = unsafe { ret.as_mut().map_unchecked_mut(|d| &mut d.registrations) };
        // SAFETY: The safety requirements of this function satisfy those of `RevocableMutex::init`.
        unsafe { pinned.init(name, key1, key2) };
        Ok(ret)
    }
    /// Returns the resources if they're still available.
    ///
    /// Returns `None` after the device has been removed (resources revoked).
    pub fn resources(&self) -> Option<RevocableGuard<'_, U>> {
        self.resources.try_access()
    }
    /// Returns the locked registrations if they're still available.
    ///
    /// Returns `None` after the device has been removed (registrations revoked).
    pub fn registrations(&self) -> Option<RevocableMutexGuard<'_, T>> {
        self.registrations.try_write()
    }
}
impl<T, U, V> crate::driver::DeviceRemoval for Data<T, U, V> {
    fn device_remove(&self) {
        // We revoke the registrations first so that resources are still available to them during
        // unregistration.
        self.registrations.revoke();
        // Release resources now. General data remains available.
        self.resources.revoke();
    }
}
// Dereferencing `Data` yields the general data, which stays accessible for the whole lifetime of
// the allocation (it is never revoked).
impl<T, U, V> Deref for Data<T, U, V> {
    type Target = V;
    fn deref(&self) -> &V {
        &self.general
    }
}
impl<T, U, V> DerefMut for Data<T, U, V> {
    fn deref_mut(&mut self) -> &mut V {
        &mut self.general
    }
}
// Common implementation detail of the `dev_*` printing macros below: dispatches to the given
// `RawDevice` printing method with the formatted arguments.
#[doc(hidden)]
#[macro_export]
macro_rules! dev_printk {
    ($method:ident, $dev:expr, $($f:tt)*) => {
        {
            // We have an explicit `use` statement here so that callers of this macro are not
            // required to explicitly use the `RawDevice` trait to use its functions.
            use $crate::device::RawDevice;
            ($dev).$method(core::format_args!($($f)*));
        }
    }
}
/// Prints an emergency-level message (level 0) prefixed with device information.
///
/// This level should be used if the system is unusable.
///
/// Equivalent to the kernel's `dev_emerg` macro.
///
/// The first argument must be an expression whose type implements `device::RawDevice`.
///
/// Mimics the interface of [`std::print!`]. More information about the syntax is available from
/// [`core::fmt`] and [`alloc::format!`].
///
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
///
/// ```
/// # use kernel::device::Device;
///
/// fn example(dev: &Device) {
///     dev_emerg!(dev, "hello {}\n", "there");
/// }
/// ```
#[macro_export]
macro_rules! dev_emerg {
    ($($f:tt)*) => { $crate::dev_printk!(pr_emerg, $($f)*); }
}
/// Prints an alert-level message (level 1) prefixed with device information.
///
/// This level should be used if action must be taken immediately.
///
/// Equivalent to the kernel's `dev_alert` macro.
///
/// The first argument must be an expression whose type implements `device::RawDevice`.
///
/// Mimics the interface of [`std::print!`]. More information about the syntax is available from
/// [`core::fmt`] and [`alloc::format!`].
///
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
///
/// ```
/// # use kernel::device::Device;
///
/// fn example(dev: &Device) {
///     dev_alert!(dev, "hello {}\n", "there");
/// }
/// ```
#[macro_export]
macro_rules! dev_alert {
    ($($f:tt)*) => { $crate::dev_printk!(pr_alert, $($f)*); }
}
/// Prints a critical-level message (level 2) prefixed with device information.
///
/// This level should be used in critical conditions.
///
/// Equivalent to the kernel's `dev_crit` macro.
///
/// The first argument must be an expression whose type implements `device::RawDevice`.
///
/// Mimics the interface of [`std::print!`]. More information about the syntax is available from
/// [`core::fmt`] and [`alloc::format!`].
///
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
///
/// ```
/// # use kernel::device::Device;
///
/// fn example(dev: &Device) {
///     dev_crit!(dev, "hello {}\n", "there");
/// }
/// ```
#[macro_export]
macro_rules! dev_crit {
    ($($f:tt)*) => { $crate::dev_printk!(pr_crit, $($f)*); }
}
/// Prints an error-level message (level 3) prefixed with device information.
///
/// This level should be used in error conditions.
///
/// Equivalent to the kernel's `dev_err` macro.
///
/// The first argument must be an expression whose type implements `device::RawDevice`.
///
/// Mimics the interface of [`std::print!`]. More information about the syntax is available from
/// [`core::fmt`] and [`alloc::format!`].
///
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
///
/// ```
/// # use kernel::device::Device;
///
/// fn example(dev: &Device) {
///     dev_err!(dev, "hello {}\n", "there");
/// }
/// ```
#[macro_export]
macro_rules! dev_err {
    ($($f:tt)*) => { $crate::dev_printk!(pr_err, $($f)*); }
}
/// Prints a warning-level message (level 4) prefixed with device information.
///
/// This level should be used in warning conditions.
///
/// Equivalent to the kernel's `dev_warn` macro.
///
/// The first argument must be an expression whose type implements `device::RawDevice`.
///
/// Mimics the interface of [`std::print!`]. More information about the syntax is available from
/// [`core::fmt`] and [`alloc::format!`].
///
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
///
/// ```
/// # use kernel::device::Device;
///
/// fn example(dev: &Device) {
///     dev_warn!(dev, "hello {}\n", "there");
/// }
/// ```
#[macro_export]
macro_rules! dev_warn {
    ($($f:tt)*) => { $crate::dev_printk!(pr_warn, $($f)*); }
}
/// Prints a notice-level message (level 5) prefixed with device information.
///
/// This level should be used in normal but significant conditions.
///
/// Equivalent to the kernel's `dev_notice` macro.
///
/// The first argument must be an expression whose type implements `device::RawDevice`.
///
/// Mimics the interface of [`std::print!`]. More information about the syntax is available from
/// [`core::fmt`] and [`alloc::format!`].
///
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
///
/// ```
/// # use kernel::device::Device;
///
/// fn example(dev: &Device) {
///     dev_notice!(dev, "hello {}\n", "there");
/// }
/// ```
#[macro_export]
macro_rules! dev_notice {
    ($($f:tt)*) => { $crate::dev_printk!(pr_notice, $($f)*); }
}
/// Prints an info-level message (level 6) prefixed with device information.
///
/// This level should be used for informational messages.
///
/// Equivalent to the kernel's `dev_info` macro.
///
/// The first argument must be an expression whose type implements `device::RawDevice`.
///
/// Mimics the interface of [`std::print!`]. More information about the syntax is available from
/// [`core::fmt`] and [`alloc::format!`].
///
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
///
/// ```
/// # use kernel::device::Device;
///
/// fn example(dev: &Device) {
///     dev_info!(dev, "hello {}\n", "there");
/// }
/// ```
#[macro_export]
macro_rules! dev_info {
    ($($f:tt)*) => { $crate::dev_printk!(pr_info, $($f)*); }
}
/// Prints a debug-level message (level 7) prefixed with device information.
///
/// This level should be used for debug messages.
///
/// Equivalent to the kernel's `dev_dbg` macro, except that it doesn't support dynamic debug yet.
///
/// The first argument must be an expression whose type implements `device::RawDevice`.
///
/// Mimics the interface of [`std::print!`]. More information about the syntax is available from
/// [`core::fmt`] and [`alloc::format!`].
///
/// [`std::print!`]: https://doc.rust-lang.org/std/macro.print.html
///
/// # Examples
///
/// ```
/// # use kernel::device::Device;
///
/// fn example(dev: &Device) {
///     dev_dbg!(dev, "hello {}\n", "there");
/// }
/// ```
#[macro_export]
macro_rules! dev_dbg {
    ($($f:tt)*) => { $crate::dev_printk!(pr_dbg, $($f)*); }
}

442
rust/kernel/driver.rs Normal file
View File

@ -0,0 +1,442 @@
// SPDX-License-Identifier: GPL-2.0
//! Generic support for drivers of different buses (e.g., PCI, Platform, Amba, etc.).
//!
//! Each bus/subsystem is expected to implement [`DriverOps`], which allows drivers to register
//! using the [`Registration`] class.
use crate::{error::code::*, str::CStr, sync::Ref, Result, ThisModule};
use alloc::boxed::Box;
use core::{cell::UnsafeCell, marker::PhantomData, ops::Deref, pin::Pin};
/// A subsystem (e.g., PCI, Platform, Amba, etc.) that allows drivers to be written for it.
pub trait DriverOps {
    /// The type that holds information about the registration. This is typically a struct defined
    /// by the C portion of the kernel.
    type RegType: Default;
    /// Registers a driver.
    ///
    /// # Safety
    ///
    /// `reg` must point to valid, initialised, and writable memory. It may be modified by this
    /// function to hold registration state.
    ///
    /// On success, `reg` must remain pinned and valid until the matching call to
    /// [`DriverOps::unregister`].
    unsafe fn register(
        reg: *mut Self::RegType,
        name: &'static CStr,
        module: &'static ThisModule,
    ) -> Result;
    /// Unregisters a driver previously registered with [`DriverOps::register`].
    ///
    /// # Safety
    ///
    /// `reg` must point to valid writable memory, initialised by a previous successful call to
    /// [`DriverOps::register`].
    unsafe fn unregister(reg: *mut Self::RegType);
}
/// The registration of a driver.
pub struct Registration<T: DriverOps> {
    // Tracks whether `T::register` succeeded, so `Drop` knows whether to unregister.
    is_registered: bool,
    // Storage for the C-side registration state; handed to `T::register`/`T::unregister`.
    concrete_reg: UnsafeCell<T::RegType>,
}
// SAFETY: `Registration` has no fields or methods accessible via `&Registration`, so it is safe to
// share references to it with multiple threads as nothing can be done.
unsafe impl<T: DriverOps> Sync for Registration<T> {}
impl<T: DriverOps> Registration<T> {
    /// Creates a new instance of the registration object.
    ///
    /// The returned object is not yet registered; see [`Registration::register`].
    pub fn new() -> Self {
        Self {
            is_registered: false,
            concrete_reg: UnsafeCell::new(T::RegType::default()),
        }
    }
    /// Allocates a pinned registration object and registers it.
    ///
    /// Returns a pinned heap-allocated representation of the registration.
    pub fn new_pinned(name: &'static CStr, module: &'static ThisModule) -> Result<Pin<Box<Self>>> {
        let mut reg = Pin::from(Box::try_new(Self::new())?);
        reg.as_mut().register(name, module)?;
        Ok(reg)
    }
    /// Registers a driver with its subsystem.
    ///
    /// It must be pinned because the memory block that represents the registration is potentially
    /// self-referential.
    ///
    /// Returns `EINVAL` if called on an already-registered instance.
    pub fn register(
        self: Pin<&mut Self>,
        name: &'static CStr,
        module: &'static ThisModule,
    ) -> Result {
        // SAFETY: We never move out of `this`.
        let this = unsafe { self.get_unchecked_mut() };
        if this.is_registered {
            // Already registered.
            return Err(EINVAL);
        }
        // SAFETY: `concrete_reg` was initialised via its default constructor. It is only freed
        // after `Self::drop` is called, which first calls `T::unregister`.
        unsafe { T::register(this.concrete_reg.get(), name, module) }?;
        this.is_registered = true;
        Ok(())
    }
}
impl<T: DriverOps> Default for Registration<T> {
fn default() -> Self {
Self::new()
}
}
impl<T: DriverOps> Drop for Registration<T> {
    fn drop(&mut self) {
        // Only unregister if a registration actually happened (see `Self::register`).
        if self.is_registered {
            // SAFETY: This path only runs if a previous call to `T::register` completed
            // successfully.
            unsafe { T::unregister(self.concrete_reg.get()) };
        }
    }
}
/// Conversion from a device id to a raw device id.
///
/// This is meant to be implemented by buses/subsystems so that they can use [`IdTable`] to
/// guarantee (at compile-time) zero-termination of device id tables provided by drivers.
///
/// # Safety
///
/// Implementers must ensure that:
///   - [`RawDeviceId::ZERO`] is actually a zeroed-out version of the raw device id.
///   - [`RawDeviceId::to_rawid`] stores `offset` in the context/data field of the raw device id so
///     that buses can recover the pointer to the data.
pub unsafe trait RawDeviceId {
    /// The raw type that holds the device id.
    ///
    /// Id tables created from [`Self`] are going to hold this type in its zero-terminated array.
    type RawType: Copy;
    /// A zeroed-out representation of the raw device id.
    ///
    /// Id tables created from [`Self`] use [`Self::ZERO`] as the sentinel to indicate the end of
    /// the table.
    const ZERO: Self::RawType;
    /// Converts an id into a raw id.
    ///
    /// `offset` is the offset from the memory location where the raw device id is stored to the
    /// location where its associated context information is stored. Implementations must store
    /// this in the appropriate context/data field of the raw type.
    fn to_rawid(&self, offset: isize) -> Self::RawType;
}
/// A zero-terminated device id array, followed by context data.
///
/// `#[repr(C)]` guarantees the field order, so `sentinel` immediately follows `ids` in memory,
/// making `ids` a zero-terminated array of `N + 1` entries.
#[repr(C)]
pub struct IdArray<T: RawDeviceId, U, const N: usize> {
    ids: [T::RawType; N],
    // Zeroed entry that terminates the id table.
    sentinel: T::RawType,
    // Per-id context data, located after the ids so `to_rawid` offsets can reach it.
    id_infos: [Option<U>; N],
}
impl<T: RawDeviceId, U, const N: usize> IdArray<T, U, N> {
    /// Creates a new instance of the array.
    ///
    /// The contents are derived from the given identifiers and context information.
    pub const fn new(ids: [T; N], infos: [Option<U>; N]) -> Self
    where
        T: ~const RawDeviceId + Copy,
    {
        let mut array = Self {
            ids: [T::ZERO; N],
            sentinel: T::ZERO,
            id_infos: infos,
        };
        // Fill `ids`, storing in each raw id the byte offset to its context data so it can be
        // recovered later (see `RawDeviceId::to_rawid`).
        let mut i = 0usize;
        while i < N {
            // SAFETY: Both pointers are within `array` (or one byte beyond), consequently they are
            // derived from the same allocated object. We are using a `u8` pointer, whose size is 1,
            // so the pointers are necessarily 1-byte aligned.
            let offset = unsafe {
                (&array.id_infos[i] as *const _ as *const u8)
                    .offset_from(&array.ids[i] as *const _ as _)
            };
            array.ids[i] = ids[i].to_rawid(offset);
            i += 1;
        }
        array
    }
    /// Returns an `IdTable` backed by `self`.
    ///
    /// This is used to essentially erase the array size.
    pub const fn as_table(&self) -> IdTable<'_, T, U> {
        IdTable {
            first: &self.ids[0],
            _p: PhantomData,
        }
    }
}
/// A device id table.
///
/// The table is guaranteed to be zero-terminated and to be followed by an array of context data of
/// type `Option<U>`.
pub struct IdTable<'a, T: RawDeviceId, U> {
    // Reference to the first entry of the backing `IdArray`'s id array.
    first: &'a T::RawType,
    // Carries the context-data type without storing it.
    _p: PhantomData<&'a U>,
}
// Allows a table to be used where a reference to its first raw id is expected.
impl<T: RawDeviceId, U> const AsRef<T::RawType> for IdTable<'_, T, U> {
    fn as_ref(&self) -> &T::RawType {
        self.first
    }
}
/// Counts the number of parenthesis-delimited, comma-separated items.
///
/// A trailing comma is accepted.
///
/// # Examples
///
/// ```
/// # use kernel::count_paren_items;
///
/// assert_eq!(0, count_paren_items!());
/// assert_eq!(1, count_paren_items!((A)));
/// assert_eq!(1, count_paren_items!((A),));
/// assert_eq!(2, count_paren_items!((A), (B)));
/// assert_eq!(2, count_paren_items!((A), (B),));
/// assert_eq!(3, count_paren_items!((A), (B), (C)));
/// assert_eq!(3, count_paren_items!((A), (B), (C),));
/// ```
#[macro_export]
macro_rules! count_paren_items {
    // Base case: no items left.
    () => { 0 };
    // Last item, no trailing comma.
    (($($item:tt)*)) => { 1 };
    // One item followed by a comma: count it and recurse on the remainder.
    (($($item:tt)*), $($remaining:tt)*) => { 1 + $crate::count_paren_items!($($remaining)*) };
}
/// Converts a comma-separated list of pairs into an array with the first element. That is, it
/// discards the second element of the pair.
///
/// Additionally, it automatically introduces a type if the first element is wrapped in curly
/// braces, for example, if it's `{v: 10}`, it becomes `X { v: 10 }`; this is to avoid repeating
/// the type.
///
/// # Examples
///
/// ```
/// # use kernel::first_item;
///
/// #[derive(PartialEq, Debug)]
/// struct X {
///     v: u32,
/// }
///
/// assert_eq!([] as [X; 0], first_item!(X, ));
/// assert_eq!([X { v: 10 }], first_item!(X, ({ v: 10 }, Y)));
/// assert_eq!([X { v: 10 }], first_item!(X, ({ v: 10 }, Y),));
/// assert_eq!([X { v: 10 }], first_item!(X, (X { v: 10 }, Y)));
/// assert_eq!([X { v: 10 }], first_item!(X, (X { v: 10 }, Y),));
/// assert_eq!([X { v: 10 }, X { v: 20 }], first_item!(X, ({ v: 10 }, Y), ({ v: 20 }, Y)));
/// assert_eq!([X { v: 10 }, X { v: 20 }], first_item!(X, ({ v: 10 }, Y), ({ v: 20 }, Y),));
/// assert_eq!([X { v: 10 }, X { v: 20 }], first_item!(X, (X { v: 10 }, Y), (X { v: 20 }, Y)));
/// assert_eq!([X { v: 10 }, X { v: 20 }], first_item!(X, (X { v: 10 }, Y), (X { v: 20 }, Y),));
/// assert_eq!([X { v: 10 }, X { v: 20 }, X { v: 30 }],
///            first_item!(X, ({ v: 10 }, Y), ({ v: 20 }, Y), ({v: 30}, Y)));
/// assert_eq!([X { v: 10 }, X { v: 20 }, X { v: 30 }],
///            first_item!(X, ({ v: 10 }, Y), ({ v: 20 }, Y), ({v: 30}, Y),));
/// assert_eq!([X { v: 10 }, X { v: 20 }, X { v: 30 }],
///            first_item!(X, (X { v: 10 }, Y), (X { v: 20 }, Y), (X {v: 30}, Y)));
/// assert_eq!([X { v: 10 }, X { v: 20 }, X { v: 30 }],
///            first_item!(X, (X { v: 10 }, Y), (X { v: 20 }, Y), (X {v: 30}, Y),));
/// ```
#[macro_export]
macro_rules! first_item {
    // Brace-wrapped first elements: prefix each with the given type before building the array.
    ($id_type:ty, $(({$($first:tt)*}, $second:expr)),* $(,)?) => {
        {
            type IdType = $id_type;
            [$(IdType{$($first)*},)*]
        }
    };
    // Plain-expression first elements: collect them directly.
    ($id_type:ty, $(($first:expr, $second:expr)),* $(,)?) => { [$($first,)*] };
}
/// Converts a comma-separated list of pairs into an array with the second element. That is, it
/// discards the first element of the pair.
///
/// The first element of each pair may either be an expression or be wrapped in curly braces
/// (matching the syntax accepted by [`first_item`]); it is discarded either way. A trailing comma
/// is accepted.
///
/// # Examples
///
/// ```
/// # use kernel::second_item;
///
/// assert_eq!([] as [u32; 0], second_item!());
/// assert_eq!([10u32], second_item!((X, 10u32)));
/// assert_eq!([10u32], second_item!((X, 10u32),));
/// assert_eq!([10u32], second_item!(({X}, 10u32)));
/// assert_eq!([10u32], second_item!(({X}, 10u32),));
/// assert_eq!([10u32, 20], second_item!((X, 10u32), (X, 20)));
/// assert_eq!([10u32, 20], second_item!(({X}, 10u32), ({X}, 20)));
/// assert_eq!([10u32, 20, 30], second_item!((X, 10u32), (X, 20), (X, 30),));
/// assert_eq!([10u32, 20, 30], second_item!(({X}, 10u32), ({X}, 20), ({X}, 30),));
/// ```
#[macro_export]
macro_rules! second_item {
    // Brace-wrapped first elements.
    ($(({$($first:tt)*}, $second:expr)),* $(,)?) => { [$($second),*] };
    // Plain-expression first elements.
    ($(($first:expr, $second:expr)),* $(,)?) => { [$($second),*] };
}
/// Defines a new constant [`IdArray`] with a concise syntax.
///
/// It is meant to be used by buses and subsystems to create a similar macro with their device id
/// type already specified, i.e., with fewer parameters to the end user.
///
/// The array length is computed at compile time from the number of `(id, info)` pairs via
/// [`count_paren_items`].
///
/// # Examples
///
// TODO: Exported but not usable by kernel modules (requires `const_trait_impl`).
/// ```ignore
/// #![feature(const_trait_impl)]
/// # use kernel::{define_id_array, driver::RawDeviceId};
///
/// #[derive(Copy, Clone)]
/// struct Id(u32);
///
/// // SAFETY: `ZERO` is all zeroes and `to_rawid` stores `offset` as the second element of the raw
/// // device id pair.
/// unsafe impl const RawDeviceId for Id {
///     type RawType = (u64, isize);
///     const ZERO: Self::RawType = (0, 0);
///     fn to_rawid(&self, offset: isize) -> Self::RawType {
///         (self.0 as u64 + 1, offset)
///     }
/// }
///
/// define_id_array!(A1, Id, (), []);
/// define_id_array!(A2, Id, &'static [u8], [(Id(10), None)]);
/// define_id_array!(A3, Id, &'static [u8], [(Id(10), Some(b"id1")), ]);
/// define_id_array!(A4, Id, &'static [u8], [(Id(10), Some(b"id1")), (Id(20), Some(b"id2"))]);
/// define_id_array!(A5, Id, &'static [u8], [(Id(10), Some(b"id1")), (Id(20), Some(b"id2")), ]);
/// define_id_array!(A6, Id, &'static [u8], [(Id(10), None), (Id(20), Some(b"id2")), ]);
/// define_id_array!(A7, Id, &'static [u8], [(Id(10), Some(b"id1")), (Id(20), None), ]);
/// define_id_array!(A8, Id, &'static [u8], [(Id(10), None), (Id(20), None), ]);
/// ```
#[macro_export]
macro_rules! define_id_array {
    ($table_name:ident, $id_type:ty, $data_type:ty, [ $($t:tt)* ]) => {
        const $table_name:
            $crate::driver::IdArray<$id_type, $data_type, { $crate::count_paren_items!($($t)*) }> =
                $crate::driver::IdArray::new(
                    $crate::first_item!($id_type, $($t)*), $crate::second_item!($($t)*));
    };
}
/// Defines a new constant [`IdTable`] with a concise syntax.
///
/// It is meant to be used by buses and subsystems to create a similar macro with their device id
/// type already specified, i.e., with fewer parameters to the end user.
///
/// # Examples
///
// TODO: Exported but not usable by kernel modules (requires `const_trait_impl`).
/// ```ignore
/// #![feature(const_trait_impl)]
/// # use kernel::{define_id_table, driver::RawDeviceId};
///
/// #[derive(Copy, Clone)]
/// struct Id(u32);
///
/// // SAFETY: `ZERO` is all zeroes and `to_rawid` stores `offset` as the second element of the raw
/// // device id pair.
/// unsafe impl const RawDeviceId for Id {
/// type RawType = (u64, isize);
/// const ZERO: Self::RawType = (0, 0);
/// fn to_rawid(&self, offset: isize) -> Self::RawType {
/// (self.0 as u64 + 1, offset)
/// }
/// }
///
/// define_id_table!(T1, Id, &'static [u8], [(Id(10), None)]);
/// define_id_table!(T2, Id, &'static [u8], [(Id(10), Some(b"id1")), ]);
/// define_id_table!(T3, Id, &'static [u8], [(Id(10), Some(b"id1")), (Id(20), Some(b"id2"))]);
/// define_id_table!(T4, Id, &'static [u8], [(Id(10), Some(b"id1")), (Id(20), Some(b"id2")), ]);
/// define_id_table!(T5, Id, &'static [u8], [(Id(10), None), (Id(20), Some(b"id2")), ]);
/// define_id_table!(T6, Id, &'static [u8], [(Id(10), Some(b"id1")), (Id(20), None), ]);
/// define_id_table!(T7, Id, &'static [u8], [(Id(10), None), (Id(20), None), ]);
/// ```
#[macro_export]
macro_rules! define_id_table {
    ($table_name:ident, $id_type:ty, $data_type:ty, [ $($t:tt)* ]) => {
        // Builds an `IdArray` via `define_id_array!` and exposes it as an
        // `Option<IdTable>` so that subsystems can also use `None` where no
        // table is provided.
        const $table_name: Option<$crate::driver::IdTable<'static, $id_type, $data_type>> = {
            $crate::define_id_array!(ARRAY, $id_type, $data_type, [ $($t)* ]);
            Some(ARRAY.as_table())
        };
    };
}
/// Custom code within device removal.
pub trait DeviceRemoval {
    /// Cleans resources up when the device is removed.
    ///
    /// This is called when a device is removed and offers implementers the chance to run some code
    /// that cleans state up.
    ///
    /// Implementations must not assume the device is still usable at this point.
    fn device_remove(&self);
}
// The unit type carries no state, so removal is a no-op. This lets drivers
// without per-device data use `()` as their data type.
impl DeviceRemoval for () {
    fn device_remove(&self) {}
}
// A ref-counted pointer forwards device removal to the value it points at.
impl<T: DeviceRemoval> DeviceRemoval for Ref<T> {
    fn device_remove(&self) {
        (**self).device_remove();
    }
}
// A boxed value forwards device removal to its heap-allocated contents.
impl<T: DeviceRemoval> DeviceRemoval for Box<T> {
    fn device_remove(&self) {
        (**self).device_remove();
    }
}
/// A kernel module that only registers the given driver on init.
///
/// This is a helper struct to make it easier to define single-functionality modules, in this case,
/// modules that offer a single driver.
pub struct Module<T: DriverOps> {
    // Kept alive for the lifetime of the module; dropping it unregisters the driver.
    _driver: Pin<Box<Registration<T>>>,
}
impl<T: DriverOps> crate::Module for Module<T> {
    // Registers the driver on module initialisation; the registration is kept
    // in the returned module and dropped (unregistering the driver) on unload.
    fn init(name: &'static CStr, module: &'static ThisModule) -> Result<Self> {
        let registration = Registration::new_pinned(name, module)?;
        Ok(Self {
            _driver: registration,
        })
    }
}
/// Declares a kernel module that exposes a single driver.
///
/// It is meant to be used as a helper by other subsystems so they can more easily expose their own
/// macros.
#[macro_export]
macro_rules! module_driver {
    (<$gen_type:ident>, $driver_ops:ty, { type: $type:ty, $($f:tt)* }) => {
        // Alias the driver-ops type so the caller-supplied `type:` parameter
        // can be plugged into the generic registration module below.
        type Ops<$gen_type> = $driver_ops;
        type ModuleType = $crate::driver::Module<Ops<$type>>;
        // Forward the remaining items (name, author, etc.) to `module!`.
        $crate::prelude::module! {
            type: ModuleType,
            $($f)*
        }
    }
}

565
rust/kernel/error.rs Normal file
View File

@ -0,0 +1,565 @@
// SPDX-License-Identifier: GPL-2.0
//! Kernel errors.
//!
//! C header: [`include/uapi/asm-generic/errno-base.h`](../../../include/uapi/asm-generic/errno-base.h)
use crate::str::CStr;
use crate::{bindings, c_types};
use alloc::{
alloc::{AllocError, LayoutError},
collections::TryReserveError,
};
use core::convert::From;
use core::fmt;
use core::num::TryFromIntError;
use core::str::{self, Utf8Error};
/// Contains the C-compatible error codes.
pub mod code {
    /// Declares an [`Error`](super::Error) constant for the given kernel error
    /// code, attaching the given lines as its documentation.
    macro_rules! declare_err {
        ($err:tt $(,)? $($doc:expr),+) => {
            $(
                #[doc = $doc]
            )*
            pub const $err: super::Error = super::Error(-(crate::bindings::$err as i32));
        };
    }

    declare_err!(EPERM, "Operation not permitted.");
    declare_err!(ENOENT, "No such file or directory.");
    declare_err!(ESRCH, "No such process.");
    declare_err!(EINTR, "Interrupted system call.");
    declare_err!(EIO, "I/O error.");
    declare_err!(ENXIO, "No such device or address.");
    declare_err!(E2BIG, "Argument list too long.");
    declare_err!(ENOEXEC, "Exec format error.");
    declare_err!(EBADF, "Bad file number.");
    // Fixed: previously duplicated `ENOEXEC`'s description ("Exec format error.").
    declare_err!(ECHILD, "No child processes.");
    declare_err!(EAGAIN, "Try again.");
    declare_err!(ENOMEM, "Out of memory.");
    declare_err!(EACCES, "Permission denied.");
    declare_err!(EFAULT, "Bad address.");
    declare_err!(ENOTBLK, "Block device required.");
    declare_err!(EBUSY, "Device or resource busy.");
    declare_err!(EEXIST, "File exists.");
    declare_err!(EXDEV, "Cross-device link.");
    declare_err!(ENODEV, "No such device.");
    declare_err!(ENOTDIR, "Not a directory.");
    declare_err!(EISDIR, "Is a directory.");
    declare_err!(EINVAL, "Invalid argument.");
    declare_err!(ENFILE, "File table overflow.");
    declare_err!(EMFILE, "Too many open files.");
    declare_err!(ENOTTY, "Not a typewriter.");
    declare_err!(ETXTBSY, "Text file busy.");
    declare_err!(EFBIG, "File too large.");
    declare_err!(ENOSPC, "No space left on device.");
    declare_err!(ESPIPE, "Illegal seek.");
    declare_err!(EROFS, "Read-only file system.");
    declare_err!(EMLINK, "Too many links.");
    declare_err!(EPIPE, "Broken pipe.");
    declare_err!(EDOM, "Math argument out of domain of func.");
    declare_err!(ERANGE, "Math result not representable.");
    declare_err!(EDEADLK, "Resource deadlock would occur.");
    declare_err!(ENAMETOOLONG, "File name too long.");
    declare_err!(ENOLCK, "No record locks available.");
    declare_err!(
        ENOSYS,
        "Invalid system call number.",
        "",
        "This error code is special: arch syscall entry code will return",
        "[`ENOSYS`] if users try to call a syscall that doesn't exist.",
        "To keep failures of syscalls that really do exist distinguishable from",
        "failures due to attempts to use a nonexistent syscall, syscall",
        "implementations should refrain from returning [`ENOSYS`]."
    );
    declare_err!(ENOTEMPTY, "Directory not empty.");
    declare_err!(ELOOP, "Too many symbolic links encountered.");
    declare_err!(EWOULDBLOCK, "Operation would block.");
    declare_err!(ENOMSG, "No message of desired type.");
    declare_err!(EIDRM, "Identifier removed.");
    declare_err!(ECHRNG, "Channel number out of range.");
    declare_err!(EL2NSYNC, "Level 2 not synchronized.");
    declare_err!(EL3HLT, "Level 3 halted.");
    declare_err!(EL3RST, "Level 3 reset.");
    declare_err!(ELNRNG, "Link number out of range.");
    declare_err!(EUNATCH, "Protocol driver not attached.");
    declare_err!(ENOCSI, "No CSI structure available.");
    declare_err!(EL2HLT, "Level 2 halted.");
    declare_err!(EBADE, "Invalid exchange.");
    declare_err!(EBADR, "Invalid request descriptor.");
    declare_err!(EXFULL, "Exchange full.");
    declare_err!(ENOANO, "No anode.");
    declare_err!(EBADRQC, "Invalid request code.");
    declare_err!(EBADSLT, "Invalid slot.");
    declare_err!(EDEADLOCK, "Resource deadlock would occur.");
    declare_err!(EBFONT, "Bad font file format.");
    declare_err!(ENOSTR, "Device not a stream.");
    declare_err!(ENODATA, "No data available.");
    declare_err!(ETIME, "Timer expired.");
    declare_err!(ENOSR, "Out of streams resources.");
    declare_err!(ENONET, "Machine is not on the network.");
    declare_err!(ENOPKG, "Package not installed.");
    declare_err!(EREMOTE, "Object is remote.");
    declare_err!(ENOLINK, "Link has been severed.");
    declare_err!(EADV, "Advertise error.");
    declare_err!(ESRMNT, "Srmount error.");
    declare_err!(ECOMM, "Communication error on send.");
    declare_err!(EPROTO, "Protocol error.");
    declare_err!(EMULTIHOP, "Multihop attempted.");
    declare_err!(EDOTDOT, "RFS specific error.");
    declare_err!(EBADMSG, "Not a data message.");
    declare_err!(EOVERFLOW, "Value too large for defined data type.");
    declare_err!(ENOTUNIQ, "Name not unique on network.");
    declare_err!(EBADFD, "File descriptor in bad state.");
    declare_err!(EREMCHG, "Remote address changed.");
    declare_err!(ELIBACC, "Can not access a needed shared library.");
    declare_err!(ELIBBAD, "Accessing a corrupted shared library.");
    declare_err!(ELIBSCN, ".lib section in a.out corrupted.");
    declare_err!(ELIBMAX, "Attempting to link in too many shared libraries.");
    declare_err!(ELIBEXEC, "Cannot exec a shared library directly.");
    declare_err!(EILSEQ, "Illegal byte sequence.");
    declare_err!(ERESTART, "Interrupted system call should be restarted.");
    declare_err!(ESTRPIPE, "Streams pipe error.");
    declare_err!(EUSERS, "Too many users.");
    declare_err!(ENOTSOCK, "Socket operation on non-socket.");
    declare_err!(EDESTADDRREQ, "Destination address required.");
    declare_err!(EMSGSIZE, "Message too long.");
    declare_err!(EPROTOTYPE, "Protocol wrong type for socket.");
    declare_err!(ENOPROTOOPT, "Protocol not available.");
    declare_err!(EPROTONOSUPPORT, "Protocol not supported.");
    declare_err!(ESOCKTNOSUPPORT, "Socket type not supported.");
    declare_err!(EOPNOTSUPP, "Operation not supported on transport endpoint.");
    declare_err!(EPFNOSUPPORT, "Protocol family not supported.");
    declare_err!(EAFNOSUPPORT, "Address family not supported by protocol.");
    declare_err!(EADDRINUSE, "Address already in use.");
    declare_err!(EADDRNOTAVAIL, "Cannot assign requested address.");
    declare_err!(ENETDOWN, "Network is down.");
    declare_err!(ENETUNREACH, "Network is unreachable.");
    declare_err!(ENETRESET, "Network dropped connection because of reset.");
    declare_err!(ECONNABORTED, "Software caused connection abort.");
    declare_err!(ECONNRESET, "Connection reset by peer.");
    declare_err!(ENOBUFS, "No buffer space available.");
    declare_err!(EISCONN, "Transport endpoint is already connected.");
    declare_err!(ENOTCONN, "Transport endpoint is not connected.");
    declare_err!(ESHUTDOWN, "Cannot send after transport endpoint shutdown.");
    declare_err!(ETOOMANYREFS, "Too many references: cannot splice.");
    declare_err!(ETIMEDOUT, "Connection timed out.");
    declare_err!(ECONNREFUSED, "Connection refused.");
    declare_err!(EHOSTDOWN, "Host is down.");
    declare_err!(EHOSTUNREACH, "No route to host.");
    declare_err!(EALREADY, "Operation already in progress.");
    declare_err!(EINPROGRESS, "Operation now in progress.");
    declare_err!(ESTALE, "Stale file handle.");
    declare_err!(EUCLEAN, "Structure needs cleaning.");
    declare_err!(ENOTNAM, "Not a XENIX named type file.");
    declare_err!(ENAVAIL, "No XENIX semaphores available.");
    declare_err!(EISNAM, "Is a named type file.");
    declare_err!(EREMOTEIO, "Remote I/O error.");
    declare_err!(EDQUOT, "Quota exceeded.");
    declare_err!(ENOMEDIUM, "No medium found.");
    declare_err!(EMEDIUMTYPE, "Wrong medium type.");
    declare_err!(ECANCELED, "Operation Canceled.");
    declare_err!(ENOKEY, "Required key not available.");
    declare_err!(EKEYEXPIRED, "Key has expired.");
    declare_err!(EKEYREVOKED, "Key has been revoked.");
    declare_err!(EKEYREJECTED, "Key was rejected by service.");
    declare_err!(EOWNERDEAD, "Owner died.", "", "For robust mutexes.");
    declare_err!(ENOTRECOVERABLE, "State not recoverable.");
    declare_err!(ERFKILL, "Operation not possible due to RF-kill.");
    declare_err!(EHWPOISON, "Memory page has hardware error.");
    declare_err!(ERESTARTSYS, "Restart the system call.");
    declare_err!(ENOTSUPP, "Operation is not supported.");
}
/// Generic integer kernel error.
///
/// The kernel defines a set of integer generic error codes based on C and
/// POSIX ones. These codes may have a more specific meaning in some contexts.
///
/// # Invariants
///
/// The value is a valid `errno` (i.e. `>= -MAX_ERRNO && < 0`).
#[derive(Clone, Copy, PartialEq, Eq)]
// The inner value is the negative errno, stored as a C `int`.
pub struct Error(c_types::c_int);
impl Error {
    /// Creates an [`Error`] from a kernel error code.
    ///
    /// It is a bug to pass an out-of-range `errno`; `EINVAL` is returned in
    /// such a case.
    pub(crate) fn from_kernel_errno(errno: c_types::c_int) -> Error {
        if errno >= 0 || errno < -(bindings::MAX_ERRNO as i32) {
            // TODO: Make it a `WARN_ONCE` once available.
            crate::pr_warn!(
                "attempted to create `Error` with out of range `errno`: {}",
                errno
            );
            code::EINVAL
        } else {
            // INVARIANT: `errno` was checked to lie in `-MAX_ERRNO..0` above,
            // so the type invariant holds.
            Error(errno)
        }
    }

    /// Creates an [`Error`] from a kernel error code.
    ///
    /// # Safety
    ///
    /// `errno` must be within error code range (i.e. `>= -MAX_ERRNO && < 0`).
    pub(crate) unsafe fn from_kernel_errno_unchecked(errno: c_types::c_int) -> Error {
        // INVARIANT: The caller's contract guarantees the type invariant holds.
        Error(errno)
    }

    /// Returns the kernel error code.
    pub fn to_kernel_errno(self) -> c_types::c_int {
        self.0
    }

    /// Returns a string representing the error, if one exists.
    #[cfg(not(testlib))]
    pub fn name(&self) -> Option<&'static CStr> {
        // SAFETY: Just an FFI call, there are no extra safety requirements.
        let ptr = unsafe { bindings::errname(-self.0) };
        if !ptr.is_null() {
            // SAFETY: The string returned by `errname` is static and `NUL`-terminated.
            Some(unsafe { CStr::from_char_ptr(ptr) })
        } else {
            None
        }
    }

    /// Returns a string representing the error, if one exists.
    ///
    /// When `testlib` is configured, this always returns `None` to avoid the dependency on a
    /// kernel function so that tests that use this (e.g., by calling [`Result::unwrap`]) can still
    /// run in userspace.
    #[cfg(testlib)]
    pub fn name(&self) -> Option<&'static CStr> {
        None
    }
}
impl fmt::Debug for Error {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        if let Some(name) = self.name() {
            // SAFETY: These strings are ASCII-only.
            f.debug_tuple(unsafe { str::from_utf8_unchecked(name) })
                .finish()
        } else {
            // No symbolic name available: print out the (positive) number.
            f.debug_tuple("Error").field(&-self.0).finish()
        }
    }
}
// An integer that does not fit the target type is an invalid argument.
impl From<TryFromIntError> for Error {
    fn from(_: TryFromIntError) -> Error {
        code::EINVAL
    }
}
// Invalid UTF-8 input is an invalid argument.
impl From<Utf8Error> for Error {
    fn from(_: Utf8Error) -> Error {
        code::EINVAL
    }
}
// A failed collection reservation means the allocation failed.
impl From<TryReserveError> for Error {
    fn from(_: TryReserveError) -> Error {
        code::ENOMEM
    }
}
// An invalid memory layout is treated as an allocation failure.
impl From<LayoutError> for Error {
    fn from(_: LayoutError) -> Error {
        code::ENOMEM
    }
}
// A formatting failure is mapped to an invalid argument.
impl From<core::fmt::Error> for Error {
    fn from(_: core::fmt::Error) -> Error {
        code::EINVAL
    }
}
// `Infallible` has no values, so this conversion can never actually run; the
// empty match makes that explicit.
impl From<core::convert::Infallible> for Error {
    fn from(e: core::convert::Infallible) -> Error {
        match e {}
    }
}
/// A [`Result`] with an [`Error`] error type.
///
/// To be used as the return type for functions that may fail.
///
/// # Error codes in C and Rust
///
/// In C, it is common that functions indicate success or failure through
/// their return value; modifying or returning extra data through non-`const`
/// pointer parameters. In particular, in the kernel, functions that may fail
/// typically return an `int` that represents a generic error code. We model
/// those as [`Error`].
///
/// In Rust, it is idiomatic to model functions that may fail as returning
/// a [`Result`]. Since in the kernel many functions return an error code,
/// [`Result`] is a type alias for a [`core::result::Result`] that uses
/// [`Error`] as its error type.
///
/// Note that even if a function does not return anything when it succeeds,
/// it should still be modeled as returning a `Result` rather than
/// just an [`Error`].
pub type Result<T = ()> = core::result::Result<T, Error>;
// A failed heap allocation maps to the kernel's out-of-memory code.
impl From<AllocError> for Error {
    fn from(_: AllocError) -> Error {
        code::ENOMEM
    }
}
// # Invariant: `-bindings::MAX_ERRNO` fits in an `i16`.
crate::static_assert!(bindings::MAX_ERRNO <= -(i16::MIN as i32) as u32);
/// Converts a [`Result`] into a C-style integer return value.
///
/// `Ok(v)` is returned as-is; `Err(e)` is converted to the negative `errno`
/// stored in `e`, which is why `T` must be constructible from an `i16`.
pub(crate) fn from_kernel_result_helper<T>(r: Result<T>) -> T
where
    T: From<i16>,
{
    match r {
        Ok(v) => v,
        // NO-OVERFLOW: negative `errno`s are no smaller than `-bindings::MAX_ERRNO`,
        // `-bindings::MAX_ERRNO` fits in an `i16` as per invariant above,
        // therefore a negative `errno` always fits in an `i16` and will not overflow.
        Err(e) => T::from(e.to_kernel_errno() as i16),
    }
}
/// Transforms a [`crate::error::Result<T>`] to a kernel C integer result.
///
/// This is useful when calling Rust functions that return [`crate::error::Result<T>`]
/// from inside `extern "C"` functions that need to return an integer
/// error result.
///
/// `T` should be convertible to an `i16` via `From<i16>`.
///
/// # Examples
///
/// ```ignore
/// # use kernel::from_kernel_result;
/// # use kernel::c_types;
/// # use kernel::bindings;
/// unsafe extern "C" fn probe_callback(
/// pdev: *mut bindings::platform_device,
/// ) -> c_types::c_int {
/// from_kernel_result! {
/// let ptr = devm_alloc(pdev)?;
/// bindings::platform_set_drvdata(pdev, ptr);
/// Ok(0)
/// }
/// }
/// ```
macro_rules! from_kernel_result {
    ($($tt:tt)*) => {{
        // Wrap the body in an immediately-invoked closure so that `?` can be
        // used inside it, then convert the resulting `Result` into a C integer.
        $crate::error::from_kernel_result_helper((|| {
            $($tt)*
        })())
    }};
}
// Re-export so sibling modules can use the macro via `error::from_kernel_result`.
pub(crate) use from_kernel_result;
/// Transform a kernel "error pointer" to a normal pointer.
///
/// Some kernel C API functions return an "error pointer" which optionally
/// embeds an `errno`. Callers are supposed to check the returned pointer
/// for errors. This function performs the check and converts the "error pointer"
/// to a normal pointer in an idiomatic fashion.
///
/// # Examples
///
/// ```ignore
/// # use kernel::from_kernel_err_ptr;
/// # use kernel::c_types;
/// # use kernel::bindings;
/// fn devm_platform_ioremap_resource(
/// pdev: &mut PlatformDevice,
/// index: u32,
/// ) -> Result<*mut c_types::c_void> {
/// // SAFETY: FFI call.
/// unsafe {
/// from_kernel_err_ptr(bindings::devm_platform_ioremap_resource(
/// pdev.to_ptr(),
/// index,
/// ))
/// }
/// }
/// ```
// TODO: Remove `dead_code` marker once an in-kernel client is available.
#[allow(dead_code)]
pub(crate) fn from_kernel_err_ptr<T>(ptr: *mut T) -> Result<*mut T> {
// CAST: Casting a pointer to `*const c_types::c_void` is always valid.
let const_ptr: *const c_types::c_void = ptr.cast();
// SAFETY: The FFI function does not deref the pointer.
if unsafe { bindings::IS_ERR(const_ptr) } {
// SAFETY: The FFI function does not deref the pointer.
let err = unsafe { bindings::PTR_ERR(const_ptr) };
// CAST: If `IS_ERR()` returns `true`,
// then `PTR_ERR()` is guaranteed to return a
// negative value greater-or-equal to `-bindings::MAX_ERRNO`,
// which always fits in an `i16`, as per the invariant above.
// And an `i16` always fits in an `i32`. So casting `err` to
// an `i32` can never overflow, and is always valid.
//
// SAFETY: `IS_ERR()` ensures `err` is a
// negative value greater-or-equal to `-bindings::MAX_ERRNO`.
return Err(unsafe { Error::from_kernel_errno_unchecked(err as i32) });
}
Ok(ptr)
}
/// Calls a kernel function that returns an integer error code on failure and converts the result
/// to a [`Result`].
pub fn to_result(func: impl FnOnce() -> c_types::c_int) -> Result {
let err = func();
if err < 0 {
Err(Error::from_kernel_errno(err))
} else {
Ok(())
}
}

860
rust/kernel/file.rs Normal file
View File

@ -0,0 +1,860 @@
// SPDX-License-Identifier: GPL-2.0
//! Files and file descriptors.
//!
//! C headers: [`include/linux/fs.h`](../../../../include/linux/fs.h) and
//! [`include/linux/file.h`](../../../../include/linux/file.h)
use crate::{
bindings, c_types,
cred::Credential,
error::{code::*, from_kernel_result, Error, Result},
io_buffer::{IoBufferReader, IoBufferWriter},
iov_iter::IovIter,
mm,
sync::CondVar,
types::PointerWrapper,
user_ptr::{UserSlicePtr, UserSlicePtrReader, UserSlicePtrWriter},
ARef, AlwaysRefCounted,
};
use core::convert::{TryFrom, TryInto};
use core::{cell::UnsafeCell, marker, mem, ptr};
/// Wraps the kernel's `struct file`.
///
/// # Invariants
///
/// Instances of this type are always ref-counted, that is, a call to `get_file` ensures that the
/// allocation remains valid at least until the matching call to `fput`.
// `repr(transparent)` makes casts between `*mut bindings::file` and `*const File`
// valid, which `from_ptr`/`from_fd` below rely on.
#[repr(transparent)]
pub struct File(pub(crate) UnsafeCell<bindings::file>);
// TODO: Accessing fields of `struct file` through the pointer is UB because other threads may be
// writing to them. However, this is how the C code currently operates: naked reads and writes to
// fields. Even if we used relaxed atomics on the Rust side, we can't force this on the C side.
impl File {
    /// Constructs a new [`struct file`] wrapper from a file descriptor.
    ///
    /// The file descriptor belongs to the current process.
    ///
    /// Returns `EBADF` if `fd` does not refer to an open file.
    pub fn from_fd(fd: u32) -> Result<ARef<Self>> {
        // SAFETY: FFI call, there are no requirements on `fd`.
        // `fget` returns null for an invalid descriptor; that is mapped to `EBADF`.
        let ptr = ptr::NonNull::new(unsafe { bindings::fget(fd) }).ok_or(EBADF)?;
        // SAFETY: `fget` increments the refcount before returning.
        Ok(unsafe { ARef::from_raw(ptr.cast()) })
    }
    /// Creates a reference to a [`File`] from a valid pointer.
    ///
    /// # Safety
    ///
    /// The caller must ensure that `ptr` is valid and remains valid for the lifetime of the
    /// returned [`File`] instance.
    pub(crate) unsafe fn from_ptr<'a>(ptr: *const bindings::file) -> &'a File {
        // SAFETY: The safety requirements guarantee the validity of the dereference, while the
        // `File` type being transparent makes the cast ok.
        unsafe { &*ptr.cast() }
    }
    /// Returns the current seek/cursor/pointer position (`struct file::f_pos`).
    pub fn pos(&self) -> u64 {
        // SAFETY: The file is valid because the shared reference guarantees a nonzero refcount.
        // The field is read through a raw pointer (not a reference) because other threads may
        // write to it concurrently; see the TODO above this impl block.
        unsafe { core::ptr::addr_of!((*self.0.get()).f_pos).read() as _ }
    }
    /// Returns whether the file is in blocking mode.
    pub fn is_blocking(&self) -> bool {
        // A file is blocking unless `O_NONBLOCK` is set in its flags.
        self.flags() & bindings::O_NONBLOCK == 0
    }
    /// Returns the credentials of the task that originally opened the file.
    pub fn cred(&self) -> &Credential {
        // SAFETY: The file is valid because the shared reference guarantees a nonzero refcount.
        let ptr = unsafe { core::ptr::addr_of!((*self.0.get()).f_cred).read() };
        // SAFETY: The lifetimes of `self` and `Credential` are tied, so it is guaranteed that
        // the credential pointer remains valid (because the file is still alive, and it doesn't
        // change over the lifetime of a file).
        unsafe { Credential::from_ptr(ptr) }
    }
    /// Returns the flags associated with the file.
    pub fn flags(&self) -> u32 {
        // SAFETY: The file is valid because the shared reference guarantees a nonzero refcount.
        unsafe { core::ptr::addr_of!((*self.0.get()).f_flags).read() }
    }
}
// SAFETY: The type invariants guarantee that `File` is always ref-counted.
unsafe impl AlwaysRefCounted for File {
    fn inc_ref(&self) {
        // SAFETY: The existence of a shared reference means that the refcount is nonzero.
        unsafe { bindings::get_file(self.0.get()) };
    }
    unsafe fn dec_ref(obj: ptr::NonNull<Self>) {
        // SAFETY: The safety requirements guarantee that the refcount is nonzero.
        // The cast from `NonNull<File>` to `*mut bindings::file` is valid because
        // `File` is `repr(transparent)` over `UnsafeCell<bindings::file>`.
        unsafe { bindings::fput(obj.cast().as_ptr()) }
    }
}
/// A file descriptor reservation.
///
/// This allows the creation of a file descriptor in two steps: first, we reserve a slot for it,
/// then we commit or drop the reservation. The first step may fail (e.g., the current process ran
/// out of available slots), but commit and drop never fail (and are mutually exclusive).
pub struct FileDescriptorReservation {
    // The reserved descriptor number, as returned by `get_unused_fd_flags`.
    fd: u32,
}
impl FileDescriptorReservation {
    /// Creates a new file descriptor reservation.
    ///
    /// Fails with the returned kernel error code if no descriptor slot is
    /// available (a negative return from `get_unused_fd_flags`).
    pub fn new(flags: u32) -> Result<Self> {
        // SAFETY: FFI call, there are no safety requirements on `flags`.
        let fd = unsafe { bindings::get_unused_fd_flags(flags) };
        if fd < 0 {
            return Err(Error::from_kernel_errno(fd));
        }
        Ok(Self { fd: fd as _ })
    }
    /// Returns the file descriptor number that was reserved.
    pub fn reserved_fd(&self) -> u32 {
        self.fd
    }
    /// Commits the reservation.
    ///
    /// The previously reserved file descriptor is bound to `file`.
    pub fn commit(self, file: ARef<File>) {
        // SAFETY: `self.fd` was previously returned by `get_unused_fd_flags`, and `file.ptr` is
        // guaranteed to have an owned ref count by its type invariants.
        unsafe { bindings::fd_install(self.fd, file.0.get()) };
        // `fd_install` consumes both the file descriptor and the file reference, so we cannot run
        // the destructors.
        // Forgetting `self` prevents `Drop` from calling `put_unused_fd`; forgetting `file`
        // prevents its refcount from being decremented.
        core::mem::forget(self);
        core::mem::forget(file);
    }
}
impl Drop for FileDescriptorReservation {
    // Only runs when the reservation was not committed, since `commit` forgets
    // `self`; it returns the reserved slot to the kernel.
    fn drop(&mut self) {
        // SAFETY: `self.fd` was returned by a previous call to `get_unused_fd_flags`.
        unsafe { bindings::put_unused_fd(self.fd) };
    }
}
/// Wraps the kernel's `struct poll_table_struct`.
///
/// # Invariants
///
/// The pointer `PollTable::ptr` is null or valid.
pub struct PollTable {
    // Null is allowed and treated as "nothing to register" (see `register_wait`).
    ptr: *mut bindings::poll_table_struct,
}
impl PollTable {
    /// Constructs a new `struct poll_table_struct` wrapper.
    ///
    /// # Safety
    ///
    /// The pointer `ptr` must be either null or a valid pointer for the lifetime of the object.
    unsafe fn from_ptr(ptr: *mut bindings::poll_table_struct) -> Self {
        Self { ptr }
    }
    /// Associates the given file and condition variable to this poll table. It means notifying the
    /// condition variable will notify the poll table as well; additionally, the association
    /// between the condition variable and the file will automatically be undone by the kernel when
    /// the file is destructed. To unilaterally remove the association before then, one can call
    /// [`CondVar::free_waiters`].
    ///
    /// # Safety
    ///
    /// If the condition variable is destroyed before the file, then [`CondVar::free_waiters`] must
    /// be called to ensure that all waiters are flushed out.
    pub unsafe fn register_wait<'a>(&self, file: &'a File, cv: &'a CondVar) {
        // A null table means there is nothing to register with.
        if self.ptr.is_null() {
            return;
        }
        // SAFETY: `PollTable::ptr` is guaranteed to be valid by the type invariants and the null
        // check above.
        let table = unsafe { &*self.ptr };
        // `_qproc` may be absent, in which case registration is a no-op.
        if let Some(proc) = table._qproc {
            // SAFETY: All pointers are known to be valid.
            unsafe { proc(file.0.get() as _, cv.wait_list.get(), self.ptr) }
        }
    }
}
/// Equivalent to [`std::io::SeekFrom`].
///
/// Used to describe the seek origin passed to the `llseek` file operation.
///
/// [`std::io::SeekFrom`]: https://doc.rust-lang.org/std/io/enum.SeekFrom.html
pub enum SeekFrom {
    /// Equivalent to C's `SEEK_SET`.
    Start(u64),
    /// Equivalent to C's `SEEK_END`.
    End(i64),
    /// Equivalent to C's `SEEK_CUR`.
    Current(i64),
}
// Zero-sized holder for the `extern "C"` callbacks that adapt the kernel's file
// operations to an `Operations` implementation; the phantom parameters tie the
// callbacks to a specific open adapter `A` and operations type `T` (see the
// impl block below).
pub(crate) struct OperationsVtable<A, T>(marker::PhantomData<A>, marker::PhantomData<T>);
impl<A: OpenAdapter<T::OpenData>, T: Operations> OperationsVtable<A, T> {
/// Called by the VFS when an inode should be opened.
///
/// Calls `T::open` on the returned value of `A::convert`.
///
/// # Safety
///
/// The returned value of `A::convert` must be a valid non-null pointer and
/// `T:open` must return a valid non-null pointer on an `Ok` result.
unsafe extern "C" fn open_callback(
inode: *mut bindings::inode,
file: *mut bindings::file,
) -> c_types::c_int {
from_kernel_result! {
// SAFETY: `A::convert` must return a valid non-null pointer that
// should point to data in the inode or file that lives longer
// than the following use of `T::open`.
let arg = unsafe { A::convert(inode, file) };
// SAFETY: The C contract guarantees that `file` is valid. Additionally,
// `fileref` never outlives this function, so it is guaranteed to be
// valid.
let fileref = unsafe { File::from_ptr(file) };
// SAFETY: `arg` was previously returned by `A::convert` and must
// be a valid non-null pointer.
let ptr = T::open(unsafe { &*arg }, fileref)?.into_pointer();
// SAFETY: The C contract guarantees that `private_data` is available
// for implementers of the file operations (no other C code accesses
// it), so we know that there are no concurrent threads/CPUs accessing
// it (it's not visible to any other Rust code).
unsafe { (*file).private_data = ptr as *mut c_types::c_void };
Ok(0)
}
}
unsafe extern "C" fn read_callback(
file: *mut bindings::file,
buf: *mut c_types::c_char,
len: c_types::c_size_t,
offset: *mut bindings::loff_t,
) -> c_types::c_ssize_t {
from_kernel_result! {
let mut data = unsafe { UserSlicePtr::new(buf as *mut c_types::c_void, len).writer() };
// SAFETY: `private_data` was initialised by `open_callback` with a value returned by
// `T::Data::into_pointer`. `T::Data::from_pointer` is only called by the
// `release` callback, which the C API guarantees that will be called only when all
// references to `file` have been released, so we know it can't be called while this
// function is running.
let f = unsafe { T::Data::borrow((*file).private_data) };
// No `FMODE_UNSIGNED_OFFSET` support, so `offset` must be in [0, 2^63).
// See discussion in https://github.com/fishinabarrel/linux-kernel-module-rust/pull/113
let read = T::read(
f,
unsafe { File::from_ptr(file) },
&mut data,
unsafe { *offset }.try_into()?,
)?;
unsafe { (*offset) += bindings::loff_t::try_from(read).unwrap() };
Ok(read as _)
}
}
unsafe extern "C" fn read_iter_callback(
iocb: *mut bindings::kiocb,
raw_iter: *mut bindings::iov_iter,
) -> isize {
from_kernel_result! {
let mut iter = unsafe { IovIter::from_ptr(raw_iter) };
let file = unsafe { (*iocb).ki_filp };
let offset = unsafe { (*iocb).ki_pos };
// SAFETY: `private_data` was initialised by `open_callback` with a value returned by
// `T::Data::into_pointer`. `T::Data::from_pointer` is only called by the
// `release` callback, which the C API guarantees that will be called only when all
// references to `file` have been released, so we know it can't be called while this
// function is running.
let f = unsafe { T::Data::borrow((*file).private_data) };
let read =
T::read(f, unsafe { File::from_ptr(file) }, &mut iter, offset.try_into()?)?;
unsafe { (*iocb).ki_pos += bindings::loff_t::try_from(read).unwrap() };
Ok(read as _)
}
}
unsafe extern "C" fn write_callback(
file: *mut bindings::file,
buf: *const c_types::c_char,
len: c_types::c_size_t,
offset: *mut bindings::loff_t,
) -> c_types::c_ssize_t {
from_kernel_result! {
let mut data = unsafe { UserSlicePtr::new(buf as *mut c_types::c_void, len).reader() };
// SAFETY: `private_data` was initialised by `open_callback` with a value returned by
// `T::Data::into_pointer`. `T::Data::from_pointer` is only called by the
// `release` callback, which the C API guarantees that will be called only when all
// references to `file` have been released, so we know it can't be called while this
// function is running.
let f = unsafe { T::Data::borrow((*file).private_data) };
// No `FMODE_UNSIGNED_OFFSET` support, so `offset` must be in [0, 2^63).
// See discussion in https://github.com/fishinabarrel/linux-kernel-module-rust/pull/113
let written = T::write(
f,
unsafe { File::from_ptr(file) },
&mut data,
unsafe { *offset }.try_into()?
)?;
unsafe { (*offset) += bindings::loff_t::try_from(written).unwrap() };
Ok(written as _)
}
}
unsafe extern "C" fn write_iter_callback(
iocb: *mut bindings::kiocb,
raw_iter: *mut bindings::iov_iter,
) -> isize {
from_kernel_result! {
let mut iter = unsafe { IovIter::from_ptr(raw_iter) };
let file = unsafe { (*iocb).ki_filp };
let offset = unsafe { (*iocb).ki_pos };
// SAFETY: `private_data` was initialised by `open_callback` with a value returned by
// `T::Data::into_pointer`. `T::Data::from_pointer` is only called by the
// `release` callback, which the C API guarantees that will be called only when all
// references to `file` have been released, so we know it can't be called while this
// function is running.
let f = unsafe { T::Data::borrow((*file).private_data) };
let written =
T::write(f, unsafe { File::from_ptr(file) }, &mut iter, offset.try_into()?)?;
unsafe { (*iocb).ki_pos += bindings::loff_t::try_from(written).unwrap() };
Ok(written as _)
}
}
unsafe extern "C" fn release_callback(
_inode: *mut bindings::inode,
file: *mut bindings::file,
) -> c_types::c_int {
let ptr = mem::replace(unsafe { &mut (*file).private_data }, ptr::null_mut());
T::release(unsafe { T::Data::from_pointer(ptr as _) }, unsafe {
File::from_ptr(file)
});
0
}
unsafe extern "C" fn llseek_callback(
file: *mut bindings::file,
offset: bindings::loff_t,
whence: c_types::c_int,
) -> bindings::loff_t {
from_kernel_result! {
let off = match whence as u32 {
bindings::SEEK_SET => SeekFrom::Start(offset.try_into()?),
bindings::SEEK_CUR => SeekFrom::Current(offset),
bindings::SEEK_END => SeekFrom::End(offset),
_ => return Err(EINVAL),
};
// SAFETY: `private_data` was initialised by `open_callback` with a value returned by
// `T::Data::into_pointer`. `T::Data::from_pointer` is only called by the
// `release` callback, which the C API guarantees that will be called only when all
// references to `file` have been released, so we know it can't be called while this
// function is running.
let f = unsafe { T::Data::borrow((*file).private_data) };
let off = T::seek(f, unsafe { File::from_ptr(file) }, off)?;
Ok(off as bindings::loff_t)
}
}
unsafe extern "C" fn unlocked_ioctl_callback(
file: *mut bindings::file,
cmd: c_types::c_uint,
arg: c_types::c_ulong,
) -> c_types::c_long {
from_kernel_result! {
// SAFETY: `private_data` was initialised by `open_callback` with a value returned by
// `T::Data::into_pointer`. `T::Data::from_pointer` is only called by the
// `release` callback, which the C API guarantees that will be called only when all
// references to `file` have been released, so we know it can't be called while this
// function is running.
let f = unsafe { T::Data::borrow((*file).private_data) };
let mut cmd = IoctlCommand::new(cmd as _, arg as _);
let ret = T::ioctl(f, unsafe { File::from_ptr(file) }, &mut cmd)?;
Ok(ret as _)
}
}
unsafe extern "C" fn compat_ioctl_callback(
file: *mut bindings::file,
cmd: c_types::c_uint,
arg: c_types::c_ulong,
) -> c_types::c_long {
from_kernel_result! {
// SAFETY: `private_data` was initialised by `open_callback` with a value returned by
// `T::Data::into_pointer`. `T::Data::from_pointer` is only called by the
// `release` callback, which the C API guarantees that will be called only when all
// references to `file` have been released, so we know it can't be called while this
// function is running.
let f = unsafe { T::Data::borrow((*file).private_data) };
let mut cmd = IoctlCommand::new(cmd as _, arg as _);
let ret = T::compat_ioctl(f, unsafe { File::from_ptr(file) }, &mut cmd)?;
Ok(ret as _)
}
}
/// C callback for the `mmap` entry of [`struct file_operations`].
///
/// Wraps the raw `vm_area_struct` pointer in an [`mm::virt::Area`] and forwards
/// the call to [`Operations::mmap`], returning 0 on success.
unsafe extern "C" fn mmap_callback(
    file: *mut bindings::file,
    vma: *mut bindings::vm_area_struct,
) -> c_types::c_int {
    from_kernel_result! {
        // SAFETY: `private_data` was initialised by `open_callback` with a value returned by
        // `T::Data::into_pointer`. `T::Data::from_pointer` is only called by the
        // `release` callback, which the C API guarantees that will be called only when all
        // references to `file` have been released, so we know it can't be called while this
        // function is running.
        let f = unsafe { T::Data::borrow((*file).private_data) };

        // SAFETY: The C API guarantees that `vma` is valid for the duration of this call.
        // `area` only lives within this call, so it is guaranteed to be valid.
        let mut area = unsafe { mm::virt::Area::from_ptr(vma) };

        // SAFETY: The C API guarantees that `file` is valid for the duration of this call,
        // which is longer than the lifetime of the file reference.
        T::mmap(f, unsafe { File::from_ptr(file) }, &mut area)?;
        Ok(0)
    }
}
/// C callback for the `fsync` entry of [`struct file_operations`].
///
/// Converts the byte range and `datasync` flag into Rust types before forwarding
/// to [`Operations::fsync`].
unsafe extern "C" fn fsync_callback(
    file: *mut bindings::file,
    start: bindings::loff_t,
    end: bindings::loff_t,
    datasync: c_types::c_int,
) -> c_types::c_int {
    from_kernel_result! {
        // Negative offsets fail the conversion to `u64` and become an error return.
        let start = start.try_into()?;
        let end = end.try_into()?;
        let datasync = datasync != 0;

        // SAFETY: `private_data` was initialised by `open_callback` with a value returned by
        // `T::Data::into_pointer`. `T::Data::from_pointer` is only called by the
        // `release` callback, which the C API guarantees that will be called only when all
        // references to `file` have been released, so we know it can't be called while this
        // function is running.
        let f = unsafe { T::Data::borrow((*file).private_data) };
        let res = T::fsync(f, unsafe { File::from_ptr(file) }, start, end, datasync)?;

        // NOTE(review): this `unwrap` panics if the implementation returns a `u32` above
        // `c_int::MAX` — presumably implementations never do; confirm or propagate instead.
        Ok(res.try_into().unwrap())
    }
}
/// C callback for the `poll` entry of [`struct file_operations`].
///
/// Forwards to [`Operations::poll`]; any error is collapsed into `POLLERR`
/// because the C `poll` signature has no errno channel.
unsafe extern "C" fn poll_callback(
    file: *mut bindings::file,
    wait: *mut bindings::poll_table_struct,
) -> bindings::__poll_t {
    // SAFETY: `private_data` was initialised by `open_callback` with a value returned by
    // `T::Data::into_pointer`. `T::Data::from_pointer` is only called by the `release`
    // callback, which the C API guarantees that will be called only when all references to
    // `file` have been released, so we know it can't be called while this function is running.
    let f = unsafe { T::Data::borrow((*file).private_data) };
    match T::poll(f, unsafe { File::from_ptr(file) }, unsafe {
        &PollTable::from_ptr(wait)
    }) {
        Ok(v) => v,
        Err(_) => bindings::POLLERR,
    }
}
/// The [`struct file_operations`] vtable built for `T`.
///
/// `open` and `release` are always populated; every other supported entry is
/// populated only when the corresponding [`ToUse`] flag in `T::TO_USE` is set,
/// so the C side sees a null pointer (and applies its default behaviour) for
/// operations the driver does not implement.
const VTABLE: bindings::file_operations = bindings::file_operations {
    open: Some(Self::open_callback),
    release: Some(Self::release_callback),
    read: if T::TO_USE.read {
        Some(Self::read_callback)
    } else {
        None
    },
    write: if T::TO_USE.write {
        Some(Self::write_callback)
    } else {
        None
    },
    llseek: if T::TO_USE.seek {
        Some(Self::llseek_callback)
    } else {
        None
    },

    check_flags: None,
    compat_ioctl: if T::TO_USE.compat_ioctl {
        Some(Self::compat_ioctl_callback)
    } else {
        None
    },
    copy_file_range: None,
    fallocate: None,
    fadvise: None,
    fasync: None,
    flock: None,
    flush: None,
    fsync: if T::TO_USE.fsync {
        Some(Self::fsync_callback)
    } else {
        None
    },
    get_unmapped_area: None,
    iterate: None,
    iterate_shared: None,
    iopoll: None,
    lock: None,
    mmap: if T::TO_USE.mmap {
        Some(Self::mmap_callback)
    } else {
        None
    },
    mmap_supported_flags: 0,
    owner: ptr::null_mut(),
    poll: if T::TO_USE.poll {
        Some(Self::poll_callback)
    } else {
        None
    },
    read_iter: if T::TO_USE.read_iter {
        Some(Self::read_iter_callback)
    } else {
        None
    },
    remap_file_range: None,
    sendpage: None,
    setlease: None,
    show_fdinfo: None,
    splice_read: None,
    splice_write: None,
    unlocked_ioctl: if T::TO_USE.ioctl {
        Some(Self::unlocked_ioctl_callback)
    } else {
        None
    },
    write_iter: if T::TO_USE.write_iter {
        Some(Self::write_iter_callback)
    } else {
        None
    },
};

/// Builds an instance of [`struct file_operations`].
///
/// # Safety
///
/// The caller must ensure that the adapter is compatible with the way the device is registered.
pub(crate) const unsafe fn build() -> &'static bindings::file_operations {
    // The vtable is a `const` item, so a `'static` reference to it is always valid.
    &Self::VTABLE
}
}
/// Represents which fields of [`struct file_operations`] should be populated with pointers.
///
/// Typically defined via the [`declare_file_operations`] macro rather than written by hand.
pub struct ToUse {
    /// The `read` field of [`struct file_operations`].
    pub read: bool,

    /// The `read_iter` field of [`struct file_operations`].
    pub read_iter: bool,

    /// The `write` field of [`struct file_operations`].
    pub write: bool,

    /// The `write_iter` field of [`struct file_operations`].
    pub write_iter: bool,

    /// The `llseek` field of [`struct file_operations`].
    pub seek: bool,

    /// The `unlocked_ioctl` field of [`struct file_operations`].
    pub ioctl: bool,

    /// The `compat_ioctl` field of [`struct file_operations`].
    pub compat_ioctl: bool,

    /// The `fsync` field of [`struct file_operations`].
    pub fsync: bool,

    /// The `mmap` field of [`struct file_operations`].
    pub mmap: bool,

    /// The `poll` field of [`struct file_operations`].
    pub poll: bool,
}

/// A constant version where all values are set to `false`, that is, all supported fields will
/// be set to null pointers.
pub const USE_NONE: ToUse = ToUse {
    read: false,
    read_iter: false,
    write: false,
    write_iter: false,
    seek: false,
    ioctl: false,
    compat_ioctl: false,
    fsync: false,
    mmap: false,
    poll: false,
};
/// Defines the [`Operations::TO_USE`] field based on a list of fields to be populated.
///
/// With no arguments, all entries are left unpopulated ([`USE_NONE`]); otherwise each listed
/// field is set to `true` and the rest default to `false` via struct-update syntax.
#[macro_export]
macro_rules! declare_file_operations {
    () => {
        const TO_USE: $crate::file::ToUse = $crate::file::USE_NONE;
    };
    ($($i:ident),+) => {
        // The struct-update syntax is kept even when every field is listed (which would
        // otherwise trigger `needless_update`) so new `ToUse` fields default to `false`.
        #[allow(clippy::needless_update)]
        const TO_USE: $crate::file::ToUse =
            $crate::file::ToUse {
                $($i: true),+ ,
                ..$crate::file::USE_NONE
            };
    };
}
/// Allows the handling of ioctls defined with the `_IO`, `_IOR`, `_IOW`, and `_IOWR` macros.
///
/// For each macro, there is a handler function that takes the appropriate types as arguments.
/// Every handler defaults to returning `EINVAL`, so implementers only override the
/// direction(s) their device actually supports; dispatch happens via
/// [`IoctlCommand::dispatch`].
pub trait IoctlHandler: Sync {
    /// The type of the first argument to each associated function.
    type Target<'a>;

    /// Handles ioctls defined with the `_IO` macro, that is, with no buffer as argument.
    fn pure(_this: Self::Target<'_>, _file: &File, _cmd: u32, _arg: usize) -> Result<i32> {
        Err(EINVAL)
    }

    /// Handles ioctls defined with the `_IOR` macro, that is, with an output buffer provided as
    /// argument.
    fn read(
        _this: Self::Target<'_>,
        _file: &File,
        _cmd: u32,
        _writer: &mut UserSlicePtrWriter,
    ) -> Result<i32> {
        Err(EINVAL)
    }

    /// Handles ioctls defined with the `_IOW` macro, that is, with an input buffer provided as
    /// argument.
    fn write(
        _this: Self::Target<'_>,
        _file: &File,
        _cmd: u32,
        _reader: &mut UserSlicePtrReader,
    ) -> Result<i32> {
        Err(EINVAL)
    }

    /// Handles ioctls defined with the `_IOWR` macro, that is, with a buffer for both input and
    /// output provided as argument.
    fn read_write(
        _this: Self::Target<'_>,
        _file: &File,
        _cmd: u32,
        _data: UserSlicePtr,
    ) -> Result<i32> {
        Err(EINVAL)
    }
}
/// Represents an ioctl command.
///
/// It can use the components of an ioctl command to dispatch ioctls using
/// [`IoctlCommand::dispatch`].
pub struct IoctlCommand {
    // Raw 32-bit command as received from userspace.
    cmd: u32,
    // Raw pointer-sized argument; also the user address of the buffer, when there is one.
    arg: usize,
    // User slice covering the buffer encoded in `cmd`/`arg`; `take`n (at most once) by
    // `dispatch` so the slice cannot be re-read after being handed to a handler.
    user_slice: Option<UserSlicePtr>,
}

impl IoctlCommand {
    /// Constructs a new [`IoctlCommand`].
    ///
    /// The buffer size is decoded from the size bits of the command number.
    fn new(cmd: u32, arg: usize) -> Self {
        let size = (cmd >> bindings::_IOC_SIZESHIFT) & bindings::_IOC_SIZEMASK;

        // SAFETY: We only create one instance of the user slice per ioctl call, so TOCTOU issues
        // are not possible.
        let user_slice = Some(unsafe { UserSlicePtr::new(arg as _, size as _) });
        Self {
            cmd,
            arg,
            user_slice,
        }
    }

    /// Dispatches the given ioctl to the appropriate handler based on the value of the command. It
    /// also creates a [`UserSlicePtr`], [`UserSlicePtrReader`], or [`UserSlicePtrWriter`]
    /// depending on the direction of the buffer of the command.
    ///
    /// It is meant to be used in implementations of [`Operations::ioctl`] and
    /// [`Operations::compat_ioctl`].
    pub fn dispatch<T: IoctlHandler>(
        &mut self,
        handler: T::Target<'_>,
        file: &File,
    ) -> Result<i32> {
        let dir = (self.cmd >> bindings::_IOC_DIRSHIFT) & bindings::_IOC_DIRMASK;
        if dir == bindings::_IOC_NONE {
            return T::pure(handler, file, self.cmd, self.arg);
        }

        // Fails with EINVAL if `dispatch` is called a second time on the same command.
        let data = self.user_slice.take().ok_or(EINVAL)?;

        // Named constant so the combined direction can be used as a `match` pattern.
        const READ_WRITE: u32 = bindings::_IOC_READ | bindings::_IOC_WRITE;
        match dir {
            bindings::_IOC_WRITE => T::write(handler, file, self.cmd, &mut data.reader()),
            bindings::_IOC_READ => T::read(handler, file, self.cmd, &mut data.writer()),
            READ_WRITE => T::read_write(handler, file, self.cmd, data),
            _ => Err(EINVAL),
        }
    }

    /// Returns the raw 32-bit value of the command and the ptr-sized argument.
    pub fn raw(&self) -> (u32, usize) {
        (self.cmd, self.arg)
    }
}
/// Trait for extracting file open arguments from kernel data structures.
///
/// This is meant to be implemented by registration managers, which know where the
/// per-registration context was stashed on the C side.
pub trait OpenAdapter<T: Sync> {
    /// Converts untyped data stored in [`struct inode`] and [`struct file`] (when [`struct
    /// file_operations::open`] is called) into the given type. For example, for `miscdev`
    /// devices, a pointer to the registered [`struct miscdev`] is stored in [`struct
    /// file::private_data`].
    ///
    /// # Safety
    ///
    /// This function must be called only when [`struct file_operations::open`] is being called for
    /// a file that was registered by the implementer. The returned pointer must be valid and
    /// not-null.
    unsafe fn convert(_inode: *mut bindings::inode, _file: *mut bindings::file) -> *const T;
}
/// Corresponds to the kernel's `struct file_operations`.
///
/// You implement this trait whenever you would create a `struct file_operations`.
///
/// File descriptors may be used from multiple threads/processes concurrently, so your type must be
/// [`Sync`]. It must also be [`Send`] because [`Operations::release`] will be called from the
/// thread that decrements that associated file's refcount to zero.
///
/// Every operation except [`Operations::open`] has a default implementation returning an error
/// (or, for `poll`, "always ready"), so implementers only provide the operations listed in
/// [`Operations::TO_USE`].
pub trait Operations {
    /// The methods to use to populate [`struct file_operations`].
    const TO_USE: ToUse;

    /// The type of the context data returned by [`Operations::open`] and made available to
    /// other methods.
    type Data: PointerWrapper + Send + Sync = ();

    /// The type of the context data passed to [`Operations::open`].
    type OpenData: Sync = ();

    /// Creates a new instance of this file.
    ///
    /// Corresponds to the `open` function pointer in `struct file_operations`.
    fn open(context: &Self::OpenData, file: &File) -> Result<Self::Data>;

    /// Cleans up after the last reference to the file goes away.
    ///
    /// Note that context data is moved, so it will be freed automatically unless the
    /// implementation moves it elsewhere.
    ///
    /// Corresponds to the `release` function pointer in `struct file_operations`.
    fn release(_data: Self::Data, _file: &File) {}

    /// Reads data from this file to the caller's buffer.
    ///
    /// Corresponds to the `read` and `read_iter` function pointers in `struct file_operations`.
    fn read(
        _data: <Self::Data as PointerWrapper>::Borrowed<'_>,
        _file: &File,
        _writer: &mut impl IoBufferWriter,
        _offset: u64,
    ) -> Result<usize> {
        Err(EINVAL)
    }

    /// Writes data from the caller's buffer to this file.
    ///
    /// Corresponds to the `write` and `write_iter` function pointers in `struct file_operations`.
    fn write(
        _data: <Self::Data as PointerWrapper>::Borrowed<'_>,
        _file: &File,
        _reader: &mut impl IoBufferReader,
        _offset: u64,
    ) -> Result<usize> {
        Err(EINVAL)
    }

    /// Changes the position of the file.
    ///
    /// Corresponds to the `llseek` function pointer in `struct file_operations`.
    fn seek(
        _data: <Self::Data as PointerWrapper>::Borrowed<'_>,
        _file: &File,
        _offset: SeekFrom,
    ) -> Result<u64> {
        Err(EINVAL)
    }

    /// Performs IO control operations that are specific to the file.
    ///
    /// Corresponds to the `unlocked_ioctl` function pointer in `struct file_operations`.
    fn ioctl(
        _data: <Self::Data as PointerWrapper>::Borrowed<'_>,
        _file: &File,
        _cmd: &mut IoctlCommand,
    ) -> Result<i32> {
        Err(ENOTTY)
    }

    /// Performs 32-bit IO control operations that are specific to the file on 64-bit kernels.
    ///
    /// Corresponds to the `compat_ioctl` function pointer in `struct file_operations`.
    fn compat_ioctl(
        _data: <Self::Data as PointerWrapper>::Borrowed<'_>,
        _file: &File,
        _cmd: &mut IoctlCommand,
    ) -> Result<i32> {
        Err(ENOTTY)
    }

    /// Syncs pending changes to this file.
    ///
    /// Corresponds to the `fsync` function pointer in `struct file_operations`.
    fn fsync(
        _data: <Self::Data as PointerWrapper>::Borrowed<'_>,
        _file: &File,
        _start: u64,
        _end: u64,
        _datasync: bool,
    ) -> Result<u32> {
        Err(EINVAL)
    }

    /// Maps areas of the caller's virtual memory with device/file memory.
    ///
    /// Corresponds to the `mmap` function pointer in `struct file_operations`.
    fn mmap(
        _data: <Self::Data as PointerWrapper>::Borrowed<'_>,
        _file: &File,
        _vma: &mut mm::virt::Area,
    ) -> Result {
        Err(EINVAL)
    }

    /// Checks the state of the file and optionally registers for notification when the state
    /// changes.
    ///
    /// Corresponds to the `poll` function pointer in `struct file_operations`.
    /// The default reports the file as always readable and writable.
    fn poll(
        _data: <Self::Data as PointerWrapper>::Borrowed<'_>,
        _file: &File,
        _table: &PollTable,
    ) -> Result<u32> {
        Ok(bindings::POLLIN | bindings::POLLOUT | bindings::POLLRDNORM | bindings::POLLWRNORM)
    }
}

478
rust/kernel/gpio.rs Normal file
View File

@ -0,0 +1,478 @@
// SPDX-License-Identifier: GPL-2.0
//! Support for gpio device drivers.
//!
//! C header: [`include/linux/gpio/driver.h`](../../../../include/linux/gpio/driver.h)
use crate::{
bindings, c_types, device, error::code::*, error::from_kernel_result, types::PointerWrapper,
Error, Result,
};
use core::{
cell::UnsafeCell,
marker::{PhantomData, PhantomPinned},
pin::Pin,
};
#[cfg(CONFIG_GPIOLIB_IRQCHIP)]
pub use irqchip::{ChipWithIrqChip, RegistrationWithIrqChip};
/// The direction of a gpio line.
///
/// Discriminants match the C `GPIO_LINE_DIRECTION_*` constants so the enum can be
/// returned to C as-is.
pub enum LineDirection {
    /// Direction is input.
    In = bindings::GPIO_LINE_DIRECTION_IN as _,

    /// Direction is output.
    Out = bindings::GPIO_LINE_DIRECTION_OUT as _,
}

/// A gpio chip.
///
/// All operations have default error implementations, so drivers only override the
/// operations they list in [`Chip::TO_USE`].
pub trait Chip {
    /// Context data associated with the gpio chip.
    ///
    /// It determines the type of the context data passed to each of the methods of the trait.
    type Data: PointerWrapper + Sync + Send;

    /// The methods to use to populate [`struct gpio_chip`]. This is typically populated with
    /// [`declare_gpio_chip_operations`].
    const TO_USE: ToUse;

    /// Returns the direction of the given gpio line.
    fn get_direction(
        _data: <Self::Data as PointerWrapper>::Borrowed<'_>,
        _offset: u32,
    ) -> Result<LineDirection> {
        Err(ENOTSUPP)
    }

    /// Configures the direction as input of the given gpio line.
    fn direction_input(
        _data: <Self::Data as PointerWrapper>::Borrowed<'_>,
        _offset: u32,
    ) -> Result {
        Err(EIO)
    }

    /// Configures the direction as output of the given gpio line.
    ///
    /// The value that will be initially output is also specified.
    fn direction_output(
        _data: <Self::Data as PointerWrapper>::Borrowed<'_>,
        _offset: u32,
        _value: bool,
    ) -> Result {
        Err(ENOTSUPP)
    }

    /// Returns the current value of the given gpio line.
    fn get(_data: <Self::Data as PointerWrapper>::Borrowed<'_>, _offset: u32) -> Result<bool> {
        Err(EIO)
    }

    /// Sets the value of the given gpio line.
    fn set(_data: <Self::Data as PointerWrapper>::Borrowed<'_>, _offset: u32, _value: bool) {}
}
/// Represents which fields of [`struct gpio_chip`] should be populated with pointers.
///
/// This is typically populated with the [`declare_gpio_chip_operations`] macro.
pub struct ToUse {
    /// The `get_direction` field of [`struct gpio_chip`].
    pub get_direction: bool,

    /// The `direction_input` field of [`struct gpio_chip`].
    pub direction_input: bool,

    /// The `direction_output` field of [`struct gpio_chip`].
    pub direction_output: bool,

    /// The `get` field of [`struct gpio_chip`].
    pub get: bool,

    /// The `set` field of [`struct gpio_chip`].
    pub set: bool,
}

/// A constant version where all values are set to `false`, that is, all supported fields will be
/// set to null pointers.
pub const USE_NONE: ToUse = ToUse {
    get_direction: false,
    direction_input: false,
    direction_output: false,
    get: false,
    set: false,
};

/// Defines the [`Chip::TO_USE`] field based on a list of fields to be populated.
///
/// With no arguments, all entries are left unpopulated ([`USE_NONE`]).
#[macro_export]
macro_rules! declare_gpio_chip_operations {
    () => {
        const TO_USE: $crate::gpio::ToUse = $crate::gpio::USE_NONE;
    };
    ($($i:ident),+) => {
        // Update syntax is kept even when every field is listed so that new `ToUse` fields
        // default to `false`.
        #[allow(clippy::needless_update)]
        const TO_USE: $crate::gpio::ToUse =
            $crate::gpio::ToUse {
                $($i: true),+ ,
                ..$crate::gpio::USE_NONE
            };
    };
}
/// A registration of a gpio chip.
///
/// The registration is torn down (and the chip data freed) when the value is dropped.
pub struct Registration<T: Chip> {
    // The C gpio chip structure; handed to C by pointer, hence the `UnsafeCell`.
    gc: UnsafeCell<bindings::gpio_chip>,
    // `Some(_)` once `register` has succeeded; doubles as the "already registered" flag.
    parent: Option<device::Device>,
    _p: PhantomData<T>,
    // The C side keeps pointers into this value, so it must not move after registration.
    _pin: PhantomPinned,
}

impl<T: Chip> Registration<T> {
    /// Creates a new [`Registration`] but does not register it yet.
    ///
    /// It is allowed to move.
    pub fn new() -> Self {
        Self {
            parent: None,
            gc: UnsafeCell::new(bindings::gpio_chip::default()),
            _pin: PhantomPinned,
            _p: PhantomData,
        }
    }

    /// Registers a gpio chip with the rest of the kernel.
    ///
    /// Populates the vtable entries selected by `T::TO_USE`, then hands the chip (with
    /// `data` stored as its chip data) to `gpiochip_add_data_with_key`. On failure the
    /// data is reclaimed and the error returned.
    pub fn register(
        self: Pin<&mut Self>,
        gpio_count: u16,
        base: Option<i32>,
        parent: &dyn device::RawDevice,
        data: T::Data,
    ) -> Result {
        if self.parent.is_some() {
            // Already registered.
            return Err(EINVAL);
        }

        // SAFETY: We never move out of `this`.
        let this = unsafe { self.get_unchecked_mut() };
        {
            let gc = this.gc.get_mut();

            // Set up the callbacks.
            gc.request = Some(bindings::gpiochip_generic_request);
            gc.free = Some(bindings::gpiochip_generic_free);
            if T::TO_USE.get_direction {
                gc.get_direction = Some(get_direction_callback::<T>);
            }
            if T::TO_USE.direction_input {
                gc.direction_input = Some(direction_input_callback::<T>);
            }
            if T::TO_USE.direction_output {
                gc.direction_output = Some(direction_output_callback::<T>);
            }
            if T::TO_USE.get {
                gc.get = Some(get_callback::<T>);
            }
            if T::TO_USE.set {
                gc.set = Some(set_callback::<T>);
            }

            // When a base is not explicitly given, use -1 for one to be picked.
            if let Some(b) = base {
                gc.base = b;
            } else {
                gc.base = -1;
            }

            gc.ngpio = gpio_count;
            gc.parent = parent.raw_device();
            gc.label = parent.name().as_char_ptr();

            // TODO: Define `gc.owner` as well.
        }

        let data_pointer = <T::Data as PointerWrapper>::into_pointer(data);
        // SAFETY: `gc` was initialised above, so it is valid.
        let ret = unsafe {
            bindings::gpiochip_add_data_with_key(
                this.gc.get(),
                data_pointer as _,
                core::ptr::null_mut(),
                core::ptr::null_mut(),
            )
        };
        if ret < 0 {
            // Registration failed: reclaim ownership of the data so it is dropped.
            // SAFETY: `data_pointer` was returned by `into_pointer` above.
            unsafe { T::Data::from_pointer(data_pointer) };
            return Err(Error::from_kernel_errno(ret));
        }

        this.parent = Some(device::Device::from_dev(parent));
        Ok(())
    }
}
// SAFETY: `Registration` doesn't offer any methods or access to fields when shared between threads
// or CPUs, so it is safe to share it.
unsafe impl<T: Chip> Sync for Registration<T> {}

// SAFETY: Registration with and unregistration from the gpio subsystem can happen from any thread.
// Additionally, `T::Data` (which is dropped during unregistration) is `Send`, so it is ok to move
// `Registration` to different threads.
#[allow(clippy::non_send_fields_in_send_ty)]
unsafe impl<T: Chip> Send for Registration<T> {}

impl<T: Chip> Default for Registration<T> {
    // Same as `new`: an unregistered, movable registration.
    fn default() -> Self {
        Self::new()
    }
}

impl<T: Chip> Drop for Registration<T> {
    /// Removes the registration from the kernel if it has completed successfully before.
    fn drop(&mut self) {
        if self.parent.is_some() {
            // Get a pointer to the data stored in chip before destroying it.
            // SAFETY: `gc` was initialised during registration, which is guaranteed to have
            // succeeded (because `parent` is `Some(_)`), so it remains valid.
            let data_pointer = unsafe { bindings::gpiochip_get_data(self.gc.get()) };

            // SAFETY: By the same argument above, `gc` is still valid.
            unsafe { bindings::gpiochip_remove(self.gc.get()) };

            // Free data as well.
            // SAFETY: `data_pointer` was returned by `into_pointer` during registration.
            unsafe { <T::Data as PointerWrapper>::from_pointer(data_pointer) };
        }
    }
}
/// C callback for the `get_direction` entry of [`struct gpio_chip`].
unsafe extern "C" fn get_direction_callback<T: Chip>(
    gc: *mut bindings::gpio_chip,
    offset: c_types::c_uint,
) -> c_types::c_int {
    from_kernel_result! {
        // SAFETY: The value stored as chip data was returned by `into_pointer` during registration.
        let data = unsafe { T::Data::borrow(bindings::gpiochip_get_data(gc)) };
        // `LineDirection` discriminants match the C `GPIO_LINE_DIRECTION_*` values.
        Ok(T::get_direction(data, offset)? as i32)
    }
}

/// C callback for the `direction_input` entry of [`struct gpio_chip`].
unsafe extern "C" fn direction_input_callback<T: Chip>(
    gc: *mut bindings::gpio_chip,
    offset: c_types::c_uint,
) -> c_types::c_int {
    from_kernel_result! {
        // SAFETY: The value stored as chip data was returned by `into_pointer` during registration.
        let data = unsafe { T::Data::borrow(bindings::gpiochip_get_data(gc)) };
        T::direction_input(data, offset)?;
        Ok(0)
    }
}

/// C callback for the `direction_output` entry of [`struct gpio_chip`].
unsafe extern "C" fn direction_output_callback<T: Chip>(
    gc: *mut bindings::gpio_chip,
    offset: c_types::c_uint,
    value: c_types::c_int,
) -> c_types::c_int {
    from_kernel_result! {
        // SAFETY: The value stored as chip data was returned by `into_pointer` during registration.
        let data = unsafe { T::Data::borrow(bindings::gpiochip_get_data(gc)) };
        T::direction_output(data, offset, value != 0)?;
        Ok(0)
    }
}

/// C callback for the `get` entry of [`struct gpio_chip`].
unsafe extern "C" fn get_callback<T: Chip>(
    gc: *mut bindings::gpio_chip,
    offset: c_types::c_uint,
) -> c_types::c_int {
    from_kernel_result! {
        // SAFETY: The value stored as chip data was returned by `into_pointer` during registration.
        let data = unsafe { T::Data::borrow(bindings::gpiochip_get_data(gc)) };
        let v = T::get(data, offset)?;
        Ok(v as _)
    }
}

/// C callback for the `set` entry of [`struct gpio_chip`]; infallible on the C side.
unsafe extern "C" fn set_callback<T: Chip>(
    gc: *mut bindings::gpio_chip,
    offset: c_types::c_uint,
    value: c_types::c_int,
) {
    // SAFETY: The value stored as chip data was returned by `into_pointer` during registration.
    let data = unsafe { T::Data::borrow(bindings::gpiochip_get_data(gc)) };
    T::set(data, offset, value != 0);
}
#[cfg(CONFIG_GPIOLIB_IRQCHIP)]
mod irqchip {
    //! Support for gpio chips that also act as irq chips.

    use super::*;
    use crate::irq;

    /// A gpio chip that includes an irq chip.
    pub trait ChipWithIrqChip: Chip {
        /// Implements the irq flow for the gpio chip.
        fn handle_irq_flow(
            _data: <Self::Data as PointerWrapper>::Borrowed<'_>,
            _desc: &irq::Descriptor,
            _domain: &irq::Domain,
        );
    }

    /// A registration of a gpio chip that includes an irq chip.
    pub struct RegistrationWithIrqChip<T: ChipWithIrqChip> {
        // The underlying gpio-chip registration.
        reg: Registration<T>,
        // The C irq chip structure; the C side keeps a pointer to it.
        irq_chip: UnsafeCell<bindings::irq_chip>,
        // Storage for the single parent irq; `girq.parents` points at this field.
        parent_irq: u32,
    }

    impl<T: ChipWithIrqChip> RegistrationWithIrqChip<T> {
        /// Creates a new [`RegistrationWithIrqChip`] but does not register it yet.
        ///
        /// It is allowed to move.
        pub fn new() -> Self {
            Self {
                reg: Registration::new(),
                irq_chip: UnsafeCell::new(bindings::irq_chip::default()),
                parent_irq: 0,
            }
        }

        /// Registers a gpio chip and its irq chip with the rest of the kernel.
        ///
        /// Initialises the irq chip and the gpio chip's irq state, then delegates to
        /// [`Registration::register`] for the actual gpio registration.
        pub fn register<U: irq::Chip<Data = T::Data>>(
            mut self: Pin<&mut Self>,
            gpio_count: u16,
            base: Option<i32>,
            parent: &dyn device::RawDevice,
            data: T::Data,
            parent_irq: u32,
        ) -> Result {
            if self.reg.parent.is_some() {
                // Already registered.
                return Err(EINVAL);
            }

            // SAFETY: We never move out of `this`.
            let this = unsafe { self.as_mut().get_unchecked_mut() };

            // Initialise the irq_chip.
            {
                let irq_chip = this.irq_chip.get_mut();
                irq_chip.name = parent.name().as_char_ptr();

                // SAFETY: The gpio subsystem configures a pointer to `gpio_chip` as the irq chip
                // data, so we use `IrqChipAdapter` to convert to the `T::Data`, which is the same
                // as `irq::Chip::Data` per the bound above.
                unsafe { irq::init_chip::<IrqChipAdapter<U>>(irq_chip) };
            }

            // Initialise gc irq state.
            {
                let girq = &mut this.reg.gc.get_mut().irq;
                girq.chip = this.irq_chip.get();

                // SAFETY: By leaving `parent_handler_data` set to `null`, the gpio subsystem
                // initialises it to a pointer to the gpio chip, which is what `FlowHandler<T>`
                // expects.
                girq.parent_handler = unsafe { irq::new_flow_handler::<FlowHandler<T>>() };
                girq.num_parents = 1;
                girq.parents = &mut this.parent_irq;
                this.parent_irq = parent_irq;
                girq.default_type = bindings::IRQ_TYPE_NONE;
                girq.handler = Some(bindings::handle_bad_irq);
            }

            // SAFETY: `reg` is pinned when `self` is.
            let pinned = unsafe { self.map_unchecked_mut(|r| &mut r.reg) };
            pinned.register(gpio_count, base, parent, data)
        }
    }

    impl<T: ChipWithIrqChip> Default for RegistrationWithIrqChip<T> {
        // Same as `new`: an unregistered, movable registration.
        fn default() -> Self {
            Self::new()
        }
    }

    // SAFETY: `RegistrationWithIrqChip` doesn't offer any methods or access to fields when shared
    // between threads or CPUs, so it is safe to share it.
    unsafe impl<T: ChipWithIrqChip> Sync for RegistrationWithIrqChip<T> {}

    // SAFETY: Registration with and unregistration from the gpio subsystem (including irq chips for
    // them) can happen from any thread. Additionally, `T::Data` (which is dropped during
    // unregistration) is `Send`, so it is ok to move `Registration` to different threads.
    #[allow(clippy::non_send_fields_in_send_ty)]
    unsafe impl<T: ChipWithIrqChip> Send for RegistrationWithIrqChip<T> where T::Data: Send {}

    /// Adapter that recovers the chip data from the `gpio_chip` pointer the gpio subsystem
    /// passes as flow-handler context, then forwards to `T::handle_irq_flow`.
    struct FlowHandler<T: ChipWithIrqChip>(PhantomData<T>);

    impl<T: ChipWithIrqChip> irq::FlowHandler for FlowHandler<T> {
        type Data = *mut bindings::gpio_chip;

        fn handle_irq_flow(gc: *mut bindings::gpio_chip, desc: &irq::Descriptor) {
            // SAFETY: `FlowHandler` is only used in gpio chips, and it is removed when the gpio is
            // unregistered, so we know that `gc` must still be valid. We also know that the value
            // stored as gpio data was returned by `T::Data::into_pointer` again because
            // `FlowHandler` is a private structure only used in this way.
            let data = unsafe { T::Data::borrow(bindings::gpiochip_get_data(gc)) };

            // SAFETY: `gc` is valid (see comment above), so we can dereference it.
            let domain = unsafe { irq::Domain::from_ptr((*gc).irq.domain) };

            T::handle_irq_flow(data, desc, &domain);
        }
    }

    /// Adapter from an irq chip with `gpio_chip` pointer as context to one where the gpio chip
    /// data is passed as context.
    struct IrqChipAdapter<T: irq::Chip>(PhantomData<T>);

    impl<T: irq::Chip> irq::Chip for IrqChipAdapter<T> {
        type Data = *mut bindings::gpio_chip;
        const TO_USE: irq::ToUse = T::TO_USE;

        fn ack(gc: *mut bindings::gpio_chip, irq_data: &irq::IrqData) {
            // SAFETY: `IrqChipAdapter` is a private struct, only used when the data stored in the
            // gpio chip is known to come from `T::Data`, and only valid while the gpio chip is
            // registered, so `gc` is valid.
            let data = unsafe { T::Data::borrow(bindings::gpiochip_get_data(gc as _)) };
            T::ack(data, irq_data);
        }

        fn mask(gc: *mut bindings::gpio_chip, irq_data: &irq::IrqData) {
            // SAFETY: `IrqChipAdapter` is a private struct, only used when the data stored in the
            // gpio chip is known to come from `T::Data`, and only valid while the gpio chip is
            // registered, so `gc` is valid.
            let data = unsafe { T::Data::borrow(bindings::gpiochip_get_data(gc as _)) };
            T::mask(data, irq_data);
        }

        fn unmask(gc: *mut bindings::gpio_chip, irq_data: &irq::IrqData) {
            // SAFETY: `IrqChipAdapter` is a private struct, only used when the data stored in the
            // gpio chip is known to come from `T::Data`, and only valid while the gpio chip is
            // registered, so `gc` is valid.
            let data = unsafe { T::Data::borrow(bindings::gpiochip_get_data(gc as _)) };
            T::unmask(data, irq_data);
        }

        fn set_type(
            gc: *mut bindings::gpio_chip,
            irq_data: &mut irq::LockedIrqData,
            flow_type: u32,
        ) -> Result<irq::ExtraResult> {
            // SAFETY: `IrqChipAdapter` is a private struct, only used when the data stored in the
            // gpio chip is known to come from `T::Data`, and only valid while the gpio chip is
            // registered, so `gc` is valid.
            let data = unsafe { T::Data::borrow(bindings::gpiochip_get_data(gc as _)) };
            T::set_type(data, irq_data, flow_type)
        }

        fn set_wake(gc: *mut bindings::gpio_chip, irq_data: &irq::IrqData, on: bool) -> Result {
            // SAFETY: `IrqChipAdapter` is a private struct, only used when the data stored in the
            // gpio chip is known to come from `T::Data`, and only valid while the gpio chip is
            // registered, so `gc` is valid.
            let data = unsafe { T::Data::borrow(bindings::gpiochip_get_data(gc as _)) };
            T::set_wake(data, irq_data, on)
        }
    }
}

242
rust/kernel/hwrng.rs Normal file
View File

@ -0,0 +1,242 @@
// SPDX-License-Identifier: GPL-2.0
//! Hardware Random Number Generator.
//!
//! C header: [`include/linux/hw_random.h`](../../../../include/linux/hw_random.h)
use alloc::{boxed::Box, slice::from_raw_parts_mut};
use crate::{
bindings, c_types, error::code::*, error::from_kernel_result, str::CString, to_result,
types::PointerWrapper, Result, ScopeGuard,
};
use core::{cell::UnsafeCell, fmt, marker::PhantomData, pin::Pin};
/// This trait is implemented in order to provide callbacks to `struct hwrng`.
///
/// Only [`Operations::read`] is mandatory; `init` and `cleanup` are wired up only when
/// enabled via [`Operations::TO_USE`].
pub trait Operations {
    /// The methods to use to populate [`struct hwrng`].
    const TO_USE: ToUse;

    /// The pointer type that will be used to hold user-defined data type.
    type Data: PointerWrapper + Send + Sync = ();

    /// Initialization callback, can be left undefined.
    fn init(_data: <Self::Data as PointerWrapper>::Borrowed<'_>) -> Result {
        Err(EINVAL)
    }

    /// Cleanup callback, can be left undefined.
    ///
    /// Receives the data by value, consuming it.
    fn cleanup(_data: Self::Data) {}

    /// Read data into the provided buffer.
    /// Drivers can fill up to `max` bytes of data into the buffer.
    /// The buffer is aligned for any type and its size is a multiple of 4 and >= 32 bytes.
    fn read(
        data: <Self::Data as PointerWrapper>::Borrowed<'_>,
        buffer: &mut [u8],
        wait: bool,
    ) -> Result<u32>;
}
/// Registration structure for Hardware Random Number Generator driver.
pub struct Registration<T: Operations> {
    // The C hwrng structure; handed to C by pointer, hence the `UnsafeCell`.
    hwrng: UnsafeCell<bindings::hwrng>,
    // Owns the formatted name; `hwrng.name` points into it, so it must outlive registration.
    name: Option<CString>,
    // Tracks whether `hwrng_register` has succeeded.
    registered: bool,
    _p: PhantomData<T>,
}

impl<T: Operations> Registration<T> {
    /// Creates new instance of registration.
    ///
    /// The returned instance is not yet registered; call [`Registration::register`]
    /// (or use [`Registration::new_pinned`]) to register it.
    pub fn new() -> Self {
        Self {
            hwrng: UnsafeCell::new(bindings::hwrng::default()),
            name: None,
            registered: false,
            _p: PhantomData,
        }
    }

    /// Returns a registered and pinned, heap-allocated representation of the registration.
    pub fn new_pinned(
        name: fmt::Arguments<'_>,
        quality: u16,
        data: T::Data,
    ) -> Result<Pin<Box<Self>>> {
        let mut reg = Pin::from(Box::try_new(Self::new())?);
        reg.as_mut().register(name, quality, data)?;
        Ok(reg)
    }

    /// Registers a hwrng device within the rest of the kernel.
    ///
    /// It must be pinned because the memory block that represents
    /// the registration may be self-referential.
    pub fn register(
        self: Pin<&mut Self>,
        name: fmt::Arguments<'_>,
        quality: u16,
        data: T::Data,
    ) -> Result {
        // SAFETY: We never move out of `this`.
        let this = unsafe { self.get_unchecked_mut() };
        if this.registered {
            return Err(EINVAL);
        }

        let data_pointer = data.into_pointer();

        // Reclaims (and drops) the data if any later step fails; dismissed on success.
        // SAFETY: `data_pointer` comes from the call to `data.into_pointer()` above.
        let guard = ScopeGuard::new(|| unsafe {
            T::Data::from_pointer(data_pointer);
        });

        let name = CString::try_from_fmt(name)?;

        // SAFETY: Registration is pinned and contains allocated and set to zero `bindings::hwrng` structure.
        Self::init_hwrng(
            unsafe { &mut *this.hwrng.get() },
            &name,
            quality,
            data_pointer,
        );

        // SAFETY: `bindings::hwrng` is initialized above which guarantees safety.
        to_result(|| unsafe { bindings::hwrng_register(this.hwrng.get()) })?;

        this.registered = true;
        this.name = Some(name);
        guard.dismiss();
        Ok(())
    }

    /// Fills in the C `hwrng` structure: name, quality, private data, and the callbacks
    /// selected by `T::TO_USE` (`read` is always populated).
    fn init_hwrng(
        hwrng: &mut bindings::hwrng,
        name: &CString,
        quality: u16,
        data: *const c_types::c_void,
    ) {
        hwrng.name = name.as_char_ptr();

        hwrng.init = if T::TO_USE.init {
            Some(Self::init_callback)
        } else {
            None
        };
        hwrng.cleanup = if T::TO_USE.cleanup {
            Some(Self::cleanup_callback)
        } else {
            None
        };
        hwrng.data_present = None;
        hwrng.data_read = None;
        hwrng.read = Some(Self::read_callback);

        hwrng.priv_ = data as _;
        hwrng.quality = quality;

        // SAFETY: All fields are properly initialized as
        // remaining fields `list`, `ref` and `cleanup_done` are already
        // zeroed by `bindings::hwrng::default()` call.
    }

    /// C callback for the `init` entry of [`struct hwrng`].
    unsafe extern "C" fn init_callback(rng: *mut bindings::hwrng) -> c_types::c_int {
        from_kernel_result! {
            // SAFETY: `priv` private data field was initialized during creation of
            // the `bindings::hwrng` in `Self::init_hwrng` method. This callback is only
            // called once the driver is registered.
            let data = unsafe { T::Data::borrow((*rng).priv_ as *const _) };
            T::init(data)?;
            Ok(0)
        }
    }

    /// C callback for the `cleanup` entry of [`struct hwrng`]; takes ownership of the data.
    unsafe extern "C" fn cleanup_callback(rng: *mut bindings::hwrng) {
        // SAFETY: `priv` private data field was initialized during creation of
        // the `bindings::hwrng` in `Self::init_hwrng` method. This callback is only
        // called once the driver is registered.
        let data = unsafe { T::Data::from_pointer((*rng).priv_ as *const _) };
        T::cleanup(data);
    }

    /// C callback for the `read` entry of [`struct hwrng`].
    unsafe extern "C" fn read_callback(
        rng: *mut bindings::hwrng,
        data: *mut c_types::c_void,
        max: usize,
        wait: bindings::bool_,
    ) -> c_types::c_int {
        from_kernel_result! {
            // SAFETY: `priv` private data field was initialized during creation of
            // the `bindings::hwrng` in `Self::init_hwrng` method. This callback is only
            // called once the driver is registered.
            let drv_data = unsafe { T::Data::borrow((*rng).priv_ as *const _) };

            // SAFETY: Slice is created from `data` and `max` arguments that are C's buffer
            // along with its size in bytes that are safe for this conversion.
            let buffer = unsafe { from_raw_parts_mut(data as *mut u8, max) };
            let ret = T::read(drv_data, buffer, wait)?;
            Ok(ret as _)
        }
    }
}
impl<T: Operations> Default for Registration<T> {
    /// Creates an unregistered (empty) registration; equivalent to [`Registration::new`].
    fn default() -> Self {
        Self::new()
    }
}
/// Represents which callbacks of [`struct hwrng`] should be populated with pointers.
///
/// Fields set to `true` cause the corresponding C callback pointer to be installed
/// by the registration code; `false` leaves the pointer null.
pub struct ToUse {
    /// The `init` field of [`struct hwrng`].
    pub init: bool,
    /// The `cleanup` field of [`struct hwrng`].
    pub cleanup: bool,
}
/// A constant version where all values are set to `false`, that is, all supported fields will
/// be set to null pointers.
pub const USE_NONE: ToUse = ToUse {
    init: false,
    cleanup: false,
};
/// Defines the [`Operations::TO_USE`] field based on a list of fields to be populated.
///
/// Invoke with no arguments to use no optional callbacks, or with a comma-separated
/// list of [`ToUse`] field names (e.g. `init`, `cleanup`) to enable them.
#[macro_export]
macro_rules! declare_hwrng_operations {
    () => {
        const TO_USE: $crate::hwrng::ToUse = $crate::hwrng::USE_NONE;
    };
    ($($i:ident),+) => {
        #[allow(clippy::needless_update)]
        // Use `$crate` (not a hard-coded crate name) so the macro is hygienic and
        // expands correctly regardless of how the kernel crate is referenced;
        // this also matches `declare_irq_chip_operations!`.
        const TO_USE: $crate::hwrng::ToUse =
            $crate::hwrng::ToUse {
                $($i: true),+ ,
                ..$crate::hwrng::USE_NONE
            };
    };
}
// SAFETY: `Registration` does not expose any of its state across threads, so it is
// safe to share references to it between threads.
unsafe impl<T: Operations> Sync for Registration<T> {}
// SAFETY: `Registration` is not restricted to a single thread,
// its `T::Data` is also `Send` so it may be moved to different threads.
#[allow(clippy::non_send_fields_in_send_ty)]
unsafe impl<T: Operations> Send for Registration<T> {}
impl<T: Operations> Drop for Registration<T> {
    /// Removes the registration from the kernel if it has completed successfully before.
    fn drop(&mut self) {
        // `registered` is only set to `true` after a successful `hwrng_register`,
        // so an unregistered (or failed) instance is dropped without touching C state.
        // SAFETY: The instance of Registration<T> is unregistered only
        // after being initialized and registered before.
        if self.registered {
            unsafe { bindings::hwrng_unregister(self.hwrng.get()) };
        }
    }
}

153
rust/kernel/io_buffer.rs Normal file
View File

@ -0,0 +1,153 @@
// SPDX-License-Identifier: GPL-2.0
//! Buffers used in IO.
use crate::Result;
use alloc::vec::Vec;
use core::mem::{size_of, MaybeUninit};
/// Represents a buffer to be read from during IO.
pub trait IoBufferReader {
    /// Returns the number of bytes left to be read from the io buffer.
    ///
    /// Note that even reading less than this number of bytes may fail.
    fn len(&self) -> usize;
    /// Returns `true` if no data is available in the io buffer.
    fn is_empty(&self) -> bool {
        self.len() == 0
    }
    /// Reads raw data from the io buffer into a raw kernel buffer.
    ///
    /// # Safety
    ///
    /// The output buffer must be valid for writes of `len` bytes.
    unsafe fn read_raw(&mut self, out: *mut u8, len: usize) -> Result;
    /// Reads all data remaining in the io buffer.
    ///
    /// Returns `EFAULT` if the address does not currently point to mapped, readable memory.
    fn read_all(&mut self) -> Result<Vec<u8>> {
        let mut data = Vec::<u8>::new();
        // `try_resize` may fail on allocation; the buffer is sized to the full
        // remaining length reported by `len()`.
        data.try_resize(self.len(), 0)?;
        // SAFETY: The output buffer is valid as we just allocated it with exactly
        // `data.len()` bytes.
        unsafe { self.read_raw(data.as_mut_ptr(), data.len())? };
        Ok(data)
    }
    /// Reads a byte slice from the io buffer.
    ///
    /// Returns `EFAULT` if the byte slice is bigger than the remaining size of the user slice or
    /// if the address does not currently point to mapped, readable memory.
    fn read_slice(&mut self, data: &mut [u8]) -> Result {
        // SAFETY: The output buffer is valid as it's coming from a live reference.
        unsafe { self.read_raw(data.as_mut_ptr(), data.len()) }
    }
    /// Reads the contents of a plain old data (POD) type from the io buffer.
    ///
    /// The `ReadableFromBytes` bound guarantees every bit pattern read is a valid `T`.
    fn read<T: ReadableFromBytes>(&mut self) -> Result<T> {
        let mut out = MaybeUninit::<T>::uninit();
        // SAFETY: The output buffer points to a live `MaybeUninit<T>` of exactly
        // `size_of::<T>()` bytes.
        unsafe { self.read_raw(out.as_mut_ptr() as _, size_of::<T>()) }?;
        // SAFETY: We just initialised the data (all `size_of::<T>()` bytes were written).
        Ok(unsafe { out.assume_init() })
    }
}
/// Represents a buffer to be written to during IO.
pub trait IoBufferWriter {
    /// Returns the number of bytes left to be written into the io buffer.
    ///
    /// Note that even writing less than this number of bytes may fail.
    fn len(&self) -> usize;
    /// Returns `true` if the io buffer cannot hold any additional data.
    fn is_empty(&self) -> bool {
        self.len() == 0
    }
    /// Writes zeroes to the io buffer.
    ///
    /// Differently from the other write functions, `clear` will zero as much as it can and update
    /// the writer internal state to reflect this. It will, however, return an error if it cannot
    /// clear `len` bytes.
    ///
    /// For example, if a caller requests that 100 bytes be cleared but a segfault happens after
    /// 20 bytes, then EFAULT is returned and the writer is advanced by 20 bytes.
    fn clear(&mut self, len: usize) -> Result;
    /// Writes a byte slice into the io buffer.
    ///
    /// Returns `EFAULT` if the byte slice is bigger than the remaining size of the io buffer or if
    /// the address does not currently point to mapped, writable memory.
    fn write_slice(&mut self, data: &[u8]) -> Result {
        // SAFETY: The input buffer is valid as it's coming from a live reference.
        unsafe { self.write_raw(data.as_ptr(), data.len()) }
    }
    /// Writes raw data to the io buffer from a raw kernel buffer.
    ///
    /// # Safety
    ///
    /// The input buffer must be valid for reads of `len` bytes.
    unsafe fn write_raw(&mut self, data: *const u8, len: usize) -> Result;
    /// Writes the contents of the given data into the io buffer.
    ///
    /// The `WritableToBytes` bound guarantees `T` has no padding or uninitialised
    /// bytes, so no kernel memory is leaked.
    fn write<T: WritableToBytes>(&mut self, data: &T) -> Result {
        // SAFETY: The input buffer is valid as it's coming from a live
        // reference to a type that implements `WritableToBytes`.
        unsafe { self.write_raw(data as *const T as _, size_of::<T>()) }
    }
}
/// Specifies that a type is safely readable from byte slices.
///
/// Not all types can be safely read from byte slices; examples from
/// <https://doc.rust-lang.org/reference/behavior-considered-undefined.html> include `bool`
/// that must be either `0` or `1`, and `char` that cannot be a surrogate or above `char::MAX`.
///
/// # Safety
///
/// Implementers must ensure that the type is made up only of types that can be safely read from
/// arbitrary byte sequences (e.g., `u32`, `u64`, etc.). In other words, every possible bit
/// pattern of the right size must be a valid value of the type.
pub unsafe trait ReadableFromBytes {}
// SAFETY: All bit patterns are acceptable values of the types below (plain
// fixed-width and pointer-sized integers have no invalid representations).
unsafe impl ReadableFromBytes for u8 {}
unsafe impl ReadableFromBytes for u16 {}
unsafe impl ReadableFromBytes for u32 {}
unsafe impl ReadableFromBytes for u64 {}
unsafe impl ReadableFromBytes for usize {}
unsafe impl ReadableFromBytes for i8 {}
unsafe impl ReadableFromBytes for i16 {}
unsafe impl ReadableFromBytes for i32 {}
unsafe impl ReadableFromBytes for i64 {}
unsafe impl ReadableFromBytes for isize {}
/// Specifies that a type is safely writable to byte slices.
///
/// This means that we don't read undefined values (which leads to UB) in preparation for writing
/// to the byte slice. It also ensures that no potentially sensitive information is leaked into the
/// byte slices.
///
/// # Safety
///
/// A type must not include padding bytes and must be fully initialised to safely implement
/// [`WritableToBytes`] (i.e., it doesn't contain [`MaybeUninit`] fields). A composition of
/// writable types in a structure is not necessarily writable because it may result in padding
/// bytes.
pub unsafe trait WritableToBytes {}
// SAFETY: Initialised instances of the following types have no uninitialised portions
// (primitive integers contain no padding).
unsafe impl WritableToBytes for u8 {}
unsafe impl WritableToBytes for u16 {}
unsafe impl WritableToBytes for u32 {}
unsafe impl WritableToBytes for u64 {}
unsafe impl WritableToBytes for usize {}
unsafe impl WritableToBytes for i8 {}
unsafe impl WritableToBytes for i16 {}
unsafe impl WritableToBytes for i32 {}
unsafe impl WritableToBytes for i64 {}
unsafe impl WritableToBytes for isize {}

275
rust/kernel/io_mem.rs Normal file
View File

@ -0,0 +1,275 @@
// SPDX-License-Identifier: GPL-2.0
//! Memory-mapped IO.
//!
//! C header: [`include/asm-generic/io.h`](../../../../include/asm-generic/io.h)
#![allow(dead_code)]
use crate::{bindings, error::code::*, Result};
use core::convert::TryInto;
/// Represents a memory resource.
pub struct Resource {
    /// Start (physical) address of the resource.
    offset: bindings::resource_size_t,
    /// Size of the resource in bytes (computed as `end - start + 1`).
    size: bindings::resource_size_t,
}
impl Resource {
    /// Creates a resource from the inclusive `[start, end]` address range.
    ///
    /// Returns `None` when `start` is zero (treated as an invalid address) or
    /// when computing `end - start + 1` under/overflows.
    pub(crate) fn new(
        start: bindings::resource_size_t,
        end: bindings::resource_size_t,
    ) -> Option<Self> {
        if start == 0 {
            return None;
        }
        let size = end.checked_sub(start)?.checked_add(1)?;
        Some(Self {
            offset: start,
            size,
        })
    }
}
/// Represents a memory block of at least `SIZE` bytes.
///
/// # Invariants
///
/// `ptr` is a non-null and valid address of at least `SIZE` bytes and returned by an `ioremap`
/// variant. `ptr` is also 8-byte aligned.
///
/// # Examples
///
/// ```
/// # use kernel::prelude::*;
/// use kernel::io_mem::{IoMem, Resource};
///
/// fn test(res: Resource) -> Result {
///     // Create an io mem block of at least 100 bytes.
///     // SAFETY: No DMA operations are initiated through `mem`.
///     let mem = unsafe { IoMem::<100>::try_new(res) }?;
///
///     // Read one byte from offset 10.
///     let v = mem.readb(10);
///
///     // Write value to offset 20.
///     mem.writeb(v, 20);
///
///     Ok(())
/// }
///
/// ```
pub struct IoMem<const SIZE: usize> {
    /// Virtual address returned by `ioremap` (see `try_new`), stored as an
    /// integer so offset arithmetic and const checks operate on plain `usize`.
    ptr: usize,
}
/// Generates a pair of read accessors (`$name` with compile-time offset checking,
/// `$try_name` with run-time checking) that forward to the C helper of the same
/// name (`readb`, `readw`, ...).
macro_rules! define_read {
    ($(#[$attr:meta])* $name:ident, $try_name:ident, $type_name:ty) => {
        /// Reads IO data from the given offset known, at compile time.
        ///
        /// If the offset is not known at compile time, the build will fail.
        $(#[$attr])*
        pub fn $name(&self, offset: usize) -> $type_name {
            Self::check_offset::<$type_name>(offset);
            let ptr = self.ptr.wrapping_add(offset);
            // SAFETY: The type invariants guarantee that `ptr` is a valid pointer. The check above
            // guarantees that the code won't build if `offset` makes the read go out of bounds
            // (including the type size).
            unsafe { bindings::$name(ptr as _) }
        }
        /// Reads IO data from the given offset.
        ///
        /// It fails if/when the offset (plus the type size) is out of bounds.
        $(#[$attr])*
        pub fn $try_name(&self, offset: usize) -> Result<$type_name> {
            if !Self::offset_ok::<$type_name>(offset) {
                return Err(EINVAL);
            }
            let ptr = self.ptr.wrapping_add(offset);
            // SAFETY: The type invariants guarantee that `ptr` is a valid pointer. The check above
            // returns an error if `offset` would make the read go out of bounds (including the
            // type size).
            Ok(unsafe { bindings::$name(ptr as _) })
        }
    };
}
/// Generates a pair of write accessors (`$name` with compile-time offset checking,
/// `$try_name` with run-time checking) that forward to the C helper of the same
/// name (`writeb`, `writew`, ...).
macro_rules! define_write {
    ($(#[$attr:meta])* $name:ident, $try_name:ident, $type_name:ty) => {
        /// Writes IO data to the given offset, known at compile time.
        ///
        /// If the offset is not known at compile time, the build will fail.
        $(#[$attr])*
        pub fn $name(&self, value: $type_name, offset: usize) {
            Self::check_offset::<$type_name>(offset);
            let ptr = self.ptr.wrapping_add(offset);
            // SAFETY: The type invariants guarantee that `ptr` is a valid pointer. The check above
            // guarantees that the code won't link if `offset` makes the write go out of bounds
            // (including the type size).
            unsafe { bindings::$name(value, ptr as _) }
        }
        /// Writes IO data to the given offset.
        ///
        /// It fails if/when the offset (plus the type size) is out of bounds.
        $(#[$attr])*
        pub fn $try_name(&self, value: $type_name, offset: usize) -> Result {
            if !Self::offset_ok::<$type_name>(offset) {
                return Err(EINVAL);
            }
            let ptr = self.ptr.wrapping_add(offset);
            // SAFETY: The type invariants guarantee that `ptr` is a valid pointer. The check above
            // returns an error if `offset` would make the write go out of bounds (including the
            // type size).
            unsafe { bindings::$name(value, ptr as _) };
            Ok(())
        }
    };
}
impl<const SIZE: usize> IoMem<SIZE> {
    /// Tries to create a new instance of a memory block.
    ///
    /// The resource described by `res` is mapped into the CPU's address space so that it can be
    /// accessed directly. It is also consumed by this function so that it can't be mapped again
    /// to a different address.
    ///
    /// # Safety
    ///
    /// Callers must ensure that either (a) the resulting interface cannot be used to initiate DMA
    /// operations, or (b) that DMA operations initiated via the returned interface use DMA handles
    /// allocated through the `dma` module.
    pub unsafe fn try_new(res: Resource) -> Result<Self> {
        // Check that the resource has at least `SIZE` bytes in it.
        if res.size < SIZE.try_into()? {
            return Err(EINVAL);
        }
        // To be able to check pointers at compile time based only on offsets, we need to guarantee
        // that the base pointer is minimally aligned. So we conservatively expect at least 8 bytes.
        if res.offset % 8 != 0 {
            crate::pr_err!("Physical address is not 64-bit aligned: {:x}", res.offset);
            return Err(EDOM);
        }
        // Try to map the resource.
        // SAFETY: Just mapping the memory range.
        let addr = unsafe { bindings::ioremap(res.offset, res.size as _) };
        if addr.is_null() {
            Err(ENOMEM)
        } else {
            // INVARIANT: `addr` is non-null and was returned by `ioremap`, so it is valid. It is
            // also 8-byte aligned because we checked it above.
            Ok(Self { ptr: addr as usize })
        }
    }
    /// Returns `true` when accessing `size_of::<T>()` bytes at `offset` stays within `SIZE`
    /// bytes and `offset` is naturally aligned for `T`.
    const fn offset_ok<T>(offset: usize) -> bool {
        let type_size = core::mem::size_of::<T>();
        if let Some(end) = offset.checked_add(type_size) {
            end <= SIZE && offset % type_size == 0
        } else {
            false
        }
    }
    /// Like [`Self::offset_ok`], but for a possibly unsized value: checks bounds against the
    /// value's run-time size and alignment.
    fn offset_ok_of_val<T: ?Sized>(offset: usize, value: &T) -> bool {
        let value_size = core::mem::size_of_val(value);
        let value_alignment = core::mem::align_of_val(value);
        if let Some(end) = offset.checked_add(value_size) {
            end <= SIZE && offset % value_alignment == 0
        } else {
            false
        }
    }
    /// Fails the build (via `build_assert!`) when `offset` is not a valid in-bounds, aligned
    /// offset for an access of type `T`.
    const fn check_offset<T>(offset: usize) {
        crate::build_assert!(Self::offset_ok::<T>(offset), "IoMem offset overflow");
    }
    /// Copy memory block from an i/o memory by filling the specified buffer with it.
    ///
    /// # Examples
    /// ```
    /// # use kernel::prelude::*;
    /// use kernel::io_mem::{self, IoMem, Resource};
    ///
    /// fn test(res: Resource) -> Result {
    ///     // Create an i/o memory block of at least 100 bytes.
    ///     // SAFETY: No DMA operations are initiated through `mem`.
    ///     let mem = unsafe { IoMem::<100>::try_new(res) }?;
    ///
    ///     let mut buffer: [u8; 32] = [0; 32];
    ///
    ///     // Memcpy 16 bytes from an offset 10 of i/o memory block into the buffer.
    ///     mem.try_memcpy_fromio(&mut buffer[..16], 10)?;
    ///
    ///     Ok(())
    /// }
    /// ```
    pub fn try_memcpy_fromio(&self, buffer: &mut [u8], offset: usize) -> Result {
        if !Self::offset_ok_of_val(offset, buffer) {
            return Err(EINVAL);
        }
        let ptr = self.ptr.wrapping_add(offset);
        // SAFETY:
        // - The type invariants guarantee that `ptr` is a valid pointer.
        // - The bounds of `buffer` are checked with a call to `offset_ok_of_val()`.
        unsafe {
            bindings::memcpy_fromio(
                buffer.as_mut_ptr() as *mut _,
                ptr as *const _,
                buffer.len() as _,
            )
        };
        Ok(())
    }
    // 64-bit accessors (`readq`/`writeq` and relaxed variants) only exist on
    // CONFIG_64BIT kernels, mirroring the C `io.h` interface.
    define_read!(readb, try_readb, u8);
    define_read!(readw, try_readw, u16);
    define_read!(readl, try_readl, u32);
    define_read!(
        #[cfg(CONFIG_64BIT)]
        readq,
        try_readq,
        u64
    );
    define_read!(readb_relaxed, try_readb_relaxed, u8);
    define_read!(readw_relaxed, try_readw_relaxed, u16);
    define_read!(readl_relaxed, try_readl_relaxed, u32);
    define_read!(
        #[cfg(CONFIG_64BIT)]
        readq_relaxed,
        try_readq_relaxed,
        u64
    );
    define_write!(writeb, try_writeb, u8);
    define_write!(writew, try_writew, u16);
    define_write!(writel, try_writel, u32);
    define_write!(
        #[cfg(CONFIG_64BIT)]
        writeq,
        try_writeq,
        u64
    );
    define_write!(writeb_relaxed, try_writeb_relaxed, u8);
    define_write!(writew_relaxed, try_writew_relaxed, u16);
    define_write!(writel_relaxed, try_writel_relaxed, u32);
    define_write!(
        #[cfg(CONFIG_64BIT)]
        writeq_relaxed,
        try_writeq_relaxed,
        u64
    );
}
impl<const SIZE: usize> Drop for IoMem<SIZE> {
    /// Unmaps the memory block when the wrapper goes out of scope.
    fn drop(&mut self) {
        // SAFETY: By the type invariant, `self.ptr` is a value returned by a previous successful
        // call to `ioremap`.
        unsafe { bindings::iounmap(self.ptr as _) };
    }
}

81
rust/kernel/iov_iter.rs Normal file
View File

@ -0,0 +1,81 @@
// SPDX-License-Identifier: GPL-2.0
//! IO vector iterators.
//!
//! C header: [`include/linux/uio.h`](../../../../include/linux/uio.h)
use crate::{
bindings,
error::code::*,
io_buffer::{IoBufferReader, IoBufferWriter},
Result,
};
/// Wraps the kernel's `struct iov_iter`.
///
/// # Invariants
///
/// The pointer `IovIter::ptr` is non-null and valid.
pub struct IovIter {
    /// Raw pointer to the C iterator; validity is guaranteed by the type invariant.
    ptr: *mut bindings::iov_iter,
}
impl IovIter {
    /// Returns the iterator's remaining byte count; shared by both the reader and
    /// writer `len()` implementations.
    fn common_len(&self) -> usize {
        // SAFETY: `IovIter::ptr` is guaranteed to be valid by the type invariants.
        unsafe { (*self.ptr).count }
    }
    /// Constructs a new [`struct iov_iter`] wrapper.
    ///
    /// # Safety
    ///
    /// The pointer `ptr` must be non-null and valid for the lifetime of the object.
    pub(crate) unsafe fn from_ptr(ptr: *mut bindings::iov_iter) -> Self {
        // INVARIANTS: the safety contract ensures the type invariant will hold.
        Self { ptr }
    }
}
impl IoBufferWriter for IovIter {
    fn len(&self) -> usize {
        self.common_len()
    }
    fn clear(&mut self, mut len: usize) -> Result {
        // `iov_iter_zero` advances the iterator and returns how many bytes it
        // zeroed, so loop until done; a zero return means no progress (fault).
        while len > 0 {
            // SAFETY: `IovIter::ptr` is guaranteed to be valid by the type invariants.
            let written = unsafe { bindings::iov_iter_zero(len, self.ptr) };
            if written == 0 {
                return Err(EFAULT);
            }
            len -= written;
        }
        Ok(())
    }
    unsafe fn write_raw(&mut self, data: *const u8, len: usize) -> Result {
        // SAFETY: `IovIter::ptr` is valid by the type invariants, and the caller
        // guarantees that `data` is valid for reads of `len` bytes.
        let res = unsafe { bindings::copy_to_iter(data as _, len, self.ptr) };
        // A short copy indicates a fault part-way through.
        if res != len {
            Err(EFAULT)
        } else {
            Ok(())
        }
    }
}
impl IoBufferReader for IovIter {
    fn len(&self) -> usize {
        self.common_len()
    }
    unsafe fn read_raw(&mut self, out: *mut u8, len: usize) -> Result {
        // SAFETY: `IovIter::ptr` is valid by the type invariants, and the caller
        // guarantees that `out` is valid for writes of `len` bytes.
        let res = unsafe { bindings::copy_from_iter(out as _, len, self.ptr) };
        // A short copy indicates a fault part-way through.
        if res != len {
            Err(EFAULT)
        } else {
            Ok(())
        }
    }
}

411
rust/kernel/irq.rs Normal file
View File

@ -0,0 +1,411 @@
// SPDX-License-Identifier: GPL-2.0
//! Interrupts and interrupt chips.
//!
//! See <https://www.kernel.org/doc/Documentation/core-api/genericirq.rst>.
//!
//! C headers: [`include/linux/irq.h`](../../../../include/linux/irq.h) and
//! [`include/linux/interrupt.h`](../../../../include/linux/interrupt.h).
#![allow(dead_code)]
use crate::{bindings, c_types, error::from_kernel_result, types::PointerWrapper, Error, Result};
use core::ops::Deref;
/// The type of irq hardware numbers.
///
/// Alias of the kernel's `irq_hw_number_t`.
pub type HwNumber = bindings::irq_hw_number_t;
/// Wraps the kernel's `struct irq_data`.
///
/// # Invariants
///
/// The pointer `IrqData::ptr` is non-null and valid.
pub struct IrqData {
    /// Raw pointer to the C per-irq data; validity is guaranteed by the type invariant.
    ptr: *mut bindings::irq_data,
}
impl IrqData {
    /// Creates a new `IrqData` instance from a raw pointer.
    ///
    /// # Safety
    ///
    /// Callers must ensure that `ptr` is non-null and valid when the function is called, and that
    /// it remains valid for the lifetime of the returned [`IrqData`] instance.
    unsafe fn from_ptr(ptr: *mut bindings::irq_data) -> Self {
        // INVARIANTS: By the safety requirements, the instance we're creating satisfies the type
        // invariants.
        Self { ptr }
    }
    /// Returns the hardware irq number.
    pub fn hwirq(&self) -> HwNumber {
        // SAFETY: By the type invariants, it's ok to dereference `ptr`.
        unsafe { (*self.ptr).hwirq }
    }
}
/// Wraps the kernel's `struct irq_data` when it is locked.
///
/// Being locked allows additional operations to be performed on the data.
pub struct LockedIrqData(IrqData);
impl LockedIrqData {
    /// Sets the high-level irq flow handler to the builtin one for level-triggered irqs.
    pub fn set_level_handler(&mut self) {
        // SAFETY: By the type invariants of `self.0`, we know `self.0.ptr` is valid.
        unsafe { bindings::irq_set_handler_locked(self.0.ptr, Some(bindings::handle_level_irq)) };
    }
    /// Sets the high-level irq flow handler to the builtin one for edge-triggered irqs.
    pub fn set_edge_handler(&mut self) {
        // SAFETY: By the type invariants of `self.0`, we know `self.0.ptr` is valid.
        unsafe { bindings::irq_set_handler_locked(self.0.ptr, Some(bindings::handle_edge_irq)) };
    }
    /// Sets the high-level irq flow handler to the builtin one for bad irqs.
    pub fn set_bad_handler(&mut self) {
        // SAFETY: By the type invariants of `self.0`, we know `self.0.ptr` is valid.
        unsafe { bindings::irq_set_handler_locked(self.0.ptr, Some(bindings::handle_bad_irq)) };
    }
}
// Dereferencing to `IrqData` makes the unlocked operations (e.g. `hwirq`)
// available on the locked wrapper too.
impl Deref for LockedIrqData {
    type Target = IrqData;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
/// Extra information returned by some of the [`Chip`] methods on success.
///
/// The discriminants match the C `IRQ_SET_MASK_*` constants so the value can be
/// returned to the irq core unchanged.
pub enum ExtraResult {
    /// Indicates that the caller (irq core) will update the descriptor state.
    None = bindings::IRQ_SET_MASK_OK as _,
    /// Indicates that the callee (irq chip implementation) already updated the descriptor state.
    NoCopy = bindings::IRQ_SET_MASK_OK_NOCOPY as _,
    /// Same as [`ExtraResult::None`] in terms of updating descriptor state. It is used in stacked
    /// irq chips to indicate that descendant chips should be skipped.
    Done = bindings::IRQ_SET_MASK_OK_DONE as _,
}
/// An irq chip.
///
/// It is a trait for the functions defined in [`struct irq_chip`].
///
/// [`struct irq_chip`]: ../../../include/linux/irq.h
pub trait Chip: Sized {
    /// The type of the context data stored in the irq chip and made available on each callback.
    type Data: PointerWrapper;
    /// The methods to use to populate [`struct irq_chip`]. This is typically populated with
    /// [`declare_irq_chip_operations`].
    const TO_USE: ToUse;
    /// Called at the start of a new interrupt.
    fn ack(data: <Self::Data as PointerWrapper>::Borrowed<'_>, irq_data: &IrqData);
    /// Masks an interrupt source.
    fn mask(data: <Self::Data as PointerWrapper>::Borrowed<'_>, irq_data: &IrqData);
    /// Unmasks an interrupt source.
    //
    // Note: this is a required method like `ack` and `mask`, so its parameter is
    // named `data` (no underscore) for consistency with them.
    fn unmask(data: <Self::Data as PointerWrapper>::Borrowed<'_>, irq_data: &IrqData);
    /// Sets the flow type of an interrupt.
    ///
    /// The flow type is a combination of the constants in [`Type`].
    ///
    /// This method is optional (opted into via [`Chip::TO_USE`]); the default
    /// implementation accepts any flow type and lets the irq core update the state.
    fn set_type(
        _data: <Self::Data as PointerWrapper>::Borrowed<'_>,
        _irq_data: &mut LockedIrqData,
        _flow_type: u32,
    ) -> Result<ExtraResult> {
        Ok(ExtraResult::None)
    }
    /// Enables or disables power-management wake-on of an interrupt.
    ///
    /// This method is optional (opted into via [`Chip::TO_USE`]); the default
    /// implementation does nothing and reports success.
    fn set_wake(
        _data: <Self::Data as PointerWrapper>::Borrowed<'_>,
        _irq_data: &IrqData,
        _on: bool,
    ) -> Result {
        Ok(())
    }
}
/// Initialises `chip` with the callbacks defined in `T`.
///
/// `irq_ack`, `irq_mask` and `irq_unmask` are always installed; `irq_set_type`
/// and `irq_set_wake` only when enabled through [`Chip::TO_USE`].
///
/// # Safety
///
/// The caller must ensure that the value stored in the irq chip data is the result of calling
/// [`PointerWrapper::into_pointer`] for the [`T::Data`] type.
pub(crate) unsafe fn init_chip<T: Chip>(chip: &mut bindings::irq_chip) {
    chip.irq_ack = Some(irq_ack_callback::<T>);
    chip.irq_mask = Some(irq_mask_callback::<T>);
    chip.irq_unmask = Some(irq_unmask_callback::<T>);
    if T::TO_USE.set_type {
        chip.irq_set_type = Some(irq_set_type_callback::<T>);
    }
    if T::TO_USE.set_wake {
        chip.irq_set_wake = Some(irq_set_wake_callback::<T>);
    }
}
/// Represents which fields of [`struct irq_chip`] should be populated with pointers.
///
/// This is typically populated with the [`declare_irq_chip_operations`] macro.
pub struct ToUse {
    /// The `irq_set_type` field of [`struct irq_chip`].
    pub set_type: bool,
    /// The `irq_set_wake` field of [`struct irq_chip`].
    pub set_wake: bool,
}
/// A constant version where all values are set to `false`, that is, all supported fields will
/// be set to null pointers.
pub const USE_NONE: ToUse = ToUse {
    set_type: false,
    set_wake: false,
};
/// Defines the [`Chip::TO_USE`] field based on a list of fields to be populated.
///
/// Invoke with no arguments to use no optional callbacks, or with a comma-separated
/// list of [`ToUse`] field names (e.g. `set_type`, `set_wake`) to enable them.
#[macro_export]
macro_rules! declare_irq_chip_operations {
    () => {
        const TO_USE: $crate::irq::ToUse = $crate::irq::USE_NONE;
    };
    ($($i:ident),+) => {
        #[allow(clippy::needless_update)]
        const TO_USE: $crate::irq::ToUse =
            $crate::irq::ToUse {
                $($i: true),+ ,
                ..$crate::irq::USE_NONE
            };
    };
}
/// Enables or disables power-management wake-on for the given irq number.
///
/// Returns an error when the underlying `irq_set_irq_wake` call reports a
/// negative errno.
pub fn set_wake(irq: u32, on: bool) -> Result {
    // SAFETY: Just an FFI call, there are no extra requirements for safety.
    let ret = unsafe { bindings::irq_set_irq_wake(irq, on as _) };
    if ret >= 0 {
        Ok(())
    } else {
        Err(Error::from_kernel_errno(ret))
    }
}
/// C `irq_ack` callback: borrows the chip data and forwards to `T::ack`.
unsafe extern "C" fn irq_ack_callback<T: Chip>(irq_data: *mut bindings::irq_data) {
    // SAFETY: The safety requirements of `init_chip`, which is the only place that uses this
    // callback, ensure that the value stored as irq chip data comes from a previous call to
    // `PointerWrapper::into_pointer`.
    let data = unsafe { T::Data::borrow(bindings::irq_data_get_irq_chip_data(irq_data)) };
    // SAFETY: The value returned by `IrqData` is only valid until the end of this function, and
    // `irq_data` is guaranteed to be valid until then (by the contract with C code).
    T::ack(data, unsafe { &IrqData::from_ptr(irq_data) })
}
/// C `irq_mask` callback: borrows the chip data and forwards to `T::mask`.
unsafe extern "C" fn irq_mask_callback<T: Chip>(irq_data: *mut bindings::irq_data) {
    // SAFETY: The safety requirements of `init_chip`, which is the only place that uses this
    // callback, ensure that the value stored as irq chip data comes from a previous call to
    // `PointerWrapper::into_pointer`.
    let data = unsafe { T::Data::borrow(bindings::irq_data_get_irq_chip_data(irq_data)) };
    // SAFETY: The value returned by `IrqData` is only valid until the end of this function, and
    // `irq_data` is guaranteed to be valid until then (by the contract with C code).
    T::mask(data, unsafe { &IrqData::from_ptr(irq_data) })
}
/// C `irq_unmask` callback: borrows the chip data and forwards to `T::unmask`.
unsafe extern "C" fn irq_unmask_callback<T: Chip>(irq_data: *mut bindings::irq_data) {
    // SAFETY: The safety requirements of `init_chip`, which is the only place that uses this
    // callback, ensure that the value stored as irq chip data comes from a previous call to
    // `PointerWrapper::into_pointer`.
    let data = unsafe { T::Data::borrow(bindings::irq_data_get_irq_chip_data(irq_data)) };
    // SAFETY: The value returned by `IrqData` is only valid until the end of this function, and
    // `irq_data` is guaranteed to be valid until then (by the contract with C code).
    T::unmask(data, unsafe { &IrqData::from_ptr(irq_data) })
}
/// C `irq_set_type` callback: forwards to `T::set_type` and maps the
/// [`ExtraResult`] (or errno) back to the C return convention.
unsafe extern "C" fn irq_set_type_callback<T: Chip>(
    irq_data: *mut bindings::irq_data,
    flow_type: c_types::c_uint,
) -> c_types::c_int {
    from_kernel_result! {
        // SAFETY: The safety requirements of `init_chip`, which is the only place that uses this
        // callback, ensure that the value stored as irq chip data comes from a previous call to
        // `PointerWrapper::into_pointer`.
        let data = unsafe { T::Data::borrow(bindings::irq_data_get_irq_chip_data(irq_data)) };
        // SAFETY: The value returned by `IrqData` is only valid until the end of this function, and
        // `irq_data` is guaranteed to be valid until then (by the contract with C code).
        let ret = T::set_type(data, &mut LockedIrqData(unsafe { IrqData::from_ptr(irq_data) }), flow_type)?;
        Ok(ret as _)
    }
}
/// C `irq_set_wake` callback: forwards to `T::set_wake`, converting the C uint
/// flag to `bool` and the `Result` to an errno-style int.
unsafe extern "C" fn irq_set_wake_callback<T: Chip>(
    irq_data: *mut bindings::irq_data,
    on: c_types::c_uint,
) -> c_types::c_int {
    from_kernel_result! {
        // SAFETY: The safety requirements of `init_chip`, which is the only place that uses this
        // callback, ensure that the value stored as irq chip data comes from a previous call to
        // `PointerWrapper::into_pointer`.
        let data = unsafe { T::Data::borrow(bindings::irq_data_get_irq_chip_data(irq_data)) };
        // SAFETY: The value returned by `IrqData` is only valid until the end of this function, and
        // `irq_data` is guaranteed to be valid until then (by the contract with C code).
        T::set_wake(data, unsafe { &IrqData::from_ptr(irq_data) }, on != 0)?;
        Ok(0)
    }
}
/// Contains constants that describe how an interrupt can be triggered.
///
/// It is tagged with `non_exhaustive` to prevent users from instantiating it.
#[non_exhaustive]
pub struct Type;
impl Type {
    /// The interrupt cannot be triggered.
    pub const NONE: u32 = bindings::IRQ_TYPE_NONE;
    /// The interrupt is triggered when the signal goes from low to high.
    pub const EDGE_RISING: u32 = bindings::IRQ_TYPE_EDGE_RISING;
    /// The interrupt is triggered when the signal goes from high to low.
    pub const EDGE_FALLING: u32 = bindings::IRQ_TYPE_EDGE_FALLING;
    /// The interrupt is triggered when the signal goes from low to high and when it goes from
    /// high to low.
    pub const EDGE_BOTH: u32 = bindings::IRQ_TYPE_EDGE_BOTH;
    /// The interrupt is triggered while the signal is held high.
    pub const LEVEL_HIGH: u32 = bindings::IRQ_TYPE_LEVEL_HIGH;
    /// The interrupt is triggered while the signal is held low.
    pub const LEVEL_LOW: u32 = bindings::IRQ_TYPE_LEVEL_LOW;
}
/// Wraps the kernel's `struct irq_desc`.
///
/// # Invariants
///
/// The pointer `Descriptor::ptr` is non-null and valid.
pub struct Descriptor {
    /// Raw pointer to the C irq descriptor; validity is guaranteed by the type invariant.
    pub(crate) ptr: *mut bindings::irq_desc,
}
impl Descriptor {
    /// Constructs a new `struct irq_desc` wrapper.
    ///
    /// # Safety
    ///
    /// The pointer `ptr` must be non-null and valid for the lifetime of the returned object.
    unsafe fn from_ptr(ptr: *mut bindings::irq_desc) -> Self {
        // INVARIANT: The safety requirements ensure the invariant.
        Self { ptr }
    }
    /// Calls `chained_irq_enter` and returns a guard that calls `chained_irq_exit` once dropped.
    ///
    /// It is meant to be used by chained irq handlers to dispatch irqs to the next handlers.
    pub fn enter_chained(&self) -> ChainedGuard<'_> {
        // SAFETY: By the type invariants, `ptr` is always non-null and valid.
        let irq_chip = unsafe { bindings::irq_desc_get_chip(self.ptr) };
        // SAFETY: By the type invariants, `ptr` is always non-null and valid. `irq_chip` was just
        // returned from `ptr`, so it is still valid too.
        unsafe { bindings::chained_irq_enter(irq_chip, self.ptr) };
        // The guard borrows `self` so the descriptor (and thus `irq_chip`)
        // outlives the chained section; see `ChainedGuard::drop`.
        ChainedGuard {
            desc: self,
            irq_chip,
        }
    }
}
/// A guard to call `chained_irq_exit` after `chained_irq_enter` was called.
///
/// It is also used as evidence that a previous `chained_irq_enter` was called. So there are no
/// public constructors and it is only created after indeed calling `chained_irq_enter`.
pub struct ChainedGuard<'a> {
    /// Descriptor the chained section was entered on; keeps it alive for the guard's lifetime.
    desc: &'a Descriptor,
    /// Chip pointer obtained from `desc` at `chained_irq_enter` time.
    irq_chip: *mut bindings::irq_chip,
}
impl Drop for ChainedGuard<'_> {
    /// Exits the chained-irq section entered by [`Descriptor::enter_chained`].
    fn drop(&mut self) {
        // SAFETY: The lifetime of `ChainedGuard` guarantees that `self.desc` remains valid, so it
        // also guarantees `irq_chip` (which was returned from it) and `self.desc.ptr` (guaranteed
        // by the type invariants).
        unsafe { bindings::chained_irq_exit(self.irq_chip, self.desc.ptr) };
    }
}
/// Wraps the kernel's `struct irq_domain`.
///
/// # Invariants
///
/// The pointer `Domain::ptr` is non-null and valid.
#[cfg(CONFIG_IRQ_DOMAIN)]
pub struct Domain {
    /// Raw pointer to the C irq domain; validity is guaranteed by the type invariant.
    ptr: *mut bindings::irq_domain,
}
#[cfg(CONFIG_IRQ_DOMAIN)]
impl Domain {
    /// Constructs a new `struct irq_domain` wrapper.
    ///
    /// # Safety
    ///
    /// The pointer `ptr` must be non-null and valid for the lifetime of the returned object.
    pub(crate) unsafe fn from_ptr(ptr: *mut bindings::irq_domain) -> Self {
        // INVARIANT: The safety requirements ensure the invariant.
        Self { ptr }
    }
    /// Invokes the chained handler of the given hw irq of the given domain.
    ///
    /// It requires evidence that `chained_irq_enter` was called, which is done by passing a
    /// `ChainedGuard` instance.
    pub fn generic_handle_chained(&self, hwirq: u32, _guard: &ChainedGuard<'_>) {
        // SAFETY: `ptr` is valid by the type invariants.
        unsafe { bindings::generic_handle_domain_irq(self.ptr, hwirq) };
    }
}
/// A high-level irq flow handler.
///
/// Implementations are adapted to the C `irq_flow_handler_t` signature by
/// [`new_flow_handler`].
pub trait FlowHandler {
    /// The data associated with the handler.
    type Data: PointerWrapper;
    /// Implements the irq flow for the given descriptor.
    fn handle_irq_flow(data: <Self::Data as PointerWrapper>::Borrowed<'_>, desc: &Descriptor);
}
/// Returns the raw irq flow handler corresponding to the (high-level) one defined in `T`.
///
/// # Safety
///
/// The caller must ensure that the value stored in the irq handler data (as returned by
/// `irq_desc_get_handler_data`) is the result of calling [`PointerWrapper::into_pointer`] for
/// the [`T::Data`] type.
pub(crate) unsafe fn new_flow_handler<T: FlowHandler>() -> bindings::irq_flow_handler_t {
    Some(irq_flow_handler::<T>)
}
/// C trampoline used by [`new_flow_handler`]: recovers the handler data and
/// forwards to `T::handle_irq_flow`.
unsafe extern "C" fn irq_flow_handler<T: FlowHandler>(desc: *mut bindings::irq_desc) {
    // SAFETY: By the safety requirements of `new_flow_handler`, we know that the value returned by
    // `irq_desc_get_handler_data` comes from calling `T::Data::into_pointer`. `desc` is valid by
    // the C API contract.
    let data = unsafe { T::Data::borrow(bindings::irq_desc_get_handler_data(desc)) };
    // SAFETY: The C API guarantees that `desc` is valid for the duration of this call, which
    // outlives the lifetime returned by `from_desc`.
    T::handle_irq_flow(data, &unsafe { Descriptor::from_ptr(desc) });
}

6
rust/kernel/kasync.rs Normal file
View File

@ -0,0 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
//! Kernel async functionality.
#[cfg(CONFIG_NET)]
pub mod net;

322
rust/kernel/kasync/net.rs Normal file
View File

@ -0,0 +1,322 @@
// SPDX-License-Identifier: GPL-2.0
//! Async networking.
use crate::{bindings, c_types, error::code::*, net, sync::NoWaitLock, types::Opaque, Result};
use core::{
future::Future,
marker::{PhantomData, PhantomPinned},
ops::Deref,
pin::Pin,
task::{Context, Poll, Waker},
};
/// A socket listening on a TCP port.
///
/// The [`TcpListener::accept`] method is meant to be used in async contexts.
pub struct TcpListener {
    // The underlying synchronous listener; also exposed to callers through `Deref`.
    listener: net::TcpListener,
}
impl TcpListener {
    /// Creates a new TCP listener.
    ///
    /// It is configured to listen on the given socket address for the given namespace.
    pub fn try_new(ns: &net::Namespace, addr: &net::SocketAddr) -> Result<Self> {
        let listener = net::TcpListener::try_new(ns, addr)?;
        Ok(Self { listener })
    }

    /// Accepts a new connection.
    ///
    /// Returns a future that when ready indicates the result of the accept operation; on success,
    /// it contains the newly-accepted tcp stream.
    pub fn accept(&self) -> impl Future<Output = Result<TcpStream>> + '_ {
        // Wake up when the socket becomes readable (an incoming connection) or errors out.
        let mask = bindings::BINDINGS_EPOLLIN | bindings::BINDINGS_EPOLLERR;
        SocketFuture::from_listener(self, mask, || {
            let stream = self.listener.accept(false)?;
            Ok(TcpStream { stream })
        })
    }
}
// Dereferences to the synchronous listener so its inherent (blocking-capable) API remains
// available on the async wrapper.
impl Deref for TcpListener {
    type Target = net::TcpListener;
    fn deref(&self) -> &Self::Target {
        &self.listener
    }
}
/// A connected TCP socket.
///
/// The potentially blocking methods (e.g., [`TcpStream::read`], [`TcpStream::write`]) are meant
/// to be used in async contexts.
///
/// # Examples
///
/// ```
/// # use kernel::prelude::*;
/// # use kernel::kasync::net::TcpStream;
/// async fn echo_server(stream: TcpStream) -> Result {
///     let mut buf = [0u8; 1024];
///     loop {
///         let n = stream.read(&mut buf).await?;
///         if n == 0 {
///             return Ok(());
///         }
///         stream.write_all(&buf[..n]).await?;
///     }
/// }
/// ```
pub struct TcpStream {
    // The underlying synchronous stream; also exposed to callers through `Deref`.
    stream: net::TcpStream,
}
impl TcpStream {
    /// Reads data from a connected socket.
    ///
    /// Returns a future that when ready indicates the result of the read operation; on success, it
    /// contains the number of bytes read, which will be zero if the connection is closed.
    pub fn read<'a>(&'a self, buf: &'a mut [u8]) -> impl Future<Output = Result<usize>> + 'a {
        // Wake up when data arrives, the peer hangs up, or the socket errors out.
        let mask =
            bindings::BINDINGS_EPOLLIN | bindings::BINDINGS_EPOLLHUP | bindings::BINDINGS_EPOLLERR;
        SocketFuture::from_stream(self, mask, || self.stream.read(buf, false))
    }

    /// Writes data to the connected socket.
    ///
    /// Returns a future that when ready indicates the result of the write operation; on success, it
    /// contains the number of bytes written.
    pub fn write<'a>(&'a self, buf: &'a [u8]) -> impl Future<Output = Result<usize>> + 'a {
        // Wake up when the socket becomes writable, the peer hangs up, or the socket errors out.
        let mask = bindings::BINDINGS_EPOLLOUT
            | bindings::BINDINGS_EPOLLHUP
            | bindings::BINDINGS_EPOLLERR;
        SocketFuture::from_stream(self, mask, || self.stream.write(buf, false))
    }

    /// Writes all the data to the connected socket.
    ///
    /// Returns a future that when ready indicates the result of the write operation; on success, it
    /// has written all the data.
    pub async fn write_all<'a>(&'a self, buf: &'a [u8]) -> Result {
        let mut remaining = buf;
        loop {
            if remaining.is_empty() {
                return Ok(());
            }
            let written = self.write(remaining).await?;
            remaining = &remaining[written..];
        }
    }
}
// Dereferences to the synchronous stream so its inherent (blocking-capable) API remains
// available on the async wrapper.
impl Deref for TcpStream {
    type Target = net::TcpStream;
    fn deref(&self) -> &Self::Target {
        &self.stream
    }
}
/// A future for a socket operation.
///
/// # Invariants
///
/// `sock` is always non-null and valid for the duration of the lifetime of the instance.
struct SocketFuture<'a, Out, F: FnMut() -> Result<Out> + Send + 'a> {
    // The socket whose wait queue this future registers itself on.
    sock: *mut bindings::socket,
    // The EPOLL* event bits this future waits for.
    mask: u32,
    // Whether `wq_entry` has been added to the socket's wait queue; `true` also implies that
    // `wq_entry` has been initialised (see `poll` and `drop`).
    is_queued: bool,
    // Wait-queue entry through which the socket notifies the future of state changes.
    wq_entry: Opaque<bindings::wait_queue_entry>,
    // The waker from the most recent poll; the lock mediates races with `wake_callback`.
    waker: NoWaitLock<Option<Waker>>,
    // Ties the future's lifetime to the listener/stream it was created from.
    _p: PhantomData<&'a ()>,
    // The future is address-sensitive once queued (the wait queue holds a pointer into it),
    // so it must be `!Unpin`.
    _pin: PhantomPinned,
    // The non-blocking operation attempted on each poll.
    operation: F,
}
// SAFETY: A kernel socket can be used from any thread, `wq_entry` is only used on drop and when
// `is_queued` is initially `false`.
unsafe impl<Out, F: FnMut() -> Result<Out> + Send> Send for SocketFuture<'_, Out, F> {}
impl<'a, Out, F: FnMut() -> Result<Out> + Send + 'a> SocketFuture<'a, Out, F> {
    /// Creates a new socket future.
    ///
    /// # Safety
    ///
    /// Callers must ensure that `sock` is non-null, valid, and remains valid for the lifetime
    /// (`'a`) of the returned instance.
    unsafe fn new(sock: *mut bindings::socket, mask: u32, operation: F) -> Self {
        // INVARIANT: The safety requirements guarantee `sock` is non-null and valid for the
        // instance's lifetime. `wq_entry` is left uninitialised here because `is_queued` is
        // `false`; it is initialised in `poll` just before it is queued.
        Self {
            sock,
            mask,
            is_queued: false,
            wq_entry: Opaque::uninit(),
            waker: NoWaitLock::new(None),
            operation,
            _p: PhantomData,
            _pin: PhantomPinned,
        }
    }

    /// Creates a new socket future for a tcp listener.
    fn from_listener(listener: &'a TcpListener, mask: u32, operation: F) -> Self {
        // SAFETY: The socket is guaranteed to remain valid because it is bound to the reference to
        // the listener (whose existence guarantees the socket remains valid).
        unsafe { Self::new(listener.listener.sock, mask, operation) }
    }

    /// Creates a new socket future for a tcp stream.
    fn from_stream(stream: &'a TcpStream, mask: u32, operation: F) -> Self {
        // SAFETY: The socket is guaranteed to remain valid because it is bound to the reference to
        // the stream (whose existence guarantees the socket remains valid).
        unsafe { Self::new(stream.stream.sock, mask, operation) }
    }

    /// Callback called when the socket changes state.
    ///
    /// If the state matches the one we're waiting on, we wake up the task so that the future can
    /// be polled again.
    unsafe extern "C" fn wake_callback(
        wq_entry: *mut bindings::wait_queue_entry,
        _mode: c_types::c_uint,
        _flags: c_types::c_int,
        key: *mut c_types::c_void,
    ) -> c_types::c_int {
        // `key` carries the event bits that triggered this notification.
        let mask = key as u32;
        // SAFETY: The future is valid while this callback is called because we remove from the
        // queue on drop.
        //
        // There is a potential soundness issue here because we're generating a shared reference to
        // `Self` while `Self::poll` has a mutable (unique) reference. However, for `!Unpin` types
        // (like `Self`), `&mut T` is treated as `*mut T` per
        // https://github.com/rust-lang/rust/issues/63818 -- so we avoid the unsoundness. Once a
        // more definitive solution is available, we can change this to use it.
        let s = unsafe { &*crate::container_of!(wq_entry, Self, wq_entry) };
        if mask & s.mask == 0 {
            // Nothing to do as this notification doesn't interest us.
            return 0;
        }
        // If we can't acquire the waker lock, the waker is in the process of being modified. Our
        // attempt to acquire the lock will be reported to the lock owner, so it will trigger the
        // wake up.
        if let Some(guard) = s.waker.try_lock() {
            if let Some(ref w) = *guard {
                let cloned = w.clone();
                // Release the lock before waking so the lock is not held across the wake-up.
                drop(guard);
                cloned.wake();
                return 1;
            }
        }
        0
    }

    /// Poll the future once.
    ///
    /// It calls the operation and converts `EAGAIN` errors into a pending state.
    fn poll_once(self: Pin<&mut Self>) -> Poll<Result<Out>> {
        // SAFETY: We never move out of `this`.
        let this = unsafe { self.get_unchecked_mut() };
        match (this.operation)() {
            Ok(s) => Poll::Ready(Ok(s)),
            Err(e) => {
                if e == EAGAIN {
                    // The operation would block: report pending so the caller queues a waker.
                    Poll::Pending
                } else {
                    Poll::Ready(Err(e))
                }
            }
        }
    }

    /// Updates the waker stored in the future.
    ///
    /// It automatically triggers a wake up on races with the reactor.
    fn set_waker(&self, waker: &Waker) {
        if let Some(mut guard) = self.waker.try_lock() {
            let old = core::mem::replace(&mut *guard, Some(waker.clone()));
            // `unlock` reports whether another party attempted to take the lock while we held it.
            let contention = guard.unlock();
            drop(old);
            if !contention {
                return;
            }
        }
        // We either couldn't store the waker because the existing one is being awakened, or the
        // reactor tried to acquire the lock while we held it (contention). In either case, we just
        // wake it up to ensure we don't miss any notification.
        waker.wake_by_ref();
    }
}
impl<Out, F: FnMut() -> Result<Out> + Send> Future for SocketFuture<'_, Out, F> {
    type Output = Result<Out>;
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        match self.as_mut().poll_once() {
            Poll::Ready(r) => Poll::Ready(r),
            Poll::Pending => {
                // Store away the latest waker every time we may return `Poll::Pending`.
                self.set_waker(cx.waker());
                if self.is_queued {
                    // Nothing else to do as the waiter is already queued.
                    return Poll::Pending;
                }
                // SAFETY: We never move out of `this`.
                let this = unsafe { self.as_mut().get_unchecked_mut() };
                this.is_queued = true;
                // SAFETY: `wq_entry` is valid for write.
                unsafe {
                    bindings::init_waitqueue_func_entry(
                        this.wq_entry.get(),
                        Some(Self::wake_callback),
                    )
                };
                // SAFETY: `wq_entry` was just initialised above and is valid for read/write.
                // By the type invariants, the socket is always valid.
                unsafe {
                    bindings::add_wait_queue(
                        core::ptr::addr_of_mut!((*this.sock).wq.wait),
                        this.wq_entry.get(),
                    )
                };
                // If the future wasn't queued yet, we need to poll again in case it reached
                // the desired state between the last poll and being queued (in which case we
                // would have missed the notification).
                self.poll_once()
            }
        }
    }
}
impl<Out, F: FnMut() -> Result<Out> + Send> Drop for SocketFuture<'_, Out, F> {
    fn drop(&mut self) {
        // If the future was never queued, `wq_entry` was never initialised and there is nothing
        // to remove from the socket's wait queue.
        if !self.is_queued {
            return;
        }
        // SAFETY: `wq_entry` is initialised because `is_queued` is set to `true`, so it is valid
        // for read/write. By the type invariants, the socket is always valid.
        unsafe {
            bindings::remove_wait_queue(
                core::ptr::addr_of_mut!((*self.sock).wq.wait),
                self.wq_entry.get(),
            )
        };
    }
}

91
rust/kernel/kunit.rs Normal file
View File

@ -0,0 +1,91 @@
// SPDX-License-Identifier: GPL-2.0
//! KUnit-based macros for Rust unit tests.
//!
//! C header: [`include/kunit/test.h`](../../../../../include/kunit/test.h)
//!
//! Reference: <https://www.kernel.org/doc/html/latest/dev-tools/kunit/index.html>
/// Asserts that a boolean expression is `true` at runtime.
///
/// Public but hidden since it should only be used from generated tests.
///
/// Unlike the one in `core`, this one does not panic; instead, it is mapped to the KUnit
/// facilities. See [`assert!`] for more details.
#[doc(hidden)]
#[macro_export]
macro_rules! kunit_assert {
    ($test:expr, $cond:expr $(,)?) => {{
        if !$cond {
            // Mirror the C-side KUnit structs so statics of these types can be handed to the
            // KUnit C API below.
            #[repr(transparent)]
            struct Location($crate::bindings::kunit_loc);
            #[repr(transparent)]
            struct UnaryAssert($crate::bindings::kunit_unary_assert);
            // SAFETY: There is only a static instance and in that one the pointer field
            // points to an immutable C string.
            unsafe impl Sync for Location {}
            // SAFETY: There is only a static instance and in that one the pointer field
            // points to an immutable C string.
            unsafe impl Sync for UnaryAssert {}
            static FILE: &'static $crate::str::CStr = $crate::c_str!(core::file!());
            static LOCATION: Location = Location($crate::bindings::kunit_loc {
                file: FILE.as_char_ptr(),
                line: core::line!() as i32,
            });
            static CONDITION: &'static $crate::str::CStr = $crate::c_str!(stringify!($cond));
            static ASSERTION: UnaryAssert = UnaryAssert($crate::bindings::kunit_unary_assert {
                assert: $crate::bindings::kunit_assert {
                    format: Some($crate::bindings::kunit_unary_assert_format),
                },
                condition: CONDITION.as_char_ptr(),
                expected_true: true,
            });
            // SAFETY:
            // - FFI call.
            // - The `test` pointer is valid because this hidden macro should only be called by
            //   the generated documentation tests which forward the test pointer given by KUnit.
            // - The string pointers (`file` and `condition`) point to null-terminated ones.
            // - The function pointer (`format`) points to the proper function.
            // - The pointers passed will remain valid since they point to statics.
            // - The format string is allowed to be null.
            // - There are, however, problems with this: first of all, this will end up stopping
            //   the thread, without running destructors. While that is problematic in itself,
            //   it is considered UB to have what is effectively a forced foreign unwind
            //   with `extern "C"` ABI. One could observe the stack that is now gone from
            //   another thread. We should avoid pinning stack variables to prevent library UB,
            //   too. For the moment, given test failures are reported immediately before the
            //   next test runs, that test failures should be fixed and that KUnit is explicitly
            //   documented as not suitable for production environments, we feel it is reasonable.
            unsafe {
                $crate::bindings::kunit_do_failed_assertion(
                    $test,
                    core::ptr::addr_of!(LOCATION.0),
                    $crate::bindings::kunit_assert_type_KUNIT_ASSERTION,
                    core::ptr::addr_of!(ASSERTION.0.assert),
                    core::ptr::null(),
                );
            }
        }
    }};
}
/// Asserts that two expressions are equal to each other (using [`PartialEq`]).
///
/// Public but hidden since it should only be used from generated tests.
///
/// Unlike the one in `core`, this one does not panic; instead, it is mapped to the KUnit
/// facilities. See [`assert!`] for more details.
#[doc(hidden)]
#[macro_export]
macro_rules! kunit_assert_eq {
    ($test:expr, $left:expr, $right:expr $(,)?) => {{
        // For the moment, we just forward to the expression assert because,
        // for binary asserts, KUnit supports only a few types (e.g. integers).
        $crate::kunit_assert!($test, $left == $right);
    }};
}

261
rust/kernel/lib.rs Normal file
View File

@ -0,0 +1,261 @@
// SPDX-License-Identifier: GPL-2.0
//! The `kernel` crate.
//!
//! This crate contains the kernel APIs that have been ported or wrapped for
//! usage by Rust code in the kernel and is shared by all of them.
//!
//! In other words, all the rest of the Rust code in the kernel (e.g. kernel
//! modules written in Rust) depends on [`core`], [`alloc`] and this crate.
//!
//! If you need a kernel C API that is not ported or wrapped yet here, then
//! do so first instead of bypassing this crate.
#![no_std]
#![feature(allocator_api)]
#![feature(associated_type_defaults)]
#![feature(concat_idents)]
#![feature(const_fn_trait_bound)]
#![feature(const_mut_refs)]
#![feature(const_ptr_offset_from)]
#![feature(const_refs_to_cell)]
#![feature(const_trait_impl)]
#![feature(doc_cfg)]
#![feature(generic_associated_types)]
#![feature(ptr_metadata)]
#![feature(receiver_trait)]
#![feature(coerce_unsized)]
#![feature(dispatch_from_dyn)]
#![feature(unsize)]
// Ensure conditional compilation based on the kernel configuration works;
// otherwise we may silently break things like initcall handling.
#[cfg(not(CONFIG_RUST))]
compile_error!("Missing kernel configuration for conditional compilation");
#[cfg(not(test))]
#[cfg(not(testlib))]
mod allocator;
#[doc(hidden)]
pub mod bindings;
#[cfg(CONFIG_ARM_AMBA)]
pub mod amba;
pub mod c_types;
pub mod chrdev;
#[cfg(CONFIG_COMMON_CLK)]
pub mod clk;
pub mod cred;
pub mod device;
pub mod driver;
pub mod error;
pub mod file;
pub mod gpio;
pub mod hwrng;
pub mod irq;
pub mod kasync;
pub mod miscdev;
pub mod mm;
#[cfg(CONFIG_NET)]
pub mod net;
pub mod pages;
pub mod power;
pub mod revocable;
pub mod security;
pub mod str;
pub mod task;
pub mod linked_list;
mod raw_list;
pub mod rbtree;
#[doc(hidden)]
pub mod module_param;
mod build_assert;
pub mod prelude;
pub mod print;
pub mod random;
mod static_assert;
#[doc(hidden)]
pub mod std_vendor;
pub mod sync;
#[cfg(any(CONFIG_SYSCTL, doc))]
#[doc(cfg(CONFIG_SYSCTL))]
pub mod sysctl;
pub mod io_buffer;
#[cfg(CONFIG_HAS_IOMEM)]
pub mod io_mem;
pub mod iov_iter;
pub mod of;
pub mod platform;
mod types;
pub mod user_ptr;
#[cfg(CONFIG_KUNIT)]
pub mod kunit;
#[doc(hidden)]
pub use build_error::build_error;
pub use crate::error::{to_result, Error, Result};
pub use crate::types::{
bit, bits_iter, ARef, AlwaysRefCounted, Bool, False, Mode, Opaque, ScopeGuard, True,
};
use core::marker::PhantomData;
/// Page size defined in terms of the `PAGE_SHIFT` macro from C.
///
/// [`PAGE_SHIFT`]: ../../../include/asm-generic/page.h
pub const PAGE_SIZE: usize = 1 << bindings::PAGE_SHIFT;

/// Prefix to appear before log messages printed from within the kernel crate.
// NOTE(review): the trailing NUL suggests this is handed to C printing facilities as a C
// string — confirm against the `print` module.
const __LOG_PREFIX: &[u8] = b"rust_kernel\0";
/// The top level entrypoint to implementing a kernel module.
///
/// For any teardown or cleanup operations, your type may implement [`Drop`].
///
/// Implementors must be `Sync` because the module instance may be referenced concurrently
/// once initialisation completes.
pub trait Module: Sized + Sync {
    /// Called at module initialization time.
    ///
    /// Use this method to perform whatever setup or registration your module
    /// should do.
    ///
    /// Equivalent to the `module_init` macro in the C API.
    fn init(name: &'static str::CStr, module: &'static ThisModule) -> Result<Self>;
}
/// Equivalent to `THIS_MODULE` in the C API.
///
/// C header: `include/linux/export.h`
pub struct ThisModule(*mut bindings::module);

// SAFETY: `THIS_MODULE` may be used from all threads within a module.
unsafe impl Sync for ThisModule {}
impl ThisModule {
    /// Creates a [`ThisModule`] given the `THIS_MODULE` pointer.
    ///
    /// # Safety
    ///
    /// The pointer must be equal to the right `THIS_MODULE`.
    pub const unsafe fn from_ptr(ptr: *mut bindings::module) -> ThisModule {
        ThisModule(ptr)
    }

    /// Locks the module parameters to access them.
    ///
    /// Returns a [`KParamGuard`] that will release the lock when dropped.
    pub fn kernel_param_lock(&self) -> KParamGuard<'_> {
        // SAFETY: `kernel_param_lock` will check if the pointer is null and
        // use the built-in mutex in that case.
        #[cfg(CONFIG_SYSFS)]
        unsafe {
            bindings::kernel_param_lock(self.0)
        }
        // Without CONFIG_SYSFS there is no C-side lock to take, so the guard is a no-op that
        // only carries the lifetime.
        KParamGuard {
            #[cfg(CONFIG_SYSFS)]
            this_module: self,
            phantom: PhantomData,
        }
    }
}
/// Scoped lock on the kernel parameters of [`ThisModule`].
///
/// Lock will be released when this struct is dropped.
pub struct KParamGuard<'a> {
    // Only needed to unlock on drop; absent when CONFIG_SYSFS is disabled (no lock is taken).
    #[cfg(CONFIG_SYSFS)]
    this_module: &'a ThisModule,
    // Keeps the lifetime parameter used even when `this_module` is compiled out.
    phantom: PhantomData<&'a ()>,
}

#[cfg(CONFIG_SYSFS)]
impl<'a> Drop for KParamGuard<'a> {
    fn drop(&mut self) {
        // SAFETY: `kernel_param_lock` will check if the pointer is null and
        // use the built-in mutex in that case. The existence of `self`
        // guarantees that the lock is held.
        unsafe { bindings::kernel_param_unlock(self.this_module.0) }
    }
}
/// Calculates the offset of a field from the beginning of the struct it belongs to.
///
/// The result is an `isize` (it comes from [`pointer::offset_from`]).
///
/// # Example
///
/// ```
/// # use kernel::prelude::*;
/// # use kernel::offset_of;
/// struct Test {
///     a: u64,
///     b: u32,
/// }
///
/// assert_eq!(offset_of!(Test, b), 8);
/// ```
#[macro_export]
macro_rules! offset_of {
    ($type:ty, $($f:tt)*) => {{
        let tmp = core::mem::MaybeUninit::<$type>::uninit();
        let outer = tmp.as_ptr();
        // To avoid warnings when nesting `unsafe` blocks.
        #[allow(unused_unsafe)]
        // SAFETY: The pointer is valid and aligned, just not initialised; `addr_of` ensures that
        // we don't actually read from `outer` (which would be UB) nor create an intermediate
        // reference.
        let inner = unsafe { core::ptr::addr_of!((*outer).$($f)*) } as *const u8;
        // To avoid warnings when nesting `unsafe` blocks.
        #[allow(unused_unsafe)]
        // SAFETY: The two pointers are within the same allocation block.
        unsafe { inner.offset_from(outer as *const u8) }
    }}
}
/// Produces a pointer to an object from a pointer to one of its fields.
///
/// # Safety
///
/// Callers must ensure that the pointer to the field is in fact a pointer to the specified field,
/// as opposed to a pointer to another object of the same type. If this condition is not met,
/// any dereference of the resulting pointer is UB.
///
/// # Example
///
/// ```
/// # use kernel::container_of;
/// struct Test {
///     a: u64,
///     b: u32,
/// }
///
/// let test = Test { a: 10, b: 20 };
/// let b_ptr = &test.b;
/// let test_alias = container_of!(b_ptr, Test, b);
/// assert!(core::ptr::eq(&test, test_alias));
/// ```
#[macro_export]
macro_rules! container_of {
    ($ptr:expr, $type:ty, $($f:tt)*) => {{
        let ptr = $ptr as *const _ as *const u8;
        let offset = $crate::offset_of!($type, $($f)*);
        // Walk back by the field offset; `wrapping_offset` avoids UB on the pointer arithmetic
        // itself (validity is only required when the result is dereferenced).
        ptr.wrapping_offset(-offset) as *const $type
    }}
}
// Rust panics are unrecoverable bugs in kernel context: log the panic info at emergency level
// and trigger a kernel `BUG()`.
#[cfg(not(any(testlib, test)))]
#[panic_handler]
fn panic(info: &core::panic::PanicInfo<'_>) -> ! {
    pr_emerg!("{}\n", info);
    // SAFETY: FFI call.
    unsafe { bindings::BUG() };
    // Bindgen currently does not recognize `__noreturn` so `BUG` returns `()`
    // instead of `!`.
    // https://github.com/rust-lang/rust-bindgen/issues/2094
    loop {}
}

247
rust/kernel/linked_list.rs Normal file
View File

@ -0,0 +1,247 @@
// SPDX-License-Identifier: GPL-2.0
//! Linked lists.
//!
//! TODO: This module is a work in progress.
use alloc::boxed::Box;
use core::ptr::NonNull;
pub use crate::raw_list::{Cursor, GetLinks, Links};
use crate::{raw_list, raw_list::RawList, sync::Ref};
// TODO: Use the one from `kernel::file_operations::PointerWrapper` instead.
/// Wraps an object to be inserted in a linked list.
pub trait Wrapper<T: ?Sized> {
    /// Converts the wrapped object into a pointer that represents it.
    ///
    /// Ownership is logically transferred to the returned pointer until
    /// [`Wrapper::from_pointer`] reconstructs the wrapper.
    fn into_pointer(self) -> NonNull<T>;

    /// Converts the object back from the pointer representation.
    ///
    /// # Safety
    ///
    /// The passed pointer must come from a previous call to [`Wrapper::into_pointer()`].
    unsafe fn from_pointer(ptr: NonNull<T>) -> Self;

    /// Returns a reference to the wrapped object.
    fn as_ref(&self) -> &T;
}
impl<T: ?Sized> Wrapper<T> for Box<T> {
    fn into_pointer(self) -> NonNull<T> {
        // `Box::into_raw` never returns null, so the `unwrap` cannot fail.
        NonNull::new(Box::into_raw(self)).unwrap()
    }
    unsafe fn from_pointer(ptr: NonNull<T>) -> Self {
        // SAFETY: By the safety requirements of `from_pointer`, `ptr` came from a previous
        // `into_pointer` call, i.e. from `Box::into_raw`.
        unsafe { Box::from_raw(ptr.as_ptr()) }
    }
    fn as_ref(&self) -> &T {
        AsRef::as_ref(self)
    }
}

impl<T: ?Sized> Wrapper<T> for Ref<T> {
    fn into_pointer(self) -> NonNull<T> {
        // `Ref::into_raw` never returns null, so the `unwrap` cannot fail.
        NonNull::new(Ref::into_raw(self) as _).unwrap()
    }
    unsafe fn from_pointer(ptr: NonNull<T>) -> Self {
        // SAFETY: The safety requirements of `from_pointer` satisfy the ones from `Ref::from_raw`.
        unsafe { Ref::from_raw(ptr.as_ptr() as _) }
    }
    fn as_ref(&self) -> &T {
        AsRef::as_ref(self)
    }
}

impl<T: ?Sized> Wrapper<T> for &T {
    fn into_pointer(self) -> NonNull<T> {
        NonNull::from(self)
    }
    unsafe fn from_pointer(ptr: NonNull<T>) -> Self {
        // SAFETY: By the safety requirements of `from_pointer`, `ptr` came from
        // `NonNull::from(self)` on a reference that is still valid.
        unsafe { &*ptr.as_ptr() }
    }
    fn as_ref(&self) -> &T {
        self
    }
}
/// A descriptor of wrapped list elements.
pub trait GetLinksWrapped: GetLinks {
    /// Specifies which wrapper (e.g., `Box` and `Arc`) wraps the list entries.
    type Wrapped: Wrapper<Self::EntryType>;
}

impl<T: ?Sized> GetLinksWrapped for Box<T>
where
    Box<T>: GetLinks,
{
    type Wrapped = Box<<Box<T> as GetLinks>::EntryType>;
}

// Forwards `GetLinks` through the `Box` wrapper to the inner type.
impl<T: GetLinks + ?Sized> GetLinks for Box<T> {
    type EntryType = T::EntryType;
    fn get_links(data: &Self::EntryType) -> &Links<Self::EntryType> {
        <T as GetLinks>::get_links(data)
    }
}

impl<T: ?Sized> GetLinksWrapped for Ref<T>
where
    Ref<T>: GetLinks,
{
    type Wrapped = Ref<<Ref<T> as GetLinks>::EntryType>;
}

// Forwards `GetLinks` through the `Ref` wrapper to the inner type.
impl<T: GetLinks + ?Sized> GetLinks for Ref<T> {
    type EntryType = T::EntryType;
    fn get_links(data: &Self::EntryType) -> &Links<Self::EntryType> {
        <T as GetLinks>::get_links(data)
    }
}
/// A linked list.
///
/// Elements in the list are wrapped and ownership is transferred to the list while the element is
/// in the list.
pub struct List<G: GetLinksWrapped> {
    list: RawList<G>,
}

impl<G: GetLinksWrapped> List<G> {
    /// Constructs a new empty linked list.
    pub fn new() -> Self {
        Self {
            list: RawList::new(),
        }
    }

    /// Returns whether the list is empty.
    pub fn is_empty(&self) -> bool {
        self.list.is_empty()
    }

    /// Adds the given object to the end (back) of the list.
    ///
    /// It is dropped if it's already on this (or another) list; this can happen for
    /// reference-counted objects, so dropping means decrementing the reference count.
    pub fn push_back(&mut self, data: G::Wrapped) {
        let ptr = data.into_pointer();
        // SAFETY: We took ownership of the entry, so it is safe to insert it.
        if !unsafe { self.list.push_back(ptr.as_ref()) } {
            // If insertion failed, rebuild object so that it can be freed.
            // SAFETY: We just called `into_pointer` above.
            unsafe { G::Wrapped::from_pointer(ptr) };
        }
    }

    /// Inserts the given object after `existing`.
    ///
    /// It is dropped if it's already on this (or another) list; this can happen for
    /// reference-counted objects, so dropping means decrementing the reference count.
    ///
    /// # Safety
    ///
    /// Callers must ensure that `existing` points to a valid entry that is on the list.
    pub unsafe fn insert_after(&mut self, existing: NonNull<G::EntryType>, data: G::Wrapped) {
        let ptr = data.into_pointer();
        // SAFETY: By the function's safety requirements, `existing` points to a valid entry.
        let entry = unsafe { &*existing.as_ptr() };
        // SAFETY: We took ownership of the entry being inserted, and `existing` is a valid
        // entry on the list per the function's safety requirements.
        if unsafe { !self.list.insert_after(entry, ptr.as_ref()) } {
            // If insertion failed, rebuild object so that it can be freed.
            // SAFETY: We just called `into_pointer` above.
            unsafe { G::Wrapped::from_pointer(ptr) };
        }
    }

    /// Removes the given entry.
    ///
    /// # Safety
    ///
    /// Callers must ensure that `data` is either on this list or in no list. It being on another
    /// list leads to memory unsafety.
    pub unsafe fn remove(&mut self, data: &G::Wrapped) -> Option<G::Wrapped> {
        let entry_ref = Wrapper::as_ref(data);
        // SAFETY: By the function's safety requirements, `data` is on this list or on no list.
        if unsafe { self.list.remove(entry_ref) } {
            // SAFETY: The removal succeeded, so the entry was on the list, i.e. its ownership
            // had been transferred to the list via a previous `into_pointer`.
            Some(unsafe { G::Wrapped::from_pointer(NonNull::from(entry_ref)) })
        } else {
            None
        }
    }

    /// Removes the element currently at the front of the list and returns it.
    ///
    /// Returns `None` if the list is empty.
    pub fn pop_front(&mut self) -> Option<G::Wrapped> {
        let front = self.list.pop_front()?;
        // SAFETY: Elements on the list were inserted after a call to `into_pointer `.
        Some(unsafe { G::Wrapped::from_pointer(front) })
    }

    /// Returns a cursor starting on the first (front) element of the list.
    pub fn cursor_front(&self) -> Cursor<'_, G> {
        self.list.cursor_front()
    }

    /// Returns a mutable cursor starting on the first (front) element of the list.
    pub fn cursor_front_mut(&mut self) -> CursorMut<'_, G> {
        CursorMut::new(self.list.cursor_front_mut())
    }
}

impl<G: GetLinksWrapped> Default for List<G> {
    fn default() -> Self {
        Self::new()
    }
}

impl<G: GetLinksWrapped> Drop for List<G> {
    fn drop(&mut self) {
        // Pop (and thereby drop) every remaining element so their ownership is released.
        while self.pop_front().is_some() {}
    }
}
/// A list cursor that allows traversing a linked list and inspecting & mutating elements.
pub struct CursorMut<'a, G: GetLinksWrapped> {
    cursor: raw_list::CursorMut<'a, G>,
}

impl<'a, G: GetLinksWrapped> CursorMut<'a, G> {
    fn new(cursor: raw_list::CursorMut<'a, G>) -> Self {
        Self { cursor }
    }

    /// Returns the element the cursor is currently positioned on.
    pub fn current(&mut self) -> Option<&mut G::EntryType> {
        self.cursor.current()
    }

    /// Removes the element the cursor is currently positioned on.
    ///
    /// After removal, it advances the cursor to the next element.
    pub fn remove_current(&mut self) -> Option<G::Wrapped> {
        let ptr = self.cursor.remove_current()?;
        // SAFETY: Elements on the list were inserted after a call to `into_pointer `.
        Some(unsafe { G::Wrapped::from_pointer(ptr) })
    }

    /// Returns the element immediately after the one the cursor is positioned on.
    pub fn peek_next(&mut self) -> Option<&mut G::EntryType> {
        self.cursor.peek_next()
    }

    /// Returns the element immediately before the one the cursor is positioned on.
    pub fn peek_prev(&mut self) -> Option<&mut G::EntryType> {
        self.cursor.peek_prev()
    }

    /// Moves the cursor to the next element.
    pub fn move_next(&mut self) {
        self.cursor.move_next();
    }
}

291
rust/kernel/miscdev.rs Normal file
View File

@ -0,0 +1,291 @@
// SPDX-License-Identifier: GPL-2.0
//! Miscellaneous devices.
//!
//! C header: [`include/linux/miscdevice.h`](../../../../include/linux/miscdevice.h)
//!
//! Reference: <https://www.kernel.org/doc/html/latest/driver-api/misc_devices.html>
use crate::bindings;
use crate::error::{code::*, Error, Result};
use crate::file;
use crate::{device, str::CStr, str::CString, ThisModule};
use alloc::boxed::Box;
use core::marker::PhantomPinned;
use core::{fmt, mem::MaybeUninit, pin::Pin};
/// Options which can be used to configure how a misc device is registered.
///
/// # Examples
///
/// ```
/// # use kernel::{c_str, device::RawDevice, file, miscdev, prelude::*};
/// fn example(
///     reg: Pin<&mut miscdev::Registration<impl file::Operations<OpenData = ()>>>,
///     parent: &dyn RawDevice,
/// ) -> Result {
///     miscdev::Options::new()
///         .mode(0o600)
///         .minor(10)
///         .parent(parent)
///         .register(reg, fmt!("sample"), ())
/// }
/// ```
#[derive(Default)]
pub struct Options<'a> {
    // Each `None` means "use the kernel default" when registering.
    minor: Option<i32>,
    mode: Option<u16>,
    parent: Option<&'a dyn device::RawDevice>,
}

impl<'a> Options<'a> {
    /// Creates new [`Options`] instance with the required fields.
    pub const fn new() -> Self {
        Self {
            minor: None,
            mode: None,
            parent: None,
        }
    }

    /// Sets the minor device number.
    pub const fn minor(&mut self, v: i32) -> &mut Self {
        self.minor = Some(v);
        self
    }

    /// Sets the device mode.
    ///
    /// This is usually an octal number and describes who can perform read/write/execute operations
    /// on the device.
    pub const fn mode(&mut self, m: u16) -> &mut Self {
        self.mode = Some(m);
        self
    }

    /// Sets the device parent.
    pub const fn parent(&mut self, p: &'a dyn device::RawDevice) -> &mut Self {
        self.parent = Some(p);
        self
    }

    /// Registers a misc device using the configured options.
    pub fn register<T: file::Operations>(
        &self,
        reg: Pin<&mut Registration<T>>,
        name: fmt::Arguments<'_>,
        open_data: T::OpenData,
    ) -> Result {
        reg.register_with_options(name, open_data, self)
    }

    /// Allocates a new registration of a misc device and completes the registration with the
    /// configured options.
    pub fn register_new<T: file::Operations>(
        &self,
        name: fmt::Arguments<'_>,
        open_data: T::OpenData,
    ) -> Result<Pin<Box<Registration<T>>>> {
        let mut r = Pin::from(Box::try_new(Registration::new())?);
        self.register(r.as_mut(), name, open_data)?;
        Ok(r)
    }
}
/// A registration of a miscellaneous device.
///
/// # Invariants
///
/// `Context` is always initialised when `registered` is `true`, and not initialised otherwise.
pub struct Registration<T: file::Operations> {
    // Whether `misc_register` has succeeded (and `misc_deregister` must run on drop).
    registered: bool,
    // The C-side device descriptor; embedded so `container_of!` can recover the registration.
    mdev: bindings::miscdevice,
    // Owns the device name pointed to by `mdev.name` for the registration's lifetime.
    name: Option<CString>,
    // `mdev` is handed to C by address, so the registration must not move once registered.
    _pin: PhantomPinned,
    /// Context initialised on construction and made available to all file instances on
    /// [`file::Operations::open`].
    open_data: MaybeUninit<T::OpenData>,
}
impl<T: file::Operations> Registration<T> {
    /// Creates a new [`Registration`] but does not register it yet.
    ///
    /// It is allowed to move.
    pub fn new() -> Self {
        // INVARIANT: `registered` is `false` and `open_data` is not initialised.
        Self {
            registered: false,
            mdev: bindings::miscdevice::default(),
            name: None,
            _pin: PhantomPinned,
            open_data: MaybeUninit::uninit(),
        }
    }

    /// Registers a miscellaneous device.
    ///
    /// Returns a pinned heap-allocated representation of the registration.
    pub fn new_pinned(name: fmt::Arguments<'_>, open_data: T::OpenData) -> Result<Pin<Box<Self>>> {
        Options::new().register_new(name, open_data)
    }

    /// Registers a miscellaneous device with the rest of the kernel.
    ///
    /// It must be pinned because the memory block that represents the registration is
    /// self-referential.
    pub fn register(
        self: Pin<&mut Self>,
        name: fmt::Arguments<'_>,
        open_data: T::OpenData,
    ) -> Result {
        Options::new().register(self, name, open_data)
    }

    /// Registers a miscellaneous device with the rest of the kernel. Additional optional settings
    /// are provided via the `opts` parameter.
    ///
    /// It must be pinned because the memory block that represents the registration is
    /// self-referential.
    pub fn register_with_options(
        self: Pin<&mut Self>,
        name: fmt::Arguments<'_>,
        open_data: T::OpenData,
        opts: &Options<'_>,
    ) -> Result {
        // SAFETY: We must ensure that we never move out of `this`.
        let this = unsafe { self.get_unchecked_mut() };
        if this.registered {
            // Already registered.
            return Err(EINVAL);
        }
        let name = CString::try_from_fmt(name)?;
        // SAFETY: The adapter is compatible with `misc_register`.
        this.mdev.fops = unsafe { file::OperationsVtable::<Self, T>::build() };
        this.mdev.name = name.as_char_ptr();
        // Unset options fall back to the kernel defaults (dynamic minor, mode 0, no parent).
        this.mdev.minor = opts.minor.unwrap_or(bindings::MISC_DYNAMIC_MINOR as i32);
        this.mdev.mode = opts.mode.unwrap_or(0);
        this.mdev.parent = opts
            .parent
            .map_or(core::ptr::null_mut(), |p| p.raw_device());
        // We write to `open_data` here because as soon as `misc_register` succeeds, the file can be
        // opened, so we need `open_data` configured ahead of time.
        //
        // INVARIANT: `registered` is set to `true`, but `open_data` is also initialised.
        this.registered = true;
        this.open_data.write(open_data);
        // SAFETY: `mdev` was fully initialised above and, being pinned, stays valid until
        // `misc_deregister` is called in `drop`.
        let ret = unsafe { bindings::misc_register(&mut this.mdev) };
        if ret < 0 {
            // INVARIANT: `registered` is set back to `false` and the `open_data` is destroyed.
            this.registered = false;
            // SAFETY: `open_data` was initialised a few lines above.
            unsafe { this.open_data.assume_init_drop() };
            return Err(Error::from_kernel_errno(ret));
        }
        // Keep the name alive: `mdev.name` points into it for as long as we stay registered.
        this.name = Some(name);
        Ok(())
    }
}
impl<T: file::Operations> Default for Registration<T> {
fn default() -> Self {
Self::new()
}
}
impl<T: file::Operations> file::OpenAdapter<T::OpenData> for Registration<T> {
    unsafe fn convert(
        _inode: *mut bindings::inode,
        file: *mut bindings::file,
    ) -> *const T::OpenData {
        // `file->private_data` points at the embedded `mdev` field, so recover the enclosing
        // `Registration` via `container_of`.
        // SAFETY: The caller must guarantee that `file` is valid.
        let reg = crate::container_of!(unsafe { (*file).private_data }, Self, mdev);
        // SAFETY: This function is only called while the misc device is still registered, so the
        // registration must be valid. Additionally, the type invariants guarantee that while the
        // miscdev is registered, `open_data` is initialised.
        unsafe { (*reg).open_data.as_ptr() }
    }
}
// SAFETY: The only mutating methods (`register` and `register_with_options`) require a (pinned)
// mutable `Registration`, so it is safe to pass `&Registration` to multiple threads because it
// offers no interior mutability.
unsafe impl<T: file::Operations> Sync for Registration<T> {}
// SAFETY: All functions work from any thread. So as long as the `Registration::open_data` is
// `Send`, so is `Registration<T>`.
unsafe impl<T: file::Operations> Send for Registration<T> where T::OpenData: Send {}
impl<T: file::Operations> Drop for Registration<T> {
    /// Removes the registration from the kernel if it has completed successfully before.
    ///
    /// If registration never happened (or failed), there is nothing to undo.
    fn drop(&mut self) {
        if self.registered {
            // SAFETY: `registered` being `true` indicates that a previous call to `misc_register`
            // succeeded.
            unsafe { bindings::misc_deregister(&mut self.mdev) };
            // SAFETY: The type invariant guarantees that `open_data` is initialised when
            // `registered` is `true`.
            unsafe { self.open_data.assume_init_drop() };
        }
    }
}
/// Kernel module that exposes a single miscdev device implemented by `T`.
pub struct Module<T: file::Operations<OpenData = ()>> {
    // Pinned registration; dropping it (at module unload) deregisters the device.
    _dev: Pin<Box<Registration<T>>>,
}
impl<T: file::Operations<OpenData = ()>> crate::Module for Module<T> {
    fn init(name: &'static CStr, _module: &'static ThisModule) -> Result<Self> {
        // Register the misc device under the module's own name, with unit (`()`) open data.
        Ok(Self {
            _dev: Registration::new_pinned(crate::fmt!("{name}"), ())?,
        })
    }
}
/// Declares a kernel module that exposes a single misc device.
///
/// The `type` argument should be a type which implements the [`Operations`] trait. Also accepts
/// various forms of kernel metadata.
///
/// C header: [`include/linux/moduleparam.h`](../../../include/linux/moduleparam.h)
///
/// [`Operations`]: ../kernel/file/trait.Operations.html
///
/// # Examples
///
/// ```ignore
/// use kernel::prelude::*;
///
/// module_misc_device! {
///     type: MyFile,
///     name: b"my_miscdev_kernel_module",
///     author: b"Rust for Linux Contributors",
///     description: b"My very own misc device kernel module!",
///     license: b"GPL",
/// }
///
/// #[derive(Default)]
/// struct MyFile;
///
/// impl kernel::file::Operations for MyFile {
///     kernel::declare_file_operations!();
/// }
/// ```
#[macro_export]
macro_rules! module_misc_device {
    (type: $type:ty, $($f:tt)*) => {
        // Alias keeps the generated `module!` invocation readable.
        type ModuleType = kernel::miscdev::Module<$type>;
        module! {
            type: ModuleType,
            $($f)*
        }
    }
}

149
rust/kernel/mm.rs Normal file
View File

@ -0,0 +1,149 @@
// SPDX-License-Identifier: GPL-2.0
//! Memory management.
//!
//! C header: [`include/linux/mm.h`](../../../../include/linux/mm.h)
use crate::{bindings, pages, to_result, Result};
/// Virtual memory.
pub mod virt {
use super::*;
/// A wrapper for the kernel's `struct vm_area_struct`.
///
/// It represents an area of virtual memory.
///
/// # Invariants
///
/// `vma` is always non-null and valid.
pub struct Area {
    // Raw pointer to the C VMA; validity is guaranteed by the type invariant above.
    vma: *mut bindings::vm_area_struct,
}
impl Area {
    /// Creates a new instance of a virtual memory area.
    ///
    /// # Safety
    ///
    /// Callers must ensure that `vma` is non-null and valid for the duration of the new area's
    /// lifetime.
    pub(crate) unsafe fn from_ptr(vma: *mut bindings::vm_area_struct) -> Self {
        // INVARIANTS: The safety requirements guarantee the invariants.
        Self { vma }
    }

    /// Returns the flags associated with the virtual memory area.
    ///
    /// The possible flags are a combination of the constants in [`flags`].
    pub fn flags(&self) -> usize {
        // SAFETY: `self.vma` is valid by the type invariants.
        unsafe { (*self.vma).vm_flags as _ }
    }

    /// Sets the flags associated with the virtual memory area.
    ///
    /// The possible flags are a combination of the constants in [`flags`].
    pub fn set_flags(&mut self, flags: usize) {
        // SAFETY: `self.vma` is valid by the type invariants.
        unsafe { (*self.vma).vm_flags = flags as _ };
    }

    /// Returns the start address of the virtual memory area.
    pub fn start(&self) -> usize {
        // SAFETY: `self.vma` is valid by the type invariants.
        unsafe { (*self.vma).vm_start as _ }
    }

    /// Returns the end address of the virtual memory area.
    pub fn end(&self) -> usize {
        // SAFETY: `self.vma` is valid by the type invariants.
        unsafe { (*self.vma).vm_end as _ }
    }

    /// Maps a single page at the given address within the virtual memory area.
    pub fn insert_page(&mut self, address: usize, page: &pages::Pages<0>) -> Result {
        // SAFETY: The page is guaranteed to be order 0 by the type system. The range of
        // `address` is already checked by `vm_insert_page`. `self.vma` and `page.pages` are
        // guaranteed by their respective type invariants to be valid.
        to_result(|| unsafe { bindings::vm_insert_page(self.vma, address as _, page.pages) })
    }
}
/// Container for [`Area`] flags.
pub mod flags {
    use crate::bindings;

    /// No flags are set.
    pub const NONE: usize = bindings::VM_NONE as _;

    /// Mapping allows reads.
    pub const READ: usize = bindings::VM_READ as _;

    /// Mapping allows writes.
    pub const WRITE: usize = bindings::VM_WRITE as _;

    /// Mapping allows execution.
    pub const EXEC: usize = bindings::VM_EXEC as _;

    /// Mapping is shared.
    pub const SHARED: usize = bindings::VM_SHARED as _;

    /// Mapping may be updated to allow reads.
    pub const MAYREAD: usize = bindings::VM_MAYREAD as _;

    /// Mapping may be updated to allow writes.
    pub const MAYWRITE: usize = bindings::VM_MAYWRITE as _;

    /// Mapping may be updated to allow execution.
    pub const MAYEXEC: usize = bindings::VM_MAYEXEC as _;

    /// Mapping may be updated to be shared.
    pub const MAYSHARE: usize = bindings::VM_MAYSHARE as _;

    /// Do not copy this vma on fork.
    pub const DONTCOPY: usize = bindings::VM_DONTCOPY as _;

    /// Cannot expand with mremap().
    pub const DONTEXPAND: usize = bindings::VM_DONTEXPAND as _;

    /// Lock the pages covered when they are faulted in.
    pub const LOCKONFAULT: usize = bindings::VM_LOCKONFAULT as _;

    /// Is a VM accounted object.
    pub const ACCOUNT: usize = bindings::VM_ACCOUNT as _;

    /// Should the VM suppress accounting.
    pub const NORESERVE: usize = bindings::VM_NORESERVE as _;

    /// Huge TLB Page VM.
    pub const HUGETLB: usize = bindings::VM_HUGETLB as _;

    /// Synchronous page faults.
    pub const SYNC: usize = bindings::VM_SYNC as _;

    /// Architecture-specific flag.
    pub const ARCH_1: usize = bindings::VM_ARCH_1 as _;

    /// Wipe VMA contents in child.
    pub const WIPEONFORK: usize = bindings::VM_WIPEONFORK as _;

    /// Do not include in the core dump.
    pub const DONTDUMP: usize = bindings::VM_DONTDUMP as _;

    /// Not soft dirty clean area.
    pub const SOFTDIRTY: usize = bindings::VM_SOFTDIRTY as _;

    /// Can contain "struct page" and pure PFN pages.
    pub const MIXEDMAP: usize = bindings::VM_MIXEDMAP as _;

    /// MADV_HUGEPAGE marked this vma.
    pub const HUGEPAGE: usize = bindings::VM_HUGEPAGE as _;

    /// MADV_NOHUGEPAGE marked this vma.
    pub const NOHUGEPAGE: usize = bindings::VM_NOHUGEPAGE as _;

    /// KSM may merge identical pages.
    pub const MERGEABLE: usize = bindings::VM_MERGEABLE as _;
}
}

498
rust/kernel/module_param.rs Normal file
View File

@ -0,0 +1,498 @@
// SPDX-License-Identifier: GPL-2.0
//! Types for module parameters.
//!
//! C header: [`include/linux/moduleparam.h`](../../../include/linux/moduleparam.h)
use crate::error::{code::*, from_kernel_result};
use crate::str::{CStr, Formatter};
use core::fmt::Write;
/// Types that can be used for module parameters.
///
/// Note that displaying the type in `sysfs` will fail if
/// [`alloc::string::ToString::to_string`] (as implemented through the
/// [`core::fmt::Display`] trait) writes more than [`PAGE_SIZE`]
/// bytes (including an additional null terminator).
///
/// [`PAGE_SIZE`]: `crate::PAGE_SIZE`
pub trait ModuleParam: core::fmt::Display + core::marker::Sized {
    /// The `ModuleParam` will be used by the kernel module through this type.
    ///
    /// This may differ from `Self` if, for example, `Self` needs to track
    /// ownership without exposing it or allocate extra space for other possible
    /// parameter values. See [`StringParam`] or [`ArrayParam`] for examples.
    type Value: ?Sized;

    /// Whether the parameter is allowed to be set without an argument.
    ///
    /// Setting this to `true` allows the parameter to be passed without an
    /// argument (e.g. just `module.param` instead of `module.param=foo`).
    const NOARG_ALLOWED: bool;

    /// Convert a parameter argument into the parameter value.
    ///
    /// `None` should be returned when parsing of the argument fails.
    /// `arg == None` indicates that the parameter was passed without an
    /// argument. If `NOARG_ALLOWED` is set to `false` then `arg` is guaranteed
    /// to always be `Some(_)`.
    ///
    /// Parameters passed at boot time will be set before [`kmalloc`] is
    /// available (even if the module is loaded at a later time). However, in
    /// this case, the argument buffer will be valid for the entire lifetime of
    /// the kernel. So implementations of this method which need to allocate
    /// should first check that the allocator is available (with
    /// [`crate::bindings::slab_is_available`]) and when it is not available
    /// provide an alternative implementation which doesn't allocate. In cases
    /// where the allocator is not available it is safe to save references to
    /// `arg` in `Self`, but in other cases a copy should be made.
    ///
    /// [`kmalloc`]: ../../../include/linux/slab.h
    fn try_from_param_arg(arg: Option<&'static [u8]>) -> Option<Self>;

    /// Get the current value of the parameter for use in the kernel module.
    ///
    /// This function should not be used directly. Instead use the wrapper
    /// `read` which will be generated by [`macros::module`].
    fn value(&self) -> &Self::Value;

    /// Set the module parameter from a string.
    ///
    /// Used to set the parameter value when loading the module or when set
    /// through `sysfs`.
    ///
    /// # Safety
    ///
    /// If `val` is non-null then it must point to a valid null-terminated
    /// string. The `arg` field of `param` must be an instance of `Self`.
    unsafe extern "C" fn set_param(
        val: *const crate::c_types::c_char,
        param: *const crate::bindings::kernel_param,
    ) -> crate::c_types::c_int {
        let arg = if val.is_null() {
            // A null `val` means the parameter was passed with no argument.
            None
        } else {
            // SAFETY: The function's safety contract guarantees that a non-null `val` points to
            // a valid null-terminated string.
            Some(unsafe { CStr::from_char_ptr(val).as_bytes() })
        };
        match Self::try_from_param_arg(arg) {
            Some(new_value) => {
                // SAFETY: The function's safety contract guarantees that `param.arg` is an
                // instance of `Self`.
                let old_value = unsafe { (*param).__bindgen_anon_1.arg as *mut Self };
                // SAFETY: `old_value` points to a valid `Self` per the contract above; `replace`
                // moves the new value in and returns (then drops) the old one.
                let _ = unsafe { core::ptr::replace(old_value, new_value) };
                0
            }
            None => EINVAL.to_kernel_errno(),
        }
    }

    /// Write a string representation of the current parameter value to `buf`.
    ///
    /// Used for displaying the current parameter value in `sysfs`.
    ///
    /// # Safety
    ///
    /// `buf` must be a buffer of length at least `kernel::PAGE_SIZE` that is
    /// writeable. The `arg` field of `param` must be an instance of `Self`.
    unsafe extern "C" fn get_param(
        buf: *mut crate::c_types::c_char,
        param: *const crate::bindings::kernel_param,
    ) -> crate::c_types::c_int {
        from_kernel_result! {
            // SAFETY: The C contract guarantees that the buffer is at least `PAGE_SIZE` bytes.
            let mut f = unsafe { Formatter::from_buffer(buf.cast(), crate::PAGE_SIZE) };
            // SAFETY: The safety contract guarantees that `param.arg` is an instance of `Self`.
            // The value is written with a trailing NUL, as sysfs expects a C string.
            unsafe { write!(f, "{}\0", *((*param).__bindgen_anon_1.arg as *mut Self)) }?;
            Ok(f.bytes_written().try_into()?)
        }
    }

    /// Drop the parameter.
    ///
    /// Called when unloading a module.
    ///
    /// # Safety
    ///
    /// The `arg` field of `param` must be an instance of `Self`.
    unsafe extern "C" fn free(arg: *mut crate::c_types::c_void) {
        // SAFETY: The function's safety contract guarantees that `arg` is a valid, initialised
        // instance of `Self` that is no longer used afterwards.
        unsafe { core::ptr::drop_in_place(arg as *mut Self) };
    }
}
/// Trait for parsing integers.
///
/// Strings beginning with `0x`, `0o`, or `0b` are parsed as hex, octal, or
/// binary respectively. Strings beginning with `0` otherwise are parsed as
/// octal. Anything else is parsed as decimal. A leading `+` or `-` is also
/// permitted. Any string parsed by [`kstrtol()`] or [`kstrtoul()`] will be
/// successfully parsed.
///
/// [`kstrtol()`]: https://www.kernel.org/doc/html/latest/core-api/kernel-api.html#c.kstrtol
/// [`kstrtoul()`]: https://www.kernel.org/doc/html/latest/core-api/kernel-api.html#c.kstrtoul
trait ParseInt: Sized {
    fn from_str_radix(src: &str, radix: u32) -> Result<Self, core::num::ParseIntError>;
    fn checked_neg(self) -> Option<Self>;

    /// Parses a sign-free digit string, honouring the `0x`/`0o`/`0b` radix
    /// prefixes (in either case) and the bare-leading-`0` octal convention.
    fn from_str_unsigned(src: &str) -> Result<Self, core::num::ParseIntError> {
        // An explicit radix prefix takes priority over the leading-zero rule.
        const PREFIXES: [(&str, u32); 6] = [
            ("0x", 16),
            ("0X", 16),
            ("0o", 8),
            ("0O", 8),
            ("0b", 2),
            ("0B", 2),
        ];
        for (prefix, radix) in PREFIXES {
            if let Some(digits) = src.strip_prefix(prefix) {
                return Self::from_str_radix(digits, radix);
            }
        }
        // A remaining leading `0` selects octal; the whole string is kept since
        // `0` is itself a valid octal digit. Everything else is decimal.
        let radix = if src.starts_with('0') { 8 } else { 10 };
        Self::from_str_radix(src, radix)
    }

    /// Parses an optionally signed integer, returning `None` on empty input,
    /// invalid digits or overflow.
    fn from_str(src: &str) -> Option<Self> {
        match src.bytes().next()? {
            b'-' => Self::from_str_unsigned(&src[1..]).ok()?.checked_neg(),
            b'+' => Self::from_str_unsigned(&src[1..]).ok(),
            _ => Self::from_str_unsigned(src).ok(),
        }
    }
}
// Implements [`ParseInt`] for a primitive integer type by delegating to the type's inherent
// `from_str_radix` and `checked_neg` methods.
macro_rules! impl_parse_int {
    ($ty:ident) => {
        impl ParseInt for $ty {
            fn from_str_radix(src: &str, radix: u32) -> Result<Self, core::num::ParseIntError> {
                $ty::from_str_radix(src, radix)
            }
            fn checked_neg(self) -> Option<Self> {
                self.checked_neg()
            }
        }
    };
}
// Provide `ParseInt` for every primitive integer type used as a module parameter below.
impl_parse_int!(i8);
impl_parse_int!(u8);
impl_parse_int!(i16);
impl_parse_int!(u16);
impl_parse_int!(i32);
impl_parse_int!(u32);
impl_parse_int!(i64);
impl_parse_int!(u64);
impl_parse_int!(isize);
impl_parse_int!(usize);
// Implements [`ModuleParam`] for a primitive integer type: no-argument form is disallowed and
// parsing goes through [`ParseInt`].
macro_rules! impl_module_param {
    ($ty:ident) => {
        impl ModuleParam for $ty {
            type Value = $ty;
            const NOARG_ALLOWED: bool = false;
            fn try_from_param_arg(arg: Option<&'static [u8]>) -> Option<Self> {
                // `NOARG_ALLOWED` is `false`, so `arg` is always `Some(_)` here.
                let bytes = arg?;
                let utf8 = core::str::from_utf8(bytes).ok()?;
                <$ty as crate::module_param::ParseInt>::from_str(utf8)
            }
            fn value(&self) -> &Self::Value {
                self
            }
        }
    };
}
#[doc(hidden)]
#[macro_export]
/// Generate a static [`kernel_param_ops`](../../../include/linux/moduleparam.h) struct.
///
/// # Example
/// ```ignore
/// make_param_ops!(
///     /// Documentation for new param ops.
///     PARAM_OPS_MYTYPE, // Name for the static.
///     MyType // A type which implements [`ModuleParam`].
/// );
/// ```
macro_rules! make_param_ops {
    ($ops:ident, $ty:ty) => {
        // No documentation supplied: recurse with an empty doc attribute.
        $crate::make_param_ops!(
            #[doc=""]
            $ops,
            $ty
        );
    };
    ($(#[$meta:meta])* $ops:ident, $ty:ty) => {
        $(#[$meta])*
        ///
        /// Static [`kernel_param_ops`](../../../include/linux/moduleparam.h)
        /// struct generated by [`make_param_ops`].
        pub static $ops: $crate::bindings::kernel_param_ops = $crate::bindings::kernel_param_ops {
            // Tell the kernel whether this parameter may be set without an argument.
            flags: if <$ty as $crate::module_param::ModuleParam>::NOARG_ALLOWED {
                $crate::bindings::KERNEL_PARAM_OPS_FL_NOARG
            } else {
                0
            },
            set: Some(<$ty as $crate::module_param::ModuleParam>::set_param),
            get: Some(<$ty as $crate::module_param::ModuleParam>::get_param),
            free: Some(<$ty as $crate::module_param::ModuleParam>::free),
        };
    };
}
// Implement `ModuleParam` for all primitive integer types.
impl_module_param!(i8);
impl_module_param!(u8);
impl_module_param!(i16);
impl_module_param!(u16);
impl_module_param!(i32);
impl_module_param!(u32);
impl_module_param!(i64);
impl_module_param!(u64);
impl_module_param!(isize);
impl_module_param!(usize);
// Generate the `kernel_param_ops` statics for every integer parameter type.
make_param_ops!(
    /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
    /// for [`i8`].
    PARAM_OPS_I8,
    i8
);
make_param_ops!(
    /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
    /// for [`u8`].
    PARAM_OPS_U8,
    u8
);
make_param_ops!(
    /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
    /// for [`i16`].
    PARAM_OPS_I16,
    i16
);
make_param_ops!(
    /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
    /// for [`u16`].
    PARAM_OPS_U16,
    u16
);
make_param_ops!(
    /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
    /// for [`i32`].
    PARAM_OPS_I32,
    i32
);
make_param_ops!(
    /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
    /// for [`u32`].
    PARAM_OPS_U32,
    u32
);
make_param_ops!(
    /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
    /// for [`i64`].
    PARAM_OPS_I64,
    i64
);
make_param_ops!(
    /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
    /// for [`u64`].
    PARAM_OPS_U64,
    u64
);
make_param_ops!(
    /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
    /// for [`isize`].
    PARAM_OPS_ISIZE,
    isize
);
make_param_ops!(
    /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
    /// for [`usize`].
    PARAM_OPS_USIZE,
    usize
);
impl ModuleParam for bool {
    type Value = bool;
    const NOARG_ALLOWED: bool = true;

    /// Parses a boolean argument; the no-argument form (bare `module.param`)
    /// enables the parameter.
    fn try_from_param_arg(arg: Option<&'static [u8]>) -> Option<Self> {
        match arg {
            // `NOARG_ALLOWED` is `true`, so a missing argument means "enabled".
            None | Some(b"y" | b"Y" | b"1" | b"true") => Some(true),
            Some(b"n" | b"N" | b"0" | b"false") => Some(false),
            Some(_) => None,
        }
    }

    fn value(&self) -> &Self::Value {
        self
    }
}

make_param_ops!(
    /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
    /// for [`bool`].
    PARAM_OPS_BOOL,
    bool
);
/// An array of at __most__ `N` values.
///
/// # Invariant
///
/// The first `self.used` elements of `self.values` are initialized.
pub struct ArrayParam<T, const N: usize> {
    // Backing storage; only the first `used` slots hold initialised values.
    values: [core::mem::MaybeUninit<T>; N],
    // Number of initialised elements at the front of `values`.
    used: usize,
}
impl<T, const N: usize> ArrayParam<T, { N }> {
    /// Returns the initialised prefix of the array as a slice of `T`.
    fn values(&self) -> &[T] {
        // SAFETY: The invariant maintained by `ArrayParam` allows us to cast
        // the first `self.used` elements to `T`.
        unsafe {
            &*(&self.values[0..self.used] as *const [core::mem::MaybeUninit<T>] as *const [T])
        }
    }
}
impl<T: Copy, const N: usize> ArrayParam<T, { N }> {
    /// Creates an empty array parameter.
    const fn new() -> Self {
        // INVARIANT: The first `self.used` elements of `self.values` are
        // initialized.
        ArrayParam {
            values: [core::mem::MaybeUninit::uninit(); N],
            used: 0,
        }
    }

    /// Appends `val` if there is spare capacity; pushes beyond `N` elements
    /// are silently ignored.
    const fn push(&mut self, val: T) {
        if self.used < N {
            // INVARIANT: The first `self.used` elements of `self.values` are
            // initialized.
            self.values[self.used] = core::mem::MaybeUninit::new(val);
            self.used += 1;
        }
    }

    /// Create an instance of `ArrayParam` initialized with `vals`.
    ///
    /// This function is only meant to be used in the [`module::module`] macro.
    pub const fn create(vals: &[T]) -> Self {
        // Manual index loop because iterators are not usable in `const fn`.
        let mut result = ArrayParam::new();
        let mut i = 0;
        while i < vals.len() {
            result.push(vals[i]);
            i += 1;
        }
        result
    }
}
impl<T: core::fmt::Display, const N: usize> core::fmt::Display for ArrayParam<T, { N }> {
    /// Writes the values comma-separated; note that a trailing comma is
    /// emitted after the last element.
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        for val in self.values() {
            write!(f, "{},", val)?;
        }
        Ok(())
    }
}
impl<T: Copy + core::fmt::Display + ModuleParam, const N: usize> ModuleParam
    for ArrayParam<T, { N }>
{
    type Value = [T];
    const NOARG_ALLOWED: bool = false;

    fn try_from_param_arg(arg: Option<&'static [u8]>) -> Option<Self> {
        // Parse a comma-separated list; fails if any element fails to parse.
        // NOTE(review): elements beyond `N` are silently discarded by `push` —
        // confirm that this overflow behaviour is intended.
        arg.and_then(|args| {
            let mut result = Self::new();
            for arg in args.split(|b| *b == b',') {
                result.push(T::try_from_param_arg(Some(arg))?);
            }
            Some(result)
        })
    }

    fn value(&self) -> &Self::Value {
        self.values()
    }
}
/// A C-style string parameter.
///
/// The Rust version of the [`charp`] parameter. This type is meant to be
/// used by the [`macros::module`] macro, not handled directly. Instead use the
/// `read` method generated by that macro.
///
/// [`charp`]: ../../../include/linux/moduleparam.h
pub enum StringParam {
    /// A borrowed parameter value.
    ///
    /// Either the default value (which is static in the module) or borrowed
    /// from the original argument buffer used to set the value.
    Ref(&'static [u8]),
    /// A value that was allocated when the parameter was set.
    ///
    /// The value needs to be freed when the parameter is reset or the module is
    /// unloaded.
    Owned(alloc::vec::Vec<u8>),
}
impl StringParam {
    /// Returns the underlying bytes of the current value, regardless of
    /// whether they are borrowed or owned.
    fn bytes(&self) -> &[u8] {
        match self {
            StringParam::Ref(r) => *r,
            StringParam::Owned(v) => v.as_slice(),
        }
    }
}
impl core::fmt::Display for StringParam {
    /// Writes the value as UTF-8 text when possible, falling back to the
    /// debug representation of the raw bytes otherwise.
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        if let Ok(text) = core::str::from_utf8(self.bytes()) {
            write!(f, "{}", text)
        } else {
            write!(f, "{:?}", self.bytes())
        }
    }
}
impl ModuleParam for StringParam {
    type Value = [u8];
    const NOARG_ALLOWED: bool = false;

    fn try_from_param_arg(arg: Option<&'static [u8]>) -> Option<Self> {
        // SAFETY: It is always safe to call [`slab_is_available`](../../../include/linux/slab.h).
        let slab_available = unsafe { crate::bindings::slab_is_available() };
        arg.and_then(|arg| {
            if slab_available {
                // The allocator is up: copy the argument, as the buffer it
                // lives in may not outlive the parameter.
                let mut vec = alloc::vec::Vec::new();
                vec.try_extend_from_slice(arg).ok()?;
                Some(StringParam::Owned(vec))
            } else {
                // Pre-allocator (boot-time) argument buffers are valid for the
                // entire lifetime of the kernel (see `try_from_param_arg`'s
                // trait documentation), so borrowing is sound.
                Some(StringParam::Ref(arg))
            }
        })
    }

    fn value(&self) -> &Self::Value {
        self.bytes()
    }
}

make_param_ops!(
    /// Rust implementation of [`kernel_param_ops`](../../../include/linux/moduleparam.h)
    /// for [`StringParam`].
    PARAM_OPS_STR,
    StringParam
);

Some files were not shown because too many files have changed in this diff Show More