-rw-r--r-- Documentation/ABI/testing/sysfs-bus-counter | 8
-rw-r--r-- Documentation/ABI/testing/sysfs-bus-i2c-devices-m24lr | 100
-rw-r--r-- Documentation/devicetree/bindings/eeprom/at25.yaml | 8
-rw-r--r-- Documentation/devicetree/bindings/eeprom/st,m24lr.yaml | 52
-rw-r--r-- Documentation/devicetree/bindings/extcon/extcon-rt8973a.txt | 23
-rw-r--r-- Documentation/devicetree/bindings/extcon/linux,extcon-usb-gpio.yaml | 6
-rw-r--r-- Documentation/devicetree/bindings/extcon/maxim,max14526.yaml | 80
-rw-r--r-- Documentation/devicetree/bindings/extcon/richtek,rt8973a-muic.yaml | 49
-rw-r--r-- Documentation/devicetree/bindings/interconnect/qcom,glymur-rpmh.yaml | 172
-rw-r--r-- Documentation/devicetree/bindings/interconnect/qcom,osm-l3.yaml | 5
-rw-r--r-- Documentation/devicetree/bindings/misc/qcom,fastrpc.yaml | 2
-rw-r--r-- Documentation/devicetree/bindings/nvmem/airoha,an8855-efuse.yaml | 123
-rw-r--r-- Documentation/devicetree/bindings/nvmem/layouts/kontron,sl28-vpd.yaml | 7
-rw-r--r-- Documentation/devicetree/bindings/nvmem/nxp,s32g-ocotp-nvmem.yaml | 45
-rw-r--r-- Documentation/devicetree/bindings/slimbus/qcom,slim.yaml | 86
-rw-r--r-- Documentation/devicetree/bindings/slimbus/slimbus.yaml | 13
-rw-r--r-- Documentation/devicetree/bindings/w1/fsl-imx-owire.yaml | 4
-rw-r--r-- Documentation/netlink/specs/binder.yaml | 93
-rw-r--r-- MAINTAINERS | 1
-rw-r--r-- arch/sparc/kernel/apc.c | 3
-rw-r--r-- drivers/android/Kconfig | 16
-rw-r--r-- drivers/android/Makefile | 3
-rw-r--r-- drivers/android/binder.c | 160
-rw-r--r-- drivers/android/binder/Makefile | 9
-rw-r--r-- drivers/android/binder/allocation.rs | 602
-rw-r--r-- drivers/android/binder/context.rs | 180
-rw-r--r-- drivers/android/binder/deferred_close.rs | 204
-rw-r--r-- drivers/android/binder/defs.rs | 182
-rw-r--r-- drivers/android/binder/error.rs | 99
-rw-r--r-- drivers/android/binder/freeze.rs | 388
-rw-r--r-- drivers/android/binder/node.rs | 1131
-rw-r--r-- drivers/android/binder/node/wrapper.rs | 78
-rw-r--r-- drivers/android/binder/page_range.rs | 734
-rw-r--r-- drivers/android/binder/page_range_helper.c | 24
-rw-r--r-- drivers/android/binder/page_range_helper.h | 15
-rw-r--r-- drivers/android/binder/process.rs | 1696
-rw-r--r-- drivers/android/binder/range_alloc/array.rs | 251
-rw-r--r-- drivers/android/binder/range_alloc/mod.rs | 329
-rw-r--r-- drivers/android/binder/range_alloc/tree.rs | 488
-rw-r--r-- drivers/android/binder/rust_binder.h | 23
-rw-r--r-- drivers/android/binder/rust_binder_events.c | 59
-rw-r--r-- drivers/android/binder/rust_binder_events.h | 36
-rw-r--r-- drivers/android/binder/rust_binder_internal.h | 87
-rw-r--r-- drivers/android/binder/rust_binder_main.rs | 627
-rw-r--r-- drivers/android/binder/rust_binderfs.c | 850
-rw-r--r-- drivers/android/binder/stats.rs | 89
-rw-r--r-- drivers/android/binder/thread.rs | 1596
-rw-r--r-- drivers/android/binder/trace.rs | 16
-rw-r--r-- drivers/android/binder/transaction.rs | 456
-rw-r--r-- drivers/android/binder_internal.h | 4
-rw-r--r-- drivers/android/binder_netlink.c | 31
-rw-r--r-- drivers/android/binder_netlink.h | 20
-rw-r--r-- drivers/android/binder_trace.h | 37
-rw-r--r-- drivers/android/binderfs.c | 8
-rw-r--r-- drivers/android/dbitmap.h | 1
-rw-r--r-- drivers/cdx/cdx_msi.c | 1
-rw-r--r-- drivers/char/Makefile | 1
-rw-r--r-- drivers/char/adi.c | 2
-rw-r--r-- drivers/char/hpet.c | 2
-rw-r--r-- drivers/char/misc.c | 21
-rw-r--r-- drivers/char/misc_minor_kunit.c (renamed from drivers/misc/misc_minor_kunit.c) | 95
-rw-r--r-- drivers/comedi/Kconfig | 9
-rw-r--r-- drivers/comedi/drivers/Makefile | 1
-rw-r--r-- drivers/comedi/drivers/adl_pci7250.c | 220
-rw-r--r-- drivers/counter/ti-ecap-capture.c | 12
-rw-r--r-- drivers/extcon/Kconfig | 13
-rw-r--r-- drivers/extcon/Makefile | 1
-rw-r--r-- drivers/extcon/extcon-adc-jack.c | 2
-rw-r--r-- drivers/extcon/extcon-axp288.c | 2
-rw-r--r-- drivers/extcon/extcon-fsa9480.c | 2
-rw-r--r-- drivers/extcon/extcon-max14526.c | 302
-rw-r--r-- drivers/extcon/extcon-qcom-spmi-misc.c | 2
-rw-r--r-- drivers/greybus/svc.c | 3
-rw-r--r-- drivers/iio/adc/pac1934.c | 20
-rw-r--r-- drivers/iio/adc/xilinx-ams.c | 47
-rw-r--r-- drivers/iio/dac/ad5360.c | 2
-rw-r--r-- drivers/iio/dac/ad5421.c | 2
-rw-r--r-- drivers/iio/frequency/adf4350.c | 20
-rw-r--r-- drivers/interconnect/core.c | 2
-rw-r--r-- drivers/interconnect/qcom/Kconfig | 9
-rw-r--r-- drivers/interconnect/qcom/Makefile | 2
-rw-r--r-- drivers/interconnect/qcom/glymur.c | 2543
-rw-r--r-- drivers/interconnect/qcom/icc-rpmh.h | 2
-rw-r--r-- drivers/misc/Makefile | 1
-rw-r--r-- drivers/misc/ad525x_dpot.c | 7
-rw-r--r-- drivers/misc/amd-sbi/Kconfig | 1
-rw-r--r-- drivers/misc/apds990x.c | 1
-rw-r--r-- drivers/misc/cardreader/rts5227.c | 13
-rw-r--r-- drivers/misc/cardreader/rts5228.c | 12
-rw-r--r-- drivers/misc/cardreader/rts5249.c | 16
-rw-r--r-- drivers/misc/cardreader/rts5264.c | 20
-rw-r--r-- drivers/misc/cardreader/rts5264.h | 1
-rw-r--r-- drivers/misc/cardreader/rtsx_pcr.h | 2
-rw-r--r-- drivers/misc/dw-xdata-pcie.c | 5
-rw-r--r-- drivers/misc/eeprom/Kconfig | 18
-rw-r--r-- drivers/misc/eeprom/Makefile | 1
-rw-r--r-- drivers/misc/eeprom/at25.c | 67
-rw-r--r-- drivers/misc/eeprom/m24lr.c | 606
-rw-r--r-- drivers/misc/fastrpc.c | 54
-rw-r--r-- drivers/misc/genwqe/card_ddcb.c | 2
-rw-r--r-- drivers/misc/hisi_hikey_usb.c | 3
-rw-r--r-- drivers/misc/ibmasm/ibmasmfs.c | 12
-rw-r--r-- drivers/misc/lis3lv02d/Kconfig | 4
-rw-r--r-- drivers/misc/mei/bus-fixup.c | 6
-rw-r--r-- drivers/misc/mei/bus.c | 26
-rw-r--r-- drivers/misc/mei/client.c | 82
-rw-r--r-- drivers/misc/mei/client.h | 6
-rw-r--r-- drivers/misc/mei/dma-ring.c | 8
-rw-r--r-- drivers/misc/mei/gsc-me.c | 20
-rw-r--r-- drivers/misc/mei/hbm.c | 121
-rw-r--r-- drivers/misc/mei/hw-me.c | 153
-rw-r--r-- drivers/misc/mei/hw-txe.c | 60
-rw-r--r-- drivers/misc/mei/hw.h | 2
-rw-r--r-- drivers/misc/mei/init.c | 66
-rw-r--r-- drivers/misc/mei/interrupt.c | 45
-rw-r--r-- drivers/misc/mei/main.c | 137
-rw-r--r-- drivers/misc/mei/mei_dev.h | 24
-rw-r--r-- drivers/misc/mei/pci-me.c | 20
-rw-r--r-- drivers/misc/mei/pci-txe.c | 4
-rw-r--r-- drivers/misc/mei/platform-vsc.c | 26
-rw-r--r-- drivers/nvmem/Kconfig | 21
-rw-r--r-- drivers/nvmem/Makefile | 4
-rw-r--r-- drivers/nvmem/an8855-efuse.c | 68
-rw-r--r-- drivers/nvmem/s32g-ocotp-nvmem.c | 100
-rw-r--r-- drivers/parisc/eisa_eeprom.c | 2
-rw-r--r-- drivers/peci/controller/peci-npcm.c | 1
-rw-r--r-- drivers/pps/kapi.c | 5
-rw-r--r-- drivers/pps/pps.c | 5
-rw-r--r-- drivers/siox/siox-bus-gpio.c | 3
-rw-r--r-- drivers/slimbus/Kconfig | 7
-rw-r--r-- drivers/slimbus/Makefile | 3
-rw-r--r-- drivers/slimbus/messaging.c | 4
-rw-r--r-- drivers/slimbus/qcom-ctrl.c | 735
-rw-r--r-- drivers/uio/uio_aec.c | 2
-rw-r--r-- drivers/uio/uio_cif.c | 2
-rw-r--r-- drivers/uio/uio_dmem_genirq.c | 23
-rw-r--r-- drivers/uio/uio_hv_generic.c | 7
-rw-r--r-- drivers/uio/uio_netx.c | 2
-rw-r--r-- drivers/uio/uio_pdrv_genirq.c | 24
-rw-r--r-- drivers/uio/uio_sercos3.c | 2
-rw-r--r-- drivers/w1/masters/matrox_w1.c | 10
-rw-r--r-- include/dt-bindings/interconnect/qcom,glymur-rpmh.h | 205
-rw-r--r-- include/linux/iio/frequency/adf4350.h | 2
-rw-r--r-- include/linux/miscdevice.h | 9
-rw-r--r-- include/linux/rtsx_pci.h | 2
-rw-r--r-- include/uapi/linux/android/binder.h | 2
-rw-r--r-- include/uapi/linux/android/binder_netlink.h | 37
-rw-r--r-- include/uapi/misc/fastrpc.h | 2
-rw-r--r-- rust/bindings/bindings_helper.h | 8
-rw-r--r-- rust/helpers/binder.c | 26
-rw-r--r-- rust/helpers/helpers.c | 1
-rw-r--r-- rust/helpers/page.c | 8
-rw-r--r-- rust/helpers/security.c | 24
-rw-r--r-- rust/kernel/cred.rs | 6
-rw-r--r-- rust/kernel/fs.rs | 3
-rw-r--r-- rust/kernel/fs/kiocb.rs | 68
-rw-r--r-- rust/kernel/iov.rs | 314
-rw-r--r-- rust/kernel/lib.rs | 1
-rw-r--r-- rust/kernel/miscdevice.rs | 63
-rw-r--r-- rust/kernel/page.rs | 6
-rw-r--r-- rust/kernel/security.rs | 37
-rw-r--r-- rust/uapi/uapi_helper.h | 1
-rw-r--r-- samples/rust/rust_misc_device.rs | 39
-rw-r--r-- tools/testing/selftests/filesystems/binderfs/binderfs_test.c | 1
164 files changed, 16717 insertions, 1579 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-counter b/Documentation/ABI/testing/sysfs-bus-counter
index 3e8259e56d38..3e7eddd8aff3 100644
--- a/Documentation/ABI/testing/sysfs-bus-counter
+++ b/Documentation/ABI/testing/sysfs-bus-counter
@@ -309,26 +309,26 @@ Description:
What: /sys/bus/counter/devices/counterX/cascade_counts_enable_component_id
What: /sys/bus/counter/devices/counterX/external_input_phase_clock_select_component_id
-What: /sys/bus/counter/devices/counterX/countY/compare_component_id
What: /sys/bus/counter/devices/counterX/countY/capture_component_id
What: /sys/bus/counter/devices/counterX/countY/ceiling_component_id
-What: /sys/bus/counter/devices/counterX/countY/floor_component_id
+What: /sys/bus/counter/devices/counterX/countY/compare_component_id
What: /sys/bus/counter/devices/counterX/countY/count_mode_component_id
What: /sys/bus/counter/devices/counterX/countY/direction_component_id
What: /sys/bus/counter/devices/counterX/countY/enable_component_id
What: /sys/bus/counter/devices/counterX/countY/error_noise_component_id
+What: /sys/bus/counter/devices/counterX/countY/floor_component_id
+What: /sys/bus/counter/devices/counterX/countY/num_overflows_component_id
What: /sys/bus/counter/devices/counterX/countY/prescaler_component_id
What: /sys/bus/counter/devices/counterX/countY/preset_component_id
What: /sys/bus/counter/devices/counterX/countY/preset_enable_component_id
What: /sys/bus/counter/devices/counterX/countY/signalZ_action_component_id
-What: /sys/bus/counter/devices/counterX/countY/num_overflows_component_id
What: /sys/bus/counter/devices/counterX/signalY/cable_fault_component_id
What: /sys/bus/counter/devices/counterX/signalY/cable_fault_enable_component_id
What: /sys/bus/counter/devices/counterX/signalY/filter_clock_prescaler_component_id
+What: /sys/bus/counter/devices/counterX/signalY/frequency_component_id
What: /sys/bus/counter/devices/counterX/signalY/index_polarity_component_id
What: /sys/bus/counter/devices/counterX/signalY/polarity_component_id
What: /sys/bus/counter/devices/counterX/signalY/synchronous_mode_component_id
-What: /sys/bus/counter/devices/counterX/signalY/frequency_component_id
KernelVersion: 5.16
Contact: linux-iio@vger.kernel.org
Description:
diff --git a/Documentation/ABI/testing/sysfs-bus-i2c-devices-m24lr b/Documentation/ABI/testing/sysfs-bus-i2c-devices-m24lr
new file mode 100644
index 000000000000..7c51ce8d38ba
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-i2c-devices-m24lr
@@ -0,0 +1,100 @@
+What: /sys/bus/i2c/devices/<busnum>-<primary-addr>/unlock
+Date: 2025-07-04
+KernelVersion: 6.17
+Contact: Abd-Alrhman Masalkhi <abd.masalkhi@gmail.com>
+Description:
+ Write-only attribute used to present a password and unlock
+ access to protected areas of the M24LR chip, including
+ configuration registers such as the Sector Security Status
+ (SSS) bytes. A valid password must be written to enable write
+ access to these regions via the I2C interface.
+
+ Format:
+ - Hexadecimal string representing a 32-bit (4-byte) password
+ - Accepts 1 to 8 hex digits (e.g., "c", "1F", "a1b2c3d4")
+ - No "0x" prefix, whitespace, or trailing newline
+ - Case-insensitive
+
+ Behavior:
+		- If the password matches the internally stored value,
+		  access to protected memory/configuration is granted
+		- If the password does not match the internally stored
+		  value, the unlock attempt fails silently
+
+What: /sys/bus/i2c/devices/<busnum>-<primary-addr>/new_pass
+Date: 2025-07-04
+KernelVersion: 6.17
+Contact: Abd-Alrhman Masalkhi <abd.masalkhi@gmail.com>
+Description:
+ Write-only attribute used to update the password required to
+ unlock the M24LR chip.
+
+ Format:
+ - Hexadecimal string representing a new 32-bit password
+ - Accepts 1 to 8 hex digits (e.g., "1A", "ffff", "c0ffee00")
+ - No "0x" prefix, whitespace, or trailing newline
+ - Case-insensitive
+
+ Behavior:
+ - Overwrites the current password stored in the I2C password
+ register
+ - Requires the device to be unlocked before changing the
+ password
+ - If the device is locked, the write silently fails
+
+What: /sys/bus/i2c/devices/<busnum>-<primary-addr>/uid
+Date: 2025-07-04
+KernelVersion: 6.17
+Contact: Abd-Alrhman Masalkhi <abd.masalkhi@gmail.com>
+Description:
+ Read-only attribute that exposes the 8-byte unique identifier
+ programmed into the M24LR chip at the factory.
+
+ Format:
+ - Lowercase hexadecimal string representing a 64-bit value
+ - 1 to 16 hex digits (e.g., "e00204f12345678")
+ - No "0x" prefix
+ - Includes a trailing newline
+
+What: /sys/bus/i2c/devices/<busnum>-<primary-addr>/total_sectors
+Date: 2025-07-04
+KernelVersion: 6.17
+Contact: Abd-Alrhman Masalkhi <abd.masalkhi@gmail.com>
+Description:
+ Read-only attribute that exposes the total number of EEPROM
+ sectors available in the M24LR chip.
+
+ Format:
+ - 1 to 2 hex digits (e.g. "F")
+ - No "0x" prefix
+ - Includes a trailing newline
+
+ Notes:
+ - Value is encoded by the chip and corresponds to the EEPROM
+ size (e.g., 3 = 4 kbit for M24LR04E-R)
+
+What: /sys/bus/i2c/devices/<busnum>-<primary-addr>/sss
+Date: 2025-07-04
+KernelVersion: 6.17
+Contact: Abd-Alrhman Masalkhi <abd.masalkhi@gmail.com>
+Description:
+ Read/write binary attribute representing the Sector Security
+ Status (SSS) bytes for all EEPROM sectors in STMicroelectronics
+ M24LR chips.
+
+ Each EEPROM sector has one SSS byte, which controls I2C and
+ RF access through protection bits and optional password
+ authentication.
+
+ Format:
+ - The file contains one byte per EEPROM sector
+ - Byte at offset N corresponds to sector N
+ - Binary access only; use tools like dd, Python, or C that
+ support byte-level I/O and offset control.
+
+ Notes:
+		- The number of valid bytes in this file is equal to the
+		  value exposed by the 'total_sectors' file
+ - Write access requires prior password authentication in
+ I2C mode
+ - Refer to the M24LR datasheet for full SSS bit layout
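
A usage sketch of the interface documented above, in C: unlock the chip with the 32-bit password, then dump the per-sector SSS bytes via pread(). The device path (bus 1, address 0x57) and the password "a1b2c3d4" are illustrative placeholders, not values taken from this patch.

	/* cc -o m24lr-sss m24lr-sss.c */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	#define SYSFS_DIR "/sys/bus/i2c/devices/1-0057" /* placeholder path */

	int main(void)
	{
		char sss[64];
		ssize_t n, i;
		int fd;

		/* Bare hex digits, no "0x" prefix, no trailing newline.
		 * A wrong password fails silently, per the ABI above. */
		fd = open(SYSFS_DIR "/unlock", O_WRONLY);
		if (fd < 0)
			return 1;
		if (write(fd, "a1b2c3d4", 8) != 8)
			perror("unlock");
		close(fd);

		/* 'sss' is a binary attribute: byte N is sector N's SSS
		 * byte, so use pread() for byte-level I/O at an offset. */
		fd = open(SYSFS_DIR "/sss", O_RDONLY);
		if (fd < 0)
			return 1;
		n = pread(fd, sss, sizeof(sss), 0);
		for (i = 0; i < n; i++)
			printf("sector %zd: 0x%02x\n", i, (unsigned char)sss[i]);
		close(fd);
		return 0;
	}

Only the first 'total_sectors' bytes of the buffer are meaningful; a more careful program would size the read from that attribute first.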
diff --git a/Documentation/devicetree/bindings/eeprom/at25.yaml b/Documentation/devicetree/bindings/eeprom/at25.yaml
index c31e5e719525..00e0f07b44f8 100644
--- a/Documentation/devicetree/bindings/eeprom/at25.yaml
+++ b/Documentation/devicetree/bindings/eeprom/at25.yaml
@@ -56,6 +56,7 @@ properties:
$ref: /schemas/types.yaml#/definitions/uint32
description:
Total eeprom size in bytes.
+ Also used for FRAMs without device ID where the size cannot be detected.
address-width:
$ref: /schemas/types.yaml#/definitions/uint32
@@ -146,4 +147,11 @@ examples:
reg = <1>;
spi-max-frequency = <40000000>;
};
+
+ fram@2 {
+ compatible = "cypress,fm25", "atmel,at25";
+ reg = <2>;
+ spi-max-frequency = <20000000>;
+ size = <2048>;
+ };
};
diff --git a/Documentation/devicetree/bindings/eeprom/st,m24lr.yaml b/Documentation/devicetree/bindings/eeprom/st,m24lr.yaml
new file mode 100644
index 000000000000..0a0820e9d11f
--- /dev/null
+++ b/Documentation/devicetree/bindings/eeprom/st,m24lr.yaml
@@ -0,0 +1,52 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/eeprom/st,m24lr.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: STMicroelectronics M24LR NFC/RFID EEPROM
+
+maintainers:
+ - Abd-Alrhman Masalkhi <abd.masalkhi@gmail.com>
+
+description:
+ STMicroelectronics M24LR series are dual-interface (RF + I2C)
+ EEPROM chips. These devices support I2C-based access to both
+ memory and a system area that controls authentication and configuration.
+ They expose two I2C addresses, one for the system parameter sector and
+ one for the EEPROM.
+
+allOf:
+ - $ref: /schemas/nvmem/nvmem.yaml#
+
+properties:
+ compatible:
+ enum:
+ - st,m24lr04e-r
+ - st,m24lr16e-r
+ - st,m24lr64e-r
+
+ reg:
+ items:
+ - description: I2C address used for control/system registers
+ - description: I2C address used for EEPROM memory access
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eeprom@57 {
+ compatible = "st,m24lr04e-r";
+ reg = <0x57>, /* primary-device */
+ <0x53>; /* secondary-device */
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/extcon/extcon-rt8973a.txt b/Documentation/devicetree/bindings/extcon/extcon-rt8973a.txt
deleted file mode 100644
index cfcf455ad4de..000000000000
--- a/Documentation/devicetree/bindings/extcon/extcon-rt8973a.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-
-* Richtek RT8973A - Micro USB Switch device
-
-The Richtek RT8973A is Micro USB Switch with OVP and I2C interface. The RT8973A
-is a USB port accessory detector and switch that is optimized to protect low
-voltage system from abnormal high input voltage (up to 28V) and supports high
-speed USB operation. Also, RT8973A support 'auto-configuration' mode.
-If auto-configuration mode is enabled, RT8973A would control internal h/w patch
-for USB D-/D+ switching.
-
-Required properties:
-- compatible: Should be "richtek,rt8973a-muic"
-- reg: Specifies the I2C slave address of the MUIC block. It should be 0x14
-- interrupts: Interrupt specifiers for detection interrupt sources.
-
-Example:
-
- rt8973a@14 {
- compatible = "richtek,rt8973a-muic";
- interrupt-parent = <&gpx1>;
- interrupts = <5 0>;
- reg = <0x14>;
- };
diff --git a/Documentation/devicetree/bindings/extcon/linux,extcon-usb-gpio.yaml b/Documentation/devicetree/bindings/extcon/linux,extcon-usb-gpio.yaml
index 8856107bdd33..8f29d333602b 100644
--- a/Documentation/devicetree/bindings/extcon/linux,extcon-usb-gpio.yaml
+++ b/Documentation/devicetree/bindings/extcon/linux,extcon-usb-gpio.yaml
@@ -25,6 +25,12 @@ properties:
required:
- compatible
+anyOf:
+ - required:
+ - id-gpios
+ - required:
+ - vbus-gpios
+
additionalProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/extcon/maxim,max14526.yaml b/Documentation/devicetree/bindings/extcon/maxim,max14526.yaml
new file mode 100644
index 000000000000..7eb5918df1c2
--- /dev/null
+++ b/Documentation/devicetree/bindings/extcon/maxim,max14526.yaml
@@ -0,0 +1,80 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/extcon/maxim,max14526.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Maxim MAX14526 MicroUSB Integrated Circuit (MUIC)
+
+maintainers:
+ - Svyatoslav Ryhel <clamor95@gmail.com>
+
+properties:
+ compatible:
+ const: maxim,max14526
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+ connector:
+ $ref: /schemas/connector/usb-connector.yaml#
+
+ port:
+ $ref: /schemas/graph.yaml#/properties/port
+
+required:
+ - compatible
+ - reg
+ - interrupts
+ - connector
+ - port
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ muic@44 {
+ compatible = "maxim,max14526";
+ reg = <0x44>;
+
+ interrupt-parent = <&gpio>;
+ interrupts = <72 IRQ_TYPE_EDGE_FALLING>;
+
+ connector {
+ compatible = "usb-b-connector";
+ label = "micro-USB";
+ type = "micro";
+ };
+
+ port {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ muic_to_charger: endpoint@0 {
+ reg = <0>;
+ remote-endpoint = <&charger_input>;
+ };
+
+ muic_to_usb: endpoint@1 {
+ reg = <1>;
+ remote-endpoint = <&usb_input>;
+ };
+
+ muic_to_mhl: endpoint@2 {
+ reg = <2>;
+ remote-endpoint = <&mhl_input>;
+ };
+ };
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/extcon/richtek,rt8973a-muic.yaml b/Documentation/devicetree/bindings/extcon/richtek,rt8973a-muic.yaml
new file mode 100644
index 000000000000..f9e0d816c025
--- /dev/null
+++ b/Documentation/devicetree/bindings/extcon/richtek,rt8973a-muic.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/extcon/richtek,rt8973a-muic.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Richtek RT8973A MUIC
+
+maintainers:
+ - Chanwoo Choi <cw00.choi@samsung.com>
+
+description:
+  The Richtek RT8973A is a Micro USB switch with OVP and an I2C interface. The
+  RT8973A is a USB port accessory detector and switch optimized to protect a
+  low voltage system from abnormally high input voltage (up to 28V), and it
+  supports high speed USB operation. The RT8973A also supports an
+  'auto-configuration' mode; when auto-configuration is enabled, the RT8973A
+  controls the internal h/w path for USB D-/D+ switching.
+
+properties:
+ compatible:
+ const: richtek,rt8973a-muic
+
+ reg:
+ maxItems: 1
+
+ interrupts:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+ - interrupts
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ usb-switch@14 {
+ compatible = "richtek,rt8973a-muic";
+ reg = <0x14>;
+ interrupt-parent = <&gpio>;
+ interrupts = <1 IRQ_TYPE_EDGE_FALLING>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,glymur-rpmh.yaml b/Documentation/devicetree/bindings/interconnect/qcom,glymur-rpmh.yaml
new file mode 100644
index 000000000000..d55a7bcf5591
--- /dev/null
+++ b/Documentation/devicetree/bindings/interconnect/qcom,glymur-rpmh.yaml
@@ -0,0 +1,172 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/interconnect/qcom,glymur-rpmh.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Qualcomm RPMh Network-On-Chip Interconnect on GLYMUR
+
+maintainers:
+ - Raviteja Laggyshetty <raviteja.laggyshetty@oss.qualcomm.com>
+
+description: |
+ RPMh interconnect providers support system bandwidth requirements through
+ RPMh hardware accelerators known as Bus Clock Manager (BCM). The provider is
+ able to communicate with the BCM through the Resource State Coordinator (RSC)
+ associated with each execution environment. Provider nodes must point to at
+ least one RPMh device child node pertaining to their RSC and each provider
+ can map to multiple RPMh resources.
+
+ See also: include/dt-bindings/interconnect/qcom,glymur-rpmh.h
+
+properties:
+ compatible:
+ enum:
+ - qcom,glymur-aggre1-noc
+ - qcom,glymur-aggre2-noc
+ - qcom,glymur-aggre3-noc
+ - qcom,glymur-aggre4-noc
+ - qcom,glymur-clk-virt
+ - qcom,glymur-cnoc-cfg
+ - qcom,glymur-cnoc-main
+ - qcom,glymur-hscnoc
+ - qcom,glymur-lpass-ag-noc
+ - qcom,glymur-lpass-lpiaon-noc
+ - qcom,glymur-lpass-lpicx-noc
+ - qcom,glymur-mc-virt
+ - qcom,glymur-mmss-noc
+ - qcom,glymur-nsinoc
+ - qcom,glymur-nsp-noc
+ - qcom,glymur-oobm-ss-noc
+ - qcom,glymur-pcie-east-anoc
+ - qcom,glymur-pcie-east-slv-noc
+ - qcom,glymur-pcie-west-anoc
+ - qcom,glymur-pcie-west-slv-noc
+ - qcom,glymur-system-noc
+
+ reg:
+ maxItems: 1
+
+ clocks:
+ minItems: 1
+ maxItems: 4
+
+required:
+ - compatible
+
+allOf:
+ - $ref: qcom,rpmh-common.yaml#
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,glymur-clk-virt
+ - qcom,glymur-mc-virt
+ then:
+ properties:
+ reg: false
+ else:
+ required:
+ - reg
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,glymur-pcie-west-anoc
+ then:
+ properties:
+ clocks:
+ items:
+ - description: aggre PCIE_3A WEST AXI clock
+ - description: aggre PCIE_3B WEST AXI clock
+ - description: aggre PCIE_4 WEST AXI clock
+ - description: aggre PCIE_6 WEST AXI clock
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,glymur-pcie-east-anoc
+ then:
+ properties:
+ clocks:
+ items:
+ - description: aggre PCIE_5 EAST AXI clock
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,glymur-aggre2-noc
+ then:
+ properties:
+ clocks:
+ items:
+ - description: aggre USB3 TERT AXI clock
+ - description: aggre USB4_2 AXI clock
+ - description: aggre UFS PHY AXI clock
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,glymur-aggre4-noc
+ then:
+ properties:
+ clocks:
+ items:
+ - description: aggre USB3 PRIM AXI clock
+ - description: aggre USB3 SEC AXI clock
+ - description: aggre USB4_0 AXI clock
+ - description: aggre USB4_1 AXI clock
+
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - qcom,glymur-pcie-west-anoc
+ - qcom,glymur-pcie-east-anoc
+ - qcom,glymur-aggre2-noc
+ - qcom,glymur-aggre4-noc
+ then:
+ required:
+ - clocks
+ else:
+ properties:
+ clocks: false
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/clock/qcom,glymur-gcc.h>
+ clk_virt: interconnect-0 {
+ compatible = "qcom,glymur-clk-virt";
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ aggre1_noc: interconnect@16e0000 {
+ compatible = "qcom,glymur-aggre1-noc";
+ reg = <0x016e0000 0x14400>;
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ };
+
+ aggre4_noc: interconnect@1740000 {
+ compatible = "qcom,glymur-aggre4-noc";
+ reg = <0x01740000 0x14400>;
+ #interconnect-cells = <2>;
+ qcom,bcm-voters = <&apps_bcm_voter>;
+ clocks = <&gcc GCC_AGGRE_USB3_PRIM_AXI_CLK>,
+ <&gcc GCC_AGGRE_USB3_SEC_AXI_CLK>,
+ <&gcc GCC_AGGRE_USB4_0_AXI_CLK>,
+ <&gcc GCC_AGGRE_USB4_1_AXI_CLK>;
+ };
diff --git a/Documentation/devicetree/bindings/interconnect/qcom,osm-l3.yaml b/Documentation/devicetree/bindings/interconnect/qcom,osm-l3.yaml
index ab5a921c3495..4b9b98fbe8f2 100644
--- a/Documentation/devicetree/bindings/interconnect/qcom,osm-l3.yaml
+++ b/Documentation/devicetree/bindings/interconnect/qcom,osm-l3.yaml
@@ -41,6 +41,11 @@ properties:
- qcom,qcs8300-epss-l3
- const: qcom,sa8775p-epss-l3
- const: qcom,epss-l3
+ - items:
+ - enum:
+ - qcom,qcs615-osm-l3
+ - const: qcom,sm8150-osm-l3
+ - const: qcom,osm-l3
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/misc/qcom,fastrpc.yaml b/Documentation/devicetree/bindings/misc/qcom,fastrpc.yaml
index 0840a3d92513..3f6199fc9ae6 100644
--- a/Documentation/devicetree/bindings/misc/qcom,fastrpc.yaml
+++ b/Documentation/devicetree/bindings/misc/qcom,fastrpc.yaml
@@ -27,6 +27,8 @@ properties:
- sdsp
- cdsp
- cdsp1
+ - gdsp0
+ - gdsp1
memory-region:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/nvmem/airoha,an8855-efuse.yaml b/Documentation/devicetree/bindings/nvmem/airoha,an8855-efuse.yaml
new file mode 100644
index 000000000000..9802d9ea2176
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/airoha,an8855-efuse.yaml
@@ -0,0 +1,123 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/nvmem/airoha,an8855-efuse.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Airoha AN8855 Switch EFUSE
+
+maintainers:
+ - Christian Marangi <ansuelsmth@gmail.com>
+
+description:
+  The Airoha AN8855 EFUSE is used to calibrate the internal PHYs and to store
+  additional configuration info.
+
+$ref: nvmem.yaml#
+
+properties:
+ compatible:
+ const: airoha,an8855-efuse
+
+ '#nvmem-cell-cells':
+ const: 0
+
+required:
+ - compatible
+ - '#nvmem-cell-cells'
+
+unevaluatedProperties: false
+
+examples:
+ - |
+ efuse {
+ compatible = "airoha,an8855-efuse";
+
+ #nvmem-cell-cells = <0>;
+
+ nvmem-layout {
+ compatible = "fixed-layout";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ shift_sel_port0_tx_a: shift-sel-port0-tx-a@c {
+ reg = <0xc 0x4>;
+ };
+
+ shift_sel_port0_tx_b: shift-sel-port0-tx-b@10 {
+ reg = <0x10 0x4>;
+ };
+
+ shift_sel_port0_tx_c: shift-sel-port0-tx-c@14 {
+ reg = <0x14 0x4>;
+ };
+
+ shift_sel_port0_tx_d: shift-sel-port0-tx-d@18 {
+ reg = <0x18 0x4>;
+ };
+
+ shift_sel_port1_tx_a: shift-sel-port1-tx-a@1c {
+ reg = <0x1c 0x4>;
+ };
+
+ shift_sel_port1_tx_b: shift-sel-port1-tx-b@20 {
+ reg = <0x20 0x4>;
+ };
+
+ shift_sel_port1_tx_c: shift-sel-port1-tx-c@24 {
+ reg = <0x24 0x4>;
+ };
+
+ shift_sel_port1_tx_d: shift-sel-port1-tx-d@28 {
+ reg = <0x28 0x4>;
+ };
+
+ shift_sel_port2_tx_a: shift-sel-port2-tx-a@2c {
+ reg = <0x2c 0x4>;
+ };
+
+ shift_sel_port2_tx_b: shift-sel-port2-tx-b@30 {
+ reg = <0x30 0x4>;
+ };
+
+ shift_sel_port2_tx_c: shift-sel-port2-tx-c@34 {
+ reg = <0x34 0x4>;
+ };
+
+ shift_sel_port2_tx_d: shift-sel-port2-tx-d@38 {
+ reg = <0x38 0x4>;
+ };
+
+ shift_sel_port3_tx_a: shift-sel-port3-tx-a@4c {
+ reg = <0x4c 0x4>;
+ };
+
+ shift_sel_port3_tx_b: shift-sel-port3-tx-b@50 {
+ reg = <0x50 0x4>;
+ };
+
+ shift_sel_port3_tx_c: shift-sel-port3-tx-c@54 {
+ reg = <0x54 0x4>;
+ };
+
+ shift_sel_port3_tx_d: shift-sel-port3-tx-d@58 {
+ reg = <0x58 0x4>;
+ };
+
+ shift_sel_port4_tx_a: shift-sel-port4-tx-a@5c {
+ reg = <0x5c 0x4>;
+ };
+
+ shift_sel_port4_tx_b: shift-sel-port4-tx-b@60 {
+ reg = <0x60 0x4>;
+ };
+
+ shift_sel_port4_tx_c: shift-sel-port4-tx-c@64 {
+ reg = <0x64 0x4>;
+ };
+
+ shift_sel_port4_tx_d: shift-sel-port4-tx-d@68 {
+ reg = <0x68 0x4>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/nvmem/layouts/kontron,sl28-vpd.yaml b/Documentation/devicetree/bindings/nvmem/layouts/kontron,sl28-vpd.yaml
index c713e23819f1..afd1919c6b1c 100644
--- a/Documentation/devicetree/bindings/nvmem/layouts/kontron,sl28-vpd.yaml
+++ b/Documentation/devicetree/bindings/nvmem/layouts/kontron,sl28-vpd.yaml
@@ -19,7 +19,12 @@ select: false
properties:
compatible:
- const: kontron,sl28-vpd
+ oneOf:
+ - items:
+ - enum:
+ - kontron,sa67-vpd
+ - const: kontron,sl28-vpd
+ - const: kontron,sl28-vpd
serial-number:
type: object
diff --git a/Documentation/devicetree/bindings/nvmem/nxp,s32g-ocotp-nvmem.yaml b/Documentation/devicetree/bindings/nvmem/nxp,s32g-ocotp-nvmem.yaml
new file mode 100644
index 000000000000..8d46e7d28da6
--- /dev/null
+++ b/Documentation/devicetree/bindings/nvmem/nxp,s32g-ocotp-nvmem.yaml
@@ -0,0 +1,45 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/nvmem/nxp,s32g-ocotp-nvmem.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: NXP S32G OCOTP NVMEM driver
+
+maintainers:
+ - Ciprian Costea <ciprianmarian.costea@nxp.com>
+
+description:
+ The drivers provides an interface to access One Time
+ Programmable memory pages, such as TMU fuse values.
+
+properties:
+ compatible:
+ oneOf:
+ - enum:
+ - nxp,s32g2-ocotp
+ - items:
+ - enum:
+ - nxp,s32g3-ocotp
+ - nxp,s32r45-ocotp
+ - const: nxp,s32g2-ocotp
+ reg:
+ maxItems: 1
+
+required:
+ - compatible
+ - reg
+
+unevaluatedProperties: false
+
+allOf:
+ - $ref: nvmem.yaml#
+
+examples:
+ - |
+ nvmem@400a4000 {
+ compatible = "nxp,s32g2-ocotp";
+ reg = <0x400a4000 0x400>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
diff --git a/Documentation/devicetree/bindings/slimbus/qcom,slim.yaml b/Documentation/devicetree/bindings/slimbus/qcom,slim.yaml
deleted file mode 100644
index 883bda58ca97..000000000000
--- a/Documentation/devicetree/bindings/slimbus/qcom,slim.yaml
+++ /dev/null
@@ -1,86 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
-%YAML 1.2
----
-$id: http://devicetree.org/schemas/slimbus/qcom,slim.yaml#
-$schema: http://devicetree.org/meta-schemas/core.yaml#
-
-title: Qualcomm SoC SLIMbus controller
-
-maintainers:
- - Krzysztof Kozlowski <krzysztof.kozlowski@linaro.org>
- - Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
-
-description:
- SLIMbus controller used when applications processor controls SLIMbus master
- component.
-
-allOf:
- - $ref: slimbus.yaml#
-
-properties:
- compatible:
- items:
- - enum:
- - qcom,apq8064-slim
- - const: qcom,slim
-
- reg:
- items:
- - description: Physical address of controller register blocks
- - description: SLEW RATE register
-
- reg-names:
- items:
- - const: ctrl
- - const: slew
-
- clocks:
- items:
- - description: Interface clock for this controller
- - description: Interrupt for controller core's BAM
-
- clock-names:
- items:
- - const: iface
- - const: core
-
- interrupts:
- maxItems: 1
-
-required:
- - compatible
- - reg
- - reg-names
- - clocks
- - clock-names
- - interrupts
-
-unevaluatedProperties: false
-
-examples:
- - |
- #include <dt-bindings/clock/qcom,gcc-msm8960.h>
- #include <dt-bindings/clock/qcom,lcc-msm8960.h>
- #include <dt-bindings/interrupt-controller/arm-gic.h>
-
- soc {
- #address-cells = <1>;
- #size-cells = <1>;
- ranges;
-
- slim@28080000 {
- compatible = "qcom,apq8064-slim", "qcom,slim";
- reg = <0x28080000 0x2000>, <0x80207c 4>;
- reg-names = "ctrl", "slew";
- interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&lcc SLIMBUS_SRC>, <&lcc AUDIO_SLIMBUS_CLK>;
- clock-names = "iface", "core";
- #address-cells = <2>;
- #size-cells = <0>;
-
- audio-codec@1,0 {
- compatible = "slim217,60";
- reg = <1 0>;
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/slimbus/slimbus.yaml b/Documentation/devicetree/bindings/slimbus/slimbus.yaml
index 3b8cae9d1016..89017d9cda10 100644
--- a/Documentation/devicetree/bindings/slimbus/slimbus.yaml
+++ b/Documentation/devicetree/bindings/slimbus/slimbus.yaml
@@ -68,8 +68,6 @@ additionalProperties: true
examples:
- |
- #include <dt-bindings/clock/qcom,gcc-msm8960.h>
- #include <dt-bindings/clock/qcom,lcc-msm8960.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
soc {
@@ -78,17 +76,14 @@ examples:
ranges;
slim@28080000 {
- compatible = "qcom,apq8064-slim", "qcom,slim";
- reg = <0x28080000 0x2000>, <0x80207c 4>;
- reg-names = "ctrl", "slew";
- interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&lcc SLIMBUS_SRC>, <&lcc AUDIO_SLIMBUS_CLK>;
- clock-names = "iface", "core";
+ compatible = "qcom,slim-ngd-v1.5.0";
+ reg = <0x091c0000 0x2c000>;
+ interrupts = <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <2>;
#size-cells = <0>;
audio-codec@1,0 {
- compatible = "slim217,60";
+ compatible = "slim217,1a0";
reg = <1 0>;
};
};
diff --git a/Documentation/devicetree/bindings/w1/fsl-imx-owire.yaml b/Documentation/devicetree/bindings/w1/fsl-imx-owire.yaml
index 55adea827c34..2c1bbc0eb05a 100644
--- a/Documentation/devicetree/bindings/w1/fsl-imx-owire.yaml
+++ b/Documentation/devicetree/bindings/w1/fsl-imx-owire.yaml
@@ -24,6 +24,9 @@ properties:
reg:
maxItems: 1
+ interrupts:
+ maxItems: 1
+
clocks:
maxItems: 1
@@ -40,5 +43,6 @@ examples:
owire@63fa4000 {
compatible = "fsl,imx53-owire", "fsl,imx21-owire";
reg = <0x63fa4000 0x4000>;
+ interrupts = <88>;
clocks = <&clks IMX5_CLK_OWIRE_GATE>;
};
diff --git a/Documentation/netlink/specs/binder.yaml b/Documentation/netlink/specs/binder.yaml
new file mode 100644
index 000000000000..0f0575ad1265
--- /dev/null
+++ b/Documentation/netlink/specs/binder.yaml
@@ -0,0 +1,93 @@
+# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+#
+# Copyright 2025 Google LLC
+#
+---
+name: binder
+protocol: genetlink
+uapi-header: linux/android/binder_netlink.h
+doc: Binder interface over generic netlink
+
+attribute-sets:
+ -
+ name: report
+ doc: |
+      Attributes included within a transaction failure report. The elements
+      correspond directly to the specific transaction that failed, along
+      with the error returned to the sender, e.g. BR_DEAD_REPLY.
+
+ attributes:
+ -
+ name: error
+ type: u32
+ doc: The enum binder_driver_return_protocol returned to the sender.
+ -
+ name: context
+ type: string
+ doc: The binder context where the transaction occurred.
+ -
+ name: from-pid
+ type: u32
+ doc: The PID of the sender process.
+ -
+ name: from-tid
+ type: u32
+ doc: The TID of the sender thread.
+ -
+ name: to-pid
+ type: u32
+ doc: |
+ The PID of the recipient process. This attribute may not be present
+ if the target could not be determined.
+ -
+ name: to-tid
+ type: u32
+ doc: |
+ The TID of the recipient thread. This attribute may not be present
+ if the target could not be determined.
+ -
+ name: is-reply
+ type: flag
+ doc: When present, indicates the failed transaction is a reply.
+ -
+ name: flags
+ type: u32
+ doc: The bitmask of enum transaction_flags from the transaction.
+ -
+ name: code
+ type: u32
+ doc: The application-defined code from the transaction.
+ -
+ name: data-size
+ type: u32
+ doc: The transaction payload size in bytes.
+
+operations:
+ list:
+ -
+ name: report
+ doc: |
+ A multicast event sent to userspace subscribers to notify them about
+ binder transaction failures. The generated report provides the full
+ details of the specific transaction that failed. The intention is for
+ programs to monitor these events and react to the failures as needed.
+
+ attribute-set: report
+ mcgrp: report
+ event:
+ attributes:
+ - error
+ - context
+ - from-pid
+ - from-tid
+ - to-pid
+ - to-tid
+ - is-reply
+ - flags
+ - code
+ - data-size
+
+mcast-groups:
+ list:
+ -
+ name: report
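
A minimal C sketch of a userspace listener for this family, assuming libnl-3/libnl-genl-3 as the client library (any genetlink client would do). The BINDER_A_REPORT_* attribute names and BINDER_CMD_REPORT come from the generated <linux/android/binder_netlink.h> added by this series; BINDER_A_REPORT_MAX is assumed to be emitted by the spec's code generator.

	/* cc report.c $(pkg-config --cflags --libs libnl-genl-3.0) */
	#include <stdio.h>
	#include <netlink/netlink.h>
	#include <netlink/genl/genl.h>
	#include <netlink/genl/ctrl.h>
	#include <linux/android/binder_netlink.h>

	static int report_cb(struct nl_msg *msg, void *arg)
	{
		struct nlattr *tb[BINDER_A_REPORT_MAX + 1];

		if (genlmsg_parse(nlmsg_hdr(msg), 0, tb, BINDER_A_REPORT_MAX, NULL))
			return NL_SKIP;
		if (tb[BINDER_A_REPORT_CONTEXT] && tb[BINDER_A_REPORT_ERROR])
			printf("binder: context=%s from=%u error=%u\n",
			       nla_get_string(tb[BINDER_A_REPORT_CONTEXT]),
			       tb[BINDER_A_REPORT_FROM_PID] ?
					nla_get_u32(tb[BINDER_A_REPORT_FROM_PID]) : 0,
			       nla_get_u32(tb[BINDER_A_REPORT_ERROR]));
		return NL_OK;
	}

	int main(void)
	{
		struct nl_sock *sk = nl_socket_alloc();
		int grp;

		if (!sk || genl_connect(sk))
			return 1;
		/* Resolve the multicast group id by family/group name,
		 * join it, and deliver each valid message to report_cb. */
		grp = genl_ctrl_resolve_grp(sk, "binder", "report");
		if (grp < 0)
			return 1;
		nl_socket_disable_seq_check(sk); /* multicast has no seq numbers */
		nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, report_cb, NULL);
		nl_socket_add_membership(sk, grp);
		for (;;)
			nl_recvmsgs_default(sk);
	}

Since reports are best-effort multicast (the kernel skips building the message entirely when there are no listeners, per the genl_has_listeners() check in the binder.c change below), a monitor must tolerate missed events.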
diff --git a/MAINTAINERS b/MAINTAINERS
index f090c2f6e63a..320ee3865865 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1795,6 +1795,7 @@ M: Suren Baghdasaryan <surenb@google.com>
L: linux-kernel@vger.kernel.org
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
+F: Documentation/netlink/specs/binder.yaml
F: drivers/android/
ANDROID GOLDFISH PIC DRIVER
diff --git a/arch/sparc/kernel/apc.c b/arch/sparc/kernel/apc.c
index d44725d37e30..849db20e7165 100644
--- a/arch/sparc/kernel/apc.c
+++ b/arch/sparc/kernel/apc.c
@@ -28,7 +28,6 @@
* #define APC_DEBUG_LED
*/
-#define APC_MINOR MISC_DYNAMIC_MINOR
#define APC_OBPNAME "power-management"
#define APC_DEVNAME "apc"
@@ -138,7 +137,7 @@ static const struct file_operations apc_fops = {
.llseek = noop_llseek,
};
-static struct miscdevice apc_miscdev = { APC_MINOR, APC_DEVNAME, &apc_fops };
+static struct miscdevice apc_miscdev = { MISC_DYNAMIC_MINOR, APC_DEVNAME, &apc_fops };
static int apc_probe(struct platform_device *op)
{
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index 5b3b8041f827..e2e402c9d175 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -4,6 +4,7 @@ menu "Android"
config ANDROID_BINDER_IPC
bool "Android Binder IPC Driver"
depends on MMU
+ depends on NET
default n
help
Binder is used in Android for both communication between processes,
@@ -13,6 +14,19 @@ config ANDROID_BINDER_IPC
Android process, using Binder to identify, invoke and pass arguments
between said processes.
+config ANDROID_BINDER_IPC_RUST
+ bool "Rust version of Android Binder IPC Driver"
+ depends on RUST && MMU && !ANDROID_BINDER_IPC
+ help
+ This enables the Rust implementation of the Binder driver.
+
+ Binder is used in Android for both communication between processes,
+ and remote method invocation.
+
+ This means one Android process can call a method/routine in another
+ Android process, using Binder to identify, invoke and pass arguments
+ between said processes.
+
config ANDROID_BINDERFS
bool "Android Binderfs filesystem"
depends on ANDROID_BINDER_IPC
@@ -27,7 +41,7 @@ config ANDROID_BINDERFS
config ANDROID_BINDER_DEVICES
string "Android Binder devices"
- depends on ANDROID_BINDER_IPC
+ depends on ANDROID_BINDER_IPC || ANDROID_BINDER_IPC_RUST
default "binder,hwbinder,vndbinder"
help
Default value for the binder.devices parameter.
diff --git a/drivers/android/Makefile b/drivers/android/Makefile
index c5d47be0276c..e0c650d3898e 100644
--- a/drivers/android/Makefile
+++ b/drivers/android/Makefile
@@ -2,5 +2,6 @@
ccflags-y += -I$(src) # needed for trace events
obj-$(CONFIG_ANDROID_BINDERFS) += binderfs.o
-obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o binder_alloc.o
+obj-$(CONFIG_ANDROID_BINDER_IPC) += binder.o binder_alloc.o binder_netlink.o
obj-$(CONFIG_ANDROID_BINDER_ALLOC_KUNIT_TEST) += tests/
+obj-$(CONFIG_ANDROID_BINDER_IPC_RUST) += binder/
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 312b462e349d..8c99ceaa303b 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -74,6 +74,7 @@
#include <linux/cacheflush.h>
+#include "binder_netlink.h"
#include "binder_internal.h"
#include "binder_trace.h"
@@ -2993,6 +2994,69 @@ static void binder_set_txn_from_error(struct binder_transaction *t, int id,
binder_thread_dec_tmpref(from);
}
+/**
+ * binder_netlink_report() - report a transaction failure via netlink
+ * @proc: the binder proc sending the transaction
+ * @t: the binder transaction that failed
+ * @data_size: the user provided data size for the transaction
+ * @error: enum binder_driver_return_protocol returned to sender
+ */
+static void binder_netlink_report(struct binder_proc *proc,
+ struct binder_transaction *t,
+ u32 data_size,
+ u32 error)
+{
+ const char *context = proc->context->name;
+ struct sk_buff *skb;
+ void *hdr;
+
+ if (!genl_has_listeners(&binder_nl_family, &init_net,
+ BINDER_NLGRP_REPORT))
+ return;
+
+ trace_binder_netlink_report(context, t, data_size, error);
+
+ skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ if (!skb)
+ return;
+
+ hdr = genlmsg_put(skb, 0, 0, &binder_nl_family, 0, BINDER_CMD_REPORT);
+ if (!hdr)
+ goto free_skb;
+
+ if (nla_put_u32(skb, BINDER_A_REPORT_ERROR, error) ||
+ nla_put_string(skb, BINDER_A_REPORT_CONTEXT, context) ||
+ nla_put_u32(skb, BINDER_A_REPORT_FROM_PID, t->from_pid) ||
+ nla_put_u32(skb, BINDER_A_REPORT_FROM_TID, t->from_tid))
+ goto cancel_skb;
+
+ if (t->to_proc &&
+ nla_put_u32(skb, BINDER_A_REPORT_TO_PID, t->to_proc->pid))
+ goto cancel_skb;
+
+ if (t->to_thread &&
+ nla_put_u32(skb, BINDER_A_REPORT_TO_TID, t->to_thread->pid))
+ goto cancel_skb;
+
+ if (t->is_reply && nla_put_flag(skb, BINDER_A_REPORT_IS_REPLY))
+ goto cancel_skb;
+
+ if (nla_put_u32(skb, BINDER_A_REPORT_FLAGS, t->flags) ||
+ nla_put_u32(skb, BINDER_A_REPORT_CODE, t->code) ||
+ nla_put_u32(skb, BINDER_A_REPORT_DATA_SIZE, data_size))
+ goto cancel_skb;
+
+ genlmsg_end(skb, hdr);
+ genlmsg_multicast(&binder_nl_family, skb, 0, BINDER_NLGRP_REPORT,
+ GFP_KERNEL);
+ return;
+
+cancel_skb:
+ genlmsg_cancel(skb, hdr);
+free_skb:
+ nlmsg_free(skb);
+}
+
static void binder_transaction(struct binder_proc *proc,
struct binder_thread *thread,
struct binder_transaction_data *tr, int reply,
@@ -3042,6 +3106,32 @@ static void binder_transaction(struct binder_proc *proc,
binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0);
binder_inner_proc_unlock(proc);
+ t = kzalloc(sizeof(*t), GFP_KERNEL);
+ if (!t) {
+ binder_txn_error("%d:%d cannot allocate transaction\n",
+ thread->pid, proc->pid);
+ return_error = BR_FAILED_REPLY;
+ return_error_param = -ENOMEM;
+ return_error_line = __LINE__;
+ goto err_alloc_t_failed;
+ }
+ INIT_LIST_HEAD(&t->fd_fixups);
+ binder_stats_created(BINDER_STAT_TRANSACTION);
+ spin_lock_init(&t->lock);
+ t->debug_id = t_debug_id;
+ t->start_time = t_start_time;
+ t->from_pid = proc->pid;
+ t->from_tid = thread->pid;
+ t->sender_euid = task_euid(proc->tsk);
+ t->code = tr->code;
+ t->flags = tr->flags;
+ t->priority = task_nice(current);
+ t->work.type = BINDER_WORK_TRANSACTION;
+ t->is_async = !reply && (tr->flags & TF_ONE_WAY);
+ t->is_reply = reply;
+ if (!reply && !(tr->flags & TF_ONE_WAY))
+ t->from = thread;
+
if (reply) {
binder_inner_proc_lock(proc);
in_reply_to = thread->transaction_stack;
@@ -3228,24 +3318,13 @@ static void binder_transaction(struct binder_proc *proc,
}
binder_inner_proc_unlock(proc);
}
+
+ t->to_proc = target_proc;
+ t->to_thread = target_thread;
if (target_thread)
e->to_thread = target_thread->pid;
e->to_proc = target_proc->pid;
- /* TODO: reuse incoming transaction for reply */
- t = kzalloc(sizeof(*t), GFP_KERNEL);
- if (t == NULL) {
- binder_txn_error("%d:%d cannot allocate transaction\n",
- thread->pid, proc->pid);
- return_error = BR_FAILED_REPLY;
- return_error_param = -ENOMEM;
- return_error_line = __LINE__;
- goto err_alloc_t_failed;
- }
- INIT_LIST_HEAD(&t->fd_fixups);
- binder_stats_created(BINDER_STAT_TRANSACTION);
- spin_lock_init(&t->lock);
-
tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
if (tcomplete == NULL) {
binder_txn_error("%d:%d cannot allocate work for transaction\n",
@@ -3257,9 +3336,6 @@ static void binder_transaction(struct binder_proc *proc,
}
binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE);
- t->debug_id = t_debug_id;
- t->start_time = t_start_time;
-
if (reply)
binder_debug(BINDER_DEBUG_TRANSACTION,
"%d:%d BC_REPLY %d -> %d:%d, data size %lld-%lld-%lld\n",
@@ -3275,19 +3351,6 @@ static void binder_transaction(struct binder_proc *proc,
(u64)tr->data_size, (u64)tr->offsets_size,
(u64)extra_buffers_size);
- if (!reply && !(tr->flags & TF_ONE_WAY))
- t->from = thread;
- else
- t->from = NULL;
- t->from_pid = proc->pid;
- t->from_tid = thread->pid;
- t->sender_euid = task_euid(proc->tsk);
- t->to_proc = target_proc;
- t->to_thread = target_thread;
- t->code = tr->code;
- t->flags = tr->flags;
- t->priority = task_nice(current);
-
if (target_node && target_node->txn_security_ctx) {
u32 secid;
size_t added_size;
@@ -3680,11 +3743,13 @@ static void binder_transaction(struct binder_proc *proc,
return_error_line = __LINE__;
goto err_copy_data_failed;
}
- if (t->buffer->oneway_spam_suspect)
+ if (t->buffer->oneway_spam_suspect) {
tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT;
- else
+ binder_netlink_report(proc, t, tr->data_size,
+ BR_ONEWAY_SPAM_SUSPECT);
+ } else {
tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE;
- t->work.type = BINDER_WORK_TRANSACTION;
+ }
if (reply) {
binder_enqueue_thread_work(thread, tcomplete);
@@ -3712,7 +3777,6 @@ static void binder_transaction(struct binder_proc *proc,
* the target replies (or there is an error).
*/
binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete);
- t->need_reply = 1;
t->from_parent = thread->transaction_stack;
thread->transaction_stack = t;
binder_inner_proc_unlock(proc);
@@ -3733,8 +3797,11 @@ static void binder_transaction(struct binder_proc *proc,
* process and is put in a pending queue, waiting for the target
* process to be unfrozen.
*/
- if (return_error == BR_TRANSACTION_PENDING_FROZEN)
+ if (return_error == BR_TRANSACTION_PENDING_FROZEN) {
tcomplete->type = BINDER_WORK_TRANSACTION_PENDING;
+ binder_netlink_report(proc, t, tr->data_size,
+ return_error);
+ }
binder_enqueue_thread_work(thread, tcomplete);
if (return_error &&
return_error != BR_TRANSACTION_PENDING_FROZEN)
@@ -3783,9 +3850,6 @@ err_get_secctx_failed:
err_alloc_tcomplete_failed:
if (trace_binder_txn_latency_free_enabled())
binder_txn_latency_free(t);
- kfree(t);
- binder_stats_deleted(BINDER_STAT_TRANSACTION);
-err_alloc_t_failed:
err_bad_todo_list:
err_bad_call_stack:
err_empty_call_stack:
@@ -3796,6 +3860,11 @@ err_invalid_target_handle:
binder_dec_node_tmpref(target_node);
}
+ binder_netlink_report(proc, t, tr->data_size, return_error);
+ kfree(t);
+ binder_stats_deleted(BINDER_STAT_TRANSACTION);
+err_alloc_t_failed:
+
binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
"%d:%d transaction %s to %d:%d failed %d/%d/%d, code %u size %lld-%lld line %d\n",
proc->pid, thread->pid, reply ? "reply" :
@@ -6324,13 +6393,13 @@ static void print_binder_transaction_ilocked(struct seq_file *m,
spin_lock(&t->lock);
to_proc = t->to_proc;
seq_printf(m,
- "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d elapsed %lldms",
+ "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld a%d r%d elapsed %lldms",
prefix, t->debug_id, t,
t->from_pid,
t->from_tid,
to_proc ? to_proc->pid : 0,
t->to_thread ? t->to_thread->pid : 0,
- t->code, t->flags, t->priority, t->need_reply,
+ t->code, t->flags, t->priority, t->is_async, t->is_reply,
ktime_ms_delta(current_time, t->start_time));
spin_unlock(&t->lock);
@@ -7062,12 +7131,19 @@ static int __init binder_init(void)
}
}
- ret = init_binderfs();
+ ret = genl_register_family(&binder_nl_family);
if (ret)
goto err_init_binder_device_failed;
+ ret = init_binderfs();
+ if (ret)
+ goto err_init_binderfs_failed;
+
return ret;
+err_init_binderfs_failed:
+ genl_unregister_family(&binder_nl_family);
+
err_init_binder_device_failed:
hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) {
misc_deregister(&device->miscdev);
@@ -7088,5 +7164,3 @@ device_initcall(binder_init);
#define CREATE_TRACE_POINTS
#include "binder_trace.h"
-
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/android/binder/Makefile b/drivers/android/binder/Makefile
new file mode 100644
index 000000000000..09eabb527fa0
--- /dev/null
+++ b/drivers/android/binder/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+ccflags-y += -I$(src) # needed for trace events
+
+obj-$(CONFIG_ANDROID_BINDER_IPC_RUST) += rust_binder.o
+rust_binder-y := \
+ rust_binder_main.o \
+ rust_binderfs.o \
+ rust_binder_events.o \
+ page_range_helper.o
diff --git a/drivers/android/binder/allocation.rs b/drivers/android/binder/allocation.rs
new file mode 100644
index 000000000000..7f65a9c3a0e5
--- /dev/null
+++ b/drivers/android/binder/allocation.rs
@@ -0,0 +1,602 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use core::mem::{size_of, size_of_val, MaybeUninit};
+use core::ops::Range;
+
+use kernel::{
+ bindings,
+ fs::file::{File, FileDescriptorReservation},
+ prelude::*,
+ sync::{aref::ARef, Arc},
+ transmute::{AsBytes, FromBytes},
+ uaccess::UserSliceReader,
+ uapi,
+};
+
+use crate::{
+ deferred_close::DeferredFdCloser,
+ defs::*,
+ node::{Node, NodeRef},
+ process::Process,
+ DArc,
+};
+
+#[derive(Default)]
+pub(crate) struct AllocationInfo {
+ /// Range within the allocation where we can find the offsets to the object descriptors.
+ pub(crate) offsets: Option<Range<usize>>,
+ /// The target node of the transaction this allocation is associated to.
+ /// Not set for replies.
+ pub(crate) target_node: Option<NodeRef>,
+ /// When this allocation is dropped, call `pending_oneway_finished` on the node.
+ ///
+    /// This is used to serialize oneway transactions on the same node. Binder guarantees that
+ /// oneway transactions to the same node are delivered sequentially in the order they are sent.
+ pub(crate) oneway_node: Option<DArc<Node>>,
+ /// Zero the data in the buffer on free.
+ pub(crate) clear_on_free: bool,
+ /// List of files embedded in this transaction.
+ file_list: FileList,
+}
+
+/// Represents an allocation that the kernel is currently using.
+///
+/// When allocations are idle, the range allocator holds the data related to them.
+///
+/// # Invariants
+///
+/// This allocation corresponds to an allocation in the range allocator, so the relevant pages are
+/// marked in use in the page range.
+pub(crate) struct Allocation {
+ pub(crate) offset: usize,
+ size: usize,
+ pub(crate) ptr: usize,
+ pub(crate) process: Arc<Process>,
+ allocation_info: Option<AllocationInfo>,
+ free_on_drop: bool,
+ pub(crate) oneway_spam_detected: bool,
+ #[allow(dead_code)]
+ pub(crate) debug_id: usize,
+}
+
+impl Allocation {
+ pub(crate) fn new(
+ process: Arc<Process>,
+ debug_id: usize,
+ offset: usize,
+ size: usize,
+ ptr: usize,
+ oneway_spam_detected: bool,
+ ) -> Self {
+ Self {
+ process,
+ offset,
+ size,
+ ptr,
+ debug_id,
+ oneway_spam_detected,
+ allocation_info: None,
+ free_on_drop: true,
+ }
+ }
+
+ fn size_check(&self, offset: usize, size: usize) -> Result {
+ let overflow_fail = offset.checked_add(size).is_none();
+ let cmp_size_fail = offset.wrapping_add(size) > self.size;
+ if overflow_fail || cmp_size_fail {
+ return Err(EFAULT);
+ }
+ Ok(())
+ }
+
+ pub(crate) fn copy_into(
+ &self,
+ reader: &mut UserSliceReader,
+ offset: usize,
+ size: usize,
+ ) -> Result {
+ self.size_check(offset, size)?;
+
+ // SAFETY: While this object exists, the range allocator will keep the range allocated, and
+ // in turn, the pages will be marked as in use.
+ unsafe {
+ self.process
+ .pages
+ .copy_from_user_slice(reader, self.offset + offset, size)
+ }
+ }
+
+ pub(crate) fn read<T: FromBytes>(&self, offset: usize) -> Result<T> {
+ self.size_check(offset, size_of::<T>())?;
+
+ // SAFETY: While this object exists, the range allocator will keep the range allocated, and
+ // in turn, the pages will be marked as in use.
+ unsafe { self.process.pages.read(self.offset + offset) }
+ }
+
+ pub(crate) fn write<T: ?Sized>(&self, offset: usize, obj: &T) -> Result {
+ self.size_check(offset, size_of_val::<T>(obj))?;
+
+ // SAFETY: While this object exists, the range allocator will keep the range allocated, and
+ // in turn, the pages will be marked as in use.
+ unsafe { self.process.pages.write(self.offset + offset, obj) }
+ }
+
+ pub(crate) fn fill_zero(&self) -> Result {
+ // SAFETY: While this object exists, the range allocator will keep the range allocated, and
+ // in turn, the pages will be marked as in use.
+ unsafe { self.process.pages.fill_zero(self.offset, self.size) }
+ }
+
+ pub(crate) fn keep_alive(mut self) {
+ self.process
+ .buffer_make_freeable(self.offset, self.allocation_info.take());
+ self.free_on_drop = false;
+ }
+
+ pub(crate) fn set_info(&mut self, info: AllocationInfo) {
+ self.allocation_info = Some(info);
+ }
+
+ pub(crate) fn get_or_init_info(&mut self) -> &mut AllocationInfo {
+ self.allocation_info.get_or_insert_with(Default::default)
+ }
+
+ pub(crate) fn set_info_offsets(&mut self, offsets: Range<usize>) {
+ self.get_or_init_info().offsets = Some(offsets);
+ }
+
+ pub(crate) fn set_info_oneway_node(&mut self, oneway_node: DArc<Node>) {
+ self.get_or_init_info().oneway_node = Some(oneway_node);
+ }
+
+ pub(crate) fn set_info_clear_on_drop(&mut self) {
+ self.get_or_init_info().clear_on_free = true;
+ }
+
+ pub(crate) fn set_info_target_node(&mut self, target_node: NodeRef) {
+ self.get_or_init_info().target_node = Some(target_node);
+ }
+
+ /// Reserve enough space to push at least `num_fds` fds.
+ pub(crate) fn info_add_fd_reserve(&mut self, num_fds: usize) -> Result {
+ self.get_or_init_info()
+ .file_list
+ .files_to_translate
+ .reserve(num_fds, GFP_KERNEL)?;
+
+ Ok(())
+ }
+
+ pub(crate) fn info_add_fd(
+ &mut self,
+ file: ARef<File>,
+ buffer_offset: usize,
+ close_on_free: bool,
+ ) -> Result {
+ self.get_or_init_info().file_list.files_to_translate.push(
+ FileEntry {
+ file,
+ buffer_offset,
+ close_on_free,
+ },
+ GFP_KERNEL,
+ )?;
+
+ Ok(())
+ }
+
+ pub(crate) fn set_info_close_on_free(&mut self, cof: FdsCloseOnFree) {
+ self.get_or_init_info().file_list.close_on_free = cof.0;
+ }
+
+ pub(crate) fn translate_fds(&mut self) -> Result<TranslatedFds> {
+ let file_list = match self.allocation_info.as_mut() {
+ Some(info) => &mut info.file_list,
+ None => return Ok(TranslatedFds::new()),
+ };
+
+ let files = core::mem::take(&mut file_list.files_to_translate);
+
+ let num_close_on_free = files.iter().filter(|entry| entry.close_on_free).count();
+ let mut close_on_free = KVec::with_capacity(num_close_on_free, GFP_KERNEL)?;
+
+ let mut reservations = KVec::with_capacity(files.len(), GFP_KERNEL)?;
+ for file_info in files {
+ let res = FileDescriptorReservation::get_unused_fd_flags(bindings::O_CLOEXEC)?;
+ let fd = res.reserved_fd();
+ self.write::<u32>(file_info.buffer_offset, &fd)?;
+
+ reservations.push(
+ Reservation {
+ res,
+ file: file_info.file,
+ },
+ GFP_KERNEL,
+ )?;
+ if file_info.close_on_free {
+ close_on_free.push(fd, GFP_KERNEL)?;
+ }
+ }
+
+ Ok(TranslatedFds {
+ reservations,
+ close_on_free: FdsCloseOnFree(close_on_free),
+ })
+ }
+
+ /// Should the looper return to userspace when freeing this allocation?
+ pub(crate) fn looper_need_return_on_free(&self) -> bool {
+ // Closing fds involves pushing task_work for execution when we return to userspace. Hence,
+ // we should return to userspace asap if we are closing fds.
+ match self.allocation_info {
+ Some(ref info) => !info.file_list.close_on_free.is_empty(),
+ None => false,
+ }
+ }
+}
+
+impl Drop for Allocation {
+ fn drop(&mut self) {
+ if !self.free_on_drop {
+ return;
+ }
+
+ if let Some(mut info) = self.allocation_info.take() {
+ if let Some(oneway_node) = info.oneway_node.as_ref() {
+ oneway_node.pending_oneway_finished();
+ }
+
+ info.target_node = None;
+
+ if let Some(offsets) = info.offsets.clone() {
+ let view = AllocationView::new(self, offsets.start);
+ for i in offsets.step_by(size_of::<usize>()) {
+ if view.cleanup_object(i).is_err() {
+ pr_warn!("Error cleaning up object at offset {}\n", i)
+ }
+ }
+ }
+
+ for &fd in &info.file_list.close_on_free {
+ let closer = match DeferredFdCloser::new(GFP_KERNEL) {
+ Ok(closer) => closer,
+ Err(kernel::alloc::AllocError) => {
+ // Ignore allocation failures.
+ break;
+ }
+ };
+
+ // Here, we ignore errors. The operation can fail if the fd is not valid, or if the
+ // method is called from a kthread. However, this is always called from a syscall,
+ // so the latter case cannot happen, and we don't care about the first case.
+ let _ = closer.close_fd(fd);
+ }
+
+ if info.clear_on_free {
+ if let Err(e) = self.fill_zero() {
+ pr_warn!("Failed to clear data on free: {:?}", e);
+ }
+ }
+ }
+
+ self.process.buffer_raw_free(self.ptr);
+ }
+}
+
+/// A wrapper around `Allocation` that is being created.
+///
+/// If the allocation is destroyed while wrapped in this wrapper, then the allocation will be
+/// considered to be part of a failed transaction. Successful transactions avoid that by calling
+/// `success`, which skips the destructor.
+#[repr(transparent)]
+pub(crate) struct NewAllocation(pub(crate) Allocation);
+
+impl NewAllocation {
+ pub(crate) fn success(self) -> Allocation {
+ // This skips the destructor.
+ //
+ // SAFETY: This type is `#[repr(transparent)]`, so the layout matches.
+ unsafe { core::mem::transmute(self) }
+ }
+}
+
+impl core::ops::Deref for NewAllocation {
+ type Target = Allocation;
+ fn deref(&self) -> &Allocation {
+ &self.0
+ }
+}
+
+impl core::ops::DerefMut for NewAllocation {
+ fn deref_mut(&mut self) -> &mut Allocation {
+ &mut self.0
+ }
+}
+
+/// A view into the beginning of an allocation.
+///
+/// All attempts to read or write outside of the view will fail. To intentionally access outside of
+/// this view, use the `alloc` field of this struct directly.
+pub(crate) struct AllocationView<'a> {
+ pub(crate) alloc: &'a mut Allocation,
+ limit: usize,
+}
+
+impl<'a> AllocationView<'a> {
+ pub(crate) fn new(alloc: &'a mut Allocation, limit: usize) -> Self {
+ AllocationView { alloc, limit }
+ }
+
+ pub(crate) fn read<T: FromBytes>(&self, offset: usize) -> Result<T> {
+ if offset.checked_add(size_of::<T>()).ok_or(EINVAL)? > self.limit {
+ return Err(EINVAL);
+ }
+ self.alloc.read(offset)
+ }
+
+ pub(crate) fn write<T: AsBytes>(&self, offset: usize, obj: &T) -> Result {
+ if offset.checked_add(size_of::<T>()).ok_or(EINVAL)? > self.limit {
+ return Err(EINVAL);
+ }
+ self.alloc.write(offset, obj)
+ }
+
+ pub(crate) fn copy_into(
+ &self,
+ reader: &mut UserSliceReader,
+ offset: usize,
+ size: usize,
+ ) -> Result {
+ if offset.checked_add(size).ok_or(EINVAL)? > self.limit {
+ return Err(EINVAL);
+ }
+ self.alloc.copy_into(reader, offset, size)
+ }
+
+ pub(crate) fn transfer_binder_object(
+ &self,
+ offset: usize,
+ obj: &uapi::flat_binder_object,
+ strong: bool,
+ node_ref: NodeRef,
+ ) -> Result {
+ let mut newobj = FlatBinderObject::default();
+ let node = node_ref.node.clone();
+ if Arc::ptr_eq(&node_ref.node.owner, &self.alloc.process) {
+ // The receiving process is the owner of the node, so send it a binder object (instead
+ // of a handle).
+ let (ptr, cookie) = node.get_id();
+ newobj.hdr.type_ = if strong {
+ BINDER_TYPE_BINDER
+ } else {
+ BINDER_TYPE_WEAK_BINDER
+ };
+ newobj.flags = obj.flags;
+ newobj.__bindgen_anon_1.binder = ptr as _;
+ newobj.cookie = cookie as _;
+ self.write(offset, &newobj)?;
+ // Increment the user ref count on the node. It will be decremented as part of the
+ // destruction of the buffer, when we see a binder or weak-binder object.
+ node.update_refcount(true, 1, strong);
+ } else {
+ // The receiving process is different from the owner, so we need to insert a handle to
+ // the binder object.
+ let handle = self
+ .alloc
+ .process
+ .as_arc_borrow()
+ .insert_or_update_handle(node_ref, false)?;
+ newobj.hdr.type_ = if strong {
+ BINDER_TYPE_HANDLE
+ } else {
+ BINDER_TYPE_WEAK_HANDLE
+ };
+ newobj.flags = obj.flags;
+ newobj.__bindgen_anon_1.handle = handle;
+ if self.write(offset, &newobj).is_err() {
+ // Decrement ref count on the handle we just created.
+ let _ = self
+ .alloc
+ .process
+ .as_arc_borrow()
+ .update_ref(handle, false, strong);
+ return Err(EINVAL);
+ }
+ }
+
+ Ok(())
+ }
+
+ fn cleanup_object(&self, index_offset: usize) -> Result {
+ let offset = self.alloc.read(index_offset)?;
+ let header = self.read::<BinderObjectHeader>(offset)?;
+ match header.type_ {
+ BINDER_TYPE_WEAK_BINDER | BINDER_TYPE_BINDER => {
+ let obj = self.read::<FlatBinderObject>(offset)?;
+ let strong = header.type_ == BINDER_TYPE_BINDER;
+ // SAFETY: The type is `BINDER_TYPE_{WEAK_}BINDER`, so the `binder` field is
+ // populated.
+ let ptr = unsafe { obj.__bindgen_anon_1.binder };
+ let cookie = obj.cookie;
+ self.alloc.process.update_node(ptr, cookie, strong);
+ Ok(())
+ }
+ BINDER_TYPE_WEAK_HANDLE | BINDER_TYPE_HANDLE => {
+ let obj = self.read::<FlatBinderObject>(offset)?;
+ let strong = header.type_ == BINDER_TYPE_HANDLE;
+ // SAFETY: The type is `BINDER_TYPE_{WEAK_}HANDLE`, so the `handle` field is
+ // populated.
+ let handle = unsafe { obj.__bindgen_anon_1.handle };
+ self.alloc
+ .process
+ .as_arc_borrow()
+ .update_ref(handle, false, strong)
+ }
+ _ => Ok(()),
+ }
+ }
+}
+
+/// A binder object as it is serialized.
+///
+/// # Invariants
+///
+/// All bytes must be initialized, and the value of `self.hdr.type_` must be one of the allowed
+/// types.
+#[repr(C)]
+pub(crate) union BinderObject {
+ hdr: uapi::binder_object_header,
+ fbo: uapi::flat_binder_object,
+ fdo: uapi::binder_fd_object,
+ bbo: uapi::binder_buffer_object,
+ fdao: uapi::binder_fd_array_object,
+}
+
+/// A view into a `BinderObject` that can be used in a match statement.
+pub(crate) enum BinderObjectRef<'a> {
+ Binder(&'a mut uapi::flat_binder_object),
+ Handle(&'a mut uapi::flat_binder_object),
+ Fd(&'a mut uapi::binder_fd_object),
+ Ptr(&'a mut uapi::binder_buffer_object),
+ Fda(&'a mut uapi::binder_fd_array_object),
+}
+
+impl BinderObject {
+ pub(crate) fn read_from(reader: &mut UserSliceReader) -> Result<BinderObject> {
+ let object = Self::read_from_inner(|slice| {
+ let read_len = usize::min(slice.len(), reader.len());
+ reader.clone_reader().read_slice(&mut slice[..read_len])?;
+ Ok(())
+ })?;
+
+ // If we used an object type smaller than the largest object size, then we've read more
+ // bytes than we needed to. However, we used `.clone_reader()` to avoid advancing the
+ // original reader. Now, we call `skip` so that the caller's reader is advanced by the
+ // right amount.
+ //
+ // The `skip` call fails if the reader doesn't have `size` bytes available. This could
+ // happen if the type header corresponds to an object type that is larger than the rest of
+ // the reader.
+ //
+ // Any extra bytes beyond the size of the object are inaccessible after this call, so
+ // reading them again from the `reader` later does not result in TOCTOU bugs.
+ reader.skip(object.size())?;
+
+ Ok(object)
+ }
+
+ /// Use the provided reader closure to construct a `BinderObject`.
+ ///
+ /// The closure should write the bytes for the object into the provided slice.
+ pub(crate) fn read_from_inner<R>(reader: R) -> Result<BinderObject>
+ where
+ R: FnOnce(&mut [u8; size_of::<BinderObject>()]) -> Result<()>,
+ {
+ let mut obj = MaybeUninit::<BinderObject>::zeroed();
+
+ // SAFETY: The lengths of `BinderObject` and `[u8; size_of::<BinderObject>()]` are equal,
+ // and the byte array has an alignment requirement of one, so the pointer cast is okay.
+ // Additionally, `obj` was initialized to zeros, so the byte array will not be
+ // uninitialized.
+ (reader)(unsafe { &mut *obj.as_mut_ptr().cast() })?;
+
+ // SAFETY: The entire object is initialized, so accessing this field is safe.
+ let type_ = unsafe { obj.assume_init_ref().hdr.type_ };
+ if Self::type_to_size(type_).is_none() {
+ // The value of `obj.hdr.type_` was invalid.
+ return Err(EINVAL);
+ }
+
+ // SAFETY: All bytes are initialized (since we zeroed them at the start) and we checked
+ // that `self.hdr.type_` is one of the allowed types, so the type invariants are satisfied.
+ unsafe { Ok(obj.assume_init()) }
+ }
+
+ pub(crate) fn as_ref(&mut self) -> BinderObjectRef<'_> {
+ use BinderObjectRef::*;
+ // SAFETY: The constructor ensures that all bytes of `self` are initialized, and all
+ // variants of this union accept all initialized bit patterns.
+ unsafe {
+ match self.hdr.type_ {
+ BINDER_TYPE_WEAK_BINDER | BINDER_TYPE_BINDER => Binder(&mut self.fbo),
+ BINDER_TYPE_WEAK_HANDLE | BINDER_TYPE_HANDLE => Handle(&mut self.fbo),
+ BINDER_TYPE_FD => Fd(&mut self.fdo),
+ BINDER_TYPE_PTR => Ptr(&mut self.bbo),
+ BINDER_TYPE_FDA => Fda(&mut self.fdao),
+ // SAFETY: By the type invariant, the value of `self.hdr.type_` cannot have any
+ // other value than the ones checked above.
+ _ => core::hint::unreachable_unchecked(),
+ }
+ }
+ }
+
+ pub(crate) fn size(&self) -> usize {
+ // SAFETY: The entire object is initialized, so accessing this field is safe.
+ let type_ = unsafe { self.hdr.type_ };
+
+ // SAFETY: The type invariants guarantee that the type field is correct.
+ unsafe { Self::type_to_size(type_).unwrap_unchecked() }
+ }
+
+ fn type_to_size(type_: u32) -> Option<usize> {
+ match type_ {
+ BINDER_TYPE_WEAK_BINDER => Some(size_of::<uapi::flat_binder_object>()),
+ BINDER_TYPE_BINDER => Some(size_of::<uapi::flat_binder_object>()),
+ BINDER_TYPE_WEAK_HANDLE => Some(size_of::<uapi::flat_binder_object>()),
+ BINDER_TYPE_HANDLE => Some(size_of::<uapi::flat_binder_object>()),
+ BINDER_TYPE_FD => Some(size_of::<uapi::binder_fd_object>()),
+ BINDER_TYPE_PTR => Some(size_of::<uapi::binder_buffer_object>()),
+ BINDER_TYPE_FDA => Some(size_of::<uapi::binder_fd_array_object>()),
+ _ => None,
+ }
+ }
+}
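+
+// Illustrative sketch (assumption): `read_from_inner` can also be driven from an in-kernel byte
+// buffer rather than a `UserSliceReader`, and `as_ref` then dispatches on the header type:
+//
+//     let mut obj = BinderObject::read_from_inner(|bytes| {
+//         bytes[..src.len()].copy_from_slice(src); // `src: &[u8]` holding a serialized object
+//         Ok(())
+//     })?;
+//     if let BinderObjectRef::Fd(fdo) = obj.as_ref() {
+//         /* translate the fd object */
+//     }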
+
+#[derive(Default)]
+struct FileList {
+ files_to_translate: KVec<FileEntry>,
+ close_on_free: KVec<u32>,
+}
+
+struct FileEntry {
+ /// The file for which a descriptor will be created in the recipient process.
+ file: ARef<File>,
+ /// The offset in the buffer where the file descriptor is stored.
+ buffer_offset: usize,
+ /// Whether this fd should be closed when the allocation is freed.
+ close_on_free: bool,
+}
+
+pub(crate) struct TranslatedFds {
+ reservations: KVec<Reservation>,
+ /// If commit is called, then these fds should be closed. (If commit is not called, then they
+ /// shouldn't be closed.)
+ close_on_free: FdsCloseOnFree,
+}
+
+struct Reservation {
+ res: FileDescriptorReservation,
+ file: ARef<File>,
+}
+
+impl TranslatedFds {
+ pub(crate) fn new() -> Self {
+ Self {
+ reservations: KVec::new(),
+ close_on_free: FdsCloseOnFree(KVec::new()),
+ }
+ }
+
+ pub(crate) fn commit(self) -> FdsCloseOnFree {
+ for entry in self.reservations {
+ entry.res.fd_install(entry.file);
+ }
+
+ self.close_on_free
+ }
+}
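+
+// Intended use (a sketch inferred from the types above): hold the fd reservations until the
+// transaction can no longer fail, and only then install them. Dropping `TranslatedFds` without
+// calling `commit` releases the reserved fds without installing anything:
+//
+//     let translated: TranslatedFds = /* produced while translating the buffer */;
+//     /* ... the transaction is committed ... */
+//     let close_on_free = translated.commit(); // fd_install() each reservation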
+
+pub(crate) struct FdsCloseOnFree(KVec<u32>);
diff --git a/drivers/android/binder/context.rs b/drivers/android/binder/context.rs
new file mode 100644
index 000000000000..3d135ec03ca7
--- /dev/null
+++ b/drivers/android/binder/context.rs
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::{
+ error::Error,
+ list::{List, ListArc, ListLinks},
+ prelude::*,
+ security,
+ str::{CStr, CString},
+ sync::{Arc, Mutex},
+ task::Kuid,
+};
+
+use crate::{error::BinderError, node::NodeRef, process::Process};
+
+kernel::sync::global_lock! {
+ // SAFETY: We call `init` in the module initializer, so it's initialized before first use.
+ pub(crate) unsafe(uninit) static CONTEXTS: Mutex<ContextList> = ContextList {
+ list: List::new(),
+ };
+}
+
+pub(crate) struct ContextList {
+ list: List<Context>,
+}
+
+pub(crate) fn get_all_contexts() -> Result<KVec<Arc<Context>>> {
+ let lock = CONTEXTS.lock();
+
+ let count = lock.list.iter().count();
+
+ let mut ctxs = KVec::with_capacity(count, GFP_KERNEL)?;
+ for ctx in &lock.list {
+ ctxs.push(Arc::from(ctx), GFP_KERNEL)?;
+ }
+ Ok(ctxs)
+}
+
+/// This struct keeps track of the processes using this context, and which process is the context
+/// manager.
+struct Manager {
+ node: Option<NodeRef>,
+ uid: Option<Kuid>,
+ all_procs: List<Process>,
+}
+
+/// There is one context per binder file (/dev/binder, /dev/hwbinder, etc.).
+#[pin_data]
+pub(crate) struct Context {
+ #[pin]
+ manager: Mutex<Manager>,
+ pub(crate) name: CString,
+ #[pin]
+ links: ListLinks,
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for Context { untracked; }
+}
+kernel::list::impl_list_item! {
+ impl ListItem<0> for Context {
+ using ListLinks { self.links };
+ }
+}
+
+impl Context {
+ pub(crate) fn new(name: &CStr) -> Result<Arc<Self>> {
+ let name = CString::try_from(name)?;
+ let list_ctx = ListArc::pin_init::<Error>(
+ try_pin_init!(Context {
+ name,
+ links <- ListLinks::new(),
+ manager <- kernel::new_mutex!(Manager {
+ all_procs: List::new(),
+ node: None,
+ uid: None,
+ }, "Context::manager"),
+ }),
+ GFP_KERNEL,
+ )?;
+
+ let ctx = list_ctx.clone_arc();
+ CONTEXTS.lock().list.push_back(list_ctx);
+
+ Ok(ctx)
+ }
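+
+ // A minimal usage sketch (assumption): one context is created per binder device node and
+ // deregistered when that node is unlinked:
+ //
+ //     let ctx = Context::new(c_str!("binder"))?; // registered in CONTEXTS
+ //     /* ... */
+ //     ctx.deregister(); // no-op if called twice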
+
+ /// Called when the file for this context is unlinked.
+ ///
+ /// No-op if called twice.
+ pub(crate) fn deregister(&self) {
+ // SAFETY: We never add the context to any other linked list than this one, so it is either
+ // in this list, or not in any list.
+ unsafe { CONTEXTS.lock().list.remove(self) };
+ }
+
+ pub(crate) fn register_process(self: &Arc<Self>, proc: ListArc<Process>) {
+ if !Arc::ptr_eq(self, &proc.ctx) {
+ pr_err!("Context::register_process called on the wrong context.");
+ return;
+ }
+ self.manager.lock().all_procs.push_back(proc);
+ }
+
+ pub(crate) fn deregister_process(self: &Arc<Self>, proc: &Process) {
+ if !Arc::ptr_eq(self, &proc.ctx) {
+ pr_err!("Context::deregister_process called on the wrong context.");
+ return;
+ }
+ // SAFETY: We just checked that this is the right list.
+ unsafe { self.manager.lock().all_procs.remove(proc) };
+ }
+
+ pub(crate) fn set_manager_node(&self, node_ref: NodeRef) -> Result {
+ let mut manager = self.manager.lock();
+ if manager.node.is_some() {
+ pr_warn!("BINDER_SET_CONTEXT_MGR already set");
+ return Err(EBUSY);
+ }
+ security::binder_set_context_mgr(&node_ref.node.owner.cred)?;
+
+ // If the context manager has been set before, ensure that we use the same euid.
+ let caller_uid = Kuid::current_euid();
+ if let Some(ref uid) = manager.uid {
+ if *uid != caller_uid {
+ return Err(EPERM);
+ }
+ }
+
+ manager.node = Some(node_ref);
+ manager.uid = Some(caller_uid);
+ Ok(())
+ }
+
+ pub(crate) fn unset_manager_node(&self) {
+ let node_ref = self.manager.lock().node.take();
+ drop(node_ref);
+ }
+
+ pub(crate) fn get_manager_node(&self, strong: bool) -> Result<NodeRef, BinderError> {
+ self.manager
+ .lock()
+ .node
+ .as_ref()
+ .ok_or_else(BinderError::new_dead)?
+ .clone(strong)
+ .map_err(BinderError::from)
+ }
+
+ pub(crate) fn for_each_proc<F>(&self, mut func: F)
+ where
+ F: FnMut(&Process),
+ {
+ let lock = self.manager.lock();
+ for proc in &lock.all_procs {
+ func(&proc);
+ }
+ }
+
+ pub(crate) fn get_all_procs(&self) -> Result<KVec<Arc<Process>>> {
+ let lock = self.manager.lock();
+ let count = lock.all_procs.iter().count();
+
+ let mut procs = KVec::with_capacity(count, GFP_KERNEL)?;
+ for proc in &lock.all_procs {
+ procs.push(Arc::from(proc), GFP_KERNEL)?;
+ }
+ Ok(procs)
+ }
+
+ pub(crate) fn get_procs_with_pid(&self, pid: i32) -> Result<KVec<Arc<Process>>> {
+ let orig = self.get_all_procs()?;
+ let mut backing = KVec::with_capacity(orig.len(), GFP_KERNEL)?;
+ for proc in orig.into_iter().filter(|proc| proc.task.pid() == pid) {
+ backing.push(proc, GFP_KERNEL)?;
+ }
+ Ok(backing)
+ }
+}
diff --git a/drivers/android/binder/deferred_close.rs b/drivers/android/binder/deferred_close.rs
new file mode 100644
index 000000000000..ac895c04d0cb
--- /dev/null
+++ b/drivers/android/binder/deferred_close.rs
@@ -0,0 +1,204 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+//! Logic for closing files in a deferred manner.
+//!
+//! It could make sense to have this logic in `kernel::fs`, but it was rejected for being too
+//! Binder-specific.
+
+use core::mem::MaybeUninit;
+use kernel::{
+ alloc::{AllocError, Flags},
+ bindings,
+ prelude::*,
+};
+
+/// Helper used for closing file descriptors in a way that is safe even if the file is currently
+/// held using `fdget`.
+///
+/// Additional motivation can be found in commit 80cd795630d6 ("binder: fix use-after-free due to
+/// ksys_close() during fdget()") and in the comments on `binder_do_fd_close`.
+pub(crate) struct DeferredFdCloser {
+ inner: KBox<DeferredFdCloserInner>,
+}
+
+/// SAFETY: This just holds an allocation with no real content, so there's no safety issue with
+/// moving it across threads.
+unsafe impl Send for DeferredFdCloser {}
+/// SAFETY: This just holds an allocation with no real content, so there's no safety issue with
+/// sharing it across threads.
+unsafe impl Sync for DeferredFdCloser {}
+
+/// # Invariants
+///
+/// If the `file` pointer is non-null, then it points at a `struct file` and owns a refcount to
+/// that file.
+#[repr(C)]
+struct DeferredFdCloserInner {
+ twork: MaybeUninit<bindings::callback_head>,
+ file: *mut bindings::file,
+}
+
+impl DeferredFdCloser {
+ /// Create a new [`DeferredFdCloser`].
+ pub(crate) fn new(flags: Flags) -> Result<Self, AllocError> {
+ Ok(Self {
+ // INVARIANT: The `file` pointer is null, so the type invariant does not apply.
+ inner: KBox::new(
+ DeferredFdCloserInner {
+ twork: MaybeUninit::uninit(),
+ file: core::ptr::null_mut(),
+ },
+ flags,
+ )?,
+ })
+ }
+
+ /// Schedule a task work that closes the file descriptor when this task returns to userspace.
+ ///
+ /// Fails if this is called from a context where we cannot run work when returning to
+ /// userspace. (E.g., from a kthread.)
+ pub(crate) fn close_fd(self, fd: u32) -> Result<(), DeferredFdCloseError> {
+ use bindings::task_work_notify_mode_TWA_RESUME as TWA_RESUME;
+
+ // In this method, we schedule the task work before closing the file. This is because
+ // scheduling a task work is fallible, and we need to know whether it will fail before we
+ // attempt to close the file.
+
+ // Task works are not available on kthreads.
+ let current = kernel::current!();
+
+ // Check if this is a kthread.
+ // SAFETY: Reading `flags` from a task is always okay.
+ if unsafe { ((*current.as_ptr()).flags & bindings::PF_KTHREAD) != 0 } {
+ return Err(DeferredFdCloseError::TaskWorkUnavailable);
+ }
+
+ // Transfer ownership of the box's allocation to a raw pointer. This disables the
+ // destructor, so we must manually convert it back to a KBox to drop it.
+ //
+ // Until we convert it back to a `KBox`, there are no aliasing requirements on this
+ // pointer.
+ let inner = KBox::into_raw(self.inner);
+
+ // The `twork` field (a `callback_head`) is first in the `#[repr(C)]` struct, so this cast
+ // correctly gives us a pointer to that field.
+ let callback_head = inner.cast::<bindings::callback_head>();
+ // SAFETY: This pointer offset operation does not go out-of-bounds.
+ let file_field = unsafe { core::ptr::addr_of_mut!((*inner).file) };
+
+ let current = current.as_ptr();
+
+ // SAFETY: This function currently has exclusive access to the `DeferredFdCloserInner`, so
+ // it is okay for us to perform unsynchronized writes to its `callback_head` field.
+ unsafe { bindings::init_task_work(callback_head, Some(Self::do_close_fd)) };
+
+ // SAFETY: This inserts the `DeferredFdCloserInner` into the task workqueue for the current
+ // task. If this operation is successful, then this transfers exclusive ownership of the
+ // `callback_head` field to the C side until it calls `do_close_fd`, and we don't touch or
+ // invalidate the field during that time.
+ //
+ // When the C side calls `do_close_fd`, the safety requirements of that method are
+ // satisfied because when a task work is executed, the callback is given ownership of the
+ // pointer.
+ //
+ // The file pointer is currently null. If it is changed to be non-null before `do_close_fd`
+ // is called, then that change happens due to the write at the end of this function, and
+ // that write has a safety comment that explains why the refcount can be dropped when
+ // `do_close_fd` runs.
+ let res = unsafe { bindings::task_work_add(current, callback_head, TWA_RESUME) };
+
+ if res != 0 {
+ // SAFETY: Scheduling the task work failed, so we still have ownership of the box, so
+ // we may destroy it.
+ unsafe { drop(KBox::from_raw(inner)) };
+
+ return Err(DeferredFdCloseError::TaskWorkUnavailable);
+ }
+
+ // This removes the fd from the fd table in `current`. The file is not fully closed until
+ // `filp_close` is called. We are given ownership of one refcount to the file.
+ //
+ // SAFETY: This is safe no matter what `fd` is. If the `fd` is valid (that is, if the
+ // pointer is non-null), then we call `filp_close` on the returned pointer as required by
+ // `file_close_fd`.
+ let file = unsafe { bindings::file_close_fd(fd) };
+ if file.is_null() {
+ // We don't clean up the task work since that might be expensive if the task work queue
+ // is long. Just let it execute and let it clean up for itself.
+ return Err(DeferredFdCloseError::BadFd);
+ }
+
+ // Acquire a second refcount to the file.
+ //
+ // SAFETY: The `file` pointer points at a file with a non-zero refcount.
+ unsafe { bindings::get_file(file) };
+
+ // This method closes the fd, consuming one of our two refcounts. There could be active
+ // light refcounts created from that fd, so we must ensure that the file has a positive
+ // refcount for the duration of those active light refcounts. We do that by holding on to
+ // the second refcount until the current task returns to userspace.
+ //
+ // SAFETY: The `file` pointer is valid. Passing `current->files` as the file table to close
+ // it in is correct, since we just got the `fd` from `file_close_fd` which also uses
+ // `current->files`.
+ //
+ // Note: fl_owner_t is currently a void pointer.
+ unsafe { bindings::filp_close(file, (*current).files as bindings::fl_owner_t) };
+
+ // We update the file pointer that the task work is supposed to fput. This transfers
+ // ownership of our last refcount.
+ //
+ // INVARIANT: This changes the `file` field of a `DeferredFdCloserInner` from null to
+ // non-null. This doesn't break the type invariant for `DeferredFdCloserInner` because we
+ // still own a refcount to the file, so we can pass ownership of that refcount to the
+ // `DeferredFdCloserInner`.
+ //
+ // When `do_close_fd` runs, it must be safe for it to `fput` the refcount. However, this is
+ // the case because all light refcounts that are associated with the fd we closed
+ // previously must be dropped when `do_close_fd` runs, since light refcounts must be dropped
+ // before returning to userspace.
+ //
+ // SAFETY: Task works are executed on the current thread right before we return to
+ // userspace, so this write is guaranteed to happen before `do_close_fd` is called, which
+ // means that a race is not possible here.
+ unsafe { *file_field = file };
+
+ Ok(())
+ }
+
+ /// # Safety
+ ///
+ /// The provided pointer must point at the `twork` field of a `DeferredFdCloserInner` stored in
+ /// a `KBox`, and the caller must pass exclusive ownership of that `KBox`. Furthermore, if the
+ /// file pointer is non-null, then it must be okay to release the refcount by calling `fput`.
+ unsafe extern "C" fn do_close_fd(inner: *mut bindings::callback_head) {
+ // SAFETY: The caller just passed us ownership of this box.
+ let inner = unsafe { KBox::from_raw(inner.cast::<DeferredFdCloserInner>()) };
+ if !inner.file.is_null() {
+ // SAFETY: By the type invariants, we own a refcount to this file, and the caller
+ // guarantees that dropping the refcount now is okay.
+ unsafe { bindings::fput(inner.file) };
+ }
+ // The allocation is freed when `inner` goes out of scope.
+ }
+}
+
+/// Represents a failure to close an fd in a deferred manner.
+#[derive(Copy, Clone, Debug, Eq, PartialEq)]
+pub(crate) enum DeferredFdCloseError {
+ /// Closing the fd failed because we were unable to schedule a task work.
+ TaskWorkUnavailable,
+ /// Closing the fd failed because the fd does not exist.
+ BadFd,
+}
+
+impl From<DeferredFdCloseError> for Error {
+ fn from(err: DeferredFdCloseError) -> Error {
+ match err {
+ DeferredFdCloseError::TaskWorkUnavailable => ESRCH,
+ DeferredFdCloseError::BadFd => EBADF,
+ }
+ }
+}
diff --git a/drivers/android/binder/defs.rs b/drivers/android/binder/defs.rs
new file mode 100644
index 000000000000..33f51b4139c7
--- /dev/null
+++ b/drivers/android/binder/defs.rs
@@ -0,0 +1,182 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use core::mem::MaybeUninit;
+use core::ops::{Deref, DerefMut};
+use kernel::{
+ transmute::{AsBytes, FromBytes},
+ uapi::{self, *},
+};
+
+macro_rules! pub_no_prefix {
+ ($prefix:ident, $($newname:ident),+ $(,)?) => {
+ $(pub(crate) const $newname: u32 = kernel::macros::concat_idents!($prefix, $newname);)+
+ };
+}
+
+pub_no_prefix!(
+ binder_driver_return_protocol_,
+ BR_TRANSACTION,
+ BR_TRANSACTION_SEC_CTX,
+ BR_REPLY,
+ BR_DEAD_REPLY,
+ BR_FAILED_REPLY,
+ BR_FROZEN_REPLY,
+ BR_NOOP,
+ BR_SPAWN_LOOPER,
+ BR_TRANSACTION_COMPLETE,
+ BR_TRANSACTION_PENDING_FROZEN,
+ BR_ONEWAY_SPAM_SUSPECT,
+ BR_OK,
+ BR_ERROR,
+ BR_INCREFS,
+ BR_ACQUIRE,
+ BR_RELEASE,
+ BR_DECREFS,
+ BR_DEAD_BINDER,
+ BR_CLEAR_DEATH_NOTIFICATION_DONE,
+ BR_FROZEN_BINDER,
+ BR_CLEAR_FREEZE_NOTIFICATION_DONE,
+);
+
+pub_no_prefix!(
+ binder_driver_command_protocol_,
+ BC_TRANSACTION,
+ BC_TRANSACTION_SG,
+ BC_REPLY,
+ BC_REPLY_SG,
+ BC_FREE_BUFFER,
+ BC_ENTER_LOOPER,
+ BC_EXIT_LOOPER,
+ BC_REGISTER_LOOPER,
+ BC_INCREFS,
+ BC_ACQUIRE,
+ BC_RELEASE,
+ BC_DECREFS,
+ BC_INCREFS_DONE,
+ BC_ACQUIRE_DONE,
+ BC_REQUEST_DEATH_NOTIFICATION,
+ BC_CLEAR_DEATH_NOTIFICATION,
+ BC_DEAD_BINDER_DONE,
+ BC_REQUEST_FREEZE_NOTIFICATION,
+ BC_CLEAR_FREEZE_NOTIFICATION,
+ BC_FREEZE_NOTIFICATION_DONE,
+);
+
+pub_no_prefix!(
+ flat_binder_object_flags_,
+ FLAT_BINDER_FLAG_ACCEPTS_FDS,
+ FLAT_BINDER_FLAG_TXN_SECURITY_CTX
+);
+
+pub_no_prefix!(
+ transaction_flags_,
+ TF_ONE_WAY,
+ TF_ACCEPT_FDS,
+ TF_CLEAR_BUF,
+ TF_UPDATE_TXN
+);
+
+pub(crate) use uapi::{
+ BINDER_TYPE_BINDER, BINDER_TYPE_FD, BINDER_TYPE_FDA, BINDER_TYPE_HANDLE, BINDER_TYPE_PTR,
+ BINDER_TYPE_WEAK_BINDER, BINDER_TYPE_WEAK_HANDLE,
+};
+
+macro_rules! decl_wrapper {
+ ($newname:ident, $wrapped:ty) => {
+ // Define a wrapper around the C type. Use `MaybeUninit` to enforce that the value of
+ // padding bytes must be preserved.
+ #[derive(Copy, Clone)]
+ #[repr(transparent)]
+ pub(crate) struct $newname(MaybeUninit<$wrapped>);
+
+ // SAFETY: This macro is only used with types where this is ok.
+ unsafe impl FromBytes for $newname {}
+ // SAFETY: This macro is only used with types where this is ok.
+ unsafe impl AsBytes for $newname {}
+
+ impl Deref for $newname {
+ type Target = $wrapped;
+ fn deref(&self) -> &Self::Target {
+ // SAFETY: We use `MaybeUninit` only to preserve padding. The value must still
+ // always be valid.
+ unsafe { self.0.assume_init_ref() }
+ }
+ }
+
+ impl DerefMut for $newname {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ // SAFETY: We use `MaybeUninit` only to preserve padding. The value must still
+ // always be valid.
+ unsafe { self.0.assume_init_mut() }
+ }
+ }
+
+ impl Default for $newname {
+ fn default() -> Self {
+ // Create a new value of this type where all bytes (including padding) are zeroed.
+ Self(MaybeUninit::zeroed())
+ }
+ }
+ };
+}
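+
+// For example, `decl_wrapper!(BinderVersion, uapi::binder_version)` expands (roughly) to:
+//
+//     #[derive(Copy, Clone)]
+//     #[repr(transparent)]
+//     pub(crate) struct BinderVersion(MaybeUninit<uapi::binder_version>);
+//
+// plus the `FromBytes`/`AsBytes` impls and the `Deref`/`DerefMut`/`Default` impls shown above.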
+
+decl_wrapper!(BinderNodeDebugInfo, uapi::binder_node_debug_info);
+decl_wrapper!(BinderNodeInfoForRef, uapi::binder_node_info_for_ref);
+decl_wrapper!(FlatBinderObject, uapi::flat_binder_object);
+decl_wrapper!(BinderFdObject, uapi::binder_fd_object);
+decl_wrapper!(BinderFdArrayObject, uapi::binder_fd_array_object);
+decl_wrapper!(BinderObjectHeader, uapi::binder_object_header);
+decl_wrapper!(BinderBufferObject, uapi::binder_buffer_object);
+decl_wrapper!(BinderTransactionData, uapi::binder_transaction_data);
+decl_wrapper!(
+ BinderTransactionDataSecctx,
+ uapi::binder_transaction_data_secctx
+);
+decl_wrapper!(BinderTransactionDataSg, uapi::binder_transaction_data_sg);
+decl_wrapper!(BinderWriteRead, uapi::binder_write_read);
+decl_wrapper!(BinderVersion, uapi::binder_version);
+decl_wrapper!(BinderFrozenStatusInfo, uapi::binder_frozen_status_info);
+decl_wrapper!(BinderFreezeInfo, uapi::binder_freeze_info);
+decl_wrapper!(BinderFrozenStateInfo, uapi::binder_frozen_state_info);
+decl_wrapper!(BinderHandleCookie, uapi::binder_handle_cookie);
+decl_wrapper!(ExtendedError, uapi::binder_extended_error);
+
+impl BinderVersion {
+ pub(crate) fn current() -> Self {
+ Self(MaybeUninit::new(uapi::binder_version {
+ protocol_version: BINDER_CURRENT_PROTOCOL_VERSION as _,
+ }))
+ }
+}
+
+impl BinderTransactionData {
+ pub(crate) fn with_buffers_size(self, buffers_size: u64) -> BinderTransactionDataSg {
+ BinderTransactionDataSg(MaybeUninit::new(uapi::binder_transaction_data_sg {
+ transaction_data: *self,
+ buffers_size,
+ }))
+ }
+}
+
+impl BinderTransactionDataSecctx {
+ /// View the inner data as wrapped in `BinderTransactionData`.
+ pub(crate) fn tr_data(&mut self) -> &mut BinderTransactionData {
+ // SAFETY: `BinderTransactionData` is a `#[repr(transparent)]` wrapper around
+ // `uapi::binder_transaction_data`, so the pointer cast is sound.
+ unsafe {
+ &mut *(&mut self.transaction_data as *mut uapi::binder_transaction_data
+ as *mut BinderTransactionData)
+ }
+ }
+}
+
+impl ExtendedError {
+ pub(crate) fn new(id: u32, command: u32, param: i32) -> Self {
+ Self(MaybeUninit::new(uapi::binder_extended_error {
+ id,
+ command,
+ param,
+ }))
+ }
+}
diff --git a/drivers/android/binder/error.rs b/drivers/android/binder/error.rs
new file mode 100644
index 000000000000..9921827267d0
--- /dev/null
+++ b/drivers/android/binder/error.rs
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::prelude::*;
+
+use crate::defs::*;
+
+pub(crate) type BinderResult<T = ()> = core::result::Result<T, BinderError>;
+
+/// An error that will be returned to userspace via the `BINDER_WRITE_READ` ioctl rather than via
+/// errno.
+pub(crate) struct BinderError {
+ pub(crate) reply: u32,
+ source: Option<Error>,
+}
+
+impl BinderError {
+ pub(crate) fn new_dead() -> Self {
+ Self {
+ reply: BR_DEAD_REPLY,
+ source: None,
+ }
+ }
+
+ pub(crate) fn new_frozen() -> Self {
+ Self {
+ reply: BR_FROZEN_REPLY,
+ source: None,
+ }
+ }
+
+ pub(crate) fn new_frozen_oneway() -> Self {
+ Self {
+ reply: BR_TRANSACTION_PENDING_FROZEN,
+ source: None,
+ }
+ }
+
+ pub(crate) fn is_dead(&self) -> bool {
+ self.reply == BR_DEAD_REPLY
+ }
+
+ pub(crate) fn as_errno(&self) -> kernel::ffi::c_int {
+ self.source.unwrap_or(EINVAL).to_errno()
+ }
+
+ pub(crate) fn should_pr_warn(&self) -> bool {
+ self.source.is_some()
+ }
+}
+
+/// Convert an errno into a `BinderError` and store the errno used to construct it. The errno
+/// should be stored as the thread's extended error when given to userspace.
+impl From<Error> for BinderError {
+ fn from(source: Error) -> Self {
+ Self {
+ reply: BR_FAILED_REPLY,
+ source: Some(source),
+ }
+ }
+}
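+
+// A minimal sketch of the intended usage (assumption): in functions returning `BinderResult`,
+// the `?` operator converts errno-style errors into a `BR_FAILED_REPLY` while keeping the
+// source errno for the extended-error mechanism:
+//
+//     fn read_handle(reader: &mut UserSliceReader) -> BinderResult<u32> {
+//         let handle: u32 = reader.read()?; // `Error` -> `BinderError` via `From`
+//         Ok(handle)
+//     }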
+
+impl From<kernel::fs::file::BadFdError> for BinderError {
+ fn from(source: kernel::fs::file::BadFdError) -> Self {
+ BinderError::from(Error::from(source))
+ }
+}
+
+impl From<kernel::alloc::AllocError> for BinderError {
+ fn from(_: kernel::alloc::AllocError) -> Self {
+ Self {
+ reply: BR_FAILED_REPLY,
+ source: Some(ENOMEM),
+ }
+ }
+}
+
+impl core::fmt::Debug for BinderError {
+ fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+ match self.reply {
+ BR_FAILED_REPLY => match self.source.as_ref() {
+ Some(source) => f
+ .debug_struct("BR_FAILED_REPLY")
+ .field("source", source)
+ .finish(),
+ None => f.pad("BR_FAILED_REPLY"),
+ },
+ BR_DEAD_REPLY => f.pad("BR_DEAD_REPLY"),
+ BR_FROZEN_REPLY => f.pad("BR_FROZEN_REPLY"),
+ BR_TRANSACTION_PENDING_FROZEN => f.pad("BR_TRANSACTION_PENDING_FROZEN"),
+ BR_TRANSACTION_COMPLETE => f.pad("BR_TRANSACTION_COMPLETE"),
+ _ => f
+ .debug_struct("BinderError")
+ .field("reply", &self.reply)
+ .finish(),
+ }
+ }
+}
diff --git a/drivers/android/binder/freeze.rs b/drivers/android/binder/freeze.rs
new file mode 100644
index 000000000000..e68c3c8bc55a
--- /dev/null
+++ b/drivers/android/binder/freeze.rs
@@ -0,0 +1,388 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::{
+ alloc::AllocError,
+ list::ListArc,
+ prelude::*,
+ rbtree::{self, RBTreeNodeReservation},
+ seq_file::SeqFile,
+ seq_print,
+ sync::{Arc, UniqueArc},
+ uaccess::UserSliceReader,
+};
+
+use crate::{
+ defs::*, node::Node, process::Process, thread::Thread, BinderReturnWriter, DArc, DLArc,
+ DTRWrap, DeliverToRead,
+};
+
+#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd)]
+pub(crate) struct FreezeCookie(u64);
+
+/// Represents a listener for changes to the frozen state of a process.
+pub(crate) struct FreezeListener {
+ /// The node we are listening for.
+ pub(crate) node: DArc<Node>,
+ /// The cookie of this freeze listener.
+ cookie: FreezeCookie,
+ /// What value of `is_frozen` did we most recently tell userspace about?
+ last_is_frozen: Option<bool>,
+ /// We sent a `BR_FROZEN_BINDER` and we are waiting for `BC_FREEZE_NOTIFICATION_DONE` before
+ /// sending any other commands.
+ is_pending: bool,
+ /// Userspace sent `BC_CLEAR_FREEZE_NOTIFICATION` and we need to reply with
+ /// `BR_CLEAR_FREEZE_NOTIFICATION_DONE` as soon as possible. If `is_pending` is set, then we
+ /// must wait for it to be unset before we can reply.
+ is_clearing: bool,
+ /// Number of cleared duplicates that can't be deleted until userspace sends
+ /// `BC_FREEZE_NOTIFICATION_DONE`.
+ num_pending_duplicates: u64,
+ /// Number of cleared duplicates that can be deleted.
+ num_cleared_duplicates: u64,
+}
+
+impl FreezeListener {
+ /// Is it okay to create a new listener with the same cookie as this one for the provided node?
+ ///
+ /// Under some scenarios, userspace may delete a freeze listener and immediately recreate it
+ /// with the same cookie. This results in duplicate listeners. To avoid issues with ambiguity,
+ /// we allow this only if the new listener is for the same node, and we also require that the
+ /// old listener has already been cleared.
+ fn allow_duplicate(&self, node: &DArc<Node>) -> bool {
+ Arc::ptr_eq(&self.node, node) && self.is_clearing
+ }
+}
+
+type UninitFM = UniqueArc<core::mem::MaybeUninit<DTRWrap<FreezeMessage>>>;
+
+/// Represents a notification that the freeze state has changed.
+pub(crate) struct FreezeMessage {
+ cookie: FreezeCookie,
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for FreezeMessage {
+ untracked;
+ }
+}
+
+impl FreezeMessage {
+ fn new(flags: kernel::alloc::Flags) -> Result<UninitFM, AllocError> {
+ UniqueArc::new_uninit(flags)
+ }
+
+ fn init(ua: UninitFM, cookie: FreezeCookie) -> DLArc<FreezeMessage> {
+ match ua.pin_init_with(DTRWrap::new(FreezeMessage { cookie })) {
+ Ok(msg) => ListArc::from(msg),
+ Err(err) => match err {},
+ }
+ }
+}
+
+impl DeliverToRead for FreezeMessage {
+ fn do_work(
+ self: DArc<Self>,
+ thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool> {
+ let _removed_listener;
+ let mut node_refs = thread.process.node_refs.lock();
+ let Some(mut freeze_entry) = node_refs.freeze_listeners.find_mut(&self.cookie) else {
+ return Ok(true);
+ };
+ let freeze = freeze_entry.get_mut();
+
+ if freeze.num_cleared_duplicates > 0 {
+ freeze.num_cleared_duplicates -= 1;
+ drop(node_refs);
+ writer.write_code(BR_CLEAR_FREEZE_NOTIFICATION_DONE)?;
+ writer.write_payload(&self.cookie.0)?;
+ return Ok(true);
+ }
+
+ if freeze.is_pending {
+ return Ok(true);
+ }
+ if freeze.is_clearing {
+ _removed_listener = freeze_entry.remove_node();
+ drop(node_refs);
+ writer.write_code(BR_CLEAR_FREEZE_NOTIFICATION_DONE)?;
+ writer.write_payload(&self.cookie.0)?;
+ Ok(true)
+ } else {
+ let is_frozen = freeze.node.owner.inner.lock().is_frozen;
+ if freeze.last_is_frozen == Some(is_frozen) {
+ return Ok(true);
+ }
+
+ let mut state_info = BinderFrozenStateInfo::default();
+ state_info.is_frozen = is_frozen as u32;
+ state_info.cookie = freeze.cookie.0;
+ freeze.is_pending = true;
+ freeze.last_is_frozen = Some(is_frozen);
+ drop(node_refs);
+
+ writer.write_code(BR_FROZEN_BINDER)?;
+ writer.write_payload(&state_info)?;
+ // BR_FROZEN_BINDER notifications can cause transactions
+ Ok(false)
+ }
+ }
+
+ fn cancel(self: DArc<Self>) {}
+
+ fn should_sync_wakeup(&self) -> bool {
+ false
+ }
+
+ #[inline(never)]
+ fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
+ seq_print!(m, "{}has frozen binder\n", prefix);
+ Ok(())
+ }
+}
+
+impl FreezeListener {
+ pub(crate) fn on_process_exit(&self, proc: &Arc<Process>) {
+ if !self.is_clearing {
+ self.node.remove_freeze_listener(proc);
+ }
+ }
+}
+
+impl Process {
+ pub(crate) fn request_freeze_notif(
+ self: &Arc<Self>,
+ reader: &mut UserSliceReader,
+ ) -> Result<()> {
+ let hc = reader.read::<BinderHandleCookie>()?;
+ let handle = hc.handle;
+ let cookie = FreezeCookie(hc.cookie);
+
+ let msg = FreezeMessage::new(GFP_KERNEL)?;
+ let alloc = RBTreeNodeReservation::new(GFP_KERNEL)?;
+
+ let mut node_refs_guard = self.node_refs.lock();
+ let node_refs = &mut *node_refs_guard;
+ let Some(info) = node_refs.by_handle.get_mut(&handle) else {
+ pr_warn!("BC_REQUEST_FREEZE_NOTIFICATION invalid ref {}\n", handle);
+ return Err(EINVAL);
+ };
+ if info.freeze().is_some() {
+ pr_warn!("BC_REQUEST_FREEZE_NOTIFICATION already set\n");
+ return Err(EINVAL);
+ }
+ let node_ref = info.node_ref();
+ let freeze_entry = node_refs.freeze_listeners.entry(cookie);
+
+ if let rbtree::Entry::Occupied(ref dupe) = freeze_entry {
+ if !dupe.get().allow_duplicate(&node_ref.node) {
+ pr_warn!("BC_REQUEST_FREEZE_NOTIFICATION duplicate cookie\n");
+ return Err(EINVAL);
+ }
+ }
+
+ // All failure paths must come before this call, and all modifications must come after this
+ // call.
+ node_ref.node.add_freeze_listener(self, GFP_KERNEL)?;
+
+ match freeze_entry {
+ rbtree::Entry::Vacant(entry) => {
+ entry.insert(
+ FreezeListener {
+ cookie,
+ node: node_ref.node.clone(),
+ last_is_frozen: None,
+ is_pending: false,
+ is_clearing: false,
+ num_pending_duplicates: 0,
+ num_cleared_duplicates: 0,
+ },
+ alloc,
+ );
+ }
+ rbtree::Entry::Occupied(mut dupe) => {
+ let dupe = dupe.get_mut();
+ if dupe.is_pending {
+ dupe.num_pending_duplicates += 1;
+ } else {
+ dupe.num_cleared_duplicates += 1;
+ }
+ dupe.last_is_frozen = None;
+ dupe.is_pending = false;
+ dupe.is_clearing = false;
+ }
+ }
+
+ *info.freeze() = Some(cookie);
+ let msg = FreezeMessage::init(msg, cookie);
+ drop(node_refs_guard);
+ let _ = self.push_work(msg);
+ Ok(())
+ }
+
+ pub(crate) fn freeze_notif_done(self: &Arc<Self>, reader: &mut UserSliceReader) -> Result<()> {
+ let cookie = FreezeCookie(reader.read()?);
+ let alloc = FreezeMessage::new(GFP_KERNEL)?;
+ let mut node_refs_guard = self.node_refs.lock();
+ let node_refs = &mut *node_refs_guard;
+ let Some(freeze) = node_refs.freeze_listeners.get_mut(&cookie) else {
+ pr_warn!("BC_FREEZE_NOTIFICATION_DONE {:016x} not found\n", cookie.0);
+ return Err(EINVAL);
+ };
+ let mut clear_msg = None;
+ if freeze.num_pending_duplicates > 0 {
+ clear_msg = Some(FreezeMessage::init(alloc, cookie));
+ freeze.num_pending_duplicates -= 1;
+ freeze.num_cleared_duplicates += 1;
+ } else {
+ if !freeze.is_pending {
+ pr_warn!(
+ "BC_FREEZE_NOTIFICATION_DONE {:016x} not pending\n",
+ cookie.0
+ );
+ return Err(EINVAL);
+ }
+ if freeze.is_clearing {
+ // Immediately send another FreezeMessage for BR_CLEAR_FREEZE_NOTIFICATION_DONE.
+ clear_msg = Some(FreezeMessage::init(alloc, cookie));
+ }
+ freeze.is_pending = false;
+ }
+ drop(node_refs_guard);
+ if let Some(clear_msg) = clear_msg {
+ let _ = self.push_work(clear_msg);
+ }
+ Ok(())
+ }
+
+ pub(crate) fn clear_freeze_notif(self: &Arc<Self>, reader: &mut UserSliceReader) -> Result<()> {
+ let hc = reader.read::<BinderHandleCookie>()?;
+ let handle = hc.handle;
+ let cookie = FreezeCookie(hc.cookie);
+
+ let alloc = FreezeMessage::new(GFP_KERNEL)?;
+ let mut node_refs_guard = self.node_refs.lock();
+ let node_refs = &mut *node_refs_guard;
+ let Some(info) = node_refs.by_handle.get_mut(&handle) else {
+ pr_warn!("BC_CLEAR_FREEZE_NOTIFICATION invalid ref {}\n", handle);
+ return Err(EINVAL);
+ };
+ let Some(info_cookie) = info.freeze() else {
+ pr_warn!("BC_CLEAR_FREEZE_NOTIFICATION freeze notification not active\n");
+ return Err(EINVAL);
+ };
+ if *info_cookie != cookie {
+ pr_warn!("BC_CLEAR_FREEZE_NOTIFICATION freeze notification cookie mismatch\n");
+ return Err(EINVAL);
+ }
+ let Some(listener) = node_refs.freeze_listeners.get_mut(&cookie) else {
+ pr_warn!("BC_CLEAR_FREEZE_NOTIFICATION invalid cookie {}\n", handle);
+ return Err(EINVAL);
+ };
+ listener.is_clearing = true;
+ listener.node.remove_freeze_listener(self);
+ *info.freeze() = None;
+ let mut msg = None;
+ if !listener.is_pending {
+ msg = Some(FreezeMessage::init(alloc, cookie));
+ }
+ drop(node_refs_guard);
+
+ if let Some(msg) = msg {
+ let _ = self.push_work(msg);
+ }
+ Ok(())
+ }
+
+ fn get_freeze_cookie(&self, node: &DArc<Node>) -> Option<FreezeCookie> {
+ let node_refs = &mut *self.node_refs.lock();
+ let handle = node_refs.by_node.get(&node.global_id())?;
+ let node_ref = node_refs.by_handle.get_mut(handle)?;
+ *node_ref.freeze()
+ }
+
+ /// Creates a vector of every freeze listener on this process.
+ ///
+ /// Returns pairs of the remote process listening for notifications and the local node it is
+ /// listening on.
+ #[expect(clippy::type_complexity)]
+ fn find_freeze_recipients(&self) -> Result<KVVec<(DArc<Node>, Arc<Process>)>, AllocError> {
+ // Defined before `inner` to drop after releasing spinlock if `push_within_capacity` fails.
+ let mut node_proc_pair;
+
+ // We pre-allocate space for up to 8 recipients before we take the spinlock. However, if
+ // the allocation fails, use a vector with a capacity of zero instead of failing. After
+ // all, there might not be any freeze listeners, in which case this operation could still
+ // succeed.
+ let mut recipients =
+ KVVec::with_capacity(8, GFP_KERNEL).unwrap_or_else(|_err| KVVec::new());
+
+ let mut inner = self.lock_with_nodes();
+ let mut curr = inner.nodes.cursor_front();
+ while let Some(cursor) = curr {
+ let (key, node) = cursor.current();
+ let key = *key;
+ let list = node.freeze_list(&inner.inner);
+ let len = list.len();
+
+ if recipients.spare_capacity_mut().len() < len {
+ drop(inner);
+ recipients.reserve(len, GFP_KERNEL)?;
+ inner = self.lock_with_nodes();
+ // Find the node we were looking at and try again. If the set of nodes was changed,
+ // then just proceed to the next node. This is ok because we don't guarantee the
+ // inclusion of nodes that are added or removed in parallel with this operation.
+ curr = inner.nodes.cursor_lower_bound(&key);
+ continue;
+ }
+
+ for proc in list {
+ node_proc_pair = (node.clone(), proc.clone());
+ recipients
+ .push_within_capacity(node_proc_pair)
+ .map_err(|_| {
+ pr_err!(
+ "push_within_capacity failed even though we checked the capacity\n"
+ );
+ AllocError
+ })?;
+ }
+
+ curr = cursor.move_next();
+ }
+ Ok(recipients)
+ }
+
+ /// Prepare allocations for sending freeze messages.
+ pub(crate) fn prepare_freeze_messages(&self) -> Result<FreezeMessages, AllocError> {
+ let recipients = self.find_freeze_recipients()?;
+ let mut batch = KVVec::with_capacity(recipients.len(), GFP_KERNEL)?;
+ for (node, proc) in recipients {
+ let Some(cookie) = proc.get_freeze_cookie(&node) else {
+ // If the freeze listener was removed in the meantime, just discard the
+ // notification.
+ continue;
+ };
+ let msg_alloc = FreezeMessage::new(GFP_KERNEL)?;
+ let msg = FreezeMessage::init(msg_alloc, cookie);
+ batch.push((proc, msg), GFP_KERNEL)?;
+ }
+
+ Ok(FreezeMessages { batch })
+ }
+}
+
+pub(crate) struct FreezeMessages {
+ batch: KVVec<(Arc<Process>, DLArc<FreezeMessage>)>,
+}
+
+impl FreezeMessages {
+ pub(crate) fn send_messages(self) {
+ for (proc, msg) in self.batch {
+ let _ = proc.push_work(msg);
+ }
+ }
+}
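+
+// Illustrative flow (assumption): freeze handling allocates all messages up front, while failure
+// is still allowed, and then delivers them infallibly after the state change:
+//
+//     let msgs = proc.prepare_freeze_messages()?; // may fail with AllocError
+//     /* flip `is_frozen` under the appropriate lock */
+//     msgs.send_messages();                       // infallible delivery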
diff --git a/drivers/android/binder/node.rs b/drivers/android/binder/node.rs
new file mode 100644
index 000000000000..ade895ef791e
--- /dev/null
+++ b/drivers/android/binder/node.rs
@@ -0,0 +1,1131 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::{
+ list::{AtomicTracker, List, ListArc, ListLinks, TryNewListArc},
+ prelude::*,
+ seq_file::SeqFile,
+ seq_print,
+ sync::lock::{spinlock::SpinLockBackend, Guard},
+ sync::{Arc, LockedBy, SpinLock},
+};
+
+use crate::{
+ defs::*,
+ error::BinderError,
+ process::{NodeRefInfo, Process, ProcessInner},
+ thread::Thread,
+ transaction::Transaction,
+ BinderReturnWriter, DArc, DLArc, DTRWrap, DeliverToRead,
+};
+
+use core::mem;
+
+mod wrapper;
+pub(crate) use self::wrapper::CritIncrWrapper;
+
+#[derive(Debug)]
+pub(crate) struct CouldNotDeliverCriticalIncrement;
+
+/// Keeps track of how this node is scheduled.
+///
+/// There are two ways to schedule a node to a work list. Just schedule the node itself, or
+/// allocate a wrapper that references the node and schedule the wrapper. These wrappers exist to
+/// make it possible to "move" a node from one list to another - when `do_work` is called directly
+/// on the `Node`, then it's a no-op if there's also a pending wrapper.
+///
+/// Wrappers are generally only needed for zero-to-one refcount increments, and there are two cases
+/// of this: weak increments and strong increments. We call such increments "critical" because it
+/// is critical that they are delivered to the thread doing the increment. Some examples:
+///
+/// * One thread makes a zero-to-one strong increment, and another thread makes a zero-to-one weak
+/// increment. Delivering the node to the thread doing the weak increment is wrong, since the
+/// thread doing the strong increment may have ended a long time ago when the command is actually
+/// processed by userspace.
+///
+/// * We have a weak reference and are about to drop it on one thread. But then another thread does
+/// a zero-to-one strong increment. If the strong increment gets sent to the thread that was
+/// about to drop the weak reference, then the strong increment could be processed after the
+/// other thread has already exited, which would be too late.
+///
+/// Note that trying to create a `ListArc` to the node can succeed even if `has_normal_push` is
+/// set. This is because another thread might just have popped the node from a todo list, but not
+/// yet called `do_work`. However, if `has_normal_push` is false, then creating a `ListArc` should
+/// always succeed.
+///
+/// Like the other fields in `NodeInner`, the delivery state is protected by the process lock.
+struct DeliveryState {
+ /// Is the `Node` currently scheduled?
+ has_pushed_node: bool,
+
+ /// Is a wrapper currently scheduled?
+ ///
+ /// The wrapper is used only for strong zero2one increments.
+ has_pushed_wrapper: bool,
+
+ /// Is the currently scheduled `Node` scheduled due to a weak zero2one increment?
+ ///
+ /// Weak zero2one operations are always scheduled using the `Node`.
+ has_weak_zero2one: bool,
+
+ /// Is the currently scheduled wrapper/`Node` scheduled due to a strong zero2one increment?
+ ///
+ /// If `has_pushed_wrapper` is set, then the strong zero2one increment was scheduled using the
+ /// wrapper. Otherwise, `has_pushed_node` must be set and it was scheduled using the `Node`.
+ has_strong_zero2one: bool,
+}
+
+impl DeliveryState {
+ fn should_normal_push(&self) -> bool {
+ !self.has_pushed_node && !self.has_pushed_wrapper
+ }
+
+ fn did_normal_push(&mut self) {
+ assert!(self.should_normal_push());
+ self.has_pushed_node = true;
+ }
+
+ fn should_push_weak_zero2one(&self) -> bool {
+ !self.has_weak_zero2one && !self.has_strong_zero2one
+ }
+
+ fn can_push_weak_zero2one_normally(&self) -> bool {
+ !self.has_pushed_node
+ }
+
+ fn did_push_weak_zero2one(&mut self) {
+ assert!(self.should_push_weak_zero2one());
+ assert!(self.can_push_weak_zero2one_normally());
+ self.has_pushed_node = true;
+ self.has_weak_zero2one = true;
+ }
+
+ fn should_push_strong_zero2one(&self) -> bool {
+ !self.has_strong_zero2one
+ }
+
+ fn can_push_strong_zero2one_normally(&self) -> bool {
+ !self.has_pushed_node
+ }
+
+ fn did_push_strong_zero2one(&mut self) {
+ assert!(self.should_push_strong_zero2one());
+ assert!(self.can_push_strong_zero2one_normally());
+ self.has_pushed_node = true;
+ self.has_strong_zero2one = true;
+ }
+
+ fn did_push_strong_zero2one_wrapper(&mut self) {
+ assert!(self.should_push_strong_zero2one());
+ assert!(!self.can_push_strong_zero2one_normally());
+ self.has_pushed_wrapper = true;
+ self.has_strong_zero2one = true;
+ }
+}
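+
+// For example (mirroring the assertions above): a normal push is only allowed while nothing is
+// scheduled, and it marks the node as pushed:
+//
+//     if inner.delivery_state.should_normal_push() {
+//         inner.delivery_state.did_normal_push();
+//         /* push the `Node` itself to a todo list */
+//     }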
+
+struct CountState {
+ /// The reference count.
+ count: usize,
+ /// Whether the process that owns this node thinks that we hold a refcount on it. (Note that
+ /// even if count is greater than one, we only increment it once in the owning process.)
+ has_count: bool,
+}
+
+impl CountState {
+ fn new() -> Self {
+ Self {
+ count: 0,
+ has_count: false,
+ }
+ }
+}
+
+struct NodeInner {
+ /// Strong refcounts held on this node by `NodeRef` objects.
+ strong: CountState,
+ /// Weak refcounts held on this node by `NodeRef` objects.
+ weak: CountState,
+ delivery_state: DeliveryState,
+ /// The binder driver guarantees that oneway transactions sent to the same node are serialized,
+ /// that is, userspace will not be given the next one until it has finished processing the
+ /// previous oneway transaction. This is done to avoid the case where two oneway transactions
+ /// arrive in opposite order from the order in which they were sent. (E.g., they could be
+ /// delivered to two different threads, which could appear as-if they were sent in opposite
+ /// order.)
+ ///
+ /// To fix that, we store pending oneway transactions in a separate list in the node, and don't
+ /// deliver the next oneway transaction until userspace signals that it has finished processing
+ /// the previous oneway transaction by calling the `BC_FREE_BUFFER` ioctl.
+ oneway_todo: List<DTRWrap<Transaction>>,
+ /// Keeps track of whether this node has a pending oneway transaction.
+ ///
+ /// When this is true, incoming oneway transactions are stored in `oneway_todo`, instead of
+ /// being delivered directly to the process.
+ has_oneway_transaction: bool,
+ /// List of processes to deliver a notification to when this node is destroyed (usually due to
+ /// the process dying).
+ death_list: List<DTRWrap<NodeDeath>, 1>,
+ /// List of processes to deliver freeze notifications to.
+ freeze_list: KVVec<Arc<Process>>,
+ /// The number of active BR_INCREFS or BR_ACQUIRE operations. (Should be at most two.)
+ ///
+ /// If this is non-zero, then we postpone any BR_RELEASE or BR_DECREFS notifications until the
+ /// active operations have ended. This avoids the situation where an increment and a decrement
+ /// get reordered from userspace's perspective.
+ active_inc_refs: u8,
+ /// List of `NodeRefInfo` objects that reference this node.
+ refs: List<NodeRefInfo, { NodeRefInfo::LIST_NODE }>,
+}
+
+#[pin_data]
+pub(crate) struct Node {
+ pub(crate) debug_id: usize,
+ ptr: u64,
+ pub(crate) cookie: u64,
+ pub(crate) flags: u32,
+ pub(crate) owner: Arc<Process>,
+ inner: LockedBy<NodeInner, ProcessInner>,
+ #[pin]
+ links_track: AtomicTracker,
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for Node {
+ tracked_by links_track: AtomicTracker;
+ }
+}
+
+// Make `oneway_todo` work.
+kernel::list::impl_list_item! {
+ impl ListItem<0> for DTRWrap<Transaction> {
+ using ListLinks { self.links.inner };
+ }
+}
+
+impl Node {
+ pub(crate) fn new(
+ ptr: u64,
+ cookie: u64,
+ flags: u32,
+ owner: Arc<Process>,
+ ) -> impl PinInit<Self> {
+ pin_init!(Self {
+ inner: LockedBy::new(
+ &owner.inner,
+ NodeInner {
+ strong: CountState::new(),
+ weak: CountState::new(),
+ delivery_state: DeliveryState {
+ has_pushed_node: false,
+ has_pushed_wrapper: false,
+ has_weak_zero2one: false,
+ has_strong_zero2one: false,
+ },
+ death_list: List::new(),
+ oneway_todo: List::new(),
+ freeze_list: KVVec::new(),
+ has_oneway_transaction: false,
+ active_inc_refs: 0,
+ refs: List::new(),
+ },
+ ),
+ debug_id: super::next_debug_id(),
+ ptr,
+ cookie,
+ flags,
+ owner,
+ links_track <- AtomicTracker::new(),
+ })
+ }
+
+ pub(crate) fn has_oneway_transaction(&self, owner_inner: &mut ProcessInner) -> bool {
+ let inner = self.inner.access_mut(owner_inner);
+ inner.has_oneway_transaction
+ }
+
+ #[inline(never)]
+ pub(crate) fn full_debug_print(
+ &self,
+ m: &SeqFile,
+ owner_inner: &mut ProcessInner,
+ ) -> Result<()> {
+ let inner = self.inner.access_mut(owner_inner);
+ seq_print!(
+ m,
+ " node {}: u{:016x} c{:016x} hs {} hw {} cs {} cw {}",
+ self.debug_id,
+ self.ptr,
+ self.cookie,
+ inner.strong.has_count,
+ inner.weak.has_count,
+ inner.strong.count,
+ inner.weak.count,
+ );
+ if !inner.refs.is_empty() {
+ seq_print!(m, " proc");
+ for node_ref in &inner.refs {
+ seq_print!(m, " {}", node_ref.process.task.pid());
+ }
+ }
+ seq_print!(m, "\n");
+ for t in &inner.oneway_todo {
+ t.debug_print_inner(m, " pending async transaction ");
+ }
+ Ok(())
+ }
+
+ /// Insert the `NodeRefInfo` into this node's `refs` list.
+ ///
+ /// # Safety
+ ///
+ /// It must be the case that `info.node_ref.node` is this node.
+ pub(crate) unsafe fn insert_node_info(
+ &self,
+ info: ListArc<NodeRefInfo, { NodeRefInfo::LIST_NODE }>,
+ ) {
+ self.inner
+ .access_mut(&mut self.owner.inner.lock())
+ .refs
+ .push_front(info);
+ }
+
+ /// Remove the `NodeRefInfo` from this node's `refs` list.
+ ///
+ /// # Safety
+ ///
+ /// It must be the case that `info.node_ref.node` is this node.
+ pub(crate) unsafe fn remove_node_info(
+ &self,
+ info: &NodeRefInfo,
+ ) -> Option<ListArc<NodeRefInfo, { NodeRefInfo::LIST_NODE }>> {
+ // SAFETY: We always insert `NodeRefInfo` objects into the `refs` list of the node that it
+ // references in `info.node_ref.node`. That is this node, so `info` cannot possibly be in
+ // the `refs` list of another node.
+ unsafe {
+ self.inner
+ .access_mut(&mut self.owner.inner.lock())
+ .refs
+ .remove(info)
+ }
+ }
+
+ /// An id that is unique across all binder nodes on the system. Used as the key in the
+ /// `by_node` map.
+ pub(crate) fn global_id(&self) -> usize {
+ self as *const Node as usize
+ }
+
+ pub(crate) fn get_id(&self) -> (u64, u64) {
+ (self.ptr, self.cookie)
+ }
+
+ pub(crate) fn add_death(
+ &self,
+ death: ListArc<DTRWrap<NodeDeath>, 1>,
+ guard: &mut Guard<'_, ProcessInner, SpinLockBackend>,
+ ) {
+ self.inner.access_mut(guard).death_list.push_back(death);
+ }
+
+ pub(crate) fn inc_ref_done_locked(
+ self: &DArc<Node>,
+ _strong: bool,
+ owner_inner: &mut ProcessInner,
+ ) -> Option<DLArc<Node>> {
+ let inner = self.inner.access_mut(owner_inner);
+ if inner.active_inc_refs == 0 {
+ pr_err!("inc_ref_done called when no active inc_refs");
+ return None;
+ }
+
+ inner.active_inc_refs -= 1;
+ if inner.active_inc_refs == 0 {
+ // Having active inc_refs can inhibit dropping of ref-counts. Calculate whether we
+ // would send a refcount decrement, and if so, tell the caller to schedule us.
+ let strong = inner.strong.count > 0;
+ let has_strong = inner.strong.has_count;
+ let weak = strong || inner.weak.count > 0;
+ let has_weak = inner.weak.has_count;
+
+ let should_drop_weak = !weak && has_weak;
+ let should_drop_strong = !strong && has_strong;
+
+ // If we want to drop the ref-count again, tell the caller to schedule a work node for
+ // that.
+ let need_push = should_drop_weak || should_drop_strong;
+
+ if need_push && inner.delivery_state.should_normal_push() {
+ let list_arc = ListArc::try_from_arc(self.clone()).ok().unwrap();
+ inner.delivery_state.did_normal_push();
+ Some(list_arc)
+ } else {
+ None
+ }
+ } else {
+ None
+ }
+ }
+
+ pub(crate) fn update_refcount_locked(
+ self: &DArc<Node>,
+ inc: bool,
+ strong: bool,
+ count: usize,
+ owner_inner: &mut ProcessInner,
+ ) -> Option<DLArc<Node>> {
+ let is_dead = owner_inner.is_dead;
+ let inner = self.inner.access_mut(owner_inner);
+
+ // Get a reference to the state we'll update.
+ let state = if strong {
+ &mut inner.strong
+ } else {
+ &mut inner.weak
+ };
+
+ // Update the count and determine whether we need to push work.
+ let need_push = if inc {
+ state.count += count;
+ // TODO: This method shouldn't be used for zero-to-one increments.
+ !is_dead && !state.has_count
+ } else {
+ if state.count < count {
+ pr_err!("Failure: refcount underflow!");
+ return None;
+ }
+ state.count -= count;
+ !is_dead && state.count == 0 && state.has_count
+ };
+
+ if need_push && inner.delivery_state.should_normal_push() {
+ let list_arc = ListArc::try_from_arc(self.clone()).ok().unwrap();
+ inner.delivery_state.did_normal_push();
+ Some(list_arc)
+ } else {
+ None
+ }
+ }
+
+ pub(crate) fn incr_refcount_allow_zero2one(
+ self: &DArc<Self>,
+ strong: bool,
+ owner_inner: &mut ProcessInner,
+ ) -> Result<Option<DLArc<Node>>, CouldNotDeliverCriticalIncrement> {
+ let is_dead = owner_inner.is_dead;
+ let inner = self.inner.access_mut(owner_inner);
+
+ // Get a reference to the state we'll update.
+ let state = if strong {
+ &mut inner.strong
+ } else {
+ &mut inner.weak
+ };
+
+ // Update the count and determine whether we need to push work.
+ state.count += 1;
+ if is_dead || state.has_count {
+ return Ok(None);
+ }
+
+ // Userspace needs to be notified of this.
+ if !strong && inner.delivery_state.should_push_weak_zero2one() {
+ assert!(inner.delivery_state.can_push_weak_zero2one_normally());
+ let list_arc = ListArc::try_from_arc(self.clone()).ok().unwrap();
+ inner.delivery_state.did_push_weak_zero2one();
+ Ok(Some(list_arc))
+ } else if strong && inner.delivery_state.should_push_strong_zero2one() {
+ if inner.delivery_state.can_push_strong_zero2one_normally() {
+ let list_arc = ListArc::try_from_arc(self.clone()).ok().unwrap();
+ inner.delivery_state.did_push_strong_zero2one();
+ Ok(Some(list_arc))
+ } else {
+ state.count -= 1;
+ Err(CouldNotDeliverCriticalIncrement)
+ }
+ } else {
+ // Work is already pushed, and we don't need to push again.
+ Ok(None)
+ }
+ }
+
+ pub(crate) fn incr_refcount_allow_zero2one_with_wrapper(
+ self: &DArc<Self>,
+ strong: bool,
+ wrapper: CritIncrWrapper,
+ owner_inner: &mut ProcessInner,
+ ) -> Option<DLArc<dyn DeliverToRead>> {
+ match self.incr_refcount_allow_zero2one(strong, owner_inner) {
+ Ok(Some(node)) => Some(node as _),
+ Ok(None) => None,
+ Err(CouldNotDeliverCriticalIncrement) => {
+ assert!(strong);
+ let inner = self.inner.access_mut(owner_inner);
+ inner.strong.count += 1;
+ inner.delivery_state.did_push_strong_zero2one_wrapper();
+ Some(wrapper.init(self.clone()))
+ }
+ }
+ }
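+
+ // A caller-side sketch (assumption): a strong zero-to-one increment is first attempted
+ // without a wrapper; only if that fails is a `CritIncrWrapper` allocated (outside the lock)
+ // and the increment retried with `incr_refcount_allow_zero2one_with_wrapper`:
+ //
+ //     match node.incr_refcount_allow_zero2one(true, &mut inner) {
+ //         Ok(push) => { /* push the returned work item, if any */ }
+ //         Err(CouldNotDeliverCriticalIncrement) => {
+ //             /* drop the lock, allocate a CritIncrWrapper, retake the lock, retry */
+ //         }
+ //     }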
+
+ pub(crate) fn update_refcount(self: &DArc<Self>, inc: bool, count: usize, strong: bool) {
+ self.owner
+ .inner
+ .lock()
+ .update_node_refcount(self, inc, strong, count, None);
+ }
+
+ pub(crate) fn populate_counts(
+ &self,
+ out: &mut BinderNodeInfoForRef,
+ guard: &Guard<'_, ProcessInner, SpinLockBackend>,
+ ) {
+ let inner = self.inner.access(guard);
+ out.strong_count = inner.strong.count as _;
+ out.weak_count = inner.weak.count as _;
+ }
+
+ pub(crate) fn populate_debug_info(
+ &self,
+ out: &mut BinderNodeDebugInfo,
+ guard: &Guard<'_, ProcessInner, SpinLockBackend>,
+ ) {
+ out.ptr = self.ptr as _;
+ out.cookie = self.cookie as _;
+ let inner = self.inner.access(guard);
+ if inner.strong.has_count {
+ out.has_strong_ref = 1;
+ }
+ if inner.weak.has_count {
+ out.has_weak_ref = 1;
+ }
+ }
+
+ pub(crate) fn force_has_count(&self, guard: &mut Guard<'_, ProcessInner, SpinLockBackend>) {
+ let inner = self.inner.access_mut(guard);
+ inner.strong.has_count = true;
+ inner.weak.has_count = true;
+ }
+
+ fn write(&self, writer: &mut BinderReturnWriter<'_>, code: u32) -> Result {
+ writer.write_code(code)?;
+ writer.write_payload(&self.ptr)?;
+ writer.write_payload(&self.cookie)?;
+ Ok(())
+ }
+
+ pub(crate) fn submit_oneway(
+ &self,
+ transaction: DLArc<Transaction>,
+ guard: &mut Guard<'_, ProcessInner, SpinLockBackend>,
+ ) -> Result<(), (BinderError, DLArc<dyn DeliverToRead>)> {
+ if guard.is_dead {
+ return Err((BinderError::new_dead(), transaction));
+ }
+
+ let inner = self.inner.access_mut(guard);
+ if inner.has_oneway_transaction {
+ inner.oneway_todo.push_back(transaction);
+ } else {
+ inner.has_oneway_transaction = true;
+ guard.push_work(transaction)?;
+ }
+ Ok(())
+ }
+
+ pub(crate) fn release(&self) {
+ let mut guard = self.owner.inner.lock();
+ while let Some(work) = self.inner.access_mut(&mut guard).oneway_todo.pop_front() {
+ drop(guard);
+ work.into_arc().cancel();
+ guard = self.owner.inner.lock();
+ }
+
+ let death_list = core::mem::take(&mut self.inner.access_mut(&mut guard).death_list);
+ drop(guard);
+ for death in death_list {
+ death.into_arc().set_dead();
+ }
+ }
+
+ pub(crate) fn pending_oneway_finished(&self) {
+ let mut guard = self.owner.inner.lock();
+ if guard.is_dead {
+ // Cleanup will happen in `Process::deferred_release`.
+ return;
+ }
+
+ let inner = self.inner.access_mut(&mut guard);
+
+ let transaction = inner.oneway_todo.pop_front();
+ inner.has_oneway_transaction = transaction.is_some();
+ if let Some(transaction) = transaction {
+ match guard.push_work(transaction) {
+ Ok(()) => {}
+ Err((_err, work)) => {
+ // Process is dead.
+ // This shouldn't happen due to the `is_dead` check, but if it does, just drop
+ // the transaction and return.
+ drop(guard);
+ drop(work);
+ }
+ }
+ }
+ }
+
+ /// Finds an outdated transaction that the given transaction can replace.
+ ///
+ /// If one is found, it is removed from the list and returned.
+ pub(crate) fn take_outdated_transaction(
+ &self,
+ new: &Transaction,
+ guard: &mut Guard<'_, ProcessInner, SpinLockBackend>,
+ ) -> Option<DLArc<Transaction>> {
+ let inner = self.inner.access_mut(guard);
+ let mut cursor = inner.oneway_todo.cursor_front();
+ while let Some(next) = cursor.peek_next() {
+ if new.can_replace(&next) {
+ return Some(next.remove());
+ }
+ cursor.move_next();
+ }
+ None
+ }
+
+ /// This is split into a separate function since it's called by both `Node::do_work` and
+ /// `NodeWrapper::do_work`.
+ fn do_work_locked(
+ &self,
+ writer: &mut BinderReturnWriter<'_>,
+ mut guard: Guard<'_, ProcessInner, SpinLockBackend>,
+ ) -> Result<bool> {
+ let inner = self.inner.access_mut(&mut guard);
+ let strong = inner.strong.count > 0;
+ let has_strong = inner.strong.has_count;
+ let weak = strong || inner.weak.count > 0;
+ let has_weak = inner.weak.has_count;
+
+ if weak && !has_weak {
+ inner.weak.has_count = true;
+ inner.active_inc_refs += 1;
+ }
+
+ if strong && !has_strong {
+ inner.strong.has_count = true;
+ inner.active_inc_refs += 1;
+ }
+
+ let no_active_inc_refs = inner.active_inc_refs == 0;
+ let should_drop_weak = no_active_inc_refs && (!weak && has_weak);
+ let should_drop_strong = no_active_inc_refs && (!strong && has_strong);
+ if should_drop_weak {
+ inner.weak.has_count = false;
+ }
+ if should_drop_strong {
+ inner.strong.has_count = false;
+ }
+ if no_active_inc_refs && !weak {
+ // Remove the node if there are no references to it.
+ guard.remove_node(self.ptr);
+ }
+ drop(guard);
+
+ if weak && !has_weak {
+ self.write(writer, BR_INCREFS)?;
+ }
+ if strong && !has_strong {
+ self.write(writer, BR_ACQUIRE)?;
+ }
+ if should_drop_strong {
+ self.write(writer, BR_RELEASE)?;
+ }
+ if should_drop_weak {
+ self.write(writer, BR_DECREFS)?;
+ }
+
+ Ok(true)
+ }
+
+ pub(crate) fn add_freeze_listener(
+ &self,
+ process: &Arc<Process>,
+ flags: kernel::alloc::Flags,
+ ) -> Result {
+ let mut vec_alloc = KVVec::<Arc<Process>>::new();
+ loop {
+ let mut guard = self.owner.inner.lock();
+ // Do not check for `guard.is_dead`. The `is_dead` flag that matters here is the one
+ // belonging to the owner of the listener, not the target.
+ let inner = self.inner.access_mut(&mut guard);
+ let len = inner.freeze_list.len();
+ if len >= inner.freeze_list.capacity() {
+ if len >= vec_alloc.capacity() {
+ drop(guard);
+ vec_alloc = KVVec::with_capacity((1 + len).next_power_of_two(), flags)?;
+ continue;
+ }
+ mem::swap(&mut inner.freeze_list, &mut vec_alloc);
+ for elem in vec_alloc.drain_all() {
+ inner.freeze_list.push_within_capacity(elem)?;
+ }
+ }
+ inner.freeze_list.push_within_capacity(process.clone())?;
+ return Ok(());
+ }
+ }
+
+ pub(crate) fn remove_freeze_listener(&self, p: &Arc<Process>) {
+ let _unused_capacity;
+ let mut guard = self.owner.inner.lock();
+ let inner = self.inner.access_mut(&mut guard);
+ let len = inner.freeze_list.len();
+ inner.freeze_list.retain(|proc| !Arc::ptr_eq(proc, p));
+ if len == inner.freeze_list.len() {
+ pr_warn!(
+ "Could not remove freeze listener for {}\n",
+ p.pid_in_current_ns()
+ );
+ }
+ if inner.freeze_list.is_empty() {
+ _unused_capacity = mem::replace(&mut inner.freeze_list, KVVec::new());
+ }
+ }
+
+ pub(crate) fn freeze_list<'a>(&'a self, guard: &'a ProcessInner) -> &'a [Arc<Process>] {
+ &self.inner.access(guard).freeze_list
+ }
+}
+
+impl DeliverToRead for Node {
+ fn do_work(
+ self: DArc<Self>,
+ _thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool> {
+ let mut owner_inner = self.owner.inner.lock();
+ let inner = self.inner.access_mut(&mut owner_inner);
+
+ assert!(inner.delivery_state.has_pushed_node);
+ if inner.delivery_state.has_pushed_wrapper {
+ // If the wrapper is scheduled, then we are either a normal push or weak zero2one
+ // increment, and the wrapper is a strong zero2one increment, so the wrapper always
+ // takes precedence over us.
+ assert!(inner.delivery_state.has_strong_zero2one);
+ inner.delivery_state.has_pushed_node = false;
+ inner.delivery_state.has_weak_zero2one = false;
+ return Ok(true);
+ }
+
+ inner.delivery_state.has_pushed_node = false;
+ inner.delivery_state.has_weak_zero2one = false;
+ inner.delivery_state.has_strong_zero2one = false;
+
+ self.do_work_locked(writer, owner_inner)
+ }
+
+ fn cancel(self: DArc<Self>) {}
+
+ fn should_sync_wakeup(&self) -> bool {
+ false
+ }
+
+ #[inline(never)]
+ fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
+ seq_print!(
+ m,
+ "{}node work {}: u{:016x} c{:016x}\n",
+ prefix,
+ self.debug_id,
+ self.ptr,
+ self.cookie,
+ );
+ Ok(())
+ }
+}
+
+/// Represents something that holds one or more ref-counts to a `Node`.
+///
+/// Whenever process A holds a refcount to a node owned by a different process B, then process A
+/// will store a `NodeRef` that refers to the `Node` in process B. When process A releases the
+/// refcount, we destroy the NodeRef, which decrements the ref-count in process B.
+///
+/// This type is also used in some other cases. For example, a transaction allocation holds a
+/// refcount on the target node, and this is implemented by storing a `NodeRef` in the allocation
+/// so that the destructor of the allocation will drop a refcount on the `Node`.
+pub(crate) struct NodeRef {
+ pub(crate) node: DArc<Node>,
+ /// How many times does this NodeRef hold a refcount on the Node?
+ strong_node_count: usize,
+ weak_node_count: usize,
+ /// How many times does userspace hold a refcount on this NodeRef?
+ strong_count: usize,
+ weak_count: usize,
+}
+
+impl NodeRef {
+ pub(crate) fn new(node: DArc<Node>, strong_count: usize, weak_count: usize) -> Self {
+ Self {
+ node,
+ strong_node_count: strong_count,
+ weak_node_count: weak_count,
+ strong_count,
+ weak_count,
+ }
+ }
+
+ pub(crate) fn absorb(&mut self, mut other: Self) {
+ assert!(
+ Arc::ptr_eq(&self.node, &other.node),
+ "absorb called with differing nodes"
+ );
+ self.strong_node_count += other.strong_node_count;
+ self.weak_node_count += other.weak_node_count;
+ self.strong_count += other.strong_count;
+ self.weak_count += other.weak_count;
+ other.strong_count = 0;
+ other.weak_count = 0;
+ other.strong_node_count = 0;
+ other.weak_node_count = 0;
+
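+ // A `NodeRef` never needs more than one refcount of each kind on the `Node` itself, so
+ // collapse any duplicates that the merge introduced back into a single count.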
+ if self.strong_node_count >= 2 || self.weak_node_count >= 2 {
+ let mut guard = self.node.owner.inner.lock();
+ let inner = self.node.inner.access_mut(&mut guard);
+
+ if self.strong_node_count >= 2 {
+ inner.strong.count -= self.strong_node_count - 1;
+ self.strong_node_count = 1;
+ assert_ne!(inner.strong.count, 0);
+ }
+ if self.weak_node_count >= 2 {
+ inner.weak.count -= self.weak_node_count - 1;
+ self.weak_node_count = 1;
+ assert_ne!(inner.weak.count, 0);
+ }
+ }
+ }
+
+ pub(crate) fn get_count(&self) -> (usize, usize) {
+ (self.strong_count, self.weak_count)
+ }
+
+ pub(crate) fn clone(&self, strong: bool) -> Result<NodeRef> {
+ if strong && self.strong_count == 0 {
+ return Err(EINVAL);
+ }
+ Ok(self
+ .node
+ .owner
+ .inner
+ .lock()
+ .new_node_ref(self.node.clone(), strong, None))
+ }
+
+ /// Updates (increments or decrements) the number of references held against the node. If the
+ /// count being updated transitions from 0 to 1 or from 1 to 0, the node is notified by having
+ /// its `update_refcount` function called.
+ ///
+ /// Returns whether `self` should be removed (when both counts are zero).
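+ ///
+ /// For example, the first strong increment (`inc` is true while `strong_count` is zero) calls
+ /// `Node::update_refcount(true, 1, true)`, and the decrement that takes the count back to zero
+ /// mirrors it with `inc` set to false.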
+ pub(crate) fn update(&mut self, inc: bool, strong: bool) -> bool {
+ if strong && self.strong_count == 0 {
+ return false;
+ }
+ let (count, node_count, other_count) = if strong {
+ (
+ &mut self.strong_count,
+ &mut self.strong_node_count,
+ self.weak_count,
+ )
+ } else {
+ (
+ &mut self.weak_count,
+ &mut self.weak_node_count,
+ self.strong_count,
+ )
+ };
+ if inc {
+ if *count == 0 {
+ *node_count = 1;
+ self.node.update_refcount(true, 1, strong);
+ }
+ *count += 1;
+ } else {
+ if *count == 0 {
+ pr_warn!(
+ "pid {} performed invalid decrement on ref\n",
+ kernel::current!().pid()
+ );
+ return false;
+ }
+ *count -= 1;
+ if *count == 0 {
+ self.node.update_refcount(false, *node_count, strong);
+ *node_count = 0;
+ return other_count == 0;
+ }
+ }
+ false
+ }
+}
+
+impl Drop for NodeRef {
+ // This destructor is called conditionally from `Allocation::drop`. That branch is often
+ // mispredicted. Inlining this method call reduces the cost of those branch mispredictions.
+ #[inline(always)]
+ fn drop(&mut self) {
+ if self.strong_node_count > 0 {
+ self.node
+ .update_refcount(false, self.strong_node_count, true);
+ }
+ if self.weak_node_count > 0 {
+ self.node
+ .update_refcount(false, self.weak_node_count, false);
+ }
+ }
+}
+
+struct NodeDeathInner {
+ dead: bool,
+ cleared: bool,
+ notification_done: bool,
+ /// Indicates whether the normal flow was interrupted by removing the handle. In this case, we
+ /// need to behave as if the death notification didn't exist (i.e., we don't deliver anything
+ /// to the user).
+ aborted: bool,
+}
+
+/// Used to deliver notifications when a process dies.
+///
+/// A process can request to be notified when a process dies using `BC_REQUEST_DEATH_NOTIFICATION`.
+/// This will make the driver send a `BR_DEAD_BINDER` to userspace when the process dies (or
+/// immediately if it is already dead). Userspace is supposed to respond with `BC_DEAD_BINDER_DONE`
+/// once it has processed the notification.
+///
+/// Userspace can unregister from death notifications using the `BC_CLEAR_DEATH_NOTIFICATION`
+/// command. In this case, the kernel will respond with `BR_CLEAR_DEATH_NOTIFICATION_DONE` once the
+/// notification has been removed. Note that if the remote process dies before the kernel has
+/// responded with `BR_CLEAR_DEATH_NOTIFICATION_DONE`, then the kernel will still send a
+/// `BR_DEAD_BINDER`, which userspace must be able to process. In this case, the kernel will wait
+/// for the `BC_DEAD_BINDER_DONE` command before it sends `BR_CLEAR_DEATH_NOTIFICATION_DONE`.
+///
+/// Note that even if the kernel sends a `BR_DEAD_BINDER`, this does not remove the death
+/// notification. Userspace must still remove it manually using `BC_CLEAR_DEATH_NOTIFICATION`.
+///
+/// If a process uses `BC_RELEASE` to destroy its last refcount on a node that has an active death
+/// registration, then the death registration is immediately deleted (we implement this using the
+/// `aborted` field). However, userspace is not supposed to delete a `NodeRef` without first
+/// deregistering death notifications, so this codepath is not executed under normal circumstances.
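+///
+/// Illustrative summary of the normal flow described above:
+///
+/// 1. Process A sends `BC_REQUEST_DEATH_NOTIFICATION` for a handle owned by process B.
+/// 2. B dies, so the kernel queues a `BR_DEAD_BINDER` work item for A.
+/// 3. A processes the notification and replies with `BC_DEAD_BINDER_DONE`.
+/// 4. A sends `BC_CLEAR_DEATH_NOTIFICATION` and the kernel replies with
+///    `BR_CLEAR_DEATH_NOTIFICATION_DONE`.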
+#[pin_data]
+pub(crate) struct NodeDeath {
+ node: DArc<Node>,
+ process: Arc<Process>,
+ pub(crate) cookie: u64,
+ #[pin]
+ links_track: AtomicTracker<0>,
+ /// Used by the owner `Node` to store a list of registered death notifications.
+ ///
+ /// # Invariants
+ ///
+ /// Only ever used with the `death_list` list of `self.node`.
+ #[pin]
+ death_links: ListLinks<1>,
+ /// Used by the process to keep track of the death notifications for which we have sent a
+ /// `BR_DEAD_BINDER` but not yet received a `BC_DEAD_BINDER_DONE`.
+ ///
+ /// # Invariants
+ ///
+ /// Only ever used with the `delivered_deaths` list of `self.process`.
+ #[pin]
+ delivered_links: ListLinks<2>,
+ #[pin]
+ delivered_links_track: AtomicTracker<2>,
+ #[pin]
+ inner: SpinLock<NodeDeathInner>,
+}
+
+impl NodeDeath {
+ /// Constructs a new node death notification object.
+ pub(crate) fn new(
+ node: DArc<Node>,
+ process: Arc<Process>,
+ cookie: u64,
+ ) -> impl PinInit<DTRWrap<Self>> {
+ DTRWrap::new(pin_init!(
+ Self {
+ node,
+ process,
+ cookie,
+ links_track <- AtomicTracker::new(),
+ death_links <- ListLinks::new(),
+ delivered_links <- ListLinks::new(),
+ delivered_links_track <- AtomicTracker::new(),
+ inner <- kernel::new_spinlock!(NodeDeathInner {
+ dead: false,
+ cleared: false,
+ notification_done: false,
+ aborted: false,
+ }, "NodeDeath::inner"),
+ }
+ ))
+ }
+
+ /// Sets the cleared flag to `true`.
+ ///
+ /// It removes `self` from the node's death notification list if needed.
+ ///
+ /// Returns whether it needs to be queued.
+ pub(crate) fn set_cleared(self: &DArc<Self>, abort: bool) -> bool {
+ let (needs_removal, needs_queueing) = {
+ // Update state and determine if we need to queue a work item. We only need to do it
+ // when the node is not dead or if the user already completed the death notification.
+ let mut inner = self.inner.lock();
+ if abort {
+ inner.aborted = true;
+ }
+ if inner.cleared {
+ // Already cleared.
+ return false;
+ }
+ inner.cleared = true;
+ (!inner.dead, !inner.dead || inner.notification_done)
+ };
+
+ // Remove death notification from node.
+ if needs_removal {
+ let mut owner_inner = self.node.owner.inner.lock();
+ let node_inner = self.node.inner.access_mut(&mut owner_inner);
+ // SAFETY: A `NodeDeath` is never inserted into the death list of any node other than
+ // its owner, so it is either in this death list or in no death list.
+ unsafe { node_inner.death_list.remove(self) };
+ }
+ needs_queueing
+ }
+
+ /// Sets the 'notification done' flag to `true`.
+ pub(crate) fn set_notification_done(self: DArc<Self>, thread: &Thread) {
+ let needs_queueing = {
+ let mut inner = self.inner.lock();
+ inner.notification_done = true;
+ inner.cleared
+ };
+ if needs_queueing {
+ if let Some(death) = ListArc::try_from_arc_or_drop(self) {
+ let _ = thread.push_work_if_looper(death);
+ }
+ }
+ }
+
+ /// Sets the 'dead' flag to `true` and queues work item if needed.
+ pub(crate) fn set_dead(self: DArc<Self>) {
+ let needs_queueing = {
+ let mut inner = self.inner.lock();
+ if inner.cleared {
+ false
+ } else {
+ inner.dead = true;
+ true
+ }
+ };
+ if needs_queueing {
+ // Push the death notification to the target process. There is nothing else to do if
+ // it's already dead.
+ if let Some(death) = ListArc::try_from_arc_or_drop(self) {
+ let process = death.process.clone();
+ let _ = process.push_work(death);
+ }
+ }
+ }
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for NodeDeath {
+ tracked_by links_track: AtomicTracker;
+ }
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<1> for DTRWrap<NodeDeath> { untracked; }
+}
+kernel::list::impl_list_item! {
+ impl ListItem<1> for DTRWrap<NodeDeath> {
+ using ListLinks { self.wrapped.death_links };
+ }
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<2> for DTRWrap<NodeDeath> {
+ tracked_by wrapped: NodeDeath;
+ }
+}
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<2> for NodeDeath {
+ tracked_by delivered_links_track: AtomicTracker<2>;
+ }
+}
+kernel::list::impl_list_item! {
+ impl ListItem<2> for DTRWrap<NodeDeath> {
+ using ListLinks { self.wrapped.delivered_links };
+ }
+}
+
+impl DeliverToRead for NodeDeath {
+ fn do_work(
+ self: DArc<Self>,
+ _thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool> {
+ let done = {
+ let inner = self.inner.lock();
+ if inner.aborted {
+ return Ok(true);
+ }
+ inner.cleared && (!inner.dead || inner.notification_done)
+ };
+
+ let cookie = self.cookie;
+ let cmd = if done {
+ BR_CLEAR_DEATH_NOTIFICATION_DONE
+ } else {
+ let process = self.process.clone();
+ let mut process_inner = process.inner.lock();
+ let inner = self.inner.lock();
+ if inner.aborted {
+ return Ok(true);
+ }
+ // We're still holding the inner lock, so it cannot be aborted while we insert it into
+ // the delivered list.
+ process_inner.death_delivered(self.clone());
+ BR_DEAD_BINDER
+ };
+
+ writer.write_code(cmd)?;
+ writer.write_payload(&cookie)?;
+ // DEAD_BINDER notifications can cause transactions, so stop processing work items when we
+ // get to a death notification.
+ Ok(cmd != BR_DEAD_BINDER)
+ }
+
+ fn cancel(self: DArc<Self>) {}
+
+ fn should_sync_wakeup(&self) -> bool {
+ false
+ }
+
+ #[inline(never)]
+ fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
+ let inner = self.inner.lock();
+
+ let dead_binder = inner.dead && !inner.notification_done;
+
+ if dead_binder {
+ if inner.cleared {
+ seq_print!(m, "{}has cleared dead binder\n", prefix);
+ } else {
+ seq_print!(m, "{}has dead binder\n", prefix);
+ }
+ } else {
+ seq_print!(m, "{}has cleared death notification\n", prefix);
+ }
+
+ Ok(())
+ }
+}
diff --git a/drivers/android/binder/node/wrapper.rs b/drivers/android/binder/node/wrapper.rs
new file mode 100644
index 000000000000..43294c050502
--- /dev/null
+++ b/drivers/android/binder/node/wrapper.rs
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::{list::ListArc, prelude::*, seq_file::SeqFile, seq_print, sync::UniqueArc};
+
+use crate::{node::Node, thread::Thread, BinderReturnWriter, DArc, DLArc, DTRWrap, DeliverToRead};
+
+use core::mem::MaybeUninit;
+
+pub(crate) struct CritIncrWrapper {
+ inner: UniqueArc<MaybeUninit<DTRWrap<NodeWrapper>>>,
+}
+
+impl CritIncrWrapper {
+ pub(crate) fn new() -> Result<Self> {
+ Ok(CritIncrWrapper {
+ inner: UniqueArc::new_uninit(GFP_KERNEL)?,
+ })
+ }
+
+ pub(super) fn init(self, node: DArc<Node>) -> DLArc<dyn DeliverToRead> {
+ match self.inner.pin_init_with(DTRWrap::new(NodeWrapper { node })) {
+ Ok(initialized) => ListArc::from(initialized) as _,
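+ // The initializer is infallible: its error type is uninhabited, so this arm can never
+ // execute, and the empty match makes that explicit.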
+ Err(err) => match err {},
+ }
+ }
+}
+
+struct NodeWrapper {
+ node: DArc<Node>,
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for NodeWrapper {
+ untracked;
+ }
+}
+
+impl DeliverToRead for NodeWrapper {
+ fn do_work(
+ self: DArc<Self>,
+ _thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool> {
+ let node = &self.node;
+ let mut owner_inner = node.owner.inner.lock();
+ let inner = node.inner.access_mut(&mut owner_inner);
+
+ let ds = &mut inner.delivery_state;
+
+ assert!(ds.has_pushed_wrapper);
+ assert!(ds.has_strong_zero2one);
+ ds.has_pushed_wrapper = false;
+ ds.has_strong_zero2one = false;
+
+ node.do_work_locked(writer, owner_inner)
+ }
+
+ fn cancel(self: DArc<Self>) {}
+
+ fn should_sync_wakeup(&self) -> bool {
+ false
+ }
+
+ #[inline(never)]
+ fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
+ seq_print!(
+ m,
+ "{}node work {}: u{:016x} c{:016x}\n",
+ prefix,
+ self.node.debug_id,
+ self.node.ptr,
+ self.node.cookie,
+ );
+ Ok(())
+ }
+}
diff --git a/drivers/android/binder/page_range.rs b/drivers/android/binder/page_range.rs
new file mode 100644
index 000000000000..9379038f61f5
--- /dev/null
+++ b/drivers/android/binder/page_range.rs
@@ -0,0 +1,734 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+//! This module has utilities for managing a page range where unused pages may be reclaimed by a
+//! vma shrinker.
+
+// To avoid deadlocks, locks are taken in the order:
+//
+// 1. mmap lock
+// 2. spinlock
+// 3. lru spinlock
+//
+// The shrinker will use trylock methods because it locks them in a different order.
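+//
+// For example, `rust_shrink_free_page` below is entered with the lru spinlock already held, which
+// inverts the order above; it therefore only uses trylock variants (`stable_trylock_mm`,
+// `mmap_read_trylock`, `try_lock`) and returns `LRU_SKIP` on contention.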
+
+use core::{
+ marker::PhantomPinned,
+ mem::{size_of, size_of_val, MaybeUninit},
+ ptr,
+};
+
+use kernel::{
+ bindings,
+ error::Result,
+ ffi::{c_ulong, c_void},
+ mm::{virt, Mm, MmWithUser},
+ new_mutex, new_spinlock,
+ page::{Page, PAGE_SHIFT, PAGE_SIZE},
+ prelude::*,
+ str::CStr,
+ sync::{aref::ARef, Mutex, SpinLock},
+ task::Pid,
+ transmute::FromBytes,
+ types::Opaque,
+ uaccess::UserSliceReader,
+};
+
+/// Represents a shrinker that can be registered with the kernel.
+///
+/// Each shrinker can be used by many `ShrinkablePageRange` objects.
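+///
+/// A minimal setup sketch (the static's name and the init function are illustrative, not part of
+/// this patch):
+///
+/// ```ignore
+/// // SAFETY: `register` is called exactly once, from the module initializer below.
+/// static MY_SHRINKER: Shrinker = unsafe { Shrinker::new() };
+///
+/// fn my_module_init() -> Result {
+///     MY_SHRINKER.register(kernel::c_str!("my-driver"))?;
+///     Ok(())
+/// }
+/// ```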
+#[repr(C)]
+pub(crate) struct Shrinker {
+ inner: Opaque<*mut bindings::shrinker>,
+ list_lru: Opaque<bindings::list_lru>,
+}
+
+// SAFETY: The shrinker and list_lru are thread safe.
+unsafe impl Send for Shrinker {}
+// SAFETY: The shrinker and list_lru are thread safe.
+unsafe impl Sync for Shrinker {}
+
+impl Shrinker {
+ /// Create a new shrinker.
+ ///
+ /// # Safety
+ ///
+ /// Before using this shrinker with a `ShrinkablePageRange`, the `register` method must have
+ /// been called exactly once, and it must not have returned an error.
+ pub(crate) const unsafe fn new() -> Self {
+ Self {
+ inner: Opaque::uninit(),
+ list_lru: Opaque::uninit(),
+ }
+ }
+
+ /// Register this shrinker with the kernel.
+ pub(crate) fn register(&'static self, name: &CStr) -> Result<()> {
+ // SAFETY: These fields are not yet used, so it's okay to zero them.
+ unsafe {
+ self.inner.get().write(ptr::null_mut());
+ self.list_lru.get().write_bytes(0, 1);
+ }
+
+ // SAFETY: The field is not yet used, so we can initialize it.
+ let ret = unsafe { bindings::__list_lru_init(self.list_lru.get(), false, ptr::null_mut()) };
+ if ret != 0 {
+ return Err(Error::from_errno(ret));
+ }
+
+ // SAFETY: The `name` points at a valid c string.
+ let shrinker = unsafe { bindings::shrinker_alloc(0, name.as_char_ptr()) };
+ if shrinker.is_null() {
+ // SAFETY: We initialized it, so it's okay to destroy it.
+ unsafe { bindings::list_lru_destroy(self.list_lru.get()) };
+ return Err(ENOMEM);
+ }
+
+ // SAFETY: We're about to register the shrinker, and these are the fields we need to
+ // initialize. (All other fields are already zeroed.)
+ unsafe {
+ (&raw mut (*shrinker).count_objects).write(Some(rust_shrink_count));
+ (&raw mut (*shrinker).scan_objects).write(Some(rust_shrink_scan));
+ (&raw mut (*shrinker).private_data).write(self.list_lru.get().cast());
+ }
+
+ // SAFETY: The new shrinker has been fully initialized, so we can register it.
+ unsafe { bindings::shrinker_register(shrinker) };
+
+ // SAFETY: This initializes the pointer to the shrinker so that we can use it.
+ unsafe { self.inner.get().write(shrinker) };
+
+ Ok(())
+ }
+}
+
+/// A container that manages a page range in a vma.
+///
+/// The pages can be thought of as an array of booleans of whether the pages are usable. The
+/// methods `use_range` and `stop_using_range` set all booleans in a range to true or false
+/// respectively. Initially, no pages are allocated. When a page is not used, it is not freed
+/// immediately. Instead, it is made available to the memory shrinker to free it if the device is
+/// under memory pressure.
+///
+/// It's okay for `use_range` and `stop_using_range` to race with each other, although there's no
+/// way to know whether an index ends up with true or false if a call to `use_range` races with
+/// another call to `stop_using_range` on a given index.
+///
+/// It's also okay for the two methods to race with themselves, e.g. if two threads call
+/// `use_range` on the same index, then that's fine and neither call will return until the page is
+/// allocated and mapped.
+///
+/// The methods that read or write to a range require that the page is marked as in use. So it is
+/// _not_ okay to call `stop_using_range` on a page that is in use by the methods that read or
+/// write to the page.
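+///
+/// A rough usage sketch (illustrative only; assumes `SHRINKER` is a registered `&'static
+/// Shrinker` and `vma` is the vma being mapped):
+///
+/// ```ignore
+/// let range = KBox::pin_init(ShrinkablePageRange::new(SHRINKER), GFP_KERNEL)?;
+/// let num_pages = range.register_with_vma(vma)?;
+/// range.use_range(0, 1)?; // allocate and map the first page
+/// // SAFETY: The page stays in the "in use" state for the duration of the call.
+/// unsafe { range.write(0, &42u32)? };
+/// range.stop_using_range(0, 1); // the shrinker may now reclaim the page
+/// ```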
+#[pin_data(PinnedDrop)]
+pub(crate) struct ShrinkablePageRange {
+ /// Shrinker object registered with the kernel.
+ shrinker: &'static Shrinker,
+ /// Pid using this page range. Only used as debugging information.
+ pid: Pid,
+ /// The mm for the relevant process.
+ mm: ARef<Mm>,
+ /// Used to synchronize calls to `vm_insert_page` and `zap_page_range_single`.
+ #[pin]
+ mm_lock: Mutex<()>,
+ /// Spinlock protecting changes to pages.
+ #[pin]
+ lock: SpinLock<Inner>,
+
+ /// Must not move, since page info has pointers back.
+ #[pin]
+ _pin: PhantomPinned,
+}
+
+struct Inner {
+ /// Array of pages.
+ ///
+ /// Since this is also accessed by the shrinker, we can't use a `Box`, which asserts exclusive
+ /// ownership. To deal with that, we manage it using raw pointers.
+ pages: *mut PageInfo,
+ /// Length of the `pages` array.
+ size: usize,
+ /// The address of the vma to insert the pages into.
+ vma_addr: usize,
+}
+
+// SAFETY: proper locking is in place for `Inner`
+unsafe impl Send for Inner {}
+
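+// Guard for `ShrinkablePageRange::mm_lock` with an extended lifetime; see `stable_trylock_mm`
+// for why the `'static` lifetime is sound.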
+type StableMmGuard =
+ kernel::sync::lock::Guard<'static, (), kernel::sync::lock::mutex::MutexBackend>;
+
+/// An array element that describes the current state of a page.
+///
+/// There are three states:
+///
+/// * Free. The page is None. The `lru` element is not queued.
+/// * Available. The page is Some. The `lru` element is queued to the shrinker's lru.
+/// * Used. The page is Some. The `lru` element is not queued.
+///
+/// When an element is available, the shrinker is able to free the page.
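+///
+/// `use_range` moves a page from Free or Available to Used, `stop_using_range` moves it from Used
+/// back to Available, and only the shrinker moves a page from Available back to Free.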
+#[repr(C)]
+struct PageInfo {
+ lru: bindings::list_head,
+ page: Option<Page>,
+ range: *const ShrinkablePageRange,
+}
+
+impl PageInfo {
+ /// # Safety
+ ///
+ /// The caller ensures that writing to `me.page` is ok, and that the page is not currently set.
+ unsafe fn set_page(me: *mut PageInfo, page: Page) {
+ // SAFETY: This pointer offset is in bounds.
+ let ptr = unsafe { &raw mut (*me).page };
+
+ // SAFETY: The pointer is valid for writing, so also valid for reading.
+ if unsafe { (*ptr).is_some() } {
+ pr_err!("set_page called when there is already a page");
+ // SAFETY: We will initialize the page again below.
+ unsafe { ptr::drop_in_place(ptr) };
+ }
+
+ // SAFETY: The pointer is valid for writing.
+ unsafe { ptr::write(ptr, Some(page)) };
+ }
+
+ /// # Safety
+ ///
+ /// The caller ensures that reading from `me.page` is ok for the duration of 'a.
+ unsafe fn get_page<'a>(me: *const PageInfo) -> Option<&'a Page> {
+ // SAFETY: This pointer offset is in bounds.
+ let ptr = unsafe { &raw const (*me).page };
+
+ // SAFETY: The pointer is valid for reading.
+ unsafe { (*ptr).as_ref() }
+ }
+
+ /// # Safety
+ ///
+ /// The caller ensures that writing to `me.page` is ok for the duration of 'a.
+ unsafe fn take_page(me: *mut PageInfo) -> Option<Page> {
+ // SAFETY: This pointer offset is in bounds.
+ let ptr = unsafe { &raw mut (*me).page };
+
+ // SAFETY: The pointer is valid for reading.
+ unsafe { (*ptr).take() }
+ }
+
+ /// Add this page to the lru list, if not already in the list.
+ ///
+ /// # Safety
+ ///
+ /// The pointer must be valid, and it must be the right shrinker and nid.
+ unsafe fn list_lru_add(me: *mut PageInfo, nid: i32, shrinker: &'static Shrinker) {
+ // SAFETY: This pointer offset is in bounds.
+ let lru_ptr = unsafe { &raw mut (*me).lru };
+ // SAFETY: The lru pointer is valid, and we're not using it with any other lru list.
+ unsafe { bindings::list_lru_add(shrinker.list_lru.get(), lru_ptr, nid, ptr::null_mut()) };
+ }
+
+ /// Remove this page from the lru list, if it is in the list.
+ ///
+ /// # Safety
+ ///
+ /// The pointer must be valid, and it must be the right shrinker and nid.
+ unsafe fn list_lru_del(me: *mut PageInfo, nid: i32, shrinker: &'static Shrinker) {
+ // SAFETY: This pointer offset is in bounds.
+ let lru_ptr = unsafe { &raw mut (*me).lru };
+ // SAFETY: The lru pointer is valid, and we're not using it with any other lru list.
+ unsafe { bindings::list_lru_del(shrinker.list_lru.get(), lru_ptr, nid, ptr::null_mut()) };
+ }
+}
+
+impl ShrinkablePageRange {
+ /// Create a new `ShrinkablePageRange` using the given shrinker.
+ pub(crate) fn new(shrinker: &'static Shrinker) -> impl PinInit<Self, Error> {
+ try_pin_init!(Self {
+ shrinker,
+ pid: kernel::current!().pid(),
+ mm: ARef::from(&**kernel::current!().mm().ok_or(ESRCH)?),
+ mm_lock <- new_mutex!((), "ShrinkablePageRange::mm"),
+ lock <- new_spinlock!(Inner {
+ pages: ptr::null_mut(),
+ size: 0,
+ vma_addr: 0,
+ }, "ShrinkablePageRange"),
+ _pin: PhantomPinned,
+ })
+ }
+
+ pub(crate) fn stable_trylock_mm(&self) -> Option<StableMmGuard> {
+ // SAFETY: This extends the duration of the reference. Since this call happens before
+ // `mm_lock` is taken in the destructor of `ShrinkablePageRange`, the destructor will block
+ // until the returned guard is dropped. This ensures that the guard is valid until dropped.
+ let mm_lock = unsafe { &*ptr::from_ref(&self.mm_lock) };
+
+ mm_lock.try_lock()
+ }
+
+ /// Register a vma with this page range. Returns the size of the region.
+ pub(crate) fn register_with_vma(&self, vma: &virt::VmaNew) -> Result<usize> {
+ let num_bytes = usize::min(vma.end() - vma.start(), bindings::SZ_4M as usize);
+ let num_pages = num_bytes >> PAGE_SHIFT;
+
+ if !ptr::eq::<Mm>(&*self.mm, &**vma.mm()) {
+ pr_debug!("Failed to register with vma: invalid vma->vm_mm");
+ return Err(EINVAL);
+ }
+ if num_pages == 0 {
+ pr_debug!("Failed to register with vma: size zero");
+ return Err(EINVAL);
+ }
+
+ let mut pages = KVVec::<PageInfo>::with_capacity(num_pages, GFP_KERNEL)?;
+
+ // SAFETY: This just initializes the pages array.
+ unsafe {
+ let self_ptr = self as *const ShrinkablePageRange;
+ for i in 0..num_pages {
+ let info = pages.as_mut_ptr().add(i);
+ (&raw mut (*info).range).write(self_ptr);
+ (&raw mut (*info).page).write(None);
+ let lru = &raw mut (*info).lru;
+ (&raw mut (*lru).next).write(lru);
+ (&raw mut (*lru).prev).write(lru);
+ }
+ }
+
+ let mut inner = self.lock.lock();
+ if inner.size > 0 {
+ pr_debug!("Failed to register with vma: already registered");
+ drop(inner);
+ return Err(EBUSY);
+ }
+
+ inner.pages = pages.into_raw_parts().0;
+ inner.size = num_pages;
+ inner.vma_addr = vma.start();
+
+ Ok(num_pages)
+ }
+
+ /// Make sure that the given pages are allocated and mapped.
+ ///
+ /// Must not be called from an atomic context.
+ pub(crate) fn use_range(&self, start: usize, end: usize) -> Result<()> {
+ if start >= end {
+ return Ok(());
+ }
+ let mut inner = self.lock.lock();
+ assert!(end <= inner.size);
+
+ for i in start..end {
+ // SAFETY: This pointer offset is in bounds.
+ let page_info = unsafe { inner.pages.add(i) };
+
+ // SAFETY: The pointer is valid, and we hold the lock so reading from the page is okay.
+ if let Some(page) = unsafe { PageInfo::get_page(page_info) } {
+ // Since we're going to use the page, we should remove it from the lru list so that
+ // the shrinker will not free it.
+ //
+ // SAFETY: The pointer is valid, and this is the right shrinker.
+ //
+ // The shrinker can't free the page between the check and this call to
+ // `list_lru_del` because we hold the lock.
+ unsafe { PageInfo::list_lru_del(page_info, page.nid(), self.shrinker) };
+ } else {
+ // We have to allocate a new page. Use the slow path.
+ drop(inner);
+ // SAFETY: `i < end <= inner.size` so `i` is in bounds.
+ match unsafe { self.use_page_slow(i) } {
+ Ok(()) => {}
+ Err(err) => {
+ pr_warn!("Error in use_page_slow: {:?}", err);
+ return Err(err);
+ }
+ }
+ inner = self.lock.lock();
+ }
+ }
+ Ok(())
+ }
+
+ /// Mark the given page as in use, slow path.
+ ///
+ /// Must not be called from an atomic context.
+ ///
+ /// # Safety
+ ///
+ /// Assumes that `i` is in bounds.
+ #[cold]
+ unsafe fn use_page_slow(&self, i: usize) -> Result<()> {
+ let new_page = Page::alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO)?;
+
+ let mm_mutex = self.mm_lock.lock();
+ let inner = self.lock.lock();
+
+ // SAFETY: This pointer offset is in bounds.
+ let page_info = unsafe { inner.pages.add(i) };
+
+ // SAFETY: The pointer is valid, and we hold the lock so reading from the page is okay.
+ if let Some(page) = unsafe { PageInfo::get_page(page_info) } {
+ // The page was already there, or someone else added the page while we didn't hold the
+ // spinlock.
+ //
+ // SAFETY: The pointer is valid, and this is the right shrinker.
+ //
+ // The shrinker can't free the page between the check and this call to
+ // `list_lru_del` because we hold the lock.
+ unsafe { PageInfo::list_lru_del(page_info, page.nid(), self.shrinker) };
+ return Ok(());
+ }
+
+ let vma_addr = inner.vma_addr;
+ // Release the spinlock while we insert the page into the vma.
+ drop(inner);
+
+ // No overflow since we stay in bounds of the vma.
+ let user_page_addr = vma_addr + (i << PAGE_SHIFT);
+
+ // We use `mmput_async` when dropping the `mm` because `use_page_slow` is usually used from
+ // a remote process. If the call to `mmput` races with the process shutting down, then the
+ // caller of `use_page_slow` becomes responsible for cleaning up the `mm`, which doesn't
+ // happen until it returns to userspace. However, the caller might instead go to sleep and
+ // wait for the owner of the `mm` to wake it up, which doesn't happen because it's in the
+ // middle of a shutdown process that won't complete until the `mm` is dropped. This can
+ // amount to a deadlock.
+ //
+ // Using `mmput_async` avoids this, because then the `mm` cleanup is instead queued to a
+ // workqueue.
+ MmWithUser::into_mmput_async(self.mm.mmget_not_zero().ok_or(ESRCH)?)
+ .mmap_read_lock()
+ .vma_lookup(vma_addr)
+ .ok_or(ESRCH)?
+ .as_mixedmap_vma()
+ .ok_or(ESRCH)?
+ .vm_insert_page(user_page_addr, &new_page)
+ .inspect_err(|err| {
+ pr_warn!(
+ "Failed to vm_insert_page({}): vma_addr:{} i:{} err:{:?}",
+ user_page_addr,
+ vma_addr,
+ i,
+ err
+ )
+ })?;
+
+ let inner = self.lock.lock();
+
+ // SAFETY: The `page_info` pointer is valid and currently does not have a page. The page
+ // can be written to since we hold the lock.
+ //
+ // We released and reacquired the spinlock since we checked that the page is null, but we
+ // always hold the mm_lock mutex when setting the page to a non-null value, so it's not
+ // possible for someone else to have changed it since our check.
+ unsafe { PageInfo::set_page(page_info, new_page) };
+
+ drop(inner);
+ drop(mm_mutex);
+
+ Ok(())
+ }
+
+ /// If the given page is in use, then mark it as available so that the shrinker can free it.
+ ///
+ /// May be called from an atomic context.
+ pub(crate) fn stop_using_range(&self, start: usize, end: usize) {
+ if start >= end {
+ return;
+ }
+ let inner = self.lock.lock();
+ assert!(end <= inner.size);
+
+ for i in (start..end).rev() {
+ // SAFETY: The pointer is in bounds.
+ let page_info = unsafe { inner.pages.add(i) };
+
+ // SAFETY: Okay for reading since we have the lock.
+ if let Some(page) = unsafe { PageInfo::get_page(page_info) } {
+ // SAFETY: The pointer is valid, and it's the right shrinker.
+ unsafe { PageInfo::list_lru_add(page_info, page.nid(), self.shrinker) };
+ }
+ }
+ }
+
+ /// Helper for reading or writing to a range of bytes that may overlap with several pages.
+ ///
+ /// # Safety
+ ///
+ /// All pages touched by this operation must be in use for the duration of this call.
+ unsafe fn iterate<T>(&self, mut offset: usize, mut size: usize, mut cb: T) -> Result
+ where
+ T: FnMut(&Page, usize, usize) -> Result,
+ {
+ if size == 0 {
+ return Ok(());
+ }
+
+ let (pages, num_pages) = {
+ let inner = self.lock.lock();
+ (inner.pages, inner.size)
+ };
+ let num_bytes = num_pages << PAGE_SHIFT;
+
+ // Check that the request is within the buffer.
+ if offset.checked_add(size).ok_or(EFAULT)? > num_bytes {
+ return Err(EFAULT);
+ }
+
+ let mut page_index = offset >> PAGE_SHIFT;
+ offset &= PAGE_SIZE - 1;
+ while size > 0 {
+ let available = usize::min(size, PAGE_SIZE - offset);
+ // SAFETY: The pointer is in bounds.
+ let page_info = unsafe { pages.add(page_index) };
+ // SAFETY: The caller guarantees that this page is in the "in use" state for the
+ // duration of this call to `iterate`, so nobody will change the page.
+ let page = unsafe { PageInfo::get_page(page_info) };
+ if page.is_none() {
+ pr_warn!("Page is null!");
+ }
+ let page = page.ok_or(EFAULT)?;
+ cb(page, offset, available)?;
+ size -= available;
+ page_index += 1;
+ offset = 0;
+ }
+ Ok(())
+ }
+
+ /// Copy from userspace into this page range.
+ ///
+ /// # Safety
+ ///
+ /// All pages touched by this operation must be in use for the duration of this call.
+ pub(crate) unsafe fn copy_from_user_slice(
+ &self,
+ reader: &mut UserSliceReader,
+ offset: usize,
+ size: usize,
+ ) -> Result {
+ // SAFETY: `self.iterate` has the same safety requirements as `copy_from_user_slice`.
+ unsafe {
+ self.iterate(offset, size, |page, offset, to_copy| {
+ page.copy_from_user_slice_raw(reader, offset, to_copy)
+ })
+ }
+ }
+
+ /// Copy from this page range into kernel space.
+ ///
+ /// # Safety
+ ///
+ /// All pages touched by this operation must be in use for the duration of this call.
+ pub(crate) unsafe fn read<T: FromBytes>(&self, offset: usize) -> Result<T> {
+ let mut out = MaybeUninit::<T>::uninit();
+ let mut out_offset = 0;
+ // SAFETY: `self.iterate` has the same safety requirements as `read`.
+ unsafe {
+ self.iterate(offset, size_of::<T>(), |page, offset, to_copy| {
+ // SAFETY: The sum of `offset` and `to_copy` is bounded by the size of T.
+ let obj_ptr = (out.as_mut_ptr() as *mut u8).add(out_offset);
+ // SAFETY: The pointer is in-bounds of the `out` variable, so it is valid.
+ page.read_raw(obj_ptr, offset, to_copy)?;
+ out_offset += to_copy;
+ Ok(())
+ })?;
+ }
+ // SAFETY: We just initialized the data.
+ Ok(unsafe { out.assume_init() })
+ }
+
+ /// Copy from kernel space into this page range.
+ ///
+ /// # Safety
+ ///
+ /// All pages touched by this operation must be in use for the duration of this call.
+ pub(crate) unsafe fn write<T: ?Sized>(&self, offset: usize, obj: &T) -> Result {
+ let mut obj_offset = 0;
+ // SAFETY: `self.iterate` has the same safety requirements as `write`.
+ unsafe {
+ self.iterate(offset, size_of_val(obj), |page, offset, to_copy| {
+ // SAFETY: The sum of `offset` and `to_copy` is bounded by the size of T.
+ let obj_ptr = (obj as *const T as *const u8).add(obj_offset);
+ // SAFETY: We have a reference to the object, so the pointer is valid.
+ page.write_raw(obj_ptr, offset, to_copy)?;
+ obj_offset += to_copy;
+ Ok(())
+ })
+ }
+ }
+
+ /// Write zeroes to the given range.
+ ///
+ /// # Safety
+ ///
+ /// All pages touched by this operation must be in use for the duration of this call.
+ pub(crate) unsafe fn fill_zero(&self, offset: usize, size: usize) -> Result {
+ // SAFETY: `self.iterate` has the same safety requirements as `fill_zero`.
+ unsafe {
+ self.iterate(offset, size, |page, offset, len| {
+ page.fill_zero_raw(offset, len)
+ })
+ }
+ }
+}
+
+#[pinned_drop]
+impl PinnedDrop for ShrinkablePageRange {
+ fn drop(self: Pin<&mut Self>) {
+ let (pages, size) = {
+ let lock = self.lock.lock();
+ (lock.pages, lock.size)
+ };
+
+ if size == 0 {
+ return;
+ }
+
+ // Note: This call is also necessary for the safety of `stable_trylock_mm`.
+ let mm_lock = self.mm_lock.lock();
+
+ // This is the destructor, so unlike the other methods, we only need to worry about races
+ // with the shrinker here. Since we hold the `mm_lock`, we also can't race with the
+ // shrinker, and after this loop, the shrinker will not access any of our pages since we
+ // removed them from the lru list.
+ for i in 0..size {
+ // SAFETY: Loop is in-bounds of the size.
+ let p_ptr = unsafe { pages.add(i) };
+ // SAFETY: No other readers, so we can read.
+ if let Some(p) = unsafe { PageInfo::get_page(p_ptr) } {
+ // SAFETY: The pointer is valid and it's the right shrinker.
+ unsafe { PageInfo::list_lru_del(p_ptr, p.nid(), self.shrinker) };
+ }
+ }
+
+ drop(mm_lock);
+
+ // SAFETY: `pages` was allocated as a `KVVec<PageInfo>` with capacity `size`. Furthermore,
+ // all `size` elements are initialized. Also, the array is no longer shared with the
+ // shrinker due to the above loop.
+ drop(unsafe { KVVec::from_raw_parts(pages, size, size) });
+ }
+}
+
+/// # Safety
+/// Called by the shrinker.
+#[no_mangle]
+unsafe extern "C" fn rust_shrink_count(
+ shrink: *mut bindings::shrinker,
+ _sc: *mut bindings::shrink_control,
+) -> c_ulong {
+ // SAFETY: We can access our own private data.
+ let list_lru = unsafe { (*shrink).private_data.cast::<bindings::list_lru>() };
+ // SAFETY: Accessing the lru list is okay. Just an FFI call.
+ unsafe { bindings::list_lru_count(list_lru) }
+}
+
+/// # Safety
+/// Called by the shrinker.
+#[no_mangle]
+unsafe extern "C" fn rust_shrink_scan(
+ shrink: *mut bindings::shrinker,
+ sc: *mut bindings::shrink_control,
+) -> c_ulong {
+ // SAFETY: We can access our own private data.
+ let list_lru = unsafe { (*shrink).private_data.cast::<bindings::list_lru>() };
+ // SAFETY: Caller guarantees that it is safe to read this field.
+ let nr_to_scan = unsafe { (*sc).nr_to_scan };
+ // SAFETY: Accessing the lru list is okay. Just an FFI call.
+ unsafe {
+ bindings::list_lru_walk(
+ list_lru,
+ Some(bindings::rust_shrink_free_page_wrap),
+ ptr::null_mut(),
+ nr_to_scan,
+ )
+ }
+}
+
+const LRU_SKIP: bindings::lru_status = bindings::lru_status_LRU_SKIP;
+const LRU_REMOVED_ENTRY: bindings::lru_status = bindings::lru_status_LRU_REMOVED_RETRY;
+
+/// # Safety
+/// Called by the shrinker.
+#[no_mangle]
+unsafe extern "C" fn rust_shrink_free_page(
+ item: *mut bindings::list_head,
+ lru: *mut bindings::list_lru_one,
+ _cb_arg: *mut c_void,
+) -> bindings::lru_status {
+ // Fields that should survive after unlocking the lru lock.
+ let page;
+ let page_index;
+ let mm;
+ let mmap_read;
+ let mm_mutex;
+ let vma_addr;
+
+ {
+ // CAST: The `list_head` field is first in `PageInfo`.
+ let info = item as *mut PageInfo;
+ // SAFETY: The `range` field of `PageInfo` is immutable.
+ let range = unsafe { &*((*info).range) };
+
+ mm = match range.mm.mmget_not_zero() {
+ Some(mm) => MmWithUser::into_mmput_async(mm),
+ None => return LRU_SKIP,
+ };
+
+ mm_mutex = match range.stable_trylock_mm() {
+ Some(guard) => guard,
+ None => return LRU_SKIP,
+ };
+
+ mmap_read = match mm.mmap_read_trylock() {
+ Some(guard) => guard,
+ None => return LRU_SKIP,
+ };
+
+ // We can't lock it normally here, since we hold the lru lock.
+ let inner = match range.lock.try_lock() {
+ Some(inner) => inner,
+ None => return LRU_SKIP,
+ };
+
+ // SAFETY: The item is in this lru list, so it's okay to remove it.
+ unsafe { bindings::list_lru_isolate(lru, item) };
+
+ // SAFETY: Both pointers are in bounds of the same allocation.
+ page_index = unsafe { info.offset_from(inner.pages) } as usize;
+
+ // SAFETY: We hold the spinlock, so we can take the page.
+ //
+ // This sets the page pointer to zero before we unmap it from the vma. However, we call
+ // `zap_page_range_single` before we release the mmap lock, so `use_page_slow` will not be
+ // able to insert a new page until after our call to `zap_page_range_single`.
+ page = unsafe { PageInfo::take_page(info) };
+ vma_addr = inner.vma_addr;
+
+ // From this point on, we don't access this PageInfo or ShrinkablePageRange again, because
+ // they can be freed at any point after we unlock `lru_lock`. The exception is `mm_mutex`:
+ // holding that guard blocks the `ShrinkablePageRange` destructor, so it stays valid.
+ }
+
+ // SAFETY: The lru lock is locked when this method is called.
+ unsafe { bindings::spin_unlock(&raw mut (*lru).lock) };
+
+ if let Some(vma) = mmap_read.vma_lookup(vma_addr) {
+ let user_page_addr = vma_addr + (page_index << PAGE_SHIFT);
+ vma.zap_page_range_single(user_page_addr, PAGE_SIZE);
+ }
+
+ drop(mmap_read);
+ drop(mm_mutex);
+ drop(mm);
+ drop(page);
+
+ // SAFETY: We just unlocked the lru lock, but it should be locked when we return.
+ unsafe { bindings::spin_lock(&raw mut (*lru).lock) };
+
+ LRU_REMOVED_ENTRY
+}
diff --git a/drivers/android/binder/page_range_helper.c b/drivers/android/binder/page_range_helper.c
new file mode 100644
index 000000000000..496887723ee0
--- /dev/null
+++ b/drivers/android/binder/page_range_helper.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/* C helper for page_range.rs to work around a CFI violation.
+ *
+ * Bindgen currently pretends that `enum lru_status` is the same as an integer.
+ * This assumption is fine ABI-wise, but once you add CFI to the mix, it
+ * triggers a CFI violation because `enum lru_status` gets a different CFI tag.
+ *
+ * This file contains a workaround until bindgen can be fixed.
+ *
+ * Copyright (C) 2025 Google LLC.
+ */
+#include "page_range_helper.h"
+
+unsigned int rust_shrink_free_page(struct list_head *item,
+ struct list_lru_one *list,
+ void *cb_arg);
+
+enum lru_status
+rust_shrink_free_page_wrap(struct list_head *item, struct list_lru_one *list,
+ void *cb_arg)
+{
+ return rust_shrink_free_page(item, list, cb_arg);
+}
diff --git a/drivers/android/binder/page_range_helper.h b/drivers/android/binder/page_range_helper.h
new file mode 100644
index 000000000000..18dd2dd117b2
--- /dev/null
+++ b/drivers/android/binder/page_range_helper.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025 Google, Inc.
+ */
+
+#ifndef _LINUX_PAGE_RANGE_HELPER_H
+#define _LINUX_PAGE_RANGE_HELPER_H
+
+#include <linux/list_lru.h>
+
+enum lru_status
+rust_shrink_free_page_wrap(struct list_head *item, struct list_lru_one *list,
+ void *cb_arg);
+
+#endif /* _LINUX_PAGE_RANGE_HELPER_H */
diff --git a/drivers/android/binder/process.rs b/drivers/android/binder/process.rs
new file mode 100644
index 000000000000..f13a747e784c
--- /dev/null
+++ b/drivers/android/binder/process.rs
@@ -0,0 +1,1696 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+//! This module defines the `Process` type, which represents a process using a particular binder
+//! context.
+//!
+//! The `Process` object keeps track of all of the resources that this process owns in the binder
+//! context.
+//!
+//! There is one `Process` object for each binder fd that a process has opened, so processes using
+//! several binder contexts have several `Process` objects. This ensures that the contexts are
+//! fully separated.
+
+use core::mem::take;
+
+use kernel::{
+ bindings,
+ cred::Credential,
+ error::Error,
+ fs::file::{self, File},
+ list::{List, ListArc, ListArcField, ListLinks},
+ mm,
+ prelude::*,
+ rbtree::{self, RBTree, RBTreeNode, RBTreeNodeReservation},
+ seq_file::SeqFile,
+ seq_print,
+ sync::poll::PollTable,
+ sync::{
+ lock::{spinlock::SpinLockBackend, Guard},
+ Arc, ArcBorrow, CondVar, CondVarTimeoutResult, Mutex, SpinLock, UniqueArc,
+ },
+ task::Task,
+ types::ARef,
+ uaccess::{UserSlice, UserSliceReader},
+ uapi,
+ workqueue::{self, Work},
+};
+
+use crate::{
+ allocation::{Allocation, AllocationInfo, NewAllocation},
+ context::Context,
+ defs::*,
+ error::{BinderError, BinderResult},
+ node::{CouldNotDeliverCriticalIncrement, CritIncrWrapper, Node, NodeDeath, NodeRef},
+ page_range::ShrinkablePageRange,
+ range_alloc::{RangeAllocator, ReserveNew, ReserveNewArgs},
+ stats::BinderStats,
+ thread::{PushWorkRes, Thread},
+ BinderfsProcFile, DArc, DLArc, DTRWrap, DeliverToRead,
+};
+
+#[path = "freeze.rs"]
+mod freeze;
+use self::freeze::{FreezeCookie, FreezeListener};
+
+struct Mapping {
+ address: usize,
+ alloc: RangeAllocator<AllocationInfo>,
+}
+
+impl Mapping {
+ fn new(address: usize, size: usize) -> Self {
+ Self {
+ address,
+ alloc: RangeAllocator::new(size),
+ }
+ }
+}
+
+// bitflags for defer_work.
+const PROC_DEFER_FLUSH: u8 = 1;
+const PROC_DEFER_RELEASE: u8 = 2;
+
+/// The fields of `Process` protected by the spinlock.
+pub(crate) struct ProcessInner {
+ is_manager: bool,
+ pub(crate) is_dead: bool,
+ threads: RBTree<i32, Arc<Thread>>,
+ /// INVARIANT: Threads pushed to this list must be owned by this process.
+ ready_threads: List<Thread>,
+ nodes: RBTree<u64, DArc<Node>>,
+ mapping: Option<Mapping>,
+ work: List<DTRWrap<dyn DeliverToRead>>,
+ delivered_deaths: List<DTRWrap<NodeDeath>, 2>,
+
+ /// The number of requested threads that haven't registered yet.
+ requested_thread_count: u32,
+ /// The maximum number of threads used by the process thread pool.
+ max_threads: u32,
+ /// The number of threads that have started and registered with the thread pool.
+ started_thread_count: u32,
+
+ /// Bitmap of deferred work to do.
+ defer_work: u8,
+
+ /// Number of transactions to be transmitted before processes in freeze_wait
+ /// are woken up.
+ outstanding_txns: u32,
+ /// Process is frozen and unable to service binder transactions.
+ pub(crate) is_frozen: bool,
+ /// Process received sync transactions since last frozen.
+ pub(crate) sync_recv: bool,
+ /// Process received async transactions since last frozen.
+ pub(crate) async_recv: bool,
+ pub(crate) binderfs_file: Option<BinderfsProcFile>,
+ /// Check for oneway spam
+ oneway_spam_detection_enabled: bool,
+}
+
+impl ProcessInner {
+ fn new() -> Self {
+ Self {
+ is_manager: false,
+ is_dead: false,
+ threads: RBTree::new(),
+ ready_threads: List::new(),
+ mapping: None,
+ nodes: RBTree::new(),
+ work: List::new(),
+ delivered_deaths: List::new(),
+ requested_thread_count: 0,
+ max_threads: 0,
+ started_thread_count: 0,
+ defer_work: 0,
+ outstanding_txns: 0,
+ is_frozen: false,
+ sync_recv: false,
+ async_recv: false,
+ binderfs_file: None,
+ oneway_spam_detection_enabled: false,
+ }
+ }
+
+ /// Schedule the work item for execution on this process.
+ ///
+ /// If any threads are ready for work, then the work item is given directly to that thread and
+ /// it is woken up. Otherwise, it is pushed to the process work list.
+ ///
+ /// This call can fail only if the process is dead. In this case, the work item is returned to
+ /// the caller so that the caller can drop it after releasing the inner process lock. This is
+ /// necessary since the destructor of `Transaction` will take locks that can't necessarily be
+ /// taken while holding the inner process lock.
+ pub(crate) fn push_work(
+ &mut self,
+ work: DLArc<dyn DeliverToRead>,
+ ) -> Result<(), (BinderError, DLArc<dyn DeliverToRead>)> {
+ // Try to find a ready thread to which to push the work.
+ if let Some(thread) = self.ready_threads.pop_front() {
+ // Push to thread while holding state lock. This prevents the thread from giving up
+ // (for example, because of a signal) when we're about to deliver work.
+ match thread.push_work(work) {
+ PushWorkRes::Ok => Ok(()),
+ PushWorkRes::FailedDead(work) => Err((BinderError::new_dead(), work)),
+ }
+ } else if self.is_dead {
+ Err((BinderError::new_dead(), work))
+ } else {
+ let sync = work.should_sync_wakeup();
+
+ // Didn't find a thread waiting for proc work; this can happen
+ // in two scenarios:
+ // 1. All threads are busy handling transactions
+ // In that case, one of those threads should call back into
+ // the kernel driver soon and pick up this work.
+ // 2. Threads are using the (e)poll interface, in which case
+ // they may be blocked on the waitqueue without having been
+ // added to waiting_threads. For this case, we just iterate
+ // over all threads not handling transaction work, and
+ // wake them all up. We wake all because we don't know whether
+ // a thread that called into (e)poll is handling non-binder
+ // work currently.
+ self.work.push_back(work);
+
+ // Wake up polling threads, if any.
+ for thread in self.threads.values() {
+ thread.notify_if_poll_ready(sync);
+ }
+
+ Ok(())
+ }
+ }
+
+ pub(crate) fn remove_node(&mut self, ptr: u64) {
+ self.nodes.remove(&ptr);
+ }
+
+ /// Updates the reference count on the given node.
+ pub(crate) fn update_node_refcount(
+ &mut self,
+ node: &DArc<Node>,
+ inc: bool,
+ strong: bool,
+ count: usize,
+ othread: Option<&Thread>,
+ ) {
+ let push = node.update_refcount_locked(inc, strong, count, self);
+
+ // If we decided that we need to push work, push either to the process or to a thread if
+ // one is specified.
+ if let Some(node) = push {
+ if let Some(thread) = othread {
+ thread.push_work_deferred(node);
+ } else {
+ let _ = self.push_work(node);
+ // Nothing to do: `push_work` may fail if the process is dead, but that's ok as in
+ // that case, it doesn't care about the notification.
+ }
+ }
+ }
+
+ pub(crate) fn new_node_ref(
+ &mut self,
+ node: DArc<Node>,
+ strong: bool,
+ thread: Option<&Thread>,
+ ) -> NodeRef {
+ self.update_node_refcount(&node, true, strong, 1, thread);
+ let strong_count = if strong { 1 } else { 0 };
+ NodeRef::new(node, strong_count, 1 - strong_count)
+ }
+
+ pub(crate) fn new_node_ref_with_thread(
+ &mut self,
+ node: DArc<Node>,
+ strong: bool,
+ thread: &Thread,
+ wrapper: Option<CritIncrWrapper>,
+ ) -> Result<NodeRef, CouldNotDeliverCriticalIncrement> {
+ let push = match wrapper {
+ None => node
+ .incr_refcount_allow_zero2one(strong, self)?
+ .map(|node| node as _),
+ Some(wrapper) => node.incr_refcount_allow_zero2one_with_wrapper(strong, wrapper, self),
+ };
+ if let Some(node) = push {
+ thread.push_work_deferred(node);
+ }
+ let strong_count = if strong { 1 } else { 0 };
+ Ok(NodeRef::new(node, strong_count, 1 - strong_count))
+ }
+
+ /// Returns an existing node with the given pointer and cookie, if one exists.
+ ///
+ /// Returns an error if a node with the given pointer but a different cookie exists.
+ fn get_existing_node(&self, ptr: u64, cookie: u64) -> Result<Option<DArc<Node>>> {
+ match self.nodes.get(&ptr) {
+ None => Ok(None),
+ Some(node) => {
+ let (_, node_cookie) = node.get_id();
+ if node_cookie == cookie {
+ Ok(Some(node.clone()))
+ } else {
+ Err(EINVAL)
+ }
+ }
+ }
+ }
+
+ fn register_thread(&mut self) -> bool {
+ if self.requested_thread_count == 0 {
+ return false;
+ }
+
+ self.requested_thread_count -= 1;
+ self.started_thread_count += 1;
+ true
+ }
+
+ /// Finds a delivered death notification with the given cookie, removes it from the thread's
+ /// delivered list, and returns it.
+ fn pull_delivered_death(&mut self, cookie: u64) -> Option<DArc<NodeDeath>> {
+ let mut cursor = self.delivered_deaths.cursor_front();
+ while let Some(next) = cursor.peek_next() {
+ if next.cookie == cookie {
+ return Some(next.remove().into_arc());
+ }
+ cursor.move_next();
+ }
+ None
+ }
+
+ pub(crate) fn death_delivered(&mut self, death: DArc<NodeDeath>) {
+ if let Some(death) = ListArc::try_from_arc_or_drop(death) {
+ self.delivered_deaths.push_back(death);
+ } else {
+ pr_warn!("Notification added to `delivered_deaths` twice.");
+ }
+ }
+
+ pub(crate) fn add_outstanding_txn(&mut self) {
+ self.outstanding_txns += 1;
+ }
+
+ fn txns_pending_locked(&self) -> bool {
+ if self.outstanding_txns > 0 {
+ return true;
+ }
+ for thread in self.threads.values() {
+ if thread.has_current_transaction() {
+ return true;
+ }
+ }
+ false
+ }
+}
+
+/// Used to keep track of a node that this process has a handle to.
+#[pin_data]
+pub(crate) struct NodeRefInfo {
+ debug_id: usize,
+ /// The refcount that this process holds on the node.
+ node_ref: ListArcField<NodeRef, { Self::LIST_PROC }>,
+ death: ListArcField<Option<DArc<NodeDeath>>, { Self::LIST_PROC }>,
+ /// Cookie of the active freeze listener for this node.
+ freeze: ListArcField<Option<FreezeCookie>, { Self::LIST_PROC }>,
+ /// Used to store this `NodeRefInfo` in the node's `refs` list.
+ #[pin]
+ links: ListLinks<{ Self::LIST_NODE }>,
+ /// The handle for this `NodeRefInfo`.
+ handle: u32,
+ /// The process that has a handle to the node.
+ pub(crate) process: Arc<Process>,
+}
+
+impl NodeRefInfo {
+ /// The id used for the `Node::refs` list.
+ pub(crate) const LIST_NODE: u64 = 0x2da16350fb724a10;
+ /// The id used for the `ListArc` in `ProcessNodeRefs`.
+ const LIST_PROC: u64 = 0xd703a5263dcc8650;
+
+ fn new(node_ref: NodeRef, handle: u32, process: Arc<Process>) -> impl PinInit<Self> {
+ pin_init!(Self {
+ debug_id: super::next_debug_id(),
+ node_ref: ListArcField::new(node_ref),
+ death: ListArcField::new(None),
+ freeze: ListArcField::new(None),
+ links <- ListLinks::new(),
+ handle,
+ process,
+ })
+ }
+
+ kernel::list::define_list_arc_field_getter! {
+ pub(crate) fn death(&mut self<{Self::LIST_PROC}>) -> &mut Option<DArc<NodeDeath>> { death }
+ pub(crate) fn freeze(&mut self<{Self::LIST_PROC}>) -> &mut Option<FreezeCookie> { freeze }
+ pub(crate) fn node_ref(&mut self<{Self::LIST_PROC}>) -> &mut NodeRef { node_ref }
+ pub(crate) fn node_ref2(&self<{Self::LIST_PROC}>) -> &NodeRef { node_ref }
+ }
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<{Self::LIST_NODE}> for NodeRefInfo { untracked; }
+ impl ListArcSafe<{Self::LIST_PROC}> for NodeRefInfo { untracked; }
+}
+kernel::list::impl_list_item! {
+ impl ListItem<{Self::LIST_NODE}> for NodeRefInfo {
+ using ListLinks { self.links };
+ }
+}
+
+/// Keeps track of references this process has to nodes owned by other processes.
+///
+/// TODO: Currently, the rbtree requires two allocations per node reference, and two tree
+/// traversals to look up a node by `Node::global_id`. Once the rbtree is more powerful, these
+/// extra costs should be eliminated.
+struct ProcessNodeRefs {
+ /// Used to look up nodes using the 32-bit id that this process knows it by.
+ by_handle: RBTree<u32, ListArc<NodeRefInfo, { NodeRefInfo::LIST_PROC }>>,
+ /// Used to look up nodes without knowing their local 32-bit id. The usize is the address of
+ /// the underlying `Node` struct as returned by `Node::global_id`.
+ by_node: RBTree<usize, u32>,
+ /// Used to look up a `FreezeListener` by cookie.
+ ///
+ /// There might be multiple freeze listeners for the same node, but at most one of them is
+ /// active.
+ freeze_listeners: RBTree<FreezeCookie, FreezeListener>,
+}
+
+impl ProcessNodeRefs {
+ fn new() -> Self {
+ Self {
+ by_handle: RBTree::new(),
+ by_node: RBTree::new(),
+ freeze_listeners: RBTree::new(),
+ }
+ }
+}
+
+/// A process using binder.
+///
+/// Strictly speaking, there can be multiple of these per process. There is one for each binder fd
+/// that a process has opened, so processes using several binder contexts have several `Process`
+/// objects. This ensures that the contexts are fully separated.
+#[pin_data]
+pub(crate) struct Process {
+ pub(crate) ctx: Arc<Context>,
+
+ // The task leader (process).
+ pub(crate) task: ARef<Task>,
+
+ // Credential associated with file when `Process` is created.
+ pub(crate) cred: ARef<Credential>,
+
+ #[pin]
+ pub(crate) inner: SpinLock<ProcessInner>,
+
+ #[pin]
+ pub(crate) pages: ShrinkablePageRange,
+
+ // Waitqueue of processes waiting for all outstanding transactions to be
+ // processed.
+ #[pin]
+ freeze_wait: CondVar,
+
+ // Node references are in a different lock to avoid recursive acquisition when
+ // incrementing/decrementing a node in another process.
+ #[pin]
+ node_refs: Mutex<ProcessNodeRefs>,
+
+ // Work node for deferred work item.
+ #[pin]
+ defer_work: Work<Process>,
+
+ // Links for process list in Context.
+ #[pin]
+ links: ListLinks,
+
+ pub(crate) stats: BinderStats,
+}
+
+kernel::impl_has_work! {
+ impl HasWork<Process> for Process { self.defer_work }
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for Process { untracked; }
+}
+kernel::list::impl_list_item! {
+ impl ListItem<0> for Process {
+ using ListLinks { self.links };
+ }
+}
+
+impl workqueue::WorkItem for Process {
+ type Pointer = Arc<Process>;
+
+ fn run(me: Arc<Self>) {
+ let defer;
+ {
+ let mut inner = me.inner.lock();
+ defer = inner.defer_work;
+ inner.defer_work = 0;
+ }
+
+ if defer & PROC_DEFER_FLUSH != 0 {
+ me.deferred_flush();
+ }
+ if defer & PROC_DEFER_RELEASE != 0 {
+ me.deferred_release();
+ }
+ }
+}
+
+impl Process {
+ fn new(ctx: Arc<Context>, cred: ARef<Credential>) -> Result<Arc<Self>> {
+ let current = kernel::current!();
+ let list_process = ListArc::pin_init::<Error>(
+ try_pin_init!(Process {
+ ctx,
+ cred,
+ inner <- kernel::new_spinlock!(ProcessInner::new(), "Process::inner"),
+ pages <- ShrinkablePageRange::new(&super::BINDER_SHRINKER),
+ node_refs <- kernel::new_mutex!(ProcessNodeRefs::new(), "Process::node_refs"),
+ freeze_wait <- kernel::new_condvar!("Process::freeze_wait"),
+ task: current.group_leader().into(),
+ defer_work <- kernel::new_work!("Process::defer_work"),
+ links <- ListLinks::new(),
+ stats: BinderStats::new(),
+ }),
+ GFP_KERNEL,
+ )?;
+
+ let process = list_process.clone_arc();
+ process.ctx.register_process(list_process);
+
+ Ok(process)
+ }
+
+ pub(crate) fn pid_in_current_ns(&self) -> kernel::task::Pid {
+ self.task.tgid_nr_ns(None)
+ }
+
+ #[inline(never)]
+ pub(crate) fn debug_print_stats(&self, m: &SeqFile, ctx: &Context) -> Result<()> {
+ seq_print!(m, "proc {}\n", self.pid_in_current_ns());
+ seq_print!(m, "context {}\n", &*ctx.name);
+
+ let inner = self.inner.lock();
+ seq_print!(m, " threads: {}\n", inner.threads.iter().count());
+ seq_print!(
+ m,
+ " requested threads: {}+{}/{}\n",
+ inner.requested_thread_count,
+ inner.started_thread_count,
+ inner.max_threads,
+ );
+ if let Some(mapping) = &inner.mapping {
+ seq_print!(
+ m,
+ " free oneway space: {}\n",
+ mapping.alloc.free_oneway_space()
+ );
+ seq_print!(m, " buffers: {}\n", mapping.alloc.count_buffers());
+ }
+ seq_print!(
+ m,
+ " outstanding transactions: {}\n",
+ inner.outstanding_txns
+ );
+ seq_print!(m, " nodes: {}\n", inner.nodes.iter().count());
+ drop(inner);
+
+ {
+ let mut refs = self.node_refs.lock();
+ let (mut count, mut weak, mut strong) = (0, 0, 0);
+ for r in refs.by_handle.values_mut() {
+ let node_ref = r.node_ref();
+ let (nstrong, nweak) = node_ref.get_count();
+ count += 1;
+ weak += nweak;
+ strong += nstrong;
+ }
+ seq_print!(m, " refs: {count} s {strong} w {weak}\n");
+ }
+
+ self.stats.debug_print(" ", m);
+
+ Ok(())
+ }
+
+ #[inline(never)]
+ pub(crate) fn debug_print(&self, m: &SeqFile, ctx: &Context, print_all: bool) -> Result<()> {
+ seq_print!(m, "proc {}\n", self.pid_in_current_ns());
+ seq_print!(m, "context {}\n", &*ctx.name);
+
+ let mut all_threads = KVec::new();
+ let mut all_nodes = KVec::new();
+ loop {
+ let inner = self.inner.lock();
+ let num_threads = inner.threads.iter().count();
+ let num_nodes = inner.nodes.iter().count();
+
+ if all_threads.capacity() < num_threads || all_nodes.capacity() < num_nodes {
+ drop(inner);
+ all_threads.reserve(num_threads, GFP_KERNEL)?;
+ all_nodes.reserve(num_nodes, GFP_KERNEL)?;
+ continue;
+ }
+
+ for thread in inner.threads.values() {
+ assert!(all_threads.len() < all_threads.capacity());
+ let _ = all_threads.push(thread.clone(), GFP_ATOMIC);
+ }
+
+ for node in inner.nodes.values() {
+ assert!(all_nodes.len() < all_nodes.capacity());
+ let _ = all_nodes.push(node.clone(), GFP_ATOMIC);
+ }
+
+ break;
+ }
+
+ for thread in all_threads {
+ thread.debug_print(m, print_all)?;
+ }
+
+ let mut inner = self.inner.lock();
+ for node in all_nodes {
+ if print_all || node.has_oneway_transaction(&mut inner) {
+ node.full_debug_print(m, &mut inner)?;
+ }
+ }
+ drop(inner);
+
+ if print_all {
+ let mut refs = self.node_refs.lock();
+ for r in refs.by_handle.values_mut() {
+ let node_ref = r.node_ref();
+ let dead = node_ref.node.owner.inner.lock().is_dead;
+ let (strong, weak) = node_ref.get_count();
+ let debug_id = node_ref.node.debug_id;
+
+ seq_print!(
+ m,
+ " ref {}: desc {} {}node {debug_id} s {strong} w {weak}",
+ r.debug_id,
+ r.handle,
+ if dead { "dead " } else { "" },
+ );
+ }
+ }
+
+ let inner = self.inner.lock();
+ for work in &inner.work {
+ work.debug_print(m, " ", " pending transaction ")?;
+ }
+ for _death in &inner.delivered_deaths {
+ seq_print!(m, " has delivered dead binder\n");
+ }
+ if let Some(mapping) = &inner.mapping {
+ mapping.alloc.debug_print(m)?;
+ }
+ drop(inner);
+
+ Ok(())
+ }
+
+ /// Attempts to fetch a work item from the process queue.
+ pub(crate) fn get_work(&self) -> Option<DLArc<dyn DeliverToRead>> {
+ self.inner.lock().work.pop_front()
+ }
+
+ /// Attempts to fetch a work item from the process queue. If none is available, it registers the
+ /// given thread as ready to receive work directly.
+ ///
+ /// This must only be called when the thread is not participating in a transaction chain; when
+ /// it is, work will always be delivered directly to the thread (and not through the process
+ /// queue).
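+ ///
+ /// A caller typically matches on the result; a minimal sketch (`deliver` and
+ /// `wait_for_work` are hypothetical helpers, not part of this driver):
+ ///
+ /// ```ignore
+ /// match proc.get_work_or_register(&thread) {
+ ///     GetWorkOrRegister::Work(work) => deliver(work),
+ ///     // Dropping the `Registration` removes the thread from `ready_threads` again.
+ ///     GetWorkOrRegister::Register(registration) => wait_for_work(registration),
+ /// }
+ /// ```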
+ pub(crate) fn get_work_or_register<'a>(
+ &'a self,
+ thread: &'a Arc<Thread>,
+ ) -> GetWorkOrRegister<'a> {
+ let mut inner = self.inner.lock();
+ // Try to get work from the process queue.
+ if let Some(work) = inner.work.pop_front() {
+ return GetWorkOrRegister::Work(work);
+ }
+
+ // Register the thread as ready.
+ GetWorkOrRegister::Register(Registration::new(thread, &mut inner))
+ }
+
+ fn get_current_thread(self: ArcBorrow<'_, Self>) -> Result<Arc<Thread>> {
+ let id = {
+ let current = kernel::current!();
+ if !core::ptr::eq(current.group_leader(), &*self.task) {
+ pr_err!("get_current_thread was called from the wrong process.");
+ return Err(EINVAL);
+ }
+ current.pid()
+ };
+
+ {
+ let inner = self.inner.lock();
+ if let Some(thread) = inner.threads.get(&id) {
+ return Ok(thread.clone());
+ }
+ }
+
+ // Allocate a new `Thread` without holding any locks.
+ let reservation = RBTreeNodeReservation::new(GFP_KERNEL)?;
+ let ta: Arc<Thread> = Thread::new(id, self.into())?;
+
+ let mut inner = self.inner.lock();
+ match inner.threads.entry(id) {
+ rbtree::Entry::Vacant(entry) => {
+ entry.insert(ta.clone(), reservation);
+ Ok(ta)
+ }
+ rbtree::Entry::Occupied(_entry) => {
+ pr_err!("Cannot create two threads with the same id.");
+ Err(EINVAL)
+ }
+ }
+ }
+
+ pub(crate) fn push_work(&self, work: DLArc<dyn DeliverToRead>) -> BinderResult {
+ // If push_work fails, drop the work item outside the lock.
+ let res = self.inner.lock().push_work(work);
+ match res {
+ Ok(()) => Ok(()),
+ Err((err, work)) => {
+ drop(work);
+ Err(err)
+ }
+ }
+ }
+
+ fn set_as_manager(
+ self: ArcBorrow<'_, Self>,
+ info: Option<FlatBinderObject>,
+ thread: &Thread,
+ ) -> Result {
+ let (ptr, cookie, flags) = if let Some(obj) = info {
+ (
+ // SAFETY: The object type for this ioctl is implicitly `BINDER_TYPE_BINDER`, so it
+ // is safe to access the `binder` field.
+ unsafe { obj.__bindgen_anon_1.binder },
+ obj.cookie,
+ obj.flags,
+ )
+ } else {
+ (0, 0, 0)
+ };
+ let node_ref = self.get_node(ptr, cookie, flags as _, true, thread)?;
+ let node = node_ref.node.clone();
+ self.ctx.set_manager_node(node_ref)?;
+ self.inner.lock().is_manager = true;
+
+ // Force the state of the node to prevent the delivery of acquire/increfs.
+ let mut owner_inner = node.owner.inner.lock();
+ node.force_has_count(&mut owner_inner);
+ Ok(())
+ }
+
+ fn get_node_inner(
+ self: ArcBorrow<'_, Self>,
+ ptr: u64,
+ cookie: u64,
+ flags: u32,
+ strong: bool,
+ thread: &Thread,
+ wrapper: Option<CritIncrWrapper>,
+ ) -> Result<Result<NodeRef, CouldNotDeliverCriticalIncrement>> {
+ // Try to find an existing node.
+ {
+ let mut inner = self.inner.lock();
+ if let Some(node) = inner.get_existing_node(ptr, cookie)? {
+ return Ok(inner.new_node_ref_with_thread(node, strong, thread, wrapper));
+ }
+ }
+
+ // Allocate the node before reacquiring the lock.
+ let node = DTRWrap::arc_pin_init(Node::new(ptr, cookie, flags, self.into()))?.into_arc();
+ let rbnode = RBTreeNode::new(ptr, node.clone(), GFP_KERNEL)?;
+ let mut inner = self.inner.lock();
+ if let Some(node) = inner.get_existing_node(ptr, cookie)? {
+ return Ok(inner.new_node_ref_with_thread(node, strong, thread, wrapper));
+ }
+
+ inner.nodes.insert(rbnode);
+ // This can only fail if someone has already pushed the node to a list, but we just created
+ // it and still hold the lock, so it can't fail right now.
+ let node_ref = inner
+ .new_node_ref_with_thread(node, strong, thread, wrapper)
+ .unwrap();
+
+ Ok(Ok(node_ref))
+ }
+
+ pub(crate) fn get_node(
+ self: ArcBorrow<'_, Self>,
+ ptr: u64,
+ cookie: u64,
+ flags: u32,
+ strong: bool,
+ thread: &Thread,
+ ) -> Result<NodeRef> {
+ let mut wrapper = None;
+ for _ in 0..2 {
+ match self.get_node_inner(ptr, cookie, flags, strong, thread, wrapper) {
+ Err(err) => return Err(err),
+ Ok(Ok(node_ref)) => return Ok(node_ref),
+ Ok(Err(CouldNotDeliverCriticalIncrement)) => {
+ wrapper = Some(CritIncrWrapper::new()?);
+ }
+ }
+ }
+ // We only get a `CouldNotDeliverCriticalIncrement` error if `wrapper` is `None`, so the
+ // loop should run at most twice.
+ unreachable!()
+ }
+
+ pub(crate) fn insert_or_update_handle(
+ self: ArcBorrow<'_, Process>,
+ node_ref: NodeRef,
+ is_manager: bool,
+ ) -> Result<u32> {
+ {
+ let mut refs = self.node_refs.lock();
+
+ // Do a lookup before inserting.
+ if let Some(handle_ref) = refs.by_node.get(&node_ref.node.global_id()) {
+ let handle = *handle_ref;
+ let info = refs.by_handle.get_mut(&handle).unwrap();
+ info.node_ref().absorb(node_ref);
+ return Ok(handle);
+ }
+ }
+
+ // Reserve memory for tree nodes.
+ let reserve1 = RBTreeNodeReservation::new(GFP_KERNEL)?;
+ let reserve2 = RBTreeNodeReservation::new(GFP_KERNEL)?;
+ let info = UniqueArc::new_uninit(GFP_KERNEL)?;
+
+ let mut refs = self.node_refs.lock();
+
+ // Do a lookup again as node may have been inserted before the lock was reacquired.
+ if let Some(handle_ref) = refs.by_node.get(&node_ref.node.global_id()) {
+ let handle = *handle_ref;
+ let info = refs.by_handle.get_mut(&handle).unwrap();
+ info.node_ref().absorb(node_ref);
+ return Ok(handle);
+ }
+
+ // Find id.
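+ // The sorted handles are scanned for the lowest unused value, e.g. with
+ // handles {0, 1, 3} in use, a non-manager caller gets 2. Handle 0 is
+ // reserved for the context manager.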
+ let mut target: u32 = if is_manager { 0 } else { 1 };
+ for handle in refs.by_handle.keys() {
+ if *handle > target {
+ break;
+ }
+ if *handle == target {
+ target = target.checked_add(1).ok_or(ENOMEM)?;
+ }
+ }
+
+ let gid = node_ref.node.global_id();
+ let (info_proc, info_node) = {
+ let info_init = NodeRefInfo::new(node_ref, target, self.into());
+ match info.pin_init_with(info_init) {
+ Ok(info) => ListArc::pair_from_pin_unique(info),
+ // The initializer is infallible, so this branch is unreachable.
+ Err(err) => match err {},
+ }
+ };
+
+ // Ensure the process is still alive while we insert a new reference.
+ //
+ // This releases the lock before inserting the nodes, but since `is_dead` is set as the
+ // first thing in `deferred_release`, process cleanup will not miss the items inserted into
+ // `refs` below.
+ if self.inner.lock().is_dead {
+ return Err(ESRCH);
+ }
+
+ // SAFETY: `info_proc` and `info_node` reference the same node, so we are inserting
+ // `info_node` into the right node's `refs` list.
+ unsafe { info_proc.node_ref2().node.insert_node_info(info_node) };
+
+ refs.by_node.insert(reserve1.into_node(gid, target));
+ refs.by_handle.insert(reserve2.into_node(target, info_proc));
+ Ok(target)
+ }
+
+ pub(crate) fn get_transaction_node(&self, handle: u32) -> BinderResult<NodeRef> {
+ // When handle is zero, try to get the context manager.
+ if handle == 0 {
+ Ok(self.ctx.get_manager_node(true)?)
+ } else {
+ Ok(self.get_node_from_handle(handle, true)?)
+ }
+ }
+
+ pub(crate) fn get_node_from_handle(&self, handle: u32, strong: bool) -> Result<NodeRef> {
+ self.node_refs
+ .lock()
+ .by_handle
+ .get_mut(&handle)
+ .ok_or(ENOENT)?
+ .node_ref()
+ .clone(strong)
+ }
+
+ pub(crate) fn remove_from_delivered_deaths(&self, death: &DArc<NodeDeath>) {
+ let mut inner = self.inner.lock();
+ // SAFETY: By the invariant on the `delivered_links` field, this is the right linked list.
+ let removed = unsafe { inner.delivered_deaths.remove(death) };
+ drop(inner);
+ drop(removed);
+ }
+
+ pub(crate) fn update_ref(
+ self: ArcBorrow<'_, Process>,
+ handle: u32,
+ inc: bool,
+ strong: bool,
+ ) -> Result {
+ if inc && handle == 0 {
+ if let Ok(node_ref) = self.ctx.get_manager_node(strong) {
+ if core::ptr::eq(&*self, &*node_ref.node.owner) {
+ return Err(EINVAL);
+ }
+ let _ = self.insert_or_update_handle(node_ref, true);
+ return Ok(());
+ }
+ }
+
+ // To preserve original binder behaviour, we only fail requests where the manager tries to
+ // increment references on itself.
+ let mut refs = self.node_refs.lock();
+ if let Some(info) = refs.by_handle.get_mut(&handle) {
+ if info.node_ref().update(inc, strong) {
+ // Clean up death if there is one attached to this node reference.
+ if let Some(death) = info.death().take() {
+ death.set_cleared(true);
+ self.remove_from_delivered_deaths(&death);
+ }
+
+ // Remove reference from process tables, and from the node's `refs` list.
+
+ // SAFETY: We are removing the `NodeRefInfo` from the right node.
+ unsafe { info.node_ref2().node.remove_node_info(info) };
+
+ let id = info.node_ref().node.global_id();
+ refs.by_handle.remove(&handle);
+ refs.by_node.remove(&id);
+ }
+ } else {
+ // All refs are cleared in process exit, so this warning is expected in that case.
+ if !self.inner.lock().is_dead {
+ pr_warn!("{}: no such ref {handle}\n", self.pid_in_current_ns());
+ }
+ }
+ Ok(())
+ }
+
+ /// Decrements the refcount of the given node, if one exists.
+ pub(crate) fn update_node(&self, ptr: u64, cookie: u64, strong: bool) {
+ let mut inner = self.inner.lock();
+ if let Ok(Some(node)) = inner.get_existing_node(ptr, cookie) {
+ inner.update_node_refcount(&node, false, strong, 1, None);
+ }
+ }
+
+ pub(crate) fn inc_ref_done(&self, reader: &mut UserSliceReader, strong: bool) -> Result {
+ let ptr = reader.read::<u64>()?;
+ let cookie = reader.read::<u64>()?;
+ let mut inner = self.inner.lock();
+ if let Ok(Some(node)) = inner.get_existing_node(ptr, cookie) {
+ if let Some(node) = node.inc_ref_done_locked(strong, &mut inner) {
+ // This only fails if the process is dead.
+ let _ = inner.push_work(node);
+ }
+ }
+ Ok(())
+ }
+
+ pub(crate) fn buffer_alloc(
+ self: &Arc<Self>,
+ debug_id: usize,
+ size: usize,
+ is_oneway: bool,
+ from_pid: i32,
+ ) -> BinderResult<NewAllocation> {
+ use kernel::page::PAGE_SIZE;
+
+ let mut reserve_new_args = ReserveNewArgs {
+ debug_id,
+ size,
+ is_oneway,
+ pid: from_pid,
+ ..ReserveNewArgs::default()
+ };
+
+ let (new_alloc, addr) = loop {
+ let mut inner = self.inner.lock();
+ let mapping = inner.mapping.as_mut().ok_or_else(BinderError::new_dead)?;
+ let alloc_request = match mapping.alloc.reserve_new(reserve_new_args)? {
+ ReserveNew::Success(new_alloc) => break (new_alloc, mapping.address),
+ ReserveNew::NeedAlloc(request) => request,
+ };
+ drop(inner);
+ // We need to allocate memory and then call `reserve_new` again.
+ reserve_new_args = alloc_request.make_alloc()?;
+ };
+
+ let res = Allocation::new(
+ self.clone(),
+ debug_id,
+ new_alloc.offset,
+ size,
+ addr + new_alloc.offset,
+ new_alloc.oneway_spam_detected,
+ );
+
+ // This allocation will be marked as in use until the `Allocation` is used to free it.
+ //
+ // This method can't be called while holding a lock, so we release the lock first. It's
+ // okay for several threads to use the method on the same index at the same time. In that
+ // case, one of the calls will allocate the given page (if missing), and the other call
+ // will wait for the other call to finish allocating the page.
+ //
+ // We will not call `stop_using_range` in parallel with this on the same page, because the
+ // allocation can only be removed via the destructor of the `Allocation` object that we
+ // currently own.
+ match self.pages.use_range(
+ new_alloc.offset / PAGE_SIZE,
+ (new_alloc.offset + size).div_ceil(PAGE_SIZE),
+ ) {
+ Ok(()) => {}
+ Err(err) => {
+ pr_warn!("use_range failure {:?}", err);
+ return Err(err.into());
+ }
+ }
+
+ Ok(NewAllocation(res))
+ }
+
+ pub(crate) fn buffer_get(self: &Arc<Self>, ptr: usize) -> Option<Allocation> {
+ let mut inner = self.inner.lock();
+ let mapping = inner.mapping.as_mut()?;
+ let offset = ptr.checked_sub(mapping.address)?;
+ let (size, debug_id, odata) = mapping.alloc.reserve_existing(offset).ok()?;
+ let mut alloc = Allocation::new(self.clone(), debug_id, offset, size, ptr, false);
+ if let Some(data) = odata {
+ alloc.set_info(data);
+ }
+ Some(alloc)
+ }
+
+ pub(crate) fn buffer_raw_free(&self, ptr: usize) {
+ let mut inner = self.inner.lock();
+ if let Some(ref mut mapping) = &mut inner.mapping {
+ let offset = match ptr.checked_sub(mapping.address) {
+ Some(offset) => offset,
+ None => return,
+ };
+
+ let freed_range = match mapping.alloc.reservation_abort(offset) {
+ Ok(freed_range) => freed_range,
+ Err(_) => {
+ pr_warn!(
+ "Pointer {:x} failed to free, base = {:x}\n",
+ ptr,
+ mapping.address
+ );
+ return;
+ }
+ };
+
+ // No more allocations in this range. Mark them as not in use.
+ //
+ // Must be done before we release the lock so that `use_range` is not used on these
+ // indices until `stop_using_range` returns.
+ self.pages
+ .stop_using_range(freed_range.start_page_idx, freed_range.end_page_idx);
+ }
+ }
+
+ pub(crate) fn buffer_make_freeable(&self, offset: usize, mut data: Option<AllocationInfo>) {
+ let mut inner = self.inner.lock();
+ if let Some(ref mut mapping) = &mut inner.mapping {
+ if mapping.alloc.reservation_commit(offset, &mut data).is_err() {
+ pr_warn!("Offset {} failed to be marked freeable\n", offset);
+ }
+ }
+ }
+
+ fn create_mapping(&self, vma: &mm::virt::VmaNew) -> Result {
+ use kernel::page::PAGE_SIZE;
+ let size = usize::min(vma.end() - vma.start(), bindings::SZ_4M as usize);
+ let mapping = Mapping::new(vma.start(), size);
+ let page_count = self.pages.register_with_vma(vma)?;
+ if page_count * PAGE_SIZE != size {
+ return Err(EINVAL);
+ }
+
+ // Save range allocator for later.
+ self.inner.lock().mapping = Some(mapping);
+
+ Ok(())
+ }
+
+ fn version(&self, data: UserSlice) -> Result {
+ data.writer().write(&BinderVersion::current())
+ }
+
+ pub(crate) fn register_thread(&self) -> bool {
+ self.inner.lock().register_thread()
+ }
+
+ fn remove_thread(&self, thread: Arc<Thread>) {
+ self.inner.lock().threads.remove(&thread.id);
+ thread.release();
+ }
+
+ fn set_max_threads(&self, max: u32) {
+ self.inner.lock().max_threads = max;
+ }
+
+ fn set_oneway_spam_detection_enabled(&self, enabled: u32) {
+ self.inner.lock().oneway_spam_detection_enabled = enabled != 0;
+ }
+
+ pub(crate) fn is_oneway_spam_detection_enabled(&self) -> bool {
+ self.inner.lock().oneway_spam_detection_enabled
+ }
+
+ fn get_node_debug_info(&self, data: UserSlice) -> Result {
+ let (mut reader, mut writer) = data.reader_writer();
+
+ // Read the starting point.
+ let ptr = reader.read::<BinderNodeDebugInfo>()?.ptr;
+ let mut out = BinderNodeDebugInfo::default();
+
+ {
+ let inner = self.inner.lock();
+ for (node_ptr, node) in &inner.nodes {
+ if *node_ptr > ptr {
+ node.populate_debug_info(&mut out, &inner);
+ break;
+ }
+ }
+ }
+
+ writer.write(&out)
+ }
+
+ fn get_node_info_from_ref(&self, data: UserSlice) -> Result {
+ let (mut reader, mut writer) = data.reader_writer();
+ let mut out = reader.read::<BinderNodeInfoForRef>()?;
+
+ if out.strong_count != 0
+ || out.weak_count != 0
+ || out.reserved1 != 0
+ || out.reserved2 != 0
+ || out.reserved3 != 0
+ {
+ return Err(EINVAL);
+ }
+
+ // Only the context manager is allowed to use this ioctl.
+ if !self.inner.lock().is_manager {
+ return Err(EPERM);
+ }
+
+ {
+ let mut node_refs = self.node_refs.lock();
+ let node_info = node_refs.by_handle.get_mut(&out.handle).ok_or(ENOENT)?;
+ let node_ref = node_info.node_ref();
+ let owner_inner = node_ref.node.owner.inner.lock();
+ node_ref.node.populate_counts(&mut out, &owner_inner);
+ }
+
+ // Write the result back.
+ writer.write(&out)
+ }
+
+ pub(crate) fn needs_thread(&self) -> bool {
+ let mut inner = self.inner.lock();
+ let ret = inner.requested_thread_count == 0
+ && inner.ready_threads.is_empty()
+ && inner.started_thread_count < inner.max_threads;
+ if ret {
+ inner.requested_thread_count += 1
+ }
+ ret
+ }
+
+ pub(crate) fn request_death(
+ self: &Arc<Self>,
+ reader: &mut UserSliceReader,
+ thread: &Thread,
+ ) -> Result {
+ let handle: u32 = reader.read()?;
+ let cookie: u64 = reader.read()?;
+
+ // Queue BR_ERROR if we can't allocate memory for the death notification.
+ let death = UniqueArc::new_uninit(GFP_KERNEL).inspect_err(|_| {
+ thread.push_return_work(BR_ERROR);
+ })?;
+ let mut refs = self.node_refs.lock();
+ let Some(info) = refs.by_handle.get_mut(&handle) else {
+ pr_warn!("BC_REQUEST_DEATH_NOTIFICATION invalid ref {handle}\n");
+ return Ok(());
+ };
+
+ // Nothing to do if there is already a death notification request for this handle.
+ if info.death().is_some() {
+ pr_warn!("BC_REQUEST_DEATH_NOTIFICATION death notification already set\n");
+ return Ok(());
+ }
+
+ let death = {
+ let death_init = NodeDeath::new(info.node_ref().node.clone(), self.clone(), cookie);
+ match death.pin_init_with(death_init) {
+ Ok(death) => death,
+ // The initializer is infallible, so this branch is unreachable.
+ Err(err) => match err {},
+ }
+ };
+
+ // Register the death notification.
+ {
+ let owner = info.node_ref2().node.owner.clone();
+ let mut owner_inner = owner.inner.lock();
+ if owner_inner.is_dead {
+ let death = Arc::from(death);
+ *info.death() = Some(death.clone());
+ drop(owner_inner);
+ death.set_dead();
+ } else {
+ let death = ListArc::from(death);
+ *info.death() = Some(death.clone_arc());
+ info.node_ref().node.add_death(death, &mut owner_inner);
+ }
+ }
+ Ok(())
+ }
+
+ pub(crate) fn clear_death(&self, reader: &mut UserSliceReader, thread: &Thread) -> Result {
+ let handle: u32 = reader.read()?;
+ let cookie: u64 = reader.read()?;
+
+ let mut refs = self.node_refs.lock();
+ let Some(info) = refs.by_handle.get_mut(&handle) else {
+ pr_warn!("BC_CLEAR_DEATH_NOTIFICATION invalid ref {handle}\n");
+ return Ok(());
+ };
+
+ let Some(death) = info.death().take() else {
+ pr_warn!("BC_CLEAR_DEATH_NOTIFICATION death notification not active\n");
+ return Ok(());
+ };
+ if death.cookie != cookie {
+ *info.death() = Some(death);
+ pr_warn!("BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch\n");
+ return Ok(());
+ }
+
+ // Update state and determine if we need to queue a work item. We only need to do it when
+ // the node is not dead or if the user already completed the death notification.
+ if death.set_cleared(false) {
+ if let Some(death) = ListArc::try_from_arc_or_drop(death) {
+ let _ = thread.push_work_if_looper(death);
+ }
+ }
+
+ Ok(())
+ }
+
+ pub(crate) fn dead_binder_done(&self, cookie: u64, thread: &Thread) {
+ if let Some(death) = self.inner.lock().pull_delivered_death(cookie) {
+ death.set_notification_done(thread);
+ }
+ }
+
+ /// Locks the spinlock and moves the `nodes` rbtree out.
+ ///
+ /// This allows you to iterate through `nodes` while also allowing you to give other parts of
+ /// the codebase exclusive access to `ProcessInner`.
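+ ///
+ /// The tree is moved back into `ProcessInner` when the returned [`WithNodes`] guard is
+ /// dropped.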
+ pub(crate) fn lock_with_nodes(&self) -> WithNodes<'_> {
+ let mut inner = self.inner.lock();
+ WithNodes {
+ nodes: take(&mut inner.nodes),
+ inner,
+ }
+ }
+
+ fn deferred_flush(&self) {
+ let inner = self.inner.lock();
+ for thread in inner.threads.values() {
+ thread.exit_looper();
+ }
+ }
+
+ fn deferred_release(self: Arc<Self>) {
+ let is_manager = {
+ let mut inner = self.inner.lock();
+ inner.is_dead = true;
+ inner.is_frozen = false;
+ inner.sync_recv = false;
+ inner.async_recv = false;
+ inner.is_manager
+ };
+
+ if is_manager {
+ self.ctx.unset_manager_node();
+ }
+
+ self.ctx.deregister_process(&self);
+
+ let binderfs_file = self.inner.lock().binderfs_file.take();
+ drop(binderfs_file);
+
+ // Release threads.
+ let threads = {
+ let mut inner = self.inner.lock();
+ let threads = take(&mut inner.threads);
+ let ready = take(&mut inner.ready_threads);
+ drop(inner);
+ drop(ready);
+
+ for thread in threads.values() {
+ thread.release();
+ }
+ threads
+ };
+
+ // Release nodes.
+ {
+ while let Some(node) = {
+ let mut lock = self.inner.lock();
+ lock.nodes.cursor_front().map(|c| c.remove_current().1)
+ } {
+ node.to_key_value().1.release();
+ }
+ }
+
+ // Clean up death listeners and remove nodes from external node info lists.
+ for info in self.node_refs.lock().by_handle.values_mut() {
+ // SAFETY: We are removing the `NodeRefInfo` from the right node.
+ unsafe { info.node_ref2().node.remove_node_info(info) };
+
+ // Remove all death notifications from the nodes (that belong to a different process).
+ let death = if let Some(existing) = info.death().take() {
+ existing
+ } else {
+ continue;
+ };
+ death.set_cleared(false);
+ }
+
+ // Clean up freeze listeners.
+ let freeze_listeners = take(&mut self.node_refs.lock().freeze_listeners);
+ for listener in freeze_listeners.values() {
+ listener.on_process_exit(&self);
+ }
+ drop(freeze_listeners);
+
+ // Release refs on foreign nodes.
+ {
+ let mut refs = self.node_refs.lock();
+ let by_handle = take(&mut refs.by_handle);
+ let by_node = take(&mut refs.by_node);
+ drop(refs);
+ drop(by_node);
+ drop(by_handle);
+ }
+
+ // Cancel all pending work items.
+ while let Some(work) = self.get_work() {
+ work.into_arc().cancel();
+ }
+
+ let delivered_deaths = take(&mut self.inner.lock().delivered_deaths);
+ drop(delivered_deaths);
+
+ // Free any resources kept alive by allocated buffers.
+ let omapping = self.inner.lock().mapping.take();
+ if let Some(mut mapping) = omapping {
+ let address = mapping.address;
+ mapping
+ .alloc
+ .take_for_each(|offset, size, debug_id, odata| {
+ let ptr = offset + address;
+ pr_warn!(
+ "{}: removing orphan mapping {offset}:{size}\n",
+ self.pid_in_current_ns()
+ );
+ let mut alloc =
+ Allocation::new(self.clone(), debug_id, offset, size, ptr, false);
+ if let Some(data) = odata {
+ alloc.set_info(data);
+ }
+ drop(alloc)
+ });
+ }
+
+ // calls to synchronize_rcu() in thread drop will happen here
+ drop(threads);
+ }
+
+ pub(crate) fn drop_outstanding_txn(&self) {
+ let wake = {
+ let mut inner = self.inner.lock();
+ if inner.outstanding_txns == 0 {
+ pr_err!("outstanding_txns underflow");
+ return;
+ }
+ inner.outstanding_txns -= 1;
+ inner.is_frozen && inner.outstanding_txns == 0
+ };
+
+ if wake {
+ self.freeze_wait.notify_all();
+ }
+ }
+
+ pub(crate) fn ioctl_freeze(&self, info: &BinderFreezeInfo) -> Result {
+ if info.enable == 0 {
+ let msgs = self.prepare_freeze_messages()?;
+ let mut inner = self.inner.lock();
+ inner.sync_recv = false;
+ inner.async_recv = false;
+ inner.is_frozen = false;
+ drop(inner);
+ msgs.send_messages();
+ return Ok(());
+ }
+
+ let mut inner = self.inner.lock();
+ inner.sync_recv = false;
+ inner.async_recv = false;
+ inner.is_frozen = true;
+
+ if info.timeout_ms > 0 {
+ let mut jiffies = kernel::time::msecs_to_jiffies(info.timeout_ms);
+ while jiffies > 0 {
+ if inner.outstanding_txns == 0 {
+ break;
+ }
+
+ match self
+ .freeze_wait
+ .wait_interruptible_timeout(&mut inner, jiffies)
+ {
+ CondVarTimeoutResult::Signal { .. } => {
+ inner.is_frozen = false;
+ return Err(ERESTARTSYS);
+ }
+ CondVarTimeoutResult::Woken { jiffies: remaining } => {
+ jiffies = remaining;
+ }
+ CondVarTimeoutResult::Timeout => {
+ jiffies = 0;
+ }
+ }
+ }
+ }
+
+ if inner.txns_pending_locked() {
+ inner.is_frozen = false;
+ Err(EAGAIN)
+ } else {
+ drop(inner);
+ match self.prepare_freeze_messages() {
+ Ok(batch) => {
+ batch.send_messages();
+ Ok(())
+ }
+ Err(kernel::alloc::AllocError) => {
+ self.inner.lock().is_frozen = false;
+ Err(ENOMEM)
+ }
+ }
+ }
+ }
+}
+
+fn get_frozen_status(data: UserSlice) -> Result {
+ let (mut reader, mut writer) = data.reader_writer();
+
+ let mut info = reader.read::<BinderFrozenStatusInfo>()?;
+ info.sync_recv = 0;
+ info.async_recv = 0;
+ let mut found = false;
+
+ for ctx in crate::context::get_all_contexts()? {
+ ctx.for_each_proc(|proc| {
+ if proc.task.pid() == info.pid as _ {
+ found = true;
+ let inner = proc.inner.lock();
+ let txns_pending = inner.txns_pending_locked();
+ info.async_recv |= inner.async_recv as u32;
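+ // `sync_recv` packs two flags: bit 0 reports that a synchronous
+ // transaction was received while frozen, and bit 1 that transactions
+ // are still pending.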
+ info.sync_recv |= inner.sync_recv as u32;
+ info.sync_recv |= (txns_pending as u32) << 1;
+ }
+ });
+ }
+
+ if found {
+ writer.write(&info)?;
+ Ok(())
+ } else {
+ Err(EINVAL)
+ }
+}
+
+fn ioctl_freeze(reader: &mut UserSliceReader) -> Result {
+ let info = reader.read::<BinderFreezeInfo>()?;
+
+ // Very unlikely for there to be more than 3, since a process normally uses at most the
+ // binder, hwbinder, and vndbinder contexts.
+ let mut procs = KVec::with_capacity(3, GFP_KERNEL)?;
+
+ let ctxs = crate::context::get_all_contexts()?;
+ for ctx in ctxs {
+ for proc in ctx.get_procs_with_pid(info.pid as i32)? {
+ procs.push(proc, GFP_KERNEL)?;
+ }
+ }
+
+ for proc in procs {
+ proc.ioctl_freeze(&info)?;
+ }
+ Ok(())
+}
+
+/// The ioctl handler.
+impl Process {
+ /// Ioctls that are write-only from the perspective of userspace.
+ ///
+ /// The kernel will only read from the pointer that userspace provided to us.
+ fn ioctl_write_only(
+ this: ArcBorrow<'_, Process>,
+ _file: &File,
+ cmd: u32,
+ reader: &mut UserSliceReader,
+ ) -> Result {
+ let thread = this.get_current_thread()?;
+ match cmd {
+ uapi::BINDER_SET_MAX_THREADS => this.set_max_threads(reader.read()?),
+ uapi::BINDER_THREAD_EXIT => this.remove_thread(thread),
+ uapi::BINDER_SET_CONTEXT_MGR => this.set_as_manager(None, &thread)?,
+ uapi::BINDER_SET_CONTEXT_MGR_EXT => {
+ this.set_as_manager(Some(reader.read()?), &thread)?
+ }
+ uapi::BINDER_ENABLE_ONEWAY_SPAM_DETECTION => {
+ this.set_oneway_spam_detection_enabled(reader.read()?)
+ }
+ uapi::BINDER_FREEZE => ioctl_freeze(reader)?,
+ _ => return Err(EINVAL),
+ }
+ Ok(())
+ }
+
+ /// Ioctls that are read/write from the perspective of userspace.
+ ///
+ /// The kernel will both read from and write to the pointer that userspace provided to us.
+ fn ioctl_write_read(
+ this: ArcBorrow<'_, Process>,
+ file: &File,
+ cmd: u32,
+ data: UserSlice,
+ ) -> Result {
+ let thread = this.get_current_thread()?;
+ let blocking = (file.flags() & file::flags::O_NONBLOCK) == 0;
+ match cmd {
+ uapi::BINDER_WRITE_READ => thread.write_read(data, blocking)?,
+ uapi::BINDER_GET_NODE_DEBUG_INFO => this.get_node_debug_info(data)?,
+ uapi::BINDER_GET_NODE_INFO_FOR_REF => this.get_node_info_from_ref(data)?,
+ uapi::BINDER_VERSION => this.version(data)?,
+ uapi::BINDER_GET_FROZEN_INFO => get_frozen_status(data)?,
+ uapi::BINDER_GET_EXTENDED_ERROR => thread.get_extended_error(data)?,
+ _ => return Err(EINVAL),
+ }
+ Ok(())
+ }
+}
+
+/// The file operations supported by `Process`.
+impl Process {
+ pub(crate) fn open(ctx: ArcBorrow<'_, Context>, file: &File) -> Result<Arc<Process>> {
+ Self::new(ctx.into(), ARef::from(file.cred()))
+ }
+
+ pub(crate) fn release(this: Arc<Process>, _file: &File) {
+ let binderfs_file;
+ let should_schedule;
+ {
+ let mut inner = this.inner.lock();
+ should_schedule = inner.defer_work == 0;
+ inner.defer_work |= PROC_DEFER_RELEASE;
+ binderfs_file = inner.binderfs_file.take();
+ }
+
+ if should_schedule {
+ // Ignore failures to schedule to the workqueue. Those just mean that we're already
+ // scheduled for execution.
+ let _ = workqueue::system().enqueue(this);
+ }
+
+ drop(binderfs_file);
+ }
+
+ pub(crate) fn flush(this: ArcBorrow<'_, Process>) -> Result {
+ let should_schedule;
+ {
+ let mut inner = this.inner.lock();
+ should_schedule = inner.defer_work == 0;
+ inner.defer_work |= PROC_DEFER_FLUSH;
+ }
+
+ if should_schedule {
+ // Ignore failures to schedule to the workqueue. Those just mean that we're already
+ // scheduled for execution.
+ let _ = workqueue::system().enqueue(Arc::from(this));
+ }
+ Ok(())
+ }
+
+ pub(crate) fn ioctl(this: ArcBorrow<'_, Process>, file: &File, cmd: u32, arg: usize) -> Result {
+ use kernel::ioctl::{_IOC_DIR, _IOC_SIZE};
+ use kernel::uapi::{_IOC_READ, _IOC_WRITE};
+
+ crate::trace::trace_ioctl(cmd, arg);
+
+ let user_slice = UserSlice::new(UserPtr::from_addr(arg), _IOC_SIZE(cmd));
+
+ const _IOC_READ_WRITE: u32 = _IOC_READ | _IOC_WRITE;
+
+ match _IOC_DIR(cmd) {
+ _IOC_WRITE => Self::ioctl_write_only(this, file, cmd, &mut user_slice.reader()),
+ _IOC_READ_WRITE => Self::ioctl_write_read(this, file, cmd, user_slice),
+ _ => Err(EINVAL),
+ }
+ }
+
+ pub(crate) fn compat_ioctl(
+ this: ArcBorrow<'_, Process>,
+ file: &File,
+ cmd: u32,
+ arg: usize,
+ ) -> Result {
+ Self::ioctl(this, file, cmd, arg)
+ }
+
+ pub(crate) fn mmap(
+ this: ArcBorrow<'_, Process>,
+ _file: &File,
+ vma: &mm::virt::VmaNew,
+ ) -> Result {
+ // We don't allow mmap to be used in a different process.
+ if !core::ptr::eq(kernel::current!().group_leader(), &*this.task) {
+ return Err(EINVAL);
+ }
+ if vma.start() == 0 {
+ return Err(EINVAL);
+ }
+
+ vma.try_clear_maywrite().map_err(|_| EPERM)?;
+ vma.set_dontcopy();
+ vma.set_mixedmap();
+
+ // TODO: Set ops. We need to learn when the user unmaps so that we can stop using it.
+ this.create_mapping(vma)
+ }
+
+ pub(crate) fn poll(
+ this: ArcBorrow<'_, Process>,
+ file: &File,
+ table: PollTable<'_>,
+ ) -> Result<u32> {
+ let thread = this.get_current_thread()?;
+ let (from_proc, mut mask) = thread.poll(file, table);
+ if mask == 0 && from_proc && !this.inner.lock().work.is_empty() {
+ mask |= bindings::POLLIN;
+ }
+ Ok(mask)
+ }
+}
+
+/// Represents that a thread has registered with the `ready_threads` list of its process.
+///
+/// The destructor of this type will unregister the thread from the list of ready threads.
+pub(crate) struct Registration<'a> {
+ thread: &'a Arc<Thread>,
+}
+
+impl<'a> Registration<'a> {
+ fn new(thread: &'a Arc<Thread>, guard: &mut Guard<'_, ProcessInner, SpinLockBackend>) -> Self {
+ assert!(core::ptr::eq(&thread.process.inner, guard.lock_ref()));
+ // INVARIANT: We are pushing this thread to the right `ready_threads` list.
+ if let Ok(list_arc) = ListArc::try_from_arc(thread.clone()) {
+ guard.ready_threads.push_front(list_arc);
+ } else {
+ // It is an error to hit this branch, and it should not be reachable. We try to do
+ // something reasonable when the failure path happens. Most likely, the thread in
+ // question will sleep forever.
+ pr_err!("Same thread registered with `ready_threads` twice.");
+ }
+ Self { thread }
+ }
+}
+
+impl Drop for Registration<'_> {
+ fn drop(&mut self) {
+ let mut inner = self.thread.process.inner.lock();
+ // SAFETY: The thread has the invariant that we never push it to any other linked list than
+ // the `ready_threads` list of its parent process. Therefore, the thread is either in that
+ // list, or in no list.
+ unsafe { inner.ready_threads.remove(self.thread) };
+ }
+}
+
+pub(crate) struct WithNodes<'a> {
+ pub(crate) inner: Guard<'a, ProcessInner, SpinLockBackend>,
+ pub(crate) nodes: RBTree<u64, DArc<Node>>,
+}
+
+impl Drop for WithNodes<'_> {
+ fn drop(&mut self) {
+ core::mem::swap(&mut self.nodes, &mut self.inner.nodes);
+ if self.nodes.iter().next().is_some() {
+ pr_err!("nodes array was modified while using lock_with_nodes\n");
+ }
+ }
+}
+
+pub(crate) enum GetWorkOrRegister<'a> {
+ Work(DLArc<dyn DeliverToRead>),
+ Register(Registration<'a>),
+}
diff --git a/drivers/android/binder/range_alloc/array.rs b/drivers/android/binder/range_alloc/array.rs
new file mode 100644
index 000000000000..07e1dec2ce63
--- /dev/null
+++ b/drivers/android/binder/range_alloc/array.rs
@@ -0,0 +1,251 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::{
+ page::{PAGE_MASK, PAGE_SIZE},
+ prelude::*,
+ seq_file::SeqFile,
+ seq_print,
+ task::Pid,
+};
+
+use crate::range_alloc::{DescriptorState, FreedRange, Range};
+
+/// Keeps track of allocations in a process' mmap.
+///
+/// Each process has an mmap where the data for incoming transactions will be placed. This struct
+/// keeps track of allocations made in the mmap. For each allocation, we store a descriptor that
+/// has metadata related to the allocation. We also keep track of available free space.
+pub(super) struct ArrayRangeAllocator<T> {
+ /// This stores all ranges that are allocated. Unlike the tree based allocator, we do *not*
+ /// store the free ranges.
+ ///
+ /// Sorted by offset.
+ pub(super) ranges: KVec<Range<T>>,
+ size: usize,
+ free_oneway_space: usize,
+}
+
+struct FindEmptyRes {
+ /// Which index in `ranges` should we insert the new range at?
+ ///
+ /// Inserting the new range at this index keeps `ranges` sorted.
+ insert_at_idx: usize,
+ /// Which offset should we insert the new range at?
+ insert_at_offset: usize,
+}
+
+impl<T> ArrayRangeAllocator<T> {
+ pub(crate) fn new(size: usize, alloc: EmptyArrayAlloc<T>) -> Self {
+ Self {
+ ranges: alloc.ranges,
+ size,
+ free_oneway_space: size / 2,
+ }
+ }
+
+ pub(crate) fn free_oneway_space(&self) -> usize {
+ self.free_oneway_space
+ }
+
+ pub(crate) fn count_buffers(&self) -> usize {
+ self.ranges.len()
+ }
+
+ pub(crate) fn total_size(&self) -> usize {
+ self.size
+ }
+
+ pub(crate) fn is_full(&self) -> bool {
+ self.ranges.len() == self.ranges.capacity()
+ }
+
+ pub(crate) fn debug_print(&self, m: &SeqFile) -> Result<()> {
+ for range in &self.ranges {
+ seq_print!(
+ m,
+ " buffer {}: {} size {} pid {} oneway {}",
+ 0,
+ range.offset,
+ range.size,
+ range.state.pid(),
+ range.state.is_oneway(),
+ );
+ if let DescriptorState::Reserved(_) = range.state {
+ seq_print!(m, " reserved\n");
+ } else {
+ seq_print!(m, " allocated\n");
+ }
+ }
+ Ok(())
+ }
+
+ /// Find somewhere to put a new range.
+ ///
+ /// Unlike the tree implementation, we do not bother to find the smallest gap. The idea is that
+ /// fragmentation isn't a big issue when we don't have many ranges.
+ ///
+ /// Returns the index that the new range should have in `self.ranges` after insertion.
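+ ///
+ /// Worked example (illustrative): with ranges `[0..100)` and `[300..400)` in a
+ /// 1000-byte mmap, a request of 150 bytes is placed at offset 400, because the
+ /// space after the last range is tried first. A request of 700 bytes fails even
+ /// though 800 bytes are free in total, since no single gap is large enough.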
+ fn find_empty_range(&self, size: usize) -> Option<FindEmptyRes> {
+ let after_last_range = self.ranges.last().map(Range::endpoint).unwrap_or(0);
+
+ if size <= self.total_size() - after_last_range {
+ // We can put the range at the end, so just do that.
+ Some(FindEmptyRes {
+ insert_at_idx: self.ranges.len(),
+ insert_at_offset: after_last_range,
+ })
+ } else {
+ let mut end_of_prev = 0;
+ for (i, range) in self.ranges.iter().enumerate() {
+ // Does it fit before the i'th range?
+ if size <= range.offset - end_of_prev {
+ return Some(FindEmptyRes {
+ insert_at_idx: i,
+ insert_at_offset: end_of_prev,
+ });
+ }
+ end_of_prev = range.endpoint();
+ }
+ None
+ }
+ }
+
+ pub(crate) fn reserve_new(
+ &mut self,
+ debug_id: usize,
+ size: usize,
+ is_oneway: bool,
+ pid: Pid,
+ ) -> Result<usize> {
+ // Compute new value of free_oneway_space, which is set only on success.
+ let new_oneway_space = if is_oneway {
+ match self.free_oneway_space.checked_sub(size) {
+ Some(new_oneway_space) => new_oneway_space,
+ None => return Err(ENOSPC),
+ }
+ } else {
+ self.free_oneway_space
+ };
+
+ let FindEmptyRes {
+ insert_at_idx,
+ insert_at_offset,
+ } = self.find_empty_range(size).ok_or(ENOSPC)?;
+ self.free_oneway_space = new_oneway_space;
+
+ let new_range = Range {
+ offset: insert_at_offset,
+ size,
+ state: DescriptorState::new(is_oneway, debug_id, pid),
+ };
+ // Insert the value at the given index to keep the array sorted.
+ self.ranges
+ .insert_within_capacity(insert_at_idx, new_range)
+ .ok()
+ .unwrap();
+
+ Ok(insert_at_offset)
+ }
+
+ pub(crate) fn reservation_abort(&mut self, offset: usize) -> Result<FreedRange> {
+ // This could use a binary search, but linear scans are usually faster for small arrays.
+ let i = self
+ .ranges
+ .iter()
+ .position(|range| range.offset == offset)
+ .ok_or(EINVAL)?;
+ let range = &self.ranges[i];
+
+ if let DescriptorState::Allocated(_) = range.state {
+ return Err(EPERM);
+ }
+
+ let size = range.size;
+ let offset = range.offset;
+
+ if range.state.is_oneway() {
+ self.free_oneway_space += size;
+ }
+
+ // This computes the range of pages that are no longer used by *any* allocated range. The
+ // caller will mark them as unused, which means that they can be freed if the system comes
+ // under memory pressure.
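+ //
+ // The boundary checks below also free a partial first or last page when no
+ // neighbouring range still overlaps it. Illustrative example with 4 KiB
+ // pages: aborting the range `[100, 8242)` always frees page 1; it also
+ // frees page 0 if no earlier range ends inside it, and page 2 if no later
+ // range starts inside it.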
+ let mut freed_range = FreedRange::interior_pages(offset, size);
+ #[expect(clippy::collapsible_if)] // reads better like this
+ if offset % PAGE_SIZE != 0 {
+ if i == 0 || self.ranges[i - 1].endpoint() <= (offset & PAGE_MASK) {
+ freed_range.start_page_idx -= 1;
+ }
+ }
+ if range.endpoint() % PAGE_SIZE != 0 {
+ let page_after = (range.endpoint() & PAGE_MASK) + PAGE_SIZE;
+ if i + 1 == self.ranges.len() || page_after <= self.ranges[i + 1].offset {
+ freed_range.end_page_idx += 1;
+ }
+ }
+
+ self.ranges.remove(i)?;
+ Ok(freed_range)
+ }
+
+ pub(crate) fn reservation_commit(&mut self, offset: usize, data: &mut Option<T>) -> Result {
+ // This could use a binary search, but linear scans are usually faster for small arrays.
+ let range = self
+ .ranges
+ .iter_mut()
+ .find(|range| range.offset == offset)
+ .ok_or(ENOENT)?;
+
+ let DescriptorState::Reserved(reservation) = &range.state else {
+ return Err(ENOENT);
+ };
+
+ range.state = DescriptorState::Allocated(reservation.clone().allocate(data.take()));
+ Ok(())
+ }
+
+ pub(crate) fn reserve_existing(&mut self, offset: usize) -> Result<(usize, usize, Option<T>)> {
+ // This could use a binary search, but linear scans are usually faster for small arrays.
+ let range = self
+ .ranges
+ .iter_mut()
+ .find(|range| range.offset == offset)
+ .ok_or(ENOENT)?;
+
+ let DescriptorState::Allocated(allocation) = &mut range.state else {
+ return Err(ENOENT);
+ };
+
+ let data = allocation.take();
+ let debug_id = allocation.reservation.debug_id;
+ range.state = DescriptorState::Reserved(allocation.reservation.clone());
+ Ok((range.size, debug_id, data))
+ }
+
+ pub(crate) fn take_for_each<F: Fn(usize, usize, usize, Option<T>)>(&mut self, callback: F) {
+ for range in self.ranges.iter_mut() {
+ if let DescriptorState::Allocated(allocation) = &mut range.state {
+ callback(
+ range.offset,
+ range.size,
+ allocation.reservation.debug_id,
+ allocation.data.take(),
+ );
+ }
+ }
+ }
+}
+
+pub(crate) struct EmptyArrayAlloc<T> {
+ ranges: KVec<Range<T>>,
+}
+
+impl<T> EmptyArrayAlloc<T> {
+ pub(crate) fn try_new(capacity: usize) -> Result<Self> {
+ Ok(Self {
+ ranges: KVec::with_capacity(capacity, GFP_KERNEL)?,
+ })
+ }
+}
diff --git a/drivers/android/binder/range_alloc/mod.rs b/drivers/android/binder/range_alloc/mod.rs
new file mode 100644
index 000000000000..2301e2bc1a1f
--- /dev/null
+++ b/drivers/android/binder/range_alloc/mod.rs
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::{page::PAGE_SIZE, prelude::*, seq_file::SeqFile, task::Pid};
+
+mod tree;
+use self::tree::{FromArrayAllocs, ReserveNewTreeAlloc, TreeRangeAllocator};
+
+mod array;
+use self::array::{ArrayRangeAllocator, EmptyArrayAlloc};
+
+enum DescriptorState<T> {
+ Reserved(Reservation),
+ Allocated(Allocation<T>),
+}
+
+impl<T> DescriptorState<T> {
+ fn new(is_oneway: bool, debug_id: usize, pid: Pid) -> Self {
+ DescriptorState::Reserved(Reservation {
+ debug_id,
+ is_oneway,
+ pid,
+ })
+ }
+
+ fn pid(&self) -> Pid {
+ match self {
+ DescriptorState::Reserved(inner) => inner.pid,
+ DescriptorState::Allocated(inner) => inner.reservation.pid,
+ }
+ }
+
+ fn is_oneway(&self) -> bool {
+ match self {
+ DescriptorState::Reserved(inner) => inner.is_oneway,
+ DescriptorState::Allocated(inner) => inner.reservation.is_oneway,
+ }
+ }
+}
+
+#[derive(Clone)]
+struct Reservation {
+ debug_id: usize,
+ is_oneway: bool,
+ pid: Pid,
+}
+
+impl Reservation {
+ fn allocate<T>(self, data: Option<T>) -> Allocation<T> {
+ Allocation {
+ data,
+ reservation: self,
+ }
+ }
+}
+
+struct Allocation<T> {
+ reservation: Reservation,
+ data: Option<T>,
+}
+
+impl<T> Allocation<T> {
+ fn deallocate(self) -> (Reservation, Option<T>) {
+ (self.reservation, self.data)
+ }
+
+ fn debug_id(&self) -> usize {
+ self.reservation.debug_id
+ }
+
+ fn take(&mut self) -> Option<T> {
+ self.data.take()
+ }
+}
+
+/// The array implementation must switch to the tree if it wants to go beyond this number of
+/// ranges.
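+ ///
+ /// Below this, a sorted `KVec` with linear scans is cheaper than maintaining the two
+ /// red-black trees used by the tree implementation.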
+const TREE_THRESHOLD: usize = 8;
+
+/// Represents a range of pages that have just become completely free.
+#[derive(Copy, Clone)]
+pub(crate) struct FreedRange {
+ pub(crate) start_page_idx: usize,
+ pub(crate) end_page_idx: usize,
+}
+
+impl FreedRange {
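+ /// Returns the range of pages that lie entirely inside `[offset, offset + size)`.
+ ///
+ /// Illustrative example with 4 KiB pages: `interior_pages(100, 12192)` covers pages 1
+ /// and 2 only, since pages 0 and 3 are merely partially inside the range.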
+ fn interior_pages(offset: usize, size: usize) -> FreedRange {
+ FreedRange {
+ // Divide, rounding up.
+ start_page_idx: offset.div_ceil(PAGE_SIZE),
+ // Divide, rounding down.
+ end_page_idx: (offset + size) / PAGE_SIZE,
+ }
+ }
+}
+
+struct Range<T> {
+ offset: usize,
+ size: usize,
+ state: DescriptorState<T>,
+}
+
+impl<T> Range<T> {
+ fn endpoint(&self) -> usize {
+ self.offset + self.size
+ }
+}
+
+pub(crate) struct RangeAllocator<T> {
+ inner: Impl<T>,
+}
+
+enum Impl<T> {
+ Empty(usize),
+ Array(ArrayRangeAllocator<T>),
+ Tree(TreeRangeAllocator<T>),
+}
+
+impl<T> RangeAllocator<T> {
+ pub(crate) fn new(size: usize) -> Self {
+ Self {
+ inner: Impl::Empty(size),
+ }
+ }
+
+ pub(crate) fn free_oneway_space(&self) -> usize {
+ match &self.inner {
+ Impl::Empty(size) => size / 2,
+ Impl::Array(array) => array.free_oneway_space(),
+ Impl::Tree(tree) => tree.free_oneway_space(),
+ }
+ }
+
+ pub(crate) fn count_buffers(&self) -> usize {
+ match &self.inner {
+ Impl::Empty(_size) => 0,
+ Impl::Array(array) => array.count_buffers(),
+ Impl::Tree(tree) => tree.count_buffers(),
+ }
+ }
+
+ pub(crate) fn debug_print(&self, m: &SeqFile) -> Result<()> {
+ match &self.inner {
+ Impl::Empty(_size) => Ok(()),
+ Impl::Array(array) => array.debug_print(m),
+ Impl::Tree(tree) => tree.debug_print(m),
+ }
+ }
+
+ /// Try to reserve a new buffer, using the provided allocation if necessary.
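+ ///
+ /// Memory cannot be allocated while holding the lock that protects this allocator, so
+ /// `reserve_new` returns [`ReserveNew::NeedAlloc`] when it needs memory; the caller then
+ /// drops its lock, calls [`ReserveNewNeedAlloc::make_alloc`], and retries. A minimal
+ /// caller sketch, assuming some `lock` that guards the allocator and `debug_id`, `size`,
+ /// `is_oneway`, and `pid` in scope:
+ ///
+ /// ```ignore
+ /// let mut args = ReserveNewArgs {
+ ///     debug_id,
+ ///     size,
+ ///     is_oneway,
+ ///     pid,
+ ///     ..ReserveNewArgs::default()
+ /// };
+ /// let offset = loop {
+ ///     let mut guard = lock.lock();
+ ///     match guard.alloc.reserve_new(args)? {
+ ///         ReserveNew::Success(success) => break success.offset,
+ ///         ReserveNew::NeedAlloc(request) => {
+ ///             drop(guard);
+ ///             // Allocate outside the lock, then retry with the new buffers attached.
+ ///             args = request.make_alloc()?;
+ ///         }
+ ///     }
+ /// };
+ /// ```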
+ pub(crate) fn reserve_new(&mut self, mut args: ReserveNewArgs<T>) -> Result<ReserveNew<T>> {
+ match &mut self.inner {
+ Impl::Empty(size) => {
+ let empty_array = match args.empty_array_alloc.take() {
+ Some(empty_array) => ArrayRangeAllocator::new(*size, empty_array),
+ None => {
+ return Ok(ReserveNew::NeedAlloc(ReserveNewNeedAlloc {
+ args,
+ need_empty_array_alloc: true,
+ need_new_tree_alloc: false,
+ need_tree_alloc: false,
+ }))
+ }
+ };
+
+ self.inner = Impl::Array(empty_array);
+ self.reserve_new(args)
+ }
+ Impl::Array(array) if array.is_full() => {
+ let allocs = match args.new_tree_alloc {
+ Some(ref mut allocs) => allocs,
+ None => {
+ return Ok(ReserveNew::NeedAlloc(ReserveNewNeedAlloc {
+ args,
+ need_empty_array_alloc: false,
+ need_new_tree_alloc: true,
+ need_tree_alloc: true,
+ }))
+ }
+ };
+
+ let new_tree =
+ TreeRangeAllocator::from_array(array.total_size(), &mut array.ranges, allocs);
+
+ self.inner = Impl::Tree(new_tree);
+ self.reserve_new(args)
+ }
+ Impl::Array(array) => {
+ let offset =
+ array.reserve_new(args.debug_id, args.size, args.is_oneway, args.pid)?;
+ Ok(ReserveNew::Success(ReserveNewSuccess {
+ offset,
+ oneway_spam_detected: false,
+ _empty_array_alloc: args.empty_array_alloc,
+ _new_tree_alloc: args.new_tree_alloc,
+ _tree_alloc: args.tree_alloc,
+ }))
+ }
+ Impl::Tree(tree) => {
+ let alloc = match args.tree_alloc {
+ Some(alloc) => alloc,
+ None => {
+ return Ok(ReserveNew::NeedAlloc(ReserveNewNeedAlloc {
+ args,
+ need_empty_array_alloc: false,
+ need_new_tree_alloc: false,
+ need_tree_alloc: true,
+ }));
+ }
+ };
+ let (offset, oneway_spam_detected) =
+ tree.reserve_new(args.debug_id, args.size, args.is_oneway, args.pid, alloc)?;
+ Ok(ReserveNew::Success(ReserveNewSuccess {
+ offset,
+ oneway_spam_detected,
+ _empty_array_alloc: args.empty_array_alloc,
+ _new_tree_alloc: args.new_tree_alloc,
+ _tree_alloc: None,
+ }))
+ }
+ }
+ }
+
+ /// Deletes the allocations at `offset`.
+ pub(crate) fn reservation_abort(&mut self, offset: usize) -> Result<FreedRange> {
+ match &mut self.inner {
+ Impl::Empty(_size) => Err(EINVAL),
+ Impl::Array(array) => array.reservation_abort(offset),
+ Impl::Tree(tree) => {
+ let freed_range = tree.reservation_abort(offset)?;
+ if tree.is_empty() {
+ self.inner = Impl::Empty(tree.total_size());
+ }
+ Ok(freed_range)
+ }
+ }
+ }
+
+ /// Called when an allocation is no longer in use by the kernel.
+ ///
+ /// The value in `data` will be stored, if any. A mutable reference is used to avoid dropping
+ /// the `T` when an error is returned.
+ pub(crate) fn reservation_commit(&mut self, offset: usize, data: &mut Option<T>) -> Result {
+ match &mut self.inner {
+ Impl::Empty(_size) => Err(EINVAL),
+ Impl::Array(array) => array.reservation_commit(offset, data),
+ Impl::Tree(tree) => tree.reservation_commit(offset, data),
+ }
+ }
+
+ /// Called when the kernel starts using an allocation.
+ ///
+ /// Returns the size of the existing entry and the data associated with it.
+ pub(crate) fn reserve_existing(&mut self, offset: usize) -> Result<(usize, usize, Option<T>)> {
+ match &mut self.inner {
+ Impl::Empty(_size) => Err(EINVAL),
+ Impl::Array(array) => array.reserve_existing(offset),
+ Impl::Tree(tree) => tree.reserve_existing(offset),
+ }
+ }
+
+ /// Call the provided callback at every allocated region.
+ ///
+ /// This destroys the range allocator. Used only during shutdown.
+ pub(crate) fn take_for_each<F: Fn(usize, usize, usize, Option<T>)>(&mut self, callback: F) {
+ match &mut self.inner {
+ Impl::Empty(_size) => {}
+ Impl::Array(array) => array.take_for_each(callback),
+ Impl::Tree(tree) => tree.take_for_each(callback),
+ }
+ }
+}
+
+/// The arguments for `reserve_new`.
+#[derive(Default)]
+pub(crate) struct ReserveNewArgs<T> {
+ pub(crate) size: usize,
+ pub(crate) is_oneway: bool,
+ pub(crate) debug_id: usize,
+ pub(crate) pid: Pid,
+ pub(crate) empty_array_alloc: Option<EmptyArrayAlloc<T>>,
+ pub(crate) new_tree_alloc: Option<FromArrayAllocs<T>>,
+ pub(crate) tree_alloc: Option<ReserveNewTreeAlloc<T>>,
+}
+
+/// The return type of `ReserveNew`.
+pub(crate) enum ReserveNew<T> {
+ Success(ReserveNewSuccess<T>),
+ NeedAlloc(ReserveNewNeedAlloc<T>),
+}
+
+ /// Returned by `reserve_new` when the reservation was successful.
+pub(crate) struct ReserveNewSuccess<T> {
+ pub(crate) offset: usize,
+ pub(crate) oneway_spam_detected: bool,
+
+ // If the user supplied an allocation that we did not end up using, then we return it here.
+ // The caller will kfree it outside of the lock.
+ _empty_array_alloc: Option<EmptyArrayAlloc<T>>,
+ _new_tree_alloc: Option<FromArrayAllocs<T>>,
+ _tree_alloc: Option<ReserveNewTreeAlloc<T>>,
+}
+
+/// Returned by `reserve_new` to request the caller to make an allocation before calling the method
+/// again.
+pub(crate) struct ReserveNewNeedAlloc<T> {
+ args: ReserveNewArgs<T>,
+ need_empty_array_alloc: bool,
+ need_new_tree_alloc: bool,
+ need_tree_alloc: bool,
+}
+
+impl<T> ReserveNewNeedAlloc<T> {
+ /// Make the necessary allocations for another call to `reserve_new`.
+ pub(crate) fn make_alloc(mut self) -> Result<ReserveNewArgs<T>> {
+ if self.need_empty_array_alloc && self.args.empty_array_alloc.is_none() {
+ self.args.empty_array_alloc = Some(EmptyArrayAlloc::try_new(TREE_THRESHOLD)?);
+ }
+ if self.need_new_tree_alloc && self.args.new_tree_alloc.is_none() {
+ self.args.new_tree_alloc = Some(FromArrayAllocs::try_new(TREE_THRESHOLD)?);
+ }
+ if self.need_tree_alloc && self.args.tree_alloc.is_none() {
+ self.args.tree_alloc = Some(ReserveNewTreeAlloc::try_new()?);
+ }
+ Ok(self.args)
+ }
+}
diff --git a/drivers/android/binder/range_alloc/tree.rs b/drivers/android/binder/range_alloc/tree.rs
new file mode 100644
index 000000000000..7b1a248fcb02
--- /dev/null
+++ b/drivers/android/binder/range_alloc/tree.rs
@@ -0,0 +1,488 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::{
+ page::PAGE_SIZE,
+ prelude::*,
+ rbtree::{RBTree, RBTreeNode, RBTreeNodeReservation},
+ seq_file::SeqFile,
+ seq_print,
+ task::Pid,
+};
+
+use crate::range_alloc::{DescriptorState, FreedRange, Range};
+
+/// Keeps track of allocations in a process' mmap.
+///
+/// Each process has an mmap where the data for incoming transactions will be placed. This struct
+/// keeps track of allocations made in the mmap. For each allocation, we store a descriptor that
+/// has metadata related to the allocation. We also keep track of available free space.
+pub(super) struct TreeRangeAllocator<T> {
+ /// This collection contains descriptors for *both* ranges containing an allocation, *and* free
+ /// ranges between allocations. The free ranges get merged, so there are never two free ranges
+ /// next to each other.
+ tree: RBTree<usize, Descriptor<T>>,
+ /// Contains an entry for every free range in `self.tree`. This tree sorts the ranges by size,
+ /// letting us look up the smallest range whose size is at least some lower bound.
+ free_tree: RBTree<FreeKey, ()>,
+ size: usize,
+ free_oneway_space: usize,
+}
+
+impl<T> TreeRangeAllocator<T> {
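+ /// Converts the array allocator's sorted ranges into tree form, inserting a free-range
+ /// descriptor for every gap before, between, and after the allocated ranges. All tree
+ /// nodes come from `alloc`, which the caller preallocates before taking the lock and
+ /// which must hold enough nodes for the `pop().unwrap()` calls below.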
+ pub(crate) fn from_array(
+ size: usize,
+ ranges: &mut KVec<Range<T>>,
+ alloc: &mut FromArrayAllocs<T>,
+ ) -> Self {
+ let mut tree = TreeRangeAllocator {
+ tree: RBTree::new(),
+ free_tree: RBTree::new(),
+ size,
+ free_oneway_space: size / 2,
+ };
+
+ let mut free_offset = 0;
+ for range in ranges.drain_all() {
+ let free_size = range.offset - free_offset;
+ if free_size > 0 {
+ let free_node = alloc.free_tree.pop().unwrap();
+ tree.free_tree
+ .insert(free_node.into_node((free_size, free_offset), ()));
+ let tree_node = alloc.tree.pop().unwrap();
+ tree.tree.insert(
+ tree_node.into_node(free_offset, Descriptor::new(free_offset, free_size)),
+ );
+ }
+ free_offset = range.endpoint();
+
+ if range.state.is_oneway() {
+ tree.free_oneway_space = tree.free_oneway_space.saturating_sub(range.size);
+ }
+
+ let free_res = alloc.free_tree.pop().unwrap();
+ let tree_node = alloc.tree.pop().unwrap();
+ let mut desc = Descriptor::new(range.offset, range.size);
+ desc.state = Some((range.state, free_res));
+ tree.tree.insert(tree_node.into_node(range.offset, desc));
+ }
+
+ // After the last range, we may need a free range.
+ if free_offset < size {
+ let free_size = size - free_offset;
+ let free_node = alloc.free_tree.pop().unwrap();
+ tree.free_tree
+ .insert(free_node.into_node((free_size, free_offset), ()));
+ let tree_node = alloc.tree.pop().unwrap();
+ tree.tree
+ .insert(tree_node.into_node(free_offset, Descriptor::new(free_offset, free_size)));
+ }
+
+ tree
+ }
+
+ pub(crate) fn is_empty(&self) -> bool {
+ let mut tree_iter = self.tree.values();
+ // There's always at least one range, because index zero is either the start of a free or
+ // allocated range.
+ let first_value = tree_iter.next().unwrap();
+ if tree_iter.next().is_some() {
+ // There are never two free ranges next to each other, so if there is more than one
+ // descriptor, then at least one of them must hold an allocated range.
+ return false;
+ }
+ // There is only one descriptor. Return true if it is for a free range.
+ first_value.state.is_none()
+ }
+
+ pub(crate) fn total_size(&self) -> usize {
+ self.size
+ }
+
+ pub(crate) fn free_oneway_space(&self) -> usize {
+ self.free_oneway_space
+ }
+
+ pub(crate) fn count_buffers(&self) -> usize {
+ self.tree
+ .values()
+ .filter(|desc| desc.state.is_some())
+ .count()
+ }
+
+ pub(crate) fn debug_print(&self, m: &SeqFile) -> Result<()> {
+ for desc in self.tree.values() {
+ let state = match &desc.state {
+ Some(state) => &state.0,
+ None => continue,
+ };
+ seq_print!(
+ m,
+ " buffer: {} size {} pid {}",
+ desc.offset,
+ desc.size,
+ state.pid(),
+ );
+ if state.is_oneway() {
+ seq_print!(m, " oneway");
+ }
+ match state {
+ DescriptorState::Reserved(_res) => {
+ seq_print!(m, " reserved\n");
+ }
+ DescriptorState::Allocated(_alloc) => {
+ seq_print!(m, " allocated\n");
+ }
+ }
+ }
+ Ok(())
+ }
+
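+ /// Finds the smallest free range whose size is at least `size`.
+ ///
+ /// `free_tree` is keyed by `(size, offset)`, so a lower-bound lookup on
+ /// `(size, 0)` lands on the smallest sufficiently large free range. For
+ /// example, with free ranges of sizes 32 and 128, a request for 64 selects
+ /// the range of size 128, which `reserve_new` then splits.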
+ fn find_best_match(&mut self, size: usize) -> Option<&mut Descriptor<T>> {
+ let free_cursor = self.free_tree.cursor_lower_bound(&(size, 0))?;
+ let ((_, offset), ()) = free_cursor.current();
+ self.tree.get_mut(offset)
+ }
+
+ /// Try to reserve a new buffer, using the provided allocation if necessary.
+ pub(crate) fn reserve_new(
+ &mut self,
+ debug_id: usize,
+ size: usize,
+ is_oneway: bool,
+ pid: Pid,
+ alloc: ReserveNewTreeAlloc<T>,
+ ) -> Result<(usize, bool)> {
+ // Compute new value of free_oneway_space, which is set only on success.
+ let new_oneway_space = if is_oneway {
+ match self.free_oneway_space.checked_sub(size) {
+ Some(new_oneway_space) => new_oneway_space,
+ None => return Err(ENOSPC),
+ }
+ } else {
+ self.free_oneway_space
+ };
+
+ // Start detecting spammers once we have less than 20%
+ // of async space left (which is less than 10% of total
+ // buffer size).
+ //
+ // (This will short-circuit, so `low_oneway_space` is
+ // only called when necessary.)
+ let oneway_spam_detected =
+ is_oneway && new_oneway_space < self.size / 10 && self.low_oneway_space(pid);
+
+ let (found_size, found_off, tree_node, free_tree_node) = match self.find_best_match(size) {
+ None => {
+ pr_warn!("ENOSPC from range_alloc.reserve_new - size: {}", size);
+ return Err(ENOSPC);
+ }
+ Some(desc) => {
+ let found_size = desc.size;
+ let found_offset = desc.offset;
+
+ // In case we need to break up the descriptor
+ let new_desc = Descriptor::new(found_offset + size, found_size - size);
+ let (tree_node, free_tree_node, desc_node_res) = alloc.initialize(new_desc);
+
+ desc.state = Some((
+ DescriptorState::new(is_oneway, debug_id, pid),
+ desc_node_res,
+ ));
+ desc.size = size;
+
+ (found_size, found_offset, tree_node, free_tree_node)
+ }
+ };
+ self.free_oneway_space = new_oneway_space;
+ self.free_tree.remove(&(found_size, found_off));
+
+ if found_size != size {
+ self.tree.insert(tree_node);
+ self.free_tree.insert(free_tree_node);
+ }
+
+ Ok((found_off, oneway_spam_detected))
+ }
+
+ pub(crate) fn reservation_abort(&mut self, offset: usize) -> Result<FreedRange> {
+ let mut cursor = self.tree.cursor_lower_bound(&offset).ok_or_else(|| {
+ pr_warn!(
+ "EINVAL from range_alloc.reservation_abort - offset: {}",
+ offset
+ );
+ EINVAL
+ })?;
+
+ let (_, desc) = cursor.current_mut();
+
+ if desc.offset != offset {
+ pr_warn!(
+ "EINVAL from range_alloc.reservation_abort - offset: {}",
+ offset
+ );
+ return Err(EINVAL);
+ }
+
+ let (reservation, free_node_res) = desc.try_change_state(|state| match state {
+ Some((DescriptorState::Reserved(reservation), free_node_res)) => {
+ (None, Ok((reservation, free_node_res)))
+ }
+ None => {
+ pr_warn!(
+ "EINVAL from range_alloc.reservation_abort - offset: {}",
+ offset
+ );
+ (None, Err(EINVAL))
+ }
+ allocated => {
+ pr_warn!(
+ "EPERM from range_alloc.reservation_abort - offset: {}",
+ offset
+ );
+ (allocated, Err(EPERM))
+ }
+ })?;
+
+ let mut size = desc.size;
+ let mut offset = desc.offset;
+ let free_oneway_space_add = if reservation.is_oneway { size } else { 0 };
+
+ self.free_oneway_space += free_oneway_space_add;
+
+ let mut freed_range = FreedRange::interior_pages(offset, size);
+ // Compute how large the next free region needs to be to include one more page in
+ // the newly freed range.
+ let add_next_page_needed = match (offset + size) % PAGE_SIZE {
+ 0 => usize::MAX,
+ unalign => PAGE_SIZE - unalign,
+ };
+ // Compute how large the previous free region needs to be to include one more page
+ // in the newly freed range.
+ let add_prev_page_needed = match offset % PAGE_SIZE {
+ 0 => usize::MAX,
+ unalign => unalign,
+ };
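+ // For example, with 4096-byte pages, if the freed range ends at offset
+ // 0x1300, the next free region must be at least 0xd00 bytes so that the
+ // merged free range reaches the page boundary at 0x2000 and the page
+ // holding offset 0x1300 can also be freed.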
+
+ // Merge next into current if next is free
+ let remove_next = match cursor.peek_next() {
+ Some((_, next)) if next.state.is_none() => {
+ if next.size >= add_next_page_needed {
+ freed_range.end_page_idx += 1;
+ }
+ self.free_tree.remove(&(next.size, next.offset));
+ size += next.size;
+ true
+ }
+ _ => false,
+ };
+
+ if remove_next {
+ let (_, desc) = cursor.current_mut();
+ desc.size = size;
+ cursor.remove_next();
+ }
+
+ // Merge current into prev if prev is free
+ match cursor.peek_prev_mut() {
+ Some((_, prev)) if prev.state.is_none() => {
+ if prev.size >= add_prev_page_needed {
+ freed_range.start_page_idx -= 1;
+ }
+ // merge previous with current, remove current
+ self.free_tree.remove(&(prev.size, prev.offset));
+ offset = prev.offset;
+ size += prev.size;
+ prev.size = size;
+ cursor.remove_current();
+ }
+ _ => {}
+ };
+
+ self.free_tree
+ .insert(free_node_res.into_node((size, offset), ()));
+
+ Ok(freed_range)
+ }
+
+ pub(crate) fn reservation_commit(&mut self, offset: usize, data: &mut Option<T>) -> Result {
+ let desc = self.tree.get_mut(&offset).ok_or(ENOENT)?;
+
+ desc.try_change_state(|state| match state {
+ Some((DescriptorState::Reserved(reservation), free_node_res)) => (
+ Some((
+ DescriptorState::Allocated(reservation.allocate(data.take())),
+ free_node_res,
+ )),
+ Ok(()),
+ ),
+ other => (other, Err(ENOENT)),
+ })
+ }
+
+ /// Takes an entry at the given offset from [`DescriptorState::Allocated`] to
+ /// [`DescriptorState::Reserved`].
+ ///
+ /// Returns the size of the existing entry and the data associated with it.
+ pub(crate) fn reserve_existing(&mut self, offset: usize) -> Result<(usize, usize, Option<T>)> {
+ let desc = self.tree.get_mut(&offset).ok_or_else(|| {
+ pr_warn!(
+ "ENOENT from range_alloc.reserve_existing - offset: {}",
+ offset
+ );
+ ENOENT
+ })?;
+
+ let (debug_id, data) = desc.try_change_state(|state| match state {
+ Some((DescriptorState::Allocated(allocation), free_node_res)) => {
+ let (reservation, data) = allocation.deallocate();
+ let debug_id = reservation.debug_id;
+ (
+ Some((DescriptorState::Reserved(reservation), free_node_res)),
+ Ok((debug_id, data)),
+ )
+ }
+ other => {
+ pr_warn!(
+ "ENOENT from range_alloc.reserve_existing - offset: {}",
+ offset
+ );
+ (other, Err(ENOENT))
+ }
+ })?;
+
+ Ok((desc.size, debug_id, data))
+ }
+
+ /// Call the provided callback at every allocated region.
+ ///
+ /// This destroys the range allocator. Used only during shutdown.
+ pub(crate) fn take_for_each<F: Fn(usize, usize, usize, Option<T>)>(&mut self, callback: F) {
+ for (_, desc) in self.tree.iter_mut() {
+ if let Some((DescriptorState::Allocated(allocation), _)) = &mut desc.state {
+ callback(
+ desc.offset,
+ desc.size,
+ allocation.debug_id(),
+ allocation.take(),
+ );
+ }
+ }
+ }
+
+ /// Find the amount and size of buffers allocated by the current caller.
+ ///
+ /// The idea is that once we cross the threshold, whoever is responsible
+ /// for the low async space is likely to try to send another async transaction,
+ /// and at some point we'll catch them in the act. This is more efficient
+ /// than keeping a map per pid.
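+ ///
+ /// As a worked example, with a 4 MiB mmap the async space starts at 2 MiB:
+ /// `reserve_new` only consults this check once free async space drops below
+ /// 4 MiB / 10 (about 410 KiB), and the check then reports spam if the caller
+ /// holds more than 50 oneway buffers or more than 4 MiB / 4 = 1 MiB of them.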
+ fn low_oneway_space(&self, calling_pid: Pid) -> bool {
+ let mut total_alloc_size = 0;
+ let mut num_buffers = 0;
+ for (_, desc) in self.tree.iter() {
+ if let Some((state, _)) = &desc.state {
+ if state.is_oneway() && state.pid() == calling_pid {
+ total_alloc_size += desc.size;
+ num_buffers += 1;
+ }
+ }
+ }
+
+ // Warn if this pid has more than 50 transactions, or more than 50% of
+ // async space (which is 25% of total buffer size). Oneway spam is only
+ // detected when the threshold is exceeded.
+ num_buffers > 50 || total_alloc_size > self.size / 4
+ }
+}
+
+type TreeDescriptorState<T> = (DescriptorState<T>, FreeNodeRes);
+struct Descriptor<T> {
+ size: usize,
+ offset: usize,
+ state: Option<TreeDescriptorState<T>>,
+}
+
+impl<T> Descriptor<T> {
+ fn new(offset: usize, size: usize) -> Self {
+ Self {
+ size,
+ offset,
+ state: None,
+ }
+ }
+
+ fn try_change_state<F, Data>(&mut self, f: F) -> Result<Data>
+ where
+ F: FnOnce(Option<TreeDescriptorState<T>>) -> (Option<TreeDescriptorState<T>>, Result<Data>),
+ {
+ let (new_state, result) = f(self.state.take());
+ self.state = new_state;
+ result
+ }
+}
+
+// (Descriptor.size, Descriptor.offset)
+type FreeKey = (usize, usize);
+type FreeNodeRes = RBTreeNodeReservation<FreeKey, ()>;
+
+/// An allocation for use by `reserve_new`.
+pub(crate) struct ReserveNewTreeAlloc<T> {
+ tree_node_res: RBTreeNodeReservation<usize, Descriptor<T>>,
+ free_tree_node_res: FreeNodeRes,
+ desc_node_res: FreeNodeRes,
+}
+
+impl<T> ReserveNewTreeAlloc<T> {
+ pub(crate) fn try_new() -> Result<Self> {
+ let tree_node_res = RBTreeNodeReservation::new(GFP_KERNEL)?;
+ let free_tree_node_res = RBTreeNodeReservation::new(GFP_KERNEL)?;
+ let desc_node_res = RBTreeNodeReservation::new(GFP_KERNEL)?;
+ Ok(Self {
+ tree_node_res,
+ free_tree_node_res,
+ desc_node_res,
+ })
+ }
+
+ fn initialize(
+ self,
+ desc: Descriptor<T>,
+ ) -> (
+ RBTreeNode<usize, Descriptor<T>>,
+ RBTreeNode<FreeKey, ()>,
+ FreeNodeRes,
+ ) {
+ let size = desc.size;
+ let offset = desc.offset;
+ (
+ self.tree_node_res.into_node(offset, desc),
+ self.free_tree_node_res.into_node((size, offset), ()),
+ self.desc_node_res,
+ )
+ }
+}
+
+/// An allocation for creating a tree from an `ArrayRangeAllocator`.
+pub(crate) struct FromArrayAllocs<T> {
+ tree: KVec<RBTreeNodeReservation<usize, Descriptor<T>>>,
+ free_tree: KVec<RBTreeNodeReservation<FreeKey, ()>>,
+}
+
+impl<T> FromArrayAllocs<T> {
+ pub(crate) fn try_new(len: usize) -> Result<Self> {
+ let num_descriptors = 2 * len + 1;
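+ // Each of the `len` ranges needs one tree node, and there are at most
+ // `len + 1` free gaps between and around them, one node each.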
+
+ let mut tree = KVec::with_capacity(num_descriptors, GFP_KERNEL)?;
+ for _ in 0..num_descriptors {
+ tree.push(RBTreeNodeReservation::new(GFP_KERNEL)?, GFP_KERNEL)?;
+ }
+
+ let mut free_tree = KVec::with_capacity(num_descriptors, GFP_KERNEL)?;
+ for _ in 0..num_descriptors {
+ free_tree.push(RBTreeNodeReservation::new(GFP_KERNEL)?, GFP_KERNEL)?;
+ }
+
+ Ok(Self { tree, free_tree })
+ }
+}
diff --git a/drivers/android/binder/rust_binder.h b/drivers/android/binder/rust_binder.h
new file mode 100644
index 000000000000..31806890ed1a
--- /dev/null
+++ b/drivers/android/binder/rust_binder.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2025 Google, Inc.
+ */
+
+#ifndef _LINUX_RUST_BINDER_H
+#define _LINUX_RUST_BINDER_H
+
+#include <uapi/linux/android/binder.h>
+#include <uapi/linux/android/binderfs.h>
+
+/*
+ * These symbols are exposed by `rust_binderfs.c` and exist here so that Rust
+ * Binder can call them.
+ */
+int init_rust_binderfs(void);
+
+struct dentry;
+struct inode;
+struct dentry *rust_binderfs_create_proc_file(struct inode *nodp, int pid);
+void rust_binderfs_remove_file(struct dentry *dentry);
+
+#endif
diff --git a/drivers/android/binder/rust_binder_events.c b/drivers/android/binder/rust_binder_events.c
new file mode 100644
index 000000000000..488b1470060c
--- /dev/null
+++ b/drivers/android/binder/rust_binder_events.c
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* rust_binder_events.c
+ *
+ * Rust Binder tracepoints.
+ *
+ * Copyright 2025 Google LLC
+ */
+
+#include "rust_binder.h"
+
+const char * const binder_command_strings[] = {
+ "BC_TRANSACTION",
+ "BC_REPLY",
+ "BC_ACQUIRE_RESULT",
+ "BC_FREE_BUFFER",
+ "BC_INCREFS",
+ "BC_ACQUIRE",
+ "BC_RELEASE",
+ "BC_DECREFS",
+ "BC_INCREFS_DONE",
+ "BC_ACQUIRE_DONE",
+ "BC_ATTEMPT_ACQUIRE",
+ "BC_REGISTER_LOOPER",
+ "BC_ENTER_LOOPER",
+ "BC_EXIT_LOOPER",
+ "BC_REQUEST_DEATH_NOTIFICATION",
+ "BC_CLEAR_DEATH_NOTIFICATION",
+ "BC_DEAD_BINDER_DONE",
+ "BC_TRANSACTION_SG",
+ "BC_REPLY_SG",
+};
+
+const char * const binder_return_strings[] = {
+ "BR_ERROR",
+ "BR_OK",
+ "BR_TRANSACTION",
+ "BR_REPLY",
+ "BR_ACQUIRE_RESULT",
+ "BR_DEAD_REPLY",
+ "BR_TRANSACTION_COMPLETE",
+ "BR_INCREFS",
+ "BR_ACQUIRE",
+ "BR_RELEASE",
+ "BR_DECREFS",
+ "BR_ATTEMPT_ACQUIRE",
+ "BR_NOOP",
+ "BR_SPAWN_LOOPER",
+ "BR_FINISHED",
+ "BR_DEAD_BINDER",
+ "BR_CLEAR_DEATH_NOTIFICATION_DONE",
+ "BR_FAILED_REPLY",
+ "BR_FROZEN_REPLY",
+ "BR_ONEWAY_SPAM_SUSPECT",
+ "BR_TRANSACTION_PENDING_FROZEN"
+};
+
+#define CREATE_TRACE_POINTS
+#define CREATE_RUST_TRACE_POINTS
+#include "rust_binder_events.h"
diff --git a/drivers/android/binder/rust_binder_events.h b/drivers/android/binder/rust_binder_events.h
new file mode 100644
index 000000000000..2f3efbf9dba6
--- /dev/null
+++ b/drivers/android/binder/rust_binder_events.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2025 Google, Inc.
+ */
+
+#undef TRACE_SYSTEM
+#undef TRACE_INCLUDE_FILE
+#undef TRACE_INCLUDE_PATH
+#define TRACE_SYSTEM rust_binder
+#define TRACE_INCLUDE_FILE rust_binder_events
+#define TRACE_INCLUDE_PATH ../drivers/android/binder
+
+#if !defined(_RUST_BINDER_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _RUST_BINDER_TRACE_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(rust_binder_ioctl,
+ TP_PROTO(unsigned int cmd, unsigned long arg),
+ TP_ARGS(cmd, arg),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, cmd)
+ __field(unsigned long, arg)
+ ),
+ TP_fast_assign(
+ __entry->cmd = cmd;
+ __entry->arg = arg;
+ ),
+ TP_printk("cmd=0x%x arg=0x%lx", __entry->cmd, __entry->arg)
+);
+
+#endif /* _RUST_BINDER_TRACE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/drivers/android/binder/rust_binder_internal.h b/drivers/android/binder/rust_binder_internal.h
new file mode 100644
index 000000000000..78288fe7964d
--- /dev/null
+++ b/drivers/android/binder/rust_binder_internal.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* rust_binder_internal.h
+ *
+ * This file contains internal data structures used by Rust Binder. Mostly,
+ * these are type definitions used only by binderfs or things that Rust Binder
+ * defines and exports to binderfs.
+ *
+ * It does not include things exported by binderfs to Rust Binder since this
+ * file is not included as input to bindgen.
+ *
+ * Copyright (C) 2025 Google LLC.
+ */
+
+#ifndef _LINUX_RUST_BINDER_INTERNAL_H
+#define _LINUX_RUST_BINDER_INTERNAL_H
+
+#define RUST_BINDERFS_SUPER_MAGIC 0x6c6f6f71
+
+#include <linux/seq_file.h>
+#include <uapi/linux/android/binder.h>
+#include <uapi/linux/android/binderfs.h>
+
+/*
+ * The internal data types in the Rust Binder driver are opaque to C, so we use
+ * void pointer typedefs for these types.
+ */
+typedef void *rust_binder_context;
+
+/**
+ * struct binder_device - information about a binder device node
+ * @minor: the minor number used by this device
+ * @ctx: the Rust Context used by this device, or null for binder-control
+ *
+ * This is used as the private data for files directly in binderfs, but not
+ * files in the binder_logs subdirectory. This struct owns a refcount on `ctx`
+ * and the entry for `minor` in `binderfs_minors`. For binder-control `ctx` is
+ * null.
+ */
+struct binder_device {
+ int minor;
+ rust_binder_context ctx;
+};
+
+int rust_binder_stats_show(struct seq_file *m, void *unused);
+int rust_binder_state_show(struct seq_file *m, void *unused);
+int rust_binder_transactions_show(struct seq_file *m, void *unused);
+int rust_binder_proc_show(struct seq_file *m, void *pid);
+
+extern const struct file_operations rust_binder_fops;
+rust_binder_context rust_binder_new_context(char *name);
+void rust_binder_remove_context(rust_binder_context device);
+
+/**
+ * binderfs_mount_opts - mount options for binderfs
+ * @max: maximum number of allocatable binderfs binder devices
+ * @stats_mode: enable binder stats in binderfs.
+ */
+struct binderfs_mount_opts {
+ int max;
+ int stats_mode;
+};
+
+/**
+ * binderfs_info - information about a binderfs mount
+ * @ipc_ns: The ipc namespace the binderfs mount belongs to.
+ * @control_dentry: This records the dentry of this binderfs mount
+ * binder-control device.
+ * @root_uid: uid that needs to be used when a new binder device is
+ * created.
+ * @root_gid: gid that needs to be used when a new binder device is
+ * created.
+ * @mount_opts: The mount options in use.
+ * @device_count: The current number of allocated binder devices.
+ * @proc_log_dir: Pointer to the directory dentry containing process-specific
+ * logs.
+ */
+struct binderfs_info {
+ struct ipc_namespace *ipc_ns;
+ struct dentry *control_dentry;
+ kuid_t root_uid;
+ kgid_t root_gid;
+ struct binderfs_mount_opts mount_opts;
+ int device_count;
+ struct dentry *proc_log_dir;
+};
+
+#endif /* _LINUX_RUST_BINDER_INTERNAL_H */
diff --git a/drivers/android/binder/rust_binder_main.rs b/drivers/android/binder/rust_binder_main.rs
new file mode 100644
index 000000000000..6773b7c273ec
--- /dev/null
+++ b/drivers/android/binder/rust_binder_main.rs
@@ -0,0 +1,627 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+//! Binder -- the Android IPC mechanism.
+#![recursion_limit = "256"]
+#![allow(
+ clippy::as_underscore,
+ clippy::ref_as_ptr,
+ clippy::ptr_as_ptr,
+ clippy::cast_lossless
+)]
+
+use kernel::{
+ bindings::{self, seq_file},
+ fs::File,
+ list::{ListArc, ListArcSafe, ListLinksSelfPtr, TryNewListArc},
+ prelude::*,
+ seq_file::SeqFile,
+ seq_print,
+ sync::poll::PollTable,
+ sync::Arc,
+ task::Pid,
+ transmute::AsBytes,
+ types::ForeignOwnable,
+ uaccess::UserSliceWriter,
+};
+
+use crate::{context::Context, page_range::Shrinker, process::Process, thread::Thread};
+
+use core::{
+ ptr::NonNull,
+ sync::atomic::{AtomicBool, AtomicUsize, Ordering},
+};
+
+mod allocation;
+mod context;
+mod deferred_close;
+mod defs;
+mod error;
+mod node;
+mod page_range;
+mod process;
+mod range_alloc;
+mod stats;
+mod thread;
+mod trace;
+mod transaction;
+
+#[allow(warnings)] // generated bindgen code
+mod binderfs {
+ use kernel::bindings::{dentry, inode};
+
+ extern "C" {
+ pub fn init_rust_binderfs() -> kernel::ffi::c_int;
+ }
+ extern "C" {
+ pub fn rust_binderfs_create_proc_file(
+ nodp: *mut inode,
+ pid: kernel::ffi::c_int,
+ ) -> *mut dentry;
+ }
+ extern "C" {
+ pub fn rust_binderfs_remove_file(dentry: *mut dentry);
+ }
+ pub type rust_binder_context = *mut kernel::ffi::c_void;
+ #[repr(C)]
+ #[derive(Copy, Clone)]
+ pub struct binder_device {
+ pub minor: kernel::ffi::c_int,
+ pub ctx: rust_binder_context,
+ }
+ impl Default for binder_device {
+ fn default() -> Self {
+ let mut s = ::core::mem::MaybeUninit::<Self>::uninit();
+ unsafe {
+ ::core::ptr::write_bytes(s.as_mut_ptr(), 0, 1);
+ s.assume_init()
+ }
+ }
+ }
+}
+
+module! {
+ type: BinderModule,
+ name: "rust_binder",
+ authors: ["Wedson Almeida Filho", "Alice Ryhl"],
+ description: "Android Binder",
+ license: "GPL",
+}
+
+fn next_debug_id() -> usize {
+ static NEXT_DEBUG_ID: AtomicUsize = AtomicUsize::new(0);
+
+ NEXT_DEBUG_ID.fetch_add(1, Ordering::Relaxed)
+}
+
+/// Provides a single place to write Binder return values via the
+/// supplied `UserSliceWriter`.
+pub(crate) struct BinderReturnWriter<'a> {
+ writer: UserSliceWriter,
+ thread: &'a Thread,
+}
+
+impl<'a> BinderReturnWriter<'a> {
+ fn new(writer: UserSliceWriter, thread: &'a Thread) -> Self {
+ BinderReturnWriter { writer, thread }
+ }
+
+ /// Write a return code back to user space.
+ /// Should be a `BR_` constant from [`defs`] e.g. [`defs::BR_TRANSACTION_COMPLETE`].
+ fn write_code(&mut self, code: u32) -> Result {
+ stats::GLOBAL_STATS.inc_br(code);
+ self.thread.process.stats.inc_br(code);
+ self.writer.write(&code)
+ }
+
+ /// Write something *other than* a return code to user space.
+ fn write_payload<T: AsBytes>(&mut self, payload: &T) -> Result {
+ self.writer.write(payload)
+ }
+
+ fn len(&self) -> usize {
+ self.writer.len()
+ }
+}
+
+/// Specifies how a type should be delivered to the read part of a BINDER_WRITE_READ ioctl.
+///
+/// When a value is pushed to the todo list for a process or thread, it is stored as a trait object
+/// with the type `Arc<dyn DeliverToRead>`. Trait objects are a Rust feature that lets you
+/// implement dynamic dispatch over many different types. This lets us store many different types
+/// in the todo list.
+trait DeliverToRead: ListArcSafe + Send + Sync {
+ /// Performs work. Returns true if remaining work items in the queue should be processed
+ /// immediately, or false if the read should return to the caller before processing
+ /// additional work items.
+ fn do_work(
+ self: DArc<Self>,
+ thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool>;
+
+ /// Cancels the given work item. This is called instead of [`DeliverToRead::do_work`] when work
+ /// won't be delivered.
+ fn cancel(self: DArc<Self>);
+
+ /// Should we use `wake_up_interruptible_sync` or `wake_up_interruptible` when scheduling this
+ /// work item?
+ ///
+ /// Generally only set to true for non-oneway transactions.
+ fn should_sync_wakeup(&self) -> bool;
+
+ fn debug_print(&self, m: &SeqFile, prefix: &str, transaction_prefix: &str) -> Result<()>;
+}
+
+// Wrapper around a `DeliverToRead` with linked list links.
+#[pin_data]
+struct DTRWrap<T: ?Sized> {
+ #[pin]
+ links: ListLinksSelfPtr<DTRWrap<dyn DeliverToRead>>,
+ #[pin]
+ wrapped: T,
+}
+kernel::list::impl_list_arc_safe! {
+ impl{T: ListArcSafe + ?Sized} ListArcSafe<0> for DTRWrap<T> {
+ tracked_by wrapped: T;
+ }
+}
+kernel::list::impl_list_item! {
+ impl ListItem<0> for DTRWrap<dyn DeliverToRead> {
+ using ListLinksSelfPtr { self.links };
+ }
+}
+
+impl<T: ?Sized> core::ops::Deref for DTRWrap<T> {
+ type Target = T;
+ fn deref(&self) -> &T {
+ &self.wrapped
+ }
+}
+
+type DArc<T> = kernel::sync::Arc<DTRWrap<T>>;
+type DLArc<T> = kernel::list::ListArc<DTRWrap<T>>;
+
+impl<T: ListArcSafe> DTRWrap<T> {
+ fn new(val: impl PinInit<T>) -> impl PinInit<Self> {
+ pin_init!(Self {
+ links <- ListLinksSelfPtr::new(),
+ wrapped <- val,
+ })
+ }
+
+ fn arc_try_new(val: T) -> Result<DLArc<T>, kernel::alloc::AllocError> {
+ ListArc::pin_init(
+ try_pin_init!(Self {
+ links <- ListLinksSelfPtr::new(),
+ wrapped: val,
+ }),
+ GFP_KERNEL,
+ )
+ .map_err(|_| kernel::alloc::AllocError)
+ }
+
+ fn arc_pin_init(init: impl PinInit<T>) -> Result<DLArc<T>, kernel::error::Error> {
+ ListArc::pin_init(
+ try_pin_init!(Self {
+ links <- ListLinksSelfPtr::new(),
+ wrapped <- init,
+ }),
+ GFP_KERNEL,
+ )
+ }
+}
+
+struct DeliverCode {
+ code: u32,
+ skip: AtomicBool,
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for DeliverCode { untracked; }
+}
+
+impl DeliverCode {
+ fn new(code: u32) -> Self {
+ Self {
+ code,
+ skip: AtomicBool::new(false),
+ }
+ }
+
+ /// Disable this DeliverCode and make it do nothing.
+ ///
+ /// This is used instead of removing it from the work list, since `List::remove` is
+ /// unsafe, whereas this method is not.
+ fn skip(&self) {
+ self.skip.store(true, Ordering::Relaxed);
+ }
+}
+
+impl DeliverToRead for DeliverCode {
+ fn do_work(
+ self: DArc<Self>,
+ _thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool> {
+ if !self.skip.load(Ordering::Relaxed) {
+ writer.write_code(self.code)?;
+ }
+ Ok(true)
+ }
+
+ fn cancel(self: DArc<Self>) {}
+
+ fn should_sync_wakeup(&self) -> bool {
+ false
+ }
+
+ fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
+ seq_print!(m, "{}", prefix);
+ if self.skip.load(Ordering::Relaxed) {
+ seq_print!(m, "(skipped) ");
+ }
+ if self.code == defs::BR_TRANSACTION_COMPLETE {
+ seq_print!(m, "transaction complete\n");
+ } else {
+ seq_print!(m, "transaction error: {}\n", self.code);
+ }
+ Ok(())
+ }
+}
+
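+/// Rounds `value` up to the next multiple of the pointer width, returning
+/// `None` on overflow. For example, on a 64-bit target `ptr_align(13)` is
+/// `Some(16)` and `ptr_align(16)` is also `Some(16)`.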
+fn ptr_align(value: usize) -> Option<usize> {
+ let size = core::mem::size_of::<usize>() - 1;
+ Some(value.checked_add(size)? & !size)
+}
+
+// SAFETY: We call register in `init`.
+static BINDER_SHRINKER: Shrinker = unsafe { Shrinker::new() };
+
+struct BinderModule {}
+
+impl kernel::Module for BinderModule {
+ fn init(_module: &'static kernel::ThisModule) -> Result<Self> {
+ // SAFETY: The module initializer never runs twice, so we only call this once.
+ unsafe { crate::context::CONTEXTS.init() };
+
+ pr_warn!("Loaded Rust Binder.");
+
+ BINDER_SHRINKER.register(kernel::c_str!("android-binder"))?;
+
+ // SAFETY: The module is being loaded, so we can initialize binderfs.
+ unsafe { kernel::error::to_result(binderfs::init_rust_binderfs())? };
+
+ Ok(Self {})
+ }
+}
+
+/// Makes the inner type Sync.
+#[repr(transparent)]
+pub struct AssertSync<T>(T);
+// SAFETY: Used only to insert `file_operations` into a global, which is safe.
+unsafe impl<T> Sync for AssertSync<T> {}
+
+/// File operations that rust_binderfs.c can use.
+#[no_mangle]
+#[used]
+pub static rust_binder_fops: AssertSync<kernel::bindings::file_operations> = {
+ // SAFETY: All zeroes is safe for the `file_operations` type.
+ let zeroed_ops = unsafe { core::mem::MaybeUninit::zeroed().assume_init() };
+
+ let ops = kernel::bindings::file_operations {
+ owner: THIS_MODULE.as_ptr(),
+ poll: Some(rust_binder_poll),
+ unlocked_ioctl: Some(rust_binder_unlocked_ioctl),
+ compat_ioctl: Some(rust_binder_compat_ioctl),
+ mmap: Some(rust_binder_mmap),
+ open: Some(rust_binder_open),
+ release: Some(rust_binder_release),
+ flush: Some(rust_binder_flush),
+ ..zeroed_ops
+ };
+ AssertSync(ops)
+};
+
+/// # Safety
+/// Only called by binderfs.
+#[no_mangle]
+unsafe extern "C" fn rust_binder_new_context(
+ name: *const kernel::ffi::c_char,
+) -> *mut kernel::ffi::c_void {
+ // SAFETY: The caller will always provide a valid c string here.
+ let name = unsafe { kernel::str::CStr::from_char_ptr(name) };
+ match Context::new(name) {
+ Ok(ctx) => Arc::into_foreign(ctx),
+ Err(_err) => core::ptr::null_mut(),
+ }
+}
+
+/// # Safety
+/// Only called by binderfs.
+#[no_mangle]
+unsafe extern "C" fn rust_binder_remove_context(device: *mut kernel::ffi::c_void) {
+ if !device.is_null() {
+ // SAFETY: The caller ensures that the `device` pointer came from a previous call to
+ // `rust_binder_new_context`.
+ let ctx = unsafe { Arc::<Context>::from_foreign(device) };
+ ctx.deregister();
+ drop(ctx);
+ }
+}
+
+/// # Safety
+/// Only called by binderfs.
+unsafe extern "C" fn rust_binder_open(
+ inode: *mut bindings::inode,
+ file_ptr: *mut bindings::file,
+) -> kernel::ffi::c_int {
+ // SAFETY: The `rust_binderfs.c` file ensures that `i_private` is set to a
+ // `struct binder_device`.
+ let device = unsafe { (*inode).i_private } as *const binderfs::binder_device;
+
+ assert!(!device.is_null());
+
+ // SAFETY: The `rust_binderfs.c` file ensures that `device->ctx` holds a binder context when
+ // using the rust binder fops.
+ let ctx = unsafe { Arc::<Context>::borrow((*device).ctx) };
+
+ // SAFETY: The caller provides a valid file pointer to a new `struct file`.
+ let file = unsafe { File::from_raw_file(file_ptr) };
+ let process = match Process::open(ctx, file) {
+ Ok(process) => process,
+ Err(err) => return err.to_errno(),
+ };
+
+ // SAFETY: This is an `inode` for a newly created binder file.
+ match unsafe { BinderfsProcFile::new(inode, process.task.pid()) } {
+ Ok(Some(file)) => process.inner.lock().binderfs_file = Some(file),
+ Ok(None) => { /* pid already exists */ }
+ Err(err) => return err.to_errno(),
+ }
+
+ // SAFETY: This file is associated with Rust binder, so we own the `private_data` field.
+ unsafe { (*file_ptr).private_data = process.into_foreign() };
+ 0
+}
+
+/// # Safety
+/// Only called by binderfs.
+unsafe extern "C" fn rust_binder_release(
+ _inode: *mut bindings::inode,
+ file: *mut bindings::file,
+) -> kernel::ffi::c_int {
+ // SAFETY: We previously set `private_data` in `rust_binder_open`.
+ let process = unsafe { Arc::<Process>::from_foreign((*file).private_data) };
+ // SAFETY: The caller ensures that the file is valid.
+ let file = unsafe { File::from_raw_file(file) };
+ Process::release(process, file);
+ 0
+}
+
+/// # Safety
+/// Only called by binderfs.
+unsafe extern "C" fn rust_binder_compat_ioctl(
+ file: *mut bindings::file,
+ cmd: kernel::ffi::c_uint,
+ arg: kernel::ffi::c_ulong,
+) -> kernel::ffi::c_long {
+ // SAFETY: We previously set `private_data` in `rust_binder_open`.
+ let f = unsafe { Arc::<Process>::borrow((*file).private_data) };
+ // SAFETY: The caller ensures that the file is valid.
+ match Process::compat_ioctl(f, unsafe { File::from_raw_file(file) }, cmd as _, arg as _) {
+ Ok(()) => 0,
+ Err(err) => err.to_errno() as isize,
+ }
+}
+
+/// # Safety
+/// Only called by binderfs.
+unsafe extern "C" fn rust_binder_unlocked_ioctl(
+ file: *mut bindings::file,
+ cmd: kernel::ffi::c_uint,
+ arg: kernel::ffi::c_ulong,
+) -> kernel::ffi::c_long {
+ // SAFETY: We previously set `private_data` in `rust_binder_open`.
+ let f = unsafe { Arc::<Process>::borrow((*file).private_data) };
+ // SAFETY: The caller ensures that the file is valid.
+ match Process::ioctl(f, unsafe { File::from_raw_file(file) }, cmd as _, arg as _) {
+ Ok(()) => 0,
+ Err(err) => err.to_errno() as isize,
+ }
+}
+
+/// # Safety
+/// Only called by binderfs.
+unsafe extern "C" fn rust_binder_mmap(
+ file: *mut bindings::file,
+ vma: *mut bindings::vm_area_struct,
+) -> kernel::ffi::c_int {
+ // SAFETY: We previously set `private_data` in `rust_binder_open`.
+ let f = unsafe { Arc::<Process>::borrow((*file).private_data) };
+ // SAFETY: The caller ensures that the vma is valid.
+ let area = unsafe { kernel::mm::virt::VmaNew::from_raw(vma) };
+ // SAFETY: The caller ensures that the file is valid.
+ match Process::mmap(f, unsafe { File::from_raw_file(file) }, area) {
+ Ok(()) => 0,
+ Err(err) => err.to_errno(),
+ }
+}
+
+/// # Safety
+/// Only called by binderfs.
+unsafe extern "C" fn rust_binder_poll(
+ file: *mut bindings::file,
+ wait: *mut bindings::poll_table_struct,
+) -> bindings::__poll_t {
+ // SAFETY: We previously set `private_data` in `rust_binder_open`.
+ let f = unsafe { Arc::<Process>::borrow((*file).private_data) };
+ // SAFETY: The caller ensures that the file is valid.
+ let fileref = unsafe { File::from_raw_file(file) };
+ // SAFETY: The caller ensures that the `PollTable` is valid.
+ match Process::poll(f, fileref, unsafe { PollTable::from_raw(wait) }) {
+ Ok(v) => v,
+ Err(_) => bindings::POLLERR,
+ }
+}
+
+/// # Safety
+/// Only called by binderfs.
+unsafe extern "C" fn rust_binder_flush(
+ file: *mut bindings::file,
+ _id: bindings::fl_owner_t,
+) -> kernel::ffi::c_int {
+ // SAFETY: We previously set `private_data` in `rust_binder_open`.
+ let f = unsafe { Arc::<Process>::borrow((*file).private_data) };
+ match Process::flush(f) {
+ Ok(()) => 0,
+ Err(err) => err.to_errno(),
+ }
+}
+
+/// # Safety
+/// Only called by binderfs.
+#[no_mangle]
+unsafe extern "C" fn rust_binder_stats_show(
+ ptr: *mut seq_file,
+ _: *mut kernel::ffi::c_void,
+) -> kernel::ffi::c_int {
+ // SAFETY: The caller ensures that the pointer is valid and exclusive for the duration in which
+ // this method is called.
+ let m = unsafe { SeqFile::from_raw(ptr) };
+ if let Err(err) = rust_binder_stats_show_impl(m) {
+ seq_print!(m, "failed to generate state: {:?}\n", err);
+ }
+ 0
+}
+
+/// # Safety
+/// Only called by binderfs.
+#[no_mangle]
+unsafe extern "C" fn rust_binder_state_show(
+ ptr: *mut seq_file,
+ _: *mut kernel::ffi::c_void,
+) -> kernel::ffi::c_int {
+ // SAFETY: The caller ensures that the pointer is valid and exclusive for the duration in which
+ // this method is called.
+ let m = unsafe { SeqFile::from_raw(ptr) };
+ if let Err(err) = rust_binder_state_show_impl(m) {
+ seq_print!(m, "failed to generate state: {:?}\n", err);
+ }
+ 0
+}
+
+/// # Safety
+/// Only called by binderfs.
+#[no_mangle]
+unsafe extern "C" fn rust_binder_proc_show(
+ ptr: *mut seq_file,
+ _: *mut kernel::ffi::c_void,
+) -> kernel::ffi::c_int {
+ // SAFETY: Accessing the private field of `seq_file` is okay.
+ let pid = (unsafe { (*ptr).private }) as usize as Pid;
+ // SAFETY: The caller ensures that the pointer is valid and exclusive for the duration in which
+ // this method is called.
+ let m = unsafe { SeqFile::from_raw(ptr) };
+ if let Err(err) = rust_binder_proc_show_impl(m, pid) {
+ seq_print!(m, "failed to generate state: {:?}\n", err);
+ }
+ 0
+}
+
+/// # Safety
+/// Only called by binderfs.
+#[no_mangle]
+unsafe extern "C" fn rust_binder_transactions_show(
+ ptr: *mut seq_file,
+ _: *mut kernel::ffi::c_void,
+) -> kernel::ffi::c_int {
+ // SAFETY: The caller ensures that the pointer is valid and exclusive for the duration in which
+ // this method is called.
+ let m = unsafe { SeqFile::from_raw(ptr) };
+ if let Err(err) = rust_binder_transactions_show_impl(m) {
+ seq_print!(m, "failed to generate state: {:?}\n", err);
+ }
+ 0
+}
+
+fn rust_binder_transactions_show_impl(m: &SeqFile) -> Result<()> {
+ seq_print!(m, "binder transactions:\n");
+ let contexts = context::get_all_contexts()?;
+ for ctx in contexts {
+ let procs = ctx.get_all_procs()?;
+ for proc in procs {
+ proc.debug_print(m, &ctx, false)?;
+ seq_print!(m, "\n");
+ }
+ }
+ Ok(())
+}
+
+fn rust_binder_stats_show_impl(m: &SeqFile) -> Result<()> {
+ seq_print!(m, "binder stats:\n");
+ stats::GLOBAL_STATS.debug_print("", m);
+ let contexts = context::get_all_contexts()?;
+ for ctx in contexts {
+ let procs = ctx.get_all_procs()?;
+ for proc in procs {
+ proc.debug_print_stats(m, &ctx)?;
+ seq_print!(m, "\n");
+ }
+ }
+ Ok(())
+}
+
+fn rust_binder_state_show_impl(m: &SeqFile) -> Result<()> {
+ seq_print!(m, "binder state:\n");
+ let contexts = context::get_all_contexts()?;
+ for ctx in contexts {
+ let procs = ctx.get_all_procs()?;
+ for proc in procs {
+ proc.debug_print(m, &ctx, true)?;
+ seq_print!(m, "\n");
+ }
+ }
+ Ok(())
+}
+
+fn rust_binder_proc_show_impl(m: &SeqFile, pid: Pid) -> Result<()> {
+ seq_print!(m, "binder proc state:\n");
+ let contexts = context::get_all_contexts()?;
+ for ctx in contexts {
+ let procs = ctx.get_procs_with_pid(pid)?;
+ for proc in procs {
+ proc.debug_print(m, &ctx, true)?;
+ seq_print!(m, "\n");
+ }
+ }
+ Ok(())
+}
+
+struct BinderfsProcFile(NonNull<bindings::dentry>);
+
+// SAFETY: Safe to drop any thread.
+unsafe impl Send for BinderfsProcFile {}
+
+impl BinderfsProcFile {
+ /// # Safety
+ ///
+ /// Takes an inode from a newly created binder file.
+ unsafe fn new(nodp: *mut bindings::inode, pid: i32) -> Result<Option<Self>> {
+ // SAFETY: The caller passes an `inode` for a newly created binder file.
+ let dentry = unsafe { binderfs::rust_binderfs_create_proc_file(nodp, pid) };
+ match kernel::error::from_err_ptr(dentry) {
+ Ok(dentry) => Ok(NonNull::new(dentry).map(Self)),
+ Err(err) if err == EEXIST => Ok(None),
+ Err(err) => Err(err),
+ }
+ }
+}
+
+impl Drop for BinderfsProcFile {
+ fn drop(&mut self) {
+ // SAFETY: This is a dentry from `rust_binderfs_create_proc_file` that has not been deleted yet.
+ unsafe { binderfs::rust_binderfs_remove_file(self.0.as_ptr()) };
+ }
+}
diff --git a/drivers/android/binder/rust_binderfs.c b/drivers/android/binder/rust_binderfs.c
new file mode 100644
index 000000000000..6b497146b698
--- /dev/null
+++ b/drivers/android/binder/rust_binderfs.c
@@ -0,0 +1,850 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/compiler_types.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/fsnotify.h>
+#include <linux/gfp.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/ipc_namespace.h>
+#include <linux/kdev_t.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/namei.h>
+#include <linux/magic.h>
+#include <linux/major.h>
+#include <linux/miscdevice.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/mount.h>
+#include <linux/fs_parser.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/slab.h>
+#include <linux/spinlock_types.h>
+#include <linux/stddef.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/user_namespace.h>
+#include <linux/xarray.h>
+#include <uapi/asm-generic/errno-base.h>
+#include <uapi/linux/android/binder.h>
+#include <uapi/linux/android/binderfs.h>
+
+#include "rust_binder.h"
+#include "rust_binder_internal.h"
+
+#define FIRST_INODE 1
+#define SECOND_INODE 2
+#define INODE_OFFSET 3
+#define BINDERFS_MAX_MINOR (1U << MINORBITS)
+/* Ensure that the initial ipc namespace always has devices available. */
+#define BINDERFS_MAX_MINOR_CAPPED (BINDERFS_MAX_MINOR - 4)
+
+DEFINE_SHOW_ATTRIBUTE(rust_binder_stats);
+DEFINE_SHOW_ATTRIBUTE(rust_binder_state);
+DEFINE_SHOW_ATTRIBUTE(rust_binder_transactions);
+DEFINE_SHOW_ATTRIBUTE(rust_binder_proc);
+
+char *rust_binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES;
+module_param_named(rust_devices, rust_binder_devices_param, charp, 0444);
+
+static dev_t binderfs_dev;
+static DEFINE_MUTEX(binderfs_minors_mutex);
+static DEFINE_IDA(binderfs_minors);
+
+enum binderfs_param {
+ Opt_max,
+ Opt_stats_mode,
+};
+
+enum binderfs_stats_mode {
+ binderfs_stats_mode_unset,
+ binderfs_stats_mode_global,
+};
+
+struct binder_features {
+ bool oneway_spam_detection;
+ bool extended_error;
+ bool freeze_notification;
+};
+
+static const struct constant_table binderfs_param_stats[] = {
+ { "global", binderfs_stats_mode_global },
+ {}
+};
+
+static const struct fs_parameter_spec binderfs_fs_parameters[] = {
+ fsparam_u32("max", Opt_max),
+ fsparam_enum("stats", Opt_stats_mode, binderfs_param_stats),
+ {}
+};
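+
+/*
+ * Example mount (illustrative path; "stats=global" requires CAP_SYS_ADMIN):
+ *
+ *   mount -t binder -o max=4096,stats=global binder /dev/binderfs
+ */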
+
+static struct binder_features binder_features = {
+ .oneway_spam_detection = true,
+ .extended_error = true,
+ .freeze_notification = true,
+};
+
+static inline struct binderfs_info *BINDERFS_SB(const struct super_block *sb)
+{
+ return sb->s_fs_info;
+}
+
+/**
+ * binderfs_binder_device_create - allocate inode from super block of a
+ * binderfs mount
+ * @ref_inode: inode from which the super block will be taken
+ * @userp: buffer to copy information about new device for userspace to
+ * @req: struct binderfs_device as copied from userspace
+ *
+ * This function allocates a new binder_device and reserves a new minor
+ * number for it.
+ * Minor numbers are limited and tracked globally in binderfs_minors. The
+ * function will stash a struct binder_device for the specific binder
+ * device in i_private of the inode.
+ * It will go on to allocate a new inode from the super block of the
+ * filesystem mount, stash a struct binder_device in its i_private field
+ * and attach a dentry to that inode.
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+static int binderfs_binder_device_create(struct inode *ref_inode,
+ struct binderfs_device __user *userp,
+ struct binderfs_device *req)
+{
+ int minor, ret;
+ struct dentry *dentry, *root;
+ struct binder_device *device = NULL;
+ rust_binder_context ctx = NULL;
+ struct inode *inode = NULL;
+ struct super_block *sb = ref_inode->i_sb;
+ struct binderfs_info *info = sb->s_fs_info;
+#if defined(CONFIG_IPC_NS)
+ bool use_reserve = (info->ipc_ns == &init_ipc_ns);
+#else
+ bool use_reserve = true;
+#endif
+
+ /* Reserve new minor number for the new device. */
+ mutex_lock(&binderfs_minors_mutex);
+ if (++info->device_count <= info->mount_opts.max)
+ minor = ida_alloc_max(&binderfs_minors,
+ use_reserve ? BINDERFS_MAX_MINOR :
+ BINDERFS_MAX_MINOR_CAPPED,
+ GFP_KERNEL);
+ else
+ minor = -ENOSPC;
+ if (minor < 0) {
+ --info->device_count;
+ mutex_unlock(&binderfs_minors_mutex);
+ return minor;
+ }
+ mutex_unlock(&binderfs_minors_mutex);
+
+ ret = -ENOMEM;
+ device = kzalloc(sizeof(*device), GFP_KERNEL);
+ if (!device)
+ goto err;
+
+ req->name[BINDERFS_MAX_NAME] = '\0'; /* NUL-terminate */
+
+ ctx = rust_binder_new_context(req->name);
+ if (!ctx)
+ goto err;
+
+ inode = new_inode(sb);
+ if (!inode)
+ goto err;
+
+ inode->i_ino = minor + INODE_OFFSET;
+ simple_inode_init_ts(inode);
+ init_special_inode(inode, S_IFCHR | 0600,
+ MKDEV(MAJOR(binderfs_dev), minor));
+ inode->i_fop = &rust_binder_fops;
+ inode->i_uid = info->root_uid;
+ inode->i_gid = info->root_gid;
+
+ req->major = MAJOR(binderfs_dev);
+ req->minor = minor;
+ device->ctx = ctx;
+ device->minor = minor;
+
+ if (userp && copy_to_user(userp, req, sizeof(*req))) {
+ ret = -EFAULT;
+ goto err;
+ }
+
+ root = sb->s_root;
+ inode_lock(d_inode(root));
+
+ /* look it up */
+ dentry = lookup_noperm(&QSTR(req->name), root);
+ if (IS_ERR(dentry)) {
+ inode_unlock(d_inode(root));
+ ret = PTR_ERR(dentry);
+ goto err;
+ }
+
+ if (d_really_is_positive(dentry)) {
+ /* already exists */
+ dput(dentry);
+ inode_unlock(d_inode(root));
+ ret = -EEXIST;
+ goto err;
+ }
+
+ inode->i_private = device;
+ d_instantiate(dentry, inode);
+ fsnotify_create(root->d_inode, dentry);
+ inode_unlock(d_inode(root));
+
+ return 0;
+
+err:
+ kfree(device);
+ rust_binder_remove_context(ctx);
+ mutex_lock(&binderfs_minors_mutex);
+ --info->device_count;
+ ida_free(&binderfs_minors, minor);
+ mutex_unlock(&binderfs_minors_mutex);
+ iput(inode);
+
+ return ret;
+}
+
+/**
+ * binder_ctl_ioctl - handle binder device node allocation requests
+ *
+ * The request handler for the binder-control device. All requests operate on
+ * the binderfs mount the binder-control device resides in:
+ * - BINDER_CTL_ADD
+ * Allocate a new binder device.
+ *
+ * Return: %0 on success, negative errno on failure.
+ */
+static long binder_ctl_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int ret = -EINVAL;
+ struct inode *inode = file_inode(file);
+ struct binderfs_device __user *device = (struct binderfs_device __user *)arg;
+ struct binderfs_device device_req;
+
+ switch (cmd) {
+ case BINDER_CTL_ADD:
+ ret = copy_from_user(&device_req, device, sizeof(device_req));
+ if (ret) {
+ ret = -EFAULT;
+ break;
+ }
+
+ ret = binderfs_binder_device_create(inode, device, &device_req);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static void binderfs_evict_inode(struct inode *inode)
+{
+ struct binder_device *device = inode->i_private;
+ struct binderfs_info *info = BINDERFS_SB(inode->i_sb);
+
+ clear_inode(inode);
+
+ if (!S_ISCHR(inode->i_mode) || !device)
+ return;
+
+ mutex_lock(&binderfs_minors_mutex);
+ --info->device_count;
+ ida_free(&binderfs_minors, device->minor);
+ mutex_unlock(&binderfs_minors_mutex);
+
+ /* ctx is null for binder-control, but this function ignores null pointers */
+ rust_binder_remove_context(device->ctx);
+
+ kfree(device);
+}
+
+static int binderfs_fs_context_parse_param(struct fs_context *fc,
+ struct fs_parameter *param)
+{
+ int opt;
+ struct binderfs_mount_opts *ctx = fc->fs_private;
+ struct fs_parse_result result;
+
+ opt = fs_parse(fc, binderfs_fs_parameters, param, &result);
+ if (opt < 0)
+ return opt;
+
+ switch (opt) {
+ case Opt_max:
+ if (result.uint_32 > BINDERFS_MAX_MINOR)
+ return invalfc(fc, "Bad value for '%s'", param->key);
+
+ ctx->max = result.uint_32;
+ break;
+ case Opt_stats_mode:
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ ctx->stats_mode = result.uint_32;
+ break;
+ default:
+ return invalfc(fc, "Unsupported parameter '%s'", param->key);
+ }
+
+ return 0;
+}
+
+static int binderfs_fs_context_reconfigure(struct fs_context *fc)
+{
+ struct binderfs_mount_opts *ctx = fc->fs_private;
+ struct binderfs_info *info = BINDERFS_SB(fc->root->d_sb);
+
+ if (info->mount_opts.stats_mode != ctx->stats_mode)
+ return invalfc(fc, "Binderfs stats mode cannot be changed during a remount");
+
+ info->mount_opts.stats_mode = ctx->stats_mode;
+ info->mount_opts.max = ctx->max;
+ return 0;
+}
+
+static int binderfs_show_options(struct seq_file *seq, struct dentry *root)
+{
+ struct binderfs_info *info = BINDERFS_SB(root->d_sb);
+
+ if (info->mount_opts.max <= BINDERFS_MAX_MINOR)
+ seq_printf(seq, ",max=%d", info->mount_opts.max);
+
+ switch (info->mount_opts.stats_mode) {
+ case binderfs_stats_mode_unset:
+ break;
+ case binderfs_stats_mode_global:
+ seq_puts(seq, ",stats=global");
+ break;
+ }
+
+ return 0;
+}
+
+static const struct super_operations binderfs_super_ops = {
+ .evict_inode = binderfs_evict_inode,
+ .show_options = binderfs_show_options,
+ .statfs = simple_statfs,
+};
+
+static inline bool is_binderfs_control_device(const struct dentry *dentry)
+{
+ struct binderfs_info *info = dentry->d_sb->s_fs_info;
+
+ return info->control_dentry == dentry;
+}
+
+static int binderfs_rename(struct mnt_idmap *idmap,
+ struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry,
+ unsigned int flags)
+{
+ if (is_binderfs_control_device(old_dentry) ||
+ is_binderfs_control_device(new_dentry))
+ return -EPERM;
+
+ return simple_rename(idmap, old_dir, old_dentry, new_dir,
+ new_dentry, flags);
+}
+
+static int binderfs_unlink(struct inode *dir, struct dentry *dentry)
+{
+ if (is_binderfs_control_device(dentry))
+ return -EPERM;
+
+ return simple_unlink(dir, dentry);
+}
+
+static const struct file_operations binder_ctl_fops = {
+ .owner = THIS_MODULE,
+ .open = nonseekable_open,
+ .unlocked_ioctl = binder_ctl_ioctl,
+ .compat_ioctl = binder_ctl_ioctl,
+ .llseek = noop_llseek,
+};
+
+/**
+ * binderfs_binder_ctl_create - create a new binder-control device
+ * @sb: super block of the binderfs mount
+ *
+ * This function creates a new binder-control device node in the binderfs mount
+ * referred to by @sb.
+ *
+ * Return: 0 on success, negative errno on failure
+ */
+static int binderfs_binder_ctl_create(struct super_block *sb)
+{
+ int minor, ret;
+ struct dentry *dentry;
+ struct binder_device *device;
+ struct inode *inode = NULL;
+ struct dentry *root = sb->s_root;
+ struct binderfs_info *info = sb->s_fs_info;
+#if defined(CONFIG_IPC_NS)
+ bool use_reserve = (info->ipc_ns == &init_ipc_ns);
+#else
+ bool use_reserve = true;
+#endif
+
+ device = kzalloc(sizeof(*device), GFP_KERNEL);
+ if (!device)
+ return -ENOMEM;
+
+ /* If we have already created a binder-control node, return. */
+ if (info->control_dentry) {
+ ret = 0;
+ goto out;
+ }
+
+ ret = -ENOMEM;
+ inode = new_inode(sb);
+ if (!inode)
+ goto out;
+
+ /* Reserve a new minor number for the new device. */
+ mutex_lock(&binderfs_minors_mutex);
+ minor = ida_alloc_max(&binderfs_minors,
+ use_reserve ? BINDERFS_MAX_MINOR :
+ BINDERFS_MAX_MINOR_CAPPED,
+ GFP_KERNEL);
+ mutex_unlock(&binderfs_minors_mutex);
+ if (minor < 0) {
+ ret = minor;
+ goto out;
+ }
+
+ inode->i_ino = SECOND_INODE;
+ simple_inode_init_ts(inode);
+ init_special_inode(inode, S_IFCHR | 0600,
+ MKDEV(MAJOR(binderfs_dev), minor));
+ inode->i_fop = &binder_ctl_fops;
+ inode->i_uid = info->root_uid;
+ inode->i_gid = info->root_gid;
+
+ device->minor = minor;
+ device->ctx = NULL;
+
+ dentry = d_alloc_name(root, "binder-control");
+ if (!dentry)
+ goto out;
+
+ inode->i_private = device;
+ info->control_dentry = dentry;
+ d_add(dentry, inode);
+
+ return 0;
+
+out:
+ kfree(device);
+ iput(inode);
+
+ return ret;
+}
+
+static const struct inode_operations binderfs_dir_inode_operations = {
+ .lookup = simple_lookup,
+ .rename = binderfs_rename,
+ .unlink = binderfs_unlink,
+};
+
+static struct inode *binderfs_make_inode(struct super_block *sb, int mode)
+{
+ struct inode *ret;
+
+ ret = new_inode(sb);
+ if (ret) {
+ ret->i_ino = iunique(sb, BINDERFS_MAX_MINOR + INODE_OFFSET);
+ ret->i_mode = mode;
+ simple_inode_init_ts(ret);
+ }
+ return ret;
+}
+
+static struct dentry *binderfs_create_dentry(struct dentry *parent,
+ const char *name)
+{
+ struct dentry *dentry;
+
+ dentry = lookup_noperm(&QSTR(name), parent);
+ if (IS_ERR(dentry))
+ return dentry;
+
+ /* Return error if the file/dir already exists. */
+ if (d_really_is_positive(dentry)) {
+ dput(dentry);
+ return ERR_PTR(-EEXIST);
+ }
+
+ return dentry;
+}
+
+void rust_binderfs_remove_file(struct dentry *dentry)
+{
+ struct inode *parent_inode;
+
+ parent_inode = d_inode(dentry->d_parent);
+ inode_lock(parent_inode);
+ if (simple_positive(dentry)) {
+ dget(dentry);
+ simple_unlink(parent_inode, dentry);
+ d_delete(dentry);
+ dput(dentry);
+ }
+ inode_unlock(parent_inode);
+}
+
+static struct dentry *rust_binderfs_create_file(struct dentry *parent, const char *name,
+ const struct file_operations *fops,
+ void *data)
+{
+ struct dentry *dentry;
+ struct inode *new_inode, *parent_inode;
+ struct super_block *sb;
+
+ parent_inode = d_inode(parent);
+ inode_lock(parent_inode);
+
+ dentry = binderfs_create_dentry(parent, name);
+ if (IS_ERR(dentry))
+ goto out;
+
+ sb = parent_inode->i_sb;
+ new_inode = binderfs_make_inode(sb, S_IFREG | 0444);
+ if (!new_inode) {
+ dput(dentry);
+ dentry = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ new_inode->i_fop = fops;
+ new_inode->i_private = data;
+ d_instantiate(dentry, new_inode);
+ fsnotify_create(parent_inode, dentry);
+
+out:
+ inode_unlock(parent_inode);
+ return dentry;
+}
+
+struct dentry *rust_binderfs_create_proc_file(struct inode *nodp, int pid)
+{
+ struct binderfs_info *info = nodp->i_sb->s_fs_info;
+ struct dentry *dir = info->proc_log_dir;
+ char strbuf[20 + 1];
+ void *data = (void *)(unsigned long) pid;
+
+ if (!dir)
+ return NULL;
+
+ snprintf(strbuf, sizeof(strbuf), "%u", pid);
+ return rust_binderfs_create_file(dir, strbuf, &rust_binder_proc_fops, data);
+}
+
+static struct dentry *binderfs_create_dir(struct dentry *parent,
+ const char *name)
+{
+ struct dentry *dentry;
+ struct inode *new_inode, *parent_inode;
+ struct super_block *sb;
+
+ parent_inode = d_inode(parent);
+ inode_lock(parent_inode);
+
+ dentry = binderfs_create_dentry(parent, name);
+ if (IS_ERR(dentry))
+ goto out;
+
+ sb = parent_inode->i_sb;
+ new_inode = binderfs_make_inode(sb, S_IFDIR | 0755);
+ if (!new_inode) {
+ dput(dentry);
+ dentry = ERR_PTR(-ENOMEM);
+ goto out;
+ }
+
+ new_inode->i_fop = &simple_dir_operations;
+ new_inode->i_op = &simple_dir_inode_operations;
+
+ set_nlink(new_inode, 2);
+ d_instantiate(dentry, new_inode);
+ inc_nlink(parent_inode);
+ fsnotify_mkdir(parent_inode, dentry);
+
+out:
+ inode_unlock(parent_inode);
+ return dentry;
+}
+
+static int binder_features_show(struct seq_file *m, void *unused)
+{
+ bool *feature = m->private;
+
+ seq_printf(m, "%d\n", *feature);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(binder_features);
+
+static int init_binder_features(struct super_block *sb)
+{
+ struct dentry *dentry, *dir;
+
+ dir = binderfs_create_dir(sb->s_root, "features");
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+
+ dentry = rust_binderfs_create_file(dir, "oneway_spam_detection",
+ &binder_features_fops,
+ &binder_features.oneway_spam_detection);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
+ dentry = rust_binderfs_create_file(dir, "extended_error",
+ &binder_features_fops,
+ &binder_features.extended_error);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
+ dentry = rust_binderfs_create_file(dir, "freeze_notification",
+ &binder_features_fops,
+ &binder_features.freeze_notification);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
+ return 0;
+}
+
+static int init_binder_logs(struct super_block *sb)
+{
+ struct dentry *binder_logs_root_dir, *dentry, *proc_log_dir;
+ struct binderfs_info *info;
+ int ret = 0;
+
+ binder_logs_root_dir = binderfs_create_dir(sb->s_root,
+ "binder_logs");
+ if (IS_ERR(binder_logs_root_dir)) {
+ ret = PTR_ERR(binder_logs_root_dir);
+ goto out;
+ }
+
+ dentry = rust_binderfs_create_file(binder_logs_root_dir, "stats",
+ &rust_binder_stats_fops, NULL);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto out;
+ }
+
+ dentry = rust_binderfs_create_file(binder_logs_root_dir, "state",
+ &rust_binder_state_fops, NULL);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto out;
+ }
+
+ dentry = rust_binderfs_create_file(binder_logs_root_dir, "transactions",
+ &rust_binder_transactions_fops, NULL);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
+ goto out;
+ }
+
+ proc_log_dir = binderfs_create_dir(binder_logs_root_dir, "proc");
+ if (IS_ERR(proc_log_dir)) {
+ ret = PTR_ERR(proc_log_dir);
+ goto out;
+ }
+ info = sb->s_fs_info;
+ info->proc_log_dir = proc_log_dir;
+
+out:
+ return ret;
+}
+
+static int binderfs_fill_super(struct super_block *sb, struct fs_context *fc)
+{
+ int ret;
+ struct binderfs_info *info;
+ struct binderfs_mount_opts *ctx = fc->fs_private;
+ struct inode *inode = NULL;
+ struct binderfs_device device_info = {};
+ const char *name;
+ size_t len;
+
+ sb->s_blocksize = PAGE_SIZE;
+ sb->s_blocksize_bits = PAGE_SHIFT;
+
+ /*
+ * The binderfs filesystem can be mounted by userns root in a
+ * non-initial userns. By default such mounts have the SB_I_NODEV flag
+ * set in s_iflags to prevent security issues where userns root can
+ * just create random device nodes via mknod() since it owns the
+ * filesystem mount. But binderfs does not allow creating any files,
+ * including device nodes. The only way to create binder device nodes
+ * is through the binder-control device which userns root is explicitly
+ * allowed to do. So removing the SB_I_NODEV flag from s_iflags is both
+ * necessary and safe.
+ */
+ sb->s_iflags &= ~SB_I_NODEV;
+ sb->s_iflags |= SB_I_NOEXEC;
+ sb->s_magic = RUST_BINDERFS_SUPER_MAGIC;
+ sb->s_op = &binderfs_super_ops;
+ sb->s_time_gran = 1;
+
+ sb->s_fs_info = kzalloc(sizeof(struct binderfs_info), GFP_KERNEL);
+ if (!sb->s_fs_info)
+ return -ENOMEM;
+ info = sb->s_fs_info;
+
+ info->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns);
+
+ info->root_gid = make_kgid(sb->s_user_ns, 0);
+ if (!gid_valid(info->root_gid))
+ info->root_gid = GLOBAL_ROOT_GID;
+ info->root_uid = make_kuid(sb->s_user_ns, 0);
+ if (!uid_valid(info->root_uid))
+ info->root_uid = GLOBAL_ROOT_UID;
+ info->mount_opts.max = ctx->max;
+ info->mount_opts.stats_mode = ctx->stats_mode;
+
+ inode = new_inode(sb);
+ if (!inode)
+ return -ENOMEM;
+
+ inode->i_ino = FIRST_INODE;
+ inode->i_fop = &simple_dir_operations;
+ inode->i_mode = S_IFDIR | 0755;
+ simple_inode_init_ts(inode);
+ inode->i_op = &binderfs_dir_inode_operations;
+ set_nlink(inode, 2);
+
+ sb->s_root = d_make_root(inode);
+ if (!sb->s_root)
+ return -ENOMEM;
+
+ ret = binderfs_binder_ctl_create(sb);
+ if (ret)
+ return ret;
+
+ name = rust_binder_devices_param;
+ for (len = strcspn(name, ","); len > 0; len = strcspn(name, ",")) {
+ strscpy(device_info.name, name, len + 1);
+ ret = binderfs_binder_device_create(inode, NULL, &device_info);
+ if (ret)
+ return ret;
+ name += len;
+ if (*name == ',')
+ name++;
+ }
+
+ ret = init_binder_features(sb);
+ if (ret)
+ return ret;
+
+ if (info->mount_opts.stats_mode == binderfs_stats_mode_global)
+ return init_binder_logs(sb);
+
+ return 0;
+}
+
+static int binderfs_fs_context_get_tree(struct fs_context *fc)
+{
+ return get_tree_nodev(fc, binderfs_fill_super);
+}
+
+static void binderfs_fs_context_free(struct fs_context *fc)
+{
+ struct binderfs_mount_opts *ctx = fc->fs_private;
+
+ kfree(ctx);
+}
+
+static const struct fs_context_operations binderfs_fs_context_ops = {
+ .free = binderfs_fs_context_free,
+ .get_tree = binderfs_fs_context_get_tree,
+ .parse_param = binderfs_fs_context_parse_param,
+ .reconfigure = binderfs_fs_context_reconfigure,
+};
+
+static int binderfs_init_fs_context(struct fs_context *fc)
+{
+ struct binderfs_mount_opts *ctx;
+
+ ctx = kzalloc(sizeof(struct binderfs_mount_opts), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->max = BINDERFS_MAX_MINOR;
+ ctx->stats_mode = binderfs_stats_mode_unset;
+
+ fc->fs_private = ctx;
+ fc->ops = &binderfs_fs_context_ops;
+
+ return 0;
+}
+
+static void binderfs_kill_super(struct super_block *sb)
+{
+ struct binderfs_info *info = sb->s_fs_info;
+
+ /*
+ * During inode eviction struct binderfs_info is needed.
+ * So first wipe the super_block then free struct binderfs_info.
+ */
+ kill_litter_super(sb);
+
+ if (info && info->ipc_ns)
+ put_ipc_ns(info->ipc_ns);
+
+ kfree(info);
+}
+
+static struct file_system_type binder_fs_type = {
+ .name = "binder",
+ .init_fs_context = binderfs_init_fs_context,
+ .parameters = binderfs_fs_parameters,
+ .kill_sb = binderfs_kill_super,
+ .fs_flags = FS_USERNS_MOUNT,
+};
+
+int init_rust_binderfs(void)
+{
+ int ret;
+ const char *name;
+ size_t len;
+
+ /* Verify that the default binderfs device names are valid. */
+ name = rust_binder_devices_param;
+ for (len = strcspn(name, ","); len > 0; len = strcspn(name, ",")) {
+ if (len > BINDERFS_MAX_NAME)
+ return -E2BIG;
+ name += len;
+ if (*name == ',')
+ name++;
+ }
+
+ /* Allocate new major number for binderfs. */
+ ret = alloc_chrdev_region(&binderfs_dev, 0, BINDERFS_MAX_MINOR,
+ "rust_binder");
+ if (ret)
+ return ret;
+
+ ret = register_filesystem(&binder_fs_type);
+ if (ret) {
+ unregister_chrdev_region(binderfs_dev, BINDERFS_MAX_MINOR);
+ return ret;
+ }
+
+ return ret;
+}
diff --git a/drivers/android/binder/stats.rs b/drivers/android/binder/stats.rs
new file mode 100644
index 000000000000..a83ec111d2cb
--- /dev/null
+++ b/drivers/android/binder/stats.rs
@@ -0,0 +1,89 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+//! Keep track of statistics for binder_logs.
+
+use crate::defs::*;
+use core::sync::atomic::{AtomicU32, Ordering::Relaxed};
+use kernel::{ioctl::_IOC_NR, seq_file::SeqFile, seq_print};
+
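+// `_IOC_NR` extracts the sequential command number from an ioctl code, so
+// the array lengths below are one past the number of the last command, and
+// `_IOC_NR(cmd)` indexes the counter arrays directly.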
+const BC_COUNT: usize = _IOC_NR(BC_REPLY_SG) as usize + 1;
+const BR_COUNT: usize = _IOC_NR(BR_TRANSACTION_PENDING_FROZEN) as usize + 1;
+
+pub(crate) static GLOBAL_STATS: BinderStats = BinderStats::new();
+
+pub(crate) struct BinderStats {
+ bc: [AtomicU32; BC_COUNT],
+ br: [AtomicU32; BR_COUNT],
+}
+
+impl BinderStats {
+ pub(crate) const fn new() -> Self {
+ #[expect(clippy::declare_interior_mutable_const)]
+ const ZERO: AtomicU32 = AtomicU32::new(0);
+
+ Self {
+ bc: [ZERO; BC_COUNT],
+ br: [ZERO; BR_COUNT],
+ }
+ }
+
+ pub(crate) fn inc_bc(&self, bc: u32) {
+ let idx = _IOC_NR(bc) as usize;
+ if let Some(bc_ref) = self.bc.get(idx) {
+ bc_ref.fetch_add(1, Relaxed);
+ }
+ }
+
+ pub(crate) fn inc_br(&self, br: u32) {
+ let idx = _IOC_NR(br) as usize;
+ if let Some(br_ref) = self.br.get(idx) {
+ br_ref.fetch_add(1, Relaxed);
+ }
+ }
+
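+ /// Prints one line per nonzero counter, prefixed with `prefix`, e.g.
+ /// `BC_TRANSACTION: 42` (illustrative count).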
+ pub(crate) fn debug_print(&self, prefix: &str, m: &SeqFile) {
+ for (i, cnt) in self.bc.iter().enumerate() {
+ let cnt = cnt.load(Relaxed);
+ if cnt > 0 {
+ seq_print!(m, "{}{}: {}\n", prefix, command_string(i), cnt);
+ }
+ }
+ for (i, cnt) in self.br.iter().enumerate() {
+ let cnt = cnt.load(Relaxed);
+ if cnt > 0 {
+ seq_print!(m, "{}{}: {}\n", prefix, return_string(i), cnt);
+ }
+ }
+ }
+}
+
+mod strings {
+ use core::str::from_utf8_unchecked;
+ use kernel::str::CStr;
+
+ extern "C" {
+ static binder_command_strings: [*const u8; super::BC_COUNT];
+ static binder_return_strings: [*const u8; super::BR_COUNT];
+ }
+
+ pub(super) fn command_string(i: usize) -> &'static str {
+ // SAFETY: Accessing `binder_command_strings` is always safe.
+ let c_str_ptr = unsafe { binder_command_strings[i] };
+ // SAFETY: The `binder_command_strings` array only contains nul-terminated strings.
+ let bytes = unsafe { CStr::from_char_ptr(c_str_ptr) }.as_bytes();
+ // SAFETY: The `binder_command_strings` array only contains strings with ascii-chars.
+ unsafe { from_utf8_unchecked(bytes) }
+ }
+
+ pub(super) fn return_string(i: usize) -> &'static str {
+ // SAFETY: Accessing `binder_return_strings` is always safe.
+ let c_str_ptr = unsafe { binder_return_strings[i] };
+ // SAFETY: The `binder_return_strings` array only contains nul-terminated strings.
+ let bytes = unsafe { CStr::from_char_ptr(c_str_ptr) }.as_bytes();
+ // SAFETY: The `binder_return_strings` array only contains strings with ascii-chars.
+ unsafe { from_utf8_unchecked(bytes) }
+ }
+}
+use strings::{command_string, return_string};
diff --git a/drivers/android/binder/thread.rs b/drivers/android/binder/thread.rs
new file mode 100644
index 000000000000..7e34ccd394f8
--- /dev/null
+++ b/drivers/android/binder/thread.rs
@@ -0,0 +1,1596 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+//! This module defines the `Thread` type, which represents a userspace thread that is using
+//! binder.
+//!
+//! The `Process` object stores all of the threads in an rb tree.
+
+use kernel::{
+ bindings,
+ fs::{File, LocalFile},
+ list::{AtomicTracker, List, ListArc, ListLinks, TryNewListArc},
+ prelude::*,
+ security,
+ seq_file::SeqFile,
+ seq_print,
+ sync::poll::{PollCondVar, PollTable},
+ sync::{Arc, SpinLock},
+ task::Task,
+ types::ARef,
+ uaccess::UserSlice,
+ uapi,
+};
+
+use crate::{
+ allocation::{Allocation, AllocationView, BinderObject, BinderObjectRef, NewAllocation},
+ defs::*,
+ error::BinderResult,
+ process::{GetWorkOrRegister, Process},
+ ptr_align,
+ stats::GLOBAL_STATS,
+ transaction::Transaction,
+ BinderReturnWriter, DArc, DLArc, DTRWrap, DeliverCode, DeliverToRead,
+};
+
+use core::{
+ mem::size_of,
+ sync::atomic::{AtomicU32, Ordering},
+};
+
+/// Stores the layout of the scatter-gather entries. This is built up by the `translate_object`
+/// calls and is discarded once the transaction payload has been copied into the target process.
+struct ScatterGatherState {
+ /// A struct that tracks the amount of unused buffer space.
+ unused_buffer_space: UnusedBufferSpace,
+ /// Scatter-gather entries to copy.
+ sg_entries: KVec<ScatterGatherEntry>,
+ /// Indexes into `sg_entries` corresponding to the last binder_buffer_object that
+ /// was processed and all of its ancestors. The array is in sorted order.
+ ancestors: KVec<usize>,
+}
+
+/// This entry specifies an additional buffer that should be copied using the scatter-gather
+/// mechanism.
+struct ScatterGatherEntry {
+ /// The index in the offset array of the BINDER_TYPE_PTR that this entry originates from.
+ obj_index: usize,
+ /// Offset in target buffer.
+ offset: usize,
+ /// User address in source buffer.
+ sender_uaddr: usize,
+ /// Number of bytes to copy.
+ length: usize,
+ /// The minimum offset of the next fixup in this buffer.
+ fixup_min_offset: usize,
+ /// The offsets within this buffer that contain pointers which should be translated.
+ pointer_fixups: KVec<PointerFixupEntry>,
+}
+
+/// This entry specifies that a fixup should happen at `target_offset` of the
+/// buffer. If `skip` is nonzero, then the fixup is a `binder_fd_array_object`
+/// and is applied later. Otherwise if `skip` is zero, then the size of the
+/// fixup is `size_of::<u64>()` and `pointer_value` is written to the buffer.
+struct PointerFixupEntry {
+ /// The number of bytes to skip, or zero for a `binder_buffer_object` fixup.
+ skip: usize,
+ /// The translated pointer to write when `skip` is zero.
+ pointer_value: u64,
+ /// The offset at which the value should be written. The offset is relative
+ /// to the original buffer.
+ target_offset: usize,
+}
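+
+// For example, translating a BINDER_TYPE_PTR child buffer records a fixup
+// with `skip == 0` and the child's new user-space address as `pointer_value`,
+// while a BINDER_TYPE_FDA records `skip == num_fds * 4` so that `apply_sg`
+// leaves the fd array region untouched.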
+
+/// Return type of `apply_and_validate_fixup_in_parent`.
+struct ParentFixupInfo {
+ /// The index of the parent buffer in `sg_entries`.
+ parent_sg_index: usize,
+ /// The number of ancestors of the buffer.
+ ///
+ /// The buffer is considered an ancestor of itself, so this is always at
+ /// least one.
+ num_ancestors: usize,
+ /// New value of `fixup_min_offset` if this fixup is applied.
+ new_min_offset: usize,
+ /// The offset of the fixup in the target buffer.
+ target_offset: usize,
+}
+
+impl ScatterGatherState {
+ /// Called when a `binder_buffer_object` or `binder_fd_array_object` tries
+ /// to access a region in its parent buffer. These accesses have various
+ /// restrictions, which this method verifies.
+ ///
+ /// The `parent_offset` and `length` arguments describe the offset and
+ /// length of the access in the parent buffer.
+ ///
+ /// # Detailed restrictions
+ ///
+ /// Obviously the fixup must be in-bounds for the parent buffer.
+ ///
+ /// For safety reasons, we only allow fixups inside a buffer to happen
+ /// at increasing offsets; additionally, we only allow fixup on the last
+ /// buffer object that was verified, or one of its parents.
+ ///
+ /// Example of what is allowed:
+ ///
+ /// A
+ /// B (parent = A, offset = 0)
+ /// C (parent = A, offset = 16)
+ /// D (parent = C, offset = 0)
+ /// E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset)
+ ///
+ /// Examples of what is not allowed:
+ ///
+ /// Decreasing offsets within the same parent:
+ /// A
+ /// C (parent = A, offset = 16)
+ /// B (parent = A, offset = 0) // decreasing offset within A
+ ///
+ /// Referring to a parent that wasn't the last object or any of its parents:
+ /// A
+ /// B (parent = A, offset = 0)
+ /// C (parent = A, offset = 0)
+ /// C (parent = A, offset = 16)
+ /// D (parent = B, offset = 0) // B is not A or any of A's parents
+ fn validate_parent_fixup(
+ &self,
+ parent: usize,
+ parent_offset: usize,
+ length: usize,
+ ) -> Result<ParentFixupInfo> {
+ // Using `position` would also be correct, but `rposition` avoids
+ // quadratic running times.
+ let ancestors_i = self
+ .ancestors
+ .iter()
+ .copied()
+ .rposition(|sg_idx| self.sg_entries[sg_idx].obj_index == parent)
+ .ok_or(EINVAL)?;
+ let sg_idx = self.ancestors[ancestors_i];
+ let sg_entry = match self.sg_entries.get(sg_idx) {
+ Some(sg_entry) => sg_entry,
+ None => {
+ pr_err!(
+ "self.ancestors[{}] is {}, but self.sg_entries.len() is {}",
+ ancestors_i,
+ sg_idx,
+ self.sg_entries.len()
+ );
+ return Err(EINVAL);
+ }
+ };
+ if sg_entry.fixup_min_offset > parent_offset {
+ pr_warn!(
+ "validate_parent_fixup: fixup_min_offset={}, parent_offset={}",
+ sg_entry.fixup_min_offset,
+ parent_offset
+ );
+ return Err(EINVAL);
+ }
+ let new_min_offset = parent_offset.checked_add(length).ok_or(EINVAL)?;
+ if new_min_offset > sg_entry.length {
+ pr_warn!(
+ "validate_parent_fixup: new_min_offset={}, sg_entry.length={}",
+ new_min_offset,
+ sg_entry.length
+ );
+ return Err(EINVAL);
+ }
+ let target_offset = sg_entry.offset.checked_add(parent_offset).ok_or(EINVAL)?;
+ // The `ancestors_i + 1` operation can't overflow since the output of the addition is at
+ // most `self.ancestors.len()`, which also fits in a usize.
+ Ok(ParentFixupInfo {
+ parent_sg_index: sg_idx,
+ num_ancestors: ancestors_i + 1,
+ new_min_offset,
+ target_offset,
+ })
+ }
+}
+
+/// Keeps track of how much unused buffer space is left. The initial amount is the number of bytes
+/// requested by the user using the `buffers_size` field of `binder_transaction_data_sg`. Each time
+/// we translate an object of type `BINDER_TYPE_PTR`, some of the unused buffer space is consumed.
+struct UnusedBufferSpace {
+ /// The start of the remaining space.
+ offset: usize,
+ /// The end of the remaining space.
+ limit: usize,
+}
+impl UnusedBufferSpace {
+ /// Claims the next `size` bytes from the unused buffer space and returns the offset of the
+ /// claimed chunk within the buffer.
+ fn claim_next(&mut self, size: usize) -> Result<usize> {
+ // We require every chunk to be aligned.
+ let size = ptr_align(size).ok_or(EINVAL)?;
+ let new_offset = self.offset.checked_add(size).ok_or(EINVAL)?;
+
+ if new_offset <= self.limit {
+ let offset = self.offset;
+ self.offset = new_offset;
+ Ok(offset)
+ } else {
+ Err(EINVAL)
+ }
+ }
+}
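+
+// An illustrative sketch (not part of the driver): with 32 bytes of unused
+// buffer space and `ptr_align` rounding sizes up to a multiple of 8, claims
+// proceed as follows:
+//
+//     let mut space = UnusedBufferSpace { offset: 0, limit: 32 };
+//     let a = space.claim_next(5)?;  // a == 0; the size is rounded up to 8
+//     let b = space.claim_next(24)?; // b == 8; offset advances to 32
+//     let c = space.claim_next(1);   // fails with EINVAL: space exhausted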
+
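+/// The result of pushing a work item to a thread: if the thread is dead, the
+/// item is handed back so the caller can redirect it (for example to the
+/// process-wide work queue).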
+pub(crate) enum PushWorkRes {
+ Ok,
+ FailedDead(DLArc<dyn DeliverToRead>),
+}
+
+impl PushWorkRes {
+ fn is_ok(&self) -> bool {
+ match self {
+ PushWorkRes::Ok => true,
+ PushWorkRes::FailedDead(_) => false,
+ }
+ }
+}
+
+/// The fields of `Thread` protected by the spinlock.
+struct InnerThread {
+ /// Determines the looper state of the thread. It is a bit-wise combination of the constants
+ /// prefixed with `LOOPER_`.
+ looper_flags: u32,
+
+ /// Determines whether the looper should return.
+ looper_need_return: bool,
+
+ /// Determines if thread is dead.
+ is_dead: bool,
+
+ /// Work item used to deliver error codes to the thread that started a transaction. Stored here
+ /// so that it can be reused.
+ reply_work: DArc<ThreadError>,
+
+ /// Work item used to deliver error codes to the current thread. Stored here so that it can be
+ /// reused.
+ return_work: DArc<ThreadError>,
+
+ /// Determines whether the work list below should be processed. When set to false, `work_list`
+ /// is treated as if it were empty.
+ process_work_list: bool,
+ /// List of work items to deliver to userspace.
+ work_list: List<DTRWrap<dyn DeliverToRead>>,
+ current_transaction: Option<DArc<Transaction>>,
+
+ /// Extended error information for this thread.
+ extended_error: ExtendedError,
+}
+
+const LOOPER_REGISTERED: u32 = 0x01;
+const LOOPER_ENTERED: u32 = 0x02;
+const LOOPER_EXITED: u32 = 0x04;
+const LOOPER_INVALID: u32 = 0x08;
+const LOOPER_WAITING: u32 = 0x10;
+const LOOPER_WAITING_PROC: u32 = 0x20;
+const LOOPER_POLL: u32 = 0x40;
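+
+// A thread joins the pool either with BC_REGISTER_LOOPER (in response to
+// BR_SPAWN_LOOPER) or with BC_ENTER_LOOPER, but not both: setting both
+// LOOPER_REGISTERED and LOOPER_ENTERED marks the looper LOOPER_INVALID (see
+// `looper_enter` and `looper_register` below).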
+
+impl InnerThread {
+ fn new() -> Result<Self> {
+ fn next_err_id() -> u32 {
+ static EE_ID: AtomicU32 = AtomicU32::new(0);
+ EE_ID.fetch_add(1, Ordering::Relaxed)
+ }
+
+ Ok(Self {
+ looper_flags: 0,
+ looper_need_return: false,
+ is_dead: false,
+ process_work_list: false,
+ reply_work: ThreadError::try_new()?,
+ return_work: ThreadError::try_new()?,
+ work_list: List::new(),
+ current_transaction: None,
+ extended_error: ExtendedError::new(next_err_id(), BR_OK, 0),
+ })
+ }
+
+ fn pop_work(&mut self) -> Option<DLArc<dyn DeliverToRead>> {
+ if !self.process_work_list {
+ return None;
+ }
+
+ let ret = self.work_list.pop_front();
+ self.process_work_list = !self.work_list.is_empty();
+ ret
+ }
+
+ fn push_work(&mut self, work: DLArc<dyn DeliverToRead>) -> PushWorkRes {
+ if self.is_dead {
+ PushWorkRes::FailedDead(work)
+ } else {
+ self.work_list.push_back(work);
+ self.process_work_list = true;
+ PushWorkRes::Ok
+ }
+ }
+
+ fn push_reply_work(&mut self, code: u32) {
+ if let Ok(work) = ListArc::try_from_arc(self.reply_work.clone()) {
+ work.set_error_code(code);
+ self.push_work(work);
+ } else {
+ pr_warn!("Thread reply work is already in use.");
+ }
+ }
+
+ fn push_return_work(&mut self, reply: u32) {
+ if let Ok(work) = ListArc::try_from_arc(self.return_work.clone()) {
+ work.set_error_code(reply);
+ self.push_work(work);
+ } else {
+ pr_warn!("Thread return work is already in use.");
+ }
+ }
+
+ /// Used to push work items that do not need to be processed immediately and can wait until the
+ /// thread gets another work item.
+ fn push_work_deferred(&mut self, work: DLArc<dyn DeliverToRead>) {
+ self.work_list.push_back(work);
+ }
+
+ /// Fetches the transaction this thread can reply to. If the thread has a pending transaction
+ /// (that it could respond to) but it has also issued a transaction, it must first wait for the
+ /// previously-issued transaction to complete.
+ ///
+ /// The `thread` parameter should be the thread containing this `InnerThread`.
+ fn pop_transaction_to_reply(&mut self, thread: &Thread) -> Result<DArc<Transaction>> {
+ let transaction = self.current_transaction.take().ok_or(EINVAL)?;
+ if core::ptr::eq(thread, transaction.from.as_ref()) {
+ self.current_transaction = Some(transaction);
+ return Err(EINVAL);
+ }
+ // Find a new current transaction for this thread.
+ self.current_transaction = transaction.find_from(thread).cloned();
+ Ok(transaction)
+ }
+
+ fn pop_transaction_replied(&mut self, transaction: &DArc<Transaction>) -> bool {
+ match self.current_transaction.take() {
+ None => false,
+ Some(old) => {
+ if !Arc::ptr_eq(transaction, &old) {
+ self.current_transaction = Some(old);
+ return false;
+ }
+ self.current_transaction = old.clone_next();
+ true
+ }
+ }
+ }
+
+ fn looper_enter(&mut self) {
+ self.looper_flags |= LOOPER_ENTERED;
+ if self.looper_flags & LOOPER_REGISTERED != 0 {
+ self.looper_flags |= LOOPER_INVALID;
+ }
+ }
+
+ fn looper_register(&mut self, valid: bool) {
+ self.looper_flags |= LOOPER_REGISTERED;
+ if !valid || self.looper_flags & LOOPER_ENTERED != 0 {
+ self.looper_flags |= LOOPER_INVALID;
+ }
+ }
+
+ fn looper_exit(&mut self) {
+ self.looper_flags |= LOOPER_EXITED;
+ }
+
+ /// Determines whether the thread is part of a pool, i.e., if it is a looper.
+ fn is_looper(&self) -> bool {
+ self.looper_flags & (LOOPER_ENTERED | LOOPER_REGISTERED) != 0
+ }
+
+ /// Determines whether the thread should attempt to fetch work items from the process queue.
+ /// This is generally the case when the thread is registered as a looper and not part of a
+ /// transaction stack. But if there is local work, we want to return to userspace before we
+ /// deliver any remote work.
+ fn should_use_process_work_queue(&self) -> bool {
+ self.current_transaction.is_none() && !self.process_work_list && self.is_looper()
+ }
+
+ fn poll(&mut self) -> u32 {
+ self.looper_flags |= LOOPER_POLL;
+ if self.process_work_list || self.looper_need_return {
+ bindings::POLLIN
+ } else {
+ 0
+ }
+ }
+}
+
+/// This represents a thread that's used with binder.
+#[pin_data]
+pub(crate) struct Thread {
+ pub(crate) id: i32,
+ pub(crate) process: Arc<Process>,
+ pub(crate) task: ARef<Task>,
+ #[pin]
+ inner: SpinLock<InnerThread>,
+ #[pin]
+ work_condvar: PollCondVar,
+ /// Used to insert this thread into the process' `ready_threads` list.
+ ///
+ /// INVARIANT: May never be used for any other list than the `self.process.ready_threads`.
+ #[pin]
+ links: ListLinks,
+ #[pin]
+ links_track: AtomicTracker,
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for Thread {
+ tracked_by links_track: AtomicTracker;
+ }
+}
+kernel::list::impl_list_item! {
+ impl ListItem<0> for Thread {
+ using ListLinks { self.links };
+ }
+}
+
+impl Thread {
+ pub(crate) fn new(id: i32, process: Arc<Process>) -> Result<Arc<Self>> {
+ let inner = InnerThread::new()?;
+
+ Arc::pin_init(
+ try_pin_init!(Thread {
+ id,
+ process,
+ task: ARef::from(&**kernel::current!()),
+ inner <- kernel::new_spinlock!(inner, "Thread::inner"),
+ work_condvar <- kernel::new_poll_condvar!("Thread::work_condvar"),
+ links <- ListLinks::new(),
+ links_track <- AtomicTracker::new(),
+ }),
+ GFP_KERNEL,
+ )
+ }
+
+ #[inline(never)]
+ pub(crate) fn debug_print(self: &Arc<Self>, m: &SeqFile, print_all: bool) -> Result<()> {
+ let inner = self.inner.lock();
+
+ if print_all || inner.current_transaction.is_some() || !inner.work_list.is_empty() {
+ seq_print!(
+ m,
+ " thread {}: l {:02x} need_return {}\n",
+ self.id,
+ inner.looper_flags,
+ inner.looper_need_return,
+ );
+ }
+
+ let mut t_opt = inner.current_transaction.as_ref();
+ while let Some(t) = t_opt {
+ if Arc::ptr_eq(&t.from, self) {
+ t.debug_print_inner(m, " outgoing transaction ");
+ t_opt = t.from_parent.as_ref();
+ } else if Arc::ptr_eq(&t.to, &self.process) {
+ t.debug_print_inner(m, " incoming transaction ");
+ t_opt = t.find_from(self);
+ } else {
+ t.debug_print_inner(m, " bad transaction ");
+ t_opt = None;
+ }
+ }
+
+ for work in &inner.work_list {
+ work.debug_print(m, " ", " pending transaction ")?;
+ }
+ Ok(())
+ }
+
+ pub(crate) fn get_extended_error(&self, data: UserSlice) -> Result {
+ let mut writer = data.writer();
+ let ee = self.inner.lock().extended_error;
+ writer.write(&ee)?;
+ Ok(())
+ }
+
+ pub(crate) fn set_current_transaction(&self, transaction: DArc<Transaction>) {
+ self.inner.lock().current_transaction = Some(transaction);
+ }
+
+ pub(crate) fn has_current_transaction(&self) -> bool {
+ self.inner.lock().current_transaction.is_some()
+ }
+
+ /// Attempts to fetch a work item from the thread-local queue. The behaviour if the queue is
+ /// empty depends on `wait`: if it is true, the function waits for some work to be queued (or a
+ /// signal); otherwise it returns indicating that none is available.
+ fn get_work_local(self: &Arc<Self>, wait: bool) -> Result<Option<DLArc<dyn DeliverToRead>>> {
+ {
+ let mut inner = self.inner.lock();
+ if inner.looper_need_return {
+ return Ok(inner.pop_work());
+ }
+ }
+
+ // Try once if the caller does not want to wait.
+ if !wait {
+ return self.inner.lock().pop_work().ok_or(EAGAIN).map(Some);
+ }
+
+ // Loop waiting only on the local queue (i.e., not registering with the process queue).
+ let mut inner = self.inner.lock();
+ loop {
+ if let Some(work) = inner.pop_work() {
+ return Ok(Some(work));
+ }
+
+ inner.looper_flags |= LOOPER_WAITING;
+ let signal_pending = self.work_condvar.wait_interruptible_freezable(&mut inner);
+ inner.looper_flags &= !LOOPER_WAITING;
+
+ if signal_pending {
+ return Err(EINTR);
+ }
+ if inner.looper_need_return {
+ return Ok(None);
+ }
+ }
+ }
+
+ /// Attempts to fetch a work item from the thread-local queue, falling back to the process-wide
+ /// queue if none is available locally.
+ ///
+ /// This must only be called when the thread is not participating in a transaction chain. If it
+ /// is, the local version (`get_work_local`) should be used instead.
+ fn get_work(self: &Arc<Self>, wait: bool) -> Result<Option<DLArc<dyn DeliverToRead>>> {
+ // Try to get work from the thread's work queue, using only a local lock.
+ {
+ let mut inner = self.inner.lock();
+ if let Some(work) = inner.pop_work() {
+ return Ok(Some(work));
+ }
+ if inner.looper_need_return {
+ drop(inner);
+ return Ok(self.process.get_work());
+ }
+ }
+
+ // If the caller doesn't want to wait, try to grab work from the process queue.
+ //
+ // We know nothing will have been queued directly to the thread queue because it is not in
+ // a transaction and it is not in the process' ready list.
+ if !wait {
+ return self.process.get_work().ok_or(EAGAIN).map(Some);
+ }
+
+ // Get work from the process queue. If none is available, atomically register as ready.
+ let reg = match self.process.get_work_or_register(self) {
+ GetWorkOrRegister::Work(work) => return Ok(Some(work)),
+ GetWorkOrRegister::Register(reg) => reg,
+ };
+
+ let mut inner = self.inner.lock();
+ loop {
+ if let Some(work) = inner.pop_work() {
+ return Ok(Some(work));
+ }
+
+ inner.looper_flags |= LOOPER_WAITING | LOOPER_WAITING_PROC;
+ let signal_pending = self.work_condvar.wait_interruptible_freezable(&mut inner);
+ inner.looper_flags &= !(LOOPER_WAITING | LOOPER_WAITING_PROC);
+
+ if signal_pending || inner.looper_need_return {
+ // We need to return now. We need to pull the thread off the list of ready threads
+ // (by dropping `reg`), then check the state again after it's off the list to
+ // ensure that something was not queued in the meantime. If something has been
+ // queued, we just return it (instead of the error).
+ drop(inner);
+ drop(reg);
+
+ let res = match self.inner.lock().pop_work() {
+ Some(work) => Ok(Some(work)),
+ None if signal_pending => Err(EINTR),
+ None => Ok(None),
+ };
+ return res;
+ }
+ }
+ }
+
+ /// Push the provided work item to be delivered to user space via this thread.
+ ///
+ /// Returns whether the item was successfully pushed. This can only fail if the thread is dead.
+ pub(crate) fn push_work(&self, work: DLArc<dyn DeliverToRead>) -> PushWorkRes {
+ let sync = work.should_sync_wakeup();
+
+ let res = self.inner.lock().push_work(work);
+
+ if res.is_ok() {
+ if sync {
+ self.work_condvar.notify_sync();
+ } else {
+ self.work_condvar.notify_one();
+ }
+ }
+
+ res
+ }
+
+ /// Attempts to push the given work item to the thread if it's a looper thread (i.e., if it's
+ /// part of a thread pool) and is alive. Otherwise, push the work item to the process instead.
+ pub(crate) fn push_work_if_looper(&self, work: DLArc<dyn DeliverToRead>) -> BinderResult {
+ let mut inner = self.inner.lock();
+ if inner.is_looper() && !inner.is_dead {
+ inner.push_work(work);
+ Ok(())
+ } else {
+ drop(inner);
+ self.process.push_work(work)
+ }
+ }
+
+ pub(crate) fn push_work_deferred(&self, work: DLArc<dyn DeliverToRead>) {
+ self.inner.lock().push_work_deferred(work);
+ }
+
+ pub(crate) fn push_return_work(&self, reply: u32) {
+ self.inner.lock().push_return_work(reply);
+ }
+
+ fn translate_object(
+ &self,
+ obj_index: usize,
+ offset: usize,
+ object: BinderObjectRef<'_>,
+ view: &mut AllocationView<'_>,
+ allow_fds: bool,
+ sg_state: &mut ScatterGatherState,
+ ) -> BinderResult {
+ match object {
+ BinderObjectRef::Binder(obj) => {
+ let strong = obj.hdr.type_ == BINDER_TYPE_BINDER;
+ // SAFETY: `binder` is a `binder_uintptr_t`; any bit pattern is a valid
+ // representation.
+ let ptr = unsafe { obj.__bindgen_anon_1.binder } as _;
+ let cookie = obj.cookie as _;
+ let flags = obj.flags as _;
+ let node = self
+ .process
+ .as_arc_borrow()
+ .get_node(ptr, cookie, flags, strong, self)?;
+ security::binder_transfer_binder(&self.process.cred, &view.alloc.process.cred)?;
+ view.transfer_binder_object(offset, obj, strong, node)?;
+ }
+ BinderObjectRef::Handle(obj) => {
+ let strong = obj.hdr.type_ == BINDER_TYPE_HANDLE;
+ // SAFETY: `handle` is a `u32`; any bit pattern is a valid representation.
+ let handle = unsafe { obj.__bindgen_anon_1.handle } as _;
+ let node = self.process.get_node_from_handle(handle, strong)?;
+ security::binder_transfer_binder(&self.process.cred, &view.alloc.process.cred)?;
+ view.transfer_binder_object(offset, obj, strong, node)?;
+ }
+ BinderObjectRef::Fd(obj) => {
+ if !allow_fds {
+ return Err(EPERM.into());
+ }
+
+ // SAFETY: `fd` is a `u32`; any bit pattern is a valid representation.
+ let fd = unsafe { obj.__bindgen_anon_1.fd };
+ let file = LocalFile::fget(fd)?;
+ // SAFETY: The binder driver never calls `fdget_pos` and this code runs from an
+ // ioctl, so there are no active calls to `fdget_pos` on this thread.
+ let file = unsafe { LocalFile::assume_no_fdget_pos(file) };
+ security::binder_transfer_file(
+ &self.process.cred,
+ &view.alloc.process.cred,
+ &file,
+ )?;
+
+ let mut obj_write = BinderFdObject::default();
+ obj_write.hdr.type_ = BINDER_TYPE_FD;
+ // This will be overwritten with the actual fd when the transaction is received.
+ obj_write.__bindgen_anon_1.fd = u32::MAX;
+ obj_write.cookie = obj.cookie;
+ view.write::<BinderFdObject>(offset, &obj_write)?;
+
+ const FD_FIELD_OFFSET: usize =
+ core::mem::offset_of!(uapi::binder_fd_object, __bindgen_anon_1.fd);
+
+ let field_offset = offset + FD_FIELD_OFFSET;
+
+ view.alloc.info_add_fd(file, field_offset, false)?;
+ }
+ BinderObjectRef::Ptr(obj) => {
+ let obj_length = obj.length.try_into().map_err(|_| EINVAL)?;
+ let alloc_offset = match sg_state.unused_buffer_space.claim_next(obj_length) {
+ Ok(alloc_offset) => alloc_offset,
+ Err(err) => {
+ pr_warn!(
+ "Failed to claim space for a BINDER_TYPE_PTR. (offset: {}, limit: {}, size: {})",
+ sg_state.unused_buffer_space.offset,
+ sg_state.unused_buffer_space.limit,
+ obj_length,
+ );
+ return Err(err.into());
+ }
+ };
+
+ let sg_state_idx = sg_state.sg_entries.len();
+ sg_state.sg_entries.push(
+ ScatterGatherEntry {
+ obj_index,
+ offset: alloc_offset,
+ sender_uaddr: obj.buffer as _,
+ length: obj_length,
+ pointer_fixups: KVec::new(),
+ fixup_min_offset: 0,
+ },
+ GFP_KERNEL,
+ )?;
+
+ let buffer_ptr_in_user_space = (view.alloc.ptr + alloc_offset) as u64;
+
+ if obj.flags & uapi::BINDER_BUFFER_FLAG_HAS_PARENT == 0 {
+ sg_state.ancestors.clear();
+ sg_state.ancestors.push(sg_state_idx, GFP_KERNEL)?;
+ } else {
+ // Another buffer also has a pointer to this buffer, and we need to fixup that
+ // pointer too.
+
+ let parent_index = usize::try_from(obj.parent).map_err(|_| EINVAL)?;
+ let parent_offset = usize::try_from(obj.parent_offset).map_err(|_| EINVAL)?;
+
+ let info = sg_state.validate_parent_fixup(
+ parent_index,
+ parent_offset,
+ size_of::<u64>(),
+ )?;
+
+ sg_state.ancestors.truncate(info.num_ancestors);
+ sg_state.ancestors.push(sg_state_idx, GFP_KERNEL)?;
+
+ let parent_entry = match sg_state.sg_entries.get_mut(info.parent_sg_index) {
+ Some(parent_entry) => parent_entry,
+ None => {
+ pr_err!(
+ "validate_parent_fixup returned index out of bounds for sg.entries"
+ );
+ return Err(EINVAL.into());
+ }
+ };
+
+ parent_entry.fixup_min_offset = info.new_min_offset;
+ parent_entry.pointer_fixups.push(
+ PointerFixupEntry {
+ skip: 0,
+ pointer_value: buffer_ptr_in_user_space,
+ target_offset: info.target_offset,
+ },
+ GFP_KERNEL,
+ )?;
+ }
+
+ let mut obj_write = BinderBufferObject::default();
+ obj_write.hdr.type_ = BINDER_TYPE_PTR;
+ obj_write.flags = obj.flags;
+ obj_write.buffer = buffer_ptr_in_user_space;
+ obj_write.length = obj.length;
+ obj_write.parent = obj.parent;
+ obj_write.parent_offset = obj.parent_offset;
+ view.write::<BinderBufferObject>(offset, &obj_write)?;
+ }
+ BinderObjectRef::Fda(obj) => {
+ if !allow_fds {
+ return Err(EPERM.into());
+ }
+ let parent_index = usize::try_from(obj.parent).map_err(|_| EINVAL)?;
+ let parent_offset = usize::try_from(obj.parent_offset).map_err(|_| EINVAL)?;
+ let num_fds = usize::try_from(obj.num_fds).map_err(|_| EINVAL)?;
+ let fds_len = num_fds.checked_mul(size_of::<u32>()).ok_or(EINVAL)?;
+
+ let info = sg_state.validate_parent_fixup(parent_index, parent_offset, fds_len)?;
+ view.alloc.info_add_fd_reserve(num_fds)?;
+
+ sg_state.ancestors.truncate(info.num_ancestors);
+ let parent_entry = match sg_state.sg_entries.get_mut(info.parent_sg_index) {
+ Some(parent_entry) => parent_entry,
+ None => {
+ pr_err!(
+ "validate_parent_fixup returned index out of bounds for sg.entries"
+ );
+ return Err(EINVAL.into());
+ }
+ };
+
+ parent_entry.fixup_min_offset = info.new_min_offset;
+ parent_entry
+ .pointer_fixups
+ .push(
+ PointerFixupEntry {
+ skip: fds_len,
+ pointer_value: 0,
+ target_offset: info.target_offset,
+ },
+ GFP_KERNEL,
+ )
+ .map_err(|_| ENOMEM)?;
+
+ let fda_uaddr = parent_entry
+ .sender_uaddr
+ .checked_add(parent_offset)
+ .ok_or(EINVAL)?;
+ let mut fda_bytes = KVec::new();
+ UserSlice::new(UserPtr::from_addr(fda_uaddr as _), fds_len)
+ .read_all(&mut fda_bytes, GFP_KERNEL)?;
+
+ if fds_len != fda_bytes.len() {
+ pr_err!("UserSlice::read_all returned wrong length in BINDER_TYPE_FDA");
+ return Err(EINVAL.into());
+ }
+
+ for i in (0..fds_len).step_by(size_of::<u32>()) {
+ let fd = {
+ let mut fd_bytes = [0u8; size_of::<u32>()];
+ fd_bytes.copy_from_slice(&fda_bytes[i..i + size_of::<u32>()]);
+ u32::from_ne_bytes(fd_bytes)
+ };
+
+ let file = LocalFile::fget(fd)?;
+ // SAFETY: The binder driver never calls `fdget_pos` and this code runs from an
+ // ioctl, so there are no active calls to `fdget_pos` on this thread.
+ let file = unsafe { LocalFile::assume_no_fdget_pos(file) };
+ security::binder_transfer_file(
+ &self.process.cred,
+ &view.alloc.process.cred,
+ &file,
+ )?;
+
+ // The `validate_parent_fixup` call ensures that this addition will not
+ // overflow.
+ view.alloc.info_add_fd(file, info.target_offset + i, true)?;
+ }
+ drop(fda_bytes);
+
+ let mut obj_write = BinderFdArrayObject::default();
+ obj_write.hdr.type_ = BINDER_TYPE_FDA;
+ obj_write.num_fds = obj.num_fds;
+ obj_write.parent = obj.parent;
+ obj_write.parent_offset = obj.parent_offset;
+ view.write::<BinderFdArrayObject>(offset, &obj_write)?;
+ }
+ }
+ Ok(())
+ }
+
+ fn apply_sg(&self, alloc: &mut Allocation, sg_state: &mut ScatterGatherState) -> BinderResult {
+ for sg_entry in &mut sg_state.sg_entries {
+ let mut end_of_previous_fixup = sg_entry.offset;
+ let offset_end = sg_entry.offset.checked_add(sg_entry.length).ok_or(EINVAL)?;
+
+ let mut reader =
+ UserSlice::new(UserPtr::from_addr(sg_entry.sender_uaddr), sg_entry.length).reader();
+ for fixup in &mut sg_entry.pointer_fixups {
+ let fixup_len = if fixup.skip == 0 {
+ size_of::<u64>()
+ } else {
+ fixup.skip
+ };
+
+ let target_offset_end = fixup.target_offset.checked_add(fixup_len).ok_or(EINVAL)?;
+ if fixup.target_offset < end_of_previous_fixup || offset_end < target_offset_end {
+ pr_warn!(
+ "Fixups oob {} {} {} {}",
+ fixup.target_offset,
+ end_of_previous_fixup,
+ offset_end,
+ target_offset_end
+ );
+ return Err(EINVAL.into());
+ }
+
+ let copy_off = end_of_previous_fixup;
+ let copy_len = fixup.target_offset - end_of_previous_fixup;
+ if let Err(err) = alloc.copy_into(&mut reader, copy_off, copy_len) {
+ pr_warn!("Failed copying into alloc: {:?}", err);
+ return Err(err.into());
+ }
+ if fixup.skip == 0 {
+ let res = alloc.write::<u64>(fixup.target_offset, &fixup.pointer_value);
+ if let Err(err) = res {
+ pr_warn!("Failed copying ptr into alloc: {:?}", err);
+ return Err(err.into());
+ }
+ }
+ if let Err(err) = reader.skip(fixup_len) {
+ pr_warn!("Failed skipping {} from reader: {:?}", fixup_len, err);
+ return Err(err.into());
+ }
+ end_of_previous_fixup = target_offset_end;
+ }
+ let copy_off = end_of_previous_fixup;
+ let copy_len = offset_end - end_of_previous_fixup;
+ if let Err(err) = alloc.copy_into(&mut reader, copy_off, copy_len) {
+ pr_warn!("Failed copying remainder into alloc: {:?}", err);
+ return Err(err.into());
+ }
+ }
+ Ok(())
+ }
+
+ /// This method copies the payload of a transaction into the target process.
+ ///
+ /// The resulting payload will have several different components, which will be stored next to
+ /// each other in the allocation. Furthermore, various objects can be embedded in the payload,
+ /// and those objects have to be translated so that they make sense to the target transaction.
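+ ///
+ /// The components are laid out next to each other in the target allocation,
+ /// each aligned via `ptr_align`, in this order:
+ ///
+ /// data | offsets | scatter-gather buffers | security context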
+ pub(crate) fn copy_transaction_data(
+ &self,
+ to_process: Arc<Process>,
+ tr: &BinderTransactionDataSg,
+ debug_id: usize,
+ allow_fds: bool,
+ txn_security_ctx_offset: Option<&mut usize>,
+ ) -> BinderResult<NewAllocation> {
+ let trd = &tr.transaction_data;
+ let is_oneway = trd.flags & TF_ONE_WAY != 0;
+ let mut secctx = if let Some(offset) = txn_security_ctx_offset {
+ let secid = self.process.cred.get_secid();
+ let ctx = match security::SecurityCtx::from_secid(secid) {
+ Ok(ctx) => ctx,
+ Err(err) => {
+ pr_warn!("Failed to get security ctx for id {}: {:?}", secid, err);
+ return Err(err.into());
+ }
+ };
+ Some((offset, ctx))
+ } else {
+ None
+ };
+
+ let data_size = trd.data_size.try_into().map_err(|_| EINVAL)?;
+ let aligned_data_size = ptr_align(data_size).ok_or(EINVAL)?;
+ let offsets_size = trd.offsets_size.try_into().map_err(|_| EINVAL)?;
+ let aligned_offsets_size = ptr_align(offsets_size).ok_or(EINVAL)?;
+ let buffers_size = tr.buffers_size.try_into().map_err(|_| EINVAL)?;
+ let aligned_buffers_size = ptr_align(buffers_size).ok_or(EINVAL)?;
+ let aligned_secctx_size = match secctx.as_ref() {
+ Some((_offset, ctx)) => ptr_align(ctx.len()).ok_or(EINVAL)?,
+ None => 0,
+ };
+
+ // This guarantees that at least `size_of::<usize>()` bytes will be allocated.
+ let len = usize::max(
+ aligned_data_size
+ .checked_add(aligned_offsets_size)
+ .and_then(|sum| sum.checked_add(aligned_buffers_size))
+ .and_then(|sum| sum.checked_add(aligned_secctx_size))
+ .ok_or(ENOMEM)?,
+ size_of::<usize>(),
+ );
+ let secctx_off = aligned_data_size + aligned_offsets_size + aligned_buffers_size;
+ let mut alloc =
+ match to_process.buffer_alloc(debug_id, len, is_oneway, self.process.task.pid()) {
+ Ok(alloc) => alloc,
+ Err(err) => {
+ pr_warn!(
+ "Failed to allocate buffer. len:{}, is_oneway:{}",
+ len,
+ is_oneway
+ );
+ return Err(err);
+ }
+ };
+
+ // SAFETY: This accesses a union field, but it's okay because the field's type is valid for
+ // all bit-patterns.
+ let trd_data_ptr = unsafe { &trd.data.ptr };
+ let mut buffer_reader =
+ UserSlice::new(UserPtr::from_addr(trd_data_ptr.buffer as _), data_size).reader();
+ let mut end_of_previous_object = 0;
+ let mut sg_state = None;
+
+ // Copy offsets if there are any.
+ if offsets_size > 0 {
+ {
+ let mut reader =
+ UserSlice::new(UserPtr::from_addr(trd_data_ptr.offsets as _), offsets_size)
+ .reader();
+ alloc.copy_into(&mut reader, aligned_data_size, offsets_size)?;
+ }
+
+ let offsets_start = aligned_data_size;
+ let offsets_end = aligned_data_size + aligned_offsets_size;
+
+ // This state is used for BINDER_TYPE_PTR objects.
+ let sg_state = sg_state.insert(ScatterGatherState {
+ unused_buffer_space: UnusedBufferSpace {
+ offset: offsets_end,
+ limit: len,
+ },
+ sg_entries: KVec::new(),
+ ancestors: KVec::new(),
+ });
+
+ // Traverse the objects specified.
+ let mut view = AllocationView::new(&mut alloc, data_size);
+ for (index, index_offset) in (offsets_start..offsets_end)
+ .step_by(size_of::<usize>())
+ .enumerate()
+ {
+ let offset = view.alloc.read(index_offset)?;
+
+ if offset < end_of_previous_object {
+ pr_warn!("Got transaction with invalid offset.");
+ return Err(EINVAL.into());
+ }
+
+ // Copy data between two objects.
+ if end_of_previous_object < offset {
+ view.copy_into(
+ &mut buffer_reader,
+ end_of_previous_object,
+ offset - end_of_previous_object,
+ )?;
+ }
+
+ let mut object = BinderObject::read_from(&mut buffer_reader)?;
+
+ match self.translate_object(
+ index,
+ offset,
+ object.as_ref(),
+ &mut view,
+ allow_fds,
+ sg_state,
+ ) {
+ Ok(()) => end_of_previous_object = offset + object.size(),
+ Err(err) => {
+ pr_warn!("Error while translating object.");
+ return Err(err);
+ }
+ }
+
+ // Update the indexes containing objects to clean up.
+ let offset_after_object = index_offset + size_of::<usize>();
+ view.alloc
+ .set_info_offsets(offsets_start..offset_after_object);
+ }
+ }
+
+ // Copy remaining raw data.
+ alloc.copy_into(
+ &mut buffer_reader,
+ end_of_previous_object,
+ data_size - end_of_previous_object,
+ )?;
+
+ if let Some(sg_state) = sg_state.as_mut() {
+ if let Err(err) = self.apply_sg(&mut alloc, sg_state) {
+ pr_warn!("Failure in apply_sg: {:?}", err);
+ return Err(err);
+ }
+ }
+
+ if let Some((off_out, secctx)) = secctx.as_mut() {
+ if let Err(err) = alloc.write(secctx_off, secctx.as_bytes()) {
+ pr_warn!("Failed to write security context: {:?}", err);
+ return Err(err.into());
+ }
+ **off_out = secctx_off;
+ }
+ Ok(alloc)
+ }
+
+ fn unwind_transaction_stack(self: &Arc<Self>) {
+ let mut thread = self.clone();
+ while let Ok(transaction) = {
+ let mut inner = thread.inner.lock();
+ inner.pop_transaction_to_reply(thread.as_ref())
+ } {
+ let reply = Err(BR_DEAD_REPLY);
+ if !transaction.from.deliver_single_reply(reply, &transaction) {
+ break;
+ }
+
+ thread = transaction.from.clone();
+ }
+ }
+
+ pub(crate) fn deliver_reply(
+ &self,
+ reply: Result<DLArc<Transaction>, u32>,
+ transaction: &DArc<Transaction>,
+ ) {
+ if self.deliver_single_reply(reply, transaction) {
+ transaction.from.unwind_transaction_stack();
+ }
+ }
+
+ /// Delivers a reply to the thread that started a transaction. The reply can either be a
+ /// reply-transaction or an error code to be delivered instead.
+ ///
+ /// Returns whether the thread is dead. If it is, the caller is expected to unwind the
+ /// transaction stack by completing transactions for threads that are dead.
+ fn deliver_single_reply(
+ &self,
+ reply: Result<DLArc<Transaction>, u32>,
+ transaction: &DArc<Transaction>,
+ ) -> bool {
+ if let Ok(transaction) = &reply {
+ transaction.set_outstanding(&mut self.process.inner.lock());
+ }
+
+ {
+ let mut inner = self.inner.lock();
+ if !inner.pop_transaction_replied(transaction) {
+ return false;
+ }
+
+ if inner.is_dead {
+ return true;
+ }
+
+ match reply {
+ Ok(work) => {
+ inner.push_work(work);
+ }
+ Err(code) => inner.push_reply_work(code),
+ }
+ }
+
+ // Notify the thread now that we've released the inner lock.
+ self.work_condvar.notify_sync();
+ false
+ }
+
+ /// Determines if the given transaction is the current transaction for this thread.
+ fn is_current_transaction(&self, transaction: &DArc<Transaction>) -> bool {
+ let inner = self.inner.lock();
+ match &inner.current_transaction {
+ None => false,
+ Some(current) => Arc::ptr_eq(current, transaction),
+ }
+ }
+
+ /// Determines the current top of the transaction stack. It fails if the top is in another
+ /// thread (i.e., this thread belongs to a stack but it has called another thread). The top is
+ /// [`None`] if the thread is not currently participating in a transaction stack.
+ fn top_of_transaction_stack(&self) -> Result<Option<DArc<Transaction>>> {
+ let inner = self.inner.lock();
+ if let Some(cur) = &inner.current_transaction {
+ if core::ptr::eq(self, cur.from.as_ref()) {
+ pr_warn!("got new transaction with bad transaction stack");
+ return Err(EINVAL);
+ }
+ Ok(Some(cur.clone()))
+ } else {
+ Ok(None)
+ }
+ }
+
+ fn transaction<T>(self: &Arc<Self>, tr: &BinderTransactionDataSg, inner: T)
+ where
+ T: FnOnce(&Arc<Self>, &BinderTransactionDataSg) -> BinderResult,
+ {
+ if let Err(err) = inner(self, tr) {
+ if err.should_pr_warn() {
+ // Record the failure in the thread's extended error state so that
+ // userspace can retrieve it with BINDER_GET_EXTENDED_ERROR.
+ {
+ let mut inner = self.inner.lock();
+ inner.extended_error.command = err.reply;
+ inner.extended_error.param = err.as_errno();
+ }
+ pr_warn!(
+ "Transaction failed: {:?} my_pid:{}",
+ err,
+ self.process.pid_in_current_ns()
+ );
+ }
+
+ self.push_return_work(err.reply);
+ }
+ }
+
+ fn transaction_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> BinderResult {
+ // SAFETY: Handle's type has no invalid bit patterns.
+ let handle = unsafe { tr.transaction_data.target.handle };
+ let node_ref = self.process.get_transaction_node(handle)?;
+ security::binder_transaction(&self.process.cred, &node_ref.node.owner.cred)?;
+ // TODO: We need to ensure that there isn't a pending transaction in the work queue. How
+ // could this happen?
+ let top = self.top_of_transaction_stack()?;
+ let list_completion = DTRWrap::arc_try_new(DeliverCode::new(BR_TRANSACTION_COMPLETE))?;
+ let completion = list_completion.clone_arc();
+ let transaction = Transaction::new(node_ref, top, self, tr)?;
+
+ // Check that the transaction stack hasn't changed while the lock was released, then update
+ // it with the new transaction.
+ {
+ let mut inner = self.inner.lock();
+ if !transaction.is_stacked_on(&inner.current_transaction) {
+ pr_warn!("Transaction stack changed during transaction!");
+ return Err(EINVAL.into());
+ }
+ inner.current_transaction = Some(transaction.clone_arc());
+ // We push the completion as a deferred work so that we wait for the reply before
+ // returning to userland.
+ inner.push_work_deferred(list_completion);
+ }
+
+ if let Err(e) = transaction.submit() {
+ completion.skip();
+ // Define `transaction` first to drop it after `inner`.
+ let transaction;
+ let mut inner = self.inner.lock();
+ transaction = inner.current_transaction.take().unwrap();
+ inner.current_transaction = transaction.clone_next();
+ Err(e)
+ } else {
+ Ok(())
+ }
+ }
+
+ fn reply_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> BinderResult {
+ let orig = self.inner.lock().pop_transaction_to_reply(self)?;
+ if !orig.from.is_current_transaction(&orig) {
+ return Err(EINVAL.into());
+ }
+
+ // We need to complete the transaction even if we cannot complete building the reply.
+ let out = (|| -> BinderResult<_> {
+ let completion = DTRWrap::arc_try_new(DeliverCode::new(BR_TRANSACTION_COMPLETE))?;
+ let process = orig.from.process.clone();
+ let allow_fds = orig.flags & TF_ACCEPT_FDS != 0;
+ let reply = Transaction::new_reply(self, process, tr, allow_fds)?;
+ self.inner.lock().push_work(completion);
+ orig.from.deliver_reply(Ok(reply), &orig);
+ Ok(())
+ })()
+ .map_err(|mut err| {
+ // At this point we only return `BR_TRANSACTION_COMPLETE` to the caller, and we must let
+ // the sender know that the transaction has completed (with an error in this case).
+ pr_warn!(
+ "Failure {:?} during reply - delivering BR_FAILED_REPLY to sender.",
+ err
+ );
+ let reply = Err(BR_FAILED_REPLY);
+ orig.from.deliver_reply(reply, &orig);
+ err.reply = BR_TRANSACTION_COMPLETE;
+ err
+ });
+
+ out
+ }
+
+ fn oneway_transaction_inner(self: &Arc<Self>, tr: &BinderTransactionDataSg) -> BinderResult {
+ // SAFETY: The `handle` field is valid for all possible byte values, so reading from the
+ // union is okay.
+ let handle = unsafe { tr.transaction_data.target.handle };
+ let node_ref = self.process.get_transaction_node(handle)?;
+ security::binder_transaction(&self.process.cred, &node_ref.node.owner.cred)?;
+ let transaction = Transaction::new(node_ref, None, self, tr)?;
+ let code = if self.process.is_oneway_spam_detection_enabled()
+ && transaction.oneway_spam_detected
+ {
+ BR_ONEWAY_SPAM_SUSPECT
+ } else {
+ BR_TRANSACTION_COMPLETE
+ };
+ let list_completion = DTRWrap::arc_try_new(DeliverCode::new(code))?;
+ let completion = list_completion.clone_arc();
+ self.inner.lock().push_work(list_completion);
+ match transaction.submit() {
+ Ok(()) => Ok(()),
+ Err(err) => {
+ completion.skip();
+ Err(err)
+ }
+ }
+ }
+
+ fn write(self: &Arc<Self>, req: &mut BinderWriteRead) -> Result {
+ let write_start = req.write_buffer.wrapping_add(req.write_consumed);
+ let write_len = req.write_size.saturating_sub(req.write_consumed);
+ let mut reader =
+ UserSlice::new(UserPtr::from_addr(write_start as _), write_len as _).reader();
+
+ while reader.len() >= size_of::<u32>() && self.inner.lock().return_work.is_unused() {
+ let before = reader.len();
+ let cmd = reader.read::<u32>()?;
+ GLOBAL_STATS.inc_bc(cmd);
+ self.process.stats.inc_bc(cmd);
+ match cmd {
+ BC_TRANSACTION => {
+ let tr = reader.read::<BinderTransactionData>()?.with_buffers_size(0);
+ if tr.transaction_data.flags & TF_ONE_WAY != 0 {
+ self.transaction(&tr, Self::oneway_transaction_inner);
+ } else {
+ self.transaction(&tr, Self::transaction_inner);
+ }
+ }
+ BC_TRANSACTION_SG => {
+ let tr = reader.read::<BinderTransactionDataSg>()?;
+ if tr.transaction_data.flags & TF_ONE_WAY != 0 {
+ self.transaction(&tr, Self::oneway_transaction_inner);
+ } else {
+ self.transaction(&tr, Self::transaction_inner);
+ }
+ }
+ BC_REPLY => {
+ let tr = reader.read::<BinderTransactionData>()?.with_buffers_size(0);
+ self.transaction(&tr, Self::reply_inner)
+ }
+ BC_REPLY_SG => {
+ let tr = reader.read::<BinderTransactionDataSg>()?;
+ self.transaction(&tr, Self::reply_inner)
+ }
+ BC_FREE_BUFFER => {
+ let buffer = self.process.buffer_get(reader.read()?);
+ if let Some(buffer) = &buffer {
+ if buffer.looper_need_return_on_free() {
+ self.inner.lock().looper_need_return = true;
+ }
+ }
+ drop(buffer);
+ }
+ BC_INCREFS => {
+ self.process
+ .as_arc_borrow()
+ .update_ref(reader.read()?, true, false)?
+ }
+ BC_ACQUIRE => {
+ self.process
+ .as_arc_borrow()
+ .update_ref(reader.read()?, true, true)?
+ }
+ BC_RELEASE => {
+ self.process
+ .as_arc_borrow()
+ .update_ref(reader.read()?, false, true)?
+ }
+ BC_DECREFS => {
+ self.process
+ .as_arc_borrow()
+ .update_ref(reader.read()?, false, false)?
+ }
+ BC_INCREFS_DONE => self.process.inc_ref_done(&mut reader, false)?,
+ BC_ACQUIRE_DONE => self.process.inc_ref_done(&mut reader, true)?,
+ BC_REQUEST_DEATH_NOTIFICATION => self.process.request_death(&mut reader, self)?,
+ BC_CLEAR_DEATH_NOTIFICATION => self.process.clear_death(&mut reader, self)?,
+ BC_DEAD_BINDER_DONE => self.process.dead_binder_done(reader.read()?, self),
+ BC_REGISTER_LOOPER => {
+ let valid = self.process.register_thread();
+ self.inner.lock().looper_register(valid);
+ }
+ BC_ENTER_LOOPER => self.inner.lock().looper_enter(),
+ BC_EXIT_LOOPER => self.inner.lock().looper_exit(),
+ BC_REQUEST_FREEZE_NOTIFICATION => self.process.request_freeze_notif(&mut reader)?,
+ BC_CLEAR_FREEZE_NOTIFICATION => self.process.clear_freeze_notif(&mut reader)?,
+ BC_FREEZE_NOTIFICATION_DONE => self.process.freeze_notif_done(&mut reader)?,
+
+ // Fail if given an unknown command code.
+ // BC_ATTEMPT_ACQUIRE and BC_ACQUIRE_RESULT are no longer supported.
+ _ => return Err(EINVAL),
+ }
+ // Update the number of write bytes consumed.
+ req.write_consumed += (before - reader.len()) as u64;
+ }
+
+ Ok(())
+ }
+
+ fn read(self: &Arc<Self>, req: &mut BinderWriteRead, wait: bool) -> Result {
+ let read_start = req.read_buffer.wrapping_add(req.read_consumed);
+ let read_len = req.read_size.saturating_sub(req.read_consumed);
+ let mut writer = BinderReturnWriter::new(
+ UserSlice::new(UserPtr::from_addr(read_start as _), read_len as _).writer(),
+ self,
+ );
+ let (in_pool, use_proc_queue) = {
+ let inner = self.inner.lock();
+ (inner.is_looper(), inner.should_use_process_work_queue())
+ };
+
+ let getter = if use_proc_queue {
+ Self::get_work
+ } else {
+ Self::get_work_local
+ };
+
+ // Reserve some room at the beginning of the read buffer so that we can send a
+ // BR_SPAWN_LOOPER if we need to.
+ let mut has_noop_placeholder = false;
+ if req.read_consumed == 0 {
+ if let Err(err) = writer.write_code(BR_NOOP) {
+ pr_warn!("Failure when writing BR_NOOP at beginning of buffer.");
+ return Err(err);
+ }
+ has_noop_placeholder = true;
+ }
+
+ // Loop doing work while there is room in the buffer for at least one
+ // return code (a `u32`) plus the largest fixed-size payload we may write.
+ let initial_len = writer.len();
+ while writer.len() >= size_of::<uapi::binder_transaction_data_secctx>() + 4 {
+ match getter(self, wait && initial_len == writer.len()) {
+ Ok(Some(work)) => match work.into_arc().do_work(self, &mut writer) {
+ Ok(true) => {}
+ Ok(false) => break,
+ Err(err) => {
+ return Err(err);
+ }
+ },
+ Ok(None) => {
+ break;
+ }
+ Err(err) => {
+ // Propagate the error if we haven't written anything else.
+ if err != EINTR && err != EAGAIN {
+ pr_warn!("Failure in work getter: {:?}", err);
+ }
+ if initial_len == writer.len() {
+ return Err(err);
+ } else {
+ break;
+ }
+ }
+ }
+ }
+
+ req.read_consumed += read_len - writer.len() as u64;
+
+ // Write BR_SPAWN_LOOPER if the process needs more threads for its pool.
+ if has_noop_placeholder && in_pool && self.process.needs_thread() {
+ let mut writer =
+ UserSlice::new(UserPtr::from_addr(req.read_buffer as _), req.read_size as _)
+ .writer();
+ writer.write(&BR_SPAWN_LOOPER)?;
+ }
+ Ok(())
+ }
+
+ pub(crate) fn write_read(self: &Arc<Self>, data: UserSlice, wait: bool) -> Result {
+ let (mut reader, mut writer) = data.reader_writer();
+ let mut req = reader.read::<BinderWriteRead>()?;
+
+ // Go through the write buffer.
+ let mut ret = Ok(());
+ if req.write_size > 0 {
+ ret = self.write(&mut req);
+ if let Err(err) = ret {
+ pr_warn!(
+ "Write failure {:?} in pid:{}",
+ err,
+ self.process.pid_in_current_ns()
+ );
+ req.read_consumed = 0;
+ writer.write(&req)?;
+ self.inner.lock().looper_need_return = false;
+ return ret;
+ }
+ }
+
+ // Go through the work queue.
+ if req.read_size > 0 {
+ ret = self.read(&mut req, wait);
+ if ret.is_err() && ret != Err(EINTR) {
+ pr_warn!(
+ "Read failure {:?} in pid:{}",
+ ret,
+ self.process.pid_in_current_ns()
+ );
+ }
+ }
+
+ // Write the request back so that the consumed fields are visible to the caller.
+ writer.write(&req)?;
+
+ self.inner.lock().looper_need_return = false;
+
+ ret
+ }
+
+ pub(crate) fn poll(&self, file: &File, table: PollTable<'_>) -> (bool, u32) {
+ table.register_wait(file, &self.work_condvar);
+ let mut inner = self.inner.lock();
+ (inner.should_use_process_work_queue(), inner.poll())
+ }
+
+ /// Make any in-flight call to `get_work` or `get_work_local` return immediately.
+ pub(crate) fn exit_looper(&self) {
+ let mut inner = self.inner.lock();
+ let should_notify = inner.looper_flags & LOOPER_WAITING != 0;
+ if should_notify {
+ inner.looper_need_return = true;
+ }
+ drop(inner);
+
+ if should_notify {
+ self.work_condvar.notify_one();
+ }
+ }
+
+ pub(crate) fn notify_if_poll_ready(&self, sync: bool) {
+ // Determine if we need to notify. This requires the lock.
+ let inner = self.inner.lock();
+ let notify = inner.looper_flags & LOOPER_POLL != 0 && inner.should_use_process_work_queue();
+ drop(inner);
+
+ // Now that the lock is no longer held, notify the waiters if we have to.
+ if notify {
+ if sync {
+ self.work_condvar.notify_sync();
+ } else {
+ self.work_condvar.notify_one();
+ }
+ }
+ }
+
+ pub(crate) fn release(self: &Arc<Self>) {
+ self.inner.lock().is_dead = true;
+
+ self.unwind_transaction_stack();
+
+ // Cancel all pending work items.
+ while let Ok(Some(work)) = self.get_work_local(false) {
+ work.into_arc().cancel();
+ }
+ }
+}
+
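+/// A reusable work item that delivers an error code to userspace; a stored
+/// code of `BR_OK` means the item is currently unused.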
+#[pin_data]
+struct ThreadError {
+ error_code: AtomicU32,
+ #[pin]
+ links_track: AtomicTracker,
+}
+
+impl ThreadError {
+ fn try_new() -> Result<DArc<Self>> {
+ DTRWrap::arc_pin_init(pin_init!(Self {
+ error_code: AtomicU32::new(BR_OK),
+ links_track <- AtomicTracker::new(),
+ }))
+ .map(ListArc::into_arc)
+ }
+
+ fn set_error_code(&self, code: u32) {
+ self.error_code.store(code, Ordering::Relaxed);
+ }
+
+ fn is_unused(&self) -> bool {
+ self.error_code.load(Ordering::Relaxed) == BR_OK
+ }
+}
+
+impl DeliverToRead for ThreadError {
+ fn do_work(
+ self: DArc<Self>,
+ _thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool> {
+ let code = self.error_code.load(Ordering::Relaxed);
+ self.error_code.store(BR_OK, Ordering::Relaxed);
+ writer.write_code(code)?;
+ Ok(true)
+ }
+
+ fn cancel(self: DArc<Self>) {}
+
+ fn should_sync_wakeup(&self) -> bool {
+ false
+ }
+
+ fn debug_print(&self, m: &SeqFile, prefix: &str, _tprefix: &str) -> Result<()> {
+ seq_print!(
+ m,
+ "{}transaction error: {}\n",
+ prefix,
+ self.error_code.load(Ordering::Relaxed)
+ );
+ Ok(())
+ }
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for ThreadError {
+ tracked_by links_track: AtomicTracker;
+ }
+}
diff --git a/drivers/android/binder/trace.rs b/drivers/android/binder/trace.rs
new file mode 100644
index 000000000000..af0e4392805e
--- /dev/null
+++ b/drivers/android/binder/trace.rs
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use kernel::ffi::{c_uint, c_ulong};
+use kernel::tracepoint::declare_trace;
+
+declare_trace! {
+ unsafe fn rust_binder_ioctl(cmd: c_uint, arg: c_ulong);
+}
+
+#[inline]
+pub(crate) fn trace_ioctl(cmd: u32, arg: usize) {
+ // SAFETY: Always safe to call.
+ unsafe { rust_binder_ioctl(cmd, arg as c_ulong) }
+}
diff --git a/drivers/android/binder/transaction.rs b/drivers/android/binder/transaction.rs
new file mode 100644
index 000000000000..02512175d622
--- /dev/null
+++ b/drivers/android/binder/transaction.rs
@@ -0,0 +1,456 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+use core::sync::atomic::{AtomicBool, Ordering};
+use kernel::{
+ prelude::*,
+ seq_file::SeqFile,
+ seq_print,
+ sync::{Arc, SpinLock},
+ task::Kuid,
+ time::{Instant, Monotonic},
+ types::ScopeGuard,
+};
+
+use crate::{
+ allocation::{Allocation, TranslatedFds},
+ defs::*,
+ error::{BinderError, BinderResult},
+ node::{Node, NodeRef},
+ process::{Process, ProcessInner},
+ ptr_align,
+ thread::{PushWorkRes, Thread},
+ BinderReturnWriter, DArc, DLArc, DTRWrap, DeliverToRead,
+};
+
+#[pin_data(PinnedDrop)]
+pub(crate) struct Transaction {
+ pub(crate) debug_id: usize,
+ target_node: Option<DArc<Node>>,
+ pub(crate) from_parent: Option<DArc<Transaction>>,
+ pub(crate) from: Arc<Thread>,
+ pub(crate) to: Arc<Process>,
+ #[pin]
+ allocation: SpinLock<Option<Allocation>>,
+ is_outstanding: AtomicBool,
+ code: u32,
+ pub(crate) flags: u32,
+ data_size: usize,
+ offsets_size: usize,
+ data_address: usize,
+ sender_euid: Kuid,
+ txn_security_ctx_off: Option<usize>,
+ pub(crate) oneway_spam_detected: bool,
+ start_time: Instant<Monotonic>,
+}
+
+kernel::list::impl_list_arc_safe! {
+ impl ListArcSafe<0> for Transaction { untracked; }
+}
+
+impl Transaction {
+ pub(crate) fn new(
+ node_ref: NodeRef,
+ from_parent: Option<DArc<Transaction>>,
+ from: &Arc<Thread>,
+ tr: &BinderTransactionDataSg,
+ ) -> BinderResult<DLArc<Self>> {
+ let debug_id = super::next_debug_id();
+ let trd = &tr.transaction_data;
+ let allow_fds = node_ref.node.flags & FLAT_BINDER_FLAG_ACCEPTS_FDS != 0;
+ let txn_security_ctx = node_ref.node.flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX != 0;
+ let mut txn_security_ctx_off = if txn_security_ctx { Some(0) } else { None };
+ let to = node_ref.node.owner.clone();
+ let mut alloc = match from.copy_transaction_data(
+ to.clone(),
+ tr,
+ debug_id,
+ allow_fds,
+ txn_security_ctx_off.as_mut(),
+ ) {
+ Ok(alloc) => alloc,
+ Err(err) => {
+ if !err.is_dead() {
+ pr_warn!("Failure in copy_transaction_data: {:?}", err);
+ }
+ return Err(err);
+ }
+ };
+ let oneway_spam_detected = alloc.oneway_spam_detected;
+ if trd.flags & TF_ONE_WAY != 0 {
+ if from_parent.is_some() {
+ pr_warn!("Oneway transaction should not be in a transaction stack.");
+ return Err(EINVAL.into());
+ }
+ alloc.set_info_oneway_node(node_ref.node.clone());
+ }
+ if trd.flags & TF_CLEAR_BUF != 0 {
+ alloc.set_info_clear_on_drop();
+ }
+ let target_node = node_ref.node.clone();
+ alloc.set_info_target_node(node_ref);
+ let data_address = alloc.ptr;
+
+ Ok(DTRWrap::arc_pin_init(pin_init!(Transaction {
+ debug_id,
+ target_node: Some(target_node),
+ from_parent,
+ sender_euid: from.process.task.euid(),
+ from: from.clone(),
+ to,
+ code: trd.code,
+ flags: trd.flags,
+ data_size: trd.data_size as _,
+ offsets_size: trd.offsets_size as _,
+ data_address,
+ allocation <- kernel::new_spinlock!(Some(alloc.success()), "Transaction::new"),
+ is_outstanding: AtomicBool::new(false),
+ txn_security_ctx_off,
+ oneway_spam_detected,
+ start_time: Instant::now(),
+ }))?)
+ }
+
+ pub(crate) fn new_reply(
+ from: &Arc<Thread>,
+ to: Arc<Process>,
+ tr: &BinderTransactionDataSg,
+ allow_fds: bool,
+ ) -> BinderResult<DLArc<Self>> {
+ let debug_id = super::next_debug_id();
+ let trd = &tr.transaction_data;
+ let mut alloc = match from.copy_transaction_data(to.clone(), tr, debug_id, allow_fds, None)
+ {
+ Ok(alloc) => alloc,
+ Err(err) => {
+ pr_warn!("Failure in copy_transaction_data: {:?}", err);
+ return Err(err);
+ }
+ };
+ let oneway_spam_detected = alloc.oneway_spam_detected;
+ if trd.flags & TF_CLEAR_BUF != 0 {
+ alloc.set_info_clear_on_drop();
+ }
+ Ok(DTRWrap::arc_pin_init(pin_init!(Transaction {
+ debug_id,
+ target_node: None,
+ from_parent: None,
+ sender_euid: from.process.task.euid(),
+ from: from.clone(),
+ to,
+ code: trd.code,
+ flags: trd.flags,
+ data_size: trd.data_size as _,
+ offsets_size: trd.offsets_size as _,
+ data_address: alloc.ptr,
+ allocation <- kernel::new_spinlock!(Some(alloc.success()), "Transaction::new"),
+ is_outstanding: AtomicBool::new(false),
+ txn_security_ctx_off: None,
+ oneway_spam_detected,
+ start_time: Instant::now(),
+ }))?)
+ }
+
+ #[inline(never)]
+ pub(crate) fn debug_print_inner(&self, m: &SeqFile, prefix: &str) {
+ seq_print!(
+ m,
+ "{}{}: from {}:{} to {} code {:x} flags {:x} elapsed {}ms",
+ prefix,
+ self.debug_id,
+ self.from.process.task.pid(),
+ self.from.id,
+ self.to.task.pid(),
+ self.code,
+ self.flags,
+ self.start_time.elapsed().as_millis(),
+ );
+ if let Some(target_node) = &self.target_node {
+ seq_print!(m, " node {}", target_node.debug_id);
+ }
+ seq_print!(m, " size {}:{}\n", self.data_size, self.offsets_size);
+ }
+
+ /// Determines if the transaction is stacked on top of the given transaction.
+ pub(crate) fn is_stacked_on(&self, onext: &Option<DArc<Self>>) -> bool {
+ match (&self.from_parent, onext) {
+ (None, None) => true,
+ (Some(from_parent), Some(next)) => Arc::ptr_eq(from_parent, next),
+ _ => false,
+ }
+ }
+
+ /// Returns a pointer to the next transaction on the transaction stack, if there is one.
+ pub(crate) fn clone_next(&self) -> Option<DArc<Self>> {
+ Some(self.from_parent.as_ref()?.clone())
+ }
+
+ /// Searches in the transaction stack for a thread that belongs to the target process. This is
+ /// useful when finding a target for a new transaction: if the node belongs to a process that
+ /// is already part of the transaction stack, we reuse the thread.
+ fn find_target_thread(&self) -> Option<Arc<Thread>> {
+ let mut it = &self.from_parent;
+ while let Some(transaction) = it {
+ if Arc::ptr_eq(&transaction.from.process, &self.to) {
+ return Some(transaction.from.clone());
+ }
+ it = &transaction.from_parent;
+ }
+ None
+ }
+
+ /// Searches in the transaction stack for a transaction originating at the given thread.
+ pub(crate) fn find_from(&self, thread: &Thread) -> Option<&DArc<Transaction>> {
+ let mut it = &self.from_parent;
+ while let Some(transaction) = it {
+ if core::ptr::eq(thread, transaction.from.as_ref()) {
+ return Some(transaction);
+ }
+
+ it = &transaction.from_parent;
+ }
+ None
+ }
+
+ pub(crate) fn set_outstanding(&self, to_process: &mut ProcessInner) {
+ // No race because this method is only called once.
+ if !self.is_outstanding.load(Ordering::Relaxed) {
+ self.is_outstanding.store(true, Ordering::Relaxed);
+ to_process.add_outstanding_txn();
+ }
+ }
+
+ /// Decrement `outstanding_txns` in `to` if it hasn't already been decremented.
+ fn drop_outstanding_txn(&self) {
+ // No race because this is called at most twice, and one of the calls is in the
+ // destructor, which is guaranteed not to race with any other operations on the
+ // transaction. It also cannot race with `set_outstanding`, since submission happens
+ // before delivery.
+ if self.is_outstanding.load(Ordering::Relaxed) {
+ self.is_outstanding.store(false, Ordering::Relaxed);
+ self.to.drop_outstanding_txn();
+ }
+ }
+
+ /// Submits the transaction to a work queue. Uses a thread if there is one in the transaction
+ /// stack, otherwise uses the destination process.
+ ///
+ /// Not used for replies.
+ pub(crate) fn submit(self: DLArc<Self>) -> BinderResult {
+ // Defined before `process_inner` so that the destructor runs after releasing the lock.
+ let mut _t_outdated;
+
+ let oneway = self.flags & TF_ONE_WAY != 0;
+ let process = self.to.clone();
+ let mut process_inner = process.inner.lock();
+
+ self.set_outstanding(&mut process_inner);
+
+ if oneway {
+ if let Some(target_node) = self.target_node.clone() {
+ if process_inner.is_frozen {
+ process_inner.async_recv = true;
+ if self.flags & TF_UPDATE_TXN != 0 {
+ if let Some(t_outdated) =
+ target_node.take_outdated_transaction(&self, &mut process_inner)
+ {
+ // Save the transaction to be dropped after locks are released.
+ _t_outdated = t_outdated;
+ }
+ }
+ }
+ match target_node.submit_oneway(self, &mut process_inner) {
+ Ok(()) => {}
+ Err((err, work)) => {
+ drop(process_inner);
+ // Drop work after releasing process lock.
+ drop(work);
+ return Err(err);
+ }
+ }
+
+ if process_inner.is_frozen {
+ return Err(BinderError::new_frozen_oneway());
+ } else {
+ return Ok(());
+ }
+ } else {
+ pr_err!("Failed to submit oneway transaction to node.");
+ }
+ }
+
+ if process_inner.is_frozen {
+ process_inner.sync_recv = true;
+ return Err(BinderError::new_frozen());
+ }
+
+ let res = if let Some(thread) = self.find_target_thread() {
+ match thread.push_work(self) {
+ PushWorkRes::Ok => Ok(()),
+ PushWorkRes::FailedDead(me) => Err((BinderError::new_dead(), me)),
+ }
+ } else {
+ process_inner.push_work(self)
+ };
+ drop(process_inner);
+
+ match res {
+ Ok(()) => Ok(()),
+ Err((err, work)) => {
+ // Drop work after releasing process lock.
+ drop(work);
+ Err(err)
+ }
+ }
+ }
+
+ /// Check whether one oneway transaction can supersede another.
+ pub(crate) fn can_replace(&self, old: &Transaction) -> bool {
+ if self.from.process.task.pid() != old.from.process.task.pid() {
+ return false;
+ }
+
+ if self.flags & old.flags & (TF_ONE_WAY | TF_UPDATE_TXN) != (TF_ONE_WAY | TF_UPDATE_TXN) {
+ return false;
+ }
+
+ let target_node_match = match (self.target_node.as_ref(), old.target_node.as_ref()) {
+ (None, None) => true,
+ (Some(tn1), Some(tn2)) => Arc::ptr_eq(tn1, tn2),
+ _ => false,
+ };
+
+ self.code == old.code && self.flags == old.flags && target_node_match
+ }
+
+ fn prepare_file_list(&self) -> Result<TranslatedFds> {
+ let mut alloc = self.allocation.lock().take().ok_or(ESRCH)?;
+
+ match alloc.translate_fds() {
+ Ok(translated) => {
+ *self.allocation.lock() = Some(alloc);
+ Ok(translated)
+ }
+ Err(err) => {
+ // Free the allocation eagerly.
+ drop(alloc);
+ Err(err)
+ }
+ }
+ }
+}
+
+impl DeliverToRead for Transaction {
+ fn do_work(
+ self: DArc<Self>,
+ thread: &Thread,
+ writer: &mut BinderReturnWriter<'_>,
+ ) -> Result<bool> {
+ let send_failed_reply = ScopeGuard::new(|| {
+ if self.target_node.is_some() && self.flags & TF_ONE_WAY == 0 {
+ let reply = Err(BR_FAILED_REPLY);
+ self.from.deliver_reply(reply, &self);
+ }
+ self.drop_outstanding_txn();
+ });
+
+ let files = if let Ok(list) = self.prepare_file_list() {
+ list
+ } else {
+ // On failure to process the list, we send a reply back to the sender and ignore the
+ // transaction on the recipient.
+ return Ok(true);
+ };
+
+ let mut tr_sec = BinderTransactionDataSecctx::default();
+ let tr = tr_sec.tr_data();
+ if let Some(target_node) = &self.target_node {
+ let (ptr, cookie) = target_node.get_id();
+ tr.target.ptr = ptr as _;
+ tr.cookie = cookie as _;
+ };
+ tr.code = self.code;
+ tr.flags = self.flags;
+ tr.data_size = self.data_size as _;
+ tr.data.ptr.buffer = self.data_address as _;
+ tr.offsets_size = self.offsets_size as _;
+ if tr.offsets_size > 0 {
+ tr.data.ptr.offsets = (self.data_address + ptr_align(self.data_size).unwrap()) as _;
+ }
+ tr.sender_euid = self.sender_euid.into_uid_in_current_ns();
+ tr.sender_pid = 0;
+ if self.target_node.is_some() && self.flags & TF_ONE_WAY == 0 {
+ // Not a reply and not one-way.
+ tr.sender_pid = self.from.process.pid_in_current_ns();
+ }
+ let code = if self.target_node.is_none() {
+ BR_REPLY
+ } else if self.txn_security_ctx_off.is_some() {
+ BR_TRANSACTION_SEC_CTX
+ } else {
+ BR_TRANSACTION
+ };
+
+ // Write the transaction code and data to the user buffer.
+ writer.write_code(code)?;
+ if let Some(off) = self.txn_security_ctx_off {
+ tr_sec.secctx = (self.data_address + off) as u64;
+ writer.write_payload(&tr_sec)?;
+ } else {
+ writer.write_payload(&*tr)?;
+ }
+
+ let mut alloc = self.allocation.lock().take().ok_or(ESRCH)?;
+
+ // Dismiss the completion of transaction with a failure. No failure paths are allowed from
+ // here on out.
+ send_failed_reply.dismiss();
+
+ // Commit files, and set FDs in FDA to be closed on buffer free.
+ let close_on_free = files.commit();
+ alloc.set_info_close_on_free(close_on_free);
+
+ // It is now the user's responsibility to clear the allocation.
+ alloc.keep_alive();
+
+ self.drop_outstanding_txn();
+
+ // When this is not a reply and not a oneway transaction, update `current_transaction`. If
+ // it's a reply, `current_transaction` has already been updated appropriately.
+ if self.target_node.is_some() && tr_sec.transaction_data.flags & TF_ONE_WAY == 0 {
+ thread.set_current_transaction(self);
+ }
+
+ Ok(false)
+ }
+
+ fn cancel(self: DArc<Self>) {
+ let allocation = self.allocation.lock().take();
+ drop(allocation);
+
+ // If this is not a reply or oneway transaction, then send a dead reply.
+ if self.target_node.is_some() && self.flags & TF_ONE_WAY == 0 {
+ let reply = Err(BR_DEAD_REPLY);
+ self.from.deliver_reply(reply, &self);
+ }
+
+ self.drop_outstanding_txn();
+ }
+
+ fn should_sync_wakeup(&self) -> bool {
+ self.flags & TF_ONE_WAY == 0
+ }
+
+ fn debug_print(&self, m: &SeqFile, _prefix: &str, tprefix: &str) -> Result<()> {
+ self.debug_print_inner(m, tprefix);
+ Ok(())
+ }
+}
+
+#[pinned_drop]
+impl PinnedDrop for Transaction {
+ fn drop(self: Pin<&mut Self>) {
+ self.drop_outstanding_txn();
+ }
+}
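
In can_replace() above, the combined mask test only passes when both transactions carry both TF_ONE_WAY and TF_UPDATE_TXN. A small worked example of that flag arithmetic (flag values as in the binder UAPI header):

#include <stdio.h>

#define TF_ONE_WAY	0x01	/* values from uapi binder.h */
#define TF_UPDATE_TXN	0x40

static int flags_allow_replace(unsigned int new_flags, unsigned int old_flags)
{
	/* both bits must be set in *both* transactions */
	return (new_flags & old_flags & (TF_ONE_WAY | TF_UPDATE_TXN)) ==
	       (TF_ONE_WAY | TF_UPDATE_TXN);
}

int main(void)
{
	printf("%d\n", flags_allow_replace(0x41, 0x41));	/* 1: both oneway+update */
	printf("%d\n", flags_allow_replace(0x41, 0x01));	/* 0: old lacks TF_UPDATE_TXN */
	return 0;
}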
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index 8b08976146ba..342574bfd28a 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -537,8 +537,8 @@ struct binder_transaction {
struct binder_proc *to_proc;
struct binder_thread *to_thread;
struct binder_transaction *to_parent;
- unsigned need_reply:1;
- /* unsigned is_dead:1; */ /* not used at the moment */
+ unsigned is_async:1;
+ unsigned is_reply:1;
struct binder_buffer *buffer;
unsigned int code;
diff --git a/drivers/android/binder_netlink.c b/drivers/android/binder_netlink.c
new file mode 100644
index 000000000000..d05397a50ca6
--- /dev/null
+++ b/drivers/android/binder_netlink.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/binder.yaml */
+/* YNL-GEN kernel source */
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include "binder_netlink.h"
+
+#include <uapi/linux/android/binder_netlink.h>
+
+/* Ops table for binder */
+static const struct genl_split_ops binder_nl_ops[] = {
+};
+
+static const struct genl_multicast_group binder_nl_mcgrps[] = {
+ [BINDER_NLGRP_REPORT] = { "report", },
+};
+
+struct genl_family binder_nl_family __ro_after_init = {
+ .name = BINDER_FAMILY_NAME,
+ .version = BINDER_FAMILY_VERSION,
+ .netnsok = true,
+ .parallel_ops = true,
+ .module = THIS_MODULE,
+ .split_ops = binder_nl_ops,
+ .n_split_ops = ARRAY_SIZE(binder_nl_ops),
+ .mcgrps = binder_nl_mcgrps,
+ .n_mcgrps = ARRAY_SIZE(binder_nl_mcgrps),
+};
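
The generated family only registers the multicast group; a sender would build and publish a report roughly as below. The command and attribute numbers are placeholders, since the real ones are generated from Documentation/netlink/specs/binder.yaml:

#include <net/genetlink.h>
#include "binder_netlink.h"

static int binder_nl_send_report_sketch(struct net *net, u32 error)
{
	struct sk_buff *skb;
	void *hdr;

	skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = genlmsg_put(skb, 0, 0, &binder_nl_family, 0, 1 /* placeholder cmd */);
	if (!hdr)
		goto free;

	if (nla_put_u32(skb, 1 /* placeholder attr */, error))
		goto free;

	genlmsg_end(skb, hdr);
	return genlmsg_multicast_netns(&binder_nl_family, net, skb, 0,
				       BINDER_NLGRP_REPORT, GFP_KERNEL);

free:
	nlmsg_free(skb);
	return -EMSGSIZE;
}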
diff --git a/drivers/android/binder_netlink.h b/drivers/android/binder_netlink.h
new file mode 100644
index 000000000000..882c7a6b537e
--- /dev/null
+++ b/drivers/android/binder_netlink.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/binder.yaml */
+/* YNL-GEN kernel header */
+
+#ifndef _LINUX_BINDER_GEN_H
+#define _LINUX_BINDER_GEN_H
+
+#include <net/netlink.h>
+#include <net/genetlink.h>
+
+#include <uapi/linux/android/binder_netlink.h>
+
+enum {
+ BINDER_NLGRP_REPORT,
+};
+
+extern struct genl_family binder_nl_family;
+
+#endif /* _LINUX_BINDER_GEN_H */
diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h
index 97a78e5623db..fa5eb61cf580 100644
--- a/drivers/android/binder_trace.h
+++ b/drivers/android/binder_trace.h
@@ -402,6 +402,43 @@ TRACE_EVENT(binder_return,
"unknown")
);
+TRACE_EVENT(binder_netlink_report,
+ TP_PROTO(const char *context,
+ struct binder_transaction *t,
+ u32 data_size,
+ u32 error),
+ TP_ARGS(context, t, data_size, error),
+ TP_STRUCT__entry(
+ __field(const char *, context)
+ __field(u32, error)
+ __field(int, from_pid)
+ __field(int, from_tid)
+ __field(int, to_pid)
+ __field(int, to_tid)
+ __field(bool, is_reply)
+ __field(unsigned int, flags)
+ __field(unsigned int, code)
+ __field(size_t, data_size)
+ ),
+ TP_fast_assign(
+ __entry->context = context;
+ __entry->error = error;
+ __entry->from_pid = t->from_pid;
+ __entry->from_tid = t->from_tid;
+ __entry->to_pid = t->to_proc ? t->to_proc->pid : 0;
+ __entry->to_tid = t->to_thread ? t->to_thread->pid : 0;
+ __entry->is_reply = t->is_reply;
+ __entry->flags = t->flags;
+ __entry->code = t->code;
+ __entry->data_size = data_size;
+ ),
+ TP_printk("from %d:%d to %d:%d context=%s error=%d is_reply=%d flags=0x%x code=0x%x size=%zu",
+ __entry->from_pid, __entry->from_tid,
+ __entry->to_pid, __entry->to_tid,
+ __entry->context, __entry->error, __entry->is_reply,
+ __entry->flags, __entry->code, __entry->data_size)
+);
+
#endif /* _BINDER_TRACE_H */
#undef TRACE_INCLUDE_PATH
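
TRACE_EVENT(binder_netlink_report, ...) generates the usual trace_binder_netlink_report() call-site helper, so the C binder code can emit the event with a single call; a hypothetical call site would look like this (all four arguments assumed to be locals there):

	/* context_name, t, data_size and err are assumed locals */
	trace_binder_netlink_report(context_name, t, data_size, err);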
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index 0d9d95a7fb60..be8e64eb39ec 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -59,6 +59,7 @@ struct binder_features {
bool oneway_spam_detection;
bool extended_error;
bool freeze_notification;
+ bool transaction_report;
};
static const struct constant_table binderfs_param_stats[] = {
@@ -76,6 +77,7 @@ static struct binder_features binder_features = {
.oneway_spam_detection = true,
.extended_error = true,
.freeze_notification = true,
+ .transaction_report = true,
};
static inline struct binderfs_info *BINDERFS_SB(const struct super_block *sb)
@@ -601,6 +603,12 @@ static int init_binder_features(struct super_block *sb)
if (IS_ERR(dentry))
return PTR_ERR(dentry);
+ dentry = binderfs_create_file(dir, "transaction_report",
+ &binder_features_fops,
+ &binder_features.transaction_report);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+
return 0;
}
diff --git a/drivers/android/dbitmap.h b/drivers/android/dbitmap.h
index 956f1bd087d1..c7299ce8b374 100644
--- a/drivers/android/dbitmap.h
+++ b/drivers/android/dbitmap.h
@@ -37,6 +37,7 @@ static inline void dbitmap_free(struct dbitmap *dmap)
{
dmap->nbits = 0;
kfree(dmap->map);
+ dmap->map = NULL;
}
/* Returns the nbits that a dbitmap can shrink to, 0 if not possible. */
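
Resetting dmap->map avoids leaving a dangling pointer behind: a repeated release now degrades to kfree(NULL), which is a no-op. A sketch:

#include "dbitmap.h"

static void example_release(struct dbitmap *dmap)
{
	dbitmap_free(dmap);
	dbitmap_free(dmap);	/* second call frees NULL: harmless no-op */
}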
diff --git a/drivers/cdx/cdx_msi.c b/drivers/cdx/cdx_msi.c
index 3388a5d1462c..91b95422b263 100644
--- a/drivers/cdx/cdx_msi.c
+++ b/drivers/cdx/cdx_msi.c
@@ -174,6 +174,7 @@ struct irq_domain *cdx_msi_domain_init(struct device *dev)
}
parent = irq_find_matching_fwnode(of_fwnode_handle(parent_node), DOMAIN_BUS_NEXUS);
+ of_node_put(parent_node);
if (!parent || !msi_get_domain_info(parent)) {
dev_err(dev, "unable to locate ITS domain\n");
return NULL;
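
The added of_node_put() drops the reference taken when parent_node was looked up earlier in the function. The general shape of the pattern, with a hypothetical "msi-parent" phandle standing in for however cdx_msi actually obtains the node:

#include <linux/of.h>
#include <linux/irqdomain.h>

static struct irq_domain *find_parent_domain(struct device *dev)
{
	struct device_node *np;
	struct irq_domain *domain;

	np = of_parse_phandle(dev->of_node, "msi-parent", 0); /* takes a ref */
	if (!np)
		return NULL;

	domain = irq_find_matching_fwnode(of_fwnode_handle(np), DOMAIN_BUS_NEXUS);
	of_node_put(np); /* balance the reference once the lookup is done */
	return domain;
}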
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index e9b360cdc99a..1291369b9126 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -6,6 +6,7 @@
obj-y += mem.o random.o
obj-$(CONFIG_TTY_PRINTK) += ttyprintk.o
obj-y += misc.o
+obj-$(CONFIG_TEST_MISC_MINOR) += misc_minor_kunit.o
obj-$(CONFIG_ATARI_DSP56K) += dsp56k.o
obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o
obj-$(CONFIG_UV_MMTIMER) += uv_mmtimer.o
diff --git a/drivers/char/adi.c b/drivers/char/adi.c
index f9bec10a6064..4312b0cc391c 100644
--- a/drivers/char/adi.c
+++ b/drivers/char/adi.c
@@ -131,7 +131,7 @@ static ssize_t adi_write(struct file *file, const char __user *buf,
ssize_t ret;
int i;
- if (count <= 0)
+ if (count == 0)
return -EINVAL;
ver_buf_sz = min_t(size_t, count, MAX_BUF_SZ);
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 0713ea2b2a51..4f5ccd3a1f56 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -867,7 +867,7 @@ int hpet_alloc(struct hpet_data *hdp)
printk(KERN_INFO "hpet%u: at MMIO 0x%lx, IRQ%s",
hpetp->hp_which, hdp->hd_phys_address,
- hpetp->hp_ntimer > 1 ? "s" : "");
+ str_plural(hpetp->hp_ntimer));
for (i = 0; i < hpetp->hp_ntimer; i++)
printk(KERN_CONT "%s %u", i > 0 ? "," : "", hdp->hd_irq[i]);
printk(KERN_CONT "\n");
diff --git a/drivers/char/misc.c b/drivers/char/misc.c
index 558302a64dd9..726516fb0a3b 100644
--- a/drivers/char/misc.c
+++ b/drivers/char/misc.c
@@ -132,7 +132,8 @@ static int misc_open(struct inode *inode, struct file *file)
break;
}
- if (!new_fops) {
+ /* Only request module for fixed minor code */
+ if (!new_fops && minor < MISC_DYNAMIC_MINOR) {
mutex_unlock(&misc_mtx);
request_module("char-major-%d-%d", MISC_MAJOR, minor);
mutex_lock(&misc_mtx);
@@ -144,10 +145,11 @@ static int misc_open(struct inode *inode, struct file *file)
new_fops = fops_get(iter->fops);
break;
}
- if (!new_fops)
- goto fail;
}
+ if (!new_fops)
+ goto fail;
+
/*
* Place the miscdevice in the file's
* private_data so it can be used by the
@@ -210,6 +212,12 @@ int misc_register(struct miscdevice *misc)
int err = 0;
bool is_dynamic = (misc->minor == MISC_DYNAMIC_MINOR);
+ if (misc->minor > MISC_DYNAMIC_MINOR) {
+ pr_err("Invalid fixed minor %d for miscdevice '%s'\n",
+ misc->minor, misc->name);
+ return -EINVAL;
+ }
+
INIT_LIST_HEAD(&misc->list);
mutex_lock(&misc_mtx);
@@ -275,13 +283,12 @@ EXPORT_SYMBOL(misc_register);
void misc_deregister(struct miscdevice *misc)
{
- if (WARN_ON(list_empty(&misc->list)))
- return;
-
mutex_lock(&misc_mtx);
- list_del(&misc->list);
+ list_del_init(&misc->list);
device_destroy(&misc_class, MKDEV(MISC_MAJOR, misc->minor));
misc_minor_free(misc->minor);
+ if (misc->minor > MISC_DYNAMIC_MINOR)
+ misc->minor = MISC_DYNAMIC_MINOR;
mutex_unlock(&misc_mtx);
}
EXPORT_SYMBOL(misc_deregister);
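
With the new check, misc_register() recognizes three cases: a fixed minor strictly below MISC_DYNAMIC_MINOR, exactly MISC_DYNAMIC_MINOR to request dynamic allocation, and anything larger, which is rejected with -EINVAL. A minimal registration sketch:

#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/module.h>

static const struct file_operations example_fops = {
	.owner = THIS_MODULE,
};

static struct miscdevice example_dev = {
	.minor = MISC_DYNAMIC_MINOR,	/* request a dynamically allocated minor */
	.name  = "example",
	.fops  = &example_fops,
};

static int __init example_init(void)
{
	int ret = misc_register(&example_dev);

	if (ret)
		return ret;	/* a minor above MISC_DYNAMIC_MINOR would be -EINVAL */
	pr_info("example: got minor %d\n", example_dev.minor);
	return 0;
}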
diff --git a/drivers/misc/misc_minor_kunit.c b/drivers/char/misc_minor_kunit.c
index 30eceac5f1b6..6fc8b05169c5 100644
--- a/drivers/misc/misc_minor_kunit.c
+++ b/drivers/char/misc_minor_kunit.c
@@ -7,12 +7,6 @@
#include <linux/file.h>
#include <linux/init_syscalls.h>
-/* dynamic minor (2) */
-static struct miscdevice dev_dynamic_minor = {
- .minor = 2,
- .name = "dev_dynamic_minor",
-};
-
/* static minor (LCD_MINOR) */
static struct miscdevice dev_static_minor = {
.minor = LCD_MINOR,
@@ -25,16 +19,6 @@ static struct miscdevice dev_misc_dynamic_minor = {
.name = "dev_misc_dynamic_minor",
};
-static void kunit_dynamic_minor(struct kunit *test)
-{
- int ret;
-
- ret = misc_register(&dev_dynamic_minor);
- KUNIT_EXPECT_EQ(test, 0, ret);
- KUNIT_EXPECT_EQ(test, 2, dev_dynamic_minor.minor);
- misc_deregister(&dev_dynamic_minor);
-}
-
static void kunit_static_minor(struct kunit *test)
{
int ret;
@@ -157,13 +141,7 @@ static bool is_valid_dynamic_minor(int minor)
{
if (minor < 0)
return false;
- if (minor == MISC_DYNAMIC_MINOR)
- return false;
- if (minor >= 0 && minor <= 15)
- return false;
- if (minor >= 128 && minor < MISC_DYNAMIC_MINOR)
- return false;
- return true;
+ return minor > MISC_DYNAMIC_MINOR;
}
static int miscdev_test_open(struct inode *inode, struct file *file)
@@ -557,7 +535,7 @@ static void __init miscdev_test_conflict(struct kunit *test)
*/
miscstat.minor = miscdyn.minor;
ret = misc_register(&miscstat);
- KUNIT_EXPECT_EQ(test, ret, -EBUSY);
+ KUNIT_EXPECT_EQ(test, ret, -EINVAL);
if (ret == 0)
misc_deregister(&miscstat);
@@ -590,8 +568,9 @@ static void __init miscdev_test_conflict_reverse(struct kunit *test)
misc_deregister(&miscdyn);
ret = misc_register(&miscstat);
- KUNIT_EXPECT_EQ(test, ret, 0);
- KUNIT_EXPECT_EQ(test, miscstat.minor, miscdyn.minor);
+ KUNIT_EXPECT_EQ(test, ret, -EINVAL);
+ if (ret == 0)
+ misc_deregister(&miscstat);
/*
* Try to register a dynamic minor after registering a static minor
@@ -601,25 +580,81 @@ static void __init miscdev_test_conflict_reverse(struct kunit *test)
miscdyn.minor = MISC_DYNAMIC_MINOR;
ret = misc_register(&miscdyn);
KUNIT_EXPECT_EQ(test, ret, 0);
- KUNIT_EXPECT_NE(test, miscdyn.minor, miscstat.minor);
+ KUNIT_EXPECT_EQ(test, miscdyn.minor, miscstat.minor);
KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdyn.minor));
if (ret == 0)
misc_deregister(&miscdyn);
+}
- miscdev_test_can_open(test, &miscstat);
+/* Treat a minor greater than MISC_DYNAMIC_MINOR as invalid when registering a miscdevice */
+static void miscdev_test_invalid_input(struct kunit *test)
+{
+ struct miscdevice misc_test = {
+ .minor = MISC_DYNAMIC_MINOR + 1,
+ .name = "misc_test",
+ .fops = &miscdev_test_fops,
+ };
+ int ret;
- misc_deregister(&miscstat);
+ ret = misc_register(&misc_test);
+ KUNIT_EXPECT_EQ(test, ret, -EINVAL);
+ if (ret == 0)
+ misc_deregister(&misc_test);
+}
+
+/*
+ * Verify that @miscdyn_a can still be registered successfully, without
+ * reinitialization, even if the minor it previously owned has since been
+ * requested by another miscdevice such as @miscdyn_b.
+ */
+static void miscdev_test_dynamic_reentry(struct kunit *test)
+{
+ struct miscdevice miscdyn_a = {
+ .name = "miscdyn_a",
+ .minor = MISC_DYNAMIC_MINOR,
+ .fops = &miscdev_test_fops,
+ };
+ struct miscdevice miscdyn_b = {
+ .name = "miscdyn_b",
+ .minor = MISC_DYNAMIC_MINOR,
+ .fops = &miscdev_test_fops,
+ };
+ int ret, minor_a;
+
+ ret = misc_register(&miscdyn_a);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdyn_a.minor));
+ minor_a = miscdyn_a.minor;
+ if (ret != 0)
+ return;
+ misc_deregister(&miscdyn_a);
+
+ ret = misc_register(&miscdyn_b);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_EXPECT_EQ(test, miscdyn_b.minor, minor_a);
+ if (ret != 0)
+ return;
+
+ ret = misc_register(&miscdyn_a);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+ KUNIT_EXPECT_TRUE(test, is_valid_dynamic_minor(miscdyn_a.minor));
+ KUNIT_EXPECT_NE(test, miscdyn_a.minor, miscdyn_b.minor);
+ if (ret == 0)
+ misc_deregister(&miscdyn_a);
+
+ misc_deregister(&miscdyn_b);
}
static struct kunit_case test_cases[] = {
- KUNIT_CASE(kunit_dynamic_minor),
KUNIT_CASE(kunit_static_minor),
KUNIT_CASE(kunit_misc_dynamic_minor),
+ KUNIT_CASE(miscdev_test_invalid_input),
KUNIT_CASE_PARAM(miscdev_test_twice, miscdev_gen_params),
KUNIT_CASE_PARAM(miscdev_test_duplicate_minor, miscdev_gen_params),
KUNIT_CASE(miscdev_test_duplicate_name),
KUNIT_CASE(miscdev_test_duplicate_name_leak),
KUNIT_CASE_PARAM(miscdev_test_duplicate_error, miscdev_gen_params),
+ KUNIT_CASE(miscdev_test_dynamic_reentry),
{}
};
diff --git a/drivers/comedi/Kconfig b/drivers/comedi/Kconfig
index 93c68a40a17b..6dcc2567de6d 100644
--- a/drivers/comedi/Kconfig
+++ b/drivers/comedi/Kconfig
@@ -705,6 +705,15 @@ config COMEDI_ADL_PCI6208
To compile this driver as a module, choose M here: the module will be
called adl_pci6208.
+config COMEDI_ADL_PCI7250
+ tristate "ADLink PCI-7250 support"
+ help
+ Enable support for ADLink PCI-7250/LPCI-7250/LPCIe-7250 relay output
+ and isolated digital input boards.
+
+ To compile this driver as a module, choose M here: the module will be
+ called adl_pci7250.
+
config COMEDI_ADL_PCI7X3X
tristate "ADLink PCI-723X/743X isolated digital i/o board support"
depends on HAS_IOPORT
diff --git a/drivers/comedi/drivers/Makefile b/drivers/comedi/drivers/Makefile
index b24ac00cab73..7b99a431330d 100644
--- a/drivers/comedi/drivers/Makefile
+++ b/drivers/comedi/drivers/Makefile
@@ -73,6 +73,7 @@ obj-$(CONFIG_COMEDI_ADDI_APCI_3120) += addi_apci_3120.o
obj-$(CONFIG_COMEDI_ADDI_APCI_3501) += addi_apci_3501.o
obj-$(CONFIG_COMEDI_ADDI_APCI_3XXX) += addi_apci_3xxx.o
obj-$(CONFIG_COMEDI_ADL_PCI6208) += adl_pci6208.o
+obj-$(CONFIG_COMEDI_ADL_PCI7250) += adl_pci7250.o
obj-$(CONFIG_COMEDI_ADL_PCI7X3X) += adl_pci7x3x.o
obj-$(CONFIG_COMEDI_ADL_PCI8164) += adl_pci8164.o
obj-$(CONFIG_COMEDI_ADL_PCI9111) += adl_pci9111.o
diff --git a/drivers/comedi/drivers/adl_pci7250.c b/drivers/comedi/drivers/adl_pci7250.c
new file mode 100644
index 000000000000..78c85a402435
--- /dev/null
+++ b/drivers/comedi/drivers/adl_pci7250.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * adl_pci7250.c
+ *
+ * Comedi driver for ADLink PCI-7250 series cards.
+ *
+ * Copyright (C) 2015, 2025 Ian Abbott <abbotti@mev.co.uk>
+ */
+
+/*
+ * Driver: adl_pci7250
+ * Description: Driver for the ADLINK PCI-7250 relay output & digital input card
+ * Devices: [ADLINK] PCI-7250 (adl_pci7250) LPCI-7250 LPCIe-7250
+ * Author: Ian Abbott <abbotti@mev.co.uk>
+ * Status: works
+ * Updated: Mon, 02 Jun 2025 13:54:11 +0100
+ *
+ * The driver assumes that 3 PCI-7251 modules are fitted to the PCI-7250,
+ * giving 32 channels of relay outputs and 32 channels of isolated digital
+ * inputs. That is also the case for the LPCI-7250 and older LPCIe-7250
+ * cards although they do not physically support the PCI-7251 modules.
+ * Newer LPCIe-7250 cards have a different PCI subsystem device ID, so
+ * the driver limits the number of channels to 8 for those cards.
+ *
+ * Not fitting the PCI-7251 modules shouldn't do any harm, but the extra
+ * inputs and relay outputs won't work!
+ *
+ * Configuration Options: not applicable, uses PCI auto config
+ */
+
+#include <linux/module.h>
+#include <linux/comedi/comedi_pci.h>
+
+static unsigned char adl_pci7250_read8(struct comedi_device *dev,
+ unsigned int offset)
+{
+#ifdef CONFIG_HAS_IOPORT
+ if (!dev->mmio)
+ return inb(dev->iobase + offset);
+#endif
+ return readb(dev->mmio + offset);
+}
+
+static void adl_pci7250_write8(struct comedi_device *dev, unsigned int offset,
+ unsigned char val)
+{
+#ifdef CONFIG_HAS_IOPORT
+ if (!dev->mmio) {
+ outb(val, dev->iobase + offset);
+ return;
+ }
+#endif
+ writeb(val, dev->mmio + offset);
+}
+
+static int adl_pci7250_do_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ unsigned int mask = comedi_dio_update_state(s, data);
+
+ if (mask) {
+ unsigned int state = s->state;
+ unsigned int i;
+
+ for (i = 0; i * 8 < s->n_chan; i++) {
+ if ((mask & 0xffu) != 0) {
+ /* write relay data to even offset registers */
+ adl_pci7250_write8(dev, i * 2, state & 0xffu);
+ }
+ state >>= 8;
+ mask >>= 8;
+ }
+ }
+
+ data[1] = s->state;
+
+ return 2;
+}
+
+static int adl_pci7250_di_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ unsigned int value = 0;
+ unsigned int i;
+
+ for (i = 0; i * 8 < s->n_chan; i++) {
+ /* read DI value from odd offset registers */
+ value |= (unsigned int)adl_pci7250_read8(dev, i * 2 + 1) <<
+ (i * 8);
+ }
+
+ data[1] = value;
+
+ return 2;
+}
+
+static int pci7250_auto_attach(struct comedi_device *dev,
+ unsigned long context_unused)
+{
+ struct pci_dev *pcidev = comedi_to_pci_dev(dev);
+ struct comedi_subdevice *s;
+ unsigned int max_chans;
+ unsigned int i;
+ int ret;
+
+ ret = comedi_pci_enable(dev);
+ if (ret)
+ return ret;
+
+ if (pci_resource_len(pcidev, 2) < 8)
+ return -ENXIO;
+
+ /*
+ * Newer LPCIe-7250 boards use MMIO. Older LPCIe-7250, LPCI-7250, and
+ * PCI-7250 boards use Port I/O.
+ */
+ if (pci_resource_flags(pcidev, 2) & IORESOURCE_MEM) {
+ dev->mmio = pci_ioremap_bar(pcidev, 2);
+ if (!dev->mmio)
+ return -ENOMEM;
+ } else if (IS_ENABLED(CONFIG_HAS_IOPORT)) {
+ dev->iobase = pci_resource_start(pcidev, 2);
+ } else {
+ dev_err(dev->class_dev,
+ "error! need I/O port support\n");
+ return -ENXIO;
+ }
+
+ if (pcidev->subsystem_device == 0x7000) {
+ /*
+ * This is a newer LPCIe-7250 variant and cannot possibly
+ * have PCI-7251 modules fitted, so limit the number of
+ * channels to 8.
+ */
+ max_chans = 8;
+ } else {
+ /*
+ * It is unknown whether the board is a PCI-7250, an LPCI-7250,
+ * or an older LPCIe-7250 variant, so treat it as a PCI-7250
+ * and assume it can have PCI-7251 modules fitted to increase
+ * the number of channels to a maximum of 32.
+ */
+ max_chans = 32;
+ }
+
+ ret = comedi_alloc_subdevices(dev, 2);
+ if (ret)
+ return ret;
+
+ /* Relay digital output. */
+ s = &dev->subdevices[0];
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = max_chans;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = adl_pci7250_do_insn_bits;
+ /* Read initial state of relays from the even offset registers. */
+ s->state = 0;
+ for (i = 0; i * 8 < max_chans; i++) {
+ s->state |= (unsigned int)adl_pci7250_read8(dev, i * 2) <<
+ (i * 8);
+ }
+
+ /* Isolated digital input. */
+ s = &dev->subdevices[1];
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = max_chans;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = adl_pci7250_di_insn_bits;
+
+ return 0;
+}
+
+static struct comedi_driver adl_pci7250_driver = {
+ .driver_name = "adl_pci7250",
+ .module = THIS_MODULE,
+ .auto_attach = pci7250_auto_attach,
+ .detach = comedi_pci_detach,
+};
+
+static int adl_pci7250_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ return comedi_pci_auto_config(dev, &adl_pci7250_driver,
+ id->driver_data);
+}
+
+static const struct pci_device_id adl_pci7250_pci_table[] = {
+#ifdef CONFIG_HAS_IOPORT
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
+ 0x9999, 0x7250) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADLINK, 0x7250,
+ 0x9999, 0x7250) },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADLINK, 0x7250,
+ PCI_VENDOR_ID_ADLINK, 0x7250) },
+#endif
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_ADLINK, 0x7250,
+ PCI_VENDOR_ID_ADLINK, 0x7000) }, /* newer LPCIe-7250 */
+ { 0 }
+};
+MODULE_DEVICE_TABLE(pci, adl_pci7250_pci_table);
+
+static struct pci_driver adl_pci7250_pci_driver = {
+ .name = "adl_pci7250",
+ .id_table = adl_pci7250_pci_table,
+ .probe = adl_pci7250_pci_probe,
+ .remove = comedi_pci_auto_unconfig,
+};
+module_comedi_pci_driver(adl_pci7250_driver, adl_pci7250_pci_driver);
+
+MODULE_AUTHOR("Comedi https://www.comedi.org");
+MODULE_DESCRIPTION("Comedi driver for ADLink PCI-7250 series boards");
+MODULE_LICENSE("GPL");
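
For completeness, a user-space sketch against comedilib (not part of this patch): subdevice 0 is the relay output and subdevice 1 the isolated input, matching the auto_attach setup above.

#include <stdio.h>
#include <comedilib.h>

int main(void)
{
	comedi_t *dev = comedi_open("/dev/comedi0");
	unsigned int bits = 0xff;	/* close relays 0..7 */

	if (!dev)
		return 1;
	comedi_dio_bitfield2(dev, 0, 0xff, &bits, 0);	/* write relay outputs */
	bits = 0;
	comedi_dio_bitfield2(dev, 1, 0, &bits, 0);	/* read isolated inputs */
	printf("inputs: 0x%x\n", bits);
	comedi_close(dev);
	return 0;
}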
diff --git a/drivers/counter/ti-ecap-capture.c b/drivers/counter/ti-ecap-capture.c
index 3faaf7f60539..3586a7ab9887 100644
--- a/drivers/counter/ti-ecap-capture.c
+++ b/drivers/counter/ti-ecap-capture.c
@@ -465,11 +465,6 @@ static irqreturn_t ecap_cnt_isr(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void ecap_cnt_pm_disable(void *dev)
-{
- pm_runtime_disable(dev);
-}
-
static int ecap_cnt_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -523,12 +518,9 @@ static int ecap_cnt_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, counter_dev);
- pm_runtime_enable(dev);
-
- /* Register a cleanup callback to care for disabling PM */
- ret = devm_add_action_or_reset(dev, ecap_cnt_pm_disable, dev);
+ ret = devm_pm_runtime_enable(dev);
if (ret)
- return dev_err_probe(dev, ret, "failed to add pm disable action\n");
+ return ret;
ret = devm_counter_add(dev, counter_dev);
if (ret)
diff --git a/drivers/extcon/Kconfig b/drivers/extcon/Kconfig
index a6f6d467aacf..aec46bf03302 100644
--- a/drivers/extcon/Kconfig
+++ b/drivers/extcon/Kconfig
@@ -134,6 +134,19 @@ config EXTCON_MAX8997
Maxim MAX8997 PMIC. The MAX8997 MUIC is a USB port accessory
detector and switch.
+config EXTCON_MAX14526
+ tristate "Maxim MAX14526 EXTCON Support"
+ depends on I2C
+ select IRQ_DOMAIN
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for the Maxim MAX14526
+ MUIC device. The MAX14526 MUIC is a USB port accessory
+ detector and switch. The MAX14526 is designed to simplify
+ interface requirements on portable devices by multiplexing
+ common inputs (USB, UART, Microphone, Stereo Audio and
+ Composite Video) on a single micro/mini USB connector.
+
config EXTCON_PALMAS
tristate "Palmas USB EXTCON support"
depends on MFD_PALMAS
diff --git a/drivers/extcon/Makefile b/drivers/extcon/Makefile
index 0d6d23faf748..6482f2bfd661 100644
--- a/drivers/extcon/Makefile
+++ b/drivers/extcon/Makefile
@@ -18,6 +18,7 @@ obj-$(CONFIG_EXTCON_MAX3355) += extcon-max3355.o
obj-$(CONFIG_EXTCON_MAX77693) += extcon-max77693.o
obj-$(CONFIG_EXTCON_MAX77843) += extcon-max77843.o
obj-$(CONFIG_EXTCON_MAX8997) += extcon-max8997.o
+obj-$(CONFIG_EXTCON_MAX14526) += extcon-max14526.o
obj-$(CONFIG_EXTCON_PALMAS) += extcon-palmas.o
obj-$(CONFIG_EXTCON_PTN5150) += extcon-ptn5150.o
obj-$(CONFIG_EXTCON_QCOM_SPMI_MISC) += extcon-qcom-spmi-misc.o
diff --git a/drivers/extcon/extcon-adc-jack.c b/drivers/extcon/extcon-adc-jack.c
index 46c40d85c2ac..7e3c9f38297b 100644
--- a/drivers/extcon/extcon-adc-jack.c
+++ b/drivers/extcon/extcon-adc-jack.c
@@ -164,6 +164,8 @@ static void adc_jack_remove(struct platform_device *pdev)
{
struct adc_jack_data *data = platform_get_drvdata(pdev);
+ if (data->wakeup_source)
+ device_init_wakeup(&pdev->dev, false);
free_irq(data->irq, data);
cancel_work_sync(&data->handler.work);
}
diff --git a/drivers/extcon/extcon-axp288.c b/drivers/extcon/extcon-axp288.c
index d3bcbe839c09..19856dddade6 100644
--- a/drivers/extcon/extcon-axp288.c
+++ b/drivers/extcon/extcon-axp288.c
@@ -470,7 +470,7 @@ static int axp288_extcon_probe(struct platform_device *pdev)
if (ret < 0)
return ret;
- device_init_wakeup(dev, true);
+ devm_device_init_wakeup(dev);
platform_set_drvdata(pdev, info);
return 0;
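
This and the following conversions all follow the same pattern: devm_device_init_wakeup() enables wakeup signaling and registers a devres action that disables it again on unbind, so drivers no longer need a manual device_init_wakeup(dev, false) in their remove or error paths. A sketch with a hypothetical driver:

#include <linux/platform_device.h>
#include <linux/pm_wakeup.h>

static int foo_probe(struct platform_device *pdev)	/* hypothetical driver */
{
	int ret;

	ret = devm_device_init_wakeup(&pdev->dev);
	if (ret)
		return dev_err_probe(&pdev->dev, ret, "wakeup init failed\n");

	return 0;
}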
diff --git a/drivers/extcon/extcon-fsa9480.c b/drivers/extcon/extcon-fsa9480.c
index b11b43171063..a031eb0914a0 100644
--- a/drivers/extcon/extcon-fsa9480.c
+++ b/drivers/extcon/extcon-fsa9480.c
@@ -317,7 +317,7 @@ static int fsa9480_probe(struct i2c_client *client)
return ret;
}
- device_init_wakeup(info->dev, true);
+ devm_device_init_wakeup(info->dev);
fsa9480_detect_dev(info);
return 0;
diff --git a/drivers/extcon/extcon-max14526.c b/drivers/extcon/extcon-max14526.c
new file mode 100644
index 000000000000..3750a5c20612
--- /dev/null
+++ b/drivers/extcon/extcon-max14526.c
@@ -0,0 +1,302 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+
+#include <linux/device.h>
+#include <linux/devm-helpers.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/extcon-provider.h>
+#include <linux/i2c.h>
+#include <linux/mod_devicetable.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/pm.h>
+#include <linux/regmap.h>
+
+/* I2C addresses of MUIC internal registers */
+#define MAX14526_DEVICE_ID 0x00
+#define MAX14526_ID 0x02
+
+/* CONTROL_1 register masks */
+#define MAX14526_CONTROL_1 0x01
+#define ID_2P2 BIT(6)
+#define ID_620 BIT(5)
+#define ID_200 BIT(4)
+#define VLDO BIT(3)
+#define SEMREN BIT(2)
+#define ADC_EN BIT(1)
+#define CP_EN BIT(0)
+
+/* CONTROL_2 register masks */
+#define MAX14526_CONTROL_2 0x02
+#define INTPOL BIT(7)
+#define INT_EN BIT(6)
+#define MIC_LP BIT(5)
+#define CP_AUD BIT(4)
+#define CHG_TYPE BIT(1)
+#define USB_DET_DIS BIT(0)
+
+/* SW_CONTROL register masks */
+#define MAX14526_SW_CONTROL 0x03
+#define SW_DATA 0x00
+#define SW_UART 0x01
+#define SW_AUDIO 0x02
+#define SW_OPEN 0x07
+
+/* INT_STATUS register masks */
+#define MAX14526_INT_STAT 0x04
+#define CHGDET BIT(7)
+#define MR_COMP BIT(6)
+#define SENDEND BIT(5)
+#define V_VBUS BIT(4)
+
+/* STATUS register masks */
+#define MAX14526_STATUS 0x05
+#define CPORT BIT(7)
+#define CHPORT BIT(6)
+#define C1COMP BIT(0)
+
+enum max14526_idno_resistance {
+ MAX14526_GND,
+ MAX14526_24KOHM,
+ MAX14526_56KOHM,
+ MAX14526_100KOHM,
+ MAX14526_130KOHM,
+ MAX14526_180KOHM,
+ MAX14526_240KOHM,
+ MAX14526_330KOHM,
+ MAX14526_430KOHM,
+ MAX14526_620KOHM,
+ MAX14526_910KOHM,
+ MAX14526_OPEN
+};
+
+enum max14526_field_idx {
+ VENDOR_ID, CHIP_REV, /* DEVID */
+ DM, DP, /* SW_CONTROL */
+ MAX14526_N_REGMAP_FIELDS
+};
+
+static const struct reg_field max14526_reg_field[MAX14526_N_REGMAP_FIELDS] = {
+ [VENDOR_ID] = REG_FIELD(MAX14526_DEVICE_ID, 4, 7),
+ [CHIP_REV] = REG_FIELD(MAX14526_DEVICE_ID, 0, 3),
+ [DM] = REG_FIELD(MAX14526_SW_CONTROL, 0, 2),
+ [DP] = REG_FIELD(MAX14526_SW_CONTROL, 3, 5),
+};
+
+struct max14526_data {
+ struct i2c_client *client;
+ struct extcon_dev *edev;
+
+ struct regmap *regmap;
+ struct regmap_field *rfield[MAX14526_N_REGMAP_FIELDS];
+
+ int last_state;
+ int cable;
+};
+
+enum max14526_muic_modes {
+ MAX14526_OTG = MAX14526_GND, /* no power */
+ MAX14526_MHL = MAX14526_56KOHM, /* no power */
+ MAX14526_OTG_Y = MAX14526_GND | V_VBUS,
+ MAX14526_MHL_CHG = MAX14526_GND | V_VBUS | CHGDET,
+ MAX14526_NONE = MAX14526_OPEN,
+ MAX14526_USB = MAX14526_OPEN | V_VBUS,
+ MAX14526_CHG = MAX14526_OPEN | V_VBUS | CHGDET,
+};
+
+static const unsigned int max14526_extcon_cable[] = {
+ EXTCON_USB,
+ EXTCON_USB_HOST,
+ EXTCON_CHG_USB_FAST,
+ EXTCON_DISP_MHL,
+ EXTCON_NONE,
+};
+
+static int max14526_ap_usb_mode(struct max14526_data *priv)
+{
+ struct device *dev = &priv->client->dev;
+ int ret;
+
+ /* Enable USB Path */
+ ret = regmap_field_write(priv->rfield[DM], SW_DATA);
+ if (ret)
+ return ret;
+
+ ret = regmap_field_write(priv->rfield[DP], SW_DATA);
+ if (ret)
+ return ret;
+
+ /* Enable 200K, Charger Pump and ADC */
+ ret = regmap_write(priv->regmap, MAX14526_CONTROL_1,
+ ID_200 | ADC_EN | CP_EN);
+ if (ret)
+ return ret;
+
+ dev_dbg(dev, "AP USB mode set\n");
+
+ return 0;
+}
+
+static irqreturn_t max14526_interrupt(int irq, void *dev_id)
+{
+ struct max14526_data *priv = dev_id;
+ struct device *dev = &priv->client->dev;
+ int state, ret;
+
+ /*
+ * Upon an MUIC IRQ (MUIC_INT_N falls), wait at least 70ms
+ * before reading INT_STAT and STATUS. After the reads,
+ * MUIC_INT_N returns to high (but the INT_STAT and STATUS
+ * contents will be held).
+ */
+ msleep(100);
+
+ ret = regmap_read(priv->regmap, MAX14526_INT_STAT, &state);
+ if (ret) {
+ dev_err(dev, "failed to read MUIC state %d\n", ret);
+ /* state is uninitialized on error, so bail out */
+ return IRQ_HANDLED;
+ }
+
+ if (state == priv->last_state)
+ return IRQ_HANDLED;
+
+ /* Detach previous device */
+ extcon_set_state_sync(priv->edev, priv->cable, false);
+
+ switch (state) {
+ case MAX14526_USB:
+ priv->cable = EXTCON_USB;
+ break;
+
+ case MAX14526_CHG:
+ priv->cable = EXTCON_CHG_USB_FAST;
+ break;
+
+ case MAX14526_OTG:
+ case MAX14526_OTG_Y:
+ priv->cable = EXTCON_USB_HOST;
+ break;
+
+ case MAX14526_MHL:
+ case MAX14526_MHL_CHG:
+ priv->cable = EXTCON_DISP_MHL;
+ break;
+
+ case MAX14526_NONE:
+ default:
+ priv->cable = EXTCON_NONE;
+ break;
+ }
+
+ extcon_set_state_sync(priv->edev, priv->cable, true);
+
+ priv->last_state = state;
+
+ return IRQ_HANDLED;
+}
+
+static const struct regmap_config max14526_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .max_register = MAX14526_STATUS,
+};
+
+static int max14526_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct max14526_data *priv;
+ int ret, dev_id, rev, i;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->client = client;
+ i2c_set_clientdata(client, priv);
+
+ priv->regmap = devm_regmap_init_i2c(client, &max14526_regmap_config);
+ if (IS_ERR(priv->regmap))
+ return dev_err_probe(dev, PTR_ERR(priv->regmap), "cannot allocate regmap\n");
+
+ for (i = 0; i < MAX14526_N_REGMAP_FIELDS; i++) {
+ priv->rfield[i] = devm_regmap_field_alloc(dev, priv->regmap,
+ max14526_reg_field[i]);
+ if (IS_ERR(priv->rfield[i]))
+ return dev_err_probe(dev, PTR_ERR(priv->rfield[i]),
+ "cannot allocate regmap field\n");
+ }
+
+ /* Detect if MUIC version is supported */
+ ret = regmap_field_read(priv->rfield[VENDOR_ID], &dev_id);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to read MUIC ID\n");
+
+ regmap_field_read(priv->rfield[CHIP_REV], &rev);
+
+ if (dev_id == MAX14526_ID)
+ dev_info(dev, "detected MAX14526 MUIC with id 0x%x, rev 0x%x\n", dev_id, rev);
+ else
+ dev_err_probe(dev, -EINVAL, "MUIC vendor id 0x%X is not recognized\n", dev_id);
+
+ priv->edev = devm_extcon_dev_allocate(dev, max14526_extcon_cable);
+ if (IS_ERR(priv->edev))
+ return dev_err_probe(dev, PTR_ERR(priv->edev),
+ "failed to allocate extcon device\n");
+
+ ret = devm_extcon_dev_register(dev, priv->edev);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to register extcon device\n");
+
+ ret = max14526_ap_usb_mode(priv);
+ if (ret < 0)
+ return dev_err_probe(dev, ret, "failed to set AP USB mode\n");
+
+ regmap_write_bits(priv->regmap, MAX14526_CONTROL_2, INT_EN, INT_EN);
+ regmap_write_bits(priv->regmap, MAX14526_CONTROL_2, USB_DET_DIS, (u32)~USB_DET_DIS);
+
+ ret = devm_request_threaded_irq(dev, client->irq, NULL, &max14526_interrupt,
+ IRQF_ONESHOT | IRQF_SHARED, client->name, priv);
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to register IRQ\n");
+
+ irq_wake_thread(client->irq, priv);
+
+ return 0;
+}
+
+static int max14526_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct max14526_data *priv = i2c_get_clientdata(client);
+
+ irq_wake_thread(client->irq, priv);
+
+ return 0;
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(max14526_pm_ops, NULL, max14526_resume);
+
+static const struct of_device_id max14526_match[] = {
+ { .compatible = "maxim,max14526" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, max14526_match);
+
+static const struct i2c_device_id max14526_id[] = {
+ { "max14526" },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max14526_id);
+
+static struct i2c_driver max14526_driver = {
+ .driver = {
+ .name = "max14526",
+ .of_match_table = max14526_match,
+ .pm = &max14526_pm_ops,
+ },
+ .probe = max14526_probe,
+ .id_table = max14526_id,
+};
+module_i2c_driver(max14526_driver);
+
+MODULE_AUTHOR("Svyatoslav Ryhel <clamor95@gmail.com>");
+MODULE_DESCRIPTION("MAX14526 extcon driver to support MUIC");
+MODULE_LICENSE("GPL");
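
On the consumer side (not part of this patch), another driver can observe the cable states max14526 publishes through the standard extcon API, for example:

#include <linux/extcon.h>

static int watch_usb(struct device *dev, struct notifier_block *nb)
{
	struct extcon_dev *edev = extcon_get_edev_by_phandle(dev, 0);

	if (IS_ERR(edev))
		return PTR_ERR(edev);

	/* nb->notifier_call fires on EXTCON_USB attach/detach events */
	return devm_extcon_register_notifier(dev, edev, EXTCON_USB, nb);
}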
diff --git a/drivers/extcon/extcon-qcom-spmi-misc.c b/drivers/extcon/extcon-qcom-spmi-misc.c
index 53de581a393a..afaba5685c3d 100644
--- a/drivers/extcon/extcon-qcom-spmi-misc.c
+++ b/drivers/extcon/extcon-qcom-spmi-misc.c
@@ -155,7 +155,7 @@ static int qcom_usb_extcon_probe(struct platform_device *pdev)
}
platform_set_drvdata(pdev, info);
- device_init_wakeup(dev, 1);
+ devm_device_init_wakeup(dev);
/* Perform initial detection */
qcom_usb_extcon_detect_cable(&info->wq_detcable.work);
diff --git a/drivers/greybus/svc.c b/drivers/greybus/svc.c
index 4256467fcd35..35ea7147dca6 100644
--- a/drivers/greybus/svc.c
+++ b/drivers/greybus/svc.c
@@ -10,6 +10,7 @@
#include <linux/kstrtox.h>
#include <linux/workqueue.h>
#include <linux/greybus.h>
+#include <linux/string_choices.h>
#define SVC_INTF_EJECT_TIMEOUT 9000
#define SVC_INTF_ACTIVATE_TIMEOUT 6000
@@ -73,7 +74,7 @@ static ssize_t watchdog_show(struct device *dev, struct device_attribute *attr,
struct gb_svc *svc = to_gb_svc(dev);
return sprintf(buf, "%s\n",
- gb_svc_watchdog_enabled(svc) ? "enabled" : "disabled");
+ str_enabled_disabled(gb_svc_watchdog_enabled(svc)));
}
static ssize_t watchdog_store(struct device *dev,
diff --git a/drivers/iio/adc/pac1934.c b/drivers/iio/adc/pac1934.c
index 575766a09782..48df16509260 100644
--- a/drivers/iio/adc/pac1934.c
+++ b/drivers/iio/adc/pac1934.c
@@ -88,6 +88,7 @@
#define PAC1934_VPOWER_3_ADDR 0x19
#define PAC1934_VPOWER_4_ADDR 0x1A
#define PAC1934_REFRESH_V_REG_ADDR 0x1F
+#define PAC1934_SLOW_REG_ADDR 0x20
#define PAC1934_CTRL_STAT_REGS_ADDR 0x1C
#define PAC1934_PID_REG_ADDR 0xFD
#define PAC1934_MID_REG_ADDR 0xFE
@@ -1265,8 +1266,23 @@ static int pac1934_chip_configure(struct pac1934_chip_info *info)
/* no SLOW triggered REFRESH, clear POR */
regs[PAC1934_SLOW_REG_OFF] = 0;
- ret = i2c_smbus_write_block_data(client, PAC1934_CTRL_STAT_REGS_ADDR,
- ARRAY_SIZE(regs), (u8 *)regs);
+ /*
+ * Write the three bytes sequentially, as the device does not support
+ * block write.
+ */
+ ret = i2c_smbus_write_byte_data(client, PAC1934_CTRL_STAT_REGS_ADDR,
+ regs[PAC1934_CHANNEL_DIS_REG_OFF]);
+ if (ret)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(client,
+ PAC1934_CTRL_STAT_REGS_ADDR + PAC1934_NEG_PWR_REG_OFF,
+ regs[PAC1934_NEG_PWR_REG_OFF]);
+ if (ret)
+ return ret;
+
+ ret = i2c_smbus_write_byte_data(client, PAC1934_SLOW_REG_ADDR,
+ regs[PAC1934_SLOW_REG_OFF]);
if (ret)
return ret;
diff --git a/drivers/iio/adc/xilinx-ams.c b/drivers/iio/adc/xilinx-ams.c
index 76dd0343f5f7..124470c92529 100644
--- a/drivers/iio/adc/xilinx-ams.c
+++ b/drivers/iio/adc/xilinx-ams.c
@@ -118,7 +118,7 @@
#define AMS_ALARM_THRESHOLD_OFF_10 0x10
#define AMS_ALARM_THRESHOLD_OFF_20 0x20
-#define AMS_ALARM_THR_DIRECT_MASK BIT(1)
+#define AMS_ALARM_THR_DIRECT_MASK BIT(0)
#define AMS_ALARM_THR_MIN 0x0000
#define AMS_ALARM_THR_MAX (BIT(16) - 1)
@@ -389,6 +389,29 @@ static void ams_update_pl_alarm(struct ams *ams, unsigned long alarm_mask)
ams_pl_update_reg(ams, AMS_REG_CONFIG3, AMS_REGCFG3_ALARM_MASK, cfg);
}
+static void ams_unmask(struct ams *ams)
+{
+ unsigned int status, unmask;
+
+ status = readl(ams->base + AMS_ISR_0);
+
+ /* Clear those bits which are not active anymore */
+ unmask = (ams->current_masked_alarm ^ status) & ams->current_masked_alarm;
+
+ /* Clear status of disabled alarm */
+ unmask |= ams->intr_mask;
+
+ ams->current_masked_alarm &= status;
+
+ /* Also clear those which are masked out anyway */
+ ams->current_masked_alarm &= ~ams->intr_mask;
+
+ /* Clear the interrupts before we unmask them */
+ writel(unmask, ams->base + AMS_ISR_0);
+
+ ams_update_intrmask(ams, ~AMS_ALARM_MASK, ~AMS_ALARM_MASK);
+}
+
static void ams_update_alarm(struct ams *ams, unsigned long alarm_mask)
{
unsigned long flags;
@@ -401,6 +424,7 @@ static void ams_update_alarm(struct ams *ams, unsigned long alarm_mask)
spin_lock_irqsave(&ams->intr_lock, flags);
ams_update_intrmask(ams, AMS_ISR0_ALARM_MASK, ~alarm_mask);
+ ams_unmask(ams);
spin_unlock_irqrestore(&ams->intr_lock, flags);
}
@@ -1035,28 +1059,9 @@ static void ams_handle_events(struct iio_dev *indio_dev, unsigned long events)
static void ams_unmask_worker(struct work_struct *work)
{
struct ams *ams = container_of(work, struct ams, ams_unmask_work.work);
- unsigned int status, unmask;
spin_lock_irq(&ams->intr_lock);
-
- status = readl(ams->base + AMS_ISR_0);
-
- /* Clear those bits which are not active anymore */
- unmask = (ams->current_masked_alarm ^ status) & ams->current_masked_alarm;
-
- /* Clear status of disabled alarm */
- unmask |= ams->intr_mask;
-
- ams->current_masked_alarm &= status;
-
- /* Also clear those which are masked out anyway */
- ams->current_masked_alarm &= ~ams->intr_mask;
-
- /* Clear the interrupts before we unmask them */
- writel(unmask, ams->base + AMS_ISR_0);
-
- ams_update_intrmask(ams, ~AMS_ALARM_MASK, ~AMS_ALARM_MASK);
-
+ ams_unmask(ams);
spin_unlock_irq(&ams->intr_lock);
/* If still pending some alarm re-trigger the timer */
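
Both remaining call sites take ams->intr_lock around ams_unmask(), so the factored-out helper could state its locking contract explicitly; a sketch:

static void ams_unmask(struct ams *ams)
{
	lockdep_assert_held(&ams->intr_lock);
	/* ... body as in the hunk above ... */
}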
diff --git a/drivers/iio/dac/ad5360.c b/drivers/iio/dac/ad5360.c
index a57b0a093112..8271849b1c83 100644
--- a/drivers/iio/dac/ad5360.c
+++ b/drivers/iio/dac/ad5360.c
@@ -262,7 +262,7 @@ static int ad5360_update_ctrl(struct iio_dev *indio_dev, unsigned int set,
unsigned int clr)
{
struct ad5360_state *st = iio_priv(indio_dev);
- unsigned int ret;
+ int ret;
mutex_lock(&st->lock);
diff --git a/drivers/iio/dac/ad5421.c b/drivers/iio/dac/ad5421.c
index 1462ee640b16..d9d7031c4432 100644
--- a/drivers/iio/dac/ad5421.c
+++ b/drivers/iio/dac/ad5421.c
@@ -186,7 +186,7 @@ static int ad5421_update_ctrl(struct iio_dev *indio_dev, unsigned int set,
unsigned int clr)
{
struct ad5421_state *st = iio_priv(indio_dev);
- unsigned int ret;
+ int ret;
mutex_lock(&st->lock);
diff --git a/drivers/iio/frequency/adf4350.c b/drivers/iio/frequency/adf4350.c
index 6665409a9a87..ed1741165f55 100644
--- a/drivers/iio/frequency/adf4350.c
+++ b/drivers/iio/frequency/adf4350.c
@@ -149,6 +149,19 @@ static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq)
if (freq > ADF4350_MAX_OUT_FREQ || freq < st->min_out_freq)
return -EINVAL;
+ st->r4_rf_div_sel = 0;
+
+ /*
+ * TODO: The computation below makes sure we get a power-of-2 shift
+ * (st->r4_rf_div_sel) so that freq becomes greater than or equal to
+ * ADF4350_MIN_VCO_FREQ. This might be simplified with fls()/fls_long()
+ * and friends.
+ */
+ while (freq < ADF4350_MIN_VCO_FREQ) {
+ freq <<= 1;
+ st->r4_rf_div_sel++;
+ }
+
if (freq > ADF4350_MAX_FREQ_45_PRESC) {
prescaler = ADF4350_REG1_PRESCALER;
mdiv = 75;
@@ -157,13 +170,6 @@ static int adf4350_set_freq(struct adf4350_state *st, unsigned long long freq)
mdiv = 23;
}
- st->r4_rf_div_sel = 0;
-
- while (freq < ADF4350_MIN_VCO_FREQ) {
- freq <<= 1;
- st->r4_rf_div_sel++;
- }
-
/*
* Allow a predefined reference division factor
* if not set, compute our own
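
As the TODO above notes, the shift loop can be replaced by computing the exponent directly. A sketch (untested) of such a simplification, using order_base_2() so that freq << sel lands at or above the VCO minimum:

#include <linux/log2.h>
#include <linux/math.h>

static unsigned int adf4350_rf_div_sel(unsigned long long freq,
				       unsigned long long min_vco)
{
	if (freq >= min_vco)
		return 0;
	/* smallest sel with freq << sel >= min_vco */
	return order_base_2(DIV_ROUND_UP_ULL(min_vco, freq));
}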
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index 3ebf37ddfc18..6cc979b26151 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -385,7 +385,7 @@ struct icc_node_data *of_icc_get_from_provider(const struct of_phandle_args *spe
mutex_lock(&icc_lock);
list_for_each_entry(provider, &icc_providers, provider_list) {
- if (provider->dev->of_node == spec->np) {
+ if (device_match_of_node(provider->dev, spec->np)) {
if (provider->xlate_extended) {
data = provider->xlate_extended(spec, provider->data);
if (!IS_ERR(data)) {
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig
index 31dc4781abef..5b4bb9f1382b 100644
--- a/drivers/interconnect/qcom/Kconfig
+++ b/drivers/interconnect/qcom/Kconfig
@@ -8,6 +8,15 @@ config INTERCONNECT_QCOM
config INTERCONNECT_QCOM_BCM_VOTER
tristate
+config INTERCONNECT_QCOM_GLYMUR
+ tristate "Qualcomm GLYMUR interconnect driver"
+ depends on INTERCONNECT_QCOM_RPMH_POSSIBLE
+ select INTERCONNECT_QCOM_RPMH
+ select INTERCONNECT_QCOM_BCM_VOTER
+ help
+ This is a driver for the Qualcomm Network-on-Chip on glymur-based
+ platforms.
+
config INTERCONNECT_QCOM_MSM8909
tristate "Qualcomm MSM8909 interconnect driver"
depends on INTERCONNECT_QCOM
diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile
index f16ac242eba5..cf8cba73ee3e 100644
--- a/drivers/interconnect/qcom/Makefile
+++ b/drivers/interconnect/qcom/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_INTERCONNECT_QCOM) += interconnect_qcom.o
interconnect_qcom-y := icc-common.o
icc-bcm-voter-objs := bcm-voter.o
+qnoc-glymur-objs := glymur.o
qnoc-milos-objs := milos.o
qnoc-msm8909-objs := msm8909.o
qnoc-msm8916-objs := msm8916.o
@@ -46,6 +47,7 @@ qnoc-x1e80100-objs := x1e80100.o
icc-smd-rpm-objs := smd-rpm.o icc-rpm.o icc-rpm-clocks.o
obj-$(CONFIG_INTERCONNECT_QCOM_BCM_VOTER) += icc-bcm-voter.o
+obj-$(CONFIG_INTERCONNECT_QCOM_GLYMUR) += qnoc-glymur.o
obj-$(CONFIG_INTERCONNECT_QCOM_MILOS) += qnoc-milos.o
obj-$(CONFIG_INTERCONNECT_QCOM_MSM8909) += qnoc-msm8909.o
obj-$(CONFIG_INTERCONNECT_QCOM_MSM8916) += qnoc-msm8916.o
diff --git a/drivers/interconnect/qcom/glymur.c b/drivers/interconnect/qcom/glymur.c
new file mode 100644
index 000000000000..cf20b5752dbb
--- /dev/null
+++ b/drivers/interconnect/qcom/glymur.c
@@ -0,0 +1,2543 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2025, Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/interconnect.h>
+#include <linux/interconnect-provider.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <dt-bindings/interconnect/qcom,glymur-rpmh.h>
+
+#include "bcm-voter.h"
+#include "icc-rpmh.h"
+
+static struct qcom_icc_node qup0_core_slave = {
+ .name = "qup0_core_slave",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qup1_core_slave = {
+ .name = "qup1_core_slave",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qup2_core_slave = {
+ .name = "qup2_core_slave",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ahb2phy0 = {
+ .name = "qhs_ahb2phy0",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ahb2phy1 = {
+ .name = "qhs_ahb2phy1",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ahb2phy2 = {
+ .name = "qhs_ahb2phy2",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ahb2phy3 = {
+ .name = "qhs_ahb2phy3",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_av1_enc_cfg = {
+ .name = "qhs_av1_enc_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_camera_cfg = {
+ .name = "qhs_camera_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_clk_ctl = {
+ .name = "qhs_clk_ctl",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_crypto0_cfg = {
+ .name = "qhs_crypto0_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_display_cfg = {
+ .name = "qhs_display_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_gpuss_cfg = {
+ .name = "qhs_gpuss_cfg",
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qhs_imem_cfg = {
+ .name = "qhs_imem_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie0_cfg = {
+ .name = "qhs_pcie0_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie1_cfg = {
+ .name = "qhs_pcie1_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie2_cfg = {
+ .name = "qhs_pcie2_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie3a_cfg = {
+ .name = "qhs_pcie3a_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie3b_cfg = {
+ .name = "qhs_pcie3b_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie4_cfg = {
+ .name = "qhs_pcie4_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie5_cfg = {
+ .name = "qhs_pcie5_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie6_cfg = {
+ .name = "qhs_pcie6_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pcie_rscc = {
+ .name = "qhs_pcie_rscc",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_pdm = {
+ .name = "qhs_pdm",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_prng = {
+ .name = "qhs_prng",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qdss_cfg = {
+ .name = "qhs_qdss_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qspi = {
+ .name = "qhs_qspi",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qup0 = {
+ .name = "qhs_qup0",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qup1 = {
+ .name = "qhs_qup1",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_qup2 = {
+ .name = "qhs_qup2",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_sdc2 = {
+ .name = "qhs_sdc2",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_sdc4 = {
+ .name = "qhs_sdc4",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_smmuv3_cfg = {
+ .name = "qhs_smmuv3_cfg",
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qhs_tcsr = {
+ .name = "qhs_tcsr",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tlmm = {
+ .name = "qhs_tlmm",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ufs_mem_cfg = {
+ .name = "qhs_ufs_mem_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb2_0_cfg = {
+ .name = "qhs_usb2_0_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb3_0_cfg = {
+ .name = "qhs_usb3_0_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb3_1_cfg = {
+ .name = "qhs_usb3_1_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb3_2_cfg = {
+ .name = "qhs_usb3_2_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb3_mp_cfg = {
+ .name = "qhs_usb3_mp_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb4_0_cfg = {
+ .name = "qhs_usb4_0_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb4_1_cfg = {
+ .name = "qhs_usb4_1_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_usb4_2_cfg = {
+ .name = "qhs_usb4_2_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_venus_cfg = {
+ .name = "qhs_venus_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qss_lpass_qtb_cfg = {
+ .name = "qss_lpass_qtb_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qss_nsp_qtb_cfg = {
+ .name = "qss_nsp_qtb_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node xs_qdss_stm = {
+ .name = "xs_qdss_stm",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node xs_sys_tcu_cfg = {
+ .name = "xs_sys_tcu_cfg",
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qhs_aoss = {
+ .name = "qhs_aoss",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_ipc_router = {
+ .name = "qhs_ipc_router",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_soccp = {
+ .name = "qhs_soccp",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_tme_cfg = {
+ .name = "qhs_tme_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qns_apss = {
+ .name = "qns_apss",
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node qxs_boot_imem = {
+ .name = "qxs_boot_imem",
+ .channels = 1,
+ .buswidth = 16,
+};
+
+static struct qcom_icc_node qxs_imem = {
+ .name = "qxs_imem",
+ .channels = 1,
+ .buswidth = 8,
+};
+
+static struct qcom_icc_node ebi = {
+ .name = "ebi",
+ .channels = 12,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node srvc_mnoc = {
+ .name = "srvc_mnoc",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node srvc_nsinoc = {
+ .name = "srvc_nsinoc",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node srvc_pcie_east_aggre_noc = {
+ .name = "srvc_pcie_east_aggre_noc",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_hscnoc_pcie_east_ms_mpu_cfg = {
+ .name = "qhs_hscnoc_pcie_east_ms_mpu_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node srvc_pcie_east = {
+ .name = "srvc_pcie_east",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node xs_pcie_0 = {
+ .name = "xs_pcie_0",
+ .channels = 1,
+ .buswidth = 16,
+};
+
+static struct qcom_icc_node xs_pcie_1 = {
+ .name = "xs_pcie_1",
+ .channels = 1,
+ .buswidth = 32,
+};
+
+static struct qcom_icc_node xs_pcie_5 = {
+ .name = "xs_pcie_5",
+ .channels = 1,
+ .buswidth = 32,
+};
+
+static struct qcom_icc_node srvc_pcie_west_aggre_noc = {
+ .name = "srvc_pcie_west_aggre_noc",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node qhs_hscnoc_pcie_west_ms_mpu_cfg = {
+ .name = "qhs_hscnoc_pcie_west_ms_mpu_cfg",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node srvc_pcie_west = {
+ .name = "srvc_pcie_west",
+ .channels = 1,
+ .buswidth = 4,
+};
+
+static struct qcom_icc_node xs_pcie_2 = {
+ .name = "xs_pcie_2",
+ .channels = 1,
+ .buswidth = 16,
+};
+
+static struct qcom_icc_node xs_pcie_3a = {
+ .name = "xs_pcie_3a",
+ .channels = 1,
+ .buswidth = 64,
+};
+
+static struct qcom_icc_node xs_pcie_3b = {
+ .name = "xs_pcie_3b",
+ .channels = 1,
+ .buswidth = 32,
+};
+
+static struct qcom_icc_node xs_pcie_4 = {
+ .name = "xs_pcie_4",
+ .channels = 1,
+ .buswidth = 16,
+};
+
+static struct qcom_icc_node xs_pcie_6 = {
+ .name = "xs_pcie_6",
+ .channels = 1,
+ .buswidth = 16,
+};
+
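+/*
+ * Nodes above this point are pure endpoints; each node below also
+ * carries a link_nodes array naming the slaves its traffic can reach.
+ */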
+static struct qcom_icc_node qup0_core_master = {
+ .name = "qup0_core_master",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qup0_core_slave },
+};
+
+static struct qcom_icc_node qup1_core_master = {
+ .name = "qup1_core_master",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qup1_core_slave },
+};
+
+static struct qcom_icc_node qup2_core_master = {
+ .name = "qup2_core_master",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qup2_core_slave },
+};
+
+static struct qcom_icc_node llcc_mc = {
+ .name = "llcc_mc",
+ .channels = 12,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &ebi },
+};
+
+static struct qcom_icc_node qsm_mnoc_cfg = {
+ .name = "qsm_mnoc_cfg",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &srvc_mnoc },
+};
+
+static struct qcom_icc_node qsm_pcie_east_anoc_cfg = {
+ .name = "qsm_pcie_east_anoc_cfg",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &srvc_pcie_east_aggre_noc },
+};
+
+static struct qcom_icc_node qnm_hscnoc_pcie_east = {
+ .name = "qnm_hscnoc_pcie_east",
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 3,
+ .link_nodes = (struct qcom_icc_node *[]) { &xs_pcie_0, &xs_pcie_1,
+ &xs_pcie_5 },
+};
+
+static struct qcom_icc_node qsm_cnoc_pcie_east_slave_cfg = {
+ .name = "qsm_cnoc_pcie_east_slave_cfg",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 2,
+ .link_nodes = (struct qcom_icc_node *[]) { &qhs_hscnoc_pcie_east_ms_mpu_cfg,
+ &srvc_pcie_east },
+};
+
+static struct qcom_icc_node qsm_pcie_west_anoc_cfg = {
+ .name = "qsm_pcie_west_anoc_cfg",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &srvc_pcie_west_aggre_noc },
+};
+
+static struct qcom_icc_node qnm_hscnoc_pcie_west = {
+ .name = "qnm_hscnoc_pcie_west",
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 5,
+ .link_nodes = (struct qcom_icc_node *[]) { &xs_pcie_2, &xs_pcie_3a,
+ &xs_pcie_3b, &xs_pcie_4,
+ &xs_pcie_6 },
+};
+
+static struct qcom_icc_node qsm_cnoc_pcie_west_slave_cfg = {
+ .name = "qsm_cnoc_pcie_west_slave_cfg",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 2,
+ .link_nodes = (struct qcom_icc_node *[]) { &qhs_hscnoc_pcie_west_ms_mpu_cfg,
+ &srvc_pcie_west },
+};
+
+static struct qcom_icc_node qss_cnoc_pcie_slave_east_cfg = {
+ .name = "qss_cnoc_pcie_slave_east_cfg",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qsm_cnoc_pcie_east_slave_cfg },
+};
+
+static struct qcom_icc_node qss_cnoc_pcie_slave_west_cfg = {
+ .name = "qss_cnoc_pcie_slave_west_cfg",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qsm_cnoc_pcie_west_slave_cfg },
+};
+
+static struct qcom_icc_node qss_mnoc_cfg = {
+ .name = "qss_mnoc_cfg",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qsm_mnoc_cfg },
+};
+
+static struct qcom_icc_node qss_pcie_east_anoc_cfg = {
+ .name = "qss_pcie_east_anoc_cfg",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qsm_pcie_east_anoc_cfg },
+};
+
+static struct qcom_icc_node qss_pcie_west_anoc_cfg = {
+ .name = "qss_pcie_west_anoc_cfg",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qsm_pcie_west_anoc_cfg },
+};
+
+static struct qcom_icc_node qns_llcc = {
+ .name = "qns_llcc",
+ .channels = 12,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &llcc_mc },
+};
+
+static struct qcom_icc_node qns_pcie_east = {
+ .name = "qns_pcie_east",
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_hscnoc_pcie_east },
+};
+
+static struct qcom_icc_node qns_pcie_west = {
+ .name = "qns_pcie_west",
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_hscnoc_pcie_west },
+};
+
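+/*
+ * Configuration-NoC fan-out: one master distributing register accesses
+ * to all 51 peripheral configuration slaves defined above.
+ */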
+static struct qcom_icc_node qsm_cfg = {
+ .name = "qsm_cfg",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 51,
+ .link_nodes = (struct qcom_icc_node *[]) { &qhs_ahb2phy0, &qhs_ahb2phy1,
+ &qhs_ahb2phy2, &qhs_ahb2phy3,
+ &qhs_av1_enc_cfg, &qhs_camera_cfg,
+ &qhs_clk_ctl, &qhs_crypto0_cfg,
+ &qhs_display_cfg, &qhs_gpuss_cfg,
+ &qhs_imem_cfg, &qhs_pcie0_cfg,
+ &qhs_pcie1_cfg, &qhs_pcie2_cfg,
+ &qhs_pcie3a_cfg, &qhs_pcie3b_cfg,
+ &qhs_pcie4_cfg, &qhs_pcie5_cfg,
+ &qhs_pcie6_cfg, &qhs_pcie_rscc,
+ &qhs_pdm, &qhs_prng,
+ &qhs_qdss_cfg, &qhs_qspi,
+ &qhs_qup0, &qhs_qup1,
+ &qhs_qup2, &qhs_sdc2,
+ &qhs_sdc4, &qhs_smmuv3_cfg,
+ &qhs_tcsr, &qhs_tlmm,
+ &qhs_ufs_mem_cfg, &qhs_usb2_0_cfg,
+ &qhs_usb3_0_cfg, &qhs_usb3_1_cfg,
+ &qhs_usb3_2_cfg, &qhs_usb3_mp_cfg,
+ &qhs_usb4_0_cfg, &qhs_usb4_1_cfg,
+ &qhs_usb4_2_cfg, &qhs_venus_cfg,
+ &qss_cnoc_pcie_slave_east_cfg, &qss_cnoc_pcie_slave_west_cfg,
+ &qss_lpass_qtb_cfg, &qss_mnoc_cfg,
+ &qss_nsp_qtb_cfg, &qss_pcie_east_anoc_cfg,
+ &qss_pcie_west_anoc_cfg, &xs_qdss_stm,
+ &xs_sys_tcu_cfg },
+};
+
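+/*
+ * First node with QoS programming: the qosbox lists the offsets of this
+ * node's QoS ports and the priority/urgency-forwarding settings that the
+ * icc-rpmh core writes to them when the provider probes.
+ */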
+static struct qcom_icc_node xm_gic = {
+ .name = "xm_gic",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x33000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_llcc },
+};
+
+static struct qcom_icc_node qss_cfg = {
+ .name = "qss_cfg",
+ .channels = 1,
+ .buswidth = 4,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qsm_cfg },
+};
+
+static struct qcom_icc_node qnm_hscnoc_cnoc = {
+ .name = "qnm_hscnoc_cnoc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 8,
+ .link_nodes = (struct qcom_icc_node *[]) { &qhs_aoss, &qhs_ipc_router,
+ &qhs_soccp, &qhs_tme_cfg,
+ &qns_apss, &qss_cfg,
+ &qxs_boot_imem, &qxs_imem },
+};
+
+static struct qcom_icc_node qns_hscnoc_cnoc = {
+ .name = "qns_hscnoc_cnoc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_hscnoc_cnoc },
+};
+
+static struct qcom_icc_node alm_gpu_tcu = {
+ .name = "alm_gpu_tcu",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x933000 },
+ .prio = 1,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 2,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_hscnoc_cnoc, &qns_llcc },
+};
+
+static struct qcom_icc_node alm_pcie_qtc = {
+ .name = "alm_pcie_qtc",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x51f000 },
+ .prio = 3,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 2,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_hscnoc_cnoc, &qns_llcc },
+};
+
+static struct qcom_icc_node alm_sys_tcu = {
+ .name = "alm_sys_tcu",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x51f080 },
+ .prio = 6,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 2,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_hscnoc_cnoc, &qns_llcc },
+};
+
+static struct qcom_icc_node chm_apps = {
+ .name = "chm_apps",
+ .channels = 6,
+ .buswidth = 32,
+ .num_links = 4,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_hscnoc_cnoc, &qns_llcc,
+ &qns_pcie_east, &qns_pcie_west },
+};
+
+static struct qcom_icc_node qnm_aggre_noc_east = {
+ .name = "qnm_aggre_noc_east",
+ .channels = 1,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x934000 },
+ .prio = 2,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 4,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_hscnoc_cnoc, &qns_llcc,
+ &qns_pcie_east, &qns_pcie_west },
+};
+
+static struct qcom_icc_node qnm_gpu = {
+ .name = "qnm_gpu",
+ .channels = 4,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 4,
+ .port_offsets = { 0x935000, 0x936000, 0x937000, 0x938000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 4,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_hscnoc_cnoc, &qns_llcc,
+ &qns_pcie_east, &qns_pcie_west },
+};
+
+static struct qcom_icc_node qnm_lpass = {
+ .name = "qnm_lpass",
+ .channels = 1,
+ .buswidth = 16,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x939000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 4,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_hscnoc_cnoc, &qns_llcc,
+ &qns_pcie_east, &qns_pcie_west },
+};
+
+static struct qcom_icc_node qnm_mnoc_hf = {
+ .name = "qnm_mnoc_hf",
+ .channels = 2,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 2,
+ .port_offsets = { 0x721000, 0x721080 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 4,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_hscnoc_cnoc, &qns_llcc,
+ &qns_pcie_east, &qns_pcie_west },
+};
+
+static struct qcom_icc_node qnm_mnoc_sf = {
+ .name = "qnm_mnoc_sf",
+ .channels = 2,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 2,
+ .port_offsets = { 0x721100, 0x721180 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 4,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_hscnoc_cnoc, &qns_llcc,
+ &qns_pcie_east, &qns_pcie_west },
+};
+
+static struct qcom_icc_node qnm_nsp_noc = {
+ .name = "qnm_nsp_noc",
+ .channels = 4,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 4,
+ .port_offsets = { 0x816000, 0x816080, 0x816100, 0x816180 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 4,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_hscnoc_cnoc, &qns_llcc,
+ &qns_pcie_east, &qns_pcie_west },
+};
+
+static struct qcom_icc_node qnm_pcie_east = {
+ .name = "qnm_pcie_east",
+ .channels = 1,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x93a000 },
+ .prio = 2,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 2,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_hscnoc_cnoc, &qns_llcc },
+};
+
+static struct qcom_icc_node qnm_pcie_west = {
+ .name = "qnm_pcie_west",
+ .channels = 1,
+ .buswidth = 64,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x721200 },
+ .prio = 2,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 2,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_hscnoc_cnoc, &qns_llcc },
+};
+
+static struct qcom_icc_node qnm_snoc_sf = {
+ .name = "qnm_snoc_sf",
+ .channels = 1,
+ .buswidth = 64,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x51f100 },
+ .prio = 2,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 4,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_hscnoc_cnoc, &qns_llcc,
+ &qns_pcie_east, &qns_pcie_west },
+};
+
+static struct qcom_icc_node qxm_wlan_q6 = {
+ .name = "qxm_wlan_q6",
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 4,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_hscnoc_cnoc, &qns_llcc,
+ &qns_pcie_east, &qns_pcie_west },
+};
+
+static struct qcom_icc_node qns_a4noc_hscnoc = {
+ .name = "qns_a4noc_hscnoc",
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_aggre_noc_east },
+};
+
+static struct qcom_icc_node qns_lpass_ag_noc_gemnoc = {
+ .name = "qns_lpass_ag_noc_gemnoc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_lpass },
+};
+
+static struct qcom_icc_node qns_mem_noc_hf = {
+ .name = "qns_mem_noc_hf",
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_mnoc_hf },
+};
+
+static struct qcom_icc_node qns_mem_noc_sf = {
+ .name = "qns_mem_noc_sf",
+ .channels = 2,
+ .buswidth = 32,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_mnoc_sf },
+};
+
+static struct qcom_icc_node qns_nsp_hscnoc = {
+ .name = "qns_nsp_hscnoc",
+ .channels = 4,
+ .buswidth = 32,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_nsp_noc },
+};
+
+static struct qcom_icc_node qns_pcie_east_mem_noc = {
+ .name = "qns_pcie_east_mem_noc",
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_pcie_east },
+};
+
+static struct qcom_icc_node qns_pcie_west_mem_noc = {
+ .name = "qns_pcie_west_mem_noc",
+ .channels = 1,
+ .buswidth = 64,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_pcie_west },
+};
+
+static struct qcom_icc_node qns_gemnoc_sf = {
+ .name = "qns_gemnoc_sf",
+ .channels = 1,
+ .buswidth = 64,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_snoc_sf },
+};
+
+static struct qcom_icc_node xm_usb3_0 = {
+ .name = "xm_usb3_0",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xa000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a4noc_hscnoc },
+};
+
+static struct qcom_icc_node xm_usb3_1 = {
+ .name = "xm_usb3_1",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xb000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a4noc_hscnoc },
+};
+
+static struct qcom_icc_node xm_usb4_0 = {
+ .name = "xm_usb4_0",
+ .channels = 1,
+ .buswidth = 16,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xc000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a4noc_hscnoc },
+};
+
+static struct qcom_icc_node xm_usb4_1 = {
+ .name = "xm_usb4_1",
+ .channels = 1,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xd000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a4noc_hscnoc },
+};
+
+static struct qcom_icc_node qnm_lpiaon_noc = {
+ .name = "qnm_lpiaon_noc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_lpass_ag_noc_gemnoc },
+};
+
+static struct qcom_icc_node qnm_av1_enc = {
+ .name = "qnm_av1_enc",
+ .channels = 1,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x30000 },
+ .prio = 4,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_node qnm_camnoc_hf = {
+ .name = "qnm_camnoc_hf",
+ .channels = 2,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 2,
+ .port_offsets = { 0x29000, 0x2a000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_hf },
+};
+
+static struct qcom_icc_node qnm_camnoc_icp = {
+ .name = "qnm_camnoc_icp",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x2b000 },
+ .prio = 4,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_node qnm_camnoc_sf = {
+ .name = "qnm_camnoc_sf",
+ .channels = 2,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 2,
+ .port_offsets = { 0x2c000, 0x2d000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_node qnm_eva = {
+ .name = "qnm_eva",
+ .channels = 1,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x34000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_node qnm_mdp = {
+ .name = "qnm_mdp",
+ .channels = 2,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 2,
+ .port_offsets = { 0x2e000, 0x2f000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_hf },
+};
+
+static struct qcom_icc_node qnm_vapss_hcp = {
+ .name = "qnm_vapss_hcp",
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_node qnm_video = {
+ .name = "qnm_video",
+ .channels = 4,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 4,
+ .port_offsets = { 0x31000, 0x32000, 0x37000, 0x38000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_node qnm_video_cv_cpu = {
+ .name = "qnm_video_cv_cpu",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x33000 },
+ .prio = 4,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_node qnm_video_v_cpu = {
+ .name = "qnm_video_v_cpu",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x35000 },
+ .prio = 4,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_node qnm_nsp = {
+ .name = "qnm_nsp",
+ .channels = 4,
+ .buswidth = 32,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_nsp_hscnoc },
+};
+
+static struct qcom_icc_node xm_pcie_0 = {
+ .name = "xm_pcie_0",
+ .channels = 1,
+ .buswidth = 16,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xb000 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_pcie_east_mem_noc },
+};
+
+static struct qcom_icc_node xm_pcie_1 = {
+ .name = "xm_pcie_1",
+ .channels = 1,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xc000 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_pcie_east_mem_noc },
+};
+
+static struct qcom_icc_node xm_pcie_5 = {
+ .name = "xm_pcie_5",
+ .channels = 1,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xd000 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_pcie_east_mem_noc },
+};
+
+static struct qcom_icc_node xm_pcie_2 = {
+ .name = "xm_pcie_2",
+ .channels = 1,
+ .buswidth = 16,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xd000 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_pcie_west_mem_noc },
+};
+
+static struct qcom_icc_node xm_pcie_3a = {
+ .name = "xm_pcie_3a",
+ .channels = 1,
+ .buswidth = 64,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xd200 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_pcie_west_mem_noc },
+};
+
+static struct qcom_icc_node xm_pcie_3b = {
+ .name = "xm_pcie_3b",
+ .channels = 1,
+ .buswidth = 32,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xd400 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_pcie_west_mem_noc },
+};
+
+static struct qcom_icc_node xm_pcie_4 = {
+ .name = "xm_pcie_4",
+ .channels = 1,
+ .buswidth = 16,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xd600 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_pcie_west_mem_noc },
+};
+
+static struct qcom_icc_node xm_pcie_6 = {
+ .name = "xm_pcie_6",
+ .channels = 1,
+ .buswidth = 16,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xd800 },
+ .prio = 2,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 0,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_pcie_west_mem_noc },
+};
+
+static struct qcom_icc_node qnm_aggre1_noc = {
+ .name = "qnm_aggre1_noc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_node qnm_aggre2_noc = {
+ .name = "qnm_aggre2_noc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_node qnm_aggre3_noc = {
+ .name = "qnm_aggre3_noc",
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_node qnm_nsi_noc = {
+ .name = "qnm_nsi_noc",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x1c000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_node qnm_oobmss = {
+ .name = "qnm_oobmss",
+ .channels = 1,
+ .buswidth = 16,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x1b000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_node qns_a1noc_snoc = {
+ .name = "qns_a1noc_snoc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_aggre1_noc },
+};
+
+static struct qcom_icc_node qns_a2noc_snoc = {
+ .name = "qns_a2noc_snoc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_aggre2_noc },
+};
+
+static struct qcom_icc_node qns_a3noc_snoc = {
+ .name = "qns_a3noc_snoc",
+ .channels = 1,
+ .buswidth = 32,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_aggre3_noc },
+};
+
+static struct qcom_icc_node qns_lpass_aggnoc = {
+ .name = "qns_lpass_aggnoc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_lpiaon_noc },
+};
+
+static struct qcom_icc_node qns_system_noc = {
+ .name = "qns_system_noc",
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_nsi_noc },
+};
+
+static struct qcom_icc_node qns_oobmss_snoc = {
+ .name = "qns_oobmss_snoc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_oobmss },
+};
+
+static struct qcom_icc_node qxm_crypto = {
+ .name = "qxm_crypto",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xb000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a1noc_snoc },
+};
+
+static struct qcom_icc_node qxm_soccp = {
+ .name = "qxm_soccp",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xe000 },
+ .prio = 0,
+ .urg_fwd = 1,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a1noc_snoc },
+};
+
+static struct qcom_icc_node xm_qdss_etr_0 = {
+ .name = "xm_qdss_etr_0",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xc000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a1noc_snoc },
+};
+
+static struct qcom_icc_node xm_qdss_etr_1 = {
+ .name = "xm_qdss_etr_1",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xd000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a1noc_snoc },
+};
+
+static struct qcom_icc_node xm_ufs_mem = {
+ .name = "xm_ufs_mem",
+ .channels = 1,
+ .buswidth = 16,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0xa000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a2noc_snoc },
+};
+
+static struct qcom_icc_node xm_usb3_2 = {
+ .name = "xm_usb3_2",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x8000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a2noc_snoc },
+};
+
+static struct qcom_icc_node xm_usb4_2 = {
+ .name = "xm_usb4_2",
+ .channels = 1,
+ .buswidth = 16,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x9000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a2noc_snoc },
+};
+
+static struct qcom_icc_node qhm_qspi = {
+ .name = "qhm_qspi",
+ .channels = 1,
+ .buswidth = 4,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x10000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a3noc_snoc },
+};
+
+static struct qcom_icc_node qhm_qup0 = {
+ .name = "qhm_qup0",
+ .channels = 1,
+ .buswidth = 4,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x11000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a3noc_snoc },
+};
+
+static struct qcom_icc_node qhm_qup1 = {
+ .name = "qhm_qup1",
+ .channels = 1,
+ .buswidth = 4,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x12000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a3noc_snoc },
+};
+
+static struct qcom_icc_node qhm_qup2 = {
+ .name = "qhm_qup2",
+ .channels = 1,
+ .buswidth = 4,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x13000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a3noc_snoc },
+};
+
+static struct qcom_icc_node qxm_sp = {
+ .name = "qxm_sp",
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a3noc_snoc },
+};
+
+static struct qcom_icc_node xm_sdc2 = {
+ .name = "xm_sdc2",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x18000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a3noc_snoc },
+};
+
+static struct qcom_icc_node xm_sdc4 = {
+ .name = "xm_sdc4",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x14000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a3noc_snoc },
+};
+
+static struct qcom_icc_node xm_usb2_0 = {
+ .name = "xm_usb2_0",
+ .channels = 1,
+ .buswidth = 8,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x15000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a3noc_snoc },
+};
+
+static struct qcom_icc_node xm_usb3_mp = {
+ .name = "xm_usb3_mp",
+ .channels = 1,
+ .buswidth = 16,
+ .qosbox = &(const struct qcom_icc_qosbox) {
+ .num_ports = 1,
+ .port_offsets = { 0x16000 },
+ .prio = 0,
+ .urg_fwd = 0,
+ .prio_fwd_disable = 1,
+ },
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_a3noc_snoc },
+};
+
+static struct qcom_icc_node qnm_lpass_lpinoc = {
+ .name = "qnm_lpass_lpinoc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_lpass_aggnoc },
+};
+
+static struct qcom_icc_node xm_cpucp = {
+ .name = "xm_cpucp",
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 2,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_system_noc, &srvc_nsinoc },
+};
+
+static struct qcom_icc_node xm_mem_sp = {
+ .name = "xm_mem_sp",
+ .channels = 1,
+ .buswidth = 8,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_oobmss_snoc },
+};
+
+static struct qcom_icc_node qns_lpi_aon_noc = {
+ .name = "qns_lpi_aon_noc",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qnm_lpass_lpinoc },
+};
+
+static struct qcom_icc_node qnm_lpinoc_dsp_qns4m = {
+ .name = "qnm_lpinoc_dsp_qns4m",
+ .channels = 1,
+ .buswidth = 16,
+ .num_links = 1,
+ .link_nodes = (struct qcom_icc_node *[]) { &qns_lpi_aon_noc },
+};
+
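+/*
+ * Bus Clock Manager (BCM) votes: each BCM folds the bandwidth requests
+ * of its member nodes into a single RPMh vote; keepalive BCMs retain a
+ * minimum vote even when no consumer is active.
+ */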
+static struct qcom_icc_bcm bcm_acv = {
+ .name = "ACV",
+ .enable_mask = BIT(3),
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_ce0 = {
+ .name = "CE0",
+ .num_nodes = 1,
+ .nodes = { &qxm_crypto },
+};
+
+static struct qcom_icc_bcm bcm_cn0 = {
+ .name = "CN0",
+ .keepalive = true,
+ .enable_mask = BIT(0),
+ .num_nodes = 60,
+ .nodes = { &qsm_cfg, &qhs_ahb2phy0,
+ &qhs_ahb2phy1, &qhs_ahb2phy2,
+ &qhs_ahb2phy3, &qhs_av1_enc_cfg,
+ &qhs_camera_cfg, &qhs_clk_ctl,
+ &qhs_crypto0_cfg, &qhs_gpuss_cfg,
+ &qhs_imem_cfg, &qhs_pcie0_cfg,
+ &qhs_pcie1_cfg, &qhs_pcie2_cfg,
+ &qhs_pcie3a_cfg, &qhs_pcie3b_cfg,
+ &qhs_pcie4_cfg, &qhs_pcie5_cfg,
+ &qhs_pcie6_cfg, &qhs_pcie_rscc,
+ &qhs_pdm, &qhs_prng,
+ &qhs_qdss_cfg, &qhs_qspi,
+ &qhs_qup0, &qhs_qup1,
+ &qhs_qup2, &qhs_sdc2,
+ &qhs_sdc4, &qhs_smmuv3_cfg,
+ &qhs_tcsr, &qhs_tlmm,
+ &qhs_ufs_mem_cfg, &qhs_usb2_0_cfg,
+ &qhs_usb3_0_cfg, &qhs_usb3_1_cfg,
+ &qhs_usb3_2_cfg, &qhs_usb3_mp_cfg,
+ &qhs_usb4_0_cfg, &qhs_usb4_1_cfg,
+ &qhs_usb4_2_cfg, &qhs_venus_cfg,
+ &qss_cnoc_pcie_slave_east_cfg, &qss_cnoc_pcie_slave_west_cfg,
+ &qss_lpass_qtb_cfg, &qss_mnoc_cfg,
+ &qss_nsp_qtb_cfg, &qss_pcie_east_anoc_cfg,
+ &qss_pcie_west_anoc_cfg, &xs_qdss_stm,
+ &xs_sys_tcu_cfg, &qnm_hscnoc_cnoc,
+ &qhs_aoss, &qhs_ipc_router,
+ &qhs_soccp, &qhs_tme_cfg,
+ &qns_apss, &qss_cfg,
+ &qxs_boot_imem, &qxs_imem },
+};
+
+static struct qcom_icc_bcm bcm_cn1 = {
+ .name = "CN1",
+ .num_nodes = 1,
+ .nodes = { &qhs_display_cfg },
+};
+
+static struct qcom_icc_bcm bcm_co0 = {
+ .name = "CO0",
+ .enable_mask = BIT(0),
+ .num_nodes = 2,
+ .nodes = { &qnm_nsp, &qns_nsp_hscnoc },
+};
+
+static struct qcom_icc_bcm bcm_lp0 = {
+ .name = "LP0",
+ .num_nodes = 2,
+ .nodes = { &qnm_lpass_lpinoc, &qns_lpass_aggnoc },
+};
+
+static struct qcom_icc_bcm bcm_mc0 = {
+ .name = "MC0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &ebi },
+};
+
+static struct qcom_icc_bcm bcm_mm0 = {
+ .name = "MM0",
+ .num_nodes = 1,
+ .nodes = { &qns_mem_noc_hf },
+};
+
+static struct qcom_icc_bcm bcm_mm1 = {
+ .name = "MM1",
+ .enable_mask = BIT(0),
+ .num_nodes = 11,
+ .nodes = { &qnm_av1_enc, &qnm_camnoc_hf,
+ &qnm_camnoc_icp, &qnm_camnoc_sf,
+ &qnm_eva, &qnm_mdp,
+ &qnm_vapss_hcp, &qnm_video,
+ &qnm_video_cv_cpu, &qnm_video_v_cpu,
+ &qns_mem_noc_sf },
+};
+
+static struct qcom_icc_bcm bcm_qup0 = {
+ .name = "QUP0",
+ .vote_scale = 1,
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qup0_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_qup1 = {
+ .name = "QUP1",
+ .vote_scale = 1,
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qup1_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_qup2 = {
+ .name = "QUP2",
+ .vote_scale = 1,
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qup2_core_slave },
+};
+
+static struct qcom_icc_bcm bcm_sh0 = {
+ .name = "SH0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_llcc },
+};
+
+static struct qcom_icc_bcm bcm_sh1 = {
+ .name = "SH1",
+ .enable_mask = BIT(0),
+ .num_nodes = 18,
+ .nodes = { &alm_gpu_tcu, &alm_pcie_qtc,
+ &alm_sys_tcu, &chm_apps,
+ &qnm_aggre_noc_east, &qnm_gpu,
+ &qnm_lpass, &qnm_mnoc_hf,
+ &qnm_mnoc_sf, &qnm_nsp_noc,
+ &qnm_pcie_east, &qnm_pcie_west,
+ &qnm_snoc_sf, &qxm_wlan_q6,
+ &xm_gic, &qns_hscnoc_cnoc,
+ &qns_pcie_east, &qns_pcie_west },
+};
+
+static struct qcom_icc_bcm bcm_sn0 = {
+ .name = "SN0",
+ .keepalive = true,
+ .num_nodes = 1,
+ .nodes = { &qns_gemnoc_sf },
+};
+
+static struct qcom_icc_bcm bcm_sn1 = {
+ .name = "SN1",
+ .enable_mask = BIT(0),
+ .num_nodes = 1,
+ .nodes = { &qnm_oobmss },
+};
+
+static struct qcom_icc_bcm bcm_sn2 = {
+ .name = "SN2",
+ .num_nodes = 1,
+ .nodes = { &qnm_aggre1_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn3 = {
+ .name = "SN3",
+ .num_nodes = 1,
+ .nodes = { &qnm_aggre2_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn4 = {
+ .name = "SN4",
+ .num_nodes = 1,
+ .nodes = { &qnm_aggre3_noc },
+};
+
+static struct qcom_icc_bcm bcm_sn5 = {
+ .name = "SN5",
+ .num_nodes = 1,
+ .nodes = { &qns_a4noc_hscnoc },
+};
+
+static struct qcom_icc_bcm bcm_sn6 = {
+ .name = "SN6",
+ .num_nodes = 4,
+ .nodes = { &qns_pcie_east_mem_noc, &qnm_hscnoc_pcie_east,
+ &qns_pcie_west_mem_noc, &qnm_hscnoc_pcie_west },
+};
+
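+/*
+ * Per-NoC provider tables: each descriptor pairs the node and BCM lists
+ * with a regmap_config covering that NoC's register space.
+ */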
+static struct qcom_icc_bcm * const aggre1_noc_bcms[] = {
+ &bcm_ce0,
+};
+
+static struct qcom_icc_node * const aggre1_noc_nodes[] = {
+ [MASTER_CRYPTO] = &qxm_crypto,
+ [MASTER_SOCCP_PROC] = &qxm_soccp,
+ [MASTER_QDSS_ETR] = &xm_qdss_etr_0,
+ [MASTER_QDSS_ETR_1] = &xm_qdss_etr_1,
+ [SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc,
+};
+
+static const struct regmap_config glymur_aggre1_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x14400,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_aggre1_noc = {
+ .config = &glymur_aggre1_noc_regmap_config,
+ .nodes = aggre1_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre1_noc_nodes),
+ .bcms = aggre1_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre1_noc_bcms),
+ .alloc_dyn_id = true,
+};
+
+static struct qcom_icc_node * const aggre2_noc_nodes[] = {
+ [MASTER_UFS_MEM] = &xm_ufs_mem,
+ [MASTER_USB3_2] = &xm_usb3_2,
+ [MASTER_USB4_2] = &xm_usb4_2,
+ [SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc,
+};
+
+static const struct regmap_config glymur_aggre2_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x14400,
+ .fast_io = true,
+};
+
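+/*
+ * The QoS registers of this NoC are only reachable while its interface
+ * clocks run, hence qos_requires_clocks below.
+ */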
+static const struct qcom_icc_desc glymur_aggre2_noc = {
+ .config = &glymur_aggre2_noc_regmap_config,
+ .nodes = aggre2_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre2_noc_nodes),
+ .alloc_dyn_id = true,
+ .qos_requires_clocks = true,
+};
+
+static struct qcom_icc_node * const aggre3_noc_nodes[] = {
+ [MASTER_QSPI_0] = &qhm_qspi,
+ [MASTER_QUP_0] = &qhm_qup0,
+ [MASTER_QUP_1] = &qhm_qup1,
+ [MASTER_QUP_2] = &qhm_qup2,
+ [MASTER_SP] = &qxm_sp,
+ [MASTER_SDCC_2] = &xm_sdc2,
+ [MASTER_SDCC_4] = &xm_sdc4,
+ [MASTER_USB2] = &xm_usb2_0,
+ [MASTER_USB3_MP] = &xm_usb3_mp,
+ [SLAVE_A3NOC_SNOC] = &qns_a3noc_snoc,
+};
+
+static const struct regmap_config glymur_aggre3_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1d400,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_aggre3_noc = {
+ .config = &glymur_aggre3_noc_regmap_config,
+ .nodes = aggre3_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre3_noc_nodes),
+ .alloc_dyn_id = true,
+};
+
+static struct qcom_icc_bcm * const aggre4_noc_bcms[] = {
+ &bcm_sn5,
+};
+
+static struct qcom_icc_node * const aggre4_noc_nodes[] = {
+ [MASTER_USB3_0] = &xm_usb3_0,
+ [MASTER_USB3_1] = &xm_usb3_1,
+ [MASTER_USB4_0] = &xm_usb4_0,
+ [MASTER_USB4_1] = &xm_usb4_1,
+ [SLAVE_A4NOC_HSCNOC] = &qns_a4noc_hscnoc,
+};
+
+static const struct regmap_config glymur_aggre4_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x14400,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_aggre4_noc = {
+ .config = &glymur_aggre4_noc_regmap_config,
+ .nodes = aggre4_noc_nodes,
+ .num_nodes = ARRAY_SIZE(aggre4_noc_nodes),
+ .bcms = aggre4_noc_bcms,
+ .num_bcms = ARRAY_SIZE(aggre4_noc_bcms),
+ .alloc_dyn_id = true,
+ .qos_requires_clocks = true,
+};
+
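+/*
+ * clk_virt (and mc_virt further down) has no register space of its own,
+ * hence no regmap_config; these providers exist only to carry the QUP
+ * core-clock and DDR bandwidth votes.
+ */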
+static struct qcom_icc_bcm * const clk_virt_bcms[] = {
+ &bcm_qup0,
+ &bcm_qup1,
+ &bcm_qup2,
+};
+
+static struct qcom_icc_node * const clk_virt_nodes[] = {
+ [MASTER_QUP_CORE_0] = &qup0_core_master,
+ [MASTER_QUP_CORE_1] = &qup1_core_master,
+ [MASTER_QUP_CORE_2] = &qup2_core_master,
+ [SLAVE_QUP_CORE_0] = &qup0_core_slave,
+ [SLAVE_QUP_CORE_1] = &qup1_core_slave,
+ [SLAVE_QUP_CORE_2] = &qup2_core_slave,
+};
+
+static const struct qcom_icc_desc glymur_clk_virt = {
+ .nodes = clk_virt_nodes,
+ .num_nodes = ARRAY_SIZE(clk_virt_nodes),
+ .bcms = clk_virt_bcms,
+ .num_bcms = ARRAY_SIZE(clk_virt_bcms),
+ .alloc_dyn_id = true,
+};
+
+static struct qcom_icc_bcm * const cnoc_cfg_bcms[] = {
+ &bcm_cn0,
+ &bcm_cn1,
+};
+
+static struct qcom_icc_node * const cnoc_cfg_nodes[] = {
+ [MASTER_CNOC_CFG] = &qsm_cfg,
+ [SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy0,
+ [SLAVE_AHB2PHY_NORTH] = &qhs_ahb2phy1,
+ [SLAVE_AHB2PHY_2] = &qhs_ahb2phy2,
+ [SLAVE_AHB2PHY_3] = &qhs_ahb2phy3,
+ [SLAVE_AV1_ENC_CFG] = &qhs_av1_enc_cfg,
+ [SLAVE_CAMERA_CFG] = &qhs_camera_cfg,
+ [SLAVE_CLK_CTL] = &qhs_clk_ctl,
+ [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg,
+ [SLAVE_DISPLAY_CFG] = &qhs_display_cfg,
+ [SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg,
+ [SLAVE_IMEM_CFG] = &qhs_imem_cfg,
+ [SLAVE_PCIE_0_CFG] = &qhs_pcie0_cfg,
+ [SLAVE_PCIE_1_CFG] = &qhs_pcie1_cfg,
+ [SLAVE_PCIE_2_CFG] = &qhs_pcie2_cfg,
+ [SLAVE_PCIE_3A_CFG] = &qhs_pcie3a_cfg,
+ [SLAVE_PCIE_3B_CFG] = &qhs_pcie3b_cfg,
+ [SLAVE_PCIE_4_CFG] = &qhs_pcie4_cfg,
+ [SLAVE_PCIE_5_CFG] = &qhs_pcie5_cfg,
+ [SLAVE_PCIE_6_CFG] = &qhs_pcie6_cfg,
+ [SLAVE_PCIE_RSCC] = &qhs_pcie_rscc,
+ [SLAVE_PDM] = &qhs_pdm,
+ [SLAVE_PRNG] = &qhs_prng,
+ [SLAVE_QDSS_CFG] = &qhs_qdss_cfg,
+ [SLAVE_QSPI_0] = &qhs_qspi,
+ [SLAVE_QUP_0] = &qhs_qup0,
+ [SLAVE_QUP_1] = &qhs_qup1,
+ [SLAVE_QUP_2] = &qhs_qup2,
+ [SLAVE_SDCC_2] = &qhs_sdc2,
+ [SLAVE_SDCC_4] = &qhs_sdc4,
+ [SLAVE_SMMUV3_CFG] = &qhs_smmuv3_cfg,
+ [SLAVE_TCSR] = &qhs_tcsr,
+ [SLAVE_TLMM] = &qhs_tlmm,
+ [SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg,
+ [SLAVE_USB2] = &qhs_usb2_0_cfg,
+ [SLAVE_USB3_0] = &qhs_usb3_0_cfg,
+ [SLAVE_USB3_1] = &qhs_usb3_1_cfg,
+ [SLAVE_USB3_2] = &qhs_usb3_2_cfg,
+ [SLAVE_USB3_MP] = &qhs_usb3_mp_cfg,
+ [SLAVE_USB4_0] = &qhs_usb4_0_cfg,
+ [SLAVE_USB4_1] = &qhs_usb4_1_cfg,
+ [SLAVE_USB4_2] = &qhs_usb4_2_cfg,
+ [SLAVE_VENUS_CFG] = &qhs_venus_cfg,
+ [SLAVE_CNOC_PCIE_SLAVE_EAST_CFG] = &qss_cnoc_pcie_slave_east_cfg,
+ [SLAVE_CNOC_PCIE_SLAVE_WEST_CFG] = &qss_cnoc_pcie_slave_west_cfg,
+ [SLAVE_LPASS_QTB_CFG] = &qss_lpass_qtb_cfg,
+ [SLAVE_CNOC_MNOC_CFG] = &qss_mnoc_cfg,
+ [SLAVE_NSP_QTB_CFG] = &qss_nsp_qtb_cfg,
+ [SLAVE_PCIE_EAST_ANOC_CFG] = &qss_pcie_east_anoc_cfg,
+ [SLAVE_PCIE_WEST_ANOC_CFG] = &qss_pcie_west_anoc_cfg,
+ [SLAVE_QDSS_STM] = &xs_qdss_stm,
+ [SLAVE_TCU] = &xs_sys_tcu_cfg,
+};
+
+static const struct regmap_config glymur_cnoc_cfg_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x6600,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_cnoc_cfg = {
+ .config = &glymur_cnoc_cfg_regmap_config,
+ .nodes = cnoc_cfg_nodes,
+ .num_nodes = ARRAY_SIZE(cnoc_cfg_nodes),
+ .bcms = cnoc_cfg_bcms,
+ .num_bcms = ARRAY_SIZE(cnoc_cfg_bcms),
+ .alloc_dyn_id = true,
+};
+
+static struct qcom_icc_bcm * const cnoc_main_bcms[] = {
+ &bcm_cn0,
+};
+
+static struct qcom_icc_node * const cnoc_main_nodes[] = {
+ [MASTER_HSCNOC_CNOC] = &qnm_hscnoc_cnoc,
+ [SLAVE_AOSS] = &qhs_aoss,
+ [SLAVE_IPC_ROUTER_CFG] = &qhs_ipc_router,
+ [SLAVE_SOCCP] = &qhs_soccp,
+ [SLAVE_TME_CFG] = &qhs_tme_cfg,
+ [SLAVE_APPSS] = &qns_apss,
+ [SLAVE_CNOC_CFG] = &qss_cfg,
+ [SLAVE_BOOT_IMEM] = &qxs_boot_imem,
+ [SLAVE_IMEM] = &qxs_imem,
+};
+
+static const struct regmap_config glymur_cnoc_main_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x17080,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_cnoc_main = {
+ .config = &glymur_cnoc_main_regmap_config,
+ .nodes = cnoc_main_nodes,
+ .num_nodes = ARRAY_SIZE(cnoc_main_nodes),
+ .bcms = cnoc_main_bcms,
+ .num_bcms = ARRAY_SIZE(cnoc_main_bcms),
+ .alloc_dyn_id = true,
+};
+
+static struct qcom_icc_bcm * const hscnoc_bcms[] = {
+ &bcm_sh0,
+ &bcm_sh1,
+};
+
+static struct qcom_icc_node * const hscnoc_nodes[] = {
+ [MASTER_GPU_TCU] = &alm_gpu_tcu,
+ [MASTER_PCIE_TCU] = &alm_pcie_qtc,
+ [MASTER_SYS_TCU] = &alm_sys_tcu,
+ [MASTER_APPSS_PROC] = &chm_apps,
+ [MASTER_AGGRE_NOC_EAST] = &qnm_aggre_noc_east,
+ [MASTER_GFX3D] = &qnm_gpu,
+ [MASTER_LPASS_GEM_NOC] = &qnm_lpass,
+ [MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf,
+ [MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf,
+ [MASTER_COMPUTE_NOC] = &qnm_nsp_noc,
+ [MASTER_PCIE_EAST] = &qnm_pcie_east,
+ [MASTER_PCIE_WEST] = &qnm_pcie_west,
+ [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf,
+ [MASTER_WLAN_Q6] = &qxm_wlan_q6,
+ [MASTER_GIC] = &xm_gic,
+ [SLAVE_HSCNOC_CNOC] = &qns_hscnoc_cnoc,
+ [SLAVE_LLCC] = &qns_llcc,
+ [SLAVE_PCIE_EAST] = &qns_pcie_east,
+ [SLAVE_PCIE_WEST] = &qns_pcie_west,
+};
+
+static const struct regmap_config glymur_hscnoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x93a080,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_hscnoc = {
+ .config = &glymur_hscnoc_regmap_config,
+ .nodes = hscnoc_nodes,
+ .num_nodes = ARRAY_SIZE(hscnoc_nodes),
+ .bcms = hscnoc_bcms,
+ .num_bcms = ARRAY_SIZE(hscnoc_bcms),
+ .alloc_dyn_id = true,
+};
+
+static struct qcom_icc_node * const lpass_ag_noc_nodes[] = {
+ [MASTER_LPIAON_NOC] = &qnm_lpiaon_noc,
+ [SLAVE_LPASS_GEM_NOC] = &qns_lpass_ag_noc_gemnoc,
+};
+
+static const struct regmap_config glymur_lpass_ag_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xe080,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_lpass_ag_noc = {
+ .config = &glymur_lpass_ag_noc_regmap_config,
+ .nodes = lpass_ag_noc_nodes,
+ .num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes),
+ .alloc_dyn_id = true,
+};
+
+static struct qcom_icc_bcm * const lpass_lpiaon_noc_bcms[] = {
+ &bcm_lp0,
+};
+
+static struct qcom_icc_node * const lpass_lpiaon_noc_nodes[] = {
+ [MASTER_LPASS_LPINOC] = &qnm_lpass_lpinoc,
+ [SLAVE_LPIAON_NOC_LPASS_AG_NOC] = &qns_lpass_aggnoc,
+};
+
+static const struct regmap_config glymur_lpass_lpiaon_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x19080,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_lpass_lpiaon_noc = {
+ .config = &glymur_lpass_lpiaon_noc_regmap_config,
+ .nodes = lpass_lpiaon_noc_nodes,
+ .num_nodes = ARRAY_SIZE(lpass_lpiaon_noc_nodes),
+ .bcms = lpass_lpiaon_noc_bcms,
+ .num_bcms = ARRAY_SIZE(lpass_lpiaon_noc_bcms),
+ .alloc_dyn_id = true,
+};
+
+static struct qcom_icc_node * const lpass_lpicx_noc_nodes[] = {
+ [MASTER_LPASS_PROC] = &qnm_lpinoc_dsp_qns4m,
+ [SLAVE_LPICX_NOC_LPIAON_NOC] = &qns_lpi_aon_noc,
+};
+
+static const struct regmap_config glymur_lpass_lpicx_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x44080,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_lpass_lpicx_noc = {
+ .config = &glymur_lpass_lpicx_noc_regmap_config,
+ .nodes = lpass_lpicx_noc_nodes,
+ .num_nodes = ARRAY_SIZE(lpass_lpicx_noc_nodes),
+ .alloc_dyn_id = true,
+};
+
+static struct qcom_icc_bcm * const mc_virt_bcms[] = {
+ &bcm_acv,
+ &bcm_mc0,
+};
+
+static struct qcom_icc_node * const mc_virt_nodes[] = {
+ [MASTER_LLCC] = &llcc_mc,
+ [SLAVE_EBI1] = &ebi,
+};
+
+static const struct qcom_icc_desc glymur_mc_virt = {
+ .nodes = mc_virt_nodes,
+ .num_nodes = ARRAY_SIZE(mc_virt_nodes),
+ .bcms = mc_virt_bcms,
+ .num_bcms = ARRAY_SIZE(mc_virt_bcms),
+ .alloc_dyn_id = true,
+};
+
+static struct qcom_icc_bcm * const mmss_noc_bcms[] = {
+ &bcm_mm0,
+ &bcm_mm1,
+};
+
+static struct qcom_icc_node * const mmss_noc_nodes[] = {
+ [MASTER_AV1_ENC] = &qnm_av1_enc,
+ [MASTER_CAMNOC_HF] = &qnm_camnoc_hf,
+ [MASTER_CAMNOC_ICP] = &qnm_camnoc_icp,
+ [MASTER_CAMNOC_SF] = &qnm_camnoc_sf,
+ [MASTER_EVA] = &qnm_eva,
+ [MASTER_MDP] = &qnm_mdp,
+ [MASTER_CDSP_HCP] = &qnm_vapss_hcp,
+ [MASTER_VIDEO] = &qnm_video,
+ [MASTER_VIDEO_CV_PROC] = &qnm_video_cv_cpu,
+ [MASTER_VIDEO_V_PROC] = &qnm_video_v_cpu,
+ [MASTER_CNOC_MNOC_CFG] = &qsm_mnoc_cfg,
+ [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf,
+ [SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf,
+ [SLAVE_SERVICE_MNOC] = &srvc_mnoc,
+};
+
+static const struct regmap_config glymur_mmss_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x5b800,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_mmss_noc = {
+ .config = &glymur_mmss_noc_regmap_config,
+ .nodes = mmss_noc_nodes,
+ .num_nodes = ARRAY_SIZE(mmss_noc_nodes),
+ .bcms = mmss_noc_bcms,
+ .num_bcms = ARRAY_SIZE(mmss_noc_bcms),
+ .alloc_dyn_id = true,
+};
+
+static struct qcom_icc_node * const nsinoc_nodes[] = {
+ [MASTER_CPUCP] = &xm_cpucp,
+ [SLAVE_NSINOC_SYSTEM_NOC] = &qns_system_noc,
+ [SLAVE_SERVICE_NSINOC] = &srvc_nsinoc,
+};
+
+static const struct regmap_config glymur_nsinoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x14080,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_nsinoc = {
+ .config = &glymur_nsinoc_regmap_config,
+ .nodes = nsinoc_nodes,
+ .num_nodes = ARRAY_SIZE(nsinoc_nodes),
+ .alloc_dyn_id = true,
+};
+
+static struct qcom_icc_bcm * const nsp_noc_bcms[] = {
+ &bcm_co0,
+};
+
+static struct qcom_icc_node * const nsp_noc_nodes[] = {
+ [MASTER_CDSP_PROC] = &qnm_nsp,
+ [SLAVE_NSP0_HSC_NOC] = &qns_nsp_hscnoc,
+};
+
+static const struct regmap_config glymur_nsp_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x21280,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_nsp_noc = {
+ .config = &glymur_nsp_noc_regmap_config,
+ .nodes = nsp_noc_nodes,
+ .num_nodes = ARRAY_SIZE(nsp_noc_nodes),
+ .bcms = nsp_noc_bcms,
+ .num_bcms = ARRAY_SIZE(nsp_noc_bcms),
+ .alloc_dyn_id = true,
+};
+
+static struct qcom_icc_node * const oobm_ss_noc_nodes[] = {
+ [MASTER_OOBMSS_SP_PROC] = &xm_mem_sp,
+ [SLAVE_OOBMSS_SNOC] = &qns_oobmss_snoc,
+};
+
+static const struct regmap_config glymur_oobm_ss_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1e080,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_oobm_ss_noc = {
+ .config = &glymur_oobm_ss_noc_regmap_config,
+ .nodes = oobm_ss_noc_nodes,
+ .num_nodes = ARRAY_SIZE(oobm_ss_noc_nodes),
+ .alloc_dyn_id = true,
+};
+
+static struct qcom_icc_bcm * const pcie_east_anoc_bcms[] = {
+ &bcm_sn6,
+};
+
+static struct qcom_icc_node * const pcie_east_anoc_nodes[] = {
+ [MASTER_PCIE_EAST_ANOC_CFG] = &qsm_pcie_east_anoc_cfg,
+ [MASTER_PCIE_0] = &xm_pcie_0,
+ [MASTER_PCIE_1] = &xm_pcie_1,
+ [MASTER_PCIE_5] = &xm_pcie_5,
+ [SLAVE_PCIE_EAST_MEM_NOC] = &qns_pcie_east_mem_noc,
+ [SLAVE_SERVICE_PCIE_EAST_AGGRE_NOC] = &srvc_pcie_east_aggre_noc,
+};
+
+static const struct regmap_config glymur_pcie_east_anoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xf300,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_pcie_east_anoc = {
+ .config = &glymur_pcie_east_anoc_regmap_config,
+ .nodes = pcie_east_anoc_nodes,
+ .num_nodes = ARRAY_SIZE(pcie_east_anoc_nodes),
+ .bcms = pcie_east_anoc_bcms,
+ .num_bcms = ARRAY_SIZE(pcie_east_anoc_bcms),
+ .alloc_dyn_id = true,
+ .qos_requires_clocks = true,
+};
+
+static struct qcom_icc_bcm * const pcie_east_slv_noc_bcms[] = {
+ &bcm_sn6,
+};
+
+static struct qcom_icc_node * const pcie_east_slv_noc_nodes[] = {
+ [MASTER_HSCNOC_PCIE_EAST] = &qnm_hscnoc_pcie_east,
+ [MASTER_CNOC_PCIE_EAST_SLAVE_CFG] = &qsm_cnoc_pcie_east_slave_cfg,
+ [SLAVE_HSCNOC_PCIE_EAST_MS_MPU_CFG] = &qhs_hscnoc_pcie_east_ms_mpu_cfg,
+ [SLAVE_SERVICE_PCIE_EAST] = &srvc_pcie_east,
+ [SLAVE_PCIE_0] = &xs_pcie_0,
+ [SLAVE_PCIE_1] = &xs_pcie_1,
+ [SLAVE_PCIE_5] = &xs_pcie_5,
+};
+
+static const struct regmap_config glymur_pcie_east_slv_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xe080,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_pcie_east_slv_noc = {
+ .config = &glymur_pcie_east_slv_noc_regmap_config,
+ .nodes = pcie_east_slv_noc_nodes,
+ .num_nodes = ARRAY_SIZE(pcie_east_slv_noc_nodes),
+ .bcms = pcie_east_slv_noc_bcms,
+ .num_bcms = ARRAY_SIZE(pcie_east_slv_noc_bcms),
+ .alloc_dyn_id = true,
+};
+
+static struct qcom_icc_bcm * const pcie_west_anoc_bcms[] = {
+ &bcm_sn6,
+};
+
+static struct qcom_icc_node * const pcie_west_anoc_nodes[] = {
+ [MASTER_PCIE_WEST_ANOC_CFG] = &qsm_pcie_west_anoc_cfg,
+ [MASTER_PCIE_2] = &xm_pcie_2,
+ [MASTER_PCIE_3A] = &xm_pcie_3a,
+ [MASTER_PCIE_3B] = &xm_pcie_3b,
+ [MASTER_PCIE_4] = &xm_pcie_4,
+ [MASTER_PCIE_6] = &xm_pcie_6,
+ [SLAVE_PCIE_WEST_MEM_NOC] = &qns_pcie_west_mem_noc,
+ [SLAVE_SERVICE_PCIE_WEST_AGGRE_NOC] = &srvc_pcie_west_aggre_noc,
+};
+
+static const struct regmap_config glymur_pcie_west_anoc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xf580,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_pcie_west_anoc = {
+ .config = &glymur_pcie_west_anoc_regmap_config,
+ .nodes = pcie_west_anoc_nodes,
+ .num_nodes = ARRAY_SIZE(pcie_west_anoc_nodes),
+ .bcms = pcie_west_anoc_bcms,
+ .num_bcms = ARRAY_SIZE(pcie_west_anoc_bcms),
+ .alloc_dyn_id = true,
+ .qos_requires_clocks = true,
+};
+
+static struct qcom_icc_bcm * const pcie_west_slv_noc_bcms[] = {
+ &bcm_sn6,
+};
+
+static struct qcom_icc_node * const pcie_west_slv_noc_nodes[] = {
+ [MASTER_HSCNOC_PCIE_WEST] = &qnm_hscnoc_pcie_west,
+ [MASTER_CNOC_PCIE_WEST_SLAVE_CFG] = &qsm_cnoc_pcie_west_slave_cfg,
+ [SLAVE_HSCNOC_PCIE_WEST_MS_MPU_CFG] = &qhs_hscnoc_pcie_west_ms_mpu_cfg,
+ [SLAVE_SERVICE_PCIE_WEST] = &srvc_pcie_west,
+ [SLAVE_PCIE_2] = &xs_pcie_2,
+ [SLAVE_PCIE_3A] = &xs_pcie_3a,
+ [SLAVE_PCIE_3B] = &xs_pcie_3b,
+ [SLAVE_PCIE_4] = &xs_pcie_4,
+ [SLAVE_PCIE_6] = &xs_pcie_6,
+};
+
+static const struct regmap_config glymur_pcie_west_slv_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0xf180,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_pcie_west_slv_noc = {
+ .config = &glymur_pcie_west_slv_noc_regmap_config,
+ .nodes = pcie_west_slv_noc_nodes,
+ .num_nodes = ARRAY_SIZE(pcie_west_slv_noc_nodes),
+ .bcms = pcie_west_slv_noc_bcms,
+ .num_bcms = ARRAY_SIZE(pcie_west_slv_noc_bcms),
+ .alloc_dyn_id = true,
+};
+
+static struct qcom_icc_bcm * const system_noc_bcms[] = {
+ &bcm_sn0,
+ &bcm_sn1,
+ &bcm_sn2,
+ &bcm_sn3,
+ &bcm_sn4,
+};
+
+static struct qcom_icc_node * const system_noc_nodes[] = {
+ [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc,
+ [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc,
+ [MASTER_A3NOC_SNOC] = &qnm_aggre3_noc,
+ [MASTER_NSINOC_SNOC] = &qnm_nsi_noc,
+ [MASTER_OOBMSS] = &qnm_oobmss,
+ [SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf,
+};
+
+static const struct regmap_config glymur_system_noc_regmap_config = {
+ .reg_bits = 32,
+ .reg_stride = 4,
+ .val_bits = 32,
+ .max_register = 0x1c080,
+ .fast_io = true,
+};
+
+static const struct qcom_icc_desc glymur_system_noc = {
+ .config = &glymur_system_noc_regmap_config,
+ .nodes = system_noc_nodes,
+ .num_nodes = ARRAY_SIZE(system_noc_nodes),
+ .bcms = system_noc_bcms,
+ .num_bcms = ARRAY_SIZE(system_noc_bcms),
+ .alloc_dyn_id = true,
+};
+
+static const struct of_device_id qnoc_of_match[] = {
+ { .compatible = "qcom,glymur-aggre1-noc", .data = &glymur_aggre1_noc},
+ { .compatible = "qcom,glymur-aggre2-noc", .data = &glymur_aggre2_noc},
+ { .compatible = "qcom,glymur-aggre3-noc", .data = &glymur_aggre3_noc},
+ { .compatible = "qcom,glymur-aggre4-noc", .data = &glymur_aggre4_noc},
+ { .compatible = "qcom,glymur-clk-virt", .data = &glymur_clk_virt},
+ { .compatible = "qcom,glymur-cnoc-cfg", .data = &glymur_cnoc_cfg},
+ { .compatible = "qcom,glymur-cnoc-main", .data = &glymur_cnoc_main},
+ { .compatible = "qcom,glymur-hscnoc", .data = &glymur_hscnoc},
+ { .compatible = "qcom,glymur-lpass-ag-noc", .data = &glymur_lpass_ag_noc},
+ { .compatible = "qcom,glymur-lpass-lpiaon-noc", .data = &glymur_lpass_lpiaon_noc},
+ { .compatible = "qcom,glymur-lpass-lpicx-noc", .data = &glymur_lpass_lpicx_noc},
+ { .compatible = "qcom,glymur-mc-virt", .data = &glymur_mc_virt},
+ { .compatible = "qcom,glymur-mmss-noc", .data = &glymur_mmss_noc},
+ { .compatible = "qcom,glymur-nsinoc", .data = &glymur_nsinoc},
+ { .compatible = "qcom,glymur-nsp-noc", .data = &glymur_nsp_noc},
+ { .compatible = "qcom,glymur-oobm-ss-noc", .data = &glymur_oobm_ss_noc},
+ { .compatible = "qcom,glymur-pcie-east-anoc", .data = &glymur_pcie_east_anoc},
+ { .compatible = "qcom,glymur-pcie-east-slv-noc", .data = &glymur_pcie_east_slv_noc},
+ { .compatible = "qcom,glymur-pcie-west-anoc", .data = &glymur_pcie_west_anoc},
+ { .compatible = "qcom,glymur-pcie-west-slv-noc", .data = &glymur_pcie_west_slv_noc},
+ { .compatible = "qcom,glymur-system-noc", .data = &glymur_system_noc},
+ { }
+};
+MODULE_DEVICE_TABLE(of, qnoc_of_match);
+
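+/*
+ * core_initcall: bring the interconnect providers up early, ahead of
+ * the consumers that vote on these paths during boot.
+ */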
+static struct platform_driver qnoc_driver = {
+ .probe = qcom_icc_rpmh_probe,
+ .remove = qcom_icc_rpmh_remove,
+ .driver = {
+ .name = "qnoc-glymur",
+ .of_match_table = qnoc_of_match,
+ .sync_state = icc_sync_state,
+ },
+};
+
+static int __init qnoc_driver_init(void)
+{
+ return platform_driver_register(&qnoc_driver);
+}
+core_initcall(qnoc_driver_init);
+
+static void __exit qnoc_driver_exit(void)
+{
+ platform_driver_unregister(&qnoc_driver);
+}
+module_exit(qnoc_driver_exit);
+
+MODULE_DESCRIPTION("GLYMUR NoC driver");
+MODULE_LICENSE("GPL");
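For context on how these descriptors get used: a consumer driver votes bandwidth on a path between two of the nodes declared above through the generic interconnect API, and the framework aggregates the votes into the BCMs listed per NoC. A minimal sketch of such a consumer, assuming a hypothetical "memory" entry in its interconnect-names (the path name and bandwidth figures are illustrative, not part of this patch):

#include <linux/interconnect.h>

static int example_vote_bw(struct device *dev)
{
	struct icc_path *path;
	int ret;

	/* "memory" is a hypothetical interconnect-names entry */
	path = of_icc_get(dev, "memory");
	if (IS_ERR(path))
		return PTR_ERR(path);

	/* average and peak bandwidth in kBps; placeholder values */
	ret = icc_set_bw(path, 100000, 200000);
	icc_put(path);
	return ret;
}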
diff --git a/drivers/interconnect/qcom/icc-rpmh.h b/drivers/interconnect/qcom/icc-rpmh.h
index bd8d730249b1..307f48412563 100644
--- a/drivers/interconnect/qcom/icc-rpmh.h
+++ b/drivers/interconnect/qcom/icc-rpmh.h
@@ -53,7 +53,7 @@ struct bcm_db {
u8 reserved;
};
-#define MAX_PORTS 2
+#define MAX_PORTS 4
/**
* struct qcom_icc_qosbox - Qualcomm specific QoS config
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index e2e66f5f4fb8..b32a2597d246 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -23,7 +23,6 @@ obj-$(CONFIG_SENSORS_BH1770) += bh1770glc.o
obj-$(CONFIG_SENSORS_APDS990X) += apds990x.o
obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
obj-$(CONFIG_KGDB_TESTS) += kgdbts.o
-obj-$(CONFIG_TEST_MISC_MINOR) += misc_minor_kunit.o
obj-$(CONFIG_SGI_XP) += sgi-xp/
obj-$(CONFIG_SGI_GRU) += sgi-gru/
obj-$(CONFIG_SMPRO_ERRMON) += smpro-errmon.o
diff --git a/drivers/misc/ad525x_dpot.c b/drivers/misc/ad525x_dpot.c
index 756ef6912b5a..04683b981e54 100644
--- a/drivers/misc/ad525x_dpot.c
+++ b/drivers/misc/ad525x_dpot.c
@@ -73,6 +73,7 @@
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include "ad525x_dpot.h"
@@ -418,10 +419,8 @@ static ssize_t sysfs_show_reg(struct device *dev,
s32 value;
if (reg & DPOT_ADDR_OTP_EN)
- return sprintf(buf, "%s\n",
- test_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask) ?
- "enabled" : "disabled");
-
+ return sprintf(buf, "%s\n", str_enabled_disabled(
+ test_bit(DPOT_RDAC_MASK & reg, data->otp_en_mask)));
mutex_lock(&data->update_lock);
value = dpot_read(data, reg);
diff --git a/drivers/misc/amd-sbi/Kconfig b/drivers/misc/amd-sbi/Kconfig
index 4840831c84ca..4aae0733d0fc 100644
--- a/drivers/misc/amd-sbi/Kconfig
+++ b/drivers/misc/amd-sbi/Kconfig
@@ -2,6 +2,7 @@
config AMD_SBRMI_I2C
tristate "AMD side band RMI support"
depends on I2C
+ select REGMAP_I2C
help
Side band RMI over I2C support for AMD out of band management.
diff --git a/drivers/misc/apds990x.c b/drivers/misc/apds990x.c
index e7d73c972f65..58946c4ff1a5 100644
--- a/drivers/misc/apds990x.c
+++ b/drivers/misc/apds990x.c
@@ -984,7 +984,6 @@ static ssize_t apds990x_power_state_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", !pm_runtime_suspended(dev));
- return 0;
}
static ssize_t apds990x_power_state_store(struct device *dev,
diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c
index cd512284bfb3..46444bb47f65 100644
--- a/drivers/misc/cardreader/rts5227.c
+++ b/drivers/misc/cardreader/rts5227.c
@@ -79,6 +79,10 @@ static void rts5227_fetch_vendor_settings(struct rtsx_pcr *pcr)
pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
if (rtsx_reg_check_reverse_socket(reg))
pcr->flags |= PCR_REVERSE_SOCKET;
+ if (rtsx_reg_check_cd_reverse(reg))
+ pcr->option.sd_cd_reverse_en = 1;
+ if (rtsx_reg_check_wp_reverse(reg))
+ pcr->option.sd_wp_reverse_en = 1;
}
static void rts5227_init_from_cfg(struct rtsx_pcr *pcr)
@@ -127,8 +131,10 @@ static int rts5227_extra_init_hw(struct rtsx_pcr *pcr)
/* Configure force_clock_req */
if (pcr->flags & PCR_REVERSE_SOCKET)
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x30, 0x30);
- else
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x30, 0x00);
+ else {
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x20, option->sd_cd_reverse_en << 5);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x10, option->sd_wp_reverse_en << 4);
+ }
if (CHK_PCI_PID(pcr, 0x522A))
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RTS522A_AUTOLOAD_CFG1,
@@ -350,6 +356,8 @@ void rts5227_init_params(struct rtsx_pcr *pcr)
pcr->ms_pull_ctl_disable_tbl = rts5227_ms_pull_ctl_disable_tbl;
pcr->reg_pm_ctrl3 = PM_CTRL3;
+ pcr->option.sd_cd_reverse_en = 0;
+ pcr->option.sd_wp_reverse_en = 0;
}
static int rts522a_optimize_phy(struct rtsx_pcr *pcr)
@@ -508,5 +516,4 @@ void rts522a_init_params(struct rtsx_pcr *pcr)
pcr->hw_param.interrupt_en |= SD_OC_INT_EN;
pcr->hw_param.ocp_glitch = SD_OCP_GLITCH_10M;
pcr->option.sd_800mA_ocp_thd = RTS522A_OCP_THD_800;
-
}
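A note on the helper these PETXCFG hunks rely on: rtsx_pci_write_register() (and the queued WRITE_REG_CMD form) takes a mask and a value and updates only the masked bits, so mask 0x20 with value sd_cd_reverse_en << 5 toggles bit 5 alone, and mask 0x10 with sd_wp_reverse_en << 4 toggles bit 4. A rough open-coded equivalent of one such update (a sketch only; rtsx_pci_read_register() is the existing driver helper):

static int petxcfg_set_cd_reverse(struct rtsx_pcr *pcr, u8 enable)
{
	u8 tmp;
	int err;

	err = rtsx_pci_read_register(pcr, PETXCFG, &tmp);
	if (err < 0)
		return err;

	/* clear bit 5, then set it from the enable flag */
	tmp = (tmp & ~0x20) | ((enable << 5) & 0x20);
	return rtsx_pci_write_register(pcr, PETXCFG, 0xFF, tmp);
}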
diff --git a/drivers/misc/cardreader/rts5228.c b/drivers/misc/cardreader/rts5228.c
index 0c7f10bcf6f1..db7e735ac24f 100644
--- a/drivers/misc/cardreader/rts5228.c
+++ b/drivers/misc/cardreader/rts5228.c
@@ -84,6 +84,10 @@ static void rtsx5228_fetch_vendor_settings(struct rtsx_pcr *pcr)
pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
if (rtsx_reg_check_reverse_socket(reg))
pcr->flags |= PCR_REVERSE_SOCKET;
+ if (rtsx_reg_check_cd_reverse(reg))
+ pcr->option.sd_cd_reverse_en = 1;
+ if (rtsx_reg_check_wp_reverse(reg))
+ pcr->option.sd_wp_reverse_en = 1;
}
static int rts5228_optimize_phy(struct rtsx_pcr *pcr)
@@ -432,8 +436,10 @@ static int rts5228_extra_init_hw(struct rtsx_pcr *pcr)
if (pcr->flags & PCR_REVERSE_SOCKET)
rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x30);
- else
- rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x00);
+ else {
+ rtsx_pci_write_register(pcr, PETXCFG, 0x20, option->sd_cd_reverse_en << 5);
+ rtsx_pci_write_register(pcr, PETXCFG, 0x10, option->sd_wp_reverse_en << 4);
+ }
/*
* If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced
@@ -720,4 +726,6 @@ void rts5228_init_params(struct rtsx_pcr *pcr)
hw_param->interrupt_en |= SD_OC_INT_EN;
hw_param->ocp_glitch = SD_OCP_GLITCH_800U;
option->sd_800mA_ocp_thd = RTS5228_LDO1_OCP_THD_930;
+ option->sd_cd_reverse_en = 0;
+ option->sd_wp_reverse_en = 0;
}
diff --git a/drivers/misc/cardreader/rts5249.c b/drivers/misc/cardreader/rts5249.c
index 6c81040e18be..38aefd8db452 100644
--- a/drivers/misc/cardreader/rts5249.c
+++ b/drivers/misc/cardreader/rts5249.c
@@ -60,6 +60,7 @@ static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr)
pci_read_config_dword(pdev, PCR_SETTING_REG1, &reg);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG1, reg);
+ pci_write_config_dword(pdev, 0x718, 0x0007C000);
if (!rtsx_vendor_setting_valid(reg)) {
pcr_dbg(pcr, "skip fetch vendor setting\n");
@@ -82,6 +83,10 @@ static void rtsx_base_fetch_vendor_settings(struct rtsx_pcr *pcr)
pcr->sd30_drive_sel_3v3 = rtsx_reg_to_sd30_drive_sel_3v3(reg);
if (rtsx_reg_check_reverse_socket(reg))
pcr->flags |= PCR_REVERSE_SOCKET;
+ if (rtsx_reg_check_cd_reverse(reg))
+ pcr->option.sd_cd_reverse_en = 1;
+ if (rtsx_reg_check_wp_reverse(reg))
+ pcr->option.sd_wp_reverse_en = 1;
}
static void rts5249_init_from_cfg(struct rtsx_pcr *pcr)
@@ -254,9 +259,11 @@ static int rts5249_extra_init_hw(struct rtsx_pcr *pcr)
/* Configure driving */
rts5249_fill_driving(pcr, OUTPUT_3V3);
if (pcr->flags & PCR_REVERSE_SOCKET)
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0xB0, 0xB0);
- else
- rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0xB0, 0x80);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x30, 0x30);
+ else {
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x20, option->sd_cd_reverse_en << 5);
+ rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x10, option->sd_wp_reverse_en << 4);
+ }
rtsx_pci_send_cmd(pcr, CMD_TIMEOUT_DEF);
@@ -572,6 +579,9 @@ void rts5249_init_params(struct rtsx_pcr *pcr)
option->ltr_l1off_sspwrgate = LTR_L1OFF_SSPWRGATE_5249_DEF;
option->ltr_l1off_snooze_sspwrgate =
LTR_L1OFF_SNOOZE_SSPWRGATE_5249_DEF;
+
+ option->sd_cd_reverse_en = 0;
+ option->sd_wp_reverse_en = 0;
}
static int rts524a_write_phy(struct rtsx_pcr *pcr, u8 addr, u16 val)
diff --git a/drivers/misc/cardreader/rts5264.c b/drivers/misc/cardreader/rts5264.c
index d050c9fff7ac..99a2d5ea6421 100644
--- a/drivers/misc/cardreader/rts5264.c
+++ b/drivers/misc/cardreader/rts5264.c
@@ -527,8 +527,16 @@ static void rts5264_init_from_hw(struct rtsx_pcr *pcr)
pcr->rtd3_en = rts5264_reg_to_rtd3(lval2);
- if (rts5264_reg_check_reverse_socket(lval2))
- pcr->flags |= PCR_REVERSE_SOCKET;
+ if (rts5264_reg_check_reverse_socket(lval2)) {
+ if (is_version_higher_than(pcr, PID_5264, RTS5264_IC_VER_B))
+ pcr->option.sd_cd_reverse_en = 1;
+ else
+ pcr->flags |= PCR_REVERSE_SOCKET;
+ }
+
+ if (rts5264_reg_check_wp_reverse(lval2) &&
+ is_version_higher_than(pcr, PID_5264, RTS5264_IC_VER_B))
+ pcr->option.sd_wp_reverse_en = 1;
pci_read_config_dword(pdev, setting_reg1, &lval1);
pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", setting_reg1, lval1);
@@ -622,8 +630,10 @@ static int rts5264_extra_init_hw(struct rtsx_pcr *pcr)
if (pcr->flags & PCR_REVERSE_SOCKET)
rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x30);
- else
- rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x00);
+ else {
+ rtsx_pci_write_register(pcr, PETXCFG, 0x20, option->sd_cd_reverse_en << 5);
+ rtsx_pci_write_register(pcr, PETXCFG, 0x10, option->sd_wp_reverse_en << 4);
+ }
/*
* If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced
@@ -957,4 +967,6 @@ void rts5264_init_params(struct rtsx_pcr *pcr)
hw_param->interrupt_en |= (SD_OC_INT_EN | SD_OVP_INT_EN);
hw_param->ocp_glitch = SD_OCP_GLITCH_800U | SDVIO_OCP_GLITCH_800U;
option->sd_800mA_ocp_thd = RTS5264_LDO1_OCP_THD_1150;
+ option->sd_cd_reverse_en = 0;
+ option->sd_wp_reverse_en = 0;
}
diff --git a/drivers/misc/cardreader/rts5264.h b/drivers/misc/cardreader/rts5264.h
index f3e81daa708d..611ee253367c 100644
--- a/drivers/misc/cardreader/rts5264.h
+++ b/drivers/misc/cardreader/rts5264.h
@@ -14,6 +14,7 @@
#define rts5264_reg_to_aspm(reg) \
(((~(reg) >> 28) & 0x02) | (((reg) >> 28) & 0x01))
#define rts5264_reg_check_reverse_socket(reg) ((reg) & 0x04)
+#define rts5264_reg_check_wp_reverse(reg) ((reg) & 0x8000)
#define rts5264_reg_to_sd30_drive_sel_1v8(reg) (((reg) >> 22) & 0x03)
#define rts5264_reg_to_sd30_drive_sel_3v3(reg) (((reg) >> 16) & 0x03)
#define rts5264_reg_to_rtd3(reg) ((reg) & 0x08)
diff --git a/drivers/misc/cardreader/rtsx_pcr.h b/drivers/misc/cardreader/rtsx_pcr.h
index 8e5951b61143..40562ff2be13 100644
--- a/drivers/misc/cardreader/rtsx_pcr.h
+++ b/drivers/misc/cardreader/rtsx_pcr.h
@@ -100,6 +100,8 @@ static inline u8 map_sd_drive(int idx)
#define rtsx_reg_to_sd30_drive_sel_3v3(reg) (((reg) >> 5) & 0x03)
#define rtsx_reg_to_card_drive_sel(reg) ((((reg) >> 25) & 0x01) << 6)
#define rtsx_reg_check_reverse_socket(reg) ((reg) & 0x4000)
+#define rtsx_reg_check_cd_reverse(reg) ((reg) & 0x800000)
+#define rtsx_reg_check_wp_reverse(reg) ((reg) & 0x400000)
#define rts5209_reg_to_aspm(reg) (((reg) >> 5) & 0x03)
#define rts5209_reg_check_ms_pmos(reg) (!((reg) & 0x08))
#define rts5209_reg_to_sd30_drive_sel_1v8(reg) (((reg) >> 3) & 0x07)
diff --git a/drivers/misc/dw-xdata-pcie.c b/drivers/misc/dw-xdata-pcie.c
index efd0ca8cc925..a604c0e9c038 100644
--- a/drivers/misc/dw-xdata-pcie.c
+++ b/drivers/misc/dw-xdata-pcie.c
@@ -16,6 +16,7 @@
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/pci.h>
+#include <linux/string_choices.h>
#define DW_XDATA_DRIVER_NAME "dw-xdata-pcie"
@@ -132,7 +133,7 @@ static void dw_xdata_start(struct dw_xdata *dw, bool write)
if (!(status & STATUS_DONE))
dev_dbg(dev, "xData: started %s direction\n",
- write ? "write" : "read");
+ str_write_read(write));
}
static void dw_xdata_perf_meas(struct dw_xdata *dw, u64 *data, bool write)
@@ -195,7 +196,7 @@ static void dw_xdata_perf(struct dw_xdata *dw, u64 *rate, bool write)
mutex_unlock(&dw->mutex);
dev_dbg(dev, "xData: time=%llu us, %s=%llu MB/s\n",
- diff, write ? "write" : "read", *rate);
+ diff, str_write_read(write), *rate);
}
static struct dw_xdata *misc_dev_to_dw(struct miscdevice *misc_dev)
diff --git a/drivers/misc/eeprom/Kconfig b/drivers/misc/eeprom/Kconfig
index 0bef5b93bd6d..4d0ce47aa282 100644
--- a/drivers/misc/eeprom/Kconfig
+++ b/drivers/misc/eeprom/Kconfig
@@ -120,4 +120,22 @@ config EEPROM_EE1004
This driver can also be built as a module. If so, the module
will be called ee1004.
+config EEPROM_M24LR
+ tristate "STMicroelectronics M24LR RFID/NFC EEPROM support"
+ depends on I2C && SYSFS
+ select REGMAP_I2C
+ select NVMEM
+ select NVMEM_SYSFS
+ help
+ This enables support for STMicroelectronics M24LR RFID/NFC EEPROM
+ chips. These dual-interface devices expose two I2C addresses:
+ one for EEPROM memory access and another for control and system
+ configuration (e.g. UID, password handling).
+
+ This driver provides a sysfs interface for control functions and
+ integrates with the nvmem subsystem for EEPROM access.
+
+ To compile this driver as a module, choose M here: the
+ module will be called m24lr.
+
endmenu
diff --git a/drivers/misc/eeprom/Makefile b/drivers/misc/eeprom/Makefile
index 65794e526d5d..8f311fd6a4ce 100644
--- a/drivers/misc/eeprom/Makefile
+++ b/drivers/misc/eeprom/Makefile
@@ -7,3 +7,4 @@ obj-$(CONFIG_EEPROM_93XX46) += eeprom_93xx46.o
obj-$(CONFIG_EEPROM_DIGSY_MTC_CFG) += digsy_mtc_eeprom.o
obj-$(CONFIG_EEPROM_IDT_89HPESX) += idt_89hpesx.o
obj-$(CONFIG_EEPROM_EE1004) += ee1004.o
+obj-$(CONFIG_EEPROM_M24LR) += m24lr.o
diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c
index 2d0492867054..e2868f7bdb03 100644
--- a/drivers/misc/eeprom/at25.c
+++ b/drivers/misc/eeprom/at25.c
@@ -379,37 +379,49 @@ static int at25_fram_to_chip(struct device *dev, struct spi_eeprom *chip)
struct at25_data *at25 = container_of(chip, struct at25_data, chip);
u8 sernum[FM25_SN_LEN];
u8 id[FM25_ID_LEN];
+ u32 val;
int i;
strscpy(chip->name, "fm25", sizeof(chip->name));
- /* Get ID of chip */
- fm25_aux_read(at25, id, FM25_RDID, FM25_ID_LEN);
- /* There are inside-out FRAM variations, detect them and reverse the ID bytes */
- if (id[6] == 0x7f && id[2] == 0xc2)
- for (i = 0; i < ARRAY_SIZE(id) / 2; i++) {
- u8 tmp = id[i];
- int j = ARRAY_SIZE(id) - i - 1;
+ if (!device_property_read_u32(dev, "size", &val)) {
+ chip->byte_len = val;
+ } else {
+ /* Get ID of chip */
+ fm25_aux_read(at25, id, FM25_RDID, FM25_ID_LEN);
+ /* There are inside-out FRAM variations, detect them and reverse the ID bytes */
+ if (id[6] == 0x7f && id[2] == 0xc2)
+ for (i = 0; i < ARRAY_SIZE(id) / 2; i++) {
+ u8 tmp = id[i];
+ int j = ARRAY_SIZE(id) - i - 1;
+
+ id[i] = id[j];
+ id[j] = tmp;
+ }
+ if (id[6] != 0xc2) {
+ dev_err(dev, "Error: no Cypress FRAM with device ID (manufacturer ID bank 7: %02x)\n", id[6]);
+ return -ENODEV;
+ }
- id[i] = id[j];
- id[j] = tmp;
+ switch (id[7]) {
+ case 0x21 ... 0x26:
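+			/* e.g. id[7] == 0x21 -> BIT(4) * 1024 = 16 KiB, 0x26 -> 512 KiB */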
+ chip->byte_len = BIT(id[7] - 0x21 + 4) * 1024;
+ break;
+ case 0x2a ... 0x30:
+ /* CY15B116QN ... CY15B116QN */
+ chip->byte_len = BIT(((id[7] >> 1) & 0xf) + 13);
+ break;
+ default:
+ dev_err(dev, "Error: unsupported size (id %02x)\n", id[7]);
+ return -ENODEV;
}
- if (id[6] != 0xc2) {
- dev_err(dev, "Error: no Cypress FRAM (id %02x)\n", id[6]);
- return -ENODEV;
- }
- switch (id[7]) {
- case 0x21 ... 0x26:
- chip->byte_len = BIT(id[7] - 0x21 + 4) * 1024;
- break;
- case 0x2a ... 0x30:
- /* CY15B116QN ... CY15B116QN */
- chip->byte_len = BIT(((id[7] >> 1) & 0xf) + 13);
- break;
- default:
- dev_err(dev, "Error: unsupported size (id %02x)\n", id[7]);
- return -ENODEV;
+ if (id[8]) {
+ fm25_aux_read(at25, sernum, FM25_RDSN, FM25_SN_LEN);
+ /* Swap byte order */
+ for (i = 0; i < FM25_SN_LEN; i++)
+ at25->sernum[i] = sernum[FM25_SN_LEN - 1 - i];
+ }
}
if (chip->byte_len > 64 * 1024)
@@ -417,13 +429,6 @@ static int at25_fram_to_chip(struct device *dev, struct spi_eeprom *chip)
else
chip->flags |= EE_ADDR2;
- if (id[8]) {
- fm25_aux_read(at25, sernum, FM25_RDSN, FM25_SN_LEN);
- /* Swap byte order */
- for (i = 0; i < FM25_SN_LEN; i++)
- at25->sernum[i] = sernum[FM25_SN_LEN - 1 - i];
- }
-
chip->page_size = PAGE_SIZE;
return 0;
}
diff --git a/drivers/misc/eeprom/m24lr.c b/drivers/misc/eeprom/m24lr.c
new file mode 100644
index 000000000000..7a9fd45a8e46
--- /dev/null
+++ b/drivers/misc/eeprom/m24lr.c
@@ -0,0 +1,606 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * m24lr.c - Sysfs control interface for ST M24LR series RFID/NFC chips
+ *
+ * Copyright (c) 2025 Abd-Alrhman Masalkhi <abd.masalkhi@gmail.com>
+ *
+ * This driver implements both the sysfs-based control interface and EEPROM
+ * access for STMicroelectronics M24LR series chips (e.g., M24LR04E-R).
+ * It provides access to control registers for features such as password
+ * authentication, memory protection, and device configuration. In addition,
+ * it manages read and write operations to the EEPROM region of the chip.
+ */
+
+#include <linux/device.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+
+#define M24LR_WRITE_TIMEOUT 25u
+#define M24LR_READ_TIMEOUT (M24LR_WRITE_TIMEOUT)
+
+/**
+ * struct m24lr_chip - describes chip-specific sysfs layout
+ * @sss_len: the length of the sss region
+ * @page_size: chip-specific limit on the maximum number of bytes allowed
+ * in a single write operation.
+ * @eeprom_size: size of the EEPROM in bytes
+ *
+ * Supports multiple M24LR chip variants (e.g., M24LRxx) by allowing each
+ * to define its own set of sysfs attributes, depending on its available
+ * registers and features.
+ */
+struct m24lr_chip {
+ unsigned int sss_len;
+ unsigned int page_size;
+ unsigned int eeprom_size;
+};
+
+/**
+ * struct m24lr - core driver data for M24LR chip control
+ * @uid: 64 bits unique identifier stored in the device
+ * @sss_len: the length of the sss region
+ * @page_size: chip-specific limit on the maximum number of bytes allowed
+ * in a single write operation.
+ * @eeprom_size: size of the EEPROM in bytes
+ * @ctl_regmap: regmap interface for accessing the system parameter sector
+ * @eeprom_regmap: regmap interface for accessing the EEPROM
+ * @lock: mutex to synchronize operations to the device
+ *
+ * Central data structure holding the state and resources used by the
+ * M24LR device driver.
+ */
+struct m24lr {
+ u64 uid;
+ unsigned int sss_len;
+ unsigned int page_size;
+ unsigned int eeprom_size;
+ struct regmap *ctl_regmap;
+ struct regmap *eeprom_regmap;
+ struct mutex lock; /* synchronize operations to the device */
+};
+
+static const struct regmap_range m24lr_ctl_vo_ranges[] = {
+ regmap_reg_range(0, 63),
+};
+
+static const struct regmap_access_table m24lr_ctl_vo_table = {
+ .yes_ranges = m24lr_ctl_vo_ranges,
+ .n_yes_ranges = ARRAY_SIZE(m24lr_ctl_vo_ranges),
+};
+
+static const struct regmap_config m24lr_ctl_regmap_conf = {
+ .name = "m24lr_ctl",
+ .reg_stride = 1,
+ .reg_bits = 16,
+ .val_bits = 8,
+ .disable_locking = false,
+	.cache_type = REGCACHE_RBTREE, /* flat can't be used, there's a huge gap */
+ .volatile_table = &m24lr_ctl_vo_table,
+};
+
+/* Chip descriptor for M24LR04E-R variant */
+static const struct m24lr_chip m24lr04e_r_chip = {
+ .page_size = 4,
+ .eeprom_size = 512,
+ .sss_len = 4,
+};
+
+/* Chip descriptor for M24LR16E-R variant */
+static const struct m24lr_chip m24lr16e_r_chip = {
+ .page_size = 4,
+ .eeprom_size = 2048,
+ .sss_len = 16,
+};
+
+/* Chip descriptor for M24LR64E-R variant */
+static const struct m24lr_chip m24lr64e_r_chip = {
+ .page_size = 4,
+ .eeprom_size = 8192,
+ .sss_len = 64,
+};
+
+static const struct i2c_device_id m24lr_ids[] = {
+ { "m24lr04e-r", (kernel_ulong_t)&m24lr04e_r_chip},
+ { "m24lr16e-r", (kernel_ulong_t)&m24lr16e_r_chip},
+ { "m24lr64e-r", (kernel_ulong_t)&m24lr64e_r_chip},
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, m24lr_ids);
+
+static const struct of_device_id m24lr_of_match[] = {
+ { .compatible = "st,m24lr04e-r", .data = &m24lr04e_r_chip},
+ { .compatible = "st,m24lr16e-r", .data = &m24lr16e_r_chip},
+ { .compatible = "st,m24lr64e-r", .data = &m24lr64e_r_chip},
+ { }
+};
+MODULE_DEVICE_TABLE(of, m24lr_of_match);
+
+/**
+ * m24lr_regmap_read - read data using regmap with retry on failure
+ * @regmap: regmap instance for the device
+ * @buf: buffer to store the read data
+ * @size: number of bytes to read
+ * @offset: starting register address
+ *
+ * Attempts to read a block of data from the device with retries and timeout.
+ * Some M24LR chips may transiently NACK reads (e.g., during internal write
+ * cycles), so this function retries with a short sleep until the timeout
+ * expires.
+ *
+ * Returns:
+ * Number of bytes read on success,
+ * -ETIMEDOUT if the read fails within the timeout window.
+ */
+static ssize_t m24lr_regmap_read(struct regmap *regmap, u8 *buf,
+ size_t size, unsigned int offset)
+{
+ int err;
+ unsigned long timeout, read_time;
+ ssize_t ret = -ETIMEDOUT;
+
+ timeout = jiffies + msecs_to_jiffies(M24LR_READ_TIMEOUT);
+ do {
+ read_time = jiffies;
+
+ err = regmap_bulk_read(regmap, offset, buf, size);
+ if (!err) {
+ ret = size;
+ break;
+ }
+
+ usleep_range(1000, 2000);
+ } while (time_before(read_time, timeout));
+
+ return ret;
+}
+
+/**
+ * m24lr_regmap_write - write data using regmap with retry on failure
+ * @regmap: regmap instance for the device
+ * @buf: buffer containing the data to write
+ * @size: number of bytes to write
+ * @offset: starting register address
+ *
+ * Attempts to write a block of data to the device with retries and a timeout.
+ * Some M24LR devices may NACK I2C writes while an internal write operation
+ * is in progress. This function retries the write operation with a short delay
+ * until it succeeds or the timeout is reached.
+ *
+ * Returns:
+ * Number of bytes written on success,
+ * -ETIMEDOUT if the write fails within the timeout window.
+ */
+static ssize_t m24lr_regmap_write(struct regmap *regmap, const u8 *buf,
+ size_t size, unsigned int offset)
+{
+ int err;
+ unsigned long timeout, write_time;
+ ssize_t ret = -ETIMEDOUT;
+
+ timeout = jiffies + msecs_to_jiffies(M24LR_WRITE_TIMEOUT);
+
+ do {
+ write_time = jiffies;
+
+ err = regmap_bulk_write(regmap, offset, buf, size);
+ if (!err) {
+ ret = size;
+ break;
+ }
+
+ usleep_range(1000, 2000);
+ } while (time_before(write_time, timeout));
+
+ return ret;
+}
+
+static ssize_t m24lr_read(struct m24lr *m24lr, u8 *buf, size_t size,
+ unsigned int offset, bool is_eeprom)
+{
+ struct regmap *regmap;
+ ssize_t ret;
+
+ if (is_eeprom)
+ regmap = m24lr->eeprom_regmap;
+ else
+ regmap = m24lr->ctl_regmap;
+
+ mutex_lock(&m24lr->lock);
+ ret = m24lr_regmap_read(regmap, buf, size, offset);
+ mutex_unlock(&m24lr->lock);
+
+ return ret;
+}
+
+/**
+ * m24lr_write - write buffer to M24LR device with page alignment handling
+ * @m24lr: pointer to driver context
+ * @buf: data buffer to write
+ * @size: number of bytes to write
+ * @offset: target register address in the device
+ * @is_eeprom: true if the write should target the EEPROM,
+ * false if it should target the system parameters sector.
+ *
+ * Writes data to the M24LR device using regmap, split into chunks no larger
+ * than page_size to respect device-specific write limitations (e.g., page
+ * size or I2C hold-time concerns). Each chunk is aligned to the page boundary
+ * defined by page_size.
+ *
+ * Returns:
+ * Total number of bytes written on success,
+ * A negative error code if any write fails.
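+ *
+ * For example, with a page_size of 4, a 10-byte write at offset 6 is
+ * issued as chunks of 2, 4 and 4 bytes so that no chunk crosses a
+ * page boundary.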
+ */
+static ssize_t m24lr_write(struct m24lr *m24lr, const u8 *buf, size_t size,
+ unsigned int offset, bool is_eeprom)
+{
+ unsigned int n, next_sector;
+ struct regmap *regmap;
+ ssize_t ret = 0;
+ ssize_t err;
+
+ if (is_eeprom)
+ regmap = m24lr->eeprom_regmap;
+ else
+ regmap = m24lr->ctl_regmap;
+
+ n = min_t(unsigned int, size, m24lr->page_size);
+ next_sector = roundup(offset + 1, m24lr->page_size);
+ if (offset + n > next_sector)
+ n = next_sector - offset;
+
+ mutex_lock(&m24lr->lock);
+ while (n) {
+ err = m24lr_regmap_write(regmap, buf + offset, n, offset);
+ if (IS_ERR_VALUE(err)) {
+ if (!ret)
+ ret = err;
+
+ break;
+ }
+
+ offset += n;
+ size -= n;
+ ret += n;
+ n = min_t(unsigned int, size, m24lr->page_size);
+ }
+ mutex_unlock(&m24lr->lock);
+
+ return ret;
+}
+
+/**
+ * m24lr_write_pass - Write password to M24LR04E-R using secure format
+ * @m24lr: Pointer to device control structure
+ * @buf: Input buffer containing hex-encoded password
+ * @count: Number of bytes in @buf
+ * @code: Operation code to embed between password copies
+ *
+ * This function parses a 4-byte password, encodes it in big-endian format,
+ * and constructs a 9-byte sequence of the form:
+ *
+ * [BE(password), code, BE(password)]
+ *
+ * The result is written to register 0x0900 (2304), which is the password
+ * register in M24LR04E-R chip.
+ *
+ * Return: Number of bytes written on success, or negative error code on failure
+ */
+static ssize_t m24lr_write_pass(struct m24lr *m24lr, const char *buf,
+ size_t count, u8 code)
+{
+ __be32 be_pass;
+ u8 output[9];
+ ssize_t ret;
+ u32 pass;
+ int err;
+
+ if (!count)
+ return -EINVAL;
+
+ if (count > 8)
+ return -EINVAL;
+
+ err = kstrtou32(buf, 16, &pass);
+ if (err)
+ return err;
+
+ be_pass = cpu_to_be32(pass);
+
+ memcpy(output, &be_pass, sizeof(be_pass));
+ output[4] = code;
+ memcpy(output + 5, &be_pass, sizeof(be_pass));
+
+ mutex_lock(&m24lr->lock);
+ ret = m24lr_regmap_write(m24lr->ctl_regmap, output, 9, 2304);
+ mutex_unlock(&m24lr->lock);
+
+ return ret;
+}
+
+static ssize_t m24lr_read_reg_le(struct m24lr *m24lr, u64 *val,
+ unsigned int reg_addr,
+ unsigned int reg_size)
+{
+ ssize_t ret;
+ __le64 input = 0;
+
+ ret = m24lr_read(m24lr, (u8 *)&input, reg_size, reg_addr, false);
+ if (IS_ERR_VALUE(ret))
+ return ret;
+
+ if (ret != reg_size)
+ return -EINVAL;
+
+ switch (reg_size) {
+ case 1:
+ *val = *(u8 *)&input;
+ break;
+ case 2:
+ *val = le16_to_cpu((__le16)input);
+ break;
+ case 4:
+ *val = le32_to_cpu((__le32)input);
+ break;
+ case 8:
+ *val = le64_to_cpu((__le64)input);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int m24lr_nvmem_read(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+{
+ ssize_t err;
+ struct m24lr *m24lr = priv;
+
+ if (!bytes)
+ return bytes;
+
+ if (offset + bytes > m24lr->eeprom_size)
+ return -EINVAL;
+
+ err = m24lr_read(m24lr, val, bytes, offset, true);
+ if (IS_ERR_VALUE(err))
+ return err;
+
+ return 0;
+}
+
+static int m24lr_nvmem_write(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+{
+ ssize_t err;
+ struct m24lr *m24lr = priv;
+
+ if (!bytes)
+ return -EINVAL;
+
+ if (offset + bytes > m24lr->eeprom_size)
+ return -EINVAL;
+
+ err = m24lr_write(m24lr, val, bytes, offset, true);
+ if (IS_ERR_VALUE(err))
+ return err;
+
+ return 0;
+}
+
+static ssize_t m24lr_ctl_sss_read(struct file *filep, struct kobject *kobj,
+ const struct bin_attribute *attr, char *buf,
+ loff_t offset, size_t count)
+{
+ struct m24lr *m24lr = attr->private;
+
+ if (!count)
+ return count;
+
+ if (size_add(offset, count) > m24lr->sss_len)
+ return -EINVAL;
+
+ return m24lr_read(m24lr, buf, count, offset, false);
+}
+
+static ssize_t m24lr_ctl_sss_write(struct file *filep, struct kobject *kobj,
+ const struct bin_attribute *attr, char *buf,
+ loff_t offset, size_t count)
+{
+ struct m24lr *m24lr = attr->private;
+
+ if (!count)
+ return -EINVAL;
+
+ if (size_add(offset, count) > m24lr->sss_len)
+ return -EINVAL;
+
+ return m24lr_write(m24lr, buf, count, offset, false);
+}
+static BIN_ATTR(sss, 0600, m24lr_ctl_sss_read, m24lr_ctl_sss_write, 0);
+
+static ssize_t new_pass_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct m24lr *m24lr = i2c_get_clientdata(to_i2c_client(dev));
+
+ return m24lr_write_pass(m24lr, buf, count, 7);
+}
+static DEVICE_ATTR_WO(new_pass);
+
+static ssize_t unlock_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct m24lr *m24lr = i2c_get_clientdata(to_i2c_client(dev));
+
+ return m24lr_write_pass(m24lr, buf, count, 9);
+}
+static DEVICE_ATTR_WO(unlock);
+
+static ssize_t uid_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct m24lr *m24lr = i2c_get_clientdata(to_i2c_client(dev));
+
+ return sysfs_emit(buf, "%llx\n", m24lr->uid);
+}
+static DEVICE_ATTR_RO(uid);
+
+static ssize_t total_sectors_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct m24lr *m24lr = i2c_get_clientdata(to_i2c_client(dev));
+
+ return sysfs_emit(buf, "%x\n", m24lr->sss_len);
+}
+static DEVICE_ATTR_RO(total_sectors);
+
+static struct attribute *m24lr_ctl_dev_attrs[] = {
+ &dev_attr_unlock.attr,
+ &dev_attr_new_pass.attr,
+ &dev_attr_uid.attr,
+ &dev_attr_total_sectors.attr,
+ NULL,
+};
+
+static const struct m24lr_chip *m24lr_get_chip(struct device *dev)
+{
+ const struct m24lr_chip *ret;
+ const struct i2c_device_id *id;
+
+ id = i2c_match_id(m24lr_ids, to_i2c_client(dev));
+
+ if (dev->of_node && of_match_device(m24lr_of_match, dev))
+ ret = of_device_get_match_data(dev);
+ else if (id)
+ ret = (void *)id->driver_data;
+ else
+ ret = acpi_device_get_match_data(dev);
+
+ return ret;
+}
+
+static int m24lr_probe(struct i2c_client *client)
+{
+ struct regmap_config eeprom_regmap_conf = {0};
+ struct nvmem_config nvmem_conf = {0};
+ struct device *dev = &client->dev;
+ struct i2c_client *eeprom_client;
+ const struct m24lr_chip *chip;
+ struct regmap *eeprom_regmap;
+ struct nvmem_device *nvmem;
+ struct regmap *ctl_regmap;
+ struct m24lr *m24lr;
+ u32 regs[2];
+ long err;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -EOPNOTSUPP;
+
+ chip = m24lr_get_chip(dev);
+ if (!chip)
+ return -ENODEV;
+
+ m24lr = devm_kzalloc(dev, sizeof(struct m24lr), GFP_KERNEL);
+ if (!m24lr)
+ return -ENOMEM;
+
+ err = device_property_read_u32_array(dev, "reg", regs, ARRAY_SIZE(regs));
+ if (err)
+ return dev_err_probe(dev, err, "Failed to read 'reg' property\n");
+
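+	/* regs[0] is the control address (this client), regs[1] the EEPROM */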
+ /* Create a second I2C client for the eeprom interface */
+ eeprom_client = devm_i2c_new_dummy_device(dev, client->adapter, regs[1]);
+ if (IS_ERR(eeprom_client))
+ return dev_err_probe(dev, PTR_ERR(eeprom_client),
+ "Failed to create dummy I2C client for the EEPROM\n");
+
+ ctl_regmap = devm_regmap_init_i2c(client, &m24lr_ctl_regmap_conf);
+ if (IS_ERR(ctl_regmap))
+ return dev_err_probe(dev, PTR_ERR(ctl_regmap),
+ "Failed to init regmap\n");
+
+ eeprom_regmap_conf.name = "m24lr_eeprom";
+ eeprom_regmap_conf.reg_bits = 16;
+ eeprom_regmap_conf.val_bits = 8;
+ eeprom_regmap_conf.disable_locking = true;
+ eeprom_regmap_conf.max_register = chip->eeprom_size - 1;
+
+ eeprom_regmap = devm_regmap_init_i2c(eeprom_client,
+ &eeprom_regmap_conf);
+ if (IS_ERR(eeprom_regmap))
+ return dev_err_probe(dev, PTR_ERR(eeprom_regmap),
+ "Failed to init regmap\n");
+
+ mutex_init(&m24lr->lock);
+ m24lr->sss_len = chip->sss_len;
+ m24lr->page_size = chip->page_size;
+ m24lr->eeprom_size = chip->eeprom_size;
+ m24lr->eeprom_regmap = eeprom_regmap;
+ m24lr->ctl_regmap = ctl_regmap;
+
+ nvmem_conf.dev = &eeprom_client->dev;
+ nvmem_conf.owner = THIS_MODULE;
+ nvmem_conf.type = NVMEM_TYPE_EEPROM;
+ nvmem_conf.reg_read = m24lr_nvmem_read;
+ nvmem_conf.reg_write = m24lr_nvmem_write;
+ nvmem_conf.size = chip->eeprom_size;
+ nvmem_conf.word_size = 1;
+ nvmem_conf.stride = 1;
+ nvmem_conf.priv = m24lr;
+
+ nvmem = devm_nvmem_register(dev, &nvmem_conf);
+ if (IS_ERR(nvmem))
+ return dev_err_probe(dev, PTR_ERR(nvmem),
+ "Failed to register nvmem\n");
+
+ i2c_set_clientdata(client, m24lr);
+ i2c_set_clientdata(eeprom_client, m24lr);
+
+ bin_attr_sss.size = chip->sss_len;
+ bin_attr_sss.private = m24lr;
+ err = sysfs_create_bin_file(&dev->kobj, &bin_attr_sss);
+ if (err)
+ return dev_err_probe(dev, err,
+ "Failed to create sss bin file\n");
+
+	/* Probe by reading the UID; on success, store it */
+ err = m24lr_read_reg_le(m24lr, &m24lr->uid, 2324, sizeof(m24lr->uid));
+ if (IS_ERR_VALUE(err))
+ goto remove_bin_file;
+
+ return 0;
+
+remove_bin_file:
+ sysfs_remove_bin_file(&dev->kobj, &bin_attr_sss);
+
+ return err;
+}
+
+static void m24lr_remove(struct i2c_client *client)
+{
+ sysfs_remove_bin_file(&client->dev.kobj, &bin_attr_sss);
+}
+
+ATTRIBUTE_GROUPS(m24lr_ctl_dev);
+
+static struct i2c_driver m24lr_driver = {
+ .driver = {
+ .name = "m24lr",
+ .of_match_table = m24lr_of_match,
+ .dev_groups = m24lr_ctl_dev_groups,
+ },
+ .probe = m24lr_probe,
+ .remove = m24lr_remove,
+ .id_table = m24lr_ids,
+};
+module_i2c_driver(m24lr_driver);
+
+MODULE_AUTHOR("Abd-Alrhman Masalkhi");
+MODULE_DESCRIPTION("ST M24LR control driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 53e88a1bc430..8e1d97873423 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -27,8 +27,7 @@
#define MDSP_DOMAIN_ID (1)
#define SDSP_DOMAIN_ID (2)
#define CDSP_DOMAIN_ID (3)
-#define CDSP1_DOMAIN_ID (4)
-#define FASTRPC_DEV_MAX 5 /* adsp, mdsp, slpi, cdsp, cdsp1 */
+#define GDSP_DOMAIN_ID (4)
#define FASTRPC_MAX_SESSIONS 14
#define FASTRPC_MAX_VMIDS 16
#define FASTRPC_ALIGN 128
@@ -106,8 +105,6 @@
#define miscdev_to_fdevice(d) container_of(d, struct fastrpc_device, miscdev)
-static const char *domains[FASTRPC_DEV_MAX] = { "adsp", "mdsp",
- "sdsp", "cdsp", "cdsp1" };
struct fastrpc_phy_page {
u64 addr; /* physical address */
u64 size; /* size of contiguous region */
@@ -1723,7 +1720,6 @@ static int fastrpc_get_info_from_kernel(struct fastrpc_ioctl_capability *cap,
uint32_t attribute_id = cap->attribute_id;
uint32_t *dsp_attributes;
unsigned long flags;
- uint32_t domain = cap->domain;
int err;
spin_lock_irqsave(&cctx->lock, flags);
@@ -1741,7 +1737,7 @@ static int fastrpc_get_info_from_kernel(struct fastrpc_ioctl_capability *cap,
err = fastrpc_get_info_from_dsp(fl, dsp_attributes, FASTRPC_MAX_DSP_ATTRIBUTES);
if (err == DSP_UNSUPPORTED_API) {
dev_info(&cctx->rpdev->dev,
- "Warning: DSP capabilities not supported on domain: %d\n", domain);
+ "Warning: DSP capabilities not supported\n");
kfree(dsp_attributes);
return -EOPNOTSUPP;
} else if (err) {
@@ -1769,17 +1765,6 @@ static int fastrpc_get_dsp_info(struct fastrpc_user *fl, char __user *argp)
return -EFAULT;
cap.capability = 0;
- if (cap.domain >= FASTRPC_DEV_MAX) {
- dev_err(&fl->cctx->rpdev->dev, "Error: Invalid domain id:%d, err:%d\n",
- cap.domain, err);
- return -ECHRNG;
- }
-
- /* Fastrpc Capablities does not support modem domain */
- if (cap.domain == MDSP_DOMAIN_ID) {
- dev_err(&fl->cctx->rpdev->dev, "Error: modem not supported %d\n", err);
- return -ECHRNG;
- }
if (cap.attribute_id >= FASTRPC_MAX_DSP_ATTRIBUTES) {
dev_err(&fl->cctx->rpdev->dev, "Error: invalid attribute: %d, err: %d\n",
@@ -2255,6 +2240,22 @@ static int fastrpc_device_register(struct device *dev, struct fastrpc_channel_ct
return err;
}
+static int fastrpc_get_domain_id(const char *domain)
+{
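+	/* match the first four characters of the domain label from DT */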
+ if (!strncmp(domain, "adsp", 4))
+ return ADSP_DOMAIN_ID;
+ else if (!strncmp(domain, "cdsp", 4))
+ return CDSP_DOMAIN_ID;
+ else if (!strncmp(domain, "mdsp", 4))
+ return MDSP_DOMAIN_ID;
+ else if (!strncmp(domain, "sdsp", 4))
+ return SDSP_DOMAIN_ID;
+ else if (!strncmp(domain, "gdsp", 4))
+ return GDSP_DOMAIN_ID;
+
+ return -EINVAL;
+}
+
static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
{
struct device *rdev = &rpdev->dev;
@@ -2270,15 +2271,10 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
return err;
}
- for (i = 0; i < FASTRPC_DEV_MAX; i++) {
- if (!strcmp(domains[i], domain)) {
- domain_id = i;
- break;
- }
- }
+ domain_id = fastrpc_get_domain_id(domain);
if (domain_id < 0) {
- dev_info(rdev, "FastRPC Invalid Domain ID %d\n", domain_id);
+ dev_info(rdev, "FastRPC Domain %s not supported\n", domain);
return -EINVAL;
}
@@ -2325,21 +2321,21 @@ static int fastrpc_rpmsg_probe(struct rpmsg_device *rpdev)
case ADSP_DOMAIN_ID:
case MDSP_DOMAIN_ID:
case SDSP_DOMAIN_ID:
- /* Unsigned PD offloading is only supported on CDSP and CDSP1 */
+ /* Unsigned PD offloading is only supported on CDSP and GDSP */
data->unsigned_support = false;
- err = fastrpc_device_register(rdev, data, secure_dsp, domains[domain_id]);
+ err = fastrpc_device_register(rdev, data, secure_dsp, domain);
if (err)
goto err_free_data;
break;
case CDSP_DOMAIN_ID:
- case CDSP1_DOMAIN_ID:
+ case GDSP_DOMAIN_ID:
data->unsigned_support = true;
/* Create both device nodes so that we can allow both Signed and Unsigned PD */
- err = fastrpc_device_register(rdev, data, true, domains[domain_id]);
+ err = fastrpc_device_register(rdev, data, true, domain);
if (err)
goto err_free_data;
- err = fastrpc_device_register(rdev, data, false, domains[domain_id]);
+ err = fastrpc_device_register(rdev, data, false, domain);
if (err)
goto err_deregister_fdev;
break;
diff --git a/drivers/misc/genwqe/card_ddcb.c b/drivers/misc/genwqe/card_ddcb.c
index 500b1feaf1f6..fd7d5cd50d39 100644
--- a/drivers/misc/genwqe/card_ddcb.c
+++ b/drivers/misc/genwqe/card_ddcb.c
@@ -923,7 +923,7 @@ int __genwqe_execute_raw_ddcb(struct genwqe_dev *cd,
}
if (cmd->asv_length > DDCB_ASV_LENGTH) {
dev_err(&pci_dev->dev, "[%s] err: wrong asv_length of %d\n",
- __func__, cmd->asiv_length);
+ __func__, cmd->asv_length);
return -EINVAL;
}
rc = __genwqe_enqueue_ddcb(cd, req, f_flags);
diff --git a/drivers/misc/hisi_hikey_usb.c b/drivers/misc/hisi_hikey_usb.c
index ffe7b945a298..2c6e448a47f1 100644
--- a/drivers/misc/hisi_hikey_usb.c
+++ b/drivers/misc/hisi_hikey_usb.c
@@ -18,6 +18,7 @@
#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
+#include <linux/string_choices.h>
#include <linux/usb/role.h>
#define DEVICE_DRIVER_NAME "hisi_hikey_usb"
@@ -67,7 +68,7 @@ static void hub_power_ctrl(struct hisi_hikey_usb *hisi_hikey_usb, int value)
if (ret)
dev_err(hisi_hikey_usb->dev,
"Can't switch regulator state to %s\n",
- value ? "enabled" : "disabled");
+ str_enabled_disabled(value));
}
static void usb_switch_ctrl(struct hisi_hikey_usb *hisi_hikey_usb,
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index c44de892a61e..2d5c1df82732 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -525,15 +525,9 @@ static ssize_t remote_settings_file_write(struct file *file, const char __user *
if (*offset != 0)
return 0;
- buff = kzalloc (count + 1, GFP_KERNEL);
- if (!buff)
- return -ENOMEM;
-
-
- if (copy_from_user(buff, ubuff, count)) {
- kfree(buff);
- return -EFAULT;
- }
+ buff = memdup_user_nul(ubuff, count);
+ if (IS_ERR(buff))
+ return PTR_ERR(buff);
value = simple_strtoul(buff, NULL, 10);
writel(value, address);
diff --git a/drivers/misc/lis3lv02d/Kconfig b/drivers/misc/lis3lv02d/Kconfig
index 56005243a230..9d546a42a563 100644
--- a/drivers/misc/lis3lv02d/Kconfig
+++ b/drivers/misc/lis3lv02d/Kconfig
@@ -4,7 +4,7 @@
#
config SENSORS_LIS3_SPI
- tristate "STMicroeletronics LIS3LV02Dx three-axis digital accelerometer (SPI)"
+ tristate "STMicroelectronics LIS3LV02Dx three-axis digital accelerometer (SPI)"
depends on !ACPI && SPI_MASTER && INPUT
select SENSORS_LIS3LV02D
help
@@ -20,7 +20,7 @@ config SENSORS_LIS3_SPI
is called lis3lv02d_spi.
config SENSORS_LIS3_I2C
- tristate "STMicroeletronics LIS3LV02Dx three-axis digital accelerometer (I2C)"
+ tristate "STMicroelectronics LIS3LV02Dx three-axis digital accelerometer (I2C)"
depends on I2C && INPUT
select SENSORS_LIS3LV02D
help
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 90dba20b2de7..e6a1d3534663 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -386,7 +386,7 @@ static int mei_nfc_if_version(struct mei_cl *cl,
ret = __mei_cl_send(cl, (u8 *)&cmd, sizeof(cmd), 0,
MEI_CL_IO_TX_BLOCKING);
if (ret < 0) {
- dev_err(bus->dev, "Could not send IF version cmd ret = %d\n", ret);
+ dev_err(&bus->dev, "Could not send IF version cmd ret = %d\n", ret);
return ret;
}
@@ -401,14 +401,14 @@ static int mei_nfc_if_version(struct mei_cl *cl,
bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length, &vtag,
0, 0);
if (bytes_recv < 0 || (size_t)bytes_recv < if_version_length) {
- dev_err(bus->dev, "Could not read IF version ret = %d\n", bytes_recv);
+ dev_err(&bus->dev, "Could not read IF version ret = %d\n", bytes_recv);
ret = -EIO;
goto err;
}
memcpy(ver, reply->data, sizeof(*ver));
- dev_info(bus->dev, "NFC MEI VERSION: IVN 0x%x Vendor ID 0x%x Type 0x%x\n",
+ dev_info(&bus->dev, "NFC MEI VERSION: IVN 0x%x Vendor ID 0x%x Type 0x%x\n",
ver->fw_ivn, ver->vendor_id, ver->radio_type);
err:
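Throughout the mei hunks here, dev_dbg(dev->dev, ...) becomes dev_dbg(&dev->dev, ...) while the runtime-PM and DMA calls move to dev->parent: struct mei_device now embeds its own struct device instead of merely pointing at the physical parent. A rough sketch of the assumed layout, inferred from the usage in these diffs:

struct mei_device {
	struct device dev;	/* the MEI device itself; logging target */
	struct device *parent;	/* physical parent; runtime PM and DMA */
	/* ... */
};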
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 5cc3ad07d5be..38735a41f750 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -637,7 +637,7 @@ EXPORT_SYMBOL_GPL(mei_cldev_enabled);
*/
static bool mei_cl_bus_module_get(struct mei_cl_device *cldev)
{
- return try_module_get(cldev->bus->dev->driver->owner);
+ return try_module_get(cldev->bus->parent->driver->owner);
}
/**
@@ -647,7 +647,7 @@ static bool mei_cl_bus_module_get(struct mei_cl_device *cldev)
*/
static void mei_cl_bus_module_put(struct mei_cl_device *cldev)
{
- module_put(cldev->bus->dev->driver->owner);
+ module_put(cldev->bus->parent->driver->owner);
}
/**
@@ -814,7 +814,7 @@ int mei_cldev_enable(struct mei_cl_device *cldev)
ret = mei_cl_connect(cl, cldev->me_cl, NULL);
if (ret < 0) {
- dev_err(&cldev->dev, "cannot connect\n");
+ dev_dbg(&cldev->dev, "cannot connect\n");
mei_cl_bus_vtag_free(cldev);
}
@@ -1285,16 +1285,20 @@ static const struct bus_type mei_cl_bus_type = {
static struct mei_device *mei_dev_bus_get(struct mei_device *bus)
{
- if (bus)
- get_device(bus->dev);
+ if (bus) {
+ get_device(&bus->dev);
+ get_device(bus->parent);
+ }
return bus;
}
static void mei_dev_bus_put(struct mei_device *bus)
{
- if (bus)
- put_device(bus->dev);
+ if (bus) {
+ put_device(bus->parent);
+ put_device(&bus->dev);
+ }
}
static void mei_cl_bus_dev_release(struct device *dev)
@@ -1328,7 +1332,7 @@ static const struct device_type mei_cl_device_type = {
static inline void mei_cl_bus_set_name(struct mei_cl_device *cldev)
{
dev_set_name(&cldev->dev, "%s-%pUl",
- dev_name(cldev->bus->dev),
+ dev_name(cldev->bus->parent),
mei_me_cl_uuid(cldev->me_cl));
}
@@ -1357,7 +1361,7 @@ static struct mei_cl_device *mei_cl_bus_dev_alloc(struct mei_device *bus,
}
device_initialize(&cldev->dev);
- cldev->dev.parent = bus->dev;
+ cldev->dev.parent = bus->parent;
cldev->dev.bus = &mei_cl_bus_type;
cldev->dev.type = &mei_cl_device_type;
cldev->bus = mei_dev_bus_get(bus);
@@ -1492,7 +1496,7 @@ static void mei_cl_bus_dev_init(struct mei_device *bus,
WARN_ON(!mutex_is_locked(&bus->cl_bus_lock));
- dev_dbg(bus->dev, "initializing %pUl", mei_me_cl_uuid(me_cl));
+ dev_dbg(&bus->dev, "initializing %pUl", mei_me_cl_uuid(me_cl));
if (me_cl->bus_added)
return;
@@ -1543,7 +1547,7 @@ static void mei_cl_bus_rescan(struct mei_device *bus)
}
mutex_unlock(&bus->cl_bus_lock);
- dev_dbg(bus->dev, "rescan end");
+ dev_dbg(&bus->dev, "rescan end");
}
void mei_cl_bus_rescan_work(struct work_struct *work)
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 3db07d2a881f..159e8b841564 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -262,7 +262,7 @@ void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
{
struct mei_me_client *me_cl;
- dev_dbg(dev->dev, "remove %pUl\n", uuid);
+ dev_dbg(&dev->dev, "remove %pUl\n", uuid);
down_write(&dev->me_clients_rwsem);
me_cl = __mei_me_cl_by_uuid(dev, uuid);
@@ -635,12 +635,12 @@ int mei_cl_link(struct mei_cl *cl)
id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
if (id >= MEI_CLIENTS_MAX) {
- dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
+ dev_err(&dev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
return -EMFILE;
}
if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
- dev_err(dev->dev, "open_handle_count exceeded %d",
+ dev_err(&dev->dev, "open_handle_count exceeded %d",
MEI_MAX_OPEN_HANDLE_COUNT);
return -EMFILE;
}
@@ -709,9 +709,9 @@ void mei_host_client_init(struct mei_device *dev)
schedule_work(&dev->bus_rescan_work);
- pm_runtime_mark_last_busy(dev->dev);
- dev_dbg(dev->dev, "rpm: autosuspend\n");
- pm_request_autosuspend(dev->dev);
+ pm_runtime_mark_last_busy(dev->parent);
+ dev_dbg(&dev->dev, "rpm: autosuspend\n");
+ pm_request_autosuspend(dev->parent);
}
/**
@@ -724,12 +724,12 @@ bool mei_hbuf_acquire(struct mei_device *dev)
{
if (mei_pg_state(dev) == MEI_PG_ON ||
mei_pg_in_transition(dev)) {
- dev_dbg(dev->dev, "device is in pg\n");
+ dev_dbg(&dev->dev, "device is in pg\n");
return false;
}
if (!dev->hbuf_is_ready) {
- dev_dbg(dev->dev, "hbuf is not ready\n");
+ dev_dbg(&dev->dev, "hbuf is not ready\n");
return false;
}
@@ -981,9 +981,9 @@ int mei_cl_disconnect(struct mei_cl *cl)
return 0;
}
- rets = pm_runtime_get(dev->dev);
+ rets = pm_runtime_get(dev->parent);
if (rets < 0 && rets != -EINPROGRESS) {
- pm_runtime_put_noidle(dev->dev);
+ pm_runtime_put_noidle(dev->parent);
cl_err(dev, cl, "rpm: get failed %d\n", rets);
return rets;
}
@@ -991,8 +991,8 @@ int mei_cl_disconnect(struct mei_cl *cl)
rets = __mei_cl_disconnect(cl);
cl_dbg(dev, cl, "rpm: autosuspend\n");
- pm_runtime_mark_last_busy(dev->dev);
- pm_runtime_put_autosuspend(dev->dev);
+ pm_runtime_mark_last_busy(dev->parent);
+ pm_runtime_put_autosuspend(dev->parent);
return rets;
}
@@ -1118,9 +1118,9 @@ int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
goto nortpm;
}
- rets = pm_runtime_get(dev->dev);
+ rets = pm_runtime_get(dev->parent);
if (rets < 0 && rets != -EINPROGRESS) {
- pm_runtime_put_noidle(dev->dev);
+ pm_runtime_put_noidle(dev->parent);
cl_err(dev, cl, "rpm: get failed %d\n", rets);
goto nortpm;
}
@@ -1167,8 +1167,8 @@ int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
rets = cl->status;
out:
cl_dbg(dev, cl, "rpm: autosuspend\n");
- pm_runtime_mark_last_busy(dev->dev);
- pm_runtime_put_autosuspend(dev->dev);
+ pm_runtime_mark_last_busy(dev->parent);
+ pm_runtime_put_autosuspend(dev->parent);
mei_io_cb_free(cb);
@@ -1517,9 +1517,9 @@ int mei_cl_notify_request(struct mei_cl *cl,
if (!mei_cl_is_connected(cl))
return -ENODEV;
- rets = pm_runtime_get(dev->dev);
+ rets = pm_runtime_get(dev->parent);
if (rets < 0 && rets != -EINPROGRESS) {
- pm_runtime_put_noidle(dev->dev);
+ pm_runtime_put_noidle(dev->parent);
cl_err(dev, cl, "rpm: get failed %d\n", rets);
return rets;
}
@@ -1554,8 +1554,8 @@ int mei_cl_notify_request(struct mei_cl *cl,
out:
cl_dbg(dev, cl, "rpm: autosuspend\n");
- pm_runtime_mark_last_busy(dev->dev);
- pm_runtime_put_autosuspend(dev->dev);
+ pm_runtime_mark_last_busy(dev->parent);
+ pm_runtime_put_autosuspend(dev->parent);
mei_io_cb_free(cb);
return rets;
@@ -1683,9 +1683,9 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
mei_cl_set_read_by_fp(cl, fp);
- rets = pm_runtime_get(dev->dev);
+ rets = pm_runtime_get(dev->parent);
if (rets < 0 && rets != -EINPROGRESS) {
- pm_runtime_put_noidle(dev->dev);
+ pm_runtime_put_noidle(dev->parent);
cl_err(dev, cl, "rpm: get failed %d\n", rets);
goto nortpm;
}
@@ -1702,8 +1702,8 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
out:
cl_dbg(dev, cl, "rpm: autosuspend\n");
- pm_runtime_mark_last_busy(dev->dev);
- pm_runtime_put_autosuspend(dev->dev);
+ pm_runtime_mark_last_busy(dev->parent);
+ pm_runtime_put_autosuspend(dev->parent);
nortpm:
if (rets)
mei_io_cb_free(cb);
@@ -1972,9 +1972,9 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, unsigned long time
blocking = cb->blocking;
data = buf->data;
- rets = pm_runtime_get(dev->dev);
+ rets = pm_runtime_get(dev->parent);
if (rets < 0 && rets != -EINPROGRESS) {
- pm_runtime_put_noidle(dev->dev);
+ pm_runtime_put_noidle(dev->parent);
cl_err(dev, cl, "rpm: get failed %zd\n", rets);
goto free;
}
@@ -2092,8 +2092,8 @@ out:
rets = buf_len;
err:
cl_dbg(dev, cl, "rpm: autosuspend\n");
- pm_runtime_mark_last_busy(dev->dev);
- pm_runtime_put_autosuspend(dev->dev);
+ pm_runtime_mark_last_busy(dev->parent);
+ pm_runtime_put_autosuspend(dev->parent);
free:
mei_io_cb_free(cb);
@@ -2119,8 +2119,8 @@ void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
if (waitqueue_active(&cl->tx_wait)) {
wake_up_interruptible(&cl->tx_wait);
} else {
- pm_runtime_mark_last_busy(dev->dev);
- pm_request_autosuspend(dev->dev);
+ pm_runtime_mark_last_busy(dev->parent);
+ pm_request_autosuspend(dev->parent);
}
break;
@@ -2251,7 +2251,7 @@ int mei_cl_irq_dma_unmap(struct mei_cl *cl, struct mei_cl_cb *cb,
static int mei_cl_dma_alloc(struct mei_cl *cl, u8 buf_id, size_t size)
{
- cl->dma.vaddr = dmam_alloc_coherent(cl->dev->dev, size,
+ cl->dma.vaddr = dmam_alloc_coherent(&cl->dev->dev, size,
&cl->dma.daddr, GFP_KERNEL);
if (!cl->dma.vaddr)
return -ENOMEM;
@@ -2265,7 +2265,7 @@ static int mei_cl_dma_alloc(struct mei_cl *cl, u8 buf_id, size_t size)
static void mei_cl_dma_free(struct mei_cl *cl)
{
cl->dma.buffer_id = 0;
- dmam_free_coherent(cl->dev->dev,
+ dmam_free_coherent(&cl->dev->dev,
cl->dma.size, cl->dma.vaddr, cl->dma.daddr);
cl->dma.size = 0;
cl->dma.vaddr = NULL;
@@ -2321,16 +2321,16 @@ int mei_cl_dma_alloc_and_map(struct mei_cl *cl, const struct file *fp,
return -EPROTO;
}
- rets = pm_runtime_get(dev->dev);
+ rets = pm_runtime_get(dev->parent);
if (rets < 0 && rets != -EINPROGRESS) {
- pm_runtime_put_noidle(dev->dev);
+ pm_runtime_put_noidle(dev->parent);
cl_err(dev, cl, "rpm: get failed %d\n", rets);
return rets;
}
rets = mei_cl_dma_alloc(cl, buffer_id, size);
if (rets) {
- pm_runtime_put_noidle(dev->dev);
+ pm_runtime_put_noidle(dev->parent);
return rets;
}
@@ -2366,8 +2366,8 @@ out:
mei_cl_dma_free(cl);
cl_dbg(dev, cl, "rpm: autosuspend\n");
- pm_runtime_mark_last_busy(dev->dev);
- pm_runtime_put_autosuspend(dev->dev);
+ pm_runtime_mark_last_busy(dev->parent);
+ pm_runtime_put_autosuspend(dev->parent);
mei_io_cb_free(cb);
return rets;
@@ -2406,9 +2406,9 @@ int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp)
if (!cl->dma_mapped)
return -EPROTO;
- rets = pm_runtime_get(dev->dev);
+ rets = pm_runtime_get(dev->parent);
if (rets < 0 && rets != -EINPROGRESS) {
- pm_runtime_put_noidle(dev->dev);
+ pm_runtime_put_noidle(dev->parent);
cl_err(dev, cl, "rpm: get failed %d\n", rets);
return rets;
}
@@ -2444,8 +2444,8 @@ int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp)
mei_cl_dma_free(cl);
out:
cl_dbg(dev, cl, "rpm: autosuspend\n");
- pm_runtime_mark_last_busy(dev->dev);
- pm_runtime_put_autosuspend(dev->dev);
+ pm_runtime_mark_last_busy(dev->parent);
+ pm_runtime_put_autosuspend(dev->parent);
mei_io_cb_free(cb);
return rets;
diff --git a/drivers/misc/mei/client.h b/drivers/misc/mei/client.h
index 01ed26a148c4..031114478bcb 100644
--- a/drivers/misc/mei/client.h
+++ b/drivers/misc/mei/client.h
@@ -275,12 +275,12 @@ int mei_cl_dma_unmap(struct mei_cl *cl, const struct file *fp);
#define MEI_CL_PRM(cl) (cl)->host_client_id, mei_cl_me_id(cl)
#define cl_dbg(dev, cl, format, arg...) \
- dev_dbg((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg)
+ dev_dbg(&(dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg)
#define cl_warn(dev, cl, format, arg...) \
- dev_warn((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg)
+ dev_warn(&(dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg)
#define cl_err(dev, cl, format, arg...) \
- dev_err((dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg)
+ dev_err(&(dev)->dev, MEI_CL_FMT format, MEI_CL_PRM(cl), ##arg)
#endif /* _MEI_CLIENT_H_ */
diff --git a/drivers/misc/mei/dma-ring.c b/drivers/misc/mei/dma-ring.c
index 651e77ef82bd..6277c4a5b0fd 100644
--- a/drivers/misc/mei/dma-ring.c
+++ b/drivers/misc/mei/dma-ring.c
@@ -30,7 +30,7 @@ static int mei_dmam_dscr_alloc(struct mei_device *dev,
if (dscr->vaddr)
return 0;
- dscr->vaddr = dmam_alloc_coherent(dev->dev, dscr->size, &dscr->daddr,
+ dscr->vaddr = dmam_alloc_coherent(dev->parent, dscr->size, &dscr->daddr,
GFP_KERNEL);
if (!dscr->vaddr)
return -ENOMEM;
@@ -50,7 +50,7 @@ static void mei_dmam_dscr_free(struct mei_device *dev,
if (!dscr->vaddr)
return;
- dmam_free_coherent(dev->dev, dscr->size, dscr->vaddr, dscr->daddr);
+ dmam_free_coherent(dev->parent, dscr->size, dscr->vaddr, dscr->daddr);
dscr->vaddr = NULL;
}
@@ -177,7 +177,7 @@ void mei_dma_ring_read(struct mei_device *dev, unsigned char *buf, u32 len)
if (WARN_ON(!ctrl))
return;
- dev_dbg(dev->dev, "reading from dma %u bytes\n", len);
+ dev_dbg(&dev->dev, "reading from dma %u bytes\n", len);
if (!len)
return;
@@ -254,7 +254,7 @@ void mei_dma_ring_write(struct mei_device *dev, unsigned char *buf, u32 len)
if (WARN_ON(!ctrl))
return;
- dev_dbg(dev->dev, "writing to dma %u bytes\n", len);
+ dev_dbg(&dev->dev, "writing to dma %u bytes\n", len);
hbuf_depth = mei_dma_ring_hbuf_depth(dev);
wr_idx = READ_ONCE(ctrl->hbuf_wr_idx) & (hbuf_depth - 1);
slots = mei_data2slots(len);
diff --git a/drivers/misc/mei/gsc-me.c b/drivers/misc/mei/gsc-me.c
index 5a8c26c3df13..93cba090ea08 100644
--- a/drivers/misc/mei/gsc-me.c
+++ b/drivers/misc/mei/gsc-me.c
@@ -106,11 +106,15 @@ static int mei_gsc_probe(struct auxiliary_device *aux_dev,
}
}
+ ret = mei_register(dev, device);
+ if (ret)
+ goto deinterrupt;
+
pm_runtime_get_noresume(device);
pm_runtime_set_active(device);
pm_runtime_enable(device);
- /* Continue to char device setup in spite of firmware handshake failure.
+ /* Continue in spite of firmware handshake failure.
* In order to provide access to the firmware status registers to the user
* space via sysfs.
*/
@@ -120,18 +124,12 @@ static int mei_gsc_probe(struct auxiliary_device *aux_dev,
pm_runtime_set_autosuspend_delay(device, MEI_GSC_RPM_TIMEOUT);
pm_runtime_use_autosuspend(device);
- ret = mei_register(dev, device);
- if (ret)
- goto register_err;
-
pm_runtime_put_noidle(device);
return 0;
-register_err:
- mei_stop(dev);
+deinterrupt:
if (!mei_me_hw_use_polling(hw))
devm_free_irq(device, hw->irq, dev);
-
err:
dev_err(device, "probe failed: %d\n", ret);
dev_set_drvdata(device, NULL);
@@ -152,13 +150,13 @@ static void mei_gsc_remove(struct auxiliary_device *aux_dev)
if (mei_me_hw_use_polling(hw))
kthread_stop(hw->polling_thread);
- mei_deregister(dev);
-
pm_runtime_disable(&aux_dev->dev);
mei_disable_interrupts(dev);
if (!mei_me_hw_use_polling(hw))
devm_free_irq(&aux_dev->dev, hw->irq, dev);
+
+ mei_deregister(dev);
}
static int __maybe_unused mei_gsc_pm_suspend(struct device *device)
@@ -252,7 +250,7 @@ static int __maybe_unused mei_gsc_pm_runtime_resume(struct device *device)
irq_ret = mei_me_irq_thread_handler(1, dev);
if (irq_ret != IRQ_HANDLED)
- dev_err(dev->dev, "thread handler fail %d\n", irq_ret);
+ dev_err(&dev->dev, "thread handler fail %d\n", irq_ret);
return 0;
}
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 4fe9a2752d43..ccd9df5d1c7d 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -239,7 +239,7 @@ int mei_hbm_start_wait(struct mei_device *dev)
if (ret == 0 && (dev->hbm_state <= MEI_HBM_STARTING)) {
dev->hbm_state = MEI_HBM_IDLE;
- dev_err(dev->dev, "waiting for mei start failed\n");
+ dev_err(&dev->dev, "waiting for mei start failed\n");
return -ETIME;
}
return 0;
@@ -271,8 +271,7 @@ int mei_hbm_start_req(struct mei_device *dev)
dev->hbm_state = MEI_HBM_IDLE;
ret = mei_hbm_write_message(dev, &mei_hdr, &req);
if (ret) {
- dev_err(dev->dev, "version message write failed: ret = %d\n",
- ret);
+ dev_err(&dev->dev, "version message write failed: ret = %d\n", ret);
return ret;
}
@@ -312,8 +311,7 @@ static int mei_hbm_dma_setup_req(struct mei_device *dev)
ret = mei_hbm_write_message(dev, &mei_hdr, &req);
if (ret) {
- dev_err(dev->dev, "dma setup request write failed: ret = %d.\n",
- ret);
+ dev_err(&dev->dev, "dma setup request write failed: ret = %d.\n", ret);
return ret;
}
@@ -351,8 +349,7 @@ static int mei_hbm_capabilities_req(struct mei_device *dev)
ret = mei_hbm_write_message(dev, &mei_hdr, &req);
if (ret) {
- dev_err(dev->dev,
- "capabilities request write failed: ret = %d.\n", ret);
+ dev_err(&dev->dev, "capabilities request write failed: ret = %d.\n", ret);
return ret;
}
@@ -386,8 +383,7 @@ static int mei_hbm_enum_clients_req(struct mei_device *dev)
ret = mei_hbm_write_message(dev, &mei_hdr, &req);
if (ret) {
- dev_err(dev->dev, "enumeration request write failed: ret = %d.\n",
- ret);
+ dev_err(&dev->dev, "enumeration request write failed: ret = %d.\n", ret);
return ret;
}
dev->hbm_state = MEI_HBM_ENUM_CLIENTS;
@@ -443,7 +439,7 @@ static int mei_hbm_add_cl_resp(struct mei_device *dev, u8 addr, u8 status)
struct hbm_add_client_response resp;
int ret;
- dev_dbg(dev->dev, "adding client response\n");
+ dev_dbg(&dev->dev, "adding client response\n");
mei_hbm_hdr(&mei_hdr, sizeof(resp));
@@ -454,8 +450,7 @@ static int mei_hbm_add_cl_resp(struct mei_device *dev, u8 addr, u8 status)
ret = mei_hbm_write_message(dev, &mei_hdr, &resp);
if (ret)
- dev_err(dev->dev, "add client response write failed: ret = %d\n",
- ret);
+ dev_err(&dev->dev, "add client response write failed: ret = %d\n", ret);
return ret;
}
@@ -752,7 +747,7 @@ static int mei_hbm_prop_req(struct mei_device *dev, unsigned long start_idx)
ret = mei_hbm_write_message(dev, &mei_hdr, &req);
if (ret) {
- dev_err(dev->dev, "properties request write failed: ret = %d\n",
+ dev_err(&dev->dev, "properties request write failed: ret = %d\n",
ret);
return ret;
}
@@ -788,7 +783,7 @@ int mei_hbm_pg(struct mei_device *dev, u8 pg_cmd)
ret = mei_hbm_write_message(dev, &mei_hdr, &req);
if (ret)
- dev_err(dev->dev, "power gate command write failed.\n");
+ dev_err(&dev->dev, "power gate command write failed.\n");
return ret;
}
EXPORT_SYMBOL_GPL(mei_hbm_pg);
@@ -847,7 +842,7 @@ static int mei_hbm_add_single_tx_flow_ctrl_creds(struct mei_device *dev,
me_cl = mei_me_cl_by_id(dev, fctrl->me_addr);
if (!me_cl) {
- dev_err(dev->dev, "no such me client %d\n", fctrl->me_addr);
+ dev_err(&dev->dev, "no such me client %d\n", fctrl->me_addr);
return -ENOENT;
}
@@ -857,7 +852,7 @@ static int mei_hbm_add_single_tx_flow_ctrl_creds(struct mei_device *dev,
}
me_cl->tx_flow_ctrl_creds++;
- dev_dbg(dev->dev, "recv flow ctrl msg ME %d (single) creds = %d.\n",
+ dev_dbg(&dev->dev, "recv flow ctrl msg ME %d (single) creds = %d.\n",
fctrl->me_addr, me_cl->tx_flow_ctrl_creds);
rets = 0;
@@ -1085,7 +1080,7 @@ static int mei_hbm_pg_enter_res(struct mei_device *dev)
{
if (mei_pg_state(dev) != MEI_PG_OFF ||
dev->pg_event != MEI_PG_EVENT_WAIT) {
- dev_err(dev->dev, "hbm: pg entry response: state mismatch [%s, %d]\n",
+ dev_err(&dev->dev, "hbm: pg entry response: state mismatch [%s, %d]\n",
mei_pg_state_str(mei_pg_state(dev)), dev->pg_event);
return -EPROTO;
}
@@ -1103,7 +1098,7 @@ static int mei_hbm_pg_enter_res(struct mei_device *dev)
*/
void mei_hbm_pg_resume(struct mei_device *dev)
{
- pm_request_resume(dev->dev);
+ pm_request_resume(dev->parent);
}
EXPORT_SYMBOL_GPL(mei_hbm_pg_resume);
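
Runtime PM state belongs to the bus device (the PCI or auxiliary parent), not to the embedded character device, so the pm_runtime_*()/pm_request_*() calls are redirected to dev->parent. The pattern, as used throughout this diff:

    pm_request_resume(dev->parent);
    pm_runtime_mark_last_busy(dev->parent);
    pm_request_autosuspend(dev->parent);
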
@@ -1119,7 +1114,7 @@ static int mei_hbm_pg_exit_res(struct mei_device *dev)
if (mei_pg_state(dev) != MEI_PG_ON ||
(dev->pg_event != MEI_PG_EVENT_WAIT &&
dev->pg_event != MEI_PG_EVENT_IDLE)) {
- dev_err(dev->dev, "hbm: pg exit response: state mismatch [%s, %d]\n",
+ dev_err(&dev->dev, "hbm: pg exit response: state mismatch [%s, %d]\n",
mei_pg_state_str(mei_pg_state(dev)), dev->pg_event);
return -EPROTO;
}
@@ -1276,19 +1271,19 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
* hbm is put to idle during system reset
*/
if (dev->hbm_state == MEI_HBM_IDLE) {
- dev_dbg(dev->dev, "hbm: state is idle ignore spurious messages\n");
+ dev_dbg(&dev->dev, "hbm: state is idle ignore spurious messages\n");
return 0;
}
switch (mei_msg->hbm_cmd) {
case HOST_START_RES_CMD:
- dev_dbg(dev->dev, "hbm: start: response message received.\n");
+ dev_dbg(&dev->dev, "hbm: start: response message received.\n");
dev->init_clients_timer = 0;
version_res = (struct hbm_host_version_response *)mei_msg;
- dev_dbg(dev->dev, "HBM VERSION: DRIVER=%02d:%02d DEVICE=%02d:%02d\n",
+ dev_dbg(&dev->dev, "HBM VERSION: DRIVER=%02d:%02d DEVICE=%02d:%02d\n",
HBM_MAJOR_VERSION, HBM_MINOR_VERSION,
version_res->me_max_version.major_version,
version_res->me_max_version.minor_version);
@@ -1304,11 +1299,11 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
}
if (!mei_hbm_version_is_supported(dev)) {
- dev_warn(dev->dev, "hbm: start: version mismatch - stopping the driver.\n");
+ dev_warn(&dev->dev, "hbm: start: version mismatch - stopping the driver.\n");
dev->hbm_state = MEI_HBM_STOPPED;
if (mei_hbm_stop_req(dev)) {
- dev_err(dev->dev, "hbm: start: failed to send stop request\n");
+ dev_err(&dev->dev, "hbm: start: failed to send stop request\n");
return -EIO;
}
break;
@@ -1320,10 +1315,10 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
dev->hbm_state != MEI_HBM_STARTING) {
if (dev->dev_state == MEI_DEV_POWER_DOWN ||
dev->dev_state == MEI_DEV_POWERING_DOWN) {
- dev_dbg(dev->dev, "hbm: start: on shutdown, ignoring\n");
+ dev_dbg(&dev->dev, "hbm: start: on shutdown, ignoring\n");
return 0;
}
- dev_err(dev->dev, "hbm: start: state mismatch, [%d, %d]\n",
+ dev_err(&dev->dev, "hbm: start: state mismatch, [%d, %d]\n",
dev->dev_state, dev->hbm_state);
return -EPROTO;
}
@@ -1337,7 +1332,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
if (dev->hbm_f_dr_supported) {
if (mei_dmam_ring_alloc(dev))
- dev_info(dev->dev, "running w/o dma ring\n");
+ dev_info(&dev->dev, "running w/o dma ring\n");
if (mei_dma_ring_is_allocated(dev)) {
if (mei_hbm_dma_setup_req(dev))
return -EIO;
@@ -1357,7 +1352,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
break;
case MEI_HBM_CAPABILITIES_RES_CMD:
- dev_dbg(dev->dev, "hbm: capabilities response: message received.\n");
+ dev_dbg(&dev->dev, "hbm: capabilities response: message received.\n");
dev->init_clients_timer = 0;
@@ -1365,10 +1360,10 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
dev->hbm_state != MEI_HBM_CAP_SETUP) {
if (dev->dev_state == MEI_DEV_POWER_DOWN ||
dev->dev_state == MEI_DEV_POWERING_DOWN) {
- dev_dbg(dev->dev, "hbm: capabilities response: on shutdown, ignoring\n");
+ dev_dbg(&dev->dev, "hbm: capabilities response: on shutdown, ignoring\n");
return 0;
}
- dev_err(dev->dev, "hbm: capabilities response: state mismatch, [%d, %d]\n",
+ dev_err(&dev->dev, "hbm: capabilities response: state mismatch, [%d, %d]\n",
dev->dev_state, dev->hbm_state);
return -EPROTO;
}
@@ -1384,7 +1379,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
if (dev->hbm_f_dr_supported) {
if (mei_dmam_ring_alloc(dev))
- dev_info(dev->dev, "running w/o dma ring\n");
+ dev_info(&dev->dev, "running w/o dma ring\n");
if (mei_dma_ring_is_allocated(dev)) {
if (mei_hbm_dma_setup_req(dev))
return -EIO;
@@ -1400,7 +1395,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
break;
case MEI_HBM_DMA_SETUP_RES_CMD:
- dev_dbg(dev->dev, "hbm: dma setup response: message received.\n");
+ dev_dbg(&dev->dev, "hbm: dma setup response: message received.\n");
dev->init_clients_timer = 0;
@@ -1408,10 +1403,10 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
dev->hbm_state != MEI_HBM_DR_SETUP) {
if (dev->dev_state == MEI_DEV_POWER_DOWN ||
dev->dev_state == MEI_DEV_POWERING_DOWN) {
- dev_dbg(dev->dev, "hbm: dma setup response: on shutdown, ignoring\n");
+ dev_dbg(&dev->dev, "hbm: dma setup response: on shutdown, ignoring\n");
return 0;
}
- dev_err(dev->dev, "hbm: dma setup response: state mismatch, [%d, %d]\n",
+ dev_err(&dev->dev, "hbm: dma setup response: state mismatch, [%d, %d]\n",
dev->dev_state, dev->hbm_state);
return -EPROTO;
}
@@ -1422,9 +1417,9 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
u8 status = dma_setup_res->status;
if (status == MEI_HBMS_NOT_ALLOWED) {
- dev_dbg(dev->dev, "hbm: dma setup not allowed\n");
+ dev_dbg(&dev->dev, "hbm: dma setup not allowed\n");
} else {
- dev_info(dev->dev, "hbm: dma setup response: failure = %d %s\n",
+ dev_info(&dev->dev, "hbm: dma setup response: failure = %d %s\n",
status,
mei_hbm_status_str(status));
}
@@ -1437,38 +1432,38 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
break;
case CLIENT_CONNECT_RES_CMD:
- dev_dbg(dev->dev, "hbm: client connect response: message received.\n");
+ dev_dbg(&dev->dev, "hbm: client connect response: message received.\n");
mei_hbm_cl_res(dev, cl_cmd, MEI_FOP_CONNECT);
break;
case CLIENT_DISCONNECT_RES_CMD:
- dev_dbg(dev->dev, "hbm: client disconnect response: message received.\n");
+ dev_dbg(&dev->dev, "hbm: client disconnect response: message received.\n");
mei_hbm_cl_res(dev, cl_cmd, MEI_FOP_DISCONNECT);
break;
case MEI_FLOW_CONTROL_CMD:
- dev_dbg(dev->dev, "hbm: client flow control response: message received.\n");
+ dev_dbg(&dev->dev, "hbm: client flow control response: message received.\n");
fctrl = (struct hbm_flow_control *)mei_msg;
mei_hbm_cl_tx_flow_ctrl_creds_res(dev, fctrl);
break;
case MEI_PG_ISOLATION_ENTRY_RES_CMD:
- dev_dbg(dev->dev, "hbm: power gate isolation entry response received\n");
+ dev_dbg(&dev->dev, "hbm: power gate isolation entry response received\n");
ret = mei_hbm_pg_enter_res(dev);
if (ret)
return ret;
break;
case MEI_PG_ISOLATION_EXIT_REQ_CMD:
- dev_dbg(dev->dev, "hbm: power gate isolation exit request received\n");
+ dev_dbg(&dev->dev, "hbm: power gate isolation exit request received\n");
ret = mei_hbm_pg_exit_res(dev);
if (ret)
return ret;
break;
case HOST_CLIENT_PROPERTIES_RES_CMD:
- dev_dbg(dev->dev, "hbm: properties response: message received.\n");
+ dev_dbg(&dev->dev, "hbm: properties response: message received.\n");
dev->init_clients_timer = 0;
@@ -1476,10 +1471,10 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
dev->hbm_state != MEI_HBM_CLIENT_PROPERTIES) {
if (dev->dev_state == MEI_DEV_POWER_DOWN ||
dev->dev_state == MEI_DEV_POWERING_DOWN) {
- dev_dbg(dev->dev, "hbm: properties response: on shutdown, ignoring\n");
+ dev_dbg(&dev->dev, "hbm: properties response: on shutdown, ignoring\n");
return 0;
}
- dev_err(dev->dev, "hbm: properties response: state mismatch, [%d, %d]\n",
+ dev_err(&dev->dev, "hbm: properties response: state mismatch, [%d, %d]\n",
dev->dev_state, dev->hbm_state);
return -EPROTO;
}
@@ -1487,10 +1482,10 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
props_res = (struct hbm_props_response *)mei_msg;
if (props_res->status == MEI_HBMS_CLIENT_NOT_FOUND) {
- dev_dbg(dev->dev, "hbm: properties response: %d CLIENT_NOT_FOUND\n",
+ dev_dbg(&dev->dev, "hbm: properties response: %d CLIENT_NOT_FOUND\n",
props_res->me_addr);
} else if (props_res->status) {
- dev_err(dev->dev, "hbm: properties response: wrong status = %d %s\n",
+ dev_err(&dev->dev, "hbm: properties response: wrong status = %d %s\n",
props_res->status,
mei_hbm_status_str(props_res->status));
return -EPROTO;
@@ -1505,7 +1500,7 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
break;
case HOST_ENUM_RES_CMD:
- dev_dbg(dev->dev, "hbm: enumeration response: message received\n");
+ dev_dbg(&dev->dev, "hbm: enumeration response: message received\n");
dev->init_clients_timer = 0;
@@ -1519,10 +1514,10 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
dev->hbm_state != MEI_HBM_ENUM_CLIENTS) {
if (dev->dev_state == MEI_DEV_POWER_DOWN ||
dev->dev_state == MEI_DEV_POWERING_DOWN) {
- dev_dbg(dev->dev, "hbm: enumeration response: on shutdown, ignoring\n");
+ dev_dbg(&dev->dev, "hbm: enumeration response: on shutdown, ignoring\n");
return 0;
}
- dev_err(dev->dev, "hbm: enumeration response: state mismatch, [%d, %d]\n",
+ dev_err(&dev->dev, "hbm: enumeration response: state mismatch, [%d, %d]\n",
dev->dev_state, dev->hbm_state);
return -EPROTO;
}
@@ -1536,77 +1531,77 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
break;
case HOST_STOP_RES_CMD:
- dev_dbg(dev->dev, "hbm: stop response: message received\n");
+ dev_dbg(&dev->dev, "hbm: stop response: message received\n");
dev->init_clients_timer = 0;
if (dev->hbm_state != MEI_HBM_STOPPED) {
- dev_err(dev->dev, "hbm: stop response: state mismatch, [%d, %d]\n",
+ dev_err(&dev->dev, "hbm: stop response: state mismatch, [%d, %d]\n",
dev->dev_state, dev->hbm_state);
return -EPROTO;
}
mei_set_devstate(dev, MEI_DEV_POWER_DOWN);
- dev_info(dev->dev, "hbm: stop response: resetting.\n");
+ dev_info(&dev->dev, "hbm: stop response: resetting.\n");
/* force the reset */
return -EPROTO;
case CLIENT_DISCONNECT_REQ_CMD:
- dev_dbg(dev->dev, "hbm: disconnect request: message received\n");
+ dev_dbg(&dev->dev, "hbm: disconnect request: message received\n");
disconnect_req = (struct hbm_client_connect_request *)mei_msg;
mei_hbm_fw_disconnect_req(dev, disconnect_req);
break;
case ME_STOP_REQ_CMD:
- dev_dbg(dev->dev, "hbm: stop request: message received\n");
+ dev_dbg(&dev->dev, "hbm: stop request: message received\n");
dev->hbm_state = MEI_HBM_STOPPED;
if (mei_hbm_stop_req(dev)) {
- dev_err(dev->dev, "hbm: stop request: failed to send stop request\n");
+ dev_err(&dev->dev, "hbm: stop request: failed to send stop request\n");
return -EIO;
}
break;
case MEI_HBM_ADD_CLIENT_REQ_CMD:
- dev_dbg(dev->dev, "hbm: add client request received\n");
+ dev_dbg(&dev->dev, "hbm: add client request received\n");
/*
* after the host receives the enum_resp
* message clients may be added or removed
*/
if (dev->hbm_state <= MEI_HBM_ENUM_CLIENTS ||
dev->hbm_state >= MEI_HBM_STOPPED) {
- dev_err(dev->dev, "hbm: add client: state mismatch, [%d, %d]\n",
+ dev_err(&dev->dev, "hbm: add client: state mismatch, [%d, %d]\n",
dev->dev_state, dev->hbm_state);
return -EPROTO;
}
add_cl_req = (struct hbm_add_client_request *)mei_msg;
ret = mei_hbm_fw_add_cl_req(dev, add_cl_req);
if (ret) {
- dev_err(dev->dev, "hbm: add client: failed to send response %d\n",
+ dev_err(&dev->dev, "hbm: add client: failed to send response %d\n",
ret);
return -EIO;
}
- dev_dbg(dev->dev, "hbm: add client request processed\n");
+ dev_dbg(&dev->dev, "hbm: add client request processed\n");
break;
case MEI_HBM_NOTIFY_RES_CMD:
- dev_dbg(dev->dev, "hbm: notify response received\n");
+ dev_dbg(&dev->dev, "hbm: notify response received\n");
mei_hbm_cl_res(dev, cl_cmd, notify_res_to_fop(cl_cmd));
break;
case MEI_HBM_NOTIFICATION_CMD:
- dev_dbg(dev->dev, "hbm: notification\n");
+ dev_dbg(&dev->dev, "hbm: notification\n");
mei_hbm_cl_notify(dev, cl_cmd);
break;
case MEI_HBM_CLIENT_DMA_MAP_RES_CMD:
- dev_dbg(dev->dev, "hbm: client dma map response: message received.\n");
+ dev_dbg(&dev->dev, "hbm: client dma map response: message received.\n");
client_dma_res = (struct hbm_client_dma_response *)mei_msg;
mei_hbm_cl_dma_map_res(dev, client_dma_res);
break;
case MEI_HBM_CLIENT_DMA_UNMAP_RES_CMD:
- dev_dbg(dev->dev, "hbm: client dma unmap response: message received.\n");
+ dev_dbg(&dev->dev, "hbm: client dma unmap response: message received.\n");
client_dma_res = (struct hbm_client_dma_response *)mei_msg;
mei_hbm_cl_dma_unmap_res(dev, client_dma_res);
break;
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index d11a0740b47c..d4612c659784 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -84,7 +84,7 @@ static inline u32 mei_me_mecsr_read(const struct mei_device *dev)
u32 reg;
reg = mei_me_reg_read(to_me_hw(dev), ME_CSR_HA);
- trace_mei_reg_read(dev->dev, "ME_CSR_HA", ME_CSR_HA, reg);
+ trace_mei_reg_read(&dev->dev, "ME_CSR_HA", ME_CSR_HA, reg);
return reg;
}
@@ -101,7 +101,7 @@ static inline u32 mei_hcsr_read(const struct mei_device *dev)
u32 reg;
reg = mei_me_reg_read(to_me_hw(dev), H_CSR);
- trace_mei_reg_read(dev->dev, "H_CSR", H_CSR, reg);
+ trace_mei_reg_read(&dev->dev, "H_CSR", H_CSR, reg);
return reg;
}
@@ -114,7 +114,7 @@ static inline u32 mei_hcsr_read(const struct mei_device *dev)
*/
static inline void mei_hcsr_write(struct mei_device *dev, u32 reg)
{
- trace_mei_reg_write(dev->dev, "H_CSR", H_CSR, reg);
+ trace_mei_reg_write(&dev->dev, "H_CSR", H_CSR, reg);
mei_me_reg_write(to_me_hw(dev), H_CSR, reg);
}
@@ -156,7 +156,7 @@ static inline u32 mei_me_d0i3c_read(const struct mei_device *dev)
u32 reg;
reg = mei_me_reg_read(to_me_hw(dev), H_D0I3C);
- trace_mei_reg_read(dev->dev, "H_D0I3C", H_D0I3C, reg);
+ trace_mei_reg_read(&dev->dev, "H_D0I3C", H_D0I3C, reg);
return reg;
}
@@ -169,7 +169,7 @@ static inline u32 mei_me_d0i3c_read(const struct mei_device *dev)
*/
static inline void mei_me_d0i3c_write(struct mei_device *dev, u32 reg)
{
- trace_mei_reg_write(dev->dev, "H_D0I3C", H_D0I3C, reg);
+ trace_mei_reg_write(&dev->dev, "H_D0I3C", H_D0I3C, reg);
mei_me_reg_write(to_me_hw(dev), H_D0I3C, reg);
}
@@ -189,7 +189,7 @@ static int mei_me_trc_status(struct mei_device *dev, u32 *trc)
return -EOPNOTSUPP;
*trc = mei_me_reg_read(hw, ME_TRC);
- trace_mei_reg_read(dev->dev, "ME_TRC", ME_TRC, *trc);
+ trace_mei_reg_read(&dev->dev, "ME_TRC", ME_TRC, *trc);
return 0;
}
@@ -217,7 +217,7 @@ static int mei_me_fw_status(struct mei_device *dev,
for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
ret = hw->read_fws(dev, fw_src->status[i],
&fw_status->status[i]);
- trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_X",
+ trace_mei_pci_cfg_read(&dev->dev, "PCI_CFG_HFS_X",
fw_src->status[i],
fw_status->status[i]);
if (ret)
@@ -251,7 +251,7 @@ static int mei_me_hw_config(struct mei_device *dev)
reg = 0;
hw->read_fws(dev, PCI_CFG_HFS_1, &reg);
- trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
+ trace_mei_pci_cfg_read(&dev->dev, "PCI_CFG_HFS_1", PCI_CFG_HFS_1, reg);
hw->d0i3_supported =
((reg & PCI_CFG_HFS_1_D0I3_MSK) == PCI_CFG_HFS_1_D0I3_MSK);
@@ -447,7 +447,7 @@ static void mei_gsc_pxp_check(struct mei_device *dev)
return;
hw->read_fws(dev, PCI_CFG_HFS_5, &fwsts5);
- trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HFS_5", PCI_CFG_HFS_5, fwsts5);
+ trace_mei_pci_cfg_read(&dev->dev, "PCI_CFG_HFS_5", PCI_CFG_HFS_5, fwsts5);
if ((fwsts5 & GSC_CFG_HFS_5_BOOT_TYPE_MSK) == GSC_CFG_HFS_5_BOOT_TYPE_PXP) {
if (dev->gsc_reset_to_pxp == MEI_DEV_RESET_TO_PXP_DEFAULT)
@@ -460,10 +460,10 @@ static void mei_gsc_pxp_check(struct mei_device *dev)
return;
if ((fwsts5 & GSC_CFG_HFS_5_BOOT_TYPE_MSK) == GSC_CFG_HFS_5_BOOT_TYPE_PXP) {
- dev_dbg(dev->dev, "pxp mode is ready 0x%08x\n", fwsts5);
+ dev_dbg(&dev->dev, "pxp mode is ready 0x%08x\n", fwsts5);
dev->pxp_mode = MEI_DEV_PXP_READY;
} else {
- dev_dbg(dev->dev, "pxp mode is not ready 0x%08x\n", fwsts5);
+ dev_dbg(&dev->dev, "pxp mode is not ready 0x%08x\n", fwsts5);
}
}
@@ -482,7 +482,7 @@ static int mei_me_hw_ready_wait(struct mei_device *dev)
dev->timeouts.hw_ready);
mutex_lock(&dev->device_lock);
if (!dev->recvd_hw_ready) {
- dev_err(dev->dev, "wait hw ready failed\n");
+ dev_err(&dev->dev, "wait hw ready failed\n");
return -ETIME;
}
@@ -494,43 +494,6 @@ static int mei_me_hw_ready_wait(struct mei_device *dev)
}
/**
- * mei_me_check_fw_reset - check for the firmware reset error and exception conditions
- *
- * @dev: mei device
- */
-static void mei_me_check_fw_reset(struct mei_device *dev)
-{
- struct mei_fw_status fw_status;
- char fw_sts_str[MEI_FW_STATUS_STR_SZ] = {0};
- int ret;
- u32 fw_pm_event = 0;
-
- if (!dev->saved_fw_status_flag)
- goto end;
-
- if (dev->gsc_reset_to_pxp == MEI_DEV_RESET_TO_PXP_PERFORMED) {
- ret = mei_fw_status(dev, &fw_status);
- if (!ret) {
- fw_pm_event = fw_status.status[1] & PCI_CFG_HFS_2_PM_EVENT_MASK;
- if (fw_pm_event != PCI_CFG_HFS_2_PM_CMOFF_TO_CMX_ERROR &&
- fw_pm_event != PCI_CFG_HFS_2_PM_CM_RESET_ERROR)
- goto end;
- } else {
- dev_err(dev->dev, "failed to read firmware status: %d\n", ret);
- }
- }
-
- mei_fw_status2str(&dev->saved_fw_status, fw_sts_str, sizeof(fw_sts_str));
- dev_warn(dev->dev, "unexpected reset: fw_pm_event = 0x%x, dev_state = %u fw status = %s\n",
- fw_pm_event, dev->saved_dev_state, fw_sts_str);
-
-end:
- if (dev->gsc_reset_to_pxp == MEI_DEV_RESET_TO_PXP_PERFORMED)
- dev->gsc_reset_to_pxp = MEI_DEV_RESET_TO_PXP_DONE;
- dev->saved_fw_status_flag = false;
-}
-
-/**
* mei_me_hw_start - hw start routine
*
* @dev: mei device
@@ -540,11 +503,12 @@ static int mei_me_hw_start(struct mei_device *dev)
{
int ret = mei_me_hw_ready_wait(dev);
- if (kind_is_gsc(dev) || kind_is_gscfi(dev))
- mei_me_check_fw_reset(dev);
+ if ((kind_is_gsc(dev) || kind_is_gscfi(dev)) &&
+ dev->gsc_reset_to_pxp == MEI_DEV_RESET_TO_PXP_PERFORMED)
+ dev->gsc_reset_to_pxp = MEI_DEV_RESET_TO_PXP_DONE;
if (ret)
return ret;
- dev_dbg(dev->dev, "hw is ready\n");
+ dev_dbg(&dev->dev, "hw is ready\n");
mei_me_host_set_ready(dev);
return ret;
@@ -644,14 +608,14 @@ static int mei_me_hbuf_write(struct mei_device *dev,
return -EINVAL;
if (!data && data_len) {
- dev_err(dev->dev, "wrong parameters null data with data_len = %zu\n", data_len);
+ dev_err(&dev->dev, "wrong parameters null data with data_len = %zu\n", data_len);
return -EINVAL;
}
- dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM((struct mei_msg_hdr *)hdr));
+ dev_dbg(&dev->dev, MEI_HDR_FMT, MEI_HDR_PRM((struct mei_msg_hdr *)hdr));
empty_slots = mei_hbuf_empty_slots(dev);
- dev_dbg(dev->dev, "empty slots = %d.\n", empty_slots);
+ dev_dbg(&dev->dev, "empty slots = %d.\n", empty_slots);
if (empty_slots < 0)
return -EOVERFLOW;
@@ -706,7 +670,7 @@ static int mei_me_count_full_read_slots(struct mei_device *dev)
if (filled_slots > buffer_depth)
return -EOVERFLOW;
- dev_dbg(dev->dev, "filled_slots =%08x\n", filled_slots);
+ dev_dbg(&dev->dev, "filled_slots =%08x\n", filled_slots);
return (int)filled_slots;
}
@@ -748,11 +712,11 @@ static void mei_me_pg_set(struct mei_device *dev)
u32 reg;
reg = mei_me_reg_read(hw, H_HPG_CSR);
- trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
+ trace_mei_reg_read(&dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
reg |= H_HPG_CSR_PGI;
- trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
+ trace_mei_reg_write(&dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
mei_me_reg_write(hw, H_HPG_CSR, reg);
}
@@ -767,13 +731,13 @@ static void mei_me_pg_unset(struct mei_device *dev)
u32 reg;
reg = mei_me_reg_read(hw, H_HPG_CSR);
- trace_mei_reg_read(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
+ trace_mei_reg_read(&dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n");
reg |= H_HPG_CSR_PGIHEXR;
- trace_mei_reg_write(dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
+ trace_mei_reg_write(&dev->dev, "H_HPG_CSR", H_HPG_CSR, reg);
mei_me_reg_write(hw, H_HPG_CSR, reg);
}
@@ -905,7 +869,7 @@ static bool mei_me_pg_is_enabled(struct mei_device *dev)
return true;
notsupported:
- dev_dbg(dev->dev, "pg: not supported: d0i3 = %d HGP = %d hbm version %d.%d ?= %d.%d\n",
+ dev_dbg(&dev->dev, "pg: not supported: d0i3 = %d HGP = %d hbm version %d.%d ?= %d.%d\n",
hw->d0i3_supported,
!!(reg & ME_PGIC_HRA),
dev->version.major_version,
@@ -974,7 +938,7 @@ static int mei_me_d0i3_enter_sync(struct mei_device *dev)
reg = mei_me_d0i3c_read(dev);
if (reg & H_D0I3C_I3) {
/* we are in d0i3, nothing to do */
- dev_dbg(dev->dev, "d0i3 set not needed\n");
+ dev_dbg(&dev->dev, "d0i3 set not needed\n");
ret = 0;
goto on;
}
@@ -1003,7 +967,7 @@ static int mei_me_d0i3_enter_sync(struct mei_device *dev)
reg = mei_me_d0i3_set(dev, true);
if (!(reg & H_D0I3C_CIP)) {
- dev_dbg(dev->dev, "d0i3 enter wait not needed\n");
+ dev_dbg(&dev->dev, "d0i3 enter wait not needed\n");
ret = 0;
goto on;
}
@@ -1027,7 +991,7 @@ on:
hw->pg_state = MEI_PG_ON;
out:
dev->pg_event = MEI_PG_EVENT_IDLE;
- dev_dbg(dev->dev, "d0i3 enter ret = %d\n", ret);
+ dev_dbg(&dev->dev, "d0i3 enter ret = %d\n", ret);
return ret;
}
@@ -1049,7 +1013,7 @@ static int mei_me_d0i3_enter(struct mei_device *dev)
reg = mei_me_d0i3c_read(dev);
if (reg & H_D0I3C_I3) {
/* we are in d0i3, nothing to do */
- dev_dbg(dev->dev, "already d0i3 : set not needed\n");
+ dev_dbg(&dev->dev, "already d0i3 : set not needed\n");
goto on;
}
@@ -1057,7 +1021,7 @@ static int mei_me_d0i3_enter(struct mei_device *dev)
on:
hw->pg_state = MEI_PG_ON;
dev->pg_event = MEI_PG_EVENT_IDLE;
- dev_dbg(dev->dev, "d0i3 enter\n");
+ dev_dbg(&dev->dev, "d0i3 enter\n");
return 0;
}
@@ -1079,14 +1043,14 @@ static int mei_me_d0i3_exit_sync(struct mei_device *dev)
reg = mei_me_d0i3c_read(dev);
if (!(reg & H_D0I3C_I3)) {
/* we are not in d0i3, nothing to do */
- dev_dbg(dev->dev, "d0i3 exit not needed\n");
+ dev_dbg(&dev->dev, "d0i3 exit not needed\n");
ret = 0;
goto off;
}
reg = mei_me_d0i3_unset(dev);
if (!(reg & H_D0I3C_CIP)) {
- dev_dbg(dev->dev, "d0i3 exit wait not needed\n");
+ dev_dbg(&dev->dev, "d0i3 exit wait not needed\n");
ret = 0;
goto off;
}
@@ -1111,7 +1075,7 @@ off:
out:
dev->pg_event = MEI_PG_EVENT_IDLE;
- dev_dbg(dev->dev, "d0i3 exit ret = %d\n", ret);
+ dev_dbg(&dev->dev, "d0i3 exit ret = %d\n", ret);
return ret;
}
@@ -1154,7 +1118,7 @@ static void mei_me_d0i3_intr(struct mei_device *dev, u32 intr_source)
* force H_RDY because it could be
* wiped off during PG
*/
- dev_dbg(dev->dev, "d0i3 set host ready\n");
+ dev_dbg(&dev->dev, "d0i3 set host ready\n");
mei_me_host_set_ready(dev);
}
} else {
@@ -1170,7 +1134,7 @@ static void mei_me_d0i3_intr(struct mei_device *dev, u32 intr_source)
* we got here because of HW initiated exit from D0i3.
* Start runtime pm resume sequence to exit low power state.
*/
- dev_dbg(dev->dev, "d0i3 want resume\n");
+ dev_dbg(&dev->dev, "d0i3 want resume\n");
mei_hbm_pg_resume(dev);
}
}
@@ -1250,7 +1214,7 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
}
}
- pm_runtime_set_active(dev->dev);
+ pm_runtime_set_active(dev->parent);
hcsr = mei_hcsr_read(dev);
/* H_RST may be found lit before reset is started,
@@ -1259,7 +1223,7 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
* we need to clean H_RST bit to start a successful reset sequence.
*/
if ((hcsr & H_RST) == H_RST) {
- dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
+ dev_warn(&dev->dev, "H_RST is set = 0x%08X", hcsr);
hcsr &= ~H_RST;
mei_hcsr_set(dev, hcsr);
hcsr = mei_hcsr_read(dev);
@@ -1280,10 +1244,10 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
hcsr = mei_hcsr_read(dev);
if ((hcsr & H_RST) == 0)
- dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr);
+ dev_warn(&dev->dev, "H_RST is not set = 0x%08X", hcsr);
if ((hcsr & H_RDY) == H_RDY)
- dev_warn(dev->dev, "H_RDY is not cleared 0x%08X", hcsr);
+ dev_warn(&dev->dev, "H_RDY is not cleared 0x%08X", hcsr);
if (!intr_enable) {
mei_me_hw_reset_release(dev);
@@ -1313,7 +1277,7 @@ irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
if (!me_intr_src(hcsr))
return IRQ_NONE;
- dev_dbg(dev->dev, "interrupt source 0x%08X\n", me_intr_src(hcsr));
+ dev_dbg(&dev->dev, "interrupt source 0x%08X\n", me_intr_src(hcsr));
/* disable interrupts on device */
me_intr_disable(dev, hcsr);
@@ -1339,7 +1303,7 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
u32 hcsr;
int rets = 0;
- dev_dbg(dev->dev, "function called after ISR to handle the interrupt processing.\n");
+ dev_dbg(&dev->dev, "function called after ISR to handle the interrupt processing.\n");
/* initialize our complete list */
mutex_lock(&dev->device_lock);
@@ -1351,10 +1315,10 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
/* check if ME wants a reset */
if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
if (kind_is_gsc(dev) || kind_is_gscfi(dev)) {
- dev_dbg(dev->dev, "FW not ready: resetting: dev_state = %d\n",
+ dev_dbg(&dev->dev, "FW not ready: resetting: dev_state = %d\n",
dev->dev_state);
} else {
- dev_warn(dev->dev, "FW not ready: resetting: dev_state = %d\n",
+ dev_warn(&dev->dev, "FW not ready: resetting: dev_state = %d\n",
dev->dev_state);
}
if (dev->dev_state == MEI_DEV_POWERING_DOWN ||
@@ -1373,18 +1337,29 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
/* check if we need to start the dev */
if (!mei_host_is_ready(dev)) {
if (mei_hw_is_ready(dev)) {
- dev_dbg(dev->dev, "we need to start the dev.\n");
- dev->recvd_hw_ready = true;
- wake_up(&dev->wait_hw_ready);
+ /* synchronized by dev mutex */
+ if (waitqueue_active(&dev->wait_hw_ready)) {
+ dev_dbg(&dev->dev, "we need to start the dev.\n");
+ dev->recvd_hw_ready = true;
+ wake_up(&dev->wait_hw_ready);
+ } else if (dev->dev_state != MEI_DEV_UNINITIALIZED &&
+ dev->dev_state != MEI_DEV_POWERING_DOWN &&
+ dev->dev_state != MEI_DEV_POWER_DOWN) {
+ dev_dbg(&dev->dev, "Force link reset.\n");
+ schedule_work(&dev->reset_work);
+ } else {
+ dev_dbg(&dev->dev, "Ignore this interrupt in state = %d\n",
+ dev->dev_state);
+ }
} else {
- dev_dbg(dev->dev, "Spurious Interrupt\n");
+ dev_dbg(&dev->dev, "Spurious Interrupt\n");
}
goto end;
}
/* check slots available for reading */
slots = mei_count_full_read_slots(dev);
while (slots > 0) {
- dev_dbg(dev->dev, "slots to read = %08x\n", slots);
+ dev_dbg(&dev->dev, "slots to read = %08x\n", slots);
rets = mei_irq_read_handler(dev, &cmpl_list, &slots);
/* There is a race between ME write and interrupt delivery:
* Not all data is always available immediately after the
@@ -1394,7 +1369,7 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
break;
if (rets) {
- dev_err(dev->dev, "mei_irq_read_handler ret = %d, state = %d.\n",
+ dev_err(&dev->dev, "mei_irq_read_handler ret = %d, state = %d.\n",
rets, dev->dev_state);
if (dev->dev_state != MEI_DEV_RESETTING &&
dev->dev_state != MEI_DEV_DISABLED &&
@@ -1421,7 +1396,7 @@ irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
mei_irq_compl_handler(dev, &cmpl_list);
end:
- dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
+ dev_dbg(&dev->dev, "interrupt thread end ret = %d\n", rets);
mei_me_intr_enable(dev);
mutex_unlock(&dev->device_lock);
return IRQ_HANDLED;
@@ -1453,7 +1428,7 @@ int mei_me_polling_thread(void *_dev)
irqreturn_t irq_ret;
long polling_timeout = MEI_POLLING_TIMEOUT_ACTIVE;
- dev_dbg(dev->dev, "kernel thread is running\n");
+ dev_dbg(&dev->dev, "kernel thread is running\n");
while (!kthread_should_stop()) {
struct mei_me_hw *hw = to_me_hw(dev);
u32 hcsr;
@@ -1470,7 +1445,7 @@ int mei_me_polling_thread(void *_dev)
polling_timeout = MEI_POLLING_TIMEOUT_ACTIVE;
irq_ret = mei_me_irq_thread_handler(1, dev);
if (irq_ret != IRQ_HANDLED)
- dev_err(dev->dev, "irq_ret %d\n", irq_ret);
+ dev_err(&dev->dev, "irq_ret %d\n", irq_ret);
} else {
/*
* Increase timeout by MEI_POLLING_TIMEOUT_ACTIVE
@@ -1804,7 +1779,7 @@ struct mei_device *mei_me_dev_init(struct device *parent,
struct mei_me_hw *hw;
int i;
- dev = devm_kzalloc(parent, sizeof(*dev) + sizeof(*hw), GFP_KERNEL);
+ dev = kzalloc(sizeof(*dev) + sizeof(*hw), GFP_KERNEL);
if (!dev)
return NULL;
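
devm_kzalloc() is replaced with a plain kzalloc() because the allocation now outlives the probe/remove cycle of the parent: it is freed from the embedded device's release callback once the last reference is dropped. A condensed sketch of the pairing, per the mei_register()/mei_device_release() hunks in main.c below:

    dev = kzalloc(sizeof(*dev) + sizeof(*hw), GFP_KERNEL);
    /* ... later, in mei_register(): */
    dev->dev.release = mei_device_release;  /* kfree()s the drvdata */
    dev_set_drvdata(&dev->dev, dev);
    /* the final put_device(&dev->dev) ends up in kfree(dev) */
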
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index e9476f9ae25d..e4688c391027 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -160,7 +160,7 @@ static bool mei_txe_aliveness_set(struct mei_device *dev, u32 req)
struct mei_txe_hw *hw = to_txe_hw(dev);
bool do_req = hw->aliveness != req;
- dev_dbg(dev->dev, "Aliveness current=%d request=%d\n",
+ dev_dbg(&dev->dev, "Aliveness current=%d request=%d\n",
hw->aliveness, req);
if (do_req) {
dev->pg_event = MEI_PG_EVENT_WAIT;
@@ -227,7 +227,7 @@ static int mei_txe_aliveness_poll(struct mei_device *dev, u32 expected)
hw->aliveness = mei_txe_aliveness_get(dev);
if (hw->aliveness == expected) {
dev->pg_event = MEI_PG_EVENT_IDLE;
- dev_dbg(dev->dev, "aliveness settled after %lld usecs\n",
+ dev_dbg(&dev->dev, "aliveness settled after %lld usecs\n",
ktime_to_us(ktime_sub(ktime_get(), start)));
return 0;
}
@@ -235,7 +235,7 @@ static int mei_txe_aliveness_poll(struct mei_device *dev, u32 expected)
} while (ktime_before(ktime_get(), stop));
dev->pg_event = MEI_PG_EVENT_IDLE;
- dev_err(dev->dev, "aliveness timed out\n");
+ dev_err(&dev->dev, "aliveness timed out\n");
return -ETIME;
}
@@ -270,10 +270,10 @@ static int mei_txe_aliveness_wait(struct mei_device *dev, u32 expected)
ret = hw->aliveness == expected ? 0 : -ETIME;
if (ret)
- dev_warn(dev->dev, "aliveness timed out = %ld aliveness = %d event = %d\n",
+ dev_warn(&dev->dev, "aliveness timed out = %ld aliveness = %d event = %d\n",
err, hw->aliveness, dev->pg_event);
else
- dev_dbg(dev->dev, "aliveness settled after = %d msec aliveness = %d event = %d\n",
+ dev_dbg(&dev->dev, "aliveness settled after = %d msec aliveness = %d event = %d\n",
jiffies_to_msecs(timeout - err),
hw->aliveness, dev->pg_event);
@@ -438,7 +438,7 @@ static void mei_txe_intr_enable(struct mei_device *dev)
*/
static void mei_txe_synchronize_irq(struct mei_device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
+ struct pci_dev *pdev = to_pci_dev(dev->parent);
synchronize_irq(pdev->irq);
}
@@ -464,7 +464,7 @@ static bool mei_txe_pending_interrupts(struct mei_device *dev)
TXE_INTR_OUT_DB));
if (ret) {
- dev_dbg(dev->dev,
+ dev_dbg(&dev->dev,
"Pending Interrupts InReady=%01d Readiness=%01d, Aliveness=%01d, OutDoor=%01d\n",
!!(hw->intr_cause & TXE_INTR_IN_READY),
!!(hw->intr_cause & TXE_INTR_READINESS),
@@ -612,7 +612,7 @@ static int mei_txe_readiness_wait(struct mei_device *dev)
msecs_to_jiffies(SEC_RESET_WAIT_TIMEOUT));
mutex_lock(&dev->device_lock);
if (!dev->recvd_hw_ready) {
- dev_err(dev->dev, "wait for readiness failed\n");
+ dev_err(&dev->dev, "wait for readiness failed\n");
return -ETIME;
}
@@ -638,7 +638,7 @@ static int mei_txe_fw_status(struct mei_device *dev,
struct mei_fw_status *fw_status)
{
const struct mei_fw_status *fw_src = &mei_txe_fw_sts;
- struct pci_dev *pdev = to_pci_dev(dev->dev);
+ struct pci_dev *pdev = to_pci_dev(dev->parent);
int ret;
int i;
@@ -649,7 +649,7 @@ static int mei_txe_fw_status(struct mei_device *dev,
for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
ret = pci_read_config_dword(pdev, fw_src->status[i],
&fw_status->status[i]);
- trace_mei_pci_cfg_read(dev->dev, "PCI_CFG_HSF_X",
+ trace_mei_pci_cfg_read(&dev->dev, "PCI_CFG_HSF_X",
fw_src->status[i],
fw_status->status[i]);
if (ret)
@@ -677,7 +677,7 @@ static int mei_txe_hw_config(struct mei_device *dev)
hw->aliveness = mei_txe_aliveness_get(dev);
hw->readiness = mei_txe_readiness_get(dev);
- dev_dbg(dev->dev, "aliveness_resp = 0x%08x, readiness = 0x%08x.\n",
+ dev_dbg(&dev->dev, "aliveness_resp = 0x%08x, readiness = 0x%08x.\n",
hw->aliveness, hw->readiness);
return 0;
@@ -708,7 +708,7 @@ static int mei_txe_write(struct mei_device *dev,
if (WARN_ON(!hdr || !data || hdr_len & 0x3))
return -EINVAL;
- dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM((struct mei_msg_hdr *)hdr));
+ dev_dbg(&dev->dev, MEI_HDR_FMT, MEI_HDR_PRM((struct mei_msg_hdr *)hdr));
dw_cnt = mei_data2slots(hdr_len + data_len);
if (dw_cnt > slots)
@@ -724,7 +724,7 @@ static int mei_txe_write(struct mei_device *dev,
char fw_sts_str[MEI_FW_STATUS_STR_SZ];
mei_fw_status_str(dev, fw_sts_str, MEI_FW_STATUS_STR_SZ);
- dev_err(dev->dev, "Input is not ready %s\n", fw_sts_str);
+ dev_err(&dev->dev, "Input is not ready %s\n", fw_sts_str);
return -EAGAIN;
}
@@ -828,13 +828,13 @@ static int mei_txe_read(struct mei_device *dev,
reg_buf = (u32 *)buf;
rem = len & 0x3;
- dev_dbg(dev->dev, "buffer-length = %lu buf[0]0x%08X\n",
+ dev_dbg(&dev->dev, "buffer-length = %lu buf[0]0x%08X\n",
len, mei_txe_out_data_read(dev, 0));
for (i = 0; i < len / MEI_SLOT_SIZE; i++) {
/* skip header: index starts from 1 */
reg = mei_txe_out_data_read(dev, i + 1);
- dev_dbg(dev->dev, "buf[%d] = 0x%08X\n", i, reg);
+ dev_dbg(&dev->dev, "buf[%d] = 0x%08X\n", i, reg);
*reg_buf++ = reg;
}
@@ -879,7 +879,7 @@ static int mei_txe_hw_reset(struct mei_device *dev, bool intr_enable)
*/
if (aliveness_req != hw->aliveness)
if (mei_txe_aliveness_poll(dev, aliveness_req) < 0) {
- dev_err(dev->dev, "wait for aliveness settle failed ... bailing out\n");
+ dev_err(&dev->dev, "wait for aliveness settle failed ... bailing out\n");
return -EIO;
}
@@ -889,7 +889,7 @@ static int mei_txe_hw_reset(struct mei_device *dev, bool intr_enable)
if (aliveness_req) {
mei_txe_aliveness_set(dev, 0);
if (mei_txe_aliveness_poll(dev, 0) < 0) {
- dev_err(dev->dev, "wait for aliveness failed ... bailing out\n");
+ dev_err(&dev->dev, "wait for aliveness failed ... bailing out\n");
return -EIO;
}
}
@@ -921,7 +921,7 @@ static int mei_txe_hw_start(struct mei_device *dev)
ret = mei_txe_readiness_wait(dev);
if (ret < 0) {
- dev_err(dev->dev, "waiting for readiness failed\n");
+ dev_err(&dev->dev, "waiting for readiness failed\n");
return ret;
}
@@ -937,11 +937,11 @@ static int mei_txe_hw_start(struct mei_device *dev)
ret = mei_txe_aliveness_set_sync(dev, 1);
if (ret < 0) {
- dev_err(dev->dev, "wait for aliveness failed ... bailing out\n");
+ dev_err(&dev->dev, "wait for aliveness failed ... bailing out\n");
return ret;
}
- pm_runtime_set_active(dev->dev);
+ pm_runtime_set_active(dev->parent);
/* enable input ready interrupts:
* SEC_IPC_HOST_INT_MASK.IPC_INPUT_READY_INT_MASK
@@ -1049,7 +1049,7 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
s32 slots;
int rets = 0;
- dev_dbg(dev->dev, "irq thread: Interrupt Registers HHISR|HISR|SEC=%02X|%04X|%02X\n",
+ dev_dbg(&dev->dev, "irq thread: Interrupt Registers HHISR|HISR|SEC=%02X|%04X|%02X\n",
mei_txe_br_reg_read(hw, HHISR_REG),
mei_txe_br_reg_read(hw, HISR_REG),
mei_txe_sec_reg_read_silent(hw, SEC_IPC_HOST_INT_STATUS_REG));
@@ -1059,7 +1059,7 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
mutex_lock(&dev->device_lock);
INIT_LIST_HEAD(&cmpl_list);
- if (pci_dev_msi_enabled(to_pci_dev(dev->dev)))
+ if (pci_dev_msi_enabled(to_pci_dev(dev->parent)))
mei_txe_check_and_ack_intrs(dev, true);
/* show irq events */
@@ -1073,17 +1073,17 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
* or TXE driver resetting the HECI interface.
*/
if (test_and_clear_bit(TXE_INTR_READINESS_BIT, &hw->intr_cause)) {
- dev_dbg(dev->dev, "Readiness Interrupt was received...\n");
+ dev_dbg(&dev->dev, "Readiness Interrupt was received...\n");
/* Check if SeC is going through reset */
if (mei_txe_readiness_is_sec_rdy(hw->readiness)) {
- dev_dbg(dev->dev, "we need to start the dev.\n");
+ dev_dbg(&dev->dev, "we need to start the dev.\n");
dev->recvd_hw_ready = true;
} else {
dev->recvd_hw_ready = false;
if (dev->dev_state != MEI_DEV_RESETTING) {
- dev_warn(dev->dev, "FW not ready: resetting.\n");
+ dev_warn(&dev->dev, "FW not ready: resetting.\n");
schedule_work(&dev->reset_work);
goto end;
@@ -1100,7 +1100,7 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
if (test_and_clear_bit(TXE_INTR_ALIVENESS_BIT, &hw->intr_cause)) {
/* Clear the interrupt cause */
- dev_dbg(dev->dev,
+ dev_dbg(&dev->dev,
"Aliveness Interrupt: Status: %d\n", hw->aliveness);
dev->pg_event = MEI_PG_EVENT_RECEIVED;
if (waitqueue_active(&hw->wait_aliveness_resp))
@@ -1118,7 +1118,7 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
if (rets &&
(dev->dev_state != MEI_DEV_RESETTING &&
dev->dev_state != MEI_DEV_POWER_DOWN)) {
- dev_err(dev->dev,
+ dev_err(&dev->dev,
"mei_irq_read_handler ret = %d.\n", rets);
schedule_work(&dev->reset_work);
@@ -1136,7 +1136,7 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
rets = mei_irq_write_handler(dev, &cmpl_list);
if (rets && rets != -EMSGSIZE)
- dev_err(dev->dev, "mei_irq_write_handler ret = %d.\n",
+ dev_err(&dev->dev, "mei_irq_write_handler ret = %d.\n",
rets);
dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
}
@@ -1144,7 +1144,7 @@ irqreturn_t mei_txe_irq_thread_handler(int irq, void *dev_id)
mei_irq_compl_handler(dev, &cmpl_list);
end:
- dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
+ dev_dbg(&dev->dev, "interrupt thread end ret = %d\n", rets);
mutex_unlock(&dev->device_lock);
@@ -1197,7 +1197,7 @@ struct mei_device *mei_txe_dev_init(struct pci_dev *pdev)
struct mei_device *dev;
struct mei_txe_hw *hw;
- dev = devm_kzalloc(&pdev->dev, sizeof(*dev) + sizeof(*hw), GFP_KERNEL);
+ dev = kzalloc(sizeof(*dev) + sizeof(*hw), GFP_KERNEL);
if (!dev)
return NULL;
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index 2e9cf6f4efb6..3771aa09c592 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -27,6 +27,8 @@
#define MKHI_RCV_TIMEOUT 500 /* receive timeout in msec */
#define MKHI_RCV_TIMEOUT_SLOW 10000 /* receive timeout in msec, slow FW */
+#define MEI_LINK_RESET_WAIT_TIMEOUT_MSEC 500 /* Max wait timeout for link reset, in msec */
+
/*
* FW page size for DMA allocations
*/
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 8ef2b1df8ac7..b789c4d9c709 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -89,22 +89,6 @@ void mei_cancel_work(struct mei_device *dev)
}
EXPORT_SYMBOL_GPL(mei_cancel_work);
-static void mei_save_fw_status(struct mei_device *dev)
-{
- struct mei_fw_status fw_status;
- int ret;
-
- ret = mei_fw_status(dev, &fw_status);
- if (ret) {
- dev_err(dev->dev, "failed to read firmware status: %d\n", ret);
- return;
- }
-
- dev->saved_dev_state = dev->dev_state;
- dev->saved_fw_status_flag = true;
- memcpy(&dev->saved_fw_status, &fw_status, sizeof(fw_status));
-}
-
/**
* mei_reset - resets host and fw.
*
@@ -126,11 +110,10 @@ int mei_reset(struct mei_device *dev)
mei_fw_status_str(dev, fw_sts_str, MEI_FW_STATUS_STR_SZ);
if (kind_is_gsc(dev) || kind_is_gscfi(dev)) {
- dev_dbg(dev->dev, "unexpected reset: dev_state = %s fw status = %s\n",
+ dev_dbg(&dev->dev, "unexpected reset: dev_state = %s fw status = %s\n",
mei_dev_state_str(state), fw_sts_str);
- mei_save_fw_status(dev);
} else {
- dev_warn(dev->dev, "unexpected reset: dev_state = %s fw status = %s\n",
+ dev_warn(&dev->dev, "unexpected reset: dev_state = %s fw status = %s\n",
mei_dev_state_str(state), fw_sts_str);
}
}
@@ -150,7 +133,7 @@ int mei_reset(struct mei_device *dev)
dev->reset_count++;
if (dev->reset_count > MEI_MAX_CONSEC_RESET) {
- dev_err(dev->dev, "reset: reached maximal consecutive resets: disabling the device\n");
+ dev_err(&dev->dev, "reset: reached maximal consecutive resets: disabling the device\n");
mei_set_devstate(dev, MEI_DEV_DISABLED);
return -ENODEV;
}
@@ -170,12 +153,12 @@ int mei_reset(struct mei_device *dev)
memset(dev->rd_msg_hdr, 0, sizeof(dev->rd_msg_hdr));
if (ret) {
- dev_err(dev->dev, "hw_reset failed ret = %d\n", ret);
+ dev_err(&dev->dev, "hw_reset failed ret = %d\n", ret);
return ret;
}
if (state == MEI_DEV_POWER_DOWN) {
- dev_dbg(dev->dev, "powering down: end of reset\n");
+ dev_dbg(&dev->dev, "powering down: end of reset\n");
mei_set_devstate(dev, MEI_DEV_DISABLED);
return 0;
}
@@ -185,21 +168,21 @@ int mei_reset(struct mei_device *dev)
char fw_sts_str[MEI_FW_STATUS_STR_SZ];
mei_fw_status_str(dev, fw_sts_str, MEI_FW_STATUS_STR_SZ);
- dev_err(dev->dev, "hw_start failed ret = %d fw status = %s\n", ret, fw_sts_str);
+ dev_err(&dev->dev, "hw_start failed ret = %d fw status = %s\n", ret, fw_sts_str);
return ret;
}
if (dev->dev_state != MEI_DEV_RESETTING) {
- dev_dbg(dev->dev, "wrong state = %d on link start\n", dev->dev_state);
+ dev_dbg(&dev->dev, "wrong state = %d on link start\n", dev->dev_state);
return 0;
}
- dev_dbg(dev->dev, "link is established start sending messages.\n");
+ dev_dbg(&dev->dev, "link is established start sending messages.\n");
mei_set_devstate(dev, MEI_DEV_INIT_CLIENTS);
ret = mei_hbm_start_req(dev);
if (ret) {
- dev_err(dev->dev, "hbm_start failed ret = %d\n", ret);
+ dev_err(&dev->dev, "hbm_start failed ret = %d\n", ret);
mei_set_devstate(dev, MEI_DEV_RESETTING);
return ret;
}
@@ -228,7 +211,7 @@ int mei_start(struct mei_device *dev)
if (ret)
goto err;
- dev_dbg(dev->dev, "reset in start the mei device.\n");
+ dev_dbg(&dev->dev, "reset in start the mei device.\n");
dev->reset_count = 0;
do {
@@ -236,27 +219,27 @@ int mei_start(struct mei_device *dev)
ret = mei_reset(dev);
if (ret == -ENODEV || dev->dev_state == MEI_DEV_DISABLED) {
- dev_err(dev->dev, "reset failed ret = %d", ret);
+ dev_err(&dev->dev, "reset failed ret = %d", ret);
goto err;
}
} while (ret);
if (mei_hbm_start_wait(dev)) {
- dev_err(dev->dev, "HBM haven't started");
+ dev_err(&dev->dev, "HBM hasn't started");
goto err;
}
if (!mei_hbm_version_is_supported(dev)) {
- dev_dbg(dev->dev, "MEI start failed.\n");
+ dev_dbg(&dev->dev, "MEI start failed.\n");
goto err;
}
- dev_dbg(dev->dev, "link layer has been established.\n");
+ dev_dbg(&dev->dev, "link layer has been established.\n");
mutex_unlock(&dev->device_lock);
return 0;
err:
- dev_err(dev->dev, "link layer initialization failed.\n");
+ dev_err(&dev->dev, "link layer initialization failed.\n");
mei_set_devstate(dev, MEI_DEV_DISABLED);
mutex_unlock(&dev->device_lock);
return -ENODEV;
@@ -284,7 +267,7 @@ int mei_restart(struct mei_device *dev)
mutex_unlock(&dev->device_lock);
if (err == -ENODEV || dev->dev_state == MEI_DEV_DISABLED) {
- dev_err(dev->dev, "device disabled = %d\n", err);
+ dev_err(&dev->dev, "device disabled = %d\n", err);
return -ENODEV;
}
@@ -313,7 +296,7 @@ static void mei_reset_work(struct work_struct *work)
mutex_unlock(&dev->device_lock);
if (dev->dev_state == MEI_DEV_DISABLED) {
- dev_err(dev->dev, "device disabled = %d\n", ret);
+ dev_err(&dev->dev, "device disabled = %d\n", ret);
return;
}
@@ -324,7 +307,7 @@ static void mei_reset_work(struct work_struct *work)
void mei_stop(struct mei_device *dev)
{
- dev_dbg(dev->dev, "stopping the device.\n");
+ dev_dbg(&dev->dev, "stopping the device.\n");
mutex_lock(&dev->device_lock);
mei_set_devstate(dev, MEI_DEV_POWERING_DOWN);
@@ -365,7 +348,7 @@ bool mei_write_is_idle(struct mei_device *dev)
list_empty(&dev->write_list) &&
list_empty(&dev->write_waiting_list));
- dev_dbg(dev->dev, "write pg: is idle[%d] state=%s ctrl=%01d write=%01d wwait=%01d\n",
+ dev_dbg(&dev->dev, "write pg: is idle[%d] state=%s ctrl=%01d write=%01d wwait=%01d\n",
idle,
mei_dev_state_str(dev->dev_state),
list_empty(&dev->ctrl_wr_list),
@@ -380,12 +363,12 @@ EXPORT_SYMBOL_GPL(mei_write_is_idle);
* mei_device_init - initialize mei_device structure
*
* @dev: the mei device
- * @device: the device structure
+ * @parent: the parent device
* @slow_fw: configure longer timeouts as FW is slow
* @hw_ops: hw operations
*/
void mei_device_init(struct mei_device *dev,
- struct device *device,
+ struct device *parent,
bool slow_fw,
const struct mei_hw_ops *hw_ops)
{
@@ -399,7 +382,8 @@ void mei_device_init(struct mei_device *dev,
init_waitqueue_head(&dev->wait_hw_ready);
init_waitqueue_head(&dev->wait_pg);
init_waitqueue_head(&dev->wait_hbm_start);
- dev->dev_state = MEI_DEV_INITIALIZING;
+ dev->dev_state = MEI_DEV_UNINITIALIZED;
+ init_waitqueue_head(&dev->wait_dev_state);
dev->reset_count = 0;
INIT_LIST_HEAD(&dev->write_list);
@@ -426,7 +410,7 @@ void mei_device_init(struct mei_device *dev,
dev->pg_event = MEI_PG_EVENT_IDLE;
dev->ops = hw_ops;
- dev->dev = device;
+ dev->parent = parent;
dev->timeouts.hw_ready = mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT);
dev->timeouts.connect = MEI_CONNECT_TIMEOUT;
@@ -442,6 +426,6 @@ void mei_device_init(struct mei_device *dev,
dev->timeouts.hbm = mei_secs_to_jiffies(MEI_HBM_TIMEOUT);
dev->timeouts.mkhi_recv = msecs_to_jiffies(MKHI_RCV_TIMEOUT);
}
+ dev->timeouts.link_reset_wait = msecs_to_jiffies(MEI_LINK_RESET_WAIT_TIMEOUT_MSEC);
}
EXPORT_SYMBOL_GPL(mei_device_init);
-
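
Taken together, the init.c hunks make four additions to device setup: the state machine starts from a new MEI_DEV_UNINITIALIZED state, a wait queue is armed for state transitions, the bus device is stored as parent, and a 500 ms link-reset wait timeout is precomputed. In sketch form:

    dev->dev_state = MEI_DEV_UNINITIALIZED;     /* was MEI_DEV_INITIALIZING */
    init_waitqueue_head(&dev->wait_dev_state);  /* woken by mei_set_devstate() */
    dev->parent = parent;                       /* bus device, was dev->dev */
    dev->timeouts.link_reset_wait =
        msecs_to_jiffies(MEI_LINK_RESET_WAIT_TIMEOUT_MSEC);
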
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index d472f6bbe767..3aa66b6b0d36 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -76,7 +76,7 @@ static void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr,
* that length fits into rd_msg_buf
*/
mei_read_slots(dev, dev->rd_msg_buf, discard_len);
- dev_dbg(dev->dev, "discarding message " MEI_HDR_FMT "\n",
+ dev_dbg(&dev->dev, "discarding message " MEI_HDR_FMT "\n",
MEI_HDR_PRM(hdr));
}
@@ -229,8 +229,8 @@ static int mei_cl_irq_read_msg(struct mei_cl *cl,
cl_dbg(dev, cl, "completed read length = %zu\n", cb->buf_idx);
list_move_tail(&cb->list, cmpl_list);
} else {
- pm_runtime_mark_last_busy(dev->dev);
- pm_request_autosuspend(dev->dev);
+ pm_runtime_mark_last_busy(dev->parent);
+ pm_request_autosuspend(dev->parent);
}
return 0;
@@ -310,8 +310,8 @@ static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
return ret;
}
- pm_runtime_mark_last_busy(dev->dev);
- pm_request_autosuspend(dev->dev);
+ pm_runtime_mark_last_busy(dev->parent);
+ pm_request_autosuspend(dev->parent);
list_move_tail(&cb->list, &cl->rd_pending);
@@ -373,21 +373,21 @@ int mei_irq_read_handler(struct mei_device *dev,
dev->rd_msg_hdr[0] = mei_read_hdr(dev);
dev->rd_msg_hdr_count = 1;
(*slots)--;
- dev_dbg(dev->dev, "slots =%08x.\n", *slots);
+ dev_dbg(&dev->dev, "slots =%08x.\n", *slots);
ret = hdr_is_valid(dev->rd_msg_hdr[0]);
if (ret) {
- dev_err(dev->dev, "corrupted message header 0x%08X\n",
+ dev_err(&dev->dev, "corrupted message header 0x%08X\n",
dev->rd_msg_hdr[0]);
goto end;
}
}
mei_hdr = (struct mei_msg_hdr *)dev->rd_msg_hdr;
- dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));
+ dev_dbg(&dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));
if (mei_slots2data(*slots) < mei_hdr->length) {
- dev_err(dev->dev, "less data available than length=%08x.\n",
+ dev_err(&dev->dev, "less data available than length=%08x.\n",
*slots);
/* we can't read the message */
ret = -ENODATA;
@@ -402,18 +402,18 @@ int mei_irq_read_handler(struct mei_device *dev,
dev->rd_msg_hdr[1] = mei_read_hdr(dev);
dev->rd_msg_hdr_count++;
(*slots)--;
- dev_dbg(dev->dev, "extended header is %08x\n", dev->rd_msg_hdr[1]);
+ dev_dbg(&dev->dev, "extended header is %08x\n", dev->rd_msg_hdr[1]);
}
meta_hdr = ((struct mei_ext_meta_hdr *)&dev->rd_msg_hdr[1]);
if (check_add_overflow((u32)sizeof(*meta_hdr),
mei_slots2data(meta_hdr->size),
&hdr_size_ext)) {
- dev_err(dev->dev, "extended message size too big %d\n",
+ dev_err(&dev->dev, "extended message size too big %d\n",
meta_hdr->size);
return -EBADMSG;
}
if (hdr_size_left < hdr_size_ext) {
- dev_err(dev->dev, "corrupted message header len %d\n",
+ dev_err(&dev->dev, "corrupted message header len %d\n",
mei_hdr->length);
return -EBADMSG;
}
@@ -422,7 +422,7 @@ int mei_irq_read_handler(struct mei_device *dev,
ext_hdr_end = meta_hdr->size + 2;
for (i = dev->rd_msg_hdr_count; i < ext_hdr_end; i++) {
dev->rd_msg_hdr[i] = mei_read_hdr(dev);
- dev_dbg(dev->dev, "extended header %d is %08x\n", i,
+ dev_dbg(&dev->dev, "extended header %d is %08x\n", i,
dev->rd_msg_hdr[i]);
dev->rd_msg_hdr_count++;
(*slots)--;
@@ -431,7 +431,7 @@ int mei_irq_read_handler(struct mei_device *dev,
if (mei_hdr->dma_ring) {
if (hdr_size_left != sizeof(dev->rd_msg_hdr[ext_hdr_end])) {
- dev_err(dev->dev, "corrupted message header len %d\n",
+ dev_err(&dev->dev, "corrupted message header len %d\n",
mei_hdr->length);
return -EBADMSG;
}
@@ -446,8 +446,7 @@ int mei_irq_read_handler(struct mei_device *dev,
if (hdr_is_hbm(mei_hdr)) {
ret = mei_hbm_dispatch(dev, mei_hdr);
if (ret) {
- dev_dbg(dev->dev, "mei_hbm_dispatch failed ret = %d\n",
- ret);
+ dev_dbg(&dev->dev, "mei_hbm_dispatch failed ret = %d\n", ret);
goto end;
}
goto reset_slots;
@@ -474,7 +473,7 @@ int mei_irq_read_handler(struct mei_device *dev,
ret = 0;
goto reset_slots;
}
- dev_err(dev->dev, "no destination client found 0x%08X\n", dev->rd_msg_hdr[0]);
+ dev_err(&dev->dev, "no destination client found 0x%08X\n", dev->rd_msg_hdr[0]);
ret = -EBADMSG;
goto end;
@@ -485,7 +484,7 @@ reset_slots:
*slots = mei_count_full_read_slots(dev);
if (*slots == -EOVERFLOW) {
/* overflow - reset */
- dev_err(dev->dev, "resetting due to slots overflow.\n");
+ dev_err(&dev->dev, "resetting due to slots overflow.\n");
/* set the event since message has been read */
ret = -ERANGE;
goto end;
@@ -525,7 +524,7 @@ int mei_irq_write_handler(struct mei_device *dev, struct list_head *cmpl_list)
return -EMSGSIZE;
/* complete all waiting for write CB */
- dev_dbg(dev->dev, "complete all waiting for write cb.\n");
+ dev_dbg(&dev->dev, "complete all waiting for write cb.\n");
list_for_each_entry_safe(cb, next, &dev->write_waiting_list, list) {
cl = cb->cl;
@@ -537,7 +536,7 @@ int mei_irq_write_handler(struct mei_device *dev, struct list_head *cmpl_list)
}
/* complete control write list CB */
- dev_dbg(dev->dev, "complete control write list cb.\n");
+ dev_dbg(&dev->dev, "complete control write list cb.\n");
list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list, list) {
cl = cb->cl;
switch (cb->fop_type) {
@@ -591,7 +590,7 @@ int mei_irq_write_handler(struct mei_device *dev, struct list_head *cmpl_list)
}
/* complete write list CB */
- dev_dbg(dev->dev, "complete write list cb.\n");
+ dev_dbg(&dev->dev, "complete write list cb.\n");
list_for_each_entry_safe(cb, next, &dev->write_list, list) {
cl = cb->cl;
ret = mei_cl_irq_write(cl, cb, cmpl_list);
@@ -656,7 +655,7 @@ void mei_timer(struct work_struct *work)
if (dev->init_clients_timer) {
if (--dev->init_clients_timer == 0) {
- dev_err(dev->dev, "timer: init clients timeout hbm_state = %d.\n",
+ dev_err(&dev->dev, "timer: init clients timeout hbm_state = %d.\n",
dev->hbm_state);
mei_reset(dev);
goto out;
@@ -672,7 +671,7 @@ void mei_timer(struct work_struct *work)
list_for_each_entry(cl, &dev->file_list, link) {
if (cl->timer_count) {
if (--cl->timer_count == 0) {
- dev_err(dev->dev, "timer: connect/disconnect timeout.\n");
+ dev_err(&dev->dev, "timer: connect/disconnect timeout.\n");
mei_connect_timeout(cl);
goto out;
}
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 8a149a15b861..86a73684a373 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -51,12 +51,15 @@ static int mei_open(struct inode *inode, struct file *file)
int err;
- dev = container_of(inode->i_cdev, struct mei_device, cdev);
+ dev = idr_find(&mei_idr, iminor(inode));
+ if (!dev)
+ return -ENODEV;
+ get_device(&dev->dev);
mutex_lock(&dev->device_lock);
if (dev->dev_state != MEI_DEV_ENABLED) {
- dev_dbg(dev->dev, "dev_state != MEI_ENABLED dev_state = %s\n",
+ dev_dbg(&dev->dev, "dev_state != MEI_ENABLED dev_state = %s\n",
mei_dev_state_str(dev->dev_state));
err = -ENODEV;
goto err_unlock;
@@ -77,6 +80,7 @@ static int mei_open(struct inode *inode, struct file *file)
err_unlock:
mutex_unlock(&dev->device_lock);
+ put_device(&dev->dev);
return err;
}
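
With the cdev now a separately allocated object, mei_open() can no longer recover the device with container_of() on inode->i_cdev; instead the minor number is looked up in mei_idr and a reference on the embedded device is taken for as long as the file stays open. A condensed sketch (locking around the idr elided, as in the hunk above):

    dev = idr_find(&mei_idr, iminor(inode));  /* minor -> mei_device */
    if (!dev)
        return -ENODEV;
    get_device(&dev->dev);                    /* pin while the file is open */
    /* ... error paths and mei_release() balance it: */
    put_device(&dev->dev);
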
@@ -152,6 +156,7 @@ out:
file->private_data = NULL;
mutex_unlock(&dev->device_lock);
+ put_device(&dev->dev);
return rets;
}
@@ -418,6 +423,7 @@ static int mei_ioctl_connect_client(struct file *file,
cl->state != MEI_FILE_DISCONNECTED)
return -EBUSY;
+retry:
/* find ME client we're trying to connect to */
me_cl = mei_me_cl_by_uuid(dev, in_client_uuid);
if (!me_cl) {
@@ -449,6 +455,28 @@ static int mei_ioctl_connect_client(struct file *file,
rets = mei_cl_connect(cl, me_cl, file);
+ if (rets && cl->status == -EFAULT &&
+ (dev->dev_state == MEI_DEV_RESETTING ||
+ dev->dev_state == MEI_DEV_INIT_CLIENTS)) {
+ /* in link reset, wait for its completion */
+ mutex_unlock(&dev->device_lock);
+ rets = wait_event_interruptible_timeout(dev->wait_dev_state,
+ dev->dev_state == MEI_DEV_ENABLED,
+ dev->timeouts.link_reset_wait);
+ mutex_lock(&dev->device_lock);
+ if (rets < 0) {
+ if (signal_pending(current))
+ rets = -EINTR;
+ goto end;
+ }
+ if (dev->dev_state != MEI_DEV_ENABLED) {
+ rets = -ETIME;
+ goto end;
+ }
+ mei_me_cl_put(me_cl);
+ goto retry;
+ }
+
end:
mei_me_cl_put(me_cl);
return rets;
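
The retry block above handles a connect that races with a link reset: when the connect fails with cl->status == -EFAULT while the device is resetting, the caller drops device_lock, sleeps until the state machine reaches MEI_DEV_ENABLED (bounded by the new link_reset_wait timeout), then redoes the UUID lookup and the connect. The wakeup side is the wake_up_interruptible_all() added to mei_set_devstate() below. The wait itself:

    mutex_unlock(&dev->device_lock);
    rets = wait_event_interruptible_timeout(dev->wait_dev_state,
                dev->dev_state == MEI_DEV_ENABLED,
                dev->timeouts.link_reset_wait);
    mutex_lock(&dev->device_lock);
    /* rets < 0: signal -> -EINTR; state != ENABLED: timeout -> -ETIME */
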
@@ -477,7 +505,7 @@ static int mei_vt_support_check(struct mei_device *dev, const uuid_le *uuid)
me_cl = mei_me_cl_by_uuid(dev, uuid);
if (!me_cl) {
- dev_dbg(dev->dev, "Cannot connect to FW Client UUID = %pUl\n",
+ dev_dbg(&dev->dev, "Cannot connect to FW Client UUID = %pUl\n",
uuid);
return -ENOTTY;
}
@@ -641,7 +669,7 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
struct mei_cl *cl = file->private_data;
struct mei_connect_client_data conn;
struct mei_connect_client_data_vtag conn_vtag;
- const uuid_le *cl_uuid;
+ uuid_le cl_uuid;
struct mei_client *props;
u8 vtag;
u32 notify_get, notify_req;
@@ -669,18 +697,18 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
rets = -EFAULT;
goto out;
}
- cl_uuid = &conn.in_client_uuid;
+ cl_uuid = conn.in_client_uuid;
props = &conn.out_client_properties;
vtag = 0;
- rets = mei_vt_support_check(dev, cl_uuid);
+ rets = mei_vt_support_check(dev, &cl_uuid);
if (rets == -ENOTTY)
goto out;
if (!rets)
- rets = mei_ioctl_connect_vtag(file, cl_uuid, props,
+ rets = mei_ioctl_connect_vtag(file, &cl_uuid, props,
vtag);
else
- rets = mei_ioctl_connect_client(file, cl_uuid, props);
+ rets = mei_ioctl_connect_client(file, &cl_uuid, props);
if (rets)
goto out;
@@ -702,14 +730,14 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
goto out;
}
- cl_uuid = &conn_vtag.connect.in_client_uuid;
+ cl_uuid = conn_vtag.connect.in_client_uuid;
props = &conn_vtag.out_client_properties;
vtag = conn_vtag.connect.vtag;
- rets = mei_vt_support_check(dev, cl_uuid);
+ rets = mei_vt_support_check(dev, &cl_uuid);
if (rets == -EOPNOTSUPP)
cl_dbg(dev, cl, "FW Client %pUl does not support vtags\n",
- cl_uuid);
+ &cl_uuid);
if (rets)
goto out;
@@ -719,7 +747,7 @@ static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
goto out;
}
- rets = mei_ioctl_connect_vtag(file, cl_uuid, props, vtag);
+ rets = mei_ioctl_connect_vtag(file, &cl_uuid, props, vtag);
if (rets)
goto out;
@@ -1115,7 +1143,12 @@ void mei_set_devstate(struct mei_device *dev, enum mei_dev_state state)
dev->dev_state = state;
- clsdev = class_find_device_by_devt(&mei_class, dev->cdev.dev);
+ wake_up_interruptible_all(&dev->wait_dev_state);
+
+ if (!dev->cdev)
+ return;
+
+ clsdev = class_find_device_by_devt(&mei_class, dev->cdev->dev);
if (clsdev) {
sysfs_notify(&clsdev->kobj, NULL, "dev_state");
put_device(clsdev);
@@ -1191,7 +1224,7 @@ static int mei_minor_get(struct mei_device *dev)
if (ret >= 0)
dev->minor = ret;
else if (ret == -ENOSPC)
- dev_err(dev->dev, "too many mei devices\n");
+ dev_err(&dev->dev, "too many mei devices\n");
mutex_unlock(&mei_minor_lock);
return ret;
@@ -1200,56 +1233,81 @@ static int mei_minor_get(struct mei_device *dev)
/**
* mei_minor_free - mark device minor number as free
*
- * @dev: device pointer
+ * @minor: minor number to free
*/
-static void mei_minor_free(struct mei_device *dev)
+static void mei_minor_free(int minor)
{
mutex_lock(&mei_minor_lock);
- idr_remove(&mei_idr, dev->minor);
+ idr_remove(&mei_idr, minor);
mutex_unlock(&mei_minor_lock);
}
+static void mei_device_release(struct device *dev)
+{
+ kfree(dev_get_drvdata(dev));
+}
+
int mei_register(struct mei_device *dev, struct device *parent)
{
- struct device *clsdev; /* class device */
int ret, devno;
+ int minor;
ret = mei_minor_get(dev);
if (ret < 0)
return ret;
+ minor = dev->minor;
+
/* Fill in the data structures */
devno = MKDEV(MAJOR(mei_devt), dev->minor);
- cdev_init(&dev->cdev, &mei_fops);
- dev->cdev.owner = parent->driver->owner;
+
+ device_initialize(&dev->dev);
+ dev->dev.devt = devno;
+ dev->dev.class = &mei_class;
+ dev->dev.parent = parent;
+ dev->dev.groups = mei_groups;
+ dev->dev.release = mei_device_release;
+ dev_set_drvdata(&dev->dev, dev);
+
+ dev->cdev = cdev_alloc();
+ if (!dev->cdev) {
+ ret = -ENOMEM;
+ goto err;
+ }
+ dev->cdev->ops = &mei_fops;
+ dev->cdev->owner = parent->driver->owner;
+ cdev_set_parent(dev->cdev, &dev->dev.kobj);
/* Add the device */
- ret = cdev_add(&dev->cdev, devno, 1);
+ ret = cdev_add(dev->cdev, devno, 1);
if (ret) {
- dev_err(parent, "unable to add device %d:%d\n",
+ dev_err(parent, "unable to add cdev for device %d:%d\n",
MAJOR(mei_devt), dev->minor);
- goto err_dev_add;
+ goto err_del_cdev;
}
- clsdev = device_create_with_groups(&mei_class, parent, devno,
- dev, mei_groups,
- "mei%d", dev->minor);
+ ret = dev_set_name(&dev->dev, "mei%d", dev->minor);
+ if (ret) {
+ dev_err(parent, "unable to set name to device %d:%d ret = %d\n",
+ MAJOR(mei_devt), dev->minor, ret);
+ goto err_del_cdev;
+ }
- if (IS_ERR(clsdev)) {
- dev_err(parent, "unable to create device %d:%d\n",
- MAJOR(mei_devt), dev->minor);
- ret = PTR_ERR(clsdev);
- goto err_dev_create;
+ ret = device_add(&dev->dev);
+ if (ret) {
+ dev_err(parent, "unable to add device %d:%d ret = %d\n",
+ MAJOR(mei_devt), dev->minor, ret);
+ goto err_del_cdev;
}
- mei_dbgfs_register(dev, dev_name(clsdev));
+ mei_dbgfs_register(dev, dev_name(&dev->dev));
return 0;
-err_dev_create:
- cdev_del(&dev->cdev);
-err_dev_add:
- mei_minor_free(dev);
+err_del_cdev:
+ cdev_del(dev->cdev);
+err:
+ mei_minor_free(minor);
return ret;
}
EXPORT_SYMBOL_GPL(mei_register);
@@ -1257,15 +1315,16 @@ EXPORT_SYMBOL_GPL(mei_register);
void mei_deregister(struct mei_device *dev)
{
int devno;
+ int minor = dev->minor;
- devno = dev->cdev.dev;
- cdev_del(&dev->cdev);
+ devno = dev->cdev->dev;
+ cdev_del(dev->cdev);
mei_dbgfs_deregister(dev);
device_destroy(&mei_class, devno);
- mei_minor_free(dev);
+ mei_minor_free(minor);
}
EXPORT_SYMBOL_GPL(mei_deregister);
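
The rework above follows the standard pattern for a refcounted character
device: initialize an embedded struct device, allocate the cdev on the heap,
pin the device from the cdev's kobject, and free the container only from the
release callback. A condensed sketch of that pattern, with illustrative names
and abbreviated error unwinding:

static void example_release(struct device *d)
{
	kfree(dev_get_drvdata(d));	/* container freed on the final put */
}

static int example_register(struct example *e, struct device *parent)
{
	int ret;

	device_initialize(&e->dev);	/* refcounted from this point on */
	e->dev.parent = parent;
	e->dev.release = example_release;
	dev_set_drvdata(&e->dev, e);

	e->cdev = cdev_alloc();
	if (!e->cdev)
		return -ENOMEM;
	e->cdev->ops = &example_fops;
	cdev_set_parent(e->cdev, &e->dev.kobj);	/* cdev holds the device */

	ret = cdev_add(e->cdev, e->dev.devt, 1);
	if (ret) {
		cdev_del(e->cdev);
		return ret;
	}
	return device_add(&e->dev);
}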
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 37d7fb15cad7..0bf8d552c3ea 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -57,7 +57,8 @@ enum file_state {
/* MEI device states */
enum mei_dev_state {
- MEI_DEV_INITIALIZING = 0,
+ MEI_DEV_UNINITIALIZED = 0,
+ MEI_DEV_INITIALIZING,
MEI_DEV_INIT_CLIENTS,
MEI_DEV_ENABLED,
MEI_DEV_RESETTING,
@@ -465,13 +466,15 @@ struct mei_dev_timeouts {
unsigned int d0i3; /* D0i3 set/unset max response time, in jiffies */
unsigned long hbm; /* HBM operation timeout, in jiffies */
unsigned long mkhi_recv; /* receive timeout, in jiffies */
+ unsigned long link_reset_wait; /* link reset wait timeout, in jiffies */
};
/**
* struct mei_device - MEI private device struct
*
- * @dev : device on a bus
- * @cdev : character device
+ * @parent : device on a bus
+ * @dev : device object
+ * @cdev : character device pointer
* @minor : minor number allocated for device
*
* @write_list : write pending list
@@ -494,6 +497,7 @@ struct mei_dev_timeouts {
*
* @reset_count : number of consecutive resets
* @dev_state : device state
+ * @wait_dev_state: wait queue for device state change
* @hbm_state : state of host bus message protocol
* @pxp_mode : PXP device mode
* @init_clients_timer : HBM init handshake timeout
@@ -547,17 +551,15 @@ struct mei_dev_timeouts {
*
* @dbgfs_dir : debugfs mei root directory
*
- * @saved_fw_status : saved firmware status
- * @saved_dev_state : saved device state
- * @saved_fw_status_flag : flag indicating that firmware status was saved
* @gsc_reset_to_pxp : state of reset to the PXP mode
*
* @ops: : hw specific operations
* @hw : hw specific data
*/
struct mei_device {
- struct device *dev;
- struct cdev cdev;
+ struct device *parent;
+ struct device dev;
+ struct cdev *cdev;
int minor;
struct list_head write_list;
@@ -585,6 +587,7 @@ struct mei_device {
*/
unsigned long reset_count;
enum mei_dev_state dev_state;
+ wait_queue_head_t wait_dev_state;
enum mei_hbm_state hbm_state;
enum mei_dev_pxp_mode pxp_mode;
u16 init_clients_timer;
@@ -648,9 +651,6 @@ struct mei_device {
struct dentry *dbgfs_dir;
#endif /* CONFIG_DEBUG_FS */
- struct mei_fw_status saved_fw_status;
- enum mei_dev_state saved_dev_state;
- bool saved_fw_status_flag;
enum mei_dev_reset_to_pxp gsc_reset_to_pxp;
const struct mei_hw_ops *ops;
@@ -703,7 +703,7 @@ static inline u32 mei_slots2data(int slots)
* mei init function prototypes
*/
void mei_device_init(struct mei_device *dev,
- struct device *device,
+ struct device *parent,
bool slow_fw,
const struct mei_hw_ops *hw_ops);
int mei_reset(struct mei_device *dev);
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index 3f9c60b579ae..b108a7c22388 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -143,7 +143,7 @@ static inline void mei_me_unset_pm_domain(struct mei_device *dev) {}
static int mei_me_read_fws(const struct mei_device *dev, int where, u32 *val)
{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
+ struct pci_dev *pdev = to_pci_dev(dev->parent);
return pci_read_config_dword(pdev, where, val);
}
@@ -238,19 +238,19 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto end;
}
+ err = mei_register(dev, &pdev->dev);
+ if (err)
+ goto release_irq;
+
if (mei_start(dev)) {
dev_err(&pdev->dev, "init hw failure.\n");
err = -ENODEV;
- goto release_irq;
+ goto deregister;
}
pm_runtime_set_autosuspend_delay(&pdev->dev, MEI_ME_RPM_TIMEOUT);
pm_runtime_use_autosuspend(&pdev->dev);
- err = mei_register(dev, &pdev->dev);
- if (err)
- goto stop;
-
pci_set_drvdata(pdev, dev);
/*
@@ -280,8 +280,8 @@ static int mei_me_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
return 0;
-stop:
- mei_stop(dev);
+deregister:
+ mei_deregister(dev);
release_irq:
mei_cancel_work(dev);
mei_disable_interrupts(dev);
@@ -475,7 +475,7 @@ static int mei_me_pm_runtime_resume(struct device *device)
*/
static inline void mei_me_set_pm_domain(struct mei_device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
+ struct pci_dev *pdev = to_pci_dev(dev->parent);
if (pdev->dev.bus && pdev->dev.bus->pm) {
dev->pg_domain.ops = *pdev->dev.bus->pm;
@@ -496,7 +496,7 @@ static inline void mei_me_set_pm_domain(struct mei_device *dev)
static inline void mei_me_unset_pm_domain(struct mei_device *dev)
{
/* stop using pm callbacks if any */
- dev_pm_domain_set(dev->dev, NULL);
+ dev_pm_domain_set(dev->parent, NULL);
}
static const struct dev_pm_ops mei_me_pm_ops = {
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index 2a584104ba38..c9eb5c5393e4 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -321,7 +321,7 @@ static int mei_txe_pm_runtime_resume(struct device *device)
*/
static inline void mei_txe_set_pm_domain(struct mei_device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev->dev);
+ struct pci_dev *pdev = to_pci_dev(dev->parent);
if (pdev->dev.bus && pdev->dev.bus->pm) {
dev->pg_domain.ops = *pdev->dev.bus->pm;
@@ -342,7 +342,7 @@ static inline void mei_txe_set_pm_domain(struct mei_device *dev)
static inline void mei_txe_unset_pm_domain(struct mei_device *dev)
{
/* stop using pm callbacks if any */
- dev_pm_domain_set(dev->dev, NULL);
+ dev_pm_domain_set(dev->parent, NULL);
}
static const struct dev_pm_ops mei_txe_pm_ops = {
diff --git a/drivers/misc/mei/platform-vsc.c b/drivers/misc/mei/platform-vsc.c
index b2b5a20ae3fa..288e7b72e942 100644
--- a/drivers/misc/mei/platform-vsc.c
+++ b/drivers/misc/mei/platform-vsc.c
@@ -152,7 +152,7 @@ static int mei_vsc_hw_start(struct mei_device *mei_dev)
MEI_VSC_POLL_TIMEOUT_US, true,
hw, &buf, sizeof(buf));
if (ret) {
- dev_err(mei_dev->dev, "wait fw ready failed: %d\n", ret);
+ dev_err(&mei_dev->dev, "wait fw ready failed: %d\n", ret);
return ret;
}
@@ -259,7 +259,7 @@ static int mei_vsc_hw_reset(struct mei_device *mei_dev, bool intr_enable)
if (!intr_enable)
return 0;
- return vsc_tp_init(hw->tp, mei_dev->dev);
+ return vsc_tp_init(hw->tp, mei_dev->parent);
}
static const struct mei_hw_ops mei_vsc_hw_ops = {
@@ -325,7 +325,7 @@ static void mei_vsc_event_cb(void *context)
mei_dev->hbuf_is_ready = mei_hbuf_is_ready(mei_dev);
ret = mei_irq_write_handler(mei_dev, &cmpl_list);
if (ret)
- dev_err(mei_dev->dev, "dispatch write request failed: %d\n", ret);
+ dev_err(&mei_dev->dev, "dispatch write request failed: %d\n", ret);
mei_dev->hbuf_is_ready = mei_hbuf_is_ready(mei_dev);
mei_irq_compl_handler(mei_dev, &cmpl_list);
@@ -343,12 +343,12 @@ static int mei_vsc_probe(struct platform_device *pdev)
if (!tp)
return dev_err_probe(dev, -ENODEV, "no platform data\n");
- mei_dev = devm_kzalloc(dev, size_add(sizeof(*mei_dev), sizeof(*hw)),
- GFP_KERNEL);
+ mei_dev = kzalloc(size_add(sizeof(*mei_dev), sizeof(*hw)), GFP_KERNEL);
if (!mei_dev)
return -ENOMEM;
mei_device_init(mei_dev, dev, false, &mei_vsc_hw_ops);
+
mei_dev->fw_f_fw_ver_supported = 0;
mei_dev->kind = "ivsc";
@@ -360,22 +360,22 @@ static int mei_vsc_probe(struct platform_device *pdev)
vsc_tp_register_event_cb(tp, mei_vsc_event_cb, mei_dev);
+ ret = mei_register(mei_dev, dev);
+ if (ret)
+ goto err_dereg;
+
ret = mei_start(mei_dev);
if (ret) {
dev_err_probe(dev, ret, "init hw failed\n");
goto err_cancel;
}
- ret = mei_register(mei_dev, dev);
- if (ret)
- goto err_stop;
-
- pm_runtime_enable(mei_dev->dev);
+ pm_runtime_enable(mei_dev->parent);
return 0;
-err_stop:
- mei_stop(mei_dev);
+err_dereg:
+ mei_deregister(mei_dev);
err_cancel:
mei_cancel_work(mei_dev);
@@ -392,7 +392,7 @@ static void mei_vsc_remove(struct platform_device *pdev)
struct mei_device *mei_dev = platform_get_drvdata(pdev);
struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev);
- pm_runtime_disable(mei_dev->dev);
+ pm_runtime_disable(mei_dev->parent);
mei_stop(mei_dev);
diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig
index edd811444ce5..e0d88d3199c1 100644
--- a/drivers/nvmem/Kconfig
+++ b/drivers/nvmem/Kconfig
@@ -28,6 +28,17 @@ source "drivers/nvmem/layouts/Kconfig"
# Devices
+config NVMEM_AN8855_EFUSE
+ tristate "Airoha AN8855 eFuse support"
+ depends on MFD_AIROHA_AN8855 || COMPILE_TEST
+ help
+ Say Y here to enable support for reading eFuses on the Airoha
+ AN8855 switch. These are used, for example, to store
+ factory-programmed calibration data required for the PHY.
+
+ This driver can also be built as a module. If so, the module will
+ be called nvmem-an8855-efuse.
+
config NVMEM_APPLE_EFUSES
tristate "Apple eFuse support"
depends on ARCH_APPLE || COMPILE_TEST
@@ -240,6 +251,16 @@ config NVMEM_NINTENDO_OTP
This driver can also be built as a module. If so, the module
will be called nvmem-nintendo-otp.
+config NVMEM_S32G_OCOTP
+ tristate "S32G SoC OCOTP support"
+ depends on ARCH_S32
+ help
+ This is a driver for the 'OCOTP' peripheral available on S32G
+ platforms.
+
+ If you say Y here, you will get support for the One Time
+ Programmable memory pages.
+
config NVMEM_QCOM_QFPROM
tristate "QCOM QFPROM Support"
depends on ARCH_QCOM || COMPILE_TEST
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile
index 2021d59688db..70a4464dcb1e 100644
--- a/drivers/nvmem/Makefile
+++ b/drivers/nvmem/Makefile
@@ -10,6 +10,8 @@ nvmem_layouts-y := layouts.o
obj-y += layouts/
# Devices
+obj-$(CONFIG_NVMEM_AN8855_EFUSE) += nvmem-an8855-efuse.o
+nvmem-an8855-efuse-y := an8855-efuse.o
obj-$(CONFIG_NVMEM_APPLE_EFUSES) += nvmem-apple-efuses.o
nvmem-apple-efuses-y := apple-efuses.o
obj-$(CONFIG_NVMEM_APPLE_SPMI) += apple_nvmem_spmi.o
@@ -79,6 +81,8 @@ obj-$(CONFIG_NVMEM_SUNPLUS_OCOTP) += nvmem_sunplus_ocotp.o
nvmem_sunplus_ocotp-y := sunplus-ocotp.o
obj-$(CONFIG_NVMEM_SUNXI_SID) += nvmem_sunxi_sid.o
nvmem_sunxi_sid-y := sunxi_sid.o
+obj-$(CONFIG_NVMEM_S32G_OCOTP) += nvmem-s32g-ocotp-nvmem.o
+nvmem-s32g-ocotp-nvmem-y := s32g-ocotp-nvmem.o
obj-$(CONFIG_NVMEM_U_BOOT_ENV) += nvmem_u-boot-env.o
nvmem_u-boot-env-y := u-boot-env.o
obj-$(CONFIG_NVMEM_UNIPHIER_EFUSE) += nvmem-uniphier-efuse.o
diff --git a/drivers/nvmem/an8855-efuse.c b/drivers/nvmem/an8855-efuse.c
new file mode 100644
index 000000000000..d1afde6f623f
--- /dev/null
+++ b/drivers/nvmem/an8855-efuse.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Airoha AN8855 Switch EFUSE Driver
+ */
+
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+
+#define AN8855_EFUSE_CELL 50
+
+#define AN8855_EFUSE_DATA0 0x1000a500
+#define AN8855_EFUSE_R50O GENMASK(30, 24)
+
+static int an8855_efuse_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct regmap *regmap = context;
+
+ return regmap_bulk_read(regmap, AN8855_EFUSE_DATA0 + offset,
+ val, bytes / sizeof(u32));
+}
+
+static int an8855_efuse_probe(struct platform_device *pdev)
+{
+ struct nvmem_config an8855_nvmem_config = {
+ .name = "an8855-efuse",
+ .size = AN8855_EFUSE_CELL * sizeof(u32),
+ .stride = sizeof(u32),
+ .word_size = sizeof(u32),
+ .reg_read = an8855_efuse_read,
+ };
+ struct device *dev = &pdev->dev;
+ struct nvmem_device *nvmem;
+ struct regmap *regmap;
+
+ /* Use the parent MFD's regmap as the NVMEM read context */
+ regmap = dev_get_regmap(dev->parent, NULL);
+ if (!regmap)
+ return -ENOENT;
+
+ an8855_nvmem_config.priv = regmap;
+ an8855_nvmem_config.dev = dev;
+ nvmem = devm_nvmem_register(dev, &an8855_nvmem_config);
+
+ return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static const struct of_device_id an8855_efuse_of_match[] = {
+ { .compatible = "airoha,an8855-efuse", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, an8855_efuse_of_match);
+
+static struct platform_driver an8855_efuse_driver = {
+ .probe = an8855_efuse_probe,
+ .driver = {
+ .name = "an8855-efuse",
+ .of_match_table = an8855_efuse_of_match,
+ },
+};
+module_platform_driver(an8855_efuse_driver);
+
+MODULE_AUTHOR("Christian Marangi <ansuelsmth@gmail.com>");
+MODULE_DESCRIPTION("Driver for AN8855 Switch EFUSE");
+MODULE_LICENSE("GPL");
diff --git a/drivers/nvmem/s32g-ocotp-nvmem.c b/drivers/nvmem/s32g-ocotp-nvmem.c
new file mode 100644
index 000000000000..119871ab3a94
--- /dev/null
+++ b/drivers/nvmem/s32g-ocotp-nvmem.c
@@ -0,0 +1,100 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright 2023-2025 NXP
+ */
+
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/nvmem-provider.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+struct s32g_ocotp_priv {
+ struct device *dev;
+ void __iomem *base;
+};
+
+static int s32g_ocotp_read(void *context, unsigned int offset,
+ void *val, size_t bytes)
+{
+ struct s32g_ocotp_priv *s32g_data = context;
+ u32 *dst = val;
+
+ while (bytes >= sizeof(u32)) {
+ *dst++ = ioread32(s32g_data->base + offset);
+
+ bytes -= sizeof(u32);
+ offset += sizeof(u32);
+ }
+
+ return 0;
+}
+
+static struct nvmem_keepout s32g_keepouts[] = {
+ { .start = 0, .end = 520 },
+ { .start = 540, .end = 564 },
+ { .start = 596, .end = 664 },
+ { .start = 668, .end = 676 },
+ { .start = 684, .end = 732 },
+ { .start = 744, .end = 864 },
+ { .start = 908, .end = 924 },
+ { .start = 928, .end = 936 },
+ { .start = 948, .end = 964 },
+ { .start = 968, .end = 976 },
+ { .start = 984, .end = 1012 },
+};
+
+static struct nvmem_config s32g_ocotp_nvmem_config = {
+ .name = "s32g-ocotp",
+ .add_legacy_fixed_of_cells = true,
+ .read_only = true,
+ .word_size = 4,
+ .reg_read = s32g_ocotp_read,
+ .keepout = s32g_keepouts,
+ .nkeepout = ARRAY_SIZE(s32g_keepouts),
+};
+
+static const struct of_device_id ocotp_of_match[] = {
+ { .compatible = "nxp,s32g2-ocotp" },
+ { /* sentinel */ }
+};
+
+static int s32g_ocotp_probe(struct platform_device *pdev)
+{
+ struct s32g_ocotp_priv *s32g_data;
+ struct device *dev = &pdev->dev;
+ struct nvmem_device *nvmem;
+ struct resource *res;
+
+ s32g_data = devm_kzalloc(dev, sizeof(*s32g_data), GFP_KERNEL);
+ if (!s32g_data)
+ return -ENOMEM;
+
+ s32g_data->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
+ if (IS_ERR(s32g_data->base))
+ return dev_err_probe(dev, PTR_ERR(s32g_data->base),
+ "Cannot map OCOTP device.\n");
+
+ s32g_data->dev = dev;
+ s32g_ocotp_nvmem_config.dev = dev;
+ s32g_ocotp_nvmem_config.priv = s32g_data;
+ s32g_ocotp_nvmem_config.size = resource_size(res);
+
+ nvmem = devm_nvmem_register(dev, &s32g_ocotp_nvmem_config);
+
+ return PTR_ERR_OR_ZERO(nvmem);
+}
+
+static struct platform_driver s32g_ocotp_driver = {
+ .probe = s32g_ocotp_probe,
+ .driver = {
+ .name = "s32g-ocotp",
+ .of_match_table = ocotp_of_match,
+ },
+};
+module_platform_driver(s32g_ocotp_driver);
+MODULE_AUTHOR("NXP");
+MODULE_DESCRIPTION("S32G OCOTP driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/parisc/eisa_eeprom.c b/drivers/parisc/eisa_eeprom.c
index 443b15422fc1..601cbb22574f 100644
--- a/drivers/parisc/eisa_eeprom.c
+++ b/drivers/parisc/eisa_eeprom.c
@@ -15,8 +15,6 @@
#include <linux/uaccess.h>
#include <asm/eisa_eeprom.h>
-#define EISA_EEPROM_MINOR 241
-
static loff_t eisa_eeprom_llseek(struct file *file, loff_t offset, int origin)
{
return fixed_size_llseek(file, offset, origin, HPEE_MAX_LENGTH);
diff --git a/drivers/peci/controller/peci-npcm.c b/drivers/peci/controller/peci-npcm.c
index c77591ca583d..931868991241 100644
--- a/drivers/peci/controller/peci-npcm.c
+++ b/drivers/peci/controller/peci-npcm.c
@@ -221,7 +221,6 @@ static const struct regmap_config npcm_peci_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.max_register = NPCM_PECI_MAX_REG,
- .fast_io = true,
};
static const struct peci_controller_ops npcm_ops = {
diff --git a/drivers/pps/kapi.c b/drivers/pps/kapi.c
index 92d1b62ea239..e9389876229e 100644
--- a/drivers/pps/kapi.c
+++ b/drivers/pps/kapi.c
@@ -109,16 +109,13 @@ struct pps_device *pps_register_source(struct pps_source_info *info,
if (err < 0) {
pr_err("%s: unable to create char device\n",
info->name);
- goto kfree_pps;
+ goto pps_register_source_exit;
}
dev_dbg(&pps->dev, "new PPS source %s\n", info->name);
return pps;
-kfree_pps:
- kfree(pps);
-
pps_register_source_exit:
pr_err("%s: unable to register source\n", info->name);
diff --git a/drivers/pps/pps.c b/drivers/pps/pps.c
index 9463232af8d2..c6b8b6478276 100644
--- a/drivers/pps/pps.c
+++ b/drivers/pps/pps.c
@@ -374,6 +374,7 @@ int pps_register_cdev(struct pps_device *pps)
pps->info.name);
err = -EBUSY;
}
+ kfree(pps);
goto out_unlock;
}
pps->id = err;
@@ -383,13 +384,11 @@ int pps_register_cdev(struct pps_device *pps)
pps->dev.devt = MKDEV(pps_major, pps->id);
dev_set_drvdata(&pps->dev, pps);
dev_set_name(&pps->dev, "pps%d", pps->id);
+ pps->dev.release = pps_device_destruct;
err = device_register(&pps->dev);
if (err)
goto free_idr;
- /* Override the release function with our own */
- pps->dev.release = pps_device_destruct;
-
pr_debug("source %s got cdev (%d:%d)\n", pps->info.name, pps_major,
pps->id);
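
The reordering matters because a failed device_register() must be answered
with put_device(), and the final put is what invokes ->release(); if the
release hook is installed only after a successful registration, the failure
path leaks the object. Illustrative shape of the rule (not the pps code
itself):

	pps->dev.release = pps_device_destruct;	/* set before registering */
	err = device_register(&pps->dev);
	if (err) {
		put_device(&pps->dev);	/* final put calls the release hook */
		return err;
	}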
diff --git a/drivers/siox/siox-bus-gpio.c b/drivers/siox/siox-bus-gpio.c
index d6f936464063..413d5f92311c 100644
--- a/drivers/siox/siox-bus-gpio.c
+++ b/drivers/siox/siox-bus-gpio.c
@@ -93,8 +93,7 @@ static int siox_gpio_probe(struct platform_device *pdev)
smaster = devm_siox_master_alloc(dev, sizeof(*ddata));
if (!smaster)
- return dev_err_probe(dev, -ENOMEM,
- "failed to allocate siox master\n");
+ return -ENOMEM;
platform_set_drvdata(pdev, smaster);
ddata = siox_master_get_devdata(smaster);
diff --git a/drivers/slimbus/Kconfig b/drivers/slimbus/Kconfig
index a0fdf9d792cb..60b0dcbc0ebb 100644
--- a/drivers/slimbus/Kconfig
+++ b/drivers/slimbus/Kconfig
@@ -13,13 +13,6 @@ menuconfig SLIMBUS
if SLIMBUS
# SLIMbus controllers
-config SLIM_QCOM_CTRL
- tristate "Qualcomm SLIMbus Manager Component"
- depends on HAS_IOMEM
- help
- Select driver if Qualcomm's SLIMbus Manager Component is
- programmed using Linux kernel.
-
config SLIM_QCOM_NGD_CTRL
tristate "Qualcomm SLIMbus Satellite Non-Generic Device Component"
depends on HAS_IOMEM && DMA_ENGINE && NET
diff --git a/drivers/slimbus/Makefile b/drivers/slimbus/Makefile
index d9aa011b6804..3cfb41c3b592 100644
--- a/drivers/slimbus/Makefile
+++ b/drivers/slimbus/Makefile
@@ -6,8 +6,5 @@ obj-$(CONFIG_SLIMBUS) += slimbus.o
slimbus-y := core.o messaging.o sched.o stream.o
#Controllers
-obj-$(CONFIG_SLIM_QCOM_CTRL) += slim-qcom-ctrl.o
-slim-qcom-ctrl-y := qcom-ctrl.o
-
obj-$(CONFIG_SLIM_QCOM_NGD_CTRL) += slim-qcom-ngd-ctrl.o
slim-qcom-ngd-ctrl-y := qcom-ngd-ctrl.o
diff --git a/drivers/slimbus/messaging.c b/drivers/slimbus/messaging.c
index 6f01d944f9c6..e2dbe4a66b70 100644
--- a/drivers/slimbus/messaging.c
+++ b/drivers/slimbus/messaging.c
@@ -143,8 +143,6 @@ int slim_do_transfer(struct slim_controller *ctrl, struct slim_msg_txn *txn)
if (!txn->msg->comp)
txn->comp = &done;
- else
- txn->comp = txn->comp;
}
ret = ctrl->xfer_msg(ctrl, txn);
@@ -224,7 +222,7 @@ static u16 slim_slicesize(int code)
/**
* slim_xfer_msg() - Transfer a value info message on slim device
*
- * @sbdev: slim device to which this msg has to be transfered
+ * @sbdev: slim device to which this msg has to be transferred
* @msg: value info message pointer
* @mc: message code of the message
*
diff --git a/drivers/slimbus/qcom-ctrl.c b/drivers/slimbus/qcom-ctrl.c
deleted file mode 100644
index ab344f7472f2..000000000000
--- a/drivers/slimbus/qcom-ctrl.c
+++ /dev/null
@@ -1,735 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Copyright (c) 2011-2017, The Linux Foundation
- */
-
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/io.h>
-#include <linux/interrupt.h>
-#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/clk.h>
-#include <linux/of.h>
-#include <linux/pm_runtime.h>
-#include "slimbus.h"
-
-/* Manager registers */
-#define MGR_CFG 0x200
-#define MGR_STATUS 0x204
-#define MGR_INT_EN 0x210
-#define MGR_INT_STAT 0x214
-#define MGR_INT_CLR 0x218
-#define MGR_TX_MSG 0x230
-#define MGR_RX_MSG 0x270
-#define MGR_IE_STAT 0x2F0
-#define MGR_VE_STAT 0x300
-#define MGR_CFG_ENABLE 1
-
-/* Framer registers */
-#define FRM_CFG 0x400
-#define FRM_STAT 0x404
-#define FRM_INT_EN 0x410
-#define FRM_INT_STAT 0x414
-#define FRM_INT_CLR 0x418
-#define FRM_WAKEUP 0x41C
-#define FRM_CLKCTL_DONE 0x420
-#define FRM_IE_STAT 0x430
-#define FRM_VE_STAT 0x440
-
-/* Interface registers */
-#define INTF_CFG 0x600
-#define INTF_STAT 0x604
-#define INTF_INT_EN 0x610
-#define INTF_INT_STAT 0x614
-#define INTF_INT_CLR 0x618
-#define INTF_IE_STAT 0x630
-#define INTF_VE_STAT 0x640
-
-/* Interrupt status bits */
-#define MGR_INT_TX_NACKED_2 BIT(25)
-#define MGR_INT_MSG_BUF_CONTE BIT(26)
-#define MGR_INT_RX_MSG_RCVD BIT(30)
-#define MGR_INT_TX_MSG_SENT BIT(31)
-
-/* Framer config register settings */
-#define FRM_ACTIVE 1
-#define CLK_GEAR 7
-#define ROOT_FREQ 11
-#define REF_CLK_GEAR 15
-#define INTR_WAKE 19
-
-#define SLIM_MSG_ASM_FIRST_WORD(l, mt, mc, dt, ad) \
- ((l) | ((mt) << 5) | ((mc) << 8) | ((dt) << 15) | ((ad) << 16))
-
-#define SLIM_ROOT_FREQ 24576000
-#define QCOM_SLIM_AUTOSUSPEND 1000
-
-/* MAX message size over control channel */
-#define SLIM_MSGQ_BUF_LEN 40
-#define QCOM_TX_MSGS 2
-#define QCOM_RX_MSGS 8
-#define QCOM_BUF_ALLOC_RETRIES 10
-
-#define CFG_PORT(r, v) ((v) ? CFG_PORT_V2(r) : CFG_PORT_V1(r))
-
-/* V2 Component registers */
-#define CFG_PORT_V2(r) ((r ## _V2))
-#define COMP_CFG_V2 4
-#define COMP_TRUST_CFG_V2 0x3000
-
-/* V1 Component registers */
-#define CFG_PORT_V1(r) ((r ## _V1))
-#define COMP_CFG_V1 0
-#define COMP_TRUST_CFG_V1 0x14
-
-/* Resource group info for manager, and non-ported generic device-components */
-#define EE_MGR_RSC_GRP (1 << 10)
-#define EE_NGD_2 (2 << 6)
-#define EE_NGD_1 0
-
-struct slim_ctrl_buf {
- void *base;
- spinlock_t lock;
- int head;
- int tail;
- int sl_sz;
- int n;
-};
-
-struct qcom_slim_ctrl {
- struct slim_controller ctrl;
- struct slim_framer framer;
- struct device *dev;
- void __iomem *base;
- void __iomem *slew_reg;
-
- struct slim_ctrl_buf rx;
- struct slim_ctrl_buf tx;
-
- struct completion **wr_comp;
- int irq;
- struct workqueue_struct *rxwq;
- struct work_struct wd;
- struct clk *rclk;
- struct clk *hclk;
-};
-
-static void qcom_slim_queue_tx(struct qcom_slim_ctrl *ctrl, void *buf,
- u8 len, u32 tx_reg)
-{
- int count = (len + 3) >> 2;
-
- __iowrite32_copy(ctrl->base + tx_reg, buf, count);
-
- /* Ensure Oder of subsequent writes */
- mb();
-}
-
-static void *slim_alloc_rxbuf(struct qcom_slim_ctrl *ctrl)
-{
- unsigned long flags;
- int idx;
-
- spin_lock_irqsave(&ctrl->rx.lock, flags);
- if ((ctrl->rx.tail + 1) % ctrl->rx.n == ctrl->rx.head) {
- spin_unlock_irqrestore(&ctrl->rx.lock, flags);
- dev_err(ctrl->dev, "RX QUEUE full!");
- return NULL;
- }
- idx = ctrl->rx.tail;
- ctrl->rx.tail = (ctrl->rx.tail + 1) % ctrl->rx.n;
- spin_unlock_irqrestore(&ctrl->rx.lock, flags);
-
- return ctrl->rx.base + (idx * ctrl->rx.sl_sz);
-}
-
-static void slim_ack_txn(struct qcom_slim_ctrl *ctrl, int err)
-{
- struct completion *comp;
- unsigned long flags;
- int idx;
-
- spin_lock_irqsave(&ctrl->tx.lock, flags);
- idx = ctrl->tx.head;
- ctrl->tx.head = (ctrl->tx.head + 1) % ctrl->tx.n;
- spin_unlock_irqrestore(&ctrl->tx.lock, flags);
-
- comp = ctrl->wr_comp[idx];
- ctrl->wr_comp[idx] = NULL;
-
- complete(comp);
-}
-
-static irqreturn_t qcom_slim_handle_tx_irq(struct qcom_slim_ctrl *ctrl,
- u32 stat)
-{
- int err = 0;
-
- if (stat & MGR_INT_TX_MSG_SENT)
- writel_relaxed(MGR_INT_TX_MSG_SENT,
- ctrl->base + MGR_INT_CLR);
-
- if (stat & MGR_INT_TX_NACKED_2) {
- u32 mgr_stat = readl_relaxed(ctrl->base + MGR_STATUS);
- u32 mgr_ie_stat = readl_relaxed(ctrl->base + MGR_IE_STAT);
- u32 frm_stat = readl_relaxed(ctrl->base + FRM_STAT);
- u32 frm_cfg = readl_relaxed(ctrl->base + FRM_CFG);
- u32 frm_intr_stat = readl_relaxed(ctrl->base + FRM_INT_STAT);
- u32 frm_ie_stat = readl_relaxed(ctrl->base + FRM_IE_STAT);
- u32 intf_stat = readl_relaxed(ctrl->base + INTF_STAT);
- u32 intf_intr_stat = readl_relaxed(ctrl->base + INTF_INT_STAT);
- u32 intf_ie_stat = readl_relaxed(ctrl->base + INTF_IE_STAT);
-
- writel_relaxed(MGR_INT_TX_NACKED_2, ctrl->base + MGR_INT_CLR);
-
- dev_err(ctrl->dev, "TX Nack MGR:int:0x%x, stat:0x%x\n",
- stat, mgr_stat);
- dev_err(ctrl->dev, "TX Nack MGR:ie:0x%x\n", mgr_ie_stat);
- dev_err(ctrl->dev, "TX Nack FRM:int:0x%x, stat:0x%x\n",
- frm_intr_stat, frm_stat);
- dev_err(ctrl->dev, "TX Nack FRM:cfg:0x%x, ie:0x%x\n",
- frm_cfg, frm_ie_stat);
- dev_err(ctrl->dev, "TX Nack INTF:intr:0x%x, stat:0x%x\n",
- intf_intr_stat, intf_stat);
- dev_err(ctrl->dev, "TX Nack INTF:ie:0x%x\n",
- intf_ie_stat);
- err = -ENOTCONN;
- }
-
- slim_ack_txn(ctrl, err);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t qcom_slim_handle_rx_irq(struct qcom_slim_ctrl *ctrl,
- u32 stat)
-{
- u32 *rx_buf, pkt[10];
- bool q_rx = false;
- u8 mc, mt, len;
-
- pkt[0] = readl_relaxed(ctrl->base + MGR_RX_MSG);
- mt = SLIM_HEADER_GET_MT(pkt[0]);
- len = SLIM_HEADER_GET_RL(pkt[0]);
- mc = SLIM_HEADER_GET_MC(pkt[0]>>8);
-
- /*
- * this message cannot be handled by ISR, so
- * let work-queue handle it
- */
- if (mt == SLIM_MSG_MT_CORE && mc == SLIM_MSG_MC_REPORT_PRESENT) {
- rx_buf = (u32 *)slim_alloc_rxbuf(ctrl);
- if (!rx_buf) {
- dev_err(ctrl->dev, "dropping RX:0x%x due to RX full\n",
- pkt[0]);
- goto rx_ret_irq;
- }
- rx_buf[0] = pkt[0];
-
- } else {
- rx_buf = pkt;
- }
-
- __ioread32_copy(rx_buf + 1, ctrl->base + MGR_RX_MSG + 4,
- DIV_ROUND_UP(len, 4));
-
- switch (mc) {
-
- case SLIM_MSG_MC_REPORT_PRESENT:
- q_rx = true;
- break;
- case SLIM_MSG_MC_REPLY_INFORMATION:
- case SLIM_MSG_MC_REPLY_VALUE:
- slim_msg_response(&ctrl->ctrl, (u8 *)(rx_buf + 1),
- (u8)(*rx_buf >> 24), (len - 4));
- break;
- default:
- dev_err(ctrl->dev, "unsupported MC,%x MT:%x\n",
- mc, mt);
- break;
- }
-rx_ret_irq:
- writel(MGR_INT_RX_MSG_RCVD, ctrl->base +
- MGR_INT_CLR);
- if (q_rx)
- queue_work(ctrl->rxwq, &ctrl->wd);
-
- return IRQ_HANDLED;
-}
-
-static irqreturn_t qcom_slim_interrupt(int irq, void *d)
-{
- struct qcom_slim_ctrl *ctrl = d;
- u32 stat = readl_relaxed(ctrl->base + MGR_INT_STAT);
- int ret = IRQ_NONE;
-
- if (stat & MGR_INT_TX_MSG_SENT || stat & MGR_INT_TX_NACKED_2)
- ret = qcom_slim_handle_tx_irq(ctrl, stat);
-
- if (stat & MGR_INT_RX_MSG_RCVD)
- ret = qcom_slim_handle_rx_irq(ctrl, stat);
-
- return ret;
-}
-
-static int qcom_clk_pause_wakeup(struct slim_controller *sctrl)
-{
- struct qcom_slim_ctrl *ctrl = dev_get_drvdata(sctrl->dev);
-
- clk_prepare_enable(ctrl->hclk);
- clk_prepare_enable(ctrl->rclk);
- enable_irq(ctrl->irq);
-
- writel_relaxed(1, ctrl->base + FRM_WAKEUP);
- /* Make sure framer wakeup write goes through before ISR fires */
- mb();
- /*
- * HW Workaround: Currently, slave is reporting lost-sync messages
- * after SLIMbus comes out of clock pause.
- * Transaction with slave fail before slave reports that message
- * Give some time for that report to come
- * SLIMbus wakes up in clock gear 10 at 24.576MHz. With each superframe
- * being 250 usecs, we wait for 5-10 superframes here to ensure
- * we get the message
- */
- usleep_range(1250, 2500);
- return 0;
-}
-
-static void *slim_alloc_txbuf(struct qcom_slim_ctrl *ctrl,
- struct slim_msg_txn *txn,
- struct completion *done)
-{
- unsigned long flags;
- int idx;
-
- spin_lock_irqsave(&ctrl->tx.lock, flags);
- if (((ctrl->tx.head + 1) % ctrl->tx.n) == ctrl->tx.tail) {
- spin_unlock_irqrestore(&ctrl->tx.lock, flags);
- dev_err(ctrl->dev, "controller TX buf unavailable");
- return NULL;
- }
- idx = ctrl->tx.tail;
- ctrl->wr_comp[idx] = done;
- ctrl->tx.tail = (ctrl->tx.tail + 1) % ctrl->tx.n;
-
- spin_unlock_irqrestore(&ctrl->tx.lock, flags);
-
- return ctrl->tx.base + (idx * ctrl->tx.sl_sz);
-}
-
-
-static int qcom_xfer_msg(struct slim_controller *sctrl,
- struct slim_msg_txn *txn)
-{
- struct qcom_slim_ctrl *ctrl = dev_get_drvdata(sctrl->dev);
- DECLARE_COMPLETION_ONSTACK(done);
- void *pbuf = slim_alloc_txbuf(ctrl, txn, &done);
- unsigned long ms = txn->rl + HZ;
- u8 *puc;
- int ret = 0, retries = QCOM_BUF_ALLOC_RETRIES;
- unsigned long time_left;
- u8 la = txn->la;
- u32 *head;
- /* HW expects length field to be excluded */
- txn->rl--;
-
- /* spin till buffer is made available */
- if (!pbuf) {
- while (retries--) {
- usleep_range(10000, 15000);
- pbuf = slim_alloc_txbuf(ctrl, txn, &done);
- if (pbuf)
- break;
- }
- }
-
- if (retries < 0 && !pbuf)
- return -ENOMEM;
-
- puc = (u8 *)pbuf;
- head = (u32 *)pbuf;
-
- if (txn->dt == SLIM_MSG_DEST_LOGICALADDR) {
- *head = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt,
- txn->mc, 0, la);
- puc += 3;
- } else {
- *head = SLIM_MSG_ASM_FIRST_WORD(txn->rl, txn->mt,
- txn->mc, 1, la);
- puc += 2;
- }
-
- if (slim_tid_txn(txn->mt, txn->mc))
- *(puc++) = txn->tid;
-
- if (slim_ec_txn(txn->mt, txn->mc)) {
- *(puc++) = (txn->ec & 0xFF);
- *(puc++) = (txn->ec >> 8) & 0xFF;
- }
-
- if (txn->msg && txn->msg->wbuf)
- memcpy(puc, txn->msg->wbuf, txn->msg->num_bytes);
-
- qcom_slim_queue_tx(ctrl, head, txn->rl, MGR_TX_MSG);
- time_left = wait_for_completion_timeout(&done, msecs_to_jiffies(ms));
-
- if (!time_left) {
- dev_err(ctrl->dev, "TX timed out:MC:0x%x,mt:0x%x", txn->mc,
- txn->mt);
- ret = -ETIMEDOUT;
- }
-
- return ret;
-
-}
-
-static int qcom_set_laddr(struct slim_controller *sctrl,
- struct slim_eaddr *ead, u8 laddr)
-{
- struct qcom_slim_ctrl *ctrl = dev_get_drvdata(sctrl->dev);
- struct {
- __be16 manf_id;
- __be16 prod_code;
- u8 dev_index;
- u8 instance;
- u8 laddr;
- } __packed p;
- struct slim_val_inf msg = {0};
- DEFINE_SLIM_EDEST_TXN(txn, SLIM_MSG_MC_ASSIGN_LOGICAL_ADDRESS,
- 10, laddr, &msg);
- int ret;
-
- p.manf_id = cpu_to_be16(ead->manf_id);
- p.prod_code = cpu_to_be16(ead->prod_code);
- p.dev_index = ead->dev_index;
- p.instance = ead->instance;
- p.laddr = laddr;
-
- msg.wbuf = (void *)&p;
- msg.num_bytes = 7;
- ret = slim_do_transfer(&ctrl->ctrl, &txn);
-
- if (ret)
- dev_err(ctrl->dev, "set LA:0x%x failed:ret:%d\n",
- laddr, ret);
- return ret;
-}
-
-static int slim_get_current_rxbuf(struct qcom_slim_ctrl *ctrl, void *buf)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&ctrl->rx.lock, flags);
- if (ctrl->rx.tail == ctrl->rx.head) {
- spin_unlock_irqrestore(&ctrl->rx.lock, flags);
- return -ENODATA;
- }
- memcpy(buf, ctrl->rx.base + (ctrl->rx.head * ctrl->rx.sl_sz),
- ctrl->rx.sl_sz);
-
- ctrl->rx.head = (ctrl->rx.head + 1) % ctrl->rx.n;
- spin_unlock_irqrestore(&ctrl->rx.lock, flags);
-
- return 0;
-}
-
-static void qcom_slim_rxwq(struct work_struct *work)
-{
- u8 buf[SLIM_MSGQ_BUF_LEN];
- u8 mc, mt;
- int ret;
- struct qcom_slim_ctrl *ctrl = container_of(work, struct qcom_slim_ctrl,
- wd);
-
- while ((slim_get_current_rxbuf(ctrl, buf)) != -ENODATA) {
- mt = SLIM_HEADER_GET_MT(buf[0]);
- mc = SLIM_HEADER_GET_MC(buf[1]);
- if (mt == SLIM_MSG_MT_CORE &&
- mc == SLIM_MSG_MC_REPORT_PRESENT) {
- struct slim_eaddr ea;
- u8 laddr;
-
- ea.manf_id = be16_to_cpup((__be16 *)&buf[2]);
- ea.prod_code = be16_to_cpup((__be16 *)&buf[4]);
- ea.dev_index = buf[6];
- ea.instance = buf[7];
-
- ret = slim_device_report_present(&ctrl->ctrl, &ea,
- &laddr);
- if (ret < 0)
- dev_err(ctrl->dev, "assign laddr failed:%d\n",
- ret);
- } else {
- dev_err(ctrl->dev, "unexpected message:mc:%x, mt:%x\n",
- mc, mt);
- }
- }
-}
-
-static void qcom_slim_prg_slew(struct platform_device *pdev,
- struct qcom_slim_ctrl *ctrl)
-{
- if (!ctrl->slew_reg) {
- /* SLEW RATE register for this SLIMbus */
- ctrl->slew_reg = devm_platform_ioremap_resource_byname(pdev, "slew");
- if (IS_ERR(ctrl->slew_reg))
- return;
- }
-
- writel_relaxed(1, ctrl->slew_reg);
- /* Make sure SLIMbus-slew rate enabling goes through */
- wmb();
-}
-
-static int qcom_slim_probe(struct platform_device *pdev)
-{
- struct qcom_slim_ctrl *ctrl;
- struct slim_controller *sctrl;
- int ret, ver;
-
- ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
- if (!ctrl)
- return -ENOMEM;
-
- ctrl->hclk = devm_clk_get(&pdev->dev, "iface");
- if (IS_ERR(ctrl->hclk))
- return PTR_ERR(ctrl->hclk);
-
- ctrl->rclk = devm_clk_get(&pdev->dev, "core");
- if (IS_ERR(ctrl->rclk))
- return PTR_ERR(ctrl->rclk);
-
- ret = clk_set_rate(ctrl->rclk, SLIM_ROOT_FREQ);
- if (ret) {
- dev_err(&pdev->dev, "ref-clock set-rate failed:%d\n", ret);
- return ret;
- }
-
- ctrl->irq = platform_get_irq(pdev, 0);
- if (ctrl->irq < 0)
- return ctrl->irq;
-
- sctrl = &ctrl->ctrl;
- sctrl->dev = &pdev->dev;
- ctrl->dev = &pdev->dev;
- platform_set_drvdata(pdev, ctrl);
- dev_set_drvdata(ctrl->dev, ctrl);
-
- ctrl->base = devm_platform_ioremap_resource_byname(pdev, "ctrl");
- if (IS_ERR(ctrl->base))
- return PTR_ERR(ctrl->base);
-
- sctrl->set_laddr = qcom_set_laddr;
- sctrl->xfer_msg = qcom_xfer_msg;
- sctrl->wakeup = qcom_clk_pause_wakeup;
- ctrl->tx.n = QCOM_TX_MSGS;
- ctrl->tx.sl_sz = SLIM_MSGQ_BUF_LEN;
- ctrl->rx.n = QCOM_RX_MSGS;
- ctrl->rx.sl_sz = SLIM_MSGQ_BUF_LEN;
- ctrl->wr_comp = kcalloc(QCOM_TX_MSGS, sizeof(struct completion *),
- GFP_KERNEL);
- if (!ctrl->wr_comp)
- return -ENOMEM;
-
- spin_lock_init(&ctrl->rx.lock);
- spin_lock_init(&ctrl->tx.lock);
- INIT_WORK(&ctrl->wd, qcom_slim_rxwq);
- ctrl->rxwq = create_singlethread_workqueue("qcom_slim_rx");
- if (!ctrl->rxwq) {
- dev_err(ctrl->dev, "Failed to start Rx WQ\n");
- return -ENOMEM;
- }
-
- ctrl->framer.rootfreq = SLIM_ROOT_FREQ / 8;
- ctrl->framer.superfreq =
- ctrl->framer.rootfreq / SLIM_CL_PER_SUPERFRAME_DIV8;
- sctrl->a_framer = &ctrl->framer;
- sctrl->clkgear = SLIM_MAX_CLK_GEAR;
-
- qcom_slim_prg_slew(pdev, ctrl);
-
- ret = devm_request_irq(&pdev->dev, ctrl->irq, qcom_slim_interrupt,
- IRQF_TRIGGER_HIGH, "qcom_slim_irq", ctrl);
- if (ret) {
- dev_err(&pdev->dev, "request IRQ failed\n");
- goto err_request_irq_failed;
- }
-
- ret = clk_prepare_enable(ctrl->hclk);
- if (ret)
- goto err_hclk_enable_failed;
-
- ret = clk_prepare_enable(ctrl->rclk);
- if (ret)
- goto err_rclk_enable_failed;
-
- ctrl->tx.base = devm_kcalloc(&pdev->dev, ctrl->tx.n, ctrl->tx.sl_sz,
- GFP_KERNEL);
- if (!ctrl->tx.base) {
- ret = -ENOMEM;
- goto err;
- }
-
- ctrl->rx.base = devm_kcalloc(&pdev->dev,ctrl->rx.n, ctrl->rx.sl_sz,
- GFP_KERNEL);
- if (!ctrl->rx.base) {
- ret = -ENOMEM;
- goto err;
- }
-
- /* Register with framework before enabling frame, clock */
- ret = slim_register_controller(&ctrl->ctrl);
- if (ret) {
- dev_err(ctrl->dev, "error adding controller\n");
- goto err;
- }
-
- ver = readl_relaxed(ctrl->base);
- /* Version info in 16 MSbits */
- ver >>= 16;
- /* Component register initialization */
- writel(1, ctrl->base + CFG_PORT(COMP_CFG, ver));
- writel((EE_MGR_RSC_GRP | EE_NGD_2 | EE_NGD_1),
- ctrl->base + CFG_PORT(COMP_TRUST_CFG, ver));
-
- writel((MGR_INT_TX_NACKED_2 |
- MGR_INT_MSG_BUF_CONTE | MGR_INT_RX_MSG_RCVD |
- MGR_INT_TX_MSG_SENT), ctrl->base + MGR_INT_EN);
- writel(1, ctrl->base + MGR_CFG);
- /* Framer register initialization */
- writel((1 << INTR_WAKE) | (0xA << REF_CLK_GEAR) |
- (0xA << CLK_GEAR) | (1 << ROOT_FREQ) | (1 << FRM_ACTIVE) | 1,
- ctrl->base + FRM_CFG);
- writel(MGR_CFG_ENABLE, ctrl->base + MGR_CFG);
- writel(1, ctrl->base + INTF_CFG);
- writel(1, ctrl->base + CFG_PORT(COMP_CFG, ver));
-
- pm_runtime_use_autosuspend(&pdev->dev);
- pm_runtime_set_autosuspend_delay(&pdev->dev, QCOM_SLIM_AUTOSUSPEND);
- pm_runtime_set_active(&pdev->dev);
- pm_runtime_mark_last_busy(&pdev->dev);
- pm_runtime_enable(&pdev->dev);
-
- dev_dbg(ctrl->dev, "QCOM SB controller is up:ver:0x%x!\n", ver);
- return 0;
-
-err:
- clk_disable_unprepare(ctrl->rclk);
-err_rclk_enable_failed:
- clk_disable_unprepare(ctrl->hclk);
-err_hclk_enable_failed:
-err_request_irq_failed:
- destroy_workqueue(ctrl->rxwq);
- return ret;
-}
-
-static void qcom_slim_remove(struct platform_device *pdev)
-{
- struct qcom_slim_ctrl *ctrl = platform_get_drvdata(pdev);
-
- pm_runtime_disable(&pdev->dev);
- slim_unregister_controller(&ctrl->ctrl);
- clk_disable_unprepare(ctrl->rclk);
- clk_disable_unprepare(ctrl->hclk);
- destroy_workqueue(ctrl->rxwq);
-}
-
-/*
- * If PM_RUNTIME is not defined, these 2 functions become helper
- * functions to be called from system suspend/resume.
- */
-#ifdef CONFIG_PM
-static int qcom_slim_runtime_suspend(struct device *device)
-{
- struct qcom_slim_ctrl *ctrl = dev_get_drvdata(device);
- int ret;
-
- dev_dbg(device, "pm_runtime: suspending...\n");
- ret = slim_ctrl_clk_pause(&ctrl->ctrl, false, SLIM_CLK_UNSPECIFIED);
- if (ret) {
- dev_err(device, "clk pause not entered:%d", ret);
- } else {
- disable_irq(ctrl->irq);
- clk_disable_unprepare(ctrl->hclk);
- clk_disable_unprepare(ctrl->rclk);
- }
- return ret;
-}
-
-static int qcom_slim_runtime_resume(struct device *device)
-{
- struct qcom_slim_ctrl *ctrl = dev_get_drvdata(device);
- int ret = 0;
-
- dev_dbg(device, "pm_runtime: resuming...\n");
- ret = slim_ctrl_clk_pause(&ctrl->ctrl, true, 0);
- if (ret)
- dev_err(device, "clk pause not exited:%d", ret);
- return ret;
-}
-#endif
-
-#ifdef CONFIG_PM_SLEEP
-static int qcom_slim_suspend(struct device *dev)
-{
- int ret = 0;
-
- if (!pm_runtime_enabled(dev) ||
- (!pm_runtime_suspended(dev))) {
- dev_dbg(dev, "system suspend");
- ret = qcom_slim_runtime_suspend(dev);
- }
-
- return ret;
-}
-
-static int qcom_slim_resume(struct device *dev)
-{
- if (!pm_runtime_enabled(dev) || !pm_runtime_suspended(dev)) {
- int ret;
-
- dev_dbg(dev, "system resume");
- ret = qcom_slim_runtime_resume(dev);
- if (!ret) {
- pm_runtime_mark_last_busy(dev);
- pm_request_autosuspend(dev);
- }
- return ret;
-
- }
- return 0;
-}
-#endif /* CONFIG_PM_SLEEP */
-
-static const struct dev_pm_ops qcom_slim_dev_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(qcom_slim_suspend, qcom_slim_resume)
- SET_RUNTIME_PM_OPS(
- qcom_slim_runtime_suspend,
- qcom_slim_runtime_resume,
- NULL
- )
-};
-
-static const struct of_device_id qcom_slim_dt_match[] = {
- { .compatible = "qcom,slim", },
- {}
-};
-MODULE_DEVICE_TABLE(of, qcom_slim_dt_match);
-
-static struct platform_driver qcom_slim_driver = {
- .probe = qcom_slim_probe,
- .remove = qcom_slim_remove,
- .driver = {
- .name = "qcom_slim_ctrl",
- .of_match_table = qcom_slim_dt_match,
- .pm = &qcom_slim_dev_pm_ops,
- },
-};
-module_platform_driver(qcom_slim_driver);
-
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Qualcomm SLIMbus Controller");
diff --git a/drivers/uio/uio_aec.c b/drivers/uio/uio_aec.c
index 8c164e51ff9e..dafcc5f44f24 100644
--- a/drivers/uio/uio_aec.c
+++ b/drivers/uio/uio_aec.c
@@ -33,7 +33,7 @@
#define MAILBOX 0x0F
-static struct pci_device_id ids[] = {
+static const struct pci_device_id ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_AEC, PCI_DEVICE_ID_AEC_VITCLTC), },
{ 0, }
};
diff --git a/drivers/uio/uio_cif.c b/drivers/uio/uio_cif.c
index 1cc3b8b5a345..4e4b589ddef1 100644
--- a/drivers/uio/uio_cif.c
+++ b/drivers/uio/uio_cif.c
@@ -105,7 +105,7 @@ static void hilscher_pci_remove(struct pci_dev *dev)
iounmap(info->mem[0].internal_addr);
}
-static struct pci_device_id hilscher_pci_ids[] = {
+static const struct pci_device_id hilscher_pci_ids[] = {
{
.vendor = PCI_VENDOR_ID_PLX,
.device = PCI_DEVICE_ID_PLX_9030,
diff --git a/drivers/uio/uio_dmem_genirq.c b/drivers/uio/uio_dmem_genirq.c
index 31aa75110ba5..41c18ec62a45 100644
--- a/drivers/uio/uio_dmem_genirq.c
+++ b/drivers/uio/uio_dmem_genirq.c
@@ -297,28 +297,6 @@ static int uio_dmem_genirq_probe(struct platform_device *pdev)
return devm_uio_register_device(&pdev->dev, priv->uioinfo);
}
-static int uio_dmem_genirq_runtime_nop(struct device *dev)
-{
- /* Runtime PM callback shared between ->runtime_suspend()
- * and ->runtime_resume(). Simply returns success.
- *
- * In this driver pm_runtime_get_sync() and pm_runtime_put_sync()
- * are used at open() and release() time. This allows the
- * Runtime PM code to turn off power to the device while the
- * device is unused, ie before open() and after release().
- *
- * This Runtime PM callback does not need to save or restore
- * any registers since user space is responsbile for hardware
- * register reinitialization after open().
- */
- return 0;
-}
-
-static const struct dev_pm_ops uio_dmem_genirq_dev_pm_ops = {
- .runtime_suspend = uio_dmem_genirq_runtime_nop,
- .runtime_resume = uio_dmem_genirq_runtime_nop,
-};
-
#ifdef CONFIG_OF
static const struct of_device_id uio_of_genirq_match[] = {
{ /* empty for now */ },
@@ -330,7 +308,6 @@ static struct platform_driver uio_dmem_genirq = {
.probe = uio_dmem_genirq_probe,
.driver = {
.name = DRIVER_NAME,
- .pm = &uio_dmem_genirq_dev_pm_ops,
.of_match_table = of_match_ptr(uio_of_genirq_match),
},
};
diff --git a/drivers/uio/uio_hv_generic.c b/drivers/uio/uio_hv_generic.c
index f19efad4d6f8..3f8e2e27697f 100644
--- a/drivers/uio/uio_hv_generic.c
+++ b/drivers/uio/uio_hv_generic.c
@@ -111,7 +111,6 @@ static void hv_uio_channel_cb(void *context)
struct hv_device *hv_dev;
struct hv_uio_private_data *pdata;
- chan->inbound.ring_buffer->interrupt_mask = 1;
virt_mb();
/*
@@ -183,8 +182,6 @@ hv_uio_new_channel(struct vmbus_channel *new_sc)
return;
}
- /* Disable interrupts on sub channel */
- new_sc->inbound.ring_buffer->interrupt_mask = 1;
set_channel_read_mode(new_sc, HV_CALL_ISR);
ret = hv_create_ring_sysfs(new_sc, hv_uio_ring_mmap);
if (ret) {
@@ -227,9 +224,7 @@ hv_uio_open(struct uio_info *info, struct inode *inode)
ret = vmbus_connect_ring(dev->channel,
hv_uio_channel_cb, dev->channel);
- if (ret == 0)
- dev->channel->inbound.ring_buffer->interrupt_mask = 1;
- else
+ if (ret)
atomic_dec(&pdata->refcnt);
return ret;
diff --git a/drivers/uio/uio_netx.c b/drivers/uio/uio_netx.c
index a1a58802c793..18917b2ac04c 100644
--- a/drivers/uio/uio_netx.c
+++ b/drivers/uio/uio_netx.c
@@ -127,7 +127,7 @@ static void netx_pci_remove(struct pci_dev *dev)
iounmap(info->mem[0].internal_addr);
}
-static struct pci_device_id netx_pci_ids[] = {
+static const struct pci_device_id netx_pci_ids[] = {
{
.vendor = PCI_VENDOR_ID_HILSCHER,
.device = PCI_DEVICE_ID_HILSCHER_NETX,
diff --git a/drivers/uio/uio_pdrv_genirq.c b/drivers/uio/uio_pdrv_genirq.c
index 2ec7d25e8264..0a1885d1b2e3 100644
--- a/drivers/uio/uio_pdrv_genirq.c
+++ b/drivers/uio/uio_pdrv_genirq.c
@@ -249,34 +249,11 @@ static int uio_pdrv_genirq_probe(struct platform_device *pdev)
return ret;
}
-static int uio_pdrv_genirq_runtime_nop(struct device *dev)
-{
- /* Runtime PM callback shared between ->runtime_suspend()
- * and ->runtime_resume(). Simply returns success.
- *
- * In this driver pm_runtime_get_sync() and pm_runtime_put_sync()
- * are used at open() and release() time. This allows the
- * Runtime PM code to turn off power to the device while the
- * device is unused, ie before open() and after release().
- *
- * This Runtime PM callback does not need to save or restore
- * any registers since user space is responsbile for hardware
- * register reinitialization after open().
- */
- return 0;
-}
-
-static const struct dev_pm_ops uio_pdrv_genirq_dev_pm_ops = {
- .runtime_suspend = uio_pdrv_genirq_runtime_nop,
- .runtime_resume = uio_pdrv_genirq_runtime_nop,
-};
-
#ifdef CONFIG_OF
static struct of_device_id uio_of_genirq_match[] = {
{ /* This is filled with module_parm */ },
{ /* Sentinel */ },
};
-MODULE_DEVICE_TABLE(of, uio_of_genirq_match);
module_param_string(of_id, uio_of_genirq_match[0].compatible, 128, 0);
MODULE_PARM_DESC(of_id, "Openfirmware id of the device to be handled by uio");
#endif
@@ -285,7 +262,6 @@ static struct platform_driver uio_pdrv_genirq = {
.probe = uio_pdrv_genirq_probe,
.driver = {
.name = DRIVER_NAME,
- .pm = &uio_pdrv_genirq_dev_pm_ops,
.of_match_table = of_match_ptr(uio_of_genirq_match),
},
};
diff --git a/drivers/uio/uio_sercos3.c b/drivers/uio/uio_sercos3.c
index b93a5f8f4cba..12afc2fa1a0b 100644
--- a/drivers/uio/uio_sercos3.c
+++ b/drivers/uio/uio_sercos3.c
@@ -191,7 +191,7 @@ static void sercos3_pci_remove(struct pci_dev *dev)
}
}
-static struct pci_device_id sercos3_pci_ids[] = {
+static const struct pci_device_id sercos3_pci_ids[] = {
{
.vendor = PCI_VENDOR_ID_PLX,
.device = PCI_DEVICE_ID_PLX_9030,
diff --git a/drivers/w1/masters/matrox_w1.c b/drivers/w1/masters/matrox_w1.c
index 2852cd2dc67c..146fa7c6e74e 100644
--- a/drivers/w1/masters/matrox_w1.c
+++ b/drivers/w1/masters/matrox_w1.c
@@ -47,7 +47,6 @@ struct matrox_device {
unsigned long phys_addr;
void __iomem *virt_addr;
- unsigned long found;
struct w1_bus_master *bus_master;
};
@@ -158,8 +157,6 @@ static int matrox_w1_probe(struct pci_dev *pdev, const struct pci_device_id *ent
pci_set_drvdata(pdev, dev);
- dev->found = 1;
-
dev_info(&pdev->dev, "Matrox G400 GPIO transport layer for 1-wire.\n");
return 0;
@@ -176,10 +173,9 @@ static void matrox_w1_remove(struct pci_dev *pdev)
{
struct matrox_device *dev = pci_get_drvdata(pdev);
- if (dev->found) {
- w1_remove_master_device(dev->bus_master);
- iounmap(dev->virt_addr);
- }
+ w1_remove_master_device(dev->bus_master);
+ iounmap(dev->virt_addr);
+
kfree(dev);
}
diff --git a/include/dt-bindings/interconnect/qcom,glymur-rpmh.h b/include/dt-bindings/interconnect/qcom,glymur-rpmh.h
new file mode 100644
index 000000000000..6a0e754345e4
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,glymur-rpmh.h
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2025, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_GLYMUR_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_GLYMUR_H
+
+#define MASTER_CRYPTO 0
+#define MASTER_SOCCP_PROC 1
+#define MASTER_QDSS_ETR 2
+#define MASTER_QDSS_ETR_1 3
+#define SLAVE_A1NOC_SNOC 4
+
+#define MASTER_UFS_MEM 0
+#define MASTER_USB3_2 1
+#define MASTER_USB4_2 2
+#define SLAVE_A2NOC_SNOC 3
+
+#define MASTER_QSPI_0 0
+#define MASTER_QUP_0 1
+#define MASTER_QUP_1 2
+#define MASTER_QUP_2 3
+#define MASTER_SP 4
+#define MASTER_SDCC_2 5
+#define MASTER_SDCC_4 6
+#define MASTER_USB2 7
+#define MASTER_USB3_MP 8
+#define SLAVE_A3NOC_SNOC 9
+
+#define MASTER_USB3_0 0
+#define MASTER_USB3_1 1
+#define MASTER_USB4_0 2
+#define MASTER_USB4_1 3
+#define SLAVE_A4NOC_HSCNOC 4
+
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define MASTER_QUP_CORE_2 2
+#define SLAVE_QUP_CORE_0 3
+#define SLAVE_QUP_CORE_1 4
+#define SLAVE_QUP_CORE_2 5
+
+#define MASTER_CNOC_CFG 0
+#define SLAVE_AHB2PHY_SOUTH 1
+#define SLAVE_AHB2PHY_NORTH 2
+#define SLAVE_AHB2PHY_2 3
+#define SLAVE_AHB2PHY_3 4
+#define SLAVE_AV1_ENC_CFG 5
+#define SLAVE_CAMERA_CFG 6
+#define SLAVE_CLK_CTL 7
+#define SLAVE_CRYPTO_0_CFG 8
+#define SLAVE_DISPLAY_CFG 9
+#define SLAVE_GFX3D_CFG 10
+#define SLAVE_IMEM_CFG 11
+#define SLAVE_PCIE_0_CFG 12
+#define SLAVE_PCIE_1_CFG 13
+#define SLAVE_PCIE_2_CFG 14
+#define SLAVE_PCIE_3A_CFG 15
+#define SLAVE_PCIE_3B_CFG 16
+#define SLAVE_PCIE_4_CFG 17
+#define SLAVE_PCIE_5_CFG 18
+#define SLAVE_PCIE_6_CFG 19
+#define SLAVE_PCIE_RSCC 20
+#define SLAVE_PDM 21
+#define SLAVE_PRNG 22
+#define SLAVE_QDSS_CFG 23
+#define SLAVE_QSPI_0 24
+#define SLAVE_QUP_0 25
+#define SLAVE_QUP_1 26
+#define SLAVE_QUP_2 27
+#define SLAVE_SDCC_2 28
+#define SLAVE_SDCC_4 29
+#define SLAVE_SMMUV3_CFG 30
+#define SLAVE_TCSR 31
+#define SLAVE_TLMM 32
+#define SLAVE_UFS_MEM_CFG 33
+#define SLAVE_USB2 34
+#define SLAVE_USB3_0 35
+#define SLAVE_USB3_1 36
+#define SLAVE_USB3_2 37
+#define SLAVE_USB3_MP 38
+#define SLAVE_USB4_0 39
+#define SLAVE_USB4_1 40
+#define SLAVE_USB4_2 41
+#define SLAVE_VENUS_CFG 42
+#define SLAVE_CNOC_PCIE_SLAVE_EAST_CFG 43
+#define SLAVE_CNOC_PCIE_SLAVE_WEST_CFG 44
+#define SLAVE_LPASS_QTB_CFG 45
+#define SLAVE_CNOC_MNOC_CFG 46
+#define SLAVE_NSP_QTB_CFG 47
+#define SLAVE_PCIE_EAST_ANOC_CFG 48
+#define SLAVE_PCIE_WEST_ANOC_CFG 49
+#define SLAVE_QDSS_STM 50
+#define SLAVE_TCU 51
+
+#define MASTER_HSCNOC_CNOC 0
+#define SLAVE_AOSS 1
+#define SLAVE_IPC_ROUTER_CFG 2
+#define SLAVE_SOCCP 3
+#define SLAVE_TME_CFG 4
+#define SLAVE_APPSS 5
+#define SLAVE_CNOC_CFG 6
+#define SLAVE_BOOT_IMEM 7
+#define SLAVE_IMEM 8
+
+#define MASTER_GPU_TCU 0
+#define MASTER_PCIE_TCU 1
+#define MASTER_SYS_TCU 2
+#define MASTER_APPSS_PROC 3
+#define MASTER_AGGRE_NOC_EAST 4
+#define MASTER_GFX3D 5
+#define MASTER_LPASS_GEM_NOC 6
+#define MASTER_MNOC_HF_MEM_NOC 7
+#define MASTER_MNOC_SF_MEM_NOC 8
+#define MASTER_COMPUTE_NOC 9
+#define MASTER_PCIE_EAST 10
+#define MASTER_PCIE_WEST 11
+#define MASTER_SNOC_SF_MEM_NOC 12
+#define MASTER_WLAN_Q6 13
+#define MASTER_GIC 14
+#define SLAVE_HSCNOC_CNOC 15
+#define SLAVE_LLCC 16
+#define SLAVE_PCIE_EAST 17
+#define SLAVE_PCIE_WEST 18
+
+#define MASTER_LPIAON_NOC 0
+#define SLAVE_LPASS_GEM_NOC 1
+
+#define MASTER_LPASS_LPINOC 0
+#define SLAVE_LPIAON_NOC_LPASS_AG_NOC 1
+
+#define MASTER_LPASS_PROC 0
+#define SLAVE_LPICX_NOC_LPIAON_NOC 1
+
+#define MASTER_LLCC 0
+#define SLAVE_EBI1 1
+
+#define MASTER_AV1_ENC 0
+#define MASTER_CAMNOC_HF 1
+#define MASTER_CAMNOC_ICP 2
+#define MASTER_CAMNOC_SF 3
+#define MASTER_EVA 4
+#define MASTER_MDP 5
+#define MASTER_CDSP_HCP 6
+#define MASTER_VIDEO 7
+#define MASTER_VIDEO_CV_PROC 8
+#define MASTER_VIDEO_V_PROC 9
+#define MASTER_CNOC_MNOC_CFG 10
+#define SLAVE_MNOC_HF_MEM_NOC 11
+#define SLAVE_MNOC_SF_MEM_NOC 12
+#define SLAVE_SERVICE_MNOC 13
+
+#define MASTER_CPUCP 0
+#define SLAVE_NSINOC_SYSTEM_NOC 1
+#define SLAVE_SERVICE_NSINOC 2
+
+#define MASTER_CDSP_PROC 0
+#define SLAVE_NSP0_HSC_NOC 1
+
+#define MASTER_OOBMSS_SP_PROC 0
+#define SLAVE_OOBMSS_SNOC 1
+
+#define MASTER_PCIE_EAST_ANOC_CFG 0
+#define MASTER_PCIE_0 1
+#define MASTER_PCIE_1 2
+#define MASTER_PCIE_5 3
+#define SLAVE_PCIE_EAST_MEM_NOC 4
+#define SLAVE_SERVICE_PCIE_EAST_AGGRE_NOC 5
+
+#define MASTER_HSCNOC_PCIE_EAST 0
+#define MASTER_CNOC_PCIE_EAST_SLAVE_CFG 1
+#define SLAVE_HSCNOC_PCIE_EAST_MS_MPU_CFG 2
+#define SLAVE_SERVICE_PCIE_EAST 3
+#define SLAVE_PCIE_0 4
+#define SLAVE_PCIE_1 5
+#define SLAVE_PCIE_5 6
+
+#define MASTER_PCIE_WEST_ANOC_CFG 0
+#define MASTER_PCIE_2 1
+#define MASTER_PCIE_3A 2
+#define MASTER_PCIE_3B 3
+#define MASTER_PCIE_4 4
+#define MASTER_PCIE_6 5
+#define SLAVE_PCIE_WEST_MEM_NOC 6
+#define SLAVE_SERVICE_PCIE_WEST_AGGRE_NOC 7
+
+#define MASTER_HSCNOC_PCIE_WEST 0
+#define MASTER_CNOC_PCIE_WEST_SLAVE_CFG 1
+#define SLAVE_HSCNOC_PCIE_WEST_MS_MPU_CFG 2
+#define SLAVE_SERVICE_PCIE_WEST 3
+#define SLAVE_PCIE_2 4
+#define SLAVE_PCIE_3A 5
+#define SLAVE_PCIE_3B 6
+#define SLAVE_PCIE_4 7
+#define SLAVE_PCIE_6 8
+
+#define MASTER_A1NOC_SNOC 0
+#define MASTER_A2NOC_SNOC 1
+#define MASTER_A3NOC_SNOC 2
+#define MASTER_NSINOC_SNOC 3
+#define MASTER_OOBMSS 4
+#define SLAVE_SNOC_GEM_NOC_SF 5
+
+#endif
diff --git a/include/linux/iio/frequency/adf4350.h b/include/linux/iio/frequency/adf4350.h
index de45cf2ee1e4..ce2086f97e3f 100644
--- a/include/linux/iio/frequency/adf4350.h
+++ b/include/linux/iio/frequency/adf4350.h
@@ -51,7 +51,7 @@
/* REG3 Bit Definitions */
#define ADF4350_REG3_12BIT_CLKDIV(x) ((x) << 3)
-#define ADF4350_REG3_12BIT_CLKDIV_MODE(x) ((x) << 16)
+#define ADF4350_REG3_12BIT_CLKDIV_MODE(x) ((x) << 15)
#define ADF4350_REG3_12BIT_CSR_EN (1 << 18)
#define ADF4351_REG3_CHARGE_CANCELLATION_EN (1 << 21)
#define ADF4351_REG3_ANTI_BACKLASH_3ns_EN (1 << 22)
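
The corrected shift places the two-bit clock-divider mode field at DB16:DB15,
where the register map lays it out (the old shift of 16 put it one bit too
high). A hedged sketch of composing REG3 with the fixed macro; the divider
and mode values are illustrative:

	u32 reg3 = ADF4350_REG3_12BIT_CLKDIV(clkdiv) |		/* DB14:DB3 */
		   ADF4350_REG3_12BIT_CLKDIV_MODE(mode) |	/* DB16:DB15 */
		   ADF4350_REG3_12BIT_CSR_EN;			/* DB18 */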
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index 3e6deb00fc85..7d0aa718499c 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -70,7 +70,16 @@
#define UHID_MINOR 239
#define USERIO_MINOR 240
#define VHOST_VSOCK_MINOR 241
+#define EISA_EEPROM_MINOR 241
#define RFKILL_MINOR 242
+
+/*
+ * Misc char device minor number space is divided by the macro below:
+ *
+ * < 255 : fixed minor number
+ * == 255 : request a dynamically allocated minor number
+ * > 255 : dynamically allocated minor number; 1048320 minors in total.
+ */
#define MISC_DYNAMIC_MINOR 255
struct miscdevice {
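
With that split, a driver that does not care which minor it gets simply asks
for MISC_DYNAMIC_MINOR and lets the core pick one from the dynamic range. A
minimal sketch (names illustrative):

	static struct miscdevice example_miscdev = {
		.minor = MISC_DYNAMIC_MINOR,	/* core assigns a minor > 255 */
		.name = "example",
		.fops = &example_fops,
	};

	/* in module init */
	ret = misc_register(&example_miscdev);
	if (ret)
		return ret;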
diff --git a/include/linux/rtsx_pci.h b/include/linux/rtsx_pci.h
index 3b4c36705a9b..3c5689356004 100644
--- a/include/linux/rtsx_pci.h
+++ b/include/linux/rtsx_pci.h
@@ -1160,6 +1160,8 @@ struct rtsx_cr_option {
bool ocp_en;
u8 sd_400mA_ocp_thd;
u8 sd_800mA_ocp_thd;
+ u8 sd_cd_reverse_en;
+ u8 sd_wp_reverse_en;
};
/*
diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h
index 1fd92021a573..03ee4c7010d7 100644
--- a/include/uapi/linux/android/binder.h
+++ b/include/uapi/linux/android/binder.h
@@ -38,7 +38,7 @@ enum {
BINDER_TYPE_PTR = B_PACK_CHARS('p', 't', '*', B_TYPE_LARGE),
};
-enum {
+enum flat_binder_object_flags {
FLAT_BINDER_FLAG_PRIORITY_MASK = 0xff,
FLAT_BINDER_FLAG_ACCEPTS_FDS = 0x100,
diff --git a/include/uapi/linux/android/binder_netlink.h b/include/uapi/linux/android/binder_netlink.h
new file mode 100644
index 000000000000..b218f96d6668
--- /dev/null
+++ b/include/uapi/linux/android/binder_netlink.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */
+/* Do not edit directly, auto-generated from: */
+/* Documentation/netlink/specs/binder.yaml */
+/* YNL-GEN uapi header */
+
+#ifndef _UAPI_LINUX_ANDROID_BINDER_NETLINK_H
+#define _UAPI_LINUX_ANDROID_BINDER_NETLINK_H
+
+#define BINDER_FAMILY_NAME "binder"
+#define BINDER_FAMILY_VERSION 1
+
+enum {
+ BINDER_A_REPORT_ERROR = 1,
+ BINDER_A_REPORT_CONTEXT,
+ BINDER_A_REPORT_FROM_PID,
+ BINDER_A_REPORT_FROM_TID,
+ BINDER_A_REPORT_TO_PID,
+ BINDER_A_REPORT_TO_TID,
+ BINDER_A_REPORT_IS_REPLY,
+ BINDER_A_REPORT_FLAGS,
+ BINDER_A_REPORT_CODE,
+ BINDER_A_REPORT_DATA_SIZE,
+
+ __BINDER_A_REPORT_MAX,
+ BINDER_A_REPORT_MAX = (__BINDER_A_REPORT_MAX - 1)
+};
+
+enum {
+ BINDER_CMD_REPORT = 1,
+
+ __BINDER_CMD_MAX,
+ BINDER_CMD_MAX = (__BINDER_CMD_MAX - 1)
+};
+
+#define BINDER_MCGRP_REPORT "report"
+
+#endif /* _UAPI_LINUX_ANDROID_BINDER_NETLINK_H */
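
Userspace consumes these reports over generic netlink: resolve the family by
name, look up the multicast group, and subscribe. A hedged sketch using
libnl-genl (error handling elided):

	struct nl_sock *sk = nl_socket_alloc();
	int grp;

	genl_connect(sk);
	grp = genl_ctrl_resolve_grp(sk, BINDER_FAMILY_NAME,
				    BINDER_MCGRP_REPORT);
	nl_socket_add_membership(sk, grp);
	/* nl_recvmsgs() now delivers BINDER_CMD_REPORT notifications */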
diff --git a/include/uapi/misc/fastrpc.h b/include/uapi/misc/fastrpc.h
index f33d914d8f46..c6e2925f47e6 100644
--- a/include/uapi/misc/fastrpc.h
+++ b/include/uapi/misc/fastrpc.h
@@ -134,7 +134,7 @@ struct fastrpc_mem_unmap {
};
struct fastrpc_ioctl_capability {
- __u32 domain;
+ __u32 unused; /* deprecated, ignored by the kernel */
__u32 attribute_id;
__u32 capability; /* dsp capability */
__u32 reserved[4];
diff --git a/rust/bindings/bindings_helper.h b/rust/bindings/bindings_helper.h
index 84d60635e8a9..9b3a4ab95818 100644
--- a/rust/bindings/bindings_helper.h
+++ b/rust/bindings/bindings_helper.h
@@ -50,6 +50,7 @@
#include <linux/dma-mapping.h>
#include <linux/errname.h>
#include <linux/ethtool.h>
+#include <linux/fdtable.h>
#include <linux/file.h>
#include <linux/firmware.h>
#include <linux/fs.h>
@@ -71,6 +72,7 @@
#include <linux/sched.h>
#include <linux/security.h>
#include <linux/slab.h>
+#include <linux/task_work.h>
#include <linux/tracepoint.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
@@ -99,3 +101,9 @@ const xa_mark_t RUST_CONST_HELPER_XA_PRESENT = XA_PRESENT;
const gfp_t RUST_CONST_HELPER_XA_FLAGS_ALLOC = XA_FLAGS_ALLOC;
const gfp_t RUST_CONST_HELPER_XA_FLAGS_ALLOC1 = XA_FLAGS_ALLOC1;
+
+#if IS_ENABLED(CONFIG_ANDROID_BINDER_IPC_RUST)
+#include "../../drivers/android/binder/rust_binder.h"
+#include "../../drivers/android/binder/rust_binder_events.h"
+#include "../../drivers/android/binder/page_range_helper.h"
+#endif
diff --git a/rust/helpers/binder.c b/rust/helpers/binder.c
new file mode 100644
index 000000000000..224d38a92f1d
--- /dev/null
+++ b/rust/helpers/binder.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+
+/*
+ * Copyright (C) 2025 Google LLC.
+ */
+
+#include <linux/list_lru.h>
+#include <linux/task_work.h>
+
+unsigned long rust_helper_list_lru_count(struct list_lru *lru)
+{
+ return list_lru_count(lru);
+}
+
+unsigned long rust_helper_list_lru_walk(struct list_lru *lru,
+ list_lru_walk_cb isolate, void *cb_arg,
+ unsigned long nr_to_walk)
+{
+ return list_lru_walk(lru, isolate, cb_arg, nr_to_walk);
+}
+
+void rust_helper_init_task_work(struct callback_head *twork,
+ task_work_func_t func)
+{
+ init_task_work(twork, func);
+}
diff --git a/rust/helpers/helpers.c b/rust/helpers/helpers.c
index 7cf7fe95e41d..8e8277bdddca 100644
--- a/rust/helpers/helpers.c
+++ b/rust/helpers/helpers.c
@@ -8,6 +8,7 @@
*/
#include "auxiliary.c"
+#include "binder.c"
#include "blk.c"
#include "bug.c"
#include "build_assert.c"
diff --git a/rust/helpers/page.c b/rust/helpers/page.c
index b3f2b8fbf87f..7144de5a61db 100644
--- a/rust/helpers/page.c
+++ b/rust/helpers/page.c
@@ -2,6 +2,7 @@
#include <linux/gfp.h>
#include <linux/highmem.h>
+#include <linux/mm.h>
struct page *rust_helper_alloc_pages(gfp_t gfp_mask, unsigned int order)
{
@@ -17,3 +18,10 @@ void rust_helper_kunmap_local(const void *addr)
{
kunmap_local(addr);
}
+
+#ifndef NODE_NOT_IN_PAGE_FLAGS
+int rust_helper_page_to_nid(const struct page *page)
+{
+ return page_to_nid(page);
+}
+#endif
diff --git a/rust/helpers/security.c b/rust/helpers/security.c
index 0c4c2065df28..ca22da09548d 100644
--- a/rust/helpers/security.c
+++ b/rust/helpers/security.c
@@ -17,4 +17,28 @@ void rust_helper_security_release_secctx(struct lsm_context *cp)
{
security_release_secctx(cp);
}
+
+int rust_helper_security_binder_set_context_mgr(const struct cred *mgr)
+{
+ return security_binder_set_context_mgr(mgr);
+}
+
+int rust_helper_security_binder_transaction(const struct cred *from,
+ const struct cred *to)
+{
+ return security_binder_transaction(from, to);
+}
+
+int rust_helper_security_binder_transfer_binder(const struct cred *from,
+ const struct cred *to)
+{
+ return security_binder_transfer_binder(from, to);
+}
+
+int rust_helper_security_binder_transfer_file(const struct cred *from,
+ const struct cred *to,
+ const struct file *file)
+{
+ return security_binder_transfer_file(from, to, file);
+}
#endif
diff --git a/rust/kernel/cred.rs b/rust/kernel/cred.rs
index 2599f01e8b28..3aa2e4c4a50c 100644
--- a/rust/kernel/cred.rs
+++ b/rust/kernel/cred.rs
@@ -54,6 +54,12 @@ impl Credential {
unsafe { &*ptr.cast() }
}
+ /// Returns a raw pointer to the inner credential.
+ #[inline]
+ pub fn as_ptr(&self) -> *const bindings::cred {
+ self.0.get()
+ }
+
/// Get the id for this security context.
#[inline]
pub fn get_secid(&self) -> u32 {
diff --git a/rust/kernel/fs.rs b/rust/kernel/fs.rs
index 0121b38c59e6..6ba6bdf143cb 100644
--- a/rust/kernel/fs.rs
+++ b/rust/kernel/fs.rs
@@ -6,3 +6,6 @@
pub mod file;
pub use self::file::{File, LocalFile};
+
+mod kiocb;
+pub use self::kiocb::Kiocb;
diff --git a/rust/kernel/fs/kiocb.rs b/rust/kernel/fs/kiocb.rs
new file mode 100644
index 000000000000..84c936cd69b0
--- /dev/null
+++ b/rust/kernel/fs/kiocb.rs
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2024 Google LLC.
+
+//! Kernel IO callbacks.
+//!
+//! C headers: [`include/linux/fs.h`](srctree/include/linux/fs.h)
+
+use core::marker::PhantomData;
+use core::ptr::NonNull;
+use kernel::types::ForeignOwnable;
+
+/// Wrapper for the kernel's `struct kiocb`.
+///
+/// Currently this abstraction is incomplete and is essentially just a tuple containing a
+/// reference to a file and a file position.
+///
+/// The type `T` represents the filesystem or driver specific data associated with the file.
+///
+/// # Invariants
+///
+/// `inner` points at a valid `struct kiocb` whose file has the type `T` as its private data.
+pub struct Kiocb<'a, T> {
+ inner: NonNull<bindings::kiocb>,
+ _phantom: PhantomData<&'a T>,
+}
+
+impl<'a, T: ForeignOwnable> Kiocb<'a, T> {
+ /// Create a `Kiocb` from a raw pointer.
+ ///
+ /// # Safety
+ ///
+ /// The pointer must reference a valid `struct kiocb` for the duration of `'a`. The private
+ /// data of the file must be `T`.
+ pub unsafe fn from_raw(kiocb: *mut bindings::kiocb) -> Self {
+ Self {
+ // SAFETY: If a pointer is valid it is not null.
+ inner: unsafe { NonNull::new_unchecked(kiocb) },
+ _phantom: PhantomData,
+ }
+ }
+
+ /// Access the underlying `struct kiocb` directly.
+ pub fn as_raw(&self) -> *mut bindings::kiocb {
+ self.inner.as_ptr()
+ }
+
+ /// Get the filesystem or driver specific data associated with the file.
+ pub fn file(&self) -> <T as ForeignOwnable>::Borrowed<'a> {
+ // SAFETY: We have shared access to this kiocb and hence the underlying file, so we can
+ // read the file's private data.
+ let private = unsafe { (*(*self.as_raw()).ki_filp).private_data };
+ // SAFETY: The kiocb has shared access to the private data.
+ unsafe { <T as ForeignOwnable>::borrow(private) }
+ }
+
+ /// Gets the current value of `ki_pos`.
+ pub fn ki_pos(&self) -> i64 {
+ // SAFETY: We have shared access to the kiocb, so we can read its `ki_pos` field.
+ unsafe { (*self.as_raw()).ki_pos }
+ }
+
+ /// Gets a mutable reference to the `ki_pos` field.
+ pub fn ki_pos_mut(&mut self) -> &mut i64 {
+ // SAFETY: We have exclusive access to the kiocb, so we can write to `ki_pos`.
+ unsafe { &mut (*self.as_raw()).ki_pos }
+ }
+}
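A usage sketch for the new wrapper; the driver type and its buffer field are
hypothetical, not part of this series. A read hook can borrow the file's
private data via file() and pass ki_pos_mut() down so the position advances
across repeated reads:

    use kernel::{fs::Kiocb, iov::IovIterDest, prelude::*, sync::Arc};

    struct MyDevice {
        data: KVVec<u8>, // hypothetical backing buffer
    }

    fn read_iter(mut kiocb: Kiocb<'_, Arc<MyDevice>>, iov: &mut IovIterDest<'_>) -> Result<usize> {
        let device = kiocb.file(); // borrow the file's private data
        // Serve the slice at the current position and update `ki_pos`.
        iov.simple_read_from_buffer(kiocb.ki_pos_mut(), &device.data)
    }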
diff --git a/rust/kernel/iov.rs b/rust/kernel/iov.rs
new file mode 100644
index 000000000000..43bae8923c46
--- /dev/null
+++ b/rust/kernel/iov.rs
@@ -0,0 +1,314 @@
+// SPDX-License-Identifier: GPL-2.0
+
+// Copyright (C) 2025 Google LLC.
+
+//! IO vectors.
+//!
+//! C headers: [`include/linux/iov_iter.h`](srctree/include/linux/iov_iter.h),
+//! [`include/linux/uio.h`](srctree/include/linux/uio.h)
+
+use crate::{
+ alloc::{Allocator, Flags},
+ bindings,
+ prelude::*,
+ types::Opaque,
+};
+use core::{marker::PhantomData, mem::MaybeUninit, ptr, slice};
+
+const ITER_SOURCE: bool = bindings::ITER_SOURCE != 0;
+const ITER_DEST: bool = bindings::ITER_DEST != 0;
+
+// Compile-time assertion for the above constants.
+const _: () = {
+ build_assert!(
+ ITER_SOURCE != ITER_DEST,
+ "ITER_DEST and ITER_SOURCE should be different."
+ );
+};
+
+/// An IO vector that acts as a source of data.
+///
+/// The data may come from many different sources. This includes both things in kernel-space and
+/// reading from userspace. It's not necessarily the case that the data source is immutable, so
+/// rewinding the IO vector to read the same data twice is not guaranteed to result in the same
+/// bytes. It's also possible that the data source is mapped in a thread-local manner using e.g.
+/// `kmap_local_page()`, so this type is not `Send` to ensure that the mapping is read from the
+/// right context in that scenario.
+///
+/// # Invariants
+///
+/// Must hold a valid `struct iov_iter` with `data_source` set to `ITER_SOURCE`. For the duration
+/// of `'data`, it must be safe to read from this IO vector using the standard C methods for this
+/// purpose.
+#[repr(transparent)]
+pub struct IovIterSource<'data> {
+ iov: Opaque<bindings::iov_iter>,
+ /// Represent to the type system that this value contains a pointer to readable data it does
+ /// not own.
+ _source: PhantomData<&'data [u8]>,
+}
+
+impl<'data> IovIterSource<'data> {
+ /// Obtain an `IovIterSource` from a raw pointer.
+ ///
+ /// # Safety
+ ///
+ /// * The referenced `struct iov_iter` must be valid and must only be accessed through the
+ /// returned reference for the duration of `'iov`.
+ /// * The referenced `struct iov_iter` must have `data_source` set to `ITER_SOURCE`.
+ /// * For the duration of `'data`, it must be safe to read from this IO vector using the
+ /// standard C methods for this purpose.
+ #[track_caller]
+ #[inline]
+ pub unsafe fn from_raw<'iov>(ptr: *mut bindings::iov_iter) -> &'iov mut IovIterSource<'data> {
+ // SAFETY: The caller ensures that `ptr` is valid.
+ let data_source = unsafe { (*ptr).data_source };
+ assert_eq!(data_source, ITER_SOURCE);
+
+ // SAFETY: The caller ensures the type invariants for the right durations, and
+ // `IovIterSource` is layout compatible with `struct iov_iter`.
+ unsafe { &mut *ptr.cast::<IovIterSource<'data>>() }
+ }
+
+ /// Access this as a raw `struct iov_iter`.
+ #[inline]
+ pub fn as_raw(&mut self) -> *mut bindings::iov_iter {
+ self.iov.get()
+ }
+
+ /// Returns the number of bytes available in this IO vector.
+ ///
+ /// Note that this may overestimate the number of bytes. For example, reading from userspace
+ /// memory could fail with `EFAULT`, which will be treated as the end of the IO vector.
+ #[inline]
+ pub fn len(&self) -> usize {
+ // SAFETY: We have shared access to this IO vector, so we can read its `count` field.
+ unsafe {
+ (*self.iov.get())
+ .__bindgen_anon_1
+ .__bindgen_anon_1
+ .as_ref()
+ .count
+ }
+ }
+
+ /// Returns `true` if there are no bytes left in this IO vector.
+ ///
+ /// This may return `false` even if there are no more bytes available. For example, reading from
+ /// userspace memory could fail with `EFAULT`, which will be treated as the end of the IO vector.
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+ /// Advance this IO vector by `bytes` bytes.
+ ///
+ /// If `bytes` is larger than the size of this IO vector, it is advanced to the end.
+ #[inline]
+ pub fn advance(&mut self, bytes: usize) {
+ // SAFETY: By the type invariants, `self.iov` is a valid IO vector.
+ unsafe { bindings::iov_iter_advance(self.as_raw(), bytes) };
+ }
+
+ /// Advance this IO vector backwards by `bytes` bytes.
+ ///
+ /// # Safety
+ ///
+ /// The IO vector must not be reverted to before its beginning.
+ #[inline]
+ pub unsafe fn revert(&mut self, bytes: usize) {
+ // SAFETY: By the type invariants, `self.iov` is a valid IO vector, and the caller
+ // ensures that `bytes` is in bounds.
+ unsafe { bindings::iov_iter_revert(self.as_raw(), bytes) };
+ }
+
+ /// Read data from this IO vector.
+ ///
+ /// Returns the number of bytes that have been copied.
+ #[inline]
+ pub fn copy_from_iter(&mut self, out: &mut [u8]) -> usize {
+ // SAFETY: `Self::copy_from_iter_raw` guarantees that it will not write any uninitialized
+ // bytes in the provided buffer, so `out` is still a valid `u8` slice after this call.
+ let out = unsafe { &mut *(ptr::from_mut(out) as *mut [MaybeUninit<u8>]) };
+
+ self.copy_from_iter_raw(out).len()
+ }
+
+ /// Read data from this IO vector and append it to a vector.
+ ///
+ /// Returns the number of bytes that have been copied.
+ #[inline]
+ pub fn copy_from_iter_vec<A: Allocator>(
+ &mut self,
+ out: &mut Vec<u8, A>,
+ flags: Flags,
+ ) -> Result<usize> {
+ out.reserve(self.len(), flags)?;
+ let len = self.copy_from_iter_raw(out.spare_capacity_mut()).len();
+ // SAFETY:
+ // - `len` is the length of a subslice of the spare capacity, so `len` is at most the
+ // length of the spare capacity.
+ // - `Self::copy_from_iter_raw` guarantees that the first `len` bytes of the spare capacity
+ // have been initialized.
+ unsafe { out.inc_len(len) };
+ Ok(len)
+ }
+
+ /// Read data from this IO vector into potentially uninitialized memory.
+ ///
+ /// Returns the sub-slice of the output that has been initialized. If the returned slice is
+ /// shorter than the input buffer, then the entire IO vector has been read.
+ ///
+ /// This will never write uninitialized bytes to the provided buffer.
+ #[inline]
+ pub fn copy_from_iter_raw(&mut self, out: &mut [MaybeUninit<u8>]) -> &mut [u8] {
+ let capacity = out.len();
+ let out = out.as_mut_ptr().cast::<u8>();
+
+ // GUARANTEES: The C API guarantees that it does not write uninitialized bytes to the
+ // provided buffer.
+ // SAFETY:
+ // * By the type invariants, it is still valid to read from this IO vector.
+ // * `out` is valid for writing for `capacity` bytes because it comes from a slice of
+ // that length.
+ let len = unsafe { bindings::_copy_from_iter(out.cast(), capacity, self.as_raw()) };
+
+ // SAFETY: The underlying C api guarantees that initialized bytes have been written to the
+ // first `len` bytes of the spare capacity.
+ unsafe { slice::from_raw_parts_mut(out, len) }
+ }
+}
+
+/// An IO vector that acts as a destination for data.
+///
+/// IO vectors support many different types of destinations. This includes both buffers in
+/// kernel-space and writing to userspace. It's possible that the destination buffer is mapped in a
+/// thread-local manner using e.g. `kmap_local_page()`, so this type is not `Send` to ensure that
+/// the mapping is written to the right context in that scenario.
+///
+/// # Invariants
+///
+/// Must hold a valid `struct iov_iter` with `data_source` set to `ITER_DEST`. For the duration of
+/// `'data`, it must be safe to write to this IO vector using the standard C methods for this
+/// purpose.
+#[repr(transparent)]
+pub struct IovIterDest<'data> {
+ iov: Opaque<bindings::iov_iter>,
+ /// Represent to the type system that this value contains a pointer to writable data it does
+ /// not own.
+ _source: PhantomData<&'data mut [u8]>,
+}
+
+impl<'data> IovIterDest<'data> {
+ /// Obtain an `IovIterDest` from a raw pointer.
+ ///
+ /// # Safety
+ ///
+ /// * The referenced `struct iov_iter` must be valid and must only be accessed through the
+ /// returned reference for the duration of `'iov`.
+ /// * The referenced `struct iov_iter` must have `data_source` set to `ITER_DEST`.
+ /// * For the duration of `'data`, it must be safe to write to this IO vector using the
+ /// standard C methods for this purpose.
+ #[track_caller]
+ #[inline]
+ pub unsafe fn from_raw<'iov>(ptr: *mut bindings::iov_iter) -> &'iov mut IovIterDest<'data> {
+ // SAFETY: The caller ensures that `ptr` is valid.
+ let data_source = unsafe { (*ptr).data_source };
+ assert_eq!(data_source, ITER_DEST);
+
+ // SAFETY: The caller ensures the type invariants for the right durations, and
+ // `IovIterDest` is layout compatible with `struct iov_iter`.
+ unsafe { &mut *ptr.cast::<IovIterDest<'data>>() }
+ }
+
+ /// Access this as a raw `struct iov_iter`.
+ #[inline]
+ pub fn as_raw(&mut self) -> *mut bindings::iov_iter {
+ self.iov.get()
+ }
+
+ /// Returns the number of bytes available in this IO vector.
+ ///
+ /// Note that this may overestimate the number of bytes. For example, writing to userspace
+ /// memory could fail with `EFAULT`, which will be treated as the end of the IO vector.
+ #[inline]
+ pub fn len(&self) -> usize {
+ // SAFETY: We have shared access to this IO vector, so we can read its `count` field.
+ unsafe {
+ (*self.iov.get())
+ .__bindgen_anon_1
+ .__bindgen_anon_1
+ .as_ref()
+ .count
+ }
+ }
+
+ /// Returns `true` if there are no bytes left in this IO vector.
+ ///
+ /// This may return `false` even if there are no more bytes available. For example, writing to
+ /// userspace memory could fail with `EFAULT`, which will be treated as the end of the IO vector.
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+ /// Advance this IO vector by `bytes` bytes.
+ ///
+ /// If `bytes` is larger than the size of this IO vector, it is advanced to the end.
+ #[inline]
+ pub fn advance(&mut self, bytes: usize) {
+ // SAFETY: By the type invariants, `self.iov` is a valid IO vector.
+ unsafe { bindings::iov_iter_advance(self.as_raw(), bytes) };
+ }
+
+ /// Advance this IO vector backwards by `bytes` bytes.
+ ///
+ /// # Safety
+ ///
+ /// The IO vector must not be reverted to before its beginning.
+ #[inline]
+ pub unsafe fn revert(&mut self, bytes: usize) {
+ // SAFETY: By the type invariants, `self.iov` is a valid IO vector, and the caller
+ // ensures that `bytes` is in bounds.
+ unsafe { bindings::iov_iter_revert(self.as_raw(), bytes) };
+ }
+
+ /// Write data to this IO vector.
+ ///
+ /// Returns the number of bytes that were written. If this is shorter than the provided slice,
+ /// then no more bytes can be written.
+ #[inline]
+ pub fn copy_to_iter(&mut self, input: &[u8]) -> usize {
+ // SAFETY:
+ // * By the type invariants, it is still valid to write to this IO vector.
+ // * `input` is valid for `input.len()` bytes.
+ unsafe { bindings::_copy_to_iter(input.as_ptr().cast(), input.len(), self.as_raw()) }
+ }
+
+ /// Utility for implementing `read_iter` given the full contents of the file.
+ ///
+ /// The full contents of the file being read from are represented by `contents`. This call will
+ /// write the appropriate sub-slice of `contents` and update the file position in `ppos` so
+ /// that the file will appear to contain `contents` even if it takes multiple reads to read the
+ /// entire file.
+ #[inline]
+ pub fn simple_read_from_buffer(&mut self, ppos: &mut i64, contents: &[u8]) -> Result<usize> {
+ if *ppos < 0 {
+ return Err(EINVAL);
+ }
+ let Ok(pos) = usize::try_from(*ppos) else {
+ return Ok(0);
+ };
+ if pos >= contents.len() {
+ return Ok(0);
+ }
+
+ // BOUNDS: We just checked that `pos < contents.len()` above.
+ let num_written = self.copy_to_iter(&contents[pos..]);
+
+ // OVERFLOW: `pos+num_written <= contents.len() <= isize::MAX <= i64::MAX`.
+ *ppos = (pos + num_written) as i64;
+
+ Ok(num_written)
+ }
+}
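As a short sketch of the intended calling pattern on the source side (the
helper name is hypothetical): copy_from_iter_vec() reserves len() bytes up
front and appends only as many bytes as could actually be copied:

    use kernel::{iov::IovIterSource, prelude::*};

    // Hypothetical helper: drain an entire `IovIterSource` into a vector.
    fn drain(iov: &mut IovIterSource<'_>) -> Result<KVVec<u8>> {
        let mut buf = KVVec::new();
        // Reserves `iov.len()` bytes, copies, then bumps the vector length.
        iov.copy_from_iter_vec(&mut buf, GFP_KERNEL)?;
        Ok(buf)
    }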
diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
index ed53169e795c..99dbb7b2812e 100644
--- a/rust/kernel/lib.rs
+++ b/rust/kernel/lib.rs
@@ -92,6 +92,7 @@ pub mod fs;
pub mod init;
pub mod io;
pub mod ioctl;
+pub mod iov;
pub mod jump_label;
#[cfg(CONFIG_KUNIT)]
pub mod kunit;
diff --git a/rust/kernel/miscdevice.rs b/rust/kernel/miscdevice.rs
index 6373fe183b27..35630fc63875 100644
--- a/rust/kernel/miscdevice.rs
+++ b/rust/kernel/miscdevice.rs
@@ -13,7 +13,8 @@ use crate::{
device::Device,
error::{to_result, Error, Result, VTABLE_DEFAULT_ERROR},
ffi::{c_int, c_long, c_uint, c_ulong},
- fs::File,
+ fs::{File, Kiocb},
+ iov::{IovIterDest, IovIterSource},
mm::virt::VmaNew,
prelude::*,
seq_file::SeqFile,
@@ -141,6 +142,16 @@ pub trait MiscDevice: Sized {
build_error!(VTABLE_DEFAULT_ERROR)
}
+ /// Read from this miscdevice.
+ fn read_iter(_kiocb: Kiocb<'_, Self::Ptr>, _iov: &mut IovIterDest<'_>) -> Result<usize> {
+ build_error!(VTABLE_DEFAULT_ERROR)
+ }
+
+ /// Write to this miscdevice.
+ fn write_iter(_kiocb: Kiocb<'_, Self::Ptr>, _iov: &mut IovIterSource<'_>) -> Result<usize> {
+ build_error!(VTABLE_DEFAULT_ERROR)
+ }
+
/// Handler for ioctls.
///
/// The `cmd` argument is usually manipulated using the utilities in [`kernel::ioctl`].
@@ -247,6 +258,46 @@ impl<T: MiscDevice> MiscdeviceVTable<T> {
/// # Safety
///
+ /// `kiocb` must correspond to a valid file that is associated with a
+ /// `MiscDeviceRegistration<T>`. `iter` must be a valid `struct iov_iter` for writing.
+ unsafe extern "C" fn read_iter(
+ kiocb: *mut bindings::kiocb,
+ iter: *mut bindings::iov_iter,
+ ) -> isize {
+ // SAFETY: The caller provides a valid `struct kiocb` associated with a
+ // `MiscDeviceRegistration<T>` file.
+ let kiocb = unsafe { Kiocb::from_raw(kiocb) };
+ // SAFETY: This is a valid `struct iov_iter` for writing.
+ let iov = unsafe { IovIterDest::from_raw(iter) };
+
+ match T::read_iter(kiocb, iov) {
+ Ok(res) => res as isize,
+ Err(err) => err.to_errno() as isize,
+ }
+ }
+
+ /// # Safety
+ ///
+ /// `kiocb` must correspond to a valid file that is associated with a
+ /// `MiscDeviceRegistration<T>`. `iter` must be a valid `struct iov_iter` for reading.
+ unsafe extern "C" fn write_iter(
+ kiocb: *mut bindings::kiocb,
+ iter: *mut bindings::iov_iter,
+ ) -> isize {
+ // SAFETY: The caller provides a valid `struct kiocb` associated with a
+ // `MiscDeviceRegistration<T>` file.
+ let kiocb = unsafe { Kiocb::from_raw(kiocb) };
+ // SAFETY: This is a valid `struct iov_iter` for reading.
+ let iov = unsafe { IovIterSource::from_raw(iter) };
+
+ match T::write_iter(kiocb, iov) {
+ Ok(res) => res as isize,
+ Err(err) => err.to_errno() as isize,
+ }
+ }
+
+ /// # Safety
+ ///
/// `file` must be a valid file that is associated with a `MiscDeviceRegistration<T>`.
/// `vma` must be a vma that is currently being mmap'ed with this file.
unsafe extern "C" fn mmap(
@@ -341,6 +392,16 @@ impl<T: MiscDevice> MiscdeviceVTable<T> {
open: Some(Self::open),
release: Some(Self::release),
mmap: if T::HAS_MMAP { Some(Self::mmap) } else { None },
+ read_iter: if T::HAS_READ_ITER {
+ Some(Self::read_iter)
+ } else {
+ None
+ },
+ write_iter: if T::HAS_WRITE_ITER {
+ Some(Self::write_iter)
+ } else {
+ None
+ },
unlocked_ioctl: if T::HAS_IOCTL {
Some(Self::ioctl)
} else {
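The HAS_READ_ITER / HAS_WRITE_ITER constants come from the #[vtable]
machinery: a hook is wired into struct file_operations only when the trait
implementation actually overrides the build_error!() default. A hedged
sketch with a hypothetical device type:

    // `NullDevice` overrides only `read_iter`, so the generated vtable
    // sets `file_operations::read_iter` and leaves `write_iter` as NULL.
    struct NullDevice;

    #[vtable]
    impl MiscDevice for NullDevice {
        type Ptr = KBox<NullDevice>;

        fn open(_file: &File, _misc: &MiscDeviceRegistration<Self>) -> Result<Self::Ptr> {
            Ok(KBox::new(NullDevice, GFP_KERNEL)?)
        }

        fn read_iter(_kiocb: Kiocb<'_, Self::Ptr>, _iov: &mut IovIterDest<'_>) -> Result<usize> {
            Ok(0) // always at EOF
        }
    }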
diff --git a/rust/kernel/page.rs b/rust/kernel/page.rs
index 7c1b17246ed5..811fe30e8e6f 100644
--- a/rust/kernel/page.rs
+++ b/rust/kernel/page.rs
@@ -85,6 +85,12 @@ impl Page {
self.page.as_ptr()
}
+ /// Get the node id containing this page.
+ pub fn nid(&self) -> i32 {
+ // SAFETY: Always safe to call with a valid page.
+ unsafe { bindings::page_to_nid(self.as_ptr()) }
+ }
+
/// Runs a piece of code with this page mapped to an address.
///
/// The page is unmapped when this call returns.
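The new accessor simply surfaces page_to_nid(); presumably this supports
NUMA-aware bookkeeping such as the list_lru usage elsewhere in this series.
A trivial, hypothetical sketch:

    use kernel::{page::Page, prelude::*};

    fn report_node() -> Result {
        let page = Page::alloc_page(GFP_KERNEL)?;
        // NUMA node backing the freshly allocated page.
        pr_info!("allocated on node {}\n", page.nid());
        Ok(())
    }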
diff --git a/rust/kernel/security.rs b/rust/kernel/security.rs
index 0c63e9e7e564..9d271695265f 100644
--- a/rust/kernel/security.rs
+++ b/rust/kernel/security.rs
@@ -8,9 +8,46 @@
use crate::{
bindings,
+ cred::Credential,
error::{to_result, Result},
+ fs::File,
};
+/// Calls the security modules to determine if the given task can become the manager of a binder
+/// context.
+#[inline]
+pub fn binder_set_context_mgr(mgr: &Credential) -> Result {
+ // SAFETY: `mgr.as_ptr()` is valid because the shared reference guarantees a nonzero refcount.
+ to_result(unsafe { bindings::security_binder_set_context_mgr(mgr.as_ptr()) })
+}
+
+/// Calls the security modules to determine if binder transactions are allowed from task `from` to
+/// task `to`.
+#[inline]
+pub fn binder_transaction(from: &Credential, to: &Credential) -> Result {
+ // SAFETY: `from` and `to` are valid because the shared references guarantee nonzero refcounts.
+ to_result(unsafe { bindings::security_binder_transaction(from.as_ptr(), to.as_ptr()) })
+}
+
+/// Calls the security modules to determine if task `from` is allowed to send binder objects
+/// (owned by itself or other processes) to task `to` through a binder transaction.
+#[inline]
+pub fn binder_transfer_binder(from: &Credential, to: &Credential) -> Result {
+ // SAFETY: `from` and `to` are valid because the shared references guarantee nonzero refcounts.
+ to_result(unsafe { bindings::security_binder_transfer_binder(from.as_ptr(), to.as_ptr()) })
+}
+
+/// Calls the security modules to determine if task `from` is allowed to send the given file to
+/// task `to` (which would get its own file descriptor) through a binder transaction.
+#[inline]
+pub fn binder_transfer_file(from: &Credential, to: &Credential, file: &File) -> Result {
+ // SAFETY: `from`, `to` and `file` are valid because the shared references guarantee nonzero
+ // refcounts.
+ to_result(unsafe {
+ bindings::security_binder_transfer_file(from.as_ptr(), to.as_ptr(), file.as_ptr())
+ })
+}
+
/// A security context string.
///
/// # Invariants
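A hedged sketch of how a binder-like driver might invoke these wrappers;
the function names here are hypothetical (the real call sites are in the
Rust binder driver added by this series):

    use kernel::{cred::Credential, prelude::*, security};

    fn become_context_manager(mgr: &Credential) -> Result {
        // Fails if an LSM refuses to let this task manage the context.
        security::binder_set_context_mgr(mgr)
    }

    fn check_transaction(from: &Credential, to: &Credential) -> Result {
        // Any LSM veto aborts the transaction before it is queued.
        security::binder_transaction(from, to)
    }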
diff --git a/rust/uapi/uapi_helper.h b/rust/uapi/uapi_helper.h
index 1409441359f5..de3562b08d0c 100644
--- a/rust/uapi/uapi_helper.h
+++ b/rust/uapi/uapi_helper.h
@@ -9,6 +9,7 @@
#include <uapi/asm-generic/ioctl.h>
#include <uapi/drm/drm.h>
#include <uapi/drm/nova_drm.h>
+#include <uapi/linux/android/binder.h>
#include <uapi/linux/mdio.h>
#include <uapi/linux/mii.h>
#include <uapi/linux/ethtool.h>
diff --git a/samples/rust/rust_misc_device.rs b/samples/rust/rust_misc_device.rs
index e7ab77448f75..d69bc33dbd99 100644
--- a/samples/rust/rust_misc_device.rs
+++ b/samples/rust/rust_misc_device.rs
@@ -100,13 +100,13 @@ use core::pin::Pin;
use kernel::{
c_str,
device::Device,
- fs::File,
+ fs::{File, Kiocb},
ioctl::{_IO, _IOC_SIZE, _IOR, _IOW},
+ iov::{IovIterDest, IovIterSource},
miscdevice::{MiscDevice, MiscDeviceOptions, MiscDeviceRegistration},
new_mutex,
prelude::*,
- sync::Mutex,
- types::ARef,
+ sync::{aref::ARef, Mutex},
uaccess::{UserSlice, UserSliceReader, UserSliceWriter},
};
@@ -144,6 +144,7 @@ impl kernel::InPlaceModule for RustMiscDeviceModule {
struct Inner {
value: i32,
+ buffer: KVVec<u8>,
}
#[pin_data(PinnedDrop)]
@@ -165,7 +166,10 @@ impl MiscDevice for RustMiscDevice {
KBox::try_pin_init(
try_pin_init! {
RustMiscDevice {
- inner <- new_mutex!( Inner{ value: 0_i32 } ),
+ inner <- new_mutex!(Inner {
+ value: 0_i32,
+ buffer: KVVec::new(),
+ }),
dev: dev,
}
},
@@ -173,6 +177,33 @@ impl MiscDevice for RustMiscDevice {
)
}
+ fn read_iter(mut kiocb: Kiocb<'_, Self::Ptr>, iov: &mut IovIterDest<'_>) -> Result<usize> {
+ let me = kiocb.file();
+ dev_info!(me.dev, "Reading from Rust Misc Device Sample\n");
+
+ let inner = me.inner.lock();
+ // Read the buffer contents, taking the file position into account.
+ let read = iov.simple_read_from_buffer(kiocb.ki_pos_mut(), &inner.buffer)?;
+
+ Ok(read)
+ }
+
+ fn write_iter(mut kiocb: Kiocb<'_, Self::Ptr>, iov: &mut IovIterSource<'_>) -> Result<usize> {
+ let me = kiocb.file();
+ dev_info!(me.dev, "Writing to Rust Misc Device Sample\n");
+
+ let mut inner = me.inner.lock();
+
+ // Replace buffer contents.
+ inner.buffer.clear();
+ let len = iov.copy_from_iter_vec(&mut inner.buffer, GFP_KERNEL)?;
+
+ // Set position to zero so that future `read` calls will see the new contents.
+ *kiocb.ki_pos_mut() = 0;
+
+ Ok(len)
+ }
+
fn ioctl(me: Pin<&RustMiscDevice>, _file: &File, cmd: u32, arg: usize) -> Result<isize> {
dev_info!(me.dev, "IOCTLing Rust Misc Device Sample\n");
diff --git a/tools/testing/selftests/filesystems/binderfs/binderfs_test.c b/tools/testing/selftests/filesystems/binderfs/binderfs_test.c
index 81db85a5cc16..39a68078a79b 100644
--- a/tools/testing/selftests/filesystems/binderfs/binderfs_test.c
+++ b/tools/testing/selftests/filesystems/binderfs/binderfs_test.c
@@ -65,6 +65,7 @@ static int __do_binderfs_test(struct __test_metadata *_metadata)
"oneway_spam_detection",
"extended_error",
"freeze_notification",
+ "transaction_report",
};
change_mountns(_metadata);