248 files changed, 10321 insertions, 3298 deletions
diff --git a/Documentation/ABI/testing/sysfs-class-platform-profile b/Documentation/ABI/testing/sysfs-class-platform-profile index dc72adfb830a..fcab26894ec3 100644 --- a/Documentation/ABI/testing/sysfs-class-platform-profile +++ b/Documentation/ABI/testing/sysfs-class-platform-profile @@ -23,6 +23,8 @@ Description: This file contains a space-separated list of profiles supported power consumption with a slight bias towards performance performance High performance operation + max-power Higher performance operation that may exceed + internal battery draw limits when on AC power custom Driver defined custom profile ==================== ======================================== diff --git a/Documentation/ABI/testing/sysfs-driver-uniwill-laptop b/Documentation/ABI/testing/sysfs-driver-uniwill-laptop new file mode 100644 index 000000000000..eaeb659793d2 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-driver-uniwill-laptop @@ -0,0 +1,53 @@ +What: /sys/bus/platform/devices/INOU0000:XX/fn_lock_toggle_enable +Date: November 2025 +KernelVersion: 6.19 +Contact: Armin Wolf <W_Armin@gmx.de> +Description: + Allows userspace applications to enable/disable the FN lock feature + of the integrated keyboard by writing "1"/"0" into this file. + + Reading this file returns the current enable status of the FN lock functionality. + +What: /sys/bus/platform/devices/INOU0000:XX/super_key_toggle_enable +Date: November 2025 +KernelVersion: 6.19 +Contact: Armin Wolf <W_Armin@gmx.de> +Description: + Allows userspace applications to enable/disable the super key functionality + of the integrated keyboard by writing "1"/"0" into this file. + + Reading this file returns the current enable status of the super key functionality. + +What: /sys/bus/platform/devices/INOU0000:XX/touchpad_toggle_enable +Date: November 2025 +KernelVersion: 6.19 +Contact: Armin Wolf <W_Armin@gmx.de> +Description: + Allows userspace applications to enable/disable the touchpad toggle functionality + of the integrated touchpad by writing "1"/"0" into this file. + + Reading this file returns the current enable status of the touchpad toggle + functionality. + +What: /sys/bus/platform/devices/INOU0000:XX/rainbow_animation +Date: November 2025 +KernelVersion: 6.19 +Contact: Armin Wolf <W_Armin@gmx.de> +Description: + Forces the integrated lightbar to display a rainbow animation when the machine + is not suspended. Writing "1"/"0" into this file enables/disables this + functionality. + + Reading this file returns the current status of the rainbow animation functionality. + +What: /sys/bus/platform/devices/INOU0000:XX/breathing_in_suspend +Date: November 2025 +KernelVersion: 6.19 +Contact: Armin Wolf <W_Armin@gmx.de> +Description: + Causes the integrated lightbar to display a breathing animation when the machine + has been suspended and is running on AC power. Writing "1"/"0" into this file + enables/disables this functionality. + + Reading this file returns the current status of the breathing animation + functionality. diff --git a/Documentation/ABI/testing/sysfs-platform-asus-wmi b/Documentation/ABI/testing/sysfs-platform-asus-wmi index 28144371a0f1..89acb6638df8 100644 --- a/Documentation/ABI/testing/sysfs-platform-asus-wmi +++ b/Documentation/ABI/testing/sysfs-platform-asus-wmi @@ -63,6 +63,7 @@ Date: Aug 2022 KernelVersion: 6.1 Contact: "Luke Jones" <luke@ljones.dev> Description: + DEPRECATED, WILL BE REMOVED SOON: please use asus-armoury Switch the GPU hardware MUX mode. 
Laptops with this feature can be toggled to boot with only the dGPU (discrete mode) or in standard Optimus/Hybrid mode. On switch a reboot is required: @@ -75,6 +76,7 @@ Date: Aug 2022 KernelVersion: 5.17 Contact: "Luke Jones" <luke@ljones.dev> Description: + DEPRECATED, WILL BE REMOVED SOON: please use asus-armoury Disable discrete GPU: * 0 - Enable dGPU, * 1 - Disable dGPU @@ -84,6 +86,7 @@ Date: Aug 2022 KernelVersion: 5.17 Contact: "Luke Jones" <luke@ljones.dev> Description: + DEPRECATED, WILL BE REMOVED SOON: please use asus-armoury Enable the external GPU paired with ROG X-Flow laptops. Toggling this setting will also trigger ACPI to disable the dGPU: @@ -95,6 +98,7 @@ Date: Aug 2022 KernelVersion: 5.17 Contact: "Luke Jones" <luke@ljones.dev> Description: + DEPRECATED, WILL BE REMOVED SOON: please use asus-armoury Enable an LCD response-time boost to reduce or remove ghosting: * 0 - Disable, * 1 - Enable @@ -104,6 +108,7 @@ Date: Jun 2023 KernelVersion: 6.5 Contact: "Luke Jones" <luke@ljones.dev> Description: + DEPRECATED, WILL BE REMOVED SOON: please use asus-armoury Get the current charging mode being used: * 1 - Barrel connected charger, * 2 - USB-C charging @@ -114,6 +119,7 @@ Date: Jun 2023 KernelVersion: 6.5 Contact: "Luke Jones" <luke@ljones.dev> Description: + DEPRECATED, WILL BE REMOVED SOON: please use asus-armoury Show if the egpu (XG Mobile) is correctly connected: * 0 - False, * 1 - True @@ -123,6 +129,7 @@ Date: Jun 2023 KernelVersion: 6.5 Contact: "Luke Jones" <luke@ljones.dev> Description: + DEPRECATED, WILL BE REMOVED SOON: please use asus-armoury Change the mini-LED mode: * 0 - Single-zone, * 1 - Multi-zone @@ -133,6 +140,7 @@ Date: Apr 2024 KernelVersion: 6.10 Contact: "Luke Jones" <luke@ljones.dev> Description: + DEPRECATED, WILL BE REMOVED SOON: please use asus-armoury List the available mini-led modes. What: /sys/devices/platform/<platform>/ppt_pl1_spl @@ -140,6 +148,7 @@ Date: Jun 2023 KernelVersion: 6.5 Contact: "Luke Jones" <luke@ljones.dev> Description: + DEPRECATED, WILL BE REMOVED SOON: please use asus-armoury Set the Package Power Target total of CPU: PL1 on Intel, SPL on AMD. Shown on Intel+Nvidia or AMD+Nvidia based systems: @@ -150,6 +159,7 @@ Date: Jun 2023 KernelVersion: 6.5 Contact: "Luke Jones" <luke@ljones.dev> Description: + DEPRECATED, WILL BE REMOVED SOON: please use asus-armoury Set the Slow Package Power Tracking Limit of CPU: PL2 on Intel, SPPT, on AMD. Shown on Intel+Nvidia or AMD+Nvidia based systems: @@ -160,6 +170,7 @@ Date: Jun 2023 KernelVersion: 6.5 Contact: "Luke Jones" <luke@ljones.dev> Description: + DEPRECATED, WILL BE REMOVED SOON: please use asus-armoury Set the Fast Package Power Tracking Limit of CPU. AMD+Nvidia only: * min=5, max=250 @@ -168,6 +179,7 @@ Date: Jun 2023 KernelVersion: 6.5 Contact: "Luke Jones" <luke@ljones.dev> Description: + DEPRECATED, WILL BE REMOVED SOON: please use asus-armoury Set the APU SPPT limit. Shown on full AMD systems only: * min=5, max=130 @@ -176,6 +188,7 @@ Date: Jun 2023 KernelVersion: 6.5 Contact: "Luke Jones" <luke@ljones.dev> Description: + DEPRECATED, WILL BE REMOVED SOON: please use asus-armoury Set the platform SPPT limit.
Shown on full AMD systems only: * min=5, max=130 @@ -184,6 +197,7 @@ Date: Jun 2023 KernelVersion: 6.5 Contact: "Luke Jones" <luke@ljones.dev> Description: + DEPRECATED, WILL BE REMOVED SOON: please use asus-armoury Set the dynamic boost limit of the Nvidia dGPU: * min=5, max=25 @@ -192,6 +206,7 @@ Date: Jun 2023 KernelVersion: 6.5 Contact: "Luke Jones" <luke@ljones.dev> Description: + DEPRECATED, WILL BE REMOVED SOON: please use asus-armoury Set the target temperature limit of the Nvidia dGPU: * min=75, max=87 @@ -200,6 +215,7 @@ Date: Apr 2024 KernelVersion: 6.10 Contact: "Luke Jones" <luke@ljones.dev> Description: + DEPRECATED, WILL BE REMOVED SOON: please use asus-armoury Set if the BIOS POST sound is played on boot. * 0 - False, * 1 - True @@ -209,6 +225,7 @@ Date: Apr 2024 KernelVersion: 6.10 Contact: "Luke Jones" <luke@ljones.dev> Description: + DEPRECATED, WILL BE REMOVED SOON: please use asus-armoury Set if the MCU can go in to low-power mode on system sleep * 0 - False, * 1 - True diff --git a/Documentation/ABI/testing/sysfs-platform-ayaneo-ec b/Documentation/ABI/testing/sysfs-platform-ayaneo-ec new file mode 100644 index 000000000000..4cffbf5fc7ca --- /dev/null +++ b/Documentation/ABI/testing/sysfs-platform-ayaneo-ec @@ -0,0 +1,19 @@ +What: /sys/devices/platform/ayaneo-ec/controller_power +Date: Nov 2025 +KernelVersion: 6.19 +Contact: "Antheas Kapenekakis" <lkml@antheas.dev> +Description: + Current controller power state. Allows turning on and off + the controller power (e.g. for power savings). Write 1 to + turn on, 0 to turn off. File is readable and writable. + +What: /sys/devices/platform/ayaneo-ec/controller_modules +Date: Nov 2025 +KernelVersion: 6.19 +Contact: "Antheas Kapenekakis" <lkml@antheas.dev> +Description: + Shows which controller modules are currently connected to + the device. Possible values are "left", "right" and "both". + File is read-only. The Windows software for this device + will only set controller power to 1 if both module sides + are connected (i.e. this file returns "both"). 
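As a minimal userspace sketch of how the two ayaneo-ec attributes above are meant to be used together (mirroring the described Windows behaviour of only powering the controller when both module sides are present; the paths follow the ABI entries above, error handling is kept minimal and nothing here is part of the patch itself)::

    #include <stdio.h>
    #include <string.h>

    #define AYANEO_EC "/sys/devices/platform/ayaneo-ec/"

    int main(void)
    {
        char modules[16] = "";
        FILE *f = fopen(AYANEO_EC "controller_modules", "r");

        if (!f)
            return 1;
        if (!fgets(modules, sizeof(modules), f))
            modules[0] = '\0';
        fclose(f);

        /* Only power the controller when both module sides are attached. */
        if (!strncmp(modules, "both", 4)) {
            f = fopen(AYANEO_EC "controller_power", "w");
            if (!f)
                return 1;
            fputs("1", f);
            fclose(f);
        }

        return 0;
    }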
diff --git a/Documentation/admin-guide/device-mapper/dm-raid.rst b/Documentation/admin-guide/device-mapper/dm-raid.rst index bb17e26e3c1b..e11f10764770 100644 --- a/Documentation/admin-guide/device-mapper/dm-raid.rst +++ b/Documentation/admin-guide/device-mapper/dm-raid.rst @@ -20,10 +20,10 @@ The target is named "raid" and it accepts the following parameters:: raid0 RAID0 striping (no resilience) raid1 RAID1 mirroring raid4 RAID4 with dedicated last parity disk - raid5_n RAID5 with dedicated last parity disk supporting takeover + raid5_n RAID5 with dedicated last parity disk supporting takeover from/to raid1 Same as raid4 - - Transitory layout + - Transitory layout for takeover from/to raid1 raid5_la RAID5 left asymmetric - rotating parity 0 with data continuation @@ -48,8 +48,8 @@ The target is named "raid" and it accepts the following parameters:: raid6_n_6 RAID6 with dedicate parity disks - parity and Q-syndrome on the last 2 disks; - layout for takeover from/to raid4/raid5_n - raid6_la_6 Same as "raid_la" plus dedicated last Q-syndrome disk + layout for takeover from/to raid0/raid4/raid5_n + raid6_la_6 Same as "raid_la" plus dedicated last Q-syndrome disk supporting takeover from/to raid5 - layout for takeover from raid5_la from/to raid6 raid6_ra_6 Same as "raid5_ra" dedicated last Q-syndrome disk @@ -173,9 +173,9 @@ The target is named "raid" and it accepts the following parameters:: The delta_disks option value (-251 < N < +251) triggers device removal (negative value) or device addition (positive value) to any reshape supporting raid levels 4/5/6 and 10. - RAID levels 4/5/6 allow for addition of devices (metadata - and data device tuple), raid10_near and raid10_offset only - allow for device addition. raid10_far does not support any + RAID levels 4/5/6 allow for addition and removal of devices + (metadata and data device tuple), raid10_near and raid10_offset + only allow for device addition. raid10_far does not support any reshaping at all. A minimum of devices have to be kept to enforce resilience, which is 3 devices for raid4/5 and 4 devices for raid6. @@ -372,6 +372,72 @@ to safely enable discard support for RAID 4/5/6: 'devices_handle_discards_safely' +Takeover/Reshape Support +------------------------ +The target natively supports these two types of MDRAID conversions: + +o Takeover: Converts an array from one RAID level to another + +o Reshape: Changes the internal layout while maintaining the current RAID level + +Each operation is only valid under specific constraints imposed by the existing array's layout and configuration. 
+ + +Takeover: +linear -> raid1 with N >= 2 mirrors +raid0 -> raid4 (add dedicated parity device) +raid0 -> raid5 (add dedicated parity device) +raid0 -> raid10 with near layout and N >= 2 mirror groups (raid0 stripes have to become first member within mirror groups) +raid1 -> linear +raid1 -> raid5 with 2 mirrors +raid4 -> raid5 w/ rotating parity +raid5 with dedicated parity device -> raid4 +raid5 -> raid6 (with dedicated Q-syndrome) +raid6 (with dedicated Q-syndrome) -> raid5 +raid10 with near layout and even number of disks -> raid0 (select any in-sync device from each mirror group) + +Reshape: +linear: not possible +raid0: not possible +raid1: change number of mirrors +raid4: add and remove stripes (minimum 3), change stripesize +raid5: add and remove stripes (minimum 3, special case 2 for raid1 takeover), change rotating parity algorithms, change stripesize +raid6: add and remove stripes (minimum 4), change rotating syndrome algorithms, change stripesize +raid10 near: add stripes (minimum 4), change stripesize, no stripe removal possible, change to offset layout +raid10 offset: add stripes, change stripesize, no stripe removal possible, change to near layout +raid10 far: not possible + +Table line examples: + +### raid1 -> raid5 +# +# 2 devices limitation in raid1. +# The raid5 personality is able to map just 2 devices like raid1. +# Reshape after takeover to change to full raid5 layout + + 0 1960886272 raid raid1 3 0 region_size 2048 2 /dev/dm-0 /dev/dm-1 /dev/dm-2 /dev/dm-3 + +# dm-0 and dm-2 are e.g. 4 MiB metadata devices, dm-1 and dm-3 have to be at least 1960886272 sectors in size. +# +# Table line to takeover to raid5 + + 0 1960886272 raid raid5 3 0 region_size 2048 2 /dev/dm-0 /dev/dm-1 /dev/dm-2 /dev/dm-3 + +# Add required out-of-place reshape space to the beginning of the given 2 data devices, +# allocate another metadata/data device tuple with the same sizes for the parity space +# and zero the first 4K of the metadata device. +# +# Example table of the out-of-place reshape space addition for one data device, e.g. dm-1 + + 0 8192 linear 8:0 0 1960903888 # <- must be free space segment + 8192 1960886272 linear 8:0 0 2048 # previous data segment + +# Mapping table for e.g. raid5_rs reshape causing the size of the raid device to double once the reshape finishes. +# Check the status output (e.g. "dmsetup status $RaidDev") for progress. + + 0 $((2 * 1960886272)) raid raid5 7 0 region_size 2048 data_offset 8192 delta_disks 1 2 /dev/dm-0 /dev/dm-1 /dev/dm-2 /dev/dm-3 + + Version History --------------- diff --git a/Documentation/admin-guide/device-mapper/verity.rst b/Documentation/admin-guide/device-mapper/verity.rst index 8c3f1f967a3c..3ecab1cff9c6 100644 --- a/Documentation/admin-guide/device-mapper/verity.rst +++ b/Documentation/admin-guide/device-mapper/verity.rst @@ -236,8 +236,10 @@ is available at the cryptsetup project's wiki page Status ====== -V (for Valid) is returned if every check performed so far was valid. -If any check failed, C (for Corruption) is returned. +1. V (for Valid) is returned if every check performed so far was valid. + If any check failed, C (for Corruption) is returned. +2. Number of corrected blocks by Forward Error Correction. + '-' if Forward Error Correction is not enabled.
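To make the two verity status fields just described concrete, here is a small hedged sketch that splits them out of a status line; the sample string and the parsing are illustrative assumptions, not part of the kernel change::

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* e.g. the tail of "dmsetup status <dev>" for a verity target */
        const char *status = "V 17";
        char validity;
        char corrected[16];

        if (sscanf(status, " %c %15s", &validity, corrected) != 2)
            return 1;

        printf("device is %s\n", validity == 'V' ? "valid" : "corrupted");
        if (!strcmp(corrected, "-"))
            printf("forward error correction not enabled\n");
        else
            printf("%s blocks corrected by FEC\n", corrected);

        return 0;
    }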
Example ======= diff --git a/Documentation/admin-guide/laptops/index.rst b/Documentation/admin-guide/laptops/index.rst index db842b629303..6432c251dc95 100644 --- a/Documentation/admin-guide/laptops/index.rst +++ b/Documentation/admin-guide/laptops/index.rst @@ -17,3 +17,4 @@ Laptop Drivers sonypi thinkpad-acpi toshiba_haps + uniwill-laptop diff --git a/Documentation/admin-guide/laptops/uniwill-laptop.rst b/Documentation/admin-guide/laptops/uniwill-laptop.rst new file mode 100644 index 000000000000..a16baf15516b --- /dev/null +++ b/Documentation/admin-guide/laptops/uniwill-laptop.rst @@ -0,0 +1,60 @@ +.. SPDX-License-Identifier: GPL-2.0+ + +Uniwill laptop extra features +============================= + +On laptops manufactured by Uniwill (either directly or as ODM), the ``uniwill-laptop`` driver +handles various platform-specific features. + +Module Loading +-------------- + +The ``uniwill-laptop`` driver relies on a DMI table to automatically load on supported devices. +When using the ``force`` module parameter, this DMI check will be omitted, allowing the driver +to be loaded on unsupported devices for testing purposes. + +Hotkeys +------- + +Usually the FN keys work without a special driver. However as soon as the ``uniwill-laptop`` driver +is loaded, the FN keys need to be handled manually. This is done automatically by the driver itself. + +Keyboard settings +----------------- + +The ``uniwill-laptop`` driver allows the user to enable/disable: + + - the FN and super key lock functionality of the integrated keyboard + - the touchpad toggle functionality of the integrated touchpad + +See Documentation/ABI/testing/sysfs-driver-uniwill-laptop for details. + +Hwmon interface +--------------- + +The ``uniwill-laptop`` driver supports reading of the CPU and GPU temperature and supports up to +two fans. Userspace applications can access sensor readings over the hwmon sysfs interface. + +Platform profile +---------------- + +Support for changing the platform performance mode is currently not implemented. + +Battery Charging Control +------------------------ + +The ``uniwill-laptop`` driver supports controlling the battery charge limit. This happens over +the standard ``charge_control_end_threshold`` power supply sysfs attribute. All values +between 1 and 100 percent are supported. + +Additionally the driver signals the presence of battery charging issues through the standard +``health`` power supply sysfs attribute. + +Lightbar +-------- + +The ``uniwill-laptop`` driver exposes the lightbar found on some models as a standard multicolor +LED class device. The default name of this LED class device is ``uniwill:multicolor:status``. + +See Documentation/ABI/testing/sysfs-driver-uniwill-laptop for details on how to control the various +animation modes of the lightbar. 
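Referring back to the hwmon interface mentioned above, a rough userspace sketch for locating the driver's hwmon device and reading one temperature channel; the hwmon "name" value used below is an assumption and may differ from what the driver actually registers::

    #include <dirent.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        DIR *dir = opendir("/sys/class/hwmon");
        struct dirent *entry;

        if (!dir)
            return 1;

        while ((entry = readdir(dir))) {
            char path[512], name[32] = "";
            FILE *f;

            if (entry->d_name[0] == '.')
                continue;

            /* Match the hwmon device by its "name" attribute. */
            snprintf(path, sizeof(path), "/sys/class/hwmon/%s/name", entry->d_name);
            f = fopen(path, "r");
            if (!f)
                continue;
            if (!fgets(name, sizeof(name), f))
                name[0] = '\0';
            fclose(f);

            if (strncmp(name, "uniwill", 7))
                continue;

            snprintf(path, sizeof(path), "/sys/class/hwmon/%s/temp1_input", entry->d_name);
            f = fopen(path, "r");
            if (f) {
                long millidegrees;

                if (fscanf(f, "%ld", &millidegrees) == 1)
                    printf("CPU temperature: %ld millidegrees Celsius\n", millidegrees);
                fclose(f);
            }
            break;
        }

        closedir(dir);
        return 0;
    }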
diff --git a/Documentation/devicetree/bindings/eeprom/at24.yaml b/Documentation/devicetree/bindings/eeprom/at24.yaml index 50af7ccf6e21..c21282634780 100644 --- a/Documentation/devicetree/bindings/eeprom/at24.yaml +++ b/Documentation/devicetree/bindings/eeprom/at24.yaml @@ -131,6 +131,7 @@ properties: - const: atmel,24c32 - items: - enum: + - belling,bl24s64 - onnn,n24s64b - puya,p24c64f - const: atmel,24c64 diff --git a/Documentation/devicetree/bindings/i2c/i2c-rk3x.yaml b/Documentation/devicetree/bindings/i2c/i2c-rk3x.yaml index 4ac5a40a3886..91805fe8f393 100644 --- a/Documentation/devicetree/bindings/i2c/i2c-rk3x.yaml +++ b/Documentation/devicetree/bindings/i2c/i2c-rk3x.yaml @@ -37,6 +37,7 @@ properties: - rockchip,px30-i2c - rockchip,rk3308-i2c - rockchip,rk3328-i2c + - rockchip,rk3506-i2c - rockchip,rk3528-i2c - rockchip,rk3562-i2c - rockchip,rk3568-i2c diff --git a/Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml b/Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml index 9bc99d736343..33852a5ffca8 100644 --- a/Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml +++ b/Documentation/devicetree/bindings/i2c/qcom,i2c-cci.yaml @@ -15,6 +15,7 @@ properties: oneOf: - enum: - qcom,msm8226-cci + - qcom,msm8953-cci - qcom,msm8974-cci - qcom,msm8996-cci @@ -25,6 +26,7 @@ properties: - items: - enum: + - qcom,kaanapali-cci - qcom,qcm2290-cci - qcom,sa8775p-cci - qcom,sc7280-cci @@ -128,6 +130,7 @@ allOf: compatible: contains: enum: + - qcom,kaanapali-cci - qcom,qcm2290-cci then: properties: @@ -146,6 +149,7 @@ allOf: - contains: enum: - qcom,msm8916-cci + - qcom,msm8953-cci - const: qcom,msm8996-cci then: diff --git a/Documentation/devicetree/bindings/input/ti,twl4030-keypad.yaml b/Documentation/devicetree/bindings/input/ti,twl4030-keypad.yaml new file mode 100644 index 000000000000..c69aa7f5cca7 --- /dev/null +++ b/Documentation/devicetree/bindings/input/ti,twl4030-keypad.yaml @@ -0,0 +1,59 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/input/ti,twl4030-keypad.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Texas Instruments TWL4030-family Keypad Controller + +maintainers: + - Peter Ujfalusi <peter.ujfalusi@gmail.com> + +description: + TWL4030's Keypad controller is used to interface a SoC with a matrix-type + keypad device. The keypad controller supports multiple row and column lines. + A key can be placed at each intersection of a unique row and a unique column. + The keypad controller can sense a key-press and key-release and report the + event using a interrupt to the cpu. + +allOf: + - $ref: matrix-keymap.yaml# + +properties: + compatible: + const: ti,twl4030-keypad + + interrupts: + maxItems: 1 + +required: + - compatible + - interrupts + - keypad,num-rows + - keypad,num-columns + - linux,keymap + +unevaluatedProperties: false + +examples: + - | + #include <dt-bindings/input/input.h> + + keypad { + compatible = "ti,twl4030-keypad"; + interrupts = <1>; + keypad,num-rows = <8>; + keypad,num-columns = <8>; + linux,keymap = < + /* row 0 */ + MATRIX_KEY(0, 0, KEY_1) + MATRIX_KEY(0, 1, KEY_2) + MATRIX_KEY(0, 2, KEY_3) + + /* ...and so on for a full 8x8 matrix... 
*/ + + /* row 7 */ + MATRIX_KEY(7, 6, KEY_Y) + MATRIX_KEY(7, 7, KEY_Z) + >; + }; diff --git a/Documentation/devicetree/bindings/input/touchscreen/ar1021.txt b/Documentation/devicetree/bindings/input/touchscreen/ar1021.txt deleted file mode 100644 index 82019bd6094e..000000000000 --- a/Documentation/devicetree/bindings/input/touchscreen/ar1021.txt +++ /dev/null @@ -1,15 +0,0 @@ -* Microchip AR1020 and AR1021 touchscreen interface (I2C) - -Required properties: -- compatible : "microchip,ar1021-i2c" -- reg : I2C slave address -- interrupts : touch controller interrupt - -Example: - - touchscreen@4d { - compatible = "microchip,ar1021-i2c"; - reg = <0x4d>; - interrupt-parent = <&gpio3>; - interrupts = <11 IRQ_TYPE_LEVEL_HIGH>; - }; diff --git a/Documentation/devicetree/bindings/input/touchscreen/azoteq,iqs5xx.yaml b/Documentation/devicetree/bindings/input/touchscreen/azoteq,iqs5xx.yaml deleted file mode 100644 index b5f377215c09..000000000000 --- a/Documentation/devicetree/bindings/input/touchscreen/azoteq,iqs5xx.yaml +++ /dev/null @@ -1,75 +0,0 @@ -# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) -%YAML 1.2 ---- -$id: http://devicetree.org/schemas/input/touchscreen/azoteq,iqs5xx.yaml# -$schema: http://devicetree.org/meta-schemas/core.yaml# - -title: Azoteq IQS550/572/525 Trackpad/Touchscreen Controller - -maintainers: - - Jeff LaBundy <jeff@labundy.com> - -description: | - The Azoteq IQS550, IQS572 and IQS525 trackpad and touchscreen controllers - employ projected-capacitance sensing and can track up to five independent - contacts. - - Link to datasheet: https://www.azoteq.com/ - -allOf: - - $ref: touchscreen.yaml# - -properties: - compatible: - enum: - - azoteq,iqs550 - - azoteq,iqs572 - - azoteq,iqs525 - - reg: - maxItems: 1 - - interrupts: - maxItems: 1 - - reset-gpios: - maxItems: 1 - - wakeup-source: true - - touchscreen-size-x: true - touchscreen-size-y: true - touchscreen-inverted-x: true - touchscreen-inverted-y: true - touchscreen-swapped-x-y: true - -required: - - compatible - - reg - - interrupts - -additionalProperties: false - -examples: - - | - #include <dt-bindings/gpio/gpio.h> - #include <dt-bindings/interrupt-controller/irq.h> - - i2c { - #address-cells = <1>; - #size-cells = <0>; - - touchscreen@74 { - compatible = "azoteq,iqs550"; - reg = <0x74>; - interrupt-parent = <&gpio>; - interrupts = <27 IRQ_TYPE_LEVEL_HIGH>; - reset-gpios = <&gpio 22 (GPIO_ACTIVE_LOW | - GPIO_PUSH_PULL)>; - - touchscreen-size-x = <800>; - touchscreen-size-y = <480>; - }; - }; - -... 
diff --git a/Documentation/devicetree/bindings/input/touchscreen/himax,hx83112b.yaml b/Documentation/devicetree/bindings/input/touchscreen/himax,hx83112b.yaml deleted file mode 100644 index f5cfacb5e966..000000000000 --- a/Documentation/devicetree/bindings/input/touchscreen/himax,hx83112b.yaml +++ /dev/null @@ -1,64 +0,0 @@ -# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) -%YAML 1.2 ---- -$id: http://devicetree.org/schemas/input/touchscreen/himax,hx83112b.yaml# -$schema: http://devicetree.org/meta-schemas/core.yaml# - -title: Himax hx83112b touchscreen controller - -maintainers: - - Job Noorman <job@noorman.info> - -allOf: - - $ref: touchscreen.yaml# - -properties: - compatible: - enum: - - himax,hx83100a - - himax,hx83112b - - reg: - maxItems: 1 - - interrupts: - maxItems: 1 - - reset-gpios: - maxItems: 1 - - touchscreen-inverted-x: true - touchscreen-inverted-y: true - touchscreen-size-x: true - touchscreen-size-y: true - touchscreen-swapped-x-y: true - -additionalProperties: false - -required: - - compatible - - reg - - interrupts - - reset-gpios - - touchscreen-size-x - - touchscreen-size-y - -examples: - - | - #include <dt-bindings/interrupt-controller/irq.h> - #include <dt-bindings/gpio/gpio.h> - i2c { - #address-cells = <1>; - #size-cells = <0>; - touchscreen@48 { - compatible = "himax,hx83112b"; - reg = <0x48>; - interrupt-parent = <&tlmm>; - interrupts = <65 IRQ_TYPE_LEVEL_LOW>; - touchscreen-size-x = <1080>; - touchscreen-size-y = <2160>; - reset-gpios = <&tlmm 64 GPIO_ACTIVE_LOW>; - }; - }; - -... diff --git a/Documentation/devicetree/bindings/input/touchscreen/hynitron,cstxxx.yaml b/Documentation/devicetree/bindings/input/touchscreen/hynitron,cstxxx.yaml deleted file mode 100644 index 9cb5d4af00f7..000000000000 --- a/Documentation/devicetree/bindings/input/touchscreen/hynitron,cstxxx.yaml +++ /dev/null @@ -1,65 +0,0 @@ -# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) -%YAML 1.2 ---- -$id: http://devicetree.org/schemas/input/touchscreen/hynitron,cstxxx.yaml# -$schema: http://devicetree.org/meta-schemas/core.yaml# - -title: Hynitron cstxxx series touchscreen controller - -description: | - Bindings for Hynitron cstxxx series multi-touch touchscreen - controllers. - -maintainers: - - Chris Morgan <macromorgan@hotmail.com> - -allOf: - - $ref: touchscreen.yaml# - -properties: - compatible: - enum: - - hynitron,cst340 - - reg: - maxItems: 1 - - interrupts: - maxItems: 1 - - reset-gpios: - maxItems: 1 - - touchscreen-size-x: true - touchscreen-size-y: true - touchscreen-inverted-x: true - touchscreen-inverted-y: true - touchscreen-swapped-x-y: true - -additionalProperties: false - -required: - - compatible - - reg - - interrupts - - reset-gpios - -examples: - - | - #include <dt-bindings/gpio/gpio.h> - #include <dt-bindings/interrupt-controller/arm-gic.h> - i2c { - #address-cells = <1>; - #size-cells = <0>; - touchscreen@1a { - compatible = "hynitron,cst340"; - reg = <0x1a>; - interrupt-parent = <&gpio4>; - interrupts = <9 IRQ_TYPE_EDGE_FALLING>; - reset-gpios = <&gpio4 6 GPIO_ACTIVE_LOW>; - touchscreen-size-x = <640>; - touchscreen-size-y = <480>; - }; - }; - -... 
diff --git a/Documentation/devicetree/bindings/input/touchscreen/ilitek_ts_i2c.yaml b/Documentation/devicetree/bindings/input/touchscreen/ilitek_ts_i2c.yaml deleted file mode 100644 index 9f7328999756..000000000000 --- a/Documentation/devicetree/bindings/input/touchscreen/ilitek_ts_i2c.yaml +++ /dev/null @@ -1,76 +0,0 @@ -# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) -%YAML 1.2 ---- -$id: http://devicetree.org/schemas/input/touchscreen/ilitek_ts_i2c.yaml# -$schema: http://devicetree.org/meta-schemas/core.yaml# - -title: Ilitek I2C Touchscreen Controller - -maintainers: - - Dmitry Torokhov <dmitry.torokhov@gmail.com> - -allOf: - - $ref: touchscreen.yaml# - -properties: - compatible: - enum: - - ilitek,ili210x - - ilitek,ili2117 - - ilitek,ili2120 - - ilitek,ili2130 - - ilitek,ili2131 - - ilitek,ili2132 - - ilitek,ili2316 - - ilitek,ili2322 - - ilitek,ili2323 - - ilitek,ili2326 - - ilitek,ili251x - - ilitek,ili2520 - - ilitek,ili2521 - - reg: - maxItems: 1 - - interrupts: - maxItems: 1 - - reset-gpios: - maxItems: 1 - - wakeup-source: - type: boolean - description: touchscreen can be used as a wakeup source. - - touchscreen-size-x: true - touchscreen-size-y: true - touchscreen-inverted-x: true - touchscreen-inverted-y: true - touchscreen-swapped-x-y: true - -additionalProperties: false - -required: - - compatible - - reg - - interrupts - -examples: - - | - #include <dt-bindings/interrupt-controller/irq.h> - #include <dt-bindings/gpio/gpio.h> - i2c { - #address-cells = <1>; - #size-cells = <0>; - - touchscreen@41 { - compatible = "ilitek,ili2520"; - reg = <0x41>; - - interrupt-parent = <&gpio1>; - interrupts = <7 IRQ_TYPE_LEVEL_LOW>; - reset-gpios = <&gpio1 8 GPIO_ACTIVE_LOW>; - touchscreen-inverted-y; - wakeup-source; - }; - }; diff --git a/Documentation/devicetree/bindings/input/touchscreen/maxim,max11801.yaml b/Documentation/devicetree/bindings/input/touchscreen/maxim,max11801.yaml deleted file mode 100644 index 4f528d220199..000000000000 --- a/Documentation/devicetree/bindings/input/touchscreen/maxim,max11801.yaml +++ /dev/null @@ -1,46 +0,0 @@ -# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) -%YAML 1.2 ---- -$id: http://devicetree.org/schemas/input/touchscreen/maxim,max11801.yaml# -$schema: http://devicetree.org/meta-schemas/core.yaml# - -title: MAXI MAX11801 Resistive touch screen controller with i2c interface - -maintainers: - - Frank Li <Frank.Li@nxp.com> - -properties: - compatible: - const: maxim,max11801 - - reg: - maxItems: 1 - - interrupts: - maxItems: 1 - -allOf: - - $ref: touchscreen.yaml - -required: - - compatible - - reg - - interrupts - -unevaluatedProperties: false - -examples: - - | - #include <dt-bindings/interrupt-controller/irq.h> - - i2c { - #address-cells = <1>; - #size-cells = <0>; - - touchscreen@48 { - compatible = "maxim,max11801"; - reg = <0x48>; - interrupt-parent = <&gpio3>; - interrupts = <31 IRQ_TYPE_EDGE_FALLING>; - }; - }; diff --git a/Documentation/devicetree/bindings/input/touchscreen/melfas,mip4_ts.yaml b/Documentation/devicetree/bindings/input/touchscreen/melfas,mip4_ts.yaml new file mode 100644 index 000000000000..314be65c56ca --- /dev/null +++ b/Documentation/devicetree/bindings/input/touchscreen/melfas,mip4_ts.yaml @@ -0,0 +1,56 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/input/touchscreen/melfas,mip4_ts.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: MELFAS MIP4 Touchscreen + +maintainers: + - Ariel D'Alessandro 
<ariel.dalessandro@collabora.com> + +properties: + compatible: + const: melfas,mip4_ts + + reg: + description: I2C address of the chip (0x48 or 0x34) + maxItems: 1 + + interrupts: + maxItems: 1 + + ce-gpios: + description: + GPIO connected to the CE (chip enable) pin of the chip (active high) + maxItems: 1 + +required: + - compatible + - reg + - interrupts + +additionalProperties: false + +examples: + - | + #include <dt-bindings/gpio/gpio.h> + #include <dt-bindings/interrupt-controller/irq.h> + + i2c { + #address-cells = <1>; + #size-cells = <0>; + + touchscreen@34 { + compatible = "melfas,mip4_ts"; + reg = <0x34>; + + interrupts-extended = <&tlmm 13 IRQ_TYPE_EDGE_FALLING>; + ce-gpios = <&tlmm 12 GPIO_ACTIVE_HIGH>; + + pinctrl-0 = <&touchscreen_default>; + pinctrl-names = "default"; + }; + }; + +... diff --git a/Documentation/devicetree/bindings/input/touchscreen/melfas_mip4.txt b/Documentation/devicetree/bindings/input/touchscreen/melfas_mip4.txt deleted file mode 100644 index b2ab5498e519..000000000000 --- a/Documentation/devicetree/bindings/input/touchscreen/melfas_mip4.txt +++ /dev/null @@ -1,20 +0,0 @@ -* MELFAS MIP4 Touchscreen - -Required properties: -- compatible: must be "melfas,mip4_ts" -- reg: I2C slave address of the chip (0x48 or 0x34) -- interrupts: interrupt to which the chip is connected - -Optional properties: -- ce-gpios: GPIO connected to the CE (chip enable) pin of the chip - -Example: - i2c@00000000 { - touchscreen: melfas_mip4@48 { - compatible = "melfas,mip4_ts"; - reg = <0x48>; - interrupt-parent = <&gpio>; - interrupts = <0 IRQ_TYPE_EDGE_FALLING>; - ce-gpios = <&gpio 0 GPIO_ACTIVE_HIGH>; - }; - }; diff --git a/Documentation/devicetree/bindings/input/touchscreen/semtech,sx8654.yaml b/Documentation/devicetree/bindings/input/touchscreen/semtech,sx8654.yaml deleted file mode 100644 index b2554064b688..000000000000 --- a/Documentation/devicetree/bindings/input/touchscreen/semtech,sx8654.yaml +++ /dev/null @@ -1,52 +0,0 @@ -# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) -%YAML 1.2 ---- -$id: http://devicetree.org/schemas/input/touchscreen/semtech,sx8654.yaml# -$schema: http://devicetree.org/meta-schemas/core.yaml# - -title: Semtech SX8654 I2C Touchscreen Controller - -maintainers: - - Frank Li <Frank.Li@nxp.com> - -properties: - compatible: - enum: - - semtech,sx8650 - - semtech,sx8654 - - semtech,sx8655 - - semtech,sx8656 - - reg: - maxItems: 1 - - interrupts: - maxItems: 1 - - reset-gpios: - maxItems: 1 - -required: - - compatible - - reg - - interrupts - -additionalProperties: false - -examples: - - | - #include <dt-bindings/gpio/gpio.h> - #include <dt-bindings/interrupt-controller/irq.h> - - i2c { - #address-cells = <1>; - #size-cells = <0>; - - touchscreen@48 { - compatible = "semtech,sx8654"; - reg = <0x48>; - interrupt-parent = <&gpio6>; - interrupts = <3 IRQ_TYPE_EDGE_FALLING>; - reset-gpios = <&gpio4 2 GPIO_ACTIVE_LOW>; - }; - }; diff --git a/Documentation/devicetree/bindings/input/touchscreen/trivial-touch.yaml b/Documentation/devicetree/bindings/input/touchscreen/trivial-touch.yaml new file mode 100644 index 000000000000..fa27c6754ca4 --- /dev/null +++ b/Documentation/devicetree/bindings/input/touchscreen/trivial-touch.yaml @@ -0,0 +1,84 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/input/touchscreen/trivial-touch.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Trivial touch screen controller with i2c interface + +maintainers: + - Frank Li <Frank.Li@nxp.com> 
+ +properties: + compatible: + enum: + # The Azoteq IQS550, IQS572 and IQS525 trackpad and touchscreen controllers + - azoteq,iqs550 + - azoteq,iqs572 + - azoteq,iqs525 + # Himax hx83100a touchscreen controller + - himax,hx83100a + # Himax hx83112b touchscreen controller + - himax,hx83112b + # Hynitron cstxxx series touchscreen controller + - hynitron,cst340 + # Ilitek I2C Touchscreen Controller + - ilitek,ili210x + - ilitek,ili2117 + - ilitek,ili2120 + - ilitek,ili2130 + - ilitek,ili2131 + - ilitek,ili2132 + - ilitek,ili2316 + - ilitek,ili2322 + - ilitek,ili2323 + - ilitek,ili2326 + - ilitek,ili251x + - ilitek,ili2520 + - ilitek,ili2521 + # MAXI MAX11801 Resistive touch screen controller with i2c interface + - maxim,max11801 + # Microchip AR1020 and AR1021 touchscreen interface (I2C) + - microchip,ar1021-i2c + # Trivial touch screen controller with i2c interface + - semtech,sx8650 + - semtech,sx8654 + - semtech,sx8655 + - semtech,sx8656 + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + reset-gpios: + maxItems: 1 + + wakeup-source: true + +allOf: + - $ref: touchscreen.yaml + +required: + - compatible + - reg + - interrupts + +unevaluatedProperties: false + +examples: + - | + #include <dt-bindings/interrupt-controller/irq.h> + + i2c { + #address-cells = <1>; + #size-cells = <0>; + + touchscreen@48 { + compatible = "maxim,max11801"; + reg = <0x48>; + interrupt-parent = <&gpio3>; + interrupts = <31 IRQ_TYPE_EDGE_FALLING>; + }; + }; diff --git a/Documentation/devicetree/bindings/input/twl4030-keypad.txt b/Documentation/devicetree/bindings/input/twl4030-keypad.txt deleted file mode 100644 index e4be2f76a717..000000000000 --- a/Documentation/devicetree/bindings/input/twl4030-keypad.txt +++ /dev/null @@ -1,27 +0,0 @@ -* TWL4030's Keypad Controller device tree bindings - -TWL4030's Keypad controller is used to interface a SoC with a matrix-type -keypad device. The keypad controller supports multiple row and column lines. -A key can be placed at each intersection of a unique row and a unique column. -The keypad controller can sense a key-press and key-release and report the -event using a interrupt to the cpu. - -This binding is based on the matrix-keymap binding with the following -changes: - - * keypad,num-rows and keypad,num-columns are required. - -Required SoC Specific Properties: -- compatible: should be one of the following - - "ti,twl4030-keypad": For controllers compatible with twl4030 keypad - controller. -- interrupt: should be one of the following - - <1>: For controllers compatible with twl4030 keypad controller. - -Example: - twl_keypad: keypad { - compatible = "ti,twl4030-keypad"; - interrupts = <1>; - keypad,num-rows = <8>; - keypad,num-columns = <8>; - }; diff --git a/Documentation/driver-api/wmi.rst b/Documentation/driver-api/wmi.rst index 4e8dbdb1fc67..db835b43c937 100644 --- a/Documentation/driver-api/wmi.rst +++ b/Documentation/driver-api/wmi.rst @@ -16,5 +16,5 @@ which will be bound to compatible WMI devices by the driver core. .. kernel-doc:: include/linux/wmi.h :internal: -.. kernel-doc:: drivers/platform/x86/wmi.c +.. 
kernel-doc:: drivers/platform/wmi/core.c :export: diff --git a/Documentation/i2c/busses/i2c-i801.rst b/Documentation/i2c/busses/i2c-i801.rst index 36c563ad3f06..c939a5bfc8d0 100644 --- a/Documentation/i2c/busses/i2c-i801.rst +++ b/Documentation/i2c/busses/i2c-i801.rst @@ -51,6 +51,7 @@ Supported adapters: * Intel Arrow Lake (SOC) * Intel Panther Lake (SOC) * Intel Wildcat Lake (SOC) + * Intel Diamond Rapids (SOC) Datasheets: Publicly available at the Intel website diff --git a/Documentation/power/runtime_pm.rst b/Documentation/power/runtime_pm.rst index 8246df3cecd7..455b9d135d85 100644 --- a/Documentation/power/runtime_pm.rst +++ b/Documentation/power/runtime_pm.rst @@ -443,13 +443,11 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h: necessary to execute the subsystem-level resume callback for the device to satisfy that request, otherwise 0 is returned - `int pm_runtime_barrier(struct device *dev);` + `void pm_runtime_barrier(struct device *dev);` - check if there's a resume request pending for the device and resume it (synchronously) in that case, cancel any other pending runtime PM requests regarding it and wait for all runtime PM operations on it in progress to - complete; returns 1 if there was a resume request pending and it was - necessary to execute the subsystem-level resume callback for the device to - satisfy that request, otherwise 0 is returned + complete `void pm_suspend_ignore_children(struct device *dev, bool enable);` - set/unset the power.ignore_children flag of the device diff --git a/Documentation/wmi/devices/lenovo-wmi-gamezone.rst b/Documentation/wmi/devices/lenovo-wmi-gamezone.rst index 997263e51a7d..1769ad3d57b9 100644 --- a/Documentation/wmi/devices/lenovo-wmi-gamezone.rst +++ b/Documentation/wmi/devices/lenovo-wmi-gamezone.rst @@ -19,27 +19,26 @@ WMI GUID ``887B54E3-DDDC-4B2C-8B88-68A26A8835D0`` The Gamezone Data WMI interface provides platform-profile and fan curve settings for devices that fall under the "Gaming Series" of Lenovo devices. It uses a notifier chain to inform other Lenovo WMI interface drivers of the -current platform profile when it changes. +current platform profile when it changes. The currently set profile can be +determined by the user on the hardware by looking at the color of the power +or profile LED, depending on the model. The following platform profiles are supported: - - low-power - - balanced - - balanced-performance - - performance - - custom + - low-power, blue LED + - balanced, white LED + - performance, red LED + - max-power, purple LED + - custom, purple LED -Balanced-Performance +Extreme Mode ~~~~~~~~~~~~~~~~~~~~ Some newer Lenovo "Gaming Series" laptops have an "Extreme Mode" profile -enabled in their BIOS. For these devices, the performance platform profile -corresponds to the BIOS Extreme Mode, while the balanced-performance -platform profile corresponds to the BIOS Performance mode. For legacy -devices, the performance platform profile will correspond with the BIOS -Performance mode. - -For some newer devices the "Extreme Mode" profile is incomplete in the BIOS -and setting it will cause undefined behavior. A BIOS bug quirk table is -provided to ensure these devices cannot set "Extreme Mode" from the driver. +enabled in their BIOS. When available, this mode will be represented by the +max-power platform profile. + +For a subset of these devices the "Extreme Mode" profile is incomplete in +the BIOS and setting it will cause undefined behavior. 
A BIOS bug quirk table +is provided to ensure these devices cannot set "Extreme Mode" from the driver. Custom Profile ~~~~~~~~~~~~~~ diff --git a/Documentation/wmi/devices/uniwill-laptop.rst b/Documentation/wmi/devices/uniwill-laptop.rst new file mode 100644 index 000000000000..e246bf293450 --- /dev/null +++ b/Documentation/wmi/devices/uniwill-laptop.rst @@ -0,0 +1,198 @@ +.. SPDX-License-Identifier: GPL-2.0-or-later + +======================================== +Uniwill Notebook driver (uniwill-laptop) +======================================== + +Introduction +============ + +Many notebooks manufactured by Uniwill (either directly or as ODM) provide an EC interface +for controlling various platform settings like sensors and fan control. This interface is +used by the ``uniwill-laptop`` driver to map those features onto standard kernel interfaces. + +EC WMI interface description +============================ + +The EC WMI interface description can be decoded from the embedded binary MOF (bmof) +data using the `bmfdec <https://github.com/pali/bmfdec>`_ utility: + +:: + + [WMI, Dynamic, Provider("WmiProv"), Locale("MS\\0x409"), + Description("Class used to operate methods on a ULong"), + guid("{ABBC0F6F-8EA1-11d1-00A0-C90629100000}")] + class AcpiTest_MULong { + [key, read] string InstanceName; + [read] boolean Active; + + [WmiMethodId(1), Implemented, read, write, Description("Return the contents of a ULong")] + void GetULong([out, Description("Ulong Data")] uint32 Data); + + [WmiMethodId(2), Implemented, read, write, Description("Set the contents of a ULong")] + void SetULong([in, Description("Ulong Data")] uint32 Data); + + [WmiMethodId(3), Implemented, read, write, + Description("Generate an event containing ULong data")] + void FireULong([in, Description("WMI requires a parameter")] uint32 Hack); + + [WmiMethodId(4), Implemented, read, write, Description("Get and Set the contents of a ULong")] + void GetSetULong([in, Description("Ulong Data")] uint64 Data, + [out, Description("Ulong Data")] uint32 Return); + + [WmiMethodId(5), Implemented, read, write, + Description("Get and Set the contents of a ULong for Dollby button")] + void GetButton([in, Description("Ulong Data")] uint64 Data, + [out, Description("Ulong Data")] uint32 Return); + }; + +Most of the WMI-related code was copied from the Windows driver samples, which unfortunately means +that the WMI-GUID is not unique. This makes the WMI-GUID unusable for autoloading. + +WMI method GetULong() +--------------------- + +This WMI method was copied from the Windows driver samples and has no function. + +WMI method SetULong() +--------------------- + +This WMI method was copied from the Windows driver samples and has no function. + +WMI method FireULong() +---------------------- + +This WMI method allows injecting a WMI event with a 32-bit payload. Its primary purpose seems +to be debugging. + +WMI method GetSetULong() +------------------------ + +This WMI method is used to communicate with the EC. The ``Data`` argument holds the following +information (starting with the least significant byte): + +1. 16-bit address +2. 16-bit data (set to ``0x0000`` when reading) +3. 16-bit operation (``0x0100`` for reading and ``0x0000`` for writing) +4. 16-bit reserved (set to ``0x0000``) + +The first 8 bits of the ``Return`` value contain the data returned by the EC when reading. +The special value ``0xFEFEFEFE`` is used to indicate a communication failure with the EC.
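A minimal sketch of the ``Data``/``Return`` encoding just described; the helper and macro names are illustrative assumptions, not identifiers taken from the driver::

    #include <stdint.h>

    #define UNIWILL_EC_OP_READ   0x0100      /* 16-bit operation field */
    #define UNIWILL_EC_OP_WRITE  0x0000
    #define UNIWILL_EC_ERROR     0xFEFEFEFEu /* EC communication failure */

    /* Pack address, data and operation into the 64-bit Data argument,
     * least significant 16 bits first; bits 48-63 stay reserved (0). */
    static uint64_t uniwill_ec_pack(uint16_t address, uint16_t data, uint16_t operation)
    {
        return (uint64_t)address |
               (uint64_t)data << 16 |
               (uint64_t)operation << 32;
    }

    /* Only the first 8 bits of Return carry EC data on a read. */
    static int uniwill_ec_unpack(uint32_t ret, uint8_t *value)
    {
        if (ret == UNIWILL_EC_ERROR)
            return -1;

        *value = ret & 0xff;
        return 0;
    }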
+ +WMI method GetButton() +---------------------- + +This WMI method is not implemented on all machines and has an unknown purpose. + +Reverse-Engineering the EC WMI interface +======================================== + +.. warning:: Randomly poking the EC can potentially cause damage to the machine and other unwanted + side effects, please be careful. + +The EC behind the ``GetSetULong`` method is used by the OEM software supplied by the manufacturer. +Reverse-engineering of this software is difficult since it uses an obfuscator; however, some parts +are not obfuscated. In this case `dnSpy <https://github.com/dnSpy/dnSpy>`_ could also be helpful. + +The EC can be accessed under Windows using PowerShell (requires admin privileges): + +:: + + > $obj = Get-CimInstance -Namespace root/wmi -ClassName AcpiTest_MULong | Select-Object -First 1 + > Invoke-CimMethod -InputObject $obj -MethodName GetSetULong -Arguments @{Data = <input>} + +WMI event interface description +=============================== + +The WMI interface description can also be decoded from the embedded binary MOF (bmof) +data: + +:: + + [WMI, Dynamic, Provider("WmiProv"), Locale("MS\\0x409"), + Description("Class containing event generated ULong data"), + guid("{ABBC0F72-8EA1-11d1-00A0-C90629100000}")] + class AcpiTest_EventULong : WmiEvent { + [key, read] string InstanceName; + [read] boolean Active; + + [WmiDataId(1), read, write, Description("ULong Data")] uint32 ULong; + }; + +Most of the WMI-related code was again copied from the Windows driver samples, causing this WMI +interface to suffer from the same restrictions as the EC WMI interface described above. + +WMI event data +-------------- + +The WMI event data contains a single 32-bit value which is used to indicate various platform events. + +Reverse-Engineering the Uniwill WMI event interface +=================================================== + +The driver logs debug messages when receiving a WMI event. Thus enabling debug messages will be +useful for finding unknown event codes. + +EC ACPI interface description +============================= + +The ``INOU0000`` ACPI device is a virtual device used to access various hardware registers +available on notebooks manufactured by Uniwill. Reading and writing those registers happens +by calling ACPI control methods. The ``uniwill-laptop`` driver uses this device to communicate +with the EC because the ACPI control methods are faster than the WMI methods described above. + +ACPI control methods used for reading registers take a single ACPI integer containing the address +of the register to read and return an ACPI integer containing the data inside said register. ACPI +control methods used for writing registers, however, take two ACPI integers, with the additional +ACPI integer containing the data to be written into the register. Such ACPI control methods return +nothing. + +System memory +------------- + +System memory can be accessed with a granularity of either a single byte (``MMRB`` for reading and +``MMWB`` for writing) or four bytes (``MMRD`` for reading and ``MMWD`` for writing). Those ACPI +control methods are unused because they provide no benefit when compared to the native memory +access functions provided by the kernel. + +EC RAM +------ + +The internal RAM of the EC can be accessed with a granularity of a single byte using the ``ECRR`` +(read) and ``ECRW`` (write) ACPI control methods, with the maximum register address being ``0xFFF``.
+The OEM software waits 6 ms after calling one of those ACPI control methods, likely to avoid +overwhelming the EC when being connected over LPC. + +PCI config space +---------------- + +The PCI config space can be accessed with a granularity of four bytes using the ``PCRD`` (read) and +``PCWD`` (write) ACPI control methods. The exact address format is unknown, and poking random PCI +devices might confuse the PCI subsystem. Because of this those ACPI control methods are not used. + +IO ports +-------- + +IO ports can be accessed with a granularity of four bytes using the ``IORD`` (read) and ``IOWD`` +(write) ACPI control methods. Those ACPI control methods are unused because they provide no benefit +when compared to the native IO port access functions provided by the kernel. + +CMOS RAM +-------- + +The CMOS RAM can be accessed with a granularity of a single byte using the ``RCMS`` (read) and +``WCMS`` (write) ACPI control methods. Using those ACPI methods might interfere with the native CMOS RAM +access functions provided by the kernel due to the usage of indexed IO, so they are unused. + +Indexed IO +---------- + +Indexed IO on IO ports can be performed with a granularity of a single byte using the ``RIOP`` +(read) and ``WIOP`` (write) ACPI control methods. Those ACPI methods are unused because they +provide no benefit when compared to the native IO port access functions provided by the kernel. + +Special thanks go to GitHub user `pobrn`, who developed the +`qc71_laptop <https://github.com/pobrn/qc71_laptop>`_ driver on which this driver is partly based. +The same is true for Tuxedo Computers, which developed the +`tuxedo-drivers <https://gitlab.com/tuxedocomputers/development/packages/tuxedo-drivers>`_ package, +which also served as a foundation for this driver.
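Tying the above together, a hedged kernel-side sketch of a single EC RAM read through the ``ECRR`` control method described earlier; the function name and the placement of the 6 ms delay are assumptions, simplified from how a real driver would structure this::

    #include <linux/acpi.h>
    #include <linux/delay.h>
    #include <linux/errno.h>

    /* Read one EC RAM register (address <= 0xFFF) via the INOU0000 ECRR method. */
    static int uniwill_ec_ram_read(acpi_handle handle, u16 address, u8 *value)
    {
        union acpi_object arg = {
            .integer = {
                .type = ACPI_TYPE_INTEGER,
                .value = address,
            },
        };
        struct acpi_object_list args = {
            .count = 1,
            .pointer = &arg,
        };
        unsigned long long result;
        acpi_status status;

        status = acpi_evaluate_integer(handle, "ECRR", &args, &result);
        if (ACPI_FAILURE(status))
            return -EIO;

        *value = result & 0xff;

        /* The OEM software waits ~6 ms between EC accesses (see above). */
        msleep(6);

        return 0;
    }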
diff --git a/MAINTAINERS b/MAINTAINERS index 84cda6701685..c9e416ba74c6 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -402,7 +402,7 @@ S: Maintained F: Documentation/ABI/testing/sysfs-bus-wmi F: Documentation/driver-api/wmi.rst F: Documentation/wmi/ -F: drivers/platform/x86/wmi.c +F: drivers/platform/wmi/ F: include/uapi/linux/wmi.h ACRN HYPERVISOR SERVICE MODULE @@ -4262,6 +4262,13 @@ W: https://ez.analog.com/linux-software-drivers F: Documentation/devicetree/bindings/pwm/adi,axi-pwmgen.yaml F: drivers/pwm/pwm-axi-pwmgen.c +AYANEO PLATFORM EC DRIVER +M: Antheas Kapenekakis <lkml@antheas.dev> +L: platform-driver-x86@vger.kernel.org +S: Maintained +F: Documentation/ABI/testing/sysfs-platform-ayaneo +F: drivers/platform/x86/ayaneo-ec.c + AZ6007 DVB DRIVER M: Mauro Carvalho Chehab <mchehab@kernel.org> L: linux-media@vger.kernel.org @@ -7218,6 +7225,7 @@ DEVICE-MAPPER (LVM) M: Alasdair Kergon <agk@redhat.com> M: Mike Snitzer <snitzer@kernel.org> M: Mikulas Patocka <mpatocka@redhat.com> +M: Benjamin Marzinski <bmarzins@redhat.com> L: dm-devel@lists.linux.dev S: Maintained Q: http://patchwork.kernel.org/project/dm-devel/list/ @@ -26448,6 +26456,7 @@ F: include/trace/ F: kernel/trace/ F: kernel/tracepoint.c F: scripts/tracing/ +F: scripts/tracepoint-update.c F: tools/testing/selftests/ftrace/ TRACING MMIO ACCESSES (MMIOTRACE) @@ -26789,6 +26798,17 @@ L: linux-scsi@vger.kernel.org S: Maintained F: drivers/ufs/host/ufs-renesas.c +UNIWILL LAPTOP DRIVER +M: Armin Wolf <W_Armin@gmx.de> +L: platform-driver-x86@vger.kernel.org +S: Maintained +F: Documentation/ABI/testing/sysfs-driver-uniwill-laptop +F: Documentation/admin-guide/laptops/uniwill-laptop.rst +F: Documentation/wmi/devices/uniwill-laptop.rst +F: drivers/platform/x86/uniwill/uniwill-acpi.c +F: drivers/platform/x86/uniwill/uniwill-wmi.c +F: drivers/platform/x86/uniwill/uniwill-wmi.h + UNSORTED BLOCK IMAGES (UBI) M: Richard Weinberger <richard@nod.at> R: Zhihao Cheng <chengzhihao1@huawei.com> diff --git a/arch/alpha/include/asm/console.h b/arch/alpha/include/asm/console.h index 088b7b9eb15a..1cabdb6064bb 100644 --- a/arch/alpha/include/asm/console.h +++ b/arch/alpha/include/asm/console.h @@ -4,7 +4,7 @@ #include <uapi/asm/console.h> -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ extern long callback_puts(long unit, const char *s, long length); extern long callback_getc(long unit); extern long callback_open_console(void); @@ -26,5 +26,5 @@ struct crb_struct; struct hwrpb_struct; extern int callback_init_done; extern void * callback_init(void *); -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __AXP_CONSOLE_H */ diff --git a/arch/alpha/include/asm/page.h b/arch/alpha/include/asm/page.h index 5ec4c77e432e..d2c6667d73e9 100644 --- a/arch/alpha/include/asm/page.h +++ b/arch/alpha/include/asm/page.h @@ -6,7 +6,7 @@ #include <asm/pal.h> #include <vdso/page.h> -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #define STRICT_MM_TYPECHECKS @@ -74,7 +74,7 @@ typedef struct page *pgtable_t; #define PAGE_OFFSET 0xfffffc0000000000 #endif -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #define __pa(x) ((unsigned long) (x) - PAGE_OFFSET) #define __va(x) ((void *)((unsigned long) (x) + PAGE_OFFSET)) diff --git a/arch/alpha/include/asm/pal.h b/arch/alpha/include/asm/pal.h index db2b3b18b34c..799a64c05198 100644 --- a/arch/alpha/include/asm/pal.h +++ b/arch/alpha/include/asm/pal.h @@ -4,7 +4,7 @@ #include <uapi/asm/pal.h> -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ extern void halt(void) __attribute__((noreturn)); #define __halt() __asm__ 
__volatile__ ("call_pal %0 #halt" : : "i" (PAL_halt)) @@ -183,5 +183,5 @@ qemu_get_vmtime(void) return v0; } -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #endif /* __ALPHA_PAL_H */ diff --git a/arch/alpha/include/asm/thread_info.h b/arch/alpha/include/asm/thread_info.h index 4a4d00b37986..98ccbca64984 100644 --- a/arch/alpha/include/asm/thread_info.h +++ b/arch/alpha/include/asm/thread_info.h @@ -4,14 +4,14 @@ #ifdef __KERNEL__ -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <asm/processor.h> #include <asm/types.h> #include <asm/hwrpb.h> #include <asm/sysinfo.h> #endif -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ struct thread_info { struct pcb_struct pcb; /* palcode state */ @@ -44,7 +44,7 @@ register struct thread_info *__current_thread_info __asm__("$8"); register unsigned long *current_stack_pointer __asm__ ("$30"); -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ /* Thread information allocation. */ #define THREAD_SIZE_ORDER 1 @@ -110,7 +110,7 @@ register unsigned long *current_stack_pointer __asm__ ("$30"); put_user(res, (int __user *)(value)); \ }) -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ extern void __save_fpu(void); static inline void save_fpu(void) diff --git a/arch/alpha/include/uapi/asm/ioctls.h b/arch/alpha/include/uapi/asm/ioctls.h index 971311605288..a09d04b49cc6 100644 --- a/arch/alpha/include/uapi/asm/ioctls.h +++ b/arch/alpha/include/uapi/asm/ioctls.h @@ -23,10 +23,10 @@ #define TCSETSW _IOW('t', 21, struct termios) #define TCSETSF _IOW('t', 22, struct termios) -#define TCGETA _IOR('t', 23, struct termio) -#define TCSETA _IOW('t', 24, struct termio) -#define TCSETAW _IOW('t', 25, struct termio) -#define TCSETAF _IOW('t', 28, struct termio) +#define TCGETA 0x40127417 +#define TCSETA 0x80127418 +#define TCSETAW 0x80127419 +#define TCSETAF 0x8012741c #define TCSBRK _IO('t', 29) #define TCXONC _IO('t', 30) diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index ff61891abe53..fa83c040ee2d 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -82,7 +82,7 @@ config ARM select HAS_IOPORT select HAVE_ARCH_AUDITSYSCALL if AEABI && !OABI_COMPAT select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6 - select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU + select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && MMU && (!PREEMPT_RT || !SMP) select HAVE_ARCH_KFENCE if MMU && !XIP_KERNEL select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU select HAVE_ARCH_KASAN if MMU && !XIP_KERNEL @@ -1213,7 +1213,7 @@ config HIGHMEM config HIGHPTE bool "Allocate 2nd-level pagetables from highmem" if EXPERT - depends on HIGHMEM + depends on HIGHMEM && !PREEMPT_RT default y help The VM uses one page of physical memory for each page table. 
diff --git a/arch/arm/include/asm/word-at-a-time.h b/arch/arm/include/asm/word-at-a-time.h index f9a3897b06e7..5023f98d8293 100644 --- a/arch/arm/include/asm/word-at-a-time.h +++ b/arch/arm/include/asm/word-at-a-time.h @@ -67,7 +67,7 @@ static inline unsigned long find_zero(unsigned long mask) */ static inline unsigned long load_unaligned_zeropad(const void *addr) { - unsigned long ret, offset; + unsigned long ret, tmp; /* Load word from unaligned pointer addr */ asm( @@ -75,9 +75,9 @@ static inline unsigned long load_unaligned_zeropad(const void *addr) "2:\n" " .pushsection .text.fixup,\"ax\"\n" " .align 2\n" - "3: and %1, %2, #0x3\n" - " bic %2, %2, #0x3\n" - " ldr %0, [%2]\n" + "3: bic %1, %2, #0x3\n" + " ldr %0, [%1]\n" + " and %1, %2, #0x3\n" " lsl %1, %1, #0x3\n" #ifndef __ARMEB__ " lsr %0, %0, %1\n" @@ -90,7 +90,7 @@ static inline unsigned long load_unaligned_zeropad(const void *addr) " .align 3\n" " .long 1b, 3b\n" " .popsection" - : "=&r" (ret), "=&r" (offset) + : "=&r" (ret), "=&r" (tmp) : "r" (addr), "Qo" (*(unsigned long *)addr)); return ret; diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index 3c6ddb1afdc4..812380f30ae3 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -19,10 +19,11 @@ #include <linux/init.h> #include <linux/sched/signal.h> #include <linux/uaccess.h> +#include <linux/unaligned.h> #include <asm/cp15.h> #include <asm/system_info.h> -#include <linux/unaligned.h> +#include <asm/system_misc.h> #include <asm/opcodes.h> #include "fault.h" @@ -809,6 +810,9 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs) int thumb2_32b = 0; int fault; + if (addr >= TASK_SIZE && user_mode(regs)) + harden_branch_predictor(); + if (interrupts_enabled(regs)) local_irq_enable(); diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 2bc828a1940c..ed4330cc3f4e 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -128,6 +128,19 @@ static inline bool is_translation_fault(unsigned int fsr) return false; } +static inline bool is_permission_fault(unsigned int fsr) +{ + int fs = fsr_fs(fsr); +#ifdef CONFIG_ARM_LPAE + if ((fs & FS_MMU_NOLL_MASK) == FS_PERM_NOLL) + return true; +#else + if (fs == FS_L1_PERM || fs == FS_L2_PERM) + return true; +#endif + return false; +} + static void die_kernel_fault(const char *msg, struct mm_struct *mm, unsigned long addr, unsigned int fsr, struct pt_regs *regs) @@ -162,6 +175,8 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr, */ if (addr < PAGE_SIZE) { msg = "NULL pointer dereference"; + } else if (is_permission_fault(fsr) && fsr & FSR_LNX_PF) { + msg = "execution of memory"; } else { if (is_translation_fault(fsr) && kfence_handle_page_fault(addr, is_write_fault(fsr), regs)) @@ -183,9 +198,6 @@ __do_user_fault(unsigned long addr, unsigned int fsr, unsigned int sig, { struct task_struct *tsk = current; - if (addr > TASK_SIZE) - harden_branch_predictor(); - #ifdef CONFIG_DEBUG_USER if (((user_debug & UDBG_SEGV) && (sig == SIGSEGV)) || ((user_debug & UDBG_BUS) && (sig == SIGBUS))) { @@ -225,19 +237,6 @@ void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs) } #ifdef CONFIG_MMU -static inline bool is_permission_fault(unsigned int fsr) -{ - int fs = fsr_fs(fsr); -#ifdef CONFIG_ARM_LPAE - if ((fs & FS_MMU_NOLL_MASK) == FS_PERM_NOLL) - return true; -#else - if (fs == FS_L1_PERM || fs == FS_L2_PERM) - return true; -#endif - return false; -} - #ifdef CONFIG_CPU_TTBR0_PAN static inline bool ttbr0_usermode_access_allowed(struct pt_regs *regs) { 
@@ -260,6 +259,37 @@ static inline bool ttbr0_usermode_access_allowed(struct pt_regs *regs) #endif static int __kprobes +do_kernel_address_page_fault(struct mm_struct *mm, unsigned long addr, + unsigned int fsr, struct pt_regs *regs) +{ + if (user_mode(regs)) { + /* + * Fault from user mode for a kernel space address. User mode + * should not be faulting in kernel space, which includes the + * vector/khelper page. Handle the branch predictor hardening + * while interrupts are still disabled, then send a SIGSEGV. + */ + harden_branch_predictor(); + __do_user_fault(addr, fsr, SIGSEGV, SEGV_MAPERR, regs); + } else { + /* + * Fault from kernel mode. Enable interrupts if they were + * enabled in the parent context. Section (upper page table) + * translation faults are handled via do_translation_fault(), + * so we will only get here for a non-present kernel space + * PTE or PTE permission fault. This may happen in exceptional + * circumstances and need the fixup tables to be walked. + */ + if (interrupts_enabled(regs)) + local_irq_enable(); + + __do_kernel_fault(mm, addr, fsr, regs); + } + + return 0; +} + +static int __kprobes do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { struct mm_struct *mm = current->mm; @@ -272,6 +302,12 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) if (kprobe_page_fault(regs, fsr)) return 0; + /* + * Handle kernel address faults separately, which avoids touching + * the mmap lock from contexts that are not able to sleep. + */ + if (addr >= TASK_SIZE) + return do_kernel_address_page_fault(mm, addr, fsr, regs); /* Enable interrupts if they were enabled in the parent context. */ if (interrupts_enabled(regs)) @@ -448,16 +484,20 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) * We enter here because the first level page table doesn't contain * a valid entry for the address. * - * If the address is in kernel space (>= TASK_SIZE), then we are - * probably faulting in the vmalloc() area. + * If this is a user address (addr < TASK_SIZE), we handle this as a + * normal page fault. This leaves the remainder of the function to handle + * kernel address translation faults. + * + * Since user mode is not permitted to access kernel addresses, pass these + * directly to do_kernel_address_page_fault() to handle. - * - * If the init_task's first level page tables contains the relevant - * entry, we copy the it to this task. If not, we send the process - * a signal, fixup the exception, or oops the kernel. + * Otherwise, we're probably faulting in the vmalloc() area, so try to fix + * that up. Note that we must not take any locks or enable interrupts in + * this case. + * - * NOTE! We MUST NOT take any locks for this case. We may be in an - * interrupt or a critical region, and should only copy the information - * from the master page table, nothing more. + * If vmalloc() fixup fails, that means the non-leaf page tables did not + * contain an entry for this address, so handle this via + * do_kernel_address_page_fault().
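A minimal user-space sketch of the behaviour enforced by the new do_kernel_address_page_fault() path above: an access to an address at or above TASK_SIZE from user mode should be refused with SIGSEGV once the branch predictor hardening has run. The address used and the exact fault route are assumptions for illustration only, not part of the patch.

#include <signal.h>
#include <stdlib.h>
#include <unistd.h>

static void on_segv(int sig)
{
	/* Reached once the kernel has delivered SIGSEGV for the kernel-space access. */
	static const char msg[] = "got SIGSEGV for kernel-space access\n";

	write(STDOUT_FILENO, msg, sizeof(msg) - 1);
	_exit(0);
}

int main(void)
{
	signal(SIGSEGV, on_segv);
	/* Illustrative address only: well above TASK_SIZE on 32-bit ARM. */
	*(volatile unsigned long *)0xffff0000UL = 1;
	return 1;	/* not reached if the fault is delivered as expected */
}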
*/ #ifdef CONFIG_MMU static int __kprobes @@ -523,7 +563,8 @@ do_translation_fault(unsigned long addr, unsigned int fsr, return 0; bad_area: - do_bad_area(addr, fsr, regs); + do_kernel_address_page_fault(current->mm, addr, fsr, regs); + return 0; } #else /* CONFIG_MMU */ @@ -543,7 +584,16 @@ do_translation_fault(unsigned long addr, unsigned int fsr, static int do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) { + /* + * If this is a kernel address, but from user mode, then userspace + * is trying bad stuff. Invoke the branch predictor handling. + * Interrupts are disabled here. + */ + if (addr >= TASK_SIZE && user_mode(regs)) + harden_branch_predictor(); + do_bad_area(addr, fsr, regs); + return 0; } #endif /* CONFIG_ARM_LPAE */ diff --git a/arch/csky/abiv1/inc/abi/regdef.h b/arch/csky/abiv1/inc/abi/regdef.h index 7b386fd67070..c75ecf2cafd7 100644 --- a/arch/csky/abiv1/inc/abi/regdef.h +++ b/arch/csky/abiv1/inc/abi/regdef.h @@ -3,7 +3,7 @@ #ifndef __ASM_CSKY_REGDEF_H #define __ASM_CSKY_REGDEF_H -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #define syscallid r1 #else #define syscallid "r1" diff --git a/arch/csky/abiv2/inc/abi/regdef.h b/arch/csky/abiv2/inc/abi/regdef.h index 0933addbc27b..fc08d56ccdbe 100644 --- a/arch/csky/abiv2/inc/abi/regdef.h +++ b/arch/csky/abiv2/inc/abi/regdef.h @@ -3,7 +3,7 @@ #ifndef __ASM_CSKY_REGDEF_H #define __ASM_CSKY_REGDEF_H -#ifdef __ASSEMBLY__ +#ifdef __ASSEMBLER__ #define syscallid r7 #else #define syscallid "r7" diff --git a/arch/csky/include/asm/barrier.h b/arch/csky/include/asm/barrier.h index 15de58b10aec..c33fdcfe3770 100644 --- a/arch/csky/include/asm/barrier.h +++ b/arch/csky/include/asm/barrier.h @@ -3,7 +3,7 @@ #ifndef __ASM_CSKY_BARRIER_H #define __ASM_CSKY_BARRIER_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #define nop() asm volatile ("nop\n":::"memory") @@ -84,5 +84,5 @@ #include <asm-generic/barrier.h> -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_CSKY_BARRIER_H */ diff --git a/arch/csky/include/asm/cache.h b/arch/csky/include/asm/cache.h index 4b5c09bf1d25..d575482e0fce 100644 --- a/arch/csky/include/asm/cache.h +++ b/arch/csky/include/asm/cache.h @@ -10,7 +10,7 @@ #define ARCH_DMA_MINALIGN L1_CACHE_BYTES -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ void dcache_wb_line(unsigned long start); diff --git a/arch/csky/include/asm/ftrace.h b/arch/csky/include/asm/ftrace.h index 00f9f7647e3f..21532f218058 100644 --- a/arch/csky/include/asm/ftrace.h +++ b/arch/csky/include/asm/ftrace.h @@ -11,7 +11,7 @@ #define MCOUNT_ADDR ((unsigned long)_mcount) -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ extern void _mcount(unsigned long); @@ -28,5 +28,5 @@ struct dyn_arch_ftrace { void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, unsigned long frame_pointer); -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #endif /* __ASM_CSKY_FTRACE_H */ diff --git a/arch/csky/include/asm/jump_label.h b/arch/csky/include/asm/jump_label.h index ef2e37a10a0f..603b0caa8024 100644 --- a/arch/csky/include/asm/jump_label.h +++ b/arch/csky/include/asm/jump_label.h @@ -3,7 +3,7 @@ #ifndef __ASM_CSKY_JUMP_LABEL_H #define __ASM_CSKY_JUMP_LABEL_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/types.h> @@ -48,5 +48,5 @@ void arch_jump_label_transform_static(struct jump_entry *entry, enum jump_label_type type); #define arch_jump_label_transform_static arch_jump_label_transform_static -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_CSKY_JUMP_LABEL_H */ diff --git 
a/arch/csky/include/asm/page.h b/arch/csky/include/asm/page.h index 4911d0892b71..76774dbce869 100644 --- a/arch/csky/include/asm/page.h +++ b/arch/csky/include/asm/page.h @@ -26,7 +26,7 @@ #define PHYS_OFFSET_OFFSET (CONFIG_DRAM_BASE & (SSEG_SIZE - 1)) -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/pfn.h> @@ -84,5 +84,5 @@ static inline unsigned long virt_to_pfn(const void *kaddr) #include <asm-generic/memory_model.h> #include <asm-generic/getorder.h> -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #endif /* __ASM_CSKY_PAGE_H */ diff --git a/arch/csky/include/asm/ptrace.h b/arch/csky/include/asm/ptrace.h index 0634b7895d81..5f01839e1184 100644 --- a/arch/csky/include/asm/ptrace.h +++ b/arch/csky/include/asm/ptrace.h @@ -8,7 +8,7 @@ #include <linux/types.h> #include <linux/compiler.h> -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #define PS_S 0x80000000 /* Supervisor Mode */ @@ -98,5 +98,5 @@ static inline unsigned long regs_get_register(struct pt_regs *regs, asmlinkage int syscall_trace_enter(struct pt_regs *regs); asmlinkage void syscall_trace_exit(struct pt_regs *regs); -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* __ASM_CSKY_PTRACE_H */ diff --git a/arch/csky/include/asm/sections.h b/arch/csky/include/asm/sections.h index 83e82b7c0f6c..ee5cdf226a9b 100644 --- a/arch/csky/include/asm/sections.h +++ b/arch/csky/include/asm/sections.h @@ -8,5 +8,6 @@ extern char _start[]; asmlinkage void csky_start(unsigned int unused, void *dtb_start); +asmlinkage void csky_start_secondary(void); #endif /* __ASM_SECTIONS_H */ diff --git a/arch/csky/include/asm/string.h b/arch/csky/include/asm/string.h index a0d81e9d6b8f..82e99f52b547 100644 --- a/arch/csky/include/asm/string.h +++ b/arch/csky/include/asm/string.h @@ -3,7 +3,7 @@ #ifndef _CSKY_STRING_MM_H_ #define _CSKY_STRING_MM_H_ -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <linux/types.h> #include <linux/compiler.h> #include <abi/string.h> diff --git a/arch/csky/include/asm/thread_info.h b/arch/csky/include/asm/thread_info.h index b5ed788f0c68..fdd4f8ad45ac 100644 --- a/arch/csky/include/asm/thread_info.h +++ b/arch/csky/include/asm/thread_info.h @@ -3,7 +3,7 @@ #ifndef _ASM_CSKY_THREAD_INFO_H #define _ASM_CSKY_THREAD_INFO_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ #include <asm/types.h> #include <asm/page.h> @@ -51,7 +51,7 @@ static inline struct thread_info *current_thread_info(void) return (struct thread_info *)(sp & ~(THREAD_SIZE - 1)); } -#endif /* !__ASSEMBLY__ */ +#endif /* !__ASSEMBLER__ */ #define TIF_SIGPENDING 0 /* signal pending */ #define TIF_NOTIFY_RESUME 1 /* callback before returning to user */ diff --git a/arch/csky/include/uapi/asm/ptrace.h b/arch/csky/include/uapi/asm/ptrace.h index 3be9c14334a6..90a5c36e4345 100644 --- a/arch/csky/include/uapi/asm/ptrace.h +++ b/arch/csky/include/uapi/asm/ptrace.h @@ -3,7 +3,7 @@ #ifndef _CSKY_PTRACE_H #define _CSKY_PTRACE_H -#ifndef __ASSEMBLY__ +#ifndef __ASSEMBLER__ struct pt_regs { unsigned long tls; @@ -47,5 +47,5 @@ struct user_fp { unsigned long reserved; }; -#endif /* __ASSEMBLY__ */ +#endif /* __ASSEMBLER__ */ #endif /* _CSKY_PTRACE_H */ diff --git a/arch/csky/mm/fault.c b/arch/csky/mm/fault.c index a6ca7dff4215..7ff401108985 100644 --- a/arch/csky/mm/fault.c +++ b/arch/csky/mm/fault.c @@ -45,8 +45,8 @@ static inline void csky_cmpxchg_fixup(struct pt_regs *regs) if (trap_no(regs) != VEC_TLBMODIFIED) return; - if (instruction_pointer(regs) == csky_cmpxchg_stw) - instruction_pointer_set(regs, csky_cmpxchg_ldw); + if 
(instruction_pointer(regs) == (unsigned long)&csky_cmpxchg_stw) + instruction_pointer_set(regs, (unsigned long)&csky_cmpxchg_ldw); return; } #endif diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index 938e5df75b2d..0e5fad5f06ca 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -238,6 +238,7 @@ config S390 select HAVE_PERF_EVENTS select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP + select HAVE_POSIX_CPU_TIMERS_TASK_WORK select HAVE_PREEMPT_DYNAMIC_KEY select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_RELIABLE_STACKTRACE @@ -254,6 +255,7 @@ config S390 select HOTPLUG_SMT select IOMMU_HELPER if PCI select IOMMU_SUPPORT if PCI + select IRQ_MSI_LIB if PCI select KASAN_VMALLOC if KASAN select LOCK_MM_AND_FIND_VMA select MMU_GATHER_MERGE_VMAS diff --git a/arch/s390/boot/vmem.c b/arch/s390/boot/vmem.c index fbe64ffdfb96..7d6cc4c85af0 100644 --- a/arch/s390/boot/vmem.c +++ b/arch/s390/boot/vmem.c @@ -244,22 +244,10 @@ static void *boot_crst_alloc(unsigned long val) static pte_t *boot_pte_alloc(void) { - static void *pte_leftover; pte_t *pte; - /* - * handling pte_leftovers this way helps to avoid memory fragmentation - * during POPULATE_KASAN_MAP_SHADOW when EDAT is off - */ - if (!pte_leftover) { - pte_leftover = (void *)physmem_alloc_or_die(RR_VMEM, PAGE_SIZE, PAGE_SIZE); - pte = pte_leftover + _PAGE_TABLE_SIZE; - __arch_set_page_dat(pte, 1); - } else { - pte = pte_leftover; - pte_leftover = NULL; - } - + pte = (void *)physmem_alloc_or_die(RR_VMEM, PAGE_SIZE, PAGE_SIZE); + __arch_set_page_dat(pte, 1); memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE); return pte; } diff --git a/arch/s390/include/asm/bug.h b/arch/s390/include/asm/bug.h index acb4b13d98c5..ee9221bb5d18 100644 --- a/arch/s390/include/asm/bug.h +++ b/arch/s390/include/asm/bug.h @@ -4,11 +4,14 @@ #include <linux/stringify.h> +#ifdef CONFIG_BUG + #ifndef CONFIG_DEBUG_BUGVERBOSE #define _BUGVERBOSE_LOCATION(file, line) #else #define __BUGVERBOSE_LOCATION(file, line) \ .pushsection .rodata.str, "aMS", @progbits, 1; \ + .align 2; \ 10002: .ascii file "\0"; \ .popsection; \ \ @@ -52,6 +55,8 @@ do { \ #define HAVE_ARCH_BUG +#endif /* CONFIG_BUG */ + #include <asm-generic/bug.h> #endif /* _ASM_S390_BUG_H */ diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h index 9240a363c893..c1d63b613bf9 100644 --- a/arch/s390/include/asm/page.h +++ b/arch/s390/include/asm/page.h @@ -166,6 +166,8 @@ static inline int page_reset_referenced(unsigned long addr) return CC_TRANSFORM(cc); } +int split_pud_page(pud_t *pudp, unsigned long addr); + /* Bits int the storage key */ #define _PAGE_CHANGED 0x02 /* HW changed bit */ #define _PAGE_REFERENCED 0x04 /* HW referenced bit */ diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h index a32f465ecf73..c0ff19dab580 100644 --- a/arch/s390/include/asm/pci.h +++ b/arch/s390/include/asm/pci.h @@ -5,6 +5,7 @@ #include <linux/pci.h> #include <linux/mutex.h> #include <linux/iommu.h> +#include <linux/irqdomain.h> #include <linux/pci_hotplug.h> #include <asm/pci_clp.h> #include <asm/pci_debug.h> @@ -109,6 +110,7 @@ struct zpci_bus { struct list_head resources; struct list_head bus_next; struct resource bus_resource; + struct irq_domain *msi_parent_domain; int topo; /* TID if topo_is_tid, PCHID otherwise */ int domain_nr; u8 multifunction : 1; @@ -310,6 +312,9 @@ int zpci_dma_exit_device(struct zpci_dev *zdev); /* IRQ */ int __init zpci_irq_init(void); void __init zpci_irq_exit(void); +int zpci_set_irq(struct zpci_dev *zdev); +int zpci_create_parent_msi_domain(struct 
zpci_bus *zbus); +void zpci_remove_parent_msi_domain(struct zpci_bus *zbus); /* FMB */ int zpci_fmb_enable_device(struct zpci_dev *); diff --git a/arch/s390/mm/gmap_helpers.c b/arch/s390/mm/gmap_helpers.c index 549f14ad08af..d41b19925a5a 100644 --- a/arch/s390/mm/gmap_helpers.c +++ b/arch/s390/mm/gmap_helpers.c @@ -47,6 +47,7 @@ static void ptep_zap_softleaf_entry(struct mm_struct *mm, softleaf_t entry) void gmap_helper_zap_one_page(struct mm_struct *mm, unsigned long vmaddr) { struct vm_area_struct *vma; + unsigned long pgstev; spinlock_t *ptl; pgste_t pgste; pte_t *ptep; @@ -65,9 +66,13 @@ void gmap_helper_zap_one_page(struct mm_struct *mm, unsigned long vmaddr) if (pte_swap(*ptep)) { preempt_disable(); pgste = pgste_get_lock(ptep); + pgstev = pgste_val(pgste); - ptep_zap_softleaf_entry(mm, softleaf_from_pte(*ptep)); - pte_clear(mm, vmaddr, ptep); + if ((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED || + (pgstev & _PGSTE_GPS_ZERO)) { + ptep_zap_softleaf_entry(mm, softleaf_from_pte(*ptep)); + pte_clear(mm, vmaddr, ptep); + } pgste_set_unlock(ptep, pgste); preempt_enable(); diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c index 3042647c9dbf..d3ce04a4b248 100644 --- a/arch/s390/mm/pageattr.c +++ b/arch/s390/mm/pageattr.c @@ -204,7 +204,7 @@ static int walk_pmd_level(pud_t *pudp, unsigned long addr, unsigned long end, return rc; } -static int split_pud_page(pud_t *pudp, unsigned long addr) +int split_pud_page(pud_t *pudp, unsigned long addr) { unsigned long pmd_addr, prot; pmd_t *pm_dir, *pmdp; diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c index d96587b84e81..eeadff45e0e1 100644 --- a/arch/s390/mm/vmem.c +++ b/arch/s390/mm/vmem.c @@ -330,10 +330,14 @@ static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end, if (pud_leaf(*pud)) { if (IS_ALIGNED(addr, PUD_SIZE) && IS_ALIGNED(next, PUD_SIZE)) { + if (!direct) + vmem_free_pages(pud_deref(*pud), get_order(PUD_SIZE), altmap); pud_clear(pud); pages++; + continue; + } else { + split_pud_page(pud, addr & PUD_MASK); } - continue; } } else if (pud_none(*pud)) { if (IS_ALIGNED(addr, PUD_SIZE) && @@ -433,9 +437,15 @@ static int modify_pagetable(unsigned long start, unsigned long end, bool add, if (WARN_ON_ONCE(!PAGE_ALIGNED(start | end))) return -EINVAL; - /* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */ + /* Don't mess with any tables not fully in 1:1 mapping, vmemmap & kasan area */ +#ifdef CONFIG_KASAN + if (WARN_ON_ONCE(!(start >= KASAN_SHADOW_START && end <= KASAN_SHADOW_END) && + end > __abs_lowcore)) + return -EINVAL; +#else if (WARN_ON_ONCE(end > __abs_lowcore)) return -EINVAL; +#endif for (addr = start; addr < end; addr = next) { next = pgd_addr_end(addr, end); pgd = pgd_offset_k(addr); diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index 93d2c9c780fc..5a6ace9d875a 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -708,6 +708,12 @@ int zpci_reenable_device(struct zpci_dev *zdev) if (rc) return rc; + if (zdev->msi_nr_irqs > 0) { + rc = zpci_set_irq(zdev); + if (rc) + return rc; + } + rc = zpci_iommu_register_ioat(zdev, &status); if (rc) zpci_disable_device(zdev); diff --git a/arch/s390/pci/pci_bus.c b/arch/s390/pci/pci_bus.c index 72adc8f6e94f..66c4bd888b29 100644 --- a/arch/s390/pci/pci_bus.c +++ b/arch/s390/pci/pci_bus.c @@ -14,6 +14,7 @@ #include <linux/err.h> #include <linux/delay.h> #include <linux/seq_file.h> +#include <linux/irqdomain.h> #include <linux/jump_label.h> #include <linux/pci.h> #include <linux/printk.h> @@ -198,19 +199,27 @@ 
static int zpci_bus_create_pci_bus(struct zpci_bus *zbus, struct zpci_dev *fr, s zbus->multifunction = zpci_bus_is_multifunction_root(fr); zbus->max_bus_speed = fr->max_bus_speed; + if (zpci_create_parent_msi_domain(zbus)) + goto out_free_domain; + /* * Note that the zbus->resources are taken over and zbus->resources * is empty after a successful call */ bus = pci_create_root_bus(NULL, ZPCI_BUS_NR, ops, zbus, &zbus->resources); - if (!bus) { - zpci_free_domain(zbus->domain_nr); - return -EFAULT; - } + if (!bus) + goto out_remove_msi_domain; zbus->bus = bus; + dev_set_msi_domain(&zbus->bus->dev, zbus->msi_parent_domain); return 0; + +out_remove_msi_domain: + zpci_remove_parent_msi_domain(zbus); +out_free_domain: + zpci_free_domain(zbus->domain_nr); + return -ENOMEM; } static void zpci_bus_release(struct kref *kref) @@ -231,6 +240,7 @@ static void zpci_bus_release(struct kref *kref) mutex_lock(&zbus_list_lock); list_del(&zbus->bus_next); mutex_unlock(&zbus_list_lock); + zpci_remove_parent_msi_domain(zbus); kfree(zbus); } diff --git a/arch/s390/pci/pci_irq.c b/arch/s390/pci/pci_irq.c index 2a06df8c2498..e9dd45f3c09d 100644 --- a/arch/s390/pci/pci_irq.c +++ b/arch/s390/pci/pci_irq.c @@ -6,6 +6,7 @@ #include <linux/kernel_stat.h> #include <linux/pci.h> #include <linux/msi.h> +#include <linux/irqchip/irq-msi-lib.h> #include <linux/smp.h> #include <asm/isc.h> @@ -97,7 +98,7 @@ static int zpci_clear_directed_irq(struct zpci_dev *zdev) } /* Register adapter interruptions */ -static int zpci_set_irq(struct zpci_dev *zdev) +int zpci_set_irq(struct zpci_dev *zdev) { int rc; @@ -125,27 +126,53 @@ static int zpci_clear_irq(struct zpci_dev *zdev) static int zpci_set_irq_affinity(struct irq_data *data, const struct cpumask *dest, bool force) { - struct msi_desc *entry = irq_data_get_msi_desc(data); - struct msi_msg msg = entry->msg; - int cpu_addr = smp_cpu_get_cpu_address(cpumask_first(dest)); + irq_data_update_affinity(data, dest); + return IRQ_SET_MASK_OK; +} - msg.address_lo &= 0xff0000ff; - msg.address_lo |= (cpu_addr << 8); - pci_write_msi_msg(data->irq, &msg); +/* + * Encode the hwirq number for the parent domain. The encoding must be unique + * for each IRQ of each device in the parent domain, so it uses the devfn to + * identify the device and the msi_index to identify the IRQ within that device. 
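As a quick, hedged illustration of the hwirq encoding described here (the helpers are defined just below), with made-up values: the devfn occupies the upper bits and the MSI index the lower 16, so decoding needs only a mask.

	u32 hwirq = zpci_encode_hwirq(0x12, 2);			/* (0x12 << 16) | 2 == 0x00120002 */
	u16 msi_index = zpci_decode_hwirq_msi_index(hwirq);	/* 2 again */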
+ */ +static inline u32 zpci_encode_hwirq(u8 devfn, u16 msi_index) +{ + return (devfn << 16) | msi_index; +} - return IRQ_SET_MASK_OK; +static inline u16 zpci_decode_hwirq_msi_index(irq_hw_number_t hwirq) +{ + return hwirq & 0xffff; +} + +static void zpci_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) +{ + struct msi_desc *desc = irq_data_get_msi_desc(data); + struct zpci_dev *zdev = to_zpci_dev(desc->dev); + + if (irq_delivery == DIRECTED) { + int cpu = cpumask_first(irq_data_get_affinity_mask(data)); + + msg->address_lo = zdev->msi_addr & 0xff0000ff; + msg->address_lo |= (smp_cpu_get_cpu_address(cpu) << 8); + } else { + msg->address_lo = zdev->msi_addr & 0xffffffff; + } + msg->address_hi = zdev->msi_addr >> 32; + msg->data = zpci_decode_hwirq_msi_index(data->hwirq); } static struct irq_chip zpci_irq_chip = { .name = "PCI-MSI", - .irq_unmask = pci_msi_unmask_irq, - .irq_mask = pci_msi_mask_irq, + .irq_compose_msi_msg = zpci_compose_msi_msg, }; static void zpci_handle_cpu_local_irq(bool rescan) { struct airq_iv *dibv = zpci_ibv[smp_processor_id()]; union zpci_sic_iib iib = {{0}}; + struct irq_domain *msi_domain; + irq_hw_number_t hwirq; unsigned long bit; int irqs_on = 0; @@ -163,7 +190,9 @@ static void zpci_handle_cpu_local_irq(bool rescan) continue; } inc_irq_stat(IRQIO_MSI); - generic_handle_irq(airq_iv_get_data(dibv, bit)); + hwirq = airq_iv_get_data(dibv, bit); + msi_domain = (struct irq_domain *)airq_iv_get_ptr(dibv, bit); + generic_handle_domain_irq(msi_domain, hwirq); } } @@ -228,6 +257,8 @@ static void zpci_floating_irq_handler(struct airq_struct *airq, struct tpi_info *tpi_info) { union zpci_sic_iib iib = {{0}}; + struct irq_domain *msi_domain; + irq_hw_number_t hwirq; unsigned long si, ai; struct airq_iv *aibv; int irqs_on = 0; @@ -255,7 +286,9 @@ static void zpci_floating_irq_handler(struct airq_struct *airq, break; inc_irq_stat(IRQIO_MSI); airq_iv_lock(aibv, ai); - generic_handle_irq(airq_iv_get_data(aibv, ai)); + hwirq = airq_iv_get_data(aibv, ai); + msi_domain = (struct irq_domain *)airq_iv_get_ptr(aibv, ai); + generic_handle_domain_irq(msi_domain, hwirq); airq_iv_unlock(aibv, ai); } } @@ -277,7 +310,9 @@ static int __alloc_airq(struct zpci_dev *zdev, int msi_vecs, zdev->aisb = *bit; /* Create adapter interrupt vector */ - zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA | AIRQ_IV_BITLOCK, NULL); + zdev->aibv = airq_iv_create(msi_vecs, + AIRQ_IV_PTR | AIRQ_IV_DATA | AIRQ_IV_BITLOCK, + NULL); if (!zdev->aibv) return -ENOMEM; @@ -289,146 +324,220 @@ static int __alloc_airq(struct zpci_dev *zdev, int msi_vecs, return 0; } -int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) +bool arch_restore_msi_irqs(struct pci_dev *pdev) { - unsigned int hwirq, msi_vecs, irqs_per_msi, i, cpu; struct zpci_dev *zdev = to_zpci(pdev); - struct msi_desc *msi; - struct msi_msg msg; - unsigned long bit; - int cpu_addr; - int rc, irq; + zpci_set_irq(zdev); + return true; +} + +static struct airq_struct zpci_airq = { + .handler = zpci_floating_irq_handler, + .isc = PCI_ISC, +}; + +static void zpci_msi_teardown_directed(struct zpci_dev *zdev) +{ + airq_iv_free(zpci_ibv[0], zdev->msi_first_bit, zdev->max_msi); + zdev->msi_first_bit = -1U; + zdev->msi_nr_irqs = 0; +} + +static void zpci_msi_teardown_floating(struct zpci_dev *zdev) +{ + airq_iv_release(zdev->aibv); + zdev->aibv = NULL; + airq_iv_free_bit(zpci_sbv, zdev->aisb); zdev->aisb = -1UL; zdev->msi_first_bit = -1U; + zdev->msi_nr_irqs = 0; +} + +static void zpci_msi_teardown(struct irq_domain *domain, msi_alloc_info_t 
*arg) +{ + struct zpci_dev *zdev = to_zpci_dev(domain->dev); + + zpci_clear_irq(zdev); + if (irq_delivery == DIRECTED) + zpci_msi_teardown_directed(zdev); + else + zpci_msi_teardown_floating(zdev); +} + +static int zpci_msi_prepare(struct irq_domain *domain, + struct device *dev, int nvec, + msi_alloc_info_t *info) +{ + struct zpci_dev *zdev = to_zpci_dev(dev); + struct pci_dev *pdev = to_pci_dev(dev); + unsigned long bit; + int msi_vecs, rc; msi_vecs = min_t(unsigned int, nvec, zdev->max_msi); if (msi_vecs < nvec) { - pr_info("%s requested %d irqs, allocate system limit of %d", + pr_info("%s requested %d IRQs, allocate system limit of %d\n", pci_name(pdev), nvec, zdev->max_msi); } rc = __alloc_airq(zdev, msi_vecs, &bit); - if (rc < 0) + if (rc) { + pr_err("Allocating adapter IRQs for %s failed\n", pci_name(pdev)); return rc; + } - /* - * Request MSI interrupts: - * When using MSI, nvec_used interrupt sources and their irq - * descriptors are controlled through one msi descriptor. - * Thus the outer loop over msi descriptors shall run only once, - * while two inner loops iterate over the interrupt vectors. - * When using MSI-X, each interrupt vector/irq descriptor - * is bound to exactly one msi descriptor (nvec_used is one). - * So the inner loops are executed once, while the outer iterates - * over the MSI-X descriptors. - */ - hwirq = bit; - msi_for_each_desc(msi, &pdev->dev, MSI_DESC_NOTASSOCIATED) { - if (hwirq - bit >= msi_vecs) - break; - irqs_per_msi = min_t(unsigned int, msi_vecs, msi->nvec_used); - irq = __irq_alloc_descs(-1, 0, irqs_per_msi, 0, THIS_MODULE, - (irq_delivery == DIRECTED) ? - msi->affinity : NULL); - if (irq < 0) - return -ENOMEM; + zdev->msi_first_bit = bit; + zdev->msi_nr_irqs = msi_vecs; + rc = zpci_set_irq(zdev); + if (rc) { + pr_err("Registering adapter IRQs for %s failed\n", + pci_name(pdev)); + + if (irq_delivery == DIRECTED) + zpci_msi_teardown_directed(zdev); + else + zpci_msi_teardown_floating(zdev); + return rc; + } + return 0; +} - for (i = 0; i < irqs_per_msi; i++) { - rc = irq_set_msi_desc_off(irq, i, msi); - if (rc) - return rc; - irq_set_chip_and_handler(irq + i, &zpci_irq_chip, - handle_percpu_irq); - } +static int zpci_msi_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *args) +{ + struct msi_desc *desc = ((msi_alloc_info_t *)args)->desc; + struct zpci_dev *zdev = to_zpci_dev(desc->dev); + struct zpci_bus *zbus = zdev->zbus; + unsigned int cpu, hwirq; + unsigned long bit; + int i; - msg.data = hwirq - bit; - if (irq_delivery == DIRECTED) { - if (msi->affinity) - cpu = cpumask_first(&msi->affinity->mask); - else - cpu = 0; - cpu_addr = smp_cpu_get_cpu_address(cpu); + bit = zdev->msi_first_bit + desc->msi_index; + hwirq = zpci_encode_hwirq(zdev->devfn, desc->msi_index); - msg.address_lo = zdev->msi_addr & 0xff0000ff; - msg.address_lo |= (cpu_addr << 8); + if (desc->msi_index + nr_irqs > zdev->max_msi) + return -EINVAL; + for (i = 0; i < nr_irqs; i++) { + irq_domain_set_info(domain, virq + i, hwirq + i, + &zpci_irq_chip, zdev, + handle_percpu_irq, NULL, NULL); + + if (irq_delivery == DIRECTED) { for_each_possible_cpu(cpu) { - for (i = 0; i < irqs_per_msi; i++) - airq_iv_set_data(zpci_ibv[cpu], - hwirq + i, irq + i); + airq_iv_set_ptr(zpci_ibv[cpu], bit + i, + (unsigned long)zbus->msi_parent_domain); + airq_iv_set_data(zpci_ibv[cpu], bit + i, hwirq + i); } } else { - msg.address_lo = zdev->msi_addr & 0xffffffff; - for (i = 0; i < irqs_per_msi; i++) - airq_iv_set_data(zdev->aibv, hwirq + i, irq + i); + 
airq_iv_set_ptr(zdev->aibv, bit + i, + (unsigned long)zbus->msi_parent_domain); + airq_iv_set_data(zdev->aibv, bit + i, hwirq + i); } - msg.address_hi = zdev->msi_addr >> 32; - pci_write_msi_msg(irq, &msg); - hwirq += irqs_per_msi; } - zdev->msi_first_bit = bit; - zdev->msi_nr_irqs = hwirq - bit; - - rc = zpci_set_irq(zdev); - if (rc) - return rc; - - return (zdev->msi_nr_irqs == nvec) ? 0 : zdev->msi_nr_irqs; + return 0; } -void arch_teardown_msi_irqs(struct pci_dev *pdev) +static void zpci_msi_clear_airq(struct irq_data *d, int i) { - struct zpci_dev *zdev = to_zpci(pdev); - struct msi_desc *msi; - unsigned int i; - int rc; + struct msi_desc *desc = irq_data_get_msi_desc(d); + struct zpci_dev *zdev = to_zpci_dev(desc->dev); + unsigned long bit; + unsigned int cpu; + u16 msi_index; - /* Disable interrupts */ - rc = zpci_clear_irq(zdev); - if (rc) - return; + msi_index = zpci_decode_hwirq_msi_index(d->hwirq); + bit = zdev->msi_first_bit + msi_index; - /* Release MSI interrupts */ - msi_for_each_desc(msi, &pdev->dev, MSI_DESC_ASSOCIATED) { - for (i = 0; i < msi->nvec_used; i++) { - irq_set_msi_desc(msi->irq + i, NULL); - irq_free_desc(msi->irq + i); + if (irq_delivery == DIRECTED) { + for_each_possible_cpu(cpu) { + airq_iv_set_ptr(zpci_ibv[cpu], bit + i, 0); + airq_iv_set_data(zpci_ibv[cpu], bit + i, 0); } - msi->msg.address_lo = 0; - msi->msg.address_hi = 0; - msi->msg.data = 0; - msi->irq = 0; + } else { + airq_iv_set_ptr(zdev->aibv, bit + i, 0); + airq_iv_set_data(zdev->aibv, bit + i, 0); } +} - if (zdev->aisb != -1UL) { - zpci_ibv[zdev->aisb] = NULL; - airq_iv_free_bit(zpci_sbv, zdev->aisb); - zdev->aisb = -1UL; - } - if (zdev->aibv) { - airq_iv_release(zdev->aibv); - zdev->aibv = NULL; - } +static void zpci_msi_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) +{ + struct irq_data *d; + int i; - if ((irq_delivery == DIRECTED) && zdev->msi_first_bit != -1U) - airq_iv_free(zpci_ibv[0], zdev->msi_first_bit, zdev->msi_nr_irqs); + for (i = 0; i < nr_irqs; i++) { + d = irq_domain_get_irq_data(domain, virq + i); + zpci_msi_clear_airq(d, i); + irq_domain_reset_irq_data(d); + } } -bool arch_restore_msi_irqs(struct pci_dev *pdev) +static const struct irq_domain_ops zpci_msi_domain_ops = { + .alloc = zpci_msi_domain_alloc, + .free = zpci_msi_domain_free, +}; + +static bool zpci_init_dev_msi_info(struct device *dev, struct irq_domain *domain, + struct irq_domain *real_parent, + struct msi_domain_info *info) { - struct zpci_dev *zdev = to_zpci(pdev); + if (!msi_lib_init_dev_msi_info(dev, domain, real_parent, info)) + return false; + + info->ops->msi_prepare = zpci_msi_prepare; + info->ops->msi_teardown = zpci_msi_teardown; - zpci_set_irq(zdev); return true; } -static struct airq_struct zpci_airq = { - .handler = zpci_floating_irq_handler, - .isc = PCI_ISC, +static struct msi_parent_ops zpci_msi_parent_ops = { + .supported_flags = MSI_GENERIC_FLAGS_MASK | + MSI_FLAG_PCI_MSIX | + MSI_FLAG_MULTI_PCI_MSI, + .required_flags = MSI_FLAG_USE_DEF_DOM_OPS | + MSI_FLAG_USE_DEF_CHIP_OPS, + .init_dev_msi_info = zpci_init_dev_msi_info, }; +int zpci_create_parent_msi_domain(struct zpci_bus *zbus) +{ + char fwnode_name[18]; + + snprintf(fwnode_name, sizeof(fwnode_name), "ZPCI_MSI_DOM_%04x", zbus->domain_nr); + struct irq_domain_info info = { + .fwnode = irq_domain_alloc_named_fwnode(fwnode_name), + .ops = &zpci_msi_domain_ops, + }; + + if (!info.fwnode) { + pr_err("Failed to allocate fwnode for MSI IRQ domain\n"); + return -ENOMEM; + } + + if (irq_delivery == FLOATING) + 
zpci_msi_parent_ops.required_flags |= MSI_FLAG_NO_AFFINITY; + + zbus->msi_parent_domain = msi_create_parent_irq_domain(&info, &zpci_msi_parent_ops); + if (!zbus->msi_parent_domain) { + irq_domain_free_fwnode(info.fwnode); + pr_err("Failed to create MSI IRQ domain\n"); + return -ENOMEM; + } + + return 0; +} + +void zpci_remove_parent_msi_domain(struct zpci_bus *zbus) +{ + struct fwnode_handle *fn; + + fn = zbus->msi_parent_domain->fwnode; + irq_domain_remove(zbus->msi_parent_domain); + irq_domain_free_fwnode(fn); +} + static void __init cpu_enable_directed_irq(void *unused) { union zpci_sic_iib iib = {{0}}; @@ -465,6 +574,7 @@ static int __init zpci_directed_irq_init(void) * is only done on the first vector. */ zpci_ibv[cpu] = airq_iv_create(cache_line_size() * BITS_PER_BYTE, + AIRQ_IV_PTR | AIRQ_IV_DATA | AIRQ_IV_CACHELINE | (!cpu ? AIRQ_IV_ALLOC : 0), NULL); diff --git a/block/bio.c b/block/bio.c index fa5ff36b443f..e726c0e280a8 100644 --- a/block/bio.c +++ b/block/bio.c @@ -321,9 +321,13 @@ static struct bio *__bio_chain_endio(struct bio *bio) return parent; } +/* + * This function should only be used as a flag and must never be called. + * If execution reaches here, it indicates a serious programming error. + */ static void bio_chain_endio(struct bio *bio) { - bio_endio(__bio_chain_endio(bio)); + BUG(); } /** diff --git a/block/blk-mq-dma.c b/block/blk-mq-dma.c index b6dbc9767596..fb018fffffdc 100644 --- a/block/blk-mq-dma.c +++ b/block/blk-mq-dma.c @@ -199,6 +199,7 @@ static bool blk_dma_map_iter_start(struct request *req, struct device *dma_dev, if (blk_can_dma_map_iova(req, dma_dev) && dma_iova_try_alloc(dma_dev, state, vec.paddr, total_len)) return blk_rq_dma_map_iova(req, dma_dev, state, iter, &vec); + memset(state, 0, sizeof(*state)); return blk_dma_map_direct(req, dma_dev, iter, &vec); } diff --git a/block/blk-mq.c b/block/blk-mq.c index bd8b11c472a2..1978eef95dca 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -336,12 +336,12 @@ void blk_mq_quiesce_tagset(struct blk_mq_tag_set *set) { struct request_queue *q; - mutex_lock(&set->tag_list_lock); - list_for_each_entry(q, &set->tag_list, tag_set_list) { + rcu_read_lock(); + list_for_each_entry_rcu(q, &set->tag_list, tag_set_list) { if (!blk_queue_skip_tagset_quiesce(q)) blk_mq_quiesce_queue_nowait(q); } - mutex_unlock(&set->tag_list_lock); + rcu_read_unlock(); blk_mq_wait_quiesce_done(set); } @@ -351,12 +351,12 @@ void blk_mq_unquiesce_tagset(struct blk_mq_tag_set *set) { struct request_queue *q; - mutex_lock(&set->tag_list_lock); - list_for_each_entry(q, &set->tag_list, tag_set_list) { + rcu_read_lock(); + list_for_each_entry_rcu(q, &set->tag_list, tag_set_list) { if (!blk_queue_skip_tagset_quiesce(q)) blk_mq_unquiesce_queue(q); } - mutex_unlock(&set->tag_list_lock); + rcu_read_unlock(); } EXPORT_SYMBOL_GPL(blk_mq_unquiesce_tagset); @@ -4311,7 +4311,7 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q) struct blk_mq_tag_set *set = q->tag_set; mutex_lock(&set->tag_list_lock); - list_del(&q->tag_set_list); + list_del_rcu(&q->tag_set_list); if (list_is_singular(&set->tag_list)) { /* just transitioned to unshared */ set->flags &= ~BLK_MQ_F_TAG_QUEUE_SHARED; @@ -4319,7 +4319,6 @@ static void blk_mq_del_queue_tag_set(struct request_queue *q) blk_mq_update_tag_set_shared(set, false); } mutex_unlock(&set->tag_list_lock); - INIT_LIST_HEAD(&q->tag_set_list); } static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set, @@ -4338,7 +4337,7 @@ static void blk_mq_add_queue_tag_set(struct blk_mq_tag_set *set, } if 
(set->flags & BLK_MQ_F_TAG_QUEUE_SHARED) queue_set_hctx_shared(q, true); - list_add_tail(&q->tag_set_list, &set->tag_list); + list_add_tail_rcu(&q->tag_set_list, &set->tag_list); mutex_unlock(&set->tag_list_lock); } @@ -5193,27 +5192,19 @@ EXPORT_SYMBOL_GPL(blk_mq_update_nr_hw_queues); static int blk_hctx_poll(struct request_queue *q, struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob, unsigned int flags) { - long state = get_current_state(); int ret; do { ret = q->mq_ops->poll(hctx, iob); - if (ret > 0) { - __set_current_state(TASK_RUNNING); + if (ret > 0) return ret; - } - - if (signal_pending_state(state, current)) - __set_current_state(TASK_RUNNING); - if (task_is_running(current)) + if (task_sigpending(current)) return 1; - if (ret < 0 || (flags & BLK_POLL_ONESHOT)) break; cpu_relax(); } while (!need_resched()); - __set_current_state(TASK_RUNNING); return 0; } diff --git a/block/blk-zoned.c b/block/blk-zoned.c index 394d8d74bba9..1c54678fae6b 100644 --- a/block/blk-zoned.c +++ b/block/blk-zoned.c @@ -2100,7 +2100,7 @@ static int blk_revalidate_seq_zone(struct blk_zone *zone, unsigned int idx, * we have a zone write plug for such zone if the device has a zone * write plug hash table. */ - if (!queue_emulates_zone_append(disk->queue) || !disk->zone_wplugs_hash) + if (!disk->zone_wplugs_hash) return 0; wp_offset = disk_zone_wplug_sync_wp_offset(disk, zone); diff --git a/drivers/acpi/platform_profile.c b/drivers/acpi/platform_profile.c index b43f4459a4f6..ea04a8c69215 100644 --- a/drivers/acpi/platform_profile.c +++ b/drivers/acpi/platform_profile.c @@ -37,6 +37,7 @@ static const char * const profile_names[] = { [PLATFORM_PROFILE_BALANCED] = "balanced", [PLATFORM_PROFILE_BALANCED_PERFORMANCE] = "balanced-performance", [PLATFORM_PROFILE_PERFORMANCE] = "performance", + [PLATFORM_PROFILE_MAX_POWER] = "max-power", [PLATFORM_PROFILE_CUSTOM] = "custom", }; static_assert(ARRAY_SIZE(profile_names) == PLATFORM_PROFILE_LAST); @@ -506,7 +507,8 @@ int platform_profile_cycle(void) if (err) return err; - if (profile == PLATFORM_PROFILE_CUSTOM || + if (profile == PLATFORM_PROFILE_MAX_POWER || + profile == PLATFORM_PROFILE_CUSTOM || profile == PLATFORM_PROFILE_LAST) return -EINVAL; @@ -515,7 +517,8 @@ int platform_profile_cycle(void) if (err) return err; - /* never iterate into a custom if all drivers supported it */ + /* never iterate into a custom or max power if all drivers supported it */ + clear_bit(PLATFORM_PROFILE_MAX_POWER, data.aggregate); clear_bit(PLATFORM_PROFILE_CUSTOM, data.aggregate); next = find_next_bit_wrap(data.aggregate, diff --git a/drivers/base/power/runtime-test.c b/drivers/base/power/runtime-test.c index 477feca804c7..1535ad2b0264 100644 --- a/drivers/base/power/runtime-test.c +++ b/drivers/base/power/runtime-test.c @@ -38,10 +38,6 @@ static void pm_runtime_already_suspended_test(struct kunit *test) KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev)); pm_runtime_get_noresume(dev); - KUNIT_EXPECT_EQ(test, 0, pm_runtime_barrier(dev)); /* no wakeup needed */ - pm_runtime_put(dev); - - pm_runtime_get_noresume(dev); KUNIT_EXPECT_EQ(test, 1, pm_runtime_put_sync(dev)); KUNIT_EXPECT_EQ(test, 1, pm_runtime_suspend(dev)); @@ -174,7 +170,7 @@ static void pm_runtime_error_test(struct kunit *test) KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev)); KUNIT_EXPECT_EQ(test, 0, pm_runtime_get(dev)); - KUNIT_EXPECT_EQ(test, 1, pm_runtime_barrier(dev)); /* resume was pending */ + pm_runtime_barrier(dev); pm_runtime_put(dev); pm_runtime_suspend(dev); /* flush the put(), to suspend */ 
KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev)); @@ -225,7 +221,7 @@ static void pm_runtime_probe_active_test(struct kunit *test) KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev)); /* Nothing to flush. We stay active. */ - KUNIT_EXPECT_EQ(test, 0, pm_runtime_barrier(dev)); + pm_runtime_barrier(dev); KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev)); /* Ask for idle? Now we suspend. */ diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 62707738caa4..84676cc24221 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -1467,30 +1467,20 @@ static void __pm_runtime_barrier(struct device *dev) * Next, make sure that all pending requests for the device have been flushed * from pm_wq and wait for all runtime PM operations involving the device in * progress to complete. - * - * Return value: - * 1, if there was a resume request pending and the device had to be woken up, - * 0, otherwise */ -int pm_runtime_barrier(struct device *dev) +void pm_runtime_barrier(struct device *dev) { - int retval = 0; - pm_runtime_get_noresume(dev); spin_lock_irq(&dev->power.lock); if (dev->power.request_pending - && dev->power.request == RPM_REQ_RESUME) { + && dev->power.request == RPM_REQ_RESUME) rpm_resume(dev, 0); - retval = 1; - } __pm_runtime_barrier(dev); spin_unlock_irq(&dev->power.lock); pm_runtime_put_noidle(dev); - - return retval; } EXPORT_SYMBOL_GPL(pm_runtime_barrier); diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index 2c715df63f23..df9831783a13 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -926,6 +926,7 @@ static size_t ublk_copy_user_pages(const struct request *req, size_t done = 0; rq_for_each_segment(bv, req, iter) { + unsigned len; void *bv_buf; size_t copied; @@ -934,18 +935,17 @@ static size_t ublk_copy_user_pages(const struct request *req, continue; } - bv.bv_offset += offset; - bv.bv_len -= offset; - bv_buf = bvec_kmap_local(&bv); + len = bv.bv_len - offset; + bv_buf = kmap_local_page(bv.bv_page) + bv.bv_offset + offset; if (dir == ITER_DEST) - copied = copy_to_iter(bv_buf, bv.bv_len, uiter); + copied = copy_to_iter(bv_buf, len, uiter); else - copied = copy_from_iter(bv_buf, bv.bv_len, uiter); + copied = copy_from_iter(bv_buf, len, uiter); kunmap_local(bv_buf); done += copied; - if (copied < bv.bv_len) + if (copied < len) break; offset = 0; @@ -3673,6 +3673,19 @@ exit: return ret; } +static bool ublk_ctrl_uring_cmd_may_sleep(u32 cmd_op) +{ + switch (_IOC_NR(cmd_op)) { + case UBLK_CMD_GET_QUEUE_AFFINITY: + case UBLK_CMD_GET_DEV_INFO: + case UBLK_CMD_GET_DEV_INFO2: + case _IOC_NR(UBLK_U_CMD_GET_FEATURES): + return false; + default: + return true; + } +} + static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags) { @@ -3681,7 +3694,8 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd, u32 cmd_op = cmd->cmd_op; int ret = -EINVAL; - if (issue_flags & IO_URING_F_NONBLOCK) + if (ublk_ctrl_uring_cmd_may_sleep(cmd_op) && + issue_flags & IO_URING_F_NONBLOCK) return -EAGAIN; ublk_ctrl_cmd_dump(cmd); diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c index a444d41e53b6..472bca54642b 100644 --- a/drivers/hid/hid-asus.c +++ b/drivers/hid/hid-asus.c @@ -27,6 +27,7 @@ #include <linux/hid.h> #include <linux/module.h> #include <linux/platform_data/x86/asus-wmi.h> +#include <linux/platform_data/x86/asus-wmi-leds-ids.h> #include <linux/input/mt.h> #include <linux/usb.h> /* For to_usb_interface for T100 touchpad intf check */ #include <linux/power_supply.h> diff --git 
a/drivers/i2c/algos/i2c-algo-pcf.c b/drivers/i2c/algos/i2c-algo-pcf.c index fd563e845d4b..a87ecea7f510 100644 --- a/drivers/i2c/algos/i2c-algo-pcf.c +++ b/drivers/i2c/algos/i2c-algo-pcf.c @@ -23,17 +23,8 @@ #include "i2c-algo-pcf.h" -#define DEB2(x) if (i2c_debug >= 2) x -#define DEB3(x) if (i2c_debug >= 3) x /* print several statistical values */ -#define DEBPROTO(x) if (i2c_debug >= 9) x; - /* debug the protocol by showing transferred bits */ #define DEF_TIMEOUT 16 -/* - * module parameters: - */ -static int i2c_debug; - /* setting states on the bus with the right timing: */ #define set_pcf(adap, ctl, val) adap->setpcf(adap->data, ctl, val) @@ -47,27 +38,21 @@ static int i2c_debug; static void i2c_start(struct i2c_algo_pcf_data *adap) { - DEBPROTO(printk(KERN_DEBUG "S ")); set_pcf(adap, 1, I2C_PCF_START); } static void i2c_repstart(struct i2c_algo_pcf_data *adap) { - DEBPROTO(printk(" Sr ")); set_pcf(adap, 1, I2C_PCF_REPSTART); } static void i2c_stop(struct i2c_algo_pcf_data *adap) { - DEBPROTO(printk("P\n")); set_pcf(adap, 1, I2C_PCF_STOP); } static void handle_lab(struct i2c_algo_pcf_data *adap, const int *status) { - DEB2(printk(KERN_INFO - "i2c-algo-pcf.o: lost arbitration (CSR 0x%02x)\n", - *status)); /* * Cleanup from LAB -- reset and enable ESO. * This resets the PCF8584; since we've lost the bus, no @@ -88,9 +73,6 @@ static void handle_lab(struct i2c_algo_pcf_data *adap, const int *status) if (adap->lab_mdelay) mdelay(adap->lab_mdelay); - DEB2(printk(KERN_INFO - "i2c-algo-pcf.o: reset LAB condition (CSR 0x%02x)\n", - get_pcf(adap, 1))); } static int wait_for_bb(struct i2c_algo_pcf_data *adap) @@ -147,56 +129,48 @@ static int wait_for_pin(struct i2c_algo_pcf_data *adap, int *status) * * vdovikin: added detect code for PCF8584 */ -static int pcf_init_8584 (struct i2c_algo_pcf_data *adap) +static int pcf_init_8584(struct i2c_algo_pcf_data *adap) { unsigned char temp; - DEB3(printk(KERN_DEBUG "i2c-algo-pcf.o: PCF state 0x%02x\n", - get_pcf(adap, 1))); - /* S1=0x80: S0 selected, serial interface off */ set_pcf(adap, 1, I2C_PCF_PIN); /* * check to see S1 now used as R/W ctrl - * PCF8584 does that when ESO is zero */ - if (((temp = get_pcf(adap, 1)) & 0x7f) != (0)) { - DEB2(printk(KERN_ERR "i2c-algo-pcf.o: PCF detection failed -- can't select S0 (0x%02x).\n", temp)); + temp = get_pcf(adap, 1); + if ((temp & 0x7f) != 0) return -ENXIO; /* definitely not PCF8584 */ - } /* load own address in S0, effective address is (own << 1) */ i2c_outb(adap, get_own(adap)); /* check it's really written */ - if ((temp = i2c_inb(adap)) != get_own(adap)) { - DEB2(printk(KERN_ERR "i2c-algo-pcf.o: PCF detection failed -- can't set S0 (0x%02x).\n", temp)); + temp = i2c_inb(adap); + if (temp != get_own(adap)) return -ENXIO; - } /* S1=0xA0, next byte in S2 */ set_pcf(adap, 1, I2C_PCF_PIN | I2C_PCF_ES1); /* check to see S2 now selected */ - if (((temp = get_pcf(adap, 1)) & 0x7f) != I2C_PCF_ES1) { - DEB2(printk(KERN_ERR "i2c-algo-pcf.o: PCF detection failed -- can't select S2 (0x%02x).\n", temp)); + temp = get_pcf(adap, 1); + if ((temp & 0x7f) != I2C_PCF_ES1) return -ENXIO; - } /* load clock register S2 */ i2c_outb(adap, get_clock(adap)); /* check it's really written, the only 5 lowest bits does matter */ - if (((temp = i2c_inb(adap)) & 0x1f) != get_clock(adap)) { - DEB2(printk(KERN_ERR "i2c-algo-pcf.o: PCF detection failed -- can't set S2 (0x%02x).\n", temp)); + temp = i2c_inb(adap); + if ((temp & 0x1f) != get_clock(adap)) return -ENXIO; - } /* Enable serial interface, idle, S0 selected */ set_pcf(adap, 1, 
I2C_PCF_IDLE); /* check to see PCF is really idled and we can access status register */ - if ((temp = get_pcf(adap, 1)) != (I2C_PCF_PIN | I2C_PCF_BB)) { - DEB2(printk(KERN_ERR "i2c-algo-pcf.o: PCF detection failed -- can't select S1` (0x%02x).\n", temp)); + temp = get_pcf(adap, 1); + if (temp != (I2C_PCF_PIN | I2C_PCF_BB)) return -ENXIO; - } printk(KERN_DEBUG "i2c-algo-pcf.o: detected and initialized PCF8584.\n"); @@ -209,9 +183,7 @@ static int pcf_sendbytes(struct i2c_adapter *i2c_adap, const char *buf, struct i2c_algo_pcf_data *adap = i2c_adap->algo_data; int wrcount, status, timeout; - for (wrcount=0; wrcount<count; ++wrcount) { - DEB2(dev_dbg(&i2c_adap->dev, "i2c_write: writing %2.2X\n", - buf[wrcount] & 0xff)); + for (wrcount = 0; wrcount < count; ++wrcount) { i2c_outb(adap, buf[wrcount]); timeout = wait_for_pin(adap, &status); if (timeout) { @@ -246,7 +218,8 @@ static int pcf_readbytes(struct i2c_adapter *i2c_adap, char *buf, /* increment number of bytes to read by one -- read dummy byte */ for (i = 0; i <= count; i++) { - if ((wfp = wait_for_pin(adap, &status))) { + wfp = wait_for_pin(adap, &status); + if (wfp) { if (wfp == -EINTR) return -EINTR; /* arbitration lost */ @@ -280,7 +253,7 @@ static int pcf_readbytes(struct i2c_adapter *i2c_adap, char *buf, } -static int pcf_doAddress(struct i2c_algo_pcf_data *adap, +static void pcf_send_address(struct i2c_algo_pcf_data *adap, struct i2c_msg *msg) { unsigned char addr = i2c_8bit_addr_from_msg(msg); @@ -288,8 +261,6 @@ static int pcf_doAddress(struct i2c_algo_pcf_data *adap, if (msg->flags & I2C_M_REV_DIR_ADDR) addr ^= 1; i2c_outb(adap, addr); - - return 0; } static int pcf_xfer(struct i2c_adapter *i2c_adap, @@ -299,7 +270,7 @@ static int pcf_xfer(struct i2c_adapter *i2c_adap, struct i2c_algo_pcf_data *adap = i2c_adap->algo_data; struct i2c_msg *pmsg; int i; - int ret=0, timeout, status; + int timeout, status; if (adap->xfer_begin) adap->xfer_begin(adap->data); @@ -307,20 +278,15 @@ static int pcf_xfer(struct i2c_adapter *i2c_adap, /* Check for bus busy */ timeout = wait_for_bb(adap); if (timeout) { - DEB2(printk(KERN_ERR "i2c-algo-pcf.o: " - "Timeout waiting for BB in pcf_xfer\n");) i = -EIO; goto out; } - for (i = 0;ret >= 0 && i < num; i++) { - pmsg = &msgs[i]; - - DEB2(printk(KERN_DEBUG "i2c-algo-pcf.o: Doing %s %d bytes to 0x%02x - %d of %d messages\n", - str_read_write(pmsg->flags & I2C_M_RD), - pmsg->len, pmsg->addr, i + 1, num);) + for (i = 0; i < num; i++) { + int ret; - ret = pcf_doAddress(adap, pmsg); + pmsg = &msgs[i]; + pcf_send_address(adap, pmsg); /* Send START */ if (i == 0) @@ -335,8 +301,6 @@ static int pcf_xfer(struct i2c_adapter *i2c_adap, goto out; } i2c_stop(adap); - DEB2(printk(KERN_ERR "i2c-algo-pcf.o: Timeout waiting " - "for PIN(1) in pcf_xfer\n");) i = -EREMOTEIO; goto out; } @@ -344,35 +308,21 @@ static int pcf_xfer(struct i2c_adapter *i2c_adap, /* Check LRB (last rcvd bit - slave ack) */ if (status & I2C_PCF_LRB) { i2c_stop(adap); - DEB2(printk(KERN_ERR "i2c-algo-pcf.o: No LRB(1) in pcf_xfer\n");) i = -EREMOTEIO; goto out; } - DEB3(printk(KERN_DEBUG "i2c-algo-pcf.o: Msg %d, addr=0x%x, flags=0x%x, len=%d\n", - i, msgs[i].addr, msgs[i].flags, msgs[i].len);) if (pmsg->flags & I2C_M_RD) { ret = pcf_readbytes(i2c_adap, pmsg->buf, pmsg->len, (i + 1 == num)); - - if (ret != pmsg->len) { - DEB2(printk(KERN_DEBUG "i2c-algo-pcf.o: fail: " - "only read %d bytes.\n",ret)); - } else { - DEB2(printk(KERN_DEBUG "i2c-algo-pcf.o: read %d bytes.\n",ret)); - } } else { ret = pcf_sendbytes(i2c_adap, pmsg->buf, pmsg->len, (i + 1 
== num)); - - if (ret != pmsg->len) { - DEB2(printk(KERN_DEBUG "i2c-algo-pcf.o: fail: " - "only wrote %d bytes.\n",ret)); - } else { - DEB2(printk(KERN_DEBUG "i2c-algo-pcf.o: wrote %d bytes.\n",ret)); - } } + + if (ret < 0) + goto out; } out: @@ -401,12 +351,11 @@ int i2c_pcf_add_bus(struct i2c_adapter *adap) struct i2c_algo_pcf_data *pcf_adap = adap->algo_data; int rval; - DEB2(dev_dbg(&adap->dev, "hw routines registered.\n")); - /* register new adapter to i2c module... */ adap->algo = &pcf_algo; - if ((rval = pcf_init_8584(pcf_adap))) + rval = pcf_init_8584(pcf_adap); + if (rval) return rval; rval = i2c_add_adapter(adap); @@ -418,7 +367,3 @@ EXPORT_SYMBOL(i2c_pcf_add_bus); MODULE_AUTHOR("Hans Berglund <hb@spacetec.no>"); MODULE_DESCRIPTION("I2C-Bus PCF8584 algorithm"); MODULE_LICENSE("GPL"); - -module_param(i2c_debug, int, S_IRUGO | S_IWUSR); -MODULE_PARM_DESC(i2c_debug, - "debug level - 0 off; 1 normal; 2,3 more verbose; 9 pcf-protocol"); diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index fd81e49638aa..cea87fcb4a1a 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -166,6 +166,7 @@ config I2C_I801 Arrow Lake (SOC) Panther Lake (SOC) Wildcat Lake (SOC) + Diamond Rapids (SOC) This driver can also be built as a module. If so, the module will be called i2c-i801. @@ -1474,7 +1475,7 @@ config I2C_ACORN config I2C_ELEKTOR tristate "Elektor ISA card" - depends on ISA && HAS_IOPORT_MAP && BROKEN_ON_SMP + depends on ISA && HAS_IOPORT_MAP select I2C_ALGOPCF help This supports the PCF8584 ISA bus I2C adapter. Say Y if you own diff --git a/drivers/i2c/busses/i2c-amd-mp2-pci.c b/drivers/i2c/busses/i2c-amd-mp2-pci.c index ef7370d3dbea..60edbabc2986 100644 --- a/drivers/i2c/busses/i2c-amd-mp2-pci.c +++ b/drivers/i2c/busses/i2c-amd-mp2-pci.c @@ -458,13 +458,16 @@ struct amd_mp2_dev *amd_mp2_find_device(void) { struct device *dev; struct pci_dev *pci_dev; + struct amd_mp2_dev *mp2_dev; dev = driver_find_next_device(&amd_mp2_pci_driver.driver, NULL); if (!dev) return NULL; pci_dev = to_pci_dev(dev); - return (struct amd_mp2_dev *)pci_get_drvdata(pci_dev); + mp2_dev = (struct amd_mp2_dev *)pci_get_drvdata(pci_dev); + put_device(dev); + return mp2_dev; } EXPORT_SYMBOL_GPL(amd_mp2_find_device); diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c index 8554e790f8e3..0d7e2654a534 100644 --- a/drivers/i2c/busses/i2c-bcm2835.c +++ b/drivers/i2c/busses/i2c-bcm2835.c @@ -137,12 +137,14 @@ static int clk_bcm2835_i2c_set_rate(struct clk_hw *hw, unsigned long rate, return 0; } -static long clk_bcm2835_i2c_round_rate(struct clk_hw *hw, unsigned long rate, - unsigned long *parent_rate) +static int clk_bcm2835_i2c_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) { - u32 divider = clk_bcm2835_i2c_calc_divider(rate, *parent_rate); + u32 divider = clk_bcm2835_i2c_calc_divider(req->rate, req->best_parent_rate); - return DIV_ROUND_UP(*parent_rate, divider); + req->rate = DIV_ROUND_UP(req->best_parent_rate, divider); + + return 0; } static unsigned long clk_bcm2835_i2c_recalc_rate(struct clk_hw *hw, @@ -156,7 +158,7 @@ static unsigned long clk_bcm2835_i2c_recalc_rate(struct clk_hw *hw, static const struct clk_ops clk_bcm2835_i2c_ops = { .set_rate = clk_bcm2835_i2c_set_rate, - .round_rate = clk_bcm2835_i2c_round_rate, + .determine_rate = clk_bcm2835_i2c_determine_rate, .recalc_rate = clk_bcm2835_i2c_recalc_rate, }; diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h index 
347843b4f5dd..bb5ce0a382f9 100644 --- a/drivers/i2c/busses/i2c-designware-core.h +++ b/drivers/i2c/busses/i2c-designware-core.h @@ -78,6 +78,7 @@ #define DW_IC_TX_ABRT_SOURCE 0x80 #define DW_IC_ENABLE_STATUS 0x9c #define DW_IC_CLR_RESTART_DET 0xa8 +#define DW_IC_SMBUS_INTR_MASK 0xcc #define DW_IC_COMP_PARAM_1 0xf4 #define DW_IC_COMP_VERSION 0xf8 #define DW_IC_SDA_HOLD_MIN_VERS 0x3131312A /* "111*" == v1.11* */ @@ -330,7 +331,6 @@ struct dw_i2c_dev { struct i2c_dw_semaphore_callbacks { int (*probe)(struct dw_i2c_dev *dev); - void (*remove)(struct dw_i2c_dev *dev); }; int i2c_dw_init_regmap(struct dw_i2c_dev *dev); diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index 41e9b5ecad20..45bfca05bb30 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c @@ -220,6 +220,13 @@ static int i2c_dw_init_master(struct dw_i2c_dev *dev) /* Disable the adapter */ __i2c_dw_disable(dev); + /* + * Mask SMBus interrupts to block storms from broken + * firmware that leaves IC_SMBUS=1; the handler never + * services them. + */ + regmap_write(dev->map, DW_IC_SMBUS_INTR_MASK, 0); + /* Write standard speed timing parameters */ regmap_write(dev->map, DW_IC_SS_SCL_HCNT, dev->ss_hcnt); regmap_write(dev->map, DW_IC_SS_SCL_LCNT, dev->ss_lcnt); diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 34d881572351..7be99656a67d 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -197,15 +197,6 @@ static int i2c_dw_probe_lock_support(struct dw_i2c_dev *dev) return 0; } -static void i2c_dw_remove_lock_support(struct dw_i2c_dev *dev) -{ - if (dev->semaphore_idx < 0) - return; - - if (i2c_dw_semaphore_cb_table[dev->semaphore_idx].remove) - i2c_dw_semaphore_cb_table[dev->semaphore_idx].remove(dev); -} - static int dw_i2c_plat_probe(struct platform_device *pdev) { u32 flags = (uintptr_t)device_get_match_data(&pdev->dev); @@ -248,7 +239,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) ret = i2c_dw_probe_lock_support(dev); if (ret) { - ret = dev_err_probe(device, ret, "failed to probe lock support\n"); + dev_err_probe(device, ret, "failed to probe lock support\n"); goto exit_reset; } @@ -339,8 +330,6 @@ static void dw_i2c_plat_remove(struct platform_device *pdev) i2c_dw_prepare_clk(dev, false); - i2c_dw_remove_lock_support(dev); - reset_control_assert(dev->rst); } diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 57fbec1259be..81e6e2d7ad3d 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -84,6 +84,7 @@ * Panther Lake-H (SOC) 0xe322 32 hard yes yes yes * Panther Lake-P (SOC) 0xe422 32 hard yes yes yes * Wildcat Lake-U (SOC) 0x4d22 32 hard yes yes yes + * Diamond Rapids (SOC) 0x5827 32 hard yes yes yes * * Features supported by this driver: * Software PEC no @@ -242,6 +243,7 @@ #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS 0x51a3 #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_M_SMBUS 0x54a3 #define PCI_DEVICE_ID_INTEL_BIRCH_STREAM_SMBUS 0x5796 +#define PCI_DEVICE_ID_INTEL_DIAMOND_RAPIDS_SMBUS 0x5827 #define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS 0x5ad4 #define PCI_DEVICE_ID_INTEL_ARROW_LAKE_H_SMBUS 0x7722 #define PCI_DEVICE_ID_INTEL_RAPTOR_LAKE_S_SMBUS 0x7a23 @@ -1054,6 +1056,7 @@ static const struct pci_device_id i801_ids[] = { { PCI_DEVICE_DATA(INTEL, METEOR_LAKE_SOC_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, { PCI_DEVICE_DATA(INTEL, 
METEOR_LAKE_PCH_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, { PCI_DEVICE_DATA(INTEL, BIRCH_STREAM_SMBUS, FEATURES_ICH5) }, + { PCI_DEVICE_DATA(INTEL, DIAMOND_RAPIDS_SMBUS, FEATURES_ICH5) }, { PCI_DEVICE_DATA(INTEL, ARROW_LAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, { PCI_DEVICE_DATA(INTEL, PANTHER_LAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, { PCI_DEVICE_DATA(INTEL, PANTHER_LAKE_P_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) }, diff --git a/drivers/i2c/busses/i2c-k1.c b/drivers/i2c/busses/i2c-k1.c index 6b918770e612..d42c03ef5db5 100644 --- a/drivers/i2c/busses/i2c-k1.c +++ b/drivers/i2c/busses/i2c-k1.c @@ -158,11 +158,16 @@ static int spacemit_i2c_handle_err(struct spacemit_i2c_dev *i2c) { dev_dbg(i2c->dev, "i2c error status: 0x%08x\n", i2c->status); - if (i2c->status & (SPACEMIT_SR_BED | SPACEMIT_SR_ALD)) { + /* Arbitration Loss Detected */ + if (i2c->status & SPACEMIT_SR_ALD) { spacemit_i2c_reset(i2c); return -EAGAIN; } + /* Bus Error No ACK/NAK */ + if (i2c->status & SPACEMIT_SR_BED) + spacemit_i2c_reset(i2c); + return i2c->status & SPACEMIT_SR_ACKNAK ? -ENXIO : -EIO; } @@ -224,6 +229,12 @@ static void spacemit_i2c_check_bus_release(struct spacemit_i2c_dev *i2c) } } +static inline void +spacemit_i2c_clear_int_status(struct spacemit_i2c_dev *i2c, u32 mask) +{ + writel(mask & SPACEMIT_I2C_INT_STATUS_MASK, i2c->base + SPACEMIT_ISR); +} + static void spacemit_i2c_init(struct spacemit_i2c_dev *i2c) { u32 val; @@ -267,12 +278,8 @@ static void spacemit_i2c_init(struct spacemit_i2c_dev *i2c) val = readl(i2c->base + SPACEMIT_IRCR); val |= SPACEMIT_RCR_SDA_GLITCH_NOFIX; writel(val, i2c->base + SPACEMIT_IRCR); -} -static inline void -spacemit_i2c_clear_int_status(struct spacemit_i2c_dev *i2c, u32 mask) -{ - writel(mask & SPACEMIT_I2C_INT_STATUS_MASK, i2c->base + SPACEMIT_ISR); + spacemit_i2c_clear_int_status(i2c, SPACEMIT_I2C_INT_STATUS_MASK); } static void spacemit_i2c_start(struct spacemit_i2c_dev *i2c) diff --git a/drivers/i2c/busses/i2c-qcom-cci.c b/drivers/i2c/busses/i2c-qcom-cci.c index e631d79baf14..884055df1560 100644 --- a/drivers/i2c/busses/i2c-qcom-cci.c +++ b/drivers/i2c/busses/i2c-qcom-cci.c @@ -783,8 +783,54 @@ static const struct cci_data cci_v2_data = { }, }; +static const struct cci_data cci_msm8953_data = { + .num_masters = 2, + .queue_size = { 64, 16 }, + .quirks = { + .max_write_len = 11, + .max_read_len = 12, + }, + .params[I2C_MODE_STANDARD] = { + .thigh = 78, + .tlow = 114, + .tsu_sto = 28, + .tsu_sta = 28, + .thd_dat = 10, + .thd_sta = 77, + .tbuf = 118, + .scl_stretch_en = 0, + .trdhld = 6, + .tsp = 1 + }, + .params[I2C_MODE_FAST] = { + .thigh = 20, + .tlow = 28, + .tsu_sto = 21, + .tsu_sta = 21, + .thd_dat = 13, + .thd_sta = 18, + .tbuf = 32, + .scl_stretch_en = 0, + .trdhld = 6, + .tsp = 3 + }, + .params[I2C_MODE_FAST_PLUS] = { + .thigh = 16, + .tlow = 22, + .tsu_sto = 17, + .tsu_sta = 18, + .thd_dat = 16, + .thd_sta = 15, + .tbuf = 19, + .scl_stretch_en = 1, + .trdhld = 3, + .tsp = 3 + }, +}; + static const struct of_device_id cci_dt_match[] = { { .compatible = "qcom,msm8226-cci", .data = &cci_v1_data}, + { .compatible = "qcom,msm8953-cci", .data = &cci_msm8953_data}, { .compatible = "qcom,msm8974-cci", .data = &cci_v1_5_data}, { .compatible = "qcom,msm8996-cci", .data = &cci_v2_data}, diff --git a/drivers/i2c/busses/i2c-stm32.c b/drivers/i2c/busses/i2c-stm32.c index f84ec056e36d..becf8977979f 100644 --- a/drivers/i2c/busses/i2c-stm32.c +++ b/drivers/i2c/busses/i2c-stm32.c @@ -27,8 +27,8 @@ struct stm32_i2c_dma *stm32_i2c_dma_request(struct device *dev, if 
(IS_ERR(dma->chan_tx)) { ret = PTR_ERR(dma->chan_tx); if (ret != -ENODEV) - ret = dev_err_probe(dev, ret, - "can't request DMA tx channel\n"); + dev_err_probe(dev, ret, "can't request DMA tx channel\n"); + goto fail_al; } @@ -48,8 +48,7 @@ struct stm32_i2c_dma *stm32_i2c_dma_request(struct device *dev, if (IS_ERR(dma->chan_rx)) { ret = PTR_ERR(dma->chan_rx); if (ret != -ENODEV) - ret = dev_err_probe(dev, ret, - "can't request DMA rx channel\n"); + dev_err_probe(dev, ret, "can't request DMA rx channel\n"); goto fail_tx; } diff --git a/drivers/input/misc/qnap-mcu-input.c b/drivers/input/misc/qnap-mcu-input.c index 76e62f0816c1..3be899bfc114 100644 --- a/drivers/input/misc/qnap-mcu-input.c +++ b/drivers/input/misc/qnap-mcu-input.c @@ -103,7 +103,7 @@ static int qnap_mcu_input_probe(struct platform_device *pdev) input = devm_input_allocate_device(dev); if (!input) - return dev_err_probe(dev, -ENOMEM, "no memory for input device\n"); + return -ENOMEM; idev->input = input; idev->dev = dev; diff --git a/drivers/input/touchscreen/cyttsp5.c b/drivers/input/touchscreen/cyttsp5.c index 071b7c9bf566..47f4271395a6 100644 --- a/drivers/input/touchscreen/cyttsp5.c +++ b/drivers/input/touchscreen/cyttsp5.c @@ -923,8 +923,8 @@ static int cyttsp5_i2c_probe(struct i2c_client *client) regmap = devm_regmap_init_i2c(client, &config); if (IS_ERR(regmap)) { - dev_err(&client->dev, "regmap allocation failed: %ld\n", - PTR_ERR(regmap)); + dev_err(&client->dev, "regmap allocation failed: %pe\n", + regmap); return PTR_ERR(regmap); } diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c index 93d659ff90aa..d6edfab16770 100644 --- a/drivers/input/touchscreen/ti_am335x_tsc.c +++ b/drivers/input/touchscreen/ti_am335x_tsc.c @@ -389,6 +389,10 @@ static int titsc_parse_dt(struct platform_device *pdev, dev_warn(&pdev->dev, "invalid co-ordinate readouts, resetting it to 5\n"); ts_dev->coordinate_readouts = 5; + } else if (ts_dev->coordinate_readouts > 6) { + dev_warn(&pdev->dev, + "co-ordinate readouts too large, limiting to 6\n"); + ts_dev->coordinate_readouts = 6; } err = of_property_read_u32(node, "ti,charge-delay", diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c index df42fdf36ae3..a360749fa076 100644 --- a/drivers/input/touchscreen/zforce_ts.c +++ b/drivers/input/touchscreen/zforce_ts.c @@ -747,8 +747,7 @@ static int zforce_probe(struct i2c_client *client) input_dev = devm_input_allocate_device(&client->dev); if (!input_dev) - return dev_err_probe(&client->dev, -ENOMEM, - "could not allocate input device\n"); + return -ENOMEM; ts->client = client; ts->input = input_dev; diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index 104aa5355090..239c1744a926 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -299,6 +299,7 @@ config DM_CRYPT select CRYPTO select CRYPTO_CBC select CRYPTO_ESSIV + select CRYPTO_LIB_MD5 # needed by lmk IV mode help This device-mapper target allows you to create a device that transparently encrypts the data on it. 
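(Illustrative aside; none of the code below is from the series itself.) Several probe-path hunks in this area drop the "ret = dev_err_probe(...)" reassignment, or return -ENOMEM directly instead of wrapping it in dev_err_probe(). The point is that dev_err_probe() returns exactly the error code it is handed (logging it, or for -EPROBE_DEFER only recording the deferral reason), so assigning its result back to a variable that already holds that error changes nothing, and an extra message for a plain allocation failure adds no information. A minimal sketch of the resulting idiom, where foo_get_clock() is a made-up stand-in for any acquisition helper:

// SPDX-License-Identifier: GPL-2.0
/*
 * Sketch only: demonstrates dev_err_probe()'s return-value semantics.
 * foo_get_clock() is invented for the example and simply fails.
 */
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/platform_device.h>

static int foo_get_clock(struct device *dev)
{
	return -ENODEV;		/* pretend the clock is missing */
}

static int foo_probe(struct platform_device *pdev)
{
	int ret = foo_get_clock(&pdev->dev);

	if (ret) {
		/* ret already holds the error; no reassignment needed. */
		dev_err_probe(&pdev->dev, ret, "failed to get clock\n");
		return ret;
	}

	return 0;
}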
You'll need to activate @@ -546,6 +547,7 @@ config DM_VERITY depends on BLK_DEV_DM select CRYPTO select CRYPTO_HASH + select CRYPTO_LIB_SHA256 select DM_BUFIO help This device-mapper target creates a read-only device that diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index af345dc6fde1..82fdea7dea7a 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -1104,7 +1104,7 @@ static void detached_dev_end_io(struct bio *bio) } kfree(ddip); - bio->bi_end_io(bio); + bio_endio(bio); } static void detached_dev_do_request(struct bcache_device *d, struct bio *bio, @@ -1121,7 +1121,7 @@ static void detached_dev_do_request(struct bcache_device *d, struct bio *bio, ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO); if (!ddip) { bio->bi_status = BLK_STS_RESOURCE; - bio->bi_end_io(bio); + bio_endio(bio); return; } @@ -1136,7 +1136,7 @@ static void detached_dev_do_request(struct bcache_device *d, struct bio *bio, if ((bio_op(bio) == REQ_OP_DISCARD) && !bdev_max_discard_sectors(dc->bdev)) - bio->bi_end_io(bio); + detached_dev_end_io(bio); else submit_bio_noacct(bio); } diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index e6d28be11c5c..5235f3e4924b 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -1374,7 +1374,7 @@ static void submit_io(struct dm_buffer *b, enum req_op op, unsigned short ioprio { unsigned int n_sectors; sector_t sector; - unsigned int offset, end; + unsigned int offset, end, align; b->end_io = end_io; @@ -1388,9 +1388,11 @@ static void submit_io(struct dm_buffer *b, enum req_op op, unsigned short ioprio b->c->write_callback(b); offset = b->write_start; end = b->write_end; - offset &= -DM_BUFIO_WRITE_ALIGN; - end += DM_BUFIO_WRITE_ALIGN - 1; - end &= -DM_BUFIO_WRITE_ALIGN; + align = max(DM_BUFIO_WRITE_ALIGN, + bdev_physical_block_size(b->c->bdev)); + offset &= -align; + end += align - 1; + end &= -align; if (unlikely(end > b->c->block_size)) end = b->c->block_size; diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index a3c9f74fe2dc..1cda8618d74d 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -139,7 +139,6 @@ struct mapped_device { struct srcu_struct io_barrier; #ifdef CONFIG_BLK_DEV_ZONED - unsigned int nr_zones; void *zone_revalidate_map; struct task_struct *revalidate_map_task; #endif diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 5ef43231fe77..79704fbc523b 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -21,6 +21,7 @@ #include <linux/mempool.h> #include <linux/slab.h> #include <linux/crypto.h> +#include <linux/fips.h> #include <linux/workqueue.h> #include <linux/kthread.h> #include <linux/backing-dev.h> @@ -120,7 +121,6 @@ struct iv_benbi_private { #define LMK_SEED_SIZE 64 /* hash + 0 */ struct iv_lmk_private { - struct crypto_shash *hash_tfm; u8 *seed; }; @@ -254,22 +254,15 @@ static unsigned int max_write_size = 0; module_param(max_write_size, uint, 0644); MODULE_PARM_DESC(max_write_size, "Maximum size of a write request"); -static unsigned get_max_request_sectors(struct dm_target *ti, struct bio *bio) +static unsigned get_max_request_sectors(struct dm_target *ti, struct bio *bio, bool no_split) { struct crypt_config *cc = ti->private; unsigned val, sector_align; bool wrt = op_is_write(bio_op(bio)); - if (wrt) { - /* - * For zoned devices, splitting write operations creates the - * risk of deadlocking queue freeze operations with zone write - * plugging BIO work when the reminder of a split BIO is - * issued. 
So always allow the entire BIO to proceed. - */ - if (ti->emulate_zone_append) - return bio_sectors(bio); - + if (no_split) { + val = -1; + } else if (wrt) { val = min_not_zero(READ_ONCE(max_write_size), DM_CRYPT_DEFAULT_MAX_WRITE_SIZE); } else { @@ -465,10 +458,6 @@ static void crypt_iv_lmk_dtr(struct crypt_config *cc) { struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk; - if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm)) - crypto_free_shash(lmk->hash_tfm); - lmk->hash_tfm = NULL; - kfree_sensitive(lmk->seed); lmk->seed = NULL; } @@ -483,11 +472,10 @@ static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti, return -EINVAL; } - lmk->hash_tfm = crypto_alloc_shash("md5", 0, - CRYPTO_ALG_ALLOCATES_MEMORY); - if (IS_ERR(lmk->hash_tfm)) { - ti->error = "Error initializing LMK hash"; - return PTR_ERR(lmk->hash_tfm); + if (fips_enabled) { + ti->error = "LMK support is disabled due to FIPS"; + /* ... because it uses MD5. */ + return -EINVAL; } /* No seed in LMK version 2 */ @@ -498,7 +486,6 @@ static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti, lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL); if (!lmk->seed) { - crypt_iv_lmk_dtr(cc); ti->error = "Error kmallocing seed storage in LMK"; return -ENOMEM; } @@ -514,7 +501,7 @@ static int crypt_iv_lmk_init(struct crypt_config *cc) /* LMK seed is on the position of LMK_KEYS + 1 key */ if (lmk->seed) memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size), - crypto_shash_digestsize(lmk->hash_tfm)); + MD5_DIGEST_SIZE); return 0; } @@ -529,55 +516,31 @@ static int crypt_iv_lmk_wipe(struct crypt_config *cc) return 0; } -static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv, - struct dm_crypt_request *dmreq, - u8 *data) +static void crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv, + struct dm_crypt_request *dmreq, u8 *data) { struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk; - SHASH_DESC_ON_STACK(desc, lmk->hash_tfm); - union { - struct md5_state md5state; - u8 state[CRYPTO_MD5_STATESIZE]; - } u; + struct md5_ctx ctx; __le32 buf[4]; - int i, r; - desc->tfm = lmk->hash_tfm; + md5_init(&ctx); - r = crypto_shash_init(desc); - if (r) - return r; - - if (lmk->seed) { - r = crypto_shash_update(desc, lmk->seed, LMK_SEED_SIZE); - if (r) - return r; - } + if (lmk->seed) + md5_update(&ctx, lmk->seed, LMK_SEED_SIZE); /* Sector is always 512B, block size 16, add data of blocks 1-31 */ - r = crypto_shash_update(desc, data + 16, 16 * 31); - if (r) - return r; + md5_update(&ctx, data + 16, 16 * 31); /* Sector is cropped to 56 bits here */ buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF); buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000); buf[2] = cpu_to_le32(4024); buf[3] = 0; - r = crypto_shash_update(desc, (u8 *)buf, sizeof(buf)); - if (r) - return r; + md5_update(&ctx, (u8 *)buf, sizeof(buf)); /* No MD5 padding here */ - r = crypto_shash_export(desc, &u.md5state); - if (r) - return r; - - for (i = 0; i < MD5_HASH_WORDS; i++) - __cpu_to_le32s(&u.md5state.hash[i]); - memcpy(iv, &u.md5state.hash, cc->iv_size); - - return 0; + cpu_to_le32_array(ctx.state.h, ARRAY_SIZE(ctx.state.h)); + memcpy(iv, ctx.state.h, cc->iv_size); } static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv, @@ -585,17 +548,15 @@ static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv, { struct scatterlist *sg; u8 *src; - int r = 0; if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) { sg = crypt_get_sg_data(cc, dmreq->sg_in); src = kmap_local_page(sg_page(sg)); - r = crypt_iv_lmk_one(cc, iv, dmreq, src + 
sg->offset); + crypt_iv_lmk_one(cc, iv, dmreq, src + sg->offset); kunmap_local(src); } else memset(iv, 0, cc->iv_size); - - return r; + return 0; } static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv, @@ -603,21 +564,19 @@ static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv, { struct scatterlist *sg; u8 *dst; - int r; if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) return 0; sg = crypt_get_sg_data(cc, dmreq->sg_out); dst = kmap_local_page(sg_page(sg)); - r = crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset); + crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset); /* Tweak the first block of plaintext sector */ - if (!r) - crypto_xor(dst + sg->offset, iv, cc->iv_size); + crypto_xor(dst + sg->offset, iv, cc->iv_size); kunmap_local(dst); - return r; + return 0; } static void crypt_iv_tcw_dtr(struct crypt_config *cc) @@ -1781,7 +1740,7 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone) bio_for_each_folio_all(fi, clone) { if (folio_test_large(fi.folio)) { percpu_counter_sub(&cc->n_allocated_pages, - 1 << folio_order(fi.folio)); + folio_nr_pages(fi.folio)); folio_put(fi.folio); } else { mempool_free(&fi.folio->page, &cc->page_pool); @@ -3496,6 +3455,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio) struct dm_crypt_io *io; struct crypt_config *cc = ti->private; unsigned max_sectors; + bool no_split; /* * If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues. @@ -3513,10 +3473,20 @@ static int crypt_map(struct dm_target *ti, struct bio *bio) /* * Check if bio is too large, split as needed. + * + * For zoned devices, splitting write operations creates the + * risk of deadlocking queue freeze operations with zone write + * plugging BIO work when the reminder of a split BIO is + * issued. So always allow the entire BIO to proceed. 
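(Illustrative aside.) The LMK hunks above drop the crypto_shash MD5 transform and call the MD5 library interface directly: a struct md5_ctx on the stack, md5_init()/md5_update(), then the raw unpadded state words taken straight from ctx.state.h and byte-swapped with cpu_to_le32_array(). A standalone sketch of that call pattern follows; it assumes the declarations come from <crypto/md5.h> (as the CRYPTO_LIB_MD5 select suggests) and writes into an arbitrary 16-byte buffer rather than the caller-supplied IV:

// SPDX-License-Identifier: GPL-2.0
/*
 * Sketch of the library-style MD5 usage seen in crypt_iv_lmk_one(),
 * not the driver code itself.  The header location is an assumption.
 */
#include <crypto/md5.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <asm/byteorder.h>

static void sketch_unpadded_md5(const u8 *data, size_t len, u8 out[16])
{
	struct md5_ctx ctx;

	md5_init(&ctx);
	md5_update(&ctx, data, len);

	/*
	 * As in the hunk above, md5_final() is deliberately skipped:
	 * no padding block is mixed in, the intermediate state itself
	 * is the result ("No MD5 padding here").
	 */
	cpu_to_le32_array(ctx.state.h, ARRAY_SIZE(ctx.state.h));
	memcpy(out, ctx.state.h, 16);
}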
*/ - max_sectors = get_max_request_sectors(ti, bio); - if (unlikely(bio_sectors(bio) > max_sectors)) + no_split = (ti->emulate_zone_append && op_is_write(bio_op(bio))) || + (bio->bi_opf & REQ_ATOMIC); + max_sectors = get_max_request_sectors(ti, bio, no_split); + if (unlikely(bio_sectors(bio) > max_sectors)) { + if (unlikely(no_split)) + return DM_MAPIO_KILL; dm_accept_partial_bio(bio, max_sectors); + } /* * Ensure that bio is a multiple of internal sector encryption size @@ -3762,15 +3732,20 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits) if (ti->emulate_zone_append) limits->max_hw_sectors = min(limits->max_hw_sectors, BIO_MAX_VECS << PAGE_SECTORS_SHIFT); + + limits->atomic_write_hw_unit_max = min(limits->atomic_write_hw_unit_max, + BIO_MAX_VECS << PAGE_SHIFT); + limits->atomic_write_hw_max = min(limits->atomic_write_hw_max, + BIO_MAX_VECS << PAGE_SHIFT); } static struct target_type crypt_target = { .name = "crypt", - .version = {1, 28, 0}, + .version = {1, 29, 0}, .module = THIS_MODULE, .ctr = crypt_ctr, .dtr = crypt_dtr, - .features = DM_TARGET_ZONED_HM, + .features = DM_TARGET_ZONED_HM | DM_TARGET_ATOMIC_WRITES, .report_zones = crypt_report_zones, .map = crypt_map, .status = crypt_status, diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c index 6abb31ca9662..b354e74a670e 100644 --- a/drivers/md/dm-ebs-target.c +++ b/drivers/md/dm-ebs-target.c @@ -103,7 +103,7 @@ static int __ebs_rw_bvec(struct ebs_c *ec, enum req_op op, struct bio_vec *bv, } else { flush_dcache_page(bv->bv_page); memcpy(ba, pa, cur_len); - dm_bufio_mark_partial_buffer_dirty(b, buf_off, buf_off + cur_len); + dm_bufio_mark_buffer_dirty(b); } dm_bufio_release(b); diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h index b67976637538..061b4d310813 100644 --- a/drivers/md/dm-exception-store.h +++ b/drivers/md/dm-exception-store.h @@ -29,7 +29,7 @@ typedef sector_t chunk_t; * chunk within the device. */ struct dm_exception { - struct hlist_bl_node hash_list; + struct hlist_node hash_list; chunk_t old_chunk; chunk_t new_chunk; diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c index 7bb7174f8f4f..f0c84e7a5daa 100644 --- a/drivers/md/dm-log-writes.c +++ b/drivers/md/dm-log-writes.c @@ -432,6 +432,7 @@ static int log_writes_kthread(void *arg) struct log_writes_c *lc = arg; sector_t sector = 0; + set_freezable(); while (!kthread_should_stop()) { bool super = false; bool logging_enabled; diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index aaf4a0a4b0eb..c18358271618 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ -131,7 +131,7 @@ static void queue_if_no_path_timeout_work(struct timer_list *t); #define MPATHF_QUEUE_IO 0 /* Must we queue all I/O? */ #define MPATHF_QUEUE_IF_NO_PATH 1 /* Queue I/O if last path fails? */ #define MPATHF_SAVED_QUEUE_IF_NO_PATH 2 /* Saved state during suspension */ -#define MPATHF_RETAIN_ATTACHED_HW_HANDLER 3 /* If there's already a hw_handler present, don't change it. */ +/* MPATHF_RETAIN_ATTACHED_HW_HANDLER no longer has any effect */ #define MPATHF_PG_INIT_DISABLED 4 /* pg_init is not currently allowed */ #define MPATHF_PG_INIT_REQUIRED 5 /* pg_init needs calling? */ #define MPATHF_PG_INIT_DELAY_RETRY 6 /* Delay pg_init retry? 
*/ @@ -237,16 +237,10 @@ static struct multipath *alloc_multipath(struct dm_target *ti) static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m) { - if (m->queue_mode == DM_TYPE_NONE) { + if (m->queue_mode == DM_TYPE_NONE) m->queue_mode = DM_TYPE_REQUEST_BASED; - } else if (m->queue_mode == DM_TYPE_BIO_BASED) { + else if (m->queue_mode == DM_TYPE_BIO_BASED) INIT_WORK(&m->process_queued_bios, process_queued_bios); - /* - * bio-based doesn't support any direct scsi_dh management; - * it just discovers if a scsi_dh is attached. - */ - set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags); - } dm_table_set_type(ti->table, m->queue_mode); @@ -887,36 +881,30 @@ static int setup_scsi_dh(struct block_device *bdev, struct multipath *m, struct request_queue *q = bdev_get_queue(bdev); int r; - if (mpath_double_check_test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, m)) { -retain: - if (*attached_handler_name) { - /* - * Clear any hw_handler_params associated with a - * handler that isn't already attached. - */ - if (m->hw_handler_name && strcmp(*attached_handler_name, m->hw_handler_name)) { - kfree(m->hw_handler_params); - m->hw_handler_params = NULL; - } - - /* - * Reset hw_handler_name to match the attached handler - * - * NB. This modifies the table line to show the actual - * handler instead of the original table passed in. - */ - kfree(m->hw_handler_name); - m->hw_handler_name = *attached_handler_name; - *attached_handler_name = NULL; + if (*attached_handler_name) { + /* + * Clear any hw_handler_params associated with a + * handler that isn't already attached. + */ + if (m->hw_handler_name && strcmp(*attached_handler_name, + m->hw_handler_name)) { + kfree(m->hw_handler_params); + m->hw_handler_params = NULL; } + + /* + * Reset hw_handler_name to match the attached handler + * + * NB. This modifies the table line to show the actual + * handler instead of the original table passed in. 
+ */ + kfree(m->hw_handler_name); + m->hw_handler_name = *attached_handler_name; + *attached_handler_name = NULL; } if (m->hw_handler_name) { r = scsi_dh_attach(q, m->hw_handler_name); - if (r == -EBUSY) { - DMINFO("retaining handler on device %pg", bdev); - goto retain; - } if (r < 0) { *error = "error attaching hardware handler"; return r; @@ -1138,7 +1126,7 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m) } if (!strcasecmp(arg_name, "retain_attached_hw_handler")) { - set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags); + /* no longer has any effect */ continue; } @@ -1823,7 +1811,6 @@ static void multipath_status(struct dm_target *ti, status_type_t type, DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) + (m->pg_init_retries > 0) * 2 + (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 + - test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) + (m->queue_mode != DM_TYPE_REQUEST_BASED) * 2); if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) @@ -1832,8 +1819,6 @@ static void multipath_status(struct dm_target *ti, status_type_t type, DMEMIT("pg_init_retries %u ", m->pg_init_retries); if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs); - if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) - DMEMIT("retain_attached_hw_handler "); if (m->queue_mode != DM_TYPE_REQUEST_BASED) { switch (m->queue_mode) { case DM_TYPE_BIO_BASED: @@ -2307,7 +2292,7 @@ static struct target_type multipath_target = { .name = "multipath", .version = {1, 15, 0}, .features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE | - DM_TARGET_PASSES_INTEGRITY, + DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ATOMIC_WRITES, .module = THIS_MODULE, .ctr = multipath_ctr, .dtr = multipath_dtr, diff --git a/drivers/md/dm-pcache/cache.c b/drivers/md/dm-pcache/cache.c index 698697a7a73c..534bf07b794f 100644 --- a/drivers/md/dm-pcache/cache.c +++ b/drivers/md/dm-pcache/cache.c @@ -10,7 +10,8 @@ struct kmem_cache *key_cache; static inline struct pcache_cache_info *get_cache_info_addr(struct pcache_cache *cache) { - return cache->cache_info_addr + cache->info_index; + return (struct pcache_cache_info *)((char *)cache->cache_info_addr + + (size_t)cache->info_index * PCACHE_CACHE_INFO_SIZE); } static void cache_info_write(struct pcache_cache *cache) @@ -21,10 +22,10 @@ static void cache_info_write(struct pcache_cache *cache) cache_info->header.crc = pcache_meta_crc(&cache_info->header, sizeof(struct pcache_cache_info)); + cache->info_index = (cache->info_index + 1) % PCACHE_META_INDEX_MAX; memcpy_flushcache(get_cache_info_addr(cache), cache_info, sizeof(struct pcache_cache_info)); - - cache->info_index = (cache->info_index + 1) % PCACHE_META_INDEX_MAX; + pmem_wmb(); } static void cache_info_init_default(struct pcache_cache *cache); @@ -49,6 +50,8 @@ static int cache_info_init(struct pcache_cache *cache, struct pcache_cache_optio return -EINVAL; } + cache->info_index = ((char *)cache_info_addr - (char *)cache->cache_info_addr) / PCACHE_CACHE_INFO_SIZE; + return 0; } @@ -93,10 +96,10 @@ void cache_pos_encode(struct pcache_cache *cache, pos_onmedia.header.seq = seq; pos_onmedia.header.crc = cache_pos_onmedia_crc(&pos_onmedia); + *index = (*index + 1) % PCACHE_META_INDEX_MAX; + memcpy_flushcache(pos_onmedia_addr, &pos_onmedia, sizeof(struct pcache_cache_pos_onmedia)); pmem_wmb(); - - *index = (*index + 1) % PCACHE_META_INDEX_MAX; } int cache_pos_decode(struct pcache_cache *cache, diff --git a/drivers/md/dm-pcache/cache_segment.c 
b/drivers/md/dm-pcache/cache_segment.c index f0b58980806e..9d92e2b067ed 100644 --- a/drivers/md/dm-pcache/cache_segment.c +++ b/drivers/md/dm-pcache/cache_segment.c @@ -26,11 +26,11 @@ static void cache_seg_info_write(struct pcache_cache_segment *cache_seg) seg_info->header.seq++; seg_info->header.crc = pcache_meta_crc(&seg_info->header, sizeof(struct pcache_segment_info)); + cache_seg->info_index = (cache_seg->info_index + 1) % PCACHE_META_INDEX_MAX; + seg_info_addr = get_seg_info_addr(cache_seg); memcpy_flushcache(seg_info_addr, seg_info, sizeof(struct pcache_segment_info)); pmem_wmb(); - - cache_seg->info_index = (cache_seg->info_index + 1) % PCACHE_META_INDEX_MAX; mutex_unlock(&cache_seg->info_lock); } @@ -56,7 +56,10 @@ static int cache_seg_info_load(struct pcache_cache_segment *cache_seg) ret = -EIO; goto out; } - cache_seg->info_index = cache_seg_info_addr - cache_seg_info_addr_base; + + cache_seg->info_index = + ((char *)cache_seg_info_addr - (char *)cache_seg_info_addr_base) / + PCACHE_SEG_INFO_SIZE; out: mutex_unlock(&cache_seg->info_lock); @@ -129,10 +132,10 @@ static void cache_seg_ctrl_write(struct pcache_cache_segment *cache_seg) cache_seg_gen.header.crc = pcache_meta_crc(&cache_seg_gen.header, sizeof(struct pcache_cache_seg_gen)); + cache_seg->gen_index = (cache_seg->gen_index + 1) % PCACHE_META_INDEX_MAX; + memcpy_flushcache(get_cache_seg_gen_addr(cache_seg), &cache_seg_gen, sizeof(struct pcache_cache_seg_gen)); pmem_wmb(); - - cache_seg->gen_index = (cache_seg->gen_index + 1) % PCACHE_META_INDEX_MAX; } static void cache_seg_ctrl_init(struct pcache_cache_segment *cache_seg) diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index c6f7129e43d3..4bacdc499984 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -2287,6 +2287,8 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev) mddev->reshape_position = le64_to_cpu(sb->reshape_position); rs->raid_type = get_raid_type_by_ll(mddev->level, mddev->layout); + if (!rs->raid_type) + return -EINVAL; } } else { diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index f40c18da4000..dbd148967de4 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -40,10 +40,15 @@ static const char dm_snapshot_merge_target_name[] = "snapshot-merge"; #define DM_TRACKED_CHUNK_HASH(x) ((unsigned long)(x) & \ (DM_TRACKED_CHUNK_HASH_SIZE - 1)) +struct dm_hlist_head { + struct hlist_head head; + spinlock_t lock; +}; + struct dm_exception_table { uint32_t hash_mask; unsigned int hash_shift; - struct hlist_bl_head *table; + struct dm_hlist_head *table; }; struct dm_snapshot { @@ -628,8 +633,8 @@ static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk); /* Lock to protect access to the completed and pending exception hash tables. 
*/ struct dm_exception_table_lock { - struct hlist_bl_head *complete_slot; - struct hlist_bl_head *pending_slot; + spinlock_t *complete_slot; + spinlock_t *pending_slot; }; static void dm_exception_table_lock_init(struct dm_snapshot *s, chunk_t chunk, @@ -638,20 +643,20 @@ static void dm_exception_table_lock_init(struct dm_snapshot *s, chunk_t chunk, struct dm_exception_table *complete = &s->complete; struct dm_exception_table *pending = &s->pending; - lock->complete_slot = &complete->table[exception_hash(complete, chunk)]; - lock->pending_slot = &pending->table[exception_hash(pending, chunk)]; + lock->complete_slot = &complete->table[exception_hash(complete, chunk)].lock; + lock->pending_slot = &pending->table[exception_hash(pending, chunk)].lock; } static void dm_exception_table_lock(struct dm_exception_table_lock *lock) { - hlist_bl_lock(lock->complete_slot); - hlist_bl_lock(lock->pending_slot); + spin_lock_nested(lock->complete_slot, 1); + spin_lock_nested(lock->pending_slot, 2); } static void dm_exception_table_unlock(struct dm_exception_table_lock *lock) { - hlist_bl_unlock(lock->pending_slot); - hlist_bl_unlock(lock->complete_slot); + spin_unlock(lock->pending_slot); + spin_unlock(lock->complete_slot); } static int dm_exception_table_init(struct dm_exception_table *et, @@ -661,13 +666,15 @@ static int dm_exception_table_init(struct dm_exception_table *et, et->hash_shift = hash_shift; et->hash_mask = size - 1; - et->table = kvmalloc_array(size, sizeof(struct hlist_bl_head), + et->table = kvmalloc_array(size, sizeof(struct dm_hlist_head), GFP_KERNEL); if (!et->table) return -ENOMEM; - for (i = 0; i < size; i++) - INIT_HLIST_BL_HEAD(et->table + i); + for (i = 0; i < size; i++) { + INIT_HLIST_HEAD(&et->table[i].head); + spin_lock_init(&et->table[i].lock); + } return 0; } @@ -675,16 +682,17 @@ static int dm_exception_table_init(struct dm_exception_table *et, static void dm_exception_table_exit(struct dm_exception_table *et, struct kmem_cache *mem) { - struct hlist_bl_head *slot; + struct dm_hlist_head *slot; struct dm_exception *ex; - struct hlist_bl_node *pos, *n; + struct hlist_node *pos; int i, size; size = et->hash_mask + 1; for (i = 0; i < size; i++) { slot = et->table + i; - hlist_bl_for_each_entry_safe(ex, pos, n, slot, hash_list) { + hlist_for_each_entry_safe(ex, pos, &slot->head, hash_list) { + hlist_del(&ex->hash_list); kmem_cache_free(mem, ex); cond_resched(); } @@ -700,7 +708,7 @@ static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk) static void dm_remove_exception(struct dm_exception *e) { - hlist_bl_del(&e->hash_list); + hlist_del(&e->hash_list); } /* @@ -710,12 +718,11 @@ static void dm_remove_exception(struct dm_exception *e) static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et, chunk_t chunk) { - struct hlist_bl_head *slot; - struct hlist_bl_node *pos; + struct hlist_head *slot; struct dm_exception *e; - slot = &et->table[exception_hash(et, chunk)]; - hlist_bl_for_each_entry(e, pos, slot, hash_list) + slot = &et->table[exception_hash(et, chunk)].head; + hlist_for_each_entry(e, slot, hash_list) if (chunk >= e->old_chunk && chunk <= e->old_chunk + dm_consecutive_chunk_count(e)) return e; @@ -762,18 +769,17 @@ static void free_pending_exception(struct dm_snap_pending_exception *pe) static void dm_insert_exception(struct dm_exception_table *eh, struct dm_exception *new_e) { - struct hlist_bl_head *l; - struct hlist_bl_node *pos; + struct hlist_head *l; struct dm_exception *e = NULL; - l = &eh->table[exception_hash(eh, 
new_e->old_chunk)]; + l = &eh->table[exception_hash(eh, new_e->old_chunk)].head; /* Add immediately if this table doesn't support consecutive chunks */ if (!eh->hash_shift) goto out; /* List is ordered by old_chunk */ - hlist_bl_for_each_entry(e, pos, l, hash_list) { + hlist_for_each_entry(e, l, hash_list) { /* Insert after an existing chunk? */ if (new_e->old_chunk == (e->old_chunk + dm_consecutive_chunk_count(e) + 1) && @@ -804,13 +810,13 @@ out: * Either the table doesn't support consecutive chunks or slot * l is empty. */ - hlist_bl_add_head(&new_e->hash_list, l); + hlist_add_head(&new_e->hash_list, l); } else if (new_e->old_chunk < e->old_chunk) { /* Add before an existing exception */ - hlist_bl_add_before(&new_e->hash_list, &e->hash_list); + hlist_add_before(&new_e->hash_list, &e->hash_list); } else { /* Add to l's tail: e is the last exception in this slot */ - hlist_bl_add_behind(&new_e->hash_list, &e->hash_list); + hlist_add_behind(&new_e->hash_list, &e->hash_list); } } @@ -820,7 +826,6 @@ out: */ static int dm_add_exception(void *context, chunk_t old, chunk_t new) { - struct dm_exception_table_lock lock; struct dm_snapshot *s = context; struct dm_exception *e; @@ -833,17 +838,7 @@ static int dm_add_exception(void *context, chunk_t old, chunk_t new) /* Consecutive_count is implicitly initialised to zero */ e->new_chunk = new; - /* - * Although there is no need to lock access to the exception tables - * here, if we don't then hlist_bl_add_head(), called by - * dm_insert_exception(), will complain about accessing the - * corresponding list without locking it first. - */ - dm_exception_table_lock_init(s, old, &lock); - - dm_exception_table_lock(&lock); dm_insert_exception(&s->complete, e); - dm_exception_table_unlock(&lock); return 0; } @@ -873,7 +868,7 @@ static int calc_max_buckets(void) /* use a fixed size of 2MB */ unsigned long mem = 2 * 1024 * 1024; - mem /= sizeof(struct hlist_bl_head); + mem /= sizeof(struct dm_hlist_head); return mem; } diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c index bfaef27ca79f..22bc70923a83 100644 --- a/drivers/md/dm-sysfs.c +++ b/drivers/md/dm-sysfs.c @@ -86,17 +86,13 @@ static ssize_t dm_attr_uuid_show(struct mapped_device *md, char *buf) static ssize_t dm_attr_suspended_show(struct mapped_device *md, char *buf) { - sprintf(buf, "%d\n", dm_suspended_md(md)); - - return strlen(buf); + return sysfs_emit(buf, "%d\n", dm_suspended_md(md)); } static ssize_t dm_attr_use_blk_mq_show(struct mapped_device *md, char *buf) { /* Purely for userspace compatibility */ - sprintf(buf, "%d\n", true); - - return strlen(buf); + return sysfs_emit(buf, "%d\n", true); } static DM_ATTR_RO(name); diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index ad0a60a07b93..0522cd700e0e 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -2043,6 +2043,10 @@ bool dm_table_supports_size_change(struct dm_table *t, sector_t old_size, return true; } +/* + * This function will be skipped by noflush reloads of immutable request + * based devices (dm-mpath). 
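(Illustrative aside.) The dm-snap hunks above replace the bit-locked hlist_bl buckets with a plain hlist_head paired with a per-bucket spinlock, taking the completed- and pending-table buckets under distinct spin_lock_nested() subclasses so that lockdep accepts holding both at once. A self-contained sketch of the per-bucket pattern, with an invented item type and a fixed 256-bucket table:

// SPDX-License-Identifier: GPL-2.0
/*
 * Sketch of a hash table whose buckets each carry their own spinlock,
 * mirroring struct dm_hlist_head above.  "struct item" and the table
 * size are made up for the example.
 */
#include <linux/list.h>
#include <linux/spinlock.h>

#define NR_BUCKETS 256

struct bucket {
	struct hlist_head head;
	spinlock_t lock;
};

struct item {
	struct hlist_node node;
	unsigned long key;
};

static struct bucket table[NR_BUCKETS];

static void table_init(void)
{
	int i;

	for (i = 0; i < NR_BUCKETS; i++) {
		INIT_HLIST_HEAD(&table[i].head);
		spin_lock_init(&table[i].lock);
	}
}

static void table_insert(struct item *it)
{
	struct bucket *b = &table[it->key % NR_BUCKETS];

	spin_lock(&b->lock);
	hlist_add_head(&it->node, &b->head);
	spin_unlock(&b->lock);
}

static struct item *table_lookup(unsigned long key)
{
	struct bucket *b = &table[key % NR_BUCKETS];
	struct item *it;

	/* Only this bucket is serialized; others stay fully concurrent. */
	spin_lock(&b->lock);
	hlist_for_each_entry(it, &b->head, node) {
		if (it->key == key) {
			spin_unlock(&b->lock);
			return it;
		}
	}
	spin_unlock(&b->lock);
	return NULL;
}

The calc_max_buckets() change in the same hunk shows the cost side of the trade: the fixed 2 MiB budget is now divided by the larger sizeof(struct dm_hlist_head), so slightly fewer buckets fit.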
+ */ int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q, struct queue_limits *limits) { diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index c84149ba4e38..52ffb495f5a8 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -395,13 +395,13 @@ static void begin_discard(struct discard_op *op, struct thin_c *tc, struct bio * op->bio = NULL; } -static int issue_discard(struct discard_op *op, dm_block_t data_b, dm_block_t data_e) +static void issue_discard(struct discard_op *op, dm_block_t data_b, dm_block_t data_e) { struct thin_c *tc = op->tc; sector_t s = block_to_sectors(tc->pool, data_b); sector_t len = block_to_sectors(tc->pool, data_e - data_b); - return __blkdev_issue_discard(tc->pool_dev->bdev, s, len, GFP_NOIO, &op->bio); + __blkdev_issue_discard(tc->pool_dev->bdev, s, len, GFP_NOIO, &op->bio); } static void end_discard(struct discard_op *op, int r) @@ -1113,9 +1113,7 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m break; } - r = issue_discard(&op, b, e); - if (r) - goto out; + issue_discard(&op, b, e); b = e; } @@ -1188,8 +1186,8 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m) struct discard_op op; begin_discard(&op, tc, discard_parent); - r = issue_discard(&op, m->data_block, data_end); - end_discard(&op, r); + issue_discard(&op, m->data_block, data_end); + end_discard(&op, 0); } } @@ -4383,11 +4381,8 @@ static void thin_postsuspend(struct dm_target *ti) { struct thin_c *tc = ti->private; - /* - * The dm_noflush_suspending flag has been cleared by now, so - * unfortunately we must always run this. - */ - noflush_work(tc, do_noflush_stop); + if (dm_noflush_suspending(ti)) + noflush_work(tc, do_noflush_stop); } static int thin_preresume(struct dm_target *ti) diff --git a/drivers/md/dm-vdo/action-manager.c b/drivers/md/dm-vdo/action-manager.c index a0e5e7077d13..e3bba0b28aad 100644 --- a/drivers/md/dm-vdo/action-manager.c +++ b/drivers/md/dm-vdo/action-manager.c @@ -43,7 +43,7 @@ struct action { * @actions: The two action slots. * @current_action: The current action slot. * @zones: The number of zones in which an action is to be applied. - * @Scheduler: A function to schedule a default next action. + * @scheduler: A function to schedule a default next action. * @get_zone_thread_id: A function to get the id of the thread on which to apply an action to a * zone. * @initiator_thread_id: The ID of the thread on which actions may be initiated. diff --git a/drivers/md/dm-vdo/admin-state.c b/drivers/md/dm-vdo/admin-state.c index 3f9dba525154..da153fef085e 100644 --- a/drivers/md/dm-vdo/admin-state.c +++ b/drivers/md/dm-vdo/admin-state.c @@ -149,7 +149,8 @@ const struct admin_state_code *VDO_ADMIN_STATE_RESUMING = &VDO_CODE_RESUMING; /** * get_next_state() - Determine the state which should be set after a given operation completes * based on the operation and the current state. - * @operation The operation to be started. + * @state: The current admin state. + * @operation: The operation to be started. * * Return: The state to set when the operation completes or NULL if the operation can not be * started in the current state. @@ -187,6 +188,8 @@ static const struct admin_state_code *get_next_state(const struct admin_state *s /** * vdo_finish_operation() - Finish the current operation. + * @state: The current admin state. + * @result: The result of the operation. * * Will notify the operation waiter if there is one. 
This method should be used for operations * started with vdo_start_operation(). For operations which were started with vdo_start_draining(), @@ -214,8 +217,10 @@ bool vdo_finish_operation(struct admin_state *state, int result) /** * begin_operation() - Begin an operation if it may be started given the current state. - * @waiter A completion to notify when the operation is complete; may be NULL. - * @initiator The vdo_admin_initiator_fn to call if the operation may begin; may be NULL. + * @state: The current admin state. + * @operation: The operation to be started. + * @waiter: A completion to notify when the operation is complete; may be NULL. + * @initiator: The vdo_admin_initiator_fn to call if the operation may begin; may be NULL. * * Return: VDO_SUCCESS or an error. */ @@ -259,8 +264,10 @@ static int __must_check begin_operation(struct admin_state *state, /** * start_operation() - Start an operation if it may be started given the current state. - * @waiter A completion to notify when the operation is complete. - * @initiator The vdo_admin_initiator_fn to call if the operation may begin; may be NULL. + * @state: The current admin state. + * @operation: The operation to be started. + * @waiter: A completion to notify when the operation is complete; may be NULL. + * @initiator: The vdo_admin_initiator_fn to call if the operation may begin; may be NULL. * * Return: true if the operation was started. */ @@ -274,10 +281,10 @@ static inline bool __must_check start_operation(struct admin_state *state, /** * check_code() - Check the result of a state validation. - * @valid true if the code is of an appropriate type. - * @code The code which failed to be of the correct type. - * @what What the code failed to be, for logging. - * @waiter The completion to notify of the error; may be NULL. + * @valid: True if the code is of an appropriate type. + * @code: The code which failed to be of the correct type. + * @what: What the code failed to be, for logging. + * @waiter: The completion to notify of the error; may be NULL. * * If the result failed, log an invalid state error and, if there is a waiter, notify it. * @@ -301,7 +308,8 @@ static bool check_code(bool valid, const struct admin_state_code *code, const ch /** * assert_vdo_drain_operation() - Check that an operation is a drain. - * @waiter The completion to finish with an error if the operation is not a drain. + * @operation: The operation to check. + * @waiter: The completion to finish with an error if the operation is not a drain. * * Return: true if the specified operation is a drain. */ @@ -313,9 +321,10 @@ static bool __must_check assert_vdo_drain_operation(const struct admin_state_cod /** * vdo_start_draining() - Initiate a drain operation if the current state permits it. - * @operation The type of drain to initiate. - * @waiter The completion to notify when the drain is complete. - * @initiator The vdo_admin_initiator_fn to call if the operation may begin; may be NULL. + * @state: The current admin state. + * @operation: The type of drain to initiate. + * @waiter: The completion to notify when the drain is complete. + * @initiator: The vdo_admin_initiator_fn to call if the operation may begin; may be NULL. * * Return: true if the drain was initiated, if not the waiter will be notified. */ @@ -345,6 +354,7 @@ bool vdo_start_draining(struct admin_state *state, /** * vdo_finish_draining() - Finish a drain operation if one was in progress. + * @state: The current admin state. 
* * Return: true if the state was draining; will notify the waiter if so. */ @@ -355,6 +365,8 @@ bool vdo_finish_draining(struct admin_state *state) /** * vdo_finish_draining_with_result() - Finish a drain operation with a status code. + * @state: The current admin state. + * @result: The result of the drain operation. * * Return: true if the state was draining; will notify the waiter if so. */ @@ -365,7 +377,8 @@ bool vdo_finish_draining_with_result(struct admin_state *state, int result) /** * vdo_assert_load_operation() - Check that an operation is a load. - * @waiter The completion to finish with an error if the operation is not a load. + * @operation: The operation to check. + * @waiter: The completion to finish with an error if the operation is not a load. * * Return: true if the specified operation is a load. */ @@ -377,9 +390,10 @@ bool vdo_assert_load_operation(const struct admin_state_code *operation, /** * vdo_start_loading() - Initiate a load operation if the current state permits it. - * @operation The type of load to initiate. - * @waiter The completion to notify when the load is complete (may be NULL). - * @initiator The vdo_admin_initiator_fn to call if the operation may begin; may be NULL. + * @state: The current admin state. + * @operation: The type of load to initiate. + * @waiter: The completion to notify when the load is complete; may be NULL. + * @initiator: The vdo_admin_initiator_fn to call if the operation may begin; may be NULL. * * Return: true if the load was initiated, if not the waiter will be notified. */ @@ -393,6 +407,7 @@ bool vdo_start_loading(struct admin_state *state, /** * vdo_finish_loading() - Finish a load operation if one was in progress. + * @state: The current admin state. * * Return: true if the state was loading; will notify the waiter if so. */ @@ -403,7 +418,8 @@ bool vdo_finish_loading(struct admin_state *state) /** * vdo_finish_loading_with_result() - Finish a load operation with a status code. - * @result The result of the load operation. + * @state: The current admin state. + * @result: The result of the load operation. * * Return: true if the state was loading; will notify the waiter if so. */ @@ -414,7 +430,8 @@ bool vdo_finish_loading_with_result(struct admin_state *state, int result) /** * assert_vdo_resume_operation() - Check whether an admin_state_code is a resume operation. - * @waiter The completion to notify if the operation is not a resume operation; may be NULL. + * @operation: The operation to check. + * @waiter: The completion to notify if the operation is not a resume operation; may be NULL. * * Return: true if the code is a resume operation. */ @@ -427,9 +444,10 @@ static bool __must_check assert_vdo_resume_operation(const struct admin_state_co /** * vdo_start_resuming() - Initiate a resume operation if the current state permits it. - * @operation The type of resume to start. - * @waiter The completion to notify when the resume is complete (may be NULL). - * @initiator The vdo_admin_initiator_fn to call if the operation may begin; may be NULL. + * @state: The current admin state. + * @operation: The type of resume to start. + * @waiter: The completion to notify when the resume is complete; may be NULL. + * @initiator: The vdo_admin_initiator_fn to call if the operation may begin; may be NULL. * * Return: true if the resume was initiated, if not the waiter will be notified. */ @@ -443,6 +461,7 @@ bool vdo_start_resuming(struct admin_state *state, /** * vdo_finish_resuming() - Finish a resume operation if one was in progress. 
+ * @state: The current admin state. * * Return: true if the state was resuming; will notify the waiter if so. */ @@ -453,7 +472,8 @@ bool vdo_finish_resuming(struct admin_state *state) /** * vdo_finish_resuming_with_result() - Finish a resume operation with a status code. - * @result The result of the resume operation. + * @state: The current admin state. + * @result: The result of the resume operation. * * Return: true if the state was resuming; will notify the waiter if so. */ @@ -465,6 +485,7 @@ bool vdo_finish_resuming_with_result(struct admin_state *state, int result) /** * vdo_resume_if_quiescent() - Change the state to normal operation if the current state is * quiescent. + * @state: The current admin state. * * Return: VDO_SUCCESS if the state resumed, VDO_INVALID_ADMIN_STATE otherwise. */ @@ -479,6 +500,8 @@ int vdo_resume_if_quiescent(struct admin_state *state) /** * vdo_start_operation() - Attempt to start an operation. + * @state: The current admin state. + * @operation: The operation to attempt to start. * * Return: VDO_SUCCESS if the operation was started, VDO_INVALID_ADMIN_STATE if not */ @@ -490,8 +513,10 @@ int vdo_start_operation(struct admin_state *state, /** * vdo_start_operation_with_waiter() - Attempt to start an operation. - * @waiter the completion to notify when the operation completes or fails to start; may be NULL. - * @initiator The vdo_admin_initiator_fn to call if the operation may begin; may be NULL. + * @state: The current admin state. + * @operation: The operation to attempt to start. + * @waiter: The completion to notify when the operation completes or fails to start; may be NULL. + * @initiator: The vdo_admin_initiator_fn to call if the operation may begin; may be NULL. * * Return: VDO_SUCCESS if the operation was started, VDO_INVALID_ADMIN_STATE if not */ diff --git a/drivers/md/dm-vdo/block-map.c b/drivers/md/dm-vdo/block-map.c index baf683cabb1b..a7db5b41155e 100644 --- a/drivers/md/dm-vdo/block-map.c +++ b/drivers/md/dm-vdo/block-map.c @@ -174,6 +174,7 @@ static inline struct vdo_page_completion *page_completion_from_waiter(struct vdo /** * initialize_info() - Initialize all page info structures and put them on the free list. + * @cache: The page cache. * * Return: VDO_SUCCESS or an error. */ @@ -209,6 +210,7 @@ static int initialize_info(struct vdo_page_cache *cache) /** * allocate_cache_components() - Allocate components of the cache which require their own * allocation. + * @cache: The page cache. * * The caller is responsible for all clean up on errors. * @@ -238,6 +240,8 @@ static int __must_check allocate_cache_components(struct vdo_page_cache *cache) /** * assert_on_cache_thread() - Assert that a function has been called on the VDO page cache's * thread. + * @cache: The page cache. + * @function_name: The funtion name to report if the assertion fails. */ static inline void assert_on_cache_thread(struct vdo_page_cache *cache, const char *function_name) @@ -271,6 +275,7 @@ static void report_cache_pressure(struct vdo_page_cache *cache) /** * get_page_state_name() - Return the name of a page state. + * @state: The page state to describe. * * If the page state is invalid a static string is returned and the invalid state is logged. * @@ -342,6 +347,8 @@ static void update_lru(struct page_info *info) /** * set_info_state() - Set the state of a page_info and put it on the right list, adjusting * counters. + * @info: The page info to update. + * @new_state: The new state to set. 
*/ static void set_info_state(struct page_info *info, enum vdo_page_buffer_state new_state) { @@ -416,6 +423,7 @@ static int reset_page_info(struct page_info *info) /** * find_free_page() - Find a free page. + * @cache: The page cache. * * Return: A pointer to the page info structure (if found), NULL otherwise. */ @@ -433,6 +441,7 @@ static struct page_info * __must_check find_free_page(struct vdo_page_cache *cac /** * find_page() - Find the page info (if any) associated with a given pbn. + * @cache: The page cache. * @pbn: The absolute physical block number of the page. * * Return: The page info for the page if available, or NULL if not. @@ -449,6 +458,7 @@ static struct page_info * __must_check find_page(struct vdo_page_cache *cache, /** * select_lru_page() - Determine which page is least recently used. + * @cache: The page cache. * * Picks the least recently used from among the non-busy entries at the front of each of the lru * list. Since whenever we mark a page busy we also put it to the end of the list it is unlikely @@ -523,6 +533,8 @@ static void complete_waiter_with_page(struct vdo_waiter *waiter, void *page_info /** * distribute_page_over_waitq() - Complete a waitq of VDO page completions with a page result. + * @info: The loaded page info. + * @waitq: The list of waiting data_vios. * * Upon completion the waitq will be empty. * @@ -548,7 +560,9 @@ static unsigned int distribute_page_over_waitq(struct page_info *info, /** * set_persistent_error() - Set a persistent error which all requests will receive in the future. + * @cache: The page cache. * @context: A string describing what triggered the error. + * @result: The error result to set on the cache. * * Once triggered, all enqueued completions will get this error. Any future requests will result in * this error as well. @@ -581,6 +595,7 @@ static void set_persistent_error(struct vdo_page_cache *cache, const char *conte /** * validate_completed_page() - Check that a page completion which is being freed to the cache * referred to a valid page and is in a valid state. + * @completion: The page completion to check. * @writable: Whether a writable page is required. * * Return: VDO_SUCCESS if the page was valid, otherwise as error @@ -758,6 +773,8 @@ static void load_cache_page_endio(struct bio *bio) /** * launch_page_load() - Begin the process of loading a page. + * @info: The page info to launch. + * @pbn: The absolute physical block number of the page to load. * * Return: VDO_SUCCESS or an error code. */ @@ -836,6 +853,7 @@ static void save_pages(struct vdo_page_cache *cache) /** * schedule_page_save() - Add a page to the outgoing list of pages waiting to be saved. + * @info: The page info to save. * * Once in the list, a page may not be used until it has been written out. */ @@ -854,6 +872,7 @@ static void schedule_page_save(struct page_info *info) /** * launch_page_save() - Add a page to outgoing pages waiting to be saved, and then start saving * pages if another save is not in progress. + * @info: The page info to save. */ static void launch_page_save(struct page_info *info) { @@ -864,6 +883,7 @@ static void launch_page_save(struct page_info *info) /** * completion_needs_page() - Determine whether a given vdo_page_completion (as a waiter) is * requesting a given page number. + * @waiter: The page completion waiter to check. * @context: A pointer to the pbn of the desired page. * * Implements waiter_match_fn. 
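(Illustrative aside.) Most of the dm-vdo changes from here on simply complete kernel-doc headers by adding the missing @parameter lines so each comment matches its function's signature. For reference, a minimal comment in the shape these hunks converge on; the function, structure, and parameter names are invented:

/**
 * example_count_widgets() - Count the widgets attached to a zone.
 * @zone: The zone to inspect.
 * @include_busy: Whether widgets currently in use are counted too.
 *
 * Context: Must be called on the zone's own thread.
 *
 * Return: The number of widgets found.
 */
static unsigned int example_count_widgets(const struct example_zone *zone,
					  bool include_busy);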
@@ -880,6 +900,7 @@ static bool completion_needs_page(struct vdo_waiter *waiter, void *context) /** * allocate_free_page() - Allocate a free page to the first completion in the waiting queue, and * any other completions that match it in page number. + * @info: The page info to allocate a page for. */ static void allocate_free_page(struct page_info *info) { @@ -925,6 +946,7 @@ static void allocate_free_page(struct page_info *info) /** * discard_a_page() - Begin the process of discarding a page. + * @cache: The page cache. * * If no page is discardable, increments a count of deferred frees so that the next release of a * page which is no longer busy will kick off another discard cycle. This is an indication that the @@ -955,10 +977,6 @@ static void discard_a_page(struct vdo_page_cache *cache) launch_page_save(info); } -/** - * discard_page_for_completion() - Helper used to trigger a discard so that the completion can get - * a different page. - */ static void discard_page_for_completion(struct vdo_page_completion *vdo_page_comp) { struct vdo_page_cache *cache = vdo_page_comp->cache; @@ -1132,6 +1150,7 @@ static void write_pages(struct vdo_completion *flush_completion) /** * vdo_release_page_completion() - Release a VDO Page Completion. + * @completion: The page completion to release. * * The page referenced by this completion (if any) will no longer be held busy by this completion. * If a page becomes discardable and there are completions awaiting free pages then a new round of @@ -1172,10 +1191,6 @@ void vdo_release_page_completion(struct vdo_completion *completion) } } -/** - * load_page_for_completion() - Helper function to load a page as described by a VDO Page - * Completion. - */ static void load_page_for_completion(struct page_info *info, struct vdo_page_completion *vdo_page_comp) { @@ -1319,6 +1334,7 @@ int vdo_get_cached_page(struct vdo_completion *completion, /** * vdo_invalidate_page_cache() - Invalidate all entries in the VDO page cache. + * @cache: The page cache. * * There must not be any dirty pages in the cache. * @@ -1345,6 +1361,10 @@ int vdo_invalidate_page_cache(struct vdo_page_cache *cache) /** * get_tree_page_by_index() - Get the tree page for a given height and page index. + * @forest: The block map forest. + * @root_index: The root index of the tree to search. + * @height: The height in the tree. + * @page_index: The page index. * * Return: The requested page. */ @@ -2211,6 +2231,7 @@ static void allocate_block_map_page(struct block_map_zone *zone, /** * vdo_find_block_map_slot() - Find the block map slot in which the block map entry for a data_vio * resides and cache that result in the data_vio. + * @data_vio: The data vio. * * All ancestors in the tree will be allocated or loaded, as needed. */ @@ -2435,6 +2456,7 @@ static void deforest(struct forest *forest, size_t first_page_segment) /** * make_forest() - Make a collection of trees for a block_map, expanding the existing forest if * there is one. + * @map: The block map. * @entries: The number of entries the block map will hold. * * Return: VDO_SUCCESS or an error. @@ -2476,6 +2498,7 @@ static int make_forest(struct block_map *map, block_count_t entries) /** * replace_forest() - Replace a block_map's forest with the already-prepared larger forest. + * @map: The block map. */ static void replace_forest(struct block_map *map) { @@ -2492,6 +2515,7 @@ static void replace_forest(struct block_map *map) /** * finish_cursor() - Finish the traversal of a single tree. If it was the last cursor, finish the * traversal. 
+ * @cursor: The cursor to complete. */ static void finish_cursor(struct cursor *cursor) { @@ -2549,6 +2573,7 @@ static void traversal_endio(struct bio *bio) /** * traverse() - Traverse a single block map tree. + * @cursor: A cursor tracking traversal progress. * * This is the recursive heart of the traversal process. */ @@ -2619,6 +2644,7 @@ static void traverse(struct cursor *cursor) /** * launch_cursor() - Start traversing a single block map tree now that the cursor has a VIO with * which to load pages. + * @waiter: The parent of the cursor to launch. * @context: The pooled_vio just acquired. * * Implements waiter_callback_fn. @@ -2636,6 +2662,8 @@ static void launch_cursor(struct vdo_waiter *waiter, void *context) /** * compute_boundary() - Compute the number of pages used at each level of the given root's tree. + * @map: The block map. + * @root_index: The tree root index. * * Return: The list of page counts as a boundary structure. */ @@ -2668,6 +2696,7 @@ static struct boundary compute_boundary(struct block_map *map, root_count_t root /** * vdo_traverse_forest() - Walk the entire forest of a block map. + * @map: The block map. * @callback: A function to call with the pbn of each allocated node in the forest. * @completion: The completion to notify on each traversed PBN, and when traversal completes. */ @@ -2707,6 +2736,9 @@ void vdo_traverse_forest(struct block_map *map, vdo_entry_callback_fn callback, /** * initialize_block_map_zone() - Initialize the per-zone portions of the block map. + * @map: The block map. + * @zone_number: The zone to initialize. + * @cache_size: The total block map cache size. * @maximum_age: The number of journal blocks before a dirtied page is considered old and must be * written out. */ @@ -3091,6 +3123,7 @@ static void fetch_mapping_page(struct data_vio *data_vio, bool modifiable, /** * clear_mapped_location() - Clear a data_vio's mapped block location, setting it to be unmapped. + * @data_vio: The data vio. * * This indicates the block map entry for the logical block is either unmapped or corrupted. */ @@ -3104,6 +3137,8 @@ static void clear_mapped_location(struct data_vio *data_vio) /** * set_mapped_location() - Decode and validate a block map entry, and set the mapped location of a * data_vio. + * @data_vio: The data vio. + * @entry: The new mapped entry to set. * * Return: VDO_SUCCESS or VDO_BAD_MAPPING if the map entry is invalid or an error code for any * other failure diff --git a/drivers/md/dm-vdo/completion.c b/drivers/md/dm-vdo/completion.c index 5ad85334632d..2f00acbb3b2b 100644 --- a/drivers/md/dm-vdo/completion.c +++ b/drivers/md/dm-vdo/completion.c @@ -65,6 +65,8 @@ static inline void assert_incomplete(struct vdo_completion *completion) /** * vdo_set_completion_result() - Set the result of a completion. + * @completion: The completion to update. + * @result: The result to set. * * Older errors will not be masked. */ @@ -77,6 +79,7 @@ void vdo_set_completion_result(struct vdo_completion *completion, int result) /** * vdo_launch_completion_with_priority() - Run or enqueue a completion. + * @completion: The completion to launch. * @priority: The priority at which to enqueue the completion. * * If called on the correct thread (i.e. the one specified in the completion's callback_thread_id @@ -125,6 +128,8 @@ void vdo_enqueue_completion(struct vdo_completion *completion, /** * vdo_requeue_completion_if_needed() - Requeue a completion if not called on the specified thread. + * @completion: The completion to requeue. 
+ * @callback_thread_id: The thread on which to requeue the completion. * * Return: True if the completion was requeued; callers may not access the completion in this case. */ diff --git a/drivers/md/dm-vdo/data-vio.c b/drivers/md/dm-vdo/data-vio.c index 262e11581f2d..3333e1e5b02e 100644 --- a/drivers/md/dm-vdo/data-vio.c +++ b/drivers/md/dm-vdo/data-vio.c @@ -227,6 +227,7 @@ static inline u64 get_arrival_time(struct bio *bio) /** * check_for_drain_complete_locked() - Check whether a data_vio_pool has no outstanding data_vios * or waiters while holding the pool's lock. + * @pool: The data_vio pool. */ static bool check_for_drain_complete_locked(struct data_vio_pool *pool) { @@ -387,6 +388,7 @@ struct data_vio_compression_status advance_data_vio_compression_stage(struct dat /** * cancel_data_vio_compression() - Prevent this data_vio from being compressed or packed. + * @data_vio: The data_vio. * * Return: true if the data_vio is in the packer and the caller was the first caller to cancel it. */ @@ -483,6 +485,8 @@ static void attempt_logical_block_lock(struct vdo_completion *completion) /** * launch_data_vio() - (Re)initialize a data_vio to have a new logical block number, keeping the * same parent and other state and send it on its way. + * @data_vio: The data_vio to launch. + * @lbn: The logical block number. */ static void launch_data_vio(struct data_vio *data_vio, logical_block_number_t lbn) { @@ -641,6 +645,7 @@ static void update_limiter(struct limiter *limiter) /** * schedule_releases() - Ensure that release processing is scheduled. + * @pool: The data_vio pool. * * If this call switches the state to processing, enqueue. Otherwise, some other thread has already * done so. @@ -768,6 +773,8 @@ static void initialize_limiter(struct limiter *limiter, struct data_vio_pool *po /** * initialize_data_vio() - Allocate the components of a data_vio. + * @data_vio: The data_vio to initialize. + * @vdo: The vdo containing the data_vio. * * The caller is responsible for cleaning up the data_vio on error. * @@ -880,6 +887,7 @@ int make_data_vio_pool(struct vdo *vdo, data_vio_count_t pool_size, /** * free_data_vio_pool() - Free a data_vio_pool and the data_vios in it. + * @pool: The data_vio pool to free. * * All data_vios must be returned to the pool before calling this function. */ @@ -944,6 +952,8 @@ static void wait_permit(struct limiter *limiter, struct bio *bio) /** * vdo_launch_bio() - Acquire a data_vio from the pool, assign the bio to it, and launch it. + * @pool: The data_vio pool. + * @bio: The bio to launch. * * This will block if data_vios or discard permits are not available. */ @@ -994,6 +1004,7 @@ static void assert_on_vdo_cpu_thread(const struct vdo *vdo, const char *name) /** * drain_data_vio_pool() - Wait asynchronously for all data_vios to be returned to the pool. + * @pool: The data_vio pool. * @completion: The completion to notify when the pool has drained. */ void drain_data_vio_pool(struct data_vio_pool *pool, struct vdo_completion *completion) @@ -1005,6 +1016,7 @@ void drain_data_vio_pool(struct data_vio_pool *pool, struct vdo_completion *comp /** * resume_data_vio_pool() - Resume a data_vio pool. + * @pool: The data_vio pool. * @completion: The completion to notify when the pool has resumed. */ void resume_data_vio_pool(struct data_vio_pool *pool, struct vdo_completion *completion) @@ -1024,6 +1036,7 @@ static void dump_limiter(const char *name, struct limiter *limiter) /** * dump_data_vio_pool() - Dump a data_vio pool to the log. + * @pool: The data_vio pool. 
* @dump_vios: Whether to dump the details of each busy data_vio as well. */ void dump_data_vio_pool(struct data_vio_pool *pool, bool dump_vios) @@ -1114,6 +1127,7 @@ static void perform_cleanup_stage(struct data_vio *data_vio, /** * release_allocated_lock() - Release the PBN lock and/or the reference on the allocated block at * the end of processing a data_vio. + * @completion: The data_vio holding the lock. */ static void release_allocated_lock(struct vdo_completion *completion) { @@ -1194,6 +1208,7 @@ static void transfer_lock(struct data_vio *data_vio, struct lbn_lock *lock) /** * release_logical_lock() - Release the logical block lock and flush generation lock at the end of * processing a data_vio. + * @completion: The data_vio holding the lock. */ static void release_logical_lock(struct vdo_completion *completion) { @@ -1228,6 +1243,7 @@ static void clean_hash_lock(struct vdo_completion *completion) /** * finish_cleanup() - Make some assertions about a data_vio which has finished cleaning up. + * @data_vio: The data_vio. * * If it is part of a multi-block discard, starts on the next block, otherwise, returns it to the * pool. @@ -1342,6 +1358,7 @@ void handle_data_vio_error(struct vdo_completion *completion) /** * get_data_vio_operation_name() - Get the name of the last asynchronous operation performed on a * data_vio. + * @data_vio: The data_vio. */ const char *get_data_vio_operation_name(struct data_vio *data_vio) { @@ -1355,7 +1372,7 @@ const char *get_data_vio_operation_name(struct data_vio *data_vio) /** * data_vio_allocate_data_block() - Allocate a data block. - * + * @data_vio: The data_vio. * @write_lock_type: The type of write lock to obtain on the block. * @callback: The callback which will attempt an allocation in the current zone and continue if it * succeeds. @@ -1379,6 +1396,7 @@ void data_vio_allocate_data_block(struct data_vio *data_vio, /** * release_data_vio_allocation_lock() - Release the PBN lock on a data_vio's allocated block. + * @data_vio: The data_vio. * @reset: If true, the allocation will be reset (i.e. any allocated pbn will be forgotten). * * If the reference to the locked block is still provisional, it will be released as well. @@ -1399,6 +1417,7 @@ void release_data_vio_allocation_lock(struct data_vio *data_vio, bool reset) /** * uncompress_data_vio() - Uncompress the data a data_vio has just read. + * @data_vio: The data_vio. * @mapping_state: The mapping state indicating which fragment to decompress. * @buffer: The buffer to receive the uncompressed data. */ @@ -1519,6 +1538,7 @@ static void complete_zero_read(struct vdo_completion *completion) /** * read_block() - Read a block asynchronously. + * @completion: The data_vio doing the read. * * This is the callback registered in read_block_mapping(). */ @@ -1675,6 +1695,7 @@ static void journal_remapping(struct vdo_completion *completion) /** * read_old_block_mapping() - Get the previous PBN/LBN mapping of an in-progress write. + * @completion: The data_vio doing the read. * * Gets the previous PBN mapped to this LBN from the block map, so as to make an appropriate * journal entry referencing the removal of this LBN->PBN mapping. @@ -1704,6 +1725,7 @@ void update_metadata_for_data_vio_write(struct data_vio *data_vio, struct pbn_lo /** * pack_compressed_data() - Attempt to pack the compressed data_vio into a block. + * @completion: The data_vio. * * This is the callback registered in launch_compress_data_vio(). 
*/ @@ -1725,6 +1747,7 @@ static void pack_compressed_data(struct vdo_completion *completion) /** * compress_data_vio() - Do the actual work of compressing the data on a CPU queue. + * @completion: The data_vio. * * This callback is registered in launch_compress_data_vio(). */ @@ -1754,6 +1777,7 @@ static void compress_data_vio(struct vdo_completion *completion) /** * launch_compress_data_vio() - Continue a write by attempting to compress the data. + * @data_vio: The data_vio. * * This is a re-entry point to vio_write used by hash locks. */ @@ -1796,7 +1820,8 @@ void launch_compress_data_vio(struct data_vio *data_vio) /** * hash_data_vio() - Hash the data in a data_vio and set the hash zone (which also flags the record * name as set). - + * @completion: The data_vio. + * * This callback is registered in prepare_for_dedupe(). */ static void hash_data_vio(struct vdo_completion *completion) @@ -1832,6 +1857,7 @@ static void prepare_for_dedupe(struct data_vio *data_vio) /** * write_bio_finished() - This is the bio_end_io function registered in write_block() to be called * when a data_vio's write to the underlying storage has completed. + * @bio: The bio to update. */ static void write_bio_finished(struct bio *bio) { @@ -1884,6 +1910,7 @@ void write_data_vio(struct data_vio *data_vio) /** * acknowledge_write_callback() - Acknowledge a write to the requestor. + * @completion: The data_vio. * * This callback is registered in allocate_block() and continue_write_with_block_map_slot(). */ @@ -1909,6 +1936,7 @@ static void acknowledge_write_callback(struct vdo_completion *completion) /** * allocate_block() - Attempt to allocate a block in the current allocation zone. + * @completion: The data_vio. * * This callback is registered in continue_write_with_block_map_slot(). */ @@ -1941,6 +1969,7 @@ static void allocate_block(struct vdo_completion *completion) /** * handle_allocation_error() - Handle an error attempting to allocate a block. + * @completion: The data_vio. * * This error handler is registered in continue_write_with_block_map_slot(). */ @@ -1970,6 +1999,7 @@ static int assert_is_discard(struct data_vio *data_vio) /** * continue_data_vio_with_block_map_slot() - Read the data_vio's mapping from the block map. + * @completion: The data_vio to continue. * * This callback is registered in launch_read_data_vio(). */ diff --git a/drivers/md/dm-vdo/dedupe.c b/drivers/md/dm-vdo/dedupe.c index 4d983092a152..75a26f3f4461 100644 --- a/drivers/md/dm-vdo/dedupe.c +++ b/drivers/md/dm-vdo/dedupe.c @@ -917,6 +917,8 @@ static int __must_check acquire_lock(struct hash_zone *zone, /** * enter_forked_lock() - Bind the data_vio to a new hash lock. + * @waiter: The data_vio's waiter link. + * @context: The new hash lock. * * Implements waiter_callback_fn. Binds the data_vio that was waiting to a new hash lock and waits * on that lock. @@ -971,7 +973,7 @@ static void fork_hash_lock(struct hash_lock *old_lock, struct data_vio *new_agen * path. * @lock: The hash lock. * @data_vio: The data_vio to deduplicate using the hash lock. - * @has_claim: true if the data_vio already has claimed an increment from the duplicate lock. + * @has_claim: True if the data_vio already has claimed an increment from the duplicate lock. * * If no increments are available, this will roll over to a new hash lock and launch the data_vio * as the writing agent for that lock. @@ -996,7 +998,7 @@ static void launch_dedupe(struct hash_lock *lock, struct data_vio *data_vio, * true copy of their data on disk. * @lock: The hash lock. 
* @agent: The data_vio acting as the agent for the lock. - * @agent_is_done: true only if the agent has already written or deduplicated against its data. + * @agent_is_done: True only if the agent has already written or deduplicated against its data. * * If the agent itself needs to deduplicate, an increment for it must already have been claimed * from the duplicate lock, ensuring the hash lock will still have a data_vio holding it. @@ -2146,8 +2148,8 @@ static void start_expiration_timer(struct dedupe_context *context) /** * report_dedupe_timeouts() - Record and eventually report that some dedupe requests reached their * expiration time without getting answers, so we timed them out. - * @zones: the hash zones. - * @timeouts: the number of newly timed out requests. + * @zones: The hash zones. + * @timeouts: The number of newly timed out requests. */ static void report_dedupe_timeouts(struct hash_zones *zones, unsigned int timeouts) { @@ -2509,6 +2511,8 @@ static void initiate_suspend_index(struct admin_state *state) /** * suspend_index() - Suspend the UDS index prior to draining hash zones. + * @context: Not used. + * @completion: The completion for the suspend operation. * * Implements vdo_action_preamble_fn */ @@ -2521,21 +2525,13 @@ static void suspend_index(void *context, struct vdo_completion *completion) initiate_suspend_index); } -/** - * initiate_drain() - Initiate a drain. - * - * Implements vdo_admin_initiator_fn. - */ +/** Implements vdo_admin_initiator_fn. */ static void initiate_drain(struct admin_state *state) { check_for_drain_complete(container_of(state, struct hash_zone, state)); } -/** - * drain_hash_zone() - Drain a hash zone. - * - * Implements vdo_zone_action_fn. - */ +/** Implements vdo_zone_action_fn. */ static void drain_hash_zone(void *context, zone_count_t zone_number, struct vdo_completion *parent) { @@ -2572,6 +2568,8 @@ static void launch_dedupe_state_change(struct hash_zones *zones) /** * resume_index() - Resume the UDS index prior to resuming hash zones. + * @context: Not used. + * @parent: The completion for the resume operation. * * Implements vdo_action_preamble_fn */ @@ -2602,11 +2600,7 @@ static void resume_index(void *context, struct vdo_completion *parent) vdo_finish_completion(parent); } -/** - * resume_hash_zone() - Resume a hash zone. - * - * Implements vdo_zone_action_fn. - */ +/** Implements vdo_zone_action_fn. */ static void resume_hash_zone(void *context, zone_count_t zone_number, struct vdo_completion *parent) { @@ -2634,7 +2628,7 @@ void vdo_resume_hash_zones(struct hash_zones *zones, struct vdo_completion *pare /** * get_hash_zone_statistics() - Add the statistics for this hash zone to the tally for all zones. * @zone: The hash zone to query. - * @tally: The tally + * @tally: The tally. */ static void get_hash_zone_statistics(const struct hash_zone *zone, struct hash_lock_statistics *tally) @@ -2680,8 +2674,8 @@ static void get_index_statistics(struct hash_zones *zones, /** * vdo_get_dedupe_statistics() - Tally the statistics from all the hash zones and the UDS index. - * @zones: The hash zones to query - * @stats: A structure to store the statistics + * @zones: The hash zones to query. + * @stats: A structure to store the statistics. * * Return: The sum of the hash lock statistics from all hash zones plus the statistics from the UDS * index @@ -2856,9 +2850,9 @@ void vdo_set_dedupe_index_min_timer_interval(unsigned int value) /** * acquire_context() - Acquire a dedupe context from a hash_zone if any are available. 
- * @zone: the hash zone + * @zone: The hash zone. * - * Return: A dedupe_context or NULL if none are available + * Return: A dedupe_context or NULL if none are available. */ static struct dedupe_context * __must_check acquire_context(struct hash_zone *zone) { diff --git a/drivers/md/dm-vdo/dm-vdo-target.c b/drivers/md/dm-vdo/dm-vdo-target.c index 0e04c2021682..6af40d40f255 100644 --- a/drivers/md/dm-vdo/dm-vdo-target.c +++ b/drivers/md/dm-vdo/dm-vdo-target.c @@ -1144,6 +1144,7 @@ static bool vdo_uses_device(struct vdo *vdo, const void *context) /** * get_thread_id_for_phase() - Get the thread id for the current phase of the admin operation in * progress. + * @vdo: The vdo. */ static thread_id_t __must_check get_thread_id_for_phase(struct vdo *vdo) { @@ -1188,9 +1189,9 @@ static struct vdo_completion *prepare_admin_completion(struct vdo *vdo, /** * advance_phase() - Increment the phase of the current admin operation and prepare the admin * completion to run on the thread for the next phase. - * @vdo: The on which an admin operation is being performed + * @vdo: The vdo on which an admin operation is being performed. * - * Return: The current phase + * Return: The current phase. */ static u32 advance_phase(struct vdo *vdo) { diff --git a/drivers/md/dm-vdo/encodings.c b/drivers/md/dm-vdo/encodings.c index b7cc0f41caca..dd59691be840 100644 --- a/drivers/md/dm-vdo/encodings.c +++ b/drivers/md/dm-vdo/encodings.c @@ -432,7 +432,10 @@ static void encode_block_map_state_2_0(u8 *buffer, size_t *offset, /** * vdo_compute_new_forest_pages() - Compute the number of pages which must be allocated at each * level in order to grow the forest to a new number of entries. + * @root_count: The number of block map roots. + * @old_sizes: The sizes of the old tree segments. * @entries: The new number of entries the block map must address. + * @new_sizes: The sizes of the new tree segments. * * Return: The total number of non-leaf pages required. */ @@ -462,6 +465,9 @@ block_count_t vdo_compute_new_forest_pages(root_count_t root_count, /** * encode_recovery_journal_state_7_0() - Encode the state of a recovery journal. + * @buffer: A buffer to store the encoding. + * @offset: The offset in the buffer at which to encode. + * @state: The recovery journal state to encode. * * Return: VDO_SUCCESS or an error code. */ @@ -484,6 +490,7 @@ static void encode_recovery_journal_state_7_0(u8 *buffer, size_t *offset, /** * decode_recovery_journal_state_7_0() - Decode the state of a recovery journal saved in a buffer. * @buffer: The buffer containing the saved state. + * @offset: The offset to start decoding from. * @state: A pointer to a recovery journal state to hold the result of a successful decode. * * Return: VDO_SUCCESS or an error code. @@ -544,6 +551,9 @@ const char *vdo_get_journal_operation_name(enum journal_operation operation) /** * encode_slab_depot_state_2_0() - Encode the state of a slab depot into a buffer. + * @buffer: A buffer to store the encoding. + * @offset: The offset in the buffer at which to encode. + * @state: The slab depot state to encode. */ static void encode_slab_depot_state_2_0(u8 *buffer, size_t *offset, struct slab_depot_state_2_0 state) @@ -570,6 +580,9 @@ static void encode_slab_depot_state_2_0(u8 *buffer, size_t *offset, /** * decode_slab_depot_state_2_0() - Decode slab depot component state version 2.0 from a buffer. + * @buffer: The buffer being decoded. + * @offset: The offset to start decoding from. + * @state: A pointer to a slab depot state to hold the decoded result. 
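The buffer/offset pairs documented in these encode and decode helpers follow a cursor convention: each step consumes or produces one fixed-width little-endian field and advances *offset past it. A minimal sketch of that convention, using hypothetical helper names rather than the actual dm-vdo helpers:

	#include <linux/unaligned.h>

	/* Append one little-endian u64 at *offset and advance the cursor. */
	static void encode_u64_field(u8 *buffer, size_t *offset, u64 value)
	{
		put_unaligned_le64(value, buffer + *offset);
		*offset += sizeof(u64);
	}

	/* Read one little-endian u64 from *offset and advance the cursor. */
	static u64 decode_u64_field(const u8 *buffer, size_t *offset)
	{
		u64 value = get_unaligned_le64(buffer + *offset);

		*offset += sizeof(u64);
		return value;
	}

Chaining helpers of this shape is what lets a decoder such as decode_slab_depot_state_2_0() restore a structure field by field while a single offset tracks its position in the super block buffer.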
* * Return: VDO_SUCCESS or an error code. */ @@ -1156,6 +1169,9 @@ static struct vdo_component unpack_vdo_component_41_0(struct packed_vdo_componen /** * decode_vdo_component() - Decode the component data for the vdo itself out of the super block. + * @buffer: The buffer being decoded. + * @offset: The offset to start decoding from. + * @component: The vdo component structure to decode into. * * Return: VDO_SUCCESS or an error. */ @@ -1290,7 +1306,7 @@ void vdo_destroy_component_states(struct vdo_component_states *states) * understand. * @buffer: The buffer being decoded. * @offset: The offset to start decoding from. - * @geometry: The vdo geometry + * @geometry: The vdo geometry. * @states: An object to hold the successfully decoded state. * * Return: VDO_SUCCESS or an error. @@ -1329,7 +1345,7 @@ static int __must_check decode_components(u8 *buffer, size_t *offset, /** * vdo_decode_component_states() - Decode the payload of a super block. * @buffer: The buffer containing the encoded super block contents. - * @geometry: The vdo geometry + * @geometry: The vdo geometry. * @states: A pointer to hold the decoded states. * * Return: VDO_SUCCESS or an error. @@ -1383,6 +1399,9 @@ int vdo_validate_component_states(struct vdo_component_states *states, /** * vdo_encode_component_states() - Encode the state of all vdo components in the super block. + * @buffer: A buffer to store the encoding. + * @offset: The offset into the buffer to start the encoding. + * @states: The component states to encode. */ static void vdo_encode_component_states(u8 *buffer, size_t *offset, const struct vdo_component_states *states) @@ -1402,6 +1421,8 @@ static void vdo_encode_component_states(u8 *buffer, size_t *offset, /** * vdo_encode_super_block() - Encode a super block into its on-disk representation. + * @buffer: A buffer to store the encoding. + * @states: The component states to encode. */ void vdo_encode_super_block(u8 *buffer, struct vdo_component_states *states) { @@ -1426,6 +1447,7 @@ void vdo_encode_super_block(u8 *buffer, struct vdo_component_states *states) /** * vdo_decode_super_block() - Decode a super block from its on-disk representation. + * @buffer: The buffer to decode from. */ int vdo_decode_super_block(u8 *buffer) { diff --git a/drivers/md/dm-vdo/flush.c b/drivers/md/dm-vdo/flush.c index dd4fdee2ca0c..82a259ef1601 100644 --- a/drivers/md/dm-vdo/flush.c +++ b/drivers/md/dm-vdo/flush.c @@ -522,11 +522,7 @@ static void vdo_complete_flush(struct vdo_flush *flush) vdo_enqueue_completion(completion, BIO_Q_FLUSH_PRIORITY); } -/** - * initiate_drain() - Initiate a drain. - * - * Implements vdo_admin_initiator_fn. - */ +/** Implements vdo_admin_initiator_fn. */ static void initiate_drain(struct admin_state *state) { check_for_drain_complete(container_of(state, struct flusher, state)); diff --git a/drivers/md/dm-vdo/funnel-workqueue.c b/drivers/md/dm-vdo/funnel-workqueue.c index 0613c82bbe8e..8a79b33b8b09 100644 --- a/drivers/md/dm-vdo/funnel-workqueue.c +++ b/drivers/md/dm-vdo/funnel-workqueue.c @@ -372,6 +372,13 @@ static int make_simple_work_queue(const char *thread_name_prefix, const char *na /** * vdo_make_work_queue() - Create a work queue; if multiple threads are requested, completions will * be distributed to them in round-robin fashion. + * @thread_name_prefix: A prefix for the thread names to identify them as a vdo thread. + * @name: A base name to identify this queue. + * @owner: The vdo_thread structure to manage this queue. + * @type: The type of queue to create. 
+ * @thread_count: The number of actual threads handling this queue. + * @thread_privates: An array of private contexts, one for each thread; may be NULL. + * @queue_ptr: A pointer to return the new work queue. * * Each queue is associated with a struct vdo_thread which has a single vdo thread id. Regardless * of the actual number of queues and threads allocated here, code outside of the queue diff --git a/drivers/md/dm-vdo/io-submitter.c b/drivers/md/dm-vdo/io-submitter.c index 11d47770b54d..e26d75f8366d 100644 --- a/drivers/md/dm-vdo/io-submitter.c +++ b/drivers/md/dm-vdo/io-submitter.c @@ -118,6 +118,7 @@ static void send_bio_to_device(struct vio *vio, struct bio *bio) /** * vdo_submit_vio() - Submits a vio's bio to the underlying block device. May block if the device * is busy. This callback should be used by vios which did not attempt to merge. + * @completion: The vio to submit. */ void vdo_submit_vio(struct vdo_completion *completion) { @@ -133,7 +134,7 @@ void vdo_submit_vio(struct vdo_completion *completion) * The list will always contain at least one entry (the bio for the vio on which it is called), but * other bios may have been merged with it as well. * - * Return: bio The head of the bio list to submit. + * Return: The head of the bio list to submit. */ static struct bio *get_bio_list(struct vio *vio) { @@ -158,6 +159,7 @@ static struct bio *get_bio_list(struct vio *vio) /** * submit_data_vio() - Submit a data_vio's bio to the storage below along with * any bios that have been merged with it. + * @completion: The vio to submit. * * Context: This call may block and so should only be called from a bio thread. */ @@ -184,7 +186,7 @@ static void submit_data_vio(struct vdo_completion *completion) * There are two types of merging possible, forward and backward, which are distinguished by a flag * that uses kernel elevator terminology. * - * Return: the vio to merge to, NULL if no merging is possible. + * Return: The vio to merge to, NULL if no merging is possible. */ static struct vio *get_mergeable_locked(struct int_map *map, struct vio *vio, bool back_merge) @@ -262,7 +264,7 @@ static int merge_to_next_head(struct int_map *bio_map, struct vio *vio, * * Currently this is only used for data_vios, but is broken out for future use with metadata vios. * - * Return: whether or not the vio was merged. + * Return: Whether or not the vio was merged. */ static bool try_bio_map_merge(struct vio *vio) { @@ -306,7 +308,7 @@ static bool try_bio_map_merge(struct vio *vio) /** * vdo_submit_data_vio() - Submit I/O for a data_vio. - * @data_vio: the data_vio for which to issue I/O. + * @data_vio: The data_vio for which to issue I/O. * * If possible, this I/O will be merged other pending I/Os. Otherwise, the data_vio will be sent to * the appropriate bio zone directly. @@ -321,13 +323,13 @@ void vdo_submit_data_vio(struct data_vio *data_vio) /** * __submit_metadata_vio() - Submit I/O for a metadata vio. - * @vio: the vio for which to issue I/O - * @physical: the physical block number to read or write - * @callback: the bio endio function which will be called after the I/O completes - * @error_handler: the handler for submission or I/O errors (may be NULL) - * @operation: the type of I/O to perform - * @data: the buffer to read or write (may be NULL) - * @size: the I/O amount in bytes + * @vio: The vio for which to issue I/O. + * @physical: The physical block number to read or write. + * @callback: The bio endio function which will be called after the I/O completes. 
+ * @error_handler: The handler for submission or I/O errors; may be NULL. + * @operation: The type of I/O to perform. + * @data: The buffer to read or write; may be NULL. + * @size: The I/O amount in bytes. * * The vio is enqueued on a vdo bio queue so that bio submission (which may block) does not block * other vdo threads. @@ -441,7 +443,7 @@ int vdo_make_io_submitter(unsigned int thread_count, unsigned int rotation_inter /** * vdo_cleanup_io_submitter() - Tear down the io_submitter fields as needed for a physical layer. - * @io_submitter: The I/O submitter data to tear down (may be NULL). + * @io_submitter: The I/O submitter data to tear down; may be NULL. */ void vdo_cleanup_io_submitter(struct io_submitter *io_submitter) { diff --git a/drivers/md/dm-vdo/logical-zone.c b/drivers/md/dm-vdo/logical-zone.c index 026f031ffc9e..0a27e60a9dfd 100644 --- a/drivers/md/dm-vdo/logical-zone.c +++ b/drivers/md/dm-vdo/logical-zone.c @@ -159,21 +159,13 @@ static void check_for_drain_complete(struct logical_zone *zone) vdo_finish_draining(&zone->state); } -/** - * initiate_drain() - Initiate a drain. - * - * Implements vdo_admin_initiator_fn. - */ +/** Implements vdo_admin_initiator_fn. */ static void initiate_drain(struct admin_state *state) { check_for_drain_complete(container_of(state, struct logical_zone, state)); } -/** - * drain_logical_zone() - Drain a logical zone. - * - * Implements vdo_zone_action_fn. - */ +/** Implements vdo_zone_action_fn. */ static void drain_logical_zone(void *context, zone_count_t zone_number, struct vdo_completion *parent) { @@ -192,11 +184,7 @@ void vdo_drain_logical_zones(struct logical_zones *zones, parent); } -/** - * resume_logical_zone() - Resume a logical zone. - * - * Implements vdo_zone_action_fn. - */ +/** Implements vdo_zone_action_fn. */ static void resume_logical_zone(void *context, zone_count_t zone_number, struct vdo_completion *parent) { @@ -356,7 +344,7 @@ struct physical_zone *vdo_get_next_allocation_zone(struct logical_zone *zone) /** * vdo_dump_logical_zone() - Dump information about a logical zone to the log for debugging. - * @zone: The zone to dump + * @zone: The zone to dump. * * Context: the information is dumped in a thread-unsafe fashion. * diff --git a/drivers/md/dm-vdo/packer.c b/drivers/md/dm-vdo/packer.c index f70f5edabc10..666be6d557e1 100644 --- a/drivers/md/dm-vdo/packer.c +++ b/drivers/md/dm-vdo/packer.c @@ -35,10 +35,10 @@ static const struct version_number COMPRESSED_BLOCK_1_0 = { /** * vdo_get_compressed_block_fragment() - Get a reference to a compressed fragment from a compressed * block. - * @mapping_state [in] The mapping state for the look up. - * @compressed_block [in] The compressed block that was read from disk. - * @fragment_offset [out] The offset of the fragment within a compressed block. - * @fragment_size [out] The size of the fragment. + * @mapping_state: The mapping state describing the fragment. + * @block: The compressed block that was read from disk. + * @fragment_offset: The offset of the fragment within the compressed block. + * @fragment_size: The size of the fragment. * * Return: If a valid compressed fragment is found, VDO_SUCCESS; otherwise, VDO_INVALID_FRAGMENT if * the fragment is invalid. @@ -382,6 +382,7 @@ static void initialize_compressed_block(struct compressed_block *block, u16 size * @compression: The agent's compression_state to pack in to. * @data_vio: The data_vio to pack. * @offset: The offset into the compressed block at which to pack the fragment. 
+ * @slot: The slot number in the compressed block. * @block: The compressed block which will be written out when batch is fully packed. * * Return: The new amount of space used. @@ -705,11 +706,7 @@ void vdo_increment_packer_flush_generation(struct packer *packer) vdo_flush_packer(packer); } -/** - * initiate_drain() - Initiate a drain. - * - * Implements vdo_admin_initiator_fn. - */ +/** Implements vdo_admin_initiator_fn. */ static void initiate_drain(struct admin_state *state) { struct packer *packer = container_of(state, struct packer, state); diff --git a/drivers/md/dm-vdo/physical-zone.c b/drivers/md/dm-vdo/physical-zone.c index a43b5c45fab7..686eb7d714e6 100644 --- a/drivers/md/dm-vdo/physical-zone.c +++ b/drivers/md/dm-vdo/physical-zone.c @@ -60,7 +60,7 @@ static inline bool has_lock_type(const struct pbn_lock *lock, enum pbn_lock_type * vdo_is_pbn_read_lock() - Check whether a pbn_lock is a read lock. * @lock: The lock to check. * - * Return: true if the lock is a read lock. + * Return: True if the lock is a read lock. */ bool vdo_is_pbn_read_lock(const struct pbn_lock *lock) { @@ -75,6 +75,7 @@ static inline void set_pbn_lock_type(struct pbn_lock *lock, enum pbn_lock_type t /** * vdo_downgrade_pbn_write_lock() - Downgrade a PBN write lock to a PBN read lock. * @lock: The PBN write lock to downgrade. + * @compressed_write: True if the written block was a compressed block. * * The lock holder count is cleared and the caller is responsible for setting the new count. */ @@ -582,7 +583,7 @@ static bool continue_allocating(struct data_vio *data_vio) * that fails try the next if possible. * @data_vio: The data_vio needing an allocation. * - * Return: true if a block was allocated, if not the data_vio will have been dispatched so the + * Return: True if a block was allocated, if not the data_vio will have been dispatched so the * caller must not touch it. */ bool vdo_allocate_block_in_zone(struct data_vio *data_vio) diff --git a/drivers/md/dm-vdo/recovery-journal.c b/drivers/md/dm-vdo/recovery-journal.c index de58184f538f..9cc0f0ff1664 100644 --- a/drivers/md/dm-vdo/recovery-journal.c +++ b/drivers/md/dm-vdo/recovery-journal.c @@ -109,7 +109,7 @@ static atomic_t *get_decrement_counter(struct recovery_journal *journal, * @journal: The recovery journal. * @lock_number: The lock to check. * - * Return: true if the journal zone is locked. + * Return: True if the journal zone is locked. */ static bool is_journal_zone_locked(struct recovery_journal *journal, block_count_t lock_number) @@ -217,7 +217,7 @@ static struct recovery_journal_block * __must_check pop_free_list(struct recover * Indicates it has any uncommitted entries, which includes both entries not written and entries * written but not yet acknowledged. * - * Return: true if the block has any uncommitted entries. + * Return: True if the block has any uncommitted entries. */ static inline bool __must_check is_block_dirty(const struct recovery_journal_block *block) { @@ -228,7 +228,7 @@ static inline bool __must_check is_block_dirty(const struct recovery_journal_blo * is_block_empty() - Check whether a journal block is empty. * @block: The block to check. * - * Return: true if the block has no entries. + * Return: True if the block has no entries. */ static inline bool __must_check is_block_empty(const struct recovery_journal_block *block) { @@ -239,7 +239,7 @@ static inline bool __must_check is_block_empty(const struct recovery_journal_blo * is_block_full() - Check whether a journal block is full. * @block: The block to check. 
* - * Return: true if the block is full. + * Return: True if the block is full. */ static inline bool __must_check is_block_full(const struct recovery_journal_block *block) { @@ -260,6 +260,8 @@ static void assert_on_journal_thread(struct recovery_journal *journal, /** * continue_waiter() - Release a data_vio from the journal. + * @waiter: The data_vio waiting on journal activity. + * @context: The result of the journal operation. * * Invoked whenever a data_vio is to be released from the journal, either because its entry was * committed to disk, or because there was an error. Implements waiter_callback_fn. @@ -273,7 +275,7 @@ static void continue_waiter(struct vdo_waiter *waiter, void *context) * has_block_waiters() - Check whether the journal has any waiters on any blocks. * @journal: The journal in question. * - * Return: true if any block has a waiter. + * Return: True if any block has a waiter. */ static inline bool has_block_waiters(struct recovery_journal *journal) { @@ -296,7 +298,7 @@ static void notify_commit_waiters(struct recovery_journal *journal); * suspend_lock_counter() - Prevent the lock counter from notifying. * @counter: The counter. * - * Return: true if the lock counter was not notifying and hence the suspend was efficacious. + * Return: True if the lock counter was not notifying and hence the suspend was efficacious. */ static bool suspend_lock_counter(struct lock_counter *counter) { @@ -416,7 +418,7 @@ sequence_number_t vdo_get_recovery_journal_current_sequence_number(struct recove * * The head is the lowest sequence number of the block map head and the slab journal head. * - * Return: the head of the journal. + * Return: The head of the journal. */ static inline sequence_number_t get_recovery_journal_head(const struct recovery_journal *journal) { @@ -535,7 +537,7 @@ static void initialize_journal_state(struct recovery_journal *journal) * vdo_get_recovery_journal_length() - Get the number of usable recovery journal blocks. * @journal_size: The size of the recovery journal in blocks. * - * Return: the number of recovery journal blocks usable for entries. + * Return: The number of recovery journal blocks usable for entries. */ block_count_t vdo_get_recovery_journal_length(block_count_t journal_size) { @@ -1078,6 +1080,8 @@ static void update_usages(struct recovery_journal *journal, struct data_vio *dat /** * assign_entry() - Assign an entry waiter to the active block. + * @waiter: The data_vio. + * @context: The recovery journal block. * * Implements waiter_callback_fn. */ @@ -1165,6 +1169,8 @@ static void recycle_journal_block(struct recovery_journal_block *block) /** * continue_committed_waiter() - invoked whenever a VIO is to be released from the journal because * its entry was committed to disk. + * @waiter: The data_vio waiting on a journal write. + * @context: A pointer to the recovery journal. * * Implements waiter_callback_fn. */ @@ -1362,6 +1368,8 @@ static void add_queued_recovery_entries(struct recovery_journal_block *block) /** * write_block() - Issue a block for writing. + * @waiter: The recovery journal block to write. + * @context: Not used. * * Implements waiter_callback_fn. */ @@ -1611,11 +1619,7 @@ void vdo_release_journal_entry_lock(struct recovery_journal *journal, smp_mb__after_atomic(); } -/** - * initiate_drain() - Initiate a drain. - * - * Implements vdo_admin_initiator_fn. - */ +/** Implements vdo_admin_initiator_fn. 
*/ static void initiate_drain(struct admin_state *state) { check_for_drain_complete(container_of(state, struct recovery_journal, state)); diff --git a/drivers/md/dm-vdo/slab-depot.c b/drivers/md/dm-vdo/slab-depot.c index f3d80ff7bef5..034ecaa51f48 100644 --- a/drivers/md/dm-vdo/slab-depot.c +++ b/drivers/md/dm-vdo/slab-depot.c @@ -40,7 +40,7 @@ static const bool NORMAL_OPERATION = true; /** * get_lock() - Get the lock object for a slab journal block by sequence number. - * @journal: vdo_slab journal to retrieve from. + * @journal: The vdo_slab journal to retrieve from. * @sequence_number: Sequence number of the block. * * Return: The lock object for the given sequence number. @@ -110,7 +110,7 @@ static void initialize_journal_state(struct slab_journal *journal) * block_is_full() - Check whether a journal block is full. * @journal: The slab journal for the block. * - * Return: true if the tail block is full. + * Return: True if the tail block is full. */ static bool __must_check block_is_full(struct slab_journal *journal) { @@ -127,10 +127,11 @@ static void release_journal_locks(struct vdo_waiter *waiter, void *context); /** * is_slab_journal_blank() - Check whether a slab's journal is blank. + * @slab: The slab to check. * * A slab journal is blank if it has never had any entries recorded in it. * - * Return: true if the slab's journal has never been modified. + * Return: True if the slab's journal has never been modified. */ static bool is_slab_journal_blank(const struct vdo_slab *slab) { @@ -227,6 +228,7 @@ static u8 __must_check compute_fullness_hint(struct slab_depot *depot, /** * check_summary_drain_complete() - Check whether an allocators summary has finished draining. + * @allocator: The allocator to check. */ static void check_summary_drain_complete(struct block_allocator *allocator) { @@ -349,7 +351,7 @@ static void launch_write(struct slab_summary_block *block) /** * update_slab_summary_entry() - Update the entry for a slab. - * @slab: The slab whose entry is to be updated + * @slab: The slab whose entry is to be updated. * @waiter: The waiter that is updating the summary. * @tail_block_offset: The offset of the slab journal's tail block. * @load_ref_counts: Whether the reference counts must be loaded from disk on the vdo load. @@ -654,6 +656,7 @@ static void update_tail_block_location(struct slab_journal *journal) /** * reopen_slab_journal() - Reopen a slab's journal by emptying it and then adding pending entries. + * @slab: The slab to reopen. */ static void reopen_slab_journal(struct vdo_slab *slab) { @@ -839,8 +842,6 @@ static void commit_tail(struct slab_journal *journal) * @sbn: The slab block number of the entry to encode. * @operation: The type of the entry. * @increment: True if this is an increment. - * - * Exposed for unit tests. */ static void encode_slab_journal_entry(struct slab_journal_block_header *tail_header, slab_journal_payload *payload, @@ -951,7 +952,7 @@ static inline block_count_t journal_length(const struct slab_journal *journal) * @parent: The completion to notify when there is space to add the entry if the entry could not be * added immediately. * - * Return: true if the entry was added immediately. + * Return: True if the entry was added immediately. 
*/ bool vdo_attempt_replay_into_slab(struct vdo_slab *slab, physical_block_number_t pbn, enum journal_operation operation, bool increment, @@ -1003,7 +1004,7 @@ bool vdo_attempt_replay_into_slab(struct vdo_slab *slab, physical_block_number_t * requires_reaping() - Check whether the journal must be reaped before adding new entries. * @journal: The journal to check. * - * Return: true if the journal must be reaped. + * Return: True if the journal must be reaped. */ static bool requires_reaping(const struct slab_journal *journal) { @@ -1275,6 +1276,8 @@ static void dirty_block(struct reference_block *block) /** * get_reference_block() - Get the reference block that covers the given block index. + * @slab: The slab containing the references. + * @index: The index of the physical block. */ static struct reference_block * __must_check get_reference_block(struct vdo_slab *slab, slab_block_number index) @@ -1379,7 +1382,8 @@ static void prioritize_slab(struct vdo_slab *slab) /** * adjust_free_block_count() - Adjust the free block count and (if needed) reprioritize the slab. - * @incremented: true if the free block count went up. + * @slab: The slab. + * @incremented: True if the free block count went up. */ static void adjust_free_block_count(struct vdo_slab *slab, bool incremented) { @@ -1885,6 +1889,7 @@ static void add_entries(struct slab_journal *journal) /** * reset_search_cursor() - Reset the free block search back to the first reference counter in the * first reference block of a slab. + * @slab: The slab. */ static void reset_search_cursor(struct vdo_slab *slab) { @@ -1892,17 +1897,17 @@ static void reset_search_cursor(struct vdo_slab *slab) cursor->block = cursor->first_block; cursor->index = 0; - /* Unit tests have slabs with only one reference block (and it's a runt). */ cursor->end_index = min_t(u32, COUNTS_PER_BLOCK, slab->block_count); } /** * advance_search_cursor() - Advance the search cursor to the start of the next reference block in - * a slab, + * a slab. + * @slab: The slab. * * Wraps around to the first reference block if the current block is the last reference block. * - * Return: true unless the cursor was at the last reference block. + * Return: True unless the cursor was at the last reference block. */ static bool advance_search_cursor(struct vdo_slab *slab) { @@ -1933,6 +1938,9 @@ static bool advance_search_cursor(struct vdo_slab *slab) /** * vdo_adjust_reference_count_for_rebuild() - Adjust the reference count of a block during rebuild. + * @depot: The slab depot. + * @pbn: The physical block number to adjust. + * @operation: The type of operation. * * Return: VDO_SUCCESS or an error. */ @@ -2038,9 +2046,7 @@ static inline slab_block_number find_zero_byte_in_word(const u8 *word_ptr, * @slab: The slab counters to scan. * @index_ptr: A pointer to hold the array index of the free block. * - * Exposed for unit testing. - * - * Return: true if a free block was found in the specified range. + * Return: True if a free block was found in the specified range. */ static bool find_free_block(const struct vdo_slab *slab, slab_block_number *index_ptr) { @@ -2097,7 +2103,7 @@ static bool find_free_block(const struct vdo_slab *slab, slab_block_number *inde * @slab: The slab to search. * @free_index_ptr: A pointer to receive the array index of the zero reference count. * - * Return: true if an unreferenced counter was found. + * Return: True if an unreferenced counter was found.
*/ static bool search_current_reference_block(const struct vdo_slab *slab, slab_block_number *free_index_ptr) @@ -2116,7 +2122,7 @@ static bool search_current_reference_block(const struct vdo_slab *slab, * counter index saved in the search cursor and searching up to the end of the last reference * block. The search does not wrap. * - * Return: true if an unreferenced counter was found. + * Return: True if an unreferenced counter was found. */ static bool search_reference_blocks(struct vdo_slab *slab, slab_block_number *free_index_ptr) @@ -2136,6 +2142,8 @@ static bool search_reference_blocks(struct vdo_slab *slab, /** * make_provisional_reference() - Do the bookkeeping for making a provisional reference. + * @slab: The slab. + * @block_number: The index for the physical block to reference. */ static void make_provisional_reference(struct vdo_slab *slab, slab_block_number block_number) @@ -2155,6 +2163,7 @@ static void make_provisional_reference(struct vdo_slab *slab, /** * dirty_all_reference_blocks() - Mark all reference count blocks in a slab as dirty. + * @slab: The slab. */ static void dirty_all_reference_blocks(struct vdo_slab *slab) { @@ -2173,10 +2182,10 @@ static inline bool journal_points_equal(struct journal_point first, /** * match_bytes() - Check an 8-byte word for bytes matching the value specified - * @input: A word to examine the bytes of - * @match: The byte value sought + * @input: A word to examine the bytes of. + * @match: The byte value sought. * - * Return: 1 in each byte when the corresponding input byte matched, 0 otherwise + * Return: 1 in each byte when the corresponding input byte matched, 0 otherwise. */ static inline u64 match_bytes(u64 input, u8 match) { @@ -2191,12 +2200,12 @@ static inline u64 match_bytes(u64 input, u8 match) /** * count_valid_references() - Process a newly loaded refcount array - * @counters: the array of counters from a metadata block + * @counters: The array of counters from a metadata block. * - * Scan a 8-byte-aligned array of counters, fixing up any "provisional" values that weren't - * cleaned up at shutdown, changing them internally to "empty". + * Scan an 8-byte-aligned array of counters, fixing up any provisional values that + * weren't cleaned up at shutdown, changing them internally to zero. * - * Return: the number of blocks that are referenced (counters not "empty") + * Return: The number of blocks with a non-zero reference count. */ static unsigned int count_valid_references(vdo_refcount_t *counters) { @@ -2351,6 +2360,7 @@ static void load_reference_block_group(struct vdo_waiter *waiter, void *context) /** * load_reference_blocks() - Load a slab's reference blocks from the underlying storage into a * pre-allocated reference counter. + * @slab: The slab. */ static void load_reference_blocks(struct vdo_slab *slab) { @@ -2375,6 +2385,7 @@ static void load_reference_blocks(struct vdo_slab *slab) /** * drain_slab() - Drain all reference count I/O. + * @slab: The slab. * * Depending upon the type of drain being performed (as recorded in the ref_count's vdo_slab), the * reference blocks may be loaded from disk or dirty reference blocks may be written out. @@ -2564,6 +2575,7 @@ static void read_slab_journal_tail(struct vdo_waiter *waiter, void *context) /** * load_slab_journal() - Load a slab's journal by reading the journal's tail. + * @slab: The slab. 
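The match_bytes() and count_valid_references() kernel-doc above describe a word-at-a-time scan of the reference counters. A minimal, self-contained sketch of the byte-matching trick follows; the name and layout are illustrative only and make no claim to mirror the in-tree code:

	/* Returns 0x01 in each byte of @input that equals @match, 0x00 elsewhere. */
	static inline u64 match_bytes_sketch(u64 input, u8 match)
	{
		/* XOR maps every matching byte to zero. */
		u64 delta = input ^ (match * 0x0101010101010101ULL);
		/* Set the high bit of each byte that is below 0x80... */
		u64 top_clear = ~delta & 0x8080808080808080ULL;
		/* ...and of each byte whose low seven bits are all zero. */
		u64 low_zero = 0x8080808080808080ULL - (delta & 0x7f7f7f7f7f7f7f7fULL);

		/* Only a byte equal to zero passes both tests. */
		return (top_clear & low_zero) >> 7;
	}

Because the subtraction operates only on the low seven bits of each byte, no borrow crosses a byte boundary and the result is exact per byte; a caller can then total matches across words (for example with hweight64()) instead of testing every counter individually.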
*/ static void load_slab_journal(struct vdo_slab *slab) { @@ -2663,11 +2675,7 @@ static void queue_slab(struct vdo_slab *slab) prioritize_slab(slab); } -/** - * initiate_slab_action() - Initiate a slab action. - * - * Implements vdo_admin_initiator_fn. - */ +/** Implements vdo_admin_initiator_fn. */ static void initiate_slab_action(struct admin_state *state) { struct vdo_slab *slab = container_of(state, struct vdo_slab, state); @@ -2720,7 +2728,7 @@ static struct vdo_slab *get_next_slab(struct slab_scrubber *scrubber) * has_slabs_to_scrub() - Check whether a scrubber has slabs to scrub. * @scrubber: The scrubber to check. * - * Return: true if the scrubber has slabs to scrub. + * Return: True if the scrubber has slabs to scrub. */ static inline bool __must_check has_slabs_to_scrub(struct slab_scrubber *scrubber) { @@ -2741,6 +2749,7 @@ static void uninitialize_scrubber_vio(struct slab_scrubber *scrubber) * finish_scrubbing() - Stop scrubbing, either because there are no more slabs to scrub or because * there's been an error. * @scrubber: The scrubber. + * @result: The result of the scrubbing operation. */ static void finish_scrubbing(struct slab_scrubber *scrubber, int result) { @@ -3132,11 +3141,13 @@ static struct vdo_slab *next_slab(struct slab_iterator *iterator) /** * abort_waiter() - Abort vios waiting to make journal entries when read-only. + * @waiter: A waiting data_vio. + * @context: Not used. * * This callback is invoked on all vios waiting to make slab journal entries after the VDO has gone * into read-only mode. Implements waiter_callback_fn. */ -static void abort_waiter(struct vdo_waiter *waiter, void *context __always_unused) +static void abort_waiter(struct vdo_waiter *waiter, void __always_unused *context) { struct reference_updater *updater = container_of(waiter, struct reference_updater, waiter); @@ -3536,7 +3547,7 @@ static void initiate_load(struct admin_state *state) /** * vdo_notify_slab_journals_are_recovered() - Inform a block allocator that its slab journals have * been recovered from the recovery journal. - * @completion The allocator completion + * @completion: The allocator completion. */ void vdo_notify_slab_journals_are_recovered(struct vdo_completion *completion) { @@ -3775,7 +3786,7 @@ static int initialize_slab_journal(struct vdo_slab *slab) * in the slab. * @allocator: The block allocator to which the slab belongs. * @slab_number: The slab number of the slab. - * @is_new: true if this slab is being allocated as part of a resize. + * @is_new: True if this slab is being allocated as part of a resize. * @slab_ptr: A pointer to receive the new slab. * * Return: VDO_SUCCESS or an error code. @@ -3894,11 +3905,7 @@ void vdo_abandon_new_slabs(struct slab_depot *depot) vdo_free(vdo_forget(depot->new_slabs)); } -/** - * get_allocator_thread_id() - Get the ID of the thread on which a given allocator operates. - * - * Implements vdo_zone_thread_getter_fn. - */ +/** Implements vdo_zone_thread_getter_fn. */ static thread_id_t get_allocator_thread_id(void *context, zone_count_t zone_number) { return ((struct slab_depot *) context)->allocators[zone_number].thread_id; @@ -3911,7 +3918,7 @@ static thread_id_t get_allocator_thread_id(void *context, zone_count_t zone_numb * @recovery_lock: The sequence number of the recovery journal block whose locks should be * released. * - * Return: true if the journal does hold a lock on the specified block (which it will release). + * Return: True if the journal released a lock on the specified block. 
*/ static bool __must_check release_recovery_journal_lock(struct slab_journal *journal, sequence_number_t recovery_lock) @@ -3955,6 +3962,8 @@ static void release_tail_block_locks(void *context, zone_count_t zone_number, /** * prepare_for_tail_block_commit() - Prepare to commit oldest tail blocks. + * @context: The slab depot. + * @parent: The parent operation. * * Implements vdo_action_preamble_fn. */ @@ -3968,6 +3977,7 @@ static void prepare_for_tail_block_commit(void *context, struct vdo_completion * /** * schedule_tail_block_commit() - Schedule a tail block commit if necessary. + * @context: The slab depot. * * This method should not be called directly. Rather, call vdo_schedule_default_action() on the * depot's action manager. @@ -4361,6 +4371,7 @@ struct slab_depot_state_2_0 vdo_record_slab_depot(const struct slab_depot *depot /** * vdo_allocate_reference_counters() - Allocate the reference counters for all slabs in the depot. + * @depot: The slab depot. * * Context: This method may be called only before entering normal operation from the load thread. * @@ -4615,7 +4626,9 @@ static void load_summary_endio(struct bio *bio) } /** - * load_slab_summary() - The preamble of a load operation. + * load_slab_summary() - Load the slab summary before the slab data. + * @context: The slab depot. + * @parent: The load operation. * * Implements vdo_action_preamble_fn. */ @@ -4731,7 +4744,7 @@ void vdo_update_slab_depot_size(struct slab_depot *depot) * vdo_prepare_to_grow_slab_depot() - Allocate new memory needed for a resize of a slab depot to * the given size. * @depot: The depot to prepare to resize. - * @partition: The new depot partition + * @partition: The new depot partition. * * Return: VDO_SUCCESS or an error. */ @@ -4781,6 +4794,7 @@ int vdo_prepare_to_grow_slab_depot(struct slab_depot *depot, /** * finish_registration() - Finish registering new slabs now that all of the allocators have * received their new slabs. + * @context: The slab depot. * * Implements vdo_action_conclusion_fn. */ diff --git a/drivers/md/dm-vdo/vdo.c b/drivers/md/dm-vdo/vdo.c index 80b608674022..09fd0628d18c 100644 --- a/drivers/md/dm-vdo/vdo.c +++ b/drivers/md/dm-vdo/vdo.c @@ -181,6 +181,8 @@ static void assign_thread_ids(struct thread_config *config, /** * initialize_thread_config() - Initialize the thread mapping + * @counts: The number and types of threads to create. + * @config: The thread_config to initialize. * * If the logical, physical, and hash zone counts are all 0, a single thread will be shared by all * three plus the packer and recovery journal. Otherwise, there must be at least one of each type, @@ -884,6 +886,7 @@ const struct admin_state_code *vdo_get_admin_state(const struct vdo *vdo) /** * record_vdo() - Record the state of the VDO for encoding in the super block. + * @vdo: The vdo. */ static void record_vdo(struct vdo *vdo) { @@ -1277,7 +1280,7 @@ void vdo_enter_read_only_mode(struct vdo *vdo, int error_code) * vdo_is_read_only() - Check whether the VDO is read-only. * @vdo: The vdo. * - * Return: true if the vdo is read-only. + * Return: True if the vdo is read-only. * * This method may be called from any thread, as opposed to examining the VDO's state field which * is only safe to check from the admin thread. @@ -1291,7 +1294,7 @@ bool vdo_is_read_only(struct vdo *vdo) * vdo_in_read_only_mode() - Check whether a vdo is in read-only mode. * @vdo: The vdo to query. * - * Return: true if the vdo is in read-only mode. + * Return: True if the vdo is in read-only mode. 
*/ bool vdo_in_read_only_mode(const struct vdo *vdo) { @@ -1302,7 +1305,7 @@ bool vdo_in_read_only_mode(const struct vdo *vdo) * vdo_in_recovery_mode() - Check whether the vdo is in recovery mode. * @vdo: The vdo to query. * - * Return: true if the vdo is in recovery mode. + * Return: True if the vdo is in recovery mode. */ bool vdo_in_recovery_mode(const struct vdo *vdo) { diff --git a/drivers/md/dm-vdo/vdo.h b/drivers/md/dm-vdo/vdo.h index 483ae873e002..1aaba73997b7 100644 --- a/drivers/md/dm-vdo/vdo.h +++ b/drivers/md/dm-vdo/vdo.h @@ -279,8 +279,10 @@ static inline bool vdo_uses_bio_ack_queue(struct vdo *vdo) /** * typedef vdo_filter_fn - Method type for vdo matching methods. + * @vdo: The vdo to match. + * @context: A parameter for the filter to use. * - * A filter function returns false if the vdo doesn't match. + * Return: True if the vdo matches the filter criteria, false if it doesn't. */ typedef bool (*vdo_filter_fn)(struct vdo *vdo, const void *context); diff --git a/drivers/md/dm-vdo/vio.c b/drivers/md/dm-vdo/vio.c index 8fc22fb14196..5ffc867d9c5e 100644 --- a/drivers/md/dm-vdo/vio.c +++ b/drivers/md/dm-vdo/vio.c @@ -398,8 +398,9 @@ void free_vio_pool(struct vio_pool *pool) /** * is_vio_pool_busy() - Check whether an vio pool has outstanding entries. + * @pool: The vio pool. * - * Return: true if the pool is busy. + * Return: True if the pool is busy. */ bool is_vio_pool_busy(struct vio_pool *pool) { diff --git a/drivers/md/dm-vdo/vio.h b/drivers/md/dm-vdo/vio.h index 4bfcb21901f1..7a8a6819aec4 100644 --- a/drivers/md/dm-vdo/vio.h +++ b/drivers/md/dm-vdo/vio.h @@ -156,8 +156,7 @@ static inline enum vdo_completion_priority get_metadata_priority(struct vio *vio /** * continue_vio() - Enqueue a vio to run its next callback. * @vio: The vio to continue. - * - * Return: The result of the current operation. + * @result: The result of the current operation. */ static inline void continue_vio(struct vio *vio, int result) { @@ -172,6 +171,9 @@ void vdo_count_completed_bios(struct bio *bio); /** * continue_vio_after_io() - Continue a vio now that its I/O has returned. + * @vio: The vio to continue. + * @callback: The next operation for this vio. + * @thread: Which thread to run the next operation on. */ static inline void continue_vio_after_io(struct vio *vio, vdo_action_fn callback, thread_id_t thread) diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c index 72047b47a7a0..c79de517afee 100644 --- a/drivers/md/dm-verity-fec.c +++ b/drivers/md/dm-verity-fec.c @@ -177,9 +177,11 @@ error: if (r < 0 && neras) DMERR_LIMIT("%s: FEC %llu: failed to correct: %d", v->data_dev->name, (unsigned long long)rsb, r); - else if (r > 0) + else if (r > 0) { DMWARN_LIMIT("%s: FEC %llu: corrected %d errors", v->data_dev->name, (unsigned long long)rsb, r); + atomic64_inc(&v->fec->corrected); + } return r; } @@ -188,14 +190,13 @@ error: * Locate data block erasures using verity hashes. 
*/ static int fec_is_erasure(struct dm_verity *v, struct dm_verity_io *io, - u8 *want_digest, u8 *data) + const u8 *want_digest, const u8 *data) { if (unlikely(verity_hash(v, io, data, 1 << v->data_dev_block_bits, - verity_io_real_digest(v, io)))) + io->tmp_digest))) return 0; - return memcmp(verity_io_real_digest(v, io), want_digest, - v->digest_size) != 0; + return memcmp(io->tmp_digest, want_digest, v->digest_size) != 0; } /* @@ -328,7 +329,7 @@ static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio) if (fio->bufs[n]) continue; - fio->bufs[n] = mempool_alloc(&v->fec->extra_pool, GFP_NOWAIT); + fio->bufs[n] = kmem_cache_alloc(v->fec->cache, GFP_NOWAIT); /* we can manage with even one buffer if necessary */ if (unlikely(!fio->bufs[n])) break; @@ -362,7 +363,7 @@ static void fec_init_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio) */ static int fec_decode_rsb(struct dm_verity *v, struct dm_verity_io *io, struct dm_verity_fec_io *fio, u64 rsb, u64 offset, - bool use_erasures) + const u8 *want_digest, bool use_erasures) { int r, neras = 0; unsigned int pos; @@ -388,12 +389,11 @@ static int fec_decode_rsb(struct dm_verity *v, struct dm_verity_io *io, /* Always re-validate the corrected block against the expected hash */ r = verity_hash(v, io, fio->output, 1 << v->data_dev_block_bits, - verity_io_real_digest(v, io)); + io->tmp_digest); if (unlikely(r < 0)) return r; - if (memcmp(verity_io_real_digest(v, io), verity_io_want_digest(v, io), - v->digest_size)) { + if (memcmp(io->tmp_digest, want_digest, v->digest_size)) { DMERR_LIMIT("%s: FEC %llu: failed to correct (%d erasures)", v->data_dev->name, (unsigned long long)rsb, neras); return -EILSEQ; @@ -404,7 +404,8 @@ static int fec_decode_rsb(struct dm_verity *v, struct dm_verity_io *io, /* Correct errors in a block. Copies corrected block to dest. */ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io, - enum verity_block_type type, sector_t block, u8 *dest) + enum verity_block_type type, const u8 *want_digest, + sector_t block, u8 *dest) { int r; struct dm_verity_fec_io *fio = fec_io(io); @@ -413,10 +414,8 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io, if (!verity_fec_is_enabled(v)) return -EOPNOTSUPP; - if (fio->level >= DM_VERITY_FEC_MAX_RECURSION) { - DMWARN_LIMIT("%s: FEC: recursion too deep", v->data_dev->name); + if (fio->level) return -EIO; - } fio->level++; @@ -447,9 +446,9 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io, * them first. Do a second attempt with erasures if the corruption is * bad enough. 
*/ - r = fec_decode_rsb(v, io, fio, rsb, offset, false); + r = fec_decode_rsb(v, io, fio, rsb, offset, want_digest, false); if (r < 0) { - r = fec_decode_rsb(v, io, fio, rsb, offset, true); + r = fec_decode_rsb(v, io, fio, rsb, offset, want_digest, true); if (r < 0) goto done; } @@ -479,7 +478,8 @@ void verity_fec_finish_io(struct dm_verity_io *io) mempool_free(fio->bufs[n], &f->prealloc_pool); fec_for_each_extra_buffer(fio, n) - mempool_free(fio->bufs[n], &f->extra_pool); + if (fio->bufs[n]) + kmem_cache_free(f->cache, fio->bufs[n]); mempool_free(fio->output, &f->output_pool); } @@ -531,7 +531,6 @@ void verity_fec_dtr(struct dm_verity *v) mempool_exit(&f->rs_pool); mempool_exit(&f->prealloc_pool); - mempool_exit(&f->extra_pool); mempool_exit(&f->output_pool); kmem_cache_destroy(f->cache); @@ -784,12 +783,6 @@ int verity_fec_ctr(struct dm_verity *v) return ret; } - ret = mempool_init_slab_pool(&f->extra_pool, 0, f->cache); - if (ret) { - ti->error = "Cannot allocate FEC buffer extra pool"; - return ret; - } - /* Preallocate an output buffer for each thread */ ret = mempool_init_kmalloc_pool(&f->output_pool, num_online_cpus(), 1 << v->data_dev_block_bits); diff --git a/drivers/md/dm-verity-fec.h b/drivers/md/dm-verity-fec.h index 09123a612953..5fd267873812 100644 --- a/drivers/md/dm-verity-fec.h +++ b/drivers/md/dm-verity-fec.h @@ -23,9 +23,6 @@ #define DM_VERITY_FEC_BUF_MAX \ (1 << (PAGE_SHIFT - DM_VERITY_FEC_BUF_RS_BITS)) -/* maximum recursion level for verity_fec_decode */ -#define DM_VERITY_FEC_MAX_RECURSION 4 - #define DM_VERITY_OPT_FEC_DEV "use_fec_from_device" #define DM_VERITY_OPT_FEC_BLOCKS "fec_blocks" #define DM_VERITY_OPT_FEC_START "fec_start" @@ -45,9 +42,9 @@ struct dm_verity_fec { unsigned char rsn; /* N of RS(M, N) */ mempool_t rs_pool; /* mempool for fio->rs */ mempool_t prealloc_pool; /* mempool for preallocated buffers */ - mempool_t extra_pool; /* mempool for extra buffers */ mempool_t output_pool; /* mempool for output */ struct kmem_cache *cache; /* cache for buffers */ + atomic64_t corrected; /* corrected errors */ }; /* per-bio data */ @@ -68,8 +65,8 @@ struct dm_verity_fec_io { extern bool verity_fec_is_enabled(struct dm_verity *v); extern int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io, - enum verity_block_type type, sector_t block, - u8 *dest); + enum verity_block_type type, const u8 *want_digest, + sector_t block, u8 *dest); extern unsigned int verity_fec_status_table(struct dm_verity *v, unsigned int sz, char *result, unsigned int maxlen); @@ -99,6 +96,7 @@ static inline bool verity_fec_is_enabled(struct dm_verity *v) static inline int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io, enum verity_block_type type, + const u8 *want_digest, sector_t block, u8 *dest) { return -EOPNOTSUPP; diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index 66a00a8ccb39..5c17472d7896 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -117,11 +117,25 @@ static sector_t verity_position_at_level(struct dm_verity *v, sector_t block, int verity_hash(struct dm_verity *v, struct dm_verity_io *io, const u8 *data, size_t len, u8 *digest) { - struct shash_desc *desc = &io->hash_desc; + struct shash_desc *desc; int r; + if (likely(v->use_sha256_lib)) { + struct sha256_ctx *ctx = &io->hash_ctx.sha256; + + /* + * Fast path using SHA-256 library. This is enabled only for + * verity version 1, where the salt is at the beginning. 
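This fast path copies a pre-salted SHA-256 state rather than re-hashing the salt for every block. A sketch of how such a state could be prepared once at construction time is below; it assumes the v->salt, v->salt_size and v->initial_hashstate.sha256 fields shown in this patch plus the sha256_init()/sha256_update() library entry points, and is illustrative rather than the actual setup code:

	#include <crypto/sha2.h>
	#include <linux/slab.h>

	/* Hypothetical helper: absorb the version-1 salt once, then reuse the state. */
	static int verity_prepare_salted_state(struct dm_verity *v)
	{
		struct sha256_ctx *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);

		if (!ctx)
			return -ENOMEM;

		sha256_init(ctx);
		sha256_update(ctx, v->salt, v->salt_size);	/* v1: salt precedes the data */
		v->initial_hashstate.sha256 = ctx;
		return 0;
	}

Each per-block hash then only has to clone this state, absorb the block, and finalize, avoiding both the per-block salt update and the shash import used by the generic path.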
+ */ + *ctx = *v->initial_hashstate.sha256; + sha256_update(ctx, data, len); + sha256_final(ctx, digest); + return 0; + } + + desc = &io->hash_ctx.shash; desc->tfm = v->shash_tfm; - if (unlikely(v->initial_hashstate == NULL)) { + if (unlikely(v->initial_hashstate.shash == NULL)) { /* Version 0: salt at end */ r = crypto_shash_init(desc) ?: crypto_shash_update(desc, data, len) ?: @@ -129,7 +143,7 @@ int verity_hash(struct dm_verity *v, struct dm_verity_io *io, crypto_shash_final(desc, digest); } else { /* Version 1: salt at beginning */ - r = crypto_shash_import(desc, v->initial_hashstate) ?: + r = crypto_shash_import(desc, v->initial_hashstate.shash) ?: crypto_shash_finup(desc, data, len, digest); } if (unlikely(r)) @@ -215,12 +229,12 @@ out: * Verify hash of a metadata block pertaining to the specified data block * ("block" argument) at a specified level ("level" argument). * - * On successful return, verity_io_want_digest(v, io) contains the hash value - * for a lower tree level or for the data block (if we're at the lowest level). + * On successful return, want_digest contains the hash value for a lower tree + * level or for the data block (if we're at the lowest level). * * If "skip_unverified" is true, unverified buffer is skipped and 1 is returned. * If "skip_unverified" is false, unverified buffer is hashed and verified - * against current value of verity_io_want_digest(v, io). + * against current value of want_digest. */ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io, sector_t block, int level, bool skip_unverified, @@ -259,7 +273,7 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io, if (IS_ERR(data)) return r; if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_METADATA, - hash_block, data) == 0) { + want_digest, hash_block, data) == 0) { aux = dm_bufio_get_aux_data(buf); aux->hash_verified = 1; goto release_ok; @@ -279,11 +293,11 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io, } r = verity_hash(v, io, data, 1 << v->hash_dev_block_bits, - verity_io_real_digest(v, io)); + io->tmp_digest); if (unlikely(r < 0)) goto release_ret_r; - if (likely(memcmp(verity_io_real_digest(v, io), want_digest, + if (likely(memcmp(io->tmp_digest, want_digest, v->digest_size) == 0)) aux->hash_verified = 1; else if (static_branch_unlikely(&use_bh_wq_enabled) && io->in_bh) { @@ -294,7 +308,7 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io, r = -EAGAIN; goto release_ret_r; } else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_METADATA, - hash_block, data) == 0) + want_digest, hash_block, data) == 0) aux->hash_verified = 1; else if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_METADATA, @@ -358,7 +372,8 @@ out: } static noinline int verity_recheck(struct dm_verity *v, struct dm_verity_io *io, - sector_t cur_block, u8 *dest) + const u8 *want_digest, sector_t cur_block, + u8 *dest) { struct page *page; void *buffer; @@ -382,12 +397,11 @@ static noinline int verity_recheck(struct dm_verity *v, struct dm_verity_io *io, goto free_ret; r = verity_hash(v, io, buffer, 1 << v->data_dev_block_bits, - verity_io_real_digest(v, io)); + io->tmp_digest); if (unlikely(r)) goto free_ret; - if (memcmp(verity_io_real_digest(v, io), - verity_io_want_digest(v, io), v->digest_size)) { + if (memcmp(io->tmp_digest, want_digest, v->digest_size)) { r = -EIO; goto free_ret; } @@ -402,9 +416,13 @@ free_ret: static int verity_handle_data_hash_mismatch(struct dm_verity *v, struct dm_verity_io *io, - struct bio *bio, 
sector_t blkno, - u8 *data) + struct bio *bio, + struct pending_block *block) { + const u8 *want_digest = block->want_digest; + sector_t blkno = block->blkno; + u8 *data = block->data; + if (static_branch_unlikely(&use_bh_wq_enabled) && io->in_bh) { /* * Error handling code (FEC included) cannot be run in the @@ -412,14 +430,14 @@ static int verity_handle_data_hash_mismatch(struct dm_verity *v, */ return -EAGAIN; } - if (verity_recheck(v, io, blkno, data) == 0) { + if (verity_recheck(v, io, want_digest, blkno, data) == 0) { if (v->validated_blocks) set_bit(blkno, v->validated_blocks); return 0; } #if defined(CONFIG_DM_VERITY_FEC) - if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA, blkno, - data) == 0) + if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA, want_digest, + blkno, data) == 0) return 0; #endif if (bio->bi_status) @@ -433,6 +451,58 @@ static int verity_handle_data_hash_mismatch(struct dm_verity *v, return 0; } +static void verity_clear_pending_blocks(struct dm_verity_io *io) +{ + int i; + + for (i = io->num_pending - 1; i >= 0; i--) { + kunmap_local(io->pending_blocks[i].data); + io->pending_blocks[i].data = NULL; + } + io->num_pending = 0; +} + +static int verity_verify_pending_blocks(struct dm_verity *v, + struct dm_verity_io *io, + struct bio *bio) +{ + const unsigned int block_size = 1 << v->data_dev_block_bits; + int i, r; + + if (io->num_pending == 2) { + /* num_pending == 2 implies that the algorithm is SHA-256 */ + sha256_finup_2x(v->initial_hashstate.sha256, + io->pending_blocks[0].data, + io->pending_blocks[1].data, block_size, + io->pending_blocks[0].real_digest, + io->pending_blocks[1].real_digest); + } else { + for (i = 0; i < io->num_pending; i++) { + r = verity_hash(v, io, io->pending_blocks[i].data, + block_size, + io->pending_blocks[i].real_digest); + if (unlikely(r)) + return r; + } + } + + for (i = 0; i < io->num_pending; i++) { + struct pending_block *block = &io->pending_blocks[i]; + + if (likely(memcmp(block->real_digest, block->want_digest, + v->digest_size) == 0)) { + if (v->validated_blocks) + set_bit(block->blkno, v->validated_blocks); + } else { + r = verity_handle_data_hash_mismatch(v, io, bio, block); + if (unlikely(r)) + return r; + } + } + verity_clear_pending_blocks(io); + return 0; +} + /* * Verify one "dm_verity_io" structure. */ @@ -440,10 +510,14 @@ static int verity_verify_io(struct dm_verity_io *io) { struct dm_verity *v = io->v; const unsigned int block_size = 1 << v->data_dev_block_bits; + const int max_pending = v->use_sha256_finup_2x ? 
2 : 1; struct bvec_iter iter_copy; struct bvec_iter *iter; struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size); unsigned int b; + int r; + + io->num_pending = 0; if (static_branch_unlikely(&use_bh_wq_enabled) && io->in_bh) { /* @@ -457,21 +531,22 @@ static int verity_verify_io(struct dm_verity_io *io) for (b = 0; b < io->n_blocks; b++, bio_advance_iter(bio, iter, block_size)) { - int r; - sector_t cur_block = io->block + b; + sector_t blkno = io->block + b; + struct pending_block *block; bool is_zero; struct bio_vec bv; void *data; if (v->validated_blocks && bio->bi_status == BLK_STS_OK && - likely(test_bit(cur_block, v->validated_blocks))) + likely(test_bit(blkno, v->validated_blocks))) continue; - r = verity_hash_for_block(v, io, cur_block, - verity_io_want_digest(v, io), + block = &io->pending_blocks[io->num_pending]; + + r = verity_hash_for_block(v, io, blkno, block->want_digest, &is_zero); if (unlikely(r < 0)) - return r; + goto error; bv = bio_iter_iovec(bio, *iter); if (unlikely(bv.bv_len < block_size)) { @@ -482,7 +557,8 @@ static int verity_verify_io(struct dm_verity_io *io) * data block size to be greater than PAGE_SIZE. */ DMERR_LIMIT("unaligned io (data block spans pages)"); - return -EIO; + r = -EIO; + goto error; } data = bvec_kmap_local(&bv); @@ -496,29 +572,26 @@ static int verity_verify_io(struct dm_verity_io *io) kunmap_local(data); continue; } - - r = verity_hash(v, io, data, block_size, - verity_io_real_digest(v, io)); - if (unlikely(r < 0)) { - kunmap_local(data); - return r; + block->data = data; + block->blkno = blkno; + if (++io->num_pending == max_pending) { + r = verity_verify_pending_blocks(v, io, bio); + if (unlikely(r)) + goto error; } + } - if (likely(memcmp(verity_io_real_digest(v, io), - verity_io_want_digest(v, io), v->digest_size) == 0)) { - if (v->validated_blocks) - set_bit(cur_block, v->validated_blocks); - kunmap_local(data); - continue; - } - r = verity_handle_data_hash_mismatch(v, io, bio, cur_block, - data); - kunmap_local(data); + if (io->num_pending) { + r = verity_verify_pending_blocks(v, io, bio); if (unlikely(r)) - return r; + goto error; } return 0; + +error: + verity_clear_pending_blocks(io); + return r; } /* @@ -775,6 +848,10 @@ static void verity_status(struct dm_target *ti, status_type_t type, switch (type) { case STATUSTYPE_INFO: DMEMIT("%c", v->hash_failed ? 
'C' : 'V'); + if (verity_fec_is_enabled(v)) + DMEMIT(" %lld", atomic64_read(&v->fec->corrected)); + else + DMEMIT(" -"); break; case STATUSTYPE_TABLE: DMEMIT("%u %s %s %u %u %llu %llu %s ", @@ -1004,7 +1081,7 @@ static void verity_dtr(struct dm_target *ti) kvfree(v->validated_blocks); kfree(v->salt); - kfree(v->initial_hashstate); + kfree(v->initial_hashstate.shash); kfree(v->root_digest); kfree(v->zero_digest); verity_free_sig(v); @@ -1069,8 +1146,7 @@ static int verity_alloc_zero_digest(struct dm_verity *v) if (!v->zero_digest) return r; - io = kmalloc(sizeof(*io) + crypto_shash_descsize(v->shash_tfm), - GFP_KERNEL); + io = kmalloc(v->ti->per_io_data_size, GFP_KERNEL); if (!io) return r; /* verity_dtr will free zero_digest */ @@ -1252,11 +1328,26 @@ static int verity_setup_hash_alg(struct dm_verity *v, const char *alg_name) } v->shash_tfm = shash; v->digest_size = crypto_shash_digestsize(shash); - DMINFO("%s using \"%s\"", alg_name, crypto_shash_driver_name(shash)); if ((1 << v->hash_dev_block_bits) < v->digest_size * 2) { ti->error = "Digest size too big"; return -EINVAL; } + if (likely(v->version && strcmp(alg_name, "sha256") == 0)) { + /* + * Fast path: use the library API for reduced overhead and + * interleaved hashing support. + */ + v->use_sha256_lib = true; + if (sha256_finup_2x_is_optimized()) + v->use_sha256_finup_2x = true; + ti->per_io_data_size = + offsetofend(struct dm_verity_io, hash_ctx.sha256); + } else { + /* Fallback case: use the generic crypto API. */ + ti->per_io_data_size = + offsetofend(struct dm_verity_io, hash_ctx.shash) + + crypto_shash_descsize(shash); + } return 0; } @@ -1277,7 +1368,18 @@ static int verity_setup_salt_and_hashstate(struct dm_verity *v, const char *arg) return -EINVAL; } } - if (v->version) { /* Version 1: salt at beginning */ + if (likely(v->use_sha256_lib)) { + /* Implies version 1: salt at beginning */ + v->initial_hashstate.sha256 = + kmalloc(sizeof(struct sha256_ctx), GFP_KERNEL); + if (!v->initial_hashstate.sha256) { + ti->error = "Cannot allocate initial hash state"; + return -ENOMEM; + } + sha256_init(v->initial_hashstate.sha256); + sha256_update(v->initial_hashstate.sha256, + v->salt, v->salt_size); + } else if (v->version) { /* Version 1: salt at beginning */ SHASH_DESC_ON_STACK(desc, v->shash_tfm); int r; @@ -1285,16 +1387,16 @@ static int verity_setup_salt_and_hashstate(struct dm_verity *v, const char *arg) * Compute the pre-salted hash state that can be passed to * crypto_shash_import() for each block later. 
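For context on the two hash formats referenced in the comments above: format 0 appends the salt after the block data, while format 1 hashes the salt first, which is what allows a pre-salted state to be computed once and then reused for every block, and why the SHA-256 library fast path is limited to format 1. The following fragment is an illustrative sketch only, not part of the patch; it uses the <crypto/sha2.h> helpers already shown above, and the salt and data buffers are assumed to be supplied by the caller.

#include <crypto/sha2.h>

/* Format 0: the salt is hashed after the block data. */
static void example_verity_digest_v0(const u8 *data, size_t len,
                                     const u8 *salt, size_t salt_size,
                                     u8 digest[SHA256_DIGEST_SIZE])
{
        struct sha256_ctx ctx;

        sha256_init(&ctx);
        sha256_update(&ctx, data, len);
        sha256_update(&ctx, salt, salt_size);
        sha256_final(&ctx, digest);
}

/*
 * Format 1: the salt is hashed before the block data, so a pre-salted
 * context (sha256_init() + sha256_update(salt)) can be built once at table
 * construction time and simply copied for every block.
 */
static void example_verity_digest_v1(const struct sha256_ctx *salted,
                                     const u8 *data, size_t len,
                                     u8 digest[SHA256_DIGEST_SIZE])
{
        struct sha256_ctx ctx = *salted;

        sha256_update(&ctx, data, len);
        sha256_final(&ctx, digest);
}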
*/ - v->initial_hashstate = kmalloc( + v->initial_hashstate.shash = kmalloc( crypto_shash_statesize(v->shash_tfm), GFP_KERNEL); - if (!v->initial_hashstate) { + if (!v->initial_hashstate.shash) { ti->error = "Cannot allocate initial hash state"; return -ENOMEM; } desc->tfm = v->shash_tfm; r = crypto_shash_init(desc) ?: crypto_shash_update(desc, v->salt, v->salt_size) ?: - crypto_shash_export(desc, v->initial_hashstate); + crypto_shash_export(desc, v->initial_hashstate.shash); if (r) { ti->error = "Cannot set up initial hash state"; return r; @@ -1556,9 +1658,6 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv) goto bad; } - ti->per_io_data_size = sizeof(struct dm_verity_io) + - crypto_shash_descsize(v->shash_tfm); - r = verity_fec_ctr(v); if (r) goto bad; @@ -1690,7 +1789,7 @@ static struct target_type verity_target = { .name = "verity", /* Note: the LSMs depend on the singleton and immutable features */ .features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE, - .version = {1, 12, 0}, + .version = {1, 13, 0}, .module = THIS_MODULE, .ctr = verity_ctr, .dtr = verity_dtr, diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h index 6d141abd965c..f975a9e5c5d6 100644 --- a/drivers/md/dm-verity.h +++ b/drivers/md/dm-verity.h @@ -16,6 +16,7 @@ #include <linux/device-mapper.h> #include <linux/interrupt.h> #include <crypto/hash.h> +#include <crypto/sha2.h> #define DM_VERITY_MAX_LEVELS 63 @@ -42,7 +43,10 @@ struct dm_verity { struct crypto_shash *shash_tfm; u8 *root_digest; /* digest of the root block */ u8 *salt; /* salt: its size is salt_size */ - u8 *initial_hashstate; /* salted initial state, if version >= 1 */ + union { + struct sha256_ctx *sha256; /* for use_sha256_lib=1 */ + u8 *shash; /* for use_sha256_lib=0 */ + } initial_hashstate; /* salted initial state, if version >= 1 */ u8 *zero_digest; /* digest for a zero block */ #ifdef CONFIG_SECURITY u8 *root_digest_sig; /* signature of the root digest */ @@ -59,6 +63,8 @@ struct dm_verity { unsigned char version; bool hash_failed:1; /* set if hash of any block failed */ bool use_bh_wq:1; /* try to verify in BH wq before normal work-queue */ + bool use_sha256_lib:1; /* use SHA-256 library instead of generic crypto API */ + bool use_sha256_finup_2x:1; /* use interleaved hashing optimization */ unsigned int digest_size; /* digest size for the current hash algorithm */ enum verity_mode mode; /* mode for handling verification errors */ enum verity_mode error_mode;/* mode for handling I/O errors */ @@ -78,6 +84,13 @@ struct dm_verity { mempool_t recheck_pool; }; +struct pending_block { + void *data; + sector_t blkno; + u8 want_digest[HASH_MAX_DIGESTSIZE]; + u8 real_digest[HASH_MAX_DIGESTSIZE]; +}; + struct dm_verity_io { struct dm_verity *v; @@ -94,28 +107,29 @@ struct dm_verity_io { struct work_struct work; struct work_struct bh_work; - u8 real_digest[HASH_MAX_DIGESTSIZE]; - u8 want_digest[HASH_MAX_DIGESTSIZE]; + u8 tmp_digest[HASH_MAX_DIGESTSIZE]; /* - * Temporary space for hashing. This is variable-length and must be at - * the end of the struct. struct shash_desc is just the fixed part; - * it's followed by a context of size crypto_shash_descsize(shash_tfm). + * This is the queue of data blocks that are pending verification. When + * the crypto layer supports interleaved hashing, we allow multiple + * blocks to be queued up in order to utilize it. This can improve + * performance significantly vs. sequential hashing of each block. 
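As a rough sketch of the interleaved hashing described in the comment above (illustrative only, not part of the patch): when the library reports that the 2-way SHA-256 implementation is worthwhile on the running CPU, two equal-sized messages can be finished in one interleaved pass, otherwise each message is hashed sequentially from a copy of the pre-salted context. The helper names match the ones used earlier in this series; the context and data buffers are assumed to come from the caller.

#include <crypto/sha2.h>

static void example_hash_pair(const struct sha256_ctx *salted,
                              const u8 *a, const u8 *b, size_t len,
                              u8 digest_a[SHA256_DIGEST_SIZE],
                              u8 digest_b[SHA256_DIGEST_SIZE])
{
        if (sha256_finup_2x_is_optimized()) {
                /* Both messages are hashed in a single interleaved pass. */
                sha256_finup_2x(salted, a, b, len, digest_a, digest_b);
        } else {
                struct sha256_ctx ctx;

                /* Sequential fallback: finish one message at a time. */
                ctx = *salted;
                sha256_update(&ctx, a, len);
                sha256_final(&ctx, digest_a);

                ctx = *salted;
                sha256_update(&ctx, b, len);
                sha256_final(&ctx, digest_b);
        }
}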
*/ - struct shash_desc hash_desc; -}; + int num_pending; + struct pending_block pending_blocks[2]; -static inline u8 *verity_io_real_digest(struct dm_verity *v, - struct dm_verity_io *io) -{ - return io->real_digest; -} - -static inline u8 *verity_io_want_digest(struct dm_verity *v, - struct dm_verity_io *io) -{ - return io->want_digest; -} + /* + * Temporary space for hashing. Either sha256 or shash is used, + * depending on the value of use_sha256_lib. If shash is used, + * then this field is variable-length, with total size + * sizeof(struct shash_desc) + crypto_shash_descsize(shash_tfm). + * For this reason, this field must be the end of the struct. + */ + union { + struct sha256_ctx sha256; + struct shash_desc shash; + } hash_ctx; +}; extern int verity_hash(struct dm_verity *v, struct dm_verity_io *io, const u8 *data, size_t len, u8 *digest); diff --git a/drivers/md/dm-zone.c b/drivers/md/dm-zone.c index 5a840c4ae316..c95e417194b3 100644 --- a/drivers/md/dm-zone.c +++ b/drivers/md/dm-zone.c @@ -203,8 +203,6 @@ int dm_revalidate_zones(struct dm_table *t, struct request_queue *q) return ret; } - md->nr_zones = disk->nr_zones; - return 0; } @@ -452,7 +450,6 @@ void dm_finalize_zone_settings(struct dm_table *t, struct queue_limits *lim) set_bit(DMF_EMULATE_ZONE_APPEND, &md->flags); } else { clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags); - md->nr_zones = 0; md->disk->nr_zones = 0; } } diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 6c83ab940af7..b63279202260 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -272,7 +272,7 @@ static int __init dm_init(void) int r, i; #if (IS_ENABLED(CONFIG_IMA) && !IS_ENABLED(CONFIG_IMA_DISABLE_HTABLE)) - DMWARN("CONFIG_IMA_DISABLE_HTABLE is disabled." + DMINFO("CONFIG_IMA_DISABLE_HTABLE is disabled." " Duplicate IMA measurements will not be recorded in the IMA log."); #endif @@ -1321,6 +1321,7 @@ void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors) BUG_ON(dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO)); BUG_ON(bio_sectors > *tio->len_ptr); BUG_ON(n_sectors > bio_sectors); + BUG_ON(bio->bi_opf & REQ_ATOMIC); if (static_branch_unlikely(&zoned_enabled) && unlikely(bdev_is_zoned(bio->bi_bdev))) { @@ -1735,8 +1736,12 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci) ci->submit_as_polled = !!(ci->bio->bi_opf & REQ_POLLED); len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count); - if (ci->bio->bi_opf & REQ_ATOMIC && len != ci->sector_count) - return BLK_STS_IOERR; + if (ci->bio->bi_opf & REQ_ATOMIC) { + if (unlikely(!dm_target_supports_atomic_writes(ti->type))) + return BLK_STS_IOERR; + if (unlikely(len != ci->sector_count)) + return BLK_STS_IOERR; + } setup_split_accounting(ci, len); @@ -2439,7 +2444,6 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, { struct dm_table *old_map; sector_t size, old_size; - int ret; lockdep_assert_held(&md->suspend_lock); @@ -2454,11 +2458,13 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, set_capacity(md->disk, size); - ret = dm_table_set_restrictions(t, md->queue, limits); - if (ret) { - set_capacity(md->disk, old_size); - old_map = ERR_PTR(ret); - goto out; + if (limits) { + int ret = dm_table_set_restrictions(t, md->queue, limits); + if (ret) { + set_capacity(md->disk, old_size); + old_map = ERR_PTR(ret); + goto out; + } } /* @@ -2836,6 +2842,7 @@ static void dm_wq_work(struct work_struct *work) static void dm_queue_flush(struct mapped_device *md) { + clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); 
clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags); smp_mb__after_atomic(); queue_work(md->wq, &md->work); @@ -2848,6 +2855,7 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) { struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL); struct queue_limits limits; + bool update_limits = true; int r; mutex_lock(&md->suspend_lock); @@ -2857,19 +2865,30 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) goto out; /* + * To avoid a potential deadlock locking the queue limits, disallow + * updating the queue limits during a table swap, when updating an + * immutable request-based dm device (dm-multipath) during a noflush + * suspend. It is userspace's responsibility to make sure that the new + * table uses the same limits as the existing table, if it asks for a + * noflush suspend. + */ + if (dm_request_based(md) && md->immutable_target && + __noflush_suspending(md)) + update_limits = false; + /* * If the new table has no data devices, retain the existing limits. * This helps multipath with queue_if_no_path if all paths disappear, * then new I/O is queued based on these limits, and then some paths * reappear. */ - if (dm_table_has_no_data_devices(table)) { + else if (dm_table_has_no_data_devices(table)) { live_map = dm_get_live_table_fast(md); if (live_map) limits = md->queue->limits; dm_put_live_table_fast(md); } - if (!live_map) { + if (update_limits && !live_map) { r = dm_calculate_queue_limits(table, &limits); if (r) { map = ERR_PTR(r); @@ -2877,7 +2896,7 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table) } } - map = __bind(md, table, &limits); + map = __bind(md, table, update_limits ? &limits : NULL); dm_issue_global_event(); out: @@ -2930,7 +2949,6 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map, /* * DMF_NOFLUSH_SUSPENDING must be set before presuspend. - * This flag is cleared before dm_suspend returns. 
*/ if (noflush) set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); @@ -2993,8 +3011,6 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map, if (!r) set_bit(dmf_suspended_flag, &md->flags); - if (noflush) - clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags); if (map) synchronize_srcu(&md->io_barrier); diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig index 324c69c63f76..312788f249c9 100644 --- a/drivers/platform/Kconfig +++ b/drivers/platform/Kconfig @@ -20,3 +20,5 @@ source "drivers/platform/x86/Kconfig" source "drivers/platform/arm64/Kconfig" source "drivers/platform/raspberrypi/Kconfig" + +source "drivers/platform/wmi/Kconfig" diff --git a/drivers/platform/Makefile b/drivers/platform/Makefile index b0935c602ada..fa322e7f8716 100644 --- a/drivers/platform/Makefile +++ b/drivers/platform/Makefile @@ -14,3 +14,4 @@ obj-$(CONFIG_CZNIC_PLATFORMS) += cznic/ obj-$(CONFIG_SURFACE_PLATFORMS) += surface/ obj-$(CONFIG_ARM64_PLATFORM_DEVICES) += arm64/ obj-$(CONFIG_BCM2835_VCHIQ) += raspberrypi/ +obj-$(CONFIG_ACPI_WMI) += wmi/ diff --git a/drivers/platform/arm64/lenovo-thinkpad-t14s.c b/drivers/platform/arm64/lenovo-thinkpad-t14s.c index cf6a1d3b2617..5590302a5694 100644 --- a/drivers/platform/arm64/lenovo-thinkpad-t14s.c +++ b/drivers/platform/arm64/lenovo-thinkpad-t14s.c @@ -20,19 +20,23 @@ #include <linux/module.h> #include <linux/regmap.h> #include <linux/slab.h> +#include <linux/pm.h> #define T14S_EC_CMD_ECRD 0x02 #define T14S_EC_CMD_ECWR 0x03 #define T14S_EC_CMD_EVT 0xf0 -#define T14S_EC_REG_LED 0x0c -#define T14S_EC_REG_KBD_BL1 0x0d -#define T14S_EC_REG_KBD_BL2 0xe1 -#define T14S_EC_KBD_BL1_MASK GENMASK_U8(7, 6) -#define T14S_EC_KBD_BL2_MASK GENMASK_U8(3, 2) -#define T14S_EC_REG_AUD 0x30 -#define T14S_EC_MIC_MUTE_LED BIT(5) -#define T14S_EC_SPK_MUTE_LED BIT(6) +#define T14S_EC_REG_LED 0x0c +#define T14S_EC_REG_KBD_BL1 0x0d +#define T14S_EC_REG_MODERN_STANDBY 0xe0 +#define T14S_EC_MODERN_STANDBY_ENTRY BIT(1) +#define T14S_EC_MODERN_STANDBY_EXIT BIT(0) +#define T14S_EC_REG_KBD_BL2 0xe1 +#define T14S_EC_KBD_BL1_MASK GENMASK_U8(7, 6) +#define T14S_EC_KBD_BL2_MASK GENMASK_U8(3, 2) +#define T14S_EC_REG_AUD 0x30 +#define T14S_EC_MIC_MUTE_LED BIT(5) +#define T14S_EC_SPK_MUTE_LED BIT(6) #define T14S_EC_EVT_NONE 0x00 #define T14S_EC_EVT_KEY_FN_4 0x13 @@ -202,6 +206,14 @@ out: return ret; } +static void t14s_ec_write_sequence(struct t14s_ec *ec, u8 reg, u8 val, u8 cnt) +{ + int i; + + for (i = 0; i < cnt; i++) + regmap_write(ec->regmap, reg, val); +} + static int t14s_led_set_status(struct t14s_ec *ec, struct t14s_ec_led_classdev *led, const enum t14s_ec_led_status_t ledstatus) @@ -554,6 +566,7 @@ static int t14s_ec_probe(struct i2c_client *client) return -ENOMEM; ec->dev = dev; + i2c_set_clientdata(client, ec); ec->regmap = devm_regmap_init(dev, &t14s_ec_regmap_bus, ec, &t14s_ec_regmap_config); @@ -593,6 +606,30 @@ static int t14s_ec_probe(struct i2c_client *client) return 0; } +static int t14s_ec_suspend(struct device *dev) +{ + struct t14s_ec *ec = dev_get_drvdata(dev); + + led_classdev_suspend(&ec->kbd_backlight); + + t14s_ec_write_sequence(ec, T14S_EC_REG_MODERN_STANDBY, + T14S_EC_MODERN_STANDBY_ENTRY, 3); + + return 0; +} + +static int t14s_ec_resume(struct device *dev) +{ + struct t14s_ec *ec = dev_get_drvdata(dev); + + t14s_ec_write_sequence(ec, T14S_EC_REG_MODERN_STANDBY, + T14S_EC_MODERN_STANDBY_EXIT, 3); + + led_classdev_resume(&ec->kbd_backlight); + + return 0; +} + static const struct of_device_id t14s_ec_of_match[] = { { .compatible = "lenovo,thinkpad-t14s-ec" }, 
{} @@ -605,10 +642,15 @@ static const struct i2c_device_id t14s_ec_i2c_id_table[] = { }; MODULE_DEVICE_TABLE(i2c, t14s_ec_i2c_id_table); +static const struct dev_pm_ops t14s_ec_pm_ops = { + SYSTEM_SLEEP_PM_OPS(t14s_ec_suspend, t14s_ec_resume) +}; + static struct i2c_driver t14s_ec_i2c_driver = { .driver = { .name = "thinkpad-t14s-ec", .of_match_table = t14s_ec_of_match, + .pm = &t14s_ec_pm_ops, }, .probe = t14s_ec_probe, .id_table = t14s_ec_i2c_id_table, diff --git a/drivers/platform/surface/aggregator/core.c b/drivers/platform/surface/aggregator/core.c index c58e1fdd1a5f..c7e05f7bc199 100644 --- a/drivers/platform/surface/aggregator/core.c +++ b/drivers/platform/surface/aggregator/core.c @@ -676,7 +676,7 @@ static int ssam_serial_hub_probe(struct serdev_device *serdev) status = ssam_serdev_setup(ssh, serdev); if (status) { - status = dev_err_probe(dev, status, "failed to setup serdev\n"); + dev_err_probe(dev, status, "failed to setup serdev\n"); goto err_devinit; } diff --git a/drivers/platform/surface/aggregator/ssh_packet_layer.c b/drivers/platform/surface/aggregator/ssh_packet_layer.c index 6081b0146d5f..3dd22856570f 100644 --- a/drivers/platform/surface/aggregator/ssh_packet_layer.c +++ b/drivers/platform/surface/aggregator/ssh_packet_layer.c @@ -671,7 +671,7 @@ static void ssh_ptl_timeout_reaper_mod(struct ssh_ptl *ptl, ktime_t now, /* Re-adjust / schedule reaper only if it is above resolution delta. */ if (ktime_before(aexp, ptl->rtx_timeout.expires)) { ptl->rtx_timeout.expires = expires; - mod_delayed_work(system_wq, &ptl->rtx_timeout.reaper, delta); + mod_delayed_work(system_percpu_wq, &ptl->rtx_timeout.reaper, delta); } spin_unlock(&ptl->rtx_timeout.lock); diff --git a/drivers/platform/surface/aggregator/ssh_request_layer.c b/drivers/platform/surface/aggregator/ssh_request_layer.c index 879ca9ee7ff6..a356e4956562 100644 --- a/drivers/platform/surface/aggregator/ssh_request_layer.c +++ b/drivers/platform/surface/aggregator/ssh_request_layer.c @@ -434,7 +434,7 @@ static void ssh_rtl_timeout_reaper_mod(struct ssh_rtl *rtl, ktime_t now, /* Re-adjust / schedule reaper only if it is above resolution delta. */ if (ktime_before(aexp, rtl->rtx_timeout.expires)) { rtl->rtx_timeout.expires = expires; - mod_delayed_work(system_wq, &rtl->rtx_timeout.reaper, delta); + mod_delayed_work(system_percpu_wq, &rtl->rtx_timeout.reaper, delta); } spin_unlock(&rtl->rtx_timeout.lock); diff --git a/drivers/platform/surface/surface_acpi_notify.c b/drivers/platform/surface/surface_acpi_notify.c index 3b30cfe3466b..a9dcb0bbe90e 100644 --- a/drivers/platform/surface/surface_acpi_notify.c +++ b/drivers/platform/surface/surface_acpi_notify.c @@ -862,7 +862,7 @@ static int __init san_init(void) { int ret; - san_wq = alloc_workqueue("san_wq", 0, 0); + san_wq = alloc_workqueue("san_wq", WQ_PERCPU, 0); if (!san_wq) return -ENOMEM; ret = platform_driver_register(&surface_acpi_notify); diff --git a/drivers/platform/wmi/Kconfig b/drivers/platform/wmi/Kconfig new file mode 100644 index 000000000000..77fcbb18746b --- /dev/null +++ b/drivers/platform/wmi/Kconfig @@ -0,0 +1,34 @@ +# SPDX-License-Identifier: GPL-2.0-or-later +# +# ACPI WMI Core +# + +menuconfig ACPI_WMI + tristate "ACPI-WMI support" + depends on ACPI && X86 + help + This option enables support for the ACPI-WMI driver core. + + The ACPI-WMI interface is a proprietary extension of ACPI allowing + the platform firmware to expose WMI (Windows Management Instrumentation) + objects used for managing various aspects of the underlying system. 
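To make the client drivers mentioned in this help text concrete, a minimal and entirely hypothetical WMI client driver is sketched below; it binds to a GUID that a PNP0C14 mapper device exposes through the core enabled by this option. The GUID, names and probe body are placeholders rather than material from any real driver.

#include <linux/module.h>
#include <linux/wmi.h>

static const struct wmi_device_id example_wmi_id_table[] = {
        { .guid_string = "01234567-89AB-CDEF-0123-456789ABCDEF" }, /* placeholder GUID */
        { }
};
MODULE_DEVICE_TABLE(wmi, example_wmi_id_table);

static int example_wmi_probe(struct wmi_device *wdev, const void *context)
{
        /* A real driver would create sysfs attributes, input devices, etc. here. */
        dev_info(&wdev->dev, "bound to a WMI object exposed by a PNP0C14 mapper\n");
        return 0;
}

static struct wmi_driver example_wmi_driver = {
        .driver = {
                .name = "example-wmi",
        },
        .id_table = example_wmi_id_table,
        .probe = example_wmi_probe,
};
module_wmi_driver(example_wmi_driver);

MODULE_LICENSE("GPL");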
+ Mapping between ACPI control methods and WMI objects happens through + special mapper devices (PNP0C14) defined inside the ACPI tables. + + Enabling this option is necessary for building the vendor specific + ACPI-WMI client drivers for Acer, Dell and HP machines (among others). + + It is safe to enable this option even for machines that do not contain + any ACPI-WMI mapper devices at all. + +if ACPI_WMI + +config ACPI_WMI_LEGACY_DEVICE_NAMES + bool "Use legacy WMI device naming scheme" + help + Say Y here to force the WMI driver core to use the old WMI device naming + scheme when creating WMI devices. Doing so might be necessary for some + userspace applications but will cause the registration of WMI devices with + the same GUID to fail in some corner cases. + +endif # ACPI_WMI diff --git a/drivers/platform/wmi/Makefile b/drivers/platform/wmi/Makefile new file mode 100644 index 000000000000..98393d7391ec --- /dev/null +++ b/drivers/platform/wmi/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0-or-later +# +# Makefile for linux/drivers/platform/wmi +# ACPI WMI core +# + +wmi-y := core.o +obj-$(CONFIG_ACPI_WMI) += wmi.o diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/wmi/core.c index 4e86a422f05f..6878c4fcb0b5 100644 --- a/drivers/platform/x86/wmi.c +++ b/drivers/platform/wmi/core.c @@ -142,14 +142,6 @@ static inline void get_acpi_method_name(const struct wmi_block *wblock, buffer[4] = '\0'; } -static inline acpi_object_type get_param_acpi_type(const struct wmi_block *wblock) -{ - if (wblock->gblock.flags & ACPI_WMI_STRING) - return ACPI_TYPE_STRING; - else - return ACPI_TYPE_BUFFER; -} - static int wmidev_match_guid(struct device *dev, const void *data) { struct wmi_block *wblock = dev_to_wblock(dev); @@ -351,9 +343,16 @@ acpi_status wmidev_evaluate_method(struct wmi_device *wdev, u8 instance, u32 met params[0].integer.value = instance; params[1].type = ACPI_TYPE_INTEGER; params[1].integer.value = method_id; - params[2].type = get_param_acpi_type(wblock); - params[2].buffer.length = in->length; - params[2].buffer.pointer = in->pointer; + + if (wblock->gblock.flags & ACPI_WMI_STRING) { + params[2].type = ACPI_TYPE_STRING; + params[2].string.length = in->length; + params[2].string.pointer = in->pointer; + } else { + params[2].type = ACPI_TYPE_BUFFER; + params[2].buffer.length = in->length; + params[2].buffer.pointer = in->pointer; + } get_acpi_method_name(wblock, 'M', method); @@ -519,9 +518,16 @@ acpi_status wmidev_block_set(struct wmi_device *wdev, u8 instance, const struct input.pointer = params; params[0].type = ACPI_TYPE_INTEGER; params[0].integer.value = instance; - params[1].type = get_param_acpi_type(wblock); - params[1].buffer.length = in->length; - params[1].buffer.pointer = in->pointer; + + if (wblock->gblock.flags & ACPI_WMI_STRING) { + params[1].type = ACPI_TYPE_STRING; + params[1].string.length = in->length; + params[1].string.pointer = in->pointer; + } else { + params[1].type = ACPI_TYPE_BUFFER; + params[1].buffer.length = in->length; + params[1].buffer.pointer = in->pointer; + } get_acpi_method_name(wblock, 'S', method); diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index c883a28e0916..4cb7d97a9fcc 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -16,36 +16,6 @@ menuconfig X86_PLATFORM_DEVICES if X86_PLATFORM_DEVICES -config ACPI_WMI - tristate "WMI" - depends on ACPI - help - This driver adds support for the ACPI-WMI (Windows Management - Instrumentation) mapper device (PNP0C14) found on some
systems. - - ACPI-WMI is a proprietary extension to ACPI to expose parts of the - ACPI firmware to userspace - this is done through various vendor - defined methods and data blocks in a PNP0C14 device, which are then - made available for userspace to call. - - The implementation of this in Linux currently only exposes this to - other kernel space drivers. - - This driver is a required dependency to build the firmware specific - drivers needed on many machines, including Acer and HP laptops. - - It is safe to enable this driver even if your DSDT doesn't define - any ACPI-WMI devices. - -config ACPI_WMI_LEGACY_DEVICE_NAMES - bool "Use legacy WMI device naming scheme" - depends on ACPI_WMI - help - Say Y here to force the WMI driver core to use the old WMI device naming - scheme when creating WMI devices. Doing so might be necessary for some - userspace applications but will cause the registration of WMI devices with - the same GUID to fail in some corner cases. - config WMI_BMOF tristate "WMI embedded Binary MOF driver" depends on ACPI_WMI @@ -74,6 +44,8 @@ config HUAWEI_WMI To compile this driver as a module, choose M here: the module will be called huawei-wmi. +source "drivers/platform/x86/uniwill/Kconfig" + config UV_SYSFS tristate "Sysfs structure for UV systems" depends on X86_UV @@ -262,6 +234,18 @@ config ASUS_WIRELESS If you choose to compile this driver as a module the module will be called asus-wireless. +config ASUS_ARMOURY + tristate "ASUS Armoury driver" + depends on ASUS_WMI + select FW_ATTR_CLASS + help + Say Y here if you have a WMI aware Asus machine and would like to use the + firmware_attributes API to control various settings typically exposed in + the ASUS Armoury Crate application available on Windows. + + To compile this driver as a module, choose M here: the module will + be called asus-armoury. + config ASUS_WMI tristate "ASUS WMI Driver" depends on ACPI_WMI @@ -284,6 +268,17 @@ config ASUS_WMI To compile this driver as a module, choose M here: the module will be called asus-wmi. +config ASUS_WMI_DEPRECATED_ATTRS + bool "BIOS option support in WMI platform (DEPRECATED)" + depends on ASUS_WMI + default y + help + Say Y to expose the configurable BIOS options through the asus-wmi + driver. + + This can be used with or without the asus-armoury driver which + has the same attributes, but more, and better features. + config ASUS_NB_WMI tristate "Asus Notebook WMI Driver" depends on ASUS_WMI @@ -316,6 +311,19 @@ config ASUS_TF103C_DOCK If you have an Asus TF103C tablet say Y or M here, for a generic x86 distro config say M here. +config AYANEO_EC + tristate "Ayaneo EC platform control" + depends on DMI + depends on ACPI_EC + depends on ACPI_BATTERY + depends on HWMON + help + Enables support for the platform EC of Ayaneo devices. This + includes fan control, fan speed, charge limit, magic + module detection, and controller power control. + + If you have an Ayaneo device, say Y or M here. + config MERAKI_MX100 tristate "Cisco Meraki MX100 Platform Driver" depends on GPIOLIB @@ -1031,9 +1039,7 @@ config OXP_EC help Enables support for the platform EC of OneXPlayer and AOKZOE handheld devices. This includes fan speed, fan controls, and - disabling the default TDP behavior of the device. Due to legacy - reasons, this driver also provides hwmon functionality to Ayaneo - devices and the OrangePi Neo. + disabling the default TDP behavior of the device. 
source "drivers/platform/x86/tuxedo/Kconfig" diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile index c7db2a88c11a..d25762f7114f 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile @@ -5,7 +5,6 @@ # # Windows Management Interface -obj-$(CONFIG_ACPI_WMI) += wmi.o obj-$(CONFIG_WMI_BMOF) += wmi-bmof.o # WMI drivers @@ -33,12 +32,16 @@ obj-$(CONFIG_APPLE_GMUX) += apple-gmux.o # ASUS obj-$(CONFIG_ASUS_LAPTOP) += asus-laptop.o obj-$(CONFIG_ASUS_WIRELESS) += asus-wireless.o +obj-$(CONFIG_ASUS_ARMOURY) += asus-armoury.o obj-$(CONFIG_ASUS_WMI) += asus-wmi.o obj-$(CONFIG_ASUS_NB_WMI) += asus-nb-wmi.o obj-$(CONFIG_ASUS_TF103C_DOCK) += asus-tf103c-dock.o obj-$(CONFIG_EEEPC_LAPTOP) += eeepc-laptop.o obj-$(CONFIG_EEEPC_WMI) += eeepc-wmi.o +# Ayaneo +obj-$(CONFIG_AYANEO_EC) += ayaneo-ec.o + # Cisco/Meraki obj-$(CONFIG_MERAKI_MX100) += meraki-mx100.o @@ -110,6 +113,9 @@ obj-$(CONFIG_TOSHIBA_WMI) += toshiba-wmi.o # before toshiba_acpi initializes obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o +# Uniwill +obj-y += uniwill/ + # Inspur obj-$(CONFIG_INSPUR_PLATFORM_PROFILE) += inspur_platform_profile.o diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index d848afc91f87..bf97381faf58 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c @@ -12,10 +12,12 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> +#include <linux/minmax.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/dmi.h> +#include <linux/fixp-arith.h> #include <linux/backlight.h> #include <linux/leds.h> #include <linux/platform_device.h> @@ -68,10 +70,27 @@ MODULE_LICENSE("GPL"); #define ACER_WMID_SET_GAMING_LED_METHODID 2 #define ACER_WMID_GET_GAMING_LED_METHODID 4 #define ACER_WMID_GET_GAMING_SYS_INFO_METHODID 5 -#define ACER_WMID_SET_GAMING_FAN_BEHAVIOR 14 +#define ACER_WMID_SET_GAMING_FAN_BEHAVIOR_METHODID 14 +#define ACER_WMID_GET_GAMING_FAN_BEHAVIOR_METHODID 15 +#define ACER_WMID_SET_GAMING_FAN_SPEED_METHODID 16 +#define ACER_WMID_GET_GAMING_FAN_SPEED_METHODID 17 #define ACER_WMID_SET_GAMING_MISC_SETTING_METHODID 22 #define ACER_WMID_GET_GAMING_MISC_SETTING_METHODID 23 +#define ACER_GAMING_FAN_BEHAVIOR_CPU BIT(0) +#define ACER_GAMING_FAN_BEHAVIOR_GPU BIT(3) + +#define ACER_GAMING_FAN_BEHAVIOR_STATUS_MASK GENMASK_ULL(7, 0) +#define ACER_GAMING_FAN_BEHAVIOR_ID_MASK GENMASK_ULL(15, 0) +#define ACER_GAMING_FAN_BEHAVIOR_SET_CPU_MODE_MASK GENMASK(17, 16) +#define ACER_GAMING_FAN_BEHAVIOR_SET_GPU_MODE_MASK GENMASK(23, 22) +#define ACER_GAMING_FAN_BEHAVIOR_GET_CPU_MODE_MASK GENMASK(9, 8) +#define ACER_GAMING_FAN_BEHAVIOR_GET_GPU_MODE_MASK GENMASK(15, 14) + +#define ACER_GAMING_FAN_SPEED_STATUS_MASK GENMASK_ULL(7, 0) +#define ACER_GAMING_FAN_SPEED_ID_MASK GENMASK_ULL(7, 0) +#define ACER_GAMING_FAN_SPEED_VALUE_MASK GENMASK_ULL(15, 8) + #define ACER_GAMING_MISC_SETTING_STATUS_MASK GENMASK_ULL(7, 0) #define ACER_GAMING_MISC_SETTING_INDEX_MASK GENMASK_ULL(7, 0) #define ACER_GAMING_MISC_SETTING_VALUE_MASK GENMASK_ULL(15, 8) @@ -122,6 +141,17 @@ enum acer_wmi_predator_v4_sensor_id { ACER_WMID_SENSOR_GPU_TEMPERATURE = 0x0A, }; +enum acer_wmi_gaming_fan_id { + ACER_WMID_CPU_FAN = 0x01, + ACER_WMID_GPU_FAN = 0x04, +}; + +enum acer_wmi_gaming_fan_mode { + ACER_WMID_FAN_MODE_AUTO = 0x01, + ACER_WMID_FAN_MODE_TURBO = 0x02, + ACER_WMID_FAN_MODE_CUSTOM = 0x03, +}; + enum acer_wmi_predator_v4_oc { ACER_WMID_OC_NORMAL = 0x0000, ACER_WMID_OC_TURBO = 0x0002, @@ -279,6 +309,7 @@ struct 
hotkey_function_type_aa { #define ACER_CAP_TURBO_FAN BIT(9) #define ACER_CAP_PLATFORM_PROFILE BIT(10) #define ACER_CAP_HWMON BIT(11) +#define ACER_CAP_PWM BIT(12) /* * Interface type flags @@ -373,6 +404,7 @@ struct quirk_entry { u8 cpu_fans; u8 gpu_fans; u8 predator_v4; + u8 pwm; }; static struct quirk_entry *quirks; @@ -392,6 +424,9 @@ static void __init set_quirks(void) if (quirks->predator_v4) interface->capability |= ACER_CAP_PLATFORM_PROFILE | ACER_CAP_HWMON; + + if (quirks->pwm) + interface->capability |= ACER_CAP_PWM; } static int __init dmi_matched(const struct dmi_system_id *dmi) @@ -431,6 +466,7 @@ static struct quirk_entry quirk_acer_predator_ph16_72 = { .cpu_fans = 1, .gpu_fans = 1, .predator_v4 = 1, + .pwm = 1, }; static struct quirk_entry quirk_acer_predator_pt14_51 = { @@ -438,6 +474,7 @@ static struct quirk_entry quirk_acer_predator_pt14_51 = { .cpu_fans = 1, .gpu_fans = 1, .predator_v4 = 1, + .pwm = 1, }; static struct quirk_entry quirk_acer_predator_v4 = { @@ -658,6 +695,15 @@ static const struct dmi_system_id acer_quirks[] __initconst = { }, { .callback = dmi_matched, + .ident = "Acer Predator Helios Neo 16", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Predator PHN16-72"), + }, + .driver_data = &quirk_acer_predator_ph16_72, + }, + { + .callback = dmi_matched, .ident = "Acer Predator PH18-71", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), @@ -1564,9 +1610,6 @@ static acpi_status WMID_gaming_set_u64(u64 value, u32 cap) case ACER_CAP_TURBO_LED: method_id = ACER_WMID_SET_GAMING_LED_METHODID; break; - case ACER_CAP_TURBO_FAN: - method_id = ACER_WMID_SET_GAMING_FAN_BEHAVIOR; - break; default: return AE_BAD_PARAMETER; } @@ -1617,25 +1660,125 @@ static int WMID_gaming_get_sys_info(u32 command, u64 *out) return 0; } -static void WMID_gaming_set_fan_mode(u8 fan_mode) +static int WMID_gaming_set_fan_behavior(u16 fan_bitmap, enum acer_wmi_gaming_fan_mode mode) { - /* fan_mode = 1 is used for auto, fan_mode = 2 used for turbo*/ - u64 gpu_fan_config1 = 0, gpu_fan_config2 = 0; - int i; + acpi_status status; + u64 input = 0; + u64 result; + + input |= FIELD_PREP(ACER_GAMING_FAN_BEHAVIOR_ID_MASK, fan_bitmap); + + if (fan_bitmap & ACER_GAMING_FAN_BEHAVIOR_CPU) + input |= FIELD_PREP(ACER_GAMING_FAN_BEHAVIOR_SET_CPU_MODE_MASK, mode); + + if (fan_bitmap & ACER_GAMING_FAN_BEHAVIOR_GPU) + input |= FIELD_PREP(ACER_GAMING_FAN_BEHAVIOR_SET_GPU_MODE_MASK, mode); + + status = WMI_gaming_execute_u64(ACER_WMID_SET_GAMING_FAN_BEHAVIOR_METHODID, input, + &result); + if (ACPI_FAILURE(status)) + return -EIO; + + /* The return status must be zero for the operation to have succeeded */ + if (FIELD_GET(ACER_GAMING_FAN_BEHAVIOR_STATUS_MASK, result)) + return -EIO; + + return 0; +} + +static int WMID_gaming_get_fan_behavior(u16 fan_bitmap, enum acer_wmi_gaming_fan_mode *mode) +{ + acpi_status status; + u32 input = 0; + u64 result; + int value; + + input |= FIELD_PREP(ACER_GAMING_FAN_BEHAVIOR_ID_MASK, fan_bitmap); + status = WMI_gaming_execute_u32_u64(ACER_WMID_GET_GAMING_FAN_BEHAVIOR_METHODID, input, + &result); + if (ACPI_FAILURE(status)) + return -EIO; + + /* The return status must be zero for the operation to have succeeded */ + if (FIELD_GET(ACER_GAMING_FAN_BEHAVIOR_STATUS_MASK, result)) + return -EIO; + + /* Theoretically multiple fans can be specified, but this is currently unused */ + if (fan_bitmap & ACER_GAMING_FAN_BEHAVIOR_CPU) + value = FIELD_GET(ACER_GAMING_FAN_BEHAVIOR_GET_CPU_MODE_MASK, result); + else if (fan_bitmap & ACER_GAMING_FAN_BEHAVIOR_GPU) + 
value = FIELD_GET(ACER_GAMING_FAN_BEHAVIOR_GET_GPU_MODE_MASK, result); + else + return -EINVAL; + + if (value < ACER_WMID_FAN_MODE_AUTO || value > ACER_WMID_FAN_MODE_CUSTOM) + return -ENXIO; + + *mode = value; + + return 0; +} + +static void WMID_gaming_set_fan_mode(enum acer_wmi_gaming_fan_mode mode) +{ + u16 fan_bitmap = 0; if (quirks->cpu_fans > 0) - gpu_fan_config2 |= 1; - for (i = 0; i < (quirks->cpu_fans + quirks->gpu_fans); ++i) - gpu_fan_config2 |= 1 << (i + 1); - for (i = 0; i < quirks->gpu_fans; ++i) - gpu_fan_config2 |= 1 << (i + 3); - if (quirks->cpu_fans > 0) - gpu_fan_config1 |= fan_mode; - for (i = 0; i < (quirks->cpu_fans + quirks->gpu_fans); ++i) - gpu_fan_config1 |= fan_mode << (2 * i + 2); - for (i = 0; i < quirks->gpu_fans; ++i) - gpu_fan_config1 |= fan_mode << (2 * i + 6); - WMID_gaming_set_u64(gpu_fan_config2 | gpu_fan_config1 << 16, ACER_CAP_TURBO_FAN); + fan_bitmap |= ACER_GAMING_FAN_BEHAVIOR_CPU; + + if (quirks->gpu_fans > 0) + fan_bitmap |= ACER_GAMING_FAN_BEHAVIOR_GPU; + + WMID_gaming_set_fan_behavior(fan_bitmap, mode); +} + +static int WMID_gaming_set_gaming_fan_speed(u8 fan, u8 speed) +{ + acpi_status status; + u64 input = 0; + u64 result; + + if (speed > 100) + return -EINVAL; + + input |= FIELD_PREP(ACER_GAMING_FAN_SPEED_ID_MASK, fan); + input |= FIELD_PREP(ACER_GAMING_FAN_SPEED_VALUE_MASK, speed); + + status = WMI_gaming_execute_u64(ACER_WMID_SET_GAMING_FAN_SPEED_METHODID, input, &result); + if (ACPI_FAILURE(status)) + return -EIO; + + switch (FIELD_GET(ACER_GAMING_FAN_SPEED_STATUS_MASK, result)) { + case 0x00: + return 0; + case 0x01: + return -ENODEV; + case 0x02: + return -EINVAL; + default: + return -ENXIO; + } +} + +static int WMID_gaming_get_gaming_fan_speed(u8 fan, u8 *speed) +{ + acpi_status status; + u32 input = 0; + u64 result; + + input |= FIELD_PREP(ACER_GAMING_FAN_SPEED_ID_MASK, fan); + + status = WMI_gaming_execute_u32_u64(ACER_WMID_GET_GAMING_FAN_SPEED_METHODID, input, + &result); + if (ACPI_FAILURE(status)) + return -EIO; + + if (FIELD_GET(ACER_GAMING_FAN_SPEED_STATUS_MASK, result)) + return -ENODEV; + + *speed = FIELD_GET(ACER_GAMING_FAN_SPEED_VALUE_MASK, result); + + return 0; } static int WMID_gaming_set_misc_setting(enum acer_wmi_gaming_misc_setting setting, u8 value) @@ -1922,7 +2065,7 @@ static int acer_toggle_turbo(void) WMID_gaming_set_u64(0x1, ACER_CAP_TURBO_LED); /* Set FAN mode to auto */ - WMID_gaming_set_fan_mode(0x1); + WMID_gaming_set_fan_mode(ACER_WMID_FAN_MODE_AUTO); /* Set OC to normal */ if (has_cap(ACER_CAP_TURBO_OC)) { @@ -1936,7 +2079,7 @@ static int acer_toggle_turbo(void) WMID_gaming_set_u64(0x10001, ACER_CAP_TURBO_LED); /* Set FAN mode to turbo */ - WMID_gaming_set_fan_mode(0x2); + WMID_gaming_set_fan_mode(ACER_WMID_FAN_MODE_TURBO); /* Set OC to turbo mode */ if (has_cap(ACER_CAP_TURBO_OC)) { @@ -2767,6 +2910,16 @@ static const enum acer_wmi_predator_v4_sensor_id acer_wmi_fan_channel_to_sensor_ [1] = ACER_WMID_SENSOR_GPU_FAN_SPEED, }; +static const enum acer_wmi_gaming_fan_id acer_wmi_fan_channel_to_fan_id[] = { + [0] = ACER_WMID_CPU_FAN, + [1] = ACER_WMID_GPU_FAN, +}; + +static const u16 acer_wmi_fan_channel_to_fan_bitmap[] = { + [0] = ACER_GAMING_FAN_BEHAVIOR_CPU, + [1] = ACER_GAMING_FAN_BEHAVIOR_GPU, +}; + static umode_t acer_wmi_hwmon_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr, int channel) @@ -2778,6 +2931,11 @@ static umode_t acer_wmi_hwmon_is_visible(const void *data, case hwmon_temp: sensor_id = acer_wmi_temp_channel_to_sensor_id[channel]; break; + case hwmon_pwm: + if 
(!has_cap(ACER_CAP_PWM)) + return 0; + + fallthrough; case hwmon_fan: sensor_id = acer_wmi_fan_channel_to_sensor_id[channel]; break; @@ -2785,8 +2943,12 @@ static umode_t acer_wmi_hwmon_is_visible(const void *data, return 0; } - if (*supported_sensors & BIT(sensor_id - 1)) + if (*supported_sensors & BIT(sensor_id - 1)) { + if (type == hwmon_pwm) + return 0644; + return 0444; + } return 0; } @@ -2795,6 +2957,9 @@ static int acer_wmi_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long *val) { u64 command = ACER_WMID_CMD_GET_PREDATOR_V4_SENSOR_READING; + enum acer_wmi_gaming_fan_mode mode; + u16 fan_bitmap; + u8 fan, speed; u64 result; int ret; @@ -2820,6 +2985,80 @@ static int acer_wmi_hwmon_read(struct device *dev, enum hwmon_sensor_types type, *val = FIELD_GET(ACER_PREDATOR_V4_SENSOR_READING_BIT_MASK, result); return 0; + case hwmon_pwm: + switch (attr) { + case hwmon_pwm_input: + fan = acer_wmi_fan_channel_to_fan_id[channel]; + ret = WMID_gaming_get_gaming_fan_speed(fan, &speed); + if (ret < 0) + return ret; + + *val = fixp_linear_interpolate(0, 0, 100, U8_MAX, speed); + return 0; + case hwmon_pwm_enable: + fan_bitmap = acer_wmi_fan_channel_to_fan_bitmap[channel]; + ret = WMID_gaming_get_fan_behavior(fan_bitmap, &mode); + if (ret < 0) + return ret; + + switch (mode) { + case ACER_WMID_FAN_MODE_AUTO: + *val = 2; + return 0; + case ACER_WMID_FAN_MODE_TURBO: + *val = 0; + return 0; + case ACER_WMID_FAN_MODE_CUSTOM: + *val = 1; + return 0; + default: + return -ENXIO; + } + default: + return -EOPNOTSUPP; + } + default: + return -EOPNOTSUPP; + } +} + +static int acer_wmi_hwmon_write(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long val) +{ + enum acer_wmi_gaming_fan_mode mode; + u16 fan_bitmap; + u8 fan, speed; + + switch (type) { + case hwmon_pwm: + switch (attr) { + case hwmon_pwm_input: + fan = acer_wmi_fan_channel_to_fan_id[channel]; + speed = fixp_linear_interpolate(0, 0, U8_MAX, 100, + clamp_val(val, 0, U8_MAX)); + + return WMID_gaming_set_gaming_fan_speed(fan, speed); + case hwmon_pwm_enable: + fan_bitmap = acer_wmi_fan_channel_to_fan_bitmap[channel]; + + switch (val) { + case 0: + mode = ACER_WMID_FAN_MODE_TURBO; + break; + case 1: + mode = ACER_WMID_FAN_MODE_CUSTOM; + break; + case 2: + mode = ACER_WMID_FAN_MODE_AUTO; + break; + default: + return -EINVAL; + } + + return WMID_gaming_set_fan_behavior(fan_bitmap, mode); + default: + return -EOPNOTSUPP; + } default: return -EOPNOTSUPP; } @@ -2835,11 +3074,16 @@ static const struct hwmon_channel_info *const acer_wmi_hwmon_info[] = { HWMON_F_INPUT, HWMON_F_INPUT ), + HWMON_CHANNEL_INFO(pwm, + HWMON_PWM_INPUT | HWMON_PWM_ENABLE, + HWMON_PWM_INPUT | HWMON_PWM_ENABLE + ), NULL }; static const struct hwmon_ops acer_wmi_hwmon_ops = { .read = acer_wmi_hwmon_read, + .write = acer_wmi_hwmon_write, .is_visible = acer_wmi_hwmon_is_visible, }; diff --git a/drivers/platform/x86/amd/hfi/hfi.c b/drivers/platform/x86/amd/hfi/hfi.c index a465ac6f607e..83863a5e0fbc 100644 --- a/drivers/platform/x86/amd/hfi/hfi.c +++ b/drivers/platform/x86/amd/hfi/hfi.c @@ -12,7 +12,6 @@ #include <linux/acpi.h> #include <linux/cpu.h> -#include <linux/cpumask.h> #include <linux/debugfs.h> #include <linux/gfp.h> #include <linux/init.h> @@ -95,7 +94,6 @@ struct amd_hfi_classes { * struct amd_hfi_cpuinfo - HFI workload class info per CPU * @cpu: CPU index * @apic_id: APIC id of the current CPU - * @cpus: mask of CPUs associated with amd_hfi_cpuinfo * @class_index: workload class ID index * @nr_class: max number of 
workload class supported * @ipcc_scores: ipcc scores for each class @@ -106,7 +104,6 @@ struct amd_hfi_classes { struct amd_hfi_cpuinfo { int cpu; u32 apic_id; - cpumask_var_t cpus; s16 class_index; u8 nr_class; int *ipcc_scores; @@ -295,11 +292,6 @@ static int amd_hfi_online(unsigned int cpu) guard(mutex)(&hfi_cpuinfo_lock); - if (!zalloc_cpumask_var(&hfi_info->cpus, GFP_KERNEL)) - return -ENOMEM; - - cpumask_set_cpu(cpu, hfi_info->cpus); - ret = amd_hfi_set_state(cpu, true); if (ret) pr_err("WCT enable failed for CPU %u\n", cpu); @@ -329,8 +321,6 @@ static int amd_hfi_offline(unsigned int cpu) if (ret) pr_err("WCT disable failed for CPU %u\n", cpu); - free_cpumask_var(hfi_info->cpus); - return ret; } @@ -515,7 +505,6 @@ static int amd_hfi_probe(struct platform_device *pdev) static struct platform_driver amd_hfi_driver = { .driver = { .name = AMD_HFI_DRIVER, - .owner = THIS_MODULE, .pm = &amd_hfi_pm_ops, .acpi_match_table = ACPI_PTR(amd_hfi_platform_match), }, diff --git a/drivers/platform/x86/amd/hsmp/acpi.c b/drivers/platform/x86/amd/hsmp/acpi.c index d0b74d243ce4..97ed71593bdf 100644 --- a/drivers/platform/x86/amd/hsmp/acpi.c +++ b/drivers/platform/x86/amd/hsmp/acpi.c @@ -22,12 +22,11 @@ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/sysfs.h> +#include <linux/topology.h> #include <linux/uuid.h> #include <uapi/asm-generic/errno-base.h> -#include <asm/amd/node.h> - #include "hsmp.h" #define DRIVER_NAME "hsmp_acpi" @@ -586,9 +585,9 @@ static int hsmp_acpi_probe(struct platform_device *pdev) return -ENOMEM; if (!hsmp_pdev->is_probed) { - hsmp_pdev->num_sockets = amd_num_nodes(); - if (hsmp_pdev->num_sockets == 0 || hsmp_pdev->num_sockets > MAX_AMD_NUM_NODES) { - dev_err(&pdev->dev, "Wrong number of sockets\n"); + hsmp_pdev->num_sockets = topology_max_packages(); + if (!hsmp_pdev->num_sockets) { + dev_err(&pdev->dev, "No CPU sockets detected\n"); return -ENODEV; } diff --git a/drivers/platform/x86/amd/pmf/auto-mode.c b/drivers/platform/x86/amd/pmf/auto-mode.c index a184922bba8d..faf15a8f74bb 100644 --- a/drivers/platform/x86/amd/pmf/auto-mode.c +++ b/drivers/platform/x86/amd/pmf/auto-mode.c @@ -114,14 +114,14 @@ static void amd_pmf_set_automode(struct amd_pmf_dev *dev, int idx, { struct power_table_control *pwr_ctrl = &config_store.mode_set[idx].power_control; - amd_pmf_send_cmd(dev, SET_SPL, false, pwr_ctrl->spl, NULL); - amd_pmf_send_cmd(dev, SET_FPPT, false, pwr_ctrl->fppt, NULL); - amd_pmf_send_cmd(dev, SET_SPPT, false, pwr_ctrl->sppt, NULL); - amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false, pwr_ctrl->sppt_apu_only, NULL); - amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false, pwr_ctrl->stt_min, NULL); - amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false, + amd_pmf_send_cmd(dev, SET_SPL, SET_CMD, pwr_ctrl->spl, NULL); + amd_pmf_send_cmd(dev, SET_FPPT, SET_CMD, pwr_ctrl->fppt, NULL); + amd_pmf_send_cmd(dev, SET_SPPT, SET_CMD, pwr_ctrl->sppt, NULL); + amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, SET_CMD, pwr_ctrl->sppt_apu_only, NULL); + amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, SET_CMD, pwr_ctrl->stt_min, NULL); + amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, SET_CMD, fixp_q88_fromint(pwr_ctrl->stt_skin_temp[STT_TEMP_APU]), NULL); - amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false, + amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, SET_CMD, fixp_q88_fromint(pwr_ctrl->stt_skin_temp[STT_TEMP_HS2]), NULL); if (is_apmf_func_supported(dev, APMF_FUNC_SET_FAN_IDX)) diff --git a/drivers/platform/x86/amd/pmf/cnqf.c b/drivers/platform/x86/amd/pmf/cnqf.c index 207a0b33d8d3..5469fefb6001 100644 --- 
a/drivers/platform/x86/amd/pmf/cnqf.c +++ b/drivers/platform/x86/amd/pmf/cnqf.c @@ -76,14 +76,14 @@ static int amd_pmf_set_cnqf(struct amd_pmf_dev *dev, int src, int idx, pc = &config_store.mode_set[src][idx].power_control; - amd_pmf_send_cmd(dev, SET_SPL, false, pc->spl, NULL); - amd_pmf_send_cmd(dev, SET_FPPT, false, pc->fppt, NULL); - amd_pmf_send_cmd(dev, SET_SPPT, false, pc->sppt, NULL); - amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false, pc->sppt_apu_only, NULL); - amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false, pc->stt_min, NULL); - amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false, + amd_pmf_send_cmd(dev, SET_SPL, SET_CMD, pc->spl, NULL); + amd_pmf_send_cmd(dev, SET_FPPT, SET_CMD, pc->fppt, NULL); + amd_pmf_send_cmd(dev, SET_SPPT, SET_CMD, pc->sppt, NULL); + amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, SET_CMD, pc->sppt_apu_only, NULL); + amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, SET_CMD, pc->stt_min, NULL); + amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, SET_CMD, fixp_q88_fromint(pc->stt_skin_temp[STT_TEMP_APU]), NULL); - amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false, + amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, SET_CMD, fixp_q88_fromint(pc->stt_skin_temp[STT_TEMP_HS2]), NULL); if (is_apmf_func_supported(dev, APMF_FUNC_SET_FAN_IDX)) diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c index bc544a4a5266..8fc293c9c538 100644 --- a/drivers/platform/x86/amd/pmf/core.c +++ b/drivers/platform/x86/amd/pmf/core.c @@ -131,7 +131,7 @@ static void amd_pmf_get_metrics(struct work_struct *work) /* Transfer table contents */ memset(dev->buf, 0, sizeof(dev->m_table)); - amd_pmf_send_cmd(dev, SET_TRANSFER_TABLE, 0, 7, NULL); + amd_pmf_send_cmd(dev, SET_TRANSFER_TABLE, SET_CMD, METRICS_TABLE_ID, NULL); memcpy(&dev->m_table, dev->buf, sizeof(dev->m_table)); time_elapsed_ms = ktime_to_ms(ktime_get()) - dev->start_time; @@ -289,8 +289,8 @@ int amd_pmf_set_dram_addr(struct amd_pmf_dev *dev, bool alloc_buffer) hi = phys_addr >> 32; low = phys_addr & GENMASK(31, 0); - amd_pmf_send_cmd(dev, SET_DRAM_ADDR_HIGH, 0, hi, NULL); - amd_pmf_send_cmd(dev, SET_DRAM_ADDR_LOW, 0, low, NULL); + amd_pmf_send_cmd(dev, SET_DRAM_ADDR_HIGH, SET_CMD, hi, NULL); + amd_pmf_send_cmd(dev, SET_DRAM_ADDR_LOW, SET_CMD, low, NULL); return 0; } @@ -465,9 +465,17 @@ static int amd_pmf_probe(struct platform_device *pdev) if (!dev->regbase) return -ENOMEM; - mutex_init(&dev->lock); - mutex_init(&dev->update_mutex); - mutex_init(&dev->cb_mutex); + err = devm_mutex_init(dev->dev, &dev->lock); + if (err) + return err; + + err = devm_mutex_init(dev->dev, &dev->update_mutex); + if (err) + return err; + + err = devm_mutex_init(dev->dev, &dev->cb_mutex); + if (err) + return err; apmf_acpi_init(dev); platform_set_drvdata(pdev, dev); @@ -491,9 +499,6 @@ static void amd_pmf_remove(struct platform_device *pdev) amd_pmf_notify_sbios_heartbeat_event_v2(dev, ON_UNLOAD); apmf_acpi_deinit(dev); amd_pmf_dbgfs_unregister(dev); - mutex_destroy(&dev->lock); - mutex_destroy(&dev->update_mutex); - mutex_destroy(&dev->cb_mutex); } static const struct attribute_group *amd_pmf_driver_groups[] = { diff --git a/drivers/platform/x86/amd/pmf/pmf.h b/drivers/platform/x86/amd/pmf/pmf.h index bd19f2a6bc78..9144c8c3bbaf 100644 --- a/drivers/platform/x86/amd/pmf/pmf.h +++ b/drivers/platform/x86/amd/pmf/pmf.h @@ -119,6 +119,13 @@ struct cookie_header { #define APTS_MAX_STATES 16 #define CUSTOM_BIOS_INPUT_BITS GENMASK(16, 7) +#define BIOS_INPUTS_MAX 10 + +/* amd_pmf_send_cmd() set/get */ +#define SET_CMD false +#define GET_CMD true + +#define 
METRICS_TABLE_ID 7 typedef void (*apmf_event_handler_t)(acpi_handle handle, u32 event, void *data); @@ -204,7 +211,7 @@ struct apmf_sbios_req_v1 { u8 skin_temp_apu; u8 skin_temp_hs2; u8 enable_cnqf; - u32 custom_policy[10]; + u32 custom_policy[BIOS_INPUTS_MAX]; } __packed; struct apmf_sbios_req_v2 { @@ -216,7 +223,7 @@ struct apmf_sbios_req_v2 { u32 stt_min_limit; u8 skin_temp_apu; u8 skin_temp_hs2; - u32 custom_policy[10]; + u32 custom_policy[BIOS_INPUTS_MAX]; } __packed; struct apmf_fan_idx { @@ -243,12 +250,12 @@ struct smu_pmf_metrics_v2 { u16 vclk_freq; /* MHz */ u16 vcn_activity; /* VCN busy % [0-100] */ u16 vpeclk_freq; /* MHz */ - u16 ipuclk_freq; /* MHz */ - u16 ipu_busy[8]; /* NPU busy % [0-100] */ + u16 npuclk_freq; /* MHz */ + u16 npu_busy[8]; /* NPU busy % [0-100] */ u16 dram_reads; /* MB/sec */ u16 dram_writes; /* MB/sec */ u16 core_c0residency[16]; /* C0 residency % [0-100] */ - u16 ipu_power; /* mW */ + u16 npu_power; /* mW */ u32 apu_power; /* mW */ u32 gfx_power; /* mW */ u32 dgpu_power; /* mW */ @@ -257,9 +264,9 @@ struct smu_pmf_metrics_v2 { u32 filter_alpha_value; /* time constant [us] */ u32 metrics_counter; u16 memclk_freq; /* MHz */ - u16 mpipuclk_freq; /* MHz */ - u16 ipu_reads; /* MB/sec */ - u16 ipu_writes; /* MB/sec */ + u16 mpnpuclk_freq; /* MHz */ + u16 npu_reads; /* MB/sec */ + u16 npu_writes; /* MB/sec */ u32 throttle_residency_prochot; u32 throttle_residency_spl; u32 throttle_residency_fppt; @@ -355,7 +362,7 @@ enum power_modes_v2 { }; struct pmf_bios_inputs_prev { - u32 custom_bios_inputs[10]; + u32 custom_bios_inputs[BIOS_INPUTS_MAX]; }; struct amd_pmf_dev { @@ -451,7 +458,7 @@ struct os_power_slider { struct amd_pmf_notify_smart_pc_update { u16 size; u32 pending_req; - u32 custom_bios[10]; + u32 custom_bios[BIOS_INPUTS_MAX]; } __packed; struct fan_table_control { diff --git a/drivers/platform/x86/amd/pmf/spc.c b/drivers/platform/x86/amd/pmf/spc.c index 85192c7536b8..0a37dc6a7950 100644 --- a/drivers/platform/x86/amd/pmf/spc.c +++ b/drivers/platform/x86/amd/pmf/spc.c @@ -202,7 +202,7 @@ static void amd_pmf_get_smu_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_ta { /* Get the updated metrics table data */ memset(dev->buf, 0, dev->mtable_size); - amd_pmf_send_cmd(dev, SET_TRANSFER_TABLE, 0, 7, NULL); + amd_pmf_send_cmd(dev, SET_TRANSFER_TABLE, SET_CMD, METRICS_TABLE_ID, NULL); switch (dev->cpu_id) { case AMD_CPU_ID_PS: diff --git a/drivers/platform/x86/amd/pmf/sps.c b/drivers/platform/x86/amd/pmf/sps.c index c28f3c5744c2..0b70a5153f46 100644 --- a/drivers/platform/x86/amd/pmf/sps.c +++ b/drivers/platform/x86/amd/pmf/sps.c @@ -192,15 +192,15 @@ static void amd_pmf_load_defaults_sps(struct amd_pmf_dev *dev) static void amd_pmf_update_slider_v2(struct amd_pmf_dev *dev, int idx) { - amd_pmf_send_cmd(dev, SET_PMF_PPT, false, apts_config_store.val[idx].pmf_ppt, NULL); - amd_pmf_send_cmd(dev, SET_PMF_PPT_APU_ONLY, false, + amd_pmf_send_cmd(dev, SET_PMF_PPT, SET_CMD, apts_config_store.val[idx].pmf_ppt, NULL); + amd_pmf_send_cmd(dev, SET_PMF_PPT_APU_ONLY, SET_CMD, apts_config_store.val[idx].ppt_pmf_apu_only, NULL); - amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false, + amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, SET_CMD, apts_config_store.val[idx].stt_min_limit, NULL); - amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false, + amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, SET_CMD, fixp_q88_fromint(apts_config_store.val[idx].stt_skin_temp_limit_apu), NULL); - amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false, + amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, SET_CMD, 
fixp_q88_fromint(apts_config_store.val[idx].stt_skin_temp_limit_hs2), NULL); } @@ -211,30 +211,30 @@ void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx, int src = amd_pmf_get_power_source(); if (op == SLIDER_OP_SET) { - amd_pmf_send_cmd(dev, SET_SPL, false, config_store.prop[src][idx].spl, NULL); - amd_pmf_send_cmd(dev, SET_FPPT, false, config_store.prop[src][idx].fppt, NULL); - amd_pmf_send_cmd(dev, SET_SPPT, false, config_store.prop[src][idx].sppt, NULL); - amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false, + amd_pmf_send_cmd(dev, SET_SPL, SET_CMD, config_store.prop[src][idx].spl, NULL); + amd_pmf_send_cmd(dev, SET_FPPT, SET_CMD, config_store.prop[src][idx].fppt, NULL); + amd_pmf_send_cmd(dev, SET_SPPT, SET_CMD, config_store.prop[src][idx].sppt, NULL); + amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, SET_CMD, config_store.prop[src][idx].sppt_apu_only, NULL); - amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false, + amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, SET_CMD, config_store.prop[src][idx].stt_min, NULL); - amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false, + amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, SET_CMD, fixp_q88_fromint(config_store.prop[src][idx].stt_skin_temp[STT_TEMP_APU]), NULL); - amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false, + amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, SET_CMD, fixp_q88_fromint(config_store.prop[src][idx].stt_skin_temp[STT_TEMP_HS2]), NULL); } else if (op == SLIDER_OP_GET) { - amd_pmf_send_cmd(dev, GET_SPL, true, ARG_NONE, &table->prop[src][idx].spl); - amd_pmf_send_cmd(dev, GET_FPPT, true, ARG_NONE, &table->prop[src][idx].fppt); - amd_pmf_send_cmd(dev, GET_SPPT, true, ARG_NONE, &table->prop[src][idx].sppt); - amd_pmf_send_cmd(dev, GET_SPPT_APU_ONLY, true, ARG_NONE, + amd_pmf_send_cmd(dev, GET_SPL, GET_CMD, ARG_NONE, &table->prop[src][idx].spl); + amd_pmf_send_cmd(dev, GET_FPPT, GET_CMD, ARG_NONE, &table->prop[src][idx].fppt); + amd_pmf_send_cmd(dev, GET_SPPT, GET_CMD, ARG_NONE, &table->prop[src][idx].sppt); + amd_pmf_send_cmd(dev, GET_SPPT_APU_ONLY, GET_CMD, ARG_NONE, &table->prop[src][idx].sppt_apu_only); - amd_pmf_send_cmd(dev, GET_STT_MIN_LIMIT, true, ARG_NONE, + amd_pmf_send_cmd(dev, GET_STT_MIN_LIMIT, GET_CMD, ARG_NONE, &table->prop[src][idx].stt_min); - amd_pmf_send_cmd(dev, GET_STT_LIMIT_APU, true, ARG_NONE, + amd_pmf_send_cmd(dev, GET_STT_LIMIT_APU, GET_CMD, ARG_NONE, (u32 *)&table->prop[src][idx].stt_skin_temp[STT_TEMP_APU]); - amd_pmf_send_cmd(dev, GET_STT_LIMIT_HS2, true, ARG_NONE, + amd_pmf_send_cmd(dev, GET_STT_LIMIT_HS2, GET_CMD, ARG_NONE, (u32 *)&table->prop[src][idx].stt_skin_temp[STT_TEMP_HS2]); } } diff --git a/drivers/platform/x86/amd/pmf/tee-if.c b/drivers/platform/x86/amd/pmf/tee-if.c index 6e8116bef4f6..0abce76f89ff 100644 --- a/drivers/platform/x86/amd/pmf/tee-if.c +++ b/drivers/platform/x86/amd/pmf/tee-if.c @@ -73,17 +73,56 @@ static void amd_pmf_update_uevents(struct amd_pmf_dev *dev, u16 event) input_sync(dev->pmf_idev); } +static int amd_pmf_get_bios_output_idx(u32 action_idx) +{ + switch (action_idx) { + case PMF_POLICY_BIOS_OUTPUT_1: + return 0; + case PMF_POLICY_BIOS_OUTPUT_2: + return 1; + case PMF_POLICY_BIOS_OUTPUT_3: + return 2; + case PMF_POLICY_BIOS_OUTPUT_4: + return 3; + case PMF_POLICY_BIOS_OUTPUT_5: + return 4; + case PMF_POLICY_BIOS_OUTPUT_6: + return 5; + case PMF_POLICY_BIOS_OUTPUT_7: + return 6; + case PMF_POLICY_BIOS_OUTPUT_8: + return 7; + case PMF_POLICY_BIOS_OUTPUT_9: + return 8; + case PMF_POLICY_BIOS_OUTPUT_10: + return 9; + default: + return -EINVAL; + } +} + +static void amd_pmf_update_bios_output(struct 
amd_pmf_dev *pdev, struct ta_pmf_action *action) +{ + u32 bios_idx; + + bios_idx = amd_pmf_get_bios_output_idx(action->action_index); + + amd_pmf_smartpc_apply_bios_output(pdev, action->value, BIT(bios_idx), bios_idx); +} + static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_result *out) { + struct ta_pmf_action *action; u32 val; int idx; for (idx = 0; idx < out->actions_count; idx++) { - val = out->actions_list[idx].value; - switch (out->actions_list[idx].action_index) { + action = &out->actions_list[idx]; + val = action->value; + switch (action->action_index) { case PMF_POLICY_SPL: if (dev->prev_data->spl != val) { - amd_pmf_send_cmd(dev, SET_SPL, false, val, NULL); + amd_pmf_send_cmd(dev, SET_SPL, SET_CMD, val, NULL); dev_dbg(dev->dev, "update SPL: %u\n", val); dev->prev_data->spl = val; } @@ -91,7 +130,7 @@ static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_ case PMF_POLICY_SPPT: if (dev->prev_data->sppt != val) { - amd_pmf_send_cmd(dev, SET_SPPT, false, val, NULL); + amd_pmf_send_cmd(dev, SET_SPPT, SET_CMD, val, NULL); dev_dbg(dev->dev, "update SPPT: %u\n", val); dev->prev_data->sppt = val; } @@ -99,7 +138,7 @@ static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_ case PMF_POLICY_FPPT: if (dev->prev_data->fppt != val) { - amd_pmf_send_cmd(dev, SET_FPPT, false, val, NULL); + amd_pmf_send_cmd(dev, SET_FPPT, SET_CMD, val, NULL); dev_dbg(dev->dev, "update FPPT: %u\n", val); dev->prev_data->fppt = val; } @@ -107,7 +146,7 @@ static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_ case PMF_POLICY_SPPT_APU_ONLY: if (dev->prev_data->sppt_apuonly != val) { - amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false, val, NULL); + amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, SET_CMD, val, NULL); dev_dbg(dev->dev, "update SPPT_APU_ONLY: %u\n", val); dev->prev_data->sppt_apuonly = val; } @@ -115,7 +154,7 @@ static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_ case PMF_POLICY_STT_MIN: if (dev->prev_data->stt_minlimit != val) { - amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false, val, NULL); + amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, SET_CMD, val, NULL); dev_dbg(dev->dev, "update STT_MIN: %u\n", val); dev->prev_data->stt_minlimit = val; } @@ -123,7 +162,7 @@ static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_ case PMF_POLICY_STT_SKINTEMP_APU: if (dev->prev_data->stt_skintemp_apu != val) { - amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false, + amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, SET_CMD, fixp_q88_fromint(val), NULL); dev_dbg(dev->dev, "update STT_SKINTEMP_APU: %u\n", val); dev->prev_data->stt_skintemp_apu = val; @@ -132,7 +171,7 @@ static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_ case PMF_POLICY_STT_SKINTEMP_HS2: if (dev->prev_data->stt_skintemp_hs2 != val) { - amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false, + amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, SET_CMD, fixp_q88_fromint(val), NULL); dev_dbg(dev->dev, "update STT_SKINTEMP_HS2: %u\n", val); dev->prev_data->stt_skintemp_hs2 = val; @@ -141,7 +180,7 @@ static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_ case PMF_POLICY_P3T: if (dev->prev_data->p3t_limit != val) { - amd_pmf_send_cmd(dev, SET_P3T, false, val, NULL); + amd_pmf_send_cmd(dev, SET_P3T, SET_CMD, val, NULL); dev_dbg(dev->dev, "update P3T: %u\n", val); dev->prev_data->p3t_limit = val; } @@ -149,7 +188,7 @@ static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct 
ta_pmf_enact_ case PMF_POLICY_PMF_PPT: if (dev->prev_data->pmf_ppt != val) { - amd_pmf_send_cmd(dev, SET_PMF_PPT, false, val, NULL); + amd_pmf_send_cmd(dev, SET_PMF_PPT, SET_CMD, val, NULL); dev_dbg(dev->dev, "update PMF PPT: %u\n", val); dev->prev_data->pmf_ppt = val; } @@ -157,7 +196,7 @@ static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_ case PMF_POLICY_PMF_PPT_APU_ONLY: if (dev->prev_data->pmf_ppt_apu_only != val) { - amd_pmf_send_cmd(dev, SET_PMF_PPT_APU_ONLY, false, val, NULL); + amd_pmf_send_cmd(dev, SET_PMF_PPT_APU_ONLY, SET_CMD, val, NULL); dev_dbg(dev->dev, "update PMF PPT APU ONLY: %u\n", val); dev->prev_data->pmf_ppt_apu_only = val; } @@ -183,43 +222,16 @@ static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_ break; case PMF_POLICY_BIOS_OUTPUT_1: - amd_pmf_smartpc_apply_bios_output(dev, val, BIT(0), 0); - break; - case PMF_POLICY_BIOS_OUTPUT_2: - amd_pmf_smartpc_apply_bios_output(dev, val, BIT(1), 1); - break; - case PMF_POLICY_BIOS_OUTPUT_3: - amd_pmf_smartpc_apply_bios_output(dev, val, BIT(2), 2); - break; - case PMF_POLICY_BIOS_OUTPUT_4: - amd_pmf_smartpc_apply_bios_output(dev, val, BIT(3), 3); - break; - case PMF_POLICY_BIOS_OUTPUT_5: - amd_pmf_smartpc_apply_bios_output(dev, val, BIT(4), 4); - break; - case PMF_POLICY_BIOS_OUTPUT_6: - amd_pmf_smartpc_apply_bios_output(dev, val, BIT(5), 5); - break; - case PMF_POLICY_BIOS_OUTPUT_7: - amd_pmf_smartpc_apply_bios_output(dev, val, BIT(6), 6); - break; - case PMF_POLICY_BIOS_OUTPUT_8: - amd_pmf_smartpc_apply_bios_output(dev, val, BIT(7), 7); - break; - case PMF_POLICY_BIOS_OUTPUT_9: - amd_pmf_smartpc_apply_bios_output(dev, val, BIT(8), 8); - break; - case PMF_POLICY_BIOS_OUTPUT_10: - amd_pmf_smartpc_apply_bios_output(dev, val, BIT(9), 9); + amd_pmf_update_bios_output(dev, action); break; } } diff --git a/drivers/platform/x86/asus-armoury.c b/drivers/platform/x86/asus-armoury.c new file mode 100644 index 000000000000..9c1a9ad42bc4 --- /dev/null +++ b/drivers/platform/x86/asus-armoury.c @@ -0,0 +1,1161 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Asus Armoury (WMI) attributes driver. + * + * This driver uses the fw_attributes class to expose various WMI functions + * that are present in many gaming and some non-gaming ASUS laptops. + * + * These typically don't fit anywhere else in the sysfs such as under LED class, + * hwmon or others, and are set in Windows using the ASUS Armoury Crate tool. 
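+ *
+ * The attributes are registered with the firmware_attributes_class, so they
+ * show up under /sys/class/firmware-attributes/asus-armoury/attributes/.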
+ * + * Copyright(C) 2024 Luke Jones <luke@ljones.dev> + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/acpi.h> +#include <linux/array_size.h> +#include <linux/bitfield.h> +#include <linux/device.h> +#include <linux/dmi.h> +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/fs.h> +#include <linux/kernel.h> +#include <linux/kmod.h> +#include <linux/kobject.h> +#include <linux/kstrtox.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/pci.h> +#include <linux/platform_data/x86/asus-wmi.h> +#include <linux/printk.h> +#include <linux/power_supply.h> +#include <linux/sysfs.h> + +#include "asus-armoury.h" +#include "firmware_attributes_class.h" + +#define ASUS_NB_WMI_EVENT_GUID "0B3CBB35-E3C2-45ED-91C2-4C5A6D195D1C" + +#define ASUS_MINI_LED_MODE_MASK GENMASK(1, 0) +/* Standard modes for devices with only on/off */ +#define ASUS_MINI_LED_OFF 0x00 +#define ASUS_MINI_LED_ON 0x01 +/* Like "on" but the effect is more vibrant or brighter */ +#define ASUS_MINI_LED_STRONG_MODE 0x02 +/* New modes for devices with 3 mini-led mode types */ +#define ASUS_MINI_LED_2024_WEAK 0x00 +#define ASUS_MINI_LED_2024_STRONG 0x01 +#define ASUS_MINI_LED_2024_OFF 0x02 + +/* Power tunable attribute name defines */ +#define ATTR_PPT_PL1_SPL "ppt_pl1_spl" +#define ATTR_PPT_PL2_SPPT "ppt_pl2_sppt" +#define ATTR_PPT_PL3_FPPT "ppt_pl3_fppt" +#define ATTR_PPT_APU_SPPT "ppt_apu_sppt" +#define ATTR_PPT_PLATFORM_SPPT "ppt_platform_sppt" +#define ATTR_NV_DYNAMIC_BOOST "nv_dynamic_boost" +#define ATTR_NV_TEMP_TARGET "nv_temp_target" +#define ATTR_NV_BASE_TGP "nv_base_tgp" +#define ATTR_NV_TGP "nv_tgp" + +#define ASUS_ROG_TUNABLE_DC 0 +#define ASUS_ROG_TUNABLE_AC 1 + +struct rog_tunables { + const struct power_limits *power_limits; + u32 ppt_pl1_spl; // cpu + u32 ppt_pl2_sppt; // cpu + u32 ppt_pl3_fppt; // cpu + u32 ppt_apu_sppt; // plat + u32 ppt_platform_sppt; // plat + + u32 nv_dynamic_boost; + u32 nv_temp_target; + u32 nv_tgp; +}; + +struct asus_armoury_priv { + struct device *fw_attr_dev; + struct kset *fw_attr_kset; + + /* + * Mutex to protect eGPU activation/deactivation + * sequences and dGPU connection status: + * do not allow concurrent changes or changes + * before a reboot if dGPU got disabled. + */ + struct mutex egpu_mutex; + + /* Index 0 for DC, 1 for AC */ + struct rog_tunables *rog_tunables[2]; + + u32 mini_led_dev_id; + u32 gpu_mux_dev_id; +}; + +static struct asus_armoury_priv asus_armoury = { + .egpu_mutex = __MUTEX_INITIALIZER(asus_armoury.egpu_mutex), +}; + +struct fw_attrs_group { + bool pending_reboot; +}; + +static struct fw_attrs_group fw_attrs = { + .pending_reboot = false, +}; + +struct asus_attr_group { + const struct attribute_group *attr_group; + u32 wmi_devid; +}; + +static void asus_set_reboot_and_signal_event(void) +{ + fw_attrs.pending_reboot = true; + kobject_uevent(&asus_armoury.fw_attr_dev->kobj, KOBJ_CHANGE); +} + +static ssize_t pending_reboot_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "%d\n", fw_attrs.pending_reboot); +} + +static struct kobj_attribute pending_reboot = __ATTR_RO(pending_reboot); + +static bool asus_bios_requires_reboot(struct kobj_attribute *attr) +{ + return !strcmp(attr->attr.name, "gpu_mux_mode") || + !strcmp(attr->attr.name, "panel_hd_mode"); +} + +/** + * armoury_has_devstate() - Check presence of the WMI function state. + * + * @dev_id: The WMI method ID to check for presence. + * + * Returns: true iif method is supported. 
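+ *
+ * For example, asus_fw_attr_add() below keys creation of the optional sysfs
+ * groups off this check:
+ *
+ *	if (armoury_has_devstate(ASUS_WMI_DEVID_MINI_LED_MODE))
+ *		asus_armoury.mini_led_dev_id = ASUS_WMI_DEVID_MINI_LED_MODE;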
+ */ +static bool armoury_has_devstate(u32 dev_id) +{ + u32 retval; + int status; + + status = asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS, dev_id, 0, &retval); + pr_debug("%s called (0x%08x), retval: 0x%08x\n", __func__, dev_id, retval); + + return status == 0 && (retval & ASUS_WMI_DSTS_PRESENCE_BIT); +} + +/** + * armoury_get_devstate() - Get the WMI function state. + * @attr: NULL or the kobj_attribute associated to called WMI function. + * @dev_id: The WMI method ID to call. + * @retval: + * * non-NULL pointer to where to store the value returned from WMI + * * with the function presence bit cleared. + * + * Intended usage is from sysfs attribute checking associated WMI function. + * + * Returns: + * * %-ENODEV - method ID is unsupported. + * * %0 - successful and retval is filled. + * * %other - error from WMI call. + */ +static int armoury_get_devstate(struct kobj_attribute *attr, u32 *retval, u32 dev_id) +{ + int err; + + err = asus_wmi_get_devstate_dsts(dev_id, retval); + if (err) { + if (attr) + pr_err("Failed to get %s: %d\n", attr->attr.name, err); + else + pr_err("Failed to get devstate for 0x%x: %d\n", dev_id, err); + + return err; + } + + /* + * asus_wmi_get_devstate_dsts will populate retval with WMI return, but + * the true value is expressed when ASUS_WMI_DSTS_PRESENCE_BIT is clear. + */ + *retval &= ~ASUS_WMI_DSTS_PRESENCE_BIT; + + return 0; +} + +/** + * armoury_set_devstate() - Set the WMI function state. + * @attr: The kobj_attribute associated to called WMI function. + * @dev_id: The WMI method ID to call. + * @value: The new value to be set. + * @retval: Where to store the value returned from WMI or NULL. + * + * Intended usage is from sysfs attribute setting associated WMI function. + * Before calling the presence of the function should be checked. + * + * Every WMI write MUST go through this function to enforce safety checks. + * + * Results !1 is usually considered a fail by ASUS, but some WMI methods + * (like eGPU or CPU cores) do use > 1 to return a status code or similar: + * in these cases caller is interested in the actual return value + * and should perform relevant checks. + * + * Returns: + * * %-EINVAL - attempt to set a dangerous or unsupported value. + * * %-EIO - WMI function returned an error. + * * %0 - successful and retval is filled. + * * %other - error from WMI call. + */ +static int armoury_set_devstate(struct kobj_attribute *attr, + u32 value, u32 *retval, u32 dev_id) +{ + u32 result; + int err; + + /* + * Prevent developers from bricking devices or issuing dangerous + * commands that can be difficult or impossible to recover from. + */ + switch (dev_id) { + case ASUS_WMI_DEVID_APU_MEM: + /* + * A hard reset might suffice to save the device, + * but there is no value in sending these commands. + */ + if (value == 0x100 || value == 0x101) { + pr_err("Refusing to set APU memory to unsafe value: 0x%x\n", value); + return -EINVAL; + } + break; + default: + /* No problems are known for this dev_id */ + break; + } + + err = asus_wmi_set_devstate(dev_id, value, retval ? retval : &result); + if (err) { + if (attr) + pr_err("Failed to set %s: %d\n", attr->attr.name, err); + else + pr_err("Failed to set devstate for 0x%x: %d\n", dev_id, err); + + return err; + } + + /* + * If retval == NULL caller is uninterested in return value: + * perform the most common result check here. 
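+ * Callers that need the raw firmware result (for example the eGPU handler,
+ * which treats 0x01 and 0x02 as different kinds of success) pass a non-NULL
+ * retval and do their own checking instead.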
+ */ + if ((retval == NULL) && (result == 0)) { + pr_err("Failed to set %s: (result): 0x%x\n", attr->attr.name, result); + return -EIO; + } + + return 0; +} + +static int armoury_attr_enum_list(char *buf, size_t enum_values) +{ + size_t i; + int len = 0; + + for (i = 0; i < enum_values; i++) { + if (i == 0) + len += sysfs_emit_at(buf, len, "%zu", i); + else + len += sysfs_emit_at(buf, len, ";%zu", i); + } + len += sysfs_emit_at(buf, len, "\n"); + + return len; +} + +ssize_t armoury_attr_uint_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count, u32 min, u32 max, + u32 *store_value, u32 wmi_dev) +{ + u32 value; + int err; + + err = kstrtou32(buf, 10, &value); + if (err) + return err; + + if (value < min || value > max) + return -EINVAL; + + err = armoury_set_devstate(attr, value, NULL, wmi_dev); + if (err) + return err; + + if (store_value != NULL) + *store_value = value; + sysfs_notify(kobj, NULL, attr->attr.name); + + if (asus_bios_requires_reboot(attr)) + asus_set_reboot_and_signal_event(); + + return count; +} + +ssize_t armoury_attr_uint_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf, u32 wmi_dev) +{ + u32 result; + int err; + + err = armoury_get_devstate(attr, &result, wmi_dev); + if (err) + return err; + + return sysfs_emit(buf, "%u\n", result); +} + +static ssize_t enum_type_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "enumeration\n"); +} + +static ssize_t int_type_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "integer\n"); +} + +/* Mini-LED mode **************************************************************/ + +/* Values map for mini-led modes on 2023 and earlier models. */ +static u32 mini_led_mode1_map[] = { + [0] = ASUS_MINI_LED_OFF, + [1] = ASUS_MINI_LED_ON, +}; + +/* Values map for mini-led modes on 2024 and later models. 
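+ * On these models the firmware value for "off" is ASUS_MINI_LED_2024_OFF
+ * (0x02), so the ordering differs from the older on/off map above.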
*/ +static u32 mini_led_mode2_map[] = { + [0] = ASUS_MINI_LED_2024_OFF, + [1] = ASUS_MINI_LED_2024_WEAK, + [2] = ASUS_MINI_LED_2024_STRONG, +}; + +static ssize_t mini_led_mode_current_value_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + u32 *mini_led_mode_map; + size_t mini_led_mode_map_size; + u32 i, mode; + int err; + + switch (asus_armoury.mini_led_dev_id) { + case ASUS_WMI_DEVID_MINI_LED_MODE: + mini_led_mode_map = mini_led_mode1_map; + mini_led_mode_map_size = ARRAY_SIZE(mini_led_mode1_map); + break; + + case ASUS_WMI_DEVID_MINI_LED_MODE2: + mini_led_mode_map = mini_led_mode2_map; + mini_led_mode_map_size = ARRAY_SIZE(mini_led_mode2_map); + break; + + default: + pr_err("Unrecognized mini-LED device: %u\n", asus_armoury.mini_led_dev_id); + return -ENODEV; + } + + err = armoury_get_devstate(attr, &mode, asus_armoury.mini_led_dev_id); + if (err) + return err; + + mode = FIELD_GET(ASUS_MINI_LED_MODE_MASK, mode); + + for (i = 0; i < mini_led_mode_map_size; i++) + if (mode == mini_led_mode_map[i]) + return sysfs_emit(buf, "%u\n", i); + + pr_warn("Unrecognized mini-LED mode: %u\n", mode); + return -EINVAL; +} + +static ssize_t mini_led_mode_current_value_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + u32 *mini_led_mode_map; + size_t mini_led_mode_map_size; + u32 mode; + int err; + + err = kstrtou32(buf, 10, &mode); + if (err) + return err; + + switch (asus_armoury.mini_led_dev_id) { + case ASUS_WMI_DEVID_MINI_LED_MODE: + mini_led_mode_map = mini_led_mode1_map; + mini_led_mode_map_size = ARRAY_SIZE(mini_led_mode1_map); + break; + + case ASUS_WMI_DEVID_MINI_LED_MODE2: + mini_led_mode_map = mini_led_mode2_map; + mini_led_mode_map_size = ARRAY_SIZE(mini_led_mode2_map); + break; + + default: + pr_err("Unrecognized mini-LED devid: %u\n", asus_armoury.mini_led_dev_id); + return -ENODEV; + } + + if (mode >= mini_led_mode_map_size) { + pr_warn("Unrecognized mini-LED mode: %u\n", mode); + return -EINVAL; + } + + return armoury_attr_uint_store(kobj, attr, buf, count, + 0, mini_led_mode_map[mode], + NULL, asus_armoury.mini_led_dev_id); +} + +static ssize_t mini_led_mode_possible_values_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +{ + switch (asus_armoury.mini_led_dev_id) { + case ASUS_WMI_DEVID_MINI_LED_MODE: + return armoury_attr_enum_list(buf, ARRAY_SIZE(mini_led_mode1_map)); + case ASUS_WMI_DEVID_MINI_LED_MODE2: + return armoury_attr_enum_list(buf, ARRAY_SIZE(mini_led_mode2_map)); + default: + return -ENODEV; + } +} +ASUS_ATTR_GROUP_ENUM(mini_led_mode, "mini_led_mode", "Set the mini-LED backlight mode"); + +static ssize_t gpu_mux_mode_current_value_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t count) +{ + u32 result; + int err; + bool optimus; + + err = kstrtobool(buf, &optimus); + if (err) + return err; + + if (armoury_has_devstate(ASUS_WMI_DEVID_DGPU)) { + err = armoury_get_devstate(NULL, &result, ASUS_WMI_DEVID_DGPU); + if (err) + return err; + if (result && !optimus) { + pr_warn("Cannot switch MUX to dGPU mode when dGPU is disabled: %02X\n", + result); + return -ENODEV; + } + } + + if (armoury_has_devstate(ASUS_WMI_DEVID_EGPU)) { + err = armoury_get_devstate(NULL, &result, ASUS_WMI_DEVID_EGPU); + if (err) + return err; + if (result && !optimus) { + pr_warn("Cannot switch MUX to dGPU mode when eGPU is enabled\n"); + return -EBUSY; + } + } + + err = armoury_set_devstate(attr, optimus ? 
1 : 0, NULL, asus_armoury.gpu_mux_dev_id); + if (err) + return err; + + sysfs_notify(kobj, NULL, attr->attr.name); + asus_set_reboot_and_signal_event(); + + return count; +} +ASUS_WMI_SHOW_INT(gpu_mux_mode_current_value, asus_armoury.gpu_mux_dev_id); +ASUS_ATTR_GROUP_BOOL(gpu_mux_mode, "gpu_mux_mode", "Set the GPU display MUX mode"); + +static ssize_t dgpu_disable_current_value_store(struct kobject *kobj, + struct kobj_attribute *attr, const char *buf, + size_t count) +{ + u32 result; + int err; + bool disable; + + err = kstrtobool(buf, &disable); + if (err) + return err; + + if (asus_armoury.gpu_mux_dev_id) { + err = armoury_get_devstate(NULL, &result, asus_armoury.gpu_mux_dev_id); + if (err) + return err; + if (!result && disable) { + pr_warn("Cannot disable dGPU when the MUX is in dGPU mode\n"); + return -EBUSY; + } + } + + scoped_guard(mutex, &asus_armoury.egpu_mutex) { + err = armoury_set_devstate(attr, disable ? 1 : 0, NULL, ASUS_WMI_DEVID_DGPU); + if (err) + return err; + } + + sysfs_notify(kobj, NULL, attr->attr.name); + + return count; +} +ASUS_WMI_SHOW_INT(dgpu_disable_current_value, ASUS_WMI_DEVID_DGPU); +ASUS_ATTR_GROUP_BOOL(dgpu_disable, "dgpu_disable", "Disable the dGPU"); + +/* Values map for eGPU activation requests. */ +static u32 egpu_status_map[] = { + [0] = 0x00000000U, + [1] = 0x00000001U, + [2] = 0x00000101U, + [3] = 0x00000201U, +}; + +/* + * armoury_pci_rescan() - Performs a PCI rescan + * + * Bring up any GPU that has been hotplugged in the system. + */ +static void armoury_pci_rescan(void) +{ + struct pci_bus *b = NULL; + + pci_lock_rescan_remove(); + while ((b = pci_find_next_bus(b)) != NULL) + pci_rescan_bus(b); + pci_unlock_rescan_remove(); +} + +/* + * The ACPI call to enable the eGPU might also disable the internal dGPU, + * but this is not always the case and on certain models enabling the eGPU + * when the dGPU is either still active or has been disabled without rebooting + * will make both GPUs malfunction and the kernel will detect many + * PCI AER unrecoverable errors. + */ +static ssize_t egpu_enable_current_value_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count) +{ + int err; + u32 requested, enable, result; + + err = kstrtou32(buf, 10, &requested); + if (err) + return err; + + if (requested >= ARRAY_SIZE(egpu_status_map)) + return -EINVAL; + enable = egpu_status_map[requested]; + + scoped_guard(mutex, &asus_armoury.egpu_mutex) { + /* Ensure the eGPU is connected before attempting to activate it. */ + if (enable) { + err = armoury_get_devstate(NULL, &result, ASUS_WMI_DEVID_EGPU_CONNECTED); + if (err) { + pr_warn("Failed to get eGPU connection status: %d\n", err); + return err; + } + if (!result) { + pr_warn("Cannot activate the eGPU while it is not detected\n"); + return -ENOENT; + } + } + + if (asus_armoury.gpu_mux_dev_id) { + err = armoury_get_devstate(NULL, &result, asus_armoury.gpu_mux_dev_id); + if (err) + return err; + + if (!result && enable) { + pr_warn("Cannot enable eGPU when the MUX is in dGPU mode\n"); + return -ENODEV; + } + } + + err = armoury_set_devstate(attr, enable, &result, ASUS_WMI_DEVID_EGPU); + if (err) { + pr_err("Failed to set %s: %d\n", attr->attr.name, err); + return err; + } + + /* + * ACPI returns value 0x01 on success and 0x02 on a partial activation: + * performing a pci rescan will bring up the device in pci-e 3.0 speed, + * after a reboot the device will work at full speed. 
+ */ + switch (result) { + case 0x01: + /* + * When a GPU is in use it does not get disconnected even if + * the ACPI call returns a success. + */ + if (!enable) { + err = armoury_get_devstate(attr, &result, ASUS_WMI_DEVID_EGPU); + if (err) { + pr_warn("Failed to ensure eGPU is deactivated: %d\n", err); + return err; + } + + if (result != 0) + return -EBUSY; + } + + pr_debug("Success changing the eGPU status\n"); + break; + case 0x02: + pr_info("Success changing the eGPU status, a reboot is strongly advised\n"); + asus_set_reboot_and_signal_event(); + break; + default: + pr_err("Failed to change the eGPU status: wmi result is 0x%x\n", result); + return -EIO; + } + } + + /* + * Perform a PCI rescan: on every tested model this is necessary + * to make the eGPU visible on the bus without rebooting. + */ + armoury_pci_rescan(); + + sysfs_notify(kobj, NULL, attr->attr.name); + + return count; +} + +static ssize_t egpu_enable_current_value_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf) +{ + int i, err; + u32 status; + + scoped_guard(mutex, &asus_armoury.egpu_mutex) { + err = armoury_get_devstate(attr, &status, ASUS_WMI_DEVID_EGPU); + if (err) + return err; + } + + for (i = 0; i < ARRAY_SIZE(egpu_status_map); i++) { + if (egpu_status_map[i] == status) + return sysfs_emit(buf, "%u\n", i); + } + + return -EIO; +} + +static ssize_t egpu_enable_possible_values_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf) +{ + return armoury_attr_enum_list(buf, ARRAY_SIZE(egpu_status_map)); +} +ASUS_ATTR_GROUP_ENUM(egpu_enable, "egpu_enable", "Enable the eGPU (also disables dGPU)"); + +/* Device memory available to APU */ + +/* + * Values map for APU reserved memory (index + 1 = number of GB). + * Some look out of order, but are actually correct. 
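+ * For example, writing "3" to current_value selects apu_mem_map[3] (0x104),
+ * i.e. 4 GB reserved for the APU, while "0" selects the firmware "AUTO"
+ * minimum.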
+ */ +static u32 apu_mem_map[] = { + [0] = 0x000, /* called "AUTO" on the BIOS, is the minimum available */ + [1] = 0x102, + [2] = 0x103, + [3] = 0x104, + [4] = 0x105, + [5] = 0x107, + [6] = 0x108, + [7] = 0x109, + [8] = 0x106, +}; + +static ssize_t apu_mem_current_value_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf) +{ + int err; + u32 mem; + + err = armoury_get_devstate(attr, &mem, ASUS_WMI_DEVID_APU_MEM); + if (err) + return err; + + /* After 0x000 is set, a read will return 0x100 */ + if (mem == 0x100) + return sysfs_emit(buf, "0\n"); + + for (unsigned int i = 0; i < ARRAY_SIZE(apu_mem_map); i++) { + if (apu_mem_map[i] == mem) + return sysfs_emit(buf, "%u\n", i); + } + + pr_warn("Unrecognised value for APU mem 0x%08x\n", mem); + return -EIO; +} + +static ssize_t apu_mem_current_value_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count) +{ + int result, err; + u32 requested, mem; + + result = kstrtou32(buf, 10, &requested); + if (result) + return result; + + if (requested >= ARRAY_SIZE(apu_mem_map)) + return -EINVAL; + mem = apu_mem_map[requested]; + + err = armoury_set_devstate(attr, mem, NULL, ASUS_WMI_DEVID_APU_MEM); + if (err) { + pr_warn("Failed to set apu_mem 0x%x: %d\n", mem, err); + return err; + } + + pr_info("APU memory changed to %uGB, reboot required\n", requested + 1); + sysfs_notify(kobj, NULL, attr->attr.name); + + asus_set_reboot_and_signal_event(); + + return count; +} + +static ssize_t apu_mem_possible_values_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf) +{ + return armoury_attr_enum_list(buf, ARRAY_SIZE(apu_mem_map)); +} +ASUS_ATTR_GROUP_ENUM(apu_mem, "apu_mem", "Set available system RAM (in GB) for the APU to use"); + +/* Define helper to access the current power mode tunable values */ +static inline struct rog_tunables *get_current_tunables(void) +{ + if (power_supply_is_system_supplied()) + return asus_armoury.rog_tunables[ASUS_ROG_TUNABLE_AC]; + + return asus_armoury.rog_tunables[ASUS_ROG_TUNABLE_DC]; +} + +/* Simple attribute creation */ +ASUS_ATTR_GROUP_ENUM_INT_RO(charge_mode, "charge_mode", ASUS_WMI_DEVID_CHARGE_MODE, "0;1;2\n", + "Show the current mode of charging"); +ASUS_ATTR_GROUP_BOOL_RW(boot_sound, "boot_sound", ASUS_WMI_DEVID_BOOT_SOUND, + "Set the boot POST sound"); +ASUS_ATTR_GROUP_BOOL_RW(mcu_powersave, "mcu_powersave", ASUS_WMI_DEVID_MCU_POWERSAVE, + "Set MCU powersaving mode"); +ASUS_ATTR_GROUP_BOOL_RW(panel_od, "panel_overdrive", ASUS_WMI_DEVID_PANEL_OD, + "Set the panel refresh overdrive"); +ASUS_ATTR_GROUP_BOOL_RW(panel_hd_mode, "panel_hd_mode", ASUS_WMI_DEVID_PANEL_HD, + "Set the panel HD mode to UHD<0> or FHD<1>"); +ASUS_ATTR_GROUP_BOOL_RW(screen_auto_brightness, "screen_auto_brightness", + ASUS_WMI_DEVID_SCREEN_AUTO_BRIGHTNESS, + "Set the panel brightness to Off<0> or On<1>"); +ASUS_ATTR_GROUP_BOOL_RO(egpu_connected, "egpu_connected", ASUS_WMI_DEVID_EGPU_CONNECTED, + "Show the eGPU connection status"); +ASUS_ATTR_GROUP_ROG_TUNABLE(ppt_pl1_spl, ATTR_PPT_PL1_SPL, ASUS_WMI_DEVID_PPT_PL1_SPL, + "Set the CPU slow package limit"); +ASUS_ATTR_GROUP_ROG_TUNABLE(ppt_pl2_sppt, ATTR_PPT_PL2_SPPT, ASUS_WMI_DEVID_PPT_PL2_SPPT, + "Set the CPU fast package limit"); +ASUS_ATTR_GROUP_ROG_TUNABLE(ppt_pl3_fppt, ATTR_PPT_PL3_FPPT, ASUS_WMI_DEVID_PPT_PL3_FPPT, + "Set the CPU fastest package limit"); +ASUS_ATTR_GROUP_ROG_TUNABLE(ppt_apu_sppt, ATTR_PPT_APU_SPPT, ASUS_WMI_DEVID_PPT_APU_SPPT, + "Set the APU package limit"); +ASUS_ATTR_GROUP_ROG_TUNABLE(ppt_platform_sppt, 
ATTR_PPT_PLATFORM_SPPT, ASUS_WMI_DEVID_PPT_PLAT_SPPT, + "Set the platform package limit"); +ASUS_ATTR_GROUP_ROG_TUNABLE(nv_dynamic_boost, ATTR_NV_DYNAMIC_BOOST, ASUS_WMI_DEVID_NV_DYN_BOOST, + "Set the Nvidia dynamic boost limit"); +ASUS_ATTR_GROUP_ROG_TUNABLE(nv_temp_target, ATTR_NV_TEMP_TARGET, ASUS_WMI_DEVID_NV_THERM_TARGET, + "Set the Nvidia max thermal limit"); +ASUS_ATTR_GROUP_ROG_TUNABLE(nv_tgp, "nv_tgp", ASUS_WMI_DEVID_DGPU_SET_TGP, + "Set the additional TGP on top of the base TGP"); +ASUS_ATTR_GROUP_INT_VALUE_ONLY_RO(nv_base_tgp, ATTR_NV_BASE_TGP, ASUS_WMI_DEVID_DGPU_BASE_TGP, + "Read the base TGP value"); + +/* If an attribute does not require any special case handling add it here */ +static const struct asus_attr_group armoury_attr_groups[] = { + { &egpu_connected_attr_group, ASUS_WMI_DEVID_EGPU_CONNECTED }, + { &egpu_enable_attr_group, ASUS_WMI_DEVID_EGPU }, + { &dgpu_disable_attr_group, ASUS_WMI_DEVID_DGPU }, + { &apu_mem_attr_group, ASUS_WMI_DEVID_APU_MEM }, + + { &ppt_pl1_spl_attr_group, ASUS_WMI_DEVID_PPT_PL1_SPL }, + { &ppt_pl2_sppt_attr_group, ASUS_WMI_DEVID_PPT_PL2_SPPT }, + { &ppt_pl3_fppt_attr_group, ASUS_WMI_DEVID_PPT_PL3_FPPT }, + { &ppt_apu_sppt_attr_group, ASUS_WMI_DEVID_PPT_APU_SPPT }, + { &ppt_platform_sppt_attr_group, ASUS_WMI_DEVID_PPT_PLAT_SPPT }, + { &nv_dynamic_boost_attr_group, ASUS_WMI_DEVID_NV_DYN_BOOST }, + { &nv_temp_target_attr_group, ASUS_WMI_DEVID_NV_THERM_TARGET }, + { &nv_base_tgp_attr_group, ASUS_WMI_DEVID_DGPU_BASE_TGP }, + { &nv_tgp_attr_group, ASUS_WMI_DEVID_DGPU_SET_TGP }, + + { &charge_mode_attr_group, ASUS_WMI_DEVID_CHARGE_MODE }, + { &boot_sound_attr_group, ASUS_WMI_DEVID_BOOT_SOUND }, + { &mcu_powersave_attr_group, ASUS_WMI_DEVID_MCU_POWERSAVE }, + { &panel_od_attr_group, ASUS_WMI_DEVID_PANEL_OD }, + { &panel_hd_mode_attr_group, ASUS_WMI_DEVID_PANEL_HD }, + { &screen_auto_brightness_attr_group, ASUS_WMI_DEVID_SCREEN_AUTO_BRIGHTNESS }, +}; + +/** + * is_power_tunable_attr - Determines if an attribute is a power-related tunable + * @name: The name of the attribute to check + * + * This function checks if the given attribute name is related to power tuning. + * + * Return: true if the attribute is a power-related tunable, false otherwise + */ +static bool is_power_tunable_attr(const char *name) +{ + static const char * const power_tunable_attrs[] = { + ATTR_PPT_PL1_SPL, ATTR_PPT_PL2_SPPT, + ATTR_PPT_PL3_FPPT, ATTR_PPT_APU_SPPT, + ATTR_PPT_PLATFORM_SPPT, ATTR_NV_DYNAMIC_BOOST, + ATTR_NV_TEMP_TARGET, ATTR_NV_BASE_TGP, + ATTR_NV_TGP + }; + + for (unsigned int i = 0; i < ARRAY_SIZE(power_tunable_attrs); i++) { + if (!strcmp(name, power_tunable_attrs[i])) + return true; + } + + return false; +} + +/** + * has_valid_limit - Checks if a power-related attribute has a valid limit value + * @name: The name of the attribute to check + * @limits: Pointer to the power_limits structure containing limit values + * + * This function checks if a power-related attribute has a valid limit value. + * It returns false if limits is NULL or if the corresponding limit value is zero. 
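+ * For example, the ppt_pl1_spl group is only registered when the matched DMI
+ * entry provides a non-zero ppt_pl1_spl_max in its AC limits table.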
+ * + * Return: true if the attribute has a valid limit value, false otherwise + */ +static bool has_valid_limit(const char *name, const struct power_limits *limits) +{ + u32 limit_value = 0; + + if (!limits) + return false; + + if (!strcmp(name, ATTR_PPT_PL1_SPL)) + limit_value = limits->ppt_pl1_spl_max; + else if (!strcmp(name, ATTR_PPT_PL2_SPPT)) + limit_value = limits->ppt_pl2_sppt_max; + else if (!strcmp(name, ATTR_PPT_PL3_FPPT)) + limit_value = limits->ppt_pl3_fppt_max; + else if (!strcmp(name, ATTR_PPT_APU_SPPT)) + limit_value = limits->ppt_apu_sppt_max; + else if (!strcmp(name, ATTR_PPT_PLATFORM_SPPT)) + limit_value = limits->ppt_platform_sppt_max; + else if (!strcmp(name, ATTR_NV_DYNAMIC_BOOST)) + limit_value = limits->nv_dynamic_boost_max; + else if (!strcmp(name, ATTR_NV_TEMP_TARGET)) + limit_value = limits->nv_temp_target_max; + else if (!strcmp(name, ATTR_NV_BASE_TGP) || + !strcmp(name, ATTR_NV_TGP)) + limit_value = limits->nv_tgp_max; + + return limit_value > 0; +} + +static int asus_fw_attr_add(void) +{ + const struct rog_tunables *const ac_rog_tunables = + asus_armoury.rog_tunables[ASUS_ROG_TUNABLE_AC]; + const struct power_limits *limits; + bool should_create; + const char *name; + int err, i; + + asus_armoury.fw_attr_dev = device_create(&firmware_attributes_class, NULL, MKDEV(0, 0), + NULL, "%s", DRIVER_NAME); + if (IS_ERR(asus_armoury.fw_attr_dev)) { + err = PTR_ERR(asus_armoury.fw_attr_dev); + goto fail_class_get; + } + + asus_armoury.fw_attr_kset = kset_create_and_add("attributes", NULL, + &asus_armoury.fw_attr_dev->kobj); + if (!asus_armoury.fw_attr_kset) { + err = -ENOMEM; + goto err_destroy_classdev; + } + + err = sysfs_create_file(&asus_armoury.fw_attr_kset->kobj, &pending_reboot.attr); + if (err) { + pr_err("Failed to create sysfs level attributes\n"); + goto err_destroy_kset; + } + + asus_armoury.mini_led_dev_id = 0; + if (armoury_has_devstate(ASUS_WMI_DEVID_MINI_LED_MODE)) + asus_armoury.mini_led_dev_id = ASUS_WMI_DEVID_MINI_LED_MODE; + else if (armoury_has_devstate(ASUS_WMI_DEVID_MINI_LED_MODE2)) + asus_armoury.mini_led_dev_id = ASUS_WMI_DEVID_MINI_LED_MODE2; + + if (asus_armoury.mini_led_dev_id) { + err = sysfs_create_group(&asus_armoury.fw_attr_kset->kobj, + &mini_led_mode_attr_group); + if (err) { + pr_err("Failed to create sysfs-group for mini_led\n"); + goto err_remove_file; + } + } + + asus_armoury.gpu_mux_dev_id = 0; + if (armoury_has_devstate(ASUS_WMI_DEVID_GPU_MUX)) + asus_armoury.gpu_mux_dev_id = ASUS_WMI_DEVID_GPU_MUX; + else if (armoury_has_devstate(ASUS_WMI_DEVID_GPU_MUX_VIVO)) + asus_armoury.gpu_mux_dev_id = ASUS_WMI_DEVID_GPU_MUX_VIVO; + + if (asus_armoury.gpu_mux_dev_id) { + err = sysfs_create_group(&asus_armoury.fw_attr_kset->kobj, + &gpu_mux_mode_attr_group); + if (err) { + pr_err("Failed to create sysfs-group for gpu_mux\n"); + goto err_remove_mini_led_group; + } + } + + for (i = 0; i < ARRAY_SIZE(armoury_attr_groups); i++) { + if (!armoury_has_devstate(armoury_attr_groups[i].wmi_devid)) + continue; + + /* Always create by default, unless PPT is not present */ + should_create = true; + name = armoury_attr_groups[i].attr_group->name; + + /* Check if this is a power-related tunable requiring limits */ + if (ac_rog_tunables && ac_rog_tunables->power_limits && + is_power_tunable_attr(name)) { + limits = ac_rog_tunables->power_limits; + /* Check only AC: if not present then DC won't be either */ + should_create = has_valid_limit(name, limits); + if (!should_create) + pr_debug("Missing max value for tunable %s\n", name); + } + + if (should_create) 
{ + err = sysfs_create_group(&asus_armoury.fw_attr_kset->kobj, + armoury_attr_groups[i].attr_group); + if (err) { + pr_err("Failed to create sysfs-group for %s\n", + armoury_attr_groups[i].attr_group->name); + goto err_remove_groups; + } + } + } + + return 0; + +err_remove_groups: + while (i--) { + if (armoury_has_devstate(armoury_attr_groups[i].wmi_devid)) + sysfs_remove_group(&asus_armoury.fw_attr_kset->kobj, + armoury_attr_groups[i].attr_group); + } + if (asus_armoury.gpu_mux_dev_id) + sysfs_remove_group(&asus_armoury.fw_attr_kset->kobj, &gpu_mux_mode_attr_group); +err_remove_mini_led_group: + if (asus_armoury.mini_led_dev_id) + sysfs_remove_group(&asus_armoury.fw_attr_kset->kobj, &mini_led_mode_attr_group); +err_remove_file: + sysfs_remove_file(&asus_armoury.fw_attr_kset->kobj, &pending_reboot.attr); +err_destroy_kset: + kset_unregister(asus_armoury.fw_attr_kset); +err_destroy_classdev: +fail_class_get: + device_destroy(&firmware_attributes_class, MKDEV(0, 0)); + return err; +} + +/* Init / exit ****************************************************************/ + +/* Set up the min/max and defaults for ROG tunables */ +static void init_rog_tunables(void) +{ + const struct power_limits *ac_limits, *dc_limits; + struct rog_tunables *ac_rog_tunables = NULL, *dc_rog_tunables = NULL; + const struct power_data *power_data; + const struct dmi_system_id *dmi_id; + + /* Match the system against the power_limits table */ + dmi_id = dmi_first_match(power_limits); + if (!dmi_id) { + pr_warn("No matching power limits found for this system\n"); + return; + } + + /* Get the power data for this system */ + power_data = dmi_id->driver_data; + if (!power_data) { + pr_info("No power data available for this system\n"); + return; + } + + /* Initialize AC power tunables */ + ac_limits = power_data->ac_data; + if (ac_limits) { + ac_rog_tunables = kzalloc(sizeof(*asus_armoury.rog_tunables[ASUS_ROG_TUNABLE_AC]), + GFP_KERNEL); + if (!ac_rog_tunables) + goto err_nomem; + + asus_armoury.rog_tunables[ASUS_ROG_TUNABLE_AC] = ac_rog_tunables; + ac_rog_tunables->power_limits = ac_limits; + + /* Set initial AC values */ + ac_rog_tunables->ppt_pl1_spl = + ac_limits->ppt_pl1_spl_def ? + ac_limits->ppt_pl1_spl_def : + ac_limits->ppt_pl1_spl_max; + + ac_rog_tunables->ppt_pl2_sppt = + ac_limits->ppt_pl2_sppt_def ? + ac_limits->ppt_pl2_sppt_def : + ac_limits->ppt_pl2_sppt_max; + + ac_rog_tunables->ppt_pl3_fppt = + ac_limits->ppt_pl3_fppt_def ? + ac_limits->ppt_pl3_fppt_def : + ac_limits->ppt_pl3_fppt_max; + + ac_rog_tunables->ppt_apu_sppt = + ac_limits->ppt_apu_sppt_def ? + ac_limits->ppt_apu_sppt_def : + ac_limits->ppt_apu_sppt_max; + + ac_rog_tunables->ppt_platform_sppt = + ac_limits->ppt_platform_sppt_def ? 
+ ac_limits->ppt_platform_sppt_def : + ac_limits->ppt_platform_sppt_max; + + ac_rog_tunables->nv_dynamic_boost = + ac_limits->nv_dynamic_boost_max; + ac_rog_tunables->nv_temp_target = + ac_limits->nv_temp_target_max; + ac_rog_tunables->nv_tgp = ac_limits->nv_tgp_max; + + pr_debug("AC power limits initialized for %s\n", dmi_id->matches[0].substr); + } else { + pr_debug("No AC PPT limits defined\n"); + } + + /* Initialize DC power tunables */ + dc_limits = power_data->dc_data; + if (dc_limits) { + dc_rog_tunables = kzalloc(sizeof(*asus_armoury.rog_tunables[ASUS_ROG_TUNABLE_DC]), + GFP_KERNEL); + if (!dc_rog_tunables) { + kfree(ac_rog_tunables); + goto err_nomem; + } + + asus_armoury.rog_tunables[ASUS_ROG_TUNABLE_DC] = dc_rog_tunables; + dc_rog_tunables->power_limits = dc_limits; + + /* Set initial DC values */ + dc_rog_tunables->ppt_pl1_spl = + dc_limits->ppt_pl1_spl_def ? + dc_limits->ppt_pl1_spl_def : + dc_limits->ppt_pl1_spl_max; + + dc_rog_tunables->ppt_pl2_sppt = + dc_limits->ppt_pl2_sppt_def ? + dc_limits->ppt_pl2_sppt_def : + dc_limits->ppt_pl2_sppt_max; + + dc_rog_tunables->ppt_pl3_fppt = + dc_limits->ppt_pl3_fppt_def ? + dc_limits->ppt_pl3_fppt_def : + dc_limits->ppt_pl3_fppt_max; + + dc_rog_tunables->ppt_apu_sppt = + dc_limits->ppt_apu_sppt_def ? + dc_limits->ppt_apu_sppt_def : + dc_limits->ppt_apu_sppt_max; + + dc_rog_tunables->ppt_platform_sppt = + dc_limits->ppt_platform_sppt_def ? + dc_limits->ppt_platform_sppt_def : + dc_limits->ppt_platform_sppt_max; + + dc_rog_tunables->nv_dynamic_boost = + dc_limits->nv_dynamic_boost_max; + dc_rog_tunables->nv_temp_target = + dc_limits->nv_temp_target_max; + dc_rog_tunables->nv_tgp = dc_limits->nv_tgp_max; + + pr_debug("DC power limits initialized for %s\n", dmi_id->matches[0].substr); + } else { + pr_debug("No DC PPT limits defined\n"); + } + + return; + +err_nomem: + pr_err("Failed to allocate memory for tunables\n"); +} + +static int __init asus_fw_init(void) +{ + char *wmi_uid; + + wmi_uid = wmi_get_acpi_device_uid(ASUS_WMI_MGMT_GUID); + if (!wmi_uid) + return -ENODEV; + + /* + * if equal to "ASUSWMI" then it's DCTS that can't be used for this + * driver, DSTS is required. 
+ */ + if (!strcmp(wmi_uid, ASUS_ACPI_UID_ASUSWMI)) + return -ENODEV; + + init_rog_tunables(); + + /* Must always be last step to ensure data is available */ + return asus_fw_attr_add(); +} + +static void __exit asus_fw_exit(void) +{ + int i; + + for (i = ARRAY_SIZE(armoury_attr_groups) - 1; i >= 0; i--) { + if (armoury_has_devstate(armoury_attr_groups[i].wmi_devid)) + sysfs_remove_group(&asus_armoury.fw_attr_kset->kobj, + armoury_attr_groups[i].attr_group); + } + + if (asus_armoury.gpu_mux_dev_id) + sysfs_remove_group(&asus_armoury.fw_attr_kset->kobj, &gpu_mux_mode_attr_group); + + if (asus_armoury.mini_led_dev_id) + sysfs_remove_group(&asus_armoury.fw_attr_kset->kobj, &mini_led_mode_attr_group); + + sysfs_remove_file(&asus_armoury.fw_attr_kset->kobj, &pending_reboot.attr); + kset_unregister(asus_armoury.fw_attr_kset); + device_destroy(&firmware_attributes_class, MKDEV(0, 0)); + + kfree(asus_armoury.rog_tunables[ASUS_ROG_TUNABLE_AC]); + kfree(asus_armoury.rog_tunables[ASUS_ROG_TUNABLE_DC]); +} + +module_init(asus_fw_init); +module_exit(asus_fw_exit); + +MODULE_IMPORT_NS("ASUS_WMI"); +MODULE_AUTHOR("Luke Jones <luke@ljones.dev>"); +MODULE_DESCRIPTION("ASUS BIOS Configuration Driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("wmi:" ASUS_NB_WMI_EVENT_GUID); diff --git a/drivers/platform/x86/asus-armoury.h b/drivers/platform/x86/asus-armoury.h new file mode 100644 index 000000000000..a1bb2005c3f3 --- /dev/null +++ b/drivers/platform/x86/asus-armoury.h @@ -0,0 +1,1541 @@ +/* SPDX-License-Identifier: GPL-2.0 + * + * Definitions for kernel modules using asus-armoury driver + * + * Copyright (c) 2024 Luke Jones <luke@ljones.dev> + */ + +#ifndef _ASUS_ARMOURY_H_ +#define _ASUS_ARMOURY_H_ + +#include <linux/dmi.h> +#include <linux/platform_device.h> +#include <linux/sysfs.h> +#include <linux/types.h> + +#define DRIVER_NAME "asus-armoury" + +/** + * armoury_attr_uint_store() - Send an uint to WMI method if within min/max. + * @kobj: Pointer to the driver object. + * @attr: Pointer to the attribute calling this function. + * @buf: The buffer to read from, this is parsed to `uint` type. + * @count: Required by sysfs attribute macros, pass in from the callee attr. + * @min: Minimum accepted value. Below this returns -EINVAL. + * @max: Maximum accepted value. Above this returns -EINVAL. + * @store_value: Pointer to where the parsed value should be stored. + * @wmi_dev: The WMI function ID to use. + * + * This function is intended to be generic so it can be called from any "_store" + * attribute which works only with integers. + * + * Integers to be sent to the WMI method is inclusive range checked and + * an error returned if out of range. + * + * If the value is valid and WMI is success then the sysfs attribute is notified + * and if asus_bios_requires_reboot() is true then reboot attribute + * is also notified. + * + * Returns: Either count, or an error. + */ +ssize_t armoury_attr_uint_store(struct kobject *kobj, struct kobj_attribute *attr, + const char *buf, size_t count, u32 min, u32 max, + u32 *store_value, u32 wmi_dev); + +/** + * armoury_attr_uint_show() - Receive an uint from a WMI method. + * @kobj: Pointer to the driver object. + * @attr: Pointer to the attribute calling this function. + * @buf: The buffer to write to, as an `uint` type. + * @wmi_dev: The WMI function ID to use. + * + * This function is intended to be generic so it can be called from any "_show" + * attribute which works only with integers. + * + * Returns: Either count, or an error. 
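+ *
+ * The ASUS_WMI_SHOW_INT() macro below wraps this helper; for a hypothetical
+ * attribute "foo" backed by ASUS_WMI_DEVID_FOO it expands to roughly:
+ *
+ *	static ssize_t foo_show(struct kobject *kobj,
+ *				struct kobj_attribute *attr, char *buf)
+ *	{
+ *		return armoury_attr_uint_show(kobj, attr, buf, ASUS_WMI_DEVID_FOO);
+ *	}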
+ */ +ssize_t armoury_attr_uint_show(struct kobject *kobj, struct kobj_attribute *attr, + char *buf, u32 wmi_dev); + +#define __ASUS_ATTR_RO(_func, _name) \ + { \ + .attr = { .name = __stringify(_name), .mode = 0444 }, \ + .show = _func##_##_name##_show, \ + } + +#define __ASUS_ATTR_RO_AS(_name, _show) \ + { \ + .attr = { .name = __stringify(_name), .mode = 0444 }, \ + .show = _show, \ + } + +#define __ASUS_ATTR_RW(_func, _name) \ + __ATTR(_name, 0644, _func##_##_name##_show, _func##_##_name##_store) + +#define __WMI_STORE_INT(_attr, _min, _max, _wmi) \ + static ssize_t _attr##_store(struct kobject *kobj, \ + struct kobj_attribute *attr, \ + const char *buf, size_t count) \ + { \ + return armoury_attr_uint_store(kobj, attr, buf, count, _min, \ + _max, NULL, _wmi); \ + } + +#define ASUS_WMI_SHOW_INT(_attr, _wmi) \ + static ssize_t _attr##_show(struct kobject *kobj, \ + struct kobj_attribute *attr, char *buf) \ + { \ + return armoury_attr_uint_show(kobj, attr, buf, _wmi); \ + } + +/* Create functions and attributes for use in other macros or on their own */ + +/* Shows a formatted static variable */ +#define __ATTR_SHOW_FMT(_prop, _attrname, _fmt, _val) \ + static ssize_t _attrname##_##_prop##_show( \ + struct kobject *kobj, struct kobj_attribute *attr, char *buf) \ + { \ + return sysfs_emit(buf, _fmt, _val); \ + } \ + static struct kobj_attribute attr_##_attrname##_##_prop = \ + __ASUS_ATTR_RO(_attrname, _prop) + +#define __ATTR_RO_INT_GROUP_ENUM(_attrname, _wmi, _fsname, _possible, _dispname)\ + ASUS_WMI_SHOW_INT(_attrname##_current_value, _wmi); \ + static struct kobj_attribute attr_##_attrname##_current_value = \ + __ASUS_ATTR_RO(_attrname, current_value); \ + __ATTR_SHOW_FMT(display_name, _attrname, "%s\n", _dispname); \ + __ATTR_SHOW_FMT(possible_values, _attrname, "%s\n", _possible); \ + static struct kobj_attribute attr_##_attrname##_type = \ + __ASUS_ATTR_RO_AS(type, enum_type_show); \ + static struct attribute *_attrname##_attrs[] = { \ + &attr_##_attrname##_current_value.attr, \ + &attr_##_attrname##_display_name.attr, \ + &attr_##_attrname##_possible_values.attr, \ + &attr_##_attrname##_type.attr, \ + NULL \ + }; \ + static const struct attribute_group _attrname##_attr_group = { \ + .name = _fsname, .attrs = _attrname##_attrs \ + } + +#define __ATTR_RW_INT_GROUP_ENUM(_attrname, _minv, _maxv, _wmi, _fsname,\ + _possible, _dispname) \ + __WMI_STORE_INT(_attrname##_current_value, _minv, _maxv, _wmi); \ + ASUS_WMI_SHOW_INT(_attrname##_current_value, _wmi); \ + static struct kobj_attribute attr_##_attrname##_current_value = \ + __ASUS_ATTR_RW(_attrname, current_value); \ + __ATTR_SHOW_FMT(display_name, _attrname, "%s\n", _dispname); \ + __ATTR_SHOW_FMT(possible_values, _attrname, "%s\n", _possible); \ + static struct kobj_attribute attr_##_attrname##_type = \ + __ASUS_ATTR_RO_AS(type, enum_type_show); \ + static struct attribute *_attrname##_attrs[] = { \ + &attr_##_attrname##_current_value.attr, \ + &attr_##_attrname##_display_name.attr, \ + &attr_##_attrname##_possible_values.attr, \ + &attr_##_attrname##_type.attr, \ + NULL \ + }; \ + static const struct attribute_group _attrname##_attr_group = { \ + .name = _fsname, .attrs = _attrname##_attrs \ + } + +/* Boolean style enumeration, base macro. 
Requires adding show/store */ +#define __ATTR_GROUP_ENUM(_attrname, _fsname, _possible, _dispname) \ + __ATTR_SHOW_FMT(display_name, _attrname, "%s\n", _dispname); \ + __ATTR_SHOW_FMT(possible_values, _attrname, "%s\n", _possible); \ + static struct kobj_attribute attr_##_attrname##_type = \ + __ASUS_ATTR_RO_AS(type, enum_type_show); \ + static struct attribute *_attrname##_attrs[] = { \ + &attr_##_attrname##_current_value.attr, \ + &attr_##_attrname##_display_name.attr, \ + &attr_##_attrname##_possible_values.attr, \ + &attr_##_attrname##_type.attr, \ + NULL \ + }; \ + static const struct attribute_group _attrname##_attr_group = { \ + .name = _fsname, .attrs = _attrname##_attrs \ + } + +#define ASUS_ATTR_GROUP_BOOL_RO(_attrname, _fsname, _wmi, _dispname) \ + __ATTR_RO_INT_GROUP_ENUM(_attrname, _wmi, _fsname, "0;1", _dispname) + + +#define ASUS_ATTR_GROUP_BOOL_RW(_attrname, _fsname, _wmi, _dispname) \ + __ATTR_RW_INT_GROUP_ENUM(_attrname, 0, 1, _wmi, _fsname, "0;1", _dispname) + +#define ASUS_ATTR_GROUP_ENUM_INT_RO(_attrname, _fsname, _wmi, _possible, _dispname) \ + __ATTR_RO_INT_GROUP_ENUM(_attrname, _wmi, _fsname, _possible, _dispname) + +/* + * Requires <name>_current_value_show(), <name>_current_value_show() + */ +#define ASUS_ATTR_GROUP_BOOL(_attrname, _fsname, _dispname) \ + static struct kobj_attribute attr_##_attrname##_current_value = \ + __ASUS_ATTR_RW(_attrname, current_value); \ + __ATTR_GROUP_ENUM(_attrname, _fsname, "0;1", _dispname) + +/* + * Requires <name>_current_value_show(), <name>_current_value_show() + * and <name>_possible_values_show() + */ +#define ASUS_ATTR_GROUP_ENUM(_attrname, _fsname, _dispname) \ + __ATTR_SHOW_FMT(display_name, _attrname, "%s\n", _dispname); \ + static struct kobj_attribute attr_##_attrname##_current_value = \ + __ASUS_ATTR_RW(_attrname, current_value); \ + static struct kobj_attribute attr_##_attrname##_possible_values = \ + __ASUS_ATTR_RO(_attrname, possible_values); \ + static struct kobj_attribute attr_##_attrname##_type = \ + __ASUS_ATTR_RO_AS(type, enum_type_show); \ + static struct attribute *_attrname##_attrs[] = { \ + &attr_##_attrname##_current_value.attr, \ + &attr_##_attrname##_display_name.attr, \ + &attr_##_attrname##_possible_values.attr, \ + &attr_##_attrname##_type.attr, \ + NULL \ + }; \ + static const struct attribute_group _attrname##_attr_group = { \ + .name = _fsname, .attrs = _attrname##_attrs \ + } + +#define ASUS_ATTR_GROUP_INT_VALUE_ONLY_RO(_attrname, _fsname, _wmi, _dispname) \ + ASUS_WMI_SHOW_INT(_attrname##_current_value, _wmi); \ + static struct kobj_attribute attr_##_attrname##_current_value = \ + __ASUS_ATTR_RO(_attrname, current_value); \ + __ATTR_SHOW_FMT(display_name, _attrname, "%s\n", _dispname); \ + static struct kobj_attribute attr_##_attrname##_type = \ + __ASUS_ATTR_RO_AS(type, int_type_show); \ + static struct attribute *_attrname##_attrs[] = { \ + &attr_##_attrname##_current_value.attr, \ + &attr_##_attrname##_display_name.attr, \ + &attr_##_attrname##_type.attr, NULL \ + }; \ + static const struct attribute_group _attrname##_attr_group = { \ + .name = _fsname, .attrs = _attrname##_attrs \ + } + +/* + * ROG PPT attributes need a little different in setup as they + * require rog_tunables members. 
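+ *
+ * ASUS_ATTR_GROUP_ROG_TUNABLE() emits the usual firmware-attributes layout,
+ * e.g. for ppt_pl1_spl:
+ *
+ *	attributes/ppt_pl1_spl/current_value
+ *	attributes/ppt_pl1_spl/default_value
+ *	attributes/ppt_pl1_spl/min_value
+ *	attributes/ppt_pl1_spl/max_value
+ *	attributes/ppt_pl1_spl/scalar_increment
+ *	attributes/ppt_pl1_spl/display_name
+ *	attributes/ppt_pl1_spl/type
+ *
+ * with the min/max/default values coming from the per-model power_limits
+ * tables below.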
+ */ + +#define __ROG_TUNABLE_SHOW(_prop, _attrname, _val) \ + static ssize_t _attrname##_##_prop##_show( \ + struct kobject *kobj, struct kobj_attribute *attr, char *buf) \ + { \ + struct rog_tunables *tunables = get_current_tunables(); \ + \ + if (!tunables || !tunables->power_limits) \ + return -ENODEV; \ + \ + return sysfs_emit(buf, "%d\n", tunables->power_limits->_val); \ + } \ + static struct kobj_attribute attr_##_attrname##_##_prop = \ + __ASUS_ATTR_RO(_attrname, _prop) + +#define __ROG_TUNABLE_SHOW_DEFAULT(_attrname) \ + static ssize_t _attrname##_default_value_show( \ + struct kobject *kobj, struct kobj_attribute *attr, char *buf) \ + { \ + struct rog_tunables *tunables = get_current_tunables(); \ + \ + if (!tunables || !tunables->power_limits) \ + return -ENODEV; \ + \ + return sysfs_emit( \ + buf, "%d\n", \ + tunables->power_limits->_attrname##_def ? \ + tunables->power_limits->_attrname##_def : \ + tunables->power_limits->_attrname##_max); \ + } \ + static struct kobj_attribute attr_##_attrname##_default_value = \ + __ASUS_ATTR_RO(_attrname, default_value) + +#define __ROG_TUNABLE_RW(_attr, _wmi) \ + static ssize_t _attr##_current_value_store( \ + struct kobject *kobj, struct kobj_attribute *attr, \ + const char *buf, size_t count) \ + { \ + struct rog_tunables *tunables = get_current_tunables(); \ + \ + if (!tunables || !tunables->power_limits) \ + return -ENODEV; \ + \ + if (tunables->power_limits->_attr##_min == \ + tunables->power_limits->_attr##_max) \ + return -EINVAL; \ + \ + return armoury_attr_uint_store(kobj, attr, buf, count, \ + tunables->power_limits->_attr##_min, \ + tunables->power_limits->_attr##_max, \ + &tunables->_attr, _wmi); \ + } \ + static ssize_t _attr##_current_value_show( \ + struct kobject *kobj, struct kobj_attribute *attr, char *buf) \ + { \ + struct rog_tunables *tunables = get_current_tunables(); \ + \ + if (!tunables) \ + return -ENODEV; \ + \ + return sysfs_emit(buf, "%u\n", tunables->_attr); \ + } \ + static struct kobj_attribute attr_##_attr##_current_value = \ + __ASUS_ATTR_RW(_attr, current_value) + +#define ASUS_ATTR_GROUP_ROG_TUNABLE(_attrname, _fsname, _wmi, _dispname) \ + __ROG_TUNABLE_RW(_attrname, _wmi); \ + __ROG_TUNABLE_SHOW_DEFAULT(_attrname); \ + __ROG_TUNABLE_SHOW(min_value, _attrname, _attrname##_min); \ + __ROG_TUNABLE_SHOW(max_value, _attrname, _attrname##_max); \ + __ATTR_SHOW_FMT(scalar_increment, _attrname, "%d\n", 1); \ + __ATTR_SHOW_FMT(display_name, _attrname, "%s\n", _dispname); \ + static struct kobj_attribute attr_##_attrname##_type = \ + __ASUS_ATTR_RO_AS(type, int_type_show); \ + static struct attribute *_attrname##_attrs[] = { \ + &attr_##_attrname##_current_value.attr, \ + &attr_##_attrname##_default_value.attr, \ + &attr_##_attrname##_min_value.attr, \ + &attr_##_attrname##_max_value.attr, \ + &attr_##_attrname##_scalar_increment.attr, \ + &attr_##_attrname##_display_name.attr, \ + &attr_##_attrname##_type.attr, \ + NULL \ + }; \ + static const struct attribute_group _attrname##_attr_group = { \ + .name = _fsname, .attrs = _attrname##_attrs \ + } + +/* Default is always the maximum value unless *_def is specified */ +struct power_limits { + u8 ppt_pl1_spl_min; + u8 ppt_pl1_spl_def; + u8 ppt_pl1_spl_max; + u8 ppt_pl2_sppt_min; + u8 ppt_pl2_sppt_def; + u8 ppt_pl2_sppt_max; + u8 ppt_pl3_fppt_min; + u8 ppt_pl3_fppt_def; + u8 ppt_pl3_fppt_max; + u8 ppt_apu_sppt_min; + u8 ppt_apu_sppt_def; + u8 ppt_apu_sppt_max; + u8 ppt_platform_sppt_min; + u8 ppt_platform_sppt_def; + u8 ppt_platform_sppt_max; + /* Nvidia GPU 
specific, default is always max */ + u8 nv_dynamic_boost_def; // unused. exists for macro + u8 nv_dynamic_boost_min; + u8 nv_dynamic_boost_max; + u8 nv_temp_target_def; // unused. exists for macro + u8 nv_temp_target_min; + u8 nv_temp_target_max; + u8 nv_tgp_def; // unused. exists for macro + u8 nv_tgp_min; + u8 nv_tgp_max; +}; + +struct power_data { + const struct power_limits *ac_data; + const struct power_limits *dc_data; + bool requires_fan_curve; +}; + +/* + * For each available attribute there must be a min and a max. + * _def is not required and will be assumed to be default == max if missing. + */ +static const struct dmi_system_id power_limits[] = { + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "FA401W"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + .ppt_pl1_spl_min = 15, + .ppt_pl1_spl_max = 80, + .ppt_pl2_sppt_min = 35, + .ppt_pl2_sppt_max = 80, + .ppt_pl3_fppt_min = 35, + .ppt_pl3_fppt_max = 80, + .nv_dynamic_boost_min = 5, + .nv_dynamic_boost_max = 25, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + .nv_tgp_min = 55, + .nv_tgp_max = 75, + }, + .dc_data = &(struct power_limits) { + .ppt_pl1_spl_min = 25, + .ppt_pl1_spl_max = 30, + .ppt_pl2_sppt_min = 31, + .ppt_pl2_sppt_max = 44, + .ppt_pl3_fppt_min = 45, + .ppt_pl3_fppt_max = 65, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "FA507N"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + .ppt_pl1_spl_min = 15, + .ppt_pl1_spl_max = 80, + .ppt_pl2_sppt_min = 35, + .ppt_pl2_sppt_max = 80, + .ppt_pl3_fppt_min = 35, + .ppt_pl3_fppt_max = 80, + .nv_dynamic_boost_min = 5, + .nv_dynamic_boost_max = 25, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + .dc_data = &(struct power_limits) { + .ppt_pl1_spl_min = 15, + .ppt_pl1_spl_def = 45, + .ppt_pl1_spl_max = 65, + .ppt_pl2_sppt_min = 35, + .ppt_pl2_sppt_def = 54, + .ppt_pl2_sppt_max = 65, + .ppt_pl3_fppt_min = 35, + .ppt_pl3_fppt_max = 65, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "FA507UV"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + .ppt_pl1_spl_min = 15, + .ppt_pl1_spl_max = 80, + .ppt_pl2_sppt_min = 35, + .ppt_pl2_sppt_max = 80, + .ppt_pl3_fppt_min = 35, + .ppt_pl3_fppt_max = 80, + .nv_dynamic_boost_min = 5, + .nv_dynamic_boost_max = 25, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + .nv_tgp_min = 55, + .nv_tgp_max = 115, + }, + .dc_data = &(struct power_limits) { + .ppt_pl1_spl_min = 15, + .ppt_pl1_spl_def = 45, + .ppt_pl1_spl_max = 65, + .ppt_pl2_sppt_min = 35, + .ppt_pl2_sppt_def = 54, + .ppt_pl2_sppt_max = 65, + .ppt_pl3_fppt_min = 35, + .ppt_pl3_fppt_max = 65, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "FA507R"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + .ppt_pl1_spl_min = 15, + .ppt_pl1_spl_max = 80, + .ppt_pl2_sppt_min = 25, + .ppt_pl2_sppt_max = 80, + .ppt_pl3_fppt_min = 35, + .ppt_pl3_fppt_max = 80 + }, + .dc_data = NULL, + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "FA507X"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + .ppt_pl1_spl_min = 15, + .ppt_pl1_spl_max = 80, + .ppt_pl2_sppt_min = 35, + .ppt_pl2_sppt_max = 80, + .ppt_pl3_fppt_min = 35, + .ppt_pl3_fppt_max = 80, + .nv_dynamic_boost_min = 5, + 
.nv_dynamic_boost_max = 20, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + .nv_tgp_min = 55, + .nv_tgp_max = 85, + }, + .dc_data = &(struct power_limits) { + .ppt_pl1_spl_min = 15, + .ppt_pl1_spl_def = 45, + .ppt_pl1_spl_max = 65, + .ppt_pl2_sppt_min = 35, + .ppt_pl2_sppt_def = 54, + .ppt_pl2_sppt_max = 65, + .ppt_pl3_fppt_min = 35, + .ppt_pl3_fppt_max = 65, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "FA507Z"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + .ppt_pl1_spl_min = 28, + .ppt_pl1_spl_max = 65, + .ppt_pl2_sppt_min = 28, + .ppt_pl2_sppt_max = 105, + .nv_dynamic_boost_min = 5, + .nv_dynamic_boost_max = 15, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + .nv_tgp_min = 55, + .nv_tgp_max = 85, + }, + .dc_data = &(struct power_limits) { + .ppt_pl1_spl_min = 25, + .ppt_pl1_spl_max = 45, + .ppt_pl2_sppt_min = 35, + .ppt_pl2_sppt_max = 60, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "FA607P"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + .ppt_pl1_spl_min = 30, + .ppt_pl1_spl_def = 100, + .ppt_pl1_spl_max = 135, + .ppt_pl2_sppt_min = 30, + .ppt_pl2_sppt_def = 115, + .ppt_pl2_sppt_max = 135, + .ppt_pl3_fppt_min = 30, + .ppt_pl3_fppt_max = 135, + .nv_dynamic_boost_min = 5, + .nv_dynamic_boost_max = 25, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + .nv_tgp_min = 55, + .nv_tgp_max = 115, + }, + .dc_data = &(struct power_limits) { + .ppt_pl1_spl_min = 25, + .ppt_pl1_spl_def = 45, + .ppt_pl1_spl_max = 80, + .ppt_pl2_sppt_min = 25, + .ppt_pl2_sppt_def = 60, + .ppt_pl2_sppt_max = 80, + .ppt_pl3_fppt_min = 25, + .ppt_pl3_fppt_max = 80, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "FA608WI"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + .ppt_pl1_spl_min = 15, + .ppt_pl1_spl_def = 90, + .ppt_pl1_spl_max = 90, + .ppt_pl2_sppt_min = 35, + .ppt_pl2_sppt_def = 90, + .ppt_pl2_sppt_max = 90, + .ppt_pl3_fppt_min = 35, + .ppt_pl3_fppt_def = 90, + .ppt_pl3_fppt_max = 90, + .nv_dynamic_boost_min = 5, + .nv_dynamic_boost_max = 25, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + .nv_tgp_min = 55, + .nv_tgp_max = 115, + }, + .dc_data = &(struct power_limits) { + .ppt_pl1_spl_min = 15, + .ppt_pl1_spl_def = 45, + .ppt_pl1_spl_max = 65, + .ppt_pl2_sppt_min = 35, + .ppt_pl2_sppt_def = 54, + .ppt_pl2_sppt_max = 65, + .ppt_pl3_fppt_min = 35, + .ppt_pl3_fppt_def = 65, + .ppt_pl3_fppt_max = 65, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "FA617NS"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + .ppt_apu_sppt_min = 15, + .ppt_apu_sppt_max = 80, + .ppt_platform_sppt_min = 30, + .ppt_platform_sppt_max = 120, + }, + .dc_data = &(struct power_limits) { + .ppt_apu_sppt_min = 25, + .ppt_apu_sppt_max = 35, + .ppt_platform_sppt_min = 45, + .ppt_platform_sppt_max = 100, + }, + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "FA617NT"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + .ppt_apu_sppt_min = 15, + .ppt_apu_sppt_max = 80, + .ppt_platform_sppt_min = 30, + .ppt_platform_sppt_max = 115, + }, + .dc_data = &(struct power_limits) { + .ppt_apu_sppt_min = 15, + .ppt_apu_sppt_max = 45, + 
.ppt_platform_sppt_min = 30, + .ppt_platform_sppt_max = 50, + }, + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "FA617XS"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + .ppt_apu_sppt_min = 15, + .ppt_apu_sppt_max = 80, + .ppt_platform_sppt_min = 30, + .ppt_platform_sppt_max = 120, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + .dc_data = &(struct power_limits) { + .ppt_apu_sppt_min = 25, + .ppt_apu_sppt_max = 35, + .ppt_platform_sppt_min = 45, + .ppt_platform_sppt_max = 100, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "FX507VI"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + .ppt_pl1_spl_min = 28, + .ppt_pl1_spl_max = 135, + .ppt_pl2_sppt_min = 28, + .ppt_pl2_sppt_max = 135, + .nv_dynamic_boost_min = 5, + .nv_dynamic_boost_max = 25, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + .dc_data = &(struct power_limits) { + .ppt_pl1_spl_min = 25, + .ppt_pl1_spl_max = 45, + .ppt_pl2_sppt_min = 35, + .ppt_pl2_sppt_max = 60, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + .requires_fan_curve = true, + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "FX507VV"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + .ppt_pl1_spl_min = 28, + .ppt_pl1_spl_def = 115, + .ppt_pl1_spl_max = 135, + .ppt_pl2_sppt_min = 28, + .ppt_pl2_sppt_max = 135, + .nv_dynamic_boost_min = 5, + .nv_dynamic_boost_max = 25, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + .dc_data = &(struct power_limits) { + .ppt_pl1_spl_min = 25, + .ppt_pl1_spl_max = 45, + .ppt_pl2_sppt_min = 35, + .ppt_pl2_sppt_max = 60, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + .requires_fan_curve = true, + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "FX507Z"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + .ppt_pl1_spl_min = 28, + .ppt_pl1_spl_max = 90, + .ppt_pl2_sppt_min = 28, + .ppt_pl2_sppt_max = 135, + .nv_dynamic_boost_min = 5, + .nv_dynamic_boost_max = 15, + }, + .dc_data = &(struct power_limits) { + .ppt_pl1_spl_min = 25, + .ppt_pl1_spl_max = 45, + .ppt_pl2_sppt_min = 35, + .ppt_pl2_sppt_max = 60, + }, + .requires_fan_curve = true, + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GA401Q"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + .ppt_pl1_spl_min = 15, + .ppt_pl1_spl_max = 80, + .ppt_pl2_sppt_min = 15, + .ppt_pl2_sppt_max = 80, + }, + .dc_data = NULL, + }, + }, + { + .matches = { + // This model is full AMD. No Nvidia dGPU. 
+ DMI_MATCH(DMI_BOARD_NAME, "GA402R"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + .ppt_apu_sppt_min = 15, + .ppt_apu_sppt_max = 80, + .ppt_platform_sppt_min = 30, + .ppt_platform_sppt_max = 115, + }, + .dc_data = &(struct power_limits) { + .ppt_apu_sppt_min = 25, + .ppt_apu_sppt_def = 30, + .ppt_apu_sppt_max = 45, + .ppt_platform_sppt_min = 40, + .ppt_platform_sppt_max = 60, + }, + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GA402X"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + .ppt_pl1_spl_min = 15, + .ppt_pl1_spl_def = 35, + .ppt_pl1_spl_max = 80, + .ppt_pl2_sppt_min = 25, + .ppt_pl2_sppt_def = 65, + .ppt_pl2_sppt_max = 80, + .ppt_pl3_fppt_min = 35, + .ppt_pl3_fppt_max = 80, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + .dc_data = &(struct power_limits) { + .ppt_pl1_spl_min = 15, + .ppt_pl1_spl_max = 35, + .ppt_pl2_sppt_min = 25, + .ppt_pl2_sppt_max = 35, + .ppt_pl3_fppt_min = 35, + .ppt_pl3_fppt_max = 65, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + .requires_fan_curve = true, + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GA403U"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + .ppt_pl1_spl_min = 15, + .ppt_pl1_spl_max = 80, + .ppt_pl2_sppt_min = 25, + .ppt_pl2_sppt_max = 80, + .ppt_pl3_fppt_min = 35, + .ppt_pl3_fppt_max = 80, + .nv_dynamic_boost_min = 5, + .nv_dynamic_boost_max = 25, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + .nv_tgp_min = 55, + .nv_tgp_max = 65, + }, + .dc_data = &(struct power_limits) { + .ppt_pl1_spl_min = 15, + .ppt_pl1_spl_max = 35, + .ppt_pl2_sppt_min = 25, + .ppt_pl2_sppt_max = 35, + .ppt_pl3_fppt_min = 35, + .ppt_pl3_fppt_max = 65, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + .requires_fan_curve = true, + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GA503QR"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + .ppt_pl1_spl_min = 15, + .ppt_pl1_spl_def = 35, + .ppt_pl1_spl_max = 80, + .ppt_pl2_sppt_min = 65, + .ppt_pl2_sppt_max = 80, + }, + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GA503R"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + .ppt_pl1_spl_min = 15, + .ppt_pl1_spl_def = 35, + .ppt_pl1_spl_max = 80, + .ppt_pl2_sppt_min = 35, + .ppt_pl2_sppt_def = 65, + .ppt_pl2_sppt_max = 80, + .ppt_pl3_fppt_min = 35, + .ppt_pl3_fppt_max = 80, + .nv_dynamic_boost_min = 5, + .nv_dynamic_boost_max = 20, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + .dc_data = &(struct power_limits) { + .ppt_pl1_spl_min = 15, + .ppt_pl1_spl_def = 25, + .ppt_pl1_spl_max = 65, + .ppt_pl2_sppt_min = 35, + .ppt_pl2_sppt_def = 54, + .ppt_pl2_sppt_max = 60, + .ppt_pl3_fppt_min = 35, + .ppt_pl3_fppt_max = 65, + }, + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GA605W"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + .ppt_pl1_spl_min = 15, + .ppt_pl1_spl_max = 80, + .ppt_pl2_sppt_min = 35, + .ppt_pl2_sppt_max = 80, + .ppt_pl3_fppt_min = 35, + .ppt_pl3_fppt_max = 80, + .nv_dynamic_boost_min = 5, + .nv_dynamic_boost_max = 20, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + .nv_tgp_min = 55, + .nv_tgp_max = 85, + }, + .dc_data = &(struct power_limits) { + .ppt_pl1_spl_min = 25, + .ppt_pl1_spl_max = 35, + .ppt_pl2_sppt_min = 31, + .ppt_pl2_sppt_max = 44, + .ppt_pl3_fppt_min = 45, + .ppt_pl3_fppt_max = 65, + 
.nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + .requires_fan_curve = true, + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GU603Z"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + .ppt_pl1_spl_min = 25, + .ppt_pl1_spl_max = 60, + .ppt_pl2_sppt_min = 25, + .ppt_pl2_sppt_max = 135, + .nv_dynamic_boost_min = 5, + .nv_dynamic_boost_max = 20, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + .dc_data = &(struct power_limits) { + .ppt_pl1_spl_min = 25, + .ppt_pl1_spl_max = 40, + .ppt_pl2_sppt_min = 25, + .ppt_pl2_sppt_max = 40, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + } + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GU604V"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + .ppt_pl1_spl_min = 65, + .ppt_pl1_spl_max = 120, + .ppt_pl2_sppt_min = 65, + .ppt_pl2_sppt_max = 150, + .nv_dynamic_boost_min = 5, + .nv_dynamic_boost_max = 25, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + .dc_data = &(struct power_limits) { + .ppt_pl1_spl_min = 25, + .ppt_pl1_spl_max = 40, + .ppt_pl2_sppt_min = 35, + .ppt_pl2_sppt_def = 40, + .ppt_pl2_sppt_max = 60, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GU605CW"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + .ppt_pl1_spl_min = 45, + .ppt_pl1_spl_max = 85, + .ppt_pl2_sppt_min = 56, + .ppt_pl2_sppt_max = 110, + .nv_dynamic_boost_min = 5, + .nv_dynamic_boost_max = 20, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + .nv_tgp_min = 80, + .nv_tgp_def = 90, + .nv_tgp_max = 110, + }, + .dc_data = &(struct power_limits) { + .ppt_pl1_spl_min = 25, + .ppt_pl1_spl_max = 85, + .ppt_pl2_sppt_min = 32, + .ppt_pl2_sppt_max = 110, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + .requires_fan_curve = true, + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GU605CX"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + .ppt_pl1_spl_min = 45, + .ppt_pl1_spl_max = 85, + .ppt_pl2_sppt_min = 56, + .ppt_pl2_sppt_max = 110, + .nv_dynamic_boost_min = 5, + .nv_dynamic_boost_max = 20, + .nv_temp_target_min = 7, + .nv_temp_target_max = 87, + .nv_tgp_min = 95, + .nv_tgp_def = 100, + .nv_tgp_max = 110, + }, + .dc_data = &(struct power_limits) { + .ppt_pl1_spl_min = 25, + .ppt_pl1_spl_max = 85, + .ppt_pl2_sppt_min = 32, + .ppt_pl2_sppt_max = 110, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + .requires_fan_curve = true, + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GU605M"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + .ppt_pl1_spl_min = 28, + .ppt_pl1_spl_max = 90, + .ppt_pl2_sppt_min = 28, + .ppt_pl2_sppt_max = 135, + .nv_dynamic_boost_min = 5, + .nv_dynamic_boost_max = 20, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + .dc_data = &(struct power_limits) { + .ppt_pl1_spl_min = 25, + .ppt_pl1_spl_max = 35, + .ppt_pl2_sppt_min = 38, + .ppt_pl2_sppt_max = 53, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + .requires_fan_curve = true, + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GV301Q"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + .ppt_pl1_spl_min = 15, + .ppt_pl1_spl_max = 45, + .ppt_pl2_sppt_min = 65, + .ppt_pl2_sppt_max = 80, + }, + .dc_data = NULL, + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GV301R"), + 
}, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + .ppt_pl1_spl_min = 15, + .ppt_pl1_spl_max = 45, + .ppt_pl2_sppt_min = 25, + .ppt_pl2_sppt_max = 54, + .ppt_pl3_fppt_min = 35, + .ppt_pl3_fppt_max = 65, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + .dc_data = &(struct power_limits) { + .ppt_pl1_spl_min = 15, + .ppt_pl1_spl_max = 35, + .ppt_pl2_sppt_min = 25, + .ppt_pl2_sppt_max = 35, + .ppt_pl3_fppt_min = 35, + .ppt_pl3_fppt_max = 65, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GV601R"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + .ppt_pl1_spl_min = 15, + .ppt_pl1_spl_def = 35, + .ppt_pl1_spl_max = 90, + .ppt_pl2_sppt_min = 35, + .ppt_pl2_sppt_def = 54, + .ppt_pl2_sppt_max = 100, + .ppt_pl3_fppt_min = 35, + .ppt_pl3_fppt_def = 80, + .ppt_pl3_fppt_max = 125, + .nv_dynamic_boost_min = 5, + .nv_dynamic_boost_max = 25, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + .dc_data = &(struct power_limits) { + .ppt_pl1_spl_min = 15, + .ppt_pl1_spl_def = 28, + .ppt_pl1_spl_max = 65, + .ppt_pl2_sppt_min = 35, + .ppt_pl2_sppt_def = 54, + .ppt_pl2_sppt_max = 60, + .ppt_pl3_fppt_min = 35, + .ppt_pl3_fppt_def = 80, + .ppt_pl3_fppt_max = 65, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GV601V"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + .ppt_pl1_spl_min = 28, + .ppt_pl1_spl_def = 100, + .ppt_pl1_spl_max = 110, + .ppt_pl2_sppt_min = 28, + .ppt_pl2_sppt_max = 135, + .nv_dynamic_boost_min = 5, + .nv_dynamic_boost_max = 20, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + .dc_data = &(struct power_limits) { + .ppt_pl1_spl_min = 25, + .ppt_pl1_spl_max = 40, + .ppt_pl2_sppt_min = 35, + .ppt_pl2_sppt_def = 40, + .ppt_pl2_sppt_max = 60, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GX650P"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + .ppt_pl1_spl_min = 15, + .ppt_pl1_spl_def = 110, + .ppt_pl1_spl_max = 130, + .ppt_pl2_sppt_min = 35, + .ppt_pl2_sppt_def = 125, + .ppt_pl2_sppt_max = 130, + .ppt_pl3_fppt_min = 35, + .ppt_pl3_fppt_def = 125, + .ppt_pl3_fppt_max = 135, + .nv_dynamic_boost_min = 5, + .nv_dynamic_boost_max = 25, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + .dc_data = &(struct power_limits) { + .ppt_pl1_spl_min = 15, + .ppt_pl1_spl_def = 25, + .ppt_pl1_spl_max = 65, + .ppt_pl2_sppt_min = 35, + .ppt_pl2_sppt_def = 35, + .ppt_pl2_sppt_max = 65, + .ppt_pl3_fppt_min = 35, + .ppt_pl3_fppt_def = 42, + .ppt_pl3_fppt_max = 65, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "G513I"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + /* Yes this laptop is very limited */ + .ppt_pl1_spl_min = 15, + .ppt_pl1_spl_max = 80, + .ppt_pl2_sppt_min = 15, + .ppt_pl2_sppt_max = 80, + }, + .dc_data = NULL, + .requires_fan_curve = true, + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "G513QM"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + /* Yes this laptop is very limited */ + .ppt_pl1_spl_min = 15, + .ppt_pl1_spl_max = 100, + .ppt_pl2_sppt_min = 15, + .ppt_pl2_sppt_max = 190, + }, + .dc_data = NULL, + .requires_fan_curve = true, + }, + }, 
+ { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "G513R"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + .ppt_pl1_spl_min = 35, + .ppt_pl1_spl_max = 90, + .ppt_pl2_sppt_min = 54, + .ppt_pl2_sppt_max = 100, + .ppt_pl3_fppt_min = 54, + .ppt_pl3_fppt_max = 125, + .nv_dynamic_boost_min = 5, + .nv_dynamic_boost_max = 25, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + .dc_data = &(struct power_limits) { + .ppt_pl1_spl_min = 28, + .ppt_pl1_spl_max = 50, + .ppt_pl2_sppt_min = 28, + .ppt_pl2_sppt_max = 50, + .ppt_pl3_fppt_min = 28, + .ppt_pl3_fppt_max = 65, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + .requires_fan_curve = true, + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "G614J"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + .ppt_pl1_spl_min = 28, + .ppt_pl1_spl_max = 140, + .ppt_pl2_sppt_min = 28, + .ppt_pl2_sppt_max = 175, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + .nv_dynamic_boost_min = 5, + .nv_dynamic_boost_max = 25, + }, + .dc_data = &(struct power_limits) { + .ppt_pl1_spl_min = 25, + .ppt_pl1_spl_max = 55, + .ppt_pl2_sppt_min = 25, + .ppt_pl2_sppt_max = 70, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + .requires_fan_curve = true, + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "G634J"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + .ppt_pl1_spl_min = 28, + .ppt_pl1_spl_max = 140, + .ppt_pl2_sppt_min = 28, + .ppt_pl2_sppt_max = 175, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + .nv_dynamic_boost_min = 5, + .nv_dynamic_boost_max = 25, + }, + .dc_data = &(struct power_limits) { + .ppt_pl1_spl_min = 25, + .ppt_pl1_spl_max = 55, + .ppt_pl2_sppt_min = 25, + .ppt_pl2_sppt_max = 70, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + .requires_fan_curve = true, + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "G713PV"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + .ppt_pl1_spl_min = 30, + .ppt_pl1_spl_def = 120, + .ppt_pl1_spl_max = 130, + .ppt_pl2_sppt_min = 65, + .ppt_pl2_sppt_def = 125, + .ppt_pl2_sppt_max = 130, + .ppt_pl3_fppt_min = 65, + .ppt_pl3_fppt_def = 125, + .ppt_pl3_fppt_max = 130, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + .nv_dynamic_boost_min = 5, + .nv_dynamic_boost_max = 25, + }, + .dc_data = &(struct power_limits) { + .ppt_pl1_spl_min = 25, + .ppt_pl1_spl_max = 65, + .ppt_pl2_sppt_min = 25, + .ppt_pl2_sppt_max = 65, + .ppt_pl3_fppt_min = 35, + .ppt_pl3_fppt_max = 75, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + .requires_fan_curve = true, + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "G733C"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + .ppt_pl1_spl_min = 28, + .ppt_pl1_spl_max = 170, + .ppt_pl2_sppt_min = 28, + .ppt_pl2_sppt_max = 175, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + .nv_dynamic_boost_min = 5, + .nv_dynamic_boost_max = 25, + }, + .dc_data = &(struct power_limits) { + .ppt_pl1_spl_min = 28, + .ppt_pl1_spl_max = 35, + .ppt_pl2_sppt_min = 28, + .ppt_pl2_sppt_max = 35, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + .requires_fan_curve = true, + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "G733P"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + .ppt_pl1_spl_min = 30, + .ppt_pl1_spl_def = 100, + .ppt_pl1_spl_max = 130, + .ppt_pl2_sppt_min = 
65, + .ppt_pl2_sppt_def = 125, + .ppt_pl2_sppt_max = 130, + .ppt_pl3_fppt_min = 65, + .ppt_pl3_fppt_def = 125, + .ppt_pl3_fppt_max = 130, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + .nv_dynamic_boost_min = 5, + .nv_dynamic_boost_max = 25, + }, + .dc_data = &(struct power_limits) { + .ppt_pl1_spl_min = 25, + .ppt_pl1_spl_max = 65, + .ppt_pl2_sppt_min = 25, + .ppt_pl2_sppt_max = 65, + .ppt_pl3_fppt_min = 35, + .ppt_pl3_fppt_max = 75, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + .requires_fan_curve = true, + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "G814J"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + .ppt_pl1_spl_min = 28, + .ppt_pl1_spl_max = 140, + .ppt_pl2_sppt_min = 28, + .ppt_pl2_sppt_max = 140, + .nv_dynamic_boost_min = 5, + .nv_dynamic_boost_max = 25, + }, + .dc_data = &(struct power_limits) { + .ppt_pl1_spl_min = 25, + .ppt_pl1_spl_max = 55, + .ppt_pl2_sppt_min = 25, + .ppt_pl2_sppt_max = 70, + }, + .requires_fan_curve = true, + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "G834J"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + .ppt_pl1_spl_min = 28, + .ppt_pl1_spl_max = 140, + .ppt_pl2_sppt_min = 28, + .ppt_pl2_sppt_max = 175, + .nv_dynamic_boost_min = 5, + .nv_dynamic_boost_max = 25, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + .dc_data = &(struct power_limits) { + .ppt_pl1_spl_min = 25, + .ppt_pl1_spl_max = 55, + .ppt_pl2_sppt_min = 25, + .ppt_pl2_sppt_max = 70, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + .requires_fan_curve = true, + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "H7606W"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + .ppt_pl1_spl_min = 15, + .ppt_pl1_spl_max = 80, + .ppt_pl2_sppt_min = 35, + .ppt_pl2_sppt_max = 80, + .ppt_pl3_fppt_min = 35, + .ppt_pl3_fppt_max = 80, + .nv_dynamic_boost_min = 5, + .nv_dynamic_boost_max = 20, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + .nv_tgp_min = 55, + .nv_tgp_max = 85, + }, + .dc_data = &(struct power_limits) { + .ppt_pl1_spl_min = 25, + .ppt_pl1_spl_max = 35, + .ppt_pl2_sppt_min = 31, + .ppt_pl2_sppt_max = 44, + .ppt_pl3_fppt_min = 45, + .ppt_pl3_fppt_max = 65, + .nv_temp_target_min = 75, + .nv_temp_target_max = 87, + }, + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "RC71"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + .ppt_pl1_spl_min = 7, + .ppt_pl1_spl_max = 30, + .ppt_pl2_sppt_min = 15, + .ppt_pl2_sppt_max = 43, + .ppt_pl3_fppt_min = 15, + .ppt_pl3_fppt_max = 53, + }, + .dc_data = &(struct power_limits) { + .ppt_pl1_spl_min = 7, + .ppt_pl1_spl_def = 15, + .ppt_pl1_spl_max = 25, + .ppt_pl2_sppt_min = 15, + .ppt_pl2_sppt_def = 20, + .ppt_pl2_sppt_max = 30, + .ppt_pl3_fppt_min = 15, + .ppt_pl3_fppt_def = 25, + .ppt_pl3_fppt_max = 35, + }, + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "RC72"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + .ppt_pl1_spl_min = 7, + .ppt_pl1_spl_max = 30, + .ppt_pl2_sppt_min = 15, + .ppt_pl2_sppt_max = 43, + .ppt_pl3_fppt_min = 15, + .ppt_pl3_fppt_max = 53, + }, + .dc_data = &(struct power_limits) { + .ppt_pl1_spl_min = 7, + .ppt_pl1_spl_def = 17, + .ppt_pl1_spl_max = 25, + .ppt_pl2_sppt_min = 15, + .ppt_pl2_sppt_def = 24, + .ppt_pl2_sppt_max = 30, + .ppt_pl3_fppt_min = 15, + .ppt_pl3_fppt_def = 30, + .ppt_pl3_fppt_max = 35, + }, + }, + }, + { + .matches = { + 
DMI_MATCH(DMI_BOARD_NAME, "RC73XA"), + }, + .driver_data = &(struct power_data) { + .ac_data = &(struct power_limits) { + .ppt_pl1_spl_min = 7, + .ppt_pl1_spl_max = 35, + .ppt_pl2_sppt_min = 14, + .ppt_pl2_sppt_max = 45, + .ppt_pl3_fppt_min = 19, + .ppt_pl3_fppt_max = 55, + }, + .dc_data = &(struct power_limits) { + .ppt_pl1_spl_min = 7, + .ppt_pl1_spl_def = 17, + .ppt_pl1_spl_max = 35, + .ppt_pl2_sppt_min = 13, + .ppt_pl2_sppt_def = 21, + .ppt_pl2_sppt_max = 45, + .ppt_pl3_fppt_min = 19, + .ppt_pl3_fppt_def = 26, + .ppt_pl3_fppt_max = 55, + }, + }, + }, + {} +}; + +#endif /* _ASUS_ARMOURY_H_ */ diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index e72a2b5d158e..4aec7ec69250 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -15,6 +15,7 @@ #include <linux/acpi.h> #include <linux/backlight.h> +#include <linux/bits.h> #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/dmi.h> @@ -30,6 +31,7 @@ #include <linux/pci.h> #include <linux/pci_hotplug.h> #include <linux/platform_data/x86/asus-wmi.h> +#include <linux/platform_data/x86/asus-wmi-leds-ids.h> #include <linux/platform_device.h> #include <linux/platform_profile.h> #include <linux/power_supply.h> @@ -55,8 +57,6 @@ module_param(fnlock_default, bool, 0444); #define to_asus_wmi_driver(pdrv) \ (container_of((pdrv), struct asus_wmi_driver, platform_driver)) -#define ASUS_WMI_MGMT_GUID "97845ED0-4E6D-11DE-8A39-0800200C9A66" - #define NOTIFY_BRNUP_MIN 0x11 #define NOTIFY_BRNUP_MAX 0x1f #define NOTIFY_BRNDOWN_MIN 0x20 @@ -105,8 +105,6 @@ module_param(fnlock_default, bool, 0444); #define USB_INTEL_XUSB2PR 0xD0 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31 -#define ASUS_ACPI_UID_ASUSWMI "ASUSWMI" - #define WMI_EVENT_MASK 0xFFFF #define FAN_CURVE_POINTS 8 @@ -340,6 +338,13 @@ struct asus_wmi { /* Global to allow setting externally without requiring driver data */ static enum asus_ally_mcu_hack use_ally_mcu_hack = ASUS_WMI_ALLY_MCU_HACK_INIT; +#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) +static void asus_wmi_show_deprecated(void) +{ + pr_notice_once("Accessing attributes through /sys/bus/platform/asus_wmi is deprecated and will be removed in a future release. Please switch over to /sys/class/firmware_attributes.\n"); +} +#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */ + /* WMI ************************************************************************/ static int asus_wmi_evaluate_method3(u32 method_id, @@ -390,7 +395,7 @@ int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1, u32 *retval) { return asus_wmi_evaluate_method3(method_id, arg0, arg1, 0, retval); } -EXPORT_SYMBOL_GPL(asus_wmi_evaluate_method); +EXPORT_SYMBOL_NS_GPL(asus_wmi_evaluate_method, "ASUS_WMI"); static int asus_wmi_evaluate_method5(u32 method_id, u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4, u32 *retval) @@ -554,12 +559,52 @@ static int asus_wmi_get_devstate(struct asus_wmi *asus, u32 dev_id, u32 *retval) return 0; } -int asus_wmi_set_devstate(u32 dev_id, u32 ctrl_param, - u32 *retval) +/** + * asus_wmi_get_devstate_dsts() - Get the WMI function state. + * @dev_id: The WMI method ID to call. + * @retval: A pointer to where to store the value returned from WMI. + * + * Returns: + * * %-ENODEV - method ID is unsupported. + * * %0 - successful and retval is filled. + * * %other - error from WMI call. 
+ */ +int asus_wmi_get_devstate_dsts(u32 dev_id, u32 *retval) +{ + int err; + + err = asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS, dev_id, 0, retval); + if (err) + return err; + + if ((*retval & ASUS_WMI_DSTS_PRESENCE_BIT) == 0x00) + return -ENODEV; + + return 0; +} +EXPORT_SYMBOL_NS_GPL(asus_wmi_get_devstate_dsts, "ASUS_WMI"); + +/** + * asus_wmi_set_devstate() - Set the WMI function state. + * + * Note: an asus_wmi_set_devstate() call must be paired with a + * asus_wmi_get_devstate_dsts() to check if the WMI function is supported. + * + * @dev_id: The WMI function to call. + * @ctrl_param: The argument to be used for this WMI function. + * @retval: A pointer to where to store the value returned from WMI. + * + * Returns: + * * %-ENODEV - method ID is unsupported. + * * %0 - successful and retval is filled. + * * %other - error from WMI call. + */ +int asus_wmi_set_devstate(u32 dev_id, u32 ctrl_param, u32 *retval) { return asus_wmi_evaluate_method(ASUS_WMI_METHODID_DEVS, dev_id, ctrl_param, retval); } +EXPORT_SYMBOL_NS_GPL(asus_wmi_set_devstate, "ASUS_WMI"); /* Helper for special devices with magic return codes */ static int asus_wmi_get_devstate_bits(struct asus_wmi *asus, @@ -692,6 +737,7 @@ static void asus_wmi_tablet_mode_get_state(struct asus_wmi *asus) } /* Charging mode, 1=Barrel, 2=USB ******************************************/ +#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) static ssize_t charge_mode_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -702,12 +748,16 @@ static ssize_t charge_mode_show(struct device *dev, if (result < 0) return result; + asus_wmi_show_deprecated(); + return sysfs_emit(buf, "%d\n", value & 0xff); } static DEVICE_ATTR_RO(charge_mode); +#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */ /* dGPU ********************************************************************/ +#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) static ssize_t dgpu_disable_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -718,6 +768,8 @@ static ssize_t dgpu_disable_show(struct device *dev, if (result < 0) return result; + asus_wmi_show_deprecated(); + return sysfs_emit(buf, "%d\n", result); } @@ -771,8 +823,10 @@ static ssize_t dgpu_disable_store(struct device *dev, return count; } static DEVICE_ATTR_RW(dgpu_disable); +#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */ /* eGPU ********************************************************************/ +#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) static ssize_t egpu_enable_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -783,6 +837,8 @@ static ssize_t egpu_enable_show(struct device *dev, if (result < 0) return result; + asus_wmi_show_deprecated(); + return sysfs_emit(buf, "%d\n", result); } @@ -839,8 +895,10 @@ static ssize_t egpu_enable_store(struct device *dev, return count; } static DEVICE_ATTR_RW(egpu_enable); +#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */ /* Is eGPU connected? 
*********************************************************/ +#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) static ssize_t egpu_connected_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -851,12 +909,16 @@ static ssize_t egpu_connected_show(struct device *dev, if (result < 0) return result; + asus_wmi_show_deprecated(); + return sysfs_emit(buf, "%d\n", result); } static DEVICE_ATTR_RO(egpu_connected); +#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */ /* gpu mux switch *************************************************************/ +#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) static ssize_t gpu_mux_mode_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -867,6 +929,8 @@ static ssize_t gpu_mux_mode_show(struct device *dev, if (result < 0) return result; + asus_wmi_show_deprecated(); + return sysfs_emit(buf, "%d\n", result); } @@ -925,6 +989,7 @@ static ssize_t gpu_mux_mode_store(struct device *dev, return count; } static DEVICE_ATTR_RW(gpu_mux_mode); +#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */ /* TUF Laptop Keyboard RGB Modes **********************************************/ static ssize_t kbd_rgb_mode_store(struct device *dev, @@ -1048,6 +1113,7 @@ static const struct attribute_group *kbd_rgb_mode_groups[] = { }; /* Tunable: PPT: Intel=PL1, AMD=SPPT *****************************************/ +#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) static ssize_t ppt_pl2_sppt_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) @@ -1086,6 +1152,8 @@ static ssize_t ppt_pl2_sppt_show(struct device *dev, { struct asus_wmi *asus = dev_get_drvdata(dev); + asus_wmi_show_deprecated(); + return sysfs_emit(buf, "%u\n", asus->ppt_pl2_sppt); } static DEVICE_ATTR_RW(ppt_pl2_sppt); @@ -1128,6 +1196,8 @@ static ssize_t ppt_pl1_spl_show(struct device *dev, { struct asus_wmi *asus = dev_get_drvdata(dev); + asus_wmi_show_deprecated(); + return sysfs_emit(buf, "%u\n", asus->ppt_pl1_spl); } static DEVICE_ATTR_RW(ppt_pl1_spl); @@ -1148,7 +1218,7 @@ static ssize_t ppt_fppt_store(struct device *dev, if (value < PPT_TOTAL_MIN || value > PPT_TOTAL_MAX) return -EINVAL; - err = asus_wmi_set_devstate(ASUS_WMI_DEVID_PPT_FPPT, value, &result); + err = asus_wmi_set_devstate(ASUS_WMI_DEVID_PPT_PL3_FPPT, value, &result); if (err) { pr_warn("Failed to set ppt_fppt: %d\n", err); return err; @@ -1171,6 +1241,8 @@ static ssize_t ppt_fppt_show(struct device *dev, { struct asus_wmi *asus = dev_get_drvdata(dev); + asus_wmi_show_deprecated(); + return sysfs_emit(buf, "%u\n", asus->ppt_fppt); } static DEVICE_ATTR_RW(ppt_fppt); @@ -1214,6 +1286,8 @@ static ssize_t ppt_apu_sppt_show(struct device *dev, { struct asus_wmi *asus = dev_get_drvdata(dev); + asus_wmi_show_deprecated(); + return sysfs_emit(buf, "%u\n", asus->ppt_apu_sppt); } static DEVICE_ATTR_RW(ppt_apu_sppt); @@ -1257,6 +1331,8 @@ static ssize_t ppt_platform_sppt_show(struct device *dev, { struct asus_wmi *asus = dev_get_drvdata(dev); + asus_wmi_show_deprecated(); + return sysfs_emit(buf, "%u\n", asus->ppt_platform_sppt); } static DEVICE_ATTR_RW(ppt_platform_sppt); @@ -1300,6 +1376,8 @@ static ssize_t nv_dynamic_boost_show(struct device *dev, { struct asus_wmi *asus = dev_get_drvdata(dev); + asus_wmi_show_deprecated(); + return sysfs_emit(buf, "%u\n", asus->nv_dynamic_boost); } static DEVICE_ATTR_RW(nv_dynamic_boost); @@ -1343,9 +1421,12 @@ static ssize_t nv_temp_target_show(struct device *dev, { struct asus_wmi *asus = dev_get_drvdata(dev); + asus_wmi_show_deprecated(); + 
return sysfs_emit(buf, "%u\n", asus->nv_temp_target); } static DEVICE_ATTR_RW(nv_temp_target); +#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */ /* Ally MCU Powersave ********************************************************/ @@ -1386,6 +1467,7 @@ void set_ally_mcu_powersave(bool enabled) } EXPORT_SYMBOL_NS_GPL(set_ally_mcu_powersave, "ASUS_WMI"); +#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) static ssize_t mcu_powersave_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1396,6 +1478,8 @@ static ssize_t mcu_powersave_show(struct device *dev, if (result < 0) return result; + asus_wmi_show_deprecated(); + return sysfs_emit(buf, "%d\n", result); } @@ -1431,6 +1515,7 @@ static ssize_t mcu_powersave_store(struct device *dev, return count; } static DEVICE_ATTR_RW(mcu_powersave); +#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */ /* Battery ********************************************************************/ @@ -1619,14 +1704,14 @@ static void do_kbd_led_set(struct led_classdev *led_cdev, int value) kbd_led_update(asus); } -static void kbd_led_set(struct led_classdev *led_cdev, - enum led_brightness value) +static int kbd_led_set(struct led_classdev *led_cdev, enum led_brightness value) { /* Prevent disabling keyboard backlight on module unregister */ if (led_cdev->flags & LED_UNREGISTERING) - return; + return 0; do_kbd_led_set(led_cdev, value); + return 0; } static void kbd_led_set_by_kbd(struct asus_wmi *asus, enum led_brightness value) @@ -1802,7 +1887,7 @@ static int asus_wmi_led_init(struct asus_wmi *asus) asus->kbd_led_wk = led_val; asus->kbd_led.name = "asus::kbd_backlight"; asus->kbd_led.flags = LED_BRIGHT_HW_CHANGED; - asus->kbd_led.brightness_set = kbd_led_set; + asus->kbd_led.brightness_set_blocking = kbd_led_set; asus->kbd_led.brightness_get = kbd_led_get; asus->kbd_led.max_brightness = 3; @@ -2304,6 +2389,7 @@ exit: } /* Panel Overdrive ************************************************************/ +#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) static ssize_t panel_od_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -2314,6 +2400,8 @@ static ssize_t panel_od_show(struct device *dev, if (result < 0) return result; + asus_wmi_show_deprecated(); + return sysfs_emit(buf, "%d\n", result); } @@ -2350,9 +2438,10 @@ static ssize_t panel_od_store(struct device *dev, return count; } static DEVICE_ATTR_RW(panel_od); +#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */ /* Bootup sound ***************************************************************/ - +#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) static ssize_t boot_sound_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -2363,6 +2452,8 @@ static ssize_t boot_sound_show(struct device *dev, if (result < 0) return result; + asus_wmi_show_deprecated(); + return sysfs_emit(buf, "%d\n", result); } @@ -2398,8 +2489,10 @@ static ssize_t boot_sound_store(struct device *dev, return count; } static DEVICE_ATTR_RW(boot_sound); +#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */ /* Mini-LED mode **************************************************************/ +#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) static ssize_t mini_led_mode_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -2430,6 +2523,8 @@ static ssize_t mini_led_mode_show(struct device *dev, } } + asus_wmi_show_deprecated(); + return sysfs_emit(buf, "%d\n", value); } @@ -2500,10 +2595,13 @@ static ssize_t available_mini_led_mode_show(struct device *dev, return 
sysfs_emit(buf, "0 1 2\n"); } + asus_wmi_show_deprecated(); + return sysfs_emit(buf, "0\n"); } static DEVICE_ATTR_RO(available_mini_led_mode); +#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */ /* Quirks *********************************************************************/ @@ -3791,6 +3889,7 @@ static int throttle_thermal_policy_set_default(struct asus_wmi *asus) return throttle_thermal_policy_write(asus); } +#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) static ssize_t throttle_thermal_policy_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -3834,6 +3933,7 @@ static ssize_t throttle_thermal_policy_store(struct device *dev, * Throttle thermal policy: 0 - default, 1 - overboost, 2 - silent */ static DEVICE_ATTR_RW(throttle_thermal_policy); +#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */ /* Platform profile ***********************************************************/ static int asus_wmi_platform_profile_get(struct device *dev, @@ -4435,27 +4535,29 @@ static struct attribute *platform_attributes[] = { &dev_attr_camera.attr, &dev_attr_cardr.attr, &dev_attr_touchpad.attr, - &dev_attr_charge_mode.attr, - &dev_attr_egpu_enable.attr, - &dev_attr_egpu_connected.attr, - &dev_attr_dgpu_disable.attr, - &dev_attr_gpu_mux_mode.attr, &dev_attr_lid_resume.attr, &dev_attr_als_enable.attr, &dev_attr_fan_boost_mode.attr, - &dev_attr_throttle_thermal_policy.attr, - &dev_attr_ppt_pl2_sppt.attr, - &dev_attr_ppt_pl1_spl.attr, - &dev_attr_ppt_fppt.attr, - &dev_attr_ppt_apu_sppt.attr, - &dev_attr_ppt_platform_sppt.attr, - &dev_attr_nv_dynamic_boost.attr, - &dev_attr_nv_temp_target.attr, - &dev_attr_mcu_powersave.attr, - &dev_attr_boot_sound.attr, - &dev_attr_panel_od.attr, - &dev_attr_mini_led_mode.attr, - &dev_attr_available_mini_led_mode.attr, +#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) + &dev_attr_charge_mode.attr, + &dev_attr_egpu_enable.attr, + &dev_attr_egpu_connected.attr, + &dev_attr_dgpu_disable.attr, + &dev_attr_gpu_mux_mode.attr, + &dev_attr_ppt_pl2_sppt.attr, + &dev_attr_ppt_pl1_spl.attr, + &dev_attr_ppt_fppt.attr, + &dev_attr_ppt_apu_sppt.attr, + &dev_attr_ppt_platform_sppt.attr, + &dev_attr_nv_dynamic_boost.attr, + &dev_attr_nv_temp_target.attr, + &dev_attr_mcu_powersave.attr, + &dev_attr_boot_sound.attr, + &dev_attr_panel_od.attr, + &dev_attr_mini_led_mode.attr, + &dev_attr_available_mini_led_mode.attr, + &dev_attr_throttle_thermal_policy.attr, +#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */ NULL }; @@ -4477,7 +4579,11 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj, devid = ASUS_WMI_DEVID_LID_RESUME; else if (attr == &dev_attr_als_enable.attr) devid = ASUS_WMI_DEVID_ALS_ENABLE; - else if (attr == &dev_attr_charge_mode.attr) + else if (attr == &dev_attr_fan_boost_mode.attr) + ok = asus->fan_boost_mode_available; + +#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) + if (attr == &dev_attr_charge_mode.attr) devid = ASUS_WMI_DEVID_CHARGE_MODE; else if (attr == &dev_attr_egpu_enable.attr) ok = asus->egpu_enable_available; @@ -4496,7 +4602,7 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj, else if (attr == &dev_attr_ppt_pl1_spl.attr) devid = ASUS_WMI_DEVID_PPT_PL1_SPL; else if (attr == &dev_attr_ppt_fppt.attr) - devid = ASUS_WMI_DEVID_PPT_FPPT; + devid = ASUS_WMI_DEVID_PPT_PL3_FPPT; else if (attr == &dev_attr_ppt_apu_sppt.attr) devid = ASUS_WMI_DEVID_PPT_APU_SPPT; else if (attr == &dev_attr_ppt_platform_sppt.attr) @@ -4515,6 +4621,7 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj, ok = 
asus->mini_led_dev_id != 0; else if (attr == &dev_attr_available_mini_led_mode.attr) ok = asus->mini_led_dev_id != 0; +#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */ if (devid != -1) { ok = !(asus_wmi_get_devstate_simple(asus, devid) < 0); @@ -4770,6 +4877,7 @@ static int asus_wmi_add(struct platform_device *pdev) } /* ensure defaults for tunables */ +#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) asus->ppt_pl2_sppt = 5; asus->ppt_pl1_spl = 5; asus->ppt_apu_sppt = 5; @@ -4792,17 +4900,18 @@ static int asus_wmi_add(struct platform_device *pdev) asus->gpu_mux_dev = ASUS_WMI_DEVID_GPU_MUX; else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_GPU_MUX_VIVO)) asus->gpu_mux_dev = ASUS_WMI_DEVID_GPU_MUX_VIVO; - - if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_MODE)) - asus->kbd_rgb_dev = ASUS_WMI_DEVID_TUF_RGB_MODE; - else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_MODE2)) - asus->kbd_rgb_dev = ASUS_WMI_DEVID_TUF_RGB_MODE2; +#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */ if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY)) asus->throttle_thermal_policy_dev = ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY; else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY_VIVO)) asus->throttle_thermal_policy_dev = ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY_VIVO; + if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_MODE)) + asus->kbd_rgb_dev = ASUS_WMI_DEVID_TUF_RGB_MODE; + else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_MODE2)) + asus->kbd_rgb_dev = ASUS_WMI_DEVID_TUF_RGB_MODE2; + err = fan_boost_mode_check_present(asus); if (err) goto fail_fan_boost_mode; diff --git a/drivers/platform/x86/ayaneo-ec.c b/drivers/platform/x86/ayaneo-ec.c new file mode 100644 index 000000000000..41a24e091248 --- /dev/null +++ b/drivers/platform/x86/ayaneo-ec.c @@ -0,0 +1,593 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Platform driver for the Embedded Controller (EC) of Ayaneo devices. Handles + * hwmon (fan speed, fan control), battery charge limits, and magic module + * control (connected modules, controller disconnection). 
+ * + * Copyright (C) 2025 Antheas Kapenekakis <lkml@antheas.dev> + */ + +#include <linux/acpi.h> +#include <linux/bits.h> +#include <linux/dmi.h> +#include <linux/err.h> +#include <linux/hwmon.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/pm.h> +#include <linux/power_supply.h> +#include <linux/sysfs.h> +#include <acpi/battery.h> + +#define AYANEO_PWM_ENABLE_REG 0x4A +#define AYANEO_PWM_REG 0x4B +#define AYANEO_PWM_MODE_AUTO 0x00 +#define AYANEO_PWM_MODE_MANUAL 0x01 + +#define AYANEO_FAN_REG 0x76 + +#define EC_CHARGE_CONTROL_BEHAVIOURS \ + (BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO) | \ + BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE)) +#define AYANEO_CHARGE_REG 0x1e +#define AYANEO_CHARGE_VAL_AUTO 0xaa +#define AYANEO_CHARGE_VAL_INHIBIT 0x55 + +#define AYANEO_POWER_REG 0x2d +#define AYANEO_POWER_OFF 0xfe +#define AYANEO_POWER_ON 0xff +#define AYANEO_MODULE_REG 0x2f +#define AYANEO_MODULE_LEFT BIT(0) +#define AYANEO_MODULE_RIGHT BIT(1) +#define AYANEO_MODULE_MASK (AYANEO_MODULE_LEFT | AYANEO_MODULE_RIGHT) + +struct ayaneo_ec_quirk { + bool has_fan_control; + bool has_charge_control; + bool has_magic_modules; +}; + +struct ayaneo_ec_platform_data { + struct platform_device *pdev; + struct ayaneo_ec_quirk *quirks; + struct acpi_battery_hook battery_hook; + + // Protects access to restore_pwm + struct mutex hwmon_lock; + bool restore_charge_limit; + bool restore_pwm; +}; + +static const struct ayaneo_ec_quirk quirk_fan = { + .has_fan_control = true, +}; + +static const struct ayaneo_ec_quirk quirk_charge_limit = { + .has_fan_control = true, + .has_charge_control = true, +}; + +static const struct ayaneo_ec_quirk quirk_ayaneo3 = { + .has_fan_control = true, + .has_charge_control = true, + .has_magic_modules = true, +}; + +static const struct dmi_system_id dmi_table[] = { + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"), + DMI_MATCH(DMI_BOARD_NAME, "AYANEO 2"), + }, + .driver_data = (void *)&quirk_fan, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"), + DMI_MATCH(DMI_BOARD_NAME, "FLIP"), + }, + .driver_data = (void *)&quirk_fan, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"), + DMI_MATCH(DMI_BOARD_NAME, "GEEK"), + }, + .driver_data = (void *)&quirk_fan, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "AIR"), + }, + .driver_data = (void *)&quirk_charge_limit, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "AIR 1S"), + }, + .driver_data = (void *)&quirk_charge_limit, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "AB05-Mendocino"), + }, + .driver_data = (void *)&quirk_charge_limit, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "AIR Pro"), + }, + .driver_data = (void *)&quirk_charge_limit, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "KUN"), + }, + .driver_data = (void *)&quirk_charge_limit, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "AYANEO 3"), + }, + .driver_data = (void *)&quirk_ayaneo3, + }, + {}, +}; + +/* Callbacks for hwmon interface */ +static umode_t ayaneo_ec_hwmon_is_visible(const void *drvdata, + enum hwmon_sensor_types type, u32 attr, + int channel) +{ + switch (type) { + case hwmon_fan: + return 0444; + case hwmon_pwm: + return 0644; + 
default: + return 0; + } +} + +static int ayaneo_ec_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + u8 tmp; + int ret; + + switch (type) { + case hwmon_fan: + switch (attr) { + case hwmon_fan_input: + ret = ec_read(AYANEO_FAN_REG, &tmp); + if (ret) + return ret; + *val = tmp << 8; + ret = ec_read(AYANEO_FAN_REG + 1, &tmp); + if (ret) + return ret; + *val |= tmp; + return 0; + default: + break; + } + break; + case hwmon_pwm: + switch (attr) { + case hwmon_pwm_input: + ret = ec_read(AYANEO_PWM_REG, &tmp); + if (ret) + return ret; + if (tmp > 100) + return -EIO; + *val = (255 * tmp) / 100; + return 0; + case hwmon_pwm_enable: + ret = ec_read(AYANEO_PWM_ENABLE_REG, &tmp); + if (ret) + return ret; + if (tmp == AYANEO_PWM_MODE_MANUAL) + *val = 1; + else if (tmp == AYANEO_PWM_MODE_AUTO) + *val = 2; + else + return -EIO; + return 0; + default: + break; + } + break; + default: + break; + } + return -EOPNOTSUPP; +} + +static int ayaneo_ec_write(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long val) +{ + struct ayaneo_ec_platform_data *data = dev_get_drvdata(dev); + int ret; + + guard(mutex)(&data->hwmon_lock); + + switch (type) { + case hwmon_pwm: + switch (attr) { + case hwmon_pwm_enable: + data->restore_pwm = false; + switch (val) { + case 1: + return ec_write(AYANEO_PWM_ENABLE_REG, + AYANEO_PWM_MODE_MANUAL); + case 2: + return ec_write(AYANEO_PWM_ENABLE_REG, + AYANEO_PWM_MODE_AUTO); + default: + return -EINVAL; + } + case hwmon_pwm_input: + if (val < 0 || val > 255) + return -EINVAL; + if (data->restore_pwm) { + /* + * Defer restoring PWM control to after + * userspace resumes successfully + */ + ret = ec_write(AYANEO_PWM_ENABLE_REG, + AYANEO_PWM_MODE_MANUAL); + if (ret) + return ret; + data->restore_pwm = false; + } + return ec_write(AYANEO_PWM_REG, (val * 100) / 255); + default: + break; + } + break; + default: + break; + } + return -EOPNOTSUPP; +} + +static const struct hwmon_ops ayaneo_ec_hwmon_ops = { + .is_visible = ayaneo_ec_hwmon_is_visible, + .read = ayaneo_ec_read, + .write = ayaneo_ec_write, +}; + +static const struct hwmon_channel_info *const ayaneo_ec_sensors[] = { + HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT), + HWMON_CHANNEL_INFO(pwm, HWMON_PWM_INPUT | HWMON_PWM_ENABLE), + NULL, +}; + +static const struct hwmon_chip_info ayaneo_ec_chip_info = { + .ops = &ayaneo_ec_hwmon_ops, + .info = ayaneo_ec_sensors, +}; + +static int ayaneo_psy_ext_get_prop(struct power_supply *psy, + const struct power_supply_ext *ext, + void *data, + enum power_supply_property psp, + union power_supply_propval *val) +{ + int ret; + u8 tmp; + + switch (psp) { + case POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR: + ret = ec_read(AYANEO_CHARGE_REG, &tmp); + if (ret) + return ret; + + if (tmp == AYANEO_CHARGE_VAL_INHIBIT) + val->intval = POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE; + else + val->intval = POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO; + return 0; + default: + return -EINVAL; + } +} + +static int ayaneo_psy_ext_set_prop(struct power_supply *psy, + const struct power_supply_ext *ext, + void *data, + enum power_supply_property psp, + const union power_supply_propval *val) +{ + u8 raw_val; + + switch (psp) { + case POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR: + switch (val->intval) { + case POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO: + raw_val = AYANEO_CHARGE_VAL_AUTO; + break; + case POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE: + raw_val = AYANEO_CHARGE_VAL_INHIBIT; + break; + default: + return -EINVAL; + } + return ec_write(AYANEO_CHARGE_REG, raw_val); + default: 
+ return -EINVAL; + } +} + +static int ayaneo_psy_prop_is_writeable(struct power_supply *psy, + const struct power_supply_ext *ext, + void *data, + enum power_supply_property psp) +{ + return true; +} + +static const enum power_supply_property ayaneo_psy_ext_props[] = { + POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR, +}; + +static const struct power_supply_ext ayaneo_psy_ext = { + .name = "ayaneo-charge-control", + .properties = ayaneo_psy_ext_props, + .num_properties = ARRAY_SIZE(ayaneo_psy_ext_props), + .charge_behaviours = EC_CHARGE_CONTROL_BEHAVIOURS, + .get_property = ayaneo_psy_ext_get_prop, + .set_property = ayaneo_psy_ext_set_prop, + .property_is_writeable = ayaneo_psy_prop_is_writeable, +}; + +static int ayaneo_add_battery(struct power_supply *battery, + struct acpi_battery_hook *hook) +{ + struct ayaneo_ec_platform_data *data = + container_of(hook, struct ayaneo_ec_platform_data, battery_hook); + + return power_supply_register_extension(battery, &ayaneo_psy_ext, + &data->pdev->dev, NULL); +} + +static int ayaneo_remove_battery(struct power_supply *battery, + struct acpi_battery_hook *hook) +{ + power_supply_unregister_extension(battery, &ayaneo_psy_ext); + return 0; +} + +static ssize_t controller_power_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + bool value; + int ret; + + ret = kstrtobool(buf, &value); + if (ret) + return ret; + + ret = ec_write(AYANEO_POWER_REG, value ? AYANEO_POWER_ON : AYANEO_POWER_OFF); + if (ret) + return ret; + + return count; +} + +static ssize_t controller_power_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + int ret; + u8 val; + + ret = ec_read(AYANEO_POWER_REG, &val); + if (ret) + return ret; + + return sysfs_emit(buf, "%d\n", val == AYANEO_POWER_ON); +} + +static DEVICE_ATTR_RW(controller_power); + +static ssize_t controller_modules_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + u8 unconnected_modules; + char *out; + int ret; + + ret = ec_read(AYANEO_MODULE_REG, &unconnected_modules); + if (ret) + return ret; + + switch (~unconnected_modules & AYANEO_MODULE_MASK) { + case AYANEO_MODULE_LEFT | AYANEO_MODULE_RIGHT: + out = "both"; + break; + case AYANEO_MODULE_LEFT: + out = "left"; + break; + case AYANEO_MODULE_RIGHT: + out = "right"; + break; + default: + out = "none"; + break; + } + + return sysfs_emit(buf, "%s\n", out); +} + +static DEVICE_ATTR_RO(controller_modules); + +static struct attribute *aya_mm_attrs[] = { + &dev_attr_controller_power.attr, + &dev_attr_controller_modules.attr, + NULL +}; + +static umode_t aya_mm_is_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + struct device *dev = kobj_to_dev(kobj); + struct platform_device *pdev = to_platform_device(dev); + struct ayaneo_ec_platform_data *data = platform_get_drvdata(pdev); + + if (data->quirks->has_magic_modules) + return attr->mode; + return 0; +} + +static const struct attribute_group aya_mm_attribute_group = { + .is_visible = aya_mm_is_visible, + .attrs = aya_mm_attrs, +}; + +static const struct attribute_group *ayaneo_ec_groups[] = { + &aya_mm_attribute_group, + NULL +}; + +static int ayaneo_ec_probe(struct platform_device *pdev) +{ + const struct dmi_system_id *dmi_entry; + struct ayaneo_ec_platform_data *data; + struct device *hwdev; + int ret; + + dmi_entry = dmi_first_match(dmi_table); + if (!dmi_entry) + return -ENODEV; + + data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->pdev = pdev; + data->quirks = 
dmi_entry->driver_data; + ret = devm_mutex_init(&pdev->dev, &data->hwmon_lock); + if (ret) + return ret; + platform_set_drvdata(pdev, data); + + if (data->quirks->has_fan_control) { + hwdev = devm_hwmon_device_register_with_info(&pdev->dev, + "ayaneo_ec", data, &ayaneo_ec_chip_info, NULL); + if (IS_ERR(hwdev)) + return PTR_ERR(hwdev); + } + + if (data->quirks->has_charge_control) { + data->battery_hook.add_battery = ayaneo_add_battery; + data->battery_hook.remove_battery = ayaneo_remove_battery; + data->battery_hook.name = "Ayaneo Battery"; + ret = devm_battery_hook_register(&pdev->dev, &data->battery_hook); + if (ret) + return ret; + } + + return 0; +} + +static int ayaneo_freeze(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct ayaneo_ec_platform_data *data = platform_get_drvdata(pdev); + int ret; + u8 tmp; + + if (data->quirks->has_charge_control) { + ret = ec_read(AYANEO_CHARGE_REG, &tmp); + if (ret) + return ret; + + data->restore_charge_limit = tmp == AYANEO_CHARGE_VAL_INHIBIT; + } + + if (data->quirks->has_fan_control) { + ret = ec_read(AYANEO_PWM_ENABLE_REG, &tmp); + if (ret) + return ret; + + data->restore_pwm = tmp == AYANEO_PWM_MODE_MANUAL; + + /* + * Release the fan when entering hibernation to avoid + * overheating if hibernation fails and hangs. + */ + if (data->restore_pwm) { + ret = ec_write(AYANEO_PWM_ENABLE_REG, AYANEO_PWM_MODE_AUTO); + if (ret) + return ret; + } + } + + return 0; +} + +static int ayaneo_restore(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct ayaneo_ec_platform_data *data = platform_get_drvdata(pdev); + int ret; + + if (data->quirks->has_charge_control && data->restore_charge_limit) { + ret = ec_write(AYANEO_CHARGE_REG, AYANEO_CHARGE_VAL_INHIBIT); + if (ret) + return ret; + } + + return 0; +} + +static const struct dev_pm_ops ayaneo_pm_ops = { + .freeze = ayaneo_freeze, + .restore = ayaneo_restore, +}; + +static struct platform_driver ayaneo_platform_driver = { + .driver = { + .name = "ayaneo-ec", + .dev_groups = ayaneo_ec_groups, + .pm = pm_sleep_ptr(&ayaneo_pm_ops), + }, + .probe = ayaneo_ec_probe, +}; + +static struct platform_device *ayaneo_platform_device; + +static int __init ayaneo_ec_init(void) +{ + ayaneo_platform_device = + platform_create_bundle(&ayaneo_platform_driver, + ayaneo_ec_probe, NULL, 0, NULL, 0); + + return PTR_ERR_OR_ZERO(ayaneo_platform_device); +} + +static void __exit ayaneo_ec_exit(void) +{ + platform_device_unregister(ayaneo_platform_device); + platform_driver_unregister(&ayaneo_platform_driver); +} + +MODULE_DEVICE_TABLE(dmi, dmi_table); + +module_init(ayaneo_ec_init); +module_exit(ayaneo_ec_exit); + +MODULE_AUTHOR("Antheas Kapenekakis <lkml@antheas.dev>"); +MODULE_DESCRIPTION("Ayaneo Embedded Controller (EC) platform features"); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/dell/alienware-wmi-wmax.c b/drivers/platform/x86/dell/alienware-wmi-wmax.c index fadf7aac6779..1418bd326edf 100644 --- a/drivers/platform/x86/dell/alienware-wmi-wmax.c +++ b/drivers/platform/x86/dell/alienware-wmi-wmax.c @@ -235,11 +235,6 @@ enum AWCC_THERMAL_TABLES { AWCC_THERMAL_TABLE_USTT = 0xA, }; -enum AWCC_SPECIAL_THERMAL_CODES { - AWCC_SPECIAL_PROFILE_CUSTOM = 0x00, - AWCC_SPECIAL_PROFILE_GMODE = 0xAB, -}; - enum AWCC_TEMP_SENSOR_TYPES { AWCC_TEMP_SENSOR_CPU = 0x01, AWCC_TEMP_SENSOR_FRONT = 0x03, @@ -266,17 +261,18 @@ enum AWCC_FAN_TYPES { }; enum awcc_thermal_profile { - AWCC_PROFILE_USTT_BALANCED, - AWCC_PROFILE_USTT_BALANCED_PERFORMANCE, - 
AWCC_PROFILE_USTT_COOL, - AWCC_PROFILE_USTT_QUIET, - AWCC_PROFILE_USTT_PERFORMANCE, - AWCC_PROFILE_USTT_LOW_POWER, - AWCC_PROFILE_LEGACY_QUIET, - AWCC_PROFILE_LEGACY_BALANCED, - AWCC_PROFILE_LEGACY_BALANCED_PERFORMANCE, - AWCC_PROFILE_LEGACY_PERFORMANCE, - AWCC_PROFILE_LAST, + AWCC_PROFILE_SPECIAL_CUSTOM = 0x00, + AWCC_PROFILE_LEGACY_QUIET = 0x96, + AWCC_PROFILE_LEGACY_BALANCED = 0x97, + AWCC_PROFILE_LEGACY_BALANCED_PERFORMANCE = 0x98, + AWCC_PROFILE_LEGACY_PERFORMANCE = 0x99, + AWCC_PROFILE_USTT_BALANCED = 0xA0, + AWCC_PROFILE_USTT_BALANCED_PERFORMANCE = 0xA1, + AWCC_PROFILE_USTT_COOL = 0xA2, + AWCC_PROFILE_USTT_QUIET = 0xA3, + AWCC_PROFILE_USTT_PERFORMANCE = 0xA4, + AWCC_PROFILE_USTT_LOW_POWER = 0xA5, + AWCC_PROFILE_SPECIAL_GMODE = 0xAB, }; struct wmax_led_args { @@ -332,19 +328,6 @@ struct awcc_priv { u32 gpio_count; }; -static const enum platform_profile_option awcc_mode_to_platform_profile[AWCC_PROFILE_LAST] = { - [AWCC_PROFILE_USTT_BALANCED] = PLATFORM_PROFILE_BALANCED, - [AWCC_PROFILE_USTT_BALANCED_PERFORMANCE] = PLATFORM_PROFILE_BALANCED_PERFORMANCE, - [AWCC_PROFILE_USTT_COOL] = PLATFORM_PROFILE_COOL, - [AWCC_PROFILE_USTT_QUIET] = PLATFORM_PROFILE_QUIET, - [AWCC_PROFILE_USTT_PERFORMANCE] = PLATFORM_PROFILE_PERFORMANCE, - [AWCC_PROFILE_USTT_LOW_POWER] = PLATFORM_PROFILE_LOW_POWER, - [AWCC_PROFILE_LEGACY_QUIET] = PLATFORM_PROFILE_QUIET, - [AWCC_PROFILE_LEGACY_BALANCED] = PLATFORM_PROFILE_BALANCED, - [AWCC_PROFILE_LEGACY_BALANCED_PERFORMANCE] = PLATFORM_PROFILE_BALANCED_PERFORMANCE, - [AWCC_PROFILE_LEGACY_PERFORMANCE] = PLATFORM_PROFILE_PERFORMANCE, -}; - static struct awcc_quirks *awcc; /* @@ -562,21 +545,41 @@ const struct attribute_group wmax_deepsleep_attribute_group = { /* * AWCC Helpers */ -static bool is_awcc_thermal_profile_id(u8 code) +static int awcc_profile_to_pprof(enum awcc_thermal_profile profile, + enum platform_profile_option *pprof) { - u8 table = FIELD_GET(AWCC_THERMAL_TABLE_MASK, code); - u8 mode = FIELD_GET(AWCC_THERMAL_MODE_MASK, code); - - if (mode >= AWCC_PROFILE_LAST) - return false; - - if (table == AWCC_THERMAL_TABLE_LEGACY && mode >= AWCC_PROFILE_LEGACY_QUIET) - return true; - - if (table == AWCC_THERMAL_TABLE_USTT && mode <= AWCC_PROFILE_USTT_LOW_POWER) - return true; + switch (profile) { + case AWCC_PROFILE_SPECIAL_CUSTOM: + *pprof = PLATFORM_PROFILE_CUSTOM; + break; + case AWCC_PROFILE_LEGACY_QUIET: + case AWCC_PROFILE_USTT_QUIET: + *pprof = PLATFORM_PROFILE_QUIET; + break; + case AWCC_PROFILE_LEGACY_BALANCED: + case AWCC_PROFILE_USTT_BALANCED: + *pprof = PLATFORM_PROFILE_BALANCED; + break; + case AWCC_PROFILE_LEGACY_BALANCED_PERFORMANCE: + case AWCC_PROFILE_USTT_BALANCED_PERFORMANCE: + *pprof = PLATFORM_PROFILE_BALANCED_PERFORMANCE; + break; + case AWCC_PROFILE_LEGACY_PERFORMANCE: + case AWCC_PROFILE_USTT_PERFORMANCE: + case AWCC_PROFILE_SPECIAL_GMODE: + *pprof = PLATFORM_PROFILE_PERFORMANCE; + break; + case AWCC_PROFILE_USTT_COOL: + *pprof = PLATFORM_PROFILE_COOL; + break; + case AWCC_PROFILE_USTT_LOW_POWER: + *pprof = PLATFORM_PROFILE_LOW_POWER; + break; + default: + return -EINVAL; + } - return false; + return 0; } static int awcc_wmi_command(struct wmi_device *wdev, u32 method_id, @@ -1225,24 +1228,7 @@ static int awcc_platform_profile_get(struct device *dev, if (ret) return ret; - switch (out_data) { - case AWCC_SPECIAL_PROFILE_CUSTOM: - *profile = PLATFORM_PROFILE_CUSTOM; - return 0; - case AWCC_SPECIAL_PROFILE_GMODE: - *profile = PLATFORM_PROFILE_PERFORMANCE; - return 0; - default: - break; - } - - if (!is_awcc_thermal_profile_id(out_data)) - 
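/*
 * Note on the new awcc_thermal_profile values: they are the raw AWCC thermal
 * codes, so the table/mode split used elsewhere in this driver still holds.
 * A sketch, assuming AWCC_THERMAL_TABLE_MASK covers the high nibble and
 * AWCC_THERMAL_MODE_MASK the low nibble, as the removed helper implied:
 */
static bool awcc_profile_is_ustt(u8 code)
{
	return FIELD_GET(AWCC_THERMAL_TABLE_MASK, code) == AWCC_THERMAL_TABLE_USTT;
}

/* e.g. AWCC_PROFILE_USTT_QUIET (0xA3): table 0xA (USTT), mode 0x3 */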
return -ENODATA; - - out_data = FIELD_GET(AWCC_THERMAL_MODE_MASK, out_data); - *profile = awcc_mode_to_platform_profile[out_data]; - - return 0; + return awcc_profile_to_pprof(out_data, profile); } static int awcc_platform_profile_set(struct device *dev, @@ -1279,7 +1265,6 @@ static int awcc_platform_profile_probe(void *drvdata, unsigned long *choices) { enum platform_profile_option profile; struct awcc_priv *priv = drvdata; - enum awcc_thermal_profile mode; u8 id, offset = 0; int ret; @@ -1301,15 +1286,20 @@ static int awcc_platform_profile_probe(void *drvdata, unsigned long *choices) if (ret) return ret; - if (!is_awcc_thermal_profile_id(id)) { + /* + * G-Mode profile ID is not listed consistently across modeles + * that support it, therefore we handle it through quirks. + */ + if (id == AWCC_PROFILE_SPECIAL_GMODE) + continue; + + ret = awcc_profile_to_pprof(id, &profile); + if (ret) { dev_dbg(&priv->wdev->dev, "Unmapped thermal profile ID 0x%02x\n", id); continue; } - mode = FIELD_GET(AWCC_THERMAL_MODE_MASK, id); - profile = awcc_mode_to_platform_profile[mode]; priv->supported_profiles[profile] = id; - __set_bit(profile, choices); } @@ -1318,14 +1308,14 @@ static int awcc_platform_profile_probe(void *drvdata, unsigned long *choices) if (awcc->gmode) { priv->supported_profiles[PLATFORM_PROFILE_PERFORMANCE] = - AWCC_SPECIAL_PROFILE_GMODE; + AWCC_PROFILE_SPECIAL_GMODE; __set_bit(PLATFORM_PROFILE_PERFORMANCE, choices); } /* Every model supports the "custom" profile */ priv->supported_profiles[PLATFORM_PROFILE_CUSTOM] = - AWCC_SPECIAL_PROFILE_CUSTOM; + AWCC_PROFILE_SPECIAL_CUSTOM; __set_bit(PLATFORM_PROFILE_CUSTOM, choices); diff --git a/drivers/platform/x86/gpd-pocket-fan.c b/drivers/platform/x86/gpd-pocket-fan.c index 7a20f68ae206..c9236738f896 100644 --- a/drivers/platform/x86/gpd-pocket-fan.c +++ b/drivers/platform/x86/gpd-pocket-fan.c @@ -112,14 +112,14 @@ set_speed: gpd_pocket_fan_set_speed(fan, speed); /* When mostly idle (low temp/speed), slow down the poll interval. */ - queue_delayed_work(system_wq, &fan->work, + queue_delayed_work(system_percpu_wq, &fan->work, msecs_to_jiffies(4000 / (speed + 1))); } static void gpd_pocket_fan_force_update(struct gpd_pocket_fan_data *fan) { fan->last_speed = -1; - mod_delayed_work(system_wq, &fan->work, 0); + mod_delayed_work(system_percpu_wq, &fan->work, 0); } static int gpd_pocket_fan_probe(struct platform_device *pdev) diff --git a/drivers/platform/x86/hp/hp-wmi.c b/drivers/platform/x86/hp/hp-wmi.c index ad9d9f97960f..f4ea1ea05997 100644 --- a/drivers/platform/x86/hp/hp-wmi.c +++ b/drivers/platform/x86/hp/hp-wmi.c @@ -63,12 +63,16 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45E9-BE91-3D44E2C707E4"); * contains "PerformanceControl". 
*/ static const char * const omen_thermal_profile_boards[] = { - "84DA", "84DB", "84DC", "8574", "8575", "860A", "87B5", "8572", "8573", - "8600", "8601", "8602", "8605", "8606", "8607", "8746", "8747", "8749", - "874A", "8603", "8604", "8748", "886B", "886C", "878A", "878B", "878C", - "88C8", "88CB", "8786", "8787", "8788", "88D1", "88D2", "88F4", "88FD", - "88F5", "88F6", "88F7", "88FE", "88FF", "8900", "8901", "8902", "8912", - "8917", "8918", "8949", "894A", "89EB", "8BAD", "8A42", "8A15" + "84DA", "84DB", "84DC", + "8572", "8573", "8574", "8575", + "8600", "8601", "8602", "8603", "8604", "8605", "8606", "8607", "860A", + "8746", "8747", "8748", "8749", "874A", "8786", "8787", "8788", "878A", + "878B", "878C", "87B5", + "886B", "886C", "88C8", "88CB", "88D1", "88D2", "88F4", "88F5", "88F6", + "88F7", "88FD", "88FE", "88FF", + "8900", "8901", "8902", "8912", "8917", "8918", "8949", "894A", "89EB", + "8A15", "8A42", + "8BAD", }; /* DMI Board names of Omen laptops that are specifically set to be thermal @@ -76,7 +80,8 @@ static const char * const omen_thermal_profile_boards[] = { * the get system design information WMI call returns */ static const char * const omen_thermal_profile_force_v0_boards[] = { - "8607", "8746", "8747", "8749", "874A", "8748" + "8607", + "8746", "8747", "8748", "8749", "874A", }; /* DMI board names of Omen laptops that have a thermal profile timer which will @@ -84,12 +89,13 @@ static const char * const omen_thermal_profile_force_v0_boards[] = { * "balanced" when reaching zero. */ static const char * const omen_timed_thermal_profile_boards[] = { - "8BAD", "8A42", "8A15" + "8A15", "8A42", + "8BAD", }; /* DMI Board names of Victus 16-d1xxx laptops */ static const char * const victus_thermal_profile_boards[] = { - "8A25" + "8A25", }; /* DMI Board names of Victus 16-r and Victus 16-s laptops */ diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c index 9c07a7faf18f..560cc063198e 100644 --- a/drivers/platform/x86/intel/hid.c +++ b/drivers/platform/x86/intel/hid.c @@ -177,6 +177,18 @@ static const struct dmi_system_id dmi_vgbs_allow_list[] = { DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite Dragonfly G2 Notebook PC"), }, }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Dell Pro Rugged 10 Tablet RA00260"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Dell Pro Rugged 12 Tablet RA02260"), + }, + }, { } }; diff --git a/drivers/platform/x86/intel/pmc/arl.c b/drivers/platform/x86/intel/pmc/arl.c index 17ad87b392ab..eb23bc68340a 100644 --- a/drivers/platform/x86/intel/pmc/arl.c +++ b/drivers/platform/x86/intel/pmc/arl.c @@ -281,6 +281,7 @@ static const struct pmc_reg_map arl_socs_reg_map = { .etr3_offset = ETR3_OFFSET, .pson_residency_offset = TGL_PSON_RESIDENCY_OFFSET, .pson_residency_counter_step = TGL_PSON_RES_COUNTER_STEP, + .lpm_req_guid = SOCS_LPM_REQ_GUID, }; static const struct pmc_bit_map arl_pchs_ltr_show_map[] = { @@ -648,26 +649,23 @@ static const struct pmc_reg_map arl_pchs_reg_map = { .lpm_num_maps = ADL_LPM_NUM_MAPS, .lpm_reg_index = ARL_LPM_REG_INDEX, .etr3_offset = ETR3_OFFSET, + .lpm_req_guid = PCHS_LPM_REQ_GUID, }; static struct pmc_info arl_pmc_info_list[] = { { - .guid = IOEP_LPM_REQ_GUID, .devid = PMC_DEVID_ARL_IOEP, .map = &mtl_ioep_reg_map, }, { - .guid = SOCS_LPM_REQ_GUID, .devid = PMC_DEVID_ARL_SOCS, .map = &arl_socs_reg_map, }, { - .guid = PCHS_LPM_REQ_GUID, .devid = PMC_DEVID_ARL_PCHS, .map = &arl_pchs_reg_map, }, { - .guid = 
SOCM_LPM_REQ_GUID, .devid = PMC_DEVID_ARL_SOCM, .map = &mtl_socm_reg_map, }, @@ -720,9 +718,10 @@ static int arl_h_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_ return generic_core_init(pmcdev, pmc_dev_info); } +static u32 ARL_PMT_DMU_GUIDS[] = {ARL_PMT_DMU_GUID, 0x0}; struct pmc_dev_info arl_pmc_dev = { .pci_func = 0, - .dmu_guid = ARL_PMT_DMU_GUID, + .dmu_guids = ARL_PMT_DMU_GUIDS, .regmap_list = arl_pmc_info_list, .map = &arl_socs_reg_map, .sub_req_show = &pmc_core_substate_req_regs_fops, @@ -732,9 +731,10 @@ struct pmc_dev_info arl_pmc_dev = { .sub_req = pmc_core_pmt_get_lpm_req, }; +static u32 ARL_H_PMT_DMU_GUIDS[] = {ARL_PMT_DMU_GUID, ARL_H_PMT_DMU_GUID, 0x0}; struct pmc_dev_info arl_h_pmc_dev = { .pci_func = 2, - .dmu_guid = ARL_PMT_DMU_GUID, + .dmu_guids = ARL_H_PMT_DMU_GUIDS, .regmap_list = arl_pmc_info_list, .map = &mtl_socm_reg_map, .sub_req_show = &pmc_core_substate_req_regs_fops, diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c index ac3d19ae8c56..7d7ae8a40b0e 100644 --- a/drivers/platform/x86/intel/pmc/core.c +++ b/drivers/platform/x86/intel/pmc/core.c @@ -20,6 +20,7 @@ enum header_type { #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/dmi.h> +#include <linux/err.h> #include <linux/io.h> #include <linux/module.h> #include <linux/pci.h> @@ -311,20 +312,20 @@ static inline u8 pmc_core_reg_read_byte(struct pmc *pmc, int offset) } static void pmc_core_display_map(struct seq_file *s, int index, int idx, int ip, - int pmc_index, u8 pf_reg, const struct pmc_bit_map **pf_map) + int pmc_idx, u8 pf_reg, const struct pmc_bit_map **pf_map) { seq_printf(s, "PMC%d:PCH IP: %-2d - %-32s\tState: %s\n", - pmc_index, ip, pf_map[idx][index].name, + pmc_idx, ip, pf_map[idx][index].name, pf_map[idx][index].bit_mask & pf_reg ? "Off" : "On"); } static int pmc_core_ppfear_show(struct seq_file *s, void *unused) { struct pmc_dev *pmcdev = s->private; - unsigned int i; + unsigned int pmc_idx; - for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) { - struct pmc *pmc = pmcdev->pmcs[i]; + for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) { + struct pmc *pmc = pmcdev->pmcs[pmc_idx]; const struct pmc_bit_map **maps; u8 pf_regs[PPFEAR_MAX_NUM_ENTRIES]; unsigned int index, iter, idx, ip = 0; @@ -342,7 +343,7 @@ static int pmc_core_ppfear_show(struct seq_file *s, void *unused) for (idx = 0; maps[idx]; idx++) { for (index = 0; maps[idx][index].name && index < pmc->map->ppfear_buckets * 8; ip++, index++) - pmc_core_display_map(s, index, idx, ip, i, + pmc_core_display_map(s, index, idx, ip, pmc_idx, pf_regs[index / 8], maps); } } @@ -471,7 +472,7 @@ int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value, int ignore) struct pmc *pmc; const struct pmc_reg_map *map; u32 reg; - unsigned int pmc_index; + unsigned int pmc_idx; int ltr_index; ltr_index = value; @@ -479,8 +480,8 @@ int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value, int ignore) * is based on the contiguous indexes from ltr_show output. * pmc index and ltr index needs to be calculated from it. 
*/ - for (pmc_index = 0; pmc_index < ARRAY_SIZE(pmcdev->pmcs) && ltr_index >= 0; pmc_index++) { - pmc = pmcdev->pmcs[pmc_index]; + for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs) && ltr_index >= 0; pmc_idx++) { + pmc = pmcdev->pmcs[pmc_idx]; if (!pmc) continue; @@ -497,10 +498,10 @@ int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value, int ignore) ltr_index = ltr_index - (map->ltr_ignore_max + 2) - 1; } - if (pmc_index >= ARRAY_SIZE(pmcdev->pmcs) || ltr_index < 0) + if (pmc_idx >= ARRAY_SIZE(pmcdev->pmcs) || ltr_index < 0) return -EINVAL; - pr_debug("ltr_ignore for pmc%d: ltr_index:%d\n", pmc_index, ltr_index); + pr_debug("ltr_ignore for pmc%d: ltr_index:%d\n", pmc_idx, ltr_index); guard(mutex)(&pmcdev->lock); @@ -635,14 +636,14 @@ static int pmc_core_ltr_show(struct seq_file *s, void *unused) u64 decoded_snoop_ltr, decoded_non_snoop_ltr, val; u32 ltr_raw_data, scale; u16 snoop_ltr, nonsnoop_ltr; - unsigned int i, index, ltr_index = 0; + unsigned int pmc_idx, index, ltr_index = 0; - for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) { + for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) { struct pmc *pmc; const struct pmc_bit_map *map; u32 ltr_ign_reg; - pmc = pmcdev->pmcs[i]; + pmc = pmcdev->pmcs[pmc_idx]; if (!pmc) continue; @@ -676,7 +677,7 @@ static int pmc_core_ltr_show(struct seq_file *s, void *unused) } seq_printf(s, "%d\tPMC%d:%-32s\tLTR: RAW: 0x%-16x\tNon-Snoop(ns): %-16llu\tSnoop(ns): %-16llu\tLTR_IGNORE: %d\n", - ltr_index, i, map[index].name, ltr_raw_data, + ltr_index, pmc_idx, map[index].name, ltr_raw_data, decoded_non_snoop_ltr, decoded_snoop_ltr, ltr_ign_data); ltr_index++; @@ -689,15 +690,15 @@ DEFINE_SHOW_ATTRIBUTE(pmc_core_ltr); static int pmc_core_s0ix_blocker_show(struct seq_file *s, void *unused) { struct pmc_dev *pmcdev = s->private; - unsigned int pmcidx; + unsigned int pmc_idx; - for (pmcidx = 0; pmcidx < ARRAY_SIZE(pmcdev->pmcs); pmcidx++) { + for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); pmc_idx++) { const struct pmc_bit_map **maps; unsigned int arr_size, r_idx; u32 offset, counter; struct pmc *pmc; - pmc = pmcdev->pmcs[pmcidx]; + pmc = pmcdev->pmcs[pmc_idx]; if (!pmc) continue; maps = pmc->map->s0ix_blocker_maps; @@ -711,7 +712,7 @@ static int pmc_core_s0ix_blocker_show(struct seq_file *s, void *unused) if (!map->blk) continue; counter = pmc_core_reg_read(pmc, offset); - seq_printf(s, "PMC%d:%-30s %-30d\n", pmcidx, + seq_printf(s, "PMC%d:%-30s %-30d\n", pmc_idx, map->name, counter); offset += map->blk * S0IX_BLK_SIZE; } @@ -723,13 +724,13 @@ DEFINE_SHOW_ATTRIBUTE(pmc_core_s0ix_blocker); static void pmc_core_ltr_ignore_all(struct pmc_dev *pmcdev) { - unsigned int i; + unsigned int pmc_idx; - for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); i++) { + for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); pmc_idx++) { struct pmc *pmc; u32 ltr_ign; - pmc = pmcdev->pmcs[i]; + pmc = pmcdev->pmcs[pmc_idx]; if (!pmc) continue; @@ -750,12 +751,12 @@ static void pmc_core_ltr_ignore_all(struct pmc_dev *pmcdev) static void pmc_core_ltr_restore_all(struct pmc_dev *pmcdev) { - unsigned int i; + unsigned int pmc_idx; - for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); i++) { + for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); pmc_idx++) { struct pmc *pmc; - pmc = pmcdev->pmcs[i]; + pmc = pmcdev->pmcs[pmc_idx]; if (!pmc) continue; @@ -794,10 +795,10 @@ DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_res); static int pmc_core_substate_sts_regs_show(struct seq_file *s, void *unused) { struct pmc_dev *pmcdev = s->private; - unsigned int i; + unsigned int pmc_idx; - for 
(i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) { - struct pmc *pmc = pmcdev->pmcs[i]; + for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) { + struct pmc *pmc = pmcdev->pmcs[pmc_idx]; const struct pmc_bit_map **maps; u32 offset; @@ -805,7 +806,7 @@ static int pmc_core_substate_sts_regs_show(struct seq_file *s, void *unused) continue; maps = pmc->map->lpm_sts; offset = pmc->map->lpm_status_offset; - pmc_core_lpm_display(pmc, NULL, s, offset, i, "STATUS", maps); + pmc_core_lpm_display(pmc, NULL, s, offset, pmc_idx, "STATUS", maps); } return 0; @@ -815,10 +816,10 @@ DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_sts_regs); static int pmc_core_substate_l_sts_regs_show(struct seq_file *s, void *unused) { struct pmc_dev *pmcdev = s->private; - unsigned int i; + unsigned int pmc_idx; - for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) { - struct pmc *pmc = pmcdev->pmcs[i]; + for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) { + struct pmc *pmc = pmcdev->pmcs[pmc_idx]; const struct pmc_bit_map **maps; u32 offset; @@ -826,7 +827,7 @@ static int pmc_core_substate_l_sts_regs_show(struct seq_file *s, void *unused) continue; maps = pmc->map->lpm_sts; offset = pmc->map->lpm_live_status_offset; - pmc_core_lpm_display(pmc, NULL, s, offset, i, "LIVE_STATUS", maps); + pmc_core_lpm_display(pmc, NULL, s, offset, pmc_idx, "LIVE_STATUS", maps); } return 0; @@ -919,11 +920,11 @@ static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused) u32 sts_offset; u32 sts_offset_live; u32 *lpm_req_regs; - unsigned int mp, pmc_index; + unsigned int mp, pmc_idx; int num_maps; - for (pmc_index = 0; pmc_index < ARRAY_SIZE(pmcdev->pmcs); ++pmc_index) { - struct pmc *pmc = pmcdev->pmcs[pmc_index]; + for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) { + struct pmc *pmc = pmcdev->pmcs[pmc_idx]; const struct pmc_bit_map **maps; if (!pmc) @@ -944,7 +945,7 @@ static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused) continue; /* Display the header */ - pmc_core_substate_req_header_show(s, pmc_index, HEADER_STATUS); + pmc_core_substate_req_header_show(s, pmc_idx, HEADER_STATUS); /* Loop over maps */ for (mp = 0; mp < num_maps; mp++) { @@ -982,7 +983,7 @@ static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused) } /* Display the element name in the first column */ - seq_printf(s, "pmc%d: %34s |", pmc_index, map[i].name); + seq_printf(s, "pmc%d: %34s |", pmc_idx, map[i].name); /* Loop over the enabled states and display if required */ pmc_for_each_mode(mode, pmcdev) { @@ -1281,7 +1282,20 @@ int get_primary_reg_base(struct pmc *pmc) return 0; } -void pmc_core_punit_pmt_init(struct pmc_dev *pmcdev, u32 guid) +static struct telem_endpoint *pmc_core_register_endpoint(struct pci_dev *pcidev, u32 *guids) +{ + struct telem_endpoint *ep; + unsigned int i; + + for (i = 0; guids[i]; i++) { + ep = pmt_telem_find_and_register_endpoint(pcidev, guids[i], 0); + if (!IS_ERR(ep)) + return ep; + } + return ERR_PTR(-ENODEV); +} + +void pmc_core_punit_pmt_init(struct pmc_dev *pmcdev, u32 *guids) { struct telem_endpoint *ep; struct pci_dev *pcidev; @@ -1292,7 +1306,7 @@ void pmc_core_punit_pmt_init(struct pmc_dev *pmcdev, u32 guid) return; } - ep = pmt_telem_find_and_register_endpoint(pcidev, guid, 0); + ep = pmc_core_register_endpoint(pcidev, guids); pci_dev_put(pcidev); if (IS_ERR(ep)) { dev_err(&pmcdev->pdev->dev, @@ -1302,8 +1316,6 @@ void pmc_core_punit_pmt_init(struct pmc_dev *pmcdev, u32 guid) } pmcdev->punit_ep = ep; - - pmcdev->has_die_c6 = true; 
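/*
 * Since has_die_c6 is gone, die C6 support is now implied by a successfully
 * registered punit endpoint. A minimal sketch of how the residency counter is
 * then read through PMT telemetry, assuming the existing pmt_telem_read()
 * helper; sample-count and unit-conversion details are omitted:
 */
static int pmc_core_read_die_c6_counter(struct pmc_dev *pmcdev, u64 *value)
{
	if (!pmcdev->punit_ep)
		return -ENODEV;

	return pmt_telem_read(pmcdev->punit_ep, pmcdev->die_c6_offset, value, 1);
}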
pmcdev->die_c6_offset = MTL_PMT_DMU_DIE_C6_OFFSET; } @@ -1423,22 +1435,13 @@ static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev, struct pmc_dev_info pmcdev->dbgfs_dir, primary_pmc, &pmc_core_pson_residency); } - if (pmcdev->has_die_c6) { + if (pmcdev->punit_ep) { debugfs_create_file("die_c6_us_show", 0444, pmcdev->dbgfs_dir, pmcdev, &pmc_core_die_c6_us_fops); } } -static u32 pmc_core_find_guid(struct pmc_info *list, const struct pmc_reg_map *map) -{ - for (; list->map; ++list) - if (list->map == map) - return list->guid; - - return 0; -} - /* * This function retrieves low power mode requirement data from PMC Low * Power Mode (LPM) table. @@ -1553,26 +1556,24 @@ static int pmc_core_get_telem_info(struct pmc_dev *pmcdev, struct pmc_dev_info * { struct pci_dev *pcidev __free(pci_dev_put) = NULL; struct telem_endpoint *ep; - unsigned int i; - u32 guid; + unsigned int pmc_idx; int ret; pcidev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(20, pmc_dev_info->pci_func)); if (!pcidev) return -ENODEV; - for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) { + for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) { struct pmc *pmc; - pmc = pmcdev->pmcs[i]; + pmc = pmcdev->pmcs[pmc_idx]; if (!pmc) continue; - guid = pmc_core_find_guid(pmcdev->regmap_list, pmc->map); - if (!guid) + if (!pmc->map->lpm_req_guid) return -ENXIO; - ep = pmt_telem_find_and_register_endpoint(pcidev, guid, 0); + ep = pmt_telem_find_and_register_endpoint(pcidev, pmc->map->lpm_req_guid, 0); if (IS_ERR(ep)) { dev_dbg(&pmcdev->pdev->dev, "couldn't get telem endpoint %pe", ep); return -EPROBE_DEFER; @@ -1596,7 +1597,7 @@ static const struct pmc_reg_map *pmc_core_find_regmap(struct pmc_info *list, u16 return NULL; } -static int pmc_core_pmc_add(struct pmc_dev *pmcdev, unsigned int pmc_index) +static int pmc_core_pmc_add(struct pmc_dev *pmcdev, unsigned int pmc_idx) { struct pmc_ssram_telemetry pmc_ssram_telemetry; @@ -1604,7 +1605,7 @@ static int pmc_core_pmc_add(struct pmc_dev *pmcdev, unsigned int pmc_index) struct pmc *pmc; int ret; - ret = pmc_ssram_telemetry_get_pmc_info(pmc_index, &pmc_ssram_telemetry); + ret = pmc_ssram_telemetry_get_pmc_info(pmc_idx, &pmc_ssram_telemetry); if (ret) return ret; @@ -1612,7 +1613,7 @@ static int pmc_core_pmc_add(struct pmc_dev *pmcdev, unsigned int pmc_index) if (!map) return -ENODEV; - pmc = pmcdev->pmcs[pmc_index]; + pmc = pmcdev->pmcs[pmc_idx]; /* Memory for primary PMC has been allocated */ if (!pmc) { pmc = devm_kzalloc(&pmcdev->pdev->dev, sizeof(*pmc), GFP_KERNEL); @@ -1629,7 +1630,7 @@ static int pmc_core_pmc_add(struct pmc_dev *pmcdev, unsigned int pmc_index) return -ENOMEM; } - pmcdev->pmcs[pmc_index] = pmc; + pmcdev->pmcs[pmc_idx] = pmc; return 0; } @@ -1689,8 +1690,8 @@ int generic_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info) } pmc_core_get_low_power_modes(pmcdev); - if (pmc_dev_info->dmu_guid) - pmc_core_punit_pmt_init(pmcdev, pmc_dev_info->dmu_guid); + if (pmc_dev_info->dmu_guids) + pmc_core_punit_pmt_init(pmcdev, pmc_dev_info->dmu_guids); if (ssram) { ret = pmc_core_get_telem_info(pmcdev, pmc_dev_info); @@ -1701,8 +1702,8 @@ int generic_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info) return 0; unmap_regbase: - for (unsigned int i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) { - struct pmc *pmc = pmcdev->pmcs[i]; + for (unsigned int pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) { + struct pmc *pmc = pmcdev->pmcs[pmc_idx]; if (pmc && pmc->regbase) iounmap(pmc->regbase); @@ -1795,10 +1796,10 @@ static void 
pmc_core_do_dmi_quirks(struct pmc *pmc) static void pmc_core_clean_structure(struct platform_device *pdev) { struct pmc_dev *pmcdev = platform_get_drvdata(pdev); - unsigned int i; + unsigned int pmc_idx; - for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) { - struct pmc *pmc = pmcdev->pmcs[i]; + for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) { + struct pmc *pmc = pmcdev->pmcs[pmc_idx]; if (pmc && pmc->regbase) iounmap(pmc->regbase); @@ -1958,7 +1959,7 @@ int pmc_core_resume_common(struct pmc_dev *pmcdev) struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN]; const struct pmc_bit_map **maps = pmc->map->lpm_sts; int offset = pmc->map->lpm_status_offset; - unsigned int i; + unsigned int pmc_idx, i; /* Check if the syspend used S0ix */ if (pm_suspend_via_firmware()) @@ -1996,13 +1997,13 @@ int pmc_core_resume_common(struct pmc_dev *pmcdev) if (pmc->map->slps0_dbg_maps) pmc_core_slps0_display(pmc, dev, NULL); - for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) { - struct pmc *pmc = pmcdev->pmcs[i]; + for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) { + struct pmc *pmc = pmcdev->pmcs[pmc_idx]; if (!pmc) continue; if (pmc->map->lpm_sts) - pmc_core_lpm_display(pmc, dev, NULL, offset, i, "STATUS", maps); + pmc_core_lpm_display(pmc, dev, NULL, offset, pmc_idx, "STATUS", maps); } return 0; diff --git a/drivers/platform/x86/intel/pmc/core.h b/drivers/platform/x86/intel/pmc/core.h index f4dadb696a31..272fb4f57f34 100644 --- a/drivers/platform/x86/intel/pmc/core.h +++ b/drivers/platform/x86/intel/pmc/core.h @@ -282,7 +282,8 @@ enum ppfear_regs { /* Die C6 from PUNIT telemetry */ #define MTL_PMT_DMU_DIE_C6_OFFSET 15 #define MTL_PMT_DMU_GUID 0x1A067102 -#define ARL_PMT_DMU_GUID 0x1A06A000 +#define ARL_PMT_DMU_GUID 0x1A06A102 +#define ARL_H_PMT_DMU_GUID 0x1A06A101 #define LNL_PMC_MMIO_REG_LEN 0x2708 #define LNL_PMC_LTR_OSSE 0x1B88 @@ -303,6 +304,8 @@ enum ppfear_regs { /* Wildcat Lake */ #define WCL_PMC_LTR_RESERVED 0x1B64 #define WCL_PCD_PMC_MMIO_REG_LEN 0x3178 +#define WCL_NUM_S0IX_BLOCKER 94 +#define WCL_BLK_REQ_OFFSET 50 /* SSRAM PMC Device ID */ /* LNL */ @@ -355,6 +358,7 @@ struct pmc_bit_map { * @s0ix_blocker_offset PWRMBASE offset to S0ix blocker counter * @num_s0ix_blocker: Number of S0ix blockers * @blocker_req_offset: Telemetry offset to S0ix blocker low power mode substate requirement table + * @lpm_req_guid: Telemetry GUID to read low power mode substate requirement table * * Each PCH has unique set of register offsets and bit indexes. This structure * captures them to have a common implementation. @@ -396,6 +400,8 @@ struct pmc_reg_map { const u8 *lpm_reg_index; const u32 pson_residency_offset; const u32 pson_residency_counter_step; + /* GUID for telemetry regions */ + const u32 lpm_req_guid; }; /** @@ -405,7 +411,6 @@ struct pmc_reg_map { * specific attributes */ struct pmc_info { - u32 guid; u16 devid; const struct pmc_reg_map *map; }; @@ -465,7 +470,6 @@ struct pmc_dev { u64 *pkgc_res_cnt; u8 num_of_pkgc; - bool has_die_c6; u32 die_c6_offset; struct telem_endpoint *punit_ep; struct pmc_info *regmap_list; @@ -481,7 +485,7 @@ enum pmc_index { /** * struct pmc_dev_info - Structure to keep PMC device info * @pci_func: Function number of the primary PMC - * @dmu_guid: Die Management Unit GUID + * @dmu_guids: List of Die Management Unit GUID * @regmap_list: Pointer to a list of pmc_info structure that could be * available for the platform. When set, this field implies * SSRAM support. 
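/*
 * With @dmu_guid widened to a zero-terminated @dmu_guids list, a platform that
 * shipped with more than one DMU telemetry GUID simply lists every candidate
 * and pmc_core_punit_pmt_init() keeps the first one that registers, as the
 * ARL-H hunk above does. A sketch for a hypothetical platform; the name and
 * GUID value are illustrative and the remaining callbacks are omitted:
 */
static u32 XYZ_PMT_DMU_GUIDS[] = { 0x1A060000 /* hypothetical */, 0x0 };

struct pmc_dev_info xyz_pmc_dev = {
	.pci_func = 2,
	.dmu_guids = XYZ_PMT_DMU_GUIDS,
	.map = &mtl_socm_reg_map,	/* reusing an existing register map for the sketch */
};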
@@ -495,7 +499,7 @@ enum pmc_index { */ struct pmc_dev_info { u8 pci_func; - u32 dmu_guid; + u32 *dmu_guids; struct pmc_info *regmap_list; const struct pmc_reg_map *map; const struct file_operations *sub_req_show; @@ -532,7 +536,7 @@ int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value, int ignore); int pmc_core_resume_common(struct pmc_dev *pmcdev); int get_primary_reg_base(struct pmc *pmc); void pmc_core_get_low_power_modes(struct pmc_dev *pmcdev); -void pmc_core_punit_pmt_init(struct pmc_dev *pmcdev, u32 guid); +void pmc_core_punit_pmt_init(struct pmc_dev *pmcdev, u32 *guids); void pmc_core_set_device_d3(unsigned int device); int generic_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info); diff --git a/drivers/platform/x86/intel/pmc/lnl.c b/drivers/platform/x86/intel/pmc/lnl.c index 6fa027e7071f..1cd81ee54dcf 100644 --- a/drivers/platform/x86/intel/pmc/lnl.c +++ b/drivers/platform/x86/intel/pmc/lnl.c @@ -533,11 +533,11 @@ static const struct pmc_reg_map lnl_socm_reg_map = { .s0ix_blocker_maps = lnl_blk_maps, .s0ix_blocker_offset = LNL_S0IX_BLOCKER_OFFSET, .lpm_reg_index = LNL_LPM_REG_INDEX, + .lpm_req_guid = SOCM_LPM_REQ_GUID, }; static struct pmc_info lnl_pmc_info_list[] = { { - .guid = SOCM_LPM_REQ_GUID, .devid = PMC_DEVID_LNL_SOCM, .map = &lnl_socm_reg_map, }, diff --git a/drivers/platform/x86/intel/pmc/mtl.c b/drivers/platform/x86/intel/pmc/mtl.c index 0b87e10f864e..57508cbf9cd4 100644 --- a/drivers/platform/x86/intel/pmc/mtl.c +++ b/drivers/platform/x86/intel/pmc/mtl.c @@ -473,6 +473,7 @@ const struct pmc_reg_map mtl_socm_reg_map = { .lpm_status_offset = MTL_LPM_STATUS_OFFSET, .lpm_live_status_offset = MTL_LPM_LIVE_STATUS_OFFSET, .lpm_reg_index = MTL_LPM_REG_INDEX, + .lpm_req_guid = SOCP_LPM_REQ_GUID, }; static const struct pmc_bit_map mtl_ioep_pfear_map[] = { @@ -797,6 +798,7 @@ const struct pmc_reg_map mtl_ioep_reg_map = { .lpm_en_offset = MTL_LPM_EN_OFFSET, .lpm_sts_latch_en_offset = MTL_LPM_STATUS_LATCH_EN_OFFSET, .lpm_reg_index = MTL_LPM_REG_INDEX, + .lpm_req_guid = IOEP_LPM_REQ_GUID, }; static const struct pmc_bit_map mtl_ioem_pfear_map[] = { @@ -944,21 +946,19 @@ static const struct pmc_reg_map mtl_ioem_reg_map = { .lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2, .lpm_residency_offset = MTL_LPM_RESIDENCY_OFFSET, .lpm_reg_index = MTL_LPM_REG_INDEX, + .lpm_req_guid = IOEM_LPM_REQ_GUID, }; static struct pmc_info mtl_pmc_info_list[] = { { - .guid = SOCP_LPM_REQ_GUID, .devid = PMC_DEVID_MTL_SOCM, .map = &mtl_socm_reg_map, }, { - .guid = IOEP_LPM_REQ_GUID, .devid = PMC_DEVID_MTL_IOEP, .map = &mtl_ioep_reg_map, }, { - .guid = IOEM_LPM_REQ_GUID, .devid = PMC_DEVID_MTL_IOEM, .map = &mtl_ioem_reg_map }, @@ -992,9 +992,10 @@ static int mtl_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_in return generic_core_init(pmcdev, pmc_dev_info); } +static u32 MTL_PMT_DMU_GUIDS[] = {MTL_PMT_DMU_GUID, 0x0}; struct pmc_dev_info mtl_pmc_dev = { .pci_func = 2, - .dmu_guid = MTL_PMT_DMU_GUID, + .dmu_guids = MTL_PMT_DMU_GUIDS, .regmap_list = mtl_pmc_info_list, .map = &mtl_socm_reg_map, .sub_req_show = &pmc_core_substate_req_regs_fops, diff --git a/drivers/platform/x86/intel/pmc/ptl.c b/drivers/platform/x86/intel/pmc/ptl.c index 1b35b84e06fa..1f48e2bbc699 100644 --- a/drivers/platform/x86/intel/pmc/ptl.c +++ b/drivers/platform/x86/intel/pmc/ptl.c @@ -528,16 +528,15 @@ static const struct pmc_reg_map ptl_pcdp_reg_map = { .s0ix_blocker_offset = LNL_S0IX_BLOCKER_OFFSET, .num_s0ix_blocker = PTL_NUM_S0IX_BLOCKER, .blocker_req_offset = PTL_BLK_REQ_OFFSET, 
+ .lpm_req_guid = PCDP_LPM_REQ_GUID, }; static struct pmc_info ptl_pmc_info_list[] = { { - .guid = PCDP_LPM_REQ_GUID, .devid = PMC_DEVID_PTL_PCDH, .map = &ptl_pcdp_reg_map, }, { - .guid = PCDP_LPM_REQ_GUID, .devid = PMC_DEVID_PTL_PCDP, .map = &ptl_pcdp_reg_map, }, diff --git a/drivers/platform/x86/intel/pmc/wcl.c b/drivers/platform/x86/intel/pmc/wcl.c index 85e90a639e65..a45707e6364f 100644 --- a/drivers/platform/x86/intel/pmc/wcl.c +++ b/drivers/platform/x86/intel/pmc/wcl.c @@ -11,6 +11,9 @@ #include "core.h" +/* PMC SSRAM PMT Telemetry GUIDS */ +#define PCDN_LPM_REQ_GUID 0x33747648 + static const struct pmc_bit_map wcl_pcdn_pfear_map[] = { {"PMC_0", BIT(0)}, {"FUSE_OSSE", BIT(1)}, @@ -453,6 +456,17 @@ static const struct pmc_reg_map wcl_pcdn_reg_map = { .lpm_live_status_offset = MTL_LPM_LIVE_STATUS_OFFSET, .s0ix_blocker_maps = wcl_pcdn_blk_maps, .s0ix_blocker_offset = LNL_S0IX_BLOCKER_OFFSET, + .num_s0ix_blocker = WCL_NUM_S0IX_BLOCKER, + .blocker_req_offset = WCL_BLK_REQ_OFFSET, + .lpm_req_guid = PCDN_LPM_REQ_GUID, +}; + +static struct pmc_info wcl_pmc_info_list[] = { + { + .devid = PMC_DEVID_WCL_PCDN, + .map = &wcl_pcdn_reg_map, + }, + {} }; #define WCL_NPU_PCI_DEV 0xfd3e @@ -479,8 +493,12 @@ static int wcl_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_in } struct pmc_dev_info wcl_pmc_dev = { + .pci_func = 2, + .regmap_list = wcl_pmc_info_list, .map = &wcl_pcdn_reg_map, + .sub_req_show = &pmc_core_substate_blk_req_fops, .suspend = cnl_suspend, .resume = wcl_resume, .init = wcl_core_init, + .sub_req = pmc_core_pmt_get_blk_sub_req, }; diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c index f66f0ce8559b..ecfc7703f201 100644 --- a/drivers/platform/x86/intel/vsec.c +++ b/drivers/platform/x86/intel/vsec.c @@ -765,6 +765,7 @@ static const struct intel_vsec_platform_info lnl_info = { #define PCI_DEVICE_ID_INTEL_VSEC_TGL 0x9a0d #define PCI_DEVICE_ID_INTEL_VSEC_LNL_M 0x647d #define PCI_DEVICE_ID_INTEL_VSEC_PTL 0xb07d +#define PCI_DEVICE_ID_INTEL_VSEC_WCL 0xfd7d static const struct pci_device_id intel_vsec_pci_ids[] = { { PCI_DEVICE_DATA(INTEL, VSEC_ADL, &tgl_info) }, { PCI_DEVICE_DATA(INTEL, VSEC_DG1, &dg1_info) }, @@ -776,6 +777,7 @@ static const struct pci_device_id intel_vsec_pci_ids[] = { { PCI_DEVICE_DATA(INTEL, VSEC_TGL, &tgl_info) }, { PCI_DEVICE_DATA(INTEL, VSEC_LNL_M, &lnl_info) }, { PCI_DEVICE_DATA(INTEL, VSEC_PTL, &mtl_info) }, + { PCI_DEVICE_DATA(INTEL, VSEC_WCL, &mtl_info) }, { } }; MODULE_DEVICE_TABLE(pci, intel_vsec_pci_ids); diff --git a/drivers/platform/x86/lenovo/ideapad-laptop.c b/drivers/platform/x86/lenovo/ideapad-laptop.c index fcebfbaf0460..5171a077f62c 100644 --- a/drivers/platform/x86/lenovo/ideapad-laptop.c +++ b/drivers/platform/x86/lenovo/ideapad-laptop.c @@ -31,6 +31,7 @@ #include <linux/power_supply.h> #include <linux/rfkill.h> #include <linux/seq_file.h> +#include <linux/string_choices.h> #include <linux/sysfs.h> #include <linux/types.h> #include <linux/wmi.h> @@ -62,13 +63,27 @@ enum { CFG_OSD_CAM_BIT = 31, }; +/* + * There are two charge modes supported by the GBMD/SBMC interface: + * - "Rapid Charge": increase power to speed up charging + * - "Conservation Mode": stop charging at 60-80% (depends on model) + * + * The interface doesn't prohibit enabling both modes at the same time. + * However, doing so is essentially meaningless, and the manufacturer utilities + * on Windows always make them mutually exclusive. 
+ */ + enum { + GBMD_RAPID_CHARGE_STATE_BIT = 2, GBMD_CONSERVATION_STATE_BIT = 5, + GBMD_RAPID_CHARGE_SUPPORTED_BIT = 17, }; enum { SBMC_CONSERVATION_ON = 3, SBMC_CONSERVATION_OFF = 5, + SBMC_RAPID_CHARGE_ON = 7, + SBMC_RAPID_CHARGE_OFF = 8, }; enum { @@ -158,6 +173,7 @@ struct ideapad_rfk_priv { struct ideapad_private { struct acpi_device *adev; struct mutex vpc_mutex; /* protects the VPC calls */ + struct mutex gbmd_sbmc_mutex; /* protects GBMD/SBMC calls */ struct rfkill *rfk[IDEAPAD_RFKILL_DEV_NUM]; struct ideapad_rfk_priv rfk_priv[IDEAPAD_RFKILL_DEV_NUM]; struct platform_device *platform_device; @@ -166,9 +182,11 @@ struct ideapad_private { struct ideapad_dytc_priv *dytc; struct dentry *debug; struct acpi_battery_hook battery_hook; + const struct power_supply_ext *battery_ext; unsigned long cfg; unsigned long r_touchpad_val; struct { + bool rapid_charge : 1; bool conservation_mode : 1; bool dytc : 1; bool fan_mode : 1; @@ -455,37 +473,40 @@ static int debugfs_status_show(struct seq_file *s, void *data) struct ideapad_private *priv = s->private; unsigned long value; - guard(mutex)(&priv->vpc_mutex); - - if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL_MAX, &value)) - seq_printf(s, "Backlight max: %lu\n", value); - if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL, &value)) - seq_printf(s, "Backlight now: %lu\n", value); - if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL_POWER, &value)) - seq_printf(s, "BL power value: %s (%lu)\n", value ? "on" : "off", value); - - seq_puts(s, "=====================\n"); - - if (!read_ec_data(priv->adev->handle, VPCCMD_R_RF, &value)) - seq_printf(s, "Radio status: %s (%lu)\n", value ? "on" : "off", value); - if (!read_ec_data(priv->adev->handle, VPCCMD_R_WIFI, &value)) - seq_printf(s, "Wifi status: %s (%lu)\n", value ? "on" : "off", value); - if (!read_ec_data(priv->adev->handle, VPCCMD_R_BT, &value)) - seq_printf(s, "BT status: %s (%lu)\n", value ? "on" : "off", value); - if (!read_ec_data(priv->adev->handle, VPCCMD_R_3G, &value)) - seq_printf(s, "3G status: %s (%lu)\n", value ? "on" : "off", value); + scoped_guard(mutex, &priv->vpc_mutex) { + if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL_MAX, &value)) + seq_printf(s, "Backlight max: %lu\n", value); + if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL, &value)) + seq_printf(s, "Backlight now: %lu\n", value); + if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL_POWER, &value)) + seq_printf(s, "BL power value: %s (%lu)\n", str_on_off(value), value); + + seq_puts(s, "=====================\n"); + + if (!read_ec_data(priv->adev->handle, VPCCMD_R_RF, &value)) + seq_printf(s, "Radio status: %s (%lu)\n", str_on_off(value), value); + if (!read_ec_data(priv->adev->handle, VPCCMD_R_WIFI, &value)) + seq_printf(s, "Wifi status: %s (%lu)\n", str_on_off(value), value); + if (!read_ec_data(priv->adev->handle, VPCCMD_R_BT, &value)) + seq_printf(s, "BT status: %s (%lu)\n", str_on_off(value), value); + if (!read_ec_data(priv->adev->handle, VPCCMD_R_3G, &value)) + seq_printf(s, "3G status: %s (%lu)\n", str_on_off(value), value); + + seq_puts(s, "=====================\n"); + + if (!read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value)) + seq_printf(s, "Touchpad status: %s (%lu)\n", str_on_off(value), value); + if (!read_ec_data(priv->adev->handle, VPCCMD_R_CAMERA, &value)) + seq_printf(s, "Camera status: %s (%lu)\n", str_on_off(value), value); + } seq_puts(s, "=====================\n"); - if (!read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value)) - seq_printf(s, "Touchpad status: %s (%lu)\n", value ? 
"on" : "off", value); - if (!read_ec_data(priv->adev->handle, VPCCMD_R_CAMERA, &value)) - seq_printf(s, "Camera status: %s (%lu)\n", value ? "on" : "off", value); - - seq_puts(s, "=====================\n"); + scoped_guard(mutex, &priv->gbmd_sbmc_mutex) { + if (!eval_gbmd(priv->adev->handle, &value)) + seq_printf(s, "GBMD: %#010lx\n", value); + } - if (!eval_gbmd(priv->adev->handle, &value)) - seq_printf(s, "GBMD: %#010lx\n", value); if (!eval_hals(priv->adev->handle, &value)) seq_printf(s, "HALS: %#010lx\n", value); @@ -622,10 +643,16 @@ static ssize_t conservation_mode_show(struct device *dev, show_conservation_mode_deprecation_warning(dev); - err = eval_gbmd(priv->adev->handle, &result); - if (err) - return err; + scoped_guard(mutex, &priv->gbmd_sbmc_mutex) { + err = eval_gbmd(priv->adev->handle, &result); + if (err) + return err; + } + /* + * For backward compatibility, ignore Rapid Charge while reporting the + * state of Conservation Mode. + */ return sysfs_emit(buf, "%d\n", !!test_bit(GBMD_CONSERVATION_STATE_BIT, &result)); } @@ -643,6 +670,18 @@ static ssize_t conservation_mode_store(struct device *dev, if (err) return err; + guard(mutex)(&priv->gbmd_sbmc_mutex); + + /* + * Prevent mutually exclusive modes from being set at the same time, + * but do not disable Rapid Charge while disabling Conservation Mode. + */ + if (priv->features.rapid_charge && state) { + err = exec_sbmc(priv->adev->handle, SBMC_RAPID_CHARGE_OFF); + if (err) + return err; + } + err = exec_sbmc(priv->adev->handle, state ? SBMC_CONSERVATION_ON : SBMC_CONSERVATION_OFF); if (err) return err; @@ -2007,15 +2046,39 @@ static int ideapad_psy_ext_set_prop(struct power_supply *psy, const union power_supply_propval *val) { struct ideapad_private *priv = ext_data; + unsigned long op1, op2; + int err; switch (val->intval) { + case POWER_SUPPLY_CHARGE_TYPE_FAST: + if (WARN_ON(!priv->features.rapid_charge)) + return -EINVAL; + + op1 = SBMC_CONSERVATION_OFF; + op2 = SBMC_RAPID_CHARGE_ON; + break; case POWER_SUPPLY_CHARGE_TYPE_LONGLIFE: - return exec_sbmc(priv->adev->handle, SBMC_CONSERVATION_ON); + op1 = SBMC_RAPID_CHARGE_OFF; + op2 = SBMC_CONSERVATION_ON; + break; case POWER_SUPPLY_CHARGE_TYPE_STANDARD: - return exec_sbmc(priv->adev->handle, SBMC_CONSERVATION_OFF); + op1 = SBMC_RAPID_CHARGE_OFF; + op2 = SBMC_CONSERVATION_OFF; + break; default: return -EINVAL; } + + guard(mutex)(&priv->gbmd_sbmc_mutex); + + /* If !rapid_charge, op1 must be SBMC_RAPID_CHARGE_OFF. Skip it. 
*/ + if (priv->features.rapid_charge) { + err = exec_sbmc(priv->adev->handle, op1); + if (err) + return err; + } + + return exec_sbmc(priv->adev->handle, op2); } static int ideapad_psy_ext_get_prop(struct power_supply *psy, @@ -2025,14 +2088,29 @@ static int ideapad_psy_ext_get_prop(struct power_supply *psy, union power_supply_propval *val) { struct ideapad_private *priv = ext_data; + bool is_rapid_charge, is_conservation; unsigned long result; int err; - err = eval_gbmd(priv->adev->handle, &result); - if (err) - return err; + scoped_guard(mutex, &priv->gbmd_sbmc_mutex) { + err = eval_gbmd(priv->adev->handle, &result); + if (err) + return err; + } + + is_rapid_charge = (priv->features.rapid_charge && + test_bit(GBMD_RAPID_CHARGE_STATE_BIT, &result)); + is_conservation = test_bit(GBMD_CONSERVATION_STATE_BIT, &result); + + if (unlikely(is_rapid_charge && is_conservation)) { + dev_err(&priv->platform_device->dev, + "unexpected charge_types: both [Fast] and [Long_Life] are enabled\n"); + return -EINVAL; + } - if (test_bit(GBMD_CONSERVATION_STATE_BIT, &result)) + if (is_rapid_charge) + val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST; + else if (is_conservation) val->intval = POWER_SUPPLY_CHARGE_TYPE_LONGLIFE; else val->intval = POWER_SUPPLY_CHARGE_TYPE_STANDARD; @@ -2052,29 +2130,42 @@ static const enum power_supply_property ideapad_power_supply_props[] = { POWER_SUPPLY_PROP_CHARGE_TYPES, }; -static const struct power_supply_ext ideapad_battery_ext = { - .name = "ideapad_laptop", - .properties = ideapad_power_supply_props, - .num_properties = ARRAY_SIZE(ideapad_power_supply_props), - .charge_types = (BIT(POWER_SUPPLY_CHARGE_TYPE_STANDARD) | - BIT(POWER_SUPPLY_CHARGE_TYPE_LONGLIFE)), - .get_property = ideapad_psy_ext_get_prop, - .set_property = ideapad_psy_ext_set_prop, - .property_is_writeable = ideapad_psy_prop_is_writeable, -}; +#define DEFINE_IDEAPAD_POWER_SUPPLY_EXTENSION(_name, _charge_types) \ + static const struct power_supply_ext _name = { \ + .name = "ideapad_laptop", \ + .properties = ideapad_power_supply_props, \ + .num_properties = ARRAY_SIZE(ideapad_power_supply_props), \ + .charge_types = _charge_types, \ + .get_property = ideapad_psy_ext_get_prop, \ + .set_property = ideapad_psy_ext_set_prop, \ + .property_is_writeable = ideapad_psy_prop_is_writeable, \ + } + +DEFINE_IDEAPAD_POWER_SUPPLY_EXTENSION(ideapad_battery_ext_v1, + (BIT(POWER_SUPPLY_CHARGE_TYPE_STANDARD) | + BIT(POWER_SUPPLY_CHARGE_TYPE_LONGLIFE)) +); + +DEFINE_IDEAPAD_POWER_SUPPLY_EXTENSION(ideapad_battery_ext_v2, + (BIT(POWER_SUPPLY_CHARGE_TYPE_STANDARD) | + BIT(POWER_SUPPLY_CHARGE_TYPE_FAST) | + BIT(POWER_SUPPLY_CHARGE_TYPE_LONGLIFE)) +); static int ideapad_battery_add(struct power_supply *battery, struct acpi_battery_hook *hook) { struct ideapad_private *priv = container_of(hook, struct ideapad_private, battery_hook); - return power_supply_register_extension(battery, &ideapad_battery_ext, + return power_supply_register_extension(battery, priv->battery_ext, &priv->platform_device->dev, priv); } static int ideapad_battery_remove(struct power_supply *battery, struct acpi_battery_hook *hook) { - power_supply_unregister_extension(battery, &ideapad_battery_ext); + struct ideapad_private *priv = container_of(hook, struct ideapad_private, battery_hook); + + power_supply_unregister_extension(battery, priv->battery_ext); return 0; } @@ -2099,14 +2190,25 @@ static int ideapad_check_features(struct ideapad_private *priv) priv->features.fan_mode = true; if (acpi_has_method(handle, "GBMD") && acpi_has_method(handle, "SBMC")) { - 
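/*
 * From userspace the new charge modes are selected through the battery's
 * charge_types attribute provided by this extension. A minimal usage sketch
 * (userspace C); the BAT0 name and the lack of error reporting are
 * assumptions, and "Fast" is only accepted on models where rapid_charge is
 * supported (ideapad_battery_ext_v2):
 */
#include <fcntl.h>
#include <unistd.h>

static int select_charge_type(const char *type, size_t len)
{
	int fd = open("/sys/class/power_supply/BAT0/charge_types", O_WRONLY);
	ssize_t ret;

	if (fd < 0)
		return -1;

	ret = write(fd, type, len);	/* e.g. "Fast", "Standard" or "Long_Life" */
	close(fd);

	return ret == (ssize_t)len ? 0 : -1;
}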
priv->features.conservation_mode = true; - priv->battery_hook.add_battery = ideapad_battery_add; - priv->battery_hook.remove_battery = ideapad_battery_remove; - priv->battery_hook.name = "Ideapad Battery Extension"; - - err = devm_battery_hook_register(&priv->platform_device->dev, &priv->battery_hook); - if (err) - return err; + /* Not acquiring gbmd_sbmc_mutex as race condition is impossible on init */ + if (!eval_gbmd(handle, &val)) { + priv->features.conservation_mode = true; + priv->features.rapid_charge = test_bit(GBMD_RAPID_CHARGE_SUPPORTED_BIT, + &val); + + priv->battery_ext = priv->features.rapid_charge + ? &ideapad_battery_ext_v2 + : &ideapad_battery_ext_v1; + + priv->battery_hook.add_battery = ideapad_battery_add; + priv->battery_hook.remove_battery = ideapad_battery_remove; + priv->battery_hook.name = "Ideapad Battery Extension"; + + err = devm_battery_hook_register(&priv->platform_device->dev, + &priv->battery_hook); + if (err) + return err; + } } if (acpi_has_method(handle, "DYTC")) @@ -2292,6 +2394,10 @@ static int ideapad_acpi_add(struct platform_device *pdev) if (err) return err; + err = devm_mutex_init(&pdev->dev, &priv->gbmd_sbmc_mutex); + if (err) + return err; + err = ideapad_check_features(priv); if (err) return err; diff --git a/drivers/platform/x86/lenovo/wmi-gamezone.c b/drivers/platform/x86/lenovo/wmi-gamezone.c index 0eb7fe8222f4..381836d29a96 100644 --- a/drivers/platform/x86/lenovo/wmi-gamezone.c +++ b/drivers/platform/x86/lenovo/wmi-gamezone.c @@ -171,14 +171,10 @@ static int lwmi_gz_profile_get(struct device *dev, *profile = PLATFORM_PROFILE_BALANCED; break; case LWMI_GZ_THERMAL_MODE_PERFORMANCE: - if (priv->extreme_supported) { - *profile = PLATFORM_PROFILE_BALANCED_PERFORMANCE; - break; - } *profile = PLATFORM_PROFILE_PERFORMANCE; break; case LWMI_GZ_THERMAL_MODE_EXTREME: - *profile = PLATFORM_PROFILE_PERFORMANCE; + *profile = PLATFORM_PROFILE_MAX_POWER; break; case LWMI_GZ_THERMAL_MODE_CUSTOM: *profile = PLATFORM_PROFILE_CUSTOM; @@ -218,16 +214,12 @@ static int lwmi_gz_profile_set(struct device *dev, case PLATFORM_PROFILE_BALANCED: mode = LWMI_GZ_THERMAL_MODE_BALANCED; break; - case PLATFORM_PROFILE_BALANCED_PERFORMANCE: - mode = LWMI_GZ_THERMAL_MODE_PERFORMANCE; - break; case PLATFORM_PROFILE_PERFORMANCE: - if (priv->extreme_supported) { - mode = LWMI_GZ_THERMAL_MODE_EXTREME; - break; - } mode = LWMI_GZ_THERMAL_MODE_PERFORMANCE; break; + case PLATFORM_PROFILE_MAX_POWER: + mode = LWMI_GZ_THERMAL_MODE_EXTREME; + break; case PLATFORM_PROFILE_CUSTOM: mode = LWMI_GZ_THERMAL_MODE_CUSTOM; break; @@ -274,8 +266,23 @@ static const struct dmi_system_id fwbug_list[] = { }, .driver_data = &quirk_no_extreme_bug, }, + { + .ident = "Legion Go 8ASP2", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Legion Go 8ASP2"), + }, + .driver_data = &quirk_no_extreme_bug, + }, + { + .ident = "Legion Go 8AHP2", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Legion Go 8AHP2"), + }, + .driver_data = &quirk_no_extreme_bug, + }, {}, - }; /** @@ -338,7 +345,7 @@ static int lwmi_gz_platform_profile_probe(void *drvdata, unsigned long *choices) priv->extreme_supported = lwmi_gz_extreme_supported(profile_support_ver); if (priv->extreme_supported) - set_bit(PLATFORM_PROFILE_BALANCED_PERFORMANCE, choices); + set_bit(PLATFORM_PROFILE_MAX_POWER, choices); return 0; } diff --git a/drivers/platform/x86/lg-laptop.c b/drivers/platform/x86/lg-laptop.c index 6af6cf477c5b..f92e89c75db9 100644 --- 
a/drivers/platform/x86/lg-laptop.c +++ b/drivers/platform/x86/lg-laptop.c @@ -19,6 +19,7 @@ #include <linux/leds.h> #include <linux/module.h> #include <linux/platform_device.h> +#include <linux/string_choices.h> #include <linux/types.h> #include <acpi/battery.h> @@ -42,6 +43,7 @@ MODULE_PARM_DESC(fw_debug, "Enable printing of firmware debug messages"); #define LG_ADDRESS_SPACE_ID 0x8F #define LG_ADDRESS_SPACE_DEBUG_FLAG_ADR 0x00 +#define LG_ADDRESS_SPACE_HD_AUDIO_POWER_ADDR 0x01 #define LG_ADDRESS_SPACE_FAN_MODE_ADR 0x03 #define LG_ADDRESS_SPACE_DTTM_FLAG_ADR 0x20 @@ -668,6 +670,15 @@ static acpi_status lg_laptop_address_space_write(struct device *dev, acpi_physic byte = value & 0xFF; switch (address) { + case LG_ADDRESS_SPACE_HD_AUDIO_POWER_ADDR: + /* + * The HD audio power field is not affected by the DTTM flag, + * so we have to manually check fw_debug. + */ + if (fw_debug) + dev_dbg(dev, "HD audio power %s\n", str_enabled_disabled(byte)); + + return AE_OK; case LG_ADDRESS_SPACE_FAN_MODE_ADR: /* * The fan mode field is not affected by the DTTM flag, so we diff --git a/drivers/platform/x86/oxpec.c b/drivers/platform/x86/oxpec.c index 54377b282ff8..144a454103b9 100644 --- a/drivers/platform/x86/oxpec.c +++ b/drivers/platform/x86/oxpec.c @@ -1,8 +1,6 @@ // SPDX-License-Identifier: GPL-2.0+ /* - * Platform driver for OneXPlayer and AOKZOE devices. For the time being, - * it also exposes fan controls for AYANEO, and OrangePi Handhelds via - * hwmon sysfs. + * Platform driver for OneXPlayer and AOKZOE devices. * * Fan control is provided via pwm interface in the range [0-255]. * Old AMD boards use [0-100] as range in the EC, the written value is @@ -43,14 +41,6 @@ static bool unlock_global_acpi_lock(void) enum oxp_board { aok_zoe_a1 = 1, - aya_neo_2, - aya_neo_air, - aya_neo_air_1s, - aya_neo_air_plus_mendo, - aya_neo_air_pro, - aya_neo_flip, - aya_neo_geek, - aya_neo_kun, orange_pi_neo, oxp_2, oxp_fly, @@ -133,62 +123,6 @@ static const struct dmi_system_id dmi_table[] = { }, { .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"), - DMI_MATCH(DMI_BOARD_NAME, "AYANEO 2"), - }, - .driver_data = (void *)aya_neo_2, - }, - { - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"), - DMI_EXACT_MATCH(DMI_BOARD_NAME, "AIR"), - }, - .driver_data = (void *)aya_neo_air, - }, - { - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"), - DMI_EXACT_MATCH(DMI_BOARD_NAME, "AIR 1S"), - }, - .driver_data = (void *)aya_neo_air_1s, - }, - { - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"), - DMI_EXACT_MATCH(DMI_BOARD_NAME, "AB05-Mendocino"), - }, - .driver_data = (void *)aya_neo_air_plus_mendo, - }, - { - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"), - DMI_EXACT_MATCH(DMI_BOARD_NAME, "AIR Pro"), - }, - .driver_data = (void *)aya_neo_air_pro, - }, - { - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"), - DMI_MATCH(DMI_BOARD_NAME, "FLIP"), - }, - .driver_data = (void *)aya_neo_flip, - }, - { - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"), - DMI_MATCH(DMI_BOARD_NAME, "GEEK"), - }, - .driver_data = (void *)aya_neo_geek, - }, - { - .matches = { - DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"), - DMI_EXACT_MATCH(DMI_BOARD_NAME, "KUN"), - }, - .driver_data = (void *)aya_neo_kun, - }, - { - .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "OrangePi"), DMI_EXACT_MATCH(DMI_BOARD_NAME, "NEO-01"), }, @@ -672,13 +606,6 @@ static int oxp_pwm_enable(void) case orange_pi_neo: return write_to_ec(ORANGEPI_SENSOR_PWM_ENABLE_REG, PWM_MODE_MANUAL); case aok_zoe_a1: - case aya_neo_2: - case aya_neo_air: - case 
aya_neo_air_plus_mendo: - case aya_neo_air_pro: - case aya_neo_flip: - case aya_neo_geek: - case aya_neo_kun: case oxp_2: case oxp_fly: case oxp_mini_amd: @@ -699,14 +626,6 @@ static int oxp_pwm_disable(void) case orange_pi_neo: return write_to_ec(ORANGEPI_SENSOR_PWM_ENABLE_REG, PWM_MODE_AUTO); case aok_zoe_a1: - case aya_neo_2: - case aya_neo_air: - case aya_neo_air_1s: - case aya_neo_air_plus_mendo: - case aya_neo_air_pro: - case aya_neo_flip: - case aya_neo_geek: - case aya_neo_kun: case oxp_2: case oxp_fly: case oxp_mini_amd: @@ -727,14 +646,6 @@ static int oxp_pwm_read(long *val) case orange_pi_neo: return read_from_ec(ORANGEPI_SENSOR_PWM_ENABLE_REG, 1, val); case aok_zoe_a1: - case aya_neo_2: - case aya_neo_air: - case aya_neo_air_1s: - case aya_neo_air_plus_mendo: - case aya_neo_air_pro: - case aya_neo_flip: - case aya_neo_geek: - case aya_neo_kun: case oxp_2: case oxp_fly: case oxp_mini_amd: @@ -774,14 +685,6 @@ static int oxp_pwm_fan_speed(long *val) case oxp_g1_i: return read_from_ec(OXP_2_SENSOR_FAN_REG, 2, val); case aok_zoe_a1: - case aya_neo_2: - case aya_neo_air: - case aya_neo_air_1s: - case aya_neo_air_plus_mendo: - case aya_neo_air_pro: - case aya_neo_flip: - case aya_neo_geek: - case aya_neo_kun: case oxp_fly: case oxp_mini_amd: case oxp_mini_amd_a07: @@ -810,14 +713,6 @@ static int oxp_pwm_input_write(long val) /* scale to range [0-184] */ val = (val * 184) / 255; return write_to_ec(OXP_SENSOR_PWM_REG, val); - case aya_neo_2: - case aya_neo_air: - case aya_neo_air_1s: - case aya_neo_air_plus_mendo: - case aya_neo_air_pro: - case aya_neo_flip: - case aya_neo_geek: - case aya_neo_kun: case oxp_mini_amd: case oxp_mini_amd_a07: /* scale to range [0-100] */ @@ -854,14 +749,6 @@ static int oxp_pwm_input_read(long *val) /* scale from range [0-184] */ *val = (*val * 255) / 184; break; - case aya_neo_2: - case aya_neo_air: - case aya_neo_air_1s: - case aya_neo_air_plus_mendo: - case aya_neo_air_pro: - case aya_neo_flip: - case aya_neo_geek: - case aya_neo_kun: case oxp_mini_amd: case oxp_mini_amd_a07: ret = read_from_ec(OXP_SENSOR_PWM_REG, 1, val); diff --git a/drivers/platform/x86/serial-multi-instantiate.c b/drivers/platform/x86/serial-multi-instantiate.c index db030b0f176a..1a369334f9cb 100644 --- a/drivers/platform/x86/serial-multi-instantiate.c +++ b/drivers/platform/x86/serial-multi-instantiate.c @@ -22,6 +22,7 @@ #define IRQ_RESOURCE_GPIO 1 #define IRQ_RESOURCE_APIC 2 #define IRQ_RESOURCE_AUTO 3 +#define IRQ_RESOURCE_OPT BIT(2) enum smi_bus_type { SMI_I2C, @@ -64,6 +65,10 @@ static int smi_get_irq(struct platform_device *pdev, struct acpi_device *adev, dev_dbg(&pdev->dev, "Using platform irq\n"); break; } + if (inst->flags & IRQ_RESOURCE_OPT) { + dev_dbg(&pdev->dev, "No irq\n"); + return 0; + } break; case IRQ_RESOURCE_GPIO: ret = acpi_dev_gpio_irq_get(adev, inst->irq_idx); @@ -386,10 +391,10 @@ static const struct smi_node cs35l57_hda = { static const struct smi_node tas2781_hda = { .instances = { - { "tas2781-hda", IRQ_RESOURCE_AUTO, 0 }, - { "tas2781-hda", IRQ_RESOURCE_AUTO, 0 }, - { "tas2781-hda", IRQ_RESOURCE_AUTO, 0 }, - { "tas2781-hda", IRQ_RESOURCE_AUTO, 0 }, + { "tas2781-hda", IRQ_RESOURCE_AUTO | IRQ_RESOURCE_OPT, 0 }, + { "tas2781-hda", IRQ_RESOURCE_AUTO | IRQ_RESOURCE_OPT, 0 }, + { "tas2781-hda", IRQ_RESOURCE_AUTO | IRQ_RESOURCE_OPT, 0 }, + { "tas2781-hda", IRQ_RESOURCE_AUTO | IRQ_RESOURCE_OPT, 0 }, {} }, .bus_type = SMI_AUTO_DETECT, diff --git a/drivers/platform/x86/uniwill/Kconfig b/drivers/platform/x86/uniwill/Kconfig new file mode 100644 index 
000000000000..d07cc8440188 --- /dev/null +++ b/drivers/platform/x86/uniwill/Kconfig @@ -0,0 +1,38 @@ +# SPDX-License-Identifier: GPL-2.0-or-later +# +# Uniwill X86 Platform Specific Drivers +# + +menuconfig X86_PLATFORM_DRIVERS_UNIWILL + bool "Uniwill X86 Platform Specific Device Drivers" + depends on X86_PLATFORM_DEVICES + help + Say Y here to see options for device drivers for various + Uniwill x86 platforms, including many OEM laptops originally + manufactured by Uniwill. + This option alone does not add any kernel code. + + If you say N, all options in this submenu will be skipped and disabled. + +if X86_PLATFORM_DRIVERS_UNIWILL + +config UNIWILL_LAPTOP + tristate "Uniwill Laptop Extras" + default m + depends on ACPI + depends on ACPI_WMI + depends on ACPI_BATTERY + depends on HWMON + depends on INPUT + depends on LEDS_CLASS_MULTICOLOR + depends on DMI + select REGMAP + select INPUT_SPARSEKMAP + help + This driver adds support for various extra features found on Uniwill laptops, + like the lightbar, hwmon sensors and hotkeys. It also supports many OEM laptops + originally manufactured by Uniwill. + + If you have such a laptop, say Y or M here. + +endif diff --git a/drivers/platform/x86/uniwill/Makefile b/drivers/platform/x86/uniwill/Makefile new file mode 100644 index 000000000000..05cd1747a240 --- /dev/null +++ b/drivers/platform/x86/uniwill/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0-or-later +# +# Makefile for linux/drivers/platform/x86/uniwill +# Uniwill X86 Platform Specific Drivers +# + +obj-$(CONFIG_UNIWILL_LAPTOP) += uniwill-laptop.o +uniwill-laptop-y := uniwill-acpi.o uniwill-wmi.o diff --git a/drivers/platform/x86/uniwill/uniwill-acpi.c b/drivers/platform/x86/uniwill/uniwill-acpi.c new file mode 100644 index 000000000000..bd7e63dd5181 --- /dev/null +++ b/drivers/platform/x86/uniwill/uniwill-acpi.c @@ -0,0 +1,1912 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Linux driver for Uniwill notebooks. + * + * Special thanks go to PÅ‘cze BarnabĂ¡s, Christoffer Sandberg and Werner Sembach + * for supporting the development of this driver either through prior work or + * by answering questions regarding the underlying ACPI and WMI interfaces. 
+ * + * Copyright (C) 2025 Armin Wolf <W_Armin@gmx.de> + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/acpi.h> +#include <linux/array_size.h> +#include <linux/bits.h> +#include <linux/bitfield.h> +#include <linux/cleanup.h> +#include <linux/debugfs.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/device/driver.h> +#include <linux/dmi.h> +#include <linux/errno.h> +#include <linux/fixp-arith.h> +#include <linux/hwmon.h> +#include <linux/hwmon-sysfs.h> +#include <linux/init.h> +#include <linux/input.h> +#include <linux/input/sparse-keymap.h> +#include <linux/kernel.h> +#include <linux/kstrtox.h> +#include <linux/leds.h> +#include <linux/led-class-multicolor.h> +#include <linux/limits.h> +#include <linux/list.h> +#include <linux/minmax.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/notifier.h> +#include <linux/platform_device.h> +#include <linux/pm.h> +#include <linux/printk.h> +#include <linux/regmap.h> +#include <linux/string.h> +#include <linux/sysfs.h> +#include <linux/types.h> +#include <linux/units.h> + +#include <acpi/battery.h> + +#include "uniwill-wmi.h" + +#define EC_ADDR_BAT_POWER_UNIT_1 0x0400 + +#define EC_ADDR_BAT_POWER_UNIT_2 0x0401 + +#define EC_ADDR_BAT_DESIGN_CAPACITY_1 0x0402 + +#define EC_ADDR_BAT_DESIGN_CAPACITY_2 0x0403 + +#define EC_ADDR_BAT_FULL_CAPACITY_1 0x0404 + +#define EC_ADDR_BAT_FULL_CAPACITY_2 0x0405 + +#define EC_ADDR_BAT_DESIGN_VOLTAGE_1 0x0408 + +#define EC_ADDR_BAT_DESIGN_VOLTAGE_2 0x0409 + +#define EC_ADDR_BAT_STATUS_1 0x0432 +#define BAT_DISCHARGING BIT(0) + +#define EC_ADDR_BAT_STATUS_2 0x0433 + +#define EC_ADDR_BAT_CURRENT_1 0x0434 + +#define EC_ADDR_BAT_CURRENT_2 0x0435 + +#define EC_ADDR_BAT_REMAIN_CAPACITY_1 0x0436 + +#define EC_ADDR_BAT_REMAIN_CAPACITY_2 0x0437 + +#define EC_ADDR_BAT_VOLTAGE_1 0x0438 + +#define EC_ADDR_BAT_VOLTAGE_2 0x0439 + +#define EC_ADDR_CPU_TEMP 0x043E + +#define EC_ADDR_GPU_TEMP 0x044F + +#define EC_ADDR_MAIN_FAN_RPM_1 0x0464 + +#define EC_ADDR_MAIN_FAN_RPM_2 0x0465 + +#define EC_ADDR_SECOND_FAN_RPM_1 0x046C + +#define EC_ADDR_SECOND_FAN_RPM_2 0x046D + +#define EC_ADDR_DEVICE_STATUS 0x047B +#define WIFI_STATUS_ON BIT(7) +/* BIT(5) is also unset depending on the rfkill state (bluetooth?) 
*/ + +#define EC_ADDR_BAT_ALERT 0x0494 + +#define EC_ADDR_BAT_CYCLE_COUNT_1 0x04A6 + +#define EC_ADDR_BAT_CYCLE_COUNT_2 0x04A7 + +#define EC_ADDR_PROJECT_ID 0x0740 + +#define EC_ADDR_AP_OEM 0x0741 +#define ENABLE_MANUAL_CTRL BIT(0) +#define ITE_KBD_EFFECT_REACTIVE BIT(3) +#define FAN_ABNORMAL BIT(5) + +#define EC_ADDR_SUPPORT_5 0x0742 +#define FAN_TURBO_SUPPORTED BIT(4) +#define FAN_SUPPORT BIT(5) + +#define EC_ADDR_CTGP_DB_CTRL 0x0743 +#define CTGP_DB_GENERAL_ENABLE BIT(0) +#define CTGP_DB_DB_ENABLE BIT(1) +#define CTGP_DB_CTGP_ENABLE BIT(2) + +#define EC_ADDR_CTGP_OFFSET 0x0744 + +#define EC_ADDR_TPP_OFFSET 0x0745 + +#define EC_ADDR_MAX_TGP 0x0746 + +#define EC_ADDR_LIGHTBAR_AC_CTRL 0x0748 +#define LIGHTBAR_APP_EXISTS BIT(0) +#define LIGHTBAR_POWER_SAVE BIT(1) +#define LIGHTBAR_S0_OFF BIT(2) +#define LIGHTBAR_S3_OFF BIT(3) // Breathing animation when suspended +#define LIGHTBAR_WELCOME BIT(7) // Rainbow animation + +#define EC_ADDR_LIGHTBAR_AC_RED 0x0749 + +#define EC_ADDR_LIGHTBAR_AC_GREEN 0x074A + +#define EC_ADDR_LIGHTBAR_AC_BLUE 0x074B + +#define EC_ADDR_BIOS_OEM 0x074E +#define FN_LOCK_STATUS BIT(4) + +#define EC_ADDR_MANUAL_FAN_CTRL 0x0751 +#define FAN_LEVEL_MASK GENMASK(2, 0) +#define FAN_MODE_TURBO BIT(4) +#define FAN_MODE_HIGH BIT(5) +#define FAN_MODE_BOOST BIT(6) +#define FAN_MODE_USER BIT(7) + +#define EC_ADDR_PWM_1 0x075B + +#define EC_ADDR_PWM_2 0x075C + +/* Unreliable */ +#define EC_ADDR_SUPPORT_1 0x0765 +#define AIRPLANE_MODE BIT(0) +#define GPS_SWITCH BIT(1) +#define OVERCLOCK BIT(2) +#define MACRO_KEY BIT(3) +#define SHORTCUT_KEY BIT(4) +#define SUPER_KEY_LOCK BIT(5) +#define LIGHTBAR BIT(6) +#define FAN_BOOST BIT(7) + +#define EC_ADDR_SUPPORT_2 0x0766 +#define SILENT_MODE BIT(0) +#define USB_CHARGING BIT(1) +#define RGB_KEYBOARD BIT(2) +#define CHINA_MODE BIT(5) +#define MY_BATTERY BIT(6) + +#define EC_ADDR_TRIGGER 0x0767 +#define TRIGGER_SUPER_KEY_LOCK BIT(0) +#define TRIGGER_LIGHTBAR BIT(1) +#define TRIGGER_FAN_BOOST BIT(2) +#define TRIGGER_SILENT_MODE BIT(3) +#define TRIGGER_USB_CHARGING BIT(4) +#define RGB_APPLY_COLOR BIT(5) +#define RGB_LOGO_EFFECT BIT(6) +#define RGB_RAINBOW_EFFECT BIT(7) + +#define EC_ADDR_SWITCH_STATUS 0x0768 +#define SUPER_KEY_LOCK_STATUS BIT(0) +#define LIGHTBAR_STATUS BIT(1) +#define FAN_BOOST_STATUS BIT(2) +#define MACRO_KEY_STATUS BIT(3) +#define MY_BAT_POWER_BAT_STATUS BIT(4) + +#define EC_ADDR_RGB_RED 0x0769 + +#define EC_ADDR_RGB_GREEN 0x076A + +#define EC_ADDR_RGB_BLUE 0x076B + +#define EC_ADDR_ROMID_START 0x0770 +#define ROMID_LENGTH 14 + +#define EC_ADDR_ROMID_EXTRA_1 0x077E + +#define EC_ADDR_ROMID_EXTRA_2 0x077F + +#define EC_ADDR_BIOS_OEM_2 0x0782 +#define FAN_V2_NEW BIT(0) +#define FAN_QKEY BIT(1) +#define FAN_TABLE_OFFICE_MODE BIT(2) +#define FAN_V3 BIT(3) +#define DEFAULT_MODE BIT(4) + +#define EC_ADDR_PL1_SETTING 0x0783 + +#define EC_ADDR_PL2_SETTING 0x0784 + +#define EC_ADDR_PL4_SETTING 0x0785 + +#define EC_ADDR_FAN_DEFAULT 0x0786 +#define FAN_CURVE_LENGTH 5 + +#define EC_ADDR_KBD_STATUS 0x078C +#define KBD_WHITE_ONLY BIT(0) // ~single color +#define KBD_SINGLE_COLOR_OFF BIT(1) +#define KBD_TURBO_LEVEL_MASK GENMASK(3, 2) +#define KBD_APPLY BIT(4) +#define KBD_BRIGHTNESS GENMASK(7, 5) + +#define EC_ADDR_FAN_CTRL 0x078E +#define FAN3P5 BIT(1) +#define CHARGING_PROFILE BIT(3) +#define UNIVERSAL_FAN_CTRL BIT(6) + +#define EC_ADDR_BIOS_OEM_3 0x07A3 +#define FAN_REDUCED_DURY_CYCLE BIT(5) +#define FAN_ALWAYS_ON BIT(6) + +#define EC_ADDR_BIOS_BYTE 0x07A4 +#define FN_LOCK_SWITCH BIT(3) + +#define EC_ADDR_OEM_3 0x07A5 +#define 
POWER_LED_MASK GENMASK(1, 0) +#define POWER_LED_LEFT 0x00 +#define POWER_LED_BOTH 0x01 +#define POWER_LED_NONE 0x02 +#define FAN_QUIET BIT(2) +#define OVERBOOST BIT(4) +#define HIGH_POWER BIT(7) + +#define EC_ADDR_OEM_4 0x07A6 +#define OVERBOOST_DYN_TEMP_OFF BIT(1) +#define TOUCHPAD_TOGGLE_OFF BIT(6) + +#define EC_ADDR_CHARGE_CTRL 0x07B9 +#define CHARGE_CTRL_MASK GENMASK(6, 0) +#define CHARGE_CTRL_REACHED BIT(7) + +#define EC_ADDR_UNIVERSAL_FAN_CTRL 0x07C5 +#define SPLIT_TABLES BIT(7) + +#define EC_ADDR_AP_OEM_6 0x07C6 +#define ENABLE_UNIVERSAL_FAN_CTRL BIT(2) +#define BATTERY_CHARGE_FULL_OVER_24H BIT(3) +#define BATTERY_ERM_STATUS_REACHED BIT(4) + +#define EC_ADDR_CHARGE_PRIO 0x07CC +#define CHARGING_PERFORMANCE BIT(7) + +/* Same bits as EC_ADDR_LIGHTBAR_AC_CTRL except LIGHTBAR_S3_OFF */ +#define EC_ADDR_LIGHTBAR_BAT_CTRL 0x07E2 + +#define EC_ADDR_LIGHTBAR_BAT_RED 0x07E3 + +#define EC_ADDR_LIGHTBAR_BAT_GREEN 0x07E4 + +#define EC_ADDR_LIGHTBAR_BAT_BLUE 0x07E5 + +#define EC_ADDR_CPU_TEMP_END_TABLE 0x0F00 + +#define EC_ADDR_CPU_TEMP_START_TABLE 0x0F10 + +#define EC_ADDR_CPU_FAN_SPEED_TABLE 0x0F20 + +#define EC_ADDR_GPU_TEMP_END_TABLE 0x0F30 + +#define EC_ADDR_GPU_TEMP_START_TABLE 0x0F40 + +#define EC_ADDR_GPU_FAN_SPEED_TABLE 0x0F50 + +/* + * Those two registers technically allow for manual fan control, + * but are unstable on some models and are likely not meant to + * be used by applications as they are only accessible when using + * the WMI interface. + */ +#define EC_ADDR_PWM_1_WRITEABLE 0x1804 + +#define EC_ADDR_PWM_2_WRITEABLE 0x1809 + +#define DRIVER_NAME "uniwill" + +/* + * The OEM software always sleeps up to 6 ms after reading/writing EC + * registers, so we emulate this behaviour for maximum compatibility. + */ +#define UNIWILL_EC_DELAY_US 6000 + +#define PWM_MAX 200 +#define FAN_TABLE_LENGTH 16 + +#define LED_CHANNELS 3 +#define LED_MAX_BRIGHTNESS 200 + +#define UNIWILL_FEATURE_FN_LOCK_TOGGLE BIT(0) +#define UNIWILL_FEATURE_SUPER_KEY_TOGGLE BIT(1) +#define UNIWILL_FEATURE_TOUCHPAD_TOGGLE BIT(2) +#define UNIWILL_FEATURE_LIGHTBAR BIT(3) +#define UNIWILL_FEATURE_BATTERY BIT(4) +#define UNIWILL_FEATURE_HWMON BIT(5) + +struct uniwill_data { + struct device *dev; + acpi_handle handle; + struct regmap *regmap; + struct acpi_battery_hook hook; + unsigned int last_charge_ctrl; + struct mutex battery_lock; /* Protects the list of currently registered batteries */ + unsigned int last_switch_status; + struct mutex super_key_lock; /* Protects the toggling of the super key lock state */ + struct list_head batteries; + struct mutex led_lock; /* Protects writes to the lightbar registers */ + struct led_classdev_mc led_mc_cdev; + struct mc_subled led_mc_subled_info[LED_CHANNELS]; + struct mutex input_lock; /* Protects input sequence during notify */ + struct input_dev *input_device; + struct notifier_block nb; +}; + +struct uniwill_battery_entry { + struct list_head head; + struct power_supply *battery; +}; + +static bool force; +module_param_unsafe(force, bool, 0); +MODULE_PARM_DESC(force, "Force loading without checking for supported devices\n"); + +/* Feature bitmask since the associated registers are not reliable */ +static unsigned int supported_features; + +static const char * const uniwill_temp_labels[] = { + "CPU", + "GPU", +}; + +static const char * const uniwill_fan_labels[] = { + "Main", + "Secondary", +}; + +static const struct key_entry uniwill_keymap[] = { + /* Reported via keyboard controller */ + { KE_IGNORE, UNIWILL_OSD_CAPSLOCK, { KEY_CAPSLOCK }}, + { KE_IGNORE, 
UNIWILL_OSD_NUMLOCK, { KEY_NUMLOCK }}, + + /* Reported when the user locks/unlocks the super key */ + { KE_IGNORE, UNIWILL_OSD_SUPER_KEY_LOCK_ENABLE, { KEY_UNKNOWN }}, + { KE_IGNORE, UNIWILL_OSD_SUPER_KEY_LOCK_DISABLE, { KEY_UNKNOWN }}, + /* Optional, might not be reported by all devices */ + { KE_IGNORE, UNIWILL_OSD_SUPER_KEY_LOCK_CHANGED, { KEY_UNKNOWN }}, + + /* Reported in manual mode when toggling the airplane mode status */ + { KE_KEY, UNIWILL_OSD_RFKILL, { KEY_RFKILL }}, + { KE_IGNORE, UNIWILL_OSD_RADIOON, { KEY_UNKNOWN }}, + { KE_IGNORE, UNIWILL_OSD_RADIOOFF, { KEY_UNKNOWN }}, + + /* Reported when user wants to cycle the platform profile */ + { KE_KEY, UNIWILL_OSD_PERFORMANCE_MODE_TOGGLE, { KEY_F14 }}, + + /* Reported when the user wants to adjust the brightness of the keyboard */ + { KE_KEY, UNIWILL_OSD_KBDILLUMDOWN, { KEY_KBDILLUMDOWN }}, + { KE_KEY, UNIWILL_OSD_KBDILLUMUP, { KEY_KBDILLUMUP }}, + + /* Reported when the user wants to toggle the microphone mute status */ + { KE_KEY, UNIWILL_OSD_MIC_MUTE, { KEY_MICMUTE }}, + + /* Reported when the user wants to toggle the mute status */ + { KE_IGNORE, UNIWILL_OSD_MUTE, { KEY_MUTE }}, + + /* Reported when the user locks/unlocks the Fn key */ + { KE_IGNORE, UNIWILL_OSD_FN_LOCK, { KEY_FN_ESC }}, + + /* Reported when the user wants to toggle the brightness of the keyboard */ + { KE_KEY, UNIWILL_OSD_KBDILLUMTOGGLE, { KEY_KBDILLUMTOGGLE }}, + { KE_KEY, UNIWILL_OSD_KB_LED_LEVEL0, { KEY_KBDILLUMTOGGLE }}, + { KE_KEY, UNIWILL_OSD_KB_LED_LEVEL1, { KEY_KBDILLUMTOGGLE }}, + { KE_KEY, UNIWILL_OSD_KB_LED_LEVEL2, { KEY_KBDILLUMTOGGLE }}, + { KE_KEY, UNIWILL_OSD_KB_LED_LEVEL3, { KEY_KBDILLUMTOGGLE }}, + { KE_KEY, UNIWILL_OSD_KB_LED_LEVEL4, { KEY_KBDILLUMTOGGLE }}, + + /* FIXME: find out the exact meaning of those events */ + { KE_IGNORE, UNIWILL_OSD_BAT_CHARGE_FULL_24_H, { KEY_UNKNOWN }}, + { KE_IGNORE, UNIWILL_OSD_BAT_ERM_UPDATE, { KEY_UNKNOWN }}, + + /* Reported when the user wants to toggle the benchmark mode status */ + { KE_IGNORE, UNIWILL_OSD_BENCHMARK_MODE_TOGGLE, { KEY_UNKNOWN }}, + + /* Reported when the user wants to toggle the webcam */ + { KE_IGNORE, UNIWILL_OSD_WEBCAM_TOGGLE, { KEY_UNKNOWN }}, + + { KE_END } +}; + +static int uniwill_ec_reg_write(void *context, unsigned int reg, unsigned int val) +{ + union acpi_object params[2] = { + { + .integer = { + .type = ACPI_TYPE_INTEGER, + .value = reg, + }, + }, + { + .integer = { + .type = ACPI_TYPE_INTEGER, + .value = val, + }, + }, + }; + struct uniwill_data *data = context; + struct acpi_object_list input = { + .count = ARRAY_SIZE(params), + .pointer = params, + }; + acpi_status status; + + status = acpi_evaluate_object(data->handle, "ECRW", &input, NULL); + if (ACPI_FAILURE(status)) + return -EIO; + + usleep_range(UNIWILL_EC_DELAY_US, UNIWILL_EC_DELAY_US * 2); + + return 0; +} + +static int uniwill_ec_reg_read(void *context, unsigned int reg, unsigned int *val) +{ + union acpi_object params[1] = { + { + .integer = { + .type = ACPI_TYPE_INTEGER, + .value = reg, + }, + }, + }; + struct uniwill_data *data = context; + struct acpi_object_list input = { + .count = ARRAY_SIZE(params), + .pointer = params, + }; + unsigned long long output; + acpi_status status; + + status = acpi_evaluate_integer(data->handle, "ECRR", &input, &output); + if (ACPI_FAILURE(status)) + return -EIO; + + if (output > U8_MAX) + return -ENXIO; + + usleep_range(UNIWILL_EC_DELAY_US, UNIWILL_EC_DELAY_US * 2); + + *val = output; + + return 0; +} + +static const struct regmap_bus uniwill_ec_bus = { + .reg_write = 
uniwill_ec_reg_write, + .reg_read = uniwill_ec_reg_read, + .reg_format_endian_default = REGMAP_ENDIAN_LITTLE, + .val_format_endian_default = REGMAP_ENDIAN_LITTLE, +}; + +static bool uniwill_writeable_reg(struct device *dev, unsigned int reg) +{ + switch (reg) { + case EC_ADDR_AP_OEM: + case EC_ADDR_LIGHTBAR_AC_CTRL: + case EC_ADDR_LIGHTBAR_AC_RED: + case EC_ADDR_LIGHTBAR_AC_GREEN: + case EC_ADDR_LIGHTBAR_AC_BLUE: + case EC_ADDR_BIOS_OEM: + case EC_ADDR_TRIGGER: + case EC_ADDR_OEM_4: + case EC_ADDR_CHARGE_CTRL: + case EC_ADDR_LIGHTBAR_BAT_CTRL: + case EC_ADDR_LIGHTBAR_BAT_RED: + case EC_ADDR_LIGHTBAR_BAT_GREEN: + case EC_ADDR_LIGHTBAR_BAT_BLUE: + return true; + default: + return false; + } +} + +static bool uniwill_readable_reg(struct device *dev, unsigned int reg) +{ + switch (reg) { + case EC_ADDR_CPU_TEMP: + case EC_ADDR_GPU_TEMP: + case EC_ADDR_MAIN_FAN_RPM_1: + case EC_ADDR_MAIN_FAN_RPM_2: + case EC_ADDR_SECOND_FAN_RPM_1: + case EC_ADDR_SECOND_FAN_RPM_2: + case EC_ADDR_BAT_ALERT: + case EC_ADDR_PROJECT_ID: + case EC_ADDR_AP_OEM: + case EC_ADDR_LIGHTBAR_AC_CTRL: + case EC_ADDR_LIGHTBAR_AC_RED: + case EC_ADDR_LIGHTBAR_AC_GREEN: + case EC_ADDR_LIGHTBAR_AC_BLUE: + case EC_ADDR_BIOS_OEM: + case EC_ADDR_PWM_1: + case EC_ADDR_PWM_2: + case EC_ADDR_TRIGGER: + case EC_ADDR_SWITCH_STATUS: + case EC_ADDR_OEM_4: + case EC_ADDR_CHARGE_CTRL: + case EC_ADDR_LIGHTBAR_BAT_CTRL: + case EC_ADDR_LIGHTBAR_BAT_RED: + case EC_ADDR_LIGHTBAR_BAT_GREEN: + case EC_ADDR_LIGHTBAR_BAT_BLUE: + return true; + default: + return false; + } +} + +static bool uniwill_volatile_reg(struct device *dev, unsigned int reg) +{ + switch (reg) { + case EC_ADDR_CPU_TEMP: + case EC_ADDR_GPU_TEMP: + case EC_ADDR_MAIN_FAN_RPM_1: + case EC_ADDR_MAIN_FAN_RPM_2: + case EC_ADDR_SECOND_FAN_RPM_1: + case EC_ADDR_SECOND_FAN_RPM_2: + case EC_ADDR_BAT_ALERT: + case EC_ADDR_PWM_1: + case EC_ADDR_PWM_2: + case EC_ADDR_TRIGGER: + case EC_ADDR_SWITCH_STATUS: + case EC_ADDR_CHARGE_CTRL: + return true; + default: + return false; + } +} + +static const struct regmap_config uniwill_ec_config = { + .reg_bits = 16, + .val_bits = 8, + .writeable_reg = uniwill_writeable_reg, + .readable_reg = uniwill_readable_reg, + .volatile_reg = uniwill_volatile_reg, + .can_sleep = true, + .max_register = 0xFFF, + .cache_type = REGCACHE_MAPLE, + .use_single_read = true, + .use_single_write = true, +}; + +static ssize_t fn_lock_toggle_enable_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct uniwill_data *data = dev_get_drvdata(dev); + unsigned int value; + bool enable; + int ret; + + ret = kstrtobool(buf, &enable); + if (ret < 0) + return ret; + + if (enable) + value = FN_LOCK_STATUS; + else + value = 0; + + ret = regmap_update_bits(data->regmap, EC_ADDR_BIOS_OEM, FN_LOCK_STATUS, value); + if (ret < 0) + return ret; + + return count; +} + +static ssize_t fn_lock_toggle_enable_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct uniwill_data *data = dev_get_drvdata(dev); + unsigned int value; + int ret; + + ret = regmap_read(data->regmap, EC_ADDR_BIOS_OEM, &value); + if (ret < 0) + return ret; + + return sysfs_emit(buf, "%d\n", !!(value & FN_LOCK_STATUS)); +} + +static DEVICE_ATTR_RW(fn_lock_toggle_enable); + +static ssize_t super_key_toggle_enable_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct uniwill_data *data = dev_get_drvdata(dev); + unsigned int value; + bool enable; + int ret; + + ret = kstrtobool(buf, &enable); + if (ret < 0) + 
return ret; + + guard(mutex)(&data->super_key_lock); + + ret = regmap_read(data->regmap, EC_ADDR_SWITCH_STATUS, &value); + if (ret < 0) + return ret; + + /* + * We can only toggle the super key lock, so we return early if the setting + * is already in the correct state. + */ + if (enable == !(value & SUPER_KEY_LOCK_STATUS)) + return count; + + ret = regmap_write_bits(data->regmap, EC_ADDR_TRIGGER, TRIGGER_SUPER_KEY_LOCK, + TRIGGER_SUPER_KEY_LOCK); + if (ret < 0) + return ret; + + return count; +} + +static ssize_t super_key_toggle_enable_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct uniwill_data *data = dev_get_drvdata(dev); + unsigned int value; + int ret; + + ret = regmap_read(data->regmap, EC_ADDR_SWITCH_STATUS, &value); + if (ret < 0) + return ret; + + return sysfs_emit(buf, "%d\n", !(value & SUPER_KEY_LOCK_STATUS)); +} + +static DEVICE_ATTR_RW(super_key_toggle_enable); + +static ssize_t touchpad_toggle_enable_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct uniwill_data *data = dev_get_drvdata(dev); + unsigned int value; + bool enable; + int ret; + + ret = kstrtobool(buf, &enable); + if (ret < 0) + return ret; + + if (enable) + value = 0; + else + value = TOUCHPAD_TOGGLE_OFF; + + ret = regmap_update_bits(data->regmap, EC_ADDR_OEM_4, TOUCHPAD_TOGGLE_OFF, value); + if (ret < 0) + return ret; + + return count; +} + +static ssize_t touchpad_toggle_enable_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct uniwill_data *data = dev_get_drvdata(dev); + unsigned int value; + int ret; + + ret = regmap_read(data->regmap, EC_ADDR_OEM_4, &value); + if (ret < 0) + return ret; + + return sysfs_emit(buf, "%d\n", !(value & TOUCHPAD_TOGGLE_OFF)); +} + +static DEVICE_ATTR_RW(touchpad_toggle_enable); + +static ssize_t rainbow_animation_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct uniwill_data *data = dev_get_drvdata(dev); + unsigned int value; + bool enable; + int ret; + + ret = kstrtobool(buf, &enable); + if (ret < 0) + return ret; + + if (enable) + value = LIGHTBAR_WELCOME; + else + value = 0; + + guard(mutex)(&data->led_lock); + + ret = regmap_update_bits(data->regmap, EC_ADDR_LIGHTBAR_AC_CTRL, LIGHTBAR_WELCOME, value); + if (ret < 0) + return ret; + + ret = regmap_update_bits(data->regmap, EC_ADDR_LIGHTBAR_BAT_CTRL, LIGHTBAR_WELCOME, value); + if (ret < 0) + return ret; + + return count; +} + +static ssize_t rainbow_animation_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct uniwill_data *data = dev_get_drvdata(dev); + unsigned int value; + int ret; + + ret = regmap_read(data->regmap, EC_ADDR_LIGHTBAR_AC_CTRL, &value); + if (ret < 0) + return ret; + + return sysfs_emit(buf, "%d\n", !!(value & LIGHTBAR_WELCOME)); +} + +static DEVICE_ATTR_RW(rainbow_animation); + +static ssize_t breathing_in_suspend_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + struct uniwill_data *data = dev_get_drvdata(dev); + unsigned int value; + bool enable; + int ret; + + ret = kstrtobool(buf, &enable); + if (ret < 0) + return ret; + + if (enable) + value = 0; + else + value = LIGHTBAR_S3_OFF; + + /* We only access a single register here, so we do not need to use data->led_lock */ + ret = regmap_update_bits(data->regmap, EC_ADDR_LIGHTBAR_AC_CTRL, LIGHTBAR_S3_OFF, value); + if (ret < 0) + return ret; + + return count; +} + +static ssize_t breathing_in_suspend_show(struct device 
*dev, struct device_attribute *attr, + char *buf) +{ + struct uniwill_data *data = dev_get_drvdata(dev); + unsigned int value; + int ret; + + ret = regmap_read(data->regmap, EC_ADDR_LIGHTBAR_AC_CTRL, &value); + if (ret < 0) + return ret; + + return sysfs_emit(buf, "%d\n", !(value & LIGHTBAR_S3_OFF)); +} + +static DEVICE_ATTR_RW(breathing_in_suspend); + +static struct attribute *uniwill_attrs[] = { + /* Keyboard-related */ + &dev_attr_fn_lock_toggle_enable.attr, + &dev_attr_super_key_toggle_enable.attr, + &dev_attr_touchpad_toggle_enable.attr, + /* Lightbar-related */ + &dev_attr_rainbow_animation.attr, + &dev_attr_breathing_in_suspend.attr, + NULL +}; + +static umode_t uniwill_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n) +{ + if (attr == &dev_attr_fn_lock_toggle_enable.attr) { + if (supported_features & UNIWILL_FEATURE_FN_LOCK_TOGGLE) + return attr->mode; + } + + if (attr == &dev_attr_super_key_toggle_enable.attr) { + if (supported_features & UNIWILL_FEATURE_SUPER_KEY_TOGGLE) + return attr->mode; + } + + if (attr == &dev_attr_touchpad_toggle_enable.attr) { + if (supported_features & UNIWILL_FEATURE_TOUCHPAD_TOGGLE) + return attr->mode; + } + + if (attr == &dev_attr_rainbow_animation.attr || + attr == &dev_attr_breathing_in_suspend.attr) { + if (supported_features & UNIWILL_FEATURE_LIGHTBAR) + return attr->mode; + } + + return 0; +} + +static const struct attribute_group uniwill_group = { + .is_visible = uniwill_attr_is_visible, + .attrs = uniwill_attrs, +}; + +static const struct attribute_group *uniwill_groups[] = { + &uniwill_group, + NULL +}; + +static int uniwill_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, + long *val) +{ + struct uniwill_data *data = dev_get_drvdata(dev); + unsigned int value; + __be16 rpm; + int ret; + + switch (type) { + case hwmon_temp: + switch (channel) { + case 0: + ret = regmap_read(data->regmap, EC_ADDR_CPU_TEMP, &value); + break; + case 1: + ret = regmap_read(data->regmap, EC_ADDR_GPU_TEMP, &value); + break; + default: + return -EOPNOTSUPP; + } + + if (ret < 0) + return ret; + + *val = value * MILLIDEGREE_PER_DEGREE; + return 0; + case hwmon_fan: + switch (channel) { + case 0: + ret = regmap_bulk_read(data->regmap, EC_ADDR_MAIN_FAN_RPM_1, &rpm, + sizeof(rpm)); + break; + case 1: + ret = regmap_bulk_read(data->regmap, EC_ADDR_SECOND_FAN_RPM_1, &rpm, + sizeof(rpm)); + break; + default: + return -EOPNOTSUPP; + } + + if (ret < 0) + return ret; + + *val = be16_to_cpu(rpm); + return 0; + case hwmon_pwm: + switch (channel) { + case 0: + ret = regmap_read(data->regmap, EC_ADDR_PWM_1, &value); + break; + case 1: + ret = regmap_read(data->regmap, EC_ADDR_PWM_2, &value); + break; + default: + return -EOPNOTSUPP; + } + + if (ret < 0) + return ret; + + *val = fixp_linear_interpolate(0, 0, PWM_MAX, U8_MAX, value); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int uniwill_read_string(struct device *dev, enum hwmon_sensor_types type, u32 attr, + int channel, const char **str) +{ + switch (type) { + case hwmon_temp: + *str = uniwill_temp_labels[channel]; + return 0; + case hwmon_fan: + *str = uniwill_fan_labels[channel]; + return 0; + default: + return -EOPNOTSUPP; + } +} + +static const struct hwmon_ops uniwill_ops = { + .visible = 0444, + .read = uniwill_read, + .read_string = uniwill_read_string, +}; + +static const struct hwmon_channel_info * const uniwill_info[] = { + HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ), + HWMON_CHANNEL_INFO(temp, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | 
HWMON_T_LABEL), + HWMON_CHANNEL_INFO(fan, + HWMON_F_INPUT | HWMON_F_LABEL, + HWMON_F_INPUT | HWMON_F_LABEL), + HWMON_CHANNEL_INFO(pwm, + HWMON_PWM_INPUT, + HWMON_PWM_INPUT), + NULL +}; + +static const struct hwmon_chip_info uniwill_chip_info = { + .ops = &uniwill_ops, + .info = uniwill_info, +}; + +static int uniwill_hwmon_init(struct uniwill_data *data) +{ + struct device *hdev; + + if (!(supported_features & UNIWILL_FEATURE_HWMON)) + return 0; + + hdev = devm_hwmon_device_register_with_info(data->dev, "uniwill", data, + &uniwill_chip_info, NULL); + + return PTR_ERR_OR_ZERO(hdev); +} + +static const unsigned int uniwill_led_channel_to_bat_reg[LED_CHANNELS] = { + EC_ADDR_LIGHTBAR_BAT_RED, + EC_ADDR_LIGHTBAR_BAT_GREEN, + EC_ADDR_LIGHTBAR_BAT_BLUE, +}; + +static const unsigned int uniwill_led_channel_to_ac_reg[LED_CHANNELS] = { + EC_ADDR_LIGHTBAR_AC_RED, + EC_ADDR_LIGHTBAR_AC_GREEN, + EC_ADDR_LIGHTBAR_AC_BLUE, +}; + +static int uniwill_led_brightness_set(struct led_classdev *led_cdev, enum led_brightness brightness) +{ + struct led_classdev_mc *led_mc_cdev = lcdev_to_mccdev(led_cdev); + struct uniwill_data *data = container_of(led_mc_cdev, struct uniwill_data, led_mc_cdev); + unsigned int value; + int ret; + + ret = led_mc_calc_color_components(led_mc_cdev, brightness); + if (ret < 0) + return ret; + + guard(mutex)(&data->led_lock); + + for (int i = 0; i < LED_CHANNELS; i++) { + /* Prevent the brightness values from overflowing */ + value = min(LED_MAX_BRIGHTNESS, data->led_mc_subled_info[i].brightness); + ret = regmap_write(data->regmap, uniwill_led_channel_to_ac_reg[i], value); + if (ret < 0) + return ret; + + ret = regmap_write(data->regmap, uniwill_led_channel_to_bat_reg[i], value); + if (ret < 0) + return ret; + } + + if (brightness) + value = 0; + else + value = LIGHTBAR_S0_OFF; + + ret = regmap_update_bits(data->regmap, EC_ADDR_LIGHTBAR_AC_CTRL, LIGHTBAR_S0_OFF, value); + if (ret < 0) + return ret; + + return regmap_update_bits(data->regmap, EC_ADDR_LIGHTBAR_BAT_CTRL, LIGHTBAR_S0_OFF, value); +} + +#define LIGHTBAR_MASK (LIGHTBAR_APP_EXISTS | LIGHTBAR_S0_OFF | LIGHTBAR_S3_OFF | LIGHTBAR_WELCOME) + +static int uniwill_led_init(struct uniwill_data *data) +{ + struct led_init_data init_data = { + .devicename = DRIVER_NAME, + .default_label = "multicolor:" LED_FUNCTION_STATUS, + .devname_mandatory = true, + }; + unsigned int color_indices[3] = { + LED_COLOR_ID_RED, + LED_COLOR_ID_GREEN, + LED_COLOR_ID_BLUE, + }; + unsigned int value; + int ret; + + if (!(supported_features & UNIWILL_FEATURE_LIGHTBAR)) + return 0; + + ret = devm_mutex_init(data->dev, &data->led_lock); + if (ret < 0) + return ret; + + /* + * The EC has separate lightbar settings for AC and battery mode, + * so we have to ensure that both settings are the same. + */ + ret = regmap_read(data->regmap, EC_ADDR_LIGHTBAR_AC_CTRL, &value); + if (ret < 0) + return ret; + + value |= LIGHTBAR_APP_EXISTS; + ret = regmap_write(data->regmap, EC_ADDR_LIGHTBAR_AC_CTRL, value); + if (ret < 0) + return ret; + + /* + * The breathing animation during suspend is not supported when + * running on battery power. 
+ */ + value |= LIGHTBAR_S3_OFF; + ret = regmap_update_bits(data->regmap, EC_ADDR_LIGHTBAR_BAT_CTRL, LIGHTBAR_MASK, value); + if (ret < 0) + return ret; + + data->led_mc_cdev.led_cdev.color = LED_COLOR_ID_MULTI; + data->led_mc_cdev.led_cdev.max_brightness = LED_MAX_BRIGHTNESS; + data->led_mc_cdev.led_cdev.flags = LED_REJECT_NAME_CONFLICT; + data->led_mc_cdev.led_cdev.brightness_set_blocking = uniwill_led_brightness_set; + + if (value & LIGHTBAR_S0_OFF) + data->led_mc_cdev.led_cdev.brightness = 0; + else + data->led_mc_cdev.led_cdev.brightness = LED_MAX_BRIGHTNESS; + + for (int i = 0; i < LED_CHANNELS; i++) { + data->led_mc_subled_info[i].color_index = color_indices[i]; + + ret = regmap_read(data->regmap, uniwill_led_channel_to_ac_reg[i], &value); + if (ret < 0) + return ret; + + /* + * Make sure that the initial intensity value is not greater than + * the maximum brightness. + */ + value = min(LED_MAX_BRIGHTNESS, value); + ret = regmap_write(data->regmap, uniwill_led_channel_to_ac_reg[i], value); + if (ret < 0) + return ret; + + ret = regmap_write(data->regmap, uniwill_led_channel_to_bat_reg[i], value); + if (ret < 0) + return ret; + + data->led_mc_subled_info[i].intensity = value; + data->led_mc_subled_info[i].channel = i; + } + + data->led_mc_cdev.subled_info = data->led_mc_subled_info; + data->led_mc_cdev.num_colors = LED_CHANNELS; + + return devm_led_classdev_multicolor_register_ext(data->dev, &data->led_mc_cdev, + &init_data); +} + +static int uniwill_get_property(struct power_supply *psy, const struct power_supply_ext *ext, + void *drvdata, enum power_supply_property psp, + union power_supply_propval *val) +{ + struct uniwill_data *data = drvdata; + union power_supply_propval prop; + unsigned int regval; + int ret; + + switch (psp) { + case POWER_SUPPLY_PROP_HEALTH: + ret = power_supply_get_property_direct(psy, POWER_SUPPLY_PROP_PRESENT, &prop); + if (ret < 0) + return ret; + + if (!prop.intval) { + val->intval = POWER_SUPPLY_HEALTH_NO_BATTERY; + return 0; + } + + ret = power_supply_get_property_direct(psy, POWER_SUPPLY_PROP_STATUS, &prop); + if (ret < 0) + return ret; + + if (prop.intval == POWER_SUPPLY_STATUS_UNKNOWN) { + val->intval = POWER_SUPPLY_HEALTH_UNKNOWN; + return 0; + } + + ret = regmap_read(data->regmap, EC_ADDR_BAT_ALERT, ®val); + if (ret < 0) + return ret; + + if (regval) { + /* Charging issue */ + val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE; + return 0; + } + + val->intval = POWER_SUPPLY_HEALTH_GOOD; + return 0; + case POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD: + ret = regmap_read(data->regmap, EC_ADDR_CHARGE_CTRL, ®val); + if (ret < 0) + return ret; + + val->intval = clamp_val(FIELD_GET(CHARGE_CTRL_MASK, regval), 0, 100); + return 0; + default: + return -EINVAL; + } +} + +static int uniwill_set_property(struct power_supply *psy, const struct power_supply_ext *ext, + void *drvdata, enum power_supply_property psp, + const union power_supply_propval *val) +{ + struct uniwill_data *data = drvdata; + + switch (psp) { + case POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD: + if (val->intval < 1 || val->intval > 100) + return -EINVAL; + + return regmap_update_bits(data->regmap, EC_ADDR_CHARGE_CTRL, CHARGE_CTRL_MASK, + val->intval); + default: + return -EINVAL; + } +} + +static int uniwill_property_is_writeable(struct power_supply *psy, + const struct power_supply_ext *ext, void *drvdata, + enum power_supply_property psp) +{ + if (psp == POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD) + return true; + + return false; +} + +static const enum power_supply_property 
uniwill_properties[] = { + POWER_SUPPLY_PROP_HEALTH, + POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD, +}; + +static const struct power_supply_ext uniwill_extension = { + .name = DRIVER_NAME, + .properties = uniwill_properties, + .num_properties = ARRAY_SIZE(uniwill_properties), + .get_property = uniwill_get_property, + .set_property = uniwill_set_property, + .property_is_writeable = uniwill_property_is_writeable, +}; + +static int uniwill_add_battery(struct power_supply *battery, struct acpi_battery_hook *hook) +{ + struct uniwill_data *data = container_of(hook, struct uniwill_data, hook); + struct uniwill_battery_entry *entry; + int ret; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + ret = power_supply_register_extension(battery, &uniwill_extension, data->dev, data); + if (ret < 0) { + kfree(entry); + return ret; + } + + guard(mutex)(&data->battery_lock); + + entry->battery = battery; + list_add(&entry->head, &data->batteries); + + return 0; +} + +static int uniwill_remove_battery(struct power_supply *battery, struct acpi_battery_hook *hook) +{ + struct uniwill_data *data = container_of(hook, struct uniwill_data, hook); + struct uniwill_battery_entry *entry, *tmp; + + scoped_guard(mutex, &data->battery_lock) { + list_for_each_entry_safe(entry, tmp, &data->batteries, head) { + if (entry->battery == battery) { + list_del(&entry->head); + kfree(entry); + break; + } + } + } + + power_supply_unregister_extension(battery, &uniwill_extension); + + return 0; +} + +static int uniwill_battery_init(struct uniwill_data *data) +{ + int ret; + + if (!(supported_features & UNIWILL_FEATURE_BATTERY)) + return 0; + + ret = devm_mutex_init(data->dev, &data->battery_lock); + if (ret < 0) + return ret; + + INIT_LIST_HEAD(&data->batteries); + data->hook.name = "Uniwill Battery Extension"; + data->hook.add_battery = uniwill_add_battery; + data->hook.remove_battery = uniwill_remove_battery; + + return devm_battery_hook_register(data->dev, &data->hook); +} + +static int uniwill_notifier_call(struct notifier_block *nb, unsigned long action, void *dummy) +{ + struct uniwill_data *data = container_of(nb, struct uniwill_data, nb); + struct uniwill_battery_entry *entry; + + switch (action) { + case UNIWILL_OSD_BATTERY_ALERT: + mutex_lock(&data->battery_lock); + list_for_each_entry(entry, &data->batteries, head) { + power_supply_changed(entry->battery); + } + mutex_unlock(&data->battery_lock); + + return NOTIFY_OK; + case UNIWILL_OSD_DC_ADAPTER_CHANGED: + /* noop for the time being, will change once charging priority + * gets implemented. 
+ */ + + return NOTIFY_OK; + default: + mutex_lock(&data->input_lock); + sparse_keymap_report_event(data->input_device, action, 1, true); + mutex_unlock(&data->input_lock); + + return NOTIFY_OK; + } +} + +static int uniwill_input_init(struct uniwill_data *data) +{ + int ret; + + ret = devm_mutex_init(data->dev, &data->input_lock); + if (ret < 0) + return ret; + + data->input_device = devm_input_allocate_device(data->dev); + if (!data->input_device) + return -ENOMEM; + + ret = sparse_keymap_setup(data->input_device, uniwill_keymap, NULL); + if (ret < 0) + return ret; + + data->input_device->name = "Uniwill WMI hotkeys"; + data->input_device->phys = "wmi/input0"; + data->input_device->id.bustype = BUS_HOST; + ret = input_register_device(data->input_device); + if (ret < 0) + return ret; + + data->nb.notifier_call = uniwill_notifier_call; + + return devm_uniwill_wmi_register_notifier(data->dev, &data->nb); +} + +static void uniwill_disable_manual_control(void *context) +{ + struct uniwill_data *data = context; + + regmap_clear_bits(data->regmap, EC_ADDR_AP_OEM, ENABLE_MANUAL_CTRL); +} + +static int uniwill_ec_init(struct uniwill_data *data) +{ + unsigned int value; + int ret; + + ret = regmap_read(data->regmap, EC_ADDR_PROJECT_ID, &value); + if (ret < 0) + return ret; + + dev_dbg(data->dev, "Project ID: %u\n", value); + + ret = regmap_set_bits(data->regmap, EC_ADDR_AP_OEM, ENABLE_MANUAL_CTRL); + if (ret < 0) + return ret; + + return devm_add_action_or_reset(data->dev, uniwill_disable_manual_control, data); +} + +static int uniwill_probe(struct platform_device *pdev) +{ + struct uniwill_data *data; + struct regmap *regmap; + acpi_handle handle; + int ret; + + handle = ACPI_HANDLE(&pdev->dev); + if (!handle) + return -ENODEV; + + data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->dev = &pdev->dev; + data->handle = handle; + platform_set_drvdata(pdev, data); + + regmap = devm_regmap_init(&pdev->dev, &uniwill_ec_bus, data, &uniwill_ec_config); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + data->regmap = regmap; + ret = devm_mutex_init(&pdev->dev, &data->super_key_lock); + if (ret < 0) + return ret; + + ret = uniwill_ec_init(data); + if (ret < 0) + return ret; + + ret = uniwill_battery_init(data); + if (ret < 0) + return ret; + + ret = uniwill_led_init(data); + if (ret < 0) + return ret; + + ret = uniwill_hwmon_init(data); + if (ret < 0) + return ret; + + return uniwill_input_init(data); +} + +static void uniwill_shutdown(struct platform_device *pdev) +{ + struct uniwill_data *data = platform_get_drvdata(pdev); + + regmap_clear_bits(data->regmap, EC_ADDR_AP_OEM, ENABLE_MANUAL_CTRL); +} + +static int uniwill_suspend_keyboard(struct uniwill_data *data) +{ + if (!(supported_features & UNIWILL_FEATURE_SUPER_KEY_TOGGLE)) + return 0; + + /* + * The EC_ADDR_SWITCH_STATUS is marked as volatile, so we have to restore it + * ourselves. + */ + return regmap_read(data->regmap, EC_ADDR_SWITCH_STATUS, &data->last_switch_status); +} + +static int uniwill_suspend_battery(struct uniwill_data *data) +{ + if (!(supported_features & UNIWILL_FEATURE_BATTERY)) + return 0; + + /* + * Save the current charge limit in order to restore it during resume. + * We cannot use the regmap code for that since this register needs to + * be declared as volatile due to CHARGE_CTRL_REACHED. 
+ */ + return regmap_read(data->regmap, EC_ADDR_CHARGE_CTRL, &data->last_charge_ctrl); +} + +static int uniwill_suspend(struct device *dev) +{ + struct uniwill_data *data = dev_get_drvdata(dev); + int ret; + + ret = uniwill_suspend_keyboard(data); + if (ret < 0) + return ret; + + ret = uniwill_suspend_battery(data); + if (ret < 0) + return ret; + + regcache_cache_only(data->regmap, true); + regcache_mark_dirty(data->regmap); + + return 0; +} + +static int uniwill_resume_keyboard(struct uniwill_data *data) +{ + unsigned int value; + int ret; + + if (!(supported_features & UNIWILL_FEATURE_SUPER_KEY_TOGGLE)) + return 0; + + ret = regmap_read(data->regmap, EC_ADDR_SWITCH_STATUS, &value); + if (ret < 0) + return ret; + + if ((data->last_switch_status & SUPER_KEY_LOCK_STATUS) == (value & SUPER_KEY_LOCK_STATUS)) + return 0; + + return regmap_write_bits(data->regmap, EC_ADDR_TRIGGER, TRIGGER_SUPER_KEY_LOCK, + TRIGGER_SUPER_KEY_LOCK); +} + +static int uniwill_resume_battery(struct uniwill_data *data) +{ + if (!(supported_features & UNIWILL_FEATURE_BATTERY)) + return 0; + + return regmap_update_bits(data->regmap, EC_ADDR_CHARGE_CTRL, CHARGE_CTRL_MASK, + data->last_charge_ctrl); +} + +static int uniwill_resume(struct device *dev) +{ + struct uniwill_data *data = dev_get_drvdata(dev); + int ret; + + regcache_cache_only(data->regmap, false); + + ret = regcache_sync(data->regmap); + if (ret < 0) + return ret; + + ret = uniwill_resume_keyboard(data); + if (ret < 0) + return ret; + + return uniwill_resume_battery(data); +} + +static DEFINE_SIMPLE_DEV_PM_OPS(uniwill_pm_ops, uniwill_suspend, uniwill_resume); + +/* + * We only use the DMI table for autoloading because the ACPI device itself + * does not guarantee that the underlying EC implementation is supported. + */ +static const struct acpi_device_id uniwill_id_table[] = { + { "INOU0000" }, + { }, +}; + +static struct platform_driver uniwill_driver = { + .driver = { + .name = DRIVER_NAME, + .dev_groups = uniwill_groups, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + .acpi_match_table = uniwill_id_table, + .pm = pm_sleep_ptr(&uniwill_pm_ops), + }, + .probe = uniwill_probe, + .shutdown = uniwill_shutdown, +}; + +static const struct dmi_system_id uniwill_dmi_table[] __initconst = { + { + .ident = "XMG FUSION 15", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "LAPQC71A"), + }, + }, + { + .ident = "XMG FUSION 15", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "LAPQC71B"), + }, + }, + { + .ident = "Intel NUC x15", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel(R) Client Systems"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "LAPAC71H"), + }, + .driver_data = (void *)(UNIWILL_FEATURE_FN_LOCK_TOGGLE | + UNIWILL_FEATURE_SUPER_KEY_TOGGLE | + UNIWILL_FEATURE_TOUCHPAD_TOGGLE | + UNIWILL_FEATURE_BATTERY | + UNIWILL_FEATURE_HWMON), + }, + { + .ident = "Intel NUC x15", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel(R) Client Systems"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "LAPKC71F"), + }, + .driver_data = (void *)(UNIWILL_FEATURE_FN_LOCK_TOGGLE | + UNIWILL_FEATURE_SUPER_KEY_TOGGLE | + UNIWILL_FEATURE_TOUCHPAD_TOGGLE | + UNIWILL_FEATURE_LIGHTBAR | + UNIWILL_FEATURE_BATTERY | + UNIWILL_FEATURE_HWMON), + }, + { + .ident = "TUXEDO InfinityBook Pro 14 Gen6 Intel", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "PHxTxX1"), + }, + }, + { + .ident = "TUXEDO InfinityBook Pro 14 Gen6 Intel", + .matches = { + 
DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "PHxTQx1"), + }, + }, + { + .ident = "TUXEDO InfinityBook Pro 14/16 Gen7 Intel", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "PHxARX1_PHxAQF1"), + }, + }, + { + .ident = "TUXEDO InfinityBook Pro 16 Gen7 Intel/Commodore Omnia-Book Pro Gen 7", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "PH6AG01_PH6AQ71_PH6AQI1"), + }, + }, + { + .ident = "TUXEDO InfinityBook Pro 14/16 Gen8 Intel/Commodore Omnia-Book Pro Gen 8", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1"), + }, + }, + { + .ident = "TUXEDO InfinityBook Pro 14 Gen8 Intel/Commodore Omnia-Book Pro Gen 8", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "PH4PG31"), + }, + }, + { + .ident = "TUXEDO InfinityBook Pro 16 Gen8 Intel", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "PH6PG01_PH6PG71"), + }, + }, + { + .ident = "TUXEDO InfinityBook Pro 14/15 Gen9 AMD", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "GXxHRXx"), + }, + }, + { + .ident = "TUXEDO InfinityBook Pro 14/15 Gen9 Intel/Commodore Omnia-Book 15 Gen9", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "GXxMRXx"), + }, + }, + { + .ident = "TUXEDO InfinityBook Pro 14/15 Gen10 AMD", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "XxHP4NAx"), + }, + }, + { + .ident = "TUXEDO InfinityBook Pro 14/15 Gen10 AMD", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "XxKK4NAx_XxSP4NAx"), + }, + }, + { + .ident = "TUXEDO InfinityBook Pro 15 Gen10 Intel", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "XxAR4NAx"), + }, + }, + { + .ident = "TUXEDO InfinityBook Max 15 Gen10 AMD", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "X5KK45xS_X5SP45xS"), + }, + }, + { + .ident = "TUXEDO InfinityBook Max 16 Gen10 AMD", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "X6HP45xU"), + }, + }, + { + .ident = "TUXEDO InfinityBook Max 16 Gen10 AMD", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "X6KK45xU_X6SP45xU"), + }, + }, + { + .ident = "TUXEDO InfinityBook Max 15 Gen10 Intel", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "X5AR45xS"), + }, + }, + { + .ident = "TUXEDO InfinityBook Max 16 Gen10 Intel", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "X6AR55xU"), + }, + }, + { + .ident = "TUXEDO Polaris 15 Gen1 AMD", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "POLARIS1501A1650TI"), + }, + }, + { + .ident = "TUXEDO Polaris 15 Gen1 AMD", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "POLARIS1501A2060"), + }, + }, + { + .ident = "TUXEDO Polaris 17 Gen1 AMD", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "POLARIS1701A1650TI"), + }, + }, + { + .ident = "TUXEDO Polaris 17 Gen1 AMD", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "POLARIS1701A2060"), + }, + }, + { + .ident = "TUXEDO Polaris 15 Gen1 Intel", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), 
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "POLARIS1501I1650TI"), + }, + }, + { + .ident = "TUXEDO Polaris 15 Gen1 Intel", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "POLARIS1501I2060"), + }, + }, + { + .ident = "TUXEDO Polaris 17 Gen1 Intel", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "POLARIS1701I1650TI"), + }, + }, + { + .ident = "TUXEDO Polaris 17 Gen1 Intel", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "POLARIS1701I2060"), + }, + }, + { + .ident = "TUXEDO Trinity 15 Intel Gen1", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "TRINITY1501I"), + }, + }, + { + .ident = "TUXEDO Trinity 17 Intel Gen1", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "TRINITY1701I"), + }, + }, + { + .ident = "TUXEDO Polaris 15/17 Gen2 AMD", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "GMxMGxx"), + }, + }, + { + .ident = "TUXEDO Polaris 15/17 Gen2 Intel", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "GMxNGxx"), + }, + }, + { + .ident = "TUXEDO Stellaris/Polaris 15/17 Gen3 AMD", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "GMxZGxx"), + }, + }, + { + .ident = "TUXEDO Stellaris/Polaris 15/17 Gen3 Intel", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "GMxTGxx"), + }, + }, + { + .ident = "TUXEDO Stellaris/Polaris 15/17 Gen4 AMD", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "GMxRGxx"), + }, + }, + { + .ident = "TUXEDO Stellaris 15 Gen4 Intel", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "GMxAGxx"), + }, + }, + { + .ident = "TUXEDO Polaris 15/17 Gen5 AMD", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "GMxXGxx"), + }, + }, + { + .ident = "TUXEDO Stellaris 16 Gen5 AMD", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "GM6XGxX"), + }, + }, + { + .ident = "TUXEDO Stellaris 16/17 Gen5 Intel/Commodore ORION Gen 5", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "GMxPXxx"), + }, + }, + { + .ident = "TUXEDO Stellaris Slim 15 Gen6 AMD", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "GMxHGxx"), + }, + }, + { + .ident = "TUXEDO Stellaris Slim 15 Gen6 Intel/Commodore ORION Slim 15 Gen6", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "GM5IXxA"), + }, + }, + { + .ident = "TUXEDO Stellaris 16 Gen6 Intel/Commodore ORION 16 Gen6", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "GM6IXxB_MB1"), + }, + }, + { + .ident = "TUXEDO Stellaris 16 Gen6 Intel/Commodore ORION 16 Gen6", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "GM6IXxB_MB2"), + }, + }, + { + .ident = "TUXEDO Stellaris 17 Gen6 Intel/Commodore ORION 17 Gen6", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "GM7IXxN"), + }, + }, + { + .ident = "TUXEDO Stellaris 16 Gen7 AMD", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "X6FR5xxY"), + }, + }, + { + .ident = "TUXEDO Stellaris 16 Gen7 Intel", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + 
DMI_EXACT_MATCH(DMI_BOARD_NAME, "X6AR5xxY"), + }, + }, + { + .ident = "TUXEDO Stellaris 16 Gen7 Intel", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "X6AR5xxY_mLED"), + }, + }, + { + .ident = "TUXEDO Pulse 14 Gen1 AMD", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "PULSE1401"), + }, + }, + { + .ident = "TUXEDO Pulse 15 Gen1 AMD", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "PULSE1501"), + }, + }, + { + .ident = "TUXEDO Pulse 15 Gen2 AMD", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"), + DMI_EXACT_MATCH(DMI_BOARD_NAME, "PF5LUXG"), + }, + }, + { } +}; +MODULE_DEVICE_TABLE(dmi, uniwill_dmi_table); + +static int __init uniwill_init(void) +{ + const struct dmi_system_id *id; + int ret; + + id = dmi_first_match(uniwill_dmi_table); + if (!id) { + if (!force) + return -ENODEV; + + /* Assume that the device supports all features */ + supported_features = UINT_MAX; + pr_warn("Loading on a potentially unsupported device\n"); + } else { + supported_features = (uintptr_t)id->driver_data; + } + + ret = platform_driver_register(&uniwill_driver); + if (ret < 0) + return ret; + + ret = uniwill_wmi_register_driver(); + if (ret < 0) { + platform_driver_unregister(&uniwill_driver); + return ret; + } + + return 0; +} +module_init(uniwill_init); + +static void __exit uniwill_exit(void) +{ + uniwill_wmi_unregister_driver(); + platform_driver_unregister(&uniwill_driver); +} +module_exit(uniwill_exit); + +MODULE_AUTHOR("Armin Wolf <W_Armin@gmx.de>"); +MODULE_DESCRIPTION("Uniwill notebook driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/uniwill/uniwill-wmi.c b/drivers/platform/x86/uniwill/uniwill-wmi.c new file mode 100644 index 000000000000..31d9c39f14ab --- /dev/null +++ b/drivers/platform/x86/uniwill/uniwill-wmi.c @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Linux hotkey driver for Uniwill notebooks. + * + * Special thanks go to PÅ‘cze BarnabĂ¡s, Christoffer Sandberg and Werner Sembach + * for supporting the development of this driver either through prior work or + * by answering questions regarding the underlying WMI interface. 
+ * + * Copyright (C) 2025 Armin Wolf <W_Armin@gmx.de> + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/acpi.h> +#include <linux/device.h> +#include <linux/init.h> +#include <linux/mod_devicetable.h> +#include <linux/notifier.h> +#include <linux/printk.h> +#include <linux/types.h> +#include <linux/wmi.h> + +#include "uniwill-wmi.h" + +#define DRIVER_NAME "uniwill-wmi" +#define UNIWILL_EVENT_GUID "ABBC0F72-8EA1-11D1-00A0-C90629100000" + +static BLOCKING_NOTIFIER_HEAD(uniwill_wmi_chain_head); + +static void devm_uniwill_wmi_unregister_notifier(void *data) +{ + struct notifier_block *nb = data; + + blocking_notifier_chain_unregister(&uniwill_wmi_chain_head, nb); +} + +int devm_uniwill_wmi_register_notifier(struct device *dev, struct notifier_block *nb) +{ + int ret; + + ret = blocking_notifier_chain_register(&uniwill_wmi_chain_head, nb); + if (ret < 0) + return ret; + + return devm_add_action_or_reset(dev, devm_uniwill_wmi_unregister_notifier, nb); +} + +static void uniwill_wmi_notify(struct wmi_device *wdev, union acpi_object *obj) +{ + u32 value; + + if (obj->type != ACPI_TYPE_INTEGER) + return; + + value = obj->integer.value; + + dev_dbg(&wdev->dev, "Received WMI event %u\n", value); + + blocking_notifier_call_chain(&uniwill_wmi_chain_head, value, NULL); +} + +/* + * We cannot fully trust this GUID since Uniwill just copied the WMI GUID + * from the Windows driver example, and others probably did the same. + * + * Because of this we cannot use this WMI GUID for autoloading. Instead the + * associated driver will be registered manually after matching a DMI table. + */ +static const struct wmi_device_id uniwill_wmi_id_table[] = { + { UNIWILL_EVENT_GUID, NULL }, + { } +}; + +static struct wmi_driver uniwill_wmi_driver = { + .driver = { + .name = DRIVER_NAME, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, + .id_table = uniwill_wmi_id_table, + .notify = uniwill_wmi_notify, + .no_singleton = true, +}; + +int __init uniwill_wmi_register_driver(void) +{ + return wmi_driver_register(&uniwill_wmi_driver); +} + +void __exit uniwill_wmi_unregister_driver(void) +{ + wmi_driver_unregister(&uniwill_wmi_driver); +} diff --git a/drivers/platform/x86/uniwill/uniwill-wmi.h b/drivers/platform/x86/uniwill/uniwill-wmi.h new file mode 100644 index 000000000000..48783b2e9ffb --- /dev/null +++ b/drivers/platform/x86/uniwill/uniwill-wmi.h @@ -0,0 +1,129 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Linux hotkey driver for Uniwill notebooks. 
+ * + * Copyright (C) 2025 Armin Wolf <W_Armin@gmx.de> + */ + +#ifndef UNIWILL_WMI_H +#define UNIWILL_WMI_H + +#include <linux/init.h> + +#define UNIWILL_OSD_CAPSLOCK 0x01 +#define UNIWILL_OSD_NUMLOCK 0x02 +#define UNIWILL_OSD_SCROLLLOCK 0x03 + +#define UNIWILL_OSD_TOUCHPAD_ON 0x04 +#define UNIWILL_OSD_TOUCHPAD_OFF 0x05 + +#define UNIWILL_OSD_SILENT_MODE_ON 0x06 +#define UNIWILL_OSD_SILENT_MODE_OFF 0x07 + +#define UNIWILL_OSD_WLAN_ON 0x08 +#define UNIWILL_OSD_WLAN_OFF 0x09 + +#define UNIWILL_OSD_WIMAX_ON 0x0A +#define UNIWILL_OSD_WIMAX_OFF 0x0B + +#define UNIWILL_OSD_BLUETOOTH_ON 0x0C +#define UNIWILL_OSD_BLUETOOTH_OFF 0x0D + +#define UNIWILL_OSD_RF_ON 0x0E +#define UNIWILL_OSD_RF_OFF 0x0F + +#define UNIWILL_OSD_3G_ON 0x10 +#define UNIWILL_OSD_3G_OFF 0x11 + +#define UNIWILL_OSD_WEBCAM_ON 0x12 +#define UNIWILL_OSD_WEBCAM_OFF 0x13 + +#define UNIWILL_OSD_BRIGHTNESSUP 0x14 +#define UNIWILL_OSD_BRIGHTNESSDOWN 0x15 + +#define UNIWILL_OSD_RADIOON 0x1A +#define UNIWILL_OSD_RADIOOFF 0x1B + +#define UNIWILL_OSD_POWERSAVE_ON 0x31 +#define UNIWILL_OSD_POWERSAVE_OFF 0x32 + +#define UNIWILL_OSD_MENU 0x34 + +#define UNIWILL_OSD_MUTE 0x35 +#define UNIWILL_OSD_VOLUMEDOWN 0x36 +#define UNIWILL_OSD_VOLUMEUP 0x37 + +#define UNIWILL_OSD_MENU_2 0x38 + +#define UNIWILL_OSD_LIGHTBAR_ON 0x39 +#define UNIWILL_OSD_LIGHTBAR_OFF 0x3A + +#define UNIWILL_OSD_KB_LED_LEVEL0 0x3B +#define UNIWILL_OSD_KB_LED_LEVEL1 0x3C +#define UNIWILL_OSD_KB_LED_LEVEL2 0x3D +#define UNIWILL_OSD_KB_LED_LEVEL3 0x3E +#define UNIWILL_OSD_KB_LED_LEVEL4 0x3F + +#define UNIWILL_OSD_SUPER_KEY_LOCK_ENABLE 0x40 +#define UNIWILL_OSD_SUPER_KEY_LOCK_DISABLE 0x41 + +#define UNIWILL_OSD_MENU_JP 0x42 + +#define UNIWILL_OSD_CAMERA_ON 0x90 +#define UNIWILL_OSD_CAMERA_OFF 0x91 + +#define UNIWILL_OSD_RFKILL 0xA4 + +#define UNIWILL_OSD_SUPER_KEY_LOCK_CHANGED 0xA5 + +#define UNIWILL_OSD_LIGHTBAR_STATE_CHANGED 0xA6 + +#define UNIWILL_OSD_FAN_BOOST_STATE_CHANGED 0xA7 + +#define UNIWILL_OSD_LCD_SW 0xA9 + +#define UNIWILL_OSD_FAN_OVERTEMP 0xAA + +#define UNIWILL_OSD_DC_ADAPTER_CHANGED 0xAB + +#define UNIWILL_OSD_BAT_HP_OFF 0xAC + +#define UNIWILL_OSD_FAN_DOWN_TEMP 0xAD + +#define UNIWILL_OSD_BATTERY_ALERT 0xAE + +#define UNIWILL_OSD_TIMAP_HAIERLB_SW 0xAF + +#define UNIWILL_OSD_PERFORMANCE_MODE_TOGGLE 0xB0 + +#define UNIWILL_OSD_KBDILLUMDOWN 0xB1 +#define UNIWILL_OSD_KBDILLUMUP 0xB2 + +#define UNIWILL_OSD_BACKLIGHT_LEVEL_CHANGE 0xB3 +#define UNIWILL_OSD_BACKLIGHT_POWER_CHANGE 0xB4 + +#define UNIWILL_OSD_MIC_MUTE 0xB7 + +#define UNIWILL_OSD_FN_LOCK 0xB8 +#define UNIWILL_OSD_KBDILLUMTOGGLE 0xB9 + +#define UNIWILL_OSD_BAT_CHARGE_FULL_24_H 0xBE + +#define UNIWILL_OSD_BAT_ERM_UPDATE 0xBF + +#define UNIWILL_OSD_BENCHMARK_MODE_TOGGLE 0xC0 + +#define UNIWILL_OSD_WEBCAM_TOGGLE 0xCF + +#define UNIWILL_OSD_KBD_BACKLIGHT_CHANGED 0xF0 + +struct device; +struct notifier_block; + +int devm_uniwill_wmi_register_notifier(struct device *dev, struct notifier_block *nb); + +int __init uniwill_wmi_register_driver(void); + +void __exit uniwill_wmi_unregister_driver(void); + +#endif /* UNIWILL_WMI_H */ diff --git a/drivers/platform/x86/x86-android-tablets/lenovo.c b/drivers/platform/x86/x86-android-tablets/lenovo.c index e3d3a8290949..8d825e0b4661 100644 --- a/drivers/platform/x86/x86-android-tablets/lenovo.c +++ b/drivers/platform/x86/x86-android-tablets/lenovo.c @@ -543,7 +543,7 @@ static int __init lenovo_yoga_tab2_830_1050_init_codec(void) ret = device_add_software_node(codec_dev, &lenovo_yoga_tab2_830_1050_wm5102); if (ret) { - ret = dev_err_probe(codec_dev, ret, "adding software 
node\n"); + dev_err_probe(codec_dev, ret, "adding software node\n"); goto err_put_pinctrl; } diff --git a/drivers/platform/x86/x86-android-tablets/vexia_atla10_ec.c b/drivers/platform/x86/x86-android-tablets/vexia_atla10_ec.c index 2f8cd8d9e0ab..ebbedfe5f4e8 100644 --- a/drivers/platform/x86/x86-android-tablets/vexia_atla10_ec.c +++ b/drivers/platform/x86/x86-android-tablets/vexia_atla10_ec.c @@ -183,7 +183,7 @@ static void atla10_ec_external_power_changed(struct power_supply *psy) struct atla10_ec_data *data = power_supply_get_drvdata(psy); /* After charger plug in/out wait 0.5s for things to stabilize */ - mod_delayed_work(system_wq, &data->work, HZ / 2); + mod_delayed_work(system_percpu_wq, &data->work, HZ / 2); } static const enum power_supply_property atla10_ec_psy_props[] = { diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index f4987f54e01b..4b6182cde859 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -2823,14 +2823,18 @@ static void regulator_ena_gpio_free(struct regulator_dev *rdev) static int regulator_ena_gpio_ctrl(struct regulator_dev *rdev, bool enable) { struct regulator_enable_gpio *pin = rdev->ena_pin; + int ret; if (!pin) return -EINVAL; if (enable) { /* Enable GPIO at initial use */ - if (pin->enable_count == 0) - gpiod_set_value_cansleep(pin->gpiod, 1); + if (pin->enable_count == 0) { + ret = gpiod_set_value_cansleep(pin->gpiod, 1); + if (ret) + return ret; + } pin->enable_count++; } else { @@ -2841,7 +2845,10 @@ static int regulator_ena_gpio_ctrl(struct regulator_dev *rdev, bool enable) /* Disable GPIO if not used */ if (pin->enable_count <= 1) { - gpiod_set_value_cansleep(pin->gpiod, 0); + ret = gpiod_set_value_cansleep(pin->gpiod, 0); + if (ret) + return ret; + pin->enable_count = 0; } } diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c index a2d16e9abfb5..254c0a8a4555 100644 --- a/drivers/regulator/fixed.c +++ b/drivers/regulator/fixed.c @@ -330,13 +330,10 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev) drvdata->dev = devm_regulator_register(&pdev->dev, &drvdata->desc, &cfg); - if (IS_ERR(drvdata->dev)) { - ret = dev_err_probe(&pdev->dev, PTR_ERR(drvdata->dev), - "Failed to register regulator: %ld\n", - PTR_ERR(drvdata->dev)); - gpiod_put(cfg.ena_gpiod); - return ret; - } + if (IS_ERR(drvdata->dev)) + return dev_err_probe(&pdev->dev, PTR_ERR(drvdata->dev), + "Failed to register regulator: %ld\n", + PTR_ERR(drvdata->dev)); platform_set_drvdata(pdev, drvdata); diff --git a/drivers/regulator/spacemit-p1.c b/drivers/regulator/spacemit-p1.c index d437e6738ea1..2bf9137e12b1 100644 --- a/drivers/regulator/spacemit-p1.c +++ b/drivers/regulator/spacemit-p1.c @@ -87,10 +87,10 @@ static const struct linear_range p1_ldo_ranges[] = { } #define P1_BUCK_DESC(_n) \ - P1_REG_DESC(BUCK, buck, _n, "vcc", 0x47, BUCK_MASK, 254, p1_buck_ranges) + P1_REG_DESC(BUCK, buck, _n, "vin", 0x47, BUCK_MASK, 254, p1_buck_ranges) #define P1_ALDO_DESC(_n) \ - P1_REG_DESC(ALDO, aldo, _n, "vcc", 0x5b, LDO_MASK, 117, p1_ldo_ranges) + P1_REG_DESC(ALDO, aldo, _n, "vin", 0x5b, LDO_MASK, 117, p1_ldo_ranges) #define P1_DLDO_DESC(_n) \ P1_REG_DESC(DLDO, dldo, _n, "buck5", 0x67, LDO_MASK, 117, p1_ldo_ranges) diff --git a/drivers/s390/char/sclp_mem.c b/drivers/s390/char/sclp_mem.c index 676c085b4f8a..27f0d2f12a8b 100644 --- a/drivers/s390/char/sclp_mem.c +++ b/drivers/s390/char/sclp_mem.c @@ -44,6 +44,9 @@ struct sclp_mem { unsigned int id; unsigned int memmap_on_memory; unsigned int config; +#ifdef CONFIG_KASAN + unsigned int 
early_shadow_mapped; +#endif }; struct sclp_mem_arg { @@ -244,6 +247,16 @@ static ssize_t sclp_config_mem_store(struct kobject *kobj, struct kobj_attribute put_device(&mem->dev); sclp_mem_change_state(addr, block_size, 0); __remove_memory(addr, block_size); +#ifdef CONFIG_KASAN + if (sclp_mem->early_shadow_mapped) { + unsigned long start, end; + + start = (unsigned long)kasan_mem_to_shadow(__va(addr)); + end = start + (block_size >> KASAN_SHADOW_SCALE_SHIFT); + vmemmap_free(start, end, NULL); + sclp_mem->early_shadow_mapped = 0; + } +#endif WRITE_ONCE(sclp_mem->config, 0); } out_unlock: @@ -316,6 +329,9 @@ static int sclp_create_mem(struct sclp_mem *sclp_mem, struct kset *kset, sclp_mem->memmap_on_memory = memmap_on_memory; sclp_mem->config = config; +#ifdef CONFIG_KASAN + sclp_mem->early_shadow_mapped = config; +#endif sclp_mem->id = id; kobject_init(&sclp_mem->kobj, &ktype); rc = kobject_add(&sclp_mem->kobj, &kset->kobj, "memory%d", id); diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c index e3e0e9f36527..a226ff208eda 100644 --- a/drivers/s390/char/vmur.c +++ b/drivers/s390/char/vmur.c @@ -154,7 +154,7 @@ static struct urdev *urdev_get_from_devno(u16 devno) struct ccw_device *cdev; struct urdev *urd; - sprintf(bus_id, "0.0.%04x", devno); + scnprintf(bus_id, sizeof(bus_id), "0.0.%04x", devno); cdev = get_ccwdev_by_busid(&ur_driver, bus_id); if (!cdev) return NULL; @@ -904,11 +904,11 @@ static int ur_set_online(struct ccw_device *cdev) goto fail_free_cdev; if (urd->cdev->id.cu_type == READER_PUNCH_DEVTYPE) { if (urd->class == DEV_CLASS_UR_I) - sprintf(node_id, "vmrdr-%s", dev_name(&cdev->dev)); + scnprintf(node_id, sizeof(node_id), "vmrdr-%s", dev_name(&cdev->dev)); if (urd->class == DEV_CLASS_UR_O) - sprintf(node_id, "vmpun-%s", dev_name(&cdev->dev)); + scnprintf(node_id, sizeof(node_id), "vmpun-%s", dev_name(&cdev->dev)); } else if (urd->cdev->id.cu_type == PRINTER_DEVTYPE) { - sprintf(node_id, "vmprt-%s", dev_name(&cdev->dev)); + scnprintf(node_id, sizeof(node_id), "vmprt-%s", dev_name(&cdev->dev)); } else { rc = -EOPNOTSUPP; goto fail_free_cdev; diff --git a/drivers/spi/spi-microchip-core-spi.c b/drivers/spi/spi-microchip-core-spi.c index 98bf0e6cd00e..89e40fc45d73 100644 --- a/drivers/spi/spi-microchip-core-spi.c +++ b/drivers/spi/spi-microchip-core-spi.c @@ -387,6 +387,7 @@ static int mchp_corespi_probe(struct platform_device *pdev) ret = devm_spi_register_controller(dev, host); if (ret) { + mchp_corespi_disable_ints(spi); mchp_corespi_disable(spi); return dev_err_probe(dev, ret, "unable to register host for CoreSPI controller\n"); } diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c index c8b837006bb2..fabda0f6ec1a 100644 --- a/fs/nfs/callback.c +++ b/fs/nfs/callback.c @@ -258,7 +258,7 @@ err_start: /* * Kill the callback thread if it's no longer being used. 
*/ -void nfs_callback_down(int minorversion, struct net *net) +void nfs_callback_down(int minorversion, struct net *net, struct rpc_xprt *xprt) { struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion]; struct svc_serv *serv; @@ -270,7 +270,7 @@ void nfs_callback_down(int minorversion, struct net *net) if (cb_info->users == 0) { svc_set_num_threads(serv, NULL, 0); dprintk("nfs_callback_down: service destroyed\n"); - svc_destroy(&cb_info->serv); + xprt_svc_destroy_nullify_bc(xprt, &cb_info->serv); } mutex_unlock(&nfs_callback_mutex); } diff --git a/fs/nfs/callback.h b/fs/nfs/callback.h index 154a6ed1299f..8809f93d82c0 100644 --- a/fs/nfs/callback.h +++ b/fs/nfs/callback.h @@ -188,7 +188,8 @@ extern __be32 nfs4_callback_recall(void *argp, void *resp, struct cb_process_state *cps); #if IS_ENABLED(CONFIG_NFS_V4) extern int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt); -extern void nfs_callback_down(int minorversion, struct net *net); +extern void nfs_callback_down(int minorversion, struct net *net, + struct rpc_xprt *xprt); #endif /* CONFIG_NFS_V4 */ /* * nfs41: Callbacks are expected to not cause substantial latency, diff --git a/fs/nfs/client.c b/fs/nfs/client.c index 54699299d5b1..2aaea9c98c2c 100644 --- a/fs/nfs/client.c +++ b/fs/nfs/client.c @@ -784,10 +784,18 @@ static int nfs_init_server(struct nfs_server *server, server->fattr_valid = NFS_ATTR_FATTR_V4; } - if (ctx->rsize) + if (ctx->bsize) { + server->bsize = ctx->bsize; + server->automount_inherit |= NFS_AUTOMOUNT_INHERIT_BSIZE; + } + if (ctx->rsize) { server->rsize = nfs_io_size(ctx->rsize, clp->cl_proto); - if (ctx->wsize) + server->automount_inherit |= NFS_AUTOMOUNT_INHERIT_RSIZE; + } + if (ctx->wsize) { server->wsize = nfs_io_size(ctx->wsize, clp->cl_proto); + server->automount_inherit |= NFS_AUTOMOUNT_INHERIT_WSIZE; + } server->acregmin = ctx->acregmin * HZ; server->acregmax = ctx->acregmax * HZ; @@ -977,8 +985,13 @@ EXPORT_SYMBOL_GPL(nfs_probe_server); void nfs_server_copy_userdata(struct nfs_server *target, struct nfs_server *source) { target->flags = source->flags; - target->rsize = source->rsize; - target->wsize = source->wsize; + target->automount_inherit = source->automount_inherit; + if (source->automount_inherit & NFS_AUTOMOUNT_INHERIT_BSIZE) + target->bsize = source->bsize; + if (source->automount_inherit & NFS_AUTOMOUNT_INHERIT_RSIZE) + target->rsize = source->rsize; + if (source->automount_inherit & NFS_AUTOMOUNT_INHERIT_WSIZE) + target->wsize = source->wsize; target->acregmin = source->acregmin; target->acregmax = source->acregmax; target->acdirmin = source->acdirmin; diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index 9d3a5f29f17f..2248e3ad089a 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -30,6 +30,11 @@ static unsigned nfs_delegation_watermark = NFS_DEFAULT_DELEGATION_WATERMARK; module_param_named(delegation_watermark, nfs_delegation_watermark, uint, 0644); +bool directory_delegations = true; +module_param(directory_delegations, bool, 0644); +MODULE_PARM_DESC(directory_delegations, + "Enable the use of directory delegations, defaults to on."); + static struct hlist_head *nfs_delegation_hash(struct nfs_server *server, const struct nfs_fh *fhandle) { @@ -143,6 +148,8 @@ static int nfs4_do_check_delegation(struct inode *inode, fmode_t type, */ int nfs4_have_delegation(struct inode *inode, fmode_t type, int flags) { + if (S_ISDIR(inode->i_mode) && !directory_delegations) + nfs_inode_evict_delegation(inode); return nfs4_do_check_delegation(inode, type, flags, true); } 
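Side note on the directory_delegations knob added just above: it is registered with module_param(..., 0644), so it can be read and flipped at run time from userspace without remounting. A minimal userspace sketch, assuming the parameter ends up under /sys/module/nfsv4/parameters/directory_delegations (the exact module name hosting fs/nfs/delegation.c is an assumption, not something this diff states):

/*
 * toggle_dir_deleg.c - illustrative sketch only (needs root to write).
 * The sysfs path below is an assumption about where the module_param()
 * added in fs/nfs/delegation.c is exposed.
 */
#include <stdio.h>
#include <string.h>

#define PARAM "/sys/module/nfsv4/parameters/directory_delegations"

int main(int argc, char **argv)
{
	char cur[8] = "";
	FILE *f = fopen(PARAM, "r");

	if (!f || !fgets(cur, sizeof(cur), f)) {
		perror(PARAM);
		return 1;
	}
	fclose(f);
	cur[strcspn(cur, "\n")] = '\0';
	printf("directory_delegations=%s\n", cur);

	if (argc > 1) {			/* e.g. "Y", "N", "1" or "0" */
		f = fopen(PARAM, "w");
		if (!f || fputs(argv[1], f) == EOF || fclose(f) == EOF) {
			perror(PARAM);
			return 1;
		}
	}
	return 0;
}

Because the hunk above makes nfs4_have_delegation() evict existing directory delegations whenever the parameter is clear, turning the knob off appears to take effect on the next delegation check rather than requiring a remount.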
@@ -379,6 +386,7 @@ nfs_detach_delegation_locked(struct nfs_inode *nfsi, delegation->inode = NULL; rcu_assign_pointer(nfsi->delegation, NULL); spin_unlock(&delegation->lock); + clear_bit(NFS_INO_REQ_DIR_DELEG, &nfsi->flags); return delegation; } diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h index 08ec2e9c68a4..46d866adb5c2 100644 --- a/fs/nfs/delegation.h +++ b/fs/nfs/delegation.h @@ -124,6 +124,19 @@ static inline int nfs_have_delegated_mtime(struct inode *inode) NFS_DELEGATION_FLAG_TIME); } +extern bool directory_delegations; + +static inline void nfs_request_directory_delegation(struct inode *inode) +{ + if (S_ISDIR(inode->i_mode)) + set_bit(NFS_INO_REQ_DIR_DELEG, &NFS_I(inode)->flags); +} + +static inline bool nfs_have_directory_delegation(struct inode *inode) +{ + return S_ISDIR(inode->i_mode) && nfs_have_delegated_attributes(inode); +} + int nfs4_delegation_hash_alloc(struct nfs_server *server); #endif diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index ea9f6ca8f30f..23a78a742b61 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -789,16 +789,17 @@ again: goto out; } + nfs_set_verifier(dentry, dir_verifier); inode = nfs_fhget(dentry->d_sb, entry->fh, entry->fattr); alias = d_splice_alias(inode, dentry); d_lookup_done(dentry); if (alias) { if (IS_ERR(alias)) goto out; + nfs_set_verifier(alias, dir_verifier); dput(dentry); dentry = alias; } - nfs_set_verifier(dentry, dir_verifier); trace_nfs_readdir_lookup(d_inode(parent), dentry, 0); out: dput(dentry); @@ -1514,6 +1515,15 @@ static int nfs_check_verifier(struct inode *dir, struct dentry *dentry, return 0; if (!nfs_dentry_verify_change(dir, dentry)) return 0; + + /* + * If we have a directory delegation then we don't need to revalidate + * the directory. The delegation will either get recalled or we will + * receive a notification when it changes. 
+ */ + if (nfs_have_directory_delegation(dir)) + return 0; + /* Revalidate nfsi->cache_change_attribute before we declare a match */ if (nfs_mapping_need_revalidate_inode(dir)) { if (rcu_walk) @@ -1894,13 +1904,15 @@ static int nfs_dentry_delete(const struct dentry *dentry) } /* Ensure that we revalidate inode->i_nlink */ -static void nfs_drop_nlink(struct inode *inode) +static void nfs_drop_nlink(struct inode *inode, unsigned long gencount) { + struct nfs_inode *nfsi = NFS_I(inode); + spin_lock(&inode->i_lock); /* drop the inode if we're reasonably sure this is the last link */ - if (inode->i_nlink > 0) + if (inode->i_nlink > 0 && gencount == nfsi->attr_gencount) drop_nlink(inode); - NFS_I(inode)->attr_gencount = nfs_inc_attr_generation_counter(); + nfsi->attr_gencount = nfs_inc_attr_generation_counter(); nfs_set_cache_invalid( inode, NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_CTIME | NFS_INO_INVALID_NLINK); @@ -1914,8 +1926,9 @@ static void nfs_drop_nlink(struct inode *inode) static void nfs_dentry_iput(struct dentry *dentry, struct inode *inode) { if (dentry->d_flags & DCACHE_NFSFS_RENAMED) { + unsigned long gencount = READ_ONCE(NFS_I(inode)->attr_gencount); nfs_complete_unlink(dentry, inode); - nfs_drop_nlink(inode); + nfs_drop_nlink(inode, gencount); } iput(inode); } @@ -1991,13 +2004,14 @@ struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, unsigned in nfs_lookup_advise_force_readdirplus(dir, flags); no_entry: + nfs_set_verifier(dentry, dir_verifier); res = d_splice_alias(inode, dentry); if (res != NULL) { if (IS_ERR(res)) goto out; + nfs_set_verifier(res, dir_verifier); dentry = res; } - nfs_set_verifier(dentry, dir_verifier); out: trace_nfs_lookup_exit(dir, dentry, flags, PTR_ERR_OR_ZERO(res)); nfs_free_fattr(fattr); @@ -2139,12 +2153,12 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry, d_drop(dentry); switch (err) { case -ENOENT: - d_splice_alias(NULL, dentry); if (nfs_server_capable(dir, NFS_CAP_CASE_INSENSITIVE)) dir_verifier = inode_peek_iversion_raw(dir); else dir_verifier = nfs_save_change_attribute(dir); nfs_set_verifier(dentry, dir_verifier); + d_splice_alias(NULL, dentry); break; case -EISDIR: case -ENOTDIR: @@ -2203,6 +2217,13 @@ no_open: EXPORT_SYMBOL_GPL(nfs_atomic_open); static int +nfs_lookup_revalidate_delegated_parent(struct inode *dir, struct dentry *dentry, + struct inode *inode) +{ + return nfs_lookup_revalidate_done(dir, dentry, inode, 1); +} + +static int nfs4_lookup_revalidate(struct inode *dir, const struct qstr *name, struct dentry *dentry, unsigned int flags) { @@ -2229,6 +2250,9 @@ nfs4_lookup_revalidate(struct inode *dir, const struct qstr *name, if (nfs_verifier_is_delegated(dentry)) return nfs_lookup_revalidate_delegated(dir, dentry, inode); + if (nfs_have_directory_delegation(dir)) + return nfs_lookup_revalidate_delegated_parent(dir, dentry, inode); + /* NFS only supports OPEN on regular files */ if (!S_ISREG(inode->i_mode)) goto full_reval; @@ -2507,9 +2531,11 @@ static int nfs_safe_remove(struct dentry *dentry) trace_nfs_remove_enter(dir, dentry); if (inode != NULL) { + unsigned long gencount = READ_ONCE(NFS_I(inode)->attr_gencount); + error = NFS_PROTO(dir)->remove(dir, dentry); if (error == 0) - nfs_drop_nlink(inode); + nfs_drop_nlink(inode, gencount); } else error = NFS_PROTO(dir)->remove(dir, dentry); if (error == -ENOENT) @@ -2709,6 +2735,7 @@ int nfs_rename(struct mnt_idmap *idmap, struct inode *old_dir, { struct inode *old_inode = d_inode(old_dentry); struct inode *new_inode = d_inode(new_dentry); + unsigned long 
new_gencount = 0; struct dentry *dentry = NULL; struct rpc_task *task; bool must_unblock = false; @@ -2761,6 +2788,7 @@ int nfs_rename(struct mnt_idmap *idmap, struct inode *old_dir, } else { block_revalidate(new_dentry); must_unblock = true; + new_gencount = NFS_I(new_inode)->attr_gencount; spin_unlock(&new_dentry->d_lock); } @@ -2800,7 +2828,7 @@ out: new_dir, new_dentry, error); if (!error) { if (new_inode != NULL) - nfs_drop_nlink(new_inode); + nfs_drop_nlink(new_inode, new_gencount); /* * The d_move() should be here instead of in an async RPC completion * handler because we need the proper locks to move the dentry. If diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c index f76fe406937a..84049f3cd340 100644 --- a/fs/nfs/inode.c +++ b/fs/nfs/inode.c @@ -1389,6 +1389,9 @@ __nfs_revalidate_inode(struct nfs_server *server, struct inode *inode) status = pnfs_sync_inode(inode, false); if (status) goto out; + } else if (nfs_have_directory_delegation(inode)) { + status = 0; + goto out; } status = -ENOMEM; diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h index 2ecd38e1d17a..2e596244799f 100644 --- a/fs/nfs/internal.h +++ b/fs/nfs/internal.h @@ -13,7 +13,7 @@ #include <linux/nfslocalio.h> #include <linux/wait_bit.h> -#define NFS_SB_MASK (SB_NOSUID|SB_NODEV|SB_NOEXEC|SB_SYNCHRONOUS) +#define NFS_SB_MASK (SB_RDONLY|SB_NOSUID|SB_NODEV|SB_NOEXEC|SB_SYNCHRONOUS) extern const struct export_operations nfs_export_ops; @@ -152,7 +152,6 @@ struct nfs_fs_context { struct super_block *sb; struct dentry *dentry; struct nfs_fattr *fattr; - unsigned int inherited_bsize; } clone_data; }; diff --git a/fs/nfs/localio.c b/fs/nfs/localio.c index f33bfa7b58e6..a113bfdacfd6 100644 --- a/fs/nfs/localio.c +++ b/fs/nfs/localio.c @@ -43,8 +43,8 @@ struct nfs_local_kiocb { size_t end_len; short int end_iter_index; atomic_t n_iters; + struct iov_iter iters[NFSLOCAL_MAX_IOS]; bool iter_is_dio_aligned[NFSLOCAL_MAX_IOS]; - struct iov_iter iters[NFSLOCAL_MAX_IOS] ____cacheline_aligned; /* End mostly DIO-specific members */ }; @@ -339,8 +339,6 @@ nfs_is_local_dio_possible(struct nfs_local_kiocb *iocb, int rw, if (unlikely(!nf_dio_mem_align || !nf_dio_offset_align)) return false; - if (unlikely(nf_dio_offset_align > PAGE_SIZE)) - return false; if (unlikely(len < nf_dio_offset_align)) return false; diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c index 5a4d193da1a9..af9be0c5f516 100644 --- a/fs/nfs/namespace.c +++ b/fs/nfs/namespace.c @@ -149,6 +149,7 @@ struct vfsmount *nfs_d_automount(struct path *path) struct vfsmount *mnt = ERR_PTR(-ENOMEM); struct nfs_server *server = NFS_SB(path->dentry->d_sb); struct nfs_client *client = server->nfs_client; + unsigned long s_flags = path->dentry->d_sb->s_flags; int timeout = READ_ONCE(nfs_mountpoint_expiry_timeout); int ret; @@ -169,11 +170,21 @@ struct vfsmount *nfs_d_automount(struct path *path) if (!ctx->clone_data.fattr) goto out_fc; + if (fc->cred != server->cred) { + put_cred(fc->cred); + fc->cred = get_cred(server->cred); + } + if (fc->net_ns != client->cl_net) { put_net(fc->net_ns); fc->net_ns = get_net(client->cl_net); } + /* Inherit the flags covered by NFS_SB_MASK */ + fc->sb_flags_mask |= NFS_SB_MASK; + fc->sb_flags &= ~NFS_SB_MASK; + fc->sb_flags |= s_flags & NFS_SB_MASK; + /* for submounts we want the same server; referrals will reassign */ memcpy(&ctx->nfs_server._address, &client->cl_addr, client->cl_addrlen); ctx->nfs_server.addrlen = client->cl_addrlen; @@ -184,6 +195,10 @@ struct vfsmount *nfs_d_automount(struct path *path) ctx->nfs_mod = client->cl_nfs_mod; 
get_nfs_version(ctx->nfs_mod); + /* Inherit block sizes if they were specified as mount parameters */ + if (server->automount_inherit & NFS_AUTOMOUNT_INHERIT_BSIZE) + ctx->bsize = server->bsize; + ret = client->rpc_ops->submount(fc, server); if (ret < 0) { mnt = ERR_PTR(ret); @@ -283,7 +298,6 @@ int nfs_do_submount(struct fs_context *fc) return -ENOMEM; ctx->internal = true; - ctx->clone_data.inherited_bsize = ctx->clone_data.sb->s_blocksize_bits; p = nfs_devname(dentry, buffer, 4096); if (IS_ERR(p)) { diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c index a4cb67573aa7..1181f9cc6dbd 100644 --- a/fs/nfs/nfs3proc.c +++ b/fs/nfs/nfs3proc.c @@ -483,7 +483,8 @@ nfs3_proc_unlink_done(struct rpc_task *task, struct inode *dir) static void nfs3_proc_rename_setup(struct rpc_message *msg, struct dentry *old_dentry, - struct dentry *new_dentry) + struct dentry *new_dentry, + struct inode *same_parent) { msg->rpc_proc = &nfs3_procedures[NFS3PROC_RENAME]; } diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index 3a4baed993c9..96bccefbe2cb 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c @@ -281,8 +281,13 @@ error: */ static void nfs4_destroy_callback(struct nfs_client *clp) { - if (__test_and_clear_bit(NFS_CS_CALLBACK, &clp->cl_res_state)) - nfs_callback_down(clp->cl_mvops->minor_version, clp->cl_net); + if (__test_and_clear_bit(NFS_CS_CALLBACK, &clp->cl_res_state)) { + struct rpc_xprt *xprt; + + xprt = rcu_dereference_raw(clp->cl_rpcclient->cl_xprt); + nfs_callback_down(clp->cl_mvops->minor_version, clp->cl_net, + xprt); + } } static void nfs4_shutdown_client(struct nfs_client *clp) @@ -1174,10 +1179,20 @@ static int nfs4_init_server(struct nfs_server *server, struct fs_context *fc) if (error < 0) return error; - if (ctx->rsize) - server->rsize = nfs_io_size(ctx->rsize, server->nfs_client->cl_proto); - if (ctx->wsize) - server->wsize = nfs_io_size(ctx->wsize, server->nfs_client->cl_proto); + if (ctx->bsize) { + server->bsize = ctx->bsize; + server->automount_inherit |= NFS_AUTOMOUNT_INHERIT_BSIZE; + } + if (ctx->rsize) { + server->rsize = + nfs_io_size(ctx->rsize, server->nfs_client->cl_proto); + server->automount_inherit |= NFS_AUTOMOUNT_INHERIT_RSIZE; + } + if (ctx->wsize) { + server->wsize = + nfs_io_size(ctx->wsize, server->nfs_client->cl_proto); + server->automount_inherit |= NFS_AUTOMOUNT_INHERIT_WSIZE; + } server->acregmin = ctx->acregmin * HZ; server->acregmax = ctx->acregmax * HZ; diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 93c6ce04332b..ec1ce593dea2 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -1780,8 +1780,17 @@ static void nfs_set_open_stateid_locked(struct nfs4_state *state, if (nfs_stateid_is_sequential(state, stateid)) break; - if (status) - break; + if (status) { + if (nfs4_stateid_match_other(stateid, &state->open_stateid) && + !nfs4_stateid_is_newer(stateid, &state->open_stateid)) { + trace_nfs4_open_stateid_update_skip(state->inode, + stateid, status); + return; + } else { + break; + } + } + /* Rely on seqids for serialisation with NFSv4.0 */ if (!nfs4_has_session(NFS_SERVER(state->inode)->nfs_client)) break; @@ -3174,18 +3183,6 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, if (opendata->o_res.rflags & NFS4_OPEN_RESULT_PRESERVE_UNLINKED) set_bit(NFS_INO_PRESERVE_UNLINKED, &NFS_I(state->inode)->flags); - dentry = opendata->dentry; - if (d_really_is_negative(dentry)) { - struct dentry *alias; - d_drop(dentry); - alias = d_splice_alias(igrab(state->inode), dentry); - /* d_splice_alias() can't fail here - it's a 
non-directory */ - if (alias) { - dput(ctx->dentry); - ctx->dentry = dentry = alias; - } - } - switch(opendata->o_arg.claim) { default: break; @@ -3196,7 +3193,20 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata, break; if (opendata->o_res.delegation.type != 0) dir_verifier = nfs_save_change_attribute(dir); - nfs_set_verifier(dentry, dir_verifier); + } + + dentry = opendata->dentry; + nfs_set_verifier(dentry, dir_verifier); + if (d_really_is_negative(dentry)) { + struct dentry *alias; + d_drop(dentry); + alias = d_splice_alias(igrab(state->inode), dentry); + /* d_splice_alias() can't fail here - it's a non-directory */ + if (alias) { + dput(ctx->dentry); + nfs_set_verifier(alias, dir_verifier); + ctx->dentry = dentry = alias; + } } /* Parse layoutget results before we check for access */ @@ -4460,6 +4470,30 @@ out: return status; } +#if IS_ENABLED(CONFIG_NFS_V4_1) +static bool should_request_dir_deleg(struct inode *inode) +{ + if (!directory_delegations) + return false; + if (!inode) + return false; + if (!S_ISDIR(inode->i_mode)) + return false; + if (!nfs_server_capable(inode, NFS_CAP_DIR_DELEG)) + return false; + if (!test_and_clear_bit(NFS_INO_REQ_DIR_DELEG, &(NFS_I(inode)->flags))) + return false; + if (nfs4_have_delegation(inode, FMODE_READ, 0)) + return false; + return true; +} +#else +static bool should_request_dir_deleg(struct inode *inode) +{ + return false; +} +#endif /* CONFIG_NFS_V4_1 */ + static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr, struct inode *inode) { @@ -4477,7 +4511,9 @@ static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, .rpc_argp = &args, .rpc_resp = &res, }; + struct nfs4_gdd_res gdd_res; unsigned short task_flags = 0; + int status; if (nfs4_has_session(server->nfs_client)) task_flags = RPC_TASK_MOVEABLE; @@ -4486,11 +4522,31 @@ static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, if (inode && (server->flags & NFS_MOUNT_SOFTREVAL)) task_flags |= RPC_TASK_TIMEOUT; + args.get_dir_deleg = should_request_dir_deleg(inode); + if (args.get_dir_deleg) + res.gdd_res = &gdd_res; + nfs4_bitmap_copy_adjust(bitmask, nfs4_bitmask(server, fattr->label), inode, 0); nfs_fattr_init(fattr); nfs4_init_sequence(&args.seq_args, &res.seq_res, 0, 0); - return nfs4_do_call_sync(server->client, server, &msg, - &args.seq_args, &res.seq_res, task_flags); + + status = nfs4_do_call_sync(server->client, server, &msg, + &args.seq_args, &res.seq_res, task_flags); + if (args.get_dir_deleg) { + switch (status) { + case 0: + if (gdd_res.status != GDD4_OK) + break; + status = nfs_inode_set_delegation( + inode, current_cred(), FMODE_READ, + &gdd_res.deleg, 0, NFS4_OPEN_DELEGATE_READ); + break; + case -ENOTSUPP: + case -EOPNOTSUPP: + server->caps &= ~NFS_CAP_DIR_DELEG; + } + } + return status; } int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, @@ -4503,8 +4559,14 @@ int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, do { err = _nfs4_proc_getattr(server, fhandle, fattr, inode); trace_nfs4_getattr(server, fhandle, fattr, err); - err = nfs4_handle_exception(server, err, - &exception); + switch (err) { + default: + err = nfs4_handle_exception(server, err, &exception); + break; + case -ENOTSUPP: + case -EOPNOTSUPP: + exception.retry = true; + } } while (exception.retry); return err; } @@ -4768,6 +4830,7 @@ static int _nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry int status = 0; if 
(!nfs4_have_delegation(inode, FMODE_READ, 0)) { + nfs_request_directory_delegation(inode); res.fattr = nfs_alloc_fattr(); if (res.fattr == NULL) return -ENOMEM; @@ -4875,6 +4938,8 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr, ilabel = nfs4_label_init_security(dir, dentry, sattr, &l); + nfs_request_directory_delegation(dir); + if (!(server->attr_bitmask[2] & FATTR4_WORD2_MODE_UMASK)) sattr->ia_mode &= ~current_umask(); state = nfs4_do_open(dir, ctx, flags, sattr, ilabel, NULL); @@ -4971,6 +5036,7 @@ static void nfs4_proc_unlink_setup(struct rpc_message *msg, nfs4_init_sequence(&args->seq_args, &res->seq_res, 1, 0); nfs_fattr_init(res->dir_attr); + nfs_request_directory_delegation(d_inode(dentry->d_parent)); if (inode) { nfs4_inode_return_delegation(inode); @@ -5005,7 +5071,8 @@ static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir) static void nfs4_proc_rename_setup(struct rpc_message *msg, struct dentry *old_dentry, - struct dentry *new_dentry) + struct dentry *new_dentry, + struct inode *same_parent) { struct nfs_renameargs *arg = msg->rpc_argp; struct nfs_renameres *res = msg->rpc_resp; @@ -5016,6 +5083,8 @@ static void nfs4_proc_rename_setup(struct rpc_message *msg, nfs4_inode_make_writeable(old_inode); if (new_inode) nfs4_inode_return_delegation(new_inode); + if (same_parent) + nfs_request_directory_delegation(same_parent); msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME]; res->server = NFS_SB(old_dentry->d_sb); nfs4_init_sequence(&arg->seq_args, &res->seq_res, 1, 0); @@ -10822,6 +10891,7 @@ static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = { .minor_version = 1, .init_caps = NFS_CAP_READDIRPLUS | NFS_CAP_ATOMIC_OPEN + | NFS_CAP_DIR_DELEG | NFS_CAP_POSIX_LOCK | NFS_CAP_STATEID_NFSV41 | NFS_CAP_ATOMIC_OPEN_V1 @@ -10848,6 +10918,7 @@ static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = { .minor_version = 2, .init_caps = NFS_CAP_READDIRPLUS | NFS_CAP_ATOMIC_OPEN + | NFS_CAP_DIR_DELEG | NFS_CAP_POSIX_LOCK | NFS_CAP_STATEID_NFSV41 | NFS_CAP_ATOMIC_OPEN_V1 diff --git a/fs/nfs/nfs4trace.h b/fs/nfs/nfs4trace.h index 9776d220cec3..6285128e631a 100644 --- a/fs/nfs/nfs4trace.h +++ b/fs/nfs/nfs4trace.h @@ -1353,6 +1353,7 @@ DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_setattr); DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_delegreturn); DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_open_stateid_update); DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_open_stateid_update_wait); +DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_open_stateid_update_skip); DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_close_stateid_update_wait); DECLARE_EVENT_CLASS(nfs4_getattr_event, diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c index 1d0e6c10f921..b6fe30577fab 100644 --- a/fs/nfs/nfs4xdr.c +++ b/fs/nfs/nfs4xdr.c @@ -393,6 +393,20 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req, XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 5) #define encode_reclaim_complete_maxsz (op_encode_hdr_maxsz + 4) #define decode_reclaim_complete_maxsz (op_decode_hdr_maxsz + 4) +#define encode_get_dir_deleg_maxsz (op_encode_hdr_maxsz + \ + 4 /* gdda_signal_deleg_avail */ + \ + 8 /* gdda_notification_types */ + \ + nfstime4_maxsz /* gdda_child_attr_delay */ + \ + nfstime4_maxsz /* gdda_dir_attr_delay */ + \ + nfs4_fattr_bitmap_maxsz /* gdda_child_attributes */ + \ + nfs4_fattr_bitmap_maxsz /* gdda_dir_attributes */) +#define decode_get_dir_deleg_maxsz (op_decode_hdr_maxsz + \ + 4 /* gddrnf_status */ + \ + encode_verifier_maxsz /* gddr_cookieverf */ + \ + encode_stateid_maxsz /* gddr_stateid 
*/ + \ + 8 /* gddr_notification */ + \ + nfs4_fattr_maxsz /* gddr_child_attributes */ + \ + nfs4_fattr_maxsz /* gddr_dir_attributes */) #define encode_getdeviceinfo_maxsz (op_encode_hdr_maxsz + \ XDR_QUADLEN(NFS4_DEVICEID4_SIZE) + \ 1 /* layout type */ + \ @@ -444,6 +458,8 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req, #else /* CONFIG_NFS_V4_1 */ #define encode_sequence_maxsz 0 #define decode_sequence_maxsz 0 +#define encode_get_dir_deleg_maxsz 0 +#define decode_get_dir_deleg_maxsz 0 #define encode_layoutreturn_maxsz 0 #define decode_layoutreturn_maxsz 0 #define encode_layoutget_maxsz 0 @@ -631,11 +647,13 @@ static int decode_layoutget(struct xdr_stream *xdr, struct rpc_rqst *req, #define NFS4_enc_getattr_sz (compound_encode_hdr_maxsz + \ encode_sequence_maxsz + \ encode_putfh_maxsz + \ + encode_get_dir_deleg_maxsz + \ encode_getattr_maxsz + \ encode_renew_maxsz) #define NFS4_dec_getattr_sz (compound_decode_hdr_maxsz + \ decode_sequence_maxsz + \ decode_putfh_maxsz + \ + decode_get_dir_deleg_maxsz + \ decode_getattr_maxsz + \ decode_renew_maxsz) #define NFS4_enc_lookup_sz (compound_encode_hdr_maxsz + \ @@ -2008,6 +2026,33 @@ static void encode_sequence(struct xdr_stream *xdr, #ifdef CONFIG_NFS_V4_1 static void +encode_get_dir_delegation(struct xdr_stream *xdr, struct compound_hdr *hdr) +{ + struct timespec64 ts = { 0, 0 }; + u32 notifications[1] = { 0 }; + u32 attributes[1] = { 0 }; + __be32 *p; + + encode_op_hdr(xdr, OP_GET_DIR_DELEGATION, decode_get_dir_deleg_maxsz, hdr); + + /* We don't handle CB_RECALLABLE_OBJ_AVAIL yet. */ + xdr_stream_encode_bool(xdr, false); + + xdr_encode_bitmap4(xdr, notifications, ARRAY_SIZE(notifications)); + + /* Request no delay on attribute updates */ + p = reserve_space(xdr, 12 + 12); + p = xdr_encode_nfstime4(p, &ts); + xdr_encode_nfstime4(p, &ts); + + /* Requested child attributes */ + xdr_encode_bitmap4(xdr, attributes, ARRAY_SIZE(attributes)); + + /* Requested dir attributes */ + xdr_encode_bitmap4(xdr, attributes, ARRAY_SIZE(attributes)); +} + +static void encode_getdeviceinfo(struct xdr_stream *xdr, const struct nfs4_getdeviceinfo_args *args, struct compound_hdr *hdr) @@ -2143,6 +2188,11 @@ static void encode_free_stateid(struct xdr_stream *xdr, } #else static inline void +encode_get_dir_delegation(struct xdr_stream *xdr, struct compound_hdr *hdr) +{ +} + +static inline void encode_layoutreturn(struct xdr_stream *xdr, const struct nfs4_layoutreturn_args *args, struct compound_hdr *hdr) @@ -2356,6 +2406,8 @@ static void nfs4_xdr_enc_getattr(struct rpc_rqst *req, struct xdr_stream *xdr, encode_compound_hdr(xdr, req, &hdr); encode_sequence(xdr, &args->seq_args, &hdr); encode_putfh(xdr, args->fh, &hdr); + if (args->get_dir_deleg) + encode_get_dir_delegation(xdr, &hdr); encode_getfattr(xdr, args->bitmask, &hdr); encode_nops(&hdr); } @@ -5994,6 +6046,49 @@ static int decode_layout_stateid(struct xdr_stream *xdr, nfs4_stateid *stateid) return decode_stateid(xdr, stateid); } +static int decode_get_dir_delegation(struct xdr_stream *xdr, + struct nfs4_getattr_res *res) +{ + struct nfs4_gdd_res *gdd_res = res->gdd_res; + nfs4_verifier cookieverf; + u32 bitmap[1]; + int status; + + status = decode_op_hdr(xdr, OP_GET_DIR_DELEGATION); + if (status) + return status; + + if (xdr_stream_decode_u32(xdr, &gdd_res->status)) + return -EIO; + + if (gdd_res->status == GDD4_UNAVAIL) + return xdr_inline_decode(xdr, 4) ? 
0 : -EIO; + + status = decode_verifier(xdr, &cookieverf); + if (status) + return status; + + status = decode_delegation_stateid(xdr, &gdd_res->deleg); + if (status) + return status; + + /* Decode supported notification types. */ + status = decode_bitmap4(xdr, bitmap, ARRAY_SIZE(bitmap)); + if (status < 0) + return status; + + /* Decode supported child attributes. */ + status = decode_bitmap4(xdr, bitmap, ARRAY_SIZE(bitmap)); + if (status < 0) + return status; + + /* Decode supported attributes. */ + status = decode_bitmap4(xdr, bitmap, ARRAY_SIZE(bitmap)); + if (status < 0) + return status; + return 0; +} + static int decode_getdeviceinfo(struct xdr_stream *xdr, struct nfs4_getdeviceinfo_res *res) { @@ -6208,6 +6303,12 @@ static int decode_free_stateid(struct xdr_stream *xdr, return res->status; } #else +static int decode_get_dir_delegation(struct xdr_stream *xdr, + struct nfs4_getattr_res *res) +{ + return 0; +} + static inline int decode_layoutreturn(struct xdr_stream *xdr, struct nfs4_layoutreturn_res *res) @@ -6525,6 +6626,11 @@ static int nfs4_xdr_dec_getattr(struct rpc_rqst *rqstp, struct xdr_stream *xdr, status = decode_putfh(xdr); if (status) goto out; + if (res->gdd_res) { + status = decode_get_dir_delegation(xdr, res); + if (status) + goto out; + } status = decode_getfattr(xdr, res->fattr, res->server); out: return status; diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c index f157d43d1312..b72d7cc36766 100644 --- a/fs/nfs/pnfs.c +++ b/fs/nfs/pnfs.c @@ -464,6 +464,7 @@ pnfs_mark_layout_stateid_invalid(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg, *next; set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags); + clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(lo->plh_inode)->flags); list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list) pnfs_clear_lseg_state(lseg, lseg_list); pnfs_clear_layoutreturn_info(lo); diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c index 63e71310b9f6..39df80e4ae6f 100644 --- a/fs/nfs/proc.c +++ b/fs/nfs/proc.c @@ -353,7 +353,8 @@ static int nfs_proc_unlink_done(struct rpc_task *task, struct inode *dir) static void nfs_proc_rename_setup(struct rpc_message *msg, struct dentry *old_dentry, - struct dentry *new_dentry) + struct dentry *new_dentry, + struct inode *same_parent) { msg->rpc_proc = &nfs_procedures[NFSPROC_RENAME]; } diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 72dee6f3050e..57d372db03b9 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -1052,16 +1052,6 @@ int nfs_reconfigure(struct fs_context *fc) sync_filesystem(sb); /* - * The SB_RDONLY flag has been removed from the superblock during - * mounts to prevent interference between different filesystems. - * Similarly, it is also necessary to ignore the SB_RDONLY flag - * during reconfiguration; otherwise, it may also result in the - * creation of redundant superblocks when mounting a directory with - * different rw and ro flags multiple times. - */ - fc->sb_flags_mask &= ~SB_RDONLY; - - /* * Userspace mount programs that send binary options generally send * them populated with default values. We have no way to know which * ones were explicitly specified. 
Fall back to legacy behavior and @@ -1101,8 +1091,9 @@ static void nfs_fill_super(struct super_block *sb, struct nfs_fs_context *ctx) sb->s_blocksize = 0; sb->s_xattr = server->nfs_client->cl_nfs_mod->xattr; sb->s_op = server->nfs_client->cl_nfs_mod->sops; - if (ctx->bsize) - sb->s_blocksize = nfs_block_size(ctx->bsize, &sb->s_blocksize_bits); + if (server->bsize) + sb->s_blocksize = + nfs_block_size(server->bsize, &sb->s_blocksize_bits); switch (server->nfs_client->rpc_ops->version) { case 2: @@ -1318,26 +1309,13 @@ int nfs_get_tree_common(struct fs_context *fc) if (IS_ERR(server)) return PTR_ERR(server); - /* - * When NFS_MOUNT_UNSHARED is not set, NFS forces the sharing of a - * superblock among each filesystem that mounts sub-directories - * belonging to a single exported root path. - * To prevent interference between different filesystems, the - * SB_RDONLY flag should be removed from the superblock. - */ if (server->flags & NFS_MOUNT_UNSHARED) compare_super = NULL; - else - fc->sb_flags &= ~SB_RDONLY; /* -o noac implies -o sync */ if (server->flags & NFS_MOUNT_NOAC) fc->sb_flags |= SB_SYNCHRONOUS; - if (ctx->clone_data.sb) - if (ctx->clone_data.sb->s_flags & SB_SYNCHRONOUS) - fc->sb_flags |= SB_SYNCHRONOUS; - /* Get a superblock - note that we may end up sharing one that already exists */ fc->s_fs_info = server; s = sget_fc(fc, compare_super, nfs_set_super); @@ -1361,13 +1339,8 @@ int nfs_get_tree_common(struct fs_context *fc) } if (!s->s_root) { - unsigned bsize = ctx->clone_data.inherited_bsize; /* initial superblock/root creation */ nfs_fill_super(s, ctx); - if (bsize) { - s->s_blocksize_bits = bsize; - s->s_blocksize = 1U << bsize; - } error = nfs_get_cache_cookie(s, ctx); if (error < 0) goto error_splat_super; diff --git a/fs/nfs/unlink.c b/fs/nfs/unlink.c index b55467911648..4db818c0f9dd 100644 --- a/fs/nfs/unlink.c +++ b/fs/nfs/unlink.c @@ -390,7 +390,8 @@ nfs_async_rename(struct inode *old_dir, struct inode *new_dir, nfs_sb_active(old_dir->i_sb); - NFS_PROTO(data->old_dir)->rename_setup(&msg, old_dentry, new_dentry); + NFS_PROTO(data->old_dir)->rename_setup(&msg, old_dentry, new_dentry, + old_dir == new_dir ? 
old_dir : NULL); return rpc_run_task(&task_setup_data); } diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c index 18d597e49a19..a5c3a9f1b8dc 100644 --- a/fs/nls/nls_base.c +++ b/fs/nls/nls_base.c @@ -67,19 +67,22 @@ int utf8_to_utf32(const u8 *s, int inlen, unicode_t *pu) l &= t->lmask; if (l < t->lval || l > UNICODE_MAX || (l & SURROGATE_MASK) == SURROGATE_PAIR) - return -1; + return -EILSEQ; + *pu = (unicode_t) l; return nc; } if (inlen <= nc) - return -1; + return -EOVERFLOW; + s++; c = (*s ^ 0x80) & 0xFF; if (c & 0xC0) - return -1; + return -EILSEQ; + l = (l << 6) | c; } - return -1; + return -EILSEQ; } EXPORT_SYMBOL(utf8_to_utf32); @@ -94,7 +97,7 @@ int utf32_to_utf8(unicode_t u, u8 *s, int maxout) l = u; if (l > UNICODE_MAX || (l & SURROGATE_MASK) == SURROGATE_PAIR) - return -1; + return -EILSEQ; nc = 0; for (t = utf8_table; t->cmask && maxout; t++, maxout--) { @@ -110,7 +113,7 @@ int utf32_to_utf8(unicode_t u, u8 *s, int maxout) return nc; } } - return -1; + return -EOVERFLOW; } EXPORT_SYMBOL(utf32_to_utf8); @@ -217,8 +220,16 @@ int utf16s_to_utf8s(const wchar_t *pwcs, int inlen, enum utf16_endian endian, inlen--; } size = utf32_to_utf8(u, op, maxout); - if (size == -1) { - /* Ignore character and move on */ + if (size < 0) { + if (size == -EILSEQ) { + /* Ignore character and move on */ + continue; + } + /* + * Stop filling the buffer with data once a character + * does not fit anymore. + */ + break; } else { op += size; maxout -= size; diff --git a/fs/smb/client/cifspdu.h b/fs/smb/client/cifspdu.h index 37b23664ddf3..eeb4011cb217 100644 --- a/fs/smb/client/cifspdu.h +++ b/fs/smb/client/cifspdu.h @@ -1357,37 +1357,6 @@ typedef struct smb_com_transaction_change_notify_rsp { __u16 ByteCount; /* __u8 Pad[3]; */ } __packed TRANSACT_CHANGE_NOTIFY_RSP; -/* Completion Filter flags for Notify */ -#define FILE_NOTIFY_CHANGE_FILE_NAME 0x00000001 -#define FILE_NOTIFY_CHANGE_DIR_NAME 0x00000002 -#define FILE_NOTIFY_CHANGE_NAME 0x00000003 -#define FILE_NOTIFY_CHANGE_ATTRIBUTES 0x00000004 -#define FILE_NOTIFY_CHANGE_SIZE 0x00000008 -#define FILE_NOTIFY_CHANGE_LAST_WRITE 0x00000010 -#define FILE_NOTIFY_CHANGE_LAST_ACCESS 0x00000020 -#define FILE_NOTIFY_CHANGE_CREATION 0x00000040 -#define FILE_NOTIFY_CHANGE_EA 0x00000080 -#define FILE_NOTIFY_CHANGE_SECURITY 0x00000100 -#define FILE_NOTIFY_CHANGE_STREAM_NAME 0x00000200 -#define FILE_NOTIFY_CHANGE_STREAM_SIZE 0x00000400 -#define FILE_NOTIFY_CHANGE_STREAM_WRITE 0x00000800 - -#define FILE_ACTION_ADDED 0x00000001 -#define FILE_ACTION_REMOVED 0x00000002 -#define FILE_ACTION_MODIFIED 0x00000003 -#define FILE_ACTION_RENAMED_OLD_NAME 0x00000004 -#define FILE_ACTION_RENAMED_NEW_NAME 0x00000005 -#define FILE_ACTION_ADDED_STREAM 0x00000006 -#define FILE_ACTION_REMOVED_STREAM 0x00000007 -#define FILE_ACTION_MODIFIED_STREAM 0x00000008 - -/* response contains array of the following structures */ -struct file_notify_information { - __le32 NextEntryOffset; - __le32 Action; - __le32 FileNameLength; - __u8 FileName[]; -} __packed; struct cifs_quota_data { __u32 rsrvd1; /* 0 */ @@ -2034,40 +2003,6 @@ typedef struct { #define CIFS_POSIX_EXTENSIONS 0x00000010 /* support for new QFSInfo */ -/* DeviceType Flags */ -#define FILE_DEVICE_CD_ROM 0x00000002 -#define FILE_DEVICE_CD_ROM_FILE_SYSTEM 0x00000003 -#define FILE_DEVICE_DFS 0x00000006 -#define FILE_DEVICE_DISK 0x00000007 -#define FILE_DEVICE_DISK_FILE_SYSTEM 0x00000008 -#define FILE_DEVICE_FILE_SYSTEM 0x00000009 -#define FILE_DEVICE_NAMED_PIPE 0x00000011 -#define FILE_DEVICE_NETWORK 0x00000012 -#define 
FILE_DEVICE_NETWORK_FILE_SYSTEM 0x00000014 -#define FILE_DEVICE_NULL 0x00000015 -#define FILE_DEVICE_PARALLEL_PORT 0x00000016 -#define FILE_DEVICE_PRINTER 0x00000018 -#define FILE_DEVICE_SERIAL_PORT 0x0000001b -#define FILE_DEVICE_STREAMS 0x0000001e -#define FILE_DEVICE_TAPE 0x0000001f -#define FILE_DEVICE_TAPE_FILE_SYSTEM 0x00000020 -#define FILE_DEVICE_VIRTUAL_DISK 0x00000024 -#define FILE_DEVICE_NETWORK_REDIRECTOR 0x00000028 - -/* Device Characteristics */ -#define FILE_REMOVABLE_MEDIA 0x00000001 -#define FILE_READ_ONLY_DEVICE 0x00000002 -#define FILE_FLOPPY_DISKETTE 0x00000004 -#define FILE_WRITE_ONCE_MEDIA 0x00000008 -#define FILE_REMOTE_DEVICE 0x00000010 -#define FILE_DEVICE_IS_MOUNTED 0x00000020 -#define FILE_VIRTUAL_VOLUME 0x00000040 -#define FILE_DEVICE_SECURE_OPEN 0x00000100 -#define FILE_CHARACTERISTIC_TS_DEVICE 0x00001000 -#define FILE_CHARACTERISTIC_WEBDAV_DEVICE 0x00002000 -#define FILE_PORTABLE_DEVICE 0x00004000 -#define FILE_DEVICE_ALLOW_APPCONTAINER_TRAVERSAL 0x00020000 - /******************************************************************************/ /* QueryFileInfo/QueryPathinfo (also for SetPath/SetFile) data buffer formats */ /******************************************************************************/ diff --git a/fs/smb/client/nterr.c b/fs/smb/client/nterr.c index 8f0bc441295e..77f84767b7df 100644 --- a/fs/smb/client/nterr.c +++ b/fs/smb/client/nterr.c @@ -13,6 +13,7 @@ const struct nt_err_code_struct nt_errs[] = { {"NT_STATUS_OK", NT_STATUS_OK}, + {"NT_STATUS_PENDING", NT_STATUS_PENDING}, {"NT_STATUS_MEDIA_CHANGED", NT_STATUS_MEDIA_CHANGED}, {"NT_STATUS_END_OF_MEDIA", NT_STATUS_END_OF_MEDIA}, {"NT_STATUS_MEDIA_CHECK", NT_STATUS_MEDIA_CHECK}, @@ -544,6 +545,7 @@ const struct nt_err_code_struct nt_errs[] = { {"NT_STATUS_DOMAIN_TRUST_INCONSISTENT", NT_STATUS_DOMAIN_TRUST_INCONSISTENT}, {"NT_STATUS_FS_DRIVER_REQUIRED", NT_STATUS_FS_DRIVER_REQUIRED}, + {"NT_STATUS_INVALID_LOCK_RANGE", NT_STATUS_INVALID_LOCK_RANGE}, {"NT_STATUS_NO_USER_SESSION_KEY", NT_STATUS_NO_USER_SESSION_KEY}, {"NT_STATUS_USER_SESSION_DELETED", NT_STATUS_USER_SESSION_DELETED}, {"NT_STATUS_RESOURCE_LANG_NOT_FOUND", @@ -675,9 +677,12 @@ const struct nt_err_code_struct nt_errs[] = { NT_STATUS_QUOTA_LIST_INCONSISTENT}, {"NT_STATUS_FILE_IS_OFFLINE", NT_STATUS_FILE_IS_OFFLINE}, {"NT_STATUS_NOT_A_REPARSE_POINT", NT_STATUS_NOT_A_REPARSE_POINT}, + {"NT_STATUS_NETWORK_SESSION_EXPIRED", NT_STATUS_NETWORK_SESSION_EXPIRED}, {"NT_STATUS_NO_MORE_ENTRIES", NT_STATUS_NO_MORE_ENTRIES}, {"NT_STATUS_MORE_ENTRIES", NT_STATUS_MORE_ENTRIES}, {"NT_STATUS_SOME_UNMAPPED", NT_STATUS_SOME_UNMAPPED}, {"NT_STATUS_NO_SUCH_JOB", NT_STATUS_NO_SUCH_JOB}, + {"NT_STATUS_NO_PREAUTH_INTEGRITY_HASH_OVERLAP", + NT_STATUS_NO_PREAUTH_INTEGRITY_HASH_OVERLAP}, {NULL, 0} }; diff --git a/fs/smb/client/nterr.h b/fs/smb/client/nterr.h index 180602c22355..81f1a78cf41f 100644 --- a/fs/smb/client/nterr.h +++ b/fs/smb/client/nterr.h @@ -35,518 +35,522 @@ extern const struct nt_err_code_struct nt_errs[]; */ #define NT_STATUS_OK 0x0000 +#define NT_STATUS_PENDING 0x0103 #define NT_STATUS_SOME_UNMAPPED 0x0107 #define NT_STATUS_BUFFER_OVERFLOW 0x80000005 #define NT_STATUS_NO_MORE_ENTRIES 0x8000001a #define NT_STATUS_MEDIA_CHANGED 0x8000001c #define NT_STATUS_END_OF_MEDIA 0x8000001e #define NT_STATUS_MEDIA_CHECK 0x80000020 -#define NT_STATUS_NO_DATA_DETECTED 0x8000001c +#define NT_STATUS_NO_DATA_DETECTED 0x80000022 #define NT_STATUS_STOPPED_ON_SYMLINK 0x8000002d #define NT_STATUS_DEVICE_REQUIRES_CLEANING 0x80000288 -#define NT_STATUS_DEVICE_DOOR_OPEN 
0x80000288 -#define NT_STATUS_UNSUCCESSFUL 0xC0000000 | 0x0001 -#define NT_STATUS_NOT_IMPLEMENTED 0xC0000000 | 0x0002 -#define NT_STATUS_INVALID_INFO_CLASS 0xC0000000 | 0x0003 -#define NT_STATUS_INFO_LENGTH_MISMATCH 0xC0000000 | 0x0004 -#define NT_STATUS_ACCESS_VIOLATION 0xC0000000 | 0x0005 -#define NT_STATUS_IN_PAGE_ERROR 0xC0000000 | 0x0006 -#define NT_STATUS_PAGEFILE_QUOTA 0xC0000000 | 0x0007 -#define NT_STATUS_INVALID_HANDLE 0xC0000000 | 0x0008 -#define NT_STATUS_BAD_INITIAL_STACK 0xC0000000 | 0x0009 -#define NT_STATUS_BAD_INITIAL_PC 0xC0000000 | 0x000a -#define NT_STATUS_INVALID_CID 0xC0000000 | 0x000b -#define NT_STATUS_TIMER_NOT_CANCELED 0xC0000000 | 0x000c -#define NT_STATUS_INVALID_PARAMETER 0xC0000000 | 0x000d -#define NT_STATUS_NO_SUCH_DEVICE 0xC0000000 | 0x000e -#define NT_STATUS_NO_SUCH_FILE 0xC0000000 | 0x000f -#define NT_STATUS_INVALID_DEVICE_REQUEST 0xC0000000 | 0x0010 -#define NT_STATUS_END_OF_FILE 0xC0000000 | 0x0011 -#define NT_STATUS_WRONG_VOLUME 0xC0000000 | 0x0012 -#define NT_STATUS_NO_MEDIA_IN_DEVICE 0xC0000000 | 0x0013 -#define NT_STATUS_UNRECOGNIZED_MEDIA 0xC0000000 | 0x0014 -#define NT_STATUS_NONEXISTENT_SECTOR 0xC0000000 | 0x0015 -#define NT_STATUS_MORE_PROCESSING_REQUIRED 0xC0000000 | 0x0016 -#define NT_STATUS_NO_MEMORY 0xC0000000 | 0x0017 -#define NT_STATUS_CONFLICTING_ADDRESSES 0xC0000000 | 0x0018 -#define NT_STATUS_NOT_MAPPED_VIEW 0xC0000000 | 0x0019 -#define NT_STATUS_UNABLE_TO_FREE_VM 0x80000000 | 0x001a -#define NT_STATUS_UNABLE_TO_DELETE_SECTION 0xC0000000 | 0x001b -#define NT_STATUS_INVALID_SYSTEM_SERVICE 0xC0000000 | 0x001c -#define NT_STATUS_ILLEGAL_INSTRUCTION 0xC0000000 | 0x001d -#define NT_STATUS_INVALID_LOCK_SEQUENCE 0xC0000000 | 0x001e -#define NT_STATUS_INVALID_VIEW_SIZE 0xC0000000 | 0x001f -#define NT_STATUS_INVALID_FILE_FOR_SECTION 0xC0000000 | 0x0020 -#define NT_STATUS_ALREADY_COMMITTED 0xC0000000 | 0x0021 -#define NT_STATUS_ACCESS_DENIED 0xC0000000 | 0x0022 -#define NT_STATUS_BUFFER_TOO_SMALL 0xC0000000 | 0x0023 -#define NT_STATUS_OBJECT_TYPE_MISMATCH 0xC0000000 | 0x0024 -#define NT_STATUS_NONCONTINUABLE_EXCEPTION 0xC0000000 | 0x0025 -#define NT_STATUS_INVALID_DISPOSITION 0xC0000000 | 0x0026 -#define NT_STATUS_UNWIND 0xC0000000 | 0x0027 -#define NT_STATUS_BAD_STACK 0xC0000000 | 0x0028 -#define NT_STATUS_INVALID_UNWIND_TARGET 0xC0000000 | 0x0029 -#define NT_STATUS_NOT_LOCKED 0xC0000000 | 0x002a -#define NT_STATUS_PARITY_ERROR 0xC0000000 | 0x002b -#define NT_STATUS_UNABLE_TO_DECOMMIT_VM 0xC0000000 | 0x002c -#define NT_STATUS_NOT_COMMITTED 0xC0000000 | 0x002d -#define NT_STATUS_INVALID_PORT_ATTRIBUTES 0xC0000000 | 0x002e -#define NT_STATUS_PORT_MESSAGE_TOO_LONG 0xC0000000 | 0x002f -#define NT_STATUS_INVALID_PARAMETER_MIX 0xC0000000 | 0x0030 -#define NT_STATUS_INVALID_QUOTA_LOWER 0xC0000000 | 0x0031 -#define NT_STATUS_DISK_CORRUPT_ERROR 0xC0000000 | 0x0032 -#define NT_STATUS_OBJECT_NAME_INVALID 0xC0000000 | 0x0033 -#define NT_STATUS_OBJECT_NAME_NOT_FOUND 0xC0000000 | 0x0034 -#define NT_STATUS_OBJECT_NAME_COLLISION 0xC0000000 | 0x0035 -#define NT_STATUS_HANDLE_NOT_WAITABLE 0xC0000000 | 0x0036 -#define NT_STATUS_PORT_DISCONNECTED 0xC0000000 | 0x0037 -#define NT_STATUS_DEVICE_ALREADY_ATTACHED 0xC0000000 | 0x0038 -#define NT_STATUS_OBJECT_PATH_INVALID 0xC0000000 | 0x0039 -#define NT_STATUS_OBJECT_PATH_NOT_FOUND 0xC0000000 | 0x003a -#define NT_STATUS_OBJECT_PATH_SYNTAX_BAD 0xC0000000 | 0x003b -#define NT_STATUS_DATA_OVERRUN 0xC0000000 | 0x003c -#define NT_STATUS_DATA_LATE_ERROR 0xC0000000 | 0x003d -#define NT_STATUS_DATA_ERROR 0xC0000000 | 0x003e 
-#define NT_STATUS_CRC_ERROR 0xC0000000 | 0x003f -#define NT_STATUS_SECTION_TOO_BIG 0xC0000000 | 0x0040 -#define NT_STATUS_PORT_CONNECTION_REFUSED 0xC0000000 | 0x0041 -#define NT_STATUS_INVALID_PORT_HANDLE 0xC0000000 | 0x0042 -#define NT_STATUS_SHARING_VIOLATION 0xC0000000 | 0x0043 -#define NT_STATUS_QUOTA_EXCEEDED 0xC0000000 | 0x0044 -#define NT_STATUS_INVALID_PAGE_PROTECTION 0xC0000000 | 0x0045 -#define NT_STATUS_MUTANT_NOT_OWNED 0xC0000000 | 0x0046 -#define NT_STATUS_SEMAPHORE_LIMIT_EXCEEDED 0xC0000000 | 0x0047 -#define NT_STATUS_PORT_ALREADY_SET 0xC0000000 | 0x0048 -#define NT_STATUS_SECTION_NOT_IMAGE 0xC0000000 | 0x0049 -#define NT_STATUS_SUSPEND_COUNT_EXCEEDED 0xC0000000 | 0x004a -#define NT_STATUS_THREAD_IS_TERMINATING 0xC0000000 | 0x004b -#define NT_STATUS_BAD_WORKING_SET_LIMIT 0xC0000000 | 0x004c -#define NT_STATUS_INCOMPATIBLE_FILE_MAP 0xC0000000 | 0x004d -#define NT_STATUS_SECTION_PROTECTION 0xC0000000 | 0x004e -#define NT_STATUS_EAS_NOT_SUPPORTED 0xC0000000 | 0x004f -#define NT_STATUS_EA_TOO_LARGE 0xC0000000 | 0x0050 -#define NT_STATUS_NONEXISTENT_EA_ENTRY 0xC0000000 | 0x0051 -#define NT_STATUS_NO_EAS_ON_FILE 0xC0000000 | 0x0052 -#define NT_STATUS_EA_CORRUPT_ERROR 0xC0000000 | 0x0053 -#define NT_STATUS_FILE_LOCK_CONFLICT 0xC0000000 | 0x0054 -#define NT_STATUS_LOCK_NOT_GRANTED 0xC0000000 | 0x0055 -#define NT_STATUS_DELETE_PENDING 0xC0000000 | 0x0056 -#define NT_STATUS_CTL_FILE_NOT_SUPPORTED 0xC0000000 | 0x0057 -#define NT_STATUS_UNKNOWN_REVISION 0xC0000000 | 0x0058 -#define NT_STATUS_REVISION_MISMATCH 0xC0000000 | 0x0059 -#define NT_STATUS_INVALID_OWNER 0xC0000000 | 0x005a -#define NT_STATUS_INVALID_PRIMARY_GROUP 0xC0000000 | 0x005b -#define NT_STATUS_NO_IMPERSONATION_TOKEN 0xC0000000 | 0x005c -#define NT_STATUS_CANT_DISABLE_MANDATORY 0xC0000000 | 0x005d -#define NT_STATUS_NO_LOGON_SERVERS 0xC0000000 | 0x005e -#define NT_STATUS_NO_SUCH_LOGON_SESSION 0xC0000000 | 0x005f -#define NT_STATUS_NO_SUCH_PRIVILEGE 0xC0000000 | 0x0060 -#define NT_STATUS_PRIVILEGE_NOT_HELD 0xC0000000 | 0x0061 -#define NT_STATUS_INVALID_ACCOUNT_NAME 0xC0000000 | 0x0062 -#define NT_STATUS_USER_EXISTS 0xC0000000 | 0x0063 -#define NT_STATUS_NO_SUCH_USER 0xC0000000 | 0x0064 -#define NT_STATUS_GROUP_EXISTS 0xC0000000 | 0x0065 -#define NT_STATUS_NO_SUCH_GROUP 0xC0000000 | 0x0066 -#define NT_STATUS_MEMBER_IN_GROUP 0xC0000000 | 0x0067 -#define NT_STATUS_MEMBER_NOT_IN_GROUP 0xC0000000 | 0x0068 -#define NT_STATUS_LAST_ADMIN 0xC0000000 | 0x0069 -#define NT_STATUS_WRONG_PASSWORD 0xC0000000 | 0x006a -#define NT_STATUS_ILL_FORMED_PASSWORD 0xC0000000 | 0x006b -#define NT_STATUS_PASSWORD_RESTRICTION 0xC0000000 | 0x006c -#define NT_STATUS_LOGON_FAILURE 0xC0000000 | 0x006d -#define NT_STATUS_ACCOUNT_RESTRICTION 0xC0000000 | 0x006e -#define NT_STATUS_INVALID_LOGON_HOURS 0xC0000000 | 0x006f -#define NT_STATUS_INVALID_WORKSTATION 0xC0000000 | 0x0070 -#define NT_STATUS_PASSWORD_EXPIRED 0xC0000000 | 0x0071 -#define NT_STATUS_ACCOUNT_DISABLED 0xC0000000 | 0x0072 -#define NT_STATUS_NONE_MAPPED 0xC0000000 | 0x0073 -#define NT_STATUS_TOO_MANY_LUIDS_REQUESTED 0xC0000000 | 0x0074 -#define NT_STATUS_LUIDS_EXHAUSTED 0xC0000000 | 0x0075 -#define NT_STATUS_INVALID_SUB_AUTHORITY 0xC0000000 | 0x0076 -#define NT_STATUS_INVALID_ACL 0xC0000000 | 0x0077 -#define NT_STATUS_INVALID_SID 0xC0000000 | 0x0078 -#define NT_STATUS_INVALID_SECURITY_DESCR 0xC0000000 | 0x0079 -#define NT_STATUS_PROCEDURE_NOT_FOUND 0xC0000000 | 0x007a -#define NT_STATUS_INVALID_IMAGE_FORMAT 0xC0000000 | 0x007b -#define NT_STATUS_NO_TOKEN 0xC0000000 | 0x007c -#define 
NT_STATUS_BAD_INHERITANCE_ACL 0xC0000000 | 0x007d -#define NT_STATUS_RANGE_NOT_LOCKED 0xC0000000 | 0x007e -#define NT_STATUS_DISK_FULL 0xC0000000 | 0x007f -#define NT_STATUS_SERVER_DISABLED 0xC0000000 | 0x0080 -#define NT_STATUS_SERVER_NOT_DISABLED 0xC0000000 | 0x0081 -#define NT_STATUS_TOO_MANY_GUIDS_REQUESTED 0xC0000000 | 0x0082 -#define NT_STATUS_GUIDS_EXHAUSTED 0xC0000000 | 0x0083 -#define NT_STATUS_INVALID_ID_AUTHORITY 0xC0000000 | 0x0084 -#define NT_STATUS_AGENTS_EXHAUSTED 0xC0000000 | 0x0085 -#define NT_STATUS_INVALID_VOLUME_LABEL 0xC0000000 | 0x0086 -#define NT_STATUS_SECTION_NOT_EXTENDED 0xC0000000 | 0x0087 -#define NT_STATUS_NOT_MAPPED_DATA 0xC0000000 | 0x0088 -#define NT_STATUS_RESOURCE_DATA_NOT_FOUND 0xC0000000 | 0x0089 -#define NT_STATUS_RESOURCE_TYPE_NOT_FOUND 0xC0000000 | 0x008a -#define NT_STATUS_RESOURCE_NAME_NOT_FOUND 0xC0000000 | 0x008b -#define NT_STATUS_ARRAY_BOUNDS_EXCEEDED 0xC0000000 | 0x008c -#define NT_STATUS_FLOAT_DENORMAL_OPERAND 0xC0000000 | 0x008d -#define NT_STATUS_FLOAT_DIVIDE_BY_ZERO 0xC0000000 | 0x008e -#define NT_STATUS_FLOAT_INEXACT_RESULT 0xC0000000 | 0x008f -#define NT_STATUS_FLOAT_INVALID_OPERATION 0xC0000000 | 0x0090 -#define NT_STATUS_FLOAT_OVERFLOW 0xC0000000 | 0x0091 -#define NT_STATUS_FLOAT_STACK_CHECK 0xC0000000 | 0x0092 -#define NT_STATUS_FLOAT_UNDERFLOW 0xC0000000 | 0x0093 -#define NT_STATUS_INTEGER_DIVIDE_BY_ZERO 0xC0000000 | 0x0094 -#define NT_STATUS_INTEGER_OVERFLOW 0xC0000000 | 0x0095 -#define NT_STATUS_PRIVILEGED_INSTRUCTION 0xC0000000 | 0x0096 -#define NT_STATUS_TOO_MANY_PAGING_FILES 0xC0000000 | 0x0097 -#define NT_STATUS_FILE_INVALID 0xC0000000 | 0x0098 -#define NT_STATUS_ALLOTTED_SPACE_EXCEEDED 0xC0000000 | 0x0099 -#define NT_STATUS_INSUFFICIENT_RESOURCES 0xC0000000 | 0x009a -#define NT_STATUS_DFS_EXIT_PATH_FOUND 0xC0000000 | 0x009b -#define NT_STATUS_DEVICE_DATA_ERROR 0xC0000000 | 0x009c -#define NT_STATUS_DEVICE_NOT_CONNECTED 0xC0000000 | 0x009d -#define NT_STATUS_DEVICE_POWER_FAILURE 0xC0000000 | 0x009e -#define NT_STATUS_FREE_VM_NOT_AT_BASE 0xC0000000 | 0x009f -#define NT_STATUS_MEMORY_NOT_ALLOCATED 0xC0000000 | 0x00a0 -#define NT_STATUS_WORKING_SET_QUOTA 0xC0000000 | 0x00a1 -#define NT_STATUS_MEDIA_WRITE_PROTECTED 0xC0000000 | 0x00a2 -#define NT_STATUS_DEVICE_NOT_READY 0xC0000000 | 0x00a3 -#define NT_STATUS_INVALID_GROUP_ATTRIBUTES 0xC0000000 | 0x00a4 -#define NT_STATUS_BAD_IMPERSONATION_LEVEL 0xC0000000 | 0x00a5 -#define NT_STATUS_CANT_OPEN_ANONYMOUS 0xC0000000 | 0x00a6 -#define NT_STATUS_BAD_VALIDATION_CLASS 0xC0000000 | 0x00a7 -#define NT_STATUS_BAD_TOKEN_TYPE 0xC0000000 | 0x00a8 -#define NT_STATUS_BAD_MASTER_BOOT_RECORD 0xC0000000 | 0x00a9 -#define NT_STATUS_INSTRUCTION_MISALIGNMENT 0xC0000000 | 0x00aa -#define NT_STATUS_INSTANCE_NOT_AVAILABLE 0xC0000000 | 0x00ab -#define NT_STATUS_PIPE_NOT_AVAILABLE 0xC0000000 | 0x00ac -#define NT_STATUS_INVALID_PIPE_STATE 0xC0000000 | 0x00ad -#define NT_STATUS_PIPE_BUSY 0xC0000000 | 0x00ae -#define NT_STATUS_ILLEGAL_FUNCTION 0xC0000000 | 0x00af -#define NT_STATUS_PIPE_DISCONNECTED 0xC0000000 | 0x00b0 -#define NT_STATUS_PIPE_CLOSING 0xC0000000 | 0x00b1 -#define NT_STATUS_PIPE_CONNECTED 0xC0000000 | 0x00b2 -#define NT_STATUS_PIPE_LISTENING 0xC0000000 | 0x00b3 -#define NT_STATUS_INVALID_READ_MODE 0xC0000000 | 0x00b4 -#define NT_STATUS_IO_TIMEOUT 0xC0000000 | 0x00b5 -#define NT_STATUS_FILE_FORCED_CLOSED 0xC0000000 | 0x00b6 -#define NT_STATUS_PROFILING_NOT_STARTED 0xC0000000 | 0x00b7 -#define NT_STATUS_PROFILING_NOT_STOPPED 0xC0000000 | 0x00b8 -#define NT_STATUS_COULD_NOT_INTERPRET 0xC0000000 | 
0x00b9 -#define NT_STATUS_FILE_IS_A_DIRECTORY 0xC0000000 | 0x00ba -#define NT_STATUS_NOT_SUPPORTED 0xC0000000 | 0x00bb -#define NT_STATUS_REMOTE_NOT_LISTENING 0xC0000000 | 0x00bc -#define NT_STATUS_DUPLICATE_NAME 0xC0000000 | 0x00bd -#define NT_STATUS_BAD_NETWORK_PATH 0xC0000000 | 0x00be -#define NT_STATUS_NETWORK_BUSY 0xC0000000 | 0x00bf -#define NT_STATUS_DEVICE_DOES_NOT_EXIST 0xC0000000 | 0x00c0 -#define NT_STATUS_TOO_MANY_COMMANDS 0xC0000000 | 0x00c1 -#define NT_STATUS_ADAPTER_HARDWARE_ERROR 0xC0000000 | 0x00c2 -#define NT_STATUS_INVALID_NETWORK_RESPONSE 0xC0000000 | 0x00c3 -#define NT_STATUS_UNEXPECTED_NETWORK_ERROR 0xC0000000 | 0x00c4 -#define NT_STATUS_BAD_REMOTE_ADAPTER 0xC0000000 | 0x00c5 -#define NT_STATUS_PRINT_QUEUE_FULL 0xC0000000 | 0x00c6 -#define NT_STATUS_NO_SPOOL_SPACE 0xC0000000 | 0x00c7 -#define NT_STATUS_PRINT_CANCELLED 0xC0000000 | 0x00c8 -#define NT_STATUS_NETWORK_NAME_DELETED 0xC0000000 | 0x00c9 -#define NT_STATUS_NETWORK_ACCESS_DENIED 0xC0000000 | 0x00ca -#define NT_STATUS_BAD_DEVICE_TYPE 0xC0000000 | 0x00cb -#define NT_STATUS_BAD_NETWORK_NAME 0xC0000000 | 0x00cc -#define NT_STATUS_TOO_MANY_NAMES 0xC0000000 | 0x00cd -#define NT_STATUS_TOO_MANY_SESSIONS 0xC0000000 | 0x00ce -#define NT_STATUS_SHARING_PAUSED 0xC0000000 | 0x00cf -#define NT_STATUS_REQUEST_NOT_ACCEPTED 0xC0000000 | 0x00d0 -#define NT_STATUS_REDIRECTOR_PAUSED 0xC0000000 | 0x00d1 -#define NT_STATUS_NET_WRITE_FAULT 0xC0000000 | 0x00d2 -#define NT_STATUS_PROFILING_AT_LIMIT 0xC0000000 | 0x00d3 -#define NT_STATUS_NOT_SAME_DEVICE 0xC0000000 | 0x00d4 -#define NT_STATUS_FILE_RENAMED 0xC0000000 | 0x00d5 -#define NT_STATUS_VIRTUAL_CIRCUIT_CLOSED 0xC0000000 | 0x00d6 -#define NT_STATUS_NO_SECURITY_ON_OBJECT 0xC0000000 | 0x00d7 -#define NT_STATUS_CANT_WAIT 0xC0000000 | 0x00d8 -#define NT_STATUS_PIPE_EMPTY 0xC0000000 | 0x00d9 -#define NT_STATUS_CANT_ACCESS_DOMAIN_INFO 0xC0000000 | 0x00da -#define NT_STATUS_CANT_TERMINATE_SELF 0xC0000000 | 0x00db -#define NT_STATUS_INVALID_SERVER_STATE 0xC0000000 | 0x00dc -#define NT_STATUS_INVALID_DOMAIN_STATE 0xC0000000 | 0x00dd -#define NT_STATUS_INVALID_DOMAIN_ROLE 0xC0000000 | 0x00de -#define NT_STATUS_NO_SUCH_DOMAIN 0xC0000000 | 0x00df -#define NT_STATUS_DOMAIN_EXISTS 0xC0000000 | 0x00e0 -#define NT_STATUS_DOMAIN_LIMIT_EXCEEDED 0xC0000000 | 0x00e1 -#define NT_STATUS_OPLOCK_NOT_GRANTED 0xC0000000 | 0x00e2 -#define NT_STATUS_INVALID_OPLOCK_PROTOCOL 0xC0000000 | 0x00e3 -#define NT_STATUS_INTERNAL_DB_CORRUPTION 0xC0000000 | 0x00e4 -#define NT_STATUS_INTERNAL_ERROR 0xC0000000 | 0x00e5 -#define NT_STATUS_GENERIC_NOT_MAPPED 0xC0000000 | 0x00e6 -#define NT_STATUS_BAD_DESCRIPTOR_FORMAT 0xC0000000 | 0x00e7 -#define NT_STATUS_INVALID_USER_BUFFER 0xC0000000 | 0x00e8 -#define NT_STATUS_UNEXPECTED_IO_ERROR 0xC0000000 | 0x00e9 -#define NT_STATUS_UNEXPECTED_MM_CREATE_ERR 0xC0000000 | 0x00ea -#define NT_STATUS_UNEXPECTED_MM_MAP_ERROR 0xC0000000 | 0x00eb -#define NT_STATUS_UNEXPECTED_MM_EXTEND_ERR 0xC0000000 | 0x00ec -#define NT_STATUS_NOT_LOGON_PROCESS 0xC0000000 | 0x00ed -#define NT_STATUS_LOGON_SESSION_EXISTS 0xC0000000 | 0x00ee -#define NT_STATUS_INVALID_PARAMETER_1 0xC0000000 | 0x00ef -#define NT_STATUS_INVALID_PARAMETER_2 0xC0000000 | 0x00f0 -#define NT_STATUS_INVALID_PARAMETER_3 0xC0000000 | 0x00f1 -#define NT_STATUS_INVALID_PARAMETER_4 0xC0000000 | 0x00f2 -#define NT_STATUS_INVALID_PARAMETER_5 0xC0000000 | 0x00f3 -#define NT_STATUS_INVALID_PARAMETER_6 0xC0000000 | 0x00f4 -#define NT_STATUS_INVALID_PARAMETER_7 0xC0000000 | 0x00f5 -#define NT_STATUS_INVALID_PARAMETER_8 0xC0000000 | 0x00f6 
-#define NT_STATUS_INVALID_PARAMETER_9 0xC0000000 | 0x00f7 -#define NT_STATUS_INVALID_PARAMETER_10 0xC0000000 | 0x00f8 -#define NT_STATUS_INVALID_PARAMETER_11 0xC0000000 | 0x00f9 -#define NT_STATUS_INVALID_PARAMETER_12 0xC0000000 | 0x00fa -#define NT_STATUS_REDIRECTOR_NOT_STARTED 0xC0000000 | 0x00fb -#define NT_STATUS_REDIRECTOR_STARTED 0xC0000000 | 0x00fc -#define NT_STATUS_STACK_OVERFLOW 0xC0000000 | 0x00fd -#define NT_STATUS_NO_SUCH_PACKAGE 0xC0000000 | 0x00fe -#define NT_STATUS_BAD_FUNCTION_TABLE 0xC0000000 | 0x00ff -#define NT_STATUS_DIRECTORY_NOT_EMPTY 0xC0000000 | 0x0101 -#define NT_STATUS_FILE_CORRUPT_ERROR 0xC0000000 | 0x0102 -#define NT_STATUS_NOT_A_DIRECTORY 0xC0000000 | 0x0103 -#define NT_STATUS_BAD_LOGON_SESSION_STATE 0xC0000000 | 0x0104 -#define NT_STATUS_LOGON_SESSION_COLLISION 0xC0000000 | 0x0105 -#define NT_STATUS_NAME_TOO_LONG 0xC0000000 | 0x0106 -#define NT_STATUS_FILES_OPEN 0xC0000000 | 0x0107 -#define NT_STATUS_CONNECTION_IN_USE 0xC0000000 | 0x0108 -#define NT_STATUS_MESSAGE_NOT_FOUND 0xC0000000 | 0x0109 -#define NT_STATUS_PROCESS_IS_TERMINATING 0xC0000000 | 0x010a -#define NT_STATUS_INVALID_LOGON_TYPE 0xC0000000 | 0x010b -#define NT_STATUS_NO_GUID_TRANSLATION 0xC0000000 | 0x010c -#define NT_STATUS_CANNOT_IMPERSONATE 0xC0000000 | 0x010d -#define NT_STATUS_IMAGE_ALREADY_LOADED 0xC0000000 | 0x010e -#define NT_STATUS_ABIOS_NOT_PRESENT 0xC0000000 | 0x010f -#define NT_STATUS_ABIOS_LID_NOT_EXIST 0xC0000000 | 0x0110 -#define NT_STATUS_ABIOS_LID_ALREADY_OWNED 0xC0000000 | 0x0111 -#define NT_STATUS_ABIOS_NOT_LID_OWNER 0xC0000000 | 0x0112 -#define NT_STATUS_ABIOS_INVALID_COMMAND 0xC0000000 | 0x0113 -#define NT_STATUS_ABIOS_INVALID_LID 0xC0000000 | 0x0114 -#define NT_STATUS_ABIOS_SELECTOR_NOT_AVAILABLE 0xC0000000 | 0x0115 -#define NT_STATUS_ABIOS_INVALID_SELECTOR 0xC0000000 | 0x0116 -#define NT_STATUS_NO_LDT 0xC0000000 | 0x0117 -#define NT_STATUS_INVALID_LDT_SIZE 0xC0000000 | 0x0118 -#define NT_STATUS_INVALID_LDT_OFFSET 0xC0000000 | 0x0119 -#define NT_STATUS_INVALID_LDT_DESCRIPTOR 0xC0000000 | 0x011a -#define NT_STATUS_INVALID_IMAGE_NE_FORMAT 0xC0000000 | 0x011b -#define NT_STATUS_RXACT_INVALID_STATE 0xC0000000 | 0x011c -#define NT_STATUS_RXACT_COMMIT_FAILURE 0xC0000000 | 0x011d -#define NT_STATUS_MAPPED_FILE_SIZE_ZERO 0xC0000000 | 0x011e -#define NT_STATUS_TOO_MANY_OPENED_FILES 0xC0000000 | 0x011f -#define NT_STATUS_CANCELLED 0xC0000000 | 0x0120 -#define NT_STATUS_CANNOT_DELETE 0xC0000000 | 0x0121 -#define NT_STATUS_INVALID_COMPUTER_NAME 0xC0000000 | 0x0122 -#define NT_STATUS_FILE_DELETED 0xC0000000 | 0x0123 -#define NT_STATUS_SPECIAL_ACCOUNT 0xC0000000 | 0x0124 -#define NT_STATUS_SPECIAL_GROUP 0xC0000000 | 0x0125 -#define NT_STATUS_SPECIAL_USER 0xC0000000 | 0x0126 -#define NT_STATUS_MEMBERS_PRIMARY_GROUP 0xC0000000 | 0x0127 -#define NT_STATUS_FILE_CLOSED 0xC0000000 | 0x0128 -#define NT_STATUS_TOO_MANY_THREADS 0xC0000000 | 0x0129 -#define NT_STATUS_THREAD_NOT_IN_PROCESS 0xC0000000 | 0x012a -#define NT_STATUS_TOKEN_ALREADY_IN_USE 0xC0000000 | 0x012b -#define NT_STATUS_PAGEFILE_QUOTA_EXCEEDED 0xC0000000 | 0x012c -#define NT_STATUS_COMMITMENT_LIMIT 0xC0000000 | 0x012d -#define NT_STATUS_INVALID_IMAGE_LE_FORMAT 0xC0000000 | 0x012e -#define NT_STATUS_INVALID_IMAGE_NOT_MZ 0xC0000000 | 0x012f -#define NT_STATUS_INVALID_IMAGE_PROTECT 0xC0000000 | 0x0130 -#define NT_STATUS_INVALID_IMAGE_WIN_16 0xC0000000 | 0x0131 -#define NT_STATUS_LOGON_SERVER_CONFLICT 0xC0000000 | 0x0132 -#define NT_STATUS_TIME_DIFFERENCE_AT_DC 0xC0000000 | 0x0133 -#define NT_STATUS_SYNCHRONIZATION_REQUIRED 0xC0000000 
| 0x0134 -#define NT_STATUS_DLL_NOT_FOUND 0xC0000000 | 0x0135 -#define NT_STATUS_OPEN_FAILED 0xC0000000 | 0x0136 -#define NT_STATUS_IO_PRIVILEGE_FAILED 0xC0000000 | 0x0137 -#define NT_STATUS_ORDINAL_NOT_FOUND 0xC0000000 | 0x0138 -#define NT_STATUS_ENTRYPOINT_NOT_FOUND 0xC0000000 | 0x0139 -#define NT_STATUS_CONTROL_C_EXIT 0xC0000000 | 0x013a -#define NT_STATUS_LOCAL_DISCONNECT 0xC0000000 | 0x013b -#define NT_STATUS_REMOTE_DISCONNECT 0xC0000000 | 0x013c -#define NT_STATUS_REMOTE_RESOURCES 0xC0000000 | 0x013d -#define NT_STATUS_LINK_FAILED 0xC0000000 | 0x013e -#define NT_STATUS_LINK_TIMEOUT 0xC0000000 | 0x013f -#define NT_STATUS_INVALID_CONNECTION 0xC0000000 | 0x0140 -#define NT_STATUS_INVALID_ADDRESS 0xC0000000 | 0x0141 -#define NT_STATUS_DLL_INIT_FAILED 0xC0000000 | 0x0142 -#define NT_STATUS_MISSING_SYSTEMFILE 0xC0000000 | 0x0143 -#define NT_STATUS_UNHANDLED_EXCEPTION 0xC0000000 | 0x0144 -#define NT_STATUS_APP_INIT_FAILURE 0xC0000000 | 0x0145 -#define NT_STATUS_PAGEFILE_CREATE_FAILED 0xC0000000 | 0x0146 -#define NT_STATUS_NO_PAGEFILE 0xC0000000 | 0x0147 -#define NT_STATUS_INVALID_LEVEL 0xC0000000 | 0x0148 -#define NT_STATUS_WRONG_PASSWORD_CORE 0xC0000000 | 0x0149 -#define NT_STATUS_ILLEGAL_FLOAT_CONTEXT 0xC0000000 | 0x014a -#define NT_STATUS_PIPE_BROKEN 0xC0000000 | 0x014b -#define NT_STATUS_REGISTRY_CORRUPT 0xC0000000 | 0x014c -#define NT_STATUS_REGISTRY_IO_FAILED 0xC0000000 | 0x014d -#define NT_STATUS_NO_EVENT_PAIR 0xC0000000 | 0x014e -#define NT_STATUS_UNRECOGNIZED_VOLUME 0xC0000000 | 0x014f -#define NT_STATUS_SERIAL_NO_DEVICE_INITED 0xC0000000 | 0x0150 -#define NT_STATUS_NO_SUCH_ALIAS 0xC0000000 | 0x0151 -#define NT_STATUS_MEMBER_NOT_IN_ALIAS 0xC0000000 | 0x0152 -#define NT_STATUS_MEMBER_IN_ALIAS 0xC0000000 | 0x0153 -#define NT_STATUS_ALIAS_EXISTS 0xC0000000 | 0x0154 -#define NT_STATUS_LOGON_NOT_GRANTED 0xC0000000 | 0x0155 -#define NT_STATUS_TOO_MANY_SECRETS 0xC0000000 | 0x0156 -#define NT_STATUS_SECRET_TOO_LONG 0xC0000000 | 0x0157 -#define NT_STATUS_INTERNAL_DB_ERROR 0xC0000000 | 0x0158 -#define NT_STATUS_FULLSCREEN_MODE 0xC0000000 | 0x0159 -#define NT_STATUS_TOO_MANY_CONTEXT_IDS 0xC0000000 | 0x015a -#define NT_STATUS_LOGON_TYPE_NOT_GRANTED 0xC0000000 | 0x015b -#define NT_STATUS_NOT_REGISTRY_FILE 0xC0000000 | 0x015c -#define NT_STATUS_NT_CROSS_ENCRYPTION_REQUIRED 0xC0000000 | 0x015d -#define NT_STATUS_DOMAIN_CTRLR_CONFIG_ERROR 0xC0000000 | 0x015e -#define NT_STATUS_FT_MISSING_MEMBER 0xC0000000 | 0x015f -#define NT_STATUS_ILL_FORMED_SERVICE_ENTRY 0xC0000000 | 0x0160 -#define NT_STATUS_ILLEGAL_CHARACTER 0xC0000000 | 0x0161 -#define NT_STATUS_UNMAPPABLE_CHARACTER 0xC0000000 | 0x0162 -#define NT_STATUS_UNDEFINED_CHARACTER 0xC0000000 | 0x0163 -#define NT_STATUS_FLOPPY_VOLUME 0xC0000000 | 0x0164 -#define NT_STATUS_FLOPPY_ID_MARK_NOT_FOUND 0xC0000000 | 0x0165 -#define NT_STATUS_FLOPPY_WRONG_CYLINDER 0xC0000000 | 0x0166 -#define NT_STATUS_FLOPPY_UNKNOWN_ERROR 0xC0000000 | 0x0167 -#define NT_STATUS_FLOPPY_BAD_REGISTERS 0xC0000000 | 0x0168 -#define NT_STATUS_DISK_RECALIBRATE_FAILED 0xC0000000 | 0x0169 -#define NT_STATUS_DISK_OPERATION_FAILED 0xC0000000 | 0x016a -#define NT_STATUS_DISK_RESET_FAILED 0xC0000000 | 0x016b -#define NT_STATUS_SHARED_IRQ_BUSY 0xC0000000 | 0x016c -#define NT_STATUS_FT_ORPHANING 0xC0000000 | 0x016d -#define NT_STATUS_PARTITION_FAILURE 0xC0000000 | 0x0172 -#define NT_STATUS_INVALID_BLOCK_LENGTH 0xC0000000 | 0x0173 -#define NT_STATUS_DEVICE_NOT_PARTITIONED 0xC0000000 | 0x0174 -#define NT_STATUS_UNABLE_TO_LOCK_MEDIA 0xC0000000 | 0x0175 -#define 
NT_STATUS_UNABLE_TO_UNLOAD_MEDIA 0xC0000000 | 0x0176 -#define NT_STATUS_EOM_OVERFLOW 0xC0000000 | 0x0177 -#define NT_STATUS_NO_MEDIA 0xC0000000 | 0x0178 -#define NT_STATUS_NO_SUCH_MEMBER 0xC0000000 | 0x017a -#define NT_STATUS_INVALID_MEMBER 0xC0000000 | 0x017b -#define NT_STATUS_KEY_DELETED 0xC0000000 | 0x017c -#define NT_STATUS_NO_LOG_SPACE 0xC0000000 | 0x017d -#define NT_STATUS_TOO_MANY_SIDS 0xC0000000 | 0x017e -#define NT_STATUS_LM_CROSS_ENCRYPTION_REQUIRED 0xC0000000 | 0x017f -#define NT_STATUS_KEY_HAS_CHILDREN 0xC0000000 | 0x0180 -#define NT_STATUS_CHILD_MUST_BE_VOLATILE 0xC0000000 | 0x0181 -#define NT_STATUS_DEVICE_CONFIGURATION_ERROR 0xC0000000 | 0x0182 -#define NT_STATUS_DRIVER_INTERNAL_ERROR 0xC0000000 | 0x0183 -#define NT_STATUS_INVALID_DEVICE_STATE 0xC0000000 | 0x0184 -#define NT_STATUS_IO_DEVICE_ERROR 0xC0000000 | 0x0185 -#define NT_STATUS_DEVICE_PROTOCOL_ERROR 0xC0000000 | 0x0186 -#define NT_STATUS_BACKUP_CONTROLLER 0xC0000000 | 0x0187 -#define NT_STATUS_LOG_FILE_FULL 0xC0000000 | 0x0188 -#define NT_STATUS_TOO_LATE 0xC0000000 | 0x0189 -#define NT_STATUS_NO_TRUST_LSA_SECRET 0xC0000000 | 0x018a -#define NT_STATUS_NO_TRUST_SAM_ACCOUNT 0xC0000000 | 0x018b -#define NT_STATUS_TRUSTED_DOMAIN_FAILURE 0xC0000000 | 0x018c -#define NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE 0xC0000000 | 0x018d -#define NT_STATUS_EVENTLOG_FILE_CORRUPT 0xC0000000 | 0x018e -#define NT_STATUS_EVENTLOG_CANT_START 0xC0000000 | 0x018f -#define NT_STATUS_TRUST_FAILURE 0xC0000000 | 0x0190 -#define NT_STATUS_MUTANT_LIMIT_EXCEEDED 0xC0000000 | 0x0191 -#define NT_STATUS_NETLOGON_NOT_STARTED 0xC0000000 | 0x0192 -#define NT_STATUS_ACCOUNT_EXPIRED 0xC0000000 | 0x0193 -#define NT_STATUS_POSSIBLE_DEADLOCK 0xC0000000 | 0x0194 -#define NT_STATUS_NETWORK_CREDENTIAL_CONFLICT 0xC0000000 | 0x0195 -#define NT_STATUS_REMOTE_SESSION_LIMIT 0xC0000000 | 0x0196 -#define NT_STATUS_EVENTLOG_FILE_CHANGED 0xC0000000 | 0x0197 -#define NT_STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT 0xC0000000 | 0x0198 -#define NT_STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT 0xC0000000 | 0x0199 -#define NT_STATUS_NOLOGON_SERVER_TRUST_ACCOUNT 0xC0000000 | 0x019a -#define NT_STATUS_DOMAIN_TRUST_INCONSISTENT 0xC0000000 | 0x019b -#define NT_STATUS_FS_DRIVER_REQUIRED 0xC0000000 | 0x019c -#define NT_STATUS_NO_USER_SESSION_KEY 0xC0000000 | 0x0202 -#define NT_STATUS_USER_SESSION_DELETED 0xC0000000 | 0x0203 -#define NT_STATUS_RESOURCE_LANG_NOT_FOUND 0xC0000000 | 0x0204 -#define NT_STATUS_INSUFF_SERVER_RESOURCES 0xC0000000 | 0x0205 -#define NT_STATUS_INVALID_BUFFER_SIZE 0xC0000000 | 0x0206 -#define NT_STATUS_INVALID_ADDRESS_COMPONENT 0xC0000000 | 0x0207 -#define NT_STATUS_INVALID_ADDRESS_WILDCARD 0xC0000000 | 0x0208 -#define NT_STATUS_TOO_MANY_ADDRESSES 0xC0000000 | 0x0209 -#define NT_STATUS_ADDRESS_ALREADY_EXISTS 0xC0000000 | 0x020a -#define NT_STATUS_ADDRESS_CLOSED 0xC0000000 | 0x020b -#define NT_STATUS_CONNECTION_DISCONNECTED 0xC0000000 | 0x020c -#define NT_STATUS_CONNECTION_RESET 0xC0000000 | 0x020d -#define NT_STATUS_TOO_MANY_NODES 0xC0000000 | 0x020e -#define NT_STATUS_TRANSACTION_ABORTED 0xC0000000 | 0x020f -#define NT_STATUS_TRANSACTION_TIMED_OUT 0xC0000000 | 0x0210 -#define NT_STATUS_TRANSACTION_NO_RELEASE 0xC0000000 | 0x0211 -#define NT_STATUS_TRANSACTION_NO_MATCH 0xC0000000 | 0x0212 -#define NT_STATUS_TRANSACTION_RESPONDED 0xC0000000 | 0x0213 -#define NT_STATUS_TRANSACTION_INVALID_ID 0xC0000000 | 0x0214 -#define NT_STATUS_TRANSACTION_INVALID_TYPE 0xC0000000 | 0x0215 -#define NT_STATUS_NOT_SERVER_SESSION 0xC0000000 | 0x0216 -#define NT_STATUS_NOT_CLIENT_SESSION 
0xC0000000 | 0x0217 -#define NT_STATUS_CANNOT_LOAD_REGISTRY_FILE 0xC0000000 | 0x0218 -#define NT_STATUS_DEBUG_ATTACH_FAILED 0xC0000000 | 0x0219 -#define NT_STATUS_SYSTEM_PROCESS_TERMINATED 0xC0000000 | 0x021a -#define NT_STATUS_DATA_NOT_ACCEPTED 0xC0000000 | 0x021b -#define NT_STATUS_NO_BROWSER_SERVERS_FOUND 0xC0000000 | 0x021c -#define NT_STATUS_VDM_HARD_ERROR 0xC0000000 | 0x021d -#define NT_STATUS_DRIVER_CANCEL_TIMEOUT 0xC0000000 | 0x021e -#define NT_STATUS_REPLY_MESSAGE_MISMATCH 0xC0000000 | 0x021f -#define NT_STATUS_MAPPED_ALIGNMENT 0xC0000000 | 0x0220 -#define NT_STATUS_IMAGE_CHECKSUM_MISMATCH 0xC0000000 | 0x0221 -#define NT_STATUS_LOST_WRITEBEHIND_DATA 0xC0000000 | 0x0222 -#define NT_STATUS_CLIENT_SERVER_PARAMETERS_INVALID 0xC0000000 | 0x0223 -#define NT_STATUS_PASSWORD_MUST_CHANGE 0xC0000000 | 0x0224 -#define NT_STATUS_NOT_FOUND 0xC0000000 | 0x0225 -#define NT_STATUS_NOT_TINY_STREAM 0xC0000000 | 0x0226 -#define NT_STATUS_RECOVERY_FAILURE 0xC0000000 | 0x0227 -#define NT_STATUS_STACK_OVERFLOW_READ 0xC0000000 | 0x0228 -#define NT_STATUS_FAIL_CHECK 0xC0000000 | 0x0229 -#define NT_STATUS_DUPLICATE_OBJECTID 0xC0000000 | 0x022a -#define NT_STATUS_OBJECTID_EXISTS 0xC0000000 | 0x022b -#define NT_STATUS_CONVERT_TO_LARGE 0xC0000000 | 0x022c -#define NT_STATUS_RETRY 0xC0000000 | 0x022d -#define NT_STATUS_FOUND_OUT_OF_SCOPE 0xC0000000 | 0x022e -#define NT_STATUS_ALLOCATE_BUCKET 0xC0000000 | 0x022f -#define NT_STATUS_PROPSET_NOT_FOUND 0xC0000000 | 0x0230 -#define NT_STATUS_MARSHALL_OVERFLOW 0xC0000000 | 0x0231 -#define NT_STATUS_INVALID_VARIANT 0xC0000000 | 0x0232 -#define NT_STATUS_DOMAIN_CONTROLLER_NOT_FOUND 0xC0000000 | 0x0233 -#define NT_STATUS_ACCOUNT_LOCKED_OUT 0xC0000000 | 0x0234 -#define NT_STATUS_HANDLE_NOT_CLOSABLE 0xC0000000 | 0x0235 -#define NT_STATUS_CONNECTION_REFUSED 0xC0000000 | 0x0236 -#define NT_STATUS_GRACEFUL_DISCONNECT 0xC0000000 | 0x0237 -#define NT_STATUS_ADDRESS_ALREADY_ASSOCIATED 0xC0000000 | 0x0238 -#define NT_STATUS_ADDRESS_NOT_ASSOCIATED 0xC0000000 | 0x0239 -#define NT_STATUS_CONNECTION_INVALID 0xC0000000 | 0x023a -#define NT_STATUS_CONNECTION_ACTIVE 0xC0000000 | 0x023b -#define NT_STATUS_NETWORK_UNREACHABLE 0xC0000000 | 0x023c -#define NT_STATUS_HOST_UNREACHABLE 0xC0000000 | 0x023d -#define NT_STATUS_PROTOCOL_UNREACHABLE 0xC0000000 | 0x023e -#define NT_STATUS_PORT_UNREACHABLE 0xC0000000 | 0x023f -#define NT_STATUS_REQUEST_ABORTED 0xC0000000 | 0x0240 -#define NT_STATUS_CONNECTION_ABORTED 0xC0000000 | 0x0241 -#define NT_STATUS_BAD_COMPRESSION_BUFFER 0xC0000000 | 0x0242 -#define NT_STATUS_USER_MAPPED_FILE 0xC0000000 | 0x0243 -#define NT_STATUS_AUDIT_FAILED 0xC0000000 | 0x0244 -#define NT_STATUS_TIMER_RESOLUTION_NOT_SET 0xC0000000 | 0x0245 -#define NT_STATUS_CONNECTION_COUNT_LIMIT 0xC0000000 | 0x0246 -#define NT_STATUS_LOGIN_TIME_RESTRICTION 0xC0000000 | 0x0247 -#define NT_STATUS_LOGIN_WKSTA_RESTRICTION 0xC0000000 | 0x0248 -#define NT_STATUS_IMAGE_MP_UP_MISMATCH 0xC0000000 | 0x0249 -#define NT_STATUS_INSUFFICIENT_LOGON_INFO 0xC0000000 | 0x0250 -#define NT_STATUS_BAD_DLL_ENTRYPOINT 0xC0000000 | 0x0251 -#define NT_STATUS_BAD_SERVICE_ENTRYPOINT 0xC0000000 | 0x0252 -#define NT_STATUS_LPC_REPLY_LOST 0xC0000000 | 0x0253 -#define NT_STATUS_IP_ADDRESS_CONFLICT1 0xC0000000 | 0x0254 -#define NT_STATUS_IP_ADDRESS_CONFLICT2 0xC0000000 | 0x0255 -#define NT_STATUS_REGISTRY_QUOTA_LIMIT 0xC0000000 | 0x0256 -#define NT_STATUS_PATH_NOT_COVERED 0xC0000000 | 0x0257 -#define NT_STATUS_NO_CALLBACK_ACTIVE 0xC0000000 | 0x0258 -#define NT_STATUS_LICENSE_QUOTA_EXCEEDED 0xC0000000 | 0x0259 -#define 
NT_STATUS_PWD_TOO_SHORT 0xC0000000 | 0x025a -#define NT_STATUS_PWD_TOO_RECENT 0xC0000000 | 0x025b -#define NT_STATUS_PWD_HISTORY_CONFLICT 0xC0000000 | 0x025c -#define NT_STATUS_PLUGPLAY_NO_DEVICE 0xC0000000 | 0x025e -#define NT_STATUS_UNSUPPORTED_COMPRESSION 0xC0000000 | 0x025f -#define NT_STATUS_INVALID_HW_PROFILE 0xC0000000 | 0x0260 -#define NT_STATUS_INVALID_PLUGPLAY_DEVICE_PATH 0xC0000000 | 0x0261 -#define NT_STATUS_DRIVER_ORDINAL_NOT_FOUND 0xC0000000 | 0x0262 -#define NT_STATUS_DRIVER_ENTRYPOINT_NOT_FOUND 0xC0000000 | 0x0263 -#define NT_STATUS_RESOURCE_NOT_OWNED 0xC0000000 | 0x0264 -#define NT_STATUS_TOO_MANY_LINKS 0xC0000000 | 0x0265 -#define NT_STATUS_QUOTA_LIST_INCONSISTENT 0xC0000000 | 0x0266 -#define NT_STATUS_FILE_IS_OFFLINE 0xC0000000 | 0x0267 -#define NT_STATUS_NOT_A_REPARSE_POINT 0xC0000000 | 0x0275 -#define NT_STATUS_NO_SUCH_JOB 0xC0000000 | 0xEDE /* scheduler */ +#define NT_STATUS_DEVICE_DOOR_OPEN 0x80000289 +#define NT_STATUS_UNSUCCESSFUL (0xC0000000 | 0x0001) +#define NT_STATUS_NOT_IMPLEMENTED (0xC0000000 | 0x0002) +#define NT_STATUS_INVALID_INFO_CLASS (0xC0000000 | 0x0003) +#define NT_STATUS_INFO_LENGTH_MISMATCH (0xC0000000 | 0x0004) +#define NT_STATUS_ACCESS_VIOLATION (0xC0000000 | 0x0005) +#define NT_STATUS_IN_PAGE_ERROR (0xC0000000 | 0x0006) +#define NT_STATUS_PAGEFILE_QUOTA (0xC0000000 | 0x0007) +#define NT_STATUS_INVALID_HANDLE (0xC0000000 | 0x0008) +#define NT_STATUS_BAD_INITIAL_STACK (0xC0000000 | 0x0009) +#define NT_STATUS_BAD_INITIAL_PC (0xC0000000 | 0x000a) +#define NT_STATUS_INVALID_CID (0xC0000000 | 0x000b) +#define NT_STATUS_TIMER_NOT_CANCELED (0xC0000000 | 0x000c) +#define NT_STATUS_INVALID_PARAMETER (0xC0000000 | 0x000d) +#define NT_STATUS_NO_SUCH_DEVICE (0xC0000000 | 0x000e) +#define NT_STATUS_NO_SUCH_FILE (0xC0000000 | 0x000f) +#define NT_STATUS_INVALID_DEVICE_REQUEST (0xC0000000 | 0x0010) +#define NT_STATUS_END_OF_FILE (0xC0000000 | 0x0011) +#define NT_STATUS_WRONG_VOLUME (0xC0000000 | 0x0012) +#define NT_STATUS_NO_MEDIA_IN_DEVICE (0xC0000000 | 0x0013) +#define NT_STATUS_UNRECOGNIZED_MEDIA (0xC0000000 | 0x0014) +#define NT_STATUS_NONEXISTENT_SECTOR (0xC0000000 | 0x0015) +#define NT_STATUS_MORE_PROCESSING_REQUIRED (0xC0000000 | 0x0016) +#define NT_STATUS_NO_MEMORY (0xC0000000 | 0x0017) +#define NT_STATUS_CONFLICTING_ADDRESSES (0xC0000000 | 0x0018) +#define NT_STATUS_NOT_MAPPED_VIEW (0xC0000000 | 0x0019) +#define NT_STATUS_UNABLE_TO_FREE_VM (0xC0000000 | 0x001a) +#define NT_STATUS_UNABLE_TO_DELETE_SECTION (0xC0000000 | 0x001b) +#define NT_STATUS_INVALID_SYSTEM_SERVICE (0xC0000000 | 0x001c) +#define NT_STATUS_ILLEGAL_INSTRUCTION (0xC0000000 | 0x001d) +#define NT_STATUS_INVALID_LOCK_SEQUENCE (0xC0000000 | 0x001e) +#define NT_STATUS_INVALID_VIEW_SIZE (0xC0000000 | 0x001f) +#define NT_STATUS_INVALID_FILE_FOR_SECTION (0xC0000000 | 0x0020) +#define NT_STATUS_ALREADY_COMMITTED (0xC0000000 | 0x0021) +#define NT_STATUS_ACCESS_DENIED (0xC0000000 | 0x0022) +#define NT_STATUS_BUFFER_TOO_SMALL (0xC0000000 | 0x0023) +#define NT_STATUS_OBJECT_TYPE_MISMATCH (0xC0000000 | 0x0024) +#define NT_STATUS_NONCONTINUABLE_EXCEPTION (0xC0000000 | 0x0025) +#define NT_STATUS_INVALID_DISPOSITION (0xC0000000 | 0x0026) +#define NT_STATUS_UNWIND (0xC0000000 | 0x0027) +#define NT_STATUS_BAD_STACK (0xC0000000 | 0x0028) +#define NT_STATUS_INVALID_UNWIND_TARGET (0xC0000000 | 0x0029) +#define NT_STATUS_NOT_LOCKED (0xC0000000 | 0x002a) +#define NT_STATUS_PARITY_ERROR (0xC0000000 | 0x002b) +#define NT_STATUS_UNABLE_TO_DECOMMIT_VM (0xC0000000 | 0x002c) +#define NT_STATUS_NOT_COMMITTED 
(0xC0000000 | 0x002d) +#define NT_STATUS_INVALID_PORT_ATTRIBUTES (0xC0000000 | 0x002e) +#define NT_STATUS_PORT_MESSAGE_TOO_LONG (0xC0000000 | 0x002f) +#define NT_STATUS_INVALID_PARAMETER_MIX (0xC0000000 | 0x0030) +#define NT_STATUS_INVALID_QUOTA_LOWER (0xC0000000 | 0x0031) +#define NT_STATUS_DISK_CORRUPT_ERROR (0xC0000000 | 0x0032) +#define NT_STATUS_OBJECT_NAME_INVALID (0xC0000000 | 0x0033) +#define NT_STATUS_OBJECT_NAME_NOT_FOUND (0xC0000000 | 0x0034) +#define NT_STATUS_OBJECT_NAME_COLLISION (0xC0000000 | 0x0035) +#define NT_STATUS_HANDLE_NOT_WAITABLE (0xC0000000 | 0x0036) +#define NT_STATUS_PORT_DISCONNECTED (0xC0000000 | 0x0037) +#define NT_STATUS_DEVICE_ALREADY_ATTACHED (0xC0000000 | 0x0038) +#define NT_STATUS_OBJECT_PATH_INVALID (0xC0000000 | 0x0039) +#define NT_STATUS_OBJECT_PATH_NOT_FOUND (0xC0000000 | 0x003a) +#define NT_STATUS_OBJECT_PATH_SYNTAX_BAD (0xC0000000 | 0x003b) +#define NT_STATUS_DATA_OVERRUN (0xC0000000 | 0x003c) +#define NT_STATUS_DATA_LATE_ERROR (0xC0000000 | 0x003d) +#define NT_STATUS_DATA_ERROR (0xC0000000 | 0x003e) +#define NT_STATUS_CRC_ERROR (0xC0000000 | 0x003f) +#define NT_STATUS_SECTION_TOO_BIG (0xC0000000 | 0x0040) +#define NT_STATUS_PORT_CONNECTION_REFUSED (0xC0000000 | 0x0041) +#define NT_STATUS_INVALID_PORT_HANDLE (0xC0000000 | 0x0042) +#define NT_STATUS_SHARING_VIOLATION (0xC0000000 | 0x0043) +#define NT_STATUS_QUOTA_EXCEEDED (0xC0000000 | 0x0044) +#define NT_STATUS_INVALID_PAGE_PROTECTION (0xC0000000 | 0x0045) +#define NT_STATUS_MUTANT_NOT_OWNED (0xC0000000 | 0x0046) +#define NT_STATUS_SEMAPHORE_LIMIT_EXCEEDED (0xC0000000 | 0x0047) +#define NT_STATUS_PORT_ALREADY_SET (0xC0000000 | 0x0048) +#define NT_STATUS_SECTION_NOT_IMAGE (0xC0000000 | 0x0049) +#define NT_STATUS_SUSPEND_COUNT_EXCEEDED (0xC0000000 | 0x004a) +#define NT_STATUS_THREAD_IS_TERMINATING (0xC0000000 | 0x004b) +#define NT_STATUS_BAD_WORKING_SET_LIMIT (0xC0000000 | 0x004c) +#define NT_STATUS_INCOMPATIBLE_FILE_MAP (0xC0000000 | 0x004d) +#define NT_STATUS_SECTION_PROTECTION (0xC0000000 | 0x004e) +#define NT_STATUS_EAS_NOT_SUPPORTED (0xC0000000 | 0x004f) +#define NT_STATUS_EA_TOO_LARGE (0xC0000000 | 0x0050) +#define NT_STATUS_NONEXISTENT_EA_ENTRY (0xC0000000 | 0x0051) +#define NT_STATUS_NO_EAS_ON_FILE (0xC0000000 | 0x0052) +#define NT_STATUS_EA_CORRUPT_ERROR (0xC0000000 | 0x0053) +#define NT_STATUS_FILE_LOCK_CONFLICT (0xC0000000 | 0x0054) +#define NT_STATUS_LOCK_NOT_GRANTED (0xC0000000 | 0x0055) +#define NT_STATUS_DELETE_PENDING (0xC0000000 | 0x0056) +#define NT_STATUS_CTL_FILE_NOT_SUPPORTED (0xC0000000 | 0x0057) +#define NT_STATUS_UNKNOWN_REVISION (0xC0000000 | 0x0058) +#define NT_STATUS_REVISION_MISMATCH (0xC0000000 | 0x0059) +#define NT_STATUS_INVALID_OWNER (0xC0000000 | 0x005a) +#define NT_STATUS_INVALID_PRIMARY_GROUP (0xC0000000 | 0x005b) +#define NT_STATUS_NO_IMPERSONATION_TOKEN (0xC0000000 | 0x005c) +#define NT_STATUS_CANT_DISABLE_MANDATORY (0xC0000000 | 0x005d) +#define NT_STATUS_NO_LOGON_SERVERS (0xC0000000 | 0x005e) +#define NT_STATUS_NO_SUCH_LOGON_SESSION (0xC0000000 | 0x005f) +#define NT_STATUS_NO_SUCH_PRIVILEGE (0xC0000000 | 0x0060) +#define NT_STATUS_PRIVILEGE_NOT_HELD (0xC0000000 | 0x0061) +#define NT_STATUS_INVALID_ACCOUNT_NAME (0xC0000000 | 0x0062) +#define NT_STATUS_USER_EXISTS (0xC0000000 | 0x0063) +#define NT_STATUS_NO_SUCH_USER (0xC0000000 | 0x0064) +#define NT_STATUS_GROUP_EXISTS (0xC0000000 | 0x0065) +#define NT_STATUS_NO_SUCH_GROUP (0xC0000000 | 0x0066) +#define NT_STATUS_MEMBER_IN_GROUP (0xC0000000 | 0x0067) +#define NT_STATUS_MEMBER_NOT_IN_GROUP (0xC0000000 | 0x0068) 
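The rewritten fs/smb/common/nterr.h values above are wrapped in parentheses, presumably to keep the bitwise OR from binding unexpectedly when a macro is used inside a larger expression, since == binds tighter than | in C. A minimal standalone sketch of the hazard follows; it is not part of the patch, and the macro and variable names are illustrative only:

#include <stdio.h>

/* old style: expands to a bare OR expression */
#define STATUS_UNPARENTHESIZED 0xC0000000 | 0x000d
/* new style: the whole value is a single parenthesized term */
#define STATUS_PARENTHESIZED   (0xC0000000 | 0x000d)

int main(void)
{
	unsigned int rc = 0xC000000d;

	/* Parses as (rc == 0xC0000000) | 0x000d -- always nonzero, never the intended test. */
	printf("unparenthesized: %d\n", rc == STATUS_UNPARENTHESIZED);

	/* Parses as rc == (0xC0000000 | 0x000d) -- the intended comparison. */
	printf("parenthesized:   %d\n", rc == STATUS_PARENTHESIZED);

	return 0;
}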
+#define NT_STATUS_LAST_ADMIN (0xC0000000 | 0x0069) +#define NT_STATUS_WRONG_PASSWORD (0xC0000000 | 0x006a) +#define NT_STATUS_ILL_FORMED_PASSWORD (0xC0000000 | 0x006b) +#define NT_STATUS_PASSWORD_RESTRICTION (0xC0000000 | 0x006c) +#define NT_STATUS_LOGON_FAILURE (0xC0000000 | 0x006d) +#define NT_STATUS_ACCOUNT_RESTRICTION (0xC0000000 | 0x006e) +#define NT_STATUS_INVALID_LOGON_HOURS (0xC0000000 | 0x006f) +#define NT_STATUS_INVALID_WORKSTATION (0xC0000000 | 0x0070) +#define NT_STATUS_PASSWORD_EXPIRED (0xC0000000 | 0x0071) +#define NT_STATUS_ACCOUNT_DISABLED (0xC0000000 | 0x0072) +#define NT_STATUS_NONE_MAPPED (0xC0000000 | 0x0073) +#define NT_STATUS_TOO_MANY_LUIDS_REQUESTED (0xC0000000 | 0x0074) +#define NT_STATUS_LUIDS_EXHAUSTED (0xC0000000 | 0x0075) +#define NT_STATUS_INVALID_SUB_AUTHORITY (0xC0000000 | 0x0076) +#define NT_STATUS_INVALID_ACL (0xC0000000 | 0x0077) +#define NT_STATUS_INVALID_SID (0xC0000000 | 0x0078) +#define NT_STATUS_INVALID_SECURITY_DESCR (0xC0000000 | 0x0079) +#define NT_STATUS_PROCEDURE_NOT_FOUND (0xC0000000 | 0x007a) +#define NT_STATUS_INVALID_IMAGE_FORMAT (0xC0000000 | 0x007b) +#define NT_STATUS_NO_TOKEN (0xC0000000 | 0x007c) +#define NT_STATUS_BAD_INHERITANCE_ACL (0xC0000000 | 0x007d) +#define NT_STATUS_RANGE_NOT_LOCKED (0xC0000000 | 0x007e) +#define NT_STATUS_DISK_FULL (0xC0000000 | 0x007f) +#define NT_STATUS_SERVER_DISABLED (0xC0000000 | 0x0080) +#define NT_STATUS_SERVER_NOT_DISABLED (0xC0000000 | 0x0081) +#define NT_STATUS_TOO_MANY_GUIDS_REQUESTED (0xC0000000 | 0x0082) +#define NT_STATUS_GUIDS_EXHAUSTED (0xC0000000 | 0x0083) +#define NT_STATUS_INVALID_ID_AUTHORITY (0xC0000000 | 0x0084) +#define NT_STATUS_AGENTS_EXHAUSTED (0xC0000000 | 0x0085) +#define NT_STATUS_INVALID_VOLUME_LABEL (0xC0000000 | 0x0086) +#define NT_STATUS_SECTION_NOT_EXTENDED (0xC0000000 | 0x0087) +#define NT_STATUS_NOT_MAPPED_DATA (0xC0000000 | 0x0088) +#define NT_STATUS_RESOURCE_DATA_NOT_FOUND (0xC0000000 | 0x0089) +#define NT_STATUS_RESOURCE_TYPE_NOT_FOUND (0xC0000000 | 0x008a) +#define NT_STATUS_RESOURCE_NAME_NOT_FOUND (0xC0000000 | 0x008b) +#define NT_STATUS_ARRAY_BOUNDS_EXCEEDED (0xC0000000 | 0x008c) +#define NT_STATUS_FLOAT_DENORMAL_OPERAND (0xC0000000 | 0x008d) +#define NT_STATUS_FLOAT_DIVIDE_BY_ZERO (0xC0000000 | 0x008e) +#define NT_STATUS_FLOAT_INEXACT_RESULT (0xC0000000 | 0x008f) +#define NT_STATUS_FLOAT_INVALID_OPERATION (0xC0000000 | 0x0090) +#define NT_STATUS_FLOAT_OVERFLOW (0xC0000000 | 0x0091) +#define NT_STATUS_FLOAT_STACK_CHECK (0xC0000000 | 0x0092) +#define NT_STATUS_FLOAT_UNDERFLOW (0xC0000000 | 0x0093) +#define NT_STATUS_INTEGER_DIVIDE_BY_ZERO (0xC0000000 | 0x0094) +#define NT_STATUS_INTEGER_OVERFLOW (0xC0000000 | 0x0095) +#define NT_STATUS_PRIVILEGED_INSTRUCTION (0xC0000000 | 0x0096) +#define NT_STATUS_TOO_MANY_PAGING_FILES (0xC0000000 | 0x0097) +#define NT_STATUS_FILE_INVALID (0xC0000000 | 0x0098) +#define NT_STATUS_ALLOTTED_SPACE_EXCEEDED (0xC0000000 | 0x0099) +#define NT_STATUS_INSUFFICIENT_RESOURCES (0xC0000000 | 0x009a) +#define NT_STATUS_DFS_EXIT_PATH_FOUND (0xC0000000 | 0x009b) +#define NT_STATUS_DEVICE_DATA_ERROR (0xC0000000 | 0x009c) +#define NT_STATUS_DEVICE_NOT_CONNECTED (0xC0000000 | 0x009d) +#define NT_STATUS_DEVICE_POWER_FAILURE (0xC0000000 | 0x009e) +#define NT_STATUS_FREE_VM_NOT_AT_BASE (0xC0000000 | 0x009f) +#define NT_STATUS_MEMORY_NOT_ALLOCATED (0xC0000000 | 0x00a0) +#define NT_STATUS_WORKING_SET_QUOTA (0xC0000000 | 0x00a1) +#define NT_STATUS_MEDIA_WRITE_PROTECTED (0xC0000000 | 0x00a2) +#define NT_STATUS_DEVICE_NOT_READY (0xC0000000 | 0x00a3) +#define 
NT_STATUS_INVALID_GROUP_ATTRIBUTES (0xC0000000 | 0x00a4) +#define NT_STATUS_BAD_IMPERSONATION_LEVEL (0xC0000000 | 0x00a5) +#define NT_STATUS_CANT_OPEN_ANONYMOUS (0xC0000000 | 0x00a6) +#define NT_STATUS_BAD_VALIDATION_CLASS (0xC0000000 | 0x00a7) +#define NT_STATUS_BAD_TOKEN_TYPE (0xC0000000 | 0x00a8) +#define NT_STATUS_BAD_MASTER_BOOT_RECORD (0xC0000000 | 0x00a9) +#define NT_STATUS_INSTRUCTION_MISALIGNMENT (0xC0000000 | 0x00aa) +#define NT_STATUS_INSTANCE_NOT_AVAILABLE (0xC0000000 | 0x00ab) +#define NT_STATUS_PIPE_NOT_AVAILABLE (0xC0000000 | 0x00ac) +#define NT_STATUS_INVALID_PIPE_STATE (0xC0000000 | 0x00ad) +#define NT_STATUS_PIPE_BUSY (0xC0000000 | 0x00ae) +#define NT_STATUS_ILLEGAL_FUNCTION (0xC0000000 | 0x00af) +#define NT_STATUS_PIPE_DISCONNECTED (0xC0000000 | 0x00b0) +#define NT_STATUS_PIPE_CLOSING (0xC0000000 | 0x00b1) +#define NT_STATUS_PIPE_CONNECTED (0xC0000000 | 0x00b2) +#define NT_STATUS_PIPE_LISTENING (0xC0000000 | 0x00b3) +#define NT_STATUS_INVALID_READ_MODE (0xC0000000 | 0x00b4) +#define NT_STATUS_IO_TIMEOUT (0xC0000000 | 0x00b5) +#define NT_STATUS_FILE_FORCED_CLOSED (0xC0000000 | 0x00b6) +#define NT_STATUS_PROFILING_NOT_STARTED (0xC0000000 | 0x00b7) +#define NT_STATUS_PROFILING_NOT_STOPPED (0xC0000000 | 0x00b8) +#define NT_STATUS_COULD_NOT_INTERPRET (0xC0000000 | 0x00b9) +#define NT_STATUS_FILE_IS_A_DIRECTORY (0xC0000000 | 0x00ba) +#define NT_STATUS_NOT_SUPPORTED (0xC0000000 | 0x00bb) +#define NT_STATUS_REMOTE_NOT_LISTENING (0xC0000000 | 0x00bc) +#define NT_STATUS_DUPLICATE_NAME (0xC0000000 | 0x00bd) +#define NT_STATUS_BAD_NETWORK_PATH (0xC0000000 | 0x00be) +#define NT_STATUS_NETWORK_BUSY (0xC0000000 | 0x00bf) +#define NT_STATUS_DEVICE_DOES_NOT_EXIST (0xC0000000 | 0x00c0) +#define NT_STATUS_TOO_MANY_COMMANDS (0xC0000000 | 0x00c1) +#define NT_STATUS_ADAPTER_HARDWARE_ERROR (0xC0000000 | 0x00c2) +#define NT_STATUS_INVALID_NETWORK_RESPONSE (0xC0000000 | 0x00c3) +#define NT_STATUS_UNEXPECTED_NETWORK_ERROR (0xC0000000 | 0x00c4) +#define NT_STATUS_BAD_REMOTE_ADAPTER (0xC0000000 | 0x00c5) +#define NT_STATUS_PRINT_QUEUE_FULL (0xC0000000 | 0x00c6) +#define NT_STATUS_NO_SPOOL_SPACE (0xC0000000 | 0x00c7) +#define NT_STATUS_PRINT_CANCELLED (0xC0000000 | 0x00c8) +#define NT_STATUS_NETWORK_NAME_DELETED (0xC0000000 | 0x00c9) +#define NT_STATUS_NETWORK_ACCESS_DENIED (0xC0000000 | 0x00ca) +#define NT_STATUS_BAD_DEVICE_TYPE (0xC0000000 | 0x00cb) +#define NT_STATUS_BAD_NETWORK_NAME (0xC0000000 | 0x00cc) +#define NT_STATUS_TOO_MANY_NAMES (0xC0000000 | 0x00cd) +#define NT_STATUS_TOO_MANY_SESSIONS (0xC0000000 | 0x00ce) +#define NT_STATUS_SHARING_PAUSED (0xC0000000 | 0x00cf) +#define NT_STATUS_REQUEST_NOT_ACCEPTED (0xC0000000 | 0x00d0) +#define NT_STATUS_REDIRECTOR_PAUSED (0xC0000000 | 0x00d1) +#define NT_STATUS_NET_WRITE_FAULT (0xC0000000 | 0x00d2) +#define NT_STATUS_PROFILING_AT_LIMIT (0xC0000000 | 0x00d3) +#define NT_STATUS_NOT_SAME_DEVICE (0xC0000000 | 0x00d4) +#define NT_STATUS_FILE_RENAMED (0xC0000000 | 0x00d5) +#define NT_STATUS_VIRTUAL_CIRCUIT_CLOSED (0xC0000000 | 0x00d6) +#define NT_STATUS_NO_SECURITY_ON_OBJECT (0xC0000000 | 0x00d7) +#define NT_STATUS_CANT_WAIT (0xC0000000 | 0x00d8) +#define NT_STATUS_PIPE_EMPTY (0xC0000000 | 0x00d9) +#define NT_STATUS_CANT_ACCESS_DOMAIN_INFO (0xC0000000 | 0x00da) +#define NT_STATUS_CANT_TERMINATE_SELF (0xC0000000 | 0x00db) +#define NT_STATUS_INVALID_SERVER_STATE (0xC0000000 | 0x00dc) +#define NT_STATUS_INVALID_DOMAIN_STATE (0xC0000000 | 0x00dd) +#define NT_STATUS_INVALID_DOMAIN_ROLE (0xC0000000 | 0x00de) +#define NT_STATUS_NO_SUCH_DOMAIN (0xC0000000 | 
0x00df) +#define NT_STATUS_DOMAIN_EXISTS (0xC0000000 | 0x00e0) +#define NT_STATUS_DOMAIN_LIMIT_EXCEEDED (0xC0000000 | 0x00e1) +#define NT_STATUS_OPLOCK_NOT_GRANTED (0xC0000000 | 0x00e2) +#define NT_STATUS_INVALID_OPLOCK_PROTOCOL (0xC0000000 | 0x00e3) +#define NT_STATUS_INTERNAL_DB_CORRUPTION (0xC0000000 | 0x00e4) +#define NT_STATUS_INTERNAL_ERROR (0xC0000000 | 0x00e5) +#define NT_STATUS_GENERIC_NOT_MAPPED (0xC0000000 | 0x00e6) +#define NT_STATUS_BAD_DESCRIPTOR_FORMAT (0xC0000000 | 0x00e7) +#define NT_STATUS_INVALID_USER_BUFFER (0xC0000000 | 0x00e8) +#define NT_STATUS_UNEXPECTED_IO_ERROR (0xC0000000 | 0x00e9) +#define NT_STATUS_UNEXPECTED_MM_CREATE_ERR (0xC0000000 | 0x00ea) +#define NT_STATUS_UNEXPECTED_MM_MAP_ERROR (0xC0000000 | 0x00eb) +#define NT_STATUS_UNEXPECTED_MM_EXTEND_ERR (0xC0000000 | 0x00ec) +#define NT_STATUS_NOT_LOGON_PROCESS (0xC0000000 | 0x00ed) +#define NT_STATUS_LOGON_SESSION_EXISTS (0xC0000000 | 0x00ee) +#define NT_STATUS_INVALID_PARAMETER_1 (0xC0000000 | 0x00ef) +#define NT_STATUS_INVALID_PARAMETER_2 (0xC0000000 | 0x00f0) +#define NT_STATUS_INVALID_PARAMETER_3 (0xC0000000 | 0x00f1) +#define NT_STATUS_INVALID_PARAMETER_4 (0xC0000000 | 0x00f2) +#define NT_STATUS_INVALID_PARAMETER_5 (0xC0000000 | 0x00f3) +#define NT_STATUS_INVALID_PARAMETER_6 (0xC0000000 | 0x00f4) +#define NT_STATUS_INVALID_PARAMETER_7 (0xC0000000 | 0x00f5) +#define NT_STATUS_INVALID_PARAMETER_8 (0xC0000000 | 0x00f6) +#define NT_STATUS_INVALID_PARAMETER_9 (0xC0000000 | 0x00f7) +#define NT_STATUS_INVALID_PARAMETER_10 (0xC0000000 | 0x00f8) +#define NT_STATUS_INVALID_PARAMETER_11 (0xC0000000 | 0x00f9) +#define NT_STATUS_INVALID_PARAMETER_12 (0xC0000000 | 0x00fa) +#define NT_STATUS_REDIRECTOR_NOT_STARTED (0xC0000000 | 0x00fb) +#define NT_STATUS_REDIRECTOR_STARTED (0xC0000000 | 0x00fc) +#define NT_STATUS_STACK_OVERFLOW (0xC0000000 | 0x00fd) +#define NT_STATUS_NO_SUCH_PACKAGE (0xC0000000 | 0x00fe) +#define NT_STATUS_BAD_FUNCTION_TABLE (0xC0000000 | 0x00ff) +#define NT_STATUS_DIRECTORY_NOT_EMPTY (0xC0000000 | 0x0101) +#define NT_STATUS_FILE_CORRUPT_ERROR (0xC0000000 | 0x0102) +#define NT_STATUS_NOT_A_DIRECTORY (0xC0000000 | 0x0103) +#define NT_STATUS_BAD_LOGON_SESSION_STATE (0xC0000000 | 0x0104) +#define NT_STATUS_LOGON_SESSION_COLLISION (0xC0000000 | 0x0105) +#define NT_STATUS_NAME_TOO_LONG (0xC0000000 | 0x0106) +#define NT_STATUS_FILES_OPEN (0xC0000000 | 0x0107) +#define NT_STATUS_CONNECTION_IN_USE (0xC0000000 | 0x0108) +#define NT_STATUS_MESSAGE_NOT_FOUND (0xC0000000 | 0x0109) +#define NT_STATUS_PROCESS_IS_TERMINATING (0xC0000000 | 0x010a) +#define NT_STATUS_INVALID_LOGON_TYPE (0xC0000000 | 0x010b) +#define NT_STATUS_NO_GUID_TRANSLATION (0xC0000000 | 0x010c) +#define NT_STATUS_CANNOT_IMPERSONATE (0xC0000000 | 0x010d) +#define NT_STATUS_IMAGE_ALREADY_LOADED (0xC0000000 | 0x010e) +#define NT_STATUS_ABIOS_NOT_PRESENT (0xC0000000 | 0x010f) +#define NT_STATUS_ABIOS_LID_NOT_EXIST (0xC0000000 | 0x0110) +#define NT_STATUS_ABIOS_LID_ALREADY_OWNED (0xC0000000 | 0x0111) +#define NT_STATUS_ABIOS_NOT_LID_OWNER (0xC0000000 | 0x0112) +#define NT_STATUS_ABIOS_INVALID_COMMAND (0xC0000000 | 0x0113) +#define NT_STATUS_ABIOS_INVALID_LID (0xC0000000 | 0x0114) +#define NT_STATUS_ABIOS_SELECTOR_NOT_AVAILABLE (0xC0000000 | 0x0115) +#define NT_STATUS_ABIOS_INVALID_SELECTOR (0xC0000000 | 0x0116) +#define NT_STATUS_NO_LDT (0xC0000000 | 0x0117) +#define NT_STATUS_INVALID_LDT_SIZE (0xC0000000 | 0x0118) +#define NT_STATUS_INVALID_LDT_OFFSET (0xC0000000 | 0x0119) +#define NT_STATUS_INVALID_LDT_DESCRIPTOR (0xC0000000 | 0x011a) +#define 
NT_STATUS_INVALID_IMAGE_NE_FORMAT (0xC0000000 | 0x011b) +#define NT_STATUS_RXACT_INVALID_STATE (0xC0000000 | 0x011c) +#define NT_STATUS_RXACT_COMMIT_FAILURE (0xC0000000 | 0x011d) +#define NT_STATUS_MAPPED_FILE_SIZE_ZERO (0xC0000000 | 0x011e) +#define NT_STATUS_TOO_MANY_OPENED_FILES (0xC0000000 | 0x011f) +#define NT_STATUS_CANCELLED (0xC0000000 | 0x0120) +#define NT_STATUS_CANNOT_DELETE (0xC0000000 | 0x0121) +#define NT_STATUS_INVALID_COMPUTER_NAME (0xC0000000 | 0x0122) +#define NT_STATUS_FILE_DELETED (0xC0000000 | 0x0123) +#define NT_STATUS_SPECIAL_ACCOUNT (0xC0000000 | 0x0124) +#define NT_STATUS_SPECIAL_GROUP (0xC0000000 | 0x0125) +#define NT_STATUS_SPECIAL_USER (0xC0000000 | 0x0126) +#define NT_STATUS_MEMBERS_PRIMARY_GROUP (0xC0000000 | 0x0127) +#define NT_STATUS_FILE_CLOSED (0xC0000000 | 0x0128) +#define NT_STATUS_TOO_MANY_THREADS (0xC0000000 | 0x0129) +#define NT_STATUS_THREAD_NOT_IN_PROCESS (0xC0000000 | 0x012a) +#define NT_STATUS_TOKEN_ALREADY_IN_USE (0xC0000000 | 0x012b) +#define NT_STATUS_PAGEFILE_QUOTA_EXCEEDED (0xC0000000 | 0x012c) +#define NT_STATUS_COMMITMENT_LIMIT (0xC0000000 | 0x012d) +#define NT_STATUS_INVALID_IMAGE_LE_FORMAT (0xC0000000 | 0x012e) +#define NT_STATUS_INVALID_IMAGE_NOT_MZ (0xC0000000 | 0x012f) +#define NT_STATUS_INVALID_IMAGE_PROTECT (0xC0000000 | 0x0130) +#define NT_STATUS_INVALID_IMAGE_WIN_16 (0xC0000000 | 0x0131) +#define NT_STATUS_LOGON_SERVER_CONFLICT (0xC0000000 | 0x0132) +#define NT_STATUS_TIME_DIFFERENCE_AT_DC (0xC0000000 | 0x0133) +#define NT_STATUS_SYNCHRONIZATION_REQUIRED (0xC0000000 | 0x0134) +#define NT_STATUS_DLL_NOT_FOUND (0xC0000000 | 0x0135) +#define NT_STATUS_OPEN_FAILED (0xC0000000 | 0x0136) +#define NT_STATUS_IO_PRIVILEGE_FAILED (0xC0000000 | 0x0137) +#define NT_STATUS_ORDINAL_NOT_FOUND (0xC0000000 | 0x0138) +#define NT_STATUS_ENTRYPOINT_NOT_FOUND (0xC0000000 | 0x0139) +#define NT_STATUS_CONTROL_C_EXIT (0xC0000000 | 0x013a) +#define NT_STATUS_LOCAL_DISCONNECT (0xC0000000 | 0x013b) +#define NT_STATUS_REMOTE_DISCONNECT (0xC0000000 | 0x013c) +#define NT_STATUS_REMOTE_RESOURCES (0xC0000000 | 0x013d) +#define NT_STATUS_LINK_FAILED (0xC0000000 | 0x013e) +#define NT_STATUS_LINK_TIMEOUT (0xC0000000 | 0x013f) +#define NT_STATUS_INVALID_CONNECTION (0xC0000000 | 0x0140) +#define NT_STATUS_INVALID_ADDRESS (0xC0000000 | 0x0141) +#define NT_STATUS_DLL_INIT_FAILED (0xC0000000 | 0x0142) +#define NT_STATUS_MISSING_SYSTEMFILE (0xC0000000 | 0x0143) +#define NT_STATUS_UNHANDLED_EXCEPTION (0xC0000000 | 0x0144) +#define NT_STATUS_APP_INIT_FAILURE (0xC0000000 | 0x0145) +#define NT_STATUS_PAGEFILE_CREATE_FAILED (0xC0000000 | 0x0146) +#define NT_STATUS_NO_PAGEFILE (0xC0000000 | 0x0147) +#define NT_STATUS_INVALID_LEVEL (0xC0000000 | 0x0148) +#define NT_STATUS_WRONG_PASSWORD_CORE (0xC0000000 | 0x0149) +#define NT_STATUS_ILLEGAL_FLOAT_CONTEXT (0xC0000000 | 0x014a) +#define NT_STATUS_PIPE_BROKEN (0xC0000000 | 0x014b) +#define NT_STATUS_REGISTRY_CORRUPT (0xC0000000 | 0x014c) +#define NT_STATUS_REGISTRY_IO_FAILED (0xC0000000 | 0x014d) +#define NT_STATUS_NO_EVENT_PAIR (0xC0000000 | 0x014e) +#define NT_STATUS_UNRECOGNIZED_VOLUME (0xC0000000 | 0x014f) +#define NT_STATUS_SERIAL_NO_DEVICE_INITED (0xC0000000 | 0x0150) +#define NT_STATUS_NO_SUCH_ALIAS (0xC0000000 | 0x0151) +#define NT_STATUS_MEMBER_NOT_IN_ALIAS (0xC0000000 | 0x0152) +#define NT_STATUS_MEMBER_IN_ALIAS (0xC0000000 | 0x0153) +#define NT_STATUS_ALIAS_EXISTS (0xC0000000 | 0x0154) +#define NT_STATUS_LOGON_NOT_GRANTED (0xC0000000 | 0x0155) +#define NT_STATUS_TOO_MANY_SECRETS (0xC0000000 | 0x0156) +#define 
NT_STATUS_SECRET_TOO_LONG (0xC0000000 | 0x0157) +#define NT_STATUS_INTERNAL_DB_ERROR (0xC0000000 | 0x0158) +#define NT_STATUS_FULLSCREEN_MODE (0xC0000000 | 0x0159) +#define NT_STATUS_TOO_MANY_CONTEXT_IDS (0xC0000000 | 0x015a) +#define NT_STATUS_LOGON_TYPE_NOT_GRANTED (0xC0000000 | 0x015b) +#define NT_STATUS_NOT_REGISTRY_FILE (0xC0000000 | 0x015c) +#define NT_STATUS_NT_CROSS_ENCRYPTION_REQUIRED (0xC0000000 | 0x015d) +#define NT_STATUS_DOMAIN_CTRLR_CONFIG_ERROR (0xC0000000 | 0x015e) +#define NT_STATUS_FT_MISSING_MEMBER (0xC0000000 | 0x015f) +#define NT_STATUS_ILL_FORMED_SERVICE_ENTRY (0xC0000000 | 0x0160) +#define NT_STATUS_ILLEGAL_CHARACTER (0xC0000000 | 0x0161) +#define NT_STATUS_UNMAPPABLE_CHARACTER (0xC0000000 | 0x0162) +#define NT_STATUS_UNDEFINED_CHARACTER (0xC0000000 | 0x0163) +#define NT_STATUS_FLOPPY_VOLUME (0xC0000000 | 0x0164) +#define NT_STATUS_FLOPPY_ID_MARK_NOT_FOUND (0xC0000000 | 0x0165) +#define NT_STATUS_FLOPPY_WRONG_CYLINDER (0xC0000000 | 0x0166) +#define NT_STATUS_FLOPPY_UNKNOWN_ERROR (0xC0000000 | 0x0167) +#define NT_STATUS_FLOPPY_BAD_REGISTERS (0xC0000000 | 0x0168) +#define NT_STATUS_DISK_RECALIBRATE_FAILED (0xC0000000 | 0x0169) +#define NT_STATUS_DISK_OPERATION_FAILED (0xC0000000 | 0x016a) +#define NT_STATUS_DISK_RESET_FAILED (0xC0000000 | 0x016b) +#define NT_STATUS_SHARED_IRQ_BUSY (0xC0000000 | 0x016c) +#define NT_STATUS_FT_ORPHANING (0xC0000000 | 0x016d) +#define NT_STATUS_PARTITION_FAILURE (0xC0000000 | 0x0172) +#define NT_STATUS_INVALID_BLOCK_LENGTH (0xC0000000 | 0x0173) +#define NT_STATUS_DEVICE_NOT_PARTITIONED (0xC0000000 | 0x0174) +#define NT_STATUS_UNABLE_TO_LOCK_MEDIA (0xC0000000 | 0x0175) +#define NT_STATUS_UNABLE_TO_UNLOAD_MEDIA (0xC0000000 | 0x0176) +#define NT_STATUS_EOM_OVERFLOW (0xC0000000 | 0x0177) +#define NT_STATUS_NO_MEDIA (0xC0000000 | 0x0178) +#define NT_STATUS_NO_SUCH_MEMBER (0xC0000000 | 0x017a) +#define NT_STATUS_INVALID_MEMBER (0xC0000000 | 0x017b) +#define NT_STATUS_KEY_DELETED (0xC0000000 | 0x017c) +#define NT_STATUS_NO_LOG_SPACE (0xC0000000 | 0x017d) +#define NT_STATUS_TOO_MANY_SIDS (0xC0000000 | 0x017e) +#define NT_STATUS_LM_CROSS_ENCRYPTION_REQUIRED (0xC0000000 | 0x017f) +#define NT_STATUS_KEY_HAS_CHILDREN (0xC0000000 | 0x0180) +#define NT_STATUS_CHILD_MUST_BE_VOLATILE (0xC0000000 | 0x0181) +#define NT_STATUS_DEVICE_CONFIGURATION_ERROR (0xC0000000 | 0x0182) +#define NT_STATUS_DRIVER_INTERNAL_ERROR (0xC0000000 | 0x0183) +#define NT_STATUS_INVALID_DEVICE_STATE (0xC0000000 | 0x0184) +#define NT_STATUS_IO_DEVICE_ERROR (0xC0000000 | 0x0185) +#define NT_STATUS_DEVICE_PROTOCOL_ERROR (0xC0000000 | 0x0186) +#define NT_STATUS_BACKUP_CONTROLLER (0xC0000000 | 0x0187) +#define NT_STATUS_LOG_FILE_FULL (0xC0000000 | 0x0188) +#define NT_STATUS_TOO_LATE (0xC0000000 | 0x0189) +#define NT_STATUS_NO_TRUST_LSA_SECRET (0xC0000000 | 0x018a) +#define NT_STATUS_NO_TRUST_SAM_ACCOUNT (0xC0000000 | 0x018b) +#define NT_STATUS_TRUSTED_DOMAIN_FAILURE (0xC0000000 | 0x018c) +#define NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE (0xC0000000 | 0x018d) +#define NT_STATUS_EVENTLOG_FILE_CORRUPT (0xC0000000 | 0x018e) +#define NT_STATUS_EVENTLOG_CANT_START (0xC0000000 | 0x018f) +#define NT_STATUS_TRUST_FAILURE (0xC0000000 | 0x0190) +#define NT_STATUS_MUTANT_LIMIT_EXCEEDED (0xC0000000 | 0x0191) +#define NT_STATUS_NETLOGON_NOT_STARTED (0xC0000000 | 0x0192) +#define NT_STATUS_ACCOUNT_EXPIRED (0xC0000000 | 0x0193) +#define NT_STATUS_POSSIBLE_DEADLOCK (0xC0000000 | 0x0194) +#define NT_STATUS_NETWORK_CREDENTIAL_CONFLICT (0xC0000000 | 0x0195) +#define NT_STATUS_REMOTE_SESSION_LIMIT (0xC0000000 
| 0x0196) +#define NT_STATUS_EVENTLOG_FILE_CHANGED (0xC0000000 | 0x0197) +#define NT_STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT (0xC0000000 | 0x0198) +#define NT_STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT (0xC0000000 | 0x0199) +#define NT_STATUS_NOLOGON_SERVER_TRUST_ACCOUNT (0xC0000000 | 0x019a) +#define NT_STATUS_DOMAIN_TRUST_INCONSISTENT (0xC0000000 | 0x019b) +#define NT_STATUS_FS_DRIVER_REQUIRED (0xC0000000 | 0x019c) +#define NT_STATUS_INVALID_LOCK_RANGE (0xC0000000 | 0x01a1) +#define NT_STATUS_NO_USER_SESSION_KEY (0xC0000000 | 0x0202) +#define NT_STATUS_USER_SESSION_DELETED (0xC0000000 | 0x0203) +#define NT_STATUS_RESOURCE_LANG_NOT_FOUND (0xC0000000 | 0x0204) +#define NT_STATUS_INSUFF_SERVER_RESOURCES (0xC0000000 | 0x0205) +#define NT_STATUS_INVALID_BUFFER_SIZE (0xC0000000 | 0x0206) +#define NT_STATUS_INVALID_ADDRESS_COMPONENT (0xC0000000 | 0x0207) +#define NT_STATUS_INVALID_ADDRESS_WILDCARD (0xC0000000 | 0x0208) +#define NT_STATUS_TOO_MANY_ADDRESSES (0xC0000000 | 0x0209) +#define NT_STATUS_ADDRESS_ALREADY_EXISTS (0xC0000000 | 0x020a) +#define NT_STATUS_ADDRESS_CLOSED (0xC0000000 | 0x020b) +#define NT_STATUS_CONNECTION_DISCONNECTED (0xC0000000 | 0x020c) +#define NT_STATUS_CONNECTION_RESET (0xC0000000 | 0x020d) +#define NT_STATUS_TOO_MANY_NODES (0xC0000000 | 0x020e) +#define NT_STATUS_TRANSACTION_ABORTED (0xC0000000 | 0x020f) +#define NT_STATUS_TRANSACTION_TIMED_OUT (0xC0000000 | 0x0210) +#define NT_STATUS_TRANSACTION_NO_RELEASE (0xC0000000 | 0x0211) +#define NT_STATUS_TRANSACTION_NO_MATCH (0xC0000000 | 0x0212) +#define NT_STATUS_TRANSACTION_RESPONDED (0xC0000000 | 0x0213) +#define NT_STATUS_TRANSACTION_INVALID_ID (0xC0000000 | 0x0214) +#define NT_STATUS_TRANSACTION_INVALID_TYPE (0xC0000000 | 0x0215) +#define NT_STATUS_NOT_SERVER_SESSION (0xC0000000 | 0x0216) +#define NT_STATUS_NOT_CLIENT_SESSION (0xC0000000 | 0x0217) +#define NT_STATUS_CANNOT_LOAD_REGISTRY_FILE (0xC0000000 | 0x0218) +#define NT_STATUS_DEBUG_ATTACH_FAILED (0xC0000000 | 0x0219) +#define NT_STATUS_SYSTEM_PROCESS_TERMINATED (0xC0000000 | 0x021a) +#define NT_STATUS_DATA_NOT_ACCEPTED (0xC0000000 | 0x021b) +#define NT_STATUS_NO_BROWSER_SERVERS_FOUND (0xC0000000 | 0x021c) +#define NT_STATUS_VDM_HARD_ERROR (0xC0000000 | 0x021d) +#define NT_STATUS_DRIVER_CANCEL_TIMEOUT (0xC0000000 | 0x021e) +#define NT_STATUS_REPLY_MESSAGE_MISMATCH (0xC0000000 | 0x021f) +#define NT_STATUS_MAPPED_ALIGNMENT (0xC0000000 | 0x0220) +#define NT_STATUS_IMAGE_CHECKSUM_MISMATCH (0xC0000000 | 0x0221) +#define NT_STATUS_LOST_WRITEBEHIND_DATA (0xC0000000 | 0x0222) +#define NT_STATUS_CLIENT_SERVER_PARAMETERS_INVALID (0xC0000000 | 0x0223) +#define NT_STATUS_PASSWORD_MUST_CHANGE (0xC0000000 | 0x0224) +#define NT_STATUS_NOT_FOUND (0xC0000000 | 0x0225) +#define NT_STATUS_NOT_TINY_STREAM (0xC0000000 | 0x0226) +#define NT_STATUS_RECOVERY_FAILURE (0xC0000000 | 0x0227) +#define NT_STATUS_STACK_OVERFLOW_READ (0xC0000000 | 0x0228) +#define NT_STATUS_FAIL_CHECK (0xC0000000 | 0x0229) +#define NT_STATUS_DUPLICATE_OBJECTID (0xC0000000 | 0x022a) +#define NT_STATUS_OBJECTID_EXISTS (0xC0000000 | 0x022b) +#define NT_STATUS_CONVERT_TO_LARGE (0xC0000000 | 0x022c) +#define NT_STATUS_RETRY (0xC0000000 | 0x022d) +#define NT_STATUS_FOUND_OUT_OF_SCOPE (0xC0000000 | 0x022e) +#define NT_STATUS_ALLOCATE_BUCKET (0xC0000000 | 0x022f) +#define NT_STATUS_PROPSET_NOT_FOUND (0xC0000000 | 0x0230) +#define NT_STATUS_MARSHALL_OVERFLOW (0xC0000000 | 0x0231) +#define NT_STATUS_INVALID_VARIANT (0xC0000000 | 0x0232) +#define NT_STATUS_DOMAIN_CONTROLLER_NOT_FOUND (0xC0000000 | 0x0233) +#define 
NT_STATUS_ACCOUNT_LOCKED_OUT (0xC0000000 | 0x0234) +#define NT_STATUS_HANDLE_NOT_CLOSABLE (0xC0000000 | 0x0235) +#define NT_STATUS_CONNECTION_REFUSED (0xC0000000 | 0x0236) +#define NT_STATUS_GRACEFUL_DISCONNECT (0xC0000000 | 0x0237) +#define NT_STATUS_ADDRESS_ALREADY_ASSOCIATED (0xC0000000 | 0x0238) +#define NT_STATUS_ADDRESS_NOT_ASSOCIATED (0xC0000000 | 0x0239) +#define NT_STATUS_CONNECTION_INVALID (0xC0000000 | 0x023a) +#define NT_STATUS_CONNECTION_ACTIVE (0xC0000000 | 0x023b) +#define NT_STATUS_NETWORK_UNREACHABLE (0xC0000000 | 0x023c) +#define NT_STATUS_HOST_UNREACHABLE (0xC0000000 | 0x023d) +#define NT_STATUS_PROTOCOL_UNREACHABLE (0xC0000000 | 0x023e) +#define NT_STATUS_PORT_UNREACHABLE (0xC0000000 | 0x023f) +#define NT_STATUS_REQUEST_ABORTED (0xC0000000 | 0x0240) +#define NT_STATUS_CONNECTION_ABORTED (0xC0000000 | 0x0241) +#define NT_STATUS_BAD_COMPRESSION_BUFFER (0xC0000000 | 0x0242) +#define NT_STATUS_USER_MAPPED_FILE (0xC0000000 | 0x0243) +#define NT_STATUS_AUDIT_FAILED (0xC0000000 | 0x0244) +#define NT_STATUS_TIMER_RESOLUTION_NOT_SET (0xC0000000 | 0x0245) +#define NT_STATUS_CONNECTION_COUNT_LIMIT (0xC0000000 | 0x0246) +#define NT_STATUS_LOGIN_TIME_RESTRICTION (0xC0000000 | 0x0247) +#define NT_STATUS_LOGIN_WKSTA_RESTRICTION (0xC0000000 | 0x0248) +#define NT_STATUS_IMAGE_MP_UP_MISMATCH (0xC0000000 | 0x0249) +#define NT_STATUS_INSUFFICIENT_LOGON_INFO (0xC0000000 | 0x0250) +#define NT_STATUS_BAD_DLL_ENTRYPOINT (0xC0000000 | 0x0251) +#define NT_STATUS_BAD_SERVICE_ENTRYPOINT (0xC0000000 | 0x0252) +#define NT_STATUS_LPC_REPLY_LOST (0xC0000000 | 0x0253) +#define NT_STATUS_IP_ADDRESS_CONFLICT1 (0xC0000000 | 0x0254) +#define NT_STATUS_IP_ADDRESS_CONFLICT2 (0xC0000000 | 0x0255) +#define NT_STATUS_REGISTRY_QUOTA_LIMIT (0xC0000000 | 0x0256) +#define NT_STATUS_PATH_NOT_COVERED (0xC0000000 | 0x0257) +#define NT_STATUS_NO_CALLBACK_ACTIVE (0xC0000000 | 0x0258) +#define NT_STATUS_LICENSE_QUOTA_EXCEEDED (0xC0000000 | 0x0259) +#define NT_STATUS_PWD_TOO_SHORT (0xC0000000 | 0x025a) +#define NT_STATUS_PWD_TOO_RECENT (0xC0000000 | 0x025b) +#define NT_STATUS_PWD_HISTORY_CONFLICT (0xC0000000 | 0x025c) +#define NT_STATUS_PLUGPLAY_NO_DEVICE (0xC0000000 | 0x025e) +#define NT_STATUS_UNSUPPORTED_COMPRESSION (0xC0000000 | 0x025f) +#define NT_STATUS_INVALID_HW_PROFILE (0xC0000000 | 0x0260) +#define NT_STATUS_INVALID_PLUGPLAY_DEVICE_PATH (0xC0000000 | 0x0261) +#define NT_STATUS_DRIVER_ORDINAL_NOT_FOUND (0xC0000000 | 0x0262) +#define NT_STATUS_DRIVER_ENTRYPOINT_NOT_FOUND (0xC0000000 | 0x0263) +#define NT_STATUS_RESOURCE_NOT_OWNED (0xC0000000 | 0x0264) +#define NT_STATUS_TOO_MANY_LINKS (0xC0000000 | 0x0265) +#define NT_STATUS_QUOTA_LIST_INCONSISTENT (0xC0000000 | 0x0266) +#define NT_STATUS_FILE_IS_OFFLINE (0xC0000000 | 0x0267) +#define NT_STATUS_NOT_A_REPARSE_POINT (0xC0000000 | 0x0275) +#define NT_STATUS_NETWORK_SESSION_EXPIRED (0xC0000000 | 0x035c) +#define NT_STATUS_NO_SUCH_JOB (0xC0000000 | 0xEDE) /* scheduler */ +#define NT_STATUS_NO_PREAUTH_INTEGRITY_HASH_OVERLAP (0xC0000000 | 0x5D0000) #endif /* _NTERR_H */ diff --git a/fs/smb/common/fscc.h b/fs/smb/common/fscc.h index 35dbacdbb902..0123f34db1e8 100644 --- a/fs/smb/common/fscc.h +++ b/fs/smb/common/fscc.h @@ -145,6 +145,62 @@ typedef struct { } __packed FILE_SYSTEM_DEVICE_INFO; /* device info level 0x104 */ /* + * File Attributes + * See MS-FSCC 2.6 + */ +#define FILE_ATTRIBUTE_READONLY 0x00000001 +#define FILE_ATTRIBUTE_HIDDEN 0x00000002 +#define FILE_ATTRIBUTE_SYSTEM 0x00000004 +#define FILE_ATTRIBUTE_DIRECTORY 0x00000010 +#define FILE_ATTRIBUTE_ARCHIVE 
0x00000020 +#define FILE_ATTRIBUTE_NORMAL 0x00000080 +#define FILE_ATTRIBUTE_TEMPORARY 0x00000100 +#define FILE_ATTRIBUTE_SPARSE_FILE 0x00000200 +#define FILE_ATTRIBUTE_REPARSE_POINT 0x00000400 +#define FILE_ATTRIBUTE_COMPRESSED 0x00000800 +#define FILE_ATTRIBUTE_OFFLINE 0x00001000 +#define FILE_ATTRIBUTE_NOT_CONTENT_INDEXED 0x00002000 +#define FILE_ATTRIBUTE_ENCRYPTED 0x00004000 +#define FILE_ATTRIBUTE_INTEGRITY_STREAM 0x00008000 +#define FILE_ATTRIBUTE_NO_SCRUB_DATA 0x00020000 +#define FILE_ATTRIBUTE_MASK (FILE_ATTRIBUTE_READONLY | FILE_ATTRIBUTE_HIDDEN | \ + FILE_ATTRIBUTE_SYSTEM | FILE_ATTRIBUTE_DIRECTORY | \ + FILE_ATTRIBUTE_ARCHIVE | FILE_ATTRIBUTE_NORMAL | \ + FILE_ATTRIBUTE_TEMPORARY | FILE_ATTRIBUTE_SPARSE_FILE | \ + FILE_ATTRIBUTE_REPARSE_POINT | FILE_ATTRIBUTE_COMPRESSED | \ + FILE_ATTRIBUTE_OFFLINE | FILE_ATTRIBUTE_NOT_CONTENT_INDEXED | \ + FILE_ATTRIBUTE_ENCRYPTED | FILE_ATTRIBUTE_INTEGRITY_STREAM | \ + FILE_ATTRIBUTE_NO_SCRUB_DATA) + +#define FILE_ATTRIBUTE_READONLY_LE cpu_to_le32(FILE_ATTRIBUTE_READONLY) +#define FILE_ATTRIBUTE_HIDDEN_LE cpu_to_le32(FILE_ATTRIBUTE_HIDDEN) +#define FILE_ATTRIBUTE_SYSTEM_LE cpu_to_le32(FILE_ATTRIBUTE_SYSTEM) +#define FILE_ATTRIBUTE_DIRECTORY_LE cpu_to_le32(FILE_ATTRIBUTE_DIRECTORY) +#define FILE_ATTRIBUTE_ARCHIVE_LE cpu_to_le32(FILE_ATTRIBUTE_ARCHIVE) +#define FILE_ATTRIBUTE_NORMAL_LE cpu_to_le32(FILE_ATTRIBUTE_NORMAL) +#define FILE_ATTRIBUTE_TEMPORARY_LE cpu_to_le32(FILE_ATTRIBUTE_TEMPORARY) +#define FILE_ATTRIBUTE_SPARSE_FILE_LE cpu_to_le32(FILE_ATTRIBUTE_SPARSE_FILE) +#define FILE_ATTRIBUTE_REPARSE_POINT_LE cpu_to_le32(FILE_ATTRIBUTE_REPARSE_POINT) +#define FILE_ATTRIBUTE_COMPRESSED_LE cpu_to_le32(FILE_ATTRIBUTE_COMPRESSED) +#define FILE_ATTRIBUTE_OFFLINE_LE cpu_to_le32(FILE_ATTRIBUTE_OFFLINE) +#define FILE_ATTRIBUTE_NOT_CONTENT_INDEXED_LE cpu_to_le32(FILE_ATTRIBUTE_NOT_CONTENT_INDEXED) +#define FILE_ATTRIBUTE_ENCRYPTED_LE cpu_to_le32(FILE_ATTRIBUTE_ENCRYPTED) +#define FILE_ATTRIBUTE_INTEGRITY_STREAM_LE cpu_to_le32(FILE_ATTRIBUTE_INTEGRITY_STREAM) +#define FILE_ATTRIBUTE_NO_SCRUB_DATA_LE cpu_to_le32(FILE_ATTRIBUTE_NO_SCRUB_DATA) +#define FILE_ATTRIBUTE_MASK_LE cpu_to_le32(FILE_ATTRIBUTE_MASK) + +/* + * Response contains array of the following structures + * See MS-FSCC 2.7.1 + */ +struct file_notify_information { + __le32 NextEntryOffset; + __le32 Action; + __le32 FileNameLength; + __u8 FileName[]; +} __packed; + +/* * See POSIX Extensions to MS-FSCC 2.3.2.1 * Link: https://gitlab.com/samba-team/smb3-posix-spec/-/blob/master/fscc_posix_extensions.md */ diff --git a/fs/smb/common/smb2pdu.h b/fs/smb/common/smb2pdu.h index 945a8e0cf36c..3c8d8a4e7439 100644 --- a/fs/smb/common/smb2pdu.h +++ b/fs/smb/common/smb2pdu.h @@ -991,6 +991,7 @@ struct smb2_set_info_rsp { /* notify completion filter flags. 
See MS-FSCC 2.6 and MS-SMB2 2.2.35 */ #define FILE_NOTIFY_CHANGE_FILE_NAME 0x00000001 #define FILE_NOTIFY_CHANGE_DIR_NAME 0x00000002 +#define FILE_NOTIFY_CHANGE_NAME 0x00000003 #define FILE_NOTIFY_CHANGE_ATTRIBUTES 0x00000004 #define FILE_NOTIFY_CHANGE_SIZE 0x00000008 #define FILE_NOTIFY_CHANGE_LAST_WRITE 0x00000010 @@ -1002,7 +1003,10 @@ struct smb2_set_info_rsp { #define FILE_NOTIFY_CHANGE_STREAM_SIZE 0x00000400 #define FILE_NOTIFY_CHANGE_STREAM_WRITE 0x00000800 -/* SMB2 Notify Action Flags */ +/* + * SMB2 Notify Action Flags + * See MS-FSCC 2.7.1 + */ #define FILE_ACTION_ADDED 0x00000001 #define FILE_ACTION_REMOVED 0x00000002 #define FILE_ACTION_MODIFIED 0x00000003 @@ -1012,7 +1016,10 @@ struct smb2_set_info_rsp { #define FILE_ACTION_REMOVED_STREAM 0x00000007 #define FILE_ACTION_MODIFIED_STREAM 0x00000008 #define FILE_ACTION_REMOVED_BY_DELETE 0x00000009 +#define FILE_ACTION_ID_NOT_TUNNELLED 0x0000000A +#define FILE_ACTION_TUNNELLED_ID_COLLISION 0x0000000B +/* See MS-SMB2 2.2.35 */ struct smb2_change_notify_req { struct smb2_hdr hdr; __le16 StructureSize; @@ -1024,6 +1031,7 @@ struct smb2_change_notify_req { __u32 Reserved; } __packed; +/* See MS-SMB2 2.2.36 */ struct smb2_change_notify_rsp { struct smb2_hdr hdr; __le16 StructureSize; /* Must be 9 */ @@ -1064,41 +1072,6 @@ struct smb2_server_client_notification { #define IL_IMPERSONATION cpu_to_le32(0x00000002) #define IL_DELEGATE cpu_to_le32(0x00000003) -/* File Attributes */ -#define FILE_ATTRIBUTE_READONLY 0x00000001 -#define FILE_ATTRIBUTE_HIDDEN 0x00000002 -#define FILE_ATTRIBUTE_SYSTEM 0x00000004 -#define FILE_ATTRIBUTE_DIRECTORY 0x00000010 -#define FILE_ATTRIBUTE_ARCHIVE 0x00000020 -#define FILE_ATTRIBUTE_NORMAL 0x00000080 -#define FILE_ATTRIBUTE_TEMPORARY 0x00000100 -#define FILE_ATTRIBUTE_SPARSE_FILE 0x00000200 -#define FILE_ATTRIBUTE_REPARSE_POINT 0x00000400 -#define FILE_ATTRIBUTE_COMPRESSED 0x00000800 -#define FILE_ATTRIBUTE_OFFLINE 0x00001000 -#define FILE_ATTRIBUTE_NOT_CONTENT_INDEXED 0x00002000 -#define FILE_ATTRIBUTE_ENCRYPTED 0x00004000 -#define FILE_ATTRIBUTE_INTEGRITY_STREAM 0x00008000 -#define FILE_ATTRIBUTE_NO_SCRUB_DATA 0x00020000 -#define FILE_ATTRIBUTE__MASK 0x00007FB7 - -#define FILE_ATTRIBUTE_READONLY_LE cpu_to_le32(0x00000001) -#define FILE_ATTRIBUTE_HIDDEN_LE cpu_to_le32(0x00000002) -#define FILE_ATTRIBUTE_SYSTEM_LE cpu_to_le32(0x00000004) -#define FILE_ATTRIBUTE_DIRECTORY_LE cpu_to_le32(0x00000010) -#define FILE_ATTRIBUTE_ARCHIVE_LE cpu_to_le32(0x00000020) -#define FILE_ATTRIBUTE_NORMAL_LE cpu_to_le32(0x00000080) -#define FILE_ATTRIBUTE_TEMPORARY_LE cpu_to_le32(0x00000100) -#define FILE_ATTRIBUTE_SPARSE_FILE_LE cpu_to_le32(0x00000200) -#define FILE_ATTRIBUTE_REPARSE_POINT_LE cpu_to_le32(0x00000400) -#define FILE_ATTRIBUTE_COMPRESSED_LE cpu_to_le32(0x00000800) -#define FILE_ATTRIBUTE_OFFLINE_LE cpu_to_le32(0x00001000) -#define FILE_ATTRIBUTE_NOT_CONTENT_INDEXED_LE cpu_to_le32(0x00002000) -#define FILE_ATTRIBUTE_ENCRYPTED_LE cpu_to_le32(0x00004000) -#define FILE_ATTRIBUTE_INTEGRITY_STREAM_LE cpu_to_le32(0x00008000) -#define FILE_ATTRIBUTE_NO_SCRUB_DATA_LE cpu_to_le32(0x00020000) -#define FILE_ATTRIBUTE_MASK_LE cpu_to_le32(0x00007FB7) - /* Desired Access Flags */ #define FILE_READ_DATA_LE cpu_to_le32(0x00000001) #define FILE_LIST_DIRECTORY_LE cpu_to_le32(0x00000001) @@ -1537,9 +1510,10 @@ struct duplicate_extents_to_file { __le64 ByteCount; /* Bytes to be copied */ } __packed; -/* See MS-FSCC 2.3.8 */ +/* See MS-FSCC 2.3.9 */ #define DUPLICATE_EXTENTS_DATA_EX_SOURCE_ATOMIC 0x00000001 struct 
duplicate_extents_to_file_ex { + __le64 StructureSize; /* MUST be set to 0x30 */ __u64 PersistentFileHandle; /* source file handle, opaque endianness */ __u64 VolatileFileHandle; __le64 SourceFileOffset; diff --git a/fs/smb/server/nterr.h b/fs/smb/server/nterr.h deleted file mode 100644 index 2f358f88a018..000000000000 --- a/fs/smb/server/nterr.h +++ /dev/null @@ -1,543 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Unix SMB/Netbios implementation. - * Version 1.9. - * NT error code constants - * Copyright (C) Andrew Tridgell 1992-2000 - * Copyright (C) John H Terpstra 1996-2000 - * Copyright (C) Luke Kenneth Casson Leighton 1996-2000 - * Copyright (C) Paul Ashton 1998-2000 - */ - -#ifndef _NTERR_H -#define _NTERR_H - -/* Win32 Status codes. */ -#define NT_STATUS_MORE_ENTRIES 0x0105 -#define NT_ERROR_INVALID_PARAMETER 0x0057 -#define NT_ERROR_INSUFFICIENT_BUFFER 0x007a -#define NT_STATUS_1804 0x070c -#define NT_STATUS_NOTIFY_ENUM_DIR 0x010c -#define NT_STATUS_INVALID_LOCK_RANGE (0xC0000000 | 0x01a1) -/* - * Win32 Error codes extracted using a loop in smbclient then printing a netmon - * sniff to a file. - */ - -#define NT_STATUS_OK 0x0000 -#define NT_STATUS_SOME_UNMAPPED 0x0107 -#define NT_STATUS_BUFFER_OVERFLOW 0x80000005 -#define NT_STATUS_NO_MORE_ENTRIES 0x8000001a -#define NT_STATUS_MEDIA_CHANGED 0x8000001c -#define NT_STATUS_END_OF_MEDIA 0x8000001e -#define NT_STATUS_MEDIA_CHECK 0x80000020 -#define NT_STATUS_NO_DATA_DETECTED 0x8000001c -#define NT_STATUS_STOPPED_ON_SYMLINK 0x8000002d -#define NT_STATUS_DEVICE_REQUIRES_CLEANING 0x80000288 -#define NT_STATUS_DEVICE_DOOR_OPEN 0x80000288 -#define NT_STATUS_UNSUCCESSFUL (0xC0000000 | 0x0001) -#define NT_STATUS_NOT_IMPLEMENTED (0xC0000000 | 0x0002) -#define NT_STATUS_INVALID_INFO_CLASS (0xC0000000 | 0x0003) -#define NT_STATUS_INFO_LENGTH_MISMATCH (0xC0000000 | 0x0004) -#define NT_STATUS_ACCESS_VIOLATION (0xC0000000 | 0x0005) -#define NT_STATUS_IN_PAGE_ERROR (0xC0000000 | 0x0006) -#define NT_STATUS_PAGEFILE_QUOTA (0xC0000000 | 0x0007) -#define NT_STATUS_INVALID_HANDLE (0xC0000000 | 0x0008) -#define NT_STATUS_BAD_INITIAL_STACK (0xC0000000 | 0x0009) -#define NT_STATUS_BAD_INITIAL_PC (0xC0000000 | 0x000a) -#define NT_STATUS_INVALID_CID (0xC0000000 | 0x000b) -#define NT_STATUS_TIMER_NOT_CANCELED (0xC0000000 | 0x000c) -#define NT_STATUS_INVALID_PARAMETER (0xC0000000 | 0x000d) -#define NT_STATUS_NO_SUCH_DEVICE (0xC0000000 | 0x000e) -#define NT_STATUS_NO_SUCH_FILE (0xC0000000 | 0x000f) -#define NT_STATUS_INVALID_DEVICE_REQUEST (0xC0000000 | 0x0010) -#define NT_STATUS_END_OF_FILE (0xC0000000 | 0x0011) -#define NT_STATUS_WRONG_VOLUME (0xC0000000 | 0x0012) -#define NT_STATUS_NO_MEDIA_IN_DEVICE (0xC0000000 | 0x0013) -#define NT_STATUS_UNRECOGNIZED_MEDIA (0xC0000000 | 0x0014) -#define NT_STATUS_NONEXISTENT_SECTOR (0xC0000000 | 0x0015) -#define NT_STATUS_MORE_PROCESSING_REQUIRED (0xC0000000 | 0x0016) -#define NT_STATUS_NO_MEMORY (0xC0000000 | 0x0017) -#define NT_STATUS_CONFLICTING_ADDRESSES (0xC0000000 | 0x0018) -#define NT_STATUS_NOT_MAPPED_VIEW (0xC0000000 | 0x0019) -#define NT_STATUS_UNABLE_TO_FREE_VM (0x80000000 | 0x001a) -#define NT_STATUS_UNABLE_TO_DELETE_SECTION (0xC0000000 | 0x001b) -#define NT_STATUS_INVALID_SYSTEM_SERVICE (0xC0000000 | 0x001c) -#define NT_STATUS_ILLEGAL_INSTRUCTION (0xC0000000 | 0x001d) -#define NT_STATUS_INVALID_LOCK_SEQUENCE (0xC0000000 | 0x001e) -#define NT_STATUS_INVALID_VIEW_SIZE (0xC0000000 | 0x001f) -#define NT_STATUS_INVALID_FILE_FOR_SECTION (0xC0000000 | 0x0020) -#define NT_STATUS_ALREADY_COMMITTED 
(0xC0000000 | 0x0021) -#define NT_STATUS_ACCESS_DENIED (0xC0000000 | 0x0022) -#define NT_STATUS_BUFFER_TOO_SMALL (0xC0000000 | 0x0023) -#define NT_STATUS_OBJECT_TYPE_MISMATCH (0xC0000000 | 0x0024) -#define NT_STATUS_NONCONTINUABLE_EXCEPTION (0xC0000000 | 0x0025) -#define NT_STATUS_INVALID_DISPOSITION (0xC0000000 | 0x0026) -#define NT_STATUS_UNWIND (0xC0000000 | 0x0027) -#define NT_STATUS_BAD_STACK (0xC0000000 | 0x0028) -#define NT_STATUS_INVALID_UNWIND_TARGET (0xC0000000 | 0x0029) -#define NT_STATUS_NOT_LOCKED (0xC0000000 | 0x002a) -#define NT_STATUS_PARITY_ERROR (0xC0000000 | 0x002b) -#define NT_STATUS_UNABLE_TO_DECOMMIT_VM (0xC0000000 | 0x002c) -#define NT_STATUS_NOT_COMMITTED (0xC0000000 | 0x002d) -#define NT_STATUS_INVALID_PORT_ATTRIBUTES (0xC0000000 | 0x002e) -#define NT_STATUS_PORT_MESSAGE_TOO_LONG (0xC0000000 | 0x002f) -#define NT_STATUS_INVALID_PARAMETER_MIX (0xC0000000 | 0x0030) -#define NT_STATUS_INVALID_QUOTA_LOWER (0xC0000000 | 0x0031) -#define NT_STATUS_DISK_CORRUPT_ERROR (0xC0000000 | 0x0032) -#define NT_STATUS_OBJECT_NAME_INVALID (0xC0000000 | 0x0033) -#define NT_STATUS_OBJECT_NAME_NOT_FOUND (0xC0000000 | 0x0034) -#define NT_STATUS_OBJECT_NAME_COLLISION (0xC0000000 | 0x0035) -#define NT_STATUS_HANDLE_NOT_WAITABLE (0xC0000000 | 0x0036) -#define NT_STATUS_PORT_DISCONNECTED (0xC0000000 | 0x0037) -#define NT_STATUS_DEVICE_ALREADY_ATTACHED (0xC0000000 | 0x0038) -#define NT_STATUS_OBJECT_PATH_INVALID (0xC0000000 | 0x0039) -#define NT_STATUS_OBJECT_PATH_NOT_FOUND (0xC0000000 | 0x003a) -#define NT_STATUS_OBJECT_PATH_SYNTAX_BAD (0xC0000000 | 0x003b) -#define NT_STATUS_DATA_OVERRUN (0xC0000000 | 0x003c) -#define NT_STATUS_DATA_LATE_ERROR (0xC0000000 | 0x003d) -#define NT_STATUS_DATA_ERROR (0xC0000000 | 0x003e) -#define NT_STATUS_CRC_ERROR (0xC0000000 | 0x003f) -#define NT_STATUS_SECTION_TOO_BIG (0xC0000000 | 0x0040) -#define NT_STATUS_PORT_CONNECTION_REFUSED (0xC0000000 | 0x0041) -#define NT_STATUS_INVALID_PORT_HANDLE (0xC0000000 | 0x0042) -#define NT_STATUS_SHARING_VIOLATION (0xC0000000 | 0x0043) -#define NT_STATUS_QUOTA_EXCEEDED (0xC0000000 | 0x0044) -#define NT_STATUS_INVALID_PAGE_PROTECTION (0xC0000000 | 0x0045) -#define NT_STATUS_MUTANT_NOT_OWNED (0xC0000000 | 0x0046) -#define NT_STATUS_SEMAPHORE_LIMIT_EXCEEDED (0xC0000000 | 0x0047) -#define NT_STATUS_PORT_ALREADY_SET (0xC0000000 | 0x0048) -#define NT_STATUS_SECTION_NOT_IMAGE (0xC0000000 | 0x0049) -#define NT_STATUS_SUSPEND_COUNT_EXCEEDED (0xC0000000 | 0x004a) -#define NT_STATUS_THREAD_IS_TERMINATING (0xC0000000 | 0x004b) -#define NT_STATUS_BAD_WORKING_SET_LIMIT (0xC0000000 | 0x004c) -#define NT_STATUS_INCOMPATIBLE_FILE_MAP (0xC0000000 | 0x004d) -#define NT_STATUS_SECTION_PROTECTION (0xC0000000 | 0x004e) -#define NT_STATUS_EAS_NOT_SUPPORTED (0xC0000000 | 0x004f) -#define NT_STATUS_EA_TOO_LARGE (0xC0000000 | 0x0050) -#define NT_STATUS_NONEXISTENT_EA_ENTRY (0xC0000000 | 0x0051) -#define NT_STATUS_NO_EAS_ON_FILE (0xC0000000 | 0x0052) -#define NT_STATUS_EA_CORRUPT_ERROR (0xC0000000 | 0x0053) -#define NT_STATUS_FILE_LOCK_CONFLICT (0xC0000000 | 0x0054) -#define NT_STATUS_LOCK_NOT_GRANTED (0xC0000000 | 0x0055) -#define NT_STATUS_DELETE_PENDING (0xC0000000 | 0x0056) -#define NT_STATUS_CTL_FILE_NOT_SUPPORTED (0xC0000000 | 0x0057) -#define NT_STATUS_UNKNOWN_REVISION (0xC0000000 | 0x0058) -#define NT_STATUS_REVISION_MISMATCH (0xC0000000 | 0x0059) -#define NT_STATUS_INVALID_OWNER (0xC0000000 | 0x005a) -#define NT_STATUS_INVALID_PRIMARY_GROUP (0xC0000000 | 0x005b) -#define NT_STATUS_NO_IMPERSONATION_TOKEN (0xC0000000 | 0x005c) -#define 
NT_STATUS_CANT_DISABLE_MANDATORY (0xC0000000 | 0x005d) -#define NT_STATUS_NO_LOGON_SERVERS (0xC0000000 | 0x005e) -#define NT_STATUS_NO_SUCH_LOGON_SESSION (0xC0000000 | 0x005f) -#define NT_STATUS_NO_SUCH_PRIVILEGE (0xC0000000 | 0x0060) -#define NT_STATUS_PRIVILEGE_NOT_HELD (0xC0000000 | 0x0061) -#define NT_STATUS_INVALID_ACCOUNT_NAME (0xC0000000 | 0x0062) -#define NT_STATUS_USER_EXISTS (0xC0000000 | 0x0063) -#define NT_STATUS_NO_SUCH_USER (0xC0000000 | 0x0064) -#define NT_STATUS_GROUP_EXISTS (0xC0000000 | 0x0065) -#define NT_STATUS_NO_SUCH_GROUP (0xC0000000 | 0x0066) -#define NT_STATUS_MEMBER_IN_GROUP (0xC0000000 | 0x0067) -#define NT_STATUS_MEMBER_NOT_IN_GROUP (0xC0000000 | 0x0068) -#define NT_STATUS_LAST_ADMIN (0xC0000000 | 0x0069) -#define NT_STATUS_WRONG_PASSWORD (0xC0000000 | 0x006a) -#define NT_STATUS_ILL_FORMED_PASSWORD (0xC0000000 | 0x006b) -#define NT_STATUS_PASSWORD_RESTRICTION (0xC0000000 | 0x006c) -#define NT_STATUS_LOGON_FAILURE (0xC0000000 | 0x006d) -#define NT_STATUS_ACCOUNT_RESTRICTION (0xC0000000 | 0x006e) -#define NT_STATUS_INVALID_LOGON_HOURS (0xC0000000 | 0x006f) -#define NT_STATUS_INVALID_WORKSTATION (0xC0000000 | 0x0070) -#define NT_STATUS_PASSWORD_EXPIRED (0xC0000000 | 0x0071) -#define NT_STATUS_ACCOUNT_DISABLED (0xC0000000 | 0x0072) -#define NT_STATUS_NONE_MAPPED (0xC0000000 | 0x0073) -#define NT_STATUS_TOO_MANY_LUIDS_REQUESTED (0xC0000000 | 0x0074) -#define NT_STATUS_LUIDS_EXHAUSTED (0xC0000000 | 0x0075) -#define NT_STATUS_INVALID_SUB_AUTHORITY (0xC0000000 | 0x0076) -#define NT_STATUS_INVALID_ACL (0xC0000000 | 0x0077) -#define NT_STATUS_INVALID_SID (0xC0000000 | 0x0078) -#define NT_STATUS_INVALID_SECURITY_DESCR (0xC0000000 | 0x0079) -#define NT_STATUS_PROCEDURE_NOT_FOUND (0xC0000000 | 0x007a) -#define NT_STATUS_INVALID_IMAGE_FORMAT (0xC0000000 | 0x007b) -#define NT_STATUS_NO_TOKEN (0xC0000000 | 0x007c) -#define NT_STATUS_BAD_INHERITANCE_ACL (0xC0000000 | 0x007d) -#define NT_STATUS_RANGE_NOT_LOCKED (0xC0000000 | 0x007e) -#define NT_STATUS_DISK_FULL (0xC0000000 | 0x007f) -#define NT_STATUS_SERVER_DISABLED (0xC0000000 | 0x0080) -#define NT_STATUS_SERVER_NOT_DISABLED (0xC0000000 | 0x0081) -#define NT_STATUS_TOO_MANY_GUIDS_REQUESTED (0xC0000000 | 0x0082) -#define NT_STATUS_GUIDS_EXHAUSTED (0xC0000000 | 0x0083) -#define NT_STATUS_INVALID_ID_AUTHORITY (0xC0000000 | 0x0084) -#define NT_STATUS_AGENTS_EXHAUSTED (0xC0000000 | 0x0085) -#define NT_STATUS_INVALID_VOLUME_LABEL (0xC0000000 | 0x0086) -#define NT_STATUS_SECTION_NOT_EXTENDED (0xC0000000 | 0x0087) -#define NT_STATUS_NOT_MAPPED_DATA (0xC0000000 | 0x0088) -#define NT_STATUS_RESOURCE_DATA_NOT_FOUND (0xC0000000 | 0x0089) -#define NT_STATUS_RESOURCE_TYPE_NOT_FOUND (0xC0000000 | 0x008a) -#define NT_STATUS_RESOURCE_NAME_NOT_FOUND (0xC0000000 | 0x008b) -#define NT_STATUS_ARRAY_BOUNDS_EXCEEDED (0xC0000000 | 0x008c) -#define NT_STATUS_FLOAT_DENORMAL_OPERAND (0xC0000000 | 0x008d) -#define NT_STATUS_FLOAT_DIVIDE_BY_ZERO (0xC0000000 | 0x008e) -#define NT_STATUS_FLOAT_INEXACT_RESULT (0xC0000000 | 0x008f) -#define NT_STATUS_FLOAT_INVALID_OPERATION (0xC0000000 | 0x0090) -#define NT_STATUS_FLOAT_OVERFLOW (0xC0000000 | 0x0091) -#define NT_STATUS_FLOAT_STACK_CHECK (0xC0000000 | 0x0092) -#define NT_STATUS_FLOAT_UNDERFLOW (0xC0000000 | 0x0093) -#define NT_STATUS_INTEGER_DIVIDE_BY_ZERO (0xC0000000 | 0x0094) -#define NT_STATUS_INTEGER_OVERFLOW (0xC0000000 | 0x0095) -#define NT_STATUS_PRIVILEGED_INSTRUCTION (0xC0000000 | 0x0096) -#define NT_STATUS_TOO_MANY_PAGING_FILES (0xC0000000 | 0x0097) -#define NT_STATUS_FILE_INVALID (0xC0000000 | 
0x0098) -#define NT_STATUS_ALLOTTED_SPACE_EXCEEDED (0xC0000000 | 0x0099) -#define NT_STATUS_INSUFFICIENT_RESOURCES (0xC0000000 | 0x009a) -#define NT_STATUS_DFS_EXIT_PATH_FOUND (0xC0000000 | 0x009b) -#define NT_STATUS_DEVICE_DATA_ERROR (0xC0000000 | 0x009c) -#define NT_STATUS_DEVICE_NOT_CONNECTED (0xC0000000 | 0x009d) -#define NT_STATUS_DEVICE_POWER_FAILURE (0xC0000000 | 0x009e) -#define NT_STATUS_FREE_VM_NOT_AT_BASE (0xC0000000 | 0x009f) -#define NT_STATUS_MEMORY_NOT_ALLOCATED (0xC0000000 | 0x00a0) -#define NT_STATUS_WORKING_SET_QUOTA (0xC0000000 | 0x00a1) -#define NT_STATUS_MEDIA_WRITE_PROTECTED (0xC0000000 | 0x00a2) -#define NT_STATUS_DEVICE_NOT_READY (0xC0000000 | 0x00a3) -#define NT_STATUS_INVALID_GROUP_ATTRIBUTES (0xC0000000 | 0x00a4) -#define NT_STATUS_BAD_IMPERSONATION_LEVEL (0xC0000000 | 0x00a5) -#define NT_STATUS_CANT_OPEN_ANONYMOUS (0xC0000000 | 0x00a6) -#define NT_STATUS_BAD_VALIDATION_CLASS (0xC0000000 | 0x00a7) -#define NT_STATUS_BAD_TOKEN_TYPE (0xC0000000 | 0x00a8) -#define NT_STATUS_BAD_MASTER_BOOT_RECORD (0xC0000000 | 0x00a9) -#define NT_STATUS_INSTRUCTION_MISALIGNMENT (0xC0000000 | 0x00aa) -#define NT_STATUS_INSTANCE_NOT_AVAILABLE (0xC0000000 | 0x00ab) -#define NT_STATUS_PIPE_NOT_AVAILABLE (0xC0000000 | 0x00ac) -#define NT_STATUS_INVALID_PIPE_STATE (0xC0000000 | 0x00ad) -#define NT_STATUS_PIPE_BUSY (0xC0000000 | 0x00ae) -#define NT_STATUS_ILLEGAL_FUNCTION (0xC0000000 | 0x00af) -#define NT_STATUS_PIPE_DISCONNECTED (0xC0000000 | 0x00b0) -#define NT_STATUS_PIPE_CLOSING (0xC0000000 | 0x00b1) -#define NT_STATUS_PIPE_CONNECTED (0xC0000000 | 0x00b2) -#define NT_STATUS_PIPE_LISTENING (0xC0000000 | 0x00b3) -#define NT_STATUS_INVALID_READ_MODE (0xC0000000 | 0x00b4) -#define NT_STATUS_IO_TIMEOUT (0xC0000000 | 0x00b5) -#define NT_STATUS_FILE_FORCED_CLOSED (0xC0000000 | 0x00b6) -#define NT_STATUS_PROFILING_NOT_STARTED (0xC0000000 | 0x00b7) -#define NT_STATUS_PROFILING_NOT_STOPPED (0xC0000000 | 0x00b8) -#define NT_STATUS_COULD_NOT_INTERPRET (0xC0000000 | 0x00b9) -#define NT_STATUS_FILE_IS_A_DIRECTORY (0xC0000000 | 0x00ba) -#define NT_STATUS_NOT_SUPPORTED (0xC0000000 | 0x00bb) -#define NT_STATUS_REMOTE_NOT_LISTENING (0xC0000000 | 0x00bc) -#define NT_STATUS_DUPLICATE_NAME (0xC0000000 | 0x00bd) -#define NT_STATUS_BAD_NETWORK_PATH (0xC0000000 | 0x00be) -#define NT_STATUS_NETWORK_BUSY (0xC0000000 | 0x00bf) -#define NT_STATUS_DEVICE_DOES_NOT_EXIST (0xC0000000 | 0x00c0) -#define NT_STATUS_TOO_MANY_COMMANDS (0xC0000000 | 0x00c1) -#define NT_STATUS_ADAPTER_HARDWARE_ERROR (0xC0000000 | 0x00c2) -#define NT_STATUS_INVALID_NETWORK_RESPONSE (0xC0000000 | 0x00c3) -#define NT_STATUS_UNEXPECTED_NETWORK_ERROR (0xC0000000 | 0x00c4) -#define NT_STATUS_BAD_REMOTE_ADAPTER (0xC0000000 | 0x00c5) -#define NT_STATUS_PRINT_QUEUE_FULL (0xC0000000 | 0x00c6) -#define NT_STATUS_NO_SPOOL_SPACE (0xC0000000 | 0x00c7) -#define NT_STATUS_PRINT_CANCELLED (0xC0000000 | 0x00c8) -#define NT_STATUS_NETWORK_NAME_DELETED (0xC0000000 | 0x00c9) -#define NT_STATUS_NETWORK_ACCESS_DENIED (0xC0000000 | 0x00ca) -#define NT_STATUS_BAD_DEVICE_TYPE (0xC0000000 | 0x00cb) -#define NT_STATUS_BAD_NETWORK_NAME (0xC0000000 | 0x00cc) -#define NT_STATUS_TOO_MANY_NAMES (0xC0000000 | 0x00cd) -#define NT_STATUS_TOO_MANY_SESSIONS (0xC0000000 | 0x00ce) -#define NT_STATUS_SHARING_PAUSED (0xC0000000 | 0x00cf) -#define NT_STATUS_REQUEST_NOT_ACCEPTED (0xC0000000 | 0x00d0) -#define NT_STATUS_REDIRECTOR_PAUSED (0xC0000000 | 0x00d1) -#define NT_STATUS_NET_WRITE_FAULT (0xC0000000 | 0x00d2) -#define NT_STATUS_PROFILING_AT_LIMIT (0xC0000000 | 0x00d3) -#define 
NT_STATUS_NOT_SAME_DEVICE (0xC0000000 | 0x00d4) -#define NT_STATUS_FILE_RENAMED (0xC0000000 | 0x00d5) -#define NT_STATUS_VIRTUAL_CIRCUIT_CLOSED (0xC0000000 | 0x00d6) -#define NT_STATUS_NO_SECURITY_ON_OBJECT (0xC0000000 | 0x00d7) -#define NT_STATUS_CANT_WAIT (0xC0000000 | 0x00d8) -#define NT_STATUS_PIPE_EMPTY (0xC0000000 | 0x00d9) -#define NT_STATUS_CANT_ACCESS_DOMAIN_INFO (0xC0000000 | 0x00da) -#define NT_STATUS_CANT_TERMINATE_SELF (0xC0000000 | 0x00db) -#define NT_STATUS_INVALID_SERVER_STATE (0xC0000000 | 0x00dc) -#define NT_STATUS_INVALID_DOMAIN_STATE (0xC0000000 | 0x00dd) -#define NT_STATUS_INVALID_DOMAIN_ROLE (0xC0000000 | 0x00de) -#define NT_STATUS_NO_SUCH_DOMAIN (0xC0000000 | 0x00df) -#define NT_STATUS_DOMAIN_EXISTS (0xC0000000 | 0x00e0) -#define NT_STATUS_DOMAIN_LIMIT_EXCEEDED (0xC0000000 | 0x00e1) -#define NT_STATUS_OPLOCK_NOT_GRANTED (0xC0000000 | 0x00e2) -#define NT_STATUS_INVALID_OPLOCK_PROTOCOL (0xC0000000 | 0x00e3) -#define NT_STATUS_INTERNAL_DB_CORRUPTION (0xC0000000 | 0x00e4) -#define NT_STATUS_INTERNAL_ERROR (0xC0000000 | 0x00e5) -#define NT_STATUS_GENERIC_NOT_MAPPED (0xC0000000 | 0x00e6) -#define NT_STATUS_BAD_DESCRIPTOR_FORMAT (0xC0000000 | 0x00e7) -#define NT_STATUS_INVALID_USER_BUFFER (0xC0000000 | 0x00e8) -#define NT_STATUS_UNEXPECTED_IO_ERROR (0xC0000000 | 0x00e9) -#define NT_STATUS_UNEXPECTED_MM_CREATE_ERR (0xC0000000 | 0x00ea) -#define NT_STATUS_UNEXPECTED_MM_MAP_ERROR (0xC0000000 | 0x00eb) -#define NT_STATUS_UNEXPECTED_MM_EXTEND_ERR (0xC0000000 | 0x00ec) -#define NT_STATUS_NOT_LOGON_PROCESS (0xC0000000 | 0x00ed) -#define NT_STATUS_LOGON_SESSION_EXISTS (0xC0000000 | 0x00ee) -#define NT_STATUS_INVALID_PARAMETER_1 (0xC0000000 | 0x00ef) -#define NT_STATUS_INVALID_PARAMETER_2 (0xC0000000 | 0x00f0) -#define NT_STATUS_INVALID_PARAMETER_3 (0xC0000000 | 0x00f1) -#define NT_STATUS_INVALID_PARAMETER_4 (0xC0000000 | 0x00f2) -#define NT_STATUS_INVALID_PARAMETER_5 (0xC0000000 | 0x00f3) -#define NT_STATUS_INVALID_PARAMETER_6 (0xC0000000 | 0x00f4) -#define NT_STATUS_INVALID_PARAMETER_7 (0xC0000000 | 0x00f5) -#define NT_STATUS_INVALID_PARAMETER_8 (0xC0000000 | 0x00f6) -#define NT_STATUS_INVALID_PARAMETER_9 (0xC0000000 | 0x00f7) -#define NT_STATUS_INVALID_PARAMETER_10 (0xC0000000 | 0x00f8) -#define NT_STATUS_INVALID_PARAMETER_11 (0xC0000000 | 0x00f9) -#define NT_STATUS_INVALID_PARAMETER_12 (0xC0000000 | 0x00fa) -#define NT_STATUS_REDIRECTOR_NOT_STARTED (0xC0000000 | 0x00fb) -#define NT_STATUS_REDIRECTOR_STARTED (0xC0000000 | 0x00fc) -#define NT_STATUS_STACK_OVERFLOW (0xC0000000 | 0x00fd) -#define NT_STATUS_NO_SUCH_PACKAGE (0xC0000000 | 0x00fe) -#define NT_STATUS_BAD_FUNCTION_TABLE (0xC0000000 | 0x00ff) -#define NT_STATUS_DIRECTORY_NOT_EMPTY (0xC0000000 | 0x0101) -#define NT_STATUS_FILE_CORRUPT_ERROR (0xC0000000 | 0x0102) -#define NT_STATUS_NOT_A_DIRECTORY (0xC0000000 | 0x0103) -#define NT_STATUS_BAD_LOGON_SESSION_STATE (0xC0000000 | 0x0104) -#define NT_STATUS_LOGON_SESSION_COLLISION (0xC0000000 | 0x0105) -#define NT_STATUS_NAME_TOO_LONG (0xC0000000 | 0x0106) -#define NT_STATUS_FILES_OPEN (0xC0000000 | 0x0107) -#define NT_STATUS_CONNECTION_IN_USE (0xC0000000 | 0x0108) -#define NT_STATUS_MESSAGE_NOT_FOUND (0xC0000000 | 0x0109) -#define NT_STATUS_PROCESS_IS_TERMINATING (0xC0000000 | 0x010a) -#define NT_STATUS_INVALID_LOGON_TYPE (0xC0000000 | 0x010b) -#define NT_STATUS_NO_GUID_TRANSLATION (0xC0000000 | 0x010c) -#define NT_STATUS_CANNOT_IMPERSONATE (0xC0000000 | 0x010d) -#define NT_STATUS_IMAGE_ALREADY_LOADED (0xC0000000 | 0x010e) -#define NT_STATUS_ABIOS_NOT_PRESENT (0xC0000000 | 
0x010f) -#define NT_STATUS_ABIOS_LID_NOT_EXIST (0xC0000000 | 0x0110) -#define NT_STATUS_ABIOS_LID_ALREADY_OWNED (0xC0000000 | 0x0111) -#define NT_STATUS_ABIOS_NOT_LID_OWNER (0xC0000000 | 0x0112) -#define NT_STATUS_ABIOS_INVALID_COMMAND (0xC0000000 | 0x0113) -#define NT_STATUS_ABIOS_INVALID_LID (0xC0000000 | 0x0114) -#define NT_STATUS_ABIOS_SELECTOR_NOT_AVAILABLE (0xC0000000 | 0x0115) -#define NT_STATUS_ABIOS_INVALID_SELECTOR (0xC0000000 | 0x0116) -#define NT_STATUS_NO_LDT (0xC0000000 | 0x0117) -#define NT_STATUS_INVALID_LDT_SIZE (0xC0000000 | 0x0118) -#define NT_STATUS_INVALID_LDT_OFFSET (0xC0000000 | 0x0119) -#define NT_STATUS_INVALID_LDT_DESCRIPTOR (0xC0000000 | 0x011a) -#define NT_STATUS_INVALID_IMAGE_NE_FORMAT (0xC0000000 | 0x011b) -#define NT_STATUS_RXACT_INVALID_STATE (0xC0000000 | 0x011c) -#define NT_STATUS_RXACT_COMMIT_FAILURE (0xC0000000 | 0x011d) -#define NT_STATUS_MAPPED_FILE_SIZE_ZERO (0xC0000000 | 0x011e) -#define NT_STATUS_TOO_MANY_OPENED_FILES (0xC0000000 | 0x011f) -#define NT_STATUS_CANCELLED (0xC0000000 | 0x0120) -#define NT_STATUS_CANNOT_DELETE (0xC0000000 | 0x0121) -#define NT_STATUS_INVALID_COMPUTER_NAME (0xC0000000 | 0x0122) -#define NT_STATUS_FILE_DELETED (0xC0000000 | 0x0123) -#define NT_STATUS_SPECIAL_ACCOUNT (0xC0000000 | 0x0124) -#define NT_STATUS_SPECIAL_GROUP (0xC0000000 | 0x0125) -#define NT_STATUS_SPECIAL_USER (0xC0000000 | 0x0126) -#define NT_STATUS_MEMBERS_PRIMARY_GROUP (0xC0000000 | 0x0127) -#define NT_STATUS_FILE_CLOSED (0xC0000000 | 0x0128) -#define NT_STATUS_TOO_MANY_THREADS (0xC0000000 | 0x0129) -#define NT_STATUS_THREAD_NOT_IN_PROCESS (0xC0000000 | 0x012a) -#define NT_STATUS_TOKEN_ALREADY_IN_USE (0xC0000000 | 0x012b) -#define NT_STATUS_PAGEFILE_QUOTA_EXCEEDED (0xC0000000 | 0x012c) -#define NT_STATUS_COMMITMENT_LIMIT (0xC0000000 | 0x012d) -#define NT_STATUS_INVALID_IMAGE_LE_FORMAT (0xC0000000 | 0x012e) -#define NT_STATUS_INVALID_IMAGE_NOT_MZ (0xC0000000 | 0x012f) -#define NT_STATUS_INVALID_IMAGE_PROTECT (0xC0000000 | 0x0130) -#define NT_STATUS_INVALID_IMAGE_WIN_16 (0xC0000000 | 0x0131) -#define NT_STATUS_LOGON_SERVER_CONFLICT (0xC0000000 | 0x0132) -#define NT_STATUS_TIME_DIFFERENCE_AT_DC (0xC0000000 | 0x0133) -#define NT_STATUS_SYNCHRONIZATION_REQUIRED (0xC0000000 | 0x0134) -#define NT_STATUS_DLL_NOT_FOUND (0xC0000000 | 0x0135) -#define NT_STATUS_OPEN_FAILED (0xC0000000 | 0x0136) -#define NT_STATUS_IO_PRIVILEGE_FAILED (0xC0000000 | 0x0137) -#define NT_STATUS_ORDINAL_NOT_FOUND (0xC0000000 | 0x0138) -#define NT_STATUS_ENTRYPOINT_NOT_FOUND (0xC0000000 | 0x0139) -#define NT_STATUS_CONTROL_C_EXIT (0xC0000000 | 0x013a) -#define NT_STATUS_LOCAL_DISCONNECT (0xC0000000 | 0x013b) -#define NT_STATUS_REMOTE_DISCONNECT (0xC0000000 | 0x013c) -#define NT_STATUS_REMOTE_RESOURCES (0xC0000000 | 0x013d) -#define NT_STATUS_LINK_FAILED (0xC0000000 | 0x013e) -#define NT_STATUS_LINK_TIMEOUT (0xC0000000 | 0x013f) -#define NT_STATUS_INVALID_CONNECTION (0xC0000000 | 0x0140) -#define NT_STATUS_INVALID_ADDRESS (0xC0000000 | 0x0141) -#define NT_STATUS_DLL_INIT_FAILED (0xC0000000 | 0x0142) -#define NT_STATUS_MISSING_SYSTEMFILE (0xC0000000 | 0x0143) -#define NT_STATUS_UNHANDLED_EXCEPTION (0xC0000000 | 0x0144) -#define NT_STATUS_APP_INIT_FAILURE (0xC0000000 | 0x0145) -#define NT_STATUS_PAGEFILE_CREATE_FAILED (0xC0000000 | 0x0146) -#define NT_STATUS_NO_PAGEFILE (0xC0000000 | 0x0147) -#define NT_STATUS_INVALID_LEVEL (0xC0000000 | 0x0148) -#define NT_STATUS_WRONG_PASSWORD_CORE (0xC0000000 | 0x0149) -#define NT_STATUS_ILLEGAL_FLOAT_CONTEXT (0xC0000000 | 0x014a) -#define 
NT_STATUS_PIPE_BROKEN (0xC0000000 | 0x014b) -#define NT_STATUS_REGISTRY_CORRUPT (0xC0000000 | 0x014c) -#define NT_STATUS_REGISTRY_IO_FAILED (0xC0000000 | 0x014d) -#define NT_STATUS_NO_EVENT_PAIR (0xC0000000 | 0x014e) -#define NT_STATUS_UNRECOGNIZED_VOLUME (0xC0000000 | 0x014f) -#define NT_STATUS_SERIAL_NO_DEVICE_INITED (0xC0000000 | 0x0150) -#define NT_STATUS_NO_SUCH_ALIAS (0xC0000000 | 0x0151) -#define NT_STATUS_MEMBER_NOT_IN_ALIAS (0xC0000000 | 0x0152) -#define NT_STATUS_MEMBER_IN_ALIAS (0xC0000000 | 0x0153) -#define NT_STATUS_ALIAS_EXISTS (0xC0000000 | 0x0154) -#define NT_STATUS_LOGON_NOT_GRANTED (0xC0000000 | 0x0155) -#define NT_STATUS_TOO_MANY_SECRETS (0xC0000000 | 0x0156) -#define NT_STATUS_SECRET_TOO_LONG (0xC0000000 | 0x0157) -#define NT_STATUS_INTERNAL_DB_ERROR (0xC0000000 | 0x0158) -#define NT_STATUS_FULLSCREEN_MODE (0xC0000000 | 0x0159) -#define NT_STATUS_TOO_MANY_CONTEXT_IDS (0xC0000000 | 0x015a) -#define NT_STATUS_LOGON_TYPE_NOT_GRANTED (0xC0000000 | 0x015b) -#define NT_STATUS_NOT_REGISTRY_FILE (0xC0000000 | 0x015c) -#define NT_STATUS_NT_CROSS_ENCRYPTION_REQUIRED (0xC0000000 | 0x015d) -#define NT_STATUS_DOMAIN_CTRLR_CONFIG_ERROR (0xC0000000 | 0x015e) -#define NT_STATUS_FT_MISSING_MEMBER (0xC0000000 | 0x015f) -#define NT_STATUS_ILL_FORMED_SERVICE_ENTRY (0xC0000000 | 0x0160) -#define NT_STATUS_ILLEGAL_CHARACTER (0xC0000000 | 0x0161) -#define NT_STATUS_UNMAPPABLE_CHARACTER (0xC0000000 | 0x0162) -#define NT_STATUS_UNDEFINED_CHARACTER (0xC0000000 | 0x0163) -#define NT_STATUS_FLOPPY_VOLUME (0xC0000000 | 0x0164) -#define NT_STATUS_FLOPPY_ID_MARK_NOT_FOUND (0xC0000000 | 0x0165) -#define NT_STATUS_FLOPPY_WRONG_CYLINDER (0xC0000000 | 0x0166) -#define NT_STATUS_FLOPPY_UNKNOWN_ERROR (0xC0000000 | 0x0167) -#define NT_STATUS_FLOPPY_BAD_REGISTERS (0xC0000000 | 0x0168) -#define NT_STATUS_DISK_RECALIBRATE_FAILED (0xC0000000 | 0x0169) -#define NT_STATUS_DISK_OPERATION_FAILED (0xC0000000 | 0x016a) -#define NT_STATUS_DISK_RESET_FAILED (0xC0000000 | 0x016b) -#define NT_STATUS_SHARED_IRQ_BUSY (0xC0000000 | 0x016c) -#define NT_STATUS_FT_ORPHANING (0xC0000000 | 0x016d) -#define NT_STATUS_PARTITION_FAILURE (0xC0000000 | 0x0172) -#define NT_STATUS_INVALID_BLOCK_LENGTH (0xC0000000 | 0x0173) -#define NT_STATUS_DEVICE_NOT_PARTITIONED (0xC0000000 | 0x0174) -#define NT_STATUS_UNABLE_TO_LOCK_MEDIA (0xC0000000 | 0x0175) -#define NT_STATUS_UNABLE_TO_UNLOAD_MEDIA (0xC0000000 | 0x0176) -#define NT_STATUS_EOM_OVERFLOW (0xC0000000 | 0x0177) -#define NT_STATUS_NO_MEDIA (0xC0000000 | 0x0178) -#define NT_STATUS_NO_SUCH_MEMBER (0xC0000000 | 0x017a) -#define NT_STATUS_INVALID_MEMBER (0xC0000000 | 0x017b) -#define NT_STATUS_KEY_DELETED (0xC0000000 | 0x017c) -#define NT_STATUS_NO_LOG_SPACE (0xC0000000 | 0x017d) -#define NT_STATUS_TOO_MANY_SIDS (0xC0000000 | 0x017e) -#define NT_STATUS_LM_CROSS_ENCRYPTION_REQUIRED (0xC0000000 | 0x017f) -#define NT_STATUS_KEY_HAS_CHILDREN (0xC0000000 | 0x0180) -#define NT_STATUS_CHILD_MUST_BE_VOLATILE (0xC0000000 | 0x0181) -#define NT_STATUS_DEVICE_CONFIGURATION_ERROR (0xC0000000 | 0x0182) -#define NT_STATUS_DRIVER_INTERNAL_ERROR (0xC0000000 | 0x0183) -#define NT_STATUS_INVALID_DEVICE_STATE (0xC0000000 | 0x0184) -#define NT_STATUS_IO_DEVICE_ERROR (0xC0000000 | 0x0185) -#define NT_STATUS_DEVICE_PROTOCOL_ERROR (0xC0000000 | 0x0186) -#define NT_STATUS_BACKUP_CONTROLLER (0xC0000000 | 0x0187) -#define NT_STATUS_LOG_FILE_FULL (0xC0000000 | 0x0188) -#define NT_STATUS_TOO_LATE (0xC0000000 | 0x0189) -#define NT_STATUS_NO_TRUST_LSA_SECRET (0xC0000000 | 0x018a) -#define NT_STATUS_NO_TRUST_SAM_ACCOUNT 
(0xC0000000 | 0x018b) -#define NT_STATUS_TRUSTED_DOMAIN_FAILURE (0xC0000000 | 0x018c) -#define NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE (0xC0000000 | 0x018d) -#define NT_STATUS_EVENTLOG_FILE_CORRUPT (0xC0000000 | 0x018e) -#define NT_STATUS_EVENTLOG_CANT_START (0xC0000000 | 0x018f) -#define NT_STATUS_TRUST_FAILURE (0xC0000000 | 0x0190) -#define NT_STATUS_MUTANT_LIMIT_EXCEEDED (0xC0000000 | 0x0191) -#define NT_STATUS_NETLOGON_NOT_STARTED (0xC0000000 | 0x0192) -#define NT_STATUS_ACCOUNT_EXPIRED (0xC0000000 | 0x0193) -#define NT_STATUS_POSSIBLE_DEADLOCK (0xC0000000 | 0x0194) -#define NT_STATUS_NETWORK_CREDENTIAL_CONFLICT (0xC0000000 | 0x0195) -#define NT_STATUS_REMOTE_SESSION_LIMIT (0xC0000000 | 0x0196) -#define NT_STATUS_EVENTLOG_FILE_CHANGED (0xC0000000 | 0x0197) -#define NT_STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT (0xC0000000 | 0x0198) -#define NT_STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT (0xC0000000 | 0x0199) -#define NT_STATUS_NOLOGON_SERVER_TRUST_ACCOUNT (0xC0000000 | 0x019a) -#define NT_STATUS_DOMAIN_TRUST_INCONSISTENT (0xC0000000 | 0x019b) -#define NT_STATUS_FS_DRIVER_REQUIRED (0xC0000000 | 0x019c) -#define NT_STATUS_NO_USER_SESSION_KEY (0xC0000000 | 0x0202) -#define NT_STATUS_USER_SESSION_DELETED (0xC0000000 | 0x0203) -#define NT_STATUS_RESOURCE_LANG_NOT_FOUND (0xC0000000 | 0x0204) -#define NT_STATUS_INSUFF_SERVER_RESOURCES (0xC0000000 | 0x0205) -#define NT_STATUS_INVALID_BUFFER_SIZE (0xC0000000 | 0x0206) -#define NT_STATUS_INVALID_ADDRESS_COMPONENT (0xC0000000 | 0x0207) -#define NT_STATUS_INVALID_ADDRESS_WILDCARD (0xC0000000 | 0x0208) -#define NT_STATUS_TOO_MANY_ADDRESSES (0xC0000000 | 0x0209) -#define NT_STATUS_ADDRESS_ALREADY_EXISTS (0xC0000000 | 0x020a) -#define NT_STATUS_ADDRESS_CLOSED (0xC0000000 | 0x020b) -#define NT_STATUS_CONNECTION_DISCONNECTED (0xC0000000 | 0x020c) -#define NT_STATUS_CONNECTION_RESET (0xC0000000 | 0x020d) -#define NT_STATUS_TOO_MANY_NODES (0xC0000000 | 0x020e) -#define NT_STATUS_TRANSACTION_ABORTED (0xC0000000 | 0x020f) -#define NT_STATUS_TRANSACTION_TIMED_OUT (0xC0000000 | 0x0210) -#define NT_STATUS_TRANSACTION_NO_RELEASE (0xC0000000 | 0x0211) -#define NT_STATUS_TRANSACTION_NO_MATCH (0xC0000000 | 0x0212) -#define NT_STATUS_TRANSACTION_RESPONDED (0xC0000000 | 0x0213) -#define NT_STATUS_TRANSACTION_INVALID_ID (0xC0000000 | 0x0214) -#define NT_STATUS_TRANSACTION_INVALID_TYPE (0xC0000000 | 0x0215) -#define NT_STATUS_NOT_SERVER_SESSION (0xC0000000 | 0x0216) -#define NT_STATUS_NOT_CLIENT_SESSION (0xC0000000 | 0x0217) -#define NT_STATUS_CANNOT_LOAD_REGISTRY_FILE (0xC0000000 | 0x0218) -#define NT_STATUS_DEBUG_ATTACH_FAILED (0xC0000000 | 0x0219) -#define NT_STATUS_SYSTEM_PROCESS_TERMINATED (0xC0000000 | 0x021a) -#define NT_STATUS_DATA_NOT_ACCEPTED (0xC0000000 | 0x021b) -#define NT_STATUS_NO_BROWSER_SERVERS_FOUND (0xC0000000 | 0x021c) -#define NT_STATUS_VDM_HARD_ERROR (0xC0000000 | 0x021d) -#define NT_STATUS_DRIVER_CANCEL_TIMEOUT (0xC0000000 | 0x021e) -#define NT_STATUS_REPLY_MESSAGE_MISMATCH (0xC0000000 | 0x021f) -#define NT_STATUS_MAPPED_ALIGNMENT (0xC0000000 | 0x0220) -#define NT_STATUS_IMAGE_CHECKSUM_MISMATCH (0xC0000000 | 0x0221) -#define NT_STATUS_LOST_WRITEBEHIND_DATA (0xC0000000 | 0x0222) -#define NT_STATUS_CLIENT_SERVER_PARAMETERS_INVALID (0xC0000000 | 0x0223) -#define NT_STATUS_PASSWORD_MUST_CHANGE (0xC0000000 | 0x0224) -#define NT_STATUS_NOT_FOUND (0xC0000000 | 0x0225) -#define NT_STATUS_NOT_TINY_STREAM (0xC0000000 | 0x0226) -#define NT_STATUS_RECOVERY_FAILURE (0xC0000000 | 0x0227) -#define NT_STATUS_STACK_OVERFLOW_READ (0xC0000000 | 0x0228) -#define 
NT_STATUS_FAIL_CHECK (0xC0000000 | 0x0229) -#define NT_STATUS_DUPLICATE_OBJECTID (0xC0000000 | 0x022a) -#define NT_STATUS_OBJECTID_EXISTS (0xC0000000 | 0x022b) -#define NT_STATUS_CONVERT_TO_LARGE (0xC0000000 | 0x022c) -#define NT_STATUS_RETRY (0xC0000000 | 0x022d) -#define NT_STATUS_FOUND_OUT_OF_SCOPE (0xC0000000 | 0x022e) -#define NT_STATUS_ALLOCATE_BUCKET (0xC0000000 | 0x022f) -#define NT_STATUS_PROPSET_NOT_FOUND (0xC0000000 | 0x0230) -#define NT_STATUS_MARSHALL_OVERFLOW (0xC0000000 | 0x0231) -#define NT_STATUS_INVALID_VARIANT (0xC0000000 | 0x0232) -#define NT_STATUS_DOMAIN_CONTROLLER_NOT_FOUND (0xC0000000 | 0x0233) -#define NT_STATUS_ACCOUNT_LOCKED_OUT (0xC0000000 | 0x0234) -#define NT_STATUS_HANDLE_NOT_CLOSABLE (0xC0000000 | 0x0235) -#define NT_STATUS_CONNECTION_REFUSED (0xC0000000 | 0x0236) -#define NT_STATUS_GRACEFUL_DISCONNECT (0xC0000000 | 0x0237) -#define NT_STATUS_ADDRESS_ALREADY_ASSOCIATED (0xC0000000 | 0x0238) -#define NT_STATUS_ADDRESS_NOT_ASSOCIATED (0xC0000000 | 0x0239) -#define NT_STATUS_CONNECTION_INVALID (0xC0000000 | 0x023a) -#define NT_STATUS_CONNECTION_ACTIVE (0xC0000000 | 0x023b) -#define NT_STATUS_NETWORK_UNREACHABLE (0xC0000000 | 0x023c) -#define NT_STATUS_HOST_UNREACHABLE (0xC0000000 | 0x023d) -#define NT_STATUS_PROTOCOL_UNREACHABLE (0xC0000000 | 0x023e) -#define NT_STATUS_PORT_UNREACHABLE (0xC0000000 | 0x023f) -#define NT_STATUS_REQUEST_ABORTED (0xC0000000 | 0x0240) -#define NT_STATUS_CONNECTION_ABORTED (0xC0000000 | 0x0241) -#define NT_STATUS_BAD_COMPRESSION_BUFFER (0xC0000000 | 0x0242) -#define NT_STATUS_USER_MAPPED_FILE (0xC0000000 | 0x0243) -#define NT_STATUS_AUDIT_FAILED (0xC0000000 | 0x0244) -#define NT_STATUS_TIMER_RESOLUTION_NOT_SET (0xC0000000 | 0x0245) -#define NT_STATUS_CONNECTION_COUNT_LIMIT (0xC0000000 | 0x0246) -#define NT_STATUS_LOGIN_TIME_RESTRICTION (0xC0000000 | 0x0247) -#define NT_STATUS_LOGIN_WKSTA_RESTRICTION (0xC0000000 | 0x0248) -#define NT_STATUS_IMAGE_MP_UP_MISMATCH (0xC0000000 | 0x0249) -#define NT_STATUS_INSUFFICIENT_LOGON_INFO (0xC0000000 | 0x0250) -#define NT_STATUS_BAD_DLL_ENTRYPOINT (0xC0000000 | 0x0251) -#define NT_STATUS_BAD_SERVICE_ENTRYPOINT (0xC0000000 | 0x0252) -#define NT_STATUS_LPC_REPLY_LOST (0xC0000000 | 0x0253) -#define NT_STATUS_IP_ADDRESS_CONFLICT1 (0xC0000000 | 0x0254) -#define NT_STATUS_IP_ADDRESS_CONFLICT2 (0xC0000000 | 0x0255) -#define NT_STATUS_REGISTRY_QUOTA_LIMIT (0xC0000000 | 0x0256) -#define NT_STATUS_PATH_NOT_COVERED (0xC0000000 | 0x0257) -#define NT_STATUS_NO_CALLBACK_ACTIVE (0xC0000000 | 0x0258) -#define NT_STATUS_LICENSE_QUOTA_EXCEEDED (0xC0000000 | 0x0259) -#define NT_STATUS_PWD_TOO_SHORT (0xC0000000 | 0x025a) -#define NT_STATUS_PWD_TOO_RECENT (0xC0000000 | 0x025b) -#define NT_STATUS_PWD_HISTORY_CONFLICT (0xC0000000 | 0x025c) -#define NT_STATUS_PLUGPLAY_NO_DEVICE (0xC0000000 | 0x025e) -#define NT_STATUS_UNSUPPORTED_COMPRESSION (0xC0000000 | 0x025f) -#define NT_STATUS_INVALID_HW_PROFILE (0xC0000000 | 0x0260) -#define NT_STATUS_INVALID_PLUGPLAY_DEVICE_PATH (0xC0000000 | 0x0261) -#define NT_STATUS_DRIVER_ORDINAL_NOT_FOUND (0xC0000000 | 0x0262) -#define NT_STATUS_DRIVER_ENTRYPOINT_NOT_FOUND (0xC0000000 | 0x0263) -#define NT_STATUS_RESOURCE_NOT_OWNED (0xC0000000 | 0x0264) -#define NT_STATUS_TOO_MANY_LINKS (0xC0000000 | 0x0265) -#define NT_STATUS_QUOTA_LIST_INCONSISTENT (0xC0000000 | 0x0266) -#define NT_STATUS_FILE_IS_OFFLINE (0xC0000000 | 0x0267) -#define NT_STATUS_NETWORK_SESSION_EXPIRED (0xC0000000 | 0x035c) -#define NT_STATUS_NO_SUCH_JOB (0xC0000000 | 0xEDE) /* scheduler */ -#define 
NT_STATUS_NO_PREAUTH_INTEGRITY_HASH_OVERLAP (0xC0000000 | 0x5D0000) -#define NT_STATUS_PENDING 0x00000103 -#endif /* _NTERR_H */ diff --git a/fs/smb/server/smb2misc.c b/fs/smb/server/smb2misc.c index 67a2d7a793f6..a1ddca21c47b 100644 --- a/fs/smb/server/smb2misc.c +++ b/fs/smb/server/smb2misc.c @@ -5,7 +5,6 @@ */ #include "glob.h" -#include "nterr.h" #include "smb_common.h" #include "../common/smb2status.h" #include "mgmt/user_session.h" diff --git a/fs/smb/server/smb2pdu.c b/fs/smb/server/smb2pdu.c index f3184b217575..27f87a13f20a 100644 --- a/fs/smb/server/smb2pdu.c +++ b/fs/smb/server/smb2pdu.c @@ -5497,6 +5497,13 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work, info->Attributes |= cpu_to_le32(FILE_NAMED_STREAMS); info->MaxPathNameComponentLength = cpu_to_le32(stfs.f_namelen); + /* + * some application(potableapp) can not run on ksmbd share + * because only NTFS handle security setting on windows. + * So Although local fs(EXT4 or F2fs, etc) is not NTFS, + * ksmbd should show share as NTFS. Later, If needed, we can add + * fs type(s) parameter to change fs type user wanted. + */ len = smbConvertToUTF16((__le16 *)info->FileSystemName, "NTFS", PATH_MAX, conn->local_nls, 0); len = len * 2; diff --git a/fs/smb/server/smb_common.h b/fs/smb/server/smb_common.h index 2baf4aa330eb..067b45048c73 100644 --- a/fs/smb/server/smb_common.h +++ b/fs/smb/server/smb_common.h @@ -3,13 +3,12 @@ * Copyright (C) 2018 Samsung Electronics Co., Ltd. */ -#ifndef __SMB_COMMON_H__ -#define __SMB_COMMON_H__ +#ifndef __SMB_SERVER_COMMON_H__ +#define __SMB_SERVER_COMMON_H__ #include <linux/kernel.h> #include "glob.h" -#include "nterr.h" #include "../common/smbglob.h" #include "../common/smb2pdu.h" #include "../common/fscc.h" @@ -203,4 +202,4 @@ unsigned int ksmbd_server_side_copy_max_chunk_size(void); unsigned int ksmbd_server_side_copy_max_total_size(void); bool is_asterisk(char *p); __le32 smb_map_generic_desired_access(__le32 daccess); -#endif /* __SMB_COMMON_H__ */ +#endif /* __SMB_SERVER_COMMON_H__ */ diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 2ceda49c609f..aa36a0d1d9df 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -90,7 +90,7 @@ */ #define DMA_MAPPING_ERROR (~(dma_addr_t)0) -#define DMA_BIT_MASK(n) GENMASK_ULL(n - 1, 0) +#define DMA_BIT_MASK(n) GENMASK_ULL((n) - 1, 0) struct dma_iova_state { dma_addr_t addr; diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index 37e0b5b5600a..17902861de76 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h @@ -182,9 +182,9 @@ int generic_handle_irq_safe(unsigned int irq); * and handle the result interrupt number. Return -EINVAL if * conversion failed. 
*/ -int generic_handle_domain_irq(struct irq_domain *domain, unsigned int hwirq); -int generic_handle_domain_irq_safe(struct irq_domain *domain, unsigned int hwirq); -int generic_handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq); +int generic_handle_domain_irq(struct irq_domain *domain, irq_hw_number_t hwirq); +int generic_handle_domain_irq_safe(struct irq_domain *domain, irq_hw_number_t hwirq); +int generic_handle_domain_nmi(struct irq_domain *domain, irq_hw_number_t hwirq); #endif /* Test to see if a driver has successfully requested an irq */ diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index c585939b6cd6..a6624edb7226 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -344,6 +344,7 @@ struct nfs4_copy_state { #define NFS_INO_LAYOUTCOMMITTING (10) /* layoutcommit inflight */ #define NFS_INO_LAYOUTSTATS (11) /* layoutstats inflight */ #define NFS_INO_ODIRECT (12) /* I/O setting is O_DIRECT */ +#define NFS_INO_REQ_DIR_DELEG (13) /* Request a directory delegation */ static inline struct nfs_inode *NFS_I(const struct inode *inode) { diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index d30c0245031c..c58b870f31ee 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -172,6 +172,11 @@ struct nfs_server { #define NFS_MOUNT_FORCE_RDIRPLUS 0x20000000 #define NFS_MOUNT_NETUNREACH_FATAL 0x40000000 + unsigned int automount_inherit; /* Properties inherited by automount */ +#define NFS_AUTOMOUNT_INHERIT_BSIZE 0x0001 +#define NFS_AUTOMOUNT_INHERIT_RSIZE 0x0002 +#define NFS_AUTOMOUNT_INHERIT_WSIZE 0x0004 + unsigned int caps; /* server capabilities */ __u64 fattr_valid; /* Valid attributes */ unsigned int rsize; /* read size */ @@ -305,6 +310,7 @@ struct nfs_server { #define NFS_CAP_REBOOT_LAYOUTRETURN (1U << 8) #define NFS_CAP_OFFLOAD_STATUS (1U << 9) #define NFS_CAP_ZERO_RANGE (1U << 10) +#define NFS_CAP_DIR_DELEG (1U << 11) #define NFS_CAP_OPEN_XOR (1U << 12) #define NFS_CAP_DELEGTIME (1U << 13) #define NFS_CAP_POSIX_LOCK (1U << 14) diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 31463286402f..79fe2dfb470f 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1092,12 +1092,19 @@ struct nfs4_getattr_arg { struct nfs4_sequence_args seq_args; const struct nfs_fh * fh; const u32 * bitmask; + bool get_dir_deleg; +}; + +struct nfs4_gdd_res { + u32 status; + nfs4_stateid deleg; }; struct nfs4_getattr_res { struct nfs4_sequence_res seq_res; const struct nfs_server * server; struct nfs_fattr * fattr; + struct nfs4_gdd_res * gdd_res; }; struct nfs4_link_arg { @@ -1801,7 +1808,8 @@ struct nfs_rpc_ops { int (*unlink_done) (struct rpc_task *, struct inode *); void (*rename_setup) (struct rpc_message *msg, struct dentry *old_dentry, - struct dentry *new_dentry); + struct dentry *new_dentry, + struct inode *same_parent); void (*rename_rpc_prepare)(struct rpc_task *task, struct nfs_renamedata *); int (*rename_done) (struct rpc_task *task, struct inode *old_dir, struct inode *new_dir); int (*link) (struct inode *, struct inode *, const struct qstr *); diff --git a/include/linux/platform_data/x86/asus-wmi-leds-ids.h b/include/linux/platform_data/x86/asus-wmi-leds-ids.h new file mode 100644 index 000000000000..034a039c4e37 --- /dev/null +++ b/include/linux/platform_data/x86/asus-wmi-leds-ids.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __PLATFORM_DATA_X86_ASUS_WMI_LEDS_IDS_H +#define __PLATFORM_DATA_X86_ASUS_WMI_LEDS_IDS_H + +#include <linux/dmi.h> +#include <linux/types.h> 
+ +/* To be used by both hid-asus and asus-wmi to determine which controls kbd_brightness */ +#if IS_REACHABLE(CONFIG_ASUS_WMI) || IS_REACHABLE(CONFIG_HID_ASUS) +static const struct dmi_system_id asus_use_hid_led_dmi_ids[] = { + { + .matches = { + DMI_MATCH(DMI_PRODUCT_FAMILY, "ROG Zephyrus"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_PRODUCT_FAMILY, "ROG Strix"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_PRODUCT_FAMILY, "ROG Flow"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_PRODUCT_FAMILY, "ProArt P16"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GA403U"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GU605M"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "RC71L"), + }, + }, + { }, +}; +#endif + +#endif /* __PLATFORM_DATA_X86_ASUS_WMI_LEDS_IDS_H */ diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h index 8a515179113d..419491d4abca 100644 --- a/include/linux/platform_data/x86/asus-wmi.h +++ b/include/linux/platform_data/x86/asus-wmi.h @@ -4,7 +4,9 @@ #include <linux/errno.h> #include <linux/types.h> -#include <linux/dmi.h> + +#define ASUS_WMI_MGMT_GUID "97845ED0-4E6D-11DE-8A39-0800200C9A66" +#define ASUS_ACPI_UID_ASUSWMI "ASUSWMI" /* WMI Methods */ #define ASUS_WMI_METHODID_SPEC 0x43455053 /* BIOS SPECification */ @@ -73,12 +75,14 @@ #define ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY_VIVO 0x00110019 /* Misc */ +#define ASUS_WMI_DEVID_PANEL_HD 0x0005001C #define ASUS_WMI_DEVID_PANEL_OD 0x00050019 #define ASUS_WMI_DEVID_CAMERA 0x00060013 #define ASUS_WMI_DEVID_LID_FLIP 0x00060062 #define ASUS_WMI_DEVID_LID_FLIP_ROG 0x00060077 #define ASUS_WMI_DEVID_MINI_LED_MODE 0x0005001E #define ASUS_WMI_DEVID_MINI_LED_MODE2 0x0005002E +#define ASUS_WMI_DEVID_SCREEN_AUTO_BRIGHTNESS 0x0005002A /* Storage */ #define ASUS_WMI_DEVID_CARDREADER 0x00080013 @@ -103,7 +107,7 @@ #define ASUS_WMI_DEVID_PPT_PL1_SPL 0x001200A3 #define ASUS_WMI_DEVID_PPT_APU_SPPT 0x001200B0 #define ASUS_WMI_DEVID_PPT_PLAT_SPPT 0x001200B1 -#define ASUS_WMI_DEVID_PPT_FPPT 0x001200C1 +#define ASUS_WMI_DEVID_PPT_PL3_FPPT 0x001200C1 #define ASUS_WMI_DEVID_NV_DYN_BOOST 0x001200C0 #define ASUS_WMI_DEVID_NV_THERM_TARGET 0x001200C2 @@ -133,6 +137,11 @@ /* dgpu on/off */ #define ASUS_WMI_DEVID_DGPU 0x00090020 +#define ASUS_WMI_DEVID_APU_MEM 0x000600C1 + +#define ASUS_WMI_DEVID_DGPU_BASE_TGP 0x00120099 +#define ASUS_WMI_DEVID_DGPU_SET_TGP 0x00120098 + /* gpu mux switch, 0 = dGPU, 1 = Optimus */ #define ASUS_WMI_DEVID_GPU_MUX 0x00090016 #define ASUS_WMI_DEVID_GPU_MUX_VIVO 0x00090026 @@ -166,6 +175,7 @@ enum asus_ally_mcu_hack { #if IS_REACHABLE(CONFIG_ASUS_WMI) void set_ally_mcu_hack(enum asus_ally_mcu_hack status); void set_ally_mcu_powersave(bool enabled); +int asus_wmi_get_devstate_dsts(u32 dev_id, u32 *retval); int asus_wmi_set_devstate(u32 dev_id, u32 ctrl_param, u32 *retval); int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1, u32 *retval); #else @@ -179,6 +189,10 @@ static inline int asus_wmi_set_devstate(u32 dev_id, u32 ctrl_param, u32 *retval) { return -ENODEV; } +static inline int asus_wmi_get_devstate_dsts(u32 dev_id, u32 *retval) +{ + return -ENODEV; +} static inline int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1, u32 *retval) { @@ -186,44 +200,4 @@ static inline int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1, } #endif -/* To be used by both hid-asus and asus-wmi to determine which controls kbd_brightness */ -static const struct dmi_system_id asus_use_hid_led_dmi_ids[] = { - { - .matches = { - 
DMI_MATCH(DMI_PRODUCT_FAMILY, "ROG Zephyrus"), - }, - }, - { - .matches = { - DMI_MATCH(DMI_PRODUCT_FAMILY, "ROG Strix"), - }, - }, - { - .matches = { - DMI_MATCH(DMI_PRODUCT_FAMILY, "ROG Flow"), - }, - }, - { - .matches = { - DMI_MATCH(DMI_PRODUCT_FAMILY, "ProArt P16"), - }, - }, - { - .matches = { - DMI_MATCH(DMI_BOARD_NAME, "GA403U"), - }, - }, - { - .matches = { - DMI_MATCH(DMI_BOARD_NAME, "GU605M"), - }, - }, - { - .matches = { - DMI_MATCH(DMI_BOARD_NAME, "RC71L"), - }, - }, - { }, -}; - #endif /* __PLATFORM_DATA_X86_ASUS_WMI_H */ diff --git a/include/linux/platform_data/x86/intel_pmc_ipc.h b/include/linux/platform_data/x86/intel_pmc_ipc.h index 1d34435b7001..85ea381e4a27 100644 --- a/include/linux/platform_data/x86/intel_pmc_ipc.h +++ b/include/linux/platform_data/x86/intel_pmc_ipc.h @@ -9,6 +9,7 @@ #ifndef INTEL_PMC_IPC_H #define INTEL_PMC_IPC_H #include <linux/acpi.h> +#include <linux/cleanup.h> #define IPC_SOC_REGISTER_ACCESS 0xAA #define IPC_SOC_SUB_CMD_READ 0x00 @@ -48,7 +49,6 @@ static inline int intel_pmc_ipc(struct pmc_ipc_cmd *ipc_cmd, struct pmc_ipc_rbuf {.type = ACPI_TYPE_INTEGER,}, }; struct acpi_object_list arg_list = { PMC_IPCS_PARAM_COUNT, params }; - union acpi_object *obj; int status; if (!ipc_cmd || !rbuf) @@ -72,7 +72,7 @@ static inline int intel_pmc_ipc(struct pmc_ipc_cmd *ipc_cmd, struct pmc_ipc_rbuf if (ACPI_FAILURE(status)) return -ENODEV; - obj = buffer.pointer; + union acpi_object *obj __free(kfree) = buffer.pointer; if (obj && obj->type == ACPI_TYPE_PACKAGE && obj->package.count == VALID_IPC_RESPONSE) { diff --git a/include/linux/platform_profile.h b/include/linux/platform_profile.h index a299225ab92e..855b28340e95 100644 --- a/include/linux/platform_profile.h +++ b/include/linux/platform_profile.h @@ -24,6 +24,7 @@ enum platform_profile_option { PLATFORM_PROFILE_BALANCED, PLATFORM_PROFILE_BALANCED_PERFORMANCE, PLATFORM_PROFILE_PERFORMANCE, + PLATFORM_PROFILE_MAX_POWER, PLATFORM_PROFILE_CUSTOM, PLATFORM_PROFILE_LAST, /*must always be last */ }; diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 911d7a4d32c1..41037c513f06 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -76,7 +76,7 @@ extern int pm_runtime_get_if_active(struct device *dev); extern int pm_runtime_get_if_in_use(struct device *dev); extern int pm_schedule_suspend(struct device *dev, unsigned int delay); extern int __pm_runtime_set_status(struct device *dev, unsigned int status); -extern int pm_runtime_barrier(struct device *dev); +extern void pm_runtime_barrier(struct device *dev); extern bool pm_runtime_block_if_disabled(struct device *dev); extern void pm_runtime_unblock(struct device *dev); extern void pm_runtime_enable(struct device *dev); @@ -284,7 +284,7 @@ static inline int pm_runtime_get_if_active(struct device *dev) } static inline int __pm_runtime_set_status(struct device *dev, unsigned int status) { return 0; } -static inline int pm_runtime_barrier(struct device *dev) { return 0; } +static inline void pm_runtime_barrier(struct device *dev) {} static inline bool pm_runtime_block_if_disabled(struct device *dev) { return true; } static inline void pm_runtime_unblock(struct device *dev) {} static inline void pm_runtime_enable(struct device *dev) {} diff --git a/include/linux/restart_block.h b/include/linux/restart_block.h index 36ddfa1ec301..67d2bf579942 100644 --- a/include/linux/restart_block.h +++ b/include/linux/restart_block.h @@ -32,7 +32,7 @@ struct restart_block { u32 val; u32 flags; u32 bitset; - u64 time; + ktime_t time; u32 
__user *uaddr2; } futex; /* For nanosleep */ diff --git a/include/linux/slab.h b/include/linux/slab.h index cf443f064a66..2482992248dc 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -1150,10 +1150,17 @@ static inline void kvfree_rcu_barrier(void) rcu_barrier(); } +static inline void kvfree_rcu_barrier_on_cache(struct kmem_cache *s) +{ + rcu_barrier(); +} + static inline void kfree_rcu_scheduler_running(void) { } #else void kvfree_rcu_barrier(void); +void kvfree_rcu_barrier_on_cache(struct kmem_cache *s); + void kfree_rcu_scheduler_running(void); #endif diff --git a/include/linux/sunrpc/bc_xprt.h b/include/linux/sunrpc/bc_xprt.h index f22bf915dcf6..98939cb664cf 100644 --- a/include/linux/sunrpc/bc_xprt.h +++ b/include/linux/sunrpc/bc_xprt.h @@ -25,12 +25,14 @@ void xprt_init_bc_request(struct rpc_rqst *req, struct rpc_task *task, void xprt_free_bc_request(struct rpc_rqst *req); int xprt_setup_backchannel(struct rpc_xprt *, unsigned int min_reqs); void xprt_destroy_backchannel(struct rpc_xprt *, unsigned int max_reqs); +void xprt_enqueue_bc_request(struct rpc_rqst *req); /* Socket backchannel transport methods */ int xprt_setup_bc(struct rpc_xprt *xprt, unsigned int min_reqs); void xprt_destroy_bc(struct rpc_xprt *xprt, unsigned int max_reqs); void xprt_free_bc_rqst(struct rpc_rqst *req); unsigned int xprt_bc_max_slots(struct rpc_xprt *xprt); +void xprt_svc_destroy_nullify_bc(struct rpc_xprt *xprt, struct svc_serv **serv); /* * Determine if a shared backchannel is in use @@ -68,5 +70,10 @@ static inline void set_bc_enabled(struct svc_serv *serv) static inline void xprt_free_bc_request(struct rpc_rqst *req) { } + +static inline void xprt_svc_destroy_nullify_bc(struct rpc_xprt *xprt, struct svc_serv **serv) +{ + svc_destroy(serv); +} #endif /* CONFIG_SUNRPC_BACKCHANNEL */ #endif /* _LINUX_SUNRPC_BC_XPRT_H */ diff --git a/include/linux/wmi.h b/include/linux/wmi.h index 10751c8e5e6a..665ea7dc8a92 100644 --- a/include/linux/wmi.h +++ b/include/linux/wmi.h @@ -36,13 +36,10 @@ struct wmi_device { */ #define to_wmi_device(device) container_of_const(device, struct wmi_device, dev) -extern acpi_status wmidev_evaluate_method(struct wmi_device *wdev, - u8 instance, u32 method_id, - const struct acpi_buffer *in, - struct acpi_buffer *out); +acpi_status wmidev_evaluate_method(struct wmi_device *wdev, u8 instance, u32 method_id, + const struct acpi_buffer *in, struct acpi_buffer *out); -extern union acpi_object *wmidev_block_query(struct wmi_device *wdev, - u8 instance); +union acpi_object *wmidev_block_query(struct wmi_device *wdev, u8 instance); acpi_status wmidev_block_set(struct wmi_device *wdev, u8 instance, const struct acpi_buffer *in); @@ -81,9 +78,9 @@ struct wmi_driver { */ #define to_wmi_driver(drv) container_of_const(drv, struct wmi_driver, driver) -extern int __must_check __wmi_driver_register(struct wmi_driver *driver, - struct module *owner); -extern void wmi_driver_unregister(struct wmi_driver *driver); +int __must_check __wmi_driver_register(struct wmi_driver *driver, struct module *owner); + +void wmi_driver_unregister(struct wmi_driver *driver); /** * wmi_driver_register() - Helper macro to register a WMI driver diff --git a/include/uapi/linux/i2c.h b/include/uapi/linux/i2c.h index a2db2a56c8b0..2a226657d9f8 100644 --- a/include/uapi/linux/i2c.h +++ b/include/uapi/linux/i2c.h @@ -36,7 +36,7 @@ * * Only if I2C_FUNC_NOSTART is set: * %I2C_M_NOSTART: skip repeated start sequence - + * * Only if I2C_FUNC_PROTOCOL_MANGLING is set: * %I2C_M_NO_RD_ACK: in a read message, master 
ACK/NACK bit is skipped * %I2C_M_IGNORE_NAK: treat NACK from client as ACK diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index 5d130c578435..6cb24cdf8e68 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -2536,6 +2536,9 @@ static enum hrtimer_restart io_cqring_min_timer_wakeup(struct hrtimer *timer) goto out_wake; } + /* any generated CQE posted past this time should wake us up */ + iowq->cq_tail = iowq->cq_min_tail; + hrtimer_update_function(&iowq->t, io_cqring_timer_wakeup); hrtimer_set_expires(timer, iowq->timeout); return HRTIMER_RESTART; diff --git a/kernel/dma/pool.c b/kernel/dma/pool.c index ee45dee33d49..26392badc36b 100644 --- a/kernel/dma/pool.c +++ b/kernel/dma/pool.c @@ -93,7 +93,7 @@ static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size, page = dma_alloc_from_contiguous(NULL, 1 << order, order, false); if (!page) - page = alloc_pages(gfp, order); + page = alloc_pages(gfp | __GFP_NOWARN, order); } while (!page && order-- > 0); if (!page) goto out; diff --git a/kernel/futex/waitwake.c b/kernel/futex/waitwake.c index e2bbe5509ec2..1c2dd03f11ec 100644 --- a/kernel/futex/waitwake.c +++ b/kernel/futex/waitwake.c @@ -738,12 +738,11 @@ int futex_wait(u32 __user *uaddr, unsigned int flags, u32 val, ktime_t *abs_time static long futex_wait_restart(struct restart_block *restart) { u32 __user *uaddr = restart->futex.uaddr; - ktime_t t, *tp = NULL; + ktime_t *tp = NULL; + + if (restart->futex.flags & FLAGS_HAS_TIMEOUT) + tp = &restart->futex.time; - if (restart->futex.flags & FLAGS_HAS_TIMEOUT) { - t = restart->futex.time; - tp = &t; - } restart->fn = do_no_restart_syscall; return (long)futex_wait(uaddr, restart->futex.flags, diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c index 6acf268f005b..f8e4e13dbe33 100644 --- a/kernel/irq/irqdesc.c +++ b/kernel/irq/irqdesc.c @@ -720,7 +720,7 @@ EXPORT_SYMBOL_GPL(generic_handle_irq_safe); * This function must be called from an IRQ context with irq regs * initialized. */ -int generic_handle_domain_irq(struct irq_domain *domain, unsigned int hwirq) +int generic_handle_domain_irq(struct irq_domain *domain, irq_hw_number_t hwirq) { return handle_irq_desc(irq_resolve_mapping(domain, hwirq)); } @@ -738,7 +738,7 @@ EXPORT_SYMBOL_GPL(generic_handle_domain_irq); * context). If the interrupt is marked as 'enforce IRQ-context only' then * the function must be invoked from hard interrupt context. */ -int generic_handle_domain_irq_safe(struct irq_domain *domain, unsigned int hwirq) +int generic_handle_domain_irq_safe(struct irq_domain *domain, irq_hw_number_t hwirq) { unsigned long flags; int ret; @@ -761,7 +761,7 @@ EXPORT_SYMBOL_GPL(generic_handle_domain_irq_safe); * This function must be called from an NMI context with irq regs * initialized. 
**/ -int generic_handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq) +int generic_handle_domain_nmi(struct irq_domain *domain, irq_hw_number_t hwirq) { WARN_ON_ONCE(!in_nmi()); return handle_irq_desc(irq_resolve_mapping(domain, hwirq)); diff --git a/mm/slab.h b/mm/slab.h index f730e012553c..e767aa7e91b0 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -422,6 +422,7 @@ static inline bool is_kmalloc_normal(struct kmem_cache *s) bool __kfree_rcu_sheaf(struct kmem_cache *s, void *obj); void flush_all_rcu_sheaves(void); +void flush_rcu_sheaves_on_cache(struct kmem_cache *s); #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \ SLAB_CACHE_DMA32 | SLAB_PANIC | \ diff --git a/mm/slab_common.c b/mm/slab_common.c index b613533b29e7..eed7ea556cb1 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -492,7 +492,7 @@ void kmem_cache_destroy(struct kmem_cache *s) return; /* in-flight kfree_rcu()'s may include objects from our cache */ - kvfree_rcu_barrier(); + kvfree_rcu_barrier_on_cache(s); if (IS_ENABLED(CONFIG_SLUB_RCU_DEBUG) && (s->flags & SLAB_TYPESAFE_BY_RCU)) { @@ -2038,25 +2038,13 @@ unlock_return: } EXPORT_SYMBOL_GPL(kvfree_call_rcu); -/** - * kvfree_rcu_barrier - Wait until all in-flight kvfree_rcu() complete. - * - * Note that a single argument of kvfree_rcu() call has a slow path that - * triggers synchronize_rcu() following by freeing a pointer. It is done - * before the return from the function. Therefore for any single-argument - * call that will result in a kfree() to a cache that is to be destroyed - * during module exit, it is developer's responsibility to ensure that all - * such calls have returned before the call to kmem_cache_destroy(). - */ -void kvfree_rcu_barrier(void) +static inline void __kvfree_rcu_barrier(void) { struct kfree_rcu_cpu_work *krwp; struct kfree_rcu_cpu *krcp; bool queued; int i, cpu; - flush_all_rcu_sheaves(); - /* * Firstly we detach objects and queue them over an RCU-batch * for all CPUs. Finally queued works are flushed for each CPU. @@ -2118,8 +2106,43 @@ void kvfree_rcu_barrier(void) } } } + +/** + * kvfree_rcu_barrier - Wait until all in-flight kvfree_rcu() complete. + * + * Note that a single argument of kvfree_rcu() call has a slow path that + * triggers synchronize_rcu() following by freeing a pointer. It is done + * before the return from the function. Therefore for any single-argument + * call that will result in a kfree() to a cache that is to be destroyed + * during module exit, it is developer's responsibility to ensure that all + * such calls have returned before the call to kmem_cache_destroy(). + */ +void kvfree_rcu_barrier(void) +{ + flush_all_rcu_sheaves(); + __kvfree_rcu_barrier(); +} EXPORT_SYMBOL_GPL(kvfree_rcu_barrier); +/** + * kvfree_rcu_barrier_on_cache - Wait for in-flight kvfree_rcu() calls on a + * specific slab cache. + * @s: slab cache to wait for + * + * See the description of kvfree_rcu_barrier() for details. + */ +void kvfree_rcu_barrier_on_cache(struct kmem_cache *s) +{ + if (s->cpu_sheaves) + flush_rcu_sheaves_on_cache(s); + /* + * TODO: Introduce a version of __kvfree_rcu_barrier() that works + * on a specific slab cache. 
+ */ + __kvfree_rcu_barrier(); +} +EXPORT_SYMBOL_GPL(kvfree_rcu_barrier_on_cache); + static unsigned long kfree_rcu_shrink_count(struct shrinker *shrink, struct shrink_control *sc) { @@ -2215,4 +2238,3 @@ void __init kvfree_rcu_init(void) } #endif /* CONFIG_KVFREE_RCU_BATCHED */ - diff --git a/mm/slub.c b/mm/slub.c index e6a330e24145..f21b2f0c6f5a 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -4122,42 +4122,47 @@ static void flush_rcu_sheaf(struct work_struct *w) /* needed for kvfree_rcu_barrier() */ -void flush_all_rcu_sheaves(void) +void flush_rcu_sheaves_on_cache(struct kmem_cache *s) { struct slub_flush_work *sfw; - struct kmem_cache *s; unsigned int cpu; - cpus_read_lock(); - mutex_lock(&slab_mutex); + mutex_lock(&flush_lock); - list_for_each_entry(s, &slab_caches, list) { - if (!s->cpu_sheaves) - continue; + for_each_online_cpu(cpu) { + sfw = &per_cpu(slub_flush, cpu); - mutex_lock(&flush_lock); + /* + * we don't check if rcu_free sheaf exists - racing + * __kfree_rcu_sheaf() might have just removed it. + * by executing flush_rcu_sheaf() on the cpu we make + * sure the __kfree_rcu_sheaf() finished its call_rcu() + */ - for_each_online_cpu(cpu) { - sfw = &per_cpu(slub_flush, cpu); + INIT_WORK(&sfw->work, flush_rcu_sheaf); + sfw->s = s; + queue_work_on(cpu, flushwq, &sfw->work); + } - /* - * we don't check if rcu_free sheaf exists - racing - * __kfree_rcu_sheaf() might have just removed it. - * by executing flush_rcu_sheaf() on the cpu we make - * sure the __kfree_rcu_sheaf() finished its call_rcu() - */ + for_each_online_cpu(cpu) { + sfw = &per_cpu(slub_flush, cpu); + flush_work(&sfw->work); + } - INIT_WORK(&sfw->work, flush_rcu_sheaf); - sfw->s = s; - queue_work_on(cpu, flushwq, &sfw->work); - } + mutex_unlock(&flush_lock); +} - for_each_online_cpu(cpu) { - sfw = &per_cpu(slub_flush, cpu); - flush_work(&sfw->work); - } +void flush_all_rcu_sheaves(void) +{ + struct kmem_cache *s; + + cpus_read_lock(); + mutex_lock(&slab_mutex); - mutex_unlock(&flush_lock); + list_for_each_entry(s, &slab_caches, list) { + if (!s->cpu_sheaves) + continue; + flush_rcu_sheaves_on_cache(s); } mutex_unlock(&slab_mutex); diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c index caa94cf57123..68b1fcdea8f0 100644 --- a/net/sunrpc/backchannel_rqst.c +++ b/net/sunrpc/backchannel_rqst.c @@ -25,6 +25,22 @@ unsigned int xprt_bc_max_slots(struct rpc_xprt *xprt) } /* + * Helper function to nullify backchannel server pointer in transport. + * We need to synchronize setting the pointer to NULL (done so after + * the backchannel server is shutdown) with the usage of that pointer + * by the backchannel request processing routines + * xprt_complete_bc_request() and rpcrdma_bc_receive_call(). + */ +void xprt_svc_destroy_nullify_bc(struct rpc_xprt *xprt, struct svc_serv **serv) +{ + spin_lock(&xprt->bc_pa_lock); + svc_destroy(serv); + xprt->bc_serv = NULL; + spin_unlock(&xprt->bc_pa_lock); +} +EXPORT_SYMBOL_GPL(xprt_svc_destroy_nullify_bc); + +/* * Helper routines that track the number of preallocation elements * on the transport. 
*/ @@ -354,7 +370,6 @@ found: void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied) { struct rpc_xprt *xprt = req->rq_xprt; - struct svc_serv *bc_serv = xprt->bc_serv; spin_lock(&xprt->bc_pa_lock); list_del(&req->rq_bc_pa_list); @@ -365,7 +380,21 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied) set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state); dprintk("RPC: add callback request to list\n"); + xprt_enqueue_bc_request(req); +} + +void xprt_enqueue_bc_request(struct rpc_rqst *req) +{ + struct rpc_xprt *xprt = req->rq_xprt; + struct svc_serv *bc_serv; + xprt_get(xprt); - lwq_enqueue(&req->rq_bc_list, &bc_serv->sv_cb_list); - svc_pool_wake_idle_thread(&bc_serv->sv_pools[0]); + spin_lock(&xprt->bc_pa_lock); + bc_serv = xprt->bc_serv; + if (bc_serv) { + lwq_enqueue(&req->rq_bc_list, &bc_serv->sv_cb_list); + svc_pool_wake_idle_thread(&bc_serv->sv_pools[0]); + } + spin_unlock(&xprt->bc_pa_lock); } +EXPORT_SYMBOL_GPL(xprt_enqueue_bc_request); diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c index 8c817e755262..2f0f9618dd05 100644 --- a/net/sunrpc/xprtrdma/backchannel.c +++ b/net/sunrpc/xprtrdma/backchannel.c @@ -9,6 +9,7 @@ #include <linux/sunrpc/svc.h> #include <linux/sunrpc/svc_xprt.h> #include <linux/sunrpc/svc_rdma.h> +#include <linux/sunrpc/bc_xprt.h> #include "xprt_rdma.h" #include <trace/events/rpcrdma.h> @@ -220,7 +221,6 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep) { struct rpc_xprt *xprt = &r_xprt->rx_xprt; - struct svc_serv *bc_serv; struct rpcrdma_req *req; struct rpc_rqst *rqst; struct xdr_buf *buf; @@ -261,11 +261,7 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt, trace_xprtrdma_cb_call(r_xprt, rqst); /* Queue rqst for ULP's callback service */ - bc_serv = xprt->bc_serv; - xprt_get(xprt); - lwq_enqueue(&rqst->rq_bc_list, &bc_serv->sv_cb_list); - - svc_pool_wake_idle_thread(&bc_serv->sv_pools[0]); + xprt_enqueue_bc_request(rqst); r_xprt->rx_stats.bcall_count++; return; diff --git a/scripts/coccinelle/api/pm_runtime.cocci b/scripts/coccinelle/api/pm_runtime.cocci index 2c931e748dda..bf128ccae921 100644 --- a/scripts/coccinelle/api/pm_runtime.cocci +++ b/scripts/coccinelle/api/pm_runtime.cocci @@ -37,7 +37,6 @@ ret@p = \(pm_runtime_idle\| pm_runtime_put_sync_autosuspend\| pm_runtime_set_active\| pm_schedule_suspend\| - pm_runtime_barrier\| pm_generic_runtime_suspend\| pm_generic_runtime_resume\)(...); ... diff --git a/scripts/package/install-extmod-build b/scripts/package/install-extmod-build index 054fdf45cc37..2576cf7902db 100755 --- a/scripts/package/install-extmod-build +++ b/scripts/package/install-extmod-build @@ -63,7 +63,7 @@ if [ "${CC}" != "${HOSTCC}" ]; then # Clear VPATH and srcroot because the source files reside in the output # directory. # shellcheck disable=SC2016 # $(MAKE) and $(build) will be expanded by Make - "${MAKE}" run-command KBUILD_RUN_COMMAND='+$(MAKE) HOSTCC='"${CC}"' VPATH= srcroot=. $(build)='"$(realpath --relative-to=. "${destdir}")"/scripts + "${MAKE}" run-command KBUILD_RUN_COMMAND='+$(MAKE) HOSTCC="'"${CC}"'" VPATH= srcroot=. $(build)='"$(realpath --relative-to=. 
"${destdir}")"/scripts rm -f "${destdir}/scripts/Kbuild" fi diff --git a/scripts/tracepoint-update.c b/scripts/tracepoint-update.c index 7f7d90df14ce..90046aedc97b 100644 --- a/scripts/tracepoint-update.c +++ b/scripts/tracepoint-update.c @@ -210,6 +210,9 @@ static int process_tracepoints(bool mod, void *addr, const char *fname) } if (!tracepoint_data_sec) { + /* A module may reference only exported tracepoints */ + if (mod) + return 0; fprintf(stderr, "no __tracepoint_strings in file: %s\n", fname); return -1; } diff --git a/tools/power/x86/intel-speed-select/isst-config.c b/tools/power/x86/intel-speed-select/isst-config.c index 0ce251b8d466..558138eea75e 100644 --- a/tools/power/x86/intel-speed-select/isst-config.c +++ b/tools/power/x86/intel-speed-select/isst-config.c @@ -16,7 +16,7 @@ struct process_cmd_struct { int arg; }; -static const char *version_str = "v1.23"; +static const char *version_str = "v1.24"; static const int supported_api_ver = 3; static struct isst_if_platform_info isst_platform_info; diff --git a/tools/power/x86/intel-speed-select/isst-core-tpmi.c b/tools/power/x86/intel-speed-select/isst-core-tpmi.c index 4f389e1c0525..ebaad0dc8ca6 100644 --- a/tools/power/x86/intel-speed-select/isst-core-tpmi.c +++ b/tools/power/x86/intel-speed-select/isst-core-tpmi.c @@ -452,13 +452,16 @@ static int tpmi_get_pbf_info(struct isst_id *id, int level, return _pbf_get_coremask_info(id, level, pbf_info); } +#define FEATURE_ENABLE_WAIT_US 1000 +#define FEATURE_ENABLE_RETRIES 5 + static int tpmi_set_pbf_fact_status(struct isst_id *id, int pbf, int enable) { struct isst_pkg_ctdp pkg_dev; struct isst_pkg_ctdp_level_info ctdp_level; int current_level; struct isst_perf_feature_control info; - int ret; + int ret, i; ret = isst_get_ctdp_levels(id, &pkg_dev); if (ret) @@ -503,6 +506,30 @@ static int tpmi_set_pbf_fact_status(struct isst_id *id, int pbf, int enable) if (ret == -1) return ret; + for (i = 0; i < FEATURE_ENABLE_RETRIES; ++i) { + + usleep(FEATURE_ENABLE_WAIT_US); + + /* Check status */ + ret = isst_get_ctdp_control(id, current_level, &ctdp_level); + if (ret) + return ret; + + debug_printf("pbf_enabled:%d fact_enabled:%d\n", + ctdp_level.pbf_enabled, ctdp_level.fact_enabled); + + if (pbf) { + if (ctdp_level.pbf_enabled == enable) + break; + } else { + if (ctdp_level.fact_enabled == enable) + break; + } + } + + if (i == FEATURE_ENABLE_RETRIES) + return -1; + return 0; } @@ -513,6 +540,7 @@ static int tpmi_get_fact_info(struct isst_id *id, int level, int fact_bucket, int i, j; int ret; + memset(&info, 0, sizeof(info)); info.socket_id = id->pkg; info.power_domain_id = id->punit; info.level = level; @@ -659,7 +687,8 @@ static int tpmi_pm_qos_config(struct isst_id *id, int enable_clos, int priority_type) { struct isst_core_power info; - int i, ret, saved_punit; + int cp_state = 0, cp_cap = 0; + int i, j, ret, saved_punit; info.get_set = 1; info.socket_id = id->pkg; @@ -679,6 +708,19 @@ static int tpmi_pm_qos_config(struct isst_id *id, int enable_clos, id->punit = saved_punit; return ret; } + /* Get status */ + for (j = 0; j < FEATURE_ENABLE_RETRIES; ++j) { + usleep(FEATURE_ENABLE_WAIT_US); + ret = tpmi_read_pm_config(id, &cp_state, &cp_cap); + debug_printf("ret:%d cp_state:%d enable_clos:%d\n", ret, + cp_state, enable_clos); + if (ret || cp_state == enable_clos) + break; + } + if (j == FEATURE_ENABLE_RETRIES) { + id->punit = saved_punit; + return -1; + } } } diff --git a/tools/testing/selftests/futex/functional/.gitignore b/tools/testing/selftests/futex/functional/.gitignore index 
776ad658f75e..23b9fea8d190 100644 --- a/tools/testing/selftests/futex/functional/.gitignore +++ b/tools/testing/selftests/futex/functional/.gitignore @@ -12,3 +12,4 @@ futex_wait_uninitialized_heap futex_wait_wouldblock futex_waitv futex_numa +robust_list diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile index 490ace1f017e..af7ec309ea78 100644 --- a/tools/testing/selftests/futex/functional/Makefile +++ b/tools/testing/selftests/futex/functional/Makefile @@ -22,7 +22,8 @@ TEST_GEN_PROGS := \ futex_priv_hash \ futex_numa_mpol \ futex_waitv \ - futex_numa + futex_numa \ + robust_list TEST_PROGS := run.sh diff --git a/tools/testing/selftests/futex/functional/futex_numa_mpol.c b/tools/testing/selftests/futex/functional/futex_numa_mpol.c index ab8555752137..220ef219c823 100644 --- a/tools/testing/selftests/futex/functional/futex_numa_mpol.c +++ b/tools/testing/selftests/futex/functional/futex_numa_mpol.c @@ -131,11 +131,6 @@ static void test_futex(void *futex_ptr, int err_value) __test_futex(futex_ptr, err_value, FUTEX2_SIZE_U32 | FUTEX_PRIVATE_FLAG | FUTEX2_NUMA); } -static void test_futex_mpol(void *futex_ptr, int err_value) -{ - __test_futex(futex_ptr, err_value, FUTEX2_SIZE_U32 | FUTEX_PRIVATE_FLAG | FUTEX2_NUMA | FUTEX2_MPOL); -} - TEST(futex_numa_mpol) { struct futex32_numa *futex_numa; diff --git a/tools/testing/selftests/futex/functional/futex_wait.c b/tools/testing/selftests/futex/functional/futex_wait.c index 0e69c53524c1..7b8879409007 100644 --- a/tools/testing/selftests/futex/functional/futex_wait.c +++ b/tools/testing/selftests/futex/functional/futex_wait.c @@ -71,6 +71,8 @@ TEST(anon_page) /* Testing an anon page shared memory */ shm_id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0666); if (shm_id < 0) { + if (errno == ENOSYS) + ksft_exit_skip("shmget syscall not supported\n"); perror("shmget"); exit(1); } @@ -108,14 +110,14 @@ TEST(file_backed) /* Testing a file backed shared memory */ fd = open(SHM_PATH, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR); if (fd < 0) - ksft_exit_fail_msg("open"); + ksft_exit_fail_msg("open\n"); if (ftruncate(fd, sizeof(f_private))) - ksft_exit_fail_msg("ftruncate"); + ksft_exit_fail_msg("ftruncate\n"); shm = mmap(NULL, sizeof(f_private), PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); if (shm == MAP_FAILED) - ksft_exit_fail_msg("mmap"); + ksft_exit_fail_msg("mmap\n"); memcpy(shm, &f_private, sizeof(f_private)); diff --git a/tools/testing/selftests/futex/functional/futex_waitv.c b/tools/testing/selftests/futex/functional/futex_waitv.c index d60876164d4b..b5ada9fdb26f 100644 --- a/tools/testing/selftests/futex/functional/futex_waitv.c +++ b/tools/testing/selftests/futex/functional/futex_waitv.c @@ -86,6 +86,8 @@ TEST(shared_waitv) int shm_id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0666); if (shm_id < 0) { + if (errno == ENOSYS) + ksft_exit_skip("shmget syscall not supported\n"); perror("shmget"); exit(1); } diff --git a/tools/testing/selftests/futex/functional/robust_list.c b/tools/testing/selftests/futex/functional/robust_list.c new file mode 100644 index 000000000000..e7d1254e18ca --- /dev/null +++ b/tools/testing/selftests/futex/functional/robust_list.c @@ -0,0 +1,552 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2025 Igalia S.L. 
+ * + * Robust list test by André Almeida <andrealmeid@igalia.com> + * + * The robust list uAPI allows userspace to create "robust" locks, in the sense + * that if the lock holder thread dies, the remaining threads that are waiting + * for the lock won't block forever, waiting for a lock that will never be + * released. + * + * This is achieved by userspace setting a list where a thread can record all the + * locks (futexes) that it is holding. The robust list is a linked list, and + * userspace registers the start of the list with the syscall set_robust_list(). + * If such a thread eventually dies, the kernel will walk this list, waking up one + * thread waiting for each futex and marking the futex word with the flag + * FUTEX_OWNER_DIED. + * + * See also + * man set_robust_list + * Documentation/locking/robust-futex-ABI.rst + * Documentation/locking/robust-futexes.rst + */ + +#define _GNU_SOURCE + +#include "futextest.h" +#include "../../kselftest_harness.h" + +#include <errno.h> +#include <pthread.h> +#include <signal.h> +#include <stdatomic.h> +#include <stdbool.h> +#include <stddef.h> +#include <sys/mman.h> +#include <sys/wait.h> + +#define STACK_SIZE (1024 * 1024) + +#define FUTEX_TIMEOUT 3 + +#define SLEEP_US 100 + +static pthread_barrier_t barrier, barrier2; + +static int set_robust_list(struct robust_list_head *head, size_t len) +{ + return syscall(SYS_set_robust_list, head, len); +} + +static int get_robust_list(int pid, struct robust_list_head **head, size_t *len_ptr) +{ + return syscall(SYS_get_robust_list, pid, head, len_ptr); +} + +/* + * Basic lock struct, contains just the futex word and the robust list element + * Real implementations also have a *prev to easily walk the list + */ +struct lock_struct { + _Atomic(unsigned int) futex; + struct robust_list list; +}; + +/* + * Helper function to spawn a child thread.
Returns -1 on error, pid on success + */ +static int create_child(int (*fn)(void *arg), void *arg) +{ + char *stack; + pid_t pid; + + stack = mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS | MAP_STACK, -1, 0); + if (stack == MAP_FAILED) + return -1; + + stack += STACK_SIZE; + + pid = clone(fn, stack, CLONE_VM | SIGCHLD, arg); + + if (pid == -1) + return -1; + + return pid; +} + +/* + * Helper function to prepare and register a robust list + */ +static int set_list(struct robust_list_head *head) +{ + int ret; + + ret = set_robust_list(head, sizeof(*head)); + if (ret) + return ret; + + head->futex_offset = (size_t) offsetof(struct lock_struct, futex) - + (size_t) offsetof(struct lock_struct, list); + head->list.next = &head->list; + head->list_op_pending = NULL; + + return 0; +} + +/* + * A basic (and incomplete) mutex lock function with robustness + */ +static int mutex_lock(struct lock_struct *lock, struct robust_list_head *head, bool error_inject) +{ + _Atomic(unsigned int) *futex = &lock->futex; + unsigned int zero = 0; + pid_t tid = gettid(); + int ret = -1; + + /* + * Set list_op_pending before starting the lock, so the kernel can catch + * the case where the thread died during the lock operation + */ + head->list_op_pending = &lock->list; + + if (atomic_compare_exchange_strong(futex, &zero, tid)) { + /* + * We took the lock, insert it in the robust list + */ + struct robust_list *list = &head->list; + + /* Error injection to test list_op_pending */ + if (error_inject) + return 0; + + while (list->next != &head->list) + list = list->next; + + list->next = &lock->list; + lock->list.next = &head->list; + + ret = 0; + } else { + /* + * We didn't take the lock, wait until the owner wakes (or dies) + */ + struct timespec to; + + to.tv_sec = FUTEX_TIMEOUT; + to.tv_nsec = 0; + + tid = atomic_load(futex); + /* Kernel ignores futexes without the waiters flag */ + tid |= FUTEX_WAITERS; + atomic_store(futex, tid); + + ret = futex_wait((futex_t *) futex, tid, &to, 0); + + /* + * A real mutex_lock() implementation would loop here to finally + * take the lock. We don't care about that, so we stop here. + */ + } + + head->list_op_pending = NULL; + + return ret; +} + +/* + * This child thread will succeed in taking the lock, and then will exit holding it + */ +static int child_fn_lock(void *arg) +{ + struct lock_struct *lock = arg; + struct robust_list_head head; + int ret; + + ret = set_list(&head); + if (ret) { + ksft_test_result_fail("set_robust_list error\n"); + return ret; + } + + ret = mutex_lock(lock, &head, false); + if (ret) { + ksft_test_result_fail("mutex_lock error\n"); + return ret; + } + + pthread_barrier_wait(&barrier); + + /* + * There's a race here: the parent thread needs to be inside + * futex_wait() before the child thread dies, otherwise it will miss the + * wakeup from handle_futex_death() that this child will emit. We wait a + * little bit just to make sure that this happens. + */ + usleep(SLEEP_US); + + return 0; +} + +/* + * Spawns a child thread that will set a robust list, take the lock, register it + * in the robust list and die. The parent thread will wait on this futex, and + * should be woken up when the child exits.
+ */
+TEST(test_robustness)
+{
+        struct lock_struct lock = { .futex = 0 };
+        _Atomic(unsigned int) *futex = &lock.futex;
+        struct robust_list_head head;
+        int ret, pid, wstatus;
+
+        ret = set_list(&head);
+        ASSERT_EQ(ret, 0);
+
+        /*
+         * Let's use a barrier to ensure that the child thread takes the lock
+         * before the parent
+         */
+        ret = pthread_barrier_init(&barrier, NULL, 2);
+        ASSERT_EQ(ret, 0);
+
+        pid = create_child(&child_fn_lock, &lock);
+        ASSERT_NE(pid, -1);
+
+        pthread_barrier_wait(&barrier);
+        ret = mutex_lock(&lock, &head, false);
+
+        /*
+         * futex_wait() should return 0 and the futex word should be marked
+         * with FUTEX_OWNER_DIED
+         */
+        ASSERT_EQ(ret, 0);
+
+        ASSERT_TRUE(*futex & FUTEX_OWNER_DIED);
+
+        wait(&wstatus);
+        pthread_barrier_destroy(&barrier);
+
+        /* Pass only if the child hasn't returned an error */
+        if (!WEXITSTATUS(wstatus))
+                ksft_test_result_pass("%s\n", __func__);
+}
+
+/*
+ * The only valid value for len is sizeof(*head)
+ */
+TEST(test_set_robust_list_invalid_size)
+{
+        struct robust_list_head head;
+        size_t head_size = sizeof(head);
+        int ret;
+
+        ret = set_robust_list(&head, head_size);
+        ASSERT_EQ(ret, 0);
+
+        ret = set_robust_list(&head, head_size * 2);
+        ASSERT_EQ(ret, -1);
+        ASSERT_EQ(errno, EINVAL);
+
+        ret = set_robust_list(&head, head_size - 1);
+        ASSERT_EQ(ret, -1);
+        ASSERT_EQ(errno, EINVAL);
+
+        ret = set_robust_list(&head, 0);
+        ASSERT_EQ(ret, -1);
+        ASSERT_EQ(errno, EINVAL);
+
+        ksft_test_result_pass("%s\n", __func__);
+}
+
+/*
+ * Test get_robust_list with pid = 0, getting the list of the running thread
+ */
+TEST(test_get_robust_list_self)
+{
+        struct robust_list_head head, head2, *get_head;
+        size_t head_size = sizeof(head), len_ptr;
+        int ret;
+
+        ret = set_robust_list(&head, head_size);
+        ASSERT_EQ(ret, 0);
+
+        ret = get_robust_list(0, &get_head, &len_ptr);
+        ASSERT_EQ(ret, 0);
+        ASSERT_EQ(get_head, &head);
+        ASSERT_EQ(head_size, len_ptr);
+
+        ret = set_robust_list(&head2, head_size);
+        ASSERT_EQ(ret, 0);
+
+        ret = get_robust_list(0, &get_head, &len_ptr);
+        ASSERT_EQ(ret, 0);
+        ASSERT_EQ(get_head, &head2);
+        ASSERT_EQ(head_size, len_ptr);
+
+        ksft_test_result_pass("%s\n", __func__);
+}
+
+static int child_list(void *arg)
+{
+        struct robust_list_head *head = arg;
+        int ret;
+
+        ret = set_robust_list(head, sizeof(*head));
+        if (ret) {
+                ksft_test_result_fail("set_robust_list error\n");
+                return -1;
+        }
+
+        /*
+         * After setting the list head, wait until the main thread can call
+         * get_robust_list() for this thread before exiting.
+         */
+        pthread_barrier_wait(&barrier);
+        pthread_barrier_wait(&barrier2);
+
+        return 0;
+}
+
+/*
+ * Test get_robust_list from another thread.
+ * We use two barriers here to ensure that:
+ * 1) the child thread sets the list before we try to get it from the parent
+ * 2) the child thread is still alive when we try to get the list from it
+ */
+TEST(test_get_robust_list_child)
+{
+        struct robust_list_head head, *get_head;
+        int ret, wstatus;
+        size_t len_ptr;
+        pid_t tid;
+
+        ret = pthread_barrier_init(&barrier, NULL, 2);
+        ASSERT_EQ(ret, 0);
+        ret = pthread_barrier_init(&barrier2, NULL, 2);
+        ASSERT_EQ(ret, 0);
+
+        tid = create_child(&child_list, &head);
+        ASSERT_NE(tid, -1);
+
+        pthread_barrier_wait(&barrier);
+
+        ret = get_robust_list(tid, &get_head, &len_ptr);
+        ASSERT_EQ(ret, 0);
+        ASSERT_EQ(&head, get_head);
+
+        pthread_barrier_wait(&barrier2);
+
+        wait(&wstatus);
+        pthread_barrier_destroy(&barrier);
+        pthread_barrier_destroy(&barrier2);
+
+        /* Pass only if the child hasn't returned an error */
+        if (!WEXITSTATUS(wstatus))
+                ksft_test_result_pass("%s\n", __func__);
+}
+
+static int child_fn_lock_with_error(void *arg)
+{
+        struct lock_struct *lock = arg;
+        struct robust_list_head head;
+        int ret;
+
+        ret = set_list(&head);
+        if (ret) {
+                ksft_test_result_fail("set_robust_list error\n");
+                return -1;
+        }
+
+        ret = mutex_lock(lock, &head, true);
+        if (ret) {
+                ksft_test_result_fail("mutex_lock error\n");
+                return -1;
+        }
+
+        pthread_barrier_wait(&barrier);
+
+        /* See comment at child_fn_lock() */
+        usleep(SLEEP_US);
+
+        return 0;
+}
+
+/*
+ * Same as the robustness test, but injects an error so that mutex_lock() exits
+ * early, just after setting list_op_pending and taking the lock, to exercise
+ * the list_op_pending mechanism
+ */
+TEST(test_set_list_op_pending)
+{
+        struct lock_struct lock = { .futex = 0 };
+        _Atomic(unsigned int) *futex = &lock.futex;
+        struct robust_list_head head;
+        int ret, wstatus;
+
+        ret = set_list(&head);
+        ASSERT_EQ(ret, 0);
+
+        ret = pthread_barrier_init(&barrier, NULL, 2);
+        ASSERT_EQ(ret, 0);
+
+        ret = create_child(&child_fn_lock_with_error, &lock);
+        ASSERT_NE(ret, -1);
+
+        pthread_barrier_wait(&barrier);
+        ret = mutex_lock(&lock, &head, false);
+
+        ASSERT_EQ(ret, 0);
+
+        ASSERT_TRUE(*futex & FUTEX_OWNER_DIED);
+
+        wait(&wstatus);
+        pthread_barrier_destroy(&barrier);
+
+        /* Pass only if the child hasn't returned an error */
+        if (!WEXITSTATUS(wstatus))
+                ksft_test_result_pass("%s\n", __func__);
+        else
+                ksft_test_result_fail("%s\n", __func__);
+}
+
+#define CHILD_NR 10
+
+static int child_lock_holder(void *arg)
+{
+        struct lock_struct *locks = arg;
+        struct robust_list_head head;
+        int i;
+
+        set_list(&head);
+
+        for (i = 0; i < CHILD_NR; i++) {
+                locks[i].futex = 0;
+                mutex_lock(&locks[i], &head, false);
+        }
+
+        pthread_barrier_wait(&barrier);
+        pthread_barrier_wait(&barrier2);
+
+        /* See comment at child_fn_lock() */
+        usleep(SLEEP_US);
+
+        return 0;
+}
+
+static int child_wait_lock(void *arg)
+{
+        struct lock_struct *lock = arg;
+        struct robust_list_head head;
+        int ret;
+
+        pthread_barrier_wait(&barrier2);
+        ret = mutex_lock(lock, &head, false);
+
+        if (ret) {
+                ksft_test_result_fail("mutex_lock error\n");
+                return -1;
+        }
+
+        if (!(lock->futex & FUTEX_OWNER_DIED)) {
+                ksft_test_result_fail("futex not marked with FUTEX_OWNER_DIED\n");
+                return -1;
+        }
+
+        return 0;
+}
+
+/*
+ * Test a robust list of more than one element.
+ * All the waiters should wake when the holder dies
+ */
+TEST(test_robust_list_multiple_elements)
+{
+        struct lock_struct locks[CHILD_NR];
+        pid_t pids[CHILD_NR + 1];
+        int i, ret, wstatus;
+
+        ret = pthread_barrier_init(&barrier, NULL, 2);
+        ASSERT_EQ(ret, 0);
+        ret = pthread_barrier_init(&barrier2, NULL, CHILD_NR + 1);
+        ASSERT_EQ(ret, 0);
+
+        pids[0] = create_child(&child_lock_holder, &locks);
+
+        /* Wait until the locker thread takes the lock */
+        pthread_barrier_wait(&barrier);
+
+        for (i = 0; i < CHILD_NR; i++)
+                pids[i + 1] = create_child(&child_wait_lock, &locks[i]);
+
+        /* Wait for all children (holder and waiters) to return */
+        ret = 0;
+
+        for (i = 0; i < CHILD_NR + 1; i++) {
+                waitpid(pids[i], &wstatus, 0);
+                if (WEXITSTATUS(wstatus))
+                        ret = -1;
+        }
+
+        pthread_barrier_destroy(&barrier);
+        pthread_barrier_destroy(&barrier2);
+
+        /* Pass only if no child returned an error */
+        if (!ret)
+                ksft_test_result_pass("%s\n", __func__);
+}
+
+static int child_circular_list(void *arg)
+{
+        static struct robust_list_head head;
+        struct lock_struct a, b, c;
+        int ret;
+
+        ret = set_list(&head);
+        if (ret) {
+                ksft_test_result_fail("set_list error\n");
+                return -1;
+        }
+
+        head.list.next = &a.list;
+
+        /*
+         * The last element should point back to the list head, but we
+         * short-circuit it
+         */
+        a.list.next = &b.list;
+        b.list.next = &c.list;
+        c.list.next = &a.list;
+
+        return 0;
+}
+
+/*
+ * Create a circular robust list. The kernel should be able to destroy the list
+ * while processing it, so it won't be trapped in an infinite loop while
+ * handling a process exit
+ */
+TEST(test_circular_list)
+{
+        int wstatus;
+
+        create_child(child_circular_list, NULL);
+
+        wait(&wstatus);
+
+        /* Pass only if the child hasn't returned an error */
+        if (!WEXITSTATUS(wstatus))
+                ksft_test_result_pass("%s\n", __func__);
+}
+
+TEST_HARNESS_MAIN
