Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/platform_profile.c | 7
-rw-r--r--  drivers/base/power/runtime-test.c | 8
-rw-r--r--  drivers/base/power/runtime.c | 14
-rw-r--r--  drivers/block/ublk_drv.c | 28
-rw-r--r--  drivers/hid/hid-asus.c | 1
-rw-r--r--  drivers/i2c/algos/i2c-algo-pcf.c | 105
-rw-r--r--  drivers/i2c/busses/Kconfig | 3
-rw-r--r--  drivers/i2c/busses/i2c-amd-mp2-pci.c | 5
-rw-r--r--  drivers/i2c/busses/i2c-bcm2835.c | 12
-rw-r--r--  drivers/i2c/busses/i2c-designware-core.h | 2
-rw-r--r--  drivers/i2c/busses/i2c-designware-master.c | 7
-rw-r--r--  drivers/i2c/busses/i2c-designware-platdrv.c | 13
-rw-r--r--  drivers/i2c/busses/i2c-i801.c | 3
-rw-r--r--  drivers/i2c/busses/i2c-k1.c | 19
-rw-r--r--  drivers/i2c/busses/i2c-qcom-cci.c | 46
-rw-r--r--  drivers/i2c/busses/i2c-stm32.c | 7
-rw-r--r--  drivers/input/misc/qnap-mcu-input.c | 2
-rw-r--r--  drivers/input/touchscreen/cyttsp5.c | 4
-rw-r--r--  drivers/input/touchscreen/ti_am335x_tsc.c | 4
-rw-r--r--  drivers/input/touchscreen/zforce_ts.c | 3
-rw-r--r--  drivers/md/Kconfig | 2
-rw-r--r--  drivers/md/bcache/request.c | 6
-rw-r--r--  drivers/md/dm-bufio.c | 10
-rw-r--r--  drivers/md/dm-core.h | 1
-rw-r--r--  drivers/md/dm-crypt.c | 117
-rw-r--r--  drivers/md/dm-ebs-target.c | 2
-rw-r--r--  drivers/md/dm-exception-store.h | 2
-rw-r--r--  drivers/md/dm-log-writes.c | 1
-rw-r--r--  drivers/md/dm-mpath.c | 63
-rw-r--r--  drivers/md/dm-pcache/cache.c | 13
-rw-r--r--  drivers/md/dm-pcache/cache_segment.c | 13
-rw-r--r--  drivers/md/dm-raid.c | 2
-rw-r--r--  drivers/md/dm-snap.c | 73
-rw-r--r--  drivers/md/dm-sysfs.c | 8
-rw-r--r--  drivers/md/dm-table.c | 4
-rw-r--r--  drivers/md/dm-thin.c | 19
-rw-r--r--  drivers/md/dm-vdo/action-manager.c | 2
-rw-r--r--  drivers/md/dm-vdo/admin-state.c | 75
-rw-r--r--  drivers/md/dm-vdo/block-map.c | 51
-rw-r--r--  drivers/md/dm-vdo/completion.c | 5
-rw-r--r--  drivers/md/dm-vdo/data-vio.c | 34
-rw-r--r--  drivers/md/dm-vdo/dedupe.c | 42
-rw-r--r--  drivers/md/dm-vdo/dm-vdo-target.c | 5
-rw-r--r--  drivers/md/dm-vdo/encodings.c | 26
-rw-r--r--  drivers/md/dm-vdo/flush.c | 6
-rw-r--r--  drivers/md/dm-vdo/funnel-workqueue.c | 7
-rw-r--r--  drivers/md/dm-vdo/io-submitter.c | 26
-rw-r--r--  drivers/md/dm-vdo/logical-zone.c | 20
-rw-r--r--  drivers/md/dm-vdo/packer.c | 15
-rw-r--r--  drivers/md/dm-vdo/physical-zone.c | 5
-rw-r--r--  drivers/md/dm-vdo/recovery-journal.c | 30
-rw-r--r--  drivers/md/dm-vdo/slab-depot.c | 96
-rw-r--r--  drivers/md/dm-vdo/vdo.c | 9
-rw-r--r--  drivers/md/dm-vdo/vdo.h | 4
-rw-r--r--  drivers/md/dm-vdo/vio.c | 3
-rw-r--r--  drivers/md/dm-vdo/vio.h | 6
-rw-r--r--  drivers/md/dm-verity-fec.c | 41
-rw-r--r--  drivers/md/dm-verity-fec.h | 10
-rw-r--r--  drivers/md/dm-verity-target.c | 209
-rw-r--r--  drivers/md/dm-verity.h | 52
-rw-r--r--  drivers/md/dm-zone.c | 3
-rw-r--r--  drivers/md/dm.c | 46
-rw-r--r--  drivers/platform/Kconfig | 2
-rw-r--r--  drivers/platform/Makefile | 1
-rw-r--r--  drivers/platform/arm64/lenovo-thinkpad-t14s.c | 58
-rw-r--r--  drivers/platform/surface/aggregator/core.c | 2
-rw-r--r--  drivers/platform/surface/aggregator/ssh_packet_layer.c | 2
-rw-r--r--  drivers/platform/surface/aggregator/ssh_request_layer.c | 2
-rw-r--r--  drivers/platform/surface/surface_acpi_notify.c | 2
-rw-r--r--  drivers/platform/wmi/Kconfig | 34
-rw-r--r--  drivers/platform/wmi/Makefile | 8
-rw-r--r--  drivers/platform/wmi/core.c (renamed from drivers/platform/x86/wmi.c) | 34
-rw-r--r--  drivers/platform/x86/Kconfig | 72
-rw-r--r--  drivers/platform/x86/Makefile | 8
-rw-r--r--  drivers/platform/x86/acer-wmi.c | 290
-rw-r--r--  drivers/platform/x86/amd/hfi/hfi.c | 11
-rw-r--r--  drivers/platform/x86/amd/hsmp/acpi.c | 9
-rw-r--r--  drivers/platform/x86/amd/pmf/auto-mode.c | 14
-rw-r--r--  drivers/platform/x86/amd/pmf/cnqf.c | 14
-rw-r--r--  drivers/platform/x86/amd/pmf/core.c | 23
-rw-r--r--  drivers/platform/x86/amd/pmf/pmf.h | 27
-rw-r--r--  drivers/platform/x86/amd/pmf/spc.c | 2
-rw-r--r--  drivers/platform/x86/amd/pmf/sps.c | 38
-rw-r--r--  drivers/platform/x86/amd/pmf/tee-if.c | 92
-rw-r--r--  drivers/platform/x86/asus-armoury.c | 1161
-rw-r--r--  drivers/platform/x86/asus-armoury.h | 1541
-rw-r--r--  drivers/platform/x86/asus-wmi.c | 185
-rw-r--r--  drivers/platform/x86/ayaneo-ec.c | 593
-rw-r--r--  drivers/platform/x86/dell/alienware-wmi-wmax.c | 124
-rw-r--r--  drivers/platform/x86/gpd-pocket-fan.c | 4
-rw-r--r--  drivers/platform/x86/hp/hp-wmi.c | 24
-rw-r--r--  drivers/platform/x86/intel/hid.c | 12
-rw-r--r--  drivers/platform/x86/intel/pmc/arl.c | 12
-rw-r--r--  drivers/platform/x86/intel/pmc/core.c | 149
-rw-r--r--  drivers/platform/x86/intel/pmc/core.h | 16
-rw-r--r--  drivers/platform/x86/intel/pmc/lnl.c | 2
-rw-r--r--  drivers/platform/x86/intel/pmc/mtl.c | 9
-rw-r--r--  drivers/platform/x86/intel/pmc/ptl.c | 3
-rw-r--r--  drivers/platform/x86/intel/pmc/wcl.c | 18
-rw-r--r--  drivers/platform/x86/intel/vsec.c | 2
-rw-r--r--  drivers/platform/x86/lenovo/ideapad-laptop.c | 218
-rw-r--r--  drivers/platform/x86/lenovo/wmi-gamezone.c | 35
-rw-r--r--  drivers/platform/x86/lg-laptop.c | 11
-rw-r--r--  drivers/platform/x86/oxpec.c | 115
-rw-r--r--  drivers/platform/x86/serial-multi-instantiate.c | 13
-rw-r--r--  drivers/platform/x86/uniwill/Kconfig | 38
-rw-r--r--  drivers/platform/x86/uniwill/Makefile | 8
-rw-r--r--  drivers/platform/x86/uniwill/uniwill-acpi.c | 1912
-rw-r--r--  drivers/platform/x86/uniwill/uniwill-wmi.c | 92
-rw-r--r--  drivers/platform/x86/uniwill/uniwill-wmi.h | 129
-rw-r--r--  drivers/platform/x86/x86-android-tablets/lenovo.c | 2
-rw-r--r--  drivers/platform/x86/x86-android-tablets/vexia_atla10_ec.c | 2
-rw-r--r--  drivers/regulator/core.c | 13
-rw-r--r--  drivers/regulator/fixed.c | 11
-rw-r--r--  drivers/regulator/spacemit-p1.c | 4
-rw-r--r--  drivers/s390/char/sclp_mem.c | 16
-rw-r--r--  drivers/s390/char/vmur.c | 8
-rw-r--r--  drivers/spi/spi-microchip-core-spi.c | 1
118 files changed, 7458 insertions(+), 1227 deletions(-)
diff --git a/drivers/acpi/platform_profile.c b/drivers/acpi/platform_profile.c
index b43f4459a4f6..ea04a8c69215 100644
--- a/drivers/acpi/platform_profile.c
+++ b/drivers/acpi/platform_profile.c
@@ -37,6 +37,7 @@ static const char * const profile_names[] = {
[PLATFORM_PROFILE_BALANCED] = "balanced",
[PLATFORM_PROFILE_BALANCED_PERFORMANCE] = "balanced-performance",
[PLATFORM_PROFILE_PERFORMANCE] = "performance",
+ [PLATFORM_PROFILE_MAX_POWER] = "max-power",
[PLATFORM_PROFILE_CUSTOM] = "custom",
};
static_assert(ARRAY_SIZE(profile_names) == PLATFORM_PROFILE_LAST);
@@ -506,7 +507,8 @@ int platform_profile_cycle(void)
if (err)
return err;
- if (profile == PLATFORM_PROFILE_CUSTOM ||
+ if (profile == PLATFORM_PROFILE_MAX_POWER ||
+ profile == PLATFORM_PROFILE_CUSTOM ||
profile == PLATFORM_PROFILE_LAST)
return -EINVAL;
@@ -515,7 +517,8 @@ int platform_profile_cycle(void)
if (err)
return err;
- /* never iterate into a custom if all drivers supported it */
+ /* never iterate into a custom or max power if all drivers supported it */
+ clear_bit(PLATFORM_PROFILE_MAX_POWER, data.aggregate);
clear_bit(PLATFORM_PROFILE_CUSTOM, data.aggregate);
next = find_next_bit_wrap(data.aggregate,
diff --git a/drivers/base/power/runtime-test.c b/drivers/base/power/runtime-test.c
index 477feca804c7..1535ad2b0264 100644
--- a/drivers/base/power/runtime-test.c
+++ b/drivers/base/power/runtime-test.c
@@ -38,10 +38,6 @@ static void pm_runtime_already_suspended_test(struct kunit *test)
KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
pm_runtime_get_noresume(dev);
- KUNIT_EXPECT_EQ(test, 0, pm_runtime_barrier(dev)); /* no wakeup needed */
- pm_runtime_put(dev);
-
- pm_runtime_get_noresume(dev);
KUNIT_EXPECT_EQ(test, 1, pm_runtime_put_sync(dev));
KUNIT_EXPECT_EQ(test, 1, pm_runtime_suspend(dev));
@@ -174,7 +170,7 @@ static void pm_runtime_error_test(struct kunit *test)
KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
KUNIT_EXPECT_EQ(test, 0, pm_runtime_get(dev));
- KUNIT_EXPECT_EQ(test, 1, pm_runtime_barrier(dev)); /* resume was pending */
+ pm_runtime_barrier(dev);
pm_runtime_put(dev);
pm_runtime_suspend(dev); /* flush the put(), to suspend */
KUNIT_EXPECT_TRUE(test, pm_runtime_suspended(dev));
@@ -225,7 +221,7 @@ static void pm_runtime_probe_active_test(struct kunit *test)
KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
/* Nothing to flush. We stay active. */
- KUNIT_EXPECT_EQ(test, 0, pm_runtime_barrier(dev));
+ pm_runtime_barrier(dev);
KUNIT_EXPECT_TRUE(test, pm_runtime_active(dev));
/* Ask for idle? Now we suspend. */
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 62707738caa4..84676cc24221 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1467,30 +1467,20 @@ static void __pm_runtime_barrier(struct device *dev)
* Next, make sure that all pending requests for the device have been flushed
* from pm_wq and wait for all runtime PM operations involving the device in
* progress to complete.
- *
- * Return value:
- * 1, if there was a resume request pending and the device had to be woken up,
- * 0, otherwise
*/
-int pm_runtime_barrier(struct device *dev)
+void pm_runtime_barrier(struct device *dev)
{
- int retval = 0;
-
pm_runtime_get_noresume(dev);
spin_lock_irq(&dev->power.lock);
if (dev->power.request_pending
- && dev->power.request == RPM_REQ_RESUME) {
+ && dev->power.request == RPM_REQ_RESUME)
rpm_resume(dev, 0);
- retval = 1;
- }
__pm_runtime_barrier(dev);
spin_unlock_irq(&dev->power.lock);
pm_runtime_put_noidle(dev);
-
- return retval;
}
EXPORT_SYMBOL_GPL(pm_runtime_barrier);
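For callers, the signature change means pm_runtime_barrier() can no longer report whether a pending resume request had to be woken up; the kunit updates above show the resulting caller-side change. A minimal before/after sketch (illustrative only):

	/* before: the return value flagged a flushed resume request */
	if (pm_runtime_barrier(dev))
		dev_dbg(dev, "pending resume request was woken up\n");

	/* after: the barrier is unconditional and returns void */
	pm_runtime_barrier(dev);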
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 2c715df63f23..df9831783a13 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -926,6 +926,7 @@ static size_t ublk_copy_user_pages(const struct request *req,
size_t done = 0;
rq_for_each_segment(bv, req, iter) {
+ unsigned len;
void *bv_buf;
size_t copied;
@@ -934,18 +935,17 @@ static size_t ublk_copy_user_pages(const struct request *req,
continue;
}
- bv.bv_offset += offset;
- bv.bv_len -= offset;
- bv_buf = bvec_kmap_local(&bv);
+ len = bv.bv_len - offset;
+ bv_buf = kmap_local_page(bv.bv_page) + bv.bv_offset + offset;
if (dir == ITER_DEST)
- copied = copy_to_iter(bv_buf, bv.bv_len, uiter);
+ copied = copy_to_iter(bv_buf, len, uiter);
else
- copied = copy_from_iter(bv_buf, bv.bv_len, uiter);
+ copied = copy_from_iter(bv_buf, len, uiter);
kunmap_local(bv_buf);
done += copied;
- if (copied < bv.bv_len)
+ if (copied < len)
break;
offset = 0;
@@ -3673,6 +3673,19 @@ exit:
return ret;
}
+static bool ublk_ctrl_uring_cmd_may_sleep(u32 cmd_op)
+{
+ switch (_IOC_NR(cmd_op)) {
+ case UBLK_CMD_GET_QUEUE_AFFINITY:
+ case UBLK_CMD_GET_DEV_INFO:
+ case UBLK_CMD_GET_DEV_INFO2:
+ case _IOC_NR(UBLK_U_CMD_GET_FEATURES):
+ return false;
+ default:
+ return true;
+ }
+}
+
static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
unsigned int issue_flags)
{
@@ -3681,7 +3694,8 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
u32 cmd_op = cmd->cmd_op;
int ret = -EINVAL;
- if (issue_flags & IO_URING_F_NONBLOCK)
+ if (ublk_ctrl_uring_cmd_may_sleep(cmd_op) &&
+ issue_flags & IO_URING_F_NONBLOCK)
return -EAGAIN;
ublk_ctrl_cmd_dump(cmd);
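A note on the control-path change above: when a uring cmd issued with IO_URING_F_NONBLOCK returns -EAGAIN, io_uring re-issues it from its io-wq worker context, where sleeping is allowed. The new ublk_ctrl_uring_cmd_may_sleep() predicate lets the read-only queries (queue affinity, device info, features) complete inline instead of taking that round-trip.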
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index a444d41e53b6..472bca54642b 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -27,6 +27,7 @@
#include <linux/hid.h>
#include <linux/module.h>
#include <linux/platform_data/x86/asus-wmi.h>
+#include <linux/platform_data/x86/asus-wmi-leds-ids.h>
#include <linux/input/mt.h>
#include <linux/usb.h> /* For to_usb_interface for T100 touchpad intf check */
#include <linux/power_supply.h>
diff --git a/drivers/i2c/algos/i2c-algo-pcf.c b/drivers/i2c/algos/i2c-algo-pcf.c
index fd563e845d4b..a87ecea7f510 100644
--- a/drivers/i2c/algos/i2c-algo-pcf.c
+++ b/drivers/i2c/algos/i2c-algo-pcf.c
@@ -23,17 +23,8 @@
#include "i2c-algo-pcf.h"
-#define DEB2(x) if (i2c_debug >= 2) x
-#define DEB3(x) if (i2c_debug >= 3) x /* print several statistical values */
-#define DEBPROTO(x) if (i2c_debug >= 9) x;
- /* debug the protocol by showing transferred bits */
#define DEF_TIMEOUT 16
-/*
- * module parameters:
- */
-static int i2c_debug;
-
/* setting states on the bus with the right timing: */
#define set_pcf(adap, ctl, val) adap->setpcf(adap->data, ctl, val)
@@ -47,27 +38,21 @@ static int i2c_debug;
static void i2c_start(struct i2c_algo_pcf_data *adap)
{
- DEBPROTO(printk(KERN_DEBUG "S "));
set_pcf(adap, 1, I2C_PCF_START);
}
static void i2c_repstart(struct i2c_algo_pcf_data *adap)
{
- DEBPROTO(printk(" Sr "));
set_pcf(adap, 1, I2C_PCF_REPSTART);
}
static void i2c_stop(struct i2c_algo_pcf_data *adap)
{
- DEBPROTO(printk("P\n"));
set_pcf(adap, 1, I2C_PCF_STOP);
}
static void handle_lab(struct i2c_algo_pcf_data *adap, const int *status)
{
- DEB2(printk(KERN_INFO
- "i2c-algo-pcf.o: lost arbitration (CSR 0x%02x)\n",
- *status));
/*
* Cleanup from LAB -- reset and enable ESO.
* This resets the PCF8584; since we've lost the bus, no
@@ -88,9 +73,6 @@ static void handle_lab(struct i2c_algo_pcf_data *adap, const int *status)
if (adap->lab_mdelay)
mdelay(adap->lab_mdelay);
- DEB2(printk(KERN_INFO
- "i2c-algo-pcf.o: reset LAB condition (CSR 0x%02x)\n",
- get_pcf(adap, 1)));
}
static int wait_for_bb(struct i2c_algo_pcf_data *adap)
@@ -147,56 +129,48 @@ static int wait_for_pin(struct i2c_algo_pcf_data *adap, int *status)
*
* vdovikin: added detect code for PCF8584
*/
-static int pcf_init_8584 (struct i2c_algo_pcf_data *adap)
+static int pcf_init_8584(struct i2c_algo_pcf_data *adap)
{
unsigned char temp;
- DEB3(printk(KERN_DEBUG "i2c-algo-pcf.o: PCF state 0x%02x\n",
- get_pcf(adap, 1)));
-
/* S1=0x80: S0 selected, serial interface off */
set_pcf(adap, 1, I2C_PCF_PIN);
/*
* check to see S1 now used as R/W ctrl -
* PCF8584 does that when ESO is zero
*/
- if (((temp = get_pcf(adap, 1)) & 0x7f) != (0)) {
- DEB2(printk(KERN_ERR "i2c-algo-pcf.o: PCF detection failed -- can't select S0 (0x%02x).\n", temp));
+ temp = get_pcf(adap, 1);
+ if ((temp & 0x7f) != 0)
return -ENXIO; /* definitely not PCF8584 */
- }
/* load own address in S0, effective address is (own << 1) */
i2c_outb(adap, get_own(adap));
/* check it's really written */
- if ((temp = i2c_inb(adap)) != get_own(adap)) {
- DEB2(printk(KERN_ERR "i2c-algo-pcf.o: PCF detection failed -- can't set S0 (0x%02x).\n", temp));
+ temp = i2c_inb(adap);
+ if (temp != get_own(adap))
return -ENXIO;
- }
/* S1=0xA0, next byte in S2 */
set_pcf(adap, 1, I2C_PCF_PIN | I2C_PCF_ES1);
/* check to see S2 now selected */
- if (((temp = get_pcf(adap, 1)) & 0x7f) != I2C_PCF_ES1) {
- DEB2(printk(KERN_ERR "i2c-algo-pcf.o: PCF detection failed -- can't select S2 (0x%02x).\n", temp));
+ temp = get_pcf(adap, 1);
+ if ((temp & 0x7f) != I2C_PCF_ES1)
return -ENXIO;
- }
/* load clock register S2 */
i2c_outb(adap, get_clock(adap));
/* check it's really written, the only 5 lowest bits does matter */
- if (((temp = i2c_inb(adap)) & 0x1f) != get_clock(adap)) {
- DEB2(printk(KERN_ERR "i2c-algo-pcf.o: PCF detection failed -- can't set S2 (0x%02x).\n", temp));
+ temp = i2c_inb(adap);
+ if ((temp & 0x1f) != get_clock(adap))
return -ENXIO;
- }
/* Enable serial interface, idle, S0 selected */
set_pcf(adap, 1, I2C_PCF_IDLE);
/* check to see PCF is really idled and we can access status register */
- if ((temp = get_pcf(adap, 1)) != (I2C_PCF_PIN | I2C_PCF_BB)) {
- DEB2(printk(KERN_ERR "i2c-algo-pcf.o: PCF detection failed -- can't select S1` (0x%02x).\n", temp));
+ temp = get_pcf(adap, 1);
+ if (temp != (I2C_PCF_PIN | I2C_PCF_BB))
return -ENXIO;
- }
printk(KERN_DEBUG "i2c-algo-pcf.o: detected and initialized PCF8584.\n");
@@ -209,9 +183,7 @@ static int pcf_sendbytes(struct i2c_adapter *i2c_adap, const char *buf,
struct i2c_algo_pcf_data *adap = i2c_adap->algo_data;
int wrcount, status, timeout;
- for (wrcount=0; wrcount<count; ++wrcount) {
- DEB2(dev_dbg(&i2c_adap->dev, "i2c_write: writing %2.2X\n",
- buf[wrcount] & 0xff));
+ for (wrcount = 0; wrcount < count; ++wrcount) {
i2c_outb(adap, buf[wrcount]);
timeout = wait_for_pin(adap, &status);
if (timeout) {
@@ -246,7 +218,8 @@ static int pcf_readbytes(struct i2c_adapter *i2c_adap, char *buf,
/* increment number of bytes to read by one -- read dummy byte */
for (i = 0; i <= count; i++) {
- if ((wfp = wait_for_pin(adap, &status))) {
+ wfp = wait_for_pin(adap, &status);
+ if (wfp) {
if (wfp == -EINTR)
return -EINTR; /* arbitration lost */
@@ -280,7 +253,7 @@ static int pcf_readbytes(struct i2c_adapter *i2c_adap, char *buf,
}
-static int pcf_doAddress(struct i2c_algo_pcf_data *adap,
+static void pcf_send_address(struct i2c_algo_pcf_data *adap,
struct i2c_msg *msg)
{
unsigned char addr = i2c_8bit_addr_from_msg(msg);
@@ -288,8 +261,6 @@ static int pcf_doAddress(struct i2c_algo_pcf_data *adap,
if (msg->flags & I2C_M_REV_DIR_ADDR)
addr ^= 1;
i2c_outb(adap, addr);
-
- return 0;
}
static int pcf_xfer(struct i2c_adapter *i2c_adap,
@@ -299,7 +270,7 @@ static int pcf_xfer(struct i2c_adapter *i2c_adap,
struct i2c_algo_pcf_data *adap = i2c_adap->algo_data;
struct i2c_msg *pmsg;
int i;
- int ret=0, timeout, status;
+ int timeout, status;
if (adap->xfer_begin)
adap->xfer_begin(adap->data);
@@ -307,20 +278,15 @@ static int pcf_xfer(struct i2c_adapter *i2c_adap,
/* Check for bus busy */
timeout = wait_for_bb(adap);
if (timeout) {
- DEB2(printk(KERN_ERR "i2c-algo-pcf.o: "
- "Timeout waiting for BB in pcf_xfer\n");)
i = -EIO;
goto out;
}
- for (i = 0;ret >= 0 && i < num; i++) {
- pmsg = &msgs[i];
-
- DEB2(printk(KERN_DEBUG "i2c-algo-pcf.o: Doing %s %d bytes to 0x%02x - %d of %d messages\n",
- str_read_write(pmsg->flags & I2C_M_RD),
- pmsg->len, pmsg->addr, i + 1, num);)
+ for (i = 0; i < num; i++) {
+ int ret;
- ret = pcf_doAddress(adap, pmsg);
+ pmsg = &msgs[i];
+ pcf_send_address(adap, pmsg);
/* Send START */
if (i == 0)
@@ -335,8 +301,6 @@ static int pcf_xfer(struct i2c_adapter *i2c_adap,
goto out;
}
i2c_stop(adap);
- DEB2(printk(KERN_ERR "i2c-algo-pcf.o: Timeout waiting "
- "for PIN(1) in pcf_xfer\n");)
i = -EREMOTEIO;
goto out;
}
@@ -344,35 +308,21 @@ static int pcf_xfer(struct i2c_adapter *i2c_adap,
/* Check LRB (last rcvd bit - slave ack) */
if (status & I2C_PCF_LRB) {
i2c_stop(adap);
- DEB2(printk(KERN_ERR "i2c-algo-pcf.o: No LRB(1) in pcf_xfer\n");)
i = -EREMOTEIO;
goto out;
}
- DEB3(printk(KERN_DEBUG "i2c-algo-pcf.o: Msg %d, addr=0x%x, flags=0x%x, len=%d\n",
- i, msgs[i].addr, msgs[i].flags, msgs[i].len);)
if (pmsg->flags & I2C_M_RD) {
ret = pcf_readbytes(i2c_adap, pmsg->buf, pmsg->len,
(i + 1 == num));
-
- if (ret != pmsg->len) {
- DEB2(printk(KERN_DEBUG "i2c-algo-pcf.o: fail: "
- "only read %d bytes.\n",ret));
- } else {
- DEB2(printk(KERN_DEBUG "i2c-algo-pcf.o: read %d bytes.\n",ret));
- }
} else {
ret = pcf_sendbytes(i2c_adap, pmsg->buf, pmsg->len,
(i + 1 == num));
-
- if (ret != pmsg->len) {
- DEB2(printk(KERN_DEBUG "i2c-algo-pcf.o: fail: "
- "only wrote %d bytes.\n",ret));
- } else {
- DEB2(printk(KERN_DEBUG "i2c-algo-pcf.o: wrote %d bytes.\n",ret));
- }
}
+
+ if (ret < 0)
+ goto out;
}
out:
@@ -401,12 +351,11 @@ int i2c_pcf_add_bus(struct i2c_adapter *adap)
struct i2c_algo_pcf_data *pcf_adap = adap->algo_data;
int rval;
- DEB2(dev_dbg(&adap->dev, "hw routines registered.\n"));
-
/* register new adapter to i2c module... */
adap->algo = &pcf_algo;
- if ((rval = pcf_init_8584(pcf_adap)))
+ rval = pcf_init_8584(pcf_adap);
+ if (rval)
return rval;
rval = i2c_add_adapter(adap);
@@ -418,7 +367,3 @@ EXPORT_SYMBOL(i2c_pcf_add_bus);
MODULE_AUTHOR("Hans Berglund <hb@spacetec.no>");
MODULE_DESCRIPTION("I2C-Bus PCF8584 algorithm");
MODULE_LICENSE("GPL");
-
-module_param(i2c_debug, int, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(i2c_debug,
- "debug level - 0 off; 1 normal; 2,3 more verbose; 9 pcf-protocol");
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index fd81e49638aa..cea87fcb4a1a 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -166,6 +166,7 @@ config I2C_I801
Arrow Lake (SOC)
Panther Lake (SOC)
Wildcat Lake (SOC)
+ Diamond Rapids (SOC)
This driver can also be built as a module. If so, the module
will be called i2c-i801.
@@ -1474,7 +1475,7 @@ config I2C_ACORN
config I2C_ELEKTOR
tristate "Elektor ISA card"
- depends on ISA && HAS_IOPORT_MAP && BROKEN_ON_SMP
+ depends on ISA && HAS_IOPORT_MAP
select I2C_ALGOPCF
help
This supports the PCF8584 ISA bus I2C adapter. Say Y if you own
diff --git a/drivers/i2c/busses/i2c-amd-mp2-pci.c b/drivers/i2c/busses/i2c-amd-mp2-pci.c
index ef7370d3dbea..60edbabc2986 100644
--- a/drivers/i2c/busses/i2c-amd-mp2-pci.c
+++ b/drivers/i2c/busses/i2c-amd-mp2-pci.c
@@ -458,13 +458,16 @@ struct amd_mp2_dev *amd_mp2_find_device(void)
{
struct device *dev;
struct pci_dev *pci_dev;
+ struct amd_mp2_dev *mp2_dev;
dev = driver_find_next_device(&amd_mp2_pci_driver.driver, NULL);
if (!dev)
return NULL;
pci_dev = to_pci_dev(dev);
- return (struct amd_mp2_dev *)pci_get_drvdata(pci_dev);
+ mp2_dev = (struct amd_mp2_dev *)pci_get_drvdata(pci_dev);
+ put_device(dev);
+ return mp2_dev;
}
EXPORT_SYMBOL_GPL(amd_mp2_find_device);
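The added put_device() balances the reference that the driver_find_*_device() lookup family takes on the device it returns; without it, every amd_mp2_find_device() call leaked a reference. A hedged sketch of the convention (use_drvdata() is a hypothetical consumer):

	dev = driver_find_next_device(&amd_mp2_pci_driver.driver, NULL);
	if (dev) {
		use_drvdata(dev_get_drvdata(dev));
		put_device(dev);	/* drop the lookup's reference */
	}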
diff --git a/drivers/i2c/busses/i2c-bcm2835.c b/drivers/i2c/busses/i2c-bcm2835.c
index 8554e790f8e3..0d7e2654a534 100644
--- a/drivers/i2c/busses/i2c-bcm2835.c
+++ b/drivers/i2c/busses/i2c-bcm2835.c
@@ -137,12 +137,14 @@ static int clk_bcm2835_i2c_set_rate(struct clk_hw *hw, unsigned long rate,
return 0;
}
-static long clk_bcm2835_i2c_round_rate(struct clk_hw *hw, unsigned long rate,
- unsigned long *parent_rate)
+static int clk_bcm2835_i2c_determine_rate(struct clk_hw *hw,
+ struct clk_rate_request *req)
{
- u32 divider = clk_bcm2835_i2c_calc_divider(rate, *parent_rate);
+ u32 divider = clk_bcm2835_i2c_calc_divider(req->rate, req->best_parent_rate);
- return DIV_ROUND_UP(*parent_rate, divider);
+ req->rate = DIV_ROUND_UP(req->best_parent_rate, divider);
+
+ return 0;
}
static unsigned long clk_bcm2835_i2c_recalc_rate(struct clk_hw *hw,
@@ -156,7 +158,7 @@ static unsigned long clk_bcm2835_i2c_recalc_rate(struct clk_hw *hw,
static const struct clk_ops clk_bcm2835_i2c_ops = {
.set_rate = clk_bcm2835_i2c_set_rate,
- .round_rate = clk_bcm2835_i2c_round_rate,
+ .determine_rate = clk_bcm2835_i2c_determine_rate,
.recalc_rate = clk_bcm2835_i2c_recalc_rate,
};
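For context, .determine_rate is the clk framework's replacement for .round_rate: rather than returning the rounded rate as a long (which conflates rates with error codes and cannot represent very high rates), the op edits the struct clk_rate_request in place and returns 0 or an error. A minimal sketch of the contract for a hypothetical divider clock (assumes req->rate is nonzero):

	static int foo_i2c_determine_rate(struct clk_hw *hw,
					  struct clk_rate_request *req)
	{
		/* pick the divider for the requested rate... */
		u32 divider = DIV_ROUND_UP(req->best_parent_rate, req->rate);

		/* ...then report the rate actually achievable */
		req->rate = DIV_ROUND_UP(req->best_parent_rate, divider);

		return 0;
	}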
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 347843b4f5dd..bb5ce0a382f9 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -78,6 +78,7 @@
#define DW_IC_TX_ABRT_SOURCE 0x80
#define DW_IC_ENABLE_STATUS 0x9c
#define DW_IC_CLR_RESTART_DET 0xa8
+#define DW_IC_SMBUS_INTR_MASK 0xcc
#define DW_IC_COMP_PARAM_1 0xf4
#define DW_IC_COMP_VERSION 0xf8
#define DW_IC_SDA_HOLD_MIN_VERS 0x3131312A /* "111*" == v1.11* */
@@ -330,7 +331,6 @@ struct dw_i2c_dev {
struct i2c_dw_semaphore_callbacks {
int (*probe)(struct dw_i2c_dev *dev);
- void (*remove)(struct dw_i2c_dev *dev);
};
int i2c_dw_init_regmap(struct dw_i2c_dev *dev);
diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c
index 41e9b5ecad20..45bfca05bb30 100644
--- a/drivers/i2c/busses/i2c-designware-master.c
+++ b/drivers/i2c/busses/i2c-designware-master.c
@@ -220,6 +220,13 @@ static int i2c_dw_init_master(struct dw_i2c_dev *dev)
/* Disable the adapter */
__i2c_dw_disable(dev);
+ /*
+ * Mask SMBus interrupts to block storms from broken
+ * firmware that leaves IC_SMBUS=1; the handler never
+ * services them.
+ */
+ regmap_write(dev->map, DW_IC_SMBUS_INTR_MASK, 0);
+
/* Write standard speed timing parameters */
regmap_write(dev->map, DW_IC_SS_SCL_HCNT, dev->ss_hcnt);
regmap_write(dev->map, DW_IC_SS_SCL_LCNT, dev->ss_lcnt);
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 34d881572351..7be99656a67d 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -197,15 +197,6 @@ static int i2c_dw_probe_lock_support(struct dw_i2c_dev *dev)
return 0;
}
-static void i2c_dw_remove_lock_support(struct dw_i2c_dev *dev)
-{
- if (dev->semaphore_idx < 0)
- return;
-
- if (i2c_dw_semaphore_cb_table[dev->semaphore_idx].remove)
- i2c_dw_semaphore_cb_table[dev->semaphore_idx].remove(dev);
-}
-
static int dw_i2c_plat_probe(struct platform_device *pdev)
{
u32 flags = (uintptr_t)device_get_match_data(&pdev->dev);
@@ -248,7 +239,7 @@ static int dw_i2c_plat_probe(struct platform_device *pdev)
ret = i2c_dw_probe_lock_support(dev);
if (ret) {
- ret = dev_err_probe(device, ret, "failed to probe lock support\n");
+ dev_err_probe(device, ret, "failed to probe lock support\n");
goto exit_reset;
}
@@ -339,8 +330,6 @@ static void dw_i2c_plat_remove(struct platform_device *pdev)
i2c_dw_prepare_clk(dev, false);
- i2c_dw_remove_lock_support(dev);
-
reset_control_assert(dev->rst);
}
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 57fbec1259be..81e6e2d7ad3d 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -84,6 +84,7 @@
* Panther Lake-H (SOC) 0xe322 32 hard yes yes yes
* Panther Lake-P (SOC) 0xe422 32 hard yes yes yes
* Wildcat Lake-U (SOC) 0x4d22 32 hard yes yes yes
+ * Diamond Rapids (SOC) 0x5827 32 hard yes yes yes
*
* Features supported by this driver:
* Software PEC no
@@ -242,6 +243,7 @@
#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_P_SMBUS 0x51a3
#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_M_SMBUS 0x54a3
#define PCI_DEVICE_ID_INTEL_BIRCH_STREAM_SMBUS 0x5796
+#define PCI_DEVICE_ID_INTEL_DIAMOND_RAPIDS_SMBUS 0x5827
#define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS 0x5ad4
#define PCI_DEVICE_ID_INTEL_ARROW_LAKE_H_SMBUS 0x7722
#define PCI_DEVICE_ID_INTEL_RAPTOR_LAKE_S_SMBUS 0x7a23
@@ -1054,6 +1056,7 @@ static const struct pci_device_id i801_ids[] = {
{ PCI_DEVICE_DATA(INTEL, METEOR_LAKE_SOC_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ PCI_DEVICE_DATA(INTEL, METEOR_LAKE_PCH_S_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ PCI_DEVICE_DATA(INTEL, BIRCH_STREAM_SMBUS, FEATURES_ICH5) },
+ { PCI_DEVICE_DATA(INTEL, DIAMOND_RAPIDS_SMBUS, FEATURES_ICH5) },
{ PCI_DEVICE_DATA(INTEL, ARROW_LAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ PCI_DEVICE_DATA(INTEL, PANTHER_LAKE_H_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
{ PCI_DEVICE_DATA(INTEL, PANTHER_LAKE_P_SMBUS, FEATURES_ICH5 | FEATURE_TCO_CNL) },
diff --git a/drivers/i2c/busses/i2c-k1.c b/drivers/i2c/busses/i2c-k1.c
index 6b918770e612..d42c03ef5db5 100644
--- a/drivers/i2c/busses/i2c-k1.c
+++ b/drivers/i2c/busses/i2c-k1.c
@@ -158,11 +158,16 @@ static int spacemit_i2c_handle_err(struct spacemit_i2c_dev *i2c)
{
dev_dbg(i2c->dev, "i2c error status: 0x%08x\n", i2c->status);
- if (i2c->status & (SPACEMIT_SR_BED | SPACEMIT_SR_ALD)) {
+ /* Arbitration Loss Detected */
+ if (i2c->status & SPACEMIT_SR_ALD) {
spacemit_i2c_reset(i2c);
return -EAGAIN;
}
+ /* Bus Error No ACK/NAK */
+ if (i2c->status & SPACEMIT_SR_BED)
+ spacemit_i2c_reset(i2c);
+
return i2c->status & SPACEMIT_SR_ACKNAK ? -ENXIO : -EIO;
}
@@ -224,6 +229,12 @@ static void spacemit_i2c_check_bus_release(struct spacemit_i2c_dev *i2c)
}
}
+static inline void
+spacemit_i2c_clear_int_status(struct spacemit_i2c_dev *i2c, u32 mask)
+{
+ writel(mask & SPACEMIT_I2C_INT_STATUS_MASK, i2c->base + SPACEMIT_ISR);
+}
+
static void spacemit_i2c_init(struct spacemit_i2c_dev *i2c)
{
u32 val;
@@ -267,12 +278,8 @@ static void spacemit_i2c_init(struct spacemit_i2c_dev *i2c)
val = readl(i2c->base + SPACEMIT_IRCR);
val |= SPACEMIT_RCR_SDA_GLITCH_NOFIX;
writel(val, i2c->base + SPACEMIT_IRCR);
-}
-static inline void
-spacemit_i2c_clear_int_status(struct spacemit_i2c_dev *i2c, u32 mask)
-{
- writel(mask & SPACEMIT_I2C_INT_STATUS_MASK, i2c->base + SPACEMIT_ISR);
+ spacemit_i2c_clear_int_status(i2c, SPACEMIT_I2C_INT_STATUS_MASK);
}
static void spacemit_i2c_start(struct spacemit_i2c_dev *i2c)
diff --git a/drivers/i2c/busses/i2c-qcom-cci.c b/drivers/i2c/busses/i2c-qcom-cci.c
index e631d79baf14..884055df1560 100644
--- a/drivers/i2c/busses/i2c-qcom-cci.c
+++ b/drivers/i2c/busses/i2c-qcom-cci.c
@@ -783,8 +783,54 @@ static const struct cci_data cci_v2_data = {
},
};
+static const struct cci_data cci_msm8953_data = {
+ .num_masters = 2,
+ .queue_size = { 64, 16 },
+ .quirks = {
+ .max_write_len = 11,
+ .max_read_len = 12,
+ },
+ .params[I2C_MODE_STANDARD] = {
+ .thigh = 78,
+ .tlow = 114,
+ .tsu_sto = 28,
+ .tsu_sta = 28,
+ .thd_dat = 10,
+ .thd_sta = 77,
+ .tbuf = 118,
+ .scl_stretch_en = 0,
+ .trdhld = 6,
+ .tsp = 1
+ },
+ .params[I2C_MODE_FAST] = {
+ .thigh = 20,
+ .tlow = 28,
+ .tsu_sto = 21,
+ .tsu_sta = 21,
+ .thd_dat = 13,
+ .thd_sta = 18,
+ .tbuf = 32,
+ .scl_stretch_en = 0,
+ .trdhld = 6,
+ .tsp = 3
+ },
+ .params[I2C_MODE_FAST_PLUS] = {
+ .thigh = 16,
+ .tlow = 22,
+ .tsu_sto = 17,
+ .tsu_sta = 18,
+ .thd_dat = 16,
+ .thd_sta = 15,
+ .tbuf = 19,
+ .scl_stretch_en = 1,
+ .trdhld = 3,
+ .tsp = 3
+ },
+};
+
static const struct of_device_id cci_dt_match[] = {
{ .compatible = "qcom,msm8226-cci", .data = &cci_v1_data},
+ { .compatible = "qcom,msm8953-cci", .data = &cci_msm8953_data},
{ .compatible = "qcom,msm8974-cci", .data = &cci_v1_5_data},
{ .compatible = "qcom,msm8996-cci", .data = &cci_v2_data},
diff --git a/drivers/i2c/busses/i2c-stm32.c b/drivers/i2c/busses/i2c-stm32.c
index f84ec056e36d..becf8977979f 100644
--- a/drivers/i2c/busses/i2c-stm32.c
+++ b/drivers/i2c/busses/i2c-stm32.c
@@ -27,8 +27,8 @@ struct stm32_i2c_dma *stm32_i2c_dma_request(struct device *dev,
if (IS_ERR(dma->chan_tx)) {
ret = PTR_ERR(dma->chan_tx);
if (ret != -ENODEV)
- ret = dev_err_probe(dev, ret,
- "can't request DMA tx channel\n");
+ dev_err_probe(dev, ret, "can't request DMA tx channel\n");
+
goto fail_al;
}
@@ -48,8 +48,7 @@ struct stm32_i2c_dma *stm32_i2c_dma_request(struct device *dev,
if (IS_ERR(dma->chan_rx)) {
ret = PTR_ERR(dma->chan_rx);
if (ret != -ENODEV)
- ret = dev_err_probe(dev, ret,
- "can't request DMA rx channel\n");
+ dev_err_probe(dev, ret, "can't request DMA rx channel\n");
goto fail_tx;
}
diff --git a/drivers/input/misc/qnap-mcu-input.c b/drivers/input/misc/qnap-mcu-input.c
index 76e62f0816c1..3be899bfc114 100644
--- a/drivers/input/misc/qnap-mcu-input.c
+++ b/drivers/input/misc/qnap-mcu-input.c
@@ -103,7 +103,7 @@ static int qnap_mcu_input_probe(struct platform_device *pdev)
input = devm_input_allocate_device(dev);
if (!input)
- return dev_err_probe(dev, -ENOMEM, "no memory for input device\n");
+ return -ENOMEM;
idev->input = input;
idev->dev = dev;
diff --git a/drivers/input/touchscreen/cyttsp5.c b/drivers/input/touchscreen/cyttsp5.c
index 071b7c9bf566..47f4271395a6 100644
--- a/drivers/input/touchscreen/cyttsp5.c
+++ b/drivers/input/touchscreen/cyttsp5.c
@@ -923,8 +923,8 @@ static int cyttsp5_i2c_probe(struct i2c_client *client)
regmap = devm_regmap_init_i2c(client, &config);
if (IS_ERR(regmap)) {
- dev_err(&client->dev, "regmap allocation failed: %ld\n",
- PTR_ERR(regmap));
+ dev_err(&client->dev, "regmap allocation failed: %pe\n",
+ regmap);
return PTR_ERR(regmap);
}
diff --git a/drivers/input/touchscreen/ti_am335x_tsc.c b/drivers/input/touchscreen/ti_am335x_tsc.c
index 93d659ff90aa..d6edfab16770 100644
--- a/drivers/input/touchscreen/ti_am335x_tsc.c
+++ b/drivers/input/touchscreen/ti_am335x_tsc.c
@@ -389,6 +389,10 @@ static int titsc_parse_dt(struct platform_device *pdev,
dev_warn(&pdev->dev,
"invalid co-ordinate readouts, resetting it to 5\n");
ts_dev->coordinate_readouts = 5;
+ } else if (ts_dev->coordinate_readouts > 6) {
+ dev_warn(&pdev->dev,
+ "co-ordinate readouts too large, limiting to 6\n");
+ ts_dev->coordinate_readouts = 6;
}
err = of_property_read_u32(node, "ti,charge-delay",
diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c
index df42fdf36ae3..a360749fa076 100644
--- a/drivers/input/touchscreen/zforce_ts.c
+++ b/drivers/input/touchscreen/zforce_ts.c
@@ -747,8 +747,7 @@ static int zforce_probe(struct i2c_client *client)
input_dev = devm_input_allocate_device(&client->dev);
if (!input_dev)
- return dev_err_probe(&client->dev, -ENOMEM,
- "could not allocate input device\n");
+ return -ENOMEM;
ts->client = client;
ts->input = input_dev;
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 104aa5355090..239c1744a926 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -299,6 +299,7 @@ config DM_CRYPT
select CRYPTO
select CRYPTO_CBC
select CRYPTO_ESSIV
+ select CRYPTO_LIB_MD5 # needed by lmk IV mode
help
This device-mapper target allows you to create a device that
transparently encrypts the data on it. You'll need to activate
@@ -546,6 +547,7 @@ config DM_VERITY
depends on BLK_DEV_DM
select CRYPTO
select CRYPTO_HASH
+ select CRYPTO_LIB_SHA256
select DM_BUFIO
help
This device-mapper target creates a read-only device that
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index af345dc6fde1..82fdea7dea7a 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -1104,7 +1104,7 @@ static void detached_dev_end_io(struct bio *bio)
}
kfree(ddip);
- bio->bi_end_io(bio);
+ bio_endio(bio);
}
static void detached_dev_do_request(struct bcache_device *d, struct bio *bio,
@@ -1121,7 +1121,7 @@ static void detached_dev_do_request(struct bcache_device *d, struct bio *bio,
ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO);
if (!ddip) {
bio->bi_status = BLK_STS_RESOURCE;
- bio->bi_end_io(bio);
+ bio_endio(bio);
return;
}
@@ -1136,7 +1136,7 @@ static void detached_dev_do_request(struct bcache_device *d, struct bio *bio,
if ((bio_op(bio) == REQ_OP_DISCARD) &&
!bdev_max_discard_sectors(dc->bdev))
- bio->bi_end_io(bio);
+ detached_dev_end_io(bio);
else
submit_bio_noacct(bio);
}
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index e6d28be11c5c..5235f3e4924b 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1374,7 +1374,7 @@ static void submit_io(struct dm_buffer *b, enum req_op op, unsigned short ioprio
{
unsigned int n_sectors;
sector_t sector;
- unsigned int offset, end;
+ unsigned int offset, end, align;
b->end_io = end_io;
@@ -1388,9 +1388,11 @@ static void submit_io(struct dm_buffer *b, enum req_op op, unsigned short ioprio
b->c->write_callback(b);
offset = b->write_start;
end = b->write_end;
- offset &= -DM_BUFIO_WRITE_ALIGN;
- end += DM_BUFIO_WRITE_ALIGN - 1;
- end &= -DM_BUFIO_WRITE_ALIGN;
+ align = max(DM_BUFIO_WRITE_ALIGN,
+ bdev_physical_block_size(b->c->bdev));
+ offset &= -align;
+ end += align - 1;
+ end &= -align;
if (unlikely(end > b->c->block_size))
end = b->c->block_size;
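The rounding above relies on the alignment being a power of two: x &= -align rounds down and (x + align - 1) & -align rounds up. A worked example, assuming a 4096-byte physical block size:

	unsigned int align = 4096;	/* assumed physical block size */
	unsigned int offset = 5120, end = 9000;

	offset &= -align;		/* 5120  -> 4096  (round down) */
	end += align - 1;		/* 9000  -> 13095              */
	end &= -align;			/* 13095 -> 12288 (round up)   */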
diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h
index a3c9f74fe2dc..1cda8618d74d 100644
--- a/drivers/md/dm-core.h
+++ b/drivers/md/dm-core.h
@@ -139,7 +139,6 @@ struct mapped_device {
struct srcu_struct io_barrier;
#ifdef CONFIG_BLK_DEV_ZONED
- unsigned int nr_zones;
void *zone_revalidate_map;
struct task_struct *revalidate_map_task;
#endif
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 5ef43231fe77..79704fbc523b 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -21,6 +21,7 @@
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/crypto.h>
+#include <linux/fips.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/backing-dev.h>
@@ -120,7 +121,6 @@ struct iv_benbi_private {
#define LMK_SEED_SIZE 64 /* hash + 0 */
struct iv_lmk_private {
- struct crypto_shash *hash_tfm;
u8 *seed;
};
@@ -254,22 +254,15 @@ static unsigned int max_write_size = 0;
module_param(max_write_size, uint, 0644);
MODULE_PARM_DESC(max_write_size, "Maximum size of a write request");
-static unsigned get_max_request_sectors(struct dm_target *ti, struct bio *bio)
+static unsigned get_max_request_sectors(struct dm_target *ti, struct bio *bio, bool no_split)
{
struct crypt_config *cc = ti->private;
unsigned val, sector_align;
bool wrt = op_is_write(bio_op(bio));
- if (wrt) {
- /*
- * For zoned devices, splitting write operations creates the
- * risk of deadlocking queue freeze operations with zone write
- * plugging BIO work when the reminder of a split BIO is
- * issued. So always allow the entire BIO to proceed.
- */
- if (ti->emulate_zone_append)
- return bio_sectors(bio);
-
+ if (no_split) {
+ val = -1;
+ } else if (wrt) {
val = min_not_zero(READ_ONCE(max_write_size),
DM_CRYPT_DEFAULT_MAX_WRITE_SIZE);
} else {
@@ -465,10 +458,6 @@ static void crypt_iv_lmk_dtr(struct crypt_config *cc)
{
struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
- if (lmk->hash_tfm && !IS_ERR(lmk->hash_tfm))
- crypto_free_shash(lmk->hash_tfm);
- lmk->hash_tfm = NULL;
-
kfree_sensitive(lmk->seed);
lmk->seed = NULL;
}
@@ -483,11 +472,10 @@ static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
return -EINVAL;
}
- lmk->hash_tfm = crypto_alloc_shash("md5", 0,
- CRYPTO_ALG_ALLOCATES_MEMORY);
- if (IS_ERR(lmk->hash_tfm)) {
- ti->error = "Error initializing LMK hash";
- return PTR_ERR(lmk->hash_tfm);
+ if (fips_enabled) {
+ ti->error = "LMK support is disabled due to FIPS";
+ /* ... because it uses MD5. */
+ return -EINVAL;
}
/* No seed in LMK version 2 */
@@ -498,7 +486,6 @@ static int crypt_iv_lmk_ctr(struct crypt_config *cc, struct dm_target *ti,
lmk->seed = kzalloc(LMK_SEED_SIZE, GFP_KERNEL);
if (!lmk->seed) {
- crypt_iv_lmk_dtr(cc);
ti->error = "Error kmallocing seed storage in LMK";
return -ENOMEM;
}
@@ -514,7 +501,7 @@ static int crypt_iv_lmk_init(struct crypt_config *cc)
/* LMK seed is on the position of LMK_KEYS + 1 key */
if (lmk->seed)
memcpy(lmk->seed, cc->key + (cc->tfms_count * subkey_size),
- crypto_shash_digestsize(lmk->hash_tfm));
+ MD5_DIGEST_SIZE);
return 0;
}
@@ -529,55 +516,31 @@ static int crypt_iv_lmk_wipe(struct crypt_config *cc)
return 0;
}
-static int crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
- struct dm_crypt_request *dmreq,
- u8 *data)
+static void crypt_iv_lmk_one(struct crypt_config *cc, u8 *iv,
+ struct dm_crypt_request *dmreq, u8 *data)
{
struct iv_lmk_private *lmk = &cc->iv_gen_private.lmk;
- SHASH_DESC_ON_STACK(desc, lmk->hash_tfm);
- union {
- struct md5_state md5state;
- u8 state[CRYPTO_MD5_STATESIZE];
- } u;
+ struct md5_ctx ctx;
__le32 buf[4];
- int i, r;
- desc->tfm = lmk->hash_tfm;
+ md5_init(&ctx);
- r = crypto_shash_init(desc);
- if (r)
- return r;
-
- if (lmk->seed) {
- r = crypto_shash_update(desc, lmk->seed, LMK_SEED_SIZE);
- if (r)
- return r;
- }
+ if (lmk->seed)
+ md5_update(&ctx, lmk->seed, LMK_SEED_SIZE);
/* Sector is always 512B, block size 16, add data of blocks 1-31 */
- r = crypto_shash_update(desc, data + 16, 16 * 31);
- if (r)
- return r;
+ md5_update(&ctx, data + 16, 16 * 31);
/* Sector is cropped to 56 bits here */
buf[0] = cpu_to_le32(dmreq->iv_sector & 0xFFFFFFFF);
buf[1] = cpu_to_le32((((u64)dmreq->iv_sector >> 32) & 0x00FFFFFF) | 0x80000000);
buf[2] = cpu_to_le32(4024);
buf[3] = 0;
- r = crypto_shash_update(desc, (u8 *)buf, sizeof(buf));
- if (r)
- return r;
+ md5_update(&ctx, (u8 *)buf, sizeof(buf));
/* No MD5 padding here */
- r = crypto_shash_export(desc, &u.md5state);
- if (r)
- return r;
-
- for (i = 0; i < MD5_HASH_WORDS; i++)
- __cpu_to_le32s(&u.md5state.hash[i]);
- memcpy(iv, &u.md5state.hash, cc->iv_size);
-
- return 0;
+ cpu_to_le32_array(ctx.state.h, ARRAY_SIZE(ctx.state.h));
+ memcpy(iv, ctx.state.h, cc->iv_size);
}
static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
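The hunk above moves from the crypto_shash API to the synchronous MD5 library interface (CONFIG_CRYPTO_LIB_MD5), which needs no tfm allocation and cannot fail. A minimal sketch of the library API for reference, with data/len standing in for the message; note that LMK deliberately reads the raw ctx.state instead of finalizing, because its IV is defined over the unpadded MD5 state:

	#include <crypto/md5.h>

	u8 digest[MD5_DIGEST_SIZE];
	struct md5_ctx ctx;

	md5_init(&ctx);
	md5_update(&ctx, data, len);
	md5_final(&ctx, digest);	/* normal use: applies MD5 padding */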
@@ -585,17 +548,15 @@ static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
{
struct scatterlist *sg;
u8 *src;
- int r = 0;
if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
sg = crypt_get_sg_data(cc, dmreq->sg_in);
src = kmap_local_page(sg_page(sg));
- r = crypt_iv_lmk_one(cc, iv, dmreq, src + sg->offset);
+ crypt_iv_lmk_one(cc, iv, dmreq, src + sg->offset);
kunmap_local(src);
} else
memset(iv, 0, cc->iv_size);
-
- return r;
+ return 0;
}
static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
@@ -603,21 +564,19 @@ static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
{
struct scatterlist *sg;
u8 *dst;
- int r;
if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
return 0;
sg = crypt_get_sg_data(cc, dmreq->sg_out);
dst = kmap_local_page(sg_page(sg));
- r = crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset);
+ crypt_iv_lmk_one(cc, iv, dmreq, dst + sg->offset);
/* Tweak the first block of plaintext sector */
- if (!r)
- crypto_xor(dst + sg->offset, iv, cc->iv_size);
+ crypto_xor(dst + sg->offset, iv, cc->iv_size);
kunmap_local(dst);
- return r;
+ return 0;
}
static void crypt_iv_tcw_dtr(struct crypt_config *cc)
@@ -1781,7 +1740,7 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
bio_for_each_folio_all(fi, clone) {
if (folio_test_large(fi.folio)) {
percpu_counter_sub(&cc->n_allocated_pages,
- 1 << folio_order(fi.folio));
+ folio_nr_pages(fi.folio));
folio_put(fi.folio);
} else {
mempool_free(&fi.folio->page, &cc->page_pool);
@@ -3496,6 +3455,7 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
struct dm_crypt_io *io;
struct crypt_config *cc = ti->private;
unsigned max_sectors;
+ bool no_split;
/*
* If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues.
@@ -3513,10 +3473,20 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
/*
* Check if bio is too large, split as needed.
+ *
+ * For zoned devices, splitting write operations creates the
+ * risk of deadlocking queue freeze operations with zone write
+ * plugging BIO work when the remainder of a split BIO is
+ * issued. So always allow the entire BIO to proceed.
*/
- max_sectors = get_max_request_sectors(ti, bio);
- if (unlikely(bio_sectors(bio) > max_sectors))
+ no_split = (ti->emulate_zone_append && op_is_write(bio_op(bio))) ||
+ (bio->bi_opf & REQ_ATOMIC);
+ max_sectors = get_max_request_sectors(ti, bio, no_split);
+ if (unlikely(bio_sectors(bio) > max_sectors)) {
+ if (unlikely(no_split))
+ return DM_MAPIO_KILL;
dm_accept_partial_bio(bio, max_sectors);
+ }
/*
* Ensure that bio is a multiple of internal sector encryption size
@@ -3762,15 +3732,20 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits)
if (ti->emulate_zone_append)
limits->max_hw_sectors = min(limits->max_hw_sectors,
BIO_MAX_VECS << PAGE_SECTORS_SHIFT);
+
+ limits->atomic_write_hw_unit_max = min(limits->atomic_write_hw_unit_max,
+ BIO_MAX_VECS << PAGE_SHIFT);
+ limits->atomic_write_hw_max = min(limits->atomic_write_hw_max,
+ BIO_MAX_VECS << PAGE_SHIFT);
}
static struct target_type crypt_target = {
.name = "crypt",
- .version = {1, 28, 0},
+ .version = {1, 29, 0},
.module = THIS_MODULE,
.ctr = crypt_ctr,
.dtr = crypt_dtr,
- .features = DM_TARGET_ZONED_HM,
+ .features = DM_TARGET_ZONED_HM | DM_TARGET_ATOMIC_WRITES,
.report_zones = crypt_report_zones,
.map = crypt_map,
.status = crypt_status,
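On the atomic-write clamps added in crypt_io_hints(): dm-crypt clones data into at most BIO_MAX_VECS pages, so atomic writes are capped to what a single clone can carry. As a worked figure (typical config, not stated in this diff): with BIO_MAX_VECS = 256 and 4 KiB pages, BIO_MAX_VECS << PAGE_SHIFT = 256 * 4096 = 1 MiB.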
diff --git a/drivers/md/dm-ebs-target.c b/drivers/md/dm-ebs-target.c
index 6abb31ca9662..b354e74a670e 100644
--- a/drivers/md/dm-ebs-target.c
+++ b/drivers/md/dm-ebs-target.c
@@ -103,7 +103,7 @@ static int __ebs_rw_bvec(struct ebs_c *ec, enum req_op op, struct bio_vec *bv,
} else {
flush_dcache_page(bv->bv_page);
memcpy(ba, pa, cur_len);
- dm_bufio_mark_partial_buffer_dirty(b, buf_off, buf_off + cur_len);
+ dm_bufio_mark_buffer_dirty(b);
}
dm_bufio_release(b);
diff --git a/drivers/md/dm-exception-store.h b/drivers/md/dm-exception-store.h
index b67976637538..061b4d310813 100644
--- a/drivers/md/dm-exception-store.h
+++ b/drivers/md/dm-exception-store.h
@@ -29,7 +29,7 @@ typedef sector_t chunk_t;
* chunk within the device.
*/
struct dm_exception {
- struct hlist_bl_node hash_list;
+ struct hlist_node hash_list;
chunk_t old_chunk;
chunk_t new_chunk;
diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c
index 7bb7174f8f4f..f0c84e7a5daa 100644
--- a/drivers/md/dm-log-writes.c
+++ b/drivers/md/dm-log-writes.c
@@ -432,6 +432,7 @@ static int log_writes_kthread(void *arg)
struct log_writes_c *lc = arg;
sector_t sector = 0;
+ set_freezable();
while (!kthread_should_stop()) {
bool super = false;
bool logging_enabled;
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index aaf4a0a4b0eb..c18358271618 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -131,7 +131,7 @@ static void queue_if_no_path_timeout_work(struct timer_list *t);
#define MPATHF_QUEUE_IO 0 /* Must we queue all I/O? */
#define MPATHF_QUEUE_IF_NO_PATH 1 /* Queue I/O if last path fails? */
#define MPATHF_SAVED_QUEUE_IF_NO_PATH 2 /* Saved state during suspension */
-#define MPATHF_RETAIN_ATTACHED_HW_HANDLER 3 /* If there's already a hw_handler present, don't change it. */
+/* MPATHF_RETAIN_ATTACHED_HW_HANDLER no longer has any effect */
#define MPATHF_PG_INIT_DISABLED 4 /* pg_init is not currently allowed */
#define MPATHF_PG_INIT_REQUIRED 5 /* pg_init needs calling? */
#define MPATHF_PG_INIT_DELAY_RETRY 6 /* Delay pg_init retry? */
@@ -237,16 +237,10 @@ static struct multipath *alloc_multipath(struct dm_target *ti)
static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
{
- if (m->queue_mode == DM_TYPE_NONE) {
+ if (m->queue_mode == DM_TYPE_NONE)
m->queue_mode = DM_TYPE_REQUEST_BASED;
- } else if (m->queue_mode == DM_TYPE_BIO_BASED) {
+ else if (m->queue_mode == DM_TYPE_BIO_BASED)
INIT_WORK(&m->process_queued_bios, process_queued_bios);
- /*
- * bio-based doesn't support any direct scsi_dh management;
- * it just discovers if a scsi_dh is attached.
- */
- set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
- }
dm_table_set_type(ti->table, m->queue_mode);
@@ -887,36 +881,30 @@ static int setup_scsi_dh(struct block_device *bdev, struct multipath *m,
struct request_queue *q = bdev_get_queue(bdev);
int r;
- if (mpath_double_check_test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, m)) {
-retain:
- if (*attached_handler_name) {
- /*
- * Clear any hw_handler_params associated with a
- * handler that isn't already attached.
- */
- if (m->hw_handler_name && strcmp(*attached_handler_name, m->hw_handler_name)) {
- kfree(m->hw_handler_params);
- m->hw_handler_params = NULL;
- }
-
- /*
- * Reset hw_handler_name to match the attached handler
- *
- * NB. This modifies the table line to show the actual
- * handler instead of the original table passed in.
- */
- kfree(m->hw_handler_name);
- m->hw_handler_name = *attached_handler_name;
- *attached_handler_name = NULL;
+ if (*attached_handler_name) {
+ /*
+ * Clear any hw_handler_params associated with a
+ * handler that isn't already attached.
+ */
+ if (m->hw_handler_name && strcmp(*attached_handler_name,
+ m->hw_handler_name)) {
+ kfree(m->hw_handler_params);
+ m->hw_handler_params = NULL;
}
+
+ /*
+ * Reset hw_handler_name to match the attached handler
+ *
+ * NB. This modifies the table line to show the actual
+ * handler instead of the original table passed in.
+ */
+ kfree(m->hw_handler_name);
+ m->hw_handler_name = *attached_handler_name;
+ *attached_handler_name = NULL;
}
if (m->hw_handler_name) {
r = scsi_dh_attach(q, m->hw_handler_name);
- if (r == -EBUSY) {
- DMINFO("retaining handler on device %pg", bdev);
- goto retain;
- }
if (r < 0) {
*error = "error attaching hardware handler";
return r;
@@ -1138,7 +1126,7 @@ static int parse_features(struct dm_arg_set *as, struct multipath *m)
}
if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
- set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
+ /* no longer has any effect */
continue;
}
@@ -1823,7 +1811,6 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) +
(m->pg_init_retries > 0) * 2 +
(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
- test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) +
(m->queue_mode != DM_TYPE_REQUEST_BASED) * 2);
if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
@@ -1832,8 +1819,6 @@ static void multipath_status(struct dm_target *ti, status_type_t type,
DMEMIT("pg_init_retries %u ", m->pg_init_retries);
if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
- if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags))
- DMEMIT("retain_attached_hw_handler ");
if (m->queue_mode != DM_TYPE_REQUEST_BASED) {
switch (m->queue_mode) {
case DM_TYPE_BIO_BASED:
@@ -2307,7 +2292,7 @@ static struct target_type multipath_target = {
.name = "multipath",
.version = {1, 15, 0},
.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE |
- DM_TARGET_PASSES_INTEGRITY,
+ DM_TARGET_PASSES_INTEGRITY | DM_TARGET_ATOMIC_WRITES,
.module = THIS_MODULE,
.ctr = multipath_ctr,
.dtr = multipath_dtr,
diff --git a/drivers/md/dm-pcache/cache.c b/drivers/md/dm-pcache/cache.c
index 698697a7a73c..534bf07b794f 100644
--- a/drivers/md/dm-pcache/cache.c
+++ b/drivers/md/dm-pcache/cache.c
@@ -10,7 +10,8 @@ struct kmem_cache *key_cache;
static inline struct pcache_cache_info *get_cache_info_addr(struct pcache_cache *cache)
{
- return cache->cache_info_addr + cache->info_index;
+ return (struct pcache_cache_info *)((char *)cache->cache_info_addr +
+ (size_t)cache->info_index * PCACHE_CACHE_INFO_SIZE);
}
static void cache_info_write(struct pcache_cache *cache)
@@ -21,10 +22,10 @@ static void cache_info_write(struct pcache_cache *cache)
cache_info->header.crc = pcache_meta_crc(&cache_info->header,
sizeof(struct pcache_cache_info));
+ cache->info_index = (cache->info_index + 1) % PCACHE_META_INDEX_MAX;
memcpy_flushcache(get_cache_info_addr(cache), cache_info,
sizeof(struct pcache_cache_info));
-
- cache->info_index = (cache->info_index + 1) % PCACHE_META_INDEX_MAX;
+ pmem_wmb();
}
static void cache_info_init_default(struct pcache_cache *cache);
@@ -49,6 +50,8 @@ static int cache_info_init(struct pcache_cache *cache, struct pcache_cache_optio
return -EINVAL;
}
+ cache->info_index = ((char *)cache_info_addr - (char *)cache->cache_info_addr) / PCACHE_CACHE_INFO_SIZE;
+
return 0;
}
@@ -93,10 +96,10 @@ void cache_pos_encode(struct pcache_cache *cache,
pos_onmedia.header.seq = seq;
pos_onmedia.header.crc = cache_pos_onmedia_crc(&pos_onmedia);
+ *index = (*index + 1) % PCACHE_META_INDEX_MAX;
+
memcpy_flushcache(pos_onmedia_addr, &pos_onmedia, sizeof(struct pcache_cache_pos_onmedia));
pmem_wmb();
-
- *index = (*index + 1) % PCACHE_META_INDEX_MAX;
}
int cache_pos_decode(struct pcache_cache *cache,
diff --git a/drivers/md/dm-pcache/cache_segment.c b/drivers/md/dm-pcache/cache_segment.c
index f0b58980806e..9d92e2b067ed 100644
--- a/drivers/md/dm-pcache/cache_segment.c
+++ b/drivers/md/dm-pcache/cache_segment.c
@@ -26,11 +26,11 @@ static void cache_seg_info_write(struct pcache_cache_segment *cache_seg)
seg_info->header.seq++;
seg_info->header.crc = pcache_meta_crc(&seg_info->header, sizeof(struct pcache_segment_info));
+ cache_seg->info_index = (cache_seg->info_index + 1) % PCACHE_META_INDEX_MAX;
+
seg_info_addr = get_seg_info_addr(cache_seg);
memcpy_flushcache(seg_info_addr, seg_info, sizeof(struct pcache_segment_info));
pmem_wmb();
-
- cache_seg->info_index = (cache_seg->info_index + 1) % PCACHE_META_INDEX_MAX;
mutex_unlock(&cache_seg->info_lock);
}
@@ -56,7 +56,10 @@ static int cache_seg_info_load(struct pcache_cache_segment *cache_seg)
ret = -EIO;
goto out;
}
- cache_seg->info_index = cache_seg_info_addr - cache_seg_info_addr_base;
+
+ cache_seg->info_index =
+ ((char *)cache_seg_info_addr - (char *)cache_seg_info_addr_base) /
+ PCACHE_SEG_INFO_SIZE;
out:
mutex_unlock(&cache_seg->info_lock);
@@ -129,10 +132,10 @@ static void cache_seg_ctrl_write(struct pcache_cache_segment *cache_seg)
cache_seg_gen.header.crc = pcache_meta_crc(&cache_seg_gen.header,
sizeof(struct pcache_cache_seg_gen));
+ cache_seg->gen_index = (cache_seg->gen_index + 1) % PCACHE_META_INDEX_MAX;
+
memcpy_flushcache(get_cache_seg_gen_addr(cache_seg), &cache_seg_gen, sizeof(struct pcache_cache_seg_gen));
pmem_wmb();
-
- cache_seg->gen_index = (cache_seg->gen_index + 1) % PCACHE_META_INDEX_MAX;
}
static void cache_seg_ctrl_init(struct pcache_cache_segment *cache_seg)
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index c6f7129e43d3..4bacdc499984 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -2287,6 +2287,8 @@ static int super_init_validation(struct raid_set *rs, struct md_rdev *rdev)
mddev->reshape_position = le64_to_cpu(sb->reshape_position);
rs->raid_type = get_raid_type_by_ll(mddev->level, mddev->layout);
+ if (!rs->raid_type)
+ return -EINVAL;
}
} else {
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index f40c18da4000..dbd148967de4 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -40,10 +40,15 @@ static const char dm_snapshot_merge_target_name[] = "snapshot-merge";
#define DM_TRACKED_CHUNK_HASH(x) ((unsigned long)(x) & \
(DM_TRACKED_CHUNK_HASH_SIZE - 1))
+struct dm_hlist_head {
+ struct hlist_head head;
+ spinlock_t lock;
+};
+
struct dm_exception_table {
uint32_t hash_mask;
unsigned int hash_shift;
- struct hlist_bl_head *table;
+ struct dm_hlist_head *table;
};
struct dm_snapshot {
@@ -628,8 +633,8 @@ static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk);
/* Lock to protect access to the completed and pending exception hash tables. */
struct dm_exception_table_lock {
- struct hlist_bl_head *complete_slot;
- struct hlist_bl_head *pending_slot;
+ spinlock_t *complete_slot;
+ spinlock_t *pending_slot;
};
static void dm_exception_table_lock_init(struct dm_snapshot *s, chunk_t chunk,
@@ -638,20 +643,20 @@ static void dm_exception_table_lock_init(struct dm_snapshot *s, chunk_t chunk,
struct dm_exception_table *complete = &s->complete;
struct dm_exception_table *pending = &s->pending;
- lock->complete_slot = &complete->table[exception_hash(complete, chunk)];
- lock->pending_slot = &pending->table[exception_hash(pending, chunk)];
+ lock->complete_slot = &complete->table[exception_hash(complete, chunk)].lock;
+ lock->pending_slot = &pending->table[exception_hash(pending, chunk)].lock;
}
static void dm_exception_table_lock(struct dm_exception_table_lock *lock)
{
- hlist_bl_lock(lock->complete_slot);
- hlist_bl_lock(lock->pending_slot);
+ spin_lock_nested(lock->complete_slot, 1);
+ spin_lock_nested(lock->pending_slot, 2);
}
static void dm_exception_table_unlock(struct dm_exception_table_lock *lock)
{
- hlist_bl_unlock(lock->pending_slot);
- hlist_bl_unlock(lock->complete_slot);
+ spin_unlock(lock->pending_slot);
+ spin_unlock(lock->complete_slot);
}
static int dm_exception_table_init(struct dm_exception_table *et,
@@ -661,13 +666,15 @@ static int dm_exception_table_init(struct dm_exception_table *et,
et->hash_shift = hash_shift;
et->hash_mask = size - 1;
- et->table = kvmalloc_array(size, sizeof(struct hlist_bl_head),
+ et->table = kvmalloc_array(size, sizeof(struct dm_hlist_head),
GFP_KERNEL);
if (!et->table)
return -ENOMEM;
- for (i = 0; i < size; i++)
- INIT_HLIST_BL_HEAD(et->table + i);
+ for (i = 0; i < size; i++) {
+ INIT_HLIST_HEAD(&et->table[i].head);
+ spin_lock_init(&et->table[i].lock);
+ }
return 0;
}
@@ -675,16 +682,17 @@ static int dm_exception_table_init(struct dm_exception_table *et,
static void dm_exception_table_exit(struct dm_exception_table *et,
struct kmem_cache *mem)
{
- struct hlist_bl_head *slot;
+ struct dm_hlist_head *slot;
struct dm_exception *ex;
- struct hlist_bl_node *pos, *n;
+ struct hlist_node *pos;
int i, size;
size = et->hash_mask + 1;
for (i = 0; i < size; i++) {
slot = et->table + i;
- hlist_bl_for_each_entry_safe(ex, pos, n, slot, hash_list) {
+ hlist_for_each_entry_safe(ex, pos, &slot->head, hash_list) {
+ hlist_del(&ex->hash_list);
kmem_cache_free(mem, ex);
cond_resched();
}
@@ -700,7 +708,7 @@ static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
static void dm_remove_exception(struct dm_exception *e)
{
- hlist_bl_del(&e->hash_list);
+ hlist_del(&e->hash_list);
}
/*
@@ -710,12 +718,11 @@ static void dm_remove_exception(struct dm_exception *e)
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
chunk_t chunk)
{
- struct hlist_bl_head *slot;
- struct hlist_bl_node *pos;
+ struct hlist_head *slot;
struct dm_exception *e;
- slot = &et->table[exception_hash(et, chunk)];
- hlist_bl_for_each_entry(e, pos, slot, hash_list)
+ slot = &et->table[exception_hash(et, chunk)].head;
+ hlist_for_each_entry(e, slot, hash_list)
if (chunk >= e->old_chunk &&
chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
return e;
@@ -762,18 +769,17 @@ static void free_pending_exception(struct dm_snap_pending_exception *pe)
static void dm_insert_exception(struct dm_exception_table *eh,
struct dm_exception *new_e)
{
- struct hlist_bl_head *l;
- struct hlist_bl_node *pos;
+ struct hlist_head *l;
struct dm_exception *e = NULL;
- l = &eh->table[exception_hash(eh, new_e->old_chunk)];
+ l = &eh->table[exception_hash(eh, new_e->old_chunk)].head;
/* Add immediately if this table doesn't support consecutive chunks */
if (!eh->hash_shift)
goto out;
/* List is ordered by old_chunk */
- hlist_bl_for_each_entry(e, pos, l, hash_list) {
+ hlist_for_each_entry(e, l, hash_list) {
/* Insert after an existing chunk? */
if (new_e->old_chunk == (e->old_chunk +
dm_consecutive_chunk_count(e) + 1) &&
@@ -804,13 +810,13 @@ out:
* Either the table doesn't support consecutive chunks or slot
* l is empty.
*/
- hlist_bl_add_head(&new_e->hash_list, l);
+ hlist_add_head(&new_e->hash_list, l);
} else if (new_e->old_chunk < e->old_chunk) {
/* Add before an existing exception */
- hlist_bl_add_before(&new_e->hash_list, &e->hash_list);
+ hlist_add_before(&new_e->hash_list, &e->hash_list);
} else {
/* Add to l's tail: e is the last exception in this slot */
- hlist_bl_add_behind(&new_e->hash_list, &e->hash_list);
+ hlist_add_behind(&new_e->hash_list, &e->hash_list);
}
}
@@ -820,7 +826,6 @@ out:
*/
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
- struct dm_exception_table_lock lock;
struct dm_snapshot *s = context;
struct dm_exception *e;
@@ -833,17 +838,7 @@ static int dm_add_exception(void *context, chunk_t old, chunk_t new)
/* Consecutive_count is implicitly initialised to zero */
e->new_chunk = new;
- /*
- * Although there is no need to lock access to the exception tables
- * here, if we don't then hlist_bl_add_head(), called by
- * dm_insert_exception(), will complain about accessing the
- * corresponding list without locking it first.
- */
- dm_exception_table_lock_init(s, old, &lock);
-
- dm_exception_table_lock(&lock);
dm_insert_exception(&s->complete, e);
- dm_exception_table_unlock(&lock);
return 0;
}
@@ -873,7 +868,7 @@ static int calc_max_buckets(void)
/* use a fixed size of 2MB */
unsigned long mem = 2 * 1024 * 1024;
- mem /= sizeof(struct hlist_bl_head);
+ mem /= sizeof(struct dm_hlist_head);
return mem;
}
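The dm-snap.c hunks above swap the bit-locked hlist_bl exception buckets for plain hlists, each paired with a per-bucket spinlock taken via spin_lock_nested() so the complete and pending slots get distinct lockdep subclasses. The new struct dm_hlist_head is defined in a part of the patch not shown in this excerpt; judging from the INIT_HLIST_HEAD()/spin_lock_init() pairs above, it presumably has this shape (sketch only):

struct dm_hlist_head {
	struct hlist_head head;	/* chain of struct dm_exception */
	spinlock_t lock;	/* protects head */
};

This also explains the dm_add_exception() hunk: plain hlist_add_head() carries no lockdep assertion, so the lock/unlock pair that existed only to silence hlist_bl's locking check can be dropped.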
diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
index bfaef27ca79f..22bc70923a83 100644
--- a/drivers/md/dm-sysfs.c
+++ b/drivers/md/dm-sysfs.c
@@ -86,17 +86,13 @@ static ssize_t dm_attr_uuid_show(struct mapped_device *md, char *buf)
static ssize_t dm_attr_suspended_show(struct mapped_device *md, char *buf)
{
- sprintf(buf, "%d\n", dm_suspended_md(md));
-
- return strlen(buf);
+ return sysfs_emit(buf, "%d\n", dm_suspended_md(md));
}
static ssize_t dm_attr_use_blk_mq_show(struct mapped_device *md, char *buf)
{
/* Purely for userspace compatibility */
- sprintf(buf, "%d\n", true);
-
- return strlen(buf);
+ return sysfs_emit(buf, "%d\n", true);
}
static DM_ATTR_RO(name);
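The dm-sysfs.c change is the standard sysfs_emit() conversion: unlike the sprintf()-then-strlen() pattern it replaces, sysfs_emit() checks that the output fits the PAGE_SIZE sysfs buffer and returns the byte count directly. A minimal sketch of the resulting pattern (example_show is a made-up attribute callback, not part of this patch):

#include <linux/device.h>
#include <linux/sysfs.h>

/* Hypothetical show callback illustrating the sysfs_emit() pattern. */
static ssize_t example_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	/* Bounds-checked against PAGE_SIZE; returns bytes written. */
	return sysfs_emit(buf, "%d\n", 1);
}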
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index ad0a60a07b93..0522cd700e0e 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -2043,6 +2043,10 @@ bool dm_table_supports_size_change(struct dm_table *t, sector_t old_size,
return true;
}
+/*
+ * This function will be skipped by noflush reloads of immutable
+ * request-based devices (dm-mpath).
+ */
int dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
struct queue_limits *limits)
{
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index c84149ba4e38..52ffb495f5a8 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -395,13 +395,13 @@ static void begin_discard(struct discard_op *op, struct thin_c *tc, struct bio *
op->bio = NULL;
}
-static int issue_discard(struct discard_op *op, dm_block_t data_b, dm_block_t data_e)
+static void issue_discard(struct discard_op *op, dm_block_t data_b, dm_block_t data_e)
{
struct thin_c *tc = op->tc;
sector_t s = block_to_sectors(tc->pool, data_b);
sector_t len = block_to_sectors(tc->pool, data_e - data_b);
- return __blkdev_issue_discard(tc->pool_dev->bdev, s, len, GFP_NOIO, &op->bio);
+ __blkdev_issue_discard(tc->pool_dev->bdev, s, len, GFP_NOIO, &op->bio);
}
static void end_discard(struct discard_op *op, int r)
@@ -1113,9 +1113,7 @@ static void passdown_double_checking_shared_status(struct dm_thin_new_mapping *m
break;
}
- r = issue_discard(&op, b, e);
- if (r)
- goto out;
+ issue_discard(&op, b, e);
b = e;
}
@@ -1188,8 +1186,8 @@ static void process_prepared_discard_passdown_pt1(struct dm_thin_new_mapping *m)
struct discard_op op;
begin_discard(&op, tc, discard_parent);
- r = issue_discard(&op, m->data_block, data_end);
- end_discard(&op, r);
+ issue_discard(&op, m->data_block, data_end);
+ end_discard(&op, 0);
}
}
@@ -4383,11 +4381,8 @@ static void thin_postsuspend(struct dm_target *ti)
{
struct thin_c *tc = ti->private;
- /*
- * The dm_noflush_suspending flag has been cleared by now, so
- * unfortunately we must always run this.
- */
- noflush_work(tc, do_noflush_stop);
+ if (dm_noflush_suspending(ti))
+ noflush_work(tc, do_noflush_stop);
}
static int thin_preresume(struct dm_target *ti)
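Two points in the dm-thin.c hunks: issue_discard() becomes void, apparently because __blkdev_issue_discard() in this tree only builds and chains bios onto op->bio and no longer reports errors through its return value (hence the literal 0 handed to end_discard() in the pt1 path), and thin_postsuspend() now runs the noflush stop only when the suspend really was a noflush suspend. Condensed from the hunks above, the discard pipeline is now:

	struct discard_op op;

	begin_discard(&op, tc, discard_parent);
	issue_discard(&op, m->data_block, data_end);	/* void: cannot fail */
	end_discard(&op, 0);				/* 0 = no setup error */

with any I/O error surfacing through bio completion rather than through the issue step.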
diff --git a/drivers/md/dm-vdo/action-manager.c b/drivers/md/dm-vdo/action-manager.c
index a0e5e7077d13..e3bba0b28aad 100644
--- a/drivers/md/dm-vdo/action-manager.c
+++ b/drivers/md/dm-vdo/action-manager.c
@@ -43,7 +43,7 @@ struct action {
* @actions: The two action slots.
* @current_action: The current action slot.
* @zones: The number of zones in which an action is to be applied.
- * @Scheduler: A function to schedule a default next action.
+ * @scheduler: A function to schedule a default next action.
* @get_zone_thread_id: A function to get the id of the thread on which to apply an action to a
* zone.
* @initiator_thread_id: The ID of the thread on which actions may be initiated.
diff --git a/drivers/md/dm-vdo/admin-state.c b/drivers/md/dm-vdo/admin-state.c
index 3f9dba525154..da153fef085e 100644
--- a/drivers/md/dm-vdo/admin-state.c
+++ b/drivers/md/dm-vdo/admin-state.c
@@ -149,7 +149,8 @@ const struct admin_state_code *VDO_ADMIN_STATE_RESUMING = &VDO_CODE_RESUMING;
/**
* get_next_state() - Determine the state which should be set after a given operation completes
* based on the operation and the current state.
- * @operation The operation to be started.
+ * @state: The current admin state.
+ * @operation: The operation to be started.
*
* Return: The state to set when the operation completes or NULL if the operation can not be
* started in the current state.
@@ -187,6 +188,8 @@ static const struct admin_state_code *get_next_state(const struct admin_state *s
/**
* vdo_finish_operation() - Finish the current operation.
+ * @state: The current admin state.
+ * @result: The result of the operation.
*
* Will notify the operation waiter if there is one. This method should be used for operations
* started with vdo_start_operation(). For operations which were started with vdo_start_draining(),
@@ -214,8 +217,10 @@ bool vdo_finish_operation(struct admin_state *state, int result)
/**
* begin_operation() - Begin an operation if it may be started given the current state.
- * @waiter A completion to notify when the operation is complete; may be NULL.
- * @initiator The vdo_admin_initiator_fn to call if the operation may begin; may be NULL.
+ * @state: The current admin state.
+ * @operation: The operation to be started.
+ * @waiter: A completion to notify when the operation is complete; may be NULL.
+ * @initiator: The vdo_admin_initiator_fn to call if the operation may begin; may be NULL.
*
* Return: VDO_SUCCESS or an error.
*/
@@ -259,8 +264,10 @@ static int __must_check begin_operation(struct admin_state *state,
/**
* start_operation() - Start an operation if it may be started given the current state.
- * @waiter A completion to notify when the operation is complete.
- * @initiator The vdo_admin_initiator_fn to call if the operation may begin; may be NULL.
+ * @state: The current admin state.
+ * @operation: The operation to be started.
+ * @waiter: A completion to notify when the operation is complete; may be NULL.
+ * @initiator: The vdo_admin_initiator_fn to call if the operation may begin; may be NULL.
*
* Return: true if the operation was started.
*/
@@ -274,10 +281,10 @@ static inline bool __must_check start_operation(struct admin_state *state,
/**
* check_code() - Check the result of a state validation.
- * @valid true if the code is of an appropriate type.
- * @code The code which failed to be of the correct type.
- * @what What the code failed to be, for logging.
- * @waiter The completion to notify of the error; may be NULL.
+ * @valid: True if the code is of an appropriate type.
+ * @code: The code which failed to be of the correct type.
+ * @what: What the code failed to be, for logging.
+ * @waiter: The completion to notify of the error; may be NULL.
*
* If the result failed, log an invalid state error and, if there is a waiter, notify it.
*
@@ -301,7 +308,8 @@ static bool check_code(bool valid, const struct admin_state_code *code, const ch
/**
* assert_vdo_drain_operation() - Check that an operation is a drain.
- * @waiter The completion to finish with an error if the operation is not a drain.
+ * @operation: The operation to check.
+ * @waiter: The completion to finish with an error if the operation is not a drain.
*
* Return: true if the specified operation is a drain.
*/
@@ -313,9 +321,10 @@ static bool __must_check assert_vdo_drain_operation(const struct admin_state_cod
/**
* vdo_start_draining() - Initiate a drain operation if the current state permits it.
- * @operation The type of drain to initiate.
- * @waiter The completion to notify when the drain is complete.
- * @initiator The vdo_admin_initiator_fn to call if the operation may begin; may be NULL.
+ * @state: The current admin state.
+ * @operation: The type of drain to initiate.
+ * @waiter: The completion to notify when the drain is complete.
+ * @initiator: The vdo_admin_initiator_fn to call if the operation may begin; may be NULL.
*
* Return: true if the drain was initiated, if not the waiter will be notified.
*/
@@ -345,6 +354,7 @@ bool vdo_start_draining(struct admin_state *state,
/**
* vdo_finish_draining() - Finish a drain operation if one was in progress.
+ * @state: The current admin state.
*
* Return: true if the state was draining; will notify the waiter if so.
*/
@@ -355,6 +365,8 @@ bool vdo_finish_draining(struct admin_state *state)
/**
* vdo_finish_draining_with_result() - Finish a drain operation with a status code.
+ * @state: The current admin state.
+ * @result: The result of the drain operation.
*
* Return: true if the state was draining; will notify the waiter if so.
*/
@@ -365,7 +377,8 @@ bool vdo_finish_draining_with_result(struct admin_state *state, int result)
/**
* vdo_assert_load_operation() - Check that an operation is a load.
- * @waiter The completion to finish with an error if the operation is not a load.
+ * @operation: The operation to check.
+ * @waiter: The completion to finish with an error if the operation is not a load.
*
* Return: true if the specified operation is a load.
*/
@@ -377,9 +390,10 @@ bool vdo_assert_load_operation(const struct admin_state_code *operation,
/**
* vdo_start_loading() - Initiate a load operation if the current state permits it.
- * @operation The type of load to initiate.
- * @waiter The completion to notify when the load is complete (may be NULL).
- * @initiator The vdo_admin_initiator_fn to call if the operation may begin; may be NULL.
+ * @state: The current admin state.
+ * @operation: The type of load to initiate.
+ * @waiter: The completion to notify when the load is complete; may be NULL.
+ * @initiator: The vdo_admin_initiator_fn to call if the operation may begin; may be NULL.
*
* Return: true if the load was initiated, if not the waiter will be notified.
*/
@@ -393,6 +407,7 @@ bool vdo_start_loading(struct admin_state *state,
/**
* vdo_finish_loading() - Finish a load operation if one was in progress.
+ * @state: The current admin state.
*
* Return: true if the state was loading; will notify the waiter if so.
*/
@@ -403,7 +418,8 @@ bool vdo_finish_loading(struct admin_state *state)
/**
* vdo_finish_loading_with_result() - Finish a load operation with a status code.
- * @result The result of the load operation.
+ * @state: The current admin state.
+ * @result: The result of the load operation.
*
* Return: true if the state was loading; will notify the waiter if so.
*/
@@ -414,7 +430,8 @@ bool vdo_finish_loading_with_result(struct admin_state *state, int result)
/**
* assert_vdo_resume_operation() - Check whether an admin_state_code is a resume operation.
- * @waiter The completion to notify if the operation is not a resume operation; may be NULL.
+ * @operation: The operation to check.
+ * @waiter: The completion to notify if the operation is not a resume operation; may be NULL.
*
* Return: true if the code is a resume operation.
*/
@@ -427,9 +444,10 @@ static bool __must_check assert_vdo_resume_operation(const struct admin_state_co
/**
* vdo_start_resuming() - Initiate a resume operation if the current state permits it.
- * @operation The type of resume to start.
- * @waiter The completion to notify when the resume is complete (may be NULL).
- * @initiator The vdo_admin_initiator_fn to call if the operation may begin; may be NULL.
+ * @state: The current admin state.
+ * @operation: The type of resume to start.
+ * @waiter: The completion to notify when the resume is complete; may be NULL.
+ * @initiator: The vdo_admin_initiator_fn to call if the operation may begin; may be NULL.
*
* Return: true if the resume was initiated, if not the waiter will be notified.
*/
@@ -443,6 +461,7 @@ bool vdo_start_resuming(struct admin_state *state,
/**
* vdo_finish_resuming() - Finish a resume operation if one was in progress.
+ * @state: The current admin state.
*
* Return: true if the state was resuming; will notify the waiter if so.
*/
@@ -453,7 +472,8 @@ bool vdo_finish_resuming(struct admin_state *state)
/**
* vdo_finish_resuming_with_result() - Finish a resume operation with a status code.
- * @result The result of the resume operation.
+ * @state: The current admin state.
+ * @result: The result of the resume operation.
*
* Return: true if the state was resuming; will notify the waiter if so.
*/
@@ -465,6 +485,7 @@ bool vdo_finish_resuming_with_result(struct admin_state *state, int result)
/**
* vdo_resume_if_quiescent() - Change the state to normal operation if the current state is
* quiescent.
+ * @state: The current admin state.
*
* Return: VDO_SUCCESS if the state resumed, VDO_INVALID_ADMIN_STATE otherwise.
*/
@@ -479,6 +500,8 @@ int vdo_resume_if_quiescent(struct admin_state *state)
/**
* vdo_start_operation() - Attempt to start an operation.
+ * @state: The current admin state.
+ * @operation: The operation to attempt to start.
*
* Return: VDO_SUCCESS if the operation was started, VDO_INVALID_ADMIN_STATE if not
*/
@@ -490,8 +513,10 @@ int vdo_start_operation(struct admin_state *state,
/**
* vdo_start_operation_with_waiter() - Attempt to start an operation.
- * @waiter the completion to notify when the operation completes or fails to start; may be NULL.
- * @initiator The vdo_admin_initiator_fn to call if the operation may begin; may be NULL.
+ * @state: The current admin state.
+ * @operation: The operation to attempt to start.
+ * @waiter: The completion to notify when the operation completes or fails to start; may be NULL.
+ * @initiator: The vdo_admin_initiator_fn to call if the operation may begin; may be NULL.
*
* Return: VDO_SUCCESS if the operation was started, VDO_INVALID_ADMIN_STATE if not
*/
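Everything in the admin-state.c diff (and in the block-map.c, completion.c, data-vio.c, and most of the dedupe.c diffs that follow) is kernel-doc repair: each parameter gets an "@name:" line (scripts/kernel-doc only recognizes parameter descriptions written with the trailing colon), missing @state entries are added, and wording is normalized. For reference, the canonical layout these hunks converge on (example_op() is a made-up name):

/**
 * example_op() - One-line summary of the function.
 * @state: The current admin state.
 * @operation: The operation to be started.
 *
 * Optional longer description, separated by a bare " *" line.
 *
 * Return: VDO_SUCCESS or an error code.
 */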
diff --git a/drivers/md/dm-vdo/block-map.c b/drivers/md/dm-vdo/block-map.c
index baf683cabb1b..a7db5b41155e 100644
--- a/drivers/md/dm-vdo/block-map.c
+++ b/drivers/md/dm-vdo/block-map.c
@@ -174,6 +174,7 @@ static inline struct vdo_page_completion *page_completion_from_waiter(struct vdo
/**
* initialize_info() - Initialize all page info structures and put them on the free list.
+ * @cache: The page cache.
*
* Return: VDO_SUCCESS or an error.
*/
@@ -209,6 +210,7 @@ static int initialize_info(struct vdo_page_cache *cache)
/**
* allocate_cache_components() - Allocate components of the cache which require their own
* allocation.
+ * @cache: The page cache.
*
* The caller is responsible for all clean up on errors.
*
@@ -238,6 +240,8 @@ static int __must_check allocate_cache_components(struct vdo_page_cache *cache)
/**
* assert_on_cache_thread() - Assert that a function has been called on the VDO page cache's
* thread.
+ * @cache: The page cache.
+ * @function_name: The function name to report if the assertion fails.
*/
static inline void assert_on_cache_thread(struct vdo_page_cache *cache,
const char *function_name)
@@ -271,6 +275,7 @@ static void report_cache_pressure(struct vdo_page_cache *cache)
/**
* get_page_state_name() - Return the name of a page state.
+ * @state: The page state to describe.
*
* If the page state is invalid a static string is returned and the invalid state is logged.
*
@@ -342,6 +347,8 @@ static void update_lru(struct page_info *info)
/**
* set_info_state() - Set the state of a page_info and put it on the right list, adjusting
* counters.
+ * @info: The page info to update.
+ * @new_state: The new state to set.
*/
static void set_info_state(struct page_info *info, enum vdo_page_buffer_state new_state)
{
@@ -416,6 +423,7 @@ static int reset_page_info(struct page_info *info)
/**
* find_free_page() - Find a free page.
+ * @cache: The page cache.
*
* Return: A pointer to the page info structure (if found), NULL otherwise.
*/
@@ -433,6 +441,7 @@ static struct page_info * __must_check find_free_page(struct vdo_page_cache *cac
/**
* find_page() - Find the page info (if any) associated with a given pbn.
+ * @cache: The page cache.
* @pbn: The absolute physical block number of the page.
*
* Return: The page info for the page if available, or NULL if not.
@@ -449,6 +458,7 @@ static struct page_info * __must_check find_page(struct vdo_page_cache *cache,
/**
* select_lru_page() - Determine which page is least recently used.
+ * @cache: The page cache.
*
* Picks the least recently used from among the non-busy entries at the front of each of the lru
* list. Since whenever we mark a page busy we also put it to the end of the list it is unlikely
@@ -523,6 +533,8 @@ static void complete_waiter_with_page(struct vdo_waiter *waiter, void *page_info
/**
* distribute_page_over_waitq() - Complete a waitq of VDO page completions with a page result.
+ * @info: The loaded page info.
+ * @waitq: The list of waiting data_vios.
*
* Upon completion the waitq will be empty.
*
@@ -548,7 +560,9 @@ static unsigned int distribute_page_over_waitq(struct page_info *info,
/**
* set_persistent_error() - Set a persistent error which all requests will receive in the future.
+ * @cache: The page cache.
* @context: A string describing what triggered the error.
+ * @result: The error result to set on the cache.
*
* Once triggered, all enqueued completions will get this error. Any future requests will result in
* this error as well.
@@ -581,6 +595,7 @@ static void set_persistent_error(struct vdo_page_cache *cache, const char *conte
/**
* validate_completed_page() - Check that a page completion which is being freed to the cache
* referred to a valid page and is in a valid state.
+ * @completion: The page completion to check.
* @writable: Whether a writable page is required.
*
* Return: VDO_SUCCESS if the page was valid, otherwise as error
@@ -758,6 +773,8 @@ static void load_cache_page_endio(struct bio *bio)
/**
* launch_page_load() - Begin the process of loading a page.
+ * @info: The page info to launch.
+ * @pbn: The absolute physical block number of the page to load.
*
* Return: VDO_SUCCESS or an error code.
*/
@@ -836,6 +853,7 @@ static void save_pages(struct vdo_page_cache *cache)
/**
* schedule_page_save() - Add a page to the outgoing list of pages waiting to be saved.
+ * @info: The page info to save.
*
* Once in the list, a page may not be used until it has been written out.
*/
@@ -854,6 +872,7 @@ static void schedule_page_save(struct page_info *info)
/**
* launch_page_save() - Add a page to outgoing pages waiting to be saved, and then start saving
* pages if another save is not in progress.
+ * @info: The page info to save.
*/
static void launch_page_save(struct page_info *info)
{
@@ -864,6 +883,7 @@ static void launch_page_save(struct page_info *info)
/**
* completion_needs_page() - Determine whether a given vdo_page_completion (as a waiter) is
* requesting a given page number.
+ * @waiter: The page completion waiter to check.
* @context: A pointer to the pbn of the desired page.
*
* Implements waiter_match_fn.
@@ -880,6 +900,7 @@ static bool completion_needs_page(struct vdo_waiter *waiter, void *context)
/**
* allocate_free_page() - Allocate a free page to the first completion in the waiting queue, and
* any other completions that match it in page number.
+ * @info: The page info to allocate a page for.
*/
static void allocate_free_page(struct page_info *info)
{
@@ -925,6 +946,7 @@ static void allocate_free_page(struct page_info *info)
/**
* discard_a_page() - Begin the process of discarding a page.
+ * @cache: The page cache.
*
* If no page is discardable, increments a count of deferred frees so that the next release of a
* page which is no longer busy will kick off another discard cycle. This is an indication that the
@@ -955,10 +977,6 @@ static void discard_a_page(struct vdo_page_cache *cache)
launch_page_save(info);
}
-/**
- * discard_page_for_completion() - Helper used to trigger a discard so that the completion can get
- * a different page.
- */
static void discard_page_for_completion(struct vdo_page_completion *vdo_page_comp)
{
struct vdo_page_cache *cache = vdo_page_comp->cache;
@@ -1132,6 +1150,7 @@ static void write_pages(struct vdo_completion *flush_completion)
/**
* vdo_release_page_completion() - Release a VDO Page Completion.
+ * @completion: The page completion to release.
*
* The page referenced by this completion (if any) will no longer be held busy by this completion.
* If a page becomes discardable and there are completions awaiting free pages then a new round of
@@ -1172,10 +1191,6 @@ void vdo_release_page_completion(struct vdo_completion *completion)
}
}
-/**
- * load_page_for_completion() - Helper function to load a page as described by a VDO Page
- * Completion.
- */
static void load_page_for_completion(struct page_info *info,
struct vdo_page_completion *vdo_page_comp)
{
@@ -1319,6 +1334,7 @@ int vdo_get_cached_page(struct vdo_completion *completion,
/**
* vdo_invalidate_page_cache() - Invalidate all entries in the VDO page cache.
+ * @cache: The page cache.
*
* There must not be any dirty pages in the cache.
*
@@ -1345,6 +1361,10 @@ int vdo_invalidate_page_cache(struct vdo_page_cache *cache)
/**
* get_tree_page_by_index() - Get the tree page for a given height and page index.
+ * @forest: The block map forest.
+ * @root_index: The root index of the tree to search.
+ * @height: The height in the tree.
+ * @page_index: The page index.
*
* Return: The requested page.
*/
@@ -2211,6 +2231,7 @@ static void allocate_block_map_page(struct block_map_zone *zone,
/**
* vdo_find_block_map_slot() - Find the block map slot in which the block map entry for a data_vio
* resides and cache that result in the data_vio.
+ * @data_vio: The data vio.
*
* All ancestors in the tree will be allocated or loaded, as needed.
*/
@@ -2435,6 +2456,7 @@ static void deforest(struct forest *forest, size_t first_page_segment)
/**
* make_forest() - Make a collection of trees for a block_map, expanding the existing forest if
* there is one.
+ * @map: The block map.
* @entries: The number of entries the block map will hold.
*
* Return: VDO_SUCCESS or an error.
@@ -2476,6 +2498,7 @@ static int make_forest(struct block_map *map, block_count_t entries)
/**
* replace_forest() - Replace a block_map's forest with the already-prepared larger forest.
+ * @map: The block map.
*/
static void replace_forest(struct block_map *map)
{
@@ -2492,6 +2515,7 @@ static void replace_forest(struct block_map *map)
/**
* finish_cursor() - Finish the traversal of a single tree. If it was the last cursor, finish the
* traversal.
+ * @cursor: The cursor to complete.
*/
static void finish_cursor(struct cursor *cursor)
{
@@ -2549,6 +2573,7 @@ static void traversal_endio(struct bio *bio)
/**
* traverse() - Traverse a single block map tree.
+ * @cursor: A cursor tracking traversal progress.
*
* This is the recursive heart of the traversal process.
*/
@@ -2619,6 +2644,7 @@ static void traverse(struct cursor *cursor)
/**
* launch_cursor() - Start traversing a single block map tree now that the cursor has a VIO with
* which to load pages.
+ * @waiter: The parent of the cursor to launch.
* @context: The pooled_vio just acquired.
*
* Implements waiter_callback_fn.
@@ -2636,6 +2662,8 @@ static void launch_cursor(struct vdo_waiter *waiter, void *context)
/**
* compute_boundary() - Compute the number of pages used at each level of the given root's tree.
+ * @map: The block map.
+ * @root_index: The tree root index.
*
* Return: The list of page counts as a boundary structure.
*/
@@ -2668,6 +2696,7 @@ static struct boundary compute_boundary(struct block_map *map, root_count_t root
/**
* vdo_traverse_forest() - Walk the entire forest of a block map.
+ * @map: The block map.
* @callback: A function to call with the pbn of each allocated node in the forest.
* @completion: The completion to notify on each traversed PBN, and when traversal completes.
*/
@@ -2707,6 +2736,9 @@ void vdo_traverse_forest(struct block_map *map, vdo_entry_callback_fn callback,
/**
* initialize_block_map_zone() - Initialize the per-zone portions of the block map.
+ * @map: The block map.
+ * @zone_number: The zone to initialize.
+ * @cache_size: The total block map cache size.
* @maximum_age: The number of journal blocks before a dirtied page is considered old and must be
* written out.
*/
@@ -3091,6 +3123,7 @@ static void fetch_mapping_page(struct data_vio *data_vio, bool modifiable,
/**
* clear_mapped_location() - Clear a data_vio's mapped block location, setting it to be unmapped.
+ * @data_vio: The data vio.
*
* This indicates the block map entry for the logical block is either unmapped or corrupted.
*/
@@ -3104,6 +3137,8 @@ static void clear_mapped_location(struct data_vio *data_vio)
/**
* set_mapped_location() - Decode and validate a block map entry, and set the mapped location of a
* data_vio.
+ * @data_vio: The data vio.
+ * @entry: The new mapped entry to set.
*
* Return: VDO_SUCCESS or VDO_BAD_MAPPING if the map entry is invalid or an error code for any
* other failure
diff --git a/drivers/md/dm-vdo/completion.c b/drivers/md/dm-vdo/completion.c
index 5ad85334632d..2f00acbb3b2b 100644
--- a/drivers/md/dm-vdo/completion.c
+++ b/drivers/md/dm-vdo/completion.c
@@ -65,6 +65,8 @@ static inline void assert_incomplete(struct vdo_completion *completion)
/**
* vdo_set_completion_result() - Set the result of a completion.
+ * @completion: The completion to update.
+ * @result: The result to set.
*
* Older errors will not be masked.
*/
@@ -77,6 +79,7 @@ void vdo_set_completion_result(struct vdo_completion *completion, int result)
/**
* vdo_launch_completion_with_priority() - Run or enqueue a completion.
+ * @completion: The completion to launch.
* @priority: The priority at which to enqueue the completion.
*
* If called on the correct thread (i.e. the one specified in the completion's callback_thread_id
@@ -125,6 +128,8 @@ void vdo_enqueue_completion(struct vdo_completion *completion,
/**
* vdo_requeue_completion_if_needed() - Requeue a completion if not called on the specified thread.
+ * @completion: The completion to requeue.
+ * @callback_thread_id: The thread on which to requeue the completion.
*
* Return: True if the completion was requeued; callers may not access the completion in this case.
*/
diff --git a/drivers/md/dm-vdo/data-vio.c b/drivers/md/dm-vdo/data-vio.c
index 262e11581f2d..3333e1e5b02e 100644
--- a/drivers/md/dm-vdo/data-vio.c
+++ b/drivers/md/dm-vdo/data-vio.c
@@ -227,6 +227,7 @@ static inline u64 get_arrival_time(struct bio *bio)
/**
* check_for_drain_complete_locked() - Check whether a data_vio_pool has no outstanding data_vios
* or waiters while holding the pool's lock.
+ * @pool: The data_vio pool.
*/
static bool check_for_drain_complete_locked(struct data_vio_pool *pool)
{
@@ -387,6 +388,7 @@ struct data_vio_compression_status advance_data_vio_compression_stage(struct dat
/**
* cancel_data_vio_compression() - Prevent this data_vio from being compressed or packed.
+ * @data_vio: The data_vio.
*
* Return: true if the data_vio is in the packer and the caller was the first caller to cancel it.
*/
@@ -483,6 +485,8 @@ static void attempt_logical_block_lock(struct vdo_completion *completion)
/**
* launch_data_vio() - (Re)initialize a data_vio to have a new logical block number, keeping the
* same parent and other state and send it on its way.
+ * @data_vio: The data_vio to launch.
+ * @lbn: The logical block number.
*/
static void launch_data_vio(struct data_vio *data_vio, logical_block_number_t lbn)
{
@@ -641,6 +645,7 @@ static void update_limiter(struct limiter *limiter)
/**
* schedule_releases() - Ensure that release processing is scheduled.
+ * @pool: The data_vio pool.
*
* If this call switches the state to processing, enqueue. Otherwise, some other thread has already
* done so.
@@ -768,6 +773,8 @@ static void initialize_limiter(struct limiter *limiter, struct data_vio_pool *po
/**
* initialize_data_vio() - Allocate the components of a data_vio.
+ * @data_vio: The data_vio to initialize.
+ * @vdo: The vdo containing the data_vio.
*
* The caller is responsible for cleaning up the data_vio on error.
*
@@ -880,6 +887,7 @@ int make_data_vio_pool(struct vdo *vdo, data_vio_count_t pool_size,
/**
* free_data_vio_pool() - Free a data_vio_pool and the data_vios in it.
+ * @pool: The data_vio pool to free.
*
* All data_vios must be returned to the pool before calling this function.
*/
@@ -944,6 +952,8 @@ static void wait_permit(struct limiter *limiter, struct bio *bio)
/**
* vdo_launch_bio() - Acquire a data_vio from the pool, assign the bio to it, and launch it.
+ * @pool: The data_vio pool.
+ * @bio: The bio to launch.
*
* This will block if data_vios or discard permits are not available.
*/
@@ -994,6 +1004,7 @@ static void assert_on_vdo_cpu_thread(const struct vdo *vdo, const char *name)
/**
* drain_data_vio_pool() - Wait asynchronously for all data_vios to be returned to the pool.
+ * @pool: The data_vio pool.
* @completion: The completion to notify when the pool has drained.
*/
void drain_data_vio_pool(struct data_vio_pool *pool, struct vdo_completion *completion)
@@ -1005,6 +1016,7 @@ void drain_data_vio_pool(struct data_vio_pool *pool, struct vdo_completion *comp
/**
* resume_data_vio_pool() - Resume a data_vio pool.
+ * @pool: The data_vio pool.
* @completion: The completion to notify when the pool has resumed.
*/
void resume_data_vio_pool(struct data_vio_pool *pool, struct vdo_completion *completion)
@@ -1024,6 +1036,7 @@ static void dump_limiter(const char *name, struct limiter *limiter)
/**
* dump_data_vio_pool() - Dump a data_vio pool to the log.
+ * @pool: The data_vio pool.
* @dump_vios: Whether to dump the details of each busy data_vio as well.
*/
void dump_data_vio_pool(struct data_vio_pool *pool, bool dump_vios)
@@ -1114,6 +1127,7 @@ static void perform_cleanup_stage(struct data_vio *data_vio,
/**
* release_allocated_lock() - Release the PBN lock and/or the reference on the allocated block at
* the end of processing a data_vio.
+ * @completion: The data_vio holding the lock.
*/
static void release_allocated_lock(struct vdo_completion *completion)
{
@@ -1194,6 +1208,7 @@ static void transfer_lock(struct data_vio *data_vio, struct lbn_lock *lock)
/**
* release_logical_lock() - Release the logical block lock and flush generation lock at the end of
* processing a data_vio.
+ * @completion: The data_vio holding the lock.
*/
static void release_logical_lock(struct vdo_completion *completion)
{
@@ -1228,6 +1243,7 @@ static void clean_hash_lock(struct vdo_completion *completion)
/**
* finish_cleanup() - Make some assertions about a data_vio which has finished cleaning up.
+ * @data_vio: The data_vio.
*
* If it is part of a multi-block discard, starts on the next block, otherwise, returns it to the
* pool.
@@ -1342,6 +1358,7 @@ void handle_data_vio_error(struct vdo_completion *completion)
/**
* get_data_vio_operation_name() - Get the name of the last asynchronous operation performed on a
* data_vio.
+ * @data_vio: The data_vio.
*/
const char *get_data_vio_operation_name(struct data_vio *data_vio)
{
@@ -1355,7 +1372,7 @@ const char *get_data_vio_operation_name(struct data_vio *data_vio)
/**
* data_vio_allocate_data_block() - Allocate a data block.
- *
+ * @data_vio: The data_vio.
* @write_lock_type: The type of write lock to obtain on the block.
* @callback: The callback which will attempt an allocation in the current zone and continue if it
* succeeds.
@@ -1379,6 +1396,7 @@ void data_vio_allocate_data_block(struct data_vio *data_vio,
/**
* release_data_vio_allocation_lock() - Release the PBN lock on a data_vio's allocated block.
+ * @data_vio: The data_vio.
* @reset: If true, the allocation will be reset (i.e. any allocated pbn will be forgotten).
*
* If the reference to the locked block is still provisional, it will be released as well.
@@ -1399,6 +1417,7 @@ void release_data_vio_allocation_lock(struct data_vio *data_vio, bool reset)
/**
* uncompress_data_vio() - Uncompress the data a data_vio has just read.
+ * @data_vio: The data_vio.
* @mapping_state: The mapping state indicating which fragment to decompress.
* @buffer: The buffer to receive the uncompressed data.
*/
@@ -1519,6 +1538,7 @@ static void complete_zero_read(struct vdo_completion *completion)
/**
* read_block() - Read a block asynchronously.
+ * @completion: The data_vio doing the read.
*
* This is the callback registered in read_block_mapping().
*/
@@ -1675,6 +1695,7 @@ static void journal_remapping(struct vdo_completion *completion)
/**
* read_old_block_mapping() - Get the previous PBN/LBN mapping of an in-progress write.
+ * @completion: The data_vio doing the read.
*
* Gets the previous PBN mapped to this LBN from the block map, so as to make an appropriate
* journal entry referencing the removal of this LBN->PBN mapping.
@@ -1704,6 +1725,7 @@ void update_metadata_for_data_vio_write(struct data_vio *data_vio, struct pbn_lo
/**
* pack_compressed_data() - Attempt to pack the compressed data_vio into a block.
+ * @completion: The data_vio.
*
* This is the callback registered in launch_compress_data_vio().
*/
@@ -1725,6 +1747,7 @@ static void pack_compressed_data(struct vdo_completion *completion)
/**
* compress_data_vio() - Do the actual work of compressing the data on a CPU queue.
+ * @completion: The data_vio.
*
* This callback is registered in launch_compress_data_vio().
*/
@@ -1754,6 +1777,7 @@ static void compress_data_vio(struct vdo_completion *completion)
/**
* launch_compress_data_vio() - Continue a write by attempting to compress the data.
+ * @data_vio: The data_vio.
*
* This is a re-entry point to vio_write used by hash locks.
*/
@@ -1796,7 +1820,8 @@ void launch_compress_data_vio(struct data_vio *data_vio)
/**
* hash_data_vio() - Hash the data in a data_vio and set the hash zone (which also flags the record
* name as set).
-
+ * @completion: The data_vio.
+ *
* This callback is registered in prepare_for_dedupe().
*/
static void hash_data_vio(struct vdo_completion *completion)
@@ -1832,6 +1857,7 @@ static void prepare_for_dedupe(struct data_vio *data_vio)
/**
* write_bio_finished() - This is the bio_end_io function registered in write_block() to be called
* when a data_vio's write to the underlying storage has completed.
+ * @bio: The bio which has just completed.
*/
static void write_bio_finished(struct bio *bio)
{
@@ -1884,6 +1910,7 @@ void write_data_vio(struct data_vio *data_vio)
/**
* acknowledge_write_callback() - Acknowledge a write to the requestor.
+ * @completion: The data_vio.
*
* This callback is registered in allocate_block() and continue_write_with_block_map_slot().
*/
@@ -1909,6 +1936,7 @@ static void acknowledge_write_callback(struct vdo_completion *completion)
/**
* allocate_block() - Attempt to allocate a block in the current allocation zone.
+ * @completion: The data_vio.
*
* This callback is registered in continue_write_with_block_map_slot().
*/
@@ -1941,6 +1969,7 @@ static void allocate_block(struct vdo_completion *completion)
/**
* handle_allocation_error() - Handle an error attempting to allocate a block.
+ * @completion: The data_vio.
*
* This error handler is registered in continue_write_with_block_map_slot().
*/
@@ -1970,6 +1999,7 @@ static int assert_is_discard(struct data_vio *data_vio)
/**
* continue_data_vio_with_block_map_slot() - Read the data_vio's mapping from the block map.
+ * @completion: The data_vio to continue.
*
* This callback is registered in launch_read_data_vio().
*/
diff --git a/drivers/md/dm-vdo/dedupe.c b/drivers/md/dm-vdo/dedupe.c
index 4d983092a152..75a26f3f4461 100644
--- a/drivers/md/dm-vdo/dedupe.c
+++ b/drivers/md/dm-vdo/dedupe.c
@@ -917,6 +917,8 @@ static int __must_check acquire_lock(struct hash_zone *zone,
/**
* enter_forked_lock() - Bind the data_vio to a new hash lock.
+ * @waiter: The data_vio's waiter link.
+ * @context: The new hash lock.
*
* Implements waiter_callback_fn. Binds the data_vio that was waiting to a new hash lock and waits
* on that lock.
@@ -971,7 +973,7 @@ static void fork_hash_lock(struct hash_lock *old_lock, struct data_vio *new_agen
* path.
* @lock: The hash lock.
* @data_vio: The data_vio to deduplicate using the hash lock.
- * @has_claim: true if the data_vio already has claimed an increment from the duplicate lock.
+ * @has_claim: True if the data_vio already has claimed an increment from the duplicate lock.
*
* If no increments are available, this will roll over to a new hash lock and launch the data_vio
* as the writing agent for that lock.
@@ -996,7 +998,7 @@ static void launch_dedupe(struct hash_lock *lock, struct data_vio *data_vio,
* true copy of their data on disk.
* @lock: The hash lock.
* @agent: The data_vio acting as the agent for the lock.
- * @agent_is_done: true only if the agent has already written or deduplicated against its data.
+ * @agent_is_done: True only if the agent has already written or deduplicated against its data.
*
* If the agent itself needs to deduplicate, an increment for it must already have been claimed
* from the duplicate lock, ensuring the hash lock will still have a data_vio holding it.
@@ -2146,8 +2148,8 @@ static void start_expiration_timer(struct dedupe_context *context)
/**
* report_dedupe_timeouts() - Record and eventually report that some dedupe requests reached their
* expiration time without getting answers, so we timed them out.
- * @zones: the hash zones.
- * @timeouts: the number of newly timed out requests.
+ * @zones: The hash zones.
+ * @timeouts: The number of newly timed out requests.
*/
static void report_dedupe_timeouts(struct hash_zones *zones, unsigned int timeouts)
{
@@ -2509,6 +2511,8 @@ static void initiate_suspend_index(struct admin_state *state)
/**
* suspend_index() - Suspend the UDS index prior to draining hash zones.
+ * @context: Not used.
+ * @completion: The completion for the suspend operation.
*
* Implements vdo_action_preamble_fn
*/
@@ -2521,21 +2525,13 @@ static void suspend_index(void *context, struct vdo_completion *completion)
initiate_suspend_index);
}
-/**
- * initiate_drain() - Initiate a drain.
- *
- * Implements vdo_admin_initiator_fn.
- */
+/** Implements vdo_admin_initiator_fn. */
static void initiate_drain(struct admin_state *state)
{
check_for_drain_complete(container_of(state, struct hash_zone, state));
}
-/**
- * drain_hash_zone() - Drain a hash zone.
- *
- * Implements vdo_zone_action_fn.
- */
+/** Implements vdo_zone_action_fn. */
static void drain_hash_zone(void *context, zone_count_t zone_number,
struct vdo_completion *parent)
{
@@ -2572,6 +2568,8 @@ static void launch_dedupe_state_change(struct hash_zones *zones)
/**
* resume_index() - Resume the UDS index prior to resuming hash zones.
+ * @context: Not used.
+ * @parent: The completion for the resume operation.
*
* Implements vdo_action_preamble_fn
*/
@@ -2602,11 +2600,7 @@ static void resume_index(void *context, struct vdo_completion *parent)
vdo_finish_completion(parent);
}
-/**
- * resume_hash_zone() - Resume a hash zone.
- *
- * Implements vdo_zone_action_fn.
- */
+/** Implements vdo_zone_action_fn. */
static void resume_hash_zone(void *context, zone_count_t zone_number,
struct vdo_completion *parent)
{
@@ -2634,7 +2628,7 @@ void vdo_resume_hash_zones(struct hash_zones *zones, struct vdo_completion *pare
/**
* get_hash_zone_statistics() - Add the statistics for this hash zone to the tally for all zones.
* @zone: The hash zone to query.
- * @tally: The tally
+ * @tally: The tally.
*/
static void get_hash_zone_statistics(const struct hash_zone *zone,
struct hash_lock_statistics *tally)
@@ -2680,8 +2674,8 @@ static void get_index_statistics(struct hash_zones *zones,
/**
* vdo_get_dedupe_statistics() - Tally the statistics from all the hash zones and the UDS index.
- * @zones: The hash zones to query
- * @stats: A structure to store the statistics
+ * @zones: The hash zones to query.
+ * @stats: A structure to store the statistics.
*
* Return: The sum of the hash lock statistics from all hash zones plus the statistics from the UDS
* index
@@ -2856,9 +2850,9 @@ void vdo_set_dedupe_index_min_timer_interval(unsigned int value)
/**
* acquire_context() - Acquire a dedupe context from a hash_zone if any are available.
- * @zone: the hash zone
+ * @zone: The hash zone.
*
- * Return: A dedupe_context or NULL if none are available
+ * Return: A dedupe_context or NULL if none are available.
*/
static struct dedupe_context * __must_check acquire_context(struct hash_zone *zone)
{
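A second cleanup starts in dedupe.c and repeats in flush.c, logical-zone.c, and packer.c below: static helpers whose only documentation was the vdo callback type they implement lose the boilerplate kernel-doc block in favor of a single line naming the interface, e.g.:

/** Implements vdo_admin_initiator_fn. */
static void initiate_drain(struct admin_state *state);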
diff --git a/drivers/md/dm-vdo/dm-vdo-target.c b/drivers/md/dm-vdo/dm-vdo-target.c
index 0e04c2021682..6af40d40f255 100644
--- a/drivers/md/dm-vdo/dm-vdo-target.c
+++ b/drivers/md/dm-vdo/dm-vdo-target.c
@@ -1144,6 +1144,7 @@ static bool vdo_uses_device(struct vdo *vdo, const void *context)
/**
* get_thread_id_for_phase() - Get the thread id for the current phase of the admin operation in
* progress.
+ * @vdo: The vdo.
*/
static thread_id_t __must_check get_thread_id_for_phase(struct vdo *vdo)
{
@@ -1188,9 +1189,9 @@ static struct vdo_completion *prepare_admin_completion(struct vdo *vdo,
/**
* advance_phase() - Increment the phase of the current admin operation and prepare the admin
* completion to run on the thread for the next phase.
- * @vdo: The on which an admin operation is being performed
+ * @vdo: The vdo on which an admin operation is being performed.
*
- * Return: The current phase
+ * Return: The current phase.
*/
static u32 advance_phase(struct vdo *vdo)
{
diff --git a/drivers/md/dm-vdo/encodings.c b/drivers/md/dm-vdo/encodings.c
index b7cc0f41caca..dd59691be840 100644
--- a/drivers/md/dm-vdo/encodings.c
+++ b/drivers/md/dm-vdo/encodings.c
@@ -432,7 +432,10 @@ static void encode_block_map_state_2_0(u8 *buffer, size_t *offset,
/**
* vdo_compute_new_forest_pages() - Compute the number of pages which must be allocated at each
* level in order to grow the forest to a new number of entries.
+ * @root_count: The number of block map roots.
+ * @old_sizes: The sizes of the old tree segments.
* @entries: The new number of entries the block map must address.
+ * @new_sizes: The sizes of the new tree segments.
*
* Return: The total number of non-leaf pages required.
*/
@@ -462,6 +465,9 @@ block_count_t vdo_compute_new_forest_pages(root_count_t root_count,
/**
* encode_recovery_journal_state_7_0() - Encode the state of a recovery journal.
+ * @buffer: A buffer to store the encoding.
+ * @offset: The offset in the buffer at which to encode.
+ * @state: The recovery journal state to encode.
*
* Return: VDO_SUCCESS or an error code.
*/
@@ -484,6 +490,7 @@ static void encode_recovery_journal_state_7_0(u8 *buffer, size_t *offset,
/**
* decode_recovery_journal_state_7_0() - Decode the state of a recovery journal saved in a buffer.
* @buffer: The buffer containing the saved state.
+ * @offset: The offset to start decoding from.
* @state: A pointer to a recovery journal state to hold the result of a successful decode.
*
* Return: VDO_SUCCESS or an error code.
@@ -544,6 +551,9 @@ const char *vdo_get_journal_operation_name(enum journal_operation operation)
/**
* encode_slab_depot_state_2_0() - Encode the state of a slab depot into a buffer.
+ * @buffer: A buffer to store the encoding.
+ * @offset: The offset in the buffer at which to encode.
+ * @state: The slab depot state to encode.
*/
static void encode_slab_depot_state_2_0(u8 *buffer, size_t *offset,
struct slab_depot_state_2_0 state)
@@ -570,6 +580,9 @@ static void encode_slab_depot_state_2_0(u8 *buffer, size_t *offset,
/**
* decode_slab_depot_state_2_0() - Decode slab depot component state version 2.0 from a buffer.
+ * @buffer: The buffer being decoded.
+ * @offset: The offset to start decoding from.
+ * @state: A pointer to a slab depot state to hold the decoded result.
*
* Return: VDO_SUCCESS or an error code.
*/
@@ -1156,6 +1169,9 @@ static struct vdo_component unpack_vdo_component_41_0(struct packed_vdo_componen
/**
* decode_vdo_component() - Decode the component data for the vdo itself out of the super block.
+ * @buffer: The buffer being decoded.
+ * @offset: The offset to start decoding from.
+ * @component: The vdo component structure to decode into.
*
* Return: VDO_SUCCESS or an error.
*/
@@ -1290,7 +1306,7 @@ void vdo_destroy_component_states(struct vdo_component_states *states)
* understand.
* @buffer: The buffer being decoded.
* @offset: The offset to start decoding from.
- * @geometry: The vdo geometry
+ * @geometry: The vdo geometry.
* @states: An object to hold the successfully decoded state.
*
* Return: VDO_SUCCESS or an error.
@@ -1329,7 +1345,7 @@ static int __must_check decode_components(u8 *buffer, size_t *offset,
/**
* vdo_decode_component_states() - Decode the payload of a super block.
* @buffer: The buffer containing the encoded super block contents.
- * @geometry: The vdo geometry
+ * @geometry: The vdo geometry.
* @states: A pointer to hold the decoded states.
*
* Return: VDO_SUCCESS or an error.
@@ -1383,6 +1399,9 @@ int vdo_validate_component_states(struct vdo_component_states *states,
/**
* vdo_encode_component_states() - Encode the state of all vdo components in the super block.
+ * @buffer: A buffer to store the encoding.
+ * @offset: The offset into the buffer to start the encoding.
+ * @states: The component states to encode.
*/
static void vdo_encode_component_states(u8 *buffer, size_t *offset,
const struct vdo_component_states *states)
@@ -1402,6 +1421,8 @@ static void vdo_encode_component_states(u8 *buffer, size_t *offset,
/**
* vdo_encode_super_block() - Encode a super block into its on-disk representation.
+ * @buffer: A buffer to store the encoding.
+ * @states: The component states to encode.
*/
void vdo_encode_super_block(u8 *buffer, struct vdo_component_states *states)
{
@@ -1426,6 +1447,7 @@ void vdo_encode_super_block(u8 *buffer, struct vdo_component_states *states)
/**
* vdo_decode_super_block() - Decode a super block from its on-disk representation.
+ * @buffer: The buffer to decode from.
*/
int vdo_decode_super_block(u8 *buffer)
{
diff --git a/drivers/md/dm-vdo/flush.c b/drivers/md/dm-vdo/flush.c
index dd4fdee2ca0c..82a259ef1601 100644
--- a/drivers/md/dm-vdo/flush.c
+++ b/drivers/md/dm-vdo/flush.c
@@ -522,11 +522,7 @@ static void vdo_complete_flush(struct vdo_flush *flush)
vdo_enqueue_completion(completion, BIO_Q_FLUSH_PRIORITY);
}
-/**
- * initiate_drain() - Initiate a drain.
- *
- * Implements vdo_admin_initiator_fn.
- */
+/** Implements vdo_admin_initiator_fn. */
static void initiate_drain(struct admin_state *state)
{
check_for_drain_complete(container_of(state, struct flusher, state));
diff --git a/drivers/md/dm-vdo/funnel-workqueue.c b/drivers/md/dm-vdo/funnel-workqueue.c
index 0613c82bbe8e..8a79b33b8b09 100644
--- a/drivers/md/dm-vdo/funnel-workqueue.c
+++ b/drivers/md/dm-vdo/funnel-workqueue.c
@@ -372,6 +372,13 @@ static int make_simple_work_queue(const char *thread_name_prefix, const char *na
/**
* vdo_make_work_queue() - Create a work queue; if multiple threads are requested, completions will
* be distributed to them in round-robin fashion.
+ * @thread_name_prefix: A prefix for the thread names to identify them as vdo threads.
+ * @name: A base name to identify this queue.
+ * @owner: The vdo_thread structure to manage this queue.
+ * @type: The type of queue to create.
+ * @thread_count: The number of actual threads handling this queue.
+ * @thread_privates: An array of private contexts, one for each thread; may be NULL.
+ * @queue_ptr: A pointer to return the new work queue.
*
* Each queue is associated with a struct vdo_thread which has a single vdo thread id. Regardless
* of the actual number of queues and threads allocated here, code outside of the queue
diff --git a/drivers/md/dm-vdo/io-submitter.c b/drivers/md/dm-vdo/io-submitter.c
index 11d47770b54d..e26d75f8366d 100644
--- a/drivers/md/dm-vdo/io-submitter.c
+++ b/drivers/md/dm-vdo/io-submitter.c
@@ -118,6 +118,7 @@ static void send_bio_to_device(struct vio *vio, struct bio *bio)
/**
* vdo_submit_vio() - Submits a vio's bio to the underlying block device. May block if the device
* is busy. This callback should be used by vios which did not attempt to merge.
+ * @completion: The vio to submit.
*/
void vdo_submit_vio(struct vdo_completion *completion)
{
@@ -133,7 +134,7 @@ void vdo_submit_vio(struct vdo_completion *completion)
* The list will always contain at least one entry (the bio for the vio on which it is called), but
* other bios may have been merged with it as well.
*
- * Return: bio The head of the bio list to submit.
+ * Return: The head of the bio list to submit.
*/
static struct bio *get_bio_list(struct vio *vio)
{
@@ -158,6 +159,7 @@ static struct bio *get_bio_list(struct vio *vio)
/**
* submit_data_vio() - Submit a data_vio's bio to the storage below along with
* any bios that have been merged with it.
+ * @completion: The vio to submit.
*
* Context: This call may block and so should only be called from a bio thread.
*/
@@ -184,7 +186,7 @@ static void submit_data_vio(struct vdo_completion *completion)
* There are two types of merging possible, forward and backward, which are distinguished by a flag
* that uses kernel elevator terminology.
*
- * Return: the vio to merge to, NULL if no merging is possible.
+ * Return: The vio to merge to, NULL if no merging is possible.
*/
static struct vio *get_mergeable_locked(struct int_map *map, struct vio *vio,
bool back_merge)
@@ -262,7 +264,7 @@ static int merge_to_next_head(struct int_map *bio_map, struct vio *vio,
*
* Currently this is only used for data_vios, but is broken out for future use with metadata vios.
*
- * Return: whether or not the vio was merged.
+ * Return: Whether or not the vio was merged.
*/
static bool try_bio_map_merge(struct vio *vio)
{
@@ -306,7 +308,7 @@ static bool try_bio_map_merge(struct vio *vio)
/**
* vdo_submit_data_vio() - Submit I/O for a data_vio.
- * @data_vio: the data_vio for which to issue I/O.
+ * @data_vio: The data_vio for which to issue I/O.
*
* If possible, this I/O will be merged with other pending I/Os. Otherwise, the data_vio will be sent to
* the appropriate bio zone directly.
@@ -321,13 +323,13 @@ void vdo_submit_data_vio(struct data_vio *data_vio)
/**
* __submit_metadata_vio() - Submit I/O for a metadata vio.
- * @vio: the vio for which to issue I/O
- * @physical: the physical block number to read or write
- * @callback: the bio endio function which will be called after the I/O completes
- * @error_handler: the handler for submission or I/O errors (may be NULL)
- * @operation: the type of I/O to perform
- * @data: the buffer to read or write (may be NULL)
- * @size: the I/O amount in bytes
+ * @vio: The vio for which to issue I/O.
+ * @physical: The physical block number to read or write.
+ * @callback: The bio endio function which will be called after the I/O completes.
+ * @error_handler: The handler for submission or I/O errors; may be NULL.
+ * @operation: The type of I/O to perform.
+ * @data: The buffer to read or write; may be NULL.
+ * @size: The I/O amount in bytes.
*
* The vio is enqueued on a vdo bio queue so that bio submission (which may block) does not block
* other vdo threads.
@@ -441,7 +443,7 @@ int vdo_make_io_submitter(unsigned int thread_count, unsigned int rotation_inter
/**
* vdo_cleanup_io_submitter() - Tear down the io_submitter fields as needed for a physical layer.
- * @io_submitter: The I/O submitter data to tear down (may be NULL).
+ * @io_submitter: The I/O submitter data to tear down; may be NULL.
*/
void vdo_cleanup_io_submitter(struct io_submitter *io_submitter)
{
diff --git a/drivers/md/dm-vdo/logical-zone.c b/drivers/md/dm-vdo/logical-zone.c
index 026f031ffc9e..0a27e60a9dfd 100644
--- a/drivers/md/dm-vdo/logical-zone.c
+++ b/drivers/md/dm-vdo/logical-zone.c
@@ -159,21 +159,13 @@ static void check_for_drain_complete(struct logical_zone *zone)
vdo_finish_draining(&zone->state);
}
-/**
- * initiate_drain() - Initiate a drain.
- *
- * Implements vdo_admin_initiator_fn.
- */
+/** Implements vdo_admin_initiator_fn. */
static void initiate_drain(struct admin_state *state)
{
check_for_drain_complete(container_of(state, struct logical_zone, state));
}
-/**
- * drain_logical_zone() - Drain a logical zone.
- *
- * Implements vdo_zone_action_fn.
- */
+/** Implements vdo_zone_action_fn. */
static void drain_logical_zone(void *context, zone_count_t zone_number,
struct vdo_completion *parent)
{
@@ -192,11 +184,7 @@ void vdo_drain_logical_zones(struct logical_zones *zones,
parent);
}
-/**
- * resume_logical_zone() - Resume a logical zone.
- *
- * Implements vdo_zone_action_fn.
- */
+/** Implements vdo_zone_action_fn. */
static void resume_logical_zone(void *context, zone_count_t zone_number,
struct vdo_completion *parent)
{
@@ -356,7 +344,7 @@ struct physical_zone *vdo_get_next_allocation_zone(struct logical_zone *zone)
/**
* vdo_dump_logical_zone() - Dump information about a logical zone to the log for debugging.
- * @zone: The zone to dump
+ * @zone: The zone to dump.
*
* Context: the information is dumped in a thread-unsafe fashion.
*
diff --git a/drivers/md/dm-vdo/packer.c b/drivers/md/dm-vdo/packer.c
index f70f5edabc10..666be6d557e1 100644
--- a/drivers/md/dm-vdo/packer.c
+++ b/drivers/md/dm-vdo/packer.c
@@ -35,10 +35,10 @@ static const struct version_number COMPRESSED_BLOCK_1_0 = {
/**
* vdo_get_compressed_block_fragment() - Get a reference to a compressed fragment from a compressed
* block.
- * @mapping_state [in] The mapping state for the look up.
- * @compressed_block [in] The compressed block that was read from disk.
- * @fragment_offset [out] The offset of the fragment within a compressed block.
- * @fragment_size [out] The size of the fragment.
+ * @mapping_state: The mapping state describing the fragment.
+ * @block: The compressed block that was read from disk.
+ * @fragment_offset: The offset of the fragment within the compressed block.
+ * @fragment_size: The size of the fragment.
*
* Return: If a valid compressed fragment is found, VDO_SUCCESS; otherwise, VDO_INVALID_FRAGMENT if
* the fragment is invalid.
@@ -382,6 +382,7 @@ static void initialize_compressed_block(struct compressed_block *block, u16 size
* @compression: The agent's compression_state to pack in to.
* @data_vio: The data_vio to pack.
* @offset: The offset into the compressed block at which to pack the fragment.
+ * @slot: The slot number in the compressed block.
* @block: The compressed block which will be written out when batch is fully packed.
*
* Return: The new amount of space used.
@@ -705,11 +706,7 @@ void vdo_increment_packer_flush_generation(struct packer *packer)
vdo_flush_packer(packer);
}
-/**
- * initiate_drain() - Initiate a drain.
- *
- * Implements vdo_admin_initiator_fn.
- */
+/** Implements vdo_admin_initiator_fn. */
static void initiate_drain(struct admin_state *state)
{
struct packer *packer = container_of(state, struct packer, state);
diff --git a/drivers/md/dm-vdo/physical-zone.c b/drivers/md/dm-vdo/physical-zone.c
index a43b5c45fab7..686eb7d714e6 100644
--- a/drivers/md/dm-vdo/physical-zone.c
+++ b/drivers/md/dm-vdo/physical-zone.c
@@ -60,7 +60,7 @@ static inline bool has_lock_type(const struct pbn_lock *lock, enum pbn_lock_type
* vdo_is_pbn_read_lock() - Check whether a pbn_lock is a read lock.
* @lock: The lock to check.
*
- * Return: true if the lock is a read lock.
+ * Return: True if the lock is a read lock.
*/
bool vdo_is_pbn_read_lock(const struct pbn_lock *lock)
{
@@ -75,6 +75,7 @@ static inline void set_pbn_lock_type(struct pbn_lock *lock, enum pbn_lock_type t
/**
* vdo_downgrade_pbn_write_lock() - Downgrade a PBN write lock to a PBN read lock.
* @lock: The PBN write lock to downgrade.
+ * @compressed_write: True if the written block was a compressed block.
*
* The lock holder count is cleared and the caller is responsible for setting the new count.
*/
@@ -582,7 +583,7 @@ static bool continue_allocating(struct data_vio *data_vio)
* that fails try the next if possible.
* @data_vio: The data_vio needing an allocation.
*
- * Return: true if a block was allocated, if not the data_vio will have been dispatched so the
+ * Return: True if a block was allocated, if not the data_vio will have been dispatched so the
* caller must not touch it.
*/
bool vdo_allocate_block_in_zone(struct data_vio *data_vio)
diff --git a/drivers/md/dm-vdo/recovery-journal.c b/drivers/md/dm-vdo/recovery-journal.c
index de58184f538f..9cc0f0ff1664 100644
--- a/drivers/md/dm-vdo/recovery-journal.c
+++ b/drivers/md/dm-vdo/recovery-journal.c
@@ -109,7 +109,7 @@ static atomic_t *get_decrement_counter(struct recovery_journal *journal,
* @journal: The recovery journal.
* @lock_number: The lock to check.
*
- * Return: true if the journal zone is locked.
+ * Return: True if the journal zone is locked.
*/
static bool is_journal_zone_locked(struct recovery_journal *journal,
block_count_t lock_number)
@@ -217,7 +217,7 @@ static struct recovery_journal_block * __must_check pop_free_list(struct recover
* A block is dirty if it has any uncommitted entries, which includes both entries not yet
* written and entries written but not yet acknowledged.
*
- * Return: true if the block has any uncommitted entries.
+ * Return: True if the block has any uncommitted entries.
*/
static inline bool __must_check is_block_dirty(const struct recovery_journal_block *block)
{
@@ -228,7 +228,7 @@ static inline bool __must_check is_block_dirty(const struct recovery_journal_blo
* is_block_empty() - Check whether a journal block is empty.
* @block: The block to check.
*
- * Return: true if the block has no entries.
+ * Return: True if the block has no entries.
*/
static inline bool __must_check is_block_empty(const struct recovery_journal_block *block)
{
@@ -239,7 +239,7 @@ static inline bool __must_check is_block_empty(const struct recovery_journal_blo
* is_block_full() - Check whether a journal block is full.
* @block: The block to check.
*
- * Return: true if the block is full.
+ * Return: True if the block is full.
*/
static inline bool __must_check is_block_full(const struct recovery_journal_block *block)
{
@@ -260,6 +260,8 @@ static void assert_on_journal_thread(struct recovery_journal *journal,
/**
* continue_waiter() - Release a data_vio from the journal.
+ * @waiter: The data_vio waiting on journal activity.
+ * @context: The result of the journal operation.
*
* Invoked whenever a data_vio is to be released from the journal, either because its entry was
* committed to disk, or because there was an error. Implements waiter_callback_fn.
@@ -273,7 +275,7 @@ static void continue_waiter(struct vdo_waiter *waiter, void *context)
* has_block_waiters() - Check whether the journal has any waiters on any blocks.
* @journal: The journal in question.
*
- * Return: true if any block has a waiter.
+ * Return: True if any block has a waiter.
*/
static inline bool has_block_waiters(struct recovery_journal *journal)
{
@@ -296,7 +298,7 @@ static void notify_commit_waiters(struct recovery_journal *journal);
* suspend_lock_counter() - Prevent the lock counter from notifying.
* @counter: The counter.
*
- * Return: true if the lock counter was not notifying and hence the suspend was efficacious.
+ * Return: True if the lock counter was not notifying and hence the suspend was efficacious.
*/
static bool suspend_lock_counter(struct lock_counter *counter)
{
@@ -416,7 +418,7 @@ sequence_number_t vdo_get_recovery_journal_current_sequence_number(struct recove
*
* The head is the lowest sequence number of the block map head and the slab journal head.
*
- * Return: the head of the journal.
+ * Return: The head of the journal.
*/
static inline sequence_number_t get_recovery_journal_head(const struct recovery_journal *journal)
{
@@ -535,7 +537,7 @@ static void initialize_journal_state(struct recovery_journal *journal)
* vdo_get_recovery_journal_length() - Get the number of usable recovery journal blocks.
* @journal_size: The size of the recovery journal in blocks.
*
- * Return: the number of recovery journal blocks usable for entries.
+ * Return: The number of recovery journal blocks usable for entries.
*/
block_count_t vdo_get_recovery_journal_length(block_count_t journal_size)
{
@@ -1078,6 +1080,8 @@ static void update_usages(struct recovery_journal *journal, struct data_vio *dat
/**
* assign_entry() - Assign an entry waiter to the active block.
+ * @waiter: The data_vio.
+ * @context: The recovery journal block.
*
* Implements waiter_callback_fn.
*/
@@ -1165,6 +1169,8 @@ static void recycle_journal_block(struct recovery_journal_block *block)
/**
* continue_committed_waiter() - Invoked whenever a VIO is to be released from the journal because
* its entry was committed to disk.
+ * @waiter: The data_vio waiting on a journal write.
+ * @context: A pointer to the recovery journal.
*
* Implements waiter_callback_fn.
*/
@@ -1362,6 +1368,8 @@ static void add_queued_recovery_entries(struct recovery_journal_block *block)
/**
* write_block() - Issue a block for writing.
+ * @waiter: The recovery journal block to write.
+ * @context: Not used.
*
* Implements waiter_callback_fn.
*/
@@ -1611,11 +1619,7 @@ void vdo_release_journal_entry_lock(struct recovery_journal *journal,
smp_mb__after_atomic();
}
-/**
- * initiate_drain() - Initiate a drain.
- *
- * Implements vdo_admin_initiator_fn.
- */
+/** Implements vdo_admin_initiator_fn. */
static void initiate_drain(struct admin_state *state)
{
check_for_drain_complete(container_of(state, struct recovery_journal, state));
diff --git a/drivers/md/dm-vdo/slab-depot.c b/drivers/md/dm-vdo/slab-depot.c
index f3d80ff7bef5..034ecaa51f48 100644
--- a/drivers/md/dm-vdo/slab-depot.c
+++ b/drivers/md/dm-vdo/slab-depot.c
@@ -40,7 +40,7 @@ static const bool NORMAL_OPERATION = true;
/**
* get_lock() - Get the lock object for a slab journal block by sequence number.
- * @journal: vdo_slab journal to retrieve from.
+ * @journal: The vdo_slab journal to retrieve from.
* @sequence_number: Sequence number of the block.
*
* Return: The lock object for the given sequence number.
@@ -110,7 +110,7 @@ static void initialize_journal_state(struct slab_journal *journal)
* block_is_full() - Check whether a journal block is full.
* @journal: The slab journal for the block.
*
- * Return: true if the tail block is full.
+ * Return: True if the tail block is full.
*/
static bool __must_check block_is_full(struct slab_journal *journal)
{
@@ -127,10 +127,11 @@ static void release_journal_locks(struct vdo_waiter *waiter, void *context);
/**
* is_slab_journal_blank() - Check whether a slab's journal is blank.
+ * @slab: The slab to check.
*
* A slab journal is blank if it has never had any entries recorded in it.
*
- * Return: true if the slab's journal has never been modified.
+ * Return: True if the slab's journal has never been modified.
*/
static bool is_slab_journal_blank(const struct vdo_slab *slab)
{
@@ -227,6 +228,7 @@ static u8 __must_check compute_fullness_hint(struct slab_depot *depot,
/**
* check_summary_drain_complete() - Check whether an allocator's summary has finished draining.
+ * @allocator: The allocator to check.
*/
static void check_summary_drain_complete(struct block_allocator *allocator)
{
@@ -349,7 +351,7 @@ static void launch_write(struct slab_summary_block *block)
/**
* update_slab_summary_entry() - Update the entry for a slab.
- * @slab: The slab whose entry is to be updated
+ * @slab: The slab whose entry is to be updated.
* @waiter: The waiter that is updating the summary.
* @tail_block_offset: The offset of the slab journal's tail block.
* @load_ref_counts: Whether the reference counts must be loaded from disk on the vdo load.
@@ -654,6 +656,7 @@ static void update_tail_block_location(struct slab_journal *journal)
/**
* reopen_slab_journal() - Reopen a slab's journal by emptying it and then adding pending entries.
+ * @slab: The slab to reopen.
*/
static void reopen_slab_journal(struct vdo_slab *slab)
{
@@ -839,8 +842,6 @@ static void commit_tail(struct slab_journal *journal)
* @sbn: The slab block number of the entry to encode.
* @operation: The type of the entry.
* @increment: True if this is an increment.
- *
- * Exposed for unit tests.
*/
static void encode_slab_journal_entry(struct slab_journal_block_header *tail_header,
slab_journal_payload *payload,
@@ -951,7 +952,7 @@ static inline block_count_t journal_length(const struct slab_journal *journal)
* @parent: The completion to notify when there is space to add the entry if the entry could not be
* added immediately.
*
- * Return: true if the entry was added immediately.
+ * Return: True if the entry was added immediately.
*/
bool vdo_attempt_replay_into_slab(struct vdo_slab *slab, physical_block_number_t pbn,
enum journal_operation operation, bool increment,
@@ -1003,7 +1004,7 @@ bool vdo_attempt_replay_into_slab(struct vdo_slab *slab, physical_block_number_t
* requires_reaping() - Check whether the journal must be reaped before adding new entries.
* @journal: The journal to check.
*
- * Return: true if the journal must be reaped.
+ * Return: True if the journal must be reaped.
*/
static bool requires_reaping(const struct slab_journal *journal)
{
@@ -1275,6 +1276,8 @@ static void dirty_block(struct reference_block *block)
/**
* get_reference_block() - Get the reference block that covers the given block index.
+ * @slab: The slab containing the references.
+ * @index: The index of the physical block.
*/
static struct reference_block * __must_check get_reference_block(struct vdo_slab *slab,
slab_block_number index)
@@ -1379,7 +1382,8 @@ static void prioritize_slab(struct vdo_slab *slab)
/**
* adjust_free_block_count() - Adjust the free block count and (if needed) reprioritize the slab.
- * @incremented: true if the free block count went up.
+ * @slab: The slab.
+ * @incremented: True if the free block count went up.
*/
static void adjust_free_block_count(struct vdo_slab *slab, bool incremented)
{
@@ -1885,6 +1889,7 @@ static void add_entries(struct slab_journal *journal)
/**
* reset_search_cursor() - Reset the free block search back to the first reference counter in the
* first reference block of a slab.
+ * @slab: The slab.
*/
static void reset_search_cursor(struct vdo_slab *slab)
{
@@ -1892,17 +1897,17 @@ static void reset_search_cursor(struct vdo_slab *slab)
cursor->block = cursor->first_block;
cursor->index = 0;
- /* Unit tests have slabs with only one reference block (and it's a runt). */
cursor->end_index = min_t(u32, COUNTS_PER_BLOCK, slab->block_count);
}
/**
* advance_search_cursor() - Advance the search cursor to the start of the next reference block in
- * a slab,
+ * a slab.
+ * @slab: The slab.
*
* Wraps around to the first reference block if the current block is the last reference block.
*
- * Return: true unless the cursor was at the last reference block.
+ * Return: True unless the cursor was at the last reference block.
*/
static bool advance_search_cursor(struct vdo_slab *slab)
{
@@ -1933,6 +1938,9 @@ static bool advance_search_cursor(struct vdo_slab *slab)
/**
* vdo_adjust_reference_count_for_rebuild() - Adjust the reference count of a block during rebuild.
+ * @depot: The slab depot.
+ * @pbn: The physical block number to adjust.
+ * @operation: The type of operation.
*
* Return: VDO_SUCCESS or an error.
*/
@@ -2038,9 +2046,7 @@ static inline slab_block_number find_zero_byte_in_word(const u8 *word_ptr,
* @slab: The slab counters to scan.
* @index_ptr: A pointer to hold the array index of the free block.
*
- * Exposed for unit testing.
- *
- * Return: true if a free block was found in the specified range.
+ * Return: True if a free block was found in the specified range.
*/
static bool find_free_block(const struct vdo_slab *slab, slab_block_number *index_ptr)
{
@@ -2097,7 +2103,7 @@ static bool find_free_block(const struct vdo_slab *slab, slab_block_number *inde
* @slab: The slab to search.
* @free_index_ptr: A pointer to receive the array index of the zero reference count.
*
- * Return: true if an unreferenced counter was found.
+ * Return: True if an unreferenced counter was found.
*/
static bool search_current_reference_block(const struct vdo_slab *slab,
slab_block_number *free_index_ptr)
@@ -2116,7 +2122,7 @@ static bool search_current_reference_block(const struct vdo_slab *slab,
* counter index saved in the search cursor and searching up to the end of the last reference
* block. The search does not wrap.
*
- * Return: true if an unreferenced counter was found.
+ * Return: True if an unreferenced counter was found.
*/
static bool search_reference_blocks(struct vdo_slab *slab,
slab_block_number *free_index_ptr)
@@ -2136,6 +2142,8 @@ static bool search_reference_blocks(struct vdo_slab *slab,
/**
* make_provisional_reference() - Do the bookkeeping for making a provisional reference.
+ * @slab: The slab.
+ * @block_number: The index for the physical block to reference.
*/
static void make_provisional_reference(struct vdo_slab *slab,
slab_block_number block_number)
@@ -2155,6 +2163,7 @@ static void make_provisional_reference(struct vdo_slab *slab,
/**
* dirty_all_reference_blocks() - Mark all reference count blocks in a slab as dirty.
+ * @slab: The slab.
*/
static void dirty_all_reference_blocks(struct vdo_slab *slab)
{
@@ -2173,10 +2182,10 @@ static inline bool journal_points_equal(struct journal_point first,
/**
* match_bytes() - Check an 8-byte word for bytes matching the specified value.
- * @input: A word to examine the bytes of
- * @match: The byte value sought
+ * @input: A word to examine the bytes of.
+ * @match: The byte value sought.
*
- * Return: 1 in each byte when the corresponding input byte matched, 0 otherwise
+ * Return: 1 in each byte when the corresponding input byte matched, 0 otherwise.
*/
static inline u64 match_bytes(u64 input, u8 match)
{
@@ -2191,12 +2200,12 @@ static inline u64 match_bytes(u64 input, u8 match)
/**
* count_valid_references() - Process a newly loaded refcount array.
- * @counters: the array of counters from a metadata block
+ * @counters: The array of counters from a metadata block.
*
- * Scan a 8-byte-aligned array of counters, fixing up any "provisional" values that weren't
- * cleaned up at shutdown, changing them internally to "empty".
+ * Scan an 8-byte-aligned array of counters, fixing up any provisional values that
+ * weren't cleaned up at shutdown, changing them internally to zero.
*
- * Return: the number of blocks that are referenced (counters not "empty")
+ * Return: The number of blocks with a non-zero reference count.
*/
static unsigned int count_valid_references(vdo_refcount_t *counters)
{
@@ -2351,6 +2360,7 @@ static void load_reference_block_group(struct vdo_waiter *waiter, void *context)
/**
* load_reference_blocks() - Load a slab's reference blocks from the underlying storage into a
* pre-allocated reference counter.
+ * @slab: The slab.
*/
static void load_reference_blocks(struct vdo_slab *slab)
{
@@ -2375,6 +2385,7 @@ static void load_reference_blocks(struct vdo_slab *slab)
/**
* drain_slab() - Drain all reference count I/O.
+ * @slab: The slab.
*
* Depending upon the type of drain being performed (as recorded in the ref_count's vdo_slab), the
* reference blocks may be loaded from disk or dirty reference blocks may be written out.
@@ -2564,6 +2575,7 @@ static void read_slab_journal_tail(struct vdo_waiter *waiter, void *context)
/**
* load_slab_journal() - Load a slab's journal by reading the journal's tail.
+ * @slab: The slab.
*/
static void load_slab_journal(struct vdo_slab *slab)
{
@@ -2663,11 +2675,7 @@ static void queue_slab(struct vdo_slab *slab)
prioritize_slab(slab);
}
-/**
- * initiate_slab_action() - Initiate a slab action.
- *
- * Implements vdo_admin_initiator_fn.
- */
+/** Implements vdo_admin_initiator_fn. */
static void initiate_slab_action(struct admin_state *state)
{
struct vdo_slab *slab = container_of(state, struct vdo_slab, state);
@@ -2720,7 +2728,7 @@ static struct vdo_slab *get_next_slab(struct slab_scrubber *scrubber)
* has_slabs_to_scrub() - Check whether a scrubber has slabs to scrub.
* @scrubber: The scrubber to check.
*
- * Return: true if the scrubber has slabs to scrub.
+ * Return: True if the scrubber has slabs to scrub.
*/
static inline bool __must_check has_slabs_to_scrub(struct slab_scrubber *scrubber)
{
@@ -2741,6 +2749,7 @@ static void uninitialize_scrubber_vio(struct slab_scrubber *scrubber)
* finish_scrubbing() - Stop scrubbing, either because there are no more slabs to scrub or because
* there's been an error.
* @scrubber: The scrubber.
+ * @result: The result of the scrubbing operation.
*/
static void finish_scrubbing(struct slab_scrubber *scrubber, int result)
{
@@ -3132,11 +3141,13 @@ static struct vdo_slab *next_slab(struct slab_iterator *iterator)
/**
* abort_waiter() - Abort vios waiting to make journal entries when read-only.
+ * @waiter: A waiting data_vio.
+ * @context: Not used.
*
* This callback is invoked on all vios waiting to make slab journal entries after the VDO has gone
* into read-only mode. Implements waiter_callback_fn.
*/
-static void abort_waiter(struct vdo_waiter *waiter, void *context __always_unused)
+static void abort_waiter(struct vdo_waiter *waiter, void __always_unused *context)
{
struct reference_updater *updater =
container_of(waiter, struct reference_updater, waiter);
@@ -3536,7 +3547,7 @@ static void initiate_load(struct admin_state *state)
/**
* vdo_notify_slab_journals_are_recovered() - Inform a block allocator that its slab journals have
* been recovered from the recovery journal.
- * @completion The allocator completion
+ * @completion: The allocator completion.
*/
void vdo_notify_slab_journals_are_recovered(struct vdo_completion *completion)
{
@@ -3775,7 +3786,7 @@ static int initialize_slab_journal(struct vdo_slab *slab)
* in the slab.
* @allocator: The block allocator to which the slab belongs.
* @slab_number: The slab number of the slab.
- * @is_new: true if this slab is being allocated as part of a resize.
+ * @is_new: True if this slab is being allocated as part of a resize.
* @slab_ptr: A pointer to receive the new slab.
*
* Return: VDO_SUCCESS or an error code.
@@ -3894,11 +3905,7 @@ void vdo_abandon_new_slabs(struct slab_depot *depot)
vdo_free(vdo_forget(depot->new_slabs));
}
-/**
- * get_allocator_thread_id() - Get the ID of the thread on which a given allocator operates.
- *
- * Implements vdo_zone_thread_getter_fn.
- */
+/** Implements vdo_zone_thread_getter_fn. */
static thread_id_t get_allocator_thread_id(void *context, zone_count_t zone_number)
{
return ((struct slab_depot *) context)->allocators[zone_number].thread_id;
@@ -3911,7 +3918,7 @@ static thread_id_t get_allocator_thread_id(void *context, zone_count_t zone_numb
* @recovery_lock: The sequence number of the recovery journal block whose locks should be
* released.
*
- * Return: true if the journal does hold a lock on the specified block (which it will release).
+ * Return: True if the journal released a lock on the specified block.
*/
static bool __must_check release_recovery_journal_lock(struct slab_journal *journal,
sequence_number_t recovery_lock)
@@ -3955,6 +3962,8 @@ static void release_tail_block_locks(void *context, zone_count_t zone_number,
/**
* prepare_for_tail_block_commit() - Prepare to commit oldest tail blocks.
+ * @context: The slab depot.
+ * @parent: The parent operation.
*
* Implements vdo_action_preamble_fn.
*/
@@ -3968,6 +3977,7 @@ static void prepare_for_tail_block_commit(void *context, struct vdo_completion *
/**
* schedule_tail_block_commit() - Schedule a tail block commit if necessary.
+ * @context: The slab depot.
*
* This method should not be called directly. Rather, call vdo_schedule_default_action() on the
* depot's action manager.
@@ -4361,6 +4371,7 @@ struct slab_depot_state_2_0 vdo_record_slab_depot(const struct slab_depot *depot
/**
* vdo_allocate_reference_counters() - Allocate the reference counters for all slabs in the depot.
+ * @depot: The slab depot.
*
* Context: This method may be called only before entering normal operation from the load thread.
*
@@ -4615,7 +4626,9 @@ static void load_summary_endio(struct bio *bio)
}
/**
- * load_slab_summary() - The preamble of a load operation.
+ * load_slab_summary() - Load the slab summary before the slab data.
+ * @context: The slab depot.
+ * @parent: The load operation.
*
* Implements vdo_action_preamble_fn.
*/
@@ -4731,7 +4744,7 @@ void vdo_update_slab_depot_size(struct slab_depot *depot)
* vdo_prepare_to_grow_slab_depot() - Allocate new memory needed for a resize of a slab depot to
* the given size.
* @depot: The depot to prepare to resize.
- * @partition: The new depot partition
+ * @partition: The new depot partition.
*
* Return: VDO_SUCCESS or an error.
*/
@@ -4781,6 +4794,7 @@ int vdo_prepare_to_grow_slab_depot(struct slab_depot *depot,
/**
* finish_registration() - Finish registering new slabs now that all of the allocators have
* received their new slabs.
+ * @context: The slab depot.
*
* Implements vdo_action_conclusion_fn.
*/
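The match_bytes() helper touched above is a classic SWAR (SIMD-within-a-register) byte search: broadcast the byte being sought across a word, XOR so that matching bytes become zero, then detect the zero bytes. The following self-contained sketch shows the standard construction for illustration only; the kernel's exact bit manipulation may differ.

#include <assert.h>
#include <stdint.h>

/* Return 0x01 in each result byte whose input byte equals match. */
static uint64_t match_bytes_sketch(uint64_t input, uint8_t match)
{
	/* Bytes equal to 'match' become zero after the XOR. */
	uint64_t v = input ^ (0x0101010101010101ULL * match);
	uint64_t low = 0x7f7f7f7f7f7f7f7fULL;

	/* Exact zero-byte detector: 0x80 in each zero byte of v. */
	return (~(((v & low) + low) | v | low)) >> 7;
}

int main(void)
{
	/* Bytes 2 and 6 (counting from the least significant) are 0xee. */
	assert(match_bytes_sketch(0x11ee221122ee3311ULL, 0xee) ==
	       0x0001000000010000ULL);
	return 0;
}

The final shift moves the 0x80 marker in each matching byte down to bit 0, producing the "1 in each byte" result the kernel-doc describes.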
diff --git a/drivers/md/dm-vdo/vdo.c b/drivers/md/dm-vdo/vdo.c
index 80b608674022..09fd0628d18c 100644
--- a/drivers/md/dm-vdo/vdo.c
+++ b/drivers/md/dm-vdo/vdo.c
@@ -181,6 +181,8 @@ static void assign_thread_ids(struct thread_config *config,
/**
* initialize_thread_config() - Initialize the thread mapping.
+ * @counts: The number and types of threads to create.
+ * @config: The thread_config to initialize.
*
* If the logical, physical, and hash zone counts are all 0, a single thread will be shared by all
* three plus the packer and recovery journal. Otherwise, there must be at least one of each type,
@@ -884,6 +886,7 @@ const struct admin_state_code *vdo_get_admin_state(const struct vdo *vdo)
/**
* record_vdo() - Record the state of the VDO for encoding in the super block.
+ * @vdo: The vdo.
*/
static void record_vdo(struct vdo *vdo)
{
@@ -1277,7 +1280,7 @@ void vdo_enter_read_only_mode(struct vdo *vdo, int error_code)
* vdo_is_read_only() - Check whether the VDO is read-only.
* @vdo: The vdo.
*
- * Return: true if the vdo is read-only.
+ * Return: True if the vdo is read-only.
*
* This method may be called from any thread, as opposed to examining the VDO's state field which
* is only safe to check from the admin thread.
@@ -1291,7 +1294,7 @@ bool vdo_is_read_only(struct vdo *vdo)
* vdo_in_read_only_mode() - Check whether a vdo is in read-only mode.
* @vdo: The vdo to query.
*
- * Return: true if the vdo is in read-only mode.
+ * Return: True if the vdo is in read-only mode.
*/
bool vdo_in_read_only_mode(const struct vdo *vdo)
{
@@ -1302,7 +1305,7 @@ bool vdo_in_read_only_mode(const struct vdo *vdo)
* vdo_in_recovery_mode() - Check whether the vdo is in recovery mode.
* @vdo: The vdo to query.
*
- * Return: true if the vdo is in recovery mode.
+ * Return: True if the vdo is in recovery mode.
*/
bool vdo_in_recovery_mode(const struct vdo *vdo)
{
diff --git a/drivers/md/dm-vdo/vdo.h b/drivers/md/dm-vdo/vdo.h
index 483ae873e002..1aaba73997b7 100644
--- a/drivers/md/dm-vdo/vdo.h
+++ b/drivers/md/dm-vdo/vdo.h
@@ -279,8 +279,10 @@ static inline bool vdo_uses_bio_ack_queue(struct vdo *vdo)
/**
* typedef vdo_filter_fn - Method type for vdo matching methods.
+ * @vdo: The vdo to match.
+ * @context: A parameter for the filter to use.
*
- * A filter function returns false if the vdo doesn't match.
+ * Return: True if the vdo matches the filter criteria, false if it doesn't.
*/
typedef bool (*vdo_filter_fn)(struct vdo *vdo, const void *context);
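As an illustration of the typedef, a minimal filter (hypothetical, not taken from the driver) that matches one specific instance by pointer identity:

/* Hypothetical vdo_filter_fn: matches only the instance passed as context. */
static bool filter_is_instance(struct vdo *vdo, const void *context)
{
	return vdo == context;
}

A filter like this would be passed, together with its context, to whatever lookup routine iterates the registered vdo instances.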
diff --git a/drivers/md/dm-vdo/vio.c b/drivers/md/dm-vdo/vio.c
index 8fc22fb14196..5ffc867d9c5e 100644
--- a/drivers/md/dm-vdo/vio.c
+++ b/drivers/md/dm-vdo/vio.c
@@ -398,8 +398,9 @@ void free_vio_pool(struct vio_pool *pool)
/**
* is_vio_pool_busy() - Check whether a vio pool has outstanding entries.
+ * @pool: The vio pool.
*
- * Return: true if the pool is busy.
+ * Return: True if the pool is busy.
*/
bool is_vio_pool_busy(struct vio_pool *pool)
{
diff --git a/drivers/md/dm-vdo/vio.h b/drivers/md/dm-vdo/vio.h
index 4bfcb21901f1..7a8a6819aec4 100644
--- a/drivers/md/dm-vdo/vio.h
+++ b/drivers/md/dm-vdo/vio.h
@@ -156,8 +156,7 @@ static inline enum vdo_completion_priority get_metadata_priority(struct vio *vio
/**
* continue_vio() - Enqueue a vio to run its next callback.
* @vio: The vio to continue.
- *
- * Return: The result of the current operation.
+ * @result: The result of the current operation.
*/
static inline void continue_vio(struct vio *vio, int result)
{
@@ -172,6 +171,9 @@ void vdo_count_completed_bios(struct bio *bio);
/**
* continue_vio_after_io() - Continue a vio now that its I/O has returned.
+ * @vio: The vio to continue.
+ * @callback: The next operation for this vio.
+ * @thread: Which thread to run the next operation on.
*/
static inline void continue_vio_after_io(struct vio *vio, vdo_action_fn callback,
thread_id_t thread)
diff --git a/drivers/md/dm-verity-fec.c b/drivers/md/dm-verity-fec.c
index 72047b47a7a0..c79de517afee 100644
--- a/drivers/md/dm-verity-fec.c
+++ b/drivers/md/dm-verity-fec.c
@@ -177,9 +177,11 @@ error:
if (r < 0 && neras)
DMERR_LIMIT("%s: FEC %llu: failed to correct: %d",
v->data_dev->name, (unsigned long long)rsb, r);
- else if (r > 0)
+ else if (r > 0) {
DMWARN_LIMIT("%s: FEC %llu: corrected %d errors",
v->data_dev->name, (unsigned long long)rsb, r);
+ atomic64_inc(&v->fec->corrected);
+ }
return r;
}
@@ -188,14 +190,13 @@ error:
* Locate data block erasures using verity hashes.
*/
static int fec_is_erasure(struct dm_verity *v, struct dm_verity_io *io,
- u8 *want_digest, u8 *data)
+ const u8 *want_digest, const u8 *data)
{
if (unlikely(verity_hash(v, io, data, 1 << v->data_dev_block_bits,
- verity_io_real_digest(v, io))))
+ io->tmp_digest)))
return 0;
- return memcmp(verity_io_real_digest(v, io), want_digest,
- v->digest_size) != 0;
+ return memcmp(io->tmp_digest, want_digest, v->digest_size) != 0;
}
/*
@@ -328,7 +329,7 @@ static int fec_alloc_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
if (fio->bufs[n])
continue;
- fio->bufs[n] = mempool_alloc(&v->fec->extra_pool, GFP_NOWAIT);
+ fio->bufs[n] = kmem_cache_alloc(v->fec->cache, GFP_NOWAIT);
/* we can manage with even one buffer if necessary */
if (unlikely(!fio->bufs[n]))
break;
@@ -362,7 +363,7 @@ static void fec_init_bufs(struct dm_verity *v, struct dm_verity_fec_io *fio)
*/
static int fec_decode_rsb(struct dm_verity *v, struct dm_verity_io *io,
struct dm_verity_fec_io *fio, u64 rsb, u64 offset,
- bool use_erasures)
+ const u8 *want_digest, bool use_erasures)
{
int r, neras = 0;
unsigned int pos;
@@ -388,12 +389,11 @@ static int fec_decode_rsb(struct dm_verity *v, struct dm_verity_io *io,
/* Always re-validate the corrected block against the expected hash */
r = verity_hash(v, io, fio->output, 1 << v->data_dev_block_bits,
- verity_io_real_digest(v, io));
+ io->tmp_digest);
if (unlikely(r < 0))
return r;
- if (memcmp(verity_io_real_digest(v, io), verity_io_want_digest(v, io),
- v->digest_size)) {
+ if (memcmp(io->tmp_digest, want_digest, v->digest_size)) {
DMERR_LIMIT("%s: FEC %llu: failed to correct (%d erasures)",
v->data_dev->name, (unsigned long long)rsb, neras);
return -EILSEQ;
@@ -404,7 +404,8 @@ static int fec_decode_rsb(struct dm_verity *v, struct dm_verity_io *io,
/* Correct errors in a block. Copies corrected block to dest. */
int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
- enum verity_block_type type, sector_t block, u8 *dest)
+ enum verity_block_type type, const u8 *want_digest,
+ sector_t block, u8 *dest)
{
int r;
struct dm_verity_fec_io *fio = fec_io(io);
@@ -413,10 +414,8 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
if (!verity_fec_is_enabled(v))
return -EOPNOTSUPP;
- if (fio->level >= DM_VERITY_FEC_MAX_RECURSION) {
- DMWARN_LIMIT("%s: FEC: recursion too deep", v->data_dev->name);
+ if (fio->level)
return -EIO;
- }
fio->level++;
@@ -447,9 +446,9 @@ int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
* them first. Do a second attempt with erasures if the corruption is
* bad enough.
*/
- r = fec_decode_rsb(v, io, fio, rsb, offset, false);
+ r = fec_decode_rsb(v, io, fio, rsb, offset, want_digest, false);
if (r < 0) {
- r = fec_decode_rsb(v, io, fio, rsb, offset, true);
+ r = fec_decode_rsb(v, io, fio, rsb, offset, want_digest, true);
if (r < 0)
goto done;
}
@@ -479,7 +478,8 @@ void verity_fec_finish_io(struct dm_verity_io *io)
mempool_free(fio->bufs[n], &f->prealloc_pool);
fec_for_each_extra_buffer(fio, n)
- mempool_free(fio->bufs[n], &f->extra_pool);
+ if (fio->bufs[n])
+ kmem_cache_free(f->cache, fio->bufs[n]);
mempool_free(fio->output, &f->output_pool);
}
@@ -531,7 +531,6 @@ void verity_fec_dtr(struct dm_verity *v)
mempool_exit(&f->rs_pool);
mempool_exit(&f->prealloc_pool);
- mempool_exit(&f->extra_pool);
mempool_exit(&f->output_pool);
kmem_cache_destroy(f->cache);
@@ -784,12 +783,6 @@ int verity_fec_ctr(struct dm_verity *v)
return ret;
}
- ret = mempool_init_slab_pool(&f->extra_pool, 0, f->cache);
- if (ret) {
- ti->error = "Cannot allocate FEC buffer extra pool";
- return ret;
- }
-
/* Preallocate an output buffer for each thread */
ret = mempool_init_kmalloc_pool(&f->output_pool, num_online_cpus(),
1 << v->data_dev_block_bits);
diff --git a/drivers/md/dm-verity-fec.h b/drivers/md/dm-verity-fec.h
index 09123a612953..5fd267873812 100644
--- a/drivers/md/dm-verity-fec.h
+++ b/drivers/md/dm-verity-fec.h
@@ -23,9 +23,6 @@
#define DM_VERITY_FEC_BUF_MAX \
(1 << (PAGE_SHIFT - DM_VERITY_FEC_BUF_RS_BITS))
-/* maximum recursion level for verity_fec_decode */
-#define DM_VERITY_FEC_MAX_RECURSION 4
-
#define DM_VERITY_OPT_FEC_DEV "use_fec_from_device"
#define DM_VERITY_OPT_FEC_BLOCKS "fec_blocks"
#define DM_VERITY_OPT_FEC_START "fec_start"
@@ -45,9 +42,9 @@ struct dm_verity_fec {
unsigned char rsn; /* N of RS(M, N) */
mempool_t rs_pool; /* mempool for fio->rs */
mempool_t prealloc_pool; /* mempool for preallocated buffers */
- mempool_t extra_pool; /* mempool for extra buffers */
mempool_t output_pool; /* mempool for output */
struct kmem_cache *cache; /* cache for buffers */
+ atomic64_t corrected; /* corrected errors */
};
/* per-bio data */
@@ -68,8 +65,8 @@ struct dm_verity_fec_io {
extern bool verity_fec_is_enabled(struct dm_verity *v);
extern int verity_fec_decode(struct dm_verity *v, struct dm_verity_io *io,
- enum verity_block_type type, sector_t block,
- u8 *dest);
+ enum verity_block_type type, const u8 *want_digest,
+ sector_t block, u8 *dest);
extern unsigned int verity_fec_status_table(struct dm_verity *v, unsigned int sz,
char *result, unsigned int maxlen);
@@ -99,6 +96,7 @@ static inline bool verity_fec_is_enabled(struct dm_verity *v)
static inline int verity_fec_decode(struct dm_verity *v,
struct dm_verity_io *io,
enum verity_block_type type,
+ const u8 *want_digest,
sector_t block, u8 *dest)
{
return -EOPNOTSUPP;
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 66a00a8ccb39..5c17472d7896 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -117,11 +117,25 @@ static sector_t verity_position_at_level(struct dm_verity *v, sector_t block,
int verity_hash(struct dm_verity *v, struct dm_verity_io *io,
const u8 *data, size_t len, u8 *digest)
{
- struct shash_desc *desc = &io->hash_desc;
+ struct shash_desc *desc;
int r;
+ if (likely(v->use_sha256_lib)) {
+ struct sha256_ctx *ctx = &io->hash_ctx.sha256;
+
+ /*
+ * Fast path using SHA-256 library. This is enabled only for
+ * verity version 1, where the salt is at the beginning.
+ */
+ *ctx = *v->initial_hashstate.sha256;
+ sha256_update(ctx, data, len);
+ sha256_final(ctx, digest);
+ return 0;
+ }
+
+ desc = &io->hash_ctx.shash;
desc->tfm = v->shash_tfm;
- if (unlikely(v->initial_hashstate == NULL)) {
+ if (unlikely(v->initial_hashstate.shash == NULL)) {
/* Version 0: salt at end */
r = crypto_shash_init(desc) ?:
crypto_shash_update(desc, data, len) ?:
@@ -129,7 +143,7 @@ int verity_hash(struct dm_verity *v, struct dm_verity_io *io,
crypto_shash_final(desc, digest);
} else {
/* Version 1: salt at beginning */
- r = crypto_shash_import(desc, v->initial_hashstate) ?:
+ r = crypto_shash_import(desc, v->initial_hashstate.shash) ?:
crypto_shash_finup(desc, data, len, digest);
}
if (unlikely(r))
@@ -215,12 +229,12 @@ out:
* Verify hash of a metadata block pertaining to the specified data block
* ("block" argument) at a specified level ("level" argument).
*
- * On successful return, verity_io_want_digest(v, io) contains the hash value
- * for a lower tree level or for the data block (if we're at the lowest level).
+ * On successful return, want_digest contains the hash value for a lower tree
+ * level or for the data block (if we're at the lowest level).
*
* If "skip_unverified" is true, unverified buffer is skipped and 1 is returned.
* If "skip_unverified" is false, unverified buffer is hashed and verified
- * against current value of verity_io_want_digest(v, io).
+ * against current value of want_digest.
*/
static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
sector_t block, int level, bool skip_unverified,
@@ -259,7 +273,7 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
if (IS_ERR(data))
return r;
if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_METADATA,
- hash_block, data) == 0) {
+ want_digest, hash_block, data) == 0) {
aux = dm_bufio_get_aux_data(buf);
aux->hash_verified = 1;
goto release_ok;
@@ -279,11 +293,11 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
}
r = verity_hash(v, io, data, 1 << v->hash_dev_block_bits,
- verity_io_real_digest(v, io));
+ io->tmp_digest);
if (unlikely(r < 0))
goto release_ret_r;
- if (likely(memcmp(verity_io_real_digest(v, io), want_digest,
+ if (likely(memcmp(io->tmp_digest, want_digest,
v->digest_size) == 0))
aux->hash_verified = 1;
else if (static_branch_unlikely(&use_bh_wq_enabled) && io->in_bh) {
@@ -294,7 +308,7 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
r = -EAGAIN;
goto release_ret_r;
} else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_METADATA,
- hash_block, data) == 0)
+ want_digest, hash_block, data) == 0)
aux->hash_verified = 1;
else if (verity_handle_err(v,
DM_VERITY_BLOCK_TYPE_METADATA,
@@ -358,7 +372,8 @@ out:
}
static noinline int verity_recheck(struct dm_verity *v, struct dm_verity_io *io,
- sector_t cur_block, u8 *dest)
+ const u8 *want_digest, sector_t cur_block,
+ u8 *dest)
{
struct page *page;
void *buffer;
@@ -382,12 +397,11 @@ static noinline int verity_recheck(struct dm_verity *v, struct dm_verity_io *io,
goto free_ret;
r = verity_hash(v, io, buffer, 1 << v->data_dev_block_bits,
- verity_io_real_digest(v, io));
+ io->tmp_digest);
if (unlikely(r))
goto free_ret;
- if (memcmp(verity_io_real_digest(v, io),
- verity_io_want_digest(v, io), v->digest_size)) {
+ if (memcmp(io->tmp_digest, want_digest, v->digest_size)) {
r = -EIO;
goto free_ret;
}
@@ -402,9 +416,13 @@ free_ret:
static int verity_handle_data_hash_mismatch(struct dm_verity *v,
struct dm_verity_io *io,
- struct bio *bio, sector_t blkno,
- u8 *data)
+ struct bio *bio,
+ struct pending_block *block)
{
+ const u8 *want_digest = block->want_digest;
+ sector_t blkno = block->blkno;
+ u8 *data = block->data;
+
if (static_branch_unlikely(&use_bh_wq_enabled) && io->in_bh) {
/*
* Error handling code (FEC included) cannot be run in the
@@ -412,14 +430,14 @@ static int verity_handle_data_hash_mismatch(struct dm_verity *v,
*/
return -EAGAIN;
}
- if (verity_recheck(v, io, blkno, data) == 0) {
+ if (verity_recheck(v, io, want_digest, blkno, data) == 0) {
if (v->validated_blocks)
set_bit(blkno, v->validated_blocks);
return 0;
}
#if defined(CONFIG_DM_VERITY_FEC)
- if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA, blkno,
- data) == 0)
+ if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA, want_digest,
+ blkno, data) == 0)
return 0;
#endif
if (bio->bi_status)
@@ -433,6 +451,58 @@ static int verity_handle_data_hash_mismatch(struct dm_verity *v,
return 0;
}
+static void verity_clear_pending_blocks(struct dm_verity_io *io)
+{
+ int i;
+
+ for (i = io->num_pending - 1; i >= 0; i--) {
+ kunmap_local(io->pending_blocks[i].data);
+ io->pending_blocks[i].data = NULL;
+ }
+ io->num_pending = 0;
+}
+
+static int verity_verify_pending_blocks(struct dm_verity *v,
+ struct dm_verity_io *io,
+ struct bio *bio)
+{
+ const unsigned int block_size = 1 << v->data_dev_block_bits;
+ int i, r;
+
+ if (io->num_pending == 2) {
+ /* num_pending == 2 implies that the algorithm is SHA-256 */
+ sha256_finup_2x(v->initial_hashstate.sha256,
+ io->pending_blocks[0].data,
+ io->pending_blocks[1].data, block_size,
+ io->pending_blocks[0].real_digest,
+ io->pending_blocks[1].real_digest);
+ } else {
+ for (i = 0; i < io->num_pending; i++) {
+ r = verity_hash(v, io, io->pending_blocks[i].data,
+ block_size,
+ io->pending_blocks[i].real_digest);
+ if (unlikely(r))
+ return r;
+ }
+ }
+
+ for (i = 0; i < io->num_pending; i++) {
+ struct pending_block *block = &io->pending_blocks[i];
+
+ if (likely(memcmp(block->real_digest, block->want_digest,
+ v->digest_size) == 0)) {
+ if (v->validated_blocks)
+ set_bit(block->blkno, v->validated_blocks);
+ } else {
+ r = verity_handle_data_hash_mismatch(v, io, bio, block);
+ if (unlikely(r))
+ return r;
+ }
+ }
+ verity_clear_pending_blocks(io);
+ return 0;
+}
+
/*
* Verify one "dm_verity_io" structure.
*/
@@ -440,10 +510,14 @@ static int verity_verify_io(struct dm_verity_io *io)
{
struct dm_verity *v = io->v;
const unsigned int block_size = 1 << v->data_dev_block_bits;
+ const int max_pending = v->use_sha256_finup_2x ? 2 : 1;
struct bvec_iter iter_copy;
struct bvec_iter *iter;
struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
unsigned int b;
+ int r;
+
+ io->num_pending = 0;
if (static_branch_unlikely(&use_bh_wq_enabled) && io->in_bh) {
/*
@@ -457,21 +531,22 @@ static int verity_verify_io(struct dm_verity_io *io)
for (b = 0; b < io->n_blocks;
b++, bio_advance_iter(bio, iter, block_size)) {
- int r;
- sector_t cur_block = io->block + b;
+ sector_t blkno = io->block + b;
+ struct pending_block *block;
bool is_zero;
struct bio_vec bv;
void *data;
if (v->validated_blocks && bio->bi_status == BLK_STS_OK &&
- likely(test_bit(cur_block, v->validated_blocks)))
+ likely(test_bit(blkno, v->validated_blocks)))
continue;
- r = verity_hash_for_block(v, io, cur_block,
- verity_io_want_digest(v, io),
+ block = &io->pending_blocks[io->num_pending];
+
+ r = verity_hash_for_block(v, io, blkno, block->want_digest,
&is_zero);
if (unlikely(r < 0))
- return r;
+ goto error;
bv = bio_iter_iovec(bio, *iter);
if (unlikely(bv.bv_len < block_size)) {
@@ -482,7 +557,8 @@ static int verity_verify_io(struct dm_verity_io *io)
* data block size to be greater than PAGE_SIZE.
*/
DMERR_LIMIT("unaligned io (data block spans pages)");
- return -EIO;
+ r = -EIO;
+ goto error;
}
data = bvec_kmap_local(&bv);
@@ -496,29 +572,26 @@ static int verity_verify_io(struct dm_verity_io *io)
kunmap_local(data);
continue;
}
-
- r = verity_hash(v, io, data, block_size,
- verity_io_real_digest(v, io));
- if (unlikely(r < 0)) {
- kunmap_local(data);
- return r;
+ block->data = data;
+ block->blkno = blkno;
+ if (++io->num_pending == max_pending) {
+ r = verity_verify_pending_blocks(v, io, bio);
+ if (unlikely(r))
+ goto error;
}
+ }
- if (likely(memcmp(verity_io_real_digest(v, io),
- verity_io_want_digest(v, io), v->digest_size) == 0)) {
- if (v->validated_blocks)
- set_bit(cur_block, v->validated_blocks);
- kunmap_local(data);
- continue;
- }
- r = verity_handle_data_hash_mismatch(v, io, bio, cur_block,
- data);
- kunmap_local(data);
+ if (io->num_pending) {
+ r = verity_verify_pending_blocks(v, io, bio);
if (unlikely(r))
- return r;
+ goto error;
}
return 0;
+
+error:
+ verity_clear_pending_blocks(io);
+ return r;
}
/*
@@ -775,6 +848,10 @@ static void verity_status(struct dm_target *ti, status_type_t type,
switch (type) {
case STATUSTYPE_INFO:
DMEMIT("%c", v->hash_failed ? 'C' : 'V');
+ if (verity_fec_is_enabled(v))
+ DMEMIT(" %lld", atomic64_read(&v->fec->corrected));
+ else
+ DMEMIT(" -");
break;
case STATUSTYPE_TABLE:
DMEMIT("%u %s %s %u %u %llu %llu %s ",
@@ -1004,7 +1081,7 @@ static void verity_dtr(struct dm_target *ti)
kvfree(v->validated_blocks);
kfree(v->salt);
- kfree(v->initial_hashstate);
+ kfree(v->initial_hashstate.shash);
kfree(v->root_digest);
kfree(v->zero_digest);
verity_free_sig(v);
@@ -1069,8 +1146,7 @@ static int verity_alloc_zero_digest(struct dm_verity *v)
if (!v->zero_digest)
return r;
- io = kmalloc(sizeof(*io) + crypto_shash_descsize(v->shash_tfm),
- GFP_KERNEL);
+ io = kmalloc(v->ti->per_io_data_size, GFP_KERNEL);
if (!io)
return r; /* verity_dtr will free zero_digest */
@@ -1252,11 +1328,26 @@ static int verity_setup_hash_alg(struct dm_verity *v, const char *alg_name)
}
v->shash_tfm = shash;
v->digest_size = crypto_shash_digestsize(shash);
- DMINFO("%s using \"%s\"", alg_name, crypto_shash_driver_name(shash));
if ((1 << v->hash_dev_block_bits) < v->digest_size * 2) {
ti->error = "Digest size too big";
return -EINVAL;
}
+ if (likely(v->version && strcmp(alg_name, "sha256") == 0)) {
+ /*
+ * Fast path: use the library API for reduced overhead and
+ * interleaved hashing support.
+ */
+ v->use_sha256_lib = true;
+ if (sha256_finup_2x_is_optimized())
+ v->use_sha256_finup_2x = true;
+ ti->per_io_data_size =
+ offsetofend(struct dm_verity_io, hash_ctx.sha256);
+ } else {
+ /* Fallback case: use the generic crypto API. */
+ ti->per_io_data_size =
+ offsetofend(struct dm_verity_io, hash_ctx.shash) +
+ crypto_shash_descsize(shash);
+ }
return 0;
}
@@ -1277,7 +1368,18 @@ static int verity_setup_salt_and_hashstate(struct dm_verity *v, const char *arg)
return -EINVAL;
}
}
- if (v->version) { /* Version 1: salt at beginning */
+ if (likely(v->use_sha256_lib)) {
+ /* Implies version 1: salt at beginning */
+ v->initial_hashstate.sha256 =
+ kmalloc(sizeof(struct sha256_ctx), GFP_KERNEL);
+ if (!v->initial_hashstate.sha256) {
+ ti->error = "Cannot allocate initial hash state";
+ return -ENOMEM;
+ }
+ sha256_init(v->initial_hashstate.sha256);
+ sha256_update(v->initial_hashstate.sha256,
+ v->salt, v->salt_size);
+ } else if (v->version) { /* Version 1: salt at beginning */
SHASH_DESC_ON_STACK(desc, v->shash_tfm);
int r;
@@ -1285,16 +1387,16 @@ static int verity_setup_salt_and_hashstate(struct dm_verity *v, const char *arg)
* Compute the pre-salted hash state that can be passed to
* crypto_shash_import() for each block later.
*/
- v->initial_hashstate = kmalloc(
+ v->initial_hashstate.shash = kmalloc(
crypto_shash_statesize(v->shash_tfm), GFP_KERNEL);
- if (!v->initial_hashstate) {
+ if (!v->initial_hashstate.shash) {
ti->error = "Cannot allocate initial hash state";
return -ENOMEM;
}
desc->tfm = v->shash_tfm;
r = crypto_shash_init(desc) ?:
crypto_shash_update(desc, v->salt, v->salt_size) ?:
- crypto_shash_export(desc, v->initial_hashstate);
+ crypto_shash_export(desc, v->initial_hashstate.shash);
if (r) {
ti->error = "Cannot set up initial hash state";
return r;
@@ -1556,9 +1658,6 @@ static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
goto bad;
}
- ti->per_io_data_size = sizeof(struct dm_verity_io) +
- crypto_shash_descsize(v->shash_tfm);
-
r = verity_fec_ctr(v);
if (r)
goto bad;
@@ -1690,7 +1789,7 @@ static struct target_type verity_target = {
.name = "verity",
/* Note: the LSMs depend on the singleton and immutable features */
.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
- .version = {1, 12, 0},
+ .version = {1, 13, 0},
.module = THIS_MODULE,
.ctr = verity_ctr,
.dtr = verity_dtr,
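The heart of this patch is the pending-block queue in verity_verify_io(): blocks accumulate until the queue is full (two entries when 2-way interleaved SHA-256 is available, otherwise one), then the whole batch is hashed and checked. A minimal userspace sketch of that control flow follows; the names and the trivial placeholder "hash" are illustrative stand-ins for the SHA-256 library calls, not the kernel API.

#include <stddef.h>
#include <string.h>

#define MAX_PENDING 2
#define DIGEST_SIZE 32

struct pending {
	const unsigned char *data;
	unsigned char digest[DIGEST_SIZE];
};

/* Placeholder one-shot hash; a real build would call sha256(). */
static void hash_one(const unsigned char *data, size_t len, unsigned char *out)
{
	memset(out, 0, DIGEST_SIZE);
	for (size_t i = 0; i < len; i++)
		out[i % DIGEST_SIZE] ^= data[i];
}

/* Stand-in for sha256_finup_2x(): hash two equal-length messages. */
static void hash_two(const struct pending *a, const struct pending *b,
		     size_t len, unsigned char *da, unsigned char *db)
{
	hash_one(a->data, len, da);
	hash_one(b->data, len, db);
}

/* Hash whatever is queued, pairing blocks when possible. */
static void flush_pending(struct pending *q, int n, size_t len)
{
	if (n == 2)
		hash_two(&q[0], &q[1], len, q[0].digest, q[1].digest);
	else if (n == 1)
		hash_one(q[0].data, len, q[0].digest);
}

static void verify_blocks(const unsigned char **blocks, int nblocks, size_t len)
{
	struct pending queue[MAX_PENDING];
	int pending = 0;

	for (int i = 0; i < nblocks; i++) {
		queue[pending++].data = blocks[i];
		if (pending == MAX_PENDING) {
			flush_pending(queue, pending, len);
			pending = 0; /* compare digests here, then reset */
		}
	}
	flush_pending(queue, pending, len); /* odd trailing block */
}

int main(void)
{
	static const unsigned char a[8] = "blockA!", b[8] = "blockB!";
	const unsigned char *blocks[] = { a, b, a };

	verify_blocks(blocks, 3, sizeof(a)); /* pairs (a, b), then tail a */
	return 0;
}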
diff --git a/drivers/md/dm-verity.h b/drivers/md/dm-verity.h
index 6d141abd965c..f975a9e5c5d6 100644
--- a/drivers/md/dm-verity.h
+++ b/drivers/md/dm-verity.h
@@ -16,6 +16,7 @@
#include <linux/device-mapper.h>
#include <linux/interrupt.h>
#include <crypto/hash.h>
+#include <crypto/sha2.h>
#define DM_VERITY_MAX_LEVELS 63
@@ -42,7 +43,10 @@ struct dm_verity {
struct crypto_shash *shash_tfm;
u8 *root_digest; /* digest of the root block */
u8 *salt; /* salt: its size is salt_size */
- u8 *initial_hashstate; /* salted initial state, if version >= 1 */
+ union {
+ struct sha256_ctx *sha256; /* for use_sha256_lib=1 */
+ u8 *shash; /* for use_sha256_lib=0 */
+ } initial_hashstate; /* salted initial state, if version >= 1 */
u8 *zero_digest; /* digest for a zero block */
#ifdef CONFIG_SECURITY
u8 *root_digest_sig; /* signature of the root digest */
@@ -59,6 +63,8 @@ struct dm_verity {
unsigned char version;
bool hash_failed:1; /* set if hash of any block failed */
bool use_bh_wq:1; /* try to verify in BH wq before normal work-queue */
+ bool use_sha256_lib:1; /* use SHA-256 library instead of generic crypto API */
+ bool use_sha256_finup_2x:1; /* use interleaved hashing optimization */
unsigned int digest_size; /* digest size for the current hash algorithm */
enum verity_mode mode; /* mode for handling verification errors */
enum verity_mode error_mode;/* mode for handling I/O errors */
@@ -78,6 +84,13 @@ struct dm_verity {
mempool_t recheck_pool;
};
+struct pending_block {
+ void *data;
+ sector_t blkno;
+ u8 want_digest[HASH_MAX_DIGESTSIZE];
+ u8 real_digest[HASH_MAX_DIGESTSIZE];
+};
+
struct dm_verity_io {
struct dm_verity *v;
@@ -94,28 +107,29 @@ struct dm_verity_io {
struct work_struct work;
struct work_struct bh_work;
- u8 real_digest[HASH_MAX_DIGESTSIZE];
- u8 want_digest[HASH_MAX_DIGESTSIZE];
+ u8 tmp_digest[HASH_MAX_DIGESTSIZE];
/*
- * Temporary space for hashing. This is variable-length and must be at
- * the end of the struct. struct shash_desc is just the fixed part;
- * it's followed by a context of size crypto_shash_descsize(shash_tfm).
+ * This is the queue of data blocks that are pending verification. When
+ * the crypto layer supports interleaved hashing, we allow multiple
+ * blocks to be queued up in order to utilize it. This can improve
+ * performance significantly vs. sequential hashing of each block.
*/
- struct shash_desc hash_desc;
-};
+ int num_pending;
+ struct pending_block pending_blocks[2];
-static inline u8 *verity_io_real_digest(struct dm_verity *v,
- struct dm_verity_io *io)
-{
- return io->real_digest;
-}
-
-static inline u8 *verity_io_want_digest(struct dm_verity *v,
- struct dm_verity_io *io)
-{
- return io->want_digest;
-}
+ /*
+ * Temporary space for hashing. Either sha256 or shash is used,
+ * depending on the value of use_sha256_lib. If shash is used,
+ * then this field is variable-length, with total size
+ * sizeof(struct shash_desc) + crypto_shash_descsize(shash_tfm).
+ * For this reason, this field must be at the end of the struct.
+ */
+ union {
+ struct sha256_ctx sha256;
+ struct shash_desc shash;
+ } hash_ctx;
+};
extern int verity_hash(struct dm_verity *v, struct dm_verity_io *io,
const u8 *data, size_t len, u8 *digest);
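Because hash_ctx is now a union whose shash member carries a variable-length context behind it, the per-io allocation is sized with offsetofend() so the unused (and possibly larger) member is simply truncated. A small userspace sketch of that sizing rule, with stand-in types and a stand-in descsize value:

#include <stddef.h>
#include <stdio.h>

#define offsetofend(TYPE, MEMBER) \
	(offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

struct sha256_ctx { unsigned char opaque[112]; };	/* stand-in */
struct shash_desc { void *tfm; };	/* context bytes follow in memory */

struct io {
	int num_pending;
	union {
		struct sha256_ctx sha256;
		struct shash_desc shash;
	} hash_ctx;	/* must stay last: the shash context extends past it */
};

int main(void)
{
	size_t descsize = 208;	/* stand-in for crypto_shash_descsize(tfm) */

	/* Library fast path: the sha256 member is fully contained. */
	size_t fast = offsetofend(struct io, hash_ctx.sha256);
	/* Generic path: shash_desc is followed by descsize context bytes. */
	size_t slow = offsetofend(struct io, hash_ctx.shash) + descsize;

	printf("per-io size: fast path %zu, generic path %zu\n", fast, slow);
	return 0;
}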
diff --git a/drivers/md/dm-zone.c b/drivers/md/dm-zone.c
index 5a840c4ae316..c95e417194b3 100644
--- a/drivers/md/dm-zone.c
+++ b/drivers/md/dm-zone.c
@@ -203,8 +203,6 @@ int dm_revalidate_zones(struct dm_table *t, struct request_queue *q)
return ret;
}
- md->nr_zones = disk->nr_zones;
-
return 0;
}
@@ -452,7 +450,6 @@ void dm_finalize_zone_settings(struct dm_table *t, struct queue_limits *lim)
set_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
} else {
clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
- md->nr_zones = 0;
md->disk->nr_zones = 0;
}
}
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 6c83ab940af7..b63279202260 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -272,7 +272,7 @@ static int __init dm_init(void)
int r, i;
#if (IS_ENABLED(CONFIG_IMA) && !IS_ENABLED(CONFIG_IMA_DISABLE_HTABLE))
- DMWARN("CONFIG_IMA_DISABLE_HTABLE is disabled."
+ DMINFO("CONFIG_IMA_DISABLE_HTABLE is disabled."
" Duplicate IMA measurements will not be recorded in the IMA log.");
#endif
@@ -1321,6 +1321,7 @@ void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors)
BUG_ON(dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
BUG_ON(bio_sectors > *tio->len_ptr);
BUG_ON(n_sectors > bio_sectors);
+ BUG_ON(bio->bi_opf & REQ_ATOMIC);
if (static_branch_unlikely(&zoned_enabled) &&
unlikely(bdev_is_zoned(bio->bi_bdev))) {
@@ -1735,8 +1736,12 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci)
ci->submit_as_polled = !!(ci->bio->bi_opf & REQ_POLLED);
len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count);
- if (ci->bio->bi_opf & REQ_ATOMIC && len != ci->sector_count)
- return BLK_STS_IOERR;
+ if (ci->bio->bi_opf & REQ_ATOMIC) {
+ if (unlikely(!dm_target_supports_atomic_writes(ti->type)))
+ return BLK_STS_IOERR;
+ if (unlikely(len != ci->sector_count))
+ return BLK_STS_IOERR;
+ }
setup_split_accounting(ci, len);
@@ -2439,7 +2444,6 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
{
struct dm_table *old_map;
sector_t size, old_size;
- int ret;
lockdep_assert_held(&md->suspend_lock);
@@ -2454,11 +2458,13 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
set_capacity(md->disk, size);
- ret = dm_table_set_restrictions(t, md->queue, limits);
- if (ret) {
- set_capacity(md->disk, old_size);
- old_map = ERR_PTR(ret);
- goto out;
+ if (limits) {
+ int ret = dm_table_set_restrictions(t, md->queue, limits);
+ if (ret) {
+ set_capacity(md->disk, old_size);
+ old_map = ERR_PTR(ret);
+ goto out;
+ }
}
/*
@@ -2836,6 +2842,7 @@ static void dm_wq_work(struct work_struct *work)
static void dm_queue_flush(struct mapped_device *md)
{
+ clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
smp_mb__after_atomic();
queue_work(md->wq, &md->work);
@@ -2848,6 +2855,7 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
struct queue_limits limits;
+ bool update_limits = true;
int r;
mutex_lock(&md->suspend_lock);
@@ -2857,19 +2865,30 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
goto out;
/*
+ * To avoid a potential deadlock on the queue limits lock, do not update
+ * the queue limits when swapping tables on an immutable request-based
+ * dm device (dm-multipath) during a noflush suspend. It is userspace's
+ * responsibility to make sure that the new table uses the same limits
+ * as the existing table if it asks for a noflush suspend.
+ */
+ if (dm_request_based(md) && md->immutable_target &&
+ __noflush_suspending(md))
+ update_limits = false;
+ /*
* If the new table has no data devices, retain the existing limits.
* This helps multipath with queue_if_no_path if all paths disappear,
* then new I/O is queued based on these limits, and then some paths
* reappear.
*/
- if (dm_table_has_no_data_devices(table)) {
+ else if (dm_table_has_no_data_devices(table)) {
live_map = dm_get_live_table_fast(md);
if (live_map)
limits = md->queue->limits;
dm_put_live_table_fast(md);
}
- if (!live_map) {
+ if (update_limits && !live_map) {
r = dm_calculate_queue_limits(table, &limits);
if (r) {
map = ERR_PTR(r);
@@ -2877,7 +2896,7 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
}
}
- map = __bind(md, table, &limits);
+ map = __bind(md, table, update_limits ? &limits : NULL);
dm_issue_global_event();
out:
@@ -2930,7 +2949,6 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
/*
* DMF_NOFLUSH_SUSPENDING must be set before presuspend.
- * This flag is cleared before dm_suspend returns.
*/
if (noflush)
set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
@@ -2993,8 +3011,6 @@ static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
if (!r)
set_bit(dmf_suspended_flag, &md->flags);
- if (noflush)
- clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
if (map)
synchronize_srcu(&md->io_barrier);
diff --git a/drivers/platform/Kconfig b/drivers/platform/Kconfig
index 324c69c63f76..312788f249c9 100644
--- a/drivers/platform/Kconfig
+++ b/drivers/platform/Kconfig
@@ -20,3 +20,5 @@ source "drivers/platform/x86/Kconfig"
source "drivers/platform/arm64/Kconfig"
source "drivers/platform/raspberrypi/Kconfig"
+
+source "drivers/platform/wmi/Kconfig"
diff --git a/drivers/platform/Makefile b/drivers/platform/Makefile
index b0935c602ada..fa322e7f8716 100644
--- a/drivers/platform/Makefile
+++ b/drivers/platform/Makefile
@@ -14,3 +14,4 @@ obj-$(CONFIG_CZNIC_PLATFORMS) += cznic/
obj-$(CONFIG_SURFACE_PLATFORMS) += surface/
obj-$(CONFIG_ARM64_PLATFORM_DEVICES) += arm64/
obj-$(CONFIG_BCM2835_VCHIQ) += raspberrypi/
+obj-$(CONFIG_ACPI_WMI) += wmi/
diff --git a/drivers/platform/arm64/lenovo-thinkpad-t14s.c b/drivers/platform/arm64/lenovo-thinkpad-t14s.c
index cf6a1d3b2617..5590302a5694 100644
--- a/drivers/platform/arm64/lenovo-thinkpad-t14s.c
+++ b/drivers/platform/arm64/lenovo-thinkpad-t14s.c
@@ -20,19 +20,23 @@
#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/slab.h>
+#include <linux/pm.h>
#define T14S_EC_CMD_ECRD 0x02
#define T14S_EC_CMD_ECWR 0x03
#define T14S_EC_CMD_EVT 0xf0
-#define T14S_EC_REG_LED 0x0c
-#define T14S_EC_REG_KBD_BL1 0x0d
-#define T14S_EC_REG_KBD_BL2 0xe1
-#define T14S_EC_KBD_BL1_MASK GENMASK_U8(7, 6)
-#define T14S_EC_KBD_BL2_MASK GENMASK_U8(3, 2)
-#define T14S_EC_REG_AUD 0x30
-#define T14S_EC_MIC_MUTE_LED BIT(5)
-#define T14S_EC_SPK_MUTE_LED BIT(6)
+#define T14S_EC_REG_LED 0x0c
+#define T14S_EC_REG_KBD_BL1 0x0d
+#define T14S_EC_REG_MODERN_STANDBY 0xe0
+#define T14S_EC_MODERN_STANDBY_ENTRY BIT(1)
+#define T14S_EC_MODERN_STANDBY_EXIT BIT(0)
+#define T14S_EC_REG_KBD_BL2 0xe1
+#define T14S_EC_KBD_BL1_MASK GENMASK_U8(7, 6)
+#define T14S_EC_KBD_BL2_MASK GENMASK_U8(3, 2)
+#define T14S_EC_REG_AUD 0x30
+#define T14S_EC_MIC_MUTE_LED BIT(5)
+#define T14S_EC_SPK_MUTE_LED BIT(6)
#define T14S_EC_EVT_NONE 0x00
#define T14S_EC_EVT_KEY_FN_4 0x13
@@ -202,6 +206,14 @@ out:
return ret;
}
+static void t14s_ec_write_sequence(struct t14s_ec *ec, u8 reg, u8 val, u8 cnt)
+{
+ int i;
+
+ for (i = 0; i < cnt; i++)
+ regmap_write(ec->regmap, reg, val);
+}
+
static int t14s_led_set_status(struct t14s_ec *ec,
struct t14s_ec_led_classdev *led,
const enum t14s_ec_led_status_t ledstatus)
@@ -554,6 +566,7 @@ static int t14s_ec_probe(struct i2c_client *client)
return -ENOMEM;
ec->dev = dev;
+ i2c_set_clientdata(client, ec);
ec->regmap = devm_regmap_init(dev, &t14s_ec_regmap_bus,
ec, &t14s_ec_regmap_config);
@@ -593,6 +606,30 @@ static int t14s_ec_probe(struct i2c_client *client)
return 0;
}
+static int t14s_ec_suspend(struct device *dev)
+{
+ struct t14s_ec *ec = dev_get_drvdata(dev);
+
+ led_classdev_suspend(&ec->kbd_backlight);
+
+ t14s_ec_write_sequence(ec, T14S_EC_REG_MODERN_STANDBY,
+ T14S_EC_MODERN_STANDBY_ENTRY, 3);
+
+ return 0;
+}
+
+static int t14s_ec_resume(struct device *dev)
+{
+ struct t14s_ec *ec = dev_get_drvdata(dev);
+
+ t14s_ec_write_sequence(ec, T14S_EC_REG_MODERN_STANDBY,
+ T14S_EC_MODERN_STANDBY_EXIT, 3);
+
+ led_classdev_resume(&ec->kbd_backlight);
+
+ return 0;
+}
+
static const struct of_device_id t14s_ec_of_match[] = {
{ .compatible = "lenovo,thinkpad-t14s-ec" },
{}
@@ -605,10 +642,15 @@ static const struct i2c_device_id t14s_ec_i2c_id_table[] = {
};
MODULE_DEVICE_TABLE(i2c, t14s_ec_i2c_id_table);
+static const struct dev_pm_ops t14s_ec_pm_ops = {
+ SYSTEM_SLEEP_PM_OPS(t14s_ec_suspend, t14s_ec_resume)
+};
+
static struct i2c_driver t14s_ec_i2c_driver = {
.driver = {
.name = "thinkpad-t14s-ec",
.of_match_table = t14s_ec_of_match,
+ .pm = &t14s_ec_pm_ops,
},
.probe = t14s_ec_probe,
.id_table = t14s_ec_i2c_id_table,
diff --git a/drivers/platform/surface/aggregator/core.c b/drivers/platform/surface/aggregator/core.c
index c58e1fdd1a5f..c7e05f7bc199 100644
--- a/drivers/platform/surface/aggregator/core.c
+++ b/drivers/platform/surface/aggregator/core.c
@@ -676,7 +676,7 @@ static int ssam_serial_hub_probe(struct serdev_device *serdev)
status = ssam_serdev_setup(ssh, serdev);
if (status) {
- status = dev_err_probe(dev, status, "failed to setup serdev\n");
+ dev_err_probe(dev, status, "failed to setup serdev\n");
goto err_devinit;
}
diff --git a/drivers/platform/surface/aggregator/ssh_packet_layer.c b/drivers/platform/surface/aggregator/ssh_packet_layer.c
index 6081b0146d5f..3dd22856570f 100644
--- a/drivers/platform/surface/aggregator/ssh_packet_layer.c
+++ b/drivers/platform/surface/aggregator/ssh_packet_layer.c
@@ -671,7 +671,7 @@ static void ssh_ptl_timeout_reaper_mod(struct ssh_ptl *ptl, ktime_t now,
/* Re-adjust / schedule reaper only if it is above resolution delta. */
if (ktime_before(aexp, ptl->rtx_timeout.expires)) {
ptl->rtx_timeout.expires = expires;
- mod_delayed_work(system_wq, &ptl->rtx_timeout.reaper, delta);
+ mod_delayed_work(system_percpu_wq, &ptl->rtx_timeout.reaper, delta);
}
spin_unlock(&ptl->rtx_timeout.lock);
diff --git a/drivers/platform/surface/aggregator/ssh_request_layer.c b/drivers/platform/surface/aggregator/ssh_request_layer.c
index 879ca9ee7ff6..a356e4956562 100644
--- a/drivers/platform/surface/aggregator/ssh_request_layer.c
+++ b/drivers/platform/surface/aggregator/ssh_request_layer.c
@@ -434,7 +434,7 @@ static void ssh_rtl_timeout_reaper_mod(struct ssh_rtl *rtl, ktime_t now,
/* Re-adjust / schedule reaper only if it is above resolution delta. */
if (ktime_before(aexp, rtl->rtx_timeout.expires)) {
rtl->rtx_timeout.expires = expires;
- mod_delayed_work(system_wq, &rtl->rtx_timeout.reaper, delta);
+ mod_delayed_work(system_percpu_wq, &rtl->rtx_timeout.reaper, delta);
}
spin_unlock(&rtl->rtx_timeout.lock);
diff --git a/drivers/platform/surface/surface_acpi_notify.c b/drivers/platform/surface/surface_acpi_notify.c
index 3b30cfe3466b..a9dcb0bbe90e 100644
--- a/drivers/platform/surface/surface_acpi_notify.c
+++ b/drivers/platform/surface/surface_acpi_notify.c
@@ -862,7 +862,7 @@ static int __init san_init(void)
{
int ret;
- san_wq = alloc_workqueue("san_wq", 0, 0);
+ san_wq = alloc_workqueue("san_wq", WQ_PERCPU, 0);
if (!san_wq)
return -ENOMEM;
ret = platform_driver_register(&surface_acpi_notify);
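Context for the three workqueue hunks above (an aside, based on the ongoing workqueue API transition): system_wq is being renamed to system_percpu_wq as part of the move away from an implicit per-CPU default, and alloc_workqueue() callers now state the placement explicitly. A short sketch of the two idioms; all names here are illustrative:

#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;

static int example_init(void)
{
	/* Explicitly per-CPU, matching the old implicit default: */
	example_wq = alloc_workqueue("example_wq", WQ_PERCPU, 0);
	/*
	 * Unbound, preferred when the work has no locality needs:
	 * alloc_workqueue("example_wq", WQ_UNBOUND, 0);
	 */
	return example_wq ? 0 : -ENOMEM;
}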
diff --git a/drivers/platform/wmi/Kconfig b/drivers/platform/wmi/Kconfig
new file mode 100644
index 000000000000..77fcbb18746b
--- /dev/null
+++ b/drivers/platform/wmi/Kconfig
@@ -0,0 +1,34 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# ACPI WMI Core
+#
+
+menuconfig ACPI_WMI
+ tristate "ACPI-WMI support"
+ depends on ACPI && X86
+ help
+ This option enables support for the ACPI-WMI driver core.
+
+ The ACPI-WMI interface is a proprietary extension of ACPI allowing
+ the platform firmware to expose WMI (Windows Management Instrumentation)
+ objects used for managing various aspects of the underlying system.
+ Mapping between ACPI control methods and WMI objects happens through
+ special mapper devices (PNP0C14) defined inside the ACPI tables.
+
+	  Enabling this option is necessary for building the vendor-specific
+	  ACPI-WMI client drivers for Acer, Dell and HP machines (among others).
+
+ It is safe to enable this option even for machines that do not contain
+ any ACPI-WMI mapper devices at all.
+
+if ACPI_WMI
+
+config ACPI_WMI_LEGACY_DEVICE_NAMES
+ bool "Use legacy WMI device naming scheme"
+ help
+ Say Y here to force the WMI driver core to use the old WMI device naming
+ scheme when creating WMI devices. Doing so might be necessary for some
+ userspace applications but will cause the registration of WMI devices with
+ the same GUID to fail in some corner cases.
+
+endif # ACPI_WMI
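For orientation (not part of the patch): vendor drivers sit on top of this core and bind to a mapper GUID through the WMI bus. A minimal hedged sketch of such a client; the GUID and all names are placeholders:

// SPDX-License-Identifier: GPL-2.0-or-later
#include <linux/module.h>
#include <linux/wmi.h>

/* Placeholder GUID; a real driver uses the GUID from its _WDG block. */
static const struct wmi_device_id example_wmi_id_table[] = {
	{ .guid_string = "01234567-89AB-CDEF-0123-456789ABCDEF" },
	{ }
};
MODULE_DEVICE_TABLE(wmi, example_wmi_id_table);

static int example_wmi_probe(struct wmi_device *wdev, const void *context)
{
	dev_info(&wdev->dev, "bound to WMI mapper block\n");
	return 0;
}

static struct wmi_driver example_wmi_driver = {
	.driver = {
		.name = "example-wmi",
	},
	.id_table = example_wmi_id_table,
	.probe = example_wmi_probe,
};
module_wmi_driver(example_wmi_driver);

MODULE_LICENSE("GPL");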
diff --git a/drivers/platform/wmi/Makefile b/drivers/platform/wmi/Makefile
new file mode 100644
index 000000000000..98393d7391ec
--- /dev/null
+++ b/drivers/platform/wmi/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Makefile for linux/drivers/platform/wmi
+# ACPI WMI core
+#
+
+wmi-y := core.o
+obj-$(CONFIG_ACPI_WMI) += wmi.o
diff --git a/drivers/platform/x86/wmi.c b/drivers/platform/wmi/core.c
index 4e86a422f05f..6878c4fcb0b5 100644
--- a/drivers/platform/x86/wmi.c
+++ b/drivers/platform/wmi/core.c
@@ -142,14 +142,6 @@ static inline void get_acpi_method_name(const struct wmi_block *wblock,
buffer[4] = '\0';
}
-static inline acpi_object_type get_param_acpi_type(const struct wmi_block *wblock)
-{
- if (wblock->gblock.flags & ACPI_WMI_STRING)
- return ACPI_TYPE_STRING;
- else
- return ACPI_TYPE_BUFFER;
-}
-
static int wmidev_match_guid(struct device *dev, const void *data)
{
struct wmi_block *wblock = dev_to_wblock(dev);
@@ -351,9 +343,16 @@ acpi_status wmidev_evaluate_method(struct wmi_device *wdev, u8 instance, u32 met
params[0].integer.value = instance;
params[1].type = ACPI_TYPE_INTEGER;
params[1].integer.value = method_id;
- params[2].type = get_param_acpi_type(wblock);
- params[2].buffer.length = in->length;
- params[2].buffer.pointer = in->pointer;
+
+ if (wblock->gblock.flags & ACPI_WMI_STRING) {
+ params[2].type = ACPI_TYPE_STRING;
+ params[2].string.length = in->length;
+ params[2].string.pointer = in->pointer;
+ } else {
+ params[2].type = ACPI_TYPE_BUFFER;
+ params[2].buffer.length = in->length;
+ params[2].buffer.pointer = in->pointer;
+ }
get_acpi_method_name(wblock, 'M', method);
@@ -519,9 +518,16 @@ acpi_status wmidev_block_set(struct wmi_device *wdev, u8 instance, const struct
input.pointer = params;
params[0].type = ACPI_TYPE_INTEGER;
params[0].integer.value = instance;
- params[1].type = get_param_acpi_type(wblock);
- params[1].buffer.length = in->length;
- params[1].buffer.pointer = in->pointer;
+
+ if (wblock->gblock.flags & ACPI_WMI_STRING) {
+ params[1].type = ACPI_TYPE_STRING;
+ params[1].string.length = in->length;
+ params[1].string.pointer = in->pointer;
+ } else {
+ params[1].type = ACPI_TYPE_BUFFER;
+ params[1].buffer.length = in->length;
+ params[1].buffer.pointer = in->pointer;
+ }
get_acpi_method_name(wblock, 'S', method);
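Why the open-coded branches replace get_param_acpi_type(): the old helper picked the right type tag, but the callers always filled the .buffer union member, silently relying on .string and .buffer having the same layout. The members are distinct; for reference (abridged from include/acpi/actypes.h):

union acpi_object {
	acpi_object_type type;
	struct {
		acpi_object_type type;
		u32 length;	/* string length, excluding the trailing NUL */
		char *pointer;
	} string;
	struct {
		acpi_object_type type;
		u32 length;	/* raw byte count */
		u8 *pointer;
	} buffer;
	/* ... integer, package, reference, ... */
};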
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index c883a28e0916..4cb7d97a9fcc 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -16,36 +16,6 @@ menuconfig X86_PLATFORM_DEVICES
if X86_PLATFORM_DEVICES
-config ACPI_WMI
- tristate "WMI"
- depends on ACPI
- help
- This driver adds support for the ACPI-WMI (Windows Management
- Instrumentation) mapper device (PNP0C14) found on some systems.
-
- ACPI-WMI is a proprietary extension to ACPI to expose parts of the
- ACPI firmware to userspace - this is done through various vendor
- defined methods and data blocks in a PNP0C14 device, which are then
- made available for userspace to call.
-
- The implementation of this in Linux currently only exposes this to
- other kernel space drivers.
-
- This driver is a required dependency to build the firmware specific
- drivers needed on many machines, including Acer and HP laptops.
-
- It is safe to enable this driver even if your DSDT doesn't define
- any ACPI-WMI devices.
-
-config ACPI_WMI_LEGACY_DEVICE_NAMES
- bool "Use legacy WMI device naming scheme"
- depends on ACPI_WMI
- help
- Say Y here to force the WMI driver core to use the old WMI device naming
- scheme when creating WMI devices. Doing so might be necessary for some
- userspace applications but will cause the registration of WMI devices with
- the same GUID to fail in some corner cases.
-
config WMI_BMOF
tristate "WMI embedded Binary MOF driver"
depends on ACPI_WMI
@@ -74,6 +44,8 @@ config HUAWEI_WMI
To compile this driver as a module, choose M here: the module
will be called huawei-wmi.
+source "drivers/platform/x86/uniwill/Kconfig"
+
config UV_SYSFS
tristate "Sysfs structure for UV systems"
depends on X86_UV
@@ -262,6 +234,18 @@ config ASUS_WIRELESS
If you choose to compile this driver as a module the module will be
called asus-wireless.
+config ASUS_ARMOURY
+ tristate "ASUS Armoury driver"
+ depends on ASUS_WMI
+ select FW_ATTR_CLASS
+ help
+	  Say Y here if you have a WMI-aware ASUS machine and would like to use the
+ firmware_attributes API to control various settings typically exposed in
+ the ASUS Armoury Crate application available on Windows.
+
+ To compile this driver as a module, choose M here: the module will
+ be called asus-armoury.
+
config ASUS_WMI
tristate "ASUS WMI Driver"
depends on ACPI_WMI
@@ -284,6 +268,17 @@ config ASUS_WMI
To compile this driver as a module, choose M here: the module will
be called asus-wmi.
+config ASUS_WMI_DEPRECATED_ATTRS
+ bool "BIOS option support in WMI platform (DEPRECATED)"
+ depends on ASUS_WMI
+ default y
+ help
+ Say Y to expose the configurable BIOS options through the asus-wmi
+ driver.
+
+	  This can be used with or without the asus-armoury driver, which
+	  provides the same attributes plus additional and improved features.
+
config ASUS_NB_WMI
tristate "Asus Notebook WMI Driver"
depends on ASUS_WMI
@@ -316,6 +311,19 @@ config ASUS_TF103C_DOCK
If you have an Asus TF103C tablet say Y or M here, for a generic x86
distro config say M here.
+config AYANEO_EC
+ tristate "Ayaneo EC platform control"
+ depends on DMI
+ depends on ACPI_EC
+ depends on ACPI_BATTERY
+ depends on HWMON
+ help
+ Enables support for the platform EC of Ayaneo devices. This
+ includes fan control, fan speed, charge limit, magic
+ module detection, and controller power control.
+
+ If you have an Ayaneo device, say Y or M here.
+
config MERAKI_MX100
tristate "Cisco Meraki MX100 Platform Driver"
depends on GPIOLIB
@@ -1031,9 +1039,7 @@ config OXP_EC
help
Enables support for the platform EC of OneXPlayer and AOKZOE
handheld devices. This includes fan speed, fan controls, and
- disabling the default TDP behavior of the device. Due to legacy
- reasons, this driver also provides hwmon functionality to Ayaneo
- devices and the OrangePi Neo.
+ disabling the default TDP behavior of the device.
source "drivers/platform/x86/tuxedo/Kconfig"
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index c7db2a88c11a..d25762f7114f 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -5,7 +5,6 @@
#
# Windows Management Interface
-obj-$(CONFIG_ACPI_WMI) += wmi.o
obj-$(CONFIG_WMI_BMOF) += wmi-bmof.o
# WMI drivers
@@ -33,12 +32,16 @@ obj-$(CONFIG_APPLE_GMUX) += apple-gmux.o
# ASUS
obj-$(CONFIG_ASUS_LAPTOP) += asus-laptop.o
obj-$(CONFIG_ASUS_WIRELESS) += asus-wireless.o
+obj-$(CONFIG_ASUS_ARMOURY) += asus-armoury.o
obj-$(CONFIG_ASUS_WMI) += asus-wmi.o
obj-$(CONFIG_ASUS_NB_WMI) += asus-nb-wmi.o
obj-$(CONFIG_ASUS_TF103C_DOCK) += asus-tf103c-dock.o
obj-$(CONFIG_EEEPC_LAPTOP) += eeepc-laptop.o
obj-$(CONFIG_EEEPC_WMI) += eeepc-wmi.o
+# Ayaneo
+obj-$(CONFIG_AYANEO_EC) += ayaneo-ec.o
+
# Cisco/Meraki
obj-$(CONFIG_MERAKI_MX100) += meraki-mx100.o
@@ -110,6 +113,9 @@ obj-$(CONFIG_TOSHIBA_WMI) += toshiba-wmi.o
# before toshiba_acpi initializes
obj-$(CONFIG_ACPI_TOSHIBA) += toshiba_acpi.o
+# Uniwill
+obj-y += uniwill/
+
# Inspur
obj-$(CONFIG_INSPUR_PLATFORM_PROFILE) += inspur_platform_profile.o
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index d848afc91f87..bf97381faf58 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -12,10 +12,12 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
+#include <linux/minmax.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/dmi.h>
+#include <linux/fixp-arith.h>
#include <linux/backlight.h>
#include <linux/leds.h>
#include <linux/platform_device.h>
@@ -68,10 +70,27 @@ MODULE_LICENSE("GPL");
#define ACER_WMID_SET_GAMING_LED_METHODID 2
#define ACER_WMID_GET_GAMING_LED_METHODID 4
#define ACER_WMID_GET_GAMING_SYS_INFO_METHODID 5
-#define ACER_WMID_SET_GAMING_FAN_BEHAVIOR 14
+#define ACER_WMID_SET_GAMING_FAN_BEHAVIOR_METHODID 14
+#define ACER_WMID_GET_GAMING_FAN_BEHAVIOR_METHODID 15
+#define ACER_WMID_SET_GAMING_FAN_SPEED_METHODID 16
+#define ACER_WMID_GET_GAMING_FAN_SPEED_METHODID 17
#define ACER_WMID_SET_GAMING_MISC_SETTING_METHODID 22
#define ACER_WMID_GET_GAMING_MISC_SETTING_METHODID 23
+#define ACER_GAMING_FAN_BEHAVIOR_CPU BIT(0)
+#define ACER_GAMING_FAN_BEHAVIOR_GPU BIT(3)
+
+#define ACER_GAMING_FAN_BEHAVIOR_STATUS_MASK GENMASK_ULL(7, 0)
+#define ACER_GAMING_FAN_BEHAVIOR_ID_MASK GENMASK_ULL(15, 0)
+#define ACER_GAMING_FAN_BEHAVIOR_SET_CPU_MODE_MASK GENMASK(17, 16)
+#define ACER_GAMING_FAN_BEHAVIOR_SET_GPU_MODE_MASK GENMASK(23, 22)
+#define ACER_GAMING_FAN_BEHAVIOR_GET_CPU_MODE_MASK GENMASK(9, 8)
+#define ACER_GAMING_FAN_BEHAVIOR_GET_GPU_MODE_MASK GENMASK(15, 14)
+
+#define ACER_GAMING_FAN_SPEED_STATUS_MASK GENMASK_ULL(7, 0)
+#define ACER_GAMING_FAN_SPEED_ID_MASK GENMASK_ULL(7, 0)
+#define ACER_GAMING_FAN_SPEED_VALUE_MASK GENMASK_ULL(15, 8)
+
#define ACER_GAMING_MISC_SETTING_STATUS_MASK GENMASK_ULL(7, 0)
#define ACER_GAMING_MISC_SETTING_INDEX_MASK GENMASK_ULL(7, 0)
#define ACER_GAMING_MISC_SETTING_VALUE_MASK GENMASK_ULL(15, 8)
@@ -122,6 +141,17 @@ enum acer_wmi_predator_v4_sensor_id {
ACER_WMID_SENSOR_GPU_TEMPERATURE = 0x0A,
};
+enum acer_wmi_gaming_fan_id {
+ ACER_WMID_CPU_FAN = 0x01,
+ ACER_WMID_GPU_FAN = 0x04,
+};
+
+enum acer_wmi_gaming_fan_mode {
+ ACER_WMID_FAN_MODE_AUTO = 0x01,
+ ACER_WMID_FAN_MODE_TURBO = 0x02,
+ ACER_WMID_FAN_MODE_CUSTOM = 0x03,
+};
+
enum acer_wmi_predator_v4_oc {
ACER_WMID_OC_NORMAL = 0x0000,
ACER_WMID_OC_TURBO = 0x0002,
@@ -279,6 +309,7 @@ struct hotkey_function_type_aa {
#define ACER_CAP_TURBO_FAN BIT(9)
#define ACER_CAP_PLATFORM_PROFILE BIT(10)
#define ACER_CAP_HWMON BIT(11)
+#define ACER_CAP_PWM BIT(12)
/*
* Interface type flags
@@ -373,6 +404,7 @@ struct quirk_entry {
u8 cpu_fans;
u8 gpu_fans;
u8 predator_v4;
+ u8 pwm;
};
static struct quirk_entry *quirks;
@@ -392,6 +424,9 @@ static void __init set_quirks(void)
if (quirks->predator_v4)
interface->capability |= ACER_CAP_PLATFORM_PROFILE |
ACER_CAP_HWMON;
+
+ if (quirks->pwm)
+ interface->capability |= ACER_CAP_PWM;
}
static int __init dmi_matched(const struct dmi_system_id *dmi)
@@ -431,6 +466,7 @@ static struct quirk_entry quirk_acer_predator_ph16_72 = {
.cpu_fans = 1,
.gpu_fans = 1,
.predator_v4 = 1,
+ .pwm = 1,
};
static struct quirk_entry quirk_acer_predator_pt14_51 = {
@@ -438,6 +474,7 @@ static struct quirk_entry quirk_acer_predator_pt14_51 = {
.cpu_fans = 1,
.gpu_fans = 1,
.predator_v4 = 1,
+ .pwm = 1,
};
static struct quirk_entry quirk_acer_predator_v4 = {
@@ -658,6 +695,15 @@ static const struct dmi_system_id acer_quirks[] __initconst = {
},
{
.callback = dmi_matched,
+ .ident = "Acer Predator Helios Neo 16",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Predator PHN16-72"),
+ },
+ .driver_data = &quirk_acer_predator_ph16_72,
+ },
+ {
+ .callback = dmi_matched,
.ident = "Acer Predator PH18-71",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
@@ -1564,9 +1610,6 @@ static acpi_status WMID_gaming_set_u64(u64 value, u32 cap)
case ACER_CAP_TURBO_LED:
method_id = ACER_WMID_SET_GAMING_LED_METHODID;
break;
- case ACER_CAP_TURBO_FAN:
- method_id = ACER_WMID_SET_GAMING_FAN_BEHAVIOR;
- break;
default:
return AE_BAD_PARAMETER;
}
@@ -1617,25 +1660,125 @@ static int WMID_gaming_get_sys_info(u32 command, u64 *out)
return 0;
}
-static void WMID_gaming_set_fan_mode(u8 fan_mode)
+static int WMID_gaming_set_fan_behavior(u16 fan_bitmap, enum acer_wmi_gaming_fan_mode mode)
{
- /* fan_mode = 1 is used for auto, fan_mode = 2 used for turbo*/
- u64 gpu_fan_config1 = 0, gpu_fan_config2 = 0;
- int i;
+ acpi_status status;
+ u64 input = 0;
+ u64 result;
+
+ input |= FIELD_PREP(ACER_GAMING_FAN_BEHAVIOR_ID_MASK, fan_bitmap);
+
+ if (fan_bitmap & ACER_GAMING_FAN_BEHAVIOR_CPU)
+ input |= FIELD_PREP(ACER_GAMING_FAN_BEHAVIOR_SET_CPU_MODE_MASK, mode);
+
+ if (fan_bitmap & ACER_GAMING_FAN_BEHAVIOR_GPU)
+ input |= FIELD_PREP(ACER_GAMING_FAN_BEHAVIOR_SET_GPU_MODE_MASK, mode);
+
+ status = WMI_gaming_execute_u64(ACER_WMID_SET_GAMING_FAN_BEHAVIOR_METHODID, input,
+ &result);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ /* The return status must be zero for the operation to have succeeded */
+ if (FIELD_GET(ACER_GAMING_FAN_BEHAVIOR_STATUS_MASK, result))
+ return -EIO;
+
+ return 0;
+}
+
+static int WMID_gaming_get_fan_behavior(u16 fan_bitmap, enum acer_wmi_gaming_fan_mode *mode)
+{
+ acpi_status status;
+ u32 input = 0;
+ u64 result;
+ int value;
+
+ input |= FIELD_PREP(ACER_GAMING_FAN_BEHAVIOR_ID_MASK, fan_bitmap);
+ status = WMI_gaming_execute_u32_u64(ACER_WMID_GET_GAMING_FAN_BEHAVIOR_METHODID, input,
+ &result);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ /* The return status must be zero for the operation to have succeeded */
+ if (FIELD_GET(ACER_GAMING_FAN_BEHAVIOR_STATUS_MASK, result))
+ return -EIO;
+
+ /* Theoretically multiple fans can be specified, but this is currently unused */
+ if (fan_bitmap & ACER_GAMING_FAN_BEHAVIOR_CPU)
+ value = FIELD_GET(ACER_GAMING_FAN_BEHAVIOR_GET_CPU_MODE_MASK, result);
+ else if (fan_bitmap & ACER_GAMING_FAN_BEHAVIOR_GPU)
+ value = FIELD_GET(ACER_GAMING_FAN_BEHAVIOR_GET_GPU_MODE_MASK, result);
+ else
+ return -EINVAL;
+
+ if (value < ACER_WMID_FAN_MODE_AUTO || value > ACER_WMID_FAN_MODE_CUSTOM)
+ return -ENXIO;
+
+ *mode = value;
+
+ return 0;
+}
+
+static void WMID_gaming_set_fan_mode(enum acer_wmi_gaming_fan_mode mode)
+{
+ u16 fan_bitmap = 0;
if (quirks->cpu_fans > 0)
- gpu_fan_config2 |= 1;
- for (i = 0; i < (quirks->cpu_fans + quirks->gpu_fans); ++i)
- gpu_fan_config2 |= 1 << (i + 1);
- for (i = 0; i < quirks->gpu_fans; ++i)
- gpu_fan_config2 |= 1 << (i + 3);
- if (quirks->cpu_fans > 0)
- gpu_fan_config1 |= fan_mode;
- for (i = 0; i < (quirks->cpu_fans + quirks->gpu_fans); ++i)
- gpu_fan_config1 |= fan_mode << (2 * i + 2);
- for (i = 0; i < quirks->gpu_fans; ++i)
- gpu_fan_config1 |= fan_mode << (2 * i + 6);
- WMID_gaming_set_u64(gpu_fan_config2 | gpu_fan_config1 << 16, ACER_CAP_TURBO_FAN);
+ fan_bitmap |= ACER_GAMING_FAN_BEHAVIOR_CPU;
+
+ if (quirks->gpu_fans > 0)
+ fan_bitmap |= ACER_GAMING_FAN_BEHAVIOR_GPU;
+
+ WMID_gaming_set_fan_behavior(fan_bitmap, mode);
+}
+
+static int WMID_gaming_set_gaming_fan_speed(u8 fan, u8 speed)
+{
+ acpi_status status;
+ u64 input = 0;
+ u64 result;
+
+ if (speed > 100)
+ return -EINVAL;
+
+ input |= FIELD_PREP(ACER_GAMING_FAN_SPEED_ID_MASK, fan);
+ input |= FIELD_PREP(ACER_GAMING_FAN_SPEED_VALUE_MASK, speed);
+
+ status = WMI_gaming_execute_u64(ACER_WMID_SET_GAMING_FAN_SPEED_METHODID, input, &result);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ switch (FIELD_GET(ACER_GAMING_FAN_SPEED_STATUS_MASK, result)) {
+ case 0x00:
+ return 0;
+ case 0x01:
+ return -ENODEV;
+ case 0x02:
+ return -EINVAL;
+ default:
+ return -ENXIO;
+ }
+}
+
+static int WMID_gaming_get_gaming_fan_speed(u8 fan, u8 *speed)
+{
+ acpi_status status;
+ u32 input = 0;
+ u64 result;
+
+ input |= FIELD_PREP(ACER_GAMING_FAN_SPEED_ID_MASK, fan);
+
+ status = WMI_gaming_execute_u32_u64(ACER_WMID_GET_GAMING_FAN_SPEED_METHODID, input,
+ &result);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ if (FIELD_GET(ACER_GAMING_FAN_SPEED_STATUS_MASK, result))
+ return -ENODEV;
+
+ *speed = FIELD_GET(ACER_GAMING_FAN_SPEED_VALUE_MASK, result);
+
+ return 0;
}
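+/*
+ * Worked example of the packed layout (illustration only): setting the CPU
+ * fan (ID 0x01) to 50% packs as
+ * FIELD_PREP(ACER_GAMING_FAN_SPEED_ID_MASK, 0x01) |
+ * FIELD_PREP(ACER_GAMING_FAN_SPEED_VALUE_MASK, 50) == 0x3201, and the reply
+ * status is read back from bits 7:0 of the result.
+ */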
static int WMID_gaming_set_misc_setting(enum acer_wmi_gaming_misc_setting setting, u8 value)
@@ -1922,7 +2065,7 @@ static int acer_toggle_turbo(void)
WMID_gaming_set_u64(0x1, ACER_CAP_TURBO_LED);
/* Set FAN mode to auto */
- WMID_gaming_set_fan_mode(0x1);
+ WMID_gaming_set_fan_mode(ACER_WMID_FAN_MODE_AUTO);
/* Set OC to normal */
if (has_cap(ACER_CAP_TURBO_OC)) {
@@ -1936,7 +2079,7 @@ static int acer_toggle_turbo(void)
WMID_gaming_set_u64(0x10001, ACER_CAP_TURBO_LED);
/* Set FAN mode to turbo */
- WMID_gaming_set_fan_mode(0x2);
+ WMID_gaming_set_fan_mode(ACER_WMID_FAN_MODE_TURBO);
/* Set OC to turbo mode */
if (has_cap(ACER_CAP_TURBO_OC)) {
@@ -2767,6 +2910,16 @@ static const enum acer_wmi_predator_v4_sensor_id acer_wmi_fan_channel_to_sensor_
[1] = ACER_WMID_SENSOR_GPU_FAN_SPEED,
};
+static const enum acer_wmi_gaming_fan_id acer_wmi_fan_channel_to_fan_id[] = {
+ [0] = ACER_WMID_CPU_FAN,
+ [1] = ACER_WMID_GPU_FAN,
+};
+
+static const u16 acer_wmi_fan_channel_to_fan_bitmap[] = {
+ [0] = ACER_GAMING_FAN_BEHAVIOR_CPU,
+ [1] = ACER_GAMING_FAN_BEHAVIOR_GPU,
+};
+
static umode_t acer_wmi_hwmon_is_visible(const void *data,
enum hwmon_sensor_types type, u32 attr,
int channel)
@@ -2778,6 +2931,11 @@ static umode_t acer_wmi_hwmon_is_visible(const void *data,
case hwmon_temp:
sensor_id = acer_wmi_temp_channel_to_sensor_id[channel];
break;
+ case hwmon_pwm:
+ if (!has_cap(ACER_CAP_PWM))
+ return 0;
+
+ fallthrough;
case hwmon_fan:
sensor_id = acer_wmi_fan_channel_to_sensor_id[channel];
break;
@@ -2785,8 +2943,12 @@ static umode_t acer_wmi_hwmon_is_visible(const void *data,
return 0;
}
- if (*supported_sensors & BIT(sensor_id - 1))
+ if (*supported_sensors & BIT(sensor_id - 1)) {
+ if (type == hwmon_pwm)
+ return 0644;
+
return 0444;
+ }
return 0;
}
@@ -2795,6 +2957,9 @@ static int acer_wmi_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *val)
{
u64 command = ACER_WMID_CMD_GET_PREDATOR_V4_SENSOR_READING;
+ enum acer_wmi_gaming_fan_mode mode;
+ u16 fan_bitmap;
+ u8 fan, speed;
u64 result;
int ret;
@@ -2820,6 +2985,80 @@ static int acer_wmi_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
*val = FIELD_GET(ACER_PREDATOR_V4_SENSOR_READING_BIT_MASK, result);
return 0;
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ fan = acer_wmi_fan_channel_to_fan_id[channel];
+ ret = WMID_gaming_get_gaming_fan_speed(fan, &speed);
+ if (ret < 0)
+ return ret;
+
+ *val = fixp_linear_interpolate(0, 0, 100, U8_MAX, speed);
+ return 0;
+ case hwmon_pwm_enable:
+ fan_bitmap = acer_wmi_fan_channel_to_fan_bitmap[channel];
+ ret = WMID_gaming_get_fan_behavior(fan_bitmap, &mode);
+ if (ret < 0)
+ return ret;
+
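+			/*
+			 * sysfs pwm_enable convention: 0 = full speed (turbo mode),
+			 * 1 = manual control (custom mode), 2 = automatic control.
+			 */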
+ switch (mode) {
+ case ACER_WMID_FAN_MODE_AUTO:
+ *val = 2;
+ return 0;
+ case ACER_WMID_FAN_MODE_TURBO:
+ *val = 0;
+ return 0;
+ case ACER_WMID_FAN_MODE_CUSTOM:
+ *val = 1;
+ return 0;
+ default:
+ return -ENXIO;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int acer_wmi_hwmon_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ enum acer_wmi_gaming_fan_mode mode;
+ u16 fan_bitmap;
+ u8 fan, speed;
+
+ switch (type) {
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
+ fan = acer_wmi_fan_channel_to_fan_id[channel];
+ speed = fixp_linear_interpolate(0, 0, U8_MAX, 100,
+ clamp_val(val, 0, U8_MAX));
+
+ return WMID_gaming_set_gaming_fan_speed(fan, speed);
+ case hwmon_pwm_enable:
+ fan_bitmap = acer_wmi_fan_channel_to_fan_bitmap[channel];
+
+ switch (val) {
+ case 0:
+ mode = ACER_WMID_FAN_MODE_TURBO;
+ break;
+ case 1:
+ mode = ACER_WMID_FAN_MODE_CUSTOM;
+ break;
+ case 2:
+ mode = ACER_WMID_FAN_MODE_AUTO;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return WMID_gaming_set_fan_behavior(fan_bitmap, mode);
+ default:
+ return -EOPNOTSUPP;
+ }
default:
return -EOPNOTSUPP;
}
@@ -2835,11 +3074,16 @@ static const struct hwmon_channel_info *const acer_wmi_hwmon_info[] = {
HWMON_F_INPUT,
HWMON_F_INPUT
),
+ HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE,
+ HWMON_PWM_INPUT | HWMON_PWM_ENABLE
+ ),
NULL
};
static const struct hwmon_ops acer_wmi_hwmon_ops = {
.read = acer_wmi_hwmon_read,
+ .write = acer_wmi_hwmon_write,
.is_visible = acer_wmi_hwmon_is_visible,
};
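The hwmon pwm channel above converts between the firmware's 0-100 percent scale and the sysfs 0-255 scale via fixp_linear_interpolate(). A self-contained sketch of that conversion (function names are illustrative):

#include <linux/fixp-arith.h>
#include <linux/limits.h>
#include <linux/minmax.h>

/* 0..100 % -> 0..255 pwm units; 50 % maps to 127 (integer truncation). */
static u8 example_pwm_from_percent(u8 percent)
{
	return fixp_linear_interpolate(0, 0, 100, U8_MAX, percent);
}

/* 0..255 pwm units -> 0..100 %; clamp first, as the write path does. */
static u8 example_percent_from_pwm(long pwm)
{
	return fixp_linear_interpolate(0, 0, U8_MAX, 100, clamp_val(pwm, 0, U8_MAX));
}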
diff --git a/drivers/platform/x86/amd/hfi/hfi.c b/drivers/platform/x86/amd/hfi/hfi.c
index a465ac6f607e..83863a5e0fbc 100644
--- a/drivers/platform/x86/amd/hfi/hfi.c
+++ b/drivers/platform/x86/amd/hfi/hfi.c
@@ -12,7 +12,6 @@
#include <linux/acpi.h>
#include <linux/cpu.h>
-#include <linux/cpumask.h>
#include <linux/debugfs.h>
#include <linux/gfp.h>
#include <linux/init.h>
@@ -95,7 +94,6 @@ struct amd_hfi_classes {
* struct amd_hfi_cpuinfo - HFI workload class info per CPU
* @cpu: CPU index
* @apic_id: APIC id of the current CPU
- * @cpus: mask of CPUs associated with amd_hfi_cpuinfo
* @class_index: workload class ID index
* @nr_class: max number of workload class supported
* @ipcc_scores: ipcc scores for each class
@@ -106,7 +104,6 @@ struct amd_hfi_classes {
struct amd_hfi_cpuinfo {
int cpu;
u32 apic_id;
- cpumask_var_t cpus;
s16 class_index;
u8 nr_class;
int *ipcc_scores;
@@ -295,11 +292,6 @@ static int amd_hfi_online(unsigned int cpu)
guard(mutex)(&hfi_cpuinfo_lock);
- if (!zalloc_cpumask_var(&hfi_info->cpus, GFP_KERNEL))
- return -ENOMEM;
-
- cpumask_set_cpu(cpu, hfi_info->cpus);
-
ret = amd_hfi_set_state(cpu, true);
if (ret)
pr_err("WCT enable failed for CPU %u\n", cpu);
@@ -329,8 +321,6 @@ static int amd_hfi_offline(unsigned int cpu)
if (ret)
pr_err("WCT disable failed for CPU %u\n", cpu);
- free_cpumask_var(hfi_info->cpus);
-
return ret;
}
@@ -515,7 +505,6 @@ static int amd_hfi_probe(struct platform_device *pdev)
static struct platform_driver amd_hfi_driver = {
.driver = {
.name = AMD_HFI_DRIVER,
- .owner = THIS_MODULE,
.pm = &amd_hfi_pm_ops,
.acpi_match_table = ACPI_PTR(amd_hfi_platform_match),
},
diff --git a/drivers/platform/x86/amd/hsmp/acpi.c b/drivers/platform/x86/amd/hsmp/acpi.c
index d0b74d243ce4..97ed71593bdf 100644
--- a/drivers/platform/x86/amd/hsmp/acpi.c
+++ b/drivers/platform/x86/amd/hsmp/acpi.c
@@ -22,12 +22,11 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sysfs.h>
+#include <linux/topology.h>
#include <linux/uuid.h>
#include <uapi/asm-generic/errno-base.h>
-#include <asm/amd/node.h>
-
#include "hsmp.h"
#define DRIVER_NAME "hsmp_acpi"
@@ -586,9 +585,9 @@ static int hsmp_acpi_probe(struct platform_device *pdev)
return -ENOMEM;
if (!hsmp_pdev->is_probed) {
- hsmp_pdev->num_sockets = amd_num_nodes();
- if (hsmp_pdev->num_sockets == 0 || hsmp_pdev->num_sockets > MAX_AMD_NUM_NODES) {
- dev_err(&pdev->dev, "Wrong number of sockets\n");
+ hsmp_pdev->num_sockets = topology_max_packages();
+ if (!hsmp_pdev->num_sockets) {
+ dev_err(&pdev->dev, "No CPU sockets detected\n");
return -ENODEV;
}
diff --git a/drivers/platform/x86/amd/pmf/auto-mode.c b/drivers/platform/x86/amd/pmf/auto-mode.c
index a184922bba8d..faf15a8f74bb 100644
--- a/drivers/platform/x86/amd/pmf/auto-mode.c
+++ b/drivers/platform/x86/amd/pmf/auto-mode.c
@@ -114,14 +114,14 @@ static void amd_pmf_set_automode(struct amd_pmf_dev *dev, int idx,
{
struct power_table_control *pwr_ctrl = &config_store.mode_set[idx].power_control;
- amd_pmf_send_cmd(dev, SET_SPL, false, pwr_ctrl->spl, NULL);
- amd_pmf_send_cmd(dev, SET_FPPT, false, pwr_ctrl->fppt, NULL);
- amd_pmf_send_cmd(dev, SET_SPPT, false, pwr_ctrl->sppt, NULL);
- amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false, pwr_ctrl->sppt_apu_only, NULL);
- amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false, pwr_ctrl->stt_min, NULL);
- amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false,
+ amd_pmf_send_cmd(dev, SET_SPL, SET_CMD, pwr_ctrl->spl, NULL);
+ amd_pmf_send_cmd(dev, SET_FPPT, SET_CMD, pwr_ctrl->fppt, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT, SET_CMD, pwr_ctrl->sppt, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, SET_CMD, pwr_ctrl->sppt_apu_only, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, SET_CMD, pwr_ctrl->stt_min, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, SET_CMD,
fixp_q88_fromint(pwr_ctrl->stt_skin_temp[STT_TEMP_APU]), NULL);
- amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false,
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, SET_CMD,
fixp_q88_fromint(pwr_ctrl->stt_skin_temp[STT_TEMP_HS2]), NULL);
if (is_apmf_func_supported(dev, APMF_FUNC_SET_FAN_IDX))
diff --git a/drivers/platform/x86/amd/pmf/cnqf.c b/drivers/platform/x86/amd/pmf/cnqf.c
index 207a0b33d8d3..5469fefb6001 100644
--- a/drivers/platform/x86/amd/pmf/cnqf.c
+++ b/drivers/platform/x86/amd/pmf/cnqf.c
@@ -76,14 +76,14 @@ static int amd_pmf_set_cnqf(struct amd_pmf_dev *dev, int src, int idx,
pc = &config_store.mode_set[src][idx].power_control;
- amd_pmf_send_cmd(dev, SET_SPL, false, pc->spl, NULL);
- amd_pmf_send_cmd(dev, SET_FPPT, false, pc->fppt, NULL);
- amd_pmf_send_cmd(dev, SET_SPPT, false, pc->sppt, NULL);
- amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false, pc->sppt_apu_only, NULL);
- amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false, pc->stt_min, NULL);
- amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false,
+ amd_pmf_send_cmd(dev, SET_SPL, SET_CMD, pc->spl, NULL);
+ amd_pmf_send_cmd(dev, SET_FPPT, SET_CMD, pc->fppt, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT, SET_CMD, pc->sppt, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, SET_CMD, pc->sppt_apu_only, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, SET_CMD, pc->stt_min, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, SET_CMD,
fixp_q88_fromint(pc->stt_skin_temp[STT_TEMP_APU]), NULL);
- amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false,
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, SET_CMD,
fixp_q88_fromint(pc->stt_skin_temp[STT_TEMP_HS2]), NULL);
if (is_apmf_func_supported(dev, APMF_FUNC_SET_FAN_IDX))
diff --git a/drivers/platform/x86/amd/pmf/core.c b/drivers/platform/x86/amd/pmf/core.c
index bc544a4a5266..8fc293c9c538 100644
--- a/drivers/platform/x86/amd/pmf/core.c
+++ b/drivers/platform/x86/amd/pmf/core.c
@@ -131,7 +131,7 @@ static void amd_pmf_get_metrics(struct work_struct *work)
/* Transfer table contents */
memset(dev->buf, 0, sizeof(dev->m_table));
- amd_pmf_send_cmd(dev, SET_TRANSFER_TABLE, 0, 7, NULL);
+ amd_pmf_send_cmd(dev, SET_TRANSFER_TABLE, SET_CMD, METRICS_TABLE_ID, NULL);
memcpy(&dev->m_table, dev->buf, sizeof(dev->m_table));
time_elapsed_ms = ktime_to_ms(ktime_get()) - dev->start_time;
@@ -289,8 +289,8 @@ int amd_pmf_set_dram_addr(struct amd_pmf_dev *dev, bool alloc_buffer)
hi = phys_addr >> 32;
low = phys_addr & GENMASK(31, 0);
- amd_pmf_send_cmd(dev, SET_DRAM_ADDR_HIGH, 0, hi, NULL);
- amd_pmf_send_cmd(dev, SET_DRAM_ADDR_LOW, 0, low, NULL);
+ amd_pmf_send_cmd(dev, SET_DRAM_ADDR_HIGH, SET_CMD, hi, NULL);
+ amd_pmf_send_cmd(dev, SET_DRAM_ADDR_LOW, SET_CMD, low, NULL);
return 0;
}
@@ -465,9 +465,17 @@ static int amd_pmf_probe(struct platform_device *pdev)
if (!dev->regbase)
return -ENOMEM;
- mutex_init(&dev->lock);
- mutex_init(&dev->update_mutex);
- mutex_init(&dev->cb_mutex);
+ err = devm_mutex_init(dev->dev, &dev->lock);
+ if (err)
+ return err;
+
+ err = devm_mutex_init(dev->dev, &dev->update_mutex);
+ if (err)
+ return err;
+
+ err = devm_mutex_init(dev->dev, &dev->cb_mutex);
+ if (err)
+ return err;
apmf_acpi_init(dev);
platform_set_drvdata(pdev, dev);
@@ -491,9 +499,6 @@ static void amd_pmf_remove(struct platform_device *pdev)
amd_pmf_notify_sbios_heartbeat_event_v2(dev, ON_UNLOAD);
apmf_acpi_deinit(dev);
amd_pmf_dbgfs_unregister(dev);
- mutex_destroy(&dev->lock);
- mutex_destroy(&dev->update_mutex);
- mutex_destroy(&dev->cb_mutex);
}
static const struct attribute_group *amd_pmf_driver_groups[] = {
diff --git a/drivers/platform/x86/amd/pmf/pmf.h b/drivers/platform/x86/amd/pmf/pmf.h
index bd19f2a6bc78..9144c8c3bbaf 100644
--- a/drivers/platform/x86/amd/pmf/pmf.h
+++ b/drivers/platform/x86/amd/pmf/pmf.h
@@ -119,6 +119,13 @@ struct cookie_header {
#define APTS_MAX_STATES 16
#define CUSTOM_BIOS_INPUT_BITS GENMASK(16, 7)
+#define BIOS_INPUTS_MAX 10
+
+/* amd_pmf_send_cmd() set/get */
+#define SET_CMD false
+#define GET_CMD true
+
+#define METRICS_TABLE_ID 7
typedef void (*apmf_event_handler_t)(acpi_handle handle, u32 event, void *data);
@@ -204,7 +211,7 @@ struct apmf_sbios_req_v1 {
u8 skin_temp_apu;
u8 skin_temp_hs2;
u8 enable_cnqf;
- u32 custom_policy[10];
+ u32 custom_policy[BIOS_INPUTS_MAX];
} __packed;
struct apmf_sbios_req_v2 {
@@ -216,7 +223,7 @@ struct apmf_sbios_req_v2 {
u32 stt_min_limit;
u8 skin_temp_apu;
u8 skin_temp_hs2;
- u32 custom_policy[10];
+ u32 custom_policy[BIOS_INPUTS_MAX];
} __packed;
struct apmf_fan_idx {
@@ -243,12 +250,12 @@ struct smu_pmf_metrics_v2 {
u16 vclk_freq; /* MHz */
u16 vcn_activity; /* VCN busy % [0-100] */
u16 vpeclk_freq; /* MHz */
- u16 ipuclk_freq; /* MHz */
- u16 ipu_busy[8]; /* NPU busy % [0-100] */
+ u16 npuclk_freq; /* MHz */
+ u16 npu_busy[8]; /* NPU busy % [0-100] */
u16 dram_reads; /* MB/sec */
u16 dram_writes; /* MB/sec */
u16 core_c0residency[16]; /* C0 residency % [0-100] */
- u16 ipu_power; /* mW */
+ u16 npu_power; /* mW */
u32 apu_power; /* mW */
u32 gfx_power; /* mW */
u32 dgpu_power; /* mW */
@@ -257,9 +264,9 @@ struct smu_pmf_metrics_v2 {
u32 filter_alpha_value; /* time constant [us] */
u32 metrics_counter;
u16 memclk_freq; /* MHz */
- u16 mpipuclk_freq; /* MHz */
- u16 ipu_reads; /* MB/sec */
- u16 ipu_writes; /* MB/sec */
+ u16 mpnpuclk_freq; /* MHz */
+ u16 npu_reads; /* MB/sec */
+ u16 npu_writes; /* MB/sec */
u32 throttle_residency_prochot;
u32 throttle_residency_spl;
u32 throttle_residency_fppt;
@@ -355,7 +362,7 @@ enum power_modes_v2 {
};
struct pmf_bios_inputs_prev {
- u32 custom_bios_inputs[10];
+ u32 custom_bios_inputs[BIOS_INPUTS_MAX];
};
struct amd_pmf_dev {
@@ -451,7 +458,7 @@ struct os_power_slider {
struct amd_pmf_notify_smart_pc_update {
u16 size;
u32 pending_req;
- u32 custom_bios[10];
+ u32 custom_bios[BIOS_INPUTS_MAX];
} __packed;
struct fan_table_control {
diff --git a/drivers/platform/x86/amd/pmf/spc.c b/drivers/platform/x86/amd/pmf/spc.c
index 85192c7536b8..0a37dc6a7950 100644
--- a/drivers/platform/x86/amd/pmf/spc.c
+++ b/drivers/platform/x86/amd/pmf/spc.c
@@ -202,7 +202,7 @@ static void amd_pmf_get_smu_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_ta
{
/* Get the updated metrics table data */
memset(dev->buf, 0, dev->mtable_size);
- amd_pmf_send_cmd(dev, SET_TRANSFER_TABLE, 0, 7, NULL);
+ amd_pmf_send_cmd(dev, SET_TRANSFER_TABLE, SET_CMD, METRICS_TABLE_ID, NULL);
switch (dev->cpu_id) {
case AMD_CPU_ID_PS:
diff --git a/drivers/platform/x86/amd/pmf/sps.c b/drivers/platform/x86/amd/pmf/sps.c
index c28f3c5744c2..0b70a5153f46 100644
--- a/drivers/platform/x86/amd/pmf/sps.c
+++ b/drivers/platform/x86/amd/pmf/sps.c
@@ -192,15 +192,15 @@ static void amd_pmf_load_defaults_sps(struct amd_pmf_dev *dev)
static void amd_pmf_update_slider_v2(struct amd_pmf_dev *dev, int idx)
{
- amd_pmf_send_cmd(dev, SET_PMF_PPT, false, apts_config_store.val[idx].pmf_ppt, NULL);
- amd_pmf_send_cmd(dev, SET_PMF_PPT_APU_ONLY, false,
+ amd_pmf_send_cmd(dev, SET_PMF_PPT, SET_CMD, apts_config_store.val[idx].pmf_ppt, NULL);
+ amd_pmf_send_cmd(dev, SET_PMF_PPT_APU_ONLY, SET_CMD,
apts_config_store.val[idx].ppt_pmf_apu_only, NULL);
- amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false,
+ amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, SET_CMD,
apts_config_store.val[idx].stt_min_limit, NULL);
- amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false,
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, SET_CMD,
fixp_q88_fromint(apts_config_store.val[idx].stt_skin_temp_limit_apu),
NULL);
- amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false,
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, SET_CMD,
fixp_q88_fromint(apts_config_store.val[idx].stt_skin_temp_limit_hs2),
NULL);
}
@@ -211,30 +211,30 @@ void amd_pmf_update_slider(struct amd_pmf_dev *dev, bool op, int idx,
int src = amd_pmf_get_power_source();
if (op == SLIDER_OP_SET) {
- amd_pmf_send_cmd(dev, SET_SPL, false, config_store.prop[src][idx].spl, NULL);
- amd_pmf_send_cmd(dev, SET_FPPT, false, config_store.prop[src][idx].fppt, NULL);
- amd_pmf_send_cmd(dev, SET_SPPT, false, config_store.prop[src][idx].sppt, NULL);
- amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false,
+ amd_pmf_send_cmd(dev, SET_SPL, SET_CMD, config_store.prop[src][idx].spl, NULL);
+ amd_pmf_send_cmd(dev, SET_FPPT, SET_CMD, config_store.prop[src][idx].fppt, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT, SET_CMD, config_store.prop[src][idx].sppt, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, SET_CMD,
config_store.prop[src][idx].sppt_apu_only, NULL);
- amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false,
+ amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, SET_CMD,
config_store.prop[src][idx].stt_min, NULL);
- amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false,
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, SET_CMD,
fixp_q88_fromint(config_store.prop[src][idx].stt_skin_temp[STT_TEMP_APU]),
NULL);
- amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false,
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, SET_CMD,
fixp_q88_fromint(config_store.prop[src][idx].stt_skin_temp[STT_TEMP_HS2]),
NULL);
} else if (op == SLIDER_OP_GET) {
- amd_pmf_send_cmd(dev, GET_SPL, true, ARG_NONE, &table->prop[src][idx].spl);
- amd_pmf_send_cmd(dev, GET_FPPT, true, ARG_NONE, &table->prop[src][idx].fppt);
- amd_pmf_send_cmd(dev, GET_SPPT, true, ARG_NONE, &table->prop[src][idx].sppt);
- amd_pmf_send_cmd(dev, GET_SPPT_APU_ONLY, true, ARG_NONE,
+ amd_pmf_send_cmd(dev, GET_SPL, GET_CMD, ARG_NONE, &table->prop[src][idx].spl);
+ amd_pmf_send_cmd(dev, GET_FPPT, GET_CMD, ARG_NONE, &table->prop[src][idx].fppt);
+ amd_pmf_send_cmd(dev, GET_SPPT, GET_CMD, ARG_NONE, &table->prop[src][idx].sppt);
+ amd_pmf_send_cmd(dev, GET_SPPT_APU_ONLY, GET_CMD, ARG_NONE,
&table->prop[src][idx].sppt_apu_only);
- amd_pmf_send_cmd(dev, GET_STT_MIN_LIMIT, true, ARG_NONE,
+ amd_pmf_send_cmd(dev, GET_STT_MIN_LIMIT, GET_CMD, ARG_NONE,
&table->prop[src][idx].stt_min);
- amd_pmf_send_cmd(dev, GET_STT_LIMIT_APU, true, ARG_NONE,
+ amd_pmf_send_cmd(dev, GET_STT_LIMIT_APU, GET_CMD, ARG_NONE,
(u32 *)&table->prop[src][idx].stt_skin_temp[STT_TEMP_APU]);
- amd_pmf_send_cmd(dev, GET_STT_LIMIT_HS2, true, ARG_NONE,
+ amd_pmf_send_cmd(dev, GET_STT_LIMIT_HS2, GET_CMD, ARG_NONE,
(u32 *)&table->prop[src][idx].stt_skin_temp[STT_TEMP_HS2]);
}
}
diff --git a/drivers/platform/x86/amd/pmf/tee-if.c b/drivers/platform/x86/amd/pmf/tee-if.c
index 6e8116bef4f6..0abce76f89ff 100644
--- a/drivers/platform/x86/amd/pmf/tee-if.c
+++ b/drivers/platform/x86/amd/pmf/tee-if.c
@@ -73,17 +73,56 @@ static void amd_pmf_update_uevents(struct amd_pmf_dev *dev, u16 event)
input_sync(dev->pmf_idev);
}
+static int amd_pmf_get_bios_output_idx(u32 action_idx)
+{
+ switch (action_idx) {
+ case PMF_POLICY_BIOS_OUTPUT_1:
+ return 0;
+ case PMF_POLICY_BIOS_OUTPUT_2:
+ return 1;
+ case PMF_POLICY_BIOS_OUTPUT_3:
+ return 2;
+ case PMF_POLICY_BIOS_OUTPUT_4:
+ return 3;
+ case PMF_POLICY_BIOS_OUTPUT_5:
+ return 4;
+ case PMF_POLICY_BIOS_OUTPUT_6:
+ return 5;
+ case PMF_POLICY_BIOS_OUTPUT_7:
+ return 6;
+ case PMF_POLICY_BIOS_OUTPUT_8:
+ return 7;
+ case PMF_POLICY_BIOS_OUTPUT_9:
+ return 8;
+ case PMF_POLICY_BIOS_OUTPUT_10:
+ return 9;
+ default:
+ return -EINVAL;
+ }
+}
+
+static void amd_pmf_update_bios_output(struct amd_pmf_dev *pdev, struct ta_pmf_action *action)
+{
+	int bios_idx;
+
+	bios_idx = amd_pmf_get_bios_output_idx(action->action_index);
+	if (bios_idx < 0)
+		return;
+
+	amd_pmf_smartpc_apply_bios_output(pdev, action->value, BIT(bios_idx), bios_idx);
+}
+
static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_result *out)
{
+ struct ta_pmf_action *action;
u32 val;
int idx;
for (idx = 0; idx < out->actions_count; idx++) {
- val = out->actions_list[idx].value;
- switch (out->actions_list[idx].action_index) {
+ action = &out->actions_list[idx];
+ val = action->value;
+ switch (action->action_index) {
case PMF_POLICY_SPL:
if (dev->prev_data->spl != val) {
- amd_pmf_send_cmd(dev, SET_SPL, false, val, NULL);
+ amd_pmf_send_cmd(dev, SET_SPL, SET_CMD, val, NULL);
dev_dbg(dev->dev, "update SPL: %u\n", val);
dev->prev_data->spl = val;
}
@@ -91,7 +130,7 @@ static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_
case PMF_POLICY_SPPT:
if (dev->prev_data->sppt != val) {
- amd_pmf_send_cmd(dev, SET_SPPT, false, val, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT, SET_CMD, val, NULL);
dev_dbg(dev->dev, "update SPPT: %u\n", val);
dev->prev_data->sppt = val;
}
@@ -99,7 +138,7 @@ static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_
case PMF_POLICY_FPPT:
if (dev->prev_data->fppt != val) {
- amd_pmf_send_cmd(dev, SET_FPPT, false, val, NULL);
+ amd_pmf_send_cmd(dev, SET_FPPT, SET_CMD, val, NULL);
dev_dbg(dev->dev, "update FPPT: %u\n", val);
dev->prev_data->fppt = val;
}
@@ -107,7 +146,7 @@ static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_
case PMF_POLICY_SPPT_APU_ONLY:
if (dev->prev_data->sppt_apuonly != val) {
- amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, false, val, NULL);
+ amd_pmf_send_cmd(dev, SET_SPPT_APU_ONLY, SET_CMD, val, NULL);
dev_dbg(dev->dev, "update SPPT_APU_ONLY: %u\n", val);
dev->prev_data->sppt_apuonly = val;
}
@@ -115,7 +154,7 @@ static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_
case PMF_POLICY_STT_MIN:
if (dev->prev_data->stt_minlimit != val) {
- amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, false, val, NULL);
+ amd_pmf_send_cmd(dev, SET_STT_MIN_LIMIT, SET_CMD, val, NULL);
dev_dbg(dev->dev, "update STT_MIN: %u\n", val);
dev->prev_data->stt_minlimit = val;
}
@@ -123,7 +162,7 @@ static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_
case PMF_POLICY_STT_SKINTEMP_APU:
if (dev->prev_data->stt_skintemp_apu != val) {
- amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, false,
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_APU, SET_CMD,
fixp_q88_fromint(val), NULL);
dev_dbg(dev->dev, "update STT_SKINTEMP_APU: %u\n", val);
dev->prev_data->stt_skintemp_apu = val;
@@ -132,7 +171,7 @@ static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_
case PMF_POLICY_STT_SKINTEMP_HS2:
if (dev->prev_data->stt_skintemp_hs2 != val) {
- amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, false,
+ amd_pmf_send_cmd(dev, SET_STT_LIMIT_HS2, SET_CMD,
fixp_q88_fromint(val), NULL);
dev_dbg(dev->dev, "update STT_SKINTEMP_HS2: %u\n", val);
dev->prev_data->stt_skintemp_hs2 = val;
@@ -141,7 +180,7 @@ static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_
case PMF_POLICY_P3T:
if (dev->prev_data->p3t_limit != val) {
- amd_pmf_send_cmd(dev, SET_P3T, false, val, NULL);
+ amd_pmf_send_cmd(dev, SET_P3T, SET_CMD, val, NULL);
dev_dbg(dev->dev, "update P3T: %u\n", val);
dev->prev_data->p3t_limit = val;
}
@@ -149,7 +188,7 @@ static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_
case PMF_POLICY_PMF_PPT:
if (dev->prev_data->pmf_ppt != val) {
- amd_pmf_send_cmd(dev, SET_PMF_PPT, false, val, NULL);
+ amd_pmf_send_cmd(dev, SET_PMF_PPT, SET_CMD, val, NULL);
dev_dbg(dev->dev, "update PMF PPT: %u\n", val);
dev->prev_data->pmf_ppt = val;
}
@@ -157,7 +196,7 @@ static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_
case PMF_POLICY_PMF_PPT_APU_ONLY:
if (dev->prev_data->pmf_ppt_apu_only != val) {
- amd_pmf_send_cmd(dev, SET_PMF_PPT_APU_ONLY, false, val, NULL);
+ amd_pmf_send_cmd(dev, SET_PMF_PPT_APU_ONLY, SET_CMD, val, NULL);
dev_dbg(dev->dev, "update PMF PPT APU ONLY: %u\n", val);
dev->prev_data->pmf_ppt_apu_only = val;
}
@@ -183,43 +222,16 @@ static void amd_pmf_apply_policies(struct amd_pmf_dev *dev, struct ta_pmf_enact_
break;
case PMF_POLICY_BIOS_OUTPUT_1:
- amd_pmf_smartpc_apply_bios_output(dev, val, BIT(0), 0);
- break;
-
case PMF_POLICY_BIOS_OUTPUT_2:
- amd_pmf_smartpc_apply_bios_output(dev, val, BIT(1), 1);
- break;
-
case PMF_POLICY_BIOS_OUTPUT_3:
- amd_pmf_smartpc_apply_bios_output(dev, val, BIT(2), 2);
- break;
-
case PMF_POLICY_BIOS_OUTPUT_4:
- amd_pmf_smartpc_apply_bios_output(dev, val, BIT(3), 3);
- break;
-
case PMF_POLICY_BIOS_OUTPUT_5:
- amd_pmf_smartpc_apply_bios_output(dev, val, BIT(4), 4);
- break;
-
case PMF_POLICY_BIOS_OUTPUT_6:
- amd_pmf_smartpc_apply_bios_output(dev, val, BIT(5), 5);
- break;
-
case PMF_POLICY_BIOS_OUTPUT_7:
- amd_pmf_smartpc_apply_bios_output(dev, val, BIT(6), 6);
- break;
-
case PMF_POLICY_BIOS_OUTPUT_8:
- amd_pmf_smartpc_apply_bios_output(dev, val, BIT(7), 7);
- break;
-
case PMF_POLICY_BIOS_OUTPUT_9:
- amd_pmf_smartpc_apply_bios_output(dev, val, BIT(8), 8);
- break;
-
case PMF_POLICY_BIOS_OUTPUT_10:
- amd_pmf_smartpc_apply_bios_output(dev, val, BIT(9), 9);
+ amd_pmf_update_bios_output(dev, action);
break;
}
}
diff --git a/drivers/platform/x86/asus-armoury.c b/drivers/platform/x86/asus-armoury.c
new file mode 100644
index 000000000000..9c1a9ad42bc4
--- /dev/null
+++ b/drivers/platform/x86/asus-armoury.c
@@ -0,0 +1,1161 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Asus Armoury (WMI) attributes driver.
+ *
+ * This driver uses the fw_attributes class to expose various WMI functions
+ * that are present in many gaming and some non-gaming ASUS laptops.
+ *
+ * These typically don't fit anywhere else in sysfs, such as under the LED
+ * class, hwmon or others, and are set in Windows using the ASUS Armoury
+ * Crate tool.
+ *
+ * Copyright(C) 2024 Luke Jones <luke@ljones.dev>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/array_size.h>
+#include <linux/bitfield.h>
+#include <linux/device.h>
+#include <linux/dmi.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/kmod.h>
+#include <linux/kobject.h>
+#include <linux/kstrtox.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/platform_data/x86/asus-wmi.h>
+#include <linux/printk.h>
+#include <linux/power_supply.h>
+#include <linux/sysfs.h>
+
+#include "asus-armoury.h"
+#include "firmware_attributes_class.h"
+
+#define ASUS_NB_WMI_EVENT_GUID "0B3CBB35-E3C2-45ED-91C2-4C5A6D195D1C"
+
+#define ASUS_MINI_LED_MODE_MASK GENMASK(1, 0)
+/* Standard modes for devices with only on/off */
+#define ASUS_MINI_LED_OFF 0x00
+#define ASUS_MINI_LED_ON 0x01
+/* Like "on" but the effect is more vibrant or brighter */
+#define ASUS_MINI_LED_STRONG_MODE 0x02
+/* New modes for devices with 3 mini-led mode types */
+#define ASUS_MINI_LED_2024_WEAK 0x00
+#define ASUS_MINI_LED_2024_STRONG 0x01
+#define ASUS_MINI_LED_2024_OFF 0x02
+
+/* Power tunable attribute name defines */
+#define ATTR_PPT_PL1_SPL "ppt_pl1_spl"
+#define ATTR_PPT_PL2_SPPT "ppt_pl2_sppt"
+#define ATTR_PPT_PL3_FPPT "ppt_pl3_fppt"
+#define ATTR_PPT_APU_SPPT "ppt_apu_sppt"
+#define ATTR_PPT_PLATFORM_SPPT "ppt_platform_sppt"
+#define ATTR_NV_DYNAMIC_BOOST "nv_dynamic_boost"
+#define ATTR_NV_TEMP_TARGET "nv_temp_target"
+#define ATTR_NV_BASE_TGP "nv_base_tgp"
+#define ATTR_NV_TGP "nv_tgp"
+
+#define ASUS_ROG_TUNABLE_DC 0
+#define ASUS_ROG_TUNABLE_AC 1
+
+struct rog_tunables {
+ const struct power_limits *power_limits;
+ u32 ppt_pl1_spl; // cpu
+ u32 ppt_pl2_sppt; // cpu
+ u32 ppt_pl3_fppt; // cpu
+ u32 ppt_apu_sppt; // plat
+ u32 ppt_platform_sppt; // plat
+
+ u32 nv_dynamic_boost;
+ u32 nv_temp_target;
+ u32 nv_tgp;
+};
+
+struct asus_armoury_priv {
+ struct device *fw_attr_dev;
+ struct kset *fw_attr_kset;
+
+ /*
+ * Mutex to protect eGPU activation/deactivation
+ * sequences and dGPU connection status:
+ * do not allow concurrent changes or changes
+ * before a reboot if dGPU got disabled.
+ */
+ struct mutex egpu_mutex;
+
+ /* Index 0 for DC, 1 for AC */
+ struct rog_tunables *rog_tunables[2];
+
+ u32 mini_led_dev_id;
+ u32 gpu_mux_dev_id;
+};
+
+static struct asus_armoury_priv asus_armoury = {
+ .egpu_mutex = __MUTEX_INITIALIZER(asus_armoury.egpu_mutex),
+};
+
+struct fw_attrs_group {
+ bool pending_reboot;
+};
+
+static struct fw_attrs_group fw_attrs = {
+ .pending_reboot = false,
+};
+
+struct asus_attr_group {
+ const struct attribute_group *attr_group;
+ u32 wmi_devid;
+};
+
+static void asus_set_reboot_and_signal_event(void)
+{
+ fw_attrs.pending_reboot = true;
+ kobject_uevent(&asus_armoury.fw_attr_dev->kobj, KOBJ_CHANGE);
+}
+
+static ssize_t pending_reboot_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return sysfs_emit(buf, "%d\n", fw_attrs.pending_reboot);
+}
+
+static struct kobj_attribute pending_reboot = __ATTR_RO(pending_reboot);
+
+static bool asus_bios_requires_reboot(struct kobj_attribute *attr)
+{
+ return !strcmp(attr->attr.name, "gpu_mux_mode") ||
+ !strcmp(attr->attr.name, "panel_hd_mode");
+}
+
+/**
+ * armoury_has_devstate() - Check presence of the WMI function state.
+ *
+ * @dev_id: The WMI method ID to check for presence.
+ *
+ * Returns: true if and only if the method is supported.
+ */
+static bool armoury_has_devstate(u32 dev_id)
+{
+	u32 retval = 0;
+ int status;
+
+ status = asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS, dev_id, 0, &retval);
+ pr_debug("%s called (0x%08x), retval: 0x%08x\n", __func__, dev_id, retval);
+
+ return status == 0 && (retval & ASUS_WMI_DSTS_PRESENCE_BIT);
+}
+
+/**
+ * armoury_get_devstate() - Get the WMI function state.
+ * @attr: NULL or the kobj_attribute associated with the called WMI function.
+ * @retval: Non-NULL pointer where the value returned from WMI is stored,
+ *          with the function presence bit cleared.
+ * @dev_id: The WMI method ID to call.
+ *
+ * Intended to be used by sysfs attributes checking the associated WMI function.
+ *
+ * Returns:
+ * * %-ENODEV - method ID is unsupported.
+ * * %0 - successful and retval is filled.
+ * * %other - error from WMI call.
+ */
+static int armoury_get_devstate(struct kobj_attribute *attr, u32 *retval, u32 dev_id)
+{
+ int err;
+
+ err = asus_wmi_get_devstate_dsts(dev_id, retval);
+ if (err) {
+ if (attr)
+ pr_err("Failed to get %s: %d\n", attr->attr.name, err);
+ else
+ pr_err("Failed to get devstate for 0x%x: %d\n", dev_id, err);
+
+ return err;
+ }
+
+ /*
+ * asus_wmi_get_devstate_dsts will populate retval with WMI return, but
+ * the true value is expressed when ASUS_WMI_DSTS_PRESENCE_BIT is clear.
+ */
+ *retval &= ~ASUS_WMI_DSTS_PRESENCE_BIT;
+
+ return 0;
+}
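+/*
+ * Worked example (illustration only): a raw DSTS value of 0x00010001 has
+ * ASUS_WMI_DSTS_PRESENCE_BIT (bit 16) set, so after masking the state
+ * reported through @retval is 0x1.
+ */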
+
+/**
+ * armoury_set_devstate() - Set the WMI function state.
+ * @attr: The kobj_attribute associated with the called WMI function.
+ * @value: The new value to be set.
+ * @retval: Where to store the value returned from WMI, or NULL.
+ * @dev_id: The WMI method ID to call.
+ *
+ * Intended to be used by sysfs attributes setting the associated WMI function.
+ * The presence of the function should be checked before calling.
+ *
+ * Every WMI write MUST go through this function to enforce safety checks.
+ *
+ * A result other than 1 is usually considered a failure by ASUS, but some WMI
+ * methods (like eGPU or CPU cores) do use > 1 to return a status code or
+ * similar: in these cases the caller is interested in the actual return value
+ * and should perform the relevant checks.
+ *
+ * Returns:
+ * * %-EINVAL - attempt to set a dangerous or unsupported value.
+ * * %-EIO - WMI function returned an error.
+ * * %0 - successful and retval is filled.
+ * * %other - error from WMI call.
+ */
+static int armoury_set_devstate(struct kobj_attribute *attr,
+ u32 value, u32 *retval, u32 dev_id)
+{
+ u32 result;
+ int err;
+
+ /*
+ * Prevent developers from bricking devices or issuing dangerous
+ * commands that can be difficult or impossible to recover from.
+ */
+ switch (dev_id) {
+ case ASUS_WMI_DEVID_APU_MEM:
+ /*
+ * A hard reset might suffice to save the device,
+ * but there is no value in sending these commands.
+ */
+ if (value == 0x100 || value == 0x101) {
+ pr_err("Refusing to set APU memory to unsafe value: 0x%x\n", value);
+ return -EINVAL;
+ }
+ break;
+ default:
+ /* No problems are known for this dev_id */
+ break;
+ }
+
+ err = asus_wmi_set_devstate(dev_id, value, retval ? retval : &result);
+ if (err) {
+ if (attr)
+ pr_err("Failed to set %s: %d\n", attr->attr.name, err);
+ else
+ pr_err("Failed to set devstate for 0x%x: %d\n", dev_id, err);
+
+ return err;
+ }
+
+ /*
+	 * If retval == NULL the caller is not interested in the return value:
+	 * perform the most common result check here.
+	 */
+	if (!retval && !result) {
+		if (attr)
+			pr_err("Failed to set %s (result): 0x%x\n", attr->attr.name, result);
+		else
+			pr_err("Failed to set devstate for 0x%x (result): 0x%x\n", dev_id, result);
+ return -EIO;
+ }
+
+ return 0;
+}
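+/*
+ * Typical caller pattern (illustration only) for methods that report a
+ * status code in the result rather than a plain 0/1:
+ *
+ *	if (!armoury_has_devstate(dev_id))
+ *		return -ENODEV;
+ *	err = armoury_set_devstate(attr, value, &result, dev_id);
+ *	if (err)
+ *		return err;
+ *	// interpret the method-specific status in 'result'
+ */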
+
+static int armoury_attr_enum_list(char *buf, size_t enum_values)
+{
+ size_t i;
+ int len = 0;
+
+ for (i = 0; i < enum_values; i++) {
+ if (i == 0)
+ len += sysfs_emit_at(buf, len, "%zu", i);
+ else
+ len += sysfs_emit_at(buf, len, ";%zu", i);
+ }
+ len += sysfs_emit_at(buf, len, "\n");
+
+ return len;
+}
+
+ssize_t armoury_attr_uint_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count, u32 min, u32 max,
+ u32 *store_value, u32 wmi_dev)
+{
+ u32 value;
+ int err;
+
+ err = kstrtou32(buf, 10, &value);
+ if (err)
+ return err;
+
+ if (value < min || value > max)
+ return -EINVAL;
+
+ err = armoury_set_devstate(attr, value, NULL, wmi_dev);
+ if (err)
+ return err;
+
+ if (store_value != NULL)
+ *store_value = value;
+ sysfs_notify(kobj, NULL, attr->attr.name);
+
+ if (asus_bios_requires_reboot(attr))
+ asus_set_reboot_and_signal_event();
+
+ return count;
+}
+
+ssize_t armoury_attr_uint_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf, u32 wmi_dev)
+{
+ u32 result;
+ int err;
+
+ err = armoury_get_devstate(attr, &result, wmi_dev);
+ if (err)
+ return err;
+
+ return sysfs_emit(buf, "%u\n", result);
+}
+
+static ssize_t enum_type_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "enumeration\n");
+}
+
+static ssize_t int_type_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ return sysfs_emit(buf, "integer\n");
+}
+
+/* Mini-LED mode **************************************************************/
+
+/* Values map for mini-led modes on 2023 and earlier models. */
+static u32 mini_led_mode1_map[] = {
+ [0] = ASUS_MINI_LED_OFF,
+ [1] = ASUS_MINI_LED_ON,
+};
+
+/* Values map for mini-led modes on 2024 and later models. */
+static u32 mini_led_mode2_map[] = {
+ [0] = ASUS_MINI_LED_2024_OFF,
+ [1] = ASUS_MINI_LED_2024_WEAK,
+ [2] = ASUS_MINI_LED_2024_STRONG,
+};
+
+static ssize_t mini_led_mode_current_value_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ u32 *mini_led_mode_map;
+ size_t mini_led_mode_map_size;
+ u32 i, mode;
+ int err;
+
+ switch (asus_armoury.mini_led_dev_id) {
+ case ASUS_WMI_DEVID_MINI_LED_MODE:
+ mini_led_mode_map = mini_led_mode1_map;
+ mini_led_mode_map_size = ARRAY_SIZE(mini_led_mode1_map);
+ break;
+
+ case ASUS_WMI_DEVID_MINI_LED_MODE2:
+ mini_led_mode_map = mini_led_mode2_map;
+ mini_led_mode_map_size = ARRAY_SIZE(mini_led_mode2_map);
+ break;
+
+ default:
+ pr_err("Unrecognized mini-LED device: %u\n", asus_armoury.mini_led_dev_id);
+ return -ENODEV;
+ }
+
+ err = armoury_get_devstate(attr, &mode, asus_armoury.mini_led_dev_id);
+ if (err)
+ return err;
+
+	mode = FIELD_GET(ASUS_MINI_LED_MODE_MASK, mode);
+
+ for (i = 0; i < mini_led_mode_map_size; i++)
+ if (mode == mini_led_mode_map[i])
+ return sysfs_emit(buf, "%u\n", i);
+
+ pr_warn("Unrecognized mini-LED mode: %u", mode);
+ return -EINVAL;
+}
+
+static ssize_t mini_led_mode_current_value_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ u32 *mini_led_mode_map;
+ size_t mini_led_mode_map_size;
+ u32 mode;
+ int err;
+
+ err = kstrtou32(buf, 10, &mode);
+ if (err)
+ return err;
+
+ switch (asus_armoury.mini_led_dev_id) {
+ case ASUS_WMI_DEVID_MINI_LED_MODE:
+ mini_led_mode_map = mini_led_mode1_map;
+ mini_led_mode_map_size = ARRAY_SIZE(mini_led_mode1_map);
+ break;
+
+ case ASUS_WMI_DEVID_MINI_LED_MODE2:
+ mini_led_mode_map = mini_led_mode2_map;
+ mini_led_mode_map_size = ARRAY_SIZE(mini_led_mode2_map);
+ break;
+
+ default:
+ pr_err("Unrecognized mini-LED devid: %u\n", asus_armoury.mini_led_dev_id);
+ return -EINVAL;
+ }
+
+ if (mode >= mini_led_mode_map_size) {
+ pr_warn("mini-LED mode unrecognized device: %u\n", mode);
+ return -ENODEV;
+ }
+
+	err = armoury_set_devstate(attr, mini_led_mode_map[mode], NULL,
+				   asus_armoury.mini_led_dev_id);
+	if (err)
+		return err;
+
+	sysfs_notify(kobj, NULL, attr->attr.name);
+
+	return count;
+}
+
+static ssize_t mini_led_mode_possible_values_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ switch (asus_armoury.mini_led_dev_id) {
+ case ASUS_WMI_DEVID_MINI_LED_MODE:
+ return armoury_attr_enum_list(buf, ARRAY_SIZE(mini_led_mode1_map));
+ case ASUS_WMI_DEVID_MINI_LED_MODE2:
+ return armoury_attr_enum_list(buf, ARRAY_SIZE(mini_led_mode2_map));
+ default:
+ return -ENODEV;
+ }
+}
+ASUS_ATTR_GROUP_ENUM(mini_led_mode, "mini_led_mode", "Set the mini-LED backlight mode");
+
+static ssize_t gpu_mux_mode_current_value_store(struct kobject *kobj,
+ struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+	bool optimus;
+	u32 result;
+	int err;
+
+ err = kstrtobool(buf, &optimus);
+ if (err)
+ return err;
+
+ if (armoury_has_devstate(ASUS_WMI_DEVID_DGPU)) {
+ err = armoury_get_devstate(NULL, &result, ASUS_WMI_DEVID_DGPU);
+ if (err)
+ return err;
+ if (result && !optimus) {
+ pr_warn("Cannot switch MUX to dGPU mode when dGPU is disabled: %02X\n",
+ result);
+ return -ENODEV;
+ }
+ }
+
+ if (armoury_has_devstate(ASUS_WMI_DEVID_EGPU)) {
+ err = armoury_get_devstate(NULL, &result, ASUS_WMI_DEVID_EGPU);
+ if (err)
+ return err;
+ if (result && !optimus) {
+ pr_warn("Cannot switch MUX to dGPU mode when eGPU is enabled\n");
+ return -EBUSY;
+ }
+ }
+
+ err = armoury_set_devstate(attr, optimus ? 1 : 0, NULL, asus_armoury.gpu_mux_dev_id);
+ if (err)
+ return err;
+
+ sysfs_notify(kobj, NULL, attr->attr.name);
+ asus_set_reboot_and_signal_event();
+
+ return count;
+}
+ASUS_WMI_SHOW_INT(gpu_mux_mode_current_value, asus_armoury.gpu_mux_dev_id);
+ASUS_ATTR_GROUP_BOOL(gpu_mux_mode, "gpu_mux_mode", "Set the GPU display MUX mode");
+
+static ssize_t dgpu_disable_current_value_store(struct kobject *kobj,
+ struct kobj_attribute *attr, const char *buf,
+ size_t count)
+{
+	bool disable;
+	u32 result;
+	int err;
+
+ err = kstrtobool(buf, &disable);
+ if (err)
+ return err;
+
+ if (asus_armoury.gpu_mux_dev_id) {
+ err = armoury_get_devstate(NULL, &result, asus_armoury.gpu_mux_dev_id);
+ if (err)
+ return err;
+ if (!result && disable) {
+ pr_warn("Cannot disable dGPU when the MUX is in dGPU mode\n");
+ return -EBUSY;
+ }
+ }
+
+ scoped_guard(mutex, &asus_armoury.egpu_mutex) {
+ err = armoury_set_devstate(attr, disable ? 1 : 0, NULL, ASUS_WMI_DEVID_DGPU);
+ if (err)
+ return err;
+ }
+
+ sysfs_notify(kobj, NULL, attr->attr.name);
+
+ return count;
+}
+ASUS_WMI_SHOW_INT(dgpu_disable_current_value, ASUS_WMI_DEVID_DGPU);
+ASUS_ATTR_GROUP_BOOL(dgpu_disable, "dgpu_disable", "Disable the dGPU");
+
+/* Values map for eGPU activation requests. */
+static u32 egpu_status_map[] = {
+ [0] = 0x00000000U,
+ [1] = 0x00000001U,
+ [2] = 0x00000101U,
+ [3] = 0x00000201U,
+};
+
+/*
+ * armoury_pci_rescan() - Performs a PCI rescan
+ *
+ * Bring up any GPU that has been hotplugged into the system.
+ */
+static void armoury_pci_rescan(void)
+{
+ struct pci_bus *b = NULL;
+
+ pci_lock_rescan_remove();
+ while ((b = pci_find_next_bus(b)) != NULL)
+ pci_rescan_bus(b);
+ pci_unlock_rescan_remove();
+}
+
+/*
+ * The ACPI call to enable the eGPU might also disable the internal dGPU,
+ * but this is not always the case. On certain models, enabling the eGPU
+ * while the dGPU is either still active or has been disabled without a
+ * reboot will make both GPUs malfunction, and the kernel will detect many
+ * unrecoverable PCI AER errors.
+ */
+static ssize_t egpu_enable_current_value_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err;
+ u32 requested, enable, result;
+
+ err = kstrtou32(buf, 10, &requested);
+ if (err)
+ return err;
+
+ if (requested >= ARRAY_SIZE(egpu_status_map))
+ return -EINVAL;
+ enable = egpu_status_map[requested];
+
+ scoped_guard(mutex, &asus_armoury.egpu_mutex) {
+ /* Ensure the eGPU is connected before attempting to activate it. */
+ if (enable) {
+ err = armoury_get_devstate(NULL, &result, ASUS_WMI_DEVID_EGPU_CONNECTED);
+ if (err) {
+ pr_warn("Failed to get eGPU connection status: %d\n", err);
+ return err;
+ }
+ if (!result) {
+ pr_warn("Cannot activate eGPU while undetected\n");
+ return -ENOENT;
+ }
+ }
+
+ if (asus_armoury.gpu_mux_dev_id) {
+ err = armoury_get_devstate(NULL, &result, asus_armoury.gpu_mux_dev_id);
+ if (err)
+ return err;
+
+ if (!result && enable) {
+ pr_warn("Cannot enable eGPU when the MUX is in dGPU mode\n");
+ return -ENODEV;
+ }
+ }
+
+ err = armoury_set_devstate(attr, enable, &result, ASUS_WMI_DEVID_EGPU);
+ if (err) {
+ pr_err("Failed to set %s: %d\n", attr->attr.name, err);
+ return err;
+ }
+
+ /*
+ * ACPI returns 0x01 on success and 0x02 on a partial activation: a PCI
+ * rescan then brings the device up at PCIe 3.0 speed, and after a
+ * reboot it runs at full speed.
+ */
+ switch (result) {
+ case 0x01:
+ /*
+ * A GPU that is in use does not get disconnected even if the
+ * ACPI call returns success.
+ */
+ if (!enable) {
+ err = armoury_get_devstate(attr, &result, ASUS_WMI_DEVID_EGPU);
+ if (err) {
+ pr_warn("Failed to ensure eGPU is deactivated: %d\n", err);
+ return err;
+ }
+
+ if (result != 0)
+ return -EBUSY;
+ }
+
+ pr_debug("Success changing the eGPU status\n");
+ break;
+ case 0x02:
+ pr_info("Success changing the eGPU status, a reboot is strongly advised\n");
+ asus_set_reboot_and_signal_event();
+ break;
+ default:
+ pr_err("Failed to change the eGPU status: wmi result is 0x%x\n", result);
+ return -EIO;
+ }
+ }
+
+ /*
+ * Perform a PCI rescan: on every tested model this is necessary
+ * to make the eGPU visible on the bus without rebooting.
+ */
+ armoury_pci_rescan();
+
+ sysfs_notify(kobj, NULL, attr->attr.name);
+
+ return count;
+}
+
+static ssize_t egpu_enable_current_value_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ int i, err;
+ u32 status;
+
+ scoped_guard(mutex, &asus_armoury.egpu_mutex) {
+ err = armoury_get_devstate(attr, &status, ASUS_WMI_DEVID_EGPU);
+ if (err)
+ return err;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(egpu_status_map); i++) {
+ if (egpu_status_map[i] == status)
+ return sysfs_emit(buf, "%u\n", i);
+ }
+
+ return -EIO;
+}
+
+static ssize_t egpu_enable_possible_values_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ return armoury_attr_enum_list(buf, ARRAY_SIZE(egpu_status_map));
+}
+ASUS_ATTR_GROUP_ENUM(egpu_enable, "egpu_enable", "Enable the eGPU (also disables dGPU)");
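+
+/*
+ * Summary of the store path above (a restatement for clarity, no new
+ * behaviour):
+ * 1) map the requested index through egpu_status_map;
+ * 2) refuse to enable when no eGPU is detected or the MUX is in dGPU mode;
+ * 3) issue the WMI call and interpret 0x01 (done) vs 0x02 (reboot advised);
+ * 4) rescan PCI so the eGPU appears without a reboot.
+ */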
+
+/* Device memory available to APU */
+
+/*
+ * Values map for APU reserved memory (index + 1 = number of GB).
+ * Some entries look out of order, but they are correct.
+ */
+static u32 apu_mem_map[] = {
+ [0] = 0x000, /* called "AUTO" in the BIOS; the minimum available */
+ [1] = 0x102,
+ [2] = 0x103,
+ [3] = 0x104,
+ [4] = 0x105,
+ [5] = 0x107,
+ [6] = 0x108,
+ [7] = 0x109,
+ [8] = 0x106,
+};
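+
+/*
+ * Worked example from the table above: writing "4" selects
+ * apu_mem_map[4] == 0x105, i.e. 5 GB reserved for the APU per the
+ * "index + 1 GB" rule; writing "0" selects the firmware "AUTO" minimum.
+ */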
+
+static ssize_t apu_mem_current_value_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ int err;
+ u32 mem;
+
+ err = armoury_get_devstate(attr, &mem, ASUS_WMI_DEVID_APU_MEM);
+ if (err)
+ return err;
+
+ /* After 0x000 is set, a read will return 0x100 */
+ if (mem == 0x100)
+ return sysfs_emit(buf, "0\n");
+
+ for (unsigned int i = 0; i < ARRAY_SIZE(apu_mem_map); i++) {
+ if (apu_mem_map[i] == mem)
+ return sysfs_emit(buf, "%u\n", i);
+ }
+
+ pr_warn("Unrecognised value for APU mem 0x%08x\n", mem);
+ return -EIO;
+}
+
+static ssize_t apu_mem_current_value_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ int err;
+ u32 requested, mem;
+
+ err = kstrtou32(buf, 10, &requested);
+ if (err)
+ return err;
+
+ if (requested >= ARRAY_SIZE(apu_mem_map))
+ return -EINVAL;
+ mem = apu_mem_map[requested];
+
+ err = armoury_set_devstate(attr, mem, NULL, ASUS_WMI_DEVID_APU_MEM);
+ if (err) {
+ pr_warn("Failed to set apu_mem 0x%x: %d\n", mem, err);
+ return err;
+ }
+
+ pr_info("APU memory changed to %uGB, reboot required\n", requested + 1);
+ sysfs_notify(kobj, NULL, attr->attr.name);
+
+ asus_set_reboot_and_signal_event();
+
+ return count;
+}
+
+static ssize_t apu_mem_possible_values_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ return armoury_attr_enum_list(buf, ARRAY_SIZE(apu_mem_map));
+}
+ASUS_ATTR_GROUP_ENUM(apu_mem, "apu_mem", "Set available system RAM (in GB) for the APU to use");
+
+/* Helper to access the tunables for the current power source (AC or DC) */
+static inline struct rog_tunables *get_current_tunables(void)
+{
+ if (power_supply_is_system_supplied())
+ return asus_armoury.rog_tunables[ASUS_ROG_TUNABLE_AC];
+
+ return asus_armoury.rog_tunables[ASUS_ROG_TUNABLE_DC];
+}
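+
+/*
+ * Worked example (restating the helper above): while on battery, a write to
+ * a tunable's current_value is range checked against
+ * rog_tunables[ASUS_ROG_TUNABLE_DC]->power_limits and stored in the DC set;
+ * on mains power the AC set is used instead, so the two profiles are kept
+ * independent of each other.
+ */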
+
+/* Simple attribute creation */
+ASUS_ATTR_GROUP_ENUM_INT_RO(charge_mode, "charge_mode", ASUS_WMI_DEVID_CHARGE_MODE, "0;1;2\n",
+ "Show the current mode of charging");
+ASUS_ATTR_GROUP_BOOL_RW(boot_sound, "boot_sound", ASUS_WMI_DEVID_BOOT_SOUND,
+ "Set the boot POST sound");
+ASUS_ATTR_GROUP_BOOL_RW(mcu_powersave, "mcu_powersave", ASUS_WMI_DEVID_MCU_POWERSAVE,
+ "Set MCU powersaving mode");
+ASUS_ATTR_GROUP_BOOL_RW(panel_od, "panel_overdrive", ASUS_WMI_DEVID_PANEL_OD,
+ "Set the panel refresh overdrive");
+ASUS_ATTR_GROUP_BOOL_RW(panel_hd_mode, "panel_hd_mode", ASUS_WMI_DEVID_PANEL_HD,
+ "Set the panel HD mode to UHD<0> or FHD<1>");
+ASUS_ATTR_GROUP_BOOL_RW(screen_auto_brightness, "screen_auto_brightness",
+ ASUS_WMI_DEVID_SCREEN_AUTO_BRIGHTNESS,
+ "Set the panel brightness to Off<0> or On<1>");
+ASUS_ATTR_GROUP_BOOL_RO(egpu_connected, "egpu_connected", ASUS_WMI_DEVID_EGPU_CONNECTED,
+ "Show the eGPU connection status");
+ASUS_ATTR_GROUP_ROG_TUNABLE(ppt_pl1_spl, ATTR_PPT_PL1_SPL, ASUS_WMI_DEVID_PPT_PL1_SPL,
+ "Set the CPU slow package limit");
+ASUS_ATTR_GROUP_ROG_TUNABLE(ppt_pl2_sppt, ATTR_PPT_PL2_SPPT, ASUS_WMI_DEVID_PPT_PL2_SPPT,
+ "Set the CPU fast package limit");
+ASUS_ATTR_GROUP_ROG_TUNABLE(ppt_pl3_fppt, ATTR_PPT_PL3_FPPT, ASUS_WMI_DEVID_PPT_PL3_FPPT,
+ "Set the CPU fastest package limit");
+ASUS_ATTR_GROUP_ROG_TUNABLE(ppt_apu_sppt, ATTR_PPT_APU_SPPT, ASUS_WMI_DEVID_PPT_APU_SPPT,
+ "Set the APU package limit");
+ASUS_ATTR_GROUP_ROG_TUNABLE(ppt_platform_sppt, ATTR_PPT_PLATFORM_SPPT, ASUS_WMI_DEVID_PPT_PLAT_SPPT,
+ "Set the platform package limit");
+ASUS_ATTR_GROUP_ROG_TUNABLE(nv_dynamic_boost, ATTR_NV_DYNAMIC_BOOST, ASUS_WMI_DEVID_NV_DYN_BOOST,
+ "Set the Nvidia dynamic boost limit");
+ASUS_ATTR_GROUP_ROG_TUNABLE(nv_temp_target, ATTR_NV_TEMP_TARGET, ASUS_WMI_DEVID_NV_THERM_TARGET,
+ "Set the Nvidia max thermal limit");
+ASUS_ATTR_GROUP_ROG_TUNABLE(nv_tgp, "nv_tgp", ASUS_WMI_DEVID_DGPU_SET_TGP,
+ "Set the additional TGP on top of the base TGP");
+ASUS_ATTR_GROUP_INT_VALUE_ONLY_RO(nv_base_tgp, ATTR_NV_BASE_TGP, ASUS_WMI_DEVID_DGPU_BASE_TGP,
+ "Read the base TGP value");
+
+/* If an attribute does not require any special-case handling, add it here */
+static const struct asus_attr_group armoury_attr_groups[] = {
+ { &egpu_connected_attr_group, ASUS_WMI_DEVID_EGPU_CONNECTED },
+ { &egpu_enable_attr_group, ASUS_WMI_DEVID_EGPU },
+ { &dgpu_disable_attr_group, ASUS_WMI_DEVID_DGPU },
+ { &apu_mem_attr_group, ASUS_WMI_DEVID_APU_MEM },
+
+ { &ppt_pl1_spl_attr_group, ASUS_WMI_DEVID_PPT_PL1_SPL },
+ { &ppt_pl2_sppt_attr_group, ASUS_WMI_DEVID_PPT_PL2_SPPT },
+ { &ppt_pl3_fppt_attr_group, ASUS_WMI_DEVID_PPT_PL3_FPPT },
+ { &ppt_apu_sppt_attr_group, ASUS_WMI_DEVID_PPT_APU_SPPT },
+ { &ppt_platform_sppt_attr_group, ASUS_WMI_DEVID_PPT_PLAT_SPPT },
+ { &nv_dynamic_boost_attr_group, ASUS_WMI_DEVID_NV_DYN_BOOST },
+ { &nv_temp_target_attr_group, ASUS_WMI_DEVID_NV_THERM_TARGET },
+ { &nv_base_tgp_attr_group, ASUS_WMI_DEVID_DGPU_BASE_TGP },
+ { &nv_tgp_attr_group, ASUS_WMI_DEVID_DGPU_SET_TGP },
+
+ { &charge_mode_attr_group, ASUS_WMI_DEVID_CHARGE_MODE },
+ { &boot_sound_attr_group, ASUS_WMI_DEVID_BOOT_SOUND },
+ { &mcu_powersave_attr_group, ASUS_WMI_DEVID_MCU_POWERSAVE },
+ { &panel_od_attr_group, ASUS_WMI_DEVID_PANEL_OD },
+ { &panel_hd_mode_attr_group, ASUS_WMI_DEVID_PANEL_HD },
+ { &screen_auto_brightness_attr_group, ASUS_WMI_DEVID_SCREEN_AUTO_BRIGHTNESS },
+};
+
+/**
+ * is_power_tunable_attr - Determines if an attribute is a power-related tunable
+ * @name: The name of the attribute to check
+ *
+ * This function checks if the given attribute name is related to power tuning.
+ *
+ * Return: true if the attribute is a power-related tunable, false otherwise
+ */
+static bool is_power_tunable_attr(const char *name)
+{
+ static const char * const power_tunable_attrs[] = {
+ ATTR_PPT_PL1_SPL, ATTR_PPT_PL2_SPPT,
+ ATTR_PPT_PL3_FPPT, ATTR_PPT_APU_SPPT,
+ ATTR_PPT_PLATFORM_SPPT, ATTR_NV_DYNAMIC_BOOST,
+ ATTR_NV_TEMP_TARGET, ATTR_NV_BASE_TGP,
+ ATTR_NV_TGP
+ };
+
+ for (unsigned int i = 0; i < ARRAY_SIZE(power_tunable_attrs); i++) {
+ if (!strcmp(name, power_tunable_attrs[i]))
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * has_valid_limit - Checks if a power-related attribute has a valid limit value
+ * @name: The name of the attribute to check
+ * @limits: Pointer to the power_limits structure containing limit values
+ *
+ * This function checks if a power-related attribute has a valid limit value.
+ * It returns false if limits is NULL or if the corresponding limit value is zero.
+ *
+ * Return: true if the attribute has a valid limit value, false otherwise
+ */
+static bool has_valid_limit(const char *name, const struct power_limits *limits)
+{
+ u32 limit_value = 0;
+
+ if (!limits)
+ return false;
+
+ if (!strcmp(name, ATTR_PPT_PL1_SPL))
+ limit_value = limits->ppt_pl1_spl_max;
+ else if (!strcmp(name, ATTR_PPT_PL2_SPPT))
+ limit_value = limits->ppt_pl2_sppt_max;
+ else if (!strcmp(name, ATTR_PPT_PL3_FPPT))
+ limit_value = limits->ppt_pl3_fppt_max;
+ else if (!strcmp(name, ATTR_PPT_APU_SPPT))
+ limit_value = limits->ppt_apu_sppt_max;
+ else if (!strcmp(name, ATTR_PPT_PLATFORM_SPPT))
+ limit_value = limits->ppt_platform_sppt_max;
+ else if (!strcmp(name, ATTR_NV_DYNAMIC_BOOST))
+ limit_value = limits->nv_dynamic_boost_max;
+ else if (!strcmp(name, ATTR_NV_TEMP_TARGET))
+ limit_value = limits->nv_temp_target_max;
+ else if (!strcmp(name, ATTR_NV_BASE_TGP) ||
+ !strcmp(name, ATTR_NV_TGP))
+ limit_value = limits->nv_tgp_max;
+
+ return limit_value > 0;
+}
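+
+/*
+ * Alternative sketch (not used in this patch): the strcmp chain above could
+ * be table driven by pairing each attribute name with an offset into
+ * struct power_limits, e.g.:
+ *
+ *	static const struct {
+ *		const char *name;
+ *		size_t max_offset;
+ *	} limit_map[] = {
+ *		{ ATTR_PPT_PL1_SPL, offsetof(struct power_limits, ppt_pl1_spl_max) },
+ *		{ ATTR_PPT_PL2_SPPT, offsetof(struct power_limits, ppt_pl2_sppt_max) },
+ *		...
+ *	};
+ *
+ * With only nine attributes the explicit chain is arguably easier to read,
+ * which is the trade-off taken here.
+ */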
+
+static int asus_fw_attr_add(void)
+{
+ const struct rog_tunables *const ac_rog_tunables =
+ asus_armoury.rog_tunables[ASUS_ROG_TUNABLE_AC];
+ const struct power_limits *limits;
+ bool should_create;
+ const char *name;
+ int err, i;
+
+ asus_armoury.fw_attr_dev = device_create(&firmware_attributes_class, NULL, MKDEV(0, 0),
+ NULL, "%s", DRIVER_NAME);
+ if (IS_ERR(asus_armoury.fw_attr_dev)) {
+ err = PTR_ERR(asus_armoury.fw_attr_dev);
+ goto fail_class_get;
+ }
+
+ asus_armoury.fw_attr_kset = kset_create_and_add("attributes", NULL,
+ &asus_armoury.fw_attr_dev->kobj);
+ if (!asus_armoury.fw_attr_kset) {
+ err = -ENOMEM;
+ goto err_destroy_classdev;
+ }
+
+ err = sysfs_create_file(&asus_armoury.fw_attr_kset->kobj, &pending_reboot.attr);
+ if (err) {
+ pr_err("Failed to create sysfs level attributes\n");
+ goto err_destroy_kset;
+ }
+
+ asus_armoury.mini_led_dev_id = 0;
+ if (armoury_has_devstate(ASUS_WMI_DEVID_MINI_LED_MODE))
+ asus_armoury.mini_led_dev_id = ASUS_WMI_DEVID_MINI_LED_MODE;
+ else if (armoury_has_devstate(ASUS_WMI_DEVID_MINI_LED_MODE2))
+ asus_armoury.mini_led_dev_id = ASUS_WMI_DEVID_MINI_LED_MODE2;
+
+ if (asus_armoury.mini_led_dev_id) {
+ err = sysfs_create_group(&asus_armoury.fw_attr_kset->kobj,
+ &mini_led_mode_attr_group);
+ if (err) {
+ pr_err("Failed to create sysfs-group for mini_led\n");
+ goto err_remove_file;
+ }
+ }
+
+ asus_armoury.gpu_mux_dev_id = 0;
+ if (armoury_has_devstate(ASUS_WMI_DEVID_GPU_MUX))
+ asus_armoury.gpu_mux_dev_id = ASUS_WMI_DEVID_GPU_MUX;
+ else if (armoury_has_devstate(ASUS_WMI_DEVID_GPU_MUX_VIVO))
+ asus_armoury.gpu_mux_dev_id = ASUS_WMI_DEVID_GPU_MUX_VIVO;
+
+ if (asus_armoury.gpu_mux_dev_id) {
+ err = sysfs_create_group(&asus_armoury.fw_attr_kset->kobj,
+ &gpu_mux_mode_attr_group);
+ if (err) {
+ pr_err("Failed to create sysfs-group for gpu_mux\n");
+ goto err_remove_mini_led_group;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(armoury_attr_groups); i++) {
+ if (!armoury_has_devstate(armoury_attr_groups[i].wmi_devid))
+ continue;
+
+ /* Create by default; only power tunables without limits are skipped */
+ should_create = true;
+ name = armoury_attr_groups[i].attr_group->name;
+
+ /* Check if this is a power-related tunable requiring limits */
+ if (ac_rog_tunables && ac_rog_tunables->power_limits &&
+ is_power_tunable_attr(name)) {
+ limits = ac_rog_tunables->power_limits;
+ /* Check only AC: if not present then DC won't be either */
+ should_create = has_valid_limit(name, limits);
+ if (!should_create)
+ pr_debug("Missing max value for tunable %s\n", name);
+ }
+
+ if (should_create) {
+ err = sysfs_create_group(&asus_armoury.fw_attr_kset->kobj,
+ armoury_attr_groups[i].attr_group);
+ if (err) {
+ pr_err("Failed to create sysfs-group for %s\n",
+ armoury_attr_groups[i].attr_group->name);
+ goto err_remove_groups;
+ }
+ }
+ }
+
+ return 0;
+
+err_remove_groups:
+ while (i--) {
+ if (armoury_has_devstate(armoury_attr_groups[i].wmi_devid))
+ sysfs_remove_group(&asus_armoury.fw_attr_kset->kobj,
+ armoury_attr_groups[i].attr_group);
+ }
+ if (asus_armoury.gpu_mux_dev_id)
+ sysfs_remove_group(&asus_armoury.fw_attr_kset->kobj, &gpu_mux_mode_attr_group);
+err_remove_mini_led_group:
+ if (asus_armoury.mini_led_dev_id)
+ sysfs_remove_group(&asus_armoury.fw_attr_kset->kobj, &mini_led_mode_attr_group);
+err_remove_file:
+ sysfs_remove_file(&asus_armoury.fw_attr_kset->kobj, &pending_reboot.attr);
+err_destroy_kset:
+ kset_unregister(asus_armoury.fw_attr_kset);
+err_destroy_classdev:
+ device_destroy(&firmware_attributes_class, MKDEV(0, 0));
+fail_class_get:
+ return err;
+}
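+
+/*
+ * Resulting sysfs layout (illustrative, derived from the code above):
+ *
+ *	/sys/class/firmware-attributes/asus-armoury/attributes/
+ *		pending_reboot
+ *		mini_led_mode/		(only if the WMI device exists)
+ *		gpu_mux_mode/		(only if the WMI device exists)
+ *		<one directory per present entry of armoury_attr_groups[]>
+ */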
+
+/* Init / exit ****************************************************************/
+
+/* Set up the min/max and defaults for ROG tunables */
+static void init_rog_tunables(void)
+{
+ const struct power_limits *ac_limits, *dc_limits;
+ struct rog_tunables *ac_rog_tunables = NULL, *dc_rog_tunables = NULL;
+ const struct power_data *power_data;
+ const struct dmi_system_id *dmi_id;
+
+ /* Match the system against the power_limits table */
+ dmi_id = dmi_first_match(power_limits);
+ if (!dmi_id) {
+ pr_warn("No matching power limits found for this system\n");
+ return;
+ }
+
+ /* Get the power data for this system */
+ power_data = dmi_id->driver_data;
+ if (!power_data) {
+ pr_info("No power data available for this system\n");
+ return;
+ }
+
+ /* Initialize AC power tunables */
+ ac_limits = power_data->ac_data;
+ if (ac_limits) {
+ ac_rog_tunables = kzalloc(sizeof(*asus_armoury.rog_tunables[ASUS_ROG_TUNABLE_AC]),
+ GFP_KERNEL);
+ if (!ac_rog_tunables)
+ goto err_nomem;
+
+ asus_armoury.rog_tunables[ASUS_ROG_TUNABLE_AC] = ac_rog_tunables;
+ ac_rog_tunables->power_limits = ac_limits;
+
+ /* Set initial AC values */
+ ac_rog_tunables->ppt_pl1_spl =
+ ac_limits->ppt_pl1_spl_def ?
+ ac_limits->ppt_pl1_spl_def :
+ ac_limits->ppt_pl1_spl_max;
+
+ ac_rog_tunables->ppt_pl2_sppt =
+ ac_limits->ppt_pl2_sppt_def ?
+ ac_limits->ppt_pl2_sppt_def :
+ ac_limits->ppt_pl2_sppt_max;
+
+ ac_rog_tunables->ppt_pl3_fppt =
+ ac_limits->ppt_pl3_fppt_def ?
+ ac_limits->ppt_pl3_fppt_def :
+ ac_limits->ppt_pl3_fppt_max;
+
+ ac_rog_tunables->ppt_apu_sppt =
+ ac_limits->ppt_apu_sppt_def ?
+ ac_limits->ppt_apu_sppt_def :
+ ac_limits->ppt_apu_sppt_max;
+
+ ac_rog_tunables->ppt_platform_sppt =
+ ac_limits->ppt_platform_sppt_def ?
+ ac_limits->ppt_platform_sppt_def :
+ ac_limits->ppt_platform_sppt_max;
+
+ ac_rog_tunables->nv_dynamic_boost =
+ ac_limits->nv_dynamic_boost_max;
+ ac_rog_tunables->nv_temp_target =
+ ac_limits->nv_temp_target_max;
+ ac_rog_tunables->nv_tgp = ac_limits->nv_tgp_max;
+
+ pr_debug("AC power limits initialized for %s\n", dmi_id->matches[0].substr);
+ } else {
+ pr_debug("No AC PPT limits defined\n");
+ }
+
+ /* Initialize DC power tunables */
+ dc_limits = power_data->dc_data;
+ if (dc_limits) {
+ dc_rog_tunables = kzalloc(sizeof(*asus_armoury.rog_tunables[ASUS_ROG_TUNABLE_DC]),
+ GFP_KERNEL);
+ if (!dc_rog_tunables) {
+ kfree(ac_rog_tunables);
+ goto err_nomem;
+ }
+
+ asus_armoury.rog_tunables[ASUS_ROG_TUNABLE_DC] = dc_rog_tunables;
+ dc_rog_tunables->power_limits = dc_limits;
+
+ /* Set initial DC values */
+ dc_rog_tunables->ppt_pl1_spl =
+ dc_limits->ppt_pl1_spl_def ?
+ dc_limits->ppt_pl1_spl_def :
+ dc_limits->ppt_pl1_spl_max;
+
+ dc_rog_tunables->ppt_pl2_sppt =
+ dc_limits->ppt_pl2_sppt_def ?
+ dc_limits->ppt_pl2_sppt_def :
+ dc_limits->ppt_pl2_sppt_max;
+
+ dc_rog_tunables->ppt_pl3_fppt =
+ dc_limits->ppt_pl3_fppt_def ?
+ dc_limits->ppt_pl3_fppt_def :
+ dc_limits->ppt_pl3_fppt_max;
+
+ dc_rog_tunables->ppt_apu_sppt =
+ dc_limits->ppt_apu_sppt_def ?
+ dc_limits->ppt_apu_sppt_def :
+ dc_limits->ppt_apu_sppt_max;
+
+ dc_rog_tunables->ppt_platform_sppt =
+ dc_limits->ppt_platform_sppt_def ?
+ dc_limits->ppt_platform_sppt_def :
+ dc_limits->ppt_platform_sppt_max;
+
+ dc_rog_tunables->nv_dynamic_boost =
+ dc_limits->nv_dynamic_boost_max;
+ dc_rog_tunables->nv_temp_target =
+ dc_limits->nv_temp_target_max;
+ dc_rog_tunables->nv_tgp = dc_limits->nv_tgp_max;
+
+ pr_debug("DC power limits initialized for %s\n", dmi_id->matches[0].substr);
+ } else {
+ pr_debug("No DC PPT limits defined\n");
+ }
+
+ return;
+
+err_nomem:
+ pr_err("Failed to allocate memory for tunables\n");
+}
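+
+/*
+ * Refactoring sketch (pure deduplication, not part of this patch): the AC
+ * and DC blocks above differ only in which power_limits they consume, so
+ * they could share a helper such as:
+ *
+ *	static struct rog_tunables *alloc_tunables(const struct power_limits *l)
+ *	{
+ *		struct rog_tunables *t = kzalloc(sizeof(*t), GFP_KERNEL);
+ *
+ *		if (!t)
+ *			return NULL;
+ *		t->power_limits = l;
+ *		t->ppt_pl1_spl = l->ppt_pl1_spl_def ?: l->ppt_pl1_spl_max;
+ *		t->ppt_pl2_sppt = l->ppt_pl2_sppt_def ?: l->ppt_pl2_sppt_max;
+ *		t->ppt_pl3_fppt = l->ppt_pl3_fppt_def ?: l->ppt_pl3_fppt_max;
+ *		t->ppt_apu_sppt = l->ppt_apu_sppt_def ?: l->ppt_apu_sppt_max;
+ *		t->ppt_platform_sppt = l->ppt_platform_sppt_def ?:
+ *				       l->ppt_platform_sppt_max;
+ *		t->nv_dynamic_boost = l->nv_dynamic_boost_max;
+ *		t->nv_temp_target = l->nv_temp_target_max;
+ *		t->nv_tgp = l->nv_tgp_max;
+ *		return t;
+ *	}
+ */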
+
+static int __init asus_fw_init(void)
+{
+ char *wmi_uid;
+
+ wmi_uid = wmi_get_acpi_device_uid(ASUS_WMI_MGMT_GUID);
+ if (!wmi_uid)
+ return -ENODEV;
+
+ /*
+ * If the UID equals "ASUSWMI" the interface is DCTS, which cannot be
+ * used by this driver; DSTS is required.
+ */
+ if (!strcmp(wmi_uid, ASUS_ACPI_UID_ASUSWMI))
+ return -ENODEV;
+
+ init_rog_tunables();
+
+ /* Must always be the last step to ensure the data is available */
+ return asus_fw_attr_add();
+}
+
+static void __exit asus_fw_exit(void)
+{
+ int i;
+
+ for (i = ARRAY_SIZE(armoury_attr_groups) - 1; i >= 0; i--) {
+ if (armoury_has_devstate(armoury_attr_groups[i].wmi_devid))
+ sysfs_remove_group(&asus_armoury.fw_attr_kset->kobj,
+ armoury_attr_groups[i].attr_group);
+ }
+
+ if (asus_armoury.gpu_mux_dev_id)
+ sysfs_remove_group(&asus_armoury.fw_attr_kset->kobj, &gpu_mux_mode_attr_group);
+
+ if (asus_armoury.mini_led_dev_id)
+ sysfs_remove_group(&asus_armoury.fw_attr_kset->kobj, &mini_led_mode_attr_group);
+
+ sysfs_remove_file(&asus_armoury.fw_attr_kset->kobj, &pending_reboot.attr);
+ kset_unregister(asus_armoury.fw_attr_kset);
+ device_destroy(&firmware_attributes_class, MKDEV(0, 0));
+
+ kfree(asus_armoury.rog_tunables[ASUS_ROG_TUNABLE_AC]);
+ kfree(asus_armoury.rog_tunables[ASUS_ROG_TUNABLE_DC]);
+}
+
+module_init(asus_fw_init);
+module_exit(asus_fw_exit);
+
+MODULE_IMPORT_NS("ASUS_WMI");
+MODULE_AUTHOR("Luke Jones <luke@ljones.dev>");
+MODULE_DESCRIPTION("ASUS BIOS Configuration Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("wmi:" ASUS_NB_WMI_EVENT_GUID);
diff --git a/drivers/platform/x86/asus-armoury.h b/drivers/platform/x86/asus-armoury.h
new file mode 100644
index 000000000000..a1bb2005c3f3
--- /dev/null
+++ b/drivers/platform/x86/asus-armoury.h
@@ -0,0 +1,1541 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Definitions for kernel modules using the asus-armoury driver
+ *
+ * Copyright (c) 2024 Luke Jones <luke@ljones.dev>
+ */
+
+#ifndef _ASUS_ARMOURY_H_
+#define _ASUS_ARMOURY_H_
+
+#include <linux/dmi.h>
+#include <linux/platform_device.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+
+#define DRIVER_NAME "asus-armoury"
+
+/**
+ * armoury_attr_uint_store() - Send a uint to a WMI method if within min/max.
+ * @kobj: Pointer to the driver object.
+ * @attr: Pointer to the attribute calling this function.
+ * @buf: The buffer to read from, this is parsed to `uint` type.
+ * @count: Required by sysfs attribute macros; pass through from the caller.
+ * @min: Minimum accepted value. Below this returns -EINVAL.
+ * @max: Maximum accepted value. Above this returns -EINVAL.
+ * @store_value: Pointer to where the parsed value should be stored.
+ * @wmi_dev: The WMI function ID to use.
+ *
+ * This function is intended to be generic so it can be called from any "_store"
+ * attribute which works only with integers.
+ *
+ * The integer to be sent to the WMI method is range checked (inclusive) and
+ * an error is returned if it is out of range.
+ *
+ * If the value is valid and the WMI call succeeds, the sysfs attribute is
+ * notified; if asus_bios_requires_reboot() is true, the pending_reboot
+ * attribute is also notified.
+ *
+ * Returns: Either count, or an error.
+ */
+ssize_t armoury_attr_uint_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count, u32 min, u32 max,
+ u32 *store_value, u32 wmi_dev);
+
+/**
+ * armoury_attr_uint_show() - Receive a uint from a WMI method.
+ * @kobj: Pointer to the driver object.
+ * @attr: Pointer to the attribute calling this function.
+ * @buf: The buffer to write to, formatted as a `uint`.
+ * @wmi_dev: The WMI function ID to use.
+ *
+ * This function is intended to be generic so it can be called from any "_show"
+ * attribute which works only with integers.
+ *
+ * Returns: The number of bytes written to @buf, or an error.
+ */
+ssize_t armoury_attr_uint_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf, u32 wmi_dev);
+
+#define __ASUS_ATTR_RO(_func, _name) \
+ { \
+ .attr = { .name = __stringify(_name), .mode = 0444 }, \
+ .show = _func##_##_name##_show, \
+ }
+
+#define __ASUS_ATTR_RO_AS(_name, _show) \
+ { \
+ .attr = { .name = __stringify(_name), .mode = 0444 }, \
+ .show = _show, \
+ }
+
+#define __ASUS_ATTR_RW(_func, _name) \
+ __ATTR(_name, 0644, _func##_##_name##_show, _func##_##_name##_store)
+
+#define __WMI_STORE_INT(_attr, _min, _max, _wmi) \
+ static ssize_t _attr##_store(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ const char *buf, size_t count) \
+ { \
+ return armoury_attr_uint_store(kobj, attr, buf, count, _min, \
+ _max, NULL, _wmi); \
+ }
+
+#define ASUS_WMI_SHOW_INT(_attr, _wmi) \
+ static ssize_t _attr##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf) \
+ { \
+ return armoury_attr_uint_show(kobj, attr, buf, _wmi); \
+ }
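+
+/*
+ * Worked expansion (illustrative): ASUS_WMI_SHOW_INT(boot_sound_current_value,
+ * ASUS_WMI_DEVID_BOOT_SOUND) produces:
+ *
+ *	static ssize_t boot_sound_current_value_show(struct kobject *kobj,
+ *						     struct kobj_attribute *attr,
+ *						     char *buf)
+ *	{
+ *		return armoury_attr_uint_show(kobj, attr, buf,
+ *					      ASUS_WMI_DEVID_BOOT_SOUND);
+ *	}
+ */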
+
+/* Create functions and attributes for use in other macros or on their own */
+
+/* Shows a formatted static variable */
+#define __ATTR_SHOW_FMT(_prop, _attrname, _fmt, _val) \
+ static ssize_t _attrname##_##_prop##_show( \
+ struct kobject *kobj, struct kobj_attribute *attr, char *buf) \
+ { \
+ return sysfs_emit(buf, _fmt, _val); \
+ } \
+ static struct kobj_attribute attr_##_attrname##_##_prop = \
+ __ASUS_ATTR_RO(_attrname, _prop)
+
+#define __ATTR_RO_INT_GROUP_ENUM(_attrname, _wmi, _fsname, _possible, _dispname)\
+ ASUS_WMI_SHOW_INT(_attrname##_current_value, _wmi); \
+ static struct kobj_attribute attr_##_attrname##_current_value = \
+ __ASUS_ATTR_RO(_attrname, current_value); \
+ __ATTR_SHOW_FMT(display_name, _attrname, "%s\n", _dispname); \
+ __ATTR_SHOW_FMT(possible_values, _attrname, "%s\n", _possible); \
+ static struct kobj_attribute attr_##_attrname##_type = \
+ __ASUS_ATTR_RO_AS(type, enum_type_show); \
+ static struct attribute *_attrname##_attrs[] = { \
+ &attr_##_attrname##_current_value.attr, \
+ &attr_##_attrname##_display_name.attr, \
+ &attr_##_attrname##_possible_values.attr, \
+ &attr_##_attrname##_type.attr, \
+ NULL \
+ }; \
+ static const struct attribute_group _attrname##_attr_group = { \
+ .name = _fsname, .attrs = _attrname##_attrs \
+ }
+
+#define __ATTR_RW_INT_GROUP_ENUM(_attrname, _minv, _maxv, _wmi, _fsname,\
+ _possible, _dispname) \
+ __WMI_STORE_INT(_attrname##_current_value, _minv, _maxv, _wmi); \
+ ASUS_WMI_SHOW_INT(_attrname##_current_value, _wmi); \
+ static struct kobj_attribute attr_##_attrname##_current_value = \
+ __ASUS_ATTR_RW(_attrname, current_value); \
+ __ATTR_SHOW_FMT(display_name, _attrname, "%s\n", _dispname); \
+ __ATTR_SHOW_FMT(possible_values, _attrname, "%s\n", _possible); \
+ static struct kobj_attribute attr_##_attrname##_type = \
+ __ASUS_ATTR_RO_AS(type, enum_type_show); \
+ static struct attribute *_attrname##_attrs[] = { \
+ &attr_##_attrname##_current_value.attr, \
+ &attr_##_attrname##_display_name.attr, \
+ &attr_##_attrname##_possible_values.attr, \
+ &attr_##_attrname##_type.attr, \
+ NULL \
+ }; \
+ static const struct attribute_group _attrname##_attr_group = { \
+ .name = _fsname, .attrs = _attrname##_attrs \
+ }
+
+/* Boolean style enumeration, base macro. Requires adding show/store */
+#define __ATTR_GROUP_ENUM(_attrname, _fsname, _possible, _dispname) \
+ __ATTR_SHOW_FMT(display_name, _attrname, "%s\n", _dispname); \
+ __ATTR_SHOW_FMT(possible_values, _attrname, "%s\n", _possible); \
+ static struct kobj_attribute attr_##_attrname##_type = \
+ __ASUS_ATTR_RO_AS(type, enum_type_show); \
+ static struct attribute *_attrname##_attrs[] = { \
+ &attr_##_attrname##_current_value.attr, \
+ &attr_##_attrname##_display_name.attr, \
+ &attr_##_attrname##_possible_values.attr, \
+ &attr_##_attrname##_type.attr, \
+ NULL \
+ }; \
+ static const struct attribute_group _attrname##_attr_group = { \
+ .name = _fsname, .attrs = _attrname##_attrs \
+ }
+
+#define ASUS_ATTR_GROUP_BOOL_RO(_attrname, _fsname, _wmi, _dispname) \
+ __ATTR_RO_INT_GROUP_ENUM(_attrname, _wmi, _fsname, "0;1", _dispname)
+
+#define ASUS_ATTR_GROUP_BOOL_RW(_attrname, _fsname, _wmi, _dispname) \
+ __ATTR_RW_INT_GROUP_ENUM(_attrname, 0, 1, _wmi, _fsname, "0;1", _dispname)
+
+#define ASUS_ATTR_GROUP_ENUM_INT_RO(_attrname, _fsname, _wmi, _possible, _dispname) \
+ __ATTR_RO_INT_GROUP_ENUM(_attrname, _wmi, _fsname, _possible, _dispname)
+
+/*
+ * Requires <name>_current_value_show() and <name>_current_value_store()
+ */
+#define ASUS_ATTR_GROUP_BOOL(_attrname, _fsname, _dispname) \
+ static struct kobj_attribute attr_##_attrname##_current_value = \
+ __ASUS_ATTR_RW(_attrname, current_value); \
+ __ATTR_GROUP_ENUM(_attrname, _fsname, "0;1", _dispname)
+
+/*
+ * Requires <name>_current_value_show(), <name>_current_value_store()
+ * and <name>_possible_values_show()
+ */
+#define ASUS_ATTR_GROUP_ENUM(_attrname, _fsname, _dispname) \
+ __ATTR_SHOW_FMT(display_name, _attrname, "%s\n", _dispname); \
+ static struct kobj_attribute attr_##_attrname##_current_value = \
+ __ASUS_ATTR_RW(_attrname, current_value); \
+ static struct kobj_attribute attr_##_attrname##_possible_values = \
+ __ASUS_ATTR_RO(_attrname, possible_values); \
+ static struct kobj_attribute attr_##_attrname##_type = \
+ __ASUS_ATTR_RO_AS(type, enum_type_show); \
+ static struct attribute *_attrname##_attrs[] = { \
+ &attr_##_attrname##_current_value.attr, \
+ &attr_##_attrname##_display_name.attr, \
+ &attr_##_attrname##_possible_values.attr, \
+ &attr_##_attrname##_type.attr, \
+ NULL \
+ }; \
+ static const struct attribute_group _attrname##_attr_group = { \
+ .name = _fsname, .attrs = _attrname##_attrs \
+ }
+
+#define ASUS_ATTR_GROUP_INT_VALUE_ONLY_RO(_attrname, _fsname, _wmi, _dispname) \
+ ASUS_WMI_SHOW_INT(_attrname##_current_value, _wmi); \
+ static struct kobj_attribute attr_##_attrname##_current_value = \
+ __ASUS_ATTR_RO(_attrname, current_value); \
+ __ATTR_SHOW_FMT(display_name, _attrname, "%s\n", _dispname); \
+ static struct kobj_attribute attr_##_attrname##_type = \
+ __ASUS_ATTR_RO_AS(type, int_type_show); \
+ static struct attribute *_attrname##_attrs[] = { \
+ &attr_##_attrname##_current_value.attr, \
+ &attr_##_attrname##_display_name.attr, \
+ &attr_##_attrname##_type.attr, NULL \
+ }; \
+ static const struct attribute_group _attrname##_attr_group = { \
+ .name = _fsname, .attrs = _attrname##_attrs \
+ }
+
+/*
+ * ROG PPT attributes need a slightly different setup as they require
+ * rog_tunables members.
+ */
+
+#define __ROG_TUNABLE_SHOW(_prop, _attrname, _val) \
+ static ssize_t _attrname##_##_prop##_show( \
+ struct kobject *kobj, struct kobj_attribute *attr, char *buf) \
+ { \
+ struct rog_tunables *tunables = get_current_tunables(); \
+ \
+ if (!tunables || !tunables->power_limits) \
+ return -ENODEV; \
+ \
+ return sysfs_emit(buf, "%d\n", tunables->power_limits->_val); \
+ } \
+ static struct kobj_attribute attr_##_attrname##_##_prop = \
+ __ASUS_ATTR_RO(_attrname, _prop)
+
+#define __ROG_TUNABLE_SHOW_DEFAULT(_attrname) \
+ static ssize_t _attrname##_default_value_show( \
+ struct kobject *kobj, struct kobj_attribute *attr, char *buf) \
+ { \
+ struct rog_tunables *tunables = get_current_tunables(); \
+ \
+ if (!tunables || !tunables->power_limits) \
+ return -ENODEV; \
+ \
+ return sysfs_emit( \
+ buf, "%d\n", \
+ tunables->power_limits->_attrname##_def ? \
+ tunables->power_limits->_attrname##_def : \
+ tunables->power_limits->_attrname##_max); \
+ } \
+ static struct kobj_attribute attr_##_attrname##_default_value = \
+ __ASUS_ATTR_RO(_attrname, default_value)
+
+#define __ROG_TUNABLE_RW(_attr, _wmi) \
+ static ssize_t _attr##_current_value_store( \
+ struct kobject *kobj, struct kobj_attribute *attr, \
+ const char *buf, size_t count) \
+ { \
+ struct rog_tunables *tunables = get_current_tunables(); \
+ \
+ if (!tunables || !tunables->power_limits) \
+ return -ENODEV; \
+ \
+ if (tunables->power_limits->_attr##_min == \
+ tunables->power_limits->_attr##_max) \
+ return -EINVAL; \
+ \
+ return armoury_attr_uint_store(kobj, attr, buf, count, \
+ tunables->power_limits->_attr##_min, \
+ tunables->power_limits->_attr##_max, \
+ &tunables->_attr, _wmi); \
+ } \
+ static ssize_t _attr##_current_value_show( \
+ struct kobject *kobj, struct kobj_attribute *attr, char *buf) \
+ { \
+ struct rog_tunables *tunables = get_current_tunables(); \
+ \
+ if (!tunables) \
+ return -ENODEV; \
+ \
+ return sysfs_emit(buf, "%u\n", tunables->_attr); \
+ } \
+ static struct kobj_attribute attr_##_attr##_current_value = \
+ __ASUS_ATTR_RW(_attr, current_value)
+
+#define ASUS_ATTR_GROUP_ROG_TUNABLE(_attrname, _fsname, _wmi, _dispname) \
+ __ROG_TUNABLE_RW(_attrname, _wmi); \
+ __ROG_TUNABLE_SHOW_DEFAULT(_attrname); \
+ __ROG_TUNABLE_SHOW(min_value, _attrname, _attrname##_min); \
+ __ROG_TUNABLE_SHOW(max_value, _attrname, _attrname##_max); \
+ __ATTR_SHOW_FMT(scalar_increment, _attrname, "%d\n", 1); \
+ __ATTR_SHOW_FMT(display_name, _attrname, "%s\n", _dispname); \
+ static struct kobj_attribute attr_##_attrname##_type = \
+ __ASUS_ATTR_RO_AS(type, int_type_show); \
+ static struct attribute *_attrname##_attrs[] = { \
+ &attr_##_attrname##_current_value.attr, \
+ &attr_##_attrname##_default_value.attr, \
+ &attr_##_attrname##_min_value.attr, \
+ &attr_##_attrname##_max_value.attr, \
+ &attr_##_attrname##_scalar_increment.attr, \
+ &attr_##_attrname##_display_name.attr, \
+ &attr_##_attrname##_type.attr, \
+ NULL \
+ }; \
+ static const struct attribute_group _attrname##_attr_group = { \
+ .name = _fsname, .attrs = _attrname##_attrs \
+ }
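+
+/*
+ * Each tunable group built above therefore exposes seven files:
+ * current_value, default_value, min_value, max_value, scalar_increment,
+ * display_name and type, consistent with the firmware-attributes class
+ * ABI for integer attributes.
+ */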
+
+/* Default is always the maximum value unless *_def is specified */
+struct power_limits {
+ u8 ppt_pl1_spl_min;
+ u8 ppt_pl1_spl_def;
+ u8 ppt_pl1_spl_max;
+ u8 ppt_pl2_sppt_min;
+ u8 ppt_pl2_sppt_def;
+ u8 ppt_pl2_sppt_max;
+ u8 ppt_pl3_fppt_min;
+ u8 ppt_pl3_fppt_def;
+ u8 ppt_pl3_fppt_max;
+ u8 ppt_apu_sppt_min;
+ u8 ppt_apu_sppt_def;
+ u8 ppt_apu_sppt_max;
+ u8 ppt_platform_sppt_min;
+ u8 ppt_platform_sppt_def;
+ u8 ppt_platform_sppt_max;
+ /* Nvidia GPU specific, default is always max */
+ u8 nv_dynamic_boost_def; /* unused, exists for macro */
+ u8 nv_dynamic_boost_min;
+ u8 nv_dynamic_boost_max;
+ u8 nv_temp_target_def; /* unused, exists for macro */
+ u8 nv_temp_target_min;
+ u8 nv_temp_target_max;
+ u8 nv_tgp_def; /* unused, exists for macro */
+ u8 nv_tgp_min;
+ u8 nv_tgp_max;
+};
+
+struct power_data {
+ const struct power_limits *ac_data;
+ const struct power_limits *dc_data;
+ bool requires_fan_curve;
+};
+
+/*
+ * For each available attribute there must be a min and a max.
+ * _def is not required and will be assumed to be default == max if missing.
+ */
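+/*
+ * Minimal shape of one entry (illustrative; "XXNNN" is a placeholder, not a
+ * real board), using C99 compound literals so each board's limits live
+ * inline in its dmi_system_id entry:
+ *
+ *	{
+ *		.matches = {
+ *			DMI_MATCH(DMI_BOARD_NAME, "XXNNN"),
+ *		},
+ *		.driver_data = &(struct power_data) {
+ *			.ac_data = &(struct power_limits) {
+ *				.ppt_pl1_spl_min = 15,
+ *				.ppt_pl1_spl_max = 80,
+ *			},
+ *		},
+ *	},
+ */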
+static const struct dmi_system_id power_limits[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "FA401W"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_max = 80,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_max = 80,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 80,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 25,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ .nv_tgp_min = 55,
+ .nv_tgp_max = 75,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_max = 30,
+ .ppt_pl2_sppt_min = 31,
+ .ppt_pl2_sppt_max = 44,
+ .ppt_pl3_fppt_min = 45,
+ .ppt_pl3_fppt_max = 65,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "FA507N"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_max = 80,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_max = 80,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 80,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 25,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_def = 45,
+ .ppt_pl1_spl_max = 65,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_def = 54,
+ .ppt_pl2_sppt_max = 65,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 65,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "FA507UV"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_max = 80,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_max = 80,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 80,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 25,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ .nv_tgp_min = 55,
+ .nv_tgp_max = 115,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_def = 45,
+ .ppt_pl1_spl_max = 65,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_def = 54,
+ .ppt_pl2_sppt_max = 65,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 65,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "FA507R"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_max = 80,
+ .ppt_pl2_sppt_min = 25,
+ .ppt_pl2_sppt_max = 80,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 80
+ },
+ .dc_data = NULL,
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "FA507X"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_max = 80,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_max = 80,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 80,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 20,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ .nv_tgp_min = 55,
+ .nv_tgp_max = 85,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_def = 45,
+ .ppt_pl1_spl_max = 65,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_def = 54,
+ .ppt_pl2_sppt_max = 65,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 65,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "FA507Z"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 28,
+ .ppt_pl1_spl_max = 65,
+ .ppt_pl2_sppt_min = 28,
+ .ppt_pl2_sppt_max = 105,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 15,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ .nv_tgp_min = 55,
+ .nv_tgp_max = 85,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_max = 45,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_max = 60,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "FA607P"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 30,
+ .ppt_pl1_spl_def = 100,
+ .ppt_pl1_spl_max = 135,
+ .ppt_pl2_sppt_min = 30,
+ .ppt_pl2_sppt_def = 115,
+ .ppt_pl2_sppt_max = 135,
+ .ppt_pl3_fppt_min = 30,
+ .ppt_pl3_fppt_max = 135,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 25,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ .nv_tgp_min = 55,
+ .nv_tgp_max = 115,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_def = 45,
+ .ppt_pl1_spl_max = 80,
+ .ppt_pl2_sppt_min = 25,
+ .ppt_pl2_sppt_def = 60,
+ .ppt_pl2_sppt_max = 80,
+ .ppt_pl3_fppt_min = 25,
+ .ppt_pl3_fppt_max = 80,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "FA608WI"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_def = 90,
+ .ppt_pl1_spl_max = 90,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_def = 90,
+ .ppt_pl2_sppt_max = 90,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_def = 90,
+ .ppt_pl3_fppt_max = 90,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 25,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ .nv_tgp_min = 55,
+ .nv_tgp_max = 115,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_def = 45,
+ .ppt_pl1_spl_max = 65,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_def = 54,
+ .ppt_pl2_sppt_max = 65,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_def = 65,
+ .ppt_pl3_fppt_max = 65,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "FA617NS"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_apu_sppt_min = 15,
+ .ppt_apu_sppt_max = 80,
+ .ppt_platform_sppt_min = 30,
+ .ppt_platform_sppt_max = 120,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_apu_sppt_min = 25,
+ .ppt_apu_sppt_max = 35,
+ .ppt_platform_sppt_min = 45,
+ .ppt_platform_sppt_max = 100,
+ },
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "FA617NT"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_apu_sppt_min = 15,
+ .ppt_apu_sppt_max = 80,
+ .ppt_platform_sppt_min = 30,
+ .ppt_platform_sppt_max = 115,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_apu_sppt_min = 15,
+ .ppt_apu_sppt_max = 45,
+ .ppt_platform_sppt_min = 30,
+ .ppt_platform_sppt_max = 50,
+ },
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "FA617XS"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_apu_sppt_min = 15,
+ .ppt_apu_sppt_max = 80,
+ .ppt_platform_sppt_min = 30,
+ .ppt_platform_sppt_max = 120,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_apu_sppt_min = 25,
+ .ppt_apu_sppt_max = 35,
+ .ppt_platform_sppt_min = 45,
+ .ppt_platform_sppt_max = 100,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "FX507VI"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 28,
+ .ppt_pl1_spl_max = 135,
+ .ppt_pl2_sppt_min = 28,
+ .ppt_pl2_sppt_max = 135,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 25,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_max = 45,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_max = 60,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .requires_fan_curve = true,
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "FX507VV"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 28,
+ .ppt_pl1_spl_def = 115,
+ .ppt_pl1_spl_max = 135,
+ .ppt_pl2_sppt_min = 28,
+ .ppt_pl2_sppt_max = 135,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 25,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_max = 45,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_max = 60,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .requires_fan_curve = true,
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "FX507Z"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 28,
+ .ppt_pl1_spl_max = 90,
+ .ppt_pl2_sppt_min = 28,
+ .ppt_pl2_sppt_max = 135,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 15,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_max = 45,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_max = 60,
+ },
+ .requires_fan_curve = true,
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GA401Q"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_max = 80,
+ .ppt_pl2_sppt_min = 15,
+ .ppt_pl2_sppt_max = 80,
+ },
+ .dc_data = NULL,
+ },
+ },
+ {
+ .matches = {
+ /* This model is full AMD; no Nvidia dGPU. */
+ DMI_MATCH(DMI_BOARD_NAME, "GA402R"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_apu_sppt_min = 15,
+ .ppt_apu_sppt_max = 80,
+ .ppt_platform_sppt_min = 30,
+ .ppt_platform_sppt_max = 115,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_apu_sppt_min = 25,
+ .ppt_apu_sppt_def = 30,
+ .ppt_apu_sppt_max = 45,
+ .ppt_platform_sppt_min = 40,
+ .ppt_platform_sppt_max = 60,
+ },
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GA402X"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_def = 35,
+ .ppt_pl1_spl_max = 80,
+ .ppt_pl2_sppt_min = 25,
+ .ppt_pl2_sppt_def = 65,
+ .ppt_pl2_sppt_max = 80,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 80,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_max = 35,
+ .ppt_pl2_sppt_min = 25,
+ .ppt_pl2_sppt_max = 35,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 65,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .requires_fan_curve = true,
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GA403U"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_max = 80,
+ .ppt_pl2_sppt_min = 25,
+ .ppt_pl2_sppt_max = 80,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 80,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 25,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ .nv_tgp_min = 55,
+ .nv_tgp_max = 65,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_max = 35,
+ .ppt_pl2_sppt_min = 25,
+ .ppt_pl2_sppt_max = 35,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 65,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .requires_fan_curve = true,
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GA503QR"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_def = 35,
+ .ppt_pl1_spl_max = 80,
+ .ppt_pl2_sppt_min = 65,
+ .ppt_pl2_sppt_max = 80,
+ },
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GA503R"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_def = 35,
+ .ppt_pl1_spl_max = 80,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_def = 65,
+ .ppt_pl2_sppt_max = 80,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 80,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 20,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_def = 25,
+ .ppt_pl1_spl_max = 65,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_def = 54,
+ .ppt_pl2_sppt_max = 60,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 65,
+ },
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GA605W"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_max = 80,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_max = 80,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 80,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 20,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ .nv_tgp_min = 55,
+ .nv_tgp_max = 85,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_max = 35,
+ .ppt_pl2_sppt_min = 31,
+ .ppt_pl2_sppt_max = 44,
+ .ppt_pl3_fppt_min = 45,
+ .ppt_pl3_fppt_max = 65,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .requires_fan_curve = true,
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GU603Z"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_max = 60,
+ .ppt_pl2_sppt_min = 25,
+ .ppt_pl2_sppt_max = 135,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 20,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_max = 40,
+ .ppt_pl2_sppt_min = 25,
+ .ppt_pl2_sppt_max = 40,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ }
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GU604V"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 65,
+ .ppt_pl1_spl_max = 120,
+ .ppt_pl2_sppt_min = 65,
+ .ppt_pl2_sppt_max = 150,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 25,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_max = 40,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_def = 40,
+ .ppt_pl2_sppt_max = 60,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GU605CW"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 45,
+ .ppt_pl1_spl_max = 85,
+ .ppt_pl2_sppt_min = 56,
+ .ppt_pl2_sppt_max = 110,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 20,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ .nv_tgp_min = 80,
+ .nv_tgp_def = 90,
+ .nv_tgp_max = 110,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_max = 85,
+ .ppt_pl2_sppt_min = 32,
+ .ppt_pl2_sppt_max = 110,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .requires_fan_curve = true,
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GU605CX"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 45,
+ .ppt_pl1_spl_max = 85,
+ .ppt_pl2_sppt_min = 56,
+ .ppt_pl2_sppt_max = 110,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 20,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ .nv_tgp_min = 95,
+ .nv_tgp_def = 100,
+ .nv_tgp_max = 110,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_max = 85,
+ .ppt_pl2_sppt_min = 32,
+ .ppt_pl2_sppt_max = 110,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .requires_fan_curve = true,
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GU605M"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 28,
+ .ppt_pl1_spl_max = 90,
+ .ppt_pl2_sppt_min = 28,
+ .ppt_pl2_sppt_max = 135,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 20,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_max = 35,
+ .ppt_pl2_sppt_min = 38,
+ .ppt_pl2_sppt_max = 53,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .requires_fan_curve = true,
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GV301Q"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_max = 45,
+ .ppt_pl2_sppt_min = 65,
+ .ppt_pl2_sppt_max = 80,
+ },
+ .dc_data = NULL,
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GV301R"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_max = 45,
+ .ppt_pl2_sppt_min = 25,
+ .ppt_pl2_sppt_max = 54,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 65,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_max = 35,
+ .ppt_pl2_sppt_min = 25,
+ .ppt_pl2_sppt_max = 35,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 65,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GV601R"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_def = 35,
+ .ppt_pl1_spl_max = 90,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_def = 54,
+ .ppt_pl2_sppt_max = 100,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_def = 80,
+ .ppt_pl3_fppt_max = 125,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 25,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_def = 28,
+ .ppt_pl1_spl_max = 65,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_def = 54,
+ .ppt_pl2_sppt_max = 60,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_def = 80,
+ .ppt_pl3_fppt_max = 65,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GV601V"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 28,
+ .ppt_pl1_spl_def = 100,
+ .ppt_pl1_spl_max = 110,
+ .ppt_pl2_sppt_min = 28,
+ .ppt_pl2_sppt_max = 135,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 20,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_max = 40,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_def = 40,
+ .ppt_pl2_sppt_max = 60,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "GX650P"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_def = 110,
+ .ppt_pl1_spl_max = 130,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_def = 125,
+ .ppt_pl2_sppt_max = 130,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_def = 125,
+ .ppt_pl3_fppt_max = 135,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 25,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_def = 25,
+ .ppt_pl1_spl_max = 65,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_def = 35,
+ .ppt_pl2_sppt_max = 65,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_def = 42,
+ .ppt_pl3_fppt_max = 65,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "G513I"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ /* Yes this laptop is very limited */
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_max = 80,
+ .ppt_pl2_sppt_min = 15,
+ .ppt_pl2_sppt_max = 80,
+ },
+ .dc_data = NULL,
+ .requires_fan_curve = true,
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "G513QM"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ /* Yes this laptop is very limited */
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_max = 100,
+ .ppt_pl2_sppt_min = 15,
+ .ppt_pl2_sppt_max = 190,
+ },
+ .dc_data = NULL,
+ .requires_fan_curve = true,
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "G513R"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 35,
+ .ppt_pl1_spl_max = 90,
+ .ppt_pl2_sppt_min = 54,
+ .ppt_pl2_sppt_max = 100,
+ .ppt_pl3_fppt_min = 54,
+ .ppt_pl3_fppt_max = 125,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 25,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 28,
+ .ppt_pl1_spl_max = 50,
+ .ppt_pl2_sppt_min = 28,
+ .ppt_pl2_sppt_max = 50,
+ .ppt_pl3_fppt_min = 28,
+ .ppt_pl3_fppt_max = 65,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .requires_fan_curve = true,
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "G614J"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 28,
+ .ppt_pl1_spl_max = 140,
+ .ppt_pl2_sppt_min = 28,
+ .ppt_pl2_sppt_max = 175,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 25,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_max = 55,
+ .ppt_pl2_sppt_min = 25,
+ .ppt_pl2_sppt_max = 70,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .requires_fan_curve = true,
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "G634J"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 28,
+ .ppt_pl1_spl_max = 140,
+ .ppt_pl2_sppt_min = 28,
+ .ppt_pl2_sppt_max = 175,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 25,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_max = 55,
+ .ppt_pl2_sppt_min = 25,
+ .ppt_pl2_sppt_max = 70,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .requires_fan_curve = true,
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "G713PV"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 30,
+ .ppt_pl1_spl_def = 120,
+ .ppt_pl1_spl_max = 130,
+ .ppt_pl2_sppt_min = 65,
+ .ppt_pl2_sppt_def = 125,
+ .ppt_pl2_sppt_max = 130,
+ .ppt_pl3_fppt_min = 65,
+ .ppt_pl3_fppt_def = 125,
+ .ppt_pl3_fppt_max = 130,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 25,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_max = 65,
+ .ppt_pl2_sppt_min = 25,
+ .ppt_pl2_sppt_max = 65,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 75,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .requires_fan_curve = true,
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "G733C"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 28,
+ .ppt_pl1_spl_max = 170,
+ .ppt_pl2_sppt_min = 28,
+ .ppt_pl2_sppt_max = 175,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 25,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 28,
+ .ppt_pl1_spl_max = 35,
+ .ppt_pl2_sppt_min = 28,
+ .ppt_pl2_sppt_max = 35,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .requires_fan_curve = true,
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "G733P"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 30,
+ .ppt_pl1_spl_def = 100,
+ .ppt_pl1_spl_max = 130,
+ .ppt_pl2_sppt_min = 65,
+ .ppt_pl2_sppt_def = 125,
+ .ppt_pl2_sppt_max = 130,
+ .ppt_pl3_fppt_min = 65,
+ .ppt_pl3_fppt_def = 125,
+ .ppt_pl3_fppt_max = 130,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 25,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_max = 65,
+ .ppt_pl2_sppt_min = 25,
+ .ppt_pl2_sppt_max = 65,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 75,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .requires_fan_curve = true,
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "G814J"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 28,
+ .ppt_pl1_spl_max = 140,
+ .ppt_pl2_sppt_min = 28,
+ .ppt_pl2_sppt_max = 140,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 25,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_max = 55,
+ .ppt_pl2_sppt_min = 25,
+ .ppt_pl2_sppt_max = 70,
+ },
+ .requires_fan_curve = true,
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "G834J"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 28,
+ .ppt_pl1_spl_max = 140,
+ .ppt_pl2_sppt_min = 28,
+ .ppt_pl2_sppt_max = 175,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 25,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_max = 55,
+ .ppt_pl2_sppt_min = 25,
+ .ppt_pl2_sppt_max = 70,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ .requires_fan_curve = true,
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "H7606W"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 15,
+ .ppt_pl1_spl_max = 80,
+ .ppt_pl2_sppt_min = 35,
+ .ppt_pl2_sppt_max = 80,
+ .ppt_pl3_fppt_min = 35,
+ .ppt_pl3_fppt_max = 80,
+ .nv_dynamic_boost_min = 5,
+ .nv_dynamic_boost_max = 20,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ .nv_tgp_min = 55,
+ .nv_tgp_max = 85,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 25,
+ .ppt_pl1_spl_max = 35,
+ .ppt_pl2_sppt_min = 31,
+ .ppt_pl2_sppt_max = 44,
+ .ppt_pl3_fppt_min = 45,
+ .ppt_pl3_fppt_max = 65,
+ .nv_temp_target_min = 75,
+ .nv_temp_target_max = 87,
+ },
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "RC71"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 7,
+ .ppt_pl1_spl_max = 30,
+ .ppt_pl2_sppt_min = 15,
+ .ppt_pl2_sppt_max = 43,
+ .ppt_pl3_fppt_min = 15,
+ .ppt_pl3_fppt_max = 53,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 7,
+ .ppt_pl1_spl_def = 15,
+ .ppt_pl1_spl_max = 25,
+ .ppt_pl2_sppt_min = 15,
+ .ppt_pl2_sppt_def = 20,
+ .ppt_pl2_sppt_max = 30,
+ .ppt_pl3_fppt_min = 15,
+ .ppt_pl3_fppt_def = 25,
+ .ppt_pl3_fppt_max = 35,
+ },
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "RC72"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 7,
+ .ppt_pl1_spl_max = 30,
+ .ppt_pl2_sppt_min = 15,
+ .ppt_pl2_sppt_max = 43,
+ .ppt_pl3_fppt_min = 15,
+ .ppt_pl3_fppt_max = 53,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 7,
+ .ppt_pl1_spl_def = 17,
+ .ppt_pl1_spl_max = 25,
+ .ppt_pl2_sppt_min = 15,
+ .ppt_pl2_sppt_def = 24,
+ .ppt_pl2_sppt_max = 30,
+ .ppt_pl3_fppt_min = 15,
+ .ppt_pl3_fppt_def = 30,
+ .ppt_pl3_fppt_max = 35,
+ },
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_NAME, "RC73XA"),
+ },
+ .driver_data = &(struct power_data) {
+ .ac_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 7,
+ .ppt_pl1_spl_max = 35,
+ .ppt_pl2_sppt_min = 14,
+ .ppt_pl2_sppt_max = 45,
+ .ppt_pl3_fppt_min = 19,
+ .ppt_pl3_fppt_max = 55,
+ },
+ .dc_data = &(struct power_limits) {
+ .ppt_pl1_spl_min = 7,
+ .ppt_pl1_spl_def = 17,
+ .ppt_pl1_spl_max = 35,
+ .ppt_pl2_sppt_min = 13,
+ .ppt_pl2_sppt_def = 21,
+ .ppt_pl2_sppt_max = 45,
+ .ppt_pl3_fppt_min = 19,
+ .ppt_pl3_fppt_def = 26,
+ .ppt_pl3_fppt_max = 55,
+ },
+ },
+ },
+ {}
+};
+
+#endif /* _ASUS_ARMOURY_H_ */
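
The min/def/max triples in the table above act as per-board clamps for the firmware-attribute store paths. As a rough, hypothetical sketch of how one limit might be enforced — armoury_clamp_pl1() and its calling convention are invented here for illustration; only the power_limits field names come from this header:

static int armoury_clamp_pl1(const struct power_limits *limits,
			     u32 requested, u32 *value)
{
	/* Reject writes outside the DMI-matched board's supported range */
	if (requested < limits->ppt_pl1_spl_min ||
	    requested > limits->ppt_pl1_spl_max)
		return -EINVAL;

	*value = requested;
	return 0;
}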
diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c
index e72a2b5d158e..4aec7ec69250 100644
--- a/drivers/platform/x86/asus-wmi.c
+++ b/drivers/platform/x86/asus-wmi.c
@@ -15,6 +15,7 @@
#include <linux/acpi.h>
#include <linux/backlight.h>
+#include <linux/bits.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmi.h>
@@ -30,6 +31,7 @@
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/platform_data/x86/asus-wmi.h>
+#include <linux/platform_data/x86/asus-wmi-leds-ids.h>
#include <linux/platform_device.h>
#include <linux/platform_profile.h>
#include <linux/power_supply.h>
@@ -55,8 +57,6 @@ module_param(fnlock_default, bool, 0444);
#define to_asus_wmi_driver(pdrv) \
(container_of((pdrv), struct asus_wmi_driver, platform_driver))
-#define ASUS_WMI_MGMT_GUID "97845ED0-4E6D-11DE-8A39-0800200C9A66"
-
#define NOTIFY_BRNUP_MIN 0x11
#define NOTIFY_BRNUP_MAX 0x1f
#define NOTIFY_BRNDOWN_MIN 0x20
@@ -105,8 +105,6 @@ module_param(fnlock_default, bool, 0444);
#define USB_INTEL_XUSB2PR 0xD0
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31
-#define ASUS_ACPI_UID_ASUSWMI "ASUSWMI"
-
#define WMI_EVENT_MASK 0xFFFF
#define FAN_CURVE_POINTS 8
@@ -340,6 +338,13 @@ struct asus_wmi {
/* Global to allow setting externally without requiring driver data */
static enum asus_ally_mcu_hack use_ally_mcu_hack = ASUS_WMI_ALLY_MCU_HACK_INIT;
+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS)
+static void asus_wmi_show_deprecated(void)
+{
+ pr_notice_once("Accessing attributes through /sys/bus/platform/asus_wmi is deprecated and will be removed in a future release. Please switch over to /sys/class/firmware_attributes.\n");
+}
+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */
+
/* WMI ************************************************************************/
static int asus_wmi_evaluate_method3(u32 method_id,
@@ -390,7 +395,7 @@ int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1, u32 *retval)
{
return asus_wmi_evaluate_method3(method_id, arg0, arg1, 0, retval);
}
-EXPORT_SYMBOL_GPL(asus_wmi_evaluate_method);
+EXPORT_SYMBOL_NS_GPL(asus_wmi_evaluate_method, "ASUS_WMI");
static int asus_wmi_evaluate_method5(u32 method_id,
u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4, u32 *retval)
@@ -554,12 +559,52 @@ static int asus_wmi_get_devstate(struct asus_wmi *asus, u32 dev_id, u32 *retval)
return 0;
}
-int asus_wmi_set_devstate(u32 dev_id, u32 ctrl_param,
- u32 *retval)
+/**
+ * asus_wmi_get_devstate_dsts() - Get the WMI function state.
+ * @dev_id: The WMI method ID to call.
+ * @retval: A pointer to where to store the value returned from WMI.
+ *
+ * Returns:
+ * * %-ENODEV - method ID is unsupported.
+ * * %0 - successful and retval is filled.
+ * * %other - error from WMI call.
+ */
+int asus_wmi_get_devstate_dsts(u32 dev_id, u32 *retval)
+{
+ int err;
+
+ err = asus_wmi_evaluate_method(ASUS_WMI_METHODID_DSTS, dev_id, 0, retval);
+ if (err)
+ return err;
+
+ if ((*retval & ASUS_WMI_DSTS_PRESENCE_BIT) == 0x00)
+ return -ENODEV;
+
+ return 0;
+}
+EXPORT_SYMBOL_NS_GPL(asus_wmi_get_devstate_dsts, "ASUS_WMI");
+
+/**
+ * asus_wmi_set_devstate() - Set the WMI function state.
+ * @dev_id: The WMI function to call.
+ * @ctrl_param: The argument to be used for this WMI function.
+ * @retval: A pointer to where to store the value returned from WMI.
+ *
+ * Note: an asus_wmi_set_devstate() call must be paired with an
+ * asus_wmi_get_devstate_dsts() call to check whether the WMI function is
+ * supported.
+ *
+ * Returns:
+ * * %-ENODEV - method ID is unsupported.
+ * * %0 - successful and retval is filled.
+ * * %other - error from WMI call.
+ */
+int asus_wmi_set_devstate(u32 dev_id, u32 ctrl_param, u32 *retval)
{
return asus_wmi_evaluate_method(ASUS_WMI_METHODID_DEVS, dev_id,
ctrl_param, retval);
}
+EXPORT_SYMBOL_NS_GPL(asus_wmi_set_devstate, "ASUS_WMI");
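
A minimal usage sketch of the pairing described in the kernel-doc above — the device ID is only an illustrative example and error handling is trimmed to the essentials:

	u32 state, retval;
	int err;

	/* DSTS confirms the function exists and reports its current state... */
	err = asus_wmi_get_devstate_dsts(ASUS_WMI_DEVID_BOOT_SOUND, &state);
	if (err)
		return err;

	/* ...only then is a DEVS write to the same device ID meaningful */
	err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BOOT_SOUND, 1, &retval);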
/* Helper for special devices with magic return codes */
static int asus_wmi_get_devstate_bits(struct asus_wmi *asus,
@@ -692,6 +737,7 @@ static void asus_wmi_tablet_mode_get_state(struct asus_wmi *asus)
}
/* Charging mode, 1=Barrel, 2=USB ******************************************/
+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS)
static ssize_t charge_mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -702,12 +748,16 @@ static ssize_t charge_mode_show(struct device *dev,
if (result < 0)
return result;
+ asus_wmi_show_deprecated();
+
return sysfs_emit(buf, "%d\n", value & 0xff);
}
static DEVICE_ATTR_RO(charge_mode);
+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */
/* dGPU ********************************************************************/
+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS)
static ssize_t dgpu_disable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -718,6 +768,8 @@ static ssize_t dgpu_disable_show(struct device *dev,
if (result < 0)
return result;
+ asus_wmi_show_deprecated();
+
return sysfs_emit(buf, "%d\n", result);
}
@@ -771,8 +823,10 @@ static ssize_t dgpu_disable_store(struct device *dev,
return count;
}
static DEVICE_ATTR_RW(dgpu_disable);
+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */
/* eGPU ********************************************************************/
+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS)
static ssize_t egpu_enable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -783,6 +837,8 @@ static ssize_t egpu_enable_show(struct device *dev,
if (result < 0)
return result;
+ asus_wmi_show_deprecated();
+
return sysfs_emit(buf, "%d\n", result);
}
@@ -839,8 +895,10 @@ static ssize_t egpu_enable_store(struct device *dev,
return count;
}
static DEVICE_ATTR_RW(egpu_enable);
+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */
/* Is eGPU connected? *********************************************************/
+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS)
static ssize_t egpu_connected_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -851,12 +909,16 @@ static ssize_t egpu_connected_show(struct device *dev,
if (result < 0)
return result;
+ asus_wmi_show_deprecated();
+
return sysfs_emit(buf, "%d\n", result);
}
static DEVICE_ATTR_RO(egpu_connected);
+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */
/* gpu mux switch *************************************************************/
+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS)
static ssize_t gpu_mux_mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -867,6 +929,8 @@ static ssize_t gpu_mux_mode_show(struct device *dev,
if (result < 0)
return result;
+ asus_wmi_show_deprecated();
+
return sysfs_emit(buf, "%d\n", result);
}
@@ -925,6 +989,7 @@ static ssize_t gpu_mux_mode_store(struct device *dev,
return count;
}
static DEVICE_ATTR_RW(gpu_mux_mode);
+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */
/* TUF Laptop Keyboard RGB Modes **********************************************/
static ssize_t kbd_rgb_mode_store(struct device *dev,
@@ -1048,6 +1113,7 @@ static const struct attribute_group *kbd_rgb_mode_groups[] = {
};
/* Tunable: PPT: Intel=PL1, AMD=SPPT *****************************************/
+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS)
static ssize_t ppt_pl2_sppt_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
@@ -1086,6 +1152,8 @@ static ssize_t ppt_pl2_sppt_show(struct device *dev,
{
struct asus_wmi *asus = dev_get_drvdata(dev);
+ asus_wmi_show_deprecated();
+
return sysfs_emit(buf, "%u\n", asus->ppt_pl2_sppt);
}
static DEVICE_ATTR_RW(ppt_pl2_sppt);
@@ -1128,6 +1196,8 @@ static ssize_t ppt_pl1_spl_show(struct device *dev,
{
struct asus_wmi *asus = dev_get_drvdata(dev);
+ asus_wmi_show_deprecated();
+
return sysfs_emit(buf, "%u\n", asus->ppt_pl1_spl);
}
static DEVICE_ATTR_RW(ppt_pl1_spl);
@@ -1148,7 +1218,7 @@ static ssize_t ppt_fppt_store(struct device *dev,
if (value < PPT_TOTAL_MIN || value > PPT_TOTAL_MAX)
return -EINVAL;
- err = asus_wmi_set_devstate(ASUS_WMI_DEVID_PPT_FPPT, value, &result);
+ err = asus_wmi_set_devstate(ASUS_WMI_DEVID_PPT_PL3_FPPT, value, &result);
if (err) {
pr_warn("Failed to set ppt_fppt: %d\n", err);
return err;
@@ -1171,6 +1241,8 @@ static ssize_t ppt_fppt_show(struct device *dev,
{
struct asus_wmi *asus = dev_get_drvdata(dev);
+ asus_wmi_show_deprecated();
+
return sysfs_emit(buf, "%u\n", asus->ppt_fppt);
}
static DEVICE_ATTR_RW(ppt_fppt);
@@ -1214,6 +1286,8 @@ static ssize_t ppt_apu_sppt_show(struct device *dev,
{
struct asus_wmi *asus = dev_get_drvdata(dev);
+ asus_wmi_show_deprecated();
+
return sysfs_emit(buf, "%u\n", asus->ppt_apu_sppt);
}
static DEVICE_ATTR_RW(ppt_apu_sppt);
@@ -1257,6 +1331,8 @@ static ssize_t ppt_platform_sppt_show(struct device *dev,
{
struct asus_wmi *asus = dev_get_drvdata(dev);
+ asus_wmi_show_deprecated();
+
return sysfs_emit(buf, "%u\n", asus->ppt_platform_sppt);
}
static DEVICE_ATTR_RW(ppt_platform_sppt);
@@ -1300,6 +1376,8 @@ static ssize_t nv_dynamic_boost_show(struct device *dev,
{
struct asus_wmi *asus = dev_get_drvdata(dev);
+ asus_wmi_show_deprecated();
+
return sysfs_emit(buf, "%u\n", asus->nv_dynamic_boost);
}
static DEVICE_ATTR_RW(nv_dynamic_boost);
@@ -1343,9 +1421,12 @@ static ssize_t nv_temp_target_show(struct device *dev,
{
struct asus_wmi *asus = dev_get_drvdata(dev);
+ asus_wmi_show_deprecated();
+
return sysfs_emit(buf, "%u\n", asus->nv_temp_target);
}
static DEVICE_ATTR_RW(nv_temp_target);
+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */
/* Ally MCU Powersave ********************************************************/
@@ -1386,6 +1467,7 @@ void set_ally_mcu_powersave(bool enabled)
}
EXPORT_SYMBOL_NS_GPL(set_ally_mcu_powersave, "ASUS_WMI");
+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS)
static ssize_t mcu_powersave_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1396,6 +1478,8 @@ static ssize_t mcu_powersave_show(struct device *dev,
if (result < 0)
return result;
+ asus_wmi_show_deprecated();
+
return sysfs_emit(buf, "%d\n", result);
}
@@ -1431,6 +1515,7 @@ static ssize_t mcu_powersave_store(struct device *dev,
return count;
}
static DEVICE_ATTR_RW(mcu_powersave);
+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */
/* Battery ********************************************************************/
@@ -1619,14 +1704,14 @@ static void do_kbd_led_set(struct led_classdev *led_cdev, int value)
kbd_led_update(asus);
}
-static void kbd_led_set(struct led_classdev *led_cdev,
- enum led_brightness value)
+static int kbd_led_set(struct led_classdev *led_cdev, enum led_brightness value)
{
/* Prevent disabling keyboard backlight on module unregister */
if (led_cdev->flags & LED_UNREGISTERING)
- return;
+ return 0;
do_kbd_led_set(led_cdev, value);
+ return 0;
}
static void kbd_led_set_by_kbd(struct asus_wmi *asus, enum led_brightness value)
@@ -1802,7 +1887,7 @@ static int asus_wmi_led_init(struct asus_wmi *asus)
asus->kbd_led_wk = led_val;
asus->kbd_led.name = "asus::kbd_backlight";
asus->kbd_led.flags = LED_BRIGHT_HW_CHANGED;
- asus->kbd_led.brightness_set = kbd_led_set;
+ asus->kbd_led.brightness_set_blocking = kbd_led_set;
asus->kbd_led.brightness_get = kbd_led_get;
asus->kbd_led.max_brightness = 3;
@@ -2304,6 +2389,7 @@ exit:
}
/* Panel Overdrive ************************************************************/
+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS)
static ssize_t panel_od_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -2314,6 +2400,8 @@ static ssize_t panel_od_show(struct device *dev,
if (result < 0)
return result;
+ asus_wmi_show_deprecated();
+
return sysfs_emit(buf, "%d\n", result);
}
@@ -2350,9 +2438,10 @@ static ssize_t panel_od_store(struct device *dev,
return count;
}
static DEVICE_ATTR_RW(panel_od);
+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */
/* Bootup sound ***************************************************************/
-
+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS)
static ssize_t boot_sound_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -2363,6 +2452,8 @@ static ssize_t boot_sound_show(struct device *dev,
if (result < 0)
return result;
+ asus_wmi_show_deprecated();
+
return sysfs_emit(buf, "%d\n", result);
}
@@ -2398,8 +2489,10 @@ static ssize_t boot_sound_store(struct device *dev,
return count;
}
static DEVICE_ATTR_RW(boot_sound);
+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */
/* Mini-LED mode **************************************************************/
+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS)
static ssize_t mini_led_mode_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -2430,6 +2523,8 @@ static ssize_t mini_led_mode_show(struct device *dev,
}
}
+ asus_wmi_show_deprecated();
+
return sysfs_emit(buf, "%d\n", value);
}
@@ -2500,10 +2595,13 @@ static ssize_t available_mini_led_mode_show(struct device *dev,
return sysfs_emit(buf, "0 1 2\n");
}
+ asus_wmi_show_deprecated();
+
return sysfs_emit(buf, "0\n");
}
static DEVICE_ATTR_RO(available_mini_led_mode);
+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */
/* Quirks *********************************************************************/
@@ -3791,6 +3889,7 @@ static int throttle_thermal_policy_set_default(struct asus_wmi *asus)
return throttle_thermal_policy_write(asus);
}
+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS)
static ssize_t throttle_thermal_policy_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -3834,6 +3933,7 @@ static ssize_t throttle_thermal_policy_store(struct device *dev,
* Throttle thermal policy: 0 - default, 1 - overboost, 2 - silent
*/
static DEVICE_ATTR_RW(throttle_thermal_policy);
+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */
/* Platform profile ***********************************************************/
static int asus_wmi_platform_profile_get(struct device *dev,
@@ -4435,27 +4535,29 @@ static struct attribute *platform_attributes[] = {
&dev_attr_camera.attr,
&dev_attr_cardr.attr,
&dev_attr_touchpad.attr,
- &dev_attr_charge_mode.attr,
- &dev_attr_egpu_enable.attr,
- &dev_attr_egpu_connected.attr,
- &dev_attr_dgpu_disable.attr,
- &dev_attr_gpu_mux_mode.attr,
&dev_attr_lid_resume.attr,
&dev_attr_als_enable.attr,
&dev_attr_fan_boost_mode.attr,
- &dev_attr_throttle_thermal_policy.attr,
- &dev_attr_ppt_pl2_sppt.attr,
- &dev_attr_ppt_pl1_spl.attr,
- &dev_attr_ppt_fppt.attr,
- &dev_attr_ppt_apu_sppt.attr,
- &dev_attr_ppt_platform_sppt.attr,
- &dev_attr_nv_dynamic_boost.attr,
- &dev_attr_nv_temp_target.attr,
- &dev_attr_mcu_powersave.attr,
- &dev_attr_boot_sound.attr,
- &dev_attr_panel_od.attr,
- &dev_attr_mini_led_mode.attr,
- &dev_attr_available_mini_led_mode.attr,
+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS)
+ &dev_attr_charge_mode.attr,
+ &dev_attr_egpu_enable.attr,
+ &dev_attr_egpu_connected.attr,
+ &dev_attr_dgpu_disable.attr,
+ &dev_attr_gpu_mux_mode.attr,
+ &dev_attr_ppt_pl2_sppt.attr,
+ &dev_attr_ppt_pl1_spl.attr,
+ &dev_attr_ppt_fppt.attr,
+ &dev_attr_ppt_apu_sppt.attr,
+ &dev_attr_ppt_platform_sppt.attr,
+ &dev_attr_nv_dynamic_boost.attr,
+ &dev_attr_nv_temp_target.attr,
+ &dev_attr_mcu_powersave.attr,
+ &dev_attr_boot_sound.attr,
+ &dev_attr_panel_od.attr,
+ &dev_attr_mini_led_mode.attr,
+ &dev_attr_available_mini_led_mode.attr,
+ &dev_attr_throttle_thermal_policy.attr,
+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */
NULL
};
@@ -4477,7 +4579,11 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj,
devid = ASUS_WMI_DEVID_LID_RESUME;
else if (attr == &dev_attr_als_enable.attr)
devid = ASUS_WMI_DEVID_ALS_ENABLE;
- else if (attr == &dev_attr_charge_mode.attr)
+ else if (attr == &dev_attr_fan_boost_mode.attr)
+ ok = asus->fan_boost_mode_available;
+
+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS)
+ if (attr == &dev_attr_charge_mode.attr)
devid = ASUS_WMI_DEVID_CHARGE_MODE;
else if (attr == &dev_attr_egpu_enable.attr)
ok = asus->egpu_enable_available;
@@ -4496,7 +4602,7 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj,
else if (attr == &dev_attr_ppt_pl1_spl.attr)
devid = ASUS_WMI_DEVID_PPT_PL1_SPL;
else if (attr == &dev_attr_ppt_fppt.attr)
- devid = ASUS_WMI_DEVID_PPT_FPPT;
+ devid = ASUS_WMI_DEVID_PPT_PL3_FPPT;
else if (attr == &dev_attr_ppt_apu_sppt.attr)
devid = ASUS_WMI_DEVID_PPT_APU_SPPT;
else if (attr == &dev_attr_ppt_platform_sppt.attr)
@@ -4515,6 +4621,7 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj,
ok = asus->mini_led_dev_id != 0;
else if (attr == &dev_attr_available_mini_led_mode.attr)
ok = asus->mini_led_dev_id != 0;
+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */
if (devid != -1) {
ok = !(asus_wmi_get_devstate_simple(asus, devid) < 0);
@@ -4770,6 +4877,7 @@ static int asus_wmi_add(struct platform_device *pdev)
}
/* ensure defaults for tunables */
+#if IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS)
asus->ppt_pl2_sppt = 5;
asus->ppt_pl1_spl = 5;
asus->ppt_apu_sppt = 5;
@@ -4792,17 +4900,18 @@ static int asus_wmi_add(struct platform_device *pdev)
asus->gpu_mux_dev = ASUS_WMI_DEVID_GPU_MUX;
else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_GPU_MUX_VIVO))
asus->gpu_mux_dev = ASUS_WMI_DEVID_GPU_MUX_VIVO;
-
- if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_MODE))
- asus->kbd_rgb_dev = ASUS_WMI_DEVID_TUF_RGB_MODE;
- else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_MODE2))
- asus->kbd_rgb_dev = ASUS_WMI_DEVID_TUF_RGB_MODE2;
+#endif /* IS_ENABLED(CONFIG_ASUS_WMI_DEPRECATED_ATTRS) */
if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY))
asus->throttle_thermal_policy_dev = ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY;
else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY_VIVO))
asus->throttle_thermal_policy_dev = ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY_VIVO;
+ if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_MODE))
+ asus->kbd_rgb_dev = ASUS_WMI_DEVID_TUF_RGB_MODE;
+ else if (asus_wmi_dev_is_present(asus, ASUS_WMI_DEVID_TUF_RGB_MODE2))
+ asus->kbd_rgb_dev = ASUS_WMI_DEVID_TUF_RGB_MODE2;
+
err = fan_boost_mode_check_present(asus);
if (err)
goto fail_fan_boost_mode;
diff --git a/drivers/platform/x86/ayaneo-ec.c b/drivers/platform/x86/ayaneo-ec.c
new file mode 100644
index 000000000000..41a24e091248
--- /dev/null
+++ b/drivers/platform/x86/ayaneo-ec.c
@@ -0,0 +1,593 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Platform driver for the Embedded Controller (EC) of Ayaneo devices. Handles
+ * hwmon (fan speed, fan control), battery charge limits, and magic module
+ * control (connected modules, controller disconnection).
+ *
+ * Copyright (C) 2025 Antheas Kapenekakis <lkml@antheas.dev>
+ */
+
+#include <linux/acpi.h>
+#include <linux/bits.h>
+#include <linux/dmi.h>
+#include <linux/err.h>
+#include <linux/hwmon.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/power_supply.h>
+#include <linux/sysfs.h>
+#include <acpi/battery.h>
+
+#define AYANEO_PWM_ENABLE_REG 0x4A
+#define AYANEO_PWM_REG 0x4B
+#define AYANEO_PWM_MODE_AUTO 0x00
+#define AYANEO_PWM_MODE_MANUAL 0x01
+
+#define AYANEO_FAN_REG 0x76
+
+#define EC_CHARGE_CONTROL_BEHAVIOURS \
+ (BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO) | \
+ BIT(POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE))
+#define AYANEO_CHARGE_REG 0x1e
+#define AYANEO_CHARGE_VAL_AUTO 0xaa
+#define AYANEO_CHARGE_VAL_INHIBIT 0x55
+
+#define AYANEO_POWER_REG 0x2d
+#define AYANEO_POWER_OFF 0xfe
+#define AYANEO_POWER_ON 0xff
+#define AYANEO_MODULE_REG 0x2f
+#define AYANEO_MODULE_LEFT BIT(0)
+#define AYANEO_MODULE_RIGHT BIT(1)
+#define AYANEO_MODULE_MASK (AYANEO_MODULE_LEFT | AYANEO_MODULE_RIGHT)
+
+struct ayaneo_ec_quirk {
+ bool has_fan_control;
+ bool has_charge_control;
+ bool has_magic_modules;
+};
+
+struct ayaneo_ec_platform_data {
+ struct platform_device *pdev;
+ struct ayaneo_ec_quirk *quirks;
+ struct acpi_battery_hook battery_hook;
+
+	/* Protects access to restore_pwm */
+ struct mutex hwmon_lock;
+ bool restore_charge_limit;
+ bool restore_pwm;
+};
+
+static const struct ayaneo_ec_quirk quirk_fan = {
+ .has_fan_control = true,
+};
+
+static const struct ayaneo_ec_quirk quirk_charge_limit = {
+ .has_fan_control = true,
+ .has_charge_control = true,
+};
+
+static const struct ayaneo_ec_quirk quirk_ayaneo3 = {
+ .has_fan_control = true,
+ .has_charge_control = true,
+ .has_magic_modules = true,
+};
+
+static const struct dmi_system_id dmi_table[] = {
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+ DMI_MATCH(DMI_BOARD_NAME, "AYANEO 2"),
+ },
+ .driver_data = (void *)&quirk_fan,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+ DMI_MATCH(DMI_BOARD_NAME, "FLIP"),
+ },
+ .driver_data = (void *)&quirk_fan,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+ DMI_MATCH(DMI_BOARD_NAME, "GEEK"),
+ },
+ .driver_data = (void *)&quirk_fan,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "AIR"),
+ },
+ .driver_data = (void *)&quirk_charge_limit,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "AIR 1S"),
+ },
+ .driver_data = (void *)&quirk_charge_limit,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "AB05-Mendocino"),
+ },
+ .driver_data = (void *)&quirk_charge_limit,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "AIR Pro"),
+ },
+ .driver_data = (void *)&quirk_charge_limit,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "KUN"),
+ },
+ .driver_data = (void *)&quirk_charge_limit,
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "AYANEO 3"),
+ },
+ .driver_data = (void *)&quirk_ayaneo3,
+ },
+ {},
+};
+
+/* Callbacks for hwmon interface */
+static umode_t ayaneo_ec_hwmon_is_visible(const void *drvdata,
+ enum hwmon_sensor_types type, u32 attr,
+ int channel)
+{
+ switch (type) {
+ case hwmon_fan:
+ return 0444;
+ case hwmon_pwm:
+ return 0644;
+ default:
+ return 0;
+ }
+}
+
+static int ayaneo_ec_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+{
+ u8 tmp;
+ int ret;
+
+ switch (type) {
+ case hwmon_fan:
+ switch (attr) {
+ case hwmon_fan_input:
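+			/* Fan speed is a 16-bit RPM value split across two EC registers, MSB first */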
+ ret = ec_read(AYANEO_FAN_REG, &tmp);
+ if (ret)
+ return ret;
+ *val = tmp << 8;
+ ret = ec_read(AYANEO_FAN_REG + 1, &tmp);
+ if (ret)
+ return ret;
+ *val |= tmp;
+ return 0;
+ default:
+ break;
+ }
+ break;
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_input:
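+			/* EC stores the duty cycle as 0-100; scale to hwmon's 0-255 range */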
+ ret = ec_read(AYANEO_PWM_REG, &tmp);
+ if (ret)
+ return ret;
+ if (tmp > 100)
+ return -EIO;
+ *val = (255 * tmp) / 100;
+ return 0;
+ case hwmon_pwm_enable:
+ ret = ec_read(AYANEO_PWM_ENABLE_REG, &tmp);
+ if (ret)
+ return ret;
+ if (tmp == AYANEO_PWM_MODE_MANUAL)
+ *val = 1;
+ else if (tmp == AYANEO_PWM_MODE_AUTO)
+ *val = 2;
+ else
+ return -EIO;
+ return 0;
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
+static int ayaneo_ec_write(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long val)
+{
+ struct ayaneo_ec_platform_data *data = dev_get_drvdata(dev);
+ int ret;
+
+ guard(mutex)(&data->hwmon_lock);
+
+ switch (type) {
+ case hwmon_pwm:
+ switch (attr) {
+ case hwmon_pwm_enable:
+ data->restore_pwm = false;
+ switch (val) {
+ case 1:
+ return ec_write(AYANEO_PWM_ENABLE_REG,
+ AYANEO_PWM_MODE_MANUAL);
+ case 2:
+ return ec_write(AYANEO_PWM_ENABLE_REG,
+ AYANEO_PWM_MODE_AUTO);
+ default:
+ return -EINVAL;
+ }
+ case hwmon_pwm_input:
+ if (val < 0 || val > 255)
+ return -EINVAL;
+ if (data->restore_pwm) {
+				/*
+				 * Manual PWM control was released on freeze;
+				 * only restore it once userspace has resumed
+				 * and writes a fan speed again.
+				 */
+ ret = ec_write(AYANEO_PWM_ENABLE_REG,
+ AYANEO_PWM_MODE_MANUAL);
+ if (ret)
+ return ret;
+ data->restore_pwm = false;
+ }
+ return ec_write(AYANEO_PWM_REG, (val * 100) / 255);
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ return -EOPNOTSUPP;
+}
+
+static const struct hwmon_ops ayaneo_ec_hwmon_ops = {
+ .is_visible = ayaneo_ec_hwmon_is_visible,
+ .read = ayaneo_ec_read,
+ .write = ayaneo_ec_write,
+};
+
+static const struct hwmon_channel_info *const ayaneo_ec_sensors[] = {
+ HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT),
+ HWMON_CHANNEL_INFO(pwm, HWMON_PWM_INPUT | HWMON_PWM_ENABLE),
+ NULL,
+};
+
+static const struct hwmon_chip_info ayaneo_ec_chip_info = {
+ .ops = &ayaneo_ec_hwmon_ops,
+ .info = ayaneo_ec_sensors,
+};
+
+static int ayaneo_psy_ext_get_prop(struct power_supply *psy,
+ const struct power_supply_ext *ext,
+ void *data,
+ enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ int ret;
+ u8 tmp;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR:
+ ret = ec_read(AYANEO_CHARGE_REG, &tmp);
+ if (ret)
+ return ret;
+
+ if (tmp == AYANEO_CHARGE_VAL_INHIBIT)
+ val->intval = POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE;
+ else
+ val->intval = POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO;
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ayaneo_psy_ext_set_prop(struct power_supply *psy,
+ const struct power_supply_ext *ext,
+ void *data,
+ enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ u8 raw_val;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR:
+ switch (val->intval) {
+ case POWER_SUPPLY_CHARGE_BEHAVIOUR_AUTO:
+ raw_val = AYANEO_CHARGE_VAL_AUTO;
+ break;
+ case POWER_SUPPLY_CHARGE_BEHAVIOUR_INHIBIT_CHARGE:
+ raw_val = AYANEO_CHARGE_VAL_INHIBIT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return ec_write(AYANEO_CHARGE_REG, raw_val);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ayaneo_psy_prop_is_writeable(struct power_supply *psy,
+ const struct power_supply_ext *ext,
+ void *data,
+ enum power_supply_property psp)
+{
+ return true;
+}
+
+static const enum power_supply_property ayaneo_psy_ext_props[] = {
+ POWER_SUPPLY_PROP_CHARGE_BEHAVIOUR,
+};
+
+static const struct power_supply_ext ayaneo_psy_ext = {
+ .name = "ayaneo-charge-control",
+ .properties = ayaneo_psy_ext_props,
+ .num_properties = ARRAY_SIZE(ayaneo_psy_ext_props),
+ .charge_behaviours = EC_CHARGE_CONTROL_BEHAVIOURS,
+ .get_property = ayaneo_psy_ext_get_prop,
+ .set_property = ayaneo_psy_ext_set_prop,
+ .property_is_writeable = ayaneo_psy_prop_is_writeable,
+};
+
+static int ayaneo_add_battery(struct power_supply *battery,
+ struct acpi_battery_hook *hook)
+{
+ struct ayaneo_ec_platform_data *data =
+ container_of(hook, struct ayaneo_ec_platform_data, battery_hook);
+
+ return power_supply_register_extension(battery, &ayaneo_psy_ext,
+ &data->pdev->dev, NULL);
+}
+
+static int ayaneo_remove_battery(struct power_supply *battery,
+ struct acpi_battery_hook *hook)
+{
+ power_supply_unregister_extension(battery, &ayaneo_psy_ext);
+ return 0;
+}
+
+static ssize_t controller_power_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ bool value;
+ int ret;
+
+ ret = kstrtobool(buf, &value);
+ if (ret)
+ return ret;
+
+ ret = ec_write(AYANEO_POWER_REG, value ? AYANEO_POWER_ON : AYANEO_POWER_OFF);
+ if (ret)
+ return ret;
+
+ return count;
+}
+
+static ssize_t controller_power_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ u8 val;
+
+ ret = ec_read(AYANEO_POWER_REG, &val);
+ if (ret)
+ return ret;
+
+ return sysfs_emit(buf, "%d\n", val == AYANEO_POWER_ON);
+}
+
+static DEVICE_ATTR_RW(controller_power);
+
+static ssize_t controller_modules_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ u8 unconnected_modules;
+ char *out;
+ int ret;
+
+ ret = ec_read(AYANEO_MODULE_REG, &unconnected_modules);
+ if (ret)
+ return ret;
+
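+	/* The EC sets a bit per disconnected module; invert to report connected ones */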
+ switch (~unconnected_modules & AYANEO_MODULE_MASK) {
+ case AYANEO_MODULE_LEFT | AYANEO_MODULE_RIGHT:
+ out = "both";
+ break;
+ case AYANEO_MODULE_LEFT:
+ out = "left";
+ break;
+ case AYANEO_MODULE_RIGHT:
+ out = "right";
+ break;
+ default:
+ out = "none";
+ break;
+ }
+
+ return sysfs_emit(buf, "%s\n", out);
+}
+
+static DEVICE_ATTR_RO(controller_modules);
+
+static struct attribute *aya_mm_attrs[] = {
+ &dev_attr_controller_power.attr,
+ &dev_attr_controller_modules.attr,
+ NULL
+};
+
+static umode_t aya_mm_is_visible(struct kobject *kobj,
+ struct attribute *attr, int n)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ayaneo_ec_platform_data *data = platform_get_drvdata(pdev);
+
+ if (data->quirks->has_magic_modules)
+ return attr->mode;
+ return 0;
+}
+
+static const struct attribute_group aya_mm_attribute_group = {
+ .is_visible = aya_mm_is_visible,
+ .attrs = aya_mm_attrs,
+};
+
+static const struct attribute_group *ayaneo_ec_groups[] = {
+ &aya_mm_attribute_group,
+ NULL
+};
+
+static int ayaneo_ec_probe(struct platform_device *pdev)
+{
+ const struct dmi_system_id *dmi_entry;
+ struct ayaneo_ec_platform_data *data;
+ struct device *hwdev;
+ int ret;
+
+ dmi_entry = dmi_first_match(dmi_table);
+ if (!dmi_entry)
+ return -ENODEV;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->pdev = pdev;
+ data->quirks = dmi_entry->driver_data;
+ ret = devm_mutex_init(&pdev->dev, &data->hwmon_lock);
+ if (ret)
+ return ret;
+ platform_set_drvdata(pdev, data);
+
+ if (data->quirks->has_fan_control) {
+ hwdev = devm_hwmon_device_register_with_info(&pdev->dev,
+ "ayaneo_ec", data, &ayaneo_ec_chip_info, NULL);
+ if (IS_ERR(hwdev))
+ return PTR_ERR(hwdev);
+ }
+
+ if (data->quirks->has_charge_control) {
+ data->battery_hook.add_battery = ayaneo_add_battery;
+ data->battery_hook.remove_battery = ayaneo_remove_battery;
+ data->battery_hook.name = "Ayaneo Battery";
+ ret = devm_battery_hook_register(&pdev->dev, &data->battery_hook);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ayaneo_freeze(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ayaneo_ec_platform_data *data = platform_get_drvdata(pdev);
+ int ret;
+ u8 tmp;
+
+ if (data->quirks->has_charge_control) {
+ ret = ec_read(AYANEO_CHARGE_REG, &tmp);
+ if (ret)
+ return ret;
+
+ data->restore_charge_limit = tmp == AYANEO_CHARGE_VAL_INHIBIT;
+ }
+
+ if (data->quirks->has_fan_control) {
+ ret = ec_read(AYANEO_PWM_ENABLE_REG, &tmp);
+ if (ret)
+ return ret;
+
+ data->restore_pwm = tmp == AYANEO_PWM_MODE_MANUAL;
+
+ /*
+ * Release the fan when entering hibernation to avoid
+ * overheating if hibernation fails and hangs.
+ */
+ if (data->restore_pwm) {
+ ret = ec_write(AYANEO_PWM_ENABLE_REG, AYANEO_PWM_MODE_AUTO);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ayaneo_restore(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct ayaneo_ec_platform_data *data = platform_get_drvdata(pdev);
+ int ret;
+
+ if (data->quirks->has_charge_control && data->restore_charge_limit) {
+ ret = ec_write(AYANEO_CHARGE_REG, AYANEO_CHARGE_VAL_INHIBIT);
+ if (ret)
+ return ret;
+ }
+
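+	/*
+	 * Manual fan control is deliberately not restored here; it is
+	 * re-enabled from ayaneo_ec_write() once userspace writes a speed.
+	 */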
+ return 0;
+}
+
+static const struct dev_pm_ops ayaneo_pm_ops = {
+ .freeze = ayaneo_freeze,
+ .restore = ayaneo_restore,
+};
+
+static struct platform_driver ayaneo_platform_driver = {
+ .driver = {
+ .name = "ayaneo-ec",
+ .dev_groups = ayaneo_ec_groups,
+ .pm = pm_sleep_ptr(&ayaneo_pm_ops),
+ },
+ .probe = ayaneo_ec_probe,
+};
+
+static struct platform_device *ayaneo_platform_device;
+
+static int __init ayaneo_ec_init(void)
+{
+ ayaneo_platform_device =
+ platform_create_bundle(&ayaneo_platform_driver,
+ ayaneo_ec_probe, NULL, 0, NULL, 0);
+
+ return PTR_ERR_OR_ZERO(ayaneo_platform_device);
+}
+
+static void __exit ayaneo_ec_exit(void)
+{
+ platform_device_unregister(ayaneo_platform_device);
+ platform_driver_unregister(&ayaneo_platform_driver);
+}
+
+MODULE_DEVICE_TABLE(dmi, dmi_table);
+
+module_init(ayaneo_ec_init);
+module_exit(ayaneo_ec_exit);
+
+MODULE_AUTHOR("Antheas Kapenekakis <lkml@antheas.dev>");
+MODULE_DESCRIPTION("Ayaneo Embedded Controller (EC) platform features");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/dell/alienware-wmi-wmax.c b/drivers/platform/x86/dell/alienware-wmi-wmax.c
index fadf7aac6779..1418bd326edf 100644
--- a/drivers/platform/x86/dell/alienware-wmi-wmax.c
+++ b/drivers/platform/x86/dell/alienware-wmi-wmax.c
@@ -235,11 +235,6 @@ enum AWCC_THERMAL_TABLES {
AWCC_THERMAL_TABLE_USTT = 0xA,
};
-enum AWCC_SPECIAL_THERMAL_CODES {
- AWCC_SPECIAL_PROFILE_CUSTOM = 0x00,
- AWCC_SPECIAL_PROFILE_GMODE = 0xAB,
-};
-
enum AWCC_TEMP_SENSOR_TYPES {
AWCC_TEMP_SENSOR_CPU = 0x01,
AWCC_TEMP_SENSOR_FRONT = 0x03,
@@ -266,17 +261,18 @@ enum AWCC_FAN_TYPES {
};
enum awcc_thermal_profile {
- AWCC_PROFILE_USTT_BALANCED,
- AWCC_PROFILE_USTT_BALANCED_PERFORMANCE,
- AWCC_PROFILE_USTT_COOL,
- AWCC_PROFILE_USTT_QUIET,
- AWCC_PROFILE_USTT_PERFORMANCE,
- AWCC_PROFILE_USTT_LOW_POWER,
- AWCC_PROFILE_LEGACY_QUIET,
- AWCC_PROFILE_LEGACY_BALANCED,
- AWCC_PROFILE_LEGACY_BALANCED_PERFORMANCE,
- AWCC_PROFILE_LEGACY_PERFORMANCE,
- AWCC_PROFILE_LAST,
+ AWCC_PROFILE_SPECIAL_CUSTOM = 0x00,
+ AWCC_PROFILE_LEGACY_QUIET = 0x96,
+ AWCC_PROFILE_LEGACY_BALANCED = 0x97,
+ AWCC_PROFILE_LEGACY_BALANCED_PERFORMANCE = 0x98,
+ AWCC_PROFILE_LEGACY_PERFORMANCE = 0x99,
+ AWCC_PROFILE_USTT_BALANCED = 0xA0,
+ AWCC_PROFILE_USTT_BALANCED_PERFORMANCE = 0xA1,
+ AWCC_PROFILE_USTT_COOL = 0xA2,
+ AWCC_PROFILE_USTT_QUIET = 0xA3,
+ AWCC_PROFILE_USTT_PERFORMANCE = 0xA4,
+ AWCC_PROFILE_USTT_LOW_POWER = 0xA5,
+ AWCC_PROFILE_SPECIAL_GMODE = 0xAB,
};
struct wmax_led_args {
@@ -332,19 +328,6 @@ struct awcc_priv {
u32 gpio_count;
};
-static const enum platform_profile_option awcc_mode_to_platform_profile[AWCC_PROFILE_LAST] = {
- [AWCC_PROFILE_USTT_BALANCED] = PLATFORM_PROFILE_BALANCED,
- [AWCC_PROFILE_USTT_BALANCED_PERFORMANCE] = PLATFORM_PROFILE_BALANCED_PERFORMANCE,
- [AWCC_PROFILE_USTT_COOL] = PLATFORM_PROFILE_COOL,
- [AWCC_PROFILE_USTT_QUIET] = PLATFORM_PROFILE_QUIET,
- [AWCC_PROFILE_USTT_PERFORMANCE] = PLATFORM_PROFILE_PERFORMANCE,
- [AWCC_PROFILE_USTT_LOW_POWER] = PLATFORM_PROFILE_LOW_POWER,
- [AWCC_PROFILE_LEGACY_QUIET] = PLATFORM_PROFILE_QUIET,
- [AWCC_PROFILE_LEGACY_BALANCED] = PLATFORM_PROFILE_BALANCED,
- [AWCC_PROFILE_LEGACY_BALANCED_PERFORMANCE] = PLATFORM_PROFILE_BALANCED_PERFORMANCE,
- [AWCC_PROFILE_LEGACY_PERFORMANCE] = PLATFORM_PROFILE_PERFORMANCE,
-};
-
static struct awcc_quirks *awcc;
/*
@@ -562,21 +545,41 @@ const struct attribute_group wmax_deepsleep_attribute_group = {
/*
* AWCC Helpers
*/
-static bool is_awcc_thermal_profile_id(u8 code)
+static int awcc_profile_to_pprof(enum awcc_thermal_profile profile,
+ enum platform_profile_option *pprof)
{
- u8 table = FIELD_GET(AWCC_THERMAL_TABLE_MASK, code);
- u8 mode = FIELD_GET(AWCC_THERMAL_MODE_MASK, code);
-
- if (mode >= AWCC_PROFILE_LAST)
- return false;
-
- if (table == AWCC_THERMAL_TABLE_LEGACY && mode >= AWCC_PROFILE_LEGACY_QUIET)
- return true;
-
- if (table == AWCC_THERMAL_TABLE_USTT && mode <= AWCC_PROFILE_USTT_LOW_POWER)
- return true;
+ switch (profile) {
+ case AWCC_PROFILE_SPECIAL_CUSTOM:
+ *pprof = PLATFORM_PROFILE_CUSTOM;
+ break;
+ case AWCC_PROFILE_LEGACY_QUIET:
+ case AWCC_PROFILE_USTT_QUIET:
+ *pprof = PLATFORM_PROFILE_QUIET;
+ break;
+ case AWCC_PROFILE_LEGACY_BALANCED:
+ case AWCC_PROFILE_USTT_BALANCED:
+ *pprof = PLATFORM_PROFILE_BALANCED;
+ break;
+ case AWCC_PROFILE_LEGACY_BALANCED_PERFORMANCE:
+ case AWCC_PROFILE_USTT_BALANCED_PERFORMANCE:
+ *pprof = PLATFORM_PROFILE_BALANCED_PERFORMANCE;
+ break;
+ case AWCC_PROFILE_LEGACY_PERFORMANCE:
+ case AWCC_PROFILE_USTT_PERFORMANCE:
+ case AWCC_PROFILE_SPECIAL_GMODE:
+ *pprof = PLATFORM_PROFILE_PERFORMANCE;
+ break;
+ case AWCC_PROFILE_USTT_COOL:
+ *pprof = PLATFORM_PROFILE_COOL;
+ break;
+ case AWCC_PROFILE_USTT_LOW_POWER:
+ *pprof = PLATFORM_PROFILE_LOW_POWER;
+ break;
+ default:
+ return -EINVAL;
+ }
- return false;
+ return 0;
}
static int awcc_wmi_command(struct wmi_device *wdev, u32 method_id,
@@ -1225,24 +1228,7 @@ static int awcc_platform_profile_get(struct device *dev,
if (ret)
return ret;
- switch (out_data) {
- case AWCC_SPECIAL_PROFILE_CUSTOM:
- *profile = PLATFORM_PROFILE_CUSTOM;
- return 0;
- case AWCC_SPECIAL_PROFILE_GMODE:
- *profile = PLATFORM_PROFILE_PERFORMANCE;
- return 0;
- default:
- break;
- }
-
- if (!is_awcc_thermal_profile_id(out_data))
- return -ENODATA;
-
- out_data = FIELD_GET(AWCC_THERMAL_MODE_MASK, out_data);
- *profile = awcc_mode_to_platform_profile[out_data];
-
- return 0;
+ return awcc_profile_to_pprof(out_data, profile);
}
static int awcc_platform_profile_set(struct device *dev,
@@ -1279,7 +1265,6 @@ static int awcc_platform_profile_probe(void *drvdata, unsigned long *choices)
{
enum platform_profile_option profile;
struct awcc_priv *priv = drvdata;
- enum awcc_thermal_profile mode;
u8 id, offset = 0;
int ret;
@@ -1301,15 +1286,20 @@ static int awcc_platform_profile_probe(void *drvdata, unsigned long *choices)
if (ret)
return ret;
- if (!is_awcc_thermal_profile_id(id)) {
+ /*
+		 * The G-Mode profile ID is not listed consistently across models
+		 * that support it, so we handle it through quirks instead.
+ */
+ if (id == AWCC_PROFILE_SPECIAL_GMODE)
+ continue;
+
+ ret = awcc_profile_to_pprof(id, &profile);
+ if (ret) {
dev_dbg(&priv->wdev->dev, "Unmapped thermal profile ID 0x%02x\n", id);
continue;
}
- mode = FIELD_GET(AWCC_THERMAL_MODE_MASK, id);
- profile = awcc_mode_to_platform_profile[mode];
priv->supported_profiles[profile] = id;
-
__set_bit(profile, choices);
}
@@ -1318,14 +1308,14 @@ static int awcc_platform_profile_probe(void *drvdata, unsigned long *choices)
if (awcc->gmode) {
priv->supported_profiles[PLATFORM_PROFILE_PERFORMANCE] =
- AWCC_SPECIAL_PROFILE_GMODE;
+ AWCC_PROFILE_SPECIAL_GMODE;
__set_bit(PLATFORM_PROFILE_PERFORMANCE, choices);
}
/* Every model supports the "custom" profile */
priv->supported_profiles[PLATFORM_PROFILE_CUSTOM] =
- AWCC_SPECIAL_PROFILE_CUSTOM;
+ AWCC_PROFILE_SPECIAL_CUSTOM;
__set_bit(PLATFORM_PROFILE_CUSTOM, choices);
diff --git a/drivers/platform/x86/gpd-pocket-fan.c b/drivers/platform/x86/gpd-pocket-fan.c
index 7a20f68ae206..c9236738f896 100644
--- a/drivers/platform/x86/gpd-pocket-fan.c
+++ b/drivers/platform/x86/gpd-pocket-fan.c
@@ -112,14 +112,14 @@ set_speed:
gpd_pocket_fan_set_speed(fan, speed);
/* When mostly idle (low temp/speed), slow down the poll interval. */
- queue_delayed_work(system_wq, &fan->work,
+ queue_delayed_work(system_percpu_wq, &fan->work,
msecs_to_jiffies(4000 / (speed + 1)));
}
static void gpd_pocket_fan_force_update(struct gpd_pocket_fan_data *fan)
{
fan->last_speed = -1;
- mod_delayed_work(system_wq, &fan->work, 0);
+ mod_delayed_work(system_percpu_wq, &fan->work, 0);
}
static int gpd_pocket_fan_probe(struct platform_device *pdev)
diff --git a/drivers/platform/x86/hp/hp-wmi.c b/drivers/platform/x86/hp/hp-wmi.c
index ad9d9f97960f..f4ea1ea05997 100644
--- a/drivers/platform/x86/hp/hp-wmi.c
+++ b/drivers/platform/x86/hp/hp-wmi.c
@@ -63,12 +63,16 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45E9-BE91-3D44E2C707E4");
* contains "PerformanceControl".
*/
static const char * const omen_thermal_profile_boards[] = {
- "84DA", "84DB", "84DC", "8574", "8575", "860A", "87B5", "8572", "8573",
- "8600", "8601", "8602", "8605", "8606", "8607", "8746", "8747", "8749",
- "874A", "8603", "8604", "8748", "886B", "886C", "878A", "878B", "878C",
- "88C8", "88CB", "8786", "8787", "8788", "88D1", "88D2", "88F4", "88FD",
- "88F5", "88F6", "88F7", "88FE", "88FF", "8900", "8901", "8902", "8912",
- "8917", "8918", "8949", "894A", "89EB", "8BAD", "8A42", "8A15"
+ "84DA", "84DB", "84DC",
+ "8572", "8573", "8574", "8575",
+ "8600", "8601", "8602", "8603", "8604", "8605", "8606", "8607", "860A",
+ "8746", "8747", "8748", "8749", "874A", "8786", "8787", "8788", "878A",
+ "878B", "878C", "87B5",
+ "886B", "886C", "88C8", "88CB", "88D1", "88D2", "88F4", "88F5", "88F6",
+ "88F7", "88FD", "88FE", "88FF",
+ "8900", "8901", "8902", "8912", "8917", "8918", "8949", "894A", "89EB",
+ "8A15", "8A42",
+ "8BAD",
};
/* DMI Board names of Omen laptops that are specifically set to be thermal
@@ -76,7 +80,8 @@ static const char * const omen_thermal_profile_boards[] = {
* the get system design information WMI call returns
*/
static const char * const omen_thermal_profile_force_v0_boards[] = {
- "8607", "8746", "8747", "8749", "874A", "8748"
+ "8607",
+ "8746", "8747", "8748", "8749", "874A",
};
/* DMI board names of Omen laptops that have a thermal profile timer which will
 * cause the embedded controller to set the thermal profile back to
 * "balanced" when reaching zero.
* "balanced" when reaching zero.
*/
static const char * const omen_timed_thermal_profile_boards[] = {
- "8BAD", "8A42", "8A15"
+ "8A15", "8A42",
+ "8BAD",
};
/* DMI Board names of Victus 16-d1xxx laptops */
static const char * const victus_thermal_profile_boards[] = {
- "8A25"
+ "8A25",
};
/* DMI Board names of Victus 16-r and Victus 16-s laptops */
diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c
index 9c07a7faf18f..560cc063198e 100644
--- a/drivers/platform/x86/intel/hid.c
+++ b/drivers/platform/x86/intel/hid.c
@@ -177,6 +177,18 @@ static const struct dmi_system_id dmi_vgbs_allow_list[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite Dragonfly G2 Notebook PC"),
},
},
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Dell Pro Rugged 10 Tablet RA00260"),
+ },
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Dell Pro Rugged 12 Tablet RA02260"),
+ },
+ },
{ }
};
diff --git a/drivers/platform/x86/intel/pmc/arl.c b/drivers/platform/x86/intel/pmc/arl.c
index 17ad87b392ab..eb23bc68340a 100644
--- a/drivers/platform/x86/intel/pmc/arl.c
+++ b/drivers/platform/x86/intel/pmc/arl.c
@@ -281,6 +281,7 @@ static const struct pmc_reg_map arl_socs_reg_map = {
.etr3_offset = ETR3_OFFSET,
.pson_residency_offset = TGL_PSON_RESIDENCY_OFFSET,
.pson_residency_counter_step = TGL_PSON_RES_COUNTER_STEP,
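+	/* Telemetry GUID used to look up this PMC's LPM requirement data */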
+ .lpm_req_guid = SOCS_LPM_REQ_GUID,
};
static const struct pmc_bit_map arl_pchs_ltr_show_map[] = {
@@ -648,26 +649,23 @@ static const struct pmc_reg_map arl_pchs_reg_map = {
.lpm_num_maps = ADL_LPM_NUM_MAPS,
.lpm_reg_index = ARL_LPM_REG_INDEX,
.etr3_offset = ETR3_OFFSET,
+ .lpm_req_guid = PCHS_LPM_REQ_GUID,
};
static struct pmc_info arl_pmc_info_list[] = {
{
- .guid = IOEP_LPM_REQ_GUID,
.devid = PMC_DEVID_ARL_IOEP,
.map = &mtl_ioep_reg_map,
},
{
- .guid = SOCS_LPM_REQ_GUID,
.devid = PMC_DEVID_ARL_SOCS,
.map = &arl_socs_reg_map,
},
{
- .guid = PCHS_LPM_REQ_GUID,
.devid = PMC_DEVID_ARL_PCHS,
.map = &arl_pchs_reg_map,
},
{
- .guid = SOCM_LPM_REQ_GUID,
.devid = PMC_DEVID_ARL_SOCM,
.map = &mtl_socm_reg_map,
},
@@ -720,9 +718,10 @@ static int arl_h_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_
return generic_core_init(pmcdev, pmc_dev_info);
}
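+/* Zero-terminated GUID list; pmc_core_punit_pmt_init() tries each in turn */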
+static u32 ARL_PMT_DMU_GUIDS[] = {ARL_PMT_DMU_GUID, 0x0};
struct pmc_dev_info arl_pmc_dev = {
.pci_func = 0,
- .dmu_guid = ARL_PMT_DMU_GUID,
+ .dmu_guids = ARL_PMT_DMU_GUIDS,
.regmap_list = arl_pmc_info_list,
.map = &arl_socs_reg_map,
.sub_req_show = &pmc_core_substate_req_regs_fops,
@@ -732,9 +731,10 @@ struct pmc_dev_info arl_pmc_dev = {
.sub_req = pmc_core_pmt_get_lpm_req,
};
+static u32 ARL_H_PMT_DMU_GUIDS[] = {ARL_PMT_DMU_GUID, ARL_H_PMT_DMU_GUID, 0x0};
struct pmc_dev_info arl_h_pmc_dev = {
.pci_func = 2,
- .dmu_guid = ARL_PMT_DMU_GUID,
+ .dmu_guids = ARL_H_PMT_DMU_GUIDS,
.regmap_list = arl_pmc_info_list,
.map = &mtl_socm_reg_map,
.sub_req_show = &pmc_core_substate_req_regs_fops,
diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c
index ac3d19ae8c56..7d7ae8a40b0e 100644
--- a/drivers/platform/x86/intel/pmc/core.c
+++ b/drivers/platform/x86/intel/pmc/core.c
@@ -20,6 +20,7 @@ enum header_type {
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmi.h>
+#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pci.h>
@@ -311,20 +312,20 @@ static inline u8 pmc_core_reg_read_byte(struct pmc *pmc, int offset)
}
static void pmc_core_display_map(struct seq_file *s, int index, int idx, int ip,
- int pmc_index, u8 pf_reg, const struct pmc_bit_map **pf_map)
+ int pmc_idx, u8 pf_reg, const struct pmc_bit_map **pf_map)
{
seq_printf(s, "PMC%d:PCH IP: %-2d - %-32s\tState: %s\n",
- pmc_index, ip, pf_map[idx][index].name,
+ pmc_idx, ip, pf_map[idx][index].name,
pf_map[idx][index].bit_mask & pf_reg ? "Off" : "On");
}
static int pmc_core_ppfear_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
- unsigned int i;
+ unsigned int pmc_idx;
- for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
- struct pmc *pmc = pmcdev->pmcs[i];
+ for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) {
+ struct pmc *pmc = pmcdev->pmcs[pmc_idx];
const struct pmc_bit_map **maps;
u8 pf_regs[PPFEAR_MAX_NUM_ENTRIES];
unsigned int index, iter, idx, ip = 0;
@@ -342,7 +343,7 @@ static int pmc_core_ppfear_show(struct seq_file *s, void *unused)
for (idx = 0; maps[idx]; idx++) {
for (index = 0; maps[idx][index].name &&
index < pmc->map->ppfear_buckets * 8; ip++, index++)
- pmc_core_display_map(s, index, idx, ip, i,
+ pmc_core_display_map(s, index, idx, ip, pmc_idx,
pf_regs[index / 8], maps);
}
}
@@ -471,7 +472,7 @@ int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value, int ignore)
struct pmc *pmc;
const struct pmc_reg_map *map;
u32 reg;
- unsigned int pmc_index;
+ unsigned int pmc_idx;
int ltr_index;
ltr_index = value;
@@ -479,8 +480,8 @@ int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value, int ignore)
* is based on the contiguous indexes from ltr_show output.
 * pmc index and ltr index need to be calculated from it.
*/
- for (pmc_index = 0; pmc_index < ARRAY_SIZE(pmcdev->pmcs) && ltr_index >= 0; pmc_index++) {
- pmc = pmcdev->pmcs[pmc_index];
+ for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs) && ltr_index >= 0; pmc_idx++) {
+ pmc = pmcdev->pmcs[pmc_idx];
if (!pmc)
continue;
@@ -497,10 +498,10 @@ int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value, int ignore)
ltr_index = ltr_index - (map->ltr_ignore_max + 2) - 1;
}
- if (pmc_index >= ARRAY_SIZE(pmcdev->pmcs) || ltr_index < 0)
+ if (pmc_idx >= ARRAY_SIZE(pmcdev->pmcs) || ltr_index < 0)
return -EINVAL;
- pr_debug("ltr_ignore for pmc%d: ltr_index:%d\n", pmc_index, ltr_index);
+ pr_debug("ltr_ignore for pmc%d: ltr_index:%d\n", pmc_idx, ltr_index);
guard(mutex)(&pmcdev->lock);
@@ -635,14 +636,14 @@ static int pmc_core_ltr_show(struct seq_file *s, void *unused)
u64 decoded_snoop_ltr, decoded_non_snoop_ltr, val;
u32 ltr_raw_data, scale;
u16 snoop_ltr, nonsnoop_ltr;
- unsigned int i, index, ltr_index = 0;
+ unsigned int pmc_idx, index, ltr_index = 0;
- for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
+ for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) {
struct pmc *pmc;
const struct pmc_bit_map *map;
u32 ltr_ign_reg;
- pmc = pmcdev->pmcs[i];
+ pmc = pmcdev->pmcs[pmc_idx];
if (!pmc)
continue;
@@ -676,7 +677,7 @@ static int pmc_core_ltr_show(struct seq_file *s, void *unused)
}
seq_printf(s, "%d\tPMC%d:%-32s\tLTR: RAW: 0x%-16x\tNon-Snoop(ns): %-16llu\tSnoop(ns): %-16llu\tLTR_IGNORE: %d\n",
- ltr_index, i, map[index].name, ltr_raw_data,
+ ltr_index, pmc_idx, map[index].name, ltr_raw_data,
decoded_non_snoop_ltr,
decoded_snoop_ltr, ltr_ign_data);
ltr_index++;
@@ -689,15 +690,15 @@ DEFINE_SHOW_ATTRIBUTE(pmc_core_ltr);
static int pmc_core_s0ix_blocker_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
- unsigned int pmcidx;
+ unsigned int pmc_idx;
- for (pmcidx = 0; pmcidx < ARRAY_SIZE(pmcdev->pmcs); pmcidx++) {
+ for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); pmc_idx++) {
const struct pmc_bit_map **maps;
unsigned int arr_size, r_idx;
u32 offset, counter;
struct pmc *pmc;
- pmc = pmcdev->pmcs[pmcidx];
+ pmc = pmcdev->pmcs[pmc_idx];
if (!pmc)
continue;
maps = pmc->map->s0ix_blocker_maps;
@@ -711,7 +712,7 @@ static int pmc_core_s0ix_blocker_show(struct seq_file *s, void *unused)
if (!map->blk)
continue;
counter = pmc_core_reg_read(pmc, offset);
- seq_printf(s, "PMC%d:%-30s %-30d\n", pmcidx,
+ seq_printf(s, "PMC%d:%-30s %-30d\n", pmc_idx,
map->name, counter);
offset += map->blk * S0IX_BLK_SIZE;
}
@@ -723,13 +724,13 @@ DEFINE_SHOW_ATTRIBUTE(pmc_core_s0ix_blocker);
static void pmc_core_ltr_ignore_all(struct pmc_dev *pmcdev)
{
- unsigned int i;
+ unsigned int pmc_idx;
- for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); i++) {
+ for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); pmc_idx++) {
struct pmc *pmc;
u32 ltr_ign;
- pmc = pmcdev->pmcs[i];
+ pmc = pmcdev->pmcs[pmc_idx];
if (!pmc)
continue;
@@ -750,12 +751,12 @@ static void pmc_core_ltr_ignore_all(struct pmc_dev *pmcdev)
static void pmc_core_ltr_restore_all(struct pmc_dev *pmcdev)
{
- unsigned int i;
+ unsigned int pmc_idx;
- for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); i++) {
+ for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); pmc_idx++) {
struct pmc *pmc;
- pmc = pmcdev->pmcs[i];
+ pmc = pmcdev->pmcs[pmc_idx];
if (!pmc)
continue;
@@ -794,10 +795,10 @@ DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_res);
static int pmc_core_substate_sts_regs_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
- unsigned int i;
+ unsigned int pmc_idx;
- for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
- struct pmc *pmc = pmcdev->pmcs[i];
+ for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) {
+ struct pmc *pmc = pmcdev->pmcs[pmc_idx];
const struct pmc_bit_map **maps;
u32 offset;
@@ -805,7 +806,7 @@ static int pmc_core_substate_sts_regs_show(struct seq_file *s, void *unused)
continue;
maps = pmc->map->lpm_sts;
offset = pmc->map->lpm_status_offset;
- pmc_core_lpm_display(pmc, NULL, s, offset, i, "STATUS", maps);
+ pmc_core_lpm_display(pmc, NULL, s, offset, pmc_idx, "STATUS", maps);
}
return 0;
@@ -815,10 +816,10 @@ DEFINE_SHOW_ATTRIBUTE(pmc_core_substate_sts_regs);
static int pmc_core_substate_l_sts_regs_show(struct seq_file *s, void *unused)
{
struct pmc_dev *pmcdev = s->private;
- unsigned int i;
+ unsigned int pmc_idx;
- for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
- struct pmc *pmc = pmcdev->pmcs[i];
+ for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) {
+ struct pmc *pmc = pmcdev->pmcs[pmc_idx];
const struct pmc_bit_map **maps;
u32 offset;
@@ -826,7 +827,7 @@ static int pmc_core_substate_l_sts_regs_show(struct seq_file *s, void *unused)
continue;
maps = pmc->map->lpm_sts;
offset = pmc->map->lpm_live_status_offset;
- pmc_core_lpm_display(pmc, NULL, s, offset, i, "LIVE_STATUS", maps);
+ pmc_core_lpm_display(pmc, NULL, s, offset, pmc_idx, "LIVE_STATUS", maps);
}
return 0;
@@ -919,11 +920,11 @@ static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused)
u32 sts_offset;
u32 sts_offset_live;
u32 *lpm_req_regs;
- unsigned int mp, pmc_index;
+ unsigned int mp, pmc_idx;
int num_maps;
- for (pmc_index = 0; pmc_index < ARRAY_SIZE(pmcdev->pmcs); ++pmc_index) {
- struct pmc *pmc = pmcdev->pmcs[pmc_index];
+ for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) {
+ struct pmc *pmc = pmcdev->pmcs[pmc_idx];
const struct pmc_bit_map **maps;
if (!pmc)
@@ -944,7 +945,7 @@ static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused)
continue;
/* Display the header */
- pmc_core_substate_req_header_show(s, pmc_index, HEADER_STATUS);
+ pmc_core_substate_req_header_show(s, pmc_idx, HEADER_STATUS);
/* Loop over maps */
for (mp = 0; mp < num_maps; mp++) {
@@ -982,7 +983,7 @@ static int pmc_core_substate_req_regs_show(struct seq_file *s, void *unused)
}
/* Display the element name in the first column */
- seq_printf(s, "pmc%d: %34s |", pmc_index, map[i].name);
+ seq_printf(s, "pmc%d: %34s |", pmc_idx, map[i].name);
/* Loop over the enabled states and display if required */
pmc_for_each_mode(mode, pmcdev) {
@@ -1281,7 +1282,20 @@ int get_primary_reg_base(struct pmc *pmc)
return 0;
}
-void pmc_core_punit_pmt_init(struct pmc_dev *pmcdev, u32 guid)
+static struct telem_endpoint *pmc_core_register_endpoint(struct pci_dev *pcidev, u32 *guids)
+{
+ struct telem_endpoint *ep;
+ unsigned int i;
+
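+	/* @guids is a zero-terminated list; use the first GUID that yields an endpoint */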
+ for (i = 0; guids[i]; i++) {
+ ep = pmt_telem_find_and_register_endpoint(pcidev, guids[i], 0);
+ if (!IS_ERR(ep))
+ return ep;
+ }
+ return ERR_PTR(-ENODEV);
+}
+
+void pmc_core_punit_pmt_init(struct pmc_dev *pmcdev, u32 *guids)
{
struct telem_endpoint *ep;
struct pci_dev *pcidev;
@@ -1292,7 +1306,7 @@ void pmc_core_punit_pmt_init(struct pmc_dev *pmcdev, u32 guid)
return;
}
- ep = pmt_telem_find_and_register_endpoint(pcidev, guid, 0);
+ ep = pmc_core_register_endpoint(pcidev, guids);
pci_dev_put(pcidev);
if (IS_ERR(ep)) {
dev_err(&pmcdev->pdev->dev,
@@ -1302,8 +1316,6 @@ void pmc_core_punit_pmt_init(struct pmc_dev *pmcdev, u32 guid)
}
pmcdev->punit_ep = ep;
-
- pmcdev->has_die_c6 = true;
pmcdev->die_c6_offset = MTL_PMT_DMU_DIE_C6_OFFSET;
}
@@ -1423,22 +1435,13 @@ static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev, struct pmc_dev_info
pmcdev->dbgfs_dir, primary_pmc, &pmc_core_pson_residency);
}
- if (pmcdev->has_die_c6) {
+ if (pmcdev->punit_ep) {
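+		/* A registered punit endpoint implies die C6 residency is readable */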
debugfs_create_file("die_c6_us_show", 0444,
pmcdev->dbgfs_dir, pmcdev,
&pmc_core_die_c6_us_fops);
}
}
-static u32 pmc_core_find_guid(struct pmc_info *list, const struct pmc_reg_map *map)
-{
- for (; list->map; ++list)
- if (list->map == map)
- return list->guid;
-
- return 0;
-}
-
/*
* This function retrieves low power mode requirement data from PMC Low
* Power Mode (LPM) table.
@@ -1553,26 +1556,24 @@ static int pmc_core_get_telem_info(struct pmc_dev *pmcdev, struct pmc_dev_info *
{
struct pci_dev *pcidev __free(pci_dev_put) = NULL;
struct telem_endpoint *ep;
- unsigned int i;
- u32 guid;
+ unsigned int pmc_idx;
int ret;
pcidev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(20, pmc_dev_info->pci_func));
if (!pcidev)
return -ENODEV;
- for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
+ for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) {
struct pmc *pmc;
- pmc = pmcdev->pmcs[i];
+ pmc = pmcdev->pmcs[pmc_idx];
if (!pmc)
continue;
- guid = pmc_core_find_guid(pmcdev->regmap_list, pmc->map);
- if (!guid)
+ if (!pmc->map->lpm_req_guid)
return -ENXIO;
- ep = pmt_telem_find_and_register_endpoint(pcidev, guid, 0);
+ ep = pmt_telem_find_and_register_endpoint(pcidev, pmc->map->lpm_req_guid, 0);
if (IS_ERR(ep)) {
dev_dbg(&pmcdev->pdev->dev, "couldn't get telem endpoint %pe", ep);
return -EPROBE_DEFER;
@@ -1596,7 +1597,7 @@ static const struct pmc_reg_map *pmc_core_find_regmap(struct pmc_info *list, u16
return NULL;
}
-static int pmc_core_pmc_add(struct pmc_dev *pmcdev, unsigned int pmc_index)
+static int pmc_core_pmc_add(struct pmc_dev *pmcdev, unsigned int pmc_idx)
{
struct pmc_ssram_telemetry pmc_ssram_telemetry;
@@ -1604,7 +1605,7 @@ static int pmc_core_pmc_add(struct pmc_dev *pmcdev, unsigned int pmc_index)
struct pmc *pmc;
int ret;
- ret = pmc_ssram_telemetry_get_pmc_info(pmc_index, &pmc_ssram_telemetry);
+ ret = pmc_ssram_telemetry_get_pmc_info(pmc_idx, &pmc_ssram_telemetry);
if (ret)
return ret;
@@ -1612,7 +1613,7 @@ static int pmc_core_pmc_add(struct pmc_dev *pmcdev, unsigned int pmc_index)
if (!map)
return -ENODEV;
- pmc = pmcdev->pmcs[pmc_index];
+ pmc = pmcdev->pmcs[pmc_idx];
/* Memory for primary PMC has been allocated */
if (!pmc) {
pmc = devm_kzalloc(&pmcdev->pdev->dev, sizeof(*pmc), GFP_KERNEL);
@@ -1629,7 +1630,7 @@ static int pmc_core_pmc_add(struct pmc_dev *pmcdev, unsigned int pmc_index)
return -ENOMEM;
}
- pmcdev->pmcs[pmc_index] = pmc;
+ pmcdev->pmcs[pmc_idx] = pmc;
return 0;
}
@@ -1689,8 +1690,8 @@ int generic_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info)
}
pmc_core_get_low_power_modes(pmcdev);
- if (pmc_dev_info->dmu_guid)
- pmc_core_punit_pmt_init(pmcdev, pmc_dev_info->dmu_guid);
+ if (pmc_dev_info->dmu_guids)
+ pmc_core_punit_pmt_init(pmcdev, pmc_dev_info->dmu_guids);
if (ssram) {
ret = pmc_core_get_telem_info(pmcdev, pmc_dev_info);
@@ -1701,8 +1702,8 @@ int generic_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info)
return 0;
unmap_regbase:
- for (unsigned int i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
- struct pmc *pmc = pmcdev->pmcs[i];
+ for (unsigned int pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) {
+ struct pmc *pmc = pmcdev->pmcs[pmc_idx];
if (pmc && pmc->regbase)
iounmap(pmc->regbase);
@@ -1795,10 +1796,10 @@ static void pmc_core_do_dmi_quirks(struct pmc *pmc)
static void pmc_core_clean_structure(struct platform_device *pdev)
{
struct pmc_dev *pmcdev = platform_get_drvdata(pdev);
- unsigned int i;
+ unsigned int pmc_idx;
- for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
- struct pmc *pmc = pmcdev->pmcs[i];
+ for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) {
+ struct pmc *pmc = pmcdev->pmcs[pmc_idx];
if (pmc && pmc->regbase)
iounmap(pmc->regbase);
@@ -1958,7 +1959,7 @@ int pmc_core_resume_common(struct pmc_dev *pmcdev)
struct pmc *pmc = pmcdev->pmcs[PMC_IDX_MAIN];
const struct pmc_bit_map **maps = pmc->map->lpm_sts;
int offset = pmc->map->lpm_status_offset;
- unsigned int i;
+ unsigned int pmc_idx, i;
/* Check if the suspend used S0ix */
if (pm_suspend_via_firmware())
@@ -1996,13 +1997,13 @@ int pmc_core_resume_common(struct pmc_dev *pmcdev)
if (pmc->map->slps0_dbg_maps)
pmc_core_slps0_display(pmc, dev, NULL);
- for (i = 0; i < ARRAY_SIZE(pmcdev->pmcs); ++i) {
- struct pmc *pmc = pmcdev->pmcs[i];
+ for (pmc_idx = 0; pmc_idx < ARRAY_SIZE(pmcdev->pmcs); ++pmc_idx) {
+ struct pmc *pmc = pmcdev->pmcs[pmc_idx];
if (!pmc)
continue;
if (pmc->map->lpm_sts)
- pmc_core_lpm_display(pmc, dev, NULL, offset, i, "STATUS", maps);
+ pmc_core_lpm_display(pmc, dev, NULL, offset, pmc_idx, "STATUS", maps);
}
return 0;
diff --git a/drivers/platform/x86/intel/pmc/core.h b/drivers/platform/x86/intel/pmc/core.h
index f4dadb696a31..272fb4f57f34 100644
--- a/drivers/platform/x86/intel/pmc/core.h
+++ b/drivers/platform/x86/intel/pmc/core.h
@@ -282,7 +282,8 @@ enum ppfear_regs {
/* Die C6 from PUNIT telemetry */
#define MTL_PMT_DMU_DIE_C6_OFFSET 15
#define MTL_PMT_DMU_GUID 0x1A067102
-#define ARL_PMT_DMU_GUID 0x1A06A000
+#define ARL_PMT_DMU_GUID 0x1A06A102
+#define ARL_H_PMT_DMU_GUID 0x1A06A101
#define LNL_PMC_MMIO_REG_LEN 0x2708
#define LNL_PMC_LTR_OSSE 0x1B88
@@ -303,6 +304,8 @@ enum ppfear_regs {
/* Wildcat Lake */
#define WCL_PMC_LTR_RESERVED 0x1B64
#define WCL_PCD_PMC_MMIO_REG_LEN 0x3178
+#define WCL_NUM_S0IX_BLOCKER 94
+#define WCL_BLK_REQ_OFFSET 50
/* SSRAM PMC Device ID */
/* LNL */
@@ -355,6 +358,7 @@ struct pmc_bit_map {
 * @s0ix_blocker_offset: PWRMBASE offset to S0ix blocker counter
* @num_s0ix_blocker: Number of S0ix blockers
* @blocker_req_offset: Telemetry offset to S0ix blocker low power mode substate requirement table
+ * @lpm_req_guid: Telemetry GUID to read low power mode substate requirement table
*
* Each PCH has unique set of register offsets and bit indexes. This structure
* captures them to have a common implementation.
@@ -396,6 +400,8 @@ struct pmc_reg_map {
const u8 *lpm_reg_index;
const u32 pson_residency_offset;
const u32 pson_residency_counter_step;
+ /* GUID for telemetry regions */
+ const u32 lpm_req_guid;
};
/**
@@ -405,7 +411,6 @@ struct pmc_reg_map {
* specific attributes
*/
struct pmc_info {
- u32 guid;
u16 devid;
const struct pmc_reg_map *map;
};
@@ -465,7 +470,6 @@ struct pmc_dev {
u64 *pkgc_res_cnt;
u8 num_of_pkgc;
- bool has_die_c6;
u32 die_c6_offset;
struct telem_endpoint *punit_ep;
struct pmc_info *regmap_list;
@@ -481,7 +485,7 @@ enum pmc_index {
/**
* struct pmc_dev_info - Structure to keep PMC device info
* @pci_func: Function number of the primary PMC
- * @dmu_guid: Die Management Unit GUID
+ * @dmu_guids: Zero-terminated list of Die Management Unit GUIDs
* @regmap_list: Pointer to a list of pmc_info structure that could be
* available for the platform. When set, this field implies
* SSRAM support.
@@ -495,7 +499,7 @@ enum pmc_index {
*/
struct pmc_dev_info {
u8 pci_func;
- u32 dmu_guid;
+ u32 *dmu_guids;
struct pmc_info *regmap_list;
const struct pmc_reg_map *map;
const struct file_operations *sub_req_show;
@@ -532,7 +536,7 @@ int pmc_core_send_ltr_ignore(struct pmc_dev *pmcdev, u32 value, int ignore);
int pmc_core_resume_common(struct pmc_dev *pmcdev);
int get_primary_reg_base(struct pmc *pmc);
void pmc_core_get_low_power_modes(struct pmc_dev *pmcdev);
-void pmc_core_punit_pmt_init(struct pmc_dev *pmcdev, u32 guid);
+void pmc_core_punit_pmt_init(struct pmc_dev *pmcdev, u32 *guids);
void pmc_core_set_device_d3(unsigned int device);
int generic_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_info);
diff --git a/drivers/platform/x86/intel/pmc/lnl.c b/drivers/platform/x86/intel/pmc/lnl.c
index 6fa027e7071f..1cd81ee54dcf 100644
--- a/drivers/platform/x86/intel/pmc/lnl.c
+++ b/drivers/platform/x86/intel/pmc/lnl.c
@@ -533,11 +533,11 @@ static const struct pmc_reg_map lnl_socm_reg_map = {
.s0ix_blocker_maps = lnl_blk_maps,
.s0ix_blocker_offset = LNL_S0IX_BLOCKER_OFFSET,
.lpm_reg_index = LNL_LPM_REG_INDEX,
+ .lpm_req_guid = SOCM_LPM_REQ_GUID,
};
static struct pmc_info lnl_pmc_info_list[] = {
{
- .guid = SOCM_LPM_REQ_GUID,
.devid = PMC_DEVID_LNL_SOCM,
.map = &lnl_socm_reg_map,
},
diff --git a/drivers/platform/x86/intel/pmc/mtl.c b/drivers/platform/x86/intel/pmc/mtl.c
index 0b87e10f864e..57508cbf9cd4 100644
--- a/drivers/platform/x86/intel/pmc/mtl.c
+++ b/drivers/platform/x86/intel/pmc/mtl.c
@@ -473,6 +473,7 @@ const struct pmc_reg_map mtl_socm_reg_map = {
.lpm_status_offset = MTL_LPM_STATUS_OFFSET,
.lpm_live_status_offset = MTL_LPM_LIVE_STATUS_OFFSET,
.lpm_reg_index = MTL_LPM_REG_INDEX,
+ .lpm_req_guid = SOCP_LPM_REQ_GUID,
};
static const struct pmc_bit_map mtl_ioep_pfear_map[] = {
@@ -797,6 +798,7 @@ const struct pmc_reg_map mtl_ioep_reg_map = {
.lpm_en_offset = MTL_LPM_EN_OFFSET,
.lpm_sts_latch_en_offset = MTL_LPM_STATUS_LATCH_EN_OFFSET,
.lpm_reg_index = MTL_LPM_REG_INDEX,
+ .lpm_req_guid = IOEP_LPM_REQ_GUID,
};
static const struct pmc_bit_map mtl_ioem_pfear_map[] = {
@@ -944,21 +946,19 @@ static const struct pmc_reg_map mtl_ioem_reg_map = {
.lpm_res_counter_step_x2 = TGL_PMC_LPM_RES_COUNTER_STEP_X2,
.lpm_residency_offset = MTL_LPM_RESIDENCY_OFFSET,
.lpm_reg_index = MTL_LPM_REG_INDEX,
+ .lpm_req_guid = IOEM_LPM_REQ_GUID,
};
static struct pmc_info mtl_pmc_info_list[] = {
{
- .guid = SOCP_LPM_REQ_GUID,
.devid = PMC_DEVID_MTL_SOCM,
.map = &mtl_socm_reg_map,
},
{
- .guid = IOEP_LPM_REQ_GUID,
.devid = PMC_DEVID_MTL_IOEP,
.map = &mtl_ioep_reg_map,
},
{
- .guid = IOEM_LPM_REQ_GUID,
.devid = PMC_DEVID_MTL_IOEM,
.map = &mtl_ioem_reg_map
},
@@ -992,9 +992,10 @@ static int mtl_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_in
return generic_core_init(pmcdev, pmc_dev_info);
}
+static u32 MTL_PMT_DMU_GUIDS[] = {MTL_PMT_DMU_GUID, 0x0};
struct pmc_dev_info mtl_pmc_dev = {
.pci_func = 2,
- .dmu_guid = MTL_PMT_DMU_GUID,
+ .dmu_guids = MTL_PMT_DMU_GUIDS,
.regmap_list = mtl_pmc_info_list,
.map = &mtl_socm_reg_map,
.sub_req_show = &pmc_core_substate_req_regs_fops,
diff --git a/drivers/platform/x86/intel/pmc/ptl.c b/drivers/platform/x86/intel/pmc/ptl.c
index 1b35b84e06fa..1f48e2bbc699 100644
--- a/drivers/platform/x86/intel/pmc/ptl.c
+++ b/drivers/platform/x86/intel/pmc/ptl.c
@@ -528,16 +528,15 @@ static const struct pmc_reg_map ptl_pcdp_reg_map = {
.s0ix_blocker_offset = LNL_S0IX_BLOCKER_OFFSET,
.num_s0ix_blocker = PTL_NUM_S0IX_BLOCKER,
.blocker_req_offset = PTL_BLK_REQ_OFFSET,
+ .lpm_req_guid = PCDP_LPM_REQ_GUID,
};
static struct pmc_info ptl_pmc_info_list[] = {
{
- .guid = PCDP_LPM_REQ_GUID,
.devid = PMC_DEVID_PTL_PCDH,
.map = &ptl_pcdp_reg_map,
},
{
- .guid = PCDP_LPM_REQ_GUID,
.devid = PMC_DEVID_PTL_PCDP,
.map = &ptl_pcdp_reg_map,
},
diff --git a/drivers/platform/x86/intel/pmc/wcl.c b/drivers/platform/x86/intel/pmc/wcl.c
index 85e90a639e65..a45707e6364f 100644
--- a/drivers/platform/x86/intel/pmc/wcl.c
+++ b/drivers/platform/x86/intel/pmc/wcl.c
@@ -11,6 +11,9 @@
#include "core.h"
+/* PMC SSRAM PMT Telemetry GUIDS */
+#define PCDN_LPM_REQ_GUID 0x33747648
+
static const struct pmc_bit_map wcl_pcdn_pfear_map[] = {
{"PMC_0", BIT(0)},
{"FUSE_OSSE", BIT(1)},
@@ -453,6 +456,17 @@ static const struct pmc_reg_map wcl_pcdn_reg_map = {
.lpm_live_status_offset = MTL_LPM_LIVE_STATUS_OFFSET,
.s0ix_blocker_maps = wcl_pcdn_blk_maps,
.s0ix_blocker_offset = LNL_S0IX_BLOCKER_OFFSET,
+ .num_s0ix_blocker = WCL_NUM_S0IX_BLOCKER,
+ .blocker_req_offset = WCL_BLK_REQ_OFFSET,
+ .lpm_req_guid = PCDN_LPM_REQ_GUID,
+};
+
+static struct pmc_info wcl_pmc_info_list[] = {
+ {
+ .devid = PMC_DEVID_WCL_PCDN,
+ .map = &wcl_pcdn_reg_map,
+ },
+ {}
};
#define WCL_NPU_PCI_DEV 0xfd3e
@@ -479,8 +493,12 @@ static int wcl_core_init(struct pmc_dev *pmcdev, struct pmc_dev_info *pmc_dev_in
}
struct pmc_dev_info wcl_pmc_dev = {
+ .pci_func = 2,
+ .regmap_list = wcl_pmc_info_list,
.map = &wcl_pcdn_reg_map,
+ .sub_req_show = &pmc_core_substate_blk_req_fops,
.suspend = cnl_suspend,
.resume = wcl_resume,
.init = wcl_core_init,
+ .sub_req = pmc_core_pmt_get_blk_sub_req,
};
diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c
index f66f0ce8559b..ecfc7703f201 100644
--- a/drivers/platform/x86/intel/vsec.c
+++ b/drivers/platform/x86/intel/vsec.c
@@ -765,6 +765,7 @@ static const struct intel_vsec_platform_info lnl_info = {
#define PCI_DEVICE_ID_INTEL_VSEC_TGL 0x9a0d
#define PCI_DEVICE_ID_INTEL_VSEC_LNL_M 0x647d
#define PCI_DEVICE_ID_INTEL_VSEC_PTL 0xb07d
+#define PCI_DEVICE_ID_INTEL_VSEC_WCL 0xfd7d
static const struct pci_device_id intel_vsec_pci_ids[] = {
{ PCI_DEVICE_DATA(INTEL, VSEC_ADL, &tgl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_DG1, &dg1_info) },
@@ -776,6 +777,7 @@ static const struct pci_device_id intel_vsec_pci_ids[] = {
{ PCI_DEVICE_DATA(INTEL, VSEC_TGL, &tgl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_LNL_M, &lnl_info) },
{ PCI_DEVICE_DATA(INTEL, VSEC_PTL, &mtl_info) },
+ { PCI_DEVICE_DATA(INTEL, VSEC_WCL, &mtl_info) },
{ }
};
MODULE_DEVICE_TABLE(pci, intel_vsec_pci_ids);
diff --git a/drivers/platform/x86/lenovo/ideapad-laptop.c b/drivers/platform/x86/lenovo/ideapad-laptop.c
index fcebfbaf0460..5171a077f62c 100644
--- a/drivers/platform/x86/lenovo/ideapad-laptop.c
+++ b/drivers/platform/x86/lenovo/ideapad-laptop.c
@@ -31,6 +31,7 @@
#include <linux/power_supply.h>
#include <linux/rfkill.h>
#include <linux/seq_file.h>
+#include <linux/string_choices.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/wmi.h>
@@ -62,13 +63,27 @@ enum {
CFG_OSD_CAM_BIT = 31,
};
+/*
+ * There are two charge modes supported by the GBMD/SBMC interface:
+ * - "Rapid Charge": increase power to speed up charging
+ * - "Conservation Mode": stop charging at 60-80% (depends on model)
+ *
+ * The interface doesn't prohibit enabling both modes at the same time.
+ * However, doing so is essentially meaningless, and the manufacturer utilities
+ * on Windows always make them mutually exclusive.
+ */
+
enum {
+ GBMD_RAPID_CHARGE_STATE_BIT = 2,
GBMD_CONSERVATION_STATE_BIT = 5,
+ GBMD_RAPID_CHARGE_SUPPORTED_BIT = 17,
};
enum {
SBMC_CONSERVATION_ON = 3,
SBMC_CONSERVATION_OFF = 5,
+ SBMC_RAPID_CHARGE_ON = 7,
+ SBMC_RAPID_CHARGE_OFF = 8,
};
enum {
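
The store and set paths below enforce this exclusivity by always issuing the "off" command for the conflicting mode before the "on" command for the requested one, all under gbmd_sbmc_mutex. A condensed sketch of that ordering (the helper is hypothetical; the driver open-codes it in conservation_mode_store() and ideapad_psy_ext_set_prop()):

/* Hypothetical helper showing the ordering rule the driver follows */
static int ideapad_set_exclusive_mode(struct ideapad_private *priv,
				      unsigned long off_cmd, unsigned long on_cmd)
{
	int err;

	guard(mutex)(&priv->gbmd_sbmc_mutex);

	/* Disable the conflicting mode first, e.g. SBMC_RAPID_CHARGE_OFF... */
	err = exec_sbmc(priv->adev->handle, off_cmd);
	if (err)
		return err;

	/* ...so the firmware never sees both modes enabled at once. */
	return exec_sbmc(priv->adev->handle, on_cmd);
}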
@@ -158,6 +173,7 @@ struct ideapad_rfk_priv {
struct ideapad_private {
struct acpi_device *adev;
struct mutex vpc_mutex; /* protects the VPC calls */
+ struct mutex gbmd_sbmc_mutex; /* protects GBMD/SBMC calls */
struct rfkill *rfk[IDEAPAD_RFKILL_DEV_NUM];
struct ideapad_rfk_priv rfk_priv[IDEAPAD_RFKILL_DEV_NUM];
struct platform_device *platform_device;
@@ -166,9 +182,11 @@ struct ideapad_private {
struct ideapad_dytc_priv *dytc;
struct dentry *debug;
struct acpi_battery_hook battery_hook;
+ const struct power_supply_ext *battery_ext;
unsigned long cfg;
unsigned long r_touchpad_val;
struct {
+ bool rapid_charge : 1;
bool conservation_mode : 1;
bool dytc : 1;
bool fan_mode : 1;
@@ -455,37 +473,40 @@ static int debugfs_status_show(struct seq_file *s, void *data)
struct ideapad_private *priv = s->private;
unsigned long value;
- guard(mutex)(&priv->vpc_mutex);
-
- if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL_MAX, &value))
- seq_printf(s, "Backlight max: %lu\n", value);
- if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL, &value))
- seq_printf(s, "Backlight now: %lu\n", value);
- if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL_POWER, &value))
- seq_printf(s, "BL power value: %s (%lu)\n", value ? "on" : "off", value);
-
- seq_puts(s, "=====================\n");
-
- if (!read_ec_data(priv->adev->handle, VPCCMD_R_RF, &value))
- seq_printf(s, "Radio status: %s (%lu)\n", value ? "on" : "off", value);
- if (!read_ec_data(priv->adev->handle, VPCCMD_R_WIFI, &value))
- seq_printf(s, "Wifi status: %s (%lu)\n", value ? "on" : "off", value);
- if (!read_ec_data(priv->adev->handle, VPCCMD_R_BT, &value))
- seq_printf(s, "BT status: %s (%lu)\n", value ? "on" : "off", value);
- if (!read_ec_data(priv->adev->handle, VPCCMD_R_3G, &value))
- seq_printf(s, "3G status: %s (%lu)\n", value ? "on" : "off", value);
+ scoped_guard(mutex, &priv->vpc_mutex) {
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL_MAX, &value))
+ seq_printf(s, "Backlight max: %lu\n", value);
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL, &value))
+ seq_printf(s, "Backlight now: %lu\n", value);
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_BL_POWER, &value))
+ seq_printf(s, "BL power value: %s (%lu)\n", str_on_off(value), value);
+
+ seq_puts(s, "=====================\n");
+
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_RF, &value))
+ seq_printf(s, "Radio status: %s (%lu)\n", str_on_off(value), value);
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_WIFI, &value))
+ seq_printf(s, "Wifi status: %s (%lu)\n", str_on_off(value), value);
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_BT, &value))
+ seq_printf(s, "BT status: %s (%lu)\n", str_on_off(value), value);
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_3G, &value))
+ seq_printf(s, "3G status: %s (%lu)\n", str_on_off(value), value);
+
+ seq_puts(s, "=====================\n");
+
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value))
+ seq_printf(s, "Touchpad status: %s (%lu)\n", str_on_off(value), value);
+ if (!read_ec_data(priv->adev->handle, VPCCMD_R_CAMERA, &value))
+ seq_printf(s, "Camera status: %s (%lu)\n", str_on_off(value), value);
+ }
seq_puts(s, "=====================\n");
- if (!read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value))
- seq_printf(s, "Touchpad status: %s (%lu)\n", value ? "on" : "off", value);
- if (!read_ec_data(priv->adev->handle, VPCCMD_R_CAMERA, &value))
- seq_printf(s, "Camera status: %s (%lu)\n", value ? "on" : "off", value);
-
- seq_puts(s, "=====================\n");
+ scoped_guard(mutex, &priv->gbmd_sbmc_mutex) {
+ if (!eval_gbmd(priv->adev->handle, &value))
+ seq_printf(s, "GBMD: %#010lx\n", value);
+ }
- if (!eval_gbmd(priv->adev->handle, &value))
- seq_printf(s, "GBMD: %#010lx\n", value);
if (!eval_hals(priv->adev->handle, &value))
seq_printf(s, "HALS: %#010lx\n", value);
@@ -622,10 +643,16 @@ static ssize_t conservation_mode_show(struct device *dev,
show_conservation_mode_deprecation_warning(dev);
- err = eval_gbmd(priv->adev->handle, &result);
- if (err)
- return err;
+ scoped_guard(mutex, &priv->gbmd_sbmc_mutex) {
+ err = eval_gbmd(priv->adev->handle, &result);
+ if (err)
+ return err;
+ }
+ /*
+ * For backward compatibility, ignore Rapid Charge while reporting the
+ * state of Conservation Mode.
+ */
return sysfs_emit(buf, "%d\n", !!test_bit(GBMD_CONSERVATION_STATE_BIT, &result));
}
@@ -643,6 +670,18 @@ static ssize_t conservation_mode_store(struct device *dev,
if (err)
return err;
+ guard(mutex)(&priv->gbmd_sbmc_mutex);
+
+ /*
+ * Prevent mutually exclusive modes from being set at the same time,
+ * but do not disable Rapid Charge while disabling Conservation Mode.
+ */
+ if (priv->features.rapid_charge && state) {
+ err = exec_sbmc(priv->adev->handle, SBMC_RAPID_CHARGE_OFF);
+ if (err)
+ return err;
+ }
+
err = exec_sbmc(priv->adev->handle, state ? SBMC_CONSERVATION_ON : SBMC_CONSERVATION_OFF);
if (err)
return err;
@@ -2007,15 +2046,39 @@ static int ideapad_psy_ext_set_prop(struct power_supply *psy,
const union power_supply_propval *val)
{
struct ideapad_private *priv = ext_data;
+ unsigned long op1, op2;
+ int err;
switch (val->intval) {
+ case POWER_SUPPLY_CHARGE_TYPE_FAST:
+ if (WARN_ON(!priv->features.rapid_charge))
+ return -EINVAL;
+
+ op1 = SBMC_CONSERVATION_OFF;
+ op2 = SBMC_RAPID_CHARGE_ON;
+ break;
case POWER_SUPPLY_CHARGE_TYPE_LONGLIFE:
- return exec_sbmc(priv->adev->handle, SBMC_CONSERVATION_ON);
+ op1 = SBMC_RAPID_CHARGE_OFF;
+ op2 = SBMC_CONSERVATION_ON;
+ break;
case POWER_SUPPLY_CHARGE_TYPE_STANDARD:
- return exec_sbmc(priv->adev->handle, SBMC_CONSERVATION_OFF);
+ op1 = SBMC_RAPID_CHARGE_OFF;
+ op2 = SBMC_CONSERVATION_OFF;
+ break;
default:
return -EINVAL;
}
+
+ guard(mutex)(&priv->gbmd_sbmc_mutex);
+
+ /* If !rapid_charge, op1 must be SBMC_RAPID_CHARGE_OFF. Skip it. */
+ if (priv->features.rapid_charge) {
+ err = exec_sbmc(priv->adev->handle, op1);
+ if (err)
+ return err;
+ }
+
+ return exec_sbmc(priv->adev->handle, op2);
}
static int ideapad_psy_ext_get_prop(struct power_supply *psy,
@@ -2025,14 +2088,29 @@ static int ideapad_psy_ext_get_prop(struct power_supply *psy,
union power_supply_propval *val)
{
struct ideapad_private *priv = ext_data;
+ bool is_rapid_charge, is_conservation;
unsigned long result;
int err;
- err = eval_gbmd(priv->adev->handle, &result);
- if (err)
- return err;
+ scoped_guard(mutex, &priv->gbmd_sbmc_mutex) {
+ err = eval_gbmd(priv->adev->handle, &result);
+ if (err)
+ return err;
+ }
+
+ is_rapid_charge = (priv->features.rapid_charge &&
+ test_bit(GBMD_RAPID_CHARGE_STATE_BIT, &result));
+ is_conservation = test_bit(GBMD_CONSERVATION_STATE_BIT, &result);
+
+ if (unlikely(is_rapid_charge && is_conservation)) {
+ dev_err(&priv->platform_device->dev,
+ "unexpected charge_types: both [Fast] and [Long_Life] are enabled\n");
+ return -EINVAL;
+ }
- if (test_bit(GBMD_CONSERVATION_STATE_BIT, &result))
+ if (is_rapid_charge)
+ val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST;
+ else if (is_conservation)
val->intval = POWER_SUPPLY_CHARGE_TYPE_LONGLIFE;
else
val->intval = POWER_SUPPLY_CHARGE_TYPE_STANDARD;
@@ -2052,29 +2130,42 @@ static const enum power_supply_property ideapad_power_supply_props[] = {
POWER_SUPPLY_PROP_CHARGE_TYPES,
};
-static const struct power_supply_ext ideapad_battery_ext = {
- .name = "ideapad_laptop",
- .properties = ideapad_power_supply_props,
- .num_properties = ARRAY_SIZE(ideapad_power_supply_props),
- .charge_types = (BIT(POWER_SUPPLY_CHARGE_TYPE_STANDARD) |
- BIT(POWER_SUPPLY_CHARGE_TYPE_LONGLIFE)),
- .get_property = ideapad_psy_ext_get_prop,
- .set_property = ideapad_psy_ext_set_prop,
- .property_is_writeable = ideapad_psy_prop_is_writeable,
-};
+#define DEFINE_IDEAPAD_POWER_SUPPLY_EXTENSION(_name, _charge_types) \
+ static const struct power_supply_ext _name = { \
+ .name = "ideapad_laptop", \
+ .properties = ideapad_power_supply_props, \
+ .num_properties = ARRAY_SIZE(ideapad_power_supply_props), \
+ .charge_types = _charge_types, \
+ .get_property = ideapad_psy_ext_get_prop, \
+ .set_property = ideapad_psy_ext_set_prop, \
+ .property_is_writeable = ideapad_psy_prop_is_writeable, \
+ }
+
+DEFINE_IDEAPAD_POWER_SUPPLY_EXTENSION(ideapad_battery_ext_v1,
+ (BIT(POWER_SUPPLY_CHARGE_TYPE_STANDARD) |
+ BIT(POWER_SUPPLY_CHARGE_TYPE_LONGLIFE))
+);
+
+DEFINE_IDEAPAD_POWER_SUPPLY_EXTENSION(ideapad_battery_ext_v2,
+ (BIT(POWER_SUPPLY_CHARGE_TYPE_STANDARD) |
+ BIT(POWER_SUPPLY_CHARGE_TYPE_FAST) |
+ BIT(POWER_SUPPLY_CHARGE_TYPE_LONGLIFE))
+);
static int ideapad_battery_add(struct power_supply *battery, struct acpi_battery_hook *hook)
{
struct ideapad_private *priv = container_of(hook, struct ideapad_private, battery_hook);
- return power_supply_register_extension(battery, &ideapad_battery_ext,
+ return power_supply_register_extension(battery, priv->battery_ext,
&priv->platform_device->dev, priv);
}
static int ideapad_battery_remove(struct power_supply *battery,
struct acpi_battery_hook *hook)
{
- power_supply_unregister_extension(battery, &ideapad_battery_ext);
+ struct ideapad_private *priv = container_of(hook, struct ideapad_private, battery_hook);
+
+ power_supply_unregister_extension(battery, priv->battery_ext);
return 0;
}
@@ -2099,14 +2190,25 @@ static int ideapad_check_features(struct ideapad_private *priv)
priv->features.fan_mode = true;
if (acpi_has_method(handle, "GBMD") && acpi_has_method(handle, "SBMC")) {
- priv->features.conservation_mode = true;
- priv->battery_hook.add_battery = ideapad_battery_add;
- priv->battery_hook.remove_battery = ideapad_battery_remove;
- priv->battery_hook.name = "Ideapad Battery Extension";
-
- err = devm_battery_hook_register(&priv->platform_device->dev, &priv->battery_hook);
- if (err)
- return err;
+ /* Not acquiring gbmd_sbmc_mutex as races are impossible during init */
+ if (!eval_gbmd(handle, &val)) {
+ priv->features.conservation_mode = true;
+ priv->features.rapid_charge = test_bit(GBMD_RAPID_CHARGE_SUPPORTED_BIT,
+ &val);
+
+ priv->battery_ext = priv->features.rapid_charge
+ ? &ideapad_battery_ext_v2
+ : &ideapad_battery_ext_v1;
+
+ priv->battery_hook.add_battery = ideapad_battery_add;
+ priv->battery_hook.remove_battery = ideapad_battery_remove;
+ priv->battery_hook.name = "Ideapad Battery Extension";
+
+ err = devm_battery_hook_register(&priv->platform_device->dev,
+ &priv->battery_hook);
+ if (err)
+ return err;
+ }
}
if (acpi_has_method(handle, "DYTC"))
@@ -2292,6 +2394,10 @@ static int ideapad_acpi_add(struct platform_device *pdev)
if (err)
return err;
+ err = devm_mutex_init(&pdev->dev, &priv->gbmd_sbmc_mutex);
+ if (err)
+ return err;
+
err = ideapad_check_features(priv);
if (err)
return err;
diff --git a/drivers/platform/x86/lenovo/wmi-gamezone.c b/drivers/platform/x86/lenovo/wmi-gamezone.c
index 0eb7fe8222f4..381836d29a96 100644
--- a/drivers/platform/x86/lenovo/wmi-gamezone.c
+++ b/drivers/platform/x86/lenovo/wmi-gamezone.c
@@ -171,14 +171,10 @@ static int lwmi_gz_profile_get(struct device *dev,
*profile = PLATFORM_PROFILE_BALANCED;
break;
case LWMI_GZ_THERMAL_MODE_PERFORMANCE:
- if (priv->extreme_supported) {
- *profile = PLATFORM_PROFILE_BALANCED_PERFORMANCE;
- break;
- }
*profile = PLATFORM_PROFILE_PERFORMANCE;
break;
case LWMI_GZ_THERMAL_MODE_EXTREME:
- *profile = PLATFORM_PROFILE_PERFORMANCE;
+ *profile = PLATFORM_PROFILE_MAX_POWER;
break;
case LWMI_GZ_THERMAL_MODE_CUSTOM:
*profile = PLATFORM_PROFILE_CUSTOM;
@@ -218,16 +214,12 @@ static int lwmi_gz_profile_set(struct device *dev,
case PLATFORM_PROFILE_BALANCED:
mode = LWMI_GZ_THERMAL_MODE_BALANCED;
break;
- case PLATFORM_PROFILE_BALANCED_PERFORMANCE:
- mode = LWMI_GZ_THERMAL_MODE_PERFORMANCE;
- break;
case PLATFORM_PROFILE_PERFORMANCE:
- if (priv->extreme_supported) {
- mode = LWMI_GZ_THERMAL_MODE_EXTREME;
- break;
- }
mode = LWMI_GZ_THERMAL_MODE_PERFORMANCE;
break;
+ case PLATFORM_PROFILE_MAX_POWER:
+ mode = LWMI_GZ_THERMAL_MODE_EXTREME;
+ break;
case PLATFORM_PROFILE_CUSTOM:
mode = LWMI_GZ_THERMAL_MODE_CUSTOM;
break;
@@ -274,8 +266,23 @@ static const struct dmi_system_id fwbug_list[] = {
},
.driver_data = &quirk_no_extreme_bug,
},
+ {
+ .ident = "Legion Go 8ASP2",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Legion Go 8ASP2"),
+ },
+ .driver_data = &quirk_no_extreme_bug,
+ },
+ {
+ .ident = "Legion Go 8AHP2",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Legion Go 8AHP2"),
+ },
+ .driver_data = &quirk_no_extreme_bug,
+ },
{},
-
};
/**
@@ -338,7 +345,7 @@ static int lwmi_gz_platform_profile_probe(void *drvdata, unsigned long *choices)
priv->extreme_supported = lwmi_gz_extreme_supported(profile_support_ver);
if (priv->extreme_supported)
- set_bit(PLATFORM_PROFILE_BALANCED_PERFORMANCE, choices);
+ set_bit(PLATFORM_PROFILE_MAX_POWER, choices);
return 0;
}
diff --git a/drivers/platform/x86/lg-laptop.c b/drivers/platform/x86/lg-laptop.c
index 6af6cf477c5b..f92e89c75db9 100644
--- a/drivers/platform/x86/lg-laptop.c
+++ b/drivers/platform/x86/lg-laptop.c
@@ -19,6 +19,7 @@
#include <linux/leds.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/string_choices.h>
#include <linux/types.h>
#include <acpi/battery.h>
@@ -42,6 +43,7 @@ MODULE_PARM_DESC(fw_debug, "Enable printing of firmware debug messages");
#define LG_ADDRESS_SPACE_ID 0x8F
#define LG_ADDRESS_SPACE_DEBUG_FLAG_ADR 0x00
+#define LG_ADDRESS_SPACE_HD_AUDIO_POWER_ADDR 0x01
#define LG_ADDRESS_SPACE_FAN_MODE_ADR 0x03
#define LG_ADDRESS_SPACE_DTTM_FLAG_ADR 0x20
@@ -668,6 +670,15 @@ static acpi_status lg_laptop_address_space_write(struct device *dev, acpi_physic
byte = value & 0xFF;
switch (address) {
+ case LG_ADDRESS_SPACE_HD_AUDIO_POWER_ADDR:
+ /*
+ * The HD audio power field is not affected by the DTTM flag,
+ * so we have to manually check fw_debug.
+ */
+ if (fw_debug)
+ dev_dbg(dev, "HD audio power %s\n", str_enabled_disabled(byte));
+
+ return AE_OK;
case LG_ADDRESS_SPACE_FAN_MODE_ADR:
/*
* The fan mode field is not affected by the DTTM flag, so we
diff --git a/drivers/platform/x86/oxpec.c b/drivers/platform/x86/oxpec.c
index 54377b282ff8..144a454103b9 100644
--- a/drivers/platform/x86/oxpec.c
+++ b/drivers/platform/x86/oxpec.c
@@ -1,8 +1,6 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * Platform driver for OneXPlayer and AOKZOE devices. For the time being,
- * it also exposes fan controls for AYANEO, and OrangePi Handhelds via
- * hwmon sysfs.
+ * Platform driver for OneXPlayer and AOKZOE devices.
*
* Fan control is provided via pwm interface in the range [0-255].
* Old AMD boards use [0-100] as range in the EC, the written value is
@@ -43,14 +41,6 @@ static bool unlock_global_acpi_lock(void)
enum oxp_board {
aok_zoe_a1 = 1,
- aya_neo_2,
- aya_neo_air,
- aya_neo_air_1s,
- aya_neo_air_plus_mendo,
- aya_neo_air_pro,
- aya_neo_flip,
- aya_neo_geek,
- aya_neo_kun,
orange_pi_neo,
oxp_2,
oxp_fly,
@@ -133,62 +123,6 @@ static const struct dmi_system_id dmi_table[] = {
},
{
.matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
- DMI_MATCH(DMI_BOARD_NAME, "AYANEO 2"),
- },
- .driver_data = (void *)aya_neo_2,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "AIR"),
- },
- .driver_data = (void *)aya_neo_air,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "AIR 1S"),
- },
- .driver_data = (void *)aya_neo_air_1s,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "AB05-Mendocino"),
- },
- .driver_data = (void *)aya_neo_air_plus_mendo,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "AIR Pro"),
- },
- .driver_data = (void *)aya_neo_air_pro,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
- DMI_MATCH(DMI_BOARD_NAME, "FLIP"),
- },
- .driver_data = (void *)aya_neo_flip,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
- DMI_MATCH(DMI_BOARD_NAME, "GEEK"),
- },
- .driver_data = (void *)aya_neo_geek,
- },
- {
- .matches = {
- DMI_MATCH(DMI_BOARD_VENDOR, "AYANEO"),
- DMI_EXACT_MATCH(DMI_BOARD_NAME, "KUN"),
- },
- .driver_data = (void *)aya_neo_kun,
- },
- {
- .matches = {
DMI_MATCH(DMI_BOARD_VENDOR, "OrangePi"),
DMI_EXACT_MATCH(DMI_BOARD_NAME, "NEO-01"),
},
@@ -672,13 +606,6 @@ static int oxp_pwm_enable(void)
case orange_pi_neo:
return write_to_ec(ORANGEPI_SENSOR_PWM_ENABLE_REG, PWM_MODE_MANUAL);
case aok_zoe_a1:
- case aya_neo_2:
- case aya_neo_air:
- case aya_neo_air_plus_mendo:
- case aya_neo_air_pro:
- case aya_neo_flip:
- case aya_neo_geek:
- case aya_neo_kun:
case oxp_2:
case oxp_fly:
case oxp_mini_amd:
@@ -699,14 +626,6 @@ static int oxp_pwm_disable(void)
case orange_pi_neo:
return write_to_ec(ORANGEPI_SENSOR_PWM_ENABLE_REG, PWM_MODE_AUTO);
case aok_zoe_a1:
- case aya_neo_2:
- case aya_neo_air:
- case aya_neo_air_1s:
- case aya_neo_air_plus_mendo:
- case aya_neo_air_pro:
- case aya_neo_flip:
- case aya_neo_geek:
- case aya_neo_kun:
case oxp_2:
case oxp_fly:
case oxp_mini_amd:
@@ -727,14 +646,6 @@ static int oxp_pwm_read(long *val)
case orange_pi_neo:
return read_from_ec(ORANGEPI_SENSOR_PWM_ENABLE_REG, 1, val);
case aok_zoe_a1:
- case aya_neo_2:
- case aya_neo_air:
- case aya_neo_air_1s:
- case aya_neo_air_plus_mendo:
- case aya_neo_air_pro:
- case aya_neo_flip:
- case aya_neo_geek:
- case aya_neo_kun:
case oxp_2:
case oxp_fly:
case oxp_mini_amd:
@@ -774,14 +685,6 @@ static int oxp_pwm_fan_speed(long *val)
case oxp_g1_i:
return read_from_ec(OXP_2_SENSOR_FAN_REG, 2, val);
case aok_zoe_a1:
- case aya_neo_2:
- case aya_neo_air:
- case aya_neo_air_1s:
- case aya_neo_air_plus_mendo:
- case aya_neo_air_pro:
- case aya_neo_flip:
- case aya_neo_geek:
- case aya_neo_kun:
case oxp_fly:
case oxp_mini_amd:
case oxp_mini_amd_a07:
@@ -810,14 +713,6 @@ static int oxp_pwm_input_write(long val)
/* scale to range [0-184] */
val = (val * 184) / 255;
return write_to_ec(OXP_SENSOR_PWM_REG, val);
- case aya_neo_2:
- case aya_neo_air:
- case aya_neo_air_1s:
- case aya_neo_air_plus_mendo:
- case aya_neo_air_pro:
- case aya_neo_flip:
- case aya_neo_geek:
- case aya_neo_kun:
case oxp_mini_amd:
case oxp_mini_amd_a07:
/* scale to range [0-100] */
@@ -854,14 +749,6 @@ static int oxp_pwm_input_read(long *val)
/* scale from range [0-184] */
*val = (*val * 255) / 184;
break;
- case aya_neo_2:
- case aya_neo_air:
- case aya_neo_air_1s:
- case aya_neo_air_plus_mendo:
- case aya_neo_air_pro:
- case aya_neo_flip:
- case aya_neo_geek:
- case aya_neo_kun:
case oxp_mini_amd:
case oxp_mini_amd_a07:
ret = read_from_ec(OXP_SENSOR_PWM_REG, 1, val);
diff --git a/drivers/platform/x86/serial-multi-instantiate.c b/drivers/platform/x86/serial-multi-instantiate.c
index db030b0f176a..1a369334f9cb 100644
--- a/drivers/platform/x86/serial-multi-instantiate.c
+++ b/drivers/platform/x86/serial-multi-instantiate.c
@@ -22,6 +22,7 @@
#define IRQ_RESOURCE_GPIO 1
#define IRQ_RESOURCE_APIC 2
#define IRQ_RESOURCE_AUTO 3
+#define IRQ_RESOURCE_OPT BIT(2)
enum smi_bus_type {
SMI_I2C,
@@ -64,6 +65,10 @@ static int smi_get_irq(struct platform_device *pdev, struct acpi_device *adev,
dev_dbg(&pdev->dev, "Using platform irq\n");
break;
}
+ if (inst->flags & IRQ_RESOURCE_OPT) {
+ dev_dbg(&pdev->dev, "No irq\n");
+ return 0;
+ }
break;
case IRQ_RESOURCE_GPIO:
ret = acpi_dev_gpio_irq_get(adev, inst->irq_idx);
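
IRQ_RESOURCE_OPT is a flag bit layered on top of the enumerated lookup types (0-3) kept in the low bits, so an instance can request automatic IRQ discovery while marking the interrupt as optional; smi_get_irq() then returns 0 instead of an error when no IRQ turns up. A sketch of the caller's view, under the assumption that the signature truncated in the hunk header above takes these three arguments:

int irq = smi_get_irq(pdev, adev, inst);
if (irq < 0)
	return irq;	/* lookup failed and the IRQ was mandatory */
if (!irq)
	dev_dbg(&pdev->dev, "optional IRQ absent, continuing without it\n");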
@@ -386,10 +391,10 @@ static const struct smi_node cs35l57_hda = {
static const struct smi_node tas2781_hda = {
.instances = {
- { "tas2781-hda", IRQ_RESOURCE_AUTO, 0 },
- { "tas2781-hda", IRQ_RESOURCE_AUTO, 0 },
- { "tas2781-hda", IRQ_RESOURCE_AUTO, 0 },
- { "tas2781-hda", IRQ_RESOURCE_AUTO, 0 },
+ { "tas2781-hda", IRQ_RESOURCE_AUTO | IRQ_RESOURCE_OPT, 0 },
+ { "tas2781-hda", IRQ_RESOURCE_AUTO | IRQ_RESOURCE_OPT, 0 },
+ { "tas2781-hda", IRQ_RESOURCE_AUTO | IRQ_RESOURCE_OPT, 0 },
+ { "tas2781-hda", IRQ_RESOURCE_AUTO | IRQ_RESOURCE_OPT, 0 },
{}
},
.bus_type = SMI_AUTO_DETECT,
diff --git a/drivers/platform/x86/uniwill/Kconfig b/drivers/platform/x86/uniwill/Kconfig
new file mode 100644
index 000000000000..d07cc8440188
--- /dev/null
+++ b/drivers/platform/x86/uniwill/Kconfig
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Uniwill X86 Platform Specific Drivers
+#
+
+menuconfig X86_PLATFORM_DRIVERS_UNIWILL
+ bool "Uniwill X86 Platform Specific Device Drivers"
+ depends on X86_PLATFORM_DEVICES
+ help
+ Say Y here to see options for device drivers for various
+ Uniwill x86 platforms, including many OEM laptops originally
+ manufactured by Uniwill.
+ This option alone does not add any kernel code.
+
+ If you say N, all options in this submenu will be skipped and disabled.
+
+if X86_PLATFORM_DRIVERS_UNIWILL
+
+config UNIWILL_LAPTOP
+ tristate "Uniwill Laptop Extras"
+ default m
+ depends on ACPI
+ depends on ACPI_WMI
+ depends on ACPI_BATTERY
+ depends on HWMON
+ depends on INPUT
+ depends on LEDS_CLASS_MULTICOLOR
+ depends on DMI
+ select REGMAP
+ select INPUT_SPARSEKMAP
+ help
+ This driver adds support for various extra features found on Uniwill laptops,
+ like the lightbar, hwmon sensors and hotkeys. It also supports many OEM laptops
+ originally manufactured by Uniwill.
+
+ If you have such a laptop, say Y or M here.
+
+endif
diff --git a/drivers/platform/x86/uniwill/Makefile b/drivers/platform/x86/uniwill/Makefile
new file mode 100644
index 000000000000..05cd1747a240
--- /dev/null
+++ b/drivers/platform/x86/uniwill/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0-or-later
+#
+# Makefile for linux/drivers/platform/x86/uniwill
+# Uniwill X86 Platform Specific Drivers
+#
+
+obj-$(CONFIG_UNIWILL_LAPTOP) += uniwill-laptop.o
+uniwill-laptop-y := uniwill-acpi.o uniwill-wmi.o
diff --git a/drivers/platform/x86/uniwill/uniwill-acpi.c b/drivers/platform/x86/uniwill/uniwill-acpi.c
new file mode 100644
index 000000000000..bd7e63dd5181
--- /dev/null
+++ b/drivers/platform/x86/uniwill/uniwill-acpi.c
@@ -0,0 +1,1912 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Linux driver for Uniwill notebooks.
+ *
+ * Special thanks go to Pőcze Barnabás, Christoffer Sandberg and Werner Sembach
+ * for supporting the development of this driver either through prior work or
+ * by answering questions regarding the underlying ACPI and WMI interfaces.
+ *
+ * Copyright (C) 2025 Armin Wolf <W_Armin@gmx.de>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/array_size.h>
+#include <linux/bits.h>
+#include <linux/bitfield.h>
+#include <linux/cleanup.h>
+#include <linux/debugfs.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/device/driver.h>
+#include <linux/dmi.h>
+#include <linux/errno.h>
+#include <linux/fixp-arith.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/init.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <linux/kernel.h>
+#include <linux/kstrtox.h>
+#include <linux/leds.h>
+#include <linux/led-class-multicolor.h>
+#include <linux/limits.h>
+#include <linux/list.h>
+#include <linux/minmax.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/notifier.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/printk.h>
+#include <linux/regmap.h>
+#include <linux/string.h>
+#include <linux/sysfs.h>
+#include <linux/types.h>
+#include <linux/units.h>
+
+#include <acpi/battery.h>
+
+#include "uniwill-wmi.h"
+
+#define EC_ADDR_BAT_POWER_UNIT_1 0x0400
+
+#define EC_ADDR_BAT_POWER_UNIT_2 0x0401
+
+#define EC_ADDR_BAT_DESIGN_CAPACITY_1 0x0402
+
+#define EC_ADDR_BAT_DESIGN_CAPACITY_2 0x0403
+
+#define EC_ADDR_BAT_FULL_CAPACITY_1 0x0404
+
+#define EC_ADDR_BAT_FULL_CAPACITY_2 0x0405
+
+#define EC_ADDR_BAT_DESIGN_VOLTAGE_1 0x0408
+
+#define EC_ADDR_BAT_DESIGN_VOLTAGE_2 0x0409
+
+#define EC_ADDR_BAT_STATUS_1 0x0432
+#define BAT_DISCHARGING BIT(0)
+
+#define EC_ADDR_BAT_STATUS_2 0x0433
+
+#define EC_ADDR_BAT_CURRENT_1 0x0434
+
+#define EC_ADDR_BAT_CURRENT_2 0x0435
+
+#define EC_ADDR_BAT_REMAIN_CAPACITY_1 0x0436
+
+#define EC_ADDR_BAT_REMAIN_CAPACITY_2 0x0437
+
+#define EC_ADDR_BAT_VOLTAGE_1 0x0438
+
+#define EC_ADDR_BAT_VOLTAGE_2 0x0439
+
+#define EC_ADDR_CPU_TEMP 0x043E
+
+#define EC_ADDR_GPU_TEMP 0x044F
+
+#define EC_ADDR_MAIN_FAN_RPM_1 0x0464
+
+#define EC_ADDR_MAIN_FAN_RPM_2 0x0465
+
+#define EC_ADDR_SECOND_FAN_RPM_1 0x046C
+
+#define EC_ADDR_SECOND_FAN_RPM_2 0x046D
+
+#define EC_ADDR_DEVICE_STATUS 0x047B
+#define WIFI_STATUS_ON BIT(7)
+/* BIT(5) is also unset depending on the rfkill state (bluetooth?) */
+
+#define EC_ADDR_BAT_ALERT 0x0494
+
+#define EC_ADDR_BAT_CYCLE_COUNT_1 0x04A6
+
+#define EC_ADDR_BAT_CYCLE_COUNT_2 0x04A7
+
+#define EC_ADDR_PROJECT_ID 0x0740
+
+#define EC_ADDR_AP_OEM 0x0741
+#define ENABLE_MANUAL_CTRL BIT(0)
+#define ITE_KBD_EFFECT_REACTIVE BIT(3)
+#define FAN_ABNORMAL BIT(5)
+
+#define EC_ADDR_SUPPORT_5 0x0742
+#define FAN_TURBO_SUPPORTED BIT(4)
+#define FAN_SUPPORT BIT(5)
+
+#define EC_ADDR_CTGP_DB_CTRL 0x0743
+#define CTGP_DB_GENERAL_ENABLE BIT(0)
+#define CTGP_DB_DB_ENABLE BIT(1)
+#define CTGP_DB_CTGP_ENABLE BIT(2)
+
+#define EC_ADDR_CTGP_OFFSET 0x0744
+
+#define EC_ADDR_TPP_OFFSET 0x0745
+
+#define EC_ADDR_MAX_TGP 0x0746
+
+#define EC_ADDR_LIGHTBAR_AC_CTRL 0x0748
+#define LIGHTBAR_APP_EXISTS BIT(0)
+#define LIGHTBAR_POWER_SAVE BIT(1)
+#define LIGHTBAR_S0_OFF BIT(2)
+#define LIGHTBAR_S3_OFF BIT(3) // Breathing animation when suspended
+#define LIGHTBAR_WELCOME BIT(7) // Rainbow animation
+
+#define EC_ADDR_LIGHTBAR_AC_RED 0x0749
+
+#define EC_ADDR_LIGHTBAR_AC_GREEN 0x074A
+
+#define EC_ADDR_LIGHTBAR_AC_BLUE 0x074B
+
+#define EC_ADDR_BIOS_OEM 0x074E
+#define FN_LOCK_STATUS BIT(4)
+
+#define EC_ADDR_MANUAL_FAN_CTRL 0x0751
+#define FAN_LEVEL_MASK GENMASK(2, 0)
+#define FAN_MODE_TURBO BIT(4)
+#define FAN_MODE_HIGH BIT(5)
+#define FAN_MODE_BOOST BIT(6)
+#define FAN_MODE_USER BIT(7)
+
+#define EC_ADDR_PWM_1 0x075B
+
+#define EC_ADDR_PWM_2 0x075C
+
+/* Unreliable */
+#define EC_ADDR_SUPPORT_1 0x0765
+#define AIRPLANE_MODE BIT(0)
+#define GPS_SWITCH BIT(1)
+#define OVERCLOCK BIT(2)
+#define MACRO_KEY BIT(3)
+#define SHORTCUT_KEY BIT(4)
+#define SUPER_KEY_LOCK BIT(5)
+#define LIGHTBAR BIT(6)
+#define FAN_BOOST BIT(7)
+
+#define EC_ADDR_SUPPORT_2 0x0766
+#define SILENT_MODE BIT(0)
+#define USB_CHARGING BIT(1)
+#define RGB_KEYBOARD BIT(2)
+#define CHINA_MODE BIT(5)
+#define MY_BATTERY BIT(6)
+
+#define EC_ADDR_TRIGGER 0x0767
+#define TRIGGER_SUPER_KEY_LOCK BIT(0)
+#define TRIGGER_LIGHTBAR BIT(1)
+#define TRIGGER_FAN_BOOST BIT(2)
+#define TRIGGER_SILENT_MODE BIT(3)
+#define TRIGGER_USB_CHARGING BIT(4)
+#define RGB_APPLY_COLOR BIT(5)
+#define RGB_LOGO_EFFECT BIT(6)
+#define RGB_RAINBOW_EFFECT BIT(7)
+
+#define EC_ADDR_SWITCH_STATUS 0x0768
+#define SUPER_KEY_LOCK_STATUS BIT(0)
+#define LIGHTBAR_STATUS BIT(1)
+#define FAN_BOOST_STATUS BIT(2)
+#define MACRO_KEY_STATUS BIT(3)
+#define MY_BAT_POWER_BAT_STATUS BIT(4)
+
+#define EC_ADDR_RGB_RED 0x0769
+
+#define EC_ADDR_RGB_GREEN 0x076A
+
+#define EC_ADDR_RGB_BLUE 0x076B
+
+#define EC_ADDR_ROMID_START 0x0770
+#define ROMID_LENGTH 14
+
+#define EC_ADDR_ROMID_EXTRA_1 0x077E
+
+#define EC_ADDR_ROMID_EXTRA_2 0x077F
+
+#define EC_ADDR_BIOS_OEM_2 0x0782
+#define FAN_V2_NEW BIT(0)
+#define FAN_QKEY BIT(1)
+#define FAN_TABLE_OFFICE_MODE BIT(2)
+#define FAN_V3 BIT(3)
+#define DEFAULT_MODE BIT(4)
+
+#define EC_ADDR_PL1_SETTING 0x0783
+
+#define EC_ADDR_PL2_SETTING 0x0784
+
+#define EC_ADDR_PL4_SETTING 0x0785
+
+#define EC_ADDR_FAN_DEFAULT 0x0786
+#define FAN_CURVE_LENGTH 5
+
+#define EC_ADDR_KBD_STATUS 0x078C
+#define KBD_WHITE_ONLY BIT(0) // ~single color
+#define KBD_SINGLE_COLOR_OFF BIT(1)
+#define KBD_TURBO_LEVEL_MASK GENMASK(3, 2)
+#define KBD_APPLY BIT(4)
+#define KBD_BRIGHTNESS GENMASK(7, 5)
+
+#define EC_ADDR_FAN_CTRL 0x078E
+#define FAN3P5 BIT(1)
+#define CHARGING_PROFILE BIT(3)
+#define UNIVERSAL_FAN_CTRL BIT(6)
+
+#define EC_ADDR_BIOS_OEM_3 0x07A3
+#define FAN_REDUCED_DUTY_CYCLE BIT(5)
+#define FAN_ALWAYS_ON BIT(6)
+
+#define EC_ADDR_BIOS_BYTE 0x07A4
+#define FN_LOCK_SWITCH BIT(3)
+
+#define EC_ADDR_OEM_3 0x07A5
+#define POWER_LED_MASK GENMASK(1, 0)
+#define POWER_LED_LEFT 0x00
+#define POWER_LED_BOTH 0x01
+#define POWER_LED_NONE 0x02
+#define FAN_QUIET BIT(2)
+#define OVERBOOST BIT(4)
+#define HIGH_POWER BIT(7)
+
+#define EC_ADDR_OEM_4 0x07A6
+#define OVERBOOST_DYN_TEMP_OFF BIT(1)
+#define TOUCHPAD_TOGGLE_OFF BIT(6)
+
+#define EC_ADDR_CHARGE_CTRL 0x07B9
+#define CHARGE_CTRL_MASK GENMASK(6, 0)
+#define CHARGE_CTRL_REACHED BIT(7)
+
+#define EC_ADDR_UNIVERSAL_FAN_CTRL 0x07C5
+#define SPLIT_TABLES BIT(7)
+
+#define EC_ADDR_AP_OEM_6 0x07C6
+#define ENABLE_UNIVERSAL_FAN_CTRL BIT(2)
+#define BATTERY_CHARGE_FULL_OVER_24H BIT(3)
+#define BATTERY_ERM_STATUS_REACHED BIT(4)
+
+#define EC_ADDR_CHARGE_PRIO 0x07CC
+#define CHARGING_PERFORMANCE BIT(7)
+
+/* Same bits as EC_ADDR_LIGHTBAR_AC_CTRL except LIGHTBAR_S3_OFF */
+#define EC_ADDR_LIGHTBAR_BAT_CTRL 0x07E2
+
+#define EC_ADDR_LIGHTBAR_BAT_RED 0x07E3
+
+#define EC_ADDR_LIGHTBAR_BAT_GREEN 0x07E4
+
+#define EC_ADDR_LIGHTBAR_BAT_BLUE 0x07E5
+
+#define EC_ADDR_CPU_TEMP_END_TABLE 0x0F00
+
+#define EC_ADDR_CPU_TEMP_START_TABLE 0x0F10
+
+#define EC_ADDR_CPU_FAN_SPEED_TABLE 0x0F20
+
+#define EC_ADDR_GPU_TEMP_END_TABLE 0x0F30
+
+#define EC_ADDR_GPU_TEMP_START_TABLE 0x0F40
+
+#define EC_ADDR_GPU_FAN_SPEED_TABLE 0x0F50
+
+/*
+ * Those two registers technically allow for manual fan control,
+ * but are unstable on some models and are likely not meant to
+ * be used by applications as they are only accessible when using
+ * the WMI interface.
+ */
+#define EC_ADDR_PWM_1_WRITEABLE 0x1804
+
+#define EC_ADDR_PWM_2_WRITEABLE 0x1809
+
+#define DRIVER_NAME "uniwill"
+
+/*
+ * The OEM software always sleeps up to 6 ms after reading/writing EC
+ * registers, so we emulate this behaviour for maximum compatibility.
+ */
+#define UNIWILL_EC_DELAY_US 6000
+
+#define PWM_MAX 200
+#define FAN_TABLE_LENGTH 16
+
+#define LED_CHANNELS 3
+#define LED_MAX_BRIGHTNESS 200
+
+#define UNIWILL_FEATURE_FN_LOCK_TOGGLE BIT(0)
+#define UNIWILL_FEATURE_SUPER_KEY_TOGGLE BIT(1)
+#define UNIWILL_FEATURE_TOUCHPAD_TOGGLE BIT(2)
+#define UNIWILL_FEATURE_LIGHTBAR BIT(3)
+#define UNIWILL_FEATURE_BATTERY BIT(4)
+#define UNIWILL_FEATURE_HWMON BIT(5)
+
+struct uniwill_data {
+ struct device *dev;
+ acpi_handle handle;
+ struct regmap *regmap;
+ struct acpi_battery_hook hook;
+ unsigned int last_charge_ctrl;
+ struct mutex battery_lock; /* Protects the list of currently registered batteries */
+ unsigned int last_switch_status;
+ struct mutex super_key_lock; /* Protects the toggling of the super key lock state */
+ struct list_head batteries;
+ struct mutex led_lock; /* Protects writes to the lightbar registers */
+ struct led_classdev_mc led_mc_cdev;
+ struct mc_subled led_mc_subled_info[LED_CHANNELS];
+ struct mutex input_lock; /* Protects input sequence during notify */
+ struct input_dev *input_device;
+ struct notifier_block nb;
+};
+
+struct uniwill_battery_entry {
+ struct list_head head;
+ struct power_supply *battery;
+};
+
+static bool force;
+module_param_unsafe(force, bool, 0);
+MODULE_PARM_DESC(force, "Force loading without checking for supported devices");
+
+/* Feature bitmask since the associated registers are not reliable */
+static unsigned int supported_features;
+
+static const char * const uniwill_temp_labels[] = {
+ "CPU",
+ "GPU",
+};
+
+static const char * const uniwill_fan_labels[] = {
+ "Main",
+ "Secondary",
+};
+
+static const struct key_entry uniwill_keymap[] = {
+ /* Reported via keyboard controller */
+ { KE_IGNORE, UNIWILL_OSD_CAPSLOCK, { KEY_CAPSLOCK }},
+ { KE_IGNORE, UNIWILL_OSD_NUMLOCK, { KEY_NUMLOCK }},
+
+ /* Reported when the user locks/unlocks the super key */
+ { KE_IGNORE, UNIWILL_OSD_SUPER_KEY_LOCK_ENABLE, { KEY_UNKNOWN }},
+ { KE_IGNORE, UNIWILL_OSD_SUPER_KEY_LOCK_DISABLE, { KEY_UNKNOWN }},
+ /* Optional, might not be reported by all devices */
+ { KE_IGNORE, UNIWILL_OSD_SUPER_KEY_LOCK_CHANGED, { KEY_UNKNOWN }},
+
+ /* Reported in manual mode when toggling the airplane mode status */
+ { KE_KEY, UNIWILL_OSD_RFKILL, { KEY_RFKILL }},
+ { KE_IGNORE, UNIWILL_OSD_RADIOON, { KEY_UNKNOWN }},
+ { KE_IGNORE, UNIWILL_OSD_RADIOOFF, { KEY_UNKNOWN }},
+
+ /* Reported when user wants to cycle the platform profile */
+ { KE_KEY, UNIWILL_OSD_PERFORMANCE_MODE_TOGGLE, { KEY_F14 }},
+
+ /* Reported when the user wants to adjust the brightness of the keyboard */
+ { KE_KEY, UNIWILL_OSD_KBDILLUMDOWN, { KEY_KBDILLUMDOWN }},
+ { KE_KEY, UNIWILL_OSD_KBDILLUMUP, { KEY_KBDILLUMUP }},
+
+ /* Reported when the user wants to toggle the microphone mute status */
+ { KE_KEY, UNIWILL_OSD_MIC_MUTE, { KEY_MICMUTE }},
+
+ /* Reported when the user wants to toggle the mute status */
+ { KE_IGNORE, UNIWILL_OSD_MUTE, { KEY_MUTE }},
+
+ /* Reported when the user locks/unlocks the Fn key */
+ { KE_IGNORE, UNIWILL_OSD_FN_LOCK, { KEY_FN_ESC }},
+
+ /* Reported when the user wants to toggle the brightness of the keyboard */
+ { KE_KEY, UNIWILL_OSD_KBDILLUMTOGGLE, { KEY_KBDILLUMTOGGLE }},
+ { KE_KEY, UNIWILL_OSD_KB_LED_LEVEL0, { KEY_KBDILLUMTOGGLE }},
+ { KE_KEY, UNIWILL_OSD_KB_LED_LEVEL1, { KEY_KBDILLUMTOGGLE }},
+ { KE_KEY, UNIWILL_OSD_KB_LED_LEVEL2, { KEY_KBDILLUMTOGGLE }},
+ { KE_KEY, UNIWILL_OSD_KB_LED_LEVEL3, { KEY_KBDILLUMTOGGLE }},
+ { KE_KEY, UNIWILL_OSD_KB_LED_LEVEL4, { KEY_KBDILLUMTOGGLE }},
+
+ /* FIXME: find out the exact meaning of those events */
+ { KE_IGNORE, UNIWILL_OSD_BAT_CHARGE_FULL_24_H, { KEY_UNKNOWN }},
+ { KE_IGNORE, UNIWILL_OSD_BAT_ERM_UPDATE, { KEY_UNKNOWN }},
+
+ /* Reported when the user wants to toggle the benchmark mode status */
+ { KE_IGNORE, UNIWILL_OSD_BENCHMARK_MODE_TOGGLE, { KEY_UNKNOWN }},
+
+ /* Reported when the user wants to toggle the webcam */
+ { KE_IGNORE, UNIWILL_OSD_WEBCAM_TOGGLE, { KEY_UNKNOWN }},
+
+ { KE_END }
+};
+
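
This keymap is presumably consumed through the sparse-keymap helpers; the notify path sits beyond this excerpt, so the wiring below is an assumption, but it shows the usual pattern for a table mixing KE_KEY and KE_IGNORE entries:

/* Sketch: sparse-keymap setup at probe time... */
err = sparse_keymap_setup(data->input_device, uniwill_keymap, NULL);
if (err)
	return err;

/* ...and event reporting from the WMI/ACPI notify handler */
if (!sparse_keymap_report_event(data->input_device, event, 1, true))
	dev_dbg(data->dev, "unknown event code: 0x%x\n", event);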
+static int uniwill_ec_reg_write(void *context, unsigned int reg, unsigned int val)
+{
+ union acpi_object params[2] = {
+ {
+ .integer = {
+ .type = ACPI_TYPE_INTEGER,
+ .value = reg,
+ },
+ },
+ {
+ .integer = {
+ .type = ACPI_TYPE_INTEGER,
+ .value = val,
+ },
+ },
+ };
+ struct uniwill_data *data = context;
+ struct acpi_object_list input = {
+ .count = ARRAY_SIZE(params),
+ .pointer = params,
+ };
+ acpi_status status;
+
+ status = acpi_evaluate_object(data->handle, "ECRW", &input, NULL);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ usleep_range(UNIWILL_EC_DELAY_US, UNIWILL_EC_DELAY_US * 2);
+
+ return 0;
+}
+
+static int uniwill_ec_reg_read(void *context, unsigned int reg, unsigned int *val)
+{
+ union acpi_object params[1] = {
+ {
+ .integer = {
+ .type = ACPI_TYPE_INTEGER,
+ .value = reg,
+ },
+ },
+ };
+ struct uniwill_data *data = context;
+ struct acpi_object_list input = {
+ .count = ARRAY_SIZE(params),
+ .pointer = params,
+ };
+ unsigned long long output;
+ acpi_status status;
+
+ status = acpi_evaluate_integer(data->handle, "ECRR", &input, &output);
+ if (ACPI_FAILURE(status))
+ return -EIO;
+
+ if (output > U8_MAX)
+ return -ENXIO;
+
+ usleep_range(UNIWILL_EC_DELAY_US, UNIWILL_EC_DELAY_US * 2);
+
+ *val = output;
+
+ return 0;
+}
+
+static const struct regmap_bus uniwill_ec_bus = {
+ .reg_write = uniwill_ec_reg_write,
+ .reg_read = uniwill_ec_reg_read,
+ .reg_format_endian_default = REGMAP_ENDIAN_LITTLE,
+ .val_format_endian_default = REGMAP_ENDIAN_LITTLE,
+};
+
+static bool uniwill_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case EC_ADDR_AP_OEM:
+ case EC_ADDR_LIGHTBAR_AC_CTRL:
+ case EC_ADDR_LIGHTBAR_AC_RED:
+ case EC_ADDR_LIGHTBAR_AC_GREEN:
+ case EC_ADDR_LIGHTBAR_AC_BLUE:
+ case EC_ADDR_BIOS_OEM:
+ case EC_ADDR_TRIGGER:
+ case EC_ADDR_OEM_4:
+ case EC_ADDR_CHARGE_CTRL:
+ case EC_ADDR_LIGHTBAR_BAT_CTRL:
+ case EC_ADDR_LIGHTBAR_BAT_RED:
+ case EC_ADDR_LIGHTBAR_BAT_GREEN:
+ case EC_ADDR_LIGHTBAR_BAT_BLUE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool uniwill_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case EC_ADDR_CPU_TEMP:
+ case EC_ADDR_GPU_TEMP:
+ case EC_ADDR_MAIN_FAN_RPM_1:
+ case EC_ADDR_MAIN_FAN_RPM_2:
+ case EC_ADDR_SECOND_FAN_RPM_1:
+ case EC_ADDR_SECOND_FAN_RPM_2:
+ case EC_ADDR_BAT_ALERT:
+ case EC_ADDR_PROJECT_ID:
+ case EC_ADDR_AP_OEM:
+ case EC_ADDR_LIGHTBAR_AC_CTRL:
+ case EC_ADDR_LIGHTBAR_AC_RED:
+ case EC_ADDR_LIGHTBAR_AC_GREEN:
+ case EC_ADDR_LIGHTBAR_AC_BLUE:
+ case EC_ADDR_BIOS_OEM:
+ case EC_ADDR_PWM_1:
+ case EC_ADDR_PWM_2:
+ case EC_ADDR_TRIGGER:
+ case EC_ADDR_SWITCH_STATUS:
+ case EC_ADDR_OEM_4:
+ case EC_ADDR_CHARGE_CTRL:
+ case EC_ADDR_LIGHTBAR_BAT_CTRL:
+ case EC_ADDR_LIGHTBAR_BAT_RED:
+ case EC_ADDR_LIGHTBAR_BAT_GREEN:
+ case EC_ADDR_LIGHTBAR_BAT_BLUE:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool uniwill_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case EC_ADDR_CPU_TEMP:
+ case EC_ADDR_GPU_TEMP:
+ case EC_ADDR_MAIN_FAN_RPM_1:
+ case EC_ADDR_MAIN_FAN_RPM_2:
+ case EC_ADDR_SECOND_FAN_RPM_1:
+ case EC_ADDR_SECOND_FAN_RPM_2:
+ case EC_ADDR_BAT_ALERT:
+ case EC_ADDR_PWM_1:
+ case EC_ADDR_PWM_2:
+ case EC_ADDR_TRIGGER:
+ case EC_ADDR_SWITCH_STATUS:
+ case EC_ADDR_CHARGE_CTRL:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config uniwill_ec_config = {
+ .reg_bits = 16,
+ .val_bits = 8,
+ .writeable_reg = uniwill_writeable_reg,
+ .readable_reg = uniwill_readable_reg,
+ .volatile_reg = uniwill_volatile_reg,
+ .can_sleep = true,
+ .max_register = 0xFFF,
+ .cache_type = REGCACHE_MAPLE,
+ .use_single_read = true,
+ .use_single_write = true,
+};
+
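
With reg_read/reg_write supplied by uniwill_ec_bus and the access/cache policy by uniwill_ec_config, every regmap_read()/regmap_write() in the rest of the file funnels through the ACPI ECRR/ECRW methods, while REGCACHE_MAPLE serves the non-volatile registers from cache. A sketch of the probe-side hookup (the probe function lies beyond this excerpt, so this call is an assumption):

/* Sketch: create a cached regmap over the ACPI-backed EC bus */
data->regmap = devm_regmap_init(data->dev, &uniwill_ec_bus, data,
				&uniwill_ec_config);
if (IS_ERR(data->regmap))
	return PTR_ERR(data->regmap);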
+static ssize_t fn_lock_toggle_enable_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct uniwill_data *data = dev_get_drvdata(dev);
+ unsigned int value;
+ bool enable;
+ int ret;
+
+ ret = kstrtobool(buf, &enable);
+ if (ret < 0)
+ return ret;
+
+ if (enable)
+ value = FN_LOCK_STATUS;
+ else
+ value = 0;
+
+ ret = regmap_update_bits(data->regmap, EC_ADDR_BIOS_OEM, FN_LOCK_STATUS, value);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static ssize_t fn_lock_toggle_enable_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct uniwill_data *data = dev_get_drvdata(dev);
+ unsigned int value;
+ int ret;
+
+ ret = regmap_read(data->regmap, EC_ADDR_BIOS_OEM, &value);
+ if (ret < 0)
+ return ret;
+
+ return sysfs_emit(buf, "%d\n", !!(value & FN_LOCK_STATUS));
+}
+
+static DEVICE_ATTR_RW(fn_lock_toggle_enable);
+
+static ssize_t super_key_toggle_enable_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct uniwill_data *data = dev_get_drvdata(dev);
+ unsigned int value;
+ bool enable;
+ int ret;
+
+ ret = kstrtobool(buf, &enable);
+ if (ret < 0)
+ return ret;
+
+ guard(mutex)(&data->super_key_lock);
+
+ ret = regmap_read(data->regmap, EC_ADDR_SWITCH_STATUS, &value);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * We can only toggle the super key lock, so we return early if the setting
+ * is already in the correct state.
+ */
+ if (enable == !(value & SUPER_KEY_LOCK_STATUS))
+ return count;
+
+ ret = regmap_write_bits(data->regmap, EC_ADDR_TRIGGER, TRIGGER_SUPER_KEY_LOCK,
+ TRIGGER_SUPER_KEY_LOCK);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static ssize_t super_key_toggle_enable_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct uniwill_data *data = dev_get_drvdata(dev);
+ unsigned int value;
+ int ret;
+
+ ret = regmap_read(data->regmap, EC_ADDR_SWITCH_STATUS, &value);
+ if (ret < 0)
+ return ret;
+
+ return sysfs_emit(buf, "%d\n", !(value & SUPER_KEY_LOCK_STATUS));
+}
+
+static DEVICE_ATTR_RW(super_key_toggle_enable);
+
+static ssize_t touchpad_toggle_enable_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct uniwill_data *data = dev_get_drvdata(dev);
+ unsigned int value;
+ bool enable;
+ int ret;
+
+ ret = kstrtobool(buf, &enable);
+ if (ret < 0)
+ return ret;
+
+ if (enable)
+ value = 0;
+ else
+ value = TOUCHPAD_TOGGLE_OFF;
+
+ ret = regmap_update_bits(data->regmap, EC_ADDR_OEM_4, TOUCHPAD_TOGGLE_OFF, value);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static ssize_t touchpad_toggle_enable_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct uniwill_data *data = dev_get_drvdata(dev);
+ unsigned int value;
+ int ret;
+
+ ret = regmap_read(data->regmap, EC_ADDR_OEM_4, &value);
+ if (ret < 0)
+ return ret;
+
+ return sysfs_emit(buf, "%d\n", !(value & TOUCHPAD_TOGGLE_OFF));
+}
+
+static DEVICE_ATTR_RW(touchpad_toggle_enable);
+
+static ssize_t rainbow_animation_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct uniwill_data *data = dev_get_drvdata(dev);
+ unsigned int value;
+ bool enable;
+ int ret;
+
+ ret = kstrtobool(buf, &enable);
+ if (ret < 0)
+ return ret;
+
+ if (enable)
+ value = LIGHTBAR_WELCOME;
+ else
+ value = 0;
+
+ guard(mutex)(&data->led_lock);
+
+ ret = regmap_update_bits(data->regmap, EC_ADDR_LIGHTBAR_AC_CTRL, LIGHTBAR_WELCOME, value);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_update_bits(data->regmap, EC_ADDR_LIGHTBAR_BAT_CTRL, LIGHTBAR_WELCOME, value);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static ssize_t rainbow_animation_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ struct uniwill_data *data = dev_get_drvdata(dev);
+ unsigned int value;
+ int ret;
+
+ ret = regmap_read(data->regmap, EC_ADDR_LIGHTBAR_AC_CTRL, &value);
+ if (ret < 0)
+ return ret;
+
+ return sysfs_emit(buf, "%d\n", !!(value & LIGHTBAR_WELCOME));
+}
+
+static DEVICE_ATTR_RW(rainbow_animation);
+
+static ssize_t breathing_in_suspend_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct uniwill_data *data = dev_get_drvdata(dev);
+ unsigned int value;
+ bool enable;
+ int ret;
+
+ ret = kstrtobool(buf, &enable);
+ if (ret < 0)
+ return ret;
+
+ if (enable)
+ value = 0;
+ else
+ value = LIGHTBAR_S3_OFF;
+
+ /* We only access a single register here, so we do not need to use data->led_lock */
+ ret = regmap_update_bits(data->regmap, EC_ADDR_LIGHTBAR_AC_CTRL, LIGHTBAR_S3_OFF, value);
+ if (ret < 0)
+ return ret;
+
+ return count;
+}
+
+static ssize_t breathing_in_suspend_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct uniwill_data *data = dev_get_drvdata(dev);
+ unsigned int value;
+ int ret;
+
+ ret = regmap_read(data->regmap, EC_ADDR_LIGHTBAR_AC_CTRL, &value);
+ if (ret < 0)
+ return ret;
+
+ return sysfs_emit(buf, "%d\n", !(value & LIGHTBAR_S3_OFF));
+}
+
+static DEVICE_ATTR_RW(breathing_in_suspend);
+
+static struct attribute *uniwill_attrs[] = {
+ /* Keyboard-related */
+ &dev_attr_fn_lock_toggle_enable.attr,
+ &dev_attr_super_key_toggle_enable.attr,
+ &dev_attr_touchpad_toggle_enable.attr,
+ /* Lightbar-related */
+ &dev_attr_rainbow_animation.attr,
+ &dev_attr_breathing_in_suspend.attr,
+ NULL
+};
+
+static umode_t uniwill_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n)
+{
+ if (attr == &dev_attr_fn_lock_toggle_enable.attr) {
+ if (supported_features & UNIWILL_FEATURE_FN_LOCK_TOGGLE)
+ return attr->mode;
+ }
+
+ if (attr == &dev_attr_super_key_toggle_enable.attr) {
+ if (supported_features & UNIWILL_FEATURE_SUPER_KEY_TOGGLE)
+ return attr->mode;
+ }
+
+ if (attr == &dev_attr_touchpad_toggle_enable.attr) {
+ if (supported_features & UNIWILL_FEATURE_TOUCHPAD_TOGGLE)
+ return attr->mode;
+ }
+
+ if (attr == &dev_attr_rainbow_animation.attr ||
+ attr == &dev_attr_breathing_in_suspend.attr) {
+ if (supported_features & UNIWILL_FEATURE_LIGHTBAR)
+ return attr->mode;
+ }
+
+ return 0;
+}
+
+static const struct attribute_group uniwill_group = {
+ .is_visible = uniwill_attr_is_visible,
+ .attrs = uniwill_attrs,
+};
+
+static const struct attribute_group *uniwill_groups[] = {
+ &uniwill_group,
+ NULL
+};
+
+static int uniwill_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel,
+ long *val)
+{
+ struct uniwill_data *data = dev_get_drvdata(dev);
+ unsigned int value;
+ __be16 rpm;
+ int ret;
+
+ switch (type) {
+ case hwmon_temp:
+ switch (channel) {
+ case 0:
+ ret = regmap_read(data->regmap, EC_ADDR_CPU_TEMP, &value);
+ break;
+ case 1:
+ ret = regmap_read(data->regmap, EC_ADDR_GPU_TEMP, &value);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (ret < 0)
+ return ret;
+
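+ /* The EC reports temperatures in degrees Celsius, hwmon expects millidegrees */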
+ *val = value * MILLIDEGREE_PER_DEGREE;
+ return 0;
+ case hwmon_fan:
+ switch (channel) {
+ case 0:
+ ret = regmap_bulk_read(data->regmap, EC_ADDR_MAIN_FAN_RPM_1, &rpm,
+ sizeof(rpm));
+ break;
+ case 1:
+ ret = regmap_bulk_read(data->regmap, EC_ADDR_SECOND_FAN_RPM_1, &rpm,
+ sizeof(rpm));
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (ret < 0)
+ return ret;
+
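+ /* Fan speeds are reported as big-endian 16-bit RPM values */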
+ *val = be16_to_cpu(rpm);
+ return 0;
+ case hwmon_pwm:
+ switch (channel) {
+ case 0:
+ ret = regmap_read(data->regmap, EC_ADDR_PWM_1, &value);
+ break;
+ case 1:
+ ret = regmap_read(data->regmap, EC_ADDR_PWM_2, &value);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ if (ret < 0)
+ return ret;
+
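+ /* Scale the PWM value from the EC range (0 to PWM_MAX) to the hwmon range (0 to 255) */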
+ *val = fixp_linear_interpolate(0, 0, PWM_MAX, U8_MAX, value);
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int uniwill_read_string(struct device *dev, enum hwmon_sensor_types type, u32 attr,
+ int channel, const char **str)
+{
+ switch (type) {
+ case hwmon_temp:
+ *str = uniwill_temp_labels[channel];
+ return 0;
+ case hwmon_fan:
+ *str = uniwill_fan_labels[channel];
+ return 0;
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static const struct hwmon_ops uniwill_ops = {
+ .visible = 0444,
+ .read = uniwill_read,
+ .read_string = uniwill_read_string,
+};
+
+static const struct hwmon_channel_info * const uniwill_info[] = {
+ HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ),
+ HWMON_CHANNEL_INFO(temp,
+ HWMON_T_INPUT | HWMON_T_LABEL,
+ HWMON_T_INPUT | HWMON_T_LABEL),
+ HWMON_CHANNEL_INFO(fan,
+ HWMON_F_INPUT | HWMON_F_LABEL,
+ HWMON_F_INPUT | HWMON_F_LABEL),
+ HWMON_CHANNEL_INFO(pwm,
+ HWMON_PWM_INPUT,
+ HWMON_PWM_INPUT),
+ NULL
+};
+
+static const struct hwmon_chip_info uniwill_chip_info = {
+ .ops = &uniwill_ops,
+ .info = uniwill_info,
+};
+
+static int uniwill_hwmon_init(struct uniwill_data *data)
+{
+ struct device *hdev;
+
+ if (!(supported_features & UNIWILL_FEATURE_HWMON))
+ return 0;
+
+ hdev = devm_hwmon_device_register_with_info(data->dev, "uniwill", data,
+ &uniwill_chip_info, NULL);
+
+ return PTR_ERR_OR_ZERO(hdev);
+}
+
+static const unsigned int uniwill_led_channel_to_bat_reg[LED_CHANNELS] = {
+ EC_ADDR_LIGHTBAR_BAT_RED,
+ EC_ADDR_LIGHTBAR_BAT_GREEN,
+ EC_ADDR_LIGHTBAR_BAT_BLUE,
+};
+
+static const unsigned int uniwill_led_channel_to_ac_reg[LED_CHANNELS] = {
+ EC_ADDR_LIGHTBAR_AC_RED,
+ EC_ADDR_LIGHTBAR_AC_GREEN,
+ EC_ADDR_LIGHTBAR_AC_BLUE,
+};
+
+static int uniwill_led_brightness_set(struct led_classdev *led_cdev, enum led_brightness brightness)
+{
+ struct led_classdev_mc *led_mc_cdev = lcdev_to_mccdev(led_cdev);
+ struct uniwill_data *data = container_of(led_mc_cdev, struct uniwill_data, led_mc_cdev);
+ unsigned int value;
+ int ret;
+
+ ret = led_mc_calc_color_components(led_mc_cdev, brightness);
+ if (ret < 0)
+ return ret;
+
+ guard(mutex)(&data->led_lock);
+
+ for (int i = 0; i < LED_CHANNELS; i++) {
+ /* Clamp the brightness values so they cannot exceed LED_MAX_BRIGHTNESS */
+ value = min(LED_MAX_BRIGHTNESS, data->led_mc_subled_info[i].brightness);
+ ret = regmap_write(data->regmap, uniwill_led_channel_to_ac_reg[i], value);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(data->regmap, uniwill_led_channel_to_bat_reg[i], value);
+ if (ret < 0)
+ return ret;
+ }
+
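+ /* Turn the lightbar off in S0 when the overall brightness is zero */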
+ if (brightness)
+ value = 0;
+ else
+ value = LIGHTBAR_S0_OFF;
+
+ ret = regmap_update_bits(data->regmap, EC_ADDR_LIGHTBAR_AC_CTRL, LIGHTBAR_S0_OFF, value);
+ if (ret < 0)
+ return ret;
+
+ return regmap_update_bits(data->regmap, EC_ADDR_LIGHTBAR_BAT_CTRL, LIGHTBAR_S0_OFF, value);
+}
+
+#define LIGHTBAR_MASK (LIGHTBAR_APP_EXISTS | LIGHTBAR_S0_OFF | LIGHTBAR_S3_OFF | LIGHTBAR_WELCOME)
+
+static int uniwill_led_init(struct uniwill_data *data)
+{
+ struct led_init_data init_data = {
+ .devicename = DRIVER_NAME,
+ .default_label = "multicolor:" LED_FUNCTION_STATUS,
+ .devname_mandatory = true,
+ };
+ unsigned int color_indices[3] = {
+ LED_COLOR_ID_RED,
+ LED_COLOR_ID_GREEN,
+ LED_COLOR_ID_BLUE,
+ };
+ unsigned int value;
+ int ret;
+
+ if (!(supported_features & UNIWILL_FEATURE_LIGHTBAR))
+ return 0;
+
+ ret = devm_mutex_init(data->dev, &data->led_lock);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The EC has separate lightbar settings for AC and battery mode,
+ * so we have to ensure that both settings are the same.
+ */
+ ret = regmap_read(data->regmap, EC_ADDR_LIGHTBAR_AC_CTRL, &value);
+ if (ret < 0)
+ return ret;
+
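+ /* LIGHTBAR_APP_EXISTS presumably tells the EC that software is controlling the lightbar */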
+ value |= LIGHTBAR_APP_EXISTS;
+ ret = regmap_write(data->regmap, EC_ADDR_LIGHTBAR_AC_CTRL, value);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The breathing animation during suspend is not supported when
+ * running on battery power.
+ */
+ value |= LIGHTBAR_S3_OFF;
+ ret = regmap_update_bits(data->regmap, EC_ADDR_LIGHTBAR_BAT_CTRL, LIGHTBAR_MASK, value);
+ if (ret < 0)
+ return ret;
+
+ data->led_mc_cdev.led_cdev.color = LED_COLOR_ID_MULTI;
+ data->led_mc_cdev.led_cdev.max_brightness = LED_MAX_BRIGHTNESS;
+ data->led_mc_cdev.led_cdev.flags = LED_REJECT_NAME_CONFLICT;
+ data->led_mc_cdev.led_cdev.brightness_set_blocking = uniwill_led_brightness_set;
+
+ if (value & LIGHTBAR_S0_OFF)
+ data->led_mc_cdev.led_cdev.brightness = 0;
+ else
+ data->led_mc_cdev.led_cdev.brightness = LED_MAX_BRIGHTNESS;
+
+ for (int i = 0; i < LED_CHANNELS; i++) {
+ data->led_mc_subled_info[i].color_index = color_indices[i];
+
+ ret = regmap_read(data->regmap, uniwill_led_channel_to_ac_reg[i], &value);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Make sure that the initial intensity value is not greater than
+ * the maximum brightness.
+ */
+ value = min(LED_MAX_BRIGHTNESS, value);
+ ret = regmap_write(data->regmap, uniwill_led_channel_to_ac_reg[i], value);
+ if (ret < 0)
+ return ret;
+
+ ret = regmap_write(data->regmap, uniwill_led_channel_to_bat_reg[i], value);
+ if (ret < 0)
+ return ret;
+
+ data->led_mc_subled_info[i].intensity = value;
+ data->led_mc_subled_info[i].channel = i;
+ }
+
+ data->led_mc_cdev.subled_info = data->led_mc_subled_info;
+ data->led_mc_cdev.num_colors = LED_CHANNELS;
+
+ return devm_led_classdev_multicolor_register_ext(data->dev, &data->led_mc_cdev,
+ &init_data);
+}
+
+static int uniwill_get_property(struct power_supply *psy, const struct power_supply_ext *ext,
+ void *drvdata, enum power_supply_property psp,
+ union power_supply_propval *val)
+{
+ struct uniwill_data *data = drvdata;
+ union power_supply_propval prop;
+ unsigned int regval;
+ int ret;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_HEALTH:
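+ /* Derive the battery health from the presence and status info plus the EC alert register */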
+ ret = power_supply_get_property_direct(psy, POWER_SUPPLY_PROP_PRESENT, &prop);
+ if (ret < 0)
+ return ret;
+
+ if (!prop.intval) {
+ val->intval = POWER_SUPPLY_HEALTH_NO_BATTERY;
+ return 0;
+ }
+
+ ret = power_supply_get_property_direct(psy, POWER_SUPPLY_PROP_STATUS, &prop);
+ if (ret < 0)
+ return ret;
+
+ if (prop.intval == POWER_SUPPLY_STATUS_UNKNOWN) {
+ val->intval = POWER_SUPPLY_HEALTH_UNKNOWN;
+ return 0;
+ }
+
+ ret = regmap_read(data->regmap, EC_ADDR_BAT_ALERT, &regval);
+ if (ret < 0)
+ return ret;
+
+ if (regval) {
+ /* Charging issue */
+ val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE;
+ return 0;
+ }
+
+ val->intval = POWER_SUPPLY_HEALTH_GOOD;
+ return 0;
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD:
+ ret = regmap_read(data->regmap, EC_ADDR_CHARGE_CTRL, &regval);
+ if (ret < 0)
+ return ret;
+
+ val->intval = clamp_val(FIELD_GET(CHARGE_CTRL_MASK, regval), 0, 100);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int uniwill_set_property(struct power_supply *psy, const struct power_supply_ext *ext,
+ void *drvdata, enum power_supply_property psp,
+ const union power_supply_propval *val)
+{
+ struct uniwill_data *data = drvdata;
+
+ switch (psp) {
+ case POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD:
+ if (val->intval < 1 || val->intval > 100)
+ return -EINVAL;
+
+ return regmap_update_bits(data->regmap, EC_ADDR_CHARGE_CTRL, CHARGE_CTRL_MASK,
+ val->intval);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int uniwill_property_is_writeable(struct power_supply *psy,
+ const struct power_supply_ext *ext, void *drvdata,
+ enum power_supply_property psp)
+{
+ if (psp == POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD)
+ return true;
+
+ return false;
+}
+
+static const enum power_supply_property uniwill_properties[] = {
+ POWER_SUPPLY_PROP_HEALTH,
+ POWER_SUPPLY_PROP_CHARGE_CONTROL_END_THRESHOLD,
+};
+
+static const struct power_supply_ext uniwill_extension = {
+ .name = DRIVER_NAME,
+ .properties = uniwill_properties,
+ .num_properties = ARRAY_SIZE(uniwill_properties),
+ .get_property = uniwill_get_property,
+ .set_property = uniwill_set_property,
+ .property_is_writeable = uniwill_property_is_writeable,
+};
+
+static int uniwill_add_battery(struct power_supply *battery, struct acpi_battery_hook *hook)
+{
+ struct uniwill_data *data = container_of(hook, struct uniwill_data, hook);
+ struct uniwill_battery_entry *entry;
+ int ret;
+
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+
+ ret = power_supply_register_extension(battery, &uniwill_extension, data->dev, data);
+ if (ret < 0) {
+ kfree(entry);
+ return ret;
+ }
+
+ guard(mutex)(&data->battery_lock);
+
+ entry->battery = battery;
+ list_add(&entry->head, &data->batteries);
+
+ return 0;
+}
+
+static int uniwill_remove_battery(struct power_supply *battery, struct acpi_battery_hook *hook)
+{
+ struct uniwill_data *data = container_of(hook, struct uniwill_data, hook);
+ struct uniwill_battery_entry *entry, *tmp;
+
+ scoped_guard(mutex, &data->battery_lock) {
+ list_for_each_entry_safe(entry, tmp, &data->batteries, head) {
+ if (entry->battery == battery) {
+ list_del(&entry->head);
+ kfree(entry);
+ break;
+ }
+ }
+ }
+
+ power_supply_unregister_extension(battery, &uniwill_extension);
+
+ return 0;
+}
+
+static int uniwill_battery_init(struct uniwill_data *data)
+{
+ int ret;
+
+ if (!(supported_features & UNIWILL_FEATURE_BATTERY))
+ return 0;
+
+ ret = devm_mutex_init(data->dev, &data->battery_lock);
+ if (ret < 0)
+ return ret;
+
+ INIT_LIST_HEAD(&data->batteries);
+ data->hook.name = "Uniwill Battery Extension";
+ data->hook.add_battery = uniwill_add_battery;
+ data->hook.remove_battery = uniwill_remove_battery;
+
+ return devm_battery_hook_register(data->dev, &data->hook);
+}
+
+static int uniwill_notifier_call(struct notifier_block *nb, unsigned long action, void *dummy)
+{
+ struct uniwill_data *data = container_of(nb, struct uniwill_data, nb);
+ struct uniwill_battery_entry *entry;
+
+ switch (action) {
+ case UNIWILL_OSD_BATTERY_ALERT:
+ mutex_lock(&data->battery_lock);
+ list_for_each_entry(entry, &data->batteries, head) {
+ power_supply_changed(entry->battery);
+ }
+ mutex_unlock(&data->battery_lock);
+
+ return NOTIFY_OK;
+ case UNIWILL_OSD_DC_ADAPTER_CHANGED:
+ /*
+ * noop for the time being, will change once charging priority
+ * gets implemented.
+ */
+
+ return NOTIFY_OK;
+ default:
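+ /* Report all remaining events as hotkey events using the sparse keymap */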
+ mutex_lock(&data->input_lock);
+ sparse_keymap_report_event(data->input_device, action, 1, true);
+ mutex_unlock(&data->input_lock);
+
+ return NOTIFY_OK;
+ }
+}
+
+static int uniwill_input_init(struct uniwill_data *data)
+{
+ int ret;
+
+ ret = devm_mutex_init(data->dev, &data->input_lock);
+ if (ret < 0)
+ return ret;
+
+ data->input_device = devm_input_allocate_device(data->dev);
+ if (!data->input_device)
+ return -ENOMEM;
+
+ ret = sparse_keymap_setup(data->input_device, uniwill_keymap, NULL);
+ if (ret < 0)
+ return ret;
+
+ data->input_device->name = "Uniwill WMI hotkeys";
+ data->input_device->phys = "wmi/input0";
+ data->input_device->id.bustype = BUS_HOST;
+ ret = input_register_device(data->input_device);
+ if (ret < 0)
+ return ret;
+
+ data->nb.notifier_call = uniwill_notifier_call;
+
+ return devm_uniwill_wmi_register_notifier(data->dev, &data->nb);
+}
+
+static void uniwill_disable_manual_control(void *context)
+{
+ struct uniwill_data *data = context;
+
+ regmap_clear_bits(data->regmap, EC_ADDR_AP_OEM, ENABLE_MANUAL_CTRL);
+}
+
+static int uniwill_ec_init(struct uniwill_data *data)
+{
+ unsigned int value;
+ int ret;
+
+ ret = regmap_read(data->regmap, EC_ADDR_PROJECT_ID, &value);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(data->dev, "Project ID: %u\n", value);
+
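+ /* Enable manual EC control; it is disabled again when the driver unbinds */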
+ ret = regmap_set_bits(data->regmap, EC_ADDR_AP_OEM, ENABLE_MANUAL_CTRL);
+ if (ret < 0)
+ return ret;
+
+ return devm_add_action_or_reset(data->dev, uniwill_disable_manual_control, data);
+}
+
+static int uniwill_probe(struct platform_device *pdev)
+{
+ struct uniwill_data *data;
+ struct regmap *regmap;
+ acpi_handle handle;
+ int ret;
+
+ handle = ACPI_HANDLE(&pdev->dev);
+ if (!handle)
+ return -ENODEV;
+
+ data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ data->dev = &pdev->dev;
+ data->handle = handle;
+ platform_set_drvdata(pdev, data);
+
+ regmap = devm_regmap_init(&pdev->dev, &uniwill_ec_bus, data, &uniwill_ec_config);
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ data->regmap = regmap;
+ ret = devm_mutex_init(&pdev->dev, &data->super_key_lock);
+ if (ret < 0)
+ return ret;
+
+ ret = uniwill_ec_init(data);
+ if (ret < 0)
+ return ret;
+
+ ret = uniwill_battery_init(data);
+ if (ret < 0)
+ return ret;
+
+ ret = uniwill_led_init(data);
+ if (ret < 0)
+ return ret;
+
+ ret = uniwill_hwmon_init(data);
+ if (ret < 0)
+ return ret;
+
+ return uniwill_input_init(data);
+}
+
+static void uniwill_shutdown(struct platform_device *pdev)
+{
+ struct uniwill_data *data = platform_get_drvdata(pdev);
+
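+ /* Hand control of the EC back to the firmware */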
+ regmap_clear_bits(data->regmap, EC_ADDR_AP_OEM, ENABLE_MANUAL_CTRL);
+}
+
+static int uniwill_suspend_keyboard(struct uniwill_data *data)
+{
+ if (!(supported_features & UNIWILL_FEATURE_SUPER_KEY_TOGGLE))
+ return 0;
+
+ /*
+ * The EC_ADDR_SWITCH_STATUS register is marked as volatile, so we have to
+ * restore its contents ourselves.
+ */
+ return regmap_read(data->regmap, EC_ADDR_SWITCH_STATUS, &data->last_switch_status);
+}
+
+static int uniwill_suspend_battery(struct uniwill_data *data)
+{
+ if (!(supported_features & UNIWILL_FEATURE_BATTERY))
+ return 0;
+
+ /*
+ * Save the current charge limit in order to restore it during resume.
+ * We cannot rely on the regmap cache for that since this register needs to
+ * be declared as volatile due to CHARGE_CTRL_REACHED.
+ */
+ return regmap_read(data->regmap, EC_ADDR_CHARGE_CTRL, &data->last_charge_ctrl);
+}
+
+static int uniwill_suspend(struct device *dev)
+{
+ struct uniwill_data *data = dev_get_drvdata(dev);
+ int ret;
+
+ ret = uniwill_suspend_keyboard(data);
+ if (ret < 0)
+ return ret;
+
+ ret = uniwill_suspend_battery(data);
+ if (ret < 0)
+ return ret;
+
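+ /*
+ * Avoid accessing the EC while suspended and mark the register cache
+ * dirty so that regcache_sync() restores the EC state during resume.
+ */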
+ regcache_cache_only(data->regmap, true);
+ regcache_mark_dirty(data->regmap);
+
+ return 0;
+}
+
+static int uniwill_resume_keyboard(struct uniwill_data *data)
+{
+ unsigned int value;
+ int ret;
+
+ if (!(supported_features & UNIWILL_FEATURE_SUPER_KEY_TOGGLE))
+ return 0;
+
+ ret = regmap_read(data->regmap, EC_ADDR_SWITCH_STATUS, &value);
+ if (ret < 0)
+ return ret;
+
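+ /* Re-toggle the super key lock if its state changed across suspend */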
+ if ((data->last_switch_status & SUPER_KEY_LOCK_STATUS) == (value & SUPER_KEY_LOCK_STATUS))
+ return 0;
+
+ return regmap_write_bits(data->regmap, EC_ADDR_TRIGGER, TRIGGER_SUPER_KEY_LOCK,
+ TRIGGER_SUPER_KEY_LOCK);
+}
+
+static int uniwill_resume_battery(struct uniwill_data *data)
+{
+ if (!(supported_features & UNIWILL_FEATURE_BATTERY))
+ return 0;
+
+ return regmap_update_bits(data->regmap, EC_ADDR_CHARGE_CTRL, CHARGE_CTRL_MASK,
+ data->last_charge_ctrl);
+}
+
+static int uniwill_resume(struct device *dev)
+{
+ struct uniwill_data *data = dev_get_drvdata(dev);
+ int ret;
+
+ regcache_cache_only(data->regmap, false);
+
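+ /* Write the cached non-volatile register values back to the EC */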
+ ret = regcache_sync(data->regmap);
+ if (ret < 0)
+ return ret;
+
+ ret = uniwill_resume_keyboard(data);
+ if (ret < 0)
+ return ret;
+
+ return uniwill_resume_battery(data);
+}
+
+static DEFINE_SIMPLE_DEV_PM_OPS(uniwill_pm_ops, uniwill_suspend, uniwill_resume);
+
+/*
+ * We only use the DMI table for autoloading because the ACPI device itself
+ * does not guarantee that the underlying EC implementation is supported.
+ */
+static const struct acpi_device_id uniwill_id_table[] = {
+ { "INOU0000" },
+ { },
+};
+
+static struct platform_driver uniwill_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .dev_groups = uniwill_groups,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ .acpi_match_table = uniwill_id_table,
+ .pm = pm_sleep_ptr(&uniwill_pm_ops),
+ },
+ .probe = uniwill_probe,
+ .shutdown = uniwill_shutdown,
+};
+
+static const struct dmi_system_id uniwill_dmi_table[] __initconst = {
+ {
+ .ident = "XMG FUSION 15",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "LAPQC71A"),
+ },
+ },
+ {
+ .ident = "XMG FUSION 15",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SchenkerTechnologiesGmbH"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "LAPQC71B"),
+ },
+ },
+ {
+ .ident = "Intel NUC x15",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel(R) Client Systems"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "LAPAC71H"),
+ },
+ .driver_data = (void *)(UNIWILL_FEATURE_FN_LOCK_TOGGLE |
+ UNIWILL_FEATURE_SUPER_KEY_TOGGLE |
+ UNIWILL_FEATURE_TOUCHPAD_TOGGLE |
+ UNIWILL_FEATURE_BATTERY |
+ UNIWILL_FEATURE_HWMON),
+ },
+ {
+ .ident = "Intel NUC x15",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel(R) Client Systems"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "LAPKC71F"),
+ },
+ .driver_data = (void *)(UNIWILL_FEATURE_FN_LOCK_TOGGLE |
+ UNIWILL_FEATURE_SUPER_KEY_TOGGLE |
+ UNIWILL_FEATURE_TOUCHPAD_TOGGLE |
+ UNIWILL_FEATURE_LIGHTBAR |
+ UNIWILL_FEATURE_BATTERY |
+ UNIWILL_FEATURE_HWMON),
+ },
+ {
+ .ident = "TUXEDO InfinityBook Pro 14 Gen6 Intel",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "PHxTxX1"),
+ },
+ },
+ {
+ .ident = "TUXEDO InfinityBook Pro 14 Gen6 Intel",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "PHxTQx1"),
+ },
+ },
+ {
+ .ident = "TUXEDO InfinityBook Pro 14/16 Gen7 Intel",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "PHxARX1_PHxAQF1"),
+ },
+ },
+ {
+ .ident = "TUXEDO InfinityBook Pro 16 Gen7 Intel/Commodore Omnia-Book Pro Gen 7",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "PH6AG01_PH6AQ71_PH6AQI1"),
+ },
+ },
+ {
+ .ident = "TUXEDO InfinityBook Pro 14/16 Gen8 Intel/Commodore Omnia-Book Pro Gen 8",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "PH4PRX1_PH6PRX1"),
+ },
+ },
+ {
+ .ident = "TUXEDO InfinityBook Pro 14 Gen8 Intel/Commodore Omnia-Book Pro Gen 8",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "PH4PG31"),
+ },
+ },
+ {
+ .ident = "TUXEDO InfinityBook Pro 16 Gen8 Intel",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "PH6PG01_PH6PG71"),
+ },
+ },
+ {
+ .ident = "TUXEDO InfinityBook Pro 14/15 Gen9 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "GXxHRXx"),
+ },
+ },
+ {
+ .ident = "TUXEDO InfinityBook Pro 14/15 Gen9 Intel/Commodore Omnia-Book 15 Gen9",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "GXxMRXx"),
+ },
+ },
+ {
+ .ident = "TUXEDO InfinityBook Pro 14/15 Gen10 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "XxHP4NAx"),
+ },
+ },
+ {
+ .ident = "TUXEDO InfinityBook Pro 14/15 Gen10 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "XxKK4NAx_XxSP4NAx"),
+ },
+ },
+ {
+ .ident = "TUXEDO InfinityBook Pro 15 Gen10 Intel",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "XxAR4NAx"),
+ },
+ },
+ {
+ .ident = "TUXEDO InfinityBook Max 15 Gen10 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "X5KK45xS_X5SP45xS"),
+ },
+ },
+ {
+ .ident = "TUXEDO InfinityBook Max 16 Gen10 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "X6HP45xU"),
+ },
+ },
+ {
+ .ident = "TUXEDO InfinityBook Max 16 Gen10 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "X6KK45xU_X6SP45xU"),
+ },
+ },
+ {
+ .ident = "TUXEDO InfinityBook Max 15 Gen10 Intel",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "X5AR45xS"),
+ },
+ },
+ {
+ .ident = "TUXEDO InfinityBook Max 16 Gen10 Intel",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "X6AR55xU"),
+ },
+ },
+ {
+ .ident = "TUXEDO Polaris 15 Gen1 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "POLARIS1501A1650TI"),
+ },
+ },
+ {
+ .ident = "TUXEDO Polaris 15 Gen1 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "POLARIS1501A2060"),
+ },
+ },
+ {
+ .ident = "TUXEDO Polaris 17 Gen1 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "POLARIS1701A1650TI"),
+ },
+ },
+ {
+ .ident = "TUXEDO Polaris 17 Gen1 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "POLARIS1701A2060"),
+ },
+ },
+ {
+ .ident = "TUXEDO Polaris 15 Gen1 Intel",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "POLARIS1501I1650TI"),
+ },
+ },
+ {
+ .ident = "TUXEDO Polaris 15 Gen1 Intel",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "POLARIS1501I2060"),
+ },
+ },
+ {
+ .ident = "TUXEDO Polaris 17 Gen1 Intel",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "POLARIS1701I1650TI"),
+ },
+ },
+ {
+ .ident = "TUXEDO Polaris 17 Gen1 Intel",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "POLARIS1701I2060"),
+ },
+ },
+ {
+ .ident = "TUXEDO Trinity 15 Intel Gen1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "TRINITY1501I"),
+ },
+ },
+ {
+ .ident = "TUXEDO Trinity 17 Intel Gen1",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "TRINITY1701I"),
+ },
+ },
+ {
+ .ident = "TUXEDO Polaris 15/17 Gen2 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "GMxMGxx"),
+ },
+ },
+ {
+ .ident = "TUXEDO Polaris 15/17 Gen2 Intel",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "GMxNGxx"),
+ },
+ },
+ {
+ .ident = "TUXEDO Stellaris/Polaris 15/17 Gen3 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "GMxZGxx"),
+ },
+ },
+ {
+ .ident = "TUXEDO Stellaris/Polaris 15/17 Gen3 Intel",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "GMxTGxx"),
+ },
+ },
+ {
+ .ident = "TUXEDO Stellaris/Polaris 15/17 Gen4 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "GMxRGxx"),
+ },
+ },
+ {
+ .ident = "TUXEDO Stellaris 15 Gen4 Intel",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "GMxAGxx"),
+ },
+ },
+ {
+ .ident = "TUXEDO Polaris 15/17 Gen5 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "GMxXGxx"),
+ },
+ },
+ {
+ .ident = "TUXEDO Stellaris 16 Gen5 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "GM6XGxX"),
+ },
+ },
+ {
+ .ident = "TUXEDO Stellaris 16/17 Gen5 Intel/Commodore ORION Gen 5",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "GMxPXxx"),
+ },
+ },
+ {
+ .ident = "TUXEDO Stellaris Slim 15 Gen6 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "GMxHGxx"),
+ },
+ },
+ {
+ .ident = "TUXEDO Stellaris Slim 15 Gen6 Intel/Commodore ORION Slim 15 Gen6",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "GM5IXxA"),
+ },
+ },
+ {
+ .ident = "TUXEDO Stellaris 16 Gen6 Intel/Commodore ORION 16 Gen6",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "GM6IXxB_MB1"),
+ },
+ },
+ {
+ .ident = "TUXEDO Stellaris 16 Gen6 Intel/Commodore ORION 16 Gen6",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "GM6IXxB_MB2"),
+ },
+ },
+ {
+ .ident = "TUXEDO Stellaris 17 Gen6 Intel/Commodore ORION 17 Gen6",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "GM7IXxN"),
+ },
+ },
+ {
+ .ident = "TUXEDO Stellaris 16 Gen7 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "X6FR5xxY"),
+ },
+ },
+ {
+ .ident = "TUXEDO Stellaris 16 Gen7 Intel",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "X6AR5xxY"),
+ },
+ },
+ {
+ .ident = "TUXEDO Stellaris 16 Gen7 Intel",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "X6AR5xxY_mLED"),
+ },
+ },
+ {
+ .ident = "TUXEDO Pulse 14 Gen1 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "PULSE1401"),
+ },
+ },
+ {
+ .ident = "TUXEDO Pulse 15 Gen1 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "PULSE1501"),
+ },
+ },
+ {
+ .ident = "TUXEDO Pulse 15 Gen2 AMD",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "TUXEDO"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "PF5LUXG"),
+ },
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(dmi, uniwill_dmi_table);
+
+static int __init uniwill_init(void)
+{
+ const struct dmi_system_id *id;
+ int ret;
+
+ id = dmi_first_match(uniwill_dmi_table);
+ if (!id) {
+ if (!force)
+ return -ENODEV;
+
+ /* Assume that the device supports all features */
+ supported_features = UINT_MAX;
+ pr_warn("Loading on a potentially unsupported device\n");
+ } else {
+ supported_features = (uintptr_t)id->driver_data;
+ }
+
+ ret = platform_driver_register(&uniwill_driver);
+ if (ret < 0)
+ return ret;
+
+ ret = uniwill_wmi_register_driver();
+ if (ret < 0) {
+ platform_driver_unregister(&uniwill_driver);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(uniwill_init);
+
+static void __exit uniwill_exit(void)
+{
+ uniwill_wmi_unregister_driver();
+ platform_driver_unregister(&uniwill_driver);
+}
+module_exit(uniwill_exit);
+
+MODULE_AUTHOR("Armin Wolf <W_Armin@gmx.de>");
+MODULE_DESCRIPTION("Uniwill notebook driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/uniwill/uniwill-wmi.c b/drivers/platform/x86/uniwill/uniwill-wmi.c
new file mode 100644
index 000000000000..31d9c39f14ab
--- /dev/null
+++ b/drivers/platform/x86/uniwill/uniwill-wmi.c
@@ -0,0 +1,92 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Linux hotkey driver for Uniwill notebooks.
+ *
+ * Special thanks go to Pőcze Barnabás, Christoffer Sandberg and Werner Sembach
+ * for supporting the development of this driver either through prior work or
+ * by answering questions regarding the underlying WMI interface.
+ *
+ * Copyright (C) 2025 Armin Wolf <W_Armin@gmx.de>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/mod_devicetable.h>
+#include <linux/notifier.h>
+#include <linux/printk.h>
+#include <linux/types.h>
+#include <linux/wmi.h>
+
+#include "uniwill-wmi.h"
+
+#define DRIVER_NAME "uniwill-wmi"
+#define UNIWILL_EVENT_GUID "ABBC0F72-8EA1-11D1-00A0-C90629100000"
+
+static BLOCKING_NOTIFIER_HEAD(uniwill_wmi_chain_head);
+
+static void devm_uniwill_wmi_unregister_notifier(void *data)
+{
+ struct notifier_block *nb = data;
+
+ blocking_notifier_chain_unregister(&uniwill_wmi_chain_head, nb);
+}
+
+int devm_uniwill_wmi_register_notifier(struct device *dev, struct notifier_block *nb)
+{
+ int ret;
+
+ ret = blocking_notifier_chain_register(&uniwill_wmi_chain_head, nb);
+ if (ret < 0)
+ return ret;
+
+ return devm_add_action_or_reset(dev, devm_uniwill_wmi_unregister_notifier, nb);
+}
+
+static void uniwill_wmi_notify(struct wmi_device *wdev, union acpi_object *obj)
+{
+ u32 value;
+
+ if (obj->type != ACPI_TYPE_INTEGER)
+ return;
+
+ value = obj->integer.value;
+
+ dev_dbg(&wdev->dev, "Received WMI event %u\n", value);
+
+ blocking_notifier_call_chain(&uniwill_wmi_chain_head, value, NULL);
+}
+
+/*
+ * We cannot fully trust this GUID since Uniwill just copied the WMI GUID
+ * from the Windows driver example, and others probably did the same.
+ *
+ * Because of this we cannot use this WMI GUID for autoloading. Instead the
+ * associated driver will be registered manually after matching a DMI table.
+ */
+static const struct wmi_device_id uniwill_wmi_id_table[] = {
+ { UNIWILL_EVENT_GUID, NULL },
+ { }
+};
+
+static struct wmi_driver uniwill_wmi_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+ .id_table = uniwill_wmi_id_table,
+ .notify = uniwill_wmi_notify,
+ .no_singleton = true,
+};
+
+int __init uniwill_wmi_register_driver(void)
+{
+ return wmi_driver_register(&uniwill_wmi_driver);
+}
+
+void __exit uniwill_wmi_unregister_driver(void)
+{
+ wmi_driver_unregister(&uniwill_wmi_driver);
+}
diff --git a/drivers/platform/x86/uniwill/uniwill-wmi.h b/drivers/platform/x86/uniwill/uniwill-wmi.h
new file mode 100644
index 000000000000..48783b2e9ffb
--- /dev/null
+++ b/drivers/platform/x86/uniwill/uniwill-wmi.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Linux hotkey driver for Uniwill notebooks.
+ *
+ * Copyright (C) 2025 Armin Wolf <W_Armin@gmx.de>
+ */
+
+#ifndef UNIWILL_WMI_H
+#define UNIWILL_WMI_H
+
+#include <linux/init.h>
+
+#define UNIWILL_OSD_CAPSLOCK 0x01
+#define UNIWILL_OSD_NUMLOCK 0x02
+#define UNIWILL_OSD_SCROLLLOCK 0x03
+
+#define UNIWILL_OSD_TOUCHPAD_ON 0x04
+#define UNIWILL_OSD_TOUCHPAD_OFF 0x05
+
+#define UNIWILL_OSD_SILENT_MODE_ON 0x06
+#define UNIWILL_OSD_SILENT_MODE_OFF 0x07
+
+#define UNIWILL_OSD_WLAN_ON 0x08
+#define UNIWILL_OSD_WLAN_OFF 0x09
+
+#define UNIWILL_OSD_WIMAX_ON 0x0A
+#define UNIWILL_OSD_WIMAX_OFF 0x0B
+
+#define UNIWILL_OSD_BLUETOOTH_ON 0x0C
+#define UNIWILL_OSD_BLUETOOTH_OFF 0x0D
+
+#define UNIWILL_OSD_RF_ON 0x0E
+#define UNIWILL_OSD_RF_OFF 0x0F
+
+#define UNIWILL_OSD_3G_ON 0x10
+#define UNIWILL_OSD_3G_OFF 0x11
+
+#define UNIWILL_OSD_WEBCAM_ON 0x12
+#define UNIWILL_OSD_WEBCAM_OFF 0x13
+
+#define UNIWILL_OSD_BRIGHTNESSUP 0x14
+#define UNIWILL_OSD_BRIGHTNESSDOWN 0x15
+
+#define UNIWILL_OSD_RADIOON 0x1A
+#define UNIWILL_OSD_RADIOOFF 0x1B
+
+#define UNIWILL_OSD_POWERSAVE_ON 0x31
+#define UNIWILL_OSD_POWERSAVE_OFF 0x32
+
+#define UNIWILL_OSD_MENU 0x34
+
+#define UNIWILL_OSD_MUTE 0x35
+#define UNIWILL_OSD_VOLUMEDOWN 0x36
+#define UNIWILL_OSD_VOLUMEUP 0x37
+
+#define UNIWILL_OSD_MENU_2 0x38
+
+#define UNIWILL_OSD_LIGHTBAR_ON 0x39
+#define UNIWILL_OSD_LIGHTBAR_OFF 0x3A
+
+#define UNIWILL_OSD_KB_LED_LEVEL0 0x3B
+#define UNIWILL_OSD_KB_LED_LEVEL1 0x3C
+#define UNIWILL_OSD_KB_LED_LEVEL2 0x3D
+#define UNIWILL_OSD_KB_LED_LEVEL3 0x3E
+#define UNIWILL_OSD_KB_LED_LEVEL4 0x3F
+
+#define UNIWILL_OSD_SUPER_KEY_LOCK_ENABLE 0x40
+#define UNIWILL_OSD_SUPER_KEY_LOCK_DISABLE 0x41
+
+#define UNIWILL_OSD_MENU_JP 0x42
+
+#define UNIWILL_OSD_CAMERA_ON 0x90
+#define UNIWILL_OSD_CAMERA_OFF 0x91
+
+#define UNIWILL_OSD_RFKILL 0xA4
+
+#define UNIWILL_OSD_SUPER_KEY_LOCK_CHANGED 0xA5
+
+#define UNIWILL_OSD_LIGHTBAR_STATE_CHANGED 0xA6
+
+#define UNIWILL_OSD_FAN_BOOST_STATE_CHANGED 0xA7
+
+#define UNIWILL_OSD_LCD_SW 0xA9
+
+#define UNIWILL_OSD_FAN_OVERTEMP 0xAA
+
+#define UNIWILL_OSD_DC_ADAPTER_CHANGED 0xAB
+
+#define UNIWILL_OSD_BAT_HP_OFF 0xAC
+
+#define UNIWILL_OSD_FAN_DOWN_TEMP 0xAD
+
+#define UNIWILL_OSD_BATTERY_ALERT 0xAE
+
+#define UNIWILL_OSD_TIMAP_HAIERLB_SW 0xAF
+
+#define UNIWILL_OSD_PERFORMANCE_MODE_TOGGLE 0xB0
+
+#define UNIWILL_OSD_KBDILLUMDOWN 0xB1
+#define UNIWILL_OSD_KBDILLUMUP 0xB2
+
+#define UNIWILL_OSD_BACKLIGHT_LEVEL_CHANGE 0xB3
+#define UNIWILL_OSD_BACKLIGHT_POWER_CHANGE 0xB4
+
+#define UNIWILL_OSD_MIC_MUTE 0xB7
+
+#define UNIWILL_OSD_FN_LOCK 0xB8
+#define UNIWILL_OSD_KBDILLUMTOGGLE 0xB9
+
+#define UNIWILL_OSD_BAT_CHARGE_FULL_24_H 0xBE
+
+#define UNIWILL_OSD_BAT_ERM_UPDATE 0xBF
+
+#define UNIWILL_OSD_BENCHMARK_MODE_TOGGLE 0xC0
+
+#define UNIWILL_OSD_WEBCAM_TOGGLE 0xCF
+
+#define UNIWILL_OSD_KBD_BACKLIGHT_CHANGED 0xF0
+
+struct device;
+struct notifier_block;
+
+int devm_uniwill_wmi_register_notifier(struct device *dev, struct notifier_block *nb);
+
+int __init uniwill_wmi_register_driver(void);
+
+void __exit uniwill_wmi_unregister_driver(void);
+
+#endif /* UNIWILL_WMI_H */
diff --git a/drivers/platform/x86/x86-android-tablets/lenovo.c b/drivers/platform/x86/x86-android-tablets/lenovo.c
index e3d3a8290949..8d825e0b4661 100644
--- a/drivers/platform/x86/x86-android-tablets/lenovo.c
+++ b/drivers/platform/x86/x86-android-tablets/lenovo.c
@@ -543,7 +543,7 @@ static int __init lenovo_yoga_tab2_830_1050_init_codec(void)
ret = device_add_software_node(codec_dev, &lenovo_yoga_tab2_830_1050_wm5102);
if (ret) {
- ret = dev_err_probe(codec_dev, ret, "adding software node\n");
+ dev_err_probe(codec_dev, ret, "adding software node\n");
goto err_put_pinctrl;
}
diff --git a/drivers/platform/x86/x86-android-tablets/vexia_atla10_ec.c b/drivers/platform/x86/x86-android-tablets/vexia_atla10_ec.c
index 2f8cd8d9e0ab..ebbedfe5f4e8 100644
--- a/drivers/platform/x86/x86-android-tablets/vexia_atla10_ec.c
+++ b/drivers/platform/x86/x86-android-tablets/vexia_atla10_ec.c
@@ -183,7 +183,7 @@ static void atla10_ec_external_power_changed(struct power_supply *psy)
struct atla10_ec_data *data = power_supply_get_drvdata(psy);
/* After charger plug in/out wait 0.5s for things to stabilize */
- mod_delayed_work(system_wq, &data->work, HZ / 2);
+ mod_delayed_work(system_percpu_wq, &data->work, HZ / 2);
}
static const enum power_supply_property atla10_ec_psy_props[] = {
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index f4987f54e01b..4b6182cde859 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2823,14 +2823,18 @@ static void regulator_ena_gpio_free(struct regulator_dev *rdev)
static int regulator_ena_gpio_ctrl(struct regulator_dev *rdev, bool enable)
{
struct regulator_enable_gpio *pin = rdev->ena_pin;
+ int ret;
if (!pin)
return -EINVAL;
if (enable) {
/* Enable GPIO at initial use */
- if (pin->enable_count == 0)
- gpiod_set_value_cansleep(pin->gpiod, 1);
+ if (pin->enable_count == 0) {
+ ret = gpiod_set_value_cansleep(pin->gpiod, 1);
+ if (ret)
+ return ret;
+ }
pin->enable_count++;
} else {
@@ -2841,7 +2845,10 @@ static int regulator_ena_gpio_ctrl(struct regulator_dev *rdev, bool enable)
/* Disable GPIO if not used */
if (pin->enable_count <= 1) {
- gpiod_set_value_cansleep(pin->gpiod, 0);
+ ret = gpiod_set_value_cansleep(pin->gpiod, 0);
+ if (ret)
+ return ret;
+
pin->enable_count = 0;
}
}
diff --git a/drivers/regulator/fixed.c b/drivers/regulator/fixed.c
index a2d16e9abfb5..254c0a8a4555 100644
--- a/drivers/regulator/fixed.c
+++ b/drivers/regulator/fixed.c
@@ -330,13 +330,10 @@ static int reg_fixed_voltage_probe(struct platform_device *pdev)
drvdata->dev = devm_regulator_register(&pdev->dev, &drvdata->desc,
&cfg);
- if (IS_ERR(drvdata->dev)) {
- ret = dev_err_probe(&pdev->dev, PTR_ERR(drvdata->dev),
- "Failed to register regulator: %ld\n",
- PTR_ERR(drvdata->dev));
- gpiod_put(cfg.ena_gpiod);
- return ret;
- }
+ if (IS_ERR(drvdata->dev))
+ return dev_err_probe(&pdev->dev, PTR_ERR(drvdata->dev),
+ "Failed to register regulator: %ld\n",
+ PTR_ERR(drvdata->dev));
platform_set_drvdata(pdev, drvdata);
diff --git a/drivers/regulator/spacemit-p1.c b/drivers/regulator/spacemit-p1.c
index d437e6738ea1..2bf9137e12b1 100644
--- a/drivers/regulator/spacemit-p1.c
+++ b/drivers/regulator/spacemit-p1.c
@@ -87,10 +87,10 @@ static const struct linear_range p1_ldo_ranges[] = {
}
#define P1_BUCK_DESC(_n) \
- P1_REG_DESC(BUCK, buck, _n, "vcc", 0x47, BUCK_MASK, 254, p1_buck_ranges)
+ P1_REG_DESC(BUCK, buck, _n, "vin", 0x47, BUCK_MASK, 254, p1_buck_ranges)
#define P1_ALDO_DESC(_n) \
- P1_REG_DESC(ALDO, aldo, _n, "vcc", 0x5b, LDO_MASK, 117, p1_ldo_ranges)
+ P1_REG_DESC(ALDO, aldo, _n, "vin", 0x5b, LDO_MASK, 117, p1_ldo_ranges)
#define P1_DLDO_DESC(_n) \
P1_REG_DESC(DLDO, dldo, _n, "buck5", 0x67, LDO_MASK, 117, p1_ldo_ranges)
diff --git a/drivers/s390/char/sclp_mem.c b/drivers/s390/char/sclp_mem.c
index 676c085b4f8a..27f0d2f12a8b 100644
--- a/drivers/s390/char/sclp_mem.c
+++ b/drivers/s390/char/sclp_mem.c
@@ -44,6 +44,9 @@ struct sclp_mem {
unsigned int id;
unsigned int memmap_on_memory;
unsigned int config;
+#ifdef CONFIG_KASAN
+ unsigned int early_shadow_mapped;
+#endif
};
struct sclp_mem_arg {
@@ -244,6 +247,16 @@ static ssize_t sclp_config_mem_store(struct kobject *kobj, struct kobj_attribute
put_device(&mem->dev);
sclp_mem_change_state(addr, block_size, 0);
__remove_memory(addr, block_size);
+#ifdef CONFIG_KASAN
+ if (sclp_mem->early_shadow_mapped) {
+ unsigned long start, end;
+
+ start = (unsigned long)kasan_mem_to_shadow(__va(addr));
+ end = start + (block_size >> KASAN_SHADOW_SCALE_SHIFT);
+ vmemmap_free(start, end, NULL);
+ sclp_mem->early_shadow_mapped = 0;
+ }
+#endif
WRITE_ONCE(sclp_mem->config, 0);
}
out_unlock:
@@ -316,6 +329,9 @@ static int sclp_create_mem(struct sclp_mem *sclp_mem, struct kset *kset,
sclp_mem->memmap_on_memory = memmap_on_memory;
sclp_mem->config = config;
+#ifdef CONFIG_KASAN
+ sclp_mem->early_shadow_mapped = config;
+#endif
sclp_mem->id = id;
kobject_init(&sclp_mem->kobj, &ktype);
rc = kobject_add(&sclp_mem->kobj, &kset->kobj, "memory%d", id);
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index e3e0e9f36527..a226ff208eda 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -154,7 +154,7 @@ static struct urdev *urdev_get_from_devno(u16 devno)
struct ccw_device *cdev;
struct urdev *urd;
- sprintf(bus_id, "0.0.%04x", devno);
+ scnprintf(bus_id, sizeof(bus_id), "0.0.%04x", devno);
cdev = get_ccwdev_by_busid(&ur_driver, bus_id);
if (!cdev)
return NULL;
@@ -904,11 +904,11 @@ static int ur_set_online(struct ccw_device *cdev)
goto fail_free_cdev;
if (urd->cdev->id.cu_type == READER_PUNCH_DEVTYPE) {
if (urd->class == DEV_CLASS_UR_I)
- sprintf(node_id, "vmrdr-%s", dev_name(&cdev->dev));
+ scnprintf(node_id, sizeof(node_id), "vmrdr-%s", dev_name(&cdev->dev));
if (urd->class == DEV_CLASS_UR_O)
- sprintf(node_id, "vmpun-%s", dev_name(&cdev->dev));
+ scnprintf(node_id, sizeof(node_id), "vmpun-%s", dev_name(&cdev->dev));
} else if (urd->cdev->id.cu_type == PRINTER_DEVTYPE) {
- sprintf(node_id, "vmprt-%s", dev_name(&cdev->dev));
+ scnprintf(node_id, sizeof(node_id), "vmprt-%s", dev_name(&cdev->dev));
} else {
rc = -EOPNOTSUPP;
goto fail_free_cdev;
diff --git a/drivers/spi/spi-microchip-core-spi.c b/drivers/spi/spi-microchip-core-spi.c
index 98bf0e6cd00e..89e40fc45d73 100644
--- a/drivers/spi/spi-microchip-core-spi.c
+++ b/drivers/spi/spi-microchip-core-spi.c
@@ -387,6 +387,7 @@ static int mchp_corespi_probe(struct platform_device *pdev)
ret = devm_spi_register_controller(dev, host);
if (ret) {
+ mchp_corespi_disable_ints(spi);
mchp_corespi_disable(spi);
return dev_err_probe(dev, ret, "unable to register host for CoreSPI controller\n");
}