summaryrefslogtreecommitdiff
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/ufs/core/Makefile2
-rw-r--r--drivers/ufs/core/ufs-debugfs.c290
-rw-r--r--drivers/ufs/core/ufs-txeq.c1293
-rw-r--r--drivers/ufs/core/ufshcd-priv.h59
-rw-r--r--drivers/ufs/core/ufshcd.c192
-rw-r--r--drivers/ufs/host/ufs-amd-versal2.c3
-rw-r--r--drivers/ufs/host/ufs-exynos.c34
-rw-r--r--drivers/ufs/host/ufs-hisi.c23
-rw-r--r--drivers/ufs/host/ufs-mediatek.c40
-rw-r--r--drivers/ufs/host/ufs-qcom.c591
-rw-r--r--drivers/ufs/host/ufs-qcom.h42
-rw-r--r--drivers/ufs/host/ufs-sprd.c3
-rw-r--r--drivers/ufs/host/ufshcd-pci.c7
13 files changed, 2476 insertions, 103 deletions
diff --git a/drivers/ufs/core/Makefile b/drivers/ufs/core/Makefile
index 51e1867e524e..ce7d16d2cf35 100644
--- a/drivers/ufs/core/Makefile
+++ b/drivers/ufs/core/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o
-ufshcd-core-y += ufshcd.o ufs-sysfs.o ufs-mcq.o
+ufshcd-core-y += ufshcd.o ufs-sysfs.o ufs-mcq.o ufs-txeq.o
ufshcd-core-$(CONFIG_RPMB) += ufs-rpmb.o
ufshcd-core-$(CONFIG_DEBUG_FS) += ufs-debugfs.o
ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o
diff --git a/drivers/ufs/core/ufs-debugfs.c b/drivers/ufs/core/ufs-debugfs.c
index e3baed6c70bd..e3dd81d6fe82 100644
--- a/drivers/ufs/core/ufs-debugfs.c
+++ b/drivers/ufs/core/ufs-debugfs.c
@@ -209,6 +209,265 @@ static const struct ufs_debugfs_attr ufs_attrs[] = {
{ }
};
+static int ufs_tx_eq_params_show(struct seq_file *s, void *data)
+{
+ const char *file_name = s->file->f_path.dentry->d_name.name;
+ u32 gear = (u32)(uintptr_t)s->file->f_inode->i_private;
+ struct ufs_hba *hba = hba_from_file(s->file);
+ struct ufshcd_tx_eq_settings *settings;
+ struct ufs_pa_layer_attr *pwr_info;
+ struct ufshcd_tx_eq_params *params;
+ u32 rate = hba->pwr_info.hs_rate;
+ u32 num_lanes;
+ int lane;
+
+ if (!ufshcd_is_tx_eq_supported(hba))
+ return -EOPNOTSUPP;
+
+ if (gear < UFS_HS_G1 || gear > UFS_HS_GEAR_MAX) {
+ seq_printf(s, "Invalid gear selected: %u\n", gear);
+ return 0;
+ }
+
+ if (!hba->max_pwr_info.is_valid) {
+ seq_puts(s, "Max power info is invalid\n");
+ return 0;
+ }
+
+ pwr_info = &hba->max_pwr_info.info;
+ params = &hba->tx_eq_params[gear - 1];
+ if (!params->is_valid) {
+ seq_printf(s, "TX EQ params are invalid for HS-G%u, Rate-%s\n",
+ gear, ufs_hs_rate_to_str(rate));
+ return 0;
+ }
+
+ if (strcmp(file_name, "host_tx_eq_params") == 0) {
+ settings = params->host;
+ num_lanes = pwr_info->lane_tx;
+ seq_printf(s, "Host TX EQ PreShoot Cap: 0x%02x, DeEmphasis Cap: 0x%02x\n",
+ hba->host_preshoot_cap, hba->host_deemphasis_cap);
+ } else if (strcmp(file_name, "device_tx_eq_params") == 0) {
+ settings = params->device;
+ num_lanes = pwr_info->lane_rx;
+ seq_printf(s, "Device TX EQ PreShoot Cap: 0x%02x, DeEmphasis Cap: 0x%02x\n",
+ hba->device_preshoot_cap, hba->device_deemphasis_cap);
+ } else {
+ return -ENOENT;
+ }
+
+ seq_printf(s, "TX EQ setting for HS-G%u, Rate-%s:\n", gear,
+ ufs_hs_rate_to_str(rate));
+ for (lane = 0; lane < num_lanes; lane++)
+ seq_printf(s, "TX Lane %d - PreShoot: %d, DeEmphasis: %d, Pre-Coding %senabled\n",
+ lane, settings[lane].preshoot,
+ settings[lane].deemphasis,
+ settings[lane].precode_en ? "" : "not ");
+
+ return 0;
+}
+
+static int ufs_tx_eq_params_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ufs_tx_eq_params_show, inode->i_private);
+}
+
+static const struct file_operations ufs_tx_eq_params_fops = {
+ .owner = THIS_MODULE,
+ .open = ufs_tx_eq_params_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static const struct ufs_debugfs_attr ufs_tx_eq_attrs[] = {
+ { "host_tx_eq_params", 0400, &ufs_tx_eq_params_fops },
+ { "device_tx_eq_params", 0400, &ufs_tx_eq_params_fops },
+ { }
+};
+
+static int ufs_tx_eqtr_record_show(struct seq_file *s, void *data)
+{
+ const char *file_name = s->file->f_path.dentry->d_name.name;
+ u8 (*fom_array)[TX_HS_NUM_PRESHOOT][TX_HS_NUM_DEEMPHASIS];
+ u32 gear = (u32)(uintptr_t)s->file->f_inode->i_private;
+ unsigned long preshoot_bitmap, deemphasis_bitmap;
+ struct ufs_hba *hba = hba_from_file(s->file);
+ struct ufs_pa_layer_attr *pwr_info;
+ struct ufshcd_tx_eq_params *params;
+ struct ufshcd_tx_eqtr_record *rec;
+ u32 rate = hba->pwr_info.hs_rate;
+ u8 preshoot, deemphasis;
+ u32 num_lanes;
+ char name[32];
+ int lane;
+
+ if (!ufshcd_is_tx_eq_supported(hba))
+ return -EOPNOTSUPP;
+
+ if (gear < UFS_HS_G1 || gear > UFS_HS_GEAR_MAX) {
+ seq_printf(s, "Invalid gear selected: %u\n", gear);
+ return 0;
+ }
+
+ if (!hba->max_pwr_info.is_valid) {
+ seq_puts(s, "Max power info is invalid\n");
+ return 0;
+ }
+
+ pwr_info = &hba->max_pwr_info.info;
+ params = &hba->tx_eq_params[gear - 1];
+ if (!params->is_valid) {
+ seq_printf(s, "TX EQ params are invalid for HS-G%u, Rate-%s\n",
+ gear, ufs_hs_rate_to_str(rate));
+ return 0;
+ }
+
+ rec = params->eqtr_record;
+ if (!rec || !rec->last_record_index) {
+ seq_printf(s, "No TX EQTR records found for HS-G%u, Rate-%s.\n",
+ gear, ufs_hs_rate_to_str(rate));
+ return 0;
+ }
+
+ if (strcmp(file_name, "host_tx_eqtr_record") == 0) {
+ preshoot_bitmap = (hba->host_preshoot_cap << 0x1) | 0x1;
+ deemphasis_bitmap = (hba->host_deemphasis_cap << 0x1) | 0x1;
+ num_lanes = pwr_info->lane_tx;
+ fom_array = rec->host_fom;
+ snprintf(name, sizeof(name), "%s", "Host");
+ } else if (strcmp(file_name, "device_tx_eqtr_record") == 0) {
+ preshoot_bitmap = (hba->device_preshoot_cap << 0x1) | 0x1;
+ deemphasis_bitmap = (hba->device_deemphasis_cap << 0x1) | 0x1;
+ num_lanes = pwr_info->lane_rx;
+ fom_array = rec->device_fom;
+ snprintf(name, sizeof(name), "%s", "Device");
+ } else {
+ return -ENOENT;
+ }
+
+ seq_printf(s, "%s TX EQTR record summary -\n", name);
+ seq_printf(s, "Target Power Mode: HS-G%u, Rate-%s\n", gear,
+ ufs_hs_rate_to_str(rate));
+ seq_printf(s, "Most recent record index: %d\n",
+ rec->last_record_index);
+ seq_printf(s, "Most recent record timestamp: %llu us\n",
+ ktime_to_us(rec->last_record_ts));
+
+ for (lane = 0; lane < num_lanes; lane++) {
+ seq_printf(s, "\nTX Lane %d FOM - %s\n", lane, "PreShoot\\DeEmphasis");
+ seq_puts(s, "\\");
+ /* Print DeEmphasis header as X-axis. */
+ for (deemphasis = 0; deemphasis < TX_HS_NUM_DEEMPHASIS; deemphasis++)
+ seq_printf(s, "%8d%s", deemphasis, " ");
+ seq_puts(s, "\n");
+ /* Print matrix rows with PreShoot as Y-axis. */
+ for (preshoot = 0; preshoot < TX_HS_NUM_PRESHOOT; preshoot++) {
+ seq_printf(s, "%d", preshoot);
+ for (deemphasis = 0; deemphasis < TX_HS_NUM_DEEMPHASIS; deemphasis++) {
+ if (test_bit(preshoot, &preshoot_bitmap) &&
+ test_bit(deemphasis, &deemphasis_bitmap)) {
+ u8 fom = fom_array[lane][preshoot][deemphasis];
+ u8 fom_val = fom & RX_FOM_VALUE_MASK;
+ bool precode_en = fom & RX_FOM_PRECODING_EN_BIT;
+
+ if (ufshcd_is_txeq_presets_used(hba) &&
+ !ufshcd_is_txeq_preset_selected(preshoot, deemphasis))
+ seq_printf(s, "%8s%s", "-", " ");
+ else
+ seq_printf(s, "%8u%s", fom_val,
+ precode_en ? "*" : " ");
+ } else {
+ seq_printf(s, "%8s%s", "x", " ");
+ }
+ }
+ seq_puts(s, "\n");
+ }
+ }
+
+ return 0;
+}
+
+static int ufs_tx_eqtr_record_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ufs_tx_eqtr_record_show, inode->i_private);
+}
+
+static const struct file_operations ufs_tx_eqtr_record_fops = {
+ .owner = THIS_MODULE,
+ .open = ufs_tx_eqtr_record_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static ssize_t ufs_tx_eq_ctrl_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ u32 gear = (u32)(uintptr_t)file->f_inode->i_private;
+ struct ufs_hba *hba = hba_from_file(file);
+ char kbuf[32];
+ int ret;
+
+ if (count >= sizeof(kbuf))
+ return -EINVAL;
+
+ if (copy_from_user(kbuf, buf, count))
+ return -EFAULT;
+
+ if (!ufshcd_is_tx_eq_supported(hba))
+ return -EOPNOTSUPP;
+
+ if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL ||
+ !hba->max_pwr_info.is_valid)
+ return -EBUSY;
+
+ if (!hba->ufs_device_wlun)
+ return -ENODEV;
+
+ kbuf[count] = '\0';
+
+ if (sysfs_streq(kbuf, "retrain")) {
+ ret = ufs_debugfs_get_user_access(hba);
+ if (ret)
+ return ret;
+ ret = ufshcd_retrain_tx_eq(hba, gear);
+ ufs_debugfs_put_user_access(hba);
+ } else {
+ /* Unknown operation */
+ return -EINVAL;
+ }
+
+ return ret ? ret : count;
+}
+
+static int ufs_tx_eq_ctrl_show(struct seq_file *s, void *data)
+{
+ seq_puts(s, "write 'retrain' to retrain TX Equalization settings\n");
+ return 0;
+}
+
+static int ufs_tx_eq_ctrl_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ufs_tx_eq_ctrl_show, inode->i_private);
+}
+
+static const struct file_operations ufs_tx_eq_ctrl_fops = {
+ .owner = THIS_MODULE,
+ .open = ufs_tx_eq_ctrl_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = ufs_tx_eq_ctrl_write,
+ .release = single_release,
+};
+
+static const struct ufs_debugfs_attr ufs_tx_eqtr_attrs[] = {
+ { "host_tx_eqtr_record", 0400, &ufs_tx_eqtr_record_fops },
+ { "device_tx_eqtr_record", 0400, &ufs_tx_eqtr_record_fops },
+ { "tx_eq_ctrl", 0600, &ufs_tx_eq_ctrl_fops },
+ { }
+};
+
void ufs_debugfs_hba_init(struct ufs_hba *hba)
{
const struct ufs_debugfs_attr *attr;
@@ -230,6 +489,37 @@ void ufs_debugfs_hba_init(struct ufs_hba *hba)
hba, &ee_usr_mask_fops);
debugfs_create_u32("exception_event_rate_limit_ms", 0600, hba->debugfs_root,
&hba->debugfs_ee_rate_limit_ms);
+
+ if (!(hba->caps & UFSHCD_CAP_TX_EQUALIZATION))
+ return;
+
+ for (u32 gear = UFS_HS_G1; gear <= UFS_HS_GEAR_MAX; gear++) {
+ struct dentry *txeq_dir;
+ char name[32];
+
+ snprintf(name, sizeof(name), "tx_eq_hs_gear%d", gear);
+ txeq_dir = debugfs_create_dir(name, hba->debugfs_root);
+ if (IS_ERR_OR_NULL(txeq_dir))
+ return;
+
+ d_inode(txeq_dir)->i_private = hba;
+
+ /* Create files for TX Equalization parameters */
+ for (attr = ufs_tx_eq_attrs; attr->name; attr++)
+ debugfs_create_file(attr->name, attr->mode, txeq_dir,
+ (void *)(uintptr_t)gear,
+ attr->fops);
+
+ /* TX EQTR is supported for HS-G4 and higher Gears */
+ if (gear < UFS_HS_G4)
+ continue;
+
+ /* Create files for TX EQTR related attributes */
+ for (attr = ufs_tx_eqtr_attrs; attr->name; attr++)
+ debugfs_create_file(attr->name, attr->mode, txeq_dir,
+ (void *)(uintptr_t)gear,
+ attr->fops);
+ }
}
void ufs_debugfs_hba_exit(struct ufs_hba *hba)
diff --git a/drivers/ufs/core/ufs-txeq.c b/drivers/ufs/core/ufs-txeq.c
new file mode 100644
index 000000000000..b2dc89124353
--- /dev/null
+++ b/drivers/ufs/core/ufs-txeq.c
@@ -0,0 +1,1293 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2026 Qualcomm Technologies, Inc.
+ *
+ * Author:
+ * Can Guo <can.guo@oss.qualcomm.com>
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <ufs/ufshcd.h>
+#include <ufs/unipro.h>
+#include "ufshcd-priv.h"
+
+static bool use_adaptive_txeq;
+module_param(use_adaptive_txeq, bool, 0644);
+MODULE_PARM_DESC(use_adaptive_txeq, "Find and apply optimal TX Equalization settings before changing Power Mode (default: false)");
+
+static int txeq_gear_set(const char *val, const struct kernel_param *kp)
+{
+ return param_set_uint_minmax(val, kp, UFS_HS_G1, UFS_HS_GEAR_MAX);
+}
+
+static const struct kernel_param_ops txeq_gear_ops = {
+ .set = txeq_gear_set,
+ .get = param_get_uint,
+};
+
+static unsigned int adaptive_txeq_gear = UFS_HS_G6;
+module_param_cb(adaptive_txeq_gear, &txeq_gear_ops, &adaptive_txeq_gear, 0644);
+MODULE_PARM_DESC(adaptive_txeq_gear, "For HS-Gear[n] and above, adaptive txeq shall be used");
+
+static bool use_txeq_presets;
+module_param(use_txeq_presets, bool, 0644);
+MODULE_PARM_DESC(use_txeq_presets, "Use only the 8 TX Equalization Presets (pre-defined Pre-Shoot & De-Emphasis combinations) for TX EQTR (default: false)");
+
+static bool txeq_presets_selected[UFS_TX_EQ_PRESET_MAX] = {[0 ... (UFS_TX_EQ_PRESET_MAX - 1)] = 1};
+module_param_array(txeq_presets_selected, bool, NULL, 0644);
+MODULE_PARM_DESC(txeq_presets_selected, "Use only the selected Presets out of the 8 TX Equalization Presets for TX EQTR");
+
+/*
+ * ufs_tx_eq_preset - Table of minimum required list of presets.
+ *
+ * A HS-G6 capable M-TX shall support the presets defined in M-PHY v6.0 spec.
+ * Preset Pre-Shoot(dB) De-Emphasis(dB)
+ * P0 0.0 0.0
+ * P1 0.0 0.8
+ * P2 0.0 1.6
+ * P3 0.8 0.0
+ * P4 1.6 0.0
+ * P5 0.8 0.8
+ * P6 0.8 1.6
+ * P7 1.6 0.8
+ */
+static const struct __ufs_tx_eq_preset {
+ u8 preshoot;
+ u8 deemphasis;
+} ufs_tx_eq_preset[UFS_TX_EQ_PRESET_MAX] = {
+ [UFS_TX_EQ_PRESET_P0] = {UFS_TX_HS_PRESHOOT_DB_0P0, UFS_TX_HS_DEEMPHASIS_DB_0P0},
+ [UFS_TX_EQ_PRESET_P1] = {UFS_TX_HS_PRESHOOT_DB_0P0, UFS_TX_HS_DEEMPHASIS_DB_0P8},
+ [UFS_TX_EQ_PRESET_P2] = {UFS_TX_HS_PRESHOOT_DB_0P0, UFS_TX_HS_DEEMPHASIS_DB_1P6},
+ [UFS_TX_EQ_PRESET_P3] = {UFS_TX_HS_PRESHOOT_DB_0P8, UFS_TX_HS_DEEMPHASIS_DB_0P0},
+ [UFS_TX_EQ_PRESET_P4] = {UFS_TX_HS_PRESHOOT_DB_1P6, UFS_TX_HS_DEEMPHASIS_DB_0P0},
+ [UFS_TX_EQ_PRESET_P5] = {UFS_TX_HS_PRESHOOT_DB_0P8, UFS_TX_HS_DEEMPHASIS_DB_0P8},
+ [UFS_TX_EQ_PRESET_P6] = {UFS_TX_HS_PRESHOOT_DB_0P8, UFS_TX_HS_DEEMPHASIS_DB_1P6},
+ [UFS_TX_EQ_PRESET_P7] = {UFS_TX_HS_PRESHOOT_DB_1P6, UFS_TX_HS_DEEMPHASIS_DB_0P8},
+};
+
+/*
+ * pa_peer_rx_adapt_initial - Table of UniPro PA_PeerRxHSGnAdaptInitial
+ * attribute IDs for High Speed (HS) Gears.
+ *
+ * This table maps HS Gears to their respective UniPro PA_PeerRxHSGnAdaptInitial
+ * attribute IDs. Entries for Gears 1-3 are 0 (unsupported).
+ */
+static const u32 pa_peer_rx_adapt_initial[UFS_HS_GEAR_MAX] = {
+ 0,
+ 0,
+ 0,
+ PA_PEERRXHSG4ADAPTINITIAL,
+ PA_PEERRXHSG5ADAPTINITIAL,
+ PA_PEERRXHSG6ADAPTINITIALL0L3
+};
+
+/*
+ * rx_adapt_initial_cap - Table of M-PHY RX_HS_Gn_ADAPT_INITIAL_Capability
+ * attribute IDs for High Speed (HS) Gears.
+ *
+ * This table maps HS Gears to their respective M-PHY
+ * RX_HS_Gn_ADAPT_INITIAL_Capability attribute IDs. Entries for Gears 1-3 are 0
+ * (unsupported).
+ */
+static const u32 rx_adapt_initial_cap[UFS_HS_GEAR_MAX] = {
+ 0,
+ 0,
+ 0,
+ RX_HS_G4_ADAPT_INITIAL_CAP,
+ RX_HS_G5_ADAPT_INITIAL_CAP,
+ RX_HS_G6_ADAPT_INITIAL_CAP
+};
+
+/*
+ * pa_tx_eq_setting - Table of UniPro PA_TxEQGnSetting attribute IDs for High
+ * Speed (HS) Gears.
+ *
+ * This table maps HS Gears to their respective UniPro PA_TxEQGnSetting
+ * attribute IDs.
+ */
+static const u32 pa_tx_eq_setting[UFS_HS_GEAR_MAX] = {
+ PA_TXEQG1SETTING,
+ PA_TXEQG2SETTING,
+ PA_TXEQG3SETTING,
+ PA_TXEQG4SETTING,
+ PA_TXEQG5SETTING,
+ PA_TXEQG6SETTING
+};
+
+/**
+ * ufshcd_configure_precoding - Configure Pre-Coding for all active lanes
+ * @hba: per adapter instance
+ * @params: TX EQ parameters data structure
+ *
+ * Bit[7] in RX_FOM indicates that the receiver needs to enable Pre-Coding when
+ * set. Pre-Coding must be enabled on both the transmitter and receiver to
+ * ensure proper operation.
+ *
+ * Returns 0 on success, non-zero error code otherwise
+ */
+static int ufshcd_configure_precoding(struct ufs_hba *hba,
+ struct ufshcd_tx_eq_params *params)
+{
+ struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
+ u32 local_precode_en = 0;
+ u32 peer_precode_en = 0;
+ int lane, ret;
+
+ /* Enable Pre-Coding for Host's TX & Device's RX pair */
+ for (lane = 0; lane < pwr_info->lane_tx; lane++) {
+ if (params->host[lane].precode_en) {
+ local_precode_en |= PRECODEEN_TX_BIT(lane);
+ peer_precode_en |= PRECODEEN_RX_BIT(lane);
+ }
+ }
+
+ /* Enable Pre-Coding for Device's TX & Host's RX pair */
+ for (lane = 0; lane < pwr_info->lane_rx; lane++) {
+ if (params->device[lane].precode_en) {
+ peer_precode_en |= PRECODEEN_TX_BIT(lane);
+ local_precode_en |= PRECODEEN_RX_BIT(lane);
+ }
+ }
+
+ if (!local_precode_en && !peer_precode_en) {
+ dev_dbg(hba->dev, "Pre-Coding is not required for Host and Device\n");
+ return 0;
+ }
+
+ /* Set local PA_PreCodeEn */
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PRECODEEN), local_precode_en);
+ if (ret) {
+ dev_err(hba->dev, "Failed to set local PA_PreCodeEn: %d\n", ret);
+ return ret;
+ }
+
+ /* Set peer PA_PreCodeEn */
+ ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_PRECODEEN), peer_precode_en);
+ if (ret) {
+ dev_err(hba->dev, "Failed to set peer PA_PreCodeEn: %d\n", ret);
+ return ret;
+ }
+
+ dev_dbg(hba->dev, "Local PA_PreCodeEn: 0x%02x, Peer PA_PreCodeEn: 0x%02x\n",
+ local_precode_en, peer_precode_en);
+
+ return 0;
+}
+
+void ufshcd_print_tx_eq_params(struct ufs_hba *hba)
+{
+ struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
+ struct ufshcd_tx_eq_params *params;
+ u32 gear = hba->pwr_info.gear_tx;
+ int lane;
+
+ if (!ufshcd_is_tx_eq_supported(hba))
+ return;
+
+ if (gear < UFS_HS_G1 || gear > UFS_HS_GEAR_MAX)
+ return;
+
+ params = &hba->tx_eq_params[gear - 1];
+ if (!params->is_valid || !params->is_applied)
+ return;
+
+ for (lane = 0; lane < pwr_info->lane_tx; lane++)
+ dev_dbg(hba->dev, "Host TX Lane %d: PreShoot %u, DeEmphasis %u, FOM %u, PreCodeEn %d\n",
+ lane, params->host[lane].preshoot,
+ params->host[lane].deemphasis,
+ params->host[lane].fom_val,
+ params->host[lane].precode_en);
+
+ for (lane = 0; lane < pwr_info->lane_rx; lane++)
+ dev_dbg(hba->dev, "Device TX Lane %d: PreShoot %u, DeEmphasis %u, FOM %u, PreCodeEn %d\n",
+ lane, params->device[lane].preshoot,
+ params->device[lane].deemphasis,
+ params->device[lane].fom_val,
+ params->device[lane].precode_en);
+}
+
+static inline u32
+ufshcd_compose_tx_eq_setting(struct ufshcd_tx_eq_settings *settings,
+ int num_lanes)
+{
+ u32 setting = 0;
+ int lane;
+
+ for (lane = 0; lane < num_lanes; lane++, settings++) {
+ setting |= TX_HS_PRESHOOT_BITS(lane, settings->preshoot);
+ setting |= TX_HS_DEEMPHASIS_BITS(lane, settings->deemphasis);
+ }
+
+ return setting;
+}
+
+/**
+ * ufshcd_apply_tx_eq_settings - Apply TX Equalization settings for target gear
+ * @hba: per adapter instance
+ * @params: TX EQ parameters data structure
+ * @gear: target gear
+ *
+ * Returns 0 on success, negative error code otherwise
+ */
+int ufshcd_apply_tx_eq_settings(struct ufs_hba *hba,
+ struct ufshcd_tx_eq_params *params, u32 gear)
+{
+ struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
+ u32 setting;
+ int ret;
+
+ /* Compose settings for Host's TX Lanes */
+ setting = ufshcd_compose_tx_eq_setting(params->host, pwr_info->lane_tx);
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(pa_tx_eq_setting[gear - 1]), setting);
+ if (ret)
+ return ret;
+
+ /* Compose settings for Device's TX Lanes */
+ setting = ufshcd_compose_tx_eq_setting(params->device, pwr_info->lane_rx);
+ ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(pa_tx_eq_setting[gear - 1]), setting);
+ if (ret)
+ return ret;
+
+ /* Configure Pre-Coding */
+ if (gear >= UFS_HS_G6) {
+ ret = ufshcd_configure_precoding(hba, params);
+ if (ret) {
+ dev_err(hba->dev, "Failed to configure pre-coding: %d\n", ret);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ufshcd_apply_tx_eq_settings);
+
+/**
+ * ufshcd_evaluate_tx_eqtr_fom - Evaluate TX EQTR FOM results
+ * @hba: per adapter instance
+ * @pwr_mode: target power mode containing gear and rate information
+ * @eqtr_data: TX EQTR data structure
+ * @h_iter: host TX EQTR iterator data structure
+ * @d_iter: device TX EQTR iterator data structure
+ *
+ * Evaluate TX EQTR FOM results, update host and device TX EQTR data accordingly
+ * if FOM has been improved compared to the previous iteration, and record TX EQTR
+ * FOM results.
+ */
+static void ufshcd_evaluate_tx_eqtr_fom(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *pwr_mode,
+ struct ufshcd_tx_eqtr_data *eqtr_data,
+ struct tx_eqtr_iter *h_iter,
+ struct tx_eqtr_iter *d_iter)
+{
+ u8 preshoot, deemphasis, fom_value;
+ bool precode_en;
+ int lane;
+
+ for (lane = 0; h_iter->is_updated && lane < pwr_mode->lane_tx; lane++) {
+ preshoot = h_iter->preshoot;
+ deemphasis = h_iter->deemphasis;
+ fom_value = h_iter->fom[lane] & RX_FOM_VALUE_MASK;
+ precode_en = h_iter->fom[lane] & RX_FOM_PRECODING_EN_BIT;
+
+ /* Record host TX EQTR FOM */
+ eqtr_data->host_fom[lane][preshoot][deemphasis] = h_iter->fom[lane];
+
+ /* Check if FOM has been improved for host's TX Lanes */
+ if (fom_value > eqtr_data->host[lane].fom_val) {
+ eqtr_data->host[lane].preshoot = preshoot;
+ eqtr_data->host[lane].deemphasis = deemphasis;
+ eqtr_data->host[lane].fom_val = fom_value;
+ eqtr_data->host[lane].precode_en = precode_en;
+ }
+
+ dev_dbg(hba->dev, "TX EQTR: Host TX Lane %d: PreShoot %u, DeEmphasis %u, FOM value %u, PreCodeEn %d\n",
+ lane, preshoot, deemphasis, fom_value, precode_en);
+ }
+
+ for (lane = 0; d_iter->is_updated && lane < pwr_mode->lane_rx; lane++) {
+ preshoot = d_iter->preshoot;
+ deemphasis = d_iter->deemphasis;
+ fom_value = d_iter->fom[lane] & RX_FOM_VALUE_MASK;
+ precode_en = d_iter->fom[lane] & RX_FOM_PRECODING_EN_BIT;
+
+ /* Record device TX EQTR FOM */
+ eqtr_data->device_fom[lane][preshoot][deemphasis] = d_iter->fom[lane];
+
+ /* Check if FOM has been improved for Device's TX Lanes */
+ if (fom_value > eqtr_data->device[lane].fom_val) {
+ eqtr_data->device[lane].preshoot = preshoot;
+ eqtr_data->device[lane].deemphasis = deemphasis;
+ eqtr_data->device[lane].fom_val = fom_value;
+ eqtr_data->device[lane].precode_en = precode_en;
+ }
+
+ dev_dbg(hba->dev, "TX EQTR: Device TX Lane %d: PreShoot %u, DeEmphasis %u, FOM value %u, PreCodeEn %d\n",
+ lane, preshoot, deemphasis, fom_value, precode_en);
+ }
+}
+
+/**
+ * ufshcd_get_rx_fom - Get Figure of Merit (FOM) for both sides
+ * @hba: per adapter instance
+ * @pwr_mode: target power mode containing gear and rate information
+ * @h_iter: host TX EQTR iterator data structure
+ * @d_iter: device TX EQTR iterator data structure
+ *
+ * Returns 0 on success, negative error code otherwise
+ */
+static int ufshcd_get_rx_fom(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *pwr_mode,
+ struct tx_eqtr_iter *h_iter,
+ struct tx_eqtr_iter *d_iter)
+{
+ int lane, ret;
+ u32 fom;
+
+ /* Get FOM of host's TX lanes from device's RX_FOM. */
+ for (lane = 0; lane < pwr_mode->lane_tx; lane++) {
+ ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB_SEL(RX_FOM,
+ UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)),
+ &fom);
+ if (ret)
+ return ret;
+
+ h_iter->fom[lane] = (u8)fom;
+ }
+
+ /* Get FOM of device's TX lanes from host's RX_FOM. */
+ for (lane = 0; lane < pwr_mode->lane_rx; lane++) {
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(RX_FOM,
+ UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)),
+ &fom);
+ if (ret)
+ return ret;
+
+ d_iter->fom[lane] = (u8)fom;
+ }
+
+ ret = ufshcd_vops_get_rx_fom(hba, pwr_mode, h_iter, d_iter);
+ if (ret)
+ dev_err(hba->dev, "Failed to get FOM via vops: %d\n", ret);
+
+ return ret;
+}
+
+bool ufshcd_is_txeq_presets_used(struct ufs_hba *hba)
+{
+ return use_txeq_presets;
+}
+
+bool ufshcd_is_txeq_preset_selected(u8 preshoot, u8 deemphasis)
+{
+ int i;
+
+ for (i = 0; i < UFS_TX_EQ_PRESET_MAX; i++) {
+ if (!txeq_presets_selected[i])
+ continue;
+
+ if (preshoot == ufs_tx_eq_preset[i].preshoot &&
+ deemphasis == ufs_tx_eq_preset[i].deemphasis)
+ return true;
+ }
+
+ return false;
+}
+
+/**
+ * tx_eqtr_iter_try_update - Try to update a TX EQTR iterator
+ * @iter: TX EQTR iterator data structure
+ * @preshoot: PreShoot value
+ * @deemphasis: DeEmphasis value
+ *
+ * This function validates whether the provided PreShoot and DeEmphasis
+ * combination can be used or not. If yes, it updates the TX EQTR iterator with
+ * the provided PreShoot and DeEmphasis, and sets the is_updated flag
+ * to indicate the iterator has been updated.
+ */
+static void tx_eqtr_iter_try_update(struct tx_eqtr_iter *iter,
+ u8 preshoot, u8 deemphasis)
+{
+ if (!test_bit(preshoot, &iter->preshoot_bitmap) ||
+ !test_bit(deemphasis, &iter->deemphasis_bitmap) ||
+ (use_txeq_presets && !ufshcd_is_txeq_preset_selected(preshoot, deemphasis))) {
+ iter->is_updated = false;
+ return;
+ }
+
+ iter->preshoot = preshoot;
+ iter->deemphasis = deemphasis;
+ iter->is_updated = true;
+}
+
+/**
+ * tx_eqtr_iter_update() - Update host and device TX EQTR iterators
+ * @preshoot: PreShoot value
+ * @deemphasis: DeEmphasis value
+ * @h_iter: Host TX EQTR iterator data structure
+ * @d_iter: Device TX EQTR iterator data structure
+ *
+ * Updates host and device TX Equalization training iterators with the
+ * provided PreShoot and DeEmphasis.
+ *
+ * Return: true if host and/or device TX Equalization training iterator has
+ * been updated to the provided PreShoot and DeEmphasis, false otherwise.
+ */
+static bool tx_eqtr_iter_update(u8 preshoot, u8 deemphasis,
+ struct tx_eqtr_iter *h_iter,
+ struct tx_eqtr_iter *d_iter)
+{
+ tx_eqtr_iter_try_update(h_iter, preshoot, deemphasis);
+ tx_eqtr_iter_try_update(d_iter, preshoot, deemphasis);
+
+ return h_iter->is_updated || d_iter->is_updated;
+}
+
+/**
+ * ufshcd_tx_eqtr_iter_init - Initialize host and device TX EQTR iterators
+ * @hba: per adapter instance
+ * @h_iter: host TX EQTR iterator data structure
+ * @d_iter: device TX EQTR iterator data structure
+ *
+ * This function initializes the TX EQTR iterator structures for both host and
+ * device by reading their TX equalization capabilities. The capabilities are
+ * cached in the hba structure to avoid redundant DME operations in subsequent
+ * calls. In the TX EQTR procedure, the iterator structures are updated by
+ * tx_eqtr_iter_update() to systematically iterate through supported TX
+ * Equalization setting combinations.
+ *
+ * Returns 0 on success, negative error code otherwise
+ */
+static int ufshcd_tx_eqtr_iter_init(struct ufs_hba *hba,
+ struct tx_eqtr_iter *h_iter,
+ struct tx_eqtr_iter *d_iter)
+{
+ u32 cap;
+ int ret;
+
+ if (!hba->host_preshoot_cap) {
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(TX_HS_PRESHOOT_SETTING_CAP), &cap);
+ if (ret)
+ return ret;
+
+ hba->host_preshoot_cap = cap & TX_EQTR_CAP_MASK;
+ }
+
+ if (!hba->host_deemphasis_cap) {
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(TX_HS_DEEMPHASIS_SETTING_CAP), &cap);
+ if (ret)
+ return ret;
+
+ hba->host_deemphasis_cap = cap & TX_EQTR_CAP_MASK;
+ }
+
+ if (!hba->device_preshoot_cap) {
+ ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(TX_HS_PRESHOOT_SETTING_CAP), &cap);
+ if (ret)
+ return ret;
+
+ hba->device_preshoot_cap = cap & TX_EQTR_CAP_MASK;
+ }
+
+ if (!hba->device_deemphasis_cap) {
+ ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(TX_HS_DEEMPHASIS_SETTING_CAP), &cap);
+ if (ret)
+ return ret;
+
+ hba->device_deemphasis_cap = cap & TX_EQTR_CAP_MASK;
+ }
+
+ /*
+ * Support PreShoot & DeEmphasis of value 0 is mandatory, hence they are
+ * not reflected in PreShoot/DeEmphasis capabilities. Left shift the
+ * capability bitmap by 1 and set bit[0] to reflect value 0 is
+ * supported, such that test_bit() can be used later for convenience.
+ */
+ h_iter->preshoot_bitmap = (hba->host_preshoot_cap << 0x1) | 0x1;
+ h_iter->deemphasis_bitmap = (hba->host_deemphasis_cap << 0x1) | 0x1;
+ d_iter->preshoot_bitmap = (hba->device_preshoot_cap << 0x1) | 0x1;
+ d_iter->deemphasis_bitmap = (hba->device_deemphasis_cap << 0x1) | 0x1;
+
+ return 0;
+}
+
+/**
+ * adapt_cap_to_t_adapt - Calculate TAdapt from adapt capability
+ * @adapt_cap: Adapt capability
+ *
+ * For NRZ:
+ * IF (ADAPT_range = FINE)
+ * TADAPT = 650 x (ADAPT_length + 1)
+ * ELSE (IF ADAPT_range = COARSE)
+ * TADAPT = 650 x 2^ADAPT_length
+ *
+ * Returns calculated TAdapt value in term of Unit Intervals (UI)
+ */
+static inline u64 adapt_cap_to_t_adapt(u32 adapt_cap)
+{
+ u64 tadapt;
+ u8 adapt_length = adapt_cap & ADAPT_LENGTH_MASK;
+
+ if (!IS_ADAPT_RANGE_COARSE(adapt_cap))
+ tadapt = TADAPT_FACTOR * (adapt_length + 1);
+ else
+ tadapt = TADAPT_FACTOR * (1 << adapt_length);
+
+ return tadapt;
+}
+
+/**
+ * adapt_cap_to_t_adapt_l0l3 - Calculate TAdapt_L0_L3 from adapt capability
+ * @adapt_cap: Adapt capability
+ *
+ * For PAM-4:
+ * IF (ADAPT_range = FINE)
+ * TADAPT_L0_L3 = 2^9 x ADAPT_length
+ * ELSE IF (ADAPT_range = COARSE)
+ * TADAPT_L0_L3 = 2^9 x (2^ADAPT_length)
+ *
+ * Returns calculated TAdapt value in term of Unit Intervals (UI)
+ */
+static inline u64 adapt_cap_to_t_adapt_l0l3(u32 adapt_cap)
+{
+ u64 tadapt;
+ u8 adapt_length = adapt_cap & ADAPT_LENGTH_MASK;
+
+ if (!IS_ADAPT_RANGE_COARSE(adapt_cap))
+ tadapt = TADAPT_L0L3_FACTOR * adapt_length;
+ else
+ tadapt = TADAPT_L0L3_FACTOR * (1 << adapt_length);
+
+ return tadapt;
+}
+
+/**
+ * adapt_cap_to_t_adapt_l0l1l2l3 - Calculate TAdapt_L0_L1_L2_L3 from adapt capability
+ * @adapt_cap: Adapt capability
+ *
+ * For PAM-4:
+ * IF (ADAPT_range_L0_L1_L2_L3 = FINE)
+ * TADAPT_L0_L1_L2_L3 = 2^15 x (ADAPT_length_L0_L1_L2_L3 + 1)
+ * ELSE IF (ADAPT_range_L0_L1_L2_L3 = COARSE)
+ * TADAPT_L0_L1_L2_L3 = 2^15 x 2^ADAPT_length_L0_L1_L2_L3
+ *
+ * Returns calculated TAdapt value in term of Unit Intervals (UI)
+ */
+static inline u64 adapt_cap_to_t_adapt_l0l1l2l3(u32 adapt_cap)
+{
+ u64 tadapt;
+ u8 adapt_length = adapt_cap & ADAPT_LENGTH_MASK;
+
+ if (!IS_ADAPT_RANGE_COARSE(adapt_cap))
+ tadapt = TADAPT_L0L1L2L3_FACTOR * (adapt_length + 1);
+ else
+ tadapt = TADAPT_L0L1L2L3_FACTOR * (1 << adapt_length);
+
+ return tadapt;
+}
+
+/**
+ * ufshcd_setup_tx_eqtr_adapt_length - Setup TX adapt length for EQTR
+ * @hba: per adapter instance
+ * @params: TX EQ parameters data structure
+ * @gear: target gear for EQTR
+ *
+ * This function determines and configures the proper TX adapt length (TAdapt)
+ * for the TX EQTR procedure based on the target gear and RX adapt capabilities
+ * of both host and device.
+ *
+ * Guidelines from MIPI UniPro v3.0 spec - select the minimum Adapt Length for
+ * the Equalization Training procedure based on the following conditions:
+ *
+ * If the target High-Speed Gear n is HS-G4 or HS-G5:
+ * PA_TxAdaptLength_EQTR[7:0] >= Max (10us, RX_HS_Gn_ADAPT_INITIAL_Capability,
+ * PA_PeerRxHsGnAdaptInitial)
+ * PA_TxAdaptLength_EQTR[7:0] shall be shorter than PACP_REQUEST_TIMER (10ms)
+ * PA_TxAdaptLength_EQTR[15:8] is not relevant for HS-G4 and HS-G5. This field
+ * is set to 255 (reserved value).
+ *
+ * If the target High-Speed Gear n is HS-G6:
+ * PA_TxAdapthLength_EQTR >= 10us
+ * PA_TxAdapthLength_EQTR[7:0] >= Max (RX_HS_G6_ADAPT_INITIAL_Capability,
+ * PA_PeerRxHsG6AdaptInitialL0L3)
+ * PA_TxAdapthLength_EQTR[15:8] >= Max (RX_HS_G6_ADAPT_INITIAL_L0_L1_L2_L3_Capability,
+ * PA_PeerRxHsG6AdaptInitialL0L1L2L3)
+ * PA_TxAdaptLength_EQTR shall be shorter than PACP_REQUEST_TIMER value of 10ms.
+ *
+ * Since adapt capabilities encode both range (fine/coarse) and length values,
+ * direct comparison is not possible. This function converts adapt capabilities
+ * to actual time durations in Unit Intervals (UI) using the Adapt time
+ * calculation formula in M-PHY v6.0 spec (Table 8), then selects the maximum
+ * to ensure both host and device use adequate TX adapt length.
+ *
+ * Returns 0 on success, negative error code otherwise
+ */
+static int ufshcd_setup_tx_eqtr_adapt_length(struct ufs_hba *hba,
+ struct ufshcd_tx_eq_params *params,
+ u32 gear)
+{
+ struct ufshcd_tx_eqtr_record *rec = params->eqtr_record;
+ u32 adapt_eqtr;
+ int ret;
+
+ if (rec && rec->saved_adapt_eqtr) {
+ adapt_eqtr = rec->saved_adapt_eqtr;
+ goto set_adapt_eqtr;
+ }
+
+ if (gear == UFS_HS_G4 || gear == UFS_HS_G5) {
+ u64 t_adapt, t_adapt_local, t_adapt_peer;
+ u32 adapt_cap_local, adapt_cap_peer, adapt_length;
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(rx_adapt_initial_cap[gear - 1],
+ UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
+ &adapt_cap_local);
+ if (ret)
+ return ret;
+
+ if (adapt_cap_local > ADAPT_LENGTH_MAX) {
+ dev_err(hba->dev, "local RX_HS_G%u_ADAPT_INITIAL_CAP (0x%x) exceeds MAX\n",
+ gear, adapt_cap_local);
+ return -EINVAL;
+ }
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(pa_peer_rx_adapt_initial[gear - 1]),
+ &adapt_cap_peer);
+ if (ret)
+ return ret;
+
+ if (adapt_cap_peer > ADAPT_LENGTH_MAX) {
+ dev_err(hba->dev, "peer RX_HS_G%u_ADAPT_INITIAL_CAP (0x%x) exceeds MAX\n",
+ gear, adapt_cap_peer);
+ return -EINVAL;
+ }
+
+ t_adapt_local = adapt_cap_to_t_adapt(adapt_cap_local);
+ t_adapt_peer = adapt_cap_to_t_adapt(adapt_cap_peer);
+ t_adapt = max(t_adapt_local, t_adapt_peer);
+
+ dev_dbg(hba->dev, "local RX_HS_G%u_ADAPT_INITIAL_CAP = 0x%x\n",
+ gear, adapt_cap_local);
+ dev_dbg(hba->dev, "peer RX_HS_G%u_ADAPT_INITIAL_CAP = 0x%x\n",
+ gear, adapt_cap_peer);
+ dev_dbg(hba->dev, "t_adapt_local = %llu UI, t_adapt_peer = %llu UI\n",
+ t_adapt_local, t_adapt_peer);
+ dev_dbg(hba->dev, "TAdapt %llu UI selected for TX EQTR\n",
+ t_adapt);
+
+ adapt_length = (t_adapt_local >= t_adapt_peer) ?
+ adapt_cap_local : adapt_cap_peer;
+
+ if (gear == UFS_HS_G4 && t_adapt < TX_EQTR_HS_G4_MIN_T_ADAPT) {
+ dev_dbg(hba->dev, "TAdapt %llu UI is too short for TX EQTR for HS-G%u, use default Adapt 0x%x\n",
+ t_adapt, gear, TX_EQTR_HS_G4_ADAPT_DEFAULT);
+ adapt_length = TX_EQTR_HS_G4_ADAPT_DEFAULT;
+ } else if (gear == UFS_HS_G5 && t_adapt < TX_EQTR_HS_G5_MIN_T_ADAPT) {
+ dev_dbg(hba->dev, "TAdapt %llu UI is too short for TX EQTR for HS-G%u, use default Adapt 0x%x\n",
+ t_adapt, gear, TX_EQTR_HS_G5_ADAPT_DEFAULT);
+ adapt_length = TX_EQTR_HS_G5_ADAPT_DEFAULT;
+ }
+
+ adapt_eqtr = adapt_length |
+ (TX_EQTR_ADAPT_RESERVED << TX_EQTR_ADAPT_LENGTH_L0L1L2L3_SHIFT);
+ } else if (gear == UFS_HS_G6) {
+ u64 t_adapt, t_adapt_l0l3, t_adapt_l0l3_local, t_adapt_l0l3_peer;
+ u64 t_adapt_l0l1l2l3, t_adapt_l0l1l2l3_local, t_adapt_l0l1l2l3_peer;
+ u32 adapt_l0l3_cap_local, adapt_l0l3_cap_peer, adapt_length_l0l3;
+ u32 adapt_l0l1l2l3_cap_local, adapt_l0l1l2l3_cap_peer, adapt_length_l0l1l2l3;
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(rx_adapt_initial_cap[gear - 1],
+ UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
+ &adapt_l0l3_cap_local);
+ if (ret)
+ return ret;
+
+ if (adapt_l0l3_cap_local > ADAPT_L0L3_LENGTH_MAX) {
+ dev_err(hba->dev, "local RX_HS_G%u_ADAPT_INITIAL_CAP (0x%x) exceeds MAX\n",
+ gear, adapt_l0l3_cap_local);
+ return -EINVAL;
+ }
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(pa_peer_rx_adapt_initial[gear - 1]),
+ &adapt_l0l3_cap_peer);
+ if (ret)
+ return ret;
+
+ if (adapt_l0l3_cap_peer > ADAPT_L0L3_LENGTH_MAX) {
+ dev_err(hba->dev, "peer RX_HS_G%u_ADAPT_INITIAL_CAP (0x%x) exceeds MAX\n",
+ gear, adapt_l0l3_cap_peer);
+ return -EINVAL;
+ }
+
+ t_adapt_l0l3_local = adapt_cap_to_t_adapt_l0l3(adapt_l0l3_cap_local);
+ t_adapt_l0l3_peer = adapt_cap_to_t_adapt_l0l3(adapt_l0l3_cap_peer);
+
+ dev_dbg(hba->dev, "local RX_HS_G%u_ADAPT_INITIAL_CAP = 0x%x\n",
+ gear, adapt_l0l3_cap_local);
+ dev_dbg(hba->dev, "peer RX_HS_G%u_ADAPT_INITIAL_CAP = 0x%x\n",
+ gear, adapt_l0l3_cap_peer);
+ dev_dbg(hba->dev, "t_adapt_l0l3_local = %llu UI, t_adapt_l0l3_peer = %llu UI\n",
+ t_adapt_l0l3_local, t_adapt_l0l3_peer);
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(RX_HS_G6_ADAPT_INITIAL_L0L1L2L3_CAP,
+ UIC_ARG_MPHY_RX_GEN_SEL_INDEX(0)),
+ &adapt_l0l1l2l3_cap_local);
+ if (ret)
+ return ret;
+
+ if (adapt_l0l1l2l3_cap_local > ADAPT_L0L1L2L3_LENGTH_MAX) {
+ dev_err(hba->dev, "local RX_HS_G%u_ADAPT_INITIAL_L0L1L2L3_CAP (0x%x) exceeds MAX\n",
+ gear, adapt_l0l1l2l3_cap_local);
+ return -EINVAL;
+ }
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PEERRXHSG6ADAPTINITIALL0L1L2L3),
+ &adapt_l0l1l2l3_cap_peer);
+ if (ret)
+ return ret;
+
+ if (adapt_l0l1l2l3_cap_peer > ADAPT_L0L1L2L3_LENGTH_MAX) {
+ dev_err(hba->dev, "peer RX_HS_G%u_ADAPT_INITIAL_L0L1L2L3_CAP (0x%x) exceeds MAX\n",
+ gear, adapt_l0l1l2l3_cap_peer);
+ return -EINVAL;
+ }
+
+ t_adapt_l0l1l2l3_local = adapt_cap_to_t_adapt_l0l1l2l3(adapt_l0l1l2l3_cap_local);
+ t_adapt_l0l1l2l3_peer = adapt_cap_to_t_adapt_l0l1l2l3(adapt_l0l1l2l3_cap_peer);
+
+ dev_dbg(hba->dev, "local RX_HS_G%u_ADAPT_INITIAL_L0L1L2L3_CAP = 0x%x\n",
+ gear, adapt_l0l1l2l3_cap_local);
+ dev_dbg(hba->dev, "peer RX_HS_G%u_ADAPT_INITIAL_L0L1L2L3_CAP = 0x%x\n",
+ gear, adapt_l0l1l2l3_cap_peer);
+ dev_dbg(hba->dev, "t_adapt_l0l1l2l3_local = %llu UI, t_adapt_l0l1l2l3_peer = %llu UI\n",
+ t_adapt_l0l1l2l3_local, t_adapt_l0l1l2l3_peer);
+
+ t_adapt_l0l1l2l3 = max(t_adapt_l0l1l2l3_local, t_adapt_l0l1l2l3_peer);
+ t_adapt_l0l3 = max(t_adapt_l0l3_local, t_adapt_l0l3_peer);
+ t_adapt = t_adapt_l0l3 + t_adapt_l0l1l2l3;
+
+ dev_dbg(hba->dev, "TAdapt %llu PAM-4 UI selected for TX EQTR\n",
+ t_adapt);
+
+ adapt_length_l0l3 = (t_adapt_l0l3_local >= t_adapt_l0l3_peer) ?
+ adapt_l0l3_cap_local : adapt_l0l3_cap_peer;
+ adapt_length_l0l1l2l3 = (t_adapt_l0l1l2l3_local >= t_adapt_l0l1l2l3_peer) ?
+ adapt_l0l1l2l3_cap_local : adapt_l0l1l2l3_cap_peer;
+
+ if (t_adapt < TX_EQTR_HS_G6_MIN_T_ADAPT) {
+ dev_dbg(hba->dev, "TAdapt %llu UI is too short for TX EQTR for HS-G%u, use default Adapt 0x%x\n",
+ t_adapt, gear, TX_EQTR_HS_G6_ADAPT_DEFAULT);
+ adapt_length_l0l3 = TX_EQTR_HS_G6_ADAPT_DEFAULT;
+ }
+
+ adapt_eqtr = adapt_length_l0l3 |
+ (adapt_length_l0l1l2l3 << TX_EQTR_ADAPT_LENGTH_L0L1L2L3_SHIFT);
+ } else {
+ return -EINVAL;
+ }
+
+ if (rec)
+ rec->saved_adapt_eqtr = (u16)adapt_eqtr;
+
+set_adapt_eqtr:
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXADAPTLENGTH_EQTR), adapt_eqtr);
+ if (ret)
+ dev_err(hba->dev, "Failed to set adapt length for TX EQTR: %d\n", ret);
+ else
+ dev_dbg(hba->dev, "PA_TXADAPTLENGTH_EQTR configured to 0x%08x\n", adapt_eqtr);
+
+ return ret;
+}
+
+/**
+ * ufshcd_compose_tx_eqtr_setting - Compose TX EQTR setting
+ * @iter: TX EQTR iterator data structure
+ * @num_lanes: number of active lanes
+ *
+ * Returns composed TX EQTR setting, same setting is used for all active lanes
+ */
+static inline u32 ufshcd_compose_tx_eqtr_setting(struct tx_eqtr_iter *iter,
+ int num_lanes)
+{
+ u32 setting = 0;
+ int lane;
+
+ /* Replicate the iterator's PreShoot/DeEmphasis into each lane's bitfield */
+ for (lane = 0; lane < num_lanes; lane++) {
+ setting |= TX_HS_PRESHOOT_BITS(lane, iter->preshoot);
+ setting |= TX_HS_DEEMPHASIS_BITS(lane, iter->deemphasis);
+ }
+
+ return setting;
+}
+
+/**
+ * ufshcd_apply_tx_eqtr_settings - Apply TX EQTR setting
+ * @hba: per adapter instance
+ * @pwr_mode: target power mode containing gear and rate information
+ * @h_iter: host TX EQTR iterator data structure
+ * @d_iter: device TX EQTR iterator data structure
+ *
+ * Returns 0 on success, negative error code otherwise
+ */
+static int ufshcd_apply_tx_eqtr_settings(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *pwr_mode,
+ struct tx_eqtr_iter *h_iter,
+ struct tx_eqtr_iter *d_iter)
+{
+ u32 setting;
+ int ret;
+
+ /* Host TX setting covers the host's TX lanes */
+ setting = ufshcd_compose_tx_eqtr_setting(h_iter, pwr_mode->lane_tx);
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXEQTRSETTING), setting);
+ if (ret)
+ return ret;
+
+ /* Peer (device) TX setting uses lane_rx, i.e. the host's RX lane count */
+ setting = ufshcd_compose_tx_eqtr_setting(d_iter, pwr_mode->lane_rx);
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PEERTXEQTRSETTING), setting);
+ if (ret)
+ return ret;
+
+ /* Give the host variant a chance to apply additional settings */
+ ret = ufshcd_vops_apply_tx_eqtr_settings(hba, pwr_mode, h_iter, d_iter);
+
+ return ret;
+}
+
+/**
+ * ufshcd_update_tx_eq_params - Update TX Equalization params
+ * @params: TX EQ parameters data structure
+ * @pwr_mode: target power mode containing gear and rate
+ * @eqtr_data: TX EQTR data structure
+ *
+ * Update TX Equalization params using results from TX EQTR data. Check also
+ * the TX EQTR FOM value for each TX lane in the TX EQTR data. If a TX lane got
+ * a FOM value of 0, restore the TX Equalization settings from the last known
+ * valid TX Equalization params for that specific TX lane.
+ */
+static inline void
+ufshcd_update_tx_eq_params(struct ufshcd_tx_eq_params *params,
+ struct ufs_pa_layer_attr *pwr_mode,
+ struct ufshcd_tx_eqtr_data *eqtr_data)
+{
+ struct ufshcd_tx_eqtr_record *rec = params->eqtr_record;
+
+ if (params->is_valid) {
+ int lane;
+
+ /* FOM of 0: training yielded no usable result for that lane;
+ * fall back to the last known valid per-lane settings.
+ */
+ for (lane = 0; lane < pwr_mode->lane_tx; lane++)
+ if (eqtr_data->host[lane].fom_val == 0)
+ eqtr_data->host[lane] = params->host[lane];
+
+ for (lane = 0; lane < pwr_mode->lane_rx; lane++)
+ if (eqtr_data->device[lane].fom_val == 0)
+ eqtr_data->device[lane] = params->device[lane];
+ }
+
+ /* Cache the (possibly patched) training results as the new params */
+ memcpy(params->host, eqtr_data->host, sizeof(params->host));
+ memcpy(params->device, eqtr_data->device, sizeof(params->device));
+
+ if (!rec)
+ return;
+
+ /* Record FOM history with a timestamp and running sequence index */
+ memcpy(rec->host_fom, eqtr_data->host_fom, sizeof(rec->host_fom));
+ memcpy(rec->device_fom, eqtr_data->device_fom, sizeof(rec->device_fom));
+ rec->last_record_ts = ktime_get();
+ rec->last_record_index++;
+}
+
+/**
+ * __ufshcd_tx_eqtr - TX Equalization Training (EQTR) procedure
+ * @hba: per adapter instance
+ * @params: TX EQ parameters data structure
+ * @pwr_mode: target power mode containing gear and rate information
+ *
+ * This function implements the complete TX EQTR procedure as defined in UFSHCI
+ * v5.0 specification. It iterates through all possible combinations of PreShoot
+ * and DeEmphasis settings to find the optimal TX Equalization settings for all
+ * active lanes.
+ *
+ * Returns 0 on success, negative error code otherwise
+ */
+static int __ufshcd_tx_eqtr(struct ufs_hba *hba,
+ struct ufshcd_tx_eq_params *params,
+ struct ufs_pa_layer_attr *pwr_mode)
+{
+ /* __free(kfree) releases eqtr_data automatically on every return path */
+ struct ufshcd_tx_eqtr_data *eqtr_data __free(kfree) =
+ kzalloc(sizeof(*eqtr_data), GFP_KERNEL);
+ struct tx_eqtr_iter h_iter = {};
+ struct tx_eqtr_iter d_iter = {};
+ u32 gear = pwr_mode->gear_tx;
+ u8 preshoot, deemphasis;
+ ktime_t start;
+ int ret;
+
+ if (!eqtr_data)
+ return -ENOMEM;
+
+ dev_info(hba->dev, "Start TX EQTR procedure for HS-G%u, Rate-%s, RX Lanes: %u, TX Lanes: %u\n",
+ gear, ufs_hs_rate_to_str(pwr_mode->hs_rate),
+ pwr_mode->lane_rx, pwr_mode->lane_tx);
+
+ start = ktime_get();
+
+ /* Step 1 - Determine the TX Adapt Length for EQTR */
+ ret = ufshcd_setup_tx_eqtr_adapt_length(hba, params, gear);
+ if (ret) {
+ dev_err(hba->dev, "Failed to setup TX EQTR Adaptation length: %d\n", ret);
+ return ret;
+ }
+
+ /* Step 2 - Determine TX Equalization setting capabilities */
+ ret = ufshcd_tx_eqtr_iter_init(hba, &h_iter, &d_iter);
+ if (ret) {
+ dev_err(hba->dev, "Failed to init TX EQTR data: %d\n", ret);
+ return ret;
+ }
+
+ /* TX EQTR main loop: sweep every PreShoot/DeEmphasis combination */
+ for (preshoot = 0; preshoot < TX_HS_NUM_PRESHOOT; preshoot++) {
+ for (deemphasis = 0; deemphasis < TX_HS_NUM_DEEMPHASIS; deemphasis++) {
+ /* Skip combinations the iterators do not accept */
+ if (!tx_eqtr_iter_update(preshoot, deemphasis, &h_iter, &d_iter))
+ continue;
+
+ /* Step 3 - Apply TX EQTR settings */
+ ret = ufshcd_apply_tx_eqtr_settings(hba, pwr_mode, &h_iter, &d_iter);
+ if (ret) {
+ dev_err(hba->dev, "Failed to apply TX EQTR settings (PreShoot %u, DeEmphasis %u): %d\n",
+ preshoot, deemphasis, ret);
+ return ret;
+ }
+
+ /* Step 4 - Trigger UIC TX EQTR */
+ ret = ufshcd_uic_tx_eqtr(hba, gear);
+ if (ret) {
+ dev_err(hba->dev, "Failed to trigger UIC TX EQTR for target gear %u: %d\n",
+ gear, ret);
+ return ret;
+ }
+
+ /* Step 5 - Get FOM */
+ ret = ufshcd_get_rx_fom(hba, pwr_mode, &h_iter, &d_iter);
+ if (ret) {
+ dev_err(hba->dev, "Failed to get RX_FOM: %d\n",
+ ret);
+ return ret;
+ }
+
+ /* Keep the best-scoring settings seen so far in eqtr_data */
+ ufshcd_evaluate_tx_eqtr_fom(hba, pwr_mode, eqtr_data, &h_iter, &d_iter);
+ }
+ }
+
+ dev_info(hba->dev, "TX EQTR procedure completed! Time elapsed: %llu ms\n",
+ ktime_to_ms(ktime_sub(ktime_get(), start)));
+
+ ufshcd_update_tx_eq_params(params, pwr_mode, eqtr_data);
+
+ return ret;
+}
+
+/**
+ * ufshcd_tx_eqtr_prepare - Prepare UFS link for TX EQTR procedure
+ * @hba: per adapter instance
+ * @pwr_mode: target power mode containing gear and rate
+ *
+ * This function prepares the UFS link for TX Equalization Training (EQTR) by
+ * establishing the proper initial conditions required by the EQTR procedure.
+ * It ensures that EQTR starts from the most reliable Power Mode (HS-G1) with
+ * all connected lanes activated and sets host TX HS Adapt Type to INITIAL.
+ *
+ * Returns 0 on successful preparation, negative error code on failure
+ */
+static int ufshcd_tx_eqtr_prepare(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *pwr_mode)
+{
+ struct ufs_pa_layer_attr pwr_mode_hs_g1 = {
+ /* TX EQTR shall be initiated from the most reliable HS-G1 */
+ .gear_rx = UFS_HS_G1,
+ .gear_tx = UFS_HS_G1,
+ .lane_rx = pwr_mode->lane_rx,
+ .lane_tx = pwr_mode->lane_tx,
+ .pwr_rx = FAST_MODE,
+ .pwr_tx = FAST_MODE,
+ /* Use the target power mode's HS rate */
+ .hs_rate = pwr_mode->hs_rate,
+ };
+ u32 rate = pwr_mode->hs_rate;
+ int ret;
+
+ /* Change power mode to HS-G1, activate all connected lanes.
+ * DONT_FORCE: the PMC is skipped if the link is already in this mode.
+ */
+ ret = ufshcd_change_power_mode(hba, &pwr_mode_hs_g1,
+ UFSHCD_PMC_POLICY_DONT_FORCE);
+ if (ret) {
+ dev_err(hba->dev, "TX EQTR: Failed to change power mode to HS-G1, Rate-%s: %d\n",
+ ufs_hs_rate_to_str(rate), ret);
+ return ret;
+ }
+
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE),
+ PA_INITIAL_ADAPT);
+ if (ret)
+ dev_err(hba->dev, "TX EQTR: Failed to set Host Adapt type to INITIAL: %d\n",
+ ret);
+
+ return ret;
+}
+
+/**
+ * ufshcd_tx_eqtr_unprepare - Restore the pre-EQTR Power Mode
+ * @hba: per adapter instance
+ * @pwr_mode: power mode saved before EQTR preparation, to be restored
+ *
+ * No-op when the saved mode is not a valid HS mode (SLOWAUTO or HS rate 0),
+ * presumably because no HS Power Mode change happened yet — TODO confirm.
+ */
+static void ufshcd_tx_eqtr_unprepare(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *pwr_mode)
+{
+ int err;
+
+ if (pwr_mode->pwr_rx == SLOWAUTO_MODE || pwr_mode->hs_rate == 0)
+ return;
+
+ err = ufshcd_change_power_mode(hba, pwr_mode,
+ UFSHCD_PMC_POLICY_DONT_FORCE);
+ if (err)
+ dev_err(hba->dev, "%s: Failed to restore Power Mode: %d\n",
+ __func__, err);
+}
+
+/**
+ * ufshcd_tx_eqtr - Perform TX EQTR procedures with vops callbacks
+ * @hba: per adapter instance
+ * @params: TX EQ parameters data structure to populate
+ * @pwr_mode: target power mode containing gear and rate information
+ *
+ * This is the main entry point for performing TX Equalization Training (EQTR)
+ * procedure as defined in UFSHCI v5.0 specification. It serves as a wrapper
+ * around __ufshcd_tx_eqtr() to provide vops support through the variant
+ * operations framework.
+ *
+ * Returns 0 on success, negative error code on failure
+ */
+static int ufshcd_tx_eqtr(struct ufs_hba *hba,
+ struct ufshcd_tx_eq_params *params,
+ struct ufs_pa_layer_attr *pwr_mode)
+{
+ struct ufs_pa_layer_attr old_pwr_info;
+ int ret;
+
+ /* Lazily allocate the per-params training record on first use */
+ if (!params->eqtr_record) {
+ params->eqtr_record = devm_kzalloc(hba->dev,
+ sizeof(*params->eqtr_record),
+ GFP_KERNEL);
+ if (!params->eqtr_record)
+ return -ENOMEM;
+ }
+
+ /* Snapshot the current power mode so it can be restored on failure */
+ memcpy(&old_pwr_info, &hba->pwr_info, sizeof(struct ufs_pa_layer_attr));
+
+ ret = ufshcd_tx_eqtr_prepare(hba, pwr_mode);
+ if (ret) {
+ dev_err(hba->dev, "Failed to prepare TX EQTR: %d\n", ret);
+ goto out;
+ }
+
+ ret = ufshcd_vops_tx_eqtr_notify(hba, PRE_CHANGE, pwr_mode);
+ if (ret)
+ goto out;
+
+ ret = __ufshcd_tx_eqtr(hba, params, pwr_mode);
+ if (ret)
+ goto out;
+
+ ret = ufshcd_vops_tx_eqtr_notify(hba, POST_CHANGE, pwr_mode);
+
+out:
+ /* On any failure, fall back to the power mode in effect before EQTR */
+ if (ret)
+ ufshcd_tx_eqtr_unprepare(hba, &old_pwr_info);
+
+ return ret;
+}
+
+/**
+ * ufshcd_config_tx_eq_settings - Configure TX Equalization settings
+ * @hba: per adapter instance
+ * @pwr_mode: target power mode containing gear and rate information
+ * @force_tx_eqtr: execute the TX EQTR procedure
+ *
+ * This function finds and sets the TX Equalization settings for the given
+ * target power mode.
+ *
+ * Returns 0 on success, error code otherwise
+ */
+int ufshcd_config_tx_eq_settings(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *pwr_mode,
+ bool force_tx_eqtr)
+{
+ struct ufshcd_tx_eq_params *params;
+ u32 gear, rate;
+
+ if (!ufshcd_is_tx_eq_supported(hba) || !use_adaptive_txeq)
+ return 0;
+
+ if (!hba->max_pwr_info.is_valid) {
+ dev_err(hba->dev, "Max power info is invalid\n");
+ return -EINVAL;
+ }
+
+ if (!pwr_mode) {
+ dev_err(hba->dev, "Target power mode is NULL\n");
+ return -EINVAL;
+ }
+
+ gear = pwr_mode->gear_tx;
+ rate = pwr_mode->hs_rate;
+
+ if (gear < UFS_HS_G1 || gear > UFS_HS_GEAR_MAX) {
+ dev_err(hba->dev, "Invalid HS-Gear (%u) for TX Equalization\n",
+ gear);
+ return -EINVAL;
+ } else if (gear < max_t(u32, adaptive_txeq_gear, UFS_HS_G4)) {
+ /* TX EQTR is supported for HS-G4 and higher Gears */
+ return 0;
+ }
+
+ if (rate != PA_HS_MODE_A && rate != PA_HS_MODE_B) {
+ dev_err(hba->dev, "Invalid HS-Rate (%u) for TX Equalization\n",
+ rate);
+ return -EINVAL;
+ }
+
+ /* Train only when there are no valid settings yet, or when forced */
+ params = &hba->tx_eq_params[gear - 1];
+ if (!params->is_valid || force_tx_eqtr) {
+ int ret;
+
+ ret = ufshcd_tx_eqtr(hba, params, pwr_mode);
+ if (ret) {
+ dev_err(hba->dev, "Failed to train TX Equalization for HS-G%u, Rate-%s: %d\n",
+ gear, ufs_hs_rate_to_str(rate), ret);
+ return ret;
+ }
+
+ /* Mark TX Equalization settings as valid */
+ params->is_valid = true;
+ params->is_applied = false;
+ }
+
+ /* Apply valid-but-not-yet-applied settings to host and device */
+ if (params->is_valid && !params->is_applied) {
+ int ret;
+
+ ret = ufshcd_apply_tx_eq_settings(hba, params, gear);
+ if (ret) {
+ dev_err(hba->dev, "Failed to apply TX Equalization settings for HS-G%u, Rate-%s: %d\n",
+ gear, ufs_hs_rate_to_str(rate), ret);
+ return ret;
+ }
+
+ params->is_applied = true;
+ }
+
+ return 0;
+}
+
+/**
+ * ufshcd_apply_valid_tx_eq_settings - Apply valid TX Equalization settings
+ * @hba: per-adapter instance
+ *
+ * This function iterates through all supported High-Speed (HS) gears and
+ * applies valid TX Equalization settings to both Host and Device.
+ */
+void ufshcd_apply_valid_tx_eq_settings(struct ufs_hba *hba)
+{
+ struct ufshcd_tx_eq_params *params;
+ int gear, err;
+
+ if (!ufshcd_is_tx_eq_supported(hba))
+ return;
+
+ if (!hba->max_pwr_info.is_valid) {
+ dev_err(hba->dev, "Max power info is invalid, cannot apply TX Equalization settings\n");
+ return;
+ }
+
+ for (gear = UFS_HS_G1; gear <= UFS_HS_GEAR_MAX; gear++) {
+ params = &hba->tx_eq_params[gear - 1];
+
+ /* Only apply gears that have previously trained, valid params */
+ if (params->is_valid) {
+ err = ufshcd_apply_tx_eq_settings(hba, params, gear);
+ if (err) {
+ params->is_applied = false;
+ dev_err(hba->dev, "Failed to apply TX Equalization settings for HS-G%u: %d\n",
+ gear, err);
+ } else {
+ params->is_applied = true;
+ }
+ }
+ }
+}
+
+/**
+ * ufshcd_retrain_tx_eq - Retrain TX Equalization and apply new settings
+ * @hba: per-adapter instance
+ * @gear: target High-Speed (HS) gear for retraining
+ *
+ * This function initiates a refresh of the TX Equalization settings for a
+ * specific HS gear. It scales the clocks to maximum frequency, negotiates the
+ * power mode with the device, retrains TX EQ and applies new TX EQ settings
+ * by conducting a Power Mode change.
+ *
+ * Returns 0 on success, non-zero error code otherwise
+ */
+int ufshcd_retrain_tx_eq(struct ufs_hba *hba, u32 gear)
+{
+ struct ufs_pa_layer_attr new_pwr_info, final_params = {};
+ int ret;
+
+ if (!ufshcd_is_tx_eq_supported(hba) || !use_adaptive_txeq)
+ return -EOPNOTSUPP;
+
+ if (gear < adaptive_txeq_gear)
+ return -ERANGE;
+
+ /* Keep the device powered/active for the duration of retraining */
+ ufshcd_hold(hba);
+
+ /* Quiesce I/O; give pending commands up to 1 s to drain */
+ ret = ufshcd_pause_command_processing(hba, 1 * USEC_PER_SEC);
+ if (ret) {
+ ufshcd_release(hba);
+ return ret;
+ }
+
+ /* scale up clocks to max frequency before TX EQTR */
+ if (ufshcd_is_clkscaling_supported(hba))
+ ufshcd_scale_clks(hba, ULONG_MAX, true);
+
+ new_pwr_info = hba->pwr_info;
+ new_pwr_info.gear_tx = gear;
+ new_pwr_info.gear_rx = gear;
+
+ /* On negotiation failure (e.g. no vop), use the requested mode as-is */
+ ret = ufshcd_vops_negotiate_pwr_mode(hba, &new_pwr_info, &final_params);
+ if (ret)
+ memcpy(&final_params, &new_pwr_info, sizeof(final_params));
+
+ if (final_params.gear_tx != gear) {
+ dev_err(hba->dev, "Negotiated Gear (%u) does not match target Gear (%u)\n",
+ final_params.gear_tx, gear);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = ufshcd_config_tx_eq_settings(hba, &final_params, true);
+ if (ret) {
+ dev_err(hba->dev, "Failed to config TX Equalization for HS-G%u, Rate-%s: %d\n",
+ final_params.gear_tx,
+ ufs_hs_rate_to_str(final_params.hs_rate), ret);
+ goto out;
+ }
+
+ /* Change Power Mode to apply the new TX EQ settings */
+ ret = ufshcd_change_power_mode(hba, &final_params,
+ UFSHCD_PMC_POLICY_FORCE);
+ if (ret)
+ dev_err(hba->dev, "%s: Failed to change Power Mode to HS-G%u, Rate-%s: %d\n",
+ __func__, final_params.gear_tx,
+ ufs_hs_rate_to_str(final_params.hs_rate), ret);
+
+out:
+ ufshcd_resume_command_processing(hba);
+ ufshcd_release(hba);
+
+ return ret;
+}
diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h
index 6d3d14e883b8..0a72148cb053 100644
--- a/drivers/ufs/core/ufshcd-priv.h
+++ b/drivers/ufs/core/ufshcd-priv.h
@@ -80,6 +80,9 @@ u32 ufshcd_mcq_read_mcqiacr(struct ufs_hba *hba, int i);
void ufshcd_mcq_write_mcqiacr(struct ufs_hba *hba, u32 val, int i);
int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag);
void ufshcd_release_scsi_cmd(struct ufs_hba *hba, struct scsi_cmnd *cmd);
+int ufshcd_pause_command_processing(struct ufs_hba *hba, u64 timeout_us);
+void ufshcd_resume_command_processing(struct ufs_hba *hba);
+int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq, bool scale_up);
/**
* enum ufs_descr_fmt - UFS string descriptor format
@@ -105,6 +108,16 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable);
int ufshcd_read_device_lvl_exception_id(struct ufs_hba *hba, u64 *exception_id);
+int ufshcd_uic_tx_eqtr(struct ufs_hba *hba, int gear);
+void ufshcd_apply_valid_tx_eq_settings(struct ufs_hba *hba);
+int ufshcd_config_tx_eq_settings(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *pwr_mode,
+ bool force_tx_eqtr);
+void ufshcd_print_tx_eq_params(struct ufs_hba *hba);
+bool ufshcd_is_txeq_presets_used(struct ufs_hba *hba);
+bool ufshcd_is_txeq_preset_selected(u8 preshoot, u8 deemphasis);
+int ufshcd_retrain_tx_eq(struct ufs_hba *hba, u32 gear);
+
/* Wrapper functions for safely calling variant operations */
static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
{
@@ -169,14 +182,24 @@ static inline int ufshcd_vops_link_startup_notify(struct ufs_hba *hba,
return 0;
}
+/* Negotiate the final power mode via the host variant; returns -ENOTSUPP
+ * when the variant does not implement the hook (callers treat that as
+ * "use the desired mode as-is").
+ */
+static inline int ufshcd_vops_negotiate_pwr_mode(struct ufs_hba *hba,
+ const struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
+{
+ if (hba->vops && hba->vops->negotiate_pwr_mode)
+ return hba->vops->negotiate_pwr_mode(hba, dev_max_params,
+ dev_req_params);
+
+ return -ENOTSUPP;
+}
+
static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status,
- const struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
{
if (hba->vops && hba->vops->pwr_change_notify)
return hba->vops->pwr_change_notify(hba, status,
- dev_max_params, dev_req_params);
+ dev_req_params);
return -ENOTSUPP;
}
@@ -289,6 +312,38 @@ static inline u32 ufshcd_vops_freq_to_gear_speed(struct ufs_hba *hba, unsigned l
return 0;
}
+/* Optional variant hook to read RX FOM values; success (0) when absent */
+static inline int ufshcd_vops_get_rx_fom(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *pwr_mode,
+ struct tx_eqtr_iter *h_iter,
+ struct tx_eqtr_iter *d_iter)
+{
+ if (hba->vops && hba->vops->get_rx_fom)
+ return hba->vops->get_rx_fom(hba, pwr_mode, h_iter, d_iter);
+
+ return 0;
+}
+
+/* Optional variant hook to apply extra TX EQTR settings; 0 when absent */
+static inline int ufshcd_vops_apply_tx_eqtr_settings(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *pwr_mode,
+ struct tx_eqtr_iter *h_iter,
+ struct tx_eqtr_iter *d_iter)
+{
+ if (hba->vops && hba->vops->apply_tx_eqtr_settings)
+ return hba->vops->apply_tx_eqtr_settings(hba, pwr_mode, h_iter, d_iter);
+
+ return 0;
+}
+
+/* Optional PRE/POST TX EQTR notification to the variant; 0 when absent */
+static inline int ufshcd_vops_tx_eqtr_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status,
+ struct ufs_pa_layer_attr *pwr_mode)
+{
+ if (hba->vops && hba->vops->tx_eqtr_notify)
+ return hba->vops->tx_eqtr_notify(hba, status, pwr_mode);
+
+ return 0;
+}
+
extern const struct ufs_pm_lvl_states ufs_pm_lvl_states[];
/**
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 54ad34a4c4ef..cb81aa94d5c4 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -333,11 +333,7 @@ static inline void ufshcd_add_delay_before_dme_cmd(struct ufs_hba *hba);
static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
static void ufshcd_resume_clkscaling(struct ufs_hba *hba);
static void ufshcd_suspend_clkscaling(struct ufs_hba *hba);
-static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
- bool scale_up);
static irqreturn_t ufshcd_intr(int irq, void *__hba);
-static int ufshcd_change_power_mode(struct ufs_hba *hba,
- struct ufs_pa_layer_attr *pwr_mode);
static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on);
static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on);
static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
@@ -1211,8 +1207,7 @@ static int ufshcd_opp_set_rate(struct ufs_hba *hba, unsigned long freq)
*
* Return: 0 if successful; < 0 upon failure.
*/
-static int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq,
- bool scale_up)
+int ufshcd_scale_clks(struct ufs_hba *hba, unsigned long freq, bool scale_up)
{
int ret = 0;
ktime_t start = ktime_get();
@@ -1366,6 +1361,48 @@ out:
}
/**
+ * ufshcd_pause_command_processing - Pause command processing
+ * @hba: per-adapter instance
+ * @timeout_us: timeout in microseconds to wait for pending commands to finish
+ *
+ * This function stops new command submissions and waits for existing commands
+ * to complete.
+ *
+ * Return: 0 on success, %-EBUSY if commands did not finish within @timeout_us.
+ * On failure, all acquired locks are released and the tagset is unquiesced.
+ */
+int ufshcd_pause_command_processing(struct ufs_hba *hba, u64 timeout_us)
+{
+ int ret = 0;
+
+ /* Acquire in this order: scan_mutex -> tagset quiesce -> clk_scaling_lock;
+ * ufshcd_resume_command_processing() releases in reverse order.
+ */
+ mutex_lock(&hba->host->scan_mutex);
+ blk_mq_quiesce_tagset(&hba->host->tag_set);
+ down_write(&hba->clk_scaling_lock);
+
+ /* Undo everything if pending commands do not drain in time */
+ if (ufshcd_wait_for_pending_cmds(hba, timeout_us)) {
+ ret = -EBUSY;
+ up_write(&hba->clk_scaling_lock);
+ blk_mq_unquiesce_tagset(&hba->host->tag_set);
+ mutex_unlock(&hba->host->scan_mutex);
+ }
+
+ return ret;
+}
+
+/**
+ * ufshcd_resume_command_processing - Resume command processing
+ * @hba: per-adapter instance
+ *
+ * This function resumes command submissions. Releases the locks taken by
+ * ufshcd_pause_command_processing() in reverse acquisition order.
+ */
+void ufshcd_resume_command_processing(struct ufs_hba *hba)
+{
+ up_write(&hba->clk_scaling_lock);
+ blk_mq_unquiesce_tagset(&hba->host->tag_set);
+ mutex_unlock(&hba->host->scan_mutex);
+}
+
+/**
* ufshcd_scale_gear - scale up/down UFS gear
* @hba: per adapter instance
* @target_gear: target gear to scale to
@@ -1410,7 +1447,8 @@ static int ufshcd_scale_gear(struct ufs_hba *hba, u32 target_gear, bool scale_up
config_pwr_mode:
/* check if the power mode needs to be changed or not? */
- ret = ufshcd_config_pwr_mode(hba, &new_pwr_info);
+ ret = ufshcd_config_pwr_mode(hba, &new_pwr_info,
+ UFSHCD_PMC_POLICY_DONT_FORCE);
if (ret)
dev_err(hba->dev, "%s: failed err %d, old gear: (tx %d rx %d), new gear: (tx %d rx %d)",
__func__, ret,
@@ -4251,7 +4289,8 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
pwr_mode_change = true;
}
if (pwr_mode_change) {
- ret = ufshcd_change_power_mode(hba, &temp_pwr_info);
+ ret = ufshcd_change_power_mode(hba, &temp_pwr_info,
+ UFSHCD_PMC_POLICY_DONT_FORCE);
if (ret)
goto out;
}
@@ -4275,7 +4314,8 @@ int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
&& pwr_mode_change)
- ufshcd_change_power_mode(hba, &orig_pwr_info);
+ ufshcd_change_power_mode(hba, &orig_pwr_info,
+ UFSHCD_PMC_POLICY_DONT_FORCE);
out:
return ret;
}
@@ -4342,16 +4382,18 @@ static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
ret = __ufshcd_send_uic_cmd(hba, cmd);
if (ret) {
dev_err(hba->dev,
- "pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
- cmd->command, cmd->argument3, ret);
+ "pwr ctrl cmd 0x%x with (MIBattribute 0x%x, mode 0x%x) uic error %d\n",
+ cmd->command, UIC_GET_ATTR_ID(cmd->argument1),
+ cmd->argument3, ret);
goto out;
}
if (!wait_for_completion_timeout(hba->uic_async_done,
msecs_to_jiffies(uic_cmd_timeout))) {
dev_err(hba->dev,
- "pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
- cmd->command, cmd->argument3);
+ "pwr ctrl cmd 0x%x with (MIBattribute 0x%x, mode 0x%x) completion timeout\n",
+ cmd->command, UIC_GET_ATTR_ID(cmd->argument1),
+ cmd->argument3);
if (!cmd->cmd_active) {
dev_err(hba->dev, "%s: Power Mode Change operation has been completed, go check UPMCRS\n",
@@ -4367,14 +4409,16 @@ check_upmcrs:
status = ufshcd_get_upmcrs(hba);
if (status != PWR_LOCAL) {
dev_err(hba->dev,
- "pwr ctrl cmd 0x%x failed, host upmcrs:0x%x\n",
- cmd->command, status);
+ "pwr ctrl cmd 0x%x with (MIBattribute 0x%x, mode 0x%x) failed, host upmcrs:0x%x\n",
+ cmd->command, UIC_GET_ATTR_ID(cmd->argument1),
+ cmd->argument3, status);
ret = (status != PWR_OK) ? status : -1;
}
out:
if (ret) {
ufshcd_print_host_state(hba);
ufshcd_print_pwr_info(hba);
+ ufshcd_print_tx_eq_params(hba);
ufshcd_print_evt_hist(hba);
}
@@ -4401,6 +4445,29 @@ out_unlock:
}
/**
+ * ufshcd_uic_tx_eqtr - Perform UIC TX Equalization Training
+ * @hba: per adapter instance
+ * @gear: target gear for EQTR
+ *
+ * Returns 0 on success, negative error code otherwise
+ */
+int ufshcd_uic_tx_eqtr(struct ufs_hba *hba, int gear)
+{
+ /* DME_SET of PA_EQTR_GEAR triggers the training; it is issued through
+ * ufshcd_uic_pwr_ctrl() so completion is awaited like a power-mode change.
+ */
+ struct uic_command uic_cmd = {
+ .command = UIC_CMD_DME_SET,
+ .argument1 = UIC_ARG_MIB(PA_EQTR_GEAR),
+ .argument3 = gear,
+ };
+ int ret;
+
+ ufshcd_hold(hba);
+ ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+ ufshcd_release(hba);
+
+ return ret;
+}
+
+/**
* ufshcd_send_bsg_uic_cmd - Send UIC commands requested via BSG layer and retrieve the result
* @hba: per adapter instance
* @uic_cmd: UIC command
@@ -4663,13 +4730,33 @@ static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
return 0;
}
-static int ufshcd_change_power_mode(struct ufs_hba *hba,
- struct ufs_pa_layer_attr *pwr_mode)
+/**
+ * ufshcd_dme_change_power_mode() - UniPro DME Power Mode change sequence
+ * @hba: per-adapter instance
+ * @pwr_mode: pointer to the target power mode (gear/lane) attributes
+ * @pmc_policy: Power Mode change policy
+ *
+ * This function handles the low-level DME (Device Management Entity)
+ * configuration required to transition the UFS link to a new power mode. It
+ * performs the following steps:
+ * 1. Checks if the requested mode matches the current state.
+ * 2. Sets M-PHY and UniPro attributes including Gear (PA_RXGEAR/TXGEAR),
+ * Lanes, Termination, and HS Series (PA_HSSERIES).
+ * 3. Configures default UniPro timeout values (DL_FC0, etc.) unless
+ * explicitly skipped via quirks.
+ * 4. Triggers the actual hardware mode change via ufshcd_uic_change_pwr_mode().
+ * 5. Updates the HBA's cached power information on success.
+ *
+ * Return: 0 on success, non-zero error code on failure.
+ */
+static int ufshcd_dme_change_power_mode(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *pwr_mode,
+ enum ufshcd_pmc_policy pmc_policy)
{
int ret;
/* if already configured to the requested pwr_mode */
- if (!hba->force_pmc &&
+ if (pmc_policy == UFSHCD_PMC_POLICY_DONT_FORCE &&
pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
@@ -4749,31 +4836,67 @@ static int ufshcd_change_power_mode(struct ufs_hba *hba,
}
/**
+ * ufshcd_change_power_mode() - Change UFS Link Power Mode
+ * @hba: per-adapter instance
+ * @pwr_mode: pointer to the target power mode (gear/lane) attributes
+ * @pmc_policy: Power Mode change policy
+ *
+ * This function handles the high-level sequence for changing the UFS link
+ * power mode. It triggers vendor-specific pre-change notification,
+ * executes the DME (Device Management Entity) power mode change sequence,
+ * and, upon success, triggers vendor-specific post-change notification.
+ *
+ * Return: 0 on success, non-zero error code on failure.
+ */
+int ufshcd_change_power_mode(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *pwr_mode,
+ enum ufshcd_pmc_policy pmc_policy)
+{
+ int ret;
+
+ /* PRE_CHANGE result is deliberately ignored: hosts without a
+ * pwr_change_notify vop would return -ENOTSUPP here.
+ */
+ ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE, pwr_mode);
+
+ ret = ufshcd_dme_change_power_mode(hba, pwr_mode, pmc_policy);
+
+ /* Notify the variant only after a successful mode change */
+ if (!ret)
+ ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, pwr_mode);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ufshcd_change_power_mode);
+
+/**
* ufshcd_config_pwr_mode - configure a new power mode
* @hba: per-adapter instance
* @desired_pwr_mode: desired power configuration
+ * @pmc_policy: Power Mode change policy
*
* Return: 0 upon success; < 0 upon failure.
*/
int ufshcd_config_pwr_mode(struct ufs_hba *hba,
- struct ufs_pa_layer_attr *desired_pwr_mode)
+ struct ufs_pa_layer_attr *desired_pwr_mode,
+ enum ufshcd_pmc_policy pmc_policy)
{
struct ufs_pa_layer_attr final_params = { 0 };
int ret;
- ret = ufshcd_vops_pwr_change_notify(hba, PRE_CHANGE,
- desired_pwr_mode, &final_params);
+ ret = ufshcd_vops_negotiate_pwr_mode(hba, desired_pwr_mode,
+ &final_params);
+ if (ret) {
+ if (ret != -ENOTSUPP)
+ dev_err(hba->dev, "Failed to negotiate power mode: %d, use desired as is\n",
+ ret);
- if (ret)
memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
+ }
- ret = ufshcd_change_power_mode(hba, &final_params);
-
- if (!ret)
- ufshcd_vops_pwr_change_notify(hba, POST_CHANGE, NULL,
- &final_params);
+ ret = ufshcd_config_tx_eq_settings(hba, &final_params, false);
+ if (ret)
+ dev_warn(hba->dev, "Failed to configure TX Equalization for HS-G%u, Rate-%s: %d\n",
+ final_params.gear_tx,
+ ufs_hs_rate_to_str(final_params.hs_rate), ret);
- return ret;
+ return ufshcd_change_power_mode(hba, &final_params, pmc_policy);
}
EXPORT_SYMBOL_GPL(ufshcd_config_pwr_mode);
@@ -6775,6 +6898,7 @@ again:
spin_unlock_irqrestore(hba->host->host_lock, flags);
ufshcd_print_host_state(hba);
ufshcd_print_pwr_info(hba);
+ ufshcd_print_tx_eq_params(hba);
ufshcd_print_evt_hist(hba);
ufshcd_print_tmrs(hba, hba->outstanding_tasks);
ufshcd_print_trs_all(hba, pr_prdt);
@@ -6833,14 +6957,13 @@ again:
* are sent via bsg and/or sysfs.
*/
down_write(&hba->clk_scaling_lock);
- hba->force_pmc = true;
- pmc_err = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
+ pmc_err = ufshcd_config_pwr_mode(hba, &hba->pwr_info,
+ UFSHCD_PMC_POLICY_FORCE);
if (pmc_err) {
needs_reset = true;
dev_err(hba->dev, "%s: Failed to restore power mode, err = %d\n",
__func__, pmc_err);
}
- hba->force_pmc = false;
ufshcd_print_pwr_info(hba);
up_write(&hba->clk_scaling_lock);
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -7048,6 +7171,7 @@ static irqreturn_t ufshcd_check_errors(struct ufs_hba *hba, u32 intr_status)
ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE,
"host_regs: ");
ufshcd_print_pwr_info(hba);
+ ufshcd_print_tx_eq_params(hba);
}
ufshcd_schedule_eh_work(hba);
retval |= IRQ_HANDLED;
@@ -7843,6 +7967,7 @@ static int ufshcd_abort(struct scsi_cmnd *cmd)
ufshcd_print_evt_hist(hba);
ufshcd_print_host_state(hba);
ufshcd_print_pwr_info(hba);
+ ufshcd_print_tx_eq_params(hba);
ufshcd_print_tr(hba, cmd, true);
} else {
ufshcd_print_tr(hba, cmd, false);
@@ -8820,6 +8945,8 @@ static void ufshcd_tune_unipro_params(struct ufs_hba *hba)
if (hba->dev_quirks & UFS_DEVICE_QUIRK_PA_HIBER8TIME)
ufshcd_quirk_override_pa_h8time(hba);
+
+ ufshcd_apply_valid_tx_eq_settings(hba);
}
static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba)
@@ -9144,7 +9271,8 @@ static int ufshcd_post_device_init(struct ufs_hba *hba)
if (hba->dev_ref_clk_freq != REF_CLK_FREQ_INVAL)
ufshcd_set_dev_ref_clk(hba);
/* Gear up to HS gear. */
- ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
+ ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info,
+ UFSHCD_PMC_POLICY_DONT_FORCE);
if (ret) {
dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
__func__, ret);
diff --git a/drivers/ufs/host/ufs-amd-versal2.c b/drivers/ufs/host/ufs-amd-versal2.c
index 6c454ae8a9c8..2154d6286817 100644
--- a/drivers/ufs/host/ufs-amd-versal2.c
+++ b/drivers/ufs/host/ufs-amd-versal2.c
@@ -443,7 +443,6 @@ static int ufs_versal2_phy_ratesel(struct ufs_hba *hba, u32 activelanes, u32 rx_
}
static int ufs_versal2_pwr_change_notify(struct ufs_hba *hba, enum ufs_notify_change_status status,
- const struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
{
struct ufs_versal2_host *host = ufshcd_get_variant(hba);
@@ -451,8 +450,6 @@ static int ufs_versal2_pwr_change_notify(struct ufs_hba *hba, enum ufs_notify_ch
int ret = 0;
if (status == PRE_CHANGE) {
- memcpy(dev_req_params, dev_max_params, sizeof(struct ufs_pa_layer_attr));
-
/* If it is not a calibrated part, switch PWRMODE to SLOW_MODE */
if (!host->attcompval0 && !host->attcompval1 && !host->ctlecompval0 &&
!host->ctlecompval1) {
diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c
index 76fee3a79c77..77a6c8e44485 100644
--- a/drivers/ufs/host/ufs-exynos.c
+++ b/drivers/ufs/host/ufs-exynos.c
@@ -818,12 +818,10 @@ static u32 exynos_ufs_get_hs_gear(struct ufs_hba *hba)
}
static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba,
- const struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
{
struct exynos_ufs *ufs = ufshcd_get_variant(hba);
struct phy *generic_phy = ufs->phy;
- struct ufs_host_params host_params;
int ret;
if (!dev_req_params) {
@@ -832,18 +830,6 @@ static int exynos_ufs_pre_pwr_mode(struct ufs_hba *hba,
goto out;
}
- ufshcd_init_host_params(&host_params);
-
- /* This driver only support symmetric gear setting e.g. hs_tx_gear == hs_rx_gear */
- host_params.hs_tx_gear = exynos_ufs_get_hs_gear(hba);
- host_params.hs_rx_gear = exynos_ufs_get_hs_gear(hba);
-
- ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params);
- if (ret) {
- pr_err("%s: failed to determine capabilities\n", __func__);
- goto out;
- }
-
if (ufs->drv_data->pre_pwr_change)
ufs->drv_data->pre_pwr_change(ufs, dev_req_params);
@@ -1677,17 +1663,30 @@ static int exynos_ufs_link_startup_notify(struct ufs_hba *hba,
return ret;
}
+static int exynos_ufs_negotiate_pwr_mode(struct ufs_hba *hba,
+ const struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
+{
+ struct ufs_host_params host_params;
+
+ ufshcd_init_host_params(&host_params);
+
+ /* This driver only support symmetric gear setting e.g. hs_tx_gear == hs_rx_gear */
+ host_params.hs_tx_gear = exynos_ufs_get_hs_gear(hba);
+ host_params.hs_rx_gear = exynos_ufs_get_hs_gear(hba);
+
+ return ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params);
+}
+
static int exynos_ufs_pwr_change_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status,
- const struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
{
int ret = 0;
switch (status) {
case PRE_CHANGE:
- ret = exynos_ufs_pre_pwr_mode(hba, dev_max_params,
- dev_req_params);
+ ret = exynos_ufs_pre_pwr_mode(hba, dev_req_params);
break;
case POST_CHANGE:
ret = exynos_ufs_post_pwr_mode(hba, dev_req_params);
@@ -2015,6 +2014,7 @@ static const struct ufs_hba_variant_ops ufs_hba_exynos_ops = {
.exit = exynos_ufs_exit,
.hce_enable_notify = exynos_ufs_hce_enable_notify,
.link_startup_notify = exynos_ufs_link_startup_notify,
+ .negotiate_pwr_mode = exynos_ufs_negotiate_pwr_mode,
.pwr_change_notify = exynos_ufs_pwr_change_notify,
.setup_clocks = exynos_ufs_setup_clocks,
.setup_xfer_req = exynos_ufs_specify_nexus_t_xfer_req,
diff --git a/drivers/ufs/host/ufs-hisi.c b/drivers/ufs/host/ufs-hisi.c
index 6f2e6bf31225..993e20ac211d 100644
--- a/drivers/ufs/host/ufs-hisi.c
+++ b/drivers/ufs/host/ufs-hisi.c
@@ -298,6 +298,17 @@ static void ufs_hisi_set_dev_cap(struct ufs_host_params *host_params)
ufshcd_init_host_params(host_params);
}
+static int ufs_hisi_negotiate_pwr_mode(struct ufs_hba *hba,
+ const struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
+{
+ struct ufs_host_params host_params;
+
+ ufs_hisi_set_dev_cap(&host_params);
+
+ return ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params);
+}
+
static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba)
{
struct ufs_hisi_host *host = ufshcd_get_variant(hba);
@@ -362,10 +373,8 @@ static void ufs_hisi_pwr_change_pre_change(struct ufs_hba *hba)
static int ufs_hisi_pwr_change_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status,
- const struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
{
- struct ufs_host_params host_params;
int ret = 0;
if (!dev_req_params) {
@@ -377,14 +386,6 @@ static int ufs_hisi_pwr_change_notify(struct ufs_hba *hba,
switch (status) {
case PRE_CHANGE:
- ufs_hisi_set_dev_cap(&host_params);
- ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params);
- if (ret) {
- dev_err(hba->dev,
- "%s: failed to determine capabilities\n", __func__);
- goto out;
- }
-
ufs_hisi_pwr_change_pre_change(hba);
break;
case POST_CHANGE:
@@ -543,6 +544,7 @@ static const struct ufs_hba_variant_ops ufs_hba_hi3660_vops = {
.name = "hi3660",
.init = ufs_hi3660_init,
.link_startup_notify = ufs_hisi_link_startup_notify,
+ .negotiate_pwr_mode = ufs_hisi_negotiate_pwr_mode,
.pwr_change_notify = ufs_hisi_pwr_change_notify,
.suspend = ufs_hisi_suspend,
.resume = ufs_hisi_resume,
@@ -552,6 +554,7 @@ static const struct ufs_hba_variant_ops ufs_hba_hi3670_vops = {
.name = "hi3670",
.init = ufs_hi3670_init,
.link_startup_notify = ufs_hisi_link_startup_notify,
+ .negotiate_pwr_mode = ufs_hisi_negotiate_pwr_mode,
.pwr_change_notify = ufs_hisi_pwr_change_notify,
.suspend = ufs_hisi_suspend,
.resume = ufs_hisi_resume,
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
index 4618d7834414..3991a51263a6 100644
--- a/drivers/ufs/host/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -1317,6 +1317,23 @@ out:
return err;
}
+static int ufs_mtk_negotiate_pwr_mode(struct ufs_hba *hba,
+ const struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
+{
+ struct ufs_host_params host_params;
+
+ ufshcd_init_host_params(&host_params);
+ host_params.hs_rx_gear = UFS_HS_G5;
+ host_params.hs_tx_gear = UFS_HS_G5;
+
+ if (dev_max_params->pwr_rx == SLOW_MODE ||
+ dev_max_params->pwr_tx == SLOW_MODE)
+ host_params.desired_working_mode = UFS_PWM_MODE;
+
+ return ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params);
+}
+
static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba,
struct ufs_pa_layer_attr *dev_req_params)
{
@@ -1372,26 +1389,10 @@ static void ufs_mtk_adjust_sync_length(struct ufs_hba *hba)
}
static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
- const struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
{
struct ufs_mtk_host *host = ufshcd_get_variant(hba);
- struct ufs_host_params host_params;
- int ret;
-
- ufshcd_init_host_params(&host_params);
- host_params.hs_rx_gear = UFS_HS_G5;
- host_params.hs_tx_gear = UFS_HS_G5;
-
- if (dev_max_params->pwr_rx == SLOW_MODE ||
- dev_max_params->pwr_tx == SLOW_MODE)
- host_params.desired_working_mode = UFS_PWM_MODE;
-
- ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params);
- if (ret) {
- pr_info("%s: failed to determine capabilities\n",
- __func__);
- }
+ int ret = 0;
if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) {
ufs_mtk_adjust_sync_length(hba);
@@ -1503,7 +1504,6 @@ out:
static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
enum ufs_notify_change_status stage,
- const struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
{
int ret = 0;
@@ -1515,8 +1515,7 @@ static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
reg = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
ufs_mtk_auto_hibern8_disable(hba);
}
- ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
- dev_req_params);
+ ret = ufs_mtk_pre_pwr_change(hba, dev_req_params);
break;
case POST_CHANGE:
if (ufshcd_is_auto_hibern8_supported(hba))
@@ -2329,6 +2328,7 @@ static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
.setup_clocks = ufs_mtk_setup_clocks,
.hce_enable_notify = ufs_mtk_hce_enable_notify,
.link_startup_notify = ufs_mtk_link_startup_notify,
+ .negotiate_pwr_mode = ufs_mtk_negotiate_pwr_mode,
.pwr_change_notify = ufs_mtk_pwr_change_notify,
.apply_dev_quirks = ufs_mtk_apply_dev_quirks,
.fixup_dev_quirks = ufs_mtk_fixup_dev_quirks,
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 375fd24ba458..5a58ffef3d27 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -966,13 +966,21 @@ static void ufs_qcom_set_tx_hs_equalizer(struct ufs_hba *hba, u32 gear, u32 tx_l
}
}
-static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
- enum ufs_notify_change_status status,
- const struct ufs_pa_layer_attr *dev_max_params,
- struct ufs_pa_layer_attr *dev_req_params)
+static int ufs_qcom_negotiate_pwr_mode(struct ufs_hba *hba,
+ const struct ufs_pa_layer_attr *dev_max_params,
+ struct ufs_pa_layer_attr *dev_req_params)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
struct ufs_host_params *host_params = &host->host_params;
+
+ return ufshcd_negotiate_pwr_params(host_params, dev_max_params, dev_req_params);
+}
+
+static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status,
+ struct ufs_pa_layer_attr *dev_req_params)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
int ret = 0;
if (!dev_req_params) {
@@ -982,13 +990,6 @@ static int ufs_qcom_pwr_change_notify(struct ufs_hba *hba,
switch (status) {
case PRE_CHANGE:
- ret = ufshcd_negotiate_pwr_params(host_params, dev_max_params, dev_req_params);
- if (ret) {
- dev_err(hba->dev, "%s: failed to determine capabilities\n",
- __func__);
- return ret;
- }
-
/*
* During UFS driver probe, always update the PHY gear to match the negotiated
* gear, so that, if quirk UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH is enabled,
@@ -1068,10 +1069,188 @@ static void ufs_qcom_override_pa_tx_hsg1_sync_len(struct ufs_hba *hba)
dev_err(hba->dev, "Failed (%d) set PA_TX_HSG1_SYNC_LENGTH\n", err);
}
+/**
+ * ufs_qcom_double_t_adapt_l0l1l2l3 - Create a new adapt that doubles the
+ * adaptation duration TADAPT_L0_L1_L2_L3 derived from the old adapt.
+ *
+ * @old_adapt: Original ADAPT_L0_L1_L2_L3 capability
+ *
+ * ADAPT_length_L0_L1_L2_L3 formula from M-PHY spec:
+ * if (ADAPT_range_L0_L1_L2_L3 == COARSE) {
+ * ADAPT_length_L0_L1_L2_L3 = [0, 12]
+ * TADAPT_L0_L1_L2_L3 = 215 x 2^ADAPT_length_L0_L1_L2_L3
+ * } else if (ADAPT_range_L0_L1_L2_L3 == FINE) {
+ * ADAPT_length_L0_L1_L2_L3 = [0, 127]
+ * TADAPT_L0_L1_L2_L3 = 215 x (ADAPT_length_L0_L1_L2_L3 + 1)
+ * }
+ *
+ * To double the adaptation duration TADAPT_L0_L1_L2_L3:
+ * 1. If adapt range is COARSE (1'b1), new adapt = old adapt + 1.
+ * 2. If adapt range is FINE (1'b0):
+ * a) If old adapt length is < 64, (new adapt + 1) = 2 * (old adapt + 1).
+ * b) If old adapt length is >= 64, set new adapt to 0x88 using COARSE
+ * range, because new adapt get from equation in a) shall exceed 127.
+ *
+ * Examples:
+ * ADAPT_range_L0_L1_L2_L3 | ADAPT_length_L0_L1_L2_L3 | TADAPT_L0_L1_L2_L3 (PAM-4 UI)
+ * 0 3 131072
+ * 0 7 262144
+ * 0 63 2097152
+ * 0 64 2129920
+ * 0 127 4194304
+ * 1 8 8388608
+ * 1 9 16777216
+ * 1 10 33554432
+ * 1 11 67108864
+ * 1 12 134217728
+ *
+ * Return: new adapt.
+ */
+static u32 ufs_qcom_double_t_adapt_l0l1l2l3(u32 old_adapt)
+{
+ u32 adapt_length = old_adapt & ADAPT_LENGTH_MASK;
+ u32 new_adapt;
+
+ if (IS_ADAPT_RANGE_COARSE(old_adapt)) {
+ new_adapt = (adapt_length + 1) | ADAPT_RANGE_BIT;
+ } else {
+ if (adapt_length < 64)
+ new_adapt = (adapt_length << 1) + 1;
+ else
+ /*
+ * 0x88 is the very coarse Adapt value which is two
+ * times of the largest fine Adapt value (0x7F)
+ */
+ new_adapt = 0x88;
+ }
+
+ return new_adapt;
+}
+
+static void ufs_qcom_limit_max_gear(struct ufs_hba *hba,
+ enum ufs_hs_gear_tag gear)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
+ struct ufs_host_params *host_params = &host->host_params;
+
+ host_params->hs_tx_gear = gear;
+ host_params->hs_rx_gear = gear;
+ pwr_info->gear_tx = gear;
+ pwr_info->gear_rx = gear;
+
+ dev_warn(hba->dev, "Limited max gear of host and device to HS-G%d\n", gear);
+}
+
+static void ufs_qcom_fixup_tx_adapt_l0l1l2l3(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
+ struct ufs_host_params *host_params = &host->host_params;
+ u32 old_adapt, new_adapt, actual_adapt;
+ bool limit_speed = false;
+ int err;
+
+ if (host->hw_ver.major != 0x7 || host->hw_ver.minor > 0x1 ||
+ host_params->hs_tx_gear <= UFS_HS_G5 ||
+ pwr_info->gear_tx <= UFS_HS_G5)
+ return;
+
+ err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PEERRXHSG6ADAPTINITIALL0L1L2L3), &old_adapt);
+ if (err)
+ goto out;
+
+ if (old_adapt > ADAPT_L0L1L2L3_LENGTH_MAX) {
+ dev_err(hba->dev, "PA_PeerRxHsG6AdaptInitialL0L1L2L3 value (0x%x) exceeds MAX\n",
+ old_adapt);
+ err = -ERANGE;
+ goto out;
+ }
+
+ new_adapt = ufs_qcom_double_t_adapt_l0l1l2l3(old_adapt);
+ dev_dbg(hba->dev, "Original PA_PeerRxHsG6AdaptInitialL0L1L2L3 = 0x%x, new value = 0x%x\n",
+ old_adapt, new_adapt);
+
+ /*
+ * 0x8C is the max possible value allowed by UniPro v3.0 spec, some HWs
+ * can accept 0x8D but some cannot.
+ */
+ if (new_adapt <= ADAPT_L0L1L2L3_LENGTH_MAX ||
+ (new_adapt == ADAPT_L0L1L2L3_LENGTH_MAX + 1 && host->hw_ver.minor == 0x1)) {
+ err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PEERRXHSG6ADAPTINITIALL0L1L2L3),
+ new_adapt);
+ if (err)
+ goto out;
+
+ err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PEERRXHSG6ADAPTINITIALL0L1L2L3),
+ &actual_adapt);
+ if (err)
+ goto out;
+
+ if (actual_adapt != new_adapt) {
+ limit_speed = true;
+ dev_warn(hba->dev, "PA_PeerRxHsG6AdaptInitialL0L1L2L3 0x%x, expect 0x%x\n",
+ actual_adapt, new_adapt);
+ }
+ } else {
+ limit_speed = true;
+ dev_warn(hba->dev, "New PA_PeerRxHsG6AdaptInitialL0L1L2L3 (0x%x) is too large!\n",
+ new_adapt);
+ }
+
+ err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PEERRXHSG6ADAPTREFRESHL0L1L2L3), &old_adapt);
+ if (err)
+ goto out;
+
+ if (old_adapt > ADAPT_L0L1L2L3_LENGTH_MAX) {
+ dev_err(hba->dev, "PA_PeerRxHsG6AdaptRefreshL0L1L2L3 value (0x%x) exceeds MAX\n",
+ old_adapt);
+ err = -ERANGE;
+ goto out;
+ }
+
+ new_adapt = ufs_qcom_double_t_adapt_l0l1l2l3(old_adapt);
+ dev_dbg(hba->dev, "Original PA_PeerRxHsG6AdaptRefreshL0L1L2L3 = 0x%x, new value = 0x%x\n",
+ old_adapt, new_adapt);
+
+ /*
+ * 0x8C is the max possible value allowed by UniPro v3.0 spec, some HWs
+ * can accept 0x8D but some cannot.
+ */
+ if (new_adapt <= ADAPT_L0L1L2L3_LENGTH_MAX ||
+ (new_adapt == ADAPT_L0L1L2L3_LENGTH_MAX + 1 && host->hw_ver.minor == 0x1)) {
+ err = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PEERRXHSG6ADAPTREFRESHL0L1L2L3),
+ new_adapt);
+ if (err)
+ goto out;
+
+ err = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PEERRXHSG6ADAPTREFRESHL0L1L2L3),
+ &actual_adapt);
+ if (err)
+ goto out;
+
+ if (actual_adapt != new_adapt) {
+ limit_speed = true;
+ dev_warn(hba->dev, "PA_PeerRxHsG6AdaptRefreshL0L1L2L3 0x%x, expect 0x%x\n",
+ actual_adapt, new_adapt);
+ }
+ } else {
+ limit_speed = true;
+ dev_warn(hba->dev, "New PA_PeerRxHsG6AdaptRefreshL0L1L2L3 (0x%x) is too large!\n",
+ new_adapt);
+ }
+
+out:
+ if (limit_speed || err)
+ ufs_qcom_limit_max_gear(hba, UFS_HS_G5);
+}
+
static int ufs_qcom_apply_dev_quirks(struct ufs_hba *hba)
{
int err = 0;
+ ufs_qcom_fixup_tx_adapt_l0l1l2l3(hba);
+
if (hba->dev_quirks & UFS_DEVICE_QUIRK_HOST_PA_SAVECONFIGTIME)
err = ufs_qcom_quirk_host_pa_saveconfigtime(hba);
@@ -1205,6 +1384,8 @@ static void ufs_qcom_set_host_caps(struct ufs_hba *hba)
static void ufs_qcom_set_caps(struct ufs_hba *hba)
{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+
hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
hba->caps |= UFSHCD_CAP_CLK_SCALING | UFSHCD_CAP_WB_WITH_CLK_SCALING;
hba->caps |= UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
@@ -1212,6 +1393,9 @@ static void ufs_qcom_set_caps(struct ufs_hba *hba)
hba->caps |= UFSHCD_CAP_AGGR_POWER_COLLAPSE;
hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
+ if (host->hw_ver.major >= 0x7)
+ hba->caps |= UFSHCD_CAP_TX_EQUALIZATION;
+
ufs_qcom_set_host_caps(hba);
}
@@ -2326,6 +2510,387 @@ static u32 ufs_qcom_freq_to_gear_speed(struct ufs_hba *hba, unsigned long freq)
return min_t(u32, gear, hba->max_pwr_info.info.gear_rx);
}
+static int ufs_qcom_host_eom_config(struct ufs_hba *hba, int lane,
+ const struct ufs_eom_coord *eom_coord,
+ u32 target_test_count)
+{
+ enum ufs_eom_eye_mask eye_mask = eom_coord->eye_mask;
+ int v_step = eom_coord->v_step;
+ int t_step = eom_coord->t_step;
+ u32 volt_step, timing_step;
+ int ret;
+
+ if (abs(v_step) > UFS_QCOM_EOM_VOLTAGE_STEPS_MAX) {
+ dev_err(hba->dev, "Invalid EOM Voltage Step: %d\n", v_step);
+ return -ERANGE;
+ }
+
+ if (abs(t_step) > UFS_QCOM_EOM_TIMING_STEPS_MAX) {
+ dev_err(hba->dev, "Invalid EOM Timing Step: %d\n", t_step);
+ return -ERANGE;
+ }
+
+ if (v_step < 0)
+ volt_step = RX_EYEMON_NEGATIVE_STEP_BIT | (u32)(-v_step);
+ else
+ volt_step = (u32)v_step;
+
+ if (t_step < 0)
+ timing_step = RX_EYEMON_NEGATIVE_STEP_BIT | (u32)(-t_step);
+ else
+ timing_step = (u32)t_step;
+
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_EYEMON_ENABLE,
+ UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)),
+ BIT(eye_mask) | RX_EYEMON_EXTENDED_VRANGE_BIT);
+ if (ret) {
+ dev_err(hba->dev, "Failed to enable Host EOM on Lane %d: %d\n",
+ lane, ret);
+ return ret;
+ }
+
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_EYEMON_TIMING_STEPS,
+ UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)),
+ timing_step);
+ if (ret) {
+ dev_err(hba->dev, "Failed to set Host EOM timing step on Lane %d: %d\n",
+ lane, ret);
+ return ret;
+ }
+
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_EYEMON_VOLTAGE_STEPS,
+ UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)),
+ volt_step);
+ if (ret) {
+ dev_err(hba->dev, "Failed to set Host EOM voltage step on Lane %d: %d\n",
+ lane, ret);
+ return ret;
+ }
+
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_EYEMON_TARGET_TEST_COUNT,
+ UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)),
+ target_test_count);
+ if (ret)
+ dev_err(hba->dev, "Failed to set Host EOM target test count on Lane %d: %d\n",
+ lane, ret);
+
+ return ret;
+}
+
+static int ufs_qcom_host_eom_may_stop(struct ufs_hba *hba, int lane,
+ u32 target_test_count, u32 *err_count)
+{
+ u32 start, tested_count, error_count;
+ int ret;
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(RX_EYEMON_START,
+ UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)),
+ &start);
+ if (ret) {
+ dev_err(hba->dev, "Failed to get Host EOM start status on Lane %d: %d\n",
+ lane, ret);
+ return ret;
+ }
+
+ if (start & 0x1)
+ return -EAGAIN;
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(RX_EYEMON_TESTED_COUNT,
+ UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)),
+ &tested_count);
+ if (ret) {
+ dev_err(hba->dev, "Failed to get Host EOM tested count on Lane %d: %d\n",
+ lane, ret);
+ return ret;
+ }
+
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB_SEL(RX_EYEMON_ERROR_COUNT,
+ UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)),
+ &error_count);
+ if (ret) {
+ dev_err(hba->dev, "Failed to get Host EOM error count on Lane %d: %d\n",
+ lane, ret);
+ return ret;
+ }
+
+ /* EOM can stop */
+ if ((tested_count >= target_test_count - 3) || error_count > 0) {
+ *err_count = error_count;
+
+ /* Disable EOM */
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB_SEL(RX_EYEMON_ENABLE,
+ UIC_ARG_MPHY_RX_GEN_SEL_INDEX(lane)),
+ 0x0);
+ if (ret) {
+ dev_err(hba->dev, "Failed to disable Host EOM on Lane %d: %d\n",
+ lane, ret);
+ return ret;
+ }
+ } else {
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static int ufs_qcom_host_eom_scan(struct ufs_hba *hba, int num_lanes,
+ const struct ufs_eom_coord *eom_coord,
+ u32 target_test_count, u32 *err_count)
+{
+ bool eom_stopped[PA_MAXDATALANES] = { 0 };
+ int lane, ret;
+ u32 setting;
+
+ if (!err_count || !eom_coord)
+ return -EINVAL;
+
+ if (target_test_count < UFS_QCOM_EOM_TARGET_TEST_COUNT_MIN) {
+ dev_err(hba->dev, "Target test count (%u) too small for Host EOM\n",
+ target_test_count);
+ return -ERANGE;
+ }
+
+ for (lane = 0; lane < num_lanes; lane++) {
+ ret = ufs_qcom_host_eom_config(hba, lane, eom_coord,
+ target_test_count);
+ if (ret) {
+ dev_err(hba->dev, "Failed to config Host RX EOM: %d\n", ret);
+ return ret;
+ }
+ }
+
+ /*
+ * Trigger a PACP_PWR_req to kick start EOM, but not to really change
+ * the Power Mode.
+ */
+ ret = ufshcd_uic_change_pwr_mode(hba, FAST_MODE << 4 | FAST_MODE);
+ if (ret) {
+ dev_err(hba->dev, "Failed to change power mode to kick start Host EOM: %d\n",
+ ret);
+ return ret;
+ }
+
+more_burst:
+ /* Create burst on Host RX Lane. */
+ ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &setting);
+
+ for (lane = 0; lane < num_lanes; lane++) {
+ if (eom_stopped[lane])
+ continue;
+
+ ret = ufs_qcom_host_eom_may_stop(hba, lane, target_test_count,
+ &err_count[lane]);
+ if (!ret) {
+ eom_stopped[lane] = true;
+ } else if (ret == -EAGAIN) {
+ /* Need more burst to exercise EOM */
+ goto more_burst;
+ } else {
+ dev_err(hba->dev, "Failed to stop Host EOM: %d\n", ret);
+ return ret;
+ }
+
+ dev_dbg(hba->dev, "Host RX Lane %d EOM, v_step %d, t_step %d, error count %u\n",
+ lane, eom_coord->v_step, eom_coord->t_step,
+ err_count[lane]);
+ }
+
+ return 0;
+}
+
+static int ufs_qcom_host_sw_rx_fom(struct ufs_hba *hba, int num_lanes, u32 *fom)
+{
+ const struct ufs_eom_coord *eom_coord = sw_rx_fom_eom_coords_g6;
+ u32 eom_err_count[PA_MAXDATALANES] = { 0 };
+ u32 curr_ahit;
+ int lane, i, ret;
+
+ if (!fom)
+ return -EINVAL;
+
+ /* Stop the auto hibernate idle timer */
+ curr_ahit = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
+ if (curr_ahit)
+ ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);
+
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE), PA_NO_ADAPT);
+ if (ret) {
+ dev_err(hba->dev, "Failed to select NO_ADAPT before starting Host EOM: %d\n", ret);
+ goto out;
+ }
+
+ for (i = 0; i < SW_RX_FOM_EOM_COORDS; i++, eom_coord++) {
+ ret = ufs_qcom_host_eom_scan(hba, num_lanes, eom_coord,
+ UFS_QCOM_EOM_TARGET_TEST_COUNT_G6,
+ eom_err_count);
+ if (ret) {
+ dev_err(hba->dev, "Failed to run Host EOM scan: %d\n", ret);
+ break;
+ }
+
+ for (lane = 0; lane < num_lanes; lane++) {
+ /* Bad coordinates have no weights */
+ if (eom_err_count[lane])
+ continue;
+ fom[lane] += SW_RX_FOM_EOM_COORDS_WEIGHT;
+ }
+ }
+
+out:
+ /* Restore the auto hibernate idle timer */
+ if (curr_ahit)
+ ufshcd_writel(hba, curr_ahit, REG_AUTO_HIBERNATE_IDLE_TIMER);
+
+ return ret;
+}
+
+static int ufs_qcom_get_rx_fom(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *pwr_mode,
+ struct tx_eqtr_iter *h_iter,
+ struct tx_eqtr_iter *d_iter)
+{
+ struct ufshcd_tx_eq_params *params __free(kfree) =
+ kzalloc(sizeof(*params), GFP_KERNEL);
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ struct ufs_pa_layer_attr old_pwr_info;
+ u32 fom[PA_MAXDATALANES] = { 0 };
+ u32 gear = pwr_mode->gear_tx;
+ u32 rate = pwr_mode->hs_rate;
+ int lane, ret;
+
+ if (host->hw_ver.major != 0x7 || host->hw_ver.minor > 0x1 ||
+ gear <= UFS_HS_G5 || !d_iter || !d_iter->is_updated)
+ return 0;
+
+ if (gear < UFS_HS_G1 || gear > UFS_HS_GEAR_MAX)
+ return -ERANGE;
+
+ if (!params)
+ return -ENOMEM;
+
+ memcpy(&old_pwr_info, &hba->pwr_info, sizeof(struct ufs_pa_layer_attr));
+
+ memcpy(params, &hba->tx_eq_params[gear - 1], sizeof(struct ufshcd_tx_eq_params));
+ for (lane = 0; lane < pwr_mode->lane_rx; lane++) {
+ params->device[lane].preshoot = d_iter->preshoot;
+ params->device[lane].deemphasis = d_iter->deemphasis;
+ }
+
+ /* Use TX EQTR settings as Device's TX Equalization settings. */
+ ret = ufshcd_apply_tx_eq_settings(hba, params, gear);
+ if (ret) {
+ dev_err(hba->dev, "%s: Failed to apply TX EQ settings for HS-G%u: %d\n",
+ __func__, gear, ret);
+ return ret;
+ }
+
+ /* Force PMC to target HS Gear to use new TX Equalization settings. */
+ ret = ufshcd_change_power_mode(hba, pwr_mode, UFSHCD_PMC_POLICY_FORCE);
+ if (ret) {
+ dev_err(hba->dev, "%s: Failed to change power mode to HS-G%u, Rate-%s: %d\n",
+ __func__, gear, ufs_hs_rate_to_str(rate), ret);
+ return ret;
+ }
+
+ ret = ufs_qcom_host_sw_rx_fom(hba, pwr_mode->lane_rx, fom);
+ if (ret) {
+ dev_err(hba->dev, "Failed to get SW FOM of TX (PreShoot: %u, DeEmphasis: %u): %d\n",
+ d_iter->preshoot, d_iter->deemphasis, ret);
+ return ret;
+ }
+
+ /* Restore Device's TX Equalization settings. */
+ ret = ufshcd_apply_tx_eq_settings(hba, &hba->tx_eq_params[gear - 1], gear);
+ if (ret) {
+ dev_err(hba->dev, "%s: Failed to apply TX EQ settings for HS-G%u: %d\n",
+ __func__, gear, ret);
+ return ret;
+ }
+
+ /* Restore Power Mode. */
+ ret = ufshcd_change_power_mode(hba, &old_pwr_info, UFSHCD_PMC_POLICY_FORCE);
+ if (ret) {
+ dev_err(hba->dev, "%s: Failed to restore power mode to HS-G%u: %d\n",
+ __func__, old_pwr_info.gear_tx, ret);
+ return ret;
+ }
+
+ for (lane = 0; lane < pwr_mode->lane_rx; lane++)
+ d_iter->fom[lane] = fom[lane];
+
+ return 0;
+}
+
+static int ufs_qcom_apply_tx_eqtr_settings(struct ufs_hba *hba,
+ struct ufs_pa_layer_attr *pwr_mode,
+ struct tx_eqtr_iter *h_iter,
+ struct tx_eqtr_iter *d_iter)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ u32 setting = 0;
+ int lane;
+
+ if (host->hw_ver.major != 0x7 || host->hw_ver.minor > 0x1)
+ return 0;
+
+ for (lane = 0; lane < pwr_mode->lane_tx; lane++) {
+ setting |= TX_HS_PRESHOOT_BITS(lane, h_iter->preshoot);
+ setting |= TX_HS_DEEMPHASIS_BITS(lane, h_iter->deemphasis);
+ }
+
+ return ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXEQG1SETTING), setting);
+}
+
+static int ufs_qcom_tx_eqtr_notify(struct ufs_hba *hba,
+ enum ufs_notify_change_status status,
+ struct ufs_pa_layer_attr *pwr_mode)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ struct ufs_pa_layer_attr pwr_mode_hs_g1 = {
+ .gear_rx = UFS_HS_G1,
+ .gear_tx = UFS_HS_G1,
+ .lane_rx = pwr_mode->lane_rx,
+ .lane_tx = pwr_mode->lane_tx,
+ .pwr_rx = FAST_MODE,
+ .pwr_tx = FAST_MODE,
+ .hs_rate = pwr_mode->hs_rate,
+ };
+ u32 gear = pwr_mode->gear_tx;
+ u32 rate = pwr_mode->hs_rate;
+ int ret;
+
+ if (host->hw_ver.major != 0x7 || host->hw_ver.minor > 0x1)
+ return 0;
+
+ if (status == PRE_CHANGE) {
+ ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TXEQG1SETTING),
+ &host->saved_tx_eq_g1_setting);
+ if (ret)
+ return ret;
+
+ /* PMC to target HS Gear. */
+ ret = ufshcd_change_power_mode(hba, pwr_mode,
+ UFSHCD_PMC_POLICY_DONT_FORCE);
+ if (ret)
+ dev_err(hba->dev, "%s: Failed to PMC to target HS-G%u, Rate-%s: %d\n",
+ __func__, gear, ufs_hs_rate_to_str(rate), ret);
+ } else {
+ ret = ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXEQG1SETTING),
+ host->saved_tx_eq_g1_setting);
+ if (ret)
+ return ret;
+
+ /* PMC back to HS-G1. */
+ ret = ufshcd_change_power_mode(hba, &pwr_mode_hs_g1,
+ UFSHCD_PMC_POLICY_DONT_FORCE);
+ if (ret)
+ dev_err(hba->dev, "%s: Failed to PMC to HS-G1, Rate-%s: %d\n",
+ __func__, ufs_hs_rate_to_str(rate), ret);
+ }
+
+ return ret;
+}
+
/*
* struct ufs_hba_qcom_vops - UFS QCOM specific variant operations
*
@@ -2341,6 +2906,7 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
.setup_clocks = ufs_qcom_setup_clocks,
.hce_enable_notify = ufs_qcom_hce_enable_notify,
.link_startup_notify = ufs_qcom_link_startup_notify,
+ .negotiate_pwr_mode = ufs_qcom_negotiate_pwr_mode,
.pwr_change_notify = ufs_qcom_pwr_change_notify,
.apply_dev_quirks = ufs_qcom_apply_dev_quirks,
.fixup_dev_quirks = ufs_qcom_fixup_dev_quirks,
@@ -2355,6 +2921,9 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
.get_outstanding_cqs = ufs_qcom_get_outstanding_cqs,
.config_esi = ufs_qcom_config_esi,
.freq_to_gear_speed = ufs_qcom_freq_to_gear_speed,
+ .get_rx_fom = ufs_qcom_get_rx_fom,
+ .apply_tx_eqtr_settings = ufs_qcom_apply_tx_eqtr_settings,
+ .tx_eqtr_notify = ufs_qcom_tx_eqtr_notify,
};
static const struct ufs_hba_variant_ops ufs_hba_qcom_sa8255p_vops = {
diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h
index 1111ab34da01..5d083331a7f4 100644
--- a/drivers/ufs/host/ufs-qcom.h
+++ b/drivers/ufs/host/ufs-qcom.h
@@ -33,6 +33,46 @@
#define DL_VS_CLK_CFG_MASK GENMASK(9, 0)
#define DME_VS_CORE_CLK_CTRL_DME_HW_CGC_EN BIT(9)
+#define UFS_QCOM_EOM_VOLTAGE_STEPS_MAX 127
+#define UFS_QCOM_EOM_TIMING_STEPS_MAX 63
+#define UFS_QCOM_EOM_TARGET_TEST_COUNT_MIN 8
+#define UFS_QCOM_EOM_TARGET_TEST_COUNT_G6 0x3F
+
+#define SW_RX_FOM_EOM_COORDS 23
+#define SW_RX_FOM_EOM_COORDS_WEIGHT (127 / SW_RX_FOM_EOM_COORDS)
+
+struct ufs_eom_coord {
+ int t_step;
+ int v_step;
+ u8 eye_mask;
+};
+
+static const struct ufs_eom_coord sw_rx_fom_eom_coords_g6[SW_RX_FOM_EOM_COORDS] = {
+ [0] = { -2, -15, UFS_EOM_EYE_MASK_M },
+ [1] = { 0, -15, UFS_EOM_EYE_MASK_M },
+ [2] = { 2, -15, UFS_EOM_EYE_MASK_M },
+ [3] = { -4, -10, UFS_EOM_EYE_MASK_M },
+ [4] = { -2, -10, UFS_EOM_EYE_MASK_M },
+ [5] = { 0, -10, UFS_EOM_EYE_MASK_M },
+ [6] = { 2, -10, UFS_EOM_EYE_MASK_M },
+ [7] = { 4, -10, UFS_EOM_EYE_MASK_M },
+ [8] = { -6, 0, UFS_EOM_EYE_MASK_M },
+ [9] = { -4, 0, UFS_EOM_EYE_MASK_M },
+ [10] = { -2, 0, UFS_EOM_EYE_MASK_M },
+ [11] = { 0, 0, UFS_EOM_EYE_MASK_M },
+ [12] = { 2, 0, UFS_EOM_EYE_MASK_M },
+ [13] = { 4, 0, UFS_EOM_EYE_MASK_M },
+ [14] = { 6, 0, UFS_EOM_EYE_MASK_M },
+ [15] = { -4, 10, UFS_EOM_EYE_MASK_M },
+ [16] = { -2, 10, UFS_EOM_EYE_MASK_M },
+ [17] = { 0, 10, UFS_EOM_EYE_MASK_M },
+ [18] = { 2, 10, UFS_EOM_EYE_MASK_M },
+ [19] = { 4, 10, UFS_EOM_EYE_MASK_M },
+ [20] = { -2, 15, UFS_EOM_EYE_MASK_M },
+ [21] = { 0, 15, UFS_EOM_EYE_MASK_M },
+ [22] = { 2, 15, UFS_EOM_EYE_MASK_M },
+};
+
/* Qualcomm MCQ Configuration */
#define UFS_QCOM_MCQCAP_QCFGPTR 224 /* 0xE0 in hex */
#define UFS_QCOM_MCQ_CONFIG_OFFSET (UFS_QCOM_MCQCAP_QCFGPTR * 0x200) /* 0x1C000 */
@@ -308,6 +348,8 @@ struct ufs_qcom_host {
u32 phy_gear;
bool esi_enabled;
+
+ u32 saved_tx_eq_g1_setting;
};
struct ufs_qcom_drvdata {
diff --git a/drivers/ufs/host/ufs-sprd.c b/drivers/ufs/host/ufs-sprd.c
index 65bd8fb96b99..a5e8c591bead 100644
--- a/drivers/ufs/host/ufs-sprd.c
+++ b/drivers/ufs/host/ufs-sprd.c
@@ -161,14 +161,11 @@ static int ufs_sprd_common_init(struct ufs_hba *hba)
static int sprd_ufs_pwr_change_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status,
- const struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
{
struct ufs_sprd_host *host = ufshcd_get_variant(hba);
if (status == PRE_CHANGE) {
- memcpy(dev_req_params, dev_max_params,
- sizeof(struct ufs_pa_layer_attr));
if (host->unipro_ver >= UFS_UNIPRO_VER_1_8)
ufshcd_dme_configure_adapt(hba, dev_req_params->gear_tx,
PA_INITIAL_ADAPT);
diff --git a/drivers/ufs/host/ufshcd-pci.c b/drivers/ufs/host/ufshcd-pci.c
index 63f6b36b912f..effa3c7a01c5 100644
--- a/drivers/ufs/host/ufshcd-pci.c
+++ b/drivers/ufs/host/ufshcd-pci.c
@@ -145,7 +145,8 @@ static int ufs_intel_set_lanes(struct ufs_hba *hba, u32 lanes)
pwr_info.lane_rx = lanes;
pwr_info.lane_tx = lanes;
- ret = ufshcd_config_pwr_mode(hba, &pwr_info);
+ ret = ufshcd_change_power_mode(hba, &pwr_info,
+ UFSHCD_PMC_POLICY_DONT_FORCE);
if (ret)
dev_err(hba->dev, "%s: Setting %u lanes, err = %d\n",
__func__, lanes, ret);
@@ -154,17 +155,15 @@ static int ufs_intel_set_lanes(struct ufs_hba *hba, u32 lanes)
static int ufs_intel_lkf_pwr_change_notify(struct ufs_hba *hba,
enum ufs_notify_change_status status,
- const struct ufs_pa_layer_attr *dev_max_params,
struct ufs_pa_layer_attr *dev_req_params)
{
int err = 0;
switch (status) {
case PRE_CHANGE:
- if (ufshcd_is_hs_mode(dev_max_params) &&
+ if (ufshcd_is_hs_mode(dev_req_params) &&
(hba->pwr_info.lane_rx != 2 || hba->pwr_info.lane_tx != 2))
ufs_intel_set_lanes(hba, 2);
- memcpy(dev_req_params, dev_max_params, sizeof(*dev_req_params));
break;
case POST_CHANGE:
if (ufshcd_is_hs_mode(dev_req_params)) {